diff --git a/.gitattributes b/.gitattributes index 0319b4d2f2a9d..1d4c6252f2c75 100644 --- a/.gitattributes +++ b/.gitattributes @@ -7,3 +7,4 @@ src/etc/pkg/rust-logo.ico binary src/etc/pkg/rust-logo.png binary *.woff binary +src/vendor/* binary diff --git a/.gitignore b/.gitignore index 572111bf96158..bf66eabc1c800 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,7 @@ *.orig *.out *.patch +*.pdb *.pdf *.pg *.pot @@ -49,13 +50,19 @@ .cproject .hg/ .hgignore +.idea +__pycache__/ +*.py[cod] +*$py.class .project .settings/ .valgrindrc +.vscode/ /*-*-*-*/ /*-*-*/ /Makefile /build +/config.toml /dist/ /dl/ /doc @@ -63,6 +70,7 @@ /llvm/ /mingw-build/ /nd/ +/obj/ /rt/ /rustllvm/ /src/libunicode/DerivedCoreProperties.txt @@ -71,10 +79,8 @@ /src/libunicode/PropList.txt /src/libunicode/Scripts.txt /src/libunicode/UnicodeData.txt -/stage0/ -/stage1/ -/stage2/ -/stage3/ +/stage[0-9]+/ +/target /test/ /tmp/ TAGS @@ -86,10 +92,10 @@ config.mk config.stamp keywords.md lexer.ml -src/.DS_Store src/etc/dl src/librustc_llvm/llvmdeps.rs tmp.*.rs version.md version.ml version.texi +.cargo diff --git a/.gitmodules b/.gitmodules index eb033f5401d3c..39288a7ae4907 100644 --- a/.gitmodules +++ b/.gitmodules @@ -16,4 +16,4 @@ url = https://github.com/rust-lang/rust-installer.git [submodule "src/liblibc"] path = src/liblibc - url = https://github.com/rust-lang-nursery/libc.git + url = https://github.com/rust-lang/libc.git diff --git a/.mailmap b/.mailmap index 317a92b8f43cb..e0759cb856381 100644 --- a/.mailmap +++ b/.mailmap @@ -82,7 +82,7 @@ Gareth Daniel Smith Gareth Smith Graham Fawcett Graham Fawcett Graydon Hoare Graydon Hoare -Guillaume Gomez +Guillaume Gomez Guillaume Gomez Heather Heather Herman J. Radtke III Herman J. 
Radtke III diff --git a/.travis.yml b/.travis.yml index cc93b1127c3a7..a1bbb8a884fef 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,31 +1,82 @@ -language: generic +language: rust sudo: required dist: trusty +services: + - docker -# LLVM takes awhile to check out and otherwise we'll manage the submodules in -# our configure script, so disable auto submodule management. git: + depth: 1 submodules: false -before_install: - - echo 0 | sudo tee /proc/sys/net/ipv6/conf/lo/disable_ipv6 - - echo 'deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main' | sudo tee -a /etc/apt/sources.list - - echo 'deb-src http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main' | sudo tee -a /etc/apt/sources.list - - sudo apt-get update - - sudo apt-get --force-yes install curl make g++ python2.7 git zlib1g-dev libedit-dev llvm-3.7-tools +matrix: + include: + # Linux builders, all docker images + - env: IMAGE=arm-android + - env: IMAGE=cross + - env: IMAGE=i686-gnu + - env: IMAGE=i686-gnu-nopt + - env: IMAGE=x86_64-freebsd + - env: IMAGE=x86_64-gnu + - env: IMAGE=x86_64-gnu-cargotest + - env: IMAGE=x86_64-gnu-debug + - env: IMAGE=x86_64-gnu-nopt + - env: IMAGE=x86_64-gnu-rustbuild + - env: IMAGE=x86_64-gnu-llvm-3.7 ALLOW_PR=1 RUST_BACKTRACE=1 + - env: IMAGE=x86_64-musl + + # OSX builders + - env: > + RUST_CHECK_TARGET=check + RUST_CONFIGURE_ARGS=--target=x86_64-apple-darwin + SRC=. + os: osx + install: brew install ccache + - env: > + RUST_CHECK_TARGET=check + RUST_CONFIGURE_ARGS=--target=i686-apple-darwin + SRC=. + os: osx + install: brew install ccache + - env: > + RUST_CHECK_TARGET=check + RUST_CONFIGURE_ARGS=--target=x86_64-apple-darwin --enable-rustbuild + SRC=. + os: osx + install: brew install ccache + - env: > + RUST_CHECK_TARGET= + RUST_CONFIGURE_ARGS=--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios + SRC=. 
+ os: osx + install: brew install ccache script: - - ./configure --llvm-root=/usr/lib/llvm-3.7 - - make tidy && make check-notidy -j4 + - if [ -z "$ALLOW_PR" ] && [ "$TRAVIS_BRANCH" != "auto" ]; then + echo skipping, not a full build; + elif [ -z "$ENABLE_AUTO" ]; then + echo skipping, not quite ready yet; + elif [ "$TRAVIS_OS_NAME" = "osx" ]; then + git submodule update --init; + src/ci/run.sh; + else + git submodule update --init; + src/ci/docker/run.sh $IMAGE; + fi -# Real testing happens on http://buildbot.rust-lang.org/ -# -# See https://github.com/rust-lang/rust-buildbot -# CONTRIBUTING.md#pull-requests +# Save tagged docker images we created and load them if they're available +before_cache: + - docker history -q rust-ci | + grep -v missing | + xargs docker save | + gzip -9 > $HOME/docker/rust-ci.tar.gz +before_install: + - zcat $HOME/docker/rust-ci.tar.gz | docker load || true notifications: email: false -branches: - only: - - master +cache: + directories: + - $HOME/docker + - $HOME/.ccache + - $HOME/.cargo diff --git a/COMPILER_TESTS.md b/COMPILER_TESTS.md index e2a957e396191..91975c1f9ed9a 100644 --- a/COMPILER_TESTS.md +++ b/COMPILER_TESTS.md @@ -42,3 +42,43 @@ whole, instead of just a few lines inside the test. * `ignore-test` always ignores the test * `ignore-lldb` and `ignore-gdb` will skip the debuginfo tests * `min-{gdb,lldb}-version` +* `should-fail` indicates that the test should fail; used for "meta testing", + where we test the compiletest program itself to check that it will generate + errors in appropriate scenarios. This header is ignored for pretty-printer tests. + +## Revisions + +Certain classes of tests support "revisions" (as of the time of this +writing, this includes run-pass, compile-fail, run-fail, and +incremental, though incremental tests are somewhat +different). Revisions allow a single test file to be used for multiple +tests.
This is done by adding a special header at the top of the file: + +``` +// revisions: foo bar baz +``` + +This will result in the test being compiled (and tested) three times, +once with `--cfg foo`, once with `--cfg bar`, and once with `--cfg +baz`. You can therefore use `#[cfg(foo)]` etc within the test to tweak +each of these results. + +You can also customize headers and expected error messages to a particular +revision. To do this, add `[foo]` (or `bar`, `baz`, etc) after the `//` +comment, like so: + +``` +// A flag to pass in only for cfg `foo`: +//[foo]compile-flags: -Z verbose + +#[cfg(foo)] +fn test_foo() { + let x: usize = 32_u32; //[foo]~ ERROR mismatched types +} +``` + +Note that not all headers have meaning when customized to a revision. +For example, the `ignore-test` header (and all "ignore" headers) +currently only apply to the test as a whole, not to particular +revisions. The only headers that are intended to really work when +customized to a revision are error patterns and compiler flags. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e864172e81332..4c0f93c3703a5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,6 +6,7 @@ links to the major sections: * [Feature Requests](#feature-requests) * [Bug Reports](#bug-reports) +* [The Build System](#the-build-system) * [Pull Requests](#pull-requests) * [Writing Documentation](#writing-documentation) * [Issue Triage](#issue-triage) @@ -70,13 +71,91 @@ which includes important information about what platform you're on, what version of Rust you're using, etc. Sometimes, a backtrace is helpful, and so including that is nice. To get -a backtrace, set the `RUST_BACKTRACE` environment variable. The easiest way +a backtrace, set the `RUST_BACKTRACE` environment variable to a value +other than `0`. The easiest way to do this is to invoke `rustc` like this: ```bash $ RUST_BACKTRACE=1 rustc ... 
``` +## The Build System + +Rust's build system allows you to bootstrap the compiler, run tests & +benchmarks, generate documentation, install a fresh build of Rust, and more. +It's your best friend when working on Rust, allowing you to compile & test +your contributions before submission. + +All the configuration for the build system lives in [the `mk` directory][mkdir] +in the project root. It can be hard to follow in places, as it uses some +advanced Make features which make for some challenging reading. If you have +questions on the build system internals, try asking in +[`#rust-internals`][pound-rust-internals]. + +[mkdir]: https://github.com/rust-lang/rust/tree/master/mk/ + +### Configuration + +Before you can start building the compiler you need to configure the build for +your system. In most cases, that will just mean using the defaults provided +for Rust. Configuring involves invoking the `configure` script in the project +root. + +``` +./configure +``` + +There are large number of options accepted by this script to alter the +configuration used later in the build process. Some options to note: + +- `--enable-debug` - Build a debug version of the compiler (disables optimizations, + which speeds up compilation of stage1 rustc) +- `--enable-optimize` - Enable optimizations (can be used with `--enable-debug` + to make a debug build with optimizations) +- `--disable-valgrind-rpass` - Don't run tests with valgrind +- `--enable-clang` - Prefer clang to gcc for building dependencies (e.g., LLVM) +- `--enable-ccache` - Invoke clang/gcc with ccache to re-use object files between builds +- `--enable-compiler-docs` - Build compiler documentation + +To see a full list of options, run `./configure --help`. + +### Useful Targets + +Some common make targets are: + +- `make tips` - show useful targets, variables and other tips for working with + the build system. +- `make rustc-stage1` - build up to (and including) the first stage. 
For most + cases we don't need to build the stage2 compiler, so we can save time by not + building it. The stage1 compiler is a fully functioning compiler and + (probably) will be enough to determine if your change works as expected. +- `make $host/stage1/bin/rustc` - Where $host is a target triple like x86_64-unknown-linux-gnu. + This will build just rustc, without libstd. This is the fastest way to recompile after + you changed only rustc source code. Note however that the resulting rustc binary + won't have a stdlib to link against by default. You can build libstd once with + `make rustc-stage1`, rustc will pick it up afterwards. libstd is only guaranteed to + work if recompiled, so if there are any issues recompile it. +- `make check` - build the full compiler & run all tests (takes a while). This + is what gets run by the continuous integration system against your pull + request. You should run this before submitting to make sure your tests pass + & everything builds in the correct manner. +- `make check-stage1-std NO_REBUILD=1` - test the standard library without + rebuilding the entire compiler +- `make check TESTNAME=` - Run a matching set of tests. + - `TESTNAME` should be a substring of the tests to match against e.g. it could + be the fully qualified test name, or just a part of it. + `TESTNAME=collections::hash::map::test_map::test_capacity_not_less_than_len` + or `TESTNAME=test_capacity_not_less_than_len`. +- `make check-stage1-rpass TESTNAME=` - Run a single + rpass test with the stage1 compiler (this will be quicker than running the + command above as we only build the stage1 compiler, not the entire thing). + You can also leave off the `-rpass` to run all stage1 test types. +- `make check-stage1-coretest` - Run stage1 tests in `libcore`. +- `make tidy` - Check that the source code is in compliance with Rust's style + guidelines. 
There is no official document describing Rust's full guidelines + as of yet, but basic rules like 4 spaces for indentation and no more than 99 + characters in a single line should be kept in mind when writing code. + ## Pull Requests Pull requests are the primary mechanism we use to change Rust. GitHub itself @@ -102,6 +181,15 @@ you’re adding something to the standard library, try This will not rebuild the compiler, but will run the tests. +Please make sure your pull request is in compliance with Rust's style +guidelines by running + + $ make tidy + +Make this check before every pull request (and every new commit in a pull +request) ; you can add [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) +before every push to make sure you never forget to make this check. + All pull requests are reviewed by another person. We have a bot, @rust-highfive, that will automatically assign a random person to review your request. @@ -155,7 +243,7 @@ To find documentation-related issues, sort by the [A-docs label][adocs]. In many cases, you don't need a full `make doc`. You can use `rustdoc` directly to check small fixes. For example, `rustdoc src/doc/reference.md` will render reference to `doc/reference.html`. The CSS might be messed up, but you can -verify that HTML is right. +verify that the HTML is right. ## Issue Triage @@ -241,7 +329,7 @@ are: [gsearchdocs]: https://www.google.com/search?q=site:doc.rust-lang.org+your+query+here [rif]: http://internals.rust-lang.org [rr]: https://doc.rust-lang.org/book/README.html -[tlgba]: http://tomlee.co/2014/04/03/a-more-detailed-tour-of-the-rust-compiler/ +[tlgba]: http://tomlee.co/2014/04/a-more-detailed-tour-of-the-rust-compiler/ [ro]: http://www.rustaceans.org/ [rctd]: ./COMPILER_TESTS.md [cheatsheet]: http://buildbot.rust-lang.org/homu/ diff --git a/COPYRIGHT b/COPYRIGHT index 5ab70b7120fd9..abe8998030871 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -6,8 +6,8 @@ terms. 
Longer version: -The Rust Project is copyright 2016, The Rust Project -Developers (given in the file AUTHORS.txt). +The Rust Project is copyright 2010, The Rust Project +Developers. Licensed under the Apache License, Version 2.0 # # @@ -210,29 +238,15 @@ include $(CFG_SRC_DIR)mk/debuggers.mk # Secondary makefiles, conditionalized for speed ###################################################################### -# Binary snapshots -ifneq ($(strip $(findstring snap,$(MAKECMDGOALS)) \ - $(findstring clean,$(MAKECMDGOALS))),) - CFG_INFO := $(info cfg: including snap rules) - include $(CFG_SRC_DIR)mk/snap.mk -endif - # The test suite ifneq ($(strip $(findstring check,$(MAKECMDGOALS)) \ $(findstring test,$(MAKECMDGOALS)) \ - $(findstring perf,$(MAKECMDGOALS)) \ $(findstring tidy,$(MAKECMDGOALS))),) CFG_INFO := $(info cfg: including test rules) include $(CFG_SRC_DIR)mk/tests.mk include $(CFG_SRC_DIR)mk/grammar.mk endif -# Performance and benchmarking -ifneq ($(findstring perf,$(MAKECMDGOALS)),) - CFG_INFO := $(info cfg: including perf rules) - include $(CFG_SRC_DIR)mk/perf.mk -endif - # Copy all the distributables to another directory for binary install ifneq ($(strip $(findstring prepare,$(MAKECMDGOALS)) \ $(findstring dist,$(MAKECMDGOALS)) \ @@ -263,7 +277,17 @@ endif # CTAGS building ifneq ($(strip $(findstring TAGS.emacs,$(MAKECMDGOALS)) \ - $(findstring TAGS.vi,$(MAKECMDGOALS))),) + $(findstring TAGS.vi,$(MAKECMDGOALS)) \ + $(findstring TAGS.rustc.emacs,$(MAKECMDGOALS)) \ + $(findstring TAGS.rustc.vi,$(MAKECMDGOALS))),) CFG_INFO := $(info cfg: including ctags rules) include $(CFG_SRC_DIR)mk/ctags.mk endif + +.DEFAULT: + @echo + @echo "======================================================" + @echo "== If you need help, run 'make help' or 'make tips' ==" + @echo "======================================================" + @echo + exit 1 diff --git a/README.md b/README.md index 84fb8f3e5b089..7360651095bb5 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # The Rust 
Programming Language -This is the main source code repository for [Rust]. It contains the compiler, standard library, -and documentation. +This is the main source code repository for [Rust]. It contains the compiler, +standard library, and documentation. [Rust]: https://www.rust-lang.org @@ -9,16 +9,17 @@ and documentation. Read ["Installing Rust"] from [The Book]. -["Installing Rust"]: https://doc.rust-lang.org/book/installing-rust.html +["Installing Rust"]: https://doc.rust-lang.org/book/getting-started.html#installing-rust [The Book]: https://doc.rust-lang.org/book/index.html ## Building from Source 1. Make sure you have installed the dependencies: - * `g++` 4.7 or `clang++` 3.x - * `python` 2.7 or later (but not 3.x) + * `g++` 4.7 or later or `clang++` 3.x + * `python` 2.7 (but not 3.x) * GNU `make` 3.81 or later + * `cmake` 3.4.3 or later * `curl` * `git` @@ -63,35 +64,38 @@ build. #### MinGW -[MSYS2](http://msys2.github.io/) can be used to easily build Rust on Windows: +[MSYS2][msys2] can be used to easily build Rust on Windows: -1. Grab the latest MSYS2 installer and go through the installer. +[msys2]: https://msys2.github.io/ -2. From the MSYS2 terminal, install the `mingw64` toolchain and other required - tools. +1. Grab the latest [MSYS2 installer][msys2] and go through the installer. + +2. Run `mingw32_shell.bat` or `mingw64_shell.bat` from wherever you installed + MSYS2 (i.e. `C:\msys64`), depending on whether you want 32-bit or 64-bit + Rust. (As of the latest version of MSYS2 you have to run `msys2_shell.cmd + -mingw32` or `msys2_shell.cmd -mingw64` from the command line instead) + +3. From this terminal, install the required tools: ```sh # Update package mirrors (may be needed if you have a fresh install of MSYS2) $ pacman -Sy pacman-mirrors - ``` - -Download [MinGW from -here](http://mingw-w64.org/doku.php/download/mingw-builds), and choose the -`threads=win32,exceptions=dwarf/seh` flavor when installing. 
After installing, -add its `bin` directory to your `PATH`. This is due to [#28260](https://github.com/rust-lang/rust/issues/28260), in the future, -installing from pacman should be just fine. + # Install build tools needed for Rust. If you're building a 32-bit compiler, + # then replace "x86_64" below with "i686". If you've already got git, python, + # or CMake installed and in PATH you can remove them from this list. Note + # that it is important that the `python2` and `cmake` packages **not** used. + # The build has historically been known to fail with these packages. + $ pacman -S git \ + make \ + diffutils \ + tar \ + mingw-w64-x86_64-python2 \ + mingw-w64-x86_64-cmake \ + mingw-w64-x86_64-gcc ``` - # Make git available in MSYS2 (if not already available on path) - $ pacman -S git - $ pacman -S base-devel - ``` - -3. Run `mingw32_shell.bat` or `mingw64_shell.bat` from wherever you installed - MSYS2 (i.e. `C:\msys`), depending on whether you want 32-bit or 64-bit Rust. - -4. Navigate to Rust's source code, configure and build it: +4. Navigate to Rust's source code (or clone it), then configure and build it: ```sh $ ./configure @@ -102,7 +106,7 @@ installing from pacman should be just fine. MSVC builds of Rust additionally require an installation of Visual Studio 2013 (or later) so `rustc` can use its linker. Make sure to check the “C++ tools” -option. In addition, `cmake` needs to be installed to build LLVM. +option. With these dependencies installed, the build takes two steps: @@ -111,12 +115,37 @@ $ ./configure $ make && make install ``` +#### MSVC with rustbuild + +The old build system, based on makefiles, is currently being rewritten into a +Rust-based build system called rustbuild. This can be used to bootstrap the +compiler on MSVC without needing to install MSYS or MinGW. 
All you need are +[Python 2](https://www.python.org/downloads/), +[CMake](https://cmake.org/download/), and +[Git](https://git-scm.com/downloads) in your PATH (make sure you do not use the +ones from MSYS if you have it installed). You'll also need Visual Studio 2013 or +newer with the C++ tools. Then all you need to do is to kick off rustbuild. + +``` +python x.py build +``` + +Currently rustbuild only works with some known versions of Visual Studio. If you +have a more recent version installed that a part of rustbuild doesn't understand +then you may need to force rustbuild to use an older version. This can be done +by manually calling the appropriate vcvars file before running the bootstrap. + +``` +CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\vcvars64.bat" +python x.py build +``` + ## Building Documentation If you’d like to build the documentation, it’s almost the same: ```sh -./configure +$ ./configure $ make docs ``` @@ -142,7 +171,7 @@ fetch snapshots, and an OS that can execute the available snapshot binaries. Snapshot binaries are currently built and tested on several platforms: -| Platform \ Architecture | x86 | x86_64 | +| Platform / Architecture | x86 | x86_64 | |--------------------------------|-----|--------| | Windows (7, 8, Server 2008 R2) | ✓ | ✓ | | Linux (2.6.18 or later) | ✓ | ✓ | @@ -151,8 +180,8 @@ Snapshot binaries are currently built and tested on several platforms: You may find that other platforms work, but these are our officially supported build environments that are most likely to work. -Rust currently needs between 600MiB and 1.5GiB to build, depending on platform. If it hits -swap, it will take a very long time to build. +Rust currently needs between 600MiB and 1.5GiB to build, depending on platform. +If it hits swap, it will take a very long time to build. There is more advice about hacking on Rust in [CONTRIBUTING.md]. @@ -177,10 +206,11 @@ To contribute to Rust, please see [CONTRIBUTING](CONTRIBUTING.md). 
Rust has an [IRC] culture and most real-time collaboration happens in a variety of channels on Mozilla's IRC network, irc.mozilla.org. The most popular channel is [#rust], a venue for general discussion about -Rust, and a good place to ask for help. +Rust. And a good place to ask for help would be [#rust-beginners]. [IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat [#rust]: irc://irc.mozilla.org/rust +[#rust-beginners]: irc://irc.mozilla.org/rust-beginners ## License @@ -188,4 +218,5 @@ Rust is primarily distributed under the terms of both the MIT license and the Apache License (Version 2.0), with portions covered by various BSD-like licenses. -See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and [COPYRIGHT](COPYRIGHT) for details. +See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and +[COPYRIGHT](COPYRIGHT) for details. diff --git a/RELEASES.md b/RELEASES.md index f8679431339da..e468a86e7acc3 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,1886 @@ +Version 1.13.0 (2016-11-10) +=========================== + +Language +-------- + +* [Stabilize the `?` operator][36995]. `?` is a simple way to propagate + errors, like the `try!` macro, described in [RFC 0243]. +* [Stabilize macros in type position][36014]. Described in [RFC 873]. +* [Stabilize attributes on statements][36995]. Described in [RFC 0016]. 
+* [Fix `#[derive]` for empty tuple structs/variants][35728] +* [Fix lifetime rules for 'if' conditions][36029] +* [Avoid loading and parsing unconfigured non-inline modules][36482] + +Compiler +-------- + +* [Add the `-C link-arg` argument][36574] +* [Remove the old AST-based backend from rustc_trans][35764] +* [Don't enable NEON by default on armv7 Linux][35814] +* [Fix debug line number info for macro expansions][35238] +* [Do not emit "class method" debuginfo for types that are not + DICompositeType][36008] +* [Warn about multiple conflicting #[repr] hints][34623] +* [When sizing DST, don't double-count nested struct prefixes][36351] +* [Default RUST_MIN_STACK to 16MiB for now][36505] +* [Improve rlib metadata format][36551]. Reduces rlib size significantly. +* [Reject macros with empty repetitions to avoid infinite loop][36721] +* [Expand macros without recursing to avoid stack overflows][36214] + +Diagnostics +----------- + +* [Replace macro backtraces with labeled local uses][35702] +* [Improve error message for missplaced doc comments][33922] +* [Buffer unix and lock windows to prevent message interleaving][35975] +* [Update lifetime errors to specifically note temporaries][36171] +* [Special case a few colors for Windows][36178] +* [Suggest `use self` when such an import resolves][36289] +* [Be more specific when type parameter shadows primitive type][36338] +* Many minor improvements + +Compile-time Optimizations +-------------------------- + +* [Compute and cache HIR hashes at beginning][35854] +* [Don't hash types in loan paths][36004] +* [Cache projections in trans][35761] +* [Optimize the parser's last token handling][36527] +* [Only instantiate #[inline] functions in codegen units referencing + them][36524]. This leads to big improvements in cases where crates export + define many inline functions without using them directly. 
+* [Lazily allocate TypedArena's first chunk][36592] +* [Don't allocate during default HashSet creation][36734] + +Stabilized APIs +--------------- + +* [`checked_abs`] +* [`wrapping_abs`] +* [`overflowing_abs`] +* [`RefCell::try_borrow`] +* [`RefCell::try_borrow_mut`] + +Libraries +--------- + +* [Add `assert_ne!` and `debug_assert_ne!`][35074] +* [Make `vec_deque::Drain`, `hash_map::Drain`, and `hash_set::Drain` + covariant][35354] +* [Implement `AsRef<[T]>` for `std::slice::Iter`][35559] +* [Implement `Debug` for `std::vec::IntoIter`][35707] +* [`CString`: avoid excessive growth just to 0-terminate][35871] +* [Implement `CoerceUnsized` for `{Cell, RefCell, UnsafeCell}`][35627] +* [Use arc4rand on FreeBSD][35884] +* [memrchr: Correct aligned offset computation][35969] +* [Improve Demangling of Rust Symbols][36059] +* [Use monotonic time in condition variables][35048] +* [Implement `Debug` for `std::path::{Components,Iter}`][36101] +* [Implement conversion traits for `char`][35755] +* [Fix illegal instruction caused by overflow in channel cloning][36104] +* [Zero first byte of CString on drop][36264] +* [Inherit overflow checks for sum and product][36372] +* [Add missing Eq implementations][36423] +* [Implement `Debug` for `DirEntry`][36631] +* [When `getaddrinfo` returns `EAI_SYSTEM` retrieve actual error from + `errno`][36754] +* [`SipHasher`] is deprecated. Use [`DefaultHasher`]. +* [Implement more traits for `std::io::ErrorKind`][35911] +* [Optimize BinaryHeap bounds checking][36072] +* [Work around pointer aliasing issue in `Vec::extend_from_slice`, + `extend_with_element`][36355] +* [Fix overflow checking in unsigned pow()][34942] + +Cargo +----- + +* This release includes security fixes to both curl and OpenSSL. 
+* [Fix transitive doctests when panic=abort][cargo/3021] +* [Add --all-features flag to cargo][cargo/3038] +* [Reject path-based dependencies in `cargo package`][cargo/3060] +* [Don't parse the home directory more than once][cargo/3078] +* [Don't try to generate Cargo.lock on empty workspaces][cargo/3092] +* [Update OpenSSL to 1.0.2j][cargo/3121] +* [Add license and license_file to cargo metadata output][cargo/3110] +* [Make crates-io registry URL optional in config; ignore all changes to + source.crates-io][cargo/3089] +* [Don't download dependencies from other platforms][cargo/3123] +* [Build transitive dev-dependencies when needed][cargo/3125] +* [Add support for per-target rustflags in .cargo/config][cargo/3157] +* [Avoid updating registry when adding existing deps][cargo/3144] +* [Warn about path overrides that won't work][cargo/3136] +* [Use workspaces during `cargo install`][cargo/3146] +* [Leak mspdbsrv.exe processes on Windows][cargo/3162] +* [Add --message-format flag][cargo/3000] +* [Pass target environment for rustdoc][cargo/3205] +* [Use `CommandExt::exec` for `cargo run` on Unix][cargo/2818] +* [Update curl and curl-sys][cargo/3241] +* [Call rustdoc test with the correct cfg flags of a package][cargo/3242] + +Tooling +------- + +* [rustdoc: Add the `--sysroot` argument][36586] +* [rustdoc: Fix a couple of issues with the search results][35655] +* [rustdoc: remove the `!` from macro URLs and titles][35234] +* [gdb: Fix pretty-printing special-cased Rust types][35585] +* [rustdoc: Filter more incorrect methods inherited through Deref][36266] + +Misc +---- + +* [Remove unmaintained style guide][35124] +* [Add s390x support][36369] +* [Initial work at Haiku OS support][36727] +* [Add mips-uclibc targets][35734] +* [Crate-ify compiler-rt into compiler-builtins][35021] +* [Add rustc version info (git hash + date) to dist tarball][36213] +* Many documentation improvements + +Compatibility Notes +------------------- + +* [`SipHasher`] is deprecated. 
Use [`DefaultHasher`]. +* [Deny (by default) transmuting from fn item types to pointer-sized + types][34923]. Continuing the long transition to zero-sized fn items, + per [RFC 401]. +* [Fix `#[derive]` for empty tuple structs/variants][35728]. + Part of [RFC 1506]. +* [Issue deprecation warnings for safe accesses to extern statics][36173] +* [Fix lifetime rules for 'if' conditions][36029]. +* [Inherit overflow checks for sum and product][36372]. +* [Forbid user-defined macros named "macro_rules"][36730]. + +[33922]: https://github.com/rust-lang/rust/pull/33922 +[34623]: https://github.com/rust-lang/rust/pull/34623 +[34923]: https://github.com/rust-lang/rust/pull/34923 +[34942]: https://github.com/rust-lang/rust/pull/34942 +[34982]: https://github.com/rust-lang/rust/pull/34982 +[35021]: https://github.com/rust-lang/rust/pull/35021 +[35048]: https://github.com/rust-lang/rust/pull/35048 +[35074]: https://github.com/rust-lang/rust/pull/35074 +[35124]: https://github.com/rust-lang/rust/pull/35124 +[35234]: https://github.com/rust-lang/rust/pull/35234 +[35238]: https://github.com/rust-lang/rust/pull/35238 +[35354]: https://github.com/rust-lang/rust/pull/35354 +[35559]: https://github.com/rust-lang/rust/pull/35559 +[35585]: https://github.com/rust-lang/rust/pull/35585 +[35627]: https://github.com/rust-lang/rust/pull/35627 +[35655]: https://github.com/rust-lang/rust/pull/35655 +[35702]: https://github.com/rust-lang/rust/pull/35702 +[35707]: https://github.com/rust-lang/rust/pull/35707 +[35728]: https://github.com/rust-lang/rust/pull/35728 +[35734]: 
https://github.com/rust-lang/rust/pull/35734 +[35755]: https://github.com/rust-lang/rust/pull/35755 +[35761]: https://github.com/rust-lang/rust/pull/35761 +[35764]: https://github.com/rust-lang/rust/pull/35764 +[35814]: https://github.com/rust-lang/rust/pull/35814 +[35854]: https://github.com/rust-lang/rust/pull/35854 +[35871]: https://github.com/rust-lang/rust/pull/35871 +[35884]: https://github.com/rust-lang/rust/pull/35884 +[35911]: https://github.com/rust-lang/rust/pull/35911 +[35969]: https://github.com/rust-lang/rust/pull/35969 +[35975]: https://github.com/rust-lang/rust/pull/35975 +[36004]: https://github.com/rust-lang/rust/pull/36004 +[36008]: https://github.com/rust-lang/rust/pull/36008 +[36014]: https://github.com/rust-lang/rust/pull/36014 +[36029]: https://github.com/rust-lang/rust/pull/36029 +[36059]: https://github.com/rust-lang/rust/pull/36059 +[36072]: https://github.com/rust-lang/rust/pull/36072 +[36101]: https://github.com/rust-lang/rust/pull/36101 +[36104]: https://github.com/rust-lang/rust/pull/36104 +[36171]: https://github.com/rust-lang/rust/pull/36171 +[36173]: https://github.com/rust-lang/rust/pull/36173 +[36178]: https://github.com/rust-lang/rust/pull/36178 +[36213]: https://github.com/rust-lang/rust/pull/36213 +[36214]: https://github.com/rust-lang/rust/pull/36214 +[36264]: https://github.com/rust-lang/rust/pull/36264 +[36266]: https://github.com/rust-lang/rust/pull/36266 +[36289]: 
https://github.com/rust-lang/rust/pull/36289 +[36338]: https://github.com/rust-lang/rust/pull/36338 +[36351]: https://github.com/rust-lang/rust/pull/36351 +[36355]: https://github.com/rust-lang/rust/pull/36355 +[36369]: https://github.com/rust-lang/rust/pull/36369 +[36372]: https://github.com/rust-lang/rust/pull/36372 +[36423]: https://github.com/rust-lang/rust/pull/36423 +[36482]: https://github.com/rust-lang/rust/pull/36482 +[36505]: https://github.com/rust-lang/rust/pull/36505 +[36524]: https://github.com/rust-lang/rust/pull/36524 +[36527]: https://github.com/rust-lang/rust/pull/36527 +[36551]: https://github.com/rust-lang/rust/pull/36551 +[36574]: https://github.com/rust-lang/rust/pull/36574 +[36586]: https://github.com/rust-lang/rust/pull/36586 +[36592]: https://github.com/rust-lang/rust/pull/36592 +[36631]: https://github.com/rust-lang/rust/pull/36631 +[36639]: https://github.com/rust-lang/rust/pull/36639 +[36721]: https://github.com/rust-lang/rust/pull/36721 +[36727]: https://github.com/rust-lang/rust/pull/36727 +[36730]: https://github.com/rust-lang/rust/pull/36730 +[36734]: https://github.com/rust-lang/rust/pull/36734 +[36754]: https://github.com/rust-lang/rust/pull/36754 +[36995]: https://github.com/rust-lang/rust/pull/36995 +[RFC 0016]: https://github.com/rust-lang/rfcs/blob/master/text/0016-more-attributes.md +[RFC 0243]: https://github.com/rust-lang/rfcs/blob/master/text/0243-trait-based-exception-handling.md +[RFC 1506]: 
https://github.com/rust-lang/rfcs/blob/master/text/1506-adt-kinds.md +[RFC 401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md +[RFC 873]: https://github.com/rust-lang/rfcs/blob/master/text/0873-type-macros.md +[cargo/2818]: https://github.com/rust-lang/cargo/pull/2818 +[cargo/3000]: https://github.com/rust-lang/cargo/pull/3000 +[cargo/3021]: https://github.com/rust-lang/cargo/pull/3021 +[cargo/3038]: https://github.com/rust-lang/cargo/pull/3038 +[cargo/3060]: https://github.com/rust-lang/cargo/pull/3060 +[cargo/3078]: https://github.com/rust-lang/cargo/pull/3078 +[cargo/3089]: https://github.com/rust-lang/cargo/pull/3089 +[cargo/3092]: https://github.com/rust-lang/cargo/pull/3092 +[cargo/3110]: https://github.com/rust-lang/cargo/pull/3110 +[cargo/3121]: https://github.com/rust-lang/cargo/pull/3121 +[cargo/3123]: https://github.com/rust-lang/cargo/pull/3123 +[cargo/3125]: https://github.com/rust-lang/cargo/pull/3125 +[cargo/3136]: https://github.com/rust-lang/cargo/pull/3136 +[cargo/3144]: https://github.com/rust-lang/cargo/pull/3144 +[cargo/3146]: https://github.com/rust-lang/cargo/pull/3146 +[cargo/3157]: https://github.com/rust-lang/cargo/pull/3157 +[cargo/3162]: https://github.com/rust-lang/cargo/pull/3162 +[cargo/3205]: https://github.com/rust-lang/cargo/pull/3205 +[cargo/3241]: https://github.com/rust-lang/cargo/pull/3241 +[cargo/3242]: https://github.com/rust-lang/cargo/pull/3242 +[rustup]: https://www.rustup.rs +[`checked_abs`]: 
https://doc.rust-lang.org/std/primitive.i32.html#method.checked_abs +[`wrapping_abs`]: https://doc.rust-lang.org/std/primitive.i32.html#method.wrapping_abs +[`overflowing_abs`]: https://doc.rust-lang.org/std/primitive.i32.html#method.overflowing_abs +[`RefCell::try_borrow`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.try_borrow +[`RefCell::try_borrow_mut`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.try_borrow_mut +[`SipHasher`]: https://doc.rust-lang.org/std/hash/struct.SipHasher.html +[`DefaultHasher`]: https://doc.rust-lang.org/std/collections/hash_map/struct.DefaultHasher.html + + +Version 1.12.1 (2016-10-20) +=========================== + +Regression Fixes +---------------- + +* [ICE: 'rustc' panicked at 'assertion failed: concrete_substs.is_normalized_for_trans()' #36381][36381] +* [Confusion with double negation and booleans][36856] +* [rustc 1.12.0 fails with SIGSEGV in release mode (syn crate 0.8.0)][36875] +* [Rustc 1.12.0 Windows build of `ethcore` crate fails with LLVM error][36924] +* [1.12.0: High memory usage when linking in release mode with debug info][36926] +* [Corrupted memory after updated to 1.12][36936] +* ["Let NullaryConstructor = something;" causes internal compiler error: "tried to overwrite interned AdtDef"][37026] +* [Fix ICE: inject bitcast if types mismatch for invokes/calls/stores][37112] +* [debuginfo: Handle spread_arg case in MIR-trans in a more stable way.][37153] + +[36381]: https://github.com/rust-lang/rust/issues/36381 +[36856]: https://github.com/rust-lang/rust/issues/36856 +[36875]: https://github.com/rust-lang/rust/issues/36875 +[36924]: https://github.com/rust-lang/rust/issues/36924 +[36926]: https://github.com/rust-lang/rust/issues/36926 +[36936]: https://github.com/rust-lang/rust/issues/36936 +[37026]: 
https://github.com/rust-lang/rust/issues/37026 +[37112]: https://github.com/rust-lang/rust/issues/37112 +[37153]: https://github.com/rust-lang/rust/issues/37153 + + +Version 1.12.0 (2016-09-29) +=========================== + +Highlights +---------- + +* [`rustc` translates code to LLVM IR via its own "middle" IR (MIR)] + (https://github.com/rust-lang/rust/pull/34096). + This translation pass is far simpler than the previous AST->LLVM pass, and + creates opportunities to perform new optimizations directly on the MIR. It + was previously described [on the Rust blog] + (https://blog.rust-lang.org/2016/04/19/MIR.html). +* [`rustc` presents a new, more readable error format, along with + machine-readable JSON error output for use by IDEs] + (https://github.com/rust-lang/rust/pull/35401). + Most common editors supporting Rust have been updated to work with it. It was + previously described [on the Rust blog] + (https://blog.rust-lang.org/2016/08/10/Shape-of-errors-to-come.html). + +Compiler +-------- + +* [`rustc` translates code to LLVM IR via its own "middle" IR (MIR)] + (https://github.com/rust-lang/rust/pull/34096). + This translation pass is far simpler than the previous AST->LLVM pass, and + creates opportunities to perform new optimizations directly on the MIR. It + was previously described [on the Rust blog] + (https://blog.rust-lang.org/2016/04/19/MIR.html). 
+* [Print the Rust target name, not the LLVM target name, with + `--print target-list`] + (https://github.com/rust-lang/rust/pull/35489) +* [The computation of `TypeId` is correct in some cases where it was previously + producing inconsistent results] + (https://github.com/rust-lang/rust/pull/35267) +* [The `mips-unknown-linux-gnu` target uses hardware floating point by default] + (https://github.com/rust-lang/rust/pull/34910) +* [The `rustc` arguments, `--print target-cpus`, `--print target-features`, + `--print relocation-models`, and `--print code-models` print the available + options to the `-C target-cpu`, `-C target-feature`, `-C relocation-model` and + `-C code-model` code generation arguments] + (https://github.com/rust-lang/rust/pull/34845) +* [`rustc` supports three new MUSL targets on ARM: `arm-unknown-linux-musleabi`, + `arm-unknown-linux-musleabihf`, and `armv7-unknown-linux-musleabihf`] + (https://github.com/rust-lang/rust/pull/35060). + These targets produce statically-linked binaries. There are no binary release + builds yet though. + +Diagnostics +----------- + +* [`rustc` presents a new, more readable error format, along with + machine-readable JSON error output for use by IDEs] + (https://github.com/rust-lang/rust/pull/35401). + Most common editors supporting Rust have been updated to work with it. It was + previously described [on the Rust blog] + (https://blog.rust-lang.org/2016/08/10/Shape-of-errors-to-come.html). 
+* [In error descriptions, references are now described in plain English, + instead of as "&-ptr"] + (https://github.com/rust-lang/rust/pull/35611) +* [In error type descriptions, unknown numeric types are named `{integer}` or + `{float}` instead of `_`] + (https://github.com/rust-lang/rust/pull/35080) +* [`rustc` emits a clearer error when inner attributes follow a doc comment] + (https://github.com/rust-lang/rust/pull/34676) + +Language +-------- + +* [`macro_rules!` invocations can be made within `macro_rules!` invocations] + (https://github.com/rust-lang/rust/pull/34925) +* [`macro_rules!` meta-variables are hygienic] + (https://github.com/rust-lang/rust/pull/35453) +* [`macro_rules!` `tt` matchers can be reparsed correctly, making them much more + useful] + (https://github.com/rust-lang/rust/pull/34908) +* [`macro_rules!` `stmt` matchers correctly consume the entire contents when + inside non-braces invocations] + (https://github.com/rust-lang/rust/pull/34886) +* [Semicolons are properly required as statement delimiters inside + `macro_rules!` invocations] + (https://github.com/rust-lang/rust/pull/34660) +* [`cfg_attr` works on `path` attributes] + (https://github.com/rust-lang/rust/pull/34546) + +Stabilized APIs +--------------- + +* [`Cell::as_ptr`] + (https://doc.rust-lang.org/std/cell/struct.Cell.html#method.as_ptr) +* [`RefCell::as_ptr`] + (https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.as_ptr) +* [`IpAddr::is_unspecified`] + (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_unspecified) +* [`IpAddr::is_loopback`] + (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_loopback) +* [`IpAddr::is_multicast`] + (https://doc.rust-lang.org/std/net/enum.IpAddr.html#method.is_multicast) +* [`Ipv4Addr::is_unspecified`] + 
(https://doc.rust-lang.org/std/net/struct.Ipv4Addr.html#method.is_unspecified) +* [`Ipv6Addr::octets`] + (https://doc.rust-lang.org/std/net/struct.Ipv6Addr.html#method.octets) +* [`LinkedList::contains`] + (https://doc.rust-lang.org/std/collections/linked_list/struct.LinkedList.html#method.contains) +* [`VecDeque::contains`] + (https://doc.rust-lang.org/std/collections/vec_deque/struct.VecDeque.html#method.contains) +* [`ExitStatusExt::from_raw`] + (https://doc.rust-lang.org/std/os/unix/process/trait.ExitStatusExt.html#tymethod.from_raw). + Both on Unix and Windows. +* [`Receiver::recv_timeout`] + (https://doc.rust-lang.org/std/sync/mpsc/struct.Receiver.html#method.recv_timeout) +* [`RecvTimeoutError`] + (https://doc.rust-lang.org/std/sync/mpsc/enum.RecvTimeoutError.html) +* [`BinaryHeap::peek_mut`] + (https://doc.rust-lang.org/std/collections/binary_heap/struct.BinaryHeap.html#method.peek_mut) +* [`PeekMut`] + (https://doc.rust-lang.org/std/collections/binary_heap/struct.PeekMut.html) +* [`iter::Product`] + (https://doc.rust-lang.org/std/iter/trait.Product.html) +* [`iter::Sum`] + (https://doc.rust-lang.org/std/iter/trait.Sum.html) +* [`OccupiedEntry::remove_entry`] + (https://doc.rust-lang.org/std/collections/btree_map/struct.OccupiedEntry.html#method.remove_entry) +* [`VacantEntry::into_key`] + (https://doc.rust-lang.org/std/collections/btree_map/struct.VacantEntry.html#method.into_key) + +Libraries +--------- + +* [The `format!` macro and friends now allow a single argument to be formatted + in multiple styles] + (https://github.com/rust-lang/rust/pull/33642) +* [The lifetime bounds on `[T]::binary_search_by` and + `[T]::binary_search_by_key` have been adjusted to be more flexible] + (https://github.com/rust-lang/rust/pull/34762) +* [`Option` implements `From` for its contained type] + (https://github.com/rust-lang/rust/pull/34828) +* [`Cell`, `RefCell` and `UnsafeCell` implement `From` for their 
contained type] + (https://github.com/rust-lang/rust/pull/35392) +* [`RwLock` panics if the reader count overflows] + (https://github.com/rust-lang/rust/pull/35378) +* [`vec_deque::Drain`, `hash_map::Drain` and `hash_set::Drain` are covariant] + (https://github.com/rust-lang/rust/pull/35354) +* [`vec::Drain` and `binary_heap::Drain` are covariant] + (https://github.com/rust-lang/rust/pull/34951) +* [`Cow` implements `FromIterator` for `char`, `&str` and `String`] + (https://github.com/rust-lang/rust/pull/35064) +* [Sockets on Linux are correctly closed in subprocesses via `SOCK_CLOEXEC`] + (https://github.com/rust-lang/rust/pull/34946) +* [`hash_map::Entry`, `hash_map::VacantEntry` and `hash_map::OccupiedEntry` + implement `Debug`] + (https://github.com/rust-lang/rust/pull/34937) +* [`btree_map::Entry`, `btree_map::VacantEntry` and `btree_map::OccupiedEntry` + implement `Debug`] + (https://github.com/rust-lang/rust/pull/34885) +* [`String` implements `AddAssign`] + (https://github.com/rust-lang/rust/pull/34890) +* [Variadic `extern fn` pointers implement the `Clone`, `PartialEq`, `Eq`, + `PartialOrd`, `Ord`, `Hash`, `fmt::Pointer`, and `fmt::Debug` traits] + (https://github.com/rust-lang/rust/pull/34879) +* [`FileType` implements `Debug`] + (https://github.com/rust-lang/rust/pull/34757) +* [References to `Mutex` and `RwLock` are unwind-safe] + (https://github.com/rust-lang/rust/pull/34756) +* [`mpsc::sync_channel` `Receiver`s return any available message before + reporting a disconnect] + (https://github.com/rust-lang/rust/pull/34731) +* [Unicode definitions have been updated to 9.0] + (https://github.com/rust-lang/rust/pull/34599) +* [`env` iterators implement `DoubleEndedIterator`] + 
(https://github.com/rust-lang/rust/pull/33312) + +Cargo +----- + +* [Support local mirrors of registries] + (https://github.com/rust-lang/cargo/pull/2857) +* [Add support for command aliases] + (https://github.com/rust-lang/cargo/pull/2679) +* [Allow `opt-level="s"` / `opt-level="z"` in profile overrides] + (https://github.com/rust-lang/cargo/pull/3007) +* [Make `cargo doc --open --target` work as expected] + (https://github.com/rust-lang/cargo/pull/2988) +* [Speed up noop registry updates] + (https://github.com/rust-lang/cargo/pull/2974) +* [Update OpenSSL] + (https://github.com/rust-lang/cargo/pull/2971) +* [Fix `--panic=abort` with plugins] + (https://github.com/rust-lang/cargo/pull/2954) +* [Always pass `-C metadata` to the compiler] + (https://github.com/rust-lang/cargo/pull/2946) +* [Fix depending on git repos with workspaces] + (https://github.com/rust-lang/cargo/pull/2938) +* [Add a `--lib` flag to `cargo new`] + (https://github.com/rust-lang/cargo/pull/2921) +* [Add `http.cainfo` for custom certs] + (https://github.com/rust-lang/cargo/pull/2917) +* [Indicate the compilation profile after compiling] + (https://github.com/rust-lang/cargo/pull/2909) +* [Allow enabling features for dependencies with `--features`] + (https://github.com/rust-lang/cargo/pull/2876) +* [Add `--jobs` flag to `cargo package`] + (https://github.com/rust-lang/cargo/pull/2867) +* [Add `--dry-run` to `cargo publish`] + (https://github.com/rust-lang/cargo/pull/2849) +* [Add support for `RUSTDOCFLAGS`] + (https://github.com/rust-lang/cargo/pull/2794) + +Performance +----------- + +* [`panic::catch_unwind` is more optimized] + 
(https://github.com/rust-lang/rust/pull/35444) +* [`panic::catch_unwind` no longer accesses thread-local storage on entry] + (https://github.com/rust-lang/rust/pull/34866) + +Tooling +------- + +* [Test binaries now support a `--test-threads` argument to specify the number + of threads used to run tests, and which acts the same as the + `RUST_TEST_THREADS` environment variable] + (https://github.com/rust-lang/rust/pull/35414) +* [The test runner now emits a warning when tests run over 60 seconds] + (https://github.com/rust-lang/rust/pull/35405) +* [rustdoc: Fix methods in search results] + (https://github.com/rust-lang/rust/pull/34752) +* [`rust-lldb` warns about unsupported versions of LLDB] + (https://github.com/rust-lang/rust/pull/34646) +* [Rust releases now come with source packages that can be installed by rustup + via `rustup component add rust-src`] + (https://github.com/rust-lang/rust/pull/34366). + The resulting source code can be used by tools and IDES, located in the + sysroot under `lib/rustlib/src`. + +Misc +---- + +* [The compiler can now be built against LLVM 3.9] + (https://github.com/rust-lang/rust/pull/35594) +* Many minor improvements to the documentation. 
+* [The Rust exception handling "personality" routine is now written in Rust] + (https://github.com/rust-lang/rust/pull/34832) + +Compatibility Notes +------------------- + +* [When printing Windows `OsStr`s, unpaired surrogate codepoints are escaped + with the lowercase format instead of the uppercase] + (https://github.com/rust-lang/rust/pull/35084) +* [When formatting strings, if "precision" is specified, the "fill", + "align" and "width" specifiers are no longer ignored] + (https://github.com/rust-lang/rust/pull/34544) +* [The `Debug` impl for strings no longer escapes all non-ASCII characters] + (https://github.com/rust-lang/rust/pull/34485) + + +Version 1.11.0 (2016-08-18) +=========================== + +Language +-------- + +* [`cfg_attr` works on `path` attributes] + (https://github.com/rust-lang/rust/pull/34546) +* [Support nested `cfg_attr` attributes] + (https://github.com/rust-lang/rust/pull/34216) +* [Allow statement-generating braced macro invocations at the end of blocks] + (https://github.com/rust-lang/rust/pull/34436) +* [Macros can be expanded inside of trait definitions] + (https://github.com/rust-lang/rust/pull/34213) +* [`#[macro_use]` works properly when it is itself expanded from a macro] + (https://github.com/rust-lang/rust/pull/34032) + +Stabilized APIs +--------------- + +* [`BinaryHeap::append`] + (https://doc.rust-lang.org/std/collections/binary_heap/struct.BinaryHeap.html#method.append) +* [`BTreeMap::append`] + (https://doc.rust-lang.org/std/collections/btree_map/struct.BTreeMap.html#method.append) +* [`BTreeMap::split_off`] + (https://doc.rust-lang.org/std/collections/btree_map/struct.BTreeMap.html#method.split_off) +* [`BTreeSet::append`] + (https://doc.rust-lang.org/std/collections/btree_set/struct.BTreeSet.html#method.append) +* [`BTreeSet::split_off`] + 
(https://doc.rust-lang.org/std/collections/btree_set/struct.BTreeSet.html#method.split_off) +* [`f32::to_degrees`] + (https://doc.rust-lang.org/std/primitive.f32.html#method.to_degrees) + (in libcore - previously stabilized in libstd) +* [`f32::to_radians`] + (https://doc.rust-lang.org/std/primitive.f32.html#method.to_radians) + (in libcore - previously stabilized in libstd) +* [`f64::to_degrees`] + (https://doc.rust-lang.org/std/primitive.f64.html#method.to_degrees) + (in libcore - previously stabilized in libstd) +* [`f64::to_radians`] + (https://doc.rust-lang.org/std/primitive.f64.html#method.to_radians) + (in libcore - previously stabilized in libstd) +* [`Iterator::sum`] + (https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.sum) +* [`Iterator::product`] + (https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.product) +* [`Cell::get_mut`] + (https://doc.rust-lang.org/std/cell/struct.Cell.html#method.get_mut) +* [`RefCell::get_mut`] + (https://doc.rust-lang.org/std/cell/struct.RefCell.html#method.get_mut) + +Libraries +--------- + +* [The `thread_local!` macro supports multiple definitions in a single + invocation, and can apply attributes] + (https://github.com/rust-lang/rust/pull/34077) +* [`Cow` implements `Default`] + (https://github.com/rust-lang/rust/pull/34305) +* [`Wrapping` implements binary, octal, lower-hex and upper-hex + `Display` formatting] + (https://github.com/rust-lang/rust/pull/34190) +* [The range types implement `Hash`] + (https://github.com/rust-lang/rust/pull/34180) +* [`lookup_host` ignores unknown address types] + (https://github.com/rust-lang/rust/pull/34067) +* [`assert_eq!` accepts a custom error message, like `assert!` does] + (https://github.com/rust-lang/rust/pull/33976) +* [The main thread is now called "main" instead of "<main>"] + 
(https://github.com/rust-lang/rust/pull/33803) + +Cargo +----- + +* [Disallow specifying features of transitive deps] + (https://github.com/rust-lang/cargo/pull/2821) +* [Add color support for Windows consoles] + (https://github.com/rust-lang/cargo/pull/2804) +* [Fix `harness = false` on `[lib]` sections] + (https://github.com/rust-lang/cargo/pull/2795) +* [Don't panic when `links` contains a '.'] + (https://github.com/rust-lang/cargo/pull/2787) +* [Build scripts can emit warnings] + (https://github.com/rust-lang/cargo/pull/2630), + and `-vv` prints warnings for all crates. +* [Ignore file locks on OS X NFS mounts] + (https://github.com/rust-lang/cargo/pull/2720) +* [Don't warn about `package.metadata` keys] + (https://github.com/rust-lang/cargo/pull/2668). + This provides room for expansion by arbitrary tools. +* [Add support for cdylib crate types] + (https://github.com/rust-lang/cargo/pull/2741) +* [Prevent publishing crates when files are dirty] + (https://github.com/rust-lang/cargo/pull/2781) +* [Don't fetch all crates on clean] + (https://github.com/rust-lang/cargo/pull/2704) +* [Propagate --color option to rustc] + (https://github.com/rust-lang/cargo/pull/2779) +* [Fix `cargo doc --open` on Windows] + (https://github.com/rust-lang/cargo/pull/2780) +* [Improve autocompletion] + (https://github.com/rust-lang/cargo/pull/2772) +* [Configure colors of stderr as well as stdout] + (https://github.com/rust-lang/cargo/pull/2739) + +Performance +----------- + +* [Caching projections speeds up type check dramatically for some + workloads] + (https://github.com/rust-lang/rust/pull/33816) +* [The default `HashMap` hasher is SipHash 1-3 instead of SipHash 
2-4] + (https://github.com/rust-lang/rust/pull/33940) + This hasher is faster, but is believed to provide sufficient + protection from collision attacks. +* [Comparison of `Ipv4Addr` is 10x faster] + (https://github.com/rust-lang/rust/pull/33891) + +Rustdoc +------- + +* [Fix empty implementation section on some module pages] + (https://github.com/rust-lang/rust/pull/34536) +* [Fix inlined renamed reexports in import lists] + (https://github.com/rust-lang/rust/pull/34479) +* [Fix search result layout for enum variants and struct fields] + (https://github.com/rust-lang/rust/pull/34477) +* [Fix issues with source links to external crates] + (https://github.com/rust-lang/rust/pull/34387) +* [Fix redirect pages for renamed reexports] + (https://github.com/rust-lang/rust/pull/34245) + +Tooling +------- + +* [rustc is better at finding the MSVC toolchain] + (https://github.com/rust-lang/rust/pull/34492) +* [When emitting debug info, rustc emits frame pointers for closures, + shims and glue, as it does for all other functions] + (https://github.com/rust-lang/rust/pull/33909) +* [rust-lldb warns about unsupported versions of LLDB] + (https://github.com/rust-lang/rust/pull/34646) +* Many more errors have been given error codes and extended + explanations +* API documentation continues to be improved, with many new examples + +Misc +---- + +* [rustc no longer hangs when dependencies recursively re-export + submodules] + (https://github.com/rust-lang/rust/pull/34542) +* [rustc requires LLVM 3.7+] + (https://github.com/rust-lang/rust/pull/34104) +* [The 'How Safe and Unsafe Interact' chapter of The Rustonomicon was + rewritten] + (https://github.com/rust-lang/rust/pull/33895) +* [rustc support 16-bit pointer sizes] + 
(https://github.com/rust-lang/rust/pull/33460). + No targets use this yet, but it works toward AVR support. + +Compatibility Notes +------------------- + +* [`const`s and `static`s may not have unsized types] + (https://github.com/rust-lang/rust/pull/34443) +* [The new follow-set rules that place restrictions on `macro_rules!` + in order to ensure syntax forward-compatibility have been enabled] + (https://github.com/rust-lang/rust/pull/33982) + This was an [ammendment to RFC 550] + (https://github.com/rust-lang/rfcs/pull/1384), + and has been a warning since 1.10. +* [`cfg` attribute process has been refactored to fix various bugs] + (https://github.com/rust-lang/rust/pull/33706). + This causes breakage in some corner cases. + + +Version 1.10.0 (2016-07-07) +=========================== + +Language +-------- + +* [Allow `concat_idents!` in type positions as well as in expression + positions] + (https://github.com/rust-lang/rust/pull/33735). +* [`Copy` types are required to have a trivial implementation of `Clone`] + (https://github.com/rust-lang/rust/pull/33420). + [RFC 1521](https://github.com/rust-lang/rfcs/blob/master/text/1521-copy-clone-semantics.md). +* [Single-variant enums support the `#[repr(..)]` attribute] + (https://github.com/rust-lang/rust/pull/33355). +* [Fix `#[derive(RustcEncodable)]` in the presence of other `encode` methods] + (https://github.com/rust-lang/rust/pull/32908). +* [`panic!` can be converted to a runtime abort with the + `-C panic=abort` flag] + (https://github.com/rust-lang/rust/pull/32900). + [RFC 1513](https://github.com/rust-lang/rfcs/blob/master/text/1513-less-unwinding.md). +* [Add a new crate type, 'cdylib'] + (https://github.com/rust-lang/rust/pull/33553). 
+ cdylibs are dynamic libraries suitable for loading by non-Rust hosts. + [RFC 1510](https://github.com/rust-lang/rfcs/blob/master/text/1510-rdylib.md). + Note that Cargo does not yet directly support cdylibs. + +Stabilized APIs +--------------- + +* `os::windows::fs::OpenOptionsExt::access_mode` +* `os::windows::fs::OpenOptionsExt::share_mode` +* `os::windows::fs::OpenOptionsExt::custom_flags` +* `os::windows::fs::OpenOptionsExt::attributes` +* `os::windows::fs::OpenOptionsExt::security_qos_flags` +* `os::unix::fs::OpenOptionsExt::custom_flags` +* [`sync::Weak::new`] + (http://doc.rust-lang.org/alloc/arc/struct.Weak.html#method.new) +* `Default for sync::Weak` +* [`panic::set_hook`] + (http://doc.rust-lang.org/std/panic/fn.set_hook.html) +* [`panic::take_hook`] + (http://doc.rust-lang.org/std/panic/fn.take_hook.html) +* [`panic::PanicInfo`] + (http://doc.rust-lang.org/std/panic/struct.PanicInfo.html) +* [`panic::PanicInfo::payload`] + (http://doc.rust-lang.org/std/panic/struct.PanicInfo.html#method.payload) +* [`panic::PanicInfo::location`] + (http://doc.rust-lang.org/std/panic/struct.PanicInfo.html#method.location) +* [`panic::Location`] + (http://doc.rust-lang.org/std/panic/struct.Location.html) +* [`panic::Location::file`] + (http://doc.rust-lang.org/std/panic/struct.Location.html#method.file) +* [`panic::Location::line`] + (http://doc.rust-lang.org/std/panic/struct.Location.html#method.line) +* [`ffi::CStr::from_bytes_with_nul`] + (http://doc.rust-lang.org/std/ffi/struct.CStr.html#method.from_bytes_with_nul) +* [`ffi::CStr::from_bytes_with_nul_unchecked`] + (http://doc.rust-lang.org/std/ffi/struct.CStr.html#method.from_bytes_with_nul_unchecked) +* [`ffi::FromBytesWithNulError`] + (http://doc.rust-lang.org/std/ffi/struct.FromBytesWithNulError.html) +* [`fs::Metadata::modified`] + (http://doc.rust-lang.org/std/fs/struct.Metadata.html#method.modified) +* [`fs::Metadata::accessed`] + 
(http://doc.rust-lang.org/std/fs/struct.Metadata.html#method.accessed) +* [`fs::Metadata::created`] + (http://doc.rust-lang.org/std/fs/struct.Metadata.html#method.created) +* `sync::atomic::Atomic{Usize,Isize,Bool,Ptr}::compare_exchange` +* `sync::atomic::Atomic{Usize,Isize,Bool,Ptr}::compare_exchange_weak` +* `collections::{btree,hash}_map::{Occupied,Vacant,}Entry::key` +* `os::unix::net::{UnixStream, UnixListener, UnixDatagram, SocketAddr}` +* [`SocketAddr::is_unnamed`] + (http://doc.rust-lang.org/std/os/unix/net/struct.SocketAddr.html#method.is_unnamed) +* [`SocketAddr::as_pathname`] + (http://doc.rust-lang.org/std/os/unix/net/struct.SocketAddr.html#method.as_pathname) +* [`UnixStream::connect`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.connect) +* [`UnixStream::pair`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.pair) +* [`UnixStream::try_clone`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.try_clone) +* [`UnixStream::local_addr`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.local_addr) +* [`UnixStream::peer_addr`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.peer_addr) +* [`UnixStream::set_read_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_read_timeout) +* [`UnixStream::set_write_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_write_timeout) +* [`UnixStream::read_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.read_timeout) +* [`UnixStream::write_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.write_timeout) +* [`UnixStream::set_nonblocking`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.set_nonblocking) +* [`UnixStream::take_error`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.take_error) +* 
[`UnixStream::shutdown`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixStream.html#method.shutdown) +* Read/Write/RawFd impls for `UnixStream` +* [`UnixListener::bind`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.bind) +* [`UnixListener::accept`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.accept) +* [`UnixListener::try_clone`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.try_clone) +* [`UnixListener::local_addr`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.local_addr) +* [`UnixListener::set_nonblocking`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.set_nonblocking) +* [`UnixListener::take_error`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.take_error) +* [`UnixListener::incoming`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixListener.html#method.incoming) +* RawFd impls for `UnixListener` +* [`UnixDatagram::bind`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.bind) +* [`UnixDatagram::unbound`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.unbound) +* [`UnixDatagram::pair`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.pair) +* [`UnixDatagram::connect`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.connect) +* [`UnixDatagram::try_clone`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.try_clone) +* [`UnixDatagram::local_addr`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.local_addr) +* [`UnixDatagram::peer_addr`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.peer_addr) +* [`UnixDatagram::recv_from`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.recv_from) +* [`UnixDatagram::recv`] + 
(http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.recv) +* [`UnixDatagram::send_to`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.send_to) +* [`UnixDatagram::send`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.send) +* [`UnixDatagram::set_read_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.set_read_timeout) +* [`UnixDatagram::set_write_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.set_write_timeout) +* [`UnixDatagram::read_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.read_timeout) +* [`UnixDatagram::write_timeout`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.write_timeout) +* [`UnixDatagram::set_nonblocking`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.set_nonblocking) +* [`UnixDatagram::take_error`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.take_error) +* [`UnixDatagram::shutdown`] + (http://doc.rust-lang.org/std/os/unix/net/struct.UnixDatagram.html#method.shutdown) +* RawFd impls for `UnixDatagram` +* `{BTree,Hash}Map::values_mut` +* [`<[_]>::binary_search_by_key`] + (http://doc.rust-lang.org/beta/std/primitive.slice.html#method.binary_search_by_key) + +Libraries +--------- + +* [The `abs_sub` method of floats is deprecated] + (https://github.com/rust-lang/rust/pull/33664). + The semantics of this minor method are subtle and probably not what + most people want. +* [Add implementation of Ord for Cell and RefCell where T: Ord] + (https://github.com/rust-lang/rust/pull/33306). +* [On Linux, if `HashMap`s can't be initialized with `getrandom` they + will fall back to `/dev/urandom` temporarily to avoid blocking + during early boot] + (https://github.com/rust-lang/rust/pull/33086). 
+* [Implemented negation for wrapping numerals] + (https://github.com/rust-lang/rust/pull/33067). +* [Implement `Clone` for `binary_heap::IntoIter`] + (https://github.com/rust-lang/rust/pull/33050). +* [Implement `Display` and `Hash` for `std::num::Wrapping`] + (https://github.com/rust-lang/rust/pull/33023). +* [Add `Default` implementation for `&CStr`, `CString`] + (https://github.com/rust-lang/rust/pull/32990). +* [Implement `From<Vec<T>>` and `Into<Vec<T>>` for `VecDeque<T>`] + (https://github.com/rust-lang/rust/pull/32866). +* [Implement `Default` for `UnsafeCell`, `fmt::Error`, `Condvar`, + `Mutex`, `RwLock`] + (https://github.com/rust-lang/rust/pull/32785). + +Cargo +----- +* [Cargo.toml supports the `profile.*.panic` option] + (https://github.com/rust-lang/cargo/pull/2687). + This controls the runtime behavior of the `panic!` macro + and can be either "unwind" (the default), or "abort". + [RFC 1513](https://github.com/rust-lang/rfcs/blob/master/text/1513-less-unwinding.md). +* [Don't throw away errors with `-p` arguments] + (https://github.com/rust-lang/cargo/pull/2723). +* [Report status to stderr instead of stdout] + (https://github.com/rust-lang/cargo/pull/2693). +* [Build scripts are passed a `CARGO_MANIFEST_LINKS` environment + variable that corresponds to the `links` field of the manifest] + (https://github.com/rust-lang/cargo/pull/2710). +* [Ban keywords from crate names] + (https://github.com/rust-lang/cargo/pull/2707). +* [Canonicalize `CARGO_HOME` on Windows] + (https://github.com/rust-lang/cargo/pull/2604). +* [Retry network requests] + (https://github.com/rust-lang/cargo/pull/2396). 
+ By default they are retried twice, which can be customized with the + `net.retry` value in `.cargo/config`. +* [Don't print extra error info for failing subcommands] + (https://github.com/rust-lang/cargo/pull/2674). +* [Add `--force` flag to `cargo install`] + (https://github.com/rust-lang/cargo/pull/2405). +* [Don't use `flock` on NFS mounts] + (https://github.com/rust-lang/cargo/pull/2623). +* [Prefer building `cargo install` artifacts in temporary directories] + (https://github.com/rust-lang/cargo/pull/2610). + Makes it possible to install multiple crates in parallel. +* [Add `cargo test --doc`] + (https://github.com/rust-lang/cargo/pull/2578). +* [Add `cargo --explain`] + (https://github.com/rust-lang/cargo/pull/2551). +* [Don't print warnings when `-q` is passed] + (https://github.com/rust-lang/cargo/pull/2576). +* [Add `cargo doc --lib` and `--bin`] + (https://github.com/rust-lang/cargo/pull/2577). +* [Don't require build script output to be UTF-8] + (https://github.com/rust-lang/cargo/pull/2560). +* [Correctly attempt multiple git usernames] + (https://github.com/rust-lang/cargo/pull/2584). + +Performance +----------- + +* [rustc memory usage was reduced by refactoring the context used for + type checking] + (https://github.com/rust-lang/rust/pull/33425). +* [Speed up creation of `HashMap`s by caching the random keys used + to initialize the hash state] + (https://github.com/rust-lang/rust/pull/33318). +* [The `find` implementation for `Chain` iterators is 2x faster] + (https://github.com/rust-lang/rust/pull/33289). +* [Trait selection optimizations speed up type checking by 15%] + (https://github.com/rust-lang/rust/pull/33138). 
+* [Efficient trie lookup for boolean Unicode properties] + (https://github.com/rust-lang/rust/pull/33098). + 10x faster than the previous lookup tables. +* [Special case `#[derive(Copy, Clone)]` to avoid bloat] + (https://github.com/rust-lang/rust/pull/31414). + +Usability +--------- + +* Many incremental improvements to documentation and rustdoc. +* [rustdoc: List blanket trait impls] + (https://github.com/rust-lang/rust/pull/33514). +* [rustdoc: Clean up ABI rendering] + (https://github.com/rust-lang/rust/pull/33151). +* [Indexing with the wrong type produces a more informative error] + (https://github.com/rust-lang/rust/pull/33401). +* [Improve diagnostics for constants being used in irrefutable patterns] + (https://github.com/rust-lang/rust/pull/33406). +* [When many method candidates are in scope limit the suggestions to 10] + (https://github.com/rust-lang/rust/pull/33338). +* [Remove confusing suggestion when calling a `fn` type] + (https://github.com/rust-lang/rust/pull/33325). +* [Do not suggest changing `&mut self` to `&mut mut self`] + (https://github.com/rust-lang/rust/pull/33319). + +Misc +---- + +* [Update i686-linux-android features to match Android ABI] + (https://github.com/rust-lang/rust/pull/33651). +* [Update aarch64-linux-android features to match Android ABI] + (https://github.com/rust-lang/rust/pull/33500). +* [`std` no longer prints backtraces on platforms where the running + module must be loaded with `env::current_exe`, which can't be relied + on](https://github.com/rust-lang/rust/pull/33554). +* This release includes std binaries for the i586-unknown-linux-gnu, + i686-unknown-linux-musl, and armv7-linux-androideabi targets. 
The + i586 target is for old x86 hardware without SSE2, and the armv7 + target is for Android running on modern ARM architectures. +* [The `rust-gdb` and `rust-lldb` scripts are distributed on all + Unix platforms](https://github.com/rust-lang/rust/pull/32835). +* [On Unix the runtime aborts by calling `libc::abort` instead of + generating an illegal instruction] + (https://github.com/rust-lang/rust/pull/31457). +* [Rust is now bootstrapped from the previous release of Rust, + instead of a snapshot from an arbitrary commit] + (https://github.com/rust-lang/rust/pull/32942). + +Compatibility Notes +------------------- + +* [`AtomicBool` is now bool-sized, not word-sized] + (https://github.com/rust-lang/rust/pull/33579). +* [`target_env` for Linux ARM targets is just `gnu`, not + `gnueabihf`, `gnueabi`, etc] + (https://github.com/rust-lang/rust/pull/33403). +* [Consistently panic on overflow in `Duration::new`] + (https://github.com/rust-lang/rust/pull/33072). +* [Change `String::truncate` to panic less] + (https://github.com/rust-lang/rust/pull/32977). +* [Add `:block` to the follow set for `:ty` and `:path`] + (https://github.com/rust-lang/rust/pull/32945). + Affects how macros are parsed. +* [Fix macro hygiene bug] + (https://github.com/rust-lang/rust/pull/32923). +* [Feature-gated attributes on macro-generated macro invocations are + now rejected] + (https://github.com/rust-lang/rust/pull/32791). +* [Suppress fallback and ambiguity errors during type inference] + (https://github.com/rust-lang/rust/pull/32258). + This caused some minor changes to type inference. + + +Version 1.9.0 (2016-05-26) +========================== + +Language +-------- + +* The `#[deprecated]` attribute when applied to an API will generate + warnings when used. 
The warnings may be suppressed with + `#[allow(deprecated)]`. [RFC 1270]. +* [`fn` item types are zero sized, and each `fn` names a unique + type][1.9fn]. This will break code that transmutes `fn`s, so calling + `transmute` on a `fn` type will generate a warning for a few cycles, + then will be converted to an error. +* [Field and method resolution understand visibility, so private + fields and methods cannot prevent the proper use of public fields + and methods][1.9fv]. +* [The parser considers unicode codepoints in the + `PATTERN_WHITE_SPACE` category to be whitespace][1.9ws]. + +Stabilized APIs +--------------- + +* [`std::panic`] +* [`std::panic::catch_unwind`][] (renamed from `recover`) +* [`std::panic::resume_unwind`][] (renamed from `propagate`) +* [`std::panic::AssertUnwindSafe`][] (renamed from `AssertRecoverSafe`) +* [`std::panic::UnwindSafe`][] (renamed from `RecoverSafe`) +* [`str::is_char_boundary`] +* [`<*const T>::as_ref`] +* [`<*mut T>::as_ref`] +* [`<*mut T>::as_mut`] +* [`AsciiExt::make_ascii_uppercase`] +* [`AsciiExt::make_ascii_lowercase`] +* [`char::decode_utf16`] +* [`char::DecodeUtf16`] +* [`char::DecodeUtf16Error`] +* [`char::DecodeUtf16Error::unpaired_surrogate`] +* [`BTreeSet::take`] +* [`BTreeSet::replace`] +* [`BTreeSet::get`] +* [`HashSet::take`] +* [`HashSet::replace`] +* [`HashSet::get`] +* [`OsString::with_capacity`] +* [`OsString::clear`] +* [`OsString::capacity`] +* [`OsString::reserve`] +* [`OsString::reserve_exact`] +* [`OsStr::is_empty`] +* [`OsStr::len`] +* [`std::os::unix::thread`] +* [`RawPthread`] +* [`JoinHandleExt`] +* [`JoinHandleExt::as_pthread_t`] +* [`JoinHandleExt::into_pthread_t`] +* [`HashSet::hasher`] +* [`HashMap::hasher`] +* [`CommandExt::exec`] +* [`File::try_clone`] +* [`SocketAddr::set_ip`] +* [`SocketAddr::set_port`] +* [`SocketAddrV4::set_ip`] +* [`SocketAddrV4::set_port`] +* [`SocketAddrV6::set_ip`] +* [`SocketAddrV6::set_port`] +* [`SocketAddrV6::set_flowinfo`] +* [`SocketAddrV6::set_scope_id`] +* 
[`slice::copy_from_slice`] +* [`ptr::read_volatile`] +* [`ptr::write_volatile`] +* [`OpenOptions::create_new`] +* [`TcpStream::set_nodelay`] +* [`TcpStream::nodelay`] +* [`TcpStream::set_ttl`] +* [`TcpStream::ttl`] +* [`TcpStream::set_only_v6`] +* [`TcpStream::only_v6`] +* [`TcpStream::take_error`] +* [`TcpStream::set_nonblocking`] +* [`TcpListener::set_ttl`] +* [`TcpListener::ttl`] +* [`TcpListener::set_only_v6`] +* [`TcpListener::only_v6`] +* [`TcpListener::take_error`] +* [`TcpListener::set_nonblocking`] +* [`UdpSocket::set_broadcast`] +* [`UdpSocket::broadcast`] +* [`UdpSocket::set_multicast_loop_v4`] +* [`UdpSocket::multicast_loop_v4`] +* [`UdpSocket::set_multicast_ttl_v4`] +* [`UdpSocket::multicast_ttl_v4`] +* [`UdpSocket::set_multicast_loop_v6`] +* [`UdpSocket::multicast_loop_v6`] +* [`UdpSocket::set_multicast_ttl_v6`] +* [`UdpSocket::multicast_ttl_v6`] +* [`UdpSocket::set_ttl`] +* [`UdpSocket::ttl`] +* [`UdpSocket::set_only_v6`] +* [`UdpSocket::only_v6`] +* [`UdpSocket::join_multicast_v4`] +* [`UdpSocket::join_multicast_v6`] +* [`UdpSocket::leave_multicast_v4`] +* [`UdpSocket::leave_multicast_v6`] +* [`UdpSocket::take_error`] +* [`UdpSocket::connect`] +* [`UdpSocket::send`] +* [`UdpSocket::recv`] +* [`UdpSocket::set_nonblocking`] + +Libraries +--------- + +* [`std::sync::Once` is poisoned if its initialization function + fails][1.9o]. +* [`cell::Ref` and `cell::RefMut` can contain unsized types][1.9cu]. +* [Most types implement `fmt::Debug`][1.9db]. +* [The default buffer size used by `BufReader` and `BufWriter` was + reduced to 8K, from 64K][1.9bf]. This is in line with the buffer size + used by other languages. +* [`Instant`, `SystemTime` and `Duration` implement `+=` and `-=`. + `Duration` additionally implements `*=` and `/=`][1.9ta]. +* [`Skip` is a `DoubleEndedIterator`][1.9sk]. +* [`From<[u8; 4]>` is implemented for `Ipv4Addr`][1.9fi]. +* [`Chain` implements `BufRead`][1.9ch]. +* [`HashMap`, `HashSet` and iterators are covariant][1.9hc]. 
+ +Cargo +----- + +* [Cargo can now run concurrently][1.9cc]. +* [Top-level overrides allow specific revisions of crates to be + overridden through the entire crate graph][1.9ct]. This is intended + to make upgrades easier for large projects, by allowing crates to be + forked temporarily until they've been upgraded and republished. +* [Cargo exports a `CARGO_PKG_AUTHORS` environment variable][1.9cp]. +* [Cargo will pass the contents of the `RUSTFLAGS` variable to `rustc` + on the commandline][1.9cf]. `rustc` arguments can also be specified + in the `build.rustflags` configuration key. + +Performance +----------- + +* [The time complexity of comparing variables for equivalence during type + unification is reduced from _O_(_n_!) to _O_(_n_)][1.9tu]. This leads + to major compilation time improvement in some scenarios. +* [`ToString` is specialized for `str`, giving it the same performance + as `to_owned`][1.9ts]. +* [Spawning processes with `Command::output` no longer creates extra + threads][1.9sp]. +* [`#[derive(PartialEq)]` and `#[derive(PartialOrd)]` emit less code + for C-like enums][1.9cl]. + +Misc +---- + +* [Passing the `--quiet` flag to a test runner will produce + much-abbreviated output][1.9q]. +* The Rust Project now publishes std binaries for the + `mips-unknown-linux-musl`, `mipsel-unknown-linux-musl`, and + `i586-pc-windows-msvc` targets. + +Compatibility Notes +------------------- + +* [`std::sync::Once` is poisoned if its initialization function + fails][1.9o]. +* [It is illegal to define methods with the same name in overlapping + inherent `impl` blocks][1.9sn]. +* [`fn` item types are zero sized, and each `fn` names a unique + type][1.9fn]. This will break code that transmutes `fn`s, so calling + `transmute` on a `fn` type will generate a warning for a few cycles, + then will be converted to an error. +* [Improvements to const evaluation may trigger new errors when integer + literals are out of range][1.9ce]. 
+ + +[1.9bf]: https://github.com/rust-lang/rust/pull/32695 +[1.9cc]: https://github.com/rust-lang/cargo/pull/2486 +[1.9ce]: https://github.com/rust-lang/rust/pull/30587 +[1.9cf]: https://github.com/rust-lang/cargo/pull/2241 +[1.9ch]: https://github.com/rust-lang/rust/pull/32541 +[1.9cl]: https://github.com/rust-lang/rust/pull/31977 +[1.9cp]: https://github.com/rust-lang/cargo/pull/2465 +[1.9ct]: https://github.com/rust-lang/cargo/pull/2385 +[1.9cu]: https://github.com/rust-lang/rust/pull/32652 +[1.9db]: https://github.com/rust-lang/rust/pull/32054 +[1.9fi]: https://github.com/rust-lang/rust/pull/32050 +[1.9fn]: https://github.com/rust-lang/rust/pull/31710 +[1.9fv]: https://github.com/rust-lang/rust/pull/31938 +[1.9hc]: https://github.com/rust-lang/rust/pull/32635 +[1.9o]: https://github.com/rust-lang/rust/pull/32325 +[1.9q]: https://github.com/rust-lang/rust/pull/31887 +[1.9sk]: https://github.com/rust-lang/rust/pull/31700 +[1.9sn]: https://github.com/rust-lang/rust/pull/31925 +[1.9sp]: https://github.com/rust-lang/rust/pull/31618 +[1.9ta]: https://github.com/rust-lang/rust/pull/32448 +[1.9ts]: https://github.com/rust-lang/rust/pull/32586 +[1.9tu]: https://github.com/rust-lang/rust/pull/32062 +[1.9ws]: https://github.com/rust-lang/rust/pull/29734 +[RFC 1270]: https://github.com/rust-lang/rfcs/blob/master/text/1270-deprecation.md +[`<*const T>::as_ref`]: http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_ref +[`<*mut T>::as_mut`]: 
http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_mut +[`<*mut T>::as_ref`]: http://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.as_ref +[`slice::copy_from_slice`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.copy_from_slice +[`AsciiExt::make_ascii_lowercase`]: http://doc.rust-lang.org/nightly/std/ascii/trait.AsciiExt.html#tymethod.make_ascii_lowercase +[`AsciiExt::make_ascii_uppercase`]: http://doc.rust-lang.org/nightly/std/ascii/trait.AsciiExt.html#tymethod.make_ascii_uppercase +[`BTreeSet::get`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.get +[`BTreeSet::replace`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.replace +[`BTreeSet::take`]: http://doc.rust-lang.org/nightly/collections/btree/set/struct.BTreeSet.html#method.take +[`CommandExt::exec`]: http://doc.rust-lang.org/nightly/std/os/unix/process/trait.CommandExt.html#tymethod.exec +[`File::try_clone`]: http://doc.rust-lang.org/nightly/std/fs/struct.File.html#method.try_clone +[`HashMap::hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.hasher +[`HashSet::get`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.get +[`HashSet::hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.hasher +[`HashSet::replace`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.replace +[`HashSet::take`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.take +[`JoinHandleExt::as_pthread_t`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html#tymethod.as_pthread_t +[`JoinHandleExt::into_pthread_t`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html#tymethod.into_pthread_t +[`JoinHandleExt`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/trait.JoinHandleExt.html +[`OpenOptions::create_new`]: 
http://doc.rust-lang.org/nightly/std/fs/struct.OpenOptions.html#method.create_new +[`OsStr::is_empty`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsStr.html#method.is_empty +[`OsStr::len`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsStr.html#method.len +[`OsString::capacity`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.capacity +[`OsString::clear`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.clear +[`OsString::reserve_exact`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.reserve_exact +[`OsString::reserve`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.reserve +[`OsString::with_capacity`]: http://doc.rust-lang.org/nightly/std/ffi/struct.OsString.html#method.with_capacity +[`RawPthread`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/type.RawPthread.html +[`SocketAddr::set_ip`]: http://doc.rust-lang.org/nightly/std/net/enum.SocketAddr.html#method.set_ip +[`SocketAddr::set_port`]: http://doc.rust-lang.org/nightly/std/net/enum.SocketAddr.html#method.set_port +[`SocketAddrV4::set_ip`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV4.html#method.set_ip +[`SocketAddrV4::set_port`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV4.html#method.set_port +[`SocketAddrV6::set_flowinfo`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_flowinfo +[`SocketAddrV6::set_ip`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_ip +[`SocketAddrV6::set_port`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_port +[`SocketAddrV6::set_scope_id`]: http://doc.rust-lang.org/nightly/std/net/struct.SocketAddrV6.html#method.set_scope_id +[`TcpListener::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.only_v6 +[`TcpListener::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_nonblocking 
+[`TcpListener::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_only_v6 +[`TcpListener::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.set_ttl +[`TcpListener::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.take_error +[`TcpListener::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpListener.html#method.ttl +[`TcpStream::nodelay`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.nodelay +[`TcpStream::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.only_v6 +[`TcpStream::set_nodelay`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_nodelay +[`TcpStream::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_nonblocking +[`TcpStream::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_only_v6 +[`TcpStream::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.set_ttl +[`TcpStream::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.take_error +[`TcpStream::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.TcpStream.html#method.ttl +[`UdpSocket::broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.broadcast +[`UdpSocket::connect`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.connect +[`UdpSocket::join_multicast_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.join_multicast_v4 +[`UdpSocket::join_multicast_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.join_multicast_v6 +[`UdpSocket::leave_multicast_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.leave_multicast_v4 +[`UdpSocket::leave_multicast_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.leave_multicast_v6 
+[`UdpSocket::multicast_loop_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_loop_v4 +[`UdpSocket::multicast_loop_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_loop_v6 +[`UdpSocket::multicast_ttl_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_ttl_v4 +[`UdpSocket::multicast_ttl_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.multicast_ttl_v6 +[`UdpSocket::only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.only_v6 +[`UdpSocket::recv`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.recv +[`UdpSocket::send`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.send +[`UdpSocket::set_broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_broadcast +[`UdpSocket::set_multicast_loop_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_loop_v4 +[`UdpSocket::set_multicast_loop_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_loop_v6 +[`UdpSocket::set_multicast_ttl_v4`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_ttl_v4 +[`UdpSocket::set_multicast_ttl_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_multicast_ttl_v6 +[`UdpSocket::set_nonblocking`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_nonblocking +[`UdpSocket::set_only_v6`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_only_v6 +[`UdpSocket::set_ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.set_ttl +[`UdpSocket::take_error`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.take_error +[`UdpSocket::ttl`]: http://doc.rust-lang.org/nightly/std/net/struct.UdpSocket.html#method.ttl 
+[`char::DecodeUtf16Error::unpaired_surrogate`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16Error.html#method.unpaired_surrogate +[`char::DecodeUtf16Error`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16Error.html +[`char::DecodeUtf16`]: http://doc.rust-lang.org/nightly/std/char/struct.DecodeUtf16.html +[`char::decode_utf16`]: http://doc.rust-lang.org/nightly/std/char/fn.decode_utf16.html +[`ptr::read_volatile`]: http://doc.rust-lang.org/nightly/std/ptr/fn.read_volatile.html +[`ptr::write_volatile`]: http://doc.rust-lang.org/nightly/std/ptr/fn.write_volatile.html +[`std::os::unix::thread`]: http://doc.rust-lang.org/nightly/std/os/unix/thread/index.html +[`std::panic::AssertUnwindSafe`]: http://doc.rust-lang.org/nightly/std/panic/struct.AssertUnwindSafe.html +[`std::panic::UnwindSafe`]: http://doc.rust-lang.org/nightly/std/panic/trait.UnwindSafe.html +[`std::panic::catch_unwind`]: http://doc.rust-lang.org/nightly/std/panic/fn.catch_unwind.html +[`std::panic::resume_unwind`]: http://doc.rust-lang.org/nightly/std/panic/fn.resume_unwind.html +[`std::panic`]: http://doc.rust-lang.org/nightly/std/panic/index.html +[`str::is_char_boundary`]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.is_char_boundary + + +Version 1.8.0 (2016-04-14) +========================== + +Language +-------- + +* Rust supports overloading of compound assignment statements like + `+=` by implementing the [`AddAssign`], [`SubAssign`], + [`MulAssign`], [`DivAssign`], [`RemAssign`], [`BitAndAssign`], + [`BitOrAssign`], [`BitXorAssign`], [`ShlAssign`], or [`ShrAssign`] + traits. [RFC 953]. +* Empty structs can be defined with braces, as in `struct Foo { }`, in + addition to the non-braced form, `struct Foo;`. [RFC 218]. 
+ +Libraries +--------- + +* Stabilized APIs: + * [`str::encode_utf16`][] (renamed from `utf16_units`) + * [`str::EncodeUtf16`][] (renamed from `Utf16Units`) + * [`Ref::map`] + * [`RefMut::map`] + * [`ptr::drop_in_place`] + * [`time::Instant`] + * [`time::SystemTime`] + * [`Instant::now`] + * [`Instant::duration_since`][] (renamed from `duration_from_earlier`) + * [`Instant::elapsed`] + * [`SystemTime::now`] + * [`SystemTime::duration_since`][] (renamed from `duration_from_earlier`) + * [`SystemTime::elapsed`] + * Various `Add`/`Sub` impls for `Time` and `SystemTime` + * [`SystemTimeError`] + * [`SystemTimeError::duration`] + * Various impls for `SystemTimeError` + * [`UNIX_EPOCH`] + * [`AddAssign`], [`SubAssign`], [`MulAssign`], [`DivAssign`], + [`RemAssign`], [`BitAndAssign`], [`BitOrAssign`], + [`BitXorAssign`], [`ShlAssign`], [`ShrAssign`]. +* [The `write!` and `writeln!` macros correctly emit errors if any of + their arguments can't be formatted][1.8w]. +* [Various I/O functions support large files on 32-bit Linux][1.8l]. +* [The Unix-specific `raw` modules, which contain a number of + redefined C types are deprecated][1.8r], including `os::raw::unix`, + `os::raw::macos`, and `os::raw::linux`. These modules defined types + such as `ino_t` and `dev_t`. The inconsistency of these definitions + across platforms was making it difficult to implement `std` + correctly. Those that need these definitions should use the `libc` + crate. [RFC 1415]. +* The Unix-specific `MetadataExt` traits, including + `os::unix::fs::MetadataExt`, which expose values such as inode + numbers [no longer return platform-specific types][1.8r], but + instead return widened integers. [RFC 1415]. +* [`btree_set::{IntoIter, Iter, Range}` are covariant][1.8cv]. +* [Atomic loads and stores are not volatile][1.8a]. +* [All types in `sync::mpsc` implement `fmt::Debug`][1.8mp]. + +Performance +----------- + +* [Inlining hash functions lead to a 3% compile-time improvement in + some workloads][1.8h]. 
+* When using jemalloc, its symbols are [unprefixed so that it + overrides the libc malloc implementation][1.8h]. This means that for + rustc, LLVM is now using jemalloc, which results in a 6% + compile-time improvement on a specific workload. +* [Avoid quadratic growth in function size due to cleanups][1.8cu]. + +Misc +---- + +* [32-bit MSVC builds finally implement unwinding][1.8ms]. + i686-pc-windows-msvc is now considered a tier-1 platform. +* [The `--print targets` flag prints a list of supported targets][1.8t]. +* [The `--print cfg` flag prints the `cfg`s defined for the current + target][1.8cf]. +* [`rustc` can be built with an new Cargo-based build system, written + in Rust][1.8b]. It will eventually replace Rust's Makefile-based + build system. To enable it configure with `configure --rustbuild`. +* [Errors for non-exhaustive `match` patterns now list up to 3 missing + variants while also indicating the total number of missing variants + if more than 3][1.8m]. +* [Executable stacks are disabled on Linux and BSD][1.8nx]. +* The Rust Project now publishes binary releases of the standard + library for a number of tier-2 targets: + `armv7-unknown-linux-gnueabihf`, `powerpc-unknown-linux-gnu`, + `powerpc64-unknown-linux-gnu`, `powerpc64le-unknown-linux-gnu` + `x86_64-rumprun-netbsd`. These can be installed with + tools such as [multirust][1.8mr]. + +Cargo +----- + +* [`cargo init` creates a new Cargo project in the current + directory][1.8ci]. It is otherwise like `cargo new`. +* [Cargo has configuration keys for `-v` and + `--color`][1.8cc]. `verbose` and `color`, respectively, go in the + `[term]` section of `.cargo/config`. +* [Configuration keys that evaluate to strings or integers can be set + via environment variables][1.8ce]. For example the `build.jobs` key + can be set via `CARGO_BUILD_JOBS`. Environment variables take + precedence over config files. 
+* [Target-specific dependencies support Rust `cfg` syntax for + describing targets][1.8cfg] so that dependencies for multiple + targets can be specified together. [RFC 1361]. +* [The environment variables `CARGO_TARGET_ROOT`, `RUSTC`, and + `RUSTDOC` take precedence over the `build.target-dir`, + `build.rustc`, and `build.rustdoc` configuration values][1.8cv]. +* [The child process tree is killed on Windows when Cargo is + killed][1.8ck]. +* [The `build.target` configuration value sets the target platform, + like `--target`][1.8ct]. + +Compatibility Notes +------------------- + +* [Unstable compiler flags have been further restricted][1.8u]. Since + 1.0 `-Z` flags have been considered unstable, and other flags that + were considered unstable additionally required passing `-Z + unstable-options` to access. Unlike unstable language and library + features though, these options have been accessible on the stable + release channel. Going forward, *new unstable flags will not be + available on the stable release channel*, and old unstable flags + will warn about their usage. In the future, all unstable flags will + be unavailable on the stable release channel. +* [It is no longer possible to `match` on empty enum variants using + the `Variant(..)` syntax][1.8v]. This has been a warning since 1.6. +* The Unix-specific `MetadataExt` traits, including + `os::unix::fs::MetadataExt`, which expose values such as inode + numbers [no longer return platform-specific types][1.8r], but + instead return widened integers. [RFC 1415]. +* [Modules sourced from the filesystem cannot appear within arbitrary + blocks, but only within other modules][1.8mf]. +* [`--cfg` compiler flags are parsed strictly as identifiers][1.8c]. +* On Unix, [stack overflow triggers a runtime abort instead of a + SIGSEGV][1.8so]. +* [`Command::spawn` and its equivalents return an error if any of + its command-line arguments contain interior `NUL`s][1.8n]. 
+* [Tuple and unit enum variants from other crates are in the type + namespace][1.8tn]. +* [On Windows `rustc` emits `.lib` files for the `staticlib` library + type instead of `.a` files][1.8st]. Additionally, for the MSVC + toolchain, `rustc` emits import libraries named `foo.dll.lib` + instead of `foo.lib`. + + +[1.8a]: https://github.com/rust-lang/rust/pull/30962 +[1.8b]: https://github.com/rust-lang/rust/pull/31123 +[1.8c]: https://github.com/rust-lang/rust/pull/31530 +[1.8cc]: https://github.com/rust-lang/cargo/pull/2397 +[1.8ce]: https://github.com/rust-lang/cargo/pull/2398 +[1.8cf]: https://github.com/rust-lang/rust/pull/31278 +[1.8cfg]: https://github.com/rust-lang/cargo/pull/2328 +[1.8ci]: https://github.com/rust-lang/cargo/pull/2081 +[1.8ck]: https://github.com/rust-lang/cargo/pull/2370 +[1.8ct]: https://github.com/rust-lang/cargo/pull/2335 +[1.8cu]: https://github.com/rust-lang/rust/pull/31390 +[1.8cv]: https://github.com/rust-lang/cargo/issues/2365 +[1.8cv]: https://github.com/rust-lang/rust/pull/30998 +[1.8h]: https://github.com/rust-lang/rust/pull/31460 +[1.8l]: https://github.com/rust-lang/rust/pull/31668 +[1.8m]: https://github.com/rust-lang/rust/pull/31020 +[1.8mf]: https://github.com/rust-lang/rust/pull/31534 +[1.8mp]: https://github.com/rust-lang/rust/pull/30894 +[1.8mr]: https://users.rust-lang.org/t/multirust-0-8-with-cross-std-installation/4901 +[1.8ms]: https://github.com/rust-lang/rust/pull/30448 +[1.8n]: https://github.com/rust-lang/rust/pull/31056 +[1.8nx]: https://github.com/rust-lang/rust/pull/30859 +[1.8r]: 
https://github.com/rust-lang/rust/pull/31551 +[1.8so]: https://github.com/rust-lang/rust/pull/31333 +[1.8st]: https://github.com/rust-lang/rust/pull/29520 +[1.8t]: https://github.com/rust-lang/rust/pull/31358 +[1.8tn]: https://github.com/rust-lang/rust/pull/30882 +[1.8u]: https://github.com/rust-lang/rust/pull/31793 +[1.8v]: https://github.com/rust-lang/rust/pull/31757 +[1.8w]: https://github.com/rust-lang/rust/pull/31904 +[RFC 1361]: https://github.com/rust-lang/rfcs/blob/master/text/1361-cargo-cfg-dependencies.md +[RFC 1415]: https://github.com/rust-lang/rfcs/blob/master/text/1415-trim-std-os.md +[RFC 218]: https://github.com/rust-lang/rfcs/blob/master/text/0218-empty-struct-with-braces.md +[RFC 953]: https://github.com/rust-lang/rfcs/blob/master/text/0953-op-assign.md +[`AddAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.AddAssign.html +[`BitAndAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.BitAndAssign.html +[`BitOrAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.BitOrAssign.html +[`BitXorAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.BitXorAssign.html +[`DivAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.DivAssign.html +[`Instant::duration_since`]: http://doc.rust-lang.org/nightly/std/time/struct.Instant.html#method.duration_since +[`Instant::elapsed`]: http://doc.rust-lang.org/nightly/std/time/struct.Instant.html#method.elapsed +[`Instant::now`]: http://doc.rust-lang.org/nightly/std/time/struct.Instant.html#method.now +[`MulAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.MulAssign.html +[`Ref::map`]: http://doc.rust-lang.org/nightly/std/cell/struct.Ref.html#method.map +[`RefMut::map`]: http://doc.rust-lang.org/nightly/std/cell/struct.RefMut.html#method.map 
+[`RemAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.RemAssign.html +[`ShlAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.ShlAssign.html +[`ShrAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.ShrAssign.html +[`SubAssign`]: http://doc.rust-lang.org/nightly/std/ops/trait.SubAssign.html +[`SystemTime::duration_since`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#method.duration_since +[`SystemTime::elapsed`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#method.elapsed +[`SystemTime::now`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html#method.now +[`SystemTimeError::duration`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTimeError.html#method.duration +[`SystemTimeError`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTimeError.html +[`UNIX_EPOCH`]: http://doc.rust-lang.org/nightly/std/time/constant.UNIX_EPOCH.html +[`ptr::drop_in_place`]: http://doc.rust-lang.org/nightly/std/ptr/fn.drop_in_place.html +[`str::EncodeUtf16`]: http://doc.rust-lang.org/nightly/std/str/struct.EncodeUtf16.html +[`str::encode_utf16`]: http://doc.rust-lang.org/nightly/std/primitive.str.html#method.encode_utf16 +[`time::Instant`]: http://doc.rust-lang.org/nightly/std/time/struct.Instant.html +[`time::SystemTime`]: http://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html + + +Version 1.7.0 (2016-03-03) +========================== + +Libraries +--------- + +* Stabilized APIs + * `Path` + * [`Path::strip_prefix`][] (renamed from relative_from) + * [`path::StripPrefixError`][] (new error type returned from strip_prefix) + * `Ipv4Addr` + * [`Ipv4Addr::is_loopback`] + * [`Ipv4Addr::is_private`] + * [`Ipv4Addr::is_link_local`] + * [`Ipv4Addr::is_multicast`] + * [`Ipv4Addr::is_broadcast`] + * [`Ipv4Addr::is_documentation`] + * `Ipv6Addr` + * [`Ipv6Addr::is_unspecified`] + * [`Ipv6Addr::is_loopback`] + * [`Ipv6Addr::is_multicast`] + * `Vec` + * [`Vec::as_slice`] + * [`Vec::as_mut_slice`] + 
* `String` + * [`String::as_str`] + * [`String::as_mut_str`] + * Slices + * `<[T]>::`[`clone_from_slice`], which now requires the two slices to + be the same length + * `<[T]>::`[`sort_by_key`] + * checked, saturated, and overflowing operations + * [`i32::checked_rem`], [`i32::checked_neg`], [`i32::checked_shl`], [`i32::checked_shr`] + * [`i32::saturating_mul`] + * [`i32::overflowing_add`], [`i32::overflowing_sub`], [`i32::overflowing_mul`], [`i32::overflowing_div`] + * [`i32::overflowing_rem`], [`i32::overflowing_neg`], [`i32::overflowing_shl`], [`i32::overflowing_shr`] + * [`u32::checked_rem`], [`u32::checked_neg`], [`u32::checked_shl`], [`u32::checked_shr`] + * [`u32::saturating_mul`] + * [`u32::overflowing_add`], [`u32::overflowing_sub`], [`u32::overflowing_mul`], [`u32::overflowing_div`] + * [`u32::overflowing_rem`], [`u32::overflowing_neg`], [`u32::overflowing_shl`], [`u32::overflowing_shr`] + * and checked, saturated, and overflowing operations for other primitive types + * FFI + * [`ffi::IntoStringError`] + * [`CString::into_string`] + * [`CString::into_bytes`] + * [`CString::into_bytes_with_nul`] + * `From<CString> for Vec<u8>` + * `IntoStringError` + * [`IntoStringError::into_cstring`] + * [`IntoStringError::utf8_error`] + * `Error for IntoStringError` + * Hashing + * [`std::hash::BuildHasher`] + * [`BuildHasher::Hasher`] + * [`BuildHasher::build_hasher`] + * [`std::hash::BuildHasherDefault`] + * [`HashMap::with_hasher`] + * [`HashMap::with_capacity_and_hasher`] + * [`HashSet::with_hasher`] + * [`HashSet::with_capacity_and_hasher`] + * [`std::collections::hash_map::RandomState`] + * [`RandomState::new`] +* [Validating UTF-8 is faster by a factor of between 7 and 14x for + ASCII input][1.7utf8]. This means that creating `String`s and `str`s + from bytes is faster. +* [The performance of `LineWriter` (and thus `io::stdout`) was + improved by using `memchr` to search for newlines][1.7m]. +* [`f32::to_degrees` and `f32::to_radians` are stable][1.7f]. 
The + `f64` variants were stabilized previously. +* [`BTreeMap` was rewritten to use less memory and improve the performance + of insertion and iteration, the latter by as much as 5x][1.7bm]. +* [`BTreeSet` and its iterators, `Iter`, `IntoIter`, and `Range` are + covariant over their contained type][1.7bt]. +* [`LinkedList` and its iterators, `Iter` and `IntoIter` are covariant + over their contained type][1.7ll]. +* [`str::replace` now accepts a `Pattern`][1.7rp], like other string + searching methods. +* [`Any` is implemented for unsized types][1.7a]. +* [`Hash` is implemented for `Duration`][1.7h]. + +Misc +---- + +* [When running tests with `--test`, rustdoc will pass `--cfg` + arguments to the compiler][1.7dt]. +* [The compiler is built with RPATH information by default][1.7rpa]. + This means that it will be possible to run `rustc` when installed in + unusual configurations without configuring the dynamic linker search + path explicitly. +* [`rustc` passes `--enable-new-dtags` to GNU ld][1.7dta]. This makes + any RPATH entries (emitted with `-C rpath`) *not* take precedence + over `LD_LIBRARY_PATH`. + +Cargo +----- + +* [`cargo rustc` accepts a `--profile` flag that runs `rustc` under + any of the compilation profiles, 'dev', 'bench', or 'test'][1.7cp]. +* [The `rerun-if-changed` build script directive no longer causes the + build script to incorrectly run twice in certain scenarios][1.7rr]. + +Compatibility Notes +------------------- + +* Soundness fixes to the interactions between associated types and + lifetimes, specified in [RFC 1214], [now generate errors][1.7sf] for + code that violates the new rules. This is a significant change that + is known to break existing code, so it has emitted warnings for the + new error cases since 1.4 to give crate authors time to adapt. The + details of what is changing are subtle; read the RFC for more. +* [Several bugs in the compiler's visibility calculations were + fixed][1.7v]. 
Since this was found to break significant amounts of + code, the new errors will be emitted as warnings for several release + cycles, under the `private_in_public` lint. +* Defaulted type parameters were accidentally accepted in positions + that were not intended. In this release, [defaulted type parameters + appearing outside of type definitions will generate a + warning][1.7d], which will become an error in future releases. +* [Parsing "." as a float results in an error instead of 0][1.7p]. + That is, `".".parse::<f32>()` returns `Err`, not `Ok(0.0)`. +* [Borrows of closure parameters may not outlive the closure][1.7bc]. + +[1.7a]: https://github.com/rust-lang/rust/pull/30928 +[1.7bc]: https://github.com/rust-lang/rust/pull/30341 +[1.7bm]: https://github.com/rust-lang/rust/pull/30426 +[1.7bt]: https://github.com/rust-lang/rust/pull/30998 +[1.7cp]: https://github.com/rust-lang/cargo/pull/2224 +[1.7d]: https://github.com/rust-lang/rust/pull/30724 +[1.7dt]: https://github.com/rust-lang/rust/pull/30372 +[1.7dta]: https://github.com/rust-lang/rust/pull/30394 +[1.7f]: https://github.com/rust-lang/rust/pull/30672 +[1.7h]: https://github.com/rust-lang/rust/pull/30818 +[1.7ll]: https://github.com/rust-lang/rust/pull/30663 +[1.7m]: https://github.com/rust-lang/rust/pull/30381 +[1.7p]: https://github.com/rust-lang/rust/pull/30681 +[1.7rp]: https://github.com/rust-lang/rust/pull/29498 +[1.7rpa]: https://github.com/rust-lang/rust/pull/30353 +[1.7rr]: https://github.com/rust-lang/cargo/pull/2279 +[1.7sf]: https://github.com/rust-lang/rust/pull/30389 +[1.7utf8]: https://github.com/rust-lang/rust/pull/30740 +[1.7v]: 
https://github.com/rust-lang/rust/pull/29973 +[RFC 1214]: https://github.com/rust-lang/rfcs/blob/master/text/1214-projections-lifetimes-and-wf.md +[`BuildHasher::Hasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.Hasher.html +[`BuildHasher::build_hasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.BuildHasher.html#tymethod.build_hasher +[`CString::into_bytes_with_nul`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_bytes_with_nul +[`CString::into_bytes`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_bytes +[`CString::into_string`]: http://doc.rust-lang.org/nightly/std/ffi/struct.CString.html#method.into_string +[`HashMap::with_capacity_and_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.with_capacity_and_hasher +[`HashMap::with_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashMap.html#method.with_hasher +[`HashSet::with_capacity_and_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.with_capacity_and_hasher +[`HashSet::with_hasher`]: http://doc.rust-lang.org/nightly/std/collections/struct.HashSet.html#method.with_hasher +[`IntoStringError::into_cstring`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html#method.into_cstring +[`IntoStringError::utf8_error`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html#method.utf8_error +[`Ipv4Addr::is_broadcast`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_broadcast +[`Ipv4Addr::is_documentation`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_documentation +[`Ipv4Addr::is_link_local`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_link_local +[`Ipv4Addr::is_loopback`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_loopback +[`Ipv4Addr::is_multicast`]: 
http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_multicast +[`Ipv4Addr::is_private`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv4Addr.html#method.is_private +[`Ipv6Addr::is_loopback`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv6Addr.html#method.is_loopback +[`Ipv6Addr::is_multicast`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv6Addr.html#method.is_multicast +[`Ipv6Addr::is_unspecified`]: http://doc.rust-lang.org/nightly/std/net/struct.Ipv6Addr.html#method.is_unspecified +[`Path::strip_prefix`]: http://doc.rust-lang.org/nightly/std/path/struct.Path.html#method.strip_prefix +[`RandomState::new`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.RandomState.html#method.new +[`String::as_mut_str`]: http://doc.rust-lang.org/nightly/std/string/struct.String.html#method.as_mut_str +[`String::as_str`]: http://doc.rust-lang.org/nightly/std/string/struct.String.html#method.as_str +[`Vec::as_mut_slice`]: http://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#method.as_mut_slice +[`Vec::as_slice`]: http://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#method.as_slice +[`clone_from_slice`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.clone_from_slice +[`ffi::IntoStringError`]: http://doc.rust-lang.org/nightly/std/ffi/struct.IntoStringError.html +[`i32::checked_neg`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_neg +[`i32::checked_rem`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_rem +[`i32::checked_shl`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_shl +[`i32::checked_shr`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.checked_shr +[`i32::overflowing_add`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_add +[`i32::overflowing_div`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_div +[`i32::overflowing_mul`]: 
http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_mul +[`i32::overflowing_neg`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_neg +[`i32::overflowing_rem`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_rem +[`i32::overflowing_shl`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_shl +[`i32::overflowing_shr`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_shr +[`i32::overflowing_sub`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.overflowing_sub +[`i32::saturating_mul`]: http://doc.rust-lang.org/nightly/std/primitive.i32.html#method.saturating_mul +[`path::StripPrefixError`]: http://doc.rust-lang.org/nightly/std/path/struct.StripPrefixError.html +[`sort_by_key`]: http://doc.rust-lang.org/nightly/std/primitive.slice.html#method.sort_by_key +[`std::collections::hash_map::RandomState`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.RandomState.html +[`std::hash::BuildHasherDefault`]: http://doc.rust-lang.org/nightly/std/hash/struct.BuildHasherDefault.html +[`std::hash::BuildHasher`]: http://doc.rust-lang.org/nightly/std/hash/trait.BuildHasher.html +[`u32::checked_neg`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_neg +[`u32::checked_rem`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_rem +[`u32::checked_shr`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_shr +[`u32::checked_shl`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.checked_shl +[`u32::overflowing_add`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_add +[`u32::overflowing_div`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_div +[`u32::overflowing_mul`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_mul +[`u32::overflowing_neg`]: 
http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_neg +[`u32::overflowing_rem`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_rem +[`u32::overflowing_shl`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_shl +[`u32::overflowing_shr`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_shr +[`u32::overflowing_sub`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.overflowing_sub +[`u32::saturating_mul`]: http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.saturating_mul + + +Version 1.6.0 (2016-01-21) +========================== + +Language +-------- + +* The `#![no_std]` attribute causes a crate to not be linked to the + standard library, but only the [core library][1.6co], as described + in [RFC 1184]. The core library defines common types and traits but + has no platform dependencies whatsoever, and is the basis for Rust + software in environments that cannot support a full port of the + standard library, such as operating systems. Most of the core + library is now stable. 
+ +Libraries +--------- + +* Stabilized APIs: + [`Read::read_exact`], + [`ErrorKind::UnexpectedEof`][] (renamed from `UnexpectedEOF`), + [`fs::DirBuilder`], [`fs::DirBuilder::new`], + [`fs::DirBuilder::recursive`], [`fs::DirBuilder::create`], + [`os::unix::fs::DirBuilderExt`], + [`os::unix::fs::DirBuilderExt::mode`], [`vec::Drain`], + [`vec::Vec::drain`], [`string::Drain`], [`string::String::drain`], + [`vec_deque::Drain`], [`vec_deque::VecDeque::drain`], + [`collections::hash_map::Drain`], + [`collections::hash_map::HashMap::drain`], + [`collections::hash_set::Drain`], + [`collections::hash_set::HashSet::drain`], + [`collections::binary_heap::Drain`], + [`collections::binary_heap::BinaryHeap::drain`], + [`Vec::extend_from_slice`][] (renamed from `push_all`), + [`Mutex::get_mut`], [`Mutex::into_inner`], [`RwLock::get_mut`], + [`RwLock::into_inner`], + [`Iterator::min_by_key`][] (renamed from `min_by`), + [`Iterator::max_by_key`][] (renamed from `max_by`). +* The [core library][1.6co] is stable, as are most of its APIs. +* [The `assert_eq!` macro supports arguments that don't implement + `Sized`][1.6ae], such as arrays. In this way it behaves more like + `assert!`. +* Several timer functions that take duration in milliseconds [are + deprecated in favor of those that take `Duration`][1.6ms]. These + include `Condvar::wait_timeout_ms`, `thread::sleep_ms`, and + `thread::park_timeout_ms`. +* The algorithm by which `Vec` reserves additional elements was + [tweaked to not allocate excessive space][1.6a] while still growing + exponentially. +* `From` conversions are [implemented from integers to floats][1.6f] + in cases where the conversion is lossless. Thus they are not + implemented for 32-bit ints to `f32`, nor for 64-bit ints to `f32` + or `f64`. They are also not implemented for `isize` and `usize` + because the implementations would be platform-specific. `From` is + also implemented from `f32` to `f64`. +* `From<&Path>` and `From` are implemented for `Cow`. 
+* `From<T>` is implemented for `Box<T>`, `Rc<T>` and `Arc<T>`. +* `IntoIterator` is implemented for `&PathBuf` and `&Path`. +* [`BinaryHeap` was refactored][1.6bh] for modest performance + improvements. +* Sorting slices that are already sorted [is 50% faster in some + cases][1.6s]. + +Cargo +----- + +* Cargo will look in `$CARGO_HOME/bin` for subcommands [by default][1.6c]. +* Cargo build scripts can specify their dependencies by emitting the + [`rerun-if-changed`][1.6rr] key. +* crates.io will reject publication of crates with dependencies that + have a wildcard version constraint. Crates with wildcard + dependencies were seen to cause a variety of problems, as described + in [RFC 1241]. Since 1.5 publication of such crates has emitted a + warning. +* `cargo clean` [accepts a `--release` flag][1.6cc] to clean the + release folder. A variety of artifacts that Cargo failed to clean + are now correctly deleted. + +Misc +---- + +* The `unreachable_code` lint [warns when a function call's argument + diverges][1.6dv]. +* The parser indicates [failures that may be caused by + confusingly-similar Unicode characters][1.6uc] +* Certain macro errors [are reported at definition time][1.6m], not + expansion. + +Compatibility Notes +------------------- + +* The compiler no longer makes use of the [`RUST_PATH`][1.6rp] + environment variable when locating crates. This was a pre-cargo + feature for integrating with the package manager that was + accidentally never removed. +* [A number of bugs were fixed in the privacy checker][1.6p] that + could cause previously-accepted code to break. +* [Modules and unit/tuple structs may not share the same name][1.6ts]. +* [Bugs in pattern matching unit structs were fixed][1.6us]. The tuple + struct pattern syntax (`Foo(..)`) can no longer be used to match + unit structs. This is a warning now, but will become an error in + future releases. Patterns that share the same name as a const are + now an error. 
+* A bug was fixed that causes [rustc not to apply default type + parameters][1.6xc] when resolving certain method implementations of + traits defined in other crates. + +[1.6a]: https://github.com/rust-lang/rust/pull/29454 +[1.6ae]: https://github.com/rust-lang/rust/pull/29770 +[1.6bh]: https://github.com/rust-lang/rust/pull/29811 +[1.6c]: https://github.com/rust-lang/cargo/pull/2192 +[1.6cc]: https://github.com/rust-lang/cargo/pull/2131 +[1.6co]: http://doc.rust-lang.org/beta/core/index.html +[1.6dv]: https://github.com/rust-lang/rust/pull/30000 +[1.6f]: https://github.com/rust-lang/rust/pull/29129 +[1.6m]: https://github.com/rust-lang/rust/pull/29828 +[1.6ms]: https://github.com/rust-lang/rust/pull/29604 +[1.6p]: https://github.com/rust-lang/rust/pull/29726 +[1.6rp]: https://github.com/rust-lang/rust/pull/30034 +[1.6rr]: https://github.com/rust-lang/cargo/pull/2134 +[1.6s]: https://github.com/rust-lang/rust/pull/29675 +[1.6ts]: https://github.com/rust-lang/rust/issues/21546 +[1.6uc]: https://github.com/rust-lang/rust/pull/29837 +[1.6us]: https://github.com/rust-lang/rust/pull/29383 +[1.6xc]: https://github.com/rust-lang/rust/issues/30123 +[RFC 1184]: https://github.com/rust-lang/rfcs/blob/master/text/1184-stabilize-no_std.md +[RFC 1241]: https://github.com/rust-lang/rfcs/blob/master/text/1241-no-wildcard-deps.md +[`ErrorKind::UnexpectedEof`]: http://doc.rust-lang.org/nightly/std/io/enum.ErrorKind.html#variant.UnexpectedEof +[`Iterator::max_by_key`]: http://doc.rust-lang.org/nightly/std/iter/trait.Iterator.html#method.max_by_key +[`Iterator::min_by_key`]: 
http://doc.rust-lang.org/nightly/std/iter/trait.Iterator.html#method.min_by_key +[`Mutex::get_mut`]: http://doc.rust-lang.org/nightly/std/sync/struct.Mutex.html#method.get_mut +[`Mutex::into_inner`]: http://doc.rust-lang.org/nightly/std/sync/struct.Mutex.html#method.into_inner +[`Read::read_exact`]: http://doc.rust-lang.org/nightly/std/io/trait.Read.html#method.read_exact +[`RwLock::get_mut`]: http://doc.rust-lang.org/nightly/std/sync/struct.RwLock.html#method.get_mut +[`RwLock::into_inner`]: http://doc.rust-lang.org/nightly/std/sync/struct.RwLock.html#method.into_inner +[`Vec::extend_from_slice`]: http://doc.rust-lang.org/nightly/collections/vec/struct.Vec.html#method.extend_from_slice +[`collections::binary_heap::BinaryHeap::drain`]: http://doc.rust-lang.org/nightly/std/collections/binary_heap/struct.BinaryHeap.html#method.drain +[`collections::binary_heap::Drain`]: http://doc.rust-lang.org/nightly/std/collections/binary_heap/struct.Drain.html +[`collections::hash_map::Drain`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.Drain.html +[`collections::hash_map::HashMap::drain`]: http://doc.rust-lang.org/nightly/std/collections/hash_map/struct.HashMap.html#method.drain +[`collections::hash_set::Drain`]: http://doc.rust-lang.org/nightly/std/collections/hash_set/struct.Drain.html +[`collections::hash_set::HashSet::drain`]: http://doc.rust-lang.org/nightly/std/collections/hash_set/struct.HashSet.html#method.drain +[`fs::DirBuilder::create`]: http://doc.rust-lang.org/nightly/std/fs/struct.DirBuilder.html#method.create +[`fs::DirBuilder::new`]: http://doc.rust-lang.org/nightly/std/fs/struct.DirBuilder.html#method.new +[`fs::DirBuilder::recursive`]: http://doc.rust-lang.org/nightly/std/fs/struct.DirBuilder.html#method.recursive +[`fs::DirBuilder`]: http://doc.rust-lang.org/nightly/std/fs/struct.DirBuilder.html +[`os::unix::fs::DirBuilderExt::mode`]: http://doc.rust-lang.org/nightly/std/os/unix/fs/trait.DirBuilderExt.html#tymethod.mode 
+[`os::unix::fs::DirBuilderExt`]: http://doc.rust-lang.org/nightly/std/os/unix/fs/trait.DirBuilderExt.html +[`string::Drain`]: http://doc.rust-lang.org/nightly/std/string/struct.Drain.html +[`string::String::drain`]: http://doc.rust-lang.org/nightly/std/string/struct.String.html#method.drain +[`vec::Drain`]: http://doc.rust-lang.org/nightly/std/vec/struct.Drain.html +[`vec::Vec::drain`]: http://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#method.drain +[`vec_deque::Drain`]: http://doc.rust-lang.org/nightly/std/collections/vec_deque/struct.Drain.html +[`vec_deque::VecDeque::drain`]: http://doc.rust-lang.org/nightly/std/collections/vec_deque/struct.VecDeque.html#method.drain + + Version 1.5.0 (2015-12-10) ========================== diff --git a/appveyor.yml b/appveyor.yml new file mode 100644 index 0000000000000..686c48abb30cd --- /dev/null +++ b/appveyor.yml @@ -0,0 +1,110 @@ +environment: + matrix: + # 32/64 bit MSVC + - MSYS_BITS: 64 + TARGET: x86_64-pc-windows-msvc + CHECK: check + CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions + - MSYS_BITS: 32 + TARGET: i686-pc-windows-msvc + CHECK: check + CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions + + # MSVC rustbuild + - MSYS_BITS: 64 + CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions + TARGET: x86_64-pc-windows-msvc + CHECK: check + + # MSVC cargotest + - MSYS_BITS: 64 + CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions + TARGET: x86_64-pc-windows-msvc + CHECK: check-cargotest + + # 32/64-bit MinGW builds. + # + # The MinGW builds unfortunately have to both download a custom toolchain and + # avoid the one installed by AppVeyor by default. Interestingly, though, for + # different reasons! + # + # For 32-bit the installed gcc toolchain on AppVeyor uses the pthread + # threading model. 
This is unfortunately not what we want, and if we compile + # with it then there's lots of link errors in the standard library (undefined + # references to pthread symbols). + # + # For 64-bit the installed gcc toolchain is currently 5.3.0 which + # unfortunately segfaults on Windows with --enable-llvm-assertions (segfaults + # in LLVM). See rust-lang/rust#28445 for more information, but to work around + # this we go back in time to 4.9.2 specifically. + # + # Finally, note that the downloads below are all in the `rust-lang-ci` S3 + # bucket, but they clearly didn't originate there! The downloads originally + # came from the mingw-w64 SourceForge download site. Unfortunately + # SourceForge is notoriously flaky, so we mirror it on our own infrastructure. + # + # And as a final point of note, the 32-bit MinGW build using the makefiles do + # *not* use debug assertions and llvm assertions. This is because they take + # too long on appveyor and this is tested by rustbuild below. + - MSYS_BITS: 32 + TARGET: i686-pc-windows-gnu + CHECK: check + MINGW_URL: https://s3.amazonaws.com/rust-lang-ci + MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z + MINGW_DIR: mingw32 + + - MSYS_BITS: 32 + CONFIGURE_ARGS: --enable-rustbuild --enable-llvm-assertions --enable-debug-assertions + TARGET: i686-pc-windows-gnu + CHECK: check + MINGW_URL: https://s3.amazonaws.com/rust-lang-ci + MINGW_ARCHIVE: i686-4.9.2-release-win32-dwarf-rt_v4-rev4.7z + MINGW_DIR: mingw32 + + - MSYS_BITS: 64 + CONFIGURE_ARGS: --enable-llvm-assertions --enable-debug-assertions + TARGET: x86_64-pc-windows-gnu + CHECK: check + MINGW_URL: https://s3.amazonaws.com/rust-lang-ci + MINGW_ARCHIVE: x86_64-4.9.2-release-win32-seh-rt_v4-rev4.7z + MINGW_DIR: mingw64 + +clone_depth: 1 +build: false + +install: + # If we need to download a custom MinGW, do so here and set the path + # appropriately. 
+ # + # Note that this *also* means that we're not using what is typically + # /mingw32/bin/python2.7.exe, which is a "correct" python interpreter where + # /usr/bin/python2.7.exe is not. To ensure we use the right interpreter we + # move `C:\Python27` ahead in PATH and then also make sure the `python2.7.exe` + # file exists in there (which it doesn't by default). + - if defined MINGW_URL appveyor DownloadFile %MINGW_URL%/%MINGW_ARCHIVE% + - if defined MINGW_URL 7z x -y %MINGW_ARCHIVE% > nul + - if defined MINGW_URL set PATH=C:\Python27;%CD%\%MINGW_DIR%\bin;C:\msys64\usr\bin;%PATH% + - if defined MINGW_URL copy C:\Python27\python.exe C:\Python27\python2.7.exe + + # Otherwise pull in the MinGW installed on appveyor + - if NOT defined MINGW_URL set PATH=C:\msys64\mingw%MSYS_BITS%\bin;C:\msys64\usr\bin;%PATH% + +test_script: + - sh ./configure + %CONFIGURE_ARGS% + --build=%TARGET% + - bash -c "make -j$(nproc)" + - bash -c "make %CHECK% -j$(nproc)" + +cache: + - build/%TARGET%/llvm -> src/rustllvm/llvm-auto-clean-trigger + - "%TARGET%/llvm -> src/rustllvm/llvm-auto-clean-trigger" + +branches: + only: + - auto + +# init: +# - ps: iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) +# on_finish: +# - ps: $blockRdp = $true; iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/appveyor/ci/master/scripts/enable-rdp.ps1')) diff --git a/configure b/configure index 0255b04caa316..5311bf4b064eb 100755 --- a/configure +++ b/configure @@ -1,5 +1,13 @@ #!/bin/sh +# /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/bash is. 
+if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then + POSIX_SHELL="true" + export POSIX_SHELL + exec /usr/bin/env bash $0 "$@" +fi +unset POSIX_SHELL # clear it so if we invoke other scripts, they run as bash as well + msg() { echo "configure: $*" } @@ -125,12 +133,13 @@ probe() { } probe_need() { - local V=$1 probe $* + local V=$1 + shift eval VV=\$$V if [ -z "$VV" ] then - err "needed, but unable to find any of: $*" + err "$V needed, but unable to find any of: $*" fi } @@ -334,14 +343,6 @@ enable_if_not_disabled() { fi } -to_llvm_triple() { - case $1 in - i686-w64-mingw32) echo i686-pc-windows-gnu ;; - x86_64-w64-mingw32) echo x86_64-pc-windows-gnu ;; - *) echo $1 ;; - esac -} - to_gnu_triple() { case $1 in i686-pc-windows-gnu) echo i686-w64-mingw32 ;; @@ -359,6 +360,13 @@ abs_path() { (unset CDPATH && cd "$_path" > /dev/null && pwd) } +HELP=0 +for arg; do + case "$arg" in + --help) HELP=1;; + esac +done + msg "looking for configure programs" need_cmd cmp need_cmd mkdir @@ -424,6 +432,15 @@ case $CFG_OSTYPE in CFG_OSTYPE=apple-darwin ;; + SunOS) + CFG_OSTYPE=sun-solaris + CFG_CPUTYPE=$(isainfo -n) + ;; + + Haiku) + CFG_OSTYPE=unknown-haiku + ;; + MINGW*) # msys' `uname` does not print gcc configuration, but prints msys # configuration. 
so we cannot believe `uname -m`: @@ -490,11 +507,16 @@ case $CFG_CPUTYPE in CFG_CPUTYPE=arm ;; - armv7l) + armv6l) CFG_CPUTYPE=arm CFG_OSTYPE="${CFG_OSTYPE}eabihf" ;; + armv7l) + CFG_CPUTYPE=armv7 + CFG_OSTYPE="${CFG_OSTYPE}eabihf" + ;; + aarch64) CFG_CPUTYPE=aarch64 ;; @@ -511,10 +533,18 @@ case $CFG_CPUTYPE in CFG_CPUTYPE=powerpc64le ;; + s390x) + CFG_CPUTYPE=s390x + ;; + x86_64 | x86-64 | x64 | amd64) CFG_CPUTYPE=x86_64 ;; + BePC) + CFG_CPUTYPE=i686 + ;; + *) err "unknown CPU type: $CFG_CPUTYPE" esac @@ -550,7 +580,7 @@ CFG_SELF="$0" CFG_CONFIGURE_ARGS="$@" -case "${CFG_SRC_DIR}" in +case "${CFG_SRC_DIR}" in *\ * ) err "The path to the rust source directory contains spaces, which is not supported" ;; @@ -560,11 +590,8 @@ esac OPTIONS="" -HELP=0 -if [ "$1" = "--help" ] +if [ "$HELP" -eq 1 ] then - HELP=1 - shift echo echo "Usage: $CFG_SELF [options]" echo @@ -588,19 +615,27 @@ opt docs 1 "build standard library documentation" opt compiler-docs 0 "build compiler documentation" opt optimize-tests 1 "build tests with optimizations" opt debuginfo-tests 0 "build tests with debugger metadata" -opt libcpp 1 "build with llvm with libc++ instead of libstdc++ when using clang" +opt quiet-tests 0 "enable quieter output when running tests" +opt libcpp 1 "build llvm with libc++ instead of libstdc++ when using clang" opt llvm-assertions 0 "build LLVM with assertions" opt debug-assertions 0 "build with debugging assertions" opt fast-make 0 "use .gitmodules as timestamp for submodule deps" opt ccache 0 "invoke gcc/clang via ccache to reuse object files between builds" opt local-rust 0 "use an installed rustc rather than downloading a snapshot" +opt local-rebuild 0 "assume local-rust matches the current version, for rebuilds; implies local-rust, and is implied if local-rust already matches the current version" opt llvm-static-stdcpp 0 "statically link to libstdc++ for LLVM" +opt llvm-link-shared 0 "prefer shared linking to LLVM (llvm-config --link-shared)" opt rpath 1 "build 
rpaths into rustc itself" opt stage0-landing-pads 1 "enable landing pads during bootstrap with stage0" # This is used by the automation to produce single-target nightlies opt dist-host-only 0 "only install bins for the host architecture" opt inject-std-version 1 "inject the current compiler version of libstd into programs" opt llvm-version-check 1 "check if the LLVM version is supported, build anyway" +opt rustbuild 0 "use the rust and cargo based build system" +opt codegen-tests 1 "run the src/test/codegen tests" +opt option-checking 1 "complain about unrecognized options in this configure script" +opt ninja 0 "build LLVM using the Ninja generator (for MSVC, requires building in the correct environment)" +opt vendor 0 "enable usage of vendored Rust crates" # Optimization and debugging options. These may be overridden by the release channel, etc. opt_nosave optimize 1 "build optimized rust code" @@ -608,7 +643,9 @@ opt_nosave optimize-cxx 1 "build optimized C++ code" opt_nosave optimize-llvm 1 "build optimized LLVM" opt_nosave llvm-assertions 0 "build LLVM with assertions" opt_nosave debug-assertions 0 "build with debugging assertions" +opt_nosave llvm-release-debuginfo 0 "build LLVM with debugger metadata" opt_nosave debuginfo 0 "build with debugger metadata" +opt_nosave debuginfo-lines 0 "build with line number debugger metadata" opt_nosave debug-jemalloc 0 "build jemalloc with --enable-debug --enable-fill" valopt localstatedir "/var/lib" "local state directory" @@ -620,15 +657,31 @@ valopt llvm-root "" "set LLVM root" valopt python "" "set path to python" valopt jemalloc-root "" "set directory where libjemalloc_pic.a is located" valopt build "${DEFAULT_BUILD}" "GNUs ./configure syntax LLVM build triple" -valopt android-cross-path "/opt/ndk_standalone" "Android NDK standalone path (deprecated)" +valopt android-cross-path "" "Android NDK standalone path (deprecated)" valopt i686-linux-android-ndk "" "i686-linux-android NDK standalone path" valopt 
arm-linux-androideabi-ndk "" "arm-linux-androideabi NDK standalone path" +valopt armv7-linux-androideabi-ndk "" "armv7-linux-androideabi NDK standalone path" valopt aarch64-linux-android-ndk "" "aarch64-linux-android NDK standalone path" valopt nacl-cross-path "" "NaCl SDK path (Pepper Canary is recommended). Must be absolute!" -valopt release-channel "dev" "the name of the release channel to build" -valopt musl-root "/usr/local" "MUSL root installation directory" +valopt musl-root "/usr/local" "MUSL root installation directory (deprecated)" +valopt musl-root-x86_64 "/usr/local" "x86_64-unknown-linux-musl install directory" +valopt musl-root-i686 "/usr/local" "i686-unknown-linux-musl install directory" +valopt musl-root-arm "/usr/local" "arm-unknown-linux-musleabi install directory" +valopt musl-root-armhf "/usr/local" "arm-unknown-linux-musleabihf install directory" +valopt musl-root-armv7 "/usr/local" "armv7-unknown-linux-musleabihf install directory" valopt extra-filename "" "Additional data that is hashed and passed to the -C extra-filename flag" +if [ -e ${CFG_SRC_DIR}.git ] +then + valopt release-channel "dev" "the name of the release channel to build" +else + # If we have no git directory then we are probably a tarball distribution + # and should default to stable channel - Issue 28322 + probe CFG_GIT git + msg "git: no git directory. 
Changing default release channel to stable" + valopt release-channel "stable" "the name of the release channel to build" +fi + # Used on systems where "cc" and "ar" are unavailable valopt default-linker "cc" "the default linker" valopt default-ar "ar" "the default ar" @@ -645,12 +698,7 @@ valopt_nosave local-rust-root "/usr/local" "set prefix for local rust binary" valopt_nosave host "${CFG_BUILD}" "GNUs ./configure syntax LLVM host triples" valopt_nosave target "${CFG_HOST}" "GNUs ./configure syntax LLVM target triples" valopt_nosave mandir "${CFG_PREFIX}/share/man" "install man pages in PATH" - -# Temporarily support old triples until buildbots get updated -CFG_BUILD=$(to_llvm_triple $CFG_BUILD) -putvar CFG_BUILD # Yes, this creates a duplicate entry, but the last one wins. -CFG_HOST=$(to_llvm_triple $CFG_HOST) -CFG_TARGET=$(to_llvm_triple $CFG_TARGET) +valopt_nosave docdir "${CFG_PREFIX}/share/doc/rust" "install documentation in PATH" # On Windows this determines root of the subtree for target libraries. # Host runtime libs always go to 'bin'. 
@@ -672,16 +720,38 @@ then fi # Validate Options -step_msg "validating $CFG_SELF args" -validate_opt +if [ -z "$CFG_DISABLE_OPTION_CHECKING" ] +then + step_msg "validating $CFG_SELF args" + validate_opt +fi # Validate the release channel, and configure options case "$CFG_RELEASE_CHANNEL" in nightly ) msg "overriding settings for $CFG_RELEASE_CHANNEL" CFG_ENABLE_LLVM_ASSERTIONS=1 + + # FIXME(#37364) shouldn't have to disable this on windows-gnu + case "$CFG_BUILD" in + *-pc-windows-gnu) + ;; + *) + CFG_ENABLE_DEBUGINFO_LINES=1 + ;; + esac ;; - dev | beta | stable) + beta | stable) + msg "overriding settings for $CFG_RELEASE_CHANNEL" + case "$CFG_BUILD" in + *-pc-windows-gnu) + ;; + *) + CFG_ENABLE_DEBUGINFO_LINES=1 + ;; + esac + ;; + dev) ;; *) err "release channel must be 'dev', 'nightly', 'beta' or 'stable'" @@ -710,31 +780,21 @@ if [ -n "$CFG_DISABLE_OPTIMIZE_CXX" ]; then putvar CFG_DISABLE_OPTIMIZE_CXX; fi if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then putvar CFG_DISABLE_OPTIMIZE_LLVM; fi if [ -n "$CFG_ENABLE_LLVM_ASSERTIONS" ]; then putvar CFG_ENABLE_LLVM_ASSERTIONS; fi if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTIONS; fi +if [ -n "$CFG_ENABLE_LLVM_RELEASE_DEBUGINFO" ]; then putvar CFG_ENABLE_LLVM_RELEASE_DEBUGINFO; fi if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi +if [ -n "$CFG_ENABLE_DEBUGINFO_LINES" ]; then putvar CFG_ENABLE_DEBUGINFO_LINES; fi if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi -# A magic value that allows the compiler to use unstable features -# during the bootstrap even when doing so would normally be an error -# because of feature staging or because the build turns on -# warnings-as-errors and unstable features default to warnings. The -# build has to match this key in an env var. Meant to be a mild -# deterrent from users just turning on unstable features on the stable -# channel. 
-# Basing CFG_BOOTSTRAP_KEY on CFG_BOOTSTRAP_KEY lets it get picked up -# during a Makefile reconfig. -CFG_BOOTSTRAP_KEY="${CFG_BOOTSTRAP_KEY-`date +%H:%M:%S`}" -putvar CFG_BOOTSTRAP_KEY - step_msg "looking for build programs" -probe_need CFG_CURLORWGET curl wget +probe_need CFG_CURL curl if [ -z "$CFG_PYTHON_PROVIDED" ]; then - probe_need CFG_PYTHON python2.7 python2.6 python2 python + probe_need CFG_PYTHON python2.7 python2 python fi python_version=$($CFG_PYTHON -V 2>&1) -if [ $(echo $python_version | grep -c '^Python 2\.[4567]') -ne 1 ]; then - err "Found $python_version, but LLVM requires Python 2.4-2.7" +if [ $(echo $python_version | grep -c '^Python 2\.7') -ne 1 ]; then + err "Found $python_version, but Python 2.7 is required" fi # If we have no git directory then we are probably a tarball distribution @@ -776,11 +836,31 @@ probe CFG_BISON bison probe CFG_GDB gdb probe CFG_LLDB lldb +if [ -n "$CFG_ENABLE_NINJA" ] +then + probe CFG_NINJA ninja + if [ -z "$CFG_NINJA" ] + then + # On Debian and Fedora, the `ninja` binary is an IRC bot, so the build tool was + # renamed. Handle this case. + probe CFG_NINJA ninja-build + fi +fi + +# For building LLVM +probe_need CFG_CMAKE cmake + # On MacOS X, invoking `javac` pops up a dialog if the JDK is not # installed. Since `javac` is only used if `antlr4` is available, # probe for it only in this case. 
if [ -n "$CFG_ANTLR4" ] then + CFG_ANTLR4_JAR="\"$(find /usr/ -name antlr-complete.jar 2>/dev/null | head -n 1)\"" + if [ "x" = "x$CFG_ANTLR4_JAR" ] + then + CFG_ANTLR4_JAR="\"$(find ~ -name antlr-complete.jar 2>/dev/null | head -n 1)\"" + fi + putvar CFG_ANTLR4_JAR $CFG_ANTLR4_JAR probe CFG_JAVAC javac fi @@ -798,13 +878,6 @@ then fi fi -if [ -n "$CFG_GDB" ] -then - # Store GDB's version - CFG_GDB_VERSION=$($CFG_GDB --version 2>/dev/null | head -1) - putvar CFG_GDB_VERSION -fi - if [ -n "$CFG_LLDB" ] then # Store LLDB's version @@ -827,6 +900,19 @@ then fi fi +# LLDB tests on OSX require /usr/bin/python, not something like Homebrew's +# /usr/local/bin/python. We're loading a compiled module for LLDB tests which is +# only compatible with the system. +case $CFG_BUILD in + *-apple-darwin) + CFG_LLDB_PYTHON=/usr/bin/python + ;; + *) + CFG_LLDB_PYTHON=$CFG_PYTHON + ;; +esac +putvar CFG_LLDB_PYTHON + step_msg "looking for target specific programs" probe CFG_ADB adb @@ -837,6 +923,16 @@ then BIN_SUF=.exe fi +# --enable-local-rebuild implies --enable-local-rust too +if [ -n "$CFG_ENABLE_LOCAL_REBUILD" ] +then + if [ -z "$CFG_ENABLE_LOCAL_RUST" ] + then + CFG_ENABLE_LOCAL_RUST=1 + putvar CFG_ENABLE_LOCAL_RUST + fi +fi + if [ -n "$CFG_ENABLE_LOCAL_RUST" ] then system_rustc=$(which rustc) @@ -852,7 +948,7 @@ then fi CMD="${CFG_LOCAL_RUST_ROOT}/bin/rustc${BIN_SUF}" - LRV=`$CMD --version` + LRV=`LD_LIBRARY_PATH=${CFG_LOCAL_RUST_ROOT}/lib $CMD --version` if [ $? 
-ne 0 ] then step_msg "failure while running $CMD --version" @@ -865,9 +961,8 @@ fi # Force bitrig to build with clang; gcc doesn't like us there if [ $CFG_OSTYPE = unknown-bitrig ] then - step_msg "on Bitrig, forcing use of clang, disabling jemalloc" + step_msg "on Bitrig, forcing use of clang" CFG_ENABLE_CLANG=1 - CFG_DISABLE_JEMALLOC=1 fi # default gcc version under OpenBSD maybe too old, try using egcc, which is a @@ -887,9 +982,6 @@ then CXX="${CXX:-eg++}" fi fi - - step_msg "on OpenBSD, disabling jemalloc" - CFG_DISABLE_JEMALLOC=1 fi # OS X 10.9, gcc is actually clang. This can cause some confusion in the build @@ -970,13 +1062,19 @@ then LLVM_VERSION=$($LLVM_CONFIG --version) case $LLVM_VERSION in - (3.[5-8]*) + (3.[7-9]*) msg "found ok version of LLVM: $LLVM_VERSION" ;; (*) - err "bad LLVM version: $LLVM_VERSION, need >=3.5" + err "bad LLVM version: $LLVM_VERSION, need >=3.7" ;; esac + + if "$CFG_LLVM_ROOT/bin/llvm-mc" -help | grep -- "-relocation-model"; then + msg "found older llvm-mc" + CFG_LLVM_MC_HAS_RELOCATION_MODEL=1 + putvar CFG_LLVM_MC_HAS_RELOCATION_MODEL + fi fi # Even when the user overrides the choice of CC, still try to detect @@ -1022,37 +1120,6 @@ if [ -n "$CFG_ENABLE_CLANG" ] then case "$CC" in (''|*clang) - CFG_CLANG_REPORTED_VERSION=$($CFG_CC --version | grep version) - - if echo $CFG_CLANG_REPORTED_VERSION | grep -q "(based on LLVM "; then - CFG_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*(based on LLVM \(.*\))/\1/') - elif echo $CFG_CLANG_REPORTED_VERSION | grep -q "Apple LLVM"; then - CFG_OSX_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*version \(.*\) .*/\1/') - else - CFG_CLANG_VERSION=$(echo $CFG_CLANG_REPORTED_VERSION | sed 's/.*version \(.*\) .*/\1/') - fi - - if [ -n "$CFG_OSX_CLANG_VERSION" ] - then - case $CFG_OSX_CLANG_VERSION in - (7.0*) - step_msg "found ok version of APPLE CLANG: $CFG_OSX_CLANG_VERSION" - ;; - (*) - err "bad APPLE CLANG version: $CFG_OSX_CLANG_VERSION, need >=7.0" - ;; - esac - 
else - case $CFG_CLANG_VERSION in - (3.2* | 3.3* | 3.4* | 3.5* | 3.6* | 3.7* | 3.8*) - step_msg "found ok version of CLANG: $CFG_CLANG_VERSION" - ;; - (*) - err "bad CLANG version: $CFG_CLANG_VERSION, need >=3.0svn" - ;; - esac - fi - if [ -z "$CC" ] then CFG_CC="clang" @@ -1097,6 +1164,7 @@ putvar CFG_STDCPP_NAME # a little post-processing of various config values CFG_PREFIX=${CFG_PREFIX%/} CFG_MANDIR=${CFG_MANDIR%/} +CFG_DOCDIR=${CFG_DOCDIR%/} CFG_HOST="$(echo $CFG_HOST | tr ',' ' ')" CFG_TARGET="$(echo $CFG_TARGET | tr ',' ' ')" CFG_SUPPORTED_TARGET="" @@ -1139,6 +1207,15 @@ do case $i in *android*) + case $i in + armv7-linux-androideabi) + cmd_prefix="arm-linux-androideabi" + ;; + *) + cmd_prefix=$i + ;; + esac + upper_snake_target=$(echo "$i" | tr '[:lower:]' '[:upper:]' | tr '\-' '\_') eval ndk=\$"CFG_${upper_snake_target}_NDK" if [ -z "$ndk" ] @@ -1149,7 +1226,7 @@ do fi # Perform a basic sanity check of the NDK - for android_ndk_tool in "$ndk/bin/$i-gcc" "$ndk/bin/$i-g++" "$ndk/bin/$i-ar" + for android_ndk_tool in "$ndk/bin/$cmd_prefix-gcc" "$ndk/bin/$cmd_prefix-g++" "$ndk/bin/$cmd_prefix-ar" do if [ ! -f $android_ndk_tool ] then @@ -1170,51 +1247,7 @@ do fi ;; - - *-musl) - if [ ! -f $CFG_MUSL_ROOT/lib/libc.a ] - then - err "musl libc $CFG_MUSL_ROOT/lib/libc.a not found" - fi - ;; - *-msvc) - # Currently the build system is not configured to build jemalloc - # with MSVC, so we omit this optional dependency. - step_msg "targeting MSVC, disabling jemalloc" - CFG_DISABLE_JEMALLOC=1 - putvar CFG_DISABLE_JEMALLOC - - # There are some MSYS python builds which will auto-translate - # windows-style paths to MSYS-style paths in Python itself. - # Unfortunately this breaks LLVM's build system as somewhere along - # the line LLVM prints a path into a file from Python and then CMake - # later tries to interpret that path. If Python prints a MSYS path - # and CMake tries to use it as a Windows path, you're gonna have a - # Bad Time. 
- # - # Consequently here we try to detect when that happens and print an - # error if it does. - if $CFG_PYTHON -c 'import sys; print sys.argv[1]' `pwd` | grep '^/' > /dev/null - then - err " - -python is silently translating windows paths to MSYS paths \ -and the build will fail if this python is used. - -Either an official python install must be used or an \ -alternative python package in MinGW must be used. - -If you are building under msys2 try installing the mingw-w64-x86_64-python2 \ -package instead of python2: - -$ pacman -R python2 && pacman -S mingw-w64-x86_64-python2 -" - fi - - # MSVC requires cmake because that's how we're going to build LLVM - probe_need CFG_CMAKE cmake - # There are three builds of cmake on windows: MSVC, MinGW and Cygwin # The Cygwin build does not have generators for Visual Studio, so # detect that here and error. @@ -1259,7 +1292,7 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake bits=x86_64 msvc_part=amd64 ;; - i686-*) + i*86-*) bits=i386 msvc_part= ;; @@ -1293,17 +1326,41 @@ $ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake putvar CFG_MSVC_LIB_PATH_${bits} ;; - *-rumprun-netbsd) - step_msg "targeting rumprun-netbsd, disabling jemalloc" - CFG_DISABLE_JEMALLOC=1 - putvar CFG_DISABLE_JEMALLOC - ;; - *) ;; esac done +if [ "$CFG_OSTYPE" = "pc-windows-gnu" ] || [ "$CFG_OSTYPE" = "pc-windows-msvc" ] +then + # There are some MSYS python builds which will auto-translate + # windows-style paths to MSYS-style paths in Python itself. + # Unfortunately this breaks LLVM's build system as somewhere along + # the line LLVM prints a path into a file from Python and then CMake + # later tries to interpret that path. If Python prints a MSYS path + # and CMake tries to use it as a Windows path, you're gonna have a + # Bad Time. + # + # Consequently here we try to detect when that happens and print an + # error if it does. 
+ if $CFG_PYTHON -c 'import sys; print sys.argv[1]' `pwd` | grep '^/' > /dev/null + then + err " + +python is silently translating windows paths to MSYS paths \ +and the build will fail if this python is used. + +Either an official python install must be used or an \ +alternative python package in MinGW must be used. + +If you are building under msys2 try installing the mingw-w64-x86_64-python2 \ +package instead of python2: + +$ pacman -S mingw-w64-x86_64-python2 +" + fi +fi + if [ -n "$CFG_PERF" ] then HAVE_PERF_LOGFD=`$CFG_PERF stat --log-fd 2>&1 | grep 'unknown option'` @@ -1314,96 +1371,107 @@ then fi fi -step_msg "making directories" +if [ -z "$CFG_ENABLE_RUSTBUILD" ]; then -for i in \ - doc doc/std doc/extra \ - dl tmp dist -do - make_dir $i -done + step_msg "making directories" -for t in $CFG_HOST -do - make_dir $t/llvm -done + for i in \ + doc doc/std doc/extra \ + dl tmp dist + do + make_dir $i + done -for t in $CFG_HOST -do - make_dir $t/rustllvm -done + for t in $CFG_HOST + do + make_dir $t/llvm + done -for t in $CFG_TARGET -do - make_dir $t/rt - for s in 0 1 2 3 + for t in $CFG_HOST do - make_dir $t/rt/stage$s - make_dir $t/rt/jemalloc - make_dir $t/rt/compiler-rt - for i in \ - isaac sync test \ - arch/i386 arch/x86_64 arch/arm arch/aarch64 arch/mips arch/powerpc - do - make_dir $t/rt/stage$s/$i - done + make_dir $t/rustllvm done -done -for h in $CFG_HOST -do - for t in $CFG_TARGET + for t in $CFG_TARGET + do + make_dir $t/rt + for s in 0 1 2 3 do - # host lib dir stage0 - make_dir $h/stage0/lib - - # target bin dir stage0 - make_dir $h/stage0/lib/rustlib/$t/bin - - # target lib dir stage0 - make_dir $h/stage0/lib/rustlib/$t/lib - - for i in 0 1 2 3 - do - # host bin dir - make_dir $h/stage$i/bin - - # host lib dir - make_dir $h/stage$i/$CFG_LIBDIR_RELATIVE - - # host test dir - make_dir $h/stage$i/test - - # target bin dir - make_dir $h/stage$i/$CFG_LIBDIR_RELATIVE/rustlib/$t/bin - - # target lib dir - make_dir 
$h/stage$i/$CFG_LIBDIR_RELATIVE/rustlib/$t/lib - done + make_dir $t/rt/stage$s + make_dir $t/rt/jemalloc + make_dir $t/rt/compiler-rt + for i in \ + isaac sync test \ + arch/i386 arch/x86_64 arch/arm arch/aarch64 arch/mips arch/powerpc + do + make_dir $t/rt/stage$s/$i + done done + done - make_dir $h/test/run-pass - make_dir $h/test/run-pass-valgrind - make_dir $h/test/run-pass-fulldeps - make_dir $h/test/run-fail - make_dir $h/test/run-fail-fulldeps - make_dir $h/test/compile-fail - make_dir $h/test/parse-fail - make_dir $h/test/compile-fail-fulldeps - make_dir $h/test/bench - make_dir $h/test/perf - make_dir $h/test/pretty - make_dir $h/test/debuginfo-gdb - make_dir $h/test/debuginfo-lldb - make_dir $h/test/codegen - make_dir $h/test/rustdoc -done + for h in $CFG_HOST + do + for t in $CFG_TARGET + do + # host bin dir stage0 + make_dir $h/stage0/bin + + # host lib dir stage0 + make_dir $h/stage0/lib + + # host test dir stage0 + make_dir $h/stage0/test + + # target bin dir stage0 + make_dir $h/stage0/lib/rustlib/$t/bin + + # target lib dir stage0 + make_dir $h/stage0/lib/rustlib/$t/lib + + for i in 1 2 3 + do + # host bin dir + make_dir $h/stage$i/bin + + # host lib dir + make_dir $h/stage$i/$CFG_LIBDIR_RELATIVE + + # host test dir + make_dir $h/stage$i/test + + # target bin dir + make_dir $h/stage$i/$CFG_LIBDIR_RELATIVE/rustlib/$t/bin + + # target lib dir + make_dir $h/stage$i/$CFG_LIBDIR_RELATIVE/rustlib/$t/lib + done + done + + make_dir $h/test/run-pass + make_dir $h/test/run-pass-valgrind + make_dir $h/test/run-pass-fulldeps + make_dir $h/test/run-fail + make_dir $h/test/run-fail-fulldeps + make_dir $h/test/compile-fail + make_dir $h/test/parse-fail + make_dir $h/test/compile-fail-fulldeps + make_dir $h/test/bench + make_dir $h/test/perf + make_dir $h/test/pretty + make_dir $h/test/debuginfo-gdb + make_dir $h/test/debuginfo-lldb + make_dir $h/test/codegen + make_dir $h/test/codegen-units + make_dir $h/test/rustdoc + done + +fi # Configure submodules step_msg 
"configuring submodules" # Have to be in the top of src directory for this -if [ -z $CFG_DISABLE_MANAGE_SUBMODULES ] +if [ -z $CFG_DISABLE_MANAGE_SUBMODULES ] && [ -z $CFG_ENABLE_RUSTBUILD ] then cd ${CFG_SRC_DIR} @@ -1449,6 +1517,19 @@ then cd ${CFG_BUILD_DIR} fi +# Do a sanity check that the submodule source exists. Because GitHub +# automatically publishes broken tarballs that can't be disabled, and +# people download them and try to use them. +if [ ! -e "${CFG_SRC_DIR}/src/liblibc" ]; then + err "some submodules are missing. Is this a broken tarball? + +If you downloaded this tarball from the GitHub release pages at +https://github.com/rust-lang/rust/releases, +then please delete it and instead download the source from +https://www.rust-lang.org/downloads.html" + +fi + # Configure llvm, only if necessary step_msg "looking at LLVM" CFG_LLVM_SRC_DIR=${CFG_SRC_DIR}src/llvm/ @@ -1462,30 +1543,23 @@ do ;; esac - if [ -z $CFG_LLVM_ROOT ] + if [ -n "$CFG_ENABLE_RUSTBUILD" ] + then + msg "not configuring LLVM, rustbuild in use" + do_reconfigure=0 + elif [ -z $CFG_LLVM_ROOT ] then LLVM_BUILD_DIR=${CFG_BUILD_DIR}$t/llvm - if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ] - then - LLVM_DBG_OPTS="--enable-debug-symbols --disable-optimized" - # Just use LLVM straight from its build directory to - # avoid 'make install' time - LLVM_INST_DIR=$LLVM_BUILD_DIR/Debug - else - LLVM_DBG_OPTS="--enable-optimized" - LLVM_INST_DIR=$LLVM_BUILD_DIR/Release - fi - if [ -z "$CFG_ENABLE_LLVM_ASSERTIONS" ] - then - LLVM_ASSERTION_OPTS="--disable-assertions" - else - LLVM_ASSERTION_OPTS="--enable-assertions" - - # Apparently even if we request assertions be enabled for MSVC, - # LLVM's CMake build system ignore this and outputs in `Release` - # anyway. 
- if [ ${is_msvc} -eq 0 ]; then - LLVM_INST_DIR=${LLVM_INST_DIR}+Asserts + LLVM_INST_DIR=$LLVM_BUILD_DIR + # For some crazy reason the MSVC output dir is different than Unix + if [ ${is_msvc} -ne 0 ]; then + if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ] + then + # Just use LLVM straight from its build directory to + # avoid 'make install' time + LLVM_INST_DIR=$LLVM_BUILD_DIR/Debug + else + LLVM_INST_DIR=$LLVM_BUILD_DIR/Release fi fi else @@ -1495,7 +1569,9 @@ do LLVM_INST_DIR=$CFG_LLVM_ROOT do_reconfigure=0 # Check that LLVm FileCheck is available. Needed for the tests - need_cmd $LLVM_INST_DIR/bin/FileCheck + if [ -z "$CFG_DISABLE_CODEGEN_TESTS" ]; then + need_cmd $LLVM_INST_DIR/bin/FileCheck + fi fi if [ ${do_reconfigure} -ne 0 ] @@ -1518,7 +1594,10 @@ do fi # We need the generator later on for compiler-rt even if LLVM's not built - if [ ${is_msvc} -ne 0 ] + if [ -n "$CFG_NINJA" ] + then + generator="Ninja" + elif [ ${is_msvc} -ne 0 ] then case "$CFG_MSVC_ROOT" in *14.0*) @@ -1541,88 +1620,60 @@ do err "can only build LLVM for x86 platforms" ;; esac - CFG_CMAKE_GENERATOR=$generator - putvar CFG_CMAKE_GENERATOR - fi - - if [ ${do_reconfigure} -ne 0 ] && [ ${is_msvc} -ne 0 ] - then - msg "configuring LLVM for $t with cmake" - - CMAKE_ARGS="-DLLVM_INCLUDE_TESTS=OFF" - if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then - CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Debug" - else - CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release" - fi - if [ -z "$CFG_ENABLE_LLVM_ASSERTIONS" ] - then - CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=OFF" - else - CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=ON" - fi - - msg "configuring LLVM with:" - msg "$CMAKE_ARGS" - - (cd $LLVM_BUILD_DIR && "$CFG_CMAKE" $CFG_LLVM_SRC_DIR \ - -G "$CFG_CMAKE_GENERATOR" \ - $CMAKE_ARGS) - need_ok "LLVM cmake configure failed" + else + generator="Unix Makefiles" fi - - if [ ${do_reconfigure} -ne 0 ] && [ ${is_msvc} -eq 0 ] - then - # LLVM's configure doesn't recognize the new Windows triples yet - 
gnu_t=$(to_gnu_triple $t) - - msg "configuring LLVM for $gnu_t" - - LLVM_TARGETS="--enable-targets=x86,x86_64,arm,aarch64,mips,powerpc" - LLVM_BUILD="--build=$gnu_t" - LLVM_HOST="--host=$gnu_t" - LLVM_TARGET="--target=$gnu_t" - - # Disable unused LLVM features - LLVM_OPTS="$LLVM_DBG_OPTS $LLVM_ASSERTION_OPTS --disable-docs --enable-bindings=none" - # Disable term-info, linkage of which comes in multiple forms, - # making our snapshots incompatible (#9334) - LLVM_OPTS="$LLVM_OPTS --disable-terminfo" - # Try to have LLVM pull in as few dependencies as possible (#9397) - LLVM_OPTS="$LLVM_OPTS --disable-zlib --disable-libffi" - - # Use win32 native thread/lock apis instead of pthread wrapper. - # (llvm's configure tries to find pthread first, so we have to disable it explicitly.) - # Also note that pthreads works badly on mingw-w64 systems: #8996 - case "$CFG_BUILD" in - (*-windows-gnu) - LLVM_OPTS="$LLVM_OPTS --disable-pthreads" - ;; - esac - - case "$CFG_CC" in - ("ccache clang") - LLVM_CXX_32="ccache clang++ -Qunused-arguments" - LLVM_CC_32="ccache clang -Qunused-arguments" - - LLVM_CXX_64="ccache clang++ -Qunused-arguments" - LLVM_CC_64="ccache clang -Qunused-arguments" + CFG_CMAKE_GENERATOR=$generator + putvar CFG_CMAKE_GENERATOR + + msg "configuring LLVM for $t" + + LLVM_CFLAGS_32="" + LLVM_CXXFLAGS_32="" + LLVM_LDFLAGS_32="" + LLVM_CFLAGS_64="" + LLVM_CXXFLAGS_64="" + LLVM_LDFLAGS_64="" + + case "$CFG_CC" in + ("ccache clang") + LLVM_CXX_32="ccache" + LLVM_CC_32="ccache" + LLVM_CXX_32_ARG1="clang++" + LLVM_CC_32_ARG1="clang" + LLVM_CFLAGS_32="-Qunused-arguments" + LLVM_CXXFLAGS_32="-Qunused-arguments" + + LLVM_CXX_64="ccache" + LLVM_CC_64="ccache" + LLVM_CXX_64_ARG1="clang++" + LLVM_CC_64_ARG1="clang" + LLVM_CFLAGS_64="-Qunused-arguments" + LLVM_CXXFLAGS_64="-Qunused-arguments" ;; - ("clang") - LLVM_CXX_32="clang++ -Qunused-arguments" - LLVM_CC_32="clang -Qunused-arguments" - - LLVM_CXX_64="clang++ -Qunused-arguments" - LLVM_CC_64="clang -Qunused-arguments" + 
("clang") + LLVM_CXX_32="clang++" + LLVM_CC_32="clang" + LLVM_CFLAGS_32="-Qunused-arguments" + LLVM_CXXFLAGS_32="-Qunused-arguments" + + LLVM_CXX_64="clang++" + LLVM_CC_64="clang" + LLVM_CFLAGS_64="-Qunused-arguments" + LLVM_CXXFLAGS_64="-Qunused-arguments" ;; - ("ccache gcc") - LLVM_CXX_32="ccache g++" - LLVM_CC_32="ccache gcc" - - LLVM_CXX_64="ccache g++" - LLVM_CC_64="ccache gcc" + ("ccache gcc") + LLVM_CXX_32="ccache" + LLVM_CC_32="ccache" + LLVM_CXX_32_ARG1="g++" + LLVM_CC_32_ARG1="gcc" + + LLVM_CXX_64="ccache" + LLVM_CC_64="ccache" + LLVM_CXX_64_ARG1="g++" + LLVM_CC_64_ARG1="gcc" ;; - ("gcc") + ("gcc") LLVM_CXX_32="g++" LLVM_CC_32="gcc" @@ -1630,7 +1681,7 @@ do LLVM_CC_64="gcc" ;; - (*) + (*) msg "inferring LLVM_CXX/CC from CXX/CC = $CXX/$CC" if [ -n "$CFG_ENABLE_CCACHE" ] then @@ -1639,11 +1690,15 @@ do err "ccache requested but not found" fi - LLVM_CXX_32="ccache $CXX" - LLVM_CC_32="ccache $CC" + LLVM_CXX_32="ccache" + LLVM_CC_32="ccache" + LLVM_CXX_32_ARG1="$CXX" + LLVM_CC_32_ARG1="$CC" - LLVM_CXX_64="ccache $CXX" - LLVM_CC_64="ccache $CC" + LLVM_CXX_64="ccache" + LLVM_CC_64="ccache" + LLVM_CXX_64_ARG1="$CXX" + LLVM_CC_64_ARG1="$CC" else LLVM_CXX_32="$CXX" LLVM_CC_32="$CC" @@ -1653,86 +1708,104 @@ do fi ;; - esac + esac - case "$CFG_CPUTYPE" in - (x86*) - LLVM_CXX_32="$LLVM_CXX_32 -m32" - LLVM_CC_32="$LLVM_CC_32 -m32" + case "$CFG_CPUTYPE" in + (x86*) + LLVM_CFLAGS_32="$LLVM_CFLAGS_32 -m32" + LLVM_CXXFLAGS_32="$LLVM_CXXFLAGS_32 -m32" + LLVM_LDFLAGS_32="$LLVM_LDFLAGS_32 -m32" + ;; + esac - LLVM_CFLAGS_32="-m32" - LLVM_CXXFLAGS_32="-m32" - LLVM_LDFLAGS_32="-m32" + if echo $t | grep -q x86_64 + then + LLVM_CXX=$LLVM_CXX_64 + LLVM_CC=$LLVM_CC_64 + LLVM_CXX_ARG1=$LLVM_CXX_64_ARG1 + LLVM_CC_ARG1=$LLVM_CC_64_ARG1 + LLVM_CFLAGS=$LLVM_CFLAGS_64 + LLVM_CXXFLAGS=$LLVM_CXXFLAGS_64 + LLVM_LDFLAGS=$LLVM_LDFLAGS_64 + else + LLVM_CXX=$LLVM_CXX_32 + LLVM_CC=$LLVM_CC_32 + LLVM_CXX_ARG1=$LLVM_CXX_32_ARG1 + LLVM_CC_ARG1=$LLVM_CC_32_ARG1 + LLVM_CFLAGS=$LLVM_CFLAGS_32 + 
LLVM_CXXFLAGS=$LLVM_CXXFLAGS_32 + LLVM_LDFLAGS=$LLVM_LDFLAGS_32 + fi - LLVM_CFLAGS_64="" - LLVM_CXXFLAGS_64="" - LLVM_LDFLAGS_64="" + if [ "$CFG_USING_LIBCPP" != "0" ]; then + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_LIBCXX=ON" + fi - LLVM_CXX_32="$LLVM_CXX_32 -m32" - LLVM_CC_32="$LLVM_CC_32 -m32" - ;; + # Turn off things we don't need + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_INCLUDE_TESTS=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_INCLUDE_EXAMPLES=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_INCLUDE_DOCS=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ZLIB=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DWITH_POLY=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_TERMINFO=OFF" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_LIBEDIT=OFF" - (*) - LLVM_CFLAGS_32="" - LLVM_CXXFLAGS_32="" - LLVM_LDFLAGS_32="" + arch="$(echo "$t" | cut -d - -f 1)" - LLVM_CFLAGS_64="" - LLVM_CXXFLAGS_64="" - LLVM_LDFLAGS_64="" - ;; - esac + if [ "$arch" = i686 ]; then + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_BUILD_32_BITS=ON" + fi - if echo $t | grep -q x86_64 - then - LLVM_CXX=$LLVM_CXX_64 - LLVM_CC=$LLVM_CC_64 - LLVM_CFLAGS=$LLVM_CFLAGS_64 - LLVM_CXXFLAGS=$LLVM_CXXFLAGS_64 - LLVM_LDFLAGS=$LLVM_LDFLAGS_64 - else - LLVM_CXX=$LLVM_CXX_32 - LLVM_CC=$LLVM_CC_32 - LLVM_CFLAGS=$LLVM_CFLAGS_32 - LLVM_CXXFLAGS=$LLVM_CXXFLAGS_32 - LLVM_LDFLAGS=$LLVM_LDFLAGS_32 + if [ "$t" != "$CFG_BUILD" ]; then + # see http://llvm.org/docs/HowToCrossCompileLLVM.html + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_CROSSCOMPILING=True" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGET_ARCH=$arch" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TABLEGEN=$CFG_BUILD_DIR/$CFG_BUILD/llvm/bin/llvm-tblgen" + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_DEFAULT_TARGET_TRIPLE=$t" + fi + + # MSVC handles compiler business itself + if [ ${is_msvc} -eq 0 ]; then + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_C_COMPILER=$LLVM_CC" + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_CXX_COMPILER=$LLVM_CXX" + CMAKE_ARGS="$CMAKE_ARGS '-DCMAKE_C_FLAGS=$LLVM_CFLAGS'" + CMAKE_ARGS="$CMAKE_ARGS '-DCMAKE_CXX_FLAGS=$LLVM_CXXFLAGS'" + if [ -n "$LLVM_CC_ARG1" ]; then + 
CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_C_COMPILER_ARG1=$LLVM_CC_ARG1" + fi + if [ -n "$LLVM_CXX_ARG1" ]; then + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_CXX_COMPILER_ARG1=$LLVM_CXX_ARG1" fi + # FIXME: What about LDFLAGS? + fi - CXX=$LLVM_CXX - CC=$LLVM_CC - CFLAGS="$CFLAGS $LLVM_CFLAGS" - CXXFLAGS="$CXXFLAGS $LLVM_CXXFLAGS" - LDFLAGS="$LDFLAGS $LLVM_LDFLAGS" + if [ -n "$CFG_DISABLE_OPTIMIZE_LLVM" ]; then + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Debug" + elif [ -n "$CFG_ENABLE_LLVM_RELEASE_DEBUGINFO" ]; then + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=RelWithDebInfo" + else + CMAKE_ARGS="$CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release" + fi + if [ -z "$CFG_ENABLE_LLVM_ASSERTIONS" ] + then + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=OFF" + else + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_ENABLE_ASSERTIONS=ON" + fi - if [ "$CFG_USING_LIBCPP" != "0" ]; then - LLVM_OPTS="$LLVM_OPTS --enable-libcpp" - fi + CMAKE_ARGS="$CMAKE_ARGS -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430'" + CMAKE_ARGS="$CMAKE_ARGS -G '$CFG_CMAKE_GENERATOR'" + CMAKE_ARGS="$CMAKE_ARGS $CFG_LLVM_SRC_DIR" - LLVM_FLAGS="$LLVM_TARGETS $LLVM_OPTS $LLVM_BUILD \ - $LLVM_HOST $LLVM_TARGET --with-python=$CFG_PYTHON" + if [ ${do_reconfigure} -ne 0 ] + then + msg "configuring LLVM for $t with cmake" msg "configuring LLVM with:" - msg "$LLVM_FLAGS" - - export CXX - export CC - export CFLAGS - export CXXFLAGS - export LDFLAGS - - cd $LLVM_BUILD_DIR - case $CFG_SRC_DIR in - /* | [a-z]:* | [A-Z]:*) - ${CFG_LLVM_SRC_DIR}configure $LLVM_FLAGS - ;; - *) - ${CFG_BUILD_DIR}${CFG_LLVM_SRC_DIR}configure \ - $LLVM_FLAGS - ;; - esac - need_ok "LLVM configure failed" + msg "$CMAKE_ARGS" - cd $CFG_BUILD_DIR + (cd $LLVM_BUILD_DIR && eval "\"$CFG_CMAKE\"" $CMAKE_ARGS) + need_ok "LLVM cmake configure failed" fi # Construct variables for LLVM build and install directories for @@ -1763,9 +1836,11 @@ putvar CFG_LIBDIR_RELATIVE putvar CFG_DISABLE_MANAGE_SUBMODULES putvar CFG_AARCH64_LINUX_ANDROID_NDK putvar 
CFG_ARM_LINUX_ANDROIDEABI_NDK +putvar CFG_ARMV7_LINUX_ANDROIDEABI_NDK putvar CFG_I686_LINUX_ANDROID_NDK putvar CFG_NACL_CROSS_PATH putvar CFG_MANDIR +putvar CFG_DOCDIR putvar CFG_USING_LIBCPP # Avoid spurious warnings from clang by feeding it original source on @@ -1793,8 +1868,15 @@ do putvar $CFG_LLVM_INST_DIR done +if [ -n "$CFG_ENABLE_RUSTBUILD" ] +then + INPUT_MAKEFILE=src/bootstrap/mk/Makefile.in +else + INPUT_MAKEFILE=Makefile.in +fi + msg -copy_if_changed ${CFG_SRC_DIR}Makefile.in ./Makefile +copy_if_changed ${CFG_SRC_DIR}${INPUT_MAKEFILE} ./Makefile move_if_changed config.tmp config.mk rm -f config.tmp touch config.stamp diff --git a/man/rustc.1 b/man/rustc.1 index f383f51052f95..1656255956191 100644 --- a/man/rustc.1 +++ b/man/rustc.1 @@ -1,4 +1,4 @@ -.TH RUSTC "1" "August 2015" "rustc 1.2.0" "User Commands" +.TH RUSTC "1" "September 2016" "rustc 1.13.0" "User Commands" .SH NAME rustc \- The Rust compiler .SH SYNOPSIS @@ -6,9 +6,7 @@ rustc \- The Rust compiler [\fIOPTIONS\fR] \fIINPUT\fR .SH DESCRIPTION -This program is a compiler for the Rust language, available at -.UR https://www.rust\-lang.org -.UE . +This program is a compiler for the Rust language, available at https://www.rust\-lang.org. .SH OPTIONS @@ -46,17 +44,16 @@ The optional \fIKIND\fR can be one of \fIstatic\fR, \fIdylib\fR, or \fIframework\fR. If omitted, \fIdylib\fR is assumed. .TP -\fB\-\-crate\-type\fR [bin|lib|rlib|dylib|staticlib] +\fB\-\-crate\-type\fR [bin|lib|rlib|dylib|cdylib|staticlib] Comma separated list of types of crates for the compiler to emit. .TP \fB\-\-crate\-name\fR \fINAME\fR Specify the name of the crate being built. .TP \fB\-\-emit\fR [asm|llvm\-bc|llvm\-ir|obj|link|dep\-info][=\fIPATH\fR] -Configure the output that \fBrustc\fR will produce. - -Each emission may also have an optional explicit output \fIPATH\fR specified for that particular -emission kind. This path takes precedence over the \fB-o\fR option. +Configure the output that \fBrustc\fR will produce. 
Each emission may also have +an optional explicit output \fIPATH\fR specified for that particular emission +kind. This path takes precedence over the \fB-o\fR option. .TP \fB\-\-print\fR [crate\-name|file\-names|sysroot] Comma separated list of compiler information to print on stdout. @@ -68,13 +65,11 @@ Equivalent to \fI\-C\ debuginfo=2\fR. Equivalent to \fI\-C\ opt\-level=2\fR. .TP \fB\-o\fR \fIFILENAME\fR -Write output to \fIFILENAME\fR. -Ignored if multiple \fI\-\-emit\fR outputs are specified which don't have an -explicit path otherwise. +Write output to \fIFILENAME\fR. Ignored if multiple \fI\-\-emit\fR outputs are specified which +don't have an explicit path otherwise. .TP \fB\-\-out\-dir\fR \fIDIR\fR -Write output to compiler\[hy]chosen filename in \fIDIR\fR. -Ignored if \fI\-o\fR is specified. +Write output to compiler\[hy]chosen filename in \fIDIR\fR. Ignored if \fI\-o\fR is specified. Defaults to the current directory. .TP \fB\-\-explain\fR \fIOPT\fR @@ -83,12 +78,26 @@ Provide a detailed explanation of an error message. \fB\-\-test\fR Build a test harness. .TP -\fB\-\-target\fR \fITRIPLE\fR -Target triple \fIcpu\fR\-\fImanufacturer\fR\-\fIkernel\fR[\-\fIos\fR] -to compile for (see chapter 3.4 of -.UR http://www.sourceware.org/autobook/ -.UE -for details). +\fB\-\-target\fR \fITARGET\fR +Target triple for which the code is compiled. This option defaults to the host’s target +triple. The target triple has the general format \-\-\-, where: +.RS +.TP +.B +x86, arm, thumb, mips, etc. +.TP +.B +for example on ARM: v5, v6m, v7a, v7m, etc. +.TP +.B +pc, apple, nvidia, ibm, etc. +.TP +.B +none, linux, win32, darwin, cuda, etc. +.TP +.B +eabi, gnu, android, macho, elf, etc. +.RE .TP \fB\-W help\fR Print 'lint' options and default settings. @@ -255,11 +264,12 @@ which link to the standard library. .TP \fBRUST_TEST_THREADS\fR The test framework Rust provides executes tests in parallel. This variable sets -the maximum number of threads used for this purpose. 
+the maximum number of threads used for this purpose. This setting is overridden +by the --test-threads option. .TP \fBRUST_TEST_NOCAPTURE\fR -A synonym for the --nocapture flag. +If set to a value other than "0", a synonym for the --nocapture flag. .TP \fBRUST_MIN_STACK\fR @@ -267,7 +277,7 @@ Sets the minimum stack size for new threads. .TP \fBRUST_BACKTRACE\fR -If set, produces a backtrace in the output of a program which panics. +If set to a value different than "0", produces a backtrace in the output of a program which panics. .SH "EXAMPLES" To build an executable from a source file with a main function: @@ -287,13 +297,10 @@ To build an executable with debug info: .BR rustdoc (1) .SH "BUGS" -See -.UR https://github.com/rust\-lang/rust/issues -.UE -for issues. +See https://github.com/rust\-lang/rust/issues for issues. .SH "AUTHOR" -See \fIAUTHORS.txt\fR in the Rust source distribution. +See https://github.com/rust\-lang/rust/graphs/contributors or use `git log --all --format='%cN <%cE>' | sort -u` in the rust source distribution. .SH "COPYRIGHT" This work is dual\[hy]licensed under Apache\ 2.0 and MIT terms. diff --git a/man/rustdoc.1 b/man/rustdoc.1 index b710c2c3a2560..4d885bd14363f 100644 --- a/man/rustdoc.1 +++ b/man/rustdoc.1 @@ -1,4 +1,4 @@ -.TH RUSTDOC "1" "August 2015" "rustdoc 1.2.0" "User Commands" +.TH RUSTDOC "1" "September 2016" "rustdoc 1.13.0" "User Commands" .SH NAME rustdoc \- generate documentation from Rust source code .SH SYNOPSIS @@ -8,10 +8,8 @@ rustdoc \- generate documentation from Rust source code .SH DESCRIPTION This tool generates API reference documentation by extracting comments from source code written in the Rust language, available at -.UR https://www.rust\-lang.org -.UE . -It accepts several input formats and provides several output formats -for the generated documentation. +<\fBhttps://www.rust-lang.org\fR>. 
It accepts several input formats and +provides several output formats for the generated documentation. .SH OPTIONS @@ -131,9 +129,7 @@ The generated HTML can be viewed with any standard web browser. .BR rustc (1) .SH "BUGS" -See -.UR https://github.com/rust\-lang/rust/issues -.UE +See <\fBhttps://github.com/rust\-lang/rust/issues\fR> for issues. .SH "AUTHOR" diff --git a/mk/cfg/aarch64-apple-ios.mk b/mk/cfg/aarch64-apple-ios.mk index 8cd09fa9043c8..5d822f1b1aba6 100644 --- a/mk/cfg/aarch64-apple-ios.mk +++ b/mk/cfg/aarch64-apple-ios.mk @@ -17,7 +17,7 @@ CFG_STATIC_LIB_NAME_aarch64-apple-ios=lib$(1).a CFG_LIB_DSYM_GLOB_aarch64-apple-ios = lib$(1)-*.a.dSYM CFG_CFLAGS_aarch64-apple-ios := $(CFG_IOS_SDK_FLAGS_aarch64-apple-ios) CFG_JEMALLOC_CFLAGS_aarch64-apple-ios := $(CFG_IOS_SDK_FLAGS_aarch64-apple-ios) -CFG_GCCISH_CFLAGS_aarch64-apple-ios := -Wall -Werror -fPIC $(CFG_IOS_SDK_FLAGS_aarch64-apple-ios) +CFG_GCCISH_CFLAGS_aarch64-apple-ios := -fPIC $(CFG_IOS_SDK_FLAGS_aarch64-apple-ios) CFG_GCCISH_CXXFLAGS_aarch64-apple-ios := -fno-rtti $(CFG_IOS_SDK_FLAGS_aarch64-apple-ios) -I$(CFG_IOS_SDK_aarch64-apple-ios)/usr/include/c++/4.2.1 CFG_GCCISH_LINK_FLAGS_aarch64-apple-ios := -lpthread -syslibroot $(CFG_IOS_SDK_aarch64-apple-ios) -Wl,-no_compact_unwind CFG_GCCISH_DEF_FLAG_aarch64-apple-ios := -Wl,-exported_symbols_list, diff --git a/mk/cfg/aarch64-unknown-fuchsia.mk b/mk/cfg/aarch64-unknown-fuchsia.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/aarch64-unknown-fuchsia.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/arm-unknown-linux-gnueabi.mk b/mk/cfg/arm-unknown-linux-gnueabi.mk index 9244cc43650fe..f66ad04eefe8e 100644 --- a/mk/cfg/arm-unknown-linux-gnueabi.mk +++ b/mk/cfg/arm-unknown-linux-gnueabi.mk @@ -8,8 +8,8 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).so CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabi=lib$(1).a CFG_LIB_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.so 
CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabi=lib$(1)-*.dylib.dSYM -CFG_JEMALLOC_CFLAGS_arm-unknown-linux-gnueabi := -D__arm__ -mfloat-abi=soft $(CFLAGS) -CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabi := -Wall -g -fPIC -D__arm__ -mfloat-abi=soft $(CFLAGS) +CFG_JEMALLOC_CFLAGS_arm-unknown-linux-gnueabi := -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv6 -marm +CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabi := -Wall -g -fPIC -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv6 -marm CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabi := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabi := -shared -fPIC -g CFG_GCCISH_DEF_FLAG_arm-unknown-linux-gnueabi := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/arm-unknown-linux-gnueabihf.mk b/mk/cfg/arm-unknown-linux-gnueabihf.mk index 0bd661ea00db2..defe0dc3e70eb 100644 --- a/mk/cfg/arm-unknown-linux-gnueabihf.mk +++ b/mk/cfg/arm-unknown-linux-gnueabihf.mk @@ -8,8 +8,8 @@ CFG_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).so CFG_STATIC_LIB_NAME_arm-unknown-linux-gnueabihf=lib$(1).a CFG_LIB_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.so CFG_LIB_DSYM_GLOB_arm-unknown-linux-gnueabihf=lib$(1)-*.dylib.dSYM -CFG_JEMALLOC_CFLAGS_arm-unknown-linux-gnueabihf := -D__arm__ $(CFLAGS) -CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabihf := -Wall -g -fPIC -D__arm__ $(CFLAGS) +CFG_JEMALLOC_CFLAGS_arm-unknown-linux-gnueabihf := -D__arm__ $(CFLAGS) -march=armv6 -marm +CFG_GCCISH_CFLAGS_arm-unknown-linux-gnueabihf := -Wall -g -fPIC -D__arm__ $(CFLAGS) -march=armv6 -marm CFG_GCCISH_CXXFLAGS_arm-unknown-linux-gnueabihf := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-gnueabihf := -shared -fPIC -g CFG_GCCISH_DEF_FLAG_arm-unknown-linux-gnueabihf := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/arm-unknown-linux-musleabi.mk b/mk/cfg/arm-unknown-linux-musleabi.mk new file mode 100644 index 0000000000000..4d1438d592ece --- /dev/null +++ b/mk/cfg/arm-unknown-linux-musleabi.mk @@ -0,0 +1,26 @@ +# 
arm-unknown-linux-musleabi configuration +CROSS_PREFIX_arm-unknown-linux-musleabi=arm-linux-musleabi- +CC_arm-unknown-linux-musleabi=gcc +CXX_arm-unknown-linux-musleabi=g++ +CPP_arm-unknown-linux-musleabi=gcc -E +AR_arm-unknown-linux-musleabi=ar +CFG_LIB_NAME_arm-unknown-linux-musleabi=lib$(1).so +CFG_STATIC_LIB_NAME_arm-unknown-linux-musleabi=lib$(1).a +CFG_LIB_GLOB_arm-unknown-linux-musleabi=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_arm-unknown-linux-musleabi=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_arm-unknown-linux-musleabi := -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv6 -marm +CFG_GCCISH_CFLAGS_arm-unknown-linux-musleabi := -Wall -g -fPIC -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv6 -marm +CFG_GCCISH_CXXFLAGS_arm-unknown-linux-musleabi := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_arm-unknown-linux-musleabi := -shared -fPIC -g +CFG_GCCISH_DEF_FLAG_arm-unknown-linux-musleabi := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_arm-unknown-linux-musleabi := +CFG_INSTALL_NAME_arm-unknown-linux-musleabi = +CFG_EXE_SUFFIX_arm-unknown-linux-musleabi := +CFG_WINDOWSY_arm-unknown-linux-musleabi := +CFG_UNIXY_arm-unknown-linux-musleabi := 1 +CFG_LDPATH_arm-unknown-linux-musleabi := +CFG_RUN_arm-unknown-linux-musleabi=$(2) +CFG_RUN_TARG_arm-unknown-linux-musleabi=$(call CFG_RUN_arm-unknown-linux-musleabi,,$(2)) +RUSTC_FLAGS_arm-unknown-linux-musleabi := +RUSTC_CROSS_FLAGS_arm-unknown-linux-musleabi := +CFG_GNU_TRIPLE_arm-unknown-linux-musleabi := arm-unknown-linux-musleabi diff --git a/mk/cfg/arm-unknown-linux-musleabihf.mk b/mk/cfg/arm-unknown-linux-musleabihf.mk new file mode 100644 index 0000000000000..8120250150d43 --- /dev/null +++ b/mk/cfg/arm-unknown-linux-musleabihf.mk @@ -0,0 +1,3 @@ +# This file is intentially left empty to indicate that, while this target is +# supported, it's not supported using plain GNU Make builds. Use a --rustbuild +# instead. 
\ No newline at end of file diff --git a/mk/cfg/armv5te-unknown-linux-gnueabi.mk b/mk/cfg/armv5te-unknown-linux-gnueabi.mk new file mode 100644 index 0000000000000..98567a03c28a9 --- /dev/null +++ b/mk/cfg/armv5te-unknown-linux-gnueabi.mk @@ -0,0 +1,26 @@ +# armv5-unknown-linux-gnueabi configuration +CROSS_PREFIX_armv5te-unknown-linux-gnueabi=arm-linux-gnueabi- +CC_armv5te-unknown-linux-gnueabi=gcc +CXX_armv5te-unknown-linux-gnueabi=g++ +CPP_armv5te-unknown-linux-gnueabi=gcc -E +AR_armv5te-unknown-linux-gnueabi=ar +CFG_LIB_NAME_armv5te-unknown-linux-gnueabi=lib$(1).so +CFG_STATIC_LIB_NAME_armv5te-unknown-linux-gnueabi=lib$(1).a +CFG_LIB_GLOB_armv5te-unknown-linux-gnueabi=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_armv5te-unknown-linux-gnueabi=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_armv5te-unknown-linux-gnueabi := -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv5te -marm +CFG_GCCISH_CFLAGS_armv5te-unknown-linux-gnueabi := -Wall -g -fPIC -D__arm__ -mfloat-abi=soft $(CFLAGS) -march=armv5te -marm +CFG_GCCISH_CXXFLAGS_armv5te-unknown-linux-gnueabi := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_armv5te-unknown-linux-gnueabi := -shared -fPIC -g +CFG_GCCISH_DEF_FLAG_armv5te-unknown-linux-gnueabi := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_armv5te-unknown-linux-gnueabi := +CFG_INSTALL_NAME_ar,-unknown-linux-gnueabi = +CFG_EXE_SUFFIX_armv5te-unknown-linux-gnueabi := +CFG_WINDOWSY_armv5te-unknown-linux-gnueabi := +CFG_UNIXY_armv5te-unknown-linux-gnueabi := 1 +CFG_LDPATH_armv5te-unknown-linux-gnueabi := +CFG_RUN_armv5te-unknown-linux-gnueabi=$(2) +CFG_RUN_TARG_armv5te-unknown-linux-gnueabi=$(call CFG_RUN_armv5te-unknown-linux-gnueabi,,$(2)) +RUSTC_FLAGS_armv5te-unknown-linux-gnueabi := +RUSTC_CROSS_FLAGS_armv5te-unknown-linux-gnueabi := +CFG_GNU_TRIPLE_armv5te-unknown-linux-gnueabi := armv5te-unknown-linux-gnueabi diff --git a/mk/cfg/armv7-apple-ios.mk b/mk/cfg/armv7-apple-ios.mk index d4696976574e9..34ca4de6563e4 100644 --- a/mk/cfg/armv7-apple-ios.mk +++ 
b/mk/cfg/armv7-apple-ios.mk @@ -15,7 +15,7 @@ CFG_INSTALL_ONLY_RLIB_armv7-apple-ios = 1 CFG_STATIC_LIB_NAME_armv7-apple-ios=lib$(1).a CFG_LIB_DSYM_GLOB_armv7-apple-ios = lib$(1)-*.a.dSYM CFG_JEMALLOC_CFLAGS_armv7-apple-ios := -arch armv7 -mfpu=vfp3 $(CFG_IOS_SDK_FLAGS_armv7-apple-ios) -CFG_GCCISH_CFLAGS_armv7-apple-ios := -Wall -Werror -g -fPIC $(CFG_IOS_SDK_FLAGS_armv7-apple-ios) -mfpu=vfp3 -arch armv7 +CFG_GCCISH_CFLAGS_armv7-apple-ios := -g -fPIC $(CFG_IOS_SDK_FLAGS_armv7-apple-ios) -mfpu=vfp3 -arch armv7 CFG_GCCISH_CXXFLAGS_armv7-apple-ios := -fno-rtti $(CFG_IOS_SDK_FLAGS_armv7-apple-ios) -I$(CFG_IOS_SDK_armv7-apple-ios)/usr/include/c++/4.2.1 CFG_GCCISH_LINK_FLAGS_armv7-apple-ios := -lpthread -syslibroot $(CFG_IOS_SDK_armv7-apple-ios) -Wl,-no_compact_unwind CFG_GCCISH_DEF_FLAG_armv7-apple-ios := -Wl,-exported_symbols_list, diff --git a/mk/cfg/armv7-linux-androideabi.mk b/mk/cfg/armv7-linux-androideabi.mk new file mode 100644 index 0000000000000..e5bf2e4df7ab3 --- /dev/null +++ b/mk/cfg/armv7-linux-androideabi.mk @@ -0,0 +1,25 @@ +# armv7-linux-androideabi configuration +CC_armv7-linux-androideabi=$(CFG_ARMV7_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-gcc +CXX_armv7-linux-androideabi=$(CFG_ARMV7_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-g++ +CPP_armv7-linux-androideabi=$(CFG_ARMV7_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-gcc -E +AR_armv7-linux-androideabi=$(CFG_ARMV7_LINUX_ANDROIDEABI_NDK)/bin/arm-linux-androideabi-ar +CFG_LIB_NAME_armv7-linux-androideabi=lib$(1).so +CFG_STATIC_LIB_NAME_armv7-linux-androideabi=lib$(1).a +CFG_LIB_GLOB_armv7-linux-androideabi=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_armv7-linux-androideabi=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_armv7-linux-androideabi := -D__arm__ -DANDROID -D__ANDROID__ $(CFLAGS) +CFG_GCCISH_CFLAGS_armv7-linux-androideabi := -Wall -g -fPIC -D__arm__ -mfloat-abi=softfp -march=armv7-a -mfpu=vfpv3-d16 -DANDROID -D__ANDROID__ $(CFLAGS) +CFG_GCCISH_CXXFLAGS_armv7-linux-androideabi := -fno-rtti 
$(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_armv7-linux-androideabi := -shared -fPIC -ldl -g -lm -lsupc++ +CFG_GCCISH_DEF_FLAG_armv7-linux-androideabi := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_armv7-linux-androideabi := +CFG_INSTALL_NAME_armv7-linux-androideabi = +CFG_EXE_SUFFIX_armv7-linux-androideabi := +CFG_WINDOWSY_armv7-linux-androideabi := +CFG_UNIXY_armv7-linux-androideabi := 1 +CFG_LDPATH_armv7-linux-androideabi := +CFG_RUN_armv7-linux-androideabi= +CFG_RUN_TARG_armv7-linux-androideabi= +RUSTC_FLAGS_armv7-linux-androideabi := +RUSTC_CROSS_FLAGS_armv7-linux-androideabi := +CFG_GNU_TRIPLE_armv7-linux-androideabi := arm-linux-androideabi diff --git a/mk/cfg/armv7-unknown-linux-gnueabihf.mk b/mk/cfg/armv7-unknown-linux-gnueabihf.mk new file mode 100644 index 0000000000000..a8e39668ded5e --- /dev/null +++ b/mk/cfg/armv7-unknown-linux-gnueabihf.mk @@ -0,0 +1,26 @@ +# armv7-unknown-linux-gnueabihf configuration +CROSS_PREFIX_armv7-unknown-linux-gnueabihf=arm-linux-gnueabihf- +CC_armv7-unknown-linux-gnueabihf=gcc +CXX_armv7-unknown-linux-gnueabihf=g++ +CPP_armv7-unknown-linux-gnueabihf=gcc -E +AR_armv7-unknown-linux-gnueabihf=ar +CFG_LIB_NAME_armv7-unknown-linux-gnueabihf=lib$(1).so +CFG_STATIC_LIB_NAME_armv7-unknown-linux-gnueabihf=lib$(1).a +CFG_LIB_GLOB_armv7-unknown-linux-gnueabihf=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_armv7-unknown-linux-gnueabihf=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_armv7-unknown-linux-gnueabihf := -D__arm__ $(CFLAGS) -march=armv7-a +CFG_GCCISH_CFLAGS_armv7-unknown-linux-gnueabihf := -Wall -g -fPIC -D__arm__ $(CFLAGS) -march=armv7-a +CFG_GCCISH_CXXFLAGS_armv7-unknown-linux-gnueabihf := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_armv7-unknown-linux-gnueabihf := -shared -fPIC -g +CFG_GCCISH_DEF_FLAG_armv7-unknown-linux-gnueabihf := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_armv7-unknown-linux-gnueabihf := +CFG_INSTALL_NAME_ar,-unknown-linux-gnueabihf = +CFG_EXE_SUFFIX_armv7-unknown-linux-gnueabihf := 
+CFG_WINDOWSY_armv7-unknown-linux-gnueabihf := +CFG_UNIXY_armv7-unknown-linux-gnueabihf := 1 +CFG_LDPATH_armv7-unknown-linux-gnueabihf := +CFG_RUN_armv7-unknown-linux-gnueabihf=$(2) +CFG_RUN_TARG_armv7-unknown-linux-gnueabihf=$(call CFG_RUN_armv7-unknown-linux-gnueabihf,,$(2)) +RUSTC_FLAGS_armv7-unknown-linux-gnueabihf := +RUSTC_CROSS_FLAGS_armv7-unknown-linux-gnueabihf := +CFG_GNU_TRIPLE_armv7-unknown-linux-gnueabihf := armv7-unknown-linux-gnueabihf diff --git a/mk/cfg/armv7-unknown-linux-musleabihf.mk b/mk/cfg/armv7-unknown-linux-musleabihf.mk new file mode 100644 index 0000000000000..8120250150d43 --- /dev/null +++ b/mk/cfg/armv7-unknown-linux-musleabihf.mk @@ -0,0 +1,3 @@ +# This file is intentially left empty to indicate that, while this target is +# supported, it's not supported using plain GNU Make builds. Use a --rustbuild +# instead. \ No newline at end of file diff --git a/mk/cfg/armv7s-apple-ios.mk b/mk/cfg/armv7s-apple-ios.mk index 96ca07648949f..6da7905a7003b 100644 --- a/mk/cfg/armv7s-apple-ios.mk +++ b/mk/cfg/armv7s-apple-ios.mk @@ -14,8 +14,8 @@ CFG_LIB_GLOB_armv7s-apple-ios = lib$(1)-*.a CFG_INSTALL_ONLY_RLIB_armv7s-apple-ios = 1 CFG_STATIC_LIB_NAME_armv7s-apple-ios=lib$(1).a CFG_LIB_DSYM_GLOB_armv7s-apple-ios = lib$(1)-*.a.dSYM -CFG_JEMALLOC_CFLAGS_armv7s-apple-ios := -arch armv7s -mfpu=vfp4 $(CFG_IOS_SDK_FLAGS_armv7s-apple-ios) -CFG_GCCISH_CFLAGS_armv7s-apple-ios := -Wall -Werror -g -fPIC $(CFG_IOS_SDK_FLAGS_armv7s-apple-ios) -mfpu=vfp4 -arch armv7s +CFG_JEMALLOC_CFLAGS_armv7s-apple-ios := -arch armv7s $(CFG_IOS_SDK_FLAGS_armv7s-apple-ios) +CFG_GCCISH_CFLAGS_armv7s-apple-ios := -g -fPIC $(CFG_IOS_SDK_FLAGS_armv7s-apple-ios) -arch armv7s CFG_GCCISH_CXXFLAGS_armv7s-apple-ios := -fno-rtti $(CFG_IOS_SDK_FLAGS_armv7s-apple-ios) -I$(CFG_IOS_SDK_armv7s-apple-ios)/usr/include/c++/4.2.1 CFG_GCCISH_LINK_FLAGS_armv7s-apple-ios := -lpthread -syslibroot $(CFG_IOS_SDK_armv7s-apple-ios) -Wl,-no_compact_unwind CFG_GCCISH_DEF_FLAG_armv7s-apple-ios := 
-Wl,-exported_symbols_list, diff --git a/mk/cfg/asmjs-unknown-emscripten.mk b/mk/cfg/asmjs-unknown-emscripten.mk new file mode 100644 index 0000000000000..a98a51b06b5d3 --- /dev/null +++ b/mk/cfg/asmjs-unknown-emscripten.mk @@ -0,0 +1,24 @@ +# asmjs-unknown-emscripten configuration +CC_asmjs-unknown-emscripten=emcc +CXX_asmjs-unknown-emscripten=em++ +CPP_asmjs-unknown-emscripten=$(CPP) +AR_asmjs-unknown-emscripten=emar +CFG_LIB_NAME_asmjs-unknown-emscripten=lib$(1).so +CFG_STATIC_LIB_NAME_asmjs-unknown-emscripten=lib$(1).a +CFG_LIB_GLOB_asmjs-unknown-emscripten=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_asmjs-unknown-emscripten=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_asmjs-unknown-emscripten := -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_asmjs-unknown-emscripten := -g -fPIC -m32 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_asmjs-unknown-emscripten := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_asmjs-unknown-emscripten := -shared -fPIC -ldl -pthread -lrt -g -m32 +CFG_GCCISH_DEF_FLAG_asmjs-unknown-emscripten := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_asmjs-unknown-emscripten := +CFG_INSTALL_NAME_asmjs-unknown-emscripten = +CFG_EXE_SUFFIX_asmjs-unknown-emscripten = +CFG_WINDOWSY_asmjs-unknown-emscripten := +CFG_UNIXY_asmjs-unknown-emscripten := 1 +CFG_LDPATH_asmjs-unknown-emscripten := +CFG_RUN_asmjs-unknown-emscripten=$(2) +CFG_RUN_TARG_asmjs-unknown-emscripten=$(call CFG_RUN_asmjs-unknown-emscripten,,$(2)) +CFG_GNU_TRIPLE_asmjs-unknown-emscripten := asmjs-unknown-emscripten +CFG_DISABLE_JEMALLOC_asmjs-unknown-emscripten := 1 diff --git a/mk/cfg/i386-apple-ios.mk b/mk/cfg/i386-apple-ios.mk index 373e2e3b65d15..bfb7fa281f242 100644 --- a/mk/cfg/i386-apple-ios.mk +++ b/mk/cfg/i386-apple-ios.mk @@ -14,7 +14,7 @@ CFG_LIB_GLOB_i386-apple-ios = lib$(1)-*.dylib CFG_INSTALL_ONLY_RLIB_i386-apple-ios = 1 CFG_STATIC_LIB_NAME_i386-apple-ios=lib$(1).a CFG_LIB_DSYM_GLOB_i386-apple-ios = lib$(1)-*.dylib.dSYM -CFG_GCCISH_CFLAGS_i386-apple-ios := -Wall -Werror -g -fPIC -m32 
$(CFG_IOSSIM_FLAGS_i386-apple-ios) +CFG_GCCISH_CFLAGS_i386-apple-ios := -g -fPIC -m32 $(CFG_IOSSIM_FLAGS_i386-apple-ios) CFG_GCCISH_CXXFLAGS_i386-apple-ios := -fno-rtti $(CFG_IOSSIM_FLAGS_i386-apple-ios) -I$(CFG_IOSSIM_SDK_i386-apple-ios)/usr/include/c++/4.2.1 CFG_GCCISH_LINK_FLAGS_i386-apple-ios := -lpthread -m32 -Wl,-no_compact_unwind -m32 -Wl,-syslibroot $(CFG_IOSSIM_SDK_i386-apple-ios) CFG_GCCISH_DEF_FLAG_i386-apple-ios := -Wl,-exported_symbols_list, diff --git a/mk/cfg/i586-pc-windows-msvc.mk b/mk/cfg/i586-pc-windows-msvc.mk new file mode 100644 index 0000000000000..48f1ecec3a704 --- /dev/null +++ b/mk/cfg/i586-pc-windows-msvc.mk @@ -0,0 +1,28 @@ +# i586-pc-windows-msvc configuration +CC_i586-pc-windows-msvc=$(CFG_MSVC_CL_i386) +LINK_i586-pc-windows-msvc=$(CFG_MSVC_LINK_i386) +CXX_i586-pc-windows-msvc=$(CFG_MSVC_CL_i386) +CPP_i586-pc-windows-msvc=$(CFG_MSVC_CL_i386) +AR_i586-pc-windows-msvc=$(CFG_MSVC_LIB_i386) +CFG_LIB_NAME_i586-pc-windows-msvc=$(1).dll +CFG_STATIC_LIB_NAME_i586-pc-windows-msvc=$(1).lib +CFG_LIB_GLOB_i586-pc-windows-msvc=$(1)-*.{dll,lib} +CFG_LIB_DSYM_GLOB_i586-pc-windows-msvc=$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_i586-pc-windows-msvc := +CFG_GCCISH_CFLAGS_i586-pc-windows-msvc := -MD -arch:IA32 -nologo +CFG_GCCISH_CXXFLAGS_i586-pc-windows-msvc := -MD -arch:IA32 -nologo +CFG_GCCISH_LINK_FLAGS_i586-pc-windows-msvc := +CFG_GCCISH_DEF_FLAG_i586-pc-windows-msvc := +CFG_LLC_FLAGS_i586-pc-windows-msvc := +CFG_INSTALL_NAME_i586-pc-windows-msvc = +CFG_EXE_SUFFIX_i586-pc-windows-msvc := .exe +CFG_WINDOWSY_i586-pc-windows-msvc := 1 +CFG_UNIXY_i586-pc-windows-msvc := +CFG_LDPATH_i586-pc-windows-msvc := +CFG_RUN_i586-pc-windows-msvc=$(2) +CFG_RUN_TARG_i586-pc-windows-msvc=$(call CFG_RUN_i586-pc-windows-msvc,,$(2)) +CFG_GNU_TRIPLE_i586-pc-windows-msvc := i586-pc-win32 + +# Currently the build system is not configured to build jemalloc +# with MSVC, so we omit this optional dependency. 
+CFG_DISABLE_JEMALLOC_i586-pc-windows-msvc := 1 diff --git a/mk/cfg/i586-unknown-linux-gnu.mk b/mk/cfg/i586-unknown-linux-gnu.mk new file mode 100644 index 0000000000000..fa2909196dcf1 --- /dev/null +++ b/mk/cfg/i586-unknown-linux-gnu.mk @@ -0,0 +1,23 @@ +# i586-unknown-linux-gnu configuration +CC_i586-unknown-linux-gnu=$(CC) +CXX_i586-unknown-linux-gnu=$(CXX) +CPP_i586-unknown-linux-gnu=$(CPP) +AR_i586-unknown-linux-gnu=$(AR) +CFG_LIB_NAME_i586-unknown-linux-gnu=lib$(1).so +CFG_STATIC_LIB_NAME_i586-unknown-linux-gnu=lib$(1).a +CFG_LIB_GLOB_i586-unknown-linux-gnu=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_i586-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_i586-unknown-linux-gnu := -m32 $(CFLAGS) -march=pentium -Wa,-mrelax-relocations=no +CFG_GCCISH_CFLAGS_i586-unknown-linux-gnu := -g -fPIC -m32 $(CFLAGS) -march=pentium -Wa,-mrelax-relocations=no +CFG_GCCISH_CXXFLAGS_i586-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) -march=pentium +CFG_GCCISH_LINK_FLAGS_i586-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m32 +CFG_GCCISH_DEF_FLAG_i586-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_i586-unknown-linux-gnu := +CFG_INSTALL_NAME_i586-unknown-linux-gnu = +CFG_EXE_SUFFIX_i586-unknown-linux-gnu = +CFG_WINDOWSY_i586-unknown-linux-gnu := +CFG_UNIXY_i586-unknown-linux-gnu := 1 +CFG_LDPATH_i586-unknown-linux-gnu := +CFG_RUN_i586-unknown-linux-gnu=$(2) +CFG_RUN_TARG_i586-unknown-linux-gnu=$(call CFG_RUN_i586-unknown-linux-gnu,,$(2)) +CFG_GNU_TRIPLE_i586-unknown-linux-gnu := i586-unknown-linux-gnu diff --git a/mk/cfg/i686-apple-darwin.mk b/mk/cfg/i686-apple-darwin.mk index 7ebb492bb21fe..e4b3431e8b67f 100644 --- a/mk/cfg/i686-apple-darwin.mk +++ b/mk/cfg/i686-apple-darwin.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_i686-apple-darwin=lib$(1).a CFG_LIB_GLOB_i686-apple-darwin=lib$(1)-*.dylib CFG_LIB_DSYM_GLOB_i686-apple-darwin=lib$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_i686-apple-darwin := -m32 -arch i386 $(CFLAGS) -CFG_GCCISH_CFLAGS_i686-apple-darwin 
:= -Wall -Werror -g -fPIC -m32 -arch i386 $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-apple-darwin := -g -fPIC -m32 -arch i386 $(CFLAGS) CFG_GCCISH_CXXFLAGS_i686-apple-darwin := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_i686-apple-darwin := -dynamiclib -pthread -framework CoreServices -m32 CFG_GCCISH_DEF_FLAG_i686-apple-darwin := -Wl,-exported_symbols_list, diff --git a/mk/cfg/i686-pc-windows-gnu.mk b/mk/cfg/i686-pc-windows-gnu.mk index 3426b30aeeb85..50c2b8c98acd3 100644 --- a/mk/cfg/i686-pc-windows-gnu.mk +++ b/mk/cfg/i686-pc-windows-gnu.mk @@ -9,7 +9,7 @@ CFG_STATIC_LIB_NAME_i686-pc-windows-gnu=$(1).lib CFG_LIB_GLOB_i686-pc-windows-gnu=$(1)-*.dll CFG_LIB_DSYM_GLOB_i686-pc-windows-gnu=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_i686-pc-windows-gnu := -march=i686 -m32 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) -CFG_GCCISH_CFLAGS_i686-pc-windows-gnu := -Wall -Werror -g -m32 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-pc-windows-gnu := -g -m32 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) CFG_GCCISH_CXXFLAGS_i686-pc-windows-gnu := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_i686-pc-windows-gnu := -shared -g -m32 CFG_GCCISH_DEF_FLAG_i686-pc-windows-gnu := diff --git a/mk/cfg/i686-pc-windows-msvc.mk b/mk/cfg/i686-pc-windows-msvc.mk index 4c8f110373483..b0289b9892e20 100644 --- a/mk/cfg/i686-pc-windows-msvc.mk +++ b/mk/cfg/i686-pc-windows-msvc.mk @@ -1,16 +1,16 @@ # i686-pc-windows-msvc configuration -CC_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo -LINK_i686-pc-windows-msvc="$(CFG_MSVC_LINK_i386)" -nologo -CXX_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo -CPP_i686-pc-windows-msvc="$(CFG_MSVC_CL_i386)" -nologo -AR_i686-pc-windows-msvc="$(CFG_MSVC_LIB_i386)" -nologo +CC_i686-pc-windows-msvc=$(CFG_MSVC_CL_i386) +LINK_i686-pc-windows-msvc=$(CFG_MSVC_LINK_i386) +CXX_i686-pc-windows-msvc=$(CFG_MSVC_CL_i386) +CPP_i686-pc-windows-msvc=$(CFG_MSVC_CL_i386) +AR_i686-pc-windows-msvc=$(CFG_MSVC_LIB_i386) 
CFG_LIB_NAME_i686-pc-windows-msvc=$(1).dll CFG_STATIC_LIB_NAME_i686-pc-windows-msvc=$(1).lib CFG_LIB_GLOB_i686-pc-windows-msvc=$(1)-*.{dll,lib} CFG_LIB_DSYM_GLOB_i686-pc-windows-msvc=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_i686-pc-windows-msvc := -CFG_GCCISH_CFLAGS_i686-pc-windows-msvc := -MD -CFG_GCCISH_CXXFLAGS_i686-pc-windows-msvc := -MD +CFG_GCCISH_CFLAGS_i686-pc-windows-msvc := -MD -nologo +CFG_GCCISH_CXXFLAGS_i686-pc-windows-msvc := -MD -nologo CFG_GCCISH_LINK_FLAGS_i686-pc-windows-msvc := CFG_GCCISH_DEF_FLAG_i686-pc-windows-msvc := CFG_LLC_FLAGS_i686-pc-windows-msvc := @@ -22,3 +22,7 @@ CFG_LDPATH_i686-pc-windows-msvc := CFG_RUN_i686-pc-windows-msvc=$(2) CFG_RUN_TARG_i686-pc-windows-msvc=$(call CFG_RUN_i686-pc-windows-msvc,,$(2)) CFG_GNU_TRIPLE_i686-pc-windows-msvc := i686-pc-win32 + +# Currently the build system is not configured to build jemalloc +# with MSVC, so we omit this optional dependency. +CFG_DISABLE_JEMALLOC_i686-pc-windows-msvc := 1 diff --git a/mk/cfg/i686-unknown-freebsd.mk b/mk/cfg/i686-unknown-freebsd.mk index bbc0c2d6f396e..a9d4446d5d49b 100644 --- a/mk/cfg/i686-unknown-freebsd.mk +++ b/mk/cfg/i686-unknown-freebsd.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_i686-unknown-freebsd=lib$(1).a CFG_LIB_GLOB_i686-unknown-freebsd=lib$(1)-*.so CFG_LIB_DSYM_GLOB_i686-unknown-freebsd=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_i686-unknown-freebsd := -m32 -I/usr/local/include $(CFLAGS) -CFG_GCCISH_CFLAGS_i686-unknown-freebsd := -Wall -Werror -g -fPIC -m32 -arch i386 -I/usr/local/include $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-unknown-freebsd := -g -fPIC -m32 -arch i386 -I/usr/local/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_i686-unknown-freebsd := -m32 -shared -fPIC -g -pthread -lrt CFG_GCCISH_DEF_FLAG_i686-unknown-freebsd := -Wl,--export-dynamic,--dynamic-list= CFG_LLC_FLAGS_i686-unknown-freebsd := diff --git a/mk/cfg/i686-unknown-haiku.mk b/mk/cfg/i686-unknown-haiku.mk new file mode 100644 index 0000000000000..cbacbff070e88 --- /dev/null +++ b/mk/cfg/i686-unknown-haiku.mk 
@@ -0,0 +1,27 @@ +# i686-unknown-haiku configuration +CROSS_PREFIX_i686-unknown-haiku=i586-pc-haiku- +CC_i686-unknown-haiku=$(CC) +CXX_i686-unknown-haiku=$(CXX) +CPP_i686-unknown-haiku=$(CPP) +AR_i686-unknown-haiku=$(AR) +CFG_LIB_NAME_i686-unknown-haiku=lib$(1).so +CFG_STATIC_LIB_NAME_i686-unknown-haiku=lib$(1).a +CFG_LIB_GLOB_i686-unknown-haiku=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_i686-unknown-haiku=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_i686-unknown-haiku := -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-unknown-haiku := -Wall -Werror -g -fPIC -m32 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_i686-unknown-haiku := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_i686-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m32 +CFG_GCCISH_PRE_LIB_FLAGS_i686-unknown-haiku := -Wl,-whole-archive +CFG_GCCISH_POST_LIB_FLAGS_i686-unknown-haiku := -Wl,-no-whole-archive +CFG_DEF_SUFFIX_i686-unknown-haiku := .linux.def +CFG_LLC_FLAGS_i686-unknown-haiku := +CFG_INSTALL_NAME_i686-unknown-haiku = +CFG_EXE_SUFFIX_i686-unknown-haiku = +CFG_WINDOWSY_i686-unknown-haiku := +CFG_UNIXY_i686-unknown-haiku := 1 +CFG_PATH_MUNGE_i686-unknown-haiku := true +CFG_LDPATH_i686-unknown-haiku := +CFG_RUN_i686-unknown-haiku=$(2) +CFG_RUN_TARG_i686-unknown-haiku=$(call CFG_RUN_i686-unknown-haiku,,$(2)) +CFG_GNU_TRIPLE_i686-unknown-haiku := i686-unknown-haiku diff --git a/mk/cfg/i686-unknown-linux-gnu.mk b/mk/cfg/i686-unknown-linux-gnu.mk index 88c0907f63b2a..9e2312008a10a 100644 --- a/mk/cfg/i686-unknown-linux-gnu.mk +++ b/mk/cfg/i686-unknown-linux-gnu.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_i686-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_i686-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_i686-unknown-linux-gnu=lib$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_i686-unknown-linux-gnu := -m32 $(CFLAGS) -CFG_GCCISH_CFLAGS_i686-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_i686-unknown-linux-gnu := -g -fPIC -m32 $(CFLAGS) -march=i686 CFG_GCCISH_CXXFLAGS_i686-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) 
CFG_GCCISH_LINK_FLAGS_i686-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m32 CFG_GCCISH_DEF_FLAG_i686-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/i686-unknown-linux-musl.mk b/mk/cfg/i686-unknown-linux-musl.mk new file mode 100644 index 0000000000000..d6c1ce8967a20 --- /dev/null +++ b/mk/cfg/i686-unknown-linux-musl.mk @@ -0,0 +1,29 @@ +# i686-unknown-linux-musl configuration +CC_i686-unknown-linux-musl=$(CFG_MUSL_ROOT)/bin/musl-gcc +CXX_i686-unknown-linux-musl=$(CXX) +CPP_i686-unknown-linux-musl=$(CFG_MUSL_ROOT)/bin/musl-gcc -E +AR_i686-unknown-linux-musl=$(AR) +CFG_INSTALL_ONLY_RLIB_i686-unknown-linux-musl = 1 +CFG_LIB_NAME_i686-unknown-linux-musl=lib$(1).so +CFG_STATIC_LIB_NAME_i686-unknown-linux-musl=lib$(1).a +CFG_LIB_GLOB_i686-unknown-linux-musl=lib$(1)-*.so +CFG_JEMALLOC_CFLAGS_i686-unknown-linux-musl := -m32 -Wl,-melf_i386 -Wa,-mrelax-relocations=no +CFG_GCCISH_CFLAGS_i686-unknown-linux-musl := -g -fPIC -m32 -Wl,-melf_i386 -Wa,-mrelax-relocations=no +CFG_GCCISH_CXXFLAGS_i686-unknown-linux-musl := +CFG_GCCISH_LINK_FLAGS_i686-unknown-linux-musl := +CFG_GCCISH_DEF_FLAG_i686-unknown-linux-musl := +CFG_LLC_FLAGS_i686-unknown-linux-musl := +CFG_INSTALL_NAME_i686-unknown-linux-musl = +CFG_EXE_SUFFIX_i686-unknown-linux-musl = +CFG_WINDOWSY_i686-unknown-linux-musl := +CFG_UNIXY_i686-unknown-linux-musl := 1 +CFG_LDPATH_i686-unknown-linux-musl := +CFG_RUN_i686-unknown-linux-musl=$(2) +CFG_RUN_TARG_i686-unknown-linux-musl=$(call CFG_RUN_i686-unknown-linux-musl,,$(2)) +CFG_GNU_TRIPLE_i686-unknown-linux-musl := i686-unknown-linux-musl +CFG_THIRD_PARTY_OBJECTS_i686-unknown-linux-musl := crt1.o crti.o crtn.o +CFG_INSTALLED_OBJECTS_i686-unknown-linux-musl := crt1.o crti.o crtn.o + +NATIVE_DEPS_libc_T_i686-unknown-linux-musl += libc.a +NATIVE_DEPS_std_T_i686-unknown-linux-musl += crt1.o crti.o crtn.o +NATIVE_DEPS_unwind_T_i686-unknown-linux-musl += libunwind.a diff --git a/mk/cfg/mips-unknown-linux-gnu.mk 
b/mk/cfg/mips-unknown-linux-gnu.mk index 65b08774d49a0..0783a4c17a4f2 100644 --- a/mk/cfg/mips-unknown-linux-gnu.mk +++ b/mk/cfg/mips-unknown-linux-gnu.mk @@ -7,10 +7,10 @@ CFG_LIB_NAME_mips-unknown-linux-gnu=lib$(1).so CFG_STATIC_LIB_NAME_mips-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_mips-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_mips-unknown-linux-gnu=lib$(1)-*.dylib.dSYM -CFG_JEMALLOC_CFLAGS_mips-unknown-linux-gnu := -mips32r2 -msoft-float -mabi=32 $(CFLAGS) -CFG_GCCISH_CFLAGS_mips-unknown-linux-gnu := -Wall -g -fPIC -mips32r2 -msoft-float -mabi=32 $(CFLAGS) +CFG_JEMALLOC_CFLAGS_mips-unknown-linux-gnu := -mips32r2 -mabi=32 $(CFLAGS) +CFG_GCCISH_CFLAGS_mips-unknown-linux-gnu := -Wall -g -fPIC -mips32r2 -mabi=32 $(CFLAGS) CFG_GCCISH_CXXFLAGS_mips-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) -CFG_GCCISH_LINK_FLAGS_mips-unknown-linux-gnu := -shared -fPIC -g -mips32r2 -msoft-float -mabi=32 +CFG_GCCISH_LINK_FLAGS_mips-unknown-linux-gnu := -shared -fPIC -g -mips32r2 -mabi=32 CFG_GCCISH_DEF_FLAG_mips-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= CFG_LLC_FLAGS_mips-unknown-linux-gnu := CFG_INSTALL_NAME_mips-unknown-linux-gnu = @@ -20,5 +20,5 @@ CFG_UNIXY_mips-unknown-linux-gnu := 1 CFG_LDPATH_mips-unknown-linux-gnu := CFG_RUN_mips-unknown-linux-gnu= CFG_RUN_TARG_mips-unknown-linux-gnu= -RUSTC_FLAGS_mips-unknown-linux-gnu := -C target-cpu=mips32r2 -C target-feature="+mips32r2" -C soft-float +RUSTC_FLAGS_mips-unknown-linux-gnu := CFG_GNU_TRIPLE_mips-unknown-linux-gnu := mips-unknown-linux-gnu diff --git a/mk/cfg/mips-unknown-linux-musl.mk b/mk/cfg/mips-unknown-linux-musl.mk new file mode 100644 index 0000000000000..33528b986f6e5 --- /dev/null +++ b/mk/cfg/mips-unknown-linux-musl.mk @@ -0,0 +1,24 @@ +# mips-unknown-linux-musl configuration +CC_mips-unknown-linux-musl=mips-linux-musl-gcc +CXX_mips-unknown-linux-musl=mips-linux-musl-g++ +CPP_mips-unknown-linux-musl=mips-linux-musl-gcc -E +AR_mips-unknown-linux-musl=mips-linux-musl-ar 
+CFG_LIB_NAME_mips-unknown-linux-musl=lib$(1).so +CFG_STATIC_LIB_NAME_mips-unknown-linux-musl=lib$(1).a +CFG_LIB_GLOB_mips-unknown-linux-musl=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_mips-unknown-linux-musl=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_mips-unknown-linux-musl := -mips32r2 -msoft-float -mabi=32 $(CFLAGS) +CFG_GCCISH_CFLAGS_mips-unknown-linux-musl := -Wall -g -fPIC -mips32r2 -msoft-float -mabi=32 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_mips-unknown-linux-musl := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_mips-unknown-linux-musl := -shared -fPIC -g -mips32r2 -msoft-float -mabi=32 +CFG_GCCISH_DEF_FLAG_mips-unknown-linux-musl := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_mips-unknown-linux-musl := +CFG_INSTALL_NAME_mips-unknown-linux-musl = +CFG_EXE_SUFFIX_mips-unknown-linux-musl = +CFG_WINDOWSY_mips-unknown-linux-musl := +CFG_UNIXY_mips-unknown-linux-musl := 1 +CFG_LDPATH_mips-unknown-linux-musl := +CFG_RUN_mips-unknown-linux-musl= +CFG_RUN_TARG_mips-unknown-linux-musl= +RUSTC_FLAGS_mips-unknown-linux-musl := +CFG_GNU_TRIPLE_mips-unknown-linux-musl := mips-unknown-linux-musl diff --git a/mk/cfg/mips-unknown-linux-uclibc.mk b/mk/cfg/mips-unknown-linux-uclibc.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/mips-unknown-linux-uclibc.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/mips64-unknown-linux-gnuabi64.mk b/mk/cfg/mips64-unknown-linux-gnuabi64.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/mips64-unknown-linux-gnuabi64.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/mips64el-unknown-linux-gnuabi64.mk b/mk/cfg/mips64el-unknown-linux-gnuabi64.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/mips64el-unknown-linux-gnuabi64.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/mipsel-unknown-linux-gnu.mk b/mk/cfg/mipsel-unknown-linux-gnu.mk index 4dadfc275d3c5..f15a086b64e88 100644 --- 
a/mk/cfg/mipsel-unknown-linux-gnu.mk +++ b/mk/cfg/mipsel-unknown-linux-gnu.mk @@ -20,5 +20,5 @@ CFG_UNIXY_mipsel-unknown-linux-gnu := 1 CFG_LDPATH_mipsel-unknown-linux-gnu := CFG_RUN_mipsel-unknown-linux-gnu= CFG_RUN_TARG_mipsel-unknown-linux-gnu= -RUSTC_FLAGS_mipsel-unknown-linux-gnu := -C target-cpu=mips32 -C target-feature="+mips32" +RUSTC_FLAGS_mipsel-unknown-linux-gnu := CFG_GNU_TRIPLE_mipsel-unknown-linux-gnu := mipsel-unknown-linux-gnu diff --git a/mk/cfg/mipsel-unknown-linux-musl.mk b/mk/cfg/mipsel-unknown-linux-musl.mk new file mode 100644 index 0000000000000..db836b81c5fd5 --- /dev/null +++ b/mk/cfg/mipsel-unknown-linux-musl.mk @@ -0,0 +1,24 @@ +# mipsel-unknown-linux-musl configuration +CC_mipsel-unknown-linux-musl=mipsel-linux-musl-gcc +CXX_mipsel-unknown-linux-musl=mipsel-linux-musl-g++ +CPP_mipsel-unknown-linux-musl=mipsel-linux-musl-gcc +AR_mipsel-unknown-linux-musl=mipsel-linux-musl-ar +CFG_LIB_NAME_mipsel-unknown-linux-musl=lib$(1).so +CFG_STATIC_LIB_NAME_mipsel-unknown-linux-musl=lib$(1).a +CFG_LIB_GLOB_mipsel-unknown-linux-musl=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_mipsel-unknown-linux-musl=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_mipsel-unknown-linux-musl := -mips32 -mabi=32 $(CFLAGS) +CFG_GCCISH_CFLAGS_mipsel-unknown-linux-musl := -Wall -g -fPIC -mips32 -mabi=32 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_mipsel-unknown-linux-musl := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_mipsel-unknown-linux-musl := -shared -fPIC -g -mips32 +CFG_GCCISH_DEF_FLAG_mipsel-unknown-linux-musl := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_mipsel-unknown-linux-musl := +CFG_INSTALL_NAME_mipsel-unknown-linux-musl = +CFG_EXE_SUFFIX_mipsel-unknown-linux-musl := +CFG_WINDOWSY_mipsel-unknown-linux-musl := +CFG_UNIXY_mipsel-unknown-linux-musl := 1 +CFG_LDPATH_mipsel-unknown-linux-musl := +CFG_RUN_mipsel-unknown-linux-musl= +CFG_RUN_TARG_mipsel-unknown-linux-musl= +RUSTC_FLAGS_mipsel-unknown-linux-musl := +CFG_GNU_TRIPLE_mipsel-unknown-linux-musl := mipsel-unknown-linux-musl 
diff --git a/mk/cfg/mipsel-unknown-linux-uclibc.mk b/mk/cfg/mipsel-unknown-linux-uclibc.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/mipsel-unknown-linux-uclibc.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/powerpc-unknown-linux-gnu.mk b/mk/cfg/powerpc-unknown-linux-gnu.mk index dda957673eba6..9c5720de4b310 100644 --- a/mk/cfg/powerpc-unknown-linux-gnu.mk +++ b/mk/cfg/powerpc-unknown-linux-gnu.mk @@ -9,7 +9,7 @@ CFG_STATIC_LIB_NAME_powerpc-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_powerpc-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_powerpc-unknown-linux-gnu=lib$(1)-*.dylib.dSYM CFG_CFLAGS_powerpc-unknown-linux-gnu := -m32 $(CFLAGS) -CFG_GCCISH_CFLAGS_powerpc-unknown-linux-gnu := -Wall -Werror -g -fPIC -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_powerpc-unknown-linux-gnu := -g -fPIC -m32 $(CFLAGS) CFG_GCCISH_CXXFLAGS_powerpc-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_powerpc-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m32 CFG_GCCISH_DEF_FLAG_powerpc-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/powerpc64-unknown-linux-gnu.mk b/mk/cfg/powerpc64-unknown-linux-gnu.mk index a9e8585ad6db5..389bb6f0cab49 100644 --- a/mk/cfg/powerpc64-unknown-linux-gnu.mk +++ b/mk/cfg/powerpc64-unknown-linux-gnu.mk @@ -1,5 +1,5 @@ # powerpc64-unknown-linux-gnu configuration -CROSS_PREFIX_powerpc64-unknown-linux-gnu=powerpc64-linux-gnu- +CROSS_PREFIX_powerpc64-unknown-linux-gnu=powerpc-linux-gnu- CC_powerpc64-unknown-linux-gnu=$(CC) CXX_powerpc64-unknown-linux-gnu=$(CXX) CPP_powerpc64-unknown-linux-gnu=$(CPP) @@ -8,8 +8,9 @@ CFG_LIB_NAME_powerpc64-unknown-linux-gnu=lib$(1).so CFG_STATIC_LIB_NAME_powerpc64-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_powerpc64-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_powerpc64-unknown-linux-gnu=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_powerpc64-unknown-linux-gnu := -m64 CFG_CFLAGS_powerpc64-unknown-linux-gnu := -m64 $(CFLAGS) 
-CFG_GCCISH_CFLAGS_powerpc64-unknown-linux-gnu := -Wall -Werror -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_powerpc64-unknown-linux-gnu := -g -fPIC -m64 $(CFLAGS) CFG_GCCISH_CXXFLAGS_powerpc64-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_powerpc64-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 CFG_GCCISH_DEF_FLAG_powerpc64-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/powerpc64le-unknown-linux-gnu.mk b/mk/cfg/powerpc64le-unknown-linux-gnu.mk index a2049331ab2e9..6884fa11e7412 100644 --- a/mk/cfg/powerpc64le-unknown-linux-gnu.mk +++ b/mk/cfg/powerpc64le-unknown-linux-gnu.mk @@ -9,7 +9,7 @@ CFG_STATIC_LIB_NAME_powerpc64le-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_powerpc64le-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_powerpc64le-unknown-linux-gnu=lib$(1)-*.dylib.dSYM CFG_CFLAGS_powerpc64le-unknown-linux-gnu := -m64 $(CFLAGS) -CFG_GCCISH_CFLAGS_powerpc64le-unknown-linux-gnu := -Wall -Werror -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_powerpc64le-unknown-linux-gnu := -g -fPIC -m64 $(CFLAGS) CFG_GCCISH_CXXFLAGS_powerpc64le-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_powerpc64le-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 CFG_GCCISH_DEF_FLAG_powerpc64le-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/s390x-unknown-linux-gnu.mk b/mk/cfg/s390x-unknown-linux-gnu.mk new file mode 100644 index 0000000000000..eb1cb2329c4f3 --- /dev/null +++ b/mk/cfg/s390x-unknown-linux-gnu.mk @@ -0,0 +1,24 @@ +# s390x-unknown-linux-gnu configuration +CROSS_PREFIX_s390x-unknown-linux-gnu=s390x-linux-gnu- +CC_s390x-unknown-linux-gnu=$(CC) +CXX_s390x-unknown-linux-gnu=$(CXX) +CPP_s390x-unknown-linux-gnu=$(CPP) +AR_s390x-unknown-linux-gnu=$(AR) +CFG_LIB_NAME_s390x-unknown-linux-gnu=lib$(1).so +CFG_STATIC_LIB_NAME_s390x-unknown-linux-gnu=lib$(1).a +CFG_LIB_GLOB_s390x-unknown-linux-gnu=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_s390x-unknown-linux-gnu=lib$(1)-*.dylib.dSYM 
+CFG_CFLAGS_s390x-unknown-linux-gnu := -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_s390x-unknown-linux-gnu := -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_s390x-unknown-linux-gnu := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_s390x-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 +CFG_GCCISH_DEF_FLAG_s390x-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_s390x-unknown-linux-gnu := +CFG_INSTALL_NAME_s390x-unknown-linux-gnu = +CFG_EXE_SUFFIX_s390x-unknown-linux-gnu = +CFG_WINDOWSY_s390x-unknown-linux-gnu := +CFG_UNIXY_s390x-unknown-linux-gnu := 1 +CFG_LDPATH_s390x-unknown-linux-gnu := +CFG_RUN_s390x-unknown-linux-gnu=$(2) +CFG_RUN_TARG_s390x-unknown-linux-gnu=$(call CFG_RUN_s390x-unknown-linux-gnu,,$(2)) +CFG_GNU_TRIPLE_s390x-unknown-linux-gnu := s390x-unknown-linux-gnu diff --git a/mk/cfg/wasm32-unknown-emscripten.mk b/mk/cfg/wasm32-unknown-emscripten.mk new file mode 100644 index 0000000000000..997bdfbf03ab1 --- /dev/null +++ b/mk/cfg/wasm32-unknown-emscripten.mk @@ -0,0 +1,24 @@ +# wasm32-unknown-emscripten configuration +CC_wasm32-unknown-emscripten=emcc +CXX_wasm32-unknown-emscripten=em++ +CPP_wasm32-unknown-emscripten=$(CPP) +AR_wasm32-unknown-emscripten=emar +CFG_LIB_NAME_wasm32-unknown-emscripten=lib$(1).so +CFG_STATIC_LIB_NAME_wasm32-unknown-emscripten=lib$(1).a +CFG_LIB_GLOB_wasm32-unknown-emscripten=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_wasm32-unknown-emscripten=lib$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_wasm32-unknown-emscripten := -m32 $(CFLAGS) +CFG_GCCISH_CFLAGS_wasm32-unknown-emscripten := -g -fPIC -m32 -s BINARYEN=1 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_wasm32-unknown-emscripten := -fno-rtti -s BINARYEN=1 $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_wasm32-unknown-emscripten := -shared -fPIC -ldl -pthread -lrt -g -m32 -s BINARYEN=1 +CFG_GCCISH_DEF_FLAG_wasm32-unknown-emscripten := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_wasm32-unknown-emscripten := +CFG_INSTALL_NAME_wasm32-unknown-emscripten = +CFG_EXE_SUFFIX_wasm32-unknown-emscripten 
= +CFG_WINDOWSY_wasm32-unknown-emscripten := +CFG_UNIXY_wasm32-unknown-emscripten := 1 +CFG_LDPATH_wasm32-unknown-emscripten := +CFG_RUN_wasm32-unknown-emscripten=$(2) +CFG_RUN_TARG_wasm32-unknown-emscripten=$(call CFG_RUN_wasm32-unknown-emscripten,,$(2)) +CFG_GNU_TRIPLE_wasm32-unknown-emscripten := wasm32-unknown-emscripten +CFG_DISABLE_JEMALLOC_wasm32-unknown-emscripten := 1 diff --git a/mk/cfg/x86_64-apple-darwin.mk b/mk/cfg/x86_64-apple-darwin.mk index 4c68d3dcf37b4..8af47b671a850 100644 --- a/mk/cfg/x86_64-apple-darwin.mk +++ b/mk/cfg/x86_64-apple-darwin.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-apple-darwin=lib$(1).a CFG_LIB_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib CFG_LIB_DSYM_GLOB_x86_64-apple-darwin=lib$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-apple-darwin := -m64 -arch x86_64 $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-apple-darwin := -Wall -Werror -g -fPIC -m64 -arch x86_64 $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-apple-darwin := -g -fPIC -m64 -arch x86_64 $(CFLAGS) CFG_GCCISH_CXXFLAGS_x86_64-apple-darwin := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-apple-darwin := -dynamiclib -pthread -framework CoreServices -m64 CFG_GCCISH_DEF_FLAG_x86_64-apple-darwin := -Wl,-exported_symbols_list, diff --git a/mk/cfg/x86_64-apple-ios.mk b/mk/cfg/x86_64-apple-ios.mk index dd6080fdb0bab..764cdc15996d0 100644 --- a/mk/cfg/x86_64-apple-ios.mk +++ b/mk/cfg/x86_64-apple-ios.mk @@ -16,7 +16,7 @@ CFG_STATIC_LIB_NAME_x86_64-apple-ios=lib$(1).a CFG_LIB_DSYM_GLOB_x86_64-apple-ios = lib$(1)-*.a.dSYM CFG_CFLAGS_x86_64-apple-ios := $(CFG_IOSSIM_FLAGS_x86_64-apple-ios) CFG_JEMALLOC_CFLAGS_x86_64-apple-ios := $(CFG_IOSSIM_FLAGS_x86_64-apple-ios) -CFG_GCCISH_CFLAGS_x86_64-apple-ios := -Wall -Werror -fPIC $(CFG_IOSSIM_FLAGS_x86_64-apple-ios) +CFG_GCCISH_CFLAGS_x86_64-apple-ios := -fPIC $(CFG_IOSSIM_FLAGS_x86_64-apple-ios) CFG_GCCISH_CXXFLAGS_x86_64-apple-ios := -fno-rtti $(CFG_IOSSIM_FLAGS_x86_64-apple-ios) -I$(CFG_IOSSIM_SDK_x86_64-apple-ios)/usr/include/c++/4.2.1 
CFG_GCCISH_LINK_FLAGS_x86_64-apple-ios := -lpthread -Wl,-no_compact_unwind -m64 -Wl,-syslibroot $(CFG_IOSSIM_SDK_x86_64-apple-ios) CFG_GCCISH_DEF_FLAG_x86_64-apple-ios := -Wl,-exported_symbols_list, diff --git a/mk/cfg/x86_64-pc-windows-gnu.mk b/mk/cfg/x86_64-pc-windows-gnu.mk index f0732d08c71ea..82e7b23279fb5 100644 --- a/mk/cfg/x86_64-pc-windows-gnu.mk +++ b/mk/cfg/x86_64-pc-windows-gnu.mk @@ -9,7 +9,7 @@ CFG_STATIC_LIB_NAME_x86_64-pc-windows-gnu=$(1).lib CFG_LIB_GLOB_x86_64-pc-windows-gnu=$(1)-*.dll CFG_LIB_DSYM_GLOB_x86_64-pc-windows-gnu=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-pc-windows-gnu := -m64 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-pc-windows-gnu := -Wall -Werror -g -m64 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-pc-windows-gnu := -g -m64 -D_WIN32_WINNT=0x0600 -D__USE_MINGW_ANSI_STDIO=1 $(CFLAGS) CFG_GCCISH_CXXFLAGS_x86_64-pc-windows-gnu := -fno-rtti $(CXXFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-pc-windows-gnu := -shared -g -m64 CFG_GCCISH_DEF_FLAG_x86_64-pc-windows-gnu := diff --git a/mk/cfg/x86_64-pc-windows-msvc.mk b/mk/cfg/x86_64-pc-windows-msvc.mk index 65cf28f6852b2..30e996a97273f 100644 --- a/mk/cfg/x86_64-pc-windows-msvc.mk +++ b/mk/cfg/x86_64-pc-windows-msvc.mk @@ -1,16 +1,16 @@ # x86_64-pc-windows-msvc configuration -CC_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo -LINK_x86_64-pc-windows-msvc="$(CFG_MSVC_LINK_x86_64)" -nologo -CXX_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo -CPP_x86_64-pc-windows-msvc="$(CFG_MSVC_CL_x86_64)" -nologo -AR_x86_64-pc-windows-msvc="$(CFG_MSVC_LIB_x86_64)" -nologo +CC_x86_64-pc-windows-msvc=$(CFG_MSVC_CL_x86_64) +LINK_x86_64-pc-windows-msvc=$(CFG_MSVC_LINK_x86_64) +CXX_x86_64-pc-windows-msvc=$(CFG_MSVC_CL_x86_64) +CPP_x86_64-pc-windows-msvc=$(CFG_MSVC_CL_x86_64) +AR_x86_64-pc-windows-msvc=$(CFG_MSVC_LIB_x86_64) CFG_LIB_NAME_x86_64-pc-windows-msvc=$(1).dll CFG_STATIC_LIB_NAME_x86_64-pc-windows-msvc=$(1).lib 
CFG_LIB_GLOB_x86_64-pc-windows-msvc=$(1)-*.{dll,lib} CFG_LIB_DSYM_GLOB_x86_64-pc-windows-msvc=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-pc-windows-msvc := -CFG_GCCISH_CFLAGS_x86_64-pc-windows-msvc := -MD -CFG_GCCISH_CXXFLAGS_x86_64-pc-windows-msvc := -MD +CFG_GCCISH_CFLAGS_x86_64-pc-windows-msvc := -MD -nologo +CFG_GCCISH_CXXFLAGS_x86_64-pc-windows-msvc := -MD -nologo CFG_GCCISH_LINK_FLAGS_x86_64-pc-windows-msvc := CFG_GCCISH_DEF_FLAG_x86_64-pc-windows-msvc := CFG_LLC_FLAGS_x86_64-pc-windows-msvc := @@ -22,3 +22,7 @@ CFG_LDPATH_x86_64-pc-windows-msvc := CFG_RUN_x86_64-pc-windows-msvc=$(2) CFG_RUN_TARG_x86_64-pc-windows-msvc=$(call CFG_RUN_x86_64-pc-windows-msvc,,$(2)) CFG_GNU_TRIPLE_x86_64-pc-windows-msvc := x86_64-pc-win32 + +# Currently the build system is not configured to build jemalloc +# with MSVC, so we omit this optional dependency. +CFG_DISABLE_JEMALLOC_x86_64-pc-windows-msvc := 1 diff --git a/mk/cfg/x86_64-rumprun-netbsd.mk b/mk/cfg/x86_64-rumprun-netbsd.mk index 5894805e3e5c9..53d58b9fceaa6 100644 --- a/mk/cfg/x86_64-rumprun-netbsd.mk +++ b/mk/cfg/x86_64-rumprun-netbsd.mk @@ -9,7 +9,7 @@ CFG_LIB_NAME_x86_64-rumprun-netbsd=lib$(1).so CFG_STATIC_LIB_NAME_x86_64-rumprun-netbsd=lib$(1).a CFG_LIB_GLOB_x86_64-rumprun-netbsd=lib$(1)-*.so CFG_JEMALLOC_CFLAGS_x86_64-rumprun-netbsd := -m64 -CFG_GCCISH_CFLAGS_x86_64-rumprun-netbsd := -Wall -Werror -g -fPIC -m64 +CFG_GCCISH_CFLAGS_x86_64-rumprun-netbsd := -g -fPIC -m64 CFG_GCCISH_CXXFLAGS_x86_64-rumprun-netbsd := CFG_GCCISH_LINK_FLAGS_x86_64-rumprun-netbsd := CFG_GCCISH_DEF_FLAG_x86_64-rumprun-netbsd := @@ -22,3 +22,4 @@ CFG_LDPATH_x86_64-rumprun-netbsd := CFG_RUN_x86_64-rumprun-netbsd=$(2) CFG_RUN_TARG_x86_64-rumprun-netbsd=$(call CFG_RUN_x86_64-rumprun-netbsd,,$(2)) CFG_GNU_TRIPLE_x86_64-rumprun-netbsd := x86_64-rumprun-netbsd +CFG_DISABLE_JEMALLOC_x86_64-rumprun-netbsd := 1 diff --git a/mk/cfg/x86_64-sun-solaris.mk b/mk/cfg/x86_64-sun-solaris.mk new file mode 100644 index 0000000000000..7fc323b234aee --- 
/dev/null +++ b/mk/cfg/x86_64-sun-solaris.mk @@ -0,0 +1,23 @@ +# x86_64-sun-solaris configuration +CROSS_PREFIX_x86_64-sun-solaris=x86_64-sun-solaris2.11- +CC_x86_64-sun-solaris=$(CC) +CXX_x86_64-sun-solaris=$(CXX) +CPP_x86_64-sun-solaris=$(CPP) +AR_x86_64-sun-solaris=$(AR) +CFG_LIB_NAME_x86_64-sun-solaris=lib$(1).so +CFG_STATIC_LIB_NAME_x86_64-sun-solaris=lib$(1).a +CFG_LIB_GLOB_x86_64-sun-solaris=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_x86_64-sun-solaris=$(1)-*.dylib.dSYM +CFG_JEMALLOC_CFLAGS_x86_64-sun-solaris := -I/usr/local/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-sun-solaris := -g -D_POSIX_PTHREAD_SEMANTICS -fPIC -I/usr/local/include $(CFLAGS) +CFG_GCCISH_LINK_FLAGS_x86_64-sun-solaris := -shared -fPIC -g -pthread -lrt +CFG_GCCISH_DEF_FLAG_x86_64-sun-solaris := -Wl,--export-dynamic,--dynamic-list= +CFG_LLC_FLAGS_x86_64-sun-solaris := +CFG_INSTALL_NAME_x86_64-sun-solaris = +CFG_EXE_SUFFIX_x86_64-sun-solaris := +CFG_WINDOWSY_x86_64-sun-solaris := +CFG_UNIXY_x86_64-sun-solaris := 1 +CFG_LDPATH_x86_64-sun-solaris := +CFG_RUN_x86_64-sun-solaris=$(2) +CFG_RUN_TARG_x86_64-sun-solaris=$(call CFG_RUN_x86_64-sun-solaris,,$(2)) +CFG_GNU_TRIPLE_x86_64-sun-solaris := x86_64-sun-solaris diff --git a/mk/cfg/x86_64-unknown-bitrig.mk b/mk/cfg/x86_64-unknown-bitrig.mk index afffec1a53a91..8ac31c176188b 100644 --- a/mk/cfg/x86_64-unknown-bitrig.mk +++ b/mk/cfg/x86_64-unknown-bitrig.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-bitrig=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-bitrig=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-bitrig=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-bitrig := -m64 -I/usr/include $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-unknown-bitrig := -Wall -Werror -fPIE -fPIC -m64 -I/usr/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-bitrig := -fPIE -fPIC -m64 -I/usr/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-unknown-bitrig := -shared -pic -pthread -m64 $(LDFLAGS) CFG_GCCISH_DEF_FLAG_x86_64-unknown-bitrig := -Wl,--export-dynamic,--dynamic-list= 
CFG_LLC_FLAGS_x86_64-unknown-bitrig := @@ -20,3 +20,4 @@ CFG_LDPATH_x86_64-unknown-bitrig := CFG_RUN_x86_64-unknown-bitrig=$(2) CFG_RUN_TARG_x86_64-unknown-bitrig=$(call CFG_RUN_x86_64-unknown-bitrig,,$(2)) CFG_GNU_TRIPLE_x86_64-unknown-bitrig := x86_64-unknown-bitrig +CFG_DISABLE_JEMALLOC_x86_64-unknown-bitrig := 1 diff --git a/mk/cfg/x86_64-unknown-dragonfly.mk b/mk/cfg/x86_64-unknown-dragonfly.mk index 4015293826e1a..579a9a809e205 100644 --- a/mk/cfg/x86_64-unknown-dragonfly.mk +++ b/mk/cfg/x86_64-unknown-dragonfly.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-dragonfly=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-dragonfly=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-dragonfly=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-dragonfly := -m64 -I/usr/include -I/usr/local/include $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-unknown-dragonfly := -Wall -Werror -g -fPIC -m64 -I/usr/include -I/usr/local/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-dragonfly := -g -fPIC -m64 -I/usr/include -I/usr/local/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-unknown-dragonfly := -shared -fPIC -g -pthread -lrt -m64 CFG_GCCISH_DEF_FLAG_x86_64-unknown-dragonfly := -Wl,--export-dynamic,--dynamic-list= CFG_LLC_FLAGS_x86_64-unknown-dragonfly := diff --git a/mk/cfg/x86_64-unknown-freebsd.mk b/mk/cfg/x86_64-unknown-freebsd.mk index 1bd43168b4f69..c700601eac7a1 100644 --- a/mk/cfg/x86_64-unknown-freebsd.mk +++ b/mk/cfg/x86_64-unknown-freebsd.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-freebsd=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-freebsd=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-freebsd=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-freebsd := -I/usr/local/include $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-unknown-freebsd := -Wall -Werror -g -fPIC -I/usr/local/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-freebsd := -g -fPIC -I/usr/local/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-unknown-freebsd := -shared -fPIC -g -pthread -lrt 
CFG_GCCISH_DEF_FLAG_x86_64-unknown-freebsd := -Wl,--export-dynamic,--dynamic-list= CFG_LLC_FLAGS_x86_64-unknown-freebsd := diff --git a/mk/cfg/x86_64-unknown-fuchsia.mk b/mk/cfg/x86_64-unknown-fuchsia.mk new file mode 100644 index 0000000000000..34aee77ae2107 --- /dev/null +++ b/mk/cfg/x86_64-unknown-fuchsia.mk @@ -0,0 +1 @@ +# rustbuild-only target diff --git a/mk/cfg/x86_64-unknown-haiku.mk b/mk/cfg/x86_64-unknown-haiku.mk new file mode 100644 index 0000000000000..4c2d888be06fb --- /dev/null +++ b/mk/cfg/x86_64-unknown-haiku.mk @@ -0,0 +1,27 @@ +# x86_64-unknown-haiku configuration +CROSS_PREFIX_x86_64-unknown-haiku=x86_64-unknown-haiku- +CC_x86_64-unknown-haiku=$(CC) +CXX_x86_64-unknown-haiku=$(CXX) +CPP_x86_64-unknown-haiku=$(CPP) +AR_x86_64-unknown-haiku=$(AR) +CFG_LIB_NAME_x86_64-unknown-haiku=lib$(1).so +CFG_STATIC_LIB_NAME_x86_64-unknown-haiku=lib$(1).a +CFG_LIB_GLOB_x86_64-unknown-haiku=lib$(1)-*.so +CFG_LIB_DSYM_GLOB_x86_64-unknown-haiku=lib$(1)-*.dylib.dSYM +CFG_CFLAGS_x86_64-unknown-haiku := -m64 $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-haiku := -Wall -Werror -g -fPIC -m64 $(CFLAGS) +CFG_GCCISH_CXXFLAGS_x86_64-unknown-haiku := -fno-rtti $(CXXFLAGS) +CFG_GCCISH_LINK_FLAGS_x86_64-unknown-haiku := -shared -fPIC -ldl -pthread -lrt -g -m64 +CFG_GCCISH_PRE_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-whole-archive +CFG_GCCISH_POST_LIB_FLAGS_x86_64-unknown-haiku := -Wl,-no-whole-archive +CFG_DEF_SUFFIX_x86_64-unknown-haiku := .linux.def +CFG_LLC_FLAGS_x86_64-unknown-haiku := +CFG_INSTALL_NAME_x86_64-unknown-haiku = +CFG_EXE_SUFFIX_x86_64-unknown-haiku = +CFG_WINDOWSY_x86_64-unknown-haiku := +CFG_UNIXY_x86_64-unknown-haiku := 1 +CFG_PATH_MUNGE_x86_64-unknown-haiku := true +CFG_LDPATH_x86_64-unknown-haiku := +CFG_RUN_x86_64-unknown-haiku=$(2) +CFG_RUN_TARG_x86_64-unknown-haiku=$(call CFG_RUN_x86_64-unknown-haiku,,$(2)) +CFG_GNU_TRIPLE_x86_64-unknown-haiku := x86_64-unknown-haiku diff --git a/mk/cfg/x86_64-unknown-linux-gnu.mk b/mk/cfg/x86_64-unknown-linux-gnu.mk 
index 044c687c9fc4c..817ce22e4f59d 100644 --- a/mk/cfg/x86_64-unknown-linux-gnu.mk +++ b/mk/cfg/x86_64-unknown-linux-gnu.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-linux-gnu=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-linux-gnu=lib$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-linux-gnu := -m64 -CFG_GCCISH_CFLAGS_x86_64-unknown-linux-gnu := -Wall -Werror -g -fPIC -m64 +CFG_GCCISH_CFLAGS_x86_64-unknown-linux-gnu := -g -fPIC -m64 CFG_GCCISH_CXXFLAGS_x86_64-unknown-linux-gnu := -fno-rtti CFG_GCCISH_LINK_FLAGS_x86_64-unknown-linux-gnu := -shared -fPIC -ldl -pthread -lrt -g -m64 CFG_GCCISH_DEF_FLAG_x86_64-unknown-linux-gnu := -Wl,--export-dynamic,--dynamic-list= diff --git a/mk/cfg/x86_64-unknown-linux-musl.mk b/mk/cfg/x86_64-unknown-linux-musl.mk index 9d6dd5c73d7b7..6f707ac3b3fb8 100644 --- a/mk/cfg/x86_64-unknown-linux-musl.mk +++ b/mk/cfg/x86_64-unknown-linux-musl.mk @@ -1,14 +1,14 @@ # x86_64-unknown-linux-musl configuration CC_x86_64-unknown-linux-musl=$(CFG_MUSL_ROOT)/bin/musl-gcc -CXX_x86_64-unknown-linux-musl=notaprogram +CXX_x86_64-unknown-linux-musl=$(CXX) CPP_x86_64-unknown-linux-musl=$(CFG_MUSL_ROOT)/bin/musl-gcc -E AR_x86_64-unknown-linux-musl=$(AR) CFG_INSTALL_ONLY_RLIB_x86_64-unknown-linux-musl = 1 CFG_LIB_NAME_x86_64-unknown-linux-musl=lib$(1).so CFG_STATIC_LIB_NAME_x86_64-unknown-linux-musl=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-linux-musl=lib$(1)-*.so -CFG_JEMALLOC_CFLAGS_x86_64-unknown-linux-musl := -m64 -CFG_GCCISH_CFLAGS_x86_64-unknown-linux-musl := -Wall -Werror -g -fPIC -m64 +CFG_JEMALLOC_CFLAGS_x86_64-unknown-linux-musl := -m64 -Wa,-mrelax-relocations=no +CFG_GCCISH_CFLAGS_x86_64-unknown-linux-musl := -g -fPIC -m64 -Wa,-mrelax-relocations=no CFG_GCCISH_CXXFLAGS_x86_64-unknown-linux-musl := CFG_GCCISH_LINK_FLAGS_x86_64-unknown-linux-musl := CFG_GCCISH_DEF_FLAG_x86_64-unknown-linux-musl := @@ -25,4 +25,5 @@ CFG_THIRD_PARTY_OBJECTS_x86_64-unknown-linux-musl := crt1.o crti.o crtn.o 
CFG_INSTALLED_OBJECTS_x86_64-unknown-linux-musl := crt1.o crti.o crtn.o NATIVE_DEPS_libc_T_x86_64-unknown-linux-musl += libc.a -NATIVE_DEPS_std_T_x86_64-unknown-linux-musl += libunwind.a crt1.o crti.o crtn.o +NATIVE_DEPS_std_T_x86_64-unknown-linux-musl += crt1.o crti.o crtn.o +NATIVE_DEPS_unwind_T_x86_64-unknown-linux-musl += libunwind.a diff --git a/mk/cfg/x86_64-unknown-netbsd.mk b/mk/cfg/x86_64-unknown-netbsd.mk index a77c5fa542eb1..93bb2d672653e 100644 --- a/mk/cfg/x86_64-unknown-netbsd.mk +++ b/mk/cfg/x86_64-unknown-netbsd.mk @@ -9,7 +9,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-netbsd=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-netbsd=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-netbsd=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-netbsd := -I/usr/local/include $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-unknown-netbsd := -Wall -Werror -g -fPIC -I/usr/local/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-netbsd := -g -fPIC -I/usr/local/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-unknown-netbsd := -shared -fPIC -g -pthread -lrt CFG_GCCISH_DEF_FLAG_x86_64-unknown-netbsd := -Wl,--export-dynamic,--dynamic-list= CFG_LLC_FLAGS_x86_64-unknown-netbsd := diff --git a/mk/cfg/x86_64-unknown-openbsd.mk b/mk/cfg/x86_64-unknown-openbsd.mk index e6f28482284e0..7cca1f7b18b3d 100644 --- a/mk/cfg/x86_64-unknown-openbsd.mk +++ b/mk/cfg/x86_64-unknown-openbsd.mk @@ -8,7 +8,7 @@ CFG_STATIC_LIB_NAME_x86_64-unknown-openbsd=lib$(1).a CFG_LIB_GLOB_x86_64-unknown-openbsd=lib$(1)-*.so CFG_LIB_DSYM_GLOB_x86_64-unknown-openbsd=$(1)-*.dylib.dSYM CFG_JEMALLOC_CFLAGS_x86_64-unknown-openbsd := -m64 -I/usr/include $(CFLAGS) -CFG_GCCISH_CFLAGS_x86_64-unknown-openbsd := -Wall -Werror -g -fPIC -m64 -I/usr/include $(CFLAGS) +CFG_GCCISH_CFLAGS_x86_64-unknown-openbsd := -g -fPIC -m64 -I/usr/include $(CFLAGS) CFG_GCCISH_LINK_FLAGS_x86_64-unknown-openbsd := -shared -fPIC -g -pthread -m64 CFG_GCCISH_DEF_FLAG_x86_64-unknown-openbsd := -Wl,--export-dynamic,--dynamic-list= 
CFG_LLC_FLAGS_x86_64-unknown-openbsd := @@ -21,3 +21,4 @@ CFG_RUN_x86_64-unknown-openbsd=$(2) CFG_RUN_TARG_x86_64-unknown-openbsd=$(call CFG_RUN_x86_64-unknown-openbsd,,$(2)) CFG_GNU_TRIPLE_x86_64-unknown-openbsd := x86_64-unknown-openbsd RUSTC_FLAGS_x86_64-unknown-openbsd=-C linker=$(call FIND_COMPILER,$(CC)) +CFG_DISABLE_JEMALLOC_x86_64-unknown-openbsd := 1 diff --git a/mk/clean.mk b/mk/clean.mk index ac34ac506bb17..7013d9f03f836 100644 --- a/mk/clean.mk +++ b/mk/clean.mk @@ -35,7 +35,7 @@ clean-all: clean clean-llvm clean-llvm: $(CLEAN_LLVM_RULES) -clean: clean-misc $(CLEAN_STAGE_RULES) +clean: clean-misc clean-grammar $(CLEAN_STAGE_RULES) clean-misc: @$(call E, cleaning) @@ -47,6 +47,9 @@ clean-misc: $(Q)rm -Rf dist/* $(Q)rm -Rf doc +clean-grammar: + @$(call E, cleaning grammar verification) + $(Q)rm -Rf grammar define CLEAN_GENERIC clean-generic-$(2)-$(1): @@ -102,7 +105,6 @@ define CLEAN_TARGET_STAGE_N clean$(1)_T_$(2)_H_$(3): \ $$(foreach crate,$$(CRATES),clean$(1)_T_$(2)_H_$(3)-lib-$$(crate)) \ $$(foreach tool,$$(TOOLS) $$(DEBUGGER_BIN_SCRIPTS_ALL),clean$(1)_T_$(2)_H_$(3)-tool-$$(tool)) - $$(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/libcompiler-rt.a $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/librun_pass_stage* # For unix $(Q)rm -f $$(TLIB$(1)_T_$(2)_H_$(3))/run_pass_stage* # For windows diff --git a/mk/crates.mk b/mk/crates.mk index 5cc8a46878439..acb36b2f7da6c 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -49,30 +49,41 @@ # automatically generated for all stage/host/target combinations. 
################################################################################ -TARGET_CRATES := libc std flate arena term \ - serialize getopts collections test rand \ - log graphviz core rbml alloc \ +TARGET_CRATES := libc std term \ + getopts collections test rand \ + compiler_builtins core alloc \ rustc_unicode rustc_bitflags \ - alloc_system + alloc_system alloc_jemalloc \ + panic_abort panic_unwind unwind RUSTC_CRATES := rustc rustc_typeck rustc_mir rustc_borrowck rustc_resolve rustc_driver \ rustc_trans rustc_back rustc_llvm rustc_privacy rustc_lint \ - rustc_data_structures rustc_front rustc_platform_intrinsics \ - rustc_plugin rustc_metadata rustc_passes -HOST_CRATES := syntax syntax_ext $(RUSTC_CRATES) rustdoc fmt_macros -TOOLS := compiletest rustdoc rustc rustbook error-index-generator + rustc_data_structures rustc_platform_intrinsics rustc_errors \ + rustc_plugin rustc_metadata rustc_passes rustc_save_analysis \ + rustc_const_eval rustc_const_math rustc_incremental proc_macro +HOST_CRATES := syntax syntax_ext proc_macro_tokens proc_macro_plugin syntax_pos $(RUSTC_CRATES) \ + rustdoc fmt_macros flate arena graphviz log serialize +TOOLS := compiletest rustdoc rustc rustbook error_index_generator DEPS_core := +DEPS_compiler_builtins := core native:compiler-rt DEPS_alloc := core libc alloc_system DEPS_alloc_system := core libc +DEPS_alloc_jemalloc := core libc native:jemalloc DEPS_collections := core alloc rustc_unicode DEPS_libc := core DEPS_rand := core DEPS_rustc_bitflags := core DEPS_rustc_unicode := core +DEPS_panic_abort := libc alloc +DEPS_panic_unwind := libc alloc unwind +DEPS_unwind := libc + +RUSTFLAGS_compiler_builtins := -lstatic=compiler-rt +RUSTFLAGS_panic_abort := -C panic=abort -DEPS_std := core libc rand alloc collections rustc_unicode \ +DEPS_std := core libc rand alloc collections compiler_builtins rustc_unicode \ native:backtrace \ - alloc_system + alloc_system panic_abort panic_unwind unwind DEPS_arena := std DEPS_glob := std 
DEPS_flate := std native:miniz @@ -81,52 +92,67 @@ DEPS_getopts := std DEPS_graphviz := std DEPS_log := std DEPS_num := std -DEPS_rbml := std log serialize DEPS_serialize := std log -DEPS_term := std log -DEPS_test := std getopts serialize rbml term native:rust_test_helpers - -DEPS_syntax := std term serialize log arena libc rustc_bitflags -DEPS_syntax_ext := syntax fmt_macros - -DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml rustc_front\ - log graphviz rustc_llvm rustc_back rustc_data_structures -DEPS_rustc_back := std syntax rustc_llvm rustc_front flate log libc -DEPS_rustc_borrowck := rustc rustc_front log graphviz syntax -DEPS_rustc_data_structures := std log serialize +DEPS_term := std +DEPS_test := std getopts term native:rust_test_helpers + +DEPS_syntax := std term serialize log arena libc rustc_bitflags rustc_unicode rustc_errors syntax_pos rustc_data_structures +DEPS_syntax_ext := syntax syntax_pos rustc_errors fmt_macros proc_macro +DEPS_syntax_pos := serialize +DEPS_proc_macro_tokens := syntax syntax_pos log +DEPS_proc_macro_plugin := syntax syntax_pos rustc_plugin log proc_macro_tokens + +DEPS_rustc_const_math := std syntax log serialize +DEPS_rustc_const_eval := rustc_const_math rustc syntax log serialize \ + rustc_back graphviz syntax_pos + +DEPS_rustc := syntax fmt_macros flate arena serialize getopts \ + log graphviz rustc_llvm rustc_back rustc_data_structures\ + rustc_const_math syntax_pos rustc_errors +DEPS_rustc_back := std syntax flate log libc +DEPS_rustc_borrowck := rustc log graphviz syntax syntax_pos rustc_errors rustc_mir +DEPS_rustc_data_structures := std log serialize libc DEPS_rustc_driver := arena flate getopts graphviz libc rustc rustc_back rustc_borrowck \ rustc_typeck rustc_mir rustc_resolve log syntax serialize rustc_llvm \ - rustc_trans rustc_privacy rustc_lint rustc_front rustc_plugin \ - rustc_metadata syntax_ext rustc_passes -DEPS_rustc_front := std syntax log serialize -DEPS_rustc_lint := rustc log syntax + 
rustc_trans rustc_privacy rustc_lint rustc_plugin \ + rustc_metadata syntax_ext proc_macro_plugin \ + rustc_passes rustc_save_analysis rustc_const_eval \ + rustc_incremental syntax_pos rustc_errors proc_macro rustc_data_structures +DEPS_rustc_errors := log libc serialize syntax_pos +DEPS_rustc_lint := rustc log syntax syntax_pos rustc_const_eval DEPS_rustc_llvm := native:rustllvm libc std rustc_bitflags -DEPS_rustc_metadata := rustc rustc_front syntax rbml -DEPS_rustc_passes := syntax rustc core -DEPS_rustc_mir := rustc rustc_front syntax -DEPS_rustc_resolve := arena rustc rustc_front log syntax -DEPS_rustc_platform_intrinsics := rustc rustc_llvm -DEPS_rustc_plugin := rustc rustc_metadata syntax -DEPS_rustc_privacy := rustc rustc_front log syntax -DEPS_rustc_trans := arena flate getopts graphviz libc rustc rustc_back rustc_mir \ - log syntax serialize rustc_llvm rustc_front rustc_platform_intrinsics -DEPS_rustc_typeck := rustc syntax rustc_front rustc_platform_intrinsics - -DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts \ - test rustc_lint rustc_front - - -TOOL_DEPS_compiletest := test getopts +DEPS_proc_macro := std syntax +DEPS_rustc_metadata := rustc syntax syntax_pos rustc_errors rustc_const_math \ + proc_macro syntax_ext +DEPS_rustc_passes := syntax syntax_pos rustc core rustc_const_eval rustc_errors +DEPS_rustc_mir := rustc syntax syntax_pos rustc_const_math rustc_const_eval rustc_bitflags +DEPS_rustc_resolve := arena rustc log syntax syntax_pos rustc_errors +DEPS_rustc_platform_intrinsics := std +DEPS_rustc_plugin := rustc rustc_metadata syntax syntax_pos rustc_errors +DEPS_rustc_privacy := rustc log syntax syntax_pos +DEPS_rustc_trans := arena flate getopts graphviz libc rustc rustc_back \ + log syntax serialize rustc_llvm rustc_platform_intrinsics \ + rustc_const_math rustc_const_eval rustc_incremental rustc_errors syntax_pos +DEPS_rustc_incremental := rustc syntax_pos serialize rustc_data_structures +DEPS_rustc_save_analysis := rustc 
log syntax syntax_pos serialize +DEPS_rustc_typeck := rustc syntax syntax_pos rustc_platform_intrinsics rustc_const_math \ + rustc_const_eval rustc_errors rustc_data_structures + +DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts test \ + rustc_lint rustc_const_eval syntax_pos rustc_data_structures + +TOOL_DEPS_compiletest := test getopts log serialize TOOL_DEPS_rustdoc := rustdoc TOOL_DEPS_rustc := rustc_driver TOOL_DEPS_rustbook := std rustdoc -TOOL_DEPS_error-index-generator := rustdoc syntax serialize -TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs +TOOL_DEPS_error_index_generator := rustdoc syntax serialize +TOOL_SOURCE_compiletest := $(S)src/tools/compiletest/src/main.rs TOOL_SOURCE_rustdoc := $(S)src/driver/driver.rs TOOL_SOURCE_rustc := $(S)src/driver/driver.rs -TOOL_SOURCE_rustbook := $(S)src/rustbook/main.rs -TOOL_SOURCE_error-index-generator := $(S)src/error-index-generator/main.rs +TOOL_SOURCE_rustbook := $(S)src/tools/rustbook/main.rs +TOOL_SOURCE_error_index_generator := $(S)src/tools/error_index_generator/main.rs +ONLY_RLIB_compiler_builtins := 1 ONLY_RLIB_core := 1 ONLY_RLIB_libc := 1 ONLY_RLIB_alloc := 1 @@ -135,17 +161,18 @@ ONLY_RLIB_collections := 1 ONLY_RLIB_rustc_unicode := 1 ONLY_RLIB_rustc_bitflags := 1 ONLY_RLIB_alloc_system := 1 +ONLY_RLIB_alloc_jemalloc := 1 +ONLY_RLIB_panic_unwind := 1 +ONLY_RLIB_panic_abort := 1 +ONLY_RLIB_unwind := 1 + +TARGET_SPECIFIC_alloc_jemalloc := 1 # Documented-by-default crates DOC_CRATES := std alloc collections core libc rustc_unicode ifeq ($(CFG_DISABLE_JEMALLOC),) -TARGET_CRATES += alloc_jemalloc -DEPS_std += alloc_jemalloc -DEPS_alloc_jemalloc := core libc native:jemalloc -ONLY_RLIB_alloc_jemalloc := 1 -else -RUSTFLAGS_rustc_back := --cfg disable_jemalloc +RUSTFLAGS_rustc_back := --cfg 'feature="jemalloc"' endif ################################################################################ @@ -161,12 +188,32 @@ CRATES := $(TARGET_CRATES) $(HOST_CRATES) define RUST_CRATE 
CRATEFILE_$(1) := $$(SREL)src/lib$(1)/lib.rs RSINPUTS_$(1) := $$(call rwildcard,$(S)src/lib$(1)/,*.rs) -RUST_DEPS_$(1) := $$(filter-out native:%,$$(DEPS_$(1))) NATIVE_DEPS_$(1) := $$(patsubst native:%,%,$$(filter native:%,$$(DEPS_$(1)))) endef $(foreach crate,$(CRATES),$(eval $(call RUST_CRATE,$(crate)))) +# $(1) - crate +# $(2) - target +define RUST_CRATE_DEPS +RUST_DEPS_$(1)_T_$(2) := $$(filter-out native:%,$$(DEPS_$(1))) +endef + +$(foreach target,$(CFG_TARGET),\ + $(foreach crate,$(CRATES),$(eval $(call RUST_CRATE_DEPS,$(crate),$(target))))) + +# $(1) - target +# $(2) - crate +define DEFINE_TARGET_CRATES +ifndef TARGET_SPECIFIC_$(2) +TARGET_CRATES_$(1) += $(2) +endif +endef + +$(foreach target,$(CFG_TARGET),\ + $(foreach crate,$(TARGET_CRATES),\ + $(eval $(call DEFINE_TARGET_CRATES,$(target),$(crate))))) + # Similar to the macro above for crates, this macro is for tools # # $(1) is the crate to generate variables for diff --git a/mk/ctags.mk b/mk/ctags.mk index a116f2aba6437..1fcb0bb4debbc 100644 --- a/mk/ctags.mk +++ b/mk/ctags.mk @@ -15,14 +15,21 @@ .PHONY: TAGS.emacs TAGS.vi -CTAGS_LOCATIONS=$(wildcard ${CFG_SRC_DIR}src/lib*) +CTAGS_RUSTC_LOCATIONS=$(patsubst ${CFG_SRC_DIR}src/lib%test,, \ + $(wildcard ${CFG_SRC_DIR}src/lib*)) ${CFG_SRC_DIR}src/libtest CTAGS_LOCATIONS=$(patsubst ${CFG_SRC_DIR}src/librust%,, \ $(patsubst ${CFG_SRC_DIR}src/lib%test,, \ $(wildcard ${CFG_SRC_DIR}src/lib*))) ${CFG_SRC_DIR}src/libtest -CTAGS_OPTS=--options="${CFG_SRC_DIR}src/etc/ctags.rust" --languages=Rust --recurse ${CTAGS_LOCATIONS} +CTAGS_OPTS=--options="${CFG_SRC_DIR}src/etc/ctags.rust" --languages=Rust --recurse + +TAGS.rustc.emacs: + ctags -e -f $@ ${CTAGS_OPTS} ${CTAGS_RUSTC_LOCATIONS} TAGS.emacs: - ctags -e -f $@ ${CTAGS_OPTS} + ctags -e -f $@ ${CTAGS_OPTS} ${CTAGS_LOCATIONS} + +TAGS.rustc.vi: + ctags -f $@ ${CTAGS_OPTS} ${CTAGS_RUSTC_LOCATIONS} TAGS.vi: - ctags -f $@ ${CTAGS_OPTS} + ctags -f $@ ${CTAGS_OPTS} ${CTAGS_LOCATIONS} diff --git a/mk/debuggers.mk 
b/mk/debuggers.mk index aa7b62e13b86c..fbf32dc1a397f 100644 --- a/mk/debuggers.mk +++ b/mk/debuggers.mk @@ -41,10 +41,13 @@ DEBUGGER_BIN_SCRIPTS_LLDB_ABS=\ ## ALL ## -DEBUGGER_RUSTLIB_ETC_SCRIPTS_ALL=$(DEBUGGER_RUSTLIB_ETC_SCRIPTS_GDB) \ - $(DEBUGGER_RUSTLIB_ETC_SCRIPTS_LLDB) -DEBUGGER_RUSTLIB_ETC_SCRIPTS_ALL_ABS=$(DEBUGGER_RUSTLIB_ETC_SCRIPTS_GDB_ABS) \ - $(DEBUGGER_RUSTLIB_ETC_SCRIPTS_LLDB_ABS) +DEBUGGER_RUSTLIB_ETC_SCRIPTS_ALL=gdb_load_rust_pretty_printers.py \ + gdb_rust_pretty_printing.py \ + lldb_rust_formatters.py \ + debugger_pretty_printers_common.py +DEBUGGER_RUSTLIB_ETC_SCRIPTS_ALL_ABS=\ + $(foreach script,$(DEBUGGER_RUSTLIB_ETC_SCRIPTS_ALL), \ + $(CFG_SRC_DIR)src/etc/$(script)) DEBUGGER_BIN_SCRIPTS_ALL=$(DEBUGGER_BIN_SCRIPTS_GDB) \ $(DEBUGGER_BIN_SCRIPTS_LLDB) DEBUGGER_BIN_SCRIPTS_ALL_ABS=$(DEBUGGER_BIN_SCRIPTS_GDB_ABS) \ diff --git a/mk/dist.mk b/mk/dist.mk index 685fb2b5b4679..238ba8acee42f 100644 --- a/mk/dist.mk +++ b/mk/dist.mk @@ -24,6 +24,7 @@ PKG_NAME := $(CFG_PACKAGE_NAME) STD_PKG_NAME := rust-std-$(CFG_PACKAGE_VERS) DOC_PKG_NAME := rust-docs-$(CFG_PACKAGE_VERS) MINGW_PKG_NAME := rust-mingw-$(CFG_PACKAGE_VERS) +SRC_PKG_NAME := rust-src-$(CFG_PACKAGE_VERS) # License suitable for displaying in a popup LICENSE.txt: $(S)COPYRIGHT $(S)LICENSE-APACHE $(S)LICENSE-MIT @@ -48,11 +49,11 @@ PKG_FILES := \ $(S)configure $(S)Makefile.in \ $(S)man \ $(addprefix $(S)src/, \ - compiletest \ + bootstrap \ + build_helper \ doc \ driver \ etc \ - error-index-generator \ $(foreach crate,$(CRATES),lib$(crate)) \ libcollectionstest \ libcoretest \ @@ -60,22 +61,26 @@ PKG_FILES := \ rt \ rtstartup \ rustllvm \ - snapshots.txt \ + rustc \ + stage0.txt \ rust-installer \ - rustbook \ - test) \ + tools \ + test \ + vendor) \ $(PKG_GITMODULES) \ $(filter-out config.stamp, \ $(MKFILES_FOR_TARBALL)) UNROOTED_PKG_FILES := $(patsubst $(S)%,./%,$(PKG_FILES)) -$(PKG_TAR): $(PKG_FILES) - @$(call E, making dist dir) - $(Q)rm -Rf tmp/dist/$(PKG_NAME) - $(Q)mkdir -p 
tmp/dist/$(PKG_NAME) +tmp/dist/$$(SRC_PKG_NAME)-image: $(PKG_FILES) + @$(call E, making src image) + $(Q)rm -Rf tmp/dist/$(SRC_PKG_NAME)-image + $(Q)mkdir -p tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src/rust + $(Q)echo "$(CFG_VERSION)" > tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src/rust/version $(Q)tar \ -C $(S) \ + -f - \ --exclude-vcs \ --exclude=*~ \ --exclude=*.pyc \ @@ -85,10 +90,11 @@ $(PKG_TAR): $(PKG_FILES) --exclude=*/llvm/test/*/*/*.ll \ --exclude=*/llvm/test/*/*/*.td \ --exclude=*/llvm/test/*/*/*.s \ - -c $(UNROOTED_PKG_FILES) | tar -x -C tmp/dist/$(PKG_NAME) + -c $(UNROOTED_PKG_FILES) | tar -x -f - -C tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src/rust + +$(PKG_TAR): tmp/dist/$$(SRC_PKG_NAME)-image @$(call E, making $@) - $(Q)tar -czf $(PKG_TAR) -C tmp/dist $(PKG_NAME) - $(Q)rm -Rf tmp/dist/$(PKG_NAME) + $(Q)tar -czf $(PKG_TAR) -C tmp/dist/$(SRC_PKG_NAME)-image/lib/rustlib/src rust --transform 's,^rust,$(PKG_NAME),S' dist-tar-src: $(PKG_TAR) @@ -257,6 +263,19 @@ endef $(foreach host,$(CFG_HOST),\ $(eval $(call DEF_INSTALLER,$(host)))) +dist/$(SRC_PKG_NAME).tar.gz: tmp/dist/$(SRC_PKG_NAME)-image + @$(call E, build: $@) + $(Q)$(S)src/rust-installer/gen-installer.sh \ + --product-name=Rust \ + --rel-manifest-dir=rustlib \ + --success-message=Awesome-Source. \ + --image-dir=tmp/dist/$(SRC_PKG_NAME)-image \ + --work-dir=tmp/dist \ + --output-dir=dist \ + --package-name=$(SRC_PKG_NAME) \ + --component-name=rust-src \ + --legacy-manifest-dirs=rustlib,cargo + # When generating packages for the standard library, we've actually got a lot of # artifacts to choose from. 
Each of the CFG_HOST compilers will have a copy of # the standard library for each CFG_TARGET, but we only want to generate one @@ -327,8 +346,8 @@ distcheck-docs: dist-docs # Primary targets (dist, distcheck) ###################################################################### -MAYBE_DIST_TAR_SRC=dist-tar-src -MAYBE_DISTCHECK_TAR_SRC=distcheck-tar-src +MAYBE_DIST_TAR_SRC=dist-tar-src dist/$(SRC_PKG_NAME).tar.gz +MAYBE_DISTCHECK_TAR_SRC=distcheck-tar-src dist/$(SRC_PKG_NAME).tar.gz # FIXME #13224: On OS X don't produce tarballs simply because --exclude-vcs don't work. # This is a huge hack because I just don't have time to figure out another solution. diff --git a/mk/docs.mk b/mk/docs.mk index f76368e3d0b86..6c0be654e1f5d 100644 --- a/mk/docs.mk +++ b/mk/docs.mk @@ -59,13 +59,14 @@ RUSTBOOK_EXE = $(HBIN2_H_$(CFG_BUILD))/rustbook$(X_$(CFG_BUILD)) # ./configure RUSTBOOK = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(RUSTBOOK_EXE) -# The error-index-generator executable... -ERR_IDX_GEN_EXE = $(HBIN2_H_$(CFG_BUILD))/error-index-generator$(X_$(CFG_BUILD)) +# The error_index_generator executable... 
+ERR_IDX_GEN_EXE = $(HBIN2_H_$(CFG_BUILD))/error_index_generator$(X_$(CFG_BUILD)) ERR_IDX_GEN = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(ERR_IDX_GEN_EXE) +ERR_IDX_GEN_MD = $(RPATH_VAR2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(ERR_IDX_GEN_EXE) markdown D := $(S)src/doc -DOC_TARGETS := book nomicon style error-index +DOC_TARGETS := book nomicon error-index COMPILER_DOC_TARGETS := DOC_L10N_TARGETS := @@ -157,9 +158,9 @@ LIB_DOC_DEP_$(1) = \ $$(CRATEFILE_$(1)) \ $$(RSINPUTS_$(1)) \ $$(RUSTDOC_EXE) \ - $$(foreach dep,$$(RUST_DEPS_$(1)), \ + $$(foreach dep,$$(RUST_DEPS_$(1)_T_$(CFG_BUILD)), \ $$(TLIB2_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.$$(dep)) \ - $$(foreach dep,$$(filter $$(DOC_CRATES), $$(RUST_DEPS_$(1))), \ + $$(foreach dep,$$(filter $$(DOC_CRATES), $$(RUST_DEPS_$(1)_T_$(CFG_BUILD))), \ doc/$$(dep)/) else LIB_DOC_DEP_$(1) = $$(CRATEFILE_$(1)) $$(RSINPUTS_$(1)) @@ -208,15 +209,14 @@ doc/nomicon/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/nomicon/*.md) | $(Q)rm -rf doc/nomicon $(Q)$(RUSTBOOK) build $(S)src/doc/nomicon doc/nomicon -style: doc/style/index.html - -doc/style/index.html: $(RUSTBOOK_EXE) $(wildcard $(S)/src/doc/style/*.md) | doc/ - @$(call E, rustbook: $@) - $(Q)rm -rf doc/style - $(Q)$(RUSTBOOK) build $(S)src/doc/style doc/style - error-index: doc/error-index.html -doc/error-index.html: $(ERR_IDX_GEN_EXE) | doc/ - $(Q)$(call E, error-index-generator: $@) +# Metadata used to generate the index is created as a side effect of +# the build so this depends on every crate being up to date. 
+doc/error-index.html: $(ERR_IDX_GEN_EXE) $(CSREQ$(2)_T_$(CFG_BUILD)_H_$(CFG_BUILD)) | doc/ + $(Q)$(call E, error_index_generator: $@) $(Q)$(ERR_IDX_GEN) + +doc/error-index.md: $(ERR_IDX_GEN_EXE) $(CSREQ$(2)_T_$(CFG_BUILD)_H_$(CFG_BUILD)) | doc/ + $(Q)$(call E, error_index_generator: $@) + $(Q)$(ERR_IDX_GEN_MD) diff --git a/mk/grammar.mk b/mk/grammar.mk index 0d527bd068860..1bd042adb218f 100644 --- a/mk/grammar.mk +++ b/mk/grammar.mk @@ -37,7 +37,7 @@ $(BG): $(BG)RustLexer.class: $(BG) $(SG)RustLexer.g4 $(Q)$(CFG_ANTLR4) -o $(BG) $(SG)RustLexer.g4 - $(Q)$(CFG_JAVAC) -d $(BG) $(BG)RustLexer.java + $(Q)$(CFG_JAVAC) -d $(BG) -classpath $(CFG_ANTLR4_JAR) $(BG)RustLexer.java check-build-lexer-verifier: $(BG)verify diff --git a/mk/host.mk b/mk/host.mk index 59a00950b5cfd..d17479bd894e5 100644 --- a/mk/host.mk +++ b/mk/host.mk @@ -21,7 +21,7 @@ define CP_HOST_STAGE_N_CRATE ifeq ($$(ONLY_RLIB_$(5)),) $$(HLIB$(2)_H_$(4))/stamp.$(5): \ $$(TLIB$(1)_T_$(3)_H_$(4))/stamp.$(5) \ - $$(RUST_DEPS_$(5):%=$$(HLIB$(2)_H_$(4))/stamp.%) \ + $$(RUST_DEPS_$(5)_T_$(3):%=$$(HLIB$(2)_H_$(4))/stamp.%) \ | $$(HLIB$(2)_H_$(4))/ @$$(call E, cp: $$(@D)/lib$(5)) $$(call REMOVE_ALL_OLD_GLOB_MATCHES, \ diff --git a/mk/install.mk b/mk/install.mk index af6f3ff6ad2b2..be212869f0103 100644 --- a/mk/install.mk +++ b/mk/install.mk @@ -12,7 +12,8 @@ RUN_INSTALLER = cd tmp/empty_dir && \ sh ../../tmp/dist/$(1)/install.sh \ --prefix="$(DESTDIR)$(CFG_PREFIX)" \ --libdir="$(DESTDIR)$(CFG_LIBDIR)" \ - --mandir="$(DESTDIR)$(CFG_MANDIR)" + --mandir="$(DESTDIR)$(CFG_MANDIR)" \ + --docdir="$(DESTDIR)$(CFG_DOCDIR)" install: ifeq (root user, $(USER) $(patsubst %,user,$(SUDO_USER))) @@ -108,7 +109,7 @@ endif define INSTALL_RUNTIME_TARGET_N install-runtime-target-$(1)-host-$(2): $$(TSREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(SREQ$$(ISTAGE)_T_$(1)_H_$(2)) $$(Q)$$(call ADB_SHELL,mkdir,$(CFG_RUNTIME_PUSH_DIR)) - $$(Q)$$(foreach crate,$$(TARGET_CRATES), \ + $$(Q)$$(foreach crate,$$(TARGET_CRATES_$(1)), \ $$(call 
ADB_PUSH,$$(TL$(1)$(2))/$$(call CFG_LIB_GLOB_$(1),$$(crate)), \ $$(CFG_RUNTIME_PUSH_DIR));) endef @@ -116,7 +117,7 @@ endef define INSTALL_RUNTIME_TARGET_CLEANUP_N install-runtime-target-$(1)-cleanup: $$(Q)$$(call ADB,remount) - $$(Q)$$(foreach crate,$$(TARGET_CRATES), \ + $$(Q)$$(foreach crate,$$(TARGET_CRATES_$(1)), \ $$(call ADB_SHELL,rm,$$(CFG_RUNTIME_PUSH_DIR)/$$(call CFG_LIB_GLOB_$(1),$$(crate)));) endef diff --git a/mk/llvm.mk b/mk/llvm.mk index a4174efa5efa9..76367e6f3a628 100644 --- a/mk/llvm.mk +++ b/mk/llvm.mk @@ -21,37 +21,59 @@ endif ifdef CFG_DISABLE_OPTIMIZE_LLVM LLVM_BUILD_CONFIG_MODE := Debug +else ifdef CFG_ENABLE_LLVM_RELEASE_DEBUGINFO +LLVM_BUILD_CONFIG_MODE := RelWithDebInfo else LLVM_BUILD_CONFIG_MODE := Release endif define DEF_LLVM_RULES +ifeq ($(1),$$(CFG_BUILD)) +LLVM_DEPS_TARGET_$(1) := $$(LLVM_DEPS) +else +LLVM_DEPS_TARGET_$(1) := $$(LLVM_DEPS) $$(LLVM_CONFIG_$$(CFG_BUILD)) +endif + # If CFG_LLVM_ROOT is defined then we don't build LLVM ourselves ifeq ($(CFG_LLVM_ROOT),) -LLVM_STAMP_$(1) = $$(CFG_LLVM_BUILD_DIR_$(1))/llvm-auto-clean-stamp +LLVM_STAMP_$(1) = $(S)src/rustllvm/llvm-auto-clean-trigger +LLVM_DONE_$(1) = $$(CFG_LLVM_BUILD_DIR_$(1))/llvm-finished-building -ifeq ($$(findstring msvc,$(1)),msvc) +$$(LLVM_CONFIG_$(1)): $$(LLVM_DONE_$(1)) -$$(LLVM_CONFIG_$(1)): $$(LLVM_DEPS) $$(LLVM_STAMP_$(1)) +ifneq ($$(CFG_NINJA),) +BUILD_LLVM_$(1) := $$(CFG_NINJA) -C $$(CFG_LLVM_BUILD_DIR_$(1)) +else ifeq ($$(findstring msvc,$(1)),msvc) +BUILD_LLVM_$(1) := $$(CFG_CMAKE) --build $$(CFG_LLVM_BUILD_DIR_$(1)) \ + --config $$(LLVM_BUILD_CONFIG_MODE) +else +BUILD_LLVM_$(1) := $$(MAKE) -C $$(CFG_LLVM_BUILD_DIR_$(1)) +endif + +$$(LLVM_DONE_$(1)): $$(LLVM_DEPS_TARGET_$(1)) $$(LLVM_STAMP_$(1)) @$$(call E, cmake: llvm) - $$(Q)$$(CFG_CMAKE) --build $$(CFG_LLVM_BUILD_DIR_$(1)) \ - --config $$(LLVM_BUILD_CONFIG_MODE) - $$(Q)touch $$(LLVM_CONFIG_$(1)) + $$(Q)if ! 
cmp $$(LLVM_STAMP_$(1)) $$(LLVM_DONE_$(1)); then \ + $$(MAKE) clean-llvm$(1); \ + $$(BUILD_LLVM_$(1)); \ + fi + $$(Q)cp $$(LLVM_STAMP_$(1)) $$@ +ifneq ($$(CFG_NINJA),) clean-llvm$(1): - + @$$(call E, clean: llvm) + $$(Q)$$(CFG_NINJA) -C $$(CFG_LLVM_BUILD_DIR_$(1)) -t clean +else ifeq ($$(findstring msvc,$(1)),msvc) +clean-llvm$(1): + @$$(call E, clean: llvm) + $$(Q)$$(CFG_CMAKE) --build $$(CFG_LLVM_BUILD_DIR_$(1)) \ + --config $$(LLVM_BUILD_CONFIG_MODE) \ + --target clean else - -$$(LLVM_CONFIG_$(1)): $$(LLVM_DEPS) $$(LLVM_STAMP_$(1)) - @$$(call E, make: llvm) - $$(Q)$$(MAKE) -C $$(CFG_LLVM_BUILD_DIR_$(1)) $$(CFG_LLVM_BUILD_ENV_$(1)) ONLY_TOOLS="$$(LLVM_TOOLS)" - $$(Q)touch $$(LLVM_CONFIG_$(1)) - clean-llvm$(1): + @$$(call E, clean: llvm) $$(Q)$$(MAKE) -C $$(CFG_LLVM_BUILD_DIR_$(1)) clean - endif else @@ -60,17 +82,6 @@ endif $$(LLVM_AR_$(1)): $$(LLVM_CONFIG_$(1)) -# This is used to independently force an LLVM clean rebuild -# when we changed something not otherwise captured by builtin -# dependencies. In these cases, commit a change that touches -# the stamp in the source dir. 
-$$(LLVM_STAMP_$(1)): $$(S)src/rustllvm/llvm-auto-clean-trigger - @$$(call E, make: cleaning llvm) - $$(Q)touch $$@.start_time - $$(Q)$$(MAKE) clean-llvm$(1) - @$$(call E, make: done cleaning llvm) - touch -r $$@.start_time $$@ && rm $$@.start_time - ifeq ($$(CFG_ENABLE_LLVM_STATIC_STDCPP),1) LLVM_STDCPP_RUSTFLAGS_$(1) = -L "$$(dir $$(shell $$(CC_$(1)) $$(CFG_GCCISH_CFLAGS_$(1)) \ -print-file-name=lib$(CFG_STDCPP_NAME).a))" @@ -102,7 +113,7 @@ $(foreach host,$(CFG_HOST), \ define LLVM_LINKAGE_DEPS $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.rustc_llvm: $$(LLVM_LINKAGE_PATH_$(2)) RUSTFLAGS$(1)_rustc_llvm_T_$(2) += $$(shell echo $$(LLVM_ALL_COMPONENTS_$(2)) | tr '-' '_' |\ - sed -e 's/^ //;s/\([^ ]*\)/\-\-cfg have_component_\1/g') + sed -e 's/^ //;s/\([^ ]*\)/\-\-cfg "llvm_component=\\"\1\\""/g') endef $(foreach source,$(CFG_HOST), \ diff --git a/mk/main.mk b/mk/main.mk index a478cafd6b958..49fdfc4118df5 100644 --- a/mk/main.mk +++ b/mk/main.mk @@ -13,17 +13,13 @@ ###################################################################### # The version number -CFG_RELEASE_NUM=1.7.0 +CFG_RELEASE_NUM=1.15.0 # An optional number to put after the label, e.g. '.2' -> '-beta.2' # NB Make sure it starts with a dot to conform to semver pre-release # versions (section 9) CFG_PRERELEASE_VERSION=.1 -# Append a version-dependent hash to each library, so we can install different -# versions in the same place -CFG_FILENAME_EXTRA=$(shell printf '%s' $(CFG_RELEASE)$(CFG_EXTRA_FILENAME) | $(CFG_HASH_COMMAND)) - ifeq ($(CFG_RELEASE_CHANNEL),stable) # This is the normal semver version string, e.g. 
"0.12.0", "0.12.0-nightly" CFG_RELEASE=$(CFG_RELEASE_NUM) @@ -53,6 +49,20 @@ CFG_RELEASE=$(CFG_RELEASE_NUM)-dev CFG_PACKAGE_VERS=$(CFG_RELEASE_NUM)-dev endif +# Append a version-dependent hash to each library, so we can install different +# versions in the same place +CFG_FILENAME_EXTRA=$(shell printf '%s' $(CFG_RELEASE)$(CFG_EXTRA_FILENAME) | $(CFG_HASH_COMMAND)) + +# If local-rust is the same major.minor as the current version, then force a local-rebuild +ifdef CFG_ENABLE_LOCAL_RUST +SEMVER_PREFIX=$(shell echo $(CFG_RELEASE_NUM) | grep -E -o '^[[:digit:]]+\.[[:digit:]]+') +LOCAL_RELEASE=$(shell $(S)src/etc/local_stage0.sh --print-rustc-release $(CFG_LOCAL_RUST_ROOT)) +ifneq (,$(filter $(SEMVER_PREFIX).%,$(LOCAL_RELEASE))) + CFG_INFO := $(info cfg: auto-detected local-rebuild using $(LOCAL_RELEASE)) + CFG_ENABLE_LOCAL_REBUILD = 1 +endif +endif + # The name of the package to use for creating tarballs, installers etc. CFG_PACKAGE_NAME=rustc-$(CFG_PACKAGE_VERS) @@ -132,10 +142,13 @@ endif ifdef CFG_ENABLE_DEBUGINFO $(info cfg: enabling debuginfo (CFG_ENABLE_DEBUGINFO)) CFG_RUSTC_FLAGS += -g +else ifdef CFG_ENABLE_DEBUGINFO_LINES + $(info cfg: enabling line number debuginfo (CFG_ENABLE_DEBUGINFO_LINES)) + CFG_RUSTC_FLAGS += -Cdebuginfo=1 endif ifdef SAVE_TEMPS - CFG_RUSTC_FLAGS += --save-temps + CFG_RUSTC_FLAGS += -C save-temps endif ifdef ASM_COMMENTS CFG_RUSTC_FLAGS += -Z asm-comments @@ -272,7 +285,7 @@ endif # LLVM macros ###################################################################### -LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl +LLVM_OPTIONAL_COMPONENTS=x86 arm aarch64 mips powerpc pnacl systemz jsbackend msp430 LLVM_REQUIRED_COMPONENTS=ipo bitreader bitwriter linker asmparser mcjit \ interpreter instrumentation @@ -320,6 +333,7 @@ LLVM_AS_$(1)=$$(CFG_LLVM_INST_DIR_$(1))/bin/llvm-as$$(X_$(1)) LLC_$(1)=$$(CFG_LLVM_INST_DIR_$(1))/bin/llc$$(X_$(1)) LLVM_ALL_COMPONENTS_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --components) 
+LLVM_VERSION_$(1)=$$(shell "$$(LLVM_CONFIG_$(1))" --version) endef @@ -358,9 +372,12 @@ CFG_INFO := $(info cfg: disabling unstable features (CFG_DISABLE_UNSTABLE_FEATUR # Turn on feature-staging export CFG_DISABLE_UNSTABLE_FEATURES # Subvert unstable feature lints to do the self-build -export RUSTC_BOOTSTRAP_KEY:=$(CFG_BOOTSTRAP_KEY) endif -export CFG_BOOTSTRAP_KEY +ifdef CFG_MUSL_ROOT +export CFG_MUSL_ROOT +endif + +export RUSTC_BOOTSTRAP := 1 ###################################################################### # Per-stage targets and runner @@ -370,7 +387,7 @@ export CFG_BOOTSTRAP_KEY # This 'function' will determine which debugger scripts to copy based on a # target triple. See debuggers.mk for more information. TRIPLE_TO_DEBUGGER_SCRIPT_SETTING=\ - $(if $(findstring windows,$(1)),none,$(if $(findstring darwin,$(1)),lldb,gdb)) + $(if $(findstring windows-msvc,$(1)),none,all) STAGES = 0 1 2 3 @@ -429,7 +446,7 @@ TSREQ$(1)_T_$(2)_H_$(3) = \ # target SREQ$(1)_T_$(2)_H_$(3) = \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ - $$(foreach dep,$$(TARGET_CRATES), \ + $$(foreach dep,$$(TARGET_CRATES_$(2)), \ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(dep)) \ tmp/install-debugger-scripts$(1)_T_$(2)_H_$(3)-$$(call TRIPLE_TO_DEBUGGER_SCRIPT_SETTING,$(2)).done @@ -438,7 +455,7 @@ SREQ$(1)_T_$(2)_H_$(3) = \ CSREQ$(1)_T_$(2)_H_$(3) = \ $$(TSREQ$(1)_T_$(2)_H_$(3)) \ $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \ - $$(foreach dep,$$(CRATES),$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(dep)) + $$(foreach dep,$$(HOST_CRATES),$$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(dep)) ifeq ($(1),0) # Don't run the stage0 compiler under valgrind - that ship has sailed @@ -477,15 +494,19 @@ ifeq ($$(OSTYPE_$(3)),apple-darwin) else ifeq ($$(CFG_WINDOWSY_$(3)),1) LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := PATH +else +ifeq ($$(OSTYPE_$(3)),unknown-haiku) + LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LIBRARY_PATH else LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3) := LD_LIBRARY_PATH endif endif +endif 
LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3) := \ $$(CURDIR)/$$(HLIB$(1)_H_$(3)):$$(CFG_LLVM_INST_DIR_$(3))/lib LD_LIBRARY_PATH_ENV_TARGETDIR$(1)_T_$(2)_H_$(3) := \ - $$(CURDIR)/$$(TLIB1_T_$(2)_H_$(CFG_BUILD)) + $$(CURDIR)/$$(TLIB$(1)_T_$(2)_H_$(3)) HOST_RPATH_VAR$(1)_T_$(2)_H_$(3) := \ $$(LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3))=$$(LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3)):$$$$$$(LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3)) @@ -498,18 +519,19 @@ RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(HOST_RPATH_VAR$(1)_T_$(2)_H_$(3)) # if you're building a cross config, the host->* parts are # effectively stage1, since it uses the just-built stage0. # -# This logic is similar to how the LD_LIBRARY_PATH variable must -# change be slightly different when doing cross compilations. -# The build doesn't copy over all target libraries into -# a new directory, so we need to point the library path at -# the build directory where all the target libraries came -# from (the stage0 build host). Otherwise the relative rpaths -# inside of the rustc binary won't get resolved correctly. +# Also be sure to use the right rpath because we're loading libraries from the +# CFG_BUILD's stage1 directory for our target, so switch this one instance of +# `RPATH_VAR` to get the bootstrap working. ifeq ($(1),0) ifneq ($(strip $(CFG_BUILD)),$(strip $(3))) CFGFLAG$(1)_T_$(2)_H_$(3) = stage1 -RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(TARGET_RPATH_VAR$(1)_T_$(2)_H_$(3)) +RPATH_VAR$(1)_T_$(2)_H_$(3) := $$(TARGET_RPATH_VAR1_T_$(2)_H_$$(CFG_BUILD)) +else +ifdef CFG_ENABLE_LOCAL_REBUILD +# Assume the local-rebuild rustc already has stage1 features too. 
+CFGFLAG$(1)_T_$(2)_H_$(3) = stage1 +endif endif endif @@ -522,14 +544,6 @@ STAGE$(1)_T_$(2)_H_$(3) := \ $$(CFG_RUSTC_FLAGS) $$(EXTRAFLAGS_STAGE$(1)) --target=$(2)) \ $$(RUSTC_FLAGS_$(2)) -PERF_STAGE$(1)_T_$(2)_H_$(3) := \ - $$(Q)$$(call CFG_RUN_TARG_$(3),$(1), \ - $$(CFG_PERF_TOOL) \ - $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) \ - --cfg $$(CFGFLAG$(1)_T_$(2)_H_$(3)) \ - $$(CFG_RUSTC_FLAGS) $$(EXTRAFLAGS_STAGE$(1)) --target=$(2)) \ - $$(RUSTC_FLAGS_$(2)) - endef $(foreach build,$(CFG_HOST), \ @@ -605,7 +619,8 @@ ALL_TARGET_RULES = $(foreach target,$(CFG_TARGET), \ $(foreach host,$(CFG_HOST), \ all-target-$(target)-host-$(host))) -all: $(ALL_TARGET_RULES) $(GENERATED) docs +all-no-docs: $(ALL_TARGET_RULES) $(GENERATED) +all: all-no-docs docs ###################################################################### # Build system documentation diff --git a/mk/perf.mk b/mk/perf.mk deleted file mode 100644 index 16cbaab495d82..0000000000000 --- a/mk/perf.mk +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2012 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- - -ifdef CFG_PERF_TOOL -rustc-perf$(X): $(CFG_BUILD)/stage2/bin/rustc$(X_$(CFG_BUILD)) - @$(call E, perf compile: $@) - $(PERF_STAGE2_T_$(CFG_BUILD)_H_$(CFG_BUILD)) \ - -o $@ $(COMPILER_CRATE) >rustc-perf.err 2>&1 - $(Q)rm -f $(LIBRUSTC_GLOB) -else -rustc-perf$(X): $(CFG_BUILD)/stage2/bin/rustc$(X_$(CFG_BUILD)) - $(Q)touch $@ -endif - -perf: check-stage2-perf rustc-perf$(X_$(CFG_BUILD)) - $(Q)find $(CFG_BUILD)/test/perf -name \*.err | xargs cat - $(Q)cat rustc-perf.err diff --git a/mk/platform.mk b/mk/platform.mk index 5239086a6552c..6a7a20cbfdb99 100644 --- a/mk/platform.mk +++ b/mk/platform.mk @@ -77,23 +77,6 @@ define DEF_GOOD_VALGRIND endef $(foreach t,$(CFG_TARGET),$(eval $(call DEF_GOOD_VALGRIND,$(t)))) -ifneq ($(findstring linux,$(CFG_OSTYPE)),) - ifdef CFG_PERF - ifneq ($(CFG_PERF_WITH_LOGFD),) - CFG_PERF_TOOL := $(CFG_PERF) stat -r 3 --log-fd 2 - else - CFG_PERF_TOOL := $(CFG_PERF) stat -r 3 - endif - else - ifdef CFG_VALGRIND - CFG_PERF_TOOL := \ - $(CFG_VALGRIND) --tool=cachegrind --cache-sim=yes --branch-sim=yes - else - CFG_PERF_TOOL := /usr/bin/time --verbose - endif - endif -endif - AR := ar define SET_FROM_CFG @@ -119,8 +102,6 @@ include $(wildcard $(CFG_SRC_DIR)mk/cfg/*.mk) define ADD_INSTALLED_OBJECTS INSTALLED_OBJECTS_$(1) += $$(CFG_INSTALLED_OBJECTS_$(1)) REQUIRED_OBJECTS_$(1) += $$(CFG_THIRD_PARTY_OBJECTS_$(1)) - INSTALLED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) - REQUIRED_OBJECTS_$(1) += $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) endef $(foreach target,$(CFG_TARGET), \ @@ -135,6 +116,18 @@ endef $(foreach target,$(CFG_TARGET), \ $(eval $(call DEFINE_LINKER,$(target)))) +define ADD_JEMALLOC_DEP + ifndef CFG_DISABLE_JEMALLOC_$(1) + ifndef CFG_DISABLE_JEMALLOC + RUST_DEPS_std_T_$(1) += alloc_jemalloc + TARGET_CRATES_$(1) += alloc_jemalloc + endif + endif +endef + +$(foreach target,$(CFG_TARGET), \ + $(eval $(call ADD_JEMALLOC_DEP,$(target)))) + # The -Qunused-arguments sidesteps spurious warnings from clang define 
FILTER_FLAGS ifeq ($$(CFG_USING_CLANG),1) @@ -153,7 +146,7 @@ define CC_MACROS CFG_CC_INCLUDE_$(1)=-I $$(1) ifeq ($$(findstring msvc,$(1)),msvc) CFG_CC_OUTPUT_$(1)=-Fo:$$(1) - CFG_CREATE_ARCHIVE_$(1)=$$(AR_$(1)) -OUT:$$(1) + CFG_CREATE_ARCHIVE_$(1)='$$(AR_$(1))' -OUT:$$(1) else CFG_CC_OUTPUT_$(1)=-o $$(1) CFG_CREATE_ARCHIVE_$(1)=$$(AR_$(1)) crus $$(1) @@ -174,7 +167,7 @@ ifdef CFG_CCACHE_BASEDIR export CCACHE_BASEDIR endif -FIND_COMPILER = $(word 1,$(1:ccache=)) +FIND_COMPILER = $(strip $(1:ccache=)) define CFG_MAKE_TOOLCHAIN # Prepend the tools with their prefix if cross compiling @@ -192,7 +185,7 @@ define CFG_MAKE_TOOLCHAIN endif endif - CFG_COMPILE_C_$(1) = $$(CC_$(1)) \ + CFG_COMPILE_C_$(1) = '$$(call FIND_COMPILER,$$(CC_$(1)))' \ $$(CFLAGS) \ $$(CFG_GCCISH_CFLAGS) \ $$(CFG_GCCISH_CFLAGS_$(1)) \ @@ -203,7 +196,7 @@ define CFG_MAKE_TOOLCHAIN $$(CFG_GCCISH_LINK_FLAGS_$(1)) \ $$(CFG_GCCISH_DEF_FLAG_$(1))$$(3) $$(2) \ $$(call CFG_INSTALL_NAME_$(1),$$(4)) - CFG_COMPILE_CXX_$(1) = $$(CXX_$(1)) \ + CFG_COMPILE_CXX_$(1) = '$$(call FIND_COMPILER,$$(CXX_$(1)))' \ $$(CXXFLAGS) \ $$(CFG_GCCISH_CFLAGS) \ $$(CFG_GCCISH_CXXFLAGS) \ @@ -226,12 +219,19 @@ define CFG_MAKE_TOOLCHAIN LLVM_MC_RELOCATION_MODEL="default" endif + # LLVM changed this flag in 3.9 + ifdef CFG_LLVM_MC_HAS_RELOCATION_MODEL + LLVM_MC_RELOC_FLAG := -relocation-model=$$(LLVM_MC_RELOCATION_MODEL) + else + LLVM_MC_RELOC_FLAG := -position-independent + endif + # We're using llvm-mc as our assembler because it supports # .cfi pseudo-ops on mac CFG_ASSEMBLE_$(1)=$$(CPP_$(1)) -E $$(2) | \ $$(LLVM_MC_$$(CFG_BUILD)) \ -assemble \ - -relocation-model=$$(LLVM_MC_RELOCATION_MODEL) \ + $$(LLVM_MC_RELOC_FLAG) \ -filetype=obj \ -triple=$(1) \ -o=$$(1) diff --git a/mk/prepare.mk b/mk/prepare.mk index 87a445000ada4..20e20e9b5df7e 100644 --- a/mk/prepare.mk +++ b/mk/prepare.mk @@ -82,7 +82,7 @@ define PREPARE_MAN endef -PREPARE_TOOLS = $(filter-out compiletest rustbook error-index-generator, $(TOOLS)) +PREPARE_TOOLS = 
$(filter-out compiletest rustbook error_index_generator, $(TOOLS)) # $(1) is tool @@ -122,7 +122,7 @@ prepare-host-lib-$(1)-$(2)-$(3)-$(4): \ prepare-host-lib-$(1)-$(2)-$(3)-$(4): \ PREPARE_WORKING_DEST_LIB_DIR=$$(PREPARE_DEST_DIR)/$$(call PREPARE_TAR_LIB_DIR,$$(HLIB_RELATIVE$(2)_H_$(3))) prepare-host-lib-$(1)-$(2)-$(3)-$(4): prepare-maybe-clean-$(4) \ - $$(foreach dep,$$(RUST_DEPS_$(1)),prepare-host-lib-$$(dep)-$(2)-$(3)-$(4)) \ + $$(foreach dep,$$(RUST_DEPS_$(1)_T_$(3)),prepare-host-lib-$$(dep)-$(2)-$(3)-$(4)) \ $$(HLIB$(2)_H_$(3))/stamp.$(1) \ prepare-host-dirs-$(4) $$(if $$(findstring $(2), $$(PREPARE_STAGE)), \ @@ -147,7 +147,7 @@ prepare-target-$(2)-host-$(3)-$(1)-$(4): \ prepare-target-$(2)-host-$(3)-$(1)-$(4): \ PREPARE_DEST_BIN_DIR=$$(PREPARE_DEST_LIB_DIR)/rustlib/$(3)/bin prepare-target-$(2)-host-$(3)-$(1)-$(4): prepare-maybe-clean-$(4) \ - $$(foreach crate,$$(TARGET_CRATES), \ + $$(foreach crate,$$(TARGET_CRATES_$(2)), \ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(crate)) \ $$(if $$(findstring $(2),$$(CFG_HOST)), \ $$(foreach crate,$$(HOST_CRATES), \ @@ -161,7 +161,7 @@ prepare-target-$(2)-host-$(3)-$(1)-$(4): prepare-maybe-clean-$(4) \ $$(if $$(findstring $(3), $$(PREPARE_HOST)), \ $$(call PREPARE_DIR,$$(PREPARE_WORKING_DEST_LIB_DIR)) \ $$(call PREPARE_DIR,$$(PREPARE_DEST_BIN_DIR)) \ - $$(foreach crate,$$(TARGET_CRATES), \ + $$(foreach crate,$$(TARGET_CRATES_$(2)), \ $$(if $$(or $$(findstring 1, $$(ONLY_RLIB_$$(crate))),$$(findstring 1,$$(CFG_INSTALL_ONLY_RLIB_$(2)))),, \ $$(call PREPARE_LIB,$$(call CFG_LIB_GLOB_$(2),$$(crate)))) \ $$(call PREPARE_LIB,$$(call CFG_RLIB_GLOB,$$(crate)))) \ diff --git a/mk/reconfig.mk b/mk/reconfig.mk index 1a3a1774384d8..b8f51097868d4 100644 --- a/mk/reconfig.mk +++ b/mk/reconfig.mk @@ -38,6 +38,6 @@ else SREL_ROOT := $(SREL) endif -config.stamp: $(S)configure $(S)Makefile.in $(S)src/snapshots.txt +config.stamp: $(S)configure $(S)Makefile.in $(S)src/stage0.txt @$(call E, cfg: reconfiguring) $(SREL_ROOT)configure 
$(CFG_CONFIGURE_ARGS) diff --git a/mk/rt.mk b/mk/rt.mk index 9dbbcbebb979d..a67bded288e20 100644 --- a/mk/rt.mk +++ b/mk/rt.mk @@ -37,6 +37,16 @@ ################################################################################ NATIVE_LIBS := hoedown miniz rust_test_helpers +# A macro to add a generic implementation of intrinsics iff a arch optimized implementation is not +# already in the list. +# $(1) is the target +# $(2) is the intrinsic +define ADD_INTRINSIC + ifeq ($$(findstring X,$$(foreach intrinsic,$$(COMPRT_OBJS_$(1)),$$(if $$(findstring $(2),$$(intrinsic)),X,))),) + COMPRT_OBJS_$(1) += $(2) + endif +endef + # $(1) is the target triple define NATIVE_LIBRARIES @@ -148,7 +158,17 @@ ifeq ($$(CFG_WINDOWSY_$(1)),1) else ifeq ($(OSTYPE_$(1)), apple-ios) JEMALLOC_ARGS_$(1) := --disable-tls else ifeq ($(findstring android, $(OSTYPE_$(1))), android) - JEMALLOC_ARGS_$(1) := --disable-tls + # We force android to have prefixed symbols because apparently replacement of + # the libc allocator doesn't quite work. When this was tested (unprefixed + # symbols), it was found that the `realpath` function in libc would allocate + # with libc malloc (not jemalloc malloc), and then the standard library would + # free with jemalloc free, causing a segfault. + # + # If the test suite passes, however, without symbol prefixes then we should be + # good to go! 
+ JEMALLOC_ARGS_$(1) := --disable-tls --with-jemalloc-prefix=je_ +else ifeq ($(findstring dragonfly, $(OSTYPE_$(1))), dragonfly) + JEMALLOC_ARGS_$(1) := --with-jemalloc-prefix=je_ endif ifdef CFG_ENABLE_DEBUG_JEMALLOC @@ -186,7 +206,7 @@ JEMALLOC_LOCAL_$(1) := $$(JEMALLOC_BUILD_DIR_$(1))/lib/$$(JEMALLOC_REAL_NAME_$(1 $$(JEMALLOC_LOCAL_$(1)): $$(JEMALLOC_DEPS) $$(MKFILE_DEPS) @$$(call E, make: jemalloc) cd "$$(JEMALLOC_BUILD_DIR_$(1))"; "$(S)src/jemalloc/configure" \ - $$(JEMALLOC_ARGS_$(1)) --with-jemalloc-prefix=je_ $(CFG_JEMALLOC_FLAGS) \ + $$(JEMALLOC_ARGS_$(1)) $(CFG_JEMALLOC_FLAGS) \ --build=$$(CFG_GNU_TRIPLE_$(CFG_BUILD)) --host=$$(CFG_GNU_TRIPLE_$(1)) \ CC="$$(CC_$(1)) $$(CFG_JEMALLOC_CFLAGS_$(1))" \ AR="$$(AR_$(1))" \ @@ -213,60 +233,384 @@ endif # compiler-rt ################################################################################ -ifdef CFG_ENABLE_FAST_MAKE -COMPRT_DEPS := $(S)/.gitmodules -else -COMPRT_DEPS := $(wildcard \ - $(S)src/compiler-rt/* \ - $(S)src/compiler-rt/*/* \ - $(S)src/compiler-rt/*/*/* \ - $(S)src/compiler-rt/*/*/*/*) -endif +# Everything below is a manual compilation of compiler-rt, disregarding its +# build system. See comments in `src/bootstrap/native.rs` for more information. COMPRT_NAME_$(1) := $$(call CFG_STATIC_LIB_NAME_$(1),compiler-rt) COMPRT_LIB_$(1) := $$(RT_OUTPUT_DIR_$(1))/$$(COMPRT_NAME_$(1)) COMPRT_BUILD_DIR_$(1) := $$(RT_OUTPUT_DIR_$(1))/compiler-rt +# We must avoid compiling both a generic implementation (e.g. `floatdidf.c) and an arch optimized +# implementation (e.g. `x86_64/floatdidf.S) of the same symbol (e.g. `floatdidf) because that causes +# linker errors. To avoid that, we first add all the arch optimized implementations and then add the +# generic implementations if and only if its arch optimized version is not already in the list. This +# last part is handled by the ADD_INTRINSIC macro. 
+
+COMPRT_OBJS_$(1) :=
+
+ifeq ($$(findstring msvc,$(1)),)
+ifeq ($$(findstring x86_64,$(1)),x86_64)
+COMPRT_OBJS_$(1) += \
+ x86_64/chkstk.o \
+ x86_64/chkstk2.o \
+ x86_64/floatdidf.o \
+ x86_64/floatdisf.o \
+ x86_64/floatdixf.o \
+ x86_64/floatundidf.o \
+ x86_64/floatundisf.o \
+ x86_64/floatundixf.o
+endif
+
+ifeq ($$(findstring i686,$$(patsubst i%86,i686,$(1))),i686)
+COMPRT_OBJS_$(1) += \
+ i386/ashldi3.o \
+ i386/ashrdi3.o \
+ i386/chkstk.o \
+ i386/chkstk2.o \
+ i386/divdi3.o \
+ i386/floatdidf.o \
+ i386/floatdisf.o \
+ i386/floatdixf.o \
+ i386/floatundidf.o \
+ i386/floatundisf.o \
+ i386/floatundixf.o \
+ i386/lshrdi3.o \
+ i386/moddi3.o \
+ i386/muldi3.o \
+ i386/udivdi3.o \
+ i386/umoddi3.o
+endif
+
+else
+
+ifeq ($$(findstring x86_64,$(1)),x86_64)
+COMPRT_OBJS_$(1) += \
+ x86_64/floatdidf.o \
+ x86_64/floatdisf.o \
+ x86_64/floatdixf.o
+endif
+
+endif
+
+# Generic ARM sources, nothing compiles on iOS though
+ifeq ($$(findstring arm,$(1)),arm)
+ifeq ($$(findstring ios,$(1)),)
+COMPRT_OBJS_$(1) += \
+ arm/aeabi_cdcmp.o \
+ arm/aeabi_cdcmpeq_check_nan.o \
+ arm/aeabi_cfcmp.o \
+ arm/aeabi_cfcmpeq_check_nan.o \
+ arm/aeabi_dcmp.o \
+ arm/aeabi_div0.o \
+ arm/aeabi_drsub.o \
+ arm/aeabi_fcmp.o \
+ arm/aeabi_frsub.o \
+ arm/aeabi_idivmod.o \
+ arm/aeabi_ldivmod.o \
+ arm/aeabi_memcmp.o \
+ arm/aeabi_memcpy.o \
+ arm/aeabi_memmove.o \
+ arm/aeabi_memset.o \
+ arm/aeabi_uidivmod.o \
+ arm/aeabi_uldivmod.o \
+ arm/bswapdi2.o \
+ arm/bswapsi2.o \
+ arm/clzdi2.o \
+ arm/clzsi2.o \
+ arm/comparesf2.o \
+ arm/divmodsi4.o \
+ arm/divsi3.o \
+ arm/modsi3.o \
+ arm/switch16.o \
+ arm/switch32.o \
+ arm/switch8.o \
+ arm/switchu8.o \
+ arm/sync_synchronize.o \
+ arm/udivmodsi4.o \
+ arm/udivsi3.o \
+ arm/umodsi3.o
+endif
+endif
+
+# Thumb sources
+ifeq ($$(findstring armv7,$(1)),armv7)
+COMPRT_OBJS_$(1) += \
+ arm/sync_fetch_and_add_4.o \
+ arm/sync_fetch_and_add_8.o \
+ arm/sync_fetch_and_and_4.o \
+ arm/sync_fetch_and_and_8.o \
+ arm/sync_fetch_and_max_4.o \
+
arm/sync_fetch_and_max_8.o \ + arm/sync_fetch_and_min_4.o \ + arm/sync_fetch_and_min_8.o \ + arm/sync_fetch_and_nand_4.o \ + arm/sync_fetch_and_nand_8.o \ + arm/sync_fetch_and_or_4.o \ + arm/sync_fetch_and_or_8.o \ + arm/sync_fetch_and_sub_4.o \ + arm/sync_fetch_and_sub_8.o \ + arm/sync_fetch_and_umax_4.o \ + arm/sync_fetch_and_umax_8.o \ + arm/sync_fetch_and_umin_4.o \ + arm/sync_fetch_and_umin_8.o \ + arm/sync_fetch_and_xor_4.o \ + arm/sync_fetch_and_xor_8.o +endif + +# VFP sources +ifeq ($$(findstring eabihf,$(1)),eabihf) +COMPRT_OBJS_$(1) += \ + arm/adddf3vfp.o \ + arm/addsf3vfp.o \ + arm/divdf3vfp.o \ + arm/divsf3vfp.o \ + arm/eqdf2vfp.o \ + arm/eqsf2vfp.o \ + arm/extendsfdf2vfp.o \ + arm/fixdfsivfp.o \ + arm/fixsfsivfp.o \ + arm/fixunsdfsivfp.o \ + arm/fixunssfsivfp.o \ + arm/floatsidfvfp.o \ + arm/floatsisfvfp.o \ + arm/floatunssidfvfp.o \ + arm/floatunssisfvfp.o \ + arm/gedf2vfp.o \ + arm/gesf2vfp.o \ + arm/gtdf2vfp.o \ + arm/gtsf2vfp.o \ + arm/ledf2vfp.o \ + arm/lesf2vfp.o \ + arm/ltdf2vfp.o \ + arm/ltsf2vfp.o \ + arm/muldf3vfp.o \ + arm/mulsf3vfp.o \ + arm/negdf2vfp.o \ + arm/negsf2vfp.o \ + arm/nedf2vfp.o \ + arm/nesf2vfp.o \ + arm/restore_vfp_d8_d15_regs.o \ + arm/save_vfp_d8_d15_regs.o \ + arm/subdf3vfp.o \ + arm/subsf3vfp.o \ + arm/truncdfsf2vfp.o \ + arm/unorddf2vfp.o \ + arm/unordsf2vfp.o +endif + +$(foreach intrinsic,absvdi2.o \ + absvsi2.o \ + adddf3.o \ + addsf3.o \ + addvdi3.o \ + addvsi3.o \ + apple_versioning.o \ + ashldi3.o \ + ashrdi3.o \ + clear_cache.o \ + clzdi2.o \ + clzsi2.o \ + cmpdi2.o \ + comparedf2.o \ + comparesf2.o \ + ctzdi2.o \ + ctzsi2.o \ + divdc3.o \ + divdf3.o \ + divdi3.o \ + divmoddi4.o \ + divmodsi4.o \ + divsc3.o \ + divsf3.o \ + divsi3.o \ + divxc3.o \ + extendsfdf2.o \ + extendhfsf2.o \ + ffsdi2.o \ + fixdfdi.o \ + fixdfsi.o \ + fixsfdi.o \ + fixsfsi.o \ + fixunsdfdi.o \ + fixunsdfsi.o \ + fixunssfdi.o \ + fixunssfsi.o \ + fixunsxfdi.o \ + fixunsxfsi.o \ + fixxfdi.o \ + floatdidf.o \ + floatdisf.o \ + floatdixf.o \ + 
floatsidf.o \ + floatsisf.o \ + floatundidf.o \ + floatundisf.o \ + floatundixf.o \ + floatunsidf.o \ + floatunsisf.o \ + int_util.o \ + lshrdi3.o \ + moddi3.o \ + modsi3.o \ + muldc3.o \ + muldf3.o \ + muldi3.o \ + mulodi4.o \ + mulosi4.o \ + muloti4.o \ + mulsc3.o \ + mulsf3.o \ + mulvdi3.o \ + mulvsi3.o \ + mulxc3.o \ + negdf2.o \ + negdi2.o \ + negsf2.o \ + negvdi2.o \ + negvsi2.o \ + paritydi2.o \ + paritysi2.o \ + popcountdi2.o \ + popcountsi2.o \ + powidf2.o \ + powisf2.o \ + powixf2.o \ + subdf3.o \ + subsf3.o \ + subvdi3.o \ + subvsi3.o \ + truncdfhf2.o \ + truncdfsf2.o \ + truncsfhf2.o \ + ucmpdi2.o \ + udivdi3.o \ + udivmoddi4.o \ + udivmodsi4.o \ + udivsi3.o \ + umoddi3.o \ + umodsi3.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) + +ifeq ($$(findstring ios,$(1)),) +$(foreach intrinsic,absvti2.o \ + addtf3.o \ + addvti3.o \ + ashlti3.o \ + ashrti3.o \ + clzti2.o \ + cmpti2.o \ + ctzti2.o \ + divtf3.o \ + divti3.o \ + ffsti2.o \ + fixdfti.o \ + fixsfti.o \ + fixunsdfti.o \ + fixunssfti.o \ + fixunsxfti.o \ + fixxfti.o \ + floattidf.o \ + floattisf.o \ + floattixf.o \ + floatuntidf.o \ + floatuntisf.o \ + floatuntixf.o \ + lshrti3.o \ + modti3.o \ + multf3.o \ + multi3.o \ + mulvti3.o \ + negti2.o \ + negvti2.o \ + parityti2.o \ + popcountti2.o \ + powitf2.o \ + subtf3.o \ + subvti3.o \ + trampoline_setup.o \ + ucmpti2.o \ + udivmodti4.o \ + udivti3.o \ + umodti3.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) +endif + +ifeq ($$(findstring apple,$(1)),apple) +$(foreach intrinsic,atomic_flag_clear.o \ + atomic_flag_clear_explicit.o \ + atomic_flag_test_and_set.o \ + atomic_flag_test_and_set_explicit.o \ + atomic_signal_fence.o \ + atomic_thread_fence.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) +endif + +ifeq ($$(findstring windows,$(1)),) +$(call ADD_INTRINSIC,$(1),emutls.o) +endif + +ifeq ($$(findstring msvc,$(1)),) + +ifeq ($$(findstring freebsd,$(1)),) +$(call ADD_INTRINSIC,$(1),gcc_personality_v0.o) +endif +endif + +ifeq ($$(findstring 
aarch64,$(1)),aarch64) +$(foreach intrinsic,comparetf2.o \ + extenddftf2.o \ + extendsftf2.o \ + fixtfdi.o \ + fixtfsi.o \ + fixtfti.o \ + fixunstfdi.o \ + fixunstfsi.o \ + fixunstfti.o \ + floatditf.o \ + floatsitf.o \ + floatunditf.o \ + floatunsitf.o \ + multc3.o \ + trunctfdf2.o \ + trunctfsf2.o, + $(call ADD_INTRINSIC,$(1),$(intrinsic))) +endif + ifeq ($$(findstring msvc,$(1)),msvc) -$$(COMPRT_LIB_$(1)): $$(COMPRT_DEPS) $$(MKFILE_DEPS) $$(LLVM_CONFIG_$(1)) - @$$(call E, cmake: compiler-rt) - $$(Q)cd "$$(COMPRT_BUILD_DIR_$(1))"; $$(CFG_CMAKE) "$(S)src/compiler-rt" \ - -DCMAKE_BUILD_TYPE=$$(LLVM_BUILD_CONFIG_MODE) \ - -DLLVM_CONFIG_PATH=$$(LLVM_CONFIG_$(1)) \ - -G"$$(CFG_CMAKE_GENERATOR)" - $$(Q)$$(CFG_CMAKE) --build "$$(COMPRT_BUILD_DIR_$(1))" \ - --target lib/builtins/builtins \ - --config $$(LLVM_BUILD_CONFIG_MODE) \ - -- //v:m //nologo - $$(Q)cp $$(COMPRT_BUILD_DIR_$(1))/lib/windows/$$(LLVM_BUILD_CONFIG_MODE)/clang_rt.builtins-$$(HOST_$(1)).lib $$@ +$$(COMPRT_BUILD_DIR_$(1))/%.o: CFLAGS += -Zl -D__func__=__FUNCTION__ else -COMPRT_CC_$(1) := $$(CC_$(1)) -COMPRT_AR_$(1) := $$(AR_$(1)) -# We chomp -Werror here because GCC warns about the type signature of -# builtins not matching its own and the build fails. It's a bit hacky, -# but what can we do, we're building libclang-rt using GCC ...... -COMPRT_CFLAGS_$(1) := $$(subst -Werror,,$$(CFG_GCCISH_CFLAGS_$(1))) -std=c99 - -# FreeBSD Clang's packaging is problematic; it doesn't copy unwind.h to -# the standard include directory. 
This should really be in our changes to -# compiler-rt, but we override the CFLAGS here so there isn't much choice -ifeq ($$(findstring freebsd,$(1)),freebsd) - COMPRT_CFLAGS_$(1) += -I/usr/include/c++/v1 +$$(COMPRT_BUILD_DIR_$(1))/%.o: CFLAGS += -fno-builtin -fvisibility=hidden \ + -fomit-frame-pointer -ffreestanding +endif + +COMPRT_OBJS_$(1) := $$(COMPRT_OBJS_$(1):%=$$(COMPRT_BUILD_DIR_$(1))/%) + +$$(COMPRT_BUILD_DIR_$(1))/%.o: $(S)src/compiler-rt/lib/builtins/%.c + @mkdir -p $$(@D) + @$$(call E, compile: $$@) + $$(Q)$$(call CFG_COMPILE_C_$(1),$$@,$$<) + +$$(COMPRT_BUILD_DIR_$(1))/%.o: $(S)src/compiler-rt/lib/builtins/%.S \ + $$(LLVM_CONFIG_$$(CFG_BUILD)) + @mkdir -p $$(@D) + @$$(call E, compile: $$@) + $$(Q)$$(call CFG_ASSEMBLE_$(1),$$@,$$<) + +ifeq ($$(findstring msvc,$(1)),msvc) +$$(COMPRT_BUILD_DIR_$(1))/%.o: \ + export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(1))) endif -$$(COMPRT_LIB_$(1)): $$(COMPRT_DEPS) $$(MKFILE_DEPS) - @$$(call E, make: compiler-rt) - $$(Q)$$(MAKE) -C "$(S)src/compiler-rt" \ - ProjSrcRoot="$(S)src/compiler-rt" \ - ProjObjRoot="$$(abspath $$(COMPRT_BUILD_DIR_$(1)))" \ - CC='$$(COMPRT_CC_$(1))' \ - AR='$$(COMPRT_AR_$(1))' \ - RANLIB='$$(COMPRT_AR_$(1)) s' \ - CFLAGS="$$(COMPRT_CFLAGS_$(1))" \ - TargetTriple=$(1) \ - triple-builtins - $$(Q)cp $$(COMPRT_BUILD_DIR_$(1))/triple/builtins/libcompiler_rt.a $$@ +ifeq ($$(findstring emscripten,$(1)),emscripten) +# FIXME: emscripten doesn't use compiler-rt and can't build it without +# further hacks +COMPRT_OBJS_$(1) := endif + +$$(COMPRT_LIB_$(1)): $$(COMPRT_OBJS_$(1)) + @$$(call E, link: $$@) + $$(Q)$$(call CFG_CREATE_ARCHIVE_$(1),$$@) $$^ + ################################################################################ # libbacktrace # @@ -288,15 +632,16 @@ ifeq ($$(findstring darwin,$$(OSTYPE_$(1))),darwin) $$(BACKTRACE_LIB_$(1)): touch $$@ -else -ifeq ($$(findstring ios,$$(OSTYPE_$(1))),ios) +else ifeq ($$(findstring ios,$$(OSTYPE_$(1))),ios) # See comment above $$(BACKTRACE_LIB_$(1)): 
touch $$@ -else - -ifeq ($$(findstring msvc,$(1)),msvc) +else ifeq ($$(findstring msvc,$(1)),msvc) # See comment above +$$(BACKTRACE_LIB_$(1)): + touch $$@ +else ifeq ($$(findstring emscripten,$(1)),emscripten) +# FIXME: libbacktrace doesn't understand the emscripten triple $$(BACKTRACE_LIB_$(1)): touch $$@ else @@ -335,7 +680,7 @@ $$(BACKTRACE_BUILD_DIR_$(1))/Makefile: $$(BACKTRACE_DEPS) $$(MKFILE_DEPS) CC="$$(CC_$(1))" \ AR="$$(AR_$(1))" \ RANLIB="$$(AR_$(1)) s" \ - CFLAGS="$$(CFG_GCCISH_CFLAGS_$(1):-Werror=) -fno-stack-protector" \ + CFLAGS="$$(CFG_GCCISH_CFLAGS_$(1)) -Wno-error -fno-stack-protector" \ $(S)src/libbacktrace/configure --build=$(CFG_GNU_TRIPLE_$(CFG_BUILD)) --host=$(CFG_GNU_TRIPLE_$(1))) $$(Q)echo '#undef HAVE_ATOMIC_FUNCTIONS' >> \ $$(BACKTRACE_BUILD_DIR_$(1))/config.h @@ -348,9 +693,7 @@ $$(BACKTRACE_LIB_$(1)): $$(BACKTRACE_BUILD_DIR_$(1))/Makefile $$(MKFILE_DEPS) INCDIR=$(S)src/libbacktrace $$(Q)cp $$(BACKTRACE_BUILD_DIR_$(1))/.libs/libbacktrace.a $$@ -endif # endif for msvc -endif # endif for ios -endif # endif for darwin +endif ################################################################################ # libc/libunwind for musl diff --git a/mk/rustllvm.mk b/mk/rustllvm.mk index 6adffda7d1b32..2d63f69960f78 100644 --- a/mk/rustllvm.mk +++ b/mk/rustllvm.mk @@ -24,7 +24,7 @@ LLVM_EXTRA_INCDIRS_$(1)= $$(call CFG_CC_INCLUDE_$(1),$(S)src/llvm/include) \ endif RUSTLLVM_OBJS_CS_$(1) := $$(addprefix rustllvm/, \ - ExecutionEngineWrapper.cpp RustWrapper.cpp PassWrapper.cpp \ + RustWrapper.cpp PassWrapper.cpp \ ArchiveWrapper.cpp) RUSTLLVM_INCS_$(1) = $$(LLVM_EXTRA_INCDIRS_$(1)) \ @@ -32,6 +32,11 @@ RUSTLLVM_INCS_$(1) = $$(LLVM_EXTRA_INCDIRS_$(1)) \ $$(call CFG_CC_INCLUDE_$(1),$$(S)src/rustllvm/include) RUSTLLVM_OBJS_OBJS_$(1) := $$(RUSTLLVM_OBJS_CS_$(1):rustllvm/%.cpp=$(1)/rustllvm/%.o) +# Flag that we are building with Rust's llvm fork +ifeq ($(CFG_LLVM_ROOT),) +RUSTLLVM_CXXFLAGS_$(1) := -DLLVM_RUSTLLVM +endif + # Note that we appease `cl.exe` and 
its need for some sort of exception # handling flag with the `EHsc` argument here as well. ifeq ($$(findstring msvc,$(1)),msvc) @@ -43,6 +48,9 @@ $$(RT_OUTPUT_DIR_$(1))/$$(call CFG_STATIC_LIB_NAME_$(1),rustllvm): \ @$$(call E, link: $$@) $$(Q)$$(call CFG_CREATE_ARCHIVE_$(1),$$@) $$^ +RUSTLLVM_COMPONENTS_$(1) = $$(shell echo $$(LLVM_ALL_COMPONENTS_$(1)) |\ + tr 'a-z-' 'A-Z_'| sed -e 's/^ //;s/\([^ ]*\)/\-DLLVM_COMPONENT_\1/g') + # On MSVC we need to double-escape arguments that llvm-config printed which # start with a '/'. The shell we're running in will auto-translate the argument # `/foo` to `C:/msys64/foo` but we really want it to be passed through as `/foo` @@ -51,6 +59,8 @@ $(1)/rustllvm/%.o: $(S)src/rustllvm/%.cpp $$(MKFILE_DEPS) $$(LLVM_CONFIG_$(1)) @$$(call E, compile: $$@) $$(Q)$$(call CFG_COMPILE_CXX_$(1), $$@,) \ $$(subst /,//,$$(LLVM_CXXFLAGS_$(1))) \ + $$(RUSTLLVM_COMPONENTS_$(1)) \ + $$(RUSTLLVM_CXXFLAGS_$(1)) \ $$(EXTRA_RUSTLLVM_CXXFLAGS_$(1)) \ $$(RUSTLLVM_INCS_$(1)) \ $$< diff --git a/mk/snap.mk b/mk/snap.mk deleted file mode 100644 index 0b34f52b7ebbe..0000000000000 --- a/mk/snap.mk +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2012 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -define DEF_SNAP_FOR_STAGE_H -# $(1) stage -# $(2) triple - -snap-stage$(1)-H-$(2): $$(HSREQ$(1)_H_$(2)) - $(CFG_PYTHON) $(S)src/etc/make-snapshot.py stage$(1) $(2) - -endef - -$(foreach host,$(CFG_HOST), \ - $(foreach stage,1 2 3, \ - $(eval $(call DEF_SNAP_FOR_STAGE_H,$(stage),$(host))))) - -snap-stage1: snap-stage1-H-$(CFG_BUILD) - -snap-stage2: snap-stage2-H-$(CFG_BUILD) - -snap-stage3: snap-stage3-H-$(CFG_BUILD) diff --git a/mk/stage0.mk b/mk/stage0.mk index 460a4a7f445e3..8a2bf2ebbde64 100644 --- a/mk/stage0.mk +++ b/mk/stage0.mk @@ -10,17 +10,15 @@ $(HLIB0_H_$(CFG_BUILD))/: endif $(SNAPSHOT_RUSTC_POST_CLEANUP): \ - $(S)src/snapshots.txt \ - $(S)src/etc/get-snapshot.py $(MKFILE_DEPS) \ + $(S)src/stage0.txt \ + $(S)src/etc/local_stage0.sh \ + $(S)src/etc/get-stage0.py $(MKFILE_DEPS) \ | $(HBIN0_H_$(CFG_BUILD))/ - @$(call E, fetch: $@) -# Note: the variable "SNAPSHOT_FILE" is generally not set, and so -# we generally only pass one argument to this script. ifdef CFG_ENABLE_LOCAL_RUST $(Q)$(S)src/etc/local_stage0.sh $(CFG_BUILD) $(CFG_LOCAL_RUST_ROOT) rustlib else - $(Q)$(CFG_PYTHON) $(S)src/etc/get-snapshot.py $(CFG_BUILD) $(SNAPSHOT_FILE) + $(Q)$(CFG_PYTHON) $(S)src/etc/get-stage0.py $(CFG_BUILD) endif $(Q)if [ -e "$@" ]; then touch "$@"; else echo "ERROR: snapshot $@ not found"; exit 1; fi diff --git a/mk/target.mk b/mk/target.mk index f90b09479c985..1b139909ab458 100644 --- a/mk/target.mk +++ b/mk/target.mk @@ -17,14 +17,6 @@ export CFG_COMPILER_HOST_TRIPLE export CFG_DEFAULT_LINKER export CFG_DEFAULT_AR -# The standard libraries should be held up to a higher standard than any old -# code, make sure that these common warnings are denied by default. These can -# be overridden during development temporarily. 
For stage0, we allow warnings -# which may be bugs in stage0 (should be fixed in stage1+) -RUST_LIB_FLAGS_ST0 += -W warnings -RUST_LIB_FLAGS_ST1 += -D warnings -RUST_LIB_FLAGS_ST2 += -D warnings - # Macro that generates the full list of dependencies for a crate at a particular # stage/target/host tuple. # @@ -36,7 +28,7 @@ define RUST_CRATE_FULLDEPS CRATE_FULLDEPS_$(1)_T_$(2)_H_$(3)_$(4) := \ $$(CRATEFILE_$(4)) \ $$(RSINPUTS_$(4)) \ - $$(foreach dep,$$(RUST_DEPS_$(4)), \ + $$(foreach dep,$$(RUST_DEPS_$(4)_T_$(2)), \ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(dep)) \ $$(foreach dep,$$(NATIVE_DEPS_$(4)), \ $$(RT_OUTPUT_DIR_$(2))/$$(call CFG_STATIC_LIB_NAME_$(2),$$(dep))) \ @@ -97,6 +89,7 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$(4): \ $$(RUSTFLAGS$(1)_$(4)_T_$(2)) \ --out-dir $$(@D) \ -C extra-filename=-$$(CFG_FILENAME_EXTRA) \ + -C metadata=$$(CFG_FILENAME_EXTRA) \ $$< @touch -r $$@.start_time $$@ && rm $$@.start_time $$(call LIST_ALL_OLD_GLOB_MATCHES, \ @@ -155,7 +148,7 @@ ifeq ($$(CFG_RUSTRT_HAS_STARTUP_OBJS_$(2)), 1) # Add dependencies on Rust startup objects to all crates that depend on core. # This ensures that they are built after core (since they depend on it), # but before everything else (since they are needed for linking dylib crates). 
-$$(foreach crate, $$(TARGET_CRATES), \ +$$(foreach crate, $$(TARGET_CRATES_$(2)), \ $$(if $$(findstring core,$$(DEPS_$$(crate))), \ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(crate))) : $$(TLIB$(1)_T_$(2)_H_$(3))/$(4).o endif @@ -174,11 +167,15 @@ SNAPSHOT_RUSTC_POST_CLEANUP=$(HBIN0_H_$(CFG_BUILD))/rustc$(X_$(CFG_BUILD)) define TARGET_HOST_RULES -$$(TLIB$(1)_T_$(2)_H_$(3))/: +$$(TLIB$(1)_T_$(2)_H_$(3))/: $$(SNAPSHOT_RUSTC_POST_CLEANUP) + mkdir -p $$@ + +$$(TBIN$(1)_T_$(2)_H_$(3))/: $$(SNAPSHOT_RUSTC_POST_CLEANUP) mkdir -p $$@ $$(TLIB$(1)_T_$(2)_H_$(3))/%: $$(RT_OUTPUT_DIR_$(2))/% \ - | $$(TLIB$(1)_T_$(2)_H_$(3))/ $$(SNAPSHOT_RUSTC_POST_CLEANUP) + $$(SNAPSHOT_RUSTC_POST_CLEANUP) \ + | $$(TLIB$(1)_T_$(2)_H_$(3))/ @$$(call E, cp: $$@) $$(Q)cp $$< $$@ endef diff --git a/mk/tests.mk b/mk/tests.mk index 0f30ff8711e5e..35ee7697a7a69 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -23,10 +23,11 @@ DEPS_collectionstest := $(eval $(call RUST_CRATE,collectionstest)) TEST_TARGET_CRATES = $(filter-out core rustc_unicode alloc_system libc \ - alloc_jemalloc,$(TARGET_CRATES)) \ + alloc_jemalloc panic_unwind \ + panic_abort,$(TARGET_CRATES)) \ collectionstest coretest TEST_DOC_CRATES = $(DOC_CRATES) arena flate fmt_macros getopts graphviz \ - log rand rbml serialize syntax term test + log rand serialize syntax term test TEST_HOST_CRATES = $(filter-out rustc_typeck rustc_borrowck rustc_resolve \ rustc_trans rustc_lint,\ $(HOST_CRATES)) @@ -45,16 +46,11 @@ ifdef CHECK_IGNORED TESTARGS += --ignored endif -# Arguments to the cfail/rfail/rpass/bench tests +# Arguments to the cfail/rfail/rpass tests ifdef CFG_VALGRIND CTEST_RUNTOOL = --runtool "$(CFG_VALGRIND)" endif -# Arguments to the perf tests -ifdef CFG_PERF_TOOL - CTEST_PERF_RUNTOOL = --runtool "$(CFG_PERF_TOOL)" -endif - CTEST_TESTARGS := $(TESTARGS) # --bench is only relevant for crate tests, not for the compile tests @@ -70,12 +66,6 @@ endif # This prevents tests from failing with some locales (fixes #17423). 
export LC_ALL=C -# If we're running perf then set this environment variable -# to put the benchmarks into 'hard mode' -ifeq ($(MAKECMDGOALS),perf) - export RUST_BENCH=1 -endif - TEST_LOG_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).log TEST_OK_FILE=tmp/check-stage$(1)-T-$(2)-H-$(3)-$(4).ok @@ -146,7 +136,7 @@ $(info check: android device test dir $(CFG_ADB_TEST_DIR) ready \ $(foreach target,$(CFG_TARGET), \ $(if $(findstring android, $(target)), \ $(shell $(CFG_ADB) shell mkdir $(CFG_ADB_TEST_DIR)/$(target)) \ - $(foreach crate,$(TARGET_CRATES), \ + $(foreach crate,$(TARGET_CRATES_$(target)), \ $(shell $(CFG_ADB) push $(TLIB2_T_$(target)_H_$(CFG_BUILD))/$(call CFG_LIB_GLOB_$(target),$(crate)) \ $(CFG_ADB_TEST_DIR)/$(target))), \ ))) @@ -251,52 +241,19 @@ cleantestlibs: # Tidy ###################################################################### -ifdef CFG_NOTIDY .PHONY: tidy -tidy: -else - -# Run the tidy script in multiple parts to avoid huge 'echo' commands -.PHONY: tidy -tidy: tidy-basic tidy-binaries tidy-errors tidy-features - -endif - -.PHONY: tidy-basic -tidy-basic: - @$(call E, check: formatting) - $(Q) $(CFG_PYTHON) $(S)src/etc/tidy.py $(S)src/ - -.PHONY: tidy-binaries -tidy-binaries: - @$(call E, check: binaries) - $(Q)find $(S)src -type f \ - \( -perm -u+x -or -perm -g+x -or -perm -o+x \) \ - -not -name '*.rs' -and -not -name '*.py' \ - -and -not -name '*.sh' -and -not -name '*.pp' \ - | grep '^$(S)src/jemalloc' -v \ - | grep '^$(S)src/libuv' -v \ - | grep '^$(S)src/llvm' -v \ - | grep '^$(S)src/rt/hoedown' -v \ - | grep '^$(S)src/gyp' -v \ - | grep '^$(S)src/etc' -v \ - | grep '^$(S)src/doc' -v \ - | grep '^$(S)src/compiler-rt' -v \ - | grep '^$(S)src/libbacktrace' -v \ - | grep '^$(S)src/rust-installer' -v \ - | grep '^$(S)src/liblibc' -v \ - | xargs $(CFG_PYTHON) $(S)src/etc/check-binaries.py - -.PHONY: tidy-errors -tidy-errors: - @$(call E, check: extended errors) - $(Q) $(CFG_PYTHON) $(S)src/etc/errorck.py $(S)src/ - -.PHONY: tidy-features 
-tidy-features: - @$(call E, check: feature sanity) - $(Q) $(CFG_PYTHON) $(S)src/etc/featureck.py $(S)src/ - +tidy: $(HBIN0_H_$(CFG_BUILD))/tidy$(X_$(CFG_BUILD)) \ + $(SNAPSHOT_RUSTC_POST_CLEANUP) + $(TARGET_RPATH_VAR0_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $< $(S)src + +$(HBIN0_H_$(CFG_BUILD))/tidy$(X_$(CFG_BUILD)): \ + $(TSREQ0_T_$(CFG_BUILD)_H_$(CFG_BUILD)) \ + $(TLIB0_T_$(CFG_BUILD)_H_$(CFG_BUILD))/stamp.std \ + $(call rwildcard,$(S)src/tools/tidy/src,*.rs) \ + $(SNAPSHOT_RUSTC_POST_CLEANUP) | \ + $(TLIB0_T_$(CFG_BUILD)_H_$(CFG_BUILD)) + $(STAGE0_T_$(CFG_BUILD)_H_$(CFG_BUILD)) $(S)src/tools/tidy/src/main.rs \ + --out-dir $(@D) --crate-name tidy ###################################################################### # Sets of tests @@ -309,25 +266,39 @@ check-stage$(1)-T-$(2)-H-$(3)-exec: \ check-stage$(1)-T-$(2)-H-$(3)-rfail-exec \ check-stage$(1)-T-$(2)-H-$(3)-cfail-exec \ check-stage$(1)-T-$(2)-H-$(3)-pfail-exec \ - check-stage$(1)-T-$(2)-H-$(3)-rpass-valgrind-exec \ - check-stage$(1)-T-$(2)-H-$(3)-rpass-full-exec \ - check-stage$(1)-T-$(2)-H-$(3)-rfail-full-exec \ - check-stage$(1)-T-$(2)-H-$(3)-cfail-full-exec \ + check-stage$(1)-T-$(2)-H-$(3)-rpass-valgrind-exec \ check-stage$(1)-T-$(2)-H-$(3)-rmake-exec \ check-stage$(1)-T-$(2)-H-$(3)-rustdocck-exec \ - check-stage$(1)-T-$(2)-H-$(3)-crates-exec \ - check-stage$(1)-T-$(2)-H-$(3)-doc-crates-exec \ - check-stage$(1)-T-$(2)-H-$(3)-bench-exec \ + check-stage$(1)-T-$(2)-H-$(3)-crates-exec \ + check-stage$(1)-T-$(2)-H-$(3)-doc-crates-exec \ check-stage$(1)-T-$(2)-H-$(3)-debuginfo-gdb-exec \ check-stage$(1)-T-$(2)-H-$(3)-debuginfo-lldb-exec \ - check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \ + check-stage$(1)-T-$(2)-H-$(3)-incremental-exec \ + check-stage$(1)-T-$(2)-H-$(3)-ui-exec \ check-stage$(1)-T-$(2)-H-$(3)-doc-exec \ - check-stage$(1)-T-$(2)-H-$(3)-pretty-exec + check-stage$(1)-T-$(2)-H-$(3)-doc-error-index-exec \ + check-stage$(1)-T-$(2)-H-$(3)-pretty-exec \ + check-stage$(1)-T-$(2)-H-$(3)-mir-opt-exec + +ifndef 
CFG_DISABLE_CODEGEN_TESTS +check-stage$(1)-T-$(2)-H-$(3)-exec: \ + check-stage$(1)-T-$(2)-H-$(3)-codegen-exec \ + check-stage$(1)-T-$(2)-H-$(3)-codegen-units-exec +endif # Only test the compiler-dependent crates when the target is # able to build a compiler (when the target triple is in the set of host triples) ifneq ($$(findstring $(2),$$(CFG_HOST)),) +check-stage$(1)-T-$(2)-H-$(3)-exec: \ + check-stage$(1)-T-$(2)-H-$(3)-rpass-full-exec \ + check-stage$(1)-T-$(2)-H-$(3)-rfail-full-exec \ + check-stage$(1)-T-$(2)-H-$(3)-cfail-full-exec + +check-stage$(1)-T-$(2)-H-$(3)-pretty-exec: \ + check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-full-exec \ + check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-full-exec + check-stage$(1)-T-$(2)-H-$(3)-crates-exec: \ $$(foreach crate,$$(TEST_CRATES), \ check-stage$(1)-T-$(2)-H-$(3)-$$(crate)-exec) @@ -351,10 +322,7 @@ check-stage$(1)-T-$(2)-H-$(3)-doc-exec: \ check-stage$(1)-T-$(2)-H-$(3)-pretty-exec: \ check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-exec \ check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-valgrind-exec \ - check-stage$(1)-T-$(2)-H-$(3)-pretty-rpass-full-exec \ check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-exec \ - check-stage$(1)-T-$(2)-H-$(3)-pretty-rfail-full-exec \ - check-stage$(1)-T-$(2)-H-$(3)-pretty-bench-exec \ check-stage$(1)-T-$(2)-H-$(3)-pretty-pretty-exec endef @@ -376,7 +344,7 @@ define TEST_RUNNER # parent crates. 
ifeq ($(NO_REBUILD),) TESTDEP_$(1)_$(2)_$(3)_$(4) = $$(SREQ$(1)_T_$(2)_H_$(3)) \ - $$(foreach crate,$$(TARGET_CRATES), \ + $$(foreach crate,$$(TARGET_CRATES_$(2)), \ $$(TLIB$(1)_T_$(2)_H_$(3))/stamp.$$(crate)) \ $$(CRATE_FULLDEPS_$(1)_T_$(2)_H_$(3)_$(4)) @@ -391,7 +359,7 @@ $(3)/stage$(1)/test/$(4)test-$(2)$$(X_$(2)): \ @$$(call E, rustc: $$@) $(Q)CFG_LLVM_LINKAGE_FILE=$$(LLVM_LINKAGE_PATH_$(2)) \ $$(subst @,,$$(STAGE$(1)_T_$(2)_H_$(3))) -o $$@ $$< --test \ - -L "$$(RT_OUTPUT_DIR_$(2))" \ + -Cmetadata="test-crate" -L "$$(RT_OUTPUT_DIR_$(2))" \ $$(LLVM_LIBDIR_RUSTFLAGS_$(2)) \ $$(RUSTFLAGS_$(4)) @@ -470,25 +438,28 @@ $(foreach host,$(CFG_HOST), \ # Rules for the compiletest tests (rpass, rfail, etc.) ###################################################################### -RPASS_RS := $(wildcard $(S)src/test/run-pass/*.rs) -RPASS_VALGRIND_RS := $(wildcard $(S)src/test/run-pass-valgrind/*.rs) -RPASS_FULL_RS := $(wildcard $(S)src/test/run-pass-fulldeps/*.rs) -RFAIL_FULL_RS := $(wildcard $(S)src/test/run-fail-fulldeps/*.rs) -CFAIL_FULL_RS := $(wildcard $(S)src/test/compile-fail-fulldeps/*.rs) -RFAIL_RS := $(wildcard $(S)src/test/run-fail/*.rs) -CFAIL_RS := $(wildcard $(S)src/test/compile-fail/*.rs) -PFAIL_RS := $(wildcard $(S)src/test/parse-fail/*.rs) -BENCH_RS := $(wildcard $(S)src/test/bench/*.rs) -PRETTY_RS := $(wildcard $(S)src/test/pretty/*.rs) -DEBUGINFO_GDB_RS := $(wildcard $(S)src/test/debuginfo/*.rs) -DEBUGINFO_LLDB_RS := $(wildcard $(S)src/test/debuginfo/*.rs) -CODEGEN_RS := $(wildcard $(S)src/test/codegen/*.rs) -CODEGEN_CC := $(wildcard $(S)src/test/codegen/*.cc) -RUSTDOCCK_RS := $(wildcard $(S)src/test/rustdoc/*.rs) - -# perf tests are the same as bench tests only they run under -# a performance monitor. 
-PERF_RS := $(wildcard $(S)src/test/bench/*.rs) +RPASS_RS := $(call rwildcard,$(S)src/test/run-pass/,*.rs) +RPASS_VALGRIND_RS := $(call rwildcard,$(S)src/test/run-pass-valgrind/,*.rs) +RPASS_FULL_RS := $(call rwildcard,$(S)src/test/run-pass-fulldeps/,*.rs) +RFAIL_FULL_RS := $(call rwildcard,$(S)src/test/run-fail-fulldeps/,*.rs) +CFAIL_FULL_RS := $(call rwildcard,$(S)src/test/compile-fail-fulldeps/,*.rs) +RFAIL_RS := $(call rwildcard,$(S)src/test/run-fail/,*.rs) +RFAIL_RS := $(call rwildcard,$(S)src/test/run-fail/,*.rs) +CFAIL_RS := $(call rwildcard,$(S)src/test/compile-fail/,*.rs) +PFAIL_RS := $(call rwildcard,$(S)src/test/parse-fail/,*.rs) +PRETTY_RS := $(call rwildcard,$(S)src/test/pretty/,*.rs) +DEBUGINFO_GDB_RS := $(call rwildcard,$(S)src/test/debuginfo/,*.rs) +DEBUGINFO_LLDB_RS := $(call rwildcard,$(S)src/test/debuginfo/,*.rs) +CODEGEN_RS := $(call rwildcard,$(S)src/test/codegen/,*.rs) +CODEGEN_CC := $(call rwildcard,$(S)src/test/codegen/,*.cc) +CODEGEN_UNITS_RS := $(call rwildcard,$(S)src/test/codegen-units/,*.rs) +INCREMENTAL_RS := $(call rwildcard,$(S)src/test/incremental/,*.rs) +RMAKE_RS := $(wildcard $(S)src/test/run-make/*/Makefile) +UI_RS := $(call rwildcard,$(S)src/test/ui/,*.rs) \ + $(call rwildcard,$(S)src/test/ui/,*.stdout) \ + $(call rwildcard,$(S)src/test/ui/,*.stderr) +RUSTDOCCK_RS := $(call rwildcard,$(S)src/test/rustdoc/,*.rs) +MIR_OPT_RS := $(call rwildcard,$(S)src/test/mir-opt/,*.rs) RPASS_TESTS := $(RPASS_RS) RPASS_VALGRIND_TESTS := $(RPASS_VALGRIND_RS) @@ -498,12 +469,15 @@ CFAIL_FULL_TESTS := $(CFAIL_FULL_RS) RFAIL_TESTS := $(RFAIL_RS) CFAIL_TESTS := $(CFAIL_RS) PFAIL_TESTS := $(PFAIL_RS) -BENCH_TESTS := $(BENCH_RS) -PERF_TESTS := $(PERF_RS) PRETTY_TESTS := $(PRETTY_RS) DEBUGINFO_GDB_TESTS := $(DEBUGINFO_GDB_RS) DEBUGINFO_LLDB_TESTS := $(DEBUGINFO_LLDB_RS) CODEGEN_TESTS := $(CODEGEN_RS) $(CODEGEN_CC) +CODEGEN_UNITS_TESTS := $(CODEGEN_UNITS_RS) +INCREMENTAL_TESTS := $(INCREMENTAL_RS) +RMAKE_TESTS := $(RMAKE_RS) +UI_TESTS := $(UI_RS) 
+MIR_OPT_TESTS := $(MIR_OPT_RS) RUSTDOCCK_TESTS := $(RUSTDOCCK_RS) CTEST_SRC_BASE_rpass = run-pass @@ -546,16 +520,6 @@ CTEST_BUILD_BASE_pfail = parse-fail CTEST_MODE_pfail = parse-fail CTEST_RUNTOOL_pfail = $(CTEST_RUNTOOL) -CTEST_SRC_BASE_bench = bench -CTEST_BUILD_BASE_bench = bench -CTEST_MODE_bench = run-pass -CTEST_RUNTOOL_bench = $(CTEST_RUNTOOL) - -CTEST_SRC_BASE_perf = bench -CTEST_BUILD_BASE_perf = perf -CTEST_MODE_perf = run-pass -CTEST_RUNTOOL_perf = $(CTEST_PERF_RUNTOOL) - CTEST_SRC_BASE_debuginfo-gdb = debuginfo CTEST_BUILD_BASE_debuginfo-gdb = debuginfo-gdb CTEST_MODE_debuginfo-gdb = debuginfo-gdb @@ -571,6 +535,31 @@ CTEST_BUILD_BASE_codegen = codegen CTEST_MODE_codegen = codegen CTEST_RUNTOOL_codegen = $(CTEST_RUNTOOL) +CTEST_SRC_BASE_codegen-units = codegen-units +CTEST_BUILD_BASE_codegen-units = codegen-units +CTEST_MODE_codegen-units = codegen-units +CTEST_RUNTOOL_codegen-units = $(CTEST_RUNTOOL) + +CTEST_SRC_BASE_incremental = incremental +CTEST_BUILD_BASE_incremental = incremental +CTEST_MODE_incremental = incremental +CTEST_RUNTOOL_incremental = $(CTEST_RUNTOOL) + +CTEST_SRC_BASE_rmake = run-make +CTEST_BUILD_BASE_rmake = run-make +CTEST_MODE_rmake = run-make +CTEST_RUNTOOL_rmake = $(CTEST_RUNTOOL) + +CTEST_SRC_BASE_ui = ui +CTEST_BUILD_BASE_ui = ui +CTEST_MODE_ui = ui +CTEST_RUNTOOL_ui = $(CTEST_RUNTOOL) + +CTEST_SRC_BASE_mir-opt = mir-opt +CTEST_BUILD_BASE_mir-opt = mir-opt +CTEST_MODE_mir-opt = mir-opt +CTEST_RUNTOOL_mir-opt = $(CTEST_RUNTOOL) + CTEST_SRC_BASE_rustdocck = rustdoc CTEST_BUILD_BASE_rustdocck = rustdoc CTEST_MODE_rustdocck = rustdoc @@ -625,7 +614,7 @@ TEST_SREQ$(1)_T_$(2)_H_$(3) = \ $$(HBIN$(1)_H_$(3))/compiletest$$(X_$(3)) \ $$(SREQ$(1)_T_$(2)_H_$(3)) -# Rules for the cfail/rfail/rpass/bench/perf test runner +# Rules for the cfail/rfail/rpass test runner # The tests select when to use debug configuration on their own; # remove directive, if present, from CFG_RUSTC_FLAGS (issue #7898). 
@@ -643,29 +632,36 @@ endif # is a separate choice from whether to pass `-g` when building the # compiler and standard library themselves. CTEST_RUSTC_FLAGS := $$(subst -g,,$$(CTEST_RUSTC_FLAGS)) +CTEST_RUSTC_FLAGS := $$(subst -Cdebuginfo=1,,$$(CTEST_RUSTC_FLAGS)) ifdef CFG_ENABLE_DEBUGINFO_TESTS CTEST_RUSTC_FLAGS += -g endif -CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) := \ +CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3) = \ --compile-lib-path $$(HLIB$(1)_H_$(3)) \ --run-lib-path $$(TLIB$(1)_T_$(2)_H_$(3)) \ --rustc-path $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) \ --rustdoc-path $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \ - --llvm-bin-path $(CFG_LLVM_INST_DIR_$(CFG_BUILD))/bin \ - --aux-base $$(S)src/test/auxiliary/ \ + --llvm-filecheck $(CFG_LLVM_INST_DIR_$(CFG_BUILD))/bin/FileCheck \ --stage-id stage$(1)-$(2) \ --target $(2) \ --host $(3) \ - --python $$(CFG_PYTHON) \ - --gdb-version="$(CFG_GDB_VERSION)" \ + --docck-python $$(CFG_PYTHON) \ + --lldb-python $$(CFG_LLDB_PYTHON) \ + --gdb="$(CFG_GDB)" \ --lldb-version="$(CFG_LLDB_VERSION)" \ - --android-cross-path=$(CFG_ANDROID_CROSS_PATH) \ + --llvm-version="$$(LLVM_VERSION_$(3))" \ + --android-cross-path=$(CFG_ARM_LINUX_ANDROIDEABI_NDK) \ --adb-path=$(CFG_ADB) \ --adb-test-dir=$(CFG_ADB_TEST_DIR) \ --host-rustcflags "$(RUSTC_FLAGS_$(3)) $$(CTEST_RUSTC_FLAGS) -L $$(RT_OUTPUT_DIR_$(3))" \ --lldb-python-dir=$(CFG_LLDB_PYTHON_DIR) \ --target-rustcflags "$(RUSTC_FLAGS_$(2)) $$(CTEST_RUSTC_FLAGS) -L $$(RT_OUTPUT_DIR_$(2))" \ + --cc '$$(call FIND_COMPILER,$$(CC_$(2)))' \ + --cxx '$$(call FIND_COMPILER,$$(CXX_$(2)))' \ + --cflags "$$(CFG_GCCISH_CFLAGS_$(2))" \ + --llvm-components "$$(LLVM_ALL_COMPONENTS_$(2))" \ + --llvm-cxxflags "$$(LLVM_CXXFLAGS_$(2))" \ $$(CTEST_TESTARGS) ifdef CFG_VALGRIND_RPASS @@ -688,16 +684,22 @@ CTEST_DEPS_cfail-full_$(1)-T-$(2)-H-$(3) = $$(CFAIL_FULL_TESTS) $$(CSREQ$(1)_T_$ CTEST_DEPS_rfail_$(1)-T-$(2)-H-$(3) = $$(RFAIL_TESTS) CTEST_DEPS_cfail_$(1)-T-$(2)-H-$(3) = $$(CFAIL_TESTS) CTEST_DEPS_pfail_$(1)-T-$(2)-H-$(3) = 
$$(PFAIL_TESTS) -CTEST_DEPS_bench_$(1)-T-$(2)-H-$(3) = $$(BENCH_TESTS) -CTEST_DEPS_perf_$(1)-T-$(2)-H-$(3) = $$(PERF_TESTS) CTEST_DEPS_debuginfo-gdb_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_GDB_TESTS) CTEST_DEPS_debuginfo-lldb_$(1)-T-$(2)-H-$(3) = $$(DEBUGINFO_LLDB_TESTS) \ $(S)src/etc/lldb_batchmode.py \ $(S)src/etc/lldb_rust_formatters.py CTEST_DEPS_codegen_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_TESTS) +CTEST_DEPS_codegen-units_$(1)-T-$(2)-H-$(3) = $$(CODEGEN_UNITS_TESTS) +CTEST_DEPS_incremental_$(1)-T-$(2)-H-$(3) = $$(INCREMENTAL_TESTS) +CTEST_DEPS_rmake_$(1)-T-$(2)-H-$(3) = $$(RMAKE_TESTS) \ + $$(CSREQ$(1)_T_$(3)_H_$(3)) $$(SREQ$(1)_T_$(2)_H_$(3)) +CTEST_DEPS_ui_$(1)-T-$(2)-H-$(3) = $$(UI_TESTS) +CTEST_DEPS_mir-opt_$(1)-T-$(2)-H-$(3) = $$(MIR_OPT_TESTS) CTEST_DEPS_rustdocck_$(1)-T-$(2)-H-$(3) = $$(RUSTDOCCK_TESTS) \ - $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \ - $(S)src/etc/htmldocck.py + $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \ + $$(CSREQ$(1)_T_$(3)_H_$(3)) \ + $$(SREQ$(1)_T_$(3)_H_$(3)) \ + $(S)src/etc/htmldocck.py endef @@ -708,7 +710,7 @@ $(foreach host,$(CFG_HOST), \ define DEF_RUN_COMPILETEST -CTEST_ARGS$(1)-T-$(2)-H-$(3)-$(4) := \ +CTEST_ARGS$(1)-T-$(2)-H-$(3)-$(4) = \ $$(CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3)) \ --src-base $$(S)src/test/$$(CTEST_SRC_BASE_$(4))/ \ --build-base $(3)/test/$$(CTEST_BUILD_BASE_$(4))/ \ @@ -739,6 +741,10 @@ endif endif ifeq ($$(CTEST_DONT_RUN_$(1)-T-$(2)-H-$(3)-$(4)),) +$$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \ + export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(3))) +$$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \ + export LIB := $$(CFG_MSVC_LIB_PATH_$$(HOST_$(3))) $$(call TEST_OK_FILE,$(1),$(2),$(3),$(4)): \ $$(TEST_SREQ$(1)_T_$(2)_H_$(3)) \ $$(CTEST_DEPS_$(4)_$(1)-T-$(2)-H-$(3)) @@ -761,7 +767,8 @@ endif endef CTEST_NAMES = rpass rpass-valgrind rpass-full rfail-full cfail-full rfail cfail pfail \ - bench perf debuginfo-gdb debuginfo-lldb codegen rustdocck + debuginfo-gdb debuginfo-lldb codegen codegen-units rustdocck incremental \ + rmake 
ui mir-opt $(foreach host,$(CFG_HOST), \ $(eval $(foreach target,$(CFG_TARGET), \ @@ -770,20 +777,18 @@ $(foreach host,$(CFG_HOST), \ $(eval $(call DEF_RUN_COMPILETEST,$(stage),$(target),$(host),$(name)))))))))) PRETTY_NAMES = pretty-rpass pretty-rpass-valgrind pretty-rpass-full pretty-rfail-full pretty-rfail \ - pretty-bench pretty-pretty + pretty-pretty PRETTY_DEPS_pretty-rpass = $(RPASS_TESTS) PRETTY_DEPS_pretty-rpass-valgrind = $(RPASS_VALGRIND_TESTS) PRETTY_DEPS_pretty-rpass-full = $(RPASS_FULL_TESTS) PRETTY_DEPS_pretty-rfail-full = $(RFAIL_FULL_TESTS) PRETTY_DEPS_pretty-rfail = $(RFAIL_TESTS) -PRETTY_DEPS_pretty-bench = $(BENCH_TESTS) PRETTY_DEPS_pretty-pretty = $(PRETTY_TESTS) PRETTY_DIRNAME_pretty-rpass = run-pass PRETTY_DIRNAME_pretty-rpass-valgrind = run-pass-valgrind PRETTY_DIRNAME_pretty-rpass-full = run-pass-fulldeps PRETTY_DIRNAME_pretty-rfail-full = run-fail-fulldeps PRETTY_DIRNAME_pretty-rfail = run-fail -PRETTY_DIRNAME_pretty-bench = bench PRETTY_DIRNAME_pretty-pretty = pretty define DEF_PRETTY_FULLDEPS @@ -798,7 +803,7 @@ $(foreach host,$(CFG_HOST), \ define DEF_RUN_PRETTY_TEST -PRETTY_ARGS$(1)-T-$(2)-H-$(3)-$(4) := \ +PRETTY_ARGS$(1)-T-$(2)-H-$(3)-$(4) = \ $$(CTEST_COMMON_ARGS$(1)-T-$(2)-H-$(3)) \ --src-base $$(S)src/test/$$(PRETTY_DIRNAME_$(4))/ \ --build-base $(3)/test/$$(PRETTY_DIRNAME_$(4))/ \ @@ -917,6 +922,28 @@ $(foreach host,$(CFG_HOST), \ $(foreach crate,$(TEST_DOC_CRATES), \ $(eval $(call DEF_CRATE_DOC_TEST,$(stage),$(target),$(host),$(crate))))))) +define DEF_DOC_TEST_ERROR_INDEX + +check-stage$(1)-T-$(2)-H-$(3)-doc-error-index-exec: $$(call TEST_OK_FILE,$(1),$(2),$(3),doc-error-index) + +ifeq ($(2),$$(CFG_BUILD)) +$$(call TEST_OK_FILE,$(1),$(2),$(3),doc-error-index): \ + $$(TEST_SREQ$(1)_T_$(2)_H_$(3)) \ + doc/error-index.md + $$(Q)touch $$@.start_time + $$(RUSTDOC_$(1)_T_$(2)_H_$(3)) --test doc/error-index.md + $$(Q)touch -r $$@.start_time $$@ && rm $$@.start_time +else +$$(call TEST_OK_FILE,$(1),$(2),$(3),doc-error-index): + 
$$(Q)touch $$@ +endif +endef + +$(foreach host,$(CFG_HOST), \ + $(foreach target,$(CFG_TARGET), \ + $(foreach stage,$(STAGES), \ + $(eval $(call DEF_DOC_TEST_ERROR_INDEX,$(stage),$(target),$(host)))))) + ###################################################################### # Shortcut rules ###################################################################### @@ -926,30 +953,31 @@ TEST_GROUPS = \ $(foreach crate,$(TEST_CRATES),$(crate)) \ $(foreach crate,$(TEST_DOC_CRATES),doc-crate-$(crate)) \ rpass \ - rpass-valgrind \ + rpass-valgrind \ rpass-full \ rfail-full \ cfail-full \ rfail \ cfail \ pfail \ - bench \ - perf \ rmake \ rustdocck \ debuginfo-gdb \ debuginfo-lldb \ codegen \ + codegen-units \ + incremental \ + ui \ doc \ $(foreach docname,$(DOC_NAMES),doc-$(docname)) \ pretty \ pretty-rpass \ - pretty-rpass-valgrind \ + pretty-rpass-valgrind \ pretty-rpass-full \ pretty-rfail-full \ pretty-rfail \ - pretty-bench \ pretty-pretty \ + mir-opt \ $(NULL) define DEF_CHECK_FOR_STAGE_AND_TARGET_AND_HOST @@ -1014,7 +1042,8 @@ define DEF_CHECK_DOC_FOR_STAGE check-stage$(1)-docs: $$(foreach docname,$$(DOC_NAMES), \ check-stage$(1)-T-$$(CFG_BUILD)-H-$$(CFG_BUILD)-doc-$$(docname)) \ $$(foreach crate,$$(TEST_DOC_CRATES), \ - check-stage$(1)-T-$$(CFG_BUILD)-H-$$(CFG_BUILD)-doc-crate-$$(crate)) + check-stage$(1)-T-$$(CFG_BUILD)-H-$$(CFG_BUILD)-doc-crate-$$(crate)) \ + check-stage$(1)-T-$$(CFG_BUILD)-H-$$(CFG_BUILD)-doc-error-index-exec endef $(foreach stage,$(STAGES), \ @@ -1026,66 +1055,3 @@ endef $(foreach crate,$(TEST_CRATES), \ $(eval $(call DEF_CHECK_CRATE,$(crate)))) - -###################################################################### -# RMAKE rules -###################################################################### - -RMAKE_TESTS := $(shell ls -d $(S)src/test/run-make/*/) -RMAKE_TESTS := $(RMAKE_TESTS:$(S)src/test/run-make/%/=%) - -define DEF_RMAKE_FOR_T_H -# $(1) the stage -# $(2) target triple -# $(3) host triple - - -ifeq 
($(2)$(3),$$(CFG_BUILD)$$(CFG_BUILD)) -check-stage$(1)-T-$(2)-H-$(3)-rmake-exec: \ - $$(call TEST_OK_FILE,$(1),$(2),$(3),rmake) - -$$(call TEST_OK_FILE,$(1),$(2),$(3),rmake): \ - $$(RMAKE_TESTS:%=$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok) - @touch $$@ - -$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \ - export INCLUDE := $$(CFG_MSVC_INCLUDE_PATH_$$(HOST_$(3))) -$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \ - export LIB := $$(CFG_MSVC_LIB_PATH_$$(HOST_$(3))) -$(3)/test/run-make/%-$(1)-T-$(2)-H-$(3).ok: \ - $(S)src/test/run-make/%/Makefile \ - $$(CSREQ$(1)_T_$(2)_H_$(3)) - @rm -rf $(3)/test/run-make/$$* - @mkdir -p $(3)/test/run-make/$$* - $$(Q)touch $$@.start_time - $$(Q)$$(CFG_PYTHON) $(S)src/etc/maketest.py $$(dir $$<) \ - $$(MAKE) \ - $$(HBIN$(1)_H_$(3))/rustc$$(X_$(3)) \ - $(3)/test/run-make/$$* \ - '$$(CC_$(3))' \ - "$$(CFG_GCCISH_CFLAGS_$(3))" \ - $$(HBIN$(1)_H_$(3))/rustdoc$$(X_$(3)) \ - "$$(TESTNAME)" \ - $$(LD_LIBRARY_PATH_ENV_NAME$(1)_T_$(2)_H_$(3)) \ - "$$(LD_LIBRARY_PATH_ENV_HOSTDIR$(1)_T_$(2)_H_$(3))" \ - "$$(LD_LIBRARY_PATH_ENV_TARGETDIR$(1)_T_$(2)_H_$(3))" \ - $(1) \ - $$(S) \ - $(3) \ - "$$(LLVM_LIBDIR_RUSTFLAGS_$(3))" \ - "$$(LLVM_ALL_COMPONENTS_$(3))" - @touch -r $$@.start_time $$@ && rm $$@.start_time -else -# FIXME #11094 - The above rule doesn't work right for multiple targets -check-stage$(1)-T-$(2)-H-$(3)-rmake-exec: - @true - -endif - - -endef - -$(foreach stage,$(STAGES), \ - $(foreach target,$(CFG_TARGET), \ - $(foreach host,$(CFG_HOST), \ - $(eval $(call DEF_RMAKE_FOR_T_H,$(stage),$(target),$(host)))))) diff --git a/src/Cargo.lock b/src/Cargo.lock new file mode 100644 index 0000000000000..b3388563adc52 --- /dev/null +++ b/src/Cargo.lock @@ -0,0 +1,686 @@ +[root] +name = "unwind" +version = "0.0.0" +dependencies = [ + "core 0.0.0", + "libc 0.0.0", +] + +[[package]] +name = "alloc" +version = "0.0.0" +dependencies = [ + "core 0.0.0", +] + +[[package]] +name = "alloc_jemalloc" +version = "0.0.0" +dependencies = [ + "build_helper 0.1.0", + 
"core 0.0.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.0.0", +] + +[[package]] +name = "alloc_system" +version = "0.0.0" +dependencies = [ + "core 0.0.0", + "libc 0.0.0", +] + +[[package]] +name = "arena" +version = "0.0.0" + +[[package]] +name = "bootstrap" +version = "0.0.0" +dependencies = [ + "build_helper 0.1.0", + "cmake 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "build_helper" +version = "0.1.0" + +[[package]] +name = "cargotest" +version = "0.1.0" + +[[package]] +name = "cmake" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "collections" +version = "0.0.0" +dependencies = [ + "alloc 0.0.0", + "core 0.0.0", + "rustc_unicode 0.0.0", +] + +[[package]] +name = "compiler_builtins" +version = "0.0.0" +dependencies = [ + "core 0.0.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "compiletest" +version = "0.0.0" +dependencies = [ + "env_logger 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serialize 0.0.0", +] + +[[package]] +name = "core" +version = "0.0.0" + +[[package]] +name = "env_logger" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "error_index_generator" +version = "0.0.0" + +[[package]] +name = "filetime" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "flate" +version = "0.0.0" +dependencies = [ + "build_helper 0.1.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "fmt_macros" +version = "0.0.0" + +[[package]] +name = "gcc" +version = "0.3.38" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "getopts" +version = "0.0.0" + +[[package]] +name = "getopts" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "graphviz" +version = "0.0.0" + +[[package]] +name = "libc" +version = "0.0.0" +dependencies = [ + "core 0.0.0", +] + +[[package]] +name = "libc" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "linkchecker" +version = "0.1.0" + +[[package]] +name = "log" +version = "0.0.0" + +[[package]] +name = "log" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "num_cpus" +version = "0.2.13" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "panic_abort" +version = "0.0.0" +dependencies = [ + "core 0.0.0", + "libc 0.0.0", +] + +[[package]] +name = "panic_unwind" +version = "0.0.0" +dependencies = [ + "alloc 0.0.0", + "core 0.0.0", + "libc 0.0.0", + "unwind 0.0.0", +] + +[[package]] +name = "proc_macro" +version = "0.0.0" +dependencies = [ + "syntax 0.0.0", +] + +[[package]] +name = "proc_macro_plugin" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "proc_macro_tokens 0.0.0", + "rustc_plugin 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "proc_macro_tokens" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rand" +version = "0.0.0" +dependencies = [ + "core 0.0.0", +] + +[[package]] +name = "rustbook" +version = "0.0.0" + +[[package]] +name = "rustc" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "flate 0.0.0", + "fmt_macros 0.0.0", + "graphviz 0.0.0", + "log 0.0.0", + "rustc_back 0.0.0", + "rustc_bitflags 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_llvm 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc-main" +version = "0.0.0" +dependencies = [ + "rustc_back 0.0.0", + "rustc_driver 0.0.0", + "rustdoc 0.0.0", +] + +[[package]] +name = "rustc-serialize" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_back" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", +] + +[[package]] +name = "rustc_bitflags" +version = "0.0.0" + +[[package]] +name = "rustc_borrowck" +version = "0.0.0" +dependencies = [ + "graphviz 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + 
"rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_mir 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_const_eval" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "graphviz 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_const_math" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", +] + +[[package]] +name = "rustc_data_structures" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "serialize 0.0.0", +] + +[[package]] +name = "rustc_driver" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "flate 0.0.0", + "graphviz 0.0.0", + "log 0.0.0", + "proc_macro_plugin 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_borrowck 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_incremental 0.0.0", + "rustc_lint 0.0.0", + "rustc_llvm 0.0.0", + "rustc_metadata 0.0.0", + "rustc_mir 0.0.0", + "rustc_passes 0.0.0", + "rustc_plugin 0.0.0", + "rustc_privacy 0.0.0", + "rustc_resolve 0.0.0", + "rustc_save_analysis 0.0.0", + "rustc_trans 0.0.0", + "rustc_typeck 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_ext 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_errors" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "serialize 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_incremental" +version = "0.0.0" +dependencies = [ + "graphviz 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_data_structures 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_lint" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_const_eval 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = 
"rustc_llvm" +version = "0.0.0" +dependencies = [ + "build_helper 0.1.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_bitflags 0.0.0", +] + +[[package]] +name = "rustc_metadata" +version = "0.0.0" +dependencies = [ + "flate 0.0.0", + "log 0.0.0", + "proc_macro 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_llvm 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_ext 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_mir" +version = "0.0.0" +dependencies = [ + "graphviz 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_bitflags 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_passes" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "rustc 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_const_math 0.0.0", + "rustc_errors 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_platform_intrinsics" +version = "0.0.0" + +[[package]] +name = "rustc_plugin" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_bitflags 0.0.0", + "rustc_errors 0.0.0", + "rustc_metadata 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_privacy" +version = "0.0.0" +dependencies = [ + "rustc 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_resolve" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_errors 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_save_analysis" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "rustc 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_trans" +version = "0.0.0" +dependencies = [ + "arena 
0.0.0", + "flate 0.0.0", + "graphviz 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_bitflags 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_incremental 0.0.0", + "rustc_llvm 0.0.0", + "rustc_platform_intrinsics 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_typeck" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "fmt_macros 0.0.0", + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_platform_intrinsics 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "rustc_unicode" +version = "0.0.0" +dependencies = [ + "core 0.0.0", +] + +[[package]] +name = "rustdoc" +version = "0.0.0" +dependencies = [ + "arena 0.0.0", + "build_helper 0.1.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.0.0", + "rustc 0.0.0", + "rustc_back 0.0.0", + "rustc_const_eval 0.0.0", + "rustc_const_math 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_driver 0.0.0", + "rustc_errors 0.0.0", + "rustc_lint 0.0.0", + "rustc_metadata 0.0.0", + "rustc_resolve 0.0.0", + "rustc_trans 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "serialize" +version = "0.0.0" +dependencies = [ + "log 0.0.0", +] + +[[package]] +name = "std" +version = "0.0.0" +dependencies = [ + "alloc 0.0.0", + "alloc_jemalloc 0.0.0", + "alloc_system 0.0.0", + "build_helper 0.1.0", + "collections 0.0.0", + "compiler_builtins 0.0.0", + "core 0.0.0", + "gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.0.0", + "panic_abort 0.0.0", + "panic_unwind 0.0.0", + "rand 0.0.0", + "rustc_unicode 0.0.0", + "unwind 0.0.0", +] + +[[package]] +name = "std_shim" +version = "0.1.0" +dependencies 
= [ + "core 0.0.0", + "std 0.0.0", +] + +[[package]] +name = "syntax" +version = "0.0.0" +dependencies = [ + "log 0.0.0", + "rustc_bitflags 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "serialize 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "syntax_ext" +version = "0.0.0" +dependencies = [ + "fmt_macros 0.0.0", + "log 0.0.0", + "proc_macro 0.0.0", + "rustc_errors 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + +[[package]] +name = "syntax_pos" +version = "0.0.0" +dependencies = [ + "serialize 0.0.0", +] + +[[package]] +name = "term" +version = "0.0.0" + +[[package]] +name = "test" +version = "0.0.0" +dependencies = [ + "getopts 0.0.0", + "term 0.0.0", +] + +[[package]] +name = "test_shim" +version = "0.1.0" +dependencies = [ + "test 0.0.0", +] + +[[package]] +name = "tidy" +version = "0.1.0" + +[[package]] +name = "toml" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum cmake 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0e5bcf27e097a184c1df4437654ed98df3d7a516e8508a6ba45d8b092bbdf283" +"checksum env_logger 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f" +"checksum filetime 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922" +"checksum gcc 0.3.38 (registry+https://github.com/rust-lang/crates.io-index)" = "553f11439bdefe755bf366b264820f1da70f3aaf3924e594b886beb9c831bcf5" +"checksum getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685" +"checksum libc 0.2.17 
(registry+https://github.com/rust-lang/crates.io-index)" = "044d1360593a78f5c8e5e710beccdc24ab71d1f01bc19a29bcacdba22e8475d8" +"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" +"checksum num_cpus 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "cee7e88156f3f9e19bdd598f8d6c9db7bf4078f99f8381f43a55b09648d1a6e3" +"checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b" +"checksum toml 0.1.30 (registry+https://github.com/rust-lang/crates.io-index)" = "0590d72182e50e879c4da3b11c6488dae18fccb1ae0c7a3eda18e16795844796" diff --git a/src/Cargo.toml b/src/Cargo.toml new file mode 100644 index 0000000000000..8fb5c70c41bf1 --- /dev/null +++ b/src/Cargo.toml @@ -0,0 +1,30 @@ +[workspace] +members = [ + "bootstrap", + "rustc", + "rustc/std_shim", + "rustc/test_shim", + "tools/cargotest", + "tools/compiletest", + "tools/error_index_generator", + "tools/linkchecker", + "tools/rustbook", + "tools/tidy", +] + +# Curiously, compiletest will segfault if compiled with opt-level=3 on 64-bit +# MSVC when running the compile-fail test suite when a should-fail test panics. +# But hey if this is removed and it gets past the bots, sounds good to me. +[profile.release] +opt-level = 2 +[profile.bench] +opt-level = 2 + +# These options are controlled from our rustc wrapper script, so turn them off +# here and have them controlled elsewhere. 
+[profile.dev] +debug = false +debug-assertions = false +[profile.test] +debug = false +debug-assertions = false diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml new file mode 100644 index 0000000000000..35f8fb43f7b56 --- /dev/null +++ b/src/bootstrap/Cargo.toml @@ -0,0 +1,31 @@ +[package] +authors = ["The Rust Project Developers"] +name = "bootstrap" +version = "0.0.0" + +[lib] +name = "bootstrap" +path = "lib.rs" + +[[bin]] +name = "bootstrap" +path = "bin/main.rs" + +[[bin]] +name = "rustc" +path = "bin/rustc.rs" + +[[bin]] +name = "rustdoc" +path = "bin/rustdoc.rs" + +[dependencies] +build_helper = { path = "../build_helper" } +cmake = "0.1.17" +filetime = "0.1" +num_cpus = "0.2" +toml = "0.1" +getopts = "0.2" +rustc-serialize = "0.3" +gcc = "0.3.38" +libc = "0.2" diff --git a/src/bootstrap/README.md b/src/bootstrap/README.md new file mode 100644 index 0000000000000..24d716c11958e --- /dev/null +++ b/src/bootstrap/README.md @@ -0,0 +1,288 @@ +# rustbuild - Bootstrapping Rust + +This is an in-progress README which is targeted at helping to explain how Rust +is bootstrapped and in general some of the technical details of the build +system. + +> **Note**: This build system is currently under active development and is not +> intended to be the primarily used one just yet. The makefiles are currently +> the ones that are still "guaranteed to work" as much as possible at least. + +## Using rustbuild + +The rustbuild build system has a primary entry point, a top level `x.py` script: + +``` +python ./x.py build +``` + +Note that if you're on Unix you should be able to execute the script directly: + +``` +./x.py build +``` + +The script accepts commands, flags, and filters to determine what to do: + +* `build` - a general purpose command for compiling code. Alone `build` will + bootstrap the entire compiler, and otherwise arguments passed indicate what to + build. 
For example: + + ``` + # build the whole compiler + ./x.py build + + # build the stage1 compiler + ./x.py build --stage 1 + + # build stage0 libstd + ./x.py build --stage 0 src/libstd + + # build a particular crate in stage0 + ./x.py build --stage 0 src/libtest + ``` + +* `test` - a command for executing unit tests. Like the `build` command this + will execute the entire test suite by default, and otherwise it can be used to + select which test suite is run: + + ``` + # run all unit tests + ./x.py test + + # execute the run-pass test suite + ./x.py test src/test/run-pass + + # execute only some tests in the run-pass test suite + ./x.py test src/test/run-pass --filter my-filter + + # execute tests in the standard library in stage0 + ./x.py test --stage 0 src/libstd + + # execute all doc tests + ./x.py test src/doc + ``` + +* `doc` - a command for building documentation. Like above can take arguments + for what to document. + +If you're more used to `./configure` and `make`, however, then you can also +configure the build system to use rustbuild instead of the old makefiles: + +``` +./configure --enable-rustbuild +make +``` + +Afterwards the `Makefile` which is generated will have a few commands like +`make check`, `make tidy`, etc. + +## Configuring rustbuild + +There are currently two primary methods for configuring the rustbuild build +system. First, the `./configure` options serialized in `config.mk` will be +parsed and read. That is, if any `./configure` options are passed, they'll be +handled naturally. + +Next, rustbuild offers a TOML-based configuration system with a `config.toml` +file in the same location as `config.mk`. An example of this configuration can +be found at `src/bootstrap/config.toml.example`, and the configuration file +can also be passed as `--config path/to/config.toml` if the build system is +being invoked manually (via the python script). 
+ +## Build stages + +The rustbuild build system goes through a few phases to actually build the +compiler. What actually happens when you invoke rustbuild is: + +1. The entry point script, `x.py` is run. This script is + responsible for downloading the stage0 compiler/Cargo binaries, and it then + compiles the build system itself (this folder). Finally, it then invokes the + actual `bootstrap` binary build system. +2. In Rust, `bootstrap` will slurp up all configuration, perform a number of + sanity checks (compilers exist for example), and then start building the + stage0 artifacts. +3. The stage0 `cargo` downloaded earlier is used to build the standard library + and the compiler, and then these binaries are then copied to the `stage1` + directory. That compiler is then used to generate the stage1 artifacts which + are then copied to the stage2 directory, and then finally the stage2 + artifacts are generated using that compiler. + +The goal of each stage is to (a) leverage Cargo as much as possible and failing +that (b) leverage Rust as much as possible! + +## Directory Layout + +This build system houses all output under the `build` directory, which looks +like this: + +``` +# Root folder of all output. Everything is scoped underneath here +build/ + + # Location where the stage0 compiler downloads are all cached. This directory + # only contains the tarballs themselves as they're extracted elsewhere. + cache/ + 2015-12-19/ + 2016-01-15/ + 2016-01-21/ + ... + + # Output directory for building this build system itself. The stage0 + # cargo/rustc are used to build the build system into this location. + bootstrap/ + debug/ + release/ + + # Output of the dist-related steps like dist-std, dist-rustc, and dist-docs + dist/ + + # Temporary directory used for various input/output as part of various stages + tmp/ + + # Each remaining directory is scoped by the "host" triple of compilation at + # hand. 
+ x86_64-unknown-linux-gnu/ + + # The build artifacts for the `compiler-rt` library for the target this + # folder is under. The exact layout here will likely depend on the platform, + # and this is also built with CMake so the build system is also likely + # different. + compiler-rt/ + build/ + + # Output folder for LLVM if it is compiled for this target + llvm/ + + # build folder (e.g. the platform-specific build system). Like with + # compiler-rt this is compiled with CMake + build/ + + # Installation of LLVM. Note that we run the equivalent of 'make install' + # for LLVM to setup these folders. + bin/ + lib/ + include/ + share/ + ... + + # Output folder for all documentation of this target. This is what's filled + # in whenever the `doc` step is run. + doc/ + + # Output for all compiletest-based test suites + test/ + run-pass/ + compile-fail/ + debuginfo/ + ... + + # Location where the stage0 Cargo and Rust compiler are unpacked. This + # directory is purely an extracted and overlaid tarball of these two (done + # by the bootstrapy python script). In theory the build system does not + # modify anything under this directory afterwards. + stage0/ + + # These to build directories are the cargo output directories for builds of + # the standard library and compiler, respectively. Internally these may also + # have other target directories, which represent artifacts being compiled + # from the host to the specified target. + # + # Essentially, each of these directories is filled in by one `cargo` + # invocation. The build system instruments calling Cargo in the right order + # with the right variables to ensure these are filled in correctly. + stageN-std/ + stageN-test/ + stageN-rustc/ + stageN-tools/ + + # This is a special case of the above directories, **not** filled in via + # Cargo but rather the build system itself. The stage0 compiler already has + # a set of target libraries for its own host triple (in its own sysroot) + # inside of stage0/. 
When we run the stage0 compiler to bootstrap more + # things, however, we don't want to use any of these libraries (as those are + # the ones that we're building). So essentially, when the stage1 compiler is + # being compiled (e.g. after libstd has been built), *this* is used as the + # sysroot for the stage0 compiler being run. + # + # Basically this directory is just a temporary artifact used to configure the + # stage0 compiler to ensure that the libstd we just built is used to + # compile the stage1 compiler. + stage0-sysroot/lib/ + + # These output directories are intended to be standalone working + # implementations of the compiler (corresponding to each stage). The build + # system will link (using hard links) output from stageN-{std,rustc} into + # each of these directories. + # + # In theory there is no extra build output in these directories. + stage1/ + stage2/ + stage3/ +``` + +## Cargo projects + +The current build is unfortunately not quite as simple as `cargo build` in a +directory, but rather the compiler is split into three different Cargo projects: + +* `src/rustc/std_shim` - a project which builds and compiles libstd +* `src/rustc/test_shim` - a project which builds and compiles libtest +* `src/rustc` - the actual compiler itself + +Each "project" has a corresponding Cargo.lock file with all dependencies, and +this means that building the compiler involves running Cargo three times. The +structure here serves two goals: + +1. Facilitating dependencies coming from crates.io. These dependencies don't + depend on `std`, so libstd is a separate project compiled ahead of time + before the actual compiler builds. +2. Splitting "host artifacts" from "target artifacts". That is, when building + code for an arbitrary target you don't need the entire compiler, but you'll + end up needing libraries like libtest that depend on std but also want to use + crates.io dependencies.
Hence, libtest is split out as its own project that + is sequenced after `std` but before `rustc`. This project is built for all + targets. + +There is some loss in build parallelism here because libtest can be compiled in +parallel with a number of rustc artifacts, but in theory the loss isn't too bad! + +## Build tools + +We've actually got quite a few tools that we use in the compiler's build system +and for testing. To organize these, each tool is a project in `src/tools` with a +corresponding `Cargo.toml`. All tools are compiled with Cargo (currently having +independent `Cargo.lock` files) and do not currently explicitly depend on the +compiler or standard library. Compiling each tool is sequenced after the +appropriate libstd/libtest/librustc compile above. + +## Extending rustbuild + +So you'd like to add a feature to the rustbuild build system or just fix a bug. +Great! One of the major motivational factors for moving away from `make` is that +Rust is in theory much easier to read, modify, and write. If you find anything +excessively confusing, please open an issue on this and we'll try to get it +documented or simplified pronto. + +First up, you'll probably want to read over the documentation above as that'll +give you a high level overview of what rustbuild is doing. You also probably +want to play around a bit yourself by just getting it up and running before you +dive too much into the actual build system itself. + +After that, each module in rustbuild should have enough documentation to keep +you up and running. Some general areas that you may be interested in modifying +are: + +* Adding a new build tool? Take a look at `build/step.rs` for examples of other + tools, as well as `build/mod.rs`. +* Adding a new compiler crate? Look no further! Adding crates can be done by + adding a new directory with `Cargo.toml` followed by configuring all + `Cargo.toml` files accordingly. +* Adding a new dependency from crates.io? 
We're still working on that, so hold + off on that for now. +* Adding a new configuration option? Take a look at `build/config.rs` or perhaps + `build/flags.rs` and then modify the build elsewhere to read that option. +* Adding a sanity check? Take a look at `build/sanity.rs`. + +If you have any questions feel free to reach out on `#rust-internals` on IRC or +open an issue in the bug tracker! diff --git a/src/bootstrap/bin/main.rs b/src/bootstrap/bin/main.rs new file mode 100644 index 0000000000000..c47f4fd8ec64b --- /dev/null +++ b/src/bootstrap/bin/main.rs @@ -0,0 +1,37 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! rustbuild, the Rust build system +//! +//! This is the entry point for the build system used to compile the `rustc` +//! compiler. Lots of documentation can be found in the `README.md` file next to +//! this file, and otherwise documentation can be found throughout the `build` +//! directory in each respective module. + +#![deny(warnings)] + +extern crate bootstrap; + +use std::env; + +use bootstrap::{Flags, Config, Build}; + +fn main() { + let args = env::args().skip(1).collect::>(); + let flags = Flags::parse(&args); + let mut config = Config::parse(&flags.build, flags.config.clone()); + + // compat with `./configure` while we're still using that + if std::fs::metadata("config.mk").is_ok() { + config.update_with_config_mk(); + } + + Build::new(flags, config).build(); +} diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs new file mode 100644 index 0000000000000..879eca60cc751 --- /dev/null +++ b/src/bootstrap/bin/rustc.rs @@ -0,0 +1,173 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Shim which is passed to Cargo as "rustc" when running the bootstrap. +//! +//! This shim will take care of some various tasks that our build process +//! requires that Cargo can't quite do through normal configuration: +//! +//! 1. When compiling build scripts and build dependencies, we need a guaranteed +//! full standard library available. The only compiler which actually has +//! this is the snapshot, so we detect this situation and always compile with +//! the snapshot compiler. +//! 2. We pass a bunch of `--cfg` and other flags based on what we're compiling +//! (and this slightly differs based on a whether we're using a snapshot or +//! not), so we do that all here. +//! +//! This may one day be replaced by RUSTFLAGS, but the dynamic nature of +//! switching compilers for the bootstrap and for build scripts will probably +//! never get replaced. + +extern crate bootstrap; + +use std::env; +use std::ffi::OsString; +use std::path::PathBuf; +use std::process::Command; + +fn main() { + let args = env::args_os().skip(1).collect::>(); + // Detect whether or not we're a build script depending on whether --target + // is passed (a bit janky...) + let target = args.windows(2) + .find(|w| &*w[0] == "--target") + .and_then(|w| w[1].to_str()); + let version = args.iter().find(|w| &**w == "-vV"); + + // Build scripts always use the snapshot compiler which is guaranteed to be + // able to produce an executable, whereas intermediate compilers may not + // have the standard library built yet and may not be able to produce an + // executable. Otherwise we just use the standard compiler we're + // bootstrapping with. 
+ // + // Also note that cargo will detect the version of the compiler to trigger + // a rebuild when the compiler changes. If this happens, we want to make + // sure to use the actual compiler instead of the snapshot compiler because + // that's the one that's actually changing. + let (rustc, libdir) = if target.is_none() && version.is_none() { + ("RUSTC_SNAPSHOT", "RUSTC_SNAPSHOT_LIBDIR") + } else { + ("RUSTC_REAL", "RUSTC_LIBDIR") + }; + let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); + + let rustc = env::var_os(rustc).unwrap_or_else(|| panic!("{:?} was not set", rustc)); + let libdir = env::var_os(libdir).unwrap_or_else(|| panic!("{:?} was not set", libdir)); + let mut dylib_path = bootstrap::util::dylib_path(); + dylib_path.insert(0, PathBuf::from(libdir)); + + let mut cmd = Command::new(rustc); + cmd.args(&args) + .arg("--cfg") + .arg(format!("stage{}", stage)) + .env(bootstrap::util::dylib_path_var(), + env::join_paths(&dylib_path).unwrap()); + + if let Some(target) = target { + // The stage0 compiler has a special sysroot distinct from what we + // actually downloaded, so we just always pass the `--sysroot` option. + cmd.arg("--sysroot").arg(env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set")); + + // When we build Rust dylibs they're all intended for intermediate + // usage, so make sure we pass the -Cprefer-dynamic flag instead of + // linking all deps statically into the dylib. + cmd.arg("-Cprefer-dynamic"); + + // Help the libc crate compile by assisting it in finding the MUSL + // native libraries. + if let Some(s) = env::var_os("MUSL_ROOT") { + let mut root = OsString::from("native="); + root.push(&s); + root.push("/lib"); + cmd.arg("-L").arg(&root); + } + + // Pass down extra flags, commonly used to configure `-Clinker` when + // cross compiling.
+ if let Ok(s) = env::var("RUSTC_FLAGS") { + cmd.args(&s.split(" ").filter(|s| !s.is_empty()).collect::<Vec<_>>()); + } + + // If we're compiling specifically the `panic_abort` crate then we pass + // the `-C panic=abort` option. Note that we do not do this for any + // other crate intentionally as this is the only crate for now that we + // ship with panic=abort. + // + // This... is a bit of a hack how we detect this. Ideally this + // information should be encoded in the crate I guess? Would likely + // require an RFC amendment to RFC 1513, however. + let is_panic_abort = args.windows(2) + .any(|a| &*a[0] == "--crate-name" && &*a[1] == "panic_abort"); + if is_panic_abort { + cmd.arg("-C").arg("panic=abort"); + } + + // Set various options from config.toml to configure how we're building + // code. + if env::var("RUSTC_DEBUGINFO") == Ok("true".to_string()) { + cmd.arg("-g"); + } else if env::var("RUSTC_DEBUGINFO_LINES") == Ok("true".to_string()) { + cmd.arg("-Cdebuginfo=1"); + } + let debug_assertions = match env::var("RUSTC_DEBUG_ASSERTIONS") { + Ok(s) => if s == "true" { "y" } else { "n" }, + Err(..) => "n", + }; + cmd.arg("-C").arg(format!("debug-assertions={}", debug_assertions)); + if let Ok(s) = env::var("RUSTC_CODEGEN_UNITS") { + cmd.arg("-C").arg(format!("codegen-units={}", s)); + } + + // Dealing with rpath here is a little special, so let's go into some + // detail. First off, `-rpath` is a linker option on Unix platforms + // which adds to the runtime dynamic loader path when looking for + // dynamic libraries. We use this by default on Unix platforms to ensure + // that our nightlies behave the same on Windows, that is they work out + // of the box. This can be disabled, of course, but basically that's why + // we're gated on RUSTC_RPATH here. + // + // Ok, so the astute might be wondering "why isn't `-C rpath` used + // here?" and that is indeed a good question to ask. This codegen + // option is the compiler's current interface to generating an rpath.
+ // Unfortunately it doesn't quite suffice for us. The flag currently + // takes no value as an argument, so the compiler calculates what it + // should pass to the linker as `-rpath`. This unfortunately is based on + // the **compile time** directory structure which when building with + // Cargo will be very different than the runtime directory structure. + // + // All that's a really long winded way of saying that if we use + // `-Crpath` then the executables generated have the wrong rpath of + // something like `$ORIGIN/deps` when in fact the way we distribute + // rustc requires the rpath to be `$ORIGIN/../lib`. + // + // So, all in all, to set up the correct rpath we pass the linker + // argument manually via `-C link-args=-Wl,-rpath,...`. Plus isn't it + // fun to pass a flag to a tool to pass a flag to pass a flag to a tool + // to change a flag in a binary? + if env::var("RUSTC_RPATH") == Ok("true".to_string()) { + let rpath = if target.contains("apple") { + Some("-Wl,-rpath,@loader_path/../lib") + } else if !target.contains("windows") { + Some("-Wl,-rpath,$ORIGIN/../lib") + } else { + None + }; + if let Some(rpath) = rpath { + cmd.arg("-C").arg(format!("link-args={}", rpath)); + } + } + } + + // Actually run the compiler! + std::process::exit(match cmd.status() { + Ok(s) => s.code().unwrap_or(1), + Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e), + }) +} diff --git a/src/bootstrap/bin/rustdoc.rs b/src/bootstrap/bin/rustdoc.rs new file mode 100644 index 0000000000000..67358e540dad0 --- /dev/null +++ b/src/bootstrap/bin/rustdoc.rs @@ -0,0 +1,42 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Shim which is passed to Cargo as "rustdoc" when running the bootstrap. +//! +//! See comments in `src/bootstrap/rustc.rs` for more information. + +extern crate bootstrap; + +use std::env; +use std::process::Command; +use std::path::PathBuf; + +fn main() { + let args = env::args_os().skip(1).collect::>(); + let rustdoc = env::var_os("RUSTDOC_REAL").expect("RUSTDOC_REAL was not set"); + let libdir = env::var_os("RUSTC_LIBDIR").expect("RUSTC_LIBDIR was not set"); + let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); + + let mut dylib_path = bootstrap::util::dylib_path(); + dylib_path.insert(0, PathBuf::from(libdir)); + + let mut cmd = Command::new(rustdoc); + cmd.args(&args) + .arg("--cfg") + .arg(format!("stage{}", stage)) + .arg("--cfg") + .arg("dox") + .env(bootstrap::util::dylib_path_var(), + env::join_paths(&dylib_path).unwrap()); + std::process::exit(match cmd.status() { + Ok(s) => s.code().unwrap_or(1), + Err(e) => panic!("\n\nfailed to run {:?}: {}\n\n", cmd, e), + }) +} diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py new file mode 100644 index 0000000000000..a3fabbb3e8094 --- /dev/null +++ b/src/bootstrap/bootstrap.py @@ -0,0 +1,454 @@ +# Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +import argparse +import contextlib +import datetime +import hashlib +import os +import shutil +import subprocess +import sys +import tarfile +import tempfile + +from time import time + + +def get(url, path, verbose=False): + sha_url = url + ".sha256" + with tempfile.NamedTemporaryFile(delete=False) as temp_file: + temp_path = temp_file.name + with tempfile.NamedTemporaryFile(suffix=".sha256", delete=False) as sha_file: + sha_path = sha_file.name + + try: + download(sha_path, sha_url, verbose) + if os.path.exists(path): + if verify(path, sha_path, False): + print("using already-download file " + path) + return + else: + print("ignoring already-download file " + path + " due to failed verification") + os.unlink(path) + download(temp_path, url, verbose) + if not verify(temp_path, sha_path, True): + raise RuntimeError("failed verification") + print("moving {} to {}".format(temp_path, path)) + shutil.move(temp_path, path) + finally: + delete_if_present(sha_path) + delete_if_present(temp_path) + + +def delete_if_present(path): + if os.path.isfile(path): + print("removing " + path) + os.unlink(path) + + +def download(path, url, verbose): + print("downloading {} to {}".format(url, path)) + # see http://serverfault.com/questions/301128/how-to-download + if sys.platform == 'win32': + run(["PowerShell.exe", "/nologo", "-Command", + "(New-Object System.Net.WebClient)" + ".DownloadFile('{}', '{}')".format(url, path)], + verbose=verbose) + else: + run(["curl", "-o", path, url], verbose=verbose) + + +def verify(path, sha_path, verbose): + print("verifying " + path) + with open(path, "rb") as f: + found = hashlib.sha256(f.read()).hexdigest() + with open(sha_path, "r") as f: + expected, _ = f.readline().split() + verified = found == expected + if not verified and verbose: + print("invalid checksum:\n" + " found: {}\n" + " expected: {}".format(found, expected)) + return verified + + +def unpack(tarball, dst, verbose=False, match=None): + print("extracting " + tarball) + fname = 
os.path.basename(tarball).replace(".tar.gz", "") + with contextlib.closing(tarfile.open(tarball)) as tar: + for p in tar.getnames(): + if "/" not in p: + continue + name = p.replace(fname + "/", "", 1) + if match is not None and not name.startswith(match): + continue + name = name[len(match) + 1:] + + fp = os.path.join(dst, name) + if verbose: + print(" extracting " + p) + tar.extract(p, dst) + tp = os.path.join(dst, p) + if os.path.isdir(tp) and os.path.exists(fp): + continue + shutil.move(tp, fp) + shutil.rmtree(os.path.join(dst, fname)) + +def run(args, verbose=False): + if verbose: + print("running: " + ' '.join(args)) + sys.stdout.flush() + # Use Popen here instead of call() as it apparently allows powershell on + # Windows to not lock up waiting for input presumably. + ret = subprocess.Popen(args) + code = ret.wait() + if code != 0: + err = "failed to run: " + ' '.join(args) + if verbose: + raise RuntimeError(err) + sys.exit(err) + +def stage0_data(rust_root): + nightlies = os.path.join(rust_root, "src/stage0.txt") + data = {} + with open(nightlies, 'r') as nightlies: + for line in nightlies: + line = line.rstrip() # Strip newline character, '\n' + if line.startswith("#") or line == '': + continue + a, b = line.split(": ", 1) + data[a] = b + return data + +def format_build_time(duration): + return str(datetime.timedelta(seconds=int(duration))) + + +class RustBuild(object): + def download_stage0(self): + cache_dst = os.path.join(self.build_dir, "cache") + rustc_cache = os.path.join(cache_dst, self.stage0_rustc_date()) + cargo_cache = os.path.join(cache_dst, self.stage0_cargo_date()) + if not os.path.exists(rustc_cache): + os.makedirs(rustc_cache) + if not os.path.exists(cargo_cache): + os.makedirs(cargo_cache) + + if self.rustc().startswith(self.bin_root()) and \ + (not os.path.exists(self.rustc()) or self.rustc_out_of_date()): + if os.path.exists(self.bin_root()): + shutil.rmtree(self.bin_root()) + channel = self.stage0_rustc_channel() + filename = 
"rust-std-{}-{}.tar.gz".format(channel, self.build) + url = "https://static.rust-lang.org/dist/" + self.stage0_rustc_date() + tarball = os.path.join(rustc_cache, filename) + if not os.path.exists(tarball): + get("{}/{}".format(url, filename), tarball, verbose=self.verbose) + unpack(tarball, self.bin_root(), + match="rust-std-" + self.build, + verbose=self.verbose) + + filename = "rustc-{}-{}.tar.gz".format(channel, self.build) + url = "https://static.rust-lang.org/dist/" + self.stage0_rustc_date() + tarball = os.path.join(rustc_cache, filename) + if not os.path.exists(tarball): + get("{}/{}".format(url, filename), tarball, verbose=self.verbose) + unpack(tarball, self.bin_root(), match="rustc", verbose=self.verbose) + with open(self.rustc_stamp(), 'w') as f: + f.write(self.stage0_rustc_date()) + + if self.cargo().startswith(self.bin_root()) and \ + (not os.path.exists(self.cargo()) or self.cargo_out_of_date()): + channel = self.stage0_cargo_channel() + filename = "cargo-{}-{}.tar.gz".format(channel, self.build) + url = "https://static.rust-lang.org/cargo-dist/" + self.stage0_cargo_date() + tarball = os.path.join(cargo_cache, filename) + if not os.path.exists(tarball): + get("{}/{}".format(url, filename), tarball, verbose=self.verbose) + unpack(tarball, self.bin_root(), match="cargo", verbose=self.verbose) + with open(self.cargo_stamp(), 'w') as f: + f.write(self.stage0_cargo_date()) + + def stage0_cargo_date(self): + return self._cargo_date + + def stage0_cargo_channel(self): + return self._cargo_channel + + def stage0_rustc_date(self): + return self._rustc_date + + def stage0_rustc_channel(self): + return self._rustc_channel + + def rustc_stamp(self): + return os.path.join(self.bin_root(), '.rustc-stamp') + + def cargo_stamp(self): + return os.path.join(self.bin_root(), '.cargo-stamp') + + def rustc_out_of_date(self): + if not os.path.exists(self.rustc_stamp()) or self.clean: + return True + with open(self.rustc_stamp(), 'r') as f: + return self.stage0_rustc_date() 
!= f.read() + + def cargo_out_of_date(self): + if not os.path.exists(self.cargo_stamp()) or self.clean: + return True + with open(self.cargo_stamp(), 'r') as f: + return self.stage0_cargo_date() != f.read() + + def bin_root(self): + return os.path.join(self.build_dir, self.build, "stage0") + + def get_toml(self, key): + for line in self.config_toml.splitlines(): + if line.startswith(key + ' ='): + return self.get_string(line) + return None + + def get_mk(self, key): + for line in iter(self.config_mk.splitlines()): + if line.startswith(key): + return line[line.find(':=') + 2:].strip() + return None + + def cargo(self): + config = self.get_toml('cargo') + if config: + return config + config = self.get_mk('CFG_LOCAL_RUST_ROOT') + if config: + return config + '/bin/cargo' + self.exe_suffix() + return os.path.join(self.bin_root(), "bin/cargo" + self.exe_suffix()) + + def rustc(self): + config = self.get_toml('rustc') + if config: + return config + config = self.get_mk('CFG_LOCAL_RUST_ROOT') + if config: + return config + '/bin/rustc' + self.exe_suffix() + return os.path.join(self.bin_root(), "bin/rustc" + self.exe_suffix()) + + def get_string(self, line): + start = line.find('"') + end = start + 1 + line[start + 1:].find('"') + return line[start + 1:end] + + def exe_suffix(self): + if sys.platform == 'win32': + return '.exe' + else: + return '' + + def build_bootstrap(self): + build_dir = os.path.join(self.build_dir, "bootstrap") + if self.clean and os.path.exists(build_dir): + shutil.rmtree(build_dir) + env = os.environ.copy() + env["CARGO_TARGET_DIR"] = build_dir + env["RUSTC"] = self.rustc() + env["LD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + env["DYLD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + env["PATH"] = os.path.join(self.bin_root(), "bin") + \ + os.pathsep + env["PATH"] + args = [self.cargo(), "build", "--manifest-path", + os.path.join(self.rust_root, "src/bootstrap/Cargo.toml")] + if self.use_vendored_sources: + 
args.append("--frozen") + self.run(args, env) + + def run(self, args, env): + proc = subprocess.Popen(args, env=env) + ret = proc.wait() + if ret != 0: + sys.exit(ret) + + def build_triple(self): + default_encoding = sys.getdefaultencoding() + config = self.get_toml('build') + if config: + return config + config = self.get_mk('CFG_BUILD') + if config: + return config + try: + ostype = subprocess.check_output(['uname', '-s']).strip().decode(default_encoding) + cputype = subprocess.check_output(['uname', '-m']).strip().decode(default_encoding) + except (subprocess.CalledProcessError, WindowsError): + if sys.platform == 'win32': + return 'x86_64-pc-windows-msvc' + err = "uname not found" + if self.verbose: + raise Exception(err) + sys.exit(err) + + # Darwin's `uname -s` lies and always returns i386. We have to use + # sysctl instead. + if ostype == 'Darwin' and cputype == 'i686': + args = ['sysctl', 'hw.optional.x86_64'] + sysctl = subprocess.check_output(args).decode(default_encoding) + if ': 1' in sysctl: + cputype = 'x86_64' + + # The goal here is to come up with the same triple as LLVM would, + # at least for the subset of platforms we're willing to target. + if ostype == 'Linux': + ostype = 'unknown-linux-gnu' + elif ostype == 'FreeBSD': + ostype = 'unknown-freebsd' + elif ostype == 'DragonFly': + ostype = 'unknown-dragonfly' + elif ostype == 'Bitrig': + ostype = 'unknown-bitrig' + elif ostype == 'OpenBSD': + ostype = 'unknown-openbsd' + elif ostype == 'NetBSD': + ostype = 'unknown-netbsd' + elif ostype == 'Darwin': + ostype = 'apple-darwin' + elif ostype.startswith('MINGW'): + # msys' `uname` does not print gcc configuration, but prints msys + # configuration. so we cannot believe `uname -m`: + # msys1 is always i686 and msys2 is always x86_64. + # instead, msys defines $MSYSTEM which is MINGW32 on i686 and + # MINGW64 on x86_64. 
+ ostype = 'pc-windows-gnu' + cputype = 'i686' + if os.environ.get('MSYSTEM') == 'MINGW64': + cputype = 'x86_64' + elif ostype.startswith('MSYS'): + ostype = 'pc-windows-gnu' + elif ostype.startswith('CYGWIN_NT'): + cputype = 'i686' + if ostype.endswith('WOW64'): + cputype = 'x86_64' + ostype = 'pc-windows-gnu' + else: + err = "unknown OS type: " + ostype + if self.verbose: + raise ValueError(err) + sys.exit(err) + + if cputype in {'i386', 'i486', 'i686', 'i786', 'x86'}: + cputype = 'i686' + elif cputype in {'xscale', 'arm'}: + cputype = 'arm' + elif cputype == 'armv7l': + cputype = 'arm' + ostype += 'eabihf' + elif cputype == 'aarch64': + cputype = 'aarch64' + elif cputype == 'mips': + if sys.byteorder == 'big': + cputype = 'mips' + elif sys.byteorder == 'little': + cputype = 'mipsel' + else: + raise ValueError('unknown byteorder: ' + sys.byteorder) + elif cputype == 'mips64': + if sys.byteorder == 'big': + cputype = 'mips64' + elif sys.byteorder == 'little': + cputype = 'mips64el' + else: + raise ValueError('unknown byteorder: ' + sys.byteorder) + # only the n64 ABI is supported, indicate it + ostype += 'abi64' + elif cputype in {'powerpc', 'ppc', 'ppc64'}: + cputype = 'powerpc' + elif cputype in {'amd64', 'x86_64', 'x86-64', 'x64'}: + cputype = 'x86_64' + else: + err = "unknown cpu type: " + cputype + if self.verbose: + raise ValueError(err) + sys.exit(err) + + return "{}-{}".format(cputype, ostype) + +def main(): + parser = argparse.ArgumentParser(description='Build rust') + parser.add_argument('--config') + parser.add_argument('--clean', action='store_true') + parser.add_argument('-v', '--verbose', action='store_true') + + args = [a for a in sys.argv if a != '-h' and a != '--help'] + args, _ = parser.parse_known_args(args) + + # Configure initial bootstrap + rb = RustBuild() + rb.config_toml = '' + rb.config_mk = '' + rb.rust_root = os.path.abspath(os.path.join(__file__, '../../..')) + rb.build_dir = os.path.join(os.getcwd(), "build") + rb.verbose = 
args.verbose + rb.clean = args.clean + + try: + with open(args.config or 'config.toml') as config: + rb.config_toml = config.read() + except: + pass + try: + rb.config_mk = open('config.mk').read() + except: + pass + + rb.use_vendored_sources = '\nvendor = true' in rb.config_toml or \ + 'CFG_ENABLE_VENDOR' in rb.config_mk + + if rb.use_vendored_sources: + if not os.path.exists('.cargo'): + os.makedirs('.cargo') + f = open('.cargo/config','w') + f.write(""" + [source.crates-io] + replace-with = 'vendored-sources' + registry = 'https://example.com' + + [source.vendored-sources] + directory = '{}/src/vendor' + """.format(rb.rust_root)) + f.close() + else: + if os.path.exists('.cargo'): + shutil.rmtree('.cargo') + data = stage0_data(rb.rust_root) + rb._rustc_channel, rb._rustc_date = data['rustc'].split('-', 1) + rb._cargo_channel, rb._cargo_date = data['cargo'].split('-', 1) + + start_time = time() + + # Fetch/build the bootstrap + rb.build = rb.build_triple() + rb.download_stage0() + sys.stdout.flush() + rb.build_bootstrap() + sys.stdout.flush() + + # Run the bootstrap + args = [os.path.join(rb.build_dir, "bootstrap/debug/bootstrap")] + args.extend(sys.argv[1:]) + env = os.environ.copy() + env["BUILD"] = rb.build + env["SRC"] = rb.rust_root + env["BOOTSTRAP_PARENT_ID"] = str(os.getpid()) + rb.run(args, env) + + end_time = time() + + print("Build completed in %s" % format_build_time(end_time - start_time)) + +if __name__ == '__main__': + main() diff --git a/src/bootstrap/cc.rs b/src/bootstrap/cc.rs new file mode 100644 index 0000000000000..e2bde4a658611 --- /dev/null +++ b/src/bootstrap/cc.rs @@ -0,0 +1,124 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
C-compiler probing and detection. +//! +//! This module will fill out the `cc` and `cxx` maps of `Build` by looking for +//! C and C++ compilers for each target configured. A compiler is found through +//! a number of vectors (in order of precedence) +//! +//! 1. Configuration via `target.$target.cc` in `config.toml`. +//! 2. Configuration via `target.$target.android-ndk` in `config.toml`, if +//! applicable +//! 3. Special logic to probe on OpenBSD +//! 4. The `CC_$target` environment variable. +//! 5. The `CC` environment variable. +//! 6. "cc" +//! +//! Some of this logic is implemented here, but much of it is farmed out to the +//! `gcc` crate itself, so we end up having the same fallbacks as there. +//! Similar logic is then used to find a C++ compiler, just some s/cc/c++/ is +//! used. +//! +//! It is intended that after this module has run no C/C++ compiler will +//! ever be probed for. Instead the compilers found here will be used for +//! everything. + +use std::process::Command; + +use build_helper::{cc2ar, output}; +use gcc; + +use Build; +use config::Target; + +pub fn find(build: &mut Build) { + // For all targets we're going to need a C compiler for building some shims + // and such as well as for being a linker for Rust code. 
+ for target in build.config.target.iter() { + let mut cfg = gcc::Config::new(); + cfg.cargo_metadata(false).opt_level(0).debug(false) + .target(target).host(&build.config.build); + + let config = build.config.target_config.get(target); + if let Some(cc) = config.and_then(|c| c.cc.as_ref()) { + cfg.compiler(cc); + } else { + set_compiler(&mut cfg, "gcc", target, config); + } + + let compiler = cfg.get_compiler(); + let ar = cc2ar(compiler.path(), target); + build.verbose(&format!("CC_{} = {:?}", target, compiler.path())); + if let Some(ref ar) = ar { + build.verbose(&format!("AR_{} = {:?}", target, ar)); + } + build.cc.insert(target.to_string(), (compiler, ar)); + } + + // For all host triples we need to find a C++ compiler as well + for host in build.config.host.iter() { + let mut cfg = gcc::Config::new(); + cfg.cargo_metadata(false).opt_level(0).debug(false).cpp(true) + .target(host).host(&build.config.build); + let config = build.config.target_config.get(host); + if let Some(cxx) = config.and_then(|c| c.cxx.as_ref()) { + cfg.compiler(cxx); + } else { + set_compiler(&mut cfg, "g++", host, config); + } + let compiler = cfg.get_compiler(); + build.verbose(&format!("CXX_{} = {:?}", host, compiler.path())); + build.cxx.insert(host.to_string(), compiler); + } +} + +fn set_compiler(cfg: &mut gcc::Config, + gnu_compiler: &str, + target: &str, + config: Option<&Target>) { + match target { + // When compiling for android we may have the NDK configured in the + // config.toml in which case we look there. Otherwise the default + // compiler already takes into account the triple in question. + t if t.contains("android") => { + if let Some(ndk) = config.and_then(|c| c.ndk.as_ref()) { + let target = target.replace("armv7", "arm"); + let compiler = format!("{}-{}", target, gnu_compiler); + cfg.compiler(ndk.join("bin").join(compiler)); + } + } + + // The default gcc version from OpenBSD may be too old, try using egcc, + // which is a gcc version from ports, if this is the case. 
+ t if t.contains("openbsd") => { + let c = cfg.get_compiler(); + if !c.path().ends_with(gnu_compiler) { + return + } + + let output = output(c.to_command().arg("--version")); + let i = match output.find(" 4.") { + Some(i) => i, + None => return, + }; + match output[i + 3..].chars().next().unwrap() { + '0' ... '6' => {} + _ => return, + } + let alternative = format!("e{}", gnu_compiler); + if Command::new(&alternative).output().is_ok() { + cfg.compiler(alternative); + } + } + + _ => {} + } +} diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs new file mode 100644 index 0000000000000..b2341f5978704 --- /dev/null +++ b/src/bootstrap/channel.rs @@ -0,0 +1,93 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Build configuration for Rust's release channels. +//! +//! Implements the stable/beta/nightly channel distinctions by setting various +//! flags like the `unstable_features`, calculating variables like `release` and +//! `package_vers`, and otherwise indicating to the compiler what it should +//! print out as part of its version information. + +use std::fs::{self, File}; +use std::io::prelude::*; +use std::process::Command; + +use build_helper::output; + +use Build; + +pub fn collect(build: &mut Build) { + // Currently the canonical source for the release number (e.g. 1.10.0) and + // the prerelease version (e.g. `.1`) is in `mk/main.mk`. We "parse" that + // here to learn about those numbers. 
+ let mut main_mk = String::new(); + t!(t!(File::open(build.src.join("mk/main.mk"))).read_to_string(&mut main_mk)); + let mut release_num = ""; + let mut prerelease_version = ""; + for line in main_mk.lines() { + if line.starts_with("CFG_RELEASE_NUM") { + release_num = line.split('=').skip(1).next().unwrap().trim(); + } + if line.starts_with("CFG_PRERELEASE_VERSION") { + prerelease_version = line.split('=').skip(1).next().unwrap().trim(); + } + } + + // Depending on the channel, passed in `./configure --release-channel`, + // determine various properties of the build. + match &build.config.channel[..] { + "stable" => { + build.release = release_num.to_string(); + build.package_vers = build.release.clone(); + build.unstable_features = false; + } + "beta" => { + build.release = format!("{}-beta{}", release_num, + prerelease_version); + build.package_vers = "beta".to_string(); + build.unstable_features = false; + } + "nightly" => { + build.release = format!("{}-nightly", release_num); + build.package_vers = "nightly".to_string(); + build.unstable_features = true; + } + _ => { + build.release = format!("{}-dev", release_num); + build.package_vers = build.release.clone(); + build.unstable_features = true; + } + } + build.version = build.release.clone(); + + // If we have a git directory, add in some various SHA information of what + // commit this compiler was compiled from. 
+ if fs::metadata(build.src.join(".git")).is_ok() { + let ver_date = output(Command::new("git").current_dir(&build.src) + .arg("log").arg("-1") + .arg("--date=short") + .arg("--pretty=format:%cd")); + let ver_hash = output(Command::new("git").current_dir(&build.src) + .arg("rev-parse").arg("HEAD")); + let short_ver_hash = output(Command::new("git") + .current_dir(&build.src) + .arg("rev-parse") + .arg("--short=9") + .arg("HEAD")); + let ver_date = ver_date.trim().to_string(); + let ver_hash = ver_hash.trim().to_string(); + let short_ver_hash = short_ver_hash.trim().to_string(); + build.version.push_str(&format!(" ({} {})", short_ver_hash, + ver_date)); + build.ver_date = Some(ver_date.to_string()); + build.ver_hash = Some(ver_hash); + build.short_ver_hash = Some(short_ver_hash); + } +} diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs new file mode 100644 index 0000000000000..b67eab38f5dd2 --- /dev/null +++ b/src/bootstrap/check.rs @@ -0,0 +1,499 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of the various `check-*` targets of the build system. +//! +//! This file implements the various regression test suites that we execute on +//! our CI. + +use std::collections::HashSet; +use std::env; +use std::fmt; +use std::fs; +use std::path::{PathBuf, Path}; +use std::process::Command; + +use build_helper::output; + +use {Build, Compiler, Mode}; +use util::{self, dylib_path, dylib_path_var}; + +const ADB_TEST_DIR: &'static str = "/data/tmp"; + +/// The two modes of the test runner; tests or benchmarks. 
+#[derive(Copy, Clone)] +pub enum TestKind { + /// Run `cargo test` + Test, + /// Run `cargo bench` + Bench, +} + +impl TestKind { + // Return the cargo subcommand for this test kind + fn subcommand(self) -> &'static str { + match self { + TestKind::Test => "test", + TestKind::Bench => "bench", + } + } +} + +impl fmt::Display for TestKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + TestKind::Test => "Testing", + TestKind::Bench => "Benchmarking", + }) + } +} + +/// Runs the `linkchecker` tool as compiled in `stage` by the `host` compiler. +/// +/// This tool in `src/tools` will verify the validity of all our links in the +/// documentation to ensure we don't have a bunch of dead ones. +pub fn linkcheck(build: &Build, stage: u32, host: &str) { + println!("Linkcheck stage{} ({})", stage, host); + let compiler = Compiler::new(stage, host); + build.run(build.tool_cmd(&compiler, "linkchecker") + .arg(build.out.join(host).join("doc"))); +} + +/// Runs the `cargotest` tool as compiled in `stage` by the `host` compiler. +/// +/// This tool in `src/tools` will check out a few Rust projects and run `cargo +/// test` to ensure that we don't regress the test suites there. +pub fn cargotest(build: &Build, stage: u32, host: &str) { + let ref compiler = Compiler::new(stage, host); + + // Configure PATH to find the right rustc. NB. we have to use PATH + // and not RUSTC because the Cargo test suite has tests that will + // fail if rustc is not spelled `rustc`. + let path = build.sysroot(compiler).join("bin"); + let old_path = ::std::env::var("PATH").expect(""); + let sep = if cfg!(windows) { ";" } else {":" }; + let ref newpath = format!("{}{}{}", path.display(), sep, old_path); + + // Note that this is a short, cryptic, and not scoped directory name. This + // is currently to minimize the length of path on Windows where we otherwise + // quickly run into path name limit constraints. 
+ let out_dir = build.out.join("ct"); + t!(fs::create_dir_all(&out_dir)); + + build.run(build.tool_cmd(compiler, "cargotest") + .env("PATH", newpath) + .arg(&build.cargo) + .arg(&out_dir)); +} + +/// Runs the `tidy` tool as compiled in `stage` by the `host` compiler. +/// +/// This tool in `src/tools` checks up on various bits and pieces of style and +/// otherwise just implements a few lint-like checks that are specific to the +/// compiler itself. +pub fn tidy(build: &Build, stage: u32, host: &str) { + println!("tidy check stage{} ({})", stage, host); + let compiler = Compiler::new(stage, host); + build.run(build.tool_cmd(&compiler, "tidy") + .arg(build.src.join("src"))); +} + +fn testdir(build: &Build, host: &str) -> PathBuf { + build.out.join(host).join("test") +} + +/// Executes the `compiletest` tool to run a suite of tests. +/// +/// Compiles all tests with `compiler` for `target` with the specified +/// compiletest `mode` and `suite` arguments. For example `mode` can be +/// "run-pass" or `suite` can be something like `debuginfo`. +pub fn compiletest(build: &Build, + compiler: &Compiler, + target: &str, + mode: &str, + suite: &str) { + println!("Check compiletest {} ({} -> {})", suite, compiler.host, target); + let mut cmd = build.tool_cmd(compiler, "compiletest"); + + // compiletest currently has... a lot of arguments, so let's just pass all + // of them! 
+ + cmd.arg("--compile-lib-path").arg(build.rustc_libdir(compiler)); + cmd.arg("--run-lib-path").arg(build.sysroot_libdir(compiler, target)); + cmd.arg("--rustc-path").arg(build.compiler_path(compiler)); + cmd.arg("--rustdoc-path").arg(build.rustdoc(compiler)); + cmd.arg("--src-base").arg(build.src.join("src/test").join(suite)); + cmd.arg("--build-base").arg(testdir(build, compiler.host).join(suite)); + cmd.arg("--stage-id").arg(format!("stage{}-{}", compiler.stage, target)); + cmd.arg("--mode").arg(mode); + cmd.arg("--target").arg(target); + cmd.arg("--host").arg(compiler.host); + cmd.arg("--llvm-filecheck").arg(build.llvm_filecheck(&build.config.build)); + + if let Some(nodejs) = build.config.nodejs.as_ref() { + cmd.arg("--nodejs").arg(nodejs); + } + + let mut flags = vec!["-Crpath".to_string()]; + if build.config.rust_optimize_tests { + flags.push("-O".to_string()); + } + if build.config.rust_debuginfo_tests { + flags.push("-g".to_string()); + } + + let mut hostflags = build.rustc_flags(&compiler.host); + hostflags.extend(flags.clone()); + cmd.arg("--host-rustcflags").arg(hostflags.join(" ")); + + let mut targetflags = build.rustc_flags(&target); + targetflags.extend(flags); + targetflags.push(format!("-Lnative={}", + build.test_helpers_out(target).display())); + cmd.arg("--target-rustcflags").arg(targetflags.join(" ")); + + cmd.arg("--docck-python").arg(build.python()); + + if build.config.build.ends_with("apple-darwin") { + // Force /usr/bin/python on OSX for LLDB tests because we're loading the + // LLDB plugin's compiled module which only works with the system python + // (namely not Homebrew-installed python) + cmd.arg("--lldb-python").arg("/usr/bin/python"); + } else { + cmd.arg("--lldb-python").arg(build.python()); + } + + if let Some(ref gdb) = build.config.gdb { + cmd.arg("--gdb").arg(gdb); + } + if let Some(ref vers) = build.lldb_version { + cmd.arg("--lldb-version").arg(vers); + } + if let Some(ref dir) = build.lldb_python_dir { + 
cmd.arg("--lldb-python-dir").arg(dir); + } + let llvm_config = build.llvm_config(target); + let llvm_version = output(Command::new(&llvm_config).arg("--version")); + cmd.arg("--llvm-version").arg(llvm_version); + + cmd.args(&build.flags.cmd.test_args()); + + if build.config.verbose || build.flags.verbose { + cmd.arg("--verbose"); + } + + if build.config.quiet_tests { + cmd.arg("--quiet"); + } + + // Only pass correct values for these flags for the `run-make` suite as it + // requires that a C++ compiler was configured which isn't always the case. + if suite == "run-make" { + let llvm_components = output(Command::new(&llvm_config).arg("--components")); + let llvm_cxxflags = output(Command::new(&llvm_config).arg("--cxxflags")); + cmd.arg("--cc").arg(build.cc(target)) + .arg("--cxx").arg(build.cxx(target)) + .arg("--cflags").arg(build.cflags(target).join(" ")) + .arg("--llvm-components").arg(llvm_components.trim()) + .arg("--llvm-cxxflags").arg(llvm_cxxflags.trim()); + } else { + cmd.arg("--cc").arg("") + .arg("--cxx").arg("") + .arg("--cflags").arg("") + .arg("--llvm-components").arg("") + .arg("--llvm-cxxflags").arg(""); + } + + // Running a C compiler on MSVC requires a few env vars to be set, to be + // sure to set them here. + if target.contains("msvc") { + for &(ref k, ref v) in build.cc[target].0.env() { + if k != "PATH" { + cmd.env(k, v); + } + } + } + cmd.env("RUSTC_BOOTSTRAP", "1"); + + cmd.arg("--adb-path").arg("adb"); + cmd.arg("--adb-test-dir").arg(ADB_TEST_DIR); + if target.contains("android") { + // Assume that cc for this target comes from the android sysroot + cmd.arg("--android-cross-path") + .arg(build.cc(target).parent().unwrap().parent().unwrap()); + } else { + cmd.arg("--android-cross-path").arg(""); + } + + build.run(&mut cmd); +} + +/// Run `rustdoc --test` for all documentation in `src/doc`. +/// +/// This will run all tests in our markdown documentation (e.g. the book) +/// located in `src/doc`. 
The `rustdoc` that's run is the one that sits next to +/// `compiler`. +pub fn docs(build: &Build, compiler: &Compiler) { + // Do a breadth-first traversal of the `src/doc` directory and just run + // tests for all files that end in `*.md` + let mut stack = vec![build.src.join("src/doc")]; + + while let Some(p) = stack.pop() { + if p.is_dir() { + stack.extend(t!(p.read_dir()).map(|p| t!(p).path())); + continue + } + + if p.extension().and_then(|s| s.to_str()) != Some("md") { + continue + } + + println!("doc tests for: {}", p.display()); + markdown_test(build, compiler, &p); + } +} + +/// Run the error index generator tool to execute the tests located in the error +/// index. +/// +/// The `error_index_generator` tool lives in `src/tools` and is used to +/// generate a markdown file from the error indexes of the code base which is +/// then passed to `rustdoc --test`. +pub fn error_index(build: &Build, compiler: &Compiler) { + println!("Testing error-index stage{}", compiler.stage); + + let dir = testdir(build, compiler.host); + t!(fs::create_dir_all(&dir)); + let output = dir.join("error-index.md"); + build.run(build.tool_cmd(compiler, "error_index_generator") + .arg("markdown") + .arg(&output) + .env("CFG_BUILD", &build.config.build)); + + markdown_test(build, compiler, &output); +} + +fn markdown_test(build: &Build, compiler: &Compiler, markdown: &Path) { + let mut cmd = Command::new(build.rustdoc(compiler)); + build.add_rustc_lib_path(compiler, &mut cmd); + cmd.arg("--test"); + cmd.arg(markdown); + + let mut test_args = build.flags.cmd.test_args().join(" "); + if build.config.quiet_tests { + test_args.push_str(" --quiet"); + } + cmd.arg("--test-args").arg(test_args); + + build.run(&mut cmd); +} + +/// Run all unit tests plus documentation tests for an entire crate DAG defined +/// by a `Cargo.toml` +/// +/// This is what runs tests for crates like the standard library, compiler, etc. +/// It essentially is the driver for running `cargo test`. 
+/// +/// Currently this runs all tests for a DAG by passing a bunch of `-p foo` +/// arguments, and those arguments are discovered from `cargo metadata`. +pub fn krate(build: &Build, + compiler: &Compiler, + target: &str, + mode: Mode, + test_kind: TestKind, + krate: Option<&str>) { + let (name, path, features, root) = match mode { + Mode::Libstd => { + ("libstd", "src/rustc/std_shim", build.std_features(), "std_shim") + } + Mode::Libtest => { + ("libtest", "src/rustc/test_shim", String::new(), "test_shim") + } + Mode::Librustc => { + ("librustc", "src/rustc", build.rustc_features(), "rustc-main") + } + _ => panic!("can only test libraries"), + }; + println!("{} {} stage{} ({} -> {})", test_kind, name, compiler.stage, + compiler.host, target); + + // Build up the base `cargo test` command. + // + // Pass in some standard flags then iterate over the graph we've discovered + // in `cargo metadata` with the maps above and figure out what `-p` + // arguments need to get passed. + let mut cargo = build.cargo(compiler, mode, target, test_kind.subcommand()); + cargo.arg("--manifest-path") + .arg(build.src.join(path).join("Cargo.toml")) + .arg("--features").arg(features); + + match krate { + Some(krate) => { + cargo.arg("-p").arg(krate); + } + None => { + let mut visited = HashSet::new(); + let mut next = vec![root]; + while let Some(name) = next.pop() { + // Right now jemalloc is our only target-specific crate in the sense + // that it's not present on all platforms. Custom skip it here for now, + // but if we add more this probably wants to get more generalized. + if !name.contains("jemalloc") { + cargo.arg("-p").arg(name); + } + for dep in build.crates[name].deps.iter() { + if visited.insert(dep) { + next.push(dep); + } + } + } + } + } + + // The tests are going to run with the *target* libraries, so we need to + // ensure that those libraries show up in the LD_LIBRARY_PATH equivalent. 
+ // + // Note that to run the compiler we need to run with the *host* libraries, + // but our wrapper scripts arrange for that to be the case anyway. + let mut dylib_path = dylib_path(); + dylib_path.insert(0, build.sysroot_libdir(compiler, target)); + cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap()); + + if build.config.quiet_tests { + cargo.arg("--"); + cargo.arg("--quiet"); + } + + if target.contains("android") { + build.run(cargo.arg("--no-run")); + krate_android(build, compiler, target, mode); + } else if target.contains("emscripten") { + build.run(cargo.arg("--no-run")); + krate_emscripten(build, compiler, target, mode); + } else { + cargo.args(&build.flags.cmd.test_args()); + build.run(&mut cargo); + } +} + +fn krate_android(build: &Build, + compiler: &Compiler, + target: &str, + mode: Mode) { + let mut tests = Vec::new(); + let out_dir = build.cargo_out(compiler, mode, target); + find_tests(&out_dir, target, &mut tests); + find_tests(&out_dir.join("deps"), target, &mut tests); + + for test in tests { + build.run(Command::new("adb").arg("push").arg(&test).arg(ADB_TEST_DIR)); + + let test_file_name = test.file_name().unwrap().to_string_lossy(); + let log = format!("{}/check-stage{}-T-{}-H-{}-{}.log", + ADB_TEST_DIR, + compiler.stage, + target, + compiler.host, + test_file_name); + let program = format!("(cd {dir}; \ + LD_LIBRARY_PATH=./{target} ./{test} \ + --logfile {log} \ + {args})", + dir = ADB_TEST_DIR, + target = target, + test = test_file_name, + log = log, + args = build.flags.cmd.test_args().join(" ")); + + let output = output(Command::new("adb").arg("shell").arg(&program)); + println!("{}", output); + build.run(Command::new("adb") + .arg("pull") + .arg(&log) + .arg(build.out.join("tmp"))); + build.run(Command::new("adb").arg("shell").arg("rm").arg(&log)); + if !output.contains("result: ok") { + panic!("some tests failed"); + } + } +} + +fn krate_emscripten(build: &Build, + compiler: &Compiler, + target: &str, + mode: Mode) { + let 
mut tests = Vec::new(); + let out_dir = build.cargo_out(compiler, mode, target); + find_tests(&out_dir, target, &mut tests); + find_tests(&out_dir.join("deps"), target, &mut tests); + + for test in tests { + let test_file_name = test.to_string_lossy().into_owned(); + println!("running {}", test_file_name); + let nodejs = build.config.nodejs.as_ref().expect("nodejs not configured"); + let status = Command::new(nodejs) + .arg(&test_file_name) + .stderr(::std::process::Stdio::inherit()) + .status(); + match status { + Ok(status) => { + if !status.success() { + panic!("some tests failed"); + } + } + Err(e) => panic!(format!("failed to execute command: {}", e)), + }; + } + } + + +fn find_tests(dir: &Path, + target: &str, + dst: &mut Vec) { + for e in t!(dir.read_dir()).map(|e| t!(e)) { + let file_type = t!(e.file_type()); + if !file_type.is_file() { + continue + } + let filename = e.file_name().into_string().unwrap(); + if (target.contains("windows") && filename.ends_with(".exe")) || + (!target.contains("windows") && !filename.contains(".")) || + (target.contains("emscripten") && filename.contains(".js")){ + dst.push(e.path()); + } + } +} + +pub fn android_copy_libs(build: &Build, + compiler: &Compiler, + target: &str) { + println!("Android copy libs to emulator ({})", target); + build.run(Command::new("adb").arg("remount")); + build.run(Command::new("adb").args(&["shell", "rm", "-r", ADB_TEST_DIR])); + build.run(Command::new("adb").args(&["shell", "mkdir", ADB_TEST_DIR])); + build.run(Command::new("adb") + .arg("push") + .arg(build.src.join("src/etc/adb_run_wrapper.sh")) + .arg(ADB_TEST_DIR)); + + let target_dir = format!("{}/{}", ADB_TEST_DIR, target); + build.run(Command::new("adb").args(&["shell", "mkdir", &target_dir[..]])); + + for f in t!(build.sysroot_libdir(compiler, target).read_dir()) { + let f = t!(f); + let name = f.file_name().into_string().unwrap(); + if util::is_dylib(&name) { + build.run(Command::new("adb") + .arg("push") + .arg(f.path()) + 
.arg(&target_dir)); + } + } +} diff --git a/src/bootstrap/clean.rs b/src/bootstrap/clean.rs new file mode 100644 index 0000000000000..75bcbfee6ee0b --- /dev/null +++ b/src/bootstrap/clean.rs @@ -0,0 +1,83 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of `make clean` in rustbuild. +//! +//! Responsible for cleaning out a build directory of all old and stale +//! artifacts to prepare for a fresh build. Currently doesn't remove the +//! `build/cache` directory (download cache) or the `build/$target/llvm` +//! directory as we want that cached between builds. + +use std::fs; +use std::io::{self, ErrorKind}; +use std::path::Path; + +use Build; + +pub fn clean(build: &Build) { + rm_rf(build, "tmp".as_ref()); + rm_rf(build, &build.out.join("tmp")); + + for host in build.config.host.iter() { + let entries = match build.out.join(host).read_dir() { + Ok(iter) => iter, + Err(_) => continue, + }; + + for entry in entries { + let entry = t!(entry); + if entry.file_name().to_str() == Some("llvm") { + continue + } + let path = t!(entry.path().canonicalize()); + rm_rf(build, &path); + } + } +} + +fn rm_rf(build: &Build, path: &Path) { + if !path.exists() { + return + } + + for file in t!(fs::read_dir(path)) { + let file = t!(file).path(); + + if file.is_dir() { + rm_rf(build, &file); + } else { + // On windows we can't remove a readonly file, and git will + // often clone files as readonly. As a result, we have some + // special logic to remove readonly files on windows. 
+ do_op(&file, "remove file", |p| fs::remove_file(p)); + } + } + do_op(path, "remove dir", |p| fs::remove_dir(p)); +} + +fn do_op(path: &Path, desc: &str, mut f: F) + where F: FnMut(&Path) -> io::Result<()> +{ + match f(path) { + Ok(()) => {} + Err(ref e) if cfg!(windows) && + e.kind() == ErrorKind::PermissionDenied => { + let mut p = t!(path.metadata()).permissions(); + p.set_readonly(false); + t!(fs::set_permissions(path, p)); + f(path).unwrap_or_else(|e| { + panic!("failed to {} {}: {}", desc, path.display(), e); + }) + } + Err(e) => { + panic!("failed to {} {}: {}", desc, path.display(), e); + } + } +} diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs new file mode 100644 index 0000000000000..b268686ca6c3b --- /dev/null +++ b/src/bootstrap/compile.rs @@ -0,0 +1,400 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of compiling various phases of the compiler and standard +//! library. +//! +//! This module contains some of the real meat in the rustbuild build system +//! which is where Cargo is used to compiler the standard library, libtest, and +//! compiler. This module is also responsible for assembling the sysroot as it +//! goes along from the output of the previous stage. + +use std::cmp; +use std::collections::HashMap; +use std::fs::{self, File}; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use build_helper::output; +use filetime::FileTime; + +use util::{exe, libdir, mtime, is_dylib, copy}; +use {Build, Compiler, Mode}; + +/// Build the standard library. 
+/// +/// This will build the standard library for a particular stage of the build +/// using the `compiler` targeting the `target` architecture. The artifacts +/// created will also be linked into the sysroot directory. +pub fn std<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { + println!("Building stage{} std artifacts ({} -> {})", compiler.stage, + compiler.host, target); + + let libdir = build.sysroot_libdir(compiler, target); + let _ = fs::remove_dir_all(&libdir); + t!(fs::create_dir_all(&libdir)); + + // Some platforms have startup objects that may be required to produce the + // libstd dynamic library, for example. + build_startup_objects(build, target, &libdir); + + let out_dir = build.cargo_out(compiler, Mode::Libstd, target); + build.clear_if_dirty(&out_dir, &build.compiler_path(compiler)); + let mut cargo = build.cargo(compiler, Mode::Libstd, target, "build"); + cargo.arg("--features").arg(build.std_features()) + .arg("--manifest-path") + .arg(build.src.join("src/rustc/std_shim/Cargo.toml")); + + if let Some(target) = build.config.target_config.get(target) { + if let Some(ref jemalloc) = target.jemalloc { + cargo.env("JEMALLOC_OVERRIDE", jemalloc); + } + } + if target.contains("musl") { + if let Some(p) = build.musl_root(target) { + cargo.env("MUSL_ROOT", p); + } + } + + build.run(&mut cargo); + update_mtime(&libstd_stamp(build, &compiler, target)); + std_link(build, target, compiler.stage, compiler.host); +} + +/// Link all libstd rlibs/dylibs into the sysroot location. +/// +/// Links those artifacts generated in the given `stage` for `target` produced +/// by `compiler` into `host`'s sysroot. 
+pub fn std_link(build: &Build, + target: &str, + stage: u32, + host: &str) { + let compiler = Compiler::new(stage, &build.config.build); + let target_compiler = Compiler::new(compiler.stage, host); + let libdir = build.sysroot_libdir(&target_compiler, target); + let out_dir = build.cargo_out(&compiler, Mode::Libstd, target); + + // If we're linking one compiler host's output into another, then we weren't + // called from the `std` method above. In that case we clean out what's + // already there. + if host != compiler.host { + let _ = fs::remove_dir_all(&libdir); + t!(fs::create_dir_all(&libdir)); + } + add_to_sysroot(&out_dir, &libdir); + + if target.contains("musl") && !target.contains("mips") { + copy_musl_third_party_objects(build, target, &libdir); + } +} + +/// Copies the crt(1,i,n).o startup objects +/// +/// Only required for musl targets that statically link to libc +fn copy_musl_third_party_objects(build: &Build, target: &str, into: &Path) { + for &obj in &["crt1.o", "crti.o", "crtn.o"] { + copy(&build.musl_root(target).unwrap().join("lib").join(obj), &into.join(obj)); + } +} + +/// Build and prepare startup objects like rsbegin.o and rsend.o +/// +/// These are primarily used on Windows right now for linking executables/dlls. +/// They don't require any library support as they're just plain old object +/// files, so we just use the nightly snapshot compiler to always build them (as +/// no other compilers are guaranteed to be available). 
+fn build_startup_objects(build: &Build, target: &str, into: &Path) { + if !target.contains("pc-windows-gnu") { + return + } + let compiler = Compiler::new(0, &build.config.build); + let compiler_path = build.compiler_path(&compiler); + + for file in t!(fs::read_dir(build.src.join("src/rtstartup"))) { + let file = t!(file); + let mut cmd = Command::new(&compiler_path); + build.run(cmd.env("RUSTC_BOOTSTRAP", "1") + .arg("--target").arg(target) + .arg("--emit=obj") + .arg("--out-dir").arg(into) + .arg(file.path())); + } + + for obj in ["crt2.o", "dllcrt2.o"].iter() { + copy(&compiler_file(build.cc(target), obj), &into.join(obj)); + } +} + +/// Build libtest. +/// +/// This will build libtest and supporting libraries for a particular stage of +/// the build using the `compiler` targeting the `target` architecture. The +/// artifacts created will also be linked into the sysroot directory. +pub fn test<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { + println!("Building stage{} test artifacts ({} -> {})", compiler.stage, + compiler.host, target); + let out_dir = build.cargo_out(compiler, Mode::Libtest, target); + build.clear_if_dirty(&out_dir, &libstd_stamp(build, compiler, target)); + let mut cargo = build.cargo(compiler, Mode::Libtest, target, "build"); + cargo.arg("--manifest-path") + .arg(build.src.join("src/rustc/test_shim/Cargo.toml")); + build.run(&mut cargo); + update_mtime(&libtest_stamp(build, compiler, target)); + test_link(build, target, compiler.stage, compiler.host); +} + +/// Link all libtest rlibs/dylibs into the sysroot location. +/// +/// Links those artifacts generated in the given `stage` for `target` produced +/// by `compiler` into `host`'s sysroot. 
+pub fn test_link(build: &Build, + target: &str, + stage: u32, + host: &str) { + let compiler = Compiler::new(stage, &build.config.build); + let target_compiler = Compiler::new(compiler.stage, host); + let libdir = build.sysroot_libdir(&target_compiler, target); + let out_dir = build.cargo_out(&compiler, Mode::Libtest, target); + add_to_sysroot(&out_dir, &libdir); +} + +/// Build the compiler. +/// +/// This will build the compiler for a particular stage of the build using +/// the `compiler` targeting the `target` architecture. The artifacts +/// created will also be linked into the sysroot directory. +pub fn rustc<'a>(build: &'a Build, target: &str, compiler: &Compiler<'a>) { + println!("Building stage{} compiler artifacts ({} -> {})", + compiler.stage, compiler.host, target); + + let out_dir = build.cargo_out(compiler, Mode::Librustc, target); + build.clear_if_dirty(&out_dir, &libtest_stamp(build, compiler, target)); + + let mut cargo = build.cargo(compiler, Mode::Librustc, target, "build"); + cargo.arg("--features").arg(build.rustc_features()) + .arg("--manifest-path") + .arg(build.src.join("src/rustc/Cargo.toml")); + + // Set some configuration variables picked up by build scripts and + // the compiler alike + cargo.env("CFG_RELEASE", &build.release) + .env("CFG_RELEASE_CHANNEL", &build.config.channel) + .env("CFG_VERSION", &build.version) + .env("CFG_PREFIX", build.config.prefix.clone().unwrap_or(String::new())) + .env("CFG_LIBDIR_RELATIVE", "lib"); + + if let Some(ref ver_date) = build.ver_date { + cargo.env("CFG_VER_DATE", ver_date); + } + if let Some(ref ver_hash) = build.ver_hash { + cargo.env("CFG_VER_HASH", ver_hash); + } + if !build.unstable_features { + cargo.env("CFG_DISABLE_UNSTABLE_FEATURES", "1"); + } + // Flag that rust llvm is in use + if build.is_rust_llvm(target) { + cargo.env("LLVM_RUSTLLVM", "1"); + } + cargo.env("LLVM_CONFIG", build.llvm_config(target)); + let target_config = build.config.target_config.get(target); + if let Some(s) = 
target_config.and_then(|c| c.llvm_config.as_ref()) { + cargo.env("CFG_LLVM_ROOT", s); + } + if build.config.llvm_static_stdcpp { + cargo.env("LLVM_STATIC_STDCPP", + compiler_file(build.cxx(target), "libstdc++.a")); + } + if build.config.llvm_link_shared { + cargo.env("LLVM_LINK_SHARED", "1"); + } + if let Some(ref s) = build.config.rustc_default_linker { + cargo.env("CFG_DEFAULT_LINKER", s); + } + if let Some(ref s) = build.config.rustc_default_ar { + cargo.env("CFG_DEFAULT_AR", s); + } + build.run(&mut cargo); + + rustc_link(build, target, compiler.stage, compiler.host); +} + +/// Link all librustc rlibs/dylibs into the sysroot location. +/// +/// Links those artifacts generated in the given `stage` for `target` produced +/// by `compiler` into `host`'s sysroot. +pub fn rustc_link(build: &Build, + target: &str, + stage: u32, + host: &str) { + let compiler = Compiler::new(stage, &build.config.build); + let target_compiler = Compiler::new(compiler.stage, host); + let libdir = build.sysroot_libdir(&target_compiler, target); + let out_dir = build.cargo_out(&compiler, Mode::Librustc, target); + add_to_sysroot(&out_dir, &libdir); +} + +/// Cargo's output path for the standard library in a given stage, compiled +/// by a particular compiler for the specified target. +fn libstd_stamp(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { + build.cargo_out(compiler, Mode::Libstd, target).join(".libstd.stamp") +} + +/// Cargo's output path for libtest in a given stage, compiled by a particular +/// compiler for the specified target. 
+fn libtest_stamp(build: &Build, compiler: &Compiler, target: &str) -> PathBuf { + build.cargo_out(compiler, Mode::Libtest, target).join(".libtest.stamp") +} + +fn compiler_file(compiler: &Path, file: &str) -> PathBuf { + let out = output(Command::new(compiler) + .arg(format!("-print-file-name={}", file))); + PathBuf::from(out.trim()) +} + +/// Prepare a new compiler from the artifacts in `stage` +/// +/// This will assemble a compiler in `build/$host/stage$stage`. The compiler +/// must have been previously produced by the `stage - 1` build.config.build +/// compiler. +pub fn assemble_rustc(build: &Build, stage: u32, host: &str) { + // nothing to do in stage0 + if stage == 0 { + return + } + // The compiler that we're assembling + let target_compiler = Compiler::new(stage, host); + + // The compiler that compiled the compiler we're assembling + let build_compiler = Compiler::new(stage - 1, &build.config.build); + + // Clear out old files + let sysroot = build.sysroot(&target_compiler); + let _ = fs::remove_dir_all(&sysroot); + t!(fs::create_dir_all(&sysroot)); + + // Link in all dylibs to the libdir + let sysroot_libdir = sysroot.join(libdir(host)); + t!(fs::create_dir_all(&sysroot_libdir)); + let src_libdir = build.sysroot_libdir(&build_compiler, host); + for f in t!(fs::read_dir(&src_libdir)).map(|f| t!(f)) { + let filename = f.file_name().into_string().unwrap(); + if is_dylib(&filename) { + copy(&f.path(), &sysroot_libdir.join(&filename)); + } + } + + let out_dir = build.cargo_out(&build_compiler, Mode::Librustc, host); + + // Link the compiler binary itself into place + let rustc = out_dir.join(exe("rustc", host)); + let bindir = sysroot.join("bin"); + t!(fs::create_dir_all(&bindir)); + let compiler = build.compiler_path(&Compiler::new(stage, host)); + let _ = fs::remove_file(&compiler); + copy(&rustc, &compiler); + + // See if rustdoc exists to link it into place + let rustdoc = exe("rustdoc", host); + let rustdoc_src = out_dir.join(&rustdoc); + let 
rustdoc_dst = bindir.join(&rustdoc); + if fs::metadata(&rustdoc_src).is_ok() { + let _ = fs::remove_file(&rustdoc_dst); + copy(&rustdoc_src, &rustdoc_dst); + } +} + +/// Link some files into a rustc sysroot. +/// +/// For a particular stage this will link all of the contents of `out_dir` +/// into the sysroot of the `host` compiler, assuming the artifacts are +/// compiled for the specified `target`. +fn add_to_sysroot(out_dir: &Path, sysroot_dst: &Path) { + // Collect the set of all files in the dependencies directory, keyed + // off the name of the library. We assume everything is of the form + // `foo-.{rlib,so,...}`, and there could be multiple different + // `` values for the same name (of old builds). + let mut map = HashMap::new(); + for file in t!(fs::read_dir(out_dir.join("deps"))).map(|f| t!(f)) { + let filename = file.file_name().into_string().unwrap(); + + // We're only interested in linking rlibs + dylibs, other things like + // unit tests don't get linked in + if !filename.ends_with(".rlib") && + !filename.ends_with(".lib") && + !is_dylib(&filename) { + continue + } + let file = file.path(); + let dash = filename.find("-").unwrap(); + let key = (filename[..dash].to_string(), + file.extension().unwrap().to_owned()); + map.entry(key).or_insert(Vec::new()) + .push(file.clone()); + } + + // For all hash values found, pick the most recent one to move into the + // sysroot, that should be the one we just built. + for (_, paths) in map { + let (_, path) = paths.iter().map(|path| { + (mtime(&path).seconds(), path) + }).max().unwrap(); + copy(&path, &sysroot_dst.join(path.file_name().unwrap())); + } +} + +/// Build a tool in `src/tools` +/// +/// This will build the specified tool with the specified `host` compiler in +/// `stage` into the normal cargo output directory. 
+pub fn tool(build: &Build, stage: u32, host: &str, tool: &str) { + println!("Building stage{} tool {} ({})", stage, tool, host); + + let compiler = Compiler::new(stage, host); + + // FIXME: need to clear out previous tool and ideally deps, may require + // isolating output directories or require a pseudo shim step to + // clear out all the info. + // + // Maybe when libstd is compiled it should clear out the rustc of the + // corresponding stage? + // let out_dir = build.cargo_out(stage, &host, Mode::Librustc, target); + // build.clear_if_dirty(&out_dir, &libstd_stamp(build, stage, &host, target)); + + let mut cargo = build.cargo(&compiler, Mode::Tool, host, "build"); + cargo.arg("--manifest-path") + .arg(build.src.join(format!("src/tools/{}/Cargo.toml", tool))); + build.run(&mut cargo); +} + +/// Updates the mtime of a stamp file if necessary, only changing it if it's +/// older than some other file in the same directory. +/// +/// We don't know what file Cargo is going to output (because there's a hash in +/// the file name) but we know where it's going to put it. We use this helper to +/// detect changes to that output file by looking at the modification time for +/// all files in a directory and updating the stamp if any are newer. +fn update_mtime(path: &Path) { + let mut max = None; + if let Ok(entries) = path.parent().unwrap().read_dir() { + for entry in entries.map(|e| t!(e)) { + if t!(entry.file_type()).is_file() { + let meta = t!(entry.metadata()); + let time = FileTime::from_last_modification_time(&meta); + max = cmp::max(max, Some(time)); + } + } + } + + if !max.is_none() && max <= Some(mtime(path)) { + return + } + t!(File::create(path)); +} diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs new file mode 100644 index 0000000000000..60f65f623006c --- /dev/null +++ b/src/bootstrap/config.rs @@ -0,0 +1,512 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Serialized configuration of a build. +//! +//! This module implements parsing `config.mk` and `config.toml` configuration +//! files to tweak how the build runs. + +use std::collections::HashMap; +use std::env; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use std::process; + +use num_cpus; +use rustc_serialize::Decodable; +use toml::{Parser, Decoder, Value}; +use util::push_exe_path; + +/// Global configuration for the entire build and/or bootstrap. +/// +/// This structure is derived from a combination of both `config.toml` and +/// `config.mk`. As of the time of this writing it's unlikely that `config.toml` +/// is used all that much, so this is primarily filled out by `config.mk` which +/// is generated from `./configure`. +/// +/// Note that this structure is not decoded directly into, but rather it is +/// filled out from the decoded forms of the structs below. For documentation +/// each field, see the corresponding fields in +/// `src/bootstrap/config.toml.example`. 
+#[derive(Default)] +pub struct Config { + pub ccache: bool, + pub ninja: bool, + pub verbose: bool, + pub submodules: bool, + pub compiler_docs: bool, + pub docs: bool, + pub vendor: bool, + pub target_config: HashMap, + + // llvm codegen options + pub llvm_assertions: bool, + pub llvm_optimize: bool, + pub llvm_release_debuginfo: bool, + pub llvm_version_check: bool, + pub llvm_static_stdcpp: bool, + pub llvm_link_shared: bool, + + // rust codegen options + pub rust_optimize: bool, + pub rust_codegen_units: u32, + pub rust_debug_assertions: bool, + pub rust_debuginfo: bool, + pub rust_debuginfo_lines: bool, + pub rust_rpath: bool, + pub rustc_default_linker: Option, + pub rustc_default_ar: Option, + pub rust_optimize_tests: bool, + pub rust_debuginfo_tests: bool, + + pub build: String, + pub host: Vec, + pub target: Vec, + pub rustc: Option, + pub cargo: Option, + pub local_rebuild: bool, + + // libstd features + pub debug_jemalloc: bool, + pub use_jemalloc: bool, + pub backtrace: bool, // support for RUST_BACKTRACE + + // misc + pub channel: String, + pub quiet_tests: bool, + // Fallback musl-root for all targets + pub musl_root: Option, + pub prefix: Option, + pub docdir: Option, + pub libdir: Option, + pub mandir: Option, + pub codegen_tests: bool, + pub nodejs: Option, + pub gdb: Option, + pub python: Option, +} + +/// Per-target configuration stored in the global configuration structure. +#[derive(Default)] +pub struct Target { + pub llvm_config: Option, + pub jemalloc: Option, + pub cc: Option, + pub cxx: Option, + pub ndk: Option, + pub musl_root: Option, +} + +/// Structure of the `config.toml` file that configuration is read from. +/// +/// This structure uses `Decodable` to automatically decode a TOML configuration +/// file into this format, and then this is traversed and written into the above +/// `Config` structure. 
+#[derive(RustcDecodable, Default)] +struct TomlConfig { + build: Option, + llvm: Option, + rust: Option, + target: Option>, +} + +/// TOML representation of various global build decisions. +#[derive(RustcDecodable, Default, Clone)] +struct Build { + build: Option, + host: Vec, + target: Vec, + cargo: Option, + rustc: Option, + compiler_docs: Option, + docs: Option, + submodules: Option, + gdb: Option, + vendor: Option, + nodejs: Option, + python: Option, +} + +/// TOML representation of how the LLVM build is configured. +#[derive(RustcDecodable, Default)] +struct Llvm { + ccache: Option, + ninja: Option, + assertions: Option, + optimize: Option, + release_debuginfo: Option, + version_check: Option, + static_libstdcpp: Option, +} + +/// TOML representation of how the Rust build is configured. +#[derive(RustcDecodable, Default)] +struct Rust { + optimize: Option, + codegen_units: Option, + debug_assertions: Option, + debuginfo: Option, + debuginfo_lines: Option, + debug_jemalloc: Option, + use_jemalloc: Option, + backtrace: Option, + default_linker: Option, + default_ar: Option, + channel: Option, + musl_root: Option, + rpath: Option, + optimize_tests: Option, + debuginfo_tests: Option, + codegen_tests: Option, +} + +/// TOML representation of how each build target is configured. 
+#[derive(RustcDecodable, Default)] +struct TomlTarget { + llvm_config: Option, + jemalloc: Option, + cc: Option, + cxx: Option, + android_ndk: Option, + musl_root: Option, +} + +impl Config { + pub fn parse(build: &str, file: Option) -> Config { + let mut config = Config::default(); + config.llvm_optimize = true; + config.use_jemalloc = true; + config.backtrace = true; + config.rust_optimize = true; + config.rust_optimize_tests = true; + config.submodules = true; + config.docs = true; + config.rust_rpath = true; + config.rust_codegen_units = 1; + config.build = build.to_string(); + config.channel = "dev".to_string(); + config.codegen_tests = true; + + let toml = file.map(|file| { + let mut f = t!(File::open(&file)); + let mut toml = String::new(); + t!(f.read_to_string(&mut toml)); + let mut p = Parser::new(&toml); + let table = match p.parse() { + Some(table) => table, + None => { + println!("failed to parse TOML configuration:"); + for err in p.errors.iter() { + let (loline, locol) = p.to_linecol(err.lo); + let (hiline, hicol) = p.to_linecol(err.hi); + println!("{}:{}-{}:{}: {}", loline, locol, hiline, + hicol, err.desc); + } + process::exit(2); + } + }; + let mut d = Decoder::new(Value::Table(table)); + match Decodable::decode(&mut d) { + Ok(cfg) => cfg, + Err(e) => { + println!("failed to decode TOML: {}", e); + process::exit(2); + } + } + }).unwrap_or_else(|| TomlConfig::default()); + + let build = toml.build.clone().unwrap_or(Build::default()); + set(&mut config.build, build.build.clone()); + config.host.push(config.build.clone()); + for host in build.host.iter() { + if !config.host.contains(host) { + config.host.push(host.clone()); + } + } + for target in config.host.iter().chain(&build.target) { + if !config.target.contains(target) { + config.target.push(target.clone()); + } + } + config.rustc = build.rustc.map(PathBuf::from); + config.cargo = build.cargo.map(PathBuf::from); + config.nodejs = build.nodejs.map(PathBuf::from); + config.gdb = 
build.gdb.map(PathBuf::from); + config.python = build.python.map(PathBuf::from); + set(&mut config.compiler_docs, build.compiler_docs); + set(&mut config.docs, build.docs); + set(&mut config.submodules, build.submodules); + set(&mut config.vendor, build.vendor); + + if let Some(ref llvm) = toml.llvm { + set(&mut config.ccache, llvm.ccache); + set(&mut config.ninja, llvm.ninja); + set(&mut config.llvm_assertions, llvm.assertions); + set(&mut config.llvm_optimize, llvm.optimize); + set(&mut config.llvm_release_debuginfo, llvm.release_debuginfo); + set(&mut config.llvm_version_check, llvm.version_check); + set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp); + } + if let Some(ref rust) = toml.rust { + set(&mut config.rust_debug_assertions, rust.debug_assertions); + set(&mut config.rust_debuginfo, rust.debuginfo); + set(&mut config.rust_debuginfo_lines, rust.debuginfo_lines); + set(&mut config.rust_optimize, rust.optimize); + set(&mut config.rust_optimize_tests, rust.optimize_tests); + set(&mut config.rust_debuginfo_tests, rust.debuginfo_tests); + set(&mut config.codegen_tests, rust.codegen_tests); + set(&mut config.rust_rpath, rust.rpath); + set(&mut config.debug_jemalloc, rust.debug_jemalloc); + set(&mut config.use_jemalloc, rust.use_jemalloc); + set(&mut config.backtrace, rust.backtrace); + set(&mut config.channel, rust.channel.clone()); + config.rustc_default_linker = rust.default_linker.clone(); + config.rustc_default_ar = rust.default_ar.clone(); + config.musl_root = rust.musl_root.clone().map(PathBuf::from); + + match rust.codegen_units { + Some(0) => config.rust_codegen_units = num_cpus::get() as u32, + Some(n) => config.rust_codegen_units = n, + None => {} + } + } + + if let Some(ref t) = toml.target { + for (triple, cfg) in t { + let mut target = Target::default(); + + if let Some(ref s) = cfg.llvm_config { + target.llvm_config = Some(env::current_dir().unwrap().join(s)); + } + if let Some(ref s) = cfg.jemalloc { + target.jemalloc = 
Some(env::current_dir().unwrap().join(s)); + } + if let Some(ref s) = cfg.android_ndk { + target.ndk = Some(env::current_dir().unwrap().join(s)); + } + target.cxx = cfg.cxx.clone().map(PathBuf::from); + target.cc = cfg.cc.clone().map(PathBuf::from); + target.musl_root = cfg.musl_root.clone().map(PathBuf::from); + + config.target_config.insert(triple.clone(), target); + } + } + + return config + } + + /// "Temporary" routine to parse `config.mk` into this configuration. + /// + /// While we still have `./configure` this implements the ability to decode + /// that configuration into this. This isn't exactly a full-blown makefile + /// parser, but hey it gets the job done! + pub fn update_with_config_mk(&mut self) { + let mut config = String::new(); + File::open("config.mk").unwrap().read_to_string(&mut config).unwrap(); + for line in config.lines() { + let mut parts = line.splitn(2, ":=").map(|s| s.trim()); + let key = parts.next().unwrap(); + let value = match parts.next() { + Some(n) if n.starts_with('\"') => &n[1..n.len() - 1], + Some(n) => n, + None => continue + }; + + macro_rules! check { + ($(($name:expr, $val:expr),)*) => { + if value == "1" { + $( + if key == concat!("CFG_ENABLE_", $name) { + $val = true; + continue + } + if key == concat!("CFG_DISABLE_", $name) { + $val = false; + continue + } + )* + } + } + } + + check! 
{ + ("CCACHE", self.ccache), + ("MANAGE_SUBMODULES", self.submodules), + ("COMPILER_DOCS", self.compiler_docs), + ("DOCS", self.docs), + ("LLVM_ASSERTIONS", self.llvm_assertions), + ("LLVM_RELEASE_DEBUGINFO", self.llvm_release_debuginfo), + ("OPTIMIZE_LLVM", self.llvm_optimize), + ("LLVM_VERSION_CHECK", self.llvm_version_check), + ("LLVM_STATIC_STDCPP", self.llvm_static_stdcpp), + ("LLVM_LINK_SHARED", self.llvm_link_shared), + ("OPTIMIZE", self.rust_optimize), + ("DEBUG_ASSERTIONS", self.rust_debug_assertions), + ("DEBUGINFO", self.rust_debuginfo), + ("DEBUGINFO_LINES", self.rust_debuginfo_lines), + ("JEMALLOC", self.use_jemalloc), + ("DEBUG_JEMALLOC", self.debug_jemalloc), + ("RPATH", self.rust_rpath), + ("OPTIMIZE_TESTS", self.rust_optimize_tests), + ("DEBUGINFO_TESTS", self.rust_debuginfo_tests), + ("QUIET_TESTS", self.quiet_tests), + ("LOCAL_REBUILD", self.local_rebuild), + ("NINJA", self.ninja), + ("CODEGEN_TESTS", self.codegen_tests), + ("VENDOR", self.vendor), + } + + match key { + "CFG_BUILD" => self.build = value.to_string(), + "CFG_HOST" => { + self.host = value.split(" ").map(|s| s.to_string()) + .collect(); + } + "CFG_TARGET" => { + self.target = value.split(" ").map(|s| s.to_string()) + .collect(); + } + "CFG_MUSL_ROOT" if value.len() > 0 => { + self.musl_root = Some(parse_configure_path(value)); + } + "CFG_MUSL_ROOT_X86_64" if value.len() > 0 => { + let target = "x86_64-unknown-linux-musl".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.musl_root = Some(parse_configure_path(value)); + } + "CFG_MUSL_ROOT_I686" if value.len() > 0 => { + let target = "i686-unknown-linux-musl".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.musl_root = Some(parse_configure_path(value)); + } + "CFG_MUSL_ROOT_ARM" if value.len() > 0 => { + let target = "arm-unknown-linux-musleabi".to_string(); + let target = self.target_config.entry(target) + 
.or_insert(Target::default()); + target.musl_root = Some(parse_configure_path(value)); + } + "CFG_MUSL_ROOT_ARMHF" if value.len() > 0 => { + let target = "arm-unknown-linux-musleabihf".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.musl_root = Some(parse_configure_path(value)); + } + "CFG_MUSL_ROOT_ARMV7" if value.len() > 0 => { + let target = "armv7-unknown-linux-musleabihf".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.musl_root = Some(parse_configure_path(value)); + } + "CFG_DEFAULT_AR" if value.len() > 0 => { + self.rustc_default_ar = Some(value.to_string()); + } + "CFG_DEFAULT_LINKER" if value.len() > 0 => { + self.rustc_default_linker = Some(value.to_string()); + } + "CFG_GDB" if value.len() > 0 => { + self.gdb = Some(parse_configure_path(value)); + } + "CFG_RELEASE_CHANNEL" => { + self.channel = value.to_string(); + } + "CFG_PREFIX" => { + self.prefix = Some(value.to_string()); + } + "CFG_DOCDIR" => { + self.docdir = Some(value.to_string()); + } + "CFG_LIBDIR" => { + self.libdir = Some(value.to_string()); + } + "CFG_MANDIR" => { + self.mandir = Some(value.to_string()); + } + "CFG_LLVM_ROOT" if value.len() > 0 => { + let target = self.target_config.entry(self.build.clone()) + .or_insert(Target::default()); + let root = parse_configure_path(value); + target.llvm_config = Some(push_exe_path(root, &["bin", "llvm-config"])); + } + "CFG_JEMALLOC_ROOT" if value.len() > 0 => { + let target = self.target_config.entry(self.build.clone()) + .or_insert(Target::default()); + target.jemalloc = Some(parse_configure_path(value)); + } + "CFG_ARM_LINUX_ANDROIDEABI_NDK" if value.len() > 0 => { + let target = "arm-linux-androideabi".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.ndk = Some(parse_configure_path(value)); + } + "CFG_ARMV7_LINUX_ANDROIDEABI_NDK" if value.len() > 0 => { + let target = 
"armv7-linux-androideabi".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.ndk = Some(parse_configure_path(value)); + } + "CFG_I686_LINUX_ANDROID_NDK" if value.len() > 0 => { + let target = "i686-linux-android".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.ndk = Some(parse_configure_path(value)); + } + "CFG_AARCH64_LINUX_ANDROID_NDK" if value.len() > 0 => { + let target = "aarch64-linux-android".to_string(); + let target = self.target_config.entry(target) + .or_insert(Target::default()); + target.ndk = Some(parse_configure_path(value)); + } + "CFG_LOCAL_RUST_ROOT" if value.len() > 0 => { + let path = parse_configure_path(value); + self.rustc = Some(push_exe_path(path.clone(), &["bin", "rustc"])); + self.cargo = Some(push_exe_path(path, &["bin", "cargo"])); + } + "CFG_PYTHON" if value.len() > 0 => { + let path = parse_configure_path(value); + self.python = Some(path); + } + _ => {} + } + } + } +} + +#[cfg(not(windows))] +fn parse_configure_path(path: &str) -> PathBuf { + path.into() +} + +#[cfg(windows)] +fn parse_configure_path(path: &str) -> PathBuf { + // on windows, configure produces unix style paths e.g. /c/some/path but we + // only want real windows paths + + use std::process::Command; + use build_helper; + + // '/' is invalid in windows paths, so we can detect unix paths by the presence of it + if !path.contains('/') { + return path.into(); + } + + let win_path = build_helper::output(Command::new("cygpath").arg("-w").arg(path)); + let win_path = win_path.trim(); + + win_path.into() +} + +fn set(field: &mut T, val: Option) { + if let Some(v) = val { + *field = v; + } +} diff --git a/src/bootstrap/config.toml.example b/src/bootstrap/config.toml.example new file mode 100644 index 0000000000000..b6774b3af20a5 --- /dev/null +++ b/src/bootstrap/config.toml.example @@ -0,0 +1,196 @@ +# Sample TOML configuration file for building Rust. 
+# +# To configure rustbuild, copy this file to the directory from which you will be +# running the build, and name it config.toml. +# +# All options are commented out by default in this file, and they're commented +# out with their default values. The build system by default looks for +# `config.toml` in the current directory of a build for build configuration, but +# a custom configuration file can also be specified with `--config` to the build +# system. + +# ============================================================================= +# Tweaking how LLVM is compiled +# ============================================================================= +[llvm] + +# Indicates whether the LLVM build is a Release or Debug build +#optimize = true + +# Indicates whether an LLVM Release build should include debug info +#release-debuginfo = false + +# Indicates whether the LLVM assertions are enabled or not +#assertions = false + +# Indicates whether ccache is used when building LLVM +#ccache = false + +# If an external LLVM root is specified, we automatically check the version by +# default to make sure it's within the range that we're expecting, but setting +# this flag will indicate that this version check should not be done. +#version-check = false + +# Link libstdc++ statically into the librustc_llvm instead of relying on a +# dynamic version to be available. +#static-libstdcpp = false + +# Tell the LLVM build system to use Ninja instead of the platform default for +# the generated build system. This can sometimes be faster than make, for +# example. +#ninja = false + +# ============================================================================= +# General build configuration options +# ============================================================================= +[build] + +# Build triple for the original snapshot compiler. This must be a compiler that +# nightlies are already produced for. 
The current platform must be able to run +# binaries of this build triple and the nightly will be used to bootstrap the +# first compiler. +#build = "x86_64-unknown-linux-gnu" # defaults to your host platform + +# In addition to the build triple, other triples to produce full compiler +# toolchains for. Each of these triples will be bootstrapped from the build +# triple and then will continue to bootstrap themselves. This platform must +# currently be able to run all of the triples provided here. +#host = ["x86_64-unknown-linux-gnu"] # defaults to just the build triple + +# In addition to all host triples, other triples to produce the standard library +# for. Each host triple will be used to produce a copy of the standard library +# for each target triple. +#target = ["x86_64-unknown-linux-gnu"] # defaults to just the build triple + +# Instead of downloading the src/nightlies.txt version of Cargo specified, use +# this Cargo binary instead to build all Rust code +#cargo = "/path/to/bin/cargo" + +# Instead of downloading the src/nightlies.txt version of the compiler +# specified, use this rustc binary instead as the stage0 snapshot compiler. +#rustc = "/path/to/bin/rustc" + +# Flag to specify whether any documentation is built. If false, rustdoc and +# friends will still be compiled but they will not be used to generate any +# documentation. +#docs = true + +# Indicate whether the compiler should be documented in addition to the standard +# library and facade crates. +#compiler-docs = false + +# Indicate whether submodules are managed and updated automatically. +#submodules = true + +# The path to (or name of) the GDB executable to use. This is only used for +# executing the debuginfo test suite. +#gdb = "gdb" + +# The node.js executable to use. Note that this is only used for the emscripten +# target when running tests, otherwise this can be omitted. 
+#nodejs = "node" + +# Python interpreter to use for various tasks throughout the build, notably +# rustdoc tests, the lldb python interpreter, and some dist bits and pieces. +# Note that Python 2 is currently required. +#python = "python2.7" + +# Indicate whether the vendored sources are used for Rust dependencies or not +#vendor = false + +# ============================================================================= +# Options for compiling Rust code itself +# ============================================================================= +[rust] + +# Whether or not to optimize the compiler and standard library +#optimize = true + +# Number of codegen units to use for each compiler invocation. A value of 0 +# means "the number of cores on this machine", and 1+ is passed through to the +# compiler. +#codegen-units = 1 + +# Whether or not debug assertions are enabled for the compiler and standard +# library +#debug-assertions = false + +# Whether or not debuginfo is emitted +#debuginfo = false + +# Whether or not line number debug information is emitted +#debuginfo-lines = false + +# Whether or not jemalloc is built and enabled +#use-jemalloc = true + +# Whether or not jemalloc is built with its debug option set +#debug-jemalloc = false + +# Whether or not `panic!`s generate backtraces (RUST_BACKTRACE) +#backtrace = true + +# The default linker that will be used by the generated compiler. Note that this +# is not the linker used to link said compiler. +#default-linker = "cc" + +# The default ar utility that will be used by the generated compiler if LLVM +# cannot be used. Note that this is not used to assemble said compiler. +#default-ar = "ar" + +# The "channel" for the Rust build to produce. 
The stable/beta channels only +# allow using stable features, whereas the nightly and dev channels allow using +# nightly features +#channel = "dev" + +# By default the `rustc` executable is built with `-Wl,-rpath` flags on Unix +# platforms to ensure that the compiler is usable by default from the build +# directory (as it links to a number of dynamic libraries). This may not be +# desired in distributions, for example. +#rpath = true + +# Flag indicating whether tests are compiled with optimizations (the -O flag) or +# with debuginfo (the -g flag) +#optimize-tests = true +#debuginfo-tests = true + +# Flag indicating whether codegen tests will be run or not. If you get an error +# saying that the FileCheck executable is missing, you may want to disable this. +#codegen-tests = true + +# ============================================================================= +# Options for specific targets +# +# Each of the following options is scoped to the specific target triple in +# question and is used for determining how to compile each target. +# ============================================================================= +[target.x86_64-unknown-linux-gnu] + +# C compiler to be used to compiler C code and link Rust code. Note that the +# default value is platform specific, and if not specified it may also depend on +# what platform is crossing to what platform. +#cc = "cc" + +# C++ compiler to be used to compiler C++ code (e.g. LLVM and our LLVM shims). +# This is only used for host targets. +#cxx = "c++" + +# Path to the `llvm-config` binary of the installation of a custom LLVM to link +# against. Note that if this is specifed we don't compile LLVM at all for this +# target. +#llvm-config = "../path/to/llvm/root/bin/llvm-config" + +# Path to the custom jemalloc static library to link into the standard library +# by default. 
This is only used if jemalloc is still enabled above +#jemalloc = "/path/to/jemalloc/libjemalloc_pic.a" + +# If this target is for Android, this option will be required to specify where +# the NDK for the target lives. This is used to find the C compiler to link and +# build native code. +#android-ndk = "/path/to/ndk" + +# The root location of the MUSL installation directory. The library directory +# will also need to contain libunwind.a for an unwinding implementation. Note +# that this option only makes sense for MUSL targets that produce statically +# linked binaries +#musl-root = "..." diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs new file mode 100644 index 0000000000000..d603455122eb2 --- /dev/null +++ b/src/bootstrap/dist.rs @@ -0,0 +1,422 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of the various distribution aspects of the compiler. +//! +//! This module is responsible for creating tarballs of the standard library, +//! compiler, and documentation. This ends up being what we distribute to +//! everyone as well. +//! +//! No tarball is actually created literally in this file, but rather we shell +//! out to `rust-installer` still. This may one day be replaced with bits and +//! pieces of `rustup.rs`! + +use std::fs::{self, File}; +use std::io::Write; +use std::path::{PathBuf, Path}; +use std::process::Command; + +use {Build, Compiler}; +use util::{cp_r, libdir, is_dylib, cp_filtered, copy}; + +pub fn package_vers(build: &Build) -> &str { + match &build.config.channel[..] 
{ + "stable" => &build.release, + "beta" => "beta", + "nightly" => "nightly", + _ => &build.release, + } +} + +fn distdir(build: &Build) -> PathBuf { + build.out.join("dist") +} + +pub fn tmpdir(build: &Build) -> PathBuf { + build.out.join("tmp/dist") +} + +/// Builds the `rust-docs` installer component. +/// +/// Slurps up documentation from the `stage`'s `host`. +pub fn docs(build: &Build, stage: u32, host: &str) { + println!("Dist docs stage{} ({})", stage, host); + let name = format!("rust-docs-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-{}-image", name, name)); + let _ = fs::remove_dir_all(&image); + + let dst = image.join("share/doc/rust/html"); + t!(fs::create_dir_all(&dst)); + let src = build.out.join(host).join("doc"); + cp_r(&src, &dst); + + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust-Documentation") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=Rust-documentation-is-installed.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--package-name={}-{}", name, host)) + .arg("--component-name=rust-docs") + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--bulk-dirs=share/doc/rust/html"); + build.run(&mut cmd); + t!(fs::remove_dir_all(&image)); + + // As part of this step, *also* copy the docs directory to a directory which + // buildbot typically uploads. + if host == build.config.build { + let dst = distdir(build).join("doc").join(&build.package_vers); + t!(fs::create_dir_all(&dst)); + cp_r(&src, &dst); + } +} + +/// Build the `rust-mingw` installer component. +/// +/// This contains all the bits and pieces to run the MinGW Windows targets +/// without any extra installed software (e.g. we bundle gcc, libraries, etc). 
+/// Currently just shells out to a python script, but that should be rewritten +/// in Rust. +pub fn mingw(build: &Build, host: &str) { + println!("Dist mingw ({})", host); + let name = format!("rust-mingw-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-{}-image", name, host)); + let _ = fs::remove_dir_all(&image); + + // The first argument to the script is a "temporary directory" which is just + // thrown away (this contains the runtime DLLs included in the rustc package + // above) and the second argument is where to place all the MinGW components + // (which is what we want). + // + // FIXME: this script should be rewritten into Rust + let mut cmd = Command::new(build.python()); + cmd.arg(build.src.join("src/etc/make-win-dist.py")) + .arg(tmpdir(build)) + .arg(&image) + .arg(host); + build.run(&mut cmd); + + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust-MinGW") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=Rust-MinGW-is-installed.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--package-name={}-{}", name, host)) + .arg("--component-name=rust-mingw") + .arg("--legacy-manifest-dirs=rustlib,cargo"); + build.run(&mut cmd); + t!(fs::remove_dir_all(&image)); +} + +/// Creates the `rustc` installer component. 
+pub fn rustc(build: &Build, stage: u32, host: &str) { + println!("Dist rustc stage{} ({})", stage, host); + let name = format!("rustc-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-{}-image", name, host)); + let _ = fs::remove_dir_all(&image); + let overlay = tmpdir(build).join(format!("{}-{}-overlay", name, host)); + let _ = fs::remove_dir_all(&overlay); + + // Prepare the rustc "image", what will actually end up getting installed + prepare_image(build, stage, host, &image); + + // Prepare the overlay which is part of the tarball but won't actually be + // installed + let cp = |file: &str| { + install(&build.src.join(file), &overlay, 0o644); + }; + cp("COPYRIGHT"); + cp("LICENSE-APACHE"); + cp("LICENSE-MIT"); + cp("README.md"); + // tiny morsel of metadata is used by rust-packaging + let version = &build.version; + t!(t!(File::create(overlay.join("version"))).write_all(version.as_bytes())); + + // On MinGW we've got a few runtime DLL dependencies that we need to + // include. The first argument to this script is where to put these DLLs + // (the image we're creating), and the second argument is a junk directory + // to ignore all other MinGW stuff the script creates. + // + // On 32-bit MinGW we're always including a DLL which needs some extra + // licenses to distribute. On 64-bit MinGW we don't actually distribute + // anything requiring us to distribute a license, but it's likely the + // install will *also* include the rust-mingw package, which also needs + // licenses, so to be safe we just include it here in all MinGW packages. 
+ // + // FIXME: this script should be rewritten into Rust + if host.contains("pc-windows-gnu") { + let mut cmd = Command::new(build.python()); + cmd.arg(build.src.join("src/etc/make-win-dist.py")) + .arg(&image) + .arg(tmpdir(build)) + .arg(host); + build.run(&mut cmd); + + let dst = image.join("share/doc"); + t!(fs::create_dir_all(&dst)); + cp_r(&build.src.join("src/etc/third-party"), &dst); + } + + // Finally, wrap everything up in a nice tarball! + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=Rust-is-ready-to-roll.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--non-installed-overlay={}", sanitize_sh(&overlay))) + .arg(format!("--package-name={}-{}", name, host)) + .arg("--component-name=rustc") + .arg("--legacy-manifest-dirs=rustlib,cargo"); + build.run(&mut cmd); + t!(fs::remove_dir_all(&image)); + t!(fs::remove_dir_all(&overlay)); + + fn prepare_image(build: &Build, stage: u32, host: &str, image: &Path) { + let src = build.sysroot(&Compiler::new(stage, host)); + let libdir = libdir(host); + + // Copy rustc/rustdoc binaries + t!(fs::create_dir_all(image.join("bin"))); + cp_r(&src.join("bin"), &image.join("bin")); + + // Copy runtime DLLs needed by the compiler + if libdir != "bin" { + for entry in t!(src.join(libdir).read_dir()).map(|e| t!(e)) { + let name = entry.file_name(); + if let Some(s) = name.to_str() { + if is_dylib(s) { + install(&entry.path(), &image.join(libdir), 0o644); + } + } + } + } + + // Man pages + t!(fs::create_dir_all(image.join("share/man/man1"))); + cp_r(&build.src.join("man"), &image.join("share/man/man1")); + + // Debugger scripts + debugger_scripts(build, &image, host); + + // Misc license info + let cp = |file: &str| { + 
install(&build.src.join(file), &image.join("share/doc/rust"), 0o644); + }; + cp("COPYRIGHT"); + cp("LICENSE-APACHE"); + cp("LICENSE-MIT"); + cp("README.md"); + } +} + +/// Copies debugger scripts for `host` into the `sysroot` specified. +pub fn debugger_scripts(build: &Build, + sysroot: &Path, + host: &str) { + let cp_debugger_script = |file: &str| { + let dst = sysroot.join("lib/rustlib/etc"); + t!(fs::create_dir_all(&dst)); + install(&build.src.join("src/etc/").join(file), &dst, 0o644); + }; + if host.contains("windows-msvc") { + // no debugger scripts + } else { + cp_debugger_script("debugger_pretty_printers_common.py"); + + // gdb debugger scripts + install(&build.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), + 0o755); + + cp_debugger_script("gdb_load_rust_pretty_printers.py"); + cp_debugger_script("gdb_rust_pretty_printing.py"); + + // lldb debugger scripts + install(&build.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), + 0o755); + + cp_debugger_script("lldb_rust_formatters.py"); + } +} + +/// Creates the `rust-std` installer component as compiled by `compiler` for the +/// target `target`. 
+pub fn std(build: &Build, compiler: &Compiler, target: &str) { + println!("Dist std stage{} ({} -> {})", compiler.stage, compiler.host, + target); + let name = format!("rust-std-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-{}-image", name, target)); + let _ = fs::remove_dir_all(&image); + + let dst = image.join("lib/rustlib").join(target); + t!(fs::create_dir_all(&dst)); + let src = build.sysroot(compiler).join("lib/rustlib"); + cp_r(&src.join(target), &dst); + + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=std-is-standing-at-the-ready.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--package-name={}-{}", name, target)) + .arg(format!("--component-name=rust-std-{}", target)) + .arg("--legacy-manifest-dirs=rustlib,cargo"); + build.run(&mut cmd); + t!(fs::remove_dir_all(&image)); +} + +/// Creates the `rust-src` installer component and the plain source tarball +pub fn rust_src(build: &Build) { + println!("Dist src"); + let plain_name = format!("rustc-{}-src", package_vers(build)); + let name = format!("rust-src-{}", package_vers(build)); + let image = tmpdir(build).join(format!("{}-image", name)); + let _ = fs::remove_dir_all(&image); + + let dst = image.join("lib/rustlib/src"); + let dst_src = dst.join("rust"); + let plain_dst_src = dst.join(&plain_name); + t!(fs::create_dir_all(&dst_src)); + + // This is the set of root paths which will become part of the source package + let src_files = [ + "COPYRIGHT", + "LICENSE-APACHE", + "LICENSE-MIT", + "CONTRIBUTING.md", + "README.md", + "RELEASES.md", + "configure", + "Makefile.in" + ]; + let src_dirs = [ + "man", + "src", + "mk" + ]; + + let filter_fn = move |path: &Path| { + let spath = 
match path.to_str() { + Some(path) => path, + None => return false, + }; + if spath.ends_with("~") || spath.ends_with(".pyc") { + return false + } + if spath.contains("llvm/test") || spath.contains("llvm\\test") { + if spath.ends_with(".ll") || + spath.ends_with(".td") || + spath.ends_with(".s") { + return false + } + } + + let excludes = [ + "CVS", "RCS", "SCCS", ".git", ".gitignore", ".gitmodules", + ".gitattributes", ".cvsignore", ".svn", ".arch-ids", "{arch}", + "=RELEASE-ID", "=meta-update", "=update", ".bzr", ".bzrignore", + ".bzrtags", ".hg", ".hgignore", ".hgrags", "_darcs", + ]; + !path.iter() + .map(|s| s.to_str().unwrap()) + .any(|s| excludes.contains(&s)) + }; + + // Copy the directories using our filter + for item in &src_dirs { + let dst = &dst_src.join(item); + t!(fs::create_dir(dst)); + cp_filtered(&build.src.join(item), dst, &filter_fn); + } + // Copy the files normally + for item in &src_files { + copy(&build.src.join(item), &dst_src.join(item)); + } + + // Create source tarball in rust-installer format + let mut cmd = Command::new("sh"); + cmd.arg(sanitize_sh(&build.src.join("src/rust-installer/gen-installer.sh"))) + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=Awesome-Source.") + .arg(format!("--image-dir={}", sanitize_sh(&image))) + .arg(format!("--work-dir={}", sanitize_sh(&tmpdir(build)))) + .arg(format!("--output-dir={}", sanitize_sh(&distdir(build)))) + .arg(format!("--package-name={}", name)) + .arg("--component-name=rust-src") + .arg("--legacy-manifest-dirs=rustlib,cargo"); + build.run(&mut cmd); + + // Rename directory, so that root folder of tarball has the correct name + t!(fs::rename(&dst_src, &plain_dst_src)); + + // Create the version file + write_file(&plain_dst_src.join("version"), build.version.as_bytes()); + + // Create plain source tarball + let mut cmd = Command::new("tar"); + cmd.arg("-czf").arg(sanitize_sh(&distdir(build).join(&format!("{}.tar.gz", plain_name)))) + 
.arg(&plain_name) + .current_dir(&dst); + build.run(&mut cmd); + + t!(fs::remove_dir_all(&image)); +} + +fn install(src: &Path, dstdir: &Path, perms: u32) { + let dst = dstdir.join(src.file_name().unwrap()); + t!(fs::create_dir_all(dstdir)); + t!(fs::copy(src, &dst)); + chmod(&dst, perms); +} + +#[cfg(unix)] +fn chmod(path: &Path, perms: u32) { + use std::os::unix::fs::*; + t!(fs::set_permissions(path, fs::Permissions::from_mode(perms))); +} +#[cfg(windows)] +fn chmod(_path: &Path, _perms: u32) {} + +// We have to run a few shell scripts, which choke quite a bit on both `\` +// characters and on `C:\` paths, so normalize both of them away. +pub fn sanitize_sh(path: &Path) -> String { + let path = path.to_str().unwrap().replace("\\", "/"); + return change_drive(&path).unwrap_or(path); + + fn change_drive(s: &str) -> Option { + let mut ch = s.chars(); + let drive = ch.next().unwrap_or('C'); + if ch.next() != Some(':') { + return None + } + if ch.next() != Some('/') { + return None + } + Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..])) + } +} + +fn write_file(path: &Path, data: &[u8]) { + let mut vf = t!(fs::File::create(path)); + t!(vf.write_all(data)); +} diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs new file mode 100644 index 0000000000000..30c7fefad8745 --- /dev/null +++ b/src/bootstrap/doc.rs @@ -0,0 +1,214 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Documentation generation for rustbuild. +//! +//! This module implements generation for all bits and pieces of documentation +//! for the Rust project. This notably includes suites like the rust book, the +//! nomicon, standalone documentation, etc. +//! +//! 
Everything here is basically just a shim around calling either `rustbook` or +//! `rustdoc`. + +use std::fs::{self, File}; +use std::io::prelude::*; +use std::process::Command; + +use {Build, Compiler, Mode}; +use util::{up_to_date, cp_r}; + +/// Invoke `rustbook` as compiled in `stage` for `target` for the doc book +/// `name` into the `out` path. +/// +/// This will not actually generate any documentation if the documentation has +/// already been generated. +pub fn rustbook(build: &Build, stage: u32, target: &str, name: &str) { + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + + let out = out.join(name); + let compiler = Compiler::new(stage, &build.config.build); + let src = build.src.join("src/doc").join(name); + let index = out.join("index.html"); + let rustbook = build.tool(&compiler, "rustbook"); + if up_to_date(&src, &index) && up_to_date(&rustbook, &index) { + return + } + println!("Rustbook stage{} ({}) - {}", stage, target, name); + let _ = fs::remove_dir_all(&out); + build.run(build.tool_cmd(&compiler, "rustbook") + .arg("build") + .arg(&src) + .arg(out)); +} + +/// Generates all standalone documentation as compiled by the rustdoc in `stage` +/// for the `target` into `out`. +/// +/// This will list all of `src/doc` looking for markdown files and appropriately +/// perform transformations like substituting `VERSION`, `SHORT_HASH`, and +/// `STAMP` along with providing the various header/footer HTML we've customized. +/// +/// In the end, this is just a glorified wrapper around rustdoc!
+pub fn standalone(build: &Build, stage: u32, target: &str) { + println!("Documenting stage{} standalone ({})", stage, target); + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + + let compiler = Compiler::new(stage, &build.config.build); + + let favicon = build.src.join("src/doc/favicon.inc"); + let footer = build.src.join("src/doc/footer.inc"); + let full_toc = build.src.join("src/doc/full-toc.inc"); + t!(fs::copy(build.src.join("src/doc/rust.css"), out.join("rust.css"))); + + let version_input = build.src.join("src/doc/version_info.html.template"); + let version_info = out.join("version_info.html"); + + if !up_to_date(&version_input, &version_info) { + let mut info = String::new(); + t!(t!(File::open(&version_input)).read_to_string(&mut info)); + let blank = String::new(); + let short = build.short_ver_hash.as_ref().unwrap_or(&blank); + let hash = build.ver_hash.as_ref().unwrap_or(&blank); + let info = info.replace("VERSION", &build.release) + .replace("SHORT_HASH", short) + .replace("STAMP", hash); + t!(t!(File::create(&version_info)).write_all(info.as_bytes())); + } + + for file in t!(fs::read_dir(build.src.join("src/doc"))) { + let file = t!(file); + let path = file.path(); + let filename = path.file_name().unwrap().to_str().unwrap(); + if !filename.ends_with(".md") || filename == "README.md" { + continue + } + + let html = out.join(filename).with_extension("html"); + let rustdoc = build.rustdoc(&compiler); + if up_to_date(&path, &html) && + up_to_date(&footer, &html) && + up_to_date(&favicon, &html) && + up_to_date(&full_toc, &html) && + up_to_date(&version_info, &html) && + up_to_date(&rustdoc, &html) { + continue + } + + let mut cmd = Command::new(&rustdoc); + build.add_rustc_lib_path(&compiler, &mut cmd); + cmd.arg("--html-after-content").arg(&footer) + .arg("--html-before-content").arg(&version_info) + .arg("--html-in-header").arg(&favicon) + .arg("--markdown-playground-url") + .arg("https://play.rust-lang.org/") + .arg("-o").arg(&out) 
+ .arg(&path); + + if filename == "reference.md" { + cmd.arg("--html-in-header").arg(&full_toc); + } + + if filename == "not_found.md" { + cmd.arg("--markdown-no-toc") + .arg("--markdown-css") + .arg("https://doc.rust-lang.org/rust.css"); + } else { + cmd.arg("--markdown-css").arg("rust.css"); + } + build.run(&mut cmd); + } +} + +/// Compile all standard library documentation. +/// +/// This will generate all documentation for the standard library and its +/// dependencies. This is largely just a wrapper around `cargo doc`. +pub fn std(build: &Build, stage: u32, target: &str) { + println!("Documenting stage{} std ({})", stage, target); + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + let compiler = Compiler::new(stage, &build.config.build); + let out_dir = build.stage_out(&compiler, Mode::Libstd) + .join(target).join("doc"); + let rustdoc = build.rustdoc(&compiler); + + build.clear_if_dirty(&out_dir, &rustdoc); + + let mut cargo = build.cargo(&compiler, Mode::Libstd, target, "doc"); + cargo.arg("--manifest-path") + .arg(build.src.join("src/rustc/std_shim/Cargo.toml")) + .arg("--features").arg(build.std_features()); + build.run(&mut cargo); + cp_r(&out_dir, &out) +} + +/// Compile all libtest documentation. +/// +/// This will generate all documentation for libtest and its dependencies. This +/// is largely just a wrapper around `cargo doc`. 
+pub fn test(build: &Build, stage: u32, target: &str) { + println!("Documenting stage{} test ({})", stage, target); + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + let compiler = Compiler::new(stage, &build.config.build); + let out_dir = build.stage_out(&compiler, Mode::Libtest) + .join(target).join("doc"); + let rustdoc = build.rustdoc(&compiler); + + build.clear_if_dirty(&out_dir, &rustdoc); + + let mut cargo = build.cargo(&compiler, Mode::Libtest, target, "doc"); + cargo.arg("--manifest-path") + .arg(build.src.join("src/rustc/test_shim/Cargo.toml")); + build.run(&mut cargo); + cp_r(&out_dir, &out) +} + +/// Generate all compiler documentation. +/// +/// This will generate all documentation for the compiler libraries and their +/// dependencies. This is largely just a wrapper around `cargo doc`. +pub fn rustc(build: &Build, stage: u32, target: &str) { + println!("Documenting stage{} compiler ({})", stage, target); + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + let compiler = Compiler::new(stage, &build.config.build); + let out_dir = build.stage_out(&compiler, Mode::Librustc) + .join(target).join("doc"); + let rustdoc = build.rustdoc(&compiler); + if !up_to_date(&rustdoc, &out_dir.join("rustc/index.html")) && out_dir.exists() { + t!(fs::remove_dir_all(&out_dir)); + } + let mut cargo = build.cargo(&compiler, Mode::Librustc, target, "doc"); + cargo.arg("--manifest-path") + .arg(build.src.join("src/rustc/Cargo.toml")) + .arg("--features").arg(build.rustc_features()); + build.run(&mut cargo); + cp_r(&out_dir, &out) +} + +/// Generates the HTML rendered error-index by running the +/// `error_index_generator` tool. 
+pub fn error_index(build: &Build, stage: u32, target: &str) { + println!("Documenting stage{} error index ({})", stage, target); + let out = build.doc_out(target); + t!(fs::create_dir_all(&out)); + let compiler = Compiler::new(stage, &build.config.build); + let mut index = build.tool_cmd(&compiler, "error_index_generator"); + index.arg("html"); + index.arg(out.join("error-index.html")); + + // FIXME: shouldn't have to pass this env var + index.env("CFG_BUILD", &build.config.build); + + build.run(&mut index); +} diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs new file mode 100644 index 0000000000000..a7d80e4cdc466 --- /dev/null +++ b/src/bootstrap/flags.rs @@ -0,0 +1,283 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Command-line interface of the rustbuild build system. +//! +//! This module implements the command-line parsing of the build system which +//! has various flags to configure how it's run. + +use std::env; +use std::fs; +use std::path::PathBuf; +use std::process; + +use getopts::{Matches, Options}; + +use Build; +use config::Config; +use metadata; +use step; + +/// Deserialized version of all flags for this compile. 
+pub struct Flags { + pub verbose: bool, + pub stage: Option, + pub build: String, + pub host: Vec, + pub target: Vec, + pub config: Option, + pub src: Option, + pub jobs: Option, + pub cmd: Subcommand, +} + +pub enum Subcommand { + Build { + paths: Vec, + }, + Doc { + paths: Vec, + }, + Test { + paths: Vec, + test_args: Vec, + }, + Bench { + paths: Vec, + test_args: Vec, + }, + Clean, + Dist { + install: bool, + }, +} + +impl Flags { + pub fn parse(args: &[String]) -> Flags { + let mut opts = Options::new(); + opts.optflag("v", "verbose", "use verbose output"); + opts.optopt("", "config", "TOML configuration file for build", "FILE"); + opts.optopt("", "build", "build target of the stage0 compiler", "BUILD"); + opts.optmulti("", "host", "host targets to build", "HOST"); + opts.optmulti("", "target", "target targets to build", "TARGET"); + opts.optopt("", "stage", "stage to build", "N"); + opts.optopt("", "src", "path to the root of the rust checkout", "DIR"); + opts.optopt("j", "jobs", "number of jobs to run in parallel", "JOBS"); + opts.optflag("h", "help", "print this help message"); + + let usage = |n, opts: &Options| -> ! { + let command = args.get(0).map(|s| &**s); + let brief = format!("Usage: x.py {} [options] [...]", + command.unwrap_or("")); + + println!("{}", opts.usage(&brief)); + match command { + Some("build") => { + println!("\ +Arguments: + This subcommand accepts a number of positional arguments of directories to + the crates and/or artifacts to compile. For example: + + ./x.py build src/libcore + ./x.py build src/libproc_macro + ./x.py build src/libstd --stage 1 + + If no arguments are passed then the complete artifacts for that stage are + also compiled. 
+ + ./x.py build + ./x.py build --stage 1 + + For a quick build with a usable compile, you can pass: + + ./x.py build --stage 1 src/libtest +"); + } + + Some("test") => { + println!("\ +Arguments: + This subcommand accepts a number of positional arguments of directories to + tests that should be compiled and run. For example: + + ./x.py test src/test/run-pass + ./x.py test src/test/run-pass/assert-* + ./x.py test src/libstd --test-args hash_map + ./x.py test src/libstd --stage 0 + + If no arguments are passed then the complete artifacts for that stage are + compiled and tested. + + ./x.py test + ./x.py test --stage 1 +"); + } + + Some("doc") => { + println!("\ +Arguments: + This subcommand accepts a number of positional arguments of directories of + documentation to build. For example: + + ./x.py doc src/doc/book + ./x.py doc src/doc/nomicon + ./x.py doc src/libstd + + If no arguments are passed then everything is documented: + + ./x.py doc + ./x.py doc --stage 1 +"); + } + + _ => {} + } + + if let Some(command) = command { + if command == "build" || + command == "dist" || + command == "doc" || + command == "test" || + command == "bench" || + command == "clean" { + println!("Available invocations:"); + if args.iter().any(|a| a == "-v") { + let flags = Flags::parse(&["build".to_string()]); + let mut config = Config::default(); + config.build = flags.build.clone(); + let mut build = Build::new(flags, config); + metadata::build(&mut build); + step::build_rules(&build).print_help(command); + } else { + println!(" ... 
elided, run `./x.py {} -h -v` to see", + command); + } + + println!(""); + } + } + +println!("\ +Subcommands: + build Compile either the compiler or libraries + test Build and run some test suites + bench Build and run some benchmarks + doc Build documentation + clean Clean out build directories + dist Build and/or install distribution artifacts + +To learn more about a subcommand, run `./x.py -h` +"); + + process::exit(n); + }; + if args.len() == 0 { + println!("a command must be passed"); + usage(1, &opts); + } + let parse = |opts: &Options| { + let m = opts.parse(&args[1..]).unwrap_or_else(|e| { + println!("failed to parse options: {}", e); + usage(1, opts); + }); + if m.opt_present("h") { + usage(0, opts); + } + return m + }; + + let cwd = t!(env::current_dir()); + let remaining_as_path = |m: &Matches| { + m.free.iter().map(|p| cwd.join(p)).collect::>() + }; + + let m: Matches; + let cmd = match &args[0][..] { + "build" => { + m = parse(&opts); + Subcommand::Build { paths: remaining_as_path(&m) } + } + "doc" => { + m = parse(&opts); + Subcommand::Doc { paths: remaining_as_path(&m) } + } + "test" => { + opts.optmulti("", "test-args", "extra arguments", "ARGS"); + m = parse(&opts); + Subcommand::Test { + paths: remaining_as_path(&m), + test_args: m.opt_strs("test-args"), + } + } + "bench" => { + opts.optmulti("", "test-args", "extra arguments", "ARGS"); + m = parse(&opts); + Subcommand::Bench { + paths: remaining_as_path(&m), + test_args: m.opt_strs("test-args"), + } + } + "clean" => { + m = parse(&opts); + if m.free.len() > 0 { + println!("clean takes no arguments"); + usage(1, &opts); + } + Subcommand::Clean + } + "dist" => { + opts.optflag("", "install", "run installer as well"); + m = parse(&opts); + Subcommand::Dist { + install: m.opt_present("install"), + } + } + cmd => { + println!("unknown command: {}", cmd); + usage(1, &opts); + } + }; + + + let cfg_file = m.opt_str("config").map(PathBuf::from).or_else(|| { + if fs::metadata("config.toml").is_ok() { + 
Some(PathBuf::from("config.toml")) + } else { + None + } + }); + + Flags { + verbose: m.opt_present("v"), + stage: m.opt_str("stage").map(|j| j.parse().unwrap()), + build: m.opt_str("build").unwrap_or_else(|| { + env::var("BUILD").unwrap() + }), + host: m.opt_strs("host"), + target: m.opt_strs("target"), + config: cfg_file, + src: m.opt_str("src").map(PathBuf::from), + jobs: m.opt_str("jobs").map(|j| j.parse().unwrap()), + cmd: cmd, + } + } +} + +impl Subcommand { + pub fn test_args(&self) -> Vec<&str> { + match *self { + Subcommand::Test { ref test_args, .. } | + Subcommand::Bench { ref test_args, .. } => { + test_args.iter().flat_map(|s| s.split_whitespace()).collect() + } + _ => Vec::new(), + } + } +} diff --git a/src/bootstrap/install.rs b/src/bootstrap/install.rs new file mode 100644 index 0000000000000..9bc5a7c00abaf --- /dev/null +++ b/src/bootstrap/install.rs @@ -0,0 +1,61 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of the install aspects of the compiler. +//! +//! This module is responsible for installing the standard library, +//! compiler, and documentation. + +use std::fs; +use std::borrow::Cow; +use std::path::Path; +use std::process::Command; + +use Build; +use dist::{package_vers, sanitize_sh, tmpdir}; + +/// Installs everything. 
+pub fn install(build: &Build, stage: u32, host: &str) { + let prefix = build.config.prefix.as_ref().clone().map(|x| Path::new(x)) + .unwrap_or(Path::new("/usr/local")); + let docdir = build.config.docdir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x))) + .unwrap_or(Cow::Owned(prefix.join("share/doc/rust"))); + let libdir = build.config.libdir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x))) + .unwrap_or(Cow::Owned(prefix.join("lib"))); + let mandir = build.config.mandir.as_ref().clone().map(|x| Cow::Borrowed(Path::new(x))) + .unwrap_or(Cow::Owned(prefix.join("share/man"))); + let empty_dir = build.out.join("tmp/empty_dir"); + t!(fs::create_dir_all(&empty_dir)); + if build.config.docs { + install_sh(&build, "docs", "rust-docs", stage, host, prefix, + &docdir, &libdir, &mandir, &empty_dir); + } + install_sh(&build, "std", "rust-std", stage, host, prefix, + &docdir, &libdir, &mandir, &empty_dir); + install_sh(&build, "rustc", "rustc", stage, host, prefix, + &docdir, &libdir, &mandir, &empty_dir); + t!(fs::remove_dir_all(&empty_dir)); +} + +fn install_sh(build: &Build, package: &str, name: &str, stage: u32, host: &str, + prefix: &Path, docdir: &Path, libdir: &Path, mandir: &Path, empty_dir: &Path) { + println!("Install {} stage{} ({})", package, stage, host); + let package_name = format!("{}-{}-{}", name, package_vers(build), host); + + let mut cmd = Command::new("sh"); + cmd.current_dir(empty_dir) + .arg(sanitize_sh(&tmpdir(build).join(&package_name).join("install.sh"))) + .arg(format!("--prefix={}", sanitize_sh(prefix))) + .arg(format!("--docdir={}", sanitize_sh(docdir))) + .arg(format!("--libdir={}", sanitize_sh(libdir))) + .arg(format!("--mandir={}", sanitize_sh(mandir))) + .arg("--disable-ldconfig"); + build.run(&mut cmd); +} diff --git a/src/bootstrap/job.rs b/src/bootstrap/job.rs new file mode 100644 index 0000000000000..b4d7aff97da6a --- /dev/null +++ b/src/bootstrap/job.rs @@ -0,0 +1,178 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Job management on Windows for bootstrapping +//! +//! Most of the time when you're running a build system (e.g. make) you expect +//! Ctrl-C or abnormal termination to actually terminate the entire tree of +//! process in play, not just the one at the top. This currently works "by +//! default" on Unix platforms because Ctrl-C actually sends a signal to the +//! *process group* rather than the parent process, so everything will get torn +//! down. On Windows, however, this does not happen and Ctrl-C just kills the +//! parent process. +//! +//! To achieve the same semantics on Windows we use Job Objects to ensure that +//! all processes die at the same time. Job objects have a mode of operation +//! where when all handles to the object are closed it causes all child +//! processes associated with the object to be terminated immediately. +//! Conveniently whenever a process in the job object spawns a new process the +//! child will be associated with the job object as well. This means if we add +//! ourselves to the job object we create then everything will get torn down! +//! +//! Unfortunately most of the time the build system is actually called from a +//! python wrapper (which manages things like building the build system) so this +//! all doesn't quite cut it so far. To go the last mile we duplicate the job +//! object handle into our parent process (a python process probably) and then +//! close our own handle. This means that the only handle to the job object +//! resides in the parent python process, so when python dies the whole build +//! system dies (as one would probably expect!). +//! +//! 
Note that this module has a #[cfg(windows)] above it as none of this logic +//! is required on Unix. + +#![allow(bad_style, dead_code)] + +use std::env; +use std::io; +use std::mem; + +type HANDLE = *mut u8; +type BOOL = i32; +type DWORD = u32; +type LPHANDLE = *mut HANDLE; +type LPVOID = *mut u8; +type JOBOBJECTINFOCLASS = i32; +type SIZE_T = usize; +type LARGE_INTEGER = i64; +type ULONG_PTR = usize; +type ULONGLONG = u64; + +const FALSE: BOOL = 0; +const DUPLICATE_SAME_ACCESS: DWORD = 0x2; +const PROCESS_DUP_HANDLE: DWORD = 0x40; +const JobObjectExtendedLimitInformation: JOBOBJECTINFOCLASS = 9; +const JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE: DWORD = 0x2000; + +extern "system" { + fn CreateJobObjectW(lpJobAttributes: *mut u8, lpName: *const u8) -> HANDLE; + fn CloseHandle(hObject: HANDLE) -> BOOL; + fn GetCurrentProcess() -> HANDLE; + fn OpenProcess(dwDesiredAccess: DWORD, + bInheritHandle: BOOL, + dwProcessId: DWORD) -> HANDLE; + fn DuplicateHandle(hSourceProcessHandle: HANDLE, + hSourceHandle: HANDLE, + hTargetProcessHandle: HANDLE, + lpTargetHandle: LPHANDLE, + dwDesiredAccess: DWORD, + bInheritHandle: BOOL, + dwOptions: DWORD) -> BOOL; + fn AssignProcessToJobObject(hJob: HANDLE, hProcess: HANDLE) -> BOOL; + fn SetInformationJobObject(hJob: HANDLE, + JobObjectInformationClass: JOBOBJECTINFOCLASS, + lpJobObjectInformation: LPVOID, + cbJobObjectInformationLength: DWORD) -> BOOL; +} + +#[repr(C)] +struct JOBOBJECT_EXTENDED_LIMIT_INFORMATION { + BasicLimitInformation: JOBOBJECT_BASIC_LIMIT_INFORMATION, + IoInfo: IO_COUNTERS, + ProcessMemoryLimit: SIZE_T, + JobMemoryLimit: SIZE_T, + PeakProcessMemoryUsed: SIZE_T, + PeakJobMemoryUsed: SIZE_T, +} + +#[repr(C)] +struct IO_COUNTERS { + ReadOperationCount: ULONGLONG, + WriteOperationCount: ULONGLONG, + OtherOperationCount: ULONGLONG, + ReadTransferCount: ULONGLONG, + WriteTransferCount: ULONGLONG, + OtherTransferCount: ULONGLONG, +} + +#[repr(C)] +struct JOBOBJECT_BASIC_LIMIT_INFORMATION { + PerProcessUserTimeLimit: 
LARGE_INTEGER, + PerJobUserTimeLimit: LARGE_INTEGER, + LimitFlags: DWORD, + MinimumWorkingsetSize: SIZE_T, + MaximumWorkingsetSize: SIZE_T, + ActiveProcessLimit: DWORD, + Affinity: ULONG_PTR, + PriorityClass: DWORD, + SchedulingClass: DWORD, +} + +pub unsafe fn setup() { + // Create a new job object for us to use + let job = CreateJobObjectW(0 as *mut _, 0 as *const _); + assert!(job != 0 as *mut _, "{}", io::Error::last_os_error()); + + // Indicate that when all handles to the job object are gone that all + // process in the object should be killed. Note that this includes our + // entire process tree by default because we've added ourselves and our + // children will reside in the job by default. + let mut info = mem::zeroed::(); + info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; + let r = SetInformationJobObject(job, + JobObjectExtendedLimitInformation, + &mut info as *mut _ as LPVOID, + mem::size_of_val(&info) as DWORD); + assert!(r != 0, "{}", io::Error::last_os_error()); + + // Assign our process to this job object. Note that if this fails, one very + // likely reason is that we are ourselves already in a job object! This can + // happen on the build bots that we've got for Windows, or if just anyone + // else is instrumenting the build. In this case we just bail out + // immediately and assume that they take care of it. + // + // Also note that nested jobs (why this might fail) are supported in recent + // versions of Windows, but the version of Windows that our bots are running + // at least don't support nested job objects. + let r = AssignProcessToJobObject(job, GetCurrentProcess()); + if r == 0 { + CloseHandle(job); + return + } + + // If we've got a parent process (e.g. the python script that called us) + // then move ownership of this job object up to them. That way if the python + // script is killed (e.g. via ctrl-c) then we'll all be torn down. + // + // If we don't have a parent (e.g. 
this was run directly) then we + intentionally leak the job object handle. When our process exits + (normally or abnormally) it will close the handle implicitly, causing all + processes in the job to be cleaned up. + let pid = match env::var("BOOTSTRAP_PARENT_ID") { + Ok(s) => s, + Err(..) => return, + }; + + let parent = OpenProcess(PROCESS_DUP_HANDLE, FALSE, pid.parse().unwrap()); + assert!(parent != 0 as *mut _, "{}", io::Error::last_os_error()); + let mut parent_handle = 0 as *mut _; + let r = DuplicateHandle(GetCurrentProcess(), job, + parent, &mut parent_handle, + 0, FALSE, DUPLICATE_SAME_ACCESS); + + // If this failed, well at least we tried! An example of DuplicateHandle + // failing in the past has been when the wrong python2 package spawned this + // build system (e.g. the `python2` package in MSYS instead of + // `mingw-w64-x86_64-python2`). Not sure why it failed, but the "failure + // mode" here is that we only clean everything up when the build system + // dies, not when the python parent does, so not too bad. + if r != 0 { + CloseHandle(job); + } +} diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs new file mode 100644 index 0000000000000..590c967d147f2 --- /dev/null +++ b/src/bootstrap/lib.rs @@ -0,0 +1,784 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of rustbuild, the Rust build system. +//! +//! This module, and its descendants, are the implementation of the Rust build +//! system. Most of this build system is backed by Cargo but the outer layer +//! here serves as the ability to orchestrate calling Cargo, sequencing Cargo +//! builds, building artifacts like LLVM, etc. +//! +//! 
More documentation can be found in each respective module below. + +extern crate build_helper; +extern crate cmake; +extern crate filetime; +extern crate gcc; +extern crate getopts; +extern crate num_cpus; +extern crate rustc_serialize; +extern crate toml; + +use std::collections::HashMap; +use std::env; +use std::fs::{self, File}; +use std::path::{Component, PathBuf, Path}; +use std::process::Command; + +use build_helper::{run_silent, output}; + +use util::{exe, mtime, libdir, add_lib_path}; + +/// A helper macro to `unwrap` a result except also print out details like: +/// +/// * The file/line of the panic +/// * The expression that failed +/// * The error itself +/// +/// This is currently used judiciously throughout the build system rather than +/// using a `Result` with `try!`, but this may change one day... +macro_rules! t { + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {}", stringify!($e), e), + }) +} + +mod cc; +mod channel; +mod check; +mod clean; +mod compile; +mod metadata; +mod config; +mod dist; +mod doc; +mod flags; +mod install; +mod native; +mod sanity; +mod step; +pub mod util; + +#[cfg(windows)] +mod job; + +#[cfg(not(windows))] +mod job { + pub unsafe fn setup() {} +} + +pub use config::Config; +pub use flags::{Flags, Subcommand}; + +/// A structure representing a Rust compiler. +/// +/// Each compiler has a `stage` that it is associated with and a `host` that +/// corresponds to the platform the compiler runs on. This structure is used as +/// a parameter to many methods below. +#[derive(Eq, PartialEq, Clone, Copy, Hash, Debug)] +pub struct Compiler<'a> { + stage: u32, + host: &'a str, +} + +/// Global configuration for the build system. +/// +/// This structure transitively contains all configuration for the build system. +/// All filesystem-encoded configuration is in `config`, all flags are in +/// `flags`, and then parsed or probed information is listed in the keys below. 
+/// +/// This structure is a parameter of almost all methods in the build system, +/// although most functions are implemented as free functions rather than +/// methods specifically on this structure itself (to make it easier to +/// organize). +pub struct Build { + // User-specified configuration via config.toml + config: Config, + + // User-specified configuration via CLI flags + flags: Flags, + + // Derived properties from the above two configurations + cargo: PathBuf, + rustc: PathBuf, + src: PathBuf, + out: PathBuf, + release: String, + unstable_features: bool, + ver_hash: Option, + short_ver_hash: Option, + ver_date: Option, + version: String, + package_vers: String, + local_rebuild: bool, + + // Probed tools at runtime + lldb_version: Option, + lldb_python_dir: Option, + + // Runtime state filled in later on + cc: HashMap)>, + cxx: HashMap, + crates: HashMap, +} + +#[derive(Debug)] +struct Crate { + name: String, + deps: Vec, + path: PathBuf, + doc_step: String, + build_step: String, + test_step: String, + bench_step: String, +} + +/// The various "modes" of invoking Cargo. +/// +/// These entries currently correspond to the various output directories of the +/// build system, with each mod generating output in a different directory. +#[derive(Clone, Copy)] +pub enum Mode { + /// This cargo is going to build the standard library, placing output in the + /// "stageN-std" directory. + Libstd, + + /// This cargo is going to build libtest, placing output in the + /// "stageN-test" directory. + Libtest, + + /// This cargo is going to build librustc and compiler libraries, placing + /// output in the "stageN-rustc" directory. + Librustc, + + /// This cargo is going to some build tool, placing output in the + /// "stageN-tools" directory. + Tool, +} + +impl Build { + /// Creates a new set of build configuration from the `flags` on the command + /// line and the filesystem `config`. + /// + /// By default all build output will be placed in the current directory. 
+ pub fn new(flags: Flags, config: Config) -> Build { + let cwd = t!(env::current_dir()); + let src = flags.src.clone().or_else(|| { + env::var_os("SRC").map(|x| x.into()) + }).unwrap_or(cwd.clone()); + let out = cwd.join("build"); + + let stage0_root = out.join(&config.build).join("stage0/bin"); + let rustc = match config.rustc { + Some(ref s) => PathBuf::from(s), + None => stage0_root.join(exe("rustc", &config.build)), + }; + let cargo = match config.cargo { + Some(ref s) => PathBuf::from(s), + None => stage0_root.join(exe("cargo", &config.build)), + }; + let local_rebuild = config.local_rebuild; + + Build { + flags: flags, + config: config, + cargo: cargo, + rustc: rustc, + src: src, + out: out, + + release: String::new(), + unstable_features: false, + ver_hash: None, + short_ver_hash: None, + ver_date: None, + version: String::new(), + local_rebuild: local_rebuild, + package_vers: String::new(), + cc: HashMap::new(), + cxx: HashMap::new(), + crates: HashMap::new(), + lldb_version: None, + lldb_python_dir: None, + } + } + + /// Executes the entire build, as configured by the flags and configuration. 
+ pub fn build(&mut self) { + unsafe { + job::setup(); + } + + if let Subcommand::Clean = self.flags.cmd { + return clean::clean(self); + } + + self.verbose("finding compilers"); + cc::find(self); + self.verbose("running sanity check"); + sanity::check(self); + self.verbose("collecting channel variables"); + channel::collect(self); + // If local-rust is the same major.minor as the current version, then force a local-rebuild + let local_version_verbose = output( + Command::new(&self.rustc).arg("--version").arg("--verbose")); + let local_release = local_version_verbose + .lines().filter(|x| x.starts_with("release:")) + .next().unwrap().trim_left_matches("release:").trim(); + if local_release.split('.').take(2).eq(self.release.split('.').take(2)) { + self.verbose(&format!("auto-detected local-rebuild {}", local_release)); + self.local_rebuild = true; + } + self.verbose("updating submodules"); + self.update_submodules(); + self.verbose("learning about cargo"); + metadata::build(self); + + step::run(self); + } + + /// Updates all git submodules that we have. + /// + /// This will detect if any submodules are out of date and run the necessary + /// commands to sync them all with upstream. 
+ fn update_submodules(&self) { + struct Submodule<'a> { + path: &'a Path, + state: State, + } + + enum State { + // The submodule may have staged/unstaged changes + MaybeDirty, + // Or could be initialized but never updated + NotInitialized, + // The submodule, itself, has extra commits but those changes haven't been committed to + // the (outer) git repository + OutOfSync, + } + + if !self.config.submodules { + return + } + if fs::metadata(self.src.join(".git")).is_err() { + return + } + let git = || { + let mut cmd = Command::new("git"); + cmd.current_dir(&self.src); + return cmd + }; + let git_submodule = || { + let mut cmd = Command::new("git"); + cmd.current_dir(&self.src).arg("submodule"); + return cmd + }; + + // FIXME: this takes a seriously long time to execute on Windows and a + // nontrivial amount of time on Unix, we should have a better way + // of detecting whether we need to run all the submodule commands + // below. + let out = output(git_submodule().arg("status")); + let mut submodules = vec![]; + for line in out.lines() { + // NOTE `git submodule status` output looks like this: + // + // -5066b7dcab7e700844b0e2ba71b8af9dc627a59b src/liblibc + // +b37ef24aa82d2be3a3cc0fe89bf82292f4ca181c src/compiler-rt (remotes/origin/..) 
+ // e058ca661692a8d01f8cf9d35939dfe3105ce968 src/jemalloc (3.6.0-533-ge058ca6) + // + // The first character can be '-', '+' or ' ' and denotes the `State` of the submodule + // Right next to this character is the SHA-1 of the submodule HEAD + // And after that comes the path to the submodule + let path = Path::new(line[1..].split(' ').skip(1).next().unwrap()); + let state = if line.starts_with('-') { + State::NotInitialized + } else if line.starts_with('+') { + State::OutOfSync + } else if line.starts_with(' ') { + State::MaybeDirty + } else { + panic!("unexpected git submodule state: {:?}", line.chars().next()); + }; + + submodules.push(Submodule { path: path, state: state }) + } + + self.run(git_submodule().arg("sync")); + + for submodule in submodules { + // If using llvm-root then don't touch the llvm submodule. + if submodule.path.components().any(|c| c == Component::Normal("llvm".as_ref())) && + self.config.target_config.get(&self.config.build) + .and_then(|c| c.llvm_config.as_ref()).is_some() + { + continue + } + + if submodule.path.components().any(|c| c == Component::Normal("jemalloc".as_ref())) && + !self.config.use_jemalloc + { + continue + } + + // `submodule.path` is the relative path to a submodule (from the repository root) + // `submodule_path` is the path to a submodule from the cwd + + // use `submodule.path` when e.g. executing a submodule specific command from the + // repository root + // use `submodule_path` when e.g. 
executing a normal git command for the submodule + // (set via `current_dir`) + let submodule_path = self.src.join(submodule.path); + + match submodule.state { + State::MaybeDirty => { + // drop staged changes + self.run(git().current_dir(&submodule_path) + .args(&["reset", "--hard"])); + // drops unstaged changes + self.run(git().current_dir(&submodule_path) + .args(&["clean", "-fdx"])); + }, + State::NotInitialized => { + self.run(git_submodule().arg("init").arg(submodule.path)); + self.run(git_submodule().arg("update").arg(submodule.path)); + }, + State::OutOfSync => { + // drops submodule commits that weren't reported to the (outer) git repository + self.run(git_submodule().arg("update").arg(submodule.path)); + self.run(git().current_dir(&submodule_path) + .args(&["reset", "--hard"])); + self.run(git().current_dir(&submodule_path) + .args(&["clean", "-fdx"])); + }, + } + } + } + + /// Clear out `dir` if `input` is newer. + /// + /// After this executes, it will also ensure that `dir` exists. + fn clear_if_dirty(&self, dir: &Path, input: &Path) { + let stamp = dir.join(".stamp"); + if mtime(&stamp) < mtime(input) { + self.verbose(&format!("Dirty - {}", dir.display())); + let _ = fs::remove_dir_all(dir); + } else if stamp.exists() { + return + } + t!(fs::create_dir_all(dir)); + t!(File::create(stamp)); + } + + /// Prepares an invocation of `cargo` to be run. + /// + /// This will create a `Command` that represents a pending execution of + /// Cargo. This cargo will be configured to use `compiler` as the actual + /// rustc compiler, its output will be scoped by `mode`'s output directory, + /// it will pass the `--target` flag for the specified `target`, and will be + /// executing the Cargo command `cmd`. 
+ fn cargo(&self, + compiler: &Compiler, + mode: Mode, + target: &str, + cmd: &str) -> Command { + let mut cargo = Command::new(&self.cargo); + let out_dir = self.stage_out(compiler, mode); + cargo.env("CARGO_TARGET_DIR", out_dir) + .arg(cmd) + .arg("-j").arg(self.jobs().to_string()) + .arg("--target").arg(target); + + // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 + // Force cargo to output binaries with disambiguating hashes in the name + cargo.env("__CARGO_DEFAULT_LIB_METADATA", "1"); + + let stage; + if compiler.stage == 0 && self.local_rebuild { + // Assume the local-rebuild rustc already has stage1 features. + stage = 1; + } else { + stage = compiler.stage; + } + + // Customize the compiler we're running. Specify the compiler to cargo + // as our shim and then pass it some various options used to configure + // how the actual compiler itself is called. + // + // These variables are primarily all read by + // src/bootstrap/{rustc,rustdoc.rs} + cargo.env("RUSTC", self.out.join("bootstrap/debug/rustc")) + .env("RUSTC_REAL", self.compiler_path(compiler)) + .env("RUSTC_STAGE", stage.to_string()) + .env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()) + .env("RUSTC_DEBUGINFO_LINES", self.config.rust_debuginfo_lines.to_string()) + .env("RUSTC_CODEGEN_UNITS", + self.config.rust_codegen_units.to_string()) + .env("RUSTC_DEBUG_ASSERTIONS", + self.config.rust_debug_assertions.to_string()) + .env("RUSTC_SNAPSHOT", &self.rustc) + .env("RUSTC_SYSROOT", self.sysroot(compiler)) + .env("RUSTC_LIBDIR", self.rustc_libdir(compiler)) + .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()) + .env("RUSTC_RPATH", self.config.rust_rpath.to_string()) + .env("RUSTDOC", self.out.join("bootstrap/debug/rustdoc")) + .env("RUSTDOC_REAL", self.rustdoc(compiler)) + .env("RUSTC_FLAGS", self.rustc_flags(target).join(" ")); + + // Enable usage of unstable features + cargo.env("RUSTC_BOOTSTRAP", "1"); + + // Specify some various 
options for build scripts used throughout + // the build. + // + // FIXME: the guard against msvc shouldn't need to be here + if !target.contains("msvc") { + cargo.env(format!("CC_{}", target), self.cc(target)) + .env(format!("AR_{}", target), self.ar(target).unwrap()) // only msvc is None + .env(format!("CFLAGS_{}", target), self.cflags(target).join(" ")); + } + + // Environment variables *required* needed throughout the build + // + // FIXME: should update code to not require this env var + cargo.env("CFG_COMPILER_HOST_TRIPLE", target); + + if self.config.verbose || self.flags.verbose { + cargo.arg("-v"); + } + // FIXME: cargo bench does not accept `--release` + if self.config.rust_optimize && cmd != "bench" { + cargo.arg("--release"); + } + if self.config.vendor { + cargo.arg("--frozen"); + } + return cargo + } + + /// Get a path to the compiler specified. + fn compiler_path(&self, compiler: &Compiler) -> PathBuf { + if compiler.is_snapshot(self) { + self.rustc.clone() + } else { + self.sysroot(compiler).join("bin").join(exe("rustc", compiler.host)) + } + } + + /// Get the specified tool built by the specified compiler + fn tool(&self, compiler: &Compiler, tool: &str) -> PathBuf { + self.cargo_out(compiler, Mode::Tool, compiler.host) + .join(exe(tool, compiler.host)) + } + + /// Get the `rustdoc` executable next to the specified compiler + fn rustdoc(&self, compiler: &Compiler) -> PathBuf { + let mut rustdoc = self.compiler_path(compiler); + rustdoc.pop(); + rustdoc.push(exe("rustdoc", compiler.host)); + return rustdoc + } + + /// Get a `Command` which is ready to run `tool` in `stage` built for + /// `host`. 
+ fn tool_cmd(&self, compiler: &Compiler, tool: &str) -> Command { + let mut cmd = Command::new(self.tool(&compiler, tool)); + let host = compiler.host; + let paths = vec![ + self.cargo_out(compiler, Mode::Libstd, host).join("deps"), + self.cargo_out(compiler, Mode::Libtest, host).join("deps"), + self.cargo_out(compiler, Mode::Librustc, host).join("deps"), + self.cargo_out(compiler, Mode::Tool, host).join("deps"), + ]; + add_lib_path(paths, &mut cmd); + return cmd + } + + /// Get the space-separated set of activated features for the standard + /// library. + fn std_features(&self) -> String { + let mut features = "panic-unwind".to_string(); + if self.config.debug_jemalloc { + features.push_str(" debug-jemalloc"); + } + if self.config.use_jemalloc { + features.push_str(" jemalloc"); + } + if self.config.backtrace { + features.push_str(" backtrace"); + } + return features + } + + /// Get the space-separated set of activated features for the compiler. + fn rustc_features(&self) -> String { + let mut features = String::new(); + if self.config.use_jemalloc { + features.push_str(" jemalloc"); + } + return features + } + + /// Component directory that Cargo will produce output into (e.g. + /// release/debug) + fn cargo_dir(&self) -> &'static str { + if self.config.rust_optimize {"release"} else {"debug"} + } + + /// Returns the sysroot for the `compiler` specified that *this build system + /// generates*. + /// + /// That is, the sysroot for the stage0 compiler is not what the compiler + /// thinks it is by default, but it's the same as the default for stages + /// 1-3. + fn sysroot(&self, compiler: &Compiler) -> PathBuf { + if compiler.stage == 0 { + self.out.join(compiler.host).join("stage0-sysroot") + } else { + self.out.join(compiler.host).join(format!("stage{}", compiler.stage)) + } + } + + /// Returns the libdir where the standard library and other artifacts are + /// found for a compiler's sysroot. 
+ fn sysroot_libdir(&self, compiler: &Compiler, target: &str) -> PathBuf { + self.sysroot(compiler).join("lib").join("rustlib") + .join(target).join("lib") + } + + /// Returns the root directory for all output generated in a particular + /// stage when running with a particular host compiler. + /// + /// The mode indicates what the root directory is for. + fn stage_out(&self, compiler: &Compiler, mode: Mode) -> PathBuf { + let suffix = match mode { + Mode::Libstd => "-std", + Mode::Libtest => "-test", + Mode::Tool => "-tools", + Mode::Librustc => "-rustc", + }; + self.out.join(compiler.host) + .join(format!("stage{}{}", compiler.stage, suffix)) + } + + /// Returns the root output directory for all Cargo output in a given stage, + /// running a particular compiler, whether or not we're building the + /// standard library, and targeting the specified architecture. + fn cargo_out(&self, + compiler: &Compiler, + mode: Mode, + target: &str) -> PathBuf { + self.stage_out(compiler, mode).join(target).join(self.cargo_dir()) + } + + /// Root output directory for LLVM compiled for `target` + /// + /// Note that if LLVM is configured externally then the directory returned + /// will likely be empty. + fn llvm_out(&self, target: &str) -> PathBuf { + self.out.join(target).join("llvm") + } + + /// Output directory for all documentation for a target + fn doc_out(&self, target: &str) -> PathBuf { + self.out.join(target).join("doc") + } + + /// Returns true if no custom `llvm-config` is set for the specified target. + /// + /// If no custom `llvm-config` was specified then Rust's llvm will be used. + fn is_rust_llvm(&self, target: &str) -> bool { + match self.config.target_config.get(target) { + Some(ref c) => c.llvm_config.is_none(), + None => true + } + } + + /// Returns the path to `llvm-config` for the specified target. + /// + /// If a custom `llvm-config` was specified for target then that's returned + /// instead. 
+ fn llvm_config(&self, target: &str) -> PathBuf { + let target_config = self.config.target_config.get(target); + if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { + s.clone() + } else { + self.llvm_out(&self.config.build).join("bin") + .join(exe("llvm-config", target)) + } + } + + /// Returns the path to `FileCheck` binary for the specified target + fn llvm_filecheck(&self, target: &str) -> PathBuf { + let target_config = self.config.target_config.get(target); + if let Some(s) = target_config.and_then(|c| c.llvm_config.as_ref()) { + s.parent().unwrap().join(exe("FileCheck", target)) + } else { + let base = self.llvm_out(&self.config.build).join("build"); + let exe = exe("FileCheck", target); + if self.config.build.contains("msvc") { + base.join("Release/bin").join(exe) + } else { + base.join("bin").join(exe) + } + } + } + + /// Root output directory for rust_test_helpers library compiled for + /// `target` + fn test_helpers_out(&self, target: &str) -> PathBuf { + self.out.join(target).join("rust-test-helpers") + } + + /// Adds the compiler's directory of dynamic libraries to `cmd`'s dynamic + /// library lookup path. + fn add_rustc_lib_path(&self, compiler: &Compiler, cmd: &mut Command) { + // Windows doesn't need dylib path munging because the dlls for the + // compiler live next to the compiler and the system will find them + // automatically. + if cfg!(windows) { + return + } + + add_lib_path(vec![self.rustc_libdir(compiler)], cmd); + } + + /// Returns the compiler's libdir where it stores the dynamic libraries that + /// it itself links against. + /// + /// For example this returns `/lib` on Unix and `/bin` on + /// Windows. + fn rustc_libdir(&self, compiler: &Compiler) -> PathBuf { + if compiler.is_snapshot(self) { + self.rustc_snapshot_libdir() + } else { + self.sysroot(compiler).join(libdir(compiler.host)) + } + } + + /// Returns the libdir of the snapshot compiler. 
+ fn rustc_snapshot_libdir(&self) -> PathBuf { + self.rustc.parent().unwrap().parent().unwrap() + .join(libdir(&self.config.build)) + } + + /// Runs a command, printing out nice contextual information if it fails. + fn run(&self, cmd: &mut Command) { + self.verbose(&format!("running: {:?}", cmd)); + run_silent(cmd) + } + + /// Prints a message if this build is configured in verbose mode. + fn verbose(&self, msg: &str) { + if self.flags.verbose || self.config.verbose { + println!("{}", msg); + } + } + + /// Returns the number of parallel jobs that have been configured for this + /// build. + fn jobs(&self) -> u32 { + self.flags.jobs.unwrap_or(num_cpus::get() as u32) + } + + /// Returns the path to the C compiler for the target specified. + fn cc(&self, target: &str) -> &Path { + self.cc[target].0.path() + } + + /// Returns a list of flags to pass to the C compiler for the target + /// specified. + fn cflags(&self, target: &str) -> Vec { + // Filter out -O and /O (the optimization flags) that we picked up from + // gcc-rs because the build scripts will determine that for themselves. + let mut base = self.cc[target].0.args().iter() + .map(|s| s.to_string_lossy().into_owned()) + .filter(|s| !s.starts_with("-O") && !s.starts_with("/O")) + .collect::>(); + + // If we're compiling on OSX then we add a few unconditional flags + // indicating that we want libc++ (more filled out than libstdc++) and + // we want to compile for 10.7. This way we can ensure that + // LLVM/jemalloc/etc are all properly compiled. 
+ if target.contains("apple-darwin") { + base.push("-stdlib=libc++".into()); + } + // This is a hack, because newer binutils broke things on some vms/distros + // (i.e., linking against unknown relocs disabled by the following flag) + // See: https://github.com/rust-lang/rust/issues/34978 + match target { + "i586-unknown-linux-gnu" | + "i686-unknown-linux-musl" | + "x86_64-unknown-linux-musl" => { + base.push("-Wa,-mrelax-relocations=no".into()); + }, + _ => {}, + } + return base + } + + /// Returns the path to the `ar` archive utility for the target specified. + fn ar(&self, target: &str) -> Option<&Path> { + self.cc[target].1.as_ref().map(|p| &**p) + } + + /// Returns the path to the C++ compiler for the target specified, may panic + /// if no C++ compiler was configured for the target. + fn cxx(&self, target: &str) -> &Path { + match self.cxx.get(target) { + Some(p) => p.path(), + None => panic!("\n\ntarget `{}` is not configured as a host, + only as a target\n\n", target), + } + } + + /// Returns flags to pass to the compiler to generate code for `target`. + fn rustc_flags(&self, target: &str) -> Vec { + // New flags should be added here with great caution! + // + // It's quite unfortunate to **require** flags to generate code for a + // target, so it should only be passed here if absolutely necessary! + // Most default configuration should be done through target specs rather + // than an entry here. 
+ + let mut base = Vec::new(); + if target != self.config.build && !target.contains("msvc") && + !target.contains("emscripten") { + base.push(format!("-Clinker={}", self.cc(target).display())); + } + return base + } + + /// Returns the "musl root" for this `target`, if defined + fn musl_root(&self, target: &str) -> Option<&Path> { + self.config.target_config.get(target) + .and_then(|t| t.musl_root.as_ref()) + .or(self.config.musl_root.as_ref()) + .map(|p| &**p) + } + + /// Path to the python interpreter to use + fn python(&self) -> &Path { + self.config.python.as_ref().unwrap() + } +} + +impl<'a> Compiler<'a> { + /// Creates a new compiler for the specified stage/host + fn new(stage: u32, host: &'a str) -> Compiler<'a> { + Compiler { stage: stage, host: host } + } + + /// Returns whether this is a snapshot compiler for `build`'s configuration + fn is_snapshot(&self, build: &Build) -> bool { + self.stage == 0 && self.host == build.config.build + } +} diff --git a/src/bootstrap/metadata.rs b/src/bootstrap/metadata.rs new file mode 100644 index 0000000000000..8befb105ff618 --- /dev/null +++ b/src/bootstrap/metadata.rs @@ -0,0 +1,96 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::collections::HashMap; +use std::process::Command; +use std::path::PathBuf; + +use build_helper::output; +use rustc_serialize::json; + +use {Build, Crate}; + +#[derive(RustcDecodable)] +struct Output { + packages: Vec, + resolve: Resolve, +} + +#[derive(RustcDecodable)] +struct Package { + id: String, + name: String, + source: Option, + manifest_path: String, +} + +#[derive(RustcDecodable)] +struct Resolve { + nodes: Vec, +} + +#[derive(RustcDecodable)] +struct ResolveNode { + id: String, + dependencies: Vec, +} + +pub fn build(build: &mut Build) { + build_krate(build, "src/rustc/std_shim"); + build_krate(build, "src/rustc/test_shim"); + build_krate(build, "src/rustc"); +} + +fn build_krate(build: &mut Build, krate: &str) { + // Run `cargo metadata` to figure out what crates we're testing. + // + // Down below we're going to call `cargo test`, but to test the right set + // of packages we're going to have to know what `-p` arguments to pass it + // to know what crates to test. Here we run `cargo metadata` to learn about + // the dependency graph and what `-p` arguments there are. 
+ let mut cargo = Command::new(&build.cargo); + cargo.arg("metadata") + .arg("--manifest-path").arg(build.src.join(krate).join("Cargo.toml")); + let output = output(&mut cargo); + let output: Output = json::decode(&output).unwrap(); + let mut id2name = HashMap::new(); + for package in output.packages { + if package.source.is_none() { + id2name.insert(package.id, package.name.clone()); + let mut path = PathBuf::from(package.manifest_path); + path.pop(); + build.crates.insert(package.name.clone(), Crate { + build_step: format!("build-crate-{}", package.name), + doc_step: format!("doc-crate-{}", package.name), + test_step: format!("test-crate-{}", package.name), + bench_step: format!("bench-crate-{}", package.name), + name: package.name, + deps: Vec::new(), + path: path, + }); + } + } + + for node in output.resolve.nodes { + let name = match id2name.get(&node.id) { + Some(name) => name, + None => continue, + }; + + let krate = build.crates.get_mut(name).unwrap(); + for dep in node.dependencies.iter() { + let dep = match id2name.get(dep) { + Some(dep) => dep, + None => continue, + }; + krate.deps.push(dep.clone()); + } + } +} diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in new file mode 100644 index 0000000000000..1e73595ec9983 --- /dev/null +++ b/src/bootstrap/mk/Makefile.in @@ -0,0 +1,65 @@ +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +include config.mk +include $(CFG_SRC_DIR)mk/util.mk + +ifdef VERBOSE +BOOTSTRAP_ARGS := -v +else +BOOTSTRAP_ARGS := +endif + +BOOTSTRAP := $(CFG_PYTHON) $(CFG_SRC_DIR)src/bootstrap/bootstrap.py + +all: + $(Q)$(BOOTSTRAP) build $(BOOTSTRAP_ARGS) + $(Q)$(BOOTSTRAP) doc $(BOOTSTRAP_ARGS) + +# Don’t use $(Q) here, always show how to invoke the bootstrap script directly +help: + $(BOOTSTRAP) --help + +clean: + $(Q)$(BOOTSTRAP) clean $(BOOTSTRAP_ARGS) + +rustc-stage1: + $(Q)$(BOOTSTRAP) build --stage 1 src/libtest $(BOOTSTRAP_ARGS) +rustc-stage2: + $(Q)$(BOOTSTRAP) build --stage 2 src/libtest $(BOOTSTRAP_ARGS) + +docs: doc +doc: + $(Q)$(BOOTSTRAP) doc $(BOOTSTRAP_ARGS) +nomicon: + $(Q)$(BOOTSTRAP) doc src/doc/nomicon $(BOOTSTRAP_ARGS) +book: + $(Q)$(BOOTSTRAP) doc src/doc/book $(BOOTSTRAP_ARGS) +standalone-docs: + $(Q)$(BOOTSTRAP) doc src/doc $(BOOTSTRAP_ARGS) +check: + $(Q)$(BOOTSTRAP) test $(BOOTSTRAP_ARGS) +check-cargotest: + $(Q)$(BOOTSTRAP) test src/tools/cargotest $(BOOTSTRAP_ARGS) +dist: + $(Q)$(BOOTSTRAP) dist $(BOOTSTRAP_ARGS) +install: +ifeq (root user, $(USER) $(patsubst %,user,$(SUDO_USER))) + $(Q)echo "'sudo make install' is not supported currently." +else + $(Q)$(BOOTSTRAP) dist --install $(BOOTSTRAP_ARGS) +endif +tidy: + $(Q)$(BOOTSTRAP) test src/tools/tidy $(BOOTSTRAP_ARGS) --stage 0 + +check-stage2-android: + $(Q)$(BOOTSTRAP) --step check-target --target arm-linux-androideabi + +.PHONY: dist diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs new file mode 100644 index 0000000000000..96d1b695dd707 --- /dev/null +++ b/src/bootstrap/native.rs @@ -0,0 +1,169 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
//! Compilation of native dependencies like LLVM.
//!
//! Native projects like LLVM unfortunately aren't suited just yet for
//! compilation in build scripts that Cargo has. This is because the
//! compilation takes a *very* long time but also because we don't want to
//! compile LLVM 3 times as part of a normal bootstrap (we want it cached).
//!
//! LLVM and compiler-rt are essentially just wired up to everything else to
//! ensure that they're always in place if needed.

use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::Path;
use std::process::Command;

use build_helper::output;
use cmake;
use gcc;

use Build;
use util::up_to_date;

/// Compile LLVM for `target`.
///
/// If config.toml supplies a prebuilt `llvm-config` for this target we only
/// version-check it; otherwise we drive a full out-of-tree CMake build and
/// record a completion stamp so subsequent bootstraps can skip it.
pub fn llvm(build: &Build, target: &str) {
    // If we're using a custom LLVM bail out here, but we can only use a
    // custom LLVM for the build triple.
    if let Some(config) = build.config.target_config.get(target) {
        if let Some(ref s) = config.llvm_config {
            return check_llvm_version(build, s);
        }
    }

    // If the cleaning trigger is newer than our built artifacts (or if the
    // artifacts are missing) then we keep going, otherwise we bail out.
    let dst = build.llvm_out(target);
    let stamp = build.src.join("src/rustllvm/llvm-auto-clean-trigger");
    let mut stamp_contents = String::new();
    t!(t!(File::open(&stamp)).read_to_string(&mut stamp_contents));
    let done_stamp = dst.join("llvm-finished-building");
    if done_stamp.exists() {
        let mut done_contents = String::new();
        t!(t!(File::open(&done_stamp)).read_to_string(&mut done_contents));
        // The build is up to date iff the done-stamp recorded the same
        // trigger contents as the current trigger file.
        if done_contents == stamp_contents {
            return
        }
    }
    // Stale or partial build: wipe it (ignore errors if it never existed).
    drop(fs::remove_dir_all(&dst));

    println!("Building LLVM for {}", target);

    let _ = fs::remove_dir_all(&dst.join("build"));
    t!(fs::create_dir_all(&dst.join("build")));
    let assertions = if build.config.llvm_assertions {"ON"} else {"OFF"};

    // http://llvm.org/docs/CMake.html
    let mut cfg = cmake::Config::new(build.src.join("src/llvm"));
    if build.config.ninja {
        cfg.generator("Ninja");
    }

    let profile = match (build.config.llvm_optimize, build.config.llvm_release_debuginfo) {
        (false, _) => "Debug",
        (true, false) => "Release",
        (true, true) => "RelWithDebInfo",
    };

    cfg.target(target)
       .host(&build.config.build)
       .out_dir(&dst)
       .profile(profile)
       .define("LLVM_ENABLE_ASSERTIONS", assertions)
       .define("LLVM_TARGETS_TO_BUILD",
               "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430")
       .define("LLVM_INCLUDE_EXAMPLES", "OFF")
       .define("LLVM_INCLUDE_TESTS", "OFF")
       .define("LLVM_INCLUDE_DOCS", "OFF")
       .define("LLVM_ENABLE_ZLIB", "OFF")
       .define("WITH_POLLY", "OFF")
       .define("LLVM_ENABLE_TERMINFO", "OFF")
       .define("LLVM_ENABLE_LIBEDIT", "OFF")
       .define("LLVM_PARALLEL_COMPILE_JOBS", build.jobs().to_string())
       .define("LLVM_TARGET_ARCH", target.split('-').next().unwrap())
       .define("LLVM_DEFAULT_TARGET_TRIPLE", target);

    if target.starts_with("i686") {
        cfg.define("LLVM_BUILD_32_BITS", "ON");
    }

    // http://llvm.org/docs/HowToCrossCompileLLVM.html
    if target != build.config.build {
        // FIXME: if the llvm root for the build triple is overridden then we
        //        should use llvm-tblgen from there, also should verify that it
        //        actually exists most of the time in normal installs of LLVM.
        let host = build.llvm_out(&build.config.build).join("bin/llvm-tblgen");
        cfg.define("CMAKE_CROSSCOMPILING", "True")
           .define("LLVM_TABLEGEN", &host);
    }

    // MSVC handles compiler business itself
    if !target.contains("msvc") {
        if build.config.ccache {
            // Tell cmake to wrap both compilers in ccache; the real compiler
            // is passed as ARG1.
            cfg.define("CMAKE_C_COMPILER", "ccache")
               .define("CMAKE_C_COMPILER_ARG1", build.cc(target))
               .define("CMAKE_CXX_COMPILER", "ccache")
               .define("CMAKE_CXX_COMPILER_ARG1", build.cxx(target));
        } else {
            cfg.define("CMAKE_C_COMPILER", build.cc(target))
               .define("CMAKE_CXX_COMPILER", build.cxx(target));
        }
        cfg.build_arg("-j").build_arg(build.jobs().to_string());

        cfg.define("CMAKE_C_FLAGS", build.cflags(target).join(" "));
        cfg.define("CMAKE_CXX_FLAGS", build.cflags(target).join(" "));
    }

    // FIXME: we don't actually need to build all LLVM tools and all LLVM
    //        libraries here, e.g. we just want a few components and a few
    //        tools. Figure out how to filter them down and only build the right
    //        tools and libs on all platforms.
    cfg.build();

    // Record the trigger contents so the early-out above fires next time.
    t!(t!(File::create(&done_stamp)).write_all(stamp_contents.as_bytes()));
}

/// Sanity-check an externally-provided `llvm-config`: accept 3.5/3.6/3.7,
/// panic otherwise (skipped entirely when version checking is disabled).
fn check_llvm_version(build: &Build, llvm_config: &Path) {
    if !build.config.llvm_version_check {
        return
    }

    let mut cmd = Command::new(llvm_config);
    let version = output(cmd.arg("--version"));
    if version.starts_with("3.5") || version.starts_with("3.6") ||
       version.starts_with("3.7") {
        return
    }
    panic!("\n\nbad LLVM version: {}, need >=3.5\n\n", version)
}

/// Compiles the `rust_test_helpers.c` library which we used in various
/// `run-pass` test suites for ABI testing.
pub fn test_helpers(build: &Build, target: &str) {
    let dst = build.test_helpers_out(target);
    let src = build.src.join("src/rt/rust_test_helpers.c");
    // Rebuild only when the C source is newer than the built archive.
    if up_to_date(&src, &dst.join("librust_test_helpers.a")) {
        return
    }

    println!("Building test helpers");
    t!(fs::create_dir_all(&dst));
    let mut cfg = gcc::Config::new();
    // cargo_metadata(false): we're not inside a build script, so don't emit
    // `cargo:` directives to stdout.
    cfg.cargo_metadata(false)
       .out_dir(&dst)
       .target(target)
       .host(&build.config.build)
       .opt_level(0)
       .debug(false)
       .file(build.src.join("src/rt/rust_test_helpers.c"))
       .compile("librust_test_helpers.a");
}
use std::collections::HashSet;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fs;
use std::process::Command;

use build_helper::output;

use Build;

/// Perform pre-build sanity checks.
///
/// Panics (with a human-readable message) when a required program or
/// configuration is missing. Also fills in discovered defaults on
/// `build.config` as a side effect: `python`, `nodejs`, `gdb`,
/// `use_jemalloc` (forced off for MSVC hosts), plus `build.lldb_version`
/// and `build.lldb_python_dir`.
pub fn check(build: &mut Build) {
    // Commands already verified, so each is only checked once.
    let mut checked = HashSet::new();
    let path = env::var_os("PATH").unwrap_or(OsString::new());
    // On Windows, quotes are invalid characters for filename paths, and if
    // one is present as part of the PATH then that can lead to the system
    // being unable to identify the files properly. See
    // https://github.com/rust-lang/rust/issues/34959 for more details.
    if cfg!(windows) {
        if path.to_string_lossy().contains("\"") {
            panic!("PATH contains invalid character '\"'");
        }
    }
    // Search every PATH entry for `cmd`, also accepting an `.exe` suffix.
    let have_cmd = |cmd: &OsStr| {
        for path in env::split_paths(&path).map(|p| p.join(cmd)) {
            if fs::metadata(&path).is_ok() ||
               fs::metadata(path.with_extension("exe")).is_ok() {
                return Some(path);
            }
        }
        return None;
    };

    // Panic unless `cmd` is found on PATH.
    let mut need_cmd = |cmd: &OsStr| {
        if !checked.insert(cmd.to_owned()) {
            return
        }
        if have_cmd(cmd).is_none() {
            panic!("\n\ncouldn't find required command: {:?}\n\n", cmd);
        }
    };

    // If we've got a git directory we're gonna need git to update
    // submodules and learn about various other aspects.
    if fs::metadata(build.src.join(".git")).is_ok() {
        need_cmd("git".as_ref());
    }

    // We need cmake, but only if we're actually building LLVM
    for host in build.config.host.iter() {
        if let Some(config) = build.config.target_config.get(host) {
            if config.llvm_config.is_some() {
                continue
            }
        }
        need_cmd("cmake".as_ref());
        if build.config.ninja {
            need_cmd("ninja".as_ref())
        }
        // One host needing cmake is enough; no need to keep scanning.
        break
    }

    // Pick a Python if none configured: prefer python2.7, then python2,
    // finally fall back to plain `python` (which must then exist).
    if build.config.python.is_none() {
        build.config.python = have_cmd("python2.7".as_ref());
    }
    if build.config.python.is_none() {
        build.config.python = have_cmd("python2".as_ref());
    }
    if build.config.python.is_none() {
        need_cmd("python".as_ref());
        build.config.python = Some("python".into());
    }
    need_cmd(build.config.python.as_ref().unwrap().as_ref());


    if let Some(ref s) = build.config.nodejs {
        need_cmd(s.as_ref());
    } else {
        // Look for the nodejs command, needed for emscripten testing
        if let Some(node) = have_cmd("node".as_ref()) {
            build.config.nodejs = Some(node);
        } else if let Some(node) = have_cmd("nodejs".as_ref()) {
            build.config.nodejs = Some(node);
        }
    }

    // gdb is optional: required only if explicitly configured.
    if let Some(ref gdb) = build.config.gdb {
        need_cmd(gdb.as_ref());
    } else {
        build.config.gdb = have_cmd("gdb".as_ref());
    }

    // We're gonna build some custom C code here and there, host triples
    // also build some C++ shims for LLVM so we need a C++ compiler.
    for target in build.config.target.iter() {
        // On emscripten we don't actually need the C compiler to just
        // build the target artifacts, only for testing. For the sake
        // of easier bot configuration, just skip detection.
        if target.contains("emscripten") {
            continue;
        }

        need_cmd(build.cc(target).as_ref());
        if let Some(ar) = build.ar(target) {
            need_cmd(ar.as_ref());
        }
    }
    for host in build.config.host.iter() {
        need_cmd(build.cxx(host).as_ref());
    }

    // The msvc hosts don't use jemalloc, turn it off globally to
    // avoid packaging the dummy liballoc_jemalloc on that platform.
    for host in build.config.host.iter() {
        if host.contains("msvc") {
            build.config.use_jemalloc = false;
        }
    }

    // Externally configured LLVM requires FileCheck to exist
    let filecheck = build.llvm_filecheck(&build.config.build);
    if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests {
        panic!("filecheck executable {:?} does not exist", filecheck);
    }

    for target in build.config.target.iter() {
        // Can't compile for iOS unless we're on OSX
        if target.contains("apple-ios") &&
           !build.config.build.contains("apple-darwin") {
            panic!("the iOS target is only supported on OSX");
        }

        // Make sure musl-root is valid if specified
        if target.contains("musl") && !target.contains("mips") {
            match build.musl_root(target) {
                Some(root) => {
                    if fs::metadata(root.join("lib/libc.a")).is_err() {
                        panic!("couldn't find libc.a in musl dir: {}",
                               root.join("lib").display());
                    }
                    if fs::metadata(root.join("lib/libunwind.a")).is_err() {
                        panic!("couldn't find libunwind.a in musl dir: {}",
                               root.join("lib").display());
                    }
                }
                None => {
                    panic!("when targeting MUSL either the rust.musl-root \
                            option or the target.$TARGET.musl-root option must \
                            be specified in config.toml")
                }
            }
        }

        if target.contains("msvc") {
            // There are three builds of cmake on windows: MSVC, MinGW, and
            // Cygwin. The Cygwin build does not have generators for Visual
            // Studio, so detect that here and error.
            let out = output(Command::new("cmake").arg("--help"));
            if !out.contains("Visual Studio") {
                panic!("
cmake does not support Visual Studio generators.

This is likely due to it being an msys/cygwin build of cmake,
rather than the required windows version, built using MinGW
or Visual Studio.

If you are building under msys2 try installing the mingw-w64-x86_64-cmake
package instead of cmake:

$ pacman -R cmake && pacman -S mingw-w64-x86_64-cmake
");
            }
        }

        // Android testing pushes artifacts to a device via adb.
        if target.contains("arm-linux-android") {
            need_cmd("adb".as_ref());
        }
    }

    // Hosts/targets passed on the command line must be a subset of what was
    // configured, otherwise no compilers exist for them.
    for host in build.flags.host.iter() {
        if !build.config.host.contains(host) {
            panic!("specified host `{}` is not in the ./configure list", host);
        }
    }
    for target in build.flags.target.iter() {
        if !build.config.target.contains(target) {
            panic!("specified target `{}` is not in the ./configure list",
                   target);
        }
    }

    // Best-effort probes (Option-returning) recording the first output line
    // of `lldb --version` / `lldb -P` for later debuginfo test configuration.
    let run = |cmd: &mut Command| {
        cmd.output().map(|output| {
            String::from_utf8_lossy(&output.stdout)
                   .lines().next().unwrap()
                   .to_string()
        })
    };
    build.lldb_version = run(Command::new("lldb").arg("--version")).ok();
    if build.lldb_version.is_some() {
        build.lldb_python_dir = run(Command::new("lldb").arg("-P")).ok();
    }
}
use std::collections::{HashMap, HashSet};
use std::mem;

use check::{self, TestKind};
use compile;
use dist;
use doc;
use flags::Subcommand;
use install;
use native;
use {Compiler, Build, Mode};

/// One unit of scheduled work: a named rule instantiated at a particular
/// stage for a particular host/target pair. Used as the node type of the
/// dependency graph.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
struct Step<'a> {
    name: &'a str,
    stage: u32,
    host: &'a str,
    target: &'a str,
}

impl<'a> Step<'a> {
    // Each builder-style helper returns a copy of `self` with one field
    // replaced; steps themselves are immutable.
    fn name(&self, name: &'a str) -> Step<'a> {
        Step { name: name, ..*self }
    }

    fn stage(&self, stage: u32) -> Step<'a> {
        Step { stage: stage, ..*self }
    }

    fn host(&self, host: &'a str) -> Step<'a> {
        Step { host: host, ..*self }
    }

    fn target(&self, target: &'a str) -> Step<'a> {
        Step { target: target, ..*self }
    }

    /// The compiler this step runs with, determined by its stage and host.
    fn compiler(&self) -> Compiler<'a> {
        Compiler::new(self.stage, self.host)
    }
}

/// Entry point: construct the rule set, plan the top-level steps from the
/// CLI subcommand, then execute them (plus dependencies) in order.
pub fn run(build: &Build) {
    let rules = build_rules(build);
    let steps = rules.plan();
    rules.run(&steps);
}

/// Define every rule of the build system: compilation, tests, docs, and
/// distribution, along with the dependency edges between them.
pub fn build_rules(build: &Build) -> Rules {
    let mut rules: Rules = Rules::new(build);
    // dummy rule to do nothing, useful when a dep maps to no deps
    rules.build("dummy", "path/to/nowhere");
    fn dummy<'a>(s: &Step<'a>, build: &'a Build) -> Step<'a> {
        s.name("dummy").stage(0)
         .target(&build.config.build)
         .host(&build.config.build)
    }

    // Helper for loading an entire DAG of crates, rooted at `name`
    let krates = |name: &str| {
        let mut ret = Vec::new();
        let mut list = vec![name];
        let mut visited = HashSet::new();
        while let Some(krate) = list.pop() {
            let default = krate == name;
            let krate = &build.crates[krate];
            let path = krate.path.strip_prefix(&build.src).unwrap();
            ret.push((krate, path.to_str().unwrap(), default));
            for dep in krate.deps.iter() {
                if visited.insert(dep) && dep != "build_helper" {
                    list.push(dep);
                }
            }
        }
        return ret
    };

    // Assembling a stage-N rustc requires the stage-(N-1) librustc; stage 0
    // is the downloaded snapshot, hence the dummy dep.
    rules.build("rustc", "path/to/nowhere")
         .dep(move |s| {
             if s.stage == 0 {
                 dummy(s, build)
             } else {
                 s.name("librustc")
                  .host(&build.config.build)
                  .stage(s.stage - 1)
             }
         })
         .run(move |s| compile::assemble_rustc(build, s.stage, s.target));
    // Cross-target LLVM depends on the host LLVM (for llvm-tblgen).
    rules.build("llvm", "src/llvm")
         .host(true)
         .dep(move |s| {
             if s.target == build.config.build {
                 dummy(s, build)
             } else {
                 s.target(&build.config.build)
             }
         })
         .run(move |s| native::llvm(build, s.target));

    // ========================================================================
    // Crate compilations
    //
    // Tools used during the build system but not shipped
    rules.build("libstd", "src/libstd")
         .dep(|s| s.name("build-crate-std_shim"));
    rules.build("libtest", "src/libtest")
         .dep(|s| s.name("build-crate-test_shim"));
    rules.build("librustc", "src/librustc")
         .dep(|s| s.name("build-crate-rustc-main"));
    // In each crate loop: when building for a non-build host, the crates are
    // compiled once on the build machine and then hard-linked over
    // (`*_link`) rather than rebuilt.
    for (krate, path, _default) in krates("std_shim") {
        rules.build(&krate.build_step, path)
             .dep(move |s| s.name("rustc").host(&build.config.build).target(s.host))
             .dep(move |s| {
                 if s.host == build.config.build {
                     dummy(s, build)
                 } else {
                     s.host(&build.config.build)
                 }
             })
             .run(move |s| {
                 if s.host == build.config.build {
                     compile::std(build, s.target, &s.compiler())
                 } else {
                     compile::std_link(build, s.target, s.stage, s.host)
                 }
             });
    }
    for (krate, path, default) in krates("test_shim") {
        rules.build(&krate.build_step, path)
             .dep(|s| s.name("libstd"))
             .dep(move |s| {
                 if s.host == build.config.build {
                     dummy(s, build)
                 } else {
                     s.host(&build.config.build)
                 }
             })
             .default(default)
             .run(move |s| {
                 if s.host == build.config.build {
                     compile::test(build, s.target, &s.compiler())
                 } else {
                     compile::test_link(build, s.target, s.stage, s.host)
                 }
             });
    }
    for (krate, path, default) in krates("rustc-main") {
        rules.build(&krate.build_step, path)
             .dep(|s| s.name("libtest"))
             .dep(move |s| s.name("llvm").host(&build.config.build).stage(0))
             .dep(move |s| {
                 if s.host == build.config.build {
                     dummy(s, build)
                 } else {
                     s.host(&build.config.build)
                 }
             })
             .host(true)
             .default(default)
             .run(move |s| {
                 if s.host == build.config.build {
                     compile::rustc(build, s.target, &s.compiler())
                 } else {
                     compile::rustc_link(build, s.target, s.stage, s.host)
                 }
             });
    }

    // ========================================================================
    // Test targets
    //
    // Various unit tests and tests suites we can run
    {
        let mut suite = |name, path, dir, mode| {
            rules.test(name, path)
                 .dep(|s| s.name("libtest"))
                 .dep(|s| s.name("tool-compiletest").target(s.host))
                 .dep(|s| s.name("test-helpers"))
                 .dep(move |s| {
                     if s.target.contains("android") {
                         s.name("android-copy-libs")
                     } else {
                         dummy(s, build)
                     }
                 })
                 .default(true)
                 .run(move |s| {
                     check::compiletest(build, &s.compiler(), s.target, dir, mode)
                 });
        };

        suite("check-rpass", "src/test/run-pass", "run-pass", "run-pass");
        suite("check-cfail", "src/test/compile-fail", "compile-fail", "compile-fail");
        suite("check-pfail", "src/test/parse-fail", "parse-fail", "parse-fail");
        suite("check-rfail", "src/test/run-fail", "run-fail", "run-fail");
        suite("check-rpass-valgrind", "src/test/run-pass-valgrind",
              "run-pass-valgrind", "run-pass-valgrind");
        suite("check-mir-opt", "src/test/mir-opt", "mir-opt", "mir-opt");
        if build.config.codegen_tests {
            suite("check-codegen", "src/test/codegen", "codegen", "codegen");
        }
        suite("check-codegen-units", "src/test/codegen-units", "codegen-units",
              "codegen-units");
        suite("check-incremental", "src/test/incremental", "incremental",
              "incremental");
        suite("check-ui", "src/test/ui", "ui", "ui");
        suite("check-pretty", "src/test/pretty", "pretty", "pretty");
        suite("check-pretty-rpass", "src/test/run-pass/pretty", "pretty",
              "run-pass");
        // NOTE(review): path is "src/test/run-pass/pretty" for a run-fail
        // suite — looks like it should be "src/test/run-fail/pretty"; confirm.
        suite("check-pretty-rfail", "src/test/run-pass/pretty", "pretty",
              "run-fail");
        suite("check-pretty-valgrind", "src/test/run-pass-valgrind", "pretty",
              "run-pass-valgrind");
    }

    // Debuginfo tests: lldb on Apple hosts, gdb elsewhere, nothing on MSVC.
    if build.config.build.contains("msvc") {
        // nothing to do for debuginfo tests
    } else if build.config.build.contains("apple") {
        rules.test("check-debuginfo", "src/test/debuginfo")
             .dep(|s| s.name("libtest"))
             .dep(|s| s.name("tool-compiletest").host(s.host))
             .dep(|s| s.name("test-helpers"))
             .dep(|s| s.name("debugger-scripts"))
             .run(move |s| check::compiletest(build, &s.compiler(), s.target,
                                              "debuginfo-lldb", "debuginfo"));
    } else {
        rules.test("check-debuginfo", "src/test/debuginfo")
             .dep(|s| s.name("libtest"))
             .dep(|s| s.name("tool-compiletest").host(s.host))
             .dep(|s| s.name("test-helpers"))
             .dep(|s| s.name("debugger-scripts"))
             .run(move |s| check::compiletest(build, &s.compiler(), s.target,
                                              "debuginfo-gdb", "debuginfo"));
    }

    rules.test("debugger-scripts", "src/etc/lldb_batchmode.py")
         .run(move |s| dist::debugger_scripts(build, &build.sysroot(&s.compiler()),
                                              s.target));

    // Suites needing a full compiler (librustc), not just libtest.
    {
        let mut suite = |name, path, dir, mode| {
            rules.test(name, path)
                 .dep(|s| s.name("librustc"))
                 .dep(|s| s.name("tool-compiletest").target(s.host))
                 .default(true)
                 .host(true)
                 .run(move |s| {
                     check::compiletest(build, &s.compiler(), s.target, dir, mode)
                 });
        };

        suite("check-rpass-full", "src/test/run-pass-fulldeps",
              "run-pass", "run-pass-fulldeps");
        suite("check-cfail-full", "src/test/compile-fail-fulldeps",
              "compile-fail", "compile-fail-fulldeps");
        suite("check-rmake", "src/test/run-make", "run-make", "run-make");
        suite("check-rustdoc", "src/test/rustdoc", "rustdoc", "rustdoc");
        suite("check-pretty-rpass-full", "src/test/run-pass-fulldeps",
              "pretty", "run-pass-fulldeps");
        suite("check-pretty-rfail-full", "src/test/run-fail-fulldeps",
              "pretty", "run-fail-fulldeps");
    }

    // Per-crate unit tests, plus an aggregate "-all" rule per tree.
    for (krate, path, _default) in krates("std_shim") {
        rules.test(&krate.test_step, path)
             .dep(|s| s.name("libtest"))
             .run(move |s| check::krate(build, &s.compiler(), s.target,
                                        Mode::Libstd, TestKind::Test,
                                        Some(&krate.name)));
    }
    rules.test("check-std-all", "path/to/nowhere")
         .dep(|s| s.name("libtest"))
         .default(true)
         .run(move |s| check::krate(build, &s.compiler(), s.target,
                                    Mode::Libstd, TestKind::Test, None));

    // std benchmarks
    for (krate, path, _default) in krates("std_shim") {
        rules.bench(&krate.bench_step, path)
             .dep(|s| s.name("libtest"))
             .run(move |s| check::krate(build, &s.compiler(), s.target,
                                        Mode::Libstd, TestKind::Bench,
                                        Some(&krate.name)));
    }
    rules.bench("bench-std-all", "path/to/nowhere")
         .dep(|s| s.name("libtest"))
         .default(true)
         .run(move |s| check::krate(build, &s.compiler(), s.target,
                                    Mode::Libstd, TestKind::Bench, None));

    for (krate, path, _default) in krates("test_shim") {
        rules.test(&krate.test_step, path)
             .dep(|s| s.name("libtest"))
             .run(move |s| check::krate(build, &s.compiler(), s.target,
                                        Mode::Libtest, TestKind::Test,
                                        Some(&krate.name)));
    }
    rules.test("check-test-all", "path/to/nowhere")
         .dep(|s| s.name("libtest"))
         .default(true)
         .run(move |s| check::krate(build, &s.compiler(), s.target,
                                    Mode::Libtest, TestKind::Test, None));
    for (krate, path, _default) in krates("rustc-main") {
        rules.test(&krate.test_step, path)
             .dep(|s| s.name("librustc"))
             .host(true)
             .run(move |s| check::krate(build, &s.compiler(), s.target,
                                        Mode::Librustc, TestKind::Test,
                                        Some(&krate.name)));
    }
    rules.test("check-rustc-all", "path/to/nowhere")
         .dep(|s| s.name("librustc"))
         .default(true)
         .host(true)
         .run(move |s| check::krate(build, &s.compiler(), s.target,
                                    Mode::Librustc, TestKind::Test, None));

    rules.test("check-linkchecker", "src/tools/linkchecker")
         .dep(|s| s.name("tool-linkchecker"))
         .dep(|s| s.name("default:doc"))
         .default(true)
         .host(true)
         .run(move |s| check::linkcheck(build, s.stage, s.target));
    rules.test("check-cargotest", "src/tools/cargotest")
         .dep(|s| s.name("tool-cargotest"))
         .dep(|s| s.name("librustc"))
         .host(true)
         .run(move |s| check::cargotest(build, s.stage, s.target));
    rules.test("check-tidy", "src/tools/tidy")
         .dep(|s| s.name("tool-tidy"))
         .default(true)
         .host(true)
         .run(move |s| check::tidy(build, s.stage, s.target));
    rules.test("check-error-index", "src/tools/error_index_generator")
         .dep(|s| s.name("libstd"))
         .dep(|s| s.name("tool-error-index").host(s.host))
         .default(true)
         .host(true)
         .run(move |s| check::error_index(build, &s.compiler()));
    rules.test("check-docs", "src/doc")
         .dep(|s| s.name("libtest"))
         .default(true)
         .host(true)
         .run(move |s| check::docs(build, &s.compiler()));

    rules.build("test-helpers", "src/rt/rust_test_helpers.c")
         .run(move |s| native::test_helpers(build, s.target));
    rules.test("android-copy-libs", "path/to/nowhere")
         .dep(|s| s.name("libtest"))
         .run(move |s| check::android_copy_libs(build, &s.compiler(), s.target));

    // ========================================================================
    // Build tools
    //
    // Tools used during the build system but not shipped
    rules.build("tool-rustbook", "src/tools/rustbook")
         .dep(|s| s.name("librustc"))
         .run(move |s| compile::tool(build, s.stage, s.target, "rustbook"));
    rules.build("tool-error-index", "src/tools/error_index_generator")
         .dep(|s| s.name("librustc"))
         .run(move |s| compile::tool(build, s.stage, s.target, "error_index_generator"));
    rules.build("tool-tidy", "src/tools/tidy")
         .dep(|s| s.name("libstd"))
         .run(move |s| compile::tool(build, s.stage, s.target, "tidy"));
    rules.build("tool-linkchecker", "src/tools/linkchecker")
         .dep(|s| s.name("libstd"))
         .run(move |s| compile::tool(build, s.stage, s.target, "linkchecker"));
    rules.build("tool-cargotest", "src/tools/cargotest")
         .dep(|s| s.name("libstd"))
         .run(move |s| compile::tool(build, s.stage, s.target, "cargotest"));
    rules.build("tool-compiletest", "src/tools/compiletest")
         .dep(|s| s.name("libtest"))
         .run(move |s| compile::tool(build, s.stage, s.target, "compiletest"));

    // ========================================================================
    // Documentation targets
    rules.doc("doc-book", "src/doc/book")
         .dep(move |s| s.name("tool-rustbook").target(&build.config.build))
         .default(build.config.docs)
         .run(move |s| doc::rustbook(build, s.stage, s.target, "book"));
    rules.doc("doc-nomicon", "src/doc/nomicon")
         .dep(move |s| s.name("tool-rustbook").target(&build.config.build))
         .default(build.config.docs)
         .run(move |s| doc::rustbook(build, s.stage, s.target, "nomicon"));
    rules.doc("doc-standalone", "src/doc")
         .dep(move |s| s.name("rustc").host(&build.config.build).target(&build.config.build))
         .default(build.config.docs)
         .run(move |s| doc::standalone(build, s.stage, s.target));
    rules.doc("doc-error-index", "src/tools/error_index_generator")
         .dep(move |s| s.name("tool-error-index").target(&build.config.build))
         .dep(move |s| s.name("librustc"))
         .default(build.config.docs)
         .host(true)
         .run(move |s| doc::error_index(build, s.stage, s.target));
    for (krate, path, default) in krates("std_shim") {
        rules.doc(&krate.doc_step, path)
             .dep(|s| s.name("libstd"))
             .default(default && build.config.docs)
             .run(move |s| doc::std(build, s.stage, s.target));
    }
    for (krate, path, default) in krates("test_shim") {
        rules.doc(&krate.doc_step, path)
             .dep(|s| s.name("libtest"))
             .default(default && build.config.docs)
             .run(move |s| doc::test(build, s.stage, s.target));
    }
    for (krate, path, default) in krates("rustc-main") {
        rules.doc(&krate.doc_step, path)
             .dep(|s| s.name("librustc"))
             .host(true)
             .default(default && build.config.compiler_docs)
             .run(move |s| doc::rustc(build, s.stage, s.target));
    }

    // ========================================================================
    // Distribution targets
    rules.dist("dist-rustc", "src/librustc")
         .dep(move |s| s.name("rustc").host(&build.config.build))
         .host(true)
         .default(true)
         .run(move |s| dist::rustc(build, s.stage, s.target));
    rules.dist("dist-std", "src/libstd")
         .dep(move |s| {
             // We want to package up as many target libraries as possible
             // for the `rust-std` package, so if this is a host target we
             // depend on librustc and otherwise we just depend on libtest.
             if build.config.host.iter().any(|t| t == s.target) {
                 s.name("librustc")
             } else {
                 s.name("libtest")
             }
         })
         .default(true)
         .run(move |s| dist::std(build, &s.compiler(), s.target));
    rules.dist("dist-mingw", "path/to/nowhere")
         .run(move |s| dist::mingw(build, s.target));
    rules.dist("dist-src", "src")
         .default(true)
         .host(true)
         .run(move |_| dist::rust_src(build));
    rules.dist("dist-docs", "src/doc")
         .default(true)
         .dep(|s| s.name("default:doc"))
         .run(move |s| dist::docs(build, s.stage, s.target));
    rules.dist("install", "src")
         .dep(|s| s.name("default:dist"))
         .run(move |s| install::install(build, s.stage, s.target));

    rules.verify();
    return rules
}

/// A rule of the build graph: how to run one named step and which other
/// steps it depends on.
struct Rule<'a> {
    /// Unique name, referenced by `Step::name` and by dependency closures.
    name: &'a str,
    /// Source path that selects this rule from the command line
    /// ("path/to/nowhere" for internal-only rules).
    path: &'a str,
    kind: Kind,
    /// Each closure maps a step of this rule to one of its dependencies.
    deps: Vec<Box<Fn(&Step<'a>) -> Step<'a> + 'a>>,
    /// The action executed for this rule.
    run: Box<Fn(&Step<'a>) + 'a>,
    /// Whether the rule runs when no paths are given on the command line.
    default: bool,
    /// Whether the rule is instantiated per-host rather than per-target.
    host: bool,
}

/// Which subcommand a rule belongs to.
#[derive(PartialEq)]
enum Kind {
    Build,
    Test,
    Bench,
    Dist,
    Doc,
}

impl<'a> Rule<'a> {
    fn new(name: &'a str, path: &'a str, kind: Kind) -> Rule<'a> {
        Rule {
            name: name,
            deps: Vec::new(),
            run: Box::new(|_| ()),
            path: path,
            kind: kind,
            default: false,
            host: false,
        }
    }
}

/// Builder returned by `Rules::{build,test,bench,doc,dist}`; registers the
/// rule into the rule map when dropped.
struct RuleBuilder<'a: 'b, 'b> {
    rules: &'b mut Rules<'a>,
    rule: Rule<'a>,
}

impl<'a, 'b> RuleBuilder<'a, 'b> {
    fn dep<F>(&mut self, f: F) -> &mut Self
        where F: Fn(&Step<'a>) -> Step<'a> + 'a,
    {
        self.rule.deps.push(Box::new(f));
        self
    }

    fn run<F>(&mut self, f: F) -> &mut Self
        where F: Fn(&Step<'a>) + 'a,
    {
        self.rule.run = Box::new(f);
        self
    }

    fn default(&mut self, default: bool) -> &mut Self {
        self.rule.default = default;
        self
    }

    fn host(&mut self, host: bool) -> &mut Self {
        self.rule.host = host;
        self
    }
}

impl<'a, 'b> Drop for RuleBuilder<'a, 'b> {
    fn drop(&mut self) {
        // Move the finished rule into the map, leaving a placeholder behind;
        // duplicate names are a programming error in build_rules.
        let rule = mem::replace(&mut self.rule, Rule::new("", "", Kind::Build));
        let prev = self.rules.rules.insert(rule.name, rule);
        if let Some(prev) = prev {
            panic!("duplicate rule named: {}", prev.name);
        }
    }
}

pub struct Rules<'a> {
    build: &'a Build,
    /// Template step carrying the default stage/host/target for planning.
    sbuild: Step<'a>,
    rules: HashMap<&'a str, Rule<'a>>,
}

impl<'a> Rules<'a> {
    fn new(build: &'a Build) -> Rules<'a> {
        Rules {
            build: build,
            sbuild: Step {
                stage: build.flags.stage.unwrap_or(2),
                target: &build.config.build,
                host: &build.config.build,
                name: "",
            },
            rules: HashMap::new(),
        }
    }

    fn build<'b>(&'b mut self, name: &'a str, path: &'a str)
                 -> RuleBuilder<'a, 'b> {
        self.rule(name, path, Kind::Build)
    }

    fn test<'b>(&'b mut self, name: &'a str, path: &'a str)
                -> RuleBuilder<'a, 'b> {
        self.rule(name, path, Kind::Test)
    }

    fn bench<'b>(&'b mut self, name: &'a str, path: &'a str)
                 -> RuleBuilder<'a, 'b> {
        self.rule(name, path, Kind::Bench)
    }

    fn doc<'b>(&'b mut self, name: &'a str, path: &'a str)
               -> RuleBuilder<'a, 'b> {
        self.rule(name, path, Kind::Doc)
    }

    fn dist<'b>(&'b mut self, name: &'a str, path: &'a str)
                -> RuleBuilder<'a, 'b> {
        self.rule(name, path, Kind::Dist)
    }

    fn rule<'b>(&'b mut self,
                name: &'a str,
                path: &'a str,
                kind: Kind) -> RuleBuilder<'a, 'b> {
        RuleBuilder {
            rules: self,
            rule: Rule::new(name, path, kind),
        }
    }

    /// Verify the dependency graph defined by all our rules are correct, e.g.
    /// everything points to a valid something else.
    fn verify(&self) {
        for rule in self.rules.values() {
            for dep in rule.deps.iter() {
                let dep = dep(&self.sbuild.name(rule.name));
                if self.rules.contains_key(&dep.name) || dep.name.starts_with("default:") {
                    continue
                }
                panic!("\

invalid rule dependency graph detected, was a rule added and maybe typo'd?

    `{}` depends on `{}` which does not exist

", rule.name, dep.name);
            }
        }
    }

    /// Print the user-selectable paths for `command` (used by `./x.py --help`).
    pub fn print_help(&self, command: &str) {
        let kind = match command {
            "build" => Kind::Build,
            "doc" => Kind::Doc,
            "test" => Kind::Test,
            "bench" => Kind::Bench,
            "dist" => Kind::Dist,
            _ => return,
        };
        let rules = self.rules.values().filter(|r| r.kind == kind);
        // Internal rules use the sentinel "path/to/nowhere" path; hide them.
        let rules = rules.filter(|r| !r.path.contains("nowhere"));
        let mut rules = rules.collect::<Vec<_>>();
        rules.sort_by_key(|r| r.path);

        println!("Available paths:\n");
        for rule in rules {
            print!("    ./x.py {} {}", command, rule.path);

            println!("");
        }
    }

    /// Construct the top-level build steps that we're going to be executing,
    /// given the subcommand that our build is performing.
    fn plan(&self) -> Vec<Step<'a>> {
        let (kind, paths) = match self.build.flags.cmd {
            Subcommand::Build { ref paths } => (Kind::Build, &paths[..]),
            Subcommand::Doc { ref paths } => (Kind::Doc, &paths[..]),
            Subcommand::Test { ref paths, test_args: _ } => (Kind::Test, &paths[..]),
            Subcommand::Bench { ref paths, test_args: _ } => (Kind::Bench, &paths[..]),
            Subcommand::Dist { install } => {
                if install {
                    return vec![self.sbuild.name("install")]
                } else {
                    (Kind::Dist, &[][..])
                }
            }
            Subcommand::Clean => panic!(),
        };

        // Select rules of the right kind matching the requested paths (or all
        // default rules when no paths are given), then instantiate each for
        // every configured host/target combination.
        self.rules.values().filter(|rule| rule.kind == kind).filter(|rule| {
            (paths.len() == 0 && rule.default) || paths.iter().any(|path| {
                path.ends_with(rule.path)
            })
        }).flat_map(|rule| {
            let hosts = if self.build.flags.host.len() > 0 {
                &self.build.flags.host
            } else {
                &self.build.config.host
            };
            let targets = if self.build.flags.target.len() > 0 {
                &self.build.flags.target
            } else {
                &self.build.config.target
            };
            // Host-only rules iterate over hosts; everything else over targets.
            let arr = if rule.host {hosts} else {targets};

            hosts.iter().flat_map(move |host| {
                arr.iter().map(move |target| {
                    self.sbuild.name(rule.name).target(target).host(host)
                })
            })
        }).collect()
    }

    /// Execute all top-level targets indicated by `steps`.
    ///
    /// This will take the list returned by `plan` and then execute each step
    /// along with all required dependencies as it goes up the chain.
    fn run(&self, steps: &[Step<'a>]) {
        self.build.verbose("bootstrap top targets:");
        for step in steps.iter() {
            self.build.verbose(&format!("\t{:?}", step));
        }

        // Using `steps` as the top-level targets, make a topological ordering
        // of what we need to do.
        let mut order = Vec::new();
        let mut added = HashSet::new();
        for step in steps.iter().cloned() {
            self.fill(step, &mut order, &mut added);
        }

        // Print out what we're doing for debugging
        self.build.verbose("bootstrap build plan:");
        for step in order.iter() {
            self.build.verbose(&format!("\t{:?}", step));
        }

        // And finally, iterate over everything and execute it.
        for step in order.iter() {
            self.build.verbose(&format!("executing step {:?}", step));
            (self.rules[step.name].run)(step);
        }
    }

    /// Post-order DFS: push `step` onto `order` after all of its
    /// dependencies. Magic "default:<kind>" dependencies expand to every
    /// default rule of that kind; `added` de-duplicates across the whole
    /// traversal.
    fn fill(&self,
            step: Step<'a>,
            order: &mut Vec<Step<'a>>,
            added: &mut HashSet<Step<'a>>) {
        if !added.insert(step.clone()) {
            return
        }
        for dep in self.rules[step.name].deps.iter() {
            let dep = dep(&step);
            if dep.name.starts_with("default:") {
                let kind = match &dep.name[8..] {
                    "doc" => Kind::Doc,
                    "dist" => Kind::Dist,
                    kind => panic!("unknown kind: `{}`", kind),
                };
                let host = self.build.config.host.iter().any(|h| h == dep.target);
                let rules = self.rules.values().filter(|r| r.default);
                // Host-only rules are skipped for non-host targets.
                for rule in rules.filter(|r| r.kind == kind && (!r.host || host)) {
                    self.fill(dep.name(rule.name), order, added);
                }
            } else {
                self.fill(dep, order, added);
            }
        }
        order.push(step);
    }
}
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Various utility functions used throughout rustbuild. +//! +//! Simple things like testing the various filesystem operations here and there, +//! not a lot of interesting happenings here unfortunately. + +use std::env; +use std::ffi::OsString; +use std::fs; +use std::path::{Path, PathBuf}; +use std::process::Command; + +use filetime::FileTime; + +/// Returns the `name` as the filename of a static library for `target`. +pub fn staticlib(name: &str, target: &str) -> String { + if target.contains("windows") { + format!("{}.lib", name) + } else { + format!("lib{}.a", name) + } +} + +/// Returns the last-modified time for `path`, or zero if it doesn't exist. +pub fn mtime(path: &Path) -> FileTime { + fs::metadata(path).map(|f| { + FileTime::from_last_modification_time(&f) + }).unwrap_or(FileTime::zero()) +} + +/// Copies a file from `src` to `dst`, attempting to use hard links and then +/// falling back to an actually filesystem copy if necessary. +pub fn copy(src: &Path, dst: &Path) { + let res = fs::hard_link(src, dst); + let res = res.or_else(|_| fs::copy(src, dst).map(|_| ())); + if let Err(e) = res { + panic!("failed to copy `{}` to `{}`: {}", src.display(), + dst.display(), e) + } +} + +/// Copies the `src` directory recursively to `dst`. Both are assumed to exist +/// when this function is called. +pub fn cp_r(src: &Path, dst: &Path) { + for f in t!(fs::read_dir(src)) { + let f = t!(f); + let path = f.path(); + let name = path.file_name().unwrap(); + let dst = dst.join(name); + if t!(f.file_type()).is_dir() { + t!(fs::create_dir_all(&dst)); + cp_r(&path, &dst); + } else { + let _ = fs::remove_file(&dst); + copy(&path, &dst); + } + } +} + +/// Copies the `src` directory recursively to `dst`. Both are assumed to exist +/// when this function is called. 
Unwanted files or directories can be skipped +/// by returning `false` from the filter function. +pub fn cp_filtered bool>(src: &Path, dst: &Path, filter: &F) { + // Inner function does the actual work + fn recurse bool>(src: &Path, dst: &Path, relative: &Path, filter: &F) { + for f in t!(fs::read_dir(src)) { + let f = t!(f); + let path = f.path(); + let name = path.file_name().unwrap(); + let dst = dst.join(name); + let relative = relative.join(name); + // Only copy file or directory if the filter function returns true + if filter(&relative) { + if t!(f.file_type()).is_dir() { + let _ = fs::remove_dir_all(&dst); + t!(fs::create_dir(&dst)); + recurse(&path, &dst, &relative, filter); + } else { + let _ = fs::remove_file(&dst); + copy(&path, &dst); + } + } + } + } + // Immediately recurse with an empty relative path + recurse(src, dst, Path::new(""), filter) +} + +/// Given an executable called `name`, return the filename for the +/// executable for a particular target. +pub fn exe(name: &str, target: &str) -> String { + if target.contains("windows") { + format!("{}.exe", name) + } else { + name.to_string() + } +} + +/// Returns whether the file name given looks like a dynamic library. +pub fn is_dylib(name: &str) -> bool { + name.ends_with(".dylib") || name.ends_with(".so") || name.ends_with(".dll") +} + +/// Returns the corresponding relative library directory that the compiler's +/// dylibs will be found in. +pub fn libdir(target: &str) -> &'static str { + if target.contains("windows") {"bin"} else {"lib"} +} + +/// Adds a list of lookup paths to `cmd`'s dynamic library lookup path. +pub fn add_lib_path(path: Vec, cmd: &mut Command) { + let mut list = dylib_path(); + for path in path { + list.insert(0, path); + } + cmd.env(dylib_path_var(), t!(env::join_paths(list))); +} + +/// Returns whether `dst` is up to date given that the file or files in `src` +/// are used to generate it. +/// +/// Uses last-modified time checks to verify this. 
+pub fn up_to_date(src: &Path, dst: &Path) -> bool { + let threshold = mtime(dst); + let meta = match fs::metadata(src) { + Ok(meta) => meta, + Err(e) => panic!("source {:?} failed to get metadata: {}", src, e), + }; + if meta.is_dir() { + dir_up_to_date(src, &threshold) + } else { + FileTime::from_last_modification_time(&meta) <= threshold + } +} + +fn dir_up_to_date(src: &Path, threshold: &FileTime) -> bool { + t!(fs::read_dir(src)).map(|e| t!(e)).all(|e| { + let meta = t!(e.metadata()); + if meta.is_dir() { + dir_up_to_date(&e.path(), threshold) + } else { + FileTime::from_last_modification_time(&meta) < *threshold + } + }) +} + +/// Returns the environment variable which the dynamic library lookup path +/// resides in for this platform. +pub fn dylib_path_var() -> &'static str { + if cfg!(target_os = "windows") { + "PATH" + } else if cfg!(target_os = "macos") { + "DYLD_LIBRARY_PATH" + } else { + "LD_LIBRARY_PATH" + } +} + +/// Parses the `dylib_path_var()` environment variable, returning a list of +/// paths that are members of this lookup path. +pub fn dylib_path() -> Vec { + env::split_paths(&env::var_os(dylib_path_var()).unwrap_or(OsString::new())) + .collect() +} + +/// `push` all components to `buf`. On windows, append `.exe` to the last component. 
+pub fn push_exe_path(mut buf: PathBuf, components: &[&str]) -> PathBuf { + let (&file, components) = components.split_last().expect("at least one component required"); + let mut file = file.to_owned(); + + if cfg!(windows) { + file.push_str(".exe"); + } + + for c in components { + buf.push(c); + } + + buf.push(file); + + buf +} diff --git a/src/build_helper/Cargo.toml b/src/build_helper/Cargo.toml new file mode 100644 index 0000000000000..01d704f816bbc --- /dev/null +++ b/src/build_helper/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "build_helper" +version = "0.1.0" +authors = ["The Rust Project Developers"] + +[lib] +name = "build_helper" +path = "lib.rs" diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs new file mode 100644 index 0000000000000..38844fb6c9ef0 --- /dev/null +++ b/src/build_helper/lib.rs @@ -0,0 +1,80 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![deny(warnings)] + +use std::process::{Command, Stdio}; +use std::path::{Path, PathBuf}; + +pub fn run(cmd: &mut Command) { + println!("running: {:?}", cmd); + run_silent(cmd); +} + +pub fn run_silent(cmd: &mut Command) { + let status = match cmd.status() { + Ok(status) => status, + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + if !status.success() { + fail(&format!("command did not execute successfully: {:?}\n\ + expected success, got: {}", + cmd, + status)); + } +} + +pub fn gnu_target(target: &str) -> String { + match target { + "i686-pc-windows-msvc" => "i686-pc-win32".to_string(), + "x86_64-pc-windows-msvc" => "x86_64-pc-win32".to_string(), + "i686-pc-windows-gnu" => "i686-w64-mingw32".to_string(), + "x86_64-pc-windows-gnu" => "x86_64-w64-mingw32".to_string(), + s => s.to_string(), + } +} + +pub fn cc2ar(cc: &Path, target: &str) -> Option { + if target.contains("msvc") { + None + } else if target.contains("musl") { + Some(PathBuf::from("ar")) + } else { + let parent = cc.parent().unwrap(); + let file = cc.file_name().unwrap().to_str().unwrap(); + for suffix in &["gcc", "cc", "clang"] { + if let Some(idx) = file.rfind(suffix) { + let mut file = file[..idx].to_owned(); + file.push_str("ar"); + return Some(parent.join(&file)); + } + } + Some(parent.join(file)) + } +} + +pub fn output(cmd: &mut Command) -> String { + let output = match cmd.stderr(Stdio::inherit()).output() { + Ok(status) => status, + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + if !output.status.success() { + panic!("command did not execute successfully: {:?}\n\ + expected success, got: {}", + cmd, + output.status); + } + String::from_utf8(output.stdout).unwrap() +} + +fn fail(s: &str) -> ! 
{ + println!("\n\n{}\n\n", s); + std::process::exit(1); +} diff --git a/src/ci/docker/arm-android/Dockerfile b/src/ci/docker/arm-android/Dockerfile new file mode 100644 index 0000000000000..c5b70c227c408 --- /dev/null +++ b/src/ci/docker/arm-android/Dockerfile @@ -0,0 +1,46 @@ +FROM ubuntu:16.04 + +RUN dpkg --add-architecture i386 && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python-minimal \ + git \ + cmake \ + ccache \ + unzip \ + expect \ + openjdk-9-jre \ + sudo \ + libstdc++6:i386 + +WORKDIR /android/ +ENV PATH=$PATH:/android/ndk-arm-9/bin:/android/sdk/tools:/android/sdk/platform-tools + +COPY install-ndk.sh install-sdk.sh accept-licenses.sh /android/ +RUN sh /android/install-ndk.sh +RUN sh /android/install-sdk.sh + +COPY start-emulator.sh /android/ +ENTRYPOINT ["/android/start-emulator.sh"] + +ENV TARGETS=arm-linux-androideabi +ENV TARGETS=$TARGETS,i686-linux-android +ENV TARGETS=$TARGETS,aarch64-linux-android +ENV TARGETS=$TARGETS,armv7-linux-androideabi + +ENV RUST_CONFIGURE_ARGS \ + --target=$TARGETS \ + --arm-linux-androideabi-ndk=/android/ndk-arm-9 \ + --armv7-linux-androideabi-ndk=/android/ndk-arm-9 \ + --i686-linux-android-ndk=/android/ndk-x86-9 \ + --aarch64-linux-android-ndk=/android/ndk-aarch64 \ + --enable-rustbuild +ENV RUST_CHECK_TARGET check-stage2-android +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/arm-android/accept-licenses.sh b/src/ci/docker/arm-android/accept-licenses.sh new file mode 100755 index 0000000000000..8d8f60a5ec260 --- /dev/null +++ b/src/ci/docker/arm-android/accept-licenses.sh @@ -0,0 +1,15 @@ +#!/usr/bin/expect -f +# ignore-license + +set timeout 1800 +set cmd [lindex $argv 0] +set licenses [lindex $argv 1] + +spawn {*}$cmd +expect { + "Do you accept the license '*'*" { + exp_send "y\r" + exp_continue + } + eof +} diff --git a/src/ci/docker/arm-android/install-ndk.sh 
b/src/ci/docker/arm-android/install-ndk.sh new file mode 100644 index 0000000000000..418ce69c5b1e5 --- /dev/null +++ b/src/ci/docker/arm-android/install-ndk.sh @@ -0,0 +1,45 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +cpgdb() { + cp android-ndk-r11c/prebuilt/linux-x86_64/bin/gdb /android/$1/bin/$2-gdb + cp android-ndk-r11c/prebuilt/linux-x86_64/bin/gdb-orig /android/$1/bin/gdb-orig + cp -r android-ndk-r11c/prebuilt/linux-x86_64/share /android/$1/share +} + +# Prep the Android NDK +# +# See https://github.com/servo/servo/wiki/Building-for-Android +curl -O https://dl.google.com/android/repository/android-ndk-r11c-linux-x86_64.zip +unzip -q android-ndk-r11c-linux-x86_64.zip +bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \ + --platform=android-9 \ + --toolchain=arm-linux-androideabi-4.9 \ + --install-dir=/android/ndk-arm-9 \ + --ndk-dir=/android/android-ndk-r11c \ + --arch=arm +cpgdb ndk-arm-9 arm-linux-androideabi +bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \ + --platform=android-21 \ + --toolchain=aarch64-linux-android-4.9 \ + --install-dir=/android/ndk-aarch64 \ + --ndk-dir=/android/android-ndk-r11c \ + --arch=arm64 +bash android-ndk-r11c/build/tools/make-standalone-toolchain.sh \ + --platform=android-9 \ + --toolchain=x86-4.9 \ + --install-dir=/android/ndk-x86-9 \ + --ndk-dir=/android/android-ndk-r11c \ + --arch=x86 + +rm -rf ./android-ndk-r11c-linux-x86_64.zip ./android-ndk-r11c diff --git a/src/ci/docker/arm-android/install-sdk.sh b/src/ci/docker/arm-android/install-sdk.sh new file mode 100644 index 0000000000000..2db1d46ba2273 --- /dev/null +++ 
b/src/ci/docker/arm-android/install-sdk.sh @@ -0,0 +1,33 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +# Prep the SDK and emulator +# +# Note that the update process requires that we accept a bunch of licenses, and +# we can't just pipe `yes` into it for some reason, so we take the same strategy +# located in https://github.com/appunite/docker by just wrapping it in a script +# which apparently magically accepts the licenses. + +mkdir sdk +curl https://dl.google.com/android/android-sdk_r24.4-linux.tgz | \ + tar xzf - -C sdk --strip-components=1 + +filter="platform-tools,android-18" +filter="$filter,sys-img-armeabi-v7a-android-18" + +./accept-licenses.sh "android - update sdk -a --no-ui --filter $filter" + +echo "no" | android create avd \ + --name arm-18 \ + --target android-18 \ + --abi armeabi-v7a diff --git a/src/ci/docker/arm-android/start-emulator.sh b/src/ci/docker/arm-android/start-emulator.sh new file mode 100755 index 0000000000000..93f20b28b8689 --- /dev/null +++ b/src/ci/docker/arm-android/start-emulator.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +set -ex +ANDROID_EMULATOR_FORCE_32BIT=true \ + emulator @arm-18 -no-window -partition-size 2047 & +exec "$@" diff --git a/src/ci/docker/cross/Dockerfile b/src/ci/docker/cross/Dockerfile new file mode 100644 index 0000000000000..d8af878a95863 --- /dev/null +++ b/src/ci/docker/cross/Dockerfile @@ -0,0 +1,66 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python-minimal \ + git \ + cmake \ + ccache \ + sudo \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross \ + gcc-arm-linux-gnueabi libc6-dev-armel-cross \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross \ + gcc-mips-linux-gnu libc6-dev-mips-cross \ + gcc-mipsel-linux-gnu libc6-dev-mipsel-cross \ + gcc-mips64-linux-gnuabi64 libc6-dev-mips64-cross \ + gcc-mips64el-linux-gnuabi64 libc6-dev-mips64el-cross \ + gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ + gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ + gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross \ + gcc-s390x-linux-gnu libc6-dev-s390x-cross + +ENV TARGETS=aarch64-unknown-linux-gnu +ENV TARGETS=$TARGETS,arm-unknown-linux-gnueabi +ENV TARGETS=$TARGETS,arm-unknown-linux-gnueabihf +ENV TARGETS=$TARGETS,armv7-unknown-linux-gnueabihf +ENV TARGETS=$TARGETS,asmjs-unknown-emscripten +ENV TARGETS=$TARGETS,mips-unknown-linux-gnu +ENV TARGETS=$TARGETS,mips64-unknown-linux-gnuabi64 +ENV TARGETS=$TARGETS,mips64el-unknown-linux-gnuabi64 +ENV TARGETS=$TARGETS,mipsel-unknown-linux-gnu +ENV TARGETS=$TARGETS,powerpc-unknown-linux-gnu +ENV TARGETS=$TARGETS,powerpc64-unknown-linux-gnu +ENV TARGETS=$TARGETS,powerpc64le-unknown-linux-gnu +ENV TARGETS=$TARGETS,s390x-unknown-linux-gnu +ENV TARGETS=$TARGETS,wasm32-unknown-emscripten + +#ENV TARGETS=$TARGETS,mips-unknown-linux-musl +#ENV TARGETS=$TARGETS,arm-unknown-linux-musleabi +#ENV TARGETS=$TARGETS,arm-unknown-linux-musleabihf +#ENV TARGETS=$TARGETS,armv7-unknown-linux-musleabihf +#ENV 
TARGETS=$TARGETS,x86_64-rumprun-netbsd + +ENV RUST_CONFIGURE_ARGS \ + --target=$TARGETS \ + --enable-rustbuild +ENV RUST_CHECK_TARGET "" + +ENV AR_s390x_unknown_linux_gnu=s390x-linux-gnu-ar \ + CC_s390x_unknown_linux_gnu=s390x-linux-gnu-gcc \ + AR_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-ar \ + CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ + AR_mips64el_unknown_linux_gnuabi64=mips64el-linux-gnuabi64-ar \ + CC_mips64el_unknown_linux_gnuabi64=mips64el-linux-gnuabi64-gcc \ + AR_powerpc64_unknown_linux_gnu=powerpc64-linux-gnu-ar \ + CC_powerpc64_unknown_linux_gnu=powerpc64-linux-gnu-gcc + +# FIXME(rust-lang/rust#36150): powerpc unfortunately aborts right now +ENV NO_LLVM_ASSERTIONS=1 + +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/i686-gnu-nopt/Dockerfile b/src/ci/docker/i686-gnu-nopt/Dockerfile new file mode 100644 index 0000000000000..a9ef29daaf1a1 --- /dev/null +++ b/src/ci/docker/i686-gnu-nopt/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++-multilib \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS --build=i686-unknown-linux-gnu --disable-optimize-tests +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/i686-gnu/Dockerfile b/src/ci/docker/i686-gnu/Dockerfile new file mode 100644 index 0000000000000..d0ddde95b4473 --- /dev/null +++ b/src/ci/docker/i686-gnu/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++-multilib \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS --build=i686-unknown-linux-gnu +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/run.sh b/src/ci/docker/run.sh new file mode 100755 index 
0000000000000..c5b1d00fb7cc1 --- /dev/null +++ b/src/ci/docker/run.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -e + +script=`cd $(dirname $0) && pwd`/`basename $0` +image=$1 + +docker_dir="`dirname $script`" +ci_dir="`dirname $docker_dir`" +src_dir="`dirname $ci_dir`" +root_dir="`dirname $src_dir`" + +docker build \ + --rm \ + -t rust-ci \ + "`dirname "$script"`/$image" + +mkdir -p $HOME/.ccache +mkdir -p $HOME/.cargo + +exec docker run \ + --volume "$root_dir:/checkout:ro" \ + --workdir /tmp/obj \ + --env SRC=/checkout \ + --env CCACHE_DIR=/ccache \ + --volume "$HOME/.ccache:/ccache" \ + --env CARGO_HOME=/cargo \ + --env LOCAL_USER_ID=`id -u` \ + --volume "$HOME/.cargo:/cargo" \ + --interactive \ + --tty \ + rust-ci \ + /checkout/src/ci/run.sh diff --git a/src/ci/docker/x86_64-freebsd/Dockerfile b/src/ci/docker/x86_64-freebsd/Dockerfile new file mode 100644 index 0000000000000..dc16c39961c45 --- /dev/null +++ b/src/ci/docker/x86_64-freebsd/Dockerfile @@ -0,0 +1,29 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python-minimal \ + git \ + cmake \ + ccache \ + sudo \ + bzip2 \ + xz-utils \ + wget + +COPY build-toolchain.sh /tmp/ +RUN sh /tmp/build-toolchain.sh + +ENV \ + AR_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-ar \ + CC_x86_64_unknown_freebsd=x86_64-unknown-freebsd10-gcc + +ENV RUST_CONFIGURE_ARGS --target=x86_64-unknown-freebsd --enable-rustbuild +ENV RUST_CHECK_TARGET "" +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-freebsd/build-toolchain.sh 
b/src/ci/docker/x86_64-freebsd/build-toolchain.sh new file mode 100644 index 0000000000000..d4bc886d50ea4 --- /dev/null +++ b/src/ci/docker/x86_64-freebsd/build-toolchain.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +set -ex + +ARCH=x86_64 +BINUTILS=2.25.1 +GCC=5.3.0 + +mkdir binutils +cd binutils + +# First up, build binutils +curl https://ftp.gnu.org/gnu/binutils/binutils-$BINUTILS.tar.bz2 | tar xjf - +mkdir binutils-build +cd binutils-build +../binutils-$BINUTILS/configure \ + --target=$ARCH-unknown-freebsd10 +make -j10 +make install +cd ../.. +rm -rf binutils + +# Next, download the FreeBSD libc and relevant header files + +mkdir freebsd +case "$ARCH" in + x86_64) + URL=ftp://ftp.freebsd.org/pub/FreeBSD/releases/amd64/10.2-RELEASE/base.txz + ;; + i686) + URL=ftp://ftp.freebsd.org/pub/FreeBSD/releases/i386/10.2-RELEASE/base.txz + ;; +esac +curl $URL | tar xJf - -C freebsd ./usr/include ./usr/lib ./lib + +dst=/usr/local/$ARCH-unknown-freebsd10 + +cp -r freebsd/usr/include $dst/ +cp freebsd/usr/lib/crt1.o $dst/lib +cp freebsd/usr/lib/Scrt1.o $dst/lib +cp freebsd/usr/lib/crti.o $dst/lib +cp freebsd/usr/lib/crtn.o $dst/lib +cp freebsd/usr/lib/libc.a $dst/lib +cp freebsd/usr/lib/libutil.a $dst/lib +cp freebsd/usr/lib/libutil_p.a $dst/lib +cp freebsd/usr/lib/libm.a $dst/lib +cp freebsd/usr/lib/librt.so.1 $dst/lib +cp freebsd/usr/lib/libexecinfo.so.1 $dst/lib +cp freebsd/lib/libc.so.7 $dst/lib +cp freebsd/lib/libm.so.5 $dst/lib +cp freebsd/lib/libutil.so.9 $dst/lib +cp freebsd/lib/libthr.so.3 $dst/lib/libpthread.so + +ln -s libc.so.7 $dst/lib/libc.so +ln -s libm.so.5 $dst/lib/libm.so +ln -s librt.so.1 $dst/lib/librt.so +ln -s 
libutil.so.9 $dst/lib/libutil.so +ln -s libexecinfo.so.1 $dst/lib/libexecinfo.so +rm -rf freebsd + +# Finally, download and build gcc to target FreeBSD +mkdir gcc +cd gcc +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +cd gcc-$GCC +./contrib/download_prerequisites + +mkdir ../gcc-build +cd ../gcc-build +../gcc-$GCC/configure \ + --enable-languages=c \ + --target=$ARCH-unknown-freebsd10 \ + --disable-multilib \ + --disable-nls \ + --disable-libgomp \ + --disable-libquadmath \ + --disable-libssp \ + --disable-libvtv \ + --disable-libcilkrts \ + --disable-libada \ + --disable-libsanitizer \ + --disable-libquadmath-support \ + --disable-lto +make -j10 +make install +cd ../.. +rm -rf gcc diff --git a/src/ci/docker/x86_64-gnu-cargotest/Dockerfile b/src/ci/docker/x86_64-gnu-cargotest/Dockerfile new file mode 100644 index 0000000000000..1db01f2b48d46 --- /dev/null +++ b/src/ci/docker/x86_64-gnu-cargotest/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python-minimal \ + git \ + cmake \ + ccache \ + libssl-dev \ + sudo + +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --enable-rustbuild +ENV RUST_CHECK_TARGET check-cargotest +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-gnu-debug/Dockerfile b/src/ci/docker/x86_64-gnu-debug/Dockerfile new file mode 100644 index 0000000000000..9e98215775e51 --- /dev/null +++ b/src/ci/docker/x86_64-gnu-debug/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS \ + --build=x86_64-unknown-linux-gnu \ + --enable-debug \ + --enable-optimize +ENV RUST_CHECK_TARGET "" +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git 
a/src/ci/docker/x86_64-gnu-llvm-3.7/Dockerfile b/src/ci/docker/x86_64-gnu-llvm-3.7/Dockerfile new file mode 100644 index 0000000000000..ca06940ae5e2d --- /dev/null +++ b/src/ci/docker/x86_64-gnu-llvm-3.7/Dockerfile @@ -0,0 +1,26 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python2.7-minimal \ + git \ + cmake \ + ccache \ + sudo \ + gdb \ + llvm-3.7-tools \ + libedit-dev \ + zlib1g-dev + +ENV RUST_CONFIGURE_ARGS \ + --build=x86_64-unknown-linux-gnu \ + --enable-rustbuild \ + --llvm-root=/usr/lib/llvm-3.7 +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-gnu-nopt/Dockerfile b/src/ci/docker/x86_64-gnu-nopt/Dockerfile new file mode 100644 index 0000000000000..73a3e2c726cee --- /dev/null +++ b/src/ci/docker/x86_64-gnu-nopt/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --disable-optimize-tests +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-gnu-rustbuild/Dockerfile b/src/ci/docker/x86_64-gnu-rustbuild/Dockerfile new file mode 100644 index 0000000000000..d4d0492e2a260 --- /dev/null +++ b/src/ci/docker/x86_64-gnu-rustbuild/Dockerfile @@ -0,0 +1,20 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + python-minimal \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu --enable-rustbuild +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-gnu/Dockerfile b/src/ci/docker/x86_64-gnu/Dockerfile 
new file mode 100644 index 0000000000000..f125693e7ae1a --- /dev/null +++ b/src/ci/docker/x86_64-gnu/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + sudo \ + gdb + +ENV RUST_CONFIGURE_ARGS --build=x86_64-unknown-linux-gnu +ENV RUST_CHECK_TARGET check +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-musl/Dockerfile b/src/ci/docker/x86_64-musl/Dockerfile new file mode 100644 index 0000000000000..1afaef2e05678 --- /dev/null +++ b/src/ci/docker/x86_64-musl/Dockerfile @@ -0,0 +1,27 @@ +FROM ubuntu:16.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python2.7 \ + git \ + cmake \ + ccache \ + xz-utils \ + sudo \ + gdb + +WORKDIR /build/ +COPY build-musl.sh /build/ +RUN sh /build/build-musl.sh && rm -rf /build + +ENV RUST_CONFIGURE_ARGS \ + --target=x86_64-unknown-linux-musl \ + --musl-root=/musl-x86_64 +ENV RUST_CHECK_TARGET check-stage2-T-x86_64-unknown-linux-musl-H-x86_64-unknown-linux-gnu + +RUN mkdir /tmp/obj +RUN chmod 777 /tmp/obj diff --git a/src/ci/docker/x86_64-musl/build-musl.sh b/src/ci/docker/x86_64-musl/build-musl.sh new file mode 100644 index 0000000000000..2bfbd646b75c8 --- /dev/null +++ b/src/ci/docker/x86_64-musl/build-musl.sh @@ -0,0 +1,33 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +set -ex + +export CFLAGS="-fPIC" +MUSL=musl-1.1.14 +curl https://www.musl-libc.org/releases/$MUSL.tar.gz | tar xzf - +cd $MUSL +./configure --prefix=/musl-x86_64 --disable-shared +make -j10 +make install +make clean +cd .. + +# To build MUSL we're going to need a libunwind lying around, so acquire that +# here and build it. +curl -L https://github.com/llvm-mirror/llvm/archive/release_37.tar.gz | tar xzf - +curl -L https://github.com/llvm-mirror/libunwind/archive/release_37.tar.gz | tar xzf - +mkdir libunwind-build +cd libunwind-build +cmake ../libunwind-release_37 -DLLVM_PATH=/build/llvm-release_37 \ + -DLIBUNWIND_ENABLE_SHARED=0 +make -j10 +cp lib/libunwind.a /musl-x86_64/lib diff --git a/src/ci/run.sh b/src/ci/run.sh new file mode 100755 index 0000000000000..da238dddecacb --- /dev/null +++ b/src/ci/run.sh @@ -0,0 +1,44 @@ +#!/bin/sh +# Copyright 2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. 
+ +set -e + +if [ "$LOCAL_USER_ID" != "" ]; then + useradd --shell /bin/bash -u $LOCAL_USER_ID -o -c "" -m user + export HOME=/home/user + export LOCAL_USER_ID= + exec sudo -E -u user env PATH=$PATH "$0" +fi + +if [ "$NO_LLVM_ASSERTIONS" = "" ]; then + LLVM_ASSERTIONS=--enable-llvm-assertions +fi + +set -ex + +$SRC/configure \ + --disable-manage-submodules \ + --enable-debug-assertions \ + --enable-quiet-tests \ + --enable-ccache \ + --enable-vendor \ + $LLVM_ASSERTIONS \ + $RUST_CONFIGURE_ARGS + +if [ "$TRAVIS_OS_NAME" = "osx" ]; then + ncpus=$(sysctl -n hw.ncpu) +else + ncpus=$(nproc) +fi + +make -j $ncpus tidy +make -j $ncpus +exec make $RUST_CHECK_TARGET -j $ncpus diff --git a/src/compiler-rt b/src/compiler-rt index b6087e82ba138..a8fc4c169fac4 160000 --- a/src/compiler-rt +++ b/src/compiler-rt @@ -1 +1 @@ -Subproject commit b6087e82ba1384c4af3adf2dc68e92316f0d4caf +Subproject commit a8fc4c169fac43a5dc204d4fd56ddb1739f8c178 diff --git a/src/compiletest/common.rs b/src/compiletest/common.rs deleted file mode 100644 index eb6c29eefbe78..0000000000000 --- a/src/compiletest/common.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
-pub use self::Mode::*; - -use std::fmt; -use std::str::FromStr; -use std::path::PathBuf; - -#[derive(Clone, Copy, PartialEq, Debug)] -pub enum Mode { - CompileFail, - ParseFail, - RunFail, - RunPass, - RunPassValgrind, - Pretty, - DebugInfoGdb, - DebugInfoLldb, - Codegen, - Rustdoc, -} - -impl FromStr for Mode { - type Err = (); - fn from_str(s: &str) -> Result { - match s { - "compile-fail" => Ok(CompileFail), - "parse-fail" => Ok(ParseFail), - "run-fail" => Ok(RunFail), - "run-pass" => Ok(RunPass), - "run-pass-valgrind" => Ok(RunPassValgrind), - "pretty" => Ok(Pretty), - "debuginfo-lldb" => Ok(DebugInfoLldb), - "debuginfo-gdb" => Ok(DebugInfoGdb), - "codegen" => Ok(Codegen), - "rustdoc" => Ok(Rustdoc), - _ => Err(()), - } - } -} - -impl fmt::Display for Mode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(match *self { - CompileFail => "compile-fail", - ParseFail => "parse-fail", - RunFail => "run-fail", - RunPass => "run-pass", - RunPassValgrind => "run-pass-valgrind", - Pretty => "pretty", - DebugInfoGdb => "debuginfo-gdb", - DebugInfoLldb => "debuginfo-lldb", - Codegen => "codegen", - Rustdoc => "rustdoc", - }, f) - } -} - -#[derive(Clone)] -pub struct Config { - // The library paths required for running the compiler - pub compile_lib_path: String, - - // The library paths required for running compiled programs - pub run_lib_path: String, - - // The rustc executable - pub rustc_path: PathBuf, - - // The rustdoc executable - pub rustdoc_path: PathBuf, - - // The python executable - pub python: String, - - // The llvm binaries path - pub llvm_bin_path: Option, - - // The valgrind path - pub valgrind_path: Option, - - // Whether to fail if we can't run run-pass-valgrind tests under valgrind - // (or, alternatively, to silently run them like regular run-pass tests). 
- pub force_valgrind: bool, - - // The directory containing the tests to run - pub src_base: PathBuf, - - // The directory where programs should be built - pub build_base: PathBuf, - - // Directory for auxiliary libraries - pub aux_base: PathBuf, - - // The name of the stage being built (stage1, etc) - pub stage_id: String, - - // The test mode, compile-fail, run-fail, run-pass - pub mode: Mode, - - // Run ignored tests - pub run_ignored: bool, - - // Only run tests that match this filter - pub filter: Option, - - // Write out a parseable log of tests that were run - pub logfile: Option, - - // A command line to prefix program execution with, - // for running under valgrind - pub runtool: Option, - - // Flags to pass to the compiler when building for the host - pub host_rustcflags: Option, - - // Flags to pass to the compiler when building for the target - pub target_rustcflags: Option, - - // Target system to be tested - pub target: String, - - // Host triple for the compiler being invoked - pub host: String, - - // Version of GDB - pub gdb_version: Option, - - // Version of LLDB - pub lldb_version: Option, - - // Path to the android tools - pub android_cross_path: PathBuf, - - // Extra parameter to run adb on arm-linux-androideabi - pub adb_path: String, - - // Extra parameter to run test suite on arm-linux-androideabi - pub adb_test_dir: String, - - // status whether android device available or not - pub adb_device_status: bool, - - // the path containing LLDB's Python module - pub lldb_python_dir: Option, - - // Explain what's going on - pub verbose: bool -} diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs deleted file mode 100644 index da8331fc06283..0000000000000 --- a/src/compiletest/compiletest.rs +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![crate_type = "bin"] - -#![feature(box_syntax)] -#![feature(dynamic_lib)] -#![feature(libc)] -#![feature(rustc_private)] -#![feature(str_char)] -#![feature(test)] - -#![deny(warnings)] - -extern crate libc; -extern crate test; -extern crate getopts; - -#[macro_use] -extern crate log; - -use std::env; -use std::fs; -use std::path::{Path, PathBuf}; -use getopts::{optopt, optflag, reqopt}; -use common::Config; -use common::{Pretty, DebugInfoGdb, DebugInfoLldb}; -use util::logv; - -pub mod procsrv; -pub mod util; -pub mod header; -pub mod runtest; -pub mod common; -pub mod errors; -mod raise_fd_limit; - -pub fn main() { - let config = parse_config(env::args().collect()); - - if config.valgrind_path.is_none() && config.force_valgrind { - panic!("Can't find Valgrind to run Valgrind tests"); - } - - log_config(&config); - run_tests(&config); -} - -pub fn parse_config(args: Vec ) -> Config { - - let groups : Vec = - vec!(reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"), - reqopt("", "run-lib-path", "path to target shared libraries", "PATH"), - reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"), - reqopt("", "rustdoc-path", "path to rustdoc to use for compiling", "PATH"), - reqopt("", "python", "path to python to use for doc tests", "PATH"), - optopt("", "valgrind-path", "path to Valgrind executable for Valgrind tests", "PROGRAM"), - optflag("", "force-valgrind", "fail if Valgrind tests cannot be run under Valgrind"), - optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"), - reqopt("", "src-base", "directory to scan for test files", "PATH"), - reqopt("", "build-base", "directory to deposit test outputs", "PATH"), - reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"), - reqopt("", "stage-id", 
"the target-stage identifier", "stageN-TARGET"), - reqopt("", "mode", "which sort of compile tests to run", - "(compile-fail|parse-fail|run-fail|run-pass|run-pass-valgrind|pretty|debug-info)"), - optflag("", "ignored", "run tests marked as ignored"), - optopt("", "runtool", "supervisor program to run tests under \ - (eg. emulator, valgrind)", "PROGRAM"), - optopt("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS"), - optopt("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS"), - optflag("", "verbose", "run tests verbosely, showing all output"), - optopt("", "logfile", "file to log test execution to", "FILE"), - optopt("", "target", "the target to build for", "TARGET"), - optopt("", "host", "the host to build for", "HOST"), - optopt("", "gdb-version", "the version of GDB used", "VERSION STRING"), - optopt("", "lldb-version", "the version of LLDB used", "VERSION STRING"), - optopt("", "android-cross-path", "Android NDK standalone path", "PATH"), - optopt("", "adb-path", "path to the android debugger", "PATH"), - optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"), - optopt("", "lldb-python-dir", "directory containing LLDB's python module", "PATH"), - optflag("h", "help", "show this message")); - - let (argv0, args_) = args.split_first().unwrap(); - if args.len() == 1 || args[1] == "-h" || args[1] == "--help" { - let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0); - println!("{}", getopts::usage(&message, &groups)); - println!(""); - panic!() - } - - let matches = - &match getopts::getopts(args_, &groups) { - Ok(m) => m, - Err(f) => panic!("{:?}", f) - }; - - if matches.opt_present("h") || matches.opt_present("help") { - let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0); - println!("{}", getopts::usage(&message, &groups)); - println!(""); - panic!() - } - - fn opt_path(m: &getopts::Matches, nm: &str) -> PathBuf { - match m.opt_str(nm) { - Some(s) => PathBuf::from(&s), - None => 
panic!("no option (=path) found for {}", nm), - } - } - - let filter = if !matches.free.is_empty() { - Some(matches.free[0].clone()) - } else { - None - }; - - Config { - compile_lib_path: matches.opt_str("compile-lib-path").unwrap(), - run_lib_path: matches.opt_str("run-lib-path").unwrap(), - rustc_path: opt_path(matches, "rustc-path"), - rustdoc_path: opt_path(matches, "rustdoc-path"), - python: matches.opt_str("python").unwrap(), - valgrind_path: matches.opt_str("valgrind-path"), - force_valgrind: matches.opt_present("force-valgrind"), - llvm_bin_path: matches.opt_str("llvm-bin-path").map(|s| PathBuf::from(&s)), - src_base: opt_path(matches, "src-base"), - build_base: opt_path(matches, "build-base"), - aux_base: opt_path(matches, "aux-base"), - stage_id: matches.opt_str("stage-id").unwrap(), - mode: matches.opt_str("mode").unwrap().parse().ok().expect("invalid mode"), - run_ignored: matches.opt_present("ignored"), - filter: filter, - logfile: matches.opt_str("logfile").map(|s| PathBuf::from(&s)), - runtool: matches.opt_str("runtool"), - host_rustcflags: matches.opt_str("host-rustcflags"), - target_rustcflags: matches.opt_str("target-rustcflags"), - target: opt_str2(matches.opt_str("target")), - host: opt_str2(matches.opt_str("host")), - gdb_version: extract_gdb_version(matches.opt_str("gdb-version")), - lldb_version: extract_lldb_version(matches.opt_str("lldb-version")), - android_cross_path: opt_path(matches, "android-cross-path"), - adb_path: opt_str2(matches.opt_str("adb-path")), - adb_test_dir: format!("{}/{}", - opt_str2(matches.opt_str("adb-test-dir")), - opt_str2(matches.opt_str("target"))), - adb_device_status: - opt_str2(matches.opt_str("target")).contains("android") && - "(none)" != opt_str2(matches.opt_str("adb-test-dir")) && - !opt_str2(matches.opt_str("adb-test-dir")).is_empty(), - lldb_python_dir: matches.opt_str("lldb-python-dir"), - verbose: matches.opt_present("verbose"), - } -} - -pub fn log_config(config: &Config) { - let c = config; - logv(c, 
format!("configuration:")); - logv(c, format!("compile_lib_path: {:?}", config.compile_lib_path)); - logv(c, format!("run_lib_path: {:?}", config.run_lib_path)); - logv(c, format!("rustc_path: {:?}", config.rustc_path.display())); - logv(c, format!("rustdoc_path: {:?}", config.rustdoc_path.display())); - logv(c, format!("src_base: {:?}", config.src_base.display())); - logv(c, format!("build_base: {:?}", config.build_base.display())); - logv(c, format!("stage_id: {}", config.stage_id)); - logv(c, format!("mode: {}", config.mode)); - logv(c, format!("run_ignored: {}", config.run_ignored)); - logv(c, format!("filter: {}", - opt_str(&config.filter - .as_ref() - .map(|re| re.to_owned())))); - logv(c, format!("runtool: {}", opt_str(&config.runtool))); - logv(c, format!("host-rustcflags: {}", - opt_str(&config.host_rustcflags))); - logv(c, format!("target-rustcflags: {}", - opt_str(&config.target_rustcflags))); - logv(c, format!("target: {}", config.target)); - logv(c, format!("host: {}", config.host)); - logv(c, format!("android-cross-path: {:?}", - config.android_cross_path.display())); - logv(c, format!("adb_path: {:?}", config.adb_path)); - logv(c, format!("adb_test_dir: {:?}", config.adb_test_dir)); - logv(c, format!("adb_device_status: {}", - config.adb_device_status)); - logv(c, format!("verbose: {}", config.verbose)); - logv(c, format!("\n")); -} - -pub fn opt_str<'a>(maybestr: &'a Option) -> &'a str { - match *maybestr { - None => "(none)", - Some(ref s) => s, - } -} - -pub fn opt_str2(maybestr: Option) -> String { - match maybestr { - None => "(none)".to_owned(), - Some(s) => s, - } -} - -pub fn run_tests(config: &Config) { - if config.target.contains("android") { - if let DebugInfoGdb = config.mode { - println!("{} debug-info test uses tcp 5039 port.\ - please reserve it", config.target); - } - - // android debug-info test uses remote debugger - // so, we test 1 thread at once. 
- // also trying to isolate problems with adb_run_wrapper.sh ilooping - env::set_var("RUST_TEST_THREADS","1"); - } - - match config.mode { - DebugInfoLldb => { - // Some older versions of LLDB seem to have problems with multiple - // instances running in parallel, so only run one test thread at a - // time. - env::set_var("RUST_TEST_THREADS", "1"); - } - _ => { /* proceed */ } - } - - let opts = test_opts(config); - let tests = make_tests(config); - // sadly osx needs some file descriptor limits raised for running tests in - // parallel (especially when we have lots and lots of child processes). - // For context, see #8904 - unsafe { raise_fd_limit::raise_fd_limit(); } - // Prevent issue #21352 UAC blocking .exe containing 'patch' etc. on Windows - // If #11207 is resolved (adding manifest to .exe) this becomes unnecessary - env::set_var("__COMPAT_LAYER", "RunAsInvoker"); - let res = test::run_tests_console(&opts, tests.into_iter().collect()); - match res { - Ok(true) => {} - Ok(false) => panic!("Some tests failed"), - Err(e) => { - println!("I/O failure during tests: {:?}", e); - } - } -} - -pub fn test_opts(config: &Config) -> test::TestOpts { - test::TestOpts { - filter: match config.filter { - None => None, - Some(ref filter) => Some(filter.clone()), - }, - run_ignored: config.run_ignored, - logfile: config.logfile.clone(), - run_tests: true, - bench_benchmarks: true, - nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(), - color: test::AutoColor, - } -} - -pub fn make_tests(config: &Config) -> Vec { - debug!("making tests from {:?}", - config.src_base.display()); - let mut tests = Vec::new(); - let dirs = fs::read_dir(&config.src_base).unwrap(); - for file in dirs { - let file = file.unwrap().path(); - debug!("inspecting file {:?}", file.display()); - if is_test(config, &file) { - tests.push(make_test(config, &file)) - } - } - tests -} - -pub fn is_test(config: &Config, testfile: &Path) -> bool { - // Pretty-printer does not work with .rc files yet - let 
valid_extensions = - match config.mode { - Pretty => vec!(".rs".to_owned()), - _ => vec!(".rc".to_owned(), ".rs".to_owned()) - }; - let invalid_prefixes = vec!(".".to_owned(), "#".to_owned(), "~".to_owned()); - let name = testfile.file_name().unwrap().to_str().unwrap(); - - let mut valid = false; - - for ext in &valid_extensions { - if name.ends_with(ext) { - valid = true; - } - } - - for pre in &invalid_prefixes { - if name.starts_with(pre) { - valid = false; - } - } - - return valid; -} - -pub fn make_test(config: &Config, testfile: &Path) -> test::TestDescAndFn -{ - test::TestDescAndFn { - desc: test::TestDesc { - name: make_test_name(config, testfile), - ignore: header::is_test_ignored(config, testfile), - should_panic: test::ShouldPanic::No, - }, - testfn: make_test_closure(config, &testfile), - } -} - -pub fn make_test_name(config: &Config, testfile: &Path) -> test::TestName { - - // Try to elide redundant long paths - fn shorten(path: &Path) -> String { - let filename = path.file_name().unwrap().to_str(); - let p = path.parent().unwrap(); - let dir = p.file_name().unwrap().to_str(); - format!("{}/{}", dir.unwrap_or(""), filename.unwrap_or("")) - } - - test::DynTestName(format!("[{}] {}", config.mode, shorten(testfile))) -} - -pub fn make_test_closure(config: &Config, testfile: &Path) -> test::TestFn { - let config = (*config).clone(); - let testfile = testfile.to_path_buf(); - test::DynTestFn(Box::new(move || { - runtest::run(config, &testfile) - })) -} - -fn extract_gdb_version(full_version_line: Option) -> Option { - match full_version_line { - Some(ref full_version_line) - if !full_version_line.trim().is_empty() => { - let full_version_line = full_version_line.trim(); - - // used to be a regex "(^|[^0-9])([0-9]\.[0-9]+)" - for (pos, c) in full_version_line.char_indices() { - if !c.is_digit(10) { continue } - if pos + 2 >= full_version_line.len() { continue } - if full_version_line.char_at(pos + 1) != '.' 
{ continue } - if !full_version_line.char_at(pos + 2).is_digit(10) { continue } - if pos > 0 && full_version_line.char_at_reverse(pos).is_digit(10) { - continue - } - let mut end = pos + 3; - while end < full_version_line.len() && - full_version_line.char_at(end).is_digit(10) { - end += 1; - } - return Some(full_version_line[pos..end].to_owned()); - } - println!("Could not extract GDB version from line '{}'", - full_version_line); - None - }, - _ => None - } -} - -fn extract_lldb_version(full_version_line: Option) -> Option { - // Extract the major LLDB version from the given version string. - // LLDB version strings are different for Apple and non-Apple platforms. - // At the moment, this function only supports the Apple variant, which looks - // like this: - // - // LLDB-179.5 (older versions) - // lldb-300.2.51 (new versions) - // - // We are only interested in the major version number, so this function - // will return `Some("179")` and `Some("300")` respectively. - - if let Some(ref full_version_line) = full_version_line { - if !full_version_line.trim().is_empty() { - let full_version_line = full_version_line.trim(); - - for (pos, l) in full_version_line.char_indices() { - if l != 'l' && l != 'L' { continue } - if pos + 5 >= full_version_line.len() { continue } - let l = full_version_line.char_at(pos + 1); - if l != 'l' && l != 'L' { continue } - let d = full_version_line.char_at(pos + 2); - if d != 'd' && d != 'D' { continue } - let b = full_version_line.char_at(pos + 3); - if b != 'b' && b != 'B' { continue } - let dash = full_version_line.char_at(pos + 4); - if dash != '-' { continue } - - let vers = full_version_line[pos + 5..].chars().take_while(|c| { - c.is_digit(10) - }).collect::(); - if !vers.is_empty() { return Some(vers) } - } - println!("Could not extract LLDB version from line '{}'", - full_version_line); - } - } - None -} diff --git a/src/compiletest/errors.rs b/src/compiletest/errors.rs deleted file mode 100644 index a3ad022ebd52f..0000000000000 
--- a/src/compiletest/errors.rs +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -use self::WhichLine::*; - -use std::fs::File; -use std::io::BufReader; -use std::io::prelude::*; -use std::path::Path; - -pub struct ExpectedError { - pub line: usize, - pub kind: String, - pub msg: String, -} - -#[derive(PartialEq, Debug)] -enum WhichLine { ThisLine, FollowPrevious(usize), AdjustBackward(usize) } - -/// Looks for either "//~| KIND MESSAGE" or "//~^^... KIND MESSAGE" -/// The former is a "follow" that inherits its target from the preceding line; -/// the latter is an "adjusts" that goes that many lines up. -/// -/// Goal is to enable tests both like: //~^^^ ERROR go up three -/// and also //~^ ERROR message one for the preceding line, and -/// //~| ERROR message two for that same line. -// Load any test directives embedded in the file -pub fn load_errors(testfile: &Path) -> Vec { - let rdr = BufReader::new(File::open(testfile).unwrap()); - - // `last_nonfollow_error` tracks the most recently seen - // line with an error template that did not use the - // follow-syntax, "//~| ...". - // - // (pnkfelix could not find an easy way to compose Iterator::scan - // and Iterator::filter_map to pass along this information into - // `parse_expected`. So instead I am storing that state here and - // updating it in the map callback below.) 
- let mut last_nonfollow_error = None; - - rdr.lines().enumerate().filter_map(|(line_no, ln)| { - parse_expected(last_nonfollow_error, - line_no + 1, - &ln.unwrap()) - .map(|(which, error)| { - match which { - FollowPrevious(_) => {} - _ => last_nonfollow_error = Some(error.line), - } - error - }) - }).collect() -} - -fn parse_expected(last_nonfollow_error: Option, - line_num: usize, - line: &str) -> Option<(WhichLine, ExpectedError)> { - let start = match line.find("//~") { Some(i) => i, None => return None }; - let (follow, adjusts) = if line.char_at(start + 3) == '|' { - (true, 0) - } else { - (false, line[start + 3..].chars().take_while(|c| *c == '^').count()) - }; - let kind_start = start + 3 + adjusts + (follow as usize); - let letters = line[kind_start..].chars(); - let kind = letters.skip_while(|c| c.is_whitespace()) - .take_while(|c| !c.is_whitespace()) - .flat_map(|c| c.to_lowercase()) - .collect::(); - let letters = line[kind_start..].chars(); - let msg = letters.skip_while(|c| c.is_whitespace()) - .skip_while(|c| !c.is_whitespace()) - .collect::().trim().to_owned(); - - let (which, line) = if follow { - assert!(adjusts == 0, "use either //~| or //~^, not both."); - let line = last_nonfollow_error.unwrap_or_else(|| { - panic!("encountered //~| without preceding //~^ line.") - }); - (FollowPrevious(line), line) - } else { - let which = - if adjusts > 0 { AdjustBackward(adjusts) } else { ThisLine }; - let line = line_num - adjusts; - (which, line) - }; - - debug!("line={} which={:?} kind={:?} msg={:?}", line_num, which, kind, msg); - Some((which, ExpectedError { line: line, - kind: kind, - msg: msg, })) -} diff --git a/src/compiletest/header.rs b/src/compiletest/header.rs deleted file mode 100644 index 6efe6e608e8ad..0000000000000 --- a/src/compiletest/header.rs +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::env; -use std::fs::File; -use std::io::BufReader; -use std::io::prelude::*; -use std::path::{Path, PathBuf}; - -use common::Config; -use common; -use util; - -pub struct TestProps { - // Lines that should be expected, in order, on standard out - pub error_patterns: Vec , - // Extra flags to pass to the compiler - pub compile_flags: Option, - // Extra flags to pass when the compiled code is run (such as --bench) - pub run_flags: Option, - // If present, the name of a file that this test should match when - // pretty-printed - pub pp_exact: Option, - // Modules from aux directory that should be compiled - pub aux_builds: Vec , - // Environment settings to use during execution - pub exec_env: Vec<(String,String)> , - // Lines to check if they appear in the expected debugger output - pub check_lines: Vec , - // Build documentation for all specified aux-builds as well - pub build_aux_docs: bool, - // Flag to force a crate to be built with the host architecture - pub force_host: bool, - // Check stdout for error-pattern output as well as stderr - pub check_stdout: bool, - // Don't force a --crate-type=dylib flag on the command line - pub no_prefer_dynamic: bool, - // Run --pretty expanded when running pretty printing tests - pub pretty_expanded: bool, - // Which pretty mode are we testing with, default to 'normal' - pub pretty_mode: String, - // Only compare pretty output and don't try compiling - pub pretty_compare_only: bool, - // Patterns which must not appear in the output of a cfail test. 
- pub forbid_output: Vec, -} - -// Load any test directives embedded in the file -pub fn load_props(testfile: &Path) -> TestProps { - let mut error_patterns = Vec::new(); - let mut aux_builds = Vec::new(); - let mut exec_env = Vec::new(); - let mut compile_flags = None; - let mut run_flags = None; - let mut pp_exact = None; - let mut check_lines = Vec::new(); - let mut build_aux_docs = false; - let mut force_host = false; - let mut check_stdout = false; - let mut no_prefer_dynamic = false; - let mut pretty_expanded = false; - let mut pretty_mode = None; - let mut pretty_compare_only = false; - let mut forbid_output = Vec::new(); - iter_header(testfile, &mut |ln| { - if let Some(ep) = parse_error_pattern(ln) { - error_patterns.push(ep); - } - - if compile_flags.is_none() { - compile_flags = parse_compile_flags(ln); - } - - if run_flags.is_none() { - run_flags = parse_run_flags(ln); - } - - if pp_exact.is_none() { - pp_exact = parse_pp_exact(ln, testfile); - } - - if !build_aux_docs { - build_aux_docs = parse_build_aux_docs(ln); - } - - if !force_host { - force_host = parse_force_host(ln); - } - - if !check_stdout { - check_stdout = parse_check_stdout(ln); - } - - if !no_prefer_dynamic { - no_prefer_dynamic = parse_no_prefer_dynamic(ln); - } - - if !pretty_expanded { - pretty_expanded = parse_pretty_expanded(ln); - } - - if pretty_mode.is_none() { - pretty_mode = parse_pretty_mode(ln); - } - - if !pretty_compare_only { - pretty_compare_only = parse_pretty_compare_only(ln); - } - - if let Some(ab) = parse_aux_build(ln) { - aux_builds.push(ab); - } - - if let Some(ee) = parse_exec_env(ln) { - exec_env.push(ee); - } - - if let Some(cl) = parse_check_line(ln) { - check_lines.push(cl); - } - - if let Some(of) = parse_forbid_output(ln) { - forbid_output.push(of); - } - - true - }); - - for key in vec!["RUST_TEST_NOCAPTURE", "RUST_TEST_THREADS"] { - match env::var(key) { - Ok(val) => - if exec_env.iter().find(|&&(ref x, _)| *x == key).is_none() { - 
exec_env.push((key.to_owned(), val)) - }, - Err(..) => {} - } - } - - TestProps { - error_patterns: error_patterns, - compile_flags: compile_flags, - run_flags: run_flags, - pp_exact: pp_exact, - aux_builds: aux_builds, - exec_env: exec_env, - check_lines: check_lines, - build_aux_docs: build_aux_docs, - force_host: force_host, - check_stdout: check_stdout, - no_prefer_dynamic: no_prefer_dynamic, - pretty_expanded: pretty_expanded, - pretty_mode: pretty_mode.unwrap_or("normal".to_owned()), - pretty_compare_only: pretty_compare_only, - forbid_output: forbid_output, - } -} - -pub fn is_test_ignored(config: &Config, testfile: &Path) -> bool { - fn ignore_target(config: &Config) -> String { - format!("ignore-{}", util::get_os(&config.target)) - } - fn ignore_architecture(config: &Config) -> String { - format!("ignore-{}", util::get_arch(&config.target)) - } - fn ignore_stage(config: &Config) -> String { - format!("ignore-{}", - config.stage_id.split('-').next().unwrap()) - } - fn ignore_env(config: &Config) -> String { - format!("ignore-{}", util::get_env(&config.target).unwrap_or("")) - } - fn ignore_gdb(config: &Config, line: &str) -> bool { - if config.mode != common::DebugInfoGdb { - return false; - } - - if parse_name_directive(line, "ignore-gdb") { - return true; - } - - if let Some(ref actual_version) = config.gdb_version { - if line.contains("min-gdb-version") { - let min_version = line.trim() - .split(' ') - .last() - .expect("Malformed GDB version directive"); - // Ignore if actual version is smaller the minimum required - // version - gdb_version_to_int(actual_version) < - gdb_version_to_int(min_version) - } else { - false - } - } else { - false - } - } - - fn ignore_lldb(config: &Config, line: &str) -> bool { - if config.mode != common::DebugInfoLldb { - return false; - } - - if parse_name_directive(line, "ignore-lldb") { - return true; - } - - if let Some(ref actual_version) = config.lldb_version { - if line.contains("min-lldb-version") { - let min_version 
= line.trim() - .split(' ') - .last() - .expect("Malformed lldb version directive"); - // Ignore if actual version is smaller the minimum required - // version - lldb_version_to_int(actual_version) < - lldb_version_to_int(min_version) - } else { - false - } - } else { - false - } - } - - let val = iter_header(testfile, &mut |ln| { - !parse_name_directive(ln, "ignore-test") && - !parse_name_directive(ln, &ignore_target(config)) && - !parse_name_directive(ln, &ignore_architecture(config)) && - !parse_name_directive(ln, &ignore_stage(config)) && - !parse_name_directive(ln, &ignore_env(config)) && - !(config.mode == common::Pretty && parse_name_directive(ln, "ignore-pretty")) && - !(config.target != config.host && parse_name_directive(ln, "ignore-cross-compile")) && - !ignore_gdb(config, ln) && - !ignore_lldb(config, ln) - }); - - !val -} - -fn iter_header(testfile: &Path, it: &mut FnMut(&str) -> bool) -> bool { - let rdr = BufReader::new(File::open(testfile).unwrap()); - for ln in rdr.lines() { - // Assume that any directives will be found before the first - // module or function. This doesn't seem to be an optimization - // with a warm page cache. Maybe with a cold one. 
- let ln = ln.unwrap(); - if ln.starts_with("fn") || - ln.starts_with("mod") { - return true; - } else { - if !(it(ln.trim())) { - return false; - } - } - } - return true; -} - -fn parse_error_pattern(line: &str) -> Option { - parse_name_value_directive(line, "error-pattern") -} - -fn parse_forbid_output(line: &str) -> Option { - parse_name_value_directive(line, "forbid-output") -} - -fn parse_aux_build(line: &str) -> Option { - parse_name_value_directive(line, "aux-build") -} - -fn parse_compile_flags(line: &str) -> Option { - parse_name_value_directive(line, "compile-flags") -} - -fn parse_run_flags(line: &str) -> Option { - parse_name_value_directive(line, "run-flags") -} - -fn parse_check_line(line: &str) -> Option { - parse_name_value_directive(line, "check") -} - -fn parse_force_host(line: &str) -> bool { - parse_name_directive(line, "force-host") -} - -fn parse_build_aux_docs(line: &str) -> bool { - parse_name_directive(line, "build-aux-docs") -} - -fn parse_check_stdout(line: &str) -> bool { - parse_name_directive(line, "check-stdout") -} - -fn parse_no_prefer_dynamic(line: &str) -> bool { - parse_name_directive(line, "no-prefer-dynamic") -} - -fn parse_pretty_expanded(line: &str) -> bool { - parse_name_directive(line, "pretty-expanded") -} - -fn parse_pretty_mode(line: &str) -> Option { - parse_name_value_directive(line, "pretty-mode") -} - -fn parse_pretty_compare_only(line: &str) -> bool { - parse_name_directive(line, "pretty-compare-only") -} - -fn parse_exec_env(line: &str) -> Option<(String, String)> { - parse_name_value_directive(line, "exec-env").map(|nv| { - // nv is either FOO or FOO=BAR - let mut strs: Vec = nv - .splitn(2, '=') - .map(str::to_owned) - .collect(); - - match strs.len() { - 1 => (strs.pop().unwrap(), "".to_owned()), - 2 => { - let end = strs.pop().unwrap(); - (strs.pop().unwrap(), end) - } - n => panic!("Expected 1 or 2 strings, not {}", n) - } - }) -} - -fn parse_pp_exact(line: &str, testfile: &Path) -> Option { - if let Some(s) = 
parse_name_value_directive(line, "pp-exact") { - Some(PathBuf::from(&s)) - } else { - if parse_name_directive(line, "pp-exact") { - testfile.file_name().map(PathBuf::from) - } else { - None - } - } -} - -fn parse_name_directive(line: &str, directive: &str) -> bool { - // This 'no-' rule is a quick hack to allow pretty-expanded and no-pretty-expanded to coexist - line.contains(directive) && !line.contains(&("no-".to_owned() + directive)) -} - -pub fn parse_name_value_directive(line: &str, directive: &str) - -> Option { - let keycolon = format!("{}:", directive); - if let Some(colon) = line.find(&keycolon) { - let value = line[(colon + keycolon.len()) .. line.len()].to_owned(); - debug!("{}: {}", directive, value); - Some(value) - } else { - None - } -} - -pub fn gdb_version_to_int(version_string: &str) -> isize { - let error_string = format!( - "Encountered GDB version string with unexpected format: {}", - version_string); - let error_string = error_string; - - let components: Vec<&str> = version_string.trim().split('.').collect(); - - if components.len() != 2 { - panic!("{}", error_string); - } - - let major: isize = components[0].parse().ok().expect(&error_string); - let minor: isize = components[1].parse().ok().expect(&error_string); - - return major * 1000 + minor; -} - -pub fn lldb_version_to_int(version_string: &str) -> isize { - let error_string = format!( - "Encountered LLDB version string with unexpected format: {}", - version_string); - let error_string = error_string; - let major: isize = version_string.parse().ok().expect(&error_string); - return major; -} diff --git a/src/compiletest/procsrv.rs b/src/compiletest/procsrv.rs deleted file mode 100644 index 7c5397a1af989..0000000000000 --- a/src/compiletest/procsrv.rs +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(deprecated)] - -use std::dynamic_lib::DynamicLibrary; -use std::io::prelude::*; -use std::path::PathBuf; -use std::process::{ExitStatus, Command, Child, Output, Stdio}; - -fn add_target_env(cmd: &mut Command, lib_path: &str, aux_path: Option<&str>) { - // Need to be sure to put both the lib_path and the aux path in the dylib - // search path for the child. - let mut path = DynamicLibrary::search_path(); - if let Some(p) = aux_path { - path.insert(0, PathBuf::from(p)) - } - path.insert(0, PathBuf::from(lib_path)); - - // Add the new dylib search path var - let var = DynamicLibrary::envvar(); - let newpath = DynamicLibrary::create_path(&path); - cmd.env(var, newpath); -} - -pub struct Result {pub status: ExitStatus, pub out: String, pub err: String} - -pub fn run(lib_path: &str, - prog: &str, - aux_path: Option<&str>, - args: &[String], - env: Vec<(String, String)> , - input: Option) -> Option { - - let mut cmd = Command::new(prog); - cmd.args(args) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - add_target_env(&mut cmd, lib_path, aux_path); - for (key, val) in env { - cmd.env(&key, &val); - } - - match cmd.spawn() { - Ok(mut process) => { - if let Some(input) = input { - process.stdin.as_mut().unwrap().write_all(input.as_bytes()).unwrap(); - } - let Output { status, stdout, stderr } = - process.wait_with_output().unwrap(); - - Some(Result { - status: status, - out: String::from_utf8(stdout).unwrap(), - err: String::from_utf8(stderr).unwrap() - }) - }, - Err(..) 
=> None - } -} - -pub fn run_background(lib_path: &str, - prog: &str, - aux_path: Option<&str>, - args: &[String], - env: Vec<(String, String)> , - input: Option) -> Option { - - let mut cmd = Command::new(prog); - cmd.args(args) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - add_target_env(&mut cmd, lib_path, aux_path); - for (key, val) in env { - cmd.env(&key, &val); - } - - match cmd.spawn() { - Ok(mut process) => { - if let Some(input) = input { - process.stdin.as_mut().unwrap().write_all(input.as_bytes()).unwrap(); - } - - Some(process) - }, - Err(..) => None - } -} diff --git a/src/compiletest/runtest.rs b/src/compiletest/runtest.rs deleted file mode 100644 index 459b43b4ffe5d..0000000000000 --- a/src/compiletest/runtest.rs +++ /dev/null @@ -1,1749 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use common::Config; -use common::{CompileFail, ParseFail, Pretty, RunFail, RunPass, RunPassValgrind}; -use common::{Codegen, DebugInfoLldb, DebugInfoGdb, Rustdoc}; -use errors; -use header::TestProps; -use header; -use procsrv; -use util::logv; - -use std::env; -use std::fmt; -use std::fs::{self, File}; -use std::io::BufReader; -use std::io::prelude::*; -use std::net::TcpStream; -use std::path::{Path, PathBuf, Component}; -use std::process::{Command, Output, ExitStatus}; - -pub fn run(config: Config, testfile: &Path) { - match &*config.target { - - "arm-linux-androideabi" | "aarch64-linux-android" => { - if !config.adb_device_status { - panic!("android device not available"); - } - } - - _=> { } - } - - if config.verbose { - // We're going to be dumping a lot of info. Start on a new line. 
- print!("\n\n"); - } - debug!("running {:?}", testfile.display()); - let props = header::load_props(&testfile); - debug!("loaded props"); - match config.mode { - CompileFail => run_cfail_test(&config, &props, &testfile), - ParseFail => run_cfail_test(&config, &props, &testfile), - RunFail => run_rfail_test(&config, &props, &testfile), - RunPass => run_rpass_test(&config, &props, &testfile), - RunPassValgrind => run_valgrind_test(&config, &props, &testfile), - Pretty => run_pretty_test(&config, &props, &testfile), - DebugInfoGdb => run_debuginfo_gdb_test(&config, &props, &testfile), - DebugInfoLldb => run_debuginfo_lldb_test(&config, &props, &testfile), - Codegen => run_codegen_test(&config, &props, &testfile), - Rustdoc => run_rustdoc_test(&config, &props, &testfile), - } -} - -fn get_output(props: &TestProps, proc_res: &ProcRes) -> String { - if props.check_stdout { - format!("{}{}", proc_res.stdout, proc_res.stderr) - } else { - proc_res.stderr.clone() - } -} - -fn run_cfail_test(config: &Config, props: &TestProps, testfile: &Path) { - let proc_res = compile_test(config, props, testfile); - - if proc_res.status.success() { - fatal_proc_rec(&format!("{} test compiled successfully!", config.mode)[..], - &proc_res); - } - - check_correct_failure_status(&proc_res); - - if proc_res.status.success() { - fatal("process did not return an error status"); - } - - let output_to_check = get_output(props, &proc_res); - let expected_errors = errors::load_errors(testfile); - if !expected_errors.is_empty() { - if !props.error_patterns.is_empty() { - fatal("both error pattern and expected errors specified"); - } - check_expected_errors(expected_errors, testfile, &proc_res); - } else { - check_error_patterns(props, testfile, &output_to_check, &proc_res); - } - check_no_compiler_crash(&proc_res); - check_forbid_output(props, &output_to_check, &proc_res); -} - -fn run_rfail_test(config: &Config, props: &TestProps, testfile: &Path) { - let proc_res = compile_test(config, props, 
testfile); - - if !proc_res.status.success() { - fatal_proc_rec("compilation failed!", &proc_res); - } - - let proc_res = exec_compiled_test(config, props, testfile); - - // The value our Makefile configures valgrind to return on failure - const VALGRIND_ERR: i32 = 100; - if proc_res.status.code() == Some(VALGRIND_ERR) { - fatal_proc_rec("run-fail test isn't valgrind-clean!", &proc_res); - } - - let output_to_check = get_output(props, &proc_res); - check_correct_failure_status(&proc_res); - check_error_patterns(props, testfile, &output_to_check, &proc_res); -} - -fn check_correct_failure_status(proc_res: &ProcRes) { - // The value the rust runtime returns on failure - const RUST_ERR: i32 = 101; - if proc_res.status.code() != Some(RUST_ERR) { - fatal_proc_rec( - &format!("failure produced the wrong error: {}", - proc_res.status), - proc_res); - } -} - -fn run_rpass_test(config: &Config, props: &TestProps, testfile: &Path) { - let proc_res = compile_test(config, props, testfile); - - if !proc_res.status.success() { - fatal_proc_rec("compilation failed!", &proc_res); - } - - let proc_res = exec_compiled_test(config, props, testfile); - - if !proc_res.status.success() { - fatal_proc_rec("test run failed!", &proc_res); - } -} - -fn run_valgrind_test(config: &Config, props: &TestProps, testfile: &Path) { - if config.valgrind_path.is_none() { - assert!(!config.force_valgrind); - return run_rpass_test(config, props, testfile); - } - - let mut proc_res = compile_test(config, props, testfile); - - if !proc_res.status.success() { - fatal_proc_rec("compilation failed!", &proc_res); - } - - let mut new_config = config.clone(); - new_config.runtool = new_config.valgrind_path.clone(); - proc_res = exec_compiled_test(&new_config, props, testfile); - - if !proc_res.status.success() { - fatal_proc_rec("test run failed!", &proc_res); - } -} - -fn run_pretty_test(config: &Config, props: &TestProps, testfile: &Path) { - if props.pp_exact.is_some() { - logv(config, "testing for exact 
pretty-printing".to_owned()); - } else { - logv(config, "testing for converging pretty-printing".to_owned()); - } - - let rounds = - match props.pp_exact { Some(_) => 1, None => 2 }; - - let mut src = String::new(); - File::open(testfile).unwrap().read_to_string(&mut src).unwrap(); - let mut srcs = vec!(src); - - let mut round = 0; - while round < rounds { - logv(config, format!("pretty-printing round {}", round)); - let proc_res = print_source(config, - props, - testfile, - srcs[round].to_owned(), - &props.pretty_mode); - - if !proc_res.status.success() { - fatal_proc_rec(&format!("pretty-printing failed in round {}", round), - &proc_res); - } - - let ProcRes{ stdout, .. } = proc_res; - srcs.push(stdout); - round += 1; - } - - let mut expected = match props.pp_exact { - Some(ref file) => { - let filepath = testfile.parent().unwrap().join(file); - let mut s = String::new(); - File::open(&filepath).unwrap().read_to_string(&mut s).unwrap(); - s - } - None => { srcs[srcs.len() - 2].clone() } - }; - let mut actual = srcs[srcs.len() - 1].clone(); - - if props.pp_exact.is_some() { - // Now we have to care about line endings - let cr = "\r".to_owned(); - actual = actual.replace(&cr, "").to_owned(); - expected = expected.replace(&cr, "").to_owned(); - } - - compare_source(&expected, &actual); - - // If we're only making sure that the output matches then just stop here - if props.pretty_compare_only { return; } - - // Finally, let's make sure it actually appears to remain valid code - let proc_res = typecheck_source(config, props, testfile, actual); - - if !proc_res.status.success() { - fatal_proc_rec("pretty-printed source does not typecheck", &proc_res); - } - if !props.pretty_expanded { return } - - // additionally, run `--pretty expanded` and try to build it. 
- let proc_res = print_source(config, props, testfile, srcs[round].clone(), "expanded"); - if !proc_res.status.success() { - fatal_proc_rec("pretty-printing (expanded) failed", &proc_res); - } - - let ProcRes{ stdout: expanded_src, .. } = proc_res; - let proc_res = typecheck_source(config, props, testfile, expanded_src); - if !proc_res.status.success() { - fatal_proc_rec("pretty-printed source (expanded) does not typecheck", - &proc_res); - } - - return; - - fn print_source(config: &Config, - props: &TestProps, - testfile: &Path, - src: String, - pretty_type: &str) -> ProcRes { - let aux_dir = aux_output_dir_name(config, testfile); - compose_and_run(config, - testfile, - make_pp_args(config, - props, - testfile, - pretty_type.to_owned()), - props.exec_env.clone(), - &config.compile_lib_path, - Some(aux_dir.to_str().unwrap()), - Some(src)) - } - - fn make_pp_args(config: &Config, - props: &TestProps, - testfile: &Path, - pretty_type: String) -> ProcArgs { - let aux_dir = aux_output_dir_name(config, testfile); - // FIXME (#9639): This needs to handle non-utf8 paths - let mut args = vec!("-".to_owned(), - "-Zunstable-options".to_owned(), - "--unpretty".to_owned(), - pretty_type, - format!("--target={}", config.target), - "-L".to_owned(), - aux_dir.to_str().unwrap().to_owned()); - args.extend(split_maybe_args(&config.target_rustcflags)); - args.extend(split_maybe_args(&props.compile_flags)); - return ProcArgs { - prog: config.rustc_path.to_str().unwrap().to_owned(), - args: args, - }; - } - - fn compare_source(expected: &str, actual: &str) { - if expected != actual { - error("pretty-printed source does not match expected source"); - println!("\n\ -expected:\n\ -------------------------------------------\n\ -{}\n\ -------------------------------------------\n\ -actual:\n\ -------------------------------------------\n\ -{}\n\ -------------------------------------------\n\ -\n", - expected, actual); - panic!(); - } - } - - fn typecheck_source(config: &Config, props: 
&TestProps, - testfile: &Path, src: String) -> ProcRes { - let args = make_typecheck_args(config, props, testfile); - compose_and_run_compiler(config, props, testfile, args, Some(src)) - } - - fn make_typecheck_args(config: &Config, props: &TestProps, testfile: &Path) -> ProcArgs { - let aux_dir = aux_output_dir_name(config, testfile); - let target = if props.force_host { - &*config.host - } else { - &*config.target - }; - // FIXME (#9639): This needs to handle non-utf8 paths - let mut args = vec!("-".to_owned(), - "-Zno-trans".to_owned(), - format!("--target={}", target), - "-L".to_owned(), - config.build_base.to_str().unwrap().to_owned(), - "-L".to_owned(), - aux_dir.to_str().unwrap().to_owned()); - args.extend(split_maybe_args(&config.target_rustcflags)); - args.extend(split_maybe_args(&props.compile_flags)); - // FIXME (#9639): This needs to handle non-utf8 paths - return ProcArgs { - prog: config.rustc_path.to_str().unwrap().to_owned(), - args: args, - }; - } -} - -fn run_debuginfo_gdb_test(config: &Config, props: &TestProps, testfile: &Path) { - let mut config = Config { - target_rustcflags: cleanup_debug_info_options(&config.target_rustcflags), - host_rustcflags: cleanup_debug_info_options(&config.host_rustcflags), - .. 
config.clone() - }; - - let config = &mut config; - let DebuggerCommands { - commands, - check_lines, - breakpoint_lines - } = parse_debugger_commands(testfile, "gdb"); - let mut cmds = commands.join("\n"); - - // compile test file (it should have 'compile-flags:-g' in the header) - let compiler_run_result = compile_test(config, props, testfile); - if !compiler_run_result.status.success() { - fatal_proc_rec("compilation failed!", &compiler_run_result); - } - - let exe_file = make_exe_name(config, testfile); - - let debugger_run_result; - match &*config.target { - "arm-linux-androideabi" | "aarch64-linux-android" => { - - cmds = cmds.replace("run", "continue"); - - // write debugger script - let mut script_str = String::with_capacity(2048); - script_str.push_str(&format!("set charset {}\n", charset())); - script_str.push_str(&format!("file {}\n", exe_file.to_str().unwrap())); - script_str.push_str("target remote :5039\n"); - script_str.push_str(&format!("set solib-search-path \ - ./{}/stage2/lib/rustlib/{}/lib/\n", - config.host, config.target)); - for line in &breakpoint_lines { - script_str.push_str(&format!("break {:?}:{}\n", - testfile.file_name().unwrap() - .to_string_lossy(), - *line)[..]); - } - script_str.push_str(&cmds); - script_str.push_str("\nquit\n"); - - debug!("script_str = {}", script_str); - dump_output_file(config, - testfile, - &script_str, - "debugger.script"); - - - procsrv::run("", - &config.adb_path, - None, - &[ - "push".to_owned(), - exe_file.to_str().unwrap().to_owned(), - config.adb_test_dir.clone() - ], - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{:?}`", config.adb_path)); - - procsrv::run("", - &config.adb_path, - None, - &[ - "forward".to_owned(), - "tcp:5039".to_owned(), - "tcp:5039".to_owned() - ], - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{:?}`", config.adb_path)); - - let adb_arg = format!("export LD_LIBRARY_PATH={}; \ - 
gdbserver{} :5039 {}/{}", - config.adb_test_dir.clone(), - if config.target.contains("aarch64") - {"64"} else {""}, - config.adb_test_dir.clone(), - exe_file.file_name().unwrap().to_str() - .unwrap()); - - let mut process = procsrv::run_background("", - &config.adb_path - , - None, - &[ - "shell".to_owned(), - adb_arg.clone() - ], - vec!(("".to_owned(), - "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{:?}`", config.adb_path)); - loop { - //waiting 1 second for gdbserver start - ::std::thread::sleep(::std::time::Duration::new(1,0)); - if TcpStream::connect("127.0.0.1:5039").is_ok() { - break - } - } - - let tool_path = match config.android_cross_path.to_str() { - Some(x) => x.to_owned(), - None => fatal("cannot find android cross path") - }; - - let debugger_script = make_out_name(config, testfile, "debugger.script"); - // FIXME (#9639): This needs to handle non-utf8 paths - let debugger_opts = - vec!("-quiet".to_owned(), - "-batch".to_owned(), - "-nx".to_owned(), - format!("-command={}", debugger_script.to_str().unwrap())); - - let mut gdb_path = tool_path; - gdb_path.push_str(&format!("/bin/{}-gdb", config.target)); - let procsrv::Result { - out, - err, - status - } = procsrv::run("", - &gdb_path, - None, - &debugger_opts, - vec!(("".to_owned(), "".to_owned())), - None) - .expect(&format!("failed to exec `{:?}`", gdb_path)); - let cmdline = { - let cmdline = make_cmdline("", - &format!("{}-gdb", config.target), - &debugger_opts); - logv(config, format!("executing {}", cmdline)); - cmdline - }; - - debugger_run_result = ProcRes { - status: Status::Normal(status), - stdout: out, - stderr: err, - cmdline: cmdline - }; - if process.kill().is_err() { - println!("Adb process is already finished."); - } - } - - _=> { - let rust_src_root = find_rust_src_root(config) - .expect("Could not find Rust source root"); - let rust_pp_module_rel_path = Path::new("./src/etc"); - let rust_pp_module_abs_path = rust_src_root.join(rust_pp_module_rel_path) - 
.to_str() - .unwrap() - .to_owned(); - // write debugger script - let mut script_str = String::with_capacity(2048); - script_str.push_str(&format!("set charset {}\n", charset())); - script_str.push_str("show version\n"); - - match config.gdb_version { - Some(ref version) => { - println!("NOTE: compiletest thinks it is using GDB version {}", - version); - - if header::gdb_version_to_int(version) > - header::gdb_version_to_int("7.4") { - // Add the directory containing the pretty printers to - // GDB's script auto loading safe path - script_str.push_str( - &format!("add-auto-load-safe-path {}\n", - rust_pp_module_abs_path.replace(r"\", r"\\")) - ); - } - } - _ => { - println!("NOTE: compiletest does not know which version of \ - GDB it is using"); - } - } - - // The following line actually doesn't have to do anything with - // pretty printing, it just tells GDB to print values on one line: - script_str.push_str("set print pretty off\n"); - - // Add the pretty printer directory to GDB's source-file search path - script_str.push_str(&format!("directory {}\n", - rust_pp_module_abs_path)); - - // Load the target executable - script_str.push_str(&format!("file {}\n", - exe_file.to_str().unwrap() - .replace(r"\", r"\\"))); - - // Add line breakpoints - for line in &breakpoint_lines { - script_str.push_str(&format!("break '{}':{}\n", - testfile.file_name().unwrap() - .to_string_lossy(), - *line)); - } - - script_str.push_str(&cmds); - script_str.push_str("\nquit\n"); - - debug!("script_str = {}", script_str); - dump_output_file(config, - testfile, - &script_str, - "debugger.script"); - - // run debugger script with gdb - fn debugger() -> &'static str { - if cfg!(windows) {"gdb.exe"} else {"gdb"} - } - - let debugger_script = make_out_name(config, testfile, "debugger.script"); - - // FIXME (#9639): This needs to handle non-utf8 paths - let debugger_opts = - vec!("-quiet".to_owned(), - "-batch".to_owned(), - "-nx".to_owned(), - format!("-command={}", 
debugger_script.to_str().unwrap())); - - let proc_args = ProcArgs { - prog: debugger().to_owned(), - args: debugger_opts, - }; - - let environment = vec![("PYTHONPATH".to_owned(), rust_pp_module_abs_path)]; - - debugger_run_result = compose_and_run(config, - testfile, - proc_args, - environment, - &config.run_lib_path, - None, - None); - } - } - - if !debugger_run_result.status.success() { - fatal("gdb failed to execute"); - } - - check_debugger_output(&debugger_run_result, &check_lines); -} - -fn find_rust_src_root(config: &Config) -> Option { - let mut path = config.src_base.clone(); - let path_postfix = Path::new("src/etc/lldb_batchmode.py"); - - while path.pop() { - if path.join(&path_postfix).is_file() { - return Some(path); - } - } - - return None; -} - -fn run_debuginfo_lldb_test(config: &Config, props: &TestProps, testfile: &Path) { - if config.lldb_python_dir.is_none() { - fatal("Can't run LLDB test because LLDB's python path is not set."); - } - - let mut config = Config { - target_rustcflags: cleanup_debug_info_options(&config.target_rustcflags), - host_rustcflags: cleanup_debug_info_options(&config.host_rustcflags), - .. config.clone() - }; - - let config = &mut config; - - // compile test file (it should have 'compile-flags:-g' in the header) - let compile_result = compile_test(config, props, testfile); - if !compile_result.status.success() { - fatal_proc_rec("compilation failed!", &compile_result); - } - - let exe_file = make_exe_name(config, testfile); - - match config.lldb_version { - Some(ref version) => { - println!("NOTE: compiletest thinks it is using LLDB version {}", - version); - } - _ => { - println!("NOTE: compiletest does not know which version of \ - LLDB it is using"); - } - } - - // Parse debugger commands etc from test files - let DebuggerCommands { - commands, - check_lines, - breakpoint_lines, - .. 
- } = parse_debugger_commands(testfile, "lldb"); - - // Write debugger script: - // We don't want to hang when calling `quit` while the process is still running - let mut script_str = String::from("settings set auto-confirm true\n"); - - // Make LLDB emit its version, so we have it documented in the test output - script_str.push_str("version\n"); - - // Switch LLDB into "Rust mode" - let rust_src_root = find_rust_src_root(config) - .expect("Could not find Rust source root"); - let rust_pp_module_rel_path = Path::new("./src/etc/lldb_rust_formatters.py"); - let rust_pp_module_abs_path = rust_src_root.join(rust_pp_module_rel_path) - .to_str() - .unwrap() - .to_owned(); - - script_str.push_str(&format!("command script import {}\n", - &rust_pp_module_abs_path[..])[..]); - script_str.push_str("type summary add --no-value "); - script_str.push_str("--python-function lldb_rust_formatters.print_val "); - script_str.push_str("-x \".*\" --category Rust\n"); - script_str.push_str("type category enable Rust\n"); - - // Set breakpoints on every line that contains the string "#break" - for line in &breakpoint_lines { - script_str.push_str(&format!("breakpoint set --line {}\n", line)); - } - - // Append the other commands - for line in &commands { - script_str.push_str(line); - script_str.push_str("\n"); - } - - // Finally, quit the debugger - script_str.push_str("\nquit\n"); - - // Write the script into a file - debug!("script_str = {}", script_str); - dump_output_file(config, - testfile, - &script_str, - "debugger.script"); - let debugger_script = make_out_name(config, testfile, "debugger.script"); - - // Let LLDB execute the script via lldb_batchmode.py - let debugger_run_result = run_lldb(config, - &exe_file, - &debugger_script, - &rust_src_root); - - if !debugger_run_result.status.success() { - fatal_proc_rec("Error while running LLDB", &debugger_run_result); - } - - check_debugger_output(&debugger_run_result, &check_lines); - - fn run_lldb(config: &Config, - test_executable: 
&Path, - debugger_script: &Path, - rust_src_root: &Path) - -> ProcRes { - // Prepare the lldb_batchmode which executes the debugger script - let lldb_script_path = rust_src_root.join("src/etc/lldb_batchmode.py"); - cmd2procres(config, - test_executable, - Command::new(&config.python) - .arg(&lldb_script_path) - .arg(test_executable) - .arg(debugger_script) - .env("PYTHONPATH", - config.lldb_python_dir.as_ref().unwrap())) - } -} - -fn cmd2procres(config: &Config, test_executable: &Path, cmd: &mut Command) - -> ProcRes { - let (status, out, err) = match cmd.output() { - Ok(Output { status, stdout, stderr }) => { - (status, - String::from_utf8(stdout).unwrap(), - String::from_utf8(stderr).unwrap()) - }, - Err(e) => { - fatal(&format!("Failed to setup Python process for \ - LLDB script: {}", e)) - } - }; - - dump_output(config, test_executable, &out, &err); - ProcRes { - status: Status::Normal(status), - stdout: out, - stderr: err, - cmdline: format!("{:?}", cmd) - } -} - -struct DebuggerCommands { - commands: Vec, - check_lines: Vec, - breakpoint_lines: Vec, -} - -fn parse_debugger_commands(file_path: &Path, debugger_prefix: &str) - -> DebuggerCommands { - let command_directive = format!("{}-command", debugger_prefix); - let check_directive = format!("{}-check", debugger_prefix); - - let mut breakpoint_lines = vec!(); - let mut commands = vec!(); - let mut check_lines = vec!(); - let mut counter = 1; - let reader = BufReader::new(File::open(file_path).unwrap()); - for line in reader.lines() { - match line { - Ok(line) => { - if line.contains("#break") { - breakpoint_lines.push(counter); - } - - header::parse_name_value_directive( - &line, - &command_directive).map(|cmd| { - commands.push(cmd) - }); - - header::parse_name_value_directive( - &line, - &check_directive).map(|cmd| { - check_lines.push(cmd) - }); - } - Err(e) => { - fatal(&format!("Error while parsing debugger commands: {}", e)) - } - } - counter += 1; - } - - DebuggerCommands { - commands: commands, - 
check_lines: check_lines, - breakpoint_lines: breakpoint_lines, - } -} - -fn cleanup_debug_info_options(options: &Option) -> Option { - if options.is_none() { - return None; - } - - // Remove options that are either unwanted (-O) or may lead to duplicates due to RUSTFLAGS. - let options_to_remove = [ - "-O".to_owned(), - "-g".to_owned(), - "--debuginfo".to_owned() - ]; - let new_options = - split_maybe_args(options).into_iter() - .filter(|x| !options_to_remove.contains(x)) - .collect::>() - .join(" "); - Some(new_options) -} - -fn check_debugger_output(debugger_run_result: &ProcRes, check_lines: &[String]) { - let num_check_lines = check_lines.len(); - if num_check_lines > 0 { - // Allow check lines to leave parts unspecified (e.g., uninitialized - // bits in the wrong case of an enum) with the notation "[...]". - let check_fragments: Vec> = - check_lines.iter().map(|s| { - s - .trim() - .split("[...]") - .map(str::to_owned) - .collect() - }).collect(); - // check if each line in props.check_lines appears in the - // output (in order) - let mut i = 0; - for line in debugger_run_result.stdout.lines() { - let mut rest = line.trim(); - let mut first = true; - let mut failed = false; - for frag in &check_fragments[i] { - let found = if first { - if rest.starts_with(frag) { - Some(0) - } else { - None - } - } else { - rest.find(frag) - }; - match found { - None => { - failed = true; - break; - } - Some(i) => { - rest = &rest[(i + frag.len())..]; - } - } - first = false; - } - if !failed && rest.is_empty() { - i += 1; - } - if i == num_check_lines { - // all lines checked - break; - } - } - if i != num_check_lines { - fatal_proc_rec(&format!("line not found in debugger output: {}", - check_lines.get(i).unwrap()), - debugger_run_result); - } - } -} - -fn check_error_patterns(props: &TestProps, - testfile: &Path, - output_to_check: &str, - proc_res: &ProcRes) { - if props.error_patterns.is_empty() { - fatal(&format!("no error pattern specified in {:?}", 
testfile.display())); - } - let mut next_err_idx = 0; - let mut next_err_pat = &props.error_patterns[next_err_idx]; - let mut done = false; - for line in output_to_check.lines() { - if line.contains(next_err_pat) { - debug!("found error pattern {}", next_err_pat); - next_err_idx += 1; - if next_err_idx == props.error_patterns.len() { - debug!("found all error patterns"); - done = true; - break; - } - next_err_pat = &props.error_patterns[next_err_idx]; - } - } - if done { return; } - - let missing_patterns = &props.error_patterns[next_err_idx..]; - if missing_patterns.len() == 1 { - fatal_proc_rec(&format!("error pattern '{}' not found!", missing_patterns[0]), - proc_res); - } else { - for pattern in missing_patterns { - error(&format!("error pattern '{}' not found!", *pattern)); - } - fatal_proc_rec("multiple error patterns not found", proc_res); - } -} - -fn check_no_compiler_crash(proc_res: &ProcRes) { - for line in proc_res.stderr.lines() { - if line.starts_with("error: internal compiler error:") { - fatal_proc_rec("compiler encountered internal error", - proc_res); - } - } -} - -fn check_forbid_output(props: &TestProps, - output_to_check: &str, - proc_res: &ProcRes) { - for pat in &props.forbid_output { - if output_to_check.contains(pat) { - fatal_proc_rec("forbidden pattern found in compiler output", proc_res); - } - } -} - -fn check_expected_errors(expected_errors: Vec, - testfile: &Path, - proc_res: &ProcRes) { - - // true if we found the error in question - let mut found_flags = vec![false; expected_errors.len()]; - - if proc_res.status.success() { - fatal("process did not return an error status"); - } - - let prefixes = expected_errors.iter().map(|ee| { - format!("{}:{}:", testfile.display(), ee.line) - }).collect::>(); - - fn prefix_matches(line: &str, prefix: &str) -> bool { - use std::ascii::AsciiExt; - // On windows just translate all '\' path separators to '/' - let line = line.replace(r"\", "/"); - if cfg!(windows) { - 
line.to_ascii_lowercase().starts_with(&prefix.to_ascii_lowercase()) - } else { - line.starts_with(prefix) - } - } - - // A multi-line error will have followup lines which start with a space - // or open paren. - fn continuation( line: &str) -> bool { - line.starts_with(" ") || line.starts_with("(") - } - - // Scan and extract our error/warning messages, - // which look like: - // filename:line1:col1: line2:col2: *error:* msg - // filename:line1:col1: line2:col2: *warning:* msg - // where line1:col1: is the starting point, line2:col2: - // is the ending point, and * represents ANSI color codes. - // - // This pattern is ambiguous on windows, because filename may contain - // a colon, so any path prefix must be detected and removed first. - for line in proc_res.stderr.lines() { - let mut was_expected = false; - let mut prev = 0; - for (i, ee) in expected_errors.iter().enumerate() { - if !found_flags[i] { - debug!("prefix={} ee.kind={} ee.msg={} line={}", - prefixes[i], - ee.kind, - ee.msg, - line); - // Suggestions have no line number in their output, so take on the line number of - // the previous expected error - if ee.kind == "suggestion" { - assert!(expected_errors[prev].kind == "help", - "SUGGESTIONs must be preceded by a HELP"); - if line.contains(&ee.msg) { - found_flags[i] = true; - was_expected = true; - break; - } - } - if (prefix_matches(line, &prefixes[i]) || continuation(line)) && - line.contains(&ee.kind) && - line.contains(&ee.msg) { - found_flags[i] = true; - was_expected = true; - break; - } - } - prev = i; - } - - // ignore this msg which gets printed at the end - if line.contains("aborting due to") { - was_expected = true; - } - - if !was_expected && is_compiler_error_or_warning(line) { - fatal_proc_rec(&format!("unexpected compiler error or warning: '{}'", - line), - proc_res); - } - } - - for (i, &flag) in found_flags.iter().enumerate() { - if !flag { - let ee = &expected_errors[i]; - fatal_proc_rec(&format!("expected {} on line {} not found: 
{}", - ee.kind, ee.line, ee.msg), - proc_res); - } - } -} - -fn is_compiler_error_or_warning(line: &str) -> bool { - let mut c = Path::new(line).components(); - let line = match c.next() { - Some(Component::Prefix(_)) => c.as_path().to_str().unwrap(), - _ => line, - }; - - let mut i = 0; - return - scan_until_char(line, ':', &mut i) && - scan_char(line, ':', &mut i) && - scan_integer(line, &mut i) && - scan_char(line, ':', &mut i) && - scan_integer(line, &mut i) && - scan_char(line, ':', &mut i) && - scan_char(line, ' ', &mut i) && - scan_integer(line, &mut i) && - scan_char(line, ':', &mut i) && - scan_integer(line, &mut i) && - scan_char(line, ' ', &mut i) && - (scan_string(line, "error", &mut i) || - scan_string(line, "warning", &mut i)); -} - -fn scan_until_char(haystack: &str, needle: char, idx: &mut usize) -> bool { - if *idx >= haystack.len() { - return false; - } - let opt = haystack[(*idx)..].find(needle); - if opt.is_none() { - return false; - } - *idx = opt.unwrap(); - return true; -} - -fn scan_char(haystack: &str, needle: char, idx: &mut usize) -> bool { - if *idx >= haystack.len() { - return false; - } - let ch = haystack.char_at(*idx); - if ch != needle { - return false; - } - *idx += ch.len_utf8(); - return true; -} - -fn scan_integer(haystack: &str, idx: &mut usize) -> bool { - let mut i = *idx; - while i < haystack.len() { - let ch = haystack.char_at(i); - if ch < '0' || '9' < ch { - break; - } - i += ch.len_utf8(); - } - if i == *idx { - return false; - } - *idx = i; - return true; -} - -fn scan_string(haystack: &str, needle: &str, idx: &mut usize) -> bool { - let mut haystack_i = *idx; - let mut needle_i = 0; - while needle_i < needle.len() { - if haystack_i >= haystack.len() { - return false; - } - let ch = haystack.char_at(haystack_i); - haystack_i += ch.len_utf8(); - if !scan_char(needle, ch, &mut needle_i) { - return false; - } - } - *idx = haystack_i; - return true; -} - -struct ProcArgs { - prog: String, - args: Vec, -} - -struct ProcRes { 
- status: Status, - stdout: String, - stderr: String, - cmdline: String, -} - -enum Status { - Parsed(i32), - Normal(ExitStatus), -} - -impl Status { - fn code(&self) -> Option { - match *self { - Status::Parsed(i) => Some(i), - Status::Normal(ref e) => e.code(), - } - } - - fn success(&self) -> bool { - match *self { - Status::Parsed(i) => i == 0, - Status::Normal(ref e) => e.success(), - } - } -} - -impl fmt::Display for Status { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Status::Parsed(i) => write!(f, "exit code: {}", i), - Status::Normal(ref e) => e.fmt(f), - } - } -} - -fn compile_test(config: &Config, props: &TestProps, - testfile: &Path) -> ProcRes { - let aux_dir = aux_output_dir_name(config, testfile); - // FIXME (#9639): This needs to handle non-utf8 paths - let link_args = vec!("-L".to_owned(), - aux_dir.to_str().unwrap().to_owned()); - let args = make_compile_args(config, - props, - link_args, - |a, b| TargetLocation::ThisFile(make_exe_name(a, b)), testfile); - compose_and_run_compiler(config, props, testfile, args, None) -} - -fn document(config: &Config, props: &TestProps, - testfile: &Path, out_dir: &Path) -> ProcRes { - if props.build_aux_docs { - for rel_ab in &props.aux_builds { - let abs_ab = config.aux_base.join(rel_ab); - let aux_props = header::load_props(&abs_ab); - - let auxres = document(config, &aux_props, &abs_ab, out_dir); - if !auxres.status.success() { - return auxres; - } - } - } - - let aux_dir = aux_output_dir_name(config, testfile); - let mut args = vec!["-L".to_owned(), - aux_dir.to_str().unwrap().to_owned(), - "-o".to_owned(), - out_dir.to_str().unwrap().to_owned(), - testfile.to_str().unwrap().to_owned()]; - args.extend(split_maybe_args(&props.compile_flags)); - let args = ProcArgs { - prog: config.rustdoc_path.to_str().unwrap().to_owned(), - args: args, - }; - compose_and_run_compiler(config, props, testfile, args, None) -} - -fn exec_compiled_test(config: &Config, props: &TestProps, - testfile: 
&Path) -> ProcRes { - - let env = props.exec_env.clone(); - - match &*config.target { - - "arm-linux-androideabi" | "aarch64-linux-android" => { - _arm_exec_compiled_test(config, props, testfile, env) - } - - _=> { - let aux_dir = aux_output_dir_name(config, testfile); - compose_and_run(config, - testfile, - make_run_args(config, props, testfile), - env, - &config.run_lib_path, - Some(aux_dir.to_str().unwrap()), - None) - } - } -} - -fn compose_and_run_compiler(config: &Config, props: &TestProps, - testfile: &Path, args: ProcArgs, - input: Option) -> ProcRes { - if !props.aux_builds.is_empty() { - ensure_dir(&aux_output_dir_name(config, testfile)); - } - - let aux_dir = aux_output_dir_name(config, testfile); - // FIXME (#9639): This needs to handle non-utf8 paths - let extra_link_args = vec!["-L".to_owned(), - aux_dir.to_str().unwrap().to_owned()]; - - for rel_ab in &props.aux_builds { - let abs_ab = config.aux_base.join(rel_ab); - let aux_props = header::load_props(&abs_ab); - let mut crate_type = if aux_props.no_prefer_dynamic { - Vec::new() - } else { - // We primarily compile all auxiliary libraries as dynamic libraries - // to avoid code size bloat and large binaries as much as possible - // for the test suite (otherwise including libstd statically in all - // executables takes up quite a bit of space). - // - // For targets like MUSL, however, there is no support for dynamic - // libraries so we just go back to building a normal library. Note, - // however, that if the library is built with `force_host` then it's - // ok to be a dylib as the host should always support dylibs. 
- if config.target.contains("musl") && !aux_props.force_host { - vec!("--crate-type=lib".to_owned()) - } else { - vec!("--crate-type=dylib".to_owned()) - } - }; - crate_type.extend(extra_link_args.clone()); - let aux_args = - make_compile_args(config, - &aux_props, - crate_type, - |a,b| { - let f = make_lib_name(a, b, testfile); - let parent = f.parent().unwrap(); - TargetLocation::ThisDirectory(parent.to_path_buf()) - }, - &abs_ab); - let auxres = compose_and_run(config, - &abs_ab, - aux_args, - Vec::new(), - &config.compile_lib_path, - Some(aux_dir.to_str().unwrap()), - None); - if !auxres.status.success() { - fatal_proc_rec( - &format!("auxiliary build of {:?} failed to compile: ", - abs_ab.display()), - &auxres); - } - - match &*config.target { - "arm-linux-androideabi" | "aarch64-linux-android" => { - _arm_push_aux_shared_library(config, testfile); - } - _ => {} - } - } - - compose_and_run(config, - testfile, - args, - Vec::new(), - &config.compile_lib_path, - Some(aux_dir.to_str().unwrap()), - input) -} - -fn ensure_dir(path: &Path) { - if path.is_dir() { return; } - fs::create_dir(path).unwrap(); -} - -fn compose_and_run(config: &Config, testfile: &Path, - ProcArgs{ args, prog }: ProcArgs, - procenv: Vec<(String, String)> , - lib_path: &str, - aux_path: Option<&str>, - input: Option) -> ProcRes { - return program_output(config, testfile, lib_path, - prog, aux_path, args, procenv, input); -} - -enum TargetLocation { - ThisFile(PathBuf), - ThisDirectory(PathBuf), -} - -fn make_compile_args(config: &Config, - props: &TestProps, - extras: Vec , - xform: F, - testfile: &Path) - -> ProcArgs where - F: FnOnce(&Config, &Path) -> TargetLocation, -{ - let xform_file = xform(config, testfile); - let target = if props.force_host { - &*config.host - } else { - &*config.target - }; - // FIXME (#9639): This needs to handle non-utf8 paths - let mut args = vec!(testfile.to_str().unwrap().to_owned(), - "-L".to_owned(), - config.build_base.to_str().unwrap().to_owned(), - 
format!("--target={}", target)); - args.extend_from_slice(&extras); - if !props.no_prefer_dynamic { - args.push("-C".to_owned()); - args.push("prefer-dynamic".to_owned()); - } - let path = match xform_file { - TargetLocation::ThisFile(path) => { - args.push("-o".to_owned()); - path - } - TargetLocation::ThisDirectory(path) => { - args.push("--out-dir".to_owned()); - path - } - }; - args.push(path.to_str().unwrap().to_owned()); - if props.force_host { - args.extend(split_maybe_args(&config.host_rustcflags)); - } else { - args.extend(split_maybe_args(&config.target_rustcflags)); - } - args.extend(split_maybe_args(&props.compile_flags)); - return ProcArgs { - prog: config.rustc_path.to_str().unwrap().to_owned(), - args: args, - }; -} - -fn make_lib_name(config: &Config, auxfile: &Path, testfile: &Path) -> PathBuf { - // what we return here is not particularly important, as it - // happens; rustc ignores everything except for the directory. - let auxname = output_testname(auxfile); - aux_output_dir_name(config, testfile).join(&auxname) -} - -fn make_exe_name(config: &Config, testfile: &Path) -> PathBuf { - let mut f = output_base_name(config, testfile); - if !env::consts::EXE_SUFFIX.is_empty() { - let mut fname = f.file_name().unwrap().to_os_string(); - fname.push(env::consts::EXE_SUFFIX); - f.set_file_name(&fname); - } - f -} - -fn make_run_args(config: &Config, props: &TestProps, testfile: &Path) - -> ProcArgs { - // If we've got another tool to run under (valgrind), - // then split apart its command - let mut args = split_maybe_args(&config.runtool); - let exe_file = make_exe_name(config, testfile); - - // FIXME (#9639): This needs to handle non-utf8 paths - args.push(exe_file.to_str().unwrap().to_owned()); - - // Add the arguments in the run_flags directive - args.extend(split_maybe_args(&props.run_flags)); - - let prog = args.remove(0); - return ProcArgs { - prog: prog, - args: args, - }; -} - -fn split_maybe_args(argstr: &Option) -> Vec { - match *argstr { - 
Some(ref s) => { - s - .split(' ') - .filter_map(|s| { - if s.chars().all(|c| c.is_whitespace()) { - None - } else { - Some(s.to_owned()) - } - }).collect() - } - None => Vec::new() - } -} - -fn program_output(config: &Config, testfile: &Path, lib_path: &str, prog: String, - aux_path: Option<&str>, args: Vec, - env: Vec<(String, String)>, - input: Option) -> ProcRes { - let cmdline = - { - let cmdline = make_cmdline(lib_path, - &prog, - &args); - logv(config, format!("executing {}", cmdline)); - cmdline - }; - let procsrv::Result { - out, - err, - status - } = procsrv::run(lib_path, - &prog, - aux_path, - &args, - env, - input).expect(&format!("failed to exec `{}`", prog)); - dump_output(config, testfile, &out, &err); - return ProcRes { - status: Status::Normal(status), - stdout: out, - stderr: err, - cmdline: cmdline, - }; -} - -fn make_cmdline(libpath: &str, prog: &str, args: &[String]) -> String { - use util; - - // Linux and mac don't require adjusting the library search path - if cfg!(unix) { - format!("{} {}", prog, args.join(" ")) - } else { - // Build the LD_LIBRARY_PATH variable as it would be seen on the command line - // for diagnostic purposes - fn lib_path_cmd_prefix(path: &str) -> String { - format!("{}=\"{}\"", util::lib_path_env_var(), util::make_new_path(path)) - } - - format!("{} {} {}", lib_path_cmd_prefix(libpath), prog, args.join(" ")) - } -} - -fn dump_output(config: &Config, testfile: &Path, out: &str, err: &str) { - dump_output_file(config, testfile, out, "out"); - dump_output_file(config, testfile, err, "err"); - maybe_dump_to_stdout(config, out, err); -} - -fn dump_output_file(config: &Config, testfile: &Path, - out: &str, extension: &str) { - let outfile = make_out_name(config, testfile, extension); - File::create(&outfile).unwrap().write_all(out.as_bytes()).unwrap(); -} - -fn make_out_name(config: &Config, testfile: &Path, extension: &str) -> PathBuf { - output_base_name(config, testfile).with_extension(extension) -} - -fn 
aux_output_dir_name(config: &Config, testfile: &Path) -> PathBuf { - let f = output_base_name(config, testfile); - let mut fname = f.file_name().unwrap().to_os_string(); - fname.push(&format!(".{}.libaux", config.mode)); - f.with_file_name(&fname) -} - -fn output_testname(testfile: &Path) -> PathBuf { - PathBuf::from(testfile.file_stem().unwrap()) -} - -fn output_base_name(config: &Config, testfile: &Path) -> PathBuf { - config.build_base - .join(&output_testname(testfile)) - .with_extension(&config.stage_id) -} - -fn maybe_dump_to_stdout(config: &Config, out: &str, err: &str) { - if config.verbose { - println!("------{}------------------------------", "stdout"); - println!("{}", out); - println!("------{}------------------------------", "stderr"); - println!("{}", err); - println!("------------------------------------------"); - } -} - -fn error(err: &str) { println!("\nerror: {}", err); } - -fn fatal(err: &str) -> ! { error(err); panic!(); } - -fn fatal_proc_rec(err: &str, proc_res: &ProcRes) -> ! 
{ - print!("\n\ -error: {}\n\ -status: {}\n\ -command: {}\n\ -stdout:\n\ -------------------------------------------\n\ -{}\n\ -------------------------------------------\n\ -stderr:\n\ -------------------------------------------\n\ -{}\n\ -------------------------------------------\n\ -\n", - err, proc_res.status, proc_res.cmdline, proc_res.stdout, - proc_res.stderr); - panic!(); -} - -fn _arm_exec_compiled_test(config: &Config, - props: &TestProps, - testfile: &Path, - env: Vec<(String, String)>) - -> ProcRes { - let args = make_run_args(config, props, testfile); - let cmdline = make_cmdline("", - &args.prog, - &args.args); - - // get bare program string - let mut tvec: Vec = args.prog - .split('/') - .map(str::to_owned) - .collect(); - let prog_short = tvec.pop().unwrap(); - - // copy to target - let copy_result = procsrv::run("", - &config.adb_path, - None, - &[ - "push".to_owned(), - args.prog.clone(), - config.adb_test_dir.clone() - ], - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - if config.verbose { - println!("push ({}) {} {} {}", - config.target, - args.prog, - copy_result.out, - copy_result.err); - } - - logv(config, format!("executing ({}) {}", config.target, cmdline)); - - let mut runargs = Vec::new(); - - // run test via adb_run_wrapper - runargs.push("shell".to_owned()); - for (key, val) in env { - runargs.push(format!("{}={}", key, val)); - } - runargs.push(format!("{}/../adb_run_wrapper.sh", config.adb_test_dir)); - runargs.push(format!("{}", config.adb_test_dir)); - runargs.push(format!("{}", prog_short)); - - for tv in &args.args { - runargs.push(tv.to_owned()); - } - procsrv::run("", - &config.adb_path, - None, - &runargs, - vec!(("".to_owned(), "".to_owned())), Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - // get exitcode of result - runargs = Vec::new(); - runargs.push("shell".to_owned()); - runargs.push("cat".to_owned()); - 
runargs.push(format!("{}/{}.exitcode", config.adb_test_dir, prog_short)); - - let procsrv::Result{ out: exitcode_out, err: _, status: _ } = - procsrv::run("", - &config.adb_path, - None, - &runargs, - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - let mut exitcode: i32 = 0; - for c in exitcode_out.chars() { - if !c.is_numeric() { break; } - exitcode = exitcode * 10 + match c { - '0' ... '9' => c as i32 - ('0' as i32), - _ => 101, - } - } - - // get stdout of result - runargs = Vec::new(); - runargs.push("shell".to_owned()); - runargs.push("cat".to_owned()); - runargs.push(format!("{}/{}.stdout", config.adb_test_dir, prog_short)); - - let procsrv::Result{ out: stdout_out, err: _, status: _ } = - procsrv::run("", - &config.adb_path, - None, - &runargs, - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - // get stderr of result - runargs = Vec::new(); - runargs.push("shell".to_owned()); - runargs.push("cat".to_owned()); - runargs.push(format!("{}/{}.stderr", config.adb_test_dir, prog_short)); - - let procsrv::Result{ out: stderr_out, err: _, status: _ } = - procsrv::run("", - &config.adb_path, - None, - &runargs, - vec!(("".to_owned(), "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - dump_output(config, - testfile, - &stdout_out, - &stderr_out); - - ProcRes { - status: Status::Parsed(exitcode), - stdout: stdout_out, - stderr: stderr_out, - cmdline: cmdline - } -} - -fn _arm_push_aux_shared_library(config: &Config, testfile: &Path) { - let tdir = aux_output_dir_name(config, testfile); - - let dirs = fs::read_dir(&tdir).unwrap(); - for file in dirs { - let file = file.unwrap().path(); - if file.extension().and_then(|s| s.to_str()) == Some("so") { - // FIXME (#9639): This needs to handle non-utf8 paths - let copy_result = procsrv::run("", - &config.adb_path, - 
None, - &[ - "push".to_owned(), - file.to_str() - .unwrap() - .to_owned(), - config.adb_test_dir.to_owned(), - ], - vec!(("".to_owned(), - "".to_owned())), - Some("".to_owned())) - .expect(&format!("failed to exec `{}`", config.adb_path)); - - if config.verbose { - println!("push ({}) {:?} {} {}", - config.target, file.display(), - copy_result.out, copy_result.err); - } - } - } -} - -// codegen tests (using FileCheck) - -fn compile_test_and_save_ir(config: &Config, props: &TestProps, - testfile: &Path) -> ProcRes { - let aux_dir = aux_output_dir_name(config, testfile); - // FIXME (#9639): This needs to handle non-utf8 paths - let mut link_args = vec!("-L".to_owned(), - aux_dir.to_str().unwrap().to_owned()); - let llvm_args = vec!("--emit=llvm-ir".to_owned(),); - link_args.extend(llvm_args); - let args = make_compile_args(config, - props, - link_args, - |a, b| TargetLocation::ThisDirectory( - output_base_name(a, b).parent() - .unwrap().to_path_buf()), - testfile); - compose_and_run_compiler(config, props, testfile, args, None) -} - -fn check_ir_with_filecheck(config: &Config, testfile: &Path) -> ProcRes { - let irfile = output_base_name(config, testfile).with_extension("ll"); - let prog = config.llvm_bin_path.as_ref().unwrap().join("FileCheck"); - let proc_args = ProcArgs { - // FIXME (#9639): This needs to handle non-utf8 paths - prog: prog.to_str().unwrap().to_owned(), - args: vec!(format!("-input-file={}", irfile.to_str().unwrap()), - testfile.to_str().unwrap().to_owned()) - }; - compose_and_run(config, testfile, proc_args, Vec::new(), "", None, None) -} - -fn run_codegen_test(config: &Config, props: &TestProps, testfile: &Path) { - - if config.llvm_bin_path.is_none() { - fatal("missing --llvm-bin-path"); - } - - let mut proc_res = compile_test_and_save_ir(config, props, testfile); - if !proc_res.status.success() { - fatal_proc_rec("compilation failed!", &proc_res); - } - - proc_res = check_ir_with_filecheck(config, testfile); - if !proc_res.status.success() { - 
fatal_proc_rec("verification with 'FileCheck' failed", - &proc_res); - } -} - -fn charset() -> &'static str { - // FreeBSD 10.1 defaults to GDB 6.1.1 which doesn't support "auto" charset - if cfg!(target_os = "bitrig") { - "auto" - } else if cfg!(target_os = "freebsd") { - "ISO-8859-1" - } else { - "UTF-8" - } -} - -fn run_rustdoc_test(config: &Config, props: &TestProps, testfile: &Path) { - let out_dir = output_base_name(config, testfile); - let _ = fs::remove_dir_all(&out_dir); - ensure_dir(&out_dir); - - let proc_res = document(config, props, testfile, &out_dir); - if !proc_res.status.success() { - fatal_proc_rec("rustdoc failed!", &proc_res); - } - let root = find_rust_src_root(config).unwrap(); - - let res = cmd2procres(config, - testfile, - Command::new(&config.python) - .arg(root.join("src/etc/htmldocck.py")) - .arg(out_dir) - .arg(testfile)); - if !res.status.success() { - fatal_proc_rec("htmldocck failed!", &res); - } -} diff --git a/src/compiletest/util.rs b/src/compiletest/util.rs deleted file mode 100644 index 103ca463f7a58..0000000000000 --- a/src/compiletest/util.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use std::env; -use common::Config; - -/// Conversion table from triple OS name to Rust SYSNAME -const OS_TABLE: &'static [(&'static str, &'static str)] = &[ - ("android", "android"), - ("bitrig", "bitrig"), - ("darwin", "macos"), - ("dragonfly", "dragonfly"), - ("freebsd", "freebsd"), - ("ios", "ios"), - ("linux", "linux"), - ("mingw32", "windows"), - ("netbsd", "netbsd"), - ("openbsd", "openbsd"), - ("win32", "windows"), - ("windows", "windows"), -]; - -const ARCH_TABLE: &'static [(&'static str, &'static str)] = &[ - ("aarch64", "aarch64"), - ("amd64", "x86_64"), - ("arm", "arm"), - ("arm64", "aarch64"), - ("hexagon", "hexagon"), - ("i386", "x86"), - ("i686", "x86"), - ("mips", "mips"), - ("msp430", "msp430"), - ("powerpc", "powerpc"), - ("powerpc64", "powerpc64"), - ("powerpc64le", "powerpc64le"), - ("s390x", "systemz"), - ("sparc", "sparc"), - ("x86_64", "x86_64"), - ("xcore", "xcore"), -]; - -pub fn get_os(triple: &str) -> &'static str { - for &(triple_os, os) in OS_TABLE { - if triple.contains(triple_os) { - return os - } - } - panic!("Cannot determine OS from triple"); -} -pub fn get_arch(triple: &str) -> &'static str { - for &(triple_arch, arch) in ARCH_TABLE { - if triple.contains(triple_arch) { - return arch - } - } - panic!("Cannot determine Architecture from triple"); -} - -pub fn get_env(triple: &str) -> Option<&str> { - triple.split('-').nth(3) -} - -pub fn make_new_path(path: &str) -> String { - assert!(cfg!(windows)); - // Windows just uses PATH as the library search path, so we have to - // maintain the current value while adding our own - match env::var(lib_path_env_var()) { - Ok(curr) => { - format!("{}{}{}", path, path_div(), curr) - } - Err(..) 
=> path.to_owned() - } -} - -pub fn lib_path_env_var() -> &'static str { "PATH" } -fn path_div() -> &'static str { ";" } - -pub fn logv(config: &Config, s: String) { - debug!("{}", s); - if config.verbose { println!("{}", s); } -} diff --git a/src/doc/README.md b/src/doc/README.md index b5972f7ddb9ee..e1d95732b467c 100644 --- a/src/doc/README.md +++ b/src/doc/README.md @@ -9,7 +9,7 @@ libraries. To generate HTML documentation from one source file/crate, do something like: -~~~~ +~~~~text rustdoc --output html-doc/ --output-format html ../src/libstd/path.rs ~~~~ @@ -20,7 +20,7 @@ rustdoc --output html-doc/ --output-format html ../src/libstd/path.rs To generate an HTML version of a doc from Markdown manually, you can do something like: -~~~~ +~~~~text rustdoc reference.md ~~~~ diff --git a/src/doc/book/SUMMARY.md b/src/doc/book/SUMMARY.md index fe5e1c3990c5c..18aa9f24580d5 100644 --- a/src/doc/book/SUMMARY.md +++ b/src/doc/book/SUMMARY.md @@ -9,6 +9,7 @@ * [Comments](comments.md) * [if](if.md) * [Loops](loops.md) + * [Vectors](vectors.md) * [Ownership](ownership.md) * [References and Borrowing](references-and-borrowing.md) * [Lifetimes](lifetimes.md) @@ -18,7 +19,6 @@ * [Match](match.md) * [Patterns](patterns.md) * [Method Syntax](method-syntax.md) - * [Vectors](vectors.md) * [Strings](strings.md) * [Generics](generics.md) * [Traits](traits.md) diff --git a/src/doc/book/advanced-linking.md b/src/doc/book/advanced-linking.md index 9ef6d5c2bffbb..ddaebaf98d99d 100644 --- a/src/doc/book/advanced-linking.md +++ b/src/doc/book/advanced-linking.md @@ -12,7 +12,7 @@ the `link_args` attribute. This attribute is applied to `extern` blocks and specifies raw flags which need to get passed to the linker when producing an artifact. An example usage would be: -``` no_run +```rust,no_run #![feature(link_args)] #[link_args = "-foo -bar -baz"] @@ -52,7 +52,7 @@ By default, all Rust programs on Linux will link to the system `libc` along with a number of other libraries. 
Let's look at an example on a 64-bit Linux machine with GCC and `glibc` (by far the most common `libc` on Linux): -``` text +```text $ cat example.rs fn main() {} $ rustc example.rs @@ -134,7 +134,7 @@ $ ldd example not a dynamic executable $ ./example hi! -thread '
' panicked at 'failed', example.rs:1 +thread 'main' panicked at 'failed', example.rs:1 ``` Success! This binary can be copied to almost any Linux machine with the same diff --git a/src/doc/book/associated-types.md b/src/doc/book/associated-types.md index a0676a33996fc..f416e600415bd 100644 --- a/src/doc/book/associated-types.md +++ b/src/doc/book/associated-types.md @@ -11,7 +11,7 @@ this: trait Graph { fn has_edge(&self, &N, &N) -> bool; fn edges(&self, &N) -> Vec; - // etc + // Etc. } ``` @@ -36,7 +36,7 @@ trait Graph { fn has_edge(&self, &Self::N, &Self::N) -> bool; fn edges(&self, &Self::N) -> Vec; - // etc + // Etc. } ``` @@ -67,7 +67,7 @@ trait Graph { Simple enough. Associated types use the `type` keyword, and go inside the body of the trait, with the functions. -These `type` declarations can have all the same thing as functions do. For example, +These type declarations work the same way as those for functions. For example, if we wanted our `N` type to implement `Display`, so we can print the nodes out, we could do this: @@ -131,7 +131,7 @@ declarations. ## Trait objects with associated types There’s one more bit of syntax we should talk about: trait objects. If you -try to create a trait object from an associated type, like this: +try to create a trait object from a trait with an associated type, like this: ```rust,ignore # trait Graph { diff --git a/src/doc/book/benchmark-tests.md b/src/doc/book/benchmark-tests.md index 797ec94774d7d..e054736eb30b6 100644 --- a/src/doc/book/benchmark-tests.md +++ b/src/doc/book/benchmark-tests.md @@ -110,7 +110,7 @@ computation entirely. This could be done for the example above by adjusting the # struct X; # impl X { fn iter(&self, _: F) where F: FnMut() -> T {} } let b = X; b.iter(|| { - // note lack of `;` (could also use an explicit `return`). + // Note lack of `;` (could also use an explicit `return`). 
(0..1000).fold(0, |old, new| old ^ new) }); ``` diff --git a/src/doc/book/bibliography.md b/src/doc/book/bibliography.md index d32b1a91944e4..6f6f51d1f6082 100644 --- a/src/doc/book/bibliography.md +++ b/src/doc/book/bibliography.md @@ -80,3 +80,4 @@ Language](http://www.cs.indiana.edu/~eholk/papers/hips2013.pdf). Early GPU work Rust](http://munksgaard.me/papers/laumann-munksgaard-larsen.pdf). Philip Munksgaard's master's thesis. Research for Servo. * [Ownership is Theft: Experiences Building an Embedded OS in Rust - Amit Levy, et. al.](http://amitlevy.com/papers/tock-plos2015.pdf) +* [You can't spell trust without Rust](https://raw.githubusercontent.com/Gankro/thesis/master/thesis.pdf). Alexis Beingessner's master's thesis. diff --git a/src/doc/book/borrow-and-asref.md b/src/doc/book/borrow-and-asref.md index 1cfeb2620bd08..c30b2e68665f1 100644 --- a/src/doc/book/borrow-and-asref.md +++ b/src/doc/book/borrow-and-asref.md @@ -8,7 +8,7 @@ different. Here’s a quick refresher on what these two traits mean. # Borrow -The `Borrow` trait is used when you’re writing a datastructure, and you want to +The `Borrow` trait is used when you’re writing a data structure, and you want to use either an owned or borrowed type as synonymous for some purpose. For example, [`HashMap`][hashmap] has a [`get` method][get] which uses `Borrow`: @@ -86,7 +86,7 @@ We can see how they’re kind of the same: they both deal with owned and borrowe versions of some type. However, they’re a bit different. Choose `Borrow` when you want to abstract over different kinds of borrowing, or -when you’re building a datastructure that treats owned and borrowed values in +when you’re building a data structure that treats owned and borrowed values in equivalent ways, such as hashing and comparison. 
Choose `AsRef` when you want to convert something to a reference directly, and diff --git a/src/doc/book/box-syntax-and-patterns.md b/src/doc/book/box-syntax-and-patterns.md index 8d83b64d68313..cbf65dfa9ba8c 100644 --- a/src/doc/book/box-syntax-and-patterns.md +++ b/src/doc/book/box-syntax-and-patterns.md @@ -38,7 +38,7 @@ so as to avoid copying a large data structure. For example: struct BigStruct { one: i32, two: i32, - // etc + // Etc. one_hundred: i32, } @@ -68,7 +68,7 @@ This is an antipattern in Rust. Instead, write this: struct BigStruct { one: i32, two: i32, - // etc + // Etc. one_hundred: i32, } diff --git a/src/doc/book/casting-between-types.md b/src/doc/book/casting-between-types.md index 5cafe1693690d..296384ab6efd9 100644 --- a/src/doc/book/casting-between-types.md +++ b/src/doc/book/casting-between-types.md @@ -17,12 +17,12 @@ function result. The most common case of coercion is removing mutability from a reference: * `&mut T` to `&T` - + An analogous conversion is to remove mutability from a [raw pointer](raw-pointers.md): * `*mut T` to `*const T` - + References can also be coerced to raw pointers: * `&T` to `*const T` @@ -32,7 +32,7 @@ References can also be coerced to raw pointers: Custom coercions may be defined using [`Deref`](deref-coercions.md). Coercion is transitive. 
- + # `as` The `as` keyword does safe casting: @@ -64,7 +64,7 @@ A cast `e as U` is also valid in any of the following cases: and `U` is an integer type; *enum-cast* * `e` has type `bool` or `char` and `U` is an integer type; *prim-int-cast* * `e` has type `u8` and `U` is `char`; *u8-char-cast* - + For example ```rust @@ -98,15 +98,15 @@ The semantics of numeric casts are: [float-int]: https://github.com/rust-lang/rust/issues/10184 [float-float]: https://github.com/rust-lang/rust/issues/15536 - + ## Pointer casts - + Perhaps surprisingly, it is safe to cast [raw pointers](raw-pointers.md) to and from integers, and to cast between pointers to different types subject to some constraints. It is only unsafe to dereference the pointer: ```rust -let a = 300 as *const char; // a pointer to location 300 +let a = 300 as *const char; // `a` is a pointer to location 300. let b = a as u32; ``` @@ -114,7 +114,7 @@ let b = a as u32; * `e` has type `*T`, `U` has type `*U_0`, and either `U_0: Sized` or `unsize_kind(T) == unsize_kind(U_0)`; a *ptr-ptr-cast* - + * `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast* * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast* @@ -135,14 +135,14 @@ cast four bytes into a `u32`: ```rust,ignore let a = [0u8, 0u8, 0u8, 0u8]; -let b = a as u32; // four eights makes 32 +let b = a as u32; // Four u8s makes a u32. ``` This errors with: ```text error: non-scalar cast: `[u8; 4]` as `u32` -let b = a as u32; // four eights makes 32 +let b = a as u32; // Four u8s makes a u32. 
^~~~~~~~ ``` @@ -165,10 +165,15 @@ Rust lets us: ```rust use std::mem; -unsafe { - let a = [0u8, 0u8, 0u8, 0u8]; - - let b = mem::transmute::<[u8; 4], u32>(a); +fn main() { + unsafe { + let a = [0u8, 1u8, 0u8, 0u8]; + let b = mem::transmute::<[u8; 4], u32>(a); + println!("{}", b); // 256 + // Or, more concisely: + let c: u32 = mem::transmute(a); + println!("{}", c); // 256 + } } ``` diff --git a/src/doc/book/choosing-your-guarantees.md b/src/doc/book/choosing-your-guarantees.md index f2b92e6dec4dd..9dca3479d35e8 100644 --- a/src/doc/book/choosing-your-guarantees.md +++ b/src/doc/book/choosing-your-guarantees.md @@ -25,7 +25,7 @@ the following: ```rust let x = Box::new(1); let y = x; -// x no longer accessible here +// `x` is no longer accessible here. ``` Here, the box was _moved_ into `y`. As `x` no longer owns it, the compiler will no longer allow the @@ -204,7 +204,7 @@ borrow checker. Generally we know that such mutations won't happen in a nested f to check. For large, complicated programs, it becomes useful to put some things in `RefCell`s to make things -simpler. For example, a lot of the maps in [the `ctxt` struct][ctxt] in the Rust compiler internals +simpler. For example, a lot of the maps in the `ctxt` struct in the Rust compiler internals are inside this wrapper. These are only modified once (during creation, which is not right after initialization) or a couple of times in well-separated places. However, since this struct is pervasively used everywhere, juggling mutable and immutable pointers would be hard (perhaps @@ -232,10 +232,9 @@ indicator (one word in size) along with the data. At runtime each borrow causes a modification/check of the refcount. 
-[cell-mod]: ../std/cell/ +[cell-mod]: ../std/cell/index.html [cell]: ../std/cell/struct.Cell.html [refcell]: ../std/cell/struct.RefCell.html -[ctxt]: ../rustc/middle/ty/struct.ctxt.html # Synchronous types @@ -292,9 +291,9 @@ the inner data (mutably), and the lock will be released when the guard goes out ```rust,ignore { let guard = mutex.lock(); - // guard dereferences mutably to the inner type + // `guard` dereferences mutably to the inner type. *guard += 1; -} // lock released when destructor runs +} // Lock is released when destructor runs. ``` diff --git a/src/doc/book/closures.md b/src/doc/book/closures.md index 237545edc05bb..a3c7333c6bec2 100644 --- a/src/doc/book/closures.md +++ b/src/doc/book/closures.md @@ -116,7 +116,7 @@ let mut num = 5; { let plus_num = |x: i32| x + num; -} // plus_num goes out of scope, borrow of num ends +} // `plus_num` goes out of scope; borrow of `num` ends. let y = &mut num; ``` @@ -223,6 +223,7 @@ trait system to overload operators. Calling functions is no different. We have three separate traits to overload with: ```rust +# #![feature(unboxed_closures)] # mod foo { pub trait Fn : FnMut { extern "rust-call" fn call(&self, args: Args) -> Self::Output; @@ -261,7 +262,7 @@ the result: ```rust fn call_with_one(some_closure: F) -> i32 - where F : Fn(i32) -> i32 { + where F: Fn(i32) -> i32 { some_closure(1) } @@ -278,7 +279,7 @@ Let’s examine the signature of `call_with_one` in more depth: ```rust fn call_with_one(some_closure: F) -> i32 -# where F : Fn(i32) -> i32 { +# where F: Fn(i32) -> i32 { # some_closure(1) } ``` @@ -287,13 +288,13 @@ isn’t interesting. The next part is: ```rust # fn call_with_one(some_closure: F) -> i32 - where F : Fn(i32) -> i32 { + where F: Fn(i32) -> i32 { # some_closure(1) } ``` -Because `Fn` is a trait, we can bound our generic with it. In this case, our -closure takes a `i32` as an argument and returns an `i32`, and so the generic -bound we use is `Fn(i32) -> i32`. 
+Because `Fn` is a trait, we can use it as a bound for our generic type. In +this case, our closure takes an `i32` as an argument and returns an `i32`, and +so the generic bound we use is `Fn(i32) -> i32`. There’s one other key point here: because we’re bounding a generic with a trait, this will get monomorphized, and therefore, we’ll be doing static @@ -319,6 +320,54 @@ assert_eq!(3, answer); Now we take a trait object, a `&Fn`. And we have to make a reference to our closure when we pass it to `call_with_one`, so we use `&||`. +A quick note about closures that use explicit lifetimes. Sometimes you might have a closure +that takes a reference like so: + +```rust +fn call_with_ref(some_closure:F) -> i32 + where F: Fn(&i32) -> i32 { + + let value = 0; + some_closure(&value) +} +``` + +Normally you can specify the lifetime of the parameter to our closure. We +could annotate it on the function declaration: + +```rust,ignore +fn call_with_ref<'a, F>(some_closure:F) -> i32 + where F: Fn(&'a i32) -> i32 { +``` + +However, this presents a problem in our case. When a function has an explicit +lifetime parameter, that lifetime must be at least as long as the *entire* +call to that function. The borrow checker will complain that `value` doesn't +live long enough, because it is only in scope after its declaration inside the +function body. + +What we need is a closure that can borrow its argument only for its own +invocation scope, not for the outer function's scope. In order to say that, +we can use Higher-Ranked Trait Bounds with the `for<...>` syntax: + +```ignore +fn call_with_ref(some_closure:F) -> i32 + where F: for<'a> Fn(&'a i32) -> i32 { +``` + +This lets the Rust compiler find the minimum lifetime to invoke our closure and +satisfy the borrow checker's rules. Our function then compiles and executes as we +expect.
+ +```rust +fn call_with_ref(some_closure:F) -> i32 + where F: for<'a> Fn(&'a i32) -> i32 { + + let value = 0; + some_closure(&value) +} +``` + # Function pointers and closures A function pointer is kind of like a closure that has no environment. As such, @@ -344,7 +393,7 @@ assert_eq!(2, answer); In this example, we don’t strictly need the intermediate variable `f`, the name of the function works just fine too: -```ignore +```rust,ignore let answer = call_with_one(&add_one); ``` @@ -371,14 +420,13 @@ assert_eq!(6, answer); This gives us these long, related errors: ```text -error: the trait `core::marker::Sized` is not implemented for the type -`core::ops::Fn(i32) -> i32` [E0277] +error: the trait bound `core::ops::Fn(i32) -> i32 : core::marker::Sized` is not satisfied [E0277] fn factory() -> (Fn(i32) -> i32) { ^~~~~~~~~~~~~~~~ note: `core::ops::Fn(i32) -> i32` does not have a constant size known at compile-time fn factory() -> (Fn(i32) -> i32) { ^~~~~~~~~~~~~~~~ -error: the trait `core::marker::Sized` is not implemented for the type `core::ops::Fn(i32) -> i32` [E0277] +error: the trait bound `core::ops::Fn(i32) -> i32 : core::marker::Sized` is not satisfied [E0277] let f = factory(); ^ note: `core::ops::Fn(i32) -> i32` does not have a constant size known at compile-time @@ -463,12 +511,11 @@ fn factory() -> Box i32> { Box::new(|x| x + num) } -# fn main() { + let f = factory(); let answer = f(1); assert_eq!(6, answer); -# } ``` There’s just one last problem: @@ -493,14 +540,13 @@ fn factory() -> Box i32> { Box::new(move |x| x + num) } -# fn main() { + let f = factory(); let answer = f(1); assert_eq!(6, answer); -# } ``` By making the inner closure a `move Fn`, we create a new stack frame for our -closure. By `Box`ing it up, we’ve given it a known size, and allowing it to +closure. By `Box`ing it up, we’ve given it a known size, allowing it to escape our stack frame. 
diff --git a/src/doc/book/comments.md b/src/doc/book/comments.md index e7eb48dc42c52..8fa397cd9a666 100644 --- a/src/doc/book/comments.md +++ b/src/doc/book/comments.md @@ -10,7 +10,7 @@ and *doc comments*. ```rust // Line comments are anything after ‘//’ and extend to the end of the line. -let x = 5; // this is also a line comment. +let x = 5; // This is also a line comment. // If you have a long explanation for something, you can put line comments next // to each other. Put a space between the // and your comment so that it’s diff --git a/src/doc/book/compiler-plugins.md b/src/doc/book/compiler-plugins.md index 800be13a243fb..ff29358df9407 100644 --- a/src/doc/book/compiler-plugins.md +++ b/src/doc/book/compiler-plugins.md @@ -8,12 +8,12 @@ extend the compiler's behavior with new syntax extensions, lint checks, etc. A plugin is a dynamic library crate with a designated *registrar* function that registers extensions with `rustc`. Other crates can load these extensions using the crate attribute `#![plugin(...)]`. See the -[`rustc_plugin`](../rustc_plugin/index.html) documentation for more about the +`rustc_plugin` documentation for more about the mechanics of defining and loading a plugin. If present, arguments passed as `#![plugin(foo(... args ...))]` are not interpreted by rustc itself. They are provided to the plugin through the -`Registry`'s [`args` method](../rustc_plugin/registry/struct.Registry.html#method.args). +`Registry`'s `args` method. In the vast majority of cases, a plugin should *only* be used through `#![plugin]` and not through an `extern crate` item. Linking a plugin would @@ -30,14 +30,14 @@ of a library. Plugins can extend Rust's syntax in various ways. One kind of syntax extension is the procedural macro. These are invoked the same way as [ordinary macros](macros.html), but the expansion is performed by arbitrary Rust -code that manipulates [syntax trees](../syntax/ast/index.html) at +code that manipulates syntax trees at compile time. 
Let's write a plugin -[`roman_numerals.rs`](https://github.com/rust-lang/rust/tree/master/src/test/auxiliary/roman_numerals.rs) +[`roman_numerals.rs`](https://github.com/rust-lang/rust/blob/master/src/test/run-pass-fulldeps/auxiliary/roman_numerals.rs) that implements Roman numeral integer literals. -```ignore +```rust,ignore #![crate_type="dylib"] #![feature(plugin_registrar, rustc_private)] @@ -45,11 +45,11 @@ extern crate syntax; extern crate rustc; extern crate rustc_plugin; -use syntax::codemap::Span; use syntax::parse::token; -use syntax::ast::TokenTree; +use syntax::tokenstream::TokenTree; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager}; -use syntax::ext::build::AstBuilder; // trait for expr_usize +use syntax::ext::build::AstBuilder; // A trait for expr_usize. +use syntax::ext::quote::rt::Span; use rustc_plugin::Registry; fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) @@ -69,7 +69,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) } let text = match args[0] { - TokenTree::Token(_, token::Ident(s, _)) => s.to_string(), + TokenTree::Token(_, token::Ident(s)) => s.to_string(), _ => { cx.span_err(sp, "argument should be a single identifier"); return DummyResult::any(sp); @@ -102,7 +102,7 @@ pub fn plugin_registrar(reg: &mut Registry) { Then we can use `rn!()` like any other macro: -```ignore +```rust,ignore #![feature(plugin)] #![plugin(roman_numerals)] @@ -120,11 +120,8 @@ The advantages over a simple `fn(&str) -> u32` are: In addition to procedural macros, you can define new [`derive`](../reference.html#derive)-like attributes and other kinds of -extensions. See -[`Registry::register_syntax_extension`](../rustc_plugin/registry/struct.Registry.html#method.register_syntax_extension) -and the [`SyntaxExtension` -enum](https://doc.rust-lang.org/syntax/ext/base/enum.SyntaxExtension.html). For -a more involved macro example, see +extensions. 
See `Registry::register_syntax_extension` and the `SyntaxExtension` +enum. For a more involved macro example, see [`regex_macros`](https://github.com/rust-lang/regex/blob/master/regex_macros/src/lib.rs). @@ -132,10 +129,10 @@ a more involved macro example, see Some of the [macro debugging tips](macros.html#debugging-macro-code) are applicable. -You can use [`syntax::parse`](../syntax/parse/index.html) to turn token trees into +You can use `syntax::parse` to turn token trees into higher-level syntax elements like expressions: -```ignore +```rust,ignore fn expand_foo(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) -> Box { @@ -148,40 +145,32 @@ Looking through [`libsyntax` parser code](https://github.com/rust-lang/rust/blob/master/src/libsyntax/parse/parser.rs) will give you a feel for how the parsing infrastructure works. -Keep the [`Span`s](../syntax/codemap/struct.Span.html) of -everything you parse, for better error reporting. You can wrap -[`Spanned`](../syntax/codemap/struct.Spanned.html) around -your custom data structures. - -Calling -[`ExtCtxt::span_fatal`](../syntax/ext/base/struct.ExtCtxt.html#method.span_fatal) -will immediately abort compilation. It's better to instead call -[`ExtCtxt::span_err`](../syntax/ext/base/struct.ExtCtxt.html#method.span_err) -and return -[`DummyResult`](../syntax/ext/base/struct.DummyResult.html), -so that the compiler can continue and find further errors. - -To print syntax fragments for debugging, you can use -[`span_note`](../syntax/ext/base/struct.ExtCtxt.html#method.span_note) together -with -[`syntax::print::pprust::*_to_string`](https://doc.rust-lang.org/syntax/print/pprust/index.html#functions). - -The example above produced an integer literal using -[`AstBuilder::expr_usize`](../syntax/ext/build/trait.AstBuilder.html#tymethod.expr_usize). +Keep the `Span`s of everything you parse, for better error reporting. You can +wrap `Spanned` around your custom data structures. 
+ +Calling `ExtCtxt::span_fatal` will immediately abort compilation. It's better to +instead call `ExtCtxt::span_err` and return `DummyResult` so that the compiler +can continue and find further errors. + +To print syntax fragments for debugging, you can use `span_note` together with +`syntax::print::pprust::*_to_string`. + +The example above produced an integer literal using `AstBuilder::expr_usize`. As an alternative to the `AstBuilder` trait, `libsyntax` provides a set of -[quasiquote macros](../syntax/ext/quote/index.html). They are undocumented and -very rough around the edges. However, the implementation may be a good -starting point for an improved quasiquote as an ordinary plugin library. +quasiquote macros. They are undocumented and very rough around the edges. +However, the implementation may be a good starting point for an improved +quasiquote as an ordinary plugin library. # Lint plugins Plugins can extend [Rust's lint infrastructure](../reference.html#lint-check-attributes) with additional checks for -code style, safety, etc. Now let's write a plugin [`lint_plugin_test.rs`](https://github.com/rust-lang/rust/blob/master/src/test/auxiliary/lint_plugin_test.rs) +code style, safety, etc. Now let's write a plugin +[`lint_plugin_test.rs`](https://github.com/rust-lang/rust/blob/master/src/test/run-pass-fulldeps/auxiliary/lint_plugin_test.rs) that warns about any item named `lintme`. 
-```ignore +```rust,ignore #![feature(plugin_registrar)] #![feature(box_syntax, rustc_private)] @@ -223,7 +212,7 @@ pub fn plugin_registrar(reg: &mut Registry) { Then code like -```ignore +```rust,ignore #![plugin(lint_plugin_test)] fn lintme() { } @@ -239,12 +228,11 @@ foo.rs:4 fn lintme() { } The components of a lint plugin are: -* one or more `declare_lint!` invocations, which define static - [`Lint`](../rustc/lint/struct.Lint.html) structs; +* one or more `declare_lint!` invocations, which define static `Lint` structs; * a struct holding any state needed by the lint pass (here, none); -* a [`LintPass`](../rustc/lint/trait.LintPass.html) +* a `LintPass` implementation defining how to check each syntax element. A single `LintPass` may call `span_lint` for several different `Lint`s, but should register them all through the `get_lints` method. diff --git a/src/doc/book/concurrency.md b/src/doc/book/concurrency.md index 44569a04b9824..67d89d5484ca7 100644 --- a/src/doc/book/concurrency.md +++ b/src/doc/book/concurrency.md @@ -4,7 +4,7 @@ Concurrency and parallelism are incredibly important topics in computer science, and are also a hot topic in industry today. Computers are gaining more and more cores, yet many programmers aren't prepared to fully utilize them. -Rust's memory safety features also apply to its concurrency story too. Even +Rust's memory safety features also apply to its concurrency story. Even concurrent Rust programs must be memory safe, having no data races. Rust's type system is up to the task, and gives you powerful ways to reason about concurrent code at compile time. 
@@ -94,6 +94,52 @@ fn main() { } ``` +As closures can capture variables from their environment, we can also try to +bring some data into the other thread: + +```rust,ignore +use std::thread; + +fn main() { + let x = 1; + thread::spawn(|| { + println!("x is {}", x); + }); +} +``` + +However, this gives us an error: + +```text +5:19: 7:6 error: closure may outlive the current function, but it + borrows `x`, which is owned by the current function +... +5:19: 7:6 help: to force the closure to take ownership of `x` (and any other referenced variables), + use the `move` keyword, as shown: + thread::spawn(move || { + println!("x is {}", x); + }); +``` + +This is because by default closures capture variables by reference, and thus the +closure only captures a _reference to `x`_. This is a problem, because the +thread may outlive the scope of `x`, leading to a dangling pointer. + +To fix this, we use a `move` closure as mentioned in the error message. `move` +closures are explained in depth [here](closures.html#move-closures); basically +they move variables from their environment into themselves. + +```rust +use std::thread; + +fn main() { + let x = 1; + thread::spawn(move || { + println!("x is {}", x); + }); +} +``` + Many languages have the ability to execute threads, but it's wildly unsafe. There are entire books about how to prevent errors that occur from shared mutable state. Rust helps out with its type system here as well, by preventing @@ -119,7 +165,7 @@ concurrency bugs. As an example, here is a Rust program that would have a data race in many languages. It will not compile: -```ignore +```rust,ignore use std::thread; use std::time::Duration; @@ -128,7 +174,7 @@ fn main() { for i in 0..3 { thread::spawn(move || { - data[i] += 1; + data[0] += i; }); } @@ -140,30 +186,71 @@ This gives us an error: ```text 8:17 error: capture of moved value: `data` - data[i] += 1; + data[0] += i; ^~~~ ``` Rust knows this wouldn't be safe! 
If we had a reference to `data` in each -thread, and the thread takes ownership of the reference, we'd have three -owners! +thread, and the thread takes ownership of the reference, we'd have three owners! +`data` gets moved out of `main` in the first call to `spawn()`, so subsequent +calls in the loop cannot use this variable. -So, we need some type that lets us have more than one reference to a value and -that we can share between threads, that is it must implement `Sync`. +So, we need some type that lets us have more than one owning reference to a +value. Usually, we'd use `Rc` for this, which is a reference counted type +that provides shared ownership. It has some runtime bookkeeping that keeps track +of the number of references to it, hence the "reference count" part of its name. -We'll use `Arc`, Rust's standard atomic reference count type, which -wraps a value up with some extra runtime bookkeeping which allows us to -share the ownership of the value between multiple references at the same time. +Calling `clone()` on an `Rc` will return a new owned reference and bump the +internal reference count. We create one of these for each thread: -The bookkeeping consists of a count of how many of these references exist to -the value, hence the reference count part of the name. + +```rust,ignore +use std::thread; +use std::time::Duration; +use std::rc::Rc; + +fn main() { + let mut data = Rc::new(vec![1, 2, 3]); + + for i in 0..3 { + // Create a new owned reference: + let data_ref = data.clone(); + + // Use it in a thread: + thread::spawn(move || { + data_ref[0] += i; + }); + } + + thread::sleep(Duration::from_millis(50)); +} +``` + +This won't work, however, and will give us the error: + +```text +13:9: 13:22 error: the trait bound `alloc::rc::Rc> : core::marker::Send` + is not satisfied +... +13:9: 13:22 note: `alloc::rc::Rc>` + cannot be sent between threads safely +``` + +As the error message mentions, `Rc` cannot be sent between threads safely. 
This +is because the internal reference count is not maintained in a thread safe +manner and can have a data race. + +To solve this, we'll use `Arc`, Rust's standard atomic reference count type. The Atomic part means `Arc` can safely be accessed from multiple threads. To do this the compiler guarantees that mutations of the internal count use indivisible operations which can't have data races. +In essence, `Arc` is a type that lets us share ownership of data _across +threads_. + -```ignore +```rust,ignore use std::thread; use std::sync::Arc; use std::time::Duration; @@ -174,7 +261,7 @@ fn main() { for i in 0..3 { let data = data.clone(); thread::spawn(move || { - data[i] += 1; + data[0] += i; }); } @@ -182,25 +269,32 @@ fn main() { } ``` -We now call `clone()` on our `Arc`, which increases the internal count. +Similarly to last time, we use `clone()` to create a new owned handle. This handle is then moved into the new thread. And... still gives us an error. ```text :11:24 error: cannot borrow immutable borrowed content as mutable -:11 data[i] += 1; +:11 data[0] += i; ^~~~ ``` -`Arc` assumes one more property about its contents to ensure that it is safe -to share across threads: it assumes its contents are `Sync`. This is true for -our value if it's immutable, but we want to be able to mutate it, so we need -something else to persuade the borrow checker we know what we're doing. +`Arc` by default has immutable contents. It allows the _sharing_ of data +between threads, but shared mutable data is unsafe—and when threads are +involved—can cause data races! + -It looks like we need some type that allows us to safely mutate a shared value, -for example a type that can ensure only one thread at a time is able to -mutate the value inside it at any one time.
+Usually when we wish to make something in an immutable position mutable, we use +`Cell` or `RefCell` which allow safe mutation via runtime checks or +otherwise (see also: [Choosing Your Guarantees](choosing-your-guarantees.html)). +However, similar to `Rc`, these are not thread safe. If we try using these, we +will get an error about these types not being `Sync`, and the code will fail to +compile. + +It looks like we need some type that allows us to safely mutate a shared value +across threads, for example a type that can ensure only one thread at a time is +able to mutate the value inside it at any one time. For that, we can use the `Mutex` type! @@ -218,7 +312,7 @@ fn main() { let data = data.clone(); thread::spawn(move || { let mut data = data.lock().unwrap(); - data[i] += 1; + data[0] += i; }); } @@ -229,10 +323,20 @@ fn main() { Note that the value of `i` is bound (copied) to the closure and not shared among the threads. -Also note that [`lock`](../std/sync/struct.Mutex.html#method.lock) method of +We're "locking" the mutex here. A mutex (short for "mutual exclusion"), as +mentioned, only allows one thread at a time to access a value. When we wish to +access the value, we use `lock()` on it. This will "lock" the mutex, and no +other thread will be able to lock it (and hence, do anything with the value) +until we're done with it. If a thread attempts to lock a mutex which is already +locked, it will wait until the other thread releases the lock. + +The lock "release" here is implicit; when the result of the lock (in this case, +`data`) goes out of scope, the lock is automatically released. 
+ +Note that the [`lock`](../std/sync/struct.Mutex.html#method.lock) method of [`Mutex`](../std/sync/struct.Mutex.html) has this signature: -```ignore +```rust,ignore fn lock(&self) -> LockResult> ``` @@ -251,7 +355,7 @@ Let's examine the body of the thread more closely: # let data = data.clone(); thread::spawn(move || { let mut data = data.lock().unwrap(); - data[i] += 1; + data[0] += i; }); # } # thread::sleep(Duration::from_millis(50)); @@ -259,7 +363,7 @@ thread::spawn(move || { ``` First, we call `lock()`, which acquires the mutex's lock. Because this may fail, -it returns an `Result`, and because this is just an example, we `unwrap()` +it returns a `Result`, and because this is just an example, we `unwrap()` it to get a reference to the data. Real code would have more robust error handling here. We're then free to mutate it, since we have the lock. @@ -286,6 +390,8 @@ use std::sync::mpsc; fn main() { let data = Arc::new(Mutex::new(0)); + // `tx` is the "transmitter" or "sender". + // `rx` is the "receiver". let (tx, rx) = mpsc::channel(); for _ in 0..10 { diff --git a/src/doc/book/conditional-compilation.md b/src/doc/book/conditional-compilation.md index a6ff75db89b88..78ab3c18e4561 100644 --- a/src/doc/book/conditional-compilation.md +++ b/src/doc/book/conditional-compilation.md @@ -41,8 +41,9 @@ they get set in the [`[features]` section][features] of your `Cargo.toml`: # no features by default default = [] -# The “secure-password” feature depends on the bcrypt package. -secure-password = ["bcrypt"] +# Add feature "foo" here, then you can use it. +# Our "foo" feature depends on nothing else.
+foo = [] ``` When you do this, Cargo passes along a flag to `rustc`: diff --git a/src/doc/book/const-and-static.md b/src/doc/book/const-and-static.md index 7d555b52a986d..e8f17a41cbeab 100644 --- a/src/doc/book/const-and-static.md +++ b/src/doc/book/const-and-static.md @@ -1,4 +1,4 @@ -% `const` and `static` +% const and static Rust has a way of defining constants with the `const` keyword: @@ -64,20 +64,20 @@ unsafe { [unsafe]: unsafe.html -Furthermore, any type stored in a `static` must be `Sync`, and may not have +Furthermore, any type stored in a `static` must be `Sync`, and must not have a [`Drop`][drop] implementation. [drop]: drop.html # Initializing -Both `const` and `static` have requirements for giving them a value. They may -only be given a value that’s a constant expression. In other words, you cannot -use the result of a function call or anything similarly complex or at runtime. +Both `const` and `static` have requirements for giving them a value. They must +be given a value that’s a constant expression. In other words, you cannot use +the result of a function call or anything similarly complex or at runtime. # Which construct should I use? Almost always, if you can choose between the two, choose `const`. It’s pretty rare that you actually want a memory location associated with your constant, -and using a const allows for optimizations like constant propagation not only +and using a `const` allows for optimizations like constant propagation not only in your crate but downstream crates. diff --git a/src/doc/book/crates-and-modules.md b/src/doc/book/crates-and-modules.md index 849c5f1212a57..0e336635235b3 100644 --- a/src/doc/book/crates-and-modules.md +++ b/src/doc/book/crates-and-modules.md @@ -26,8 +26,7 @@ two languages for those phrases to be in. 
We’ll use this module layout: ```text +-----------+ +---| greetings | - | +-----------+ - +---------+ | + +---------+ | +-----------+ +---| english |---+ | +---------+ | +-----------+ | +---| farewells | @@ -37,8 +36,7 @@ two languages for those phrases to be in. We’ll use this module layout: | +---| greetings | | +----------+ | +-----------+ +---| japanese |--+ - +----------+ | - | +-----------+ + +----------+ | +-----------+ +---| farewells | +-----------+ ``` @@ -115,10 +113,10 @@ $ ls target/debug build deps examples libphrases-a7448e02a0468eaa.rlib native ``` -`libphrases-hash.rlib` is the compiled crate. Before we see how to use this +`libphrases-.rlib` is the compiled crate. Before we see how to use this crate from another crate, let’s break it up into multiple files. -# Multiple file crates +# Multiple File Crates If each crate were just one file, these files would get very large. It’s often easier to split up crates into multiple files, and Rust supports this in two @@ -128,7 +126,7 @@ Instead of declaring a module like this: ```rust,ignore mod english { - // contents of our module go here + // Contents of our module go here. } ``` @@ -190,13 +188,19 @@ mod farewells; ``` Again, these declarations tell Rust to look for either -`src/english/greetings.rs` and `src/japanese/greetings.rs` or -`src/english/farewells/mod.rs` and `src/japanese/farewells/mod.rs`. Because -these sub-modules don’t have their own sub-modules, we’ve chosen to make them -`src/english/greetings.rs` and `src/japanese/farewells.rs`. Whew! - -The contents of `src/english/greetings.rs` and `src/japanese/farewells.rs` are -both empty at the moment. Let’s add some functions. +`src/english/greetings.rs`, `src/english/farewells.rs`, +`src/japanese/greetings.rs` and `src/japanese/farewells.rs` or +`src/english/greetings/mod.rs`, `src/english/farewells/mod.rs`, +`src/japanese/greetings/mod.rs` and +`src/japanese/farewells/mod.rs`. 
Because these sub-modules don’t have +their own sub-modules, we’ve chosen to make them +`src/english/greetings.rs`, `src/english/farewells.rs`, +`src/japanese/greetings.rs` and `src/japanese/farewells.rs`. Whew! + +The contents of `src/english/greetings.rs`, +`src/english/farewells.rs`, `src/japanese/greetings.rs` and +`src/japanese/farewells.rs` are all empty at the moment. Let’s add +some functions. Put this in `src/english/greetings.rs`: @@ -567,10 +571,11 @@ to it as "sayings". Similarly, the first `use` statement pulls in the `ja_greetings` as opposed to simply `greetings`. This can help to avoid ambiguity when importing similarly-named items from different places. -The second `use` statement uses a star glob to bring in _all_ symbols from the -`sayings::japanese::farewells` module. As you can see we can later refer to +The second `use` statement uses a star glob to bring in all public symbols from +the `sayings::japanese::farewells` module. As you can see we can later refer to the Japanese `goodbye` function with no module qualifiers. This kind of glob -should be used sparingly. +should be used sparingly. It’s worth noting that it only imports the public +symbols, even if the code doing the globbing is in the same module. The third `use` statement bears more explanation. It's using "brace expansion" globbing to compress three `use` statements into one (this sort of syntax diff --git a/src/doc/book/custom-allocators.md b/src/doc/book/custom-allocators.md index d69ef6cf7e83a..1996305f09e73 100644 --- a/src/doc/book/custom-allocators.md +++ b/src/doc/book/custom-allocators.md @@ -41,7 +41,7 @@ which allocator is in use is done simply by linking to the desired allocator: extern crate alloc_system; fn main() { - let a = Box::new(4); // allocates from the system allocator + let a = Box::new(4); // Allocates from the system allocator. 
println!("{}", a); } ``` @@ -57,7 +57,7 @@ uses jemalloc by default one would write: extern crate alloc_jemalloc; pub fn foo() { - let a = Box::new(4); // allocates from jemalloc + let a = Box::new(4); // Allocates from jemalloc. println!("{}", a); } # fn main() {} @@ -72,11 +72,11 @@ crate which implements the allocator API (e.g. the same as `alloc_system` or annotated version of `alloc_system` ```rust,no_run -# // only needed for rustdoc --test down below +# // Only needed for rustdoc --test down below. # #![feature(lang_items)] // The compiler needs to be instructed that this crate is an allocator in order // to realize that when this is linked in another allocator like jemalloc should -// not be linked in +// not be linked in. #![feature(allocator)] #![allocator] @@ -85,7 +85,7 @@ annotated version of `alloc_system` // however, can use all of libcore. #![no_std] -// Let's give a unique name to our custom allocator +// Let's give a unique name to our custom allocator: #![crate_name = "my_allocator"] #![crate_type = "rlib"] @@ -126,7 +126,7 @@ pub extern fn __rust_reallocate(ptr: *mut u8, _old_size: usize, size: usize, #[no_mangle] pub extern fn __rust_reallocate_inplace(_ptr: *mut u8, old_size: usize, _size: usize, _align: usize) -> usize { - old_size // this api is not supported by libc + old_size // This api is not supported by libc. } #[no_mangle] @@ -134,7 +134,7 @@ pub extern fn __rust_usable_size(size: usize, _align: usize) -> usize { size } -# // only needed to get rustdoc to test this +# // Only needed to get rustdoc to test this: # fn main() {} # #[lang = "panic_fmt"] fn panic_fmt() {} # #[lang = "eh_personality"] fn eh_personality() {} @@ -149,7 +149,7 @@ After we compile this crate, it can be used as follows: extern crate my_allocator; fn main() { - let a = Box::new(8); // allocates memory via our custom allocator crate + let a = Box::new(8); // Allocates memory via our custom allocator crate. 
println!("{}", a); } ``` diff --git a/src/doc/book/deref-coercions.md b/src/doc/book/deref-coercions.md index beb65c4ce358a..864cd282d9371 100644 --- a/src/doc/book/deref-coercions.md +++ b/src/doc/book/deref-coercions.md @@ -33,13 +33,13 @@ automatically coerce to a `&T`. Here’s an example: ```rust fn foo(s: &str) { - // borrow a string for a second + // Borrow a string for a second. } -// String implements Deref +// String implements Deref. let owned = "Hello".to_string(); -// therefore, this works: +// Therefore, this works: foo(&owned); ``` @@ -55,31 +55,31 @@ type implements `Deref`, so this works: use std::rc::Rc; fn foo(s: &str) { - // borrow a string for a second + // Borrow a string for a second. } -// String implements Deref +// String implements Deref. let owned = "Hello".to_string(); let counted = Rc::new(owned); -// therefore, this works: +// Therefore, this works: foo(&counted); ``` All we’ve done is wrap our `String` in an `Rc`. But we can now pass the `Rc` around anywhere we’d have a `String`. The signature of `foo` didn’t change, but works just as well with either type. This example has two -conversions: `Rc` to `String` and then `String` to `&str`. Rust will do +conversions: `&Rc` to `&String` and then `&String` to `&str`. Rust will do this as many times as possible until the types match. Another very common implementation provided by the standard library is: ```rust fn foo(s: &[i32]) { - // borrow a slice for a second + // Borrow a slice for a second. } -// Vec implements Deref +// Vec implements Deref. let owned = vec![1, 2, 3]; foo(&owned); diff --git a/src/doc/book/documentation.md b/src/doc/book/documentation.md index 4053e5776e39f..f30a95b4e7890 100644 --- a/src/doc/book/documentation.md +++ b/src/doc/book/documentation.md @@ -28,7 +28,7 @@ code. You can use documentation comments for this purpose: /// let five = Rc::new(5); /// ``` pub fn new(value: T) -> Rc { - // implementation goes here + // Implementation goes here. 
} ``` @@ -76,7 +76,7 @@ This [unfortunate error](https://github.com/rust-lang/rust/issues/22547) is correct; documentation comments apply to the thing after them, and there's nothing after that last comment. -[rc-new]: https://doc.rust-lang.org/nightly/std/rc/struct.Rc.html#method.new +[rc-new]: ../std/rc/struct.Rc.html#method.new ### Writing documentation comments @@ -118,7 +118,7 @@ least. If your function has a non-trivial contract like this, that is detected/enforced by panics, documenting it is very important. ```rust -/// # Failures +/// # Errors # fn foo() {} ``` @@ -319,7 +319,7 @@ our source code: ```text First, we set `x` to five: - ```text + ```rust let x = 5; # let y = 6; # println!("{}", x + y); @@ -327,7 +327,7 @@ our source code: Next, we set `y` to six: - ```text + ```rust # let x = 5; let y = 6; # println!("{}", x + y); @@ -335,7 +335,7 @@ our source code: Finally, we print the sum of `x` and `y`: - ```text + ```rust # let x = 5; # let y = 6; println!("{}", x + y); @@ -362,7 +362,7 @@ Here’s an example of documenting a macro: /// # } /// ``` /// -/// ```should_panic +/// ```rust,should_panic /// # #[macro_use] extern crate foo; /// # fn main() { /// panic_unless!(true == false, “I’m broken.”); @@ -429,7 +429,7 @@ There are a few more annotations that are useful to help `rustdoc` do the right thing when testing your code: ```rust -/// ```ignore +/// ```rust,ignore /// fn foo() { /// ``` # fn foo() {} @@ -441,7 +441,7 @@ with `text` if it's not code, or using `#`s to get a working example that only shows the part you care about. ```rust -/// ```should_panic +/// ```rust,should_panic /// assert!(false); /// ``` # fn foo() {} @@ -451,7 +451,7 @@ only shows the part you care about. not actually pass as a test. ```rust -/// ```no_run +/// ```rust,no_run /// loop { /// println!("Hello, world"); /// } @@ -483,7 +483,18 @@ you have a module in `foo.rs`, you'll often open its code and see this: ```rust //! A module for using `foo`s. //! 
-//! The `foo` module contains a lot of useful functionality blah blah blah +//! The `foo` module contains a lot of useful functionality blah blah blah... +``` + +### Crate documentation + +Crates can be documented by placing an inner doc comment (`//!`) at the +beginning of the crate root, aka `lib.rs`: + +```rust +//! This is documentation for the `foo` crate. +//! +//! The foo crate is meant to be used for bar. ``` ### Documentation comment style @@ -563,7 +574,7 @@ can be useful when changing some options, or when writing a macro. `rustdoc` will show the documentation for a public re-export in both places: -```ignore +```rust,ignore extern crate foo; pub use foo::bar; @@ -575,7 +586,7 @@ documentation in both places. This behavior can be suppressed with `no_inline`: -```ignore +```rust,ignore extern crate foo; #[doc(no_inline)] diff --git a/src/doc/book/drop.md b/src/doc/book/drop.md index 8bc25ef90d382..0b7ddcfbe8856 100644 --- a/src/doc/book/drop.md +++ b/src/doc/book/drop.md @@ -18,9 +18,9 @@ impl Drop for HasDrop { fn main() { let x = HasDrop; - // do stuff + // Do stuff. -} // x goes out of scope here +} // `x` goes out of scope here. ``` When `x` goes out of scope at the end of `main()`, the code for `Drop` will @@ -55,7 +55,7 @@ BOOM times 100!!! BOOM times 1!!! ``` -The TNT goes off before the firecracker does, because it was declared +The `tnt` goes off before the `firecracker` does, because it was declared afterwards. Last in, first out. So what is `Drop` good for? Generally, `Drop` is used to clean up any resources diff --git a/src/doc/book/enums.md b/src/doc/book/enums.md index 5e05b4ebbdfa9..790d6ff85469f 100644 --- a/src/doc/book/enums.md +++ b/src/doc/book/enums.md @@ -51,7 +51,7 @@ possible variants: ```rust,ignore fn process_color_change(msg: Message) { - let Message::ChangeColor(r, g, b) = msg; // compile-time error + let Message::ChangeColor(r, g, b) = msg; // This causes a compile-time error. 
} ``` diff --git a/src/doc/book/error-handling.md b/src/doc/book/error-handling.md index 9b1d16170b97f..0d9f49d66cbd8 100644 --- a/src/doc/book/error-handling.md +++ b/src/doc/book/error-handling.md @@ -59,13 +59,13 @@ handling is reducing the amount of explicit case analysis the programmer has to do while keeping code composable. Keeping code composable is important, because without that requirement, we -could [`panic`](../std/macro.panic!.html) whenever we +could [`panic`](../std/macro.panic.html) whenever we come across something unexpected. (`panic` causes the current task to unwind, and in most cases, the entire program aborts.) Here's an example: ```rust,should_panic // Guess a number between 1 and 10. -// If it matches the number we had in mind, return true. Else, return false. +// If it matches the number we had in mind, return `true`. Else, return `false`. fn guess(n: i32) -> bool { if n < 1 || n > 10 { panic!("Invalid number: {}", n); @@ -81,7 +81,7 @@ fn main() { If you try running this code, the program will crash with a message like this: ```text -thread '
' panicked at 'Invalid number: 11', src/bin/panic-simple.rs:5 +thread 'main' panicked at 'Invalid number: 11', src/bin/panic-simple.rs:5 ``` Here's another example that is slightly less contrived. A program that accepts @@ -166,7 +166,7 @@ story. The other half is *using* the `find` function we've written. Let's try to use it to find the extension in a file name. ```rust -# fn find(_: &str, _: char) -> Option { None } +# fn find(haystack: &str, needle: char) -> Option { haystack.find(needle) } fn main() { let file_name = "foobar.rs"; match find(file_name, '.') { @@ -223,9 +223,9 @@ Getting the extension of a file name is a pretty common operation, so it makes sense to put it into a function: ```rust -# fn find(_: &str, _: char) -> Option { None } +# fn find(haystack: &str, needle: char) -> Option { haystack.find(needle) } // Returns the extension of the given file name, where the extension is defined -// as all characters proceeding the first `.`. +// as all characters following the first `.`. // If `file_name` has no `.`, then `None` is returned. fn extension_explicit(file_name: &str) -> Option<&str> { match find(file_name, '.') { @@ -265,14 +265,16 @@ fn map(option: Option, f: F) -> Option where F: FnOnce(T) -> A { ``` Indeed, `map` is [defined as a method][2] on `Option` in the standard library. +As a method, it has a slightly different signature: methods take `self`, `&self`, +or `&mut self` as their first argument. Armed with our new combinator, we can rewrite our `extension_explicit` method to get rid of the case analysis: ```rust -# fn find(_: &str, _: char) -> Option { None } +# fn find(haystack: &str, needle: char) -> Option { haystack.find(needle) } // Returns the extension of the given file name, where the extension is defined -// as all characters proceeding the first `.`. +// as all characters following the first `.`. // If `file_name` has no `.`, then `None` is returned. 
fn extension(file_name: &str) -> Option<&str> { find(file_name, '.').map(|i| &file_name[i+1..]) @@ -294,6 +296,9 @@ fn unwrap_or(option: Option, default: T) -> T { } ``` +Like with `map` above, the standard library implementation is a method instead +of a free function. + The trick here is that the default value must have the same type as the value that might be inside the `Option`. Using it is dead simple in our case: @@ -345,17 +350,34 @@ fn file_path_ext_explicit(file_path: &str) -> Option<&str> { } fn file_name(file_path: &str) -> Option<&str> { - // implementation elided + // Implementation elided. unimplemented!() } ``` You might think that we could use the `map` combinator to reduce the case -analysis, but its type doesn't quite fit. Namely, `map` takes a function that -does something only with the inner value. The result of that function is then -*always* [rewrapped with `Some`](#code-option-map). Instead, we need something -like `map`, but which allows the caller to return another `Option`. Its generic -implementation is even simpler than `map`: +analysis, but its type doesn't quite fit... + +```rust,ignore +fn file_path_ext(file_path: &str) -> Option<&str> { + file_name(file_path).map(|x| extension(x)) // This causes a compilation error. +} +``` + +The `map` function here wraps the value returned by the `extension` function +inside an `Option<_>` and since the `extension` function itself returns an +`Option<&str>` the expression `file_name(file_path).map(|x| extension(x))` +actually returns an `Option>`. + +But since `file_path_ext` just returns `Option<&str>` (and not +`Option>`) we get a compilation error. + +The result of the function taken by map as input is *always* [rewrapped with +`Some`](#code-option-map). Instead, we need something like `map`, but which +allows the caller to return a `Option<_>` directly without wrapping it in +another `Option<_>`. 
+ +Its generic implementation is even simpler than `map`: ```rust fn and_then(option: Option, f: F) -> Option @@ -377,6 +399,10 @@ fn file_path_ext(file_path: &str) -> Option<&str> { } ``` +Side note: Since `and_then` essentially works like `map` but returns an +`Option<_>` instead of an `Option>` it is known as `flatmap` in some +other languages. + The `Option` type has many other combinators [defined in the standard library][5]. It is a good idea to skim this list and familiarize yourself with what's available—they can often reduce case analysis @@ -472,7 +498,7 @@ At this point, you should be skeptical of calling `unwrap`. For example, if the string doesn't parse as a number, you'll get a panic: ```text -thread '
' panicked at 'called `Result::unwrap()` on an `Err` value: ParseIntError { kind: InvalidDigit }', /home/rustbuild/src/rust-buildbot/slave/beta-dist-rustc-linux/build/src/libcore/result.rs:729 +thread 'main' panicked at 'called `Result::unwrap()` on an `Err` value: ParseIntError { kind: InvalidDigit }', /home/rustbuild/src/rust-buildbot/slave/beta-dist-rustc-linux/build/src/libcore/result.rs:729 ``` This is rather unsightly, and if this happened inside a library you're @@ -918,7 +944,7 @@ macro_rules! try { } ``` -(The [real definition](../std/macro.try!.html) is a bit more +(The [real definition](../std/macro.try.html) is a bit more sophisticated. We will address that later.) Using the `try!` macro makes it very easy to simplify our last example. Since @@ -1209,11 +1235,11 @@ use std::fs; use std::io; use std::num; -// We have to jump through some hoops to actually get error values. +// We have to jump through some hoops to actually get error values: let io_err: io::Error = io::Error::last_os_error(); let parse_err: num::ParseIntError = "not a number".parse::().unwrap_err(); -// OK, here are the conversions. +// OK, here are the conversions: let err1: Box = From::from(io_err); let err2: Box = From::from(parse_err); ``` @@ -1245,7 +1271,7 @@ macro_rules! try { ``` This is not its real definition. Its real definition is -[in the standard library](../std/macro.try!.html): +[in the standard library](../std/macro.try.html): @@ -1512,7 +1538,7 @@ and [`rustc-serialize`](https://crates.io/crates/rustc-serialize) crates. We're not going to spend a lot of time on setting up a project with Cargo because it is already covered well in [the Cargo -section](../book/hello-cargo.html) and [Cargo's documentation][14]. +section](getting-started.html#hello-cargo) and [Cargo's documentation][14]. 
To get started from scratch, run `cargo new --bin city-pop` and make sure your `Cargo.toml` looks something like this: @@ -1547,8 +1573,9 @@ detail on Getopts, but there is [some good documentation][15] describing it. The short story is that Getopts generates an argument parser and a help message from a vector of options (The fact that it is a vector is hidden behind a struct and a set of methods). Once the -parsing is done, we can decode the program arguments into a Rust -struct. From there, we can get information about the flags, for +parsing is done, the parser returns a struct that records matches +for defined options, and remaining "free" arguments. +From there, we can get information about the flags, for instance, whether they were passed in, and what arguments they had. Here's our program with the appropriate `extern crate` statements, and the basic argument setup for Getopts: @@ -1566,7 +1593,7 @@ fn print_usage(program: &str, opts: Options) { fn main() { let args: Vec = env::args().collect(); - let program = args[0].clone(); + let program = &args[0]; let mut opts = Options::new(); opts.optflag("h", "help", "Show this usage message."); @@ -1579,10 +1606,10 @@ fn main() { print_usage(&program, opts); return; } - let data_path = args[1].clone(); - let city = args[2].clone(); + let data_path = &matches.free[0]; + let city: &str = &matches.free[1]; - // Do stuff with information + // Do stuff with information. } ``` @@ -1614,7 +1641,6 @@ sure to add `extern crate csv;` to the top of your file.) ```rust,ignore use std::fs::File; -use std::path::Path; // This struct represents the data in each row of the CSV file. 
// Type based decoding absolves us of a lot of the nitty gritty error @@ -1640,7 +1666,7 @@ fn print_usage(program: &str, opts: Options) { fn main() { let args: Vec = env::args().collect(); - let program = args[0].clone(); + let program = &args[0]; let mut opts = Options::new(); opts.optflag("h", "help", "Show this usage message."); @@ -1652,25 +1678,24 @@ fn main() { if matches.opt_present("h") { print_usage(&program, opts); - return; - } + return; + } - let data_file = args[1].clone(); - let data_path = Path::new(&data_file); - let city = args[2].clone(); + let data_path = &matches.free[0]; + let city: &str = &matches.free[1]; - let file = File::open(data_path).unwrap(); - let mut rdr = csv::Reader::from_reader(file); + let file = File::open(data_path).unwrap(); + let mut rdr = csv::Reader::from_reader(file); - for row in rdr.decode::() { - let row = row.unwrap(); + for row in rdr.decode::() { + let row = row.unwrap(); - if row.city == city { - println!("{}, {}: {:?}", - row.city, row.country, - row.population.expect("population count")); - } - } + if row.city == city { + println!("{}, {}: {:?}", + row.city, row.country, + row.population.expect("population count")); + } + } } ``` @@ -1719,8 +1744,10 @@ Note that we opt to handle the possibility of a missing population count by simply ignoring that row. ```rust,ignore +use std::path::Path; + struct Row { - // unchanged + // This struct remains unchanged. } struct PopulationCount { @@ -1742,7 +1769,7 @@ fn search>(file_path: P, city: &str) -> Vec { for row in rdr.decode::() { let row = row.unwrap(); match row.population { - None => { } // skip it + None => { } // Skip it. 
Some(count) => if row.city == city { found.push(PopulationCount { city: row.city, @@ -1756,27 +1783,28 @@ fn search>(file_path: P, city: &str) -> Vec { } fn main() { - let args: Vec = env::args().collect(); - let program = args[0].clone(); + let args: Vec = env::args().collect(); + let program = &args[0]; + + let mut opts = Options::new(); + opts.optflag("h", "help", "Show this usage message."); + + let matches = match opts.parse(&args[1..]) { + Ok(m) => { m } + Err(e) => { panic!(e.to_string()) } + }; - let mut opts = Options::new(); - opts.optflag("h", "help", "Show this usage message."); + if matches.opt_present("h") { + print_usage(&program, opts); + return; + } - let matches = match opts.parse(&args[1..]) { - Ok(m) => { m } - Err(e) => { panic!(e.to_string()) } - }; - if matches.opt_present("h") { - print_usage(&program, opts); - return; - } + let data_path = &matches.free[0]; + let city: &str = &matches.free[1]; - let data_file = args[1].clone(); - let data_path = Path::new(&data_file); - let city = args[2].clone(); - for pop in search(&data_path, &city) { - println!("{}, {}: {:?}", pop.city, pop.country, pop.count); - } + for pop in search(data_path, city) { + println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + } } ``` @@ -1797,18 +1825,18 @@ Let's try it: ```rust,ignore use std::error::Error; -// The rest of the code before this is unchanged +// The rest of the code before this is unchanged. fn search> (file_path: P, city: &str) - -> Result, Box> { + -> Result, Box> { let mut found = vec![]; let file = try!(File::open(file_path)); let mut rdr = csv::Reader::from_reader(file); for row in rdr.decode::() { let row = try!(row); match row.population { - None => { } // skip it + None => { } // Skip it. Some(count) => if row.city == city { found.push(PopulationCount { city: row.city, @@ -1830,20 +1858,17 @@ Instead of `x.unwrap()`, we now have `try!(x)`. 
Since our function returns a `Result`, the `try!` macro will return early from the function if an error occurs. -There is one big gotcha in this code: we used `Box` -instead of `Box`. We did this so we could convert a plain string to an -error type. We need these extra bounds so that we can use the -[corresponding `From` -impls](../std/convert/trait.From.html): +At the end of `search` we also convert a plain string to an error type +by using the [corresponding `From` impls](../std/convert/trait.From.html): ```rust,ignore // We are making use of this impl in the code above, since we call `From::from` // on a `&'static str`. -impl<'a, 'b> From<&'b str> for Box +impl<'a> From<&'a str> for Box // But this is also useful when you need to allocate a new string for an // error message, usually with `format!`. -impl From for Box +impl From for Box ``` Since `search` now returns a `Result`, `main` should use case analysis @@ -1851,14 +1876,14 @@ when calling `search`: ```rust,ignore ... -match search(&data_file, &city) { - Ok(pops) => { - for pop in pops { - println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + match search(data_path, city) { + Ok(pops) => { + for pop in pops { + println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + } } + Err(err) => println!("{}", err) } - Err(err) => println!("{}", err) -} ... ``` @@ -1886,46 +1911,40 @@ First, here's the new usage: ```rust,ignore fn print_usage(program: &str, opts: Options) { - println!("{}", opts.usage(&format!("Usage: {} [options] ", program))); + println!("{}", opts.usage(&format!("Usage: {} [options] ", program))); } ``` -The next part is going to be only a little harder: +Of course we need to adapt the argument handling code: ```rust,ignore ... -let mut opts = Options::new(); -opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME"); -opts.optflag("h", "help", "Show this usage message."); -... 
-let file = matches.opt_str("f"); -let data_file = file.as_ref().map(Path::new); - -let city = if !matches.free.is_empty() { - matches.free[0].clone() -} else { - print_usage(&program, opts); - return; -}; - -match search(&data_file, &city) { - Ok(pops) => { - for pop in pops { - println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + let mut opts = Options::new(); + opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME"); + opts.optflag("h", "help", "Show this usage message."); + ... + let data_path = matches.opt_str("f"); + + let city = if !matches.free.is_empty() { + &matches.free[0] + } else { + print_usage(&program, opts); + return; + }; + + match search(&data_path, city) { + Ok(pops) => { + for pop in pops { + println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + } } + Err(err) => println!("{}", err) } - Err(err) => println!("{}", err) -} ... ``` -In this piece of code, we take `file` (which has the type -`Option`), and convert it to a type that `search` can use, in -this case, `&Option>`. To do this, we take a reference of -file, and map `Path::new` onto it. In this case, `as_ref()` converts -the `Option` into an `Option<&str>`, and from there, we can -execute `Path::new` to the content of the optional, and return the -optional of the new value. Once we have that, it is a simple matter of -getting the `city` argument and executing `search`. +We've made the user experience a bit nicer by showing the usage message, +instead of a panic from an out-of-bounds index, when `city`, the +remaining free argument, is not present. Modifying `search` is slightly trickier. The `csv` crate can build a parser out of @@ -1938,11 +1957,11 @@ that it is generic on some type parameter `R` that satisfies ```rust,ignore use std::io; -// The rest of the code before this is unchanged +// The rest of the code before this is unchanged. fn search> (file_path: &Option
<P>
, city: &str) - -> Result, Box> { + -> Result, Box> { let mut found = vec![]; let input: Box = match *file_path { None => Box::new(io::stdin()), @@ -1975,6 +1994,8 @@ enum CliError { And now for impls on `Display` and `Error`: ```rust,ignore +use std::fmt; + impl fmt::Display for CliError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -1994,6 +2015,16 @@ impl Error for CliError { CliError::NotFound => "not found", } } + + fn cause(&self) -> Option<&Error> { + match *self { + CliError::Io(ref err) => Some(err), + CliError::Csv(ref err) => Some(err), + // Our custom error doesn't have an underlying cause, + // but we could modify it so that it does. + CliError::NotFound => None, + } + } } ``` @@ -2039,7 +2070,7 @@ fn search> for row in rdr.decode::() { let row = try!(row); match row.population { - None => { } // skip it + None => { } // Skip it. Some(count) => if row.city == city { found.push(PopulationCount { city: row.city, @@ -2087,10 +2118,10 @@ string and add a flag to the Option variable. Once we've done that, Getopts does ```rust,ignore ... -let mut opts = Options::new(); -opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME"); -opts.optflag("h", "help", "Show this usage message."); -opts.optflag("q", "quiet", "Silences errors and warnings."); + let mut opts = Options::new(); + opts.optopt("f", "file", "Choose an input file, instead of using STDIN.", "NAME"); + opts.optflag("h", "help", "Show this usage message."); + opts.optflag("q", "quiet", "Silences errors and warnings."); ... ``` @@ -2098,13 +2129,16 @@ Now we only need to implement our “quiet” functionality. This requires us to tweak the case analysis in `main`: ```rust,ignore -match search(&args.arg_data_path, &args.arg_city) { - Err(CliError::NotFound) if args.flag_quiet => process::exit(1), - Err(err) => panic!("{}", err), - Ok(pops) => for pop in pops { - println!("{}, {}: {:?}", pop.city, pop.country, pop.count); +use std::process; +... 
+ match search(&data_path, city) { + Err(CliError::NotFound) if matches.opt_present("q") => process::exit(1), + Err(err) => panic!("{}", err), + Ok(pops) => for pop in pops { + println!("{}, {}: {:?}", pop.city, pop.country, pop.count); + } } -} +... ``` Certainly, we don't want to be quiet if there was an IO error or if the data @@ -2138,14 +2172,13 @@ heuristics! `unwrap`. Be warned: if it winds up in someone else's hands, don't be surprised if they are agitated by poor error messages! * If you're writing a quick 'n' dirty program and feel ashamed about panicking - anyway, then use either a `String` or a `Box` for your - error type (the `Box` type is because of the - [available `From` impls](../std/convert/trait.From.html)). + anyway, then use either a `String` or a `Box` for your + error type. * Otherwise, in a program, define your own error types with appropriate [`From`](../std/convert/trait.From.html) and [`Error`](../std/error/trait.Error.html) - impls to make the [`try!`](../std/macro.try!.html) + impls to make the [`try!`](../std/macro.try.html) macro more ergonomic. * If you're writing a library and your code can produce errors, define your own error type and implement the @@ -2168,7 +2201,7 @@ heuristics! [3]: ../std/option/enum.Option.html#method.unwrap_or [4]: ../std/option/enum.Option.html#method.unwrap_or_else [5]: ../std/option/enum.Option.html -[6]: ../std/result/ +[6]: ../std/result/index.html [7]: ../std/result/enum.Result.html#method.unwrap [8]: ../std/fmt/trait.Debug.html [9]: ../std/primitive.str.html#method.parse diff --git a/src/doc/book/ffi.md b/src/doc/book/ffi.md index 6aec8d2a048aa..7510cd0b3b591 100644 --- a/src/doc/book/ffi.md +++ b/src/doc/book/ffi.md @@ -28,7 +28,7 @@ and add `extern crate libc;` to your crate root. 
The following is a minimal example of calling a foreign function which will compile if snappy is installed: -```no_run +```rust,no_run # #![feature(libc)] extern crate libc; use libc::size_t; @@ -62,7 +62,7 @@ keeping the binding correct at runtime. The `extern` block can be extended to cover the entire snappy API: -```no_run +```rust,no_run # #![feature(libc)] extern crate libc; use libc::{c_int, size_t}; @@ -95,7 +95,7 @@ internal details. Wrapping the functions which expect buffers involves using the `slice::raw` module to manipulate Rust vectors as pointers to memory. Rust's vectors are guaranteed to be a contiguous block of memory. The -length is number of elements currently contained, and the capacity is the total size in elements of +length is the number of elements currently contained, and the capacity is the total size in elements of the allocated memory. The length is less than or equal to the capacity. ```rust @@ -183,8 +183,62 @@ pub fn uncompress(src: &[u8]) -> Option> { } ``` -For reference, the examples used here are also available as a [library on -GitHub](https://github.com/thestinger/rust-snappy). +Then, we can add some tests to show how to use them. 
+ +```rust +# #![feature(libc)] +# extern crate libc; +# use libc::{c_int, size_t}; +# unsafe fn snappy_compress(input: *const u8, +# input_length: size_t, +# compressed: *mut u8, +# compressed_length: *mut size_t) +# -> c_int { 0 } +# unsafe fn snappy_uncompress(compressed: *const u8, +# compressed_length: size_t, +# uncompressed: *mut u8, +# uncompressed_length: *mut size_t) +# -> c_int { 0 } +# unsafe fn snappy_max_compressed_length(source_length: size_t) -> size_t { 0 } +# unsafe fn snappy_uncompressed_length(compressed: *const u8, +# compressed_length: size_t, +# result: *mut size_t) +# -> c_int { 0 } +# unsafe fn snappy_validate_compressed_buffer(compressed: *const u8, +# compressed_length: size_t) +# -> c_int { 0 } +# fn main() { } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn valid() { + let d = vec![0xde, 0xad, 0xd0, 0x0d]; + let c: &[u8] = &compress(&d); + assert!(validate_compressed_buffer(c)); + assert!(uncompress(c) == Some(d)); + } + + #[test] + fn invalid() { + let d = vec![0, 0, 0, 0]; + assert!(!validate_compressed_buffer(&d)); + assert!(uncompress(&d).is_none()); + } + + #[test] + fn empty() { + let d = vec![]; + assert!(!validate_compressed_buffer(&d)); + assert!(uncompress(&d).is_none()); + let c = compress(&d); + assert!(validate_compressed_buffer(&c)); + assert!(uncompress(&c) == Some(d)); + } +} +``` # Destructors @@ -209,7 +263,7 @@ A basic example is: Rust code: -```no_run +```rust,no_run extern fn callback(a: i32) { println!("I'm called from C with value {0}", a); } @@ -223,7 +277,7 @@ extern { fn main() { unsafe { register_callback(callback); - trigger_callback(); // Triggers the callback + trigger_callback(); // Triggers the callback. } } ``` @@ -240,7 +294,7 @@ int32_t register_callback(rust_callback callback) { } void trigger_callback() { - cb(7); // Will call callback(7) in Rust + cb(7); // Will call callback(7) in Rust. } ``` @@ -262,17 +316,17 @@ referenced Rust object. 
Rust code: -```no_run +```rust,no_run #[repr(C)] struct RustObject { a: i32, - // other members + // Other members... } extern "C" fn callback(target: *mut RustObject, a: i32) { println!("I'm called from C with value {0}", a); unsafe { - // Update the value in RustObject with the value received from the callback + // Update the value in RustObject with the value received from the callback: (*target).a = a; } } @@ -285,7 +339,7 @@ extern { } fn main() { - // Create the object that will be referenced in the callback + // Create the object that will be referenced in the callback: let mut rust_object = Box::new(RustObject { a: 5 }); unsafe { @@ -309,7 +363,7 @@ int32_t register_callback(void* callback_target, rust_callback callback) { } void trigger_callback() { - cb(cb_target, 7); // Will call callback(&rustObject, 7) in Rust + cb(cb_target, 7); // Will call callback(&rustObject, 7) in Rust. } ``` @@ -406,7 +460,7 @@ Foreign APIs often export a global variable which could do something like track global state. In order to access these variables, you declare them in `extern` blocks with the `static` keyword: -```no_run +```rust,no_run # #![feature(libc)] extern crate libc; @@ -417,7 +471,7 @@ extern { fn main() { println!("You have readline version {} installed.", - rl_readline_version as i32); + unsafe { rl_readline_version as i32 }); } ``` @@ -425,7 +479,7 @@ Alternatively, you may need to alter global state provided by a foreign interface. To do this, statics can be declared with `mut` so we can mutate them. -```no_run +```rust,no_run # #![feature(libc)] extern crate libc; @@ -485,6 +539,7 @@ This is currently hidden behind the `abi_vectorcall` gate and is subject to chan * `system` * `C` * `win64` +* `sysv64` Most of the abis in this list are self-explanatory, but the `system` abi may seem a little odd. This constraint selects whatever the appropriate ABI is for @@ -521,16 +576,69 @@ against `libc` and `libm` by default. 
# The "nullable pointer optimization" -Certain types are defined to not be `null`. This includes references (`&T`, -`&mut T`), boxes (`Box`), and function pointers (`extern "abi" fn()`). -When interfacing with C, pointers that might be null are often used. -As a special case, a generic `enum` that contains exactly two variants, one of -which contains no data and the other containing a single field, is eligible -for the "nullable pointer optimization". When such an enum is instantiated -with one of the non-nullable types, it is represented as a single pointer, -and the non-data variant is represented as the null pointer. So -`Option c_int>` is how one represents a nullable -function pointer using the C ABI. +Certain Rust types are defined to never be `null`. This includes references (`&T`, +`&mut T`), boxes (`Box`), and function pointers (`extern "abi" fn()`). When +interfacing with C, pointers that might be `null` are often used, which would seem to +require some messy `transmute`s and/or unsafe code to handle conversions to/from Rust types. +However, the language provides a workaround. + +As a special case, an `enum` is eligible for the "nullable pointer optimization" if it contains +exactly two variants, one of which contains no data and the other contains a field of one of the +non-nullable types listed above. This means no extra space is required for a discriminant; rather, +the empty variant is represented by putting a `null` value into the non-nullable field. This is +called an "optimization", but unlike other optimizations it is guaranteed to apply to eligible +types. + +The most common type that takes advantage of the nullable pointer optimization is `Option`, +where `None` corresponds to `null`. So `Option c_int>` is a correct way +to represent a nullable function pointer using the C ABI (corresponding to the C type +`int (*)(int)`). + +Here is a contrived example. 
Let's say some C library has a facility for registering a +callback, which gets called in certain situations. The callback is passed a function pointer +and an integer and it is supposed to run the function with the integer as a parameter. So +we have function pointers flying across the FFI boundary in both directions. + +```rust +# #![feature(libc)] +extern crate libc; +use libc::c_int; + +# #[cfg(hidden)] +extern "C" { + /// Registers the callback. + fn register(cb: Option c_int>, c_int) -> c_int>); +} +# unsafe fn register(_: Option c_int>, +# c_int) -> c_int>) +# {} + +/// This fairly useless function receives a function pointer and an integer +/// from C, and returns the result of calling the function with the integer. +/// In case no function is provided, it squares the integer by default. +extern "C" fn apply(process: Option c_int>, int: c_int) -> c_int { + match process { + Some(f) => f(int), + None => int * int + } +} + +fn main() { + unsafe { + register(Some(apply)); + } +} +``` + +And the code on the C side looks like this: + +```c +void register(void (*f)(void (*)(int), int)) { + ... +} +``` + +No `transmute` required! # Calling Rust code from C diff --git a/src/doc/book/functions.md b/src/doc/book/functions.md index be905599c6441..b453936fe00d5 100644 --- a/src/doc/book/functions.md +++ b/src/doc/book/functions.md @@ -68,7 +68,7 @@ You get this error: ```text expected one of `!`, `:`, or `@`, found `)` -fn print_number(x, y) { +fn print_sum(x, y) { ``` This is a deliberate design decision. While full-program inference is possible, @@ -134,8 +134,8 @@ x = y = 5 In Rust, however, using `let` to introduce a binding is _not_ an expression. The following will produce a compile-time error: -```ignore -let x = (let y = 5); // expected identifier, found keyword `let` +```rust,ignore +let x = (let y = 5); // Expected identifier, found keyword `let`. 
``` The compiler is telling us here that it was expecting to see the beginning of @@ -151,7 +151,7 @@ other returned value would be too surprising: ```rust let mut y = 5; -let x = (y = 6); // x has the value `()`, not `6` +let x = (y = 6); // `x` has the value `()`, not `6`. ``` The second kind of statement in Rust is the *expression statement*. Its @@ -183,7 +183,7 @@ But what about early returns? Rust does have a keyword for that, `return`: fn foo(x: i32) -> i32 { return x; - // we never run this code! + // We never run this code! x + 1 } ``` @@ -221,7 +221,7 @@ If you add a main function that calls `diverges()` and run it, you’ll get some output that looks like this: ```text -thread ‘
<main>
’ panicked at ‘This function never returns!’, hello.rs:2 +thread ‘main’ panicked at ‘This function never returns!’, hello.rs:2 ``` If you want more information, you can get a backtrace by setting the @@ -229,7 +229,7 @@ If you want more information, you can get a backtrace by setting the ```text $ RUST_BACKTRACE=1 ./diverges -thread '
<main>' panicked at 'This function never returns!', hello.rs:2 +thread 'main' panicked at 'This function never returns!', hello.rs:2 stack backtrace: 1: 0x7f402773a829 - sys::backtrace::write::h0942de78b6c02817K8r 2: 0x7f402773d7fc - panicking::on_panic::h3f23f9d0b5f4c91bu9w @@ -246,12 +246,25 @@ stack backtrace: 13: 0x0 - ``` +If you need to override an already set `RUST_BACKTRACE`, +in cases when you cannot just unset the variable, +then set it to `0` to avoid getting a backtrace. +Any other value (even no value at all) turns on backtrace. + +```text +$ export RUST_BACKTRACE=1 +... +$ RUST_BACKTRACE=0 ./diverges +thread 'main' panicked at 'This function never returns!', hello.rs:2 +note: Run with `RUST_BACKTRACE=1` for a backtrace. +``` + `RUST_BACKTRACE` also works with Cargo’s `run` command: ```text $ RUST_BACKTRACE=1 cargo run Running `target/debug/diverges` -thread '<main>
' panicked at 'This function never returns!', hello.rs:2 +thread 'main' panicked at 'This function never returns!', hello.rs:2 stack backtrace: 1: 0x7f402773a829 - sys::backtrace::write::h0942de78b6c02817K8r 2: 0x7f402773d7fc - panicking::on_panic::h3f23f9d0b5f4c91bu9w @@ -270,7 +283,7 @@ stack backtrace: A diverging function can be used as any type: -```should_panic +```rust,should_panic # fn diverges() -> ! { # panic!("This function never returns!"); # } @@ -294,10 +307,10 @@ fn plus_one(i: i32) -> i32 { i + 1 } -// without type inference +// Without type inference: let f: fn(i32) -> i32 = plus_one; -// with type inference +// With type inference: let f = plus_one; ``` diff --git a/src/doc/book/generics.md b/src/doc/book/generics.md index 9ab601419cd7c..eafad6a05fc3d 100644 --- a/src/doc/book/generics.md +++ b/src/doc/book/generics.md @@ -78,7 +78,7 @@ We can write functions that take generic types with a similar syntax: ```rust fn takes_anything(x: T) { - // do something with x + // Do something with `x`. } ``` diff --git a/src/doc/book/getting-started.md b/src/doc/book/getting-started.md index e9d271e753768..2eab449dbd421 100644 --- a/src/doc/book/getting-started.md +++ b/src/doc/book/getting-started.md @@ -4,104 +4,25 @@ This first chapter of the book will get us going with Rust and its tooling. First, we’ll install Rust. Then, the classic ‘Hello World’ program. Finally, we’ll talk about Cargo, Rust’s build system and package manager. +We’ll be showing off a number of commands using a terminal, and those lines all +start with `$`. You don't need to type in the `$`s, they are there to indicate +the start of each command. We’ll see many tutorials and examples around the web +that follow this convention: `$` for commands run as our regular user, and `#` +for commands we should be running as an administrator. + # Installing Rust The first step to using Rust is to install it. 
Generally speaking, you’ll need an Internet connection to run the commands in this section, as we’ll be -downloading Rust from the internet. +downloading Rust from the Internet. -We’ll be showing off a number of commands using a terminal, and those lines all -start with `$`. We don't need to type in the `$`s, they are there to indicate -the start of each command. We’ll see many tutorials and examples around the web -that follow this convention: `$` for commands run as our regular user, and `#` -for commands we should be running as an administrator. +The Rust compiler runs on, and compiles to, a great number of platforms, but is +best supported on Linux, Mac, and Windows, on the x86 and x86-64 CPU +architecture. There are official builds of the Rust compiler and standard +library for these platforms and more. [For full details on Rust platform support +see the website][platform-support]. -## Platform support - -The Rust compiler runs on, and compiles to, a great number of platforms, though -not all platforms are equally supported. Rust's support levels are organized -into three tiers, each with a different set of guarantees. - -Platforms are identified by their "target triple" which is the string to inform -the compiler what kind of output should be produced. The columns below indicate -whether the corresponding component works on the specified platform. - -### Tier 1 - -Tier 1 platforms can be thought of as "guaranteed to build and work". -Specifically they will each satisfy the following requirements: - -* Automated testing is set up to run tests for the platform. -* Landing changes to the `rust-lang/rust` repository's master branch is gated on - tests passing. -* Official release artifacts are provided for the platform. -* Documentation for how to use and how to build the platform is available. 
- -| Target | std |rustc|cargo| notes | -|-------------------------------|-----|-----|-----|----------------------------| -| `x86_64-pc-windows-msvc` | ✓ | ✓ | ✓ | 64-bit MSVC (Windows 7+) | -| `i686-pc-windows-gnu` | ✓ | ✓ | ✓ | 32-bit MinGW (Windows 7+) | -| `x86_64-pc-windows-gnu` | ✓ | ✓ | ✓ | 64-bit MinGW (Windows 7+) | -| `i686-apple-darwin` | ✓ | ✓ | ✓ | 32-bit OSX (10.7+, Lion+) | -| `x86_64-apple-darwin` | ✓ | ✓ | ✓ | 64-bit OSX (10.7+, Lion+) | -| `i686-unknown-linux-gnu` | ✓ | ✓ | ✓ | 32-bit Linux (2.6.18+) | -| `x86_64-unknown-linux-gnu` | ✓ | ✓ | ✓ | 64-bit Linux (2.6.18+) | - -### Tier 2 - -Tier 2 platforms can be thought of as "guaranteed to build". Automated tests -are not run so it's not guaranteed to produce a working build, but platforms -often work to quite a good degree and patches are always welcome! Specifically, -these platforms are required to have each of the following: - -* Automated building is set up, but may not be running tests. -* Landing changes to the `rust-lang/rust` repository's master branch is gated on - platforms **building**. Note that this means for some platforms only the - standard library is compiled, but for others the full bootstrap is run. -* Official release artifacts are provided for the platform. 
- -| Target | std |rustc|cargo| notes | -|-------------------------------|-----|-----|-----|----------------------------| -| `i686-pc-windows-msvc` | ✓ | ✓ | ✓ | 32-bit MSVC (Windows 7+) | -| `x86_64-unknown-linux-musl` | ✓ | | | 64-bit Linux with MUSL | -| `arm-linux-androideabi` | ✓ | | | ARM Android | -| `arm-unknown-linux-gnueabi` | ✓ | ✓ | | ARM Linux (2.6.18+) | -| `arm-unknown-linux-gnueabihf` | ✓ | ✓ | | ARM Linux (2.6.18+) | -| `aarch64-unknown-linux-gnu` | ✓ | | | ARM64 Linux (2.6.18+) | -| `mips-unknown-linux-gnu` | ✓ | | | MIPS Linux (2.6.18+) | -| `mipsel-unknown-linux-gnu` | ✓ | | | MIPS (LE) Linux (2.6.18+) | - -### Tier 3 - -Tier 3 platforms are those which Rust has support for, but landing changes is -not gated on the platform either building or passing tests. Working builds for -these platforms may be spotty as their reliability is often defined in terms of -community contributions. Additionally, release artifacts and installers are not -provided, but there may be community infrastructure producing these in -unofficial locations. 
- -| Target | std |rustc|cargo| notes | -|-------------------------------|-----|-----|-----|----------------------------| -| `i686-linux-android` | ✓ | | | 32-bit x86 Android | -| `aarch64-linux-android` | ✓ | | | ARM64 Android | -| `powerpc-unknown-linux-gnu` | ✓ | | | PowerPC Linux (2.6.18+) | -| `i386-apple-ios` | ✓ | | | 32-bit x86 iOS | -| `x86_64-apple-ios` | ✓ | | | 64-bit x86 iOS | -| `armv7-apple-ios` | ✓ | | | ARM iOS | -| `armv7s-apple-ios` | ✓ | | | ARM iOS | -| `aarch64-apple-ios` | ✓ | | | ARM64 iOS | -| `i686-unknown-freebsd` | ✓ | ✓ | | 32-bit FreeBSD | -| `x86_64-unknown-freebsd` | ✓ | ✓ | | 64-bit FreeBSD | -| `x86_64-unknown-openbsd` | ✓ | ✓ | | 64-bit OpenBSD | -| `x86_64-unknown-netbsd` | ✓ | ✓ | | 64-bit NetBSD | -| `x86_64-unknown-bitrig` | ✓ | ✓ | | 64-bit Bitrig | -| `x86_64-unknown-dragonfly` | ✓ | ✓ | | 64-bit DragonFlyBSD | -| `x86_64-rumprun-netbsd` | ✓ | | | 64-bit NetBSD Rump Kernel | -| `i686-pc-windows-msvc` (XP) | ✓ | | | Windows XP support | -| `x86_64-pc-windows-msvc` (XP) | ✓ | | | Windows XP support | - -Note that this table can be expanded over time, this isn't the exhaustive set of -tier 3 platforms that will ever be! +[platform-support]: https://forge.rust-lang.org/platform-support.html ## Installing on Linux or Mac @@ -111,23 +32,11 @@ If we're on Linux or a Mac, all we need to do is open a terminal and type this: $ curl -sSf https://static.rust-lang.org/rustup.sh | sh ``` -This will download a script, and stat the installation. If it all goes well, +This will download a script, and start the installation. If it all goes well, you’ll see this appear: ```text -Welcome to Rust. - -This script will download the Rust compiler and its package manager, Cargo, and -install them to /usr/local. You may install elsewhere by running this script -with the --prefix= option. - -The installer will run under ‘sudo’ and may ask you for your password. If you do -not want the script to run ‘sudo’ then pass it the --disable-sudo flag. 
- -You may uninstall later by running /usr/local/lib/rustlib/uninstall.sh, -or by running this script again with the --uninstall flag. - -Continue? (y/N) +Rust is ready to roll. ``` From here, press `y` for ‘yes’, and then follow the rest of the prompts. @@ -163,18 +72,36 @@ You should see the version number, commit hash, and commit date. If you do, Rust has been installed successfully! Congrats! If you don't and you're on Windows, check that Rust is in your %PATH% system -variable. If it isn't, run the installer again, select "Change" on the "Change, -repair, or remove installation" page and ensure "Add to PATH" is installed on -the local hard drive. - -If not, there are a number of places where we can get help. The easiest is -[the #rust IRC channel on irc.mozilla.org][irc], which we can access through -[Mibbit][mibbit]. Click that link, and we'll be chatting with other Rustaceans -(a silly nickname we call ourselves) who can help us out. Other great resources -include [the user’s forum][users], and [Stack Overflow][stackoverflow]. - +variable: `$ echo %PATH%`. If it isn't, run the installer again, select "Change" +on the "Change, repair, or remove installation" page and ensure "Add to PATH" is +installed on the local hard drive. If you need to configure your path manually, +you can find the Rust executables in a directory like +`"C:\Program Files\Rust stable GNU 1.x\bin"`. + +Rust does not do its own linking, and so you’ll need to have a linker +installed. Doing so will depend on your specific system. For +Linux-based systems, Rust will attempt to call `cc` for linking. On +`windows-msvc` (Rust built on Windows with Microsoft Visual Studio), +this depends on having [Microsoft Visual C++ Build Tools][msvbt] +installed. These do not need to be in `%PATH%` as `rustc` will find +them automatically. In general, if you have your linker in a +non-traditional location you can call `rustc +linker=/path/to/cc`, where `/path/to/cc` should point to your linker path. 
+ +[msvbt]: http://landinghub.visualstudio.com/visual-cpp-build-tools + +If you are still stuck, there are a number of places where we can get +help. The easiest is +[the #rust-beginners IRC channel on irc.mozilla.org][irc-beginners] +and for general discussion +[the #rust IRC channel on irc.mozilla.org][irc], which we +can access through [Mibbit][mibbit]. Then we'll be chatting with other +Rustaceans (a silly nickname we call ourselves) who can help us out. Other great +resources include [the user’s forum][users] and [Stack Overflow][stackoverflow]. + +[irc-beginners]: irc://irc.mozilla.org/#rust-beginners [irc]: irc://irc.mozilla.org/#rust -[mibbit]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust +[mibbit]: http://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-beginners,%23rust [users]: https://users.rust-lang.org/ [stackoverflow]: http://stackoverflow.com/questions/tagged/rust @@ -226,12 +153,13 @@ $ cd hello_world ## Writing and Running a Rust Program -Next, make a new source file and call it *main.rs*. Rust files always end -in a *.rs* extension. If you’re using more than one word in your filename, use -an underscore to separate them; for example, you'd use *hello_world.rs* rather -than *helloworld.rs*. +We need to create a source file for our Rust program. Rust files always end +in a *.rs* extension. If you are using more than one word in your filename, +use an underscore to separate them; for example, you would use +*my_program.rs* rather than *myprogram.rs*. -Now open the *main.rs* file you just created, and type the following code: +Now, make a new file and call it *main.rs*. Open the file and type +the following code: ```rust fn main() { @@ -337,7 +265,8 @@ On Windows, you'd enter: ```bash $ dir -main.exe main.rs +main.exe +main.rs ``` This shows we have two files: the source code, with an `.rs` extension, and the @@ -345,7 +274,7 @@ executable (`main.exe` on Windows, `main` everywhere else). 
All that's left to do from here is run the `main` or `main.exe` file, like this: ```bash -$ ./main # or main.exe on Windows +$ ./main # or .\main.exe on Windows ``` If *main.rs* were your "Hello, world!" program, this would print `Hello, @@ -398,20 +327,20 @@ Let’s convert the Hello World program to Cargo. To Cargo-fy a project, you nee to do three things: 1. Put your source file in the right directory. -2. Get rid of the old executable (`main.exe` on Windows, `main` everywhere else) - and make a new one. +2. Get rid of the old executable (`main.exe` on Windows, `main` everywhere + else). 3. Make a Cargo configuration file. Let's get started! -### Creating a new Executable and Source Directory +### Creating a Source Directory and Removing the Old Executable First, go back to your terminal, move to your *hello_world* directory, and enter the following commands: ```bash $ mkdir src -$ mv main.rs src/main.rs +$ mv main.rs src/main.rs # or 'move main.rs src/main.rs' on Windows $ rm main # or 'del main.exe' on Windows ``` @@ -421,7 +350,7 @@ first. This leaves the top-level project directory (in this case, to your code. In this way, using Cargo helps you keep your projects nice and tidy. There's a place for everything, and everything is in its place. -Now, copy *main.rs* to the *src* directory, and delete the compiled file you +Now, move *main.rs* into the *src* directory, and delete the compiled file you created with `rustc`. As usual, replace `main` with `main.exe` if you're on Windows. @@ -489,6 +418,9 @@ $ cargo run Hello, world! ``` +The `run` command comes in handy when you need to rapidly iterate on a +project. + Notice that this example didn’t re-build the project. Cargo figured out that the file hasn’t changed, and so it just ran the binary. 
If you'd modified your source code, Cargo would have rebuilt the project before running it, and you @@ -505,21 +437,23 @@ Cargo checks to see if any of your project’s files have been modified, and onl rebuilds your project if they’ve changed since the last time you built it. With simple projects, Cargo doesn't bring a whole lot over just using `rustc`, -but it will become useful in future. This is especially true when you start +but it will become useful in the future. This is especially true when you start using crates; these are synonymous with a ‘library’ or ‘package’ in other programming languages. For complex projects composed of multiple crates, it’s much easier to let Cargo coordinate the build. Using Cargo, you can run `cargo build`, and it should work the right way. -## Building for Release +### Building for Release -When your project is finally ready for release, you can use `cargo build +When your project is ready for release, you can use `cargo build --release` to compile your project with optimizations. These optimizations make your Rust code run faster, but turning them on makes your program take longer to compile. This is why there are two different profiles, one for development, and one for building the final program you’ll give to a user. -Running this command also causes Cargo to create a new file called +### What Is That `Cargo.lock`? + +Running `cargo build` also causes Cargo to create a new file called *Cargo.lock*, which looks like this: ```toml @@ -563,7 +497,7 @@ executable application, as opposed to a library. Executables are often called *binaries* (as in `/usr/bin`, if you’re on a Unix system). Cargo has generated two files and one directory for us: a `Cargo.toml` and a -*src* directory with a *main.rs* file inside. These should look familliar, +*src* directory with a *main.rs* file inside. These should look familiar, they’re exactly what we created by hand, above. This output is all you need to get started. First, open `Cargo.toml`. 
It should @@ -575,8 +509,12 @@ look something like this: name = "hello_world" version = "0.1.0" authors = ["Your Name "] + +[dependencies] ``` +Do not worry about the `[dependencies]` line, we will come back to it later. + Cargo has populated *Cargo.toml* with reasonable defaults based on the arguments you gave it and your `git` global configuration. You may notice that Cargo has also initialized the `hello_world` directory as a `git` repository. @@ -602,11 +540,11 @@ This chapter covered the basics that will serve you well through the rest of this book, and the rest of your time with Rust. Now that you’ve got the tools down, we'll cover more about the Rust language itself. -You have two options: Dive into a project with ‘[Learn Rust][learnrust]’, or +You have two options: Dive into a project with ‘[Tutorial: Guessing Game][guessinggame]’, or start from the bottom and work your way up with ‘[Syntax and Semantics][syntax]’. More experienced systems programmers will probably prefer -‘Learn Rust’, while those from dynamic backgrounds may enjoy either. Different +‘Tutorial: Guessing Game’, while those from dynamic backgrounds may enjoy either. Different people learn differently! Choose whatever’s right for you. -[learnrust]: learn-rust.html +[guessinggame]: guessing-game.html [syntax]: syntax-and-semantics.html diff --git a/src/doc/book/glossary.md b/src/doc/book/glossary.md index 0956580ade0bb..8aa7fdff94803 100644 --- a/src/doc/book/glossary.md +++ b/src/doc/book/glossary.md @@ -46,6 +46,12 @@ must abide by that constraint. [traits]: traits.html +### Combinators + +Combinators are higher-order functions that apply only functions and +earlier defined combinators to provide a result from its arguments. +They can be used to manage control flow in a modular fashion. + ### DST (Dynamically Sized Type) A type without a statically known size or alignment. 
([more info][link]) diff --git a/src/doc/book/guessing-game.md b/src/doc/book/guessing-game.md index 2e315333565c7..c854b7c373d20 100644 --- a/src/doc/book/guessing-game.md +++ b/src/doc/book/guessing-game.md @@ -19,6 +19,7 @@ has a command that does that for us. Let’s give it a shot: ```bash $ cd ~/projects $ cargo new guessing_game --bin + Created binary (application) `guessing_game` project $ cd guessing_game ``` @@ -51,25 +52,24 @@ Let’s try compiling what Cargo gave us: ```{bash} $ cargo build Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.53 secs ``` Excellent! Open up your `src/main.rs` again. We’ll be writing all of our code in this file. -Before we move on, let me show you one more Cargo command: `run`. `cargo run` -is kind of like `cargo build`, but it also then runs the produced executable. -Try it out: +Remember the `run` command from last chapter? Try it out again here: ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs Running `target/debug/guessing_game` Hello, world! ``` -Great! The `run` command comes in handy when you need to rapidly iterate on a -project. Our game is such a project, we need to quickly test each -iteration before moving on to the next one. +Great! Our game is just the kind of project `run` is good for: we need +to quickly test each iteration before moving on to the next one. # Processing a Guess @@ -158,8 +158,8 @@ take a name on the left hand side of the assignment, it actually accepts a to use for now: ```rust -let foo = 5; // immutable. -let mut bar = 5; // mutable +let foo = 5; // `foo` is immutable. +let mut bar = 5; // `bar` is mutable. ``` [immutable]: mutability.html @@ -258,7 +258,7 @@ done: io::stdin().read_line(&mut guess).expect("failed to read line"); ``` -But that gets hard to read. 
So we’ve split it up, three lines for three method +But that gets hard to read. So we’ve split it up, two lines for two method calls. We already talked about `read_line()`, but what about `expect()`? Well, we already mentioned that `read_line()` puts what the user types into the `&mut String` we pass it. But it also returns a value: in this case, an @@ -276,26 +276,29 @@ it’s called on, and if it isn’t a successful one, [`panic!`][panic]s with a message you passed it. A `panic!` like this will cause our program to crash, displaying the message. -[expect]: ../std/option/enum.Option.html#method.expect +[expect]: ../std/result/enum.Result.html#method.expect [panic]: error-handling.html -If we leave off calling these two methods, our program will compile, but +If we do not call `expect()`, our program will compile, but we’ll get a warning: ```bash $ cargo build Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) -src/main.rs:10:5: 10:39 warning: unused result which must be used, -#[warn(unused_must_use)] on by default -src/main.rs:10 io::stdin().read_line(&mut guess); - ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +warning: unused result which must be used, #[warn(unused_must_use)] on by default + --> src/main.rs:10:5 + | +10 | io::stdin().read_line(&mut guess); + | ^ + + Finished debug [unoptimized + debuginfo] target(s) in 0.42 secs ``` Rust warns us that we haven’t used the `Result` value. This warning comes from a special annotation that `io::Result` has. Rust is trying to tell you that you haven’t handled a possible error. The right way to suppress the error is to actually write error handling. Luckily, if we want to crash if there’s -a problem, we can use these two little methods. If we can recover from the +a problem, we can use `expect()`. If we can recover from the error somehow, we’d do something else, but we’ll save that for a future project. @@ -324,6 +327,7 @@ Anyway, that’s the tour. 
We can run what we have with `cargo run`: ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.44 secs Running `target/debug/guessing_game` Guess the number! Please input your guess. @@ -365,23 +369,23 @@ numbers. A bare number like above is actually shorthand for `^0.3.0`, meaning "anything compatible with 0.3.0". If we wanted to use only `0.3.0` exactly, we could say `rand="=0.3.0"` (note the two equal signs). -And if we wanted to use the latest version we could use `*`. We could also use a range of versions. [Cargo’s documentation][cargodoc] contains more details. [semver]: http://semver.org -[cargodoc]: http://doc.crates.io/crates-io.html +[cargodoc]: http://doc.crates.io/specifying-dependencies.html Now, without changing any of our code, let’s build our project: ```bash $ cargo build Updating registry `https://github.com/rust-lang/crates.io-index` - Downloading rand v0.3.8 - Downloading libc v0.1.6 - Compiling libc v0.1.6 - Compiling rand v0.3.8 + Downloading rand v0.3.14 + Downloading libc v0.2.17 + Compiling libc v0.2.17 + Compiling rand v0.3.14 Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 5.88 secs ``` (You may see different versions, of course.) @@ -403,22 +407,24 @@ If we run `cargo build` again, we’ll get different output: ```bash $ cargo build + Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs ``` -That’s right, no output! Cargo knows that our project has been built, and that +That’s right, nothing was done! Cargo knows that our project has been built, and that all of its dependencies are built, and so there’s no reason to do all that stuff. With nothing to do, it simply exits. 
If we open up `src/main.rs` again, -make a trivial change, and then save it again, we’ll only see one line: +make a trivial change, and then save it again, we’ll only see two lines: ```bash $ cargo build Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.45 secs ``` So, we told Cargo we wanted any `0.3.x` version of `rand`, and so it fetched the latest -version at the time this was written, `v0.3.8`. But what happens when next -week, version `v0.3.9` comes out, with an important bugfix? While getting -bugfixes is important, what if `0.3.9` contains a regression that breaks our +version at the time this was written, `v0.3.14`. But what happens when next +week, version `v0.3.15` comes out, with an important bugfix? While getting +bugfixes is important, what if `0.3.15` contains a regression that breaks our code? The answer to this problem is the `Cargo.lock` file you’ll now find in your @@ -427,11 +433,11 @@ figures out all of the versions that fit your criteria, and then writes them to the `Cargo.lock` file. When you build your project in the future, Cargo will see that the `Cargo.lock` file exists, and then use that specific version rather than do all the work of figuring out versions again. This lets you -have a repeatable build automatically. In other words, we’ll stay at `0.3.8` +have a repeatable build automatically. In other words, we’ll stay at `0.3.14` until we explicitly upgrade, and so will anyone who we share our code with, thanks to the lock file. -What about when we _do_ want to use `v0.3.9`? Cargo has another command, +What about when we _do_ want to use `v0.3.15`? Cargo has another command, `update`, which says ‘ignore the lock, figure out all the latest versions that fit what we’ve specified. If that works, write those versions out to the lock file’. 
But, by default, Cargo will only look for versions larger than `0.3.0` @@ -514,6 +520,7 @@ Try running our new program a few times: ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.55 secs Running `target/debug/guessing_game` Guess the number! The secret number is: 7 @@ -521,6 +528,7 @@ Please input your guess. 4 You guessed: 4 $ cargo run + Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs Running `target/debug/guessing_game` Guess the number! The secret number is: 83 @@ -622,15 +630,20 @@ I did mention that this won’t quite compile yet, though. Let’s try it: ```bash $ cargo build Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) -src/main.rs:28:21: 28:35 error: mismatched types: - expected `&collections::string::String`, - found `&_` -(expected struct `collections::string::String`, - found integral variable) [E0308] -src/main.rs:28 match guess.cmp(&secret_number) { - ^~~~~~~~~~~~~~ +error[E0308]: mismatched types + --> src/main.rs:23:21 + | +23 | match guess.cmp(&secret_number) { + | ^^^^^^^^^^^^^^ expected struct `std::string::String`, found integral variable + | + = note: expected type `&std::string::String` + = note: found type `&{integer}` + error: aborting due to previous error -Could not compile `guessing_game`. + +error: Could not compile `guessing_game`. + +To learn more, run the command again with --verbose. ``` Whew! This is a big error. The core of it is that we have ‘mismatched types’. @@ -644,7 +657,7 @@ So far, that hasn’t mattered, and so Rust defaults to an `i32`. However, here, Rust doesn’t know how to compare the `guess` and the `secret_number`. They need to be the same type. Ultimately, we want to convert the `String` we read as input into a real number type, for comparison. We can do that -with three more lines. Here’s our new program: +with two more lines. 
Here’s our new program: ```rust,ignore extern crate rand; @@ -680,7 +693,7 @@ fn main() { } ``` -The new three lines: +The new two lines: ```rust,ignore let guess: u32 = guess.trim().parse() @@ -726,6 +739,7 @@ Let’s try our program out! ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.57 secs Running `target/guessing_game` Guess the number! The secret number is: 58 @@ -789,6 +803,7 @@ and quit. Observe: ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.58 secs Running `target/guessing_game` Guess the number! The secret number is: 59 @@ -806,7 +821,7 @@ You guessed: 59 You win! Please input your guess. quit -thread '<main>
' panicked at 'Please type a number!' +thread 'main' panicked at 'Please type a number!' ``` Ha! `quit` actually quits. As does any other non-number input. Well, this is @@ -906,22 +921,24 @@ let guess: u32 = match guess.trim().parse() { Err(_) => continue, }; ``` - This is how you generally move from ‘crash on error’ to ‘actually handle the -returned by `parse()` is an `enum` like `Ordering`, but in this case, each -variant has some data associated with it: `Ok` is a success, and `Err` is a +error’, by switching from `expect()` to a `match` statement. A `Result` is +returned by `parse()`, this is an `enum` like `Ordering`, but in this case, +each variant has some data associated with it: `Ok` is a success, and `Err` is a failure. Each contains more information: the successfully parsed integer, or an -error type. In this case, we `match` on `Ok(num)`, which sets the inner value -of the `Ok` to the name `num`, and then we return it on the right-hand -side. In the `Err` case, we don’t care what kind of error it is, so we -use `_` instead of a name. This ignores the error, and `continue` causes us -to go to the next iteration of the `loop`. +error type. In this case, we `match` on `Ok(num)`, which sets the name `num` to +the unwrapped `Ok` value (the integer), and then we return it on the +right-hand side. In the `Err` case, we don’t care what kind of error it is, so +we just use the catch all `_` instead of a name. This catches everything that +isn't `Ok`, and `continue` lets us move to the next iteration of the loop; in +effect, this enables us to ignore all errors and continue with our program. Now we should be good! Let’s try: ```bash $ cargo run Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Finished debug [unoptimized + debuginfo] target(s) in 0.57 secs Running `target/guessing_game` Guess the number! The secret number is: 61 @@ -987,8 +1004,7 @@ fn main() { # Complete! -At this point, you have successfully built the Guessing Game! 
Congratulations! +This project showed you a lot: `let`, `match`, methods, associated +functions, using external crates, and more. -This first project showed you a lot: `let`, `match`, methods, associated -functions, using external crates, and more. Our next project will show off -even more. +At this point, you have successfully built the Guessing Game! Congratulations! diff --git a/src/doc/book/if.md b/src/doc/book/if.md index a532dabf8d12d..52d0dd888efef 100644 --- a/src/doc/book/if.md +++ b/src/doc/book/if.md @@ -4,7 +4,7 @@ Rust’s take on `if` is not particularly complex, but it’s much more like the `if` you’ll find in a dynamically typed language than in a more traditional systems language. So let’s talk about it, to make sure you grasp the nuances. -`if` is a specific form of a more general concept, the ‘branch’. The name comes +`if` is a specific form of a more general concept, the ‘branch’, whose name comes from a branch in a tree: a decision point, where depending on a choice, multiple paths can be taken. diff --git a/src/doc/book/inline-assembly.md b/src/doc/book/inline-assembly.md index 7659c4ff88dae..e531d5d7fc0ff 100644 --- a/src/doc/book/inline-assembly.md +++ b/src/doc/book/inline-assembly.md @@ -2,10 +2,9 @@ For extremely low-level manipulations and performance reasons, one might wish to control the CPU directly. Rust supports using inline -assembly to do this via the `asm!` macro. The syntax roughly matches -that of GCC & Clang: +assembly to do this via the `asm!` macro. -```ignore +```rust,ignore asm!(assembly template : output operands : input operands @@ -35,7 +34,7 @@ fn foo() { } } -// other platforms +// Other platforms: #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] fn foo() { /* ... 
*/ } @@ -58,9 +57,11 @@ but you must add the right number of `:` if you skip them: asm!("xor %eax, %eax" : : - : "{eax}" + : "eax" ); # } } +# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +# fn main() {} ``` Whitespace also doesn't matter: @@ -69,8 +70,10 @@ Whitespace also doesn't matter: # #![feature(asm)] # #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] # fn main() { unsafe { -asm!("xor %eax, %eax" ::: "{eax}"); +asm!("xor %eax, %eax" ::: "eax"); # } } +# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +# fn main() {} ``` ## Operands @@ -127,9 +130,11 @@ stay valid. # #![feature(asm)] # #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] # fn main() { unsafe { -// Put the value 0x200 in eax -asm!("mov $$0x200, %eax" : /* no outputs */ : /* no inputs */ : "{eax}"); +// Put the value 0x200 in eax: +asm!("mov $$0x200, %eax" : /* no outputs */ : /* no inputs */ : "eax"); # } } +# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +# fn main() {} ``` Input and output registers need not be listed since that information @@ -165,6 +170,8 @@ unsafe { } println!("eax is currently {}", result); # } +# #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))] +# fn main() {} ``` ## More Information diff --git a/src/doc/book/iterators.md b/src/doc/book/iterators.md index 5622326d20c31..c174d2d6bacb6 100644 --- a/src/doc/book/iterators.md +++ b/src/doc/book/iterators.md @@ -14,6 +14,11 @@ Now that you know more Rust, we can talk in detail about how this works. Ranges (the `0..10`) are 'iterators'. An iterator is something that we can call the `.next()` method on repeatedly, and it gives us a sequence of things. +(By the way, a range with two dots like `0..10` is inclusive on the left (so it +starts at 0) and exclusive on the right (so it ends at 9). A mathematician +would write "[0, 10)". To get a range that goes all the way up to 10 you can +write `0...10`.) 
+ Like this: ```rust @@ -311,10 +316,12 @@ for i in (1..100).filter(|&x| x % 2 == 0) { ``` This will print all of the even numbers between one and a hundred. -(Note that because `filter` doesn't consume the elements that are -being iterated over, it is passed a reference to each element, and -thus the filter predicate uses the `&x` pattern to extract the integer -itself.) +(Note that, unlike `map`, the closure passed to `filter` is passed a reference +to the element instead of the element itself. The filter predicate here uses +the `&x` pattern to extract the integer. The filter closure is passed a +reference because it returns `true` or `false` instead of the element, +so the `filter` implementation must retain ownership to put the elements +into the newly constructed iterator.) You can chain all three things together: start with an iterator, adapt it a few times, and then consume the result. Check it out: diff --git a/src/doc/book/lang-items.md b/src/doc/book/lang-items.md index e492bd3e7820d..6a08c1b6bb468 100644 --- a/src/doc/book/lang-items.md +++ b/src/doc/book/lang-items.md @@ -15,7 +15,7 @@ For example, `Box` pointers require two lang items, one for allocation and one for deallocation. 
A freestanding program that uses the `Box` sugar for dynamic allocations via `malloc` and `free`: -```rust +```rust,ignore #![feature(lang_items, box_syntax, start, libc)] #![no_std] @@ -32,18 +32,24 @@ pub struct Box(*mut T); unsafe fn allocate(size: usize, _align: usize) -> *mut u8 { let p = libc::malloc(size as libc::size_t) as *mut u8; - // malloc failed + // Check if `malloc` failed: if p as usize == 0 { abort(); } p } + #[lang = "exchange_free"] unsafe fn deallocate(ptr: *mut u8, _size: usize, _align: usize) { libc::free(ptr as *mut libc::c_void) } +#[lang = "box_free"] +unsafe fn box_free(ptr: *mut T) { + deallocate(ptr as *mut u8, ::core::mem::size_of_val(&*ptr), ::core::mem::align_of_val(&*ptr)); +} + #[start] fn main(argc: isize, argv: *const *const u8) -> isize { let x = box 1; @@ -51,8 +57,8 @@ fn main(argc: isize, argv: *const *const u8) -> isize { 0 } -#[lang = "eh_personality"] extern fn eh_personality() {} -#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} } +#[lang = "eh_personality"] extern fn rust_eh_personality() {} +#[lang = "panic_fmt"] extern fn rust_begin_panic() -> ! { loop {} } # #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} # #[no_mangle] pub extern fn rust_eh_register_frames () {} # #[no_mangle] pub extern fn rust_eh_unregister_frames () {} @@ -67,8 +73,8 @@ Other features provided by lang items include: `==`, `<`, dereferencing (`*`) and `+` (etc.) operators are all marked with lang items; those specific four are `eq`, `ord`, `deref`, and `add` respectively. -- stack unwinding and general failure; the `eh_personality`, `fail` - and `fail_bounds_checks` lang items. +- stack unwinding and general failure; the `eh_personality`, + `eh_unwind_resume`, `fail` and `fail_bounds_checks` lang items. - the traits in `std::marker` used to indicate types of various kinds; lang items `send`, `sync` and `copy`. 
- the marker types and variance indicators found in diff --git a/src/doc/book/learn-rust.md b/src/doc/book/learn-rust.md deleted file mode 100644 index 7be7fa4f039a7..0000000000000 --- a/src/doc/book/learn-rust.md +++ /dev/null @@ -1,9 +0,0 @@ -% Learn Rust - -Welcome! This chapter has a few tutorials that teach you Rust through building -projects. You’ll get a high-level overview, but we’ll skim over the details. - -If you’d prefer a more ‘from the ground up’-style experience, check -out [Syntax and Semantics][ss]. - -[ss]: syntax-and-semantics.html diff --git a/src/doc/book/lifetimes.md b/src/doc/book/lifetimes.md index 8bf90b4ea4d8a..140e27d19248c 100644 --- a/src/doc/book/lifetimes.md +++ b/src/doc/book/lifetimes.md @@ -1,7 +1,7 @@ % Lifetimes -This guide is three of three presenting Rust’s ownership system. This is one of -Rust’s most unique and compelling features, with which Rust developers should +This is the last of three sections presenting Rust’s ownership system. This is one of +Rust’s most distinct and compelling features, with which Rust developers should become quite acquainted. Ownership is how Rust achieves its largest goal, memory safety. There are a few distinct concepts, each with its own chapter: @@ -50,29 +50,94 @@ complicated. For example, imagine this set of operations: 4. You decide to use the resource. Uh oh! Your reference is pointing to an invalid resource. This is called a -dangling pointer or ‘use after free’, when the resource is memory. +dangling pointer or ‘use after free’, when the resource is memory. A small +example of such a situation would be: + +```rust,compile_fail +let r; // Introduce reference: `r`. +{ + let i = 1; // Introduce scoped value: `i`. + r = &i; // Store reference of `i` in `r`. +} // `i` goes out of scope and is dropped. + +println!("{}", r); // `r` still refers to `i`. +``` To fix this, we have to make sure that step four never happens after step -three. 
The ownership system in Rust does this through a concept called -lifetimes, which describe the scope that a reference is valid for. +three. In the small example above the Rust compiler is able to report the issue +as it can see the lifetimes of the various values in the function. -When we have a function that takes a reference by argument, we can be implicit -or explicit about the lifetime of the reference: +When we have a function that takes arguments by reference the situation becomes +more complex. Consider the following example: -```rust -// implicit -fn foo(x: &i32) { +```rust,compile_fail,E0106 +fn skip_prefix(line: &str, prefix: &str) -> &str { + // ... +# line } -// explicit -fn bar<'a>(x: &'a i32) { +let line = "lang:en=Hello World!"; +let lang = "en"; + +let v; +{ + let p = format!("lang:{}=", lang); // -+ `p` comes into scope. + v = skip_prefix(line, p.as_str()); // | +} // -+ `p` goes out of scope. +println!("{}", v); +``` + +Here we have a function `skip_prefix` which takes two `&str` references +as parameters and returns a single `&str` reference. We call it +by passing in references to `line` and `p`: Two variables with different +lifetimes. Now the safety of the `println!`-line depends on whether the +reference returned by `skip_prefix` function references the still living +`line` or the already dropped `p` string. + +Because of the above ambiguity, Rust will refuse to compile the example +code. To get it to compile we need to tell the compiler more about the +lifetimes of the references. This can be done by making the lifetimes +explicit in the function declaration: + +```rust +fn skip_prefix<'a, 'b>(line: &'a str, prefix: &'b str) -> &'a str { + // ... +# line } ``` +Let's examine the changes without going too deep into the syntax for now - +we'll get to that later. The first change was adding the `<'a, 'b>` after the +method name. This introduces two lifetime parameters: `'a` and `'b`. 
Next each +reference in the function signature was associated with one of the lifetime +parameters by adding the lifetime name after the `&`. This tells the compiler +how the lifetimes between different references are related. + +As a result the compiler is now able to deduce that the return value of +`skip_prefix` has the same lifetime as the `line` parameter, which makes the `v` +reference safe to use even after the `p` goes out of scope in the original +example. + +In addition to the compiler being able to validate the usage of the `skip_prefix` +return value, it can also ensure that the implementation follows the contract +established by the function declaration. This is useful especially when you are +implementing traits that are introduced [later in the book][traits]. + +**Note** It's important to understand that lifetime annotations are +_descriptive_, not _prescriptive_. This means that how long a reference is valid +is determined by the code, not by the annotations. The annotations, however, +give information about lifetimes to the compiler that uses them to check the +validity of references. The compiler can do so without annotations in simple +cases, but needs the programmer's support in complex scenarios. + +[traits]: traits.html + +# Syntax + The `'a` reads ‘the lifetime a’. Technically, every reference has some lifetime associated with it, but the compiler lets you elide (i.e. omit, see -["Lifetime Elision"][lifetime-elision] below) them in common cases. -Before we get to that, though, let’s break the explicit example down: +["Lifetime Elision"][lifetime-elision] below) them in common cases. Before we +get to that, though, let’s look at a short example with explicit lifetimes: [lifetime-elision]: #lifetime-elision @@ -90,7 +155,8 @@ focus on the lifetimes aspect. [generics]: generics.html We use `<>` to declare our lifetimes. This says that `bar` has one lifetime, -`'a`. 
If we had two reference parameters with different lifetimes, it would +look like this: ```rust,ignore @@ -125,7 +191,7 @@ struct Foo<'a> { } fn main() { - let y = &5; // this is the same as `let _y = 5; let y = &_y;` + let y = &5; // This is the same as `let _y = 5; let y = &_y;`. let f = Foo { x: y }; println!("{}", f.x); @@ -167,7 +233,7 @@ impl<'a> Foo<'a> { } fn main() { - let y = &5; // this is the same as `let _y = 5; let y = &_y;` + let y = &5; // This is the same as `let _y = 5; let y = &_y;`. let f = Foo { x: y }; println!("x is: {}", f.x()); @@ -208,11 +274,11 @@ valid for. For example: ```rust fn main() { - let y = &5; // -+ y goes into scope + let y = &5; // -+ `y` comes into scope. // | - // stuff // | + // Stuff... // | // | -} // -+ y goes out of scope +} // -+ `y` goes out of scope. ``` Adding in our `Foo`: @@ -223,11 +289,12 @@ struct Foo<'a> { } fn main() { - let y = &5; // -+ y goes into scope - let f = Foo { x: y }; // -+ f goes into scope - // stuff // | + let y = &5; // -+ `y` comes into scope. + let f = Foo { x: y }; // -+ `f` comes into scope. + // | + // Stuff... // | // | -} // -+ f and y go out of scope +} // -+ `f` and `y` go out of scope. ``` Our `f` lives within the scope of `y`, so everything works. What if it didn’t? @@ -239,16 +306,16 @@ struct Foo<'a> { } fn main() { - let x; // -+ x goes into scope + let x; // -+ `x` comes into scope. // | { // | - let y = &5; // ---+ y goes into scope - let f = Foo { x: y }; // ---+ f goes into scope - x = &f.x; // | | error here - } // ---+ f and y go out of scope + let y = &5; // ---+ `y` comes into scope. + let f = Foo { x: y }; // ---+ `f` comes into scope. + x = &f.x; // | | This causes an error. + } // ---+ `f` and y go out of scope. // | println!("{}", x); // | -} // -+ x goes out of scope +} // -+ `x` goes out of scope. ``` Whew! As you can see here, the scopes of `f` and `y` are smaller than the scope @@ -282,17 +349,15 @@ to it. 
## Lifetime Elision -Rust supports powerful local type inference in function bodies, but it’s -forbidden in item signatures to allow reasoning about the types based on -the item signature alone. However, for ergonomic reasons a very restricted -secondary inference algorithm called “lifetime elision” applies in function -signatures. It infers only based on the signature components themselves and not -based on the body of the function, only infers lifetime parameters, and does -this with only three easily memorizable and unambiguous rules. This makes -lifetime elision a shorthand for writing an item signature, while not hiding +Rust supports powerful local type inference in the bodies of functions but not in their item signatures. +This restriction allows reasoning about the types based on the item signature alone. +However, for ergonomic reasons, a very restricted secondary inference algorithm called +“lifetime elision” does apply when judging lifetimes. Lifetime elision is concerned solely with inferring +lifetime parameters using three easily memorizable and unambiguous rules. This means lifetime elision +acts as a shorthand for writing an item signature, while not hiding away the actual types involved as full local inference would if applied to it. -When talking about lifetime elision, we use the term *input lifetime* and +When talking about lifetime elision, we use the terms *input lifetime* and *output lifetime*. An *input lifetime* is a lifetime associated with a parameter of a function, and an *output lifetime* is a lifetime associated with the return value of a function. For example, this function has an input lifetime: @@ -337,11 +402,13 @@ fn print<'a>(s: &'a str); // expanded fn debug(lvl: u32, s: &str); // elided fn debug<'a>(lvl: u32, s: &'a str); // expanded +``` -// In the preceding example, `lvl` doesn’t need a lifetime because it’s not a -// reference (`&`). 
Only things relating to references (such as a `struct` -// which contains a reference) need lifetimes. +In the preceding example, `lvl` doesn’t need a lifetime because it’s not a +reference (`&`). Only things relating to references (such as a `struct` +which contains a reference) need lifetimes. +```rust,ignore fn substr(s: &str, until: u32) -> &str; // elided fn substr<'a>(s: &'a str, until: u32) -> &'a str; // expanded @@ -353,8 +420,8 @@ fn frob<'a, 'b>(s: &'a str, t: &'b str) -> &str; // Expanded: Output lifetime is fn get_mut(&mut self) -> &mut T; // elided fn get_mut<'a>(&'a mut self) -> &'a mut T; // expanded -fn args(&mut self, args: &[T]) -> &mut Command; // elided -fn args<'a, 'b, T:ToCStr>(&'a mut self, args: &'b [T]) -> &'a mut Command; // expanded +fn args(&mut self, args: &[T]) -> &mut Command; // elided +fn args<'a, 'b, T: ToCStr>(&'a mut self, args: &'b [T]) -> &'a mut Command; // expanded fn new(buf: &mut [u8]) -> BufWriter; // elided fn new<'a>(buf: &'a mut [u8]) -> BufWriter<'a>; // expanded diff --git a/src/doc/book/loops.md b/src/doc/book/loops.md index 68bb49d2c2966..688e8c552653b 100644 --- a/src/doc/book/loops.md +++ b/src/doc/book/loops.md @@ -74,7 +74,7 @@ for x in 0..10 { In slightly more abstract terms, -```ignore +```rust,ignore for var in expression { code } @@ -105,19 +105,19 @@ When you need to keep track of how many times you already looped, you can use th #### On ranges: ```rust -for (i,j) in (5..10).enumerate() { - println!("i = {} and j = {}", i, j); +for (index, value) in (5..10).enumerate() { + println!("index = {} and value = {}", index, value); } ``` Outputs: ```text -i = 0 and j = 5 -i = 1 and j = 6 -i = 2 and j = 7 -i = 3 and j = 8 -i = 4 and j = 9 +index = 0 and value = 5 +index = 1 and value = 6 +index = 2 and value = 7 +index = 3 and value = 8 +index = 4 and value = 9 ``` Don't forget to add the parentheses around the range. @@ -125,7 +125,8 @@ Don't forget to add the parentheses around the range. 
#### On iterators: ```rust -# let lines = "hello\nworld".lines(); +let lines = "hello\nworld".lines(); + for (linenumber, line) in lines.enumerate() { println!("{}: {}", linenumber, line); } @@ -134,10 +135,8 @@ for (linenumber, line) in lines.enumerate() { Outputs: ```text -0: Content of line one -1: Content of line two -2: Content of line three -3: Content of line four +0: hello +1: world ``` ## Ending iteration early @@ -179,7 +178,7 @@ loop { We now loop forever with `loop` and use `break` to break out early. Issuing an explicit `return` statement will also serve to terminate the loop early. -`continue` is similar, but instead of ending the loop, goes to the next +`continue` is similar, but instead of ending the loop, it goes to the next iteration. This will only print the odd numbers: ```rust @@ -195,7 +194,7 @@ for x in 0..10 { You may also encounter situations where you have nested loops and need to specify which one your `break` or `continue` statement is for. Like most other languages, by default a `break` or `continue` will apply to innermost -loop. In a situation where you would like to a `break` or `continue` for one +loop. In a situation where you would like to `break` or `continue` for one of the outer loops, you can use labels to specify which loop the `break` or `continue` statement applies to. This will only print when both `x` and `y` are odd: @@ -203,8 +202,8 @@ of the outer loops, you can use labels to specify which loop the `break` or ```rust 'outer: for x in 0..10 { 'inner: for y in 0..10 { - if x % 2 == 0 { continue 'outer; } // continues the loop over x - if y % 2 == 0 { continue 'inner; } // continues the loop over y + if x % 2 == 0 { continue 'outer; } // Continues the loop over `x`. + if y % 2 == 0 { continue 'inner; } // Continues the loop over `y`. 
println!("x: {}, y: {}", x, y); } } diff --git a/src/doc/book/macros.md b/src/doc/book/macros.md index 7c8b74bd6495d..7f52b33948ee1 100644 --- a/src/doc/book/macros.md +++ b/src/doc/book/macros.md @@ -78,7 +78,7 @@ macro_rules! vec { Whoa, that’s a lot of new syntax! Let’s break it down. -```ignore +```rust,ignore macro_rules! vec { ... } ``` @@ -92,7 +92,7 @@ syntax and serves to distinguish a macro from an ordinary function. The macro is defined through a series of rules, which are pattern-matching cases. Above, we had -```ignore +```rust,ignore ( $( $x:expr ),* ) => { ... }; ``` @@ -112,7 +112,7 @@ separated by commas. Aside from the special matcher syntax, any Rust tokens that appear in a matcher must match exactly. For example, -```rust +```rust,ignore macro_rules! foo { (x => $e:expr) => (println!("mode X: {}", $e)); (y => $e:expr) => (println!("mode Y: {}", $e)); @@ -147,7 +147,7 @@ The right-hand side of a macro rule is ordinary Rust syntax, for the most part. But we can splice in bits of syntax captured by the matcher. From the original example: -```ignore +```rust,ignore $( temp_vec.push($x); )* @@ -165,7 +165,7 @@ within the repeated block. Another detail: the `vec!` macro has *two* pairs of braces on the right-hand side. They are often combined like so: -```ignore +```rust,ignore macro_rules! foo { () => {{ ... @@ -285,9 +285,11 @@ This expands to ```text const char *state = "reticulating splines"; -int state = get_log_state(); -if (state > 0) { - printf("log(%d): %s\n", state, state); +{ + int state = get_log_state(); + if (state > 0) { + printf("log(%d): %s\n", state, state); + } } ``` @@ -326,7 +328,7 @@ invocation site. Code such as the following will not work: ```rust,ignore macro_rules! foo { - () => (let x = 3); + () => (let x = 3;); } fn main() { @@ -335,12 +337,12 @@ fn main() { } ``` -Instead you need to pass the variable name into the invocation, so it’s tagged -with the right syntax context. 
+Instead you need to pass the variable name into the invocation, so that it’s +tagged with the right syntax context. ```rust macro_rules! foo { - ($v:ident) => (let $v = 3); + ($v:ident) => (let $v = 3;); } fn main() { @@ -468,7 +470,7 @@ which syntactic form it matches. * `ty`: a type. Examples: `i32`; `Vec<(char, String)>`; `&T`. * `pat`: a pattern. Examples: `Some(t)`; `(17, 'a')`; `_`. * `stmt`: a single statement. Example: `let x = 3`. -* `block`: a brace-delimited sequence of statements. Example: +* `block`: a brace-delimited sequence of statements and optionally an expression. Example: `{ log(error, "hi"); return 12; }`. * `item`: an [item][item]. Examples: `fn foo() { }`; `struct Bar;`. * `meta`: a "meta item", as found in attributes. Example: `cfg(target_os = "windows")`. @@ -476,9 +478,9 @@ which syntactic form it matches. There are additional rules regarding the next token after a metavariable: -* `expr` variables may only be followed by one of: `=> , ;` -* `ty` and `path` variables may only be followed by one of: `=> , : = > as` -* `pat` variables may only be followed by one of: `=> , = if in` +* `expr` and `stmt` variables may only be followed by one of: `=> , ;` +* `ty` and `path` variables may only be followed by one of: `=> , = | ; : > [ { as where` +* `pat` variables may only be followed by one of: `=> , = | if in` * Other variables may be followed by any token. These rules provide some flexibility for Rust’s syntax to evolve without @@ -531,33 +533,33 @@ An example: ```rust macro_rules! m1 { () => (()) } -// visible here: m1 +// Visible here: `m1`. mod foo { - // visible here: m1 + // Visible here: `m1`. #[macro_export] macro_rules! m2 { () => (()) } - // visible here: m1, m2 + // Visible here: `m1`, `m2`. } -// visible here: m1 +// Visible here: `m1`. macro_rules! m3 { () => (()) } -// visible here: m1, m3 +// Visible here: `m1`, `m3`. #[macro_use] mod bar { - // visible here: m1, m3 + // Visible here: `m1`, `m3`. macro_rules! 
m4 { () => (()) } - // visible here: m1, m3, m4 + // Visible here: `m1`, `m3`, `m4`. } -// visible here: m1, m3, m4 +// Visible here: `m1`, `m3`, `m4`. # fn main() { } ``` @@ -642,7 +644,7 @@ macro_rules! bct { (1, $p:tt, $($ps:tt),* ; $($ds:tt),*) => (bct!($($ps),*, 1, $p ; $($ds),*)); - // halt on empty data string + // Halt on empty data string: ( $($ps:tt),* ; ) => (()); } @@ -660,7 +662,7 @@ Here are some common macros you’ll see in Rust code. This macro causes the current thread to panic. You can give it a message to panic with: -```rust,no_run +```rust,should_panic panic!("oh no!"); ``` @@ -686,13 +688,13 @@ These two macros are used in tests. `assert!` takes a boolean. `assert_eq!` takes two values and checks them for equality. `true` passes, `false` `panic!`s. Like this: -```rust,no_run +```rust,should_panic // A-ok! assert!(true); assert_eq!(5, 3 + 2); -// nope :( +// Nope :( assert!(5 < 3); assert_eq!(5, 3); diff --git a/src/doc/book/match.md b/src/doc/book/match.md index acffaf4544b10..d01a20083efb5 100644 --- a/src/doc/book/match.md +++ b/src/doc/book/match.md @@ -28,18 +28,18 @@ patterns][patterns] that covers all the patterns that are possible here. [patterns]: patterns.html -One of the many advantages of `match` is it enforces ‘exhaustiveness checking’. -For example if we remove the last arm with the underscore `_`, the compiler will +One of the many advantages of `match` is it enforces ‘exhaustiveness checking’. +For example if we remove the last arm with the underscore `_`, the compiler will give us an error: ```text error: non-exhaustive patterns: `_` not covered ``` -Rust is telling us that we forgot a value. The compiler infers from `x` that it -can have any positive 32bit value; for example 1 to 2,147,483,647. The `_` acts +Rust is telling us that we forgot some value. The compiler infers from `x` that it +can have any 32bit integer value; for example -2,147,483,648 to 2,147,483,647. 
The `_` acts as a 'catch-all', and will catch all possible values that *aren't* specified in -an arm of `match`. As you can see with the previous example, we provide `match` +an arm of `match`. As you can see in the previous example, we provide `match` arms for integers 1-5, if `x` is 6 or any other value, then it is caught by `_`. `match` is also an expression, which means we can use it on the right-hand @@ -58,7 +58,7 @@ let number = match x { }; ``` -Sometimes it’s a nice way of converting something from one type to another; in +Sometimes it’s a nice way of converting something from one type to another; in this example the integers are converted to `String`. # Matching on enums @@ -90,7 +90,7 @@ fn process_message(msg: Message) { Again, the Rust compiler checks exhaustiveness, so it demands that you have a match arm for every variant of the enum. If you leave one off, it -will give you a compile-time error unless you use `_` or provide all possible +will give you a compile-time error unless you use `_` or provide all possible arms. Unlike the previous uses of `match`, you can’t use the normal `if` diff --git a/src/doc/book/mutability.md b/src/doc/book/mutability.md index 71acb551e6e3c..18017cc4a5e54 100644 --- a/src/doc/book/mutability.md +++ b/src/doc/book/mutability.md @@ -6,7 +6,7 @@ status: ```rust,ignore let x = 5; -x = 6; // error! +x = 6; // Error! ``` We can introduce mutability with the `mut` keyword: @@ -14,7 +14,7 @@ We can introduce mutability with the `mut` keyword: ```rust let mut x = 5; -x = 6; // no problem! +x = 6; // No problem! ``` This is a mutable [variable binding][vb]. When a binding is mutable, it means @@ -24,18 +24,16 @@ changed from one `i32` to another. 
[vb]: variable-bindings.html -If you want to change what the binding points to, you’ll need a [mutable reference][mr]: +You can also create a [reference][ref] to it, using `&x`, but if you want to use the reference to change it, you will need a mutable reference: ```rust let mut x = 5; let y = &mut x; ``` -[mr]: references-and-borrowing.html +[ref]: references-and-borrowing.html -`y` is an immutable binding to a mutable reference, which means that you can’t -bind `y` to something else (`y = &mut z`), but you can mutate the thing that’s -bound to `y` (`*y = 5`). A subtle distinction. +`y` is an immutable binding to a mutable reference, which means that you can’t bind 'y' to something else (`y = &mut z`), but `y` can be used to bind `x` to something else (`*y = 5`). A subtle distinction. Of course, if you need both: @@ -57,13 +55,15 @@ fn foo(mut x: i32) { # } ``` +Note that here, the `x` is mutable, but not the `y`. + [pattern]: patterns.html # Interior vs. Exterior Mutability However, when we say something is ‘immutable’ in Rust, that doesn’t mean that -it’s not able to be changed: we mean something has ‘exterior mutability’. Consider, -for example, [`Arc`][arc]: +it’s not able to be changed: we are referring to its ‘exterior mutability’ that +in this case is immutable. Consider, for example, [`Arc`][arc]: ```rust use std::sync::Arc; @@ -136,7 +136,7 @@ some fields mutable and some immutable: ```rust,ignore struct Point { x: i32, - mut y: i32, // nope + mut y: i32, // Nope. } ``` @@ -154,7 +154,7 @@ a.x = 10; let b = Point { x: 5, y: 6}; -b.x = 10; // error: cannot assign to immutable field `b.x` +b.x = 10; // Error: cannot assign to immutable field `b.x`. ``` [struct]: structs.html diff --git a/src/doc/book/nightly-rust.md b/src/doc/book/nightly-rust.md index b3be71038a992..25570cb5503c9 100644 --- a/src/doc/book/nightly-rust.md +++ b/src/doc/book/nightly-rust.md @@ -54,7 +54,7 @@ binary downloads][install-page]. 
Oh, we should also mention the officially supported platforms: -* Windows (7, 8, Server 2008 R2) +* Windows (7+) * Linux (2.6.18 or later, various distributions), x86 and x86-64 * OSX 10.7 (Lion) or greater, x86 and x86-64 diff --git a/src/doc/book/no-stdlib.md b/src/doc/book/no-stdlib.md index 65beaed2fc7e9..a06de35c0ce69 100644 --- a/src/doc/book/no-stdlib.md +++ b/src/doc/book/no-stdlib.md @@ -12,37 +12,65 @@ don’t want to use the standard library via an attribute: `#![no_std]`. > `#![no_std]`](using-rust-without-the-standard-library.html) Obviously there's more to life than just libraries: one can use -`#[no_std]` with an executable, controlling the entry point is -possible in two ways: the `#[start]` attribute, or overriding the -default shim for the C `main` function with your own. +`#[no_std]` with an executable. + +### Using libc + +In order to build a `#[no_std]` executable we will need libc as a dependency. We can specify +this using our `Cargo.toml` file: + +```toml +[dependencies] +libc = { version = "0.2.14", default-features = false } +``` + +Note that the default features have been disabled. This is a critical step - +**the default features of libc include the standard library and so must be +disabled.** + +### Writing an executable without stdlib + +Controlling the entry point is possible in two ways: the `#[start]` attribute, +or overriding the default shim for the C `main` function with your own. The function marked `#[start]` is passed the command line parameters in the same format as C: -```rust -# #![feature(libc)] +```rust,ignore #![feature(lang_items)] #![feature(start)] #![no_std] -// Pull in the system libc library for what crt0.o likely requires +// Pull in the system libc library for what crt0.o likely requires. extern crate libc; -// Entry point for this program +// Entry point for this program. 
#[start] fn start(_argc: isize, _argv: *const *const u8) -> isize { 0 } -// These functions and traits are used by the compiler, but not +// These functions are used by the compiler, but not // for a bare-bones hello world. These are normally // provided by libstd. -#[lang = "eh_personality"] extern fn eh_personality() {} -#[lang = "panic_fmt"] fn panic_fmt() -> ! { loop {} } -# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} -# #[no_mangle] pub extern fn rust_eh_register_frames () {} -# #[no_mangle] pub extern fn rust_eh_unregister_frames () {} -# // fn main() {} tricked you, rustdoc! +#[lang = "eh_personality"] +#[no_mangle] +pub extern fn rust_eh_personality() { +} + +// This function may be needed based on the compilation target. +#[lang = "eh_unwind_resume"] +#[no_mangle] +pub extern fn rust_eh_unwind_resume() { +} + +#[lang = "panic_fmt"] +#[no_mangle] +pub extern fn rust_begin_panic(_msg: core::fmt::Arguments, + _file: &'static str, + _line: u32) -> ! { + loop {} +} ``` To override the compiler-inserted `main` shim, one has to disable it @@ -50,37 +78,66 @@ with `#![no_main]` and then create the appropriate symbol with the correct ABI and the correct name, which requires overriding the compiler's name mangling too: -```rust -# #![feature(libc)] +```rust,ignore #![feature(lang_items)] #![feature(start)] #![no_std] #![no_main] +// Pull in the system libc library for what crt0.o likely requires. extern crate libc; +// Entry point for this program. #[no_mangle] // ensure that this symbol is called `main` in the output -pub extern fn main(argc: i32, argv: *const *const u8) -> i32 { +pub extern fn main(_argc: i32, _argv: *const *const u8) -> i32 { 0 } -#[lang = "eh_personality"] extern fn eh_personality() {} -#[lang = "panic_fmt"] fn panic_fmt() -> ! 
{ loop {} } -# #[lang = "eh_unwind_resume"] extern fn rust_eh_unwind_resume() {} -# #[no_mangle] pub extern fn rust_eh_register_frames () {} -# #[no_mangle] pub extern fn rust_eh_unregister_frames () {} -# // fn main() {} tricked you, rustdoc! +// These functions are used by the compiler, but not +// for a bare-bones hello world. These are normally +// provided by libstd. +#[lang = "eh_personality"] +#[no_mangle] +pub extern fn rust_eh_personality() { +} + +// This function may be needed based on the compilation target. +#[lang = "eh_unwind_resume"] +#[no_mangle] +pub extern fn rust_eh_unwind_resume() { +} + +#[lang = "panic_fmt"] +#[no_mangle] +pub extern fn rust_begin_panic(_msg: core::fmt::Arguments, + _file: &'static str, + _line: u32) -> ! { + loop {} +} ``` +## More about the language items + +The compiler currently makes a few assumptions about symbols which are +available in the executable to call. Normally these functions are provided by +the standard library, but without it you must define your own. These symbols +are called "language items", and they each have an internal name, and then a +signature that an implementation must conform to. + +The first of these functions, `rust_eh_personality`, is used by the failure +mechanisms of the compiler. This is often mapped to GCC's personality function +(see the [libstd implementation][unwind] for more information), but crates +which do not trigger a panic can be assured that this function is never +called. The language item's name is `eh_personality`. + +[unwind]: https://github.com/rust-lang/rust/blob/master/src/libpanic_unwind/gcc.rs -The compiler currently makes a few assumptions about symbols which are available -in the executable to call. Normally these functions are provided by the standard -library, but without it you must define your own. +The second function, `rust_begin_panic`, is also used by the failure mechanisms of the +compiler. 
When a panic happens, this controls the message that's displayed on +the screen. While the language item's name is `panic_fmt`, the symbol name is +`rust_begin_panic`. -The first of these two functions, `eh_personality`, is used by the -failure mechanisms of the compiler. This is often mapped to GCC's -personality function (see the -[libstd implementation](../std/rt/unwind/index.html) for more -information), but crates which do not trigger a panic can be assured -that this function is never called. The second function, `panic_fmt`, is -also used by the failure mechanisms of the compiler. +A third function, `rust_eh_unwind_resume`, is also needed if the `custom_unwind_resume` +flag is set in the options of the compilation target. It allows customizing the +process of resuming unwind at the end of the landing pads. The language item's name +is `eh_unwind_resume`. diff --git a/src/doc/book/operators-and-overloading.md b/src/doc/book/operators-and-overloading.md index fcce831c2d09d..78ff871046ef7 100644 --- a/src/doc/book/operators-and-overloading.md +++ b/src/doc/book/operators-and-overloading.md @@ -69,7 +69,7 @@ impl Add for Point { type Output = f64; fn add(self, rhs: i32) -> f64 { - // add an i32 to a Point and get an f64 + // Add an i32 to a Point and get an f64. # 1.0 } } @@ -123,7 +123,7 @@ fn main() { For `HasArea` and `Square`, we declare a type parameter `T` and replace `f64` with it. The `impl` needs more involved modifications: -```ignore +```rust,ignore impl HasArea for Square where T: Mul + Copy { ... } ``` diff --git a/src/doc/book/ownership.md b/src/doc/book/ownership.md index a62d31d362b14..11eda399adc9b 100644 --- a/src/doc/book/ownership.md +++ b/src/doc/book/ownership.md @@ -1,7 +1,7 @@ % Ownership -This guide is one of three presenting Rust’s ownership system. This is one of -Rust’s most unique and compelling features, with which Rust developers should +This is the first of three sections presenting Rust’s ownership system. 
This is one of +Rust’s most distinct and compelling features, with which Rust developers should become quite acquainted. Ownership is how Rust achieves its largest goal, memory safety. There are a few distinct concepts, each with its own chapter: @@ -51,22 +51,24 @@ fn foo() { } ``` -When `v` comes into scope, a new [vector] is created, and it allocates space on -[the heap][heap] for each of its elements. When `v` goes out of scope at the -end of `foo()`, Rust will clean up everything related to the vector, even the -heap-allocated memory. This happens deterministically, at the end of the scope. +When `v` comes into scope, a new [vector][vectors] is created on [the stack][stack], +and it allocates space on [the heap][heap] for its elements. When `v` goes out +of scope at the end of `foo()`, Rust will clean up everything related to the +vector, even the heap-allocated memory. This happens deterministically, at the +end of the scope. -We'll cover [vectors] in detail later in this chapter; we only use them +We covered [vectors] in the previous chapter; we use them here as an example of a type that allocates space on the heap at runtime. They behave like [arrays], except their size may change by `push()`ing more elements onto them. Vectors have a [generic type][generics] `Vec`, so in this example `v` will have type -`Vec`. We'll cover generics in detail later in this chapter. +`Vec`. We'll cover [generics] in detail in a later chapter. [arrays]: primitive-types.html#arrays [vectors]: vectors.html -[heap]: the-stack-and-the-heap.html +[heap]: the-stack-and-the-heap.html#the-heap +[stack]: the-stack-and-the-heap.html#the-stack [bindings]: variable-bindings.html [generics]: generics.html @@ -105,7 +107,7 @@ try to use something after we’ve passed it as an argument: ```rust,ignore fn take(v: Vec) { - // what happens here isn’t important. + // What happens here isn’t important. 
} let v = vec![1, 2, 3]; @@ -122,21 +124,67 @@ special annotation here, it’s the default thing that Rust does. ## The details The reason that we cannot use a binding after we’ve moved it is subtle, but -important. When we write code like this: +important. + +When we write code like this: + +```rust +let x = 10; +``` + +Rust allocates memory for an integer [i32] on the [stack][sh], copies the bit +pattern representing the value of 10 to the allocated memory and binds the +variable name x to this memory region for future reference. + +[i32]: primitive-types.html#numeric-types + +Now consider the following code fragment: ```rust let v = vec![1, 2, 3]; -let v2 = v; +let mut v2 = v; +``` + +The first line allocates memory for the vector object `v` on the stack like +it does for `x` above. But in addition to that it also allocates some memory +on the [heap][sh] for the actual data (`[1, 2, 3]`). Rust copies the address +of this heap allocation to an internal pointer, which is part of the vector +object placed on the stack (let's call it the data pointer). + +It is worth pointing out (even at the risk of stating the obvious) that the +vector object and its data live in separate memory regions instead of being a +single contiguous memory allocation (due to reasons we will not go into at +this point of time). These two parts of the vector (the one on the stack and +one on the heap) must agree with each other at all times with regards to +things like the length, capacity, etc. + +When we move `v` to `v2`, Rust actually does a bitwise copy of the vector +object `v` into the stack allocation represented by `v2`. This shallow copy +does not create a copy of the heap allocation containing the actual data. +Which means that there would be two pointers to the contents of the vector +both pointing to the same memory allocation on the heap. It would violate +Rust’s safety guarantees by introducing a data race if one could access both +`v` and `v2` at the same time. 
+ +For example if we truncated the vector to just two elements through `v2`: + +```rust +# let v = vec![1, 2, 3]; +# let mut v2 = v; +v2.truncate(2); ``` -The first line allocates memory for the vector object, `v`, and for the data it -contains. The vector object is stored on the [stack][sh] and contains a pointer -to the content (`[1, 2, 3]`) stored on the [heap][sh]. When we move `v` to `v2`, -it creates a copy of that pointer, for `v2`. Which means that there would be two -pointers to the content of the vector on the heap. It would violate Rust’s -safety guarantees by introducing a data race. Therefore, Rust forbids using `v` -after we’ve done the move. +and `v` were still accessible we'd end up with an invalid vector since `v` +would not know that the heap data has been truncated. Now, the part of the +vector `v` on the stack does not agree with the corresponding part on the +heap. `v` still thinks there are three elements in the vector and will +happily let us access the non existent element `v[2]` but as you might +already know this is a recipe for disaster. Especially because it might lead +to a segmentation fault or worse allow an unauthorized user to read from +memory to which they don't have access. + +This is why Rust forbids using `v` after we’ve done the move. [sh]: the-stack-and-the-heap.html @@ -166,7 +214,7 @@ But, unlike a move, we can still use `v` afterward. This is because an `i32` has no pointers to data somewhere else, copying it is a full copy. All primitive types implement the `Copy` trait and their ownership is -therefore not moved like one would assume, following the ´ownership rules´. +therefore not moved like one would assume, following the ‘ownership rules’. To give an example, the two following snippets of code only compile because the `i32` and `bool` types implement the `Copy` trait. 
@@ -216,9 +264,9 @@ Of course, if we had to hand ownership back with every function we wrote: ```rust fn foo(v: Vec) -> Vec { - // do stuff with v + // Do stuff with `v`. - // hand back ownership + // Hand back ownership. v } ``` @@ -227,9 +275,9 @@ This would get very tedious. It gets worse the more things we want to take owner ```rust fn foo(v1: Vec, v2: Vec) -> (Vec, Vec, i32) { - // do stuff with v1 and v2 + // Do stuff with `v1` and `v2`. - // hand back ownership, and the result of our function + // Hand back ownership, and the result of our function. (v1, v2, 42) } @@ -242,6 +290,6 @@ let (v1, v2, answer) = foo(v1, v2); Ugh! The return type, return line, and calling the function gets way more complicated. -Luckily, Rust offers a feature, borrowing, which helps us solve this problem. -It’s the topic of the next section! +Luckily, Rust offers a feature which helps us solve this problem. +It’s called borrowing and is the topic of the next section! diff --git a/src/doc/book/patterns.md b/src/doc/book/patterns.md index 8e9e7246e56f0..b50fa01b8e2be 100644 --- a/src/doc/book/patterns.md +++ b/src/doc/book/patterns.md @@ -1,7 +1,7 @@ % Patterns Patterns are quite common in Rust. We use them in [variable -bindings][bindings], [match statements][match], and other places, too. Let’s go +bindings][bindings], [match expressions][match], and other places, too. Let’s go on a whirlwind tour of all of the things patterns can do! [bindings]: variable-bindings.html @@ -109,14 +109,14 @@ struct Point { y: i32, } -let origin = Point { x: 0, y: 0 }; +let point = Point { x: 2, y: 3 }; -match origin { +match point { Point { x, .. } => println!("x is {}", x), } ``` -This prints `x is 0`. +This prints `x is 2`. You can do this kind of match on any member, not only the first: @@ -126,14 +126,14 @@ struct Point { y: i32, } -let origin = Point { x: 0, y: 0 }; +let point = Point { x: 2, y: 3 }; -match origin { +match point { Point { y, .. 
} => println!("y is {}", y), } ``` -This prints `y is 0`. +This prints `y is 3`. This ‘destructuring’ behavior works on any compound data type, like [tuples][tuples] or [enums][enums]. @@ -163,7 +163,7 @@ ignore parts of a larger structure: ```rust fn coordinate() -> (i32, i32, i32) { - // generate and return some sort of triple tuple + // Generate and return some sort of triple tuple. # (1, 2, 3) } @@ -173,7 +173,39 @@ let (x, _, z) = coordinate(); Here, we bind the first and last element of the tuple to `x` and `z`, but ignore the middle element. -Similarly, you can use `..` in a pattern to disregard multiple values. +It’s worth noting that using `_` never binds the value in the first place, +which means that the value does not move: + +```rust +let tuple: (u32, String) = (5, String::from("five")); + +// Here, tuple is moved, because the String moved: +let (x, _s) = tuple; + +// The next line would give "error: use of partially moved value: `tuple`". +// println!("Tuple is: {:?}", tuple); + +// However, + +let tuple = (5, String::from("five")); + +// Here, tuple is _not_ moved, as the String was never moved, and u32 is Copy: +let (x, _) = tuple; + +// That means this works: +println!("Tuple is: {:?}", tuple); +``` + +This also means that any temporary variables will be dropped at the end of the +statement: + +```rust +// Here, the String created will be dropped immediately, as it’s not bound: + +let _ = String::from(" hello ").trim(); +``` + +You can also use `..` in a pattern to disregard multiple values: ```rust enum OptionalTuple { @@ -271,7 +303,7 @@ struct Person { } let name = "Steve".to_string(); -let mut x: Option = Some(Person { name: Some(name) }); +let x: Option = Some(Person { name: Some(name) }); match x { Some(Person { name: ref a @ Some(_), .. 
}) => println!("{:?}", a), _ => {} diff --git a/src/doc/book/primitive-types.md b/src/doc/book/primitive-types.md index cfd5372b90f91..c4169d64ccc67 100644 --- a/src/doc/book/primitive-types.md +++ b/src/doc/book/primitive-types.md @@ -7,7 +7,7 @@ of these ones, as well, but these are the most primitive. # Booleans -Rust has a built in boolean type, named `bool`. It has two values, `true` and `false`: +Rust has a built-in boolean type, named `bool`. It has two values, `true` and `false`: ```rust let x = true; @@ -54,9 +54,9 @@ bigger numbers. If a number literal has nothing to cause its type to be inferred, it defaults: ```rust -let x = 42; // x has type i32 +let x = 42; // `x` has type `i32`. -let y = 1.0; // y has type f64 +let y = 1.0; // `y` has type `f64`. ``` Here’s a list of the different numeric types, with links to their documentation @@ -89,17 +89,18 @@ Unsigned types use a `u` for their category, and signed types use `i`. The `i` is for ‘integer’. So `u8` is an eight-bit unsigned number, and `i8` is an eight-bit signed number. -## Fixed size types +## Fixed-size types -Fixed size types have a specific number of bits in their representation. Valid +Fixed-size types have a specific number of bits in their representation. Valid bit sizes are `8`, `16`, `32`, and `64`. So, `u32` is an unsigned, 32-bit integer, and `i64` is a signed, 64-bit integer. -## Variable sized types +## Variable-size types -Rust also provides types whose size depends on the size of a pointer of the -underlying machine. These types have ‘size’ as the category, and come in signed -and unsigned varieties. This makes for two types: `isize` and `usize`. +Rust also provides types whose particular size depends on the underlying machine +architecture. Their range is sufficient to express the size of any collection, so +these types have ‘size’ as the category. They come in signed and unsigned varieties +which account for two types: `isize` and `usize`. 
## Floating-point types @@ -162,7 +163,10 @@ A ‘slice’ is a reference to (or “view” into) another data structure. The useful for allowing safe, efficient access to a portion of an array without copying. For example, you might want to reference only one line of a file read into memory. By nature, a slice is not created directly, but from an existing -variable binding. Slices have a defined length, can be mutable or immutable. +variable binding. Slices have a defined length, and can be mutable or immutable. + +Internally, slices are represented as a pointer to the beginning of the data +and a length. ## Slicing syntax @@ -171,12 +175,10 @@ You can use a combo of `&` and `[]` to create a slice from various things. The detail later in this section. The `[]`s, with a range, let you define the length of the slice: -[references]: references-and-borrowing.html - ```rust let a = [0, 1, 2, 3, 4]; -let complete = &a[..]; // A slice containing all of the elements in a -let middle = &a[1..4]; // A slice of a: only the elements 1, 2, and 3 +let complete = &a[..]; // A slice containing all of the elements in `a`. +let middle = &a[1..4]; // A slice of `a`: only the elements `1`, `2`, and `3`. ``` Slices have type `&[T]`. We’ll talk about that `T` when we cover @@ -262,8 +264,8 @@ You can disambiguate a single-element tuple from a value in parentheses with a comma: ```rust -(0,); // single-element tuple -(0); // zero in parentheses +(0,); // A single-element tuple. +(0); // A zero in parentheses. ``` ## Tuple Indexing diff --git a/src/doc/book/raw-pointers.md b/src/doc/book/raw-pointers.md index 679f5489ea8f4..2386475d15ea3 100644 --- a/src/doc/book/raw-pointers.md +++ b/src/doc/book/raw-pointers.md @@ -17,7 +17,7 @@ Here are some things to remember about raw pointers that are different than other pointer types. 
They: - are not guaranteed to point to valid memory and are not even - guaranteed to be non-null (unlike both `Box` and `&`); + guaranteed to be non-NULL (unlike both `Box` and `&`); - do not have any automatic clean-up, unlike `Box`, and so require manual resource management; - are plain-old-data, that is, they don't move ownership, again unlike @@ -101,11 +101,11 @@ programmer *must* guarantee this. The recommended method for the conversion is: ```rust -// explicit cast +// Explicit cast: let i: u32 = 1; let p_imm: *const u32 = &i as *const u32; -// implicit coercion +// Implicit coercion: let mut m: u32 = 2; let p_mut: *mut u32 = &mut m; diff --git a/src/doc/book/references-and-borrowing.md b/src/doc/book/references-and-borrowing.md index e7faf174600a9..6c9c4fa7dd4b7 100644 --- a/src/doc/book/references-and-borrowing.md +++ b/src/doc/book/references-and-borrowing.md @@ -1,7 +1,7 @@ % References and Borrowing -This guide is two of three presenting Rust’s ownership system. This is one of -Rust’s most unique and compelling features, with which Rust developers should +This is the second of three sections presenting Rust’s ownership system. This is one of +Rust’s most distinct and compelling features, with which Rust developers should become quite acquainted. Ownership is how Rust achieves its largest goal, memory safety. There are a few distinct concepts, each with its own chapter: @@ -23,7 +23,7 @@ Before we get to the details, two important notes about the ownership system. Rust has a focus on safety and speed. It accomplishes these goals through many ‘zero-cost abstractions’, which means that in Rust, abstractions cost as little as possible in order to make them work. The ownership system is a prime example -of a zero cost abstraction. All of the analysis we’ll talk about in this guide +of a zero-cost abstraction. All of the analysis we’ll talk about in this guide is _done at compile time_. You do not pay any run-time cost for any of these features. 
@@ -46,9 +46,9 @@ like this: ```rust fn foo(v1: Vec, v2: Vec) -> (Vec, Vec, i32) { - // do stuff with v1 and v2 + // Do stuff with `v1` and `v2`. - // hand back ownership, and the result of our function + // Hand back ownership, and the result of our function. (v1, v2, 42) } @@ -63,9 +63,9 @@ the first step: ```rust fn foo(v1: &Vec, v2: &Vec) -> i32 { - // do stuff with v1 and v2 + // Do stuff with `v1` and `v2`. - // return the answer + // Return the answer. 42 } @@ -74,7 +74,33 @@ let v2 = vec![1, 2, 3]; let answer = foo(&v1, &v2); -// we can use v1 and v2 here! +// We can use `v1` and `v2` here! +``` + +A more concrete example: + +```rust +fn main() { + // Don't worry if you don't understand how `fold` works, the point here is that an immutable reference is borrowed. + fn sum_vec(v: &Vec) -> i32 { + return v.iter().fold(0, |a, &b| a + b); + } + // Borrow two vectors and sum them. + // This kind of borrowing does not allow mutation through the borrowed reference. + fn foo(v1: &Vec, v2: &Vec) -> i32 { + // Do stuff with `v1` and `v2`. + let s1 = sum_vec(v1); + let s2 = sum_vec(v2); + // Return the answer. + s1 + s2 + } + + let v1 = vec![1, 2, 3]; + let v2 = vec![4, 5, 6]; + + let answer = foo(&v1, &v2); + println!("{}", answer); +} ``` Instead of taking `Vec`s as our arguments, we take a reference: @@ -97,7 +123,7 @@ let v = vec![]; foo(&v); ``` -errors with: +will give us this error: ```text error: cannot borrow immutable borrowed content `*v` as mutable @@ -126,8 +152,8 @@ the thing `y` points at. You’ll notice that `x` had to be marked `mut` as well If it wasn’t, we couldn’t take a mutable borrow to an immutable value. You'll also notice we added an asterisk (`*`) in front of `y`, making it `*y`, -this is because `y` is a `&mut` reference. You'll also need to use them for -accessing the contents of a reference as well. +this is because `y` is a `&mut` reference. You'll need to use asterisks to +access the contents of a reference as well. 
Otherwise, `&mut` references are like references. There _is_ a large difference between the two, and how they interact, though. You can tell @@ -153,7 +179,7 @@ As it turns out, there are rules. # The Rules -Here’s the rules about borrowing in Rust: +Here are the rules for borrowing in Rust: First, any borrow must last for a scope no greater than that of the owner. Second, you may have one or the other of these two kinds of borrows, but not @@ -163,8 +189,8 @@ both at the same time: * exactly one mutable reference (`&mut T`). -You may notice that this is very similar, though not exactly the same as, -to the definition of a data race: +You may notice that this is very similar to, though not exactly the same as, +the definition of a data race: > There is a ‘data race’ when two or more pointers access the same memory > location at the same time, where at least one of them is writing, and the @@ -182,12 +208,14 @@ With this in mind, let’s consider our example again. Here’s the code: ```rust,ignore -let mut x = 5; -let y = &mut x; +fn main() { + let mut x = 5; + let y = &mut x; -*y += 1; + *y += 1; -println!("{}", x); + println!("{}", x); +} ``` This code gives us this error: @@ -199,7 +227,7 @@ error: cannot borrow `x` as immutable because it is also borrowed as mutable ``` This is because we’ve violated the rules: we have a `&mut T` pointing to `x`, -and so we aren’t allowed to create any `&T`s. One or the other. The note +and so we aren’t allowed to create any `&T`s. It's one or the other. The note hints at how to think about this problem: ```text @@ -211,19 +239,22 @@ fn main() { ``` In other words, the mutable borrow is held through the rest of our example. What -we want is for the mutable borrow to end _before_ we try to call `println!` and -make an immutable borrow. In Rust, borrowing is tied to the scope that the -borrow is valid for. 
And our scopes look like this: +we want is for the mutable borrow by `y` to end so that the resource can be +returned to the owner, `x`. `x` can then provide an immutable borrow to `println!`. +In Rust, borrowing is tied to the scope that the borrow is valid for. And our +scopes look like this: ```rust,ignore -let mut x = 5; - -let y = &mut x; // -+ &mut borrow of x starts here - // | -*y += 1; // | - // | -println!("{}", x); // -+ - try to borrow x here - // -+ &mut borrow of x ends here +fn main() { + let mut x = 5; + + let y = &mut x; // -+ &mut borrow of `x` starts here. + // | + *y += 1; // | + // | + println!("{}", x); // -+ - Try to borrow `x` here. +} // -+ &mut borrow of `x` ends here. + ``` The scopes conflict: we can’t make an `&x` while `y` is in scope. @@ -234,20 +265,20 @@ So when we add the curly braces: let mut x = 5; { - let y = &mut x; // -+ &mut borrow starts here + let y = &mut x; // -+ &mut borrow starts here. *y += 1; // | -} // -+ ... and ends here +} // -+ ... and ends here. -println!("{}", x); // <- try to borrow x here +println!("{}", x); // <- Try to borrow `x` here. ``` There’s no problem. Our mutable borrow goes out of scope before we create an -immutable one. But scope is the key to seeing how long a borrow lasts for. +immutable one. So scope is the key to seeing how long a borrow lasts for. ## Issues borrowing prevents Why have these restrictive rules? Well, as we noted, these rules prevent data -races. What kinds of issues do data races cause? Here’s a few. +races. What kinds of issues do data races cause? Here are a few. ### Iterator invalidation @@ -296,7 +327,7 @@ for i in &v { We can’t modify `v` because it’s borrowed by the loop. -### use after free +### Use after free References must not live longer than the resource they refer to. Rust will check the scopes of your references to ensure that this is true. 
@@ -378,4 +409,3 @@ statement 1 at 3:14 In the above example, `y` is declared before `x`, meaning that `y` lives longer than `x`, which is not allowed. - diff --git a/src/doc/book/slice-patterns.md b/src/doc/book/slice-patterns.md index de165b70fc402..fcedf0c994f9c 100644 --- a/src/doc/book/slice-patterns.md +++ b/src/doc/book/slice-patterns.md @@ -10,7 +10,7 @@ fn main() { let v = vec!["match_this", "1"]; match &v[..] { - ["match_this", second] => println!("The second element is {}", second), + &["match_this", second] => println!("The second element is {}", second), _ => {}, } } @@ -26,8 +26,8 @@ slice will be bound to that name. For example: fn is_symmetric(list: &[u32]) -> bool { match list { - [] | [_] => true, - [x, inside.., y] if x == y => is_symmetric(inside), + &[] | &[_] => true, + &[x, ref inside.., y] if x == y => is_symmetric(inside), _ => false } } diff --git a/src/doc/book/strings.md b/src/doc/book/strings.md index 751619d544a4a..6af15d8768363 100644 --- a/src/doc/book/strings.md +++ b/src/doc/book/strings.md @@ -9,7 +9,7 @@ strings also work differently than in some other systems languages, such as C. Let’s dig into the details. A ‘string’ is a sequence of Unicode scalar values encoded as a stream of UTF-8 bytes. All strings are guaranteed to be a valid encoding of UTF-8 sequences. Additionally, unlike some systems languages, -strings are not null-terminated and can contain null bytes. +strings are not NUL-terminated and can contain NUL bytes. Rust has two main types of strings: `&str` and `String`. Let’s talk about `&str` first. These are called ‘string slices’. A string slice has a fixed @@ -32,19 +32,24 @@ include the newline and the leading spaces: let s = "foo bar"; -assert_eq!("foo\n bar", s); +assert_eq!("foo\n bar", s); ``` The second, with a `\`, trims the spaces and the newline: ```rust let s = "foo\ - bar"; + bar"; assert_eq!("foobar", s); ``` -Rust has more than only `&str`s though. A `String`, is a heap-allocated string. 
+Note that you normally cannot access a `str` directly, but only through a `&str` +reference. This is because `str` is an unsized type which requires additional +runtime information to be usable. For more information see the chapter on +[unsized types][ut]. + +Rust has more than only `&str`s though. A `String` is a heap-allocated string. This string is growable, and is also guaranteed to be UTF-8. `String`s are commonly created by converting from a string slice using the `to_string` method. @@ -78,10 +83,10 @@ converted using `&*`. ```rust,no_run use std::net::TcpStream; -TcpStream::connect("192.168.0.1:3000"); // &str parameter +TcpStream::connect("192.168.0.1:3000"); // Parameter is of type &str. let addr_string = "192.168.0.1:3000".to_string(); -TcpStream::connect(&*addr_string); // convert addr_string to &str +TcpStream::connect(&*addr_string); // Convert `addr_string` to &str. ``` Viewing a `String` as a `&str` is cheap, but converting the `&str` to a @@ -89,7 +94,7 @@ Viewing a `String` as a `&str` is cheap, but converting the `&str` to a ## Indexing -Because strings are valid UTF-8, strings do not support indexing: +Because strings are valid UTF-8, they do not support indexing: ```rust,ignore let s = "hello"; @@ -133,7 +138,7 @@ You can get something similar to an index like this: ```rust # let hachiko = "忠犬ハチ公"; -let dog = hachiko.chars().nth(1); // kinda like hachiko[1] +let dog = hachiko.chars().nth(1); // Kinda like `hachiko[1]`. ``` This emphasizes that we have to walk from the beginning of the list of `chars`. @@ -158,7 +163,7 @@ let hachi = &dog[0..2]; with this error: ```text -thread '
' panicked at 'index 0 and/or 2 in `忠犬ハチ公` do not lie on +thread 'main' panicked at 'index 0 and/or 2 in `忠犬ハチ公` do not lie on character boundary' ``` @@ -185,5 +190,6 @@ let hello_world = hello + &world; This is because `&String` can automatically coerce to a `&str`. This is a feature called ‘[`Deref` coercions][dc]’. +[ut]: unsized-types.html [dc]: deref-coercions.html [connect]: ../std/net/struct.TcpStream.html#method.connect diff --git a/src/doc/book/structs.md b/src/doc/book/structs.md index b2fddf336273f..cfd00cf997e0b 100644 --- a/src/doc/book/structs.md +++ b/src/doc/book/structs.md @@ -61,7 +61,7 @@ write something like this: ```rust,ignore struct Point { - mut x: i32, + mut x: i32, // This causes an error. y: i32, } ``` @@ -82,9 +82,9 @@ fn main() { point.x = 5; - let point = point; // now immutable + let point = point; // `point` is now immutable. - point.y = 6; // this causes an error + point.y = 6; // This causes an error. } ``` @@ -163,11 +163,51 @@ struct Point(i32, i32, i32); let black = Color(0, 0, 0); let origin = Point(0, 0, 0); ``` -Here, `black` and `origin` are not equal, even though they contain the same -values. -It is almost always better to use a `struct` than a tuple struct. We -would write `Color` and `Point` like this instead: +Here, `black` and `origin` are not the same type, even though they contain the +same values. + +The members of a tuple struct may be accessed by dot notation or destructuring +`let`, just like regular tuples: + +```rust +# struct Color(i32, i32, i32); +# struct Point(i32, i32, i32); +# let black = Color(0, 0, 0); +# let origin = Point(0, 0, 0); +let black_r = black.0; +let Point(_, origin_y, origin_z) = origin; +``` + +Patterns like `Point(_, origin_y, origin_z)` are also used in +[match expressions][match]. + +One case when a tuple struct is very useful is when it has only one element. 
+We call this the ‘newtype’ pattern, because it allows you to create a new type +that is distinct from its contained value and also expresses its own semantic +meaning: + +```rust +struct Inches(i32); + +let length = Inches(10); + +let Inches(integer_length) = length; +println!("length is {} inches", integer_length); +``` + +As above, you can extract the inner integer type through a destructuring `let`. +In this case, the `let Inches(integer_length)` assigns `10` to `integer_length`. +We could have used dot notation to do the same thing: + +```rust +# struct Inches(i32); +# let length = Inches(10); +let integer_length = length.0; +``` + +It's always possible to use a `struct` instead of a tuple struct, and can be +clearer. We could write `Color` and `Point` like this instead: ```rust struct Color { @@ -187,32 +227,19 @@ Good names are important, and while values in a tuple struct can be referenced with dot notation as well, a `struct` gives us actual names, rather than positions. -There _is_ one case when a tuple struct is very useful, though, and that is when -it has only one element. We call this the ‘newtype’ pattern, because -it allows you to create a new type that is distinct from its contained value -and also expresses its own semantic meaning: - -```rust -struct Inches(i32); - -let length = Inches(10); - -let Inches(integer_length) = length; -println!("length is {} inches", integer_length); -``` - -As you can see here, you can extract the inner integer type through a -destructuring `let`, as with regular tuples. In this case, the -`let Inches(integer_length)` assigns `10` to `integer_length`. +[match]: match.html # Unit-like structs You can define a `struct` with no members at all: ```rust -struct Electron; +struct Electron {} // Use empty braces... +struct Proton; // ...or just a semicolon. -let x = Electron; +// Whether you declared the struct with braces or not, do the same when creating one. 
+let x = Electron {}; +let y = Proton; ``` Such a `struct` is called ‘unit-like’ because it resembles the empty diff --git a/src/doc/book/syntax-index.md b/src/doc/book/syntax-index.md index f7e32943c638e..28403711cd701 100644 --- a/src/doc/book/syntax-index.md +++ b/src/doc/book/syntax-index.md @@ -2,7 +2,7 @@ ## Keywords -* `as`: primitive casting. See [Casting Between Types (`as`)]. +* `as`: primitive casting, or disambiguating the specific trait containing an item. See [Casting Between Types (`as`)], [Universal Function Call Syntax (Angle-bracket Form)], [Associated Types]. * `break`: break out of loop. See [Loops (Ending Iteration Early)]. * `const`: constant items and constant raw pointers. See [`const` and `static`], [Raw Pointers]. * `continue`: continue to next loop iteration. See [Loops (Ending Iteration Early)]. @@ -43,39 +43,39 @@ * `!` (`!expr`): bitwise or logical complement. Overloadable (`Not`). * `!=` (`var != expr`): nonequality comparison. Overloadable (`PartialEq`). * `%` (`expr % expr`): arithmetic remainder. Overloadable (`Rem`). -* `%=` (`var %= expr`): arithmetic remainder & assignment. +* `%=` (`var %= expr`): arithmetic remainder & assignment. Overloadable (`RemAssign`). * `&` (`expr & expr`): bitwise and. Overloadable (`BitAnd`). * `&` (`&expr`): borrow. See [References and Borrowing]. * `&` (`&type`, `&mut type`, `&'a type`, `&'a mut type`): borrowed pointer type. See [References and Borrowing]. -* `&=` (`var &= expr`): bitwise and & assignment. +* `&=` (`var &= expr`): bitwise and & assignment. Overloadable (`BitAndAssign`). * `&&` (`expr && expr`): logical and. * `*` (`expr * expr`): arithmetic multiplication. Overloadable (`Mul`). * `*` (`*expr`): dereference. * `*` (`*const type`, `*mut type`): raw pointer. See [Raw Pointers]. -* `*=` (`var *= expr`): arithmetic multiplication & assignment. +* `*=` (`var *= expr`): arithmetic multiplication & assignment. Overloadable (`MulAssign`). * `+` (`expr + expr`): arithmetic addition. 
Overloadable (`Add`). * `+` (`trait + trait`, `'a + trait`): compound type constraint. See [Traits (Multiple Trait Bounds)]. -* `+=` (`var += expr`): arithmetic addition & assignment. +* `+=` (`var += expr`): arithmetic addition & assignment. Overloadable (`AddAssign`). * `,`: argument and element separator. See [Attributes], [Functions], [Structs], [Generics], [Match], [Closures], [Crates and Modules (Importing Modules with `use`)]. * `-` (`expr - expr`): arithmetic subtraction. Overloadable (`Sub`). * `-` (`- expr`): arithmetic negation. Overloadable (`Neg`). -* `-=` (`var -= expr`): arithmetic subtraction & assignment. +* `-=` (`var -= expr`): arithmetic subtraction & assignment. Overloadable (`SubAssign`). * `->` (`fn(…) -> type`, `|…| -> type`): function and closure return type. See [Functions], [Closures]. -* `-> !` (`fn(…) -> !`, `|…| -> !`): diverging function or closure. See [Diverging Functions]. * `.` (`expr.ident`): member access. See [Structs], [Method Syntax]. * `..` (`..`, `expr..`, `..expr`, `expr..expr`): right-exclusive range literal. * `..` (`..expr`): struct literal update syntax. See [Structs (Update syntax)]. * `..` (`variant(x, ..)`, `struct_type { x, .. }`): "and the rest" pattern binding. See [Patterns (Ignoring bindings)]. -* `...` (`expr ... expr`): inclusive range pattern. See [Patterns (Ranges)]. +* `...` (`...expr`, `expr...expr`) *in an expression*: inclusive range expression. See [Iterators]. +* `...` (`expr...expr`) *in a pattern*: inclusive range pattern. See [Patterns (Ranges)]. * `/` (`expr / expr`): arithmetic division. Overloadable (`Div`). -* `/=` (`var /= expr`): arithmetic division & assignment. +* `/=` (`var /= expr`): arithmetic division & assignment. Overloadable (`DivAssign`). * `:` (`pat: type`, `ident: type`): constraints. See [Variable Bindings], [Functions], [Structs], [Traits]. * `:` (`ident: expr`): struct field initializer. See [Structs]. * `:` (`'a: loop {…}`): loop label. See [Loops (Loops Labels)]. 
* `;`: statement and item terminator. * `;` (`[…; len]`): part of fixed-size array syntax. See [Primitive Types (Arrays)]. * `<<` (`expr << expr`): left-shift. Overloadable (`Shl`). -* `<<=` (`var <<= expr`): left-shift & assignment. +* `<<=` (`var <<= expr`): left-shift & assignment. Overloadable (`ShlAssign`). * `<` (`expr < expr`): less-than comparison. Overloadable (`PartialOrd`). * `<=` (`var <= expr`): less-than or equal-to comparison. Overloadable (`PartialOrd`). * `=` (`var = expr`, `ident = type`): assignment/equivalence. See [Variable Bindings], [`type` Aliases], generic parameter defaults. @@ -84,16 +84,17 @@ * `>` (`expr > expr`): greater-than comparison. Overloadable (`PartialOrd`). * `>=` (`var >= expr`): greater-than or equal-to comparison. Overloadable (`PartialOrd`). * `>>` (`expr >> expr`): right-shift. Overloadable (`Shr`). -* `>>=` (`var >>= expr`): right-shift & assignment. +* `>>=` (`var >>= expr`): right-shift & assignment. Overloadable (`ShrAssign`). * `@` (`ident @ pat`): pattern binding. See [Patterns (Bindings)]. * `^` (`expr ^ expr`): bitwise exclusive or. Overloadable (`BitXor`). -* `^=` (`var ^= expr`): bitwise exclusive or & assignment. +* `^=` (`var ^= expr`): bitwise exclusive or & assignment. Overloadable (`BitXorAssign`). * `|` (`expr | expr`): bitwise or. Overloadable (`BitOr`). * `|` (`pat | pat`): pattern alternatives. See [Patterns (Multiple patterns)]. * `|` (`|…| expr`): closures. See [Closures]. -* `|=` (`var |= expr`): bitwise or & assignment. +* `|=` (`var |= expr`): bitwise or & assignment. Overloadable (`BitOrAssign`). * `||` (`expr || expr`): logical or. -* `_`: "ignored" pattern binding. See [Patterns (Ignoring bindings)]. +* `_`: "ignored" pattern binding (see [Patterns (Ignoring bindings)]). Also used to make integer-literals readable (see [Reference (Integer literals)]). +* `?` (`expr?`): Error propagation. Returns early when `Err(_)` is encountered, unwraps otherwise. Similar to the [`try!` macro]. 
## Other Syntax @@ -115,8 +116,11 @@ * `::path`: path relative to the crate root (*i.e.* an explicitly absolute path). See [Crates and Modules (Re-exporting with `pub use`)]. * `self::path`: path relative to the current module (*i.e.* an explicitly relative path). See [Crates and Modules (Re-exporting with `pub use`)]. * `super::path`: path relative to the parent of the current module. See [Crates and Modules (Re-exporting with `pub use`)]. -* `type::ident`: associated constants, functions, and types. See [Associated Types]. +* `type::ident`, `::ident`: associated constants, functions, and types. See [Associated Types]. * `::…`: associated item for a type which cannot be directly named (*e.g.* `<&T>::…`, `<[T]>::…`, *etc.*). See [Associated Types]. +* `trait::method(…)`: disambiguating a method call by naming the trait which defines it. See [Universal Function Call Syntax]. +* `type::method(…)`: disambiguating a method call by naming the type for which it's defined. See [Universal Function Call Syntax]. +* `::method(…)`: disambiguating a method call by naming the trait _and_ type. See [Universal Function Call Syntax (Angle-bracket Form)]. @@ -132,7 +136,8 @@ * `T: U`: generic parameter `T` constrained to types that implement `U`. See [Traits]. -* `T: 'a`: generic type `T` must outlive lifetime `'a`. +* `T: 'a`: generic type `T` must outlive lifetime `'a`. When we say that a type 'outlives' the lifetime, we mean that it cannot transitively contain any references with lifetimes shorter than `'a`. +* `T : 'static`: The generic type `T` contains no borrowed references other than `'static` ones. * `'b: 'a`: generic lifetime `'b` must outlive lifetime `'a`. * `T: ?Sized`: allow generic type parameter to be a dynamically-sized type. See [Unsized Types (`?Sized`)]. * `'a + trait`, `trait + trait`: compound type constraint. See [Traits (Multiple Trait Bounds)]. @@ -154,6 +159,10 @@ * `/*!…*/`: inner block doc comment. See [Comments]. * `/**…*/`: outer block doc comment. 
See [Comments]. + + +* `!`: always empty Never type. See [Diverging Functions]. + * `()`: empty tuple (*a.k.a.* unit), both literal and type. @@ -201,6 +210,8 @@ [Functions (Early Returns)]: functions.html#early-returns [Functions]: functions.html [Generics]: generics.html +[Iterators]: iterators.html +[`try!` macro]: error-handling.html#the-try-macro [Lifetimes]: lifetimes.html [Loops (`for`)]: loops.html#for [Loops (`loop`)]: loops.html#loop @@ -225,6 +236,7 @@ [Primitive Types (Tuples)]: primitive-types.html#tuples [Raw Pointers]: raw-pointers.html [Reference (Byte String Literals)]: ../reference.html#byte-string-literals +[Reference (Integer literals)]: ../reference.html#integer-literals [Reference (Raw Byte String Literals)]: ../reference.html#raw-byte-string-literals [Reference (Raw String Literals)]: ../reference.html#raw-string-literals [References and Borrowing]: references-and-borrowing.html @@ -234,6 +246,8 @@ [Traits (`where` clause)]: traits.html#where-clause [Traits (Multiple Trait Bounds)]: traits.html#multiple-trait-bounds [Traits]: traits.html +[Universal Function Call Syntax]: ufcs.html +[Universal Function Call Syntax (Angle-bracket Form)]: ufcs.html#angle-bracket-form [Unsafe]: unsafe.html [Unsized Types (`?Sized`)]: unsized-types.html#sized [Variable Bindings]: variable-bindings.html diff --git a/src/doc/book/testing.md b/src/doc/book/testing.md index 005184e90a7e9..ebeb9923197a9 100644 --- a/src/doc/book/testing.md +++ b/src/doc/book/testing.md @@ -23,7 +23,26 @@ $ cd adder Cargo will automatically generate a simple test when you make a new project. 
Here's the contents of `src/lib.rs`: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + } +} +``` + +For now, let's remove the `mod` bit, and focus on just the function: + +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# #[test] fn it_works() { } @@ -35,8 +54,9 @@ currently has no body. That's good enough to pass! We can run the tests with ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Finished debug [unoptimized + debuginfo] target(s) in 0.15 secs + Running target/debug/deps/adder-941f01916ca4a642 running 1 test test it_works ... ok @@ -61,7 +81,9 @@ test it_works ... ok Note the `it_works`. This comes from the name of our function: ```rust +# fn main() { fn it_works() { +} # } ``` @@ -74,7 +96,11 @@ test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured So why does our do-nothing test pass? Any test which doesn't `panic!` passes, and any test that does `panic!` fails. Let's make our test fail: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# #[test] fn it_works() { assert!(false); @@ -82,13 +108,14 @@ fn it_works() { ``` `assert!` is a macro provided by Rust which takes one argument: if the argument -is `true`, nothing happens. If the argument is `false`, it `panic!`s. Let's run -our tests again: +is `true`, nothing happens. If the argument is `false`, it will `panic!`. 
Let's +run our tests again: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Finished debug [unoptimized + debuginfo] target(s) in 0.17 secs + Running target/debug/deps/adder-941f01916ca4a642 running 1 test test it_works ... FAILED @@ -96,8 +123,8 @@ test it_works ... FAILED failures: ---- it_works stdout ---- - thread 'it_works' panicked at 'assertion failed: false', /home/steve/tmp/adder/src/lib.rs:3 - + thread 'it_works' panicked at 'assertion failed: false', src/lib.rs:5 +note: Run with `RUST_BACKTRACE=1` for a backtrace. failures: @@ -105,7 +132,7 @@ failures: test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured -thread '
' panicked at 'Some tests failed', /home/steve/src/rust/src/libtest/lib.rs:247 +error: test failed ``` Rust indicates that our test failed: @@ -144,7 +171,11 @@ This is useful if you want to integrate `cargo test` into other tooling. We can invert our test's failure with another attribute: `should_panic`: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# #[test] #[should_panic] fn it_works() { @@ -156,8 +187,9 @@ This test will now succeed if we `panic!` and fail if we complete. Let's try it: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Finished debug [unoptimized + debuginfo] target(s) in 0.17 secs + Running target/debug/deps/adder-941f01916ca4a642 running 1 test test it_works ... ok @@ -174,7 +206,11 @@ test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured Rust provides another macro, `assert_eq!`, that compares two arguments for equality: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# #[test] #[should_panic] fn it_works() { @@ -187,8 +223,9 @@ passes: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Finished debug [unoptimized + debuginfo] target(s) in 0.21 secs + Running target/debug/deps/adder-941f01916ca4a642 running 1 test test it_works ... ok @@ -208,7 +245,11 @@ parameter can be added to the `should_panic` attribute. The test harness will make sure that the failure message contains the provided text. 
A safer version of the example above would be: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# #[test] #[should_panic(expected = "assertion failed")] fn it_works() { @@ -219,6 +260,10 @@ fn it_works() { That's all there is to the basics! Let's write one 'real' test: ```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# pub fn add_two(a: i32) -> i32 { a + 2 } @@ -237,7 +282,15 @@ some known arguments and compare it to the expected output. Sometimes a few specific tests can be very time-consuming to execute. These can be disabled by default by using the `ignore` attribute: -```rust +```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# +pub fn add_two(a: i32) -> i32 { + a + 2 +} + #[test] fn it_works() { assert_eq!(4, add_two(2)); @@ -246,7 +299,7 @@ fn it_works() { #[test] #[ignore] fn expensive_test() { - // code that takes an hour to run + // Code that takes an hour to run... } ``` @@ -255,8 +308,9 @@ not: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Finished debug [unoptimized + debuginfo] target(s) in 0.20 secs + Running target/debug/deps/adder-941f01916ca4a642 running 2 tests test expensive_test ... ignored @@ -275,7 +329,8 @@ The expensive tests can be run explicitly using `cargo test -- --ignored`: ```bash $ cargo test -- --ignored - Running target/adder-91b3e234d4ed382a + Finished debug [unoptimized + debuginfo] target(s) in 0.0 secs + Running target/debug/deps/adder-941f01916ca4a642 running 1 test test expensive_test ... ok @@ -295,10 +350,17 @@ which is why the command is `cargo test -- --ignored`. 
# The `tests` module There is one way in which our existing example is not idiomatic: it's -missing the `tests` module. The idiomatic way of writing our example -looks like this: +missing the `tests` module. You might have noticed this test module was +present in the code that was initially generated with `cargo new` but +was missing from our last example. Let's explain what this does. + +The idiomatic way of writing our example looks like this: ```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# pub fn add_two(a: i32) -> i32 { a + 2 } @@ -327,6 +389,10 @@ a large module, and so this is a common use of globs. Let's change our `src/lib.rs` to make use of it: ```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# pub fn add_two(a: i32) -> i32 { a + 2 } @@ -347,8 +413,8 @@ Note the different `use` line. Now we run our tests: ```bash $ cargo test Updating registry `https://github.com/rust-lang/crates.io-index` - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Running target/debug/deps/adder-91b3e234d4ed382a running 1 test test tests::it_works ... ok @@ -371,10 +437,17 @@ the `tests` directory. # The `tests` directory -To write an integration test, let's make a `tests` directory, and -put a `tests/lib.rs` file inside, with this as its contents: +Each file in `tests/*.rs` directory is treated as an individual crate. +To write an integration test, let's make a `tests` directory and +put a `tests/integration_test.rs` file inside with this as its contents: ```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# +# // Sadly, this code will not work in play.rust-lang.org, because we have no +# // crate adder to import. 
You'll need to try this part on your own machine. extern crate adder; #[test] @@ -384,8 +457,8 @@ fn it_works() { ``` This looks similar to our previous tests, but slightly different. We now have -an `extern crate adder` at the top. This is because the tests in the `tests` -directory are an entirely separate crate, and so we need to import our library. +an `extern crate adder` at the top. This is because each test in the `tests` +directory is an entirely separate crate, and so we need to import our library. This is also why `tests` is a suitable place to write integration-style tests: they use the library like any other consumer of it would. @@ -393,15 +466,15 @@ Let's run them: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/you/projects/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Running target/debug/deps/adder-91b3e234d4ed382a running 1 test test tests::it_works ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured - Running target/lib-c18e7d3494509e74 + Running target/debug/integration_test-68064b69521c828a running 1 test test it_works ... ok @@ -418,6 +491,11 @@ test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured Now we have three sections: our previous test is also run, as well as our new one. +Cargo will ignore files in subdirectories of the `tests/` directory. +Therefore shared modules in integration tests are possible. +For example, `tests/common/mod.rs` is not separately compiled by Cargo but can +be imported in every test with `mod common;`. + That's all there is to the `tests` directory. The `tests` module isn't needed here, since the whole thing is focused on tests. @@ -432,6 +510,10 @@ running examples in your documentation (**note:** this only works in library crates, not binary crates). 
Here's a fleshed-out `src/lib.rs` with examples: ```rust,ignore +# // The next line exists to trick play.rust-lang.org into running our code as a +# // test: +# // fn main +# //! The `adder` crate provides functions that add numbers to other numbers. //! //! # Examples @@ -473,15 +555,15 @@ Let's run the tests again: ```bash $ cargo test - Compiling adder v0.0.1 (file:///home/steve/tmp/adder) - Running target/adder-91b3e234d4ed382a + Compiling adder v0.1.0 (file:///home/you/projects/adder) + Running target/debug/deps/adder-91b3e234d4ed382a running 1 test test tests::it_works ... ok test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured - Running target/lib-c18e7d3494509e74 + Running target/debug/integration_test-68064b69521c828a running 1 test test it_works ... ok @@ -505,6 +587,44 @@ you add more examples. We haven’t covered all of the details with writing documentation tests. For more, please see the [Documentation chapter](documentation.html). -One final note: documentation tests *cannot* be run on binary crates. -To see more on file arrangement see the [Crates and -Modules](crates-and-modules.html) section. +# Testing and concurrency + +One thing that is important to note when writing tests is that they may be run +concurrently using threads. For this reason you should take care that your tests +are written in such a way as to not depend on each other, or on any shared +state. "Shared state" can also include the environment, such as the current +working directory, or environment variables. + +If this is an issue, it is possible to control this concurrency, either by +setting the environment variable `RUST_TEST_THREADS`, or by passing the argument +`--test-threads` to the tests: + +```bash +$ RUST_TEST_THREADS=1 cargo test # Run tests with no concurrency +... +$ cargo test -- --test-threads=1 # Same as above +... +``` + +# Test output + +By default Rust's test library captures and discards output to standard +out/error, e.g. output from `println!()`. 
This too can be controlled using the +environment or a switch: + + +```bash +$ RUST_TEST_NOCAPTURE=1 cargo test # Preserve stdout/stderr +... +$ cargo test -- --nocapture # Same as above +... +``` + +However a better method avoiding capture is to use logging rather than raw +output. Rust has a [standard logging API][log], which provides a frontend to +multiple logging implementations. This can be used in conjunction with the +default [env_logger] to output any debugging information in a manner that can be +controlled at runtime. + +[log]: https://crates.io/crates/log +[env_logger]: https://crates.io/crates/env_logger diff --git a/src/doc/book/the-stack-and-the-heap.md b/src/doc/book/the-stack-and-the-heap.md index a7b6faccd8d6e..aee45299cf22d 100644 --- a/src/doc/book/the-stack-and-the-heap.md +++ b/src/doc/book/the-stack-and-the-heap.md @@ -26,6 +26,8 @@ The stack is very fast, and is where memory is allocated in Rust by default. But the allocation is local to a function call, and is limited in size. The heap, on the other hand, is slower, and is explicitly allocated by your program. But it’s effectively unlimited in size, and is globally accessible. +Note this meaning of heap, which allocates arbitrary-sized blocks of memory in arbitrary +order, is quite different from the heap data structure. # The Stack @@ -175,6 +177,7 @@ And then `bold()` calls `italic()`: | **2** | **b**|**100**| | **1** | **a**| **5** | | 0 | x | 42 | + Whew! Our stack is growing tall. After `italic()` is over, its frame is deallocated, leaving only `bold()` and @@ -260,8 +263,7 @@ layout of a program which has been running for a while now: | (230) - 3 | | | | (230) - 4 | | 42 | | ... | ... | ... 
| -| 3 | y | → (230) - 4 | -| 2 | y | 42 | +| 2 | z | → (230) - 4 | | 1 | y | 42 | | 0 | x | → (230) - 1 | diff --git a/src/doc/book/trait-objects.md b/src/doc/book/trait-objects.md index 1d63435ed5fe7..a0396a75fa26e 100644 --- a/src/doc/book/trait-objects.md +++ b/src/doc/book/trait-objects.md @@ -123,7 +123,6 @@ dispatch with trait objects by casting: # trait Foo { fn method(&self) -> String; } # impl Foo for u8 { fn method(&self) -> String { format!("u8: {}", *self) } } # impl Foo for String { fn method(&self) -> String { format!("string: {}", *self) } } - fn do_something(x: &Foo) { x.method(); } @@ -140,7 +139,6 @@ or by coercing: # trait Foo { fn method(&self) -> String; } # impl Foo for u8 { fn method(&self) -> String { format!("u8: {}", *self) } } # impl Foo for String { fn method(&self) -> String { format!("string: {}", *self) } } - fn do_something(x: &Foo) { x.method(); } @@ -223,8 +221,8 @@ struct FooVtable { // u8: fn call_method_on_u8(x: *const ()) -> String { - // the compiler guarantees that this function is only called - // with `x` pointing to a u8 + // The compiler guarantees that this function is only called + // with `x` pointing to a u8. let byte: &u8 = unsafe { &*(x as *const u8) }; byte.method() @@ -235,7 +233,7 @@ static Foo_for_u8_vtable: FooVtable = FooVtable { size: 1, align: 1, - // cast to a function pointer + // Cast to a function pointer: method: call_method_on_u8 as fn(*const ()) -> String, }; @@ -243,8 +241,8 @@ static Foo_for_u8_vtable: FooVtable = FooVtable { // String: fn call_method_on_String(x: *const ()) -> String { - // the compiler guarantees that this function is only called - // with `x` pointing to a String + // The compiler guarantees that this function is only called + // with `x` pointing to a String. 
let string: &String = unsafe { &*(x as *const String) }; string.method() @@ -252,7 +250,7 @@ fn call_method_on_String(x: *const ()) -> String { static Foo_for_String_vtable: FooVtable = FooVtable { destructor: /* compiler magic */, - // values for a 64-bit computer, halve them for 32-bit ones + // Values for a 64-bit computer, halve them for 32-bit ones. size: 24, align: 8, @@ -280,17 +278,17 @@ let x: u8 = 1; // let b: &Foo = &a; let b = TraitObject { - // store the data + // Store the data: data: &a, - // store the methods + // Store the methods: vtable: &Foo_for_String_vtable }; // let y: &Foo = x; let y = TraitObject { - // store the data + // Store the data: data: &x, - // store the methods + // Store the methods: vtable: &Foo_for_u8_vtable }; @@ -306,7 +304,7 @@ let y = TraitObject { Not every trait can be used to make a trait object. For example, vectors implement `Clone`, but if we try to make a trait object: -```ignore +```rust,ignore let v = vec![1, 2, 3]; let o = &v as &Clone; ``` diff --git a/src/doc/book/traits.md b/src/doc/book/traits.md index d40689190e7fe..4747869b65c70 100644 --- a/src/doc/book/traits.md +++ b/src/doc/book/traits.md @@ -47,6 +47,34 @@ As you can see, the `trait` block looks very similar to the `impl` block, but we don’t define a body, only a type signature. When we `impl` a trait, we use `impl Trait for Item`, rather than only `impl Item`. +`Self` may be used in a type annotation to refer to an instance of the type +implementing this trait passed as a parameter. `Self`, `&Self` or `&mut Self` +may be used depending on the level of ownership required. 
+ +```rust +struct Circle { + x: f64, + y: f64, + radius: f64, +} + +trait HasArea { + fn area(&self) -> f64; + + fn is_larger(&self, &Self) -> bool; +} + +impl HasArea for Circle { + fn area(&self) -> f64 { + std::f64::consts::PI * (self.radius * self.radius) + } + + fn is_larger(&self, other: &Self) -> bool { + self.area() > other.area() + } +} +``` + ## Trait bounds on generic functions Traits are useful because they allow a type to make certain promises about its @@ -154,7 +182,7 @@ print_area(5); We get a compile-time error: ```text -error: the trait `HasArea` is not implemented for the type `_` [E0277] +error: the trait bound `_ : HasArea` is not satisfied [E0277] ``` ## Trait bounds on generic structs @@ -195,7 +223,7 @@ fn main() { `is_square()` needs to check that the sides are equal, so the sides must be of a type that implements the [`core::cmp::PartialEq`][PartialEq] trait: -```ignore +```rust,ignore impl Rectangle { ... } ``` @@ -215,28 +243,22 @@ to know more about [operator traits][operators-and-overloading]. # Rules for implementing traits So far, we’ve only added trait implementations to structs, but you can -implement a trait for any type. So technically, we _could_ implement `HasArea` -for `i32`: +implement a trait for any type such as `f32`: ```rust -trait HasArea { - fn area(&self) -> f64; +trait ApproxEqual { + fn approx_equal(&self, other: &Self) -> bool; } - -impl HasArea for i32 { - fn area(&self) -> f64 { - println!("this is silly"); - - *self as f64 +impl ApproxEqual for f32 { + fn approx_equal(&self, other: &Self) -> bool { + // Appropriate for `self` and `other` being close to 1.0. + (self - other).abs() <= ::std::f32::EPSILON } } -5.area(); +println!("{}", 1.0.approx_equal(&1.00000001)); ``` -It is considered poor style to implement methods on such primitive types, even -though it is possible. - This may seem like the Wild West, but there are two restrictions around implementing traits that prevent this from getting out of hand. 
The first is that if the trait isn’t defined in your scope, it doesn’t apply. Here’s an @@ -247,10 +269,10 @@ won’t have its methods: [write]: ../std/io/trait.Write.html ```rust,ignore -let mut f = std::fs::File::open("foo.txt").expect("Couldn’t open foo.txt"); -let buf = b"whatever"; // byte string literal. buf: &[u8; 8] +let mut f = std::fs::File::create("foo.txt").expect("Couldn’t create foo.txt"); +let buf = b"whatever"; // buf: &[u8; 8], a byte string literal. let result = f.write(buf); -# result.unwrap(); // ignore the error +# result.unwrap(); // Ignore the error. ``` Here’s the error: @@ -263,13 +285,13 @@ let result = f.write(buf); We need to `use` the `Write` trait first: -```rust,ignore +```rust,no_run use std::io::Write; -let mut f = std::fs::File::open("foo.txt").expect("Couldn’t open foo.txt"); +let mut f = std::fs::File::create("foo.txt").expect("Couldn’t create foo.txt"); let buf = b"whatever"; let result = f.write(buf); -# result.unwrap(); // ignore the error +# result.unwrap(); // Ignore the error. ``` This will compile without error. @@ -277,16 +299,22 @@ This will compile without error. This means that even if someone does something bad like add methods to `i32`, it won’t affect you, unless you `use` that trait. -There’s one more restriction on implementing traits: either the trait, or the -type you’re writing the `impl` for, must be defined by you. So, we could -implement the `HasArea` type for `i32`, because `HasArea` is in our code. But -if we tried to implement `ToString`, a trait provided by Rust, for `i32`, we could -not, because neither the trait nor the type are in our code. +There’s one more restriction on implementing traits: either the trait +or the type you’re implementing it for must be defined by you. Or more +precisely, one of them must be defined in the same crate as the `impl` +you're writing. For more on Rust's module and package system, see the +chapter on [crates and modules][cm]. 
+ +So, we could implement the `HasArea` type for `i32`, because we defined +`HasArea` in our code. But if we tried to implement `ToString`, a trait +provided by Rust, for `i32`, we could not, because neither the trait nor +the type are defined in our crate. One last thing about traits: generic functions with a trait bound use ‘monomorphization’ (mono: one, morph: form), so they are statically dispatched. What’s that mean? Check out the chapter on [trait objects][to] for more details. +[cm]: crates-and-modules.html [to]: trait-objects.html # Multiple trait bounds @@ -385,16 +413,16 @@ impl ConvertTo for i32 { fn convert(&self) -> i64 { *self as i64 } } -// can be called with T == i32 +// Can be called with T == i32. fn normal>(x: &T) -> i64 { x.convert() } -// can be called with T == i64 -fn inverse() -> T - // this is using ConvertTo as if it were "ConvertTo" +// Can be called with T == i64. +fn inverse(x: i32) -> T + // This is using ConvertTo as if it were "ConvertTo". where i32: ConvertTo { - 42.convert() + x.convert() } ``` @@ -442,15 +470,15 @@ impl Foo for OverrideDefault { fn is_invalid(&self) -> bool { println!("Called OverrideDefault.is_invalid!"); - true // overrides the expected value of is_invalid() + true // Overrides the expected value of `is_invalid()`. } } let default = UseDefault; -assert!(!default.is_invalid()); // prints "Called UseDefault.is_valid." +assert!(!default.is_invalid()); // Prints "Called UseDefault.is_valid." let over = OverrideDefault; -assert!(over.is_invalid()); // prints "Called OverrideDefault.is_invalid!" +assert!(over.is_invalid()); // Prints "Called OverrideDefault.is_invalid!" 
``` # Inheritance @@ -490,7 +518,7 @@ impl FooBar for Baz { If we forget to implement `Foo`, Rust will tell us: ```text -error: the trait `main::Foo` is not implemented for the type `main::Baz` [E0277] +error: the trait bound `main::Baz : main::Foo` is not satisfied [E0277] ``` # Deriving diff --git a/src/doc/book/type-aliases.md b/src/doc/book/type-aliases.md index def2e31f3514b..3798336f0a524 100644 --- a/src/doc/book/type-aliases.md +++ b/src/doc/book/type-aliases.md @@ -1,4 +1,4 @@ -% `type` Aliases +% Type Aliases The `type` keyword lets you declare an alias of another type: diff --git a/src/doc/book/unsafe.md b/src/doc/book/unsafe.md index ecd196a9f0d1f..a272afa70bb1b 100644 --- a/src/doc/book/unsafe.md +++ b/src/doc/book/unsafe.md @@ -4,7 +4,7 @@ Rust’s main draw is its powerful static guarantees about behavior. But safety checks are conservative by nature: there are some programs that are actually safe, but the compiler is not able to verify this is true. To write these kinds of programs, we need to tell the compiler to relax its restrictions a bit. For -this, Rust has a keyword, `unsafe`. Code using `unsafe` has less restrictions +this, Rust has a keyword, `unsafe`. Code using `unsafe` has fewer restrictions than normal code does. Let’s go over the syntax, and then we’ll talk semantics. `unsafe` is used in @@ -12,7 +12,7 @@ four contexts. The first one is to mark a function as unsafe: ```rust unsafe fn danger_will_robinson() { - // scary stuff + // Scary stuff... } ``` @@ -23,7 +23,7 @@ The second use of `unsafe` is an unsafe block: ```rust unsafe { - // scary stuff + // Scary stuff... } ``` @@ -63,7 +63,7 @@ In addition, the following are all undefined behaviors in Rust, and must be avoided, even when writing `unsafe` code: * Data races -* Dereferencing a null/dangling raw pointer +* Dereferencing a NULL/dangling raw pointer * Reads of [undef][undef] (uninitialized) memory * Breaking the [pointer aliasing rules][aliasing] with raw pointers. 
* `&mut T` and `&T` follow LLVM’s scoped [noalias][noalias] model, except if @@ -77,7 +77,7 @@ avoided, even when writing `unsafe` code: * Using `std::ptr::copy_nonoverlapping_memory` (`memcpy32`/`memcpy64` intrinsics) on overlapping buffers * Invalid values in primitive types, even in private fields/locals: - * Null/dangling references or boxes + * NULL/dangling references or boxes * A value other than `false` (0) or `true` (1) in a `bool` * A discriminant in an `enum` not included in its type definition * A value in a `char` which is a surrogate or above `char::MAX` diff --git a/src/doc/book/unsized-types.md b/src/doc/book/unsized-types.md index 73b90355e4f1b..a23470d39fa09 100644 --- a/src/doc/book/unsized-types.md +++ b/src/doc/book/unsized-types.md @@ -47,7 +47,7 @@ pointers, can use this `impl`. # ?Sized If you want to write a function that accepts a dynamically sized type, you -can use the special bound, `?Sized`: +can use the special bound syntax, `?Sized`: ```rust struct Foo { @@ -55,6 +55,7 @@ struct Foo { } ``` -This `?`, read as “T may be `Sized`”, means that this bound is special: it -lets us match more kinds, not less. It’s almost like every `T` implicitly has -`T: Sized`, and the `?` undoes this default. +This `?Sized`, read as “T may or may not be `Sized`”, which allows us to match +both sized and unsized types. All generic type parameters implicitly +have the `Sized` bound, so the `?Sized` can be used to opt-out of the implicit +bound. diff --git a/src/doc/book/using-rust-without-the-standard-library.md b/src/doc/book/using-rust-without-the-standard-library.md index 59182e1a4efce..69958dd3e68a4 100644 --- a/src/doc/book/using-rust-without-the-standard-library.md +++ b/src/doc/book/using-rust-without-the-standard-library.md @@ -11,7 +11,7 @@ don’t want to use the standard library via an attribute: `#![no_std]`. 
> For details on binaries without the standard library, see [the nightly > chapter on `#![no_std]`](no-stdlib.html) -To use `#![no_std]`, add a it to your crate root: +To use `#![no_std]`, add it to your crate root: ```rust #![no_std] @@ -22,11 +22,12 @@ fn plus_one(x: i32) -> i32 { ``` Much of the functionality that’s exposed in the standard library is also -available via the [`core` crate](../core/). When we’re using the standard -library, Rust automatically brings `std` into scope, allowing you to use -its features without an explicit import. By the same token, when using -`!#[no_std]`, Rust will bring `core` into scope for you, as well as [its -prelude](../core/prelude/v1/). This means that a lot of code will Just Work: +available via the [`core` crate](../core/index.html). When we’re using the +standard library, Rust automatically brings `std` into scope, allowing you to +use its features without an explicit import. By the same token, when using +`#![no_std]`, Rust will bring `core` into scope for you, as well as [its +prelude](../core/prelude/v1/index.html). This means that a lot of code will Just +Work: ```rust #![no_std] diff --git a/src/doc/book/variable-bindings.md b/src/doc/book/variable-bindings.md index 29b59937a63fa..37b6c0513fc96 100644 --- a/src/doc/book/variable-bindings.md +++ b/src/doc/book/variable-bindings.md @@ -18,14 +18,14 @@ function, rather than leaving it off. Otherwise, you’ll get an error. In many languages, a variable binding would be called a *variable*, but Rust’s variable bindings have a few tricks up their sleeves. For example the -left-hand side of a `let` expression is a ‘[pattern][pattern]’, not a +left-hand side of a `let` statement is a ‘[pattern][pattern]’, not a variable name. This means we can do things like: ```rust let (x, y) = (1, 2); ``` -After this expression is evaluated, `x` will be one, and `y` will be two. +After this statement is evaluated, `x` will be one, and `y` will be two. 
Patterns are really powerful, and have [their own section][pattern] in the book. We don’t need those features for now, so we’ll keep this in the back of our minds as we go forward. @@ -37,8 +37,8 @@ of our minds as we go forward. Rust is a statically typed language, which means that we specify our types up front, and they’re checked at compile time. So why does our first example compile? Well, Rust has this thing called ‘type inference’. If it can figure -out what the type of something is, Rust doesn’t require you to actually type it -out. +out what the type of something is, Rust doesn’t require you to explicitly type +it out. We can add the type if we want to, though. Types come after a colon (`:`): @@ -47,7 +47,7 @@ let x: i32 = 5; ``` If I asked you to read this out loud to the rest of the class, you’d say “`x` -is a binding with the type `i32` and the value `five`.” +is a binding with the type `i32` and the value `5`.” In this case we chose to represent `x` as a 32-bit signed integer. Rust has many different primitive integer types. They begin with `i` for signed integers @@ -125,7 +125,7 @@ warning, but it will still print "Hello, world!": ```text Compiling hello_world v0.0.1 (file:///home/you/projects/hello_world) -src/main.rs:2:9: 2:10 warning: unused variable: `x`, #[warn(unused_variable)] +src/main.rs:2:9: 2:10 warning: unused variable: `x`, #[warn(unused_variables)] on by default src/main.rs:2 let x: i32; ^ @@ -159,8 +159,9 @@ error: aborting due to previous error Could not compile `hello_world`. ``` -Rust will not let us use a value that has not been initialized. Next, let’s -talk about this stuff we've added to `println!`. +Rust will not let us use a value that has not been initialized. + +Let us take a minute to talk about this stuff we've added to `println!`. If you include two curly braces (`{}`, some call them moustaches...) 
in your string to print, Rust will interpret this as a request to interpolate some sort @@ -193,7 +194,7 @@ fn main() { let y: i32 = 3; println!("The value of x is {} and value of y is {}", x, y); } - println!("The value of x is {} and value of y is {}", x, y); // This won't work + println!("The value of x is {} and value of y is {}", x, y); // This won't work. } ``` @@ -206,7 +207,7 @@ Instead we get this error: $ cargo build Compiling hello v0.1.0 (file:///home/you/projects/hello_world) main.rs:7:62: 7:63 error: unresolved name `y`. Did you mean `x`? [E0425] -main.rs:7 println!("The value of x is {} and value of y is {}", x, y); // This won't work +main.rs:7 println!("The value of x is {} and value of y is {}", x, y); // This won't work. ^ note: in expansion of format_args! :2:25: 2:56 note: expansion site @@ -222,31 +223,34 @@ To learn more, run the command again with --verbose. ``` Additionally, variable bindings can be shadowed. This means that a later -variable binding with the same name as another binding, that's currently in -scope, will override the previous binding. +variable binding with the same name as another binding that is currently in +scope will override the previous binding. ```rust let x: i32 = 8; { - println!("{}", x); // Prints "8" + println!("{}", x); // Prints "8". let x = 12; - println!("{}", x); // Prints "12" + println!("{}", x); // Prints "12". } -println!("{}", x); // Prints "8" +println!("{}", x); // Prints "8". let x = 42; -println!("{}", x); // Prints "42" +println!("{}", x); // Prints "42". ``` Shadowing and mutable bindings may appear as two sides of the same coin, but they are two distinct concepts that can't always be used interchangeably. For one, shadowing enables us to rebind a name to a value of a different type. It -is also possible to change the mutability of a binding. +is also possible to change the mutability of a binding. 
Note that shadowing a +name does not alter or destroy the value it was bound to, and the value will +continue to exist until it goes out of scope, even if it is no longer accessible +by any means. ```rust let mut x: i32 = 1; x = 7; -let x = x; // x is now immutable and is bound to 7 +let x = x; // `x` is now immutable and is bound to `7`. let y = 4; -let y = "I can also be bound to text!"; // y is now of a different type +let y = "I can also be bound to text!"; // `y` is now of a different type. ``` diff --git a/src/doc/book/vectors.md b/src/doc/book/vectors.md index b09735c3feee6..b948a54f44a5b 100644 --- a/src/doc/book/vectors.md +++ b/src/doc/book/vectors.md @@ -11,15 +11,21 @@ let v = vec![1, 2, 3, 4, 5]; // v: Vec ``` (Notice that unlike the `println!` macro we’ve used in the past, we use square -brackets `[]` with `vec!` macro. Rust allows you to use either in either situation, -this is just convention.) +brackets `[]` with `vec!` macro. Rust allows you to use either in either +situation, this is just convention.) There’s an alternate form of `vec!` for repeating an initial value: ```rust -let v = vec![0; 10]; // ten zeroes +let v = vec![0; 10]; // A vector of ten zeroes. ``` +Vectors store their contents as contiguous arrays of `T` on the heap. This means +that they must be able to know the size of `T` at compile time (that is, how +many bytes are needed to store a `T`?). The size of some things can't be known +at compile time. For these you'll have to store a pointer to that thing: +thankfully, the [`Box`][box] type works perfectly for this. + ## Accessing elements To get the value at a particular index in the vector, we use `[]`s: @@ -34,24 +40,24 @@ The indices count from `0`, so the third element is `v[2]`. 
It’s also important to note that you must index with the `usize` type: -```ignore +```rust,ignore let v = vec![1, 2, 3, 4, 5]; let i: usize = 0; let j: i32 = 0; -// works +// Works: v[i]; -// doesn’t +// Doesn’t: v[j]; ``` Indexing with a non-`usize` type gives an error that looks like this: ```text -error: the trait `core::ops::Index` is not implemented for the type -`collections::vec::Vec<_>` [E0277] +error: the trait bound `collections::vec::Vec<_> : core::ops::Index` +is not satisfied [E0277] v[j]; ^~~~ note: the type `collections::vec::Vec<_>` cannot be indexed by `i32` @@ -65,7 +71,7 @@ you cannot index with an `i32`. If you try to access an index that doesn’t exist: -```ignore +```rust,ignore let v = vec![1, 2, 3]; println!("Item 7 is {}", v[7]); ``` @@ -73,7 +79,7 @@ println!("Item 7 is {}", v[7]); then the current thread will [panic] with a message like this: ```text -thread '
' panicked at 'index out of bounds: the len is 3 but the index is 7' +thread 'main' panicked at 'index out of bounds: the len is 3 but the index is 7' ``` If you want to handle out-of-bounds errors without panicking, you can use @@ -109,11 +115,42 @@ for i in v { } ``` +Note: You cannot use the vector again once you have iterated by taking ownership of the vector. +You can iterate the vector multiple times by taking a reference to the vector whilst iterating. +For example, the following code does not compile. + +```rust,ignore +let v = vec![1, 2, 3, 4, 5]; + +for i in v { + println!("Take ownership of the vector and its element {}", i); +} + +for i in v { + println!("Take ownership of the vector and its element {}", i); +} +``` + +Whereas the following works perfectly, + +```rust +let v = vec![1, 2, 3, 4, 5]; + +for i in &v { + println!("This is a reference to {}", i); +} + +for i in &v { + println!("This is a reference to {}", i); +} +``` + Vectors have many more useful methods, which you can read about in [their API documentation][vec]. [vec]: ../std/vec/index.html +[box]: ../std/boxed/index.html [generic]: generics.html [panic]: concurrency.html#panics -[get]: http://doc.rust-lang.org/std/vec/struct.Vec.html#method.get -[get_mut]: http://doc.rust-lang.org/std/vec/struct.Vec.html#method.get_mut +[get]: ../std/vec/struct.Vec.html#method.get +[get_mut]: ../std/vec/struct.Vec.html#method.get_mut diff --git a/src/doc/footer.inc b/src/doc/footer.inc index b5eb589eb5398..77e151235e822 100644 --- a/src/doc/footer.inc +++ b/src/doc/footer.inc @@ -1,8 +1,7 @@ - diff --git a/src/doc/grammar.md b/src/doc/grammar.md index a400e70e97dfd..690d44cc2cb7b 100644 --- a/src/doc/grammar.md +++ b/src/doc/grammar.md @@ -172,6 +172,11 @@ token : simple_token | ident | literal | symbol | whitespace token ; Each of these keywords has special meaning in its grammar, and all of them are excluded from the `ident` rule. +Not all of these keywords are used by the language. 
Some of them were used +before Rust 1.0, and were left reserved once their implementations were +removed. Some of them were reserved before 1.0 to make space for possible +future features. + ### Literals ```antlr @@ -516,7 +521,7 @@ struct_expr : expr_path '{' ident ':' expr ### Block expressions ```antlr -block_expr : '{' [ stmt ';' | item ] * +block_expr : '{' [ stmt | item ] * [ expr ] '}' ; ``` @@ -759,6 +764,13 @@ bound-list := bound | bound '+' bound-list bound := path | lifetime ``` +### Never type +An empty type + +```antlr +never_type : "!" ; +``` + ### Object types **FIXME:** grammar? diff --git a/src/doc/guide-plugins.md b/src/doc/guide-plugins.md index d6495d02e1189..742433b99ac5f 100644 --- a/src/doc/guide-plugins.md +++ b/src/doc/guide-plugins.md @@ -1,4 +1,4 @@ % The (old) Rust Compiler Plugins Guide This content has moved into -[the Rust Programming Language book](book/plugins.html). +[the Rust Programming Language book](book/compiler-plugins.html). diff --git a/src/doc/index.md b/src/doc/index.md index f8a1ec134d924..71dfcf0b067ec 100644 --- a/src/doc/index.md +++ b/src/doc/index.md @@ -17,7 +17,7 @@ the language. [**The Rust Reference**][ref]. While Rust does not have a specification, the reference tries to describe its working in -detail. It tends to be out of date. +detail. It is accurate, but not necessarily complete. [**Standard Library API Reference**][api]. Documentation for the standard library. diff --git a/src/doc/nomicon/README.md b/src/doc/nomicon/README.md index 4554652a17a2a..b2e1eac5e0dcc 100644 --- a/src/doc/nomicon/README.md +++ b/src/doc/nomicon/README.md @@ -35,4 +35,4 @@ exception-safety, pointer aliasing, memory models, and even some type-theory. We will also be spending a lot of time talking about the different kinds of safety and guarantees. 
-[trpl]: ../book/ +[trpl]: ../book/index.html diff --git a/src/doc/nomicon/coercions.md b/src/doc/nomicon/coercions.md index 1d2897ce3bd1f..6a9ebd6edf8fb 100644 --- a/src/doc/nomicon/coercions.md +++ b/src/doc/nomicon/coercions.md @@ -64,7 +64,7 @@ fn main() { ``` ```text -:10:5: 10:8 error: the trait `Trait` is not implemented for the type `&mut i32` [E0277] +:10:5: 10:8 error: the trait bound `&mut i32 : Trait` is not satisfied [E0277] :10 foo(t); ^~~ ``` diff --git a/src/doc/nomicon/lifetime-elision.md b/src/doc/nomicon/lifetime-elision.md index 41014f46dd953..bcd93a58d859a 100644 --- a/src/doc/nomicon/lifetime-elision.md +++ b/src/doc/nomicon/lifetime-elision.md @@ -55,8 +55,8 @@ fn frob(s: &str, t: &str) -> &str; // ILLEGAL fn get_mut(&mut self) -> &mut T; // elided fn get_mut<'a>(&'a mut self) -> &'a mut T; // expanded -fn args(&mut self, args: &[T]) -> &mut Command // elided -fn args<'a, 'b, T:ToCStr>(&'a mut self, args: &'b [T]) -> &'a mut Command // expanded +fn args(&mut self, args: &[T]) -> &mut Command // elided +fn args<'a, 'b, T: ToCStr>(&'a mut self, args: &'b [T]) -> &'a mut Command // expanded fn new(buf: &mut [u8]) -> BufWriter; // elided fn new<'a>(buf: &'a mut [u8]) -> BufWriter<'a> // expanded diff --git a/src/doc/nomicon/other-reprs.md b/src/doc/nomicon/other-reprs.md index 2639c1d4d6f76..b124f3ffc4652 100644 --- a/src/doc/nomicon/other-reprs.md +++ b/src/doc/nomicon/other-reprs.md @@ -57,7 +57,7 @@ These reprs have no effect on a struct. # repr(packed) -`repr(packed)` forces rust to strip any padding, and only align the type to a +`repr(packed)` forces Rust to strip any padding, and only align the type to a byte. This may improve the memory footprint, but will likely have other negative side-effects. 
diff --git a/src/doc/nomicon/ownership.md b/src/doc/nomicon/ownership.md index 6be8d3b70286a..a6ecf6ab91b4f 100644 --- a/src/doc/nomicon/ownership.md +++ b/src/doc/nomicon/ownership.md @@ -52,7 +52,7 @@ let mut data = vec![1, 2, 3]; let x = &data[0]; // OH NO! `push` causes the backing storage of `data` to be reallocated. -// Dangling pointer! User after free! Alas! +// Dangling pointer! Use after free! Alas! // (this does not compile in Rust) data.push(4); diff --git a/src/doc/nomicon/phantom-data.md b/src/doc/nomicon/phantom-data.md index 0d7ec7f161796..189695716deb1 100644 --- a/src/doc/nomicon/phantom-data.md +++ b/src/doc/nomicon/phantom-data.md @@ -50,13 +50,13 @@ struct Vec { } ``` -Unlike the previous example it *appears* that everything is exactly as we -want. Every generic argument to Vec shows up in the at least one field. +Unlike the previous example, it *appears* that everything is exactly as we +want. Every generic argument to Vec shows up in at least one field. Good to go! Nope. -The drop checker will generously determine that Vec does not own any values +The drop checker will generously determine that `Vec` does not own any values of type T. This will in turn make it conclude that it doesn't need to worry about Vec dropping any T's in its destructor for determining drop check soundness. 
This will in turn allow people to create unsoundness using @@ -81,7 +81,6 @@ Raw pointers that own an allocation is such a pervasive pattern that the standard library made a utility for itself called `Unique` which: * wraps a `*const T` for variance -* includes a `PhantomData`, +* includes a `PhantomData` * auto-derives Send/Sync as if T was contained * marks the pointer as NonZero for the null-pointer optimization - diff --git a/src/doc/nomicon/safe-unsafe-meaning.md b/src/doc/nomicon/safe-unsafe-meaning.md index 5fd61eb51dd1c..adede0ec91117 100644 --- a/src/doc/nomicon/safe-unsafe-meaning.md +++ b/src/doc/nomicon/safe-unsafe-meaning.md @@ -1,150 +1,123 @@ % How Safe and Unsafe Interact -So what's the relationship between Safe and Unsafe Rust? How do they interact? - -Rust models the separation between Safe and Unsafe Rust with the `unsafe` -keyword, which can be thought as a sort of *foreign function interface* (FFI) -between Safe and Unsafe Rust. This is the magic behind why we can say Safe Rust -is a safe language: all the scary unsafe bits are relegated exclusively to FFI -*just like every other safe language*. - -However because one language is a subset of the other, the two can be cleanly -intermixed as long as the boundary between Safe and Unsafe Rust is denoted with -the `unsafe` keyword. No need to write headers, initialize runtimes, or any of -that other FFI boiler-plate. - -There are several places `unsafe` can appear in Rust today, which can largely be -grouped into two categories: - -* There are unchecked contracts here. To declare you understand this, I require -you to write `unsafe` elsewhere: - * On functions, `unsafe` is declaring the function to be unsafe to call. - Users of the function must check the documentation to determine what this - means, and then have to write `unsafe` somewhere to identify that they're - aware of the danger. 
- * On trait declarations, `unsafe` is declaring that *implementing* the trait - is an unsafe operation, as it has contracts that other unsafe code is free - to trust blindly. (More on this below.) - -* I am declaring that I have, to the best of my knowledge, adhered to the -unchecked contracts: - * On trait implementations, `unsafe` is declaring that the contract of the - `unsafe` trait has been upheld. - * On blocks, `unsafe` is declaring any unsafety from an unsafe - operation within to be handled, and therefore the parent function is safe. - -There is also `#[unsafe_no_drop_flag]`, which is a special case that exists for -historical reasons and is in the process of being phased out. See the section on -[drop flags] for details. - -Some examples of unsafe functions: - -* `slice::get_unchecked` will perform unchecked indexing, allowing memory - safety to be freely violated. -* every raw pointer to sized type has intrinsic `offset` method that invokes - Undefined Behavior if it is not "in bounds" as defined by LLVM. -* `mem::transmute` reinterprets some value as having the given type, - bypassing type safety in arbitrary ways. (see [conversions] for details) -* All FFI functions are `unsafe` because they can do arbitrary things. - C being an obvious culprit, but generally any language can do something - that Rust isn't happy about. +What's the relationship between Safe Rust and Unsafe Rust? How do they +interact? + +The separation between Safe Rust and Unsafe Rust is controlled with the +`unsafe` keyword, which acts as an interface from one to the other. This is +why we can say Safe Rust is a safe language: all the unsafe parts are kept +exclusively behind the boundary. + +The `unsafe` keyword has two uses: to declare the existence of contracts the +compiler can't check, and to declare that the adherence of some code to +those contracts has been checked by the programmer. 
+ +You can use `unsafe` to indicate the existence of unchecked contracts on +_functions_ and on _trait declarations_. On functions, `unsafe` means that +users of the function must check that function's documentation to ensure +they are using it in a way that maintains the contracts the function +requires. On trait declarations, `unsafe` means that implementors of the +trait must check the trait documentation to ensure their implementation +maintains the contracts the trait requires. + +You can use `unsafe` on a block to declare that all constraints required +by an unsafe function within the block have been adhered to, and the code +can therefore be trusted. You can use `unsafe` on a trait implementation +to declare that the implementation of that trait has adhered to whatever +contracts the trait's documentation requires. + +The standard library has a number of unsafe functions, including: + +* `slice::get_unchecked`, which performs unchecked indexing, allowing + memory safety to be freely violated. +* `mem::transmute` reinterprets some value as having a given type, bypassing + type safety in arbitrary ways (see [conversions] for details). +* Every raw pointer to a sized type has an intrinsic `offset` method that + invokes Undefined Behavior if the passed offset is not "in bounds" as + defined by LLVM. +* All FFI functions are `unsafe` because the other language can do arbitrary + operations that the Rust compiler can't check. As of Rust 1.0 there are exactly two unsafe traits: -* `Send` is a marker trait (it has no actual API) that promises implementors - are safe to send (move) to another thread. -* `Sync` is a marker trait that promises that threads can safely share - implementors through a shared reference.
- -The need for unsafe traits boils down to the fundamental property of safe code: - -**No matter how completely awful Safe code is, it can't cause Undefined -Behavior.** - -This means that Unsafe Rust, **the royal vanguard of Undefined Behavior**, has to be -*super paranoid* about generic safe code. To be clear, Unsafe Rust is totally free to trust -specific safe code. Anything else would degenerate into infinite spirals of -paranoid despair. In particular it's generally regarded as ok to trust the standard library -to be correct. `std` is effectively an extension of the language, and you -really just have to trust the language. If `std` fails to uphold the -guarantees it declares, then it's basically a language bug. - -That said, it would be best to minimize *needlessly* relying on properties of -concrete safe code. Bugs happen! Of course, I must reinforce that this is only -a concern for Unsafe code. Safe code can blindly trust anyone and everyone -as far as basic memory-safety is concerned. - -On the other hand, safe traits are free to declare arbitrary contracts, but because -implementing them is safe, unsafe code can't trust those contracts to actually -be upheld. This is different from the concrete case because *anyone* can -randomly implement the interface. There is something fundamentally different -about trusting a particular piece of code to be correct, and trusting *all the -code that will ever be written* to be correct. - -For instance Rust has `PartialOrd` and `Ord` traits to try to differentiate -between types which can "just" be compared, and those that actually implement a -total ordering. Pretty much every API that wants to work with data that can be -compared wants Ord data. For instance, a sorted map like BTreeMap -*doesn't even make sense* for partially ordered types. If you claim to implement -Ord for a type, but don't actually provide a proper total ordering, BTreeMap will -get *really confused* and start making a total mess of itself. 
Data that is -inserted may be impossible to find! - -But that's okay. BTreeMap is safe, so it guarantees that even if you give it a -completely garbage Ord implementation, it will still do something *safe*. You -won't start reading uninitialized or unallocated memory. In fact, BTreeMap -manages to not actually lose any of your data. When the map is dropped, all the -destructors will be successfully called! Hooray! - -However BTreeMap is implemented using a modest spoonful of Unsafe Rust (most collections -are). That means that it's not necessarily *trivially true* that a bad Ord -implementation will make BTreeMap behave safely. BTreeMap must be sure not to rely -on Ord *where safety is at stake*. Ord is provided by safe code, and safety is not -safe code's responsibility to uphold. - -But wouldn't it be grand if there was some way for Unsafe to trust some trait -contracts *somewhere*? This is the problem that unsafe traits tackle: by marking -*the trait itself* as unsafe to implement, unsafe code can trust the implementation -to uphold the trait's contract. Although the trait implementation may be -incorrect in arbitrary other ways. - -For instance, given a hypothetical UnsafeOrd trait, this is technically a valid -implementation: +* `Send` is a marker trait (a trait with no API) that promises implementors are + safe to send (move) to another thread. +* `Sync` is a marker trait that promises threads can safely share implementors + through a shared reference. + +Much of the Rust standard library also uses Unsafe Rust internally, although +these implementations are rigorously manually checked, and the Safe Rust +interfaces provided on top of these implementations can be assumed to be safe. 
+ +The need for all of this separation boils down to a single fundamental property +of Safe Rust: + +**No matter what, Safe Rust can't cause Undefined Behavior.** + +The design of the safe/unsafe split means that Safe Rust inherently has to +trust that any Unsafe Rust it touches has been written correctly (meaning +the Unsafe Rust actually maintains whatever contracts it is supposed to +maintain). On the other hand, Unsafe Rust has to be very careful about +trusting Safe Rust. + +As an example, Rust has the `PartialOrd` and `Ord` traits to differentiate +between types which can "just" be compared, and those that provide a total +ordering (where every value of the type is either equal to, greater than, +or less than any other value of the same type). The sorted map type +`BTreeMap` doesn't make sense for partially-ordered types, and so it +requires that any key type for it implements the `Ord` trait. However, +`BTreeMap` has Unsafe Rust code inside of its implementation, and this +Unsafe Rust code cannot assume that any `Ord` implementation it gets makes +sense. The unsafe portions of `BTreeMap`'s internals have to be careful to +maintain all necessary contracts, even if a key type's `Ord` implementation +does not implement a total ordering. + +Unsafe Rust cannot automatically trust Safe Rust. When writing Unsafe Rust, +you must be careful to only rely on specific Safe Rust code, and not make +assumptions about potential future Safe Rust code providing the same +guarantees. + +This is the problem that `unsafe` traits exist to resolve.
The `BTreeMap` +type could theoretically require that keys implement a new trait called +`UnsafeOrd`, rather than `Ord`, that might look like this: ```rust -# use std::cmp::Ordering; -# struct MyType; -# unsafe trait UnsafeOrd { fn cmp(&self, other: &Self) -> Ordering; } -unsafe impl UnsafeOrd for MyType { - fn cmp(&self, other: &Self) -> Ordering { - Ordering::Equal - } +use std::cmp::Ordering; + +unsafe trait UnsafeOrd { + fn cmp(&self, other: &Self) -> Ordering; } ``` -But it's probably not the implementation you want. - -Rust has traditionally avoided making traits unsafe because it makes Unsafe -pervasive, which is not desirable. The reason Send and Sync are unsafe is because thread -safety is a *fundamental property* that unsafe code cannot possibly hope to defend -against in the same way it would defend against a bad Ord implementation. The -only way to possibly defend against thread-unsafety would be to *not use -threading at all*. Making every load and store atomic isn't even sufficient, -because it's possible for complex invariants to exist between disjoint locations -in memory. For instance, the pointer and capacity of a Vec must be in sync. - -Even concurrent paradigms that are traditionally regarded as Totally Safe like -message passing implicitly rely on some notion of thread safety -- are you -really message-passing if you pass a pointer? Send and Sync therefore require -some fundamental level of trust that Safe code can't provide, so they must be -unsafe to implement. To help obviate the pervasive unsafety that this would -introduce, Send (resp. Sync) is automatically derived for all types composed only -of Send (resp. Sync) values. 99% of types are Send and Sync, and 99% of those -never actually say it (the remaining 1% is overwhelmingly synchronization -primitives). - - - +Then, a type would use `unsafe` to implement `UnsafeOrd`, indicating that +they've ensured their implementation maintains whatever contracts the +trait expects. 
In this situation, the Unsafe Rust in the internals of +`BTreeMap` could trust that the key type's `UnsafeOrd` implementation is +correct. If it isn't, it's the fault of the unsafe trait implementation +code, which is consistent with Rust's safety guarantees. + +The decision of whether to mark a trait `unsafe` is an API design choice. +Rust has traditionally avoided marking traits unsafe because it makes Unsafe +Rust pervasive, which is not desirable. `Send` and `Sync` are marked unsafe +because thread safety is a *fundamental property* that unsafe code can't +possibly hope to defend against in the way it could defend against a bad +`Ord` implementation. The decision of whether to mark your own traits `unsafe` +depends on the same sort of consideration. If `unsafe` code cannot reasonably +expect to defend against a bad implementation of the trait, then marking the +trait `unsafe` is a reasonable choice. + +As an aside, while `Send` and `Sync` are `unsafe` traits, they are +automatically implemented for types when such derivations are provably safe +to do. `Send` is automatically derived for all types composed only of values +whose types also implement `Send`. `Sync` is automatically derived for all +types composed only of values whose types also implement `Sync`. + +This is the dance of Safe Rust and Unsafe Rust. It is designed to make using +Safe Rust as ergonomic as possible, but requires extra effort and care when +writing Unsafe Rust. The rest of the book is largely a discussion of the sort +of care that must be taken, and what contracts it is expected of Unsafe Rust +to uphold. [drop flags]: drop-flags.html [conversions]: conversions.html + diff --git a/src/doc/nomicon/subtyping.md b/src/doc/nomicon/subtyping.md index 5def5c3903353..eb940e811a4fa 100644 --- a/src/doc/nomicon/subtyping.md +++ b/src/doc/nomicon/subtyping.md @@ -53,7 +53,7 @@ inferred variance, so `Fn(T)` is invariant in `T`). 
Some important variances: * `&'a T` is variant over `'a` and `T` (as is `*const T` by metaphor) -* `&'a mut T` is variant with over `'a` but invariant over `T` +* `&'a mut T` is variant over `'a` but invariant over `T` * `Fn(T) -> U` is invariant over `T`, but variant over `U` * `Box`, `Vec`, and all other collections are variant over the types of their contents diff --git a/src/doc/nomicon/vec-alloc.md b/src/doc/nomicon/vec-alloc.md index c2ae1a4eb6d26..bc60a577bd35c 100644 --- a/src/doc/nomicon/vec-alloc.md +++ b/src/doc/nomicon/vec-alloc.md @@ -150,7 +150,7 @@ LLVM needs to work with different languages' semantics and custom allocators, it can't really intimately understand allocation. Instead, the main idea behind allocation is "doesn't overlap with other stuff". That is, heap allocations, stack allocations, and globals don't randomly overlap. Yep, it's about alias -analysis. As such, Rust can technically play a bit fast an loose with the notion of +analysis. As such, Rust can technically play a bit fast and loose with the notion of an allocation as long as it's *consistent*. Getting back to the empty allocation case, there are a couple of places where diff --git a/src/doc/nomicon/vec.md b/src/doc/nomicon/vec.md index 63f83788c4bac..691301946de4b 100644 --- a/src/doc/nomicon/vec.md +++ b/src/doc/nomicon/vec.md @@ -2,7 +2,7 @@ To bring everything together, we're going to write `std::Vec` from scratch. Because all the best tools for writing unsafe code are unstable, this -project will only work on nightly (as of Rust 1.2.0). With the exception of the +project will only work on nightly (as of Rust 1.9.0). With the exception of the allocator API, much of the unstable code we'll use is expected to be stabilized in a similar form as it is today. 
diff --git a/src/doc/reference.md b/src/doc/reference.md index f0fdae27ac7fb..8655bab4b21bf 100644 --- a/src/doc/reference.md +++ b/src/doc/reference.md @@ -21,6 +21,11 @@ separately by extracting documentation attributes from their source code. Many of the features that one might expect to be language features are library features in Rust, so what you're looking for may be there, not here. +Finally, this document is not normative. It may include details that are +specific to `rustc` itself, and should not be taken as a specification for +the Rust language. We intend to produce such a document someday, but this +is what we have for now. + You may also be interested in the [grammar]. [book]: book/index.html @@ -104,7 +109,7 @@ comments (`/** ... */`), are interpreted as a special syntax for `doc` `#[doc="..."]` around the body of the comment, i.e., `/// Foo` turns into `#[doc="Foo"]`. -Line comments beginning with `//!` and block comments `/*! ... !*/` are +Line comments beginning with `//!` and block comments `/*! ... */` are doc comments that apply to the parent of the comment, rather than the item that follows. That is, they are equivalent to writing `#![doc="..."]` around the body of the comment. `//!` comments are usually used to document @@ -114,12 +119,20 @@ Non-doc comments are interpreted as a form of whitespace. 
## Whitespace -Whitespace is any non-empty string containing only the following characters: +Whitespace is any non-empty string containing only characters that have the +`Pattern_White_Space` Unicode property, namely: +- `U+0009` (horizontal tab, `'\t'`) +- `U+000A` (line feed, `'\n'`) +- `U+000B` (vertical tab) +- `U+000C` (form feed) +- `U+000D` (carriage return, `'\r'`) - `U+0020` (space, `' '`) -- `U+0009` (tab, `'\t'`) -- `U+000A` (LF, `'\n'`) -- `U+000D` (CR, `'\r'`) +- `U+0085` (next line) +- `U+200E` (left-to-right mark) +- `U+200F` (right-to-left mark) +- `U+2028` (line separator) +- `U+2029` (paragraph separator) Rust is a "free-form" language, meaning that all forms of whitespace serve only to separate _tokens_ in the grammar, and have no semantic significance. @@ -236,6 +249,8 @@ following forms: * A _whitespace escape_ is one of the characters `U+006E` (`n`), `U+0072` (`r`), or `U+0074` (`t`), denoting the Unicode values `U+000A` (LF), `U+000D` (CR) or `U+0009` (HT) respectively. +* The _null escape_ is the character `U+0030` (`0`) and denotes the Unicode + value `U+0000` (NUL). * The _backslash escape_ is the character `U+005C` (`\`) which must be escaped in order to denote *itself*. @@ -297,6 +312,8 @@ following forms: * A _whitespace escape_ is one of the characters `U+006E` (`n`), `U+0072` (`r`), or `U+0074` (`t`), denoting the bytes values `0x0A` (ASCII LF), `0x0D` (ASCII CR) or `0x09` (ASCII HT) respectively. +* The _null escape_ is the character `U+0030` (`0`) and denotes the byte + value `0x00` (ASCII NUL). * The _backslash escape_ is the character `U+005C` (`\`) which must be escaped in order to denote its ASCII encoding `0x5C`. @@ -375,6 +392,10 @@ Examples of integer literals of various forms: 0usize; // type usize ``` +Note that the Rust syntax considers `-1i8` as an application of the [unary minus +operator](#unary-operator-expressions) to an integer literal `1i8`, rather than +a single integer literal. 
+ ##### Floating-point literals A _floating-point literal_ has one of two forms: @@ -719,13 +740,14 @@ There are several kinds of item: * [`extern crate` declarations](#extern-crate-declarations) * [`use` declarations](#use-declarations) * [modules](#modules) -* [functions](#functions) +* [function definitions](#functions) +* [`extern` blocks](#external-blocks) * [type definitions](grammar.html#type-definitions) -* [structs](#structs) -* [enumerations](#enumerations) +* [struct definitions](#structs) +* [enumeration definitions](#enumerations) * [constant items](#constant-items) * [static items](#static-items) -* [traits](#traits) +* [trait definitions](#traits) * [implementations](#implementations) Some items form an implicit scope for the declaration of sub-items. In other @@ -837,12 +859,26 @@ extern crate std; // equivalent to: extern crate std as std; extern crate std as ruststd; // linking to 'std' under another name ``` +When naming Rust crates, hyphens are disallowed. However, Cargo packages may +make use of them. In such case, when `Cargo.toml` doesn't specify a crate name, +Cargo will transparently replace `-` with `_` (Refer to [RFC 940] for more +details). + +Here is an example: + +```{.ignore} +// Importing the Cargo package hello-world +extern crate hello_world; // hyphen replaced with an underscore +``` + +[RFC 940]: https://github.com/rust-lang/rfcs/blob/master/text/0940-hyphens-considered-harmful.md + #### Use declarations A _use declaration_ creates one or more local name bindings synonymous with some other [path](#paths). Usually a `use` declaration is used to shorten the -path required to refer to a module item. These declarations may appear at the -top of [modules](#modules) and [blocks](grammar.html#block-expressions). +path required to refer to a module item. These declarations may appear in +[modules](#modules) and [blocks](grammar.html#block-expressions), usually at the top. 
> **Note**: Unlike in many languages, > `use` declarations in Rust do *not* declare linkage dependency with external crates. @@ -984,8 +1020,8 @@ fn first((value, _): (i32, i32)) -> i32 { value } #### Generic functions A _generic function_ allows one or more _parameterized types_ to appear in its -signature. Each type parameter must be explicitly declared, in an -angle-bracket-enclosed, comma-separated list following the function name. +signature. Each type parameter must be explicitly declared in an +angle-bracket-enclosed and comma-separated list, following the function name. ```rust,ignore // foo is generic over A and B @@ -1110,6 +1146,16 @@ type Point = (u8, u8); let p: Point = (41, 68); ``` +Currently a type alias to an enum type cannot be used to qualify the +constructors: + +``` +enum E { A } +type F = E; +let _: F = E::A; // OK +// let _: F = F::A; // Doesn't work +``` + ### Structs A _struct_ is a nominal [struct type](#struct-types) defined with the @@ -1137,7 +1183,6 @@ the list of fields entirely. Such a struct implicitly defines a constant of its type with the same name. For example: ``` -# #![feature(braced_empty_structs)] struct Cookie; let c = [Cookie, Cookie {}, Cookie, Cookie {}]; ``` @@ -1145,7 +1190,6 @@ let c = [Cookie, Cookie {}, Cookie, Cookie {}]; is equivalent to ``` -# #![feature(braced_empty_structs)] struct Cookie {} const Cookie: Cookie = Cookie {}; let c = [Cookie, Cookie {}, Cookie, Cookie {}]; @@ -1179,7 +1223,7 @@ Enumeration constructors can have either named or unnamed fields: ```rust enum Animal { Dog (String, f64), - Cat { name: String, weight: f64 } + Cat { name: String, weight: f64 }, } let mut a: Animal = Animal::Dog("Cocoa".to_string(), 37.2); @@ -1189,7 +1233,8 @@ a = Animal::Cat { name: "Spotty".to_string(), weight: 2.7 }; In this example, `Cat` is a _struct-like enum variant_, whereas `Dog` is simply called an enum variant. -Enums have a discriminant. 
You can assign them explicitly: +Each enum value has a _discriminant_ which is an integer associated to it. You +can specify it explicitly: ``` enum Foo { @@ -1197,10 +1242,15 @@ enum Foo { } ``` -If a discriminant isn't assigned, they start at zero, and add one for each +The right hand side of the specification is interpreted as an `isize` value, +but the compiler is allowed to use a smaller type in the actual memory layout. +The [`repr` attribute](#ffi-attributes) can be added in order to change +the type of the right hand side and specify the memory layout. + +If a discriminant isn't specified, they start at zero, and add one for each variant, in order. -You can cast an enum to get this value: +You can cast an enum to get its discriminant: ``` # enum Foo { Bar = 123 } @@ -1237,12 +1287,12 @@ const STRING: &'static str = "bitstring"; struct BitsNStrings<'a> { mybits: [u32; 2], - mystring: &'a str + mystring: &'a str, } const BITS_N_STRINGS: BitsNStrings<'static> = BitsNStrings { mybits: BITS, - mystring: STRING + mystring: STRING, }; ``` @@ -1606,17 +1656,47 @@ Functions within external blocks may be called by Rust code, just like functions defined in Rust. The Rust compiler automatically translates between the Rust ABI and the foreign ABI. -A number of [attributes](#attributes) control the behavior of external blocks. +A number of [attributes](#ffi-attributes) control the behavior of external blocks. By default external blocks assume that the library they are calling uses the -standard C "cdecl" ABI. Other ABIs may be specified using an `abi` string, as -shown here: +standard C ABI on the specific platform. Other ABIs may be specified using an +`abi` string, as shown here: ```ignore // Interface to the Windows API extern "stdcall" { } ``` +There are three ABI strings which are cross-platform, and which all compilers +are guaranteed to support: + +* `extern "Rust"` -- The default ABI when you write a normal `fn foo()` in any + Rust code. 
+* `extern "C"` -- This is the same as `extern fn foo()`; whatever the default + your C compiler supports. +* `extern "system"` -- Usually the same as `extern "C"`, except on Win32, in + which case it's `"stdcall"`, or what you should use to link to the Windows API + itself + +There are also some platform-specific ABI strings: + +* `extern "cdecl"` -- The default for x86\_32 C code. +* `extern "stdcall"` -- The default for the Win32 API on x86\_32. +* `extern "win64"` -- The default for C code on x86\_64 Windows. +* `extern "sysv64"` -- The default for C code on non-Windows x86\_64. +* `extern "aapcs"` -- The default for ARM. +* `extern "fastcall"` -- The `fastcall` ABI -- corresponds to MSVC's + `__fastcall` and GCC and clang's `__attribute__((fastcall))` +* `extern "vectorcall"` -- The `vectorcall` ABI -- corresponds to MSVC's + `__vectorcall` and clang's `__attribute__((vectorcall))` + +Finally, there are some rustc-specific ABI strings: + +* `extern "rust-intrinsic"` -- The ABI of rustc intrinsics. +* `extern "rust-call"` -- The ABI of the Fn::call trait functions. +* `extern "platform-intrinsic"` -- Specific platform intrinsics -- like, for + example, `sqrt` -- have this ABI. You should never have to deal with it. + The `link` attribute allows the name of the library to be specified. When specified the compiler will attempt to link against the native library of the specified name. @@ -1661,7 +1741,7 @@ struct Foo; // Declare a public struct with a private field pub struct Bar { - field: i32 + field: i32, } // Declare a public enum with two public variants @@ -1764,7 +1844,7 @@ pub mod submodule { # fn main() {} ``` -For a rust program to pass the privacy checking pass, all paths must be valid +For a Rust program to pass the privacy checking pass, all paths must be valid accesses given the two rules above. This includes all use statements, expressions, types, etc. 
@@ -1883,6 +1963,8 @@ type int8_t = i8; - `should_panic` - indicates that this test function should panic, inverting the success condition. - `cold` - The function is unlikely to be executed, so optimize it (and calls to it) differently. +- `naked` - The function utilizes a custom ABI or custom inline ASM that requires + epilogue and prologue to be skipped. ### Static-only attributes @@ -1959,6 +2041,7 @@ macro scope. ### Miscellaneous attributes +- `deprecated` - mark the item as deprecated; the full attribute is `#[deprecated(since = "crate version", note = "...")]`, where both arguments are optional. - `export_name` - on statics and functions, this determines the name of the exported symbol. - `link_section` - on statics and functions, this specifies the section of the @@ -1982,10 +2065,6 @@ macro scope. outside of its dynamic extent), and thus this attribute has the word "unsafe" in its name. To use this, the `unsafe_destructor_blind_to_params` feature gate must be enabled. -- `unsafe_no_drop_flag` - on structs, remove the flag that prevents - destructors from being run twice. Destructors might be run multiple times on - the same object with this attribute. To use this, the `unsafe_no_drop_flag` feature - gate must be enabled. - `doc` - Doc comments such as `/// foo` are equivalent to `#[doc = "foo"]`. - `rustc_on_unimplemented` - Write a custom note to be shown along with the error when the trait is found to be unimplemented on a type. @@ -1994,6 +2073,9 @@ macro scope. trait of the same name. `{Self}` will be replaced with the type that is supposed to implement the trait but doesn't. To use this, the `on_unimplemented` feature gate must be enabled. +- `must_use` - on structs and enums, will warn if a value of this type isn't used or + assigned to a variable. You may also include an optional message by using + `#[must_use = "message"]` which will be given alongside the warning. 
### Conditional compilation @@ -2039,33 +2121,43 @@ arbitrarily complex configurations through nesting. The following configurations must be defined by the implementation: -* `debug_assertions` - Enabled by default when compiling without optimizations. - This can be used to enable extra debugging code in development but not in - production. For example, it controls the behavior of the standard library's - `debug_assert!` macro. -* `target_arch = "..."` - Target CPU architecture, such as `"x86"`, `"x86_64"` - `"mips"`, `"powerpc"`, `"powerpc64"`, `"powerpc64le"`, `"arm"`, or `"aarch64"`. -* `target_endian = "..."` - Endianness of the target CPU, either `"little"` or - `"big"`. -* `target_env = ".."` - An option provided by the compiler by default - describing the runtime environment of the target platform. Some examples of - this are `musl` for builds targeting the MUSL libc implementation, `msvc` for - Windows builds targeting MSVC, and `gnu` frequently the rest of the time. This - option may also be blank on some platforms. +* `target_arch = "..."` - Target CPU architecture, such as `"x86"`, + `"x86_64"` `"mips"`, `"powerpc"`, `"powerpc64"`, `"arm"`, or + `"aarch64"`. This value is closely related to the first element of + the platform target triple, though it is not identical. +* `target_os = "..."` - Operating system of the target, examples + include `"windows"`, `"macos"`, `"ios"`, `"linux"`, `"android"`, + `"freebsd"`, `"dragonfly"`, `"bitrig"` , `"openbsd"` or + `"netbsd"`. This value is closely related to the second and third + element of the platform target triple, though it is not identical. * `target_family = "..."` - Operating system family of the target, e. g. `"unix"` or `"windows"`. The value of this configuration option is defined as a configuration itself, like `unix` or `windows`. 
-* `target_os = "..."` - Operating system of the target, examples include - `"windows"`, `"macos"`, `"ios"`, `"linux"`, `"android"`, `"freebsd"`, `"dragonfly"`, - `"bitrig"` , `"openbsd"` or `"netbsd"`. +* `unix` - See `target_family`. +* `windows` - See `target_family`. +* `target_env = ".."` - Further disambiguates the target platform with + information about the ABI/libc. Presently this value is either + `"gnu"`, `"msvc"`, `"musl"`, or the empty string. For historical + reasons this value has only been defined as non-empty when needed + for disambiguation. Thus on many GNU platforms this value will be + empty. This value is closely related to the fourth element of the + platform target triple, though it is not identical. For example, + embedded ABIs such as `gnueabihf` will simply define `target_env` as + `"gnu"`. +* `target_endian = "..."` - Endianness of the target CPU, either `"little"` or + `"big"`. * `target_pointer_width = "..."` - Target pointer width in bits. This is set to `"32"` for targets with 32-bit pointers, and likewise set to `"64"` for 64-bit pointers. +* `target_has_atomic = "..."` - Set of integer sizes on which the target can perform + atomic operations. Values are `"8"`, `"16"`, `"32"`, `"64"` and `"ptr"`. * `target_vendor = "..."` - Vendor of the target, for example `apple`, `pc`, or simply `"unknown"`. * `test` - Enabled when compiling the test harness (using the `--test` flag). -* `unix` - See `target_family`. -* `windows` - See `target_family`. +* `debug_assertions` - Enabled by default when compiling without optimizations. + This can be used to enable extra debugging code in development but not in + production. For example, it controls the behavior of the standard library's + `debug_assert!` macro. You can also set another attribute based on a `cfg` variable with `cfg_attr`: @@ -2095,7 +2187,7 @@ along with their default settings. [Compiler plugins](book/compiler-plugins.html#lint-plugins) can provide additional lint checks. 
```{.ignore} -mod m1 { +pub mod m1 { // Missing documentation is ignored here #[allow(missing_docs)] pub fn undocumented_one() -> i32 { 1 } @@ -2115,9 +2207,9 @@ check on and off: ```{.ignore} #[warn(missing_docs)] -mod m2{ +pub mod m2{ #[allow(missing_docs)] - mod nested { + pub mod nested { // Missing documentation is ignored here pub fn undocumented_one() -> i32 { 1 } @@ -2137,7 +2229,7 @@ that lint check: ```{.ignore} #[forbid(missing_docs)] -mod m3 { +pub mod m3 { // Attempting to toggle warning signals an error here #[allow(missing_docs)] /// Returns 2. @@ -2197,7 +2289,7 @@ the `PartialEq` or `Clone` constraints for the appropriate `impl`: #[derive(PartialEq, Clone)] struct Foo { a: i32, - b: T + b: T, } ``` @@ -2263,6 +2355,9 @@ The currently implemented features of the reference compiler are: * `cfg_target_vendor` - Allows conditional compilation using the `target_vendor` matcher which is subject to change. +* `cfg_target_has_atomic` - Allows conditional compilation using the `target_has_atomic` + matcher which is subject to change. + * `concat_idents` - Allows use of the `concat_idents` macro, which is in many ways insufficient for concatenating identifiers, and may be removed entirely for something more wholesome. @@ -2275,6 +2370,10 @@ The currently implemented features of the reference compiler are: `#[derive_Foo] #[derive_Bar]`, which can be user-defined syntax extensions. +* `inclusive_range_syntax` - Allows use of the `a...b` and `...b` syntax for inclusive ranges. + +* `inclusive_range` - Allows use of the types that represent desugared inclusive ranges. + * `intrinsics` - Allows use of the "rust-intrinsics" ABI. Compiler intrinsics are inherently unstable and no promise about them is made. @@ -2348,6 +2447,9 @@ The currently implemented features of the reference compiler are: into a Rust program. This capability, especially the signature for the annotated function, is subject to change. 
+* `static_in_const` - Enables lifetime elision with a `'static` default for + `const` and `static` item declarations. + * `thread_local` - The usage of the `#[thread_local]` attribute is experimental and should be seen as unstable. This attribute is used to declare a `static` as being unique per-thread leveraging @@ -2361,12 +2463,6 @@ The currently implemented features of the reference compiler are: * `unboxed_closures` - Rust's new closure design, which is currently a work in progress feature with many known bugs. -* `unsafe_no_drop_flag` - Allows use of the `#[unsafe_no_drop_flag]` attribute, - which removes hidden flag added to a type that - implements the `Drop` trait. The design for the - `Drop` flag is subject to change, and this feature - may be removed in the future. - * `unmarked_api` - Allows use of items within a `#![staged_api]` crate which have not been marked with a stability marker. Such items should not be allowed by the compiler to exist, @@ -2379,19 +2475,19 @@ The currently implemented features of the reference compiler are: internally without imposing on callers (i.e. making them behave like function calls in terms of encapsulation). -* - `default_type_parameter_fallback` - Allows type parameter defaults to - influence type inference. -* - `braced_empty_structs` - Allows use of empty structs and enum variants with braces. -* - `stmt_expr_attributes` - Allows attributes on expressions and - non-item statements. +* `default_type_parameter_fallback` - Allows type parameter defaults to + influence type inference. + +* `stmt_expr_attributes` - Allows attributes on expressions. -* - `deprecated` - Allows using the `#[deprecated]` attribute. +* `type_ascription` - Allows type ascription expressions `expr: Type`. -* - `type_ascription` - Allows type ascription expressions `expr: Type`. +* `abi_vectorcall` - Allows the usage of the vectorcall calling convention + (e.g. 
`extern "vectorcall" fn fn_();`) -* - `abi_vectorcall` - Allows the usage of the vectorcall calling convention - (e.g. `extern "vectorcall" func fn_();`) +* `abi_sysv64` - Allows the usage of the system V AMD64 calling convention + (e.g. `extern "sysv64" fn fn_();`) If a feature is promoted to a language feature, then all existing programs will start to receive compilation warnings about `#![feature]` directives which enabled @@ -2584,7 +2680,7 @@ comma: There are several forms of struct expressions. A _struct expression_ consists of the [path](#paths) of a [struct item](#structs), followed by -a brace-enclosed list of one or more comma-separated name-value pairs, +a brace-enclosed list of zero or more comma-separated name-value pairs, providing the field values of a new instance of the struct. A field name can be any identifier, and is separated from its value expression by a colon. The location denoted by a struct field is mutable if and only if the @@ -2603,10 +2699,12 @@ The following are examples of struct expressions: ``` # struct Point { x: f64, y: f64 } +# struct NothingInMe { } # struct TuplePoint(f64, f64); # mod game { pub struct User<'a> { pub name: &'a str, pub age: u32, pub score: usize } } # struct Cookie; fn some_fn(t: T) {} Point {x: 10.0, y: 20.0}; +NothingInMe {}; TuplePoint(10.0, 20.0); let u = game::User {name: "Joe", age: 35, score: 100_000}; some_fn::(Cookie); @@ -2746,13 +2844,34 @@ let y = 0..10; assert_eq!(x, y); ``` +Similarly, the `...` operator will construct an object of one of the +`std::ops::RangeInclusive` variants. + +``` +# #![feature(inclusive_range_syntax)] +1...2; // std::ops::RangeInclusive +...4; // std::ops::RangeToInclusive +``` + +The following expressions are equivalent. + +``` +# #![feature(inclusive_range_syntax, inclusive_range)] +let x = std::ops::RangeInclusive::NonEmpty {start: 0, end: 10}; +let y = 0...10; + +assert_eq!(x, y); +``` + ### Unary operator expressions -Rust defines the following unary operators. 
They are all written as prefix operators, -before the expression they apply to. +Rust defines the following unary operators. With the exception of `?`, they are +all written as prefix operators, before the expression they apply to. * `-` - : Negation. May only be applied to numeric types. + : Negation. Signed integer types and floating-point types support negation. It + is an error to apply negation to unsigned types; for example, the compiler + rejects `-1u32`. * `*` : Dereference. When applied to a [pointer](#pointer-types) it denotes the pointed-to location. For pointers to mutable locations, the resulting @@ -2776,6 +2895,10 @@ before the expression they apply to. If the `&` or `&mut` operators are applied to an rvalue, a temporary value is created; the lifetime of this temporary value is defined by [syntactic rules](#temporary-lifetimes). +* `?` + : Propagating errors if applied to `Err(_)` and unwrapping if + applied to `Ok(_)`. Only works on the `Result` type, + and written in postfix notation. ### Binary operator expressions @@ -2924,7 +3047,7 @@ The precedence of Rust binary operators is ordered as follows, going from strong to weak: ```{.text .precedence} -as +as : * / % + - << >> @@ -2934,7 +3057,9 @@ as == != < > <= >= && || -= .. +.. ... +<- += ``` Operators at the same precedence level are evaluated left-to-right. [Unary @@ -2993,10 +3118,12 @@ the lambda expression captures its environment by reference, effectively borrowing pointers to all outer variables mentioned inside the function. Alternately, the compiler may infer that a lambda expression should copy or move values (depending on their type) from the environment into the lambda -expression's captured environment. +expression's captured environment. A lambda can be forced to capture its +environment by moving values by prefixing it with the `move` keyword. 
In this example, we define a function `ten_times` that takes a higher-order -function argument, and we then call it with a lambda expression as an argument: +function argument, and we then call it with a lambda expression as an argument, +followed by a lambda expression that moves values from its environment. ``` fn ten_times(f: F) where F: Fn(i32) { @@ -3006,6 +3133,9 @@ fn ten_times(f: F) where F: Fn(i32) { } ten_times(|j| println!("hello, {}", j)); + +let word = "konnichiwa".to_owned(); +ten_times(move |j| println!("{}, {}", word, j)); ``` ### Infinite loops @@ -3036,7 +3166,7 @@ the case of a `while` loop, the head is the conditional expression controlling the loop. In the case of a `for` loop, the head is the call-expression controlling the loop. If the label is present, then `continue 'foo` returns control to the head of the loop with label `'foo`, which need not be the -innermost label enclosing the `break` expression, but must enclose it. +innermost label enclosing the `continue` expression, but must enclose it. A `continue` expression is only permitted in the body of a loop. @@ -3212,7 +3342,7 @@ may refer to the variables bound within the pattern they follow. let message = match maybe_digit { Some(x) if x < 10 => process_digit(x), Some(x) => process_other(x), - None => panic!() + None => panic!(), }; ``` @@ -3282,6 +3412,10 @@ The primitive types are the following: * The boolean type `bool` with values `true` and `false`. * The machine types (integer and floating-point). * The machine-dependent integer types. +* Arrays +* Tuples +* Slices +* Function pointers #### Machine types @@ -3504,7 +3638,7 @@ An example of a `fn` type: ``` fn add(x: i32, y: i32) -> i32 { - return x + y; + x + y } let mut x = add(5,7); @@ -3564,8 +3698,9 @@ Each instance of a trait object includes: each method of `SomeTrait` that `T` implements, a pointer to `T`'s implementation (i.e. a function pointer). -The purpose of trait objects is to permit "late binding" of methods. 
A call to -a method on a trait object is only resolved to a vtable entry at compile time. +The purpose of trait objects is to permit "late binding" of methods. Calling a +method on a trait object results in virtual dispatch at runtime: that is, a +function pointer is loaded from the trait object vtable and invoked indirectly. The actual implementation for each vtable entry can vary on an object-by-object basis. @@ -3629,6 +3764,21 @@ The special type `Self` has a meaning within traits and impls. In a trait defini to an implicit type parameter representing the "implementing" type. In an impl, it is an alias for the implementing type. For example, in: +``` +pub trait From { + fn from(T) -> Self; +} + +impl From for String { + fn from(x: i32) -> Self { + x.to_string() + } +} +``` + +The notation `Self` in the impl refers to the implementing type: `String`. In another +example: + ``` trait Printable { fn make_string(&self) -> String; @@ -3667,9 +3817,9 @@ Since `'static` "lives longer" than `'a`, `&'static str` is a subtype of ## Type coercions -Coercions are defined in [RFC401]. A coercion is implicit and has no syntax. +Coercions are defined in [RFC 401]. A coercion is implicit and has no syntax. -[RFC401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md +[RFC 401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md ### Coercion sites @@ -3774,7 +3924,7 @@ Coercion is allowed between the following types: use std::ops::Deref; struct CharContainer { - value: char + value: char, } impl Deref for CharContainer { @@ -3809,7 +3959,7 @@ Coercion is allowed between the following types: In the future, coerce_inner will be recursively extended to tuples and structs. In addition, coercions from sub-traits to super-traits will be - added. See [RFC401] for more details. + added. See [RFC 401] for more details. 
# Special traits @@ -3837,6 +3987,16 @@ the top-level type for the implementation of the called method. If no such metho found, `.deref()` is called and the compiler continues to search for the method implementation in the returned type `U`. +## The `Send` trait + +The `Send` trait indicates that a value of this type is safe to send from one +thread to another. + +## The `Sync` trait + +The `Sync` trait indicates that a value of this type is safe to share between +multiple threads. + # Memory model A Rust program's memory consists of a static set of *items* and a *heap*. @@ -3858,6 +4018,9 @@ The _heap_ is a general term that describes boxes. The lifetime of an allocation in the heap depends on the lifetime of the box values pointing to it. Since box values may themselves be passed in and out of frames, or stored in the heap, heap allocations may outlive the frame they are allocated within. +An allocation in the heap is guaranteed to reside at a single location in the +heap for the whole lifetime of the allocation - it will never be relocated as +a result of moving a box value. ### Memory ownership @@ -3884,9 +4047,9 @@ Methods that take either `self` or `Box` can optionally place them in a mutable variable by prefixing them with `mut` (similar to regular arguments): ``` -trait Changer { - fn change(mut self) -> Self; - fn modify(mut self: Box) -> Box; +trait Changer: Sized { + fn change(mut self) {} + fn modify(mut self: Box) {} } ``` @@ -3939,6 +4102,12 @@ be ignored in favor of only building the artifacts specified by command line. Rust code into an existing non-Rust application because it will not have dynamic dependencies on other Rust code. +* `--crate-type=cdylib`, `#[crate_type = "cdylib"]` - A dynamic system + library will be produced. This is used when compiling Rust code as + a dynamic library to be loaded from another language. This output type will + create `*.so` files on Linux, `*.dylib` files on OSX, and `*.dll` files on + Windows. 
+ * `--crate-type=rlib`, `#[crate_type = "rlib"]` - A "Rust library" file will be produced. This is used as an intermediate artifact and can be thought of as a "static Rust library". These `rlib` files, unlike `staticlib` files, are @@ -4060,7 +4229,7 @@ the guarantee that these issues are never caused by safe code. * Breaking the [pointer aliasing rules](http://llvm.org/docs/LangRef.html#pointer-aliasing-rules) with raw pointers (a subset of the rules used by C) -* `&mut` and `&` follow LLVM’s scoped [noalias] model, except if the `&T` +* `&mut T` and `&T` follow LLVM’s scoped [noalias] model, except if the `&T` contains an `UnsafeCell`. Unsafe code must not violate these aliasing guarantees. * Mutating non-mutable data (that is, data reached through a shared reference or diff --git a/src/doc/rust.css b/src/doc/rust.css index 874f69766196f..932594b99126d 100644 --- a/src/doc/rust.css +++ b/src/doc/rust.css @@ -159,7 +159,7 @@ em { footer { border-top: 1px solid #ddd; - font-size: 14.3px; + font-size: 14px; font-style: italic; padding-top: 5px; margin-top: 3em; @@ -229,7 +229,7 @@ a > code { pre.rust .kw { color: #8959A8; } pre.rust .kw-2, pre.rust .prelude-ty { color: #4271AE; } pre.rust .number, pre.rust .string { color: #718C00; } -pre.rust .self, pre.rust .boolval, pre.rust .prelude-val, +pre.rust .self, pre.rust .bool-val, pre.rust .prelude-val, pre.rust .attribute, pre.rust .attribute .ident { color: #C82829; } pre.rust .comment { color: #8E908C; } pre.rust .doccomment { color: #4D4D4C; } @@ -336,13 +336,11 @@ table th { /* Code snippets */ -.rusttest { display: none; } pre.rust { position: relative; } a.test-arrow { + background-color: rgba(78, 139, 202, 0.2); display: inline-block; position: absolute; - - background-color: #4e8bca; color: #f5f5f5; padding: 5px 10px 5px 10px; border-radius: 5px; @@ -350,6 +348,10 @@ a.test-arrow { top: 5px; right: 5px; } +a.test-arrow:hover{ + background-color: #4e8bca; + text-decoration: none; +} .unstable-feature { border: 
2px solid red; diff --git a/src/doc/rustc-ux-guidelines.md b/src/doc/rustc-ux-guidelines.md index 563614f873d22..15b3bfebfac2e 100644 --- a/src/doc/rustc-ux-guidelines.md +++ b/src/doc/rustc-ux-guidelines.md @@ -1,3 +1,5 @@ +% Rustc UX guidelines + Don't forget the user. Whether human or another program, such as an IDE, a good user experience with the compiler goes a long way into making developer lives better. We don't want users to be baffled by compiler output or @@ -54,7 +56,19 @@ Error explanations are long form descriptions of error messages provided with the compiler. They are accessible via the `--explain` flag. Each explanation comes with an example of how to trigger it and advice on how to fix it. -* All of them are accessible [online](https://github.com/rust-lang/rust/blob/master/src/librustc/diagnostics.rs). +* All of them are accessible [online](http://doc.rust-lang.org/error-index.html), + which are auto-generated from rustc source code in different places: + [librustc](https://github.com/rust-lang/rust/blob/master/src/librustc/diagnostics.rs), + [librustc_borrowck](https://github.com/rust-lang/rust/blob/master/src/librustc_borrowck/diagnostics.rs), + [librustc_const_eval](https://github.com/rust-lang/rust/blob/master/src/librustc_const_eval/diagnostics.rs), + [librustc_lint](https://github.com/rust-lang/rust/blob/master/src/librustc_lint/types.rs), + [librustc_metadata](https://github.com/rust-lang/rust/blob/master/src/librustc_metadata/diagnostics.rs), + [librustc_mir](https://github.com/rust-lang/rust/blob/master/src/librustc_mir/diagnostics.rs), + [librustc_passes](https://github.com/rust-lang/rust/blob/master/src/librustc_passes/diagnostics.rs), + [librustc_privacy](https://github.com/rust-lang/rust/blob/master/src/librustc_privacy/diagnostics.rs), + 
[librustc_resolve](https://github.com/rust-lang/rust/blob/master/src/librustc_resolve/diagnostics.rs), + [librustc_trans](https://github.com/rust-lang/rust/blob/master/src/librustc_trans/diagnostics.rs), + [librustc_typeck](https://github.com/rust-lang/rust/blob/master/src/librustc_typeck/diagnostics.rs). * Explanations have full markdown support. Use it, especially to highlight code with backticks. * When talking about the compiler, call it `the compiler`, not `Rust` or @@ -70,4 +84,4 @@ understandable compiler scripts. * The `--verbose` flag is for adding verbose information to `rustc` output when not compiling a program. For example, using it with the `--version` flag gives information about the hashes of the code. -* Experimental flags and options must be guarded behind the `-Z unstable-options` flag. \ No newline at end of file +* Experimental flags and options must be guarded behind the `-Z unstable-options` flag. diff --git a/src/doc/style/README.md b/src/doc/style/README.md deleted file mode 100644 index 5ab1a1d9c10f4..0000000000000 --- a/src/doc/style/README.md +++ /dev/null @@ -1,64 +0,0 @@ -% Style Guidelines - -This document collects the emerging principles, conventions, abstractions, and -best practices for writing Rust code. - -Since Rust is evolving at a rapid pace, these guidelines are -preliminary. The hope is that writing them down explicitly will help -drive discussion, consensus and adoption. - -Whenever feasible, guidelines provide specific examples from Rust's standard -libraries. - -### Guideline statuses - -Every guideline has a status: - -* **[FIXME]**: Marks places where there is more work to be done. In - some cases, that just means going through the RFC process. - -* **[FIXME #NNNNN]**: Like **[FIXME]**, but links to the issue tracker. - -* **[RFC #NNNN]**: Marks accepted guidelines, linking to the rust-lang - RFC establishing them. 
- -### Guideline stabilization - -One purpose of these guidelines is to reach decisions on a number of -cross-cutting API and stylistic choices. Discussion and development of -the guidelines will happen primarily on https://internals.rust-lang.org/, -using the Guidelines category. Discussion can also occur on the -[guidelines issue tracker](https://github.com/rust-lang/rust-guidelines). - -Guidelines that are under development or discussion will be marked with the -status **[FIXME]**, with a link to the issue tracker when appropriate. - -Once a concrete guideline is ready to be proposed, it should be filed -as an [FIXME: needs RFC](https://github.com/rust-lang/rfcs). If the RFC is -accepted, the official guidelines will be updated to match, and will -include the tag **[RFC #NNNN]** linking to the RFC document. - -### What's in this document - -This document is broken into four parts: - -* **[Style](style/README.md)** provides a set of rules governing naming conventions, - whitespace, and other stylistic issues. - -* **[Guidelines by Rust feature](features/README.md)** places the focus on each of - Rust's features, starting from expressions and working the way out toward - crates, dispensing guidelines relevant to each. - -* **Topical guidelines and patterns**. The rest of the document proceeds by - cross-cutting topic, starting with - [Ownership and resources](ownership/README.md). - -* **[APIs for a changing Rust](changing/README.md)** - discusses the forward-compatibility hazards, especially those that interact - with the pre-1.0 library stabilization process. - -> **[FIXME]** Add cross-references throughout this document to the tutorial, -> reference manual, and other guides. - -> **[FIXME]** What are some _non_-goals, _non_-principles, or _anti_-patterns that -> we should document? 
diff --git a/src/doc/style/SUMMARY.md b/src/doc/style/SUMMARY.md deleted file mode 100644 index 508ede6c4a0ac..0000000000000 --- a/src/doc/style/SUMMARY.md +++ /dev/null @@ -1,50 +0,0 @@ -# Summary - -* [Style](style/README.md) - * [Whitespace](style/whitespace.md) - * [Comments](style/comments.md) - * [Braces, semicolons, commas](style/braces.md) - * [Naming](style/naming/README.md) - * [Ownership variants](style/naming/ownership.md) - * [Containers/wrappers](style/naming/containers.md) - * [Conversions](style/naming/conversions.md) - * [Iterators](style/naming/iterators.md) - * [Imports](style/imports.md) - * [Organization](style/organization.md) -* [Guidelines by Rust feature](features/README.md) - * [Let binding](features/let.md) - * [Pattern matching](features/match.md) - * [Loops](features/loops.md) - * [Functions and methods](features/functions-and-methods/README.md) - * [Input](features/functions-and-methods/input.md) - * [Output](features/functions-and-methods/output.md) - * [For convenience](features/functions-and-methods/convenience.md) - * [Types](features/types/README.md) - * [Conversions](features/types/conversions.md) - * [The newtype pattern](features/types/newtype.md) - * [Traits](features/traits/README.md) - * [For generics](features/traits/generics.md) - * [For objects](features/traits/objects.md) - * [For overloading](features/traits/overloading.md) - * [For extensions](features/traits/extensions.md) - * [For reuse](features/traits/reuse.md) - * [Common traits](features/traits/common.md) - * [Modules](features/modules.md) - * [Crates](features/crates.md) -* [Ownership and resources](ownership/README.md) - * [Constructors](ownership/constructors.md) - * [Builders](ownership/builders.md) - * [Destructors](ownership/destructors.md) - * [RAII](ownership/raii.md) - * [Cells and smart pointers](ownership/cell-smart.md) -* [Errors](errors/README.md) - * [Signaling](errors/signaling.md) - * [Handling](errors/handling.md) - * 
[Propagation](errors/propagation.md) - * [Ergonomics](errors/ergonomics.md) -* [Safety and guarantees](safety/README.md) - * [Using unsafe](safety/unsafe.md) - * [Library guarantees](safety/lib-guarantees.md) -* [Testing](testing/README.md) - * [Unit testing](testing/unit.md) -* [FFI, platform-specific code](platform.md) diff --git a/src/doc/style/errors/README.md b/src/doc/style/errors/README.md deleted file mode 100644 index 444da26ff8fed..0000000000000 --- a/src/doc/style/errors/README.md +++ /dev/null @@ -1,3 +0,0 @@ -% Errors - -> **[FIXME]** Add some general text here. diff --git a/src/doc/style/errors/ergonomics.md b/src/doc/style/errors/ergonomics.md deleted file mode 100644 index a404d25bf3703..0000000000000 --- a/src/doc/style/errors/ergonomics.md +++ /dev/null @@ -1,66 +0,0 @@ -% Ergonomic error handling - -Error propagation with raw `Result`s can require tedious matching and -repackaging. This tedium is largely alleviated by the `try!` macro, -and can be completely removed (in some cases) by the "`Result`-`impl`" -pattern. 
- -### The `try!` macro - -Prefer - -```rust -use std::io::{File, Open, Write, IoError}; - -struct Info { - name: String, - age: i32, - rating: i32 -} - -fn write_info(info: &Info) -> Result<(), IoError> { - let mut file = File::open_mode(&Path::new("my_best_friends.txt"), - Open, Write); - // Early return on error - try!(file.write_line(&format!("name: {}", info.name))); - try!(file.write_line(&format!("age: {}", info.age))); - try!(file.write_line(&format!("rating: {}", info.rating))); - return Ok(()); -} -``` - -over - -```rust -use std::io::{File, Open, Write, IoError}; - -struct Info { - name: String, - age: i32, - rating: i32 -} - -fn write_info(info: &Info) -> Result<(), IoError> { - let mut file = File::open_mode(&Path::new("my_best_friends.txt"), - Open, Write); - // Early return on error - match file.write_line(&format!("name: {}", info.name)) { - Ok(_) => (), - Err(e) => return Err(e) - } - match file.write_line(&format!("age: {}", info.age)) { - Ok(_) => (), - Err(e) => return Err(e) - } - return file.write_line(&format!("rating: {}", info.rating)); -} -``` - -See -[the `result` module documentation](https://doc.rust-lang.org/stable/std/result/index.html#the-try-macro) -for more details. - -### The `Result`-`impl` pattern [FIXME] - -> **[FIXME]** Document the way that the `io` module uses trait impls -> on `std::io::Result` to painlessly propagate errors. diff --git a/src/doc/style/errors/handling.md b/src/doc/style/errors/handling.md deleted file mode 100644 index 9b8a00d73665b..0000000000000 --- a/src/doc/style/errors/handling.md +++ /dev/null @@ -1,7 +0,0 @@ -% Handling errors - -### Use thread isolation to cope with failure. [FIXME] - -> **[FIXME]** Explain how to isolate threads and detect thread failure for recovery. 
- -### Consuming `Result` [FIXME] diff --git a/src/doc/style/errors/propagation.md b/src/doc/style/errors/propagation.md deleted file mode 100644 index 0a347cd577b90..0000000000000 --- a/src/doc/style/errors/propagation.md +++ /dev/null @@ -1,8 +0,0 @@ -% Propagation - -> **[FIXME]** We need guidelines on how to layer error information up a stack of -> abstractions. - -### Error interoperation [FIXME] - -> **[FIXME]** Document the `FromError` infrastructure. diff --git a/src/doc/style/errors/signaling.md b/src/doc/style/errors/signaling.md deleted file mode 100644 index 4038ec10b9ab5..0000000000000 --- a/src/doc/style/errors/signaling.md +++ /dev/null @@ -1,125 +0,0 @@ -% Signaling errors [RFC #236] - -> The guidelines below were approved by [RFC #236](https://github.com/rust-lang/rfcs/pull/236). - -Errors fall into one of three categories: - -* Catastrophic errors, e.g. out-of-memory. -* Contract violations, e.g. wrong input encoding, index out of bounds. -* Obstructions, e.g. file not found, parse error. - -The basic principle of the convention is that: - -* Catastrophic errors and programming errors (bugs) can and should only be -recovered at a *coarse grain*, i.e. a thread boundary. -* Obstructions preventing an operation should be reported at a maximally *fine -grain* -- to the immediate invoker of the operation. - -## Catastrophic errors - -An error is _catastrophic_ if there is no meaningful way for the current thread to -continue after the error occurs. - -Catastrophic errors are _extremely_ rare, especially outside of `libstd`. - -**Canonical examples**: out of memory, stack overflow. - -### For catastrophic errors, panic - -For errors like stack overflow, Rust currently aborts the process, but -could in principle panic, which (in the best case) would allow -reporting and recovery from a supervisory thread. - -## Contract violations - -An API may define a contract that goes beyond the type checking enforced by the -compiler. 
For example, slices support an indexing operation, with the contract -that the supplied index must be in bounds. - -Contracts can be complex and involve more than a single function invocation. For -example, the `RefCell` type requires that `borrow_mut` not be called until all -existing borrows have been relinquished. - -### For contract violations, panic - -A contract violation is always a bug, and for bugs we follow the Erlang -philosophy of "let it crash": we assume that software *will* have bugs, and we -design coarse-grained thread boundaries to report, and perhaps recover, from these -bugs. - -### Contract design - -One subtle aspect of these guidelines is that the contract for a function is -chosen by an API designer -- and so the designer also determines what counts as -a violation. - -This RFC does not attempt to give hard-and-fast rules for designing -contracts. However, here are some rough guidelines: - -* Prefer expressing contracts through static types whenever possible. - -* It *must* be possible to write code that uses the API without violating the - contract. - -* Contracts are most justified when violations are *inarguably* bugs -- but this - is surprisingly rare. - -* Consider whether the API client could benefit from the contract-checking - logic. The checks may be expensive. Or there may be useful programming - patterns where the client does not want to check inputs before hand, but would - rather attempt the operation and then find out whether the inputs were invalid. - -* When a contract violation is the *only* kind of error a function may encounter - -- i.e., there are no obstructions to its success other than "bad" inputs -- - using `Result` or `Option` instead is especially warranted. Clients can then use - `unwrap` to assert that they have passed valid input, or re-use the error - checking done by the API for their own purposes. - -* When in doubt, use loose contracts and instead return a `Result` or `Option`. 
- -## Obstructions - -An operation is *obstructed* if it cannot be completed for some reason, even -though the operation's contract has been satisfied. Obstructed operations may -have (documented!) side effects -- they are not required to roll back after -encountering an obstruction. However, they should leave the data structures in -a "coherent" state (satisfying their invariants, continuing to guarantee safety, -etc.). - -Obstructions may involve external conditions (e.g., I/O), or they may involve -aspects of the input that are not covered by the contract. - -**Canonical examples**: file not found, parse error. - -### For obstructions, use `Result` - -The -[`Result` type](https://doc.rust-lang.org/stable/std/result/index.html) -represents either a success (yielding `T`) or failure (yielding `E`). By -returning a `Result`, a function allows its clients to discover and react to -obstructions in a fine-grained way. - -#### What about `Option`? - -The `Option` type should not be used for "obstructed" operations; it -should only be used when a `None` return value could be considered a -"successful" execution of the operation. - -This is of course a somewhat subjective question, but a good litmus -test is: would a reasonable client ever ignore the result? The -`Result` type provides a lint that ensures the result is actually -inspected, while `Option` does not, and this difference of behavior -can help when deciding between the two types. - -Another litmus test: can the operation be understood as asking a -question (possibly with sideeffects)? Operations like `pop` on a -vector can be viewed as asking for the contents of the first element, -with the side effect of removing it if it exists -- with an `Option` -return value. - -## Do not provide both `Result` and `panic!` variants. - -An API should not provide both `Result`-producing and `panic`king versions of an -operation. 
It should provide just the `Result` version, allowing clients to use -`try!` or `unwrap` instead as needed. This is part of the general pattern of -cutting down on redundant variants by instead using method chaining. diff --git a/src/doc/style/features/README.md b/src/doc/style/features/README.md deleted file mode 100644 index 09657503d20d1..0000000000000 --- a/src/doc/style/features/README.md +++ /dev/null @@ -1,9 +0,0 @@ -% Guidelines by language feature - -Rust provides a unique combination of language features, some new and some -old. This section gives guidance on when and how to use Rust's features, and -brings attention to some of the tradeoffs between different features. - -Notably missing from this section is an in-depth discussion of Rust's pointer -types (both built-in and in the library). The topic of pointers is discussed at -length in a [separate section on ownership](../ownership/README.md). diff --git a/src/doc/style/features/crates.md b/src/doc/style/features/crates.md deleted file mode 100644 index 4748b05f17f74..0000000000000 --- a/src/doc/style/features/crates.md +++ /dev/null @@ -1,6 +0,0 @@ -% Crates - -> **[FIXME]** What general guidelines should we provide for crate design? - -> Possible topics: facades; per-crate preludes (to be imported as globs); -> "lib.rs" diff --git a/src/doc/style/features/functions-and-methods/README.md b/src/doc/style/features/functions-and-methods/README.md deleted file mode 100644 index 611cd564ccac7..0000000000000 --- a/src/doc/style/features/functions-and-methods/README.md +++ /dev/null @@ -1,44 +0,0 @@ -% Functions and methods - -### Prefer methods to functions if there is a clear receiver. **[FIXME: needs RFC]** - -Prefer - -```rust -impl Foo { - pub fn frob(&self, w: widget) { ... } -} -``` - -over - -```rust -pub fn frob(foo: &Foo, w: widget) { ... } -``` - -for any operation that is clearly associated with a particular -type. 
- -Methods have numerous advantages over functions: - -* They do not need to be imported or qualified to be used: all you - need is a value of the appropriate type. -* Their invocation performs autoborrowing (including mutable borrows). -* They make it easy to answer the question "what can I do with a value - of type `T`" (especially when using rustdoc). -* They provide `self` notation, which is more concise and often more - clearly conveys ownership distinctions. - -> **[FIXME]** Revisit these guidelines with -> [UFCS](https://github.com/nick29581/rfcs/blob/ufcs/0000-ufcs.md) and -> conventions developing around it. - - - -### Guidelines for inherent methods. **[FIXME]** - -> **[FIXME]** We need guidelines for when to provide inherent methods on a type, -> versus methods through a trait or functions. - -> **NOTE**: Rules for method resolution around inherent methods are in flux, -> which may impact the guidelines. diff --git a/src/doc/style/features/functions-and-methods/convenience.md b/src/doc/style/features/functions-and-methods/convenience.md deleted file mode 100644 index 69fd3772a761f..0000000000000 --- a/src/doc/style/features/functions-and-methods/convenience.md +++ /dev/null @@ -1,43 +0,0 @@ -% Convenience methods - -### Provide small, coherent sets of convenience methods. **[FIXME: needs RFC]** - -_Convenience methods_ wrap up existing functionality in a more convenient -way. The work done by a convenience method varies widely: - -* _Re-providing functions as methods_. For example, the `std::path::Path` type - provides methods like `stat` on `Path`s that simply invoke the corresponding - function in `std::io::fs`. -* _Skipping through conversions_. For example, the `str` type provides a - `.len()` convenience method which is also expressible as `.as_bytes().len()`. - Sometimes the conversion is more complex: the `str` module also provides - `from_chars`, which encapsulates a simple use of iterators. 
-* _Encapsulating common arguments_. For example, vectors of `&str`s - provide a `connect` as well as a special case, `concat`, that is expressible - using `connect` with a fixed separator of `""`. -* _Providing more efficient special cases_. The `connect` and `concat` example - also applies here: singling out `concat` as a special case allows for a more - efficient implementation. - - Note, however, that the `connect` method actually detects the special case - internally and invokes `concat`. Usually, it is not necessary to add a public - convenience method just for efficiency gains; there should also be a - _conceptual_ reason to add it, e.g. because it is such a common special case. - -It is tempting to add convenience methods in a one-off, haphazard way as -common use patterns emerge. Avoid this temptation, and instead _design_ small, -coherent sets of convenience methods that are easy to remember: - -* _Small_: Avoid combinatorial explosions of convenience methods. For example, - instead of adding `_str` variants of methods that provide a `str` output, - instead ensure that the normal output type of methods is easily convertible to - `str`. -* _Coherent_: Look for small groups of convenience methods that make sense to - include together. For example, the `Path` API mentioned above includes a small - selection of the most common filesystem operations that take a `Path` - argument. If one convenience method strongly suggests the existence of others, - consider adding the whole group. -* _Memorable_: It is not worth saving a few characters of typing if you have to - look up the name of a convenience method every time you use it. Add - convenience methods with names that are obvious and easy to remember, and add - them for the most common or painful use cases. 
diff --git a/src/doc/style/features/functions-and-methods/input.md b/src/doc/style/features/functions-and-methods/input.md deleted file mode 100644 index 9ea1d21816191..0000000000000 --- a/src/doc/style/features/functions-and-methods/input.md +++ /dev/null @@ -1,203 +0,0 @@ -% Input to functions and methods - -### Let the client decide when to copy and where to place data. [FIXME: needs RFC] - -#### Copying: - -Prefer - -```rust -fn foo(b: Bar) { - // use b as owned, directly -} -``` - -over - -```rust -fn foo(b: &Bar) { - let b = b.clone(); - // use b as owned after cloning -} -``` - -If a function requires ownership of a value of unknown type `T`, but does not -otherwise need to make copies, the function should take ownership of the -argument (pass by value `T`) rather than using `.clone()`. That way, the caller -can decide whether to relinquish ownership or to `clone`. - -Similarly, the `Copy` trait bound should only be demanded it when absolutely -needed, not as a way of signaling that copies should be cheap to make. - -#### Placement: - -Prefer - -```rust -fn foo(b: Bar) -> Bar { ... } -``` - -over - -```rust -fn foo(b: Box) -> Box { ... } -``` - -for concrete types `Bar` (as opposed to trait objects). This way, the caller can -decide whether to place data on the stack or heap. No overhead is imposed by -letting the caller determine the placement. - -### Minimize assumptions about parameters. [FIXME: needs RFC] - -The fewer assumptions a function makes about its inputs, the more widely usable -it becomes. - -#### Minimizing assumptions through generics: - -Prefer - -```rust -fn foo>(c: T) { ... } -``` - -over any of - -```rust -fn foo(c: &[i32]) { ... } -fn foo(c: &Vec) { ... } -fn foo(c: &SomeOtherCollection) { ... } -``` - -if the function only needs to iterate over the data. - -More generally, consider using generics to pinpoint the assumptions a function -needs to make about its arguments. 
- -On the other hand, generics can make it more difficult to read and understand a -function's signature. Aim for "natural" parameter types that a neither overly -concrete nor overly abstract. See the discussion on -[traits](../../traits/README.md) for more guidance. - - -#### Minimizing ownership assumptions: - -Prefer either of - -```rust -fn foo(b: &Bar) { ... } -fn foo(b: &mut Bar) { ... } -``` - -over - -```rust -fn foo(b: Bar) { ... } -``` - -That is, prefer borrowing arguments rather than transferring ownership, unless -ownership is actually needed. - -### Prefer compound return types to out-parameters. [FIXME: needs RFC] - -Prefer - -```rust -fn foo() -> (Bar, Bar) -``` - -over - -```rust -fn foo(output: &mut Bar) -> Bar -``` - -for returning multiple `Bar` values. - -Compound return types like tuples and structs are efficiently compiled -and do not require heap allocation. If a function needs to return -multiple values, it should do so via one of these types. - -The primary exception: sometimes a function is meant to modify data -that the caller already owns, for example to re-use a buffer: - -```rust -fn read(&mut self, buf: &mut [u8]) -> std::io::Result -``` - -(From the [Read trait](https://doc.rust-lang.org/stable/std/io/trait.Read.html#tymethod.read).) - -### Consider validating arguments, statically or dynamically. [FIXME: needs RFC] - -_Note: this material is closely related to - [library-level guarantees](../../safety/lib-guarantees.md)._ - -Rust APIs do _not_ generally follow the -[robustness principle](https://en.wikipedia.org/wiki/Robustness_principle): "be -conservative in what you send; be liberal in what you accept". - -Instead, Rust code should _enforce_ the validity of input whenever practical. - -Enforcement can be achieved through the following mechanisms (listed -in order of preference). - -#### Static enforcement: - -Choose an argument type that rules out bad inputs. 
- -For example, prefer - -```rust -enum FooMode { - Mode1, - Mode2, - Mode3, -} -fn foo(mode: FooMode) { ... } -``` - -over - -```rust -fn foo(mode2: bool, mode3: bool) { - assert!(!mode2 || !mode3); - ... -} -``` - -Static enforcement usually comes at little run-time cost: it pushes the -costs to the boundaries. It also catches bugs early, during compilation, -rather than through run-time failures. - -On the other hand, some properties are difficult or impossible to -express using types. - -#### Dynamic enforcement: - -Validate the input as it is processed (or ahead of time, if necessary). Dynamic -checking is often easier to implement than static checking, but has several -downsides: - -1. Runtime overhead (unless checking can be done as part of processing the input). -2. Delayed detection of bugs. -3. Introduces failure cases, either via `panic!` or `Result`/`Option` types (see - the [error handling guidelines](../../errors/README.md)), which must then be - dealt with by client code. - -#### Dynamic enforcement with `debug_assert!`: - -Same as dynamic enforcement, but with the possibility of easily turning off -expensive checks for production builds. - -#### Dynamic enforcement with opt-out: - -Same as dynamic enforcement, but adds sibling functions that opt out of the -checking. - -The convention is to mark these opt-out functions with a suffix like -`_unchecked` or by placing them in a `raw` submodule. - -The unchecked functions can be used judiciously in cases where (1) performance -dictates avoiding checks and (2) the client is otherwise confident that the -inputs are valid. - -> **[FIXME]** Should opt-out functions be marked `unsafe`? 
diff --git a/src/doc/style/features/functions-and-methods/output.md b/src/doc/style/features/functions-and-methods/output.md deleted file mode 100644 index 3e43d1e416d76..0000000000000 --- a/src/doc/style/features/functions-and-methods/output.md +++ /dev/null @@ -1,56 +0,0 @@ -% Output from functions and methods - -### Don't overpromise. [FIXME] - -> **[FIXME]** Add discussion of overly-specific return types, -> e.g. returning a compound iterator type rather than hiding it behind -> a use of newtype. - -### Let clients choose what to throw away. [FIXME: needs RFC] - -#### Return useful intermediate results: - -Many functions that answer a question also compute interesting related data. If -this data is potentially of interest to the client, consider exposing it in the -API. - -Prefer - -```rust -struct SearchResult { - found: bool, // item in container? - expected_index: usize // what would the item's index be? -} - -fn binary_search(&self, k: Key) -> SearchResult -``` -or - -```rust -fn binary_search(&self, k: Key) -> (bool, usize) -``` - -over - -```rust -fn binary_search(&self, k: Key) -> bool -``` - -#### Yield back ownership: - -Prefer - -```rust -fn from_utf8_owned(vv: Vec) -> Result> -``` - -over - -```rust -fn from_utf8_owned(vv: Vec) -> Option -``` - -The `from_utf8_owned` function gains ownership of a vector. In the successful -case, the function consumes its input, returning an owned string without -allocating or copying. In the unsuccessful case, however, the function returns -back ownership of the original slice. diff --git a/src/doc/style/features/let.md b/src/doc/style/features/let.md deleted file mode 100644 index 01dff3dcceaf1..0000000000000 --- a/src/doc/style/features/let.md +++ /dev/null @@ -1,103 +0,0 @@ -% Let binding - -### Always separately bind RAII guards. 
[FIXME: needs RFC] - -Prefer - -```rust -fn use_mutex(m: sync::mutex::Mutex) { - let guard = m.lock(); - do_work(guard); - drop(guard); // unlock the lock - // do other work -} -``` - -over - -```rust -fn use_mutex(m: sync::mutex::Mutex) { - do_work(m.lock()); - // do other work -} -``` - -As explained in the [RAII guide](../ownership/raii.md), RAII guards are values -that represent ownership of some resource and whose destructor releases the -resource. Because the lifetime of guards are significant, they should always be -explicitly `let`-bound to make the lifetime clear. Consider using an explicit -`drop` to release the resource early. - -### Prefer conditional expressions to deferred initialization. [FIXME: needs RFC] - -Prefer - -```rust -let foo = match bar { - Baz => 0, - Quux => 1 -}; -``` - -over - -```rust -let foo; -match bar { - Baz => { - foo = 0; - } - Quux => { - foo = 1; - } -} -``` - -unless the conditions for initialization are too complex to fit into a simple -conditional expression. - -### Use type annotations for clarification; prefer explicit generics when inference fails. [FIXME: needs RFC] - -Prefer - -```rust -let v = s.iter().map(|x| x * 2) - .collect::>(); -``` - -over - -```rust -let v: Vec<_> = s.iter().map(|x| x * 2) - .collect(); -``` - -When the type of a value might be unclear to the _reader_ of the code, consider -explicitly annotating it in a `let`. - -On the other hand, when the type is unclear to the _compiler_, prefer to specify -the type by explicit generics instantiation, which is usually more clear. - -### Shadowing [FIXME] - -> **[FIXME]** Repeatedly shadowing a binding is somewhat common in Rust code. We -> need to articulate a guideline on when it is appropriate/useful and when not. - -### Prefer immutable bindings. 
[FIXME: needs RFC] - -Use `mut` bindings to signal the span during which a value is mutated: - -```rust -let mut v = Vec::new(); -// push things onto v -let v = v; -// use v immutably henceforth -``` - -### Prefer to bind all `struct` or tuple fields. [FIXME: needs RFC] - -When consuming a `struct` or tuple via a `let`, bind all of the fields rather -than using `..` to elide the ones you don't need. The benefit is that when -fields are added, the compiler will pinpoint all of the places where that type -of value was consumed, which will often need to be adjusted to take the new -field properly into account. diff --git a/src/doc/style/features/loops.md b/src/doc/style/features/loops.md deleted file mode 100644 index b144825f98183..0000000000000 --- a/src/doc/style/features/loops.md +++ /dev/null @@ -1,13 +0,0 @@ -% Loops - -### Prefer `for` to `while`. [FIXME: needs RFC] - -A `for` loop is preferable to a `while` loop, unless the loop counts in a -non-uniform way (making it difficult to express using `for`). - -### Guidelines for `loop`. [FIXME] - -> **[FIXME]** When is `loop` recommended? Some possibilities: -> * For optimistic retry algorithms -> * For servers -> * To avoid mutating local variables sometimes needed to fit `while` diff --git a/src/doc/style/features/match.md b/src/doc/style/features/match.md deleted file mode 100644 index 131e0fad79a92..0000000000000 --- a/src/doc/style/features/match.md +++ /dev/null @@ -1,26 +0,0 @@ -% Pattern matching - -### Dereference `match` targets when possible. [FIXME: needs RFC] - -Prefer - -~~~~ -match *foo { - X(...) => ... - Y(...) => ... -} -~~~~ - -over - -~~~~ -match foo { - box X(...) => ... - box Y(...) => ... 
-} -~~~~ - - - - - diff --git a/src/doc/style/features/modules.md b/src/doc/style/features/modules.md deleted file mode 100644 index c55b38b915b3d..0000000000000 --- a/src/doc/style/features/modules.md +++ /dev/null @@ -1,133 +0,0 @@ -% Modules - -> **[FIXME]** What general guidelines should we provide for module design? - -> We should discuss visibility, nesting, `mod.rs`, and any interesting patterns -> around modules. - -### Headers [FIXME: needs RFC] - -Organize module headers as follows: - 1. [Imports](../style/imports.md). - 1. `mod` declarations. - 1. `pub mod` declarations. - -### Avoid `path` directives. [FIXME: needs RFC] - -Avoid using `#[path="..."]` directives; make the file system and -module hierarchy match, instead. - -### Use the module hierarchy to organize APIs into coherent sections. [FIXME] - -> **[FIXME]** Flesh this out with examples; explain what a "coherent -> section" is with examples. -> -> The module hierarchy defines both the public and internal API of your module. -> Breaking related functionality into submodules makes it understandable to both -> users and contributors to the module. - -### Place modules in their own file. [FIXME: needs RFC] - -> **[FIXME]** -> - "<100 lines" is arbitrary, but it's a clearer recommendation -> than "~1 page" or similar suggestions that vary by screen size, etc. - -For all except very short modules (<100 lines) and [tests](../testing/README.md), -place the module `foo` in a separate file, as in: - -```rust -pub mod foo; - -// in foo.rs or foo/mod.rs -pub fn bar() { println!("..."); } -/* ... */ -``` - -rather than declaring it inline: - -```rust -pub mod foo { - pub fn bar() { println!("..."); } - /* ... */ -} -``` - -#### Use subdirectories for modules with children. [FIXME: needs RFC] - -For modules that themselves have submodules, place the module in a separate -directory (e.g., `bar/mod.rs` for a module `bar`) rather than the same directory. 
- -Note the structure of -[`std::io`](https://doc.rust-lang.org/std/io/). Many of the submodules lack -children, like -[`io::fs`](https://doc.rust-lang.org/std/io/fs/) -and -[`io::stdio`](https://doc.rust-lang.org/std/io/stdio/). -On the other hand, -[`io::net`](https://doc.rust-lang.org/std/io/net/) -contains submodules, so it lives in a separate directory: - -``` -io/mod.rs - io/extensions.rs - io/fs.rs - io/net/mod.rs - io/net/addrinfo.rs - io/net/ip.rs - io/net/tcp.rs - io/net/udp.rs - io/net/unix.rs - io/pipe.rs - ... -``` - -While it is possible to define all of `io` within a single directory, -mirroring the module hierarchy in the directory structure makes -submodules of `io::net` easier to find. - -### Consider top-level definitions or reexports. [FIXME: needs RFC] - -For modules with submodules, -define or [reexport](https://doc.rust-lang.org/std/io/#reexports) commonly used -definitions at the top level: - -* Functionality relevant to the module itself or to many of its - children should be defined in `mod.rs`. -* Functionality specific to a submodule should live in that - submodule. Reexport at the top level for the most important or - common definitions. - -For example, -[`IoError`](https://doc.rust-lang.org/std/io/struct.IoError.html) -is defined in `io/mod.rs`, since it pertains to the entirety of `io`, -while -[`TcpStream`](https://doc.rust-lang.org/std/io/net/tcp/struct.TcpStream.html) -is defined in `io/net/tcp.rs` and reexported in the `io` module. - -### Use internal module hierarchies for organization. [FIXME: needs RFC] - -> **[FIXME]** -> - Referencing internal modules from the standard library is subject to -> becoming outdated. - -Internal module hierarchies (i.e., private submodules) may be used to -hide implementation details that are not part of the module's API. 
- -For example, in [`std::io`](https://doc.rust-lang.org/std/io/), `mod mem` -provides implementations for -[`BufReader`](https://doc.rust-lang.org/std/io/struct.BufReader.html) -and -[`BufWriter`](https://doc.rust-lang.org/std/io/struct.BufWriter.html), -but these are re-exported in `io/mod.rs` at the top level of the module: - -```rust -// libstd/io/mod.rs - -pub use self::mem::{MemReader, BufReader, MemWriter, BufWriter}; -/* ... */ -mod mem; -``` - -This hides the detail that there even exists a `mod mem` in `io`, and -helps keep code organized while offering freedom to change the -implementation. diff --git a/src/doc/style/features/traits/README.md b/src/doc/style/features/traits/README.md deleted file mode 100644 index 1893db24466fa..0000000000000 --- a/src/doc/style/features/traits/README.md +++ /dev/null @@ -1,22 +0,0 @@ -% Traits - -Traits are probably Rust's most complex feature, supporting a wide range of use -cases and design tradeoffs. Patterns of trait usage are still emerging. - -### Know whether a trait will be used as an object. [FIXME: needs RFC] - -Trait objects have some [significant limitations](objects.md): methods -invoked through a trait object cannot use generics, and cannot use -`Self` except in receiver position. - -When designing a trait, decide early on whether the trait will be used -as an [object](objects.md) or as a [bound on generics](generics.md); -the tradeoffs are discussed in each of the linked sections. - -If a trait is meant to be used as an object, its methods should take -and return trait objects rather than use generics. - - -### Default methods [FIXME] - -> **[FIXME]** Guidelines for default methods. diff --git a/src/doc/style/features/traits/common.md b/src/doc/style/features/traits/common.md deleted file mode 100644 index 18346c092547f..0000000000000 --- a/src/doc/style/features/traits/common.md +++ /dev/null @@ -1,71 +0,0 @@ -% Common traits - -### Eagerly implement common traits. 
[FIXME: needs RFC] - -Rust's trait system does not allow _orphans_: roughly, every `impl` must live -either in the crate that defines the trait or the implementing -type. Consequently, crates that define new types should eagerly implement all -applicable, common traits. - -To see why, consider the following situation: - -* Crate `std` defines trait `Debug`. -* Crate `url` defines type `Url`, without implementing `Debug`. -* Crate `webapp` imports from both `std` and `url`, - -There is no way for `webapp` to add `Debug` to `url`, since it defines neither. -(Note: the newtype pattern can provide an efficient, but inconvenient -workaround; see [newtype for views](../types/newtype.md)) - -The most important common traits to implement from `std` are: - -```rust -Clone, Debug, Hash, Eq -``` - -#### When safe, derive or otherwise implement `Send` and `Share`. [FIXME] - -> **[FIXME]**. This guideline is in flux while the "opt-in" nature of -> built-in traits is being decided. See https://github.com/rust-lang/rfcs/pull/127 - -### Prefer to derive, rather than implement. [FIXME: needs RFC] - -Deriving saves implementation effort, makes correctness trivial, and -automatically adapts to upstream changes. - -### Do not overload operators in surprising ways. [FIXME: needs RFC] - -Operators with built in syntax (`*`, `|`, and so on) can be provided for a type -by implementing the traits in `core::ops`. These operators come with strong -expectations: implement `Mul` only for an operation that bears some resemblance -to multiplication (and shares the expected properties, e.g. associativity), and -so on for the other traits. - -### The `Drop` trait - -The `Drop` trait is treated specially by the compiler as a way of -associating destructors with types. See -[the section on destructors](../../ownership/destructors.md) for -guidance. - -### The `Deref`/`DerefMut` traits - -#### Use `Deref`/`DerefMut` only for smart pointers. 
[FIXME: needs RFC] - -The `Deref` traits are used implicitly by the compiler in many circumstances, -and interact with method resolution. The relevant rules are designed -specifically to accommodate smart pointers, and so the traits should be used -only for that purpose. - -#### Do not fail within a `Deref`/`DerefMut` implementation. [FIXME: needs RFC] - -Because the `Deref` traits are invoked implicitly by the compiler in sometimes -subtle ways, failure during dereferencing can be extremely confusing. If a -dereference might not succeed, target the `Deref` trait as a `Result` or -`Option` type instead. - -#### Avoid inherent methods when implementing `Deref`/`DerefMut` [FIXME: needs RFC] - -The rules around method resolution and `Deref` are in flux, but inherent methods -on a type implementing `Deref` are likely to shadow any methods of the referent -with the same name. diff --git a/src/doc/style/features/traits/extensions.md b/src/doc/style/features/traits/extensions.md deleted file mode 100644 index fc3a03c01f5a1..0000000000000 --- a/src/doc/style/features/traits/extensions.md +++ /dev/null @@ -1,7 +0,0 @@ -% Using traits to add extension methods - -> **[FIXME]** Elaborate. - -### Consider using default methods rather than extension traits **[FIXME]** - -> **[FIXME]** Elaborate. diff --git a/src/doc/style/features/traits/generics.md b/src/doc/style/features/traits/generics.md deleted file mode 100644 index 26ffda50ac53d..0000000000000 --- a/src/doc/style/features/traits/generics.md +++ /dev/null @@ -1,68 +0,0 @@ -% Using traits for bounds on generics - -The most widespread use of traits is for writing generic functions or types. 
For -example, the following signature describes a function for consuming any iterator -yielding items of type `A` to produce a collection of `A`: - -```rust -fn from_iter<A, T: Iterator<A>>(iterator: T) -> SomeCollection<A> -``` - -Here, the `Iterator` trait specifies an interface that a type `T` must -explicitly implement to be used by this generic function. - -**Pros**: - -* _Reusability_. Generic functions can be applied to an open-ended collection of - types, while giving a clear contract for the functionality those types must - provide. -* _Static dispatch and optimization_. Each use of a generic function is - specialized ("monomorphized") to the particular types implementing the trait - bounds, which means that (1) invocations of trait methods are static, direct - calls to the implementation and (2) the compiler can inline and otherwise - optimize these calls. -* _Inline layout_. If a `struct` or `enum` type is generic over some type - parameter `T`, values of type `T` will be laid out _inline_ in the - `struct`/`enum`, without any indirection. -* _Inference_. Since the type parameters to generic functions can usually be - inferred, generic functions can help cut down on verbosity in code where - explicit conversions or other method calls would usually be necessary. See the - [overloading/implicits use case](#use-case-limited-overloading-andor-implicit-conversions) - below. -* _Precise types_. Because generics give a _name_ to the specific type - implementing a trait, it is possible to be precise about places where that - exact type is required or produced. For example, a function - - ```rust - fn binary<T: Trait>(x: T, y: T) -> T - ``` - - is guaranteed to consume and produce elements of exactly the same type `T`; it - cannot be invoked with parameters of different types that both implement - `Trait`. - -**Cons**: - -* _Code size_. Specializing generic functions means that the function body is - duplicated.
The increase in code size must be weighed against the performance - benefits of static dispatch. -* _Homogeneous types_. This is the other side of the "precise types" coin: if - `T` is a type parameter, it stands for a _single_ actual type. So for example - a `Vec` contains elements of a single concrete type (and, indeed, the - vector representation is specialized to lay these out in line). Sometimes - heterogeneous collections are useful; see - [trait objects](#use-case-trait-objects) below. -* _Signature verbosity_. Heavy use of generics can bloat function signatures. - **[Ed. note]** This problem may be mitigated by some language improvements; stay tuned. - -### Favor widespread traits. **[FIXME: needs RFC]** - -Generic types are a form of abstraction, which entails a mental indirection: if -a function takes an argument of type `T` bounded by `Trait`, clients must first -think about the concrete types that implement `Trait` to understand how and when -the function is callable. - -To keep the cost of abstraction low, favor widely-known traits. Whenever -possible, implement and use traits provided as part of the standard library. Do -not introduce new traits for generics lightly; wait until there are a wide range -of types that can implement the type. diff --git a/src/doc/style/features/traits/objects.md b/src/doc/style/features/traits/objects.md deleted file mode 100644 index 38494a9b9bc3c..0000000000000 --- a/src/doc/style/features/traits/objects.md +++ /dev/null @@ -1,49 +0,0 @@ -% Using trait objects - -> **[FIXME]** What are uses of trait objects other than heterogeneous collections? - -Trait objects are useful primarily when _heterogeneous_ collections of objects -need to be treated uniformly; it is the closest that Rust comes to -object-oriented programming. - -```rust -struct Frame { ... } -struct Button { ... } -struct Label { ... } - -trait Widget { ... } - -impl Widget for Frame { ... } -impl Widget for Button { ... } -impl Widget for Label { ... 
} - -impl Frame { - fn new(contents: &[Box]) -> Frame { - ... - } -} - -fn make_gui() -> Box { - let b: Box = box Button::new(...); - let l: Box = box Label::new(...); - - box Frame::new([b, l]) as Box -} -``` - -By using trait objects, we can set up a GUI framework with a `Frame` widget that -contains a heterogeneous collection of children widgets. - -**Pros**: - -* _Heterogeneity_. When you need it, you really need it. -* _Code size_. Unlike generics, trait objects do not generate specialized - (monomorphized) versions of code, which can greatly reduce code size. - -**Cons**: - -* _No generic methods_. Trait objects cannot currently provide generic methods. -* _Dynamic dispatch and fat pointers_. Trait objects inherently involve - indirection and vtable dispatch, which can carry a performance penalty. -* _No Self_. Except for the method receiver argument, methods on trait objects - cannot use the `Self` type. diff --git a/src/doc/style/features/traits/overloading.md b/src/doc/style/features/traits/overloading.md deleted file mode 100644 index d7482c9619072..0000000000000 --- a/src/doc/style/features/traits/overloading.md +++ /dev/null @@ -1,7 +0,0 @@ -% Using traits for overloading - -> **[FIXME]** Elaborate. - -> **[FIXME]** We need to decide on guidelines for this use case. There are a few -> patterns emerging in current Rust code, but it's not clear how widespread they -> should be. diff --git a/src/doc/style/features/traits/reuse.md b/src/doc/style/features/traits/reuse.md deleted file mode 100644 index feedd3937fc9d..0000000000000 --- a/src/doc/style/features/traits/reuse.md +++ /dev/null @@ -1,30 +0,0 @@ -% Using traits to share implementations - -> **[FIXME]** Elaborate. - -> **[FIXME]** We probably want to discourage this, at least when used in a way -> that is publicly exposed. - -Traits that provide default implementations for function can provide code reuse -across types. 
For example, a `print` method can be defined across multiple -types as follows: - -``` Rust -trait Printable { - // Default method implementation - fn print(&self) { println!("{:?}", *self) } -} - -impl Printable for i32 {} - -impl Printable for String { - fn print(&self) { println!("{}", *self) } -} - -impl Printable for bool {} - -impl Printable for f32 {} -``` - -This allows the implementation of `print` to be shared across types, yet -overridden where needed, as seen in the `impl` for `String`. diff --git a/src/doc/style/features/types/README.md b/src/doc/style/features/types/README.md deleted file mode 100644 index c675eb581c6ae..0000000000000 --- a/src/doc/style/features/types/README.md +++ /dev/null @@ -1,68 +0,0 @@ -% Data types - -### Use custom types to imbue meaning; do not abuse `bool`, `Option` or other core types. **[FIXME: needs RFC]** - -Prefer - -```rust -let w = Widget::new(Small, Round) -``` - -over - -```rust -let w = Widget::new(true, false) -``` - -Core types like `bool`, `u8` and `Option` have many possible interpretations. - -Use custom types (whether `enum`s, `struct`, or tuples) to convey -interpretation and invariants. In the above example, -it is not immediately clear what `true` and `false` are conveying without -looking up the argument names, but `Small` and `Round` are more suggestive. - -Using custom types makes it easier to expand the -options later on, for example by adding an `ExtraLarge` variant. - -See [the newtype pattern](newtype.md) for a no-cost way to wrap -existing types with a distinguished name. - -### Prefer private fields, except for passive data. **[FIXME: needs RFC]** - -Making a field public is a strong commitment: it pins down a representation -choice, _and_ prevents the type from providing any validation or maintaining any -invariants on the contents of the field, since clients can mutate it arbitrarily. - -Public fields are most appropriate for `struct` types in the C spirit: compound, -passive data structures. 
Otherwise, consider providing getter/setter methods -and hiding fields instead. - -> **[FIXME]** Cross-reference validation for function arguments. - -### Use custom `enum`s for alternatives, `bitflags` for C-style flags. **[FIXME: needs RFC]** - -Rust supports `enum` types with "custom discriminants": - -~~~~ -enum Color { - Red = 0xff0000, - Green = 0x00ff00, - Blue = 0x0000ff -} -~~~~ - -Custom discriminants are useful when an `enum` type needs to be serialized to an -integer value compatibly with some other system/language. They support -"typesafe" APIs: by taking a `Color`, rather than an integer, a function is -guaranteed to get well-formed inputs, even if it later views those inputs as -integers. - -An `enum` allows an API to request exactly one choice from among many. Sometimes -an API's input is instead the presence or absence of a set of flags. In C code, -this is often done by having each flag correspond to a particular bit, allowing -a single integer to represent, say, 32 or 64 flags. Rust's `std::bitflags` -module provides a typesafe way for doing so. - -### Phantom types. [FIXME] - -> **[FIXME]** Add some material on phantom types (https://blog.mozilla.org/research/2014/06/23/static-checking-of-units-in-servo/) diff --git a/src/doc/style/features/types/conversions.md b/src/doc/style/features/types/conversions.md deleted file mode 100644 index f0f230f57e557..0000000000000 --- a/src/doc/style/features/types/conversions.md +++ /dev/null @@ -1,22 +0,0 @@ -% Conversions between types - -### Associate conversions with the most specific type involved. **[FIXME: needs RFC]** - -When in doubt, prefer `to_`/`as_`/`into_` to `from_`, because they are -more ergonomic to use (and can be chained with other methods). - -For many conversions between two types, one of the types is clearly more -"specific": it provides some additional invariant or interpretation that is not -present in the other type. 
For example, `str` is more specific than `&[u8]`, -since it is a utf-8 encoded sequence of bytes. - -Conversions should live with the more specific of the involved types. Thus, -`str` provides both the `as_bytes` method and the `from_utf8` constructor for -converting to and from `&[u8]` values. Besides being intuitive, this convention -avoids polluting concrete types like `&[u8]` with endless conversion methods. - -### Explicitly mark lossy conversions, or do not label them as conversions. **[FIXME: needs RFC]** - -If a function's name implies that it is a conversion (prefix `from_`, `as_`, -`to_` or `into_`), but the function loses information, add a suffix `_lossy` or -otherwise indicate the lossyness. Consider avoiding the conversion name prefix. diff --git a/src/doc/style/features/types/newtype.md b/src/doc/style/features/types/newtype.md deleted file mode 100644 index e69aa3b83bfa4..0000000000000 --- a/src/doc/style/features/types/newtype.md +++ /dev/null @@ -1,69 +0,0 @@ -% The newtype pattern - -A "newtype" is a tuple or `struct` with a single field. The terminology is borrowed from Haskell. - -Newtypes are a zero-cost abstraction: they introduce a new, distinct name for an -existing type, with no runtime overhead when converting between the two types. - -### Use newtypes to provide static distinctions. [FIXME: needs RFC] - -Newtypes can statically distinguish between different interpretations of an -underlying type. - -For example, a `f64` value might be used to represent a quantity in miles or in -kilometers. Using newtypes, we can keep track of the intended interpretation: - -```rust -struct Miles(pub f64); -struct Kilometers(pub f64); - -impl Miles { - fn as_kilometers(&self) -> Kilometers { ... } -} -impl Kilometers { - fn as_miles(&self) -> Miles { ... } -} -``` - -Once we have separated these two types, we can statically ensure that we do not -confuse them. 
For example, the function - -```rust -fn are_we_there_yet(distance_travelled: Miles) -> bool { ... } -``` - -cannot accidentally be called with a `Kilometers` value. The compiler will -remind us to perform the conversion, thus averting certain -[catastrophic bugs](http://en.wikipedia.org/wiki/Mars_Climate_Orbiter). - -### Use newtypes with private fields for hiding. [FIXME: needs RFC] - -A newtype can be used to hide representation details while making precise -promises to the client. - -For example, consider a function `my_transform` that returns a compound iterator -type `Enumerate<Skip<vec::IntoIter<T>>>`. We wish to hide this type from the -client, so that the client's view of the return type is roughly `Iterator<(usize, -T)>`. We can do so using the newtype pattern: - -```rust -struct MyTransformResult<T>(Enumerate<Skip<vec::IntoIter<T>>>); -impl<T> Iterator<(usize, T)> for MyTransformResult<T> { ... } - -fn my_transform<T, Iter: Iterator<T>>(iter: Iter) -> MyTransformResult<T> { - ... -} -``` - -Aside from simplifying the signature, this use of newtypes allows us to -expose and promise less to the client. The client does not know _how_ the result -iterator is constructed or represented, which means the representation can -change in the future without breaking client code. - -> **[FIXME]** Interaction with auto-deref. - -### Use newtypes to provide cost-free _views_ of another type. **[FIXME]** - -> **[FIXME]** Describe the pattern of using newtypes to provide a new set of -> inherent or trait methods, providing a different perspective on the underlying -> type. diff --git a/src/doc/style/ownership/README.md b/src/doc/style/ownership/README.md deleted file mode 100644 index 11bdb03a3a818..0000000000000 --- a/src/doc/style/ownership/README.md +++ /dev/null @@ -1,3 +0,0 @@ -% Ownership and resource management - -> **[FIXME]** Add general remarks about ownership/resources here.
diff --git a/src/doc/style/ownership/builders.md b/src/doc/style/ownership/builders.md deleted file mode 100644 index 9fc640890fe8a..0000000000000 --- a/src/doc/style/ownership/builders.md +++ /dev/null @@ -1,176 +0,0 @@ -% The builder pattern - -Some data structures are complicated to construct, due to their construction needing: - -* a large number of inputs -* compound data (e.g. slices) -* optional configuration data -* choice between several flavors - -which can easily lead to a large number of distinct constructors with -many arguments each. - -If `T` is such a data structure, consider introducing a `T` _builder_: - -1. Introduce a separate data type `TBuilder` for incrementally configuring a `T` - value. When possible, choose a better name: e.g. `Command` is the builder for - `Process`. -2. The builder constructor should take as parameters only the data _required_ to - make a `T`. -3. The builder should offer a suite of convenient methods for configuration, - including setting up compound inputs (like slices) incrementally. - These methods should return `self` to allow chaining. -4. The builder should provide one or more "_terminal_" methods for actually building a `T`. - -The builder pattern is especially appropriate when building a `T` involves side -effects, such as spawning a thread or launching a process. - -In Rust, there are two variants of the builder pattern, differing in the -treatment of ownership, as described below. - -### Non-consuming builders (preferred): - -In some cases, constructing the final `T` does not require the builder itself to -be consumed. The follow variant on -[`std::process::Command`](https://doc.rust-lang.org/stable/std/process/struct.Command.html) -is one example: - -```rust -// NOTE: the actual Command API does not use owned Strings; -// this is a simplified version. 
- -pub struct Command { - program: String, - args: Vec, - cwd: Option, - // etc -} - -impl Command { - pub fn new(program: String) -> Command { - Command { - program: program, - args: Vec::new(), - cwd: None, - } - } - - /// Add an argument to pass to the program. - pub fn arg<'a>(&'a mut self, arg: String) -> &'a mut Command { - self.args.push(arg); - self - } - - /// Add multiple arguments to pass to the program. - pub fn args<'a>(&'a mut self, args: &[String]) - -> &'a mut Command { - self.args.push_all(args); - self - } - - /// Set the working directory for the child process. - pub fn cwd<'a>(&'a mut self, dir: String) -> &'a mut Command { - self.cwd = Some(dir); - self - } - - /// Executes the command as a child process, which is returned. - pub fn spawn(&self) -> std::io::Result { - ... - } -} -``` - -Note that the `spawn` method, which actually uses the builder configuration to -spawn a process, takes the builder by immutable reference. This is possible -because spawning the process does not require ownership of the configuration -data. - -Because the terminal `spawn` method only needs a reference, the configuration -methods take and return a mutable borrow of `self`. - -#### The benefit - -By using borrows throughout, `Command` can be used conveniently for both -one-liner and more complex constructions: - -```rust -// One-liners -Command::new("/bin/cat").arg("file.txt").spawn(); - -// Complex configuration -let mut cmd = Command::new("/bin/ls"); -cmd.arg("."); - -if size_sorted { - cmd.arg("-S"); -} - -cmd.spawn(); -``` - -### Consuming builders: - -Sometimes builders must transfer ownership when constructing the final type -`T`, meaning that the terminal methods must take `self` rather than `&self`: - -```rust -// A simplified excerpt from std::thread::Builder - -impl ThreadBuilder { - /// Name the thread-to-be. Currently the name is used for identification - /// only in failure messages. 
- pub fn named(mut self, name: String) -> ThreadBuilder { - self.name = Some(name); - self - } - - /// Redirect thread-local stdout. - pub fn stdout(mut self, stdout: Box) -> ThreadBuilder { - self.stdout = Some(stdout); - // ^~~~~~ this is owned and cannot be cloned/re-used - self - } - - /// Creates and executes a new child thread. - pub fn spawn(self, f: proc():Send) { - // consume self - ... - } -} -``` - -Here, the `stdout` configuration involves passing ownership of a `Writer`, -which must be transferred to the thread upon construction (in `spawn`). - -When the terminal methods of the builder require ownership, there is a basic tradeoff: - -* If the other builder methods take/return a mutable borrow, the complex - configuration case will work well, but one-liner configuration becomes - _impossible_. - -* If the other builder methods take/return an owned `self`, one-liners - continue to work well but complex configuration is less convenient. - -Under the rubric of making easy things easy and hard things possible, _all_ -builder methods for a consuming builder should take and returned an owned -`self`. Then client code works as follows: - -```rust -// One-liners -ThreadBuilder::new().named("my_thread").spawn(proc() { ... }); - -// Complex configuration -let mut thread = ThreadBuilder::new(); -thread = thread.named("my_thread_2"); // must re-assign to retain ownership - -if reroute { - thread = thread.stdout(mywriter); -} - -thread.spawn(proc() { ... }); -``` - -One-liners work as before, because ownership is threaded through each of the -builder methods until being consumed by `spawn`. Complex configuration, -however, is more verbose: it requires re-assigning the builder at each step. 
diff --git a/src/doc/style/ownership/cell-smart.md b/src/doc/style/ownership/cell-smart.md deleted file mode 100644 index cd027cc4aaffc..0000000000000 --- a/src/doc/style/ownership/cell-smart.md +++ /dev/null @@ -1,4 +0,0 @@ -% Cells and smart pointers - -> **[FIXME]** Add guidelines about when to use Cell, RefCell, Rc and -> Arc (and how to use them together). diff --git a/src/doc/style/ownership/constructors.md b/src/doc/style/ownership/constructors.md deleted file mode 100644 index b4a1147315679..0000000000000 --- a/src/doc/style/ownership/constructors.md +++ /dev/null @@ -1,62 +0,0 @@ -% Constructors - -### Define constructors as static, inherent methods. [FIXME: needs RFC] - -In Rust, "constructors" are just a convention: - -```rust -impl Vec { - pub fn new() -> Vec { ... } -} -``` - -Constructors are static (no `self`) inherent methods for the type that they -construct. Combined with the practice of -[fully importing type names](../style/imports.md), this convention leads to -informative but concise construction: - -```rust -use vec::Vec; - -// construct a new vector -let mut v = Vec::new(); -``` - -This convention also applied to conversion constructors (prefix `from` rather -than `new`). - -### Provide constructors for passive `struct`s with defaults. [FIXME: needs RFC] - -Given the `struct` - -```rust -pub struct Config { - pub color: Color, - pub size: Size, - pub shape: Shape, -} -``` - -provide a constructor if there are sensible defaults: - -```rust -impl Config { - pub fn new() -> Config { - Config { - color: Brown, - size: Medium, - shape: Square, - } - } -} -``` - -which then allows clients to concisely override using `struct` update syntax: - -```rust -Config { color: Red, .. Config::new() }; -``` - -See the [guideline for field privacy](../features/types/README.md) for -discussion on when to create such "passive" `struct`s with public -fields. 
diff --git a/src/doc/style/ownership/destructors.md b/src/doc/style/ownership/destructors.md deleted file mode 100644 index 1cfcd78d20da8..0000000000000 --- a/src/doc/style/ownership/destructors.md +++ /dev/null @@ -1,22 +0,0 @@ -% Destructors - -Unlike constructors, destructors in Rust have a special status: they are added -by implementing `Drop` for a type, and they are automatically invoked as values -go out of scope. - -> **[FIXME]** This section needs to be expanded. - -### Destructors should not fail. [FIXME: needs RFC] - -Destructors are executed on thread failure, and in that context a failing -destructor causes the program to abort. - -Instead of failing in a destructor, provide a separate method for checking for -clean teardown, e.g. a `close` method, that returns a `Result` to signal -problems. - -### Destructors should not block. [FIXME: needs RFC] - -Similarly, destructors should not invoke blocking operations, which can make -debugging much more difficult. Again, consider providing a separate method for -preparing for an infallible, nonblocking teardown. diff --git a/src/doc/style/ownership/raii.md b/src/doc/style/ownership/raii.md deleted file mode 100644 index 244e8096a1a2f..0000000000000 --- a/src/doc/style/ownership/raii.md +++ /dev/null @@ -1,12 +0,0 @@ -% RAII - -Resource Acquisition is Initialization - -> **[FIXME]** Explain the RAII pattern and give best practices. - -### Whenever possible, tie resource access to guard scopes [FIXME] - -> **[FIXME]** Example: Mutex guards guarantee that access to the -> protected resource only happens when the guard is in scope. - -`must_use` diff --git a/src/doc/style/platform.md b/src/doc/style/platform.md deleted file mode 100644 index d29d060b69461..0000000000000 --- a/src/doc/style/platform.md +++ /dev/null @@ -1,7 +0,0 @@ -% FFI and platform-specific code **[FIXME]** - -> **[FIXME]** Not sure where this should live. 
- -When writing cross-platform code, group platform-specific code into a -module called `platform`. Avoid `#[cfg]` directives outside this -`platform` module. diff --git a/src/doc/style/safety/README.md b/src/doc/style/safety/README.md deleted file mode 100644 index 1ac6e704d23eb..0000000000000 --- a/src/doc/style/safety/README.md +++ /dev/null @@ -1,19 +0,0 @@ -% Safety and guarantees - -> **[FIXME]** Is there a better phrase than "strong guarantees" that encompasses -> both e.g. memory safety and e.g. data structure invariants? - -A _guarantee_ is a property that holds no matter what client code does, unless -the client explicitly opts out: - -* Rust guarantees memory safety and data-race freedom, with `unsafe` - blocks as an opt-out mechanism. - -* APIs in Rust often provide their own guarantees. For example, `std::str` -guarantees that its underlying buffer is valid utf-8. The `std::path::Path` type -guarantees no interior nulls. Both strings and paths provide `unsafe` mechanisms -for opting out of these guarantees (and thereby avoiding runtime checks). - -Thinking about guarantees is an essential part of writing good Rust code. The -rest of this subsection outlines some cross-cutting principles around -guarantees. diff --git a/src/doc/style/safety/lib-guarantees.md b/src/doc/style/safety/lib-guarantees.md deleted file mode 100644 index 8ee64f1806a69..0000000000000 --- a/src/doc/style/safety/lib-guarantees.md +++ /dev/null @@ -1,81 +0,0 @@ -% Library-level guarantees - -Most libraries rely on internal invariants, e.g. about their data, resource -ownership, or protocol states. In Rust, broken invariants cannot produce -segfaults, but they can still lead to wrong answers. - -### Provide library-level guarantees whenever practical. **[FIXME: needs RFC]** - -Library-level invariants should be turned into guarantees whenever -practical. They should hold no matter what the client does, modulo -explicit opt-outs. 
Depending on the kind of invariant, this can be -achieved through a combination of static and dynamic enforcement, as -described below. - -#### Static enforcement: - -Guaranteeing invariants almost always requires _hiding_, -i.e. preventing the client from directly accessing or modifying -internal data. - -For example, the representation of the `str` type is hidden, -which means that any value of type `str` must have been produced -through an API under the control of the `str` module, and these -APIs in turn ensure valid utf-8 encoding. - -Rust's type system makes it possible to provide guarantees even while -revealing more of the representation than usual. For example, the -`as_bytes()` method on `&str` gives a _read-only_ view into the -underlying buffer, which cannot be used to violate the utf-8 property. - -#### Dynamic enforcement: - -Malformed inputs from the client are hazards to library-level -guarantees, so library APIs should validate their input. - -For example, `std::str::from_utf8_owned` attempts to convert a `u8` -slice into an owned string, but dynamically checks that the slice is -valid utf-8 and returns `Err` if not. - -See -[the discussion on input validation](../features/functions-and-methods/input.md) -for more detail. - - -### Prefer static enforcement of guarantees. **[FIXME: needs RFC]** - -Static enforcement provides two strong benefits over dynamic enforcement: - -* Bugs are caught at compile time. -* There is no runtime cost. - -Sometimes purely static enforcement is impossible or impractical. In these -cases, a library should check as much as possible statically, but defer to -dynamic checks where needed. - -For example, the `std::string` module exports a `String` type with the guarantee -that all instances are valid utf-8: - -* Any _consumer_ of a `String` is statically guaranteed utf-8 contents. 
For example, - the `append` method can push a `&str` onto the end of a `String` without - checking anything dynamically, since the existing `String` and `&str` are - statically guaranteed to be in utf-8. - -* Some _producers_ of a `String` must perform dynamic checks. For example, the - `from_utf8` function attempts to convert a `Vec` into a `String`, but - dynamically checks that the contents are utf-8. - -### Provide opt-outs with caution; make them explicit. **[FIXME: needs RFC]** - -Providing library-level guarantees sometimes entails inconvenience (for static -checks) or overhead (for dynamic checks). So it is sometimes desirable to allow -clients to sidestep this checking, while promising to use the API in a way that -still provides the guarantee. Such escape hatches should only be introduced when -there is a demonstrated need for them. - -It should be trivial for clients to audit their use of the library for -escape hatches. - -See -[the discussion on input validation](../features/functions-and-methods/input.md) -for conventions on marking opt-out functions. diff --git a/src/doc/style/safety/unsafe.md b/src/doc/style/safety/unsafe.md deleted file mode 100644 index a8a50af044c29..0000000000000 --- a/src/doc/style/safety/unsafe.md +++ /dev/null @@ -1,22 +0,0 @@ -% Using `unsafe` - -### Unconditionally guarantee safety, or mark API as `unsafe`. **[FIXME: needs RFC]** - -Memory safety, type safety, and data race freedom are basic assumptions for all -Rust code. - -APIs that use `unsafe` blocks internally thus have two choices: - -* They can guarantee safety _unconditionally_ (i.e., regardless of client - behavior or inputs) and be exported as safe code. Any safety violation is then - the library's fault, not the client's fault. - -* They can export potentially unsafe functions with the `unsafe` qualifier. In - this case, the documentation should make very clear the conditions under which - safety is guaranteed. 
- -The result is that a client program can never violate safety merely by having a -bug; it must have explicitly opted out by using an `unsafe` block. - -Of the two options for using `unsafe`, creating such safe abstractions (the -first option above) is strongly preferred. diff --git a/src/doc/style/style/README.md b/src/doc/style/style/README.md deleted file mode 100644 index 87449710543c0..0000000000000 --- a/src/doc/style/style/README.md +++ /dev/null @@ -1,5 +0,0 @@ -% Style - -This section gives a set of strict rules for styling Rust code. - -> **[FIXME]** General remarks about the style guidelines diff --git a/src/doc/style/style/braces.md b/src/doc/style/style/braces.md deleted file mode 100644 index 0f61bac9fd229..0000000000000 --- a/src/doc/style/style/braces.md +++ /dev/null @@ -1,77 +0,0 @@ -% Braces, semicolons, and commas [FIXME: needs RFC] - -### Opening braces always go on the same line. - -``` rust -fn foo() { - ... -} - -fn frobnicate(a: Bar, b: Bar, - c: Bar, d: Bar) - -> Bar { - ... -} - -trait Bar { - fn baz(&self); -} - -impl Bar for Baz { - fn baz(&self) { - ... - } -} - -frob(|x| { - x.transpose() -}) -``` - -### `match` arms get braces, except for single-line expressions. - -``` rust -match foo { - bar => baz, - quux => { - do_something(); - do_something_else() - } -} -``` - -### `return` statements get semicolons. - -``` rust -fn foo() { - do_something(); - - if condition() { - return; - } - - do_something_else(); -} -``` - -### Trailing commas - -> **[FIXME]** We should have a guideline for when to include trailing -> commas in `struct`s, `match`es, function calls, etc. 
-> -> One possible rule: a trailing comma should be included whenever the -> closing delimiter appears on a separate line: - -```rust -Foo { bar: 0, baz: 1 } - -Foo { - bar: 0, - baz: 1, -} - -match a_thing { - None => 0, - Some(x) => 1, -} -``` diff --git a/src/doc/style/style/comments.md b/src/doc/style/style/comments.md deleted file mode 100644 index 3851187b52034..0000000000000 --- a/src/doc/style/style/comments.md +++ /dev/null @@ -1,104 +0,0 @@ -% Comments [FIXME: needs RFC] - -### Avoid block comments. - -Use line comments: - -``` rust -// Wait for the main thread to return, and set the process error code -// appropriately. -``` - -Instead of: - -``` rust -/* - * Wait for the main thread to return, and set the process error code - * appropriately. - */ -``` - -## Doc comments - -Doc comments are prefixed by three slashes (`///`) and indicate -documentation that you would like to be included in Rustdoc's output. -They support -[Markdown syntax](https://en.wikipedia.org/wiki/Markdown) -and are the main way of documenting your public APIs. - -The supported markdown syntax includes all of the extensions listed in the -[GitHub Flavored Markdown] -(https://help.github.com/articles/github-flavored-markdown) documentation, -plus superscripts. - -### Summary line - -The first line in any doc comment should be a single-line short sentence -providing a summary of the code. This line is used as a short summary -description throughout Rustdoc's output, so it's a good idea to keep it -short. - -### Sentence structure - -All doc comments, including the summary line, should begin with a -capital letter and end with a period, question mark, or exclamation -point. Prefer full sentences to fragments. - -The summary line should be written in -[third person singular present indicative form] -(http://en.wikipedia.org/wiki/English_verbs#Third_person_singular_present). -Basically, this means write "Returns" instead of "Return". 
- -For example: - -``` rust -/// Sets up a default runtime configuration, given compiler-supplied arguments. -/// -/// This function will block until the entire pool of M:N schedulers has -/// exited. This function also requires a local thread to be available. -/// -/// # Arguments -/// -/// * `argc` & `argv` - The argument vector. On Unix this information is used -/// by `os::args`. -/// * `main` - The initial procedure to run inside of the M:N scheduling pool. -/// Once this procedure exits, the scheduling pool will begin to shut -/// down. The entire pool (and this function) will only return once -/// all child threads have finished executing. -/// -/// # Return value -/// -/// The return value is used as the process return code. 0 on success, 101 on -/// error. -``` - -### Code snippets - -> **[FIXME]** - -### Avoid inner doc comments. - -Use inner doc comments _only_ to document crates and file-level modules: - -``` rust -//! The core library. -//! -//! The core library is a something something... -``` - -### Explain context. - -Rust doesn't have special constructors, only functions that return new -instances. These aren't visible in the automatically generated documentation -for a type, so you should specifically link to them: - -``` rust -/// An iterator that yields `None` forever after the underlying iterator -/// yields `None` once. -/// -/// These can be created through -/// [`iter.fuse()`](trait.Iterator.html#method.fuse). -pub struct Fuse { - // ... -} -``` diff --git a/src/doc/style/style/features.md b/src/doc/style/style/features.md deleted file mode 100644 index b5d0b484ccda5..0000000000000 --- a/src/doc/style/style/features.md +++ /dev/null @@ -1,13 +0,0 @@ -## `return` [FIXME: needs RFC] - -Terminate `return` statements with semicolons: - -``` rust -fn foo(bar: i32) -> Option { - if some_condition() { - return None; - } - - ... 
-} -``` diff --git a/src/doc/style/style/imports.md b/src/doc/style/style/imports.md deleted file mode 100644 index cf3fd4163a26e..0000000000000 --- a/src/doc/style/style/imports.md +++ /dev/null @@ -1,50 +0,0 @@ -% Imports [FIXME: needs RFC] - -The imports of a crate/module should consist of the following -sections, in order, with a blank space between each: - -* `extern crate` directives -* external `use` imports -* local `use` imports -* `pub use` imports - -For example: - -```rust -// Crates. -extern crate getopts; -extern crate mylib; - -// Standard library imports. -use getopts::{optopt, getopts}; -use std::os; - -// Import from a library that we wrote. -use mylib::webserver; - -// Will be reexported when we import this module. -pub use self::types::Webdata; -``` - -### Avoid `use *`, except in tests. - -Glob imports have several downsides: -* They make it harder to tell where names are bound. -* They are forwards-incompatible, since new upstream exports can clash - with existing names. - -When writing a [`test` submodule](../testing/README.md), importing `super::*` is appropriate -as a convenience. - -### Prefer fully importing types/traits while module-qualifying functions. - -For example: - -```rust -use option::Option; -use mem; - -let i: isize = mem::transmute(Option(0)); -``` - -> **[FIXME]** Add rationale. diff --git a/src/doc/style/style/naming/README.md b/src/doc/style/style/naming/README.md deleted file mode 100644 index 9d78721ad3644..0000000000000 --- a/src/doc/style/style/naming/README.md +++ /dev/null @@ -1,115 +0,0 @@ -% Naming conventions - -### General conventions [RFC #430] - -> The guidelines below were approved by [RFC #430](https://github.com/rust-lang/rfcs/pull/430). - -In general, Rust tends to use `CamelCase` for "type-level" constructs -(types and traits) and `snake_case` for "value-level" constructs. 
More -precisely: - -| Item | Convention | -| ---- | ---------- | -| Crates | `snake_case` (but prefer single word) | -| Modules | `snake_case` | -| Types | `CamelCase` | -| Traits | `CamelCase` | -| Enum variants | `CamelCase` | -| Functions | `snake_case` | -| Methods | `snake_case` | -| General constructors | `new` or `with_more_details` | -| Conversion constructors | `from_some_other_type` | -| Local variables | `snake_case` | -| Static variables | `SCREAMING_SNAKE_CASE` | -| Constant variables | `SCREAMING_SNAKE_CASE` | -| Type parameters | concise `CamelCase`, usually single uppercase letter: `T` | -| Lifetimes | short, lowercase: `'a` | - -

-In `CamelCase`, acronyms count as one word: use `Uuid` rather than -`UUID`. In `snake_case`, acronyms are lower-cased: `is_xid_start`. - -In `snake_case` or `SCREAMING_SNAKE_CASE`, a "word" should never -consist of a single letter unless it is the last "word". So, we have -`btree_map` rather than `b_tree_map`, but `PI_2` rather than `PI2`. - -### Referring to types in function/method names [RFC 344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). - -Function names often involve type names, the most common example being conversions -like `as_slice`. If the type has a purely textual name (ignoring parameters), it -is straightforward to convert between type conventions and function conventions: - -Type name | Text in methods ---------- | --------------- -`String` | `string` -`Vec` | `vec` -`YourType`| `your_type` - -Types that involve notation follow the convention below. There is some -overlap on these rules; apply the most specific applicable rule: - -Type name | Text in methods ---------- | --------------- -`&str` | `str` -`&[T]` | `slice` -`&mut [T]`| `mut_slice` -`&[u8]` | `bytes` -`&T` | `ref` -`&mut T` | `mut` -`*const T`| `ptr` -`*mut T` | `mut_ptr` - -### Avoid redundant prefixes [RFC 356] - -> The guidelines below were approved by [RFC #356](https://github.com/rust-lang/rfcs/pull/356). - -Names of items within a module should not be prefixed with that module's name: - -Prefer - -``` rust -mod foo { - pub struct Error { ... } -} -``` - -over - -``` rust -mod foo { - pub struct FooError { ... } -} -``` - -This convention avoids stuttering (like `io::IoError`). Library clients can -rename on import to avoid clashes. - -### Getter/setter methods [RFC 344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). 
- -Some data structures do not wish to provide direct access to their fields, but -instead offer "getter" and "setter" methods for manipulating the field state -(often providing checking or other functionality). - -The convention for a field `foo: T` is: - -* A method `foo(&self) -> &T` for getting the current value of the field. -* A method `set_foo(&self, val: T)` for setting the field. (The `val` argument - here may take `&T` or some other type, depending on the context.) - -Note that this convention is about getters/setters on ordinary data types, *not* -on [builder objects](../ownership/builders.html). - -### Escape hatches [FIXME] - -> **[FIXME]** Should we standardize a convention for functions that may break API -> guarantees? e.g. `ToCStr::to_c_str_unchecked` - -### Predicates - -* Simple boolean predicates should be prefixed with `is_` or another - short question word, e.g., `is_empty`. -* Common exceptions: `lt`, `gt`, and other established predicate names. diff --git a/src/doc/style/style/naming/containers.md b/src/doc/style/style/naming/containers.md deleted file mode 100644 index dfed4f9f75a58..0000000000000 --- a/src/doc/style/style/naming/containers.md +++ /dev/null @@ -1,69 +0,0 @@ -% Common container/wrapper methods [FIXME: needs RFC] - -Containers, wrappers, and cells all provide ways to access the data -they enclose. Accessor methods often have variants to access the data -by value, by reference, and by mutable reference. - -In general, the `get` family of methods is used to access contained -data without any risk of thread failure; they return `Option` as -appropriate. This name is chosen rather than names like `find` or -`lookup` because it is appropriate for a wider range of container types. 
- -#### Containers - -For a container with keys/indexes of type `K` and elements of type `V`: - -```rust -// Look up element without failing -fn get(&self, key: K) -> Option<&V> -fn get_mut(&mut self, key: K) -> Option<&mut V> - -// Convenience for .get(key).map(|elt| elt.clone()) -fn get_clone(&self, key: K) -> Option - -// Lookup element, failing if it is not found: -impl Index for Container { ... } -impl IndexMut for Container { ... } -``` - -#### Wrappers/Cells - -Prefer specific conversion functions like `as_bytes` or `into_vec` whenever -possible. Otherwise, use: - -```rust -// Extract contents without failing -fn get(&self) -> &V -fn get_mut(&mut self) -> &mut V -fn unwrap(self) -> V -``` - -#### Wrappers/Cells around `Copy` data - -```rust -// Extract contents without failing -fn get(&self) -> V -``` - -#### `Option`-like types - -Finally, we have the cases of types like `Option` and `Result`, which -play a special role for failure. - -For `Option`: - -```rust -// Extract contents or fail if not available -fn assert(self) -> V -fn expect(self, &str) -> V -``` - -For `Result`: - -```rust -// Extract the contents of Ok variant; fail if Err -fn assert(self) -> V - -// Extract the contents of Err variant; fail if Ok -fn assert_err(self) -> E -``` diff --git a/src/doc/style/style/naming/conversions.md b/src/doc/style/style/naming/conversions.md deleted file mode 100644 index 0287919c78aae..0000000000000 --- a/src/doc/style/style/naming/conversions.md +++ /dev/null @@ -1,32 +0,0 @@ -% Conversions [Rust issue #7087] - -> The guidelines below were approved by [rust issue #7087](https://github.com/rust-lang/rust/issues/7087). - -> **[FIXME]** Should we provide standard traits for conversions? Doing -> so nicely will require -> [trait reform](https://github.com/rust-lang/rfcs/pull/48) to land. 
- -Conversions should be provided as methods, with names prefixed as follows: - -| Prefix | Cost | Consumes convertee | -| ------ | ---- | ------------------ | -| `as_` | Free | No | -| `to_` | Expensive | No | -| `into_` | Variable | Yes | - -

-For example: - -* `as_bytes()` gives a `&[u8]` view into a `&str`, which is a no-op. -* `to_owned()` copies a `&str` to a new `String`. -* `into_bytes()` consumes a `String` and yields the underlying - `Vec`, which is a no-op. - -Conversions prefixed `as_` and `into_` typically _decrease abstraction_, either -exposing a view into the underlying representation (`as`) or deconstructing data -into its underlying representation (`into`). Conversions prefixed `to_`, on the -other hand, typically stay at the same level of abstraction but do some work to -change one representation into another. - -> **[FIXME]** The distinctions between conversion methods does not work -> so well for `from_` conversion constructors. Is that a problem? diff --git a/src/doc/style/style/naming/iterators.md b/src/doc/style/style/naming/iterators.md deleted file mode 100644 index 38138b5e39d3a..0000000000000 --- a/src/doc/style/style/naming/iterators.md +++ /dev/null @@ -1,32 +0,0 @@ -% Iterators - -#### Method names [RFC #199] - -> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199). - -For a container with elements of type `U`, iterator methods should be named: - -```rust -fn iter(&self) -> T // where T implements Iterator<&U> -fn iter_mut(&mut self) -> T // where T implements Iterator<&mut U> -fn into_iter(self) -> T // where T implements Iterator -``` - -The default iterator variant yields shared references `&U`. - -#### Type names [RFC #344] - -> The guidelines below were approved by [RFC #344](https://github.com/rust-lang/rfcs/pull/344). - -The name of an iterator type should be the same as the method that -produces the iterator. - -For example: - -* `iter` should yield an `Iter` -* `iter_mut` should yield an `IterMut` -* `into_iter` should yield an `IntoIter` -* `keys` should yield `Keys` - -These type names make the most sense when prefixed with their owning module, -e.g. `vec::IntoIter`. 
diff --git a/src/doc/style/style/naming/ownership.md b/src/doc/style/style/naming/ownership.md deleted file mode 100644 index 32cd8a1595afb..0000000000000 --- a/src/doc/style/style/naming/ownership.md +++ /dev/null @@ -1,34 +0,0 @@ -% Ownership variants [RFC #199] - -> The guidelines below were approved by [RFC #199](https://github.com/rust-lang/rfcs/pull/199). - -Functions often come in multiple variants: immutably borrowed, mutably -borrowed, and owned. - -The right default depends on the function in question. Variants should -be marked through suffixes. - -#### Immutably borrowed by default - -If `foo` uses/produces an immutable borrow by default, use: - -* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant. -* The `_move` suffix (e.g. `foo_move`) for the owned variant. - -#### Owned by default - -If `foo` uses/produces owned data by default, use: - -* The `_ref` suffix (e.g. `foo_ref`) for the immutably borrowed variant. -* The `_mut` suffix (e.g. `foo_mut`) for the mutably borrowed variant. - -#### Exceptions - -In the case of iterators, the moving variant can also be understood as -an `into` conversion, `into_iter`, and `for x in v.into_iter()` reads -arguably better than `for x in v.iter_move()`, so the convention is -`into_iter`. - -For mutably borrowed variants, if the `mut` qualifier is part of a -type name (e.g. `as_mut_slice`), it should appear as it would appear -in the type. diff --git a/src/doc/style/style/optional.md b/src/doc/style/style/optional.md deleted file mode 100644 index d3c2178cc993f..0000000000000 --- a/src/doc/style/style/optional.md +++ /dev/null @@ -1,3 +0,0 @@ -* - -* diff --git a/src/doc/style/style/organization.md b/src/doc/style/style/organization.md deleted file mode 100644 index 85065406d761c..0000000000000 --- a/src/doc/style/style/organization.md +++ /dev/null @@ -1,14 +0,0 @@ -% Organization [FIXME: needs RFC] - -> **[FIXME]** What else? 
- -### Reexport the most important types at the crate level. - -Crates `pub use` the most common types for convenience, so that clients do not -have to remember or write the crate's module hierarchy to use these types. - -### Define types and operations together. - -Type definitions and the functions/methods that operate on them should be -defined together in a single module, with the type appearing above the -functions/methods. diff --git a/src/doc/style/style/whitespace.md b/src/doc/style/style/whitespace.md deleted file mode 100644 index c28a723209563..0000000000000 --- a/src/doc/style/style/whitespace.md +++ /dev/null @@ -1,133 +0,0 @@ -% Whitespace [FIXME: needs RFC] - -* Lines must not exceed 99 characters. -* Use 4 spaces for indentation, _not_ tabs. -* No trailing whitespace at the end of lines or files. - -### Spaces - -* Use spaces around binary operators, including the equals sign in attributes: - -``` rust -#[deprecated = "Use `bar` instead."] -fn foo(a: usize, b: usize) -> usize { - a + b -} -``` - -* Use a space after colons and commas: - -``` rust -fn foo(a: Bar); - -MyStruct { foo: 3, bar: 4 } - -foo(bar, baz); -``` - -* Use a space after the opening and before the closing brace for - single line blocks or `struct` expressions: - -``` rust -spawn(proc() { do_something(); }) - -Point { x: 0.1, y: 0.3 } -``` - -### Line wrapping - -* For multiline function signatures, each new line should align with the - first parameter. Multiple parameters per line are permitted: - -``` rust -fn frobnicate(a: Bar, b: Bar, - c: Bar, d: Bar) - -> Bar { - ... -} - -fn foo( - a: Bar, - b: Bar) - -> Baz { - ... -} -``` - -* Multiline function invocations generally follow the same rule as for - signatures. However, if the final argument begins a new block, the - contents of the block may begin on a new line, indented one level: - -``` rust -fn foo_bar(a: Bar, b: Bar, - c: |Bar|) -> Bar { - ... 
-} - -// Same line is fine: -foo_bar(x, y, |z| { z.transpose(y) }); - -// Indented body on new line is also fine: -foo_bar(x, y, |z| { - z.quux(); - z.rotate(x) -}) -``` - -> **[FIXME]** Do we also want to allow the following? -> -> ```rust -> frobnicate( -> arg1, -> arg2, -> arg3) -> ``` -> -> This style could ease the conflict between line length and functions -> with many parameters (or long method chains). - -### Matches - -> * **[Deprecated]** If you have multiple patterns in a single `match` -> arm, write each pattern on a separate line: -> -> ``` rust -> match foo { -> bar(_) -> | baz => quux, -> x -> | y -> | z => { -> quuux -> } -> } -> ``` - -### Alignment - -Idiomatic code should not use extra whitespace in the middle of a line -to provide alignment. - - -``` rust -// Good -struct Foo { - short: f64, - really_long: f64, -} - -// Bad -struct Bar { - short: f64, - really_long: f64, -} - -// Good -let a = 0; -let radius = 7; - -// Bad -let b = 0; -let diameter = 7; -``` diff --git a/src/doc/style/testing/README.md b/src/doc/style/testing/README.md deleted file mode 100644 index a21f69414d326..0000000000000 --- a/src/doc/style/testing/README.md +++ /dev/null @@ -1,5 +0,0 @@ -% Testing - -> **[FIXME]** Add some general remarks about when and how to unit -> test, versus other kinds of testing. What are our expectations for -> Rust's core libraries? diff --git a/src/doc/style/testing/unit.md b/src/doc/style/testing/unit.md deleted file mode 100644 index dbbe9fc3ac6da..0000000000000 --- a/src/doc/style/testing/unit.md +++ /dev/null @@ -1,30 +0,0 @@ -% Unit testing - -Unit tests should live in a `tests` submodule at the bottom of the module they -test. Mark the `tests` submodule with `#[cfg(test)]` so it is only compiled when -testing. - -The `tests` module should contain: - -* Imports needed only for testing. -* Functions marked with `#[test]` striving for full coverage of the parent module's - definitions. -* Auxiliary functions needed for writing the tests. 
- -For example: - -``` rust -// Excerpt from std::str - -#[cfg(test)] -mod tests { - #[test] - fn test_eq() { - assert!((eq(&"".to_owned(), &"".to_owned()))); - assert!((eq(&"foo".to_owned(), &"foo".to_owned()))); - assert!((!eq(&"foo".to_owned(), &"bar".to_owned()))); - } -} -``` - -> **[FIXME]** add details about useful macros for testing, e.g. `assert!` diff --git a/src/doc/style/todo.md b/src/doc/style/todo.md deleted file mode 100644 index 28ef2a1832d8b..0000000000000 --- a/src/doc/style/todo.md +++ /dev/null @@ -1,5 +0,0 @@ -* [Containers and iteration]() -* [The visitor pattern]() -* [Concurrency]() -* [Documentation]() -* [Macros]() diff --git a/src/doc/uptack.tex b/src/doc/uptack.tex deleted file mode 100644 index 32158ea549627..0000000000000 --- a/src/doc/uptack.tex +++ /dev/null @@ -1,2 +0,0 @@ -\usepackage{newunicodechar} -\newunicodechar⊥{{$\bot$}} diff --git a/src/doc/version_info.html.template b/src/doc/version_info.html.template index 2fda57923cd8f..7215e4f13c9bb 100644 --- a/src/doc/version_info.html.template +++ b/src/doc/version_info.html.template @@ -1,5 +1,5 @@

-
+ Rust logo
Rust VERSION
SHORT_HASH diff --git a/src/error-index-generator/main.rs b/src/error-index-generator/main.rs deleted file mode 100644 index 4b10b02f2d407..0000000000000 --- a/src/error-index-generator/main.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![feature(rustc_private, rustdoc)] - -extern crate syntax; -extern crate rustdoc; -extern crate serialize as rustc_serialize; - -use std::collections::BTreeMap; -use std::fs::{read_dir, File}; -use std::io::{Read, Write}; -use std::env; -use std::path::Path; -use std::error::Error; - -use syntax::diagnostics::metadata::{get_metadata_dir, ErrorMetadataMap}; - -use rustdoc::html::markdown::Markdown; -use rustc_serialize::json; - -/// Load all the metadata files from `metadata_dir` into an in-memory map. -fn load_all_errors(metadata_dir: &Path) -> Result> { - let mut all_errors = BTreeMap::new(); - - for entry in try!(read_dir(metadata_dir)) { - let path = try!(entry).path(); - - let mut metadata_str = String::new(); - try!(File::open(&path).and_then(|mut f| f.read_to_string(&mut metadata_str))); - - let some_errors: ErrorMetadataMap = try!(json::decode(&metadata_str)); - - for (err_code, info) in some_errors { - all_errors.insert(err_code, info); - } - } - - Ok(all_errors) -} - -/// Output an HTML page for the errors in `err_map` to `output_path`. -fn render_error_page(err_map: &ErrorMetadataMap, output_path: &Path) -> Result<(), Box> { - let mut output_file = try!(File::create(output_path)); - - try!(write!(&mut output_file, -r##" - - -Rust Compiler Error Index - - - - - - - -"## - )); - - try!(write!(&mut output_file, "

Rust Compiler Error Index

\n")); - - for (err_code, info) in err_map { - // Enclose each error in a div so they can be shown/hidden en masse. - let desc_desc = match info.description { - Some(_) => "error-described", - None => "error-undescribed", - }; - let use_desc = match info.use_site { - Some(_) => "error-used", - None => "error-unused", - }; - try!(write!(&mut output_file, "
", desc_desc, use_desc)); - - // Error title (with self-link). - try!(write!(&mut output_file, - "

{0}

\n", - err_code)); - - // Description rendered as markdown. - match info.description { - Some(ref desc) => try!(write!(&mut output_file, "{}", Markdown(desc))), - None => try!(write!(&mut output_file, "

No description.

\n")), - } - - try!(write!(&mut output_file, "
\n")); - } - - try!(write!(&mut output_file, "\n")); - - Ok(()) -} - -fn main_with_result() -> Result<(), Box> { - let build_arch = try!(env::var("CFG_BUILD")); - let metadata_dir = get_metadata_dir(&build_arch); - let err_map = try!(load_all_errors(&metadata_dir)); - try!(render_error_page(&err_map, Path::new("doc/error-index.html"))); - Ok(()) -} - -fn main() { - if let Err(e) = main_with_result() { - panic!("{}", e.description()); - } -} diff --git a/src/etc/CONFIGS.md b/src/etc/CONFIGS.md index 74837a06faecd..542b7bf797b35 100644 --- a/src/etc/CONFIGS.md +++ b/src/etc/CONFIGS.md @@ -6,11 +6,8 @@ These are some links to repos with configs which ease the use of rust. * [rust.vim](https://github.com/rust-lang/rust.vim) * [emacs rust-mode](https://github.com/rust-lang/rust-mode) +* [sublime-rust](https://github.com/rust-lang/sublime-rust) * [gedit-config](https://github.com/rust-lang/gedit-config) * [kate-config](https://github.com/rust-lang/kate-config) * [nano-config](https://github.com/rust-lang/nano-config) * [zsh-config](https://github.com/rust-lang/zsh-config) - -## Community-maintained Configs - -* [.editorconfig](https://gist.github.com/derhuerst/c9d1b9309e308d9851fa) ([what is this?](http://editorconfig.org/)) diff --git a/src/etc/Dockerfile b/src/etc/Dockerfile new file mode 100644 index 0000000000000..83d54789ff357 --- /dev/null +++ b/src/etc/Dockerfile @@ -0,0 +1,27 @@ +FROM ubuntu:xenial + +# curl +# Download stage0, see src/bootstrap/bootstrap.py +# g++ +# Compile LLVM binding in src/rustllvm +# gdb +# Used to run tests in src/test/debuginfo +# git +# Get commit hash and commit date in version string +# make +# Run build scripts in mk +# libedit-dev zlib1g-dev +# LLVM dependencies as packaged in Ubuntu +# (They are optional, but Ubuntu package enables them) +# llvm-3.7-dev (installed by llvm-3.7-tools) +# LLVM +# 
llvm-3.7-tools +# FileCheck is used to run tests in src/test/codegen + +RUN apt-get update && apt-get -y install \ + curl g++ gdb git make \ + libedit-dev zlib1g-dev \ + llvm-3.7-tools cmake + +RUN mkdir /build +WORKDIR /build diff --git a/src/etc/char_private.py b/src/etc/char_private.py new file mode 100644 index 0000000000000..9d15f98e06709 --- /dev/null +++ b/src/etc/char_private.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# +# Copyright 2011-2016 The Rust Project Developers. See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +# This script uses the following Unicode tables: +# - UnicodeData.txt + + +from collections import namedtuple +import csv +import os +import subprocess + +NUM_CODEPOINTS=0x110000 + +def to_ranges(iter): + current = None + for i in iter: + if current is None or i != current[1] or i in (0x10000, 0x20000): + if current is not None: + yield tuple(current) + current = [i, i + 1] + else: + current[1] += 1 + if current is not None: + yield tuple(current) + +def get_escaped(codepoints): + for c in codepoints: + if (c.class_ or "Cn") in "Cc Cf Cs Co Cn Zl Zp Zs".split() and c.value != ord(' '): + yield c.value + +def get_file(f): + try: + return open(os.path.basename(f)) + except FileNotFoundError: + subprocess.run(["curl", "-O", f], check=True) + return open(os.path.basename(f)) + +Codepoint = namedtuple('Codepoint', 'value class_') + +def get_codepoints(f): + r = csv.reader(f, delimiter=";") + prev_codepoint = 0 + class_first = None + for row in r: + codepoint = int(row[0], 16) + name = row[1] + class_ = row[2] + + if class_first is not None: + if not name.endswith("Last>"): + raise ValueError("Missing Last after First") + + for c in range(prev_codepoint + 1, codepoint): + yield Codepoint(c, 
class_first) + + class_first = None + if name.endswith("First>"): + class_first = class_ + + yield Codepoint(codepoint, class_) + prev_codepoint = codepoint + + if class_first != None: + raise ValueError("Missing Last after First") + + for c in range(prev_codepoint + 1, NUM_CODEPOINTS): + yield Codepoint(c, None) + +def main(): + file = get_file("http://www.unicode.org/Public/UNIDATA/UnicodeData.txt") + + codepoints = get_codepoints(file) + + CUTOFF=0x10000 + singletons0 = [] + singletons1 = [] + normal0 = [] + normal1 = [] + extra = [] + + for a, b in to_ranges(get_escaped(codepoints)): + if a > 2 * CUTOFF: + extra.append((a, b - a)) + elif a == b - 1: + if a & CUTOFF: + singletons1.append(a & ~CUTOFF) + else: + singletons0.append(a) + elif a == b - 2: + if a & CUTOFF: + singletons1.append(a & ~CUTOFF) + singletons1.append((a + 1) & ~CUTOFF) + else: + singletons0.append(a) + singletons0.append(a + 1) + else: + if a >= 2 * CUTOFF: + extra.append((a, b - a)) + elif a & CUTOFF: + normal1.append((a & ~CUTOFF, b - a)) + else: + normal0.append((a, b - a)) + + print("""\ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// NOTE: The following code was generated by "src/etc/char_private.py", +// do not edit directly! 
+ +use slice::SliceExt; + +fn check(x: u16, singletons: &[u16], normal: &[u16]) -> bool { + for &s in singletons { + if x == s { + return false; + } else if x < s { + break; + } + } + for w in normal.chunks(2) { + let start = w[0]; + let len = w[1]; + let difference = (x as i32) - (start as i32); + if 0 <= difference { + if difference < len as i32 { + return false; + } + } else { + break; + } + } + true +} + +pub fn is_printable(x: char) -> bool { + let x = x as u32; + let lower = x as u16; + if x < 0x10000 { + check(lower, SINGLETONS0, NORMAL0) + } else if x < 0x20000 { + check(lower, SINGLETONS1, NORMAL1) + } else {\ +""") + for a, b in extra: + print(" if 0x{:x} <= x && x < 0x{:x} {{".format(a, a + b)) + print(" return false;") + print(" }") + print("""\ + true + } +}\ +""") + print() + print("const SINGLETONS0: &'static [u16] = &[") + for s in singletons0: + print(" 0x{:x},".format(s)) + print("];") + print("const SINGLETONS1: &'static [u16] = &[") + for s in singletons1: + print(" 0x{:x},".format(s)) + print("];") + print("const NORMAL0: &'static [u16] = &[") + for a, b in normal0: + print(" 0x{:x}, 0x{:x},".format(a, b)) + print("];") + print("const NORMAL1: &'static [u16] = &[") + for a, b in normal1: + print(" 0x{:x}, 0x{:x},".format(a, b)) + print("];") + +if __name__ == '__main__': + main() diff --git a/src/etc/check-binaries.py b/src/etc/check-binaries.py deleted file mode 100755 index 91c01b178075d..0000000000000 --- a/src/etc/check-binaries.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -import sys - -offenders = sys.argv[1:] -if len(offenders) > 0: - print("Binaries checked into src:") - for offender in offenders: - print(offender) - sys.exit(1) diff --git a/src/etc/cmathconsts.c b/src/etc/cmathconsts.c deleted file mode 100644 index f523b4c93858b..0000000000000 --- a/src/etc/cmathconsts.c +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -// -// -// This is a helper C program for generating required math constants -// -// Should only be required when porting to a different target architecture -// (or c compiler/libmath) -// -// Call with -// and ensure that libcore/cmath.rs complies to the output -// -// Requires a printf that supports "%a" specifiers -// - -#include -#include -#include - -// must match std::ctypes - -#define C_FLT(x) (float)x -#define C_DBL(x) (double)x - -int main(int argc, char** argv) { - if (argc != 3) { - fprintf(stderr, "%s \n", argv[0]); - return 1; - } - char* c_flt = argv[1]; - char* c_dbl = argv[2]; - - printf("mod c_float_math_consts {\n"); - printf(" const pi: c_float = %a_%s;\n", C_FLT(M_PI), c_flt); - printf(" const div_1_pi: c_float = %a_%s;\n", C_FLT(M_1_PI), c_flt); - printf(" const div_2_pi: c_float = %a_%s;\n", C_FLT(M_2_PI), c_flt); - printf(" const div_pi_2: c_float = %a_%s;\n", C_FLT(M_PI_2), c_flt); - printf(" const div_pi_4: c_float = %a_%s;\n", C_FLT(M_PI_4), c_flt); - printf(" const div_2_sqrtpi: c_float = %a_%s;\n", - C_FLT(M_2_SQRTPI), c_flt); - printf(" const e: c_float = %a_%s;\n", C_FLT(M_E), c_flt); - printf(" const log2_e: c_float = %a_%s;\n", C_FLT(M_LOG2E), c_flt); - printf(" const log10_e: c_float = %a_%s;\n", C_FLT(M_LOG10E), c_flt); - printf(" const ln_2: 
c_float = %a_%s;\n", C_FLT(M_LN2), c_flt); - printf(" const ln_10: c_float = %a_%s;\n", C_FLT(M_LN10), c_flt); - printf(" const sqrt2: c_float = %a_%s;\n", C_FLT(M_SQRT2), c_flt); - printf(" const div_1_sqrt2: c_float = %a_%s;\n", - C_FLT(M_SQRT1_2), c_flt); - printf("}\n\n"); - - printf("mod c_double_math_consts {\n"); - printf(" const pi: c_double = %a_%s;\n", C_DBL(M_PI), c_dbl); - printf(" const div_1_pi: c_double = %a_%s;\n", C_DBL(M_1_PI), c_dbl); - printf(" const div_2_pi: c_double = %a_%s;\n", C_DBL(M_2_PI), c_dbl); - printf(" const div_pi_2: c_double = %a_%s;\n", C_DBL(M_PI_2), c_dbl); - printf(" const div_pi_4: c_double = %a_%s;\n", C_DBL(M_PI_4), c_dbl); - printf(" const div_2_sqrtpi: c_double = %a_%s;\n", - C_DBL(M_2_SQRTPI), c_dbl); - printf(" const e: c_double = %a_%s;\n", C_DBL(M_E), c_dbl); - printf(" const log2_e: c_double = %a_%s;\n", C_DBL(M_LOG2E), c_dbl); - printf(" const log10_e: c_double = %a_%s;\n", C_DBL(M_LOG10E), c_dbl); - printf(" const ln_2: c_double = %a_%s;\n", C_DBL(M_LN2), c_dbl); - printf(" const ln_10: c_double = %a_%s;\n", C_DBL(M_LN10), c_dbl); - printf(" const sqrt2: c_double = %a_%s;\n", C_DBL(M_SQRT2), c_dbl); - printf(" const div_1_sqrt2: c_double = %a_%s;\n", - C_DBL(M_SQRT1_2), c_dbl); - printf("}\n\n"); - - printf("mod c_float_targ_consts {\n"); - printf(" const radix: uint = %uu;\n", FLT_RADIX); - printf(" const mantissa_digits: uint = %uu;\n", FLT_MANT_DIG); - printf(" const digits: uint = %uu;\n", FLT_DIG); - printf(" const min_exp: int = %i;\n", FLT_MIN_EXP); - printf(" const max_exp: int = %i;\n", FLT_MAX_EXP); - printf(" const min_10_exp: int = %i;\n", FLT_MIN_10_EXP); - printf(" const max_10_exp: int = %i;\n", FLT_MAX_10_EXP); - printf(" const min_value: c_float = %a_%s;\n", C_FLT(FLT_MIN), c_flt); - printf(" const max_value: c_float = %a_%s;\n", C_FLT(FLT_MAX), c_flt); - printf(" const epsilon: c_float = %a_%s;\n", C_FLT(FLT_EPSILON), c_flt); - printf("}\n\n"); - - printf("mod c_double_targ_consts {\n"); - 
printf(" const radix: uint = %uu;\n", FLT_RADIX); - printf(" const mantissa_digits: uint = %uu;\n", DBL_MANT_DIG); - printf(" const digits: uint = %uu;\n", DBL_DIG); - printf(" const min_exp: int = %i;\n", DBL_MIN_EXP); - printf(" const max_exp: int = %i;\n", DBL_MAX_EXP); - printf(" const min_10_exp: int = %i;\n", DBL_MIN_10_EXP); - printf(" const max_10_exp: int = %i;\n", DBL_MAX_10_EXP); - printf(" const min_value: c_double = %a_%s;\n", C_DBL(DBL_MIN), c_dbl); - printf(" const max_value: c_double = %a_%s;\n", C_DBL(DBL_MAX), c_dbl); - printf(" const epsilon: c_double = %a_%s;\n", C_DBL(DBL_EPSILON), c_dbl); - printf("}\n"); - - return 0; -} diff --git a/src/etc/debugger_pretty_printers_common.py b/src/etc/debugger_pretty_printers_common.py index 06a83c75936fe..eb562877c8573 100644 --- a/src/etc/debugger_pretty_printers_common.py +++ b/src/etc/debugger_pretty_printers_common.py @@ -139,7 +139,7 @@ def __classify_struct(self): return TYPE_KIND_STR_SLICE # REGULAR SLICE - if (unqualified_type_name.startswith("&[") and + if (unqualified_type_name.startswith(("&[", "&mut [")) and unqualified_type_name.endswith("]") and self.__conforms_to_field_layout(SLICE_FIELD_NAMES)): return TYPE_KIND_SLICE @@ -324,3 +324,20 @@ def extract_length_and_ptr_from_slice(slice_val): assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (length, data_ptr) + +UNQUALIFIED_TYPE_MARKERS = frozenset(["(", "[", "&", "*"]) + +def extract_type_name(qualified_type_name): + """Extracts the type name from a fully qualified path""" + if qualified_type_name[0] in UNQUALIFIED_TYPE_MARKERS: + return qualified_type_name + + end_of_search = qualified_type_name.find("<") + if end_of_search < 0: + end_of_search = len(qualified_type_name) + + index = qualified_type_name.rfind("::", 0, end_of_search) + if index < 0: + return qualified_type_name + else: + return qualified_type_name[index + 2:] diff --git a/src/etc/errorck.py b/src/etc/errorck.py deleted file mode 100644 index 
48736542f20c7..0000000000000 --- a/src/etc/errorck.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# Digs error codes out of files named 'diagnostics.rs' across -# the tree, and ensures thare are no duplicates. - -import sys -import os -import re - -if len(sys.argv) < 2: - print("usage: errorck.py ") - sys.exit(1) - -src_dir = sys.argv[1] -errcode_map = {} -error_re = re.compile("(E\d\d\d\d)") - -# In the register_long_diagnostics! macro, entries look like this: -# -# EXXXX: r##" -# -# "##, -# -# These two variables are for detecting the beginning and end of diagnostic -# messages so that duplicate error codes are not reported when a code occurs -# inside a diagnostic message -long_diag_begin = "r##\"" -long_diag_end = "\"##" - -for (dirpath, dirnames, filenames) in os.walk(src_dir): - if "src/test" in dirpath or "src/llvm" in dirpath: - # Short circuit for fast - continue - - for filename in filenames: - if filename != "diagnostics.rs": - continue - - path = os.path.join(dirpath, filename) - - with open(path, 'r') as f: - inside_long_diag = False - for line_num, line in enumerate(f, start=1): - if inside_long_diag: - # Skip duplicate error code checking for this line - if long_diag_end in line: - inside_long_diag = False - continue - - match = error_re.search(line) - if match: - errcode = match.group(1) - new_record = [(errcode, path, line_num, line)] - existing = errcode_map.get(errcode) - if existing is not None: - # This is a dupe - errcode_map[errcode] = existing + new_record - else: - errcode_map[errcode] = new_record - - if long_diag_begin in line: - inside_long_diag = True - -errors = False -all_errors = [] - -for errcode, 
entries in errcode_map.items(): - all_errors.append(entries[0][0]) - if len(entries) > 1: - print("error: duplicate error code " + errcode) - for entry in entries: - print("{1}: {2}\n{3}".format(*entry)) - errors = True - -print -print("* {0} error codes".format(len(errcode_map))) -print("* highest error code: " + max(all_errors)) -print - -if errors: - sys.exit(1) diff --git a/src/etc/featureck.py b/src/etc/featureck.py deleted file mode 100644 index d6cc25177e4ae..0000000000000 --- a/src/etc/featureck.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -# This script does a tree-wide sanity checks against stability -# attributes, currently: -# * For all feature_name/level pairs the 'since' field is the same -# * That no features are both stable and unstable. 
-# * That lib features don't have the same name as lang features -# unless they are on the 'joint_features' whitelist -# * That features that exist in both lang and lib and are stable -# since the same version -# * Prints information about features - -import sys -import os -import re -import codecs - -if len(sys.argv) < 2: - print("usage: featureck.py ") - sys.exit(1) - -src_dir = sys.argv[1] - -# Features that are allowed to exist in both the language and the library -joint_features = [ ] - -# Grab the list of language features from the compiler -language_gate_statuses = [ "Active", "Deprecated", "Removed", "Accepted" ] -feature_gate_source = os.path.join(src_dir, "libsyntax", "feature_gate.rs") -language_features = [] -language_feature_names = [] -with open(feature_gate_source, 'r') as f: - for line in f: - original_line = line - line = line.strip() - is_feature_line = False - for status in language_gate_statuses: - if status in line and line.startswith("("): - is_feature_line = True - - if is_feature_line: - # turn ` ("foo", "1.0.0", Some(10), Active)` into - # `"foo", "1.0.0", Some(10), Active` - line = line.strip(' ,()') - parts = line.split(",") - if len(parts) != 4: - print("error: unexpected number of components in line: " + original_line) - sys.exit(1) - feature_name = parts[0].strip().replace('"', "") - since = parts[1].strip().replace('"', "") - issue = parts[2].strip() - status = parts[3].strip() - assert len(feature_name) > 0 - assert len(since) > 0 - assert len(issue) > 0 - assert len(status) > 0 - - language_feature_names += [feature_name] - language_features += [(feature_name, since, issue, status)] - -assert len(language_features) > 0 - -errors = False - -lib_features = { } -lib_features_and_level = { } -for (dirpath, dirnames, filenames) in os.walk(src_dir): - # Don't look for feature names in tests - if "src/test" in dirpath: - continue - - # Takes a long time to traverse LLVM - if "src/llvm" in dirpath: - continue - - for filename in filenames: 
- if not filename.endswith(".rs"): - continue - - path = os.path.join(dirpath, filename) - with codecs.open(filename=path, mode='r', encoding="utf-8") as f: - line_num = 0 - for line in f: - line_num += 1 - level = None - if "[unstable(" in line: - level = "unstable" - elif "[stable(" in line: - level = "stable" - else: - continue - - # This is a stability attribute. For the purposes of this - # script we expect both the 'feature' and 'since' attributes on - # the same line, e.g. - # `#[unstable(feature = "foo", since = "1.0.0")]` - - p = re.compile('(unstable|stable).*feature *= *"(\w*)"') - m = p.search(line) - if not m is None: - feature_name = m.group(2) - since = None - if re.compile("\[ *stable").search(line) is not None: - pp = re.compile('since *= *"([\w\.]*)"') - mm = pp.search(line) - if not mm is None: - since = mm.group(1) - else: - print("error: misformed stability attribute") - print("line %d of %:" % (line_num, path)) - print(line) - errors = True - - lib_features[feature_name] = feature_name - if lib_features_and_level.get((feature_name, level)) is None: - # Add it to the observed features - lib_features_and_level[(feature_name, level)] = \ - (since, path, line_num, line) - else: - # Verify that for this combination of feature_name and level the 'since' - # attribute matches. 
- (expected_since, source_path, source_line_num, source_line) = \ - lib_features_and_level.get((feature_name, level)) - if since != expected_since: - print("error: mismatch in %s feature '%s'" % (level, feature_name)) - print("line %d of %s:" % (source_line_num, source_path)) - print(source_line) - print("line %d of %s:" % (line_num, path)) - print(line) - errors = True - - # Verify that this lib feature doesn't duplicate a lang feature - if feature_name in language_feature_names: - print("error: lib feature '%s' duplicates a lang feature" % (feature_name)) - print("line %d of %s:" % (line_num, path)) - print(line) - errors = True - - else: - print("error: misformed stability attribute") - print("line %d of %s:" % (line_num, path)) - print(line) - errors = True - -# Merge data about both lists -# name, lang, lib, status, stable since - -language_feature_stats = {} - -for f in language_features: - name = f[0] - lang = True - lib = False - status = "unstable" - stable_since = None - - if f[3] == "Accepted": - status = "stable" - if status == "stable": - stable_since = f[1] - - language_feature_stats[name] = (name, lang, lib, status, stable_since) - -lib_feature_stats = {} - -for f in lib_features: - name = f - lang = False - lib = True - status = "unstable" - stable_since = None - - is_stable = lib_features_and_level.get((name, "stable")) is not None - is_unstable = lib_features_and_level.get((name, "unstable")) is not None - - if is_stable and is_unstable: - print("error: feature '%s' is both stable and unstable" % (name)) - errors = True - - if is_stable: - status = "stable" - stable_since = lib_features_and_level[(name, "stable")][0] - elif is_unstable: - status = "unstable" - - lib_feature_stats[name] = (name, lang, lib, status, stable_since) - -# Check for overlap in two sets -merged_stats = { } - -for name in lib_feature_stats: - if language_feature_stats.get(name) is not None: - if not name in joint_features: - print("error: feature '%s' is both a lang and lib 
feature but not whitelisted" % (name)) - errors = True - lang_status = language_feature_stats[name][3] - lib_status = lib_feature_stats[name][3] - lang_stable_since = language_feature_stats[name][4] - lib_stable_since = lib_feature_stats[name][4] - - if lang_status != lib_status and lib_status != "rustc_deprecated": - print("error: feature '%s' has lang status %s " + - "but lib status %s" % (name, lang_status, lib_status)) - errors = True - - if lang_stable_since != lib_stable_since: - print("error: feature '%s' has lang stable since %s " + - "but lib stable since %s" % (name, lang_stable_since, lib_stable_since)) - errors = True - - merged_stats[name] = (name, True, True, lang_status, lang_stable_since) - - del language_feature_stats[name] - del lib_feature_stats[name] - -if errors: - sys.exit(1) - -# Finally, display the stats -stats = {} -stats.update(language_feature_stats) -stats.update(lib_feature_stats) -stats.update(merged_stats) -lines = [] -for s in stats: - s = stats[s] - type_ = "lang" - if s[1] and s[2]: - type_ = "lang/lib" - elif s[2]: - type_ = "lib" - line = "{: <32}".format(s[0]) + \ - "{: <8}".format(type_) + \ - "{: <12}".format(s[3]) + \ - "{: <8}".format(str(s[4])) - lines += [line] - -lines.sort() - -print -for line in lines: - print("* " + line) -print diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index f93f3490215d1..afac8d6bbaefc 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -10,8 +10,15 @@ import gdb import re +import sys import debugger_pretty_printers_common as rustpp +# We want a version of `range` which doesn't allocate an intermediate list, +# specifically it should use a lazy iterator. In Python 2 this was `xrange`, but +# if we're running with Python 3 then we need to use `range` instead. 
+if sys.version_info[0] >= 3: + xrange = range + #=============================================================================== # GDB Pretty Printing Module for Rust #=============================================================================== @@ -29,7 +36,7 @@ def get_unqualified_type_name(self): if tag is None: return tag - return tag.replace("&'static ", "&") + return rustpp.extract_type_name(tag).replace("&'static ", "&") def get_dwarf_type_kind(self): if self.ty.code == gdb.TYPE_CODE_STRUCT: @@ -70,6 +77,8 @@ def get_child_at_index(self, index): return child def as_integer(self): + if self.gdb_val.type.code == gdb.TYPE_CODE_PTR: + return int(str(self.gdb_val), 0) return int(self.gdb_val) def get_wrapped_value(self): @@ -161,7 +170,7 @@ def rust_pretty_printer_lookup_function(gdb_val): #=------------------------------------------------------------------------------ # Pretty Printer Classes #=------------------------------------------------------------------------------ -class RustStructPrinter: +class RustStructPrinter(object): def __init__(self, val, omit_first_field, omit_type_name, is_tuple_like): self.__val = val self.__omit_first_field = omit_first_field @@ -196,11 +205,12 @@ def display_hint(self): return "" -class RustSlicePrinter: +class RustSlicePrinter(object): def __init__(self, val): self.__val = val - def display_hint(self): + @staticmethod + def display_hint(): return "array" def to_string(self): @@ -209,18 +219,15 @@ def to_string(self): ("(len: %i)" % length)) def children(self): - cs = [] (length, data_ptr) = rustpp.extract_length_and_ptr_from_slice(self.__val) assert data_ptr.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_PTR raw_ptr = data_ptr.get_wrapped_value() - for index in range(0, length): - cs.append((str(index), (raw_ptr + index).dereference())) - - return cs + for index in xrange(0, length): + yield (str(index), (raw_ptr + index).dereference()) -class RustStringSlicePrinter: +class RustStringSlicePrinter(object): def 
__init__(self, val): self.__val = val @@ -230,11 +237,12 @@ def to_string(self): return '"%s"' % raw_ptr.string(encoding="utf-8", length=length) -class RustStdVecPrinter: +class RustStdVecPrinter(object): def __init__(self, val): self.__val = val - def display_hint(self): + @staticmethod + def display_hint(): return "array" def to_string(self): @@ -243,15 +251,13 @@ def to_string(self): ("(len: %i, cap: %i)" % (length, cap))) def children(self): - cs = [] (length, data_ptr, cap) = rustpp.extract_length_ptr_and_cap_from_std_vec(self.__val) gdb_ptr = data_ptr.get_wrapped_value() - for index in range(0, length): - cs.append((str(index), (gdb_ptr + index).dereference())) - return cs + for index in xrange(0, length): + yield (str(index), (gdb_ptr + index).dereference()) -class RustStdStringPrinter: +class RustStdStringPrinter(object): def __init__(self, val): self.__val = val @@ -262,7 +268,7 @@ def to_string(self): length=length) -class RustCStyleVariantPrinter: +class RustCStyleVariantPrinter(object): def __init__(self, val): assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_ENUM self.__val = val @@ -271,7 +277,7 @@ def to_string(self): return str(self.__val.get_wrapped_value()) -class IdentityPrinter: +class IdentityPrinter(object): def __init__(self, string): self.string = string diff --git a/src/etc/generate-deriving-span-tests.py b/src/etc/generate-deriving-span-tests.py index 790fc8942873e..6642da858e551 100755 --- a/src/etc/generate-deriving-span-tests.py +++ b/src/etc/generate-deriving-span-tests.py @@ -37,8 +37,6 @@ // This file was auto-generated using 'src/etc/generate-deriving-span-tests.py' -extern crate rand; - {error_deriving} struct Error; {code} @@ -106,7 +104,6 @@ def write_file(name, string): ALL = STRUCT | ENUM traits = { - 'Zero': (STRUCT, [], 1), 'Default': (STRUCT, [], 1), 'FromPrimitive': (0, [], 0), # only works for C-like enums @@ -116,7 +113,7 @@ def write_file(name, string): for (trait, supers, errs) in [('Clone', [], 1), 
('PartialEq', [], 2), - ('PartialOrd', ['PartialEq'], 8), + ('PartialOrd', ['PartialEq'], 9), ('Eq', ['PartialEq'], 1), ('Ord', ['Eq', 'PartialOrd', 'PartialEq'], 1), ('Debug', [], 1), diff --git a/src/etc/generate-keyword-tests.py b/src/etc/generate-keyword-tests.py index 937c231a473e9..e53d6c718c155 100755 --- a/src/etc/generate-keyword-tests.py +++ b/src/etc/generate-keyword-tests.py @@ -34,15 +34,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// compile-flags: -Z parse-only + // This file was auto-generated using 'src/etc/generate-keyword-tests.py %s' fn main() { - let %s = "foo"; //~ error: ident + let %s = "foo"; //~ error: expected pattern, found keyword `%s` } """ test_dir = os.path.abspath( - os.path.join(os.path.dirname(__file__), '../test/compile-fail') + os.path.join(os.path.dirname(__file__), '../test/parse-fail') ) for kw in sys.argv[1:]: @@ -53,7 +55,7 @@ os.chmod(test_file, stat.S_IWUSR) with open(test_file, 'wt') as f: - f.write(template % (datetime.datetime.now().year, kw, kw)) + f.write(template % (datetime.datetime.now().year, kw, kw, kw)) # mark file read-only os.chmod(test_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) diff --git a/src/etc/get-snapshot.py b/src/etc/get-snapshot.py deleted file mode 100755 index 26246bd2c32a3..0000000000000 --- a/src/etc/get-snapshot.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -import os -import tarfile -import shutil -import sys -from snapshot import * - - -def unpack_snapshot(triple, dl_path): - print("opening snapshot " + dl_path) - tar = tarfile.open(dl_path) - kernel = get_kernel(triple) - - stagep = os.path.join(triple, "stage0") - - # Remove files from prior unpackings, since snapshot rustc may not - # be able to disambiguate between multiple candidate libraries. - # (Leave dirs in place since extracting step still needs them.) - for root, _, files in os.walk(stagep): - for f in files: - print("removing " + os.path.join(root, f)) - os.unlink(os.path.join(root, f)) - - for p in tar.getnames(): - name = p.replace("rust-stage0/", "", 1) - - fp = os.path.join(stagep, name) - print("extracting " + p) - tar.extract(p, download_unpack_base) - tp = os.path.join(download_unpack_base, p) - if os.path.isdir(tp) and os.path.exists(fp): - continue - shutil.move(tp, fp) - tar.close() - shutil.rmtree(download_unpack_base) - - -# Main - -# this gets called with one or two arguments: -# The first is the O/S triple. -# The second is an optional path to the snapshot to use. - -def main(argv): - triple = argv[1] - if len(argv) == 3: - dl_path = argv[2] - else: - snap = determine_curr_snapshot(triple) - dl = os.path.join(download_dir_base, snap) - url = download_url_base + "/" + snap - print("determined most recent snapshot: " + snap) - - if (not os.path.exists(dl)): - get_url_to_file(url, dl) - - if (snap_filename_hash_part(snap) == hash_file(dl)): - print("got download with ok hash") - else: - raise Exception("bad hash on download") - - dl_path = os.path.join(download_dir_base, snap) - - unpack_snapshot(triple, dl_path) - -if __name__ == '__main__': - main(sys.argv) diff --git a/src/etc/get-stage0.py b/src/etc/get-stage0.py new file mode 100644 index 0000000000000..127251cc802c9 --- /dev/null +++ b/src/etc/get-stage0.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# +# Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +# file at the top-level directory of this distribution and at +# http://rust-lang.org/COPYRIGHT. +# +# Licensed under the Apache License, Version 2.0 or the MIT license +# , at your +# option. This file may not be copied, modified, or distributed +# except according to those terms. + +import os +import sys + +path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../bootstrap")) +sys.path.append(path) + +import bootstrap + +def main(triple): + src_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) + data = bootstrap.stage0_data(src_root) + + channel, date = data['rustc'].split('-', 1) + + dl_dir = 'dl' + if not os.path.exists(dl_dir): + os.makedirs(dl_dir) + + filename = 'rustc-{}-{}.tar.gz'.format(channel, triple) + url = 'https://static.rust-lang.org/dist/{}/{}'.format(date, filename) + dst = dl_dir + '/' + filename + bootstrap.get(url, dst) + + stage0_dst = triple + '/stage0' + if os.path.exists(stage0_dst): + for root, _, files in os.walk(stage0_dst): + for f in files: + os.unlink(os.path.join(root, f)) + else: + os.makedirs(stage0_dst) + bootstrap.unpack(dst, stage0_dst, match='rustc', verbose=True) + +if __name__ == '__main__': + main(sys.argv[1]) diff --git a/src/etc/htmldocck.py b/src/etc/htmldocck.py index 8362c239b655d..a5449b748dd5e 100644 --- a/src/etc/htmldocck.py +++ b/src/etc/htmldocck.py @@ -117,6 +117,7 @@ from htmlentitydefs import entitydefs entitydefs['larrb'] = u'\u21e4' entitydefs['rarrb'] = u'\u21e5' +entitydefs['nbsp'] = ' ' # "void elements" (no closing tag) from the HTML Standard section 12.1.2 VOID_ELEMENTS = set(['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', @@ -342,9 +343,9 @@ def check_tree_text(tree, path, pat, regexp): return ret -def check_tree_count(tree, path, count): +def get_tree_count(tree, path): path = normalize_xpath(path) - return len(tree.findall(path)) == count + return len(tree.findall(path)) def stderr(*args): print(*args, file=sys.stderr) @@ 
-393,7 +394,10 @@ def check_command(c, cache): elif c.cmd == 'count': # count test if len(c.args) == 3: # @count = count test - ret = check_tree_count(cache.get_tree(c.args[0]), c.args[1], int(c.args[2])) + expected = int(c.args[2]) + found = get_tree_count(cache.get_tree(c.args[0]), c.args[1]) + cerr = "Expected {} occurrences but found {}".format(expected, found) + ret = expected == found else: raise InvalidCheck('Invalid number of @{} arguments'.format(c.cmd)) elif c.cmd == 'valid-html': diff --git a/src/etc/latest-unix-snaps.py b/src/etc/latest-unix-snaps.py deleted file mode 100755 index 6c93bf23f900f..0000000000000 --- a/src/etc/latest-unix-snaps.py +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -import os -import re -from snapshot import * - -f = open(snapshotfile) -date = None -rev = None -platform = None -snap = None -i = 0 - -newestSet = {} - - -for line in f.readlines(): - i += 1 - parsed = parse_line(i, line) - if not parsed: - continue - - if parsed["type"] == "snapshot": - if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]): - newestSet["date"] = parsed["date"] - newestSet["rev"] = parsed["rev"] - newestSet["files"] = [] - addingMode = True - else: - addingMode = False - - elif addingMode is True and parsed["type"] == "file": - tux = re.compile("linux", re.IGNORECASE) - if (tux.match(parsed["platform"]) is not None): - ff = {} - ff["platform"] = parsed["platform"] - ff["hash"] = parsed["hash"] - newestSet["files"] += [ff] - - -def download_new_file(date, rev, platform, hsh): - snap = full_snapshot_name(date, rev, platform, hsh) - dl = os.path.join(download_dir_base, snap) - url = download_url_base + "/" + snap - if (not os.path.exists(dl)): - print("downloading " + url) - get_url_to_file(url, dl) - if (snap_filename_hash_part(snap) == hash_file(dl)): - print("got download with ok hash") - else: - raise Exception("bad hash on download") - -for ff in newestSet["files"]: - download_new_file(newestSet["date"], newestSet["rev"], - ff["platform"], ff["hash"]) diff --git a/src/etc/libc.c b/src/etc/libc.c deleted file mode 100644 index 249b5d22b6b23..0000000000000 --- a/src/etc/libc.c +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/* - * This calculates the platform-variable portion of the libc module. - * Move code in here only as you discover it is platform-variable. 
- * - */ - - /* c95 */ -#include -#include -#include -#include -#include -#include - -/* c99 */ -#include - -/* posix */ -#include -#include -#include -#include - -#define S(T) ((((T)-1)<0) ? 'i' : 'u') -#define B(T) (((int)sizeof(T)) * CHAR_BIT) -#define put_type(N,T) \ - printf(" type %s = %c%d;\n", N, S(T), B(T)) - -#define put_ftype(N,T) \ - printf(" type %s = f%d;\n", N, B(T)) - -#define CT(T) ((((T)-1)<0) ? "int" : "uint") -#define CS(T) ((((T)-1)<0) ? "" : "_u") -#define put_const(N,T) \ - printf(" const %s : %s = %d%s;\n", \ - #N, CT(T), N, CS(T)) - -void c95_types() { - printf(" mod c95 {\n"); - - put_type("c_char", char); - put_type("c_schar", signed char); - put_type("c_uchar", unsigned char); - - put_type("c_short", short); - put_type("c_ushort", unsigned short); - - put_type("c_int", int); - put_type("c_uint", unsigned int); - - put_type("c_long", long); - put_type("c_ulong", unsigned long); - - put_ftype("c_float", float); - put_ftype("c_double", double); - - put_type("size_t", size_t); - put_type("ptrdiff_t", ptrdiff_t); - - put_type("clock_t", clock_t); - put_type("time_t", time_t); - - put_type("wchar_t", wchar_t); - - printf(" }\n"); -} - -void c99_types() { - printf(" mod c99 {\n"); - - put_type("c_longlong", long long); - put_type("c_ulonglong", unsigned long long); - - put_type("intptr_t", intptr_t); - put_type("uintptr_t", uintptr_t); - - printf(" }\n"); -} - -void posix88_types() { - printf(" mod posix88 {\n"); - - put_type("off_t", off_t); - put_type("dev_t", dev_t); - put_type("ino_t", ino_t); - put_type("pid_t", pid_t); -#ifndef __WIN32__ - put_type("uid_t", uid_t); - put_type("gid_t", gid_t); -#endif - put_type("useconds_t", useconds_t); - put_type("mode_t", mode_t); - - put_type("ssize_t", ssize_t); - - printf(" }\n"); -} - -void extra_types() { - printf(" mod extra {\n"); - printf(" }\n"); -} - - -void c95_consts() { - printf(" mod c95 {\n"); - - put_const(EXIT_FAILURE, int); - put_const(EXIT_SUCCESS, int); - put_const(RAND_MAX, int); - 
- put_const(EOF, int); - put_const(SEEK_SET, int); - put_const(SEEK_CUR, int); - put_const(SEEK_END, int); - - put_const(_IOFBF, int); - put_const(_IONBF, int); - put_const(_IOLBF, int); - - put_const(BUFSIZ, size_t); - put_const(FOPEN_MAX, size_t); - put_const(FILENAME_MAX, size_t); - put_const(L_tmpnam, size_t); - put_const(TMP_MAX, size_t); - - printf(" }\n"); -} - - -void posix88_consts() { - printf(" mod posix88 {\n"); - put_const(O_RDONLY, int); - put_const(O_WRONLY, int); - put_const(O_RDWR, int); - put_const(O_APPEND, int); - put_const(O_CREAT, int); - put_const(O_EXCL, int); - put_const(O_TRUNC, int); - - put_const(S_IFIFO, int); - put_const(S_IFCHR, int); - put_const(S_IFBLK, int); - put_const(S_IFDIR, int); - put_const(S_IFREG, int); - put_const(S_IFLNK, int); - put_const(S_IFMT, int); - - put_const(S_IEXEC, int); - put_const(S_IWRITE, int); - put_const(S_IREAD, int); - - put_const(S_IRWXU, int); - put_const(S_IXUSR, int); - put_const(S_IWUSR, int); - put_const(S_IRUSR, int); - - put_const(S_IRWXG, int); - put_const(S_IXGRP, int); - put_const(S_IWGRP, int); - put_const(S_IRGRP, int); - - put_const(S_IRWXO, int); - put_const(S_IXOTH, int); - put_const(S_IWOTH, int); - put_const(S_IROTH, int); - -#ifdef F_OK - put_const(F_OK, int); -#endif -#ifdef R_OK - put_const(R_OK, int); -#endif -#ifdef W_OK - put_const(W_OK, int); -#endif -#ifdef X_OK - put_const(X_OK, int); -#endif - -#ifdef STDIN_FILENO - put_const(STDIN_FILENO, int); -#endif -#ifdef STDOUT_FILENO - put_const(STDOUT_FILENO, int); -#endif -#ifdef STDERR_FILENO - put_const(STDERR_FILENO, int); -#endif - -#ifdef F_LOCK - put_const(F_LOCK, int); -#endif - -#ifdef F_TEST - put_const(F_TEST, int); -#endif - -#ifdef F_TLOCK - put_const(F_TLOCK, int); -#endif - -#ifdef F_ULOCK - put_const(F_ULOCK, int); -#endif - - printf(" }\n"); -} - -void extra_consts() { - printf(" mod extra {\n"); -#ifdef O_RSYNC - put_const(O_RSYNC, int); -#endif - -#ifdef O_DSYNC - put_const(O_DSYNC, int); -#endif - -#ifdef O_SYNC - 
put_const(O_SYNC, int); -#endif - -#ifdef O_TEXT - put_const(O_TEXT, int); -#endif - -#ifdef O_BINARY - put_const(O_BINARY, int); -#endif - -#ifdef O_IRUSR - put_const(O_IRUSR, int); -#endif - -#ifdef O_IWUSR - put_const(O_IWUSR, int); -#endif - - printf(" }\n"); -} - -int main() { - printf("mod types {"); - c95_types(); - c99_types(); - posix88_types(); - extra_types(); - printf("}\n"); - - printf("mod consts {\n"); - c95_consts(); - posix88_consts(); - extra_consts(); - printf("}\n"); -} diff --git a/src/etc/licenseck.py b/src/etc/licenseck.py deleted file mode 100644 index 889b2c95a7ea8..0000000000000 --- a/src/etc/licenseck.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -import re - -license_re = re.compile( -u"""(#|//) Copyright .* The Rust Project Developers. See the COPYRIGHT -\\1 file at the top-level directory of this distribution and at -\\1 http://rust-lang.org/COPYRIGHT. -\\1 -\\1 Licensed under the Apache License, Version 2.0 or the MIT license -\\1 , at your -\\1 option. 
This file may not be copied, modified, or distributed -\\1 except according to those terms.""") - -exceptions = [ - "libstd/sync/mpsc/mpsc_queue.rs", # BSD - "libstd/sync/mpsc/spsc_queue.rs", # BSD - "test/bench/shootout-binarytrees.rs", # BSD - "test/bench/shootout-chameneos-redux.rs", # BSD - "test/bench/shootout-fannkuch-redux.rs", # BSD - "test/bench/shootout-fasta.rs", # BSD - "test/bench/shootout-fasta-redux.rs", # BSD - "test/bench/shootout-k-nucleotide.rs", # BSD - "test/bench/shootout-mandelbrot.rs", # BSD - "test/bench/shootout-meteor.rs", # BSD - "test/bench/shootout-nbody.rs", # BSD - "test/bench/shootout-regex-dna.rs", # BSD - "test/bench/shootout-reverse-complement.rs", # BSD - "test/bench/shootout-spectralnorm.rs", # BSD - "test/bench/shootout-threadring.rs", # BSD -] - -def check_license(name, contents): - # Whitelist check - if any(name.endswith(e) for e in exceptions): - return True - - # Xfail check - firstlineish = contents[:100] - if "ignore-license" in firstlineish: - return True - - # License check - boilerplate = contents[:500] - return bool(license_re.search(boilerplate)) diff --git a/src/etc/lldb_batchmode.py b/src/etc/lldb_batchmode.py index b1506285b3ac1..4952cf4f82c3b 100644 --- a/src/etc/lldb_batchmode.py +++ b/src/etc/lldb_batchmode.py @@ -37,14 +37,14 @@ def print_debug(s): - "Print something if DEBUG_OUTPUT is True" + """Print something if DEBUG_OUTPUT is True""" global DEBUG_OUTPUT if DEBUG_OUTPUT: print("DEBUG: " + str(s)) def normalize_whitespace(s): - "Replace newlines, tabs, multiple spaces, etc with exactly one space" + """Replace newlines, tabs, multiple spaces, etc with exactly one space""" return re.sub("\s+", " ", s) @@ -71,7 +71,7 @@ def breakpoint_callback(frame, bp_loc, dict): def execute_command(command_interpreter, command): - "Executes a single CLI command" + """Executes a single CLI command""" global new_breakpoints global registered_breakpoints @@ -216,4 +216,5 @@ def watchdog(): print("Aborting.", file=sys.stderr) 
sys.exit(1) finally: + debugger.Terminate() script_file.close() diff --git a/src/etc/lldb_rust_formatters.py b/src/etc/lldb_rust_formatters.py index c22a60abf3f76..335acae5fb6f7 100644 --- a/src/etc/lldb_rust_formatters.py +++ b/src/etc/lldb_rust_formatters.py @@ -29,7 +29,7 @@ def get_unqualified_type_name(self): if qualified_name is None: return qualified_name - return extract_type_name(qualified_name).replace("&'static ", "&") + return rustpp.extract_type_name(qualified_name).replace("&'static ", "&") def get_dwarf_type_kind(self): type_class = self.ty.GetTypeClass() @@ -171,10 +171,10 @@ def print_val(lldb_val, internal_dict): #=-------------------------------------------------------------------------------------------------- def print_struct_val(val, internal_dict, omit_first_field, omit_type_name, is_tuple_like): - ''' + """ Prints a struct, tuple, or tuple struct value with Rust syntax. Ignores any fields before field_start_index. - ''' + """ assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_STRUCT if omit_type_name: @@ -204,7 +204,7 @@ def render_child(child_index): # LLDB is not good at handling zero-sized values, so we have to help # it a little if field.GetType().GetByteSize() == 0: - return this + extract_type_name(field.GetType().GetName()) + return this + rustpp.extract_type_name(field.GetType().GetName()) else: return this + "" @@ -221,7 +221,7 @@ def render_child(child_index): "body": body} def print_pointer_val(val, internal_dict): - '''Prints a pointer value with Rust syntax''' + """Prints a pointer value with Rust syntax""" assert val.type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_PTR sigil = "&" type_name = val.type.get_unqualified_type_name() @@ -274,26 +274,9 @@ def print_std_string_val(val, internal_dict): # Helper Functions #=-------------------------------------------------------------------------------------------------- -UNQUALIFIED_TYPE_MARKERS = frozenset(["(", "[", "&", "*"]) - -def 
extract_type_name(qualified_type_name): - '''Extracts the type name from a fully qualified path''' - if qualified_type_name[0] in UNQUALIFIED_TYPE_MARKERS: - return qualified_type_name - - end_of_search = qualified_type_name.find("<") - if end_of_search < 0: - end_of_search = len(qualified_type_name) - - index = qualified_type_name.rfind("::", 0, end_of_search) - if index < 0: - return qualified_type_name - else: - return qualified_type_name[index + 2:] - def print_array_of_values(array_name, data_ptr_val, length, internal_dict): - '''Prints a contigous memory range, interpreting it as values of the - pointee-type of data_ptr_val.''' + """Prints a contigous memory range, interpreting it as values of the + pointee-type of data_ptr_val.""" data_ptr_type = data_ptr_val.type assert data_ptr_type.get_dwarf_type_kind() == rustpp.DWARF_TYPE_CODE_PTR diff --git a/src/etc/local_stage0.sh b/src/etc/local_stage0.sh index ca59b1c7d34a2..ee77206640eab 100755 --- a/src/etc/local_stage0.sh +++ b/src/etc/local_stage0.sh @@ -18,7 +18,7 @@ LIB_PREFIX=lib OS=`uname -s` case $OS in - ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD") + ("Linux"|"FreeBSD"|"DragonFly"|"Bitrig"|"OpenBSD"|"SunOS"|"Haiku") BIN_SUF= LIB_SUF=.so ;; @@ -49,12 +49,31 @@ if [ -z $TARG_DIR ]; then exit 1 fi +case "$TARG_DIR" in +--print-rustc-release) + # not actually copying to TARG_DIR, just print the local rustc version and exit + ${PREFIX}/bin/rustc${BIN_SUF} --version --verbose | sed -ne 's/^release: //p' +;; +*) + cp ${PREFIX}/bin/rustc${BIN_SUF} ${TARG_DIR}/stage0/bin/ cp ${PREFIX}/${LIB_DIR}/${RUSTLIBDIR}/${TARG_DIR}/${LIB_DIR}/* ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}arena*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}extra*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}rust*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}std*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ cp 
${PREFIX}/${LIB_DIR}/${LIB_PREFIX}syntax*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}flate*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}fmt_macros*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}getopts*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}graphviz*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}log*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}rbml*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}serialize*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}term*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ +cp ${PREFIX}/${LIB_DIR}/${LIB_PREFIX}proc_macro*${LIB_SUF} ${TARG_DIR}/stage0/${LIB_DIR}/ # do not fail if one of the above fails, as all we need is a working rustc! exit 0 + +esac diff --git a/src/etc/make-snapshot.py b/src/etc/make-snapshot.py deleted file mode 100755 index 31a69a581e4e2..0000000000000 --- a/src/etc/make-snapshot.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -import snapshot, sys - -print(snapshot.make_snapshot(sys.argv[1], sys.argv[2])) diff --git a/src/etc/maketest.py b/src/etc/maketest.py deleted file mode 100644 index 34c2cdbef3538..0000000000000 --- a/src/etc/maketest.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -import subprocess -import os -import sys - -target_triple = sys.argv[14] - -def normalize_path(v): - """msys1/msys2 automatically converts `/abs/path1:/abs/path2` into - `c:\real\abs\path1;c:\real\abs\path2` (semicolons) if shell thinks - the value is list of paths. - (if there is only one path, it becomes `c:/real/abs/path`.) - this causes great confusion and error: shell and Makefile doesn't like - windows paths so it is really error-prone. revert it for peace.""" - v = v.replace('\\', '/') - # c:/path -> /c/path - # "c:/path" -> "/c/path" - start = v.find(':/') - while start != -1: - v = v[:start - 1] + '/' + v[start - 1:start] + v[start + 1:] - start = v.find(':/') - return v - - -def putenv(name, value): - if os.name == 'nt': - value = normalize_path(value) - os.putenv(name, value) - - -def convert_path_spec(name, value): - if os.name == 'nt' and name != 'PATH': - value = ":".join(normalize_path(v) for v in value.split(";")) - return value - -make = sys.argv[2] -putenv('RUSTC', os.path.abspath(sys.argv[3])) -putenv('TMPDIR', os.path.abspath(sys.argv[4])) -putenv('CC', sys.argv[5] + ' ' + sys.argv[6]) -putenv('RUSTDOC', os.path.abspath(sys.argv[7])) -filt = sys.argv[8] -putenv('LD_LIB_PATH_ENVVAR', sys.argv[9]) -putenv('HOST_RPATH_DIR', os.path.abspath(sys.argv[10])) -putenv('TARGET_RPATH_DIR', os.path.abspath(sys.argv[11])) -putenv('RUST_BUILD_STAGE', sys.argv[12]) -putenv('S', os.path.abspath(sys.argv[13])) -putenv('RUSTFLAGS', sys.argv[15]) -putenv('LLVM_COMPONENTS', sys.argv[16]) -putenv('PYTHON', sys.executable) -os.putenv('TARGET', target_triple) - -if 'msvc' in target_triple: - os.putenv('IS_MSVC', '1') - -if filt not in sys.argv[1]: - sys.exit(0) -print('maketest: ' + os.path.basename(os.path.dirname(sys.argv[1]))) - -path = sys.argv[1] -if path[-1] == '/': - # msys1 
has a bug that `make` fails to include `../tools.mk` (parent dir) - # if `-C path` option is given and `path` is absolute directory with - # trailing slash (`c:/path/to/test/`). - # the easist workaround is to remove the slash (`c:/path/to/test`). - # msys2 seems to fix this problem. - path = path[:-1] - -proc = subprocess.Popen([make, '-C', path], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) -out, err = proc.communicate() -i = proc.wait() - -if i != 0: - print """\ ------ %s -------------------- ------- stdout --------------------------------------------- -%s ------- stderr --------------------------------------------- -%s ------- --------------------------------------------- -""" % (sys.argv[1], out, err) - - sys.exit(i) diff --git a/src/etc/mingw-fix-include/README.txt b/src/etc/mingw-fix-include/README.txt deleted file mode 100644 index e36e6abde9178..0000000000000 --- a/src/etc/mingw-fix-include/README.txt +++ /dev/null @@ -1,6 +0,0 @@ -The purpose of these headers is to fix issues with mingw v4.0, as described in #9246. - -This works by adding this directory to GCC include search path before mingw system headers directories, -so we can intercept their inclusions and add missing definitions without having to modify files in mingw/include. - -Once mingw fixes all 3 issues mentioned in #9246, this directory and all references to it from rust/mk/* may be removed. diff --git a/src/etc/mingw-fix-include/bits/c++config.h b/src/etc/mingw-fix-include/bits/c++config.h deleted file mode 100644 index 57533ef522455..0000000000000 --- a/src/etc/mingw-fix-include/bits/c++config.h +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#ifndef _FIX_CXXCONFIG_H -#define _FIX_CXXCONFIG_H 1 - -#define _GLIBCXX_HAVE_FENV_H 1 - -#include_next - -#endif diff --git a/src/etc/mingw-fix-include/winbase.h b/src/etc/mingw-fix-include/winbase.h deleted file mode 100644 index b1674f5f5673a..0000000000000 --- a/src/etc/mingw-fix-include/winbase.h +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#ifndef _FIX_WINBASE_H -#define _FIX_WINBASE_H 1 - -#define NTDDK_VERSION NTDDI_VERSION - -#include_next - -#endif diff --git a/src/etc/mingw-fix-include/winsock2.h b/src/etc/mingw-fix-include/winsock2.h deleted file mode 100644 index 5096c2f67026c..0000000000000 --- a/src/etc/mingw-fix-include/winsock2.h +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#ifndef _FIX_WINSOCK2_H -#define _FIX_WINSOCK2_H 1 - -#include_next - -// mingw 4.0.x has broken headers (#9246) but mingw-w64 does not. 
-#if defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION == 4 - -typedef struct pollfd { - SOCKET fd; - short events; - short revents; -} WSAPOLLFD, *PWSAPOLLFD, *LPWSAPOLLFD; - -#endif - -#endif // _FIX_WINSOCK2_H diff --git a/src/etc/mirror-all-snapshots.py b/src/etc/mirror-all-snapshots.py deleted file mode 100644 index cd77f882140fe..0000000000000 --- a/src/etc/mirror-all-snapshots.py +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -import os -from snapshot import * - -f = open(snapshotfile) -date = None -rev = None -platform = None -snap = None -i = 0 - -for line in f.readlines(): - i += 1 - parsed = parse_line(i, line) - if not parsed: - continue - - if parsed["type"] == "snapshot": - date = parsed["date"] - rev = parsed["rev"] - - elif rev is not None and parsed["type"] == "file": - platform = parsed["platform"] - hsh = parsed["hash"] - snap = full_snapshot_name(date, rev, platform, hsh) - dl = os.path.join(download_dir_base, snap) - url = download_url_base + "/" + snap - if (not os.path.exists(dl)): - print("downloading " + url) - get_url_to_file(url, dl) - if (snap_filename_hash_part(snap) == hash_file(dl)): - print("got download with ok hash") - else: - raise Exception("bad hash on download") diff --git a/src/etc/mklldef.py b/src/etc/mklldef.py deleted file mode 100644 index d2f8ee469a4a1..0000000000000 --- a/src/etc/mklldef.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -import sys - -input_file = sys.argv[1] -output_file = sys.argv[2] -name = sys.argv[3] - -with open(input_file, 'r') as f: - with open(output_file, 'w') as g: - print >> g, 'LIBRARY ' + name - print >> g, 'EXPORTS' - for x in f: - x = str(x) - if not x.startswith(' pub fn LLVM'): continue - name = x[11:x.find('(')] - print >> g, ' ' + name diff --git a/src/etc/mklldeps.py b/src/etc/mklldeps.py index 8381e4f704097..24b007576aa24 100644 --- a/src/etc/mklldeps.py +++ b/src/etc/mklldeps.py @@ -77,6 +77,13 @@ def runErr(args): lib = lib.strip()[2:] elif lib[0] == '-': lib = lib.strip()[1:] + # If this actually points at a literal file then we're on MSVC which now + # prints full paths, so get just the name of the library and strip off the + # trailing ".lib" + elif os.path.exists(lib): + lib = os.path.basename(lib)[:-4] + elif lib[-4:] == '.lib': + lib = lib[:-4] f.write("#[link(name = \"" + lib + "\"") if not llvm_shared and 'LLVM' in lib: f.write(", kind = \"static\"") diff --git a/src/etc/platform-intrinsics/aarch64.json b/src/etc/platform-intrinsics/aarch64.json index 79fd769942889..c8cda4077b790 100644 --- a/src/etc/platform-intrinsics/aarch64.json +++ b/src/etc/platform-intrinsics/aarch64.json @@ -1,6 +1,6 @@ { - "platform": "aarch64", - "intrinsic_prefix": "aarch64_v", + "platform": "aarch64_v", + "intrinsic_prefix": "", "llvm_prefix": "llvm.aarch64.neon.", "number_info": { "signed": { diff --git a/src/etc/platform-intrinsics/arm.json b/src/etc/platform-intrinsics/arm.json index 97db7cbb3e0a5..39e49e239f34d 100644 --- a/src/etc/platform-intrinsics/arm.json +++ b/src/etc/platform-intrinsics/arm.json @@ -1,6 +1,6 @@ { - "platform": "arm", - "intrinsic_prefix": "arm_v", + "platform": "arm_v", + "intrinsic_prefix": "", "llvm_prefix": "llvm.neon.v", "number_info": { "signed": { diff 
--git a/src/etc/platform-intrinsics/generator.py b/src/etc/platform-intrinsics/generator.py index e3aa4e688d38b..e3c08bb35e075 100644 --- a/src/etc/platform-intrinsics/generator.py +++ b/src/etc/platform-intrinsics/generator.py @@ -26,10 +26,9 @@ class PlatformInfo(object): def __init__(self, json): self._platform = json['platform'] - self._intrinsic_prefix = json['intrinsic_prefix'] - def intrinsic_prefix(self): - return self._intrinsic_prefix + def platform_prefix(self): + return self._platform class IntrinsicSet(object): def __init__(self, platform, json): @@ -38,6 +37,7 @@ def __init__(self, platform, json): self._intrinsics = json['intrinsics'] self._widths = json['width_info'] self._platform = platform + self._intrinsic_prefix = json['intrinsic_prefix'] def intrinsics(self): for raw in self._intrinsics: @@ -48,6 +48,9 @@ def intrinsics(self): def platform(self): return self._platform + def intrinsic_prefix(self): + return self._intrinsic_prefix + def llvm_prefix(self): return self._llvm_prefix @@ -116,13 +119,19 @@ class Void(Type): def __init__(self): Type.__init__(self, 0) - def compiler_ctor(self): - return 'void()' + @staticmethod + def compiler_ctor(): + return '::VOID' - def rust_name(self): + def compiler_ctor_ref(self): + return '&' + self.compiler_ctor() + + @staticmethod + def rust_name(): return '()' - def type_info(self, platform_info): + @staticmethod + def type_info(platform_info): return None def __eq__(self, other): @@ -163,10 +172,12 @@ def __init__(self, bitwidth, llvm_bitwidth = None): def compiler_ctor(self): if self._llvm_bitwidth is None: - return 'i({})'.format(self.bitwidth()) + return '::I{}'.format(self.bitwidth()) else: - return 'i_({}, {})'.format(self.bitwidth(), - self._llvm_bitwidth) + return '::I{}_{}'.format(self.bitwidth(), self._llvm_bitwidth) + + def compiler_ctor_ref(self): + return '&' + self.compiler_ctor() def llvm_name(self): bw = self._llvm_bitwidth or self.bitwidth() @@ -182,10 +193,12 @@ def __init__(self, bitwidth, 
llvm_bitwidth = None): def compiler_ctor(self): if self._llvm_bitwidth is None: - return 'u({})'.format(self.bitwidth()) + return '::U{}'.format(self.bitwidth()) else: - return 'u_({}, {})'.format(self.bitwidth(), - self._llvm_bitwidth) + return '::U{}_{}'.format(self.bitwidth(), self._llvm_bitwidth) + + def compiler_ctor_ref(self): + return '&' + self.compiler_ctor() def llvm_name(self): bw = self._llvm_bitwidth or self.bitwidth() @@ -200,7 +213,10 @@ def __init__(self, bitwidth): Number.__init__(self, bitwidth) def compiler_ctor(self): - return 'f({})'.format(self.bitwidth()) + return '::F{}'.format(self.bitwidth()) + + def compiler_ctor_ref(self): + return '&' + self.compiler_ctor() def llvm_name(self): return 'f{}'.format(self.bitwidth()) @@ -244,12 +260,16 @@ def modify(self, spec, width, previous): def compiler_ctor(self): if self._bitcast is None: - return 'v({}, {})'.format(self._elem.compiler_ctor(), - self._length) + return '{}x{}'.format(self._elem.compiler_ctor(), + self._length) else: - return 'v_({}, {}, {})'.format(self._elem.compiler_ctor(), - self._bitcast.compiler_ctor(), - self._length) + return '{}x{}_{}'.format(self._elem.compiler_ctor(), + self._length, + self._bitcast.compiler_ctor() + .replace('::', '')) + + def compiler_ctor_ref(self): + return '&' + self.compiler_ctor() def rust_name(self): return '{}x{}'.format(self._elem.rust_name(), self._length) @@ -265,7 +285,7 @@ def __eq__(self, other): class Pointer(Type): def __init__(self, elem, llvm_elem, const): - self._elem = elem; + self._elem = elem self._llvm_elem = llvm_elem self._const = const Type.__init__(self, BITWIDTH_POINTER) @@ -284,10 +304,14 @@ def compiler_ctor(self): if self._llvm_elem is None: llvm_elem = 'None' else: - llvm_elem = 'Some({})'.format(self._llvm_elem.compiler_ctor()) - return 'p({}, {}, {})'.format('true' if self._const else 'false', - self._elem.compiler_ctor(), - llvm_elem) + llvm_elem = 'Some({})'.format(self._llvm_elem.compiler_ctor_ref()) + return 
'Type::Pointer({}, {}, {})'.format(self._elem.compiler_ctor_ref(), + llvm_elem, + 'true' if self._const else 'false') + + def compiler_ctor_ref(self): + return "{{ static PTR: Type = {}; &PTR }}".format(self.compiler_ctor()) + def rust_name(self): return '*{} {}'.format('const' if self._const else 'mut', @@ -322,8 +346,14 @@ def modify(self, spec, width, previous): raise NotImplementedError() def compiler_ctor(self): - return 'agg({}, vec![{}])'.format('true' if self._flatten else 'false', - ', '.join(elem.compiler_ctor() for elem in self._elems)) + parts = "{{ static PARTS: [&'static Type; {}] = [{}]; &PARTS }}" + elems = ', '.join(elem.compiler_ctor_ref() for elem in self._elems) + parts = parts.format(len(self._elems), elems) + return 'Type::Aggregate({}, {})'.format('true' if self._flatten else 'false', + parts) + + def compiler_ctor_ref(self): + return "{{ static AGG: Type = {}; &AGG }}".format(self.compiler_ctor()) def rust_name(self): return '({})'.format(', '.join(elem.rust_name() for elem in self._elems)) @@ -476,7 +506,7 @@ def monomorphise(self): # must be a power of two assert width & (width - 1) == 0 def recur(processed, untouched): - if untouched == []: + if not untouched: ret = processed[0] args = processed[1:] yield MonomorphicIntrinsic(self._platform, self.intrinsic, width, @@ -514,14 +544,20 @@ def intrinsic_suffix(self): *self._args, width = self._width) + def platform_prefix(self): + return self._platform.platform().platform_prefix() + + def intrinsic_set_name(self): + return self._platform.intrinsic_prefix() + def intrinsic_name(self): - return self._platform.platform().intrinsic_prefix() + self.intrinsic_suffix() + return self._platform.intrinsic_prefix() + self.intrinsic_suffix() def compiler_args(self): - return ', '.join(arg.compiler_ctor() for arg in self._args_raw) + return ', '.join(arg.compiler_ctor_ref() for arg in self._args_raw) def compiler_ret(self): - return self._ret_raw.compiler_ctor() + return self._ret_raw.compiler_ctor_ref() 
def compiler_signature(self): return '({}) -> {}'.format(self.compiler_args(), self.compiler_ret()) @@ -537,6 +573,27 @@ def parse_args(): formatter_class = argparse.RawDescriptionHelpFormatter, description = 'Render an intrinsic definition JSON to various formats.', epilog = textwrap.dedent('''\ + Quick How-To: + + There are two operating modes: single file and multiple files. + + For example, ARM is specified as a single file. To generate the + compiler-definitions for ARM just pass the script the "arm.json" file: + + python generator.py --format compiler-defs arm.json + + The X86 architecture is specified as multiple files (for the different + instruction sets that x86 supports). To generate the compiler + definitions one needs to pass the script a "platform information file" + (with the -i flag) next to the files of the different intruction sets. + For example, to generate the X86 compiler-definitions for SSE4.2, just: + + python generator.py --format compiler-defs -i x86/info.json sse42.json + + And to generate the compiler-definitions for SSE4.1 and SSE4.2, just: + + python generator.py --format compiler-defs -i x86/info.json sse41.json sse42.json + An intrinsic definition consists of a map with fields: - intrinsic: pattern for the name(s) of the vendor's C intrinsic(s) - llvm: pattern for the name(s) of the internal llvm intrinsic(s) @@ -691,7 +748,7 @@ def parse_args(): parser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, help = 'File to output to (default stdout).') parser.add_argument('-i', '--info', type=argparse.FileType('r'), - help = 'File containing platform specific information to merge into' + help = 'File containing platform specific information to merge into ' 'the input files\' header.') parser.add_argument('in_', metavar="FILE", type=argparse.FileType('r'), nargs='+', help = 'JSON files to load') @@ -702,21 +759,26 @@ class ExternBlock(object): def __init__(self): pass - def open(self, platform): + @staticmethod + 
def open(platform): return 'extern "platform-intrinsic" {' - def render(self, mono): - return ' fn {}{};'.format(mono.intrinsic_name(), - mono.intrinsic_signature()) + @staticmethod + def render(mono): + return ' fn {}{}{};'.format(mono.platform_prefix(), + mono.intrinsic_name(), + mono.intrinsic_signature()) - def close(self): + @staticmethod + def close(): return '}' class CompilerDefs(object): def __init__(self): pass - def open(self, platform): + @staticmethod + def open(platform): return '''\ // Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at @@ -733,29 +795,31 @@ def open(self, platform): #![allow(unused_imports)] -use {{Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void}}; +use {{Intrinsic, Type}}; use IntrinsicDef::Named; -use rustc::middle::ty; // The default inlining settings trigger a pathological behaviour in // LLVM, which causes makes compilation very slow. See #28273. #[inline(never)] -pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option {{ +pub fn find(name: &str) -> Option {{ if !name.starts_with("{0}") {{ return None }} - Some(match &name["{0}".len()..] {{'''.format(platform.intrinsic_prefix()) + Some(match &name["{0}".len()..] 
{{'''.format(platform.platform_prefix()) - def render(self, mono): + @staticmethod + def render(mono): return '''\ "{}" => Intrinsic {{ - inputs: vec![{}], + inputs: {{ static INPUTS: [&'static Type; {}] = [{}]; &INPUTS }}, output: {}, definition: Named("{}") - }},'''.format(mono.intrinsic_suffix(), + }},'''.format(mono.intrinsic_set_name() + mono.intrinsic_suffix(), + len(mono._args_raw), mono.compiler_args(), mono.compiler_ret(), mono.llvm_name()) - def close(self): + @staticmethod + def close(): return '''\ _ => return None, }) diff --git a/src/etc/platform-intrinsics/x86/avx.json b/src/etc/platform-intrinsics/x86/avx.json index 2c1492c2954c8..1f41e2ecf3e6d 100644 --- a/src/etc/platform-intrinsics/x86/avx.json +++ b/src/etc/platform-intrinsics/x86/avx.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.avx.", "intrinsics": [ { @@ -8,6 +9,83 @@ "ret": "f(32-64)", "args": ["0", "0"] }, + { + "intrinsic": "256_blendv_{0.data_type}", + "width": [256], + "llvm": "blendv.{0.data_type}.256", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "256_broadcast_{0.data_type}", + "width": [256], + "llvm": "vbroadcastf128.{0.data_type}.256", + "ret": "f(32-64)", + "args": ["s8SPc"] + }, + { + "intrinsic": "256_cmp_{0.data_type}", + "width": [256], + "llvm": "cmp.{1.data_type}.256", + "ret": "f(32-64)", + "args": ["0", "0", "s8S"] + }, + { + "intrinsic": "256_cvtepi32_pd", + "width": [256], + "llvm": "cvtdq2.pd.256", + "ret": "f64", + "args": ["s32h"] + }, + { + "intrinsic": "256_cvtepi32_ps", + "width": [256], + "llvm": "cvtdq2.ps.256", + "ret": "f32", + "args": ["s32"] + }, + { + "intrinsic": "256_cvtpd_epi32", + "width": [256], + "llvm": "cvt.pd2dq.256", + "ret": "s32h", + "args": ["f64"] + }, + { + "intrinsic": "256_cvtpd_ps", + "width": [256], + "llvm": "cvt.pd2.ps.256", + "ret": "f32h", + "args": ["f64"] + }, + { + "intrinsic": "256_cvtps_epi32", + "width": [256], + "llvm": "cvt.ps2dq.256", + "ret": "s32", + "args": ["f32"] + 
}, + { + "intrinsic": "256_cvtps_pd", + "width": [256], + "llvm": "cvt.ps2.pd.256", + "ret": "f64", + "args": ["f32h"] + }, + { + "intrinsic": "256_cvttpd_epi32", + "width": [256], + "llvm": "cvtt.pd2dq.256", + "ret": "s32h", + "args": ["f64"] + }, + { + "intrinsic": "256_cvttps_epi32", + "width": [256], + "llvm": "cvtt.ps2dq.256", + "ret": "s32", + "args": ["f32"] + }, { "intrinsic": "256_dp_ps", "width": [256], diff --git a/src/etc/platform-intrinsics/x86/avx2.json b/src/etc/platform-intrinsics/x86/avx2.json index e88ff3d2b806d..4e006c1c4cf41 100644 --- a/src/etc/platform-intrinsics/x86/avx2.json +++ b/src/etc/platform-intrinsics/x86/avx2.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.avx2.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/bmi.json b/src/etc/platform-intrinsics/x86/bmi.json new file mode 100644 index 0000000000000..24e2cbcf8aad1 --- /dev/null +++ b/src/etc/platform-intrinsics/x86/bmi.json @@ -0,0 +1,13 @@ +{ + "intrinsic_prefix": "_bmi", + "llvm_prefix": "llvm.x86.bmi.", + "intrinsics": [ + { + "intrinsic": "_bextr_{0.bitwidth}", + "width": ["0"], + "llvm": "bextr.{0.bitwidth}", + "ret": "S(32-64)u", + "args": ["0", "0"] + } + ] +} diff --git a/src/etc/platform-intrinsics/x86/bmi2.json b/src/etc/platform-intrinsics/x86/bmi2.json new file mode 100644 index 0000000000000..f5a0db5ef51da --- /dev/null +++ b/src/etc/platform-intrinsics/x86/bmi2.json @@ -0,0 +1,27 @@ +{ + "intrinsic_prefix": "_bmi2", + "llvm_prefix": "llvm.x86.bmi.", + "intrinsics": [ + { + "intrinsic": "_bzhi_{0.bitwidth}", + "width": ["0"], + "llvm": "bzhi.{0.bitwidth}", + "ret": "S(32-64)u", + "args": ["0", "0"] + }, + { + "intrinsic": "_pdep_{0.bitwidth}", + "width": ["0"], + "llvm": "pdep.{0.bitwidth}", + "ret": "S(32-64)u", + "args": ["0", "0"] + }, + { + "intrinsic": "_pext_{0.bitwidth}", + "width": ["0"], + "llvm": "pext.{0.bitwidth}", + "ret": "S(32-64)u", + "args": ["0", "0"] + } + ] +} diff --git 
a/src/etc/platform-intrinsics/x86/fma.json b/src/etc/platform-intrinsics/x86/fma.json new file mode 100644 index 0000000000000..dcc26cd501c9b --- /dev/null +++ b/src/etc/platform-intrinsics/x86/fma.json @@ -0,0 +1,48 @@ +{ + "intrinsic_prefix": "_mm", + "llvm_prefix": "llvm.x86.fma.", + "intrinsics": [ + { + "intrinsic": "{0.width_mm}_fmadd_{0.data_type}", + "width": [128, 256], + "llvm": "vfmadd.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "{0.width_mm}_fmaddsub_{0.data_type}", + "width": [128, 256], + "llvm": "vfmaddsub.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "{0.width_mm}_fmsub_{0.data_type}", + "width": [128, 256], + "llvm": "vfmsub.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "{0.width_mm}_fmsubadd_{0.data_type}", + "width": [128, 256], + "llvm": "vfmsubadd.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "{0.width_mm}_fnmadd_{0.data_type}", + "width": [128, 256], + "llvm": "vfnmadd.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + }, + { + "intrinsic": "{0.width_mm}_fnmsub_{0.data_type}", + "width": [128, 256], + "llvm": "vfnmsub.{0.data_type_short}{0.width_suffix}", + "ret": "f(32-64)", + "args": ["0", "0", "0"] + } + ] +} diff --git a/src/etc/platform-intrinsics/x86/info.json b/src/etc/platform-intrinsics/x86/info.json index d48bcd268a004..8e90b8579c49e 100644 --- a/src/etc/platform-intrinsics/x86/info.json +++ b/src/etc/platform-intrinsics/x86/info.json @@ -1,26 +1,30 @@ { "platform": "x86", - "intrinsic_prefix": "x86_mm", "number_info": { "signed": { "kind": "s", "kind_short": "", "data_type": { "pattern": "epi{bitwidth}" }, + "bitwidth": { "pattern": "{bitwidth}" }, "data_type_short": { "8": "b", "16": "w", "32": "d", "64": "q" } }, "unsigned": { "kind": "u", 
"kind_short": "u", "data_type": { "pattern": "epu{bitwidth}" }, + "bitwidth": { "pattern": "{bitwidth}" }, "data_type_short": { "8": "b", "16": "w", "32": "d", "64": "q" } }, "float": { "kind": "f", "data_type": { "32": "ps", "64": "pd" }, + "bitwidth": { "pattern": "{bitwidth}" }, "data_type_short": { "32": "ps", "64": "pd" } } }, "width_info": { + "32": { "width_mm": "32", "width_suffix": "" }, + "64": { "width_mm": "64", "width_suffix": "" }, "128": { "width_mm": "", "width_suffix": "" }, "256": { "width_mm": "256", "width_suffix": ".256" }, "512": { "width_mm": "512", "width_suffix": ".512" } diff --git a/src/etc/platform-intrinsics/x86/sse.json b/src/etc/platform-intrinsics/x86/sse.json index adff0dc41b2af..d8eef8a3514ae 100644 --- a/src/etc/platform-intrinsics/x86/sse.json +++ b/src/etc/platform-intrinsics/x86/sse.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.sse.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/sse2.json b/src/etc/platform-intrinsics/x86/sse2.json index d09980d95f31b..4d6317d80a57e 100644 --- a/src/etc/platform-intrinsics/x86/sse2.json +++ b/src/etc/platform-intrinsics/x86/sse2.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.sse2.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/sse3.json b/src/etc/platform-intrinsics/x86/sse3.json index ed13595929d1b..119bf208f7e34 100644 --- a/src/etc/platform-intrinsics/x86/sse3.json +++ b/src/etc/platform-intrinsics/x86/sse3.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.sse3.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/sse41.json b/src/etc/platform-intrinsics/x86/sse41.json index de792cd1060bb..b499637e0d3c6 100644 --- a/src/etc/platform-intrinsics/x86/sse41.json +++ b/src/etc/platform-intrinsics/x86/sse41.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.sse41.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/sse42.json 
b/src/etc/platform-intrinsics/x86/sse42.json index c43ffef0dc578..fdee9c8a6671b 100644 --- a/src/etc/platform-intrinsics/x86/sse42.json +++ b/src/etc/platform-intrinsics/x86/sse42.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.sse42.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/ssse3.json b/src/etc/platform-intrinsics/x86/ssse3.json index af6afbb19a26d..5a5617957b3e5 100644 --- a/src/etc/platform-intrinsics/x86/ssse3.json +++ b/src/etc/platform-intrinsics/x86/ssse3.json @@ -1,4 +1,5 @@ { + "intrinsic_prefix": "_mm", "llvm_prefix": "llvm.x86.ssse3.", "intrinsics": [ { diff --git a/src/etc/platform-intrinsics/x86/tbm.json b/src/etc/platform-intrinsics/x86/tbm.json new file mode 100644 index 0000000000000..d1322cd60c4cf --- /dev/null +++ b/src/etc/platform-intrinsics/x86/tbm.json @@ -0,0 +1,13 @@ +{ + "intrinsic_prefix": "_tbm", + "llvm_prefix": "llvm.x86.tbm.", + "intrinsics": [ + { + "intrinsic": "_bextri_u{0.bitwidth}", + "width": ["0"], + "llvm": "bextri.u{0.bitwidth}", + "ret": "S(32-64)u", + "args": ["0", "0"] + } + ] +} diff --git a/src/etc/regex-match-tests.py b/src/etc/regex-match-tests.py deleted file mode 100755 index ea7f51c86f817..0000000000000 --- a/src/etc/regex-match-tests.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python2 - -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -from __future__ import absolute_import, division, print_function -import argparse -import datetime -import os.path as path - - -def print_tests(tests): - print('\n'.join([test_tostr(t) for t in tests])) - - -def read_tests(f): - basename, _ = path.splitext(path.basename(f)) - tests = [] - for lineno, line in enumerate(open(f), 1): - fields = filter(None, map(str.strip, line.split('\t'))) - if not (4 <= len(fields) <= 5) \ - or 'E' not in fields[0] or fields[0][0] == '#': - continue - - opts, pat, text, sgroups = fields[0:4] - groups = [] # groups as integer ranges - if sgroups == 'NOMATCH': - groups = [None] - elif ',' in sgroups: - noparen = map(lambda s: s.strip('()'), sgroups.split(')(')) - for g in noparen: - s, e = map(str.strip, g.split(',')) - if s == '?' and e == '?': - groups.append(None) - else: - groups.append((int(s), int(e))) - else: - # This skips tests that should result in an error. - # There aren't many, so I think we can just capture those - # manually. Possibly fix this in future. - continue - - if pat == 'SAME': - pat = tests[-1][1] - if '$' in opts: - pat = pat.decode('string_escape') - text = text.decode('string_escape') - if 'i' in opts: - pat = '(?i)%s' % pat - - name = '%s_%d' % (basename, lineno) - tests.append((name, pat, text, groups)) - return tests - - -def test_tostr(t): - lineno, pat, text, groups = t - options = map(group_tostr, groups) - return 'mat!{match_%s, r"%s", r"%s", %s}' \ - % (lineno, pat, '' if text == "NULL" else text, ', '.join(options)) - - -def group_tostr(g): - if g is None: - return 'None' - else: - return 'Some((%d, %d))' % (g[0], g[1]) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description='Generate match tests from an AT&T POSIX test file.') - aa = parser.add_argument - aa('files', nargs='+', - help='A list of dat AT&T POSIX test files. 
See src/libregexp/testdata') - args = parser.parse_args() - - tests = [] - for f in args.files: - tests += read_tests(f) - - tpl = '''// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// ignore-tidy-linelength - -// DO NOT EDIT. Automatically generated by 'src/etc/regexp-match-tests' -// on {date}. -''' - print(tpl.format(date=str(datetime.datetime.now()))) - - for f in args.files: - print('// Tests from %s' % path.basename(f)) - print_tests(read_tests(f)) - print('') diff --git a/src/etc/rust-lldb b/src/etc/rust-lldb index 42902b06aee13..f70ab65bce717 100755 --- a/src/etc/rust-lldb +++ b/src/etc/rust-lldb @@ -12,6 +12,17 @@ # Exit if anything fails set -e +LLDB_VERSION=`lldb --version 2>/dev/null | head -1 | cut -d. -f1` + +if [ "$LLDB_VERSION" = "lldb-350" ] +then + echo "***" + echo \ +"WARNING: This version of LLDB has known issues with Rust and cannot \ +display the contents of local variables!" + echo "***" +fi + # Create a tempfile containing the LLDB script we want to execute on startup TMPFILE=`mktemp /tmp/rust-lldb-commands.XXXXXX` diff --git a/src/etc/snapshot.py b/src/etc/snapshot.py deleted file mode 100644 index 6d62a45c703a2..0000000000000 --- a/src/etc/snapshot.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright 2011-2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- -import re -import os -import sys -import glob -import tarfile -import shutil -import subprocess -import distutils.spawn - -try: - import hashlib - sha_func = hashlib.sha1 -except ImportError: - import sha - sha_func = sha.new - - -def scrub(b): - if sys.version_info >= (3,) and type(b) == bytes: - return b.decode('ascii') - else: - return b - -src_dir = scrub(os.getenv("CFG_SRC_DIR")) -if not src_dir: - raise Exception("missing env var CFG_SRC_DIR") - -snapshotfile = os.path.join(src_dir, "src", "snapshots.txt") -download_url_base = "https://static.rust-lang.org/stage0-snapshots" -download_dir_base = "dl" -download_unpack_base = os.path.join(download_dir_base, "unpack") - -snapshot_files = { - "bitrig": ["bin/rustc"], - "dragonfly": ["bin/rustc"], - "freebsd": ["bin/rustc"], - "linux": ["bin/rustc"], - "macos": ["bin/rustc"], - "netbsd": ["bin/rustc"], - "openbsd": ["bin/rustc"], - "winnt": ["bin/rustc.exe"], - } - -winnt_runtime_deps_32 = ["libgcc_s_dw2-1.dll", "libstdc++-6.dll"] -winnt_runtime_deps_64 = ["libgcc_s_seh-1.dll", "libstdc++-6.dll"] - -def parse_line(n, line): - global snapshotfile - - if re.match(r"\s*$", line): - return None - - if re.match(r"^T\s*$", line): - return None - - match = re.match(r"\s+([\w_-]+) ([a-fA-F\d]{40})\s*$", line) - if match: - return {"type": "file", - "platform": match.group(1), - "hash": match.group(2).lower()} - - match = re.match(r"([ST]) (\d{4}-\d{2}-\d{2}) ([a-fA-F\d]+)\s*$", line) - if not match: - raise Exception("%s:%d:E syntax error: " % (snapshotfile, n)) - return {"type": "snapshot", - "date": match.group(2), - "rev": match.group(3)} - - -def partial_snapshot_name(date, rev, platform): - return ("rust-stage0-%s-%s-%s.tar.bz2" % - (date, rev, platform)) - - -def full_snapshot_name(date, rev, platform, hsh): - return ("rust-stage0-%s-%s-%s-%s.tar.bz2" % - (date, rev, platform, hsh)) - - -def get_kernel(triple): - t = triple.split('-') - if len(t) == 2: - os_name = t[1] - else: - os_name = t[2] - - if os_name == 
"windows": - return "winnt" - if os_name == "darwin": - return "macos" - if os_name == "freebsd": - return "freebsd" - if os_name == "dragonfly": - return "dragonfly" - if os_name == "bitrig": - return "bitrig" - if os_name == "netbsd": - return "netbsd" - if os_name == "openbsd": - return "openbsd" - return "linux" - - -def get_cpu(triple): - arch = triple.split('-')[0] - if arch == "i686": - return "i386" - return arch - - -def get_platform(triple): - return "%s-%s" % (get_kernel(triple), get_cpu(triple)) - - -def cmd_out(cmdline): - p = subprocess.Popen(cmdline, stdout=subprocess.PIPE) - return scrub(p.communicate()[0].strip()) - - -def local_rev_info(field): - return cmd_out(["git", "--git-dir=" + os.path.join(src_dir, ".git"), - "log", "-n", "1", - "--format=%%%s" % field, "HEAD"]) - - -def local_rev_full_sha(): - return local_rev_info("H").split()[0] - - -def local_rev_short_sha(): - return local_rev_info("h").split()[0] - - -def local_rev_committer_date(): - return local_rev_info("ci") - - -def get_url_to_file(u, f): - # no security issue, just to stop partial download leaving a stale file - tmpf = f + '.tmp' - - returncode = -1 - if distutils.spawn.find_executable("curl"): - returncode = subprocess.call(["curl", "-o", tmpf, u]) - elif distutils.spawn.find_executable("wget"): - returncode = subprocess.call(["wget", "-O", tmpf, u]) - - if returncode != 0: - try: - os.unlink(tmpf) - except OSError: - pass - raise Exception("failed to fetch url") - os.rename(tmpf, f) - - -def snap_filename_hash_part(snap): - match = re.match(r".*([a-fA-F\d]{40}).tar.bz2$", snap) - if not match: - raise Exception("unable to find hash in filename: " + snap) - return match.group(1) - - -def hash_file(x): - h = sha_func() - h.update(open(x, "rb").read()) - return scrub(h.hexdigest()) - - -def get_winnt_runtime_deps(platform): - """Returns a list of paths of Rust's system runtime dependencies""" - if platform == "winnt-x86_64": - deps = winnt_runtime_deps_64 - else: - deps = 
winnt_runtime_deps_32 - runtime_deps = [] - path_dirs = os.environ["PATH"].split(os.pathsep) - for name in deps: - for dir in path_dirs: - filepath = os.path.join(dir, name) - if os.path.isfile(filepath): - runtime_deps.append(filepath) - break - else: - raise Exception("Could not find runtime dependency: %s" % name) - return runtime_deps - - -def make_snapshot(stage, triple): - kernel = get_kernel(triple) - platform = get_platform(triple) - rev = local_rev_short_sha() - date = local_rev_committer_date().split()[0] - - file0 = partial_snapshot_name(date, rev, platform) - - def in_tar_name(fn): - cs = re.split(r"[\\/]", fn) - if len(cs) >= 2: - return os.sep.join(cs[-2:]) - - tar = tarfile.open(file0, "w:bz2") - - for name in snapshot_files[kernel]: - dir = stage - if stage == "stage1" and re.match(r"^lib/(lib)?std.*", name): - dir = "stage0" - fn_glob = os.path.join(triple, dir, name) - matches = glob.glob(fn_glob) - if not matches: - raise Exception("Not found file with name like " + fn_glob) - if len(matches) == 1: - tar.add(matches[0], "rust-stage0/" + in_tar_name(matches[0])) - else: - raise Exception("Found stale files: \n %s\n" - "Please make a clean build." 
% "\n ".join(matches)) - - if kernel == "winnt": - for path in get_winnt_runtime_deps(platform): - tar.add(path, "rust-stage0/bin/" + os.path.basename(path)) - tar.add(os.path.join(os.path.dirname(__file__), "third-party"), - "rust-stage0/bin/third-party") - - tar.close() - - h = hash_file(file0) - file1 = full_snapshot_name(date, rev, platform, h) - - shutil.move(file0, file1) - - return file1 - - -def curr_snapshot_rev(): - i = 0 - found_snap = False - date = None - rev = None - - f = open(snapshotfile) - for line in f.readlines(): - i += 1 - parsed = parse_line(i, line) - if not parsed: - continue - - if parsed["type"] == "snapshot": - date = parsed["date"] - rev = parsed["rev"] - found_snap = True - break - - if not found_snap: - raise Exception("no snapshot entries in file") - - return (date, rev) - - -def determine_curr_snapshot(triple): - i = 0 - platform = get_platform(triple) - - found_file = False - found_snap = False - hsh = None - date = None - rev = None - - f = open(snapshotfile) - for line in f.readlines(): - i += 1 - parsed = parse_line(i, line) - if not parsed: - continue - - if found_snap and parsed["type"] == "file": - if parsed["platform"] == platform: - hsh = parsed["hash"] - found_file = True - break - elif parsed["type"] == "snapshot": - date = parsed["date"] - rev = parsed["rev"] - found_snap = True - - if not found_snap: - raise Exception("no snapshot entries in file") - - if not found_file: - raise Exception("no snapshot file found for platform %s, rev %s" % - (platform, rev)) - - return full_snapshot_name(date, rev, platform, hsh) diff --git a/src/etc/test-float-parse/_common.rs b/src/etc/test-float-parse/_common.rs index b4a2a593e0118..725a715f7cf32 100644 --- a/src/etc/test-float-parse/_common.rs +++ b/src/etc/test-float-parse/_common.rs @@ -16,7 +16,7 @@ use std::mem::transmute; #[allow(dead_code)] pub const SEED: [u32; 3] = [0x243f_6a88, 0x85a3_08d3, 0x1319_8a2e]; -pub fn validate(text: String) { +pub fn validate(text: &str) { let mut 
out = io::stdout(); let x: f64 = text.parse().unwrap(); let f64_bytes: u64 = unsafe { transmute(x) }; diff --git a/src/etc/test-float-parse/few-ones.rs b/src/etc/test-float-parse/few-ones.rs index 390f4da6f63ae..2486df4446631 100644 --- a/src/etc/test-float-parse/few-ones.rs +++ b/src/etc/test-float-parse/few-ones.rs @@ -20,7 +20,7 @@ fn main() { for a in &pow { for b in &pow { for c in &pow { - validate((a | b | c).to_string()); + validate(&(a | b | c).to_string()); } } } diff --git a/src/etc/test-float-parse/huge-pow10.rs b/src/etc/test-float-parse/huge-pow10.rs index e62afc7851564..9d12a03dae290 100644 --- a/src/etc/test-float-parse/huge-pow10.rs +++ b/src/etc/test-float-parse/huge-pow10.rs @@ -15,7 +15,7 @@ use _common::validate; fn main() { for e in 300..310 { for i in 0..100000 { - validate(format!("{}e{}", i, e)); + validate(&format!("{}e{}", i, e)); } } } diff --git a/src/etc/test-float-parse/long-fractions.rs b/src/etc/test-float-parse/long-fractions.rs new file mode 100644 index 0000000000000..9598bd12a0d45 --- /dev/null +++ b/src/etc/test-float-parse/long-fractions.rs @@ -0,0 +1,27 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +mod _common; + +use std::char; +use _common::validate; + +fn main() { + for n in 0..10 { + let digit = char::from_digit(n, 10).unwrap(); + let mut s = "0.".to_string(); + for _ in 0..400 { + s.push(digit); + if s.parse::().is_ok() { + validate(&s); + } + } + } +} diff --git a/src/etc/test-float-parse/many-digits.rs b/src/etc/test-float-parse/many-digits.rs index 0cbf57183dfc2..674c30ad84ed5 100644 --- a/src/etc/test-float-parse/many-digits.rs +++ b/src/etc/test-float-parse/many-digits.rs @@ -23,9 +23,9 @@ fn main() { let mut rnd = IsaacRng::from_seed(&SEED); let mut range = Range::new(0, 10); for _ in 0..5_000_000u64 { - let num_digits = rnd.gen_range(100, 300); + let num_digits = rnd.gen_range(100, 400); let digits = gen_digits(num_digits, &mut range, &mut rnd); - validate(digits); + validate(&digits); } } diff --git a/src/etc/test-float-parse/rand-f64.rs b/src/etc/test-float-parse/rand-f64.rs index 762c3d95ec6eb..1d82912054e2a 100644 --- a/src/etc/test-float-parse/rand-f64.rs +++ b/src/etc/test-float-parse/rand-f64.rs @@ -25,7 +25,7 @@ fn main() { let bits = rnd.next_u64(); let x: f64 = unsafe { transmute(bits) }; if x.is_finite() { - validate(format!("{:e}", x)); + validate(&format!("{:e}", x)); i += 1; } } diff --git a/src/etc/test-float-parse/runtests.py b/src/etc/test-float-parse/runtests.py index 27af63a5876bc..bc141877b373f 100644 --- a/src/etc/test-float-parse/runtests.py +++ b/src/etc/test-float-parse/runtests.py @@ -21,8 +21,9 @@ The actual tests (generating decimal strings and feeding them to dec2flt) is performed by a set of stand-along rust programs. This script compiles, runs, -and supervises them. In particular, the programs report the strings they -generate and the floating point numbers they converted those strings to. +and supervises them. The programs report the strings they generate and the +floating point numbers they converted those strings to, and this script +checks that the results are correct. 
You can run specific tests rather than all of them by giving their names (without .rs extension) as command line parameters. @@ -64,9 +65,9 @@ exit code that's not 0, the test fails. The output on stdout is treated as (f64, f32, decimal) record, encoded thusly: -- The first eight bytes are a binary64 (native endianness). -- The following four bytes are a binary32 (native endianness). -- Then the corresponding string input follows, in ASCII (no newline). +- First, the bits of the f64 encoded as an ASCII hex string. +- Second, the bits of the f32 encoded as an ASCII hex string. +- Then the corresponding string input, in ASCII - The record is terminated with a newline. Incomplete records are an error. Not-a-Number bit patterns are invalid too. @@ -176,7 +177,6 @@ def run(test): def interact(proc, queue): - line = "" n = 0 while proc.poll() is None: line = proc.stdout.readline() @@ -184,7 +184,6 @@ def interact(proc, queue): continue assert line.endswith('\n'), "incomplete line: " + repr(line) queue.put(line) - line = "" n += 1 if n % UPDATE_EVERY_N == 0: msg("got", str(n // 1000) + "k", "records") diff --git a/src/etc/test-float-parse/short-decimals.rs b/src/etc/test-float-parse/short-decimals.rs index baefb9c930543..4909f7c58f89a 100644 --- a/src/etc/test-float-parse/short-decimals.rs +++ b/src/etc/test-float-parse/short-decimals.rs @@ -22,8 +22,8 @@ fn main() { if i % 10 == 0 { continue; } - validate(format!("{}e{}", i, e)); - validate(format!("{}e-{}", i, e)); + validate(&format!("{}e{}", i, e)); + validate(&format!("{}e-{}", i, e)); } } } diff --git a/src/etc/test-float-parse/subnorm.rs b/src/etc/test-float-parse/subnorm.rs index 70682c9b21810..04a7cc2746675 100644 --- a/src/etc/test-float-parse/subnorm.rs +++ b/src/etc/test-float-parse/subnorm.rs @@ -16,8 +16,8 @@ use _common::validate; fn main() { for bits in 0u32..(1 << 21) { let single: f32 = unsafe { transmute(bits) }; - validate(format!("{:e}", single)); + validate(&format!("{:e}", single)); let double: f64 
= unsafe { transmute(bits as u64) }; - validate(format!("{:e}", double)); + validate(&format!("{:e}", double)); } } diff --git a/src/etc/test-float-parse/tiny-pow10.rs b/src/etc/test-float-parse/tiny-pow10.rs index a01c6d5a07893..50ca5e32609ac 100644 --- a/src/etc/test-float-parse/tiny-pow10.rs +++ b/src/etc/test-float-parse/tiny-pow10.rs @@ -15,7 +15,7 @@ use _common::validate; fn main() { for e in 301..327 { for i in 0..100000 { - validate(format!("{}e-{}", i, e)); + validate(&format!("{}e-{}", i, e)); } } } diff --git a/src/etc/test-float-parse/u32-small.rs b/src/etc/test-float-parse/u32-small.rs index a4e8488e74529..571ac80e5b0c5 100644 --- a/src/etc/test-float-parse/u32-small.rs +++ b/src/etc/test-float-parse/u32-small.rs @@ -14,6 +14,6 @@ use _common::validate; fn main() { for i in 0..(1 << 19) { - validate(i.to_string()); + validate(&i.to_string()); } } diff --git a/src/etc/test-float-parse/u64-pow2.rs b/src/etc/test-float-parse/u64-pow2.rs index a31304d3f68aa..5b25c8399319c 100644 --- a/src/etc/test-float-parse/u64-pow2.rs +++ b/src/etc/test-float-parse/u64-pow2.rs @@ -16,13 +16,13 @@ use std::u64; fn main() { for exp in 19..64 { let power: u64 = 1 << exp; - validate(power.to_string()); + validate(&power.to_string()); for offset in 1..123 { - validate((power + offset).to_string()); - validate((power - offset).to_string()); + validate(&(power + offset).to_string()); + validate(&(power - offset).to_string()); } } for offset in 0..123 { - validate((u64::MAX - offset).to_string()); + validate(&(u64::MAX - offset).to_string()); } } diff --git a/src/etc/tidy.py b/src/etc/tidy.py deleted file mode 100644 index 942793adc31de..0000000000000 --- a/src/etc/tidy.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright 2010-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. 
This file may not be copied, modified, or distributed -# except according to those terms. - -import sys -import fileinput -import subprocess -import re -import os -from licenseck import check_license -import snapshot - -err = 0 -cols = 100 -cr_flag = "ignore-tidy-cr" -tab_flag = "ignore-tidy-tab" -linelength_flag = "ignore-tidy-linelength" - -interesting_files = ['.rs', '.py', '.js', '.sh', '.c', '.h'] -uninteresting_files = ['miniz.c', 'jquery', 'rust_android_dummy'] - - -def report_error_name_no(name, no, s): - global err - print("%s:%d: %s" % (name, no, s)) - err = 1 - - -def report_err(s): - report_error_name_no(fileinput.filename(), fileinput.filelineno(), s) - - -def report_warn(s): - print("%s:%d: %s" % (fileinput.filename(), - fileinput.filelineno(), - s)) - - -def do_license_check(name, contents): - if not check_license(name, contents): - report_error_name_no(name, 1, "incorrect license") - - -def update_counts(current_name): - global file_counts - global count_other_linted_files - - _, ext = os.path.splitext(current_name) - - if ext in interesting_files: - file_counts[ext] += 1 - else: - count_other_linted_files += 1 - - -def interesting_file(f): - if any(x in f for x in uninteresting_files): - return False - - return any(os.path.splitext(f)[1] == ext for ext in interesting_files) - - -# Be careful to support Python 2.4, 2.6, and 3.x here! 
-config_proc = subprocess.Popen(["git", "config", "core.autocrlf"], - stdout=subprocess.PIPE) -result = config_proc.communicate()[0] - -true = "true".encode('utf8') -autocrlf = result.strip() == true if result is not None else False - -current_name = "" -current_contents = "" -check_tab = True -check_cr = True -check_linelength = True - -if len(sys.argv) < 2: - print("usage: tidy.py ") - sys.exit(1) - -src_dir = sys.argv[1] - -count_lines = 0 -count_non_blank_lines = 0 -count_other_linted_files = 0 - -file_counts = {ext: 0 for ext in interesting_files} - -all_paths = set() - -try: - for (dirpath, dirnames, filenames) in os.walk(src_dir): - # Skip some third-party directories - skippable_dirs = { - 'src/jemalloc', - 'src/llvm', - 'src/gyp', - 'src/libbacktrace', - 'src/libuv', - 'src/compiler-rt', - 'src/rt/hoedown', - 'src/rustllvm', - 'src/rt/valgrind', - 'src/rt/msvc', - 'src/rust-installer', - 'src/liblibc', - } - - if any(d in dirpath for d in skippable_dirs): - continue - - file_names = [os.path.join(dirpath, f) for f in filenames - if interesting_file(f) - and not f.endswith("_gen.rs") - and not ".#" is f] - - if not file_names: - continue - - for line in fileinput.input(file_names, - openhook=fileinput.hook_encoded("utf-8")): - - filename = fileinput.filename() - - if "tidy.py" not in filename: - if "TODO" in line: - report_err("TODO is deprecated; use FIXME") - match = re.match(r'^.*/(\*|/!?)\s*XXX', line) - if match: - report_err("XXX is no longer necessary, use FIXME") - match = re.match(r'^.*//\s*(NOTE.*)$', line) - if match and "TRAVIS" not in os.environ: - m = match.group(1) - if "snap" in m.lower(): - report_warn(match.group(1)) - match = re.match(r'^.*//\s*SNAP\s+(\w+)', line) - if match: - hsh = match.group(1) - date, rev = snapshot.curr_snapshot_rev() - if not hsh.startswith(rev): - report_err("snapshot out of date (" + date - + "): " + line) - else: - if "SNAP" in line: - report_warn("unmatched SNAP line: " + line) - - if cr_flag in line: - 
check_cr = False - if tab_flag in line: - check_tab = False - if linelength_flag in line: - check_linelength = False - - if check_tab and ('\t' in line and - "Makefile" not in filename): - report_err("tab character") - if check_cr and not autocrlf and '\r' in line: - report_err("CR character") - if line.endswith(" \n") or line.endswith("\t\n"): - report_err("trailing whitespace") - line_len = len(line)-2 if autocrlf else len(line)-1 - - if check_linelength and line_len > cols: - report_err("line longer than %d chars" % cols) - - if fileinput.isfirstline(): - # This happens at the end of each file except the last. - if current_name != "": - update_counts(current_name) - assert len(current_contents) > 0 - do_license_check(current_name, current_contents) - - current_name = filename - current_contents = "" - check_cr = True - check_tab = True - check_linelength = True - - # Put a reasonable limit on the amount of header data we use for - # the licenseck - if len(current_contents) < 1000: - current_contents += line - - count_lines += 1 - if line.strip(): - count_non_blank_lines += 1 - - if current_name != "": - update_counts(current_name) - assert len(current_contents) > 0 - do_license_check(current_name, current_contents) - -except UnicodeDecodeError as e: - report_err("UTF-8 decoding error " + str(e)) - -print -for ext in sorted(file_counts, key=file_counts.get, reverse=True): - print("* linted {} {} files".format(file_counts[ext], ext)) -print("* linted {} other files".format(count_other_linted_files)) -print("* total lines of code: {}".format(count_lines)) -print("* total non-blank lines of code: {}".format(count_non_blank_lines)) -print() - -sys.exit(err) diff --git a/src/etc/unicode.py b/src/etc/unicode.py index 10b864a902dc0..bddc83f63d25d 100755 --- a/src/etc/unicode.py +++ b/src/etc/unicode.py @@ -25,7 +25,10 @@ import fileinput, re, os, sys, operator -preamble = '''// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT +bytes_old = 0 +bytes_new = 0 + +preamble = '''// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -79,28 +82,28 @@ def load_unicode_data(f): canon_decomp = {} compat_decomp = {} - udict = {}; - range_start = -1; + udict = {} + range_start = -1 for line in fileinput.input(f): - data = line.split(';'); + data = line.split(';') if len(data) != 15: continue - cp = int(data[0], 16); + cp = int(data[0], 16) if is_surrogate(cp): continue if range_start >= 0: for i in xrange(range_start, cp): - udict[i] = data; - range_start = -1; + udict[i] = data + range_start = -1 if data[1].endswith(", First>"): - range_start = cp; - continue; - udict[cp] = data; + range_start = cp + continue + udict[cp] = data for code in udict: - [code_org, name, gencat, combine, bidi, + (code_org, name, gencat, combine, bidi, decomp, deci, digit, num, mirror, - old, iso, upcase, lowcase, titlecase ] = udict[code]; + old, iso, upcase, lowcase, titlecase) = udict[code] # generate char to char direct common and simple conversions # uppercase to lowercase @@ -307,12 +310,137 @@ def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True, format_table_content(f, data, 8) f.write("\n ];\n\n") +def emit_trie_lookup_range_table(f): + f.write(""" + +// BoolTrie is a trie for representing a set of Unicode codepoints. It is +// implemented with postfix compression (sharing of identical child nodes), +// which gives both compact size and fast lookup. +// +// The space of Unicode codepoints is divided into 3 subareas, each +// represented by a trie with different depth. In the first (0..0x800), there +// is no trie structure at all; each u64 entry corresponds to a bitvector +// effectively holding 64 bool values. 
+// +// In the second (0x800..0x10000), each child of the root node represents a +// 64-wide subrange, but instead of storing the full 64-bit value of the leaf, +// the trie stores an 8-bit index into a shared table of leaf values. This +// exploits the fact that in reasonable sets, many such leaves can be shared. +// +// In the third (0x10000..0x110000), each child of the root node represents a +// 4096-wide subrange, and the trie stores an 8-bit index into a 64-byte slice +// of a child tree. Each of these 64 bytes represents an index into the table +// of shared 64-bit leaf values. This exploits the sparse structure in the +// non-BMP range of most Unicode sets. +pub struct BoolTrie { + // 0..0x800 (corresponding to 1 and 2 byte utf-8 sequences) + r1: [u64; 32], // leaves + + // 0x800..0x10000 (corresponding to 3 byte utf-8 sequences) + r2: [u8; 992], // first level + r3: &'static [u64], // leaves + + // 0x10000..0x110000 (corresponding to 4 byte utf-8 sequences) + r4: [u8; 256], // first level + r5: &'static [u8], // second level + r6: &'static [u64], // leaves +} + +fn trie_range_leaf(c: usize, bitmap_chunk: u64) -> bool { + ((bitmap_chunk >> (c & 63)) & 1) != 0 +} + +fn trie_lookup_range_table(c: char, r: &'static BoolTrie) -> bool { + let c = c as usize; + if c < 0x800 { + trie_range_leaf(c, r.r1[c >> 6]) + } else if c < 0x10000 { + let child = r.r2[(c >> 6) - 0x20]; + trie_range_leaf(c, r.r3[child as usize]) + } else { + let child = r.r4[(c >> 12) - 0x10]; + let leaf = r.r5[((child as usize) << 6) + ((c >> 6) & 0x3f)]; + trie_range_leaf(c, r.r6[leaf as usize]) + } +}\n +""") + +def compute_trie(rawdata, chunksize): + root = [] + childmap = {} + child_data = [] + for i in range(len(rawdata) / chunksize): + data = rawdata[i * chunksize: (i + 1) * chunksize] + child = '|'.join(map(str, data)) + if child not in childmap: + childmap[child] = len(childmap) + child_data.extend(data) + root.append(childmap[child]) + return (root, child_data) + +def 
emit_bool_trie(f, name, t_data, is_pub=True): + global bytes_old, bytes_new + bytes_old += 8 * len(t_data) + CHUNK = 64 + rawdata = [False] * 0x110000 + for (lo, hi) in t_data: + for cp in range(lo, hi + 1): + rawdata[cp] = True + + # convert to bitmap chunks of 64 bits each + chunks = [] + for i in range(0x110000 / CHUNK): + chunk = 0 + for j in range(64): + if rawdata[i * 64 + j]: + chunk |= 1 << j + chunks.append(chunk) + + pub_string = "" + if is_pub: + pub_string = "pub " + f.write(" %sconst %s: &'static super::BoolTrie = &super::BoolTrie {\n" % (pub_string, name)) + f.write(" r1: [\n") + data = ','.join('0x%016x' % chunk for chunk in chunks[0:0x800 / CHUNK]) + format_table_content(f, data, 12) + f.write("\n ],\n") + + # 0x800..0x10000 trie + (r2, r3) = compute_trie(chunks[0x800 / CHUNK : 0x10000 / CHUNK], 64 / CHUNK) + f.write(" r2: [\n") + data = ','.join(str(node) for node in r2) + format_table_content(f, data, 12) + f.write("\n ],\n") + f.write(" r3: &[\n") + data = ','.join('0x%016x' % chunk for chunk in r3) + format_table_content(f, data, 12) + f.write("\n ],\n") + + # 0x10000..0x110000 trie + (mid, r6) = compute_trie(chunks[0x10000 / CHUNK : 0x110000 / CHUNK], 64 / CHUNK) + (r4, r5) = compute_trie(mid, 64) + f.write(" r4: [\n") + data = ','.join(str(node) for node in r4) + format_table_content(f, data, 12) + f.write("\n ],\n") + f.write(" r5: &[\n") + data = ','.join(str(node) for node in r5) + format_table_content(f, data, 12) + f.write("\n ],\n") + f.write(" r6: &[\n") + data = ','.join('0x%016x' % chunk for chunk in r6) + format_table_content(f, data, 12) + f.write("\n ],\n") + + f.write(" };\n\n") + bytes_new += 256 + 992 + 256 + 8 * len(r3) + len(r5) + 8 * len(r6) + def emit_property_module(f, mod, tbl, emit): f.write("pub mod %s {\n" % mod) for cat in sorted(emit): - emit_table(f, "%s_table" % cat, tbl[cat]) + emit_bool_trie(f, "%s_table" % cat, tbl[cat]) f.write(" pub fn %s(c: char) -> bool {\n" % cat) - f.write(" super::bsearch_range_table(c, 
%s_table)\n" % cat) + f.write(" super::trie_lookup_range_table(c, %s_table)\n" % cat) f.write(" }\n\n") f.write("}\n\n") @@ -398,19 +526,21 @@ def emit_norm_module(f, canon, compat, combine, norm_props): derived = load_properties("DerivedCoreProperties.txt", want_derived) scripts = load_properties("Scripts.txt", []) props = load_properties("PropList.txt", - ["White_Space", "Join_Control", "Noncharacter_Code_Point"]) + ["White_Space", "Join_Control", "Noncharacter_Code_Point", "Pattern_White_Space"]) norm_props = load_properties("DerivedNormalizationProps.txt", ["Full_Composition_Exclusion"]) - # bsearch_range_table is used in all the property modules below - emit_bsearch_range_table(rf) + # trie_lookup_table is used in all the property modules below + emit_trie_lookup_range_table(rf) + # emit_bsearch_range_table(rf) # category tables for (name, cat, pfuns) in ("general_category", gencats, ["N", "Cc"]), \ ("derived_property", derived, want_derived), \ - ("property", props, ["White_Space"]): + ("property", props, ["White_Space", "Pattern_White_Space"]): emit_property_module(rf, name, cat, pfuns) # normalizations and conversions module emit_norm_module(rf, canon_decomp, compat_decomp, combines, norm_props) emit_conversions_module(rf, to_upper, to_lower, to_title) + #print 'bytes before = %d, bytes after = %d' % (bytes_old, bytes_new) diff --git a/src/grammar/README.md b/src/grammar/README.md index 6e0cf17a88040..cd2dd38de36aa 100644 --- a/src/grammar/README.md +++ b/src/grammar/README.md @@ -1,14 +1,18 @@ -Reference grammar. +# Reference grammar. Uses [antlr4](http://www.antlr.org/) and a custom Rust tool to compare -ASTs/token streams generated. You can use the `check-lexer` make target to +ASTs/token streams generated. You can use the `make check-lexer` target to run all of the available tests. -To use manually: +The build of the rust part is included with `make tidy` and can be run with `make check-build-lexer-verifier`. 
+ +# Manual build + +To use manually, assuming antlr4 ist installed at `/usr/share/java/antlr-complete.jar`: ``` antlr4 RustLexer.g4 -javac *.java +javac -classpath /usr/share/java/antlr-complete.jar *.java rustc -O verify.rs for file in ../*/**.rs; do echo $file; @@ -18,3 +22,12 @@ done Note That the `../*/**.rs` glob will match every `*.rs` file in the above directory and all of its recursive children. This is a zsh extension. + + +## Cleanup + +To cleanup you can use a command like this: + +```bash +rm -f verify *.class *.java *.tokens +``` diff --git a/src/grammar/check.sh b/src/grammar/check.sh index 560b6b72471e1..70a8f6fca2e5c 100755 --- a/src/grammar/check.sh +++ b/src/grammar/check.sh @@ -20,11 +20,11 @@ skipped=0 check() { grep --silent "// ignore-lexer-test" "$1"; - # if it's *not* found... + # if it is *not* found... if [ $? -eq 1 ]; then - cd $2 # This `cd` is so java will pick up RustLexer.class. I couldn't + cd $2 # This `cd` is so java will pick up RustLexer.class. I could not # figure out how to wrangle the CLASSPATH, just adding build/grammar - # didn't seem to have any effect. + # did not seem to have any effect. if $3 RustLexer tokens -tokens < $1 | $4 $1 $5; then echo "pass: $1" passed=`expr $passed + 1` diff --git a/src/grammar/verify.rs b/src/grammar/verify.rs index f04830ee969bd..919fc98e438c5 100644 --- a/src/grammar/verify.rs +++ b/src/grammar/verify.rs @@ -8,9 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![feature(plugin, rustc_private, str_char, collections)] +#![feature(plugin, rustc_private)] extern crate syntax; +extern crate syntax_pos; extern crate rustc; #[macro_use] @@ -22,27 +23,28 @@ use std::fs::File; use std::io::{BufRead, Read}; use std::path::Path; -use syntax::parse; use syntax::parse::lexer; +use rustc::dep_graph::DepGraph; use rustc::session::{self, config}; use rustc::middle::cstore::DummyCrateStore; use std::rc::Rc; use syntax::ast; -use syntax::ast::Name; use syntax::codemap; -use syntax::codemap::Pos; -use syntax::parse::token; +use syntax::parse::token::{self, BinOpToken, DelimToken, Lit, Token}; use syntax::parse::lexer::TokenAndSpan; +use syntax_pos::Pos; + +use syntax::symbol::{Symbol, keywords}; fn parse_token_list(file: &str) -> HashMap { fn id() -> token::Token { - token::Ident(ast::Ident::with_empty_ctxt(Name(0)), token::Plain) + Token::Ident(ast::Ident::with_empty_ctxt(keywords::Invalid.name())) } let mut res = HashMap::new(); - res.insert("-1".to_string(), token::Eof); + res.insert("-1".to_string(), Token::Eof); for line in file.split('\n') { let eq = match line.trim().rfind('=') { @@ -54,65 +56,67 @@ fn parse_token_list(file: &str) -> HashMap { let num = &line[eq + 1..]; let tok = match val { - "SHR" => token::BinOp(token::Shr), - "DOLLAR" => token::Dollar, - "LT" => token::Lt, - "STAR" => token::BinOp(token::Star), + "SHR" => Token::BinOp(BinOpToken::Shr), + "DOLLAR" => Token::Dollar, + "LT" => Token::Lt, + "STAR" => Token::BinOp(BinOpToken::Star), "FLOAT_SUFFIX" => id(), "INT_SUFFIX" => id(), - "SHL" => token::BinOp(token::Shl), - "LBRACE" => token::OpenDelim(token::Brace), - "RARROW" => token::RArrow, - "LIT_STR" => token::Literal(token::Str_(Name(0)), None), - "DOTDOT" => token::DotDot, - "MOD_SEP" => token::ModSep, - "DOTDOTDOT" => token::DotDotDot, - "NOT" => token::Not, - "AND" => token::BinOp(token::And), - "LPAREN" => token::OpenDelim(token::Paren), - "ANDAND" => token::AndAnd, - "AT" => token::At, - "LBRACKET" => 
token::OpenDelim(token::Bracket), - "LIT_STR_RAW" => token::Literal(token::StrRaw(Name(0), 0), None), - "RPAREN" => token::CloseDelim(token::Paren), - "SLASH" => token::BinOp(token::Slash), - "COMMA" => token::Comma, - "LIFETIME" => token::Lifetime(ast::Ident::with_empty_ctxt(Name(0))), - "CARET" => token::BinOp(token::Caret), - "TILDE" => token::Tilde, + "SHL" => Token::BinOp(BinOpToken::Shl), + "LBRACE" => Token::OpenDelim(DelimToken::Brace), + "RARROW" => Token::RArrow, + "LIT_STR" => Token::Literal(Lit::Str_(keywords::Invalid.name()), None), + "DOTDOT" => Token::DotDot, + "MOD_SEP" => Token::ModSep, + "DOTDOTDOT" => Token::DotDotDot, + "NOT" => Token::Not, + "AND" => Token::BinOp(BinOpToken::And), + "LPAREN" => Token::OpenDelim(DelimToken::Paren), + "ANDAND" => Token::AndAnd, + "AT" => Token::At, + "LBRACKET" => Token::OpenDelim(DelimToken::Bracket), + "LIT_STR_RAW" => Token::Literal(Lit::StrRaw(keywords::Invalid.name(), 0), None), + "RPAREN" => Token::CloseDelim(DelimToken::Paren), + "SLASH" => Token::BinOp(BinOpToken::Slash), + "COMMA" => Token::Comma, + "LIFETIME" => Token::Lifetime( + ast::Ident::with_empty_ctxt(keywords::Invalid.name())), + "CARET" => Token::BinOp(BinOpToken::Caret), + "TILDE" => Token::Tilde, "IDENT" => id(), - "PLUS" => token::BinOp(token::Plus), - "LIT_CHAR" => token::Literal(token::Char(Name(0)), None), - "LIT_BYTE" => token::Literal(token::Byte(Name(0)), None), - "EQ" => token::Eq, - "RBRACKET" => token::CloseDelim(token::Bracket), - "COMMENT" => token::Comment, - "DOC_COMMENT" => token::DocComment(Name(0)), - "DOT" => token::Dot, - "EQEQ" => token::EqEq, - "NE" => token::Ne, - "GE" => token::Ge, - "PERCENT" => token::BinOp(token::Percent), - "RBRACE" => token::CloseDelim(token::Brace), - "BINOP" => token::BinOp(token::Plus), - "POUND" => token::Pound, - "OROR" => token::OrOr, - "LIT_INTEGER" => token::Literal(token::Integer(Name(0)), None), - "BINOPEQ" => token::BinOpEq(token::Plus), - "LIT_FLOAT" => 
token::Literal(token::Float(Name(0)), None), - "WHITESPACE" => token::Whitespace, - "UNDERSCORE" => token::Underscore, - "MINUS" => token::BinOp(token::Minus), - "SEMI" => token::Semi, - "COLON" => token::Colon, - "FAT_ARROW" => token::FatArrow, - "OR" => token::BinOp(token::Or), - "GT" => token::Gt, - "LE" => token::Le, - "LIT_BYTE_STR" => token::Literal(token::ByteStr(Name(0)), None), - "LIT_BYTE_STR_RAW" => token::Literal(token::ByteStrRaw(Name(0), 0), None), - "QUESTION" => token::Question, - "SHEBANG" => token::Shebang(Name(0)), + "PLUS" => Token::BinOp(BinOpToken::Plus), + "LIT_CHAR" => Token::Literal(Lit::Char(keywords::Invalid.name()), None), + "LIT_BYTE" => Token::Literal(Lit::Byte(keywords::Invalid.name()), None), + "EQ" => Token::Eq, + "RBRACKET" => Token::CloseDelim(DelimToken::Bracket), + "COMMENT" => Token::Comment, + "DOC_COMMENT" => Token::DocComment(keywords::Invalid.name()), + "DOT" => Token::Dot, + "EQEQ" => Token::EqEq, + "NE" => Token::Ne, + "GE" => Token::Ge, + "PERCENT" => Token::BinOp(BinOpToken::Percent), + "RBRACE" => Token::CloseDelim(DelimToken::Brace), + "BINOP" => Token::BinOp(BinOpToken::Plus), + "POUND" => Token::Pound, + "OROR" => Token::OrOr, + "LIT_INTEGER" => Token::Literal(Lit::Integer(keywords::Invalid.name()), None), + "BINOPEQ" => Token::BinOpEq(BinOpToken::Plus), + "LIT_FLOAT" => Token::Literal(Lit::Float(keywords::Invalid.name()), None), + "WHITESPACE" => Token::Whitespace, + "UNDERSCORE" => Token::Underscore, + "MINUS" => Token::BinOp(BinOpToken::Minus), + "SEMI" => Token::Semi, + "COLON" => Token::Colon, + "FAT_ARROW" => Token::FatArrow, + "OR" => Token::BinOp(BinOpToken::Or), + "GT" => Token::Gt, + "LE" => Token::Le, + "LIT_BINARY" => Token::Literal(Lit::ByteStr(keywords::Invalid.name()), None), + "LIT_BINARY_RAW" => Token::Literal( + Lit::ByteStrRaw(keywords::Invalid.name(), 0), None), + "QUESTION" => Token::Question, + "SHEBANG" => Token::Shebang(keywords::Invalid.name()), _ => continue, }; @@ -125,16 +129,16 @@ fn 
parse_token_list(file: &str) -> HashMap { fn str_to_binop(s: &str) -> token::BinOpToken { match s { - "+" => token::Plus, - "/" => token::Slash, - "-" => token::Minus, - "*" => token::Star, - "%" => token::Percent, - "^" => token::Caret, - "&" => token::And, - "|" => token::Or, - "<<" => token::Shl, - ">>" => token::Shr, + "+" => BinOpToken::Plus, + "/" => BinOpToken::Slash, + "-" => BinOpToken::Minus, + "*" => BinOpToken::Star, + "%" => BinOpToken::Percent, + "^" => BinOpToken::Caret, + "&" => BinOpToken::And, + "|" => BinOpToken::Or, + "<<" => BinOpToken::Shl, + ">>" => BinOpToken::Shr, _ => panic!("Bad binop str `{}`", s), } } @@ -142,29 +146,31 @@ fn str_to_binop(s: &str) -> token::BinOpToken { /// Assuming a string/byte string literal, strip out the leading/trailing /// hashes and surrounding quotes/raw/byte prefix. fn fix(mut lit: &str) -> ast::Name { - if lit.char_at(0) == 'r' { - if lit.char_at(1) == 'b' { + let prefix: Vec = lit.chars().take(2).collect(); + if prefix[0] == 'r' { + if prefix[1] == 'b' { lit = &lit[2..] } else { lit = &lit[1..]; } - } else if lit.char_at(0) == 'b' { + } else if prefix[0] == 'b' { lit = &lit[1..]; } let leading_hashes = count(lit); // +1/-1 to adjust for single quotes - parse::token::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1]) + Symbol::intern(&lit[leading_hashes + 1..lit.len() - leading_hashes - 1]) } /// Assuming a char/byte literal, strip the 'b' prefix and the single quotes. 
fn fixchar(mut lit: &str) -> ast::Name { - if lit.char_at(0) == 'b' { + let prefix = lit.chars().next().unwrap(); + if prefix == 'b' { lit = &lit[1..]; } - parse::token::intern(&lit[1..lit.len() - 1]) + Symbol::intern(&lit[1..lit.len() - 1]) } fn count(lit: &str) -> usize { @@ -192,31 +198,30 @@ fn parse_antlr_token(s: &str, tokens: &HashMap, surrogate_ let not_found = format!("didn't find token {:?} in the map", toknum); let proto_tok = tokens.get(toknum).expect(¬_found[..]); - let nm = parse::token::intern(content); + let nm = Symbol::intern(content); debug!("What we got: content (`{}`), proto: {:?}", content, proto_tok); let real_tok = match *proto_tok { - token::BinOp(..) => token::BinOp(str_to_binop(content)), - token::BinOpEq(..) => token::BinOpEq(str_to_binop(&content[..content.len() - 1])), - token::Literal(token::Str_(..), n) => token::Literal(token::Str_(fix(content)), n), - token::Literal(token::StrRaw(..), n) => token::Literal(token::StrRaw(fix(content), + Token::BinOp(..) => Token::BinOp(str_to_binop(content)), + Token::BinOpEq(..) => Token::BinOpEq(str_to_binop(&content[..content.len() - 1])), + Token::Literal(Lit::Str_(..), n) => Token::Literal(Lit::Str_(fix(content)), n), + Token::Literal(Lit::StrRaw(..), n) => Token::Literal(Lit::StrRaw(fix(content), count(content)), n), - token::Literal(token::Char(..), n) => token::Literal(token::Char(fixchar(content)), n), - token::Literal(token::Byte(..), n) => token::Literal(token::Byte(fixchar(content)), n), - token::DocComment(..) 
=> token::DocComment(nm), - token::Literal(token::Integer(..), n) => token::Literal(token::Integer(nm), n), - token::Literal(token::Float(..), n) => token::Literal(token::Float(nm), n), - token::Literal(token::ByteStr(..), n) => token::Literal(token::ByteStr(nm), n), - token::Literal(token::ByteStrRaw(..), n) => token::Literal(token::ByteStrRaw(fix(content), + Token::Literal(Lit::Char(..), n) => Token::Literal(Lit::Char(fixchar(content)), n), + Token::Literal(Lit::Byte(..), n) => Token::Literal(Lit::Byte(fixchar(content)), n), + Token::DocComment(..) => Token::DocComment(nm), + Token::Literal(Lit::Integer(..), n) => Token::Literal(Lit::Integer(nm), n), + Token::Literal(Lit::Float(..), n) => Token::Literal(Lit::Float(nm), n), + Token::Literal(Lit::ByteStr(..), n) => Token::Literal(Lit::ByteStr(nm), n), + Token::Literal(Lit::ByteStrRaw(..), n) => Token::Literal(Lit::ByteStrRaw(fix(content), count(content)), n), - token::Ident(..) => token::Ident(ast::Ident::with_empty_ctxt(nm), - token::ModName), - token::Lifetime(..) => token::Lifetime(ast::Ident::with_empty_ctxt(nm)), + Token::Ident(..) => Token::Ident(ast::Ident::with_empty_ctxt(nm)), + Token::Lifetime(..) 
=> Token::Lifetime(ast::Ident::with_empty_ctxt(nm)), ref t => t.clone() }; - let start_offset = if real_tok == token::Eof { + let start_offset = if real_tok == Token::Eof { 1 } else { 0 @@ -231,10 +236,10 @@ fn parse_antlr_token(s: &str, tokens: &HashMap, surrogate_ lo -= surrogate_pairs_pos.binary_search(&(lo as usize)).unwrap_or_else(|x| x) as u32; hi -= surrogate_pairs_pos.binary_search(&(hi as usize)).unwrap_or_else(|x| x) as u32; - let sp = codemap::Span { - lo: codemap::BytePos(lo), - hi: codemap::BytePos(hi), - expn_id: codemap::NO_EXPANSION + let sp = syntax_pos::Span { + lo: syntax_pos::BytePos(lo), + hi: syntax_pos::BytePos(hi), + expn_id: syntax_pos::NO_EXPANSION }; TokenAndSpan { @@ -245,8 +250,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap, surrogate_ fn tok_cmp(a: &token::Token, b: &token::Token) -> bool { match a { - &token::Ident(id, _) => match b { - &token::Ident(id2, _) => id == id2, + &Token::Ident(id) => match b { + &Token::Ident(id2) => id == id2, _ => false }, _ => a == b @@ -287,10 +292,11 @@ fn main() { debug!("Pairs: {:?}", surrogate_pairs_pos); let options = config::basic_options(); - let session = session::build_session(options, None, - syntax::diagnostics::registry::Registry::new(&[]), + let session = session::build_session(options, &DepGraph::new(false), None, + syntax::errors::registry::Registry::new(&[]), Rc::new(DummyCrateStore)); - let filemap = session.parse_sess.codemap().new_filemap(String::from(""), code); + let filemap = session.parse_sess.codemap() + .new_filemap("".to_string(), None, code); let mut lexer = lexer::StringReader::new(session.diagnostic(), filemap); let cm = session.codemap(); @@ -310,7 +316,7 @@ fn main() { for antlr_tok in antlr_tokens { let rustc_tok = next(&mut lexer); - if rustc_tok.tok == token::Eof && antlr_tok.tok == token::Eof { + if rustc_tok.tok == Token::Eof && antlr_tok.tok == Token::Eof { continue } @@ -337,19 +343,19 @@ fn main() { } matches!( - token::Literal(token::Byte(..), _), - 
token::Literal(token::Char(..), _), - token::Literal(token::Integer(..), _), - token::Literal(token::Float(..), _), - token::Literal(token::Str_(..), _), - token::Literal(token::StrRaw(..), _), - token::Literal(token::ByteStr(..), _), - token::Literal(token::ByteStrRaw(..), _), - token::Ident(..), - token::Lifetime(..), - token::Interpolated(..), - token::DocComment(..), - token::Shebang(..) + Token::Literal(Lit::Byte(..), _), + Token::Literal(Lit::Char(..), _), + Token::Literal(Lit::Integer(..), _), + Token::Literal(Lit::Float(..), _), + Token::Literal(Lit::Str_(..), _), + Token::Literal(Lit::StrRaw(..), _), + Token::Literal(Lit::ByteStr(..), _), + Token::Literal(Lit::ByteStrRaw(..), _), + Token::Ident(..), + Token::Lifetime(..), + Token::Interpolated(..), + Token::DocComment(..), + Token::Shebang(..) ); } } diff --git a/src/jemalloc b/src/jemalloc index f84e30927284b..e058ca661692a 160000 --- a/src/jemalloc +++ b/src/jemalloc @@ -1 +1 @@ -Subproject commit f84e30927284b0c500ed3eaf09e8e159da20ddaf +Subproject commit e058ca661692a8d01f8cf9d35939dfe3105ce968 diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml new file mode 100644 index 0000000000000..0889ca9fc84d4 --- /dev/null +++ b/src/liballoc/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["The Rust Project Developers"] +name = "alloc" +version = "0.0.0" + +[lib] +name = "alloc" +path = "lib.rs" + +[dependencies] +core = { path = "../libcore" } diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 169634a7c8255..3a7da18c8deb1 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -10,69 +10,16 @@ #![stable(feature = "rust1", since = "1.0.0")] -//! Threadsafe reference-counted boxes (the `Arc` type). +//! Thread-safe reference-counting pointers. //! -//! The `Arc` type provides shared ownership of an immutable value. -//! Destruction is deterministic, and will occur as soon as the last owner is -//! gone. It is marked as `Send` because it uses atomic reference counting. +//! 
See the [`Arc`][arc] documentation for more details. //! -//! If you do not need thread-safety, and just need shared ownership, consider -//! the [`Rc` type](../rc/struct.Rc.html). It is the same as `Arc`, but -//! does not use atomics, making it both thread-unsafe as well as significantly -//! faster when updating the reference count. -//! -//! The `downgrade` method can be used to create a non-owning `Weak` pointer -//! to the box. A `Weak` pointer can be upgraded to an `Arc` pointer, but -//! will return `None` if the value has already been dropped. -//! -//! For example, a tree with parent pointers can be represented by putting the -//! nodes behind strong `Arc` pointers, and then storing the parent pointers -//! as `Weak` pointers. -//! -//! # Examples -//! -//! Sharing some immutable data between threads: -//! -//! ```no_run -//! use std::sync::Arc; -//! use std::thread; -//! -//! let five = Arc::new(5); -//! -//! for _ in 0..10 { -//! let five = five.clone(); -//! -//! thread::spawn(move || { -//! println!("{:?}", five); -//! }); -//! } -//! ``` -//! -//! Sharing mutable data safely between threads with a `Mutex`: -//! -//! ```no_run -//! use std::sync::{Arc, Mutex}; -//! use std::thread; -//! -//! let five = Arc::new(Mutex::new(5)); -//! -//! for _ in 0..10 { -//! let five = five.clone(); -//! -//! thread::spawn(move || { -//! let mut number = five.lock().unwrap(); -//! -//! *number += 1; -//! -//! println!("{}", *number); // prints 6 -//! }); -//! } -//! ``` +//! 
[arc]: struct.Arc.html use boxed::Box; use core::sync::atomic; -use core::sync::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst}; +use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use core::borrow; use core::fmt; use core::cmp::Ordering; @@ -85,48 +32,127 @@ use core::ops::CoerceUnsized; use core::ptr::{self, Shared}; use core::marker::Unsize; use core::hash::{Hash, Hasher}; -use core::{usize, isize}; +use core::{isize, usize}; use core::convert::From; use heap::deallocate; +/// A soft limit on the amount of references that may be made to an `Arc`. +/// +/// Going above this limit will abort your program (although not +/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references. const MAX_REFCOUNT: usize = (isize::MAX) as usize; -/// An atomically reference counted wrapper for shared state. +/// A thread-safe reference-counting pointer. /// -/// # Examples +/// The type `Arc` provides shared ownership of a value of type `T`, +/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces +/// a new pointer to the same value in the heap. When the last `Arc` +/// pointer to a given value is destroyed, the pointed-to value is +/// also destroyed. +/// +/// Shared references in Rust disallow mutation by default, and `Arc` is no +/// exception. If you need to mutate through an `Arc`, use [`Mutex`][mutex], +/// [`RwLock`][rwlock], or one of the [`Atomic`][atomic] types. +/// +/// `Arc` uses atomic operations for reference counting, so `Arc`s can be +/// sent between threads. In other words, `Arc` implements [`Send`][send] +/// as long as `T` implements `Send` and [`Sync`][sync]. The disadvantage is +/// that atomic operations are more expensive than ordinary memory accesses. +/// If you are not sharing reference-counted values between threads, consider +/// using [`rc::Rc`][rc] for lower overhead. `Rc` is a safe default, because +/// the compiler will catch any attempt to send an `Rc` between threads. 
+/// However, a library might choose `Arc` in order to give library consumers +/// more flexibility. +/// +/// The [`downgrade`][downgrade] method can be used to create a non-owning +/// [`Weak`][weak] pointer. A `Weak` pointer can be [`upgrade`][upgrade]d +/// to an `Arc`, but this will return [`None`][option] if the value has +/// already been dropped. +/// +/// A cycle between `Arc` pointers will never be deallocated. For this reason, +/// `Weak` is used to break cycles. For example, a tree could have strong +/// `Arc` pointers from parent nodes to children, and `Weak` pointers from +/// children back to their parents. /// -/// In this example, a large vector is shared between several threads. -/// With simple pipes, without `Arc`, a copy would have to be made for each -/// thread. +/// `Arc` automatically dereferences to `T` (via the [`Deref`][deref] trait), +/// so you can call `T`'s methods on a value of type `Arc`. To avoid name +/// clashes with `T`'s methods, the methods of `Arc` itself are [associated +/// functions][assoc], called using function-like syntax: /// -/// When you clone an `Arc`, it will create another pointer to the data and -/// increase the reference counter. +/// ``` +/// use std::sync::Arc; +/// let my_arc = Arc::new(()); /// +/// Arc::downgrade(&my_arc); /// ``` +/// +/// `Weak` does not auto-dereference to `T`, because the value may have +/// already been destroyed. 
+/// +/// [arc]: struct.Arc.html +/// [weak]: struct.Weak.html +/// [rc]: ../../std/rc/struct.Rc.html +/// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone +/// [mutex]: ../../std/sync/struct.Mutex.html +/// [rwlock]: ../../std/sync/struct.RwLock.html +/// [atomic]: ../../std/sync/atomic/index.html +/// [send]: ../../std/marker/trait.Send.html +/// [sync]: ../../std/marker/trait.Sync.html +/// [deref]: ../../std/ops/trait.Deref.html +/// [downgrade]: struct.Arc.html#method.downgrade +/// [upgrade]: struct.Weak.html#method.upgrade +/// [option]: ../../std/option/enum.Option.html +/// [assoc]: ../../book/method-syntax.html#associated-functions +/// +/// # Examples +/// +/// Sharing some immutable data between threads: +/// +// Note that we **do not** run these tests here. The windows builders get super +// unhappy if a thread outlives the main thread and then exits at the same time +// (something deadlocks) so we just avoid this entirely by not running these +// tests. +/// ```no_run /// use std::sync::Arc; /// use std::thread; /// -/// fn main() { -/// let numbers: Vec<_> = (0..100u32).collect(); -/// let shared_numbers = Arc::new(numbers); +/// let five = Arc::new(5); +/// +/// for _ in 0..10 { +/// let five = five.clone(); +/// +/// thread::spawn(move || { +/// println!("{:?}", five); +/// }); +/// } +/// ``` +/// +/// Sharing a mutable `AtomicUsize`: +/// +/// ```no_run +/// use std::sync::Arc; +/// use std::sync::atomic::{AtomicUsize, Ordering}; +/// use std::thread; /// -/// for _ in 0..10 { -/// let child_numbers = shared_numbers.clone(); +/// let val = Arc::new(AtomicUsize::new(5)); /// -/// thread::spawn(move || { -/// let local_numbers = &child_numbers[..]; +/// for _ in 0..10 { +/// let val = val.clone(); /// -/// // Work with the local numbers -/// }); -/// } +/// thread::spawn(move || { +/// let v = val.fetch_add(1, Ordering::SeqCst); +/// println!("{:?}", v); +/// }); /// } /// ``` -#[unsafe_no_drop_flag] +/// +/// See the [`rc` 
documentation][rc_examples] for more examples of reference +/// counting in general. +/// +/// [rc_examples]: ../../std/rc/index.html#examples #[stable(feature = "rust1", since = "1.0.0")] pub struct Arc { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: Shared>, + ptr: Shared>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -137,27 +163,32 @@ unsafe impl Sync for Arc {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Arc {} -/// A weak pointer to an `Arc`. +/// A weak version of [`Arc`][arc]. +/// +/// `Weak` pointers do not count towards determining if the inner value +/// should be dropped. +/// +/// The typical way to obtain a `Weak` pointer is to call +/// [`Arc::downgrade`][downgrade]. +/// +/// See the [`Arc`][arc] documentation for more details. /// -/// Weak pointers will not keep the data inside of the `Arc` alive, and can be -/// used to break cycles between `Arc` pointers. 
-#[unsafe_no_drop_flag] +/// [arc]: struct.Arc.html +/// [downgrade]: struct.Arc.html#method.downgrade #[stable(feature = "arc_weak", since = "1.4.0")] pub struct Weak { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: Shared>, + ptr: Shared>, } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "arc_weak", since = "1.4.0")] unsafe impl Send for Weak {} -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "arc_weak", since = "1.4.0")] unsafe impl Sync for Weak {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Weak {} -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "arc_weak", since = "1.4.0")] impl fmt::Debug for Weak { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "(Weak)") @@ -198,13 +229,17 @@ impl Arc { weak: atomic::AtomicUsize::new(1), data: data, }; - Arc { _ptr: unsafe { Shared::new(Box::into_raw(x)) } } + Arc { ptr: unsafe { Shared::new(Box::into_raw(x)) } } } - /// Unwraps the contained value if the `Arc` has only one strong reference. + /// Returns the contained value, if the `Arc` has exactly one strong reference. + /// + /// Otherwise, an [`Err`][result] is returned with the same `Arc` that was + /// passed in. + /// /// This will succeed even if there are outstanding weak references. /// - /// Otherwise, an `Err` is returned with the same `Arc`. 
+ /// [result]: ../../std/result/enum.Result.html /// /// # Examples /// @@ -216,33 +251,97 @@ impl Arc { /// /// let x = Arc::new(4); /// let _y = x.clone(); - /// assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4))); + /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4); /// ``` #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] pub fn try_unwrap(this: Self) -> Result { // See `drop` for why all these atomics are like this - if this.inner().strong.compare_and_swap(1, 0, Release) != 1 { + if this.inner().strong.compare_exchange(1, 0, Release, Relaxed).is_err() { return Err(this); } atomic::fence(Acquire); unsafe { - let ptr = *this._ptr; + let ptr = *this.ptr; let elem = ptr::read(&(*ptr).data); // Make a weak pointer to clean up the implicit strong-weak reference - let _weak = Weak { _ptr: this._ptr }; + let _weak = Weak { ptr: this.ptr }; mem::forget(this); Ok(elem) } } + + /// Consumes the `Arc`, returning the wrapped pointer. + /// + /// To avoid a memory leak the pointer must be converted back to an `Arc` using + /// [`Arc::from_raw`][from_raw]. + /// + /// [from_raw]: struct.Arc.html#method.from_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_raw)] + /// + /// use std::sync::Arc; + /// + /// let x = Arc::new(10); + /// let x_ptr = Arc::into_raw(x); + /// assert_eq!(unsafe { *x_ptr }, 10); + /// ``` + #[unstable(feature = "rc_raw", issue = "37197")] + pub fn into_raw(this: Self) -> *mut T { + let ptr = unsafe { &mut (**this.ptr).data as *mut _ }; + mem::forget(this); + ptr + } + + /// Constructs an `Arc` from a raw pointer. + /// + /// The raw pointer must have been previously returned by a call to a + /// [`Arc::into_raw`][into_raw]. + /// + /// This function is unsafe because improper use may lead to memory problems. For example, a + /// double-free may occur if the function is called twice on the same raw pointer. 
+ /// + /// [into_raw]: struct.Arc.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_raw)] + /// + /// use std::sync::Arc; + /// + /// let x = Arc::new(10); + /// let x_ptr = Arc::into_raw(x); + /// + /// unsafe { + /// // Convert back to an `Arc` to prevent leak. + /// let x = Arc::from_raw(x_ptr); + /// assert_eq!(*x, 10); + /// + /// // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe. + /// } + /// + /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! + /// ``` + #[unstable(feature = "rc_raw", issue = "37197")] + pub unsafe fn from_raw(ptr: *mut T) -> Self { + // To find the corresponding pointer to the `ArcInner` we need to subtract the offset of the + // `data` field from the pointer. + Arc { ptr: Shared::new((ptr as *mut u8).offset(-offset_of!(ArcInner, data)) as *mut _) } + } } impl Arc { - /// Downgrades the `Arc` to a `Weak` reference. + /// Creates a new [`Weak`][weak] pointer to this value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// @@ -255,13 +354,14 @@ impl Arc { /// ``` #[stable(feature = "arc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak { - loop { - // This Relaxed is OK because we're checking the value in the CAS - // below. - let cur = this.inner().weak.load(Relaxed); + // This Relaxed is OK because we're checking the value in the CAS + // below. + let mut cur = this.inner().weak.load(Relaxed); + loop { // check if the weak counter is currently "locked"; if so, spin. if cur == usize::MAX { + cur = this.inner().weak.load(Relaxed); continue; } @@ -272,13 +372,37 @@ impl Arc { // Unlike with Clone(), we need this to be an Acquire read to // synchronize with the write coming from `is_unique`, so that the // events prior to that write happen before this read. 
- if this.inner().weak.compare_and_swap(cur, cur + 1, Acquire) == cur { - return Weak { _ptr: this._ptr }; + match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { + Ok(_) => return Weak { ptr: this.ptr }, + Err(old) => cur = old, } } } - /// Get the number of weak references to this value. + /// Gets the number of [`Weak`][weak] pointers to this value. + /// + /// [weak]: struct.Weak.html + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the weak count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// #![feature(arc_counts)] + /// + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _weak_five = Arc::downgrade(&five); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` or `Weak` between threads. + /// assert_eq!(1, Arc::weak_count(&five)); + /// ``` #[inline] #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", issue = "28356")] @@ -286,7 +410,28 @@ impl Arc { this.inner().weak.load(SeqCst) - 1 } - /// Get the number of strong references to this value. + /// Gets the number of strong (`Arc`) pointers to this value. + /// + /// # Safety + /// + /// This method by itself is safe, but using it correctly requires extra care. + /// Another thread can change the strong count at any time, + /// including potentially between calling this method and acting on the result. + /// + /// # Examples + /// + /// ``` + /// #![feature(arc_counts)] + /// + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let _also_five = five.clone(); + /// + /// // This assertion is deterministic because we haven't shared + /// // the `Arc` between threads. 
+ /// assert_eq!(2, Arc::strong_count(&five)); + /// ``` #[inline] #[unstable(feature = "arc_counts", reason = "not clearly useful, and racy", issue = "28356")] @@ -301,13 +446,13 @@ impl Arc { // `ArcInner` structure itself is `Sync` because the inner data is // `Sync` as well, so we're ok loaning out an immutable pointer to these // contents. - unsafe { &**self._ptr } + unsafe { &**self.ptr } } // Non-inlined part of `drop`. #[inline(never)] unsafe fn drop_slow(&mut self) { - let ptr = *self._ptr; + let ptr = *self.ptr; // Destroy the data at this time, even though we may not free the box // allocation itself (there may still be weak pointers lying around). @@ -318,13 +463,41 @@ impl Arc { deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } + + #[inline] + #[unstable(feature = "ptr_eq", + reason = "newly added", + issue = "36497")] + /// Returns true if the two `Arc`s point to the same value (not + /// just values that compare as equal). + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_eq)] + /// + /// use std::sync::Arc; + /// + /// let five = Arc::new(5); + /// let same_five = five.clone(); + /// let other_five = Arc::new(5); + /// + /// assert!(Arc::ptr_eq(&five, &same_five)); + /// assert!(!Arc::ptr_eq(&five, &other_five)); + /// ``` + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + let this_ptr: *const ArcInner = *this.ptr; + let other_ptr: *const ArcInner = *other.ptr; + this_ptr == other_ptr + } } #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Arc { - /// Makes a clone of the `Arc`. + /// Makes a clone of the `Arc` pointer. /// - /// This increases the strong reference count. + /// This creates another pointer to the same inner value, increasing the + /// strong reference count. 
/// /// # Examples /// @@ -365,7 +538,7 @@ impl Clone for Arc { } } - Arc { _ptr: self._ptr } + Arc { ptr: self.ptr } } } @@ -380,11 +553,17 @@ impl Deref for Arc { } impl Arc { - /// Make a mutable reference into the given `Arc` by cloning the inner - /// data if the `Arc` doesn't have one strong reference and no weak - /// references. + /// Makes a mutable reference into the given `Arc`. + /// + /// If there are other `Arc` or [`Weak`][weak] pointers to the same value, + /// then `make_mut` will invoke [`clone`][clone] on the inner value to + /// ensure unique ownership. This is also referred to as clone-on-write. + /// + /// See also [`get_mut`][get_mut], which will fail rather than cloning. /// - /// This is also referred to as a copy-on-write. + /// [weak]: struct.Weak.html + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone + /// [get_mut]: struct.Arc.html#method.get_mut /// /// # Examples /// @@ -399,10 +578,9 @@ impl Arc { /// *Arc::make_mut(&mut data) += 1; // Won't clone anything /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything /// - /// // Note: data and other_data now point to different numbers + /// // Now `data` and `other_data` point to different values. /// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); - /// /// ``` #[inline] #[stable(feature = "arc_unique", since = "1.4.0")] @@ -415,7 +593,7 @@ impl Arc { // before release writes (i.e., decrements) to `strong`. Since we hold a // weak count, there's no chance the ArcInner itself could be // deallocated. - if this.inner().strong.compare_and_swap(1, 0, Acquire) != 1 { + if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() { // Another strong pointer exists; clone *this = Arc::new((**this).clone()); } else if this.inner().weak.load(Relaxed) != 1 { @@ -433,7 +611,7 @@ impl Arc { // Materialize our own implicit weak pointer, so that it can clean // up the ArcInner as needed. 
- let weak = Weak { _ptr: this._ptr }; + let weak = Weak { ptr: this.ptr }; // mark the data itself as already deallocated unsafe { @@ -441,7 +619,7 @@ impl Arc { // here (due to zeroing) because data is no longer accessed by // other threads (due to there being no more strong refs at this // point). - let mut swap = Arc::new(ptr::read(&(**weak._ptr).data)); + let mut swap = Arc::new(ptr::read(&(**weak.ptr).data)); mem::swap(this, &mut swap); mem::forget(swap); } @@ -454,15 +632,26 @@ impl Arc { // As with `get_mut()`, the unsafety is ok because our reference was // either unique to begin with, or became one upon cloning the contents. unsafe { - let inner = &mut **this._ptr; + let inner = &mut **this.ptr; &mut inner.data } } } impl Arc { - /// Returns a mutable reference to the contained value if the `Arc` has - /// one strong reference and no weak references. + /// Returns a mutable reference to the inner value, if there are + /// no other `Arc` or [`Weak`][weak] pointers to the same value. + /// + /// Returns [`None`][option] otherwise, because it is not safe to + /// mutate a shared value. + /// + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when it's shared. + /// + /// [weak]: struct.Weak.html + /// [option]: ../../std/option/enum.Option.html + /// [make_mut]: struct.Arc.html#method.make_mut + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone /// /// # Examples /// @@ -486,7 +675,7 @@ impl Arc { // the Arc itself to be `mut`, so we're returning the only possible // reference to the inner data. unsafe { - let inner = &mut **this._ptr; + let inner = &mut **this.ptr; Some(&mut inner.data) } } else { @@ -505,7 +694,7 @@ impl Arc { // The acquire label here ensures a happens-before relationship with any // writes to `strong` prior to decrements of the `weak` count (via drop, // which uses Release). 
- if self.inner().weak.compare_and_swap(1, usize::MAX, Acquire) == 1 { + if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { // Due to the previous acquire read, this will observe any writes to // `strong` that were due to upgrading weak pointers; only strong // clones remain, which require that the strong count is > 1 anyway. @@ -524,43 +713,36 @@ impl Arc { #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Arc { - /// Drops the `Arc`. + /// Drops the `Arc`. /// /// This will decrement the strong reference count. If the strong reference - /// count becomes zero and the only other references are `Weak` ones, - /// `drop`s the inner value. + /// count reaches zero then the only other references (if any) are + /// [`Weak`][weak], so we `drop` the inner value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// /// ``` /// use std::sync::Arc; /// - /// { - /// let five = Arc::new(5); + /// struct Foo; /// - /// // stuff - /// - /// drop(five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Arc::new(5); /// - /// // stuff + /// let foo = Arc::new(Foo); + /// let foo2 = foo.clone(); /// - /// } // implicit drop + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" /// ``` #[unsafe_destructor_blind_to_params] #[inline] fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run - // more than once (but it is guaranteed to be zeroed after the first if - // it's run more than once) - let thin = *self._ptr as *const (); - - if thin as usize == mem::POST_DROP_USIZE { - return; - } - // Because `fetch_sub` is already atomic, we do not need to synchronize // with other threads unless we are going to delete the object. This // same logic applies to the below `fetch_sub` to the `weak` count. 
@@ -593,13 +775,46 @@ impl Drop for Arc { } } +impl Weak { + /// Constructs a new `Weak`, without an accompanying instance of `T`. + /// + /// This allocates memory for `T`, but does not initialize it. Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. + /// + /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html + /// + /// # Examples + /// + /// ``` + /// use std::sync::Weak; + /// + /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); + /// ``` + #[stable(feature = "downgraded_weak", since = "1.10.0")] + pub fn new() -> Weak { + unsafe { + Weak { + ptr: Shared::new(Box::into_raw(box ArcInner { + strong: atomic::AtomicUsize::new(0), + weak: atomic::AtomicUsize::new(1), + data: uninitialized(), + })), + } + } + } +} + impl Weak { - /// Upgrades a weak reference to a strong reference. + /// Upgrades the `Weak` pointer to an [`Arc`][arc], if possible. /// - /// Upgrades the `Weak` reference to an `Arc`, if possible. + /// Returns [`None`][option] if the strong count has reached zero and the + /// inner value was destroyed. /// - /// Returns `None` if there were no strong references and the data was - /// destroyed. + /// [arc]: struct.Arc.html + /// [option]: ../../std/option/enum.Option.html /// /// # Examples /// @@ -611,31 +826,42 @@ impl Weak { /// let weak_five = Arc::downgrade(&five); /// /// let strong_five: Option> = weak_five.upgrade(); + /// assert!(strong_five.is_some()); + /// + /// // Destroy all strong pointers. + /// drop(strong_five); + /// drop(five); + /// + /// assert!(weak_five.upgrade().is_none()); /// ``` #[stable(feature = "arc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option> { // We use a CAS loop to increment the strong count instead of a // fetch_add because once the count hits 0 it must never be above 0. 
let inner = self.inner(); + + // Relaxed load because any write of 0 that we can observe + // leaves the field in a permanently zero state (so a + // "stale" read of 0 is fine), and any other value is + // confirmed via the CAS below. + let mut n = inner.strong.load(Relaxed); + loop { - // Relaxed load because any write of 0 that we can observe - // leaves the field in a permanently zero state (so a - // "stale" read of 0 is fine), and any other value is - // confirmed via the CAS below. - let n = inner.strong.load(Relaxed); if n == 0 { return None; } // See comments in `Arc::clone` for why we do this (for `mem::forget`). if n > MAX_REFCOUNT { - unsafe { abort(); } + unsafe { + abort(); + } } // Relaxed is valid for the same reason it is on Arc's Clone impl - let old = inner.strong.compare_and_swap(n, n + 1, Relaxed); - if old == n { - return Some(Arc { _ptr: self._ptr }); + match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) { + Ok(_) => return Some(Arc { ptr: self.ptr }), + Err(old) => n = old, } } } @@ -643,15 +869,16 @@ impl Weak { #[inline] fn inner(&self) -> &ArcInner { // See comments above for why this is "safe" - unsafe { &**self._ptr } + unsafe { &**self.ptr } } } #[stable(feature = "arc_weak", since = "1.4.0")] impl Clone for Weak { - /// Makes a clone of the `Weak`. + /// Makes a clone of the `Weak` pointer. /// - /// This increases the weak reference count. + /// This creates another pointer to the same inner value, increasing the + /// weak reference count. /// /// # Examples /// @@ -677,13 +904,37 @@ impl Clone for Weak { } } - return Weak { _ptr: self._ptr }; + return Weak { ptr: self.ptr }; } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "downgraded_weak", since = "1.10.0")] +impl Default for Weak { + /// Constructs a new `Weak`, without an accompanying instance of `T`. + /// + /// This allocates memory for `T`, but does not initialize it. 
Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. + /// + /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html + /// + /// # Examples + /// + /// ``` + /// use std::sync::Weak; + /// + /// let empty: Weak = Default::default(); + /// assert!(empty.upgrade().is_none()); + /// ``` + fn default() -> Weak { + Weak::new() + } +} + +#[stable(feature = "arc_weak", since = "1.4.0")] impl Drop for Weak { - /// Drops the `Weak`. + /// Drops the `Weak` pointer. /// /// This will decrement the weak reference count. /// @@ -692,30 +943,25 @@ impl Drop for Weak { /// ``` /// use std::sync::Arc; /// - /// { - /// let five = Arc::new(5); - /// let weak_five = Arc::downgrade(&five); + /// struct Foo; /// - /// // stuff - /// - /// drop(weak_five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Arc::new(5); - /// let weak_five = Arc::downgrade(&five); /// - /// // stuff + /// let foo = Arc::new(Foo); + /// let weak_foo = Arc::downgrade(&foo); + /// let other_weak_foo = weak_foo.clone(); + /// + /// drop(weak_foo); // Doesn't print anything + /// drop(foo); // Prints "dropped!" /// - /// } // implicit drop + /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { - let ptr = *self._ptr; - let thin = ptr as *const (); - - // see comments above for why this check is here - if thin as usize == mem::POST_DROP_USIZE { - return; - } + let ptr = *self.ptr; // If we find out that we were the last weak pointer, then its time to // deallocate the data entirely. See the discussion in Arc::drop() about @@ -734,9 +980,9 @@ impl Drop for Weak { #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Arc { - /// Equality for two `Arc`s. + /// Equality for two `Arc`s. /// - /// Two `Arc`s are equal if their inner value are equal. + /// Two `Arc`s are equal if their inner values are equal. 
/// /// # Examples /// @@ -745,15 +991,15 @@ impl PartialEq for Arc { /// /// let five = Arc::new(5); /// - /// five == Arc::new(5); + /// assert!(five == Arc::new(5)); /// ``` fn eq(&self, other: &Arc) -> bool { *(*self) == *(*other) } - /// Inequality for two `Arc`s. + /// Inequality for two `Arc`s. /// - /// Two `Arc`s are unequal if their inner value are unequal. + /// Two `Arc`s are unequal if their inner values are unequal. /// /// # Examples /// @@ -762,7 +1008,7 @@ impl PartialEq for Arc { /// /// let five = Arc::new(5); /// - /// five != Arc::new(5); + /// assert!(five != Arc::new(6)); /// ``` fn ne(&self, other: &Arc) -> bool { *(*self) != *(*other) @@ -770,7 +1016,7 @@ impl PartialEq for Arc { } #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for Arc { - /// Partial comparison for two `Arc`s. + /// Partial comparison for two `Arc`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. /// @@ -778,16 +1024,17 @@ impl PartialOrd for Arc { /// /// ``` /// use std::sync::Arc; + /// use std::cmp::Ordering; /// /// let five = Arc::new(5); /// - /// five.partial_cmp(&Arc::new(5)); + /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6))); /// ``` fn partial_cmp(&self, other: &Arc) -> Option { (**self).partial_cmp(&**other) } - /// Less-than comparison for two `Arc`s. + /// Less-than comparison for two `Arc`s. /// /// The two are compared by calling `<` on their inner values. /// @@ -798,13 +1045,13 @@ impl PartialOrd for Arc { /// /// let five = Arc::new(5); /// - /// five < Arc::new(5); + /// assert!(five < Arc::new(6)); /// ``` fn lt(&self, other: &Arc) -> bool { *(*self) < *(*other) } - /// 'Less-than or equal to' comparison for two `Arc`s. + /// 'Less than or equal to' comparison for two `Arc`s. /// /// The two are compared by calling `<=` on their inner values. 
/// @@ -815,13 +1062,13 @@ impl PartialOrd for Arc { /// /// let five = Arc::new(5); /// - /// five <= Arc::new(5); + /// assert!(five <= Arc::new(5)); /// ``` fn le(&self, other: &Arc) -> bool { *(*self) <= *(*other) } - /// Greater-than comparison for two `Arc`s. + /// Greater-than comparison for two `Arc`s. /// /// The two are compared by calling `>` on their inner values. /// @@ -832,13 +1079,13 @@ impl PartialOrd for Arc { /// /// let five = Arc::new(5); /// - /// five > Arc::new(5); + /// assert!(five > Arc::new(4)); /// ``` fn gt(&self, other: &Arc) -> bool { *(*self) > *(*other) } - /// 'Greater-than or equal to' comparison for two `Arc`s. + /// 'Greater than or equal to' comparison for two `Arc`s. /// /// The two are compared by calling `>=` on their inner values. /// @@ -849,7 +1096,7 @@ impl PartialOrd for Arc { /// /// let five = Arc::new(5); /// - /// five >= Arc::new(5); + /// assert!(five >= Arc::new(5)); /// ``` fn ge(&self, other: &Arc) -> bool { *(*self) >= *(*other) @@ -857,6 +1104,20 @@ impl PartialOrd for Arc { } #[stable(feature = "rust1", since = "1.0.0")] impl Ord for Arc { + /// Comparison for two `Arc`s. + /// + /// The two are compared by calling `cmp()` on their inner values. + /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// use std::cmp::Ordering; + /// + /// let five = Arc::new(5); + /// + /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6))); + /// ``` fn cmp(&self, other: &Arc) -> Ordering { (**self).cmp(&**other) } @@ -879,14 +1140,24 @@ impl fmt::Debug for Arc { } #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Pointer for Arc { +impl fmt::Pointer for Arc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&*self._ptr, f) + fmt::Pointer::fmt(&*self.ptr, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl Default for Arc { + /// Creates a new `Arc`, with the `Default` value for `T`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::Arc; + /// + /// let x: Arc = Default::default(); + /// assert_eq!(*x, 0); + /// ``` fn default() -> Arc { Arc::new(Default::default()) } @@ -906,35 +1177,6 @@ impl From for Arc { } } -impl Weak { - /// Constructs a new `Weak` without an accompanying instance of T. - /// - /// This allocates memory for T, but does not initialize it. Calling - /// Weak::upgrade() on the return value always gives None. - /// - /// # Examples - /// - /// ``` - /// #![feature(downgraded_weak)] - /// - /// use std::sync::Weak; - /// - /// let empty: Weak = Weak::new(); - /// ``` - #[unstable(feature = "downgraded_weak", - reason = "recently added", - issue = "30425")] - pub fn new() -> Weak { - unsafe { - Weak { _ptr: Shared::new(Box::into_raw(box ArcInner { - strong: atomic::AtomicUsize::new(0), - weak: atomic::AtomicUsize::new(1), - data: uninitialized(), - }))} - } - } -} - #[cfg(test)] mod tests { use std::clone::Clone; @@ -942,7 +1184,7 @@ mod tests { use std::mem::drop; use std::ops::Drop; use std::option::Option; - use std::option::Option::{Some, None}; + use std::option::Option::{None, Some}; use std::sync::atomic; use std::sync::atomic::Ordering::{Acquire, SeqCst}; use std::thread; @@ -966,6 +1208,7 @@ mod tests { } #[test] + #[cfg_attr(target_os = "emscripten", ignore)] fn manually_share_arc() { let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; let arc_v = Arc::new(v); @@ -1008,6 +1251,23 @@ mod tests { assert_eq!(Arc::try_unwrap(x), Ok(5)); } + #[test] + fn into_from_raw() { + let x = Arc::new(box "hello"); + let y = x.clone(); + + let x_ptr = Arc::into_raw(x); + drop(y); + unsafe { + assert_eq!(**x_ptr, "hello"); + + let x = Arc::from_raw(x_ptr); + assert_eq!(**x, "hello"); + + assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello")); + } + } + #[test] fn test_cowarc_clone_make_mut() { let mut cow0 = Arc::new(75); @@ -1117,7 +1377,7 @@ mod tests { #[test] fn test_strong_count() { - let a = Arc::new(0u32); + let a = 
Arc::new(0); assert!(Arc::strong_count(&a) == 1); let w = Arc::downgrade(&a); assert!(Arc::strong_count(&a) == 1); @@ -1134,7 +1394,7 @@ mod tests { #[test] fn test_weak_count() { - let a = Arc::new(0u32); + let a = Arc::new(0); assert!(Arc::strong_count(&a) == 1); assert!(Arc::weak_count(&a) == 0); let w = Arc::downgrade(&a); @@ -1160,7 +1420,7 @@ mod tests { #[test] fn show_arc() { - let a = Arc::new(5u32); + let a = Arc::new(5); assert_eq!(format!("{:?}", a), "5"); } @@ -1191,6 +1451,16 @@ mod tests { let foo: Weak = Weak::new(); assert!(foo.upgrade().is_none()); } + + #[test] + fn test_ptr_eq() { + let five = Arc::new(5); + let same_five = five.clone(); + let other_five = Arc::new(5); + + assert!(Arc::ptr_eq(&five, &same_five)); + assert!(!Arc::ptr_eq(&five, &other_five)); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index be140469eb662..28f4dda140883 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -61,12 +61,12 @@ use core::borrow; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; +use core::iter::FusedIterator; use core::marker::{self, Unsize}; use core::mem; use core::ops::{CoerceUnsized, Deref, DerefMut}; -use core::ops::{Placer, Boxed, Place, InPlace, BoxPlace}; +use core::ops::{BoxPlace, Boxed, InPlace, Place, Placer}; use core::ptr::{self, Unique}; -use core::raw::TraitObject; use core::convert::From; /// A value that represents the heap. This is the default place that the `box` @@ -244,11 +244,21 @@ impl Box { /// the destructor of `T` and free the allocated memory. Since the /// way `Box` allocates and releases memory is unspecified, the /// only valid pointer to pass to this function is the one taken - /// from another `Box` via the `Box::into_raw` function. + /// from another `Box` via the [`Box::into_raw`] function. /// /// This function is unsafe because improper use may lead to /// memory problems. 
For example, a double-free may occur if the /// function is called twice on the same raw pointer. + /// + /// [`Box::into_raw`]: struct.Box.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// let x = Box::new(5); + /// let ptr = Box::into_raw(x); + /// let x = unsafe { Box::from_raw(ptr) }; + /// ``` #[stable(feature = "box_raw", since = "1.4.0")] #[inline] pub unsafe fn from_raw(raw: *mut T) -> Self { @@ -261,14 +271,19 @@ impl Box { /// memory previously managed by the `Box`. In particular, the /// caller should properly destroy `T` and release the memory. The /// proper way to do so is to convert the raw pointer back into a - /// `Box` with the `Box::from_raw` function. + /// `Box` with the [`Box::from_raw`] function. + /// + /// Note: this is an associated function, which means that you have + /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This + /// is so that there is no conflict with a method on the inner type. + /// + /// [`Box::from_raw`]: struct.Box.html#method.from_raw /// /// # Examples /// /// ``` - /// let seventeen = Box::new(17); - /// let raw = Box::into_raw(seventeen); - /// let boxed_again = unsafe { Box::from_raw(raw) }; + /// let x = Box::new(5); + /// let ptr = Box::into_raw(x); /// ``` #[stable(feature = "box_raw", since = "1.4.0")] #[inline] @@ -279,6 +294,7 @@ impl Box { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Box { + /// Creates a `Box`, with the `Default` value for T. fn default() -> Box { box Default::default() } @@ -399,15 +415,29 @@ impl Box { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn print_if_string(value: Box) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// fn main() { + /// let my_string = "Hello World".to_string(); + /// print_if_string(Box::new(my_string)); + /// print_if_string(Box::new(0i8)); + /// } + /// ``` pub fn downcast(self) -> Result, Box> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let raw = Box::into_raw(self); - let to: TraitObject = mem::transmute::<*mut Any, TraitObject>(raw); - - // Extract the data pointer - Ok(Box::from_raw(to.data as *mut T)) + let raw: *mut Any = Box::into_raw(self); + Ok(Box::from_raw(raw as *mut T)) } } else { Err(self) @@ -419,6 +449,24 @@ impl Box { #[inline] #[stable(feature = "rust1", since = "1.0.0")] /// Attempt to downcast the box to a concrete type. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn print_if_string(value: Box) { + /// if let Ok(string) = value.downcast::() { + /// println!("String ({}): {}", string.len(), string); + /// } + /// } + /// + /// fn main() { + /// let my_string = "Hello World".to_string(); + /// print_if_string(Box::new(my_string)); + /// print_if_string(Box::new(0i8)); + /// } + /// ``` pub fn downcast(self) -> Result, Box> { >::downcast(self).map_err(|s| unsafe { // reapply the Send marker @@ -442,7 +490,7 @@ impl fmt::Debug for Box { } #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Pointer for Box { +impl fmt::Pointer for Box { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // It's not possible to extract the inner Uniq directly from the Box, // instead we cast it to a *const which aliases the Unique @@ -486,6 +534,9 @@ impl DoubleEndedIterator for Box { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Box {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Box {} + /// 
`FnBox` is a version of the `FnOnce` intended for use with boxed /// closure objects. The idea is that where one would normally store a @@ -525,15 +576,18 @@ impl ExactSizeIterator for Box {} /// } /// ``` #[rustc_paren_sugar] -#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")] +#[unstable(feature = "fnbox", + reason = "will be deprecated if and when Box becomes usable", issue = "28796")] pub trait FnBox { type Output; fn call_box(self: Box, args: A) -> Self::Output; } -#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")] -impl FnBox for F where F: FnOnce +#[unstable(feature = "fnbox", + reason = "will be deprecated if and when Box becomes usable", issue = "28796")] +impl FnBox for F + where F: FnOnce { type Output = F::Output; @@ -542,7 +596,8 @@ impl FnBox for F where F: FnOnce } } -#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")] +#[unstable(feature = "fnbox", + reason = "will be deprecated if and when Box becomes usable", issue = "28796")] impl<'a, A, R> FnOnce for Box + 'a> { type Output = R; @@ -551,7 +606,8 @@ impl<'a, A, R> FnOnce for Box + 'a> { } } -#[unstable(feature = "fnbox", reason = "Newly introduced", issue = "0")] +#[unstable(feature = "fnbox", + reason = "will be deprecated if and when Box becomes usable", issue = "28796")] impl<'a, A, R> FnOnce for Box + Send + 'a> { type Output = R; diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs index e7da6d04d3f8f..8d68ce3c1f6e2 100644 --- a/src/liballoc/boxed_test.rs +++ b/src/liballoc/boxed_test.rs @@ -12,10 +12,9 @@ use core::any::Any; use core::ops::Deref; -use core::result::Result::{Ok, Err}; +use core::result::Result::{Err, Ok}; use core::clone::Clone; -use std::boxed; use std::boxed::Box; #[test] diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 7e7e3c619cb3a..12809171b7438 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -16,6 +16,8 @@ issue = "27700")] use core::{isize, usize}; 
+#[cfg(not(test))] +use core::intrinsics::{min_align_of_val, size_of_val}; #[allow(improper_ctypes)] extern "C" { @@ -147,6 +149,18 @@ unsafe fn exchange_free(ptr: *mut u8, old_size: usize, align: usize) { deallocate(ptr, old_size, align); } +#[cfg(not(test))] +#[lang = "box_free"] +#[inline] +unsafe fn box_free(ptr: *mut T) { + let size = size_of_val(&*ptr); + let align = min_align_of_val(&*ptr); + // We do not allocate for Box when T is ZST, so deallocation is also not necessary. + if size != 0 { + deallocate(ptr as *mut u8, size, align); + } +} + #[cfg(test)] mod tests { extern crate test; diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 0a232ed0620d4..0d450184ed877 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -70,48 +70,38 @@ test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] #![no_std] #![needs_allocator] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(allocator)] #![feature(box_syntax)] #![feature(coerce_unsized)] +#![feature(const_fn)] #![feature(core_intrinsics)] #![feature(custom_attribute)] +#![feature(dropck_parametricity)] #![feature(fundamental)] #![feature(lang_items)] +#![feature(needs_allocator)] #![feature(optin_builtin_traits)] #![feature(placement_in_syntax)] -#![feature(placement_new_protocol)] -#![feature(raw)] #![feature(shared)] #![feature(staged_api)] #![feature(unboxed_closures)] #![feature(unique)] -#![feature(unsafe_no_drop_flag, filling_drop)] -#![feature(dropck_parametricity)] #![feature(unsize)] -#![feature(drop_in_place)] -#![feature(fn_traits)] -#![feature(const_fn)] -#![feature(needs_allocator)] - -// Issue# 30592: Systematically use alloc_system during stage0 since jemalloc -// might be unavailable or disabled -#![cfg_attr(stage0, feature(alloc_system))] - -#![cfg_attr(test, feature(test, rustc_private, box_heap))] - -#[cfg(stage0)] -extern crate alloc_system; +#![cfg_attr(not(test), feature(fused, fn_traits, placement_new_protocol))] +#![cfg_attr(test, feature(test, 
box_heap))] // Allow testing this library #[cfg(test)] #[macro_use] extern crate std; -#[cfg(test)] + +// Module with internal macros used by other modules (needs to be included before other modules). #[macro_use] -extern crate log; +mod macros; // Heaps provided for low-level allocation strategies diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs new file mode 100644 index 0000000000000..7da91c87e967e --- /dev/null +++ b/src/liballoc/macros.rs @@ -0,0 +1,28 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Private macro to get the offset of a struct field in bytes from the address of the struct. +macro_rules! offset_of { + ($container:path, $field:ident) => {{ + // Make sure the field actually exists. This line ensures that a compile-time error is + // generated if $field is accessed through a Deref impl. + let $container { $field : _, .. }; + + // Create an (invalid) instance of the container and calculate the offset to its + // field. Using a null pointer might be UB if `&(*(0 as *const T)).field` is interpreted to + // be nullptr deref. + let invalid: $container = ::core::mem::uninitialized(); + let offset = &invalid.$field as *const _ as usize - &invalid as *const _ as usize; + + // Do not run destructors on the made up invalid instance. 
+ ::core::mem::forget(invalid); + offset as isize + }}; +} diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index c407cef25e74c..f23ea0ea8bf71 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -17,7 +17,7 @@ use super::boxed::Box; use core::ops::Drop; use core::cmp; -/// A low-level utility for more ergonomically allocating, reallocating, and deallocating a +/// A low-level utility for more ergonomically allocating, reallocating, and deallocating /// a buffer of memory on the heap without having to worry about all the corner cases /// involved. This type is excellent for building your own data structures like Vec and VecDeque. /// In particular: @@ -44,7 +44,6 @@ use core::cmp; /// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. -#[unsafe_no_drop_flag] pub struct RawVec { ptr: Unique, cap: usize, @@ -58,11 +57,7 @@ impl RawVec { pub fn new() -> Self { unsafe { // !0 is usize::MAX. This branch should be stripped at compile time. - let cap = if mem::size_of::() == 0 { - !0 - } else { - 0 - }; + let cap = if mem::size_of::() == 0 { !0 } else { 0 }; // heap::EMPTY doubles as "unallocated" and "zero-sized allocation" RawVec { @@ -147,6 +142,7 @@ impl RawVec { /// Gets the capacity of the allocation. /// /// This will always be `usize::MAX` if `T` is zero-sized. 
+ #[inline(always)] pub fn cap(&self) -> usize { if mem::size_of::() == 0 { !0 @@ -209,11 +205,7 @@ impl RawVec { let (new_cap, ptr) = if self.cap == 0 { // skip to 4 because tiny Vec's are dumb; but not if that would cause overflow - let new_cap = if elem_size > (!0) / 8 { - 1 - } else { - 4 - }; + let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; let ptr = heap::allocate(new_cap * elem_size, align); (new_cap, ptr) } else { @@ -347,7 +339,7 @@ impl RawVec { let elem_size = mem::size_of::(); // Nothing we can really do about these checks :( let required_cap = used_cap.checked_add(needed_extra_cap) - .expect("capacity overflow"); + .expect("capacity overflow"); // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. let double_cap = self.cap * 2; // `double_cap` guarantees exponential growth. @@ -533,8 +525,8 @@ impl RawVec { /// Converts the entire buffer into `Box<[T]>`. /// /// While it is not *strictly* Undefined Behavior to call - /// this procedure while some of the RawVec is unintialized, - /// it cetainly makes it trivial to trigger it. + /// this procedure while some of the RawVec is uninitialized, + /// it certainly makes it trivial to trigger it. /// /// Note that this will correctly reconstitute any `cap` changes /// that may have been performed. (see description of type for details) @@ -545,13 +537,6 @@ impl RawVec { mem::forget(self); output } - - /// This is a stupid name in the hopes that someone will find this in the - /// not too distant future and remove it with the rest of - /// #[unsafe_no_drop_flag] - pub fn unsafe_no_drop_flag_needs_drop(&self) -> bool { - self.cap != mem::POST_DROP_USIZE - } } impl Drop for RawVec { @@ -559,7 +544,7 @@ impl Drop for RawVec { /// Frees the memory owned by the RawVec *without* trying to Drop its contents. 
fn drop(&mut self) { let elem_size = mem::size_of::(); - if elem_size != 0 && self.cap != 0 && self.unsafe_no_drop_flag_needs_drop() { + if elem_size != 0 && self.cap != 0 { let align = mem::align_of::(); let num_bytes = elem_size * self.cap; @@ -577,9 +562,9 @@ impl Drop for RawVec { // * We don't overflow `usize::MAX` and actually allocate too little // // On 64-bit we just need to check for overflow since trying to allocate -// `> isize::MAX` bytes will surely fail. On 32-bit we need to add an extra -// guard for this in case we're running on a platform which can use all 4GB in -// user-space. e.g. PAE or x32 +// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add +// an extra guard for this in case we're running on a platform which can use +// all 4GB in user-space. e.g. PAE or x32 #[inline] fn alloc_guard(alloc_size: usize) { diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 2c45e88bb24e8..8d863d7d9e917 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -10,89 +10,139 @@ #![allow(deprecated)] -//! Thread-local reference-counted boxes (the `Rc` type). +//! Single-threaded reference-counting pointers. //! -//! The `Rc` type provides shared ownership of an immutable value. -//! Destruction is deterministic, and will occur as soon as the last owner is -//! gone. It is marked as non-sendable because it avoids the overhead of atomic -//! reference counting. +//! The type [`Rc`][rc] provides shared ownership of a value of type `T`, +//! allocated in the heap. Invoking [`clone`][clone] on `Rc` produces a new +//! pointer to the same value in the heap. When the last `Rc` pointer to a +//! given value is destroyed, the pointed-to value is also destroyed. //! -//! The `downgrade` method can be used to create a non-owning `Weak` pointer -//! to the box. A `Weak` pointer can be upgraded to an `Rc` pointer, but -//! will return `None` if the value has already been dropped. +//! 
Shared references in Rust disallow mutation by default, and `Rc` is no +//! exception. If you need to mutate through an `Rc`, use [`Cell`][cell] or +//! [`RefCell`][refcell]. //! -//! For example, a tree with parent pointers can be represented by putting the -//! nodes behind strong `Rc` pointers, and then storing the parent pointers -//! as `Weak` pointers. +//! `Rc` uses non-atomic reference counting. This means that overhead is very +//! low, but an `Rc` cannot be sent between threads, and consequently `Rc` +//! does not implement [`Send`][send]. As a result, the Rust compiler +//! will check *at compile time* that you are not sending `Rc`s between +//! threads. If you need multi-threaded, atomic reference counting, use +//! [`sync::Arc`][arc]. +//! +//! The [`downgrade`][downgrade] method can be used to create a non-owning +//! [`Weak`][weak] pointer. A `Weak` pointer can be [`upgrade`][upgrade]d +//! to an `Rc`, but this will return [`None`][option] if the value has +//! already been dropped. +//! +//! A cycle between `Rc` pointers will never be deallocated. For this reason, +//! `Weak` is used to break cycles. For example, a tree could have strong +//! `Rc` pointers from parent nodes to children, and `Weak` pointers from +//! children back to their parents. +//! +//! `Rc` automatically dereferences to `T` (via the [`Deref`][deref] trait), +//! so you can call `T`'s methods on a value of type `Rc`. To avoid name +//! clashes with `T`'s methods, the methods of `Rc` itself are [associated +//! functions][assoc], called using function-like syntax: +//! +//! ``` +//! use std::rc::Rc; +//! let my_rc = Rc::new(()); +//! +//! Rc::downgrade(&my_rc); +//! ``` +//! +//! `Weak` does not auto-dereference to `T`, because the value may have +//! already been destroyed. +//! +//! [rc]: struct.Rc.html +//! [weak]: struct.Weak.html +//! [clone]: ../../std/clone/trait.Clone.html#tymethod.clone +//! [cell]: ../../std/cell/struct.Cell.html +//! 
[refcell]: ../../std/cell/struct.RefCell.html +//! [send]: ../../std/marker/trait.Send.html +//! [arc]: ../../std/sync/struct.Arc.html +//! [deref]: ../../std/ops/trait.Deref.html +//! [downgrade]: struct.Rc.html#method.downgrade +//! [upgrade]: struct.Weak.html#method.upgrade +//! [option]: ../../std/option/enum.Option.html +//! [assoc]: ../../book/method-syntax.html#associated-functions //! //! # Examples //! //! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`. //! We want to have our `Gadget`s point to their `Owner`. We can't do this with //! unique ownership, because more than one gadget may belong to the same -//! `Owner`. `Rc` allows us to share an `Owner` between multiple `Gadget`s, +//! `Owner`. `Rc` allows us to share an `Owner` between multiple `Gadget`s, //! and have the `Owner` remain allocated as long as any `Gadget` points at it. //! -//! ```rust +//! ``` //! use std::rc::Rc; //! //! struct Owner { -//! name: String +//! name: String, //! // ...other fields //! } //! //! struct Gadget { //! id: i32, -//! owner: Rc +//! owner: Rc, //! // ...other fields //! } //! //! fn main() { -//! // Create a reference counted Owner. -//! let gadget_owner : Rc = Rc::new( -//! Owner { name: String::from("Gadget Man") } +//! // Create a reference-counted `Owner`. +//! let gadget_owner: Rc = Rc::new( +//! Owner { +//! name: "Gadget Man".to_string(), +//! } //! ); //! -//! // Create Gadgets belonging to gadget_owner. To increment the reference -//! // count we clone the `Rc` object. -//! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; -//! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; +//! // Create `Gadget`s belonging to `gadget_owner`. Cloning the `Rc` +//! // value gives us a new pointer to the same `Owner` value, incrementing +//! // the reference count in the process. +//! let gadget1 = Gadget { +//! id: 1, +//! owner: gadget_owner.clone(), +//! }; +//! let gadget2 = Gadget { +//! id: 2, +//! 
owner: gadget_owner.clone(), +//! }; //! +//! // Dispose of our local variable `gadget_owner`. //! drop(gadget_owner); //! -//! // Despite dropping gadget_owner, we're still able to print out the name -//! // of the Owner of the Gadgets. This is because we've only dropped the -//! // reference count object, not the Owner it wraps. As long as there are -//! // other `Rc` objects pointing at the same Owner, it will remain -//! // allocated. Notice that the `Rc` wrapper around Gadget.owner gets -//! // automatically dereferenced for us. +//! // Despite dropping `gadget_owner`, we're still able to print out the name +//! // of the `Owner` of the `Gadget`s. This is because we've only dropped a +//! // single `Rc`, not the `Owner` it points to. As long as there are +//! // other `Rc` values pointing at the same `Owner`, it will remain +//! // allocated. The field projection `gadget1.owner.name` works because +//! // `Rc` automatically dereferences to `Owner`. //! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); //! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); //! -//! // At the end of the method, gadget1 and gadget2 get destroyed, and with -//! // them the last counted references to our Owner. Gadget Man now gets -//! // destroyed as well. +//! // At the end of the function, `gadget1` and `gadget2` are destroyed, and +//! // with them the last counted references to our `Owner`. Gadget Man now +//! // gets destroyed as well. //! } //! ``` //! //! If our requirements change, and we also need to be able to traverse from -//! Owner → Gadget, we will run into problems: an `Rc` pointer from Owner -//! → Gadget introduces a cycle between the objects. This means that their -//! reference counts can never reach 0, and the objects will remain allocated: a -//! memory leak. In order to get around this, we can use `Weak` pointers. -//! These pointers don't contribute to the total count. +//! `Owner` to `Gadget`, we will run into problems. 
An `Rc` pointer from `Owner` +//! to `Gadget` introduces a cycle between the values. This means that their +//! reference counts can never reach 0, and the values will remain allocated +//! forever: a memory leak. In order to get around this, we can use `Weak` +//! pointers. //! //! Rust actually makes it somewhat difficult to produce this loop in the first -//! place: in order to end up with two objects that point at each other, one of -//! them needs to be mutable. This is problematic because `Rc` enforces -//! memory safety by only giving out shared references to the object it wraps, +//! place. In order to end up with two values that point at each other, one of +//! them needs to be mutable. This is difficult because `Rc` enforces +//! memory safety by only giving out shared references to the value it wraps, //! and these don't allow direct mutation. We need to wrap the part of the -//! object we wish to mutate in a `RefCell`, which provides *interior +//! value we wish to mutate in a [`RefCell`][refcell], which provides *interior //! mutability*: a method to achieve mutability through a shared reference. -//! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell` -//! documentation for more details on interior mutability. +//! `RefCell` enforces Rust's borrowing rules at runtime. //! -//! ```rust +//! ``` //! use std::rc::Rc; //! use std::rc::Weak; //! use std::cell::RefCell; @@ -110,41 +160,58 @@ //! } //! //! fn main() { -//! // Create a reference counted Owner. Note the fact that we've put the -//! // Owner's vector of Gadgets inside a RefCell so that we can mutate it -//! // through a shared reference. -//! let gadget_owner : Rc = Rc::new( +//! // Create a reference-counted `Owner`. Note that we've put the `Owner`'s +//! // vector of `Gadget`s inside a `RefCell` so that we can mutate it through +//! // a shared reference. +//! let gadget_owner: Rc = Rc::new( //! Owner { //! name: "Gadget Man".to_string(), -//! 
gadgets: RefCell::new(Vec::new()), +//! gadgets: RefCell::new(vec![]), +//! } +//! ); +//! +//! // Create `Gadget`s belonging to `gadget_owner`, as before. +//! let gadget1 = Rc::new( +//! Gadget { +//! id: 1, +//! owner: gadget_owner.clone(), +//! } +//! ); +//! let gadget2 = Rc::new( +//! Gadget { +//! id: 2, +//! owner: gadget_owner.clone(), //! } //! ); //! -//! // Create Gadgets belonging to gadget_owner as before. -//! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); -//! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); +//! // Add the `Gadget`s to their `Owner`. +//! { +//! let mut gadgets = gadget_owner.gadgets.borrow_mut(); +//! gadgets.push(Rc::downgrade(&gadget1)); +//! gadgets.push(Rc::downgrade(&gadget2)); //! -//! // Add the Gadgets to their Owner. To do this we mutably borrow from -//! // the RefCell holding the Owner's Gadgets. -//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget1)); -//! gadget_owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget2)); +//! // `RefCell` dynamic borrow ends here. +//! } +//! +//! // Iterate over our `Gadget`s, printing their details out. +//! for gadget_weak in gadget_owner.gadgets.borrow().iter() { //! -//! // Iterate over our Gadgets, printing their details out -//! for gadget_opt in gadget_owner.gadgets.borrow().iter() { +//! // `gadget_weak` is a `Weak`. Since `Weak` pointers can't +//! // guarantee the value is still allocated, we need to call +//! // `upgrade`, which returns an `Option>`. +//! // +//! // In this case we know the value still exists, so we simply +//! // `unwrap` the `Option`. In a more complicated program, you might +//! // need graceful error handling for a `None` result. //! -//! // gadget_opt is a Weak. Since weak pointers can't guarantee -//! // that their object is still allocated, we need to call upgrade() -//! // on them to turn them into a strong reference. This returns an -//! 
// Option, which contains a reference to our object if it still -//! // exists. -//! let gadget = gadget_opt.upgrade().unwrap(); +//! let gadget = gadget_weak.upgrade().unwrap(); //! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); //! } //! -//! // At the end of the method, gadget_owner, gadget1 and gadget2 get -//! // destroyed. There are now no strong (`Rc`) references to the gadgets. -//! // Once they get destroyed, the Gadgets get destroyed. This zeroes the -//! // reference count on Gadget Man, they get destroyed as well. +//! // At the end of the function, `gadget_owner`, `gadget1`, and `gadget2` +//! // are destroyed. There are now no strong (`Rc`) pointers to the +//! // gadgets, so they are destroyed. This zeroes the reference count on +//! // Gadget Man, so he gets destroyed as well. //! } //! ``` @@ -159,17 +226,18 @@ use core::borrow; use core::cell::Cell; use core::cmp::Ordering; use core::fmt; -use core::hash::{Hasher, Hash}; -use core::intrinsics::{assume, abort}; +use core::hash::{Hash, Hasher}; +use core::intrinsics::{abort, assume}; use core::marker; use core::marker::Unsize; -use core::mem::{self, align_of_val, size_of_val, forget, uninitialized}; +use core::mem::{self, align_of_val, forget, size_of, size_of_val, uninitialized}; use core::ops::Deref; use core::ops::CoerceUnsized; use core::ptr::{self, Shared}; use core::convert::From; use heap::deallocate; +use raw_vec::RawVec; struct RcBox { strong: Cell, @@ -178,15 +246,17 @@ struct RcBox { } -/// A reference-counted pointer type over an immutable value. +/// A single-threaded reference-counting pointer. +/// +/// See the [module-level documentation](./index.html) for more details. /// -/// See the [module level documentation](./index.html) for more details. -#[unsafe_no_drop_flag] +/// The inherent methods of `Rc` are all associated functions, which means +/// that you have to call them as e.g. `Rc::get_mut(&value)` instead of +/// `value.get_mut()`. 
This avoids conflicts with methods of the inner +/// type `T`. #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { - // FIXME #12808: strange names to try to avoid interfering with field - // accesses of the contained type via Deref - _ptr: Shared>, + ptr: Shared>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -215,7 +285,7 @@ impl Rc { // pointers, which ensures that the weak destructor never frees // the allocation while the strong destructor is running, even // if the weak pointer is stored inside the strong one. - _ptr: Shared::new(Box::into_raw(box RcBox { + ptr: Shared::new(Box::into_raw(box RcBox { strong: Cell::new(1), weak: Cell::new(1), value: value, @@ -224,10 +294,14 @@ impl Rc { } } - /// Unwraps the contained value if the `Rc` has only one strong reference. + /// Returns the contained value, if the `Rc` has exactly one strong reference. + /// + /// Otherwise, an [`Err`][result] is returned with the same `Rc` that was + /// passed in. + /// /// This will succeed even if there are outstanding weak references. /// - /// Otherwise, an `Err` is returned with the same `Rc`. + /// [result]: ../../std/result/enum.Result.html /// /// # Examples /// @@ -239,7 +313,7 @@ impl Rc { /// /// let x = Rc::new(4); /// let _y = x.clone(); - /// assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4))); + /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4); /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] @@ -253,7 +327,7 @@ impl Rc { // pointer while also handling drop logic by just crafting a // fake Weak. this.dec_strong(); - let _weak = Weak { _ptr: this._ptr }; + let _weak = Weak { ptr: this.ptr }; forget(this); Ok(val) } @@ -262,17 +336,127 @@ impl Rc { } } - /// Checks if `Rc::try_unwrap` would return `Ok`. + /// Checks whether [`Rc::try_unwrap`][try_unwrap] would return + /// [`Ok`][result]. 
+ /// + /// [try_unwrap]: struct.Rc.html#method.try_unwrap + /// [result]: ../../std/result/enum.Result.html + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_would_unwrap)] + /// + /// use std::rc::Rc; + /// + /// let x = Rc::new(3); + /// assert!(Rc::would_unwrap(&x)); + /// assert_eq!(Rc::try_unwrap(x), Ok(3)); + /// + /// let x = Rc::new(4); + /// let _y = x.clone(); + /// assert!(!Rc::would_unwrap(&x)); + /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4); + /// ``` #[unstable(feature = "rc_would_unwrap", reason = "just added for niche usecase", issue = "28356")] pub fn would_unwrap(this: &Self) -> bool { Rc::strong_count(&this) == 1 } + + /// Consumes the `Rc`, returning the wrapped pointer. + /// + /// To avoid a memory leak the pointer must be converted back to an `Rc` using + /// [`Rc::from_raw`][from_raw]. + /// + /// [from_raw]: struct.Rc.html#method.from_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_raw)] + /// + /// use std::rc::Rc; + /// + /// let x = Rc::new(10); + /// let x_ptr = Rc::into_raw(x); + /// assert_eq!(unsafe { *x_ptr }, 10); + /// ``` + #[unstable(feature = "rc_raw", issue = "37197")] + pub fn into_raw(this: Self) -> *mut T { + let ptr = unsafe { &mut (**this.ptr).value as *mut _ }; + mem::forget(this); + ptr + } + + /// Constructs an `Rc` from a raw pointer. + /// + /// The raw pointer must have been previously returned by a call to a + /// [`Rc::into_raw`][into_raw]. + /// + /// This function is unsafe because improper use may lead to memory problems. For example, a + /// double-free may occur if the function is called twice on the same raw pointer. + /// + /// [into_raw]: struct.Rc.html#method.into_raw + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_raw)] + /// + /// use std::rc::Rc; + /// + /// let x = Rc::new(10); + /// let x_ptr = Rc::into_raw(x); + /// + /// unsafe { + /// // Convert back to an `Rc` to prevent leak. 
+ /// let x = Rc::from_raw(x_ptr); + /// assert_eq!(*x, 10); + /// + /// // Further calls to `Rc::from_raw(x_ptr)` would be memory unsafe. + /// } + /// + /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! + /// ``` + #[unstable(feature = "rc_raw", issue = "37197")] + pub unsafe fn from_raw(ptr: *mut T) -> Self { + // To find the corresponding pointer to the `RcBox` we need to subtract the offset of the + // `value` field from the pointer. + Rc { ptr: Shared::new((ptr as *mut u8).offset(-offset_of!(RcBox, value)) as *mut _) } + } +} + +impl Rc { + /// Constructs a new `Rc` from a string slice. + #[doc(hidden)] + #[unstable(feature = "rustc_private", + reason = "for internal use in rustc", + issue = "0")] + pub fn __from_str(value: &str) -> Rc { + unsafe { + // Allocate enough space for `RcBox`. + let aligned_len = 2 + (value.len() + size_of::() - 1) / size_of::(); + let vec = RawVec::::with_capacity(aligned_len); + let ptr = vec.ptr(); + forget(vec); + // Initialize fields of `RcBox`. + *ptr.offset(0) = 1; // strong: Cell::new(1) + *ptr.offset(1) = 1; // weak: Cell::new(1) + ptr::copy_nonoverlapping(value.as_ptr(), ptr.offset(2) as *mut u8, value.len()); + // Combine the allocation address and the string length into a fat pointer to `RcBox`. + let rcbox_ptr: *mut RcBox = mem::transmute([ptr as usize, value.len()]); + assert!(aligned_len * size_of::() == size_of_val(&*rcbox_ptr)); + Rc { ptr: Shared::new(rcbox_ptr) } + } + } } impl Rc { - /// Downgrades the `Rc` to a `Weak` reference. + /// Creates a new [`Weak`][weak] pointer to this value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// @@ -286,10 +470,25 @@ impl Rc { #[stable(feature = "rc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak { this.inc_weak(); - Weak { _ptr: this._ptr } + Weak { ptr: this.ptr } } - /// Get the number of weak references to this value. + /// Gets the number of [`Weak`][weak] pointers to this value. 
+ /// + /// [weak]: struct.Weak.html + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_counts)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let _weak_five = Rc::downgrade(&five); + /// + /// assert_eq!(1, Rc::weak_count(&five)); + /// ``` #[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] @@ -297,7 +496,20 @@ impl Rc { this.weak() - 1 } - /// Get the number of strong references to this value. + /// Gets the number of strong (`Rc`) pointers to this value. + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_counts)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let _also_five = five.clone(); + /// + /// assert_eq!(2, Rc::strong_count(&five)); + /// ``` #[inline] #[unstable(feature = "rc_counts", reason = "not clearly useful", issue = "28356")] @@ -305,8 +517,10 @@ impl Rc { this.strong() } - /// Returns true if there are no other `Rc` or `Weak` values that share - /// the same inner value. + /// Returns true if there are no other `Rc` or [`Weak`][weak] pointers to + /// this inner value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// @@ -326,10 +540,19 @@ impl Rc { Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1 } - /// Returns a mutable reference to the contained value if the `Rc` has - /// one strong reference and no weak references. + /// Returns a mutable reference to the inner value, if there are + /// no other `Rc` or [`Weak`][weak] pointers to the same value. + /// + /// Returns [`None`][option] otherwise, because it is not safe to + /// mutate a shared value. /// - /// Returns `None` if the `Rc` is not unique. + /// See also [`make_mut`][make_mut], which will [`clone`][clone] + /// the inner value when it's shared. 
+ /// + /// [weak]: struct.Weak.html + /// [option]: ../../std/option/enum.Option.html + /// [make_mut]: struct.Rc.html#method.make_mut + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone /// /// # Examples /// @@ -347,20 +570,53 @@ impl Rc { #[stable(feature = "rc_unique", since = "1.4.0")] pub fn get_mut(this: &mut Self) -> Option<&mut T> { if Rc::is_unique(this) { - let inner = unsafe { &mut **this._ptr }; + let inner = unsafe { &mut **this.ptr }; Some(&mut inner.value) } else { None } } + + #[inline] + #[unstable(feature = "ptr_eq", + reason = "newly added", + issue = "36497")] + /// Returns true if the two `Rc`s point to the same value (not + /// just values that compare as equal). + /// + /// # Examples + /// + /// ``` + /// #![feature(ptr_eq)] + /// + /// use std::rc::Rc; + /// + /// let five = Rc::new(5); + /// let same_five = five.clone(); + /// let other_five = Rc::new(5); + /// + /// assert!(Rc::ptr_eq(&five, &same_five)); + /// assert!(!Rc::ptr_eq(&five, &other_five)); + /// ``` + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + let this_ptr: *const RcBox = *this.ptr; + let other_ptr: *const RcBox = *other.ptr; + this_ptr == other_ptr + } } impl Rc { - /// Make a mutable reference into the given `Rc` by cloning the inner - /// data if the `Rc` doesn't have one strong reference and no weak - /// references. + /// Makes a mutable reference into the given `Rc`. + /// + /// If there are other `Rc` or [`Weak`][weak] pointers to the same value, + /// then `make_mut` will invoke [`clone`][clone] on the inner value to + /// ensure unique ownership. This is also referred to as clone-on-write. /// - /// This is also referred to as a copy-on-write. + /// See also [`get_mut`][get_mut], which will fail rather than cloning. 
+ /// + /// [weak]: struct.Weak.html + /// [clone]: ../../std/clone/trait.Clone.html#tymethod.clone + /// [get_mut]: struct.Rc.html#method.get_mut /// /// # Examples /// @@ -369,16 +625,15 @@ impl Rc { /// /// let mut data = Rc::new(5); /// - /// *Rc::make_mut(&mut data) += 1; // Won't clone anything - /// let mut other_data = data.clone(); // Won't clone inner data - /// *Rc::make_mut(&mut data) += 1; // Clones inner data - /// *Rc::make_mut(&mut data) += 1; // Won't clone anything - /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// let mut other_data = data.clone(); // Won't clone inner data + /// *Rc::make_mut(&mut data) += 1; // Clones inner data + /// *Rc::make_mut(&mut data) += 1; // Won't clone anything + /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything /// - /// // Note: data and other_data now point to different numbers + /// // Now `data` and `other_data` point to different values. /// assert_eq!(*data, 8); /// assert_eq!(*other_data, 12); - /// /// ``` #[inline] #[stable(feature = "rc_unique", since = "1.4.0")] @@ -389,7 +644,7 @@ impl Rc { } else if Rc::weak_count(this) != 0 { // Can just steal the data, all that's left is Weaks unsafe { - let mut swap = Rc::new(ptr::read(&(**this._ptr).value)); + let mut swap = Rc::new(ptr::read(&(**this.ptr).value)); mem::swap(this, &mut swap); swap.dec_strong(); // Remove implicit strong-weak ref (no need to craft a fake @@ -403,7 +658,7 @@ impl Rc { // reference count is guaranteed to be 1 at this point, and we required // the `Rc` itself to be `mut`, so we're returning the only possible // reference to the inner value. - let inner = unsafe { &mut **this._ptr }; + let inner = unsafe { &mut **this.ptr }; &mut inner.value } } @@ -420,50 +675,49 @@ impl Deref for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Rc { - /// Drops the `Rc`. + /// Drops the `Rc`. 
/// /// This will decrement the strong reference count. If the strong reference - /// count becomes zero and the only other references are `Weak` ones, - /// `drop`s the inner value. + /// count reaches zero then the only other references (if any) are + /// [`Weak`][weak], so we `drop` the inner value. + /// + /// [weak]: struct.Weak.html /// /// # Examples /// /// ``` /// use std::rc::Rc; /// - /// { - /// let five = Rc::new(5); + /// struct Foo; /// - /// // stuff - /// - /// drop(five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Rc::new(5); /// - /// // stuff + /// let foo = Rc::new(Foo); + /// let foo2 = foo.clone(); /// - /// } // implicit drop + /// drop(foo); // Doesn't print anything + /// drop(foo2); // Prints "dropped!" /// ``` #[unsafe_destructor_blind_to_params] fn drop(&mut self) { unsafe { - let ptr = *self._ptr; - let thin = ptr as *const (); - - if thin as usize != mem::POST_DROP_USIZE { - self.dec_strong(); - if self.strong() == 0 { - // destroy the contained object - ptr::drop_in_place(&mut (*ptr).value); - - // remove the implicit "strong weak" pointer now that we've - // destroyed the contents. - self.dec_weak(); - - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + let ptr = *self.ptr; + + self.dec_strong(); + if self.strong() == 0 { + // destroy the contained object + ptr::drop_in_place(&mut (*ptr).value); + + // remove the implicit "strong weak" pointer now that we've + // destroyed the contents. + self.dec_weak(); + + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } @@ -472,10 +726,10 @@ impl Drop for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Rc { - /// Makes a clone of the `Rc`. + /// Makes a clone of the `Rc` pointer. 
/// - /// When you clone an `Rc`, it will create another pointer to the data and - /// increase the strong reference counter. + /// This creates another pointer to the same inner value, increasing the + /// strong reference count. /// /// # Examples /// @@ -489,7 +743,7 @@ impl Clone for Rc { #[inline] fn clone(&self) -> Rc { self.inc_strong(); - Rc { _ptr: self._ptr } + Rc { ptr: self.ptr } } } @@ -503,6 +757,7 @@ impl Default for Rc { /// use std::rc::Rc; /// /// let x: Rc = Default::default(); + /// assert_eq!(*x, 0); /// ``` #[inline] fn default() -> Rc { @@ -512,9 +767,9 @@ impl Default for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Rc { - /// Equality for two `Rc`s. + /// Equality for two `Rc`s. /// - /// Two `Rc`s are equal if their inner value are equal. + /// Two `Rc`s are equal if their inner values are equal. /// /// # Examples /// @@ -523,16 +778,16 @@ impl PartialEq for Rc { /// /// let five = Rc::new(5); /// - /// five == Rc::new(5); + /// assert!(five == Rc::new(5)); /// ``` #[inline(always)] fn eq(&self, other: &Rc) -> bool { **self == **other } - /// Inequality for two `Rc`s. + /// Inequality for two `Rc`s. /// - /// Two `Rc`s are unequal if their inner value are unequal. + /// Two `Rc`s are unequal if their inner values are unequal. /// /// # Examples /// @@ -541,7 +796,7 @@ impl PartialEq for Rc { /// /// let five = Rc::new(5); /// - /// five != Rc::new(5); + /// assert!(five != Rc::new(6)); /// ``` #[inline(always)] fn ne(&self, other: &Rc) -> bool { @@ -554,7 +809,7 @@ impl Eq for Rc {} #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for Rc { - /// Partial comparison for two `Rc`s. + /// Partial comparison for two `Rc`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. 
/// @@ -562,17 +817,18 @@ impl PartialOrd for Rc { /// /// ``` /// use std::rc::Rc; + /// use std::cmp::Ordering; /// /// let five = Rc::new(5); /// - /// five.partial_cmp(&Rc::new(5)); + /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Rc::new(6))); /// ``` #[inline(always)] fn partial_cmp(&self, other: &Rc) -> Option { (**self).partial_cmp(&**other) } - /// Less-than comparison for two `Rc`s. + /// Less-than comparison for two `Rc`s. /// /// The two are compared by calling `<` on their inner values. /// @@ -583,14 +839,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five < Rc::new(5); + /// assert!(five < Rc::new(6)); /// ``` #[inline(always)] fn lt(&self, other: &Rc) -> bool { **self < **other } - /// 'Less-than or equal to' comparison for two `Rc`s. + /// 'Less than or equal to' comparison for two `Rc`s. /// /// The two are compared by calling `<=` on their inner values. /// @@ -601,14 +857,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five <= Rc::new(5); + /// assert!(five <= Rc::new(5)); /// ``` #[inline(always)] fn le(&self, other: &Rc) -> bool { **self <= **other } - /// Greater-than comparison for two `Rc`s. + /// Greater-than comparison for two `Rc`s. /// /// The two are compared by calling `>` on their inner values. /// @@ -619,14 +875,14 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five > Rc::new(5); + /// assert!(five > Rc::new(4)); /// ``` #[inline(always)] fn gt(&self, other: &Rc) -> bool { **self > **other } - /// 'Greater-than or equal to' comparison for two `Rc`s. + /// 'Greater than or equal to' comparison for two `Rc`s. /// /// The two are compared by calling `>=` on their inner values. 
/// @@ -637,7 +893,7 @@ impl PartialOrd for Rc { /// /// let five = Rc::new(5); /// - /// five >= Rc::new(5); + /// assert!(five >= Rc::new(5)); /// ``` #[inline(always)] fn ge(&self, other: &Rc) -> bool { @@ -647,7 +903,7 @@ impl PartialOrd for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl Ord for Rc { - /// Comparison for two `Rc`s. + /// Comparison for two `Rc`s. /// /// The two are compared by calling `cmp()` on their inner values. /// @@ -655,10 +911,11 @@ impl Ord for Rc { /// /// ``` /// use std::rc::Rc; + /// use std::cmp::Ordering; /// /// let five = Rc::new(5); /// - /// five.partial_cmp(&Rc::new(5)); + /// assert_eq!(Ordering::Less, five.cmp(&Rc::new(6))); /// ``` #[inline] fn cmp(&self, other: &Rc) -> Ordering { @@ -688,9 +945,9 @@ impl fmt::Debug for Rc { } #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Pointer for Rc { +impl fmt::Pointer for Rc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&*self._ptr, f) + fmt::Pointer::fmt(&*self.ptr, f) } } @@ -701,35 +958,71 @@ impl From for Rc { } } -/// A weak version of `Rc`. +/// A weak version of [`Rc`][rc]. +/// +/// `Weak` pointers do not count towards determining if the inner value +/// should be dropped. +/// +/// The typical way to obtain a `Weak` pointer is to call +/// [`Rc::downgrade`][downgrade]. /// -/// Weak references do not count when determining if the inner value should be -/// dropped. +/// See the [module-level documentation](./index.html) for more details. /// -/// See the [module level documentation](./index.html) for more. 
-#[unsafe_no_drop_flag] +/// [rc]: struct.Rc.html +/// [downgrade]: struct.Rc.html#method.downgrade #[stable(feature = "rc_weak", since = "1.4.0")] pub struct Weak { - // FIXME #12808: strange names to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: Shared>, + ptr: Shared>, } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "rc_weak", since = "1.4.0")] impl !marker::Send for Weak {} -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "rc_weak", since = "1.4.0")] impl !marker::Sync for Weak {} #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Weak {} +impl Weak { + /// Constructs a new `Weak`, without an accompanying instance of `T`. + /// + /// This allocates memory for `T`, but does not initialize it. Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. + /// + /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html + /// + /// # Examples + /// + /// ``` + /// use std::rc::Weak; + /// + /// let empty: Weak = Weak::new(); + /// assert!(empty.upgrade().is_none()); + /// ``` + #[stable(feature = "downgraded_weak", since = "1.10.0")] + pub fn new() -> Weak { + unsafe { + Weak { + ptr: Shared::new(Box::into_raw(box RcBox { + strong: Cell::new(0), + weak: Cell::new(1), + value: uninitialized(), + })), + } + } + } +} + impl Weak { - /// Upgrades a weak reference to a strong reference. + /// Upgrades the `Weak` pointer to an [`Rc`][rc], if possible. /// - /// Upgrades the `Weak` reference to an `Rc`, if possible. + /// Returns [`None`][option] if the strong count has reached zero and the + /// inner value was destroyed. /// - /// Returns `None` if there were no strong references and the data was - /// destroyed. 
+ /// [rc]: struct.Rc.html + /// [option]: ../../std/option/enum.Option.html /// /// # Examples /// @@ -741,6 +1034,13 @@ impl Weak { /// let weak_five = Rc::downgrade(&five); /// /// let strong_five: Option> = weak_five.upgrade(); + /// assert!(strong_five.is_some()); + /// + /// // Destroy all strong pointers. + /// drop(strong_five); + /// drop(five); + /// + /// assert!(weak_five.upgrade().is_none()); /// ``` #[stable(feature = "rc_weak", since = "1.4.0")] pub fn upgrade(&self) -> Option> { @@ -748,14 +1048,14 @@ impl Weak { None } else { self.inc_strong(); - Some(Rc { _ptr: self._ptr }) + Some(Rc { ptr: self.ptr }) } } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "rc_weak", since = "1.4.0")] impl Drop for Weak { - /// Drops the `Weak`. + /// Drops the `Weak` pointer. /// /// This will decrement the weak reference count. /// @@ -764,34 +1064,32 @@ impl Drop for Weak { /// ``` /// use std::rc::Rc; /// - /// { - /// let five = Rc::new(5); - /// let weak_five = Rc::downgrade(&five); - /// - /// // stuff + /// struct Foo; /// - /// drop(weak_five); // explicit drop + /// impl Drop for Foo { + /// fn drop(&mut self) { + /// println!("dropped!"); + /// } /// } - /// { - /// let five = Rc::new(5); - /// let weak_five = Rc::downgrade(&five); /// - /// // stuff + /// let foo = Rc::new(Foo); + /// let weak_foo = Rc::downgrade(&foo); + /// let other_weak_foo = weak_foo.clone(); + /// + /// drop(weak_foo); // Doesn't print anything + /// drop(foo); // Prints "dropped!" /// - /// } // implicit drop + /// assert!(other_weak_foo.upgrade().is_none()); /// ``` fn drop(&mut self) { unsafe { - let ptr = *self._ptr; - let thin = ptr as *const (); + let ptr = *self.ptr; - if thin as usize != mem::POST_DROP_USIZE { - self.dec_weak(); - // the weak count starts at 1, and will only go to zero if all - // the strong pointers have disappeared. 
- if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) - } + self.dec_weak(); + // the weak count starts at 1, and will only go to zero if all + // the strong pointers have disappeared. + if self.weak() == 0 { + deallocate(ptr as *mut u8, size_of_val(&*ptr), align_of_val(&*ptr)) } } } @@ -799,9 +1097,10 @@ impl Drop for Weak { #[stable(feature = "rc_weak", since = "1.4.0")] impl Clone for Weak { - /// Makes a clone of the `Weak`. + /// Makes a clone of the `Weak` pointer. /// - /// This increases the weak reference count. + /// This creates another pointer to the same inner value, increasing the + /// weak reference count. /// /// # Examples /// @@ -815,45 +1114,38 @@ impl Clone for Weak { #[inline] fn clone(&self) -> Weak { self.inc_weak(); - Weak { _ptr: self._ptr } + Weak { ptr: self.ptr } } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "rc_weak", since = "1.4.0")] impl fmt::Debug for Weak { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "(Weak)") } } -impl Weak { - /// Constructs a new `Weak` without an accompanying instance of T. +#[stable(feature = "downgraded_weak", since = "1.10.0")] +impl Default for Weak { + /// Constructs a new `Weak`, without an accompanying instance of `T`. + /// + /// This allocates memory for `T`, but does not initialize it. Calling + /// [`upgrade`][upgrade] on the return value always gives + /// [`None`][option]. /// - /// This allocates memory for T, but does not initialize it. Calling - /// Weak::upgrade() on the return value always gives None. 
+ /// [upgrade]: struct.Weak.html#method.upgrade + /// [option]: ../../std/option/enum.Option.html /// /// # Examples /// /// ``` - /// #![feature(downgraded_weak)] - /// /// use std::rc::Weak; /// - /// let empty: Weak = Weak::new(); + /// let empty: Weak = Default::default(); + /// assert!(empty.upgrade().is_none()); /// ``` - #[unstable(feature = "downgraded_weak", - reason = "recently added", - issue="30425")] - pub fn new() -> Weak { - unsafe { - Weak { - _ptr: Shared::new(Box::into_raw(box RcBox { - strong: Cell::new(0), - weak: Cell::new(1), - value: uninitialized(), - })), - } - } + fn default() -> Weak { + Weak::new() } } @@ -909,8 +1201,8 @@ impl RcBoxPtr for Rc { // the contract anyway. // This allows the null check to be elided in the destructor if we // manipulated the reference count in the same function. - assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); - &(**self._ptr) + assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); + &(**self.ptr) } } } @@ -923,8 +1215,8 @@ impl RcBoxPtr for Weak { // the contract anyway. // This allows the null check to be elided in the destructor if we // manipulated the reference count in the same function. 
- assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); - &(**self._ptr) + assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); + &(**self.ptr) } } } @@ -935,7 +1227,7 @@ mod tests { use std::boxed::Box; use std::cell::RefCell; use std::option::Option; - use std::option::Option::{Some, None}; + use std::option::Option::{None, Some}; use std::result::Result::{Err, Ok}; use std::mem::drop; use std::clone::Clone; @@ -1013,7 +1305,7 @@ mod tests { #[test] fn test_strong_count() { - let a = Rc::new(0u32); + let a = Rc::new(0); assert!(Rc::strong_count(&a) == 1); let w = Rc::downgrade(&a); assert!(Rc::strong_count(&a) == 1); @@ -1030,7 +1322,7 @@ mod tests { #[test] fn test_weak_count() { - let a = Rc::new(0u32); + let a = Rc::new(0); assert!(Rc::strong_count(&a) == 1); assert!(Rc::weak_count(&a) == 0); let w = Rc::downgrade(&a); @@ -1057,6 +1349,23 @@ mod tests { assert_eq!(Rc::try_unwrap(x), Ok(5)); } + #[test] + fn into_from_raw() { + let x = Rc::new(box "hello"); + let y = x.clone(); + + let x_ptr = Rc::into_raw(x); + drop(y); + unsafe { + assert_eq!(**x_ptr, "hello"); + + let x = Rc::from_raw(x_ptr); + assert_eq!(**x, "hello"); + + assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello")); + } + } + #[test] fn get_mut() { let mut x = Rc::new(3); @@ -1155,6 +1464,16 @@ mod tests { let foo: Weak = Weak::new(); assert!(foo.upgrade().is_none()); } + + #[test] + fn test_ptr_eq() { + let five = Rc::new(5); + let same_five = five.clone(); + let other_five = Rc::new(5); + + assert!(Rc::ptr_eq(&five, &same_five)); + assert!(!Rc::ptr_eq(&five, &other_five)); + } } #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/liballoc_jemalloc/Cargo.toml b/src/liballoc_jemalloc/Cargo.toml new file mode 100644 index 0000000000000..25b3c8a3a0a83 --- /dev/null +++ b/src/liballoc_jemalloc/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors = ["The Rust Project Developers"] +name = "alloc_jemalloc" +version = "0.0.0" +build = "build.rs" +links = "jemalloc" + 
+[lib] +name = "alloc_jemalloc" +path = "lib.rs" +test = false + +[dependencies] +core = { path = "../libcore" } +libc = { path = "../rustc/libc_shim" } + +[build-dependencies] +build_helper = { path = "../build_helper" } +gcc = "0.3.27" + +[features] +debug = [] diff --git a/src/liballoc_jemalloc/build.rs b/src/liballoc_jemalloc/build.rs new file mode 100644 index 0000000000000..08a1f8ae8c6ca --- /dev/null +++ b/src/liballoc_jemalloc/build.rs @@ -0,0 +1,170 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![deny(warnings)] + +extern crate build_helper; +extern crate gcc; + +use std::env; +use std::path::PathBuf; +use std::process::Command; +use build_helper::run; + +fn main() { + println!("cargo:rustc-cfg=cargobuild"); + println!("cargo:rerun-if-changed=build.rs"); + + let target = env::var("TARGET").expect("TARGET was not set"); + let host = env::var("HOST").expect("HOST was not set"); + let build_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap()); + let src_dir = env::current_dir().unwrap(); + + // FIXME: This is a hack to support building targets that don't + // support jemalloc alongside hosts that do. The jemalloc build is + // controlled by a feature of the std crate, and if that feature + // changes between targets, it invalidates the fingerprint of + // std's build script (this is a cargo bug); so we must ensure + // that the feature set used by std is the same across all + // targets, which means we have to build the alloc_jemalloc crate + // for targets like emscripten, even if we don't use it. 
+ if target.contains("rumprun") || target.contains("bitrig") || target.contains("openbsd") || + target.contains("msvc") || target.contains("emscripten") || target.contains("fuchsia") { + println!("cargo:rustc-cfg=dummy_jemalloc"); + return; + } + + if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") { + let jemalloc = PathBuf::from(jemalloc); + println!("cargo:rustc-link-search=native={}", + jemalloc.parent().unwrap().display()); + let stem = jemalloc.file_stem().unwrap().to_str().unwrap(); + let name = jemalloc.file_name().unwrap().to_str().unwrap(); + let kind = if name.ends_with(".a") { + "static" + } else { + "dylib" + }; + println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]); + return; + } + + let compiler = gcc::Config::new().get_compiler(); + // only msvc returns None for ar so unwrap is okay + let ar = build_helper::cc2ar(compiler.path(), &target).unwrap(); + let cflags = compiler.args() + .iter() + .map(|s| s.to_str().unwrap()) + .collect::>() + .join(" "); + + let mut stack = src_dir.join("../jemalloc") + .read_dir() + .unwrap() + .map(|e| e.unwrap()) + .collect::>(); + while let Some(entry) = stack.pop() { + let path = entry.path(); + if entry.file_type().unwrap().is_dir() { + stack.extend(path.read_dir().unwrap().map(|e| e.unwrap())); + } else { + println!("cargo:rerun-if-changed={}", path.display()); + } + } + + let mut cmd = Command::new("sh"); + cmd.arg(src_dir.join("../jemalloc/configure") + .to_str() + .unwrap() + .replace("C:\\", "/c/") + .replace("\\", "/")) + .current_dir(&build_dir) + .env("CC", compiler.path()) + .env("EXTRA_CFLAGS", cflags.clone()) + // jemalloc generates Makefile deps using GCC's "-MM" flag. This means + // that GCC will run the preprocessor, and only the preprocessor, over + // jemalloc's source files. If we don't specify CPPFLAGS, then at least + // on ARM that step fails with a "Missing implementation for 32-bit + // atomic operations" error. 
This is because no "-march" flag will be + // passed to GCC, and then GCC won't define the + // "__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4" macro that jemalloc needs to + // select an atomic operation implementation. + .env("CPPFLAGS", cflags.clone()) + .env("AR", &ar) + .env("RANLIB", format!("{} s", ar.display())); + + if target.contains("windows") { + // A bit of history here, this used to be --enable-lazy-lock added in + // #14006 which was filed with jemalloc in jemalloc/jemalloc#83 which + // was also reported to MinGW: + // + // http://sourceforge.net/p/mingw-w64/bugs/395/ + // + // When updating jemalloc to 4.0, however, it was found that binaries + // would exit with the status code STATUS_RESOURCE_NOT_OWNED indicating + // that a thread was unlocking a mutex it never locked. Disabling this + // "lazy lock" option seems to fix the issue, but it was enabled by + // default for MinGW targets in 13473c7 for jemalloc. + // + // As a result of all that, force disabling lazy lock on Windows, and + // after reading some code it at least *appears* that the initialization + // of mutexes is otherwise ok in jemalloc, so shouldn't cause problems + // hopefully... + // + // tl;dr: make windows behave like other platforms by disabling lazy + // locking, but requires passing an option due to a historical + // default with jemalloc. + cmd.arg("--disable-lazy-lock"); + } else if target.contains("ios") { + cmd.arg("--disable-tls"); + } else if target.contains("android") { + // We force android to have prefixed symbols because apparently + // replacement of the libc allocator doesn't quite work. When this was + // tested (unprefixed symbols), it was found that the `realpath` + // function in libc would allocate with libc malloc (not jemalloc + // malloc), and then the standard library would free with jemalloc free, + // causing a segfault. + // + // If the test suite passes, however, without symbol prefixes then we + // should be good to go! 
+ cmd.arg("--with-jemalloc-prefix=je_"); + cmd.arg("--disable-tls"); + } else if target.contains("dragonfly") { + cmd.arg("--with-jemalloc-prefix=je_"); + } + + if cfg!(feature = "debug-jemalloc") { + cmd.arg("--enable-debug"); + } + + // Turn off broken quarantine (see jemalloc/jemalloc#161) + cmd.arg("--disable-fill"); + cmd.arg(format!("--host={}", build_helper::gnu_target(&target))); + cmd.arg(format!("--build={}", build_helper::gnu_target(&host))); + + run(&mut cmd); + run(Command::new("make") + .current_dir(&build_dir) + .arg("build_lib_static") + .arg("-j") + .arg(env::var("NUM_JOBS").expect("NUM_JOBS was not set"))); + + if target.contains("windows") { + println!("cargo:rustc-link-lib=static=jemalloc"); + } else { + println!("cargo:rustc-link-lib=static=jemalloc_pic"); + } + println!("cargo:rustc-link-search=native={}/lib", build_dir.display()); + if target.contains("android") { + println!("cargo:rustc-link-lib=gcc"); + } else if !target.contains("windows") && !target.contains("musl") { + println!("cargo:rustc-link-lib=pthread"); + } +} diff --git a/src/liballoc_jemalloc/lib.rs b/src/liballoc_jemalloc/lib.rs index 91d229b819df1..21e45f9c4b20c 100644 --- a/src/liballoc_jemalloc/lib.rs +++ b/src/liballoc_jemalloc/lib.rs @@ -16,108 +16,177 @@ reason = "this library is unlikely to be stabilized in its current \ form or name", issue = "27783")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(allocator)] #![feature(libc)] #![feature(staged_api)] extern crate libc; -use libc::{c_int, c_void, size_t}; +pub use imp::*; -// Linkage directives to pull in jemalloc and its dependencies. -// -// On some platforms we need to be sure to link in `pthread` which jemalloc -// depends on, and specifically on android we need to also link to libgcc. -// Currently jemalloc is compiled with gcc which will generate calls to -// intrinsics that are libgcc specific (e.g. those intrinsics aren't present in -// libcompiler-rt), so link that in to get that support. 
-#[link(name = "jemalloc", kind = "static")] -#[cfg_attr(target_os = "android", link(name = "gcc"))] -#[cfg_attr(all(not(windows), - not(target_os = "android"), - not(target_env = "musl")), - link(name = "pthread"))] -extern "C" { - fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void; - fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; - fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; - fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); - fn je_nallocx(size: size_t, flags: c_int) -> size_t; -} +// See comments in build.rs for why we sometimes build a crate that does nothing +#[cfg(not(dummy_jemalloc))] +mod imp { + use libc::{c_int, c_void, size_t}; -// The minimum alignment guaranteed by the architecture. This value is used to -// add fast paths for low alignment values. In practice, the alignment is a -// constant at the call site and the branch will be optimized out. -#[cfg(all(any(target_arch = "arm", - target_arch = "mips", - target_arch = "mipsel", - target_arch = "powerpc")))] -const MIN_ALIGN: usize = 8; -#[cfg(all(any(target_arch = "x86", - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "powerpc64le")))] -const MIN_ALIGN: usize = 16; - -// MALLOCX_ALIGN(a) macro -fn mallocx_align(a: usize) -> c_int { - a.trailing_zeros() as c_int -} + // Linkage directives to pull in jemalloc and its dependencies. + // + // On some platforms we need to be sure to link in `pthread` which jemalloc + // depends on, and specifically on android we need to also link to libgcc. + // Currently jemalloc is compiled with gcc which will generate calls to + // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in + // libcompiler-rt), so link that in to get that support. 
+ #[link(name = "jemalloc", kind = "static")] + #[cfg_attr(target_os = "android", link(name = "gcc"))] + #[cfg_attr(all(not(windows), + not(target_os = "android"), + not(target_env = "musl")), + link(name = "pthread"))] + #[cfg(not(cargobuild))] + extern "C" {} + + // Note that the symbols here are prefixed by default on OSX and Windows (we + // don't explicitly request it), and on Android and DragonFly we explicitly + // request it as unprefixing cause segfaults (mismatches in allocators). + extern "C" { + #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", + target_os = "dragonfly", target_os = "windows"), + link_name = "je_mallocx")] + fn mallocx(size: size_t, flags: c_int) -> *mut c_void; + #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", + target_os = "dragonfly", target_os = "windows"), + link_name = "je_rallocx")] + fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; + #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", + target_os = "dragonfly", target_os = "windows"), + link_name = "je_xallocx")] + fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; + #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", + target_os = "dragonfly", target_os = "windows"), + link_name = "je_sdallocx")] + fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); + #[cfg_attr(any(target_os = "macos", target_os = "android", target_os = "ios", + target_os = "dragonfly", target_os = "windows"), + link_name = "je_nallocx")] + fn nallocx(size: size_t, flags: c_int) -> size_t; + } + + // The minimum alignment guaranteed by the architecture. This value is used to + // add fast paths for low alignment values. In practice, the alignment is a + // constant at the call site and the branch will be optimized out. 
+ #[cfg(all(any(target_arch = "arm", + target_arch = "mips", + target_arch = "powerpc")))] + const MIN_ALIGN: usize = 8; + #[cfg(all(any(target_arch = "x86", + target_arch = "x86_64", + target_arch = "aarch64", + target_arch = "powerpc64", + target_arch = "mips64", + target_arch = "s390x")))] + const MIN_ALIGN: usize = 16; + + // MALLOCX_ALIGN(a) macro + fn mallocx_align(a: usize) -> c_int { + a.trailing_zeros() as c_int + } + + fn align_to_flags(align: usize) -> c_int { + if align <= MIN_ALIGN { + 0 + } else { + mallocx_align(align) + } + } + + #[no_mangle] + pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 { + let flags = align_to_flags(align); + unsafe { mallocx(size as size_t, flags) as *mut u8 } + } + + #[no_mangle] + pub extern "C" fn __rust_reallocate(ptr: *mut u8, + _old_size: usize, + size: usize, + align: usize) + -> *mut u8 { + let flags = align_to_flags(align); + unsafe { rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 } + } + + #[no_mangle] + pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8, + _old_size: usize, + size: usize, + align: usize) + -> usize { + let flags = align_to_flags(align); + unsafe { xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize } + } -fn align_to_flags(align: usize) -> c_int { - if align <= MIN_ALIGN { + #[no_mangle] + pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) { + let flags = align_to_flags(align); + unsafe { sdallocx(ptr as *mut c_void, old_size as size_t, flags) } + } + + #[no_mangle] + pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize { + let flags = align_to_flags(align); + unsafe { nallocx(size as size_t, flags) as usize } + } + + // These symbols are used by jemalloc on android but the really old android + // we're building on doesn't have them defined, so just make sure the symbols + // are available. 
+ #[no_mangle] + #[cfg(target_os = "android")] + pub extern "C" fn pthread_atfork(_prefork: *mut u8, + _postfork_parent: *mut u8, + _postfork_child: *mut u8) + -> i32 { 0 - } else { - mallocx_align(align) } } -#[no_mangle] -pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 { - let flags = align_to_flags(align); - unsafe { je_mallocx(size as size_t, flags) as *mut u8 } -} +#[cfg(dummy_jemalloc)] +mod imp { + fn bogus() -> ! { + panic!("jemalloc is not implemented for this platform"); + } -#[no_mangle] -pub extern "C" fn __rust_reallocate(ptr: *mut u8, - _old_size: usize, - size: usize, - align: usize) - -> *mut u8 { - let flags = align_to_flags(align); - unsafe { je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 } -} + #[no_mangle] + pub extern "C" fn __rust_allocate(_size: usize, _align: usize) -> *mut u8 { + bogus() + } -#[no_mangle] -pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8, - _old_size: usize, - size: usize, - align: usize) - -> usize { - let flags = align_to_flags(align); - unsafe { je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize } -} + #[no_mangle] + pub extern "C" fn __rust_reallocate(_ptr: *mut u8, + _old_size: usize, + _size: usize, + _align: usize) + -> *mut u8 { + bogus() + } -#[no_mangle] -pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) { - let flags = align_to_flags(align); - unsafe { je_sdallocx(ptr as *mut c_void, old_size as size_t, flags) } -} + #[no_mangle] + pub extern "C" fn __rust_reallocate_inplace(_ptr: *mut u8, + _old_size: usize, + _size: usize, + _align: usize) + -> usize { + bogus() + } -#[no_mangle] -pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize { - let flags = align_to_flags(align); - unsafe { je_nallocx(size as size_t, flags) as usize } -} + #[no_mangle] + pub extern "C" fn __rust_deallocate(_ptr: *mut u8, _old_size: usize, _align: usize) { + bogus() + } -// These symbols are used by jemalloc on android but 
the really old android -// we're building on doesn't have them defined, so just make sure the symbols -// are available. -#[no_mangle] -#[cfg(target_os = "android")] -pub extern fn pthread_atfork(_prefork: *mut u8, - _postfork_parent: *mut u8, - _postfork_child: *mut u8) -> i32 { - 0 + #[no_mangle] + pub extern "C" fn __rust_usable_size(_size: usize, _align: usize) -> usize { + bogus() + } } diff --git a/src/liballoc_system/Cargo.toml b/src/liballoc_system/Cargo.toml new file mode 100644 index 0000000000000..88e8e2d7adbc3 --- /dev/null +++ b/src/liballoc_system/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["The Rust Project Developers"] +name = "alloc_system" +version = "0.0.0" + +[lib] +name = "alloc_system" +path = "lib.rs" +test = false + +[dependencies] +core = { path = "../libcore" } +libc = { path = "../rustc/libc_shim" } diff --git a/src/liballoc_system/lib.rs b/src/liballoc_system/lib.rs index ffb6999d6e3fe..a4fabb5a2c96d 100644 --- a/src/liballoc_system/lib.rs +++ b/src/liballoc_system/lib.rs @@ -12,15 +12,14 @@ #![crate_type = "rlib"] #![no_std] #![allocator] +#![cfg_attr(not(stage0), deny(warnings))] #![unstable(feature = "alloc_system", reason = "this library is unlikely to be stabilized in its current \ form or name", issue = "27783")] #![feature(allocator)] -#![feature(libc)] #![feature(staged_api)] - -extern crate libc; +#![cfg_attr(unix, feature(libc))] // The minimum alignment guaranteed by the architecture. This value is used to // add fast paths for low alignment values. 
In practice, the alignment is a @@ -28,13 +27,15 @@ extern crate libc; #[cfg(all(any(target_arch = "x86", target_arch = "arm", target_arch = "mips", - target_arch = "mipsel", target_arch = "powerpc", target_arch = "powerpc64", - target_arch = "powerpc64le")))] + target_arch = "asmjs", + target_arch = "wasm32")))] const MIN_ALIGN: usize = 8; #[cfg(all(any(target_arch = "x86_64", - target_arch = "aarch64")))] + target_arch = "aarch64", + target_arch = "mips64", + target_arch = "s390x")))] const MIN_ALIGN: usize = 16; #[no_mangle] @@ -72,22 +73,50 @@ pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize { #[cfg(unix)] mod imp { + extern crate libc; + use core::cmp; use core::ptr; - use libc; use MIN_ALIGN; pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 { if align <= MIN_ALIGN { libc::malloc(size as libc::size_t) as *mut u8 } else { - let mut out = ptr::null_mut(); - let ret = libc::posix_memalign(&mut out, align as libc::size_t, size as libc::size_t); - if ret != 0 { - ptr::null_mut() - } else { - out as *mut u8 - } + aligned_malloc(size, align) + } + } + + #[cfg(target_os = "android")] + unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 { + // On android we currently target API level 9 which unfortunately + // doesn't have the `posix_memalign` API used below. Instead we use + // `memalign`, but this unfortunately has the property on some systems + // where the memory returned cannot be deallocated by `free`! + // + // Upon closer inspection, however, this appears to work just fine with + // Android, so for this platform we should be fine to call `memalign` + // (which is present in API level 9). Some helpful references could + // possibly be chromium using memalign [1], attempts at documenting that + // memalign + free is ok [2] [3], or the current source of chromium + // which still uses memalign on android [4]. 
+ // + // [1]: https://codereview.chromium.org/10796020/ + // [2]: https://code.google.com/p/android/issues/detail?id=35391 + // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579 + // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/ + // /memory/aligned_memory.cc + libc::memalign(align as libc::size_t, size as libc::size_t) as *mut u8 + } + + #[cfg(not(target_os = "android"))] + unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 { + let mut out = ptr::null_mut(); + let ret = libc::posix_memalign(&mut out, align as libc::size_t, size as libc::size_t); + if ret != 0 { + ptr::null_mut() + } else { + out as *mut u8 } } @@ -96,8 +125,10 @@ mod imp { libc::realloc(ptr as *mut libc::c_void, size as libc::size_t) as *mut u8 } else { let new_ptr = allocate(size, align); - ptr::copy(ptr, new_ptr, cmp::min(size, old_size)); - deallocate(ptr, old_size, align); + if !new_ptr.is_null() { + ptr::copy(ptr, new_ptr, cmp::min(size, old_size)); + deallocate(ptr, old_size, align); + } new_ptr } } @@ -135,6 +166,7 @@ mod imp { fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID; fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; + fn GetLastError() -> DWORD; } #[repr(C)] @@ -190,11 +222,7 @@ mod imp { HEAP_REALLOC_IN_PLACE_ONLY, ptr as LPVOID, size as SIZE_T) as *mut u8; - if new.is_null() { - old_size - } else { - size - } + if new.is_null() { old_size } else { size } } else { old_size } @@ -203,11 +231,11 @@ mod imp { pub unsafe fn deallocate(ptr: *mut u8, _old_size: usize, align: usize) { if align <= MIN_ALIGN { let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID); - debug_assert!(err != 0); + debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError()); } else { let header = get_header(ptr); let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID); - debug_assert!(err != 0); + debug_assert!(err != 
0, "Failed to free heap memory: {}", GetLastError()); } } diff --git a/src/libarena/Cargo.toml b/src/libarena/Cargo.toml new file mode 100644 index 0000000000000..b53c0a2f48bf7 --- /dev/null +++ b/src/libarena/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "arena" +version = "0.0.0" + +[lib] +name = "arena" +path = "lib.rs" +crate-type = ["dylib"] diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index cd2093984e618..6044bec2c5af7 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -15,9 +15,8 @@ //! of individual objects while the arena itself is still alive. The benefit //! of an arena is very fast allocation; just a pointer bump. //! -//! This crate has two arenas implemented: `TypedArena`, which is a simpler -//! arena but can only hold objects of a single type, and `Arena`, which is a -//! more complex, slower arena which can hold objects of any type. +//! This crate implements `TypedArena`, a simple arena that can only hold +//! objects of a single type. #![crate_name = "arena"] #![unstable(feature = "rustc_private", issue = "27812")] @@ -27,12 +26,11 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", test(no_crate_inject, attr(deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(alloc)] #![feature(core_intrinsics)] -#![feature(drop_in_place)] #![feature(heap_api)] -#![feature(raw)] #![feature(heap_api)] #![feature(staged_api)] #![feature(dropck_parametricity)] @@ -53,322 +51,7 @@ use std::slice; use alloc::heap; use alloc::raw_vec::RawVec; -struct Chunk { - data: RawVec, - /// Index of the first unused byte. - fill: Cell, - /// Indicates whether objects with destructors are stored in this chunk. 
- is_copy: Cell, -} - -impl Chunk { - fn new(size: usize, is_copy: bool) -> Chunk { - Chunk { - data: RawVec::with_capacity(size), - fill: Cell::new(0), - is_copy: Cell::new(is_copy), - } - } - - fn capacity(&self) -> usize { - self.data.cap() - } - - unsafe fn as_ptr(&self) -> *const u8 { - self.data.ptr() - } - - // Walk down a chunk, running the destructors for any objects stored - // in it. - unsafe fn destroy(&self) { - let mut idx = 0; - let buf = self.as_ptr(); - let fill = self.fill.get(); - - while idx < fill { - let tydesc_data = buf.offset(idx as isize) as *const usize; - let (tydesc, is_done) = un_bitpack_tydesc_ptr(*tydesc_data); - let (size, align) = ((*tydesc).size, (*tydesc).align); - - let after_tydesc = idx + mem::size_of::<*const TyDesc>(); - - let start = round_up(after_tydesc, align); - - if is_done { - ((*tydesc).drop_glue)(buf.offset(start as isize) as *const i8); - } - - // Find where the next tydesc lives - idx = round_up(start + size, mem::align_of::<*const TyDesc>()); - } - } -} - -/// A slower reflection-based arena that can allocate objects of any type. -/// -/// This arena uses `RawVec` as a backing store to allocate objects from. -/// For each allocated object, the arena stores a pointer to the type descriptor -/// followed by the object (potentially with alignment padding after each -/// element). When the arena is destroyed, it iterates through all of its -/// chunks, and uses the tydesc information to trace through the objects, -/// calling the destructors on them. One subtle point that needs to be -/// addressed is how to handle panics while running the user provided -/// initializer function. It is important to not run the destructor on -/// uninitialized objects, but how to detect them is somewhat subtle. Since -/// `alloc()` can be invoked recursively, it is not sufficient to simply exclude -/// the most recent object. 
To solve this without requiring extra space, we -/// use the low order bit of the tydesc pointer to encode whether the object -/// it describes has been fully initialized. -/// -/// As an optimization, objects with destructors are stored in different chunks -/// than objects without destructors. This reduces overhead when initializing -/// plain-old-data (`Copy` types) and means we don't need to waste time running -/// their destructors. -#[unstable(feature = "rustc_private", - reason = "Private to rustc", issue = "0")] -#[rustc_deprecated(since = "1.6.0-dev", reason = -"The reflection-based arena is superseded by the any-arena crate")] -pub struct Arena<'longer_than_self> { - // The heads are separated out from the list as a unbenchmarked - // microoptimization, to avoid needing to case on the list to access a head. - head: RefCell, - copy_head: RefCell, - chunks: RefCell>, - _marker: PhantomData<*mut &'longer_than_self ()>, -} - -impl<'a> Arena<'a> { - /// Allocates a new Arena with 32 bytes preallocated. - pub fn new() -> Arena<'a> { - Arena::new_with_size(32) - } - - /// Allocates a new Arena with `initial_size` bytes preallocated. - pub fn new_with_size(initial_size: usize) -> Arena<'a> { - Arena { - head: RefCell::new(Chunk::new(initial_size, false)), - copy_head: RefCell::new(Chunk::new(initial_size, true)), - chunks: RefCell::new(Vec::new()), - _marker: PhantomData, - } - } -} - -impl<'longer_than_self> Drop for Arena<'longer_than_self> { - fn drop(&mut self) { - unsafe { - self.head.borrow().destroy(); - for chunk in self.chunks.borrow().iter() { - if !chunk.is_copy.get() { - chunk.destroy(); - } - } - } - } -} - -#[inline] -fn round_up(base: usize, align: usize) -> usize { - (base.checked_add(align - 1)).unwrap() & !(align - 1) -} - -// We encode whether the object a tydesc describes has been -// initialized in the arena in the low bit of the tydesc pointer. This -// is necessary in order to properly do cleanup if a panic occurs -// during an initializer. 
-#[inline] -fn bitpack_tydesc_ptr(p: *const TyDesc, is_done: bool) -> usize { - p as usize | (is_done as usize) -} -#[inline] -fn un_bitpack_tydesc_ptr(p: usize) -> (*const TyDesc, bool) { - ((p & !1) as *const TyDesc, p & 1 == 1) -} - -// HACK(eddyb) TyDesc replacement using a trait object vtable. -// This could be replaced in the future with a custom DST layout, -// or `&'static (drop_glue, size, align)` created by a `const fn`. -// Requirements: -// * rvalue promotion (issue #1056) -// * mem::{size_of, align_of} must be const fns -struct TyDesc { - drop_glue: fn(*const i8), - size: usize, - align: usize, -} - -trait AllTypes { - fn dummy(&self) {} -} - -impl AllTypes for T {} - -unsafe fn get_tydesc() -> *const TyDesc { - use std::raw::TraitObject; - - let ptr = &*(heap::EMPTY as *const T); - - // Can use any trait that is implemented for all types. - let obj = mem::transmute::<&AllTypes, TraitObject>(ptr); - obj.vtable as *const TyDesc -} - -impl<'longer_than_self> Arena<'longer_than_self> { - // Grows a given chunk and returns `false`, or replaces it with a bigger - // chunk and returns `true`. - // This method is shared by both parts of the arena. - #[cold] - fn alloc_grow(&self, head: &mut Chunk, used_cap: usize, n_bytes: usize) -> bool { - if head.data.reserve_in_place(used_cap, n_bytes) { - // In-place reallocation succeeded. - false - } else { - // Allocate a new chunk. - let new_min_chunk_size = cmp::max(n_bytes, head.capacity()); - let new_chunk = Chunk::new((new_min_chunk_size + 1).next_power_of_two(), false); - let old_chunk = mem::replace(head, new_chunk); - if old_chunk.fill.get() != 0 { - self.chunks.borrow_mut().push(old_chunk); - } - true - } - } - - // Functions for the copyable part of the arena. 
- - #[inline] - fn alloc_copy_inner(&self, n_bytes: usize, align: usize) -> *const u8 { - let mut copy_head = self.copy_head.borrow_mut(); - let fill = copy_head.fill.get(); - let mut start = round_up(fill, align); - let mut end = start + n_bytes; - - if end > copy_head.capacity() { - if self.alloc_grow(&mut *copy_head, fill, end - fill) { - // Continuing with a newly allocated chunk - start = 0; - end = n_bytes; - copy_head.is_copy.set(true); - } - } - - copy_head.fill.set(end); - - unsafe { copy_head.as_ptr().offset(start as isize) } - } - - #[inline] - fn alloc_copy(&self, op: F) -> &mut T - where F: FnOnce() -> T - { - unsafe { - let ptr = self.alloc_copy_inner(mem::size_of::(), mem::align_of::()); - let ptr = ptr as *mut T; - ptr::write(&mut (*ptr), op()); - &mut *ptr - } - } - - // Functions for the non-copyable part of the arena. - - #[inline] - fn alloc_noncopy_inner(&self, n_bytes: usize, align: usize) -> (*const u8, *const u8) { - let mut head = self.head.borrow_mut(); - let fill = head.fill.get(); - - let mut tydesc_start = fill; - let after_tydesc = fill + mem::size_of::<*const TyDesc>(); - let mut start = round_up(after_tydesc, align); - let mut end = round_up(start + n_bytes, mem::align_of::<*const TyDesc>()); - - if end > head.capacity() { - if self.alloc_grow(&mut *head, tydesc_start, end - tydesc_start) { - // Continuing with a newly allocated chunk - tydesc_start = 0; - start = round_up(mem::size_of::<*const TyDesc>(), align); - end = round_up(start + n_bytes, mem::align_of::<*const TyDesc>()); - } - } - - head.fill.set(end); - - unsafe { - let buf = head.as_ptr(); - (buf.offset(tydesc_start as isize), - buf.offset(start as isize)) - } - } - - #[inline] - fn alloc_noncopy(&self, op: F) -> &mut T - where F: FnOnce() -> T - { - unsafe { - let tydesc = get_tydesc::(); - let (ty_ptr, ptr) = self.alloc_noncopy_inner(mem::size_of::(), mem::align_of::()); - let ty_ptr = ty_ptr as *mut usize; - let ptr = ptr as *mut T; - // Write in our tydesc along with 
a bit indicating that it - // has *not* been initialized yet. - *ty_ptr = bitpack_tydesc_ptr(tydesc, false); - // Actually initialize it - ptr::write(&mut (*ptr), op()); - // Now that we are done, update the tydesc to indicate that - // the object is there. - *ty_ptr = bitpack_tydesc_ptr(tydesc, true); - - &mut *ptr - } - } - - /// Allocates a new item in the arena, using `op` to initialize the value, - /// and returns a reference to it. - #[inline] - pub fn alloc(&self, op: F) -> &mut T - where F: FnOnce() -> T - { - unsafe { - if intrinsics::needs_drop::() { - self.alloc_noncopy(op) - } else { - self.alloc_copy(op) - } - } - } - - /// Allocates a slice of bytes of requested length. The bytes are not guaranteed to be zero - /// if the arena has previously been cleared. - /// - /// # Panics - /// - /// Panics if the requested length is too large and causes overflow. - pub fn alloc_bytes(&self, len: usize) -> &mut [u8] { - unsafe { - // Check for overflow. - self.copy_head.borrow().fill.get().checked_add(len).expect("length overflow"); - let ptr = self.alloc_copy_inner(len, 1); - intrinsics::assume(!ptr.is_null()); - slice::from_raw_parts_mut(ptr as *mut _, len) - } - } - - /// Clears the arena. Deallocates all but the longest chunk which may be reused. - pub fn clear(&mut self) { - unsafe { - self.head.borrow().destroy(); - self.head.borrow().fill.set(0); - self.copy_head.borrow().fill.set(0); - for chunk in self.chunks.borrow().iter() { - if !chunk.is_copy.get() { - chunk.destroy(); - } - } - self.chunks.borrow_mut().clear(); - } - } -} - -/// A faster arena that can hold objects of only one type. +/// An arena that can hold objects of only one type. pub struct TypedArena { /// A pointer to the next object to be allocated. ptr: Cell<*mut T>, @@ -377,7 +60,7 @@ pub struct TypedArena { /// reached, a new chunk is allocated. end: Cell<*mut T>, - /// A vector arena segments. + /// A vector of arena chunks. 
chunks: RefCell>>, /// Marker indicating that dropping the arena causes its owned @@ -386,7 +69,7 @@ pub struct TypedArena { } struct TypedArenaChunk { - /// Pointer to the next arena segment. + /// The raw storage for the arena chunk. storage: RawVec, } @@ -434,26 +117,16 @@ impl TypedArenaChunk { const PAGE: usize = 4096; impl TypedArena { - /// Creates a new `TypedArena` with preallocated space for many objects. + /// Creates a new `TypedArena`. #[inline] pub fn new() -> TypedArena { - // Reserve at least one page. - let elem_size = cmp::max(1, mem::size_of::()); - TypedArena::with_capacity(PAGE / elem_size) - } - - /// Creates a new `TypedArena` with preallocated space for the given number of - /// objects. - #[inline] - pub fn with_capacity(capacity: usize) -> TypedArena { - unsafe { - let chunk = TypedArenaChunk::::new(cmp::max(1, capacity)); - TypedArena { - ptr: Cell::new(chunk.start()), - end: Cell::new(chunk.end()), - chunks: RefCell::new(vec![chunk]), - _own: PhantomData, - } + TypedArena { + // We set both `ptr` and `end` to 0 so that the first call to + // alloc() will trigger a grow(). + ptr: Cell::new(0 as *mut T), + end: Cell::new(0 as *mut T), + chunks: RefCell::new(vec![]), + _own: PhantomData, } } @@ -461,7 +134,7 @@ impl TypedArena { #[inline] pub fn alloc(&self, object: T) -> &mut T { if self.ptr == self.end { - self.grow() + self.grow(1) } unsafe { @@ -482,35 +155,79 @@ impl TypedArena { } } + /// Allocates a slice of objects that are copy into the `TypedArena`, returning a mutable + /// reference to it. Will panic if passed a zero-sized types. 
+ /// + /// Panics: + /// - Zero-sized types + /// - Zero-length slices + #[inline] + pub fn alloc_slice(&self, slice: &[T]) -> &mut [T] + where T: Copy { + assert!(mem::size_of::() != 0); + assert!(slice.len() != 0); + + let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize; + let at_least_bytes = slice.len() * mem::size_of::(); + if available_capacity_bytes < at_least_bytes { + self.grow(slice.len()); + } + + unsafe { + let start_ptr = self.ptr.get(); + let arena_slice = slice::from_raw_parts_mut(start_ptr, slice.len()); + self.ptr.set(start_ptr.offset(arena_slice.len() as isize)); + arena_slice.copy_from_slice(slice); + arena_slice + } + } + /// Grows the arena. #[inline(never)] #[cold] - fn grow(&self) { + fn grow(&self, n: usize) { unsafe { let mut chunks = self.chunks.borrow_mut(); - let prev_capacity = chunks.last().unwrap().storage.cap(); - let new_capacity = prev_capacity.checked_mul(2).unwrap(); - if chunks.last_mut().unwrap().storage.double_in_place() { - self.end.set(chunks.last().unwrap().end()); + let (chunk, mut new_capacity); + if let Some(last_chunk) = chunks.last_mut() { + let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize; + let currently_used_cap = used_bytes / mem::size_of::(); + if last_chunk.storage.reserve_in_place(currently_used_cap, n) { + self.end.set(last_chunk.end()); + return; + } else { + let prev_capacity = last_chunk.storage.cap(); + loop { + new_capacity = prev_capacity.checked_mul(2).unwrap(); + if new_capacity >= currently_used_cap + n { + break; + } + } + } } else { - let chunk = TypedArenaChunk::::new(new_capacity); - self.ptr.set(chunk.start()); - self.end.set(chunk.end()); - chunks.push(chunk); + let elem_size = cmp::max(1, mem::size_of::()); + new_capacity = cmp::max(n, PAGE / elem_size); } + chunk = TypedArenaChunk::::new(new_capacity); + self.ptr.set(chunk.start()); + self.end.set(chunk.end()); + chunks.push(chunk); } } + /// Clears the arena. 
Deallocates all but the longest chunk which may be reused. pub fn clear(&mut self) { unsafe { // Clear the last chunk, which is partially filled. let mut chunks_borrow = self.chunks.borrow_mut(); - let last_idx = chunks_borrow.len() - 1; - self.clear_last_chunk(&mut chunks_borrow[last_idx]); - // If `T` is ZST, code below has no effect. - for mut chunk in chunks_borrow.drain(..last_idx) { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + if let Some(mut last_chunk) = chunks_borrow.pop() { + self.clear_last_chunk(&mut last_chunk); + // If `T` is ZST, code below has no effect. + for mut chunk in chunks_borrow.drain(..) { + let cap = chunk.storage.cap(); + chunk.destroy(cap); + } + chunks_borrow.push(last_chunk); } } } @@ -547,13 +264,14 @@ impl Drop for TypedArena { unsafe { // Determine how much was filled. let mut chunks_borrow = self.chunks.borrow_mut(); - let mut last_chunk = chunks_borrow.pop().unwrap(); - // Drop the contents of the last chunk. - self.clear_last_chunk(&mut last_chunk); - // The last chunk will be dropped. Destroy all other chunks. - for chunk in chunks_borrow.iter_mut() { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + if let Some(mut last_chunk) = chunks_borrow.pop() { + // Drop the contents of the last chunk. + self.clear_last_chunk(&mut last_chunk); + // The last chunk will be dropped. Destroy all other chunks. + for chunk in chunks_borrow.iter_mut() { + let cap = chunk.storage.cap(); + chunk.destroy(cap); + } } // RawVec handles deallocation of `last_chunk` and `self.chunks`. 
} @@ -566,9 +284,8 @@ unsafe impl Send for TypedArena {} mod tests { extern crate test; use self::test::Bencher; - use super::{Arena, TypedArena}; + use super::TypedArena; use std::cell::Cell; - use std::rc::Rc; #[allow(dead_code)] #[derive(Debug, Eq, PartialEq)] @@ -578,6 +295,12 @@ mod tests { z: i32, } + #[test] + pub fn test_unused() { + let arena: TypedArena = TypedArena::new(); + assert!(arena.chunks.borrow().is_empty()); + } + #[test] fn test_arena_alloc_nested() { struct Inner { @@ -614,9 +337,8 @@ mod tests { let arena = Wrap(TypedArena::new()); - let result = arena.alloc_outer(|| { - Outer { inner: arena.alloc_inner(|| Inner { value: 10 }) } - }); + let result = + arena.alloc_outer(|| Outer { inner: arena.alloc_inner(|| Inner { value: 10 }) }); assert_eq!(result.inner.value, 10); } @@ -642,12 +364,6 @@ mod tests { }) } - #[bench] - pub fn bench_copy_old_arena(b: &mut Bencher) { - let arena = Arena::new(); - b.iter(|| arena.alloc(|| Point { x: 1, y: 2, z: 3 })) - } - #[allow(dead_code)] struct Noncopy { string: String, @@ -673,22 +389,6 @@ mod tests { } } - #[test] - pub fn test_arena_zero_sized() { - let arena = Arena::new(); - let mut points = vec![]; - for _ in 0..1000 { - for _ in 0..100 { - arena.alloc(|| ()); - } - let point = arena.alloc(|| Point { x: 1, y: 2, z: 3 }); - points.push(point); - } - for point in &points { - assert_eq!(**point, Point { x: 1, y: 2, z: 3 }); - } - } - #[test] pub fn test_typed_arena_clear() { let mut arena = TypedArena::new(); @@ -700,66 +400,6 @@ mod tests { } } - #[test] - pub fn test_arena_clear() { - let mut arena = Arena::new(); - for _ in 0..10 { - arena.clear(); - for _ in 0..10000 { - arena.alloc(|| Point { x: 1, y: 2, z: 3 }); - arena.alloc(|| { - Noncopy { - string: "hello world".to_string(), - array: vec![], - } - }); - } - } - } - - #[test] - pub fn test_arena_alloc_bytes() { - let arena = Arena::new(); - for i in 0..10000 { - arena.alloc(|| Point { x: 1, y: 2, z: 3 }); - for byte in arena.alloc_bytes(i % 
42).iter_mut() { - *byte = i as u8; - } - } - } - - #[test] - fn test_arena_destructors() { - let arena = Arena::new(); - for i in 0..10 { - // Arena allocate something with drop glue to make sure it - // doesn't leak. - arena.alloc(|| Rc::new(i)); - // Allocate something with funny size and alignment, to keep - // things interesting. - arena.alloc(|| [0u8, 1u8, 2u8]); - } - } - - #[test] - #[should_panic] - fn test_arena_destructors_fail() { - let arena = Arena::new(); - // Put some stuff in the arena. - for i in 0..10 { - // Arena allocate something with drop glue to make sure it - // doesn't leak. - arena.alloc(|| Rc::new(i)); - // Allocate something with funny size and alignment, to keep - // things interesting. - arena.alloc(|| [0u8, 1, 2]); - } - // Now, panic while allocating - arena.alloc::, _>(|| { - panic!(); - }); - } - // Drop tests struct DropCounter<'a> { @@ -772,40 +412,6 @@ mod tests { } } - #[test] - fn test_arena_drop_count() { - let counter = Cell::new(0); - { - let arena = Arena::new(); - for _ in 0..100 { - // Allocate something with drop glue to make sure it doesn't leak. - arena.alloc(|| DropCounter { count: &counter }); - // Allocate something with funny size and alignment, to keep - // things interesting. - arena.alloc(|| [0u8, 1u8, 2u8]); - } - // dropping - }; - assert_eq!(counter.get(), 100); - } - - #[test] - fn test_arena_drop_on_clear() { - let counter = Cell::new(0); - for i in 0..10 { - let mut arena = Arena::new(); - for _ in 0..100 { - // Allocate something with drop glue to make sure it doesn't leak. - arena.alloc(|| DropCounter { count: &counter }); - // Allocate something with funny size and alignment, to keep - // things interesting. 
- arena.alloc(|| [0u8, 1u8, 2u8]); - } - arena.clear(); - assert_eq!(counter.get(), i * 100 + 100); - } - } - #[test] fn test_typed_arena_drop_count() { let counter = Cell::new(0); @@ -845,25 +451,6 @@ mod tests { } } - #[test] - fn test_arena_drop_small_count() { - DROP_COUNTER.with(|c| c.set(0)); - { - let arena = Arena::new(); - for _ in 0..10 { - for _ in 0..10 { - // Allocate something with drop glue to make sure it doesn't leak. - arena.alloc(|| SmallDroppable); - } - // Allocate something with funny size and alignment, to keep - // things interesting. - arena.alloc(|| [0u8, 1u8, 2u8]); - } - // dropping - }; - assert_eq!(DROP_COUNTER.with(|c| c.get()), 100); - } - #[test] fn test_typed_arena_drop_small_count() { DROP_COUNTER.with(|c| c.set(0)); @@ -898,17 +485,4 @@ mod tests { }); }) } - - #[bench] - pub fn bench_noncopy_old_arena(b: &mut Bencher) { - let arena = Arena::new(); - b.iter(|| { - arena.alloc(|| { - Noncopy { - string: "hello world".to_string(), - array: vec![1, 2, 3, 4, 5], - } - }) - }) - } } diff --git a/src/libbacktrace/ChangeLog b/src/libbacktrace/ChangeLog index 2afa4705539aa..acc07047f6729 100644 --- a/src/libbacktrace/ChangeLog +++ b/src/libbacktrace/ChangeLog @@ -1,15 +1,33 @@ +2016-05-18 Uros Bizjak + + PR target/71161 + * elf.c (phdr_callback) [__i386__]: Add + __attribute__((__force_align_arg_pointer__)). + +2016-03-02 Maxim Ostapenko + + * elf.c (backtrace_initialize): Properly initialize elf_fileline_fn to + avoid possible crash. + (elf_add): Don't set *fileline_fn to elf_nodebug value in case of + missing debug info anymore. + +2016-02-06 John David Anglin + + * mmap.c (MAP_FAILED): Define if not defined. + 2016-01-04 Jakub Jelinek Update copyright years. 2015-12-18 Andris Pavenis - * configure.ac: Specify that DJGPP do not have mmap even when sys/mman.h exists + * configure.ac: Specify that DJGPP do not have mmap + even when sys/mman.h exists. 
* configure: Regenerate 2015-12-09 John David Anglin - PR 68115/libfortran + PR libgfortran/68115 * configure.ac: Set libbacktrace_cv_sys_sync to no on hppa*-*-hpux*. * configure: Regenerate. * elf.c (backtrace_initialize): Cast __sync_bool_compare_and_swap call diff --git a/src/libbacktrace/ansidecl.h b/src/libbacktrace/ansidecl.h index 6e4bfc21f25fb..4087dd7291750 100644 --- a/src/libbacktrace/ansidecl.h +++ b/src/libbacktrace/ansidecl.h @@ -1,4 +1,4 @@ -/* ANSI and traditional C compatability macros +/* ANSI and traditional C compatibility macros Copyright (C) 1991-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. diff --git a/src/libbacktrace/elf.c b/src/libbacktrace/elf.c index 05cc5c04734b7..81ba3440ab7d1 100644 --- a/src/libbacktrace/elf.c +++ b/src/libbacktrace/elf.c @@ -791,7 +791,6 @@ elf_add (struct backtrace_state *state, int descriptor, uintptr_t base_address, { if (!backtrace_close (descriptor, error_callback, data)) goto fail; - *fileline_fn = elf_nodebug; return 1; } @@ -867,6 +866,9 @@ struct phdr_data libraries. */ static int +#ifdef __i386__ +__attribute__ ((__force_align_arg_pointer__)) +#endif phdr_callback (struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, void *pdata) { @@ -925,7 +927,7 @@ backtrace_initialize (struct backtrace_state *state, int descriptor, int ret; int found_sym; int found_dwarf; - fileline elf_fileline_fn; + fileline elf_fileline_fn = elf_nodebug; struct phdr_data pd; ret = elf_add (state, descriptor, 0, error_callback, data, &elf_fileline_fn, diff --git a/src/libbacktrace/mmap.c b/src/libbacktrace/mmap.c index 0ed4802d02de4..138ef70711a02 100644 --- a/src/libbacktrace/mmap.c +++ b/src/libbacktrace/mmap.c @@ -50,6 +50,10 @@ POSSIBILITY OF SUCH DAMAGE. */ #define MAP_ANONYMOUS MAP_ANON #endif +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + /* A list of free memory blocks. 
*/ struct backtrace_freelist_struct diff --git a/src/libbacktrace/pecoff.c b/src/libbacktrace/pecoff.c index 31126cf4741c7..04e0bafb14981 100644 --- a/src/libbacktrace/pecoff.c +++ b/src/libbacktrace/pecoff.c @@ -602,6 +602,9 @@ coff_add (struct backtrace_state *state, int descriptor, const b_coff_section_header *sects; struct backtrace_view str_view; int str_view_valid; + // NOTE: upstream this is a `size_t` but this was fixed in Rust commit + // 55e2b7e1b, see #33729 for more info. If you see this in a diff + // against the upstream libbacktrace, that's what's going on. uint32_t str_size; off_t str_off; struct backtrace_view syms_view; diff --git a/src/libcollections/Cargo.toml b/src/libcollections/Cargo.toml new file mode 100644 index 0000000000000..3056977d224cb --- /dev/null +++ b/src/libcollections/Cargo.toml @@ -0,0 +1,21 @@ +[package] +authors = ["The Rust Project Developers"] +name = "collections" +version = "0.0.0" + +[lib] +name = "collections" +path = "lib.rs" + +[dependencies] +alloc = { path = "../liballoc" } +core = { path = "../libcore" } +rustc_unicode = { path = "../librustc_unicode" } + +[[test]] +name = "collectionstest" +path = "../libcollectionstest/lib.rs" + +[[bench]] +name = "collectionstest" +path = "../libcollectionstest/lib.rs" diff --git a/src/libcollections/binary_heap.rs b/src/libcollections/binary_heap.rs index bd329949618e5..b4be8a43213d8 100644 --- a/src/libcollections/binary_heap.rs +++ b/src/libcollections/binary_heap.rs @@ -151,14 +151,18 @@ #![allow(missing_docs)] #![stable(feature = "rust1", since = "1.0.0")] -use core::iter::FromIterator; +use core::ops::{Deref, DerefMut}; +use core::iter::{FromIterator, FusedIterator}; use core::mem::swap; +use core::mem::size_of; use core::ptr; use core::fmt; use slice; use vec::{self, Vec}; +use super::SpecExtend; + /// A priority queue implemented with a binary heap. /// /// This will be a max-heap. 
@@ -167,11 +171,85 @@ use vec::{self, Vec}; /// item's ordering relative to any other item, as determined by the `Ord` /// trait, changes while it is in the heap. This is normally only possible /// through `Cell`, `RefCell`, global state, I/O, or unsafe code. +/// +/// # Examples +/// +/// ``` +/// use std::collections::BinaryHeap; +/// +/// // Type inference lets us omit an explicit type signature (which +/// // would be `BinaryHeap` in this example). +/// let mut heap = BinaryHeap::new(); +/// +/// // We can use peek to look at the next item in the heap. In this case, +/// // there's no items in there yet so we get None. +/// assert_eq!(heap.peek(), None); +/// +/// // Let's add some scores... +/// heap.push(1); +/// heap.push(5); +/// heap.push(2); +/// +/// // Now peek shows the most important item in the heap. +/// assert_eq!(heap.peek(), Some(&5)); +/// +/// // We can check the length of a heap. +/// assert_eq!(heap.len(), 3); +/// +/// // We can iterate over the items in the heap, although they are returned in +/// // a random order. +/// for x in &heap { +/// println!("{}", x); +/// } +/// +/// // If we instead pop these scores, they should come back in order. +/// assert_eq!(heap.pop(), Some(5)); +/// assert_eq!(heap.pop(), Some(2)); +/// assert_eq!(heap.pop(), Some(1)); +/// assert_eq!(heap.pop(), None); +/// +/// // We can clear the heap of any remaining items. +/// heap.clear(); +/// +/// // The heap should now be empty. +/// assert!(heap.is_empty()) +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct BinaryHeap { data: Vec, } +/// A container object that represents the result of the [`peek_mut()`] method +/// on `BinaryHeap`. See its documentation for details. 
+/// +/// [`peek_mut()`]: struct.BinaryHeap.html#method.peek_mut +#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] +pub struct PeekMut<'a, T: 'a + Ord> { + heap: &'a mut BinaryHeap +} + +#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] +impl<'a, T: Ord> Drop for PeekMut<'a, T> { + fn drop(&mut self) { + self.heap.sift_down(0); + } +} + +#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] +impl<'a, T: Ord> Deref for PeekMut<'a, T> { + type Target = T; + fn deref(&self) -> &T { + &self.heap.data[0] + } +} + +#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] +impl<'a, T: Ord> DerefMut for PeekMut<'a, T> { + fn deref_mut(&mut self) -> &mut T { + &mut self.heap.data[0] + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl Clone for BinaryHeap { fn clone(&self) -> Self { @@ -185,6 +263,7 @@ impl Clone for BinaryHeap { #[stable(feature = "rust1", since = "1.0.0")] impl Default for BinaryHeap { + /// Creates an empty `BinaryHeap`. #[inline] fn default() -> BinaryHeap { BinaryHeap::new() @@ -203,6 +282,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -220,6 +301,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::with_capacity(10); @@ -235,6 +318,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]); @@ -253,6 +338,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -269,10 +356,47 @@ impl BinaryHeap { self.data.get(0) } + /// Returns a mutable reference to the greatest item in the binary heap, or + /// `None` if it is empty. 
+ /// + /// Note: If the `PeekMut` value is leaked, the heap may be in an + /// inconsistent state. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::new(); + /// assert!(heap.peek_mut().is_none()); + /// + /// heap.push(1); + /// heap.push(5); + /// heap.push(2); + /// { + /// let mut val = heap.peek_mut().unwrap(); + /// *val = 0; + /// } + /// assert_eq!(heap.peek(), Some(&2)); + /// ``` + #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] + pub fn peek_mut(&mut self) -> Option> { + if self.is_empty() { + None + } else { + Some(PeekMut { + heap: self + }) + } + } + /// Returns the number of elements the binary heap can hold without reallocating. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::with_capacity(100); @@ -297,6 +421,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -318,6 +444,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -331,6 +459,19 @@ impl BinaryHeap { } /// Discards as much additional capacity as possible. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); + /// + /// assert!(heap.capacity() >= 100); + /// heap.shrink_to_fit(); + /// assert!(heap.capacity() == 0); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn shrink_to_fit(&mut self) { self.data.shrink_to_fit(); @@ -341,6 +482,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::from(vec![1, 3]); @@ -364,6 +507,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -386,8 +531,11 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// #![feature(binary_heap_extras)] + /// #![allow(deprecated)] /// /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -402,6 +550,7 @@ impl BinaryHeap { #[unstable(feature = "binary_heap_extras", reason = "needs to be audited", issue = "28147")] + #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")] pub fn push_pop(&mut self, mut item: T) -> T { match self.data.get_mut(0) { None => return item, @@ -424,8 +573,11 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// #![feature(binary_heap_extras)] + /// #![allow(deprecated)] /// /// use std::collections::BinaryHeap; /// let mut heap = BinaryHeap::new(); @@ -438,6 +590,7 @@ impl BinaryHeap { #[unstable(feature = "binary_heap_extras", reason = "needs to be audited", issue = "28147")] + #[rustc_deprecated(since = "1.13.0", reason = "use `peek_mut` instead")] pub fn replace(&mut self, mut item: T) -> Option { if !self.is_empty() { swap(&mut item, &mut self.data[0]); @@ -454,6 +607,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let heap = 
BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]); @@ -474,6 +629,8 @@ impl BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// @@ -571,12 +728,40 @@ impl BinaryHeap { } /// Returns the length of the binary heap. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let heap = BinaryHeap::from(vec![1, 3]); + /// + /// assert_eq!(heap.len(), 2); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { self.data.len() } /// Checks if the binary heap is empty. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::new(); + /// + /// assert!(heap.is_empty()); + /// + /// heap.push(3); + /// heap.push(5); + /// heap.push(1); + /// + /// assert!(!heap.is_empty()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn is_empty(&self) -> bool { self.len() == 0 @@ -585,6 +770,23 @@ impl BinaryHeap { /// Clears the binary heap, returning an iterator over the removed elements. /// /// The elements are removed in arbitrary order. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::from(vec![1, 3]); + /// + /// assert!(!heap.is_empty()); + /// + /// for x in heap.drain() { + /// println!("{}", x); + /// } + /// + /// assert!(heap.is_empty()); + /// ``` #[inline] #[stable(feature = "drain", since = "1.6.0")] pub fn drain(&mut self) -> Drain { @@ -592,10 +794,86 @@ impl BinaryHeap { } /// Drops all items from the binary heap. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::from(vec![1, 3]); + /// + /// assert!(!heap.is_empty()); + /// + /// heap.clear(); + /// + /// assert!(heap.is_empty()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { self.drain(); } + + fn rebuild(&mut self) { + let mut n = self.len() / 2; + while n > 0 { + n -= 1; + self.sift_down(n); + } + } + + /// Moves all the elements of `other` into `self`, leaving `other` empty. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// + /// let v = vec![-10, 1, 2, 3, 3]; + /// let mut a = BinaryHeap::from(v); + /// + /// let v = vec![-20, 5, 43]; + /// let mut b = BinaryHeap::from(v); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); + /// assert!(b.is_empty()); + /// ``` + #[stable(feature = "binary_heap_append", since = "1.11.0")] + pub fn append(&mut self, other: &mut Self) { + if self.len() < other.len() { + swap(self, other); + } + + if other.is_empty() { + return; + } + + #[inline(always)] + fn log2_fast(x: usize) -> usize { + 8 * size_of::() - (x.leading_zeros() as usize) - 1 + } + + // `rebuild` takes O(len1 + len2) operations + // and about 2 * (len1 + len2) comparisons in the worst case + // while `extend` takes O(len2 * log_2(len1)) operations + // and about 1 * len2 * log_2(len1) comparisons in the worst case, + // assuming len1 >= len2. + #[inline] + fn better_to_rebuild(len1: usize, len2: usize) -> bool { + 2 * (len1 + len2) < len2 * log2_fast(len1) + } + + if better_to_rebuild(self.len(), other.len()) { + self.data.append(&mut other.data); + self.rebuild(); + } else { + self.extend(other.drain()); + } + } } /// Hole represents a hole in a slice i.e. 
an index without valid value @@ -611,58 +889,61 @@ struct Hole<'a, T: 'a> { impl<'a, T> Hole<'a, T> { /// Create a new Hole at index `pos`. - fn new(data: &'a mut [T], pos: usize) -> Self { - unsafe { - let elt = ptr::read(&data[pos]); - Hole { - data: data, - elt: Some(elt), - pos: pos, - } + /// + /// Unsafe because pos must be within the data slice. + #[inline] + unsafe fn new(data: &'a mut [T], pos: usize) -> Self { + debug_assert!(pos < data.len()); + let elt = ptr::read(&data[pos]); + Hole { + data: data, + elt: Some(elt), + pos: pos, } } - #[inline(always)] + #[inline] fn pos(&self) -> usize { self.pos } /// Return a reference to the element removed - #[inline(always)] + #[inline] fn element(&self) -> &T { self.elt.as_ref().unwrap() } /// Return a reference to the element at `index`. /// - /// Panics if the index is out of bounds. - /// - /// Unsafe because index must not equal pos. - #[inline(always)] + /// Unsafe because index must be within the data slice and not equal to pos. + #[inline] unsafe fn get(&self, index: usize) -> &T { debug_assert!(index != self.pos); - &self.data[index] + debug_assert!(index < self.data.len()); + self.data.get_unchecked(index) } /// Move hole to new location /// - /// Unsafe because index must not equal pos. - #[inline(always)] + /// Unsafe because index must be within the data slice and not equal to pos. 
+ #[inline] unsafe fn move_to(&mut self, index: usize) { debug_assert!(index != self.pos); - let index_ptr: *const _ = &self.data[index]; - let hole_ptr = &mut self.data[self.pos]; + debug_assert!(index < self.data.len()); + let index_ptr: *const _ = self.data.get_unchecked(index); + let hole_ptr = self.data.get_unchecked_mut(self.pos); ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1); self.pos = index; } } impl<'a, T> Drop for Hole<'a, T> { + #[inline] fn drop(&mut self) { // fill the hole again unsafe { let pos = self.pos; - ptr::write(&mut self.data[pos], self.elt.take().unwrap()); + ptr::write(self.data.get_unchecked_mut(pos), self.elt.take().unwrap()); } } } @@ -707,8 +988,12 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + /// An iterator that moves out of a `BinaryHeap`. #[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] pub struct IntoIter { iter: vec::IntoIter, } @@ -739,13 +1024,16 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + /// An iterator that drains a `BinaryHeap`. 
#[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, T: 'a> { iter: vec::Drain<'a, T>, } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> Iterator for Drain<'a, T> { type Item = T; @@ -760,7 +1048,7 @@ impl<'a, T: 'a> Iterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[inline] fn next_back(&mut self) -> Option { @@ -768,18 +1056,17 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl From> for BinaryHeap { fn from(vec: Vec) -> BinaryHeap { let mut heap = BinaryHeap { data: vec }; - let mut n = heap.len() / 2; - while n > 0 { - n -= 1; - heap.sift_down(n); - } + heap.rebuild(); heap } } @@ -809,6 +1096,8 @@ impl IntoIterator for BinaryHeap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BinaryHeap; /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]); @@ -836,13 +1125,32 @@ impl<'a, T> IntoIterator for &'a BinaryHeap where T: Ord { #[stable(feature = "rust1", since = "1.0.0")] impl Extend for BinaryHeap { - fn extend>(&mut self, iterable: I) { - let iter = iterable.into_iter(); - let (lower, _) = iter.size_hint(); + #[inline] + fn extend>(&mut self, iter: I) { + >::spec_extend(self, iter); + } +} + +impl> SpecExtend for BinaryHeap { + default fn spec_extend(&mut self, iter: I) { + self.extend_desugared(iter.into_iter()); + } +} + +impl SpecExtend> for BinaryHeap { + fn spec_extend(&mut self, ref mut other: BinaryHeap) { + self.append(other); + } +} + +impl BinaryHeap { + fn extend_desugared>(&mut self, iter: I) { + let iterator 
= iter.into_iter(); + let (lower, _) = iterator.size_hint(); self.reserve(lower); - for elem in iter { + for elem in iterator { self.push(elem); } } diff --git a/src/libcollections/borrow.rs b/src/libcollections/borrow.rs index f174cc09bcd35..37618b7600a04 100644 --- a/src/libcollections/borrow.rs +++ b/src/libcollections/borrow.rs @@ -12,15 +12,12 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::clone::Clone; -use core::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; -use core::convert::AsRef; +use core::cmp::Ordering; use core::hash::{Hash, Hasher}; -use core::marker::Sized; -use core::ops::Deref; -use core::option::Option; +use core::ops::{Add, AddAssign, Deref}; use fmt; +use string::String; use self::Cow::*; @@ -49,6 +46,18 @@ pub trait ToOwned { type Owned: Borrow; /// Creates owned data from borrowed data, usually by cloning. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s = "a"; // &str + /// let ss = s.to_owned(); // String + /// + /// let v = &[1, 2]; // slice + /// let vv = v.to_owned(); // Vec + /// ``` #[stable(feature = "rust1", since = "1.0.0")] fn to_owned(&self) -> Self::Owned; } @@ -78,16 +87,29 @@ impl ToOwned for T where T: Clone { /// ``` /// use std::borrow::Cow; /// -/// # #[allow(dead_code)] /// fn abs_all(input: &mut Cow<[i32]>) { /// for i in 0..input.len() { /// let v = input[i]; /// if v < 0 { -/// // clones into a vector the first time (if not already owned) +/// // Clones into a vector if not already owned. /// input.to_mut()[i] = -v; /// } /// } /// } +/// +/// // No clone occurs because `input` doesn't need to be mutated. +/// let slice = [0, 1, 2]; +/// let mut input = Cow::from(&slice[..]); +/// abs_all(&mut input); +/// +/// // Clone occurs because `input` needs to be mutated. +/// let slice = [-1, 0, 1]; +/// let mut input = Cow::from(&slice[..]); +/// abs_all(&mut input); +/// +/// // No clone occurs because `input` is already owned. 
+/// let mut input = Cow::from(vec![-1, 0, 1]); +/// abs_all(&mut input); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub enum Cow<'a, B: ?Sized + 'a> @@ -95,12 +117,12 @@ pub enum Cow<'a, B: ?Sized + 'a> { /// Borrowed data. #[stable(feature = "rust1", since = "1.0.0")] - Borrowed(#[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] &'a B), + Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B), /// Owned data. #[stable(feature = "rust1", since = "1.0.0")] Owned( - #[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] ::Owned + #[stable(feature = "rust1", since = "1.0.0")] ::Owned ), } @@ -138,7 +160,10 @@ impl<'a, B: ?Sized> Cow<'a, B> where B: ToOwned { match *self { Borrowed(borrowed) => { *self = Owned(borrowed.to_owned()); - self.to_mut() + match *self { + Borrowed(..) => unreachable!(), + Owned(ref mut owned) => owned, + } } Owned(ref mut owned) => owned, } @@ -236,6 +261,17 @@ impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> } } +#[stable(feature = "default", since = "1.11.0")] +impl<'a, B: ?Sized> Default for Cow<'a, B> + where B: ToOwned, + ::Owned: Default +{ + /// Creates an owned Cow<'a, B> with the default value for the contained owned value. + fn default() -> Cow<'a, B> { + Owned(::Owned::default()) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned { #[inline] @@ -244,28 +280,68 @@ impl<'a, B: ?Sized> Hash for Cow<'a, B> where B: Hash + ToOwned { } } -/// Trait for moving into a `Cow`. 
-#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`", - issue = "27735")] -#[rustc_deprecated(since = "1.7.0", - reason = "conflicts with Into, may return with specialization")] -pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { - /// Moves `self` into `Cow` - fn into_cow(self) -> Cow<'a, B>; -} - #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] -impl<'a, B: ?Sized> IntoCow<'a, B> for Cow<'a, B> where B: ToOwned { - fn into_cow(self) -> Cow<'a, B> { +impl<'a, T: ?Sized + ToOwned> AsRef for Cow<'a, T> { + fn as_ref(&self) -> &T { self } } -#[stable(feature = "rust1", since = "1.0.0")] -#[allow(deprecated)] -impl<'a, T: ?Sized + ToOwned> AsRef for Cow<'a, T> { - fn as_ref(&self) -> &T { +#[stable(feature = "cow_add", since = "1.14.0")] +impl<'a> Add<&'a str> for Cow<'a, str> { + type Output = Cow<'a, str>; + + #[inline] + fn add(mut self, rhs: &'a str) -> Self::Output { + self += rhs; + self + } +} + +#[stable(feature = "cow_add", since = "1.14.0")] +impl<'a> Add> for Cow<'a, str> { + type Output = Cow<'a, str>; + + #[inline] + fn add(mut self, rhs: Cow<'a, str>) -> Self::Output { + self += rhs; self } } + +#[stable(feature = "cow_add", since = "1.14.0")] +impl<'a> AddAssign<&'a str> for Cow<'a, str> { + fn add_assign(&mut self, rhs: &'a str) { + if self.is_empty() { + *self = Cow::Borrowed(rhs) + } else if rhs.is_empty() { + return; + } else { + if let Cow::Borrowed(lhs) = *self { + let mut s = String::with_capacity(lhs.len() + rhs.len()); + s.push_str(lhs); + *self = Cow::Owned(s); + } + self.to_mut().push_str(rhs); + } + } +} + +#[stable(feature = "cow_add", since = "1.14.0")] +impl<'a> AddAssign> for Cow<'a, str> { + fn add_assign(&mut self, rhs: Cow<'a, str>) { + if self.is_empty() { + *self = rhs + } else if rhs.is_empty() { + return; + } else { + if let Cow::Borrowed(lhs) = *self { + let mut s = String::with_capacity(lhs.len() + rhs.len()); + s.push_str(lhs); + *self = Cow::Owned(s); + } + 
self.to_mut().push_str(&rhs); + } + } +} diff --git a/src/libcollections/btree/map.rs b/src/libcollections/btree/map.rs index 492263da2bc5b..788236c24d063 100644 --- a/src/libcollections/btree/map.rs +++ b/src/libcollections/btree/map.rs @@ -11,15 +11,15 @@ use core::cmp::Ordering; use core::fmt::Debug; use core::hash::{Hash, Hasher}; -use core::iter::FromIterator; +use core::iter::{FromIterator, Peekable, FusedIterator}; use core::marker::PhantomData; use core::ops::Index; use core::{fmt, intrinsics, mem, ptr}; use borrow::Borrow; -use Bound::{self, Included, Excluded, Unbounded}; +use Bound::{self, Excluded, Included, Unbounded}; -use super::node::{self, NodeRef, Handle, marker}; +use super::node::{self, Handle, NodeRef, marker}; use super::search; use super::node::InsertResult::*; @@ -56,40 +56,117 @@ use self::Entry::*; /// however, performance is excellent. /// /// It is a logic error for a key to be modified in such a way that the key's ordering relative to -/// any other key, as determined by the `Ord` trait, changes while it is in the map. This is -/// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code. +/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is +/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. +/// +/// [`Ord`]: ../../std/cmp/trait.Ord.html +/// [`Cell`]: ../../std/cell/struct.Cell.html +/// [`RefCell`]: ../../std/cell/struct.RefCell.html +/// +/// # Examples +/// +/// ``` +/// use std::collections::BTreeMap; +/// +/// // type inference lets us omit an explicit type signature (which +/// // would be `BTreeMap<&str, &str>` in this example). +/// let mut movie_reviews = BTreeMap::new(); +/// +/// // review some movies. 
+/// movie_reviews.insert("Office Space", "Deals with real issues in the workplace."); +/// movie_reviews.insert("Pulp Fiction", "Masterpiece."); +/// movie_reviews.insert("The Godfather", "Very enjoyable."); +/// movie_reviews.insert("The Blues Brothers", "Eye lyked it alot."); +/// +/// // check for a specific one. +/// if !movie_reviews.contains_key("Les Misérables") { +/// println!("We've got {} reviews, but Les Misérables ain't one.", +/// movie_reviews.len()); +/// } +/// +/// // oops, this review has a lot of spelling mistakes, let's delete it. +/// movie_reviews.remove("The Blues Brothers"); +/// +/// // look up the values associated with some keys. +/// let to_find = ["Up!", "Office Space"]; +/// for book in &to_find { +/// match movie_reviews.get(book) { +/// Some(review) => println!("{}: {}", book, review), +/// None => println!("{} is unreviewed.", book) +/// } +/// } +/// +/// // iterate over everything. +/// for (movie, review) in &movie_reviews { +/// println!("{}: \"{}\"", movie, review); +/// } +/// ``` +/// +/// `BTreeMap` also implements an [`Entry API`](#method.entry), which allows +/// for more complex methods of getting, setting, updating and removing keys and +/// their values: +/// +/// ``` +/// use std::collections::BTreeMap; +/// +/// // type inference lets us omit an explicit type signature (which +/// // would be `BTreeMap<&str, u8>` in this example). 
+/// let mut player_stats = BTreeMap::new(); +/// +/// fn random_stat_buff() -> u8 { +/// // could actually return some random value here - let's just return +/// // some fixed value for now +/// 42 +/// } +/// +/// // insert a key only if it doesn't already exist +/// player_stats.entry("health").or_insert(100); +/// +/// // insert a key using a function that provides a new value only if it +/// // doesn't already exist +/// player_stats.entry("defence").or_insert_with(random_stat_buff); +/// +/// // update a key, guarding against the key possibly not being set +/// let stat = player_stats.entry("attack").or_insert(100); +/// *stat += random_stat_buff(); +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub struct BTreeMap { root: node::Root, - length: usize + length: usize, } +#[stable(feature = "btree_drop", since = "1.7.0")] impl Drop for BTreeMap { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { unsafe { - for _ in ptr::read(self).into_iter() { } + for _ in ptr::read(self).into_iter() { + } } } } +#[stable(feature = "rust1", since = "1.0.0")] impl Clone for BTreeMap { fn clone(&self) -> BTreeMap { - fn clone_subtree( - node: node::NodeRef) - -> BTreeMap { + fn clone_subtree(node: node::NodeRef) + -> BTreeMap { match node.force() { Leaf(leaf) => { let mut out_tree = BTreeMap { root: node::Root::new_leaf(), - length: 0 + length: 0, }; { let mut out_node = match out_tree.root.as_mut().force() { Leaf(leaf) => leaf, - Internal(_) => unreachable!() + Internal(_) => unreachable!(), }; let mut in_edge = leaf.first_edge(); @@ -103,7 +180,7 @@ impl Clone for BTreeMap { } out_tree - }, + } Internal(internal) => { let mut out_tree = clone_subtree(internal.first_edge().descend()); @@ -150,7 +227,7 @@ impl super::Recover for BTreeMap fn get(&self, key: &Q) -> Option<&K> { match search::search_tree(self.root.as_ref(), key) { Found(handle) => Some(handle.into_kv().0), - GoDown(_) => None + GoDown(_) => None, } } @@ -158,12 +235,14 @@ impl super::Recover for 
BTreeMap match search::search_tree(self.root.as_mut(), key) { Found(handle) => { Some(OccupiedEntry { - handle: handle, - length: &mut self.length, - _marker: PhantomData, - }.remove_kv().0) - }, - GoDown(_) => None + handle: handle, + length: &mut self.length, + _marker: PhantomData, + } + .remove_kv() + .0) + } + GoDown(_) => None, } } @@ -176,7 +255,8 @@ impl super::Recover for BTreeMap handle: handle, length: &mut self.length, _marker: PhantomData, - }.insert(()); + } + .insert(()); None } } @@ -187,14 +267,14 @@ impl super::Recover for BTreeMap #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, K: 'a, V: 'a> { range: Range<'a, K, V>, - length: usize + length: usize, } /// A mutable iterator over a BTreeMap's entries. #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, K: 'a, V: 'a> { range: RangeMut<'a, K, V>, - length: usize + length: usize, } /// An owning iterator over a BTreeMap's entries. @@ -202,7 +282,7 @@ pub struct IterMut<'a, K: 'a, V: 'a> { pub struct IntoIter { front: Handle, marker::Edge>, back: Handle, marker::Edge>, - length: usize + length: usize, } /// An iterator over a BTreeMap's keys. @@ -217,10 +297,16 @@ pub struct Values<'a, K: 'a, V: 'a> { inner: Iter<'a, K, V>, } +/// A mutable iterator over a BTreeMap's values. +#[stable(feature = "map_values_mut", since = "1.10.0")] +pub struct ValuesMut<'a, K: 'a, V: 'a> { + inner: IterMut<'a, K, V>, +} + /// An iterator over a sub-range of BTreeMap's entries. pub struct Range<'a, K: 'a, V: 'a> { front: Handle, K, V, marker::Leaf>, marker::Edge>, - back: Handle, K, V, marker::Leaf>, marker::Edge> + back: Handle, K, V, marker::Leaf>, marker::Edge>, } /// A mutable iterator over a sub-range of BTreeMap's entries. @@ -233,22 +319,40 @@ pub struct RangeMut<'a, K: 'a, V: 'a> { } /// A view into a single entry in a map, which may either be vacant or occupied. +/// This enum is constructed from the [`entry`] method on [`BTreeMap`]. 
+/// +/// [`BTreeMap`]: struct.BTreeMap.html +/// [`entry`]: struct.BTreeMap.html#method.entry #[stable(feature = "rust1", since = "1.0.0")] pub enum Entry<'a, K: 'a, V: 'a> { /// A vacant Entry #[stable(feature = "rust1", since = "1.0.0")] - Vacant( - #[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] VacantEntry<'a, K, V> - ), + Vacant(#[stable(feature = "rust1", since = "1.0.0")] + VacantEntry<'a, K, V>), /// An occupied Entry #[stable(feature = "rust1", since = "1.0.0")] - Occupied( - #[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] OccupiedEntry<'a, K, V> - ), + Occupied(#[stable(feature = "rust1", since = "1.0.0")] + OccupiedEntry<'a, K, V>), } -/// A vacant Entry. +#[stable(feature= "debug_btree_map", since = "1.12.0")] +impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for Entry<'a, K, V> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Vacant(ref v) => f.debug_tuple("Entry") + .field(v) + .finish(), + Occupied(ref o) => f.debug_tuple("Entry") + .field(o) + .finish(), + } + } +} + +/// A vacant Entry. It is part of the [`Entry`] enum. +/// +/// [`Entry`]: enum.Entry.html #[stable(feature = "rust1", since = "1.0.0")] pub struct VacantEntry<'a, K: 'a, V: 'a> { key: K, @@ -259,14 +363,21 @@ pub struct VacantEntry<'a, K: 'a, V: 'a> { _marker: PhantomData<&'a mut (K, V)>, } -/// An occupied Entry. +#[stable(feature= "debug_btree_map", since = "1.12.0")] +impl<'a, K: 'a + Debug + Ord, V: 'a> Debug for VacantEntry<'a, K, V> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("VacantEntry") + .field(self.key()) + .finish() + } +} + +/// An occupied Entry. It is part of the [`Entry`] enum. 
+/// +/// [`Entry`]: enum.Entry.html #[stable(feature = "rust1", since = "1.0.0")] pub struct OccupiedEntry<'a, K: 'a, V: 'a> { - handle: Handle, - K, V, - marker::LeafOrInternal - >, marker::KV>, + handle: Handle, K, V, marker::LeafOrInternal>, marker::KV>, length: &'a mut usize, @@ -274,13 +385,42 @@ pub struct OccupiedEntry<'a, K: 'a, V: 'a> { _marker: PhantomData<&'a mut (K, V)>, } +#[stable(feature= "debug_btree_map", since = "1.12.0")] +impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +// An iterator for merging two sorted sequences into one +struct MergeIter> { + left: Peekable, + right: Peekable, +} + impl BTreeMap { /// Makes a new empty BTreeMap with a reasonable choice for B. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map = BTreeMap::new(); + /// + /// // entries can now be inserted into the empty map + /// map.insert(1, "a"); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> BTreeMap { BTreeMap { root: node::Root::new_leaf(), - length: 0 + length: 0, } } @@ -288,6 +428,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -309,6 +451,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -318,10 +462,13 @@ impl BTreeMap { /// assert_eq!(map.get(&2), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn get(&self, key: &Q) -> Option<&V> where K: Borrow, Q: Ord { + pub fn get(&self, key: &Q) -> Option<&V> + where K: Borrow, + Q: Ord + { match search::search_tree(self.root.as_ref(), key) { Found(handle) => Some(handle.into_kv().1), - GoDown(_) => None + GoDown(_) => None, } } @@ -332,6 +479,8 @@ impl BTreeMap { /// 
/// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -341,7 +490,10 @@ impl BTreeMap { /// assert_eq!(map.contains_key(&2), false); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn contains_key(&self, key: &Q) -> bool where K: Borrow, Q: Ord { + pub fn contains_key(&self, key: &Q) -> bool + where K: Borrow, + Q: Ord + { self.get(key).is_some() } @@ -352,6 +504,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -364,10 +518,13 @@ impl BTreeMap { /// ``` // See `get` for implementation notes, this is basically a copy-paste with mut's added #[stable(feature = "rust1", since = "1.0.0")] - pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> where K: Borrow, Q: Ord { + pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> + where K: Borrow, + Q: Ord + { match search::search_tree(self.root.as_mut(), key) { Found(handle) => Some(handle.into_kv_mut().1), - GoDown(_) => None + GoDown(_) => None, } } @@ -375,14 +532,17 @@ impl BTreeMap { /// /// If the map did not have this key present, `None` is returned. /// - /// If the map did have this key present, the key is not updated, the - /// value is updated and the old value is returned. - /// See the [module-level documentation] for more. + /// If the map did have this key present, the value is updated, and the old + /// value is returned. The key is not updated, though; this matters for + /// types that can be `==` without being identical. See the [module-level + /// documentation] for more. 
/// /// [module-level documentation]: index.html#insert-and-complex-keys /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -413,6 +573,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -422,19 +584,77 @@ impl BTreeMap { /// assert_eq!(map.remove(&1), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn remove(&mut self, key: &Q) -> Option where K: Borrow, Q: Ord { + pub fn remove(&mut self, key: &Q) -> Option + where K: Borrow, + Q: Ord + { match search::search_tree(self.root.as_mut(), key) { Found(handle) => { Some(OccupiedEntry { - handle: handle, - length: &mut self.length, - _marker: PhantomData, - }.remove()) - }, - GoDown(_) => None + handle: handle, + length: &mut self.length, + _marker: PhantomData, + } + .remove()) + } + GoDown(_) => None, } } + /// Moves all elements from `other` into `Self`, leaving `other` empty. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// + /// let mut b = BTreeMap::new(); + /// b.insert(3, "d"); + /// b.insert(4, "e"); + /// b.insert(5, "f"); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.len(), 5); + /// assert_eq!(b.len(), 0); + /// + /// assert_eq!(a[&1], "a"); + /// assert_eq!(a[&2], "b"); + /// assert_eq!(a[&3], "d"); + /// assert_eq!(a[&4], "e"); + /// assert_eq!(a[&5], "f"); + /// ``` + #[stable(feature = "btree_append", since = "1.11.0")] + pub fn append(&mut self, other: &mut Self) { + // Do we have to append anything at all? + if other.len() == 0 { + return; + } + + // We can just swap `self` and `other` if `self` is empty. + if self.len() == 0 { + mem::swap(self, other); + return; + } + + // First, we merge `self` and `other` into a sorted sequence in linear time. 
+ let self_iter = mem::replace(self, BTreeMap::new()).into_iter(); + let other_iter = mem::replace(other, BTreeMap::new()).into_iter(); + let iter = MergeIter { + left: self_iter.peekable(), + right: other_iter.peekable(), + }; + + // Second, we build a tree from the sorted sequence in linear time. + self.from_sorted_iter(iter); + self.fix_right_edge(); + } + /// Constructs a double-ended iterator over a sub-range of elements in the map, starting /// at min, and ending at max. If min is `Unbounded`, then it will be treated as "negative /// infinity", and if max is `Unbounded`, then it will be treated as "positive infinity". @@ -442,6 +662,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// #![feature(btree_range, collections_bound)] /// @@ -464,47 +686,63 @@ impl BTreeMap { min: Bound<&Min>, max: Bound<&Max>) -> Range - where K: Borrow + Borrow, + where K: Borrow + Borrow { let front = match min { - Included(key) => match search::search_tree(self.root.as_ref(), key) { - Found(kv_handle) => match kv_handle.left_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => last_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Excluded(key) => match search::search_tree(self.root.as_ref(), key) { - Found(kv_handle) => match kv_handle.right_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => first_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Unbounded => first_leaf_edge(self.root.as_ref()) + Included(key) => { + match search::search_tree(self.root.as_ref(), key) { + Found(kv_handle) => { + match kv_handle.left_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => last_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Excluded(key) => { + match search::search_tree(self.root.as_ref(), key) { + Found(kv_handle) => { + match kv_handle.right_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => first_leaf_edge(internal.descend()), 
+ } + } + GoDown(bottom) => bottom, + } + } + Unbounded => first_leaf_edge(self.root.as_ref()), }; let back = match max { - Included(key) => match search::search_tree(self.root.as_ref(), key) { - Found(kv_handle) => match kv_handle.right_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => first_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Excluded(key) => match search::search_tree(self.root.as_ref(), key) { - Found(kv_handle) => match kv_handle.left_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => last_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Unbounded => last_leaf_edge(self.root.as_ref()) + Included(key) => { + match search::search_tree(self.root.as_ref(), key) { + Found(kv_handle) => { + match kv_handle.right_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => first_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Excluded(key) => { + match search::search_tree(self.root.as_ref(), key) { + Found(kv_handle) => { + match kv_handle.left_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => last_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Unbounded => last_leaf_edge(self.root.as_ref()), }; Range { front: front, - back: back + back: back, } } @@ -515,6 +753,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// #![feature(btree_range, collections_bound)] /// @@ -538,51 +778,67 @@ impl BTreeMap { min: Bound<&Min>, max: Bound<&Max>) -> RangeMut - where K: Borrow + Borrow, + where K: Borrow + Borrow { let root1 = self.root.as_mut(); let root2 = unsafe { ptr::read(&root1) }; let front = match min { - Included(key) => match search::search_tree(root1, key) { - Found(kv_handle) => match kv_handle.left_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => last_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Excluded(key) => match 
search::search_tree(root1, key) { - Found(kv_handle) => match kv_handle.right_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => first_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Unbounded => first_leaf_edge(root1) + Included(key) => { + match search::search_tree(root1, key) { + Found(kv_handle) => { + match kv_handle.left_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => last_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Excluded(key) => { + match search::search_tree(root1, key) { + Found(kv_handle) => { + match kv_handle.right_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => first_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Unbounded => first_leaf_edge(root1), }; let back = match max { - Included(key) => match search::search_tree(root2, key) { - Found(kv_handle) => match kv_handle.right_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => first_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Excluded(key) => match search::search_tree(root2, key) { - Found(kv_handle) => match kv_handle.left_edge().force() { - Leaf(bottom) => bottom, - Internal(internal) => last_leaf_edge(internal.descend()) - }, - GoDown(bottom) => bottom - }, - Unbounded => last_leaf_edge(root2) + Included(key) => { + match search::search_tree(root2, key) { + Found(kv_handle) => { + match kv_handle.right_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => first_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Excluded(key) => { + match search::search_tree(root2, key) { + Found(kv_handle) => { + match kv_handle.left_edge().force() { + Leaf(bottom) => bottom, + Internal(internal) => last_leaf_edge(internal.descend()), + } + } + GoDown(bottom) => bottom, + } + } + Unbounded => last_leaf_edge(root2), }; RangeMut { front: front, back: back, - _marker: PhantomData + _marker: PhantomData, 
} } @@ -590,6 +846,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -605,21 +863,271 @@ impl BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry { match search::search_tree(self.root.as_mut(), &key) { - Found(handle) => Occupied(OccupiedEntry { - handle: handle, - length: &mut self.length, - _marker: PhantomData, - }), - GoDown(handle) => Vacant(VacantEntry { - key: key, - handle: handle, - length: &mut self.length, - _marker: PhantomData, - }) + Found(handle) => { + Occupied(OccupiedEntry { + handle: handle, + length: &mut self.length, + _marker: PhantomData, + }) + } + GoDown(handle) => { + Vacant(VacantEntry { + key: key, + handle: handle, + length: &mut self.length, + _marker: PhantomData, + }) + } + } + } + + fn from_sorted_iter>(&mut self, iter: I) { + let mut cur_node = last_leaf_edge(self.root.as_mut()).into_node(); + // Iterate through all key-value pairs, pushing them into nodes at the right level. + for (key, value) in iter { + // Try to push key-value pair into the current leaf node. + if cur_node.len() < node::CAPACITY { + cur_node.push(key, value); + } else { + // No space left, go up and push there. + let mut open_node; + let mut test_node = cur_node.forget_type(); + loop { + match test_node.ascend() { + Ok(parent) => { + let parent = parent.into_node(); + if parent.len() < node::CAPACITY { + // Found a node with space left, push here. + open_node = parent; + break; + } else { + // Go up again. + test_node = parent.forget_type(); + } + } + Err(node) => { + // We are at the top, create a new root node and push there. + open_node = node.into_root_mut().push_level(); + break; + } + } + } + + // Push key-value pair and new right subtree. 
+ let tree_height = open_node.height() - 1; + let mut right_tree = node::Root::new_leaf(); + for _ in 0..tree_height { + right_tree.push_level(); + } + open_node.push(key, value, right_tree); + + // Go down to the right-most leaf again. + cur_node = last_leaf_edge(open_node.forget_type()).into_node(); + } + + self.length += 1; + } + } + + fn fix_right_edge(&mut self) { + // Handle underfull nodes, start from the top. + let mut cur_node = self.root.as_mut(); + while let Internal(internal) = cur_node.force() { + // Check if right-most child is underfull. + let mut last_edge = internal.last_edge(); + let right_child_len = last_edge.reborrow().descend().len(); + if right_child_len < node::MIN_LEN { + // We need to steal. + let mut last_kv = match last_edge.left_kv() { + Ok(left) => left, + Err(_) => unreachable!(), + }; + last_kv.bulk_steal_left(node::MIN_LEN - right_child_len); + last_edge = last_kv.right_edge(); + } + + // Go further down. + cur_node = last_edge.descend(); + } + } + + /// Splits the collection into two at the given key. Returns everything after the given key, + /// including the key. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(17, "d"); + /// a.insert(41, "e"); + /// + /// let b = a.split_off(&3); + /// + /// assert_eq!(a.len(), 2); + /// assert_eq!(b.len(), 3); + /// + /// assert_eq!(a[&1], "a"); + /// assert_eq!(a[&2], "b"); + /// + /// assert_eq!(b[&3], "c"); + /// assert_eq!(b[&17], "d"); + /// assert_eq!(b[&41], "e"); + /// ``` + #[stable(feature = "btree_split_off", since = "1.11.0")] + pub fn split_off(&mut self, key: &Q) -> Self + where K: Borrow + { + if self.is_empty() { + return Self::new(); + } + + let total_num = self.len(); + + let mut right = Self::new(); + for _ in 0..(self.root.as_ref().height()) { + right.root.push_level(); + } + + { + let mut left_node = self.root.as_mut(); + let mut right_node = right.root.as_mut(); + + loop { + let mut split_edge = match search::search_node(left_node, key) { + // key is going to the right tree + Found(handle) => handle.left_edge(), + GoDown(handle) => handle, + }; + + split_edge.move_suffix(&mut right_node); + + match (split_edge.force(), right_node.force()) { + (Internal(edge), Internal(node)) => { + left_node = edge.descend(); + right_node = node.first_edge().descend(); + } + (Leaf(_), Leaf(_)) => { + break; + } + _ => { + unreachable!(); + } + } + } + } + + self.fix_right_border(); + right.fix_left_border(); + + if self.root.as_ref().height() < right.root.as_ref().height() { + self.recalc_length(); + right.length = total_num - self.len(); + } else { + right.recalc_length(); + self.length = total_num - right.len(); + } + + right + } + + /// Calculates the number of elements if it is incorrect. 
+ fn recalc_length(&mut self) { + fn dfs(node: NodeRef) -> usize { + let mut res = node.len(); + + if let Internal(node) = node.force() { + let mut edge = node.first_edge(); + loop { + res += dfs(edge.reborrow().descend()); + match edge.right_kv() { + Ok(right_kv) => { + edge = right_kv.right_edge(); + } + Err(_) => { + break; + } + } + } + } + + res + } + + self.length = dfs(self.root.as_ref()); + } + + /// Removes empty levels on the top. + fn fix_top(&mut self) { + loop { + { + let node = self.root.as_ref(); + if node.height() == 0 || node.len() > 0 { + break; + } + } + self.root.pop_level(); + } + } + + fn fix_right_border(&mut self) { + self.fix_top(); + + { + let mut cur_node = self.root.as_mut(); + + while let Internal(node) = cur_node.force() { + let mut last_kv = node.last_kv(); + + if last_kv.can_merge() { + cur_node = last_kv.merge().descend(); + } else { + let right_len = last_kv.reborrow().right_edge().descend().len(); + // `MINLEN + 1` to avoid readjust if merge happens on the next level. + if right_len < node::MIN_LEN + 1 { + last_kv.bulk_steal_left(node::MIN_LEN + 1 - right_len); + } + cur_node = last_kv.right_edge().descend(); + } + } + } + + self.fix_top(); + } + + /// The symmetric clone of `fix_right_border`. 
+ fn fix_left_border(&mut self) { + self.fix_top(); + + { + let mut cur_node = self.root.as_mut(); + + while let Internal(node) = cur_node.force() { + let mut first_kv = node.first_kv(); + + if first_kv.can_merge() { + cur_node = first_kv.merge().descend(); + } else { + let left_len = first_kv.reborrow().left_edge().descend().len(); + if left_len < node::MIN_LEN + 1 { + first_kv.bulk_steal_right(node::MIN_LEN + 1 - left_len); + } + cur_node = first_kv.left_edge().descend(); + } + } } + + self.fix_top(); } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> IntoIterator for &'a BTreeMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -629,6 +1137,7 @@ impl<'a, K: 'a, V: 'a> IntoIterator for &'a BTreeMap { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); @@ -646,6 +1155,10 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { if self.length == 0 { @@ -657,19 +1170,24 @@ impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> ExactSizeIterator for Iter<'a, K, V> { - fn len(&self) -> usize { self.length } + fn len(&self) -> usize { + self.length + } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Clone for Iter<'a, K, V> { fn clone(&self) -> Iter<'a, K, V> { Iter { range: self.range.clone(), - length: self.length + length: self.length, } } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut BTreeMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -679,6 +1197,7 @@ impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut BTreeMap 
{ } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); @@ -696,6 +1215,7 @@ impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { if self.length == 0 { @@ -707,10 +1227,17 @@ impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { - fn len(&self) -> usize { self.length } + fn len(&self) -> usize { + self.length + } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl IntoIterator for BTreeMap { type Item = (K, V); type IntoIter = IntoIter; @@ -724,14 +1251,16 @@ impl IntoIterator for BTreeMap { IntoIter { front: first_leaf_edge(root1), back: last_leaf_edge(root2), - length: len + length: len, } } } +#[stable(feature = "btree_drop", since = "1.7.0")] impl Drop for IntoIter { fn drop(&mut self) { - for _ in &mut *self { } + for _ in &mut *self { + } unsafe { let leaf_node = ptr::read(&self.front).into_node(); if let Some(first_parent) = leaf_node.deallocate_and_ascend() { @@ -744,6 +1273,7 @@ impl Drop for IntoIter { } } +#[stable(feature = "rust1", since = "1.0.0")] impl Iterator for IntoIter { type Item = (K, V); @@ -762,10 +1292,10 @@ impl Iterator for IntoIter { let v = unsafe { ptr::read(kv.reborrow().into_kv().1) }; self.front = kv.right_edge(); return Some((k, v)); - }, + } Err(last_edge) => unsafe { unwrap_unchecked(last_edge.into_node().deallocate_and_ascend()) - } + }, }; loop { @@ -775,10 +1305,10 @@ impl Iterator for IntoIter { let v = unsafe { ptr::read(kv.reborrow().into_kv().1) }; self.front = first_leaf_edge(kv.right_edge().descend()); return Some((k, v)); - }, + } 
Err(last_edge) => unsafe { cur_handle = unwrap_unchecked(last_edge.into_node().deallocate_and_ascend()); - } + }, } } } @@ -788,6 +1318,7 @@ impl Iterator for IntoIter { } } +#[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for IntoIter { fn next_back(&mut self) -> Option<(K, V)> { if self.length == 0 { @@ -804,10 +1335,10 @@ impl DoubleEndedIterator for IntoIter { let v = unsafe { ptr::read(kv.reborrow().into_kv().1) }; self.back = kv.left_edge(); return Some((k, v)); - }, + } Err(last_edge) => unsafe { unwrap_unchecked(last_edge.into_node().deallocate_and_ascend()) - } + }, }; loop { @@ -817,19 +1348,26 @@ impl DoubleEndedIterator for IntoIter { let v = unsafe { ptr::read(kv.reborrow().into_kv().1) }; self.back = last_leaf_edge(kv.left_edge().descend()); return Some((k, v)); - }, + } Err(last_edge) => unsafe { cur_handle = unwrap_unchecked(last_edge.into_node().deallocate_and_ascend()); - } + }, } } } } +#[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter { - fn len(&self) -> usize { self.length } + fn len(&self) -> usize { + self.length + } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; @@ -842,26 +1380,31 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> { fn next_back(&mut self) -> Option<&'a K> { self.inner.next_back().map(|(k, _)| k) } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { fn len(&self) -> usize { self.inner.len() } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Clone for Keys<'a, K, V> { fn clone(&self) -> Keys<'a, K, V> { - Keys { - inner: self.inner.clone() - } + Keys { 
inner: self.inner.clone() } } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; @@ -874,23 +1417,27 @@ impl<'a, K, V> Iterator for Values<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> { fn next_back(&mut self) -> Option<&'a V> { self.inner.next_back().map(|(_, v)| v) } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { fn len(&self) -> usize { self.inner.len() } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Values<'a, K, V> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Clone for Values<'a, K, V> { fn clone(&self) -> Values<'a, K, V> { - Values { - inner: self.inner.clone() - } + Values { inner: self.inner.clone() } } } @@ -906,6 +1453,37 @@ impl<'a, K, V> Iterator for Range<'a, K, V> { } } +#[stable(feature = "map_values_mut", since = "1.10.0")] +impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { + type Item = &'a mut V; + + fn next(&mut self) -> Option<&'a mut V> { + self.inner.next().map(|(_, v)| v) + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[stable(feature = "map_values_mut", since = "1.10.0")] +impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> { + fn next_back(&mut self) -> Option<&'a mut V> { + self.inner.next_back().map(|(_, v)| v) + } +} + +#[stable(feature = "map_values_mut", since = "1.10.0")] +impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { + fn len(&self) -> usize { + self.inner.len() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} + + impl<'a, K, V> Range<'a, K, V> { unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) { let handle = self.front; @@ -915,7 +1493,7 @@ impl<'a, K, V> Range<'a, K, V> { let ret = kv.into_kv(); self.front = kv.right_edge(); return ret; - }, + } 
Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); unwrap_unchecked(next_level) @@ -928,7 +1506,7 @@ impl<'a, K, V> Range<'a, K, V> { let ret = kv.into_kv(); self.front = first_leaf_edge(kv.right_edge().descend()); return ret; - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); cur_handle = unwrap_unchecked(next_level); @@ -957,7 +1535,7 @@ impl<'a, K, V> Range<'a, K, V> { let ret = kv.into_kv(); self.back = kv.left_edge(); return ret; - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); unwrap_unchecked(next_level) @@ -970,7 +1548,7 @@ impl<'a, K, V> Range<'a, K, V> { let ret = kv.into_kv(); self.back = last_leaf_edge(kv.left_edge().descend()); return ret; - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); cur_handle = unwrap_unchecked(next_level); @@ -980,11 +1558,14 @@ impl<'a, K, V> Range<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for Range<'a, K, V> {} + impl<'a, K, V> Clone for Range<'a, K, V> { fn clone(&self) -> Range<'a, K, V> { Range { front: self.front, - back: self.back + back: self.back, } } } @@ -996,7 +1577,7 @@ impl<'a, K, V> Iterator for RangeMut<'a, K, V> { if self.front == self.back { None } else { - unsafe { Some (self.next_unchecked()) } + unsafe { Some(self.next_unchecked()) } } } } @@ -1010,7 +1591,7 @@ impl<'a, K, V> RangeMut<'a, K, V> { let (k, v) = ptr::read(&kv).into_kv_mut(); self.front = kv.right_edge(); return (k, v); - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); unwrap_unchecked(next_level) @@ -1023,7 +1604,7 @@ impl<'a, K, V> RangeMut<'a, K, V> { let (k, v) = ptr::read(&kv).into_kv_mut(); self.front = first_leaf_edge(kv.right_edge().descend()); return (k, v); - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); cur_handle = unwrap_unchecked(next_level); @@ -1043,6 +1624,9 @@ impl<'a, K, V> 
DoubleEndedIterator for RangeMut<'a, K, V> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} + impl<'a, K, V> RangeMut<'a, K, V> { unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) { let handle = ptr::read(&self.back); @@ -1052,7 +1636,7 @@ impl<'a, K, V> RangeMut<'a, K, V> { let (k, v) = ptr::read(&kv).into_kv_mut(); self.back = kv.left_edge(); return (k, v); - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); unwrap_unchecked(next_level) @@ -1065,7 +1649,7 @@ impl<'a, K, V> RangeMut<'a, K, V> { let (k, v) = ptr::read(&kv).into_kv_mut(); self.back = last_leaf_edge(kv.left_edge().descend()); return (k, v); - }, + } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); cur_handle = unwrap_unchecked(next_level); @@ -1075,29 +1659,33 @@ impl<'a, K, V> RangeMut<'a, K, V> { } } +#[stable(feature = "rust1", since = "1.0.0")] impl FromIterator<(K, V)> for BTreeMap { - fn from_iter>(iter: T) -> BTreeMap { + fn from_iter>(iter: T) -> BTreeMap { let mut map = BTreeMap::new(); map.extend(iter); map } } +#[stable(feature = "rust1", since = "1.0.0")] impl Extend<(K, V)> for BTreeMap { #[inline] - fn extend>(&mut self, iter: T) { + fn extend>(&mut self, iter: T) { for (k, v) in iter { self.insert(k, v); } } } +#[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap { - fn extend>(&mut self, iter: I) { + fn extend>(&mut self, iter: I) { self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); } } +#[stable(feature = "rust1", since = "1.0.0")] impl Hash for BTreeMap { fn hash(&self, state: &mut H) { for elt in self { @@ -1106,21 +1694,25 @@ impl Hash for BTreeMap { } } +#[stable(feature = "rust1", since = "1.0.0")] impl Default for BTreeMap { + /// Creates an empty `BTreeMap`. 
fn default() -> BTreeMap { BTreeMap::new() } } +#[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for BTreeMap { fn eq(&self, other: &BTreeMap) -> bool { - self.len() == other.len() && - self.iter().zip(other).all(|(a, b)| a == b) + self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b) } } +#[stable(feature = "rust1", since = "1.0.0")] impl Eq for BTreeMap {} +#[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for BTreeMap { #[inline] fn partial_cmp(&self, other: &BTreeMap) -> Option { @@ -1128,6 +1720,7 @@ impl PartialOrd for BTreeMap { } } +#[stable(feature = "rust1", since = "1.0.0")] impl Ord for BTreeMap { #[inline] fn cmp(&self, other: &BTreeMap) -> Ordering { @@ -1135,14 +1728,17 @@ impl Ord for BTreeMap { } } +#[stable(feature = "rust1", since = "1.0.0")] impl Debug for BTreeMap { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap - where K: Borrow, Q: Ord + where K: Borrow, + Q: Ord { type Output = V; @@ -1152,11 +1748,9 @@ impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap } } -fn first_leaf_edge( - mut node: NodeRef - ) -> Handle, marker::Edge> { +fn first_leaf_edge + (mut node: NodeRef) + -> Handle, marker::Edge> { loop { match node.force() { Leaf(leaf) => return leaf.first_edge(), @@ -1167,11 +1761,9 @@ fn first_leaf_edge( } } -fn last_leaf_edge( - mut node: NodeRef - ) -> Handle, marker::Edge> { +fn last_leaf_edge + (mut node: NodeRef) + -> Handle, marker::Edge> { loop { match node.force() { Leaf(leaf) => return leaf.last_edge(), @@ -1194,17 +1786,19 @@ unsafe fn unwrap_unchecked(val: Option) -> T { } impl BTreeMap { - /// Gets an iterator over the entries of the map. + /// Gets an iterator over the entries of the map, sorted by key. 
/// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); - /// map.insert(1, "a"); - /// map.insert(2, "b"); /// map.insert(3, "c"); + /// map.insert(2, "b"); + /// map.insert(1, "a"); /// /// for (key, value) in map.iter() { /// println!("{}: {}", key, value); @@ -1218,16 +1812,18 @@ impl BTreeMap { Iter { range: Range { front: first_leaf_edge(self.root.as_ref()), - back: last_leaf_edge(self.root.as_ref()) + back: last_leaf_edge(self.root.as_ref()), }, - length: self.length + length: self.length, } } - /// Gets a mutable iterator over the entries of the map. + /// Gets a mutable iterator over the entries of the map, sorted by key. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -1253,20 +1849,22 @@ impl BTreeMap { back: last_leaf_edge(root2), _marker: PhantomData, }, - length: self.length + length: self.length, } } - /// Gets an iterator over the keys of the map. + /// Gets an iterator over the keys of the map, in sorted order. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); - /// a.insert(1, "a"); /// a.insert(2, "b"); + /// a.insert(1, "a"); /// /// let keys: Vec<_> = a.keys().cloned().collect(); /// assert_eq!(keys, [1, 2]); @@ -1276,29 +1874,59 @@ impl BTreeMap { Keys { inner: self.iter() } } - /// Gets an iterator over the values of the map. + /// Gets an iterator over the values of the map, in order by key. 
/// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); - /// a.insert(1, "a"); - /// a.insert(2, "b"); + /// a.insert(1, "hello"); + /// a.insert(2, "goodbye"); /// /// let values: Vec<&str> = a.values().cloned().collect(); - /// assert_eq!(values, ["a", "b"]); + /// assert_eq!(values, ["hello", "goodbye"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn values<'a>(&'a self) -> Values<'a, K, V> { Values { inner: self.iter() } } + /// Gets a mutable iterator over the values of the map, in order by key. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, String::from("hello")); + /// a.insert(2, String::from("goodbye")); + /// + /// for value in a.values_mut() { + /// value.push_str("!"); + /// } + /// + /// let values: Vec = a.values().cloned().collect(); + /// assert_eq!(values, [String::from("hello!"), + /// String::from("goodbye!")]); + /// ``` + #[stable(feature = "map_values_mut", since = "1.10.0")] + pub fn values_mut(&mut self) -> ValuesMut { + ValuesMut { inner: self.iter_mut() } + } + /// Returns the number of elements in the map. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -1316,6 +1944,8 @@ impl BTreeMap { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// use std::collections::BTreeMap; /// @@ -1333,6 +1963,17 @@ impl BTreeMap { impl<'a, K: Ord, V> Entry<'a, K, V> { /// Ensures a value is in the entry by inserting the default if empty, and returns /// a mutable reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn or_insert(self, default: V) -> &'a mut V { match self { @@ -1343,6 +1984,19 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { /// Ensures a value is in the entry by inserting the result of the default function if empty, /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, String> = BTreeMap::new(); + /// let s = "hoho".to_owned(); + /// + /// map.entry("poneyland").or_insert_with(|| s); + /// + /// assert_eq!(map["poneyland"], "hoho".to_owned()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn or_insert_with V>(self, default: F) -> &'a mut V { match self { @@ -1350,11 +2004,79 @@ impl<'a, K: Ord, V> Entry<'a, K, V> { Vacant(entry) => entry.insert(default()), } } + + /// Returns a reference to this entry's key. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[stable(feature = "map_entry_keys", since = "1.10.0")] + pub fn key(&self) -> &K { + match *self { + Occupied(ref entry) => entry.key(), + Vacant(ref entry) => entry.key(), + } + } } impl<'a, K: Ord, V> VacantEntry<'a, K, V> { - /// Sets the value of the entry with the VacantEntry's key, + /// Gets a reference to the key that would be used when inserting a value + /// through the VacantEntry. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[stable(feature = "map_entry_keys", since = "1.10.0")] + pub fn key(&self) -> &K { + &self.key + } + + /// Take ownership of the key. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// + /// if let Entry::Vacant(v) = map.entry("poneyland") { + /// v.into_key(); + /// } + /// ``` + #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")] + pub fn into_key(self) -> K { + self.key + } + + /// Sets the value of the entry with the `VacantEntry`'s key, /// and returns a mutable reference to it. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut count: BTreeMap<&str, usize> = BTreeMap::new(); + /// + /// // count the number of occurrences of letters in the vec + /// for x in vec!["a","b","a","c","a","b"] { + /// *count.entry(x).or_insert(0) += 1; + /// } + /// + /// assert_eq!(count["a"], 3); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(self, value: V) -> &'a mut V { *self.length += 1; @@ -1378,15 +2100,17 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> { loop { match cur_parent { - Ok(parent) => match parent.insert(ins_k, ins_v, ins_edge) { - Fit(_) => return unsafe { &mut *out_ptr }, - Split(left, k, v, right) => { - ins_k = k; - ins_v = v; - ins_edge = right; - cur_parent = left.ascend().map_err(|n| n.into_root_mut()); + Ok(parent) => { + match parent.insert(ins_k, ins_v, ins_edge) { + Fit(_) => return unsafe { &mut *out_ptr }, + Split(left, k, v, right) => { + ins_k = k; + ins_v = v; + ins_edge = right; + cur_parent = left.ascend().map_err(|n| n.into_root_mut()); + } } - }, + } Err(root) => { root.push_level().push(ins_k, ins_v, 
ins_edge); return unsafe { &mut *out_ptr }; @@ -1397,32 +2121,156 @@ impl<'a, K: Ord, V> VacantEntry<'a, K, V> { } impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// assert_eq!(map.entry("poneyland").key(), &"poneyland"); + /// ``` + #[stable(feature = "map_entry_keys", since = "1.10.0")] + pub fn key(&self) -> &K { + self.handle.reborrow().into_kv().0 + } + + /// Deprecated, renamed to `remove_entry` + #[unstable(feature = "map_entry_recover_keys", issue = "34285")] + #[rustc_deprecated(since = "1.12.0", reason = "renamed to `remove_entry`")] + pub fn remove_pair(self) -> (K, V) { + self.remove_entry() + } + + /// Take ownership of the key and value from the map. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// // We delete the entry from the map. + /// o.remove_entry(); + /// } + /// + /// // If now try to get the value, it will panic: + /// // println!("{}", map["poneyland"]); + /// ``` + #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")] + pub fn remove_entry(self) -> (K, V) { + self.remove_kv() + } + /// Gets a reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// assert_eq!(o.get(), &12); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get(&self) -> &V { self.handle.reborrow().into_kv().1 } /// Gets a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// *o.get_mut() += 10; + /// } + /// assert_eq!(map["poneyland"], 22); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut(&mut self) -> &mut V { self.handle.kv_mut().1 } /// Converts the entry into a mutable reference to its value. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// *o.into_mut() += 10; + /// } + /// assert_eq!(map["poneyland"], 22); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn into_mut(self) -> &'a mut V { self.handle.into_kv_mut().1 } - /// Sets the value of the entry with the OccupiedEntry's key, + /// Sets the value of the entry with the `OccupiedEntry`'s key, /// and returns the entry's old value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(mut o) = map.entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// assert_eq!(map["poneyland"], 15); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(&mut self, value: V) -> V { mem::replace(self.get_mut(), value) } /// Takes the value of the entry out of the map, and returns it. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeMap; + /// use std::collections::btree_map::Entry; + /// + /// let mut map: BTreeMap<&str, usize> = BTreeMap::new(); + /// map.entry("poneyland").or_insert(12); + /// + /// if let Entry::Occupied(o) = map.entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// // If we try to get "poneyland"'s value, it'll panic: + /// // println!("{}", map["poneyland"]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(self) -> V { self.remove_kv().1 @@ -1435,7 +2283,7 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { Leaf(leaf) => { let (hole, old_key, old_val) = leaf.remove(); (hole.into_node(), old_key, old_val) - }, + } Internal(mut internal) => { let key_loc = internal.kv_mut().0 as *mut K; let val_loc = internal.kv_mut().1 as *mut V; @@ -1445,12 +2293,8 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { let (hole, key, val) = to_remove.remove(); - let old_key = unsafe { - mem::replace(&mut *key_loc, key) - }; - let old_val = unsafe { - mem::replace(&mut *val_loc, val) - }; + let old_key = unsafe { mem::replace(&mut *key_loc, key) }; + let old_val = unsafe { mem::replace(&mut *val_loc, val) }; (hole.into_node(), old_key, old_val) } @@ -1462,14 +2306,16 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { match handle_underfull_node(cur_node) { AtRoot => break, EmptyParent(_) => unreachable!(), - 
Merged(parent) => if parent.len() == 0 { - // We must be at the root - parent.into_root_mut().pop_level(); - break; - } else { - cur_node = parent.forget_type(); - }, - Stole(_) => break + Merged(parent) => { + if parent.len() == 0 { + // We must be at the root + parent.into_root_mut().pop_level(); + break; + } else { + cur_node = parent.forget_type(); + } + } + Stole(_) => break, } } @@ -1481,13 +2327,11 @@ enum UnderflowResult<'a, K, V> { AtRoot, EmptyParent(NodeRef, K, V, marker::Internal>), Merged(NodeRef, K, V, marker::Internal>), - Stole(NodeRef, K, V, marker::Internal>) + Stole(NodeRef, K, V, marker::Internal>), } -fn handle_underfull_node<'a, K, V>(node: NodeRef, - K, V, - marker::LeafOrInternal>) - -> UnderflowResult<'a, K, V> { +fn handle_underfull_node<'a, K, V>(node: NodeRef, K, V, marker::LeafOrInternal>) + -> UnderflowResult<'a, K, V> { let parent = if let Ok(parent) = node.ascend() { parent } else { @@ -1496,41 +2340,48 @@ fn handle_underfull_node<'a, K, V>(node: NodeRef, let (is_left, mut handle) = match parent.left_kv() { Ok(left) => (true, left), - Err(parent) => match parent.right_kv() { - Ok(right) => (false, right), - Err(parent) => { - return EmptyParent(parent.into_node()); + Err(parent) => { + match parent.right_kv() { + Ok(right) => (false, right), + Err(parent) => { + return EmptyParent(parent.into_node()); + } } } }; if handle.can_merge() { - return Merged(handle.merge().into_node()); + Merged(handle.merge().into_node()) } else { - unsafe { - let (k, v, edge) = if is_left { - handle.reborrow_mut().left_edge().descend().pop() - } else { - handle.reborrow_mut().right_edge().descend().pop_front() - }; + if is_left { + handle.steal_left(); + } else { + handle.steal_right(); + } + Stole(handle.into_node()) + } +} - let k = mem::replace(handle.reborrow_mut().into_kv_mut().0, k); - let v = mem::replace(handle.reborrow_mut().into_kv_mut().1, v); +impl> Iterator for MergeIter { + type Item = (K, V); - // FIXME: reuse cur_node? 
- if is_left { - match handle.reborrow_mut().right_edge().descend().force() { - Leaf(mut leaf) => leaf.push_front(k, v), - Internal(mut internal) => internal.push_front(k, v, edge.unwrap()) - } - } else { - match handle.reborrow_mut().left_edge().descend().force() { - Leaf(mut leaf) => leaf.push(k, v), - Internal(mut internal) => internal.push(k, v, edge.unwrap()) - } + fn next(&mut self) -> Option<(K, V)> { + let res = match (self.left.peek(), self.right.peek()) { + (Some(&(ref left_key, _)), Some(&(ref right_key, _))) => left_key.cmp(right_key), + (Some(_), None) => Ordering::Less, + (None, Some(_)) => Ordering::Greater, + (None, None) => return None, + }; + + // Check which elements comes first and only advance the corresponding iterator. + // If two keys are equal, take the value from `right`. + match res { + Ordering::Less => self.left.next(), + Ordering::Greater => self.right.next(), + Ordering::Equal => { + self.left.next(); + self.right.next() } } - - return Stole(handle.into_node()); } } diff --git a/src/libcollections/btree/node.rs b/src/libcollections/btree/node.rs index c8a0f60587e9e..e9bc29118d508 100644 --- a/src/libcollections/btree/node.rs +++ b/src/libcollections/btree/node.rs @@ -28,9 +28,19 @@ // } // ``` // -// Since Rust doesn't acutally have dependent types and polymorphic recursion, +// Since Rust doesn't actually have dependent types and polymorphic recursion, // we make do with lots of unsafety. +// A major goal of this module is to avoid complexity by treating the tree as a generic (if +// weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such, +// this module doesn't care whether the entries are sorted, which nodes can be underfull, or +// even what underfull means. However, we do rely on a few invariants: +// +// - Trees must have uniform depth/height. This means that every path down to a leaf from a +// given node has exactly the same length. 
+// - A node of length `n` has `n` keys, `n` values, and (in an internal node) `n + 1` edges. +// This implies that even an empty internal node has at least one edge. + use alloc::heap; use core::marker::PhantomData; use core::mem; @@ -41,19 +51,46 @@ use core::slice; use boxed::Box; const B: usize = 6; +pub const MIN_LEN: usize = B - 1; pub const CAPACITY: usize = 2 * B - 1; +/// The underlying representation of leaf nodes. Note that it is often unsafe to actually store +/// these, since only the first `len` keys and values are assumed to be initialized. As such, +/// these should always be put behind pointers, and specifically behind `BoxedNode` in the owned +/// case. +/// +/// See also rust-lang/rfcs#197, which would make this structure significantly more safe by +/// avoiding accidentally dropping unused and uninitialized keys and values. struct LeafNode { + /// The arrays storing the actual data of the node. Only the first `len` elements of each + /// array are initialized and valid. keys: [K; CAPACITY], vals: [V; CAPACITY], + + /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`. + /// This either points to an actual node or is null. parent: *const InternalNode, + + /// This node's index into the parent node's `edges` array. + /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`. + /// This is only guaranteed to be initialized when `parent` is nonnull. parent_idx: u16, + + /// The number of keys and values this node stores. + /// + /// This is at the end of the node's representation and next to `parent_idx` to encourage + /// the compiler to join `len` and `parent_idx` into the same 32-bit word, reducing space + /// overhead. len: u16, } impl LeafNode { + /// Creates a new `LeafNode`. Unsafe because all nodes should really be hidden behind + /// `BoxedNode`, preventing accidental dropping of uninitialized keys and values. 
unsafe fn new() -> Self { LeafNode { + // As a general policy, we leave fields uninitialized if they can be, as this should + // be both slightly faster and easier to track in Valgrind. keys: mem::uninitialized(), vals: mem::uninitialized(), parent: ptr::null(), @@ -63,15 +100,28 @@ impl LeafNode { } } -// We use repr(C) so that a pointer to an internal node can be -// directly used as a pointer to a leaf node +/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden +/// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an +/// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the +/// node, allowing code to act on leaf and internal nodes generically without having to even check +/// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`. #[repr(C)] struct InternalNode { data: LeafNode, + + /// The pointers to the children of this node. `len + 1` of these are considered + /// initialized and valid. edges: [BoxedNode; 2 * B], } impl InternalNode { + /// Creates a new `InternalNode`. + /// + /// This is unsafe for two reasons. First, it returns an `InternalNode` by value, risking + /// dropping of uninitialized fields. Second, an invariant of internal nodes is that `len + 1` + /// edges are initialized and valid, meaning that even when the node is empty (having a + /// `len` of 0), there must be one initialized and valid edge. This function does not set up + /// such an edge. unsafe fn new() -> Self { InternalNode { data: LeafNode::new(), @@ -80,8 +130,12 @@ impl InternalNode { } } +/// An owned pointer to a node. This basically is either `Box>` or +/// `Box>`. However, it contains no information as to which of the two types +/// of nodes is acutally behind the box, and, partially due to this lack of information, has no +/// destructor. 
struct BoxedNode { - ptr: Unique> // we don't know if this points to a leaf node or an internal node + ptr: Unique> } impl BoxedNode { @@ -156,7 +210,7 @@ impl Root { } } - /// Add a new internal node with a single edge, pointing to the previous root, and make that + /// Adds a new internal node with a single edge, pointing to the previous root, and make that /// new node the root. This increases the height by 1 and is the opposite of `pop_level`. pub fn push_level(&mut self) -> NodeRef { @@ -180,7 +234,7 @@ impl Root { ret } - /// Remove the root node, using its first child as the new root. This cannot be called when + /// Removes the root node, using its first child as the new root. This cannot be called when /// the tree consists only of a leaf node. As it is intended only to be called when the root /// has only one edge, no cleanup is done on any of the other children are elements of the root. /// This decreases the height by 1 and is the opposite of `push_level`. @@ -229,6 +283,7 @@ impl Root { pub struct NodeRef { height: usize, node: NonZero<*const LeafNode>, + // This is null unless the borrow type is `Mut` root: *const Root, _marker: PhantomData<(BorrowType, Type)> } @@ -268,10 +323,20 @@ impl<'a, K, V> NodeRef, K, V, marker::Internal> { impl NodeRef { + /// Finds the length of the node. This is the number of keys or values. In an + /// internal node, the number of edges is `len() + 1`. pub fn len(&self) -> usize { self.as_leaf().len as usize } + /// Returns the height of this node in the whole tree. Zero height denotes the + /// leaf level. + pub fn height(&self) -> usize { + self.height + } + + /// Removes any static information about whether this node is a `Leaf` or an + /// `Internal` node. pub fn forget_type(self) -> NodeRef { NodeRef { height: self.height, @@ -281,6 +346,7 @@ impl NodeRef { } } + /// Temporarily takes out another, immutable reference to the same node. 
fn reborrow<'a>(&'a self) -> NodeRef, K, V, Type> { NodeRef { height: self.height, @@ -304,6 +370,13 @@ impl NodeRef { self.reborrow().into_slices().1 } + /// Finds the parent of the current node. Returns `Ok(handle)` if the current + /// node actually has a parent, where `handle` points to the edge of the parent + /// that points to the current node. Returns `Err(self)` if the current node has + /// no parent, giving back the original `NodeRef`. + /// + /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should + /// both, upon success, do nothing. pub fn ascend(self) -> Result< Handle< NodeRef< @@ -341,9 +414,25 @@ impl NodeRef { let len = self.len(); Handle::new_edge(self, len) } + + /// Note that `self` must be nonempty. + pub fn first_kv(self) -> Handle { + debug_assert!(self.len() > 0); + Handle::new_kv(self, 0) + } + + /// Note that `self` must be nonempty. + pub fn last_kv(self) -> Handle { + let len = self.len(); + debug_assert!(len > 0); + Handle::new_kv(self, len - 1) + } } impl NodeRef { + /// Similar to `ascend`, gets a reference to a node's parent node, but also + /// deallocate the current node in the process. This is unsafe because the + /// current node will still be accessible despite being deallocated. pub unsafe fn deallocate_and_ascend(self) -> Option< Handle< NodeRef< @@ -362,6 +451,9 @@ impl NodeRef { } impl NodeRef { + /// Similar to `ascend`, gets a reference to a node's parent node, but also + /// deallocate the current node in the process. This is unsafe because the + /// current node will still be accessible despite being deallocated. pub unsafe fn deallocate_and_ascend(self) -> Option< Handle< NodeRef< @@ -384,6 +476,8 @@ impl NodeRef { } impl<'a, K, V, Type> NodeRef, K, V, Type> { + /// Unsafely asserts to the compiler some static information about whether this + /// node is a `Leaf`. 
unsafe fn cast_unchecked(&mut self) -> NodeRef { @@ -395,6 +489,16 @@ impl<'a, K, V, Type> NodeRef, K, V, Type> { } } + /// Temporarily takes out another, mutable reference to the same node. Beware, as + /// this method is very dangerous, doubly so since it may not immediately appear + /// dangerous. + /// + /// Because mutable pointers can roam anywhere around the tree and can even (through + /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` + /// can easily be used to make the original mutable pointer dangling, or, in the case + /// of a reborrowed handle, out of bounds. + // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts + // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. unsafe fn reborrow_mut(&mut self) -> NodeRef { NodeRef { height: self.height, @@ -437,6 +541,8 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { } impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { + /// Gets a mutable reference to the root itself. This is useful primarily when the + /// height of the tree needs to be adjusted. Never call this on a reborrowed pointer. pub fn into_root_mut(self) -> &'a mut Root { unsafe { &mut *(self.root as *mut Root) @@ -460,6 +566,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { } impl<'a, K, V> NodeRef, K, V, marker::Leaf> { + /// Adds a key/value pair the end of the node. pub fn push(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); @@ -474,6 +581,7 @@ impl<'a, K, V> NodeRef, K, V, marker::Leaf> { self.as_leaf_mut().len += 1; } + /// Adds a key/value pair to the beginning of the node. 
pub fn push_front(&mut self, key: K, val: V) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() < CAPACITY); @@ -488,6 +596,8 @@ impl<'a, K, V> NodeRef, K, V, marker::Leaf> { } impl<'a, K, V> NodeRef, K, V, marker::Internal> { + /// Adds a key/value pair and an edge to go to the right of that pair to + /// the end of the node. pub fn push(&mut self, key: K, val: V, edge: Root) { // Necessary for correctness, but this is an internal module debug_assert!(edge.height == self.height - 1); @@ -506,6 +616,19 @@ impl<'a, K, V> NodeRef, K, V, marker::Internal> { } } + fn correct_childrens_parent_links(&mut self, first: usize, after_last: usize) { + for i in first..after_last { + Handle::new_edge(unsafe { self.reborrow_mut() }, i).correct_parent_link(); + } + } + + fn correct_all_childrens_parent_links(&mut self) { + let len = self.len(); + self.correct_childrens_parent_links(0, len + 1); + } + + /// Adds a key/value pair and an edge to go to the left of that pair to + /// the beginning of the node. pub fn push_front(&mut self, key: K, val: V, edge: Root) { // Necessary for correctness, but this is an internal module debug_assert!(edge.height == self.height - 1); @@ -525,15 +648,14 @@ impl<'a, K, V> NodeRef, K, V, marker::Internal> { self.as_leaf_mut().len += 1; - for i in 0..self.len()+1 { - Handle::new_edge(self.reborrow_mut(), i).correct_parent_link(); - } + self.correct_all_childrens_parent_links(); } - } } impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { + /// Removes a key/value pair from the end of this node. If this is an internal node, + /// also removes the edge that was to the right of that pair. pub fn pop(&mut self) -> (K, V, Option>) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() > 0); @@ -558,6 +680,8 @@ impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { } } + /// Removes a key/value pair from the beginning of this node. 
If this is an internal node, + /// also removes the edge that was to the left of that pair. pub fn pop_front(&mut self) -> (K, V, Option>) { // Necessary for correctness, but this is an internal module debug_assert!(self.len() > 0); @@ -594,9 +718,17 @@ impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { (key, val, edge) } } + + fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) { + ( + self.keys_mut().as_mut_ptr(), + self.vals_mut().as_mut_ptr() + ) + } } impl NodeRef { + /// Checks whether a node is an `Internal` node or a `Leaf` node. pub fn force(self) -> ForceResult< NodeRef, NodeRef @@ -619,6 +751,14 @@ impl NodeRef { } } +/// A reference to a specific key/value pair or edge within a node. The `Node` parameter +/// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key/value +/// pair) or `Edge` (signifying a handle on an edge). +/// +/// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to +/// a child node, these represent the spaces where child pointers would go between the key/value +/// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one +/// to the left of the node, one between the two pairs, and one at the right of the node. pub struct Handle { node: Node, idx: usize, @@ -626,6 +766,8 @@ pub struct Handle { } impl Copy for Handle { } +// We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be +// `Clone`able is when it is an immutable reference and therefore `Copy`. impl Clone for Handle { fn clone(&self) -> Self { *self @@ -633,12 +775,14 @@ impl Clone for Handle { } impl Handle { + /// Retrieves the node that contains the edge of key/value pair this handle pointes to. pub fn into_node(self) -> Node { self.node } } impl Handle, marker::KV> { + /// Creates a new handle to a key/value pair in `node`. `idx` must be less than `node.len()`. 
pub fn new_kv(node: NodeRef, idx: usize) -> Self { // Necessary for correctness, but in a private module debug_assert!(idx < node.len()); @@ -670,6 +814,7 @@ impl PartialEq impl Handle, HandleType> { + /// Temporarily takes out another, immutable handle on the same location. pub fn reborrow(&self) -> Handle, HandleType> { @@ -685,6 +830,16 @@ impl impl<'a, K, V, NodeType, HandleType> Handle, K, V, NodeType>, HandleType> { + /// Temporarily takes out another, mutable handle on the same location. Beware, as + /// this method is very dangerous, doubly so since it may not immediately appear + /// dangerous. + /// + /// Because mutable pointers can roam anywhere around the tree and can even (through + /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` + /// can easily be used to make the original mutable pointer dangling, or, in the case + /// of a reborrowed handle, out of bounds. + // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts + // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. pub unsafe fn reborrow_mut(&mut self) -> Handle, HandleType> { @@ -700,6 +855,8 @@ impl<'a, K, V, NodeType, HandleType> impl Handle, marker::Edge> { + /// Creates a new handle to an edge in `node`. `idx` must be less than or equal to + /// `node.len()`. pub fn new_edge(node: NodeRef, idx: usize) -> Self { // Necessary for correctness, but in a private module debug_assert!(idx <= node.len()); @@ -733,6 +890,11 @@ impl } impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::Edge> { + /// Inserts a new key/value pair between the key/value pairs to the right and left of + /// this edge. This method assumes that there is enough space in the node for the new + /// pair to fit. + /// + /// The returned pointer points to the inserted value. 
fn insert_fit(&mut self, key: K, val: V) -> *mut V { // Necessary for correctness, but in a private module debug_assert!(self.node.len() < CAPACITY); @@ -747,6 +909,10 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::Edge } } + /// Inserts a new key/value pair between the key/value pairs to the right and left of + /// this edge. This method splits the node if there isn't enough room. + /// + /// The returned pointer points to the inserted value. pub fn insert(mut self, key: K, val: V) -> (InsertResult<'a, K, V, marker::Leaf>, *mut V) { @@ -774,6 +940,8 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::Edge } impl<'a, K, V> Handle, K, V, marker::Internal>, marker::Edge> { + /// Fixes the parent pointer and index in the child node below this edge. This is useful + /// when the ordering of edges has been changed, such as in the various `insert` methods. fn correct_parent_link(mut self) { let idx = self.idx as u16; let ptr = self.node.as_internal_mut() as *mut _; @@ -782,18 +950,24 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: child.as_leaf_mut().parent_idx = idx; } + /// Unsafely asserts to the compiler some static information about whether the underlying + /// node of this handle is a `Leaf`. unsafe fn cast_unchecked(&mut self) -> Handle, marker::Edge> { Handle::new_edge(self.node.cast_unchecked(), self.idx) } + /// Inserts a new key/value pair and an edge that will go to the right of that new pair + /// between this edge and the key/value pair to the right of this edge. This method assumes + /// that there is enough space in the node for the new pair to fit. fn insert_fit(&mut self, key: K, val: V, edge: Root) { // Necessary for correctness, but in an internal module debug_assert!(self.node.len() < CAPACITY); debug_assert!(edge.height == self.node.height - 1); unsafe { + // This cast is a lie, but it allows us to reuse the key/value insertion logic. 
self.cast_unchecked::().insert_fit(key, val); slice_insert( @@ -811,6 +985,9 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: } } + /// Inserts a new key/value pair and an edge that will go to the right of that new pair + /// between this edge and the key/value pair to the right of this edge. This method splits + /// the node if there isn't enough room. pub fn insert(mut self, key: K, val: V, edge: Root) -> InsertResult<'a, K, V, marker::Internal> { @@ -843,6 +1020,10 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: impl Handle, marker::Edge> { + /// Finds the node pointed to by this edge. + /// + /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should + /// both, upon success, do nothing. pub fn descend(self) -> NodeRef { NodeRef { height: self.node.height - 1, @@ -885,6 +1066,13 @@ impl<'a, K, V, NodeType> Handle, K, V, NodeType>, marker } impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::KV> { + /// Splits the underlying node into three parts: + /// + /// - The node is truncated to only contain the key/value pairs to the right of + /// this handle. + /// - The key and value pointed to by this handle and extracted. + /// - All the key/value pairs to the right of this handle are put into a newly + /// allocated node. pub fn split(mut self) -> (NodeRef, K, V, marker::Leaf>, K, V, Root) { unsafe { @@ -920,6 +1108,8 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::KV> } } + /// Removes the key/value pair pointed to by this handle, returning the edge between the + /// now adjacent key/value pairs to the left and right of this handle. 
pub fn remove(mut self) -> (Handle, K, V, marker::Leaf>, marker::Edge>, K, V) { unsafe { @@ -932,6 +1122,13 @@ impl<'a, K, V> Handle, K, V, marker::Leaf>, marker::KV> } impl<'a, K, V> Handle, K, V, marker::Internal>, marker::KV> { + /// Splits the underlying node into three parts: + /// + /// - The node is truncated to only contain the edges and key/value pairs to the + /// right of this handle. + /// - The key and value pointed to by this handle and extracted. + /// - All the edges and key/value pairs to the right of this handle are put into + /// a newly allocated node. pub fn split(mut self) -> (NodeRef, K, V, marker::Internal>, K, V, Root) { unsafe { @@ -979,6 +1176,9 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: } } + /// Returns whether it is valid to call `.merge()`, i.e., whether there is enough room in + /// a node to hold the combination of the nodes to the left and right of this handle along + /// with the key/value pair at this handle. pub fn can_merge(&self) -> bool { ( self.reborrow() @@ -993,6 +1193,11 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: ) <= CAPACITY } + /// Combines the node immediately to the left of this handle, the key/value pair pointed + /// to by this handle, and the node immediately to the right of this handle into one new + /// child of the underlying node, returning an edge referencing that new child. + /// + /// Assumes that this edge `.can_merge()`. 
pub fn merge(mut self) -> Handle, K, V, marker::Internal>, marker::Edge> { let self1 = unsafe { ptr::read(&self) }; @@ -1027,6 +1232,8 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: } self.node.as_leaf_mut().len -= 1; + left_node.as_leaf_mut().len += right_len as u16 + 1; + if self.node.height > 1 { ptr::copy_nonoverlapping( right_node.cast_unchecked().as_internal().edges.as_ptr(), @@ -1058,16 +1265,198 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: ); } - left_node.as_leaf_mut().len += right_len as u16 + 1; - Handle::new_edge(self.node, self.idx) } } + + /// This removes a key/value pair from the left child and replaces it with the key/value pair + /// pointed to by this handle while pushing the old key/value pair of this handle into the right + /// child. + pub fn steal_left(&mut self) { + unsafe { + let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop(); + + let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k); + let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v); + + match self.reborrow_mut().right_edge().descend().force() { + ForceResult::Leaf(mut leaf) => leaf.push_front(k, v), + ForceResult::Internal(mut internal) => internal.push_front(k, v, edge.unwrap()) + } + } + } + + /// This removes a key/value pair from the right child and replaces it with the key/value pair + /// pointed to by this handle while pushing the old key/value pair of this handle into the left + /// child. 
+ pub fn steal_right(&mut self) { + unsafe { + let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front(); + + let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k); + let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v); + + match self.reborrow_mut().left_edge().descend().force() { + ForceResult::Leaf(mut leaf) => leaf.push(k, v), + ForceResult::Internal(mut internal) => internal.push(k, v, edge.unwrap()) + } + } + } + + /// This does stealing similar to `steal_left` but steals multiple elements at once. + pub fn bulk_steal_left(&mut self, count: usize) { + unsafe { + let mut left_node = ptr::read(self).left_edge().descend(); + let left_len = left_node.len(); + let mut right_node = ptr::read(self).right_edge().descend(); + let right_len = right_node.len(); + + // Make sure that we may steal safely. + debug_assert!(right_len + count <= CAPACITY); + debug_assert!(left_len >= count); + + let new_left_len = left_len - count; + + // Move data. + { + let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); + let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); + let parent_kv = { + let kv = self.reborrow_mut().into_kv_mut(); + (kv.0 as *mut K, kv.1 as *mut V) + }; + + // Make room for stolen elements in the right child. + ptr::copy(right_kv.0, + right_kv.0.offset(count as isize), + right_len); + ptr::copy(right_kv.1, + right_kv.1.offset(count as isize), + right_len); + + // Move elements from the left child to the right one. + move_kv(left_kv, new_left_len + 1, right_kv, 0, count - 1); + + // Move parent's key/value pair to the right child. + move_kv(parent_kv, 0, right_kv, count - 1, 1); + + // Move the left-most stolen pair to the parent. 
+ move_kv(left_kv, new_left_len, parent_kv, 0, 1); + } + + left_node.reborrow_mut().as_leaf_mut().len -= count as u16; + right_node.reborrow_mut().as_leaf_mut().len += count as u16; + + match (left_node.force(), right_node.force()) { + (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { + // Make room for stolen edges. + let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); + ptr::copy(right_edges, + right_edges.offset(count as isize), + right_len + 1); + right.correct_childrens_parent_links(count, count + right_len + 1); + + move_edges(left, new_left_len + 1, right, 0, count); + }, + (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { } + _ => { unreachable!(); } + } + } + } + + /// The symmetric clone of `bulk_steal_left`. + pub fn bulk_steal_right(&mut self, count: usize) { + unsafe { + let mut left_node = ptr::read(self).left_edge().descend(); + let left_len = left_node.len(); + let mut right_node = ptr::read(self).right_edge().descend(); + let right_len = right_node.len(); + + // Make sure that we may steal safely. + debug_assert!(left_len + count <= CAPACITY); + debug_assert!(right_len >= count); + + let new_right_len = right_len - count; + + // Move data. + { + let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); + let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); + let parent_kv = { + let kv = self.reborrow_mut().into_kv_mut(); + (kv.0 as *mut K, kv.1 as *mut V) + }; + + // Move parent's key/value pair to the left child. + move_kv(parent_kv, 0, left_kv, left_len, 1); + + // Move elements from the right child to the left one. + move_kv(right_kv, 0, left_kv, left_len + 1, count - 1); + + // Move the right-most stolen pair to the parent. 
+ move_kv(right_kv, count - 1, parent_kv, 0, 1); + + // Fix right indexing + ptr::copy(right_kv.0.offset(count as isize), + right_kv.0, + new_right_len); + ptr::copy(right_kv.1.offset(count as isize), + right_kv.1, + new_right_len); + } + + left_node.reborrow_mut().as_leaf_mut().len += count as u16; + right_node.reborrow_mut().as_leaf_mut().len -= count as u16; + + match (left_node.force(), right_node.force()) { + (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { + move_edges(right.reborrow_mut(), 0, left, left_len + 1, count); + + // Fix right indexing. + let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); + ptr::copy(right_edges.offset(count as isize), + right_edges, + new_right_len + 1); + right.correct_childrens_parent_links(0, new_right_len + 1); + }, + (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { } + _ => { unreachable!(); } + } + } + } +} + +unsafe fn move_kv( + source: (*mut K, *mut V), source_offset: usize, + dest: (*mut K, *mut V), dest_offset: usize, + count: usize) +{ + ptr::copy_nonoverlapping(source.0.offset(source_offset as isize), + dest.0.offset(dest_offset as isize), + count); + ptr::copy_nonoverlapping(source.1.offset(source_offset as isize), + dest.1.offset(dest_offset as isize), + count); +} + +// Source and destination must have the same height. +unsafe fn move_edges( + mut source: NodeRef, source_offset: usize, + mut dest: NodeRef, dest_offset: usize, + count: usize) +{ + let source_ptr = source.as_internal_mut().edges.as_mut_ptr(); + let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr(); + ptr::copy_nonoverlapping(source_ptr.offset(source_offset as isize), + dest_ptr.offset(dest_offset as isize), + count); + dest.correct_childrens_parent_links(dest_offset, dest_offset + count); } impl Handle, HandleType> { + /// Check whether the underlying node is an `Internal` node or a `Leaf` node. 
pub fn force(self) -> ForceResult< Handle, HandleType>, Handle, HandleType> @@ -1087,6 +1476,41 @@ impl } } +impl<'a, K, V> Handle, K, V, marker::LeafOrInternal>, marker::Edge> { + /// Move the suffix after `self` from one node to another one. `right` must be empty. + /// The first edge of `right` remains unchanged. + pub fn move_suffix(&mut self, + right: &mut NodeRef, K, V, marker::LeafOrInternal>) { + unsafe { + let left_new_len = self.idx; + let mut left_node = self.reborrow_mut().into_node(); + + let right_new_len = left_node.len() - left_new_len; + let mut right_node = right.reborrow_mut(); + + debug_assert!(right_node.len() == 0); + debug_assert!(left_node.height == right_node.height); + + let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); + let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); + + + move_kv(left_kv, left_new_len, right_kv, 0, right_new_len); + + left_node.reborrow_mut().as_leaf_mut().len = left_new_len as u16; + right_node.reborrow_mut().as_leaf_mut().len = right_new_len as u16; + + match (left_node.force(), right_node.force()) { + (ForceResult::Internal(left), ForceResult::Internal(right)) => { + move_edges(left, left_new_len + 1, right, 1, right_new_len); + }, + (ForceResult::Leaf(_), ForceResult::Leaf(_)) => { } + _ => { unreachable!(); } + } + } + } +} + pub enum ForceResult { Leaf(Leaf), Internal(Internal) diff --git a/src/libcollections/btree/set.rs b/src/libcollections/btree/set.rs index c1381dde762e5..c57266d9e3b4a 100644 --- a/src/libcollections/btree/set.rs +++ b/src/libcollections/btree/set.rs @@ -12,9 +12,10 @@ // to TreeMap use core::cmp::Ordering::{self, Less, Greater, Equal}; +use core::cmp::{min, max}; use core::fmt::Debug; use core::fmt; -use core::iter::{Peekable, Map, FromIterator}; +use core::iter::{Peekable, FromIterator, FusedIterator}; use core::ops::{BitOr, BitAnd, BitXor, Sub}; use borrow::Borrow; @@ -33,10 +34,40 @@ use Bound; /// to any other item, as determined by the [`Ord`] trait, changes 
while it is in the set. This is /// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. /// -/// [`BTreeMap`]: ../struct.BTreeMap.html -/// [`Ord`]: ../../core/cmp/trait.Ord.html +/// [`BTreeMap`]: struct.BTreeMap.html +/// [`Ord`]: ../../std/cmp/trait.Ord.html /// [`Cell`]: ../../std/cell/struct.Cell.html /// [`RefCell`]: ../../std/cell/struct.RefCell.html +/// +/// # Examples +/// +/// ``` +/// use std::collections::BTreeSet; +/// +/// // Type inference lets us omit an explicit type signature (which +/// // would be `BTreeSet<&str>` in this example). +/// let mut books = BTreeSet::new(); +/// +/// // Add some books. +/// books.insert("A Dance With Dragons"); +/// books.insert("To Kill a Mockingbird"); +/// books.insert("The Odyssey"); +/// books.insert("The Great Gatsby"); +/// +/// // Check for a specific one. +/// if !books.contains("The Winds of Winter") { +/// println!("We have {} books, but The Winds of Winter ain't one.", +/// books.len()); +/// } +/// +/// // Remove a book. +/// books.remove("The Odyssey"); +/// +/// // Iterate over everything. +/// for book in &books { +/// println!("{}", book); +/// } +/// ``` #[derive(Clone, Hash, PartialEq, Eq, Ord, PartialOrd)] #[stable(feature = "rust1", since = "1.0.0")] pub struct BTreeSet { @@ -52,12 +83,12 @@ pub struct Iter<'a, T: 'a> { /// An owning iterator over a BTreeSet's items. #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { - iter: Map<::btree_map::IntoIter, fn((T, ())) -> T>, + iter: ::btree_map::IntoIter, } /// An iterator over a sub-range of BTreeSet's items. pub struct Range<'a, T: 'a> { - iter: Map<::btree_map::Range<'a, T, ()>, fn((&'a T, &'a ())) -> &'a T>, + iter: ::btree_map::Range<'a, T, ()>, } /// A lazy iterator producing elements in the set difference (in-order). 
@@ -160,12 +191,7 @@ impl BTreeSet { -> Range<'a, T> where T: Borrow + Borrow { - fn first((a, _): (A, B)) -> A { - a - } - let first: fn((&'a T, &'a ())) -> &'a T = first; // coerce to fn pointer - - Range { iter: self.map.range(min, max).map(first) } + Range { iter: self.map.range(min, max) } } } @@ -353,7 +379,7 @@ impl BTreeSet { /// The value may be any borrowed form of the set's value type, /// but the ordering on the borrowed form *must* match the /// ordering on the value type. - #[unstable(feature = "set_recovery", issue = "28050")] + #[stable(feature = "set_recovery", since = "1.9.0")] pub fn get(&self, value: &Q) -> Option<&T> where T: Borrow, Q: Ord @@ -451,9 +477,9 @@ impl BTreeSet { /// Adds a value to the set. /// - /// If the set did not have a value present, `true` is returned. + /// If the set did not have this value present, `true` is returned. /// - /// If the set did have this key present, `false` is returned, and the + /// If the set did have this value present, `false` is returned, and the /// entry is not updated. See the [module-level documentation] for more. /// /// [module-level documentation]: index.html#insert-and-complex-keys @@ -476,7 +502,7 @@ impl BTreeSet { /// Adds a value to the set, replacing the existing value, if any, that is equal to the given /// one. Returns the replaced value. - #[unstable(feature = "set_recovery", issue = "28050")] + #[stable(feature = "set_recovery", since = "1.9.0")] pub fn replace(&mut self, value: T) -> Option { Recover::replace(&mut self.map, value) } @@ -512,13 +538,80 @@ impl BTreeSet { /// The value may be any borrowed form of the set's value type, /// but the ordering on the borrowed form *must* match the /// ordering on the value type. 
- #[unstable(feature = "set_recovery", issue = "28050")] + #[stable(feature = "set_recovery", since = "1.9.0")] pub fn take(&mut self, value: &Q) -> Option where T: Borrow, Q: Ord { Recover::take(&mut self.map, value) } + + /// Moves all elements from `other` into `Self`, leaving `other` empty. + /// + /// # Examples + /// + /// ``` + /// use std::collections::BTreeSet; + /// + /// let mut a = BTreeSet::new(); + /// a.insert(1); + /// a.insert(2); + /// a.insert(3); + /// + /// let mut b = BTreeSet::new(); + /// b.insert(3); + /// b.insert(4); + /// b.insert(5); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.len(), 5); + /// assert_eq!(b.len(), 0); + /// + /// assert!(a.contains(&1)); + /// assert!(a.contains(&2)); + /// assert!(a.contains(&3)); + /// assert!(a.contains(&4)); + /// assert!(a.contains(&5)); + /// ``` + #[stable(feature = "btree_append", since = "1.11.0")] + pub fn append(&mut self, other: &mut Self) { + self.map.append(&mut other.map); + } + + /// Splits the collection into two at the given key. Returns everything after the given key, + /// including the key. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BTreeMap; + /// + /// let mut a = BTreeMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// a.insert(3, "c"); + /// a.insert(17, "d"); + /// a.insert(41, "e"); + /// + /// let b = a.split_off(&3); + /// + /// assert_eq!(a.len(), 2); + /// assert_eq!(b.len(), 3); + /// + /// assert_eq!(a[&1], "a"); + /// assert_eq!(a[&2], "b"); + /// + /// assert_eq!(b[&3], "c"); + /// assert_eq!(b[&17], "d"); + /// assert_eq!(b[&41], "e"); + /// ``` + #[stable(feature = "btree_split_off", since = "1.11.0")] + pub fn split_off(&mut self, key: &Q) -> Self where T: Borrow { + BTreeSet { map: self.map.split_off(key) } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -548,12 +641,7 @@ impl IntoIterator for BTreeSet { /// assert_eq!(v, [1, 2, 3, 4]); /// ``` fn into_iter(self) -> IntoIter { - fn first((a, _): (A, B)) -> A { - a - } - let first: fn((T, ())) -> T = first; // coerce to fn pointer - - IntoIter { iter: self.map.into_iter().map(first) } + IntoIter { iter: self.map.into_iter() } } } @@ -586,6 +674,7 @@ impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BTreeSet { #[stable(feature = "rust1", since = "1.0.0")] impl Default for BTreeSet { + /// Makes an empty `BTreeSet` with a reasonable choice of B. 
fn default() -> BTreeSet { BTreeSet::new() } @@ -690,6 +779,7 @@ impl Debug for BTreeSet { } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { iter: self.iter.clone() } @@ -713,15 +803,19 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +impl<'a, T> ExactSizeIterator for Iter<'a, T> { + fn len(&self) -> usize { self.iter.len() } +} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { - self.iter.next() + self.iter.next().map(|(k, _)| k) } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() @@ -730,12 +824,16 @@ impl Iterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for IntoIter { fn next_back(&mut self) -> Option { - self.iter.next_back() + self.iter.next_back().map(|(k, _)| k) } } #[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for IntoIter {} +impl ExactSizeIterator for IntoIter { + fn len(&self) -> usize { self.iter.len() } +} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} impl<'a, T> Clone for Range<'a, T> { fn clone(&self) -> Range<'a, T> { @@ -746,15 +844,18 @@ impl<'a, T> Iterator for Range<'a, T> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { - self.iter.next() + self.iter.next().map(|(k, _)| k) } } impl<'a, T> DoubleEndedIterator for Range<'a, T> { fn next_back(&mut self) -> Option<&'a T> { - self.iter.next_back() + self.iter.next_back().map(|(k, _)| k) } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Range<'a, T> {} + /// Compare `x` and `y`, but return `short` if x is None and `long` if y is None fn cmp_opt(x: Option<&T>, y: Option<&T>, short: Ordering, long: 
Ordering) -> Ordering { match (x, y) { @@ -764,6 +865,7 @@ fn cmp_opt(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering } } +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Difference<'a, T> { fn clone(&self) -> Difference<'a, T> { Difference { @@ -790,8 +892,18 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { } } } + + fn size_hint(&self) -> (usize, Option) { + let a_len = self.a.len(); + let b_len = self.b.len(); + (a_len.saturating_sub(b_len), Some(a_len)) + } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for SymmetricDifference<'a, T> { fn clone(&self) -> SymmetricDifference<'a, T> { SymmetricDifference { @@ -816,8 +928,16 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } } } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(self.a.len() + self.b.len())) + } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Intersection<'a, T> { fn clone(&self) -> Intersection<'a, T> { Intersection { @@ -852,8 +972,16 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { } } } + + fn size_hint(&self) -> (usize, Option) { + (0, Some(min(self.a.len(), self.b.len()))) + } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Union<'a, T> { fn clone(&self) -> Union<'a, T> { Union { @@ -878,4 +1006,13 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { } } } + + fn size_hint(&self) -> (usize, Option) { + let a_len = self.a.len(); + let b_len = self.b.len(); + (max(a_len, b_len), Some(a_len + b_len)) + } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: Ord> FusedIterator for Union<'a, T> {} diff --git 
a/src/libcollections/enum_set.rs b/src/libcollections/enum_set.rs index 8b8ccd526c90f..79e0021b148a0 100644 --- a/src/libcollections/enum_set.rs +++ b/src/libcollections/enum_set.rs @@ -16,11 +16,11 @@ #![unstable(feature = "enumset", reason = "matches collection reform specification, \ waiting for dust to settle", - issue = "0")] + issue = "37966")] use core::marker; use core::fmt; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::ops::{Sub, BitOr, BitAnd, BitXor}; // FIXME(contentions): implement union family of methods? (general design may be @@ -48,7 +48,6 @@ impl Clone for EnumSet { } } -#[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for EnumSet { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_set().entries(self).finish() @@ -81,13 +80,13 @@ pub trait CLike { fn from_usize(usize) -> Self; } -#[allow(deprecated)] fn bit(e: &E) -> usize { - use core::usize; + use core::mem; let value = e.to_usize(); - assert!(value < usize::BITS, + let bits = mem::size_of::() * 8; + assert!(value < bits, "EnumSet only supports up to {} variants.", - usize::BITS - 1); + bits - 1); 1 << value } @@ -266,6 +265,9 @@ impl Iterator for Iter { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Iter {} + impl FromIterator for EnumSet { fn from_iter>(iter: I) -> EnumSet { let mut ret = EnumSet::new(); @@ -274,7 +276,6 @@ impl FromIterator for EnumSet { } } -#[stable(feature = "rust1", since = "1.0.0")] impl<'a, E> IntoIterator for &'a EnumSet where E: CLike { type Item = E; @@ -293,7 +294,6 @@ impl Extend for EnumSet { } } -#[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, E: 'a + CLike + Copy> Extend<&'a E> for EnumSet { fn extend>(&mut self, iter: I) { self.extend(iter.into_iter().cloned()); diff --git a/src/libcollections/fmt.rs b/src/libcollections/fmt.rs index d46a3e7e89e88..883417e9f4ec7 100644 --- a/src/libcollections/fmt.rs +++ b/src/libcollections/fmt.rs @@ -8,19 +8,16 
@@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Utilities for formatting and printing strings +//! Utilities for formatting and printing `String`s //! //! This module contains the runtime support for the `format!` syntax extension. //! This macro is implemented in the compiler to emit calls to this module in -//! order to format arguments at runtime into strings and streams. +//! order to format arguments at runtime into strings. //! //! # Usage //! //! The `format!` macro is intended to be familiar to those coming from C's -//! printf/fprintf functions or Python's `str.format` function. In its current -//! revision, the `format!` macro returns a `String` type which is the result of -//! the formatting. In the future it will also be able to pass in a stream to -//! format arguments directly while performing minimal allocations. +//! printf/fprintf functions or Python's `str.format` function. //! //! Some examples of the `format!` extension are: //! @@ -31,6 +28,7 @@ //! format!("{:?}", (3, 4)); // => "(3, 4)" //! format!("{value}", value=4); // => "4" //! format!("{} {}", 1, 2); // => "1 2" +//! format!("{:04}", 42); // => "0042" with leading zeros //! ``` //! //! From these, you can see that the first argument is a format string. It is @@ -81,7 +79,7 @@ //! //! ``` //! format!("{argument}", argument = "test"); // => "test" -//! format!("{name} {}", 1, name = 2); // => "2 1" +//! format!("{name} {}", 1, name = 2); // => "2 1" //! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b" //! ``` //! @@ -104,8 +102,8 @@ //! octal. //! //! There are various parameters which do require a particular type, however. -//! Namely, the `{:.*}` syntax, which sets the number of numbers after the -//! decimal in floating-point types: +//! An example is the `{:.*}` syntax, which sets the number of decimal places +//! in floating-point types: //! //! ``` //! 
let formatted_number = format!("{:.*}", 2, 1.234567); @@ -167,9 +165,15 @@ //! provides some helper methods. //! //! Additionally, the return value of this function is `fmt::Result` which is a -//! typedef to `Result<(), std::io::Error>` (also known as `std::io::Result<()>`). -//! Formatting implementations should ensure that they return errors from `write!` -//! correctly (propagating errors upward). +//! type alias of `Result<(), std::fmt::Error>`. Formatting implementations +//! should ensure that they propagate errors from the `Formatter` (e.g., when +//! calling `write!`) however, they should never return errors spuriously. That +//! is, a formatting implementation must and may only return an error if the +//! passed-in `Formatter` returns an error. This is because, contrary to what +//! the function signature might suggest, string formatting is an infallible +//! operation. This function only returns a result because writing to the +//! underlying stream might fail and it must provide a way to propagate the fact +//! that an error has occurred back up the stack. //! //! An example of implementing the formatting traits would look //! like: @@ -257,8 +261,8 @@ //! This and `writeln` are two macros which are used to emit the format string //! to a specified stream. This is used to prevent intermediate allocations of //! format strings and instead directly write the output. Under the hood, this -//! function is actually invoking the `write` function defined in this module. -//! Example usage is: +//! function is actually invoking the `write_fmt` function defined on the +//! `std::io::Write` trait. Example usage is: //! //! ``` //! # #![allow(unused_must_use)] @@ -292,15 +296,13 @@ //! use std::fmt; //! use std::io::{self, Write}; //! -//! fmt::format(format_args!("this returns {}", "String")); -//! //! let mut some_writer = io::stdout(); //! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro")); //! //! fn my_fmt_fn(args: fmt::Arguments) { //! 
write!(&mut io::stdout(), "{}", args); //! } -//! my_fmt_fn(format_args!("or a {} too", "function")); +//! my_fmt_fn(format_args!(", or a {} too", "function")); //! ``` //! //! The result of the `format_args!` macro is a value of type `fmt::Arguments`. @@ -316,7 +318,7 @@ //! # Syntax //! //! The syntax for the formatting language used is drawn from other languages, -//! so it should not be too alien. Arguments are formatted with python-like +//! so it should not be too alien. Arguments are formatted with Python-like //! syntax, meaning that arguments are surrounded by `{}` instead of the C-like //! `%`. The actual grammar for the formatting syntax is: //! @@ -325,7 +327,7 @@ //! format := '{' [ argument ] [ ':' format_spec ] '}' //! argument := integer | identifier //! -//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type] +//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type] //! fill := character //! align := '<' | '^' | '>' //! sign := '+' | '-' @@ -333,7 +335,7 @@ //! precision := count | '*' //! type := identifier | '' //! count := parameter | integer -//! parameter := integer '$' +//! parameter := argument '$' //! ``` //! //! # Formatting Parameters @@ -348,8 +350,8 @@ //! The fill character is provided normally in conjunction with the `width` //! parameter. This indicates that if the value being formatted is smaller than //! `width` some extra characters will be printed around it. The extra -//! characters are specified by `fill`, and the alignment can be one of two -//! options: +//! characters are specified by `fill`, and the alignment can be one of the +//! following options: //! //! * `<` - the argument is left-aligned in `width` columns //! * `^` - the argument is center-aligned in `width` columns @@ -395,14 +397,26 @@ //! `0`. //! //! The value for the width can also be provided as a `usize` in the list of -//! parameters by using the `2$` syntax indicating that the second argument is a -//! 
`usize` specifying the width. +//! parameters by using the dollar syntax indicating that the second argument is +//! a `usize` specifying the width, for example: +//! +//! ``` +//! // All of these print "Hello x !" +//! println!("Hello {:5}!", "x"); +//! println!("Hello {:1$}!", "x", 5); +//! println!("Hello {1:0$}!", 5, "x"); +//! println!("Hello {:width$}!", "x", width = 5); +//! ``` +//! +//! Referring to an argument with the dollar syntax does not affect the "next +//! argument" counter, so it's usually a good idea to refer to arguments by +//! position, or use named arguments. //! //! ## Precision //! //! For non-numeric types, this can be considered a "maximum width". If the resulting string is -//! longer than this width, then it is truncated down to this many characters and only those are -//! emitted. +//! longer than this width, then it is truncated down to this many characters and that truncated +//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set. //! //! For integral types, this is ignored. //! @@ -415,7 +429,7 @@ //! //! the integer `N` itself is the precision. //! -//! 2. An integer followed by dollar sign `.N$`: +//! 2. An integer or name followed by dollar sign `.N$`: //! //! use format *argument* `N` (which must be a `usize`) as the precision. //! @@ -426,31 +440,29 @@ //! in this case, if one uses the format string `{:.*}`, then the `` part refers //! to the *value* to print, and the `precision` must come in the input preceding ``. //! -//! For example, these: +//! For example, the following calls all print the same thing `Hello x is 0.01000`: //! //! ``` -//! // Hello {arg 0 (x)} is {arg 1 (0.01} with precision specified inline (5)} +//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)} //! println!("Hello {0} is {1:.5}", "x", 0.01); //! -//! // Hello {arg 1 (x)} is {arg 2 (0.01} with precision specified in arg 0 (5)} +//! 
// Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)} //! println!("Hello {1} is {2:.0$}", 5, "x", 0.01); //! -//! // Hello {arg 0 (x)} is {arg 2 (0.01} with precision specified in arg 1 (5)} +//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)} //! println!("Hello {0} is {2:.1$}", "x", 5, 0.01); //! -//! // Hello {next arg (x)} is {second of next two args (0.01} with precision +//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision //! // specified in first of next two args (5)} //! println!("Hello {} is {:.*}", "x", 5, 0.01); //! -//! // Hello {next arg (x)} is {arg 2 (0.01} with precision +//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision //! // specified in its predecessor (5)} //! println!("Hello {} is {2:.*}", "x", 5, 0.01); -//! ``` -//! -//! All print the same thing: //! -//! ```text -//! Hello x is 0.01000 +//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified +//! // in arg "prec" (5)} +//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01); //! ``` //! //! While these: @@ -458,6 +470,7 @@ //! ``` //! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56); //! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56"); +//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56"); //! ``` //! //! print two significantly different things: @@ -465,6 +478,7 @@ //! ```text //! Hello, `1234.560` has 3 fractional digits //! Hello, `123` has 3 characters +//! Hello, ` 123` has 3 right-aligned characters //! ``` //! //! 
# Escaping @@ -491,10 +505,6 @@ pub use core::fmt::{LowerExp, UpperExp}; pub use core::fmt::Error; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{ArgumentV1, Arguments, write}; -#[unstable(feature = "fmt_radix", issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -#[allow(deprecated)] -pub use core::fmt::{radix, Radix, RadixFmt}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple}; @@ -509,12 +519,24 @@ use string; /// /// # Examples /// +/// Basic usage: +/// /// ``` /// use std::fmt; /// /// let s = fmt::format(format_args!("Hello, {}!", "world")); -/// assert_eq!(s, "Hello, world!".to_string()); +/// assert_eq!(s, "Hello, world!"); /// ``` +/// +/// Please note that using [`format!`][format!] might be preferrable. +/// Example: +/// +/// ``` +/// let s = format!("Hello, {}!", "world"); +/// assert_eq!(s, "Hello, world!"); +/// ``` +/// +/// [format!]: ../macro.format.html #[stable(feature = "rust1", since = "1.0.0")] pub fn format(args: Arguments) -> string::String { let mut output = string::String::new(); diff --git a/src/libcollections/lib.rs b/src/libcollections/lib.rs index 6077a4c01045b..08288b4de8bde 100644 --- a/src/libcollections/lib.rs +++ b/src/libcollections/lib.rs @@ -26,36 +26,36 @@ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/", test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] -#![allow(trivial_casts)] #![cfg_attr(test, allow(deprecated))] // rand +#![cfg_attr(not(stage0), deny(warnings))] #![feature(alloc)] +#![feature(allow_internal_unstable)] #![feature(box_patterns)] #![feature(box_syntax)] +#![cfg_attr(not(test), feature(char_escape_debug))] #![feature(core_intrinsics)] -#![feature(decode_utf16)] -#![feature(drop_in_place)] #![feature(dropck_parametricity)] +#![feature(exact_size_is_empty)] #![feature(fmt_internals)] -#![feature(fmt_radix)] 
+#![feature(fused)] #![feature(heap_api)] -#![feature(iter_arith)] -#![feature(iter_arith)] +#![feature(inclusive_range)] #![feature(lang_items)] #![feature(nonzero)] -#![feature(num_bits_bytes)] #![feature(pattern)] +#![feature(placement_in)] +#![feature(placement_new_protocol)] #![feature(shared)] -#![feature(slice_bytes)] #![feature(slice_patterns)] +#![feature(specialization)] #![feature(staged_api)] #![feature(step_by)] -#![feature(str_char)] -#![feature(unboxed_closures)] +#![feature(trusted_len)] #![feature(unicode)] #![feature(unique)] -#![feature(unsafe_no_drop_flag)] -#![cfg_attr(test, feature(clone_from_slice, rand, test))] +#![feature(slice_get_slice)] +#![cfg_attr(test, feature(rand, test))] #![no_std] @@ -68,13 +68,21 @@ extern crate std; #[cfg(test)] extern crate test; +#[doc(no_inline)] pub use binary_heap::BinaryHeap; +#[doc(no_inline)] pub use btree_map::BTreeMap; +#[doc(no_inline)] pub use btree_set::BTreeSet; +#[doc(no_inline)] pub use linked_list::LinkedList; +#[doc(no_inline)] pub use enum_set::EnumSet; +#[doc(no_inline)] pub use vec_deque::VecDeque; +#[doc(no_inline)] pub use string::String; +#[doc(no_inline)] pub use vec::Vec; // Needed for the vec! macro @@ -98,12 +106,14 @@ pub mod vec_deque; #[stable(feature = "rust1", since = "1.0.0")] pub mod btree_map { + //! A map based on a B-Tree. #[stable(feature = "rust1", since = "1.0.0")] pub use btree::map::*; } #[stable(feature = "rust1", since = "1.0.0")] pub mod btree_set { + //! A set based on a B-Tree. #[stable(feature = "rust1", since = "1.0.0")] pub use btree::set::*; } @@ -124,3 +134,10 @@ pub enum Bound { /// An infinite endpoint. Indicates that there is no bound in this direction. Unbounded, } + +/// An intermediate trait for specialization of `Extend`. +#[doc(hidden)] +trait SpecExtend { + /// Extends `self` with the contents of the given iterator. 
+ fn spec_extend(&mut self, iter: I); +} diff --git a/src/libcollections/linked_list.rs b/src/libcollections/linked_list.rs index 1bd5a83d43708..67f3708a62b91 100644 --- a/src/libcollections/linked_list.rs +++ b/src/libcollections/linked_list.rs @@ -13,241 +13,182 @@ //! The `LinkedList` allows pushing and popping elements at either end and is thus //! efficiently usable as a double-ended queue. -// LinkedList is constructed like a singly-linked list over the field `next`. -// including the last link being None; each Node owns its `next` field. -// -// Backlinks over LinkedList::prev are raw pointers that form a full chain in -// the reverse direction. - #![stable(feature = "rust1", since = "1.0.0")] -use alloc::boxed::Box; +use alloc::boxed::{Box, IntermediateBox}; use core::cmp::Ordering; use core::fmt; use core::hash::{Hasher, Hash}; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; +use core::marker::PhantomData; use core::mem; -use core::ptr::Shared; +use core::ops::{BoxPlace, InPlace, Place, Placer}; +use core::ptr::{self, Shared}; + +use super::SpecExtend; /// A doubly-linked list. #[stable(feature = "rust1", since = "1.0.0")] pub struct LinkedList { - length: usize, - list_head: Link, - list_tail: Rawlink>, -} - -type Link = Option>>; - -struct Rawlink { - p: Option>, + head: Option>>, + tail: Option>>, + len: usize, + marker: PhantomData>>, } -impl Copy for Rawlink {} -unsafe impl Send for Rawlink {} -unsafe impl Sync for Rawlink {} - struct Node { - next: Link, - prev: Rawlink>, - value: T, + next: Option>>, + prev: Option>>, + element: T, } -/// An iterator over references to the items of a `LinkedList`. +/// An iterator over references to the elements of a `LinkedList`. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { - head: &'a Link, - tail: Rawlink>, - nelem: usize, + head: Option>>, + tail: Option>>, + len: usize, + marker: PhantomData<&'a Node>, } // FIXME #19839: deriving is too aggressive on the bounds (T doesn't need to be Clone). #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Iter<'a, T> { - Iter { - head: self.head.clone(), - tail: self.tail, - nelem: self.nelem, - } + fn clone(&self) -> Self { + Iter { ..*self } } } -/// An iterator over mutable references to the items of a `LinkedList`. +/// An iterator over mutable references to the elements of a `LinkedList`. #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { list: &'a mut LinkedList, - head: Rawlink>, - tail: Rawlink>, - nelem: usize, + head: Option>>, + tail: Option>>, + len: usize, } -/// An iterator over mutable references to the items of a `LinkedList`. +/// An iterator over the elements of a `LinkedList`. #[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { list: LinkedList, } -/// Rawlink is a type like Option but for holding a raw pointer -impl Rawlink { - /// Like Option::None for Rawlink - fn none() -> Rawlink { - Rawlink { p: None } - } - - /// Like Option::Some for Rawlink - fn some(n: &mut T) -> Rawlink { - unsafe { Rawlink { p: Some(Shared::new(n)) } } - } - - /// Convert the `Rawlink` into an Option value - /// - /// **unsafe** because: - /// - /// - Dereference of raw pointer. - /// - Returns reference of arbitrary lifetime. - unsafe fn resolve<'a>(&self) -> Option<&'a T> { - self.p.map(|p| &**p) - } - - /// Convert the `Rawlink` into an Option value - /// - /// **unsafe** because: - /// - /// - Dereference of raw pointer. - /// - Returns reference of arbitrary lifetime. 
- unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> { - self.p.map(|p| &mut **p) - } - - /// Return the `Rawlink` and replace with `Rawlink::none()` - fn take(&mut self) -> Rawlink { - mem::replace(self, Rawlink::none()) - } -} - -impl<'a, T> From<&'a mut Link> for Rawlink> { - fn from(node: &'a mut Link) -> Self { - match node.as_mut() { - None => Rawlink::none(), - Some(ptr) => Rawlink::some(ptr), - } - } -} - -impl Clone for Rawlink { - #[inline] - fn clone(&self) -> Rawlink { - Rawlink { p: self.p } - } -} - impl Node { - fn new(v: T) -> Node { + fn new(element: T) -> Self { Node { - value: v, next: None, - prev: Rawlink::none(), + prev: None, + element: element, } } - /// Update the `prev` link on `next`, then set self's next pointer. - /// - /// `self.next` should be `None` when you call this - /// (otherwise a Node is probably being dropped by mistake). - fn set_next(&mut self, mut next: Box>) { - debug_assert!(self.next.is_none()); - next.prev = Rawlink::some(self); - self.next = Some(next); + fn into_element(self: Box) -> T { + self.element } } -/// Clear the .prev field on `next`, then return `Some(next)` -fn link_no_prev(mut next: Box>) -> Link { - next.prev = Rawlink::none(); - Some(next) -} - // private methods impl LinkedList { - /// Add a Node first in the list + /// Adds the given node to the front of the list. 
#[inline] - fn push_front_node(&mut self, mut new_head: Box>) { - match self.list_head { - None => { - self.list_head = link_no_prev(new_head); - self.list_tail = Rawlink::from(&mut self.list_head); - } - Some(ref mut head) => { - new_head.prev = Rawlink::none(); - head.prev = Rawlink::some(&mut *new_head); - mem::swap(head, &mut new_head); - head.next = Some(new_head); + fn push_front_node(&mut self, mut node: Box>) { + unsafe { + node.next = self.head; + node.prev = None; + let node = Some(Shared::new(Box::into_raw(node))); + + match self.head { + None => self.tail = node, + Some(head) => (**head).prev = node, } + + self.head = node; + self.len += 1; } - self.length += 1; } - /// Remove the first Node and return it, or None if the list is empty + /// Removes and returns the node at the front of the list. #[inline] fn pop_front_node(&mut self) -> Option>> { - self.list_head.take().map(|mut front_node| { - self.length -= 1; - match front_node.next.take() { - Some(node) => self.list_head = link_no_prev(node), - None => self.list_tail = Rawlink::none(), + self.head.map(|node| unsafe { + let node = Box::from_raw(*node); + self.head = node.next; + + match self.head { + None => self.tail = None, + Some(head) => (**head).prev = None, } - front_node + + self.len -= 1; + node }) } - /// Add a Node last in the list + /// Adds the given node to the back of the list. 
#[inline] - fn push_back_node(&mut self, new_tail: Box>) { - match unsafe { self.list_tail.resolve_mut() } { - None => return self.push_front_node(new_tail), - Some(tail) => { - tail.set_next(new_tail); - self.list_tail = Rawlink::from(&mut tail.next); + fn push_back_node(&mut self, mut node: Box>) { + unsafe { + node.next = None; + node.prev = self.tail; + let node = Some(Shared::new(Box::into_raw(node))); + + match self.tail { + None => self.head = node, + Some(tail) => (**tail).next = node, } + + self.tail = node; + self.len += 1; } - self.length += 1; } - /// Remove the last Node and return it, or None if the list is empty + /// Removes and returns the node at the back of the list. #[inline] fn pop_back_node(&mut self) -> Option>> { - unsafe { - self.list_tail.resolve_mut().and_then(|tail| { - self.length -= 1; - self.list_tail = tail.prev; - match tail.prev.resolve_mut() { - None => self.list_head.take(), - Some(tail_prev) => tail_prev.next.take(), - } - }) - } + self.tail.map(|node| unsafe { + let node = Box::from_raw(*node); + self.tail = node.prev; + + match self.tail { + None => self.head = None, + Some(tail) => (**tail).next = None, + } + + self.len -= 1; + node + }) } } #[stable(feature = "rust1", since = "1.0.0")] impl Default for LinkedList { + /// Creates an empty `LinkedList`. #[inline] - fn default() -> LinkedList { - LinkedList::new() + fn default() -> Self { + Self::new() } } impl LinkedList { /// Creates an empty `LinkedList`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::LinkedList; + /// + /// let list: LinkedList = LinkedList::new(); + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new() -> LinkedList { + pub fn new() -> Self { LinkedList { - list_head: None, - list_tail: Rawlink::none(), - length: 0, + head: None, + tail: None, + len: 0, + marker: PhantomData, } } @@ -263,64 +204,99 @@ impl LinkedList { /// ``` /// use std::collections::LinkedList; /// - /// let mut a = LinkedList::new(); - /// let mut b = LinkedList::new(); - /// a.push_back(1); - /// a.push_back(2); - /// b.push_back(3); - /// b.push_back(4); + /// let mut list1 = LinkedList::new(); + /// list1.push_back('a'); /// - /// a.append(&mut b); + /// let mut list2 = LinkedList::new(); + /// list2.push_back('b'); + /// list2.push_back('c'); /// - /// for e in &a { - /// println!("{}", e); // prints 1, then 2, then 3, then 4 - /// } - /// println!("{}", b.len()); // prints 0 + /// list1.append(&mut list2); + /// + /// let mut iter = list1.iter(); + /// assert_eq!(iter.next(), Some(&'a')); + /// assert_eq!(iter.next(), Some(&'b')); + /// assert_eq!(iter.next(), Some(&'c')); + /// assert!(iter.next().is_none()); + /// + /// assert!(list2.is_empty()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn append(&mut self, other: &mut LinkedList) { - match unsafe { self.list_tail.resolve_mut() } { - None => { - self.length = other.length; - self.list_head = other.list_head.take(); - self.list_tail = other.list_tail.take(); - } - Some(tail) => { - // Carefully empty `other`. 
- let o_tail = other.list_tail.take(); - let o_length = other.length; - match other.list_head.take() { - None => return, - Some(node) => { - tail.set_next(node); - self.list_tail = o_tail; - self.length += o_length; - } + pub fn append(&mut self, other: &mut Self) { + match self.tail { + None => mem::swap(self, other), + Some(tail) => if let Some(other_head) = other.head.take() { + unsafe { + (**tail).next = Some(other_head); + (**other_head).prev = Some(tail); } - } + + self.tail = other.tail.take(); + self.len += mem::replace(&mut other.len, 0); + }, } - other.length = 0; } /// Provides a forward iterator. + /// + /// # Examples + /// + /// ``` + /// use std::collections::LinkedList; + /// + /// let mut list: LinkedList = LinkedList::new(); + /// + /// list.push_back(0); + /// list.push_back(1); + /// list.push_back(2); + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter { Iter { - nelem: self.len(), - head: &self.list_head, - tail: self.list_tail, + head: self.head, + tail: self.tail, + len: self.len, + marker: PhantomData, } } /// Provides a forward iterator with mutable references. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::LinkedList; + /// + /// let mut list: LinkedList = LinkedList::new(); + /// + /// list.push_back(0); + /// list.push_back(1); + /// list.push_back(2); + /// + /// for element in list.iter_mut() { + /// *element += 10; + /// } + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&10)); + /// assert_eq!(iter.next(), Some(&11)); + /// assert_eq!(iter.next(), Some(&12)); + /// assert_eq!(iter.next(), None); + /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn iter_mut(&mut self) -> IterMut { IterMut { - nelem: self.len(), - head: Rawlink::from(&mut self.list_head), - tail: self.list_tail, + head: self.head, + tail: self.tail, + len: self.len, list: self, } } @@ -343,7 +319,7 @@ impl LinkedList { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn is_empty(&self) -> bool { - self.list_head.is_none() + self.head.is_none() } /// Returns the length of the `LinkedList`. @@ -365,12 +341,11 @@ impl LinkedList { /// /// dl.push_back(3); /// assert_eq!(dl.len(), 3); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn len(&self) -> usize { - self.length + self.len } /// Removes all elements from the `LinkedList`. @@ -392,12 +367,35 @@ impl LinkedList { /// dl.clear(); /// assert_eq!(dl.len(), 0); /// assert_eq!(dl.front(), None); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { - *self = LinkedList::new() + *self = Self::new(); + } + + /// Returns `true` if the `LinkedList` contains an element equal to the + /// given value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::LinkedList; + /// + /// let mut list: LinkedList = LinkedList::new(); + /// + /// list.push_back(0); + /// list.push_back(1); + /// list.push_back(2); + /// + /// assert_eq!(list.contains(&0), true); + /// assert_eq!(list.contains(&10), false); + /// ``` + #[stable(feature = "linked_list_contains", since = "1.12.0")] + pub fn contains(&self, x: &T) -> bool + where T: PartialEq + { + self.iter().any(|e| e == x) } /// Provides a reference to the front element, or `None` if the list is @@ -413,12 +411,11 @@ impl LinkedList { /// /// dl.push_front(1); /// assert_eq!(dl.front(), Some(&1)); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn front(&self) -> Option<&T> { - self.list_head.as_ref().map(|head| &head.value) + self.head.map(|node| unsafe { &(**node).element }) } /// Provides a mutable reference to the front element, or `None` if the list @@ -440,12 +437,11 @@ impl LinkedList { /// Some(x) => *x = 5, /// } /// assert_eq!(dl.front(), Some(&5)); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn front_mut(&mut self) -> Option<&mut T> { - self.list_head.as_mut().map(|head| &mut head.value) + self.head.map(|node| unsafe { &mut (**node).element }) } /// Provides a reference to the back element, or `None` if the list is @@ -461,12 +457,11 @@ impl LinkedList { /// /// dl.push_back(1); /// assert_eq!(dl.back(), Some(&1)); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn back(&self) -> Option<&T> { - unsafe { self.list_tail.resolve().map(|tail| &tail.value) } + self.tail.map(|node| unsafe { &(**node).element }) } /// Provides a mutable reference to the back element, or `None` if the list @@ -488,12 +483,11 @@ impl LinkedList { /// Some(x) => *x = 5, /// } /// assert_eq!(dl.back(), Some(&5)); - /// /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn back_mut(&mut self) -> Option<&mut T> { - unsafe { 
self.list_tail.resolve_mut().map(|tail| &mut tail.value) } + self.tail.map(|node| unsafe { &mut (**node).element }) } /// Adds an element first in the list. @@ -512,11 +506,10 @@ impl LinkedList { /// /// dl.push_front(1); /// assert_eq!(dl.front().unwrap(), &1); - /// /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_front(&mut self, elt: T) { - self.push_front_node(box Node::new(elt)) + self.push_front_node(box Node::new(elt)); } /// Removes the first element and returns it, or `None` if the list is @@ -537,12 +530,10 @@ impl LinkedList { /// assert_eq!(d.pop_front(), Some(3)); /// assert_eq!(d.pop_front(), Some(1)); /// assert_eq!(d.pop_front(), None); - /// /// ``` - /// #[stable(feature = "rust1", since = "1.0.0")] pub fn pop_front(&mut self) -> Option { - self.pop_front_node().map(|box Node { value, .. }| value) + self.pop_front_node().map(Node::into_element) } /// Appends an element to the back of a list @@ -559,7 +550,7 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn push_back(&mut self, elt: T) { - self.push_back_node(box Node::new(elt)) + self.push_back_node(box Node::new(elt)); } /// Removes the last element from a list and returns it, or `None` if @@ -578,7 +569,7 @@ impl LinkedList { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn pop_back(&mut self) -> Option { - self.pop_back_node().map(|box Node { value, .. }| value) + self.pop_back_node().map(Node::into_element) } /// Splits the list into two at the given index. Returns everything after the given index, @@ -611,14 +602,14 @@ impl LinkedList { let len = self.len(); assert!(at <= len, "Cannot split off at a nonexistent index"); if at == 0 { - return mem::replace(self, LinkedList::new()); + return mem::replace(self, Self::new()); } else if at == len { - return LinkedList::new(); + return Self::new(); } // Below, we iterate towards the `i-1`th node, either from the start or the end, // depending on which would be faster. 
- let mut split_node = if at - 1 <= len - 1 - (at - 1) { + let split_node = if at - 1 <= len - 1 - (at - 1) { let mut iter = self.iter_mut(); // instead of skipping using .skip() (which creates a new struct), // we skip manually so we can access the head field without @@ -638,157 +629,184 @@ impl LinkedList { // The split node is the new tail node of the first part and owns // the head of the second part. - let mut second_part_head; + let second_part_head; unsafe { - second_part_head = split_node.resolve_mut().unwrap().next.take(); - match second_part_head { - None => {} - Some(ref mut head) => head.prev = Rawlink::none(), + second_part_head = (**split_node.unwrap()).next.take(); + if let Some(head) = second_part_head { + (**head).prev = None; } } let second_part = LinkedList { - list_head: second_part_head, - list_tail: self.list_tail, - length: len - at, + head: second_part_head, + tail: self.tail, + len: len - at, + marker: PhantomData, }; // Fix the tail ptr of the first part - self.list_tail = split_node; - self.length = at; + self.tail = split_node; + self.len = at; second_part } + + /// Returns a place for insertion at the front of the list. + /// + /// Using this method with placement syntax is equivalent to [`push_front`] + /// (#method.push_front), but may be more efficient. + /// + /// # Examples + /// + /// ``` + /// #![feature(collection_placement)] + /// #![feature(placement_in_syntax)] + /// + /// use std::collections::LinkedList; + /// + /// let mut list = LinkedList::new(); + /// list.front_place() <- 2; + /// list.front_place() <- 4; + /// assert!(list.iter().eq(&[4, 2])); + /// ``` + #[unstable(feature = "collection_placement", + reason = "method name and placement protocol are subject to change", + issue = "30172")] + pub fn front_place(&mut self) -> FrontPlace { + FrontPlace { list: self, node: IntermediateBox::make_place() } + } + + /// Returns a place for insertion at the back of the list. 
+ /// + /// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back), + /// but may be more efficient. + /// + /// # Examples + /// + /// ``` + /// #![feature(collection_placement)] + /// #![feature(placement_in_syntax)] + /// + /// use std::collections::LinkedList; + /// + /// let mut list = LinkedList::new(); + /// list.back_place() <- 2; + /// list.back_place() <- 4; + /// assert!(list.iter().eq(&[2, 4])); + /// ``` + #[unstable(feature = "collection_placement", + reason = "method name and placement protocol are subject to change", + issue = "30172")] + pub fn back_place(&mut self) -> BackPlace { + BackPlace { list: self, node: IntermediateBox::make_place() } + } } #[stable(feature = "rust1", since = "1.0.0")] impl Drop for LinkedList { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - // Dissolve the linked_list in a loop. - // Just dropping the list_head can lead to stack exhaustion - // when length is >> 1_000_000 - while let Some(mut head_) = self.list_head.take() { - self.list_head = head_.next.take(); - } - self.length = 0; - self.list_tail = Rawlink::none(); + while let Some(_) = self.pop_front_node() {} } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, A> Iterator for Iter<'a, A> { - type Item = &'a A; +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; #[inline] - fn next(&mut self) -> Option<&'a A> { - if self.nelem == 0 { - return None; + fn next(&mut self) -> Option<&'a T> { + if self.len == 0 { + None + } else { + self.head.map(|node| unsafe { + let node = &**node; + self.len -= 1; + self.head = node.next; + &node.element + }) } - self.head.as_ref().map(|head| { - self.nelem -= 1; - self.head = &head.next; - &head.value - }) } #[inline] fn size_hint(&self) -> (usize, Option) { - (self.nelem, Some(self.nelem)) + (self.len, Some(self.len)) } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, A> DoubleEndedIterator for Iter<'a, A> { +impl<'a, T> DoubleEndedIterator for Iter<'a, T> { 
#[inline] - fn next_back(&mut self) -> Option<&'a A> { - if self.nelem == 0 { - return None; - } - unsafe { - self.tail.resolve().map(|prev| { - self.nelem -= 1; - self.tail = prev.prev; - &prev.value + fn next_back(&mut self) -> Option<&'a T> { + if self.len == 0 { + None + } else { + self.tail.map(|node| unsafe { + let node = &**node; + self.len -= 1; + self.tail = node.prev; + &node.element }) } } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, A> ExactSizeIterator for Iter<'a, A> {} +impl<'a, T> ExactSizeIterator for Iter<'a, T> {} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, A> Iterator for IterMut<'a, A> { - type Item = &'a mut A; +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = &'a mut T; + #[inline] - fn next(&mut self) -> Option<&'a mut A> { - if self.nelem == 0 { - return None; - } - unsafe { - self.head.resolve_mut().map(|next| { - self.nelem -= 1; - self.head = Rawlink::from(&mut next.next); - &mut next.value + fn next(&mut self) -> Option<&'a mut T> { + if self.len == 0 { + None + } else { + self.head.map(|node| unsafe { + let node = &mut **node; + self.len -= 1; + self.head = node.next; + &mut node.element }) } } #[inline] fn size_hint(&self) -> (usize, Option) { - (self.nelem, Some(self.nelem)) + (self.len, Some(self.len)) } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, A> DoubleEndedIterator for IterMut<'a, A> { +impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[inline] - fn next_back(&mut self) -> Option<&'a mut A> { - if self.nelem == 0 { - return None; - } - unsafe { - self.tail.resolve_mut().map(|prev| { - self.nelem -= 1; - self.tail = prev.prev; - &mut prev.value + fn next_back(&mut self) -> Option<&'a mut T> { + if self.len == 0 { + None + } else { + self.tail.map(|node| unsafe { + let node = &mut **node; + self.len -= 1; + self.tail = node.prev; + &mut node.element }) } } } #[stable(feature = "rust1", 
since = "1.0.0")] -impl<'a, A> ExactSizeIterator for IterMut<'a, A> {} +impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -// private methods for IterMut -impl<'a, A> IterMut<'a, A> { - fn insert_next_node(&mut self, mut ins_node: Box>) { - // Insert before `self.head` so that it is between the - // previously yielded element and self.head. - // - // The inserted node will not appear in further iteration. - match unsafe { self.head.resolve_mut() } { - None => { - self.list.push_back_node(ins_node); - } - Some(node) => { - let prev_node = match unsafe { node.prev.resolve_mut() } { - None => return self.list.push_front_node(ins_node), - Some(prev) => prev, - }; - let node_own = prev_node.next.take().unwrap(); - ins_node.set_next(node_own); - prev_node.set_next(ins_node); - self.list.length += 1; - } - } - } -} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} -impl<'a, A> IterMut<'a, A> { - /// Inserts `elt` just after the element most recently returned by `.next()`. +impl<'a, T> IterMut<'a, T> { + /// Inserts the given element just after the element most recently returned by `.next()`. /// The inserted element does not appear in the iteration. 
/// /// # Examples @@ -815,8 +833,27 @@ impl<'a, A> IterMut<'a, A> { #[unstable(feature = "linked_list_extras", reason = "this is probably better handled by a cursor type -- we'll see", issue = "27794")] - pub fn insert_next(&mut self, elt: A) { - self.insert_next_node(box Node::new(elt)) + pub fn insert_next(&mut self, element: T) { + match self.head { + None => self.list.push_back(element), + Some(head) => unsafe { + let prev = match (**head).prev { + None => return self.list.push_front(element), + Some(prev) => prev, + }; + + let node = Some(Shared::new(Box::into_raw(box Node { + next: Some(head), + prev: Some(prev), + element: element, + }))); + + (**prev).next = node; + (**head).prev = node; + + self.list.len += 1; + } + } } /// Provides a reference to the next element, without changing the iterator. @@ -840,46 +877,50 @@ impl<'a, A> IterMut<'a, A> { #[unstable(feature = "linked_list_extras", reason = "this is probably better handled by a cursor type -- we'll see", issue = "27794")] - pub fn peek_next(&mut self) -> Option<&mut A> { - if self.nelem == 0 { - return None; + pub fn peek_next(&mut self) -> Option<&mut T> { + if self.len == 0 { + None + } else { + self.head.map(|node| unsafe { &mut (**node).element }) } - unsafe { self.head.resolve_mut().map(|head| &mut head.value) } } } #[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for IntoIter { - type Item = A; +impl Iterator for IntoIter { + type Item = T; #[inline] - fn next(&mut self) -> Option { + fn next(&mut self) -> Option { self.list.pop_front() } #[inline] fn size_hint(&self) -> (usize, Option) { - (self.list.length, Some(self.list.length)) + (self.list.len, Some(self.list.len)) } } #[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for IntoIter { +impl DoubleEndedIterator for IntoIter { #[inline] - fn next_back(&mut self) -> Option { + fn next_back(&mut self) -> Option { self.list.pop_back() } } #[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for 
IntoIter {} +impl ExactSizeIterator for IntoIter {} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] -impl FromIterator for LinkedList { - fn from_iter>(iter: T) -> LinkedList { - let mut ret = LinkedList::new(); - ret.extend(iter); - ret +impl FromIterator for LinkedList { + fn from_iter>(iter: I) -> Self { + let mut list = Self::new(); + list.extend(iter); + list } } @@ -910,20 +951,32 @@ impl<'a, T> IntoIterator for &'a mut LinkedList { type Item = &'a mut T; type IntoIter = IterMut<'a, T>; - fn into_iter(mut self) -> IterMut<'a, T> { + fn into_iter(self) -> IterMut<'a, T> { self.iter_mut() } } #[stable(feature = "rust1", since = "1.0.0")] -impl Extend for LinkedList { - fn extend>(&mut self, iter: T) { +impl Extend for LinkedList { + fn extend>(&mut self, iter: I) { + >::spec_extend(self, iter); + } +} + +impl SpecExtend for LinkedList { + default fn spec_extend(&mut self, iter: I) { for elt in iter { self.push_back(elt); } } } +impl SpecExtend> for LinkedList { + fn spec_extend(&mut self, ref mut other: LinkedList) { + self.append(other); + } +} + #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList { fn extend>(&mut self, iter: I) { @@ -932,50 +985,50 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList { } #[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for LinkedList { - fn eq(&self, other: &LinkedList) -> bool { - self.len() == other.len() && self.iter().eq(other.iter()) +impl PartialEq for LinkedList { + fn eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.iter().eq(other) } - fn ne(&self, other: &LinkedList) -> bool { - self.len() != other.len() || self.iter().ne(other.iter()) + fn ne(&self, other: &Self) -> bool { + self.len() != other.len() || self.iter().ne(other) } } #[stable(feature = "rust1", since = "1.0.0")] -impl Eq for LinkedList {} +impl Eq for LinkedList {} #[stable(feature = 
"rust1", since = "1.0.0")] -impl PartialOrd for LinkedList { - fn partial_cmp(&self, other: &LinkedList) -> Option { - self.iter().partial_cmp(other.iter()) +impl PartialOrd for LinkedList { + fn partial_cmp(&self, other: &Self) -> Option { + self.iter().partial_cmp(other) } } #[stable(feature = "rust1", since = "1.0.0")] -impl Ord for LinkedList { +impl Ord for LinkedList { #[inline] - fn cmp(&self, other: &LinkedList) -> Ordering { - self.iter().cmp(other.iter()) + fn cmp(&self, other: &Self) -> Ordering { + self.iter().cmp(other) } } #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for LinkedList { - fn clone(&self) -> LinkedList { +impl Clone for LinkedList { + fn clone(&self) -> Self { self.iter().cloned().collect() } } #[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for LinkedList { +impl fmt::Debug for LinkedList { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list().entries(self.iter()).finish() + f.debug_list().entries(self).finish() } } #[stable(feature = "rust1", since = "1.0.0")] -impl Hash for LinkedList { +impl Hash for LinkedList { fn hash(&self, state: &mut H) { self.len().hash(state); for elt in self { @@ -984,6 +1037,101 @@ impl Hash for LinkedList { } } +unsafe fn finalize(node: IntermediateBox>) -> Box> { + let mut node = node.finalize(); + ptr::write(&mut node.next, None); + ptr::write(&mut node.prev, None); + node +} + +/// A place for insertion at the front of a `LinkedList`. +/// +/// See [`LinkedList::front_place`](struct.LinkedList.html#method.front_place) for details. 
+#[must_use = "places do nothing unless written to with `<-` syntax"] +#[unstable(feature = "collection_placement", + reason = "struct name and placement protocol are subject to change", + issue = "30172")] +pub struct FrontPlace<'a, T: 'a> { + list: &'a mut LinkedList, + node: IntermediateBox>, +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> Placer for FrontPlace<'a, T> { + type Place = Self; + + fn make_place(self) -> Self { + self + } +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> Place for FrontPlace<'a, T> { + fn pointer(&mut self) -> *mut T { + unsafe { &mut (*self.node.pointer()).element } + } +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> InPlace for FrontPlace<'a, T> { + type Owner = (); + + unsafe fn finalize(self) { + let FrontPlace { list, node } = self; + list.push_front_node(finalize(node)); + } +} + +/// A place for insertion at the back of a `LinkedList`. +/// +/// See [`LinkedList::back_place`](struct.LinkedList.html#method.back_place) for details. 
+#[must_use = "places do nothing unless written to with `<-` syntax"] +#[unstable(feature = "collection_placement", + reason = "struct name and placement protocol are subject to change", + issue = "30172")] +pub struct BackPlace<'a, T: 'a> { + list: &'a mut LinkedList, + node: IntermediateBox>, +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> Placer for BackPlace<'a, T> { + type Place = Self; + + fn make_place(self) -> Self { + self + } +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> Place for BackPlace<'a, T> { + fn pointer(&mut self) -> *mut T { + unsafe { &mut (*self.node.pointer()).element } + } +} + +#[unstable(feature = "collection_placement", + reason = "placement protocol is subject to change", + issue = "30172")] +impl<'a, T> InPlace for BackPlace<'a, T> { + type Owner = (); + + unsafe fn finalize(self) { + let BackPlace { list, node } = self; + list.push_back_node(finalize(node)); + } +} + // Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters. 
#[allow(dead_code)] fn assert_covariance() { @@ -992,11 +1140,26 @@ fn assert_covariance() { fn c<'a>(x: IntoIter<&'static str>) -> IntoIter<&'a str> { x } } +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Send for LinkedList {} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl Sync for LinkedList {} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Sync> Send for Iter<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Send> Send for IterMut<'a, T> {} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} + #[cfg(test)] mod tests { - use std::clone::Clone; - use std::iter::{Iterator, IntoIterator, Extend}; - use std::option::Option::{self, Some, None}; use std::__rand::{thread_rng, Rng}; use std::thread; use std::vec::Vec; @@ -1009,38 +1172,40 @@ mod tests { } pub fn check_links(list: &LinkedList) { - let mut len = 0; - let mut last_ptr: Option<&Node> = None; - let mut node_ptr: &Node; - match list.list_head { - None => { - assert_eq!(0, list.length); - return; - } - Some(ref node) => node_ptr = &**node, - } - loop { - match unsafe { (last_ptr, node_ptr.prev.resolve()) } { - (None, None) => {} - (None, _) => panic!("prev link for list_head"), - (Some(p), Some(pptr)) => { - assert_eq!(p as *const Node, pptr as *const Node); + unsafe { + let mut len = 0; + let mut last_ptr: Option<&Node> = None; + let mut node_ptr: &Node; + match list.head { + None => { + assert_eq!(0, list.len); + return; } - _ => panic!("prev link is none, not good"), + Some(node) => node_ptr = &**node, } - match node_ptr.next { - Some(ref next) => { - last_ptr = Some(node_ptr); - node_ptr = &**next; - len += 1; + loop { + match (last_ptr, node_ptr.prev) { + (None, None) => {} + (None, _) => panic!("prev link for head"), + (Some(p), Some(pptr)) => { + assert_eq!(p as *const Node, *pptr 
as *const Node); + } + _ => panic!("prev link is none, not good"), } - None => { - len += 1; - break; + match node_ptr.next { + Some(next) => { + last_ptr = Some(node_ptr); + node_ptr = &**next; + len += 1; + } + None => { + len += 1; + break; + } } } + assert_eq!(len, list.len); } - assert_eq!(len, list.length); } #[test] @@ -1086,7 +1251,7 @@ mod tests { m.append(&mut n); check_links(&m); let mut sum = v; - sum.push_all(&u); + sum.extend_from_slice(&u); assert_eq!(sum.len(), m.len()); for elt in sum { assert_eq!(m.pop_front(), Some(elt)) @@ -1129,6 +1294,7 @@ mod tests { } #[test] + #[cfg_attr(target_os = "emscripten", ignore)] fn test_send() { let n = list_from(&[1, 2, 3]); thread::spawn(move || { @@ -1152,17 +1318,16 @@ mod tests { #[test] fn test_26021() { - use std::iter::ExactSizeIterator; // There was a bug in split_off that failed to null out the RHS's head's prev ptr. // This caused the RHS's dtor to walk up into the LHS at drop and delete all of // its nodes. // // https://github.com/rust-lang/rust/issues/26021 let mut v1 = LinkedList::new(); - v1.push_front(1u8); - v1.push_front(1u8); - v1.push_front(1u8); - v1.push_front(1u8); + v1.push_front(1); + v1.push_front(1); + v1.push_front(1); + v1.push_front(1); let _ = v1.split_off(3); // Dropping this now should not cause laundry consumption assert_eq!(v1.len(), 3); @@ -1173,10 +1338,10 @@ mod tests { #[test] fn test_split_off() { let mut v1 = LinkedList::new(); - v1.push_front(1u8); - v1.push_front(1u8); - v1.push_front(1u8); - v1.push_front(1u8); + v1.push_front(1); + v1.push_front(1); + v1.push_front(1); + v1.push_front(1); // test all splits for ix in 0..1 + v1.len() { @@ -1189,7 +1354,6 @@ mod tests { } } - #[cfg(test)] fn fuzz_test(sz: i32) { let mut m: LinkedList<_> = LinkedList::new(); diff --git a/src/libcollections/macros.rs b/src/libcollections/macros.rs index 6a683e65c9eb3..3115be00a4d72 100644 --- a/src/libcollections/macros.rs +++ b/src/libcollections/macros.rs @@ -41,12 
+41,13 @@ #[cfg(not(test))] #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] +#[allow_internal_unstable] macro_rules! vec { ($elem:expr; $n:expr) => ( $crate::vec::from_elem($elem, $n) ); ($($x:expr),*) => ( - <[_]>::into_vec($crate::boxed::Box::new([$($x),*])) + <[_]>::into_vec(box [$($x),*]) ); ($($x:expr,)*) => (vec![$($x),*]) } @@ -61,13 +62,15 @@ macro_rules! vec { $crate::vec::from_elem($elem, $n) ); ($($x:expr),*) => ( - $crate::slice::into_vec($crate::boxed::Box::new([$($x),*])) + $crate::slice::into_vec(box [$($x),*]) ); ($($x:expr,)*) => (vec![$($x),*]) } /// Use the syntax described in `std::fmt` to create a value of type `String`. -/// See `std::fmt` for more information. +/// See [`std::fmt`][fmt] for more information. +/// +/// [fmt]: ../std/fmt/index.html /// /// # Examples /// diff --git a/src/libcollections/range.rs b/src/libcollections/range.rs index afcd779ddf19f..d331ead2c5ee6 100644 --- a/src/libcollections/range.rs +++ b/src/libcollections/range.rs @@ -14,7 +14,6 @@ //! Range syntax. -use core::option::Option::{self, None, Some}; use core::ops::{RangeFull, Range, RangeTo, RangeFrom}; /// **RangeArgument** is implemented by Rust's built-in range types, produced @@ -23,6 +22,22 @@ pub trait RangeArgument { /// Start index (inclusive) /// /// Return start value if present, else `None`. + /// + /// # Examples + /// + /// ``` + /// #![feature(collections)] + /// #![feature(collections_range)] + /// + /// extern crate collections; + /// + /// # fn main() { + /// use collections::range::RangeArgument; + /// + /// assert_eq!((..10).start(), None); + /// assert_eq!((3..10).start(), Some(&3)); + /// # } + /// ``` fn start(&self) -> Option<&T> { None } @@ -30,11 +45,28 @@ pub trait RangeArgument { /// End index (exclusive) /// /// Return end value if present, else `None`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(collections)] + /// #![feature(collections_range)] + /// + /// extern crate collections; + /// + /// # fn main() { + /// use collections::range::RangeArgument; + /// + /// assert_eq!((3..).end(), None); + /// assert_eq!((3..10).end(), Some(&10)); + /// # } + /// ``` fn end(&self) -> Option<&T> { None } } +// FIXME add inclusive ranges to RangeArgument impl RangeArgument for RangeFull {} diff --git a/src/libcollections/slice.rs b/src/libcollections/slice.rs index 8b4497e6f037e..e615e780d2b5c 100644 --- a/src/libcollections/slice.rs +++ b/src/libcollections/slice.rs @@ -36,7 +36,7 @@ //! //! ## Structs //! -//! There are several structs that are useful for slices, such as `Iter`, which +//! There are several structs that are useful for slices, such as [`Iter`], which //! represents iteration over a slice. //! //! ## Trait Implementations @@ -44,9 +44,9 @@ //! There are several implementations of common traits for slices. Some examples //! include: //! -//! * `Clone` -//! * `Eq`, `Ord` - for slices whose element type are `Eq` or `Ord`. -//! * `Hash` - for slices whose element type is `Hash` +//! * [`Clone`] +//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`]. +//! * [`Hash`] - for slices whose element type is [`Hash`]. //! //! ## Iteration //! @@ -73,30 +73,36 @@ //! the element type of the slice is `i32`, the element type of the iterator is //! `&mut i32`. //! -//! * `.iter()` and `.iter_mut()` are the explicit methods to return the default +//! * [`.iter()`] and [`.iter_mut()`] are the explicit methods to return the default //! iterators. -//! * Further methods that return iterators are `.split()`, `.splitn()`, -//! `.chunks()`, `.windows()` and more. +//! * Further methods that return iterators are [`.split()`], [`.splitn()`], +//! [`.chunks()`], [`.windows()`] and more. //! -//! *[See also the slice primitive type](../primitive.slice.html).* +//! 
*[See also the slice primitive type](../../std/primitive.slice.html).* +//! +//! [`Clone`]: ../../std/clone/trait.Clone.html +//! [`Eq`]: ../../std/cmp/trait.Eq.html +//! [`Ord`]: ../../std/cmp/trait.Ord.html +//! [`Iter`]: struct.Iter.html +//! [`Hash`]: ../../std/hash/trait.Hash.html +//! [`.iter()`]: ../../std/primitive.slice.html#method.iter +//! [`.iter_mut()`]: ../../std/primitive.slice.html#method.iter_mut +//! [`.split()`]: ../../std/primitive.slice.html#method.split +//! [`.splitn()`]: ../../std/primitive.slice.html#method.splitn +//! [`.chunks()`]: ../../std/primitive.slice.html#method.chunks +//! [`.windows()`]: ../../std/primitive.slice.html#method.windows #![stable(feature = "rust1", since = "1.0.0")] // Many of the usings in this module are only used in the test configuration. // It's cleaner to just turn off the unused_imports warning than to fix them. -#![allow(unused_imports)] +#![cfg_attr(test, allow(unused_imports, dead_code))] use alloc::boxed::Box; -use core::clone::Clone; use core::cmp::Ordering::{self, Greater, Less}; -use core::cmp::{self, Ord, PartialEq}; -use core::iter::Iterator; -use core::marker::Sized; +use core::cmp; use core::mem::size_of; use core::mem; -use core::ops::FnMut; -use core::option::Option::{self, Some, None}; use core::ptr; -use core::result::Result; use core::slice as core_slice; use borrow::{Borrow, BorrowMut, ToOwned}; @@ -110,11 +116,10 @@ pub use core::slice::{Iter, IterMut}; pub use core::slice::{SplitMut, ChunksMut, Split}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{SplitN, RSplitN, SplitNMut, RSplitNMut}; -#[unstable(feature = "slice_bytes", issue = "27740")] -#[allow(deprecated)] -pub use core::slice::bytes; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{from_raw_parts, from_raw_parts_mut}; +#[unstable(feature = "slice_get_slice", issue = "35729")] +pub use core::slice::SliceIndex; //////////////////////////////////////////////////////////////////////////////// // 
Basic slice extension methods @@ -136,12 +141,7 @@ pub use self::hack::to_vec; // `test_permutations` test mod hack { use alloc::boxed::Box; - use core::clone::Clone; - #[cfg(test)] - use core::iter::Iterator; use core::mem; - #[cfg(test)] - use core::option::Option::{Some, None}; #[cfg(test)] use string::ToString; @@ -165,7 +165,6 @@ mod hack { } } -/// Allocating extension methods for slices. #[lang = "slice"] #[cfg(not(test))] impl [T] { @@ -183,7 +182,7 @@ impl [T] { core_slice::SliceExt::len(self) } - /// Returns true if the slice has a length of 0 + /// Returns true if the slice has a length of 0. /// /// # Example /// @@ -214,7 +213,18 @@ impl [T] { core_slice::SliceExt::first(self) } - /// Returns a mutable pointer to the first element of a slice, or `None` if it is empty + /// Returns a mutable pointer to the first element of a slice, or `None` if it is empty. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(first) = x.first_mut() { + /// *first = 5; + /// } + /// assert_eq!(x, &[5, 1, 2]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn first_mut(&mut self) -> Option<&mut T> { @@ -222,6 +232,17 @@ impl [T] { } /// Returns the first and all the rest of the elements of a slice. + /// + /// # Examples + /// + /// ``` + /// let x = &[0, 1, 2]; + /// + /// if let Some((first, elements)) = x.split_first() { + /// assert_eq!(first, &0); + /// assert_eq!(elements, &[1, 2]); + /// } + /// ``` #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] pub fn split_first(&self) -> Option<(&T, &[T])> { @@ -229,6 +250,19 @@ impl [T] { } /// Returns the first and all the rest of the elements of a slice. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some((first, elements)) = x.split_first_mut() { + /// *first = 3; + /// elements[0] = 4; + /// elements[1] = 5; + /// } + /// assert_eq!(x, &[3, 4, 5]); + /// ``` #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] pub fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> { @@ -236,6 +270,17 @@ impl [T] { } /// Returns the last and all the rest of the elements of a slice. + /// + /// # Examples + /// + /// ``` + /// let x = &[0, 1, 2]; + /// + /// if let Some((last, elements)) = x.split_last() { + /// assert_eq!(last, &2); + /// assert_eq!(elements, &[0, 1]); + /// } + /// ``` #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] pub fn split_last(&self) -> Option<(&T, &[T])> { @@ -244,6 +289,19 @@ impl [T] { } /// Returns the last and all the rest of the elements of a slice. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some((last, elements)) = x.split_last_mut() { + /// *last = 3; + /// elements[0] = 4; + /// elements[1] = 5; + /// } + /// assert_eq!(x, &[4, 5, 3]); + /// ``` #[stable(feature = "slice_splits", since = "1.5.0")] #[inline] pub fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> { @@ -268,6 +326,17 @@ impl [T] { } /// Returns a mutable pointer to the last item in the slice. 
+ /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(last) = x.last_mut() { + /// *last = 10; + /// } + /// assert_eq!(x, &[0, 1, 10]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn last_mut(&mut self) -> Option<&mut T> { @@ -286,40 +355,95 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn get(&self, index: usize) -> Option<&T> { + pub fn get(&self, index: I) -> Option<&I::Output> + where I: SliceIndex + { core_slice::SliceExt::get(self, index) } - /// Returns a mutable reference to the element at the given index, + /// Returns a mutable reference to the element at the given index. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [0, 1, 2]; + /// + /// if let Some(elem) = x.get_mut(1) { + /// *elem = 42; + /// } + /// assert_eq!(x, &[0, 42, 2]); + /// ``` /// or `None` if the index is out of bounds #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { + pub fn get_mut(&mut self, index: I) -> Option<&mut I::Output> + where I: SliceIndex + { core_slice::SliceExt::get_mut(self, index) } /// Returns a pointer to the element at the given index, without doing - /// bounds checking. + /// bounds checking. So use it very carefully! + /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// + /// unsafe { + /// assert_eq!(x.get_unchecked(1), &2); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub unsafe fn get_unchecked(&self, index: usize) -> &T { + pub unsafe fn get_unchecked(&self, index: I) -> &I::Output + where I: SliceIndex + { core_slice::SliceExt::get_unchecked(self, index) } - /// Returns an unsafe mutable pointer to the element in index + /// Returns an unsafe mutable pointer to the element in index. So use it + /// very carefully! 
+ /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// + /// unsafe { + /// let elem = x.get_unchecked_mut(1); + /// *elem = 13; + /// } + /// assert_eq!(x, &[1, 13, 4]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T { + pub unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output + where I: SliceIndex + { core_slice::SliceExt::get_unchecked_mut(self, index) } - /// Returns an raw pointer to the slice's buffer + /// Returns an raw pointer to the slice's buffer. /// /// The caller must ensure that the slice outlives the pointer this /// function returns, or else it will end up pointing to garbage. /// /// Modifying the slice may cause its buffer to be reallocated, which /// would also make any pointers to it invalid. + /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// let x_ptr = x.as_ptr(); + /// + /// unsafe { + /// for i in 0..x.len() { + /// assert_eq!(x.get_unchecked(i), &*x_ptr.offset(i as isize)); + /// } + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn as_ptr(&self) -> *const T { @@ -333,6 +457,20 @@ impl [T] { /// /// Modifying the slice may cause its buffer to be reallocated, which /// would also make any pointers to it invalid. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// let x_ptr = x.as_mut_ptr(); + /// + /// unsafe { + /// for i in 0..x.len() { + /// *x_ptr.offset(i as isize) += 2; + /// } + /// } + /// assert_eq!(x, &[3, 4, 6]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { @@ -350,9 +488,9 @@ impl [T] { /// /// Panics if `a` or `b` are out of bounds. 
/// - /// # Example + /// # Examples /// - /// ```rust + /// ``` /// let mut v = ["a", "b", "c", "d"]; /// v.swap(1, 3); /// assert!(v == ["a", "d", "c", "b"]); @@ -367,7 +505,7 @@ impl [T] { /// /// # Example /// - /// ```rust + /// ``` /// let mut v = [1, 2, 3]; /// v.reverse(); /// assert!(v == [3, 2, 1]); @@ -379,13 +517,39 @@ impl [T] { } /// Returns an iterator over the slice. + /// + /// # Examples + /// + /// ``` + /// let x = &[1, 2, 4]; + /// let mut iterator = x.iter(); + /// + /// assert_eq!(iterator.next(), Some(&1)); + /// assert_eq!(iterator.next(), Some(&2)); + /// assert_eq!(iterator.next(), Some(&4)); + /// assert_eq!(iterator.next(), None); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn iter(&self) -> Iter { core_slice::SliceExt::iter(self) } - /// Returns an iterator that allows modifying each value + /// Returns an iterator that allows modifying each value. + /// + /// # Examples + /// + /// ``` + /// let x = &mut [1, 2, 4]; + /// { + /// let iterator = x.iter_mut(); + /// + /// for elem in iterator { + /// *elem += 2; + /// } + /// } + /// assert_eq!(x, &[3, 4, 6]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn iter_mut(&mut self) -> IterMut { @@ -402,14 +566,21 @@ impl [T] { /// /// # Example /// - /// Print the adjacent pairs of a slice (i.e. 
`[1,2]`, `[2,3]`, - /// `[3,4]`): + /// ``` + /// let slice = ['r', 'u', 's', 't']; + /// let mut iter = slice.windows(2); + /// assert_eq!(iter.next().unwrap(), &['r', 'u']); + /// assert_eq!(iter.next().unwrap(), &['u', 's']); + /// assert_eq!(iter.next().unwrap(), &['s', 't']); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// If the slice is shorter than `size`: /// - /// ```rust - /// let v = &[1, 2, 3, 4]; - /// for win in v.windows(2) { - /// println!("{:?}", win); - /// } + /// ``` + /// let slice = ['f', 'o', 'o']; + /// let mut iter = slice.windows(4); + /// assert!(iter.next().is_none()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -418,9 +589,9 @@ impl [T] { } /// Returns an iterator over `size` elements of the slice at a - /// time. The chunks do not overlap. If `size` does not divide the - /// length of the slice, then the last chunk will not have length - /// `size`. + /// time. The chunks are slices and do not overlap. If `size` does + /// not divide the length of the slice, then the last chunk will + /// not have length `size`. /// /// # Panics /// @@ -428,14 +599,13 @@ impl [T] { /// /// # Example /// - /// Print the slice two elements at a time (i.e. `[1,2]`, - /// `[3,4]`, `[5]`): - /// - /// ```rust - /// let v = &[1, 2, 3, 4, 5]; - /// for win in v.chunks(2) { - /// println!("{:?}", win); - /// } + /// ``` + /// let slice = ['l', 'o', 'r', 'e', 'm']; + /// let mut iter = slice.chunks(2); + /// assert_eq!(iter.next().unwrap(), &['l', 'o']); + /// assert_eq!(iter.next().unwrap(), &['r', 'e']); + /// assert_eq!(iter.next().unwrap(), &['m']); + /// assert!(iter.next().is_none()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -444,13 +614,28 @@ impl [T] { } /// Returns an iterator over `chunk_size` elements of the slice at a time. - /// The chunks are mutable and do not overlap. If `chunk_size` does + /// The chunks are mutable slices, and do not overlap. 
If `chunk_size` does /// not divide the length of the slice, then the last chunk will not /// have length `chunk_size`. /// /// # Panics /// /// Panics if `chunk_size` is 0. + /// + /// # Examples + /// + /// ``` + /// let v = &mut [0, 0, 0, 0, 0]; + /// let mut count = 1; + /// + /// for chunk in v.chunks_mut(2) { + /// for elem in chunk.iter_mut() { + /// *elem += count; + /// } + /// count += 1; + /// } + /// assert_eq!(v, &[1, 1, 2, 2, 3]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut { @@ -491,9 +676,9 @@ impl [T] { /// /// Panics if `mid > len`. /// - /// # Example + /// # Examples /// - /// ```rust + /// ``` /// let mut v = [1, 2, 3, 4, 5, 6]; /// /// // scoped to restrict the lifetime of the borrows @@ -522,18 +707,44 @@ impl [T] { } /// Returns an iterator over subslices separated by elements that match - /// `pred`. The matched element is not contained in the subslices. + /// `pred`. The matched element is not contained in the subslices. /// /// # Examples /// - /// Print the slice split by numbers divisible by 3 (i.e. `[10, 40]`, - /// `[20]`, `[50]`): + /// ``` + /// let slice = [10, 40, 33, 20]; + /// let mut iter = slice.split(|num| num % 3 == 0); /// + /// assert_eq!(iter.next().unwrap(), &[10, 40]); + /// assert_eq!(iter.next().unwrap(), &[20]); + /// assert!(iter.next().is_none()); /// ``` - /// let v = [10, 40, 30, 20, 60, 50]; - /// for group in v.split(|num| *num % 3 == 0) { - /// println!("{:?}", group); - /// } + /// + /// If the first element is matched, an empty slice will be the first item + /// returned by the iterator. 
Similarly, if the last element in the slice + /// is matched, an empty slice will be the last item returned by the + /// iterator: + /// + /// ``` + /// let slice = [10, 40, 33]; + /// let mut iter = slice.split(|num| num % 3 == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[10, 40]); + /// assert_eq!(iter.next().unwrap(), &[]); + /// assert!(iter.next().is_none()); + /// ``` + /// + /// If two matched elements are directly adjacent, an empty slice will be + /// present between them: + /// + /// ``` + /// let slice = [10, 6, 33, 20]; + /// let mut iter = slice.split(|num| num % 3 == 0); + /// + /// assert_eq!(iter.next().unwrap(), &[10]); + /// assert_eq!(iter.next().unwrap(), &[]); + /// assert_eq!(iter.next().unwrap(), &[20]); + /// assert!(iter.next().is_none()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -544,7 +755,18 @@ impl [T] { } /// Returns an iterator over mutable subslices separated by elements that - /// match `pred`. The matched element is not contained in the subslices. + /// match `pred`. The matched element is not contained in the subslices. + /// + /// # Examples + /// + /// ``` + /// let mut v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.split_mut(|num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(v, [1, 40, 30, 1, 60, 1]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn split_mut(&mut self, pred: F) -> SplitMut @@ -554,7 +776,7 @@ impl [T] { } /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is + /// `pred`, limited to returning at most `n` items. The matched element is /// not contained in the subslices. 
/// /// The last element returned, if any, will contain the remainder of the @@ -567,6 +789,7 @@ impl [T] { /// /// ``` /// let v = [10, 40, 30, 20, 60, 50]; + /// /// for group in v.splitn(2, |num| *num % 3 == 0) { /// println!("{:?}", group); /// } @@ -580,11 +803,22 @@ impl [T] { } /// Returns an iterator over subslices separated by elements that match - /// `pred`, limited to returning at most `n` items. The matched element is + /// `pred`, limited to returning at most `n` items. The matched element is /// not contained in the subslices. /// /// The last element returned, if any, will contain the remainder of the /// slice. + /// + /// # Examples + /// + /// ``` + /// let mut v = [10, 40, 30, 20, 60, 50]; + /// + /// for group in v.splitn_mut(2, |num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(v, [1, 40, 30, 1, 60, 50]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn splitn_mut(&mut self, n: usize, pred: F) -> SplitNMut @@ -608,6 +842,7 @@ impl [T] { /// /// ``` /// let v = [10, 40, 30, 20, 60, 50]; + /// /// for group in v.rsplitn(2, |num| *num % 3 == 0) { /// println!("{:?}", group); /// } @@ -622,11 +857,22 @@ impl [T] { /// Returns an iterator over subslices separated by elements that match /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in + /// the slice and works backwards. The matched element is not contained in /// the subslices. /// /// The last element returned, if any, will contain the remainder of the /// slice. 
+ /// + /// # Examples + /// + /// ``` + /// let mut s = [10, 40, 30, 20, 60, 50]; + /// + /// for group in s.rsplitn_mut(2, |num| *num % 3 == 0) { + /// group[0] = 1; + /// } + /// assert_eq!(s, [1, 40, 30, 20, 60, 1]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut @@ -698,9 +944,9 @@ impl [T] { /// /// Looks up a series of four elements. The first is found, with a /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1,4]`. + /// found; the fourth could match any position in `[1, 4]`. /// - /// ```rust + /// ``` /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; /// /// assert_eq!(s.binary_search(&13), Ok(9)); @@ -732,9 +978,9 @@ impl [T] { /// /// Looks up a series of four elements. The first is found, with a /// uniquely determined position; the second and third are not - /// found; the fourth could match any position in `[1,4]`. + /// found; the fourth could match any position in `[1, 4]`. /// - /// ```rust + /// ``` /// let s = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]; /// /// let seek = 13; @@ -749,21 +995,59 @@ impl [T] { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn binary_search_by(&self, f: F) -> Result - where F: FnMut(&T) -> Ordering + pub fn binary_search_by<'a, F>(&'a self, f: F) -> Result + where F: FnMut(&'a T) -> Ordering { core_slice::SliceExt::binary_search_by(self, f) } - /// Sorts the slice, in place. + /// Binary search a sorted slice with a key extraction function. + /// + /// Assumes that the slice is sorted by the key, for instance with + /// [`sort_by_key`] using the same key extraction function. + /// + /// If a matching value is found then returns `Ok`, containing the + /// index for the matched element; if no match is found then `Err` + /// is returned, containing the index where a matching element could + /// be inserted while maintaining sorted order. 
/// + /// [`sort_by_key`]: #method.sort_by_key + /// + /// # Examples + /// + /// Looks up a series of four elements in a slice of pairs sorted by + /// their second elements. The first is found, with a uniquely + /// determined position; the second and third are not found; the + /// fourth could match any position in `[1, 4]`. + /// + /// ``` + /// let s = [(0, 0), (2, 1), (4, 1), (5, 1), (3, 1), + /// (1, 2), (2, 3), (4, 5), (5, 8), (3, 13), + /// (1, 21), (2, 34), (4, 55)]; + /// + /// assert_eq!(s.binary_search_by_key(&13, |&(a,b)| b), Ok(9)); + /// assert_eq!(s.binary_search_by_key(&4, |&(a,b)| b), Err(7)); + /// assert_eq!(s.binary_search_by_key(&100, |&(a,b)| b), Err(13)); + /// let r = s.binary_search_by_key(&1, |&(a,b)| b); + /// assert!(match r { Ok(1...4) => true, _ => false, }); + /// ``` + #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] + #[inline] + pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result + where F: FnMut(&'a T) -> B, + B: Ord + { + core_slice::SliceExt::binary_search_by_key(self, b, f) + } + /// This is equivalent to `self.sort_by(|a, b| a.cmp(b))`. /// - /// This is a stable sort. + /// This sort is stable and `O(n log n)` worst-case but allocates + /// approximately `2 * n` where `n` is the length of `self`. /// /// # Examples /// - /// ```rust + /// ``` /// let mut v = [-5, 4, 1, -3, 2]; /// /// v.sort(); @@ -777,17 +1061,15 @@ impl [T] { self.sort_by(|a, b| a.cmp(b)) } - /// Sorts the slice, in place, using `key` to extract a key by which to + /// Sorts the slice, in place, using `f` to extract a key by which to /// order the sort by. /// - /// This sort is `O(n log n)` worst-case and stable, but allocates + /// This sort is stable and `O(n log n)` worst-case but allocates /// approximately `2 * n`, where `n` is the length of `self`. /// - /// This is a stable sort. 
- /// /// # Examples /// - /// ```rust + /// ``` /// let mut v = [-5i32, 4, 1, -3, 2]; /// /// v.sort_by_key(|k| k.abs()); @@ -804,12 +1086,12 @@ impl [T] { /// Sorts the slice, in place, using `compare` to compare /// elements. /// - /// This sort is `O(n log n)` worst-case and stable, but allocates + /// This sort is stable and `O(n log n)` worst-case but allocates /// approximately `2 * n`, where `n` is the length of `self`. /// /// # Examples /// - /// ```rust + /// ``` /// let mut v = [5, 4, 1, 3, 2]; /// v.sort_by(|a, b| a.cmp(b)); /// assert!(v == [1, 2, 3, 4, 5]); @@ -828,7 +1110,7 @@ impl [T] { /// Copies the elements from `src` into `self`. /// - /// The length of this slice must be the same as the slice passed in. + /// The length of `src` must be the same as `self`. /// /// # Panics /// @@ -836,7 +1118,7 @@ impl [T] { /// /// # Example /// - /// ```rust + /// ``` /// let mut dst = [0, 0, 0]; /// let src = [1, 2, 3]; /// @@ -848,7 +1130,38 @@ impl [T] { core_slice::SliceExt::clone_from_slice(self, src) } + /// Copies all elements from `src` into `self`, using a memcpy. + /// + /// The length of `src` must be the same as `self`. + /// + /// # Panics + /// + /// This function will panic if the two slices have different lengths. + /// + /// # Example + /// + /// ``` + /// let mut dst = [0, 0, 0]; + /// let src = [1, 2, 3]; + /// + /// dst.copy_from_slice(&src); + /// assert_eq!(src, dst); + /// ``` + #[stable(feature = "copy_from_slice", since = "1.9.0")] + pub fn copy_from_slice(&mut self, src: &[T]) where T: Copy { + core_slice::SliceExt::copy_from_slice(self, src) + } + + /// Copies `self` into a new `Vec`. + /// + /// # Examples + /// + /// ``` + /// let s = [10, 40, 30]; + /// let x = s.to_vec(); + /// // Here, `s` and `x` can be modified independently. + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn to_vec(&self) -> Vec @@ -859,6 +1172,16 @@ impl [T] { } /// Converts `self` into a vector without clones or allocation. 
+ /// + /// # Examples + /// + /// ``` + /// let s: Box<[i32]> = Box::new([10, 40, 30]); + /// let x = s.into_vec(); + /// // `s` cannot be used anymore because it has been converted into `x`. + /// + /// assert_eq!(x, vec![10, 40, 30]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn into_vec(self: Box) -> Vec { diff --git a/src/libcollections/str.rs b/src/libcollections/str.rs index 766867f284ee1..48a74bdecbbef 100644 --- a/src/libcollections/str.rs +++ b/src/libcollections/str.rs @@ -8,9 +8,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Unicode string slices +//! Unicode string slices. //! -//! *[See also the `str` primitive type](../primitive.str.html).* +//! *[See also the `str` primitive type](../../std/primitive.str.html).* #![stable(feature = "rust1", since = "1.0.0")] @@ -19,14 +19,11 @@ // It's cleaner to just turn off the unused_imports warning than to fix them. #![allow(unused_imports)] -use core::clone::Clone; -use core::iter::{Iterator, Extend}; -use core::option::Option::{self, Some, None}; -use core::result::Result; use core::str as core_str; use core::str::pattern::Pattern; use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; +use core::iter::FusedIterator; use rustc_unicode::str::{UnicodeStr, Utf16Encoder}; use vec_deque::VecDeque; @@ -41,7 +38,7 @@ use boxed::Box; pub use core::str::{FromStr, Utf8Error}; #[allow(deprecated)] #[stable(feature = "rust1", since = "1.0.0")] -pub use core::str::{Lines, LinesAny, CharRange}; +pub use core::str::{Lines, LinesAny}; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{Split, RSplit}; #[stable(feature = "rust1", since = "1.0.0")] @@ -120,13 +117,13 @@ impl> SliceConcatExt for [S] { /// /// For use with the `std::iter` module. 
#[derive(Clone)] -#[unstable(feature = "str_utf16", issue = "27714")] -pub struct Utf16Units<'a> { +#[stable(feature = "encode_utf16", since = "1.8.0")] +pub struct EncodeUtf16<'a> { encoder: Utf16Encoder>, } -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a> Iterator for Utf16Units<'a> { +#[stable(feature = "encode_utf16", since = "1.8.0")] +impl<'a> Iterator for EncodeUtf16<'a> { type Item = u16; #[inline] @@ -140,6 +137,9 @@ impl<'a> Iterator for Utf16Units<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for EncodeUtf16<'a> {} + // Return the initial codepoint accumulator for the first byte. // The first byte is special, only want bottom 5 bits for width 2, 4 bits // for width 3, and 3 bits for width 4 @@ -227,8 +227,6 @@ impl str { /// # Examples /// /// ``` - /// #![feature(str_char)] - /// /// let s = "Löwe 老虎 Léopard"; /// assert!(s.is_char_boundary(0)); /// // start of `老` @@ -241,12 +239,7 @@ impl str { /// // third byte of `老` /// assert!(!s.is_char_boundary(8)); /// ``` - #[unstable(feature = "str_char", - reason = "it is unclear whether this method pulls its weight \ - with the existence of the char_indices iterator or \ - this method may want to be replaced with checked \ - slicing", - issue = "27754")] + #[stable(feature = "is_char_boundary", since = "1.9.0")] #[inline] pub fn is_char_boundary(&self, index: usize) -> bool { core_str::StrExt::is_char_boundary(self, index) @@ -271,9 +264,11 @@ impl str { /// Converts a string slice to a raw pointer. /// /// As string slices are a slice of bytes, the raw pointer points to a - /// `u8`. This pointer will be pointing to the first byte of the string + /// [`u8`]. This pointer will be pointing to the first byte of the string /// slice. 
/// + /// [`u8`]: primitive.u8.html + /// /// # Examples /// /// Basic usage: @@ -302,7 +297,7 @@ impl str { /// # Safety /// /// Callers of this function are responsible that three preconditions are - /// satisifed: + /// satisfied: /// /// * `begin` must come before `end`. /// * `begin` and `end` must be byte positions within the string slice. @@ -345,7 +340,7 @@ impl str { /// # Safety /// /// Callers of this function are responsible that three preconditions are - /// satisifed: + /// satisfied: /// /// * `begin` must come before `end`. /// * `begin` and `end` must be byte positions within the string slice. @@ -356,210 +351,6 @@ impl str { core_str::StrExt::slice_mut_unchecked(self, begin, end) } - /// Given a byte position, returns the next `char` and its index. - /// - /// # Panics - /// - /// If `i` is greater than or equal to the length of the string. - /// If `i` is not the index of the beginning of a valid UTF-8 sequence. - /// - /// # Examples - /// - /// This example manually iterates through the code points of a string; - /// this should normally be - /// done by `.chars()` or `.char_indices()`. - /// - /// ``` - /// #![feature(str_char)] - /// - /// use std::str::CharRange; - /// - /// let s = "中华Việt Nam"; - /// let mut i = 0; - /// while i < s.len() { - /// let CharRange {ch, next} = s.char_range_at(i); - /// println!("{}: {}", i, ch); - /// i = next; - /// } - /// ``` - /// - /// This outputs: - /// - /// ```text - /// 0: 中 - /// 3: 华 - /// 6: V - /// 7: i - /// 8: e - /// 9: - /// 11: - /// 13: t - /// 14: - /// 15: N - /// 16: a - /// 17: m - /// ``` - #[unstable(feature = "str_char", - reason = "often replaced by char_indices, this method may \ - be removed in favor of just char_at() or eventually \ - removed altogether", - issue = "27754")] - #[inline] - pub fn char_range_at(&self, start: usize) -> CharRange { - core_str::StrExt::char_range_at(self, start) - } - - /// Given a byte position, returns the previous `char` and its position. 
- /// - /// Note that Unicode has many features, such as combining marks, ligatures, - /// and direction marks, that need to be taken into account to correctly reverse a string. - /// - /// Returns 0 for next index if called on start index 0. - /// - /// # Panics - /// - /// If `i` is greater than the length of the string. - /// If `i` is not an index following a valid UTF-8 sequence. - /// - /// # Examples - /// - /// This example manually iterates through the code points of a string; - /// this should normally be - /// done by `.chars().rev()` or `.char_indices()`. - /// - /// ``` - /// #![feature(str_char)] - /// - /// use std::str::CharRange; - /// - /// let s = "中华Việt Nam"; - /// let mut i = s.len(); - /// while i > 0 { - /// let CharRange {ch, next} = s.char_range_at_reverse(i); - /// println!("{}: {}", i, ch); - /// i = next; - /// } - /// ``` - /// - /// This outputs: - /// - /// ```text - /// 18: m - /// 17: a - /// 16: N - /// 15: - /// 14: t - /// 13: - /// 11: - /// 9: e - /// 8: i - /// 7: V - /// 6: 华 - /// 3: 中 - /// ``` - #[unstable(feature = "str_char", - reason = "often replaced by char_indices, this method may \ - be removed in favor of just char_at_reverse() or \ - eventually removed altogether", - issue = "27754")] - #[inline] - pub fn char_range_at_reverse(&self, start: usize) -> CharRange { - core_str::StrExt::char_range_at_reverse(self, start) - } - - /// Given a byte position, returns the `char` at that position. - /// - /// # Panics - /// - /// If `i` is greater than or equal to the length of the string. - /// If `i` is not the index of the beginning of a valid UTF-8 sequence. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(str_char)] - /// - /// let s = "abπc"; - /// assert_eq!(s.char_at(1), 'b'); - /// assert_eq!(s.char_at(2), 'π'); - /// assert_eq!(s.char_at(4), 'c'); - /// ``` - #[unstable(feature = "str_char", - reason = "frequently replaced by the chars() iterator, this \ - method may be removed or possibly renamed in the \ - future; it is normally replaced by chars/char_indices \ - iterators or by getting the first char from a \ - subslice", - issue = "27754")] - #[inline] - pub fn char_at(&self, i: usize) -> char { - core_str::StrExt::char_at(self, i) - } - - /// Given a byte position, returns the `char` at that position, counting - /// from the end. - /// - /// # Panics - /// - /// If `i` is greater than the length of the string. - /// If `i` is not an index following a valid UTF-8 sequence. - /// - /// # Examples - /// - /// ``` - /// #![feature(str_char)] - /// - /// let s = "abπc"; - /// assert_eq!(s.char_at_reverse(1), 'a'); - /// assert_eq!(s.char_at_reverse(2), 'b'); - /// assert_eq!(s.char_at_reverse(3), 'π'); - /// ``` - #[unstable(feature = "str_char", - reason = "see char_at for more details, but reverse semantics \ - are also somewhat unclear, especially with which \ - cases generate panics", - issue = "27754")] - #[inline] - pub fn char_at_reverse(&self, i: usize) -> char { - core_str::StrExt::char_at_reverse(self, i) - } - - /// Retrieves the first `char` from a `&str` and returns it. - /// - /// Note that a single Unicode character (grapheme cluster) - /// can be composed of multiple `char`s. - /// - /// This does not allocate a new string; instead, it returns a slice that - /// points one code point beyond the code point that was shifted. - /// - /// `None` is returned if the slice is empty. 
- /// - /// # Examples - /// - /// ``` - /// #![feature(str_char)] - /// - /// let s = "Łódź"; // \u{141}o\u{301}dz\u{301} - /// let (c, s1) = s.slice_shift_char().unwrap(); - /// - /// assert_eq!(c, 'Ł'); - /// assert_eq!(s1, "ódź"); - /// - /// let (c, s2) = s1.slice_shift_char().unwrap(); - /// - /// assert_eq!(c, 'o'); - /// assert_eq!(s2, "\u{301}dz\u{301}"); - /// ``` - #[unstable(feature = "str_char", - reason = "awaiting conventions about shifting and slices and \ - may not be warranted with the existence of the chars \ - and/or char_indices iterators", - issue = "27754")] - #[inline] - pub fn slice_shift_char(&self) -> Option<(char, &str)> { - core_str::StrExt::slice_shift_char(self) - } - /// Divide one string slice into two at an index. /// /// The argument, `mid`, should be a byte offset from the start of the @@ -618,9 +409,9 @@ impl str { /// Basic usage: /// /// ``` - /// let s = "Per Martin-Löf"; + /// let mut s = "Per Martin-Löf".to_string(); /// - /// let (first, last) = s.split_at(3); + /// let (first, last) = s.split_at_mut(3); /// /// assert_eq!("Per", first); /// assert_eq!(" Martin-Löf", last); @@ -665,7 +456,7 @@ impl str { /// assert_eq!(None, chars.next()); /// ``` /// - /// Remember, `char`s may not match your human intuition about characters: + /// Remember, [`char`]s may not match your human intuition about characters: /// /// ``` /// let y = "y̆"; @@ -682,16 +473,18 @@ impl str { pub fn chars(&self) -> Chars { core_str::StrExt::chars(self) } - /// Returns an iterator over the `char`s of a string slice, and their + /// Returns an iterator over the [`char`]s of a string slice, and their /// positions. /// /// As a string slice consists of valid UTF-8, we can iterate through a - /// string slice by `char`. This method returns an iterator of both - /// these `char`s, as well as their byte positions. + /// string slice by [`char`]. This method returns an iterator of both + /// these [`char`]s, as well as their byte positions. 
/// - /// The iterator yields tuples. The position is first, the `char` is + /// The iterator yields tuples. The position is first, the [`char`] is /// second. /// + /// [`char`]: primitive.char.html + /// /// # Examples /// /// Basic usage: @@ -715,7 +508,7 @@ impl str { /// assert_eq!(None, char_indices.next()); /// ``` /// - /// Remember, `char`s may not match your human intuition about characters: + /// Remember, [`char`]s may not match your human intuition about characters: /// /// ``` /// let y = "y̆"; @@ -850,11 +643,9 @@ impl str { } /// Returns an iterator of `u16` over the string encoded as UTF-16. - #[unstable(feature = "str_utf16", - reason = "this functionality may only be provided by libunicode", - issue = "27714")] - pub fn utf16_units(&self) -> Utf16Units { - Utf16Units { encoder: Utf16Encoder::new(self[..].chars()) } + #[stable(feature = "encode_utf16", since = "1.8.0")] + pub fn encode_utf16(&self) -> EncodeUtf16 { + EncodeUtf16 { encoder: Utf16Encoder::new(self[..].chars()) } } /// Returns `true` if the given pattern matches a sub-slice of @@ -906,7 +697,7 @@ impl str { /// /// Basic usage: /// - /// ```rust + /// ``` /// let bananas = "bananas"; /// /// assert!(bananas.ends_with("anas")); @@ -922,12 +713,13 @@ impl str { /// Returns the byte index of the first character of this string slice that /// matches the pattern. /// - /// Returns `None` if the pattern doesn't match. + /// Returns [`None`] if the pattern doesn't match. /// /// The pattern can be a `&str`, [`char`], or a closure that determines if /// a character matches. /// /// [`char`]: primitive.char.html + /// [`None`]: option/enum.Option.html#variant.None /// /// # Examples /// @@ -966,12 +758,13 @@ impl str { /// Returns the byte index of the last character of this string slice that /// matches the pattern. /// - /// Returns `None` if the pattern doesn't match. + /// Returns [`None`] if the pattern doesn't match. 
/// /// The pattern can be a `&str`, [`char`], or a closure that determines if /// a character matches. /// /// [`char`]: primitive.char.html + /// [`None`]: option/enum.Option.html#variant.None /// /// # Examples /// @@ -1069,8 +862,34 @@ impl str { /// assert_eq!(d, &["", "", "", "", "a", "", "b", "c"]); /// ``` /// - /// This can lead to possibly surprising behavior when whitespace is used - /// as the separator. This code is correct: + /// Contiguous separators are separated by the empty string. + /// + /// ``` + /// let x = "(///)".to_string(); + /// let d: Vec<_> = x.split('/').collect();; + /// + /// assert_eq!(d, &["(", "", "", ")"]); + /// ``` + /// + /// Separators at the start or end of a string are neighbored + /// by empty strings. + /// + /// ``` + /// let d: Vec<_> = "010".split("0").collect(); + /// assert_eq!(d, &["", "1", ""]); + /// ``` + /// + /// When the empty string is used as a separator, it separates + /// every character in the string, along with the beginning + /// and end of the string. + /// + /// ``` + /// let f: Vec<_> = "rust".split("").collect(); + /// assert_eq!(f, &["", "r", "u", "s", "t", ""]); + /// ``` + /// + /// Contiguous separators can lead to possibly surprising behavior + /// when whitespace is used as the separator. This code is correct: /// /// ``` /// let x = " a b c".to_string(); @@ -1081,7 +900,7 @@ impl str { /// /// It does _not_ give you: /// - /// ```rust,ignore + /// ```,ignore /// assert_eq!(d, &["a", "b", "c"]); /// ``` /// @@ -1191,14 +1010,18 @@ impl str { /// An iterator over substrings of `self`, separated by characters /// matched by a pattern and yielded in reverse order. /// - /// The pattern can be a simple `&str`, `char`, or a closure that + /// The pattern can be a simple `&str`, [`char`], or a closure that /// determines the split. /// Additional libraries might provide more complex patterns like /// regular expressions. 
/// - /// Equivalent to `split`, except that the trailing substring is + /// [`char`]: primitive.char.html + /// + /// Equivalent to [`split()`], except that the trailing substring is /// skipped if empty. /// + /// [`split()`]: #method.split + /// /// This method can be used for string data that is _terminated_, /// rather than _separated_ by a pattern. /// @@ -1230,10 +1053,10 @@ impl str { } /// An iterator over substrings of the given string slice, separated by a - /// pattern, restricted to returning at most `count` items. + /// pattern, restricted to returning at most `n` items. /// - /// The last element returned, if any, will contain the remainder of the - /// string slice. + /// If `n` substrings are returned, the last substring (the `n`th substring) + /// will contain the remainder of the string. /// /// The pattern can be a `&str`, [`char`], or a closure that determines the /// split. @@ -1275,16 +1098,16 @@ impl str { /// assert_eq!(v, ["abc", "defXghi"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn splitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> SplitN<'a, P> { - core_str::StrExt::splitn(self, count, pat) + pub fn splitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> SplitN<'a, P> { + core_str::StrExt::splitn(self, n, pat) } /// An iterator over substrings of this string slice, separated by a /// pattern, starting from the end of the string, restricted to returning - /// at most `count` items. + /// at most `n` items. /// - /// The last element returned, if any, will contain the remainder of the - /// string slice. + /// If `n` substrings are returned, the last substring (the `n`th substring) + /// will contain the remainder of the string. /// /// The pattern can be a `&str`, [`char`], or a closure that /// determines the split. 
@@ -1322,10 +1145,10 @@ impl str { /// assert_eq!(v, ["ghi", "abc1def"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, count: usize, pat: P) -> RSplitN<'a, P> + pub fn rsplitn<'a, P: Pattern<'a>>(&'a self, n: usize, pat: P) -> RSplitN<'a, P> where P::Searcher: ReverseSearcher<'a> { - core_str::StrExt::rsplitn(self, count, pat) + core_str::StrExt::rsplitn(self, n, pat) } /// An iterator over the matches of a pattern within the given string @@ -1461,7 +1284,7 @@ impl str { /// # Iterator behavior /// /// The returned iterator requires that the pattern supports a reverse - /// search, and it will be a `[DoubleEndedIterator]` if a forward/reverse + /// search, and it will be a [`DoubleEndedIterator`] if a forward/reverse /// search yields the same elements. /// /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html @@ -1515,6 +1338,13 @@ impl str { /// 'Whitespace' is defined according to the terms of the Unicode Derived /// Core Property `White_Space`. /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Left' in this context means the first + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _right_ side, not the left. + /// /// # Examples /// /// Basic usage: @@ -1524,6 +1354,16 @@ impl str { /// /// assert_eq!("Hello\tworld\t", s.trim_left()); /// ``` + /// + /// Directionality: + /// + /// ``` + /// let s = " English"; + /// assert!(Some('E') == s.trim_left().chars().next()); + /// + /// let s = " עברית"; + /// assert!(Some('ע') == s.trim_left().chars().next()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn trim_left(&self) -> &str { UnicodeStr::trim_left(self) @@ -1534,6 +1374,13 @@ impl str { /// 'Whitespace' is defined according to the terms of the Unicode Derived /// Core Property `White_Space`. 
/// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Right' in this context means the last + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _left_ side, not the right. + /// /// # Examples /// /// Basic usage: @@ -1543,6 +1390,16 @@ impl str { /// /// assert_eq!(" Hello\tworld", s.trim_right()); /// ``` + /// + /// Directionality: + /// + /// ``` + /// let s = "English "; + /// assert!(Some('h') == s.trim_right().chars().rev().next()); + /// + /// let s = "עברית "; + /// assert!(Some('ת') == s.trim_right().chars().rev().next()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn trim_right(&self) -> &str { UnicodeStr::trim_right(self) @@ -1551,8 +1408,8 @@ impl str { /// Returns a string slice with all prefixes and suffixes that match a /// pattern repeatedly removed. /// - /// The pattern can be a `&str`, [`char`], or a closure that determines - /// if a character matches. + /// The pattern can be a [`char`] or a closure that determines if a + /// character matches. /// /// [`char`]: primitive.char.html /// @@ -1588,6 +1445,13 @@ impl str { /// /// [`char`]: primitive.char.html /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Left' in this context means the first + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _right_ side, not the left. + /// /// # Examples /// /// Basic usage: @@ -1612,6 +1476,13 @@ impl str { /// /// [`char`]: primitive.char.html /// + /// # Text directionality + /// + /// A string is a sequence of bytes. 'Right' in this context means the last + /// position of that byte string; for a language like Arabic or Hebrew + /// which are 'right to left' rather than 'left to right', this will be + /// the _left_ side, not the right. 
+ /// /// # Examples /// /// Simple patterns: @@ -1648,11 +1519,13 @@ impl str { /// /// [`FromStr`]: str/trait.FromStr.html /// - /// # Failure + /// # Errors /// - /// Will return `Err` if it's not possible to parse this string slice into + /// Will return [`Err`] if it's not possible to parse this string slice into /// the desired type. /// + /// [`Err`]: str/trait.FromStr.html#associatedtype.Err + /// /// # Example /// /// Basic usage @@ -1663,7 +1536,7 @@ impl str { /// assert_eq!(4, four); /// ``` /// - /// Using the 'turbofish' instead of annotationg `four`: + /// Using the 'turbofish' instead of annotating `four`: /// /// ``` /// let four = "4".parse::(); @@ -1721,11 +1594,56 @@ impl str { result } - /// Returns the lowercase equivalent of this string slice, as a new `String`. + /// Replaces first N matches of a pattern with another string. + /// + /// `replacen` creates a new [`String`], and copies the data from this string slice into it. + /// While doing so, it attempts to find matches of a pattern. If it finds any, it + /// replaces them with the replacement string slice at most `N` times. 
+ /// + /// [`String`]: string/struct.String.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # #![feature(str_replacen)] + /// let s = "foo foo 123 foo"; + /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2)); + /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3)); + /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1)); + /// ``` + /// + /// When the pattern doesn't match: + /// + /// ``` + /// # #![feature(str_replacen)] + /// let s = "this is old"; + /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10)); + /// ``` + #[unstable(feature = "str_replacen", + issue = "36436", + reason = "only need to replace first N matches")] + pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String { + // Hope to reduce the times of re-allocation + let mut result = String::with_capacity(32); + let mut last_end = 0; + for (start, part) in self.match_indices(pat).take(count) { + result.push_str(unsafe { self.slice_unchecked(last_end, start) }); + result.push_str(to); + last_end = start + part.len(); + } + result.push_str(unsafe { self.slice_unchecked(last_end, self.len()) }); + result + } + + /// Returns the lowercase equivalent of this string slice, as a new [`String`]. /// /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property /// `Lowercase`. /// + /// [`String`]: string/struct.String.html + /// /// # Examples /// /// Basic usage: @@ -1764,7 +1682,7 @@ impl str { // Σ maps to σ, except at the end of a word where it maps to ς. // This is the only conditional (contextual) but language-independent mapping // in `SpecialCasing.txt`, - // so hard-code it rather than have a generic "condition" mechanim. + // so hard-code it rather than have a generic "condition" mechanism. 
// See https://github.com/rust-lang/rust/issues/26035 map_uppercase_sigma(self, i, &mut s) } else { @@ -1795,11 +1713,13 @@ impl str { } } - /// Returns the uppercase equivalent of this string slice, as a new `String`. + /// Returns the uppercase equivalent of this string slice, as a new [`String`]. /// /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property /// `Uppercase`. /// + /// [`String`]: string/struct.String.html + /// /// # Examples /// /// Basic usage: @@ -1824,6 +1744,14 @@ impl str { return s; } + /// Escapes each char in `s` with `char::escape_debug`. + #[unstable(feature = "str_escape", + reason = "return type may change to be an iterator", + issue = "27791")] + pub fn escape_debug(&self) -> String { + self.chars().flat_map(|c| c.escape_debug()).collect() + } + /// Escapes each char in `s` with `char::escape_default`. #[unstable(feature = "str_escape", reason = "return type may change to be an iterator", @@ -1840,7 +1768,9 @@ impl str { self.chars().flat_map(|c| c.escape_unicode()).collect() } - /// Converts a `Box` into a `String` without copying or allocating. + /// Converts a `Box` into a [`String`] without copying or allocating. + /// + /// [`String`]: string/struct.String.html /// /// # Examples /// @@ -1859,4 +1789,24 @@ impl str { String::from_utf8_unchecked(slice.into_vec()) } } + + /// Create a [`String`] by repeating a string `n` times. 
+ /// + /// [`String`]: string/struct.String.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(repeat_str)] + /// + /// assert_eq!("abc".repeat(4), String::from("abcabcabcabc")); + /// ``` + #[unstable(feature = "repeat_str", issue = "37079")] + pub fn repeat(&self, n: usize) -> String { + let mut s = String::with_capacity(self.len() * n); + s.extend((0..n).map(|_| self)); + s + } } diff --git a/src/libcollections/string.rs b/src/libcollections/string.rs index ad9c770a5a5ca..ddde9d06d8176 100644 --- a/src/libcollections/string.rs +++ b/src/libcollections/string.rs @@ -14,24 +14,25 @@ //! [`ToString`]s, and several error types that may result from working with //! [`String`]s. //! -//! [`String`]: struct.String.html //! [`ToString`]: trait.ToString.html //! //! # Examples //! -//! There are multiple ways to create a new `String` from a string literal: +//! There are multiple ways to create a new [`String`] from a string literal: //! -//! ```rust +//! ``` //! let s = "Hello".to_string(); //! //! let s = String::from("world"); //! let s: String = "also this".into(); //! ``` //! -//! You can create a new `String` from an existing one by concatenating with +//! You can create a new [`String`] from an existing one by concatenating with //! `+`: //! -//! ```rust +//! [`String`]: struct.String.html +//! +//! ``` //! let s = "Hello".to_string(); //! //! let message = s + " world!"; @@ -40,7 +41,7 @@ //! If you have a vector of valid UTF-8 bytes, you can make a `String` out of //! it. You can do the reverse too. //! -//! ```rust +//! ``` //! let sparkle_heart = vec![240, 159, 146, 150]; //! //! // We know these bytes are valid, so we'll use `unwrap()`. 
@@ -57,17 +58,15 @@ use core::fmt; use core::hash; -use core::iter::FromIterator; +use core::iter::{FromIterator, FusedIterator}; use core::mem; -use core::ops::{self, Add}; +use core::ops::{self, Add, AddAssign, Index, IndexMut}; use core::ptr; -use core::slice; use core::str::pattern::Pattern; use rustc_unicode::char::{decode_utf16, REPLACEMENT_CHARACTER}; use rustc_unicode::str as unicode_str; -#[allow(deprecated)] -use borrow::{Cow, IntoCow}; +use borrow::{Cow, ToOwned}; use range::RangeArgument; use str::{self, FromStr, Utf8Error, Chars}; use vec::Vec; @@ -79,7 +78,7 @@ use boxed::Box; /// contents of the string. It has a close relationship with its borrowed /// counterpart, the primitive [`str`]. /// -/// [`str`]: ../primitive.str.html +/// [`str`]: ../../std/primitive.str.html /// /// # Examples /// @@ -99,7 +98,7 @@ use boxed::Box; /// hello.push_str("orld!"); /// ``` /// -/// [`char`]: ../primitive.char.html +/// [`char`]: ../../std/primitive.char.html /// [`push()`]: #method.push /// [`push_str()`]: #method.push_str /// @@ -131,15 +130,15 @@ use boxed::Box; /// println!("The first letter of s is {}", s[0]); // ERROR!!! /// ``` /// -/// [`OsString`]: ../ffi/struct.OsString.html +/// [`OsString`]: ../../std/ffi/struct.OsString.html /// /// Indexing is intended to be a constant-time operation, but UTF-8 encoding -/// does not allow us to do this. Furtheremore, it's not clear what sort of +/// does not allow us to do this. Furthermore, it's not clear what sort of /// thing the index should return: a byte, a codepoint, or a grapheme cluster. -/// The [`as_bytes()`] and [`chars()`] methods return iterators over the first +/// The [`bytes()`] and [`chars()`] methods return iterators over the first /// two, respectively. 
/// -/// [`as_bytes()`]: #method.as_bytes +/// [`bytes()`]: #method.bytes /// [`chars()`]: #method.chars /// /// # Deref @@ -156,8 +155,8 @@ use boxed::Box; /// takes_str(&s); /// ``` /// -/// [`&str`]: ../primitive.str.html -/// [`Deref`]: ../ops/trait.Deref.html +/// [`&str`]: ../../std/primitive.str.html +/// [`Deref`]: ../../std/ops/trait.Deref.html /// /// This will create a [`&str`] from the `String` and pass it in. This /// conversion is very inexpensive, and so generally, functions will accept @@ -186,14 +185,14 @@ use boxed::Box; /// let len = story.len(); /// let capacity = story.capacity(); /// -/// // story has thirteen bytes +/// // story has nineteen bytes /// assert_eq!(19, len); /// /// // Now that we have our parts, we throw the story away. /// mem::forget(story); /// /// // We can re-build a String out of ptr, len, and capacity. This is all -/// // unsafe becuase we are responsible for making sure the components are +/// // unsafe because we are responsible for making sure the components are /// // valid: /// let s = unsafe { String::from_raw_parts(ptr as *mut _, len, capacity) } ; /// @@ -280,10 +279,10 @@ pub struct String { /// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error` /// through the [`utf8_error()`] method. /// -/// [`Utf8Error`]: ../str/struct.Utf8Error.html -/// [`std::str`]: ../str/index.html -/// [`u8`]: ../primitive.u8.html -/// [`&str`]: ../primitive.str.html +/// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html +/// [`std::str`]: ../../std/str/index.html +/// [`u8`]: ../../std/primitive.u8.html +/// [`&str`]: ../../std/primitive.str.html /// [`utf8_error()`]: #method.utf8_error /// /// # Examples @@ -414,9 +413,9 @@ impl String { /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that /// the bytes are valid UTF-8, and then does the conversion. 
/// - /// [`&str`]: ../primitive.str.html - /// [`u8`]: ../primitive.u8.html - /// [`Vec`]: ../vec/struct.Vec.html + /// [`&str`]: ../../std/primitive.str.html + /// [`u8`]: ../../std/primitive.u8.html + /// [`Vec`]: ../../std/vec/struct.Vec.html /// /// If you are sure that the byte slice is valid UTF-8, and you don't want /// to incur the overhead of the validity check, there is an unsafe version @@ -431,9 +430,9 @@ impl String { /// If you need a `&str` instead of a `String`, consider /// [`str::from_utf8()`]. /// - /// [`str::from_utf8()`]: ../str/fn.from_utf8.html + /// [`str::from_utf8()`]: ../../std/str/fn.from_utf8.html /// - /// # Failure + /// # Errors /// /// Returns `Err` if the slice is not UTF-8 with a description as to why the /// provided bytes are not UTF-8. The vector you moved in is also included. @@ -479,18 +478,17 @@ impl String { } } - /// Converts a slice of bytes to a `String`, including invalid characters. + /// Converts a slice of bytes to a string, including invalid characters. /// - /// A string slice ([`&str`]) is made of bytes ([`u8`]), and a slice of - /// bytes ([`&[u8]`][byteslice]) is made of bytes, so this function converts between - /// the two. Not all byte slices are valid string slices, however: [`&str`] - /// requires that it is valid UTF-8. During this conversion, + /// Strings are made of bytes ([`u8`]), and a slice of bytes + /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts + /// between the two. Not all byte slices are valid strings, however: strings + /// are required to be valid UTF-8. 
During this conversion, /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with /// `U+FFFD REPLACEMENT CHARACTER`, which looks like this: � /// - /// [`&str`]: ../primitive.str.html - /// [`u8`]: ../primitive.u8.html - /// [byteslice]: ../primitive.slice.html + /// [`u8`]: ../../std/primitive.u8.html + /// [byteslice]: ../../std/primitive.slice.html /// /// If you are sure that the byte slice is valid UTF-8, and you don't want /// to incur the overhead of the conversion, there is an unsafe version @@ -499,10 +497,13 @@ impl String { /// /// [`from_utf8_unchecked()`]: struct.String.html#method.from_utf8_unchecked /// - /// If you need a [`&str`] instead of a `String`, consider - /// [`str::from_utf8()`]. + /// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid + /// UTF-8, then we need to insert the replacement characters, which will + /// change the size of the string, and hence, require a `String`. But if + /// it's already valid UTF-8, we don't need a new allocation. This return + /// type allows us to handle both cases. /// - /// [`str::from_utf8()`]: ../str/fn.from_utf8.html + /// [`Cow<'a, str>`]: ../../std/borrow/enum.Cow.html /// /// # Examples /// @@ -512,8 +513,7 @@ impl String { /// // some bytes, in a vector /// let sparkle_heart = vec![240, 159, 146, 150]; /// - /// // We know these bytes are valid, so we'll use `unwrap()`. - /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap(); + /// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart); /// /// assert_eq!("💖", sparkle_heart); /// ``` @@ -641,7 +641,7 @@ impl String { Cow::Owned(res) } - /// Decode a UTF-16 encoded vector `v` into a `String`, returning `None` + /// Decode a UTF-16 encoded vector `v` into a `String`, returning `Err` /// if `v` contains any invalid data. /// /// # Examples @@ -702,6 +702,12 @@ impl String { /// Violating these may cause problems like corrupting the allocator's /// internal datastructures. 
/// + /// The ownership of `ptr` is effectively transferred to the + /// `String` which may then deallocate, reallocate or change the + /// contents of memory pointed to by the pointer at will. Ensure + /// that nothing else uses the pointer after calling this + /// function. + /// /// # Examples /// /// Basic usage: @@ -970,22 +976,7 @@ impl String { pub fn push(&mut self, ch: char) { match ch.len_utf8() { 1 => self.vec.push(ch as u8), - ch_len => { - let cur_len = self.len(); - // This may use up to 4 bytes. - self.vec.reserve(ch_len); - - unsafe { - // Attempt to not use an intermediate buffer by just pushing bytes - // directly onto this string. - let slice = slice::from_raw_parts_mut(self.vec - .as_mut_ptr() - .offset(cur_len as isize), - ch_len); - let used = ch.encode_utf8(slice).unwrap_or(0); - self.vec.set_len(cur_len + used); - } - } + _ => self.vec.extend_from_slice(ch.encode_utf8(&mut [0;4]).as_bytes()), } } @@ -1008,12 +999,14 @@ impl String { /// Shortens this `String` to the specified length. /// + /// If `new_len` is greater than the string's current length, this has no + /// effect. + /// /// # Panics /// - /// Panics if `new_len` > current length, or if `new_len` does not lie on a - /// [`char`] boundary. + /// Panics if `new_len` does not lie on a [`char`] boundary. /// - /// [`char`]: ../primitive.char.html + /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// @@ -1029,8 +1022,10 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn truncate(&mut self, new_len: usize) { - assert!(self.is_char_boundary(new_len)); - self.vec.truncate(new_len) + if new_len <= self.len() { + assert!(self.is_char_boundary(new_len)); + self.vec.truncate(new_len) + } } /// Removes the last character from the string buffer and returns it. 
@@ -1053,14 +1048,13 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn pop(&mut self) -> Option { - let len = self.len(); - if len == 0 { - return None; - } - - let ch = self.char_at_reverse(len); + let ch = match self.chars().rev().next() { + Some(ch) => ch, + None => return None, + }; + let newlen = self.len() - ch.len_utf8(); unsafe { - self.vec.set_len(len - ch.len_utf8()); + self.vec.set_len(newlen); } Some(ch) } @@ -1075,7 +1069,7 @@ impl String { /// Panics if `idx` is larger than or equal to the `String`'s length, /// or if it does not lie on a [`char`] boundary. /// - /// [`char`]: ../primitive.char.html + /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// @@ -1091,11 +1085,13 @@ impl String { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove(&mut self, idx: usize) -> char { - let len = self.len(); - assert!(idx < len); + let ch = match self[idx..].chars().next() { + Some(ch) => ch, + None => panic!("cannot remove a char from the end of a string"), + }; - let ch = self.char_at(idx); let next = idx + ch.len_utf8(); + let len = self.len(); unsafe { ptr::copy(self.vec.as_ptr().offset(next as isize), self.vec.as_mut_ptr().offset(idx as isize), @@ -1115,7 +1111,7 @@ impl String { /// Panics if `idx` is larger than the `String`'s length, or if it does not /// lie on a [`char`] boundary. 
/// - /// [`char`]: ../primitive.char.html + /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// @@ -1136,18 +1132,63 @@ impl String { let len = self.len(); assert!(idx <= len); assert!(self.is_char_boundary(idx)); - self.vec.reserve(4); let mut bits = [0; 4]; - let amt = ch.encode_utf8(&mut bits).unwrap(); + let bits = ch.encode_utf8(&mut bits).as_bytes(); unsafe { - ptr::copy(self.vec.as_ptr().offset(idx as isize), - self.vec.as_mut_ptr().offset((idx + amt) as isize), - len - idx); - ptr::copy(bits.as_ptr(), - self.vec.as_mut_ptr().offset(idx as isize), - amt); - self.vec.set_len(len + amt); + self.insert_bytes(idx, bits); + } + } + + unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) { + let len = self.len(); + let amt = bytes.len(); + self.vec.reserve(amt); + + ptr::copy(self.vec.as_ptr().offset(idx as isize), + self.vec.as_mut_ptr().offset((idx + amt) as isize), + len - idx); + ptr::copy(bytes.as_ptr(), + self.vec.as_mut_ptr().offset(idx as isize), + amt); + self.vec.set_len(len + amt); + } + + /// Inserts a string slice into this `String` at a byte position. + /// + /// This is an `O(n)` operation as it requires copying every element in the + /// buffer. + /// + /// # Panics + /// + /// Panics if `idx` is larger than the `String`'s length, or if it does not + /// lie on a [`char`] boundary. 
+ /// + /// [`char`]: ../../std/primitive.char.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(insert_str)] + /// + /// let mut s = String::from("bar"); + /// + /// s.insert_str(0, "foo"); + /// + /// assert_eq!("foobar", s); + /// ``` + #[inline] + #[unstable(feature = "insert_str", + reason = "recent addition", + issue = "35553")] + pub fn insert_str(&mut self, idx: usize, string: &str) { + assert!(idx <= self.len()); + assert!(self.is_char_boundary(idx)); + + unsafe { + self.insert_bytes(idx, string.as_bytes()); } } @@ -1219,6 +1260,39 @@ impl String { self.len() == 0 } + /// Divide one string into two at an index. + /// + /// The argument, `mid`, should be a byte offset from the start of the string. It must also + /// be on the boundary of a UTF-8 code point. + /// + /// The two strings returned go from the start of the string to `mid`, and from `mid` to the end + /// of the string. + /// + /// # Panics + /// + /// Panics if `mid` is not on a `UTF-8` code point boundary, or if it is beyond the last + /// code point of the string. + /// + /// # Examples + /// + /// ``` + /// # #![feature(string_split_off)] + /// # fn main() { + /// let mut hello = String::from("Hello, World!"); + /// let world = hello.split_off(7); + /// assert_eq!(hello, "Hello, "); + /// assert_eq!(world, "World!"); + /// # } + /// ``` + #[inline] + #[unstable(feature = "string_split_off", issue = "38080")] + pub fn split_off(&mut self, mid: usize) -> String { + assert!(self.is_char_boundary(mid)); + assert!(mid <= self.len()); + let other = self.vec.split_off(mid); + unsafe { String::from_utf8_unchecked(other) } + } + /// Truncates this `String`, removing all contents. /// /// While this means the `String` will have a length of zero, it does not @@ -1254,7 +1328,7 @@ impl String { /// Panics if the starting point or end point do not lie on a [`char`] /// boundary, or if they're out of bounds. 
/// - /// [`char`]: ../primitive.char.html + /// [`char`]: ../../std/primitive.char.html /// /// # Examples /// @@ -1352,10 +1426,10 @@ impl FromUtf8Error { /// an analogue to `FromUtf8Error`. See its documentation for more details /// on using it. /// - /// [`Utf8Error`]: ../str/struct.Utf8Error.html - /// [`std::str`]: ../str/index.html - /// [`u8`]: ../primitive.u8.html - /// [`&str`]: ../primitive.str.html + /// [`Utf8Error`]: ../../std/str/struct.Utf8Error.html + /// [`std::str`]: ../../std/str/index.html + /// [`u8`]: ../../std/primitive.u8.html + /// [`&str`]: ../../std/primitive.str.html /// /// # Examples /// @@ -1403,35 +1477,35 @@ impl Clone for String { #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator for String { - fn from_iter>(iterable: I) -> String { + fn from_iter>(iter: I) -> String { let mut buf = String::new(); - buf.extend(iterable); + buf.extend(iter); buf } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> FromIterator<&'a str> for String { - fn from_iter>(iterable: I) -> String { + fn from_iter>(iter: I) -> String { let mut buf = String::new(); - buf.extend(iterable); + buf.extend(iter); buf } } #[stable(feature = "extend_string", since = "1.4.0")] impl FromIterator for String { - fn from_iter>(iterable: I) -> String { + fn from_iter>(iter: I) -> String { let mut buf = String::new(); - buf.extend(iterable); + buf.extend(iter); buf } } #[stable(feature = "rust1", since = "1.0.0")] impl Extend for String { - fn extend>(&mut self, iterable: I) { - let iterator = iterable.into_iter(); + fn extend>(&mut self, iter: I) { + let iterator = iter.into_iter(); let (lower_bound, _) = iterator.size_hint(); self.reserve(lower_bound); for ch in iterator { @@ -1442,15 +1516,15 @@ impl Extend for String { #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a> Extend<&'a char> for String { - fn extend>(&mut self, iterable: I) { - self.extend(iterable.into_iter().cloned()); + fn extend>(&mut self, iter: I) { + 
self.extend(iter.into_iter().cloned()); } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> Extend<&'a str> for String { - fn extend>(&mut self, iterable: I) { - for s in iterable { + fn extend>(&mut self, iter: I) { + for s in iter { self.push_str(s) } } @@ -1458,8 +1532,8 @@ impl<'a> Extend<&'a str> for String { #[stable(feature = "extend_string", since = "1.4.0")] impl Extend for String { - fn extend>(&mut self, iterable: I) { - for s in iterable { + fn extend>(&mut self, iter: I) { + for s in iter { self.push_str(&s) } } @@ -1528,6 +1602,7 @@ impl_eq! { Cow<'a, str>, String } #[stable(feature = "rust1", since = "1.0.0")] impl Default for String { + /// Creates an empty `String`. #[inline] fn default() -> String { String::new() @@ -1569,6 +1644,14 @@ impl<'a> Add<&'a str> for String { } } +#[stable(feature = "stringaddassign", since = "1.12.0")] +impl<'a> AddAssign<&'a str> for String { + #[inline] + fn add_assign(&mut self, other: &str) { + self.push_str(other); + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index> for String { type Output = str; @@ -1605,6 +1688,24 @@ impl ops::Index for String { unsafe { str::from_utf8_unchecked(&self.vec) } } } +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::Index> for String { + type Output = str; + + #[inline] + fn index(&self, index: ops::RangeInclusive) -> &str { + Index::index(&**self, index) + } +} +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::Index> for String { + type Output = str; + + #[inline] + fn index(&self, index: ops::RangeToInclusive) -> &str { + Index::index(&**self, index) + } +} #[stable(feature = "derefmut_for_string", since = "1.2.0")] impl ops::IndexMut> for String { @@ -1634,6 +1735,20 @@ impl ops::IndexMut for String { unsafe { mem::transmute(&mut *self.vec) } } } +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = 
"28237")] +impl ops::IndexMut> for String { + #[inline] + fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut str { + IndexMut::index_mut(&mut **self, index) + } +} +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::IndexMut> for String { + #[inline] + fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut str { + IndexMut::index_mut(&mut **self, index) + } +} #[stable(feature = "rust1", since = "1.0.0")] impl ops::Deref for String { @@ -1662,9 +1777,9 @@ impl ops::DerefMut for String { /// [`String`] without error, this type will never actually be returned. As /// such, it is only here to satisfy said signature, and is useless otherwise. /// -/// [`FromStr`]: ../str/trait.FromStr.html +/// [`FromStr`]: ../../std/str/trait.FromStr.html /// [`String`]: struct.String.html -/// [`from_str()`]: ../str/trait.FromStr.html#tymethod.from_str +/// [`from_str()`]: ../../std/str/trait.FromStr.html#tymethod.from_str #[stable(feature = "str_parse_error", since = "1.5.0")] #[derive(Copy)] pub enum ParseError {} @@ -1692,6 +1807,13 @@ impl fmt::Debug for ParseError { } } +#[stable(feature = "str_parse_error2", since = "1.8.0")] +impl fmt::Display for ParseError { + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } +} + #[stable(feature = "str_parse_error", since = "1.5.0")] impl PartialEq for ParseError { fn eq(&self, _: &ParseError) -> bool { @@ -1709,7 +1831,7 @@ impl Eq for ParseError {} /// [`Display`] should be implemented instead, and you get the `ToString` /// implementation for free. /// -/// [`Display`]: ../fmt/trait.Display.html +/// [`Display`]: ../../std/fmt/trait.Display.html #[stable(feature = "rust1", since = "1.0.0")] pub trait ToString { /// Converts the given value to a `String`. 
@@ -1731,7 +1853,7 @@ pub trait ToString { #[stable(feature = "rust1", since = "1.0.0")] impl ToString for T { #[inline] - fn to_string(&self) -> String { + default fn to_string(&self) -> String { use core::fmt::Write; let mut buf = String::new(); let _ = buf.write_fmt(format_args!("{}", self)); @@ -1740,6 +1862,14 @@ impl ToString for T { } } +#[stable(feature = "str_to_string_specialization", since = "1.9.0")] +impl ToString for str { + #[inline] + fn to_string(&self) -> String { + String::from(self) + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl AsRef for String { #[inline] @@ -1758,20 +1888,15 @@ impl AsRef<[u8]> for String { #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<&'a str> for String { - #[cfg(not(test))] - #[inline] fn from(s: &'a str) -> String { - String { vec: <[_]>::to_vec(s.as_bytes()) } + s.to_owned() } +} - // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is - // required for this method definition, is not available. 
Since we don't - // require this method for testing purposes, I'll just stub it - // NB see the slice::hack module in slice.rs for more information - #[inline] - #[cfg(test)] - fn from(_: &str) -> String { - panic!("not available with cfg(test)"); +#[stable(feature = "string_from_cow_str", since = "1.14.0")] +impl<'a> From> for String { + fn from(s: Cow<'a, str>) -> String { + s.into_owned() } } @@ -1791,30 +1916,31 @@ impl<'a> From for Cow<'a, str> { } } -#[stable(feature = "rust1", since = "1.0.0")] -impl Into> for String { - fn into(self) -> Vec { - self.into_bytes() +#[stable(feature = "cow_str_from_iter", since = "1.12.0")] +impl<'a> FromIterator for Cow<'a, str> { + fn from_iter>(it: I) -> Cow<'a, str> { + Cow::Owned(FromIterator::from_iter(it)) } } -#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`", - issue= "27735")] -#[allow(deprecated)] -impl IntoCow<'static, str> for String { - #[inline] - fn into_cow(self) -> Cow<'static, str> { - Cow::Owned(self) +#[stable(feature = "cow_str_from_iter", since = "1.12.0")] +impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> { + fn from_iter>(it: I) -> Cow<'a, str> { + Cow::Owned(FromIterator::from_iter(it)) } } -#[unstable(feature = "into_cow", reason = "may be replaced by `convert::Into`", - issue = "27735")] -#[allow(deprecated)] -impl<'a> IntoCow<'a, str> for &'a str { - #[inline] - fn into_cow(self) -> Cow<'a, str> { - Cow::Borrowed(self) +#[stable(feature = "cow_str_from_iter", since = "1.12.0")] +impl<'a> FromIterator for Cow<'a, str> { + fn from_iter>(it: I) -> Cow<'a, str> { + Cow::Owned(FromIterator::from_iter(it)) + } +} + +#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")] +impl From for Vec { + fn from(string : String) -> Vec { + string.into_bytes() } } @@ -1834,6 +1960,12 @@ impl fmt::Write for String { } /// A draining iterator for `String`. +/// +/// This struct is created by the [`drain()`] method on [`String`]. See its +/// documentation for more. 
+/// +/// [`drain()`]: struct.String.html#method.drain +/// [`String`]: struct.String.html #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a> { /// Will be used as &'a mut String in the destructor @@ -1886,3 +2018,6 @@ impl<'a> DoubleEndedIterator for Drain<'a> { self.iter.next_back() } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Drain<'a> {} diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index a49b7304643cc..f26324127003b 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -8,21 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! A growable list type with heap-allocated contents, written `Vec` but -//! pronounced 'vector.' +//! A contiguous growable array type with heap-allocated contents, written +//! `Vec` but pronounced 'vector.' //! //! Vectors have `O(1)` indexing, amortized `O(1)` push (to the end) and //! `O(1)` pop (from the end). //! //! # Examples //! -//! You can explicitly create a `Vec` with `new()`: +//! You can explicitly create a [`Vec`] with [`new()`]: //! //! ``` //! let v: Vec = Vec::new(); //! ``` //! -//! ...or by using the `vec!` macro: +//! ...or by using the [`vec!`] macro: //! //! ``` //! let v: Vec = vec![]; @@ -32,7 +32,7 @@ //! let v = vec![0; 10]; // ten zeroes //! ``` //! -//! You can `push` values onto the end of a vector (which will grow the vector +//! You can [`push`] values onto the end of a vector (which will grow the vector //! as needed): //! //! ``` @@ -49,36 +49,43 @@ //! let two = v.pop(); //! ``` //! -//! Vectors also support indexing (through the `Index` and `IndexMut` traits): +//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits): //! //! ``` //! let mut v = vec![1, 2, 3]; //! let three = v[2]; //! v[1] = v[1] + 5; //! ``` +//! +//! [`Vec`]: ../../std/vec/struct.Vec.html +//! [`new()`]: ../../std/vec/struct.Vec.html#method.new +//! 
[`push`]: ../../std/vec/struct.Vec.html#method.push +//! [`Index`]: ../../std/ops/trait.Index.html +//! [`IndexMut`]: ../../std/ops/trait.IndexMut.html +//! [`vec!`]: ../../std/macro.vec.html #![stable(feature = "rust1", since = "1.0.0")] -use alloc::raw_vec::RawVec; use alloc::boxed::Box; use alloc::heap::EMPTY; +use alloc::raw_vec::RawVec; +use borrow::ToOwned; +use borrow::Cow; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; -use core::intrinsics::{arith_offset, assume, needs_drop}; -use core::iter::FromIterator; +use core::intrinsics::{arith_offset, assume}; +use core::iter::{FromIterator, FusedIterator, TrustedLen}; use core::mem; use core::ops::{Index, IndexMut}; use core::ops; use core::ptr; +use core::ptr::Shared; use core::slice; -#[allow(deprecated)] -use borrow::{Cow, IntoCow}; - use super::range::RangeArgument; -/// A growable list type, written `Vec` but pronounced 'vector.' +/// A contiguous growable array type, written `Vec` but pronounced 'vector'. /// /// # Examples /// @@ -104,7 +111,7 @@ use super::range::RangeArgument; /// assert_eq!(vec, [7, 1, 2, 3]); /// ``` /// -/// The `vec!` macro is provided to make initialization more convenient: +/// The [`vec!`] macro is provided to make initialization more convenient: /// /// ``` /// let mut vec = vec![1, 2, 3]; @@ -134,6 +141,49 @@ use super::range::RangeArgument; /// } /// ``` /// +/// # Indexing +/// +/// The `Vec` type allows to access values by index, because it implements the +/// [`Index`] trait. An example will be more explicit: +/// +/// ``` +/// let v = vec![0, 2, 4, 6]; +/// println!("{}", v[1]); // it will display '2' +/// ``` +/// +/// However be careful: if you try to access an index which isn't in the `Vec`, +/// your software will panic! You cannot do this: +/// +/// ```ignore +/// let v = vec![0, 2, 4, 6]; +/// println!("{}", v[6]); // it will panic! +/// ``` +/// +/// In conclusion: always check if the index you want to get really exists +/// before doing it. 
+/// +/// # Slicing +/// +/// A `Vec` can be mutable. Slices, on the other hand, are read-only objects. +/// To get a slice, use `&`. Example: +/// +/// ``` +/// fn read_slice(slice: &[usize]) { +/// // ... +/// } +/// +/// let v = vec![0, 1]; +/// read_slice(&v); +/// +/// // ... and that's all! +/// // you can also do it like this: +/// let x : &[usize] = &v; +/// ``` +/// +/// In Rust, it's more common to pass slices as arguments rather than vectors +/// when you just want to provide a read access. The same goes for [`String`] and +/// [`&str`]. +/// /// # Capacity and reallocation /// /// The capacity of a vector is the amount of space allocated for any future @@ -147,84 +197,100 @@ use super::range::RangeArgument; /// with space for 10 more elements. Pushing 10 or fewer elements onto the /// vector will not change its capacity or cause reallocation to occur. However, /// if the vector's length is increased to 11, it will have to reallocate, which -/// can be slow. For this reason, it is recommended to use `Vec::with_capacity` +/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`] /// whenever possible to specify how big the vector is expected to get. /// /// # Guarantees /// -/// Due to its incredibly fundamental nature, Vec makes a lot of guarantees +/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees /// about its design. This ensures that it's as low-overhead as possible in /// the general case, and can be correctly manipulated in primitive ways /// by unsafe code. Note that these guarantees refer to an unqualified `Vec`. /// If additional type parameters are added (e.g. to support custom allocators), /// overriding their defaults may change the behavior. /// -/// Most fundamentally, Vec is and always will be a (pointer, capacity, length) +/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length) /// triplet. No more, no less. 
The order of these fields is completely /// unspecified, and you should use the appropriate methods to modify these. /// The pointer will never be null, so this type is null-pointer-optimized. /// /// However, the pointer may not actually point to allocated memory. In particular, -/// if you construct a Vec with capacity 0 via `Vec::new()`, `vec![]`, -/// `Vec::with_capacity(0)`, or by calling `shrink_to_fit()` on an empty Vec, it -/// will not allocate memory. Similarly, if you store zero-sized types inside -/// a Vec, it will not allocate space for them. *Note that in this case the -/// Vec may not report a `capacity()` of 0*. Vec will allocate if and only -/// if `mem::size_of::() * capacity() > 0`. In general, Vec's allocation +/// if you construct a `Vec` with capacity 0 via [`Vec::new()`], [`vec![]`][`vec!`], +/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit()`] +/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized +/// types inside a `Vec`, it will not allocate space for them. *Note that in this case +/// the `Vec` may not report a [`capacity()`] of 0*. `Vec` will allocate if and only +/// if [`mem::size_of::()`]` * capacity() > 0`. In general, `Vec`'s allocation /// details are subtle enough that it is strongly recommended that you only -/// free memory allocated by a Vec by creating a new Vec and dropping it. +/// free memory allocated by a `Vec` by creating a new `Vec` and dropping it. /// -/// If a Vec *has* allocated memory, then the memory it points to is on the heap +/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap /// (as defined by the allocator Rust is configured to use by default), and its -/// pointer points to `len()` initialized elements in order (what you would see -/// if you coerced it to a slice), followed by `capacity() - len()` logically -/// uninitialized elements. 
+/// pointer points to [`len()`] initialized elements in order (what you would see +/// if you coerced it to a slice), followed by [`capacity()`]` - `[`len()`] +/// logically uninitialized elements. /// -/// Vec will never perform a "small optimization" where elements are actually +/// `Vec` will never perform a "small optimization" where elements are actually /// stored on the stack for two reasons: /// /// * It would make it more difficult for unsafe code to correctly manipulate -/// a Vec. The contents of a Vec wouldn't have a stable address if it were -/// only moved, and it would be more difficult to determine if a Vec had +/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were +/// only moved, and it would be more difficult to determine if a `Vec` had /// actually allocated memory. /// /// * It would penalize the general case, incurring an additional branch /// on every access. /// -/// Vec will never automatically shrink itself, even if completely empty. This -/// ensures no unnecessary allocations or deallocations occur. Emptying a Vec -/// and then filling it back up to the same `len()` should incur no calls to -/// the allocator. If you wish to free up unused memory, use `shrink_to_fit`. +/// `Vec` will never automatically shrink itself, even if completely empty. This +/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec` +/// and then filling it back up to the same [`len()`] should incur no calls to +/// the allocator. If you wish to free up unused memory, use +/// [`shrink_to_fit`][`shrink_to_fit()`]. /// -/// `push` and `insert` will never (re)allocate if the reported capacity is -/// sufficient. `push` and `insert` *will* (re)allocate if `len() == capacity()`. -/// That is, the reported capacity is completely accurate, and can be relied on. -/// It can even be used to manually free the memory allocated by a Vec if -/// desired. Bulk insertion methods *may* reallocate, even when not necessary. 
+/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is +/// sufficient. [`push`] and [`insert`] *will* (re)allocate if +/// [`len()`]` == `[`capacity()`]. That is, the reported capacity is completely +/// accurate, and can be relied on. It can even be used to manually free the memory +/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even +/// when not necessary. /// -/// Vec does not guarantee any particular growth strategy when reallocating -/// when full, nor when `reserve` is called. The current strategy is basic +/// `Vec` does not guarantee any particular growth strategy when reallocating +/// when full, nor when [`reserve`] is called. The current strategy is basic /// and it may prove desirable to use a non-constant growth factor. Whatever -/// strategy is used will of course guarantee `O(1)` amortized `push`. +/// strategy is used will of course guarantee `O(1)` amortized [`push`]. /// -/// `vec![x; n]`, `vec![a, b, c, d]`, and `Vec::with_capacity(n)`, will all -/// produce a Vec with exactly the requested capacity. If `len() == capacity()`, -/// (as is the case for the `vec!` macro), then a `Vec` can be converted -/// to and from a `Box<[T]>` without reallocating or moving the elements. +/// `vec![x; n]`, `vec![a, b, c, d]`, and +/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec` +/// with exactly the requested capacity. If [`len()`]` == `[`capacity()`], +/// (as is the case for the [`vec!`] macro), then a `Vec` can be converted to +/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements. /// -/// Vec will not specifically overwrite any data that is removed from it, +/// `Vec` will not specifically overwrite any data that is removed from it, /// but also won't specifically preserve it. Its uninitialized memory is /// scratch space that it may use however it wants. 
It will generally just do /// whatever is most efficient or otherwise easy to implement. Do not rely on -/// removed data to be erased for security purposes. Even if you drop a Vec, its -/// buffer may simply be reused by another Vec. Even if you zero a Vec's memory +/// removed data to be erased for security purposes. Even if you drop a `Vec`, its +/// buffer may simply be reused by another `Vec`. Even if you zero a `Vec`'s memory /// first, that may not actually happen because the optimizer does not consider /// this a side-effect that must be preserved. /// -/// Vec does not currently guarantee the order in which elements are dropped +/// `Vec` does not currently guarantee the order in which elements are dropped /// (the order has changed in the past, and may change again). /// -#[unsafe_no_drop_flag] +/// [`vec!`]: ../../std/macro.vec.html +/// [`Index`]: ../../std/ops/trait.Index.html +/// [`String`]: ../../std/string/struct.String.html +/// [`&str`]: ../../std/primitive.str.html +/// [`Vec::with_capacity`]: ../../std/vec/struct.Vec.html#method.with_capacity +/// [`Vec::new()`]: ../../std/vec/struct.Vec.html#method.new +/// [`shrink_to_fit()`]: ../../std/vec/struct.Vec.html#method.shrink_to_fit +/// [`capacity()`]: ../../std/vec/struct.Vec.html#method.capacity +/// [`mem::size_of::()`]: ../../std/mem/fn.size_of.html +/// [`len()`]: ../../std/vec/struct.Vec.html#method.len +/// [`push`]: ../../std/vec/struct.Vec.html#method.push +/// [`insert`]: ../../std/vec/struct.Vec.html#method.insert +/// [`reserve`]: ../../std/vec/struct.Vec.html#method.reserve +/// [owned slice]: ../../std/boxed/struct.Box.html #[stable(feature = "rust1", since = "1.0.0")] pub struct Vec { buf: RawVec, @@ -261,9 +327,10 @@ impl Vec { /// reallocating. If `capacity` is 0, the vector will not allocate. /// /// It is important to note that this function does not specify the *length* - /// of the returned vector, but only the *capacity*. 
(For an explanation of - /// the difference between length and capacity, see the main `Vec` docs - /// above, 'Capacity and reallocation'.) + /// of the returned vector, but only the *capacity*. For an explanation of + /// the difference between length and capacity, see *[Capacity and reallocation]*. + /// + /// [Capacity and reallocation]: #capacity-and-reallocation /// /// # Examples /// @@ -297,14 +364,22 @@ impl Vec { /// This is highly unsafe, due to the number of invariants that aren't /// checked: /// - /// * `ptr` needs to have been previously allocated via `String`/`Vec` + /// * `ptr` needs to have been previously allocated via [`String`]/`Vec` /// (at least, it's highly likely to be incorrect if it wasn't). - /// * `length` needs to be the length that less than or equal to `capacity`. + /// * `length` needs to be less than or equal to `capacity`. /// * `capacity` needs to be the capacity that the pointer was allocated with. /// /// Violating these may cause problems like corrupting the allocator's /// internal datastructures. /// + /// The ownership of `ptr` is effectively transferred to the + /// `Vec` which may then deallocate, reallocate or change the + /// contents of memory pointed to by the pointer at will. Ensure + /// that nothing else uses the pointer after calling this + /// function. + /// + /// [`String`]: ../../std/string/struct.String.html + /// /// # Examples /// /// ``` @@ -421,11 +496,34 @@ impl Vec { self.buf.shrink_to_fit(self.len); } - /// Converts the vector into Box<[T]>. + /// Converts the vector into [`Box<[T]>`][owned slice]. /// /// Note that this will drop any excess capacity. Calling this and - /// converting back to a vector with `into_vec()` is equivalent to calling - /// `shrink_to_fit()`. + /// converting back to a vector with [`into_vec()`] is equivalent to calling + /// [`shrink_to_fit()`]. 
+ /// + /// [owned slice]: ../../std/boxed/struct.Box.html + /// [`into_vec()`]: ../../std/primitive.slice.html#method.into_vec + /// [`shrink_to_fit()`]: #method.shrink_to_fit + /// + /// # Examples + /// + /// ``` + /// let v = vec![1, 2, 3]; + /// + /// let slice = v.into_boxed_slice(); + /// ``` + /// + /// Any excess capacity is removed: + /// + /// ``` + /// let mut vec = Vec::with_capacity(10); + /// vec.extend([1, 2, 3].iter().cloned()); + /// + /// assert_eq!(vec.capacity(), 10); + /// let slice = vec.into_boxed_slice(); + /// assert_eq!(slice.into_vec().capacity(), 3); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn into_boxed_slice(mut self) -> Box<[T]> { unsafe { @@ -436,27 +534,55 @@ impl Vec { } } - /// Shorten a vector to be `len` elements long, dropping excess elements. + /// Shortens the vector, keeping the first `len` elements and dropping + /// the rest. /// /// If `len` is greater than the vector's current length, this has no /// effect. /// + /// The [`drain`] method can emulate `truncate`, but causes the excess + /// elements to be returned instead of dropped. + /// /// # Examples /// + /// Truncating a five element vector to two elements: + /// /// ``` /// let mut vec = vec![1, 2, 3, 4, 5]; /// vec.truncate(2); /// assert_eq!(vec, [1, 2]); /// ``` + /// + /// No truncation occurs when `len` is greater than the vector's current + /// length: + /// + /// ``` + /// let mut vec = vec![1, 2, 3]; + /// vec.truncate(8); + /// assert_eq!(vec, [1, 2, 3]); + /// ``` + /// + /// Truncating when `len == 0` is equivalent to calling the [`clear`] + /// method. 
+ /// + /// ``` + /// let mut vec = vec![1, 2, 3]; + /// vec.truncate(0); + /// assert_eq!(vec, []); + /// ``` + /// + /// [`clear`]: #method.clear + /// [`drain`]: #method.drain #[stable(feature = "rust1", since = "1.0.0")] pub fn truncate(&mut self, len: usize) { unsafe { // drop any extra elements while len < self.len { - // decrement len before the read(), so a panic on Drop doesn't - // re-drop the just-failed value. + // decrement len before the drop_in_place(), so a panic on Drop + // doesn't re-drop the just-failed value. self.len -= 1; - ptr::read(self.get_unchecked(self.len)); + let len = self.len; + ptr::drop_in_place(self.get_unchecked_mut(len)); } } } @@ -464,6 +590,14 @@ impl Vec { /// Extracts a slice containing the entire vector. /// /// Equivalent to `&s[..]`. + /// + /// # Examples + /// + /// ``` + /// use std::io::{self, Write}; + /// let buffer = vec![1, 2, 3, 5, 8]; + /// io::sink().write(buffer.as_slice()).unwrap(); + /// ``` #[inline] #[stable(feature = "vec_as_slice", since = "1.7.0")] pub fn as_slice(&self) -> &[T] { @@ -473,10 +607,18 @@ impl Vec { /// Extracts a mutable slice of the entire vector. /// /// Equivalent to `&mut s[..]`. + /// + /// # Examples + /// + /// ``` + /// use std::io::{self, Read}; + /// let mut buffer = vec![0; 3]; + /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap(); + /// ``` #[inline] #[stable(feature = "vec_as_slice", since = "1.7.0")] pub fn as_mut_slice(&mut self) -> &mut [T] { - &mut self[..] + self } /// Sets the length of a vector. 
@@ -488,9 +630,38 @@ impl Vec { /// # Examples /// /// ``` - /// let mut v = vec![1, 2, 3, 4]; + /// use std::ptr; + /// + /// let mut vec = vec!['r', 'u', 's', 't']; + /// /// unsafe { - /// v.set_len(1); + /// ptr::drop_in_place(&mut vec[3]); + /// vec.set_len(3); + /// } + /// assert_eq!(vec, ['r', 'u', 's']); + /// ``` + /// + /// In this example, there is a memory leak since the memory locations + /// owned by the inner vectors were not freed prior to the `set_len` call: + /// + /// ``` + /// let mut vec = vec![vec![1, 0, 0], + /// vec![0, 1, 0], + /// vec![0, 0, 1]]; + /// unsafe { + /// vec.set_len(0); + /// } + /// ``` + /// + /// In this example, the vector gets expanded from zero to four items + /// without any memory allocations occurring, resulting in vector + /// values of unallocated memory: + /// + /// ``` + /// let mut vec: Vec = Vec::new(); + /// + /// unsafe { + /// vec.set_len(4); /// } /// ``` #[inline] @@ -528,11 +699,11 @@ impl Vec { } /// Inserts an element at position `index` within the vector, shifting all - /// elements after position `i` one position to the right. + /// elements after it to the right. /// /// # Panics /// - /// Panics if `index` is greater than the vector's length. + /// Panics if `index` is out of bounds. /// /// # Examples /// @@ -570,7 +741,7 @@ impl Vec { } /// Removes and returns the element at position `index` within the vector, - /// shifting all elements after position `index` one position to the left. + /// shifting all elements after it to the left. /// /// # Panics /// @@ -607,7 +778,7 @@ impl Vec { /// Retains only the elements specified by the predicate. /// - /// In other words, remove all elements `e` such that `f(&e)` returns false. + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. /// This method operates in place and preserves the order of the retained /// elements. 
/// @@ -640,6 +811,130 @@ impl Vec { } } + /// Removes consecutive elements in the vector that resolve to the same key. + /// + /// If the vector is sorted, this removes all duplicates. + /// + /// # Examples + /// + /// ``` + /// #![feature(dedup_by)] + /// + /// let mut vec = vec![10, 20, 21, 30, 20]; + /// + /// vec.dedup_by_key(|i| *i / 10); + /// + /// assert_eq!(vec, [10, 20, 30, 20]); + /// ``` + #[unstable(feature = "dedup_by", reason = "recently added", issue = "37087")] + #[inline] + pub fn dedup_by_key(&mut self, mut key: F) where F: FnMut(&mut T) -> K, K: PartialEq { + self.dedup_by(|a, b| key(a) == key(b)) + } + + /// Removes consecutive elements in the vector that resolve to the same key. + /// + /// If the vector is sorted, this removes all duplicates. + /// + /// # Examples + /// + /// ``` + /// #![feature(dedup_by)] + /// use std::ascii::AsciiExt; + /// + /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"]; + /// + /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b)); + /// + /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]); + /// ``` + #[unstable(feature = "dedup_by", reason = "recently added", issue = "37087")] + pub fn dedup_by(&mut self, mut same_bucket: F) where F: FnMut(&mut T, &mut T) -> bool { + unsafe { + // Although we have a mutable reference to `self`, we cannot make + // *arbitrary* changes. The `same_bucket` calls could panic, so we + // must ensure that the vector is in a valid state at all time. + // + // The way that we handle this is by using swaps; we iterate + // over all the elements, swapping as we go so that at the end + // the elements we wish to keep are in the front, and those we + // wish to reject are at the back. We can then truncate the + // vector. This operation is still O(n). + // + // Example: We start in this state, where `r` represents "next + // read" and `w` represents "next_write`. 
+ // + // r + // +---+---+---+---+---+---+ + // | 0 | 1 | 1 | 2 | 3 | 3 | + // +---+---+---+---+---+---+ + // w + // + // Comparing self[r] against self[w-1], this is not a duplicate, so + // we swap self[r] and self[w] (no effect as r==w) and then increment both + // r and w, leaving us with: + // + // r + // +---+---+---+---+---+---+ + // | 0 | 1 | 1 | 2 | 3 | 3 | + // +---+---+---+---+---+---+ + // w + // + // Comparing self[r] against self[w-1], this value is a duplicate, + // so we increment `r` but leave everything else unchanged: + // + // r + // +---+---+---+---+---+---+ + // | 0 | 1 | 1 | 2 | 3 | 3 | + // +---+---+---+---+---+---+ + // w + // + // Comparing self[r] against self[w-1], this is not a duplicate, + // so swap self[r] and self[w] and advance r and w: + // + // r + // +---+---+---+---+---+---+ + // | 0 | 1 | 2 | 1 | 3 | 3 | + // +---+---+---+---+---+---+ + // w + // + // Not a duplicate, repeat: + // + // r + // +---+---+---+---+---+---+ + // | 0 | 1 | 2 | 3 | 1 | 3 | + // +---+---+---+---+---+---+ + // w + // + // Duplicate, advance r. End of vec. Truncate to w. + + let ln = self.len(); + if ln <= 1 { + return; + } + + // Avoid bounds checks by using raw pointers. + let p = self.as_mut_ptr(); + let mut r: usize = 1; + let mut w: usize = 1; + + while r < ln { + let p_r = p.offset(r as isize); + let p_wm1 = p.offset((w - 1) as isize); + if !same_bucket(&mut *p_r, &mut *p_wm1) { + if r != w { + let p_w = p_wm1.offset(1); + mem::swap(&mut *p_r, &mut *p_w); + } + w += 1; + } + r += 1; + } + + self.truncate(w); + } + } + /// Appends an element to the back of a collection. /// /// # Panics @@ -668,9 +963,11 @@ impl Vec { } } - /// Removes the last element from a vector and returns it, or `None` if it + /// Removes the last element from a vector and returns it, or [`None`] if it /// is empty. 
/// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// /// # Examples /// /// ``` @@ -777,8 +1074,8 @@ impl Vec { Drain { tail_start: end, tail_len: len - end, - iter: range_slice.iter_mut(), - vec: self as *mut _, + iter: range_slice.iter(), + vec: Shared::new(self as *mut _), } } } @@ -904,36 +1201,31 @@ impl Vec { self.reserve(n); unsafe { - let len = self.len(); - let mut ptr = self.as_mut_ptr().offset(len as isize); + let mut ptr = self.as_mut_ptr().offset(self.len() as isize); + // Use SetLenOnDrop to work around bug where compiler + // may not realize the store through `ptr` trough self.set_len() + // don't alias. + let mut local_len = SetLenOnDrop::new(&mut self.len); + // Write all elements except the last one - for i in 1..n { + for _ in 1..n { ptr::write(ptr, value.clone()); ptr = ptr.offset(1); // Increment the length in every step in case clone() panics - self.set_len(len + i); + local_len.increment_len(1); } if n > 0 { // We can write the last element directly without cloning needlessly ptr::write(ptr, value); - self.set_len(len + n); + local_len.increment_len(1); } - } - } - #[allow(missing_docs)] - #[inline] - #[unstable(feature = "vec_push_all", - reason = "likely to be replaced by a more optimized extend", - issue = "27744")] - #[rustc_deprecated(reason = "renamed to extend_from_slice", - since = "1.6.0")] - pub fn push_all(&mut self, other: &[T]) { - self.extend_from_slice(other) + // len set by scope guard + } } - /// Appends all elements in a slice to the `Vec`. + /// Clones and appends all elements in a slice to the `Vec`. /// /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. 
@@ -952,19 +1244,36 @@ impl Vec { /// ``` #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] pub fn extend_from_slice(&mut self, other: &[T]) { - self.reserve(other.len()); + self.extend(other.iter().cloned()) + } +} - for i in 0..other.len() { - let len = self.len(); +// Set the length of the vec when the `SetLenOnDrop` value goes out of scope. +// +// The idea is: The length field in SetLenOnDrop is a local variable +// that the optimizer will see does not alias with any stores through the Vec's data +// pointer. This is a workaround for alias analysis issue #32155 +struct SetLenOnDrop<'a> { + len: &'a mut usize, + local_len: usize, +} - // Unsafe code so this can be optimised to a memcpy (or something - // similarly fast) when T is Copy. LLVM is easily confused, so any - // extra operations during the loop can prevent this optimisation. - unsafe { - ptr::write(self.get_unchecked_mut(len), other.get_unchecked(i).clone()); - self.set_len(len + 1); - } - } +impl<'a> SetLenOnDrop<'a> { + #[inline] + fn new(len: &'a mut usize) -> Self { + SetLenOnDrop { local_len: *len, len: len } + } + + #[inline] + fn increment_len(&mut self, increment: usize) { + self.local_len += increment; + } +} + +impl<'a> Drop for SetLenOnDrop<'a> { + #[inline] + fn drop(&mut self) { + *self.len = self.local_len; } } @@ -983,90 +1292,9 @@ impl Vec { /// assert_eq!(vec, [1, 2, 3, 2]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] + #[inline] pub fn dedup(&mut self) { - unsafe { - // Although we have a mutable reference to `self`, we cannot make - // *arbitrary* changes. The `PartialEq` comparisons could panic, so we - // must ensure that the vector is in a valid state at all time. - // - // The way that we handle this is by using swaps; we iterate - // over all the elements, swapping as we go so that at the end - // the elements we wish to keep are in the front, and those we - // wish to reject are at the back. We can then truncate the - // vector. 
This operation is still O(n). - // - // Example: We start in this state, where `r` represents "next - // read" and `w` represents "next_write`. - // - // r - // +---+---+---+---+---+---+ - // | 0 | 1 | 1 | 2 | 3 | 3 | - // +---+---+---+---+---+---+ - // w - // - // Comparing self[r] against self[w-1], this is not a duplicate, so - // we swap self[r] and self[w] (no effect as r==w) and then increment both - // r and w, leaving us with: - // - // r - // +---+---+---+---+---+---+ - // | 0 | 1 | 1 | 2 | 3 | 3 | - // +---+---+---+---+---+---+ - // w - // - // Comparing self[r] against self[w-1], this value is a duplicate, - // so we increment `r` but leave everything else unchanged: - // - // r - // +---+---+---+---+---+---+ - // | 0 | 1 | 1 | 2 | 3 | 3 | - // +---+---+---+---+---+---+ - // w - // - // Comparing self[r] against self[w-1], this is not a duplicate, - // so swap self[r] and self[w] and advance r and w: - // - // r - // +---+---+---+---+---+---+ - // | 0 | 1 | 2 | 1 | 3 | 3 | - // +---+---+---+---+---+---+ - // w - // - // Not a duplicate, repeat: - // - // r - // +---+---+---+---+---+---+ - // | 0 | 1 | 2 | 3 | 1 | 3 | - // +---+---+---+---+---+---+ - // w - // - // Duplicate, advance r. End of vec. Truncate to w. - - let ln = self.len(); - if ln <= 1 { - return; - } - - // Avoid bounds checks by using raw pointers. 
- let p = self.as_mut_ptr(); - let mut r: usize = 1; - let mut w: usize = 1; - - while r < ln { - let p_r = p.offset(r as isize); - let p_wm1 = p.offset((w - 1) as isize); - if *p_r != *p_wm1 { - if r != w { - let p_w = p_wm1.offset(1); - mem::swap(&mut *p_r, &mut *p_w); - } - w += 1; - } - r += 1; - } - - self.truncate(w); - } + self.dedup_by(|a, b| a == b) } } @@ -1181,6 +1409,24 @@ impl ops::Index for Vec { self } } +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::Index> for Vec { + type Output = [T]; + + #[inline] + fn index(&self, index: ops::RangeInclusive) -> &[T] { + Index::index(&**self, index) + } +} +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::Index> for Vec { + type Output = [T]; + + #[inline] + fn index(&self, index: ops::RangeToInclusive) -> &[T] { + Index::index(&**self, index) + } +} #[stable(feature = "rust1", since = "1.0.0")] impl ops::IndexMut> for Vec { @@ -1210,6 +1456,20 @@ impl ops::IndexMut for Vec { self } } +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::IndexMut> for Vec { + #[inline] + fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut [T] { + IndexMut::index_mut(&mut **self, index) + } +} +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl ops::IndexMut> for Vec { + #[inline] + fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut [T] { + IndexMut::index_mut(&mut **self, index) + } +} #[stable(feature = "rust1", since = "1.0.0")] impl ops::Deref for Vec { @@ -1238,27 +1498,8 @@ impl ops::DerefMut for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator for Vec { #[inline] - fn from_iter>(iterable: I) -> Vec { - // Unroll the first iteration, as the vector is going to be - // expanded on this iteration in every case when the iterable is not - // empty, but the loop 
in extend_desugared() is not going to see the - // vector being full in the few subsequent loop iterations. - // So we get better branch prediction. - let mut iterator = iterable.into_iter(); - let mut vector = match iterator.next() { - None => return Vec::new(), - Some(element) => { - let (lower, _) = iterator.size_hint(); - let mut vector = Vec::with_capacity(lower.saturating_add(1)); - unsafe { - ptr::write(vector.get_unchecked_mut(0), element); - vector.set_len(1); - } - vector - } - }; - vector.extend_desugared(iterator); - vector + fn from_iter>(iter: I) -> Vec { + >::from_iter(iter.into_iter()) } } @@ -1283,18 +1524,18 @@ impl IntoIterator for Vec { #[inline] fn into_iter(mut self) -> IntoIter { unsafe { - let ptr = self.as_mut_ptr(); - assume(!ptr.is_null()); - let begin = ptr as *const T; + let begin = self.as_mut_ptr(); + assume(!begin.is_null()); let end = if mem::size_of::() == 0 { - arith_offset(ptr as *const i8, self.len() as isize) as *const T + arith_offset(begin as *const i8, self.len() as isize) as *const T } else { - ptr.offset(self.len() as isize) as *const T + begin.offset(self.len() as isize) as *const T }; - let buf = ptr::read(&self.buf); + let cap = self.buf.cap(); mem::forget(self); IntoIter { - _buf: buf, + buf: Shared::new(begin), + cap: cap, ptr: begin, end: end, } @@ -1325,13 +1566,86 @@ impl<'a, T> IntoIterator for &'a mut Vec { #[stable(feature = "rust1", since = "1.0.0")] impl Extend for Vec { #[inline] - fn extend>(&mut self, iterable: I) { - self.extend_desugared(iterable.into_iter()) + fn extend>(&mut self, iter: I) { + self.spec_extend(iter.into_iter()) + } +} + +// Specialization trait used for Vec::from_iter and Vec::extend +trait SpecExtend { + fn from_iter(iter: I) -> Self; + fn spec_extend(&mut self, iter: I); +} + +impl SpecExtend for Vec + where I: Iterator, +{ + default fn from_iter(mut iterator: I) -> Self { + // Unroll the first iteration, as the vector is going to be + // expanded on this iteration in every case when 
the iterable is not + // empty, but the loop in extend_desugared() is not going to see the + // vector being full in the few subsequent loop iterations. + // So we get better branch prediction. + let mut vector = match iterator.next() { + None => return Vec::new(), + Some(element) => { + let (lower, _) = iterator.size_hint(); + let mut vector = Vec::with_capacity(lower.saturating_add(1)); + unsafe { + ptr::write(vector.get_unchecked_mut(0), element); + vector.set_len(1); + } + vector + } + }; + vector.spec_extend(iterator); + vector + } + + default fn spec_extend(&mut self, iter: I) { + self.extend_desugared(iter) + } +} + +impl SpecExtend for Vec + where I: TrustedLen, +{ + fn from_iter(iterator: I) -> Self { + let mut vector = Vec::new(); + vector.spec_extend(iterator); + vector + } + + fn spec_extend(&mut self, iterator: I) { + // This is the case for a TrustedLen iterator. + let (low, high) = iterator.size_hint(); + if let Some(high_value) = high { + debug_assert_eq!(low, high_value, + "TrustedLen iterator's size hint is not exact: {:?}", + (low, high)); + } + if let Some(additional) = high { + self.reserve(additional); + unsafe { + let mut ptr = self.as_mut_ptr().offset(self.len() as isize); + let mut local_len = SetLenOnDrop::new(&mut self.len); + for element in iterator { + ptr::write(ptr, element); + ptr = ptr.offset(1); + // NB can't overflow since we would have had to alloc the address space + local_len.increment_len(1); + } + } + } else { + self.extend_desugared(iterator) + } } } impl Vec { fn extend_desugared>(&mut self, mut iterator: I) { + // This is the case for a general iterator. 
+ // // This function should be the moral equivalent of: // // for item in iterator { @@ -1355,7 +1669,7 @@ impl Vec { #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, T: 'a + Copy> Extend<&'a T> for Vec { fn extend>(&mut self, iter: I) { - self.extend(iter.into_iter().cloned()); + self.extend(iter.into_iter().map(|&x| x)) } } @@ -1425,16 +1739,9 @@ impl Ord for Vec { impl Drop for Vec { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - if self.buf.unsafe_no_drop_flag_needs_drop() { - unsafe { - // The branch on needs_drop() is an -O1 performance optimization. - // Without the branch, dropping Vec takes linear time. - if needs_drop::() { - for x in self.iter_mut() { - ptr::drop_in_place(x); - } - } - } + unsafe { + // use drop for [T] + ptr::drop_in_place(&mut self[..]); } // RawVec handles deallocation } @@ -1442,6 +1749,7 @@ impl Drop for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Vec { + /// Creates an empty `Vec`. fn default() -> Vec { Vec::new() } @@ -1494,6 +1802,13 @@ impl<'a, T: Clone> From<&'a [T]> for Vec { } } +#[stable(feature = "vec_from_cow_slice", since = "1.14.0")] +impl<'a, T> From> for Vec where [T]: ToOwned> { + fn from(s: Cow<'a, [T]>) -> Vec { + s.into_owned() + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<&'a str> for Vec { fn from(s: &'a str) -> Vec { @@ -1505,26 +1820,24 @@ impl<'a> From<&'a str> for Vec { // Clone-on-write //////////////////////////////////////////////////////////////////////////////// -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> FromIterator for Cow<'a, [T]> where T: Clone { - fn from_iter>(it: I) -> Cow<'a, [T]> { - Cow::Owned(FromIterator::from_iter(it)) +#[stable(feature = "cow_from_vec", since = "1.7.0")] +impl<'a, T: Clone> From<&'a [T]> for Cow<'a, [T]> { + fn from(s: &'a [T]) -> Cow<'a, [T]> { + Cow::Borrowed(s) } } -#[stable(feature = "rust1", since = "1.0.0")] -#[allow(deprecated)] -impl<'a, T: 'a> IntoCow<'a, [T]> for Vec where T: 
Clone { - fn into_cow(self) -> Cow<'a, [T]> { - Cow::Owned(self) +#[stable(feature = "cow_from_vec", since = "1.7.0")] +impl<'a, T: Clone> From> for Cow<'a, [T]> { + fn from(v: Vec) -> Cow<'a, [T]> { + Cow::Owned(v) } } #[stable(feature = "rust1", since = "1.0.0")] -#[allow(deprecated)] -impl<'a, T> IntoCow<'a, [T]> for &'a [T] where T: Clone { - fn into_cow(self) -> Cow<'a, [T]> { - Cow::Borrowed(self) +impl<'a, T> FromIterator for Cow<'a, [T]> where T: Clone { + fn from_iter>(it: I) -> Cow<'a, [T]> { + Cow::Owned(FromIterator::from_iter(it)) } } @@ -1533,13 +1846,71 @@ impl<'a, T> IntoCow<'a, [T]> for &'a [T] where T: Clone { //////////////////////////////////////////////////////////////////////////////// /// An iterator that moves out of a vector. +/// +/// This `struct` is created by the `into_iter` method on [`Vec`][`Vec`] (provided +/// by the [`IntoIterator`] trait). +/// +/// [`Vec`]: struct.Vec.html +/// [`IntoIterator`]: ../../std/iter/trait.IntoIterator.html #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { - _buf: RawVec, + buf: Shared, + cap: usize, ptr: *const T, end: *const T, } +#[stable(feature = "vec_intoiter_debug", since = "1.13.0")] +impl fmt::Debug for IntoIter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("IntoIter") + .field(&self.as_slice()) + .finish() + } +} + +impl IntoIter { + /// Returns the remaining items of this iterator as a slice. + /// + /// # Examples + /// + /// ``` + /// # #![feature(vec_into_iter_as_slice)] + /// let vec = vec!['a', 'b', 'c']; + /// let mut into_iter = vec.into_iter(); + /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + /// let _ = into_iter.next().unwrap(); + /// assert_eq!(into_iter.as_slice(), &['b', 'c']); + /// ``` + #[unstable(feature = "vec_into_iter_as_slice", issue = "35601")] + pub fn as_slice(&self) -> &[T] { + unsafe { + slice::from_raw_parts(self.ptr, self.len()) + } + } + + /// Returns the remaining items of this iterator as a mutable slice. 
+ /// + /// # Examples + /// + /// ``` + /// # #![feature(vec_into_iter_as_slice)] + /// let vec = vec!['a', 'b', 'c']; + /// let mut into_iter = vec.into_iter(); + /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + /// into_iter.as_mut_slice()[2] = 'z'; + /// assert_eq!(into_iter.next().unwrap(), 'a'); + /// assert_eq!(into_iter.next().unwrap(), 'b'); + /// assert_eq!(into_iter.next().unwrap(), 'z'); + /// ``` + #[unstable(feature = "vec_into_iter_as_slice", issue = "35601")] + pub fn as_mut_slice(&self) -> &mut [T] { + unsafe { + slice::from_raw_parts_mut(self.ptr as *mut T, self.len()) + } + } +} + #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Send for IntoIter {} #[stable(feature = "rust1", since = "1.0.0")] @@ -1552,14 +1923,14 @@ impl Iterator for IntoIter { #[inline] fn next(&mut self) -> Option { unsafe { - if self.ptr == self.end { + if self.ptr as *const _ == self.end { None } else { if mem::size_of::() == 0 { // purposefully don't use 'ptr.offset' because for // vectors with 0-size elements this would return the // same pointer. 
- self.ptr = arith_offset(self.ptr as *const i8, 1) as *const T; + self.ptr = arith_offset(self.ptr as *const i8, 1) as *mut T; // Use a non-null pointer value Some(ptr::read(EMPTY as *mut T)) @@ -1588,7 +1959,7 @@ impl Iterator for IntoIter { #[inline] fn count(self) -> usize { - self.size_hint().0 + self.len() } } @@ -1602,7 +1973,7 @@ impl DoubleEndedIterator for IntoIter { } else { if mem::size_of::() == 0 { // See above for why 'ptr.offset' isn't used - self.end = arith_offset(self.end as *const i8, -1) as *const T; + self.end = arith_offset(self.end as *const i8, -1) as *mut T; // Use a non-null pointer value Some(ptr::read(EMPTY as *mut T)) @@ -1617,20 +1988,43 @@ impl DoubleEndedIterator for IntoIter { } #[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for IntoIter {} +impl ExactSizeIterator for IntoIter { + fn is_empty(&self) -> bool { + self.ptr == self.end + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for IntoIter {} + +#[stable(feature = "vec_into_iter_clone", since = "1.8.0")] +impl Clone for IntoIter { + fn clone(&self) -> IntoIter { + self.as_slice().to_owned().into_iter() + } +} #[stable(feature = "rust1", since = "1.0.0")] impl Drop for IntoIter { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { // destroy the remaining elements - for _x in self {} + for _x in self.by_ref() {} // RawVec handles deallocation + let _ = unsafe { RawVec::from_raw_parts(*self.buf, self.cap) }; } } /// A draining iterator for `Vec`. +/// +/// This `struct` is created by the [`drain`] method on [`Vec`]. 
+/// +/// [`drain`]: struct.Vec.html#method.drain +/// [`Vec`]: struct.Vec.html #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, T: 'a> { /// Index of tail to preserve @@ -1638,8 +2032,8 @@ pub struct Drain<'a, T: 'a> { /// Length of tail tail_len: usize, /// Current remaining range to remove - iter: slice::IterMut<'a, T>, - vec: *mut Vec, + iter: slice::Iter<'a, T>, + vec: Shared>, } #[stable(feature = "drain", since = "1.6.0")] @@ -1647,7 +2041,7 @@ unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} #[stable(feature = "drain", since = "1.6.0")] unsafe impl<'a, T: Send> Send for Drain<'a, T> {} -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T> Iterator for Drain<'a, T> { type Item = T; @@ -1661,7 +2055,7 @@ impl<'a, T> Iterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T> DoubleEndedIterator for Drain<'a, T> { #[inline] fn next_back(&mut self) -> Option { @@ -1669,7 +2063,7 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T> Drop for Drain<'a, T> { fn drop(&mut self) { // exhaust self first @@ -1677,7 +2071,7 @@ impl<'a, T> Drop for Drain<'a, T> { if self.tail_len > 0 { unsafe { - let source_vec = &mut *self.vec; + let source_vec = &mut **self.vec; // memmove back untouched tail, update to new length let start = source_vec.len(); let tail = self.tail_start; @@ -1691,5 +2085,12 @@ impl<'a, T> Drop for Drain<'a, T> { } -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Drain<'a, T> {} +#[stable(feature = "drain", since = "1.6.0")] +impl<'a, T> ExactSizeIterator for Drain<'a, T> { + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Drain<'a, T> {} diff --git 
a/src/libcollections/vec_deque.rs b/src/libcollections/vec_deque.rs index ba78acc28bccb..5397193cab40f 100644 --- a/src/libcollections/vec_deque.rs +++ b/src/libcollections/vec_deque.rs @@ -20,10 +20,11 @@ use core::cmp::Ordering; use core::fmt; -use core::iter::{repeat, FromIterator}; +use core::iter::{repeat, FromIterator, FusedIterator}; use core::mem; use core::ops::{Index, IndexMut}; use core::ptr; +use core::ptr::Shared; use core::slice; use core::hash::{Hash, Hasher}; @@ -32,6 +33,7 @@ use core::cmp; use alloc::raw_vec::RawVec; use super::range::RangeArgument; +use super::vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 @@ -70,13 +72,19 @@ impl Clone for VecDeque { impl Drop for VecDeque { #[unsafe_destructor_blind_to_params] fn drop(&mut self) { - self.clear(); + let (front, back) = self.as_mut_slices(); + unsafe { + // use drop for [T] + ptr::drop_in_place(front); + ptr::drop_in_place(back); + } // RawVec handles deallocation } } #[stable(feature = "rust1", since = "1.0.0")] impl Default for VecDeque { + /// Creates an empty `VecDeque`. #[inline] fn default() -> VecDeque { VecDeque::new() @@ -359,12 +367,28 @@ impl VecDeque { impl VecDeque { /// Creates an empty `VecDeque`. + /// + /// # Examples + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let vector: VecDeque = VecDeque::new(); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new() -> VecDeque { VecDeque::with_capacity(INITIAL_CAPACITY) } /// Creates an empty `VecDeque` with space for at least `n` elements. + /// + /// # Examples + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let vector: VecDeque = VecDeque::with_capacity(10); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn with_capacity(n: usize) -> VecDeque { // +1 since the ringbuffer always leaves one space empty @@ -380,6 +404,8 @@ impl VecDeque { /// Retrieves an element in the `VecDeque` by index. 
/// + /// Element at index 0 is the front of the queue. + /// /// # Examples /// /// ``` @@ -403,6 +429,8 @@ impl VecDeque { /// Retrieves an element in the `VecDeque` mutably by index. /// + /// Element at index 0 is the front of the queue. + /// /// # Examples /// /// ``` @@ -434,6 +462,8 @@ impl VecDeque { /// /// Fails if there is no element with either index. /// + /// Element at index 0 is the front of the queue. + /// /// # Examples /// /// ``` @@ -690,43 +720,62 @@ impl VecDeque { /// Returns a pair of slices which contain, in order, the contents of the /// `VecDeque`. + /// + /// # Examples + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let mut vector = VecDeque::new(); + /// + /// vector.push_back(0); + /// vector.push_back(1); + /// vector.push_back(2); + /// + /// assert_eq!(vector.as_slices(), (&[0, 1, 2][..], &[][..])); + /// + /// vector.push_front(10); + /// vector.push_front(9); + /// + /// assert_eq!(vector.as_slices(), (&[9, 10][..], &[0, 1, 2][..])); + /// ``` #[inline] #[stable(feature = "deque_extras_15", since = "1.5.0")] pub fn as_slices(&self) -> (&[T], &[T]) { unsafe { - let contiguous = self.is_contiguous(); let buf = self.buffer_as_slice(); - if contiguous { - let (empty, buf) = buf.split_at(0); - (&buf[self.tail..self.head], empty) - } else { - let (mid, right) = buf.split_at(self.tail); - let (left, _) = mid.split_at(self.head); - (right, left) - } + RingSlices::ring_slices(buf, self.head, self.tail) } } /// Returns a pair of slices which contain, in order, the contents of the /// `VecDeque`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let mut vector = VecDeque::new(); + /// + /// vector.push_back(0); + /// vector.push_back(1); + /// + /// vector.push_front(10); + /// vector.push_front(9); + /// + /// vector.as_mut_slices().0[0] = 42; + /// vector.as_mut_slices().1[0] = 24; + /// assert_eq!(vector.as_slices(), (&[42, 10][..], &[24, 1][..])); + /// ``` #[inline] #[stable(feature = "deque_extras_15", since = "1.5.0")] pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) { unsafe { - let contiguous = self.is_contiguous(); let head = self.head; let tail = self.tail; let buf = self.buffer_as_mut_slice(); - - if contiguous { - let (empty, buf) = buf.split_at_mut(0); - (&mut buf[tail..head], empty) - } else { - let (mid, right) = buf.split_at_mut(tail); - let (left, _) = mid.split_at_mut(head); - - (right, left) - } + RingSlices::ring_slices(buf, head, tail) } } @@ -783,7 +832,7 @@ impl VecDeque { /// /// ``` /// use std::collections::VecDeque; - + /// /// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect(); /// assert_eq!(vec![3].into_iter().collect::>(), v.drain(2..).collect()); /// assert_eq!(vec![1, 2].into_iter().collect::>(), v); @@ -838,7 +887,7 @@ impl VecDeque { self.head = drain_tail; Drain { - deque: self as *mut _, + deque: unsafe { Shared::new(self as *mut _) }, after_tail: drain_head, after_head: head, iter: Iter { @@ -867,6 +916,30 @@ impl VecDeque { self.drain(..); } + /// Returns `true` if the `VecDeque` contains an element equal to the + /// given value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let mut vector: VecDeque = VecDeque::new(); + /// + /// vector.push_back(0); + /// vector.push_back(1); + /// + /// assert_eq!(vector.contains(&1), true); + /// assert_eq!(vector.contains(&10), false); + /// ``` + #[stable(feature = "vec_deque_contains", since = "1.12.0")] + pub fn contains(&self, x: &T) -> bool + where T: PartialEq + { + let (a, b) = self.as_slices(); + a.contains(x) || b.contains(x) + } + /// Provides a reference to the front element, or `None` if the sequence is /// empty. /// @@ -1094,6 +1167,8 @@ impl VecDeque { /// /// Returns `None` if `index` is out of bounds. /// + /// Element at index 0 is the front of the queue. + /// /// # Examples /// /// ``` @@ -1128,6 +1203,8 @@ impl VecDeque { /// /// Returns `None` if `index` is out of bounds. /// + /// Element at index 0 is the front of the queue. + /// /// # Examples /// /// ``` @@ -1159,6 +1236,8 @@ impl VecDeque { /// end is closer to the insertion point will be moved to make room, /// and all the affected elements will be moved to new positions. /// + /// Element at index 0 is the front of the queue. + /// /// # Panics /// /// Panics if `index` is greater than `VecDeque`'s length @@ -1386,7 +1465,10 @@ impl VecDeque { /// room, and all the affected elements will be moved to new positions. /// Returns `None` if `index` is out of bounds. /// + /// Element at index 0 is the front of the queue. + /// /// # Examples + /// /// ``` /// use std::collections::VecDeque; /// @@ -1564,6 +1646,8 @@ impl VecDeque { /// /// Note that the capacity of `self` does not change. /// + /// Element at index 0 is the front of the queue. 
+ /// /// # Panics /// /// Panics if `at > len` @@ -1727,6 +1811,42 @@ fn wrap_index(index: usize, size: usize) -> usize { index & (size - 1) } +/// Returns the two slices that cover the VecDeque's valid range +trait RingSlices : Sized { + fn slice(self, from: usize, to: usize) -> Self; + fn split_at(self, i: usize) -> (Self, Self); + + fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) { + let contiguous = tail <= head; + if contiguous { + let (empty, buf) = buf.split_at(0); + (buf.slice(tail, head), empty) + } else { + let (mid, right) = buf.split_at(tail); + let (left, _) = mid.split_at(head); + (right, left) + } + } +} + +impl<'a, T> RingSlices for &'a [T] { + fn slice(self, from: usize, to: usize) -> Self { + &self[from..to] + } + fn split_at(self, i: usize) -> (Self, Self) { + (*self).split_at(i) + } +} + +impl<'a, T> RingSlices for &'a mut [T] { + fn slice(self, from: usize, to: usize) -> Self { + &mut self[from..to] + } + fn split_at(self, i: usize) -> (Self, Self) { + (*self).split_at_mut(i) + } +} + /// Calculate the number of elements left to be read in the buffer #[inline] fn count(tail: usize, head: usize, size: usize) -> usize { @@ -1773,6 +1893,14 @@ impl<'a, T> Iterator for Iter<'a, T> { let len = count(self.tail, self.head, self.ring.len()); (len, Some(len)) } + + fn fold(self, mut accum: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = front.iter().fold(accum, &mut f); + back.iter().fold(accum, &mut f) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1790,6 +1918,10 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + + /// `VecDeque` mutable iterator. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { @@ -1821,6 +1953,14 @@ impl<'a, T> Iterator for IterMut<'a, T> { let len = count(self.tail, self.head, self.ring.len()); (len, Some(len)) } + + fn fold(self, mut accum: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); + accum = front.iter_mut().fold(accum, &mut f); + back.iter_mut().fold(accum, &mut f) + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1842,6 +1982,9 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + /// A by-value VecDeque iterator #[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] @@ -1876,13 +2019,16 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + /// A draining VecDeque iterator #[stable(feature = "drain", since = "1.6.0")] pub struct Drain<'a, T: 'a> { after_tail: usize, after_head: usize, iter: Iter<'a, T>, - deque: *mut VecDeque, + deque: Shared>, } #[stable(feature = "drain", since = "1.6.0")] @@ -1890,12 +2036,12 @@ unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} #[stable(feature = "drain", since = "1.6.0")] unsafe impl<'a, T: Send> Send for Drain<'a, T> {} -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> Drop for Drain<'a, T> { fn drop(&mut self) { for _ in self.by_ref() {} - let source_deque = unsafe { &mut *self.deque }; + let source_deque = unsafe { &mut **self.deque }; // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head // @@ -1939,7 +2085,7 @@ impl<'a, T: 'a> Drop for Drain<'a, T> { } } 
-#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> Iterator for Drain<'a, T> { type Item = T; @@ -1954,7 +2100,7 @@ impl<'a, T: 'a> Iterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { #[inline] fn next_back(&mut self) -> Option { @@ -1962,13 +2108,48 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[stable(feature = "drain", since = "1.6.0")] impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} + #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for VecDeque { fn eq(&self, other: &VecDeque) -> bool { - self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a.eq(b)) + if self.len() != other.len() { + return false; + } + let (sa, sb) = self.as_slices(); + let (oa, ob) = other.as_slices(); + if sa.len() == oa.len() { + sa == oa && sb == ob + } else if sa.len() < oa.len() { + // Always divisible in three sections, for example: + // self: [a b c|d e f] + // other: [0 1 2 3|4 5] + // front = 3, mid = 1, + // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5] + let front = sa.len(); + let mid = oa.len() - front; + + let (oa_front, oa_mid) = oa.split_at(front); + let (sb_mid, sb_back) = sb.split_at(mid); + debug_assert_eq!(sa.len(), oa_front.len()); + debug_assert_eq!(sb_mid.len(), oa_mid.len()); + debug_assert_eq!(sb_back.len(), ob.len()); + sa == oa_front && sb_mid == oa_mid && sb_back == ob + } else { + let front = oa.len(); + let mid = sa.len() - front; + + let (sa_front, sa_mid) = sa.split_at(front); + let (ob_mid, ob_back) = ob.split_at(mid); + debug_assert_eq!(sa_front.len(), oa.len()); + debug_assert_eq!(sa_mid.len(), ob_mid.len()); + debug_assert_eq!(sb.len(), ob_back.len()); + sa_front == oa && 
sa_mid == ob_mid && sb == ob_back + } } } @@ -1994,9 +2175,9 @@ impl Ord for VecDeque { impl Hash for VecDeque { fn hash(&self, state: &mut H) { self.len().hash(state); - for elt in self { - elt.hash(state); - } + let (a, b) = self.as_slices(); + Hash::hash_slice(a, state); + Hash::hash_slice(b, state); } } @@ -2020,8 +2201,8 @@ impl IndexMut for VecDeque { #[stable(feature = "rust1", since = "1.0.0")] impl FromIterator for VecDeque { - fn from_iter>(iterable: T) -> VecDeque { - let iterator = iterable.into_iter(); + fn from_iter>(iter: T) -> VecDeque { + let iterator = iter.into_iter(); let (lower, _) = iterator.size_hint(); let mut deq = VecDeque::with_capacity(lower); deq.extend(iterator); @@ -2084,11 +2265,108 @@ impl fmt::Debug for VecDeque { } } +#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")] +impl From> for VecDeque { + fn from(mut other: Vec) -> Self { + unsafe { + let other_buf = other.as_mut_ptr(); + let mut buf = RawVec::from_raw_parts(other_buf, other.capacity()); + let len = other.len(); + mem::forget(other); + + // We need to extend the buf if it's not a power of two, too small + // or doesn't have at least one free space + if !buf.cap().is_power_of_two() + || (buf.cap() < (MINIMUM_CAPACITY + 1)) + || (buf.cap() == len) + { + let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two(); + buf.reserve_exact(len, cap - len); + } + + VecDeque { + tail: 0, + head: len, + buf: buf + } + } + } +} + +#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")] +impl From> for Vec { + fn from(other: VecDeque) -> Self { + unsafe { + let buf = other.buf.ptr(); + let len = other.len(); + let tail = other.tail; + let head = other.head; + let cap = other.cap(); + + // Need to move the ring to the front of the buffer, as vec will expect this. 
+ if other.is_contiguous() { + ptr::copy(buf.offset(tail as isize), buf, len); + } else { + if (tail - head) >= cmp::min((cap - tail), head) { + // There is enough free space in the centre for the shortest block so we can + // do this in at most three copy moves. + if (cap - tail) > head { + // right hand block is the long one; move that enough for the left + ptr::copy( + buf.offset(tail as isize), + buf.offset((tail - head) as isize), + cap - tail); + // copy left in the end + ptr::copy(buf, buf.offset((cap - head) as isize), head); + // shift the new thing to the start + ptr::copy(buf.offset((tail-head) as isize), buf, len); + } else { + // left hand block is the long one, we can do it in two! + ptr::copy(buf, buf.offset((cap-tail) as isize), head); + ptr::copy(buf.offset(tail as isize), buf, cap-tail); + } + } else { + // Need to use N swaps to move the ring + // We can use the space at the end of the ring as a temp store + + let mut left_edge: usize = 0; + let mut right_edge: usize = tail; + + // The general problem looks like this + // GHIJKLM...ABCDEF - before any swaps + // ABCDEFM...GHIJKL - after 1 pass of swaps + // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store + // - then restart the algorithm with a new (smaller) store + // Sometimes the temp store is reached when the right edge is at the end + // of the buffer - this means we've hit the right order with fewer swaps! + // E.g + // EF..ABCD + // ABCDEF.. 
- after four only swaps we've finished + + while left_edge < len && right_edge != cap { + let mut right_offset = 0; + for i in left_edge..right_edge { + right_offset = (i - left_edge) % (cap - right_edge); + let src: isize = (right_edge + right_offset) as isize; + ptr::swap(buf.offset(i as isize), buf.offset(src)); + } + let n_ops = right_edge - left_edge; + left_edge += n_ops; + right_edge += right_offset + 1; + + } + } + + } + let out = Vec::from_raw_parts(buf, len, cap); + mem::forget(other); + out + } + } +} + #[cfg(test)] mod tests { - use core::iter::Iterator; - use core::option::Option::Some; - use test; use super::VecDeque; @@ -2366,31 +2644,79 @@ mod tests { } #[test] - fn test_zst_push() { - const N: usize = 8; - - // Zero sized type - struct Zst; - - // Test that for all possible sequences of push_front / push_back, - // we end up with a deque of the correct size - - for len in 0..N { - let mut tester = VecDeque::with_capacity(len); - assert_eq!(tester.len(), 0); - assert!(tester.capacity() >= len); - for case in 0..(1 << len) { - assert_eq!(tester.len(), 0); - for bit in 0..len { - if case & (1 << bit) != 0 { - tester.push_front(Zst); - } else { - tester.push_back(Zst); - } + fn test_from_vec() { + use super::super::vec::Vec; + for cap in 0..35 { + for len in 0..cap + 1 { + let mut vec = Vec::with_capacity(cap); + vec.extend(0..len); + + let vd = VecDeque::from(vec.clone()); + assert!(vd.cap().is_power_of_two()); + assert_eq!(vd.len(), vec.len()); + assert!(vd.into_iter().eq(vec)); + } + } + } + + #[test] + fn test_vec_from_vecdeque() { + use super::super::vec::Vec; + + fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) { + let mut vd = VecDeque::with_capacity(cap); + for _ in 0..offset { + vd.push_back(0); + vd.pop_front(); + } + vd.extend(0..len); + + let vec: Vec<_> = Vec::from(vd.clone()); + assert_eq!(vec.len(), vd.len()); + assert!(vec.into_iter().eq(vd)); + } + + for cap_pwr in 0..7 { + // Make capacity as a (2^x)-1, so that 
the ring size is 2^x + let cap = (2i32.pow(cap_pwr) - 1) as usize; + + // In these cases there is enough free space to solve it with copies + for len in 0..((cap+1)/2) { + // Test contiguous cases + for offset in 0..(cap-len) { + create_vec_and_test_convert(cap, offset, len) + } + + // Test cases where block at end of buffer is bigger than block at start + for offset in (cap-len)..(cap-(len/2)) { + create_vec_and_test_convert(cap, offset, len) + } + + // Test cases where block at start of buffer is bigger than block at end + for offset in (cap-(len/2))..cap { + create_vec_and_test_convert(cap, offset, len) + } + } + + // Now there's not (necessarily) space to straighten the ring with simple copies, + // the ring will use swapping when: + // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)) + // right block size > free space && left block size > free space + for len in ((cap+1)/2)..cap { + // Test contiguous cases + for offset in 0..(cap-len) { + create_vec_and_test_convert(cap, offset, len) + } + + // Test cases where block at end of buffer is bigger than block at start + for offset in (cap-len)..(cap-(len/2)) { + create_vec_and_test_convert(cap, offset, len) + } + + // Test cases where block at start of buffer is bigger than block at end + for offset in (cap-(len/2))..cap { + create_vec_and_test_convert(cap, offset, len) } - assert_eq!(tester.len(), len); - assert_eq!(tester.iter().count(), len); - tester.clear(); } } } diff --git a/src/libcollectionstest/binary_heap.rs b/src/libcollectionstest/binary_heap.rs index cc4366e8ae463..9cd63d8793184 100644 --- a/src/libcollectionstest/binary_heap.rs +++ b/src/libcollectionstest/binary_heap.rs @@ -9,6 +9,7 @@ // except according to those terms. 
use std::collections::BinaryHeap; +use std::collections::binary_heap::Drain; #[test] fn test_iterator() { @@ -81,6 +82,18 @@ fn test_peek_and_pop() { } } +#[test] +fn test_peek_mut() { + let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]; + let mut heap = BinaryHeap::from(data); + assert_eq!(heap.peek(), Some(&10)); + { + let mut top = heap.peek_mut().unwrap(); + *top -= 2; + } + assert_eq!(heap.peek(), Some(&9)); +} + #[test] fn test_push() { let mut heap = BinaryHeap::from(vec![2, 4, 9]); @@ -126,6 +139,7 @@ fn test_push_unique() { } #[test] +#[allow(deprecated)] fn test_push_pop() { let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]); assert_eq!(heap.len(), 5); @@ -140,6 +154,7 @@ fn test_push_pop() { } #[test] +#[allow(deprecated)] fn test_replace() { let mut heap = BinaryHeap::from(vec![5, 5, 2, 1, 3]); assert_eq!(heap.len(), 5); @@ -193,6 +208,13 @@ fn test_empty_peek() { } #[test] +fn test_empty_peek_mut() { + let mut empty = BinaryHeap::::new(); + assert!(empty.peek_mut().is_none()); +} + +#[test] +#[allow(deprecated)] fn test_empty_replace() { let mut heap = BinaryHeap::new(); assert!(heap.replace(5).is_none()); @@ -242,3 +264,42 @@ fn test_extend_ref() { assert_eq!(a.len(), 5); assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]); } + +#[test] +fn test_append() { + let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]); + let mut b = BinaryHeap::from(vec![-20, 5, 43]); + + a.append(&mut b); + + assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); + assert!(b.is_empty()); +} + +#[test] +fn test_append_to_empty() { + let mut a = BinaryHeap::new(); + let mut b = BinaryHeap::from(vec![-20, 5, 43]); + + a.append(&mut b); + + assert_eq!(a.into_sorted_vec(), [-20, 5, 43]); + assert!(b.is_empty()); +} + +#[test] +fn test_extend_specialization() { + let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]); + let b = BinaryHeap::from(vec![-20, 5, 43]); + + a.extend(b); + + assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); +} + 
+#[allow(dead_code)] +fn assert_covariance() { + fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { + d + } +} diff --git a/src/libcollectionstest/btree/map.rs b/src/libcollectionstest/btree/map.rs index 05d4aff108aa9..8222da105ccad 100644 --- a/src/libcollectionstest/btree/map.rs +++ b/src/libcollectionstest/btree/map.rs @@ -9,10 +9,13 @@ // except according to those terms. use std::collections::BTreeMap; -use std::collections::Bound::{Excluded, Included, Unbounded, self}; +use std::collections::Bound::{self, Excluded, Included, Unbounded}; use std::collections::btree_map::Entry::{Occupied, Vacant}; use std::rc::Rc; +use std::iter::FromIterator; +use super::DeterministicRng; + #[test] fn test_basic_large() { let mut map = BTreeMap::new(); @@ -20,41 +23,41 @@ fn test_basic_large() { assert_eq!(map.len(), 0); for i in 0..size { - assert_eq!(map.insert(i, 10*i), None); + assert_eq!(map.insert(i, 10 * i), None); assert_eq!(map.len(), i + 1); } for i in 0..size { - assert_eq!(map.get(&i).unwrap(), &(i*10)); + assert_eq!(map.get(&i).unwrap(), &(i * 10)); } - for i in size..size*2 { + for i in size..size * 2 { assert_eq!(map.get(&i), None); } for i in 0..size { - assert_eq!(map.insert(i, 100*i), Some(10*i)); + assert_eq!(map.insert(i, 100 * i), Some(10 * i)); assert_eq!(map.len(), size); } for i in 0..size { - assert_eq!(map.get(&i).unwrap(), &(i*100)); + assert_eq!(map.get(&i).unwrap(), &(i * 100)); } - for i in 0..size/2 { - assert_eq!(map.remove(&(i*2)), Some(i*200)); + for i in 0..size / 2 { + assert_eq!(map.remove(&(i * 2)), Some(i * 200)); assert_eq!(map.len(), size - i - 1); } - for i in 0..size/2 { - assert_eq!(map.get(&(2*i)), None); - assert_eq!(map.get(&(2*i+1)).unwrap(), &(i*200 + 100)); + for i in 0..size / 2 { + assert_eq!(map.get(&(2 * i)), None); + assert_eq!(map.get(&(2 * i + 1)).unwrap(), &(i * 200 + 100)); } - for i in 0..size/2 { - assert_eq!(map.remove(&(2*i)), None); - assert_eq!(map.remove(&(2*i+1)), Some(i*200 + 100)); - 
assert_eq!(map.len(), size/2 - i - 1); + for i in 0..size / 2 { + assert_eq!(map.remove(&(2 * i)), None); + assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100)); + assert_eq!(map.len(), size / 2 - i - 1); } } @@ -81,7 +84,9 @@ fn test_iter() { // Forwards let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); - fn test(size: usize, mut iter: T) where T: Iterator { + fn test(size: usize, mut iter: T) + where T: Iterator + { for i in 0..size { assert_eq!(iter.size_hint(), (size - i, Some(size - i))); assert_eq!(iter.next().unwrap(), (i, i)); @@ -101,7 +106,9 @@ fn test_iter_rev() { // Forwards let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); - fn test(size: usize, mut iter: T) where T: Iterator { + fn test(size: usize, mut iter: T) + where T: Iterator + { for i in 0..size { assert_eq!(iter.size_hint(), (size - i, Some(size - i))); assert_eq!(iter.next().unwrap(), (size - i - 1, size - i - 1)); @@ -114,6 +121,20 @@ fn test_iter_rev() { test(size, map.into_iter().rev()); } +#[test] +fn test_values_mut() { + let mut a = BTreeMap::new(); + a.insert(1, String::from("hello")); + a.insert(2, String::from("goodbye")); + + for value in a.values_mut() { + value.push_str("!"); + } + + let values: Vec = a.values().cloned().collect(); + assert_eq!(values, [String::from("hello!"), String::from("goodbye!")]); +} + #[test] fn test_iter_mixed() { let size = 10000; @@ -122,7 +143,8 @@ fn test_iter_mixed() { let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); fn test(size: usize, mut iter: T) - where T: Iterator + DoubleEndedIterator { + where T: Iterator + DoubleEndedIterator + { for i in 0..size / 4 { assert_eq!(iter.size_hint(), (size - i * 2, Some(size - i * 2))); assert_eq!(iter.next().unwrap(), (i, i)); @@ -187,7 +209,7 @@ fn test_range() { for i in 0..size { for j in i..size { let mut kvs = map.range(Included(&i), Included(&j)).map(|(&k, &v)| (k, v)); - let mut pairs = (i..j+1).map(|i| (i, i)); + let mut pairs = (i..j + 
1).map(|i| (i, i)); for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) { assert_eq!(kv, pair); @@ -227,7 +249,7 @@ fn test_borrow() { } #[test] -fn test_entry(){ +fn test_entry() { let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; let mut map: BTreeMap<_, _> = xs.iter().cloned().collect(); @@ -326,17 +348,23 @@ fn test_bad_zst() { struct Bad; impl PartialEq for Bad { - fn eq(&self, _: &Self) -> bool { false } + fn eq(&self, _: &Self) -> bool { + false + } } impl Eq for Bad {} impl PartialOrd for Bad { - fn partial_cmp(&self, _: &Self) -> Option { Some(Ordering::Less) } + fn partial_cmp(&self, _: &Self) -> Option { + Some(Ordering::Less) + } } impl Ord for Bad { - fn cmp(&self, _: &Self) -> Ordering { Ordering::Less } + fn cmp(&self, _: &Self) -> Ordering { + Ordering::Less + } } let mut m = BTreeMap::new(); @@ -353,45 +381,197 @@ fn test_clone() { assert_eq!(map.len(), 0); for i in 0..size { - assert_eq!(map.insert(i, 10*i), None); + assert_eq!(map.insert(i, 10 * i), None); assert_eq!(map.len(), i + 1); assert_eq!(map, map.clone()); } for i in 0..size { - assert_eq!(map.insert(i, 100*i), Some(10*i)); + assert_eq!(map.insert(i, 100 * i), Some(10 * i)); assert_eq!(map.len(), size); assert_eq!(map, map.clone()); } - for i in 0..size/2 { - assert_eq!(map.remove(&(i*2)), Some(i*200)); + for i in 0..size / 2 { + assert_eq!(map.remove(&(i * 2)), Some(i * 200)); assert_eq!(map.len(), size - i - 1); assert_eq!(map, map.clone()); } - for i in 0..size/2 { - assert_eq!(map.remove(&(2*i)), None); - assert_eq!(map.remove(&(2*i+1)), Some(i*200 + 100)); - assert_eq!(map.len(), size/2 - i - 1); + for i in 0..size / 2 { + assert_eq!(map.remove(&(2 * i)), None); + assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100)); + assert_eq!(map.len(), size / 2 - i - 1); assert_eq!(map, map.clone()); } } #[test] +#[allow(dead_code)] fn test_variance() { use std::collections::btree_map::{Iter, IntoIter, Range, Keys, Values}; - fn map_key<'new>(v: BTreeMap<&'static str, ()>) -> 
BTreeMap<&'new str, ()> { v } - fn map_val<'new>(v: BTreeMap<(), &'static str>) -> BTreeMap<(), &'new str> { v } - fn iter_key<'a, 'new>(v: Iter<'a, &'static str, ()>) -> Iter<'a, &'new str, ()> { v } - fn iter_val<'a, 'new>(v: Iter<'a, (), &'static str>) -> Iter<'a, (), &'new str> { v } - fn into_iter_key<'new>(v: IntoIter<&'static str, ()>) -> IntoIter<&'new str, ()> { v } - fn into_iter_val<'new>(v: IntoIter<(), &'static str>) -> IntoIter<(), &'new str> { v } - fn range_key<'a, 'new>(v: Range<'a, &'static str, ()>) -> Range<'a, &'new str, ()> { v } - fn range_val<'a, 'new>(v: Range<'a, (), &'static str>) -> Range<'a, (), &'new str> { v } - fn keys<'a, 'new>(v: Keys<'a, &'static str, ()>) -> Keys<'a, &'new str, ()> { v } - fn vals<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str> { v } + fn map_key<'new>(v: BTreeMap<&'static str, ()>) -> BTreeMap<&'new str, ()> { + v + } + fn map_val<'new>(v: BTreeMap<(), &'static str>) -> BTreeMap<(), &'new str> { + v + } + fn iter_key<'a, 'new>(v: Iter<'a, &'static str, ()>) -> Iter<'a, &'new str, ()> { + v + } + fn iter_val<'a, 'new>(v: Iter<'a, (), &'static str>) -> Iter<'a, (), &'new str> { + v + } + fn into_iter_key<'new>(v: IntoIter<&'static str, ()>) -> IntoIter<&'new str, ()> { + v + } + fn into_iter_val<'new>(v: IntoIter<(), &'static str>) -> IntoIter<(), &'new str> { + v + } + fn range_key<'a, 'new>(v: Range<'a, &'static str, ()>) -> Range<'a, &'new str, ()> { + v + } + fn range_val<'a, 'new>(v: Range<'a, (), &'static str>) -> Range<'a, (), &'new str> { + v + } + fn keys<'a, 'new>(v: Keys<'a, &'static str, ()>) -> Keys<'a, &'new str, ()> { + v + } + fn vals<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str> { + v + } +} + +#[test] +fn test_occupied_entry_key() { + let mut a = BTreeMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key.clone(), value.clone()); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + 
match a.entry(key.clone()) { + Vacant(_) => panic!(), + Occupied(e) => assert_eq!(key, *e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); +} + +#[test] +fn test_vacant_entry_key() { + let mut a = BTreeMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry(key.clone()) { + Occupied(_) => panic!(), + Vacant(e) => { + assert_eq!(key, *e.key()); + e.insert(value.clone()); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); +} + +macro_rules! create_append_test { + ($name:ident, $len:expr) => { + #[test] + fn $name() { + let mut a = BTreeMap::new(); + for i in 0..8 { + a.insert(i, i); + } + + let mut b = BTreeMap::new(); + for i in 5..$len { + b.insert(i, 2*i); + } + + a.append(&mut b); + + assert_eq!(a.len(), $len); + assert_eq!(b.len(), 0); + + for i in 0..$len { + if i < 5 { + assert_eq!(a[&i], i); + } else { + assert_eq!(a[&i], 2*i); + } + } + + assert_eq!(a.remove(&($len-1)), Some(2*($len-1))); + assert_eq!(a.insert($len-1, 20), None); + } + }; +} + +// These are mostly for testing the algorithm that "fixes" the right edge after insertion. +// Single node. +create_append_test!(test_append_9, 9); +// Two leafs that don't need fixing. +create_append_test!(test_append_17, 17); +// Two leafs where the second one ends up underfull and needs stealing at the end. +create_append_test!(test_append_14, 14); +// Two leafs where the second one ends up empty because the insertion finished at the root. +create_append_test!(test_append_12, 12); +// Three levels; insertion finished at the root. +create_append_test!(test_append_144, 144); +// Three levels; insertion finished at leaf while there is an empty node on the second level. +create_append_test!(test_append_145, 145); +// Tests for several randomly chosen sizes. 
+create_append_test!(test_append_170, 170); +create_append_test!(test_append_181, 181); +create_append_test!(test_append_239, 239); +create_append_test!(test_append_1700, 1700); + +fn rand_data(len: usize) -> Vec<(u32, u32)> { + let mut rng = DeterministicRng::new(); + Vec::from_iter((0..len).map(|_| (rng.next(), rng.next()))) +} + +#[test] +fn test_split_off_empty_right() { + let mut data = rand_data(173); + + let mut map = BTreeMap::from_iter(data.clone()); + let right = map.split_off(&(data.iter().max().unwrap().0 + 1)); + + data.sort(); + assert!(map.into_iter().eq(data)); + assert!(right.into_iter().eq(None)); +} + +#[test] +fn test_split_off_empty_left() { + let mut data = rand_data(314); + + let mut map = BTreeMap::from_iter(data.clone()); + let right = map.split_off(&data.iter().min().unwrap().0); + + data.sort(); + assert!(map.into_iter().eq(None)); + assert!(right.into_iter().eq(data)); +} + +#[test] +fn test_split_off_large_random_sorted() { + let mut data = rand_data(1529); + // special case with maximum height. 
+ data.sort(); + + let mut map = BTreeMap::from_iter(data.clone()); + let key = data[data.len() / 2].0; + let right = map.split_off(&key); + + assert!(map.into_iter().eq(data.clone().into_iter().filter(|x| x.0 < key))); + assert!(right.into_iter().eq(data.into_iter().filter(|x| x.0 >= key))); } mod bench { diff --git a/src/libcollectionstest/btree/mod.rs b/src/libcollectionstest/btree/mod.rs index 0db48f3ce9edb..ae8b18d0c9fd9 100644 --- a/src/libcollectionstest/btree/mod.rs +++ b/src/libcollectionstest/btree/mod.rs @@ -10,3 +10,33 @@ mod map; mod set; + +/// XorShiftRng +struct DeterministicRng { + x: u32, + y: u32, + z: u32, + w: u32, +} + +impl DeterministicRng { + fn new() -> Self { + DeterministicRng { + x: 0x193a6754, + y: 0xa8a7d469, + z: 0x97830e05, + w: 0x113ba7bb, + } + } + + fn next(&mut self) -> u32 { + let x = self.x; + let t = x ^ (x << 11); + self.x = self.y; + self.y = self.z; + self.z = self.w; + let w_ = self.w; + self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8)); + self.w + } +} diff --git a/src/libcollectionstest/btree/set.rs b/src/libcollectionstest/btree/set.rs index 8fcfe97f42afc..6171b8ba624cd 100644 --- a/src/libcollectionstest/btree/set.rs +++ b/src/libcollectionstest/btree/set.rs @@ -10,65 +10,56 @@ use std::collections::BTreeSet; +use std::iter::FromIterator; +use super::DeterministicRng; + #[test] fn test_clone_eq() { - let mut m = BTreeSet::new(); + let mut m = BTreeSet::new(); - m.insert(1); - m.insert(2); + m.insert(1); + m.insert(2); - assert!(m.clone() == m); + assert!(m.clone() == m); } #[test] fn test_hash() { - let mut x = BTreeSet::new(); - let mut y = BTreeSet::new(); - - x.insert(1); - x.insert(2); - x.insert(3); - - y.insert(3); - y.insert(2); - y.insert(1); - - assert!(::hash(&x) == ::hash(&y)); -} - -struct Counter<'a, 'b> { - i: &'a mut usize, - expected: &'b [i32], -} + let mut x = BTreeSet::new(); + let mut y = BTreeSet::new(); -impl<'a, 'b, 'c> FnMut<(&'c i32,)> for Counter<'a, 'b> { - extern "rust-call" fn call_mut(&mut self, 
(&x,): (&'c i32,)) -> bool { - assert_eq!(x, self.expected[*self.i]); - *self.i += 1; - true - } -} + x.insert(1); + x.insert(2); + x.insert(3); -impl<'a, 'b, 'c> FnOnce<(&'c i32,)> for Counter<'a, 'b> { - type Output = bool; + y.insert(3); + y.insert(2); + y.insert(1); - extern "rust-call" fn call_once(mut self, args: (&'c i32,)) -> bool { - self.call_mut(args) - } + assert!(::hash(&x) == ::hash(&y)); } -fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) where - // FIXME Replace Counter with `Box _>` - F: FnOnce(&BTreeSet, &BTreeSet, Counter) -> bool, +fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) + where F: FnOnce(&BTreeSet, &BTreeSet, &mut FnMut(&i32) -> bool) -> bool { let mut set_a = BTreeSet::new(); let mut set_b = BTreeSet::new(); - for x in a { assert!(set_a.insert(*x)) } - for y in b { assert!(set_b.insert(*y)) } + for x in a { + assert!(set_a.insert(*x)) + } + for y in b { + assert!(set_b.insert(*y)) + } let mut i = 0; - f(&set_a, &set_b, Counter { i: &mut i, expected: expected }); + f(&set_a, + &set_b, + &mut |&x| { + assert_eq!(x, expected[i]); + i += 1; + true + }); assert_eq!(i, expected.len()); } @@ -97,9 +88,7 @@ fn test_difference() { check_difference(&[], &[], &[]); check_difference(&[1, 12], &[], &[1, 12]); check_difference(&[], &[1, 2, 3, 9], &[]); - check_difference(&[1, 3, 5, 9, 11], - &[3, 9], - &[1, 5, 11]); + check_difference(&[1, 3, 5, 9, 11], &[3, 9], &[1, 5, 11]); check_difference(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 14, 23, 34, 38, 39, 50], &[11, 22, 33, 40, 42]); @@ -254,3 +243,89 @@ fn test_recovery() { assert_eq!(s.iter().next(), None); } + +#[test] +#[allow(dead_code)] +fn test_variance() { + use std::collections::btree_set::{IntoIter, Iter, Range}; + + fn set<'new>(v: BTreeSet<&'static str>) -> BTreeSet<&'new str> { + v + } + fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { + v + } + fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> { + v + } + fn range<'a, 'new>(v: Range<'a, 
&'static str>) -> Range<'a, &'new str> { + v + } +} + +#[test] +fn test_append() { + let mut a = BTreeSet::new(); + a.insert(1); + a.insert(2); + a.insert(3); + + let mut b = BTreeSet::new(); + b.insert(3); + b.insert(4); + b.insert(5); + + a.append(&mut b); + + assert_eq!(a.len(), 5); + assert_eq!(b.len(), 0); + + assert_eq!(a.contains(&1), true); + assert_eq!(a.contains(&2), true); + assert_eq!(a.contains(&3), true); + assert_eq!(a.contains(&4), true); + assert_eq!(a.contains(&5), true); +} + +fn rand_data(len: usize) -> Vec { + let mut rng = DeterministicRng::new(); + Vec::from_iter((0..len).map(|_| rng.next())) +} + +#[test] +fn test_split_off_empty_right() { + let mut data = rand_data(173); + + let mut set = BTreeSet::from_iter(data.clone()); + let right = set.split_off(&(data.iter().max().unwrap() + 1)); + + data.sort(); + assert!(set.into_iter().eq(data)); + assert!(right.into_iter().eq(None)); +} + +#[test] +fn test_split_off_empty_left() { + let mut data = rand_data(314); + + let mut set = BTreeSet::from_iter(data.clone()); + let right = set.split_off(data.iter().min().unwrap()); + + data.sort(); + assert!(set.into_iter().eq(None)); + assert!(right.into_iter().eq(data)); +} + +#[test] +fn test_split_off_large_random_sorted() { + let mut data = rand_data(1529); + // special case with maximum height. + data.sort(); + + let mut set = BTreeSet::from_iter(data.clone()); + let key = data[data.len() / 2]; + let right = set.split_off(&key); + + assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key))); + assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key))); +} diff --git a/src/libcollectionstest/cow_str.rs b/src/libcollectionstest/cow_str.rs new file mode 100644 index 0000000000000..b29245121daad --- /dev/null +++ b/src/libcollectionstest/cow_str.rs @@ -0,0 +1,141 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::borrow::Cow; + +// check that Cow<'a, str> implements addition +#[test] +fn check_cow_add_cow() { + let borrowed1 = Cow::Borrowed("Hello, "); + let borrowed2 = Cow::Borrowed("World!"); + let borrow_empty = Cow::Borrowed(""); + + let owned1: Cow = Cow::Owned(String::from("Hi, ")); + let owned2: Cow = Cow::Owned(String::from("Rustaceans!")); + let owned_empty: Cow = Cow::Owned(String::new()); + + assert_eq!("Hello, World!", borrowed1.clone() + borrowed2.clone()); + assert_eq!("Hello, Rustaceans!", borrowed1.clone() + owned2.clone()); + + assert_eq!("Hi, World!", owned1.clone() + borrowed2.clone()); + assert_eq!("Hi, Rustaceans!", owned1.clone() + owned2.clone()); + + if let Cow::Owned(_) = borrowed1.clone() + borrow_empty.clone() { + panic!("Adding empty strings to a borrow should note allocate"); + } + if let Cow::Owned(_) = borrow_empty.clone() + borrowed1.clone() { + panic!("Adding empty strings to a borrow should note allocate"); + } + if let Cow::Owned(_) = borrowed1.clone() + owned_empty.clone() { + panic!("Adding empty strings to a borrow should note allocate"); + } + if let Cow::Owned(_) = owned_empty.clone() + borrowed1.clone() { + panic!("Adding empty strings to a borrow should note allocate"); + } +} + +#[test] +fn check_cow_add_str() { + let borrowed = Cow::Borrowed("Hello, "); + let borrow_empty = Cow::Borrowed(""); + + let owned: Cow = Cow::Owned(String::from("Hi, ")); + let owned_empty: Cow = Cow::Owned(String::new()); + + assert_eq!("Hello, World!", borrowed.clone() + "World!"); + + assert_eq!("Hi, World!", owned.clone() + "World!"); + + if let Cow::Owned(_) = borrowed.clone() + "" { + panic!("Adding empty strings to a borrow should note allocate"); + } + if let Cow::Owned(_) = borrow_empty.clone() + "Hello, " { + panic!("Adding empty strings to a 
borrow should note allocate"); + } + if let Cow::Owned(_) = owned_empty.clone() + "Hello, " { + panic!("Adding empty strings to a borrow should note allocate"); + } +} + +#[test] +fn check_cow_add_assign_cow() { + let mut borrowed1 = Cow::Borrowed("Hello, "); + let borrowed2 = Cow::Borrowed("World!"); + let borrow_empty = Cow::Borrowed(""); + + let mut owned1: Cow = Cow::Owned(String::from("Hi, ")); + let owned2: Cow = Cow::Owned(String::from("Rustaceans!")); + let owned_empty: Cow = Cow::Owned(String::new()); + + let mut s = borrowed1.clone(); + s += borrow_empty.clone(); + assert_eq!("Hello, ", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + let mut s = borrow_empty.clone(); + s += borrowed1.clone(); + assert_eq!("Hello, ", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + let mut s = borrowed1.clone(); + s += owned_empty.clone(); + assert_eq!("Hello, ", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + let mut s = owned_empty.clone(); + s += borrowed1.clone(); + assert_eq!("Hello, ", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + + owned1 += borrowed2; + borrowed1 += owned2; + + assert_eq!("Hi, World!", owned1); + assert_eq!("Hello, Rustaceans!", borrowed1); +} + +#[test] +fn check_cow_add_assign_str() { + let mut borrowed = Cow::Borrowed("Hello, "); + let borrow_empty = Cow::Borrowed(""); + + let mut owned: Cow = Cow::Owned(String::from("Hi, ")); + let owned_empty: Cow = Cow::Owned(String::new()); + + let mut s = borrowed.clone(); + s += ""; + assert_eq!("Hello, ", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + let mut s = borrow_empty.clone(); + s += "World!"; + assert_eq!("World!", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note 
allocate"); + } + let mut s = owned_empty.clone(); + s += "World!"; + assert_eq!("World!", s); + if let Cow::Owned(_) = s { + panic!("Adding empty strings to a borrow should note allocate"); + } + + owned += "World!"; + borrowed += "World!"; + + assert_eq!("Hi, World!", owned); + assert_eq!("Hello, World!", borrowed); +} diff --git a/src/libcollectionstest/enum_set.rs b/src/libcollectionstest/enum_set.rs index b073c2f3ae4dd..972361326d7bb 100644 --- a/src/libcollectionstest/enum_set.rs +++ b/src/libcollectionstest/enum_set.rs @@ -17,7 +17,9 @@ use self::Foo::*; #[derive(Copy, Clone, PartialEq, Debug)] #[repr(usize)] enum Foo { - A, B, C + A, + B, + C, } impl CLike for Foo { @@ -157,15 +159,15 @@ fn test_iterator() { e1.insert(C); let elems: Vec<_> = e1.iter().collect(); - assert_eq!(elems, [A,C]); + assert_eq!(elems, [A, C]); e1.insert(C); let elems: Vec<_> = e1.iter().collect(); - assert_eq!(elems, [A,C]); + assert_eq!(elems, [A, C]); e1.insert(B); let elems: Vec<_> = e1.iter().collect(); - assert_eq!(elems, [A,B,C]); + assert_eq!(elems, [A, B, C]); } /////////////////////////////////////////////////////////////////////////// @@ -183,7 +185,7 @@ fn test_operators() { let e_union = e1 | e2; let elems: Vec<_> = e_union.iter().collect(); - assert_eq!(elems, [A,B,C]); + assert_eq!(elems, [A, B, C]); let e_intersection = e1 & e2; let elems: Vec<_> = e_intersection.iter().collect(); @@ -201,17 +203,17 @@ fn test_operators() { // Bitwise XOR of two sets, aka symmetric difference let e_symmetric_diff = e1 ^ e2; let elems: Vec<_> = e_symmetric_diff.iter().collect(); - assert_eq!(elems, [A,B]); + assert_eq!(elems, [A, B]); // Another way to express symmetric difference let e_symmetric_diff = (e1 - e2) | (e2 - e1); let elems: Vec<_> = e_symmetric_diff.iter().collect(); - assert_eq!(elems, [A,B]); + assert_eq!(elems, [A, B]); // Yet another way to express symmetric difference let e_symmetric_diff = (e1 | e2) - (e1 & e2); let elems: Vec<_> = e_symmetric_diff.iter().collect(); 
- assert_eq!(elems, [A,B]); + assert_eq!(elems, [A, B]); } #[test] diff --git a/src/libcollectionstest/lib.rs b/src/libcollectionstest/lib.rs index e57620dfb04eb..58ce78eab9a17 100644 --- a/src/libcollectionstest/lib.rs +++ b/src/libcollectionstest/lib.rs @@ -8,48 +8,43 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(ascii)] +#![deny(warnings)] + #![feature(binary_heap_extras)] #![feature(box_syntax)] #![feature(btree_range)] #![feature(collections)] #![feature(collections_bound)] #![feature(const_fn)] -#![feature(fn_traits)] -#![feature(deque_extras)] -#![feature(drain)] +#![feature(dedup_by)] #![feature(enumset)] -#![feature(into_cow)] -#![feature(iter_arith)] +#![feature(exact_size_is_empty)] #![feature(pattern)] #![feature(rand)] -#![feature(range_inclusive)] -#![feature(rustc_private)] -#![feature(set_recovery)] -#![feature(slice_bytes)] -#![feature(slice_splits)] +#![feature(repeat_str)] #![feature(step_by)] -#![feature(str_char)] #![feature(str_escape)] -#![feature(str_match_indices)] -#![feature(str_utf16)] +#![feature(str_replacen)] +#![feature(string_split_off)] #![feature(test)] #![feature(unboxed_closures)] #![feature(unicode)] -#![feature(vec_push_all)] - -#[macro_use] extern crate log; +#![feature(vec_into_iter_as_slice)] extern crate collections; extern crate test; extern crate rustc_unicode; -use std::hash::{Hash, Hasher, SipHasher}; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; -#[cfg(test)] #[macro_use] mod bench; +#[cfg(test)] +#[macro_use] +mod bench; mod binary_heap; mod btree; +mod cow_str; mod enum_set; mod fmt; mod linked_list; @@ -60,7 +55,7 @@ mod vec_deque; mod vec; fn hash(t: &T) -> u64 { - let mut s = SipHasher::new(); + let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } diff --git a/src/libcollectionstest/linked_list.rs b/src/libcollectionstest/linked_list.rs index 7dac967d8030c..956d75a95a58e 100644 --- 
a/src/libcollectionstest/linked_list.rs +++ b/src/libcollectionstest/linked_list.rs @@ -54,7 +54,7 @@ fn test_basic() { #[cfg(test)] fn generate_test() -> LinkedList { - list_from(&[0,1,2,3,4,5,6]) + list_from(&[0, 1, 2, 3, 4, 5, 6]) } #[cfg(test)] @@ -78,7 +78,7 @@ fn test_split_off() { // not singleton, forwards { - let u = vec![1,2,3,4,5]; + let u = vec![1, 2, 3, 4, 5]; let mut m = list_from(&u); let mut n = m.split_off(2); assert_eq!(m.len(), 2); @@ -92,7 +92,7 @@ fn test_split_off() { } // not singleton, backwards { - let u = vec![1,2,3,4,5]; + let u = vec![1, 2, 3, 4, 5]; let mut m = list_from(&u); let mut n = m.split_off(4); assert_eq!(m.len(), 4); @@ -246,33 +246,33 @@ fn test_eq() { m.push_back(1); assert!(n == m); - let n = list_from(&[2,3,4]); - let m = list_from(&[1,2,3]); + let n = list_from(&[2, 3, 4]); + let m = list_from(&[1, 2, 3]); assert!(n != m); } #[test] fn test_hash() { - let mut x = LinkedList::new(); - let mut y = LinkedList::new(); + let mut x = LinkedList::new(); + let mut y = LinkedList::new(); - assert!(::hash(&x) == ::hash(&y)); + assert!(::hash(&x) == ::hash(&y)); - x.push_back(1); - x.push_back(2); - x.push_back(3); + x.push_back(1); + x.push_back(2); + x.push_back(3); - y.push_front(3); - y.push_front(2); - y.push_front(1); + y.push_front(3); + y.push_front(2); + y.push_front(1); - assert!(::hash(&x) == ::hash(&y)); + assert!(::hash(&x) == ::hash(&y)); } #[test] fn test_ord() { let n = list_from(&[]); - let m = list_from(&[1,2,3]); + let m = list_from(&[1, 2, 3]); assert!(n < m); assert!(m > n); assert!(n <= n); @@ -281,7 +281,7 @@ fn test_ord() { #[test] fn test_ord_nan() { - let nan = 0.0f64/0.0; + let nan = 0.0f64 / 0.0; let n = list_from(&[nan]); let m = list_from(&[nan]); assert!(!(n < m)); @@ -296,15 +296,15 @@ fn test_ord_nan() { assert!(!(n <= one)); assert!(!(n >= one)); - let u = list_from(&[1.0f64,2.0,nan]); - let v = list_from(&[1.0f64,2.0,3.0]); + let u = list_from(&[1.0f64, 2.0, nan]); + let v = list_from(&[1.0f64, 
2.0, 3.0]); assert!(!(u < v)); assert!(!(u > v)); assert!(!(u <= v)); assert!(!(u >= v)); - let s = list_from(&[1.0f64,2.0,4.0,2.0]); - let t = list_from(&[1.0f64,2.0,3.0,2.0]); + let s = list_from(&[1.0f64, 2.0, 4.0, 2.0]); + let t = list_from(&[1.0f64, 2.0, 3.0, 2.0]); assert!(!(s < t)); assert!(s > one); assert!(!(s <= one)); @@ -317,7 +317,8 @@ fn test_show() { assert_eq!(format!("{:?}", list), "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"); let list: LinkedList<_> = vec!["just", "one", "test", "more"].iter().cloned().collect(); - assert_eq!(format!("{:?}", list), "[\"just\", \"one\", \"test\", \"more\"]"); + assert_eq!(format!("{:?}", list), + "[\"just\", \"one\", \"test\", \"more\"]"); } #[test] @@ -339,6 +340,22 @@ fn test_extend_ref() { assert_eq!(a, list_from(&[1, 2, 3, 4, 5, 6])); } +#[test] +fn test_extend() { + let mut a = LinkedList::new(); + a.push_back(1); + a.extend(vec![2, 3, 4]); // uses iterator + + assert_eq!(a.len(), 4); + assert!(a.iter().eq(&[1, 2, 3, 4])); + + let b: LinkedList<_> = vec![5, 6, 7].into_iter().collect(); + a.extend(b); // specializes to `append` + + assert_eq!(a.len(), 7); + assert!(a.iter().eq(&[1, 2, 3, 4, 5, 6, 7])); +} + #[bench] fn bench_collect_into(b: &mut test::Bencher) { let v = &[0; 64]; @@ -413,3 +430,16 @@ fn bench_iter_mut_rev(b: &mut test::Bencher) { assert!(m.iter_mut().rev().count() == 128); }) } + +#[test] +fn test_contains() { + let mut l = LinkedList::new(); + l.extend(&[2, 3, 4]); + + assert!(l.contains(&3)); + assert!(!l.contains(&1)); + + l.clear(); + + assert!(!l.contains(&3)); +} diff --git a/src/libcollectionstest/slice.rs b/src/libcollectionstest/slice.rs index 80dcd48fbfaa9..0e63e8d4a1ec8 100644 --- a/src/libcollectionstest/slice.rs +++ b/src/libcollectionstest/slice.rs @@ -9,14 +9,17 @@ // except according to those terms. 
use std::cmp::Ordering::{Equal, Greater, Less}; -use std::default::Default; use std::mem; use std::__rand::{Rng, thread_rng}; use std::rc::Rc; -fn square(n: usize) -> usize { n * n } +fn square(n: usize) -> usize { + n * n +} -fn is_odd(n: &usize) -> bool { *n % 2 == 1 } +fn is_odd(n: &usize) -> bool { + *n % 2 == 1 +} #[test] fn test_from_fn() { @@ -77,9 +80,9 @@ fn test_is_empty() { #[test] fn test_len_divzero() { type Z = [i8; 0]; - let v0 : &[Z] = &[]; - let v1 : &[Z] = &[[]]; - let v2 : &[Z] = &[[], []]; + let v0: &[Z] = &[]; + let v1: &[Z] = &[[]]; + let v2: &[Z] = &[[], []]; assert_eq!(mem::size_of::(), 0); assert_eq!(v0.len(), 0); assert_eq!(v1.len(), 1); @@ -268,9 +271,9 @@ fn test_swap_remove_fail() { fn test_swap_remove_noncopyable() { // Tests that we don't accidentally run destructors twice. let mut v: Vec> = Vec::new(); - v.push(box 0u8); - v.push(box 0u8); - v.push(box 0u8); + v.push(box 0); + v.push(box 0); + v.push(box 0); let mut _e = v.swap_remove(0); assert_eq!(v.len(), 2); _e = v.swap_remove(1); @@ -296,7 +299,7 @@ fn test_push() { #[test] fn test_truncate() { - let mut v: Vec> = vec![box 6,box 5,box 4]; + let mut v: Vec> = vec![box 6, box 5, box 4]; v.truncate(1); let v = v; assert_eq!(v.len(), 1); @@ -306,57 +309,12 @@ fn test_truncate() { #[test] fn test_clear() { - let mut v: Vec> = vec![box 6,box 5,box 4]; + let mut v: Vec> = vec![box 6, box 5, box 4]; v.clear(); assert_eq!(v.len(), 0); // If the unsafe block didn't drop things properly, we blow up here. 
} -#[test] -fn test_dedup() { - fn case(a: Vec, b: Vec) { - let mut v = a; - v.dedup(); - assert_eq!(v, b); - } - case(vec![], vec![]); - case(vec![1], vec![1]); - case(vec![1,1], vec![1]); - case(vec![1,2,3], vec![1,2,3]); - case(vec![1,1,2,3], vec![1,2,3]); - case(vec![1,2,2,3], vec![1,2,3]); - case(vec![1,2,3,3], vec![1,2,3]); - case(vec![1,1,2,2,2,3,3], vec![1,2,3]); -} - -#[test] -fn test_dedup_unique() { - let mut v0: Vec> = vec![box 1, box 1, box 2, box 3]; - v0.dedup(); - let mut v1: Vec> = vec![box 1, box 2, box 2, box 3]; - v1.dedup(); - let mut v2: Vec> = vec![box 1, box 2, box 3, box 3]; - v2.dedup(); - /* - * If the boxed pointers were leaked or otherwise misused, valgrind - * and/or rt should raise errors. - */ -} - -#[test] -fn test_dedup_shared() { - let mut v0: Vec> = vec![box 1, box 1, box 2, box 3]; - v0.dedup(); - let mut v1: Vec> = vec![box 1, box 2, box 2, box 3]; - v1.dedup(); - let mut v2: Vec> = vec![box 1, box 2, box 3, box 3]; - v2.dedup(); - /* - * If the pointers were leaked or otherwise misused, valgrind and/or - * rt should raise errors. 
- */ -} - #[test] fn test_retain() { let mut v = vec![1, 2, 3, 4, 5]; @@ -366,31 +324,31 @@ fn test_retain() { #[test] fn test_binary_search() { - assert_eq!([1,2,3,4,5].binary_search(&5).ok(), Some(4)); - assert_eq!([1,2,3,4,5].binary_search(&4).ok(), Some(3)); - assert_eq!([1,2,3,4,5].binary_search(&3).ok(), Some(2)); - assert_eq!([1,2,3,4,5].binary_search(&2).ok(), Some(1)); - assert_eq!([1,2,3,4,5].binary_search(&1).ok(), Some(0)); - - assert_eq!([2,4,6,8,10].binary_search(&1).ok(), None); - assert_eq!([2,4,6,8,10].binary_search(&5).ok(), None); - assert_eq!([2,4,6,8,10].binary_search(&4).ok(), Some(1)); - assert_eq!([2,4,6,8,10].binary_search(&10).ok(), Some(4)); - - assert_eq!([2,4,6,8].binary_search(&1).ok(), None); - assert_eq!([2,4,6,8].binary_search(&5).ok(), None); - assert_eq!([2,4,6,8].binary_search(&4).ok(), Some(1)); - assert_eq!([2,4,6,8].binary_search(&8).ok(), Some(3)); - - assert_eq!([2,4,6].binary_search(&1).ok(), None); - assert_eq!([2,4,6].binary_search(&5).ok(), None); - assert_eq!([2,4,6].binary_search(&4).ok(), Some(1)); - assert_eq!([2,4,6].binary_search(&6).ok(), Some(2)); - - assert_eq!([2,4].binary_search(&1).ok(), None); - assert_eq!([2,4].binary_search(&5).ok(), None); - assert_eq!([2,4].binary_search(&2).ok(), Some(0)); - assert_eq!([2,4].binary_search(&4).ok(), Some(1)); + assert_eq!([1, 2, 3, 4, 5].binary_search(&5).ok(), Some(4)); + assert_eq!([1, 2, 3, 4, 5].binary_search(&4).ok(), Some(3)); + assert_eq!([1, 2, 3, 4, 5].binary_search(&3).ok(), Some(2)); + assert_eq!([1, 2, 3, 4, 5].binary_search(&2).ok(), Some(1)); + assert_eq!([1, 2, 3, 4, 5].binary_search(&1).ok(), Some(0)); + + assert_eq!([2, 4, 6, 8, 10].binary_search(&1).ok(), None); + assert_eq!([2, 4, 6, 8, 10].binary_search(&5).ok(), None); + assert_eq!([2, 4, 6, 8, 10].binary_search(&4).ok(), Some(1)); + assert_eq!([2, 4, 6, 8, 10].binary_search(&10).ok(), Some(4)); + + assert_eq!([2, 4, 6, 8].binary_search(&1).ok(), None); + assert_eq!([2, 4, 6, 
8].binary_search(&5).ok(), None); + assert_eq!([2, 4, 6, 8].binary_search(&4).ok(), Some(1)); + assert_eq!([2, 4, 6, 8].binary_search(&8).ok(), Some(3)); + + assert_eq!([2, 4, 6].binary_search(&1).ok(), None); + assert_eq!([2, 4, 6].binary_search(&5).ok(), None); + assert_eq!([2, 4, 6].binary_search(&4).ok(), Some(1)); + assert_eq!([2, 4, 6].binary_search(&6).ok(), Some(2)); + + assert_eq!([2, 4].binary_search(&1).ok(), None); + assert_eq!([2, 4].binary_search(&5).ok(), None); + assert_eq!([2, 4].binary_search(&2).ok(), Some(0)); + assert_eq!([2, 4].binary_search(&4).ok(), Some(1)); assert_eq!([2].binary_search(&1).ok(), None); assert_eq!([2].binary_search(&5).ok(), None); @@ -399,14 +357,14 @@ fn test_binary_search() { assert_eq!([].binary_search(&1).ok(), None); assert_eq!([].binary_search(&5).ok(), None); - assert!([1,1,1,1,1].binary_search(&1).ok() != None); - assert!([1,1,1,1,2].binary_search(&1).ok() != None); - assert!([1,1,1,2,2].binary_search(&1).ok() != None); - assert!([1,1,2,2,2].binary_search(&1).ok() != None); - assert_eq!([1,2,2,2,2].binary_search(&1).ok(), Some(0)); + assert!([1, 1, 1, 1, 1].binary_search(&1).ok() != None); + assert!([1, 1, 1, 1, 2].binary_search(&1).ok() != None); + assert!([1, 1, 1, 2, 2].binary_search(&1).ok() != None); + assert!([1, 1, 2, 2, 2].binary_search(&1).ok() != None); + assert_eq!([1, 2, 2, 2, 2].binary_search(&1).ok(), Some(0)); - assert_eq!([1,2,3,4,5].binary_search(&6).ok(), None); - assert_eq!([1,2,3,4,5].binary_search(&0).ok(), None); + assert_eq!([1, 2, 3, 4, 5].binary_search(&6).ok(), None); + assert_eq!([1, 2, 3, 4, 5].binary_search(&0).ok(), None); } #[test] @@ -461,15 +419,17 @@ fn test_sort_stability() { // the second item represents which occurrence of that // number this element is, i.e. the second elements // will occur in sorted order. 
- let mut v: Vec<_> = (0..len).map(|_| { + let mut v: Vec<_> = (0..len) + .map(|_| { let n = thread_rng().gen::() % 10; counts[n] += 1; (n, counts[n]) - }).collect(); + }) + .collect(); // only sort on the first element, so an unstable sort // may mix up the counts. - v.sort_by(|&(a,_), &(b,_)| a.cmp(&b)); + v.sort_by(|&(a, _), &(b, _)| a.cmp(&b)); // this comparison includes the count (the second item // of the tuple), so elements with equal first items @@ -575,18 +535,48 @@ fn test_slice_2() { assert_eq!(v[1], 3); } +macro_rules! assert_order { + (Greater, $a:expr, $b:expr) => { + assert_eq!($a.cmp($b), Greater); + assert!($a > $b); + }; + (Less, $a:expr, $b:expr) => { + assert_eq!($a.cmp($b), Less); + assert!($a < $b); + }; + (Equal, $a:expr, $b:expr) => { + assert_eq!($a.cmp($b), Equal); + assert_eq!($a, $b); + } +} + #[test] -fn test_total_ord() { +fn test_total_ord_u8() { + let c = &[1u8, 2, 3]; + assert_order!(Greater, &[1u8, 2, 3, 4][..], &c[..]); + let c = &[1u8, 2, 3, 4]; + assert_order!(Less, &[1u8, 2, 3][..], &c[..]); + let c = &[1u8, 2, 3, 6]; + assert_order!(Equal, &[1u8, 2, 3, 6][..], &c[..]); + let c = &[1u8, 2, 3, 4, 5, 6]; + assert_order!(Less, &[1u8, 2, 3, 4, 5, 5, 5, 5][..], &c[..]); + let c = &[1u8, 2, 3, 4]; + assert_order!(Greater, &[2u8, 2][..], &c[..]); +} + + +#[test] +fn test_total_ord_i32() { let c = &[1, 2, 3]; - [1, 2, 3, 4][..].cmp(c) == Greater; + assert_order!(Greater, &[1, 2, 3, 4][..], &c[..]); let c = &[1, 2, 3, 4]; - [1, 2, 3][..].cmp(c) == Less; + assert_order!(Less, &[1, 2, 3][..], &c[..]); let c = &[1, 2, 3, 6]; - [1, 2, 3, 4][..].cmp(c) == Equal; + assert_order!(Equal, &[1, 2, 3, 6][..], &c[..]); let c = &[1, 2, 3, 4, 5, 6]; - [1, 2, 3, 4, 5, 5, 5, 5][..].cmp(c) == Less; + assert_order!(Less, &[1, 2, 3, 4, 5, 5, 5, 5][..], &c[..]); let c = &[1, 2, 3, 4]; - [2, 2][..].cmp(c) == Greater; + assert_order!(Greater, &[2, 2][..], &c[..]); } #[test] @@ -614,6 +604,24 @@ fn test_iter_size_hints() { 
assert_eq!(xs.iter_mut().size_hint(), (5, Some(5))); } +#[test] +fn test_iter_as_slice() { + let xs = [1, 2, 5, 10, 11]; + let mut iter = xs.iter(); + assert_eq!(iter.as_slice(), &[1, 2, 5, 10, 11]); + iter.next(); + assert_eq!(iter.as_slice(), &[2, 5, 10, 11]); +} + +#[test] +fn test_iter_as_ref() { + let xs = [1, 2, 5, 10, 11]; + let mut iter = xs.iter(); + assert_eq!(iter.as_ref(), &[1, 2, 5, 10, 11]); + iter.next(); + assert_eq!(iter.as_ref(), &[2, 5, 10, 11]); +} + #[test] fn test_iter_clone() { let xs = [1, 2, 5]; @@ -625,6 +633,16 @@ fn test_iter_clone() { assert_eq!(it.next(), jt.next()); } +#[test] +fn test_iter_is_empty() { + let xs = [1, 2, 5, 10, 11]; + for i in 0..xs.len() { + for j in i..xs.len() { + assert_eq!(xs[i..j].iter().is_empty(), xs[i..j].is_empty()); + } + } +} + #[test] fn test_mut_iterator() { let mut xs = [1, 2, 3, 4, 5]; @@ -650,7 +668,7 @@ fn test_rev_iterator() { #[test] fn test_mut_rev_iterator() { let mut xs = [1, 2, 3, 4, 5]; - for (i,x) in xs.iter_mut().rev().enumerate() { + for (i, x) in xs.iter_mut().rev().enumerate() { *x += i; } assert!(xs == [5, 5, 5, 5, 5]) @@ -658,35 +676,32 @@ fn test_mut_rev_iterator() { #[test] fn test_move_iterator() { - let xs = vec![1,2,3,4,5]; - assert_eq!(xs.into_iter().fold(0, |a: usize, b: usize| 10*a + b), 12345); + let xs = vec![1, 2, 3, 4, 5]; + assert_eq!(xs.into_iter().fold(0, |a: usize, b: usize| 10 * a + b), + 12345); } #[test] fn test_move_rev_iterator() { - let xs = vec![1,2,3,4,5]; - assert_eq!(xs.into_iter().rev().fold(0, |a: usize, b: usize| 10*a + b), 54321); + let xs = vec![1, 2, 3, 4, 5]; + assert_eq!(xs.into_iter().rev().fold(0, |a: usize, b: usize| 10 * a + b), + 54321); } #[test] fn test_splitator() { - let xs = &[1,2,3,4,5]; + let xs = &[1, 2, 3, 4, 5]; let splits: &[&[_]] = &[&[1], &[3], &[5]]; - assert_eq!(xs.split(|x| *x % 2 == 0).collect::>(), - splits); - let splits: &[&[_]] = &[&[], &[2,3,4,5]]; - assert_eq!(xs.split(|x| *x == 1).collect::>(), - splits); - let splits: 
&[&[_]] = &[&[1,2,3,4], &[]]; - assert_eq!(xs.split(|x| *x == 5).collect::>(), - splits); - let splits: &[&[_]] = &[&[1,2,3,4,5]]; - assert_eq!(xs.split(|x| *x == 10).collect::>(), - splits); + assert_eq!(xs.split(|x| *x % 2 == 0).collect::>(), splits); + let splits: &[&[_]] = &[&[], &[2, 3, 4, 5]]; + assert_eq!(xs.split(|x| *x == 1).collect::>(), splits); + let splits: &[&[_]] = &[&[1, 2, 3, 4], &[]]; + assert_eq!(xs.split(|x| *x == 5).collect::>(), splits); + let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]]; + assert_eq!(xs.split(|x| *x == 10).collect::>(), splits); let splits: &[&[_]] = &[&[], &[], &[], &[], &[], &[]]; - assert_eq!(xs.split(|_| true).collect::>(), - splits); + assert_eq!(xs.split(|_| true).collect::>(), splits); let xs: &[i32] = &[]; let splits: &[&[i32]] = &[&[]]; @@ -695,17 +710,14 @@ fn test_splitator() { #[test] fn test_splitnator() { - let xs = &[1,2,3,4,5]; + let xs = &[1, 2, 3, 4, 5]; - let splits: &[&[_]] = &[&[1,2,3,4,5]]; - assert_eq!(xs.splitn(1, |x| *x % 2 == 0).collect::>(), - splits); - let splits: &[&[_]] = &[&[1], &[3,4,5]]; - assert_eq!(xs.splitn(2, |x| *x % 2 == 0).collect::>(), - splits); - let splits: &[&[_]] = &[&[], &[], &[], &[4,5]]; - assert_eq!(xs.splitn(4, |_| true).collect::>(), - splits); + let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]]; + assert_eq!(xs.splitn(1, |x| *x % 2 == 0).collect::>(), splits); + let splits: &[&[_]] = &[&[1], &[3, 4, 5]]; + assert_eq!(xs.splitn(2, |x| *x % 2 == 0).collect::>(), splits); + let splits: &[&[_]] = &[&[], &[], &[], &[4, 5]]; + assert_eq!(xs.splitn(4, |_| true).collect::>(), splits); let xs: &[i32] = &[]; let splits: &[&[i32]] = &[&[]]; @@ -714,40 +726,34 @@ fn test_splitnator() { #[test] fn test_splitnator_mut() { - let xs = &mut [1,2,3,4,5]; + let xs = &mut [1, 2, 3, 4, 5]; - let splits: &[&mut[_]] = &[&mut [1,2,3,4,5]]; + let splits: &[&mut [_]] = &[&mut [1, 2, 3, 4, 5]]; assert_eq!(xs.splitn_mut(1, |x| *x % 2 == 0).collect::>(), splits); - let splits: &[&mut[_]] = &[&mut [1], &mut [3,4,5]]; 
+ let splits: &[&mut [_]] = &[&mut [1], &mut [3, 4, 5]]; assert_eq!(xs.splitn_mut(2, |x| *x % 2 == 0).collect::>(), splits); - let splits: &[&mut[_]] = &[&mut [], &mut [], &mut [], &mut [4,5]]; - assert_eq!(xs.splitn_mut(4, |_| true).collect::>(), - splits); + let splits: &[&mut [_]] = &[&mut [], &mut [], &mut [], &mut [4, 5]]; + assert_eq!(xs.splitn_mut(4, |_| true).collect::>(), splits); let xs: &mut [i32] = &mut []; - let splits: &[&mut[i32]] = &[&mut []]; - assert_eq!(xs.splitn_mut(2, |x| *x == 5).collect::>(), - splits); + let splits: &[&mut [i32]] = &[&mut []]; + assert_eq!(xs.splitn_mut(2, |x| *x == 5).collect::>(), splits); } #[test] fn test_rsplitator() { - let xs = &[1,2,3,4,5]; + let xs = &[1, 2, 3, 4, 5]; let splits: &[&[_]] = &[&[5], &[3], &[1]]; - assert_eq!(xs.split(|x| *x % 2 == 0).rev().collect::>(), - splits); - let splits: &[&[_]] = &[&[2,3,4,5], &[]]; - assert_eq!(xs.split(|x| *x == 1).rev().collect::>(), - splits); - let splits: &[&[_]] = &[&[], &[1,2,3,4]]; - assert_eq!(xs.split(|x| *x == 5).rev().collect::>(), - splits); - let splits: &[&[_]] = &[&[1,2,3,4,5]]; - assert_eq!(xs.split(|x| *x == 10).rev().collect::>(), - splits); + assert_eq!(xs.split(|x| *x % 2 == 0).rev().collect::>(), splits); + let splits: &[&[_]] = &[&[2, 3, 4, 5], &[]]; + assert_eq!(xs.split(|x| *x == 1).rev().collect::>(), splits); + let splits: &[&[_]] = &[&[], &[1, 2, 3, 4]]; + assert_eq!(xs.split(|x| *x == 5).rev().collect::>(), splits); + let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]]; + assert_eq!(xs.split(|x| *x == 10).rev().collect::>(), splits); let xs: &[i32] = &[]; let splits: &[&[i32]] = &[&[]]; @@ -756,19 +762,16 @@ fn test_rsplitator() { #[test] fn test_rsplitnator() { - let xs = &[1,2,3,4,5]; + let xs = &[1, 2, 3, 4, 5]; - let splits: &[&[_]] = &[&[1,2,3,4,5]]; - assert_eq!(xs.rsplitn(1, |x| *x % 2 == 0).collect::>(), - splits); - let splits: &[&[_]] = &[&[5], &[1,2,3]]; - assert_eq!(xs.rsplitn(2, |x| *x % 2 == 0).collect::>(), - splits); - let splits: &[&[_]] = 
&[&[], &[], &[], &[1,2]]; - assert_eq!(xs.rsplitn(4, |_| true).collect::>(), - splits); + let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]]; + assert_eq!(xs.rsplitn(1, |x| *x % 2 == 0).collect::>(), splits); + let splits: &[&[_]] = &[&[5], &[1, 2, 3]]; + assert_eq!(xs.rsplitn(2, |x| *x % 2 == 0).collect::>(), splits); + let splits: &[&[_]] = &[&[], &[], &[], &[1, 2]]; + assert_eq!(xs.rsplitn(4, |_| true).collect::>(), splits); - let xs: &[i32] = &[]; + let xs: &[i32] = &[]; let splits: &[&[i32]] = &[&[]]; assert_eq!(xs.rsplitn(2, |x| *x == 5).collect::>(), splits); assert!(xs.rsplitn(0, |x| *x % 2 == 0).next().is_none()); @@ -776,55 +779,55 @@ fn test_rsplitnator() { #[test] fn test_windowsator() { - let v = &[1,2,3,4]; + let v = &[1, 2, 3, 4]; - let wins: &[&[_]] = &[&[1,2], &[2,3], &[3,4]]; + let wins: &[&[_]] = &[&[1, 2], &[2, 3], &[3, 4]]; assert_eq!(v.windows(2).collect::>(), wins); - let wins: &[&[_]] = &[&[1,2,3], &[2,3,4]]; + let wins: &[&[_]] = &[&[1, 2, 3], &[2, 3, 4]]; assert_eq!(v.windows(3).collect::>(), wins); assert!(v.windows(6).next().is_none()); - let wins: &[&[_]] = &[&[3,4], &[2,3], &[1,2]]; + let wins: &[&[_]] = &[&[3, 4], &[2, 3], &[1, 2]]; assert_eq!(v.windows(2).rev().collect::>(), wins); } #[test] #[should_panic] fn test_windowsator_0() { - let v = &[1,2,3,4]; + let v = &[1, 2, 3, 4]; let _it = v.windows(0); } #[test] fn test_chunksator() { - let v = &[1,2,3,4,5]; + let v = &[1, 2, 3, 4, 5]; assert_eq!(v.chunks(2).len(), 3); - let chunks: &[&[_]] = &[&[1,2], &[3,4], &[5]]; + let chunks: &[&[_]] = &[&[1, 2], &[3, 4], &[5]]; assert_eq!(v.chunks(2).collect::>(), chunks); - let chunks: &[&[_]] = &[&[1,2,3], &[4,5]]; + let chunks: &[&[_]] = &[&[1, 2, 3], &[4, 5]]; assert_eq!(v.chunks(3).collect::>(), chunks); - let chunks: &[&[_]] = &[&[1,2,3,4,5]]; + let chunks: &[&[_]] = &[&[1, 2, 3, 4, 5]]; assert_eq!(v.chunks(6).collect::>(), chunks); - let chunks: &[&[_]] = &[&[5], &[3,4], &[1,2]]; + let chunks: &[&[_]] = &[&[5], &[3, 4], &[1, 2]]; 
assert_eq!(v.chunks(2).rev().collect::>(), chunks); } #[test] #[should_panic] fn test_chunksator_0() { - let v = &[1,2,3,4]; + let v = &[1, 2, 3, 4]; let _it = v.chunks(0); } #[test] fn test_reverse_part() { - let mut values = [1,2,3,4,5]; + let mut values = [1, 2, 3, 4, 5]; values[1..4].reverse(); - assert!(values == [1,4,3,2,5]); + assert!(values == [1, 4, 3, 2, 5]); } #[test] @@ -840,16 +843,15 @@ fn test_show() { test_show_vec!(empty, "[]"); test_show_vec!(vec![1], "[1]"); test_show_vec!(vec![1, 2, 3], "[1, 2, 3]"); - test_show_vec!(vec![vec![], vec![1], vec![1, 1]], - "[[], [1], [1, 1]]"); + test_show_vec!(vec![vec![], vec![1], vec![1, 1]], "[[], [1], [1, 1]]"); - let empty_mut: &mut [i32] = &mut[]; + let empty_mut: &mut [i32] = &mut []; test_show_vec!(empty_mut, "[]"); - let v = &mut[1]; + let v = &mut [1]; test_show_vec!(v, "[1]"); - let v = &mut[1, 2, 3]; + let v = &mut [1, 2, 3]; test_show_vec!(v, "[1, 2, 3]"); - let v: &mut[&mut[_]] = &mut[&mut[], &mut[1], &mut[1, 1]]; + let v: &mut [&mut [_]] = &mut [&mut [], &mut [1], &mut [1, 1]]; test_show_vec!(v, "[[], [1], [1, 1]]"); } @@ -866,17 +868,6 @@ fn test_vec_default() { t!(Vec); } -#[test] -fn test_bytes_set_memory() { - use std::slice::bytes::MutableByteVector; - - let mut values = [1,2,3,4,5]; - values[0..5].set_memory(0xAB); - assert!(values == [0xAB, 0xAB, 0xAB, 0xAB, 0xAB]); - values[2..4].set_memory(0xFF); - assert!(values == [0xAB, 0xAB, 0xFF, 0xFF, 0xAB]); -} - #[test] #[should_panic] fn test_overflow_does_not_cause_segfault() { @@ -896,7 +887,7 @@ fn test_overflow_does_not_cause_segfault_managed() { #[test] fn test_mut_split_at() { - let mut values = [1u8,2,3,4,5]; + let mut values = [1, 2, 3, 4, 5]; { let (left, right) = values.split_at_mut(2); { @@ -1003,32 +994,32 @@ fn test_ends_with() { #[test] fn test_mut_splitator() { - let mut xs = [0,1,0,2,3,0,0,4,5,0]; + let mut xs = [0, 1, 0, 2, 3, 0, 0, 4, 5, 0]; assert_eq!(xs.split_mut(|x| *x == 0).count(), 6); for slice in xs.split_mut(|x| *x == 0) { 
slice.reverse(); } - assert!(xs == [0,1,0,3,2,0,0,5,4,0]); + assert!(xs == [0, 1, 0, 3, 2, 0, 0, 5, 4, 0]); - let mut xs = [0,1,0,2,3,0,0,4,5,0,6,7]; + let mut xs = [0, 1, 0, 2, 3, 0, 0, 4, 5, 0, 6, 7]; for slice in xs.split_mut(|x| *x == 0).take(5) { slice.reverse(); } - assert!(xs == [0,1,0,3,2,0,0,5,4,0,6,7]); + assert!(xs == [0, 1, 0, 3, 2, 0, 0, 5, 4, 0, 6, 7]); } #[test] fn test_mut_splitator_rev() { - let mut xs = [1,2,0,3,4,0,0,5,6,0]; + let mut xs = [1, 2, 0, 3, 4, 0, 0, 5, 6, 0]; for slice in xs.split_mut(|x| *x == 0).rev().take(4) { slice.reverse(); } - assert!(xs == [1,2,0,4,3,0,0,6,5,0]); + assert!(xs == [1, 2, 0, 4, 3, 0, 0, 6, 5, 0]); } #[test] fn test_get_mut() { - let mut v = [0,1,2]; + let mut v = [0, 1, 2]; assert_eq!(v.get_mut(3), None); v.get_mut(1).map(|e| *e = 7); assert_eq!(v[1], 7); @@ -1094,6 +1085,7 @@ fn test_box_slice_clone() { } #[test] +#[cfg_attr(target_os = "emscripten", ignore)] fn test_box_slice_clone_panics() { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -1101,7 +1093,7 @@ fn test_box_slice_clone_panics() { struct Canary { count: Arc, - panics: bool + panics: bool, } impl Drop for Canary { @@ -1112,32 +1104,66 @@ fn test_box_slice_clone_panics() { impl Clone for Canary { fn clone(&self) -> Self { - if self.panics { panic!() } + if self.panics { + panic!() + } Canary { count: self.count.clone(), - panics: self.panics + panics: self.panics, } } } let drop_count = Arc::new(AtomicUsize::new(0)); - let canary = Canary { count: drop_count.clone(), panics: false }; - let panic = Canary { count: drop_count.clone(), panics: true }; + let canary = Canary { + count: drop_count.clone(), + panics: false, + }; + let panic = Canary { + count: drop_count.clone(), + panics: true, + }; spawn(move || { - // When xs is dropped, +5. - let xs = vec![canary.clone(), canary.clone(), canary.clone(), - panic, canary].into_boxed_slice(); + // When xs is dropped, +5. 
+ let xs = vec![canary.clone(), canary.clone(), canary.clone(), panic, canary] + .into_boxed_slice(); - // When panic is cloned, +3. - xs.clone(); - }).join().unwrap_err(); + // When panic is cloned, +3. + xs.clone(); + }) + .join() + .unwrap_err(); // Total = 8 assert_eq!(drop_count.load(Ordering::SeqCst), 8); } +#[test] +fn test_copy_from_slice() { + let src = [0, 1, 2, 3, 4, 5]; + let mut dst = [0; 6]; + dst.copy_from_slice(&src); + assert_eq!(src, dst) +} + +#[test] +#[should_panic(expected = "destination and source slices have different lengths")] +fn test_copy_from_slice_dst_longer() { + let src = [0, 1, 2, 3]; + let mut dst = [0; 5]; + dst.copy_from_slice(&src); +} + +#[test] +#[should_panic(expected = "destination and source slices have different lengths")] +fn test_copy_from_slice_dst_shorter() { + let src = [0, 1, 2, 3]; + let mut dst = [0; 3]; + dst.copy_from_slice(&src); +} + mod bench { use std::{mem, ptr}; use std::__rand::{Rng, thread_rng}; @@ -1156,7 +1182,9 @@ mod bench { sum += *x; } // sum == 11806, to stop dead code elimination. 
- if sum == 0 {panic!()} + if sum == 0 { + panic!() + } }) } @@ -1175,8 +1203,7 @@ mod bench { #[bench] fn concat(b: &mut Bencher) { - let xss: Vec> = - (0..100).map(|i| (0..i).collect()).collect(); + let xss: Vec> = (0..100).map(|i| (0..i).collect()).collect(); b.iter(|| { xss.concat(); }); @@ -1184,11 +1211,8 @@ mod bench { #[bench] fn join(b: &mut Bencher) { - let xss: Vec> = - (0..100).map(|i| (0..i).collect()).collect(); - b.iter(|| { - xss.join(&0) - }); + let xss: Vec> = (0..100).map(|i| (0..i).collect()).collect(); + b.iter(|| xss.join(&0)); } #[bench] @@ -1203,17 +1227,13 @@ mod bench { #[bench] fn starts_with_same_vector(b: &mut Bencher) { let vec: Vec<_> = (0..100).collect(); - b.iter(|| { - vec.starts_with(&vec) - }) + b.iter(|| vec.starts_with(&vec)) } #[bench] fn starts_with_single_element(b: &mut Bencher) { let vec: Vec<_> = vec![0]; - b.iter(|| { - vec.starts_with(&vec) - }) + b.iter(|| vec.starts_with(&vec)) } #[bench] @@ -1221,25 +1241,19 @@ mod bench { let vec: Vec<_> = (0..100).collect(); let mut match_vec: Vec<_> = (0..99).collect(); match_vec.push(0); - b.iter(|| { - vec.starts_with(&match_vec) - }) + b.iter(|| vec.starts_with(&match_vec)) } #[bench] fn ends_with_same_vector(b: &mut Bencher) { let vec: Vec<_> = (0..100).collect(); - b.iter(|| { - vec.ends_with(&vec) - }) + b.iter(|| vec.ends_with(&vec)) } #[bench] fn ends_with_single_element(b: &mut Bencher) { let vec: Vec<_> = vec![0]; - b.iter(|| { - vec.ends_with(&vec) - }) + b.iter(|| vec.ends_with(&vec)) } #[bench] @@ -1247,24 +1261,18 @@ mod bench { let vec: Vec<_> = (0..100).collect(); let mut match_vec: Vec<_> = (0..100).collect(); match_vec[0] = 200; - b.iter(|| { - vec.starts_with(&match_vec) - }) + b.iter(|| vec.starts_with(&match_vec)) } #[bench] fn contains_last_element(b: &mut Bencher) { let vec: Vec<_> = (0..100).collect(); - b.iter(|| { - vec.contains(&99) - }) + b.iter(|| vec.contains(&99)) } #[bench] fn zero_1kb_from_elem(b: &mut Bencher) { - b.iter(|| { - vec![0u8; 1024] - 
}); + b.iter(|| vec![0u8; 1024]); } #[bench] @@ -1314,8 +1322,7 @@ mod bench { let mut v = vec![(0, 0); 30]; for _ in 0..100 { let l = v.len(); - v.insert(rng.gen::() % (l + 1), - (1, 1)); + v.insert(rng.gen::() % (l + 1), (1, 1)); } }) } @@ -1376,8 +1383,9 @@ mod bench { fn sort_big_random_small(b: &mut Bencher) { let mut rng = thread_rng(); b.iter(|| { - let mut v = rng.gen_iter::().take(5) - .collect::>(); + let mut v = rng.gen_iter::() + .take(5) + .collect::>(); v.sort(); }); b.bytes = 5 * mem::size_of::() as u64; @@ -1387,8 +1395,9 @@ mod bench { fn sort_big_random_medium(b: &mut Bencher) { let mut rng = thread_rng(); b.iter(|| { - let mut v = rng.gen_iter::().take(100) - .collect::>(); + let mut v = rng.gen_iter::() + .take(100) + .collect::>(); v.sort(); }); b.bytes = 100 * mem::size_of::() as u64; @@ -1398,8 +1407,9 @@ mod bench { fn sort_big_random_large(b: &mut Bencher) { let mut rng = thread_rng(); b.iter(|| { - let mut v = rng.gen_iter::().take(10000) - .collect::>(); + let mut v = rng.gen_iter::() + .take(10000) + .collect::>(); v.sort(); }); b.bytes = 10000 * mem::size_of::() as u64; diff --git a/src/libcollectionstest/str.rs b/src/libcollectionstest/str.rs index 0fde70aacdca9..14a0819d381bc 100644 --- a/src/libcollectionstest/str.rs +++ b/src/libcollectionstest/str.rs @@ -8,6 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::borrow::Cow; use std::cmp::Ordering::{Equal, Greater, Less}; use std::str::from_utf8; @@ -122,8 +123,6 @@ macro_rules! 
test_concat { fn test_concat_for_different_types() { test_concat!("ab", vec![s("a"), s("b")]); test_concat!("ab", vec!["a", "b"]); - test_concat!("ab", vec!["a", "b"]); - test_concat!("ab", vec![s("a"), s("b")]); } #[test] @@ -193,24 +192,24 @@ fn test_unsafe_slice() { #[test] fn test_starts_with() { - assert!(("".starts_with(""))); - assert!(("abc".starts_with(""))); - assert!(("abc".starts_with("a"))); - assert!((!"a".starts_with("abc"))); - assert!((!"".starts_with("abc"))); - assert!((!"ödd".starts_with("-"))); - assert!(("ödd".starts_with("öd"))); + assert!("".starts_with("")); + assert!("abc".starts_with("")); + assert!("abc".starts_with("a")); + assert!(!"a".starts_with("abc")); + assert!(!"".starts_with("abc")); + assert!(!"ödd".starts_with("-")); + assert!("ödd".starts_with("öd")); } #[test] fn test_ends_with() { - assert!(("".ends_with(""))); - assert!(("abc".ends_with(""))); - assert!(("abc".ends_with("c"))); - assert!((!"a".ends_with("abc"))); - assert!((!"".ends_with("abc"))); - assert!((!"ddö".ends_with("-"))); - assert!(("ddö".ends_with("dö"))); + assert!("".ends_with("")); + assert!("abc".ends_with("")); + assert!("abc".ends_with("c")); + assert!(!"a".ends_with("abc")); + assert!(!"".ends_with("abc")); + assert!(!"ddö".ends_with("-")); + assert!("ddö".ends_with("dö")); } #[test] @@ -219,6 +218,20 @@ fn test_is_empty() { assert!(!"a".is_empty()); } +#[test] +fn test_replacen() { + assert_eq!("".replacen('a', "b", 5), ""); + assert_eq!("acaaa".replacen("a", "b", 3), "bcbba"); + assert_eq!("aaaa".replacen("a", "b", 0), "aaaa"); + + let test = "test"; + assert_eq!(" test test ".replacen(test, "toast", 3), " toast toast "); + assert_eq!(" test test ".replacen(test, "toast", 0), " test test "); + assert_eq!(" test test ".replacen(test, "", 5), " "); + + assert_eq!("qwer123zxc789".replacen(char::is_numeric, "", 3), "qwerzxc789"); +} + #[test] fn test_replace() { let a = "a"; @@ -345,6 +358,42 @@ fn test_slice_fail() { &"中华Việt Nam"[0..2]; } + +#[test] +fn 
test_is_char_boundary() { + let s = "ศไทย中华Việt Nam β-release 🐱123"; + assert!(s.is_char_boundary(0)); + assert!(s.is_char_boundary(s.len())); + assert!(!s.is_char_boundary(s.len() + 1)); + for (i, ch) in s.char_indices() { + // ensure character locations are boundaries and continuation bytes are not + assert!(s.is_char_boundary(i), "{} is a char boundary in {:?}", i, s); + for j in 1..ch.len_utf8() { + assert!(!s.is_char_boundary(i + j), + "{} should not be a char boundary in {:?}", i + j, s); + } + } +} +const LOREM_PARAGRAPH: &'static str = "\ +Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \ +ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \ +eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \ +sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \ +tempus vel, gravida nec quam."; + +// check the panic includes the prefix of the sliced string +#[test] +#[should_panic(expected="Lorem ipsum dolor sit amet")] +fn test_slice_fail_truncated_1() { + &LOREM_PARAGRAPH[..1024]; +} +// check the truncation in the panic message +#[test] +#[should_panic(expected="luctus, im`[...] do not lie on character boundary")] +fn test_slice_fail_truncated_2() { + &LOREM_PARAGRAPH[..1024]; +} + #[test] fn test_slice_from() { assert_eq!(&"abcd"[0..], "abcd"); @@ -442,18 +491,6 @@ fn test_is_whitespace() { assert!(!" 
_ ".chars().all(|c| c.is_whitespace())); } -#[test] -fn test_slice_shift_char() { - let data = "ประเทศไทย中"; - assert_eq!(data.slice_shift_char(), Some(('ป', "ระเทศไทย中"))); -} - -#[test] -fn test_slice_shift_char_2() { - let empty = ""; - assert_eq!(empty.slice_shift_char(), None); -} - #[test] fn test_is_utf8() { // deny overlong encodings @@ -605,8 +642,6 @@ fn vec_str_conversions() { while i < n1 { let a: u8 = s1.as_bytes()[i]; let b: u8 = s2.as_bytes()[i]; - debug!("{}", a); - debug!("{}", b); assert_eq!(a, b); i += 1; } @@ -637,28 +672,6 @@ fn test_contains_char() { assert!(!"".contains('a')); } -#[test] -fn test_char_at() { - let s = "ศไทย中华Việt Nam"; - let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; - let mut pos = 0; - for ch in &v { - assert!(s.char_at(pos) == *ch); - pos += ch.to_string().len(); - } -} - -#[test] -fn test_char_at_reverse() { - let s = "ศไทย中华Việt Nam"; - let v = vec!['ศ','ไ','ท','ย','中','华','V','i','ệ','t',' ','N','a','m']; - let mut pos = s.len(); - for ch in v.iter().rev() { - assert!(s.char_at_reverse(pos) == *ch); - pos -= ch.to_string().len(); - } -} - #[test] fn test_split_at() { let s = "ศไทย中华Việt Nam"; @@ -704,16 +717,32 @@ fn test_escape_unicode() { assert_eq!("\u{1d4ea}\r".escape_unicode(), "\\u{1d4ea}\\u{d}"); } +#[test] +fn test_escape_debug() { + assert_eq!("abc".escape_debug(), "abc"); + assert_eq!("a c".escape_debug(), "a c"); + assert_eq!("éèê".escape_debug(), "éèê"); + assert_eq!("\r\n\t".escape_debug(), "\\r\\n\\t"); + assert_eq!("'\"\\".escape_debug(), "\\'\\\"\\\\"); + assert_eq!("\u{7f}\u{ff}".escape_debug(), "\\u{7f}\u{ff}"); + assert_eq!("\u{100}\u{ffff}".escape_debug(), "\u{100}\\u{ffff}"); + assert_eq!("\u{10000}\u{10ffff}".escape_debug(), "\u{10000}\\u{10ffff}"); + assert_eq!("ab\u{200b}".escape_debug(), "ab\\u{200b}"); + assert_eq!("\u{10d4ea}\r".escape_debug(), "\\u{10d4ea}\\r"); +} + #[test] fn test_escape_default() { assert_eq!("abc".escape_default(), "abc"); assert_eq!("a 
c".escape_default(), "a c"); + assert_eq!("éèê".escape_default(), "\\u{e9}\\u{e8}\\u{ea}"); assert_eq!("\r\n\t".escape_default(), "\\r\\n\\t"); assert_eq!("'\"\\".escape_default(), "\\'\\\"\\\\"); + assert_eq!("\u{7f}\u{ff}".escape_default(), "\\u{7f}\\u{ff}"); assert_eq!("\u{100}\u{ffff}".escape_default(), "\\u{100}\\u{ffff}"); assert_eq!("\u{10000}\u{10ffff}".escape_default(), "\\u{10000}\\u{10ffff}"); - assert_eq!("ab\u{fb00}".escape_default(), "ab\\u{fb00}"); - assert_eq!("\u{1d4ea}\r".escape_default(), "\\u{1d4ea}\\r"); + assert_eq!("ab\u{200b}".escape_default(), "ab\\u{200b}"); + assert_eq!("\u{10d4ea}\r".escape_default(), "\\u{10d4ea}\\r"); } #[test] @@ -725,24 +754,6 @@ fn test_total_ord() { assert_eq!("22".cmp("1234"), Greater); } -#[test] -fn test_char_range_at() { - let data = "b¢€𤭢𤭢€¢b"; - assert_eq!('b', data.char_range_at(0).ch); - assert_eq!('¢', data.char_range_at(1).ch); - assert_eq!('€', data.char_range_at(3).ch); - assert_eq!('𤭢', data.char_range_at(6).ch); - assert_eq!('𤭢', data.char_range_at(10).ch); - assert_eq!('€', data.char_range_at(14).ch); - assert_eq!('¢', data.char_range_at(17).ch); - assert_eq!('b', data.char_range_at(19).ch); -} - -#[test] -fn test_char_range_at_reverse_underflow() { - assert_eq!("abc".char_range_at_reverse(0).next, 0); -} - #[test] fn test_iterator() { let s = "ศไทย中华Việt Nam"; @@ -756,6 +767,7 @@ fn test_iterator() { pos += 1; } assert_eq!(pos, v.len()); + assert_eq!(s.chars().count(), v.len()); } #[test] @@ -777,8 +789,7 @@ fn test_rev_iterator() { fn test_chars_decoding() { let mut bytes = [0; 4]; for c in (0..0x110000).filter_map(::std::char::from_u32) { - let len = c.encode_utf8(&mut bytes).unwrap_or(0); - let s = ::std::str::from_utf8(&bytes[..len]).unwrap(); + let s = c.encode_utf8(&mut bytes); if Some(c) != s.chars().next() { panic!("character {:x}={} does not decode correctly", c as u32, c); } @@ -789,8 +800,7 @@ fn test_chars_decoding() { fn test_chars_rev_decoding() { let mut bytes = [0; 4]; for c in 
(0..0x110000).filter_map(::std::char::from_u32) { - let len = c.encode_utf8(&mut bytes).unwrap_or(0); - let s = ::std::str::from_utf8(&bytes[..len]).unwrap(); + let s = c.encode_utf8(&mut bytes); if Some(c) != s.chars().rev().next() { panic!("character {:x}={} does not decode correctly", c as u32, c); } @@ -805,6 +815,14 @@ fn test_iterator_clone() { assert!(it.clone().zip(it).all(|(x,y)| x == y)); } +#[test] +fn test_iterator_last() { + let s = "ศไทย中华Việt Nam"; + let mut it = s.chars(); + it.next(); + assert_eq!(it.last(), Some('m')); +} + #[test] fn test_bytesator() { let s = "ศไทย中华Việt Nam"; @@ -902,6 +920,14 @@ fn test_char_indices_revator() { assert_eq!(pos, p.len()); } +#[test] +fn test_char_indices_last() { + let s = "ศไทย中华Việt Nam"; + let mut it = s.char_indices(); + it.next(); + assert_eq!(it.last(), Some((27, 'm'))); +} + #[test] fn test_splitn_char_iterator() { let data = "\nMäry häd ä little lämb\nLittle lämb\n"; @@ -1267,6 +1293,23 @@ fn test_box_slice_clone() { assert_eq!(data, data2); } +#[test] +fn test_cow_from() { + let borrowed = "borrowed"; + let owned = String::from("owned"); + match (Cow::from(owned.clone()), Cow::from(borrowed)) { + (Cow::Owned(o), Cow::Borrowed(b)) => assert!(o == owned && b == borrowed), + _ => panic!("invalid `Cow::from`"), + } +} + +#[test] +fn test_repeat() { + assert_eq!("".repeat(3), ""); + assert_eq!("abc".repeat(0), ""); + assert_eq!("α".repeat(3), "ααα"); +} + mod pattern { use std::str::pattern::Pattern; use std::str::pattern::{Searcher, ReverseSearcher}; @@ -1497,6 +1540,19 @@ generate_iterator_test! 
{ with str::rsplitn; } +#[test] +fn different_str_pattern_forwarding_lifetimes() { + use std::str::pattern::Pattern; + + fn foo<'a, P>(p: P) where for<'b> &'b P: Pattern<'a> { + for _ in 0..3 { + "asdf".find(&p); + } + } + + foo::<&str>("x"); +} + mod bench { use test::{Bencher, black_box}; diff --git a/src/libcollectionstest/string.rs b/src/libcollectionstest/string.rs index 89df77d074fdc..cb4fcb58452da 100644 --- a/src/libcollectionstest/string.rs +++ b/src/libcollectionstest/string.rs @@ -8,15 +8,37 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::borrow::{IntoCow, Cow}; +use std::borrow::Cow; use std::iter::repeat; use test::Bencher; +pub trait IntoCow<'a, B: ?Sized> where B: ToOwned { + fn into_cow(self) -> Cow<'a, B>; +} + +impl<'a> IntoCow<'a, str> for String { + fn into_cow(self) -> Cow<'a, str> { + Cow::Owned(self) + } +} + +impl<'a> IntoCow<'a, str> for &'a str { + fn into_cow(self) -> Cow<'a, str> { + Cow::Borrowed(self) + } +} + #[test] fn test_from_str() { - let owned: Option<::std::string::String> = "string".parse().ok(); - assert_eq!(owned.as_ref().map(|s| &**s), Some("string")); + let owned: Option<::std::string::String> = "string".parse().ok(); + assert_eq!(owned.as_ref().map(|s| &**s), Some("string")); +} + +#[test] +fn test_from_cow_str() { + assert_eq!(String::from(Cow::Borrowed("string")), "string"); + assert_eq!(String::from(Cow::Owned(String::from("string"))), "string"); } #[test] @@ -28,15 +50,14 @@ fn test_unsized_to_string() { #[test] fn test_from_utf8() { let xs = b"hello".to_vec(); - assert_eq!(String::from_utf8(xs).unwrap(), - String::from("hello")); + assert_eq!(String::from_utf8(xs).unwrap(), String::from("hello")); let xs = "ศไทย中华Việt Nam".as_bytes().to_vec(); assert_eq!(String::from_utf8(xs).unwrap(), String::from("ศไทย中华Việt Nam")); let xs = b"hello\xFF".to_vec(); - let err = String::from_utf8(xs).err().unwrap(); + let err = String::from_utf8(xs).unwrap_err(); 
assert_eq!(err.into_bytes(), b"hello\xff".to_vec()); } @@ -71,60 +92,44 @@ fn test_from_utf8_lossy() { String::from("\u{FFFD}foo\u{FFFD}bar\u{FFFD}\u{FFFD}baz").into_cow()); let xs = b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar"; - assert_eq!(String::from_utf8_lossy(xs), String::from("\u{FFFD}\u{FFFD}\u{FFFD}\u{FFFD}\ - foo\u{10000}bar").into_cow()); + assert_eq!(String::from_utf8_lossy(xs), + String::from("\u{FFFD}\u{FFFD}\u{FFFD}\u{FFFD}foo\u{10000}bar").into_cow()); // surrogates let xs = b"\xED\xA0\x80foo\xED\xBF\xBFbar"; - assert_eq!(String::from_utf8_lossy(xs), String::from("\u{FFFD}\u{FFFD}\u{FFFD}foo\ - \u{FFFD}\u{FFFD}\u{FFFD}bar").into_cow()); + assert_eq!(String::from_utf8_lossy(xs), + String::from("\u{FFFD}\u{FFFD}\u{FFFD}foo\u{FFFD}\u{FFFD}\u{FFFD}bar").into_cow()); } #[test] fn test_from_utf16() { - let pairs = - [(String::from("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"), - vec![0xd800, 0xdf45, 0xd800, 0xdf3f, - 0xd800, 0xdf3b, 0xd800, 0xdf46, - 0xd800, 0xdf39, 0xd800, 0xdf3b, - 0xd800, 0xdf30, 0x000a]), - - (String::from("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"), - vec![0xd801, 0xdc12, 0xd801, - 0xdc49, 0xd801, 0xdc2e, 0xd801, - 0xdc40, 0xd801, 0xdc32, 0xd801, - 0xdc4b, 0x0020, 0xd801, 0xdc0f, - 0xd801, 0xdc32, 0xd801, 0xdc4d, - 0x000a]), - - (String::from("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"), - vec![0xd800, 0xdf00, 0xd800, 0xdf16, - 0xd800, 0xdf0b, 0xd800, 0xdf04, - 0xd800, 0xdf11, 0xd800, 0xdf09, - 0x00b7, 0xd800, 0xdf0c, 0xd800, - 0xdf04, 0xd800, 0xdf15, 0xd800, - 0xdf04, 0xd800, 0xdf0b, 0xd800, - 0xdf09, 0xd800, 0xdf11, 0x000a ]), - - (String::from("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"), - vec![0xd801, 0xdc8b, 0xd801, 0xdc98, - 0xd801, 0xdc88, 0xd801, 0xdc91, - 0xd801, 0xdc9b, 0xd801, 0xdc92, - 0x0020, 0xd801, 0xdc95, 0xd801, - 0xdc93, 0x0020, 0xd801, 0xdc88, - 0xd801, 0xdc9a, 0xd801, 0xdc8d, - 0x0020, 0xd801, 0xdc8f, 0xd801, - 0xdc9c, 0xd801, 0xdc92, 0xd801, - 0xdc96, 0xd801, 0xdc86, 0x0020, - 0xd801, 0xdc95, 0xd801, 0xdc86, - 0x000a ]), - // Issue #12318, even-numbered non-BMP planes - (String::from("\u{20000}"), - vec![0xD840, 
0xDC00])]; + let pairs = [(String::from("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"), + vec![0xd800, 0xdf45, 0xd800, 0xdf3f, 0xd800, 0xdf3b, 0xd800, 0xdf46, 0xd800, + 0xdf39, 0xd800, 0xdf3b, 0xd800, 0xdf30, 0x000a]), + + (String::from("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"), + vec![0xd801, 0xdc12, 0xd801, 0xdc49, 0xd801, 0xdc2e, 0xd801, 0xdc40, 0xd801, + 0xdc32, 0xd801, 0xdc4b, 0x0020, 0xd801, 0xdc0f, 0xd801, 0xdc32, 0xd801, + 0xdc4d, 0x000a]), + + (String::from("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"), + vec![0xd800, 0xdf00, 0xd800, 0xdf16, 0xd800, 0xdf0b, 0xd800, 0xdf04, 0xd800, + 0xdf11, 0xd800, 0xdf09, 0x00b7, 0xd800, 0xdf0c, 0xd800, 0xdf04, 0xd800, + 0xdf15, 0xd800, 0xdf04, 0xd800, 0xdf0b, 0xd800, 0xdf09, 0xd800, 0xdf11, + 0x000a]), + + (String::from("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"), + vec![0xd801, 0xdc8b, 0xd801, 0xdc98, 0xd801, 0xdc88, 0xd801, 0xdc91, 0xd801, + 0xdc9b, 0xd801, 0xdc92, 0x0020, 0xd801, 0xdc95, 0xd801, 0xdc93, 0x0020, + 0xd801, 0xdc88, 0xd801, 0xdc9a, 0xd801, 0xdc8d, 0x0020, 0xd801, 0xdc8f, + 0xd801, 0xdc9c, 0xd801, 0xdc92, 0xd801, 0xdc96, 0xd801, 0xdc86, 0x0020, + 0xd801, 0xdc95, 0xd801, 0xdc86, 0x000a]), + // Issue #12318, even-numbered non-BMP planes + (String::from("\u{20000}"), vec![0xD840, 0xDC00])]; for p in &pairs { let (s, u) = (*p).clone(); - let s_as_utf16 = s.utf16_units().collect::>(); + let s_as_utf16 = s.encode_utf16().collect::>(); let u_as_string = String::from_utf16(&u).unwrap(); assert!(::rustc_unicode::str::is_utf16(&u)); @@ -134,7 +139,7 @@ fn test_from_utf16() { assert_eq!(String::from_utf16_lossy(&u), s); assert_eq!(String::from_utf16(&s_as_utf16).unwrap(), s); - assert_eq!(u_as_string.utf16_units().collect::>(), u); + assert_eq!(u_as_string.encode_utf16().collect::>(), u); } } @@ -157,13 +162,15 @@ fn test_utf16_invalid() { fn test_from_utf16_lossy() { // completely positive cases tested above. 
// lead + eof - assert_eq!(String::from_utf16_lossy(&[0xD800]), String::from("\u{FFFD}")); + assert_eq!(String::from_utf16_lossy(&[0xD800]), + String::from("\u{FFFD}")); // lead + lead assert_eq!(String::from_utf16_lossy(&[0xD800, 0xD800]), String::from("\u{FFFD}\u{FFFD}")); // isolated trail - assert_eq!(String::from_utf16_lossy(&[0x0061, 0xDC00]), String::from("a\u{FFFD}")); + assert_eq!(String::from_utf16_lossy(&[0x0061, 0xDC00]), + String::from("a\u{FFFD}")); // general assert_eq!(String::from_utf16_lossy(&[0xD800, 0xd801, 0xdc8b, 0xD800]), @@ -175,7 +182,7 @@ fn test_push_bytes() { let mut s = String::from("ABC"); unsafe { let mv = s.as_mut_vec(); - mv.push_all(&[b'D']); + mv.extend_from_slice(&[b'D']); } assert_eq!(s, "ABCD"); } @@ -191,6 +198,17 @@ fn test_push_str() { assert_eq!(&s[0..], "abcประเทศไทย中华Việt Nam"); } +#[test] +fn test_add_assign() { + let mut s = String::new(); + s += ""; + assert_eq!(s.as_str(), ""); + s += "abc"; + assert_eq!(s.as_str(), "abc"); + s += "ประเทศไทย中华Việt Nam"; + assert_eq!(s.as_str(), "abcประเทศไทย中华Việt Nam"); +} + #[test] fn test_push() { let mut data = String::from("ประเทศไทย中"); @@ -213,6 +231,45 @@ fn test_pop() { assert_eq!(data, "ประเทศไทย中"); } +#[test] +fn test_split_off_empty() { + let orig = "Hello, world!"; + let mut split = String::from(orig); + let empty: String = split.split_off(orig.len()); + assert!(empty.is_empty()); +} + +#[test] +#[should_panic] +fn test_split_off_past_end() { + let orig = "Hello, world!"; + let mut split = String::from(orig); + split.split_off(orig.len() + 1); +} + +#[test] +#[should_panic] +fn test_split_off_mid_char() { + let mut orig = String::from("山"); + orig.split_off(1); +} + +#[test] +fn test_split_off_ascii() { + let mut ab = String::from("ABCD"); + let cd = ab.split_off(2); + assert_eq!(ab, "AB"); + assert_eq!(cd, "CD"); +} + +#[test] +fn test_split_off_unicode() { + let mut nihon = String::from("日本語"); + let go = nihon.split_off("日本".len()); + assert_eq!(nihon, "日本"); + 
assert_eq!(go, "語"); +} + #[test] fn test_str_truncate() { let mut s = String::from("12345"); @@ -232,10 +289,10 @@ fn test_str_truncate() { } #[test] -#[should_panic] fn test_str_truncate_invalid_len() { let mut s = String::from("12345"); s.truncate(6); + assert_eq!(s, "12345"); } #[test] @@ -272,7 +329,8 @@ fn remove() { assert_eq!(s, "ไทย中华Vit Nam; foobar"); } -#[test] #[should_panic] +#[test] +#[should_panic] fn remove_bad() { "ศ".to_string().remove(1); } @@ -286,8 +344,16 @@ fn insert() { assert_eq!(s, "ệfooยbar"); } -#[test] #[should_panic] fn insert_bad1() { "".to_string().insert(1, 't'); } -#[test] #[should_panic] fn insert_bad2() { "ệ".to_string().insert(1, 't'); } +#[test] +#[should_panic] +fn insert_bad1() { + "".to_string().insert(1, 't'); +} +#[test] +#[should_panic] +fn insert_bad2() { + "ệ".to_string().insert(1, 't'); +} #[test] fn test_slicing() { @@ -315,8 +381,7 @@ fn test_vectors() { assert_eq!(format!("{:?}", x), "[]"); assert_eq!(format!("{:?}", vec![1]), "[1]"); assert_eq!(format!("{:?}", vec![1, 2, 3]), "[1, 2, 3]"); - assert!(format!("{:?}", vec![vec![], vec![1], vec![1, 1]]) == - "[[], [1], [1, 1]]"); + assert!(format!("{:?}", vec![vec![], vec![1], vec![1, 1]]) == "[[], [1], [1, 1]]"); } #[test] @@ -374,9 +439,7 @@ fn test_into_boxed_str() { #[bench] fn bench_with_capacity(b: &mut Bencher) { - b.iter(|| { - String::with_capacity(100) - }); + b.iter(|| String::with_capacity(100)); } #[bench] @@ -479,25 +542,19 @@ fn bench_exact_size_shrink_to_fit(b: &mut Bencher) { fn bench_from_str(b: &mut Bencher) { let s = "Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "; - b.iter(|| { - String::from(s) - }) + b.iter(|| String::from(s)) } #[bench] fn bench_from(b: &mut Bencher) { let s = "Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. 
"; - b.iter(|| { - String::from(s) - }) + b.iter(|| String::from(s)) } #[bench] fn bench_to_string(b: &mut Bencher) { let s = "Hello there, the quick brown fox jumped over the lazy dog! \ Lorem ipsum dolor sit amet, consectetur. "; - b.iter(|| { - s.to_string() - }) + b.iter(|| s.to_string()) } diff --git a/src/libcollectionstest/vec.rs b/src/libcollectionstest/vec.rs index 554be72e42681..3bc1321d75653 100644 --- a/src/libcollectionstest/vec.rs +++ b/src/libcollectionstest/vec.rs @@ -8,13 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::ascii::AsciiExt; +use std::borrow::Cow; use std::iter::{FromIterator, repeat}; use std::mem::size_of; +use std::vec::{Drain, IntoIter}; use test::Bencher; struct DropCounter<'a> { - count: &'a mut u32 + count: &'a mut u32, } impl<'a> Drop for DropCounter<'a> { @@ -32,17 +35,17 @@ fn test_small_vec_struct() { fn test_double_drop() { struct TwoVec { x: Vec, - y: Vec + y: Vec, } let (mut count_x, mut count_y) = (0, 0); { let mut tv = TwoVec { x: Vec::new(), - y: Vec::new() + y: Vec::new(), }; - tv.x.push(DropCounter {count: &mut count_x}); - tv.y.push(DropCounter {count: &mut count_y}); + tv.x.push(DropCounter { count: &mut count_x }); + tv.y.push(DropCounter { count: &mut count_y }); // If Vec had a drop flag, here is where it would be zeroed. 
// Instead, it should rely on its internal state to prevent @@ -84,14 +87,21 @@ fn test_extend() { let mut w = Vec::new(); v.extend(0..3); - for i in 0..3 { w.push(i) } + for i in 0..3 { + w.push(i) + } assert_eq!(v, w); v.extend(3..10); - for i in 3..10 { w.push(i) } + for i in 3..10 { + w.push(i) + } assert_eq!(v, w); + + v.extend(w.clone()); // specializes to `append` + assert!(v.iter().eq(w.iter().chain(w.iter()))); } #[test] @@ -113,7 +123,7 @@ fn test_extend_ref() { fn test_slice_from_mut() { let mut values = vec![1, 2, 3, 4, 5]; { - let slice = &mut values[2 ..]; + let slice = &mut values[2..]; assert!(slice == [3, 4, 5]); for p in slice { *p += 2; @@ -127,7 +137,7 @@ fn test_slice_from_mut() { fn test_slice_to_mut() { let mut values = vec![1, 2, 3, 4, 5]; { - let slice = &mut values[.. 2]; + let slice = &mut values[..2]; assert!(slice == [1, 2]); for p in slice { *p += 1; @@ -165,7 +175,7 @@ fn test_split_at_mut() { #[test] fn test_clone() { let v: Vec = vec![]; - let w = vec!(1, 2, 3); + let w = vec![1, 2, 3]; assert_eq!(v, v.clone()); @@ -177,9 +187,9 @@ fn test_clone() { #[test] fn test_clone_from() { - let mut v = vec!(); - let three: Vec> = vec!(box 1, box 2, box 3); - let two: Vec> = vec!(box 4, box 5); + let mut v = vec![]; + let three: Vec> = vec![box 1, box 2, box 3]; + let two: Vec> = vec![box 4, box 5]; // zero, long v.clone_from(&three); assert_eq!(v, three); @@ -204,6 +214,60 @@ fn test_retain() { assert_eq!(vec, [2, 4]); } +#[test] +fn test_dedup() { + fn case(a: Vec, b: Vec) { + let mut v = a; + v.dedup(); + assert_eq!(v, b); + } + case(vec![], vec![]); + case(vec![1], vec![1]); + case(vec![1, 1], vec![1]); + case(vec![1, 2, 3], vec![1, 2, 3]); + case(vec![1, 1, 2, 3], vec![1, 2, 3]); + case(vec![1, 2, 2, 3], vec![1, 2, 3]); + case(vec![1, 2, 3, 3], vec![1, 2, 3]); + case(vec![1, 1, 2, 2, 2, 3, 3], vec![1, 2, 3]); +} + +#[test] +fn test_dedup_by_key() { + fn case(a: Vec, b: Vec) { + let mut v = a; + v.dedup_by_key(|i| *i / 10); + 
assert_eq!(v, b); + } + case(vec![], vec![]); + case(vec![10], vec![10]); + case(vec![10, 11], vec![10]); + case(vec![10, 20, 30], vec![10, 20, 30]); + case(vec![10, 11, 20, 30], vec![10, 20, 30]); + case(vec![10, 20, 21, 30], vec![10, 20, 30]); + case(vec![10, 20, 30, 31], vec![10, 20, 30]); + case(vec![10, 11, 20, 21, 22, 30, 31], vec![10, 20, 30]); +} + +#[test] +fn test_dedup_by() { + let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"]; + vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b)); + + assert_eq!(vec, ["foo", "bar", "baz", "bar"]); +} + +#[test] +fn test_dedup_unique() { + let mut v0: Vec> = vec![box 1, box 1, box 2, box 3]; + v0.dedup(); + let mut v1: Vec> = vec![box 1, box 2, box 2, box 3]; + v1.dedup(); + let mut v2: Vec> = vec![box 1, box 2, box 3, box 3]; + v2.dedup(); + // If the boxed pointers were leaked or otherwise misused, valgrind + // and/or rt should raise errors. +} + #[test] fn zero_sized_values() { let mut v = Vec::new(); @@ -231,16 +295,22 @@ fn zero_sized_values() { assert_eq!(v.iter_mut().count(), 4); for &mut () in &mut v {} - unsafe { v.set_len(0); } + unsafe { + v.set_len(0); + } assert_eq!(v.iter_mut().count(), 0); } #[test] fn test_partition() { - assert_eq!(vec![].into_iter().partition(|x: &i32| *x < 3), (vec![], vec![])); - assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 4), (vec![1, 2, 3], vec![])); - assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 2), (vec![1], vec![2, 3])); - assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 0), (vec![], vec![1, 2, 3])); + assert_eq!(vec![].into_iter().partition(|x: &i32| *x < 3), + (vec![], vec![])); + assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 4), + (vec![1, 2, 3], vec![])); + assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 2), + (vec![1], vec![2, 3])); + assert_eq!(vec![1, 2, 3].into_iter().partition(|x| *x < 0), + (vec![], vec![1, 2, 3])); } #[test] @@ -256,20 +326,22 @@ fn test_zip_unzip() { #[test] fn test_vec_truncate_drop() { - static mut 
drops: u32 = 0; + static mut DROPS: u32 = 0; struct Elem(i32); impl Drop for Elem { fn drop(&mut self) { - unsafe { drops += 1; } + unsafe { + DROPS += 1; + } } } let mut v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)]; - assert_eq!(unsafe { drops }, 0); + assert_eq!(unsafe { DROPS }, 0); v.truncate(3); - assert_eq!(unsafe { drops }, 2); + assert_eq!(unsafe { DROPS }, 2); v.truncate(0); - assert_eq!(unsafe { drops }, 5); + assert_eq!(unsafe { DROPS }, 5); } #[test] @@ -340,7 +412,7 @@ fn test_slice_out_of_bounds_5() { #[test] #[should_panic] fn test_swap_remove_empty() { - let mut vec= Vec::::new(); + let mut vec = Vec::::new(); vec.swap_remove(0); } @@ -382,7 +454,7 @@ fn test_drain_items() { vec2.push(i); } assert_eq!(vec, []); - assert_eq!(vec2, [ 1, 2, 3 ]); + assert_eq!(vec2, [1, 2, 3]); } #[test] @@ -461,11 +533,88 @@ fn test_split_off() { assert_eq!(vec2, [5, 6]); } +#[test] +fn test_into_iter_as_slice() { + let vec = vec!['a', 'b', 'c']; + let mut into_iter = vec.into_iter(); + assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + let _ = into_iter.next().unwrap(); + assert_eq!(into_iter.as_slice(), &['b', 'c']); + let _ = into_iter.next().unwrap(); + let _ = into_iter.next().unwrap(); + assert_eq!(into_iter.as_slice(), &[]); +} + +#[test] +fn test_into_iter_as_mut_slice() { + let vec = vec!['a', 'b', 'c']; + let mut into_iter = vec.into_iter(); + assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']); + into_iter.as_mut_slice()[0] = 'x'; + into_iter.as_mut_slice()[1] = 'y'; + assert_eq!(into_iter.next().unwrap(), 'x'); + assert_eq!(into_iter.as_slice(), &['y', 'c']); +} + +#[test] +fn test_into_iter_debug() { + let vec = vec!['a', 'b', 'c']; + let into_iter = vec.into_iter(); + let debug = format!("{:?}", into_iter); + assert_eq!(debug, "IntoIter(['a', 'b', 'c'])"); +} + #[test] fn test_into_iter_count() { assert_eq!(vec![1, 2, 3].into_iter().count(), 3); } +#[test] +fn test_into_iter_clone() { + fn iter_equal>(it: I, slice: &[i32]) { + let v: Vec = 
it.collect(); + assert_eq!(&v[..], slice); + } + let mut it = vec![1, 2, 3].into_iter(); + iter_equal(it.clone(), &[1, 2, 3]); + assert_eq!(it.next(), Some(1)); + let mut it = it.rev(); + iter_equal(it.clone(), &[3, 2]); + assert_eq!(it.next(), Some(3)); + iter_equal(it.clone(), &[2]); + assert_eq!(it.next(), Some(2)); + iter_equal(it.clone(), &[]); + assert_eq!(it.next(), None); +} + +#[test] +fn test_cow_from() { + let borrowed: &[_] = &["borrowed", "(slice)"]; + let owned = vec!["owned", "(vec)"]; + match (Cow::from(owned.clone()), Cow::from(borrowed)) { + (Cow::Owned(o), Cow::Borrowed(b)) => assert!(o == owned && b == borrowed), + _ => panic!("invalid `Cow::from`"), + } +} + +#[test] +fn test_from_cow() { + let borrowed: &[_] = &["borrowed", "(slice)"]; + let owned = vec!["owned", "(vec)"]; + assert_eq!(Vec::from(Cow::Borrowed(borrowed)), vec!["borrowed", "(slice)"]); + assert_eq!(Vec::from(Cow::Owned(owned)), vec!["owned", "(vec)"]); +} + +#[allow(dead_code)] +fn assert_covariance() { + fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { + d + } + fn into_iter<'new>(i: IntoIter<&'static str>) -> IntoIter<&'new str> { + i + } +} + #[bench] fn bench_new(b: &mut Bencher) { b.iter(|| { @@ -686,7 +835,7 @@ fn do_bench_push_all(b: &mut Bencher, dst_len: usize, src_len: usize) { b.iter(|| { let mut dst = dst.clone(); - dst.push_all(&src); + dst.extend_from_slice(&src); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); diff --git a/src/libcollectionstest/vec_deque.rs b/src/libcollectionstest/vec_deque.rs index 5f587789bd865..f1ea85a6c5bed 100644 --- a/src/libcollectionstest/vec_deque.rs +++ b/src/libcollectionstest/vec_deque.rs @@ -10,6 +10,7 @@ use std::collections::VecDeque; use std::fmt::Debug; +use std::collections::vec_deque::Drain; use test; @@ -45,10 +46,6 @@ fn test_simple() { assert_eq!(d.len(), 3); d.push_front(1); assert_eq!(d.len(), 4); - debug!("{}", d[0]); - debug!("{}", d[1]); - 
debug!("{}", d[2]); - debug!("{}", d[3]); assert_eq!(d[0], 1); assert_eq!(d[1], 2); assert_eq!(d[2], 3); @@ -56,7 +53,7 @@ fn test_simple() { } #[cfg(test)] -fn test_parameterized(a: T, b: T, c: T, d: T) { +fn test_parameterized(a: T, b: T, c: T, d: T) { let mut deq = VecDeque::new(); assert_eq!(deq.len(), 0); deq.push_front(a.clone()); @@ -190,7 +187,7 @@ enum Taggypar { struct RecCy { x: i32, y: i32, - t: Taggy + t: Taggy, } #[test] @@ -213,10 +210,26 @@ fn test_param_taggypar() { #[test] fn test_param_reccy() { - let reccy1 = RecCy { x: 1, y: 2, t: One(1) }; - let reccy2 = RecCy { x: 345, y: 2, t: Two(1, 2) }; - let reccy3 = RecCy { x: 1, y: 777, t: Three(1, 2, 3) }; - let reccy4 = RecCy { x: 19, y: 252, t: Two(17, 42) }; + let reccy1 = RecCy { + x: 1, + y: 2, + t: One(1), + }; + let reccy2 = RecCy { + x: 345, + y: 2, + t: Two(1, 2), + }; + let reccy3 = RecCy { + x: 1, + y: 777, + t: Three(1, 2, 3), + }; + let reccy4 = RecCy { + x: 19, + y: 252, + t: Two(17, 42), + }; test_parameterized::(reccy1, reccy2, reccy3, reccy4); } @@ -261,13 +274,13 @@ fn test_with_capacity_non_power_two() { // underlying Vec which didn't hold and lead // to corruption. // (Vec grows to next power of two) - //good- [9, 12, 15, X, X, X, X, |6] - //bug- [15, 12, X, X, X, |6, X, X] + // good- [9, 12, 15, X, X, X, X, |6] + // bug- [15, 12, X, X, X, |6, X, X] assert_eq!(d3.pop_front(), Some(6)); // Which leads us to the following state which // would be a failure case. 
- //bug- [15, 12, X, X, X, X, |X, X] + // bug- [15, 12, X, X, X, X, |X, X] assert_eq!(d3.front(), Some(&9)); } @@ -305,7 +318,7 @@ fn test_iter() { d.push_back(i); } { - let b: &[_] = &[&0,&1,&2,&3,&4]; + let b: &[_] = &[&0, &1, &2, &3, &4]; assert_eq!(d.iter().collect::>(), b); } @@ -313,7 +326,7 @@ fn test_iter() { d.push_front(i); } { - let b: &[_] = &[&8,&7,&6,&0,&1,&2,&3,&4]; + let b: &[_] = &[&8, &7, &6, &0, &1, &2, &3, &4]; assert_eq!(d.iter().collect::>(), b); } @@ -322,7 +335,10 @@ fn test_iter() { loop { match it.next() { None => break, - _ => { len -= 1; assert_eq!(it.size_hint(), (len, Some(len))) } + _ => { + len -= 1; + assert_eq!(it.size_hint(), (len, Some(len))) + } } } } @@ -336,14 +352,14 @@ fn test_rev_iter() { d.push_back(i); } { - let b: &[_] = &[&4,&3,&2,&1,&0]; + let b: &[_] = &[&4, &3, &2, &1, &0]; assert_eq!(d.iter().rev().collect::>(), b); } for i in 6..9 { d.push_front(i); } - let b: &[_] = &[&4,&3,&2,&1,&0,&6,&7,&8]; + let b: &[_] = &[&4, &3, &2, &1, &0, &6, &7, &8]; assert_eq!(d.iter().rev().collect::>(), b); } @@ -428,7 +444,7 @@ fn test_into_iter() { d.push_back(i); } - let b = vec![0,1,2,3,4]; + let b = vec![0, 1, 2, 3, 4]; assert_eq!(d.into_iter().collect::>(), b); } @@ -442,7 +458,7 @@ fn test_into_iter() { d.push_front(i); } - let b = vec![8,7,6,0,1,2,3,4]; + let b = vec![8, 7, 6, 0, 1, 2, 3, 4]; assert_eq!(d.into_iter().collect::>(), b); } @@ -506,7 +522,7 @@ fn test_drain() { d.push_front(i); } - assert_eq!(d.drain(..).collect::>(), [8,7,6,0,1,2,3,4]); + assert_eq!(d.drain(..).collect::>(), [8, 7, 6, 0, 1, 2, 3, 4]); assert!(d.is_empty()); } @@ -536,7 +552,7 @@ fn test_drain() { #[test] fn test_from_iter() { - let v = vec!(1,2,3,4,5,6,7); + let v = vec![1, 2, 3, 4, 5, 6, 7]; let deq: VecDeque<_> = v.iter().cloned().collect(); let u: Vec<_> = deq.iter().cloned().collect(); assert_eq!(u, v); @@ -544,7 +560,7 @@ fn test_from_iter() { let seq = (0..).step_by(2).take(256); let deq: VecDeque<_> = seq.collect(); for (i, &x) in 
deq.iter().enumerate() { - assert_eq!(2*i, x); + assert_eq!(2 * i, x); } assert_eq!(deq.len(), 256); } @@ -589,20 +605,66 @@ fn test_eq() { #[test] fn test_hash() { - let mut x = VecDeque::new(); - let mut y = VecDeque::new(); + let mut x = VecDeque::new(); + let mut y = VecDeque::new(); + + x.push_back(1); + x.push_back(2); + x.push_back(3); + + y.push_back(0); + y.push_back(1); + y.pop_front(); + y.push_back(2); + y.push_back(3); + + assert!(::hash(&x) == ::hash(&y)); +} - x.push_back(1); - x.push_back(2); - x.push_back(3); +#[test] +fn test_hash_after_rotation() { + // test that two deques hash equal even if elements are laid out differently + let len = 28; + let mut ring: VecDeque = (0..len as i32).collect(); + let orig = ring.clone(); + for _ in 0..ring.capacity() { + // shift values 1 step to the right by pop, sub one, push + ring.pop_front(); + for elt in &mut ring { + *elt -= 1; + } + ring.push_back(len - 1); + assert_eq!(::hash(&orig), ::hash(&ring)); + assert_eq!(orig, ring); + assert_eq!(ring, orig); + } +} - y.push_back(0); - y.push_back(1); - y.pop_front(); - y.push_back(2); - y.push_back(3); +#[test] +fn test_eq_after_rotation() { + // test that two deques are equal even if elements are laid out differently + let len = 28; + let mut ring: VecDeque = (0..len as i32).collect(); + let mut shifted = ring.clone(); + for _ in 0..10 { + // shift values 1 step to the right by pop, sub one, push + ring.pop_front(); + for elt in &mut ring { + *elt -= 1; + } + ring.push_back(len - 1); + } - assert!(::hash(&x) == ::hash(&y)); + // try every shift + for _ in 0..shifted.capacity() { + shifted.pop_front(); + for elt in &mut shifted { + *elt -= 1; + } + shifted.push_back(len - 1); + assert_eq!(shifted, ring); + assert_eq!(ring, shifted); + } } #[test] @@ -623,19 +685,23 @@ fn test_show() { let ringbuf: VecDeque<_> = (0..10).collect(); assert_eq!(format!("{:?}", ringbuf), "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]"); - let ringbuf: VecDeque<_> = vec!["just", "one", "test", 
"more"].iter() - .cloned() - .collect(); - assert_eq!(format!("{:?}", ringbuf), "[\"just\", \"one\", \"test\", \"more\"]"); + let ringbuf: VecDeque<_> = vec!["just", "one", "test", "more"] + .iter() + .cloned() + .collect(); + assert_eq!(format!("{:?}", ringbuf), + "[\"just\", \"one\", \"test\", \"more\"]"); } #[test] fn test_drop() { - static mut drops: i32 = 0; + static mut DROPS: i32 = 0; struct Elem; impl Drop for Elem { fn drop(&mut self) { - unsafe { drops += 1; } + unsafe { + DROPS += 1; + } } } @@ -646,16 +712,18 @@ fn test_drop() { ring.push_front(Elem); drop(ring); - assert_eq!(unsafe {drops}, 4); + assert_eq!(unsafe { DROPS }, 4); } #[test] fn test_drop_with_pop() { - static mut drops: i32 = 0; + static mut DROPS: i32 = 0; struct Elem; impl Drop for Elem { fn drop(&mut self) { - unsafe { drops += 1; } + unsafe { + DROPS += 1; + } } } @@ -667,19 +735,21 @@ fn test_drop_with_pop() { drop(ring.pop_back()); drop(ring.pop_front()); - assert_eq!(unsafe {drops}, 2); + assert_eq!(unsafe { DROPS }, 2); drop(ring); - assert_eq!(unsafe {drops}, 4); + assert_eq!(unsafe { DROPS }, 4); } #[test] fn test_drop_clear() { - static mut drops: i32 = 0; + static mut DROPS: i32 = 0; struct Elem; impl Drop for Elem { fn drop(&mut self) { - unsafe { drops += 1; } + unsafe { + DROPS += 1; + } } } @@ -689,10 +759,10 @@ fn test_drop_clear() { ring.push_back(Elem); ring.push_front(Elem); ring.clear(); - assert_eq!(unsafe {drops}, 4); + assert_eq!(unsafe { DROPS }, 4); drop(ring); - assert_eq!(unsafe {drops}, 4); + assert_eq!(unsafe { DROPS }, 4); } #[test] @@ -780,7 +850,7 @@ fn test_get_mut() { match ring.get_mut(1) { Some(x) => *x = -1, - None => () + None => (), }; assert_eq!(ring.get_mut(0), Some(&mut 0)); @@ -810,13 +880,13 @@ fn test_front() { fn test_as_slices() { let mut ring: VecDeque = VecDeque::with_capacity(127); let cap = ring.capacity() as i32; - let first = cap/2; - let last = cap - first; + let first = cap / 2; + let last = cap - first; for i in 0..first { 
ring.push_back(i); let (left, right) = ring.as_slices(); - let expected: Vec<_> = (0..i+1).collect(); + let expected: Vec<_> = (0..i + 1).collect(); assert_eq!(left, &expected[..]); assert_eq!(right, []); } @@ -824,7 +894,7 @@ fn test_as_slices() { for j in -last..0 { ring.push_front(j); let (left, right) = ring.as_slices(); - let expected_left: Vec<_> = (-last..j+1).rev().collect(); + let expected_left: Vec<_> = (-last..j + 1).rev().collect(); let expected_right: Vec<_> = (0..first).collect(); assert_eq!(left, &expected_left[..]); assert_eq!(right, &expected_right[..]); @@ -838,13 +908,13 @@ fn test_as_slices() { fn test_as_mut_slices() { let mut ring: VecDeque = VecDeque::with_capacity(127); let cap = ring.capacity() as i32; - let first = cap/2; - let last = cap - first; + let first = cap / 2; + let last = cap - first; for i in 0..first { ring.push_back(i); let (left, right) = ring.as_mut_slices(); - let expected: Vec<_> = (0..i+1).collect(); + let expected: Vec<_> = (0..i + 1).collect(); assert_eq!(left, &expected[..]); assert_eq!(right, []); } @@ -852,7 +922,7 @@ fn test_as_mut_slices() { for j in -last..0 { ring.push_front(j); let (left, right) = ring.as_mut_slices(); - let expected_left: Vec<_> = (-last..j+1).rev().collect(); + let expected_left: Vec<_> = (-last..j + 1).rev().collect(); let expected_right: Vec<_> = (0..first).collect(); assert_eq!(left, &expected_left[..]); assert_eq!(right, &expected_right[..]); @@ -917,3 +987,23 @@ fn test_extend_ref() { assert_eq!(v[4], 5); assert_eq!(v[5], 6); } + +#[test] +fn test_contains() { + let mut v = VecDeque::new(); + v.extend(&[2, 3, 4]); + + assert!(v.contains(&3)); + assert!(!v.contains(&1)); + + v.clear(); + + assert!(!v.contains(&3)); +} + +#[allow(dead_code)] +fn assert_covariance() { + fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> { + d + } +} diff --git a/src/libcompiler_builtins/Cargo.toml b/src/libcompiler_builtins/Cargo.toml new file mode 100644 index 
0000000000000..79570dc025219 --- /dev/null +++ b/src/libcompiler_builtins/Cargo.toml @@ -0,0 +1,17 @@ +[package] +authors = ["The Rust Project Developers"] +build = "build.rs" +name = "compiler_builtins" +version = "0.0.0" + +[lib] +name = "compiler_builtins" +path = "lib.rs" +test = false +bench = false + +[dependencies] +core = { path = "../libcore" } + +[build-dependencies] +gcc = "0.3.27" diff --git a/src/libcompiler_builtins/build.rs b/src/libcompiler_builtins/build.rs new file mode 100644 index 0000000000000..f61e2281a5c30 --- /dev/null +++ b/src/libcompiler_builtins/build.rs @@ -0,0 +1,411 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Compiles the `compiler-rt` library, or at least the builtins part of it. +//! +//! Note that while compiler-rt has a build system associated with it, we +//! specifically don't use it here. The compiler-rt build system, written in +//! CMake, is actually *very* difficult to work with in terms of getting it to +//! compile on all the relevant platforms we want it to compile on. In the end +//! it became so much pain to work with local patches, work around the oddities +//! of the build system, etc, that we're just building everything by hand now. +//! +//! In general compiler-rt is just a bunch of intrinsics that are in practice +//! *very* stable. We just need to make sure that all the relevant functions and +//! such are compiled somewhere and placed in an object file somewhere. +//! Eventually, these should all be written in Rust! +//! +//! So below you'll find a listing of every single file in the compiler-rt repo +//! that we're compiling. We just reach in and compile with the `gcc` crate +//! 
which should have all the relevant flags and such already configured. +//! +//! The risk here is that if we update compiler-rt we may need to compile some +//! new intrinsics, but to be honest we surely don't use all of the intrinsics +//! listed below today so the likelihood of us actually needing a new intrinsic +//! is quite low. The failure case is also just that someone reports a link +//! error (if any) and then we just add it to the list. Overall, that cost is +//! far far less than working with compiler-rt's build system over time. + +extern crate gcc; + +use std::collections::BTreeMap; +use std::env; +use std::path::Path; + +struct Sources { + // SYMBOL -> PATH TO SOURCE + map: BTreeMap<&'static str, &'static str>, +} + +impl Sources { + fn new() -> Sources { + Sources { map: BTreeMap::new() } + } + + fn extend(&mut self, sources: &[&'static str]) { + // NOTE Some intrinsics have both a generic implementation (e.g. + // `floatdidf.c`) and an arch optimized implementation + // (`x86_64/floatdidf.c`). In those cases, we keep the arch optimized + // implementation and discard the generic implementation. If we don't + // and keep both implementations, the linker will yell at us about + // duplicate symbols! 
+ for &src in sources { + let symbol = Path::new(src).file_stem().unwrap().to_str().unwrap(); + if src.contains("/") { + // Arch-optimized implementation (preferred) + self.map.insert(symbol, src); + } else { + // Generic implementation + if !self.map.contains_key(symbol) { + self.map.insert(symbol, src); + } + } + } + } +} + +fn main() { + let target = env::var("TARGET").expect("TARGET was not set"); + + // Emscripten's runtime includes all the builtins + if target.contains("emscripten") { + return; + } + + let cfg = &mut gcc::Config::new(); + + if target.contains("msvc") { + // Don't pull in extra libraries on MSVC + cfg.flag("/Zl"); + + // Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP + cfg.define("__func__", Some("__FUNCTION__")); + } else { + // Turn off various features of gcc and such, mostly copying + // compiler-rt's build system already + cfg.flag("-fno-builtin"); + cfg.flag("-fvisibility=hidden"); + cfg.flag("-fomit-frame-pointer"); + cfg.flag("-ffreestanding"); + cfg.define("VISIBILITY_HIDDEN", None); + } + + let mut sources = Sources::new(); + sources.extend(&["absvdi2.c", + "absvsi2.c", + "adddf3.c", + "addsf3.c", + "addvdi3.c", + "addvsi3.c", + "apple_versioning.c", + "ashldi3.c", + "ashrdi3.c", + "clear_cache.c", + "clzdi2.c", + "clzsi2.c", + "cmpdi2.c", + "comparedf2.c", + "comparesf2.c", + "ctzdi2.c", + "ctzsi2.c", + "divdc3.c", + "divdf3.c", + "divdi3.c", + "divmoddi4.c", + "divmodsi4.c", + "divsc3.c", + "divsf3.c", + "divsi3.c", + "divxc3.c", + "extendsfdf2.c", + "extendhfsf2.c", + "ffsdi2.c", + "fixdfdi.c", + "fixdfsi.c", + "fixsfdi.c", + "fixsfsi.c", + "fixunsdfdi.c", + "fixunsdfsi.c", + "fixunssfdi.c", + "fixunssfsi.c", + "fixunsxfdi.c", + "fixunsxfsi.c", + "fixxfdi.c", + "floatdidf.c", + "floatdisf.c", + "floatdixf.c", + "floatsidf.c", + "floatsisf.c", + "floatundidf.c", + "floatundisf.c", + "floatundixf.c", + "floatunsidf.c", + "floatunsisf.c", + "int_util.c", + "lshrdi3.c", + "moddi3.c", + "modsi3.c", + "muldc3.c", + 
"muldf3.c", + "muldi3.c", + "mulodi4.c", + "mulosi4.c", + "muloti4.c", + "mulsc3.c", + "mulsf3.c", + "mulvdi3.c", + "mulvsi3.c", + "mulxc3.c", + "negdf2.c", + "negdi2.c", + "negsf2.c", + "negvdi2.c", + "negvsi2.c", + "paritydi2.c", + "paritysi2.c", + "popcountdi2.c", + "popcountsi2.c", + "powidf2.c", + "powisf2.c", + "powixf2.c", + "subdf3.c", + "subsf3.c", + "subvdi3.c", + "subvsi3.c", + "truncdfhf2.c", + "truncdfsf2.c", + "truncsfhf2.c", + "ucmpdi2.c", + "udivdi3.c", + "udivmoddi4.c", + "udivmodsi4.c", + "udivsi3.c", + "umoddi3.c", + "umodsi3.c"]); + + if !target.contains("ios") { + sources.extend(&["absvti2.c", + "addtf3.c", + "addvti3.c", + "ashlti3.c", + "ashrti3.c", + "clzti2.c", + "cmpti2.c", + "ctzti2.c", + "divtf3.c", + "divti3.c", + "ffsti2.c", + "fixdfti.c", + "fixsfti.c", + "fixunsdfti.c", + "fixunssfti.c", + "fixunsxfti.c", + "fixxfti.c", + "floattidf.c", + "floattisf.c", + "floattixf.c", + "floatuntidf.c", + "floatuntisf.c", + "floatuntixf.c", + "lshrti3.c", + "modti3.c", + "multf3.c", + "multi3.c", + "mulvti3.c", + "negti2.c", + "negvti2.c", + "parityti2.c", + "popcountti2.c", + "powitf2.c", + "subtf3.c", + "subvti3.c", + "trampoline_setup.c", + "ucmpti2.c", + "udivmodti4.c", + "udivti3.c", + "umodti3.c"]); + } + + if target.contains("apple") { + sources.extend(&["atomic_flag_clear.c", + "atomic_flag_clear_explicit.c", + "atomic_flag_test_and_set.c", + "atomic_flag_test_and_set_explicit.c", + "atomic_signal_fence.c", + "atomic_thread_fence.c"]); + } + + if !target.contains("windows") { + sources.extend(&["emutls.c"]); + } + + if target.contains("msvc") { + if target.contains("x86_64") { + sources.extend(&["x86_64/floatdidf.c", "x86_64/floatdisf.c", "x86_64/floatdixf.c"]); + } + } else { + if !target.contains("freebsd") { + sources.extend(&["gcc_personality_v0.c"]); + } + + if target.contains("x86_64") { + sources.extend(&["x86_64/chkstk.S", + "x86_64/chkstk2.S", + "x86_64/floatdidf.c", + "x86_64/floatdisf.c", + "x86_64/floatdixf.c", + 
"x86_64/floatundidf.S", + "x86_64/floatundisf.S", + "x86_64/floatundixf.S"]); + } + + if target.contains("i386") || target.contains("i586") || target.contains("i686") { + sources.extend(&["i386/ashldi3.S", + "i386/ashrdi3.S", + "i386/chkstk.S", + "i386/chkstk2.S", + "i386/divdi3.S", + "i386/floatdidf.S", + "i386/floatdisf.S", + "i386/floatdixf.S", + "i386/floatundidf.S", + "i386/floatundisf.S", + "i386/floatundixf.S", + "i386/lshrdi3.S", + "i386/moddi3.S", + "i386/muldi3.S", + "i386/udivdi3.S", + "i386/umoddi3.S"]); + } + } + + if target.contains("arm") && !target.contains("ios") { + sources.extend(&["arm/aeabi_cdcmp.S", + "arm/aeabi_cdcmpeq_check_nan.c", + "arm/aeabi_cfcmp.S", + "arm/aeabi_cfcmpeq_check_nan.c", + "arm/aeabi_dcmp.S", + "arm/aeabi_div0.c", + "arm/aeabi_drsub.c", + "arm/aeabi_fcmp.S", + "arm/aeabi_frsub.c", + "arm/aeabi_idivmod.S", + "arm/aeabi_ldivmod.S", + "arm/aeabi_memcmp.S", + "arm/aeabi_memcpy.S", + "arm/aeabi_memmove.S", + "arm/aeabi_memset.S", + "arm/aeabi_uidivmod.S", + "arm/aeabi_uldivmod.S", + "arm/bswapdi2.S", + "arm/bswapsi2.S", + "arm/clzdi2.S", + "arm/clzsi2.S", + "arm/comparesf2.S", + "arm/divmodsi4.S", + "arm/divsi3.S", + "arm/modsi3.S", + "arm/switch16.S", + "arm/switch32.S", + "arm/switch8.S", + "arm/switchu8.S", + "arm/sync_synchronize.S", + "arm/udivmodsi4.S", + "arm/udivsi3.S", + "arm/umodsi3.S"]); + } + + if target.contains("armv7") { + sources.extend(&["arm/sync_fetch_and_add_4.S", + "arm/sync_fetch_and_add_8.S", + "arm/sync_fetch_and_and_4.S", + "arm/sync_fetch_and_and_8.S", + "arm/sync_fetch_and_max_4.S", + "arm/sync_fetch_and_max_8.S", + "arm/sync_fetch_and_min_4.S", + "arm/sync_fetch_and_min_8.S", + "arm/sync_fetch_and_nand_4.S", + "arm/sync_fetch_and_nand_8.S", + "arm/sync_fetch_and_or_4.S", + "arm/sync_fetch_and_or_8.S", + "arm/sync_fetch_and_sub_4.S", + "arm/sync_fetch_and_sub_8.S", + "arm/sync_fetch_and_umax_4.S", + "arm/sync_fetch_and_umax_8.S", + "arm/sync_fetch_and_umin_4.S", + "arm/sync_fetch_and_umin_8.S", + 
"arm/sync_fetch_and_xor_4.S", + "arm/sync_fetch_and_xor_8.S"]); + } + + if target.contains("eabihf") { + sources.extend(&["arm/adddf3vfp.S", + "arm/addsf3vfp.S", + "arm/divdf3vfp.S", + "arm/divsf3vfp.S", + "arm/eqdf2vfp.S", + "arm/eqsf2vfp.S", + "arm/extendsfdf2vfp.S", + "arm/fixdfsivfp.S", + "arm/fixsfsivfp.S", + "arm/fixunsdfsivfp.S", + "arm/fixunssfsivfp.S", + "arm/floatsidfvfp.S", + "arm/floatsisfvfp.S", + "arm/floatunssidfvfp.S", + "arm/floatunssisfvfp.S", + "arm/gedf2vfp.S", + "arm/gesf2vfp.S", + "arm/gtdf2vfp.S", + "arm/gtsf2vfp.S", + "arm/ledf2vfp.S", + "arm/lesf2vfp.S", + "arm/ltdf2vfp.S", + "arm/ltsf2vfp.S", + "arm/muldf3vfp.S", + "arm/mulsf3vfp.S", + "arm/negdf2vfp.S", + "arm/negsf2vfp.S", + "arm/nedf2vfp.S", + "arm/nesf2vfp.S", + "arm/restore_vfp_d8_d15_regs.S", + "arm/save_vfp_d8_d15_regs.S", + "arm/subdf3vfp.S", + "arm/subsf3vfp.S", + "arm/truncdfsf2vfp.S", + "arm/unorddf2vfp.S", + "arm/unordsf2vfp.S"]); + } + + if target.contains("aarch64") { + sources.extend(&["comparetf2.c", + "extenddftf2.c", + "extendsftf2.c", + "fixtfdi.c", + "fixtfsi.c", + "fixtfti.c", + "fixunstfdi.c", + "fixunstfsi.c", + "fixunstfti.c", + "floatditf.c", + "floatsitf.c", + "floatunditf.c", + "floatunsitf.c", + "multc3.c", + "trunctfdf2.c", + "trunctfsf2.c"]); + } + + for src in sources.map.values() { + cfg.file(Path::new("../compiler-rt/lib/builtins").join(src)); + } + + cfg.compile("libcompiler-rt.a"); +} diff --git a/src/libcompiler_builtins/lib.rs b/src/libcompiler_builtins/lib.rs new file mode 100644 index 0000000000000..4a703b3da68f6 --- /dev/null +++ b/src/libcompiler_builtins/lib.rs @@ -0,0 +1,19 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![feature(compiler_builtins)] +#![no_std] +#![compiler_builtins] +#![unstable(feature = "compiler_builtins_lib", + reason = "internal implementation detail of rustc right now", + issue = "0")] +#![crate_name = "compiler_builtins"] +#![crate_type = "rlib"] +#![feature(staged_api)] diff --git a/src/libcore/Cargo.toml b/src/libcore/Cargo.toml new file mode 100644 index 0000000000000..a72c712ad1733 --- /dev/null +++ b/src/libcore/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "core" +version = "0.0.0" + +[lib] +name = "core" +path = "lib.rs" +test = false +bench = false + +[[test]] +name = "coretest" +path = "../libcoretest/lib.rs" + +[[bench]] +name = "coretest" +path = "../libcoretest/lib.rs" diff --git a/src/libcore/any.rs b/src/libcore/any.rs index cb9bf935cdb58..eb0636e8576be 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -22,7 +22,7 @@ //! Note that &Any is limited to testing whether a value is of a specified //! concrete type, and cannot be used to test whether a type implements a trait. //! -//! [`Box`]: ../boxed/struct.Box.html +//! [`Box`]: ../../std/boxed/struct.Box.html //! //! # Examples //! @@ -72,12 +72,7 @@ #![stable(feature = "rust1", since = "1.0.0")] use fmt; -use marker::Send; -use mem::transmute; -use option::Option::{self, Some, None}; -use raw::TraitObject; use intrinsics; -use marker::{Reflect, Sized}; /////////////////////////////////////////////////////////////////////////////// // Any trait @@ -85,13 +80,30 @@ use marker::{Reflect, Sized}; /// A type to emulate dynamic typing. /// -/// Every type with no non-`'static` references implements `Any`. +/// Most types implement `Any`. However, any type which contains a non-`'static` reference does not. /// See the [module-level documentation][mod] for more details. /// /// [mod]: index.html #[stable(feature = "rust1", since = "1.0.0")] -pub trait Any: Reflect + 'static { +pub trait Any: 'static { /// Gets the `TypeId` of `self`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(get_type_id)] + /// + /// use std::any::{Any, TypeId}; + /// + /// fn is_string(s: &Any) -> bool { + /// TypeId::of::() == s.get_type_id() + /// } + /// + /// fn main() { + /// assert_eq!(is_string(&0), false); + /// assert_eq!(is_string(&"cookie monster".to_owned()), true); + /// } + /// ``` #[unstable(feature = "get_type_id", reason = "this method will likely be replaced by an associated static", issue = "27745")] @@ -99,7 +111,7 @@ pub trait Any: Reflect + 'static { } #[stable(feature = "rust1", since = "1.0.0")] -impl Any for T { +impl Any for T { fn get_type_id(&self) -> TypeId { TypeId::of::() } } @@ -125,7 +137,26 @@ impl fmt::Debug for Any + Send { } impl Any { - /// Returns true if the boxed type is the same as `T` + /// Returns true if the boxed type is the same as `T`. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn is_string(s: &Any) { + /// if s.is::() { + /// println!("It's a string!"); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// is_string(&0); + /// is_string(&"cookie monster".to_owned()); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is(&self) -> bool { @@ -141,16 +172,31 @@ impl Any { /// Returns some reference to the boxed value if it is of type `T`, or /// `None` if it isn't. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn print_if_string(s: &Any) { + /// if let Some(string) = s.downcast_ref::() { + /// println!("It's a string({}): '{}'", string.len(), string); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// print_if_string(&0); + /// print_if_string(&"cookie monster".to_owned()); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn downcast_ref(&self) -> Option<&T> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&*(to.data as *const T)) + Some(&*(self as *const Any as *const T)) } } else { None @@ -159,16 +205,35 @@ impl Any { /// Returns some mutable reference to the boxed value if it is of type `T`, or /// `None` if it isn't. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn modify_if_u32(s: &mut Any) { + /// if let Some(num) = s.downcast_mut::() { + /// *num = 42; + /// } + /// } + /// + /// fn main() { + /// let mut x = 10u32; + /// let mut s = "starlord".to_owned(); + /// + /// modify_if_u32(&mut x); + /// modify_if_u32(&mut s); + /// + /// assert_eq!(x, 42); + /// assert_eq!(&s, "starlord"); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn downcast_mut(&mut self) -> Option<&mut T> { if self.is::() { unsafe { - // Get the raw representation of the trait object - let to: TraitObject = transmute(self); - - // Extract the data pointer - Some(&mut *(to.data as *const T as *mut T)) + Some(&mut *(self as *mut Any as *mut T)) } } else { None @@ -178,6 +243,25 @@ impl Any { impl Any+Send { /// Forwards to the method defined on the type `Any`. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn is_string(s: &(Any + Send)) { + /// if s.is::() { + /// println!("It's a string!"); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// is_string(&0); + /// is_string(&"cookie monster".to_owned()); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is(&self) -> bool { @@ -185,6 +269,25 @@ impl Any+Send { } /// Forwards to the method defined on the type `Any`. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn print_if_string(s: &(Any + Send)) { + /// if let Some(string) = s.downcast_ref::() { + /// println!("It's a string({}): '{}'", string.len(), string); + /// } else { + /// println!("Not a string..."); + /// } + /// } + /// + /// fn main() { + /// print_if_string(&0); + /// print_if_string(&"cookie monster".to_owned()); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn downcast_ref(&self) -> Option<&T> { @@ -192,6 +295,29 @@ impl Any+Send { } /// Forwards to the method defined on the type `Any`. + /// + /// # Examples + /// + /// ``` + /// use std::any::Any; + /// + /// fn modify_if_u32(s: &mut (Any+ Send)) { + /// if let Some(num) = s.downcast_mut::() { + /// *num = 42; + /// } + /// } + /// + /// fn main() { + /// let mut x = 10u32; + /// let mut s = "starlord".to_owned(); + /// + /// modify_if_u32(&mut x); + /// modify_if_u32(&mut s); + /// + /// assert_eq!(x, 42); + /// assert_eq!(&s, "starlord"); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn downcast_mut(&mut self) -> Option<&mut T> { @@ -220,9 +346,24 @@ pub struct TypeId { impl TypeId { /// Returns the `TypeId` of the type this generic function has been - /// instantiated with + /// instantiated with. 
+ /// + /// # Examples + /// + /// ``` + /// use std::any::{Any, TypeId}; + /// + /// fn is_string(_s: &T) -> bool { + /// TypeId::of::() == TypeId::of::() + /// } + /// + /// fn main() { + /// assert_eq!(is_string(&0), false); + /// assert_eq!(is_string(&"cookie monster".to_owned()), true); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn of() -> TypeId { + pub fn of() -> TypeId { TypeId { t: unsafe { intrinsics::type_id::() }, } diff --git a/src/libcore/array.rs b/src/libcore/array.rs index 0c5eada21655c..37bd57034a7b6 100644 --- a/src/libcore/array.rs +++ b/src/libcore/array.rs @@ -12,7 +12,7 @@ //! up to a certain length. Eventually we should able to generalize //! to all lengths. //! -//! *[See also the array primitive type](../primitive.array.html).* +//! *[See also the array primitive type](../../std/primitive.array.html).* #![unstable(feature = "fixed_size_array", reason = "traits and impls are better expressed through generic \ @@ -20,16 +20,11 @@ issue = "27778")] use borrow::{Borrow, BorrowMut}; -use clone::Clone; -use cmp::{PartialEq, Eq, PartialOrd, Ord, Ordering}; -use convert::{AsRef, AsMut}; -use default::Default; +use cmp::Ordering; use fmt; use hash::{Hash, self}; -use iter::IntoIterator; -use marker::{Copy, Sized, Unsize}; -use option::Option; -use slice::{Iter, IterMut, SliceExt}; +use marker::Unsize; +use slice::{Iter, IterMut}; /// Utility trait implemented only on arrays of fixed size /// @@ -98,6 +93,7 @@ macro_rules! __impl_slice_eq2 { macro_rules! array_impls { ($($N:expr)+) => { $( + #[stable(feature = "rust1", since = "1.0.0")] impl AsRef<[T]> for [T; $N] { #[inline] fn as_ref(&self) -> &[T] { @@ -105,6 +101,7 @@ macro_rules! 
array_impls { } } + #[stable(feature = "rust1", since = "1.0.0")] impl AsMut<[T]> for [T; $N] { #[inline] fn as_mut(&mut self) -> &mut [T] { diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs index 79330d3a61ea7..3d223465c88a0 100644 --- a/src/libcore/borrow.rs +++ b/src/libcore/borrow.rs @@ -12,8 +12,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A trait for borrowing data. /// /// In general, there may be several ways to "borrow" a piece of data. The diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 789b75836d0a7..64a7a8c5ef785 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -119,38 +119,64 @@ //! `Cell`. //! //! ``` +//! #![feature(core_intrinsics)] +//! #![feature(shared)] //! use std::cell::Cell; +//! use std::ptr::Shared; +//! use std::intrinsics::abort; +//! use std::intrinsics::assume; //! -//! struct Rc { -//! ptr: *mut RcBox +//! struct Rc { +//! ptr: Shared> //! } //! -//! struct RcBox { -//! # #[allow(dead_code)] +//! struct RcBox { +//! strong: Cell, +//! refcount: Cell, //! value: T, -//! refcount: Cell //! } //! -//! impl Clone for Rc { +//! impl Clone for Rc { //! fn clone(&self) -> Rc { -//! unsafe { -//! (*self.ptr).refcount.set((*self.ptr).refcount.get() + 1); -//! Rc { ptr: self.ptr } -//! } +//! self.inc_strong(); +//! Rc { ptr: self.ptr } +//! } +//! } +//! +//! trait RcBoxPtr { +//! +//! fn inner(&self) -> &RcBox; +//! +//! fn strong(&self) -> usize { +//! self.inner().strong.get() +//! } +//! +//! fn inc_strong(&self) { +//! self.inner() +//! .strong +//! .set(self.strong() +//! .checked_add(1) +//! .unwrap_or_else(|| unsafe { abort() })); //! } //! } +//! +//! impl RcBoxPtr for Rc { +//! fn inner(&self) -> &RcBox { +//! unsafe { +//! assume(!(*(&self.ptr as *const _ as *const *const ())).is_null()); +//! &(**self.ptr) +//! } +//! } +//! } //! ``` //! 
#![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; -use cmp::{PartialEq, Eq}; -use default::Default; -use marker::{Copy, Send, Sync, Sized}; -use ops::{Deref, DerefMut, Drop, FnOnce}; -use option::Option; -use option::Option::{None, Some}; +use cmp::Ordering; +use fmt::{self, Debug, Display}; +use marker::Unsize; +use ops::{Deref, DerefMut, CoerceUnsized}; /// A mutable memory location that admits only `Copy` data. /// @@ -216,10 +242,6 @@ impl Cell { /// Returns a reference to the underlying `UnsafeCell`. /// - /// # Safety - /// - /// This function is `unsafe` because `UnsafeCell`'s field is public. - /// /// # Examples /// /// ``` @@ -229,18 +251,62 @@ impl Cell { /// /// let c = Cell::new(5); /// - /// let uc = unsafe { c.as_unsafe_cell() }; + /// let uc = c.as_unsafe_cell(); /// ``` #[inline] #[unstable(feature = "as_unsafe_cell", issue = "27708")] - pub unsafe fn as_unsafe_cell(&self) -> &UnsafeCell { + #[rustc_deprecated(since = "1.12.0", reason = "renamed to as_ptr")] + pub fn as_unsafe_cell(&self) -> &UnsafeCell { &self.value } + + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// + /// let c = Cell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + #[stable(feature = "cell_as_ptr", since = "1.12.0")] + pub fn as_ptr(&self) -> *mut T { + self.value.get() + } + + /// Returns a mutable reference to the underlying data. + /// + /// This call borrows `Cell` mutably (at compile-time) which guarantees + /// that we possess the only reference. 
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::Cell; + /// + /// let mut c = Cell::new(5); + /// *c.get_mut() += 1; + /// + /// assert_eq!(c.get(), 6); + /// ``` + #[inline] + #[stable(feature = "cell_get_mut", since = "1.11.0")] + pub fn get_mut(&mut self) -> &mut T { + unsafe { + &mut *self.value.get() + } + } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Send for Cell where T: Send {} +#[stable(feature = "rust1", since = "1.0.0")] +impl !Sync for Cell {} + #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Cell { #[inline] @@ -251,6 +317,7 @@ impl Clone for Cell { #[stable(feature = "rust1", since = "1.0.0")] impl Default for Cell { + /// Creates a `Cell`, with the `Default` value for T. #[inline] fn default() -> Cell { Cell::new(Default::default()) @@ -268,6 +335,52 @@ impl PartialEq for Cell { #[stable(feature = "cell_eq", since = "1.2.0")] impl Eq for Cell {} +#[stable(feature = "cell_ord", since = "1.10.0")] +impl PartialOrd for Cell { + #[inline] + fn partial_cmp(&self, other: &Cell) -> Option { + self.get().partial_cmp(&other.get()) + } + + #[inline] + fn lt(&self, other: &Cell) -> bool { + self.get() < other.get() + } + + #[inline] + fn le(&self, other: &Cell) -> bool { + self.get() <= other.get() + } + + #[inline] + fn gt(&self, other: &Cell) -> bool { + self.get() > other.get() + } + + #[inline] + fn ge(&self, other: &Cell) -> bool { + self.get() >= other.get() + } +} + +#[stable(feature = "cell_ord", since = "1.10.0")] +impl Ord for Cell { + #[inline] + fn cmp(&self, other: &Cell) -> Ordering { + self.get().cmp(&other.get()) + } +} + +#[stable(feature = "cell_from", since = "1.12.0")] +impl From for Cell { + fn from(t: T) -> Cell { + Cell::new(t) + } +} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for Cell {} + /// A mutable memory location with dynamically checked borrow rules /// /// See the [module-level documentation](index.html) for more. 
@@ -289,6 +402,46 @@ pub enum BorrowState { Unused, } +/// An error returned by [`RefCell::try_borrow`](struct.RefCell.html#method.try_borrow). +#[stable(feature = "try_borrow", since = "1.13.0")] +pub struct BorrowError { + _private: (), +} + +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Debug for BorrowError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("BorrowError").finish() + } +} + +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Display for BorrowError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt("already mutably borrowed", f) + } +} + +/// An error returned by [`RefCell::try_borrow_mut`](struct.RefCell.html#method.try_borrow_mut). +#[stable(feature = "try_borrow", since = "1.13.0")] +pub struct BorrowMutError { + _private: (), +} + +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Debug for BorrowMutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("BorrowMutError").finish() + } +} + +#[stable(feature = "try_borrow", since = "1.13.0")] +impl Display for BorrowMutError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + Display::fmt("already borrowed", f) + } +} + // Values [1, MAX-1] represent the number of `Ref` active // (will not outgrow its range since `usize` is the size of the address space) type BorrowFlag = usize; @@ -341,6 +494,22 @@ impl RefCell { /// /// The returned value can be dispatched on to determine if a call to /// `borrow` or `borrow_mut` would succeed. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(borrow_state)] + /// + /// use std::cell::{BorrowState, RefCell}; + /// + /// let c = RefCell::new(5); + /// + /// match c.borrow_state() { + /// BorrowState::Writing => println!("Cannot be borrowed"), + /// BorrowState::Reading => println!("Cannot be borrowed mutably"), + /// BorrowState::Unused => println!("Can be borrowed (mutably as well)"), + /// } + /// ``` #[unstable(feature = "borrow_state", issue = "27733")] #[inline] pub fn borrow_state(&self) -> BorrowState { @@ -358,7 +527,8 @@ impl RefCell { /// /// # Panics /// - /// Panics if the value is currently mutably borrowed. + /// Panics if the value is currently mutably borrowed. For a non-panicking variant, use + /// [`try_borrow`](#method.try_borrow). /// /// # Examples /// @@ -389,12 +559,43 @@ impl RefCell { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn borrow(&self) -> Ref { + self.try_borrow().expect("already mutably borrowed") + } + + /// Immutably borrows the wrapped value, returning an error if the value is currently mutably + /// borrowed. + /// + /// The borrow lasts until the returned `Ref` exits scope. Multiple immutable borrows can be + /// taken out at the same time. + /// + /// This is the non-panicking variant of [`borrow`](#method.borrow). 
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::RefCell; + /// + /// let c = RefCell::new(5); + /// + /// { + /// let m = c.borrow_mut(); + /// assert!(c.try_borrow().is_err()); + /// } + /// + /// { + /// let m = c.borrow(); + /// assert!(c.try_borrow().is_ok()); + /// } + /// ``` + #[stable(feature = "try_borrow", since = "1.13.0")] + #[inline] + pub fn try_borrow(&self) -> Result, BorrowError> { match BorrowRef::new(&self.borrow) { - Some(b) => Ref { - _value: unsafe { &*self.value.get() }, - _borrow: b, - }, - None => panic!("RefCell already mutably borrowed"), + Some(b) => Ok(Ref { + value: unsafe { &*self.value.get() }, + borrow: b, + }), + None => Err(BorrowError { _private: () }), } } @@ -405,7 +606,8 @@ impl RefCell { /// /// # Panics /// - /// Panics if the value is currently borrowed. + /// Panics if the value is currently borrowed. For a non-panicking variant, use + /// [`try_borrow_mut`](#method.try_borrow_mut). /// /// # Examples /// @@ -414,7 +616,9 @@ impl RefCell { /// /// let c = RefCell::new(5); /// - /// let borrowed_five = c.borrow_mut(); + /// *c.borrow_mut() = 7; + /// + /// assert_eq!(*c.borrow(), 7); /// ``` /// /// An example of panic: @@ -435,12 +639,39 @@ impl RefCell { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn borrow_mut(&self) -> RefMut { + self.try_borrow_mut().expect("already borrowed") + } + + /// Mutably borrows the wrapped value, returning an error if the value is currently borrowed. + /// + /// The borrow lasts until the returned `RefMut` exits scope. The value cannot be borrowed + /// while this borrow is active. + /// + /// This is the non-panicking variant of [`borrow_mut`](#method.borrow_mut). 
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::RefCell; + /// + /// let c = RefCell::new(5); + /// + /// { + /// let m = c.borrow(); + /// assert!(c.try_borrow_mut().is_err()); + /// } + /// + /// assert!(c.try_borrow_mut().is_ok()); + /// ``` + #[stable(feature = "try_borrow", since = "1.13.0")] + #[inline] + pub fn try_borrow_mut(&self) -> Result, BorrowMutError> { match BorrowRefMut::new(&self.borrow) { - Some(b) => RefMut { - _value: unsafe { &mut *self.value.get() }, - _borrow: b, - }, - None => panic!("RefCell already borrowed"), + Some(b) => Ok(RefMut { + value: unsafe { &mut *self.value.get() }, + borrow: b, + }), + None => Err(BorrowMutError { _private: () }), } } @@ -449,16 +680,71 @@ impl RefCell { /// This can be used to circumvent `RefCell`'s safety checks. /// /// This function is `unsafe` because `UnsafeCell`'s field is public. + /// + /// # Examples + /// + /// ``` + /// #![feature(as_unsafe_cell)] + /// + /// use std::cell::RefCell; + /// + /// let c = RefCell::new(5); + /// let c = unsafe { c.as_unsafe_cell() }; + /// ``` #[inline] #[unstable(feature = "as_unsafe_cell", issue = "27708")] + #[rustc_deprecated(since = "1.12.0", reason = "renamed to as_ptr")] pub unsafe fn as_unsafe_cell(&self) -> &UnsafeCell { &self.value } + + /// Returns a raw pointer to the underlying data in this cell. + /// + /// # Examples + /// + /// ``` + /// use std::cell::RefCell; + /// + /// let c = RefCell::new(5); + /// + /// let ptr = c.as_ptr(); + /// ``` + #[inline] + #[stable(feature = "cell_as_ptr", since = "1.12.0")] + pub fn as_ptr(&self) -> *mut T { + self.value.get() + } + + /// Returns a mutable reference to the underlying data. + /// + /// This call borrows `RefCell` mutably (at compile-time) so there is no + /// need for dynamic checks. 
+ /// + /// # Examples + /// + /// ``` + /// use std::cell::RefCell; + /// + /// let mut c = RefCell::new(5); + /// *c.get_mut() += 1; + /// + /// assert_eq!(c, RefCell::new(6)); + /// ``` + #[inline] + #[stable(feature = "cell_get_mut", since = "1.11.0")] + pub fn get_mut(&mut self) -> &mut T { + unsafe { + &mut *self.value.get() + } + } } #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Send for RefCell where T: Send {} +#[stable(feature = "rust1", since = "1.0.0")] +impl !Sync for RefCell {} + #[stable(feature = "rust1", since = "1.0.0")] impl Clone for RefCell { #[inline] @@ -469,6 +755,7 @@ impl Clone for RefCell { #[stable(feature = "rust1", since = "1.0.0")] impl Default for RefCell { + /// Creates a `RefCell`, with the `Default` value for T. #[inline] fn default() -> RefCell { RefCell::new(Default::default()) @@ -486,8 +773,54 @@ impl PartialEq for RefCell { #[stable(feature = "cell_eq", since = "1.2.0")] impl Eq for RefCell {} +#[stable(feature = "cell_ord", since = "1.10.0")] +impl PartialOrd for RefCell { + #[inline] + fn partial_cmp(&self, other: &RefCell) -> Option { + self.borrow().partial_cmp(&*other.borrow()) + } + + #[inline] + fn lt(&self, other: &RefCell) -> bool { + *self.borrow() < *other.borrow() + } + + #[inline] + fn le(&self, other: &RefCell) -> bool { + *self.borrow() <= *other.borrow() + } + + #[inline] + fn gt(&self, other: &RefCell) -> bool { + *self.borrow() > *other.borrow() + } + + #[inline] + fn ge(&self, other: &RefCell) -> bool { + *self.borrow() >= *other.borrow() + } +} + +#[stable(feature = "cell_ord", since = "1.10.0")] +impl Ord for RefCell { + #[inline] + fn cmp(&self, other: &RefCell) -> Ordering { + self.borrow().cmp(&*other.borrow()) + } +} + +#[stable(feature = "cell_from", since = "1.12.0")] +impl From for RefCell { + fn from(t: T) -> RefCell { + RefCell::new(t) + } +} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for RefCell {} + struct BorrowRef<'b> { - _borrow: &'b 
Cell, + borrow: &'b Cell, } impl<'b> BorrowRef<'b> { @@ -497,7 +830,7 @@ impl<'b> BorrowRef<'b> { WRITING => None, b => { borrow.set(b + 1); - Some(BorrowRef { _borrow: borrow }) + Some(BorrowRef { borrow: borrow }) }, } } @@ -506,9 +839,9 @@ impl<'b> BorrowRef<'b> { impl<'b> Drop for BorrowRef<'b> { #[inline] fn drop(&mut self) { - let borrow = self._borrow.get(); + let borrow = self.borrow.get(); debug_assert!(borrow != WRITING && borrow != UNUSED); - self._borrow.set(borrow - 1); + self.borrow.set(borrow - 1); } } @@ -517,10 +850,12 @@ impl<'b> Clone for BorrowRef<'b> { fn clone(&self) -> BorrowRef<'b> { // Since this Ref exists, we know the borrow flag // is not set to WRITING. - let borrow = self._borrow.get(); - debug_assert!(borrow != WRITING && borrow != UNUSED); - self._borrow.set(borrow + 1); - BorrowRef { _borrow: self._borrow } + let borrow = self.borrow.get(); + debug_assert!(borrow != UNUSED); + // Prevent the borrow counter from overflowing. + assert!(borrow != WRITING); + self.borrow.set(borrow + 1); + BorrowRef { borrow: self.borrow } } } @@ -530,10 +865,8 @@ impl<'b> Clone for BorrowRef<'b> { /// See the [module-level documentation](index.html) for more. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct Ref<'b, T: ?Sized + 'b> { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _value: &'b T, - _borrow: BorrowRef<'b>, + value: &'b T, + borrow: BorrowRef<'b>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -542,7 +875,7 @@ impl<'b, T: ?Sized> Deref for Ref<'b, T> { #[inline] fn deref(&self) -> &T { - self._value + self.value } } @@ -561,8 +894,8 @@ impl<'b, T: ?Sized> Ref<'b, T> { #[inline] pub fn clone(orig: &Ref<'b, T>) -> Ref<'b, T> { Ref { - _value: orig._value, - _borrow: orig._borrow.clone(), + value: orig.value, + borrow: orig.borrow.clone(), } } @@ -577,8 +910,6 @@ impl<'b, T: ?Sized> Ref<'b, T> { /// # Example /// /// ``` - /// #![feature(cell_extras)] - /// /// use std::cell::{RefCell, Ref}; /// /// let c = RefCell::new((5, 'b')); @@ -586,51 +917,21 @@ impl<'b, T: ?Sized> Ref<'b, T> { /// let b2: Ref = Ref::map(b1, |t| &t.0); /// assert_eq!(*b2, 5) /// ``` - #[unstable(feature = "cell_extras", reason = "recently added", - issue = "27746")] + #[stable(feature = "cell_map", since = "1.8.0")] #[inline] pub fn map(orig: Ref<'b, T>, f: F) -> Ref<'b, U> where F: FnOnce(&T) -> &U { Ref { - _value: f(orig._value), - _borrow: orig._borrow, + value: f(orig.value), + borrow: orig.borrow, } } - - /// Make a new `Ref` for an optional component of the borrowed data, e.g. an - /// enum variant. - /// - /// The `RefCell` is already immutably borrowed, so this cannot fail. - /// - /// This is an associated function that needs to be used as - /// `Ref::filter_map(...)`. A method would interfere with methods of the - /// same name on the contents of a `RefCell` used through `Deref`. 
- /// - /// # Example - /// - /// ``` - /// # #![feature(cell_extras)] - /// use std::cell::{RefCell, Ref}; - /// - /// let c = RefCell::new(Ok(5)); - /// let b1: Ref> = c.borrow(); - /// let b2: Ref = Ref::filter_map(b1, |o| o.as_ref().ok()).unwrap(); - /// assert_eq!(*b2, 5) - /// ``` - #[unstable(feature = "cell_extras", reason = "recently added", - issue = "27746")] - #[inline] - pub fn filter_map(orig: Ref<'b, T>, f: F) -> Option> - where F: FnOnce(&T) -> Option<&U> - { - f(orig._value).map(move |new| Ref { - _value: new, - _borrow: orig._borrow, - }) - } } +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<'b, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized> for Ref<'b, T> {} + impl<'b, T: ?Sized> RefMut<'b, T> { /// Make a new `RefMut` for a component of the borrowed data, e.g. an enum /// variant. @@ -644,7 +945,6 @@ impl<'b, T: ?Sized> RefMut<'b, T> { /// # Example /// /// ``` - /// # #![feature(cell_extras)] /// use std::cell::{RefCell, RefMut}; /// /// let c = RefCell::new((5, 'b')); @@ -656,68 +956,28 @@ impl<'b, T: ?Sized> RefMut<'b, T> { /// } /// assert_eq!(*c.borrow(), (42, 'b')); /// ``` - #[unstable(feature = "cell_extras", reason = "recently added", - issue = "27746")] + #[stable(feature = "cell_map", since = "1.8.0")] #[inline] pub fn map(orig: RefMut<'b, T>, f: F) -> RefMut<'b, U> where F: FnOnce(&mut T) -> &mut U { RefMut { - _value: f(orig._value), - _borrow: orig._borrow, + value: f(orig.value), + borrow: orig.borrow, } } - - /// Make a new `RefMut` for an optional component of the borrowed data, e.g. - /// an enum variant. - /// - /// The `RefCell` is already mutably borrowed, so this cannot fail. - /// - /// This is an associated function that needs to be used as - /// `RefMut::filter_map(...)`. A method would interfere with methods of the - /// same name on the contents of a `RefCell` used through `Deref`. 
- /// - /// # Example - /// - /// ``` - /// # #![feature(cell_extras)] - /// use std::cell::{RefCell, RefMut}; - /// - /// let c = RefCell::new(Ok(5)); - /// { - /// let b1: RefMut> = c.borrow_mut(); - /// let mut b2: RefMut = RefMut::filter_map(b1, |o| { - /// o.as_mut().ok() - /// }).unwrap(); - /// assert_eq!(*b2, 5); - /// *b2 = 42; - /// } - /// assert_eq!(*c.borrow(), Ok(42)); - /// ``` - #[unstable(feature = "cell_extras", reason = "recently added", - issue = "27746")] - #[inline] - pub fn filter_map(orig: RefMut<'b, T>, f: F) -> Option> - where F: FnOnce(&mut T) -> Option<&mut U> - { - let RefMut { _value, _borrow } = orig; - f(_value).map(move |new| RefMut { - _value: new, - _borrow: _borrow, - }) - } } struct BorrowRefMut<'b> { - _borrow: &'b Cell, + borrow: &'b Cell, } impl<'b> Drop for BorrowRefMut<'b> { #[inline] fn drop(&mut self) { - let borrow = self._borrow.get(); + let borrow = self.borrow.get(); debug_assert!(borrow == WRITING); - self._borrow.set(UNUSED); + self.borrow.set(UNUSED); } } @@ -727,7 +987,7 @@ impl<'b> BorrowRefMut<'b> { match borrow.get() { UNUSED => { borrow.set(WRITING); - Some(BorrowRefMut { _borrow: borrow }) + Some(BorrowRefMut { borrow: borrow }) }, _ => None, } @@ -739,10 +999,8 @@ impl<'b> BorrowRefMut<'b> { /// See the [module-level documentation](index.html) for more. 
#[stable(feature = "rust1", since = "1.0.0")] pub struct RefMut<'b, T: ?Sized + 'b> { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _value: &'b mut T, - _borrow: BorrowRefMut<'b>, + value: &'b mut T, + borrow: BorrowRefMut<'b>, } #[stable(feature = "rust1", since = "1.0.0")] @@ -751,7 +1009,7 @@ impl<'b, T: ?Sized> Deref for RefMut<'b, T> { #[inline] fn deref(&self) -> &T { - self._value + self.value } } @@ -759,10 +1017,13 @@ impl<'b, T: ?Sized> Deref for RefMut<'b, T> { impl<'b, T: ?Sized> DerefMut for RefMut<'b, T> { #[inline] fn deref_mut(&mut self) -> &mut T { - self._value + self.value } } +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl<'b, T: ?Sized + Unsize, U: ?Sized> CoerceUnsized> for RefMut<'b, T> {} + /// The core primitive for interior mutability in Rust. /// /// `UnsafeCell` is a type that wraps some `T` and indicates unsafe interior operations on the @@ -770,6 +1031,20 @@ impl<'b, T: ?Sized> DerefMut for RefMut<'b, T> { /// The `UnsafeCell` type is the only legal way to obtain aliasable data that is considered /// mutable. In general, transmuting an `&T` type into an `&mut T` is considered undefined behavior. /// +/// The compiler makes optimizations based on the knowledge that `&T` is not mutably aliased or +/// mutated, and that `&mut T` is unique. When building abstractions like `Cell`, `RefCell`, +/// `Mutex`, etc, you need to turn these optimizations off. `UnsafeCell` is the only legal way +/// to do this. When `UnsafeCell` is immutably aliased, it is still safe to obtain a mutable +/// reference to its interior and/or to mutate it. However, it is up to the abstraction designer +/// to ensure that no two mutable references obtained this way are active at the same time, and +/// that there are no active mutable references or mutations when an immutable reference is obtained +/// from the cell. This is often done via runtime checks. 
+/// +/// Note that while mutating or mutably aliasing the contents of an `& UnsafeCell` is +/// okay (provided you enforce the invariants some other way); it is still undefined behavior +/// to have multiple `&mut UnsafeCell` aliases. +/// +/// /// Types like `Cell` and `RefCell` use this type to wrap their internal data. /// /// # Examples @@ -839,6 +1114,11 @@ impl UnsafeCell { impl UnsafeCell { /// Gets a mutable pointer to the wrapped value. /// + /// This can be cast to a pointer of any kind. + /// Ensure that the access is unique when casting to + /// `&mut T`, and ensure that there are no mutations or mutable + /// aliases going on when casting to `&T` + /// /// # Examples /// /// ``` @@ -854,3 +1134,28 @@ impl UnsafeCell { &self.value as *const T as *mut T } } + +#[stable(feature = "unsafe_cell_default", since = "1.9.0")] +impl Default for UnsafeCell { + /// Creates an `UnsafeCell`, with the `Default` value for T. + fn default() -> UnsafeCell { + UnsafeCell::new(Default::default()) + } +} + +#[stable(feature = "cell_from", since = "1.12.0")] +impl From for UnsafeCell { + fn from(t: T) -> UnsafeCell { + UnsafeCell::new(t) + } +} + +#[unstable(feature = "coerce_unsized", issue = "27732")] +impl, U> CoerceUnsized> for UnsafeCell {} + +#[allow(unused)] +fn assert_coerce_unsized(a: UnsafeCell<&i32>, b: Cell<&i32>, c: RefCell<&i32>) { + let _: UnsafeCell<&Send> = a; + let _: Cell<&Send> = b; + let _: RefCell<&Send> = c; +} diff --git a/src/libcore/char.rs b/src/libcore/char.rs index 0c3807d8ca0b5..966481e7b32bc 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -15,11 +15,12 @@ #![allow(non_snake_case)] #![stable(feature = "core_char", since = "1.2.0")] -use iter::Iterator; +use char_private::is_printable; +use convert::TryFrom; +use fmt; +use slice; +use iter::FusedIterator; use mem::transmute; -use option::Option::{None, Some}; -use option::Option; -use slice::SliceExt; // UTF-8 ranges and tags for encoding characters const TAG_CONT: u8 = 
0b1000_0000; @@ -69,7 +70,7 @@ const MAX_THREE_B: u32 = 0x10000; /// Point], but only ones within a certain range. `MAX` is the highest valid /// code point that's a valid [Unicode Scalar Value]. /// -/// [`char`]: ../primitive.char.html +/// [`char`]: ../../std/primitive.char.html /// [Unicode Scalar Value]: http://www.unicode.org/glossary/#unicode_scalar_value /// [Code Point]: http://www.unicode.org/glossary/#code_point #[stable(feature = "rust1", since = "1.0.0")] @@ -91,8 +92,8 @@ pub const MAX: char = '\u{10ffff}'; /// [`char`]s. `from_u32()` will return `None` if the input is not a valid value /// for a [`char`]. /// -/// [`char`]: ../primitive.char.html -/// [`u32`]: ../primitive.u32.html +/// [`char`]: ../../std/primitive.char.html +/// [`u32`]: ../../std/primitive.u32.html /// [`as`]: ../../book/casting-between-types.html#as /// /// For an unsafe version of this function which ignores these checks, see @@ -124,12 +125,7 @@ pub const MAX: char = '\u{10ffff}'; #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_u32(i: u32) -> Option { - // catch out-of-bounds and surrogates - if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { - None - } else { - Some(unsafe { from_u32_unchecked(i) }) - } + char::try_from(i).ok() } /// Converts a `u32` to a `char`, ignoring validity. @@ -148,8 +144,8 @@ pub fn from_u32(i: u32) -> Option { /// [`char`]s. `from_u32_unchecked()` will ignore this, and blindly cast to /// [`char`], possibly creating an invalid one. 
/// -/// [`char`]: ../primitive.char.html -/// [`u32`]: ../primitive.u32.html +/// [`char`]: ../../std/primitive.char.html +/// [`u32`]: ../../std/primitive.u32.html /// [`as`]: ../../book/casting-between-types.html#as /// /// # Safety @@ -177,12 +173,72 @@ pub unsafe fn from_u32_unchecked(i: u32) -> char { transmute(i) } +#[stable(feature = "char_convert", since = "1.13.0")] +impl From for u32 { + #[inline] + fn from(c: char) -> Self { + c as u32 + } +} + +/// Maps a byte in 0x00...0xFF to a `char` whose code point has the same value, in U+0000 to U+00FF. +/// +/// Unicode is designed such that this effectively decodes bytes +/// with the character encoding that IANA calls ISO-8859-1. +/// This encoding is compatible with ASCII. +/// +/// Note that this is different from ISO/IEC 8859-1 a.k.a. ISO 8859-1 (with one less hypen), +/// which leaves some "blanks", byte values that are not assigned to any character. +/// ISO-8859-1 (the IANA one) assigns them to the C0 and C1 control codes. +/// +/// Note that this is *also* different from Windows-1252 a.k.a. code page 1252, +/// which is a superset ISO/IEC 8859-1 that assigns some (not all!) blanks +/// to punctuation and various Latin characters. +/// +/// To confuse things further, [on the Web](https://encoding.spec.whatwg.org/) +/// `ascii`, `iso-8859-1`, and `windows-1252` are all aliases +/// for a superset of Windows-1252 that fills the remaining blanks with corresponding +/// C0 and C1 control codes. +#[stable(feature = "char_convert", since = "1.13.0")] +impl From for char { + #[inline] + fn from(i: u8) -> Self { + i as char + } +} + +#[unstable(feature = "try_from", issue = "33417")] +impl TryFrom for char { + type Err = CharTryFromError; + + #[inline] + fn try_from(i: u32) -> Result { + if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { + Err(CharTryFromError(())) + } else { + Ok(unsafe { from_u32_unchecked(i) }) + } + } +} + +/// The error type returned when a conversion from u32 to char fails. 
+#[unstable(feature = "try_from", issue = "33417")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct CharTryFromError(()); + +#[unstable(feature = "try_from", issue = "33417")] +impl fmt::Display for CharTryFromError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + "converted integer out of range for `char`".fmt(f) + } +} + /// Converts a digit in the given radix to a `char`. /// /// A 'radix' here is sometimes also called a 'base'. A radix of two /// indicates a binary number, a radix of ten, decimal, and a radix of /// sixteen, hexadecimal, to give some common values. Arbitrary -/// radicum are supported. +/// radices are supported. /// /// `from_digit()` will return `None` if the input is not a digit in /// the given radix. @@ -255,7 +311,7 @@ pub fn from_digit(num: u32, radix: u32) -> Option { #[doc(hidden)] #[unstable(feature = "core_char_ext", reason = "the stable interface is `impl char` in later crate", - issue = "27701")] + issue = "32110")] pub trait CharExt { #[stable(feature = "core", since = "1.6.0")] fn is_digit(self, radix: u32) -> bool; @@ -265,14 +321,16 @@ pub trait CharExt { fn escape_unicode(self) -> EscapeUnicode; #[stable(feature = "core", since = "1.6.0")] fn escape_default(self) -> EscapeDefault; + #[unstable(feature = "char_escape_debug", issue = "35068")] + fn escape_debug(self) -> EscapeDebug; #[stable(feature = "core", since = "1.6.0")] fn len_utf8(self) -> usize; #[stable(feature = "core", since = "1.6.0")] fn len_utf16(self) -> usize; - #[stable(feature = "core", since = "1.6.0")] - fn encode_utf8(self, dst: &mut [u8]) -> Option; - #[stable(feature = "core", since = "1.6.0")] - fn encode_utf16(self, dst: &mut [u16]) -> Option; + #[unstable(feature = "unicode", issue = "27784")] + fn encode_utf8(self, dst: &mut [u8]) -> &mut str; + #[unstable(feature = "unicode", issue = "27784")] + fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16]; } #[stable(feature = "core", since = "1.6.0")] @@ -299,7 +357,20 @@ impl CharExt 
for char { #[inline] fn escape_unicode(self) -> EscapeUnicode { - EscapeUnicode { c: self, state: EscapeUnicodeState::Backslash } + let c = self as u32; + + // or-ing 1 ensures that for c==0 the code computes that one + // digit should be printed and (which is the same) avoids the + // (31 - 32) underflow + let msb = 31 - (c | 1).leading_zeros(); + + // the index of the most significant hex digit + let ms_hex_digit = msb / 4; + EscapeUnicode { + c: self, + state: EscapeUnicodeState::Backslash, + hex_digit_idx: ms_hex_digit as usize, + } } #[inline] @@ -315,6 +386,19 @@ impl CharExt for char { EscapeDefault { state: init_state } } + #[inline] + fn escape_debug(self) -> EscapeDebug { + let init_state = match self { + '\t' => EscapeDefaultState::Backslash('t'), + '\r' => EscapeDefaultState::Backslash('r'), + '\n' => EscapeDefaultState::Backslash('n'), + '\\' | '\'' | '"' => EscapeDefaultState::Backslash(self), + c if is_printable(c) => EscapeDefaultState::Char(c), + c => EscapeDefaultState::Unicode(c.escape_unicode()), + }; + EscapeDebug(EscapeDefault { state: init_state }) + } + #[inline] fn len_utf8(self) -> usize { let code = self as u32; @@ -336,75 +420,59 @@ impl CharExt for char { } #[inline] - fn encode_utf8(self, dst: &mut [u8]) -> Option { - encode_utf8_raw(self as u32, dst) + fn encode_utf8(self, dst: &mut [u8]) -> &mut str { + let code = self as u32; + unsafe { + let len = + if code < MAX_ONE_B && !dst.is_empty() { + *dst.get_unchecked_mut(0) = code as u8; + 1 + } else if code < MAX_TWO_B && dst.len() >= 2 { + *dst.get_unchecked_mut(0) = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; + *dst.get_unchecked_mut(1) = (code & 0x3F) as u8 | TAG_CONT; + 2 + } else if code < MAX_THREE_B && dst.len() >= 3 { + *dst.get_unchecked_mut(0) = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; + *dst.get_unchecked_mut(1) = (code >> 6 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(2) = (code & 0x3F) as u8 | TAG_CONT; + 3 + } else if dst.len() >= 4 { + *dst.get_unchecked_mut(0) = (code >> 
18 & 0x07) as u8 | TAG_FOUR_B; + *dst.get_unchecked_mut(1) = (code >> 12 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(2) = (code >> 6 & 0x3F) as u8 | TAG_CONT; + *dst.get_unchecked_mut(3) = (code & 0x3F) as u8 | TAG_CONT; + 4 + } else { + panic!("encode_utf8: need {} bytes to encode U+{:X}, but the buffer has {}", + from_u32_unchecked(code).len_utf8(), + code, + dst.len()) + }; + transmute(slice::from_raw_parts_mut(dst.as_mut_ptr(), len)) + } } #[inline] - fn encode_utf16(self, dst: &mut [u16]) -> Option { - encode_utf16_raw(self as u32, dst) - } -} - -/// Encodes a raw u32 value as UTF-8 into the provided byte buffer, -/// and then returns the number of bytes written. -/// -/// If the buffer is not large enough, nothing will be written into it -/// and a `None` will be returned. -#[inline] -#[unstable(feature = "char_internals", - reason = "this function should not be exposed publicly", - issue = "0")] -#[doc(hidden)] -pub fn encode_utf8_raw(code: u32, dst: &mut [u8]) -> Option { - // Marked #[inline] to allow llvm optimizing it away - if code < MAX_ONE_B && !dst.is_empty() { - dst[0] = code as u8; - Some(1) - } else if code < MAX_TWO_B && dst.len() >= 2 { - dst[0] = (code >> 6 & 0x1F) as u8 | TAG_TWO_B; - dst[1] = (code & 0x3F) as u8 | TAG_CONT; - Some(2) - } else if code < MAX_THREE_B && dst.len() >= 3 { - dst[0] = (code >> 12 & 0x0F) as u8 | TAG_THREE_B; - dst[1] = (code >> 6 & 0x3F) as u8 | TAG_CONT; - dst[2] = (code & 0x3F) as u8 | TAG_CONT; - Some(3) - } else if dst.len() >= 4 { - dst[0] = (code >> 18 & 0x07) as u8 | TAG_FOUR_B; - dst[1] = (code >> 12 & 0x3F) as u8 | TAG_CONT; - dst[2] = (code >> 6 & 0x3F) as u8 | TAG_CONT; - dst[3] = (code & 0x3F) as u8 | TAG_CONT; - Some(4) - } else { - None - } -} - -/// Encodes a raw u32 value as UTF-16 into the provided `u16` buffer, -/// and then returns the number of `u16`s written. -/// -/// If the buffer is not large enough, nothing will be written into it -/// and a `None` will be returned. 
-#[inline] -#[unstable(feature = "char_internals", - reason = "this function should not be exposed publicly", - issue = "0")] -#[doc(hidden)] -pub fn encode_utf16_raw(mut ch: u32, dst: &mut [u16]) -> Option { - // Marked #[inline] to allow llvm optimizing it away - if (ch & 0xFFFF) == ch && !dst.is_empty() { - // The BMP falls through (assuming non-surrogate, as it should) - dst[0] = ch as u16; - Some(1) - } else if dst.len() >= 2 { - // Supplementary planes break into surrogates. - ch -= 0x1_0000; - dst[0] = 0xD800 | ((ch >> 10) as u16); - dst[1] = 0xDC00 | ((ch as u16) & 0x3FF); - Some(2) - } else { - None + fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] { + let mut code = self as u32; + unsafe { + if (code & 0xFFFF) == code && !dst.is_empty() { + // The BMP falls through (assuming non-surrogate, as it should) + *dst.get_unchecked_mut(0) = code as u16; + slice::from_raw_parts_mut(dst.as_mut_ptr(), 1) + } else if dst.len() >= 2 { + // Supplementary planes break into surrogates. + code -= 0x1_0000; + *dst.get_unchecked_mut(0) = 0xD800 | ((code >> 10) as u16); + *dst.get_unchecked_mut(1) = 0xDC00 | ((code as u16) & 0x3FF); + slice::from_raw_parts_mut(dst.as_mut_ptr(), 2) + } else { + panic!("encode_utf16: need {} units to encode U+{:X}, but the buffer has {}", + from_u32_unchecked(code).len_utf16(), + code, + dst.len()) + } + } } } @@ -414,23 +482,31 @@ pub fn encode_utf16_raw(mut ch: u32, dst: &mut [u16]) -> Option { /// This `struct` is created by the [`escape_unicode()`] method on [`char`]. See /// its documentation for more. 
/// -/// [`escape_unicode()`]: ../primitive.char.html#method.escape_unicode -/// [`char`]: ../primitive.char.html -#[derive(Clone)] +/// [`escape_unicode()`]: ../../std/primitive.char.html#method.escape_unicode +/// [`char`]: ../../std/primitive.char.html +#[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct EscapeUnicode { c: char, - state: EscapeUnicodeState + state: EscapeUnicodeState, + + // The index of the next hex digit to be printed (0 if none), + // i.e. the number of remaining hex digits to be printed; + // increasing from the least significant digit: 0x543210 + hex_digit_idx: usize, } -#[derive(Clone)] +// The enum values are ordered so that their representation is the +// same as the remaining length (besides the hexadecimal digits). This +// likely makes `len()` a single load from memory) and inline-worth. +#[derive(Clone, Debug)] enum EscapeUnicodeState { - Backslash, - Type, - LeftBrace, - Value(usize), - RightBrace, Done, + RightBrace, + Value, + LeftBrace, + Type, + Backslash, } #[stable(feature = "rust1", since = "1.0.0")] @@ -448,19 +524,16 @@ impl Iterator for EscapeUnicode { Some('u') } EscapeUnicodeState::LeftBrace => { - let mut n = 0; - while (self.c as u32) >> (4 * (n + 1)) != 0 { - n += 1; - } - self.state = EscapeUnicodeState::Value(n); + self.state = EscapeUnicodeState::Value; Some('{') } - EscapeUnicodeState::Value(offset) => { - let c = from_digit(((self.c as u32) >> (offset * 4)) & 0xf, 16).unwrap(); - if offset == 0 { + EscapeUnicodeState::Value => { + let hex_digit = ((self.c as u32) >> (self.hex_digit_idx * 4)) & 0xf; + let c = from_digit(hex_digit, 16).unwrap(); + if self.hex_digit_idx == 0 { self.state = EscapeUnicodeState::RightBrace; } else { - self.state = EscapeUnicodeState::Value(offset - 1); + self.hex_digit_idx -= 1; } Some(c) } @@ -472,41 +545,67 @@ impl Iterator for EscapeUnicode { } } + #[inline] fn size_hint(&self) -> (usize, Option) { - let mut n = 0; - while (self.c as usize) >> (4 * (n + 
1)) != 0 { - n += 1; + let n = self.len(); + (n, Some(n)) + } + + #[inline] + fn count(self) -> usize { + self.len() + } + + fn last(self) -> Option { + match self.state { + EscapeUnicodeState::Done => None, + + EscapeUnicodeState::RightBrace | + EscapeUnicodeState::Value | + EscapeUnicodeState::LeftBrace | + EscapeUnicodeState::Type | + EscapeUnicodeState::Backslash => Some('}'), } - let n = match self.state { - EscapeUnicodeState::Backslash => n + 5, - EscapeUnicodeState::Type => n + 4, - EscapeUnicodeState::LeftBrace => n + 3, - EscapeUnicodeState::Value(offset) => offset + 2, - EscapeUnicodeState::RightBrace => 1, + } +} + +#[stable(feature = "exact_size_escape", since = "1.11.0")] +impl ExactSizeIterator for EscapeUnicode { + #[inline] + fn len(&self) -> usize { + // The match is a single memory access with no branching + self.hex_digit_idx + match self.state { EscapeUnicodeState::Done => 0, - }; - (n, Some(n)) + EscapeUnicodeState::RightBrace => 1, + EscapeUnicodeState::Value => 2, + EscapeUnicodeState::LeftBrace => 3, + EscapeUnicodeState::Type => 4, + EscapeUnicodeState::Backslash => 5, + } } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeUnicode {} + /// An iterator that yields the literal escape code of a `char`. /// /// This `struct` is created by the [`escape_default()`] method on [`char`]. See /// its documentation for more. 
/// -/// [`escape_default()`]: ../primitive.char.html#method.escape_default -/// [`char`]: ../primitive.char.html -#[derive(Clone)] +/// [`escape_default()`]: ../../std/primitive.char.html#method.escape_default +/// [`char`]: ../../std/primitive.char.html +#[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct EscapeDefault { state: EscapeDefaultState } -#[derive(Clone)] +#[derive(Clone, Debug)] enum EscapeDefaultState { - Backslash(char), - Char(char), Done, + Char(char), + Backslash(char), Unicode(EscapeUnicode), } @@ -529,22 +628,15 @@ impl Iterator for EscapeDefault { } } + #[inline] fn size_hint(&self) -> (usize, Option) { - match self.state { - EscapeDefaultState::Char(_) => (1, Some(1)), - EscapeDefaultState::Backslash(_) => (2, Some(2)), - EscapeDefaultState::Unicode(ref iter) => iter.size_hint(), - EscapeDefaultState::Done => (0, Some(0)), - } + let n = self.len(); + (n, Some(n)) } + #[inline] fn count(self) -> usize { - match self.state { - EscapeDefaultState::Char(_) => 1, - EscapeDefaultState::Unicode(iter) => iter.count(), - EscapeDefaultState::Done => 0, - EscapeDefaultState::Backslash(_) => 2, - } + self.len() } fn nth(&mut self, n: usize) -> Option { @@ -583,3 +675,149 @@ impl Iterator for EscapeDefault { } } } + +#[stable(feature = "exact_size_escape", since = "1.11.0")] +impl ExactSizeIterator for EscapeDefault { + fn len(&self) -> usize { + match self.state { + EscapeDefaultState::Done => 0, + EscapeDefaultState::Char(_) => 1, + EscapeDefaultState::Backslash(_) => 2, + EscapeDefaultState::Unicode(ref iter) => iter.len(), + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeDefault {} + +/// An iterator that yields the literal escape code of a `char`. +/// +/// This `struct` is created by the [`escape_debug()`] method on [`char`]. See its +/// documentation for more. 
+/// +/// [`escape_debug()`]: ../../std/primitive.char.html#method.escape_debug +/// [`char`]: ../../std/primitive.char.html +#[unstable(feature = "char_escape_debug", issue = "35068")] +#[derive(Clone, Debug)] +pub struct EscapeDebug(EscapeDefault); + +#[unstable(feature = "char_escape_debug", issue = "35068")] +impl Iterator for EscapeDebug { + type Item = char; + fn next(&mut self) -> Option { self.0.next() } + fn size_hint(&self) -> (usize, Option) { self.0.size_hint() } +} + +#[unstable(feature = "char_escape_debug", issue = "35068")] +impl ExactSizeIterator for EscapeDebug { } + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for EscapeDebug {} + + + +/// An iterator over an iterator of bytes of the characters the bytes represent +/// as UTF-8 +#[unstable(feature = "decode_utf8", issue = "33906")] +#[derive(Clone, Debug)] +pub struct DecodeUtf8>(::iter::Peekable); + +/// Decodes an `Iterator` of bytes as UTF-8. +#[unstable(feature = "decode_utf8", issue = "33906")] +#[inline] +pub fn decode_utf8>(i: I) -> DecodeUtf8 { + DecodeUtf8(i.into_iter().peekable()) +} + +/// `::next` returns this for an invalid input sequence. +#[unstable(feature = "decode_utf8", issue = "33906")] +#[derive(PartialEq, Eq, Debug)] +pub struct InvalidSequence(()); + +#[unstable(feature = "decode_utf8", issue = "33906")] +impl> Iterator for DecodeUtf8 { + type Item = Result; + #[inline] + + fn next(&mut self) -> Option> { + self.0.next().map(|first_byte| { + // Emit InvalidSequence according to + // Unicode §5.22 Best Practice for U+FFFD Substitution + // http://www.unicode.org/versions/Unicode9.0.0/ch05.pdf#G40630 + + // Roughly: consume at least one byte, + // then validate one byte at a time and stop before the first unexpected byte + // (which might be the valid start of the next byte sequence). + + let mut code_point; + macro_rules! first_byte { + ($mask: expr) => { + code_point = u32::from(first_byte & $mask) + } + } + macro_rules! 
continuation_byte { + () => { continuation_byte!(0x80...0xBF) }; + ($range: pat) => { + match self.0.peek() { + Some(&byte @ $range) => { + code_point = (code_point << 6) | u32::from(byte & 0b0011_1111); + self.0.next(); + } + _ => return Err(InvalidSequence(())) + } + } + } + + match first_byte { + 0x00...0x7F => { + first_byte!(0b1111_1111); + } + 0xC2...0xDF => { + first_byte!(0b0001_1111); + continuation_byte!(); + } + 0xE0 => { + first_byte!(0b0000_1111); + continuation_byte!(0xA0...0xBF); // 0x80...0x9F here are overlong + continuation_byte!(); + } + 0xE1...0xEC | 0xEE...0xEF => { + first_byte!(0b0000_1111); + continuation_byte!(); + continuation_byte!(); + } + 0xED => { + first_byte!(0b0000_1111); + continuation_byte!(0x80...0x9F); // 0xA0..0xBF here are surrogates + continuation_byte!(); + } + 0xF0 => { + first_byte!(0b0000_0111); + continuation_byte!(0x90...0xBF); // 0x80..0x8F here are overlong + continuation_byte!(); + continuation_byte!(); + } + 0xF1...0xF3 => { + first_byte!(0b0000_0111); + continuation_byte!(); + continuation_byte!(); + continuation_byte!(); + } + 0xF4 => { + first_byte!(0b0000_0111); + continuation_byte!(0x80...0x8F); // 0x90..0xBF here are beyond char::MAX + continuation_byte!(); + continuation_byte!(); + } + _ => return Err(InvalidSequence(())) // Illegal first byte, overlong, or beyond MAX + } + unsafe { + Ok(from_u32_unchecked(code_point)) + } + }) + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl> FusedIterator for DecodeUtf8 {} diff --git a/src/libcore/char_private.rs b/src/libcore/char_private.rs new file mode 100644 index 0000000000000..ddc473592a260 --- /dev/null +++ b/src/libcore/char_private.rs @@ -0,0 +1,826 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// NOTE: The following code was generated by "src/etc/char_private.py", +// do not edit directly! + +use slice::SliceExt; + +fn check(x: u16, singletons: &[u16], normal: &[u16]) -> bool { + for &s in singletons { + if x == s { + return false; + } else if x < s { + break; + } + } + for w in normal.chunks(2) { + let start = w[0]; + let len = w[1]; + let difference = (x as i32) - (start as i32); + if 0 <= difference { + if difference < len as i32 { + return false; + } + } else { + break; + } + } + true +} + +pub fn is_printable(x: char) -> bool { + let x = x as u32; + let lower = x as u16; + if x < 0x10000 { + check(lower, SINGLETONS0, NORMAL0) + } else if x < 0x20000 { + check(lower, SINGLETONS1, NORMAL1) + } else { + if 0x2a6d7 <= x && x < 0x2a700 { + return false; + } + if 0x2b735 <= x && x < 0x2b740 { + return false; + } + if 0x2b81e <= x && x < 0x2b820 { + return false; + } + if 0x2cea2 <= x && x < 0x2f800 { + return false; + } + if 0x2fa1e <= x && x < 0xe0100 { + return false; + } + if 0xe01f0 <= x && x < 0x110000 { + return false; + } + true + } +} + +const SINGLETONS0: &'static [u16] = &[ + 0xad, + 0x378, + 0x379, + 0x38b, + 0x38d, + 0x3a2, + 0x530, + 0x557, + 0x558, + 0x560, + 0x588, + 0x58b, + 0x58c, + 0x590, + 0x61c, + 0x61d, + 0x6dd, + 0x70e, + 0x70f, + 0x74b, + 0x74c, + 0x82e, + 0x82f, + 0x83f, + 0x85c, + 0x85d, + 0x8b5, + 0x8e2, + 0x984, + 0x98d, + 0x98e, + 0x991, + 0x992, + 0x9a9, + 0x9b1, + 0x9ba, + 0x9bb, + 0x9c5, + 0x9c6, + 0x9c9, + 0x9ca, + 0x9de, + 0x9e4, + 0x9e5, + 0xa04, + 0xa11, + 0xa12, + 0xa29, + 0xa31, + 0xa34, + 0xa37, + 0xa3a, + 0xa3b, + 0xa3d, + 0xa49, + 0xa4a, + 0xa5d, + 0xa84, + 0xa8e, + 0xa92, + 0xaa9, + 0xab1, + 0xab4, + 0xaba, + 0xabb, + 0xac6, + 0xaca, + 0xace, + 0xacf, + 0xae4, + 0xae5, + 0xb04, + 0xb0d, + 0xb0e, + 0xb11, + 0xb12, + 0xb29, + 0xb31, + 0xb34, + 0xb3a, + 0xb3b, + 0xb45, + 0xb46, + 0xb49, + 0xb4a, + 0xb5e, + 0xb64, + 0xb65, + 
0xb84, + 0xb91, + 0xb9b, + 0xb9d, + 0xbc9, + 0xbce, + 0xbcf, + 0xc04, + 0xc0d, + 0xc11, + 0xc29, + 0xc45, + 0xc49, + 0xc57, + 0xc64, + 0xc65, + 0xc84, + 0xc8d, + 0xc91, + 0xca9, + 0xcb4, + 0xcba, + 0xcbb, + 0xcc5, + 0xcc9, + 0xcdf, + 0xce4, + 0xce5, + 0xcf0, + 0xd04, + 0xd0d, + 0xd11, + 0xd3b, + 0xd3c, + 0xd45, + 0xd49, + 0xd64, + 0xd65, + 0xd80, + 0xd81, + 0xd84, + 0xdb2, + 0xdbc, + 0xdbe, + 0xdbf, + 0xdd5, + 0xdd7, + 0xdf0, + 0xdf1, + 0xe83, + 0xe85, + 0xe86, + 0xe89, + 0xe8b, + 0xe8c, + 0xe98, + 0xea0, + 0xea4, + 0xea6, + 0xea8, + 0xea9, + 0xeac, + 0xeba, + 0xebe, + 0xebf, + 0xec5, + 0xec7, + 0xece, + 0xecf, + 0xeda, + 0xedb, + 0xf48, + 0xf98, + 0xfbd, + 0xfcd, + 0x10c6, + 0x10ce, + 0x10cf, + 0x1249, + 0x124e, + 0x124f, + 0x1257, + 0x1259, + 0x125e, + 0x125f, + 0x1289, + 0x128e, + 0x128f, + 0x12b1, + 0x12b6, + 0x12b7, + 0x12bf, + 0x12c1, + 0x12c6, + 0x12c7, + 0x12d7, + 0x1311, + 0x1316, + 0x1317, + 0x135b, + 0x135c, + 0x13f6, + 0x13f7, + 0x13fe, + 0x13ff, + 0x1680, + 0x170d, + 0x176d, + 0x1771, + 0x17de, + 0x17df, + 0x180e, + 0x180f, + 0x191f, + 0x196e, + 0x196f, + 0x1a1c, + 0x1a1d, + 0x1a5f, + 0x1a7d, + 0x1a7e, + 0x1aae, + 0x1aaf, + 0x1cf7, + 0x1f16, + 0x1f17, + 0x1f1e, + 0x1f1f, + 0x1f46, + 0x1f47, + 0x1f4e, + 0x1f4f, + 0x1f58, + 0x1f5a, + 0x1f5c, + 0x1f5e, + 0x1f7e, + 0x1f7f, + 0x1fb5, + 0x1fc5, + 0x1fd4, + 0x1fd5, + 0x1fdc, + 0x1ff0, + 0x1ff1, + 0x1ff5, + 0x2072, + 0x2073, + 0x208f, + 0x23ff, + 0x2b74, + 0x2b75, + 0x2b96, + 0x2b97, + 0x2bc9, + 0x2c2f, + 0x2c5f, + 0x2d26, + 0x2d2e, + 0x2d2f, + 0x2da7, + 0x2daf, + 0x2db7, + 0x2dbf, + 0x2dc7, + 0x2dcf, + 0x2dd7, + 0x2ddf, + 0x2e9a, + 0x3040, + 0x3097, + 0x3098, + 0x318f, + 0x321f, + 0x32ff, + 0xa7af, + 0xa8fe, + 0xa8ff, + 0xa9ce, + 0xa9ff, + 0xaa4e, + 0xaa4f, + 0xaa5a, + 0xaa5b, + 0xab07, + 0xab08, + 0xab0f, + 0xab10, + 0xab27, + 0xab2f, + 0xabee, + 0xabef, + 0xfa6e, + 0xfa6f, + 0xfb37, + 0xfb3d, + 0xfb3f, + 0xfb42, + 0xfb45, + 0xfd90, + 0xfd91, + 0xfdfe, + 0xfdff, + 0xfe53, + 0xfe67, + 0xfe75, + 0xffc8, + 
0xffc9, + 0xffd0, + 0xffd1, + 0xffd8, + 0xffd9, + 0xffe7, + 0xfffe, + 0xffff, +]; +const SINGLETONS1: &'static [u16] = &[ + 0xc, + 0x27, + 0x3b, + 0x3e, + 0x4e, + 0x4f, + 0x18f, + 0x39e, + 0x49e, + 0x49f, + 0x806, + 0x807, + 0x809, + 0x836, + 0x83d, + 0x83e, + 0x856, + 0x8f3, + 0x9d0, + 0x9d1, + 0xa04, + 0xa14, + 0xa18, + 0xb56, + 0xb57, + 0x10bd, + 0x1135, + 0x11ce, + 0x11cf, + 0x11e0, + 0x1212, + 0x1287, + 0x1289, + 0x128e, + 0x129e, + 0x1304, + 0x130d, + 0x130e, + 0x1311, + 0x1312, + 0x1329, + 0x1331, + 0x1334, + 0x133a, + 0x133b, + 0x1345, + 0x1346, + 0x1349, + 0x134a, + 0x134e, + 0x134f, + 0x1364, + 0x1365, + 0x145a, + 0x145c, + 0x15b6, + 0x15b7, + 0x1c09, + 0x1c37, + 0x1c90, + 0x1c91, + 0x1ca8, + 0x246f, + 0x6a5f, + 0x6aee, + 0x6aef, + 0x6b5a, + 0x6b62, + 0xbc9a, + 0xbc9b, + 0xd127, + 0xd128, + 0xd455, + 0xd49d, + 0xd4a0, + 0xd4a1, + 0xd4a3, + 0xd4a4, + 0xd4a7, + 0xd4a8, + 0xd4ad, + 0xd4ba, + 0xd4bc, + 0xd4c4, + 0xd506, + 0xd50b, + 0xd50c, + 0xd515, + 0xd51d, + 0xd53a, + 0xd53f, + 0xd545, + 0xd551, + 0xd6a6, + 0xd6a7, + 0xd7cc, + 0xd7cd, + 0xdaa0, + 0xe007, + 0xe019, + 0xe01a, + 0xe022, + 0xe025, + 0xe8c5, + 0xe8c6, + 0xee04, + 0xee20, + 0xee23, + 0xee25, + 0xee26, + 0xee28, + 0xee33, + 0xee38, + 0xee3a, + 0xee48, + 0xee4a, + 0xee4c, + 0xee50, + 0xee53, + 0xee55, + 0xee56, + 0xee58, + 0xee5a, + 0xee5c, + 0xee5e, + 0xee60, + 0xee63, + 0xee65, + 0xee66, + 0xee6b, + 0xee73, + 0xee78, + 0xee7d, + 0xee7f, + 0xee8a, + 0xeea4, + 0xeeaa, + 0xf0af, + 0xf0b0, + 0xf0c0, + 0xf0d0, + 0xf12f, + 0xf91f, + 0xf931, + 0xf932, + 0xf93f, +]; +const NORMAL0: &'static [u16] = &[ + 0x0, 0x20, + 0x7f, 0x22, + 0x380, 0x4, + 0x5c8, 0x8, + 0x5eb, 0x5, + 0x5f5, 0x11, + 0x7b2, 0xe, + 0x7fb, 0x5, + 0x85f, 0x41, + 0x8be, 0x16, + 0x9b3, 0x3, + 0x9cf, 0x8, + 0x9d8, 0x4, + 0x9fc, 0x5, + 0xa0b, 0x4, + 0xa43, 0x4, + 0xa4e, 0x3, + 0xa52, 0x7, + 0xa5f, 0x7, + 0xa76, 0xb, + 0xad1, 0xf, + 0xaf2, 0x7, + 0xafa, 0x7, + 0xb4e, 0x8, + 0xb58, 0x4, + 0xb78, 0xa, + 0xb8b, 0x3, + 0xb96, 0x3, + 0xba0, 0x3, + 
0xba5, 0x3, + 0xbab, 0x3, + 0xbba, 0x4, + 0xbc3, 0x3, + 0xbd1, 0x6, + 0xbd8, 0xe, + 0xbfb, 0x5, + 0xc3a, 0x3, + 0xc4e, 0x7, + 0xc5b, 0x5, + 0xc70, 0x8, + 0xcce, 0x7, + 0xcd7, 0x7, + 0xcf3, 0xe, + 0xd50, 0x4, + 0xd97, 0x3, + 0xdc7, 0x3, + 0xdcb, 0x4, + 0xde0, 0x6, + 0xdf5, 0xc, + 0xe3b, 0x4, + 0xe5c, 0x25, + 0xe8e, 0x6, + 0xee0, 0x20, + 0xf6d, 0x4, + 0xfdb, 0x25, + 0x10c8, 0x5, + 0x137d, 0x3, + 0x139a, 0x6, + 0x169d, 0x3, + 0x16f9, 0x7, + 0x1715, 0xb, + 0x1737, 0x9, + 0x1754, 0xc, + 0x1774, 0xc, + 0x17ea, 0x6, + 0x17fa, 0x6, + 0x181a, 0x6, + 0x1878, 0x8, + 0x18ab, 0x5, + 0x18f6, 0xa, + 0x192c, 0x4, + 0x193c, 0x4, + 0x1941, 0x3, + 0x1975, 0xb, + 0x19ac, 0x4, + 0x19ca, 0x6, + 0x19db, 0x3, + 0x1a8a, 0x6, + 0x1a9a, 0x6, + 0x1abf, 0x41, + 0x1b4c, 0x4, + 0x1b7d, 0x3, + 0x1bf4, 0x8, + 0x1c38, 0x3, + 0x1c4a, 0x3, + 0x1c89, 0x37, + 0x1cc8, 0x8, + 0x1cfa, 0x6, + 0x1df6, 0x5, + 0x1fff, 0x11, + 0x2028, 0x8, + 0x205f, 0x11, + 0x209d, 0x3, + 0x20bf, 0x11, + 0x20f1, 0xf, + 0x218c, 0x4, + 0x2427, 0x19, + 0x244b, 0x15, + 0x2bba, 0x3, + 0x2bd2, 0x1a, + 0x2bf0, 0x10, + 0x2cf4, 0x5, + 0x2d28, 0x5, + 0x2d68, 0x7, + 0x2d71, 0xe, + 0x2d97, 0x9, + 0x2e45, 0x3b, + 0x2ef4, 0xc, + 0x2fd6, 0x1a, + 0x2ffc, 0x5, + 0x3100, 0x5, + 0x312e, 0x3, + 0x31bb, 0x5, + 0x31e4, 0xc, + 0x4db6, 0xa, + 0x9fd6, 0x2a, + 0xa48d, 0x3, + 0xa4c7, 0x9, + 0xa62c, 0x14, + 0xa6f8, 0x8, + 0xa7b8, 0x3f, + 0xa82c, 0x4, + 0xa83a, 0x6, + 0xa878, 0x8, + 0xa8c6, 0x8, + 0xa8da, 0x6, + 0xa954, 0xb, + 0xa97d, 0x3, + 0xa9da, 0x4, + 0xaa37, 0x9, + 0xaac3, 0x18, + 0xaaf7, 0xa, + 0xab17, 0x9, + 0xab66, 0xa, + 0xabfa, 0x6, + 0xd7a4, 0xc, + 0xd7c7, 0x4, + 0xd7fc, 0x2104, + 0xfada, 0x26, + 0xfb07, 0xc, + 0xfb18, 0x5, + 0xfbc2, 0x11, + 0xfd40, 0x10, + 0xfdc8, 0x28, + 0xfe1a, 0x6, + 0xfe6c, 0x4, + 0xfefd, 0x4, + 0xffbf, 0x3, + 0xffdd, 0x3, + 0xffef, 0xd, +]; +const NORMAL1: &'static [u16] = &[ + 0x5e, 0x22, + 0xfb, 0x5, + 0x103, 0x4, + 0x134, 0x3, + 0x19c, 0x4, + 0x1a1, 0x2f, + 0x1fe, 0x82, + 0x29d, 0x3, + 0x2d1, 0xf, + 0x2fc, 0x4, + 
0x324, 0xc, + 0x34b, 0x5, + 0x37b, 0x5, + 0x3c4, 0x4, + 0x3d6, 0x2a, + 0x4aa, 0x6, + 0x4d4, 0x4, + 0x4fc, 0x4, + 0x528, 0x8, + 0x564, 0xb, + 0x570, 0x90, + 0x737, 0x9, + 0x756, 0xa, + 0x768, 0x98, + 0x839, 0x3, + 0x89f, 0x8, + 0x8b0, 0x30, + 0x8f6, 0x5, + 0x91c, 0x3, + 0x93a, 0x5, + 0x940, 0x40, + 0x9b8, 0x4, + 0xa07, 0x5, + 0xa34, 0x4, + 0xa3b, 0x4, + 0xa48, 0x8, + 0xa59, 0x7, + 0xaa0, 0x20, + 0xae7, 0x4, + 0xaf7, 0x9, + 0xb36, 0x3, + 0xb73, 0x5, + 0xb92, 0x7, + 0xb9d, 0xc, + 0xbb0, 0x50, + 0xc49, 0x37, + 0xcb3, 0xd, + 0xcf3, 0x7, + 0xd00, 0x160, + 0xe7f, 0x181, + 0x104e, 0x4, + 0x1070, 0xf, + 0x10c2, 0xe, + 0x10e9, 0x7, + 0x10fa, 0x6, + 0x1144, 0xc, + 0x1177, 0x9, + 0x11f5, 0xb, + 0x123f, 0x41, + 0x12aa, 0x6, + 0x12eb, 0x5, + 0x12fa, 0x6, + 0x1351, 0x6, + 0x1358, 0x5, + 0x136d, 0x3, + 0x1375, 0x8b, + 0x145e, 0x22, + 0x14c8, 0x8, + 0x14da, 0xa6, + 0x15de, 0x22, + 0x1645, 0xb, + 0x165a, 0x6, + 0x166d, 0x13, + 0x16b8, 0x8, + 0x16ca, 0x36, + 0x171a, 0x3, + 0x172c, 0x4, + 0x1740, 0x160, + 0x18f3, 0xc, + 0x1900, 0x1c0, + 0x1af9, 0x107, + 0x1c46, 0xa, + 0x1c6d, 0x3, + 0x1cb7, 0x349, + 0x239a, 0x66, + 0x2475, 0xb, + 0x2544, 0xabc, + 0x342f, 0xfd1, + 0x4647, 0x21b9, + 0x6a39, 0x7, + 0x6a6a, 0x4, + 0x6a70, 0x60, + 0x6af6, 0xa, + 0x6b46, 0xa, + 0x6b78, 0x5, + 0x6b90, 0x370, + 0x6f45, 0xb, + 0x6f7f, 0x10, + 0x6fa0, 0x40, + 0x6fe1, 0x1f, + 0x87ed, 0x13, + 0x8af3, 0x250d, + 0xb002, 0xbfe, + 0xbc6b, 0x5, + 0xbc7d, 0x3, + 0xbc89, 0x7, + 0xbca0, 0x1360, + 0xd0f6, 0xa, + 0xd173, 0x8, + 0xd1e9, 0x17, + 0xd246, 0xba, + 0xd357, 0x9, + 0xd372, 0x8e, + 0xd547, 0x3, + 0xda8c, 0xf, + 0xdab0, 0x550, + 0xe02b, 0x7d5, + 0xe8d7, 0x29, + 0xe94b, 0x5, + 0xe95a, 0x4, + 0xe960, 0x4a0, + 0xee3c, 0x6, + 0xee43, 0x4, + 0xee9c, 0x5, + 0xeebc, 0x34, + 0xeef2, 0x10e, + 0xf02c, 0x4, + 0xf094, 0xc, + 0xf0f6, 0xa, + 0xf10d, 0x3, + 0xf16c, 0x4, + 0xf1ad, 0x39, + 0xf203, 0xd, + 0xf23c, 0x4, + 0xf249, 0x7, + 0xf252, 0xae, + 0xf6d3, 0xd, + 0xf6ed, 0x3, + 0xf6f7, 0x9, + 0xf774, 0xc, + 0xf7d5, 0x2b, + 0xf80c, 
0x4, + 0xf848, 0x8, + 0xf85a, 0x6, + 0xf888, 0x8, + 0xf8ae, 0x62, + 0xf928, 0x8, + 0xf94c, 0x4, + 0xf95f, 0x21, + 0xf992, 0x2e, + 0xf9c1, 0x63f, +]; diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index 769faedf46e8e..d72b18ae345ce 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -8,24 +8,85 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! The `Clone` trait for types that cannot be 'implicitly copied' +//! The `Clone` trait for types that cannot be 'implicitly copied'. //! //! In Rust, some simple types are "implicitly copyable" and when you //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not -//! contain owned boxes or implement `Drop`), so the compiler considers +//! contain owned boxes or implement [`Drop`]), so the compiler considers //! them cheap and safe to copy. For other types copies must be made -//! explicitly, by convention implementing the `Clone` trait and calling -//! the `clone` method. +//! explicitly, by convention implementing the [`Clone`] trait and calling +//! the [`clone`][clone] method. +//! +//! [`Clone`]: trait.Clone.html +//! [clone]: trait.Clone.html#tymethod.clone +//! [`Drop`]: ../../std/ops/trait.Drop.html +//! +//! Basic usage example: +//! +//! ``` +//! let s = String::new(); // String type implements Clone +//! let copy = s.clone(); // so we can clone it +//! ``` +//! +//! To easily implement the Clone trait, you can also use +//! `#[derive(Clone)]`. Example: +//! +//! ``` +//! #[derive(Clone)] // we add the Clone trait to Morpheus struct +//! struct Morpheus { +//! blue_pill: f32, +//! red_pill: i64, +//! } +//! +//! fn main() { +//! let f = Morpheus { blue_pill: 0.0, red_pill: 0 }; +//! let copy = f.clone(); // and now we can clone it! +//! } +//! 
``` #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - -/// A common trait for cloning an object. +/// A common trait for the ability to explicitly duplicate an object. +/// +/// Differs from [`Copy`] in that [`Copy`] is implicit and extremely inexpensive, while +/// `Clone` is always explicit and may or may not be expensive. In order to enforce +/// these characteristics, Rust does not allow you to reimplement [`Copy`], but you +/// may reimplement `Clone` and run arbitrary code. +/// +/// Since `Clone` is more general than [`Copy`], you can automatically make anything +/// [`Copy`] be `Clone` as well. /// -/// This trait can be used with `#[derive]`. +/// ## Derivable +/// +/// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d +/// implementation of [`clone()`] calls [`clone()`] on each field. +/// +/// ## How can I implement `Clone`? +/// +/// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally: +/// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`. +/// Manual implementations should be careful to uphold this invariant; however, unsafe code +/// must not rely on it to ensure memory safety. +/// +/// An example is an array holding more than 32 elements of a type that is `Clone`; the standard +/// library only implements `Clone` up until arrays of size 32. In this case, the implementation of +/// `Clone` cannot be `derive`d, but can be implemented as: +/// +/// [`Copy`]: ../../std/marker/trait.Copy.html +/// [`clone()`]: trait.Clone.html#tymethod.clone +/// +/// ``` +/// #[derive(Copy)] +/// struct Stats { +/// frequencies: [i32; 100], +/// } +/// +/// impl Clone for Stats { +/// fn clone(&self) -> Stats { *self } +/// } +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait Clone : Sized { /// Returns a copy of the value. 
@@ -52,6 +113,23 @@ pub trait Clone : Sized { } } +// FIXME(aburka): these structs are used solely by #[derive] to +// assert that every component of a type implements Clone or Copy. +// +// These structs should never appear in user code. +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_clone_copy", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsClone { _field: ::marker::PhantomData } +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_clone_copy", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsCopy { _field: ::marker::PhantomData } + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Clone for &'a T { /// Returns a shallow copy of the reference. diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs index 3ac4ffb22364f..0daf658a0f42d 100644 --- a/src/libcore/cmp.rs +++ b/src/libcore/cmp.rs @@ -10,19 +10,33 @@ //! Functionality for ordering and comparison. //! -//! This module defines both `PartialOrd` and `PartialEq` traits which are used +//! This module defines both [`PartialOrd`] and [`PartialEq`] traits which are used //! by the compiler to implement comparison operators. Rust programs may -//! implement `PartialOrd` to overload the `<`, `<=`, `>`, and `>=` operators, -//! and may implement `PartialEq` to overload the `==` and `!=` operators. +//! implement [`PartialOrd`] to overload the `<`, `<=`, `>`, and `>=` operators, +//! and may implement [`PartialEq`] to overload the `==` and `!=` operators. +//! +//! [`PartialOrd`]: trait.PartialOrd.html +//! [`PartialEq`]: trait.PartialEq.html +//! +//! # Examples +//! +//! ``` +//! let x: u32 = 0; +//! let y: u32 = 1; +//! +//! // these two lines are equivalent +//! assert_eq!(x < y, true); +//! assert_eq!(x.lt(&y), true); +//! +//! // these two lines are also equivalent +//! assert_eq!(x == y, false); +//! assert_eq!(x.eq(&y), false); +//! 
``` #![stable(feature = "rust1", since = "1.0.0")] use self::Ordering::*; -use mem; -use marker::Sized; -use option::Option::{self, Some}; - /// Trait for equality comparisons which are [partial equivalence /// relations](http://en.wikipedia.org/wiki/Partial_equivalence_relation). /// @@ -39,12 +53,53 @@ use option::Option::{self, Some}; /// symmetrically and transitively: if `T: PartialEq` and `U: PartialEq` /// then `U: PartialEq` and `T: PartialEq`. /// +/// ## Derivable +/// +/// This trait can be used with `#[derive]`. When `derive`d on structs, two +/// instances are equal if all fields are equal, and not equal if any fields +/// are not equal. When `derive`d on enums, each variant is equal to itself +/// and not equal to the other variants. +/// +/// ## How can I implement `PartialEq`? +/// /// PartialEq only requires the `eq` method to be implemented; `ne` is defined /// in terms of it by default. Any manual implementation of `ne` *must* respect /// the rule that `eq` is a strict inverse of `ne`; that is, `!(a == b)` if and /// only if `a != b`. /// -/// This trait can be used with `#[derive]`. 
+/// An example implementation for a domain in which two books are considered +/// the same book if their ISBN matches, even if the formats differ: +/// +/// ``` +/// enum BookFormat { Paperback, Hardback, Ebook } +/// struct Book { +/// isbn: i32, +/// format: BookFormat, +/// } +/// +/// impl PartialEq for Book { +/// fn eq(&self, other: &Book) -> bool { +/// self.isbn == other.isbn +/// } +/// } +/// +/// let b1 = Book { isbn: 3, format: BookFormat::Paperback }; +/// let b2 = Book { isbn: 3, format: BookFormat::Ebook }; +/// let b3 = Book { isbn: 10, format: BookFormat::Paperback }; +/// +/// assert!(b1 == b2); +/// assert!(b1 != b3); +/// ``` +/// +/// # Examples +/// +/// ``` +/// let x: u32 = 0; +/// let y: u32 = 1; +/// +/// assert_eq!(x == y, false); +/// assert_eq!(x.eq(&y), false); +/// ``` #[lang = "eq"] #[stable(feature = "rust1", since = "1.0.0")] pub trait PartialEq { @@ -72,7 +127,32 @@ pub trait PartialEq { /// This property cannot be checked by the compiler, and therefore `Eq` implies /// `PartialEq`, and has no extra methods. /// -/// This trait can be used with `#[derive]`. +/// ## Derivable +/// +/// This trait can be used with `#[derive]`. When `derive`d, because `Eq` has +/// no extra methods, it is only informing the compiler that this is an +/// equivalence relation rather than a partial equivalence relation. Note that +/// the `derive` strategy requires all fields are `Eq`, which isn't +/// always desired. +/// +/// ## How can I implement `Eq`? 
+/// +/// If you cannot use the `derive` strategy, specify that your type implements +/// `Eq`, which has no methods: +/// +/// ``` +/// enum BookFormat { Paperback, Hardback, Ebook } +/// struct Book { +/// isbn: i32, +/// format: BookFormat, +/// } +/// impl PartialEq for Book { +/// fn eq(&self, other: &Book) -> bool { +/// self.isbn == other.isbn +/// } +/// } +/// impl Eq for Book {} +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait Eq: PartialEq { // FIXME #13101: this method is used solely by #[deriving] to @@ -88,6 +168,17 @@ pub trait Eq: PartialEq { fn assert_receiver_is_total_eq(&self) {} } +// FIXME: this struct is used solely by #[derive] to +// assert that every component of a type implements Eq. +// +// This struct should never appear in user code. +#[doc(hidden)] +#[allow(missing_debug_implementations)] +#[unstable(feature = "derive_eq", + reason = "deriving hack, should not be public", + issue = "0")] +pub struct AssertParamIsEq { _field: ::marker::PhantomData } + /// An `Ordering` is the result of a comparison between two values. /// /// # Examples @@ -104,7 +195,7 @@ pub trait Eq: PartialEq { /// let result = 2.cmp(&1); /// assert_eq!(Ordering::Greater, result); /// ``` -#[derive(Clone, Copy, PartialEq, Debug)] +#[derive(Clone, Copy, PartialEq, Debug, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub enum Ordering { /// An ordering where a compared value is less [than another]. @@ -119,10 +210,6 @@ pub enum Ordering { } impl Ordering { - unsafe fn from_i8_unchecked(v: i8) -> Ordering { - mem::transmute(v) - } - /// Reverse the `Ordering`. /// /// * `Less` becomes `Greater`. @@ -155,14 +242,84 @@ impl Ordering { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn reverse(self) -> Ordering { - unsafe { - // this compiles really nicely (to a single instruction); - // an explicit match has a pile of branches and - // comparisons. - // - // NB. it is safe because of the explicit discriminants - // given above. 
- Ordering::from_i8_unchecked(-(self as i8)) + match self { + Less => Greater, + Equal => Equal, + Greater => Less, + } + } + + /// Chains two orderings. + /// + /// Returns `self` when it's not `Equal`. Otherwise returns `other`. + /// # Examples + /// + /// ``` + /// #![feature(ordering_chaining)] + /// + /// use std::cmp::Ordering; + /// + /// let result = Ordering::Equal.then(Ordering::Less); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Less.then(Ordering::Equal); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Less.then(Ordering::Greater); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Equal.then(Ordering::Equal); + /// assert_eq!(result, Ordering::Equal); + /// + /// let x: (i64, i64, i64) = (1, 2, 7); + /// let y: (i64, i64, i64) = (1, 5, 3); + /// let result = x.0.cmp(&y.0).then(x.1.cmp(&y.1)).then(x.2.cmp(&y.2)); + /// + /// assert_eq!(result, Ordering::Less); + /// ``` + #[unstable(feature = "ordering_chaining", issue = "37053")] + pub fn then(self, other: Ordering) -> Ordering { + match self { + Equal => other, + _ => self, + } + } + + /// Chains the ordering with the given function. + /// + /// Returns `self` when it's not `Equal`. Otherwise calls `f` and returns + /// the result. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(ordering_chaining)] + /// + /// use std::cmp::Ordering; + /// + /// let result = Ordering::Equal.then_with(|| Ordering::Less); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Less.then_with(|| Ordering::Equal); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Less.then_with(|| Ordering::Greater); + /// assert_eq!(result, Ordering::Less); + /// + /// let result = Ordering::Equal.then_with(|| Ordering::Equal); + /// assert_eq!(result, Ordering::Equal); + /// + /// let x: (i64, i64, i64) = (1, 2, 7); + /// let y: (i64, i64, i64) = (1, 5, 3); + /// let result = x.0.cmp(&y.0).then_with(|| x.1.cmp(&y.1)).then_with(|| x.2.cmp(&y.2)); + /// + /// assert_eq!(result, Ordering::Less); + /// ``` + #[unstable(feature = "ordering_chaining", issue = "37053")] + pub fn then_with Ordering>(self, f: F) -> Ordering { + match self { + Equal => f(), + _ => self, } } } @@ -174,9 +331,49 @@ impl Ordering { /// - total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true; and /// - transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`. /// -/// When this trait is `derive`d, it produces a lexicographic ordering. +/// ## Derivable +/// +/// This trait can be used with `#[derive]`. When `derive`d, it will produce a lexicographic +/// ordering based on the top-to-bottom declaration order of the struct's members. +/// +/// ## How can I implement `Ord`? +/// +/// `Ord` requires that the type also be `PartialOrd` and `Eq` (which requires `PartialEq`). /// -/// This trait can be used with `#[derive]`. +/// Then you must define an implementation for `cmp()`. You may find it useful to use +/// `cmp()` on your type's fields. 
+/// +/// Here's an example where you want to sort people by height only, disregarding `id` +/// and `name`: +/// +/// ``` +/// use std::cmp::Ordering; +/// +/// #[derive(Eq)] +/// struct Person { +/// id: u32, +/// name: String, +/// height: u32, +/// } +/// +/// impl Ord for Person { +/// fn cmp(&self, other: &Person) -> Ordering { +/// self.height.cmp(&other.height) +/// } +/// } +/// +/// impl PartialOrd for Person { +/// fn partial_cmp(&self, other: &Person) -> Option { +/// Some(self.cmp(other)) +/// } +/// } +/// +/// impl PartialEq for Person { +/// fn eq(&self, other: &Person) -> bool { +/// self.height == other.height +/// } +/// } +/// ``` #[stable(feature = "rust1", since = "1.0.0")] pub trait Ord: Eq + PartialOrd { /// This method returns an `Ordering` between `self` and `other`. @@ -227,6 +424,13 @@ impl PartialOrd for Ordering { /// transitively: if `T: PartialOrd` and `U: PartialOrd` then `U: PartialOrd` and `T: /// PartialOrd`. /// +/// ## Derivable +/// +/// This trait can be used with `#[derive]`. When `derive`d, it will produce a lexicographic +/// ordering based on the top-to-bottom declaration order of the struct's members. +/// +/// ## How can I implement `Ord`? +/// /// PartialOrd only requires implementation of the `partial_cmp` method, with the others generated /// from default implementations. /// @@ -234,7 +438,74 @@ impl PartialOrd for Ordering { /// total order. For example, for floating point numbers, `NaN < 0 == false` and `NaN >= 0 == /// false` (cf. IEEE 754-2008 section 5.11). /// -/// This trait can be used with `#[derive]`. +/// `PartialOrd` requires your type to be `PartialEq`. 
+/// +/// If your type is `Ord`, you can implement `partial_cmp()` by using `cmp()`: +/// +/// ``` +/// use std::cmp::Ordering; +/// +/// #[derive(Eq)] +/// struct Person { +/// id: u32, +/// name: String, +/// height: u32, +/// } +/// +/// impl PartialOrd for Person { +/// fn partial_cmp(&self, other: &Person) -> Option { +/// Some(self.cmp(other)) +/// } +/// } +/// +/// impl Ord for Person { +/// fn cmp(&self, other: &Person) -> Ordering { +/// self.height.cmp(&other.height) +/// } +/// } +/// +/// impl PartialEq for Person { +/// fn eq(&self, other: &Person) -> bool { +/// self.height == other.height +/// } +/// } +/// ``` +/// +/// You may also find it useful to use `partial_cmp()` on your type's fields. Here +/// is an example of `Person` types who have a floating-point `height` field that +/// is the only field to be used for sorting: +/// +/// ``` +/// use std::cmp::Ordering; +/// +/// struct Person { +/// id: u32, +/// name: String, +/// height: f64, +/// } +/// +/// impl PartialOrd for Person { +/// fn partial_cmp(&self, other: &Person) -> Option { +/// self.height.partial_cmp(&other.height) +/// } +/// } +/// +/// impl PartialEq for Person { +/// fn eq(&self, other: &Person) -> bool { +/// self.height == other.height +/// } +/// } +/// ``` +/// +/// # Examples +/// +/// ``` +/// let x : u32 = 0; +/// let y : u32 = 1; +/// +/// assert_eq!(x < y, true); +/// assert_eq!(x.lt(&y), true); +/// ``` #[lang = "ord"] #[stable(feature = "rust1", since = "1.0.0")] pub trait PartialOrd: PartialEq { @@ -385,11 +656,7 @@ pub fn max(v1: T, v2: T) -> T { // Implementation of PartialEq, Eq, PartialOrd and Ord for primitive types mod impls { - use cmp::{PartialOrd, Ord, PartialEq, Eq, Ordering}; - use cmp::Ordering::{Less, Greater, Equal}; - use marker::Sized; - use option::Option; - use option::Option::{Some, None}; + use cmp::Ordering::{self, Less, Greater, Equal}; macro_rules! partial_eq_impl { ($($t:ty)*) => ($( @@ -513,6 +780,30 @@ mod impls { ord_impl! 
{ char usize u8 u16 u32 u64 isize i8 i16 i32 i64 } + #[unstable(feature = "never_type_impls", issue = "35121")] + impl PartialEq for ! { + fn eq(&self, _: &!) -> bool { + *self + } + } + + #[unstable(feature = "never_type_impls", issue = "35121")] + impl Eq for ! {} + + #[unstable(feature = "never_type_impls", issue = "35121")] + impl PartialOrd for ! { + fn partial_cmp(&self, _: &!) -> Option { + *self + } + } + + #[unstable(feature = "never_type_impls", issue = "35121")] + impl Ord for ! { + fn cmp(&self, _: &!) -> Ordering { + *self + } + } + // & pointers #[stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index b02b2a06b75d5..830bbc079ad1e 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -17,22 +17,48 @@ //! Like many traits, these are often used as bounds for generic functions, to //! support arguments of multiple types. //! +//! - Impl the `As*` traits for reference-to-reference conversions +//! - Impl the `Into` trait when you want to consume the value in the conversion +//! - The `From` trait is the most flexible, useful for value _and_ reference conversions +//! - The `TryFrom` and `TryInto` traits behave like `From` and `Into`, but allow for the +//! conversion to fail +//! +//! As a library author, you should prefer implementing `From` or `TryFrom` rather than +//! `Into` or `TryInto`, as `From` and `TryFrom` provide greater flexibility and offer +//! equivalent `Into` or `TryInto` implementations for free, thanks to a blanket implementation +//! in the standard library. +//! +//! # Generic impl +//! +//! - `AsRef` and `AsMut` auto-dereference if the inner type is a reference +//! - `From for T` implies `Into for U` +//! - `TryFrom for T` implies `TryInto for U` +//! - `From` and `Into` are reflexive, which means that all types can `into()` +//! themselves and `from()` themselves +//! //! See each trait for usage examples. 
#![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A cheap, reference-to-reference conversion. /// -/// `AsRef` is very similar to, but different than, `Borrow`. See +/// `AsRef` is very similar to, but different than, [`Borrow`]. See /// [the book][book] for more. /// /// [book]: ../../book/borrow-and-asref.html +/// [`Borrow`]: ../../std/borrow/trait.Borrow.html +/// +/// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which +/// returns an [`Option`] or a [`Result`]. +/// +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html /// /// # Examples /// -/// Both `String` and `&str` implement `AsRef`: +/// Both [`String`] and `&str` implement `AsRef`: +/// +/// [`String`]: ../../std/string/struct.String.html /// /// ``` /// fn is_hello>(s: T) { @@ -45,6 +71,12 @@ use marker::Sized; /// let s = "hello".to_string(); /// is_hello(s); /// ``` +/// +/// # Generic Impls +/// +/// - `AsRef` auto-dereferences if the inner type is a reference or a mutable +/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type `&mut Foo` or `&&mut Foo`) +/// #[stable(feature = "rust1", since = "1.0.0")] pub trait AsRef { /// Performs the conversion. @@ -53,6 +85,34 @@ pub trait AsRef { } /// A cheap, mutable reference-to-mutable reference conversion. +/// +/// **Note: this trait must not fail**. If the conversion can fail, use a dedicated method which +/// returns an [`Option`] or a [`Result`]. 
+/// +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// +/// # Examples +/// +/// [`Box`] implements `AsMut`: +/// +/// [`Box`]: ../../std/boxed/struct.Box.html +/// +/// ``` +/// fn add_one>(num: &mut T) { +/// *num.as_mut() += 1; +/// } +/// +/// let mut boxed_num = Box::new(0); +/// add_one(&mut boxed_num); +/// assert_eq!(*boxed_num, 1); +/// ``` +/// +/// # Generic Impls +/// +/// - `AsMut` auto-dereferences if the inner type is a reference or a mutable +/// reference (e.g.: `foo.as_ref()` will work the same if `foo` has type `&mut Foo` or `&&mut Foo`) +/// #[stable(feature = "rust1", since = "1.0.0")] pub trait AsMut { /// Performs the conversion. @@ -62,9 +122,16 @@ pub trait AsMut { /// A conversion that consumes `self`, which may or may not be expensive. /// +/// **Note: this trait must not fail**. If the conversion can fail, use [`TryInto`] or a dedicated +/// method which returns an [`Option`] or a [`Result`]. +/// +/// Library authors should not directly implement this trait, but should prefer implementing +/// the [`From`][From] trait, which offers greater flexibility and provides an equivalent `Into` +/// implementation for free, thanks to a blanket implementation in the standard library. 
+/// /// # Examples /// -/// `String` implements `Into>`: +/// [`String`] implements `Into>`: /// /// ``` /// fn is_hello>>(s: T) { @@ -75,6 +142,18 @@ pub trait AsMut { /// let s = "hello".to_string(); /// is_hello(s); /// ``` +/// +/// # Generic Impls +/// +/// - [`From`][From]` for U` implies `Into for T` +/// - [`into()`] is reflexive, which means that `Into for T` is implemented +/// +/// [`TryInto`]: trait.TryInto.html +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// [`String`]: ../../std/string/struct.String.html +/// [From]: trait.From.html +/// [`into()`]: trait.Into.html#tymethod.into #[stable(feature = "rust1", since = "1.0.0")] pub trait Into: Sized { /// Performs the conversion. @@ -84,9 +163,12 @@ pub trait Into: Sized { /// Construct `Self` via a conversion. /// +/// **Note: this trait must not fail**. If the conversion can fail, use [`TryFrom`] or a dedicated +/// method which returns an [`Option`] or a [`Result`]. +/// /// # Examples /// -/// `String` implements `From<&str>`: +/// [`String`] implements `From<&str>`: /// /// ``` /// let string = "hello".to_string(); @@ -94,6 +176,17 @@ pub trait Into: Sized { /// /// assert_eq!(string, other_string); /// ``` +/// # Generic impls +/// +/// - `From for U` implies [`Into`]` for T` +/// - [`from()`] is reflexive, which means that `From for T` is implemented +/// +/// [`TryFrom`]: trait.TryFrom.html +/// [`Option`]: ../../std/option/enum.Option.html +/// [`Result`]: ../../std/result/enum.Result.html +/// [`String`]: ../../std/string/struct.String.html +/// [`Into`]: trait.Into.html +/// [`from()`]: trait.From.html#tymethod.from #[stable(feature = "rust1", since = "1.0.0")] pub trait From: Sized { /// Performs the conversion. @@ -101,6 +194,32 @@ pub trait From: Sized { fn from(T) -> Self; } +/// An attempted conversion that consumes `self`, which may or may not be expensive. 
+/// +/// Library authors should not directly implement this trait, but should prefer implementing +/// the [`TryFrom`] trait, which offers greater flexibility and provides an equivalent `TryInto` +/// implementation for free, thanks to a blanket implementation in the standard library. +/// +/// [`TryFrom`]: trait.TryFrom.html +#[unstable(feature = "try_from", issue = "33417")] +pub trait TryInto: Sized { + /// The type returned in the event of a conversion error. + type Err; + + /// Performs the conversion. + fn try_into(self) -> Result; +} + +/// Attempt to construct `Self` via a conversion. +#[unstable(feature = "try_from", issue = "33417")] +pub trait TryFrom: Sized { + /// The type returned in the event of a conversion error. + type Err; + + /// Performs the conversion. + fn try_from(T) -> Result; +} + //////////////////////////////////////////////////////////////////////////////// // GENERIC IMPLS //////////////////////////////////////////////////////////////////////////////// @@ -159,6 +278,17 @@ impl From for T { fn from(t: T) -> T { t } } + +// TryFrom implies TryInto +#[unstable(feature = "try_from", issue = "33417")] +impl TryInto for T where U: TryFrom { + type Err = U::Err; + + fn try_into(self) -> Result { + U::try_from(self) + } +} + //////////////////////////////////////////////////////////////////////////////// // CONCRETE IMPLS //////////////////////////////////////////////////////////////////////////////// diff --git a/src/libcore/default.rs b/src/libcore/default.rs index 12c4a5ca200ad..85e4b2a006769 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -9,85 +9,76 @@ // except according to those terms. //! The `Default` trait for types which may have meaningful default values. -//! -//! Sometimes, you want to fall back to some kind of default value, and -//! don't particularly care what it is. This comes up often with `struct`s -//! that define a set of options: -//! -//! ``` -//! # #[allow(dead_code)] -//! 
struct SomeOptions { -//! foo: i32, -//! bar: f32, -//! } -//! ``` -//! -//! How can we define some default values? You can use `Default`: -//! -//! ``` -//! # #[allow(dead_code)] -//! #[derive(Default)] -//! struct SomeOptions { -//! foo: i32, -//! bar: f32, -//! } -//! -//! -//! fn main() { -//! let options: SomeOptions = Default::default(); -//! } -//! ``` -//! -//! Now, you get all of the default values. Rust implements `Default` for various primitives types. -//! If you have your own type, you need to implement `Default` yourself: -//! -//! ``` -//! # #![allow(dead_code)] -//! enum Kind { -//! A, -//! B, -//! C, -//! } -//! -//! impl Default for Kind { -//! fn default() -> Kind { Kind::A } -//! } -//! -//! #[derive(Default)] -//! struct SomeOptions { -//! foo: i32, -//! bar: f32, -//! baz: Kind, -//! } -//! -//! -//! fn main() { -//! let options: SomeOptions = Default::default(); -//! } -//! ``` -//! -//! If you want to override a particular option, but still retain the other defaults: -//! -//! ``` -//! # #[allow(dead_code)] -//! # #[derive(Default)] -//! # struct SomeOptions { -//! # foo: i32, -//! # bar: f32, -//! # } -//! fn main() { -//! let options = SomeOptions { foo: 42, ..Default::default() }; -//! } -//! ``` #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; - /// A trait for giving a type a useful default value. /// -/// A struct can derive default implementations of `Default` for basic types using -/// `#[derive(Default)]`. +/// Sometimes, you want to fall back to some kind of default value, and +/// don't particularly care what it is. This comes up often with `struct`s +/// that define a set of options: +/// +/// ``` +/// # #[allow(dead_code)] +/// struct SomeOptions { +/// foo: i32, +/// bar: f32, +/// } +/// ``` +/// +/// How can we define some default values? 
You can use `Default`: +/// +/// ``` +/// # #[allow(dead_code)] +/// #[derive(Default)] +/// struct SomeOptions { +/// foo: i32, +/// bar: f32, +/// } +/// +/// fn main() { +/// let options: SomeOptions = Default::default(); +/// } +/// ``` +/// +/// Now, you get all of the default values. Rust implements `Default` for various primitives types. +/// +/// If you want to override a particular option, but still retain the other defaults: +/// +/// ``` +/// # #[allow(dead_code)] +/// # #[derive(Default)] +/// # struct SomeOptions { +/// # foo: i32, +/// # bar: f32, +/// # } +/// fn main() { +/// let options = SomeOptions { foo: 42, ..Default::default() }; +/// } +/// ``` +/// +/// ## Derivable +/// +/// This trait can be used with `#[derive]` if all of the type's fields implement +/// `Default`. When `derive`d, it will use the default value for each field's type. +/// +/// ## How can I implement `Default`? +/// +/// Provide an implementation for the `default()` method that returns the value of +/// your type that should be the default: +/// +/// ``` +/// # #![allow(dead_code)] +/// enum Kind { +/// A, +/// B, +/// C, +/// } +/// +/// impl Default for Kind { +/// fn default() -> Kind { Kind::A } +/// } +/// ``` /// /// # Examples /// diff --git a/src/libcore/fmt/builders.rs b/src/libcore/fmt/builders.rs index 7c986131a5285..102e3c0bd7b95 100644 --- a/src/libcore/fmt/builders.rs +++ b/src/libcore/fmt/builders.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use prelude::v1::*; -use fmt::{self, Write, FlagV1}; +use fmt::{self, FlagV1}; struct PadAdapter<'a, 'b: 'a> { fmt: &'a mut fmt::Formatter<'b>, @@ -29,7 +28,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> { fn write_str(&mut self, mut s: &str) -> fmt::Result { while !s.is_empty() { if self.on_newline { - try!(self.fmt.write_str(" ")); + self.fmt.write_str(" ")?; } let split = match s.find('\n') { @@ -42,7 +41,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> { s.len() } }; - try!(self.fmt.write_str(&s[..split])); + self.fmt.write_str(&s[..split])?; s = &s[split..]; } @@ -54,6 +53,7 @@ impl<'a, 'b: 'a> fmt::Write for PadAdapter<'a, 'b> { /// /// Constructed by the `Formatter::debug_struct` method. #[must_use] +#[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugStruct<'a, 'b: 'a> { fmt: &'a mut fmt::Formatter<'b>, @@ -120,11 +120,13 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> { /// /// Constructed by the `Formatter::debug_tuple` method. 
#[must_use] +#[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugTuple<'a, 'b: 'a> { fmt: &'a mut fmt::Formatter<'b>, result: fmt::Result, - has_fields: bool, + fields: usize, + empty_name: bool, } pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> DebugTuple<'a, 'b> { @@ -132,7 +134,8 @@ pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> D DebugTuple { fmt: fmt, result: result, - has_fields: false, + fields: 0, + empty_name: name.is_empty(), } } @@ -141,7 +144,7 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> { #[stable(feature = "debug_builders", since = "1.2.0")] pub fn field(&mut self, value: &fmt::Debug) -> &mut DebugTuple<'a, 'b> { self.result = self.result.and_then(|_| { - let (prefix, space) = if self.has_fields { + let (prefix, space) = if self.fields > 0 { (",", " ") } else { ("(", "") @@ -155,20 +158,22 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> { } }); - self.has_fields = true; + self.fields += 1; self } /// Finishes output and returns any error encountered. #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { - if self.has_fields { + if self.fields > 0 { self.result = self.result.and_then(|_| { if self.is_pretty() { - self.fmt.write_str("\n)") - } else { - self.fmt.write_str(")") + self.fmt.write_str("\n")?; + } + if self.fields == 1 && self.empty_name { + self.fmt.write_str(",")?; } + self.fmt.write_str(")") }); } self.result @@ -177,14 +182,6 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> { fn is_pretty(&self) -> bool { self.fmt.flags() & (1 << (FlagV1::Alternate as usize)) != 0 } - - /// Returns the wrapped `Formatter`. 
- #[unstable(feature = "debug_builder_formatter", reason = "recently added", - issue = "27782")] - #[rustc_deprecated(since = "1.7.0", reason = "will be removed")] - pub fn formatter(&mut self) -> &mut fmt::Formatter<'b> { - &mut self.fmt - } } struct DebugInner<'a, 'b: 'a> { @@ -235,6 +232,7 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { /// /// Constructed by the `Formatter::debug_set` method. #[must_use] +#[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugSet<'a, 'b: 'a> { inner: DebugInner<'a, 'b>, @@ -283,6 +281,7 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> { /// /// Constructed by the `Formatter::debug_list` method. #[must_use] +#[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugList<'a, 'b: 'a> { inner: DebugInner<'a, 'b>, @@ -331,6 +330,7 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> { /// /// Constructed by the `Formatter::debug_map` method. #[must_use] +#[allow(missing_debug_implementations)] #[stable(feature = "debug_builders", since = "1.2.0")] pub struct DebugMap<'a, 'b: 'a> { fmt: &'a mut fmt::Formatter<'b>, diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 37f03d731dc72..9167264ba9d12 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -8,13 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Utilities for formatting and printing strings +//! Utilities for formatting and printing strings. 
#![stable(feature = "rust1", since = "1.0.0")] -use prelude::v1::*; - -use cell::{Cell, RefCell, Ref, RefMut, BorrowState}; +use cell::{UnsafeCell, Cell, RefCell, Ref, RefMut, BorrowState}; use marker::PhantomData; use mem; use num::flt2dec; @@ -22,20 +20,21 @@ use ops::Deref; use result; use slice; use str; -use self::rt::v1::Alignment; - -#[unstable(feature = "fmt_radix", issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -#[allow(deprecated)] -pub use self::num::radix; -#[unstable(feature = "fmt_radix", issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -#[allow(deprecated)] -pub use self::num::Radix; -#[unstable(feature = "fmt_radix", issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -#[allow(deprecated)] -pub use self::num::RadixFmt; + +#[unstable(feature = "fmt_flags_align", issue = "27726")] +/// Possible alignments returned by `Formatter::align` +#[derive(Debug)] +pub enum Alignment { + /// Indication that contents should be left-aligned. + Left, + /// Indication that contents should be right-aligned. + Right, + /// Indication that contents should be center-aligned. + Center, + /// No alignment was requested. + Unknown, +} + #[stable(feature = "debug_builders", since = "1.2.0")] pub use self::builders::{DebugStruct, DebugTuple, DebugSet, DebugList, DebugMap}; @@ -59,7 +58,7 @@ pub type Result = result::Result<(), Error>; /// occurred. Any extra information must be arranged to be transmitted through /// some other means. #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Error; /// A collection of methods that are required to format a message into a stream. @@ -98,9 +97,7 @@ pub trait Write { /// This function will return an instance of `Error` on error. 
#[stable(feature = "fmt_write_char", since = "1.1.0")] fn write_char(&mut self, c: char) -> Result { - let mut utf_8 = [0u8; 4]; - let bytes_written = c.encode_utf8(&mut utf_8).unwrap_or(0); - self.write_str(unsafe { str::from_utf8_unchecked(&utf_8[..bytes_written]) }) + self.write_str(c.encode_utf8(&mut [0; 4])) } /// Glue for usage of the `write!` macro with implementors of this trait. @@ -121,6 +118,10 @@ pub trait Write { self.0.write_str(s) } + fn write_char(&mut self, c: char) -> Result { + self.0.write_char(c) + } + fn write_fmt(&mut self, args: Arguments) -> Result { self.0.write_fmt(args) } @@ -148,6 +149,7 @@ impl<'a, W: Write + ?Sized> Write for &'a mut W { /// A struct to represent both where to emit formatting strings to and how they /// should be formatted. A mutable version of this is passed to all formatting /// traits. +#[allow(missing_debug_implementations)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Formatter<'a> { flags: u32, @@ -164,13 +166,16 @@ pub struct Formatter<'a> { // NB. Argument is essentially an optimized partially applied formatting function, // equivalent to `exists T.(&T, fn(&T, &mut Formatter) -> Result`. -enum Void {} +struct Void { + _priv: (), +} /// This struct represents the generic "argument" which is taken by the Xprintf /// family of functions. It contains a function to format the given value. At /// compile time it is ensured that the function and the value have the correct /// types, and then this struct is used to canonicalize arguments to one type. #[derive(Copy)] +#[allow(missing_debug_implementations)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "0")] #[doc(hidden)] @@ -267,10 +272,14 @@ impl<'a> Arguments<'a> { /// safely be done so, so no constructors are given and the fields are private /// to prevent modification. 
/// -/// The `format_args!` macro will safely create an instance of this structure +/// The [`format_args!`] macro will safely create an instance of this structure /// and pass it to a function or closure, passed as the first argument. The -/// macro validates the format string at compile-time so usage of the `write` -/// and `format` functions can be safely performed. +/// macro validates the format string at compile-time so usage of the [`write`] +/// and [`format`] functions can be safely performed. +/// +/// [`format_args!`]: ../../std/macro.format_args.html +/// [`format`]: ../../std/fmt/fn.format.html +/// [`write`]: ../../std/fmt/fn.write.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Copy, Clone)] pub struct Arguments<'a> { @@ -311,7 +320,11 @@ impl<'a> Display for Arguments<'a> { /// /// [module]: ../../std/fmt/index.html /// -/// This trait can be used with `#[derive]`. +/// This trait can be used with `#[derive]` if all fields implement `Debug`. When +/// `derive`d for structs, it will use the name of the `struct`, then `{`, then a +/// comma-separated list of each field's name and `Debug` value, then `}`. For +/// `enum`s, it will use the name of the variant and, if applicable, `(`, then the +/// `Debug` values of the fields, then `)`. /// /// # Examples /// @@ -769,6 +782,32 @@ pub trait UpperExp { /// /// * output - the buffer to write output to /// * args - the precompiled arguments generated by `format_args!` +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::fmt; +/// +/// let mut output = String::new(); +/// fmt::write(&mut output, format_args!("Hello {}!", "world")) +/// .expect("Error occurred while trying to write in String"); +/// assert_eq!(output, "Hello world!"); +/// ``` +/// +/// Please note that using [`write!`] might be preferrable. 
Example: +/// +/// ``` +/// use std::fmt::Write; +/// +/// let mut output = String::new(); +/// write!(&mut output, "Hello {}!", "world") +/// .expect("Error occurred while trying to write in String"); +/// assert_eq!(output, "Hello world!"); +/// ``` +/// +/// [`write!`]: ../../std/macro.write.html #[stable(feature = "rust1", since = "1.0.0")] pub fn write(output: &mut Write, args: Arguments) -> Result { let mut formatter = Formatter { @@ -776,7 +815,7 @@ pub fn write(output: &mut Write, args: Arguments) -> Result { width: None, precision: None, buf: output, - align: Alignment::Unknown, + align: rt::v1::Alignment::Unknown, fill: ' ', args: args.args, curarg: args.args.iter(), @@ -788,26 +827,23 @@ pub fn write(output: &mut Write, args: Arguments) -> Result { None => { // We can use default formatting parameters for all arguments. for (arg, piece) in args.args.iter().zip(pieces.by_ref()) { - try!(formatter.buf.write_str(*piece)); - try!((arg.formatter)(arg.value, &mut formatter)); + formatter.buf.write_str(*piece)?; + (arg.formatter)(arg.value, &mut formatter)?; } } Some(fmt) => { // Every spec has a corresponding argument that is preceded by // a string piece. for (arg, piece) in fmt.iter().zip(pieces.by_ref()) { - try!(formatter.buf.write_str(*piece)); - try!(formatter.run(arg)); + formatter.buf.write_str(*piece)?; + formatter.run(arg)?; } } } // There can be only one trailing string piece left. 
- match pieces.next() { - Some(piece) => { - try!(formatter.buf.write_str(*piece)); - } - None => {} + if let Some(piece) = pieces.next() { + formatter.buf.write_str(*piece)?; } Ok(()) @@ -871,8 +907,6 @@ impl<'a> Formatter<'a> { prefix: &str, buf: &str) -> Result { - use char::CharExt; - let mut width = buf.len(); let mut sign = None; @@ -890,10 +924,7 @@ impl<'a> Formatter<'a> { // Writes the sign if it exists, and then the prefix if it was requested let write_prefix = |f: &mut Formatter| { if let Some(c) = sign { - let mut b = [0; 4]; - let n = c.encode_utf8(&mut b).unwrap_or(0); - let b = unsafe { str::from_utf8_unchecked(&b[..n]) }; - try!(f.buf.write_str(b)); + f.buf.write_str(c.encode_utf8(&mut [0; 4]))?; } if prefixed { f.buf.write_str(prefix) } else { Ok(()) } @@ -904,26 +935,26 @@ impl<'a> Formatter<'a> { // If there's no minimum length requirements then we can just // write the bytes. None => { - try!(write_prefix(self)); self.buf.write_str(buf) + write_prefix(self)?; self.buf.write_str(buf) } // Check if we're over the minimum width, if so then we can also // just write the bytes. 
Some(min) if width >= min => { - try!(write_prefix(self)); self.buf.write_str(buf) + write_prefix(self)?; self.buf.write_str(buf) } // The sign and prefix goes before the padding if the fill character // is zero Some(min) if self.sign_aware_zero_pad() => { self.fill = '0'; - try!(write_prefix(self)); - self.with_padding(min - width, Alignment::Right, |f| { + write_prefix(self)?; + self.with_padding(min - width, rt::v1::Alignment::Right, |f| { f.buf.write_str(buf) }) } // Otherwise, the sign and prefix goes after the padding Some(min) => { - self.with_padding(min - width, Alignment::Right, |f| { - try!(write_prefix(f)); f.buf.write_str(buf) + self.with_padding(min - width, rt::v1::Alignment::Right, |f| { + write_prefix(f)?; f.buf.write_str(buf) }) } } @@ -947,15 +978,19 @@ impl<'a> Formatter<'a> { return self.buf.write_str(s); } // The `precision` field can be interpreted as a `max-width` for the - // string being formatted - if let Some(max) = self.precision { - // If there's a maximum width and our string is longer than - // that, then we must always have truncation. This is the only - // case where the maximum length will matter. + // string being formatted. + let s = if let Some(max) = self.precision { + // If our string is longer that the precision, then we must have + // truncation. However other flags like `fill`, `width` and `align` + // must act as always. if let Some((i, _)) = s.char_indices().skip(max).next() { - return self.buf.write_str(&s[..i]) + &s[..i] + } else { + &s } - } + } else { + &s + }; // The `width` field is more of a `min-width` parameter at this point. match self.width { // If we're under the maximum length, and there's no minimum length @@ -969,7 +1004,8 @@ impl<'a> Formatter<'a> { // If we're under both the maximum and the minimum width, then fill // up the minimum width with the specified string + some alignment. 
Some(width) => { - self.with_padding(width - s.chars().count(), Alignment::Left, |me| { + let align = rt::v1::Alignment::Left; + self.with_padding(width - s.chars().count(), align, |me| { me.buf.write_str(s) }) } @@ -978,34 +1014,33 @@ impl<'a> Formatter<'a> { /// Runs a callback, emitting the correct padding either before or /// afterwards depending on whether right or left alignment is requested. - fn with_padding(&mut self, padding: usize, default: Alignment, + fn with_padding(&mut self, padding: usize, default: rt::v1::Alignment, f: F) -> Result where F: FnOnce(&mut Formatter) -> Result, { - use char::CharExt; let align = match self.align { - Alignment::Unknown => default, + rt::v1::Alignment::Unknown => default, _ => self.align }; let (pre_pad, post_pad) = match align { - Alignment::Left => (0, padding), - Alignment::Right | Alignment::Unknown => (padding, 0), - Alignment::Center => (padding / 2, (padding + 1) / 2), + rt::v1::Alignment::Left => (0, padding), + rt::v1::Alignment::Right | + rt::v1::Alignment::Unknown => (padding, 0), + rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2), }; let mut fill = [0; 4]; - let len = self.fill.encode_utf8(&mut fill).unwrap_or(0); - let fill = unsafe { str::from_utf8_unchecked(&fill[..len]) }; + let fill = self.fill.encode_utf8(&mut fill); for _ in 0..pre_pad { - try!(self.buf.write_str(fill)); + self.buf.write_str(fill)?; } - try!(f(self)); + f(self)?; for _ in 0..post_pad { - try!(self.buf.write_str(fill)); + self.buf.write_str(fill)?; } Ok(()) @@ -1024,12 +1059,12 @@ impl<'a> Formatter<'a> { if self.sign_aware_zero_pad() { // a sign always goes first let sign = unsafe { str::from_utf8_unchecked(formatted.sign) }; - try!(self.buf.write_str(sign)); + self.buf.write_str(sign)?; // remove the sign from the formatted parts formatted.sign = b""; width = if width < sign.len() { 0 } else { width - sign.len() }; - align = Alignment::Right; + align = rt::v1::Alignment::Right; self.fill = '0'; } @@ -1056,7 +1091,7 @@ 
impl<'a> Formatter<'a> { } if !formatted.sign.is_empty() { - try!(write_bytes(self.buf, formatted.sign)); + write_bytes(self.buf, formatted.sign)?; } for part in formatted.parts { match *part { @@ -1064,11 +1099,11 @@ impl<'a> Formatter<'a> { const ZEROES: &'static str = // 64 zeroes "0000000000000000000000000000000000000000000000000000000000000000"; while nzeroes > ZEROES.len() { - try!(self.buf.write_str(ZEROES)); + self.buf.write_str(ZEROES)?; nzeroes -= ZEROES.len(); } if nzeroes > 0 { - try!(self.buf.write_str(&ZEROES[..nzeroes])); + self.buf.write_str(&ZEROES[..nzeroes])?; } } flt2dec::Part::Num(mut v) => { @@ -1078,10 +1113,10 @@ impl<'a> Formatter<'a> { *c = b'0' + (v % 10) as u8; v /= 10; } - try!(write_bytes(self.buf, &s[..len])); + write_bytes(self.buf, &s[..len])?; } flt2dec::Part::Copy(buf) => { - try!(write_bytes(self.buf, buf)); + write_bytes(self.buf, buf)?; } } } @@ -1112,7 +1147,14 @@ impl<'a> Formatter<'a> { /// Flag indicating what form of alignment was requested #[unstable(feature = "fmt_flags_align", reason = "method was just created", issue = "27726")] - pub fn align(&self) -> Alignment { self.align } + pub fn align(&self) -> Alignment { + match self.align { + rt::v1::Alignment::Left => Alignment::Left, + rt::v1::Alignment::Right => Alignment::Right, + rt::v1::Alignment::Center => Alignment::Center, + rt::v1::Alignment::Unknown => Alignment::Unknown, + } + } /// Optionally specified integer width that the output should be #[stable(feature = "fmt_flags", since = "1.5.0")] @@ -1316,6 +1358,20 @@ macro_rules! fmt_refs { fmt_refs! { Debug, Display, Octal, Binary, LowerHex, UpperHex, LowerExp, UpperExp } +#[unstable(feature = "never_type_impls", issue = "35121")] +impl Debug for ! { + fn fmt(&self, _: &mut Formatter) -> Result { + *self + } +} + +#[unstable(feature = "never_type_impls", issue = "35121")] +impl Display for ! 
{ + fn fmt(&self, _: &mut Formatter) -> Result { + *self + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl Debug for bool { fn fmt(&self, f: &mut Formatter) -> Result { @@ -1333,20 +1389,20 @@ impl Display for bool { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for str { fn fmt(&self, f: &mut Formatter) -> Result { - try!(f.write_char('"')); + f.write_char('"')?; let mut from = 0; for (i, c) in self.char_indices() { - let esc = c.escape_default(); + let esc = c.escape_debug(); // If char needs escaping, flush backlog so far and write, else skip - if esc.size_hint() != (1, Some(1)) { - try!(f.write_str(&self[from..i])); + if esc.len() != 1 { + f.write_str(&self[from..i])?; for c in esc { - try!(f.write_char(c)); + f.write_char(c)?; } from = i + c.len_utf8(); } } - try!(f.write_str(&self[from..])); + f.write_str(&self[from..])?; f.write_char('"') } } @@ -1361,9 +1417,9 @@ impl Display for str { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for char { fn fmt(&self, f: &mut Formatter) -> Result { - try!(f.write_char('\'')); - for c in self.escape_default() { - try!(f.write_char(c)) + f.write_char('\'')?; + for c in self.escape_debug() { + f.write_char(c)? 
} f.write_char('\'') } @@ -1375,16 +1431,13 @@ impl Display for char { if f.width.is_none() && f.precision.is_none() { f.write_char(*self) } else { - let mut utf8 = [0; 4]; - let amt = self.encode_utf8(&mut utf8).unwrap_or(0); - let s: &str = unsafe { str::from_utf8_unchecked(&utf8[..amt]) }; - f.pad(s) + f.pad(self.encode_utf8(&mut [0; 4])) } } } #[stable(feature = "rust1", since = "1.0.0")] -impl Pointer for *const T { +impl Pointer for *const T { fn fmt(&self, f: &mut Formatter) -> Result { let old_width = f.width; let old_flags = f.flags; @@ -1402,7 +1455,7 @@ impl Pointer for *const T { } f.flags |= 1 << (FlagV1::Alternate as u32); - let ret = LowerHex::fmt(&(*self as usize), f); + let ret = LowerHex::fmt(&(*self as *const () as usize), f); f.width = old_width; f.flags = old_flags; @@ -1412,28 +1465,22 @@ impl Pointer for *const T { } #[stable(feature = "rust1", since = "1.0.0")] -impl Pointer for *mut T { +impl Pointer for *mut T { fn fmt(&self, f: &mut Formatter) -> Result { - // FIXME(#23542) Replace with type ascription. - #![allow(trivial_casts)] Pointer::fmt(&(*self as *const T), f) } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Pointer for &'a T { +impl<'a, T: ?Sized> Pointer for &'a T { fn fmt(&self, f: &mut Formatter) -> Result { - // FIXME(#23542) Replace with type ascription. - #![allow(trivial_casts)] Pointer::fmt(&(*self as *const T), f) } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Pointer for &'a mut T { +impl<'a, T: ?Sized> Pointer for &'a mut T { fn fmt(&self, f: &mut Formatter) -> Result { - // FIXME(#23542) Replace with type ascription. - #![allow(trivial_casts)] Pointer::fmt(&(&**self as *const T), f) } } @@ -1521,11 +1568,11 @@ floating! 
{ f64 } // Implementation of Display/Debug for various core types #[stable(feature = "rust1", since = "1.0.0")] -impl Debug for *const T { +impl Debug for *const T { fn fmt(&self, f: &mut Formatter) -> Result { Pointer::fmt(self, f) } } #[stable(feature = "rust1", since = "1.0.0")] -impl Debug for *mut T { +impl Debug for *mut T { fn fmt(&self, f: &mut Formatter) -> Result { Pointer::fmt(self, f) } } @@ -1542,16 +1589,10 @@ macro_rules! tuple { fn fmt(&self, f: &mut Formatter) -> Result { let mut builder = f.debug_tuple(""); let ($(ref $name,)*) = *self; - let mut n = 0; $( builder.field($name); - n += 1; )* - if n == 1 { - try!(write!(builder.formatter(), ",")); - } - builder.finish() } } @@ -1584,7 +1625,9 @@ impl Debug for PhantomData { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for Cell { fn fmt(&self, f: &mut Formatter) -> Result { - write!(f, "Cell {{ value: {:?} }}", self.get()) + f.debug_struct("Cell") + .field("value", &self.get()) + .finish() } } @@ -1593,9 +1636,15 @@ impl Debug for RefCell { fn fmt(&self, f: &mut Formatter) -> Result { match self.borrow_state() { BorrowState::Unused | BorrowState::Reading => { - write!(f, "RefCell {{ value: {:?} }}", self.borrow()) + f.debug_struct("RefCell") + .field("value", &self.borrow()) + .finish() + } + BorrowState::Writing => { + f.debug_struct("RefCell") + .field("value", &"") + .finish() } - BorrowState::Writing => write!(f, "RefCell {{ }}"), } } } @@ -1614,5 +1663,12 @@ impl<'b, T: ?Sized + Debug> Debug for RefMut<'b, T> { } } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl Debug for UnsafeCell { + fn fmt(&self, f: &mut Formatter) -> Result { + f.pad("UnsafeCell") + } +} + // If you expected tests to be here, look instead at the run-pass/ifmt.rs test, // it's a lot easier than creating all of the rt::Piece structures here. 
diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index 263e03dcc7839..0145897d8f690 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -14,8 +14,6 @@ // FIXME: #6220 Implement floating point formatting -use prelude::v1::*; - use fmt; use num::Zero; use ops::{Div, Rem, Sub}; @@ -29,6 +27,7 @@ trait Int: Zero + PartialEq + PartialOrd + Div + Rem + Sub + Copy { fn from_u8(u: u8) -> Self; fn to_u8(&self) -> u8; + fn to_u16(&self) -> u16; fn to_u32(&self) -> u32; fn to_u64(&self) -> u64; } @@ -37,6 +36,7 @@ macro_rules! doit { ($($t:ident)*) => ($(impl Int for $t { fn from_u8(u: u8) -> $t { u as $t } fn to_u8(&self) -> u8 { *self as u8 } + fn to_u16(&self) -> u16 { *self as u16 } fn to_u32(&self) -> u32 { *self as u32 } fn to_u64(&self) -> u64 { *self as u64 } })*) @@ -140,81 +140,6 @@ radix! { LowerHex, 16, "0x", x @ 0 ... 9 => b'0' + x, radix! { UpperHex, 16, "0x", x @ 0 ... 9 => b'0' + x, x @ 10 ... 15 => b'A' + (x - 10) } -/// A radix with in the range of `2..36`. -#[derive(Clone, Copy, PartialEq)] -#[unstable(feature = "fmt_radix", - reason = "may be renamed or move to a different module", - issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -pub struct Radix { - base: u8, -} - -impl Radix { - fn new(base: u8) -> Radix { - assert!(2 <= base && base <= 36, - "the base must be in the range of 2..36: {}", - base); - Radix { base: base } - } -} - -impl GenericRadix for Radix { - fn base(&self) -> u8 { - self.base - } - fn digit(&self, x: u8) -> u8 { - match x { - x @ 0 ... 9 => b'0' + x, - x if x < self.base() => b'a' + (x - 10), - x => panic!("number not in the range 0..{}: {}", self.base() - 1, x), - } - } -} - -/// A helper type for formatting radixes. 
-#[unstable(feature = "fmt_radix", - reason = "may be renamed or move to a different module", - issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -#[derive(Copy, Clone)] -pub struct RadixFmt(T, R); - -/// Constructs a radix formatter in the range of `2..36`. -/// -/// # Examples -/// -/// ``` -/// #![feature(fmt_radix)] -/// -/// use std::fmt::radix; -/// assert_eq!(format!("{}", radix(55, 36)), "1j".to_string()); -/// ``` -#[unstable(feature = "fmt_radix", - reason = "may be renamed or move to a different module", - issue = "27728")] -#[rustc_deprecated(since = "1.7.0", reason = "not used enough to stabilize")] -pub fn radix(x: T, base: u8) -> RadixFmt { - RadixFmt(x, Radix::new(base)) -} - -macro_rules! radix_fmt { - ($T:ty as $U:ty, $fmt:ident) => { - #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::Debug for RadixFmt<$T, Radix> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(self, f) - } - } - #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::Display for RadixFmt<$T, Radix> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { RadixFmt(ref x, radix) => radix.$fmt(*x as $U, f) } - } - } - } -} - macro_rules! int_base { ($Trait:ident for $T:ident as $U:ident -> $Radix:ident) => { #[stable(feature = "rust1", since = "1.0.0")] @@ -243,14 +168,12 @@ macro_rules! integer { int_base! { Octal for $Int as $Uint -> Octal } int_base! { LowerHex for $Int as $Uint -> LowerHex } int_base! { UpperHex for $Int as $Uint -> UpperHex } - radix_fmt! { $Int as $Int, fmt_int } debug! { $Int } int_base! { Binary for $Uint as $Uint -> Binary } int_base! { Octal for $Uint as $Uint -> Octal } int_base! { LowerHex for $Uint as $Uint -> LowerHex } int_base! { UpperHex for $Uint as $Uint -> UpperHex } - radix_fmt! { $Uint as $Uint, fmt_int } debug! { $Uint } } } @@ -333,6 +256,8 @@ macro_rules! 
impl_Display { impl_Display!(i8, u8, i16, u16, i32, u32: to_u32); impl_Display!(i64, u64: to_u64); +#[cfg(target_pointer_width = "16")] +impl_Display!(isize, usize: to_u16); #[cfg(target_pointer_width = "32")] impl_Display!(isize, usize: to_u32); #[cfg(target_pointer_width = "64")] diff --git a/src/libcore/fmt/rt/v1.rs b/src/libcore/fmt/rt/v1.rs index f889045a3f595..ec7add9c3759f 100644 --- a/src/libcore/fmt/rt/v1.rs +++ b/src/libcore/fmt/rt/v1.rs @@ -13,6 +13,7 @@ //! //! These definitions are similar to their `ct` equivalents, but differ in that //! these can be statically allocated and are slightly optimized for the runtime +#![allow(missing_debug_implementations)] #[derive(Copy, Clone)] pub struct Argument { @@ -30,7 +31,7 @@ pub struct FormatSpec { } /// Possible alignments that can be requested as part of a formatting directive. -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, PartialEq, Eq)] pub enum Alignment { /// Indication that contents should be left-aligned. Left, diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index 0781dd3b7742f..ac36cbaace7a8 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -38,7 +38,9 @@ //! ``` //! //! If you need more control over how a value is hashed, you need to implement -//! the trait `Hash`: +//! the [`Hash`] trait: +//! +//! [`Hash`]: trait.Hash.html //! //! ```rust //! use std::hash::{Hash, Hasher, SipHasher}; @@ -71,13 +73,18 @@ #![stable(feature = "rust1", since = "1.0.0")] -use prelude::v1::*; - +use fmt; +use marker; use mem; #[stable(feature = "rust1", since = "1.0.0")] +#[allow(deprecated)] pub use self::sip::SipHasher; +#[unstable(feature = "sip_hash_13", issue = "29754")] +#[allow(deprecated)] +pub use self::sip::{SipHasher13, SipHasher24}; + mod sip; /// A hashable type. @@ -85,7 +92,7 @@ mod sip; /// The `H` type parameter is an abstract hash state that is used by the `Hash` /// to compute the hash. 
/// -/// If you are also implementing `Eq`, there is an additional property that +/// If you are also implementing [`Eq`], there is an additional property that /// is important: /// /// ```text @@ -93,9 +100,40 @@ mod sip; /// ``` /// /// In other words, if two keys are equal, their hashes should also be equal. -/// `HashMap` and `HashSet` both rely on this behavior. +/// [`HashMap`] and [`HashSet`] both rely on this behavior. +/// +/// ## Derivable +/// +/// This trait can be used with `#[derive]` if all fields implement `Hash`. +/// When `derive`d, the resulting hash will be the combination of the values +/// from calling [`.hash()`] on each field. +/// +/// ## How can I implement `Hash`? +/// +/// If you need more control over how a value is hashed, you need to implement +/// the `Hash` trait: +/// +/// ``` +/// use std::hash::{Hash, Hasher}; +/// +/// struct Person { +/// id: u32, +/// name: String, +/// phone: u64, +/// } +/// +/// impl Hash for Person { +/// fn hash(&self, state: &mut H) { +/// self.id.hash(state); +/// self.phone.hash(state); +/// } +/// } +/// ``` /// -/// This trait can be used with `#[derive]`. +/// [`Eq`]: ../../std/cmp/trait.Eq.html +/// [`HashMap`]: ../../std/collections/struct.HashMap.html +/// [`HashSet`]: ../../std/collections/struct.HashSet.html +/// [`.hash()`]: #tymethod.hash #[stable(feature = "rust1", since = "1.0.0")] pub trait Hash { /// Feeds this value into the state given, updating the hasher as necessary. @@ -120,35 +158,35 @@ pub trait Hasher { #[stable(feature = "rust1", since = "1.0.0")] fn finish(&self) -> u64; - /// Writes some data into this `Hasher` + /// Writes some data into this `Hasher`. #[stable(feature = "rust1", since = "1.0.0")] fn write(&mut self, bytes: &[u8]); - /// Write a single `u8` into this hasher + /// Write a single `u8` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_u8(&mut self, i: u8) { self.write(&[i]) } - /// Write a single `u16` into this hasher. 
+ /// Writes a single `u16` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_u16(&mut self, i: u16) { self.write(&unsafe { mem::transmute::<_, [u8; 2]>(i) }) } - /// Write a single `u32` into this hasher. + /// Writes a single `u32` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_u32(&mut self, i: u32) { self.write(&unsafe { mem::transmute::<_, [u8; 4]>(i) }) } - /// Write a single `u64` into this hasher. + /// Writes a single `u64` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_u64(&mut self, i: u64) { self.write(&unsafe { mem::transmute::<_, [u8; 8]>(i) }) } - /// Write a single `usize` into this hasher. + /// Writes a single `usize` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_usize(&mut self, i: usize) { @@ -158,31 +196,31 @@ pub trait Hasher { self.write(bytes); } - /// Write a single `i8` into this hasher. + /// Writes a single `i8` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_i8(&mut self, i: i8) { self.write_u8(i as u8) } - /// Write a single `i16` into this hasher. + /// Writes a single `i16` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_i16(&mut self, i: i16) { self.write_u16(i as u16) } - /// Write a single `i32` into this hasher. + /// Writes a single `i32` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_i32(&mut self, i: i32) { self.write_u32(i as u32) } - /// Write a single `i64` into this hasher. + /// Writes a single `i64` into this hasher. #[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_i64(&mut self, i: i64) { self.write_u64(i as u64) } - /// Write a single `isize` into this hasher. + /// Writes a single `isize` into this hasher. 
#[inline] #[stable(feature = "hasher_write", since = "1.3.0")] fn write_isize(&mut self, i: isize) { @@ -190,11 +228,73 @@ pub trait Hasher { } } +/// A `BuildHasher` is typically used as a factory for instances of `Hasher` +/// which a `HashMap` can then use to hash keys independently. +/// +/// Note that for each instance of `BuildHasher`, the created hashers should be +/// identical. That is, if the same stream of bytes is fed into each hasher, the +/// same output will also be generated. +#[stable(since = "1.7.0", feature = "build_hasher")] +pub trait BuildHasher { + /// Type of the hasher that will be created. + #[stable(since = "1.7.0", feature = "build_hasher")] + type Hasher: Hasher; + + /// Creates a new hasher. + /// + /// # Examples + /// + /// ``` + /// use std::collections::hash_map::RandomState; + /// use std::hash::BuildHasher; + /// + /// let s = RandomState::new(); + /// let new_s = s.build_hasher(); + /// ``` + #[stable(since = "1.7.0", feature = "build_hasher")] + fn build_hasher(&self) -> Self::Hasher; +} + +/// A structure which implements `BuildHasher` for all `Hasher` types which also +/// implement `Default`. +/// +/// This struct is 0-sized and does not need construction. 
+#[stable(since = "1.7.0", feature = "build_hasher")] +pub struct BuildHasherDefault(marker::PhantomData); + +#[stable(since = "1.9.0", feature = "core_impl_debug")] +impl fmt::Debug for BuildHasherDefault { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("BuildHasherDefault") + } +} + +#[stable(since = "1.7.0", feature = "build_hasher")] +impl BuildHasher for BuildHasherDefault { + type Hasher = H; + + fn build_hasher(&self) -> H { + H::default() + } +} + +#[stable(since = "1.7.0", feature = "build_hasher")] +impl Clone for BuildHasherDefault { + fn clone(&self) -> BuildHasherDefault { + BuildHasherDefault(marker::PhantomData) + } +} + +#[stable(since = "1.7.0", feature = "build_hasher")] +impl Default for BuildHasherDefault { + fn default() -> BuildHasherDefault { + BuildHasherDefault(marker::PhantomData) + } +} + ////////////////////////////////////////////////////////////////////////////// mod impls { - use prelude::v1::*; - use mem; use slice; use super::*; diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs index 722d77a8a11ef..5f5d07b668237 100644 --- a/src/libcore/hash/sip.rs +++ b/src/libcore/hash/sip.rs @@ -8,103 +8,143 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! An implementation of SipHash 2-4. +//! An implementation of SipHash. -use prelude::v1::*; +#![allow(deprecated)] +use marker::PhantomData; use ptr; -use super::Hasher; +use cmp; +use mem; + +/// An implementation of SipHash 1-3. +/// +/// This is currently the default hashing function used by standard library +/// (eg. `collections::HashMap` uses it by default). +/// +/// See: https://131002.net/siphash/ +#[unstable(feature = "sip_hash_13", issue = "34767")] +#[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] +#[derive(Debug, Clone, Default)] +pub struct SipHasher13 { + hasher: Hasher, +} + +/// An implementation of SipHash 2-4. 
+/// +/// See: https://131002.net/siphash/ +#[unstable(feature = "sip_hash_13", issue = "34767")] +#[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] +#[derive(Debug, Clone, Default)] +pub struct SipHasher24 { + hasher: Hasher, +} /// An implementation of SipHash 2-4. /// -/// See: http://131002.net/siphash/ +/// See: https://131002.net/siphash/ /// -/// Consider this as a main "general-purpose" hash for all hashtables: it -/// runs at good speed (competitive with spooky and city) and permits -/// strong _keyed_ hashing. Key your hashtables from a strong RNG, -/// such as `rand::Rng`. +/// SipHash is a general-purpose hashing function: it runs at a good +/// speed (competitive with Spooky and City) and permits strong _keyed_ +/// hashing. This lets you key your hashtables from a strong RNG, such as +/// [`rand::os::OsRng`](https://doc.rust-lang.org/rand/rand/os/struct.OsRng.html). /// -/// Although the SipHash algorithm is considered to be cryptographically -/// strong, this implementation has not been reviewed for such purposes. -/// As such, all cryptographic uses of this implementation are strongly -/// discouraged. +/// Although the SipHash algorithm is considered to be generally strong, +/// it is not intended for cryptographic purposes. As such, all +/// cryptographic uses of this implementation are _strongly discouraged_. 
#[stable(feature = "rust1", since = "1.0.0")] -pub struct SipHasher { +#[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] +#[derive(Debug, Clone, Default)] +pub struct SipHasher(SipHasher24); + +#[derive(Debug)] +struct Hasher { k0: u64, k1: u64, length: usize, // how many bytes we've processed + state: State, // hash State + tail: u64, // unprocessed bytes le + ntail: usize, // how many bytes in tail are valid + _marker: PhantomData, +} + +#[derive(Debug, Clone, Copy)] +struct State { // v0, v2 and v1, v3 show up in pairs in the algorithm, // and simd implementations of SipHash will use vectors // of v02 and v13. By placing them in this order in the struct, // the compiler can pick up on just a few simd optimizations by itself. - v0: u64, // hash state + v0: u64, v2: u64, v1: u64, v3: u64, - tail: u64, // unprocessed bytes le - ntail: usize, // how many bytes in tail are valid } -// sadly, these macro definitions can't appear later, -// because they're needed in the following defs; -// this design could be improved. - -macro_rules! u8to64_le { - ($buf:expr, $i:expr) => - ($buf[0+$i] as u64 | - ($buf[1+$i] as u64) << 8 | - ($buf[2+$i] as u64) << 16 | - ($buf[3+$i] as u64) << 24 | - ($buf[4+$i] as u64) << 32 | - ($buf[5+$i] as u64) << 40 | - ($buf[6+$i] as u64) << 48 | - ($buf[7+$i] as u64) << 56); - ($buf:expr, $i:expr, $len:expr) => +macro_rules! 
compress { + ($state:expr) => ({ + compress!($state.v0, $state.v1, $state.v2, $state.v3) + }); + ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => ({ - let mut t = 0; - let mut out = 0; - while t < $len { - out |= ($buf[t+$i] as u64) << t*8; - t += 1; - } - out + $v0 = $v0.wrapping_add($v1); $v1 = $v1.rotate_left(13); $v1 ^= $v0; + $v0 = $v0.rotate_left(32); + $v2 = $v2.wrapping_add($v3); $v3 = $v3.rotate_left(16); $v3 ^= $v2; + $v0 = $v0.wrapping_add($v3); $v3 = $v3.rotate_left(21); $v3 ^= $v0; + $v2 = $v2.wrapping_add($v1); $v1 = $v1.rotate_left(17); $v1 ^= $v2; + $v2 = $v2.rotate_left(32); }); } -/// Load a full u64 word from a byte stream, in LE order. Use +/// Load an integer of the desired type from a byte stream, in LE order. Uses /// `copy_nonoverlapping` to let the compiler generate the most efficient way -/// to load u64 from a possibly unaligned address. +/// to load it from a possibly unaligned address. /// -/// Unsafe because: unchecked indexing at i..i+8 -#[inline] -unsafe fn load_u64_le(buf: &[u8], i: usize) -> u64 { - debug_assert!(i + 8 <= buf.len()); - let mut data = 0u64; - ptr::copy_nonoverlapping(buf.get_unchecked(i), &mut data as *mut _ as *mut u8, 8); - data.to_le() -} - -macro_rules! rotl { - ($x:expr, $b:expr) => - (($x << $b) | ($x >> (64_i32.wrapping_sub($b)))) +/// Unsafe because: unchecked indexing at i..i+size_of(int_ty) +macro_rules! load_int_le { + ($buf:expr, $i:expr, $int_ty:ident) => + ({ + debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len()); + let mut data = 0 as $int_ty; + ptr::copy_nonoverlapping($buf.get_unchecked($i), + &mut data as *mut _ as *mut u8, + mem::size_of::<$int_ty>()); + data.to_le() + }); } -macro_rules! 
compress { - ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => - ({ - $v0 = $v0.wrapping_add($v1); $v1 = rotl!($v1, 13); $v1 ^= $v0; - $v0 = rotl!($v0, 32); - $v2 = $v2.wrapping_add($v3); $v3 = rotl!($v3, 16); $v3 ^= $v2; - $v0 = $v0.wrapping_add($v3); $v3 = rotl!($v3, 21); $v3 ^= $v0; - $v2 = $v2.wrapping_add($v1); $v1 = rotl!($v1, 17); $v1 ^= $v2; - $v2 = rotl!($v2, 32); - }) +/// Load an u64 using up to 7 bytes of a byte slice. +/// +/// Unsafe because: unchecked indexing at start..start+len +#[inline] +unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 { + debug_assert!(len < 8); + let mut i = 0; // current byte index (from LSB) in the output u64 + let mut out = 0; + if i + 3 < len { + out = load_int_le!(buf, start + i, u32) as u64; + i += 4; + } + if i + 1 < len { + out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8); + i += 2 + } + if i < len { + out |= (*buf.get_unchecked(start + i) as u64) << (i * 8); + i += 1; + } + debug_assert_eq!(i, len); + out } impl SipHasher { /// Creates a new `SipHasher` with the two initial keys set to 0. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] pub fn new() -> SipHasher { SipHasher::new_with_keys(0, 0) } @@ -112,17 +152,73 @@ impl SipHasher { /// Creates a `SipHasher` that is keyed off the provided keys. #[inline] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher { - let mut state = SipHasher { + SipHasher(SipHasher24::new_with_keys(key0, key1)) + } +} + +impl SipHasher13 { + /// Creates a new `SipHasher13` with the two initial keys set to 0. 
+ #[inline] + #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] + pub fn new() -> SipHasher13 { + SipHasher13::new_with_keys(0, 0) + } + + /// Creates a `SipHasher13` that is keyed off the provided keys. + #[inline] + #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] + pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher13 { + SipHasher13 { + hasher: Hasher::new_with_keys(key0, key1) + } + } +} + +impl SipHasher24 { + /// Creates a new `SipHasher24` with the two initial keys set to 0. + #[inline] + #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] + pub fn new() -> SipHasher24 { + SipHasher24::new_with_keys(0, 0) + } + + /// Creates a `SipHasher24` that is keyed off the provided keys. 
+ #[inline] + #[unstable(feature = "sip_hash_13", issue = "34767")] + #[rustc_deprecated(since = "1.13.0", + reason = "use `std::collections::hash_map::DefaultHasher` instead")] + pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher24 { + SipHasher24 { + hasher: Hasher::new_with_keys(key0, key1) + } + } +} + +impl Hasher { + #[inline] + fn new_with_keys(key0: u64, key1: u64) -> Hasher { + let mut state = Hasher { k0: key0, k1: key1, length: 0, - v0: 0, - v1: 0, - v2: 0, - v3: 0, + state: State { + v0: 0, + v1: 0, + v2: 0, + v3: 0, + }, tail: 0, ntail: 0, + _marker: PhantomData, }; state.reset(); state @@ -131,16 +227,100 @@ impl SipHasher { #[inline] fn reset(&mut self) { self.length = 0; - self.v0 = self.k0 ^ 0x736f6d6570736575; - self.v1 = self.k1 ^ 0x646f72616e646f6d; - self.v2 = self.k0 ^ 0x6c7967656e657261; - self.v3 = self.k1 ^ 0x7465646279746573; + self.state.v0 = self.k0 ^ 0x736f6d6570736575; + self.state.v1 = self.k1 ^ 0x646f72616e646f6d; + self.state.v2 = self.k0 ^ 0x6c7967656e657261; + self.state.v3 = self.k1 ^ 0x7465646279746573; self.ntail = 0; } + + // Specialized write function that is only valid for buffers with len <= 8. + // It's used to force inlining of write_u8 and write_usize, those would normally be inlined + // except for composite types (that includes slices and str hashing because of delimiter). + // Without this extra push the compiler is very reluctant to inline delimiter writes, + // degrading performance substantially for the most common use cases. 
+ #[inline(always)] + fn short_write(&mut self, msg: &[u8]) { + debug_assert!(msg.len() <= 8); + let length = msg.len(); + self.length += length; + + let needed = 8 - self.ntail; + let fill = cmp::min(length, needed); + if fill == 8 { + self.tail = unsafe { load_int_le!(msg, 0, u64) }; + } else { + self.tail |= unsafe { u8to64_le(msg, 0, fill) } << (8 * self.ntail); + if length < needed { + self.ntail += length; + return; + } + } + self.state.v3 ^= self.tail; + S::c_rounds(&mut self.state); + self.state.v0 ^= self.tail; + + // Buffered tail is now flushed, process new input. + self.ntail = length - needed; + self.tail = unsafe { u8to64_le(msg, needed, self.ntail) }; + } } #[stable(feature = "rust1", since = "1.0.0")] -impl Hasher for SipHasher { +impl super::Hasher for SipHasher { + #[inline] + fn write(&mut self, msg: &[u8]) { + self.0.write(msg) + } + + #[inline] + fn finish(&self) -> u64 { + self.0.finish() + } +} + +#[unstable(feature = "sip_hash_13", issue = "34767")] +impl super::Hasher for SipHasher13 { + #[inline] + fn write(&mut self, msg: &[u8]) { + self.hasher.write(msg) + } + + #[inline] + fn finish(&self) -> u64 { + self.hasher.finish() + } +} + +#[unstable(feature = "sip_hash_13", issue = "34767")] +impl super::Hasher for SipHasher24 { + #[inline] + fn write(&mut self, msg: &[u8]) { + self.hasher.write(msg) + } + + #[inline] + fn finish(&self) -> u64 { + self.hasher.finish() + } +} + +impl super::Hasher for Hasher { + // see short_write comment for explanation + #[inline] + fn write_usize(&mut self, i: usize) { + let bytes = unsafe { + ::slice::from_raw_parts(&i as *const usize as *const u8, mem::size_of::()) + }; + self.short_write(bytes); + } + + // see short_write comment for explanation + #[inline] + fn write_u8(&mut self, i: u8) { + self.short_write(&[i]); + } + #[inline] fn write(&mut self, msg: &[u8]) { let length = msg.len(); @@ -150,20 +330,16 @@ impl Hasher for SipHasher { if self.ntail != 0 { needed = 8 - self.ntail; + self.tail |= unsafe { 
u8to64_le(msg, 0, cmp::min(length, needed)) } << 8 * self.ntail; if length < needed { - self.tail |= u8to64_le!(msg, 0, length) << 8 * self.ntail; self.ntail += length; return + } else { + self.state.v3 ^= self.tail; + S::c_rounds(&mut self.state); + self.state.v0 ^= self.tail; + self.ntail = 0; } - - let m = self.tail | u8to64_le!(msg, 0, needed) << 8 * self.ntail; - - self.v3 ^= m; - compress!(self.v0, self.v1, self.v2, self.v3); - compress!(self.v0, self.v1, self.v2, self.v3); - self.v0 ^= m; - - self.ntail = 0; } // Buffered tail is now flushed, process new input. @@ -172,65 +348,97 @@ impl Hasher for SipHasher { let mut i = needed; while i < len - left { - let mi = unsafe { load_u64_le(msg, i) }; + let mi = unsafe { load_int_le!(msg, i, u64) }; - self.v3 ^= mi; - compress!(self.v0, self.v1, self.v2, self.v3); - compress!(self.v0, self.v1, self.v2, self.v3); - self.v0 ^= mi; + self.state.v3 ^= mi; + S::c_rounds(&mut self.state); + self.state.v0 ^= mi; i += 8; } - self.tail = u8to64_le!(msg, i, left); + self.tail = unsafe { u8to64_le(msg, i, left) }; self.ntail = left; } #[inline] fn finish(&self) -> u64 { - let mut v0 = self.v0; - let mut v1 = self.v1; - let mut v2 = self.v2; - let mut v3 = self.v3; + let mut state = self.state; let b: u64 = ((self.length as u64 & 0xff) << 56) | self.tail; - v3 ^= b; - compress!(v0, v1, v2, v3); - compress!(v0, v1, v2, v3); - v0 ^= b; + state.v3 ^= b; + S::c_rounds(&mut state); + state.v0 ^= b; - v2 ^= 0xff; - compress!(v0, v1, v2, v3); - compress!(v0, v1, v2, v3); - compress!(v0, v1, v2, v3); - compress!(v0, v1, v2, v3); + state.v2 ^= 0xff; + S::d_rounds(&mut state); - v0 ^ v1 ^ v2 ^ v3 + state.v0 ^ state.v1 ^ state.v2 ^ state.v3 } } -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for SipHasher { +impl Clone for Hasher { #[inline] - fn clone(&self) -> SipHasher { - SipHasher { + fn clone(&self) -> Hasher { + Hasher { k0: self.k0, k1: self.k1, length: self.length, - v0: self.v0, - v1: self.v1, - v2: self.v2, - v3: 
self.v3, + state: self.state, tail: self.tail, ntail: self.ntail, + _marker: self._marker, } } } -#[stable(feature = "rust1", since = "1.0.0")] -impl Default for SipHasher { - fn default() -> SipHasher { - SipHasher::new() +impl Default for Hasher { + /// Creates a `Hasher` with the two initial keys set to 0. + #[inline] + fn default() -> Hasher { + Hasher::new_with_keys(0, 0) + } +} + +#[doc(hidden)] +trait Sip { + fn c_rounds(&mut State); + fn d_rounds(&mut State); +} + +#[derive(Debug, Clone, Default)] +struct Sip13Rounds; + +impl Sip for Sip13Rounds { + #[inline] + fn c_rounds(state: &mut State) { + compress!(state); + } + + #[inline] + fn d_rounds(state: &mut State) { + compress!(state); + compress!(state); + compress!(state); + } +} + +#[derive(Debug, Clone, Default)] +struct Sip24Rounds; + +impl Sip for Sip24Rounds { + #[inline] + fn c_rounds(state: &mut State) { + compress!(state); + compress!(state); + } + + #[inline] + fn d_rounds(state: &mut State) { + compress!(state); + compress!(state); + compress!(state); + compress!(state); } } diff --git a/src/libcore/internal_macros.rs b/src/libcore/internal_macros.rs new file mode 100644 index 0000000000000..f2cdc9d6a98c5 --- /dev/null +++ b/src/libcore/internal_macros.rs @@ -0,0 +1,62 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +// implements the unary operator "op &T" +// based on "op T" where T is expected to be `Copy`able +macro_rules! 
forward_ref_unop { + (impl $imp:ident, $method:ident for $t:ty) => { + #[stable(feature = "rust1", since = "1.0.0")] + impl<'a> $imp for &'a $t { + type Output = <$t as $imp>::Output; + + #[inline] + fn $method(self) -> <$t as $imp>::Output { + $imp::$method(*self) + } + } + } +} + +// implements binary operators "&T op U", "T op &U", "&T op &U" +// based on "T op U" where T and U are expected to be `Copy`able +macro_rules! forward_ref_binop { + (impl $imp:ident, $method:ident for $t:ty, $u:ty) => { + #[stable(feature = "rust1", since = "1.0.0")] + impl<'a> $imp<$u> for &'a $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: $u) -> <$t as $imp<$u>>::Output { + $imp::$method(*self, other) + } + } + + #[stable(feature = "rust1", since = "1.0.0")] + impl<'a> $imp<&'a $u> for $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + $imp::$method(self, *other) + } + } + + #[stable(feature = "rust1", since = "1.0.0")] + impl<'a, 'b> $imp<&'a $u> for &'b $t { + type Output = <$t as $imp<$u>>::Output; + + #[inline] + fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + $imp::$method(*self, *other) + } + } + } +} diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index 568c4e143e04b..3726eee9a93c6 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -10,7 +10,7 @@ //! rustc compiler intrinsics. //! -//! The corresponding definitions are in librustc_trans/trans/intrinsic.rs. +//! The corresponding definitions are in librustc_trans/intrinsic.rs. //! //! # Volatiles //! @@ -46,69 +46,477 @@ issue = "0")] #![allow(missing_docs)] -use marker::Sized; - extern "rust-intrinsic" { // NB: These intrinsics take raw pointers because they mutate aliased // memory, which is not valid for either `&` or `&mut`. 
- pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_acqrel(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_relaxed(dst: *mut T, old: T, src: T) -> T; - + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). 
+ pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_acqrel(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_relaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_failacq(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_acq_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange). + pub fn atomic_cxchg_acqrel_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + + /// Stores a value if the current value is the same as the `old` value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_acq(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_rel(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_acqrel(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as both the `success` and `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_relaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_failacq(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_acq_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + /// Stores a value if the current value is the same as the `old` value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `compare_exchange_weak` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `success` and + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `failure` parameters. For example, + /// [`AtomicBool::compare_exchange_weak`] + /// (../../std/sync/atomic/struct.AtomicBool.html#method.compare_exchange_weak). + pub fn atomic_cxchgweak_acqrel_failrelaxed(dst: *mut T, old: T, src: T) -> (T, bool); + + /// Loads the current value of the pointer. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `load` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load(src: *const T) -> T; + /// Loads the current value of the pointer. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `load` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load_acq(src: *const T) -> T; + /// Loads the current value of the pointer. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `load` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::load`](../../std/sync/atomic/struct.AtomicBool.html#method.load). pub fn atomic_load_relaxed(src: *const T) -> T; pub fn atomic_load_unordered(src: *const T) -> T; + /// Stores the value at the specified memory location. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `store` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store(dst: *mut T, val: T); + /// Stores the value at the specified memory location. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `store` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. 
For example, + /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store_rel(dst: *mut T, val: T); + /// Stores the value at the specified memory location. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `store` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::store`](../../std/sync/atomic/struct.AtomicBool.html#method.store). pub fn atomic_store_relaxed(dst: *mut T, val: T); pub fn atomic_store_unordered(dst: *mut T, val: T); + /// Stores the value at the specified memory location, returning the old value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `swap` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg(dst: *mut T, src: T) -> T; + /// Stores the value at the specified memory location, returning the old value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `swap` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_acq(dst: *mut T, src: T) -> T; + /// Stores the value at the specified memory location, returning the old value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `swap` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). 
pub fn atomic_xchg_rel(dst: *mut T, src: T) -> T; + /// Stores the value at the specified memory location, returning the old value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `swap` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_acqrel(dst: *mut T, src: T) -> T; + /// Stores the value at the specified memory location, returning the old value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `swap` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_relaxed(dst: *mut T, src: T) -> T; + /// Add to the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_add` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd(dst: *mut T, src: T) -> T; + /// Add to the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_add` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; + /// Add to the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_add` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; + /// Add to the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_add` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_acqrel(dst: *mut T, src: T) -> T; + /// Add to the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_add` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_relaxed(dst: *mut T, src: T) -> T; + /// Subtract from the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_sub` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub(dst: *mut T, src: T) -> T; + /// Subtract from the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_sub` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_acq(dst: *mut T, src: T) -> T; + /// Subtract from the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_sub` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_rel(dst: *mut T, src: T) -> T; + /// Subtract from the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_sub` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_acqrel(dst: *mut T, src: T) -> T; + /// Subtract from the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_sub` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicIsize::fetch_sub`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_sub). pub fn atomic_xsub_relaxed(dst: *mut T, src: T) -> T; + /// Bitwise and with the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_and` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and(dst: *mut T, src: T) -> T; + /// Bitwise and with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_and` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_acq(dst: *mut T, src: T) -> T; + /// Bitwise and with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_and` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_rel(dst: *mut T, src: T) -> T; + /// Bitwise and with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_and` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_acqrel(dst: *mut T, src: T) -> T; + /// Bitwise and with the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_and` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_and`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_and). pub fn atomic_and_relaxed(dst: *mut T, src: T) -> T; + /// Bitwise nand with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand(dst: *mut T, src: T) -> T; + /// Bitwise nand with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_acq(dst: *mut T, src: T) -> T; + /// Bitwise nand with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_rel(dst: *mut T, src: T) -> T; + /// Bitwise nand with the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_acqrel(dst: *mut T, src: T) -> T; + /// Bitwise nand with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic::AtomicBool` type via the `fetch_nand` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_nand`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_nand). pub fn atomic_nand_relaxed(dst: *mut T, src: T) -> T; + /// Bitwise or with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_or` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or(dst: *mut T, src: T) -> T; + /// Bitwise or with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_or` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_acq(dst: *mut T, src: T) -> T; + /// Bitwise or with the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_or` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_rel(dst: *mut T, src: T) -> T; + /// Bitwise or with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_or` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_acqrel(dst: *mut T, src: T) -> T; + /// Bitwise or with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_or` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_or`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_or). pub fn atomic_or_relaxed(dst: *mut T, src: T) -> T; + /// Bitwise xor with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_xor` method by passing + /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor(dst: *mut T, src: T) -> T; + /// Bitwise xor with the current value, returning the previous value. 
+ /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_xor` method by passing + /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_acq(dst: *mut T, src: T) -> T; + /// Bitwise xor with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_xor` method by passing + /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_rel(dst: *mut T, src: T) -> T; + /// Bitwise xor with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_xor` method by passing + /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). pub fn atomic_xor_acqrel(dst: *mut T, src: T) -> T; + /// Bitwise xor with the current value, returning the previous value. + /// The stabilized version of this intrinsic is available on the + /// `std::sync::atomic` types via the `fetch_xor` method by passing + /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) + /// as the `order`. For example, + /// [`AtomicBool::fetch_xor`](../../std/sync/atomic/struct.AtomicBool.html#method.fetch_xor). 
pub fn atomic_xor_relaxed(dst: *mut T, src: T) -> T; pub fn atomic_max(dst: *mut T, src: T) -> T; @@ -154,6 +562,15 @@ extern "rust-intrinsic" { pub fn atomic_singlethreadfence_rel(); pub fn atomic_singlethreadfence_acqrel(); + /// Magic intrinsic that derives its meaning from attributes + /// attached to the function. + /// + /// For example, dataflow uses this to inject static assertions so + /// that `rustc_peek(potentially_uninitialized)` would actually + /// double-check that dataflow did indeed compute that it is + /// uninitialized at that point in the control flow. + pub fn rustc_peek(_: T) -> T; + /// Aborts the execution of the process. pub fn abort() -> !; @@ -173,16 +590,25 @@ extern "rust-intrinsic" { /// own, or if it does not enable any significant optimizations. pub fn assume(b: bool); + /// Hints to the compiler that branch condition is likely to be true. + /// Returns the value passed to it. + /// + /// Any use other than with `if` statements will probably not have an effect. + pub fn likely(b: bool) -> bool; + + /// Hints to the compiler that branch condition is likely to be false. + /// Returns the value passed to it. + /// + /// Any use other than with `if` statements will probably not have an effect. + pub fn unlikely(b: bool) -> bool; + /// Executes a breakpoint trap, for inspection by a debugger. pub fn breakpoint(); /// The size of a type in bytes. /// - /// This is the exact number of bytes in memory taken up by a - /// value of the given type. In other words, a memset of this size - /// would *exactly* overwrite a value. When laid out in vectors - /// and structures there may be additional padding between - /// elements. + /// More specifically, this is the offset in bytes between successive + /// items of the same type, including alignment padding. pub fn size_of() -> usize; /// Moves a value to an uninitialized memory location. 
@@ -213,7 +639,7 @@ extern "rust-intrinsic" { /// /// This has all the same safety problems as `ptr::read` with respect to /// invalid pointers, types, and double drops. - #[unstable(feature = "drop_in_place", reason = "just exposed, needs FCP", issue = "27908")] + #[stable(feature = "drop_in_place", since = "1.8.0")] pub fn drop_in_place(to_drop: *mut T); /// Gets a static string slice containing the name of a type. @@ -224,19 +650,6 @@ extern "rust-intrinsic" { /// crate it is invoked in. pub fn type_id() -> u64; - /// Creates a value initialized to so that its drop flag, - /// if any, says that it has been dropped. - /// - /// `init_dropped` is unsafe because it returns a datum with all - /// of its bytes set to the drop flag, which generally does not - /// correspond to a valid value. - /// - /// This intrinsic is likely to be deprecated in the future when - /// Rust moves to non-zeroing dynamic drop (and thus removes the - /// embedded drop flags that are being established by this - /// intrinsic). - pub fn init_dropped() -> T; - /// Creates a value initialized to zero. /// /// `init` is unsafe because it returns a zeroed-out datum, @@ -257,27 +670,217 @@ extern "rust-intrinsic" { /// Moves a value out of scope without running drop glue. pub fn forget(_: T) -> (); - /// Unsafely transforms a value of one type into a value of another type. + /// Reinterprets the bits of a value of one type as another type. + /// + /// Both types must have the same size. Neither the original, nor the result, + /// may be an [invalid value](../../nomicon/meet-safe-and-unsafe.html). /// - /// Both types must have the same size. + /// `transmute` is semantically equivalent to a bitwise move of one type + /// into another. It copies the bits from the source value into the + /// destination value, then forgets the original. It's equivalent to C's + /// `memcpy` under the hood, just like `transmute_copy`. + /// + /// `transmute` is **incredibly** unsafe. 
There are a vast number of ways to + /// cause [undefined behavior][ub] with this function. `transmute` should be + /// the absolute last resort. + /// + /// The [nomicon](../../nomicon/transmutes.html) has additional + /// documentation. + /// + /// [ub]: ../../reference.html#behavior-considered-undefined /// /// # Examples /// + /// There are a few things that `transmute` is really useful for. + /// + /// Getting the bitpattern of a floating point type (or, more generally, + /// type punning, when `T` and `U` aren't pointers): + /// /// ``` - /// use std::mem; + /// let bitpattern = unsafe { + /// std::mem::transmute::(1.0) + /// }; + /// assert_eq!(bitpattern, 0x3F800000); + /// ``` + /// + /// Turning a pointer into a function pointer. This is *not* portable to + /// machines where function pointers and data pointers have different sizes. + /// + /// ``` + /// fn foo() -> i32 { + /// 0 + /// } + /// let pointer = foo as *const (); + /// let function = unsafe { + /// std::mem::transmute::<*const (), fn() -> i32>(pointer) + /// }; + /// assert_eq!(function(), 0); + /// ``` + /// + /// Extending a lifetime, or shortening an invariant lifetime. This is + /// advanced, very unsafe Rust! + /// + /// ``` + /// struct R<'a>(&'a i32); + /// unsafe fn extend_lifetime<'b>(r: R<'b>) -> R<'static> { + /// std::mem::transmute::, R<'static>>(r) + /// } + /// + /// unsafe fn shorten_invariant_lifetime<'b, 'c>(r: &'b mut R<'static>) + /// -> &'b mut R<'c> { + /// std::mem::transmute::<&'b mut R<'static>, &'b mut R<'c>>(r) + /// } + /// ``` + /// + /// # Alternatives + /// + /// Don't despair: many uses of `transmute` can be achieved through other means. + /// Below are common applications of `transmute` which can be replaced with safer + /// constructs. 
+ /// + /// Turning a pointer into a `usize`: + /// + /// ``` + /// let ptr = &0; + /// let ptr_num_transmute = unsafe { + /// std::mem::transmute::<&i32, usize>(ptr) + /// }; + /// + /// // Use an `as` cast instead + /// let ptr_num_cast = ptr as *const i32 as usize; + /// ``` + /// + /// Turning a `*mut T` into an `&mut T`: + /// + /// ``` + /// let ptr: *mut i32 = &mut 0; + /// let ref_transmuted = unsafe { + /// std::mem::transmute::<*mut i32, &mut i32>(ptr) + /// }; + /// + /// // Use a reborrow instead + /// let ref_casted = unsafe { &mut *ptr }; + /// ``` + /// + /// Turning an `&mut T` into an `&mut U`: + /// + /// ``` + /// let ptr = &mut 0; + /// let val_transmuted = unsafe { + /// std::mem::transmute::<&mut i32, &mut u32>(ptr) + /// }; /// - /// let array: &[u8] = unsafe { mem::transmute("Rust") }; - /// assert_eq!(array, [82, 117, 115, 116]); + /// // Now, put together `as` and reborrowing - note the chaining of `as` + /// // `as` is not transitive + /// let val_casts = unsafe { &mut *(ptr as *mut i32 as *mut u32) }; + /// ``` + /// + /// Turning an `&str` into an `&[u8]`: + /// + /// ``` + /// // this is not a good way to do this. + /// let slice = unsafe { std::mem::transmute::<&str, &[u8]>("Rust") }; + /// assert_eq!(slice, &[82, 117, 115, 116]); + /// + /// // You could use `str::as_bytes` + /// let slice = "Rust".as_bytes(); + /// assert_eq!(slice, &[82, 117, 115, 116]); + /// + /// // Or, just use a byte string, if you have control over the string + /// // literal + /// assert_eq!(b"Rust", &[82, 117, 115, 116]); + /// ``` + /// + /// Turning a `Vec<&T>` into a `Vec>`: + /// + /// ``` + /// let store = [0, 1, 2, 3]; + /// let mut v_orig = store.iter().collect::>(); + /// + /// // Using transmute: this is Undefined Behavior, and a bad idea. + /// // However, it is no-copy. + /// let v_transmuted = unsafe { + /// std::mem::transmute::, Vec>>( + /// v_orig.clone()) + /// }; + /// + /// // This is the suggested, safe way. 
+ /// // It does copy the entire vector, though, into a new array. + /// let v_collected = v_orig.clone() + /// .into_iter() + /// .map(|r| Some(r)) + /// .collect::>>(); + /// + /// // The no-copy, unsafe way, still using transmute, but not UB. + /// // This is equivalent to the original, but safer, and reuses the + /// // same Vec internals. Therefore the new inner type must have the + /// // exact same size, and the same or lesser alignment, as the old + /// // type. The same caveats exist for this method as transmute, for + /// // the original inner type (`&i32`) to the converted inner type + /// // (`Option<&i32>`), so read the nomicon pages linked above. + /// let v_from_raw = unsafe { + /// Vec::from_raw_parts(v_orig.as_mut_ptr(), + /// v_orig.len(), + /// v_orig.capacity()) + /// }; + /// std::mem::forget(v_orig); + /// ``` + /// + /// Implementing `split_at_mut`: + /// + /// ``` + /// use std::{slice, mem}; + /// + /// // There are multiple ways to do this; and there are multiple problems + /// // with the following, transmute, way. + /// fn split_at_mut_transmute(slice: &mut [T], mid: usize) + /// -> (&mut [T], &mut [T]) { + /// let len = slice.len(); + /// assert!(mid <= len); + /// unsafe { + /// let slice2 = mem::transmute::<&mut [T], &mut [T]>(slice); + /// // first: transmute is not typesafe; all it checks is that T and + /// // U are of the same size. Second, right here, you have two + /// // mutable references pointing to the same memory. + /// (&mut slice[0..mid], &mut slice2[mid..len]) + /// } + /// } + /// + /// // This gets rid of the typesafety problems; `&mut *` will *only* give + /// // you an `&mut T` from an `&mut T` or `*mut T`. + /// fn split_at_mut_casts(slice: &mut [T], mid: usize) + /// -> (&mut [T], &mut [T]) { + /// let len = slice.len(); + /// assert!(mid <= len); + /// unsafe { + /// let slice2 = &mut *(slice as *mut [T]); + /// // however, you still have two mutable references pointing to + /// // the same memory. 
+ /// (&mut slice[0..mid], &mut slice2[mid..len]) + /// } + /// } + /// + /// // This is how the standard library does it. This is the best method, if + /// // you need to do something like this + /// fn split_at_stdlib(slice: &mut [T], mid: usize) + /// -> (&mut [T], &mut [T]) { + /// let len = slice.len(); + /// assert!(mid <= len); + /// unsafe { + /// let ptr = slice.as_mut_ptr(); + /// // This now has three mutable references pointing at the same + /// // memory. `slice`, the rvalue ret.0, and the rvalue ret.1. + /// // `slice` is never used after `let ptr = ...`, and so one can + /// // treat it as "dead", and therefore, you only have two real + /// // mutable slices. + /// (slice::from_raw_parts_mut(ptr, mid), + /// slice::from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) + /// } + /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn transmute(e: T) -> U; - /// Gives the address for the return value of the enclosing function. - /// - /// Using this intrinsic in a function that does not use an out pointer - /// will trigger a compiler error. - pub fn return_address() -> *const u8; - /// Returns `true` if the actual type given as `T` requires drop /// glue; returns `false` if the actual type provided for `T` /// implements `Copy`. @@ -387,6 +990,19 @@ extern "rust-intrinsic" { /// Invokes memset on the specified pointer, setting `count * size_of::()` /// bytes of memory starting at `dst` to `val`. + /// + /// # Examples + /// + /// ``` + /// use std::ptr; + /// + /// let mut vec = vec![0; 4]; + /// unsafe { + /// let vec_ptr = vec.as_mut_ptr(); + /// ptr::write_bytes(vec_ptr, b'a', 2); + /// } + /// assert_eq!(vec, [b'a', b'a', 0, 0]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn write_bytes(dst: *mut T, val: u8, count: usize); @@ -411,8 +1027,12 @@ extern "rust-intrinsic" { pub fn volatile_set_memory(dst: *mut T, val: u8, count: usize); /// Perform a volatile load from the `src` pointer. 
+ /// The stabilized version of this intrinsic is + /// [`std::ptr::read_volatile`](../../std/ptr/fn.read_volatile.html). pub fn volatile_load(src: *const T) -> T; /// Perform a volatile store to the `dst` pointer. + /// The stabilized version of this intrinsic is + /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html). pub fn volatile_store(dst: *mut T, val: T); /// Returns the square root of an `f32` @@ -512,6 +1132,27 @@ extern "rust-intrinsic" { /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero. pub fn roundf64(x: f64) -> f64; + /// Float addition that allows optimizations based on algebraic rules. + /// May assume inputs are finite. + pub fn fadd_fast(a: T, b: T) -> T; + + /// Float subtraction that allows optimizations based on algebraic rules. + /// May assume inputs are finite. + pub fn fsub_fast(a: T, b: T) -> T; + + /// Float multiplication that allows optimizations based on algebraic rules. + /// May assume inputs are finite. + pub fn fmul_fast(a: T, b: T) -> T; + + /// Float division that allows optimizations based on algebraic rules. + /// May assume inputs are finite. + pub fn fdiv_fast(a: T, b: T) -> T; + + /// Float remainder that allows optimizations based on algebraic rules. + /// May assume inputs are finite. + pub fn frem_fast(a: T, b: T) -> T; + + /// Returns the number of bits set in an integer type `T` pub fn ctpop(x: T) -> T; @@ -525,12 +1166,21 @@ extern "rust-intrinsic" { pub fn bswap(x: T) -> T; /// Performs checked integer addition. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `overflowing_add` method. For example, + /// [`std::u32::overflowing_add`](../../std/primitive.u32.html#method.overflowing_add) pub fn add_with_overflow(x: T, y: T) -> (T, bool); /// Performs checked integer subtraction + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `overflowing_sub` method. 
For example, + /// [`std::u32::overflowing_sub`](../../std/primitive.u32.html#method.overflowing_sub) pub fn sub_with_overflow(x: T, y: T) -> (T, bool); /// Performs checked integer multiplication + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `overflowing_mul` method. For example, + /// [`std::u32::overflowing_mul`](../../std/primitive.u32.html#method.overflowing_mul) pub fn mul_with_overflow(x: T, y: T) -> (T, bool); /// Performs an unchecked division, resulting in undefined behavior @@ -541,10 +1191,19 @@ extern "rust-intrinsic" { pub fn unchecked_rem(x: T, y: T) -> T; /// Returns (a + b) mod 2^N, where N is the width of T in bits. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `wrapping_add` method. For example, + /// [`std::u32::wrapping_add`](../../std/primitive.u32.html#method.wrapping_add) pub fn overflowing_add(a: T, b: T) -> T; /// Returns (a - b) mod 2^N, where N is the width of T in bits. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `wrapping_sub` method. For example, + /// [`std::u32::wrapping_sub`](../../std/primitive.u32.html#method.wrapping_sub) pub fn overflowing_sub(a: T, b: T) -> T; /// Returns (a * b) mod 2^N, where N is the width of T in bits. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `wrapping_mul` method. For example, + /// [`std::u32::wrapping_mul`](../../std/primitive.u32.html#method.wrapping_mul) pub fn overflowing_mul(a: T, b: T) -> T; /// Returns the value of the discriminant for the variant in 'v', @@ -552,7 +1211,12 @@ extern "rust-intrinsic" { pub fn discriminant_value(v: &T) -> u64; /// Rust's "try catch" construct which invokes the function pointer `f` with - /// the data pointer `data`, returning the exception payload if an exception - /// is thrown (aka the thread panics). 
- pub fn try(f: fn(*mut u8), data: *mut u8) -> *mut u8; + /// the data pointer `data`. + /// + /// The third pointer is a target-specific data pointer which is filled in + /// with the specifics of the exception that occurred. For examples on Unix + /// platforms this is a `*mut *mut T` which is filled in by the compiler and + /// on MSVC it's `*mut [usize; 2]`. For more information see the compiler's + /// source as well as std's catch implementation. + pub fn try(f: fn(*mut u8), data: *mut u8, local_ptr: *mut u8) -> i32; } diff --git a/src/libcore/iter.rs b/src/libcore/iter.rs deleted file mode 100644 index e3e783329ec81..0000000000000 --- a/src/libcore/iter.rs +++ /dev/null @@ -1,4757 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Composable external iteration -//! -//! If you've found yourself with a collection of some kind, and needed to -//! perform an operation on the elements of said collection, you'll quickly run -//! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so -//! it's worth becoming familiar with them. -//! -//! Before explaining more, let's talk about how this module is structured: -//! -//! # Organization -//! -//! This module is largely organized by type: -//! -//! * [Traits] are the core portion: these traits define what kind of iterators -//! exist and what you can do with them. The methods of these traits are worth -//! putting some extra study time into. -//! * [Functions] provide some helpful ways to create some basic iterators. -//! * [Structs] are often the return types of the various methods on this -//! module's traits. You'll usually want to look at the method that creates -//! 
the `struct`, rather than the `struct` itself. For more detail about why, -//! see '[Implementing Iterator](#implementing-iterator)'. -//! -//! [Traits]: #traits -//! [Functions]: #functions -//! [Structs]: #structs -//! -//! That's it! Let's dig into iterators. -//! -//! # Iterator -//! -//! The heart and soul of this module is the [`Iterator`] trait. The core of -//! [`Iterator`] looks like this: -//! -//! ``` -//! trait Iterator { -//! type Item; -//! fn next(&mut self) -> Option; -//! } -//! ``` -//! -//! An iterator has a method, [`next()`], which when called, returns an -//! [`Option`]``. [`next()`] will return `Some(Item)` as long as there -//! are elements, and once they've all been exhausted, will return `None` to -//! indicate that iteration is finished. Individual iterators may choose to -//! resume iteration, and so calling [`next()`] again may or may not eventually -//! start returning `Some(Item)` again at some point. -//! -//! [`Iterator`]'s full definition includes a number of other methods as well, -//! but they are default methods, built on top of [`next()`], and so you get -//! them for free. -//! -//! Iterators are also composable, and it's common to chain them together to do -//! more complex forms of processing. See the [Adapters](#adapters) section -//! below for more details. -//! -//! [`Iterator`]: trait.Iterator.html -//! [`next()`]: trait.Iterator.html#tymethod.next -//! [`Option`]: ../option/enum.Option.html -//! -//! # The three forms of iteration -//! -//! There are three common methods which can create iterators from a collection: -//! -//! * `iter()`, which iterates over `&T`. -//! * `iter_mut()`, which iterates over `&mut T`. -//! * `into_iter()`, which iterates over `T`. -//! -//! Various things in the standard library may implement one or more of the -//! three, where appropriate. -//! -//! # Implementing Iterator -//! -//! Creating an iterator of your own involves two steps: creating a `struct` to -//! 
hold the iterator's state, and then `impl`ementing [`Iterator`] for that -//! `struct`. This is why there are so many `struct`s in this module: there is -//! one for each iterator and iterator adapter. -//! -//! Let's make an iterator named `Counter` which counts from `1` to `5`: -//! -//! ``` -//! // First, the struct: -//! -//! /// An iterator which counts from one to five -//! struct Counter { -//! count: usize, -//! } -//! -//! // we want our count to start at one, so let's add a new() method to help. -//! // This isn't strictly necessary, but is convenient. Note that we start -//! // `count` at zero, we'll see why in `next()`'s implementation below. -//! impl Counter { -//! fn new() -> Counter { -//! Counter { count: 0 } -//! } -//! } -//! -//! // Then, we implement `Iterator` for our `Counter`: -//! -//! impl Iterator for Counter { -//! // we will be counting with usize -//! type Item = usize; -//! -//! // next() is the only required method -//! fn next(&mut self) -> Option { -//! // increment our count. This is why we started at zero. -//! self.count += 1; -//! -//! // check to see if we've finished counting or not. -//! if self.count < 6 { -//! Some(self.count) -//! } else { -//! None -//! } -//! } -//! } -//! -//! // And now we can use it! -//! -//! let mut counter = Counter::new(); -//! -//! let x = counter.next().unwrap(); -//! println!("{}", x); -//! -//! let x = counter.next().unwrap(); -//! println!("{}", x); -//! -//! let x = counter.next().unwrap(); -//! println!("{}", x); -//! -//! let x = counter.next().unwrap(); -//! println!("{}", x); -//! -//! let x = counter.next().unwrap(); -//! println!("{}", x); -//! ``` -//! -//! This will print `1` through `5`, each on their own line. -//! -//! Calling `next()` this way gets repetitive. Rust has a construct which can -//! call `next()` on your iterator, until it reaches `None`. Let's go over that -//! next. -//! -//! # for Loops and IntoIterator -//! -//! 
Rust's `for` loop syntax is actually sugar for iterators. Here's a basic -//! example of `for`: -//! -//! ``` -//! let values = vec![1, 2, 3, 4, 5]; -//! -//! for x in values { -//! println!("{}", x); -//! } -//! ``` -//! -//! This will print the numbers one through five, each on their own line. But -//! you'll notice something here: we never called anything on our vector to -//! produce an iterator. What gives? -//! -//! There's a trait in the standard library for converting something into an -//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter()`], -//! which converts the thing implementing [`IntoIterator`] into an iterator. -//! Let's take a look at that `for` loop again, and what the compiler converts -//! it into: -//! -//! [`IntoIterator`]: trait.IntoIterator.html -//! [`into_iter()`]: trait.IntoIterator.html#tymethod.into_iter -//! -//! ``` -//! let values = vec![1, 2, 3, 4, 5]; -//! -//! for x in values { -//! println!("{}", x); -//! } -//! ``` -//! -//! Rust de-sugars this into: -//! -//! ``` -//! let values = vec![1, 2, 3, 4, 5]; -//! { -//! let result = match values.into_iter() { -//! mut iter => loop { -//! match iter.next() { -//! Some(x) => { println!("{}", x); }, -//! None => break, -//! } -//! }, -//! }; -//! result -//! } -//! ``` -//! -//! First, we call `into_iter()` on the value. Then, we match on the iterator -//! that returns, calling [`next()`] over and over until we see a `None`. At -//! that point, we `break` out of the loop, and we're done iterating. -//! -//! There's one more subtle bit here: the standard library contains an -//! interesting implementation of [`IntoIterator`]: -//! -//! ```ignore -//! impl IntoIterator for I -//! ``` -//! -//! In other words, all [`Iterator`]s implement [`IntoIterator`], by just -//! returning themselves. This means two things: -//! -//! 1. If you're writing an [`Iterator`], you can use it with a `for` loop. -//! 2. 
If you're creating a collection, implementing [`IntoIterator`] for it -//! will allow your collection to be used with the `for` loop. -//! -//! # Adapters -//! -//! Functions which take an [`Iterator`] and return another [`Iterator`] are -//! often called 'iterator adapters', as they're a form of the 'adapter -//! pattern'. -//! -//! Common iterator adapters include [`map()`], [`take()`], and [`collect()`]. -//! For more, see their documentation. -//! -//! [`map()`]: trait.Iterator.html#method.map -//! [`take()`]: trait.Iterator.html#method.take -//! [`collect()`]: trait.Iterator.html#method.collect -//! -//! # Laziness -//! -//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that -//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens -//! until you call [`next()`]. This is sometimes a source of confusion when -//! creating an iterator solely for its side effects. For example, the [`map()`] -//! method calls a closure on each element it iterates over: -//! -//! ``` -//! # #![allow(unused_must_use)] -//! let v = vec![1, 2, 3, 4, 5]; -//! v.iter().map(|x| println!("{}", x)); -//! ``` -//! -//! This will not print any values, as we only created an iterator, rather than -//! using it. The compiler will warn us about this kind of behavior: -//! -//! ```text -//! warning: unused result which must be used: iterator adaptors are lazy and -//! do nothing unless consumed -//! ``` -//! -//! The idiomatic way to write a [`map()`] for its side effects is to use a -//! `for` loop instead: -//! -//! ``` -//! let v = vec![1, 2, 3, 4, 5]; -//! -//! for x in &v { -//! println!("{}", x); -//! } -//! ``` -//! -//! [`map()`]: trait.Iterator.html#method.map -//! -//! The two most common ways to evaluate an iterator are to use a `for` loop -//! like this, or using the [`collect()`] adapter to produce a new collection. -//! -//! [`collect()`]: trait.Iterator.html#method.collect -//! -//! # Infinity -//! -//! 
Iterators do not have to be finite. As an example, an open-ended range is -//! an infinite iterator: -//! -//! ``` -//! let numbers = 0..; -//! ``` -//! -//! It is common to use the [`take()`] iterator adapter to turn an infinite -//! iterator into a finite one: -//! -//! ``` -//! let numbers = 0..; -//! let five_numbers = numbers.take(5); -//! -//! for number in five_numbers { -//! println!("{}", number); -//! } -//! ``` -//! -//! This will print the numbers `0` through `4`, each on their own line. -//! -//! [`take()`]: trait.Iterator.html#method.take - -#![stable(feature = "rust1", since = "1.0.0")] - -use clone::Clone; -use cmp; -use cmp::{Ord, PartialOrd, PartialEq, Ordering}; -use default::Default; -use marker; -use mem; -use num::{Zero, One}; -use ops::{self, Add, Sub, FnMut, Mul, RangeFrom}; -use option::Option::{self, Some, None}; -use marker::Sized; -use usize; - -fn _assert_is_object_safe(_: &Iterator) {} - -/// An interface for dealing with iterators. -/// -/// This is the main iterator trait. For more about the concept of iterators -/// generally, please see the [module-level documentation]. In particular, you -/// may want to know how to [implement `Iterator`][impl]. -/// -/// [module-level documentation]: index.html -/// [impl]: index.html#implementing-iterator -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "`{Self}` is not an iterator; maybe try calling \ - `.iter()` or a similar method"] -pub trait Iterator { - /// The type of the elements being iterated over. - #[stable(feature = "rust1", since = "1.0.0")] - type Item; - - /// Advances the iterator and returns the next value. - /// - /// Returns `None` when iteration is finished. Individual iterator - /// implementations may choose to resume iteration, and so calling `next()` - /// again may or may not eventually start returning `Some(Item)` again at some - /// point. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// // A call to next() returns the next value... - /// assert_eq!(Some(&1), iter.next()); - /// assert_eq!(Some(&2), iter.next()); - /// assert_eq!(Some(&3), iter.next()); - /// - /// // ... and then None once it's over. - /// assert_eq!(None, iter.next()); - /// - /// // More calls may or may not return None. Here, they always will. - /// assert_eq!(None, iter.next()); - /// assert_eq!(None, iter.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn next(&mut self) -> Option; - - /// Returns the bounds on the remaining length of the iterator. - /// - /// Specifically, `size_hint()` returns a tuple where the first element - /// is the lower bound, and the second element is the upper bound. - /// - /// The second half of the tuple that is returned is an `Option`. A - /// `None` here means that either there is no known upper bound, or the - /// upper bound is larger than `usize`. - /// - /// # Implementation notes - /// - /// It is not enforced that an iterator implementation yields the declared - /// number of elements. A buggy iterator may yield less than the lower bound - /// or more than the upper bound of elements. - /// - /// `size_hint()` is primarily intended to be used for optimizations such as - /// reserving space for the elements of the iterator, but must not be - /// trusted to e.g. omit bounds checks in unsafe code. An incorrect - /// implementation of `size_hint()` should not lead to memory safety - /// violations. - /// - /// That said, the implementation should provide a correct estimation, - /// because otherwise it would be a violation of the trait's protocol. - /// - /// The default implementation returns `(0, None)` which is correct for any - /// iterator. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// let iter = a.iter(); - /// - /// assert_eq!((3, Some(3)), iter.size_hint()); - /// ``` - /// - /// A more complex example: - /// - /// ``` - /// // The even numbers from zero to ten. - /// let iter = (0..10).filter(|x| x % 2 == 0); - /// - /// // We might iterate from zero to ten times. Knowing that it's five - /// // exactly wouldn't be possible without executing filter(). - /// assert_eq!((0, Some(10)), iter.size_hint()); - /// - /// // Let's add one five more numbers with chain() - /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20); - /// - /// // now both bounds are increased by five - /// assert_eq!((5, Some(15)), iter.size_hint()); - /// ``` - /// - /// Returning `None` for an upper bound: - /// - /// ``` - /// // an infinite iterator has no upper bound - /// let iter = 0..; - /// - /// assert_eq!((0, None), iter.size_hint()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn size_hint(&self) -> (usize, Option) { (0, None) } - - /// Consumes the iterator, counting the number of iterations and returning it. - /// - /// This method will evaluate the iterator until its [`next()`] returns - /// `None`. Once `None` is encountered, `count()` returns the number of - /// times it called [`next()`]. - /// - /// [`next()`]: #method.next - /// - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so counting elements of - /// an iterator with more than `usize::MAX` elements either produces the - /// wrong result or panics. If debug assertions are enabled, a panic is - /// guaranteed. - /// - /// # Panics - /// - /// This function might panic if the iterator has more than `usize::MAX` - /// elements. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.iter().count(), 3); - /// - /// let a = [1, 2, 3, 4, 5]; - /// assert_eq!(a.iter().count(), 5); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn count(self) -> usize where Self: Sized { - // Might overflow. - self.fold(0, |cnt, _| cnt + 1) - } - - /// Consumes the iterator, returning the last element. - /// - /// This method will evaluate the iterator until it returns `None`. While - /// doing so, it keeps track of the current element. After `None` is - /// returned, `last()` will then return the last element it saw. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.iter().last(), Some(&3)); - /// - /// let a = [1, 2, 3, 4, 5]; - /// assert_eq!(a.iter().last(), Some(&5)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn last(self) -> Option where Self: Sized { - let mut last = None; - for x in self { last = Some(x); } - last - } - - /// Consumes the `n` first elements of the iterator, then returns the - /// `next()` one. - /// - /// This method will evaluate the iterator `n` times, discarding those elements. - /// After it does so, it will call [`next()`] and return its value. - /// - /// [`next()`]: #method.next - /// - /// Like most indexing operations, the count starts from zero, so `nth(0)` - /// returns the first value, `nth(1)` the second, and so on. - /// - /// `nth()` will return `None` if `n` is larger than the length of the - /// iterator. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.iter().nth(1), Some(&2)); - /// ``` - /// - /// Calling `nth()` multiple times doesn't rewind the iterator: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert_eq!(iter.nth(1), Some(&2)); - /// assert_eq!(iter.nth(1), None); - /// ``` - /// - /// Returning `None` if there are less than `n` elements: - /// - /// ``` - /// let a = [1, 2, 3]; - /// assert_eq!(a.iter().nth(10), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn nth(&mut self, mut n: usize) -> Option where Self: Sized { - for x in self { - if n == 0 { return Some(x) } - n -= 1; - } - None - } - - /// Takes two iterators and creates a new iterator over both in sequence. - /// - /// `chain()` will return a new iterator which will first iterate over - /// values from the first iterator and then over values from the second - /// iterator. - /// - /// In other words, it links two iterators together, in a chain. 🔗 - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a1 = [1, 2, 3]; - /// let a2 = [4, 5, 6]; - /// - /// let mut iter = a1.iter().chain(a2.iter()); - /// - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), Some(&3)); - /// assert_eq!(iter.next(), Some(&4)); - /// assert_eq!(iter.next(), Some(&5)); - /// assert_eq!(iter.next(), Some(&6)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Since the argument to `chain()` uses [`IntoIterator`], we can pass - /// anything that can be converted into an [`Iterator`], not just an - /// [`Iterator`] itself. 
For example, slices (`&[T]`) implement - /// [`IntoIterator`], and so can be passed to `chain()` directly: - /// - /// [`IntoIterator`]: trait.IntoIterator.html - /// [`Iterator`]: trait.Iterator.html - /// - /// ``` - /// let s1 = &[1, 2, 3]; - /// let s2 = &[4, 5, 6]; - /// - /// let mut iter = s1.iter().chain(s2); - /// - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), Some(&3)); - /// assert_eq!(iter.next(), Some(&4)); - /// assert_eq!(iter.next(), Some(&5)); - /// assert_eq!(iter.next(), Some(&6)); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn chain(self, other: U) -> Chain where - Self: Sized, U: IntoIterator, - { - Chain{a: self, b: other.into_iter(), state: ChainState::Both} - } - - /// 'Zips up' two iterators into a single iterator of pairs. - /// - /// `zip()` returns a new iterator that will iterate over two other - /// iterators, returning a tuple where the first element comes from the - /// first iterator, and the second element comes from the second iterator. - /// - /// In other words, it zips two iterators together, into a single one. - /// - /// When either iterator returns `None`, all further calls to `next()` - /// will return `None`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a1 = [1, 2, 3]; - /// let a2 = [4, 5, 6]; - /// - /// let mut iter = a1.iter().zip(a2.iter()); - /// - /// assert_eq!(iter.next(), Some((&1, &4))); - /// assert_eq!(iter.next(), Some((&2, &5))); - /// assert_eq!(iter.next(), Some((&3, &6))); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Since the argument to `zip()` uses [`IntoIterator`], we can pass - /// anything that can be converted into an [`Iterator`], not just an - /// [`Iterator`] itself. 
For example, slices (`&[T]`) implement - /// [`IntoIterator`], and so can be passed to `zip()` directly: - /// - /// [`IntoIterator`]: trait.IntoIterator.html - /// [`Iterator`]: trait.Iterator.html - /// - /// ``` - /// let s1 = &[1, 2, 3]; - /// let s2 = &[4, 5, 6]; - /// - /// let mut iter = s1.iter().zip(s2); - /// - /// assert_eq!(iter.next(), Some((&1, &4))); - /// assert_eq!(iter.next(), Some((&2, &5))); - /// assert_eq!(iter.next(), Some((&3, &6))); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// `zip()` is often used to zip an infinite iterator to a finite one. - /// This works because the finite iterator will eventually return `None`, - /// ending the zipper. Zipping with `(0..)` can look a lot like [`enumerate()`]: - /// - /// ``` - /// let enumerate: Vec<_> = "foo".chars().enumerate().collect(); - /// - /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect(); - /// - /// assert_eq!((0, 'f'), enumerate[0]); - /// assert_eq!((0, 'f'), zipper[0]); - /// - /// assert_eq!((1, 'o'), enumerate[1]); - /// assert_eq!((1, 'o'), zipper[1]); - /// - /// assert_eq!((2, 'o'), enumerate[2]); - /// assert_eq!((2, 'o'), zipper[2]); - /// ``` - /// - /// [`enumerate()`]: trait.Iterator.html#method.enumerate - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn zip(self, other: U) -> Zip where - Self: Sized, U: IntoIterator - { - Zip{a: self, b: other.into_iter()} - } - - /// Takes a closure and creates an iterator which calls that closure on each - /// element. - /// - /// `map()` transforms one iterator into another, by means of its argument: - /// something that implements `FnMut`. It produces a new iterator which - /// calls this closure on each element of the original iterator. 
- /// - /// If you are good at thinking in types, you can think of `map()` like this: - /// If you have an iterator that gives you elements of some type `A`, and - /// you want an iterator of some other type `B`, you can use `map()`, - /// passing a closure that takes an `A` and returns a `B`. - /// - /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is - /// lazy, it is best used when you're already working with other iterators. - /// If you're doing some sort of looping for a side effect, it's considered - /// more idiomatic to use [`for`] than `map()`. - /// - /// [`for`]: ../../book/loops.html#for - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.into_iter().map(|x| 2 * x); - /// - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), Some(4)); - /// assert_eq!(iter.next(), Some(6)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// If you're doing some sort of side effect, prefer [`for`] to `map()`: - /// - /// ``` - /// # #![allow(unused_must_use)] - /// // don't do this: - /// (0..5).map(|x| println!("{}", x)); - /// - /// // it won't even execute, as it is lazy. Rust will warn you about this. - /// - /// // Instead, use for: - /// for x in 0..5 { - /// println!("{}", x); - /// } - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn map(self, f: F) -> Map where - Self: Sized, F: FnMut(Self::Item) -> B, - { - Map{iter: self, f: f} - } - - /// Creates an iterator which uses a closure to determine if an element - /// should be yielded. - /// - /// The closure must return `true` or `false`. `filter()` creates an - /// iterator which calls this closure on each element. If the closure - /// returns `true`, then the element is returned. If the closure returns - /// `false`, it will try again, and call the closure on the next element, - /// seeing if it passes the test. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [0i32, 1, 2]; - /// - /// let mut iter = a.into_iter().filter(|x| x.is_positive()); - /// - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Because the closure passed to `filter()` takes a reference, and many - /// iterators iterate over references, this leads to a possibly confusing - /// situation, where the type of the closure is a double reference: - /// - /// ``` - /// let a = [0, 1, 2]; - /// - /// let mut iter = a.into_iter().filter(|x| **x > 1); // need two *s! - /// - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// It's common to instead use destructuring on the argument to strip away - /// one: - /// - /// ``` - /// let a = [0, 1, 2]; - /// - /// let mut iter = a.into_iter().filter(|&x| *x > 1); // both & and * - /// - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// or both: - /// - /// ``` - /// let a = [0, 1, 2]; - /// - /// let mut iter = a.into_iter().filter(|&&x| x > 1); // two &s - /// - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// of these layers. - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn filter

(self, predicate: P) -> Filter where - Self: Sized, P: FnMut(&Self::Item) -> bool, - { - Filter{iter: self, predicate: predicate} - } - - /// Creates an iterator that both filters and maps. - /// - /// The closure must return an [`Option`]. `filter_map()` creates an - /// iterator which calls this closure on each element. If the closure - /// returns `Some(element)`, then that element is returned. If the - /// closure returns `None`, it will try again, and call the closure on the - /// next element, seeing if it will return `Some`. - /// - /// [`Option`]: ../option/enum.Option.html - /// - /// Why `filter_map()` and not just [`filter()`].[`map()`]? The key is in this - /// part: - /// - /// [`filter()`]: #method.filter - /// [`map()`]: #method.map - /// - /// > If the closure returns `Some(element)`, then that element is returned. - /// - /// In other words, it removes the [`Option`] layer automatically. If your - /// mapping is already returning an [`Option`] and you want to skip over - /// `None`s, then `filter_map()` is much, much nicer to use. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = ["1", "2", "lol"]; - /// - /// let mut iter = a.iter().filter_map(|s| s.parse().ok()); - /// - /// assert_eq!(iter.next(), Some(1)); - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Here's the same example, but with [`filter()`] and [`map()`]: - /// - /// ``` - /// let a = ["1", "2", "lol"]; - /// - /// let mut iter = a.iter() - /// .map(|s| s.parse().ok()) - /// .filter(|s| s.is_some()); - /// - /// assert_eq!(iter.next(), Some(Some(1))); - /// assert_eq!(iter.next(), Some(Some(2))); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// There's an extra layer of `Some` in there. 
- #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn filter_map(self, f: F) -> FilterMap where - Self: Sized, F: FnMut(Self::Item) -> Option, - { - FilterMap { iter: self, f: f } - } - - /// Creates an iterator which gives the current iteration count as well as - /// the next value. - /// - /// The iterator returned yields pairs `(i, val)`, where `i` is the - /// current index of iteration and `val` is the value returned by the - /// iterator. - /// - /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a - /// different sized integer, the [`zip()`] function provides similar - /// functionality. - /// - /// [`usize`]: ../primitive.usize.html - /// [`zip()`]: #method.zip - /// - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so enumerating more than - /// [`usize::MAX`] elements either produces the wrong result or panics. If - /// debug assertions are enabled, a panic is guaranteed. - /// - /// [`usize::MAX`]: ../usize/constant.MAX.html - /// - /// # Panics - /// - /// The returned iterator might panic if the to-be-returned index would - /// overflow a `usize`. - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter().enumerate(); - /// - /// assert_eq!(iter.next(), Some((0, &1))); - /// assert_eq!(iter.next(), Some((1, &2))); - /// assert_eq!(iter.next(), Some((2, &3))); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn enumerate(self) -> Enumerate where Self: Sized { - Enumerate { iter: self, count: 0 } - } - - /// Creates an iterator which can look at the `next()` element without - /// consuming it. - /// - /// Adds a [`peek()`] method to an iterator. See its documentation for - /// more information. 
- /// - /// [`peek()`]: struct.Peekable.html#method.peek - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let xs = [1, 2, 3]; - /// - /// let mut iter = xs.iter().peekable(); - /// - /// // peek() lets us see into the future - /// assert_eq!(iter.peek(), Some(&&1)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// assert_eq!(iter.next(), Some(&2)); - /// - /// // we can peek() multiple times, the iterator won't advance - /// assert_eq!(iter.peek(), Some(&&3)); - /// assert_eq!(iter.peek(), Some(&&3)); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// - /// // after the iterator is finished, so is peek() - /// assert_eq!(iter.peek(), None); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn peekable(self) -> Peekable where Self: Sized { - Peekable{iter: self, peeked: None} - } - - /// Creates an iterator that [`skip()`]s elements based on a predicate. - /// - /// [`skip()`]: #method.skip - /// - /// `skip_while()` takes a closure as an argument. It will call this - /// closure on each element of the iterator, and ignore elements - /// until it returns `false`. - /// - /// After `false` is returned, `skip_while()`'s job is over, and the - /// rest of the elements are yielded. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [-1i32, 0, 1]; - /// - /// let mut iter = a.into_iter().skip_while(|x| x.is_negative()); - /// - /// assert_eq!(iter.next(), Some(&0)); - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Because the closure passed to `skip_while()` takes a reference, and many - /// iterators iterate over references, this leads to a possibly confusing - /// situation, where the type of the closure is a double reference: - /// - /// ``` - /// let a = [-1, 0, 1]; - /// - /// let mut iter = a.into_iter().skip_while(|x| **x < 0); // need two *s! 
- /// - /// assert_eq!(iter.next(), Some(&0)); - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Stopping after an initial `false`: - /// - /// ``` - /// let a = [-1, 0, 1, -2]; - /// - /// let mut iter = a.into_iter().skip_while(|x| **x < 0); - /// - /// assert_eq!(iter.next(), Some(&0)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// // while this would have been false, since we already got a false, - /// // skip_while() isn't used any more - /// assert_eq!(iter.next(), Some(&-2)); - /// - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn skip_while

(self, predicate: P) -> SkipWhile where - Self: Sized, P: FnMut(&Self::Item) -> bool, - { - SkipWhile{iter: self, flag: false, predicate: predicate} - } - - /// Creates an iterator that yields elements based on a predicate. - /// - /// `take_while()` takes a closure as an argument. It will call this - /// closure on each element of the iterator, and yield elements - /// while it returns `true`. - /// - /// After `false` is returned, `take_while()`'s job is over, and the - /// rest of the elements are ignored. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [-1i32, 0, 1]; - /// - /// let mut iter = a.into_iter().take_while(|x| x.is_negative()); - /// - /// assert_eq!(iter.next(), Some(&-1)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Because the closure passed to `take_while()` takes a reference, and many - /// iterators iterate over references, this leads to a possibly confusing - /// situation, where the type of the closure is a double reference: - /// - /// ``` - /// let a = [-1, 0, 1]; - /// - /// let mut iter = a.into_iter().take_while(|x| **x < 0); // need two *s! - /// - /// assert_eq!(iter.next(), Some(&-1)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// Stopping after an initial `false`: - /// - /// ``` - /// let a = [-1, 0, 1, -2]; - /// - /// let mut iter = a.into_iter().take_while(|x| **x < 0); - /// - /// assert_eq!(iter.next(), Some(&-1)); - /// - /// // We have more elements that are less than zero, but since we already - /// // got a false, take_while() isn't used any more - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn take_while

(self, predicate: P) -> TakeWhile where - Self: Sized, P: FnMut(&Self::Item) -> bool, - { - TakeWhile{iter: self, flag: false, predicate: predicate} - } - - /// Creates an iterator that skips the first `n` elements. - /// - /// After they have been consumed, the rest of the elements are yielded. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter().skip(2); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn skip(self, n: usize) -> Skip where Self: Sized { - Skip{iter: self, n: n} - } - - /// Creates an iterator that yields its first `n` elements. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter().take(2); - /// - /// assert_eq!(iter.next(), Some(&1)); - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), None); - /// ``` - /// - /// `take()` is often used with an infinite iterator, to make it finite: - /// - /// ``` - /// let mut iter = (0..).take(3); - /// - /// assert_eq!(iter.next(), Some(0)); - /// assert_eq!(iter.next(), Some(1)); - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn take(self, n: usize) -> Take where Self: Sized, { - Take{iter: self, n: n} - } - - /// An iterator adaptor similar to [`fold()`] that holds internal state and - /// produces a new iterator. - /// - /// [`fold()`]: #method.fold - /// - /// `scan()` takes two arguments: an initial value which seeds the internal - /// state, and a closure with two arguments, the first being a mutable - /// reference to the internal state and the second an iterator element. - /// The closure can assign to the internal state to share state between - /// iterations. 
- /// - /// On iteration, the closure will be applied to each element of the - /// iterator and the return value from the closure, an [`Option`], is - /// yielded by the iterator. - /// - /// [`Option`]: ../option/enum.Option.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter().scan(1, |state, &x| { - /// // each iteration, we'll multiply the state by the element - /// *state = *state * x; - /// - /// // the value passed on to the next iteration - /// Some(*state) - /// }); - /// - /// assert_eq!(iter.next(), Some(1)); - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), Some(6)); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn scan(self, initial_state: St, f: F) -> Scan - where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option, - { - Scan{iter: self, f: f, state: initial_state} - } - - /// Creates an iterator that works like map, but flattens nested structure. - /// - /// The [`map()`] adapter is very useful, but only when the closure - /// argument produces values. If it produces an iterator instead, there's - /// an extra layer of indirection. `flat_map()` will remove this extra layer - /// on its own. - /// - /// [`map()`]: #method.map - /// - /// Another way of thinking about `flat_map()`: [`map()`]'s closure returns - /// one item for each element, and `flat_map()`'s closure returns an - /// iterator for each element. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let words = ["alpha", "beta", "gamma"]; - /// - /// // chars() returns an iterator - /// let merged: String = words.iter() - /// .flat_map(|s| s.chars()) - /// .collect(); - /// assert_eq!(merged, "alphabetagamma"); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn flat_map(self, f: F) -> FlatMap - where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, - { - FlatMap{iter: self, f: f, frontiter: None, backiter: None } - } - - /// Creates an iterator which ends after the first `None`. - /// - /// After an iterator returns `None`, future calls may or may not yield - /// `Some(T)` again. `fuse()` adapts an iterator, ensuring that after a - /// `None` is given, it will always return `None` forever. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// // an iterator which alternates between Some and None - /// struct Alternate { - /// state: i32, - /// } - /// - /// impl Iterator for Alternate { - /// type Item = i32; - /// - /// fn next(&mut self) -> Option { - /// let val = self.state; - /// self.state = self.state + 1; - /// - /// // if it's even, Some(i32), else None - /// if val % 2 == 0 { - /// Some(val) - /// } else { - /// None - /// } - /// } - /// } - /// - /// let mut iter = Alternate { state: 0 }; - /// - /// // we can see our iterator going back and forth - /// assert_eq!(iter.next(), Some(0)); - /// assert_eq!(iter.next(), None); - /// assert_eq!(iter.next(), Some(2)); - /// assert_eq!(iter.next(), None); - /// - /// // however, once we fuse it... - /// let mut iter = iter.fuse(); - /// - /// assert_eq!(iter.next(), Some(4)); - /// assert_eq!(iter.next(), None); - /// - /// // it will always return None after the first time. 
- /// assert_eq!(iter.next(), None); - /// assert_eq!(iter.next(), None); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn fuse(self) -> Fuse where Self: Sized { - Fuse{iter: self, done: false} - } - - /// Do something with each element of an iterator, passing the value on. - /// - /// When using iterators, you'll often chain several of them together. - /// While working on such code, you might want to check out what's - /// happening at various parts in the pipeline. To do that, insert - /// a call to `inspect()`. - /// - /// It's much more common for `inspect()` to be used as a debugging tool - /// than to exist in your final code, but never say never. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 4, 2, 3]; - /// - /// // this iterator sequence is complex. - /// let sum = a.iter() - /// .cloned() - /// .filter(|&x| x % 2 == 0) - /// .fold(0, |sum, i| sum + i); - /// - /// println!("{}", sum); - /// - /// // let's add some inspect() calls to investigate what's happening - /// let sum = a.iter() - /// .cloned() - /// .inspect(|x| println!("about to filter: {}", x)) - /// .filter(|&x| x % 2 == 0) - /// .inspect(|x| println!("made it through filter: {}", x)) - /// .fold(0, |sum, i| sum + i); - /// - /// println!("{}", sum); - /// ``` - /// - /// This will print: - /// - /// ```text - /// about to filter: 1 - /// about to filter: 4 - /// made it through filter: 4 - /// about to filter: 2 - /// made it through filter: 2 - /// about to filter: 3 - /// 6 - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn inspect(self, f: F) -> Inspect where - Self: Sized, F: FnMut(&Self::Item), - { - Inspect{iter: self, f: f} - } - - /// Borrows an iterator, rather than consuming it. - /// - /// This is useful to allow applying iterator adaptors while still - /// retaining ownership of the original iterator. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let iter = a.into_iter(); - /// - /// let sum: i32 = iter.take(5) - /// .fold(0, |acc, &i| acc + i ); - /// - /// assert_eq!(sum, 6); - /// - /// // if we try to use iter again, it won't work. The following line - /// // gives "error: use of moved value: `iter` - /// // assert_eq!(iter.next(), None); - /// - /// // let's try that again - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.into_iter(); - /// - /// // instead, we add in a .by_ref() - /// let sum: i32 = iter.by_ref() - /// .take(2) - /// .fold(0, |acc, &i| acc + i ); - /// - /// assert_eq!(sum, 3); - /// - /// // now this is just fine: - /// assert_eq!(iter.next(), Some(&3)); - /// assert_eq!(iter.next(), None); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn by_ref(&mut self) -> &mut Self where Self: Sized { self } - - /// Transforms an iterator into a collection. - /// - /// `collect()` can take anything iterable, and turn it into a relevant - /// collection. This is one of the more powerful methods in the standard - /// library, used in a variety of contexts. - /// - /// The most basic pattern in which `collect()` is used is to turn one - /// collection into another. You take a collection, call `iter()` on it, - /// do a bunch of transformations, and then `collect()` at the end. - /// - /// One of the keys to `collect()`'s power is that many things you might - /// not think of as 'collections' actually are. For example, a [`String`] - /// is a collection of [`char`]s. And a collection of [`Result`] can - /// be thought of as single `Result, E>`. See the examples - /// below for more. - /// - /// [`String`]: ../string/struct.String.html - /// [`Result`]: ../result/enum.Result.html - /// [`char`]: ../primitive.char.html - /// - /// Because `collect()` is so general, it can cause problems with type - /// inference. 
As such, `collect()` is one of the few times you'll see - /// the syntax affectionately known as the 'turbofish': `::<>`. This - /// helps the inference algorithm understand specifically which collection - /// you're trying to collect into. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let doubled: Vec = a.iter() - /// .map(|&x| x * 2) - /// .collect(); - /// - /// assert_eq!(vec![2, 4, 6], doubled); - /// ``` - /// - /// Note that we needed the `: Vec` on the left-hand side. This is because - /// we could collect into, for example, a [`VecDeque`] instead: - /// - /// [`VecDeque`]: ../collections/struct.VecDeque.html - /// - /// ``` - /// use std::collections::VecDeque; - /// - /// let a = [1, 2, 3]; - /// - /// let doubled: VecDeque = a.iter() - /// .map(|&x| x * 2) - /// .collect(); - /// - /// assert_eq!(2, doubled[0]); - /// assert_eq!(4, doubled[1]); - /// assert_eq!(6, doubled[2]); - /// ``` - /// - /// Using the 'turbofish' instead of annotationg `doubled`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let doubled = a.iter() - /// .map(|&x| x * 2) - /// .collect::>(); - /// - /// assert_eq!(vec![2, 4, 6], doubled); - /// ``` - /// - /// Because `collect()` cares about what you're collecting into, you can - /// still use a partial type hint, `_`, with the turbofish: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let doubled = a.iter() - /// .map(|&x| x * 2) - /// .collect::>(); - /// - /// assert_eq!(vec![2, 4, 6], doubled); - /// ``` - /// - /// Using `collect()` to make a [`String`]: - /// - /// ``` - /// let chars = ['g', 'd', 'k', 'k', 'n']; - /// - /// let hello: String = chars.iter() - /// .map(|&x| x as u8) - /// .map(|x| (x + 1) as char) - /// .collect(); - /// - /// assert_eq!("hello", hello); - /// ``` - /// - /// If you have a list of [`Result`]s, you can use `collect()` to - /// see if any of them failed: - /// - /// ``` - /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")]; 
- /// - /// let result: Result, &str> = results.iter().cloned().collect(); - /// - /// // gives us the first error - /// assert_eq!(Err("nope"), result); - /// - /// let results = [Ok(1), Ok(3)]; - /// - /// let result: Result, &str> = results.iter().cloned().collect(); - /// - /// // gives us the list of answers - /// assert_eq!(Ok(vec![1, 3]), result); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn collect>(self) -> B where Self: Sized { - FromIterator::from_iter(self) - } - - /// Consumes an iterator, creating two collections from it. - /// - /// The predicate passed to `partition()` can return `true`, or `false`. - /// `partition()` returns a pair, all of the elements for which it returned - /// `true`, and all of the elements for which it returned `false`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let (even, odd): (Vec, Vec) = a.into_iter() - /// .partition(|&n| n % 2 == 0); - /// - /// assert_eq!(even, vec![2]); - /// assert_eq!(odd, vec![1, 3]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn partition(self, mut f: F) -> (B, B) where - Self: Sized, - B: Default + Extend, - F: FnMut(&Self::Item) -> bool - { - let mut left: B = Default::default(); - let mut right: B = Default::default(); - - for x in self { - if f(&x) { - left.extend(Some(x)) - } else { - right.extend(Some(x)) - } - } - - (left, right) - } - - /// An iterator adaptor that applies a function, producing a single, final value. - /// - /// `fold()` takes two arguments: an initial value, and a closure with two - /// arguments: an 'accumulator', and an element. It returns the value that - /// the accumulator should have for the next iteration. - /// - /// The initial value is the value the accumulator will have on the first - /// call. - /// - /// After applying this closure to every element of the iterator, `fold()` - /// returns the accumulator. 
- /// - /// This operation is sometimes called 'reduce' or 'inject'. - /// - /// Folding is useful whenever you have a collection of something, and want - /// to produce a single value from it. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// // the sum of all of the elements of a - /// let sum = a.iter() - /// .fold(0, |acc, &x| acc + x); - /// - /// assert_eq!(sum, 6); - /// ``` - /// - /// Let's walk through each step of the iteration here: - /// - /// | element | acc | x | result | - /// |---------|-----|---|--------| - /// | | 0 | | | - /// | 1 | 0 | 1 | 1 | - /// | 2 | 1 | 2 | 3 | - /// | 3 | 3 | 3 | 6 | - /// - /// And so, our final result, `6`. - /// - /// It's common for people who haven't used iterators a lot to - /// use a `for` loop with a list of things to build up a result. Those - /// can be turned into `fold()`s: - /// - /// ``` - /// let numbers = [1, 2, 3, 4, 5]; - /// - /// let mut result = 0; - /// - /// // for loop: - /// for i in &numbers { - /// result = result + i; - /// } - /// - /// // fold: - /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x); - /// - /// // they're the same - /// assert_eq!(result, result2); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn fold(self, init: B, mut f: F) -> B where - Self: Sized, F: FnMut(B, Self::Item) -> B, - { - let mut accum = init; - for x in self { - accum = f(accum, x); - } - accum - } - - /// Tests if every element of the iterator matches a predicate. - /// - /// `all()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, and if they all return - /// `true`, then so does `all()`. If any of them return `false`, it - /// returns `false`. - /// - /// `all()` is short-circuting; in other words, it will stop processing - /// as soon as it finds a `false`, given that no matter what else happens, - /// the result will also be `false`. 
- /// - /// An empty iterator returns `true`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert!(a.iter().all(|&x| x > 0)); - /// - /// assert!(!a.iter().all(|&x| x > 2)); - /// ``` - /// - /// Stopping at the first `false`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert!(!iter.all(|&x| x != 2)); - /// - /// // we can still use `iter`, as there are more elements. - /// assert_eq!(iter.next(), Some(&3)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn all(&mut self, mut f: F) -> bool where - Self: Sized, F: FnMut(Self::Item) -> bool - { - for x in self { - if !f(x) { - return false; - } - } - true - } - - /// Tests if any element of the iterator matches a predicate. - /// - /// `any()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, and if any of them return - /// `true`, then so does `any()`. If they all return `false`, it - /// returns `false`. - /// - /// `any()` is short-circuting; in other words, it will stop processing - /// as soon as it finds a `true`, given that no matter what else happens, - /// the result will also be `true`. - /// - /// An empty iterator returns `false`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert!(a.iter().any(|&x| x > 0)); - /// - /// assert!(!a.iter().any(|&x| x > 5)); - /// ``` - /// - /// Stopping at the first `true`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert!(iter.any(|&x| x != 2)); - /// - /// // we can still use `iter`, as there are more elements. 
- /// assert_eq!(iter.next(), Some(&2)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn any(&mut self, mut f: F) -> bool where - Self: Sized, - F: FnMut(Self::Item) -> bool - { - for x in self { - if f(x) { - return true; - } - } - false - } - - /// Searches for an element of an iterator that satisfies a predicate. - /// - /// `find()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, and if any of them return - /// `true`, then `find()` returns `Some(element)`. If they all return - /// `false`, it returns `None`. - /// - /// `find()` is short-circuting; in other words, it will stop processing - /// as soon as the closure returns `true`. - /// - /// Because `find()` takes a reference, and many iterators iterate over - /// references, this leads to a possibly confusing situation where the - /// argument is a double reference. You can see this effect in the - /// examples below, with `&&x`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2)); - /// - /// assert_eq!(a.iter().find(|&&x| x == 5), None); - /// ``` - /// - /// Stopping at the first `true`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert_eq!(iter.find(|&&x| x == 2), Some(&2)); - /// - /// // we can still use `iter`, as there are more elements. - /// assert_eq!(iter.next(), Some(&3)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn find

(&mut self, mut predicate: P) -> Option where - Self: Sized, - P: FnMut(&Self::Item) -> bool, - { - for x in self { - if predicate(&x) { return Some(x) } - } - None - } - - /// Searches for an element in an iterator, returning its index. - /// - /// `position()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, and if one of them - /// returns `true`, then `position()` returns `Some(index)`. If all of - /// them return `false`, it returns `None`. - /// - /// `position()` is short-circuting; in other words, it will stop - /// processing as soon as it finds a `true`. - /// - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so if there are more - /// than `usize::MAX` non-matching elements, it either produces the wrong - /// result or panics. If debug assertions are enabled, a panic is - /// guaranteed. - /// - /// # Panics - /// - /// This function might panic if the iterator has more than `usize::MAX` - /// non-matching elements. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().position(|&x| x == 2), Some(1)); - /// - /// assert_eq!(a.iter().position(|&x| x == 5), None); - /// ``` - /// - /// Stopping at the first `true`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert_eq!(iter.position(|&x| x == 2), Some(1)); - /// - /// // we can still use `iter`, as there are more elements. - /// assert_eq!(iter.next(), Some(&3)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn position

(&mut self, mut predicate: P) -> Option where - Self: Sized, - P: FnMut(Self::Item) -> bool, - { - // `enumerate` might overflow. - for (i, x) in self.enumerate() { - if predicate(x) { - return Some(i); - } - } - None - } - - /// Searches for an element in an iterator from the right, returning its - /// index. - /// - /// `rposition()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, starting from the end, - /// and if one of them returns `true`, then `rposition()` returns - /// `Some(index)`. If all of them return `false`, it returns `None`. - /// - /// `rposition()` is short-circuting; in other words, it will stop - /// processing as soon as it finds a `true`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2)); - /// - /// assert_eq!(a.iter().rposition(|&x| x == 5), None); - /// ``` - /// - /// Stopping at the first `true`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); - /// - /// // we can still use `iter`, as there are more elements. - /// assert_eq!(iter.next(), Some(&1)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn rposition

(self) -> P where - P: Mul + One, - Self: Sized, - { - self.fold(One::one(), |p, e| p * e) - } - - /// Lexicographically compares the elements of this `Iterator` with those - /// of another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn cmp(mut self, other: I) -> Ordering where - I: IntoIterator, - Self::Item: Ord, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return Ordering::Equal, - (None, _ ) => return Ordering::Less, - (_ , None) => return Ordering::Greater, - (Some(x), Some(y)) => match x.cmp(&y) { - Ordering::Equal => (), - non_eq => return non_eq, - }, - } - } - } - - /// Lexicographically compares the elements of this `Iterator` with those - /// of another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn partial_cmp(mut self, other: I) -> Option where - I: IntoIterator, - Self::Item: PartialOrd, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return Some(Ordering::Equal), - (None, _ ) => return Some(Ordering::Less), - (_ , None) => return Some(Ordering::Greater), - (Some(x), Some(y)) => match x.partial_cmp(&y) { - Some(Ordering::Equal) => (), - non_eq => return non_eq, - }, - } - } - } - - /// Determines if the elements of this `Iterator` are equal to those of - /// another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn eq(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialEq, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return true, - (None, _) | (_, None) => return false, - (Some(x), Some(y)) => if x != y { return false }, - } - } - } - - /// Determines if the elements of this `Iterator` are unequal to those of - /// another. 
- #[stable(feature = "iter_order", since = "1.5.0")] - fn ne(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialEq, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return false, - (None, _) | (_, None) => return true, - (Some(x), Some(y)) => if x.ne(&y) { return true }, - } - } - } - - /// Determines if the elements of this `Iterator` are lexicographically - /// less than those of another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn lt(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialOrd, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return false, - (None, _ ) => return true, - (_ , None) => return false, - (Some(x), Some(y)) => { - match x.partial_cmp(&y) { - Some(Ordering::Less) => return true, - Some(Ordering::Equal) => {} - Some(Ordering::Greater) => return false, - None => return false, - } - }, - } - } - } - - /// Determines if the elements of this `Iterator` are lexicographically - /// less or equal to those of another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn le(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialOrd, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return true, - (None, _ ) => return true, - (_ , None) => return false, - (Some(x), Some(y)) => { - match x.partial_cmp(&y) { - Some(Ordering::Less) => return true, - Some(Ordering::Equal) => {} - Some(Ordering::Greater) => return false, - None => return false, - } - }, - } - } - } - - /// Determines if the elements of this `Iterator` are lexicographically - /// greater than those of another. 
- #[stable(feature = "iter_order", since = "1.5.0")] - fn gt(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialOrd, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return false, - (None, _ ) => return false, - (_ , None) => return true, - (Some(x), Some(y)) => { - match x.partial_cmp(&y) { - Some(Ordering::Less) => return false, - Some(Ordering::Equal) => {} - Some(Ordering::Greater) => return true, - None => return false, - } - } - } - } - } - - /// Determines if the elements of this `Iterator` are lexicographically - /// greater than or equal to those of another. - #[stable(feature = "iter_order", since = "1.5.0")] - fn ge(mut self, other: I) -> bool where - I: IntoIterator, - Self::Item: PartialOrd, - Self: Sized, - { - let mut other = other.into_iter(); - - loop { - match (self.next(), other.next()) { - (None, None) => return true, - (None, _ ) => return false, - (_ , None) => return true, - (Some(x), Some(y)) => { - match x.partial_cmp(&y) { - Some(Ordering::Less) => return false, - Some(Ordering::Equal) => {} - Some(Ordering::Greater) => return true, - None => return false, - } - }, - } - } - } -} - -/// Select an element from an iterator based on the given projection -/// and "comparison" function. -/// -/// This is an idiosyncratic helper to try to factor out the -/// commonalities of {max,min}{,_by}. In particular, this avoids -/// having to implement optimizations several times. -#[inline] -fn select_fold1(mut it: I, - mut f_proj: FProj, - mut f_cmp: FCmp) -> Option<(B, I::Item)> - where I: Iterator, - FProj: FnMut(&I::Item) -> B, - FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool -{ - // start with the first element as our selection. This avoids - // having to use `Option`s inside the loop, translating to a - // sizeable performance gain (6x in one case). 
- it.next().map(|mut sel| { - let mut sel_p = f_proj(&sel); - - for x in it { - let x_p = f_proj(&x); - if f_cmp(&sel_p, &sel, &x_p, &x) { - sel = x; - sel_p = x_p; - } - } - (sel_p, sel) - }) -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I: Iterator + ?Sized> Iterator for &'a mut I { - type Item = I::Item; - fn next(&mut self) -> Option { (**self).next() } - fn size_hint(&self) -> (usize, Option) { (**self).size_hint() } -} - -/// Conversion from an `Iterator`. -/// -/// By implementing `FromIterator` for a type, you define how it will be -/// created from an iterator. This is common for types which describe a -/// collection of some kind. -/// -/// `FromIterator`'s [`from_iter()`] is rarely called explicitly, and is instead -/// used through [`Iterator`]'s [`collect()`] method. See [`collect()`]'s -/// documentation for more examples. -/// -/// [`from_iter()`]: #tymethod.from_iter -/// [`Iterator`]: trait.Iterator.html -/// [`collect()`]: trait.Iterator.html#method.collect -/// -/// See also: [`IntoIterator`]. -/// -/// [`IntoIterator`]: trait.IntoIterator.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter::FromIterator; -/// -/// let five_fives = std::iter::repeat(5).take(5); -/// -/// let v = Vec::from_iter(five_fives); -/// -/// assert_eq!(v, vec![5, 5, 5, 5, 5]); -/// ``` -/// -/// Using [`collect()`] to implicitly use `FromIterator`: -/// -/// ``` -/// let five_fives = std::iter::repeat(5).take(5); -/// -/// let v: Vec = five_fives.collect(); -/// -/// assert_eq!(v, vec![5, 5, 5, 5, 5]); -/// ``` -/// -/// Implementing `FromIterator` for your type: -/// -/// ``` -/// use std::iter::FromIterator; -/// -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. 
-/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // and we'll implement FromIterator -/// impl FromIterator for MyCollection { -/// fn from_iter>(iterator: I) -> Self { -/// let mut c = MyCollection::new(); -/// -/// for i in iterator { -/// c.add(i); -/// } -/// -/// c -/// } -/// } -/// -/// // Now we can make a new iterator... -/// let iter = (0..5).into_iter(); -/// -/// // ... and make a MyCollection out of it -/// let c = MyCollection::from_iter(iter); -/// -/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); -/// -/// // collect works too! -/// -/// let iter = (0..5).into_iter(); -/// let c: MyCollection = iter.collect(); -/// -/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented="a collection of type `{Self}` cannot be \ - built from an iterator over elements of type `{A}`"] -pub trait FromIterator: Sized { - /// Creates a value from an iterator. - /// - /// See the [module-level documentation] for more. - /// - /// [module-level documentation]: trait.FromIterator.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::iter::FromIterator; - /// - /// let five_fives = std::iter::repeat(5).take(5); - /// - /// let v = Vec::from_iter(five_fives); - /// - /// assert_eq!(v, vec![5, 5, 5, 5, 5]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn from_iter>(iterator: T) -> Self; -} - -/// Conversion into an `Iterator`. -/// -/// By implementing `IntoIterator` for a type, you define how it will be -/// converted to an iterator. This is common for types which describe a -/// collection of some kind. -/// -/// One benefit of implementing `IntoIterator` is that your type will [work -/// with Rust's `for` loop syntax](index.html#for-loops-and-intoiterator). -/// -/// See also: [`FromIterator`]. 
-/// -/// [`FromIterator`]: trait.FromIterator.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// let v = vec![1, 2, 3]; -/// -/// let mut iter = v.into_iter(); -/// -/// let n = iter.next(); -/// assert_eq!(Some(1), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(2), n); -/// -/// let n = iter.next(); -/// assert_eq!(Some(3), n); -/// -/// let n = iter.next(); -/// assert_eq!(None, n); -/// ``` -/// -/// Implementing `IntoIterator` for your type: -/// -/// ``` -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. -/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // and we'll implement IntoIterator -/// impl IntoIterator for MyCollection { -/// type Item = i32; -/// type IntoIter = ::std::vec::IntoIter; -/// -/// fn into_iter(self) -> Self::IntoIter { -/// self.0.into_iter() -/// } -/// } -/// -/// // Now we can make a new collection... -/// let mut c = MyCollection::new(); -/// -/// // ... add some stuff to it ... -/// c.add(0); -/// c.add(1); -/// c.add(2); -/// -/// // ... and then turn it into an Iterator: -/// for (i, n) in c.into_iter().enumerate() { -/// assert_eq!(i as i32, n); -/// } -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait IntoIterator { - /// The type of the elements being iterated over. - #[stable(feature = "rust1", since = "1.0.0")] - type Item; - - /// Which kind of iterator are we turning this into? - #[stable(feature = "rust1", since = "1.0.0")] - type IntoIter: Iterator; - - /// Creates an iterator from a value. - /// - /// See the [module-level documentation] for more. 
- /// - /// [module-level documentation]: trait.IntoIterator.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v = vec![1, 2, 3]; - /// - /// let mut iter = v.into_iter(); - /// - /// let n = iter.next(); - /// assert_eq!(Some(1), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(2), n); - /// - /// let n = iter.next(); - /// assert_eq!(Some(3), n); - /// - /// let n = iter.next(); - /// assert_eq!(None, n); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn into_iter(self) -> Self::IntoIter; -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl IntoIterator for I { - type Item = I::Item; - type IntoIter = I; - - fn into_iter(self) -> I { - self - } -} - -/// Extend a collection with the contents of an iterator. -/// -/// Iterators produce a series of values, and collections can also be thought -/// of as a series of values. The `Extend` trait bridges this gap, allowing you -/// to extend a collection by including the contents of that iterator. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// // You can extend a String with some chars: -/// let mut message = String::from("The first three letters are: "); -/// -/// message.extend(&['a', 'b', 'c']); -/// -/// assert_eq!("abc", &message[29..32]); -/// ``` -/// -/// Implementing `Extend`: -/// -/// ``` -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. 
-/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // since MyCollection has a list of i32s, we implement Extend for i32 -/// impl Extend for MyCollection { -/// -/// // This is a bit simpler with the concrete type signature: we can call -/// // extend on anything which can be turned into an Iterator which gives -/// // us i32s. Because we need i32s to put into MyCollection. -/// fn extend>(&mut self, iterable: T) { -/// -/// // The implementation is very straightforward: loop through the -/// // iterator, and add() each element to ourselves. -/// for elem in iterable { -/// self.add(elem); -/// } -/// } -/// } -/// -/// let mut c = MyCollection::new(); -/// -/// c.add(5); -/// c.add(6); -/// c.add(7); -/// -/// // let's extend our collection with three more numbers -/// c.extend(vec![1, 2, 3]); -/// -/// // we've added these elements onto the end -/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c)); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait Extend { - /// Extends a collection with the contents of an iterator. - /// - /// As this is the only method for this trait, the [trait-level] docs - /// contain more details. - /// - /// [trait-level]: trait.Extend.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// // You can extend a String with some chars: - /// let mut message = String::from("abc"); - /// - /// message.extend(['d', 'e', 'f'].iter()); - /// - /// assert_eq!("abcdef", &message); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn extend>(&mut self, iterable: T); -} - -/// An iterator able to yield elements from both ends. -/// -/// Something that implements `DoubleEndedIterator` has one extra capability -/// over something that implements [`Iterator`]: the ability to also take -/// `Item`s from the back, as well as the front. 
-/// -/// It is important to note that both back and forth work on the same range, -/// and do not cross: iteration is over when they meet in the middle. -/// -/// [`Iterator`]: trait.Iterator.html -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// let numbers = vec![1, 2, 3]; -/// -/// let mut iter = numbers.iter(); -/// -/// let n = iter.next(); -/// assert_eq!(Some(&1), n); -/// -/// let n = iter.next_back(); -/// assert_eq!(Some(&3), n); -/// -/// let n = iter.next_back(); -/// assert_eq!(Some(&2), n); -/// -/// let n = iter.next(); -/// assert_eq!(None, n); -/// -/// let n = iter.next_back(); -/// assert_eq!(None, n); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait DoubleEndedIterator: Iterator { - /// An iterator able to yield elements from both ends. - /// - /// As this is the only method for this trait, the [trait-level] docs - /// contain more details. - /// - /// [trait-level]: trait.DoubleEndedIterator.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let numbers = vec![1, 2, 3]; - /// - /// let mut iter = numbers.iter(); - /// - /// let n = iter.next(); - /// assert_eq!(Some(&1), n); - /// - /// let n = iter.next_back(); - /// assert_eq!(Some(&3), n); - /// - /// let n = iter.next_back(); - /// assert_eq!(Some(&2), n); - /// - /// let n = iter.next(); - /// assert_eq!(None, n); - /// - /// let n = iter.next_back(); - /// assert_eq!(None, n); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn next_back(&mut self) -> Option; -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I { - fn next_back(&mut self) -> Option { (**self).next_back() } -} - -/// An iterator that knows its exact length. -/// -/// Many [`Iterator`]s don't know how many times they will iterate, but some do. -/// If an iterator knows how many times it can iterate, providing access to -/// that information can be useful. 
For example, if you want to iterate -/// backwards, a good start is to know where the end is. -/// -/// When implementing an `ExactSizeIterator`, You must also implement -/// [`Iterator`]. When doing so, the implementation of [`size_hint()`] *must* -/// return the exact size of the iterator. -/// -/// [`Iterator`]: trait.Iterator.html -/// [`size_hint()`]: trait.Iterator.html#method.size_hint -/// -/// The [`len()`] method has a default implementation, so you usually shouldn't -/// implement it. However, you may be able to provide a more performant -/// implementation than the default, so overriding it in this case makes sense. -/// -/// [`len()`]: #method.len -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// // a finite range knows exactly how many times it will iterate -/// let five = 0..5; -/// -/// assert_eq!(5, five.len()); -/// ``` -/// -/// In the [module level docs][moddocs], we implemented an [`Iterator`], -/// `Counter`. Let's implement `ExactSizeIterator` for it as well: -/// -/// [moddocs]: index.html -/// -/// ``` -/// # struct Counter { -/// # count: usize, -/// # } -/// # impl Counter { -/// # fn new() -> Counter { -/// # Counter { count: 0 } -/// # } -/// # } -/// # impl Iterator for Counter { -/// # type Item = usize; -/// # fn next(&mut self) -> Option { -/// # self.count += 1; -/// # if self.count < 6 { -/// # Some(self.count) -/// # } else { -/// # None -/// # } -/// # } -/// # } -/// impl ExactSizeIterator for Counter { -/// // We already have the number of iterations, so we can use it directly. -/// fn len(&self) -> usize { -/// self.count -/// } -/// } -/// -/// // And now we can use it! -/// -/// let counter = Counter::new(); -/// -/// assert_eq!(0, counter.len()); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait ExactSizeIterator: Iterator { - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - /// Returns the exact number of times the iterator will iterate. 
- /// - /// This method has a default implementation, so you usually should not - /// implement it directly. However, if you can provide a more efficient - /// implementation, you can do so. See the [trait-level] docs for an - /// example. - /// - /// This function has the same safety guarantees as the [`size_hint()`] - /// function. - /// - /// [trait-level]: trait.ExactSizeIterator.html - /// [`size_hint()`]: trait.Iterator.html#method.size_hint - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// // a finite range knows exactly how many times it will iterate - /// let five = 0..5; - /// - /// assert_eq!(5, five.len()); - /// ``` - fn len(&self) -> usize { - let (lower, upper) = self.size_hint(); - // Note: This assertion is overly defensive, but it checks the invariant - // guaranteed by the trait. If this trait were rust-internal, - // we could use debug_assert!; assert_eq! will check all Rust user - // implementations too. - assert_eq!(upper, Some(lower)); - lower - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for &'a mut I {} - -// All adaptors that preserve the size of the wrapped iterator are fine -// Adaptors that may overflow in `size_hint` are not, i.e. `Chain`. -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Enumerate where I: ExactSizeIterator {} -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Inspect where - F: FnMut(&I::Item), -{} -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Rev - where I: ExactSizeIterator + DoubleEndedIterator {} -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Map where - F: FnMut(I::Item) -> B, -{} -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Zip - where A: ExactSizeIterator, B: ExactSizeIterator {} - -/// An double-ended iterator with the direction inverted. 
-/// -/// This `struct` is created by the [`rev()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`rev()`]: trait.Iterator.html#method.rev -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Rev { - iter: T -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Rev where I: DoubleEndedIterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { self.iter.next_back() } - #[inline] - fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Rev where I: DoubleEndedIterator { - #[inline] - fn next_back(&mut self) -> Option<::Item> { self.iter.next() } -} - -/// An iterator that clones the elements of an underlying iterator. -/// -/// This `struct` is created by the [`cloned()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`cloned()`]: trait.Iterator.html#method.cloned -/// [`Iterator`]: trait.Iterator.html -#[stable(feature = "iter_cloned", since = "1.1.0")] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone)] -pub struct Cloned { - it: I, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I, T: 'a> Iterator for Cloned - where I: Iterator, T: Clone -{ - type Item = T; - - fn next(&mut self) -> Option { - self.it.next().cloned() - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I, T: 'a> DoubleEndedIterator for Cloned - where I: DoubleEndedIterator, T: Clone -{ - fn next_back(&mut self) -> Option { - self.it.next_back().cloned() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I, T: 'a> ExactSizeIterator for Cloned - where I: ExactSizeIterator, T: Clone -{} - -/// An iterator that repeats endlessly. 
-/// -/// This `struct` is created by the [`cycle()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`cycle()`]: trait.Iterator.html#method.cycle -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Cycle { - orig: I, - iter: I, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Cycle where I: Clone + Iterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - match self.iter.next() { - None => { self.iter = self.orig.clone(); self.iter.next() } - y => y - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - // the cycle iterator is either empty or infinite - match self.orig.size_hint() { - sz @ (0, Some(0)) => sz, - (0, _) => (0, None), - _ => (usize::MAX, None) - } - } -} - -/// An iterator that strings two iterators together. -/// -/// This `struct` is created by the [`chain()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`chain()`]: trait.Iterator.html#method.chain -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Chain { - a: A, - b: B, - state: ChainState, -} - -// The iterator protocol specifies that iteration ends with the return value -// `None` from `.next()` (or `.next_back()`) and it is unspecified what -// further calls return. The chain adaptor must account for this since it uses -// two subiterators. -// -// It uses three states: -// -// - Both: `a` and `b` are remaining -// - Front: `a` remaining -// - Back: `b` remaining -// -// The fourth state (neither iterator is remaining) only occurs after Chain has -// returned None once, so we don't need to store this state. 
-#[derive(Clone)] -enum ChainState { - // both front and back iterator are remaining - Both, - // only front is remaining - Front, - // only back is remaining - Back, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Chain where - A: Iterator, - B: Iterator -{ - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.state { - ChainState::Both => match self.a.next() { - elt @ Some(..) => elt, - None => { - self.state = ChainState::Back; - self.b.next() - } - }, - ChainState::Front => self.a.next(), - ChainState::Back => self.b.next(), - } - } - - #[inline] - fn count(self) -> usize { - match self.state { - ChainState::Both => self.a.count() + self.b.count(), - ChainState::Front => self.a.count(), - ChainState::Back => self.b.count(), - } - } - - #[inline] - fn nth(&mut self, mut n: usize) -> Option { - match self.state { - ChainState::Both | ChainState::Front => { - for x in self.a.by_ref() { - if n == 0 { - return Some(x) - } - n -= 1; - } - if let ChainState::Both = self.state { - self.state = ChainState::Back; - } - } - ChainState::Back => {} - } - if let ChainState::Back = self.state { - self.b.nth(n) - } else { - None - } - } - - #[inline] - fn last(self) -> Option { - match self.state { - ChainState::Both => { - // Must exhaust a before b. 
- let a_last = self.a.last(); - let b_last = self.b.last(); - b_last.or(a_last) - }, - ChainState::Front => self.a.last(), - ChainState::Back => self.b.last() - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let lower = a_lower.saturating_add(b_lower); - - let upper = match (a_upper, b_upper) { - (Some(x), Some(y)) => x.checked_add(y), - _ => None - }; - - (lower, upper) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Chain where - A: DoubleEndedIterator, - B: DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - match self.state { - ChainState::Both => match self.b.next_back() { - elt @ Some(..) => elt, - None => { - self.state = ChainState::Front; - self.a.next_back() - } - }, - ChainState::Front => self.a.next_back(), - ChainState::Back => self.b.next_back(), - } - } -} - -/// An iterator that iterates two other iterators simultaneously. -/// -/// This `struct` is created by the [`zip()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`zip()`]: trait.Iterator.html#method.zip -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Zip { - a: A, - b: B -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Zip where A: Iterator, B: Iterator -{ - type Item = (A::Item, B::Item); - - #[inline] - fn next(&mut self) -> Option<(A::Item, B::Item)> { - self.a.next().and_then(|x| { - self.b.next().and_then(|y| { - Some((x, y)) - }) - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let lower = cmp::min(a_lower, b_lower); - - let upper = match (a_upper, b_upper) { - (Some(x), Some(y)) => Some(cmp::min(x,y)), - (Some(x), None) => Some(x), - (None, Some(y)) => Some(y), - (None, None) => None - }; - - (lower, upper) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Zip where - A: DoubleEndedIterator + ExactSizeIterator, - B: DoubleEndedIterator + ExactSizeIterator, -{ - #[inline] - fn next_back(&mut self) -> Option<(A::Item, B::Item)> { - let a_sz = self.a.len(); - let b_sz = self.b.len(); - if a_sz != b_sz { - // Adjust a, b to equal length - if a_sz > b_sz { - for _ in 0..a_sz - b_sz { self.a.next_back(); } - } else { - for _ in 0..b_sz - a_sz { self.b.next_back(); } - } - } - match (self.a.next_back(), self.b.next_back()) { - (Some(x), Some(y)) => Some((x, y)), - (None, None) => None, - _ => unreachable!(), - } - } -} - -/// An iterator that maps the values of `iter` with `f`. -/// -/// This `struct` is created by the [`map()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`map()`]: trait.Iterator.html#method.map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Map { - iter: I, - f: F, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Map where F: FnMut(I::Item) -> B { - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(&mut self.f) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Map where - F: FnMut(I::Item) -> B, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(&mut self.f) - } -} - -/// An iterator that filters the elements of `iter` with `predicate`. -/// -/// This `struct` is created by the [`filter()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`filter()`]: trait.Iterator.html#method.filter -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Filter { - iter: I, - predicate: P, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Filter where P: FnMut(&I::Item) -> bool { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - for x in self.iter.by_ref() { - if (self.predicate)(&x) { - return Some(x); - } - } - None - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Filter - where P: FnMut(&I::Item) -> bool, -{ - #[inline] - fn next_back(&mut self) -> Option { - for x in self.iter.by_ref().rev() { - if (self.predicate)(&x) { - return Some(x); - } - } - None - } -} - -/// An 
iterator that uses `f` to both filter and map elements from `iter`. -/// -/// This `struct` is created by the [`filter_map()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`filter_map()`]: trait.Iterator.html#method.filter_map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct FilterMap { - iter: I, - f: F, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for FilterMap - where F: FnMut(I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - for x in self.iter.by_ref() { - if let Some(y) = (self.f)(x) { - return Some(y); - } - } - None - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FilterMap - where F: FnMut(I::Item) -> Option, -{ - #[inline] - fn next_back(&mut self) -> Option { - for x in self.iter.by_ref().rev() { - if let Some(y) = (self.f)(x) { - return Some(y); - } - } - None - } -} - -/// An iterator that yields the current count and the element during iteration. -/// -/// This `struct` is created by the [`enumerate()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`enumerate()`]: trait.Iterator.html#method.enumerate -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Enumerate { - iter: I, - count: usize, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Enumerate where I: Iterator { - type Item = (usize, ::Item); - - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so enumerating more than - /// `usize::MAX` elements either produces the wrong result or panics. If - /// debug assertions are enabled, a panic is guaranteed. - /// - /// # Panics - /// - /// Might panic if the index of the element overflows a `usize`. - #[inline] - fn next(&mut self) -> Option<(usize, ::Item)> { - self.iter.next().map(|a| { - let ret = (self.count, a); - // Possible undefined overflow. - self.count += 1; - ret - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { - self.iter.nth(n).map(|a| { - let i = self.count + n; - self.count = i + 1; - (i, a) - }) - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Enumerate where - I: ExactSizeIterator + DoubleEndedIterator -{ - #[inline] - fn next_back(&mut self) -> Option<(usize, ::Item)> { - self.iter.next_back().map(|a| { - let len = self.iter.len(); - // Can safely add, `ExactSizeIterator` promises that the number of - // elements fits into a `usize`. - (self.count + len, a) - }) - } -} - -/// An iterator with a `peek()` that returns an optional reference to the next -/// element. -/// -/// This `struct` is created by the [`peekable()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`peekable()`]: trait.Iterator.html#method.peekable -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Peekable { - iter: I, - peeked: Option, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Peekable { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.peeked { - Some(_) => self.peeked.take(), - None => self.iter.next(), - } - } - - #[inline] - fn count(self) -> usize { - (if self.peeked.is_some() { 1 } else { 0 }) + self.iter.count() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - match self.peeked { - Some(_) if n == 0 => self.peeked.take(), - Some(_) => { - self.peeked = None; - self.iter.nth(n-1) - }, - None => self.iter.nth(n) - } - } - - #[inline] - fn last(self) -> Option { - self.iter.last().or(self.peeked) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lo, hi) = self.iter.size_hint(); - if self.peeked.is_some() { - let lo = lo.saturating_add(1); - let hi = hi.and_then(|x| x.checked_add(1)); - (lo, hi) - } else { - (lo, hi) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Peekable {} - -impl Peekable { - /// Returns a reference to the next() value without advancing the iterator. - /// - /// The `peek()` method will return the value that a call to [`next()`] would - /// return, but does not advance the iterator. Like [`next()`], if there is - /// a value, it's wrapped in a `Some(T)`, but if the iterator is over, it - /// will return `None`. - /// - /// [`next()`]: trait.Iterator.html#tymethod.next - /// - /// Because `peek()` returns reference, and many iterators iterate over - /// references, this leads to a possibly confusing situation where the - /// return value is a double reference. You can see this effect in the - /// examples below, with `&&i32`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let xs = [1, 2, 3]; - /// - /// let mut iter = xs.iter().peekable(); - /// - /// // peek() lets us see into the future - /// assert_eq!(iter.peek(), Some(&&1)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// assert_eq!(iter.next(), Some(&2)); - /// - /// // we can peek() multiple times, the iterator won't advance - /// assert_eq!(iter.peek(), Some(&&3)); - /// assert_eq!(iter.peek(), Some(&&3)); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// - /// // after the iterator is finished, so is peek() - /// assert_eq!(iter.peek(), None); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn peek(&mut self) -> Option<&I::Item> { - if self.peeked.is_none() { - self.peeked = self.iter.next(); - } - match self.peeked { - Some(ref value) => Some(value), - None => None, - } - } - - /// Checks if the iterator has finished iterating. - /// - /// Returns `true` if there are no more elements in the iterator, and - /// `false` if there are. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// #![feature(peekable_is_empty)] - /// - /// let xs = [1, 2, 3]; - /// - /// let mut iter = xs.iter().peekable(); - /// - /// // there are still elements to iterate over - /// assert_eq!(iter.is_empty(), false); - /// - /// // let's consume the iterator - /// iter.next(); - /// iter.next(); - /// iter.next(); - /// - /// assert_eq!(iter.is_empty(), true); - /// ``` - #[unstable(feature = "peekable_is_empty", issue = "27701")] - #[inline] - pub fn is_empty(&mut self) -> bool { - self.peek().is_none() - } -} - -/// An iterator that rejects elements while `predicate` is true. -/// -/// This `struct` is created by the [`skip_while()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`skip_while()`]: trait.Iterator.html#method.skip_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct SkipWhile { - iter: I, - flag: bool, - predicate: P, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for SkipWhile - where P: FnMut(&I::Item) -> bool -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - for x in self.iter.by_ref() { - if self.flag || !(self.predicate)(&x) { - self.flag = true; - return Some(x); - } - } - None - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } -} - -/// An iterator that only accepts elements while `predicate` is true. -/// -/// This `struct` is created by the [`take_while()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`take_while()`]: trait.Iterator.html#method.take_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct TakeWhile { - iter: I, - flag: bool, - predicate: P, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for TakeWhile - where P: FnMut(&I::Item) -> bool -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.flag { - None - } else { - self.iter.next().and_then(|x| { - if (self.predicate)(&x) { - Some(x) - } else { - self.flag = true; - None - } - }) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } -} - -/// An iterator that skips over `n` elements of `iter`. -/// -/// This `struct` is created by the [`skip()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`skip()`]: trait.Iterator.html#method.skip -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Skip { - iter: I, - n: usize -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Skip where I: Iterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.n == 0 { - self.iter.next() - } else { - let old_n = self.n; - self.n = 0; - self.iter.nth(old_n) - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - // Can't just add n + self.n due to overflow. - if self.n == 0 { - self.iter.nth(n) - } else { - let to_skip = self.n; - self.n = 0; - // nth(n) skips n+1 - if self.iter.nth(to_skip-1).is_none() { - return None; - } - self.iter.nth(n) - } - } - - #[inline] - fn count(self) -> usize { - self.iter.count().saturating_sub(self.n) - } - - #[inline] - fn last(mut self) -> Option { - if self.n == 0 { - self.iter.last() - } else { - let next = self.next(); - if next.is_some() { - // recurse. n should be 0. - self.last().or(next) - } else { - None - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.iter.size_hint(); - - let lower = lower.saturating_sub(self.n); - let upper = upper.map(|x| x.saturating_sub(self.n)); - - (lower, upper) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Skip where I: ExactSizeIterator {} - -/// An iterator that only iterates over the first `n` iterations of `iter`. -/// -/// This `struct` is created by the [`take()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`take()`]: trait.Iterator.html#method.take -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Take { - iter: I, - n: usize -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Take where I: Iterator{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - if self.n != 0 { - self.n -= 1; - self.iter.next() - } else { - None - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - if self.n > n { - self.n -= n + 1; - self.iter.nth(n) - } else { - if self.n > 0 { - self.iter.nth(self.n - 1); - self.n = 0; - } - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.iter.size_hint(); - - let lower = cmp::min(lower, self.n); - - let upper = match upper { - Some(x) if x < self.n => Some(x), - _ => Some(self.n) - }; - - (lower, upper) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Take where I: ExactSizeIterator {} - - -/// An iterator to maintain state while iterating another iterator. -/// -/// This `struct` is created by the [`scan()`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`scan()`]: trait.Iterator.html#method.scan -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Scan { - iter: I, - f: F, - state: St, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Scan where - I: Iterator, - F: FnMut(&mut St, I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().and_then(|a| (self.f)(&mut self.state, a)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the scan function - } -} - -/// An iterator that maps each element to an iterator, and yields the elements -/// of the produced iterators. -/// -/// This `struct` is created by the [`flat_map()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`flat_map()`]: trait.Iterator.html#method.flat_map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct FlatMap { - iter: I, - f: F, - frontiter: Option, - backiter: Option, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for FlatMap - where F: FnMut(I::Item) -> U, -{ - type Item = U::Item; - - #[inline] - fn next(&mut self) -> Option { - loop { - if let Some(ref mut inner) = self.frontiter { - if let Some(x) = inner.by_ref().next() { - return Some(x) - } - } - match self.iter.next().map(&mut self.f) { - None => return self.backiter.as_mut().and_then(|it| it.next()), - next => self.frontiter = next.map(IntoIterator::into_iter), - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); - let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| 
it.size_hint()); - let lo = flo.saturating_add(blo); - match (self.iter.size_hint(), fhi, bhi) { - ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)), - _ => (lo, None) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FlatMap where - F: FnMut(I::Item) -> U, - U: IntoIterator, - U::IntoIter: DoubleEndedIterator -{ - #[inline] - fn next_back(&mut self) -> Option { - loop { - if let Some(ref mut inner) = self.backiter { - if let Some(y) = inner.next_back() { - return Some(y) - } - } - match self.iter.next_back().map(&mut self.f) { - None => return self.frontiter.as_mut().and_then(|it| it.next_back()), - next => self.backiter = next.map(IntoIterator::into_iter), - } - } - } -} - -/// An iterator that yields `None` forever after the underlying iterator -/// yields `None` once. -/// -/// This `struct` is created by the [`fuse()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`fuse()`]: trait.Iterator.html#method.fuse -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Fuse { - iter: I, - done: bool -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Fuse where I: Iterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next(); - self.done = next.is_none(); - next - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - if self.done { - None - } else { - let nth = self.iter.nth(n); - self.done = nth.is_none(); - nth - } - } - - #[inline] - fn last(self) -> Option { - if self.done { - None - } else { - self.iter.last() - } - } - - #[inline] - fn count(self) -> usize { - if self.done { - 0 - } else { - self.iter.count() - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.done { - (0, Some(0)) - } else { - self.iter.size_hint() 
- } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { - #[inline] - fn next_back(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next_back(); - self.done = next.is_none(); - next - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Fuse where I: ExactSizeIterator {} - -/// An iterator that calls a function with a reference to each element before -/// yielding it. -/// -/// This `struct` is created by the [`inspect()`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`inspect()`]: trait.Iterator.html#method.inspect -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Inspect { - iter: I, - f: F, -} - -impl Inspect where F: FnMut(&I::Item) { - #[inline] - fn do_inspect(&mut self, elt: Option) -> Option { - if let Some(ref a) = elt { - (self.f)(a); - } - - elt - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Inspect where F: FnMut(&I::Item) { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - let next = self.iter.next(); - self.do_inspect(next) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Inspect - where F: FnMut(&I::Item), -{ - #[inline] - fn next_back(&mut self) -> Option { - let next = self.iter.next_back(); - self.do_inspect(next) - } -} - -/// Objects that can be stepped over in both directions. -/// -/// The `steps_between` function provides a way to efficiently compare -/// two `Step` objects. -#[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "27741")] -pub trait Step: PartialOrd + Sized { - /// Steps `self` if possible. 
- fn step(&self, by: &Self) -> Option; - - /// Returns the number of steps between two step objects. The count is - /// inclusive of `start` and exclusive of `end`. - /// - /// Returns `None` if it is not possible to calculate `steps_between` - /// without overflow. - fn steps_between(start: &Self, end: &Self, by: &Self) -> Option; -} - -macro_rules! step_impl_unsigned { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "27741")] - impl Step for $t { - #[inline] - fn step(&self, by: &$t) -> Option<$t> { - (*self).checked_add(*by) - } - #[inline] - #[allow(trivial_numeric_casts)] - fn steps_between(start: &$t, end: &$t, by: &$t) -> Option { - if *by == 0 { return None; } - if *start < *end { - // Note: We assume $t <= usize here - let diff = (*end - *start) as usize; - let by = *by as usize; - if diff % by > 0 { - Some(diff / by + 1) - } else { - Some(diff / by) - } - } else { - Some(0) - } - } - } - )*) -} -macro_rules! step_impl_signed { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "27741")] - impl Step for $t { - #[inline] - fn step(&self, by: &$t) -> Option<$t> { - (*self).checked_add(*by) - } - #[inline] - #[allow(trivial_numeric_casts)] - fn steps_between(start: &$t, end: &$t, by: &$t) -> Option { - if *by == 0 { return None; } - let diff: usize; - let by_u: usize; - if *by > 0 { - if *start >= *end { - return Some(0); - } - // Note: We assume $t <= isize here - // Use .wrapping_sub and cast to usize to compute the - // difference that may not fit inside the range of isize. 
- diff = (*end as isize).wrapping_sub(*start as isize) as usize; - by_u = *by as usize; - } else { - if *start <= *end { - return Some(0); - } - diff = (*start as isize).wrapping_sub(*end as isize) as usize; - by_u = (*by as isize).wrapping_mul(-1) as usize; - } - if diff % by_u > 0 { - Some(diff / by_u + 1) - } else { - Some(diff / by_u) - } - } - } - )*) -} - -macro_rules! step_impl_no_between { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "27741")] - impl Step for $t { - #[inline] - fn step(&self, by: &$t) -> Option<$t> { - (*self).checked_add(*by) - } - #[inline] - fn steps_between(_a: &$t, _b: &$t, _by: &$t) -> Option { - None - } - } - )*) -} - -step_impl_unsigned!(usize u8 u16 u32); -step_impl_signed!(isize i8 i16 i32); -#[cfg(target_pointer_width = "64")] -step_impl_unsigned!(u64); -#[cfg(target_pointer_width = "64")] -step_impl_signed!(i64); -// If the target pointer width is not 64-bits, we -// assume here that it is less than 64-bits. -#[cfg(not(target_pointer_width = "64"))] -step_impl_no_between!(u64 i64); - -/// An adapter for stepping range iterators by a custom amount. -/// -/// The resulting iterator handles overflow by stopping. The `A` -/// parameter is the type being iterated over, while `R` is the range -/// type (usually one of `std::ops::{Range, RangeFrom}`. -#[derive(Clone)] -#[unstable(feature = "step_by", reason = "recent addition", - issue = "27741")] -pub struct StepBy { - step_by: A, - range: R, -} - -impl RangeFrom { - /// Creates an iterator starting at the same point, but stepping by - /// the given amount at each iteration. - /// - /// # Examples - /// - /// ```ignore - /// for i in (0u8..).step_by(2) { - /// println!("{}", i); - /// } - /// ``` - /// - /// This prints all even `u8` values. 
- #[unstable(feature = "step_by", reason = "recent addition", - issue = "27741")] - pub fn step_by(self, by: A) -> StepBy { - StepBy { - step_by: by, - range: self - } - } -} - -impl ops::Range { - /// Creates an iterator with the same range, but stepping by the - /// given amount at each iteration. - /// - /// The resulting iterator handles overflow by stopping. - /// - /// # Examples - /// - /// ``` - /// #![feature(step_by)] - /// - /// for i in (0..10).step_by(2) { - /// println!("{}", i); - /// } - /// ``` - /// - /// This prints: - /// - /// ```text - /// 0 - /// 2 - /// 4 - /// 6 - /// 8 - /// ``` - #[unstable(feature = "step_by", reason = "recent addition", - issue = "27741")] - pub fn step_by(self, by: A) -> StepBy { - StepBy { - step_by: by, - range: self - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for StepBy> where - A: Clone, - for<'a> &'a A: Add<&'a A, Output = A> -{ - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - let mut n = &self.range.start + &self.step_by; - mem::swap(&mut n, &mut self.range.start); - Some(n) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - (usize::MAX, None) // Too bad we can't specify an infinite lower bound - } -} - -/// An iterator over the range [start, stop] -#[derive(Clone)] -#[unstable(feature = "range_inclusive", - reason = "likely to be replaced by range notation and adapters", - issue = "27777")] -#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")] -#[allow(deprecated)] -pub struct RangeInclusive { - range: ops::Range, - done: bool, -} - -/// Returns an iterator over the range [start, stop]. -#[inline] -#[unstable(feature = "range_inclusive", - reason = "likely to be replaced by range notation and adapters", - issue = "27777")] -#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... 
syntax")] -#[allow(deprecated)] -pub fn range_inclusive(start: A, stop: A) -> RangeInclusive - where A: Step + One + Clone -{ - RangeInclusive { - range: start..stop, - done: false, - } -} - -#[unstable(feature = "range_inclusive", - reason = "likely to be replaced by range notation and adapters", - issue = "27777")] -#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... syntax")] -#[allow(deprecated)] -impl Iterator for RangeInclusive where - A: PartialEq + Step + One + Clone, - for<'a> &'a A: Add<&'a A, Output = A> -{ - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - self.range.next().or_else(|| { - if !self.done && self.range.start == self.range.end { - self.done = true; - Some(self.range.end.clone()) - } else { - None - } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lo, hi) = self.range.size_hint(); - if self.done { - (lo, hi) - } else { - let lo = lo.saturating_add(1); - let hi = hi.and_then(|x| x.checked_add(1)); - (lo, hi) - } - } -} - -#[unstable(feature = "range_inclusive", - reason = "likely to be replaced by range notation and adapters", - issue = "27777")] -#[rustc_deprecated(since = "1.5.0", reason = "replaced with ... 
syntax")] -#[allow(deprecated)] -impl DoubleEndedIterator for RangeInclusive where - A: PartialEq + Step + One + Clone, - for<'a> &'a A: Add<&'a A, Output = A>, - for<'a> &'a A: Sub -{ - #[inline] - fn next_back(&mut self) -> Option { - if self.range.end > self.range.start { - let result = self.range.end.clone(); - self.range.end = &self.range.end - &A::one(); - Some(result) - } else if !self.done && self.range.start == self.range.end { - self.done = true; - Some(self.range.end.clone()) - } else { - None - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for StepBy> { - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - let rev = self.step_by < A::zero(); - if (rev && self.range.start > self.range.end) || - (!rev && self.range.start < self.range.end) - { - match self.range.start.step(&self.step_by) { - Some(mut n) => { - mem::swap(&mut self.range.start, &mut n); - Some(n) - }, - None => { - let mut n = self.range.end.clone(); - mem::swap(&mut self.range.start, &mut n); - Some(n) - } - } - } else { - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - match Step::steps_between(&self.range.start, - &self.range.end, - &self.step_by) { - Some(hint) => (hint, Some(hint)), - None => (0, None) - } - } -} - -macro_rules! 
range_exact_iter_impl { - ($($t:ty)*) => ($( - #[stable(feature = "rust1", since = "1.0.0")] - impl ExactSizeIterator for ops::Range<$t> { } - )*) -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for ops::Range where - for<'a> &'a A: Add<&'a A, Output = A> -{ - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - if self.start < self.end { - let mut n = &self.start + &A::one(); - mem::swap(&mut n, &mut self.start); - Some(n) - } else { - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - match Step::steps_between(&self.start, &self.end, &A::one()) { - Some(hint) => (hint, Some(hint)), - None => (0, None) - } - } -} - -// Ranges of u64 and i64 are excluded because they cannot guarantee having -// a length <= usize::MAX, which is required by ExactSizeIterator. -range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32); - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for ops::Range where - for<'a> &'a A: Add<&'a A, Output = A>, - for<'a> &'a A: Sub<&'a A, Output = A> -{ - #[inline] - fn next_back(&mut self) -> Option { - if self.start < self.end { - self.end = &self.end - &A::one(); - Some(self.end.clone()) - } else { - None - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for ops::RangeFrom where - for<'a> &'a A: Add<&'a A, Output = A> -{ - type Item = A; - - #[inline] - fn next(&mut self) -> Option { - let mut n = &self.start + &A::one(); - mem::swap(&mut n, &mut self.start); - Some(n) - } -} - -/// An iterator that repeats an element endlessly. -/// -/// This `struct` is created by the [`repeat()`] function. See its documentation for more. 
-/// -/// [`repeat()`]: fn.repeat.html -#[derive(Clone)] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Repeat { - element: A -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Repeat { - type Item = A; - - #[inline] - fn next(&mut self) -> Option { Some(self.element.clone()) } - #[inline] - fn size_hint(&self) -> (usize, Option) { (usize::MAX, None) } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Repeat { - #[inline] - fn next_back(&mut self) -> Option { Some(self.element.clone()) } -} - -/// Creates a new iterator that endlessly repeats a single element. -/// -/// The `repeat()` function repeats a single value over and over and over and -/// over and over and 🔁. -/// -/// Infinite iterators like `repeat()` are often used with adapters like -/// [`take()`], in order to make them finite. -/// -/// [`take()`]: trait.Iterator.html#method.take -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // the number four 4ever: -/// let mut fours = iter::repeat(4); -/// -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// assert_eq!(Some(4), fours.next()); -/// -/// // yup, still four -/// assert_eq!(Some(4), fours.next()); -/// ``` -/// -/// Going finite with [`take()`]: -/// -/// ``` -/// use std::iter; -/// -/// // that last example was too many fours. Let's only have four fours. -/// let mut four_fours = iter::repeat(4).take(4); -/// -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// assert_eq!(Some(4), four_fours.next()); -/// -/// // ... and now we're done -/// assert_eq!(None, four_fours.next()); -/// ``` -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn repeat(elt: T) -> Repeat { - Repeat{element: elt} -} - -/// An iterator that yields nothing. 
-/// -/// This `struct` is created by the [`empty()`] function. See its documentation for more. -/// -/// [`empty()`]: fn.empty.html -#[stable(feature = "iter_empty", since = "1.2.0")] -pub struct Empty(marker::PhantomData); - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Iterator for Empty { - type Item = T; - - fn next(&mut self) -> Option { - None - } - - fn size_hint(&self) -> (usize, Option){ - (0, Some(0)) - } -} - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl DoubleEndedIterator for Empty { - fn next_back(&mut self) -> Option { - None - } -} - -#[stable(feature = "iter_empty", since = "1.2.0")] -impl ExactSizeIterator for Empty { - fn len(&self) -> usize { - 0 - } -} - -// not #[derive] because that adds a Clone bound on T, -// which isn't necessary. -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Clone for Empty { - fn clone(&self) -> Empty { - Empty(marker::PhantomData) - } -} - -// not #[derive] because that adds a Default bound on T, -// which isn't necessary. -#[stable(feature = "iter_empty", since = "1.2.0")] -impl Default for Empty { - fn default() -> Empty { - Empty(marker::PhantomData) - } -} - -/// Creates an iterator that yields nothing. -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // this could have been an iterator over i32, but alas, it's just not. -/// let mut nope = iter::empty::(); -/// -/// assert_eq!(None, nope.next()); -/// ``` -#[stable(feature = "iter_empty", since = "1.2.0")] -pub fn empty() -> Empty { - Empty(marker::PhantomData) -} - -/// An iterator that yields an element exactly once. -/// -/// This `struct` is created by the [`once()`] function. See its documentation for more. 
-/// -/// [`once()`]: fn.once.html -#[derive(Clone)] -#[stable(feature = "iter_once", since = "1.2.0")] -pub struct Once { - inner: ::option::IntoIter -} - -#[stable(feature = "iter_once", since = "1.2.0")] -impl Iterator for Once { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.next() - } - - fn size_hint(&self) -> (usize, Option) { - self.inner.size_hint() - } -} - -#[stable(feature = "iter_once", since = "1.2.0")] -impl DoubleEndedIterator for Once { - fn next_back(&mut self) -> Option { - self.inner.next_back() - } -} - -#[stable(feature = "iter_once", since = "1.2.0")] -impl ExactSizeIterator for Once { - fn len(&self) -> usize { - self.inner.len() - } -} - -/// Creates an iterator that yields an element exactly once. -/// -/// This is commonly used to adapt a single value into a [`chain()`] of other -/// kinds of iteration. Maybe you have an iterator that covers almost -/// everything, but you need an extra special case. Maybe you have a function -/// which works on iterators, but you only need to process one value. -/// -/// [`chain()`]: trait.Iterator.html#method.chain -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter; -/// -/// // one is the loneliest number -/// let mut one = iter::once(1); -/// -/// assert_eq!(Some(1), one.next()); -/// -/// // just one, that's all we get -/// assert_eq!(None, one.next()); -/// ``` -/// -/// Chaining together with another iterator. 
Let's say that we want to iterate -/// over each file of the `.foo` directory, but also a configuration file, -/// `.foorc`: -/// -/// ```no_run -/// use std::iter; -/// use std::fs; -/// use std::path::PathBuf; -/// -/// let dirs = fs::read_dir(".foo").unwrap(); -/// -/// // we need to convert from an iterator of DirEntry-s to an iterator of -/// // PathBufs, so we use map -/// let dirs = dirs.map(|file| file.unwrap().path()); -/// -/// // now, our iterator just for our config file -/// let config = iter::once(PathBuf::from(".foorc")); -/// -/// // chain the two iterators together into one big iterator -/// let files = dirs.chain(config); -/// -/// // this will give us all of the files in .foo as well as .foorc -/// for f in files { -/// println!("{:?}", f); -/// } -/// ``` -#[stable(feature = "iter_once", since = "1.2.0")] -pub fn once(value: T) -> Once { - Once { inner: Some(value).into_iter() } -} diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/iterator.rs new file mode 100644 index 0000000000000..f6b74a91c193b --- /dev/null +++ b/src/libcore/iter/iterator.rs @@ -0,0 +1,2182 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use cmp::Ordering; + +use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, FlatMap, Fuse}; +use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, Take, TakeWhile, Rev}; +use super::{Zip, Sum, Product}; +use super::{ChainState, FromIterator, ZipImpl}; + +fn _assert_is_object_safe(_: &Iterator) {} + +/// An interface for dealing with iterators. +/// +/// This is the main iterator trait. For more about the concept of iterators +/// generally, please see the [module-level documentation]. 
In particular, you +/// may want to know how to [implement `Iterator`][impl]. +/// +/// [module-level documentation]: index.html +/// [impl]: index.html#implementing-iterator +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_on_unimplemented = "`{Self}` is not an iterator; maybe try calling \ + `.iter()` or a similar method"] +pub trait Iterator { + /// The type of the elements being iterated over. + #[stable(feature = "rust1", since = "1.0.0")] + type Item; + + /// Advances the iterator and returns the next value. + /// + /// Returns [`None`] when iteration is finished. Individual iterator + /// implementations may choose to resume iteration, and so calling `next()` + /// again may or may not eventually start returning [`Some(Item)`] again at some + /// point. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// [`Some(Item)`]: ../../std/option/enum.Option.html#variant.Some + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// // A call to next() returns the next value... + /// assert_eq!(Some(&1), iter.next()); + /// assert_eq!(Some(&2), iter.next()); + /// assert_eq!(Some(&3), iter.next()); + /// + /// // ... and then None once it's over. + /// assert_eq!(None, iter.next()); + /// + /// // More calls may or may not return None. Here, they always will. + /// assert_eq!(None, iter.next()); + /// assert_eq!(None, iter.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn next(&mut self) -> Option; + + /// Returns the bounds on the remaining length of the iterator. + /// + /// Specifically, `size_hint()` returns a tuple where the first element + /// is the lower bound, and the second element is the upper bound. + /// + /// The second half of the tuple that is returned is an [`Option`]`<`[`usize`]`>`. + /// A [`None`] here means that either there is no known upper bound, or the + /// upper bound is larger than [`usize`]. 
+ /// + /// # Implementation notes + /// + /// It is not enforced that an iterator implementation yields the declared + /// number of elements. A buggy iterator may yield less than the lower bound + /// or more than the upper bound of elements. + /// + /// `size_hint()` is primarily intended to be used for optimizations such as + /// reserving space for the elements of the iterator, but must not be + /// trusted to e.g. omit bounds checks in unsafe code. An incorrect + /// implementation of `size_hint()` should not lead to memory safety + /// violations. + /// + /// That said, the implementation should provide a correct estimation, + /// because otherwise it would be a violation of the trait's protocol. + /// + /// The default implementation returns `(0, None)` which is correct for any + /// iterator. + /// + /// [`usize`]: ../../std/primitive.usize.html + /// [`Option`]: ../../std/option/enum.Option.html + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// let iter = a.iter(); + /// + /// assert_eq!((3, Some(3)), iter.size_hint()); + /// ``` + /// + /// A more complex example: + /// + /// ``` + /// // The even numbers from zero to ten. + /// let iter = (0..10).filter(|x| x % 2 == 0); + /// + /// // We might iterate from zero to ten times. Knowing that it's five + /// // exactly wouldn't be possible without executing filter(). 
+ /// assert_eq!((0, Some(10)), iter.size_hint()); + /// + /// // Let's add one five more numbers with chain() + /// let iter = (0..10).filter(|x| x % 2 == 0).chain(15..20); + /// + /// // now both bounds are increased by five + /// assert_eq!((5, Some(15)), iter.size_hint()); + /// ``` + /// + /// Returning `None` for an upper bound: + /// + /// ``` + /// // an infinite iterator has no upper bound + /// let iter = 0..; + /// + /// assert_eq!((0, None), iter.size_hint()); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn size_hint(&self) -> (usize, Option) { (0, None) } + + /// Consumes the iterator, counting the number of iterations and returning it. + /// + /// This method will evaluate the iterator until its [`next()`] returns + /// [`None`]. Once [`None`] is encountered, `count()` returns the number of + /// times it called [`next()`]. + /// + /// [`next()`]: #tymethod.next + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so counting elements of + /// an iterator with more than [`usize::MAX`] elements either produces the + /// wrong result or panics. If debug assertions are enabled, a panic is + /// guaranteed. + /// + /// # Panics + /// + /// This function might panic if the iterator has more than [`usize::MAX`] + /// elements. + /// + /// [`usize::MAX`]: ../../std/isize/constant.MAX.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().count(), 3); + /// + /// let a = [1, 2, 3, 4, 5]; + /// assert_eq!(a.iter().count(), 5); + /// ``` + #[inline] + #[rustc_inherit_overflow_checks] + #[stable(feature = "rust1", since = "1.0.0")] + fn count(self) -> usize where Self: Sized { + // Might overflow. + self.fold(0, |cnt, _| cnt + 1) + } + + /// Consumes the iterator, returning the last element. + /// + /// This method will evaluate the iterator until it returns [`None`]. 
While + /// doing so, it keeps track of the current element. After [`None`] is + /// returned, `last()` will then return the last element it saw. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().last(), Some(&3)); + /// + /// let a = [1, 2, 3, 4, 5]; + /// assert_eq!(a.iter().last(), Some(&5)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn last(self) -> Option where Self: Sized { + let mut last = None; + for x in self { last = Some(x); } + last + } + + /// Returns the `n`th element of the iterator. + /// + /// Note that all preceding elements will be consumed (i.e. discarded). + /// + /// Like most indexing operations, the count starts from zero, so `nth(0)` + /// returns the first value, `nth(1)` the second, and so on. + /// + /// `nth()` will return [`None`] if `n` is greater than or equal to the length of the + /// iterator. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().nth(1), Some(&2)); + /// ``` + /// + /// Calling `nth()` multiple times doesn't rewind the iterator: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.nth(1), Some(&2)); + /// assert_eq!(iter.nth(1), None); + /// ``` + /// + /// Returning `None` if there are less than `n + 1` elements: + /// + /// ``` + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().nth(10), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn nth(&mut self, mut n: usize) -> Option where Self: Sized { + for x in self { + if n == 0 { return Some(x) } + n -= 1; + } + None + } + + /// Takes two iterators and creates a new iterator over both in sequence. 
+ /// + /// `chain()` will return a new iterator which will first iterate over + /// values from the first iterator and then over values from the second + /// iterator. + /// + /// In other words, it links two iterators together, in a chain. 🔗 + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a1 = [1, 2, 3]; + /// let a2 = [4, 5, 6]; + /// + /// let mut iter = a1.iter().chain(a2.iter()); + /// + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), Some(&3)); + /// assert_eq!(iter.next(), Some(&4)); + /// assert_eq!(iter.next(), Some(&5)); + /// assert_eq!(iter.next(), Some(&6)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Since the argument to `chain()` uses [`IntoIterator`], we can pass + /// anything that can be converted into an [`Iterator`], not just an + /// [`Iterator`] itself. For example, slices (`&[T]`) implement + /// [`IntoIterator`], and so can be passed to `chain()` directly: + /// + /// [`IntoIterator`]: trait.IntoIterator.html + /// [`Iterator`]: trait.Iterator.html + /// + /// ``` + /// let s1 = &[1, 2, 3]; + /// let s2 = &[4, 5, 6]; + /// + /// let mut iter = s1.iter().chain(s2); + /// + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), Some(&3)); + /// assert_eq!(iter.next(), Some(&4)); + /// assert_eq!(iter.next(), Some(&5)); + /// assert_eq!(iter.next(), Some(&6)); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn chain(self, other: U) -> Chain where + Self: Sized, U: IntoIterator, + { + Chain{a: self, b: other.into_iter(), state: ChainState::Both} + } + + /// 'Zips up' two iterators into a single iterator of pairs. 
+ /// + /// `zip()` returns a new iterator that will iterate over two other + /// iterators, returning a tuple where the first element comes from the + /// first iterator, and the second element comes from the second iterator. + /// + /// In other words, it zips two iterators together, into a single one. + /// + /// When either iterator returns [`None`], all further calls to [`next()`] + /// will return [`None`]. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a1 = [1, 2, 3]; + /// let a2 = [4, 5, 6]; + /// + /// let mut iter = a1.iter().zip(a2.iter()); + /// + /// assert_eq!(iter.next(), Some((&1, &4))); + /// assert_eq!(iter.next(), Some((&2, &5))); + /// assert_eq!(iter.next(), Some((&3, &6))); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Since the argument to `zip()` uses [`IntoIterator`], we can pass + /// anything that can be converted into an [`Iterator`], not just an + /// [`Iterator`] itself. For example, slices (`&[T]`) implement + /// [`IntoIterator`], and so can be passed to `zip()` directly: + /// + /// [`IntoIterator`]: trait.IntoIterator.html + /// [`Iterator`]: trait.Iterator.html + /// + /// ``` + /// let s1 = &[1, 2, 3]; + /// let s2 = &[4, 5, 6]; + /// + /// let mut iter = s1.iter().zip(s2); + /// + /// assert_eq!(iter.next(), Some((&1, &4))); + /// assert_eq!(iter.next(), Some((&2, &5))); + /// assert_eq!(iter.next(), Some((&3, &6))); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// `zip()` is often used to zip an infinite iterator to a finite one. + /// This works because the finite iterator will eventually return [`None`], + /// ending the zipper. 
Zipping with `(0..)` can look a lot like [`enumerate()`]: + /// + /// ``` + /// let enumerate: Vec<_> = "foo".chars().enumerate().collect(); + /// + /// let zipper: Vec<_> = (0..).zip("foo".chars()).collect(); + /// + /// assert_eq!((0, 'f'), enumerate[0]); + /// assert_eq!((0, 'f'), zipper[0]); + /// + /// assert_eq!((1, 'o'), enumerate[1]); + /// assert_eq!((1, 'o'), zipper[1]); + /// + /// assert_eq!((2, 'o'), enumerate[2]); + /// assert_eq!((2, 'o'), zipper[2]); + /// ``` + /// + /// [`enumerate()`]: trait.Iterator.html#method.enumerate + /// [`next()`]: ../../std/iter/trait.Iterator.html#tymethod.next + /// [`None`]: ../../std/option/enum.Option.html#variant.None + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn zip(self, other: U) -> Zip where + Self: Sized, U: IntoIterator + { + Zip::new(self, other.into_iter()) + } + + /// Takes a closure and creates an iterator which calls that closure on each + /// element. + /// + /// `map()` transforms one iterator into another, by means of its argument: + /// something that implements `FnMut`. It produces a new iterator which + /// calls this closure on each element of the original iterator. + /// + /// If you are good at thinking in types, you can think of `map()` like this: + /// If you have an iterator that gives you elements of some type `A`, and + /// you want an iterator of some other type `B`, you can use `map()`, + /// passing a closure that takes an `A` and returns a `B`. + /// + /// `map()` is conceptually similar to a [`for`] loop. However, as `map()` is + /// lazy, it is best used when you're already working with other iterators. + /// If you're doing some sort of looping for a side effect, it's considered + /// more idiomatic to use [`for`] than `map()`. 
+ /// + /// [`for`]: ../../book/loops.html#for + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.into_iter().map(|x| 2 * x); + /// + /// assert_eq!(iter.next(), Some(2)); + /// assert_eq!(iter.next(), Some(4)); + /// assert_eq!(iter.next(), Some(6)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// If you're doing some sort of side effect, prefer [`for`] to `map()`: + /// + /// ``` + /// # #![allow(unused_must_use)] + /// // don't do this: + /// (0..5).map(|x| println!("{}", x)); + /// + /// // it won't even execute, as it is lazy. Rust will warn you about this. + /// + /// // Instead, use for: + /// for x in 0..5 { + /// println!("{}", x); + /// } + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn map(self, f: F) -> Map where + Self: Sized, F: FnMut(Self::Item) -> B, + { + Map{iter: self, f: f} + } + + /// Creates an iterator which uses a closure to determine if an element + /// should be yielded. + /// + /// The closure must return `true` or `false`. `filter()` creates an + /// iterator which calls this closure on each element. If the closure + /// returns `true`, then the element is returned. If the closure returns + /// `false`, it will try again, and call the closure on the next element, + /// seeing if it passes the test. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [0i32, 1, 2]; + /// + /// let mut iter = a.into_iter().filter(|x| x.is_positive()); + /// + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Because the closure passed to `filter()` takes a reference, and many + /// iterators iterate over references, this leads to a possibly confusing + /// situation, where the type of the closure is a double reference: + /// + /// ``` + /// let a = [0, 1, 2]; + /// + /// let mut iter = a.into_iter().filter(|x| **x > 1); // need two *s! 
+ /// + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// It's common to instead use destructuring on the argument to strip away + /// one: + /// + /// ``` + /// let a = [0, 1, 2]; + /// + /// let mut iter = a.into_iter().filter(|&x| *x > 1); // both & and * + /// + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// or both: + /// + /// ``` + /// let a = [0, 1, 2]; + /// + /// let mut iter = a.into_iter().filter(|&&x| x > 1); // two &s + /// + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// of these layers. + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn filter

(self, predicate: P) -> Filter where + Self: Sized, P: FnMut(&Self::Item) -> bool, + { + Filter{iter: self, predicate: predicate} + } + + /// Creates an iterator that both filters and maps. + /// + /// The closure must return an [`Option`]. `filter_map()` creates an + /// iterator which calls this closure on each element. If the closure + /// returns [`Some(element)`][`Some`], then that element is returned. If the + /// closure returns [`None`], it will try again, and call the closure on the + /// next element, seeing if it will return [`Some`]. + /// + /// Why `filter_map()` and not just [`filter()`].[`map()`]? The key is in this + /// part: + /// + /// [`filter()`]: #method.filter + /// [`map()`]: #method.map + /// + /// > If the closure returns [`Some(element)`][`Some`], then that element is returned. + /// + /// In other words, it removes the [`Option`] layer automatically. If your + /// mapping is already returning an [`Option`] and you want to skip over + /// [`None`]s, then `filter_map()` is much, much nicer to use. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = ["1", "2", "lol"]; + /// + /// let mut iter = a.iter().filter_map(|s| s.parse().ok()); + /// + /// assert_eq!(iter.next(), Some(1)); + /// assert_eq!(iter.next(), Some(2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Here's the same example, but with [`filter()`] and [`map()`]: + /// + /// ``` + /// let a = ["1", "2", "lol"]; + /// + /// let mut iter = a.iter() + /// .map(|s| s.parse().ok()) + /// .filter(|s| s.is_some()); + /// + /// assert_eq!(iter.next(), Some(Some(1))); + /// assert_eq!(iter.next(), Some(Some(2))); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// There's an extra layer of [`Some`] in there. 
+ /// + /// [`Option`]: ../../std/option/enum.Option.html + /// [`Some`]: ../../std/option/enum.Option.html#variant.Some + /// [`None`]: ../../std/option/enum.Option.html#variant.None + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn filter_map(self, f: F) -> FilterMap where + Self: Sized, F: FnMut(Self::Item) -> Option, + { + FilterMap { iter: self, f: f } + } + + /// Creates an iterator which gives the current iteration count as well as + /// the next value. + /// + /// The iterator returned yields pairs `(i, val)`, where `i` is the + /// current index of iteration and `val` is the value returned by the + /// iterator. + /// + /// `enumerate()` keeps its count as a [`usize`]. If you want to count by a + /// different sized integer, the [`zip()`] function provides similar + /// functionality. + /// + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so enumerating more than + /// [`usize::MAX`] elements either produces the wrong result or panics. If + /// debug assertions are enabled, a panic is guaranteed. + /// + /// # Panics + /// + /// The returned iterator might panic if the to-be-returned index would + /// overflow a [`usize`]. + /// + /// [`usize::MAX`]: ../../std/usize/constant.MAX.html + /// [`usize`]: ../../std/primitive.usize.html + /// [`zip()`]: #method.zip + /// + /// # Examples + /// + /// ``` + /// let a = ['a', 'b', 'c']; + /// + /// let mut iter = a.iter().enumerate(); + /// + /// assert_eq!(iter.next(), Some((0, &'a'))); + /// assert_eq!(iter.next(), Some((1, &'b'))); + /// assert_eq!(iter.next(), Some((2, &'c'))); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn enumerate(self) -> Enumerate where Self: Sized { + Enumerate { iter: self, count: 0 } + } + + /// Creates an iterator which can use `peek` to look at the next element of + /// the iterator without consuming it. + /// + /// Adds a [`peek()`] method to an iterator. 
See its documentation for + /// more information. + /// + /// Note that the underlying iterator is still advanced when [`peek()`] is + /// called for the first time: In order to retrieve the next element, + /// [`next()`] is called on the underlying iterator, hence any side effects of + /// the [`next()`] method will occur. + /// + /// [`peek()`]: struct.Peekable.html#method.peek + /// [`next()`]: ../../std/iter/trait.Iterator.html#tymethod.next + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let xs = [1, 2, 3]; + /// + /// let mut iter = xs.iter().peekable(); + /// + /// // peek() lets us see into the future + /// assert_eq!(iter.peek(), Some(&&1)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// assert_eq!(iter.next(), Some(&2)); + /// + /// // we can peek() multiple times, the iterator won't advance + /// assert_eq!(iter.peek(), Some(&&3)); + /// assert_eq!(iter.peek(), Some(&&3)); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// + /// // after the iterator is finished, so is peek() + /// assert_eq!(iter.peek(), None); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn peekable(self) -> Peekable where Self: Sized { + Peekable{iter: self, peeked: None} + } + + /// Creates an iterator that [`skip()`]s elements based on a predicate. + /// + /// [`skip()`]: #method.skip + /// + /// `skip_while()` takes a closure as an argument. It will call this + /// closure on each element of the iterator, and ignore elements + /// until it returns `false`. + /// + /// After `false` is returned, `skip_while()`'s job is over, and the + /// rest of the elements are yielded. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [-1i32, 0, 1]; + /// + /// let mut iter = a.into_iter().skip_while(|x| x.is_negative()); + /// + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Because the closure passed to `skip_while()` takes a reference, and many + /// iterators iterate over references, this leads to a possibly confusing + /// situation, where the type of the closure is a double reference: + /// + /// ``` + /// let a = [-1, 0, 1]; + /// + /// let mut iter = a.into_iter().skip_while(|x| **x < 0); // need two *s! + /// + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Stopping after an initial `false`: + /// + /// ``` + /// let a = [-1, 0, 1, -2]; + /// + /// let mut iter = a.into_iter().skip_while(|x| **x < 0); + /// + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// // while this would have been false, since we already got a false, + /// // skip_while() isn't used any more + /// assert_eq!(iter.next(), Some(&-2)); + /// + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn skip_while

(self, predicate: P) -> SkipWhile where + Self: Sized, P: FnMut(&Self::Item) -> bool, + { + SkipWhile{iter: self, flag: false, predicate: predicate} + } + + /// Creates an iterator that yields elements based on a predicate. + /// + /// `take_while()` takes a closure as an argument. It will call this + /// closure on each element of the iterator, and yield elements + /// while it returns `true`. + /// + /// After `false` is returned, `take_while()`'s job is over, and the + /// rest of the elements are ignored. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [-1i32, 0, 1]; + /// + /// let mut iter = a.into_iter().take_while(|x| x.is_negative()); + /// + /// assert_eq!(iter.next(), Some(&-1)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Because the closure passed to `take_while()` takes a reference, and many + /// iterators iterate over references, this leads to a possibly confusing + /// situation, where the type of the closure is a double reference: + /// + /// ``` + /// let a = [-1, 0, 1]; + /// + /// let mut iter = a.into_iter().take_while(|x| **x < 0); // need two *s! 
+ /// + /// assert_eq!(iter.next(), Some(&-1)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Stopping after an initial `false`: + /// + /// ``` + /// let a = [-1, 0, 1, -2]; + /// + /// let mut iter = a.into_iter().take_while(|x| **x < 0); + /// + /// assert_eq!(iter.next(), Some(&-1)); + /// + /// // We have more elements that are less than zero, but since we already + /// // got a false, take_while() isn't used any more + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// Because `take_while()` needs to look at the value in order to see if it + /// should be included or not, consuming iterators will see that it is + /// removed: + /// + /// ``` + /// let a = [1, 2, 3, 4]; + /// let mut iter = a.into_iter(); + /// + /// let result: Vec = iter.by_ref() + /// .take_while(|n| **n != 3) + /// .cloned() + /// .collect(); + /// + /// assert_eq!(result, &[1, 2]); + /// + /// let result: Vec = iter.cloned().collect(); + /// + /// assert_eq!(result, &[4]); + /// ``` + /// + /// The `3` is no longer there, because it was consumed in order to see if + /// the iteration should stop, but wasn't placed back into the iterator or + /// some similar thing. + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn take_while

(self, predicate: P) -> TakeWhile where + Self: Sized, P: FnMut(&Self::Item) -> bool, + { + TakeWhile{iter: self, flag: false, predicate: predicate} + } + + /// Creates an iterator that skips the first `n` elements. + /// + /// After they have been consumed, the rest of the elements are yielded. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter().skip(2); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn skip(self, n: usize) -> Skip where Self: Sized { + Skip{iter: self, n: n} + } + + /// Creates an iterator that yields its first `n` elements. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter().take(2); + /// + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), None); + /// ``` + /// + /// `take()` is often used with an infinite iterator, to make it finite: + /// + /// ``` + /// let mut iter = (0..).take(3); + /// + /// assert_eq!(iter.next(), Some(0)); + /// assert_eq!(iter.next(), Some(1)); + /// assert_eq!(iter.next(), Some(2)); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn take(self, n: usize) -> Take where Self: Sized, { + Take{iter: self, n: n} + } + + /// An iterator adaptor similar to [`fold()`] that holds internal state and + /// produces a new iterator. + /// + /// [`fold()`]: #method.fold + /// + /// `scan()` takes two arguments: an initial value which seeds the internal + /// state, and a closure with two arguments, the first being a mutable + /// reference to the internal state and the second an iterator element. + /// The closure can assign to the internal state to share state between + /// iterations. 
+ /// + /// On iteration, the closure will be applied to each element of the + /// iterator and the return value from the closure, an [`Option`], is + /// yielded by the iterator. + /// + /// [`Option`]: ../../std/option/enum.Option.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter().scan(1, |state, &x| { + /// // each iteration, we'll multiply the state by the element + /// *state = *state * x; + /// + /// // the value passed on to the next iteration + /// Some(*state) + /// }); + /// + /// assert_eq!(iter.next(), Some(1)); + /// assert_eq!(iter.next(), Some(2)); + /// assert_eq!(iter.next(), Some(6)); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn scan(self, initial_state: St, f: F) -> Scan + where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option, + { + Scan{iter: self, f: f, state: initial_state} + } + + /// Creates an iterator that works like map, but flattens nested structure. + /// + /// The [`map()`] adapter is very useful, but only when the closure + /// argument produces values. If it produces an iterator instead, there's + /// an extra layer of indirection. `flat_map()` will remove this extra layer + /// on its own. + /// + /// Another way of thinking about `flat_map()`: [`map()`]'s closure returns + /// one item for each element, and `flat_map()`'s closure returns an + /// iterator for each element. 
+ /// + /// [`map()`]: #method.map + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let words = ["alpha", "beta", "gamma"]; + /// + /// // chars() returns an iterator + /// let merged: String = words.iter() + /// .flat_map(|s| s.chars()) + /// .collect(); + /// assert_eq!(merged, "alphabetagamma"); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn flat_map(self, f: F) -> FlatMap + where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, + { + FlatMap{iter: self, f: f, frontiter: None, backiter: None } + } + + /// Creates an iterator which ends after the first [`None`]. + /// + /// After an iterator returns [`None`], future calls may or may not yield + /// [`Some(T)`] again. `fuse()` adapts an iterator, ensuring that after a + /// [`None`] is given, it will always return [`None`] forever. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// [`Some(T)`]: ../../std/option/enum.Option.html#variant.Some + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // an iterator which alternates between Some and None + /// struct Alternate { + /// state: i32, + /// } + /// + /// impl Iterator for Alternate { + /// type Item = i32; + /// + /// fn next(&mut self) -> Option { + /// let val = self.state; + /// self.state = self.state + 1; + /// + /// // if it's even, Some(i32), else None + /// if val % 2 == 0 { + /// Some(val) + /// } else { + /// None + /// } + /// } + /// } + /// + /// let mut iter = Alternate { state: 0 }; + /// + /// // we can see our iterator going back and forth + /// assert_eq!(iter.next(), Some(0)); + /// assert_eq!(iter.next(), None); + /// assert_eq!(iter.next(), Some(2)); + /// assert_eq!(iter.next(), None); + /// + /// // however, once we fuse it... + /// let mut iter = iter.fuse(); + /// + /// assert_eq!(iter.next(), Some(4)); + /// assert_eq!(iter.next(), None); + /// + /// // it will always return None after the first time. 
+ /// assert_eq!(iter.next(), None); + /// assert_eq!(iter.next(), None); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn fuse(self) -> Fuse where Self: Sized { + Fuse{iter: self, done: false} + } + + /// Do something with each element of an iterator, passing the value on. + /// + /// When using iterators, you'll often chain several of them together. + /// While working on such code, you might want to check out what's + /// happening at various parts in the pipeline. To do that, insert + /// a call to `inspect()`. + /// + /// It's much more common for `inspect()` to be used as a debugging tool + /// than to exist in your final code, but never say never. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 4, 2, 3]; + /// + /// // this iterator sequence is complex. + /// let sum = a.iter() + /// .cloned() + /// .filter(|&x| x % 2 == 0) + /// .fold(0, |sum, i| sum + i); + /// + /// println!("{}", sum); + /// + /// // let's add some inspect() calls to investigate what's happening + /// let sum = a.iter() + /// .cloned() + /// .inspect(|x| println!("about to filter: {}", x)) + /// .filter(|&x| x % 2 == 0) + /// .inspect(|x| println!("made it through filter: {}", x)) + /// .fold(0, |sum, i| sum + i); + /// + /// println!("{}", sum); + /// ``` + /// + /// This will print: + /// + /// ```text + /// about to filter: 1 + /// about to filter: 4 + /// made it through filter: 4 + /// about to filter: 2 + /// made it through filter: 2 + /// about to filter: 3 + /// 6 + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn inspect(self, f: F) -> Inspect where + Self: Sized, F: FnMut(&Self::Item), + { + Inspect{iter: self, f: f} + } + + /// Borrows an iterator, rather than consuming it. + /// + /// This is useful to allow applying iterator adaptors while still + /// retaining ownership of the original iterator. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let iter = a.into_iter(); + /// + /// let sum: i32 = iter.take(5) + /// .fold(0, |acc, &i| acc + i ); + /// + /// assert_eq!(sum, 6); + /// + /// // if we try to use iter again, it won't work. The following line + /// // gives "error: use of moved value: `iter` + /// // assert_eq!(iter.next(), None); + /// + /// // let's try that again + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.into_iter(); + /// + /// // instead, we add in a .by_ref() + /// let sum: i32 = iter.by_ref() + /// .take(2) + /// .fold(0, |acc, &i| acc + i ); + /// + /// assert_eq!(sum, 3); + /// + /// // now this is just fine: + /// assert_eq!(iter.next(), Some(&3)); + /// assert_eq!(iter.next(), None); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn by_ref(&mut self) -> &mut Self where Self: Sized { self } + + /// Transforms an iterator into a collection. + /// + /// `collect()` can take anything iterable, and turn it into a relevant + /// collection. This is one of the more powerful methods in the standard + /// library, used in a variety of contexts. + /// + /// The most basic pattern in which `collect()` is used is to turn one + /// collection into another. You take a collection, call [`iter()`] on it, + /// do a bunch of transformations, and then `collect()` at the end. + /// + /// One of the keys to `collect()`'s power is that many things you might + /// not think of as 'collections' actually are. For example, a [`String`] + /// is a collection of [`char`]s. And a collection of [`Result`] can + /// be thought of as single [`Result`]`, E>`. See the examples + /// below for more. + /// + /// Because `collect()` is so general, it can cause problems with type + /// inference. As such, `collect()` is one of the few times you'll see + /// the syntax affectionately known as the 'turbofish': `::<>`. 
This + /// helps the inference algorithm understand specifically which collection + /// you're trying to collect into. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let doubled: Vec = a.iter() + /// .map(|&x| x * 2) + /// .collect(); + /// + /// assert_eq!(vec![2, 4, 6], doubled); + /// ``` + /// + /// Note that we needed the `: Vec` on the left-hand side. This is because + /// we could collect into, for example, a [`VecDeque`] instead: + /// + /// [`VecDeque`]: ../../std/collections/struct.VecDeque.html + /// + /// ``` + /// use std::collections::VecDeque; + /// + /// let a = [1, 2, 3]; + /// + /// let doubled: VecDeque = a.iter() + /// .map(|&x| x * 2) + /// .collect(); + /// + /// assert_eq!(2, doubled[0]); + /// assert_eq!(4, doubled[1]); + /// assert_eq!(6, doubled[2]); + /// ``` + /// + /// Using the 'turbofish' instead of annotating `doubled`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let doubled = a.iter() + /// .map(|&x| x * 2) + /// .collect::>(); + /// + /// assert_eq!(vec![2, 4, 6], doubled); + /// ``` + /// + /// Because `collect()` cares about what you're collecting into, you can + /// still use a partial type hint, `_`, with the turbofish: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let doubled = a.iter() + /// .map(|&x| x * 2) + /// .collect::>(); + /// + /// assert_eq!(vec![2, 4, 6], doubled); + /// ``` + /// + /// Using `collect()` to make a [`String`]: + /// + /// ``` + /// let chars = ['g', 'd', 'k', 'k', 'n']; + /// + /// let hello: String = chars.iter() + /// .map(|&x| x as u8) + /// .map(|x| (x + 1) as char) + /// .collect(); + /// + /// assert_eq!("hello", hello); + /// ``` + /// + /// If you have a list of [`Result`][`Result`]s, you can use `collect()` to + /// see if any of them failed: + /// + /// ``` + /// let results = [Ok(1), Err("nope"), Ok(3), Err("bad")]; + /// + /// let result: Result, &str> = results.iter().cloned().collect(); + /// + /// // gives us the 
first error + /// assert_eq!(Err("nope"), result); + /// + /// let results = [Ok(1), Ok(3)]; + /// + /// let result: Result, &str> = results.iter().cloned().collect(); + /// + /// // gives us the list of answers + /// assert_eq!(Ok(vec![1, 3]), result); + /// ``` + /// + /// [`iter()`]: ../../std/iter/trait.Iterator.html#tymethod.next + /// [`String`]: ../../std/string/struct.String.html + /// [`char`]: ../../std/primitive.char.html + /// [`Result`]: ../../std/result/enum.Result.html + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn collect>(self) -> B where Self: Sized { + FromIterator::from_iter(self) + } + + /// Consumes an iterator, creating two collections from it. + /// + /// The predicate passed to `partition()` can return `true`, or `false`. + /// `partition()` returns a pair, all of the elements for which it returned + /// `true`, and all of the elements for which it returned `false`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let (even, odd): (Vec, Vec) = a.into_iter() + /// .partition(|&n| n % 2 == 0); + /// + /// assert_eq!(even, vec![2]); + /// assert_eq!(odd, vec![1, 3]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn partition(self, mut f: F) -> (B, B) where + Self: Sized, + B: Default + Extend, + F: FnMut(&Self::Item) -> bool + { + let mut left: B = Default::default(); + let mut right: B = Default::default(); + + for x in self { + if f(&x) { + left.extend(Some(x)) + } else { + right.extend(Some(x)) + } + } + + (left, right) + } + + /// An iterator adaptor that applies a function, producing a single, final value. + /// + /// `fold()` takes two arguments: an initial value, and a closure with two + /// arguments: an 'accumulator', and an element. The closure returns the value that + /// the accumulator should have for the next iteration. + /// + /// The initial value is the value the accumulator will have on the first + /// call. 
+ /// + /// After applying this closure to every element of the iterator, `fold()` + /// returns the accumulator. + /// + /// This operation is sometimes called 'reduce' or 'inject'. + /// + /// Folding is useful whenever you have a collection of something, and want + /// to produce a single value from it. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// // the sum of all of the elements of a + /// let sum = a.iter() + /// .fold(0, |acc, &x| acc + x); + /// + /// assert_eq!(sum, 6); + /// ``` + /// + /// Let's walk through each step of the iteration here: + /// + /// | element | acc | x | result | + /// |---------|-----|---|--------| + /// | | 0 | | | + /// | 1 | 0 | 1 | 1 | + /// | 2 | 1 | 2 | 3 | + /// | 3 | 3 | 3 | 6 | + /// + /// And so, our final result, `6`. + /// + /// It's common for people who haven't used iterators a lot to + /// use a `for` loop with a list of things to build up a result. Those + /// can be turned into `fold()`s: + /// + /// [`for`]: ../../book/loops.html#for + /// + /// ``` + /// let numbers = [1, 2, 3, 4, 5]; + /// + /// let mut result = 0; + /// + /// // for loop: + /// for i in &numbers { + /// result = result + i; + /// } + /// + /// // fold: + /// let result2 = numbers.iter().fold(0, |acc, &x| acc + x); + /// + /// // they're the same + /// assert_eq!(result, result2); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn fold(self, init: B, mut f: F) -> B where + Self: Sized, F: FnMut(B, Self::Item) -> B, + { + let mut accum = init; + for x in self { + accum = f(accum, x); + } + accum + } + + /// Tests if every element of the iterator matches a predicate. + /// + /// `all()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, and if they all return + /// `true`, then so does `all()`. If any of them return `false`, it + /// returns `false`. 
+ /// + /// `all()` is short-circuiting; in other words, it will stop processing + /// as soon as it finds a `false`, given that no matter what else happens, + /// the result will also be `false`. + /// + /// An empty iterator returns `true`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert!(a.iter().all(|&x| x > 0)); + /// + /// assert!(!a.iter().all(|&x| x > 2)); + /// ``` + /// + /// Stopping at the first `false`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert!(!iter.all(|&x| x != 2)); + /// + /// // we can still use `iter`, as there are more elements. + /// assert_eq!(iter.next(), Some(&3)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn all(&mut self, mut f: F) -> bool where + Self: Sized, F: FnMut(Self::Item) -> bool + { + for x in self { + if !f(x) { + return false; + } + } + true + } + + /// Tests if any element of the iterator matches a predicate. + /// + /// `any()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, and if any of them return + /// `true`, then so does `any()`. If they all return `false`, it + /// returns `false`. + /// + /// `any()` is short-circuiting; in other words, it will stop processing + /// as soon as it finds a `true`, given that no matter what else happens, + /// the result will also be `true`. + /// + /// An empty iterator returns `false`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert!(a.iter().any(|&x| x > 0)); + /// + /// assert!(!a.iter().any(|&x| x > 5)); + /// ``` + /// + /// Stopping at the first `true`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert!(iter.any(|&x| x != 2)); + /// + /// // we can still use `iter`, as there are more elements. 
+ /// assert_eq!(iter.next(), Some(&2)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn any(&mut self, mut f: F) -> bool where + Self: Sized, + F: FnMut(Self::Item) -> bool + { + for x in self { + if f(x) { + return true; + } + } + false + } + + /// Searches for an element of an iterator that satisfies a predicate. + /// + /// `find()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, and if any of them return + /// `true`, then `find()` returns [`Some(element)`]. If they all return + /// `false`, it returns [`None`]. + /// + /// `find()` is short-circuiting; in other words, it will stop processing + /// as soon as the closure returns `true`. + /// + /// Because `find()` takes a reference, and many iterators iterate over + /// references, this leads to a possibly confusing situation where the + /// argument is a double reference. You can see this effect in the + /// examples below, with `&&x`. + /// + /// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().find(|&&x| x == 2), Some(&2)); + /// + /// assert_eq!(a.iter().find(|&&x| x == 5), None); + /// ``` + /// + /// Stopping at the first `true`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.find(|&&x| x == 2), Some(&2)); + /// + /// // we can still use `iter`, as there are more elements. + /// assert_eq!(iter.next(), Some(&3)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn find

(&mut self, mut predicate: P) -> Option where + Self: Sized, + P: FnMut(&Self::Item) -> bool, + { + for x in self { + if predicate(&x) { return Some(x) } + } + None + } + + /// Searches for an element in an iterator, returning its index. + /// + /// `position()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, and if one of them + /// returns `true`, then `position()` returns [`Some(index)`]. If all of + /// them return `false`, it returns [`None`]. + /// + /// `position()` is short-circuiting; in other words, it will stop + /// processing as soon as it finds a `true`. + /// + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so if there are more + /// than [`usize::MAX`] non-matching elements, it either produces the wrong + /// result or panics. If debug assertions are enabled, a panic is + /// guaranteed. + /// + /// # Panics + /// + /// This function might panic if the iterator has more than `usize::MAX` + /// non-matching elements. + /// + /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// [`usize::MAX`]: ../../std/usize/constant.MAX.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().position(|&x| x == 2), Some(1)); + /// + /// assert_eq!(a.iter().position(|&x| x == 5), None); + /// ``` + /// + /// Stopping at the first `true`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.position(|&x| x == 2), Some(1)); + /// + /// // we can still use `iter`, as there are more elements. + /// assert_eq!(iter.next(), Some(&3)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn position

(&mut self, mut predicate: P) -> Option where + Self: Sized, + P: FnMut(Self::Item) -> bool, + { + // `enumerate` might overflow. + for (i, x) in self.enumerate() { + if predicate(x) { + return Some(i); + } + } + None + } + + /// Searches for an element in an iterator from the right, returning its + /// index. + /// + /// `rposition()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, starting from the end, + /// and if one of them returns `true`, then `rposition()` returns + /// [`Some(index)`]. If all of them return `false`, it returns [`None`]. + /// + /// `rposition()` is short-circuiting; in other words, it will stop + /// processing as soon as it finds a `true`. + /// + /// [`Some(index)`]: ../../std/option/enum.Option.html#variant.Some + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().rposition(|&x| x == 3), Some(2)); + /// + /// assert_eq!(a.iter().rposition(|&x| x == 5), None); + /// ``` + /// + /// Stopping at the first `true`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.rposition(|&x| x == 2), Some(1)); + /// + /// // we can still use `iter`, as there are more elements. + /// assert_eq!(iter.next(), Some(&1)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn rposition

(&mut self, mut predicate: P) -> Option where + P: FnMut(Self::Item) -> bool, + Self: Sized + ExactSizeIterator + DoubleEndedIterator + { + let mut i = self.len(); + + while let Some(v) = self.next_back() { + if predicate(v) { + return Some(i - 1); + } + // No need for an overflow check here, because `ExactSizeIterator` + // implies that the number of elements fits into a `usize`. + i -= 1; + } + None + } + + /// Returns the maximum element of an iterator. + /// + /// If the two elements are equally maximum, the latest element is + /// returned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().max(), Some(&3)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn max(self) -> Option where Self: Sized, Self::Item: Ord + { + select_fold1(self, + |_| (), + // switch to y even if it is only equal, to preserve + // stability. + |_, x, _, y| *x <= *y) + .map(|(_, x)| x) + } + + /// Returns the minimum element of an iterator. + /// + /// If the two elements are equally minimum, the first element is + /// returned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().min(), Some(&1)); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn min(self) -> Option where Self: Sized, Self::Item: Ord + { + select_fold1(self, + |_| (), + // only switch to y if it is strictly smaller, to + // preserve stability. + |_, x, _, y| *x > *y) + .map(|(_, x)| x) + } + + /// Returns the element that gives the maximum value from the + /// specified function. + /// + /// Returns the rightmost element if the comparison determines two elements + /// to be equally maximum. 
+ /// + /// # Examples + /// + /// ``` + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10); + /// ``` + #[inline] + #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + fn max_by_key(self, f: F) -> Option + where Self: Sized, F: FnMut(&Self::Item) -> B, + { + select_fold1(self, + f, + // switch to y even if it is only equal, to preserve + // stability. + |x_p, _, y_p, _| x_p <= y_p) + .map(|(_, x)| x) + } + + /// Returns the element that gives the maximum value with respect to the + /// specified comparison function. + /// + /// Returns the rightmost element if the comparison determines two elements + /// to be equally maximum. + /// + /// # Examples + /// + /// ``` + /// #![feature(iter_max_by)] + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().max_by(|x, y| x.cmp(y)).unwrap(), 5); + /// ``` + #[inline] + #[unstable(feature = "iter_max_by", issue="36105")] + fn max_by(self, mut compare: F) -> Option + where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + select_fold1(self, + |_| (), + // switch to y even if it is only equal, to preserve + // stability. + |_, x, _, y| Ordering::Greater != compare(x, y)) + .map(|(_, x)| x) + } + + /// Returns the element that gives the minimum value from the + /// specified function. + /// + /// Returns the latest element if the comparison determines two elements + /// to be equally minimum. + /// + /// # Examples + /// + /// ``` + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); + /// ``` + #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] + fn min_by_key(self, f: F) -> Option + where Self: Sized, F: FnMut(&Self::Item) -> B, + { + select_fold1(self, + f, + // only switch to y if it is strictly smaller, to + // preserve stability. 
+ |x_p, _, y_p, _| x_p > y_p) + .map(|(_, x)| x) + } + + /// Returns the element that gives the minimum value with respect to the + /// specified comparison function. + /// + /// Returns the latest element if the comparison determines two elements + /// to be equally minimum. + /// + /// # Examples + /// + /// ``` + /// #![feature(iter_min_by)] + /// let a = [-3_i32, 0, 1, 5, -10]; + /// assert_eq!(*a.iter().min_by(|x, y| x.cmp(y)).unwrap(), -10); + /// ``` + #[inline] + #[unstable(feature = "iter_min_by", issue="36105")] + fn min_by(self, mut compare: F) -> Option + where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, + { + select_fold1(self, + |_| (), + // switch to y even if it is strictly smaller, to + // preserve stability. + |_, x, _, y| Ordering::Greater == compare(x, y)) + .map(|(_, x)| x) + } + + + /// Reverses an iterator's direction. + /// + /// Usually, iterators iterate from left to right. After using `rev()`, + /// an iterator will instead iterate from right to left. + /// + /// This is only possible if the iterator has an end, so `rev()` only + /// works on [`DoubleEndedIterator`]s. + /// + /// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html + /// + /// # Examples + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter().rev(); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// assert_eq!(iter.next(), Some(&2)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn rev(self) -> Rev where Self: Sized + DoubleEndedIterator { + Rev{iter: self} + } + + /// Converts an iterator of pairs into a pair of containers. + /// + /// `unzip()` consumes an entire iterator of pairs, producing two + /// collections: one from the left elements of the pairs, and one + /// from the right elements. + /// + /// This function is, in some sense, the opposite of [`zip()`]. 
+ /// + /// [`zip()`]: #method.zip + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [(1, 2), (3, 4)]; + /// + /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip(); + /// + /// assert_eq!(left, [1, 3]); + /// assert_eq!(right, [2, 4]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn unzip(self) -> (FromA, FromB) where + FromA: Default + Extend, + FromB: Default + Extend, + Self: Sized + Iterator, + { + let mut ts: FromA = Default::default(); + let mut us: FromB = Default::default(); + + for (t, u) in self { + ts.extend(Some(t)); + us.extend(Some(u)); + } + + (ts, us) + } + + /// Creates an iterator which [`clone()`]s all of its elements. + /// + /// This is useful when you have an iterator over `&T`, but you need an + /// iterator over `T`. + /// + /// [`clone()`]: ../../std/clone/trait.Clone.html#tymethod.clone + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let v_cloned: Vec<_> = a.iter().cloned().collect(); + /// + /// // cloned is the same as .map(|&x| x), for integers + /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); + /// + /// assert_eq!(v_cloned, vec![1, 2, 3]); + /// assert_eq!(v_map, vec![1, 2, 3]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn cloned<'a, T: 'a>(self) -> Cloned + where Self: Sized + Iterator, T: Clone + { + Cloned { it: self } + } + + /// Repeats an iterator endlessly. + /// + /// Instead of stopping at [`None`], the iterator will instead start again, + /// from the beginning. After iterating again, it will start at the + /// beginning again. And again. And again. Forever. 
+ /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut it = a.iter().cycle(); + /// + /// assert_eq!(it.next(), Some(&1)); + /// assert_eq!(it.next(), Some(&2)); + /// assert_eq!(it.next(), Some(&3)); + /// assert_eq!(it.next(), Some(&1)); + /// assert_eq!(it.next(), Some(&2)); + /// assert_eq!(it.next(), Some(&3)); + /// assert_eq!(it.next(), Some(&1)); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + #[inline] + fn cycle(self) -> Cycle where Self: Sized + Clone { + Cycle{orig: self.clone(), iter: self} + } + + /// Sums the elements of an iterator. + /// + /// Takes each element, adds them together, and returns the result. + /// + /// An empty iterator returns the zero value of the type. + /// + /// # Panics + /// + /// When calling `sum()` and a primitive integer type is being returned, this + /// method will panic if the computation overflows and debug assertions are + /// enabled. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// let sum: i32 = a.iter().sum(); + /// + /// assert_eq!(sum, 6); + /// ``` + #[stable(feature = "iter_arith", since = "1.11.0")] + fn sum(self) -> S + where Self: Sized, + S: Sum, + { + Sum::sum(self) + } + + /// Iterates over the entire iterator, multiplying all the elements + /// + /// An empty iterator returns the one value of the type. + /// + /// # Panics + /// + /// When calling `product()` and a primitive integer type is being returned, + /// method will panic if the computation overflows and debug assertions are + /// enabled. + /// + /// # Examples + /// + /// ``` + /// fn factorial(n: u32) -> u32 { + /// (1..).take_while(|&i| i <= n).product() + /// } + /// assert_eq!(factorial(0), 1); + /// assert_eq!(factorial(1), 1); + /// assert_eq!(factorial(5), 120); + /// ``` + #[stable(feature = "iter_arith", since = "1.11.0")] + fn product

(self) -> P + where Self: Sized, + P: Product, + { + Product::product(self) + } + + /// Lexicographically compares the elements of this `Iterator` with those + /// of another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn cmp(mut self, other: I) -> Ordering where + I: IntoIterator, + Self::Item: Ord, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return Ordering::Equal, + (None, _ ) => return Ordering::Less, + (_ , None) => return Ordering::Greater, + (Some(x), Some(y)) => match x.cmp(&y) { + Ordering::Equal => (), + non_eq => return non_eq, + }, + } + } + } + + /// Lexicographically compares the elements of this `Iterator` with those + /// of another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn partial_cmp(mut self, other: I) -> Option where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return Some(Ordering::Equal), + (None, _ ) => return Some(Ordering::Less), + (_ , None) => return Some(Ordering::Greater), + (Some(x), Some(y)) => match x.partial_cmp(&y) { + Some(Ordering::Equal) => (), + non_eq => return non_eq, + }, + } + } + } + + /// Determines if the elements of this `Iterator` are equal to those of + /// another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn eq(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialEq, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _) | (_, None) => return false, + (Some(x), Some(y)) => if x != y { return false }, + } + } + } + + /// Determines if the elements of this `Iterator` are unequal to those of + /// another. 
+ #[stable(feature = "iter_order", since = "1.5.0")] + fn ne(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialEq, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _) | (_, None) => return true, + (Some(x), Some(y)) => if x.ne(&y) { return true }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// less than those of another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn lt(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _ ) => return true, + (_ , None) => return false, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return true, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return false, + None => return false, + } + }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// less or equal to those of another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn le(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _ ) => return true, + (_ , None) => return false, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return true, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return false, + None => return false, + } + }, + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// greater than those of another. 
+ #[stable(feature = "iter_order", since = "1.5.0")] + fn gt(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return false, + (None, _ ) => return false, + (_ , None) => return true, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return false, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return true, + None => return false, + } + } + } + } + } + + /// Determines if the elements of this `Iterator` are lexicographically + /// greater than or equal to those of another. + #[stable(feature = "iter_order", since = "1.5.0")] + fn ge(mut self, other: I) -> bool where + I: IntoIterator, + Self::Item: PartialOrd, + Self: Sized, + { + let mut other = other.into_iter(); + + loop { + match (self.next(), other.next()) { + (None, None) => return true, + (None, _ ) => return false, + (_ , None) => return true, + (Some(x), Some(y)) => { + match x.partial_cmp(&y) { + Some(Ordering::Less) => return false, + Some(Ordering::Equal) => {} + Some(Ordering::Greater) => return true, + None => return false, + } + }, + } + } + } +} + +/// Select an element from an iterator based on the given projection +/// and "comparison" function. +/// +/// This is an idiosyncratic helper to try to factor out the +/// commonalities of {max,min}{,_by}. In particular, this avoids +/// having to implement optimizations several times. +#[inline] +fn select_fold1(mut it: I, + mut f_proj: FProj, + mut f_cmp: FCmp) -> Option<(B, I::Item)> + where I: Iterator, + FProj: FnMut(&I::Item) -> B, + FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool +{ + // start with the first element as our selection. This avoids + // having to use `Option`s inside the loop, translating to a + // sizeable performance gain (6x in one case). 
+ it.next().map(|mut sel| { + let mut sel_p = f_proj(&sel); + + for x in it { + let x_p = f_proj(&x); + if f_cmp(&sel_p, &sel, &x_p, &x) { + sel = x; + sel_p = x_p; + } + } + (sel_p, sel) + }) +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, I: Iterator + ?Sized> Iterator for &'a mut I { + type Item = I::Item; + fn next(&mut self) -> Option { (**self).next() } + fn size_hint(&self) -> (usize, Option) { (**self).size_hint() } +} diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs new file mode 100644 index 0000000000000..3999db0d63c99 --- /dev/null +++ b/src/libcore/iter/mod.rs @@ -0,0 +1,2070 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Composable external iteration. +//! +//! If you've found yourself with a collection of some kind, and needed to +//! perform an operation on the elements of said collection, you'll quickly run +//! into 'iterators'. Iterators are heavily used in idiomatic Rust code, so +//! it's worth becoming familiar with them. +//! +//! Before explaining more, let's talk about how this module is structured: +//! +//! # Organization +//! +//! This module is largely organized by type: +//! +//! * [Traits] are the core portion: these traits define what kind of iterators +//! exist and what you can do with them. The methods of these traits are worth +//! putting some extra study time into. +//! * [Functions] provide some helpful ways to create some basic iterators. +//! * [Structs] are often the return types of the various methods on this +//! module's traits. You'll usually want to look at the method that creates +//! the `struct`, rather than the `struct` itself. For more detail about why, +//! 
see '[Implementing Iterator](#implementing-iterator)'. +//! +//! [Traits]: #traits +//! [Functions]: #functions +//! [Structs]: #structs +//! +//! That's it! Let's dig into iterators. +//! +//! # Iterator +//! +//! The heart and soul of this module is the [`Iterator`] trait. The core of +//! [`Iterator`] looks like this: +//! +//! ``` +//! trait Iterator { +//! type Item; +//! fn next(&mut self) -> Option; +//! } +//! ``` +//! +//! An iterator has a method, [`next()`], which when called, returns an +//! [`Option`]``. [`next()`] will return `Some(Item)` as long as there +//! are elements, and once they've all been exhausted, will return `None` to +//! indicate that iteration is finished. Individual iterators may choose to +//! resume iteration, and so calling [`next()`] again may or may not eventually +//! start returning `Some(Item)` again at some point. +//! +//! [`Iterator`]'s full definition includes a number of other methods as well, +//! but they are default methods, built on top of [`next()`], and so you get +//! them for free. +//! +//! Iterators are also composable, and it's common to chain them together to do +//! more complex forms of processing. See the [Adapters](#adapters) section +//! below for more details. +//! +//! [`Iterator`]: trait.Iterator.html +//! [`next()`]: trait.Iterator.html#tymethod.next +//! [`Option`]: ../../std/option/enum.Option.html +//! +//! # The three forms of iteration +//! +//! There are three common methods which can create iterators from a collection: +//! +//! * `iter()`, which iterates over `&T`. +//! * `iter_mut()`, which iterates over `&mut T`. +//! * `into_iter()`, which iterates over `T`. +//! +//! Various things in the standard library may implement one or more of the +//! three, where appropriate. +//! +//! # Implementing Iterator +//! +//! Creating an iterator of your own involves two steps: creating a `struct` to +//! hold the iterator's state, and then `impl`ementing [`Iterator`] for that +//! `struct`. 
This is why there are so many `struct`s in this module: there is +//! one for each iterator and iterator adapter. +//! +//! Let's make an iterator named `Counter` which counts from `1` to `5`: +//! +//! ``` +//! // First, the struct: +//! +//! /// An iterator which counts from one to five +//! struct Counter { +//! count: usize, +//! } +//! +//! // we want our count to start at one, so let's add a new() method to help. +//! // This isn't strictly necessary, but is convenient. Note that we start +//! // `count` at zero, we'll see why in `next()`'s implementation below. +//! impl Counter { +//! fn new() -> Counter { +//! Counter { count: 0 } +//! } +//! } +//! +//! // Then, we implement `Iterator` for our `Counter`: +//! +//! impl Iterator for Counter { +//! // we will be counting with usize +//! type Item = usize; +//! +//! // next() is the only required method +//! fn next(&mut self) -> Option { +//! // increment our count. This is why we started at zero. +//! self.count += 1; +//! +//! // check to see if we've finished counting or not. +//! if self.count < 6 { +//! Some(self.count) +//! } else { +//! None +//! } +//! } +//! } +//! +//! // And now we can use it! +//! +//! let mut counter = Counter::new(); +//! +//! let x = counter.next().unwrap(); +//! println!("{}", x); +//! +//! let x = counter.next().unwrap(); +//! println!("{}", x); +//! +//! let x = counter.next().unwrap(); +//! println!("{}", x); +//! +//! let x = counter.next().unwrap(); +//! println!("{}", x); +//! +//! let x = counter.next().unwrap(); +//! println!("{}", x); +//! ``` +//! +//! This will print `1` through `5`, each on their own line. +//! +//! Calling `next()` this way gets repetitive. Rust has a construct which can +//! call `next()` on your iterator, until it reaches `None`. Let's go over that +//! next. +//! +//! # for Loops and IntoIterator +//! +//! Rust's `for` loop syntax is actually sugar for iterators. Here's a basic +//! example of `for`: +//! +//! ``` +//! 
let values = vec![1, 2, 3, 4, 5]; +//! +//! for x in values { +//! println!("{}", x); +//! } +//! ``` +//! +//! This will print the numbers one through five, each on their own line. But +//! you'll notice something here: we never called anything on our vector to +//! produce an iterator. What gives? +//! +//! There's a trait in the standard library for converting something into an +//! iterator: [`IntoIterator`]. This trait has one method, [`into_iter()`], +//! which converts the thing implementing [`IntoIterator`] into an iterator. +//! Let's take a look at that `for` loop again, and what the compiler converts +//! it into: +//! +//! [`IntoIterator`]: trait.IntoIterator.html +//! [`into_iter()`]: trait.IntoIterator.html#tymethod.into_iter +//! +//! ``` +//! let values = vec![1, 2, 3, 4, 5]; +//! +//! for x in values { +//! println!("{}", x); +//! } +//! ``` +//! +//! Rust de-sugars this into: +//! +//! ``` +//! let values = vec![1, 2, 3, 4, 5]; +//! { +//! let result = match IntoIterator::into_iter(values) { +//! mut iter => loop { +//! match iter.next() { +//! Some(x) => { println!("{}", x); }, +//! None => break, +//! } +//! }, +//! }; +//! result +//! } +//! ``` +//! +//! First, we call `into_iter()` on the value. Then, we match on the iterator +//! that returns, calling [`next()`] over and over until we see a `None`. At +//! that point, we `break` out of the loop, and we're done iterating. +//! +//! There's one more subtle bit here: the standard library contains an +//! interesting implementation of [`IntoIterator`]: +//! +//! ```ignore +//! impl IntoIterator for I +//! ``` +//! +//! In other words, all [`Iterator`]s implement [`IntoIterator`], by just +//! returning themselves. This means two things: +//! +//! 1. If you're writing an [`Iterator`], you can use it with a `for` loop. +//! 2. If you're creating a collection, implementing [`IntoIterator`] for it +//! will allow your collection to be used with the `for` loop. +//! +//! # Adapters +//! +//! 
Functions which take an [`Iterator`] and return another [`Iterator`] are +//! often called 'iterator adapters', as they're a form of the 'adapter +//! pattern'. +//! +//! Common iterator adapters include [`map()`], [`take()`], and [`filter()`]. +//! For more, see their documentation. +//! +//! [`map()`]: trait.Iterator.html#method.map +//! [`take()`]: trait.Iterator.html#method.take +//! [`filter()`]: trait.Iterator.html#method.filter +//! +//! # Laziness +//! +//! Iterators (and iterator [adapters](#adapters)) are *lazy*. This means that +//! just creating an iterator doesn't _do_ a whole lot. Nothing really happens +//! until you call [`next()`]. This is sometimes a source of confusion when +//! creating an iterator solely for its side effects. For example, the [`map()`] +//! method calls a closure on each element it iterates over: +//! +//! ``` +//! # #![allow(unused_must_use)] +//! let v = vec![1, 2, 3, 4, 5]; +//! v.iter().map(|x| println!("{}", x)); +//! ``` +//! +//! This will not print any values, as we only created an iterator, rather than +//! using it. The compiler will warn us about this kind of behavior: +//! +//! ```text +//! warning: unused result which must be used: iterator adaptors are lazy and +//! do nothing unless consumed +//! ``` +//! +//! The idiomatic way to write a [`map()`] for its side effects is to use a +//! `for` loop instead: +//! +//! ``` +//! let v = vec![1, 2, 3, 4, 5]; +//! +//! for x in &v { +//! println!("{}", x); +//! } +//! ``` +//! +//! [`map()`]: trait.Iterator.html#method.map +//! +//! The two most common ways to evaluate an iterator are to use a `for` loop +//! like this, or using the [`collect()`] method to produce a new collection. +//! +//! [`collect()`]: trait.Iterator.html#method.collect +//! +//! # Infinity +//! +//! Iterators do not have to be finite. As an example, an open-ended range is +//! an infinite iterator: +//! +//! ``` +//! let numbers = 0..; +//! ``` +//! +//! 
It is common to use the [`take()`] iterator adapter to turn an infinite +//! iterator into a finite one: +//! +//! ``` +//! let numbers = 0..; +//! let five_numbers = numbers.take(5); +//! +//! for number in five_numbers { +//! println!("{}", number); +//! } +//! ``` +//! +//! This will print the numbers `0` through `4`, each on their own line. +//! +//! [`take()`]: trait.Iterator.html#method.take + +#![stable(feature = "rust1", since = "1.0.0")] + +use cmp; +use fmt; +use iter_private::TrustedRandomAccess; +use usize; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::iterator::Iterator; + +#[unstable(feature = "step_trait", + reason = "likely to be replaced by finer-grained traits", + issue = "27741")] +pub use self::range::Step; +#[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] +pub use self::range::StepBy; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::sources::{Repeat, repeat}; +#[stable(feature = "iter_empty", since = "1.2.0")] +pub use self::sources::{Empty, empty}; +#[stable(feature = "iter_once", since = "1.2.0")] +pub use self::sources::{Once, once}; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::traits::{ExactSizeIterator, Sum, Product}; +#[unstable(feature = "fused", issue = "35602")] +pub use self::traits::FusedIterator; +#[unstable(feature = "trusted_len", issue = "37572")] +pub use self::traits::TrustedLen; + +mod iterator; +mod range; +mod sources; +mod traits; + +/// An double-ended iterator with the direction inverted. +/// +/// This `struct` is created by the [`rev()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`rev()`]: trait.Iterator.html#method.rev +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Rev { + iter: T +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Rev where I: DoubleEndedIterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { self.iter.next_back() } + #[inline] + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Rev where I: DoubleEndedIterator { + #[inline] + fn next_back(&mut self) -> Option<::Item> { self.iter.next() } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Rev + where I: ExactSizeIterator + DoubleEndedIterator +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Rev + where I: FusedIterator + DoubleEndedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Rev + where I: TrustedLen + DoubleEndedIterator {} + +/// An iterator that clones the elements of an underlying iterator. +/// +/// This `struct` is created by the [`cloned()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`cloned()`]: trait.Iterator.html#method.cloned +/// [`Iterator`]: trait.Iterator.html +#[stable(feature = "iter_cloned", since = "1.1.0")] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct Cloned { + it: I, +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> Iterator for Cloned + where I: Iterator, T: Clone +{ + type Item = T; + + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn fold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.fold(init, move |acc, elt| f(acc, elt.clone())) + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> DoubleEndedIterator for Cloned + where I: DoubleEndedIterator, T: Clone +{ + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> ExactSizeIterator for Cloned + where I: ExactSizeIterator, T: Clone +{ + fn len(&self) -> usize { + self.it.len() + } + + fn is_empty(&self) -> bool { + self.it.is_empty() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, I, T: 'a> FusedIterator for Cloned + where I: FusedIterator, T: Clone +{} + +#[doc(hidden)] +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned + where I: TrustedRandomAccess, T: Clone +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + self.it.get_unchecked(i).clone() + } + + #[inline] + fn may_have_side_effect() -> bool { true } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, I, T: 'a> TrustedLen for Cloned + where I: TrustedLen, + T: Clone +{} + +/// An iterator that repeats endlessly. +/// +/// This `struct` is created by the [`cycle()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`cycle()`]: trait.Iterator.html#method.cycle +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Cycle { + orig: I, + iter: I, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Cycle where I: Clone + Iterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + match self.iter.next() { + None => { self.iter = self.orig.clone(); self.iter.next() } + y => y + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // the cycle iterator is either empty or infinite + match self.orig.size_hint() { + sz @ (0, Some(0)) => sz, + (0, _) => (0, None), + _ => (usize::MAX, None) + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Cycle where I: Clone + Iterator {} + +/// An iterator that strings two iterators together. +/// +/// This `struct` is created by the [`chain()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`chain()`]: trait.Iterator.html#method.chain +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Chain { + a: A, + b: B, + state: ChainState, +} + +// The iterator protocol specifies that iteration ends with the return value +// `None` from `.next()` (or `.next_back()`) and it is unspecified what +// further calls return. The chain adaptor must account for this since it uses +// two subiterators. +// +// It uses three states: +// +// - Both: `a` and `b` are remaining +// - Front: `a` remaining +// - Back: `b` remaining +// +// The fourth state (neither iterator is remaining) only occurs after Chain has +// returned None once, so we don't need to store this state. 
+#[derive(Clone, Debug)] +enum ChainState { + // both front and back iterator are remaining + Both, + // only front is remaining + Front, + // only back is remaining + Back, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Chain where + A: Iterator, + B: Iterator +{ + type Item = A::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.state { + ChainState::Both => match self.a.next() { + elt @ Some(..) => elt, + None => { + self.state = ChainState::Back; + self.b.next() + } + }, + ChainState::Front => self.a.next(), + ChainState::Back => self.b.next(), + } + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn count(self) -> usize { + match self.state { + ChainState::Both => self.a.count() + self.b.count(), + ChainState::Front => self.a.count(), + ChainState::Back => self.b.count(), + } + } + + fn fold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Front => { + accum = self.a.fold(accum, &mut f); + } + _ => { } + } + match self.state { + ChainState::Both | ChainState::Back => { + accum = self.b.fold(accum, &mut f); + } + _ => { } + } + accum + } + + #[inline] + fn nth(&mut self, mut n: usize) -> Option { + match self.state { + ChainState::Both | ChainState::Front => { + for x in self.a.by_ref() { + if n == 0 { + return Some(x) + } + n -= 1; + } + if let ChainState::Both = self.state { + self.state = ChainState::Back; + } + } + ChainState::Back => {} + } + if let ChainState::Back = self.state { + self.b.nth(n) + } else { + None + } + } + + #[inline] + fn find

(&mut self, mut predicate: P) -> Option where + P: FnMut(&Self::Item) -> bool, + { + match self.state { + ChainState::Both => match self.a.find(&mut predicate) { + None => { + self.state = ChainState::Back; + self.b.find(predicate) + } + v => v + }, + ChainState::Front => self.a.find(predicate), + ChainState::Back => self.b.find(predicate), + } + } + + #[inline] + fn last(self) -> Option { + match self.state { + ChainState::Both => { + // Must exhaust a before b. + let a_last = self.a.last(); + let b_last = self.b.last(); + b_last.or(a_last) + }, + ChainState::Front => self.a.last(), + ChainState::Back => self.b.last() + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let lower = a_lower.saturating_add(b_lower); + + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => x.checked_add(y), + _ => None + }; + + (lower, upper) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Chain where + A: DoubleEndedIterator, + B: DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + match self.state { + ChainState::Both => match self.b.next_back() { + elt @ Some(..) => elt, + None => { + self.state = ChainState::Front; + self.a.next_back() + } + }, + ChainState::Front => self.a.next_back(), + ChainState::Back => self.b.next_back(), + } + } +} + +// Note: *both* must be fused to handle double-ended iterators. +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Chain + where A: FusedIterator, + B: FusedIterator, +{} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Chain + where A: TrustedLen, B: TrustedLen, +{} + +/// An iterator that iterates two other iterators simultaneously. +/// +/// This `struct` is created by the [`zip()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`zip()`]: trait.Iterator.html#method.zip +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Zip { + a: A, + b: B, + // index and len are only used by the specialized version of zip + index: usize, + len: usize, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Zip where A: Iterator, B: Iterator +{ + type Item = (A::Item, B::Item); + + #[inline] + fn next(&mut self) -> Option { + ZipImpl::next(self) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + ZipImpl::size_hint(self) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Zip where + A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator, +{ + #[inline] + fn next_back(&mut self) -> Option<(A::Item, B::Item)> { + ZipImpl::next_back(self) + } +} + +// Zip specialization trait +#[doc(hidden)] +trait ZipImpl { + type Item; + fn new(a: A, b: B) -> Self; + fn next(&mut self) -> Option; + fn size_hint(&self) -> (usize, Option); + fn next_back(&mut self) -> Option + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator; +} + +// General Zip impl +#[doc(hidden)] +impl ZipImpl for Zip + where A: Iterator, B: Iterator +{ + type Item = (A::Item, B::Item); + default fn new(a: A, b: B) -> Self { + Zip { + a: a, + b: b, + index: 0, // unused + len: 0, // unused + } + } + + #[inline] + default fn next(&mut self) -> Option<(A::Item, B::Item)> { + self.a.next().and_then(|x| { + self.b.next().and_then(|y| { + Some((x, y)) + }) + }) + } + + #[inline] + default fn next_back(&mut self) -> Option<(A::Item, B::Item)> + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator + { + let a_sz = self.a.len(); + let b_sz = self.b.len(); + if a_sz != b_sz { + // Adjust a, b to equal length + if a_sz > b_sz { + 
for _ in 0..a_sz - b_sz { self.a.next_back(); } + } else { + for _ in 0..b_sz - a_sz { self.b.next_back(); } + } + } + match (self.a.next_back(), self.b.next_back()) { + (Some(x), Some(y)) => Some((x, y)), + (None, None) => None, + _ => unreachable!(), + } + } + + #[inline] + default fn size_hint(&self) -> (usize, Option) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let lower = cmp::min(a_lower, b_lower); + + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => Some(cmp::min(x,y)), + (Some(x), None) => Some(x), + (None, Some(y)) => Some(y), + (None, None) => None + }; + + (lower, upper) + } +} + +#[doc(hidden)] +impl ZipImpl for Zip + where A: TrustedRandomAccess, B: TrustedRandomAccess +{ + fn new(a: A, b: B) -> Self { + let len = cmp::min(a.len(), b.len()); + Zip { + a: a, + b: b, + index: 0, + len: len, + } + } + + #[inline] + fn next(&mut self) -> Option<(A::Item, B::Item)> { + if self.index < self.len { + let i = self.index; + self.index += 1; + unsafe { + Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) + } + } else if A::may_have_side_effect() && self.index < self.a.len() { + // match the base implementation's potential side effects + unsafe { + self.a.get_unchecked(self.index); + } + self.index += 1; + None + } else { + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len - self.index; + (len, Some(len)) + } + + #[inline] + fn next_back(&mut self) -> Option<(A::Item, B::Item)> + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator + { + // Adjust a, b to equal length + if A::may_have_side_effect() { + let sz = self.a.len(); + if sz > self.len { + for _ in 0..sz - cmp::max(self.len, self.index) { + self.a.next_back(); + } + } + } + if B::may_have_side_effect() { + let sz = self.b.len(); + if sz > self.len { + for _ in 0..sz - self.len { + self.b.next_back(); + } + } + } + if self.index < self.len { + 
self.len -= 1; + let i = self.len; + unsafe { + Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) + } + } else { + None + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Zip + where A: ExactSizeIterator, B: ExactSizeIterator {} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Zip + where A: TrustedRandomAccess, + B: TrustedRandomAccess, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) { + (self.a.get_unchecked(i), self.b.get_unchecked(i)) + } + + fn may_have_side_effect() -> bool { + A::may_have_side_effect() || B::may_have_side_effect() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Zip + where A: FusedIterator, B: FusedIterator, {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Zip + where A: TrustedLen, B: TrustedLen, +{} + +/// An iterator that maps the values of `iter` with `f`. +/// +/// This `struct` is created by the [`map()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`map()`]: trait.Iterator.html#method.map +/// [`Iterator`]: trait.Iterator.html +/// +/// # Notes about side effects +/// +/// The [`map()`] iterator implements [`DoubleEndedIterator`], meaning that +/// you can also [`map()`] backwards: +/// +/// ```rust +/// let v: Vec = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); +/// +/// assert_eq!(v, [4, 3, 2]); +/// ``` +/// +/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html +/// +/// But if your closure has state, iterating backwards may act in a way you do +/// not expect. Let's go through an example. First, in the forward direction: +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) { +/// println!("{:?}", pair); +/// } +/// ``` +/// +/// This will print "('a', 1), ('b', 2), ('c', 3)". +/// +/// Now consider this twist where we add a call to `rev`. 
This version will +/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, +/// but the values of the counter still go in order. This is because `map()` is +/// still being called lazilly on each item, but we are popping items off the +/// back of the vector now, instead of shifting them from the front. +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) +/// .rev() { +/// println!("{:?}", pair); +/// } +/// ``` +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Map { + iter: I, + f: F, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Map { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Map") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Map where F: FnMut(I::Item) -> B { + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn fold(self, init: Acc, mut g: G) -> Acc + where G: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, elt| g(acc, f(elt))) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Map where + F: FnMut(I::Item) -> B, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(&mut self.f) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Map + where F: FnMut(I::Item) -> B +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Map + where F: FnMut(I::Item) -> B {} + +#[unstable(feature = "trusted_len", issue = "37572")] 
+unsafe impl TrustedLen for Map + where I: TrustedLen, + F: FnMut(I::Item) -> B {} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Map + where I: TrustedRandomAccess, + F: FnMut(I::Item) -> B, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + (self.f)(self.iter.get_unchecked(i)) + } + #[inline] + fn may_have_side_effect() -> bool { true } +} + +/// An iterator that filters the elements of `iter` with `predicate`. +/// +/// This `struct` is created by the [`filter()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`filter()`]: trait.Iterator.html#method.filter +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Filter { + iter: I, + predicate: P, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Filter") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Filter where P: FnMut(&I::Item) -> bool { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + for x in self.iter.by_ref() { + if (self.predicate)(&x) { + return Some(x); + } + } + None + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Filter + where P: FnMut(&I::Item) -> bool, +{ + #[inline] + fn next_back(&mut self) -> Option { + for x in self.iter.by_ref().rev() { + if (self.predicate)(&x) { + return Some(x); + } + } + None + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Filter + where P: FnMut(&I::Item) -> bool {} + +/// An iterator that uses `f` to both filter and map elements from `iter`. 
+/// +/// This `struct` is created by the [`filter_map()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`filter_map()`]: trait.Iterator.html#method.filter_map +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct FilterMap { + iter: I, + f: F, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for FilterMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FilterMap") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for FilterMap + where F: FnMut(I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + for x in self.iter.by_ref() { + if let Some(y) = (self.f)(x) { + return Some(y); + } + } + None + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FilterMap + where F: FnMut(I::Item) -> Option, +{ + #[inline] + fn next_back(&mut self) -> Option { + for x in self.iter.by_ref().rev() { + if let Some(y) = (self.f)(x) { + return Some(y); + } + } + None + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for FilterMap + where F: FnMut(I::Item) -> Option {} + +/// An iterator that yields the current count and the element during iteration. +/// +/// This `struct` is created by the [`enumerate()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`enumerate()`]: trait.Iterator.html#method.enumerate +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Enumerate { + iter: I, + count: usize, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Enumerate where I: Iterator { + type Item = (usize, ::Item); + + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so enumerating more than + /// `usize::MAX` elements either produces the wrong result or panics. If + /// debug assertions are enabled, a panic is guaranteed. + /// + /// # Panics + /// + /// Might panic if the index of the element overflows a `usize`. + #[inline] + #[rustc_inherit_overflow_checks] + fn next(&mut self) -> Option<(usize, ::Item)> { + self.iter.next().map(|a| { + let ret = (self.count, a); + // Possible undefined overflow. + self.count += 1; + ret + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { + self.iter.nth(n).map(|a| { + let i = self.count + n; + self.count = i + 1; + (i, a) + }) + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Enumerate where + I: ExactSizeIterator + DoubleEndedIterator +{ + #[inline] + fn next_back(&mut self) -> Option<(usize, ::Item)> { + self.iter.next_back().map(|a| { + let len = self.iter.len(); + // Can safely add, `ExactSizeIterator` promises that the number of + // elements fits into a `usize`. 
+ (self.count + len, a) + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Enumerate where I: ExactSizeIterator { + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Enumerate + where I: TrustedRandomAccess +{ + unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) { + (self.count + i, self.iter.get_unchecked(i)) + } + + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Enumerate where I: FusedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Enumerate + where I: TrustedLen, +{} + + +/// An iterator with a `peek()` that returns an optional reference to the next +/// element. +/// +/// This `struct` is created by the [`peekable()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`peekable()`]: trait.Iterator.html#method.peekable +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Peekable { + iter: I, + /// Remember a peeked value, even if it was None. + peeked: Option>, +} + +// Peekable must remember if a None has been seen in the `.peek()` method. +// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the +// underlying iterator at most once. This does not by itself make the iterator +// fused. 
+#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Peekable { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.peeked.take() { + Some(v) => v, + None => self.iter.next(), + } + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn count(mut self) -> usize { + match self.peeked.take() { + Some(None) => 0, + Some(Some(_)) => 1 + self.iter.count(), + None => self.iter.count(), + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + match self.peeked.take() { + // the .take() below is just to avoid "move into pattern guard" + Some(ref mut v) if n == 0 => v.take(), + Some(None) => None, + Some(Some(_)) => self.iter.nth(n - 1), + None => self.iter.nth(n), + } + } + + #[inline] + fn last(mut self) -> Option { + let peek_opt = match self.peeked.take() { + Some(None) => return None, + Some(v) => v, + None => None, + }; + self.iter.last().or(peek_opt) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let peek_len = match self.peeked { + Some(None) => return (0, Some(0)), + Some(Some(_)) => 1, + None => 0, + }; + let (lo, hi) = self.iter.size_hint(); + let lo = lo.saturating_add(peek_len); + let hi = hi.and_then(|x| x.checked_add(peek_len)); + (lo, hi) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Peekable {} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Peekable {} + +impl Peekable { + /// Returns a reference to the next() value without advancing the iterator. + /// + /// Like [`next()`], if there is a value, it is wrapped in a `Some(T)`. + /// But if the iteration is over, `None` is returned. + /// + /// [`next()`]: trait.Iterator.html#tymethod.next + /// + /// Because `peek()` returns a reference, and many iterators iterate over + /// references, there can be a possibly confusing situation where the + /// return value is a double reference. You can see this effect in the + /// examples below. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let xs = [1, 2, 3]; + /// + /// let mut iter = xs.iter().peekable(); + /// + /// // peek() lets us see into the future + /// assert_eq!(iter.peek(), Some(&&1)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// assert_eq!(iter.next(), Some(&2)); + /// + /// // The iterator does not advance even if we `peek` multiple times + /// assert_eq!(iter.peek(), Some(&&3)); + /// assert_eq!(iter.peek(), Some(&&3)); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// + /// // After the iterator is finished, so is `peek()` + /// assert_eq!(iter.peek(), None); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn peek(&mut self) -> Option<&I::Item> { + if self.peeked.is_none() { + self.peeked = Some(self.iter.next()); + } + match self.peeked { + Some(Some(ref value)) => Some(value), + Some(None) => None, + _ => unreachable!(), + } + } +} + +/// An iterator that rejects elements while `predicate` is true. +/// +/// This `struct` is created by the [`skip_while()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`skip_while()`]: trait.Iterator.html#method.skip_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct SkipWhile { + iter: I, + flag: bool, + predicate: P, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for SkipWhile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SkipWhile") + .field("iter", &self.iter) + .field("flag", &self.flag) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for SkipWhile + where P: FnMut(&I::Item) -> bool +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + for x in self.iter.by_ref() { + if self.flag || !(self.predicate)(&x) { + self.flag = true; + return Some(x); + } + } + None + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for SkipWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + +/// An iterator that only accepts elements while `predicate` is true. +/// +/// This `struct` is created by the [`take_while()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`take_while()`]: trait.Iterator.html#method.take_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct TakeWhile { + iter: I, + flag: bool, + predicate: P, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for TakeWhile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("TakeWhile") + .field("iter", &self.iter) + .field("flag", &self.flag) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for TakeWhile + where P: FnMut(&I::Item) -> bool +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.flag { + None + } else { + self.iter.next().and_then(|x| { + if (self.predicate)(&x) { + Some(x) + } else { + self.flag = true; + None + } + }) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for TakeWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + +/// An iterator that skips over `n` elements of `iter`. +/// +/// This `struct` is created by the [`skip()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`skip()`]: trait.Iterator.html#method.skip +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Skip { + iter: I, + n: usize +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Skip where I: Iterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.n == 0 { + self.iter.next() + } else { + let old_n = self.n; + self.n = 0; + self.iter.nth(old_n) + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + // Can't just add n + self.n due to overflow. + if self.n == 0 { + self.iter.nth(n) + } else { + let to_skip = self.n; + self.n = 0; + // nth(n) skips n+1 + if self.iter.nth(to_skip-1).is_none() { + return None; + } + self.iter.nth(n) + } + } + + #[inline] + fn count(self) -> usize { + self.iter.count().saturating_sub(self.n) + } + + #[inline] + fn last(mut self) -> Option { + if self.n == 0 { + self.iter.last() + } else { + let next = self.next(); + if next.is_some() { + // recurse. n should be 0. + self.last().or(next) + } else { + None + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.iter.size_hint(); + + let lower = lower.saturating_sub(self.n); + let upper = upper.map(|x| x.saturating_sub(self.n)); + + (lower, upper) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Skip where I: ExactSizeIterator {} + +#[stable(feature = "double_ended_skip_iterator", since = "1.8.0")] +impl DoubleEndedIterator for Skip where I: DoubleEndedIterator + ExactSizeIterator { + fn next_back(&mut self) -> Option { + if self.len() > 0 { + self.iter.next_back() + } else { + None + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Skip where I: FusedIterator {} + +/// An iterator that only iterates over the first `n` iterations of `iter`. 
+/// +/// This `struct` is created by the [`take()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`take()`]: trait.Iterator.html#method.take +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Take { + iter: I, + n: usize +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Take where I: Iterator{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + if self.n != 0 { + self.n -= 1; + self.iter.next() + } else { + None + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + if self.n > n { + self.n -= n + 1; + self.iter.nth(n) + } else { + if self.n > 0 { + self.iter.nth(self.n - 1); + self.n = 0; + } + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.iter.size_hint(); + + let lower = cmp::min(lower, self.n); + + let upper = match upper { + Some(x) if x < self.n => Some(x), + _ => Some(self.n) + }; + + (lower, upper) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Take where I: ExactSizeIterator {} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Take where I: FusedIterator {} + +/// An iterator to maintain state while iterating another iterator. +/// +/// This `struct` is created by the [`scan()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`scan()`]: trait.Iterator.html#method.scan +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Scan { + iter: I, + f: F, + state: St, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Scan { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Scan") + .field("iter", &self.iter) + .field("state", &self.state) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Scan where + I: Iterator, + F: FnMut(&mut St, I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().and_then(|a| (self.f)(&mut self.state, a)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the scan function + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Scan + where I: FusedIterator, F: FnMut(&mut St, I::Item) -> Option {} + +/// An iterator that maps each element to an iterator, and yields the elements +/// of the produced iterators. +/// +/// This `struct` is created by the [`flat_map()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`flat_map()`]: trait.Iterator.html#method.flat_map +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct FlatMap { + iter: I, + f: F, + frontiter: Option, + backiter: Option, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for FlatMap + where U::IntoIter: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FlatMap") + .field("iter", &self.iter) + .field("frontiter", &self.frontiter) + .field("backiter", &self.backiter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for FlatMap + where F: FnMut(I::Item) -> U, +{ + type Item = U::Item; + + #[inline] + fn next(&mut self) -> Option { + loop { + if let Some(ref mut inner) = self.frontiter { + if let Some(x) = inner.by_ref().next() { + return Some(x) + } + } + match self.iter.next().map(&mut self.f) { + None => return self.backiter.as_mut().and_then(|it| it.next()), + next => self.frontiter = next.map(IntoIterator::into_iter), + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); + let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); + let lo = flo.saturating_add(blo); + match (self.iter.size_hint(), fhi, bhi) { + ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)), + _ => (lo, None) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FlatMap where + F: FnMut(I::Item) -> U, + U: IntoIterator, + U::IntoIter: DoubleEndedIterator +{ + #[inline] + fn next_back(&mut self) -> Option { + loop { + if let Some(ref mut inner) = self.backiter { + if let Some(y) = inner.next_back() { + return Some(y) + } + } + match self.iter.next_back().map(&mut self.f) { + None => return self.frontiter.as_mut().and_then(|it| 
it.next_back()), + next => self.backiter = next.map(IntoIterator::into_iter), + } + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for FlatMap + where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} + +/// An iterator that yields `None` forever after the underlying iterator +/// yields `None` once. +/// +/// This `struct` is created by the [`fuse()`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`fuse()`]: trait.Iterator.html#method.fuse +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Fuse { + iter: I, + done: bool +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Fuse where I: Iterator {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Fuse where I: Iterator { + type Item = ::Item; + + #[inline] + default fn next(&mut self) -> Option<::Item> { + if self.done { + None + } else { + let next = self.iter.next(); + self.done = next.is_none(); + next + } + } + + #[inline] + default fn nth(&mut self, n: usize) -> Option { + if self.done { + None + } else { + let nth = self.iter.nth(n); + self.done = nth.is_none(); + nth + } + } + + #[inline] + default fn last(self) -> Option { + if self.done { + None + } else { + self.iter.last() + } + } + + #[inline] + default fn count(self) -> usize { + if self.done { + 0 + } else { + self.iter.count() + } + } + + #[inline] + default fn size_hint(&self) -> (usize, Option) { + if self.done { + (0, Some(0)) + } else { + self.iter.size_hint() + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { + #[inline] + default fn next_back(&mut self) -> Option<::Item> { + if self.done { + None + } else { + let next = self.iter.next_back(); + self.done = next.is_none(); + next + } + } +} + +unsafe impl 
TrustedRandomAccess for Fuse + where I: TrustedRandomAccess, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { + self.iter.get_unchecked(i) + } + + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl Iterator for Fuse where I: FusedIterator { + #[inline] + fn next(&mut self) -> Option<::Item> { + self.iter.next() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.iter.nth(n) + } + + #[inline] + fn last(self) -> Option { + self.iter.last() + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +#[unstable(feature = "fused", reason = "recently added", issue = "35602")] +impl DoubleEndedIterator for Fuse + where I: DoubleEndedIterator + FusedIterator +{ + #[inline] + fn next_back(&mut self) -> Option<::Item> { + self.iter.next_back() + } +} + + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Fuse where I: ExactSizeIterator { + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +/// An iterator that calls a function with a reference to each element before +/// yielding it. +/// +/// This `struct` is created by the [`inspect()`] method on [`Iterator`]. See its +/// documentation for more. 
+/// +/// [`inspect()`]: trait.Iterator.html#method.inspect +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Inspect { + iter: I, + f: F, +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Inspect { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Inspect") + .field("iter", &self.iter) + .finish() + } +} + +impl Inspect where F: FnMut(&I::Item) { + #[inline] + fn do_inspect(&mut self, elt: Option) -> Option { + if let Some(ref a) = elt { + (self.f)(a); + } + + elt + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Inspect where F: FnMut(&I::Item) { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + let next = self.iter.next(); + self.do_inspect(next) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Inspect + where F: FnMut(&I::Item), +{ + #[inline] + fn next_back(&mut self) -> Option { + let next = self.iter.next_back(); + self.do_inspect(next) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Inspect + where F: FnMut(&I::Item) +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Inspect + where F: FnMut(&I::Item) {} diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs new file mode 100644 index 0000000000000..e6f21d6c17ae0 --- /dev/null +++ b/src/libcore/iter/range.rs @@ -0,0 +1,678 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use mem; +use ops::{self, Add, Sub}; +use usize; + +use super::{FusedIterator, TrustedLen}; + +/// Objects that can be stepped over in both directions. +/// +/// The `steps_between` function provides a way to efficiently compare +/// two `Step` objects. +#[unstable(feature = "step_trait", + reason = "likely to be replaced by finer-grained traits", + issue = "27741")] +pub trait Step: PartialOrd + Sized { + /// Steps `self` if possible. + fn step(&self, by: &Self) -> Option; + + /// Returns the number of steps between two step objects. The count is + /// inclusive of `start` and exclusive of `end`. + /// + /// Returns `None` if it is not possible to calculate `steps_between` + /// without overflow. + fn steps_between(start: &Self, end: &Self, by: &Self) -> Option; + + /// Same as `steps_between`, but with a `by` of 1 + fn steps_between_by_one(start: &Self, end: &Self) -> Option; + + /// Tests whether this step is negative or not (going backwards) + fn is_negative(&self) -> bool; + + /// Replaces this step with `1`, returning itself + fn replace_one(&mut self) -> Self; + + /// Replaces this step with `0`, returning itself + fn replace_zero(&mut self) -> Self; + + /// Adds one to this step, returning the result + fn add_one(&self) -> Self; + + /// Subtracts one to this step, returning the result + fn sub_one(&self) -> Self; +} + +macro_rules! 
step_impl_unsigned { + ($($t:ty)*) => ($( + #[unstable(feature = "step_trait", + reason = "likely to be replaced by finer-grained traits", + issue = "27741")] + impl Step for $t { + #[inline] + fn step(&self, by: &$t) -> Option<$t> { + (*self).checked_add(*by) + } + #[inline] + #[allow(trivial_numeric_casts)] + fn steps_between(start: &$t, end: &$t, by: &$t) -> Option { + if *by == 0 { return None; } + if *start < *end { + // Note: We assume $t <= usize here + let diff = (*end - *start) as usize; + let by = *by as usize; + if diff % by > 0 { + Some(diff / by + 1) + } else { + Some(diff / by) + } + } else { + Some(0) + } + } + + #[inline] + fn is_negative(&self) -> bool { + false + } + + #[inline] + fn replace_one(&mut self) -> Self { + mem::replace(self, 0) + } + + #[inline] + fn replace_zero(&mut self) -> Self { + mem::replace(self, 1) + } + + #[inline] + fn add_one(&self) -> Self { + Add::add(*self, 1) + } + + #[inline] + fn sub_one(&self) -> Self { + Sub::sub(*self, 1) + } + + #[inline] + fn steps_between_by_one(start: &Self, end: &Self) -> Option { + Self::steps_between(start, end, &1) + } + } + )*) +} +macro_rules! step_impl_signed { + ($($t:ty)*) => ($( + #[unstable(feature = "step_trait", + reason = "likely to be replaced by finer-grained traits", + issue = "27741")] + impl Step for $t { + #[inline] + fn step(&self, by: &$t) -> Option<$t> { + (*self).checked_add(*by) + } + #[inline] + #[allow(trivial_numeric_casts)] + fn steps_between(start: &$t, end: &$t, by: &$t) -> Option { + if *by == 0 { return None; } + let diff: usize; + let by_u: usize; + if *by > 0 { + if *start >= *end { + return Some(0); + } + // Note: We assume $t <= isize here + // Use .wrapping_sub and cast to usize to compute the + // difference that may not fit inside the range of isize. 
+ diff = (*end as isize).wrapping_sub(*start as isize) as usize; + by_u = *by as usize; + } else { + if *start <= *end { + return Some(0); + } + diff = (*start as isize).wrapping_sub(*end as isize) as usize; + by_u = (*by as isize).wrapping_mul(-1) as usize; + } + if diff % by_u > 0 { + Some(diff / by_u + 1) + } else { + Some(diff / by_u) + } + } + + #[inline] + fn is_negative(&self) -> bool { + *self < 0 + } + + #[inline] + fn replace_one(&mut self) -> Self { + mem::replace(self, 0) + } + + #[inline] + fn replace_zero(&mut self) -> Self { + mem::replace(self, 1) + } + + #[inline] + fn add_one(&self) -> Self { + Add::add(*self, 1) + } + + #[inline] + fn sub_one(&self) -> Self { + Sub::sub(*self, 1) + } + + #[inline] + fn steps_between_by_one(start: &Self, end: &Self) -> Option { + Self::steps_between(start, end, &1) + } + } + )*) +} + +macro_rules! step_impl_no_between { + ($($t:ty)*) => ($( + #[unstable(feature = "step_trait", + reason = "likely to be replaced by finer-grained traits", + issue = "27741")] + impl Step for $t { + #[inline] + fn step(&self, by: &$t) -> Option<$t> { + (*self).checked_add(*by) + } + #[inline] + fn steps_between(_a: &$t, _b: &$t, _by: &$t) -> Option { + None + } + + #[inline] + #[allow(unused_comparisons)] + fn is_negative(&self) -> bool { + *self < 0 + } + + #[inline] + fn replace_one(&mut self) -> Self { + mem::replace(self, 0) + } + + #[inline] + fn replace_zero(&mut self) -> Self { + mem::replace(self, 1) + } + + #[inline] + fn add_one(&self) -> Self { + Add::add(*self, 1) + } + + #[inline] + fn sub_one(&self) -> Self { + Sub::sub(*self, 1) + } + + #[inline] + fn steps_between_by_one(start: &Self, end: &Self) -> Option { + Self::steps_between(start, end, &1) + } + } + )*) +} + +step_impl_unsigned!(usize u8 u16 u32); +step_impl_signed!(isize i8 i16 i32); +#[cfg(target_pointer_width = "64")] +step_impl_unsigned!(u64); +#[cfg(target_pointer_width = "64")] +step_impl_signed!(i64); +// If the target pointer width is not 64-bits, we +// 
assume here that it is less than 64-bits. +#[cfg(not(target_pointer_width = "64"))] +step_impl_no_between!(u64 i64); + +/// An adapter for stepping range iterators by a custom amount. +/// +/// The resulting iterator handles overflow by stopping. The `A` +/// parameter is the type being iterated over, while `R` is the range +/// type (usually one of `std::ops::{Range, RangeFrom, RangeInclusive}`. +#[derive(Clone, Debug)] +#[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] +pub struct StepBy { + step_by: A, + range: R, +} + +impl ops::RangeFrom { + /// Creates an iterator starting at the same point, but stepping by + /// the given amount at each iteration. + /// + /// # Examples + /// + /// ``` + /// #![feature(step_by)] + /// fn main() { + /// let result: Vec<_> = (0..).step_by(2).take(5).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8]); + /// } + /// ``` + #[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] + pub fn step_by(self, by: A) -> StepBy { + StepBy { + step_by: by, + range: self + } + } +} + +impl ops::Range { + /// Creates an iterator with the same range, but stepping by the + /// given amount at each iteration. + /// + /// The resulting iterator handles overflow by stopping. + /// + /// # Examples + /// + /// ``` + /// #![feature(step_by)] + /// fn main() { + /// let result: Vec<_> = (0..10).step_by(2).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8]); + /// } + /// ``` + #[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] + pub fn step_by(self, by: A) -> StepBy { + StepBy { + step_by: by, + range: self + } + } +} + +impl ops::RangeInclusive { + /// Creates an iterator with the same range, but stepping by the + /// given amount at each iteration. + /// + /// The resulting iterator handles overflow by stopping. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(step_by, inclusive_range_syntax)] + /// + /// let result: Vec<_> = (0...10).step_by(2).collect(); + /// assert_eq!(result, vec![0, 2, 4, 6, 8, 10]); + /// ``` + #[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] + pub fn step_by(self, by: A) -> StepBy { + StepBy { + step_by: by, + range: self + } + } +} + +#[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] +impl Iterator for StepBy> where + A: Clone, + for<'a> &'a A: Add<&'a A, Output = A> +{ + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + let mut n = &self.range.start + &self.step_by; + mem::swap(&mut n, &mut self.range.start); + Some(n) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (usize::MAX, None) // Too bad we can't specify an infinite lower bound + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> + where A: Clone, for<'a> &'a A: Add<&'a A, Output = A> {} + +#[unstable(feature = "step_by", reason = "recent addition", + issue = "27741")] +impl Iterator for StepBy> { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + let rev = self.step_by.is_negative(); + if (rev && self.range.start > self.range.end) || + (!rev && self.range.start < self.range.end) + { + match self.range.start.step(&self.step_by) { + Some(mut n) => { + mem::swap(&mut self.range.start, &mut n); + Some(n) + }, + None => { + let mut n = self.range.end.clone(); + mem::swap(&mut self.range.start, &mut n); + Some(n) + } + } + } else { + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + match Step::steps_between(&self.range.start, + &self.range.end, + &self.step_by) { + Some(hint) => (hint, Some(hint)), + None => (0, None) + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> {} + +#[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] +impl 
Iterator for StepBy> { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + use ops::RangeInclusive::*; + + // this function has a sort of odd structure due to borrowck issues + // we may need to replace self.range, so borrows of start and end need to end early + + let (finishing, n) = match self.range { + Empty { .. } => return None, // empty iterators yield no values + + NonEmpty { ref mut start, ref mut end } => { + let rev = self.step_by.is_negative(); + + // march start towards (maybe past!) end and yield the old value + if (rev && start >= end) || + (!rev && start <= end) + { + match start.step(&self.step_by) { + Some(mut n) => { + mem::swap(start, &mut n); + (None, Some(n)) // yield old value, remain non-empty + }, + None => { + let mut n = end.clone(); + mem::swap(start, &mut n); + (None, Some(n)) // yield old value, remain non-empty + } + } + } else { + // found range in inconsistent state (start at or past end), so become empty + (Some(end.replace_zero()), None) + } + } + }; + + // turn into an empty iterator if we've reached the end + if let Some(end) = finishing { + self.range = Empty { at: end }; + } + + n + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + use ops::RangeInclusive::*; + + match self.range { + Empty { .. } => (0, Some(0)), + + NonEmpty { ref start, ref end } => + match Step::steps_between(start, + end, + &self.step_by) { + Some(hint) => (hint.saturating_add(1), hint.checked_add(1)), + None => (0, None) + } + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for StepBy> {} + +macro_rules! range_exact_iter_impl { + ($($t:ty)*) => ($( + #[stable(feature = "rust1", since = "1.0.0")] + impl ExactSizeIterator for ops::Range<$t> { } + )*) +} + +macro_rules! range_incl_exact_iter_impl { + ($($t:ty)*) => ($( + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + impl ExactSizeIterator for ops::RangeInclusive<$t> { } + )*) +} + +macro_rules! 
range_trusted_len_impl { + ($($t:ty)*) => ($( + #[unstable(feature = "trusted_len", issue = "37572")] + unsafe impl TrustedLen for ops::Range<$t> { } + )*) +} + +macro_rules! range_incl_trusted_len_impl { + ($($t:ty)*) => ($( + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + unsafe impl TrustedLen for ops::RangeInclusive<$t> { } + )*) +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for ops::Range where + for<'a> &'a A: Add<&'a A, Output = A> +{ + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + if self.start < self.end { + let mut n = self.start.add_one(); + mem::swap(&mut n, &mut self.start); + Some(n) + } else { + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + match Step::steps_between_by_one(&self.start, &self.end) { + Some(hint) => (hint, Some(hint)), + None => (0, None) + } + } +} + +// These macros generate `ExactSizeIterator` impls for various range types. +// Range<{u,i}64> and RangeInclusive<{u,i}{32,64,size}> are excluded +// because they cannot guarantee having a length <= usize::MAX, which is +// required by ExactSizeIterator. +range_exact_iter_impl!(usize u8 u16 u32 isize i8 i16 i32); +range_incl_exact_iter_impl!(u8 u16 i8 i16); + +// These macros generate `TrustedLen` impls. +// +// They need to guarantee that .size_hint() is either exact, or that +// the upper bound is None when it does not fit the type limits. 
+range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 i64 u64); +range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 i64 u64); + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for ops::Range where + for<'a> &'a A: Add<&'a A, Output = A>, + for<'a> &'a A: Sub<&'a A, Output = A> +{ + #[inline] + fn next_back(&mut self) -> Option { + if self.start < self.end { + self.end = self.end.sub_one(); + Some(self.end.clone()) + } else { + None + } + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::Range + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for ops::RangeFrom where + for<'a> &'a A: Add<&'a A, Output = A> +{ + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + let mut n = self.start.add_one(); + mem::swap(&mut n, &mut self.start); + Some(n) + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::RangeFrom + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} + +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl Iterator for ops::RangeInclusive where + for<'a> &'a A: Add<&'a A, Output = A> +{ + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + use ops::RangeInclusive::*; + + // this function has a sort of odd structure due to borrowck issues + // we may need to replace self, so borrows of self.start and self.end need to end early + + let (finishing, n) = match *self { + Empty { .. 
} => (None, None), // empty iterators yield no values + + NonEmpty { ref mut start, ref mut end } => { + if start == end { + (Some(end.replace_one()), Some(start.replace_one())) + } else if start < end { + let mut n = start.add_one(); + mem::swap(&mut n, start); + + // if the iterator is done iterating, it will change from + // NonEmpty to Empty to avoid unnecessary drops or clones, + // we'll reuse either start or end (they are equal now, so + // it doesn't matter which) to pull out end, we need to swap + // something back in + + (if n == *end { Some(end.replace_one()) } else { None }, + // ^ are we done yet? + Some(n)) // < the value to output + } else { + (Some(start.replace_one()), None) + } + } + }; + + // turn into an empty iterator if this is the last value + if let Some(end) = finishing { + *self = Empty { at: end }; + } + + n + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + use ops::RangeInclusive::*; + + match *self { + Empty { .. } => (0, Some(0)), + + NonEmpty { ref start, ref end } => + match Step::steps_between_by_one(start, end) { + Some(hint) => (hint.saturating_add(1), hint.checked_add(1)), + None => (0, None), + } + } + } +} + +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl DoubleEndedIterator for ops::RangeInclusive where + for<'a> &'a A: Add<&'a A, Output = A>, + for<'a> &'a A: Sub<&'a A, Output = A> +{ + #[inline] + fn next_back(&mut self) -> Option { + use ops::RangeInclusive::*; + + // see Iterator::next for comments + + let (finishing, n) = match *self { + Empty { .. 
} => return None, + + NonEmpty { ref mut start, ref mut end } => { + if start == end { + (Some(start.replace_one()), Some(end.replace_one())) + } else if start < end { + let mut n = end.sub_one(); + mem::swap(&mut n, end); + + (if n == *start { Some(start.replace_one()) } else { None }, + Some(n)) + } else { + (Some(end.replace_one()), None) + } + } + }; + + if let Some(start) = finishing { + *self = Empty { at: start }; + } + + n + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ops::RangeInclusive + where A: Step, for<'a> &'a A: Add<&'a A, Output = A> {} diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs new file mode 100644 index 0000000000000..da346eaf1db96 --- /dev/null +++ b/src/libcore/iter/sources.rs @@ -0,0 +1,276 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use fmt; +use marker; +use usize; + +use super::FusedIterator; + +/// An iterator that repeats an element endlessly. +/// +/// This `struct` is created by the [`repeat()`] function. See its documentation for more. 
+/// +/// [`repeat()`]: fn.repeat.html +#[derive(Clone, Debug)] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Repeat { + element: A +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Repeat { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { Some(self.element.clone()) } + #[inline] + fn size_hint(&self) -> (usize, Option) { (usize::MAX, None) } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Repeat { + #[inline] + fn next_back(&mut self) -> Option { Some(self.element.clone()) } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Repeat {} + +/// Creates a new iterator that endlessly repeats a single element. +/// +/// The `repeat()` function repeats a single value over and over and over and +/// over and over and 🔁. +/// +/// Infinite iterators like `repeat()` are often used with adapters like +/// [`take()`], in order to make them finite. +/// +/// [`take()`]: trait.Iterator.html#method.take +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // the number four 4ever: +/// let mut fours = iter::repeat(4); +/// +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// assert_eq!(Some(4), fours.next()); +/// +/// // yup, still four +/// assert_eq!(Some(4), fours.next()); +/// ``` +/// +/// Going finite with [`take()`]: +/// +/// ``` +/// use std::iter; +/// +/// // that last example was too many fours. Let's only have four fours. +/// let mut four_fours = iter::repeat(4).take(4); +/// +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// assert_eq!(Some(4), four_fours.next()); +/// +/// // ... 
and now we're done +/// assert_eq!(None, four_fours.next()); +/// ``` +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn repeat(elt: T) -> Repeat { + Repeat{element: elt} +} + +/// An iterator that yields nothing. +/// +/// This `struct` is created by the [`empty()`] function. See its documentation for more. +/// +/// [`empty()`]: fn.empty.html +#[stable(feature = "iter_empty", since = "1.2.0")] +pub struct Empty(marker::PhantomData); + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Empty { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad("Empty") + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Iterator for Empty { + type Item = T; + + fn next(&mut self) -> Option { + None + } + + fn size_hint(&self) -> (usize, Option){ + (0, Some(0)) + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl DoubleEndedIterator for Empty { + fn next_back(&mut self) -> Option { + None + } +} + +#[stable(feature = "iter_empty", since = "1.2.0")] +impl ExactSizeIterator for Empty { + fn len(&self) -> usize { + 0 + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Empty {} + +// not #[derive] because that adds a Clone bound on T, +// which isn't necessary. +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Clone for Empty { + fn clone(&self) -> Empty { + Empty(marker::PhantomData) + } +} + +// not #[derive] because that adds a Default bound on T, +// which isn't necessary. +#[stable(feature = "iter_empty", since = "1.2.0")] +impl Default for Empty { + fn default() -> Empty { + Empty(marker::PhantomData) + } +} + +/// Creates an iterator that yields nothing. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // this could have been an iterator over i32, but alas, it's just not. 
+/// let mut nope = iter::empty::(); +/// +/// assert_eq!(None, nope.next()); +/// ``` +#[stable(feature = "iter_empty", since = "1.2.0")] +pub fn empty() -> Empty { + Empty(marker::PhantomData) +} + +/// An iterator that yields an element exactly once. +/// +/// This `struct` is created by the [`once()`] function. See its documentation for more. +/// +/// [`once()`]: fn.once.html +#[derive(Clone, Debug)] +#[stable(feature = "iter_once", since = "1.2.0")] +pub struct Once { + inner: ::option::IntoIter +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl Iterator for Once { + type Item = T; + + fn next(&mut self) -> Option { + self.inner.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl DoubleEndedIterator for Once { + fn next_back(&mut self) -> Option { + self.inner.next_back() + } +} + +#[stable(feature = "iter_once", since = "1.2.0")] +impl ExactSizeIterator for Once { + fn len(&self) -> usize { + self.inner.len() + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Once {} + +/// Creates an iterator that yields an element exactly once. +/// +/// This is commonly used to adapt a single value into a [`chain()`] of other +/// kinds of iteration. Maybe you have an iterator that covers almost +/// everything, but you need an extra special case. Maybe you have a function +/// which works on iterators, but you only need to process one value. +/// +/// [`chain()`]: trait.Iterator.html#method.chain +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter; +/// +/// // one is the loneliest number +/// let mut one = iter::once(1); +/// +/// assert_eq!(Some(1), one.next()); +/// +/// // just one, that's all we get +/// assert_eq!(None, one.next()); +/// ``` +/// +/// Chaining together with another iterator. 
Let's say that we want to iterate +/// over each file of the `.foo` directory, but also a configuration file, +/// `.foorc`: +/// +/// ```no_run +/// use std::iter; +/// use std::fs; +/// use std::path::PathBuf; +/// +/// let dirs = fs::read_dir(".foo").unwrap(); +/// +/// // we need to convert from an iterator of DirEntry-s to an iterator of +/// // PathBufs, so we use map +/// let dirs = dirs.map(|file| file.unwrap().path()); +/// +/// // now, our iterator just for our config file +/// let config = iter::once(PathBuf::from(".foorc")); +/// +/// // chain the two iterators together into one big iterator +/// let files = dirs.chain(config); +/// +/// // this will give us all of the files in .foo as well as .foorc +/// for f in files { +/// println!("{:?}", f); +/// } +/// ``` +#[stable(feature = "iter_once", since = "1.2.0")] +pub fn once(value: T) -> Once { + Once { inner: Some(value).into_iter() } +} diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs new file mode 100644 index 0000000000000..e94582cda7c34 --- /dev/null +++ b/src/libcore/iter/traits.rs @@ -0,0 +1,707 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +use ops::{Mul, Add}; +use num::Wrapping; + +/// Conversion from an `Iterator`. +/// +/// By implementing `FromIterator` for a type, you define how it will be +/// created from an iterator. This is common for types which describe a +/// collection of some kind. +/// +/// `FromIterator`'s [`from_iter()`] is rarely called explicitly, and is instead +/// used through [`Iterator`]'s [`collect()`] method. See [`collect()`]'s +/// documentation for more examples. 
+/// +/// [`from_iter()`]: #tymethod.from_iter +/// [`Iterator`]: trait.Iterator.html +/// [`collect()`]: trait.Iterator.html#method.collect +/// +/// See also: [`IntoIterator`]. +/// +/// [`IntoIterator`]: trait.IntoIterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter::FromIterator; +/// +/// let five_fives = std::iter::repeat(5).take(5); +/// +/// let v = Vec::from_iter(five_fives); +/// +/// assert_eq!(v, vec![5, 5, 5, 5, 5]); +/// ``` +/// +/// Using [`collect()`] to implicitly use `FromIterator`: +/// +/// ``` +/// let five_fives = std::iter::repeat(5).take(5); +/// +/// let v: Vec = five_fives.collect(); +/// +/// assert_eq!(v, vec![5, 5, 5, 5, 5]); +/// ``` +/// +/// Implementing `FromIterator` for your type: +/// +/// ``` +/// use std::iter::FromIterator; +/// +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. +/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // and we'll implement FromIterator +/// impl FromIterator for MyCollection { +/// fn from_iter>(iter: I) -> Self { +/// let mut c = MyCollection::new(); +/// +/// for i in iter { +/// c.add(i); +/// } +/// +/// c +/// } +/// } +/// +/// // Now we can make a new iterator... +/// let iter = (0..5).into_iter(); +/// +/// // ... and make a MyCollection out of it +/// let c = MyCollection::from_iter(iter); +/// +/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); +/// +/// // collect works too! 
+/// +/// let iter = (0..5).into_iter(); +/// let c: MyCollection = iter.collect(); +/// +/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_on_unimplemented="a collection of type `{Self}` cannot be \ + built from an iterator over elements of type `{A}`"] +pub trait FromIterator: Sized { + /// Creates a value from an iterator. + /// + /// See the [module-level documentation] for more. + /// + /// [module-level documentation]: trait.FromIterator.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::iter::FromIterator; + /// + /// let five_fives = std::iter::repeat(5).take(5); + /// + /// let v = Vec::from_iter(five_fives); + /// + /// assert_eq!(v, vec![5, 5, 5, 5, 5]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn from_iter>(iter: T) -> Self; +} + +/// Conversion into an `Iterator`. +/// +/// By implementing `IntoIterator` for a type, you define how it will be +/// converted to an iterator. This is common for types which describe a +/// collection of some kind. +/// +/// One benefit of implementing `IntoIterator` is that your type will [work +/// with Rust's `for` loop syntax](index.html#for-loops-and-intoiterator). +/// +/// See also: [`FromIterator`]. +/// +/// [`FromIterator`]: trait.FromIterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let v = vec![1, 2, 3]; +/// +/// let mut iter = v.into_iter(); +/// +/// let n = iter.next(); +/// assert_eq!(Some(1), n); +/// +/// let n = iter.next(); +/// assert_eq!(Some(2), n); +/// +/// let n = iter.next(); +/// assert_eq!(Some(3), n); +/// +/// let n = iter.next(); +/// assert_eq!(None, n); +/// ``` +/// +/// Implementing `IntoIterator` for your type: +/// +/// ``` +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. 
+/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // and we'll implement IntoIterator +/// impl IntoIterator for MyCollection { +/// type Item = i32; +/// type IntoIter = ::std::vec::IntoIter; +/// +/// fn into_iter(self) -> Self::IntoIter { +/// self.0.into_iter() +/// } +/// } +/// +/// // Now we can make a new collection... +/// let mut c = MyCollection::new(); +/// +/// // ... add some stuff to it ... +/// c.add(0); +/// c.add(1); +/// c.add(2); +/// +/// // ... and then turn it into an Iterator: +/// for (i, n) in c.into_iter().enumerate() { +/// assert_eq!(i as i32, n); +/// } +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait IntoIterator { + /// The type of the elements being iterated over. + #[stable(feature = "rust1", since = "1.0.0")] + type Item; + + /// Which kind of iterator are we turning this into? + #[stable(feature = "rust1", since = "1.0.0")] + type IntoIter: Iterator; + + /// Creates an iterator from a value. + /// + /// See the [module-level documentation] for more. + /// + /// [module-level documentation]: trait.IntoIterator.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v = vec![1, 2, 3]; + /// + /// let mut iter = v.into_iter(); + /// + /// let n = iter.next(); + /// assert_eq!(Some(1), n); + /// + /// let n = iter.next(); + /// assert_eq!(Some(2), n); + /// + /// let n = iter.next(); + /// assert_eq!(Some(3), n); + /// + /// let n = iter.next(); + /// assert_eq!(None, n); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn into_iter(self) -> Self::IntoIter; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl IntoIterator for I { + type Item = I::Item; + type IntoIter = I; + + fn into_iter(self) -> I { + self + } +} + +/// Extend a collection with the contents of an iterator. 
+/// +/// Iterators produce a series of values, and collections can also be thought +/// of as a series of values. The `Extend` trait bridges this gap, allowing you +/// to extend a collection by including the contents of that iterator. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // You can extend a String with some chars: +/// let mut message = String::from("The first three letters are: "); +/// +/// message.extend(&['a', 'b', 'c']); +/// +/// assert_eq!("abc", &message[29..32]); +/// ``` +/// +/// Implementing `Extend`: +/// +/// ``` +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. +/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // since MyCollection has a list of i32s, we implement Extend for i32 +/// impl Extend for MyCollection { +/// +/// // This is a bit simpler with the concrete type signature: we can call +/// // extend on anything which can be turned into an Iterator which gives +/// // us i32s. Because we need i32s to put into MyCollection. +/// fn extend>(&mut self, iter: T) { +/// +/// // The implementation is very straightforward: loop through the +/// // iterator, and add() each element to ourselves. +/// for elem in iter { +/// self.add(elem); +/// } +/// } +/// } +/// +/// let mut c = MyCollection::new(); +/// +/// c.add(5); +/// c.add(6); +/// c.add(7); +/// +/// // let's extend our collection with three more numbers +/// c.extend(vec![1, 2, 3]); +/// +/// // we've added these elements onto the end +/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c)); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait Extend { + /// Extends a collection with the contents of an iterator. 
+ /// + /// As this is the only method for this trait, the [trait-level] docs + /// contain more details. + /// + /// [trait-level]: trait.Extend.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // You can extend a String with some chars: + /// let mut message = String::from("abc"); + /// + /// message.extend(['d', 'e', 'f'].iter()); + /// + /// assert_eq!("abcdef", &message); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn extend>(&mut self, iter: T); +} + +/// An iterator able to yield elements from both ends. +/// +/// Something that implements `DoubleEndedIterator` has one extra capability +/// over something that implements [`Iterator`]: the ability to also take +/// `Item`s from the back, as well as the front. +/// +/// It is important to note that both back and forth work on the same range, +/// and do not cross: iteration is over when they meet in the middle. +/// +/// In a similar fashion to the [`Iterator`] protocol, once a +/// `DoubleEndedIterator` returns `None` from a `next_back()`, calling it again +/// may or may not ever return `Some` again. `next()` and `next_back()` are +/// interchangable for this purpose. +/// +/// [`Iterator`]: trait.Iterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let numbers = vec![1, 2, 3, 4, 5, 6]; +/// +/// let mut iter = numbers.iter(); +/// +/// assert_eq!(Some(&1), iter.next()); +/// assert_eq!(Some(&6), iter.next_back()); +/// assert_eq!(Some(&5), iter.next_back()); +/// assert_eq!(Some(&2), iter.next()); +/// assert_eq!(Some(&3), iter.next()); +/// assert_eq!(Some(&4), iter.next()); +/// assert_eq!(None, iter.next()); +/// assert_eq!(None, iter.next_back()); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait DoubleEndedIterator: Iterator { + /// Removes and returns an element from the end of the iterator. + /// + /// Returns `None` when there are no more elements. + /// + /// The [trait-level] docs contain more details. 
+ /// + /// [trait-level]: trait.DoubleEndedIterator.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let numbers = vec![1, 2, 3, 4, 5, 6]; + /// + /// let mut iter = numbers.iter(); + /// + /// assert_eq!(Some(&1), iter.next()); + /// assert_eq!(Some(&6), iter.next_back()); + /// assert_eq!(Some(&5), iter.next_back()); + /// assert_eq!(Some(&2), iter.next()); + /// assert_eq!(Some(&3), iter.next()); + /// assert_eq!(Some(&4), iter.next()); + /// assert_eq!(None, iter.next()); + /// assert_eq!(None, iter.next_back()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn next_back(&mut self) -> Option; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I { + fn next_back(&mut self) -> Option { (**self).next_back() } +} + +/// An iterator that knows its exact length. +/// +/// Many [`Iterator`]s don't know how many times they will iterate, but some do. +/// If an iterator knows how many times it can iterate, providing access to +/// that information can be useful. For example, if you want to iterate +/// backwards, a good start is to know where the end is. +/// +/// When implementing an `ExactSizeIterator`, You must also implement +/// [`Iterator`]. When doing so, the implementation of [`size_hint()`] *must* +/// return the exact size of the iterator. +/// +/// [`Iterator`]: trait.Iterator.html +/// [`size_hint()`]: trait.Iterator.html#method.size_hint +/// +/// The [`len()`] method has a default implementation, so you usually shouldn't +/// implement it. However, you may be able to provide a more performant +/// implementation than the default, so overriding it in this case makes sense. 
+/// +/// [`len()`]: #method.len +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // a finite range knows exactly how many times it will iterate +/// let five = 0..5; +/// +/// assert_eq!(5, five.len()); +/// ``` +/// +/// In the [module level docs][moddocs], we implemented an [`Iterator`], +/// `Counter`. Let's implement `ExactSizeIterator` for it as well: +/// +/// [moddocs]: index.html +/// +/// ``` +/// # struct Counter { +/// # count: usize, +/// # } +/// # impl Counter { +/// # fn new() -> Counter { +/// # Counter { count: 0 } +/// # } +/// # } +/// # impl Iterator for Counter { +/// # type Item = usize; +/// # fn next(&mut self) -> Option { +/// # self.count += 1; +/// # if self.count < 6 { +/// # Some(self.count) +/// # } else { +/// # None +/// # } +/// # } +/// # } +/// impl ExactSizeIterator for Counter { +/// // We already have the number of iterations, so we can use it directly. +/// fn len(&self) -> usize { +/// self.count +/// } +/// } +/// +/// // And now we can use it! +/// +/// let counter = Counter::new(); +/// +/// assert_eq!(0, counter.len()); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait ExactSizeIterator: Iterator { + /// Returns the exact number of times the iterator will iterate. + /// + /// This method has a default implementation, so you usually should not + /// implement it directly. However, if you can provide a more efficient + /// implementation, you can do so. See the [trait-level] docs for an + /// example. + /// + /// This function has the same safety guarantees as the [`size_hint()`] + /// function. 
+ /// + /// [trait-level]: trait.ExactSizeIterator.html + /// [`size_hint()`]: trait.Iterator.html#method.size_hint + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // a finite range knows exactly how many times it will iterate + /// let five = 0..5; + /// + /// assert_eq!(5, five.len()); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn len(&self) -> usize { + let (lower, upper) = self.size_hint(); + // Note: This assertion is overly defensive, but it checks the invariant + // guaranteed by the trait. If this trait were rust-internal, + // we could use debug_assert!; assert_eq! will check all Rust user + // implementations too. + assert_eq!(upper, Some(lower)); + lower + } + + /// Returns whether the iterator is empty. + /// + /// This method has a default implementation using `self.len()`, so you + /// don't need to implement it yourself. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_size_is_empty)] + /// + /// let mut one_element = 0..1; + /// assert!(!one_element.is_empty()); + /// + /// assert_eq!(one_element.next(), Some(0)); + /// assert!(one_element.is_empty()); + /// + /// assert_eq!(one_element.next(), None); + /// ``` + #[inline] + #[unstable(feature = "exact_size_is_empty", issue = "35428")] + fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for &'a mut I {} + +/// Trait to represent types that can be created by summing up an iterator. +/// +/// This trait is used to implement the [`sum()`] method on iterators. Types which +/// implement the trait can be generated by the [`sum()`] method. Like +/// [`FromIterator`] this trait should rarely be called directly and instead +/// interacted with through [`Iterator::sum()`]. 
+/// +/// [`sum()`]: ../../std/iter/trait.Sum.html#tymethod.sum +/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html +/// [`Iterator::sum()`]: ../../std/iter/trait.Iterator.html#method.sum +#[stable(feature = "iter_arith_traits", since = "1.12.0")] +pub trait Sum: Sized { + /// Method which takes an iterator and generates `Self` from the elements by + /// "summing up" the items. + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + fn sum>(iter: I) -> Self; +} + +/// Trait to represent types that can be created by multiplying elements of an +/// iterator. +/// +/// This trait is used to implement the [`product()`] method on iterators. Types +/// which implement the trait can be generated by the [`product()`] method. Like +/// [`FromIterator`] this trait should rarely be called directly and instead +/// interacted with through [`Iterator::product()`]. +/// +/// [`product()`]: ../../std/iter/trait.Product.html#tymethod.product +/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html +/// [`Iterator::product()`]: ../../std/iter/trait.Iterator.html#method.product +#[stable(feature = "iter_arith_traits", since = "1.12.0")] +pub trait Product: Sized { + /// Method which takes an iterator and generates `Self` from the elements by + /// multiplying the items. + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + fn product>(iter: I) -> Self; +} + +// NB: explicitly use Add and Mul here to inherit overflow checks +macro_rules! 
integer_sum_product { + (@impls $zero:expr, $one:expr, $($a:ty)*) => ($( + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Sum for $a { + fn sum>(iter: I) -> $a { + iter.fold($zero, Add::add) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Product for $a { + fn product>(iter: I) -> $a { + iter.fold($one, Mul::mul) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Sum<&'a $a> for $a { + fn sum>(iter: I) -> $a { + iter.fold($zero, Add::add) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Product<&'a $a> for $a { + fn product>(iter: I) -> $a { + iter.fold($one, Mul::mul) + } + } + )*); + ($($a:ty)*) => ( + integer_sum_product!(@impls 0, 1, $($a)+); + integer_sum_product!(@impls Wrapping(0), Wrapping(1), $(Wrapping<$a>)+); + ); +} + +macro_rules! float_sum_product { + ($($a:ident)*) => ($( + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Sum for $a { + fn sum>(iter: I) -> $a { + iter.fold(0.0, |a, b| a + b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Product for $a { + fn product>(iter: I) -> $a { + iter.fold(1.0, |a, b| a * b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Sum<&'a $a> for $a { + fn sum>(iter: I) -> $a { + iter.fold(0.0, |a, b| a + *b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Product<&'a $a> for $a { + fn product>(iter: I) -> $a { + iter.fold(1.0, |a, b| a * *b) + } + } + )*) +} + +integer_sum_product! { i8 i16 i32 i64 isize u8 u16 u32 u64 usize } +float_sum_product! { f32 f64 } + +/// An iterator that always continues to yield `None` when exhausted. +/// +/// Calling next on a fused iterator that has returned `None` once is guaranteed +/// to return [`None`] again. This trait is should be implemented by all iterators +/// that behave this way because it allows for some significant optimizations. 
+/// +/// Note: In general, you should not use `FusedIterator` in generic bounds if +/// you need a fused iterator. Instead, you should just call [`Iterator::fuse()`] +/// on the iterator. If the iterator is already fused, the additional [`Fuse`] +/// wrapper will be a no-op with no performance penalty. +/// +/// [`None`]: ../../std/option/enum.Option.html#variant.None +/// [`Iterator::fuse()`]: ../../std/iter/trait.Iterator.html#method.fuse +/// [`Fuse`]: ../../std/iter/struct.Fuse.html +#[unstable(feature = "fused", issue = "35602")] +pub trait FusedIterator: Iterator {} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, I: FusedIterator + ?Sized> FusedIterator for &'a mut I {} + +/// An iterator that reports an accurate length using size_hint. +/// +/// The iterator reports a size hint where it is either exact +/// (lower bound is equal to upper bound), or the upper bound is [`None`]. +/// The upper bound must only be [`None`] if the actual iterator length is +/// larger than [`usize::MAX`]. +/// +/// The iterator must produce exactly the number of elements it reported. +/// +/// # Safety +/// +/// This trait must only be implemented when the contract is upheld. +/// Consumers of this trait must inspect [`.size_hint()`]’s upper bound. +/// +/// [`None`]: ../../std/option/enum.Option.html#variant.None +/// [`usize::MAX`]: ../../std/usize/constant.MAX.html +/// [`.size_hint()`]: ../../std/iter/trait.Iterator.html#method.size_hint +#[unstable(feature = "trusted_len", issue = "37572")] +pub unsafe trait TrustedLen : Iterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, I: TrustedLen + ?Sized> TrustedLen for &'a mut I {} diff --git a/src/libcore/iter_private.rs b/src/libcore/iter_private.rs new file mode 100644 index 0000000000000..bc1aaa09f3dbd --- /dev/null +++ b/src/libcore/iter_private.rs @@ -0,0 +1,28 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +/// An iterator whose items are random accessible efficiently +/// +/// # Safety +/// +/// The iterator's .len() and size_hint() must be exact. +/// `.len()` must be cheap to call. +/// +/// .get_unchecked() must return distinct mutable references for distinct +/// indices (if applicable), and must return a valid reference if index is in +/// 0..self.len(). +#[doc(hidden)] +pub unsafe trait TrustedRandomAccess : ExactSizeIterator { + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item; + /// Return `true` if getting an iterator element may have + /// side effects. Remember to take inner iterators into account. + fn may_have_side_effect() -> bool; +} diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index e8803976937d2..9834fca5fdc78 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -25,6 +25,8 @@ //! //! # How to use the core library //! +//! Please note that all of these details are currently not considered stable. +//! // FIXME: Fill me in with more detail when the interface settles //! This library is built on the assumption of a few existing symbols: //! @@ -34,11 +36,17 @@ //! These functions are often provided by the system libc, but can also be //! provided by the [rlibc crate](https://crates.io/crates/rlibc). //! -//! * `rust_begin_unwind` - This function takes three arguments, a -//! `fmt::Arguments`, a `&str`, and a `u32`. These three arguments dictate -//! the panic message, the file at which panic was invoked, and the line. -//! It is up to consumers of this core library to define this panic -//! function; it is only required to never return. +//! * `rust_begin_panic` - This function takes three arguments, a +//! 
`fmt::Arguments`, a `&'static str`, and a `u32`. These three arguments +//! dictate the panic message, the file at which panic was invoked, and the +//! line. It is up to consumers of this core library to define this panic +//! function; it is only required to never return. This requires a `lang` +//! attribute named `panic_fmt`. +//! +//! * `rust_eh_personality` - is used by the failure mechanisms of the +//! compiler. This is often mapped to GCC's personality function, but crates +//! which do not trigger a panic can be assured that this function is never +//! called. The `lang` attribute is called `eh_personality`. // Since libcore defines many fundamental lang items, all tests live in a // separate crate, libcoretest, to avoid bizarre issues. @@ -56,13 +64,19 @@ #![no_core] #![deny(missing_docs)] +#![deny(missing_debug_implementations)] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(allow_internal_unstable)] +#![feature(asm)] #![feature(associated_type_defaults)] +#![feature(cfg_target_feature)] #![feature(concat_idents)] #![feature(const_fn)] +#![feature(cfg_target_has_atomic)] #![feature(custom_attribute)] #![feature(fundamental)] +#![feature(inclusive_range_syntax)] #![feature(intrinsics)] #![feature(lang_items)] #![feature(no_core)] @@ -71,12 +85,23 @@ #![feature(reflect)] #![feature(unwind_attributes)] #![feature(repr_simd, platform_intrinsics)] +#![feature(rustc_attrs)] +#![feature(specialization)] #![feature(staged_api)] #![feature(unboxed_closures)] +#![feature(never_type)] +#![feature(prelude_import)] + +#[prelude_import] +#[allow(unused)] +use prelude::v1::*; #[macro_use] mod macros; +#[macro_use] +mod internal_macros; + #[path = "num/float_macros.rs"] #[macro_use] mod float_macros; @@ -89,17 +114,17 @@ mod int_macros; #[macro_use] mod uint_macros; -#[path = "num/isize.rs"] pub mod isize; -#[path = "num/i8.rs"] pub mod i8; -#[path = "num/i16.rs"] pub mod i16; -#[path = "num/i32.rs"] pub mod i32; -#[path = "num/i64.rs"] pub mod i64; +#[path = 
"num/isize.rs"] pub mod isize; +#[path = "num/i8.rs"] pub mod i8; +#[path = "num/i16.rs"] pub mod i16; +#[path = "num/i32.rs"] pub mod i32; +#[path = "num/i64.rs"] pub mod i64; #[path = "num/usize.rs"] pub mod usize; -#[path = "num/u8.rs"] pub mod u8; -#[path = "num/u16.rs"] pub mod u16; -#[path = "num/u32.rs"] pub mod u32; -#[path = "num/u64.rs"] pub mod u64; +#[path = "num/u8.rs"] pub mod u8; +#[path = "num/u16.rs"] pub mod u16; +#[path = "num/u32.rs"] pub mod u32; +#[path = "num/u64.rs"] pub mod u64; #[path = "num/f32.rs"] pub mod f32; #[path = "num/f64.rs"] pub mod f64; @@ -147,4 +172,6 @@ pub mod hash; pub mod fmt; // note: does not need to be public +mod char_private; +mod iter_private; mod tuple; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index 154ca30c62dd1..b3f5363f5b15a 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -35,8 +35,20 @@ macro_rules! panic { /// This will invoke the `panic!` macro if the provided expression cannot be /// evaluated to `true` at runtime. /// +/// Assertions are always checked in both debug and release builds, and cannot +/// be disabled. See `debug_assert!` for assertions that are not enabled in +/// release builds by default. +/// +/// Unsafe code relies on `assert!` to enforce run-time invariants that, if +/// violated could lead to unsafety. +/// +/// Other use-cases of `assert!` include [testing] and enforcing run-time +/// invariants in safe code (whose violation cannot result in unsafety). +/// /// This macro has a second version, where a custom panic message can be provided. /// +/// [testing]: ../book/testing.html +/// /// # Examples /// /// ``` @@ -86,15 +98,64 @@ macro_rules! assert { #[stable(feature = "rust1", since = "1.0.0")] macro_rules! 
assert_eq { ($left:expr , $right:expr) => ({ + match (&$left, &$right) { + (left_val, right_val) => { + if !(*left_val == *right_val) { + panic!("assertion failed: `(left == right)` \ + (left: `{:?}`, right: `{:?}`)", left_val, right_val) + } + } + } + }); + ($left:expr , $right:expr, $($arg:tt)*) => ({ match (&($left), &($right)) { (left_val, right_val) => { if !(*left_val == *right_val) { panic!("assertion failed: `(left == right)` \ + (left: `{:?}`, right: `{:?}`): {}", left_val, right_val, + format_args!($($arg)*)) + } + } + } + }); +} + +/// Asserts that two expressions are not equal to each other. +/// +/// On panic, this macro will print the values of the expressions with their +/// debug representations. +/// +/// # Examples +/// +/// ``` +/// let a = 3; +/// let b = 2; +/// assert_ne!(a, b); +/// ``` +#[macro_export] +#[stable(feature = "assert_ne", since = "1.12.0")] +macro_rules! assert_ne { + ($left:expr , $right:expr) => ({ + match (&$left, &$right) { + (left_val, right_val) => { + if *left_val == *right_val { + panic!("assertion failed: `(left != right)` \ (left: `{:?}`, right: `{:?}`)", left_val, right_val) } } } - }) + }); + ($left:expr , $right:expr, $($arg:tt)*) => ({ + match (&($left), &($right)) { + (left_val, right_val) => { + if *left_val == *right_val { + panic!("assertion failed: `(left != right)` \ + (left: `{:?}`, right: `{:?}`): {}", left_val, right_val, + format_args!($($arg)*)) + } + } + } + }); } /// Ensure that a boolean expression is `true` at runtime. @@ -112,6 +173,13 @@ macro_rules! assert_eq { /// expensive to be present in a release build but may be helpful during /// development. /// +/// An unchecked assertion allows a program in an inconsistent state to keep +/// running, which might have unexpected consequences but does not introduce +/// unsafety as long as this only happens in safe code. The performance cost +/// of assertions, is however, not measurable in general. 
Replacing `assert!` +/// with `debug_assert!` is thus only encouraged after thorough profiling, and +/// more importantly, only in safe code! +/// /// # Examples /// /// ``` @@ -160,10 +228,47 @@ macro_rules! debug_assert_eq { ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_eq!($($arg)*); }) } -/// Helper macro for unwrapping `Result` values while returning early with an -/// error if the value of the expression is `Err`. Can only be used in -/// functions that return `Result` because of the early return of `Err` that -/// it provides. +/// Asserts that two expressions are not equal to each other. +/// +/// On panic, this macro will print the values of the expressions with their +/// debug representations. +/// +/// Unlike `assert_ne!`, `debug_assert_ne!` statements are only enabled in non +/// optimized builds by default. An optimized build will omit all +/// `debug_assert_ne!` statements unless `-C debug-assertions` is passed to the +/// compiler. This makes `debug_assert_ne!` useful for checks that are too +/// expensive to be present in a release build but may be helpful during +/// development. +/// +/// # Examples +/// +/// ``` +/// let a = 3; +/// let b = 2; +/// debug_assert_ne!(a, b); +/// ``` +#[macro_export] +#[stable(feature = "assert_ne", since = "1.12.0")] +macro_rules! debug_assert_ne { + ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_ne!($($arg)*); }) +} + +/// Helper macro for reducing boilerplate code for matching `Result` together +/// with converting downstream errors. +/// +/// Prefer using `?` syntax to `try!`. `?` is built in to the language and is +/// more succinct than `try!`. It is the standard method for error propagation. +/// +/// `try!` matches the given `Result`. In case of the `Ok` variant, the +/// expression has the value of the wrapped value. +/// +/// In case of the `Err` variant, it retrieves the inner error. `try!` then +/// performs conversion using `From`. 
This provides automatic conversion +/// between specialized errors and more general ones. The resulting +/// error is then immediately returned. +/// +/// Because of the early return, `try!` can only be used in functions that +/// return `Result`. /// /// # Examples /// @@ -172,18 +277,28 @@ macro_rules! debug_assert_eq { /// use std::fs::File; /// use std::io::prelude::*; /// -/// fn write_to_file_using_try() -> Result<(), io::Error> { +/// enum MyError { +/// FileWriteError +/// } +/// +/// impl From for MyError { +/// fn from(e: io::Error) -> MyError { +/// MyError::FileWriteError +/// } +/// } +/// +/// fn write_to_file_using_try() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); /// try!(file.write_all(b"This is a list of my best friends.")); /// println!("I wrote to the file"); /// Ok(()) /// } /// // This is equivalent to: -/// fn write_to_file_using_match() -> Result<(), io::Error> { +/// fn write_to_file_using_match() -> Result<(), MyError> { /// let mut file = try!(File::create("my_best_friends.txt")); /// match file.write_all(b"This is a list of my best friends.") { -/// Ok(_) => (), -/// Err(e) => return Err(e), +/// Ok(v) => v, +/// Err(e) => return Err(From::from(e)), /// } /// println!("I wrote to the file"); /// Ok(()) @@ -200,14 +315,29 @@ macro_rules! try { }) } -/// Use the `format!` syntax to write data into a buffer. +/// Write formatted data into a buffer +/// +/// This macro accepts a 'writer' (any value with a `write_fmt` method), a format string, and a +/// list of arguments to format. +/// +/// The `write_fmt` method usually comes from an implementation of [`std::fmt::Write`][fmt_write] +/// or [`std::io::Write`][io_write] traits. The term 'writer' refers to an implementation of one of +/// these two traits. /// -/// This macro is typically used with a buffer of `&mut `[`Write`][write]. 
+/// Passed arguments will be formatted according to the specified format string and the resulting +/// string will be passed to the writer. /// /// See [`std::fmt`][fmt] for more information on format syntax. /// -/// [fmt]: fmt/index.html -/// [write]: io/trait.Write.html +/// `write!` returns whatever the 'write_fmt' method returns. +/// +/// Common return values include: [`fmt::Result`][fmt_result], [`io::Result`][io_result] +/// +/// [fmt]: ../std/fmt/index.html +/// [fmt_write]: ../std/fmt/trait.Write.html +/// [io_write]: ../std/io/trait.Write.html +/// [fmt_result]: ../std/fmt/type.Result.html +/// [io_result]: ../std/io/type.Result.html /// /// # Examples /// @@ -220,20 +350,53 @@ macro_rules! try { /// /// assert_eq!(w, b"testformatted arguments"); /// ``` +/// +/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects +/// implementing either, as objects do not typically implement both. However, the module must +/// import the traits qualified so their names do not conflict: +/// +/// ``` +/// use std::fmt::Write as FmtWrite; +/// use std::io::Write as IoWrite; +/// +/// let mut s = String::new(); +/// let mut v = Vec::new(); +/// write!(&mut s, "{} {}", "abc", 123).unwrap(); // uses fmt::Write::write_fmt +/// write!(&mut v, "s = {:?}", s).unwrap(); // uses io::Write::write_fmt +/// assert_eq!(v, b"s = \"abc 123\""); +/// ``` #[macro_export] #[stable(feature = "core", since = "1.6.0")] macro_rules! write { ($dst:expr, $($arg:tt)*) => ($dst.write_fmt(format_args!($($arg)*))) } -/// Use the `format!` syntax to write data into a buffer, appending a newline. +/// Write formatted data into a buffer, with a newline appended. /// -/// This macro is typically used with a buffer of `&mut `[`Write`][write]. +/// On all platforms, the newline is the LINE FEED character (`\n`/`U+000A`) alone +/// (no additional CARRIAGE RETURN (`\r`/`U+000D`). 
+/// +/// This macro accepts a 'writer' (any value with a `write_fmt` method), a format string, and a +/// list of arguments to format. +/// +/// The `write_fmt` method usually comes from an implementation of [`std::fmt::Write`][fmt_write] +/// or [`std::io::Write`][io_write] traits. The term 'writer' refers to an implementation of one of +/// these two traits. +/// +/// Passed arguments will be formatted according to the specified format string and the resulting +/// string will be passed to the writer, along with the appended newline. /// /// See [`std::fmt`][fmt] for more information on format syntax. /// -/// [fmt]: fmt/index.html -/// [write]: io/trait.Write.html +/// `write!` returns whatever the 'write_fmt' method returns. +/// +/// Common return values include: [`fmt::Result`][fmt_result], [`io::Result`][io_result] +/// +/// [fmt]: ../std/fmt/index.html +/// [fmt_write]: ../std/fmt/trait.Write.html +/// [io_write]: ../std/io/trait.Write.html +/// [fmt_result]: ../std/fmt/type.Result.html +/// [io_result]: ../std/io/type.Result.html /// /// # Examples /// @@ -246,6 +409,21 @@ macro_rules! write { /// /// assert_eq!(&w[..], "test\nformatted arguments\n".as_bytes()); /// ``` +/// +/// A module can import both `std::fmt::Write` and `std::io::Write` and call `write!` on objects +/// implementing either, as objects do not typically implement both. However, the module must +/// import the traits qualified so their names do not conflict: +/// +/// ``` +/// use std::fmt::Write as FmtWrite; +/// use std::io::Write as IoWrite; +/// +/// let mut s = String::new(); +/// let mut v = Vec::new(); +/// writeln!(&mut s, "{} {}", "abc", 123).unwrap(); // uses fmt::Write::write_fmt +/// writeln!(&mut v, "s = {:?}", s).unwrap(); // uses io::Write::write_fmt +/// assert_eq!(v, b"s = \"abc 123\\n\"\n"); +/// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] macro_rules! writeln { @@ -364,3 +542,156 @@ macro_rules! unreachable { macro_rules! 
unimplemented { () => (panic!("not yet implemented")) } + +/// Built-in macros to the compiler itself. +/// +/// These macros do not have any corresponding definition with a `macro_rules!` +/// macro, but are documented here. Their implementations can be found hardcoded +/// into libsyntax itself. +/// +/// For more information, see documentation for `std`'s macros. +mod builtin { + /// The core macro for formatted string creation & output. + /// + /// For more information, see the documentation for [`std::format_args!`]. + /// + /// [`std::format_args!`]: ../std/macro.format_args.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! format_args { ($fmt:expr, $($args:tt)*) => ({ + /* compiler built-in */ + }) } + + /// Inspect an environment variable at compile time. + /// + /// For more information, see the documentation for [`std::env!`]. + /// + /// [`std::env!`]: ../std/macro.env.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! env { ($name:expr) => ({ /* compiler built-in */ }) } + + /// Optionally inspect an environment variable at compile time. + /// + /// For more information, see the documentation for [`std::option_env!`]. + /// + /// [`std::option_env!`]: ../std/macro.option_env.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! option_env { ($name:expr) => ({ /* compiler built-in */ }) } + + /// Concatenate identifiers into one identifier. + /// + /// For more information, see the documentation for [`std::concat_idents!`]. + /// + /// [`std::concat_idents!`]: ../std/macro.concat_idents.html + #[unstable(feature = "concat_idents_macro", issue = "29599")] + #[macro_export] + #[cfg(dox)] + macro_rules! concat_idents { + ($($e:ident),*) => ({ /* compiler built-in */ }) + } + + /// Concatenates literals into a static string slice. + /// + /// For more information, see the documentation for [`std::concat!`]. 
+ /// + /// [`std::concat!`]: ../std/macro.concat.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! concat { ($($e:expr),*) => ({ /* compiler built-in */ }) } + + /// A macro which expands to the line number on which it was invoked. + /// + /// For more information, see the documentation for [`std::line!`]. + /// + /// [`std::line!`]: ../std/macro.line.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! line { () => ({ /* compiler built-in */ }) } + + /// A macro which expands to the column number on which it was invoked. + /// + /// For more information, see the documentation for [`std::column!`]. + /// + /// [`std::column!`]: ../std/macro.column.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! column { () => ({ /* compiler built-in */ }) } + + /// A macro which expands to the file name from which it was invoked. + /// + /// For more information, see the documentation for [`std::file!`]. + /// + /// [`std::file!`]: ../std/macro.file.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! file { () => ({ /* compiler built-in */ }) } + + /// A macro which stringifies its argument. + /// + /// For more information, see the documentation for [`std::stringify!`]. + /// + /// [`std::stringify!`]: ../std/macro.stringify.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! stringify { ($t:tt) => ({ /* compiler built-in */ }) } + + /// Includes a utf8-encoded file as a string. + /// + /// For more information, see the documentation for [`std::include_str!`]. + /// + /// [`std::include_str!`]: ../std/macro.include_str.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! include_str { ($file:expr) => ({ /* compiler built-in */ }) } + + /// Includes a file as a reference to a byte array. 
+ /// + /// For more information, see the documentation for [`std::include_bytes!`]. + /// + /// [`std::include_bytes!`]: ../std/macro.include_bytes.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! include_bytes { ($file:expr) => ({ /* compiler built-in */ }) } + + /// Expands to a string that represents the current module path. + /// + /// For more information, see the documentation for [`std::module_path!`]. + /// + /// [`std::module_path!`]: ../std/macro.module_path.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! module_path { () => ({ /* compiler built-in */ }) } + + /// Boolean evaluation of configuration flags. + /// + /// For more information, see the documentation for [`std::cfg!`]. + /// + /// [`std::cfg!`]: ../std/macro.cfg.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! cfg { ($($cfg:tt)*) => ({ /* compiler built-in */ }) } + + /// Parse a file as an expression or an item according to the context. + /// + /// For more information, see the documentation for [`std::include!`]. + /// + /// [`std::include!`]: ../std/macro.include.html + #[stable(feature = "rust1", since = "1.0.0")] + #[macro_export] + #[cfg(dox)] + macro_rules! include { ($file:expr) => ({ /* compiler built-in */ }) } +} diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 621dce3efc86b..9af10966eda4b 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -8,24 +8,35 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Primitive traits and marker types representing basic 'kinds' of types. +//! Primitive traits and types representing basic properties of types. //! //! Rust types can be classified in various useful ways according to -//! intrinsic properties of the type. These classifications, often called -//! 'kinds', are represented as traits. +//! 
their intrinsic properties. These classifications are represented +//! as traits. #![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; use cmp; -use default::Default; -use option::Option; use hash::Hash; use hash::Hasher; /// Types that can be transferred across thread boundaries. /// -/// This trait is automatically derived when the compiler determines it's appropriate. +/// This trait is automatically implemented when the compiler determines it's +/// appropriate. +/// +/// An example of a non-`Send` type is the reference-counting pointer +/// [`rc::Rc`][`Rc`]. If two threads attempt to clone [`Rc`]s that point to the same +/// reference-counted value, they might try to update the reference count at the +/// same time, which is [undefined behavior][ub] because [`Rc`] doesn't use atomic +/// operations. Its cousin [`sync::Arc`][arc] does use atomic operations (incurring +/// some overhead) and thus is `Send`. +/// +/// See [the Nomicon](../../nomicon/send-and-sync.html) for more details. +/// +/// [`Rc`]: ../../std/rc/struct.Rc.html +/// [arc]: ../../std/sync/struct.Arc.html +/// [ub]: ../../reference.html#behavior-considered-undefined #[stable(feature = "rust1", since = "1.0.0")] #[lang = "send"] #[rustc_on_unimplemented = "`{Self}` cannot be sent between threads safely"] @@ -41,10 +52,10 @@ impl !Send for *const T { } #[stable(feature = "rust1", since = "1.0.0")] impl !Send for *mut T { } -/// Types with a constant size known at compile-time. +/// Types with a constant size known at compile time. /// -/// All type parameters which can be bounded have an implicit bound of `Sized`. The special syntax -/// `?Sized` can be used to remove this bound if it is not appropriate. +/// All type parameters have an implicit bound of `Sized`. The special syntax +/// `?Sized` can be used to remove this bound if it's not appropriate. 
/// /// ``` /// # #![allow(dead_code)] @@ -54,6 +65,26 @@ impl !Send for *mut T { } /// // struct FooUse(Foo<[i32]>); // error: Sized is not implemented for [i32] /// struct BarUse(Bar<[i32]>); // OK /// ``` +/// +/// The one exception is the implicit `Self` type of a trait, which does not +/// get an implicit `Sized` bound. This is because a `Sized` bound prevents +/// the trait from being used to form a [trait object]: +/// +/// ``` +/// # #![allow(unused_variables)] +/// trait Foo { } +/// trait Bar: Sized { } +/// +/// struct Impl; +/// impl Foo for Impl { } +/// impl Bar for Impl { } +/// +/// let x: &Foo = &Impl; // OK +/// // let y: &Bar = &Impl; // error: the trait `Bar` cannot +/// // be made into an object +/// ``` +/// +/// [trait object]: ../../book/trait-objects.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented = "`{Self}` does not have a constant size known at compile-time"] @@ -62,14 +93,27 @@ pub trait Sized { // Empty. } -/// Types that can be "unsized" to a dynamically sized type. +/// Types that can be "unsized" to a dynamically-sized type. +/// +/// For example, the sized array type `[i8; 2]` implements `Unsize<[i8]>` and +/// `Unsize`. +/// +/// All implementations of `Unsize` are provided automatically by the compiler. +/// +/// `Unsize` is used along with [`ops::CoerceUnsized`][coerceunsized] to allow +/// "user-defined" containers such as [`rc::Rc`][rc] to contain dynamically-sized +/// types. See the [DST coercion RFC][RFC982] for more details. +/// +/// [coerceunsized]: ../ops/trait.CoerceUnsized.html +/// [rc]: ../../std/rc/struct.Rc.html +/// [RFC982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md #[unstable(feature = "unsize", issue = "27732")] #[lang="unsize"] pub trait Unsize { // Empty. } -/// Types that can be copied by simply copying bits (i.e. `memcpy`). +/// Types whose values can be duplicated simply by copying bits. 
/// /// By default, variable bindings have 'move semantics.' In other /// words: @@ -90,7 +134,8 @@ pub trait Unsize { /// However, if a type implements `Copy`, it instead has 'copy semantics': /// /// ``` -/// // we can just derive a `Copy` implementation +/// // We can derive a `Copy` implementation. `Clone` is also required, as it's +/// // a supertrait of `Copy`. /// #[derive(Debug, Copy, Clone)] /// struct Foo; /// @@ -103,13 +148,56 @@ pub trait Unsize { /// println!("{:?}", x); // A-OK! /// ``` /// -/// It's important to note that in these two examples, the only difference is if you are allowed to -/// access `x` after the assignment: a move is also a bitwise copy under the hood. +/// It's important to note that in these two examples, the only difference is whether you +/// are allowed to access `x` after the assignment. Under the hood, both a copy and a move +/// can result in bits being copied in memory, although this is sometimes optimized away. +/// +/// ## How can I implement `Copy`? +/// +/// There are two ways to implement `Copy` on your type. The simplest is to use `derive`: +/// +/// ``` +/// #[derive(Copy, Clone)] +/// struct MyStruct; +/// ``` +/// +/// You can also implement `Copy` and `Clone` manually: +/// +/// ``` +/// struct MyStruct; +/// +/// impl Copy for MyStruct { } +/// +/// impl Clone for MyStruct { +/// fn clone(&self) -> MyStruct { +/// *self +/// } +/// } +/// ``` +/// +/// There is a small difference between the two: the `derive` strategy will also place a `Copy` +/// bound on type parameters, which isn't always desired. +/// +/// ## What's the difference between `Copy` and `Clone`? +/// +/// Copies happen implicitly, for example as part of an assignment `y = x`. The behavior of +/// `Copy` is not overloadable; it is always a simple bit-wise copy. +/// +/// Cloning is an explicit action, `x.clone()`. The implementation of [`Clone`] can +/// provide any type-specific behavior necessary to duplicate values safely. 
For example, +/// the implementation of [`Clone`] for [`String`] needs to copy the pointed-to string +/// buffer in the heap. A simple bitwise copy of [`String`] values would merely copy the +/// pointer, leading to a double free down the line. For this reason, [`String`] is [`Clone`] +/// but not `Copy`. +/// +/// [`Clone`] is a supertrait of `Copy`, so everything which is `Copy` must also implement +/// [`Clone`]. If a type is `Copy` then its [`Clone`] implementation need only return `*self` +/// (see the example above). /// /// ## When can my type be `Copy`? /// /// A type can implement `Copy` if all of its components implement `Copy`. For example, this -/// `struct` can be `Copy`: +/// struct can be `Copy`: /// /// ``` /// # #[allow(dead_code)] @@ -119,7 +207,8 @@ pub trait Unsize { /// } /// ``` /// -/// A `struct` can be `Copy`, and `i32` is `Copy`, so therefore, `Point` is eligible to be `Copy`. +/// A struct can be `Copy`, and [`i32`] is `Copy`, therefore `Point` is eligible to be `Copy`. +/// By contrast, consider /// /// ``` /// # #![allow(dead_code)] @@ -129,100 +218,117 @@ pub trait Unsize { /// } /// ``` /// -/// The `PointList` `struct` cannot implement `Copy`, because `Vec` is not `Copy`. If we +/// The struct `PointList` cannot implement `Copy`, because [`Vec`] is not `Copy`. If we /// attempt to derive a `Copy` implementation, we'll get an error: /// /// ```text /// the trait `Copy` may not be implemented for this type; field `points` does not implement `Copy` /// ``` /// -/// ## How can I implement `Copy`? 
-/// -/// There are two ways to implement `Copy` on your type: -/// -/// ``` -/// #[derive(Copy, Clone)] -/// struct MyStruct; -/// ``` -/// -/// and -/// -/// ``` -/// struct MyStruct; -/// impl Copy for MyStruct {} -/// impl Clone for MyStruct { fn clone(&self) -> MyStruct { *self } } -/// ``` -/// -/// There is a small difference between the two: the `derive` strategy will also place a `Copy` -/// bound on type parameters, which isn't always desired. -/// -/// ## When can my type _not_ be `Copy`? +/// ## When *can't* my type be `Copy`? /// /// Some types can't be copied safely. For example, copying `&mut T` would create an aliased -/// mutable reference, and copying `String` would result in two attempts to free the same buffer. +/// mutable reference. Copying [`String`] would duplicate responsibility for managing the +/// [`String`]'s buffer, leading to a double free. /// -/// Generalizing the latter case, any type implementing `Drop` can't be `Copy`, because it's -/// managing some resource besides its own `size_of::()` bytes. +/// Generalizing the latter case, any type implementing [`Drop`] can't be `Copy`, because it's +/// managing some resource besides its own [`size_of::()`] bytes. /// -/// ## When should my type be `Copy`? +/// If you try to implement `Copy` on a struct or enum containing non-`Copy` data, you will get a +/// compile-time error. Specifically, with structs you'll get [E0204] and with enums you'll get +/// [E0205]. /// -/// Generally speaking, if your type _can_ implement `Copy`, it should. There's one important thing -/// to consider though: if you think your type may _not_ be able to implement `Copy` in the future, -/// then it might be prudent to not implement `Copy`. This is because removing `Copy` is a breaking -/// change: that second example would fail to compile if we made `Foo` non-`Copy`. 
+/// [E0204]: ../../error-index.html#E0204 +/// [E0205]: ../../error-index.html#E0205 /// -/// # Derivable +/// ## When *should* my type be `Copy`? /// -/// This trait can be used with `#[derive]`. +/// Generally speaking, if your type _can_ implement `Copy`, it should. Keep in mind, though, +/// that implementing `Copy` is part of the public API of your type. If the type might become +/// non-`Copy` in the future, it could be prudent to omit the `Copy` implementation now, to +/// avoid a breaking API change. +/// +/// [`Vec`]: ../../std/vec/struct.Vec.html +/// [`String`]: ../../std/string/struct.String.html +/// [`Drop`]: ../../std/ops/trait.Drop.html +/// [`size_of::()`]: ../../std/mem/fn.size_of.html +/// [`Clone`]: ../clone/trait.Clone.html +/// [`String`]: ../../std/string/struct.String.html +/// [`i32`]: ../../std/primitive.i32.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "copy"] pub trait Copy : Clone { // Empty. } -/// Types that can be safely shared between threads when aliased. +/// Types for which it is safe to share references between threads. +/// +/// This trait is automatically implemented when the compiler determines +/// it's appropriate. /// /// The precise definition is: a type `T` is `Sync` if `&T` is -/// thread-safe. In other words, there is no possibility of data races -/// when passing `&T` references between threads. -/// -/// As one would expect, primitive types like `u8` and `f64` are all -/// `Sync`, and so are simple aggregate types containing them (like -/// tuples, structs and enums). More instances of basic `Sync` types -/// include "immutable" types like `&T` and those with simple -/// inherited mutability, such as `Box`, `Vec` and most other -/// collection types. (Generic parameters need to be `Sync` for their -/// container to be `Sync`.) 
-/// -/// A somewhat surprising consequence of the definition is `&mut T` is -/// `Sync` (if `T` is `Sync`) even though it seems that it might -/// provide unsynchronized mutation. The trick is a mutable reference -/// stored in an aliasable reference (that is, `& &mut T`) becomes -/// read-only, as if it were a `& &T`, hence there is no risk of a data -/// race. +/// [`Send`][send]. In other words, if there is no possibility of +/// [undefined behavior][ub] (including data races) when passing +/// `&T` references between threads. +/// +/// As one would expect, primitive types like [`u8`][u8] and [`f64`][f64] +/// are all `Sync`, and so are simple aggregate types containing them, +/// like tuples, structs and enums. More examples of basic `Sync` +/// types include "immutable" types like `&T`, and those with simple +/// inherited mutability, such as [`Box`][box], [`Vec`][vec] and +/// most other collection types. (Generic parameters need to be `Sync` +/// for their container to be `Sync`.) +/// +/// A somewhat surprising consequence of the definition is that `&mut T` +/// is `Sync` (if `T` is `Sync`) even though it seems like that might +/// provide unsynchronized mutation. The trick is that a mutable +/// reference behind a shared reference (that is, `& &mut T`) +/// becomes read-only, as if it were a `& &T`. Hence there is no risk +/// of a data race. /// /// Types that are not `Sync` are those that have "interior -/// mutability" in a non-thread-safe way, such as `Cell` and `RefCell` -/// in `std::cell`. These types allow for mutation of their contents -/// even when in an immutable, aliasable slot, e.g. the contents of -/// `&Cell` can be `.set`, and do not ensure data races are -/// impossible, hence they cannot be `Sync`. A higher level example -/// of a non-`Sync` type is the reference counted pointer -/// `std::rc::Rc`, because any reference `&Rc` can clone a new -/// reference, which modifies the reference counts in a non-atomic -/// way. 
+/// mutability" in a non-thread-safe form, such as [`cell::Cell`][cell] +/// and [`cell::RefCell`][refcell]. These types allow for mutation of +/// their contents even through an immutable, shared reference. For +/// example the `set` method on [`Cell`][cell] takes `&self`, so it requires +/// only a shared reference [`&Cell`][cell]. The method performs no +/// synchronization, thus [`Cell`][cell] cannot be `Sync`. /// -/// For cases when one does need thread-safe interior mutability, -/// types like the atomics in `std::sync` and `Mutex` & `RWLock` in -/// the `sync` crate do ensure that any mutation cannot cause data -/// races. Hence these types are `Sync`. +/// Another example of a non-`Sync` type is the reference-counting +/// pointer [`rc::Rc`][rc]. Given any reference [`&Rc`][rc], you can clone +/// a new [`Rc`][rc], modifying the reference counts in a non-atomic way. /// -/// Any types with interior mutability must also use the `std::cell::UnsafeCell` -/// wrapper around the value(s) which can be mutated when behind a `&` -/// reference; not doing this is undefined behavior (for example, -/// `transmute`-ing from `&T` to `&mut T` is invalid). -/// -/// This trait is automatically derived when the compiler determines it's appropriate. +/// For cases when one does need thread-safe interior mutability, +/// Rust provides [atomic data types], as well as explicit locking via +/// [`sync::Mutex`][mutex] and [`sync::RwLock`][rwlock]. These types +/// ensure that any mutation cannot cause data races, hence the types +/// are `Sync`. Likewise, [`sync::Arc`][arc] provides a thread-safe +/// analogue of [`Rc`][rc]. +/// +/// Any types with interior mutability must also use the +/// [`cell::UnsafeCell`][unsafecell] wrapper around the value(s) which +/// can be mutated through a shared reference. Failing to do this is +/// [undefined behavior][ub]. For example, [`transmute`][transmute]-ing +/// from `&T` to `&mut T` is invalid. 
+/// +/// See [the Nomicon](../../nomicon/send-and-sync.html) for more +/// details about `Sync`. +/// +/// [send]: trait.Send.html +/// [u8]: ../../std/primitive.u8.html +/// [f64]: ../../std/primitive.f64.html +/// [box]: ../../std/boxed/struct.Box.html +/// [vec]: ../../std/vec/struct.Vec.html +/// [cell]: ../cell/struct.Cell.html +/// [refcell]: ../cell/struct.RefCell.html +/// [rc]: ../../std/rc/struct.Rc.html +/// [arc]: ../../std/sync/struct.Arc.html +/// [atomic data types]: ../sync/atomic/index.html +/// [mutex]: ../../std/sync/struct.Mutex.html +/// [rwlock]: ../../std/sync/struct.RwLock.html +/// [unsafecell]: ../cell/struct.UnsafeCell.html +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [transmute]: ../../std/mem/fn.transmute.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sync"] #[rustc_on_unimplemented = "`{Self}` cannot be shared between threads safely"] @@ -291,29 +397,30 @@ macro_rules! impls{ ) } -/// `PhantomData` allows you to describe that a type acts as if it stores a value of type `T`, -/// even though it does not. This allows you to inform the compiler about certain safety properties -/// of your code. +/// Zero-sized type used to mark things that "act like" they own a `T`. /// -/// For a more in-depth explanation of how to use `PhantomData`, please see [the Nomicon]. +/// Adding a `PhantomData` field to your type tells the compiler that your +/// type acts as though it stores a value of type `T`, even though it doesn't +/// really. This information is used when computing certain safety properties. /// -/// [the Nomicon]: ../../nomicon/phantom-data.html +/// For a more in-depth explanation of how to use `PhantomData`, please see +/// [the Nomicon](../../nomicon/phantom-data.html). /// /// # A ghastly note 👻👻👻 /// -/// Though they both have scary names, `PhantomData` and 'phantom types' are related, but not -/// identical. 
Phantom types are a more general concept that don't require `PhantomData` to -/// implement, but `PhantomData` is the most common way to implement them in a correct manner. +/// Though they both have scary names, `PhantomData` and 'phantom types' are +/// related, but not identical. A phantom type parameter is simply a type +/// parameter which is never used. In Rust, this often causes the compiler to +/// complain, and the solution is to add a "dummy" use by way of `PhantomData`. /// /// # Examples /// -/// ## Unused lifetime parameter +/// ## Unused lifetime parameters /// -/// Perhaps the most common time that `PhantomData` is required is -/// with a struct that has an unused lifetime parameter, typically as -/// part of some unsafe code. For example, here is a struct `Slice` -/// that has two pointers of type `*const T`, presumably pointing into -/// an array somewhere: +/// Perhaps the most common use case for `PhantomData` is a struct that has an +/// unused lifetime parameter, typically as part of some unsafe code. For +/// example, here is a struct `Slice` that has two pointers of type `*const T`, +/// presumably pointing into an array somewhere: /// /// ```ignore /// struct Slice<'a, T> { @@ -327,38 +434,62 @@ macro_rules! impls{ /// intent is not expressed in the code, since there are no uses of /// the lifetime `'a` and hence it is not clear what data it applies /// to. We can correct this by telling the compiler to act *as if* the -/// `Slice` struct contained a borrowed reference `&'a T`: +/// `Slice` struct contained a reference `&'a T`: /// /// ``` /// use std::marker::PhantomData; /// /// # #[allow(dead_code)] -/// struct Slice<'a, T:'a> { +/// struct Slice<'a, T: 'a> { /// start: *const T, /// end: *const T, -/// phantom: PhantomData<&'a T> +/// phantom: PhantomData<&'a T>, /// } /// ``` /// -/// This also in turn requires that we annotate `T:'a`, indicating -/// that `T` is a type that can be borrowed for the lifetime `'a`. 
+/// This also in turn requires the annotation `T: 'a`, indicating +/// that any references in `T` are valid over the lifetime `'a`. +/// +/// When initializing a `Slice` you simply provide the value +/// `PhantomData` for the field `phantom`: +/// +/// ``` +/// # #![allow(dead_code)] +/// # use std::marker::PhantomData; +/// # struct Slice<'a, T: 'a> { +/// # start: *const T, +/// # end: *const T, +/// # phantom: PhantomData<&'a T>, +/// # } +/// fn borrow_vec<'a, T>(vec: &'a Vec) -> Slice<'a, T> { +/// let ptr = vec.as_ptr(); +/// Slice { +/// start: ptr, +/// end: unsafe { ptr.offset(vec.len() as isize) }, +/// phantom: PhantomData, +/// } +/// } +/// ``` /// /// ## Unused type parameters /// -/// It sometimes happens that there are unused type parameters that +/// It sometimes happens that you have unused type parameters which /// indicate what type of data a struct is "tied" to, even though that /// data is not actually found in the struct itself. Here is an -/// example where this arises when handling external resources over a -/// foreign function interface. `PhantomData` can prevent -/// mismatches by enforcing types in the method implementations: +/// example where this arises with [FFI]. The foreign interface uses +/// handles of type `*mut ()` to refer to Rust values of different +/// types. We track the Rust type using a phantom type parameter on +/// the struct `ExternalResource` which wraps a handle. +/// +/// [FFI]: ../../book/ffi.html /// /// ``` /// # #![allow(dead_code)] -/// # trait ResType { fn foo(&self); } +/// # trait ResType { } /// # struct ParamType; /// # mod foreign_lib { -/// # pub fn new(_: usize) -> *mut () { 42 as *mut () } -/// # pub fn do_stuff(_: *mut (), _: usize) {} +/// # pub fn new(_: usize) -> *mut () { 42 as *mut () } +/// # pub fn do_stuff(_: *mut (), _: usize) {} /// # } /// # fn convert_params(_: ParamType) -> usize { 42 } /// use std::marker::PhantomData; @@ -385,21 +516,20 @@ macro_rules! 
impls{ /// } /// ``` /// -/// ## Indicating ownership +/// ## Ownership and the drop check /// -/// Adding a field of type `PhantomData` also indicates that your -/// struct owns data of type `T`. This in turn implies that when your -/// struct is dropped, it may in turn drop one or more instances of -/// the type `T`, though that may not be apparent from the other -/// structure of the type itself. This is commonly necessary if the -/// structure is using a raw pointer like `*mut T` whose referent -/// may be dropped when the type is dropped, as a `*mut T` is -/// otherwise not treated as owned. +/// Adding a field of type `PhantomData` indicates that your +/// type owns data of type `T`. This in turn implies that when your +/// type is dropped, it may drop one or more instances of the type +/// `T`. This has bearing on the Rust compiler's [drop check] +/// analysis. /// /// If your struct does not in fact *own* the data of type `T`, it is /// better to use a reference type, like `PhantomData<&'a T>` /// (ideally) or `PhantomData<*const T>` (if no lifetime applies), so /// as not to indicate ownership. +/// +/// [drop check]: ../../nomicon/dropck.html #[lang = "phantom_data"] #[stable(feature = "rust1", since = "1.0.0")] pub struct PhantomData; @@ -407,8 +537,6 @@ pub struct PhantomData; impls! { PhantomData } mod impls { - use super::{Send, Sync, Sized}; - #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<'a, T: Sync + ?Sized> Send for &'a T {} #[stable(feature = "rust1", since = "1.0.0")] @@ -417,10 +545,13 @@ mod impls { /// Types that can be reflected over. /// -/// This trait is implemented for all types. Its purpose is to ensure -/// that when you write a generic function that will employ -/// reflection, that must be reflected (no pun intended) in the -/// generic bounds of that function. Here is an example: +/// By "reflection" we mean use of the [`Any`][any] trait, or related +/// machinery such as [`TypeId`][typeid]. 
+/// +/// `Reflect` is implemented for all types. Its purpose is to ensure +/// that when you write a generic function that will employ reflection, +/// that must be reflected (no pun intended) in the generic bounds of +/// that function. /// /// ``` /// #![feature(reflect_marker)] @@ -428,31 +559,35 @@ mod impls { /// use std::any::Any; /// /// # #[allow(dead_code)] -/// fn foo(x: &T) { +/// fn foo(x: &T) { /// let any: &Any = x; /// if any.is::() { println!("u32"); } /// } /// ``` /// -/// Without the declaration `T:Reflect`, `foo` would not type check -/// (note: as a matter of style, it would be preferable to write -/// `T:Any`, because `T:Any` implies `T:Reflect` and `T:'static`, but -/// we use `Reflect` here to show how it works). The `Reflect` bound -/// thus serves to alert `foo`'s caller to the fact that `foo` may -/// behave differently depending on whether `T=u32` or not. In -/// particular, thanks to the `Reflect` bound, callers know that a -/// function declared like `fn bar(...)` will always act in -/// precisely the same way no matter what type `T` is supplied, -/// because there are no bounds declared on `T`. (The ability for a -/// caller to reason about what a function may do based solely on what -/// generic bounds are declared is often called the ["parametricity -/// property"][1].) -/// -/// [1]: http://en.wikipedia.org/wiki/Parametricity +/// Without the bound `T: Reflect`, `foo` would not typecheck. (As +/// a matter of style, it would be preferable to write `T: Any`, +/// because `T: Any` implies `T: Reflect` and `T: 'static`, but we +/// use `Reflect` here for illustrative purposes.) +/// +/// The `Reflect` bound serves to alert `foo`'s caller to the +/// fact that `foo` may behave differently depending on whether +/// `T` is `u32` or not. The ability for a caller to reason about what +/// a function may do based solely on what generic bounds are declared +/// is often called the "[parametricity property][param]". 
Despite the +/// use of `Reflect`, Rust lacks true parametricity because a generic +/// function can, at the very least, call [`mem::size_of`][size_of] +/// without employing any trait bounds whatsoever. +/// +/// [any]: ../any/trait.Any.html +/// [typeid]: ../any/struct.TypeId.html +/// [param]: http://en.wikipedia.org/wiki/Parametricity +/// [size_of]: ../mem/fn.size_of.html #[rustc_reflect_like] #[unstable(feature = "reflect_marker", reason = "requires RFC and more experience", issue = "27749")] +#[rustc_deprecated(since = "1.14.0", reason = "Specialization makes parametricity impossible")] #[rustc_on_unimplemented = "`{Self}` does not implement `Any`; \ ensure all type parameters are bounded by `Any`"] pub trait Reflect {} @@ -460,4 +595,6 @@ pub trait Reflect {} #[unstable(feature = "reflect_marker", reason = "requires RFC and more experience", issue = "27749")] +#[rustc_deprecated(since = "1.14.0", reason = "Specialization makes parametricity impossible")] +#[allow(deprecated)] impl Reflect for .. { } diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index fb6dac4079834..209107ef92ceb 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -8,68 +8,57 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Basic functions for dealing with memory +//! Basic functions for dealing with memory. //! //! This module contains functions for querying the size and alignment of //! types, initializing and manipulating memory. #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; +use clone; +use cmp; +use fmt; +use hash; use intrinsics; +use marker::{Copy, PhantomData, Sized}; use ptr; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::transmute; -/// Leaks a value into the void, consuming ownership and never running its -/// destructor. +/// Leaks a value: takes ownership and "forgets" about the value **without running +/// its destructor**. 
/// -/// This function will take ownership of its argument, but is distinct from the -/// `mem::drop` function in that it **does not run the destructor**, leaking the -/// value and any resources that it owns. +/// Any resources the value manages, such as heap memory or a file handle, will linger +/// forever in an unreachable state. /// -/// There's only a few reasons to use this function. They mainly come -/// up in unsafe code or FFI code. -/// -/// * You have an uninitialized value, perhaps for performance reasons, and -/// need to prevent the destructor from running on it. -/// * You have two copies of a value (like when writing something like -/// [`mem::swap`][swap]), but need the destructor to only run once to -/// prevent a double `free`. -/// * Transferring resources across [FFI][ffi] boundaries. -/// -/// [swap]: fn.swap.html -/// [ffi]: ../../book/ffi.html +/// If you want to dispose of a value properly, running its destructor, see +/// [`mem::drop`][drop]. /// /// # Safety /// -/// This function is not marked as `unsafe` as Rust does not guarantee that the -/// `Drop` implementation for a value will always run. Note, however, that -/// leaking resources such as memory or I/O objects is likely not desired, so -/// this function is only recommended for specialized use cases. -/// -/// The safety of this function implies that when writing `unsafe` code -/// yourself care must be taken when leveraging a destructor that is required to -/// run to preserve memory safety. There are known situations where the -/// destructor may not run (such as if ownership of the object with the -/// destructor is returned) which must be taken into account. +/// `forget` is not marked as `unsafe`, because Rust's safety guarantees +/// do not include a guarantee that destructors will always run. For example, +/// a program can create a reference cycle using [`Rc`][rc], or call +/// [`process:exit`][exit] to exit without running destructors. 
Thus, allowing +/// `mem::forget` from safe code does not fundamentally change Rust's safety +/// guarantees. /// -/// # Other forms of Leakage +/// That said, leaking resources such as memory or I/O objects is usually undesirable, +/// so `forget` is only recommended for specialized use cases like those shown below. /// -/// It's important to point out that this function is not the only method by -/// which a value can be leaked in safe Rust code. Other known sources of -/// leakage are: +/// Because forgetting a value is allowed, any `unsafe` code you write must +/// allow for this possibility. You cannot return a value and expect that the +/// caller will necessarily run the value's destructor. /// -/// * `Rc` and `Arc` cycles -/// * `mpsc::{Sender, Receiver}` cycles (they use `Arc` internally) -/// * Panicking destructors are likely to leak local resources +/// [rc]: ../../std/rc/struct.Rc.html +/// [exit]: ../../std/process/fn.exit.html /// -/// # Example +/// # Examples /// /// Leak some heap memory by never deallocating it: /// -/// ```rust +/// ``` /// use std::mem; /// /// let heap_memory = Box::new(3); @@ -78,7 +67,7 @@ pub use intrinsics::transmute; /// /// Leak an I/O object, never closing the file: /// -/// ```rust,no_run +/// ```no_run /// use std::mem; /// use std::fs::File; /// @@ -86,9 +75,43 @@ pub use intrinsics::transmute; /// mem::forget(file); /// ``` /// -/// The `mem::swap` function uses `mem::forget` to good effect: +/// The practical use cases for `forget` are rather specialized and mainly come +/// up in unsafe or FFI code. +/// +/// ## Use case 1 +/// +/// You have created an uninitialized value using [`mem::uninitialized`][uninit]. +/// You must either initialize or `forget` it on every computation path before +/// Rust drops it automatically, like at the end of a scope or after a panic. +/// Running the destructor on an uninitialized value would be [undefined behavior][ub]. 
+/// +/// ``` +/// use std::mem; +/// use std::ptr; +/// +/// # let some_condition = false; +/// unsafe { +/// let mut uninit_vec: Vec = mem::uninitialized(); +/// +/// if some_condition { +/// // Initialize the variable. +/// ptr::write(&mut uninit_vec, Vec::new()); +/// } else { +/// // Forget the uninitialized value so its destructor doesn't run. +/// mem::forget(uninit_vec); +/// } +/// } +/// ``` +/// +/// ## Use case 2 +/// +/// You have duplicated the bytes making up a value, without doing a proper +/// [`Clone`][clone]. You need the value's destructor to run only once, +/// because a double `free` is undefined behavior. /// -/// ```rust +/// An example is the definition of [`mem::swap`][swap] in this module: +/// +/// ``` /// use std::mem; /// use std::ptr; /// @@ -110,6 +133,42 @@ pub use intrinsics::transmute; /// } /// } /// ``` +/// +/// ## Use case 3 +/// +/// You are transferring ownership across an [FFI] boundary to code written in +/// another language. You need to `forget` the value on the Rust side because Rust +/// code is no longer responsible for it. +/// +/// ```no_run +/// use std::mem; +/// +/// extern "C" { +/// fn my_c_function(x: *const u32); +/// } +/// +/// let x: Box = Box::new(3); +/// +/// // Transfer ownership into C code. +/// unsafe { +/// my_c_function(&*x); +/// } +/// mem::forget(x); +/// ``` +/// +/// In this case, C code must call back into Rust to free the object. Calling C's `free` +/// function on a [`Box`][box] is *not* safe! Also, `Box` provides an [`into_raw`][into_raw] +/// method which is the preferred way to do this in practice. 
+/// +/// [drop]: fn.drop.html +/// [uninit]: fn.uninitialized.html +/// [clone]: ../clone/trait.Clone.html +/// [swap]: fn.swap.html +/// [FFI]: ../../book/ffi.html +/// [box]: ../../std/boxed/struct.Box.html +/// [into_raw]: ../../std/boxed/struct.Box.html#method.into_raw +/// [ub]: ../../reference.html#behavior-considered-undefined +#[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn forget(t: T) { unsafe { intrinsics::forget(t) } @@ -117,6 +176,9 @@ pub fn forget(t: T) { /// Returns the size of a type in bytes. /// +/// More specifically, this is the offset in bytes between successive +/// items of the same type, including alignment padding. +/// /// # Examples /// /// ``` @@ -130,7 +192,14 @@ pub fn size_of() -> usize { unsafe { intrinsics::size_of::() } } -/// Returns the size of the given value in bytes. +/// Returns the size of the pointed-to value in bytes. +/// +/// This is usually the same as `size_of::()`. However, when `T` *has* no +/// statically known size, e.g. a slice [`[T]`][slice] or a [trait object], +/// then `size_of_val` can be used to get the dynamically-known size. +/// +/// [slice]: ../../std/primitive.slice.html +/// [trait object]: ../../book/trait-objects.html /// /// # Examples /// @@ -138,6 +207,10 @@ pub fn size_of() -> usize { /// use std::mem; /// /// assert_eq!(4, mem::size_of_val(&5i32)); +/// +/// let x: [u8; 13] = [0; 13]; +/// let y: &[u8] = &x; +/// assert_eq!(13, mem::size_of_val(y)); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -145,10 +218,14 @@ pub fn size_of_val(val: &T) -> usize { unsafe { intrinsics::size_of_val(val) } } -/// Returns the ABI-required minimum alignment of a type +/// Returns the [ABI]-required minimum alignment of a type. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. /// /// This is the alignment used for struct fields. It may be smaller than the preferred alignment. 
/// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface +/// /// # Examples /// /// ``` @@ -164,7 +241,11 @@ pub fn min_align_of() -> usize { unsafe { intrinsics::min_align_of::() } } -/// Returns the ABI-required minimum alignment of the type of the value that `val` points to +/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. +/// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface /// /// # Examples /// @@ -181,10 +262,14 @@ pub fn min_align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Returns the alignment in memory for a type. +/// Returns the [ABI]-required minimum alignment of a type. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. /// /// This is the alignment used for struct fields. It may be smaller than the preferred alignment. /// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface +/// /// # Examples /// /// ``` @@ -198,7 +283,11 @@ pub fn align_of() -> usize { unsafe { intrinsics::min_align_of::() } } -/// Returns the ABI-required minimum alignment of the type of the value that `val` points to +/// Returns the [ABI]-required minimum alignment of the type of the value that `val` points to. +/// +/// Every valid address of a value of the type `T` must be a multiple of this number. +/// +/// [ABI]: https://en.wikipedia.org/wiki/Application_binary_interface /// /// # Examples /// @@ -213,16 +302,23 @@ pub fn align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Creates a value initialized to zero. +/// Creates a value whose bytes are all zero. +/// +/// This has the same effect as allocating space with +/// [`mem::uninitialized`][uninit] and then zeroing it out. It is useful for +/// [FFI] sometimes, but should generally be avoided. 
/// -/// This function is similar to allocating space for a local variable and zeroing it out (an unsafe -/// operation). +/// There is no guarantee that an all-zero byte-pattern represents a valid value of +/// some type `T`. If `T` has a destructor and the value is destroyed (due to +/// a panic or the end of a scope) before being initialized, then the destructor +/// will run on zeroed data, likely leading to [undefined behavior][ub]. /// -/// Care must be taken when using this function, if the type `T` has a destructor and the value -/// falls out of scope (due to unwinding or returning) before being initialized, then the -/// destructor will run on zeroed data, likely leading to crashes. +/// See also the documentation for [`mem::uninitialized`][uninit], which has +/// many of the same caveats. /// -/// This is useful for FFI functions sometimes, but should generally be avoided. +/// [uninit]: fn.uninitialized.html +/// [FFI]: ../../book/ffi.html +/// [ub]: ../../reference.html#behavior-considered-undefined /// /// # Examples /// @@ -230,6 +326,7 @@ pub fn align_of_val(val: &T) -> usize { /// use std::mem; /// /// let x: i32 = unsafe { mem::zeroed() }; +/// assert_eq!(0, x); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -237,57 +334,36 @@ pub unsafe fn zeroed() -> T { intrinsics::init() } -/// Creates a value initialized to an unspecified series of bytes. -/// -/// The byte sequence usually indicates that the value at the memory -/// in question has been dropped. Thus, *if* T carries a drop flag, -/// any associated destructor will not be run when the value falls out -/// of scope. -/// -/// Some code at one time used the `zeroed` function above to -/// accomplish this goal. -/// -/// This function is expected to be deprecated with the transition -/// to non-zeroing drop. 
-#[inline] -#[unstable(feature = "filling_drop", issue = "5016")] -pub unsafe fn dropped() -> T { - #[inline(always)] - unsafe fn dropped_impl() -> T { intrinsics::init_dropped() } - - dropped_impl() -} - /// Bypasses Rust's normal memory-initialization checks by pretending to -/// produce a value of type T, while doing nothing at all. +/// produce a value of type `T`, while doing nothing at all. /// -/// **This is incredibly dangerous, and should not be done lightly. Deeply +/// **This is incredibly dangerous and should not be done lightly. Deeply /// consider initializing your memory with a default value instead.** /// -/// This is useful for FFI functions and initializing arrays sometimes, +/// This is useful for [FFI] functions and initializing arrays sometimes, /// but should generally be avoided. /// -/// # Undefined Behavior +/// [FFI]: ../../book/ffi.html /// -/// It is Undefined Behavior to read uninitialized memory. Even just an +/// # Undefined behavior +/// +/// It is [undefined behavior][ub] to read uninitialized memory, even just an /// uninitialized boolean. For instance, if you branch on the value of such -/// a boolean your program may take one, both, or neither of the branches. +/// a boolean, your program may take one, both, or neither of the branches. /// -/// Note that this often also includes *writing* to the uninitialized value. -/// Rust believes the value is initialized, and will therefore try to Drop -/// the uninitialized value and its fields if you try to overwrite the memory -/// in a normal manner. The only way to safely initialize an arbitrary -/// uninitialized value is with one of the `ptr` functions: `write`, `copy`, or -/// `copy_nonoverlapping`. This isn't necessary if `T` is a primitive -/// or otherwise only contains types that don't implement Drop. +/// Writing to the uninitialized value is similarly dangerous. 
Rust believes the +/// value is initialized, and will therefore try to [`Drop`] the uninitialized +/// value and its fields if you try to overwrite it in a normal manner. The only way +/// to safely initialize an uninitialized value is with [`ptr::write`][write], +/// [`ptr::copy`][copy], or [`ptr::copy_nonoverlapping`][copy_no]. /// -/// If this value *does* need some kind of Drop, it must be initialized before +/// If the value does implement [`Drop`], it must be initialized before /// it goes out of scope (and therefore would be dropped). Note that this /// includes a `panic` occurring and unwinding the stack suddenly. /// /// # Examples /// -/// Here's how to safely initialize an array of `Vec`s. +/// Here's how to safely initialize an array of [`Vec`]s. /// /// ``` /// use std::mem; @@ -327,9 +403,9 @@ pub unsafe fn dropped() -> T { /// println!("{:?}", &data[0]); /// ``` /// -/// This example emphasizes exactly how delicate and dangerous doing this is. -/// Note that the `vec!` macro *does* let you initialize every element with a -/// value that is only `Clone`, so the following is semantically equivalent and +/// This example emphasizes exactly how delicate and dangerous using `mem::uninitialized` +/// can be. 
Note that the [`vec!`] macro *does* let you initialize every element with a +/// value that is only [`Clone`], so the following is semantically equivalent and /// vastly less dangerous, as long as you can live with an extra heap /// allocation: /// @@ -337,27 +413,35 @@ pub unsafe fn dropped() -> T { /// let data: Vec> = vec![Vec::new(); 1000]; /// println!("{:?}", &data[0]); /// ``` +/// +/// [`Vec`]: ../../std/vec/struct.Vec.html +/// [`vec!`]: ../../std/macro.vec.html +/// [`Clone`]: ../../std/clone/trait.Clone.html +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [write]: ../ptr/fn.write.html +/// [copy]: ../intrinsics/fn.copy.html +/// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html +/// [`Drop`]: ../ops/trait.Drop.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn uninitialized() -> T { intrinsics::uninit() } -/// Swap the values at two mutable locations of the same type, without deinitializing or copying -/// either one. +/// Swaps the values at two mutable locations, without deinitializing either one. /// /// # Examples /// /// ``` /// use std::mem; /// -/// let x = &mut 5; -/// let y = &mut 42; +/// let mut x = 5; +/// let mut y = 42; /// -/// mem::swap(x, y); +/// mem::swap(&mut x, &mut y); /// -/// assert_eq!(42, *x); -/// assert_eq!(5, *y); +/// assert_eq!(42, x); +/// assert_eq!(5, y); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] @@ -379,10 +463,7 @@ pub fn swap(x: &mut T, y: &mut T) { } /// Replaces the value at a mutable location with a new one, returning the old value, without -/// deinitializing or copying either one. -/// -/// This is primarily used for transferring and swapping ownership of a value in a mutable -/// location. +/// deinitializing either one. 
/// /// # Examples /// @@ -391,15 +472,17 @@ pub fn swap(x: &mut T, y: &mut T) { /// ``` /// use std::mem; /// -/// let mut v: Vec = Vec::new(); +/// let mut v: Vec = vec![1, 2]; /// -/// mem::replace(&mut v, Vec::new()); +/// let old_v = mem::replace(&mut v, vec![3, 4, 5]); +/// assert_eq!(2, old_v.len()); +/// assert_eq!(3, v.len()); /// ``` /// -/// This function allows consumption of one field of a struct by replacing it with another value. -/// The normal approach doesn't always work: +/// `replace` allows consumption of a struct field by replacing it with another value. +/// Without `replace` you can run into issues like these: /// -/// ```rust,ignore +/// ```ignore /// struct Buffer { buf: Vec } /// /// impl Buffer { @@ -412,13 +495,14 @@ pub fn swap(x: &mut T, y: &mut T) { /// } /// ``` /// -/// Note that `T` does not necessarily implement `Clone`, so it can't even clone and reset +/// Note that `T` does not necessarily implement [`Clone`], so it can't even clone and reset /// `self.buf`. But `replace` can be used to disassociate the original value of `self.buf` from /// `self`, allowing it to be returned: /// /// ``` /// # #![allow(dead_code)] /// use std::mem; +/// /// # struct Buffer { buf: Vec } /// impl Buffer { /// fn get_and_reset(&mut self) -> Vec { @@ -426,6 +510,8 @@ pub fn swap(x: &mut T, y: &mut T) { /// } /// } /// ``` +/// +/// [`Clone`]: ../../std/clone/trait.Clone.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn replace(dest: &mut T, mut src: T) -> T { @@ -435,14 +521,25 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// Disposes of a value. /// -/// While this does call the argument's implementation of `Drop`, it will not -/// release any borrows, as borrows are based on lexical scope. +/// While this does call the argument's implementation of [`Drop`][drop], +/// it will not release any borrows, as borrows are based on lexical scope. 
/// /// This effectively does nothing for /// [types which implement `Copy`](../../book/ownership.html#copy-types), /// e.g. integers. Such values are copied and _then_ moved into the function, /// so the value persists after this function call. /// +/// This function is not magic; it is literally defined as +/// +/// ``` +/// pub fn drop(_x: T) { } +/// ``` +/// +/// Because `_x` is moved into the function, it is automatically dropped before +/// the function returns. +/// +/// [drop]: ../ops/trait.Drop.html +/// /// # Examples /// /// Basic usage: @@ -479,8 +576,8 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// v.push(4); // no problems /// ``` /// -/// Since `RefCell` enforces the borrow rules at runtime, `drop()` can -/// seemingly release a borrow of one: +/// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can +/// release a [`RefCell`] borrow: /// /// ``` /// use std::cell::RefCell; @@ -496,7 +593,7 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// println!("{}", *borrow); /// ``` /// -/// Integers and other types implementing `Copy` are unaffected by `drop()` +/// Integers and other types implementing [`Copy`] are unaffected by `drop`. /// /// ``` /// #[derive(Copy, Clone)] @@ -510,75 +607,132 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// println!("x: {}, y: {}", x, y.0); // still available /// ``` /// +/// [`RefCell`]: ../../std/cell/struct.RefCell.html +/// [`Copy`]: ../../std/marker/trait.Copy.html #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn drop(_x: T) { } -macro_rules! repeat_u8_as_u32 { - ($name:expr) => { (($name as u32) << 24 | - ($name as u32) << 16 | - ($name as u32) << 8 | - ($name as u32)) } -} -macro_rules! repeat_u8_as_u64 { - ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 | - (repeat_u8_as_u32!($name) as u64)) } -} - -// NOTE: Keep synchronized with values used in librustc_trans::trans::adt. 
-// -// In particular, the POST_DROP_U8 marker must never equal the -// DTOR_NEEDED_U8 marker. -// -// For a while pnkfelix was using 0xc1 here. -// But having the sign bit set is a pain, so 0x1d is probably better. -// -// And of course, 0x00 brings back the old world of zero'ing on drop. -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U8: u8 = 0x1d; -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8); -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8); - -#[cfg(target_pointer_width = "32")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize; -#[cfg(target_pointer_width = "64")] -#[unstable(feature = "filling_drop", issue = "5016")] -#[allow(missing_docs)] -pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize; - -/// Interprets `src` as `&U`, and then reads `src` without moving the contained -/// value. +/// Interprets `src` as having type `&U`, and then reads `src` without moving +/// the contained value. /// /// This function will unsafely assume the pointer `src` is valid for -/// `sizeof(U)` bytes by transmuting `&T` to `&U` and then reading the `&U`. It -/// will also unsafely create a copy of the contained value instead of moving -/// out of `src`. +/// [`size_of::()`][size_of] bytes by transmuting `&T` to `&U` and then reading +/// the `&U`. It will also unsafely create a copy of the contained value instead of +/// moving out of `src`. /// /// It is not a compile-time error if `T` and `U` have different sizes, but it /// is highly encouraged to only invoke this function where `T` and `U` have the -/// same size. This function triggers undefined behavior if `U` is larger than +/// same size. 
This function triggers [undefined behavior][ub] if `U` is larger than /// `T`. /// +/// [ub]: ../../reference.html#behavior-considered-undefined +/// [size_of]: fn.size_of.html +/// /// # Examples /// /// ``` /// use std::mem; /// -/// let one = unsafe { mem::transmute_copy(&1) }; +/// #[repr(packed)] +/// struct Foo { +/// bar: u8, +/// } +/// +/// let foo_slice = [10u8]; +/// +/// unsafe { +/// // Copy the data from 'foo_slice' and treat it as a 'Foo' +/// let mut foo_struct: Foo = mem::transmute_copy(&foo_slice); +/// assert_eq!(foo_struct.bar, 10); +/// +/// // Modify the copied data +/// foo_struct.bar = 20; +/// assert_eq!(foo_struct.bar, 20); +/// } /// -/// assert_eq!(1, one); +/// // The contents of 'foo_slice' should not have changed +/// assert_eq!(foo_slice, [10]); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn transmute_copy(src: &T) -> U { - // FIXME(#23542) Replace with type ascription. - #![allow(trivial_casts)] ptr::read(src as *const T as *const U) } + +/// Opaque type representing the discriminant of an enum. +/// +/// See the `discriminant` function in this module for more information. +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +pub struct Discriminant(u64, PhantomData<*const T>); + +// N.B. These trait implementations cannot be derived because we don't want any bounds on T. 
+ +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl Copy for Discriminant {} + +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl clone::Clone for Discriminant { + fn clone(&self) -> Self { + *self + } +} + +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl cmp::PartialEq for Discriminant { + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} + +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl cmp::Eq for Discriminant {} + +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl hash::Hash for Discriminant { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +impl fmt::Debug for Discriminant { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_tuple("Discriminant") + .field(&self.0) + .finish() + } +} + +/// Returns a value uniquely identifying the enum variant in `v`. +/// +/// If `T` is not an enum, calling this function will not result in undefined behavior, but the +/// return value is unspecified. +/// +/// # Stability +/// +/// The discriminant of an enum variant may change if the enum definition changes. A discriminant +/// of some variant will not change between compilations with the same compiler. 
+/// +/// # Examples +/// +/// This can be used to compare enums that carry data, while disregarding +/// the actual data: +/// +/// ``` +/// #![feature(discriminant_value)] +/// use std::mem; +/// +/// enum Foo { A(&'static str), B(i32), C(i32) } +/// +/// assert!(mem::discriminant(&Foo::A("bar")) == mem::discriminant(&Foo::A("baz"))); +/// assert!(mem::discriminant(&Foo::B(1)) == mem::discriminant(&Foo::B(2))); +/// assert!(mem::discriminant(&Foo::B(3)) != mem::discriminant(&Foo::C(3))); +/// ``` +#[unstable(feature = "discriminant_value", reason = "recently added, follows RFC", issue = "24263")] +pub fn discriminant(v: &T) -> Discriminant { + unsafe { + Discriminant(intrinsics::discriminant_value(v), PhantomData) + } +} + diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 92bbc4efb7cc1..47afaf77353ee 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -13,7 +13,6 @@ reason = "needs an RFC to flesh out the design", issue = "27730")] -use marker::Sized; use ops::{CoerceUnsized, Deref}; /// Unsafe trait to indicate what types are usable with the NonZero struct diff --git a/src/libcore/num/bignum.rs b/src/libcore/num/bignum.rs index 66c6deb361564..a1f4630c304bf 100644 --- a/src/libcore/num/bignum.rs +++ b/src/libcore/num/bignum.rs @@ -27,28 +27,29 @@ issue = "0")] #![macro_use] -use prelude::v1::*; - use mem; use intrinsics; /// Arithmetic operations required by bignums. -pub trait FullOps { +pub trait FullOps: Sized { /// Returns `(carry', v')` such that `carry' * 2^W + v' = self + other + carry`, /// where `W` is the number of bits in `Self`. - fn full_add(self, other: Self, carry: bool) -> (bool /*carry*/, Self); + fn full_add(self, other: Self, carry: bool) -> (bool /* carry */, Self); /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + carry`, /// where `W` is the number of bits in `Self`. 
- fn full_mul(self, other: Self, carry: Self) -> (Self /*carry*/, Self); + fn full_mul(self, other: Self, carry: Self) -> (Self /* carry */, Self); /// Returns `(carry', v')` such that `carry' * 2^W + v' = self * other + other2 + carry`, /// where `W` is the number of bits in `Self`. - fn full_mul_add(self, other: Self, other2: Self, carry: Self) -> (Self /*carry*/, Self); + fn full_mul_add(self, other: Self, other2: Self, carry: Self) -> (Self /* carry */, Self); /// Returns `(quo, rem)` such that `borrow * 2^W + self = quo * other + rem` /// and `0 <= rem < other`, where `W` is the number of bits in `Self`. - fn full_div_rem(self, other: Self, borrow: Self) -> (Self /*quotient*/, Self /*remainder*/); + fn full_div_rem(self, + other: Self, + borrow: Self) + -> (Self /* quotient */, Self /* remainder */); } macro_rules! impl_full_ops { @@ -102,11 +103,7 @@ impl_full_ops! { /// Table of powers of 5 representable in digits. Specifically, the largest {u8, u16, u32} value /// that's a power of five, plus the corresponding exponent. Used in `mul_pow5`. -const SMALL_POW5: [(u64, usize); 3] = [ - (125, 3), - (15625, 6), - (1_220_703_125, 13), -]; +const SMALL_POW5: [(u64, usize); 3] = [(125, 3), (15625, 6), (1_220_703_125, 13)]; macro_rules! define_bignum { ($name:ident: type=$ty:ty, n=$n:expr) => ( @@ -476,9 +473,9 @@ macro_rules! define_bignum { let sz = if self.size < 1 {1} else {self.size}; let digitlen = mem::size_of::<$ty>() * 2; - try!(write!(f, "{:#x}", self.base[sz-1])); + write!(f, "{:#x}", self.base[sz-1])?; for &v in self.base[..sz-1].iter().rev() { - try!(write!(f, "_{:01$x}", v, digitlen)); + write!(f, "_{:01$x}", v, digitlen)?; } ::result::Result::Ok(()) } @@ -494,6 +491,5 @@ define_bignum!(Big32x40: type=Digit32, n=40); // this one is used for testing only. 
#[doc(hidden)] pub mod tests { - use prelude::v1::*; define_bignum!(Big8x3: type=u8, n=3); } diff --git a/src/libcore/num/dec2flt/algorithm.rs b/src/libcore/num/dec2flt/algorithm.rs index 82d3389edc478..604bc7c10dea0 100644 --- a/src/libcore/num/dec2flt/algorithm.rs +++ b/src/libcore/num/dec2flt/algorithm.rs @@ -10,7 +10,6 @@ //! The various algorithms from the paper. -use prelude::v1::*; use cmp::min; use cmp::Ordering::{Less, Equal, Greater}; use num::diy_float::Fp; @@ -32,19 +31,79 @@ fn power_of_ten(e: i16) -> Fp { Fp { f: sig, e: exp } } +// In most architectures, floating point operations have an explicit bit size, therefore the +// precision of the computation is determined on a per-operation basis. +#[cfg(any(not(target_arch="x86"), target_feature="sse2"))] +mod fpu_precision { + pub fn set_precision() { } +} + +// On x86, the x87 FPU is used for float operations if the SSE/SSE2 extensions are not available. +// The x87 FPU operates with 80 bits of precision by default, which means that operations will +// round to 80 bits causing double rounding to happen when values are eventually represented as +// 32/64 bit float values. To overcome this, the FPU control word can be set so that the +// computations are performed in the desired precision. +#[cfg(all(target_arch="x86", not(target_feature="sse2")))] +mod fpu_precision { + use mem::size_of; + + /// A structure used to preserve the original value of the FPU control word, so that it can be + /// restored when the structure is dropped. + /// + /// The x87 FPU is a 16-bits register whose fields are as follows: + /// + /// | 12-15 | 10-11 | 8-9 | 6-7 | 5 | 4 | 3 | 2 | 1 | 0 | + /// |------:|------:|----:|----:|---:|---:|---:|---:|---:|---:| + /// | | RC | PC | | PM | UM | OM | ZM | DM | IM | + /// + /// The documentation for all of the fields is available in the IA-32 Architectures Software + /// Developer's Manual (Volume 1). 
+ /// + /// The only field which is relevant for the following code is PC, Precision Control. This + /// field determines the precision of the operations performed by the FPU. It can be set to: + /// - 0b00, single precision i.e. 32-bits + /// - 0b10, double precision i.e. 64-bits + /// - 0b11, double extended precision i.e. 80-bits (default state) + /// The 0b01 value is reserved and should not be used. + pub struct FPUControlWord(u16); + + fn set_cw(cw: u16) { + unsafe { asm!("fldcw $0" :: "m" (cw) :: "volatile") } + } + + /// Set the precision field of the FPU to `T` and return a `FPUControlWord` + pub fn set_precision() -> FPUControlWord { + let cw = 0u16; + + // Compute the value for the Precision Control field that is appropriate for `T`. + let cw_precision = match size_of::() { + 4 => 0x0000, // 32 bits + 8 => 0x0200, // 64 bits + _ => 0x0300, // default, 80 bits + }; + + // Get the original value of the control word to restore it later, when the + // `FPUControlWord` structure is dropped + unsafe { asm!("fnstcw $0" : "=*m" (&cw) ::: "volatile") } + + // Set the control word to the desired precision. This is achieved by masking away the old + // precision (bits 8 and 9, 0x300) and replacing it with the precision flag computed above. + set_cw((cw & 0xFCFF) | cw_precision); + + FPUControlWord(cw) + } + + impl Drop for FPUControlWord { + fn drop(&mut self) { + set_cw(self.0) + } + } +} + /// The fast path of Bellerophon using machine-sized integers and floats. /// /// This is extracted into a separate function so that it can be attempted before constructing /// a bignum. -/// -/// The fast path crucially depends on arithmetic being correctly rounded, so on x86 -/// without SSE or SSE2 it will be **wrong** (as in, off by one ULP occasionally), because the x87 -/// FPU stack will round to 80 bit first before rounding to 64/32 bit. 
However, as such hardware -/// is extremely rare nowadays and in fact all in-tree target triples assume an SSE2-capable -/// microarchitecture, there is little incentive to deal with that. There's a test that will fail -/// when SSE or SSE2 is disabled, so people building their own non-SSE copy will get a heads up. -/// -/// FIXME: It would nevertheless be nice if we had a good way to detect and deal with x87. pub fn fast_path(integral: &[u8], fractional: &[u8], e: i64) -> Option { let num_digits = integral.len() + fractional.len(); // log_10(f64::max_sig) ~ 15.95. We compare the exact value to max_sig near the end, @@ -60,9 +119,17 @@ pub fn fast_path(integral: &[u8], fractional: &[u8], e: i64) -> Opt if f > T::max_sig() { return None; } + + // The fast path crucially depends on arithmetic being rounded to the correct number of bits + // without any intermediate rounding. On x86 (without SSE or SSE2) this requires the precision + // of the x87 FPU stack to be changed so that it directly rounds to 64/32 bit. + // The `set_precision` function takes care of setting the precision on architectures which + // require setting it by changing the global state (like the control word of the x87 FPU). + let _cw = fpu_precision::set_precision::(); + // The case e < 0 cannot be folded into the other branch. Negative powers result in // a repeating fractional part in binary, which are rounded, which causes real - // (and occasioally quite significant!) errors in the final result. + // (and occasionally quite significant!) errors in the final result. if e >= 0 { Some(T::from_int(f) * T::short_fast_pow10(e as usize)) } else { @@ -127,7 +194,7 @@ fn algorithm_r(f: &Big, e: i16, z0: T) -> T { // This is written a bit awkwardly because our bignums don't support // negative numbers, so we use the absolute value + sign information. // The multiplication with m_digits can't overflow. 
If `x` or `y` are large enough that - // we need to worry about overflow, then they are also large enough that`make_ratio` has + // we need to worry about overflow, then they are also large enough that `make_ratio` has // reduced the fraction by a factor of 2^64 or more. let (d2, d_negative) = if x >= y { // Don't need x any more, save a clone(). @@ -252,7 +319,7 @@ pub fn algorithm_m(f: &Big, e: i16) -> T { return underflow(x, v, rem); } if k > T::max_exp_int() { - return T::infinity(); + return T::infinity2(); } if x < min_sig { u.mul_pow2(1); @@ -278,7 +345,7 @@ fn quick_start(u: &mut Big, v: &mut Big, k: &mut i16) { // The target ratio is one where u/v is in an in-range significand. Thus our termination // condition is log2(u / v) being the significand bits, plus/minus one. // FIXME Looking at the second bit could improve the estimate and avoid some more divisions. - let target_ratio = f64::sig_bits() as i16; + let target_ratio = T::sig_bits() as i16; let log2_u = u.bit_length() as i16; let log2_v = v.bit_length() as i16; let mut u_shift: i16 = 0; diff --git a/src/libcore/num/dec2flt/mod.rs b/src/libcore/num/dec2flt/mod.rs index 6acc621a61376..eee3e9250fe81 100644 --- a/src/libcore/num/dec2flt/mod.rs +++ b/src/libcore/num/dec2flt/mod.rs @@ -92,7 +92,6 @@ reason = "internal routines only exposed for testing", issue = "0")] -use prelude::v1::*; use fmt; use str::FromStr; @@ -149,13 +148,20 @@ from_str_float_impl!(f32); from_str_float_impl!(f64); /// An error which can be returned when parsing a float. -#[derive(Debug, Clone, PartialEq)] +/// +/// This error is used as the error type for the [`FromStr`] implementation +/// for [`f32`] and [`f64`]. 
+/// +/// [`FromStr`]: ../str/trait.FromStr.html +/// [`f32`]: ../../std/primitive.f32.html +/// [`f64`]: ../../std/primitive.f64.html +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ParseFloatError { kind: FloatErrorKind } -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum FloatErrorKind { Empty, Invalid, @@ -207,12 +213,12 @@ fn dec2flt(s: &str) -> Result { } let (sign, s) = extract_sign(s); let flt = match parse_decimal(s) { - ParseResult::Valid(decimal) => try!(convert(decimal)), - ParseResult::ShortcutToInf => T::infinity(), - ParseResult::ShortcutToZero => T::zero(), + ParseResult::Valid(decimal) => convert(decimal)?, + ParseResult::ShortcutToInf => T::infinity2(), + ParseResult::ShortcutToZero => T::zero2(), ParseResult::Invalid => match s { - "inf" => T::infinity(), - "NaN" => T::nan(), + "inf" => T::infinity2(), + "NaN" => T::nan2(), _ => { return Err(pfe_invalid()); } } }; @@ -230,18 +236,15 @@ fn convert(mut decimal: Decimal) -> Result { if let Some(x) = trivial_cases(&decimal) { return Ok(x); } - // AlgorithmM and AlgorithmR both compute approximately `f * 10^e`. - let max_digits = decimal.integral.len() + decimal.fractional.len() + - decimal.exp.abs() as usize; // Remove/shift out the decimal point. let e = decimal.exp - decimal.fractional.len() as i64; if let Some(x) = algorithm::fast_path(decimal.integral, decimal.fractional, e) { return Ok(x); } // Big32x40 is limited to 1280 bits, which translates to about 385 decimal digits. - // If we exceed this, perhaps while calculating `f * 10^e` in Algorithm R or Algorithm M, - // we'll crash. So we error out before getting too close, with a generous safety margin. - if max_digits > 375 { + // If we exceed this, we'll crash, so we error out before getting too close (within 10^10). 
+ let upper_bound = bound_intermediate_digits(&decimal, e); + if upper_bound > 375 { return Err(pfe_invalid()); } let f = digits_to_big(decimal.integral, decimal.fractional); @@ -251,7 +254,7 @@ fn convert(mut decimal: Decimal) -> Result { // FIXME These bounds are rather conservative. A more careful analysis of the failure modes // of Bellerophon could allow using it in more cases for a massive speed up. let exponent_in_range = table::MIN_E <= e && e <= table::MAX_E; - let value_in_range = max_digits <= T::max_normal_digits(); + let value_in_range = upper_bound <= T::max_normal_digits() as u64; if exponent_in_range && value_in_range { Ok(algorithm::bellerophon(&f, e)) } else { @@ -288,18 +291,41 @@ fn simplify(decimal: &mut Decimal) { } } +/// Quick and dirty upper bound on the size (log10) of the largest value that Algorithm R and +/// Algorithm M will compute while working on the given decimal. +fn bound_intermediate_digits(decimal: &Decimal, e: i64) -> u64 { + // We don't need to worry too much about overflow here thanks to trivial_cases() and the + // parser, which filter out the most extreme inputs for us. + let f_len: u64 = decimal.integral.len() as u64 + decimal.fractional.len() as u64; + if e >= 0 { + // In the case e >= 0, both algorithms compute about `f * 10^e`. Algorithm R proceeds to + // do some complicated calculations with this but we can ignore that for the upper bound + // because it also reduces the fraction beforehand, so we have plenty of buffer there. + f_len + (e as u64) + } else { + // If e < 0, Algorithm R does roughly the same thing, but Algorithm M differs: + // It tries to find a positive number k such that `f << k / 10^e` is an in-range + // significand. This will result in about `2^53 * f * 10^e` < `10^17 * f * 10^e`. + // One input that triggers this is 0.33...33 (375 x 3). + f_len + (e.abs() as u64) + 17 + } +} + /// Detect obvious overflows and underflows without even looking at the decimal digits. 
fn trivial_cases(decimal: &Decimal) -> Option { // There were zeros but they were stripped by simplify() if decimal.integral.is_empty() && decimal.fractional.is_empty() { - return Some(T::zero()); + return Some(T::zero2()); } - // This is a crude approximation of ceil(log10(the real value)). + // This is a crude approximation of ceil(log10(the real value)). We don't need to worry too + // much about overflow here because the input length is tiny (at least compared to 2^64) and + // the parser already handles exponents whose absolute value is greater than 10^18 + // (which is still 10^19 short of 2^64). let max_place = decimal.exp + decimal.integral.len() as i64; if max_place > T::inf_cutoff() { - return Some(T::infinity()); + return Some(T::infinity2()); } else if max_place < T::zero_cutoff() { - return Some(T::zero()); + return Some(T::zero2()); } None } diff --git a/src/libcore/num/dec2flt/num.rs b/src/libcore/num/dec2flt/num.rs index 81e7856633b25..34b41fa9decd2 100644 --- a/src/libcore/num/dec2flt/num.rs +++ b/src/libcore/num/dec2flt/num.rs @@ -12,7 +12,6 @@ // FIXME This module's name is a bit unfortunate, since other modules also import `core::num`. -use prelude::v1::*; use cmp::Ordering::{self, Less, Equal, Greater}; pub use num::bignum::Big32x40 as Big; diff --git a/src/libcore/num/dec2flt/parse.rs b/src/libcore/num/dec2flt/parse.rs index fce1c250a022e..d20986faa0fc2 100644 --- a/src/libcore/num/dec2flt/parse.rs +++ b/src/libcore/num/dec2flt/parse.rs @@ -20,7 +20,6 @@ //! modules rely on to not panic (or overflow) in turn. //! To make matters worse, all that happens in a single pass over the input. //! So, be careful when modifying anything, and double-check with the other modules. 
-use prelude::v1::*; use super::num; use self::ParseResult::{Valid, ShortcutToInf, ShortcutToZero, Invalid}; diff --git a/src/libcore/num/dec2flt/rawfp.rs b/src/libcore/num/dec2flt/rawfp.rs index 2099c6a7baa76..e3b58b6cc7ce9 100644 --- a/src/libcore/num/dec2flt/rawfp.rs +++ b/src/libcore/num/dec2flt/rawfp.rs @@ -27,7 +27,6 @@ //! Many functions in this module only handle normal numbers. The dec2flt routines conservatively //! take the universally-correct slow path (Algorithm M) for very small and very large numbers. //! That algorithm needs only next_float() which does handle subnormals and zeros. -use prelude::v1::*; use u32; use cmp::Ordering::{Less, Equal, Greater}; use ops::{Mul, Div, Neg}; @@ -61,6 +60,27 @@ impl Unpacked { pub trait RawFloat : Float + Copy + Debug + LowerExp + Mul + Div + Neg { + // suffix of "2" because Float::infinity is deprecated + #[allow(deprecated)] + fn infinity2() -> Self { + Float::infinity() + } + + // suffix of "2" because Float::nan is deprecated + #[allow(deprecated)] + fn nan2() -> Self { + Float::nan() + } + + // suffix of "2" because Float::zero is deprecated + fn zero2() -> Self; + + // suffix of "2" because Float::integer_decode is deprecated + #[allow(deprecated)] + fn integer_decode2(self) -> (u64, i16, i8) { + Float::integer_decode(self) + } + /// Get the raw binary representation of the float. 
fn transmute(self) -> u64; @@ -146,6 +166,10 @@ pub trait RawFloat : Float + Copy + Debug + LowerExp } impl RawFloat for f32 { + fn zero2() -> Self { + 0.0 + } + fn sig_bits() -> u8 { 24 } @@ -169,7 +193,7 @@ impl RawFloat for f32 { } fn unpack(self) -> Unpacked { - let (sig, exp, _sig) = self.integer_decode(); + let (sig, exp, _sig) = self.integer_decode2(); Unpacked::new(sig, exp) } @@ -198,6 +222,10 @@ impl RawFloat for f32 { impl RawFloat for f64 { + fn zero2() -> Self { + 0.0 + } + fn sig_bits() -> u8 { 53 } @@ -220,7 +248,7 @@ impl RawFloat for f64 { } fn unpack(self) -> Unpacked { - let (sig, exp, _sig) = self.integer_decode(); + let (sig, exp, _sig) = self.integer_decode2(); Unpacked::new(sig, exp) } @@ -351,7 +379,7 @@ pub fn prev_float(x: T) -> T { pub fn next_float(x: T) -> T { match x.classify() { Nan => panic!("next_float: argument is NaN"), - Infinite => T::infinity(), + Infinite => T::infinity2(), // This seems too good to be true, but it works. // 0.0 is encoded as the all-zero word. Subnormals are 0x000m...m where m is the mantissa. // In particular, the smallest subnormal is 0x0...01 and the largest is 0x000F...F. 
diff --git a/src/libcore/num/diy_float.rs b/src/libcore/num/diy_float.rs index 7c369ee3b3bd7..11eea753f93f9 100644 --- a/src/libcore/num/diy_float.rs +++ b/src/libcore/num/diy_float.rs @@ -49,12 +49,30 @@ impl Fp { pub fn normalize(&self) -> Fp { let mut f = self.f; let mut e = self.e; - if f >> (64 - 32) == 0 { f <<= 32; e -= 32; } - if f >> (64 - 16) == 0 { f <<= 16; e -= 16; } - if f >> (64 - 8) == 0 { f <<= 8; e -= 8; } - if f >> (64 - 4) == 0 { f <<= 4; e -= 4; } - if f >> (64 - 2) == 0 { f <<= 2; e -= 2; } - if f >> (64 - 1) == 0 { f <<= 1; e -= 1; } + if f >> (64 - 32) == 0 { + f <<= 32; + e -= 32; + } + if f >> (64 - 16) == 0 { + f <<= 16; + e -= 16; + } + if f >> (64 - 8) == 0 { + f <<= 8; + e -= 8; + } + if f >> (64 - 4) == 0 { + f <<= 4; + e -= 4; + } + if f >> (64 - 2) == 0 { + f <<= 2; + e -= 2; + } + if f >> (64 - 1) == 0 { + f <<= 1; + e -= 1; + } debug_assert!(f >= (1 >> 63)); Fp { f: f, e: e } } @@ -66,6 +84,9 @@ impl Fp { assert!(edelta >= 0); let edelta = edelta as usize; assert_eq!(self.f << edelta >> edelta, self.f); - Fp { f: self.f << edelta, e: e } + Fp { + f: self.f << edelta, + e: e, + } } } diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index 8af1022acdf24..4527d46a27d8a 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -20,155 +20,169 @@ use mem; use num::Float; use num::FpCategory as Fp; +/// The radix or base of the internal representation of `f32`. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const RADIX: u32 = 2; +/// Number of significant digits in base 2. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MANTISSA_DIGITS: u32 = 24; +/// Approximate number of significant digits in base 10. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const DIGITS: u32 = 6; +/// Difference between `1.0` and the next largest representable number. 
#[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const EPSILON: f32 = 1.19209290e-07_f32; -/// Smallest finite f32 value +/// Smallest finite `f32` value. #[stable(feature = "rust1", since = "1.0.0")] pub const MIN: f32 = -3.40282347e+38_f32; -/// Smallest positive, normalized f32 value +/// Smallest positive normal `f32` value. #[stable(feature = "rust1", since = "1.0.0")] pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32; -/// Largest finite f32 value +/// Largest finite `f32` value. #[stable(feature = "rust1", since = "1.0.0")] pub const MAX: f32 = 3.40282347e+38_f32; +/// One greater than the minimum possible normal power of 2 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MIN_EXP: i32 = -125; +/// Maximum possible power of 2 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MAX_EXP: i32 = 128; +/// Minimum possible normal power of 10 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MIN_10_EXP: i32 = -37; +/// Maximum possible power of 10 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MAX_10_EXP: i32 = 38; +/// Not a Number (NaN). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const NAN: f32 = 0.0_f32/0.0_f32; +pub const NAN: f32 = 0.0_f32 / 0.0_f32; +/// Infinity (∞). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const INFINITY: f32 = 1.0_f32/0.0_f32; +pub const INFINITY: f32 = 1.0_f32 / 0.0_f32; +/// Negative infinity (-∞). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32; +pub const NEG_INFINITY: f32 = -1.0_f32 / 0.0_f32; /// Basic mathematical constants. #[stable(feature = "rust1", since = "1.0.0")] pub mod consts { // FIXME: replace with mathematical constants from cmath. 
- /// Archimedes' constant + /// Archimedes' constant (π) #[stable(feature = "rust1", since = "1.0.0")] pub const PI: f32 = 3.14159265358979323846264338327950288_f32; - /// pi/2.0 + /// π/2 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32; - /// pi/3.0 + /// π/3 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32; - /// pi/4.0 + /// π/4 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32; - /// pi/6.0 + /// π/6 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32; - /// pi/8.0 + /// π/8 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32; - /// 1.0/pi + /// 1/π #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32; - /// 2.0/pi + /// 2/π #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32; - /// 2.0/sqrt(pi) + /// 2/sqrt(π) #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32; - /// sqrt(2.0) + /// sqrt(2) #[stable(feature = "rust1", since = "1.0.0")] pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32; - /// 1.0/sqrt(2.0) + /// 1/sqrt(2) #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32; - /// Euler's number + /// Euler's number (e) #[stable(feature = "rust1", since = "1.0.0")] pub const E: f32 = 2.71828182845904523536028747135266250_f32; - /// log2(e) + /// log2(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32; - /// log10(e) + /// log10(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG10_E: f32 = 
0.434294481903251827651128918916605082_f32; - /// ln(2.0) + /// ln(2) #[stable(feature = "rust1", since = "1.0.0")] pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32; - /// ln(10.0) + /// ln(10) #[stable(feature = "rust1", since = "1.0.0")] pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32; } #[unstable(feature = "core_float", reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "27702")] + issue = "32110")] impl Float for f32 { #[inline] - fn nan() -> f32 { NAN } + fn nan() -> f32 { + NAN + } #[inline] - fn infinity() -> f32 { INFINITY } + fn infinity() -> f32 { + INFINITY + } #[inline] - fn neg_infinity() -> f32 { NEG_INFINITY } + fn neg_infinity() -> f32 { + NEG_INFINITY + } #[inline] - fn zero() -> f32 { 0.0 } + fn zero() -> f32 { + 0.0 + } #[inline] - fn neg_zero() -> f32 { -0.0 } + fn neg_zero() -> f32 { + -0.0 + } #[inline] - fn one() -> f32 { 1.0 } + fn one() -> f32 { + 1.0 + } /// Returns `true` if the number is NaN. #[inline] - fn is_nan(self) -> bool { self != self } + fn is_nan(self) -> bool { + self != self + } /// Returns `true` if the number is infinite. #[inline] fn is_infinite(self) -> bool { - self == Float::infinity() || self == Float::neg_infinity() + self == INFINITY || self == NEG_INFINITY } /// Returns `true` if the number is neither infinite or NaN. @@ -192,11 +206,11 @@ impl Float for f32 { let bits: u32 = unsafe { mem::transmute(self) }; match (bits & MAN_MASK, bits & EXP_MASK) { - (0, 0) => Fp::Zero, - (_, 0) => Fp::Subnormal, + (0, 0) => Fp::Zero, + (_, 0) => Fp::Subnormal, (0, EXP_MASK) => Fp::Infinite, (_, EXP_MASK) => Fp::Nan, - _ => Fp::Normal, + _ => Fp::Normal, } } @@ -230,7 +244,7 @@ impl Float for f32 { #[inline] fn signum(self) -> f32 { if self.is_nan() { - Float::nan() + NAN } else { unsafe { intrinsics::copysignf32(1.0, self) } } @@ -240,19 +254,21 @@ impl Float for f32 { /// `Float::infinity()`. 
#[inline] fn is_sign_positive(self) -> bool { - self > 0.0 || (1.0 / self) == Float::infinity() + self > 0.0 || (1.0 / self) == INFINITY } /// Returns `true` if `self` is negative, including `-0.0` and /// `Float::neg_infinity()`. #[inline] fn is_sign_negative(self) -> bool { - self < 0.0 || (1.0 / self) == Float::neg_infinity() + self < 0.0 || (1.0 / self) == NEG_INFINITY } /// Returns the reciprocal (multiplicative inverse) of the number. #[inline] - fn recip(self) -> f32 { 1.0 / self } + fn recip(self) -> f32 { + 1.0 / self + } #[inline] fn powi(self, n: i32) -> f32 { @@ -261,7 +277,9 @@ impl Float for f32 { /// Converts to degrees, assuming the number is in radians. #[inline] - fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) } + fn to_degrees(self) -> f32 { + self * (180.0f32 / consts::PI) + } /// Converts to radians, assuming the number is in degrees. #[inline] diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs index 9486e4337bf58..991a856834948 100644 --- a/src/libcore/num/f64.rs +++ b/src/libcore/num/f64.rs @@ -20,155 +20,169 @@ use mem; use num::FpCategory as Fp; use num::Float; +/// The radix or base of the internal representation of `f64`. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const RADIX: u32 = 2; +/// Number of significant digits in base 2. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MANTISSA_DIGITS: u32 = 53; +/// Approximate number of significant digits in base 10. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const DIGITS: u32 = 15; +/// Difference between `1.0` and the next largest representable number. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const EPSILON: f64 = 2.2204460492503131e-16_f64; -/// Smallest finite f64 value +/// Smallest finite `f64` value. 
#[stable(feature = "rust1", since = "1.0.0")] pub const MIN: f64 = -1.7976931348623157e+308_f64; -/// Smallest positive, normalized f64 value +/// Smallest positive normal `f64` value. #[stable(feature = "rust1", since = "1.0.0")] pub const MIN_POSITIVE: f64 = 2.2250738585072014e-308_f64; -/// Largest finite f64 value +/// Largest finite `f64` value. #[stable(feature = "rust1", since = "1.0.0")] pub const MAX: f64 = 1.7976931348623157e+308_f64; +/// One greater than the minimum possible normal power of 2 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MIN_EXP: i32 = -1021; +/// Maximum possible power of 2 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MAX_EXP: i32 = 1024; +/// Minimum possible normal power of 10 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MIN_10_EXP: i32 = -307; +/// Maximum possible power of 10 exponent. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] pub const MAX_10_EXP: i32 = 308; +/// Not a Number (NaN). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const NAN: f64 = 0.0_f64/0.0_f64; +pub const NAN: f64 = 0.0_f64 / 0.0_f64; +/// Infinity (∞). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const INFINITY: f64 = 1.0_f64/0.0_f64; +pub const INFINITY: f64 = 1.0_f64 / 0.0_f64; +/// Negative infinity (-∞). #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const NEG_INFINITY: f64 = -1.0_f64/0.0_f64; +pub const NEG_INFINITY: f64 = -1.0_f64 / 0.0_f64; /// Basic mathematical constants. #[stable(feature = "rust1", since = "1.0.0")] pub mod consts { // FIXME: replace with mathematical constants from cmath. 
- /// Archimedes' constant + /// Archimedes' constant (π) #[stable(feature = "rust1", since = "1.0.0")] pub const PI: f64 = 3.14159265358979323846264338327950288_f64; - /// pi/2.0 + /// π/2 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_2: f64 = 1.57079632679489661923132169163975144_f64; - /// pi/3.0 + /// π/3 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_3: f64 = 1.04719755119659774615421446109316763_f64; - /// pi/4.0 + /// π/4 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_4: f64 = 0.785398163397448309615660845819875721_f64; - /// pi/6.0 + /// π/6 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_6: f64 = 0.52359877559829887307710723054658381_f64; - /// pi/8.0 + /// π/8 #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_PI_8: f64 = 0.39269908169872415480783042290993786_f64; - /// 1.0/pi + /// 1/π #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_1_PI: f64 = 0.318309886183790671537767526745028724_f64; - /// 2.0/pi + /// 2/π #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_2_PI: f64 = 0.636619772367581343075535053490057448_f64; - /// 2.0/sqrt(pi) + /// 2/sqrt(π) #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_2_SQRT_PI: f64 = 1.12837916709551257389615890312154517_f64; - /// sqrt(2.0) + /// sqrt(2) #[stable(feature = "rust1", since = "1.0.0")] pub const SQRT_2: f64 = 1.41421356237309504880168872420969808_f64; - /// 1.0/sqrt(2.0) + /// 1/sqrt(2) #[stable(feature = "rust1", since = "1.0.0")] pub const FRAC_1_SQRT_2: f64 = 0.707106781186547524400844362104849039_f64; - /// Euler's number + /// Euler's number (e) #[stable(feature = "rust1", since = "1.0.0")] pub const E: f64 = 2.71828182845904523536028747135266250_f64; - /// log2(e) + /// log2(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG2_E: f64 = 1.44269504088896340735992468100189214_f64; - /// log10(e) + /// log10(e) #[stable(feature = "rust1", since = "1.0.0")] pub const LOG10_E: f64 = 
0.434294481903251827651128918916605082_f64; - /// ln(2.0) + /// ln(2) #[stable(feature = "rust1", since = "1.0.0")] pub const LN_2: f64 = 0.693147180559945309417232121458176568_f64; - /// ln(10.0) + /// ln(10) #[stable(feature = "rust1", since = "1.0.0")] pub const LN_10: f64 = 2.30258509299404568401799145468436421_f64; } #[unstable(feature = "core_float", reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "27702")] + issue = "32110")] impl Float for f64 { #[inline] - fn nan() -> f64 { NAN } + fn nan() -> f64 { + NAN + } #[inline] - fn infinity() -> f64 { INFINITY } + fn infinity() -> f64 { + INFINITY + } #[inline] - fn neg_infinity() -> f64 { NEG_INFINITY } + fn neg_infinity() -> f64 { + NEG_INFINITY + } #[inline] - fn zero() -> f64 { 0.0 } + fn zero() -> f64 { + 0.0 + } #[inline] - fn neg_zero() -> f64 { -0.0 } + fn neg_zero() -> f64 { + -0.0 + } #[inline] - fn one() -> f64 { 1.0 } + fn one() -> f64 { + 1.0 + } /// Returns `true` if the number is NaN. #[inline] - fn is_nan(self) -> bool { self != self } + fn is_nan(self) -> bool { + self != self + } /// Returns `true` if the number is infinite. #[inline] fn is_infinite(self) -> bool { - self == Float::infinity() || self == Float::neg_infinity() + self == INFINITY || self == NEG_INFINITY } /// Returns `true` if the number is neither infinite or NaN. @@ -192,11 +206,11 @@ impl Float for f64 { let bits: u64 = unsafe { mem::transmute(self) }; match (bits & MAN_MASK, bits & EXP_MASK) { - (0, 0) => Fp::Zero, - (_, 0) => Fp::Subnormal, + (0, 0) => Fp::Zero, + (_, 0) => Fp::Subnormal, (0, EXP_MASK) => Fp::Infinite, (_, EXP_MASK) => Fp::Nan, - _ => Fp::Normal, + _ => Fp::Normal, } } @@ -230,7 +244,7 @@ impl Float for f64 { #[inline] fn signum(self) -> f64 { if self.is_nan() { - Float::nan() + NAN } else { unsafe { intrinsics::copysignf64(1.0, self) } } @@ -240,19 +254,21 @@ impl Float for f64 { /// `Float::infinity()`. 
#[inline] fn is_sign_positive(self) -> bool { - self > 0.0 || (1.0 / self) == Float::infinity() + self > 0.0 || (1.0 / self) == INFINITY } /// Returns `true` if `self` is negative, including `-0.0` and /// `Float::neg_infinity()`. #[inline] fn is_sign_negative(self) -> bool { - self < 0.0 || (1.0 / self) == Float::neg_infinity() + self < 0.0 || (1.0 / self) == NEG_INFINITY } /// Returns the reciprocal (multiplicative inverse) of the number. #[inline] - fn recip(self) -> f64 { 1.0 / self } + fn recip(self) -> f64 { + 1.0 / self + } #[inline] fn powi(self, n: i32) -> f64 { @@ -261,7 +277,9 @@ impl Float for f64 { /// Converts to degrees, assuming the number is in radians. #[inline] - fn to_degrees(self) -> f64 { self * (180.0f64 / consts::PI) } + fn to_degrees(self) -> f64 { + self * (180.0f64 / consts::PI) + } /// Converts to radians, assuming the number is in degrees. #[inline] diff --git a/src/libcore/num/flt2dec/decoder.rs b/src/libcore/num/flt2dec/decoder.rs index 6265691bde9e9..72529d3da01d1 100644 --- a/src/libcore/num/flt2dec/decoder.rs +++ b/src/libcore/num/flt2dec/decoder.rs @@ -10,10 +10,9 @@ //! Decodes a floating-point value into individual parts and error ranges. -use prelude::v1::*; - use {f32, f64}; -use num::{Float, FpCategory}; +use num::FpCategory; +use num::dec2flt::rawfp::RawFloat; /// Decoded unsigned finite value, such that: /// @@ -22,7 +21,7 @@ use num::{Float, FpCategory}; /// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will /// round to the original value. The range is inclusive only when /// `inclusive` is true. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct Decoded { /// The scaled mantissa. pub mant: u64, @@ -39,7 +38,7 @@ pub struct Decoded { } /// Decoded unsigned value. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum FullDecoded { /// Not-a-number. 
Nan, @@ -52,7 +51,7 @@ pub enum FullDecoded { } /// A floating point type which can be `decode`d. -pub trait DecodableFloat: Float + Copy { +pub trait DecodableFloat: RawFloat + Copy { /// The minimum positive normalized value. fn min_pos_norm_value() -> Self; } @@ -68,7 +67,7 @@ impl DecodableFloat for f64 { /// Returns a sign (true when negative) and `FullDecoded` value /// from given floating point number. pub fn decode(v: T) -> (/*negative?*/ bool, FullDecoded) { - let (mant, exp, sign) = v.integer_decode(); + let (mant, exp, sign) = v.integer_decode2(); let even = (mant & 1) == 0; let decoded = match v.classify() { FpCategory::Nan => FullDecoded::Nan, @@ -82,7 +81,7 @@ pub fn decode(v: T) -> (/*negative?*/ bool, FullDecoded) { exp: exp, inclusive: even }) } FpCategory::Normal => { - let minnorm = ::min_pos_norm_value().integer_decode(); + let minnorm = ::min_pos_norm_value().integer_decode2(); if mant == minnorm.0 { // neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp) // where maxmant = minnormmant * 2 - 1 diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index 9f7672a52a183..f6c03a59f81e4 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -130,7 +130,6 @@ functions. reason = "internal routines only exposed for testing", issue = "0")] -use prelude::v1::*; use i16; pub use self::decoder::{decode, DecodableFloat, FullDecoded, Decoded}; @@ -210,7 +209,7 @@ impl<'a> Part<'a> { } } Part::Copy(buf) => { - out[..buf.len()].clone_from_slice(buf); + out[..buf.len()].copy_from_slice(buf); } } Some(len) @@ -222,6 +221,7 @@ impl<'a> Part<'a> { /// Formatted result containing one or more parts. /// This can be written to the byte buffer or converted to the allocated string. +#[allow(missing_debug_implementations)] #[derive(Clone)] pub struct Formatted<'a> { /// A byte slice representing a sign, either `""`, `"-"` or `"+"`. 
@@ -245,7 +245,7 @@ impl<'a> Formatted<'a> { /// (It may still leave partially written bytes in the buffer; do not rely on that.) pub fn write(&self, out: &mut [u8]) -> Option { if out.len() < self.sign.len() { return None; } - out[..self.sign.len()].clone_from_slice(self.sign); + out[..self.sign.len()].copy_from_slice(self.sign); let mut written = self.sign.len(); for part in self.parts { diff --git a/src/libcore/num/flt2dec/strategy/dragon.rs b/src/libcore/num/flt2dec/strategy/dragon.rs index 2d68c3a6d026e..6aa4f297e75ba 100644 --- a/src/libcore/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/num/flt2dec/strategy/dragon.rs @@ -15,8 +15,6 @@ Almost direct (but slightly optimized) Rust translation of Figure 3 of [1]. quickly and accurately. SIGPLAN Not. 31, 5 (May. 1996), 108-116. */ -use prelude::v1::*; - use cmp::Ordering; use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up}; diff --git a/src/libcore/num/flt2dec/strategy/grisu.rs b/src/libcore/num/flt2dec/strategy/grisu.rs index 13e01d9a7f7ab..cf70a1978f5e6 100644 --- a/src/libcore/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/num/flt2dec/strategy/grisu.rs @@ -16,8 +16,6 @@ Rust adaptation of Grisu3 algorithm described in [1]. It uses about accurately with integers. SIGPLAN Not. 45, 6 (June 2010), 233-243. */ -use prelude::v1::*; - use num::diy_float::Fp; use num::flt2dec::{Decoded, MAX_SIG_DIGITS, round_up}; diff --git a/src/libcore/num/i16.rs b/src/libcore/num/i16.rs index 4054497941797..1dd820980f496 100644 --- a/src/libcore/num/i16.rs +++ b/src/libcore/num/i16.rs @@ -10,7 +10,7 @@ //! The 16-bit signed integer type. //! -//! *[See also the `i16` primitive type](../primitive.i16.html).* +//! *[See also the `i16` primitive type](../../std/primitive.i16.html).* #![stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/num/i32.rs b/src/libcore/num/i32.rs index 5d2ade8d8e0e9..8a2168933dc66 100644 --- a/src/libcore/num/i32.rs +++ b/src/libcore/num/i32.rs @@ -10,7 +10,7 @@ //! 
The 32-bit signed integer type. //! -//! *[See also the `i32` primitive type](../primitive.i32.html).* +//! *[See also the `i32` primitive type](../../std/primitive.i32.html).* #![stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/num/i64.rs b/src/libcore/num/i64.rs index b1d43a3b83872..2ce9eb11936bc 100644 --- a/src/libcore/num/i64.rs +++ b/src/libcore/num/i64.rs @@ -10,7 +10,7 @@ //! The 64-bit signed integer type. //! -//! *[See also the `i64` primitive type](../primitive.i64.html).* +//! *[See also the `i64` primitive type](../../std/primitive.i64.html).* #![stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/num/i8.rs b/src/libcore/num/i8.rs index ee003d92b2823..8b5a7f1910e20 100644 --- a/src/libcore/num/i8.rs +++ b/src/libcore/num/i8.rs @@ -10,7 +10,7 @@ //! The 8-bit signed integer type. //! -//! *[See also the `i8` primitive type](../primitive.i8.html).* +//! *[See also the `i8` primitive type](../../std/primitive.i8.html).* #![stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/num/int_macros.rs b/src/libcore/num/int_macros.rs index 77f662723c86d..e74c30d5e5af8 100644 --- a/src/libcore/num/int_macros.rs +++ b/src/libcore/num/int_macros.rs @@ -10,37 +10,13 @@ #![doc(hidden)] -macro_rules! int_module { ($T:ty, $bits:expr) => ( +macro_rules! int_module { ($T:ident, $bits:expr) => ( -// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of -// calling the `mem::size_of` function. -#[unstable(feature = "num_bits_bytes", - reason = "may want to be an associated function", - issue = "27753")] -#[rustc_deprecated(since = "1.7.0", - reason = "will be replaced via const fn or associated constants")] -#[allow(missing_docs)] -pub const BITS : usize = $bits; -// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of -// calling the `mem::size_of` function. 
-#[unstable(feature = "num_bits_bytes", - reason = "may want to be an associated function", - issue = "27753")] -#[rustc_deprecated(since = "1.7.0", - reason = "will be replaced via const fn or associated constants")] -#[allow(missing_docs)] -pub const BYTES : usize = ($bits / 8); - -// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of -// calling the `Bounded::min_value` function. +/// The smallest value that can be represented by this integer type. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const MIN: $T = (-1 as $T) << ($bits - 1); -// FIXME(#9837): Compute MIN like this so the high bits that shouldn't exist are 0. -// FIXME(#11621): Should be deprecated once CTFE is implemented in favour of -// calling the `Bounded::max_value` function. +pub const MIN: $T = $T::min_value(); +/// The largest value that can be represented by this integer type. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const MAX: $T = !MIN; +pub const MAX: $T = $T::max_value(); ) } diff --git a/src/libcore/num/isize.rs b/src/libcore/num/isize.rs index 034a5c0eb89d4..86bcef4011d02 100644 --- a/src/libcore/num/isize.rs +++ b/src/libcore/num/isize.rs @@ -10,10 +10,12 @@ //! The pointer-sized signed integer type. //! -//! *[See also the `isize` primitive type](../primitive.isize.html).* +//! *[See also the `isize` primitive type](../../std/primitive.isize.html).* #![stable(feature = "rust1", since = "1.0.0")] +#[cfg(target_pointer_width = "16")] +int_module! { isize, 16 } #[cfg(target_pointer_width = "32")] int_module! { isize, 32 } #[cfg(target_pointer_width = "64")] diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index e3e8bcab4f1d7..a4529909e83ef 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -11,19 +11,12 @@ //! Numeric traits and functions for the built-in numeric types. 
#![stable(feature = "rust1", since = "1.0.0")] -#![allow(missing_docs)] -use char::CharExt; -use cmp::{Eq, PartialOrd}; -use convert::From; +use convert::TryFrom; use fmt; use intrinsics; -use marker::{Copy, Sized}; use mem::size_of; -use option::Option::{self, Some, None}; -use result::Result::{self, Ok, Err}; -use str::{FromStr, StrExt}; -use slice::SliceExt; +use str::FromStr; /// Provides intentionally-wrapped arithmetic on `T`. /// @@ -37,11 +30,65 @@ use slice::SliceExt; /// `wrapping_add`, or through the `Wrapping` type, which says that /// all standard arithmetic operations on the underlying value are /// intended to have wrapping semantics. +/// +/// # Examples +/// +/// ``` +/// use std::num::Wrapping; +/// +/// let zero = Wrapping(0u32); +/// let one = Wrapping(1u32); +/// +/// assert_eq!(std::u32::MAX, (zero - one).0); +/// ``` #[stable(feature = "rust1", since = "1.0.0")] -#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug, Default)] -pub struct Wrapping(#[stable(feature = "rust1", since = "1.0.0")] pub T); +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)] +pub struct Wrapping(#[stable(feature = "rust1", since = "1.0.0")] + pub T); + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[stable(feature = "wrapping_display", since = "1.10.0")] +impl fmt::Display for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[stable(feature = "wrapping_fmt", since = "1.11.0")] +impl fmt::Binary for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} -pub mod wrapping; +#[stable(feature = "wrapping_fmt", since = "1.11.0")] +impl fmt::Octal for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[stable(feature = "wrapping_fmt", since = "1.11.0")] +impl fmt::LowerHex for Wrapping { + fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +#[stable(feature = "wrapping_fmt", since = "1.11.0")] +impl fmt::UpperHex for Wrapping { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0.fmt(f) + } +} + +mod wrapping; // All these modules are technically private and only exposed for libcoretest: pub mod flt2dec; @@ -56,6 +103,8 @@ pub mod diy_float; #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] +#[rustc_deprecated(since = "1.11.0", reason = "no longer used for \ + Iterator::sum")] pub trait Zero: Sized { /// The "zero" (usually, additive identity) for this type. fn zero() -> Self; @@ -68,6 +117,8 @@ pub trait Zero: Sized { #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] +#[rustc_deprecated(since = "1.11.0", reason = "no longer used for \ + Iterator::product")] pub trait One: Sized { /// The "one" (usually, multiplicative identity) for this type. fn one() -> Self; @@ -78,6 +129,7 @@ macro_rules! zero_one_impl { #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] + #[allow(deprecated)] impl Zero for $t { #[inline] fn zero() -> Self { 0 } @@ -85,6 +137,7 @@ macro_rules! zero_one_impl { #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] + #[allow(deprecated)] impl One for $t { #[inline] fn one() -> Self { 1 } @@ -98,6 +151,7 @@ macro_rules! zero_one_impl_float { #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] + #[allow(deprecated)] impl Zero for $t { #[inline] fn zero() -> Self { 0.0 } @@ -105,6 +159,7 @@ macro_rules! 
zero_one_impl_float { #[unstable(feature = "zero_one", reason = "unsure of placement, wants to use associated constants", issue = "27739")] + #[allow(deprecated)] impl One for $t { #[inline] fn one() -> Self { 1.0 } @@ -127,6 +182,12 @@ macro_rules! int_impl { $sub_with_overflow:path, $mul_with_overflow:path) => { /// Returns the smallest value that can be represented by this integer type. + /// + /// # Examples + /// + /// ``` + /// assert_eq!(i8::min_value(), -128); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub const fn min_value() -> Self { @@ -134,6 +195,12 @@ macro_rules! int_impl { } /// Returns the largest value that can be represented by this integer type. + /// + /// # Examples + /// + /// ``` + /// assert_eq!(i8::max_value(), 127); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub const fn max_value() -> Self { @@ -149,7 +216,7 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// assert_eq!(u32::from_str_radix("A", 16), Ok(10)); + /// assert_eq!(i32::from_str_radix("A", 16), Ok(10)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_str_radix(src: &str, radix: u32) -> Result { @@ -163,9 +230,9 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0b01001100u8; + /// let n = -0b1000_0000i8; /// - /// assert_eq!(n.count_ones(), 3); + /// assert_eq!(n.count_ones(), 1); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -178,9 +245,9 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0b01001100u8; + /// let n = -0b1000_0000i8; /// - /// assert_eq!(n.count_zeros(), 5); + /// assert_eq!(n.count_zeros(), 7); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -196,9 +263,9 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// let n = 0b0101000u16; + /// let n = -1i16; /// - /// assert_eq!(n.leading_zeros(), 10); + /// assert_eq!(n.leading_zeros(), 0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -214,9 +281,9 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0b0101000u16; + /// let n = -4i8; /// - /// assert_eq!(n.trailing_zeros(), 3); + /// assert_eq!(n.trailing_zeros(), 2); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -227,15 +294,17 @@ macro_rules! int_impl { /// Shifts the bits to the left by a specified amount, `n`, /// wrapping the truncated bits to the end of the resulting integer. /// + /// Please note this isn't the same operation as `<<`! + /// /// # Examples /// /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// let m = 0x3456789ABCDEF012u64; + /// let n = 0x0123456789ABCDEFi64; + /// let m = -0x76543210FEDCBA99i64; /// - /// assert_eq!(n.rotate_left(12), m); + /// assert_eq!(n.rotate_left(32), m); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -247,15 +316,17 @@ macro_rules! int_impl { /// wrapping the truncated bits to the beginning of the resulting /// integer. /// + /// Please note this isn't the same operation as `>>`! + /// /// # Examples /// /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// let m = 0xDEF0123456789ABCu64; + /// let n = 0x0123456789ABCDEFi64; + /// let m = -0xFEDCBA987654322i64; /// - /// assert_eq!(n.rotate_right(12), m); + /// assert_eq!(n.rotate_right(4), m); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -270,8 +341,8 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; - /// let m = 0xEFCDAB8967452301u64; + /// let n = 0x0123456789ABCDEFi64; + /// let m = -0x1032547698BADCFFi64; /// /// assert_eq!(n.swap_bytes(), m); /// ``` @@ -291,12 +362,12 @@ macro_rules! 
int_impl { /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; + /// let n = 0x0123456789ABCDEFi64; /// /// if cfg!(target_endian = "big") { - /// assert_eq!(u64::from_be(n), n) + /// assert_eq!(i64::from_be(n), n) /// } else { - /// assert_eq!(u64::from_be(n), n.swap_bytes()) + /// assert_eq!(i64::from_be(n), n.swap_bytes()) /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] @@ -315,12 +386,12 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; + /// let n = 0x0123456789ABCDEFi64; /// /// if cfg!(target_endian = "little") { - /// assert_eq!(u64::from_le(n), n) + /// assert_eq!(i64::from_le(n), n) /// } else { - /// assert_eq!(u64::from_le(n), n.swap_bytes()) + /// assert_eq!(i64::from_le(n), n.swap_bytes()) /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] @@ -339,7 +410,7 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; + /// let n = 0x0123456789ABCDEFi64; /// /// if cfg!(target_endian = "big") { /// assert_eq!(n.to_be(), n) @@ -363,7 +434,7 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// let n = 0x0123456789ABCDEFu64; + /// let n = 0x0123456789ABCDEFi64; /// /// if cfg!(target_endian = "little") { /// assert_eq!(n.to_le(), n) @@ -385,8 +456,8 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// assert_eq!(5u16.checked_add(65530), Some(65535)); - /// assert_eq!(6u16.checked_add(65530), None); + /// assert_eq!(7i16.checked_add(32760), Some(32767)); + /// assert_eq!(8i16.checked_add(32760), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -421,8 +492,8 @@ macro_rules! int_impl { /// Basic usage: /// /// ``` - /// assert_eq!(5u8.checked_mul(51), Some(255)); - /// assert_eq!(5u8.checked_mul(52), None); + /// assert_eq!(6i8.checked_mul(21), Some(126)); + /// assert_eq!(6i8.checked_mul(22), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -446,11 +517,10 @@ macro_rules! 
int_impl { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn checked_div(self, other: Self) -> Option { - if other == 0 { + if other == 0 || (self == Self::min_value() && other == -1) { None } else { - let (a, b) = self.overflowing_div(other); - if b {None} else {Some(a)} + Some(unsafe { intrinsics::unchecked_div(self, other) }) } } @@ -471,15 +541,14 @@ macro_rules! int_impl { #[stable(feature = "wrapping", since = "1.7.0")] #[inline] pub fn checked_rem(self, other: Self) -> Option { - if other == 0 { + if other == 0 || (self == Self::min_value() && other == -1) { None } else { - let (a, b) = self.overflowing_rem(other); - if b {None} else {Some(a)} + Some(unsafe { intrinsics::unchecked_rem(self, other) }) } } - /// Checked negation. Computes `!self`, returning `None` if `self == + /// Checked negation. Computes `-self`, returning `None` if `self == /// MIN`. /// /// # Examples @@ -535,6 +604,29 @@ macro_rules! int_impl { if b {None} else {Some(a)} } + /// Checked absolute value. Computes `self.abs()`, returning `None` if + /// `self == MIN`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::i32; + /// + /// assert_eq!((-5i32).checked_abs(), Some(5)); + /// assert_eq!(i32::MIN.checked_abs(), None); + /// ``` + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline] + pub fn checked_abs(self) -> Option { + if self.is_negative() { + self.checked_neg() + } else { + Some(self) + } + } + /// Saturating integer addition. Computes `self + other`, saturating at /// the numeric bounds instead of overflowing. /// @@ -551,7 +643,7 @@ macro_rules! int_impl { pub fn saturating_add(self, other: Self) -> Self { match self.checked_add(other) { Some(x) => x, - None if other >= Self::zero() => Self::max_value(), + None if other >= 0 => Self::max_value(), None => Self::min_value(), } } @@ -572,7 +664,7 @@ macro_rules! 
int_impl { pub fn saturating_sub(self, other: Self) -> Self { match self.checked_sub(other) { Some(x) => x, - None if other >= Self::zero() => Self::min_value(), + None if other >= 0 => Self::min_value(), None => Self::max_value(), } } @@ -741,13 +833,20 @@ macro_rules! int_impl { /// where `mask` removes any high-order bits of `rhs` that /// would cause the shift to exceed the bitwidth of the type. /// + /// Note that this is *not* the same as a rotate-left; the + /// RHS of a wrapping shift-left is restricted to the range + /// of the type, rather than the bits shifted out of the LHS + /// being returned to the other end. The primitive integer + /// types all implement a `rotate_left` function, which may + /// be what you want instead. + /// /// # Examples /// /// Basic usage: /// /// ``` - /// assert_eq!(1u8.wrapping_shl(7), 128); - /// assert_eq!(1u8.wrapping_shl(8), 1); + /// assert_eq!((-1i8).wrapping_shl(7), -128); + /// assert_eq!((-1i8).wrapping_shl(8), -1); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] @@ -759,13 +858,20 @@ macro_rules! int_impl { /// where `mask` removes any high-order bits of `rhs` that /// would cause the shift to exceed the bitwidth of the type. /// + /// Note that this is *not* the same as a rotate-right; the + /// RHS of a wrapping shift-right is restricted to the range + /// of the type, rather than the bits shifted out of the LHS + /// being returned to the other end. The primitive integer + /// types all implement a `rotate_right` function, which may + /// be what you want instead. + /// /// # Examples /// /// Basic usage: /// /// ``` - /// assert_eq!(128u8.wrapping_shr(7), 1); - /// assert_eq!(128u8.wrapping_shr(8), 128); + /// assert_eq!((-128i8).wrapping_shr(7), -1); + /// assert_eq!((-128i8).wrapping_shr(8), -128); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] @@ -773,6 +879,34 @@ macro_rules! 
int_impl { self.overflowing_shr(rhs).0 } + /// Wrapping (modular) absolute value. Computes `self.abs()`, + /// wrapping around at the boundary of the type. + /// + /// The only case where such wrapping can occur is when one takes + /// the absolute value of the negative minimal value for the type + /// this is a positive value that is too large to represent in the + /// type. In such a case, this function returns `MIN` itself. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert_eq!(100i8.wrapping_abs(), 100); + /// assert_eq!((-100i8).wrapping_abs(), 100); + /// assert_eq!((-128i8).wrapping_abs(), -128); + /// assert_eq!((-128i8).wrapping_abs() as u8, 128); + /// ``` + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline(always)] + pub fn wrapping_abs(self) -> Self { + if self.is_negative() { + self.wrapping_neg() + } else { + self + } + } + /// Calculates `self` + `rhs` /// /// Returns a tuple of the addition along with a boolean indicating @@ -981,6 +1115,33 @@ macro_rules! int_impl { (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1))) } + /// Computes the absolute value of `self`. + /// + /// Returns a tuple of the absolute version of self along with a + /// boolean indicating whether an overflow happened. If self is the + /// minimum value (e.g. i32::MIN for values of type i32), then the + /// minimum value will be returned again and true will be returned for + /// an overflow happening. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// assert_eq!(10i8.overflowing_abs(), (10,false)); + /// assert_eq!((-10i8).overflowing_abs(), (10,false)); + /// assert_eq!((-128i8).overflowing_abs(), (-128,true)); + /// ``` + #[stable(feature = "no_panic_abs", since = "1.13.0")] + #[inline] + pub fn overflowing_abs(self) -> (Self, bool) { + if self.is_negative() { + self.overflowing_neg() + } else { + (self, false) + } + } + /// Raises self to the power of `exp`, using exponentiation by squaring. 
/// /// # Examples @@ -994,9 +1155,10 @@ macro_rules! int_impl { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_inherit_overflow_checks] pub fn pow(self, mut exp: u32) -> Self { let mut base = self; - let mut acc = Self::one(); + let mut acc = 1; while exp > 1 { if (exp & 1) == 1 { @@ -1035,6 +1197,7 @@ macro_rules! int_impl { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_inherit_overflow_checks] pub fn abs(self) -> Self { if self.is_negative() { // Note that the #[inline] above means that the overflow @@ -1135,6 +1298,15 @@ impl i64 { intrinsics::mul_with_overflow } } +#[cfg(target_pointer_width = "16")] +#[lang = "isize"] +impl isize { + int_impl! { i16, u16, 16, + intrinsics::add_with_overflow, + intrinsics::sub_with_overflow, + intrinsics::mul_with_overflow } +} + #[cfg(target_pointer_width = "32")] #[lang = "isize"] impl isize { @@ -1153,7 +1325,7 @@ impl isize { intrinsics::mul_with_overflow } } -// `Int` + `UnsignedInt` implemented for signed integers +// `Int` + `UnsignedInt` implemented for unsigned integers macro_rules! uint_impl { ($ActualT:ty, $BITS:expr, $ctpop:path, @@ -1164,11 +1336,23 @@ macro_rules! uint_impl { $sub_with_overflow:path, $mul_with_overflow:path) => { /// Returns the smallest value that can be represented by this integer type. + /// + /// # Examples + /// + /// ``` + /// assert_eq!(u8::min_value(), 0); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub const fn min_value() -> Self { 0 } /// Returns the largest value that can be represented by this integer type. + /// + /// # Examples + /// + /// ``` + /// assert_eq!(u8::max_value(), 255); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub const fn max_value() -> Self { !0 } @@ -1177,15 +1361,13 @@ macro_rules! uint_impl { /// /// Leading and trailing whitespace represent an error. /// - /// # Arguments - /// - /// * src - A string slice - /// * radix - The base to use. 
Must lie in the range [2 .. 36] + /// # Examples /// - /// # Return value + /// Basic usage: /// - /// `Err(ParseIntError)` if the string did not represent a valid number. - /// Otherwise, `Ok(n)` where `n` is the integer represented by `src`. + /// ``` + /// assert_eq!(u32::from_str_radix("A", 16), Ok(10)); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_str_radix(src: &str, radix: u32) -> Result { from_str_radix(src, radix) @@ -1277,6 +1459,8 @@ macro_rules! uint_impl { /// Shifts the bits to the left by a specified amount, `n`, /// wrapping the truncated bits to the end of the resulting integer. /// + /// Please note this isn't the same operation as `<<`! + /// /// # Examples /// /// Basic usage: @@ -1299,6 +1483,8 @@ macro_rules! uint_impl { /// wrapping the truncated bits to the beginning of the resulting /// integer. /// + /// Please note this isn't the same operation as `>>`! + /// /// # Examples /// /// Basic usage: @@ -1457,8 +1643,8 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!((-127i8).checked_sub(1), Some(-128)); - /// assert_eq!((-128i8).checked_sub(1), None); + /// assert_eq!(1u8.checked_sub(1), Some(0)); + /// assert_eq!(0u8.checked_sub(1), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -1493,16 +1679,15 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!((-127i8).checked_div(-1), Some(127)); - /// assert_eq!((-128i8).checked_div(-1), None); - /// assert_eq!((1i8).checked_div(0), None); + /// assert_eq!(128u8.checked_div(2), Some(64)); + /// assert_eq!(1u8.checked_div(0), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn checked_div(self, other: Self) -> Option { match other { 0 => None, - other => Some(self / other), + other => Some(unsafe { intrinsics::unchecked_div(self, other) }), } } @@ -1523,7 +1708,7 @@ macro_rules! 
uint_impl { if other == 0 { None } else { - Some(self % other) + Some(unsafe { intrinsics::unchecked_rem(self, other) }) } } @@ -1591,16 +1776,15 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(100i8.saturating_add(1), 101); - /// assert_eq!(100i8.saturating_add(127), 127); + /// assert_eq!(100u8.saturating_add(1), 101); + /// assert_eq!(200u8.saturating_add(127), 255); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn saturating_add(self, other: Self) -> Self { match self.checked_add(other) { - Some(x) => x, - None if other >= Self::zero() => Self::max_value(), - None => Self::min_value(), + Some(x) => x, + None => Self::max_value(), } } @@ -1612,16 +1796,15 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(100i8.saturating_sub(127), -27); - /// assert_eq!((-100i8).saturating_sub(127), -128); + /// assert_eq!(100u8.saturating_sub(27), 73); + /// assert_eq!(13u8.saturating_sub(127), 0); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn saturating_sub(self, other: Self) -> Self { match self.checked_sub(other) { - Some(x) => x, - None if other >= Self::zero() => Self::min_value(), - None => Self::max_value(), + Some(x) => x, + None => Self::min_value(), } } @@ -1652,8 +1835,8 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(100i8.wrapping_add(27), 127); - /// assert_eq!(100i8.wrapping_add(127), -29); + /// assert_eq!(200u8.wrapping_add(55), 255); + /// assert_eq!(200u8.wrapping_add(155), 99); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -1671,8 +1854,8 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(0i8.wrapping_sub(127), -127); - /// assert_eq!((-2i8).wrapping_sub(127), 127); + /// assert_eq!(100u8.wrapping_sub(100), 0); + /// assert_eq!(100u8.wrapping_sub(155), 201); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -1690,8 +1873,8 @@ macro_rules! 
uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(10i8.wrapping_mul(12), 120); - /// assert_eq!(11i8.wrapping_mul(12), -124); + /// assert_eq!(10u8.wrapping_mul(12), 120); + /// assert_eq!(25u8.wrapping_mul(12), 44); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -1701,15 +1884,11 @@ macro_rules! uint_impl { } } - /// Wrapping (modular) division. Computes `self / other`, - /// wrapping around at the boundary of the type. - /// - /// The only case where such wrapping can occur is when one - /// divides `MIN / -1` on a signed type (where `MIN` is the - /// negative minimal value for the type); this is equivalent - /// to `-MIN`, a positive value that is too large to represent - /// in the type. In such a case, this function returns `MIN` - /// itself. + /// Wrapping (modular) division. Computes `self / other`. + /// Wrapped division on unsigned types is just normal division. + /// There's no way wrapping could ever happen. + /// This function exists, so that all operations + /// are accounted for in the wrapping operations. /// /// # Examples /// @@ -1717,52 +1896,52 @@ macro_rules! uint_impl { /// /// ``` /// assert_eq!(100u8.wrapping_div(10), 10); - /// assert_eq!((-128i8).wrapping_div(-1), -128); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] pub fn wrapping_div(self, rhs: Self) -> Self { - self.overflowing_div(rhs).0 + self / rhs } - /// Wrapping (modular) remainder. Computes `self % other`, - /// wrapping around at the boundary of the type. - /// - /// Such wrap-around never actually occurs mathematically; - /// implementation artifacts make `x % y` invalid for `MIN / - /// -1` on a signed type (where `MIN` is the negative - /// minimal value). In such a case, this function returns `0`. + /// Wrapping (modular) remainder. Computes `self % other`. + /// Wrapped remainder calculation on unsigned types is + /// just the regular remainder calculation. + /// There's no way wrapping could ever happen. 
+ /// This function exists, so that all operations + /// are accounted for in the wrapping operations. /// /// # Examples /// /// Basic usage: /// /// ``` - /// assert_eq!(100i8.wrapping_rem(10), 0); - /// assert_eq!((-128i8).wrapping_rem(-1), 0); + /// assert_eq!(100u8.wrapping_rem(10), 0); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] pub fn wrapping_rem(self, rhs: Self) -> Self { - self.overflowing_rem(rhs).0 + self % rhs } /// Wrapping (modular) negation. Computes `-self`, /// wrapping around at the boundary of the type. /// - /// The only case where such wrapping can occur is when one - /// negates `MIN` on a signed type (where `MIN` is the - /// negative minimal value for the type); this is a positive - /// value that is too large to represent in the type. In such - /// a case, this function returns `MIN` itself. + /// Since unsigned types do not have negative equivalents + /// all applications of this function will wrap (except for `-0`). + /// For values smaller than the corresponding signed type's maximum + /// the result is the same as casting the corresponding signed value. + /// Any larger values are equivalent to `MAX + 1 - (val - MAX - 1)` where + /// `MAX` is the corresponding signed type's maximum. /// /// # Examples /// /// Basic usage: /// /// ``` - /// assert_eq!(100i8.wrapping_neg(), -100); - /// assert_eq!((-128i8).wrapping_neg(), -128); + /// assert_eq!(100u8.wrapping_neg(), 156); + /// assert_eq!(0u8.wrapping_neg(), 0); + /// assert_eq!(180u8.wrapping_neg(), 76); + /// assert_eq!(180u8.wrapping_neg(), (127 + 1) - (180u8 - (127 + 1))); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] @@ -1774,6 +1953,13 @@ macro_rules! uint_impl { /// where `mask` removes any high-order bits of `rhs` that /// would cause the shift to exceed the bitwidth of the type. 
/// + /// Note that this is *not* the same as a rotate-left; the + /// RHS of a wrapping shift-left is restricted to the range + /// of the type, rather than the bits shifted out of the LHS + /// being returned to the other end. The primitive integer + /// types all implement a `rotate_left` function, which may + /// be what you want instead. + /// /// # Examples /// /// Basic usage: @@ -1792,6 +1978,13 @@ macro_rules! uint_impl { /// where `mask` removes any high-order bits of `rhs` that /// would cause the shift to exceed the bitwidth of the type. /// + /// Note that this is *not* the same as a rotate-right; the + /// RHS of a wrapping shift-right is restricted to the range + /// of the type, rather than the bits shifted out of the LHS + /// being returned to the other end. The primitive integer + /// types all implement a `rotate_right` function, which may + /// be what you want instead. + /// /// # Examples /// /// Basic usage: @@ -2002,33 +2195,30 @@ macro_rules! uint_impl { /// Basic usage: /// /// ``` - /// assert_eq!(2i32.pow(4), 16); + /// assert_eq!(2u32.pow(4), 16); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] + #[rustc_inherit_overflow_checks] pub fn pow(self, mut exp: u32) -> Self { let mut base = self; - let mut acc = Self::one(); + let mut acc = 1; - let mut prev_base = self; - let mut base_oflo = false; - while exp > 0 { + while exp > 1 { if (exp & 1) == 1 { - if base_oflo { - // ensure overflow occurs in the same manner it - // would have otherwise (i.e. signal any exception - // it would have otherwise). - acc = acc * (prev_base * prev_base); - } else { - acc = acc * base; - } + acc = acc * base; } - prev_base = base; - let (new_base, new_base_oflo) = base.overflowing_mul(base); - base = new_base; - base_oflo = new_base_oflo; exp /= 2; + base = base * base; } + + // Deal with the final bit of the exponent separately, since + // squaring the base afterwards is not necessary and may cause a + // needless overflow. 
+ if exp == 1 { + acc = acc * base; + } + acc } @@ -2045,8 +2235,7 @@ macro_rules! uint_impl { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_power_of_two(self) -> bool { - (self.wrapping_sub(Self::one())) & self == Self::zero() && - !(self == Self::zero()) + (self.wrapping_sub(1)) & self == 0 && !(self == 0) } /// Returns the smallest power of two greater than or equal to `self`. @@ -2064,7 +2253,7 @@ macro_rules! uint_impl { #[inline] pub fn next_power_of_two(self) -> Self { let bits = size_of::() * 8; - let one: Self = Self::one(); + let one: Self = 1; one << ((bits - self.wrapping_sub(one).leading_zeros() as usize) % bits) } @@ -2141,6 +2330,18 @@ impl u64 { intrinsics::mul_with_overflow } } +#[cfg(target_pointer_width = "16")] +#[lang = "usize"] +impl usize { + uint_impl! { u16, 16, + intrinsics::ctpop, + intrinsics::ctlz, + intrinsics::cttz, + intrinsics::bswap, + intrinsics::add_with_overflow, + intrinsics::sub_with_overflow, + intrinsics::mul_with_overflow } +} #[cfg(target_pointer_width = "32")] #[lang = "usize"] impl usize { @@ -2167,27 +2368,52 @@ impl usize { intrinsics::mul_with_overflow } } -/// Used for representing the classification of floating point numbers -#[derive(Copy, Clone, PartialEq, Debug)] +/// A classification of floating point numbers. +/// +/// This `enum` is used as the return type for [`f32::classify()`] and [`f64::classify()`]. See +/// their documentation for more. 
+/// +/// [`f32::classify()`]: ../../std/primitive.f32.html#method.classify +/// [`f64::classify()`]: ../../std/primitive.f64.html#method.classify +/// +/// # Examples +/// +/// ``` +/// use std::num::FpCategory; +/// use std::f32; +/// +/// let num = 12.4_f32; +/// let inf = f32::INFINITY; +/// let zero = 0f32; +/// let sub: f32 = 1.1754942e-38; +/// let nan = f32::NAN; +/// +/// assert_eq!(num.classify(), FpCategory::Normal); +/// assert_eq!(inf.classify(), FpCategory::Infinite); +/// assert_eq!(zero.classify(), FpCategory::Zero); +/// assert_eq!(nan.classify(), FpCategory::Nan); +/// assert_eq!(sub.classify(), FpCategory::Subnormal); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub enum FpCategory { - /// "Not a Number", often obtained by dividing by zero + /// "Not a Number", often obtained by dividing by zero. #[stable(feature = "rust1", since = "1.0.0")] Nan, - /// Positive or negative infinity + /// Positive or negative infinity. #[stable(feature = "rust1", since = "1.0.0")] - Infinite , + Infinite, - /// Positive or negative zero + /// Positive or negative zero. #[stable(feature = "rust1", since = "1.0.0")] Zero, - /// De-normalized floating point representation (less precise than `Normal`) + /// De-normalized floating point representation (less precise than `Normal`). #[stable(feature = "rust1", since = "1.0.0")] Subnormal, - /// A regular floating point number + /// A regular floating point number. #[stable(feature = "rust1", since = "1.0.0")] Normal, } @@ -2196,31 +2422,49 @@ pub enum FpCategory { #[doc(hidden)] #[unstable(feature = "core_float", reason = "stable interface is via `impl f{32,64}` in later crates", - issue = "27702")] + issue = "32110")] pub trait Float: Sized { /// Returns the NaN value. 
#[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn nan() -> Self; /// Returns the infinite value. #[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn infinity() -> Self; /// Returns the negative infinite value. #[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn neg_infinity() -> Self; /// Returns -0.0. #[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn neg_zero() -> Self; /// Returns 0.0. #[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn zero() -> Self; /// Returns 1.0. #[unstable(feature = "float_extras", reason = "needs removal", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn one() -> Self; /// Returns true if this value is NaN and false otherwise. @@ -2243,6 +2487,9 @@ pub trait Float: Sized { /// Returns the mantissa, exponent and sign as integers, respectively. 
#[unstable(feature = "float_extras", reason = "signature is undecided", issue = "27752")] + #[rustc_deprecated(since = "1.11.0", + reason = "never really came to fruition and easily \ + implementable outside the standard library")] fn integer_decode(self) -> (u64, i16, i8); /// Computes the absolute value of `self`. Returns `Float::nan()` if the @@ -2277,12 +2524,10 @@ pub trait Float: Sized { fn powi(self, n: i32) -> Self; /// Convert radians to degrees. - #[unstable(feature = "float_extras", reason = "desirability is unclear", - issue = "27752")] + #[stable(feature = "deg_rad_conversions", since="1.7.0")] fn to_degrees(self) -> Self; /// Convert degrees to radians. - #[unstable(feature = "float_extras", reason = "desirability is unclear", - issue = "27752")] + #[stable(feature = "deg_rad_conversions", since="1.7.0")] fn to_radians(self) -> Self; } @@ -2299,9 +2544,101 @@ macro_rules! from_str_radix_int_impl { } from_str_radix_int_impl! { isize i8 i16 i32 i64 usize u8 u16 u32 u64 } +/// The error type returned when a checked integral type conversion fails. +#[unstable(feature = "try_from", issue = "33417")] +#[derive(Debug, Copy, Clone)] +pub struct TryFromIntError(()); + +impl TryFromIntError { + #[unstable(feature = "int_error_internals", + reason = "available through Error trait and this method should \ + not be exposed publicly", + issue = "0")] + #[doc(hidden)] + pub fn __description(&self) -> &str { + "out of range integral type conversion attempted" + } +} + +#[unstable(feature = "try_from", issue = "33417")] +impl fmt::Display for TryFromIntError { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.__description().fmt(fmt) + } +} + +macro_rules! 
same_sign_from_int_impl { + ($storage:ty, $target:ty, $($source:ty),*) => {$( + #[unstable(feature = "try_from", issue = "33417")] + impl TryFrom<$source> for $target { + type Err = TryFromIntError; + + fn try_from(u: $source) -> Result<$target, TryFromIntError> { + let min = <$target as FromStrRadixHelper>::min_value() as $storage; + let max = <$target as FromStrRadixHelper>::max_value() as $storage; + if u as $storage < min || u as $storage > max { + Err(TryFromIntError(())) + } else { + Ok(u as $target) + } + } + } + )*} +} + +same_sign_from_int_impl!(u64, u8, u8, u16, u32, u64, usize); +same_sign_from_int_impl!(i64, i8, i8, i16, i32, i64, isize); +same_sign_from_int_impl!(u64, u16, u8, u16, u32, u64, usize); +same_sign_from_int_impl!(i64, i16, i8, i16, i32, i64, isize); +same_sign_from_int_impl!(u64, u32, u8, u16, u32, u64, usize); +same_sign_from_int_impl!(i64, i32, i8, i16, i32, i64, isize); +same_sign_from_int_impl!(u64, u64, u8, u16, u32, u64, usize); +same_sign_from_int_impl!(i64, i64, i8, i16, i32, i64, isize); +same_sign_from_int_impl!(u64, usize, u8, u16, u32, u64, usize); +same_sign_from_int_impl!(i64, isize, i8, i16, i32, i64, isize); + +macro_rules! 
cross_sign_from_int_impl { + ($unsigned:ty, $($signed:ty),*) => {$( + #[unstable(feature = "try_from", issue = "33417")] + impl TryFrom<$unsigned> for $signed { + type Err = TryFromIntError; + + fn try_from(u: $unsigned) -> Result<$signed, TryFromIntError> { + let max = <$signed as FromStrRadixHelper>::max_value() as u64; + if u as u64 > max { + Err(TryFromIntError(())) + } else { + Ok(u as $signed) + } + } + } + + #[unstable(feature = "try_from", issue = "33417")] + impl TryFrom<$signed> for $unsigned { + type Err = TryFromIntError; + + fn try_from(u: $signed) -> Result<$unsigned, TryFromIntError> { + let max = <$unsigned as FromStrRadixHelper>::max_value() as u64; + if u < 0 || u as u64 > max { + Err(TryFromIntError(())) + } else { + Ok(u as $unsigned) + } + } + } + )*} +} + +cross_sign_from_int_impl!(u8, i8, i16, i32, i64, isize); +cross_sign_from_int_impl!(u16, i8, i16, i32, i64, isize); +cross_sign_from_int_impl!(u32, i8, i16, i32, i64, isize); +cross_sign_from_int_impl!(u64, i8, i16, i32, i64, isize); +cross_sign_from_int_impl!(usize, i8, i16, i32, i64, isize); + #[doc(hidden)] trait FromStrRadixHelper: PartialOrd + Copy { fn min_value() -> Self; + fn max_value() -> Self; fn from_u32(u: u32) -> Self; fn checked_mul(&self, other: u32) -> Option; fn checked_sub(&self, other: u32) -> Option; @@ -2311,6 +2648,7 @@ trait FromStrRadixHelper: PartialOrd + Copy { macro_rules! doit { ($($t:ty)*) => ($(impl FromStrRadixHelper for $t { fn min_value() -> Self { Self::min_value() } + fn max_value() -> Self { Self::max_value() } fn from_u32(u: u32) -> Self { u as Self } fn checked_mul(&self, other: u32) -> Option { Self::checked_mul(*self, other as Self) @@ -2325,8 +2663,7 @@ macro_rules! doit { } doit! 
{ i8 i16 i32 i64 isize u8 u16 u32 u64 usize } -fn from_str_radix(src: &str, radix: u32) - -> Result { +fn from_str_radix(src: &str, radix: u32) -> Result { use self::IntErrorKind::*; use self::ParseIntError as PIE; @@ -2349,7 +2686,7 @@ fn from_str_radix(src: &str, radix: u32) let (is_positive, digits) = match src[0] { b'+' => (true, &src[1..]), b'-' if is_signed_ty => (false, &src[1..]), - _ => (true, src) + _ => (true, src), }; if digits.is_empty() { @@ -2394,11 +2731,18 @@ fn from_str_radix(src: &str, radix: u32) } /// An error which can be returned when parsing an integer. -#[derive(Debug, Clone, PartialEq)] +/// +/// This error is used as the error type for the `from_str_radix()` functions +/// on the primitive integer types, such as [`i8::from_str_radix()`]. +/// +/// [`i8::from_str_radix()`]: ../../std/primitive.i8.html#method.from_str_radix +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] -pub struct ParseIntError { kind: IntErrorKind } +pub struct ParseIntError { + kind: IntErrorKind, +} -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] enum IntErrorKind { Empty, InvalidDigit, diff --git a/src/libcore/num/u16.rs b/src/libcore/num/u16.rs index 68e50e8a400ea..d34d87caa55fa 100644 --- a/src/libcore/num/u16.rs +++ b/src/libcore/num/u16.rs @@ -10,8 +10,8 @@ //! The 16-bit unsigned integer type. //! -//! *[See also the `u16` primitive type](../primitive.u16.html).* +//! *[See also the `u16` primitive type](../../std/primitive.u16.html).* #![stable(feature = "rust1", since = "1.0.0")] -uint_module! { u16, i16, 16 } +uint_module! { u16, 16 } diff --git a/src/libcore/num/u32.rs b/src/libcore/num/u32.rs index c1ee96b363c27..f9c9099e47f18 100644 --- a/src/libcore/num/u32.rs +++ b/src/libcore/num/u32.rs @@ -10,8 +10,8 @@ //! The 32-bit unsigned integer type. //! -//! *[See also the `u32` primitive type](../primitive.u32.html).* +//! 
*[See also the `u32` primitive type](../../std/primitive.u32.html).* #![stable(feature = "rust1", since = "1.0.0")] -uint_module! { u32, i32, 32 } +uint_module! { u32, 32 } diff --git a/src/libcore/num/u64.rs b/src/libcore/num/u64.rs index c0d18d850a796..8dfe4335a3d72 100644 --- a/src/libcore/num/u64.rs +++ b/src/libcore/num/u64.rs @@ -10,8 +10,8 @@ //! The 64-bit unsigned integer type. //! -//! *[See also the `u64` primitive type](../primitive.u64.html).* +//! *[See also the `u64` primitive type](../../std/primitive.u64.html).* #![stable(feature = "rust1", since = "1.0.0")] -uint_module! { u64, i64, 64 } +uint_module! { u64, 64 } diff --git a/src/libcore/num/u8.rs b/src/libcore/num/u8.rs index a60c480d810ea..0106ee8e401c9 100644 --- a/src/libcore/num/u8.rs +++ b/src/libcore/num/u8.rs @@ -10,8 +10,8 @@ //! The 8-bit unsigned integer type. //! -//! *[See also the `u8` primitive type](../primitive.u8.html).* +//! *[See also the `u8` primitive type](../../std/primitive.u8.html).* #![stable(feature = "rust1", since = "1.0.0")] -uint_module! { u8, i8, 8 } +uint_module! { u8, 8 } diff --git a/src/libcore/num/uint_macros.rs b/src/libcore/num/uint_macros.rs index 16d84cf81e11d..cc9256ab6bf4e 100644 --- a/src/libcore/num/uint_macros.rs +++ b/src/libcore/num/uint_macros.rs @@ -10,28 +10,13 @@ #![doc(hidden)] -macro_rules! uint_module { ($T:ty, $T_SIGNED:ty, $bits:expr) => ( - -#[unstable(feature = "num_bits_bytes", - reason = "may want to be an associated function", - issue = "27753")] -#[rustc_deprecated(since = "1.7.0", - reason = "will be replaced via const fn or associated constants")] -#[allow(missing_docs)] -pub const BITS : usize = $bits; -#[unstable(feature = "num_bits_bytes", - reason = "may want to be an associated function", - issue = "27753")] -#[rustc_deprecated(since = "1.7.0", - reason = "will be replaced via const fn or associated constants")] -#[allow(missing_docs)] -pub const BYTES : usize = ($bits / 8); +macro_rules! 
uint_module { ($T:ident, $bits:expr) => ( +/// The smallest value that can be represented by this integer type. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const MIN: $T = 0 as $T; +pub const MIN: $T = $T::min_value(); +/// The largest value that can be represented by this integer type. #[stable(feature = "rust1", since = "1.0.0")] -#[allow(missing_docs)] -pub const MAX: $T = !0 as $T; +pub const MAX: $T = $T::max_value(); ) } diff --git a/src/libcore/num/usize.rs b/src/libcore/num/usize.rs index a6a7be023ebf4..685c52e271ec0 100644 --- a/src/libcore/num/usize.rs +++ b/src/libcore/num/usize.rs @@ -10,11 +10,13 @@ //! The pointer-sized unsigned integer type. //! -//! *[See also the `usize` primitive type](../primitive.usize.html).* +//! *[See also the `usize` primitive type](../../std/primitive.usize.html).* #![stable(feature = "rust1", since = "1.0.0")] +#[cfg(target_pointer_width = "16")] +uint_module! { usize, 16 } #[cfg(target_pointer_width = "32")] -uint_module! { usize, isize, 32 } +uint_module! { usize, 32 } #[cfg(target_pointer_width = "64")] -uint_module! { usize, isize, 64 } +uint_module! { usize, 64 } diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index a6b3dc744699a..50d64838a5c0b 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -8,34 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(missing_docs)] -#![unstable(feature = "old_wrapping", reason = "may be removed or relocated", - issue = "27755")] - -use intrinsics::{add_with_overflow, sub_with_overflow, mul_with_overflow}; - use super::Wrapping; use ops::*; -use ::{i8, i16, i32, i64, isize}; - -#[unstable(feature = "old_wrapping", reason = "may be removed or relocated", - issue = "27755")] -#[rustc_deprecated(since = "1.7.0", reason = "moved to inherent methods")] -pub trait OverflowingOps { - fn overflowing_add(self, rhs: Self) -> (Self, bool); - fn overflowing_sub(self, rhs: Self) -> (Self, bool); - fn overflowing_mul(self, rhs: Self) -> (Self, bool); - - fn overflowing_div(self, rhs: Self) -> (Self, bool); - fn overflowing_rem(self, rhs: Self) -> (Self, bool); - fn overflowing_neg(self) -> (Self, bool); - - fn overflowing_shl(self, rhs: u32) -> (Self, bool); - fn overflowing_shr(self, rhs: u32) -> (Self, bool); -} - macro_rules! sh_impl_signed { ($t:ident, $f:ident) => ( #[stable(feature = "rust1", since = "1.0.0")] @@ -52,7 +28,7 @@ macro_rules! sh_impl_signed { } } - #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShlAssign<$f> for Wrapping<$t> { #[inline(always)] fn shl_assign(&mut self, other: $f) { @@ -74,7 +50,7 @@ macro_rules! sh_impl_signed { } } - #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShrAssign<$f> for Wrapping<$t> { #[inline(always)] fn shr_assign(&mut self, other: $f) { @@ -96,7 +72,7 @@ macro_rules! sh_impl_unsigned { } } - #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShlAssign<$f> for Wrapping<$t> { #[inline(always)] fn shl_assign(&mut self, other: $f) { @@ -114,7 +90,7 @@ macro_rules! 
sh_impl_unsigned { } } - #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShrAssign<$f> for Wrapping<$t> { #[inline(always)] fn shr_assign(&mut self, other: $f) { @@ -155,8 +131,9 @@ macro_rules! wrapping_impl { Wrapping(self.0.wrapping_add(other.0)) } } + forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl AddAssign for Wrapping<$t> { #[inline(always)] fn add_assign(&mut self, other: Wrapping<$t>) { @@ -173,8 +150,9 @@ macro_rules! wrapping_impl { Wrapping(self.0.wrapping_sub(other.0)) } } + forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl SubAssign for Wrapping<$t> { #[inline(always)] fn sub_assign(&mut self, other: Wrapping<$t>) { @@ -191,8 +169,9 @@ macro_rules! wrapping_impl { Wrapping(self.0.wrapping_mul(other.0)) } } + forward_ref_binop! { impl Mul, mul for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl MulAssign for Wrapping<$t> { #[inline(always)] fn mul_assign(&mut self, other: Wrapping<$t>) { @@ -209,8 +188,9 @@ macro_rules! wrapping_impl { Wrapping(self.0.wrapping_div(other.0)) } } + forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl DivAssign for Wrapping<$t> { #[inline(always)] fn div_assign(&mut self, other: Wrapping<$t>) { @@ -218,7 +198,7 @@ macro_rules! 
wrapping_impl { } } - #[unstable(feature = "wrapping_impls", reason = "recently added", issue = "30524")] + #[stable(feature = "wrapping_impls", since = "1.7.0")] impl Rem for Wrapping<$t> { type Output = Wrapping<$t>; @@ -227,8 +207,9 @@ macro_rules! wrapping_impl { Wrapping(self.0.wrapping_rem(other.0)) } } + forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl RemAssign for Wrapping<$t> { #[inline(always)] fn rem_assign(&mut self, other: Wrapping<$t>) { @@ -245,6 +226,7 @@ macro_rules! wrapping_impl { Wrapping(!self.0) } } + forward_ref_unop! { impl Not, not for Wrapping<$t> } #[stable(feature = "rust1", since = "1.0.0")] impl BitXor for Wrapping<$t> { @@ -255,8 +237,9 @@ macro_rules! wrapping_impl { Wrapping(self.0 ^ other.0) } } + forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitXorAssign for Wrapping<$t> { #[inline(always)] fn bitxor_assign(&mut self, other: Wrapping<$t>) { @@ -273,8 +256,9 @@ macro_rules! wrapping_impl { Wrapping(self.0 | other.0) } } + forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitOrAssign for Wrapping<$t> { #[inline(always)] fn bitor_assign(&mut self, other: Wrapping<$t>) { @@ -291,14 +275,25 @@ macro_rules! wrapping_impl { Wrapping(self.0 & other.0) } } + forward_ref_binop! 
{ impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t> } - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitAndAssign for Wrapping<$t> { #[inline(always)] fn bitand_assign(&mut self, other: Wrapping<$t>) { *self = *self & other; } } + + #[stable(feature = "wrapping_neg", since = "1.10.0")] + impl Neg for Wrapping<$t> { + type Output = Self; + #[inline(always)] + fn neg(self) -> Self { + Wrapping(0) - self + } + } + forward_ref_unop! { impl Neg, neg for Wrapping<$t> } )*) } @@ -307,6 +302,12 @@ wrapping_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 } mod shift_max { #![allow(non_upper_case_globals)] + #[cfg(target_pointer_width = "16")] + mod platform { + pub const usize: u32 = super::u16; + pub const isize: u32 = super::i16; + } + #[cfg(target_pointer_width = "32")] mod platform { pub const usize: u32 = super::u32; @@ -319,132 +320,15 @@ mod shift_max { pub const isize: u32 = super::i64; } - pub const i8: u32 = (1 << 3) - 1; + pub const i8: u32 = (1 << 3) - 1; pub const i16: u32 = (1 << 4) - 1; pub const i32: u32 = (1 << 5) - 1; pub const i64: u32 = (1 << 6) - 1; pub use self::platform::isize; - pub const u8: u32 = i8; + pub const u8: u32 = i8; pub const u16: u32 = i16; pub const u32: u32 = i32; pub const u64: u32 = i64; pub use self::platform::usize; } - -macro_rules! 
signed_overflowing_impl { - ($($t:ident)*) => ($( - #[allow(deprecated)] - impl OverflowingOps for $t { - #[inline(always)] - fn overflowing_add(self, rhs: $t) -> ($t, bool) { - unsafe { - add_with_overflow(self, rhs) - } - } - #[inline(always)] - fn overflowing_sub(self, rhs: $t) -> ($t, bool) { - unsafe { - sub_with_overflow(self, rhs) - } - } - #[inline(always)] - fn overflowing_mul(self, rhs: $t) -> ($t, bool) { - unsafe { - mul_with_overflow(self, rhs) - } - } - - #[inline(always)] - fn overflowing_div(self, rhs: $t) -> ($t, bool) { - if self == $t::MIN && rhs == -1 { - (self, true) - } else { - (self/rhs, false) - } - } - #[inline(always)] - fn overflowing_rem(self, rhs: $t) -> ($t, bool) { - if self == $t::MIN && rhs == -1 { - (0, true) - } else { - (self % rhs, false) - } - } - - #[inline(always)] - fn overflowing_shl(self, rhs: u32) -> ($t, bool) { - (self << (rhs & self::shift_max::$t), - (rhs > self::shift_max::$t)) - } - #[inline(always)] - fn overflowing_shr(self, rhs: u32) -> ($t, bool) { - (self >> (rhs & self::shift_max::$t), - (rhs > self::shift_max::$t)) - } - - #[inline(always)] - fn overflowing_neg(self) -> ($t, bool) { - if self == $t::MIN { - ($t::MIN, true) - } else { - (-self, false) - } - } - } - )*) -} - -macro_rules! 
unsigned_overflowing_impl { - ($($t:ident)*) => ($( - #[allow(deprecated)] - impl OverflowingOps for $t { - #[inline(always)] - fn overflowing_add(self, rhs: $t) -> ($t, bool) { - unsafe { - add_with_overflow(self, rhs) - } - } - #[inline(always)] - fn overflowing_sub(self, rhs: $t) -> ($t, bool) { - unsafe { - sub_with_overflow(self, rhs) - } - } - #[inline(always)] - fn overflowing_mul(self, rhs: $t) -> ($t, bool) { - unsafe { - mul_with_overflow(self, rhs) - } - } - - #[inline(always)] - fn overflowing_div(self, rhs: $t) -> ($t, bool) { - (self/rhs, false) - } - #[inline(always)] - fn overflowing_rem(self, rhs: $t) -> ($t, bool) { - (self % rhs, false) - } - - #[inline(always)] - fn overflowing_shl(self, rhs: u32) -> ($t, bool) { - (self << (rhs & self::shift_max::$t), - (rhs > self::shift_max::$t)) - } - #[inline(always)] - fn overflowing_shr(self, rhs: u32) -> ($t, bool) { - (self >> (rhs & self::shift_max::$t), - (rhs > self::shift_max::$t)) - } - - #[inline(always)] - fn overflowing_neg(self) -> ($t, bool) { - ((!self).wrapping_add(1), true) - } - } - )*) -} - -signed_overflowing_impl! { i8 i16 i32 i64 isize } -unsigned_overflowing_impl! { u8 u16 u32 u64 usize } diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index 6144f97b13080..07ae5b920b27b 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -8,29 +8,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Overloadable operators +//! Overloadable operators. //! -//! Implementing these traits allows you to get an effect similar to -//! overloading operators. +//! Implementing these traits allows you to overload certain operators. //! //! Some of these traits are imported by the prelude, so they are available in -//! every Rust program. +//! every Rust program. Only operators backed by traits can be overloaded. For +//! example, the addition operator (`+`) can be overloaded through the [`Add`] +//! 
trait, but since the assignment operator (`=`) has no backing trait, there +//! is no way of overloading its semantics. Additionally, this module does not +//! provide any mechanism to create new operators. If traitless overloading or +//! custom operators are required, you should look toward macros or compiler +//! plugins to extend Rust's syntax. +//! +//! Note that the `&&` and `||` operators short-circuit, i.e. they only +//! evaluate their second operand if it contributes to the result. Since this +//! behavior is not enforceable by traits, `&&` and `||` are not supported as +//! overloadable operators. //! //! Many of the operators take their operands by value. In non-generic //! contexts involving built-in types, this is usually not a problem. //! However, using these operators in generic code, requires some //! attention if values have to be reused as opposed to letting the operators -//! consume them. One option is to occasionally use `clone()`. +//! consume them. One option is to occasionally use [`clone()`]. //! Another option is to rely on the types involved providing additional //! operator implementations for references. For example, for a user-defined //! type `T` which is supposed to support addition, it is probably a good -//! idea to have both `T` and `&T` implement the traits `Add` and `Add<&T>` -//! so that generic code can be written without unnecessary cloning. +//! idea to have both `T` and `&T` implement the traits [`Add`][`Add`] and +//! [`Add<&T>`][`Add`] so that generic code can be written without unnecessary +//! cloning. //! //! # Examples //! -//! This example creates a `Point` struct that implements `Add` and `Sub`, and -//! then demonstrates adding and subtracting two `Point`s. +//! This example creates a `Point` struct that implements [`Add`] and [`Sub`], +//! and then demonstrates adding and subtracting two `Point`s. //! //! ```rust //! use std::ops::{Add, Sub}; @@ -62,13 +73,82 @@ //! } //! ``` //! -//! 
See the documentation for each trait for a minimum implementation that -//! prints something to the screen. +//! See the documentation for each trait for an example implementation. +//! +//! The [`Fn`], [`FnMut`], and [`FnOnce`] traits are implemented by types that can be +//! invoked like functions. Note that [`Fn`] takes `&self`, [`FnMut`] takes `&mut +//! self` and [`FnOnce`] takes `self`. These correspond to the three kinds of +//! methods that can be invoked on an instance: call-by-reference, +//! call-by-mutable-reference, and call-by-value. The most common use of these +//! traits is to act as bounds to higher-level functions that take functions or +//! closures as arguments. +//! +//! Taking a [`Fn`] as a parameter: +//! +//! ```rust +//! fn call_with_one(func: F) -> usize +//! where F: Fn(usize) -> usize +//! { +//! func(1) +//! } +//! +//! let double = |x| x * 2; +//! assert_eq!(call_with_one(double), 2); +//! ``` +//! +//! Taking a [`FnMut`] as a parameter: +//! +//! ```rust +//! fn do_twice(mut func: F) +//! where F: FnMut() +//! { +//! func(); +//! func(); +//! } +//! +//! let mut x: usize = 1; +//! { +//! let add_two_to_x = || x += 2; +//! do_twice(add_two_to_x); +//! } +//! +//! assert_eq!(x, 5); +//! ``` +//! +//! Taking a [`FnOnce`] as a parameter: +//! +//! ```rust +//! fn consume_with_relish(func: F) +//! where F: FnOnce() -> String +//! { +//! // `func` consumes its captured variables, so it cannot be run more +//! // than once +//! println!("Consumed: {}", func()); +//! +//! println!("Delicious!"); +//! +//! // Attempting to invoke `func()` again will throw a `use of moved +//! // value` error for `func` +//! } +//! +//! let x = String::from("x"); +//! let consume_and_return_x = move || x; +//! consume_with_relish(consume_and_return_x); +//! +//! // `consume_and_return_x` can no longer be invoked at this point +//! ``` +//! +//! [`Fn`]: trait.Fn.html +//! [`FnMut`]: trait.FnMut.html +//! [`FnOnce`]: trait.FnOnce.html +//! 
[`Add`]: trait.Add.html +//! [`Sub`]: trait.Sub.html +//! [`clone()`]: ../clone/trait.Clone.html#tymethod.clone #![stable(feature = "rust1", since = "1.0.0")] -use marker::{Sized, Unsize}; use fmt; +use marker::Unsize; /// The `Drop` trait is used to run some code when a value goes out of scope. /// This is sometimes called a 'destructor'. @@ -95,87 +175,71 @@ use fmt; #[stable(feature = "rust1", since = "1.0.0")] pub trait Drop { /// A method called when the value goes out of scope. + /// + /// When this method has been called, `self` has not yet been deallocated. + /// If it were, `self` would be a dangling reference. + /// + /// After this function is over, the memory of `self` will be deallocated. + /// + /// This function cannot be called explicitly. This is compiler error + /// [E0040]. However, the [`std::mem::drop`] function in the prelude can be + /// used to call the argument's `Drop` implementation. + /// + /// [E0040]: ../../error-index.html#E0040 + /// [`std::mem::drop`]: ../../std/mem/fn.drop.html + /// + /// # Panics + /// + /// Given that a `panic!` will call `drop()` as it unwinds, any `panic!` in + /// a `drop()` implementation will likely abort. #[stable(feature = "rust1", since = "1.0.0")] fn drop(&mut self); } -// implements the unary operator "op &T" -// based on "op T" where T is expected to be `Copy`able -macro_rules! forward_ref_unop { - (impl $imp:ident, $method:ident for $t:ty) => { - #[stable(feature = "rust1", since = "1.0.0")] - impl<'a> $imp for &'a $t { - type Output = <$t as $imp>::Output; - - #[inline] - fn $method(self) -> <$t as $imp>::Output { - $imp::$method(*self) - } - } - } -} - -// implements binary operators "&T op U", "T op &U", "&T op &U" -// based on "T op U" where T and U are expected to be `Copy`able -macro_rules! 
forward_ref_binop { - (impl $imp:ident, $method:ident for $t:ty, $u:ty) => { - #[stable(feature = "rust1", since = "1.0.0")] - impl<'a> $imp<$u> for &'a $t { - type Output = <$t as $imp<$u>>::Output; - - #[inline] - fn $method(self, other: $u) -> <$t as $imp<$u>>::Output { - $imp::$method(*self, other) - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<'a> $imp<&'a $u> for $t { - type Output = <$t as $imp<$u>>::Output; - - #[inline] - fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { - $imp::$method(self, *other) - } - } - - #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b> $imp<&'a $u> for &'b $t { - type Output = <$t as $imp<$u>>::Output; - - #[inline] - fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { - $imp::$method(*self, *other) - } - } - } -} - /// The `Add` trait is used to specify the functionality of `+`. /// /// # Examples /// -/// A trivial implementation of `Add`. When `Foo + Foo` happens, it ends up -/// calling `add`, and therefore, `main` prints `Adding!`. +/// This example creates a `Point` struct that implements the `Add` trait, and +/// then demonstrates adding two `Point`s. /// /// ``` /// use std::ops::Add; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } +/// +/// impl Add for Point { +/// type Output = Point; /// -/// impl Add for Foo { -/// type Output = Foo; +/// fn add(self, other: Point) -> Point { +/// Point { +/// x: self.x + other.x, +/// y: self.y + other.y, +/// } +/// } +/// } /// -/// fn add(self, _rhs: Foo) -> Foo { -/// println!("Adding!"); -/// self +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y /// } /// } /// /// fn main() { -/// Foo + Foo; +/// assert_eq!(Point { x: 1, y: 0 } + Point { x: 2, y: 3 }, +/// Point { x: 3, y: 3 }); /// } /// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. 
For example, +/// [std::time::SystemTime] implements `Add`, which permits +/// operations of the form `SystemTime = SystemTime + Duration`. +/// +/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html #[lang = "add"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Add { @@ -195,6 +259,7 @@ macro_rules! add_impl { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn add(self, other: $t) -> $t { self + other } } @@ -208,27 +273,46 @@ add_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Sub`. When `Foo - Foo` happens, it ends up -/// calling `sub`, and therefore, `main` prints `Subtracting!`. +/// This example creates a `Point` struct that implements the `Sub` trait, and +/// then demonstrates subtracting two `Point`s. /// /// ``` /// use std::ops::Sub; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } +/// +/// impl Sub for Point { +/// type Output = Point; /// -/// impl Sub for Foo { -/// type Output = Foo; +/// fn sub(self, other: Point) -> Point { +/// Point { +/// x: self.x - other.x, +/// y: self.y - other.y, +/// } +/// } +/// } /// -/// fn sub(self, _rhs: Foo) -> Foo { -/// println!("Subtracting!"); -/// self +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y /// } /// } /// /// fn main() { -/// Foo - Foo; +/// assert_eq!(Point { x: 3, y: 3 } - Point { x: 2, y: 3 }, +/// Point { x: 1, y: 0 }); /// } /// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. For example, +/// [std::time::SystemTime] implements `Sub`, which permits +/// operations of the form `SystemTime = SystemTime - Duration`. +/// +/// [std::time::SystemTime]: ../../std/time/struct.SystemTime.html #[lang = "sub"] #[stable(feature = "rust1", since = "1.0.0")] pub trait Sub { @@ -248,6 +332,7 @@ macro_rules! 
sub_impl { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn sub(self, other: $t) -> $t { self - other } } @@ -261,26 +346,94 @@ sub_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Mul`. When `Foo * Foo` happens, it ends up -/// calling `mul`, and therefore, `main` prints `Multiplying!`. +/// Implementing a `Mul`tipliable rational number struct: /// /// ``` /// use std::ops::Mul; /// -/// struct Foo; +/// // The uniqueness of rational numbers in lowest terms is a consequence of +/// // the fundamental theorem of arithmetic. +/// #[derive(Eq)] +/// #[derive(PartialEq, Debug)] +/// struct Rational { +/// nominator: usize, +/// denominator: usize, +/// } +/// +/// impl Rational { +/// fn new(nominator: usize, denominator: usize) -> Self { +/// if denominator == 0 { +/// panic!("Zero is an invalid denominator!"); +/// } +/// +/// // Reduce to lowest terms by dividing by the greatest common +/// // divisor. +/// let gcd = gcd(nominator, denominator); +/// Rational { +/// nominator: nominator / gcd, +/// denominator: denominator / gcd, +/// } +/// } +/// } /// -/// impl Mul for Foo { -/// type Output = Foo; +/// impl Mul for Rational { +/// // The multiplication of rational numbers is a closed operation. +/// type Output = Self; /// -/// fn mul(self, _rhs: Foo) -> Foo { -/// println!("Multiplying!"); -/// self +/// fn mul(self, rhs: Self) -> Self { +/// let nominator = self.nominator * rhs.nominator; +/// let denominator = self.denominator * rhs.denominator; +/// Rational::new(nominator, denominator) /// } /// } /// -/// fn main() { -/// Foo * Foo; +/// // Euclid's two-thousand-year-old algorithm for finding the greatest common +/// // divisor. 
+/// fn gcd(x: usize, y: usize) -> usize { +/// let mut x = x; +/// let mut y = y; +/// while y != 0 { +/// let t = y; +/// y = x % y; +/// x = t; +/// } +/// x +/// } +/// +/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4)); +/// assert_eq!(Rational::new(2, 3) * Rational::new(3, 4), +/// Rational::new(1, 2)); +/// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. Here is an +/// implementation which enables multiplication of vectors by scalars, as is +/// done in linear algebra. +/// +/// ``` +/// use std::ops::Mul; +/// +/// struct Scalar {value: usize}; +/// +/// #[derive(Debug)] +/// struct Vector {value: Vec}; +/// +/// impl Mul for Scalar { +/// type Output = Vector; +/// +/// fn mul(self, rhs: Vector) -> Vector { +/// Vector {value: rhs.value.iter().map(|v| self.value * v).collect()} +/// } /// } +/// +/// impl PartialEq for Vector { +/// fn eq(&self, other: &Self) -> bool { +/// self.value == other.value +/// } +/// } +/// +/// let scalar = Scalar{value: 3}; +/// let vector = Vector{value: vec![2, 4, 6]}; +/// assert_eq!(scalar * vector, Vector{value: vec![6, 12, 18]}); /// ``` #[lang = "mul"] #[stable(feature = "rust1", since = "1.0.0")] @@ -301,6 +454,7 @@ macro_rules! mul_impl { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn mul(self, other: $t) -> $t { self * other } } @@ -314,26 +468,100 @@ mul_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up -/// calling `div`, and therefore, `main` prints `Dividing!`. +/// Implementing a `Div`idable rational number struct: /// /// ``` /// use std::ops::Div; /// -/// struct Foo; +/// // The uniqueness of rational numbers in lowest terms is a consequence of +/// // the fundamental theorem of arithmetic. 
+/// #[derive(Eq)] +/// #[derive(PartialEq, Debug)] +/// struct Rational { +/// nominator: usize, +/// denominator: usize, +/// } /// -/// impl Div for Foo { -/// type Output = Foo; +/// impl Rational { +/// fn new(nominator: usize, denominator: usize) -> Self { +/// if denominator == 0 { +/// panic!("Zero is an invalid denominator!"); +/// } +/// +/// // Reduce to lowest terms by dividing by the greatest common +/// // divisor. +/// let gcd = gcd(nominator, denominator); +/// Rational { +/// nominator: nominator / gcd, +/// denominator: denominator / gcd, +/// } +/// } +/// } /// -/// fn div(self, _rhs: Foo) -> Foo { -/// println!("Dividing!"); -/// self +/// impl Div for Rational { +/// // The division of rational numbers is a closed operation. +/// type Output = Self; +/// +/// fn div(self, rhs: Self) -> Self { +/// if rhs.nominator == 0 { +/// panic!("Cannot divide by zero-valued `Rational`!"); +/// } +/// +/// let nominator = self.nominator * rhs.denominator; +/// let denominator = self.denominator * rhs.nominator; +/// Rational::new(nominator, denominator) +/// } +/// } +/// +/// // Euclid's two-thousand-year-old algorithm for finding the greatest common +/// // divisor. +/// fn gcd(x: usize, y: usize) -> usize { +/// let mut x = x; +/// let mut y = y; +/// while y != 0 { +/// let t = y; +/// y = x % y; +/// x = t; /// } +/// x /// } /// /// fn main() { -/// Foo / Foo; +/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4)); +/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4), +/// Rational::new(2, 3)); +/// } +/// ``` +/// +/// Note that `RHS = Self` by default, but this is not mandatory. Here is an +/// implementation which enables division of vectors by scalars, as is done in +/// linear algebra. 
+/// +/// ``` +/// use std::ops::Div; +/// +/// struct Scalar {value: f32}; +/// +/// #[derive(Debug)] +/// struct Vector {value: Vec}; +/// +/// impl Div for Vector { +/// type Output = Vector; +/// +/// fn div(self, rhs: Scalar) -> Vector { +/// Vector {value: self.value.iter().map(|v| v / rhs.value).collect()} +/// } /// } +/// +/// impl PartialEq for Vector { +/// fn eq(&self, other: &Self) -> bool { +/// self.value == other.value +/// } +/// } +/// +/// let scalar = Scalar{value: 2f32}; +/// let vector = Vector{value: vec![2f32, 4f32, 6f32]}; +/// assert_eq!(vector / scalar, Vector{value: vec![1f32, 2f32, 3f32]}); /// ``` #[lang = "div"] #[stable(feature = "rust1", since = "1.0.0")] @@ -385,26 +613,34 @@ div_impl_float! { f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Rem`. When `Foo % Foo` happens, it ends up -/// calling `rem`, and therefore, `main` prints `Remainder-ing!`. +/// This example implements `Rem` on a `SplitSlice` object. After `Rem` is +/// implemented, one can use the `%` operator to find out what the remaining +/// elements of the slice would be after splitting it into equal slices of a +/// given length. 
/// /// ``` /// use std::ops::Rem; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct SplitSlice<'a, T: 'a> { +/// slice: &'a [T], +/// } /// -/// impl Rem for Foo { -/// type Output = Foo; +/// impl<'a, T> Rem for SplitSlice<'a, T> { +/// type Output = SplitSlice<'a, T>; /// -/// fn rem(self, _rhs: Foo) -> Foo { -/// println!("Remainder-ing!"); -/// self +/// fn rem(self, modulus: usize) -> Self { +/// let len = self.slice.len(); +/// let rem = len % modulus; +/// let start = len - rem; +/// SplitSlice {slice: &self.slice[start..]} /// } /// } /// -/// fn main() { -/// Foo % Foo; -/// } +/// // If we were to divide &[0, 1, 2, 3, 4, 5, 6, 7] into slices of size 3, +/// // the remainder would be &[6, 7] +/// assert_eq!(SplitSlice { slice: &[0, 1, 2, 3, 4, 5, 6, 7] } % 3, +/// SplitSlice { slice: &[6, 7] }); /// ``` #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] @@ -456,26 +692,37 @@ rem_impl_float! { f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Neg`. When `-Foo` happens, it ends up calling -/// `neg`, and therefore, `main` prints `Negating!`. +/// An implementation of `Neg` for `Sign`, which allows the use of `-` to +/// negate its value. 
/// /// ``` /// use std::ops::Neg; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// enum Sign { +/// Negative, +/// Zero, +/// Positive, +/// } /// -/// impl Neg for Foo { -/// type Output = Foo; +/// impl Neg for Sign { +/// type Output = Sign; /// -/// fn neg(self) -> Foo { -/// println!("Negating!"); -/// self +/// fn neg(self) -> Sign { +/// match self { +/// Sign::Negative => Sign::Positive, +/// Sign::Zero => Sign::Zero, +/// Sign::Positive => Sign::Negative, +/// } /// } /// } /// -/// fn main() { -/// -Foo; -/// } +/// // a negative positive is a negative +/// assert_eq!(-Sign::Positive, Sign::Negative); +/// // a double negative is a positive +/// assert_eq!(-Sign::Negative, Sign::Positive); +/// // zero is its own negation +/// assert_eq!(-Sign::Zero, Sign::Zero); /// ``` #[lang = "neg"] #[stable(feature = "rust1", since = "1.0.0")] @@ -498,6 +745,7 @@ macro_rules! neg_impl_core { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn neg(self) -> $t { let $id = self; $body } } @@ -523,26 +771,31 @@ neg_impl_numeric! { isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Not`. When `!Foo` happens, it ends up calling -/// `not`, and therefore, `main` prints `Not-ing!`. +/// An implementation of `Not` for `Answer`, which enables the use of `!` to +/// invert its value. 
/// /// ``` /// use std::ops::Not; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// enum Answer { +/// Yes, +/// No, +/// } /// -/// impl Not for Foo { -/// type Output = Foo; +/// impl Not for Answer { +/// type Output = Answer; /// -/// fn not(self) -> Foo { -/// println!("Not-ing!"); -/// self +/// fn not(self) -> Answer { +/// match self { +/// Answer::Yes => Answer::No, +/// Answer::No => Answer::Yes +/// } /// } /// } /// -/// fn main() { -/// !Foo; -/// } +/// assert_eq!(!Answer::Yes, Answer::No); +/// assert_eq!(!Answer::No, Answer::Yes); /// ``` #[lang = "not"] #[stable(feature = "rust1", since = "1.0.0")] @@ -576,25 +829,55 @@ not_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitAnd`. When `Foo & Foo` happens, it ends up -/// calling `bitand`, and therefore, `main` prints `Bitwise And-ing!`. +/// In this example, the `&` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitAnd; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitAnd for Foo { -/// type Output = Foo; +/// impl BitAnd for Scalar { +/// type Output = Self; /// -/// fn bitand(self, _rhs: Foo) -> Foo { -/// println!("Bitwise And-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a & b` +/// fn bitand(self, rhs: Self) -> Self { +/// Scalar(self.0 & rhs.0) /// } /// } /// /// fn main() { -/// Foo & Foo; +/// assert_eq!(Scalar(true) & Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(true) & Scalar(false), Scalar(false)); +/// assert_eq!(Scalar(false) & Scalar(true), Scalar(false)); +/// assert_eq!(Scalar(false) & Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitAnd` trait is implemented for a `BooleanVector` +/// struct. 
+/// +/// ``` +/// use std::ops::BitAnd; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitAnd for BooleanVector { +/// type Output = Self; +/// +/// fn bitand(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x && *y).collect()) +/// } +/// } +/// +/// fn main() { +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, false, false, false]); +/// assert_eq!(bv1 & bv2, expected); /// } /// ``` #[lang = "bitand"] @@ -629,25 +912,55 @@ bitand_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitOr`. When `Foo | Foo` happens, it ends up -/// calling `bitor`, and therefore, `main` prints `Bitwise Or-ing!`. +/// In this example, the `|` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitOr; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitOr for Foo { -/// type Output = Foo; +/// impl BitOr for Scalar { +/// type Output = Self; /// -/// fn bitor(self, _rhs: Foo) -> Foo { -/// println!("Bitwise Or-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a | b` +/// fn bitor(self, rhs: Self) -> Self { +/// Scalar(self.0 | rhs.0) +/// } +/// } +/// +/// fn main() { +/// assert_eq!(Scalar(true) | Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(true) | Scalar(false), Scalar(true)); +/// assert_eq!(Scalar(false) | Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(false) | Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitOr` trait is implemented for a `BooleanVector` +/// struct. 
+/// +/// ``` +/// use std::ops::BitOr; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitOr for BooleanVector { +/// type Output = Self; +/// +/// fn bitor(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x || *y).collect()) /// } /// } /// /// fn main() { -/// Foo | Foo; +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, true, true, false]); +/// assert_eq!(bv1 | bv2, expected); /// } /// ``` #[lang = "bitor"] @@ -682,25 +995,58 @@ bitor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `BitXor`. When `Foo ^ Foo` happens, it ends up -/// calling `bitxor`, and therefore, `main` prints `Bitwise Xor-ing!`. +/// In this example, the `^` operator is lifted to a trivial `Scalar` type. /// /// ``` /// use std::ops::BitXor; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitXor for Foo { -/// type Output = Foo; +/// impl BitXor for Scalar { +/// type Output = Self; /// -/// fn bitxor(self, _rhs: Foo) -> Foo { -/// println!("Bitwise Xor-ing!"); -/// self +/// // rhs is the "right-hand side" of the expression `a ^ b` +/// fn bitxor(self, rhs: Self) -> Self { +/// Scalar(self.0 ^ rhs.0) /// } /// } /// /// fn main() { -/// Foo ^ Foo; +/// assert_eq!(Scalar(true) ^ Scalar(true), Scalar(false)); +/// assert_eq!(Scalar(true) ^ Scalar(false), Scalar(true)); +/// assert_eq!(Scalar(false) ^ Scalar(true), Scalar(true)); +/// assert_eq!(Scalar(false) ^ Scalar(false), Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitXor` trait is implemented for a `BooleanVector` +/// struct. 
+/// +/// ``` +/// use std::ops::BitXor; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitXor for BooleanVector { +/// type Output = Self; +/// +/// fn bitxor(self, BooleanVector(rhs): Self) -> Self { +/// let BooleanVector(lhs) = self; +/// assert_eq!(lhs.len(), rhs.len()); +/// BooleanVector(lhs.iter() +/// .zip(rhs.iter()) +/// .map(|(x, y)| (*x || *y) && !(*x && *y)) +/// .collect()) +/// } +/// } +/// +/// fn main() { +/// let bv1 = BooleanVector(vec![true, true, false, false]); +/// let bv2 = BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![false, true, true, false]); +/// assert_eq!(bv1 ^ bv2, expected); /// } /// ``` #[lang = "bitxor"] @@ -735,25 +1081,54 @@ bitxor_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// /// # Examples /// -/// A trivial implementation of `Shl`. When `Foo << Foo` happens, it ends up -/// calling `shl`, and therefore, `main` prints `Shifting left!`. +/// An implementation of `Shl` that lifts the `<<` operation on integers to a +/// `Scalar` struct. /// /// ``` /// use std::ops::Shl; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct Scalar(usize); /// -/// impl Shl for Foo { -/// type Output = Foo; +/// impl Shl for Scalar { +/// type Output = Self; /// -/// fn shl(self, _rhs: Foo) -> Foo { -/// println!("Shifting left!"); -/// self +/// fn shl(self, Scalar(rhs): Self) -> Scalar { +/// let Scalar(lhs) = self; +/// Scalar(lhs << rhs) +/// } +/// } +/// fn main() { +/// assert_eq!(Scalar(4) << Scalar(2), Scalar(16)); +/// } +/// ``` +/// +/// An implementation of `Shl` that spins a vector leftward by a given amount. 
+/// +/// ``` +/// use std::ops::Shl; +/// +/// #[derive(PartialEq, Debug)] +/// struct SpinVector<T: Clone> { +/// vec: Vec<T>, +/// } +/// +/// impl<T: Clone> Shl<usize> for SpinVector<T> { +/// type Output = Self; +/// +/// fn shl(self, rhs: usize) -> SpinVector<T> { +/// // rotate the vector by `rhs` places +/// let (a, b) = self.vec.split_at(rhs); +/// let mut spun_vector: Vec<T> = vec![]; +/// spun_vector.extend_from_slice(b); +/// spun_vector.extend_from_slice(a); +/// SpinVector { vec: spun_vector } /// } /// } /// /// fn main() { -/// Foo << Foo; +/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } << 2, +/// SpinVector { vec: vec![2, 3, 4, 0, 1] }); /// } /// ``` #[lang = "shl"] @@ -775,6 +1150,7 @@ macro_rules! shl_impl { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn shl(self, other: $f) -> $t { self << other } @@ -806,25 +1182,54 @@ shl_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// /// # Examples /// -/// A trivial implementation of `Shr`. When `Foo >> Foo` happens, it ends up -/// calling `shr`, and therefore, `main` prints `Shifting right!`. +/// An implementation of `Shr` that lifts the `>>` operation on integers to a +/// `Scalar` struct. /// /// ``` /// use std::ops::Shr; /// -/// struct Foo; +/// #[derive(PartialEq, Debug)] +/// struct Scalar(usize); /// -/// impl Shr for Foo { -/// type Output = Foo; +/// impl Shr for Scalar { +/// type Output = Self; /// -/// fn shr(self, _rhs: Foo) -> Foo { -/// println!("Shifting right!"); -/// self +/// fn shr(self, Scalar(rhs): Self) -> Scalar { +/// let Scalar(lhs) = self; +/// Scalar(lhs >> rhs) +/// } +/// } +/// fn main() { +/// assert_eq!(Scalar(16) >> Scalar(2), Scalar(4)); +/// } +/// ``` +/// +/// An implementation of `Shr` that spins a vector rightward by a given amount. 
+/// +/// ``` +/// use std::ops::Shr; +/// +/// #[derive(PartialEq, Debug)] +/// struct SpinVector<T: Clone> { +/// vec: Vec<T>, +/// } +/// +/// impl<T: Clone> Shr<usize> for SpinVector<T> { +/// type Output = Self; +/// +/// fn shr(self, rhs: usize) -> SpinVector<T> { +/// // rotate the vector by `rhs` places +/// let (a, b) = self.vec.split_at(self.vec.len() - rhs); +/// let mut spun_vector: Vec<T> = vec![]; +/// spun_vector.extend_from_slice(b); +/// spun_vector.extend_from_slice(a); +/// SpinVector { vec: spun_vector } /// } /// } /// /// fn main() { -/// Foo >> Foo; +/// assert_eq!(SpinVector { vec: vec![0, 1, 2, 3, 4] } >> 2, +/// SpinVector { vec: vec![3, 4, 0, 1, 2] }); /// } /// ``` #[lang = "shr"] @@ -846,6 +1251,7 @@ macro_rules! shr_impl { type Output = $t; #[inline] + #[rustc_inherit_overflow_checks] fn shr(self, other: $f) -> $t { self >> other } @@ -877,41 +1283,51 @@ shr_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// /// # Examples /// -/// A trivial implementation of `AddAssign`. When `Foo += Foo` happens, it ends up -/// calling `add_assign`, and therefore, `main` prints `Adding!`. +/// This example creates a `Point` struct that implements the `AddAssign` +/// trait, and then demonstrates add-assigning to a mutable `Point`. 
/// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::AddAssign; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl AddAssign for Foo { -/// fn add_assign(&mut self, _rhs: Foo) { -/// println!("Adding!"); +/// impl AddAssign for Point { +/// fn add_assign(&mut self, other: Point) { +/// *self = Point { +/// x: self.x + other.x, +/// y: self.y + other.y, +/// }; /// } /// } /// -/// # #[allow(unused_assignments)] -/// fn main() { -/// let mut foo = Foo; -/// foo += Foo; +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y +/// } /// } +/// +/// let mut point = Point { x: 1, y: 0 }; +/// point += Point { x: 2, y: 3 }; +/// assert_eq!(point, Point { x: 3, y: 3 }); /// ``` #[lang = "add_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait AddAssign { /// The method for the `+=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn add_assign(&mut self, Rhs); } macro_rules! add_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl AddAssign for $t { #[inline] + #[rustc_inherit_overflow_checks] fn add_assign(&mut self, other: $t) { *self += other } } )+) @@ -923,41 +1339,51 @@ add_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `SubAssign`. When `Foo -= Foo` happens, it ends up -/// calling `sub_assign`, and therefore, `main` prints `Subtracting!`. +/// This example creates a `Point` struct that implements the `SubAssign` +/// trait, and then demonstrates sub-assigning to a mutable `Point`. 
/// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::SubAssign; /// -/// struct Foo; +/// #[derive(Debug)] +/// struct Point { +/// x: i32, +/// y: i32, +/// } /// -/// impl SubAssign for Foo { -/// fn sub_assign(&mut self, _rhs: Foo) { -/// println!("Subtracting!"); +/// impl SubAssign for Point { +/// fn sub_assign(&mut self, other: Point) { +/// *self = Point { +/// x: self.x - other.x, +/// y: self.y - other.y, +/// }; /// } /// } /// -/// # #[allow(unused_assignments)] -/// fn main() { -/// let mut foo = Foo; -/// foo -= Foo; +/// impl PartialEq for Point { +/// fn eq(&self, other: &Self) -> bool { +/// self.x == other.x && self.y == other.y +/// } /// } +/// +/// let mut point = Point { x: 3, y: 3 }; +/// point -= Point { x: 2, y: 3 }; +/// assert_eq!(point, Point {x: 1, y: 0}); /// ``` #[lang = "sub_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait SubAssign { /// The method for the `-=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn sub_assign(&mut self, Rhs); } macro_rules! sub_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl SubAssign for $t { #[inline] + #[rustc_inherit_overflow_checks] fn sub_assign(&mut self, other: $t) { *self -= other } } )+) @@ -973,9 +1399,6 @@ sub_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// calling `mul_assign`, and therefore, `main` prints `Multiplying!`. /// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::MulAssign; /// /// struct Foo; @@ -993,17 +1416,19 @@ sub_assign_impl! 
{ usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// } /// ``` #[lang = "mul_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait MulAssign { /// The method for the `*=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn mul_assign(&mut self, Rhs); } macro_rules! mul_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl MulAssign for $t { #[inline] + #[rustc_inherit_overflow_checks] fn mul_assign(&mut self, other: $t) { *self *= other } } )+) @@ -1019,9 +1444,6 @@ mul_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// calling `div_assign`, and therefore, `main` prints `Dividing!`. /// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::DivAssign; /// /// struct Foo; @@ -1039,15 +1461,16 @@ mul_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// } /// ``` #[lang = "div_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait DivAssign { /// The method for the `/=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn div_assign(&mut self, Rhs); } macro_rules! div_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl DivAssign for $t { #[inline] fn div_assign(&mut self, other: $t) { *self /= other } @@ -1065,9 +1488,6 @@ div_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// calling `rem_assign`, and therefore, `main` prints `Remainder-ing!`. 
/// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::RemAssign; /// /// struct Foo; @@ -1085,15 +1505,16 @@ div_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// } /// ``` #[lang = "rem_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait RemAssign { /// The method for the `%=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn rem_assign(&mut self, Rhs); } macro_rules! rem_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl RemAssign for $t { #[inline] fn rem_assign(&mut self, other: $t) { *self %= other } @@ -1107,39 +1528,79 @@ rem_assign_impl! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `BitAndAssign`. When `Foo &= Foo` happens, it ends up -/// calling `bitand_assign`, and therefore, `main` prints `Bitwise And-ing!`. +/// In this example, the `&=` operator is lifted to a trivial `Scalar` type. 
/// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::BitAndAssign; /// -/// struct Foo; +/// #[derive(Debug, PartialEq)] +/// struct Scalar(bool); /// -/// impl BitAndAssign for Foo { -/// fn bitand_assign(&mut self, _rhs: Foo) { -/// println!("Bitwise And-ing!"); +/// impl BitAndAssign for Scalar { +/// // rhs is the "right-hand side" of the expression `a &= b` +/// fn bitand_assign(&mut self, rhs: Self) { +/// *self = Scalar(self.0 & rhs.0) /// } /// } /// -/// # #[allow(unused_assignments)] /// fn main() { -/// let mut foo = Foo; -/// foo &= Foo; +/// let mut scalar = Scalar(true); +/// scalar &= Scalar(true); +/// assert_eq!(scalar, Scalar(true)); +/// +/// let mut scalar = Scalar(true); +/// scalar &= Scalar(false); +/// assert_eq!(scalar, Scalar(false)); +/// +/// let mut scalar = Scalar(false); +/// scalar &= Scalar(true); +/// assert_eq!(scalar, Scalar(false)); +/// +/// let mut scalar = Scalar(false); +/// scalar &= Scalar(false); +/// assert_eq!(scalar, Scalar(false)); +/// } +/// ``` +/// +/// In this example, the `BitAndAssign` trait is implemented for a +/// `BooleanVector` struct. 
+/// +/// ``` +/// use std::ops::BitAndAssign; +/// +/// #[derive(Debug, PartialEq)] +/// struct BooleanVector(Vec); +/// +/// impl BitAndAssign for BooleanVector { +/// // rhs is the "right-hand side" of the expression `a &= b` +/// fn bitand_assign(&mut self, rhs: Self) { +/// assert_eq!(self.0.len(), rhs.0.len()); +/// *self = BooleanVector(self.0 +/// .iter() +/// .zip(rhs.0.iter()) +/// .map(|(x, y)| *x && *y) +/// .collect()); +/// } +/// } +/// +/// fn main() { +/// let mut bv = BooleanVector(vec![true, true, false, false]); +/// bv &= BooleanVector(vec![true, false, true, false]); +/// let expected = BooleanVector(vec![true, false, false, false]); +/// assert_eq!(bv, expected); /// } /// ``` #[lang = "bitand_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitAndAssign { - /// The method for the `&` operator + /// The method for the `&=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitand_assign(&mut self, Rhs); } macro_rules! bitand_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitAndAssign for $t { #[inline] fn bitand_assign(&mut self, other: $t) { *self &= other } @@ -1157,9 +1618,6 @@ bitand_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// calling `bitor_assign`, and therefore, `main` prints `Bitwise Or-ing!`. /// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::BitOrAssign; /// /// struct Foo; @@ -1177,15 +1635,16 @@ bitand_assign_impl! 
{ bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// } /// ``` #[lang = "bitor_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitOrAssign { /// The method for the `|=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitor_assign(&mut self, Rhs); } macro_rules! bitor_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitOrAssign for $t { #[inline] fn bitor_assign(&mut self, other: $t) { *self |= other } @@ -1203,9 +1662,6 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// calling `bitxor_assign`, and therefore, `main` prints `Bitwise Xor-ing!`. /// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::BitXorAssign; /// /// struct Foo; @@ -1223,15 +1679,16 @@ bitor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// } /// ``` #[lang = "bitxor_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait BitXorAssign { /// The method for the `^=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn bitxor_assign(&mut self, Rhs); } macro_rules! bitxor_assign_impl { ($($t:ty)+) => ($( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl BitXorAssign for $t { #[inline] fn bitxor_assign(&mut self, other: $t) { *self ^= other } @@ -1249,9 +1706,6 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// calling `shl_assign`, and therefore, `main` prints `Shifting left!`. 
/// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::ShlAssign; /// /// struct Foo; @@ -1269,17 +1723,19 @@ bitxor_assign_impl! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 } /// } /// ``` #[lang = "shl_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait ShlAssign { /// The method for the `<<=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn shl_assign(&mut self, Rhs); } macro_rules! shl_assign_impl { ($t:ty, $f:ty) => ( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShlAssign<$f> for $t { #[inline] + #[rustc_inherit_overflow_checks] fn shl_assign(&mut self, other: $f) { *self <<= other } @@ -1313,9 +1769,6 @@ shl_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// calling `shr_assign`, and therefore, `main` prints `Shifting right!`. /// /// ``` -/// #![feature(augmented_assignments)] -/// #![feature(op_assign_traits)] -/// /// use std::ops::ShrAssign; /// /// struct Foo; @@ -1333,17 +1786,19 @@ shl_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// } /// ``` #[lang = "shr_assign"] -#[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] +#[stable(feature = "op_assign_traits", since = "1.8.0")] pub trait ShrAssign { /// The method for the `>>=` operator + #[stable(feature = "op_assign_traits", since = "1.8.0")] fn shr_assign(&mut self, Rhs); } macro_rules! shr_assign_impl { ($t:ty, $f:ty) => ( - #[unstable(feature = "op_assign_traits", reason = "recently added", issue = "28235")] + #[stable(feature = "op_assign_traits", since = "1.8.0")] impl ShrAssign<$f> for $t { #[inline] + #[rustc_inherit_overflow_checks] fn shr_assign(&mut self, other: $f) { *self >>= other } @@ -1370,32 +1825,56 @@ macro_rules! 
shr_assign_impl_all { shr_assign_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize } /// The `Index` trait is used to specify the functionality of indexing operations -/// like `arr[idx]` when used in an immutable context. +/// like `container[index]` when used in an immutable context. +/// +/// `container[index]` is actually syntactic sugar for `*container.index(index)`, +/// but only when used as an immutable value. If a mutable value is requested, +/// [`IndexMut`] is used instead. This allows nice things such as +/// `let value = v[index]` if `value` implements [`Copy`]. +/// +/// [`IndexMut`]: ../../std/ops/trait.IndexMut.html +/// [`Copy`]: ../../std/marker/trait.Copy.html /// /// # Examples /// -/// A trivial implementation of `Index`. When `Foo[Bar]` happens, it ends up -/// calling `index`, and therefore, `main` prints `Indexing!`. +/// The following example implements `Index` on a read-only `NucleotideCount` +/// container, enabling individual counts to be retrieved with index syntax. 
/// /// ``` /// use std::ops::Index; /// -/// #[derive(Copy, Clone)] -/// struct Foo; -/// struct Bar; +/// enum Nucleotide { +/// A, +/// C, +/// G, +/// T, +/// } /// -/// impl Index for Foo { -/// type Output = Foo; +/// struct NucleotideCount { +/// a: usize, +/// c: usize, +/// g: usize, +/// t: usize, +/// } +/// +/// impl Index for NucleotideCount { +/// type Output = usize; /// -/// fn index<'a>(&'a self, _index: Bar) -> &'a Foo { -/// println!("Indexing!"); -/// self +/// fn index(&self, nucleotide: Nucleotide) -> &usize { +/// match nucleotide { +/// Nucleotide::A => &self.a, +/// Nucleotide::C => &self.c, +/// Nucleotide::G => &self.g, +/// Nucleotide::T => &self.t, +/// } /// } /// } /// -/// fn main() { -/// Foo[Bar]; -/// } +/// let nucleotide_count = NucleotideCount {a: 14, c: 9, g: 10, t: 12}; +/// assert_eq!(nucleotide_count[Nucleotide::A], 14); +/// assert_eq!(nucleotide_count[Nucleotide::C], 9); +/// assert_eq!(nucleotide_count[Nucleotide::G], 10); +/// assert_eq!(nucleotide_count[Nucleotide::T], 12); /// ``` #[lang = "index"] #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] @@ -1405,57 +1884,127 @@ pub trait Index { #[stable(feature = "rust1", since = "1.0.0")] type Output: ?Sized; - /// The method for the indexing (`Foo[Bar]`) operation + /// The method for the indexing (`container[index]`) operation #[stable(feature = "rust1", since = "1.0.0")] fn index(&self, index: Idx) -> &Self::Output; } /// The `IndexMut` trait is used to specify the functionality of indexing -/// operations like `arr[idx]`, when used in a mutable context. +/// operations like `container[index]` when used in a mutable context. +/// +/// `container[index]` is actually syntactic sugar for +/// `*container.index_mut(index)`, but only when used as a mutable value. If +/// an immutable value is requested, the [`Index`] trait is used instead. This +/// allows nice things such as `v[index] = value` if `value` implements [`Copy`]. 
+/// +/// [`Index`]: ../../std/ops/trait.Index.html +/// [`Copy`]: ../../std/marker/trait.Copy.html /// /// # Examples /// -/// A trivial implementation of `IndexMut`. When `Foo[Bar]` happens, it ends up -/// calling `index_mut`, and therefore, `main` prints `Indexing!`. +/// A very simple implementation of a `Balance` struct that has two sides, where +/// each can be indexed mutably and immutably. /// /// ``` -/// use std::ops::{Index, IndexMut}; +/// use std::ops::{Index,IndexMut}; /// -/// #[derive(Copy, Clone)] -/// struct Foo; -/// struct Bar; +/// #[derive(Debug)] +/// enum Side { +/// Left, +/// Right, +/// } +/// +/// #[derive(Debug, PartialEq)] +/// enum Weight { +/// Kilogram(f32), +/// Pound(f32), +/// } /// -/// impl Index for Foo { -/// type Output = Foo; +/// struct Balance { +/// pub left: Weight, +/// pub right:Weight, +/// } +/// +/// impl Index for Balance { +/// type Output = Weight; /// -/// fn index<'a>(&'a self, _index: Bar) -> &'a Foo { -/// self +/// fn index<'a>(&'a self, index: Side) -> &'a Weight { +/// println!("Accessing {:?}-side of balance immutably", index); +/// match index { +/// Side::Left => &self.left, +/// Side::Right => &self.right, +/// } /// } /// } /// -/// impl IndexMut for Foo { -/// fn index_mut<'a>(&'a mut self, _index: Bar) -> &'a mut Foo { -/// println!("Indexing!"); -/// self +/// impl IndexMut for Balance { +/// fn index_mut<'a>(&'a mut self, index: Side) -> &'a mut Weight { +/// println!("Accessing {:?}-side of balance mutably", index); +/// match index { +/// Side::Left => &mut self.left, +/// Side::Right => &mut self.right, +/// } /// } /// } /// /// fn main() { -/// &mut Foo[Bar]; +/// let mut balance = Balance { +/// right: Weight::Kilogram(2.5), +/// left: Weight::Pound(1.5), +/// }; +/// +/// // In this case balance[Side::Right] is sugar for +/// // *balance.index(Side::Right), since we are only reading +/// // balance[Side::Right], not writing it. 
+/// assert_eq!(balance[Side::Right],Weight::Kilogram(2.5)); +/// +/// // However in this case balance[Side::Left] is sugar for +/// // *balance.index_mut(Side::Left), since we are writing +/// // balance[Side::Left]. +/// balance[Side::Left] = Weight::Kilogram(3.0); /// } /// ``` #[lang = "index_mut"] #[rustc_on_unimplemented = "the type `{Self}` cannot be mutably indexed by `{Idx}`"] #[stable(feature = "rust1", since = "1.0.0")] pub trait IndexMut: Index { - /// The method for the indexing (`Foo[Bar]`) operation + /// The method for the mutable indexing (`container[index]`) operation #[stable(feature = "rust1", since = "1.0.0")] fn index_mut(&mut self, index: Idx) -> &mut Self::Output; } -/// An unbounded range. -#[derive(Copy, Clone, PartialEq, Eq)] -#[lang = "range_full"] +/// An unbounded range. Use `..` (two dots) for its shorthand. +/// +/// Its primary use case is slicing index. It cannot serve as an iterator +/// because it doesn't have a starting point. +/// +/// # Examples +/// +/// The `..` syntax is a `RangeFull`: +/// +/// ``` +/// assert_eq!((..), std::ops::RangeFull); +/// ``` +/// +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in .. { +/// // ... +/// } +/// ``` +/// +/// Used as a slicing index, `RangeFull` produces the full array as a slice. +/// +/// ``` +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. ], [0,1,2,3]); // RangeFull +/// assert_eq!(arr[ ..3], [0,1,2 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3]); +/// assert_eq!(arr[1..3], [ 1,2 ]); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFull; @@ -1466,9 +2015,26 @@ impl fmt::Debug for RangeFull { } } -/// A (half-open) range which is bounded at both ends. -#[derive(Clone, PartialEq, Eq)] -#[lang = "range"] +/// A (half-open) range which is bounded at both ends: { x | start <= x < end }. 
+/// Use `start..end` (two dots) for its shorthand. +/// +/// See the [`contains()`](#method.contains) method for its characterization. +/// +/// # Examples +/// +/// ``` +/// fn main() { +/// assert_eq!((3..5), std::ops::Range{ start: 3, end: 5 }); +/// assert_eq!(3+4+5, (3..6).sum()); +/// +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. ], [0,1,2,3]); +/// assert_eq!(arr[ ..3], [0,1,2 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3]); +/// assert_eq!(arr[1..3], [ 1,2 ]); // Range +/// } +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct Range { /// The lower bound of the range (inclusive). @@ -1486,9 +2052,52 @@ impl fmt::Debug for Range { } } -/// A range which is only bounded below. -#[derive(Clone, PartialEq, Eq)] -#[lang = "range_from"] +#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] +impl> Range { + /// # Examples + /// + /// ``` + /// #![feature(range_contains)] + /// fn main() { + /// assert!( ! (3..5).contains(2)); + /// assert!( (3..5).contains(3)); + /// assert!( (3..5).contains(4)); + /// assert!( ! (3..5).contains(5)); + /// + /// assert!( ! (3..3).contains(3)); + /// assert!( ! (3..2).contains(3)); + /// } + /// ``` + pub fn contains(&self, item: Idx) -> bool { + (self.start <= item) && (item < self.end) + } +} + +/// A range which is only bounded below: { x | start <= x }. +/// Use `start..` for its shorthand. +/// +/// See the [`contains()`](#method.contains) method for its characterization. +/// +/// Note: Currently, no overflow checking is done for the iterator +/// implementation; if you use an integer range and the integer overflows, it +/// might panic in debug mode or create an endless loop in release mode. This +/// overflow behavior might change in the future. 
+/// +/// # Examples +/// +/// ``` +/// fn main() { +/// assert_eq!((2..), std::ops::RangeFrom{ start: 2 }); +/// assert_eq!(2+3+4, (2..).take(3).sum()); +/// +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. ], [0,1,2,3]); +/// assert_eq!(arr[ ..3], [0,1,2 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3]); // RangeFrom +/// assert_eq!(arr[1..3], [ 1,2 ]); +/// } +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFrom { /// The lower bound of the range (inclusive). @@ -1503,9 +2112,58 @@ impl fmt::Debug for RangeFrom { } } -/// A range which is only bounded above. -#[derive(Copy, Clone, PartialEq, Eq)] -#[lang = "range_to"] +#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] +impl> RangeFrom { + /// # Examples + /// + /// ``` + /// #![feature(range_contains)] + /// fn main() { + /// assert!( ! (3..).contains(2)); + /// assert!( (3..).contains(3)); + /// assert!( (3..).contains(1_000_000_000)); + /// } + /// ``` + pub fn contains(&self, item: Idx) -> bool { + (self.start <= item) + } +} + +/// A range which is only bounded above: { x | x < end }. +/// Use `..end` (two dots) for its shorthand. +/// +/// See the [`contains()`](#method.contains) method for its characterization. +/// +/// It cannot serve as an iterator because it doesn't have a starting point. +/// +/// # Examples +/// +/// The `..{integer}` syntax is a `RangeTo`: +/// +/// ``` +/// assert_eq!((..5), std::ops::RangeTo{ end: 5 }); +/// ``` +/// +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in ..5 { +/// // ... +/// } +/// ``` +/// +/// When used as a slicing index, `RangeTo` produces a slice of all array +/// elements before the index indicated by `end`. +/// +/// ``` +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ .. 
], [0,1,2,3]); +/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo +/// assert_eq!(arr[1.. ], [ 1,2,3]); +/// assert_eq!(arr[1..3], [ 1,2 ]); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeTo { /// The upper bound of the range (exclusive). @@ -1520,6 +2178,180 @@ impl fmt::Debug for RangeTo { } } +#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] +impl> RangeTo { + /// # Examples + /// + /// ``` + /// #![feature(range_contains)] + /// fn main() { + /// assert!( (..5).contains(-1_000_000_000)); + /// assert!( (..5).contains(4)); + /// assert!( ! (..5).contains(5)); + /// } + /// ``` + pub fn contains(&self, item: Idx) -> bool { + (item < self.end) + } +} + +/// An inclusive range which is bounded at both ends: { x | start <= x <= end }. +/// Use `start...end` (three dots) for its shorthand. +/// +/// See the [`contains()`](#method.contains) method for its characterization. +/// +/// # Examples +/// +/// ``` +/// #![feature(inclusive_range,inclusive_range_syntax)] +/// fn main() { +/// assert_eq!((3...5), std::ops::RangeInclusive::NonEmpty{ start: 3, end: 5 }); +/// assert_eq!(3+4+5, (3...5).sum()); +/// +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ ...2], [0,1,2 ]); +/// assert_eq!(arr[1...2], [ 1,2 ]); // RangeInclusive +/// } +/// ``` +#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +pub enum RangeInclusive { + /// Empty range (iteration has finished) + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + Empty { + /// The point at which iteration finished + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + at: Idx + }, + /// Non-empty range (iteration will yield value(s)) + #[unstable(feature = "inclusive_range", + reason = "recently 
added, follows RFC", + issue = "28237")] + NonEmpty { + /// The lower bound of the range (inclusive). + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + start: Idx, + /// The upper bound of the range (inclusive). + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + end: Idx, + }, +} + +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl fmt::Debug for RangeInclusive { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use self::RangeInclusive::*; + + match *self { + Empty { ref at } => write!(fmt, "[empty range @ {:?}]", at), + NonEmpty { ref start, ref end } => write!(fmt, "{:?}...{:?}", start, end), + } + } +} + +#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] +impl> RangeInclusive { + /// # Examples + /// + /// ``` + /// #![feature(range_contains,inclusive_range_syntax)] + /// fn main() { + /// assert!( ! (3...5).contains(2)); + /// assert!( (3...5).contains(3)); + /// assert!( (3...5).contains(4)); + /// assert!( (3...5).contains(5)); + /// assert!( ! (3...5).contains(6)); + /// + /// assert!( (3...3).contains(3)); + /// assert!( ! (3...2).contains(3)); + /// } + /// ``` + pub fn contains(&self, item: Idx) -> bool { + if let &RangeInclusive::NonEmpty{ref start, ref end} = self { + (*start <= item) && (item <= *end) + } else { false } + } +} + +/// An inclusive range which is only bounded above: { x | x <= end }. +/// Use `...end` (three dots) for its shorthand. +/// +/// See the [`contains()`](#method.contains) method for its characterization. +/// +/// It cannot serve as an iterator because it doesn't have a starting point. 
+/// +/// # Examples +/// +/// The `...{integer}` syntax is a `RangeToInclusive`: +/// +/// ``` +/// #![feature(inclusive_range,inclusive_range_syntax)] +/// assert_eq!((...5), std::ops::RangeToInclusive{ end: 5 }); +/// ``` +/// +/// It does not have an `IntoIterator` implementation, so you can't use it in a +/// `for` loop directly. This won't compile: +/// +/// ```ignore +/// for i in ...5 { +/// // ... +/// } +/// ``` +/// +/// When used as a slicing index, `RangeToInclusive` produces a slice of all +/// array elements up to and including the index indicated by `end`. +/// +/// ``` +/// #![feature(inclusive_range_syntax)] +/// let arr = [0, 1, 2, 3]; +/// assert_eq!(arr[ ...2], [0,1,2 ]); // RangeToInclusive +/// assert_eq!(arr[1...2], [ 1,2 ]); +/// ``` +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +pub struct RangeToInclusive { + /// The upper bound of the range (inclusive) + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + pub end: Idx, +} + +#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")] +impl fmt::Debug for RangeToInclusive { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "...{:?}", self.end) + } +} + +#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] +impl> RangeToInclusive { + /// # Examples + /// + /// ``` + /// #![feature(range_contains,inclusive_range_syntax)] + /// fn main() { + /// assert!( (...5).contains(-1_000_000_000)); + /// assert!( (...5).contains(5)); + /// assert!( ! 
(...5).contains(6)); + /// } + /// ``` + pub fn contains(&self, item: Idx) -> bool { + (item <= self.end) + } +} + +// RangeToInclusive cannot impl From> +// because underflow would be possible with (..0).into() + /// The `Deref` trait is used to specify the functionality of dereferencing /// operations, like `*v`. /// @@ -1600,13 +2432,13 @@ impl<'a, T: ?Sized> Deref for &'a mut T { /// impl Deref for DerefMutExample { /// type Target = T; /// -/// fn deref<'a>(&'a self) -> &'a T { +/// fn deref(&self) -> &T { /// &self.value /// } /// } /// /// impl DerefMut for DerefMutExample { -/// fn deref_mut<'a>(&'a mut self) -> &'a mut T { +/// fn deref_mut(&mut self) -> &mut T { /// &mut self.value /// } /// } @@ -1631,6 +2463,35 @@ impl<'a, T: ?Sized> DerefMut for &'a mut T { } /// A version of the call operator that takes an immutable receiver. +/// +/// # Examples +/// +/// Closures automatically implement this trait, which allows them to be +/// invoked. Note, however, that `Fn` takes an immutable reference to any +/// captured variables. To take a mutable capture, implement [`FnMut`], and to +/// consume the capture, implement [`FnOnce`]. +/// +/// [`FnMut`]: trait.FnMut.html +/// [`FnOnce`]: trait.FnOnce.html +/// +/// ``` +/// let square = |x| x * x; +/// assert_eq!(square(5), 25); +/// ``` +/// +/// Closures can also be passed to higher-level functions through a `Fn` +/// parameter (or a `FnMut` or `FnOnce` parameter, which are supertraits of +/// `Fn`). +/// +/// ``` +/// fn call_with_one(func: F) -> usize +/// where F: Fn(usize) -> usize { +/// func(1) +/// } +/// +/// let double = |x| x * 2; +/// assert_eq!(call_with_one(double), 2); +/// ``` #[lang = "fn"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] @@ -1642,6 +2503,40 @@ pub trait Fn : FnMut { } /// A version of the call operator that takes a mutable receiver. 
+/// +/// # Examples +/// +/// Closures that mutably capture variables automatically implement this trait, +/// which allows them to be invoked. +/// +/// ``` +/// let mut x = 5; +/// { +/// let mut square_x = || x *= x; +/// square_x(); +/// } +/// assert_eq!(x, 25); +/// ``` +/// +/// Closures can also be passed to higher-level functions through a `FnMut` +/// parameter (or a `FnOnce` parameter, which is a supertrait of `FnMut`). +/// +/// ``` +/// fn do_twice(mut func: F) +/// where F: FnMut() +/// { +/// func(); +/// func(); +/// } +/// +/// let mut x: usize = 1; +/// { +/// let add_two_to_x = || x += 2; +/// do_twice(add_two_to_x); +/// } +/// +/// assert_eq!(x, 5); +/// ``` #[lang = "fn_mut"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] @@ -1653,13 +2548,48 @@ pub trait FnMut : FnOnce { } /// A version of the call operator that takes a by-value receiver. +/// +/// # Examples +/// +/// By-value closures automatically implement this trait, which allows them to +/// be invoked. +/// +/// ``` +/// let x = 5; +/// let square_x = move || x * x; +/// assert_eq!(square_x(), 25); +/// ``` +/// +/// By-value Closures can also be passed to higher-level functions through a +/// `FnOnce` parameter. 
+/// +/// ``` +/// fn consume_with_relish(func: F) +/// where F: FnOnce() -> String +/// { +/// // `func` consumes its captured variables, so it cannot be run more +/// // than once +/// println!("Consumed: {}", func()); +/// +/// println!("Delicious!"); +/// +/// // Attempting to invoke `func()` again will throw a `use of moved +/// // value` error for `func` +/// } +/// +/// let x = String::from("x"); +/// let consume_and_return_x = move || x; +/// consume_with_relish(consume_and_return_x); +/// +/// // `consume_and_return_x` can no longer be invoked at this point +/// ``` #[lang = "fn_once"] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_paren_sugar] #[fundamental] // so that regex can rely that `&str: !FnMut` pub trait FnOnce { /// The returned type after the call operator is used. - #[unstable(feature = "fn_traits", issue = "29625")] + #[stable(feature = "fn_once_output", since = "1.12.0")] type Output; /// This is called when the call operator is used. @@ -1668,9 +2598,6 @@ pub trait FnOnce { } mod impls { - use marker::Sized; - use super::{Fn, FnMut, FnOnce}; - #[stable(feature = "rust1", since = "1.0.0")] impl<'a,A,F:?Sized> Fn for &'a F where F : Fn @@ -1875,3 +2802,74 @@ pub trait BoxPlace : Place { /// Creates a globally fresh place. fn make_place() -> Self; } + +/// A trait for types which have success and error states and are meant to work +/// with the question mark operator. +/// When the `?` operator is used with a value, whether the value is in the +/// success or error state is determined by calling `translate`. +/// +/// This trait is **very** experimental, it will probably be iterated on heavily +/// before it is stabilised. Implementors should expect change. Users of `?` +/// should not rely on any implementations of `Carrier` other than `Result`, +/// i.e., you should not expect `?` to continue to work with `Option`, etc. 
+#[unstable(feature = "question_mark_carrier", issue = "31436")] +pub trait Carrier { + /// The type of the value when computation succeeds. + type Success; + /// The type of the value when computation errors out. + type Error; + + /// Create a `Carrier` from a success value. + fn from_success(Self::Success) -> Self; + + /// Create a `Carrier` from an error value. + fn from_error(Self::Error) -> Self; + + /// Translate this `Carrier` to another implementation of `Carrier` with the + /// same associated types. + fn translate(self) -> T where T: Carrier; +} + +#[unstable(feature = "question_mark_carrier", issue = "31436")] +impl Carrier for Result { + type Success = U; + type Error = V; + + fn from_success(u: U) -> Result { + Ok(u) + } + + fn from_error(e: V) -> Result { + Err(e) + } + + fn translate(self) -> T + where T: Carrier + { + match self { + Ok(u) => T::from_success(u), + Err(e) => T::from_error(e), + } + } +} + +struct _DummyErrorType; + +impl Carrier for _DummyErrorType { + type Success = (); + type Error = (); + + fn from_success(_: ()) -> _DummyErrorType { + _DummyErrorType + } + + fn from_error(_: ()) -> _DummyErrorType { + _DummyErrorType + } + + fn translate(self) -> T + where T: Carrier + { + T::from_success(()) + } +} diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 8d40faf3bc6ff..8871e1fa840ef 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -8,11 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Optional values +//! Optional values. //! -//! Type `Option` represents an optional value: every `Option` -//! is either `Some` and contains a value, or `None`, and -//! does not. `Option` types are very common in Rust code, as +//! Type [`Option`] represents an optional value: every [`Option`] +//! is either [`Some`] and contains a value, or [`None`], and +//! does not. [`Option`] types are very common in Rust code, as //! they have a number of uses: //! //! 
* Initial values @@ -26,8 +26,8 @@ //! * Nullable pointers //! * Swapping things out of difficult situations //! -//! Options are commonly paired with pattern matching to query the presence -//! of a value and take action, always accounting for the `None` case. +//! [`Option`]s are commonly paired with pattern matching to query the presence +//! of a value and take action, always accounting for the [`None`] case. //! //! ``` //! fn divide(numerator: f64, denominator: f64) -> Option { @@ -57,13 +57,13 @@ //! //! Rust's pointer types must always point to a valid location; there are //! no "null" pointers. Instead, Rust has *optional* pointers, like -//! the optional owned box, `Option>`. +//! the optional owned box, [`Option`]`<`[`Box`]`>`. //! -//! The following example uses `Option` to create an optional box of -//! `i32`. Notice that in order to use the inner `i32` value first the +//! The following example uses [`Option`] to create an optional box of +//! [`i32`]. Notice that in order to use the inner [`i32`] value first the //! `check_optional` function needs to use pattern matching to -//! determine whether the box has a value (i.e. it is `Some(...)`) or -//! not (`None`). +//! determine whether the box has a value (i.e. it is [`Some(...)`][`Some`]) or +//! not ([`None`]). //! //! ``` //! let optional: Option> = None; @@ -80,32 +80,28 @@ //! } //! ``` //! -//! This usage of `Option` to create safe nullable pointers is so +//! This usage of [`Option`] to create safe nullable pointers is so //! common that Rust does special optimizations to make the -//! representation of `Option>` a single pointer. Optional pointers +//! representation of [`Option`]`<`[`Box`]`>` a single pointer. Optional pointers //! in Rust are stored as efficiently as any other pointer type. //! //! # Examples //! -//! Basic pattern matching on `Option`: +//! Basic pattern matching on [`Option`]: //! //! ``` //! let msg = Some("howdy"); //! //! // Take a reference to the contained string -//! 
match msg { -//! Some(ref m) => println!("{}", *m), -//! None => (), +//! if let Some(ref m) = msg { +//! println!("{}", *m); //! } //! //! // Remove the contained string, destroying the Option -//! let unwrapped_msg = match msg { -//! Some(m) => m, -//! None => "default message", -//! }; +//! let unwrapped_msg = msg.unwrap_or("default message"); //! ``` //! -//! Initialize a result to `None` before a loop: +//! Initialize a result to [`None`] before a loop: //! //! ``` //! enum Kingdom { Plant(u32, &'static str), Animal(u32, &'static str) } @@ -140,20 +136,17 @@ //! None => println!("there are no animals :("), //! } //! ``` +//! +//! [`Option`]: enum.Option.html +//! [`Some`]: enum.Option.html#variant.Some +//! [`None`]: enum.Option.html#variant.None +//! [`Box`]: ../../std/boxed/struct.Box.html +//! [`i32`]: ../../std/primitive.i32.html #![stable(feature = "rust1", since = "1.0.0")] -use self::Option::*; - -use clone::Clone; -use cmp::{Eq, Ord}; -use default::Default; -use iter::ExactSizeIterator; -use iter::{Iterator, DoubleEndedIterator, FromIterator, IntoIterator}; +use iter::{FromIterator, FusedIterator, TrustedLen}; use mem; -use ops::FnOnce; -use result::Result::{Ok, Err}; -use result::Result; // Note that this is not a lang item per se, but it has a hidden dependency on // `Iterator`, which is one. The compiler assumes that the `next` method of @@ -169,7 +162,7 @@ pub enum Option { None, /// Some value `T` #[stable(feature = "rust1", since = "1.0.0")] - Some(#[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] T) + Some(#[stable(feature = "rust1", since = "1.0.0")] T), } ///////////////////////////////////////////////////////////////////////////// @@ -181,7 +174,7 @@ impl Option { // Querying the contained values ///////////////////////////////////////////////////////////////////////// - /// Returns `true` if the option is a `Some` value + /// Returns `true` if the option is a `Some` value. 
/// /// # Examples /// @@ -201,7 +194,7 @@ impl Option { } } - /// Returns `true` if the option is a `None` value + /// Returns `true` if the option is a `None` value. /// /// # Examples /// @@ -222,15 +215,17 @@ impl Option { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Option` to `Option<&T>` + /// Converts from `Option` to `Option<&T>`. /// /// # Examples /// /// Convert an `Option` into an `Option`, preserving the original. - /// The `map` method takes the `self` argument by value, consuming the original, + /// The [`map`] method takes the `self` argument by value, consuming the original, /// so this technique uses `as_ref` to first take an `Option` to a reference /// to the value inside the original. /// + /// [`map`]: enum.Option.html#method.map + /// /// ``` /// let num_as_str: Option = Some("10".to_string()); /// // First, cast `Option` to `Option<&String>` with `as_ref`, @@ -247,7 +242,7 @@ impl Option { } } - /// Converts from `Option` to `Option<&mut T>` + /// Converts from `Option` to `Option<&mut T>`. /// /// # Examples /// @@ -295,22 +290,20 @@ impl Option { pub fn expect(self, msg: &str) -> T { match self { Some(val) => val, - None => panic!("{}", msg), + None => expect_failed(msg), } } /// Moves the value `v` out of the `Option` if it is `Some(v)`. /// - /// # Panics - /// - /// Panics if the self value equals `None`. - /// - /// # Safety note - /// /// In general, because this function may panic, its use is discouraged. /// Instead, prefer to use pattern matching and handle the `None` /// case explicitly. /// + /// # Panics + /// + /// Panics if the self value equals `None`. 
+ /// /// # Examples /// /// ``` @@ -370,7 +363,7 @@ impl Option { // Transforming contained values ///////////////////////////////////////////////////////////////////////// - /// Maps an `Option` to `Option` by applying a function to a contained value + /// Maps an `Option` to `Option` by applying a function to a contained value. /// /// # Examples /// @@ -436,8 +429,12 @@ impl Option { } } - /// Transforms the `Option` into a `Result`, mapping `Some(v)` to - /// `Ok(v)` and `None` to `Err(err)`. + /// Transforms the `Option` into a [`Result`], mapping `Some(v)` to + /// [`Ok(v)`] and `None` to [`Err(err)`][Err]. + /// + /// [`Result`]: ../../std/result/enum.Result.html + /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok + /// [Err]: ../../std/result/enum.Result.html#variant.Err /// /// # Examples /// @@ -457,8 +454,12 @@ impl Option { } } - /// Transforms the `Option` into a `Result`, mapping `Some(v)` to - /// `Ok(v)` and `None` to `Err(err())`. + /// Transforms the `Option` into a [`Result`], mapping `Some(v)` to + /// [`Ok(v)`] and `None` to [`Err(err())`][Err]. + /// + /// [`Result`]: ../../std/result/enum.Result.html + /// [`Ok(v)`]: ../../std/result/enum.Result.html#variant.Ok + /// [Err]: ../../std/result/enum.Result.html#variant.Err /// /// # Examples /// @@ -658,6 +659,16 @@ impl Option { impl<'a, T: Clone> Option<&'a T> { /// Maps an `Option<&T>` to an `Option` by cloning the contents of the /// option. + /// + /// # Examples + /// + /// ``` + /// let x = 12; + /// let opt_x = Some(&x); + /// assert_eq!(opt_x, Some(&12)); + /// let cloned = opt_x.cloned(); + /// assert_eq!(cloned, Some(12)); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn cloned(self) -> Option { self.map(|t| t.clone()) @@ -697,12 +708,21 @@ impl Option { } } +// This is a separate function to reduce the code size of .expect() itself. +#[inline(never)] +#[cold] +fn expect_failed(msg: &str) -> ! 
{ + panic!("{}", msg) +} + + ///////////////////////////////////////////////////////////////////////////// // Trait implementations ///////////////////////////////////////////////////////////////////////////// #[stable(feature = "rust1", since = "1.0.0")] impl Default for Option { + /// Returns None. #[inline] fn default() -> Option { None } } @@ -751,11 +771,18 @@ impl<'a, T> IntoIterator for &'a mut Option { } } +#[stable(since = "1.12.0", feature = "option_from")] +impl From for Option { + fn from(val: T) -> Option { + Some(val) + } +} + ///////////////////////////////////////////////////////////////////////////// // The Option Iterators ///////////////////////////////////////////////////////////////////////////// -#[derive(Clone)] +#[derive(Clone, Debug)] struct Item { opt: Option } @@ -785,9 +812,14 @@ impl DoubleEndedIterator for Item { } impl ExactSizeIterator for Item {} +impl FusedIterator for Item {} +unsafe impl TrustedLen for Item {} -/// An iterator over a reference of the contained item in an Option. +/// An iterator over a reference of the contained item in an [`Option`]. +/// +/// [`Option`]: enum.Option.html #[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug)] pub struct Iter<'a, A: 'a> { inner: Item<&'a A> } #[stable(feature = "rust1", since = "1.0.0")] @@ -809,6 +841,12 @@ impl<'a, A> DoubleEndedIterator for Iter<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for Iter<'a, A> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, A> FusedIterator for Iter<'a, A> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, A> TrustedLen for Iter<'a, A> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> Clone for Iter<'a, A> { fn clone(&self) -> Iter<'a, A> { @@ -816,8 +854,11 @@ impl<'a, A> Clone for Iter<'a, A> { } } -/// An iterator over a mutable reference of the contained item in an Option. 
+/// An iterator over a mutable reference of the contained item in an [`Option`]. +/// +/// [`Option`]: enum.Option.html #[stable(feature = "rust1", since = "1.0.0")] +#[derive(Debug)] pub struct IterMut<'a, A: 'a> { inner: Item<&'a mut A> } #[stable(feature = "rust1", since = "1.0.0")] @@ -839,8 +880,15 @@ impl<'a, A> DoubleEndedIterator for IterMut<'a, A> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, A> ExactSizeIterator for IterMut<'a, A> {} -/// An iterator over the item contained inside an Option. -#[derive(Clone)] +#[unstable(feature = "fused", issue = "35602")] +impl<'a, A> FusedIterator for IterMut<'a, A> {} +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, A> TrustedLen for IterMut<'a, A> {} + +/// An iterator over the item contained inside an [`Option`]. +/// +/// [`Option`]: enum.Option.html +#[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { inner: Item } @@ -863,6 +911,12 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for IntoIter {} + ///////////////////////////////////////////////////////////////////////////// // FromIterator ///////////////////////////////////////////////////////////////////////////// @@ -879,12 +933,12 @@ impl> FromIterator> for Option { /// ``` /// use std::u16; /// - /// let v = vec!(1, 2); + /// let v = vec![1, 2]; /// let res: Option> = v.iter().map(|&x: &u16| /// if x == u16::MAX { None } /// else { Some(x + 1) } /// ).collect(); - /// assert!(res == Some(vec!(2, 3))); + /// assert!(res == Some(vec![2, 3])); /// ``` #[inline] fn from_iter>>(iter: I) -> Option { diff --git a/src/libcore/prelude/v1.rs b/src/libcore/prelude/v1.rs index 75db6fceab9b7..3fa6a97d4cd15 100644 --- a/src/libcore/prelude/v1.rs +++ 
b/src/libcore/prelude/v1.rs @@ -18,36 +18,50 @@ // Reexported core operators #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use marker::{Copy, Send, Sized, Sync}; +#[doc(no_inline)] +pub use marker::{Copy, Send, Sized, Sync}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use ops::{Drop, Fn, FnMut, FnOnce}; +#[doc(no_inline)] +pub use ops::{Drop, Fn, FnMut, FnOnce}; // Reexported functions #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use mem::drop; +#[doc(no_inline)] +pub use mem::drop; // Reexported types and traits #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use clone::Clone; +#[doc(no_inline)] +pub use clone::Clone; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use cmp::{PartialEq, PartialOrd, Eq, Ord}; +#[doc(no_inline)] +pub use cmp::{PartialEq, PartialOrd, Eq, Ord}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use convert::{AsRef, AsMut, Into, From}; +#[doc(no_inline)] +pub use convert::{AsRef, AsMut, Into, From}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use default::Default; +#[doc(no_inline)] +pub use default::Default; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use iter::{Iterator, Extend, IntoIterator}; +#[doc(no_inline)] +pub use iter::{Iterator, Extend, IntoIterator}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use iter::{DoubleEndedIterator, ExactSizeIterator}; +#[doc(no_inline)] +pub use iter::{DoubleEndedIterator, ExactSizeIterator}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use option::Option::{self, Some, None}; +#[doc(no_inline)] +pub use option::Option::{self, Some, None}; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use result::Result::{self, Ok, Err}; +#[doc(no_inline)] +pub use result::Result::{self, Ok, 
Err}; // Reexported extension traits for primitive types #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use slice::SliceExt; +#[doc(no_inline)] +pub use slice::SliceExt; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use str::StrExt; +#[doc(no_inline)] +pub use str::StrExt; #[stable(feature = "core_prelude", since = "1.4.0")] -#[doc(no_inline)] pub use char::CharExt; +#[doc(no_inline)] +pub use char::CharExt; diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index e1e7869d548db..2ad38de72b1b9 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -10,23 +10,20 @@ // FIXME: talk about offset, copy_memory, copy_nonoverlapping_memory -//! Raw, unsafe pointers, `*const T`, and `*mut T` +//! Raw, unsafe pointers, `*const T`, and `*mut T`. //! -//! *[See also the pointer primitive types](../primitive.pointer.html).* +//! *[See also the pointer primitive types](../../std/primitive.pointer.html).* #![stable(feature = "rust1", since = "1.0.0")] -use clone::Clone; use intrinsics; use ops::{CoerceUnsized, Deref}; use fmt; use hash; -use option::Option::{self, Some, None}; -use marker::{Copy, PhantomData, Send, Sized, Sync, Unsize}; +use marker::{PhantomData, Unsize}; use mem; use nonzero::NonZero; -use cmp::{PartialEq, Eq, Ord, PartialOrd}; use cmp::Ordering::{self, Less, Equal, Greater}; // FIXME #19649: intrinsic docs don't render, so these have no docs :( @@ -40,7 +37,7 @@ pub use intrinsics::copy; #[stable(feature = "rust1", since = "1.0.0")] pub use intrinsics::write_bytes; -#[unstable(feature = "drop_in_place", reason = "just exposed, needs FCP", issue = "27908")] +#[stable(feature = "drop_in_place", since = "1.8.0")] pub use intrinsics::drop_in_place; /// Creates a null raw pointer. @@ -119,6 +116,19 @@ pub unsafe fn replace(dest: *mut T, mut src: T) -> T { /// `src` is not used before the data is overwritten again (e.g. with `write`, /// `zero_memory`, or `copy_memory`). 
Note that `*src = foo` counts as a use /// because it will attempt to drop the value previously at `*src`. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let x = 12; +/// let y = &x as *const i32; +/// +/// unsafe { +/// assert_eq!(std::ptr::read(y), 12); +/// } +/// ``` #[inline(always)] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn read(src: *const T) -> T { @@ -127,22 +137,6 @@ pub unsafe fn read(src: *const T) -> T { tmp } -/// Variant of read_and_zero that writes the specific drop-flag byte -/// (which may be more appropriate than zero). -#[inline(always)] -#[unstable(feature = "filling_drop", - reason = "may play a larger role in std::ptr future extensions", - issue = "5016")] -pub unsafe fn read_and_drop(dest: *mut T) -> T { - // Copy the data out from `dest`: - let tmp = read(&*dest); - - // Now mark `dest` as dropped: - write_bytes(dest, mem::POST_DROP_U8, 1); - - tmp -} - /// Overwrites a memory location with the given value without reading or /// dropping the old value. /// @@ -156,15 +150,130 @@ pub unsafe fn read_and_drop(dest: *mut T) -> T { /// /// This is appropriate for initializing uninitialized memory, or overwriting /// memory that has previously been `read` from. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let mut x = 0; +/// let y = &mut x as *mut i32; +/// let z = 12; +/// +/// unsafe { +/// std::ptr::write(y, z); +/// assert_eq!(std::ptr::read(y), 12); +/// } +/// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn write(dst: *mut T, src: T) { intrinsics::move_val_init(&mut *dst, src) } +/// Performs a volatile read of the value from `src` without moving it. This +/// leaves the memory in `src` unchanged. +/// +/// Volatile operations are intended to act on I/O memory, and are guaranteed +/// to not be elided or reordered by the compiler across other volatile +/// operations. 
+/// +/// # Notes +/// +/// Rust does not currently have a rigorously and formally defined memory model, +/// so the precise semantics of what "volatile" means here is subject to change +/// over time. That being said, the semantics will almost always end up pretty +/// similar to [C11's definition of volatile][c11]. +/// +/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// +/// # Safety +/// +/// Beyond accepting a raw pointer, this is unsafe because it semantically +/// moves the value out of `src` without preventing further usage of `src`. +/// If `T` is not `Copy`, then care must be taken to ensure that the value at +/// `src` is not used before the data is overwritten again (e.g. with `write`, +/// `zero_memory`, or `copy_memory`). Note that `*src = foo` counts as a use +/// because it will attempt to drop the value previously at `*src`. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let x = 12; +/// let y = &x as *const i32; +/// +/// unsafe { +/// assert_eq!(std::ptr::read_volatile(y), 12); +/// } +/// ``` +#[inline] +#[stable(feature = "volatile", since = "1.9.0")] +pub unsafe fn read_volatile(src: *const T) -> T { + intrinsics::volatile_load(src) +} + +/// Performs a volatile write of a memory location with the given value without +/// reading or dropping the old value. +/// +/// Volatile operations are intended to act on I/O memory, and are guaranteed +/// to not be elided or reordered by the compiler across other volatile +/// operations. +/// +/// # Notes +/// +/// Rust does not currently have a rigorously and formally defined memory model, +/// so the precise semantics of what "volatile" means here is subject to change +/// over time. That being said, the semantics will almost always end up pretty +/// similar to [C11's definition of volatile][c11]. 
+/// +/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf +/// +/// # Safety +/// +/// This operation is marked unsafe because it accepts a raw pointer. +/// +/// It does not drop the contents of `dst`. This is safe, but it could leak +/// allocations or resources, so care must be taken not to overwrite an object +/// that should be dropped. +/// +/// This is appropriate for initializing uninitialized memory, or overwriting +/// memory that has previously been `read` from. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let mut x = 0; +/// let y = &mut x as *mut i32; +/// let z = 12; +/// +/// unsafe { +/// std::ptr::write_volatile(y, z); +/// assert_eq!(std::ptr::read_volatile(y), 12); +/// } +/// ``` +#[inline] +#[stable(feature = "volatile", since = "1.9.0")] +pub unsafe fn write_volatile(dst: *mut T, src: T) { + intrinsics::volatile_store(dst, src); +} + #[lang = "const_ptr"] impl *const T { /// Returns true if the pointer is null. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "Follow the rabbit"; + /// let ptr: *const u8 = s.as_ptr(); + /// assert!(!ptr.is_null()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool where T: Sized { @@ -180,17 +289,30 @@ impl *const T { /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. - #[unstable(feature = "ptr_as_ref", - reason = "Option is not clearly the right return type, and we \ - may want to tie the return lifetime to a borrow of \ - the raw pointer", - issue = "27780")] + /// + /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does + /// not necessarily reflect the actual lifetime of the data. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ```ignore + /// let val: *const u8 = &10u8 as *const u8; + /// + /// unsafe { + /// if let Some(val_back) = val.as_ref() { + /// println!("We got back the value: {}!", val_back); + /// } + /// } + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] - pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized { + pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized { if self.is_null() { None } else { - Some(&**self) + Some(&*self) } } @@ -203,16 +325,80 @@ impl *const T { /// byte past the end of an allocated object. If either pointer is out of /// bounds or arithmetic overflow occurs then /// any further use of the returned value will result in undefined behavior. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let s: &str = "123"; + /// let ptr: *const u8 = s.as_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.offset(1) as char); + /// println!("{}", *ptr.offset(2) as char); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *const T where T: Sized { intrinsics::offset(self, count) } + + /// Calculates the offset from a pointer using wrapping arithmetic. + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * sizeof::()` bytes. + /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.offset(count)` instead when possible, because `offset` + /// allows the compiler to optimize better. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(ptr_wrapping_offset)] + /// // Iterate using a raw pointer in increments of two elements + /// let data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *const u8 = data.as_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_offset(6); + /// + /// // This loop prints "1, 3, 5, " + /// while ptr != end_rounded_up { + /// unsafe { + /// print!("{}, ", *ptr); + /// } + /// ptr = ptr.wrapping_offset(step); + /// } + /// ``` + #[unstable(feature = "ptr_wrapping_offset", issue = "37570")] + #[inline] + pub fn wrapping_offset(self, count: isize) -> *const T where T: Sized { + unsafe { + intrinsics::arith_offset(self, count) + } + } } #[lang = "mut_ptr"] impl *mut T { /// Returns true if the pointer is null. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// assert!(!ptr.is_null()); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_null(self) -> bool where T: Sized { @@ -228,17 +414,30 @@ impl *mut T { /// null-safety, it is important to note that this is still an unsafe /// operation because the returned value could be pointing to invalid /// memory. - #[unstable(feature = "ptr_as_ref", - reason = "Option is not clearly the right return type, and we \ - may want to tie the return lifetime to a borrow of \ - the raw pointer", - issue = "27780")] + /// + /// Additionally, the lifetime `'a` returned is arbitrarily chosen and does + /// not necessarily reflect the actual lifetime of the data. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ```ignore + /// let val: *mut u8 = &mut 10u8 as *mut u8; + /// + /// unsafe { + /// if let Some(val_back) = val.as_ref() { + /// println!("We got back the value: {}!", val_back); + /// } + /// } + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] - pub unsafe fn as_ref<'a>(&self) -> Option<&'a T> where T: Sized { + pub unsafe fn as_ref<'a>(self) -> Option<&'a T> where T: Sized { if self.is_null() { None } else { - Some(&**self) + Some(&*self) } } @@ -250,29 +449,93 @@ impl *mut T { /// The offset must be in-bounds of the object, or one-byte-past-the-end. /// Otherwise `offset` invokes Undefined Behavior, regardless of whether /// the pointer is used. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// + /// unsafe { + /// println!("{}", *ptr.offset(1)); + /// println!("{}", *ptr.offset(2)); + /// } + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub unsafe fn offset(self, count: isize) -> *mut T where T: Sized { intrinsics::offset(self, count) as *mut T } + /// Calculates the offset from a pointer using wrapping arithmetic. + /// `count` is in units of T; e.g. a `count` of 3 represents a pointer + /// offset of `3 * sizeof::()` bytes. + /// + /// # Safety + /// + /// The resulting pointer does not need to be in bounds, but it is + /// potentially hazardous to dereference (which requires `unsafe`). + /// + /// Always use `.offset(count)` instead when possible, because `offset` + /// allows the compiler to optimize better. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(ptr_wrapping_offset)] + /// // Iterate using a raw pointer in increments of two elements + /// let mut data = [1u8, 2, 3, 4, 5]; + /// let mut ptr: *mut u8 = data.as_mut_ptr(); + /// let step = 2; + /// let end_rounded_up = ptr.wrapping_offset(6); + /// + /// while ptr != end_rounded_up { + /// unsafe { + /// *ptr = 0; + /// } + /// ptr = ptr.wrapping_offset(step); + /// } + /// assert_eq!(&data, &[0, 2, 0, 4, 0]); + /// ``` + #[unstable(feature = "ptr_wrapping_offset", issue = "37570")] + #[inline] + pub fn wrapping_offset(self, count: isize) -> *mut T where T: Sized { + unsafe { + intrinsics::arith_offset(self, count) as *mut T + } + } + /// Returns `None` if the pointer is null, or else returns a mutable /// reference to the value wrapped in `Some`. /// /// # Safety /// /// As with `as_ref`, this is unsafe because it cannot verify the validity - /// of the returned pointer. - #[unstable(feature = "ptr_as_ref", - reason = "return value does not necessarily convey all possible \ - information", - issue = "27780")] + /// of the returned pointer, nor can it ensure that the lifetime `'a` + /// returned is indeed a valid lifetime for the contained data. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = [1, 2, 3]; + /// let ptr: *mut u32 = s.as_mut_ptr(); + /// let first_value = unsafe { ptr.as_mut().unwrap() }; + /// *first_value = 4; + /// println!("{:?}", s); // It'll print: "[4, 2, 3]". + /// ``` + #[stable(feature = "ptr_as_ref", since = "1.9.0")] #[inline] - pub unsafe fn as_mut<'a>(&self) -> Option<&'a mut T> where T: Sized { + pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> where T: Sized { if self.is_null() { None } else { - Some(&mut **self) + Some(&mut *self) } } } @@ -296,6 +559,40 @@ impl PartialEq for *mut T { #[stable(feature = "rust1", since = "1.0.0")] impl Eq for *mut T {} +/// Compare raw pointers for equality. 
+/// +/// This is the same as using the `==` operator, but less generic: +/// the arguments have to be `*const T` raw pointers, +/// not anything that implements `PartialEq`. +/// +/// This can be used to compare `&T` references (which coerce to `*const T` implicitly) +/// by their address rather than comparing the values they point to +/// (which is what the `PartialEq for &T` implementation does). +/// +/// # Examples +/// +/// ``` +/// #![feature(ptr_eq)] +/// use std::ptr; +/// +/// let five = 5; +/// let other_five = 5; +/// let five_ref = &five; +/// let same_five_ref = &five; +/// let other_five_ref = &other_five; +/// +/// assert!(five_ref == same_five_ref); +/// assert!(five_ref == other_five_ref); +/// +/// assert!(ptr::eq(five_ref, same_five_ref)); +/// assert!(!ptr::eq(five_ref, other_five_ref)); +/// ``` +#[unstable(feature = "ptr_eq", reason = "newly added", issue = "36497")] +#[inline] +pub fn eq(a: *const T, b: *const T) -> bool { + a == b +} + #[stable(feature = "rust1", since = "1.0.0")] impl Clone for *const T { #[inline] @@ -374,12 +671,21 @@ macro_rules! fnptr_impls_safety_abi { } macro_rules! fnptr_impls_args { - ($($Arg: ident),*) => { + ($($Arg: ident),+) => { fnptr_impls_safety_abi! { extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { extern "C" fn($($Arg),*) -> Ret, $($Arg),* } + fnptr_impls_safety_abi! { extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "Rust" fn($($Arg),*) -> Ret, $($Arg),* } fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),*) -> Ret, $($Arg),* } - } + fnptr_impls_safety_abi! { unsafe extern "C" fn($($Arg),* , ...) -> Ret, $($Arg),* } + }; + () => { + // No variadic functions with 0 parameters + fnptr_impls_safety_abi! { extern "Rust" fn() -> Ret, } + fnptr_impls_safety_abi! { extern "C" fn() -> Ret, } + fnptr_impls_safety_abi! { unsafe extern "Rust" fn() -> Ret, } + fnptr_impls_safety_abi! { unsafe extern "C" fn() -> Ret, } + }; } fnptr_impls_args! 
{ } @@ -465,7 +771,7 @@ impl PartialOrd for *mut T { fn ge(&self, other: &*mut T) -> bool { *self >= *other } } -/// A wrapper around a raw `*mut T` that indicates that the possessor +/// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper owns the referent. This in turn implies that the /// `Unique` is `Send`/`Sync` if `T` is `Send`/`Sync`, unlike a raw /// `*mut T` (which conveys no particular ownership semantics). It @@ -473,6 +779,7 @@ impl PartialOrd for *mut T { /// modified without a unique path to the `Unique` reference. Useful /// for building abstractions like `Vec` or `Box`, which /// internally use raw pointers to manage the memory that they own. +#[allow(missing_debug_implementations)] #[unstable(feature = "unique", reason = "needs an RFC to flesh out design", issue = "27730")] pub struct Unique { @@ -502,6 +809,10 @@ unsafe impl Sync for Unique { } #[unstable(feature = "unique", issue = "27730")] impl Unique { /// Creates a new `Unique`. + /// + /// # Safety + /// + /// `ptr` must be non-null. pub const unsafe fn new(ptr: *mut T) -> Unique { Unique { pointer: NonZero::new(ptr), _marker: PhantomData } } @@ -530,17 +841,18 @@ impl Deref for Unique { } } -#[stable(feature = "rust1", since = "1.0.0")] +#[unstable(feature = "unique", issue = "27730")] impl fmt::Pointer for Unique { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&*self.pointer, f) } } -/// A wrapper around a raw `*mut T` that indicates that the possessor +/// A wrapper around a raw non-null `*mut T` that indicates that the possessor /// of this wrapper has shared ownership of the referent. Useful for /// building abstractions like `Rc` or `Arc`, which internally /// use raw pointers to manage the memory that they own. 
+#[allow(missing_debug_implementations)] #[unstable(feature = "shared", reason = "needs an RFC to flesh out design", issue = "27730")] pub struct Shared { @@ -566,6 +878,10 @@ impl !Sync for Shared { } #[unstable(feature = "shared", issue = "27730")] impl Shared { /// Creates a new `Shared`. + /// + /// # Safety + /// + /// `ptr` must be non-null. pub unsafe fn new(ptr: *mut T) -> Self { Shared { pointer: NonZero::new(ptr), _marker: PhantomData } } diff --git a/src/libcore/raw.rs b/src/libcore/raw.rs index 84467be6eca56..a7d0d3899b181 100644 --- a/src/libcore/raw.rs +++ b/src/libcore/raw.rs @@ -18,57 +18,6 @@ //! //! Their definition should always match the ABI defined in `rustc::back::abi`. -use clone::Clone; -use marker::Copy; -use mem; - -/// The representation of a slice like `&[T]`. -/// -/// This struct is guaranteed to have the layout of types like `&[T]`, -/// `&str`, and `Box<[T]>`, but is not the type of such slices -/// (e.g. the fields are not directly accessible on a `&[T]`) nor does -/// it control that layout (changing the definition will not change -/// the layout of a `&[T]`). It is only designed to be used by unsafe -/// code that needs to manipulate the low-level details. -/// -/// However, it is not recommended to use this type for such code, -/// since there are alternatives which may be safer: -/// -/// - Creating a slice from a data pointer and length can be done with -/// `std::slice::from_raw_parts` or `std::slice::from_raw_parts_mut` -/// instead of `std::mem::transmute`ing a value of type `Slice`. -/// - Extracting the data pointer and length from a slice can be -/// performed with the `as_ptr` (or `as_mut_ptr`) and `len` -/// methods. -/// -/// If one does decide to convert a slice value to a `Slice`, the -/// `Repr` trait in this module provides a method for a safe -/// conversion from `&[T]` (and `&str`) to a `Slice`, more type-safe -/// than a call to `transmute`. 
-/// -/// # Examples -/// -/// ``` -/// #![feature(raw)] -/// -/// use std::raw::{self, Repr}; -/// -/// let slice: &[u16] = &[1, 2, 3, 4]; -/// -/// let repr: raw::Slice = slice.repr(); -/// println!("data pointer = {:?}, length = {}", repr.data, repr.len); -/// ``` -#[repr(C)] -pub struct Slice { - pub data: *const T, - pub len: usize, -} - -impl Copy for Slice {} -impl Clone for Slice { - fn clone(&self) -> Slice { *self } -} - /// The representation of a trait object like `&SomeTrait`. /// /// This struct has the same layout as types like `&SomeTrait` and @@ -85,12 +34,13 @@ impl Clone for Slice { /// only designed to be used by unsafe code that needs to manipulate /// the low-level details. /// -/// There is no `Repr` implementation for `TraitObject` because there -/// is no way to refer to all trait objects generically, so the only +/// There is no way to refer to all trait objects generically, so the only /// way to create values of this type is with functions like -/// `std::mem::transmute`. Similarly, the only way to create a true +/// [`std::mem::transmute`][transmute]. Similarly, the only way to create a true /// trait object from a `TraitObject` value is with `transmute`. 
/// +/// [transmute]: ../intrinsics/fn.transmute.html +/// /// Synthesizing a trait object with mismatched types—one where the /// vtable does not correspond to the type of the value to which the /// data pointer points—is highly likely to lead to undefined @@ -101,13 +51,13 @@ impl Clone for Slice { /// ``` /// #![feature(raw)] /// -/// use std::mem; -/// use std::raw; +/// use std::{mem, raw}; /// /// // an example trait /// trait Foo { /// fn bar(&self) -> i32; /// } +/// /// impl Foo for i32 { /// fn bar(&self) -> i32 { /// *self + 1 @@ -125,7 +75,6 @@ impl Clone for Slice { /// // the data pointer is the address of `value` /// assert_eq!(raw_object.data as *const i32, &value as *const _); /// -/// /// let other_value: i32 = 456; /// /// // construct a new object, pointing to a different `i32`, being @@ -133,31 +82,18 @@ impl Clone for Slice { /// let synthesized: &Foo = unsafe { /// mem::transmute(raw::TraitObject { /// data: &other_value as *const _ as *mut (), -/// vtable: raw_object.vtable +/// vtable: raw_object.vtable, /// }) /// }; /// -/// // it should work just like we constructed a trait object out of +/// // it should work just as if we had constructed a trait object out of /// // `other_value` directly /// assert_eq!(synthesized.bar(), 457); /// ``` #[repr(C)] #[derive(Copy, Clone)] +#[allow(missing_debug_implementations)] pub struct TraitObject { pub data: *mut (), pub vtable: *mut (), } - -/// This trait is meant to map equivalences between raw structs and their -/// corresponding rust values. -pub unsafe trait Repr { - /// This function "unwraps" a rust value (without consuming it) into its raw - /// struct representation. This can be used to read/write different values - /// for the struct. This is a safe method because by default it does not - /// enable write-access to the fields of the return value in safe code. 
- #[inline] - fn repr(&self) -> T { unsafe { mem::transmute_copy(&self) } } -} - -unsafe impl Repr> for [T] {} -unsafe impl Repr> for str {} diff --git a/src/libcore/result.rs b/src/libcore/result.rs index 6ec76c821b30d..afed99d265f19 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -8,26 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Error handling with the `Result` type +//! Error handling with the `Result` type. //! -//! `Result` is the type used for returning and propagating -//! errors. It is an enum with the variants, `Ok(T)`, representing -//! success and containing a value, and `Err(E)`, representing error +//! [`Result`][`Result`] is the type used for returning and propagating +//! errors. It is an enum with the variants, [`Ok(T)`], representing +//! success and containing a value, and [`Err(E)`], representing error //! and containing an error value. //! //! ``` //! # #[allow(dead_code)] //! enum Result { //! Ok(T), -//! Err(E) +//! Err(E), //! } //! ``` //! -//! Functions return `Result` whenever errors are expected and -//! recoverable. In the `std` crate `Result` is most prominently used +//! Functions return [`Result`] whenever errors are expected and +//! recoverable. In the `std` crate, [`Result`] is most prominently used //! for [I/O](../../std/io/index.html). //! -//! A simple function returning `Result` might be +//! A simple function returning [`Result`] might be //! defined and used like so: //! //! ``` @@ -39,7 +39,7 @@ //! None => Err("invalid header length"), //! Some(&1) => Ok(Version::Version1), //! Some(&2) => Ok(Version::Version2), -//! Some(_) => Err("invalid version") +//! Some(_) => Err("invalid version"), //! } //! } //! @@ -50,8 +50,8 @@ //! } //! ``` //! -//! Pattern matching on `Result`s is clear and straightforward for -//! simple cases, but `Result` comes with some convenience methods +//! 
Pattern matching on [`Result`]s is clear and straightforward for +//! simple cases, but [`Result`] comes with some convenience methods //! that make working with it more succinct. //! //! ``` @@ -80,14 +80,14 @@ //! //! A common problem with using return values to indicate errors is //! that it is easy to ignore the return value, thus failing to handle -//! the error. Result is annotated with the #[must_use] attribute, +//! the error. [`Result`] is annotated with the `#[must_use]` attribute, //! which will cause the compiler to issue a warning when a Result -//! value is ignored. This makes `Result` especially useful with +//! value is ignored. This makes [`Result`] especially useful with //! functions that may encounter errors but don't otherwise return a //! useful value. //! -//! Consider the `write_all` method defined for I/O types -//! by the [`Write`](../../std/io/trait.Write.html) trait: +//! Consider the [`write_all`] method defined for I/O types +//! by the [`Write`] trait: //! //! ``` //! use std::io; @@ -97,8 +97,8 @@ //! } //! ``` //! -//! *Note: The actual definition of `Write` uses `io::Result`, which -//! is just a synonym for `Result`.* +//! *Note: The actual definition of [`Write`] uses [`io::Result`], which +//! is just a synonym for [`Result`]``.* //! //! This method doesn't produce a value, but the write may //! fail. It's crucial to handle the error case, and *not* write @@ -119,7 +119,7 @@ //! warning (by default, controlled by the `unused_must_use` lint). //! //! You might instead, if you don't want to handle the error, simply -//! assert success with `expect`. This will panic if the +//! assert success with [`expect`]. This will panic if the //! write fails, providing a marginally useful message indicating why: //! //! ```{.no_run} @@ -139,7 +139,7 @@ //! assert!(file.write_all(b"important message").is_ok()); //! ``` //! -//! Or propagate the error up the call stack with `try!`: +//! 
Or propagate the error up the call stack with [`try!`]: //! //! ``` //! # use std::fs::File; @@ -156,7 +156,7 @@ //! # The `try!` macro //! //! When writing code that calls many functions that return the -//! `Result` type, the error handling can be tedious. The `try!` +//! [`Result`] type, the error handling can be tedious. The [`try!`] //! macro hides some of the boilerplate of propagating errors up the //! call stack. //! @@ -175,8 +175,11 @@ //! } //! //! fn write_info(info: &Info) -> io::Result<()> { -//! let mut file = try!(File::create("my_best_friends.txt")); //! // Early return on error +//! let mut file = match File::create("my_best_friends.txt") { +//! Err(e) => return Err(e), +//! Ok(f) => f, +//! }; //! if let Err(e) = file.write_all(format!("name: {}\n", info.name).as_bytes()) { //! return Err(e) //! } @@ -216,9 +219,9 @@ //! //! *It's much nicer!* //! -//! Wrapping an expression in `try!` will result in the unwrapped -//! success (`Ok`) value, unless the result is `Err`, in which case -//! `Err` is returned early from the enclosing function. Its simple definition +//! Wrapping an expression in [`try!`] will result in the unwrapped +//! success ([`Ok`]) value, unless the result is [`Err`], in which case +//! [`Err`] is returned early from the enclosing function. Its simple definition //! makes it clear: //! //! ``` @@ -227,19 +230,26 @@ //! } //! ``` //! -//! `try!` is imported by the prelude and is available everywhere, but it can only -//! be used in functions that return `Result` because of the early return of -//! `Err` that it provides. +//! [`try!`] is imported by the prelude and is available everywhere, but it can only +//! be used in functions that return [`Result`] because of the early return of +//! [`Err`] that it provides. +//! +//! [`expect`]: enum.Result.html#method.expect +//! [`Write`]: ../../std/io/trait.Write.html +//! [`write_all`]: ../../std/io/trait.Write.html#method.write_all +//! 
[`io::Result`]: ../../std/io/type.Result.html +//! [`try!`]: ../../std/macro.try.html +//! [`Result`]: enum.Result.html +//! [`Ok(T)`]: enum.Result.html#variant.Ok +//! [`Err(E)`]: enum.Result.html#variant.Err +//! [`io::Error`]: ../../std/io/struct.Error.html +//! [`Ok`]: enum.Result.html#variant.Ok +//! [`Err`]: enum.Result.html#variant.Err #![stable(feature = "rust1", since = "1.0.0")] -use self::Result::{Ok, Err}; - -use clone::Clone; use fmt; -use iter::{Iterator, DoubleEndedIterator, FromIterator, ExactSizeIterator, IntoIterator}; -use ops::FnOnce; -use option::Option::{self, None, Some}; +use iter::{FromIterator, FusedIterator, TrustedLen}; /// `Result` is a type that represents either success (`Ok`) or failure (`Err`). /// @@ -250,11 +260,11 @@ use option::Option::{self, None, Some}; pub enum Result { /// Contains the success value #[stable(feature = "rust1", since = "1.0.0")] - Ok(#[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] T), + Ok(#[stable(feature = "rust1", since = "1.0.0")] T), /// Contains the error value #[stable(feature = "rust1", since = "1.0.0")] - Err(#[cfg_attr(not(stage0), stable(feature = "rust1", since = "1.0.0"))] E) + Err(#[stable(feature = "rust1", since = "1.0.0")] E), } ///////////////////////////////////////////////////////////////////////////// @@ -266,10 +276,12 @@ impl Result { // Querying the contained values ///////////////////////////////////////////////////////////////////////// - /// Returns true if the result is `Ok` + /// Returns true if the result is `Ok`. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(-3); /// assert_eq!(x.is_ok(), true); @@ -286,10 +298,12 @@ impl Result { } } - /// Returns true if the result is `Err` + /// Returns true if the result is `Err`. 
/// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(-3); /// assert_eq!(x.is_err(), false); @@ -307,13 +321,17 @@ impl Result { // Adapter for each variant ///////////////////////////////////////////////////////////////////////// - /// Converts from `Result` to `Option` + /// Converts from `Result` to [`Option`]. /// - /// Converts `self` into an `Option`, consuming `self`, + /// Converts `self` into an [`Option`], consuming `self`, /// and discarding the error, if any. /// + /// [`Option`]: ../../std/option/enum.Option.html + /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.ok(), Some(2)); @@ -330,13 +348,17 @@ impl Result { } } - /// Converts from `Result` to `Option` + /// Converts from `Result` to [`Option`]. /// - /// Converts `self` into an `Option`, consuming `self`, + /// Converts `self` into an [`Option`], consuming `self`, /// and discarding the success value, if any. /// + /// [`Option`]: ../../std/option/enum.Option.html + /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.err(), None); @@ -357,11 +379,15 @@ impl Result { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Result` to `Result<&T, &E>` + /// Converts from `Result` to `Result<&T, &E>`. /// /// Produces a new `Result`, containing a reference /// into the original, leaving the original in place. /// + /// # Examples + /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.as_ref(), Ok(&2)); @@ -378,13 +404,17 @@ impl Result { } } - /// Converts from `Result` to `Result<&mut T, &mut E>` + /// Converts from `Result` to `Result<&mut T, &mut E>`. 
+ /// + /// # Examples + /// + /// Basic usage: /// /// ``` /// fn mutate(r: &mut Result) { /// match r.as_mut() { - /// Ok(&mut ref mut v) => *v = 42, - /// Err(&mut ref mut e) => *e = 0, + /// Ok(v) => *v = 42, + /// Err(e) => *e = 0, /// } /// } /// @@ -445,6 +475,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// fn stringify(x: u32) -> String { format!("error code: {}", x) } /// @@ -471,6 +503,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(7); /// assert_eq!(x.iter().next(), Some(&7)); @@ -488,6 +522,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let mut x: Result = Ok(7); /// match x.iter_mut().next() { @@ -513,6 +549,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// let y: Result<&str, &str> = Err("late error"); @@ -541,10 +579,12 @@ impl Result { /// Calls `op` if the result is `Ok`, otherwise returns the `Err` value of `self`. /// - /// This function can be used for control flow based on result values. + /// This function can be used for control flow based on `Result` values. /// /// # Examples /// + /// Basic usage: + /// /// ``` /// fn sq(x: u32) -> Result { Ok(x * x) } /// fn err(x: u32) -> Result { Err(x) } @@ -567,6 +607,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// let y: Result = Err("late error"); @@ -599,6 +641,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// fn sq(x: u32) -> Result { Ok(x * x) } /// fn err(x: u32) -> Result { Err(x) } @@ -618,10 +662,12 @@ impl Result { } /// Unwraps a result, yielding the content of an `Ok`. - /// Else it returns `optb`. + /// Else, it returns `optb`. 
/// /// # Examples /// + /// Basic usage: + /// /// ``` /// let optb = 2; /// let x: Result = Ok(9); @@ -644,6 +690,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// fn count(x: &str) -> usize { x.len() } /// @@ -670,6 +718,8 @@ impl Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(2); /// assert_eq!(x.unwrap(), 2); @@ -684,8 +734,7 @@ impl Result { pub fn unwrap(self) -> T { match self { Ok(t) => t, - Err(e) => - panic!("called `Result::unwrap()` on an `Err` value: {:?}", e) + Err(e) => unwrap_failed("called `Result::unwrap()` on an `Err` value", e), } } @@ -697,6 +746,9 @@ impl Result { /// passed message, and the content of the `Err`. /// /// # Examples + /// + /// Basic usage: + /// /// ```{.should_panic} /// let x: Result = Err("emergency failure"); /// x.expect("Testing expect"); // panics with `Testing expect: emergency failure` @@ -706,7 +758,7 @@ impl Result { pub fn expect(self, msg: &str) -> T { match self { Ok(t) => t, - Err(e) => panic!("{}: {:?}", msg, e), + Err(e) => unwrap_failed(msg, e), } } } @@ -734,13 +786,57 @@ impl Result { #[stable(feature = "rust1", since = "1.0.0")] pub fn unwrap_err(self) -> E { match self { - Ok(t) => - panic!("called `Result::unwrap_err()` on an `Ok` value: {:?}", t), - Err(e) => e + Ok(t) => unwrap_failed("called `Result::unwrap_err()` on an `Ok` value", t), + Err(e) => e, + } + } +} + +impl Result { + /// Returns the contained value or a default + /// + /// Consumes the `self` argument then, if `Ok`, returns the contained + /// value, otherwise if `Err`, returns the default value for that + /// type. + /// + /// # Examples + /// + /// Convert a string to an integer, turning poorly-formed strings + /// into 0 (the default value for integers). [`parse`] converts + /// a string to any other type that implements [`FromStr`], returning an + /// `Err` on error. 
+ /// + /// ``` + /// #![feature(result_unwrap_or_default)] + /// + /// let good_year_from_input = "1909"; + /// let bad_year_from_input = "190blarg"; + /// let good_year = good_year_from_input.parse().unwrap_or_default(); + /// let bad_year = bad_year_from_input.parse().unwrap_or_default(); + /// + /// assert_eq!(1909, good_year); + /// assert_eq!(0, bad_year); + /// + /// [`parse`]: ../../std/primitive.str.html#method.parse + /// [`FromStr`]: ../../std/str/trait.FromStr.html + /// ``` + #[inline] + #[unstable(feature = "result_unwrap_or_default", issue = "37516")] + pub fn unwrap_or_default(self) -> T { + match self { + Ok(x) => x, + Err(_) => Default::default(), } } } +// This is a separate function to reduce the code size of the methods +#[inline(never)] +#[cold] +fn unwrap_failed(msg: &str, error: E) -> ! { + panic!("{}: {:?}", msg, error) +} + ///////////////////////////////////////////////////////////////////////////// // Trait implementations ///////////////////////////////////////////////////////////////////////////// @@ -754,6 +850,8 @@ impl IntoIterator for Result { /// /// # Examples /// + /// Basic usage: + /// /// ``` /// let x: Result = Ok(5); /// let v: Vec = x.into_iter().collect(); @@ -793,7 +891,11 @@ impl<'a, T, E> IntoIterator for &'a mut Result { // The Result Iterators ///////////////////////////////////////////////////////////////////////////// -/// An iterator over a reference to the `Ok` variant of a `Result`. +/// An iterator over a reference to the [`Ok`] variant of a [`Result`]. 
+/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html +#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { inner: Option<&'a T> } @@ -819,12 +921,22 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, A> TrustedLen for Iter<'a, A> {} + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { inner: self.inner } } } -/// An iterator over a mutable reference to the `Ok` variant of a `Result`. +/// An iterator over a mutable reference to the [`Ok`] variant of a [`Result`]. +/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html +#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { inner: Option<&'a mut T> } @@ -850,7 +962,21 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} -/// An iterator over the value in a `Ok` variant of a `Result`. +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, A> TrustedLen for IterMut<'a, A> {} + +/// An iterator over the value in a [`Ok`] variant of a [`Result`]. This struct is +/// created by the [`into_iter`] method on [`Result`][`Result`] (provided by +/// the [`IntoIterator`] trait). 
+/// +/// [`Ok`]: enum.Result.html#variant.Ok +/// [`Result`]: enum.Result.html +/// [`into_iter`]: ../iter/trait.IntoIterator.html#tymethod.into_iter +/// [`IntoIterator`]: ../iter/trait.IntoIterator.html +#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter { inner: Option } @@ -876,6 +1002,12 @@ impl DoubleEndedIterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for IntoIter {} +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for IntoIter {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for IntoIter {} + ///////////////////////////////////////////////////////////////////////////// // FromIterator ///////////////////////////////////////////////////////////////////////////// @@ -892,12 +1024,12 @@ impl> FromIterator> for Result { /// ``` /// use std::u32; /// - /// let v = vec!(1, 2); + /// let v = vec![1, 2]; /// let res: Result, &'static str> = v.iter().map(|&x: &u32| /// if x == u32::MAX { Err("Overflow!") } /// else { Ok(x + 1) } /// ).collect(); - /// assert!(res == Ok(vec!(2, 3))); + /// assert!(res == Ok(vec![2, 3])); /// ``` #[inline] fn from_iter>>(iter: I) -> Result { @@ -923,6 +1055,11 @@ impl> FromIterator> for Result { None => None, } } + + fn size_hint(&self) -> (usize, Option) { + let (_min, max) = self.iter.size_hint(); + (0, max) + } } let mut adapter = Adapter { iter: iter.into_iter(), err: None }; diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index 583bb57a4a6f1..a4a90e7a9da7a 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -33,26 +33,26 @@ // * The `raw` and `bytes` submodules. // * Boilerplate trait implementations. 
-use clone::Clone; -use cmp::{Ordering, PartialEq, PartialOrd, Eq, Ord}; -use cmp::Ordering::{Less, Equal, Greater}; +use cmp::Ordering::{self, Less, Equal, Greater}; use cmp; -use default::Default; +use fmt; use intrinsics::assume; use iter::*; -use ops::{FnMut, self, Index}; -use ops::RangeFull; +use ops::{FnMut, self}; use option::Option; use option::Option::{None, Some}; use result::Result; use result::Result::{Ok, Err}; use ptr; use mem; -use marker::{Send, Sync, self}; -use raw::Repr; -// Avoid conflicts with *both* the Slice trait (buggy) and the `slice::raw` module. -use raw::Slice as RawSlice; +use marker::{Copy, Send, Sync, Sized, self}; +use iter_private::TrustedRandomAccess; +#[repr(C)] +struct Repr { + pub data: *const T, + pub len: usize, +} // // Extension traits @@ -61,7 +61,7 @@ use raw::Slice as RawSlice; /// Extension methods for slices. #[unstable(feature = "core_slice_ext", reason = "stable interface provided by `impl [T]` in later crates", - issue = "27701")] + issue = "32110")] #[allow(missing_docs)] // documented elsewhere pub trait SliceExt { type Item; @@ -84,7 +84,8 @@ pub trait SliceExt { #[stable(feature = "core", since = "1.6.0")] fn chunks(&self, size: usize) -> Chunks; #[stable(feature = "core", since = "1.6.0")] - fn get(&self, index: usize) -> Option<&Self::Item>; + fn get(&self, index: I) -> Option<&I::Output> + where I: SliceIndex; #[stable(feature = "core", since = "1.6.0")] fn first(&self) -> Option<&Self::Item>; #[stable(feature = "core", since = "1.6.0")] @@ -94,21 +95,27 @@ pub trait SliceExt { #[stable(feature = "core", since = "1.6.0")] fn last(&self) -> Option<&Self::Item>; #[stable(feature = "core", since = "1.6.0")] - unsafe fn get_unchecked(&self, index: usize) -> &Self::Item; + unsafe fn get_unchecked(&self, index: I) -> &I::Output + where I: SliceIndex; #[stable(feature = "core", since = "1.6.0")] fn as_ptr(&self) -> *const Self::Item; #[stable(feature = "core", since = "1.6.0")] fn binary_search(&self, x: 
&Self::Item) -> Result where Self::Item: Ord; #[stable(feature = "core", since = "1.6.0")] - fn binary_search_by(&self, f: F) -> Result - where F: FnMut(&Self::Item) -> Ordering; + fn binary_search_by<'a, F>(&'a self, f: F) -> Result + where F: FnMut(&'a Self::Item) -> Ordering; + #[stable(feature = "slice_binary_search_by_key", since = "1.10.0")] + fn binary_search_by_key<'a, B, F>(&'a self, b: &B, f: F) -> Result + where F: FnMut(&'a Self::Item) -> B, + B: Ord; #[stable(feature = "core", since = "1.6.0")] fn len(&self) -> usize; #[stable(feature = "core", since = "1.6.0")] fn is_empty(&self) -> bool { self.len() == 0 } #[stable(feature = "core", since = "1.6.0")] - fn get_mut(&mut self, index: usize) -> Option<&mut Self::Item>; + fn get_mut(&mut self, index: I) -> Option<&mut I::Output> + where I: SliceIndex; #[stable(feature = "core", since = "1.6.0")] fn iter_mut(&mut self) -> IterMut; #[stable(feature = "core", since = "1.6.0")] @@ -137,7 +144,8 @@ pub trait SliceExt { #[stable(feature = "core", since = "1.6.0")] fn reverse(&mut self); #[stable(feature = "core", since = "1.6.0")] - unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut Self::Item; + unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output + where I: SliceIndex; #[stable(feature = "core", since = "1.6.0")] fn as_mut_ptr(&mut self) -> *mut Self::Item; @@ -151,7 +159,9 @@ pub trait SliceExt { fn ends_with(&self, needle: &[Self::Item]) -> bool where Self::Item: PartialEq; #[stable(feature = "clone_from_slice", since = "1.7.0")] - fn clone_from_slice(&mut self, &[Self::Item]) where Self::Item: Clone; + fn clone_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Clone; + #[stable(feature = "copy_from_slice", since = "1.9.0")] + fn copy_from_slice(&mut self, src: &[Self::Item]) where Self::Item: Copy; } // Use macros to be generic over const/mut @@ -180,7 +190,7 @@ macro_rules! 
slice_ref { #[unstable(feature = "core_slice_ext", reason = "stable interface provided by `impl [T]` in later crates", - issue = "27701")] + issue = "32110")] impl SliceExt for [T] { type Item = T; @@ -256,8 +266,10 @@ impl SliceExt for [T] { } #[inline] - fn get(&self, index: usize) -> Option<&T> { - if index < self.len() { Some(&self[index]) } else { None } + fn get(&self, index: I) -> Option<&I::Output> + where I: SliceIndex + { + index.get(self) } #[inline] @@ -282,42 +294,51 @@ impl SliceExt for [T] { } #[inline] - unsafe fn get_unchecked(&self, index: usize) -> &T { - &*(self.repr().data.offset(index as isize)) + unsafe fn get_unchecked(&self, index: I) -> &I::Output + where I: SliceIndex + { + index.get_unchecked(self) } #[inline] fn as_ptr(&self) -> *const T { - self.repr().data + self as *const [T] as *const T } - fn binary_search_by(&self, mut f: F) -> Result where - F: FnMut(&T) -> Ordering + fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result + where F: FnMut(&'a T) -> Ordering { - let mut base : usize = 0; - let mut lim : usize = self.len(); + let mut base = 0usize; + let mut s = self; - while lim != 0 { - let ix = base + (lim >> 1); - match f(&self[ix]) { - Equal => return Ok(ix), + loop { + let (head, tail) = s.split_at(s.len() >> 1); + if tail.is_empty() { + return Err(base) + } + match f(&tail[0]) { Less => { - base = ix + 1; - lim -= 1; + base += head.len() + 1; + s = &tail[1..]; } - Greater => () + Greater => s = head, + Equal => return Ok(base + head.len()), } - lim >>= 1; } - Err(base) } #[inline] - fn len(&self) -> usize { self.repr().len } + fn len(&self) -> usize { + unsafe { + mem::transmute::<&[T], Repr>(self).len + } + } #[inline] - fn get_mut(&mut self, index: usize) -> Option<&mut T> { - if index < self.len() { Some(&mut self[index]) } else { None } + fn get_mut(&mut self, index: I) -> Option<&mut I::Output> + where I: SliceIndex + { + index.get_mut(self) } #[inline] @@ -444,13 +465,15 @@ impl SliceExt for [T] { } #[inline] - 
unsafe fn get_unchecked_mut(&mut self, index: usize) -> &mut T { - &mut *(self.repr().data as *mut T).offset(index as isize) + unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut I::Output + where I: SliceIndex + { + index.get_unchecked_mut(self) } #[inline] fn as_mut_ptr(&mut self) -> *mut T { - self.repr().data as *mut T + self as *mut [T] as *mut T } #[inline] @@ -478,28 +501,56 @@ impl SliceExt for [T] { fn clone_from_slice(&mut self, src: &[T]) where T: Clone { assert!(self.len() == src.len(), "destination and source slices have different lengths"); - for (dst, src) in self.iter_mut().zip(src) { - dst.clone_from(src); + // NOTE: We need to explicitly slice them to the same length + // for bounds checking to be elided, and the optimizer will + // generate memcpy for simple cases (for example T = u8). + let len = self.len(); + let src = &src[..len]; + for i in 0..len { + self[i].clone_from(&src[i]); } } + + #[inline] + fn copy_from_slice(&mut self, src: &[T]) where T: Copy { + assert!(self.len() == src.len(), + "destination and source slices have different lengths"); + unsafe { + ptr::copy_nonoverlapping( + src.as_ptr(), self.as_mut_ptr(), self.len()); + } + } + + #[inline] + fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result + where F: FnMut(&'a Self::Item) -> B, + B: Ord + { + self.binary_search_by(|k| f(k).cmp(b)) + } } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index for [T] { - type Output = T; +#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] +impl ops::Index for [T] + where I: SliceIndex +{ + type Output = I::Output; - fn index(&self, index: usize) -> &T { - assert!(index < self.len()); - unsafe { self.get_unchecked(index) } + #[inline] + fn index(&self, index: I) -> &I::Output { + index.index(self) } } #[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut for [T] { +#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] +impl ops::IndexMut 
for [T] + where I: SliceIndex +{ #[inline] - fn index_mut(&mut self, index: usize) -> &mut T { - assert!(index < self.len()); - unsafe { self.get_unchecked_mut(index) } + fn index_mut(&mut self, index: I) -> &mut I::Output { + index.index_mut(self) } } @@ -515,93 +566,351 @@ fn slice_index_order_fail(index: usize, end: usize) -> ! { panic!("slice index starts at {} but ends at {}", index, end); } -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index> for [T] { +/// A helper trait used for indexing operations. +#[unstable(feature = "slice_get_slice", issue = "35729")] +#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] +pub trait SliceIndex { + /// The output type returned by methods. + type Output: ?Sized; + + /// Returns a shared reference to the output at this location, if in + /// bounds. + fn get(self, slice: &[T]) -> Option<&Self::Output>; + + /// Returns a mutable reference to the output at this location, if in + /// bounds. + fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output>; + + /// Returns a shared reference to the output at this location, without + /// performing any bounds checking. + unsafe fn get_unchecked(self, slice: &[T]) -> &Self::Output; + + /// Returns a mutable reference to the output at this location, without + /// performing any bounds checking. + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut Self::Output; + + /// Returns a shared reference to the output at this location, panicking + /// if out of bounds. + fn index(self, slice: &[T]) -> &Self::Output; + + /// Returns a mutable reference to the output at this location, panicking + /// if out of bounds. 
+ fn index_mut(self, slice: &mut [T]) -> &mut Self::Output; +} + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for usize { + type Output = T; + + #[inline] + fn get(self, slice: &[T]) -> Option<&T> { + if self < slice.len() { + unsafe { + Some(self.get_unchecked(slice)) + } + } else { + None + } + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut T> { + if self < slice.len() { + unsafe { + Some(self.get_unchecked_mut(slice)) + } + } else { + None + } + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &T { + &*slice.as_ptr().offset(self as isize) + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut T { + &mut *slice.as_mut_ptr().offset(self as isize) + } + + #[inline] + fn index(self, slice: &[T]) -> &T { + // NB: use intrinsic indexing + &(*slice)[self] + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut T { + // NB: use intrinsic indexing + &mut (*slice)[self] + } +} + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::Range { type Output = [T]; #[inline] - fn index(&self, index: ops::Range) -> &[T] { - if index.start > index.end { - slice_index_order_fail(index.start, index.end); - } else if index.end > self.len() { - slice_index_len_fail(index.end, self.len()); + fn get(self, slice: &[T]) -> Option<&[T]> { + if self.start > self.end || self.end > slice.len() { + None + } else { + unsafe { + Some(self.get_unchecked(slice)) + } + } + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + if self.start > self.end || self.end > slice.len() { + None + } else { + unsafe { + Some(self.get_unchecked_mut(slice)) + } + } + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + from_raw_parts(slice.as_ptr().offset(self.start as isize), self.end - self.start) + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + 
from_raw_parts_mut(slice.as_mut_ptr().offset(self.start as isize), self.end - self.start) + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + if self.start > self.end { + slice_index_order_fail(self.start, self.end); + } else if self.end > slice.len() { + slice_index_len_fail(self.end, slice.len()); } unsafe { - from_raw_parts ( - self.as_ptr().offset(index.start as isize), - index.end - index.start - ) + self.get_unchecked(slice) + } + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + if self.start > self.end { + slice_index_order_fail(self.start, self.end); + } else if self.end > slice.len() { + slice_index_len_fail(self.end, slice.len()); + } + unsafe { + self.get_unchecked_mut(slice) } } } -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index> for [T] { + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::RangeTo { type Output = [T]; #[inline] - fn index(&self, index: ops::RangeTo) -> &[T] { - self.index(ops::Range{ start: 0, end: index.end }) + fn get(self, slice: &[T]) -> Option<&[T]> { + (0..self.end).get(slice) + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + (0..self.end).get_mut(slice) + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + (0..self.end).get_unchecked(slice) + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + (0..self.end).get_unchecked_mut(slice) + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + (0..self.end).index(slice) + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + (0..self.end).index_mut(slice) } } -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index> for [T] { + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::RangeFrom { type Output = [T]; #[inline] - fn index(&self, index: ops::RangeFrom) -> &[T] { - self.index(ops::Range{ start: index.start, end: self.len() }) + fn get(self, slice: &[T]) 
-> Option<&[T]> { + (self.start..slice.len()).get(slice) + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + (self.start..slice.len()).get_mut(slice) + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + (self.start..slice.len()).get_unchecked(slice) + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + (self.start..slice.len()).get_unchecked_mut(slice) + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + (self.start..slice.len()).index(slice) + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + (self.start..slice.len()).index_mut(slice) } } -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::Index for [T] { + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::RangeFull { type Output = [T]; #[inline] - fn index(&self, _index: RangeFull) -> &[T] { - self + fn get(self, slice: &[T]) -> Option<&[T]> { + Some(slice) + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + Some(slice) + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + slice + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + slice + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + slice + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + slice } } -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut> for [T] { + +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::RangeInclusive { + type Output = [T]; + #[inline] - fn index_mut(&mut self, index: ops::Range) -> &mut [T] { - if index.start > index.end { - slice_index_order_fail(index.start, index.end); - } else if index.end > self.len() { - slice_index_len_fail(index.end, self.len()); + fn get(self, slice: &[T]) -> Option<&[T]> { + match self { + ops::RangeInclusive::Empty { .. } => Some(&[]), + ops::RangeInclusive::NonEmpty { end, .. 
} if end == usize::max_value() => None, + ops::RangeInclusive::NonEmpty { start, end } => (start..end + 1).get(slice), } - unsafe { - from_raw_parts_mut( - self.as_mut_ptr().offset(index.start as isize), - index.end - index.start - ) + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + match self { + ops::RangeInclusive::Empty { .. } => Some(&mut []), + ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() => None, + ops::RangeInclusive::NonEmpty { start, end } => (start..end + 1).get_mut(slice), } } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut> for [T] { + #[inline] - fn index_mut(&mut self, index: ops::RangeTo) -> &mut [T] { - self.index_mut(ops::Range{ start: 0, end: index.end }) + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + match self { + ops::RangeInclusive::Empty { .. } => &[], + ops::RangeInclusive::NonEmpty { start, end } => (start..end + 1).get_unchecked(slice), + } } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut> for [T] { + #[inline] - fn index_mut(&mut self, index: ops::RangeFrom) -> &mut [T] { - let len = self.len(); - self.index_mut(ops::Range{ start: index.start, end: len }) + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + match self { + ops::RangeInclusive::Empty { .. } => &mut [], + ops::RangeInclusive::NonEmpty { start, end } => { + (start..end + 1).get_unchecked_mut(slice) + } + } } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl ops::IndexMut for [T] { + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + match self { + ops::RangeInclusive::Empty { .. } => &[], + ops::RangeInclusive::NonEmpty { end, .. 
} if end == usize::max_value() => { + panic!("attempted to index slice up to maximum usize"); + }, + ops::RangeInclusive::NonEmpty { start, end } => (start..end + 1).index(slice), + } + } + #[inline] - fn index_mut(&mut self, _index: RangeFull) -> &mut [T] { - self + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + match self { + ops::RangeInclusive::Empty { .. } => &mut [], + ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() => { + panic!("attempted to index slice up to maximum usize"); + }, + ops::RangeInclusive::NonEmpty { start, end } => (start..end + 1).index_mut(slice), + } } } +#[stable(feature = "slice-get-slice-impls", since = "1.13.0")] +impl SliceIndex for ops::RangeToInclusive { + type Output = [T]; + + #[inline] + fn get(self, slice: &[T]) -> Option<&[T]> { + (0...self.end).get(slice) + } + + #[inline] + fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> { + (0...self.end).get_mut(slice) + } + + #[inline] + unsafe fn get_unchecked(self, slice: &[T]) -> &[T] { + (0...self.end).get_unchecked(slice) + } + + #[inline] + unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] { + (0...self.end).get_unchecked_mut(slice) + } + + #[inline] + fn index(self, slice: &[T]) -> &[T] { + (0...self.end).index(slice) + } + + #[inline] + fn index_mut(self, slice: &mut [T]) -> &mut [T] { + (0...self.end).index_mut(slice) + } +} //////////////////////////////////////////////////////////////////////////////// // Common traits @@ -609,11 +918,13 @@ impl ops::IndexMut for [T] { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Default for &'a [T] { + /// Creates an empty slice. fn default() -> &'a [T] { &[] } } #[stable(feature = "mut_slice_default", since = "1.5.0")] impl<'a, T> Default for &'a mut [T] { + /// Creates a mutable empty slice. fn default() -> &'a mut [T] { &mut [] } } @@ -681,7 +992,7 @@ macro_rules! 
iterator { #[inline] fn count(self) -> usize { - self.size_hint().0 + self.len() } #[inline] @@ -747,6 +1058,25 @@ macro_rules! make_mut_slice { } /// Immutable slice iterator +/// +/// This struct is created by the [`iter`] method on [slices]. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // First, we declare a type which has `iter` method to get the `Iter` struct (&[usize here]): +/// let slice = &[1, 2, 3]; +/// +/// // Then, we iterate over it: +/// for element in slice.iter() { +/// println!("{}", element); +/// } +/// ``` +/// +/// [`iter`]: ../../std/primitive.slice.html#method.iter +/// [slices]: ../../std/primitive.slice.html #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { ptr: *const T, @@ -754,6 +1084,15 @@ pub struct Iter<'a, T: 'a> { _marker: marker::PhantomData<&'a T>, } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Iter") + .field(&self.as_slice()) + .finish() + } +} + #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -764,6 +1103,26 @@ impl<'a, T> Iter<'a, T> { /// /// This has the same lifetime as the original slice, and so the /// iterator can continue to be used while this exists. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // First, we declare a type which has the `iter` method to get the `Iter` + /// // struct (&[usize here]): + /// let slice = &[1, 2, 3]; + /// + /// // Then, we get the iterator: + /// let mut iter = slice.iter(); + /// // So if we print what `as_slice` method returns here, we have "[1, 2, 3]": + /// println!("{:?}", iter.as_slice()); + /// + /// // Next, we move to the second element of the slice: + /// iter.next(); + /// // Now `as_slice` returns "[2, 3]": + /// println!("{:?}", iter.as_slice()); + /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] pub fn as_slice(&self) -> &'a [T] { make_slice!(self.ptr, self.end) @@ -787,14 +1146,54 @@ impl<'a, T> Iter<'a, T> { iterator!{struct Iter -> *const T, &'a T} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +impl<'a, T> ExactSizeIterator for Iter<'a, T> { + fn is_empty(&self) -> bool { + self.ptr == self.end + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Iter<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for Iter<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { Iter { ptr: self.ptr, end: self.end, _marker: self._marker } } } +#[stable(feature = "slice_iter_as_ref", since = "1.12.0")] +impl<'a, T> AsRef<[T]> for Iter<'a, T> { + fn as_ref(&self) -> &[T] { + self.as_slice() + } +} + /// Mutable slice iterator. +/// +/// This struct is created by the [`iter_mut`] method on [slices]. 
+/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // First, we declare a type which has `iter_mut` method to get the `IterMut` +/// // struct (&[usize here]): +/// let mut slice = &mut [1, 2, 3]; +/// +/// // Then, we iterate over it and increment each element value: +/// for element in slice.iter_mut() { +/// *element += 1; +/// } +/// +/// // We now have "[2, 3, 4]": +/// println!("{:?}", slice); +/// ``` +/// +/// [`iter_mut`]: ../../std/primitive.slice.html#method.iter_mut +/// [slices]: ../../std/primitive.slice.html #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { ptr: *mut T, @@ -802,6 +1201,15 @@ pub struct IterMut<'a, T: 'a> { _marker: marker::PhantomData<&'a mut T>, } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("IterMut") + .field(&make_slice!(self.ptr, self.end)) + .finish() + } +} + #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} #[stable(feature = "rust1", since = "1.0.0")] @@ -814,6 +1222,35 @@ impl<'a, T> IterMut<'a, T> { /// to consume the iterator. Consider using the `Slice` and /// `SliceMut` implementations for obtaining slices with more /// restricted lifetimes that do not consume the iterator. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // First, we declare a type which has `iter_mut` method to get the `IterMut` + /// // struct (&[usize here]): + /// let mut slice = &mut [1, 2, 3]; + /// + /// { + /// // Then, we get the iterator: + /// let mut iter = slice.iter_mut(); + /// // We move to next element: + /// iter.next(); + /// // So if we print what `into_slice` method returns here, we have "[2, 3]": + /// println!("{:?}", iter.into_slice()); + /// } + /// + /// // Now let's modify a value of the slice: + /// { + /// // First we get back the iterator: + /// let mut iter = slice.iter_mut(); + /// // We change the value of the first element of the slice returned by the `next` method: + /// *iter.next().unwrap() += 1; + /// } + /// // Now slice is "[2, 2, 3]": + /// println!("{:?}", slice); + /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] pub fn into_slice(self) -> &'a mut [T] { make_mut_slice!(self.ptr, self.end) @@ -837,10 +1274,21 @@ impl<'a, T> IterMut<'a, T> { iterator!{struct IterMut -> *mut T, &'a mut T} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +impl<'a, T> ExactSizeIterator for IterMut<'a, T> { + fn is_empty(&self) -> bool { + self.ptr == self.end + } +} + +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for IterMut<'a, T> {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, T> TrustedLen for IterMut<'a, T> {} /// An internal abstraction over the splitting iterators, so that /// splitn, splitn_mut etc can be implemented once. +#[doc(hidden)] trait SplitIter: DoubleEndedIterator { /// Mark the underlying iterator as complete, extracting the remaining /// portion of the slice. 
@@ -856,6 +1304,16 @@ pub struct Split<'a, T:'a, P> where P: FnMut(&T) -> bool { finished: bool } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for Split<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Split") + .field("v", &self.v) + .field("finished", &self.finished) + .finish() + } +} + // FIXME(#19839) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T, P> Clone for Split<'a, T, P> where P: Clone + FnMut(&T) -> bool { @@ -920,6 +1378,9 @@ impl<'a, T, P> SplitIter for Split<'a, T, P> where P: FnMut(&T) -> bool { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T, P> FusedIterator for Split<'a, T, P> where P: FnMut(&T) -> bool {} + /// An iterator over the subslices of the vector which are separated /// by elements that match `pred`. #[stable(feature = "rust1", since = "1.0.0")] @@ -929,6 +1390,16 @@ pub struct SplitMut<'a, T:'a, P> where P: FnMut(&T) -> bool { finished: bool } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitMut<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SplitMut") + .field("v", &self.v) + .field("finished", &self.finished) + .finish() + } +} + impl<'a, T, P> SplitIter for SplitMut<'a, T, P> where P: FnMut(&T) -> bool { #[inline] fn finish(&mut self) -> Option<&'a mut [T]> { @@ -1000,9 +1471,13 @@ impl<'a, T, P> DoubleEndedIterator for SplitMut<'a, T, P> where } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T, P> FusedIterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool {} + /// An private iterator over subslices separated by elements that /// match a predicate function, splitting at most a fixed number of /// times. 
+#[derive(Debug)] struct GenericSplitN { iter: I, count: usize, @@ -1038,6 +1513,15 @@ pub struct SplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool { inner: GenericSplitN> } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitN<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SplitN") + .field("inner", &self.inner) + .finish() + } +} + /// An iterator over subslices separated by elements that match a /// predicate function, limited to a given number of splits, starting /// from the end of the slice. @@ -1046,6 +1530,15 @@ pub struct RSplitN<'a, T: 'a, P> where P: FnMut(&T) -> bool { inner: GenericSplitN> } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitN<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("RSplitN") + .field("inner", &self.inner) + .finish() + } +} + /// An iterator over subslices separated by elements that match a predicate /// function, limited to a given number of splits. #[stable(feature = "rust1", since = "1.0.0")] @@ -1053,6 +1546,15 @@ pub struct SplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool { inner: GenericSplitN> } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for SplitNMut<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SplitNMut") + .field("inner", &self.inner) + .finish() + } +} + /// An iterator over subslices separated by elements that match a /// predicate function, limited to a given number of splits, starting /// from the end of the slice. 
@@ -1061,6 +1563,15 @@ pub struct RSplitNMut<'a, T: 'a, P> where P: FnMut(&T) -> bool { inner: GenericSplitN> } +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl<'a, T: 'a + fmt::Debug, P> fmt::Debug for RSplitNMut<'a, T, P> where P: FnMut(&T) -> bool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("RSplitNMut") + .field("inner", &self.inner) + .finish() + } +} + macro_rules! forward_iterator { ($name:ident: $elem:ident, $iter_of:ty) => { #[stable(feature = "rust1", since = "1.0.0")] @@ -1079,6 +1590,10 @@ macro_rules! forward_iterator { self.inner.size_hint() } } + + #[unstable(feature = "fused", issue = "35602")] + impl<'a, $elem, P> FusedIterator for $name<'a, $elem, P> + where P: FnMut(&T) -> bool {} } } @@ -1088,6 +1603,7 @@ forward_iterator! { SplitNMut: T, &'a mut [T] } forward_iterator! { RSplitNMut: T, &'a mut [T] } /// An iterator over overlapping subslices of length `size`. +#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Windows<'a, T:'a> { v: &'a [T], @@ -1132,7 +1648,7 @@ impl<'a, T> Iterator for Windows<'a, T> { #[inline] fn count(self) -> usize { - self.size_hint().0 + self.len() } #[inline] @@ -1176,11 +1692,15 @@ impl<'a, T> DoubleEndedIterator for Windows<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Windows<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Windows<'a, T> {} + /// An iterator over a slice in (non-overlapping) chunks (`size` elements at a /// time). /// /// When the slice len is not evenly divided by the chunk size, the last slice /// of the iteration will be the remainder. 
+#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Chunks<'a, T:'a> { v: &'a [T], @@ -1228,7 +1748,7 @@ impl<'a, T> Iterator for Chunks<'a, T> { #[inline] fn count(self) -> usize { - self.size_hint().0 + self.len() } #[inline] @@ -1278,9 +1798,13 @@ impl<'a, T> DoubleEndedIterator for Chunks<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for Chunks<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for Chunks<'a, T> {} + /// An iterator over a slice in (non-overlapping) mutable chunks (`size` /// elements at a time). When the slice len is not evenly divided by the chunk /// size, the last slice of the iteration will be the remainder. +#[derive(Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ChunksMut<'a, T:'a> { v: &'a mut [T], @@ -1318,7 +1842,7 @@ impl<'a, T> Iterator for ChunksMut<'a, T> { #[inline] fn count(self) -> usize { - self.size_hint().0 + self.len() } #[inline] @@ -1372,6 +1896,9 @@ impl<'a, T> DoubleEndedIterator for ChunksMut<'a, T> { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {} +#[unstable(feature = "fused", issue = "35602")] +impl<'a, T> FusedIterator for ChunksMut<'a, T> {} + // // Free functions // @@ -1411,7 +1938,7 @@ impl<'a, T> ExactSizeIterator for ChunksMut<'a, T> {} #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] { - mem::transmute(RawSlice { data: p, len: len }) + mem::transmute(Repr { data: p, len: len }) } /// Performs the same functionality as `from_raw_parts`, except that a mutable @@ -1423,62 +1950,65 @@ pub unsafe fn from_raw_parts<'a, T>(p: *const T, len: usize) -> &'a [T] { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn from_raw_parts_mut<'a, T>(p: *mut T, len: usize) -> &'a mut [T] { - mem::transmute(RawSlice { data: p, len: len }) + mem::transmute(Repr { data: p, 
len: len }) } // -// Submodules +// Comparison traits // -/// Operations on `[u8]`. -#[unstable(feature = "slice_bytes", reason = "needs review", - issue = "27740")] -#[rustc_deprecated(reason = "unidiomatic functions not pulling their weight", - since = "1.6.0")] -#[allow(deprecated)] -pub mod bytes { - use ptr; - use slice::SliceExt; +extern { + /// Call implementation provided memcmp + /// + /// Interprets the data as u8. + /// + /// Return 0 for equal, < 0 for less than and > 0 for greater + /// than. + // FIXME(#32610): Return type should be c_int + fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32; +} - /// A trait for operations on mutable `[u8]`s. - pub trait MutableByteVector { - /// Sets all bytes of the receiver to the given value. - fn set_memory(&mut self, value: u8); +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialEq<[B]> for [A] where A: PartialEq { + fn eq(&self, other: &[B]) -> bool { + SlicePartialEq::equal(self, other) } - impl MutableByteVector for [u8] { - #[inline] - fn set_memory(&mut self, value: u8) { - unsafe { ptr::write_bytes(self.as_mut_ptr(), value, self.len()) }; - } + fn ne(&self, other: &[B]) -> bool { + SlicePartialEq::not_equal(self, other) } +} - /// Copies data from `src` to `dst` - /// - /// Panics if the length of `dst` is less than the length of `src`. - #[inline] - pub fn copy_memory(src: &[u8], dst: &mut [u8]) { - let len_src = src.len(); - assert!(dst.len() >= len_src); - // `dst` is unaliasable, so we know statically it doesn't overlap - // with `src`. 
- unsafe { - ptr::copy_nonoverlapping(src.as_ptr(), - dst.as_mut_ptr(), - len_src); - } +#[stable(feature = "rust1", since = "1.0.0")] +impl Eq for [T] {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Ord for [T] { + fn cmp(&self, other: &[T]) -> Ordering { + SliceOrd::compare(self, other) } } +#[stable(feature = "rust1", since = "1.0.0")] +impl PartialOrd for [T] { + fn partial_cmp(&self, other: &[T]) -> Option { + SlicePartialOrd::partial_compare(self, other) + } +} +#[doc(hidden)] +// intermediate trait for specialization of slice's PartialEq +trait SlicePartialEq { + fn equal(&self, other: &[B]) -> bool; -// -// Boilerplate traits -// + fn not_equal(&self, other: &[B]) -> bool { !self.equal(other) } +} -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq<[B]> for [A] where A: PartialEq { - fn eq(&self, other: &[B]) -> bool { +// Generic slice equality +impl SlicePartialEq for [A] + where A: PartialEq +{ + default fn equal(&self, other: &[B]) -> bool { if self.len() != other.len() { return false; } @@ -1491,27 +2021,37 @@ impl PartialEq<[B]> for [A] where A: PartialEq { true } - fn ne(&self, other: &[B]) -> bool { +} + +// Use memcmp for bytewise equality when the types allow +impl SlicePartialEq for [A] + where A: PartialEq + BytewiseEquality +{ + fn equal(&self, other: &[A]) -> bool { if self.len() != other.len() { + return false; + } + if self.as_ptr() == other.as_ptr() { return true; } - - for i in 0..self.len() { - if self[i].ne(&other[i]) { - return true; - } + unsafe { + let size = mem::size_of_val(self); + memcmp(self.as_ptr() as *const u8, + other.as_ptr() as *const u8, size) == 0 } - - false } } -#[stable(feature = "rust1", since = "1.0.0")] -impl Eq for [T] {} +#[doc(hidden)] +// intermediate trait for specialization of slice's PartialOrd +trait SlicePartialOrd { + fn partial_compare(&self, other: &[B]) -> Option; +} -#[stable(feature = "rust1", since = "1.0.0")] -impl Ord for [T] { - fn cmp(&self, other: &[T]) -> Ordering { +impl 
SlicePartialOrd for [A] + where A: PartialOrd +{ + default fn partial_compare(&self, other: &[A]) -> Option { let l = cmp::min(self.len(), other.len()); // Slice to the loop iteration range to enable bound check @@ -1520,19 +2060,33 @@ impl Ord for [T] { let rhs = &other[..l]; for i in 0..l { - match lhs[i].cmp(&rhs[i]) { - Ordering::Equal => (), + match lhs[i].partial_cmp(&rhs[i]) { + Some(Ordering::Equal) => (), non_eq => return non_eq, } } - self.len().cmp(&other.len()) + self.len().partial_cmp(&other.len()) } } -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for [T] { - fn partial_cmp(&self, other: &[T]) -> Option { +impl SlicePartialOrd for [u8] { + #[inline] + fn partial_compare(&self, other: &[u8]) -> Option { + Some(SliceOrd::compare(self, other)) + } +} + +#[doc(hidden)] +// intermediate trait for specialization of slice's Ord +trait SliceOrd { + fn compare(&self, other: &[B]) -> Ordering; +} + +impl SliceOrd for [A] + where A: Ord +{ + default fn compare(&self, other: &[A]) -> Ordering { let l = cmp::min(self.len(), other.len()); // Slice to the loop iteration range to enable bound check @@ -1541,12 +2095,63 @@ impl PartialOrd for [T] { let rhs = &other[..l]; for i in 0..l { - match lhs[i].partial_cmp(&rhs[i]) { - Some(Ordering::Equal) => (), + match lhs[i].cmp(&rhs[i]) { + Ordering::Equal => (), non_eq => return non_eq, } } - self.len().partial_cmp(&other.len()) + self.len().cmp(&other.len()) + } +} + +// memcmp compares a sequence of unsigned bytes lexicographically. +// this matches the order we want for [u8], but no others (not even [i8]). 
+impl SliceOrd for [u8] { + #[inline] + fn compare(&self, other: &[u8]) -> Ordering { + let order = unsafe { + memcmp(self.as_ptr(), other.as_ptr(), + cmp::min(self.len(), other.len())) + }; + if order == 0 { + self.len().cmp(&other.len()) + } else if order < 0 { + Less + } else { + Greater + } + } +} + +#[doc(hidden)] +/// Trait implemented for types that can be compared for equality using +/// their bytewise representation +trait BytewiseEquality { } + +macro_rules! impl_marker_for { + ($traitname:ident, $($ty:ty)*) => { + $( + impl $traitname for $ty { } + )* + } +} + +impl_marker_for!(BytewiseEquality, + u8 i8 u16 i16 u32 i32 u64 i64 usize isize char bool); + +#[doc(hidden)] +unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a T { + &*self.ptr.offset(i as isize) + } + fn may_have_side_effect() -> bool { false } +} + +#[doc(hidden)] +unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> { + unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T { + &mut *self.ptr.offset(i as isize) } + fn may_have_side_effect() -> bool { false } } diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index dd111981f0e57..b4cd52e59f658 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -17,21 +17,11 @@ use self::pattern::Pattern; use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; -use char::{self, CharExt}; -use clone::Clone; -use cmp::Eq; -use convert::AsRef; -use default::Default; +use char; use fmt; -use iter::ExactSizeIterator; -use iter::{Map, Cloned, Iterator, DoubleEndedIterator}; -use marker::Sized; +use iter::{Map, Cloned, FusedIterator}; use mem; -use ops::{Fn, FnMut, FnOnce}; -use option::Option::{self, None, Some}; -use raw::{Repr, Slice}; -use result::Result::{self, Ok, Err}; -use slice::{self, SliceExt}; +use slice; pub mod pattern; @@ -42,8 +32,8 @@ pub mod pattern; /// [`str`]'s [`parse()`] method. See [`parse()`]'s documentation for examples. 
/// /// [`from_str()`]: #tymethod.from_str -/// [`str`]: ../primitive.str.html -/// [`parse()`]: ../primitive.str.html#method.parse +/// [`str`]: ../../std/primitive.str.html +/// [`parse()`]: ../../std/primitive.str.html#method.parse #[stable(feature = "rust1", since = "1.0.0")] pub trait FromStr: Sized { /// The associated error which can be returned from parsing. @@ -60,7 +50,7 @@ pub trait FromStr: Sized { /// /// Basic usage with [`i32`][ithirtytwo], a type that implements `FromStr`: /// - /// [ithirtytwo]: ../primitive.i32.html + /// [ithirtytwo]: ../../std/primitive.i32.html /// /// ``` /// use std::str::FromStr; @@ -111,7 +101,7 @@ impl FromStr for bool { } /// An error returned when parsing a `bool` from a string fails. -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Eq)] #[stable(feature = "rust1", since = "1.0.0")] pub struct ParseBoolError { _priv: () } @@ -142,7 +132,7 @@ impl Utf8Error { /// verified. /// /// It is the maximum index such that `from_utf8(input[..index])` - /// would return `Some(_)`. + /// would return `Ok(_)`. /// /// # Examples /// @@ -157,7 +147,7 @@ impl Utf8Error { /// // std::str::from_utf8 returns a Utf8Error /// let error = str::from_utf8(&sparkle_heart).unwrap_err(); /// - /// // the first byte is invalid here + /// // the second byte is invalid here /// assert_eq!(1, error.valid_up_to()); /// ``` #[stable(feature = "utf8_error", since = "1.5.0")] @@ -174,21 +164,21 @@ impl Utf8Error { /// /// If you are sure that the byte slice is valid UTF-8, and you don't want to /// incur the overhead of the validity check, there is an unsafe version of -/// this function, [`from_utf8_unchecked()`][fromutf8], which has the same +/// this function, [`from_utf8_unchecked()`][fromutf8u], which has the same /// behavior but skips the check. /// -/// [fromutf8]: fn.from_utf8.html +/// [fromutf8u]: fn.from_utf8_unchecked.html /// /// If you need a `String` instead of a `&str`, consider /// [`String::from_utf8()`][string]. 
/// -/// [string]: ../string/struct.String.html#method.from_utf8 +/// [string]: ../../std/string/struct.String.html#method.from_utf8 /// /// Because you can stack-allocate a `[u8; N]`, and you can take a `&[u8]` of /// it, this function is one way to have a stack-allocated string. There is /// an example of this in the examples section below. /// -/// # Failure +/// # Errors /// /// Returns `Err` if the slice is not UTF-8 with a description as to why the /// provided slice is not UTF-8. @@ -240,14 +230,42 @@ impl Utf8Error { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> { - try!(run_utf8_validation(v)); + run_utf8_validation(v)?; Ok(unsafe { from_utf8_unchecked(v) }) } +/// Forms a str from a pointer and a length. +/// +/// The `len` argument is the number of bytes in the string. +/// +/// # Safety +/// +/// This function is unsafe as there is no guarantee that the given pointer is +/// valid for `len` bytes, nor whether the lifetime inferred is a suitable +/// lifetime for the returned str. +/// +/// The data must be valid UTF-8 +/// +/// `p` must be non-null, even for zero-length str. +/// +/// # Caveat +/// +/// The lifetime for the returned str is inferred from its usage. To +/// prevent accidental misuse, it's suggested to tie the lifetime to whichever +/// source lifetime is safe in the context, such as by providing a helper +/// function taking the lifetime of a host value for the str, or by explicit +/// annotation. +/// Performs the same functionality as `from_raw_parts`, except that a mutable +/// str is returned. +/// +unsafe fn from_raw_parts_mut<'a>(p: *mut u8, len: usize) -> &'a mut str { + mem::transmute::<&mut [u8], &mut str>(slice::from_raw_parts_mut(p, len)) +} + /// Converts a slice of bytes to a string slice without checking /// that the string contains valid UTF-8. /// -/// See the safe version, [`from_utf8()`][fromutf8], for more. 
+/// See the safe version, [`from_utf8()`][fromutf8], for more information. /// /// [fromutf8]: fn.from_utf8.html /// @@ -294,8 +312,8 @@ Section: Iterators /// /// Created with the method [`chars()`]. /// -/// [`chars()`]: ../primitive.str.html#method.chars -#[derive(Clone)] +/// [`chars()`]: ../../std/primitive.str.html#method.chars +#[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Chars<'a> { iter: slice::Iter<'a, u8> @@ -328,7 +346,7 @@ fn unwrap_or_0(opt: Option<&u8>) -> u8 { /// UTF-8-like encoding). #[unstable(feature = "str_internals", issue = "0")] #[inline] -pub fn next_code_point(bytes: &mut slice::Iter) -> Option { +pub fn next_code_point<'a, I: Iterator>(bytes: &mut I) -> Option { // Decode UTF-8 let x = match bytes.next() { None => return None, @@ -362,7 +380,9 @@ pub fn next_code_point(bytes: &mut slice::Iter) -> Option { /// Reads the last code point out of a byte iterator (assuming a /// UTF-8-like encoding). #[inline] -fn next_code_point_reverse(bytes: &mut slice::Iter) -> Option { +fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option + where I: DoubleEndedIterator, +{ // Decode UTF-8 let w = match bytes.next_back() { None => return None, @@ -404,14 +424,31 @@ impl<'a> Iterator for Chars<'a> { }) } + #[inline] + fn count(self) -> usize { + // length in `char` is equal to the number of non-continuation bytes + let bytes_len = self.iter.len(); + let mut cont_bytes = 0; + for &byte in self.iter { + cont_bytes += utf8_is_cont_byte(byte) as usize; + } + bytes_len - cont_bytes + } + #[inline] fn size_hint(&self) -> (usize, Option) { - let (len, _) = self.iter.size_hint(); + let len = self.iter.len(); // `(len + 3)` can't overflow, because we know that the `slice::Iter` // belongs to a slice in memory which has a maximum length of // `isize::MAX` (that's well below `usize::MAX`). ((len + 3) / 4, Some(len)) } + + #[inline] + fn last(mut self) -> Option { + // No need to go through the entire string. 
+ self.next_back() + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -427,11 +464,27 @@ impl<'a> DoubleEndedIterator for Chars<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Chars<'a> {} + impl<'a> Chars<'a> { /// View the underlying data as a subslice of the original data. /// /// This has the same lifetime as the original slice, and so the /// iterator can continue to be used while this exists. + /// + /// # Examples + /// + /// ``` + /// let mut chars = "abc".chars(); + /// + /// assert_eq!(chars.as_str(), "abc"); + /// chars.next(); + /// assert_eq!(chars.as_str(), "bc"); + /// chars.next(); + /// chars.next(); + /// assert_eq!(chars.as_str(), ""); + /// ``` #[stable(feature = "iter_to_slice", since = "1.4.0")] #[inline] pub fn as_str(&self) -> &'a str { @@ -440,7 +493,7 @@ impl<'a> Chars<'a> { } /// Iterator for a string's characters and their byte offsets. -#[derive(Clone)] +#[derive(Clone, Debug)] #[stable(feature = "rust1", since = "1.0.0")] pub struct CharIndices<'a> { front_offset: usize, @@ -453,22 +506,33 @@ impl<'a> Iterator for CharIndices<'a> { #[inline] fn next(&mut self) -> Option<(usize, char)> { - let (pre_len, _) = self.iter.iter.size_hint(); + let pre_len = self.iter.iter.len(); match self.iter.next() { None => None, Some(ch) => { let index = self.front_offset; - let (len, _) = self.iter.iter.size_hint(); + let len = self.iter.iter.len(); self.front_offset += pre_len - len; Some((index, ch)) } } } + #[inline] + fn count(self) -> usize { + self.iter.count() + } + #[inline] fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } + + #[inline] + fn last(mut self) -> Option<(usize, char)> { + // No need to go through the entire string. 
+ self.next_back() + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -478,14 +542,16 @@ impl<'a> DoubleEndedIterator for CharIndices<'a> { match self.iter.next_back() { None => None, Some(ch) => { - let (len, _) = self.iter.iter.size_hint(); - let index = self.front_offset + len; + let index = self.front_offset + self.iter.iter.len(); Some((index, ch)) } } } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for CharIndices<'a> {} + impl<'a> CharIndices<'a> { /// View the underlying data as a subslice of the original data. /// @@ -503,9 +569,9 @@ impl<'a> CharIndices<'a> { /// /// Created with the method [`bytes()`]. /// -/// [`bytes()`]: ../primitive.str.html#method.bytes +/// [`bytes()`]: ../../std/primitive.str.html#method.bytes #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Bytes<'a>(Cloned>); #[stable(feature = "rust1", since = "1.0.0")] @@ -554,6 +620,9 @@ impl<'a> ExactSizeIterator for Bytes<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Bytes<'a> {} + /// This macro generates a Clone impl for string pattern API /// wrapper types of the form X<'a, P> macro_rules! derive_pattern_clone { @@ -634,6 +703,17 @@ macro_rules! generate_pattern_iterators { $(#[$common_stability_attribute])* pub struct $forward_iterator<'a, P: Pattern<'a>>($internal_iterator<'a, P>); + $(#[$common_stability_attribute])* + impl<'a, P: Pattern<'a>> fmt::Debug for $forward_iterator<'a, P> + where P::Searcher: fmt::Debug + { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple(stringify!($forward_iterator)) + .field(&self.0) + .finish() + } + } + $(#[$common_stability_attribute])* impl<'a, P: Pattern<'a>> Iterator for $forward_iterator<'a, P> { type Item = $iterty; @@ -657,6 +737,17 @@ macro_rules! 
generate_pattern_iterators { $(#[$common_stability_attribute])* pub struct $reverse_iterator<'a, P: Pattern<'a>>($internal_iterator<'a, P>); + $(#[$common_stability_attribute])* + impl<'a, P: Pattern<'a>> fmt::Debug for $reverse_iterator<'a, P> + where P::Searcher: fmt::Debug + { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple(stringify!($reverse_iterator)) + .field(&self.0) + .finish() + } + } + $(#[$common_stability_attribute])* impl<'a, P: Pattern<'a>> Iterator for $reverse_iterator<'a, P> where P::Searcher: ReverseSearcher<'a> @@ -678,6 +769,13 @@ macro_rules! generate_pattern_iterators { } } + #[unstable(feature = "fused", issue = "35602")] + impl<'a, P: Pattern<'a>> FusedIterator for $forward_iterator<'a, P> {} + + #[unstable(feature = "fused", issue = "35602")] + impl<'a, P: Pattern<'a>> FusedIterator for $reverse_iterator<'a, P> + where P::Searcher: ReverseSearcher<'a> {} + generate_pattern_iterators!($($t)* with $(#[$common_stability_attribute])*, $forward_iterator, $reverse_iterator, $iterty); @@ -718,6 +816,7 @@ derive_pattern_clone!{ clone SplitInternal with |s| SplitInternal { matcher: s.matcher.clone(), ..*s } } + struct SplitInternal<'a, P: Pattern<'a>> { start: usize, end: usize, @@ -726,6 +825,18 @@ struct SplitInternal<'a, P: Pattern<'a>> { finished: bool, } +impl<'a, P: Pattern<'a>> fmt::Debug for SplitInternal<'a, P> where P::Searcher: fmt::Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SplitInternal") + .field("start", &self.start) + .field("end", &self.end) + .field("matcher", &self.matcher) + .field("allow_trailing_empty", &self.allow_trailing_empty) + .field("finished", &self.finished) + .finish() + } +} + impl<'a, P: Pattern<'a>> SplitInternal<'a, P> { #[inline] fn get_end(&mut self) -> Option<&'a str> { @@ -788,12 +899,12 @@ generate_pattern_iterators! { forward: /// Created with the method [`split()`]. 
/// - /// [`split()`]: ../primitive.str.html#method.split + /// [`split()`]: ../../std/primitive.str.html#method.split struct Split; reverse: /// Created with the method [`rsplit()`]. /// - /// [`rsplit()`]: ../primitive.str.html#method.rsplit + /// [`rsplit()`]: ../../std/primitive.str.html#method.rsplit struct RSplit; stability: #[stable(feature = "rust1", since = "1.0.0")] @@ -806,12 +917,12 @@ generate_pattern_iterators! { forward: /// Created with the method [`split_terminator()`]. /// - /// [`split_terminator()`]: ../primitive.str.html#method.split_terminator + /// [`split_terminator()`]: ../../std/primitive.str.html#method.split_terminator struct SplitTerminator; reverse: /// Created with the method [`rsplit_terminator()`]. /// - /// [`rsplit_terminator()`]: ../primitive.str.html#method.rsplit_terminator + /// [`rsplit_terminator()`]: ../../std/primitive.str.html#method.rsplit_terminator struct RSplitTerminator; stability: #[stable(feature = "rust1", since = "1.0.0")] @@ -824,12 +935,22 @@ derive_pattern_clone!{ clone SplitNInternal with |s| SplitNInternal { iter: s.iter.clone(), ..*s } } + struct SplitNInternal<'a, P: Pattern<'a>> { iter: SplitInternal<'a, P>, /// The number of splits remaining count: usize, } +impl<'a, P: Pattern<'a>> fmt::Debug for SplitNInternal<'a, P> where P::Searcher: fmt::Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SplitNInternal") + .field("iter", &self.iter) + .field("count", &self.count) + .finish() + } +} + impl<'a, P: Pattern<'a>> SplitNInternal<'a, P> { #[inline] fn next(&mut self) -> Option<&'a str> { @@ -856,12 +977,12 @@ generate_pattern_iterators! { forward: /// Created with the method [`splitn()`]. /// - /// [`splitn()`]: ../primitive.str.html#method.splitn + /// [`splitn()`]: ../../std/primitive.str.html#method.splitn struct SplitN; reverse: /// Created with the method [`rsplitn()`]. 
/// - /// [`rsplitn()`]: ../primitive.str.html#method.rsplitn + /// [`rsplitn()`]: ../../std/primitive.str.html#method.rsplitn struct RSplitN; stability: #[stable(feature = "rust1", since = "1.0.0")] @@ -874,8 +995,17 @@ derive_pattern_clone!{ clone MatchIndicesInternal with |s| MatchIndicesInternal(s.0.clone()) } + struct MatchIndicesInternal<'a, P: Pattern<'a>>(P::Searcher); +impl<'a, P: Pattern<'a>> fmt::Debug for MatchIndicesInternal<'a, P> where P::Searcher: fmt::Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("MatchIndicesInternal") + .field(&self.0) + .finish() + } +} + impl<'a, P: Pattern<'a>> MatchIndicesInternal<'a, P> { #[inline] fn next(&mut self) -> Option<(usize, &'a str)> { @@ -898,12 +1028,12 @@ generate_pattern_iterators! { forward: /// Created with the method [`match_indices()`]. /// - /// [`match_indices()`]: ../primitive.str.html#method.match_indices + /// [`match_indices()`]: ../../std/primitive.str.html#method.match_indices struct MatchIndices; reverse: /// Created with the method [`rmatch_indices()`]. /// - /// [`rmatch_indices()`]: ../primitive.str.html#method.rmatch_indices + /// [`rmatch_indices()`]: ../../std/primitive.str.html#method.rmatch_indices struct RMatchIndices; stability: #[stable(feature = "str_match_indices", since = "1.5.0")] @@ -916,8 +1046,17 @@ derive_pattern_clone!{ clone MatchesInternal with |s| MatchesInternal(s.0.clone()) } + struct MatchesInternal<'a, P: Pattern<'a>>(P::Searcher); +impl<'a, P: Pattern<'a>> fmt::Debug for MatchesInternal<'a, P> where P::Searcher: fmt::Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("MatchesInternal") + .field(&self.0) + .finish() + } +} + impl<'a, P: Pattern<'a>> MatchesInternal<'a, P> { #[inline] fn next(&mut self) -> Option<&'a str> { @@ -942,12 +1081,12 @@ generate_pattern_iterators! { forward: /// Created with the method [`matches()`]. 
/// - /// [`matches()`]: ../primitive.str.html#method.matches + /// [`matches()`]: ../../std/primitive.str.html#method.matches struct Matches; reverse: /// Created with the method [`rmatches()`]. /// - /// [`rmatches()`]: ../primitive.str.html#method.rmatches + /// [`rmatches()`]: ../../std/primitive.str.html#method.rmatches struct RMatches; stability: #[stable(feature = "str_matches", since = "1.2.0")] @@ -958,9 +1097,9 @@ generate_pattern_iterators! { /// Created with the method [`lines()`]. /// -/// [`lines()`]: ../primitive.str.html#method.lines +/// [`lines()`]: ../../std/primitive.str.html#method.lines #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Lines<'a>(Map, LinesAnyMap>); #[stable(feature = "rust1", since = "1.0.0")] @@ -986,12 +1125,15 @@ impl<'a> DoubleEndedIterator for Lines<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for Lines<'a> {} + /// Created with the method [`lines_any()`]. /// -/// [`lines_any()`]: ../primitive.str.html#method.lines_any +/// [`lines_any()`]: ../../std/primitive.str.html#method.lines_any #[stable(feature = "rust1", since = "1.0.0")] #[rustc_deprecated(since = "1.4.0", reason = "use lines()/Lines instead now")] -#[derive(Clone)] +#[derive(Clone, Debug)] #[allow(deprecated)] pub struct LinesAny<'a>(Lines<'a>); @@ -1049,6 +1191,10 @@ impl<'a> DoubleEndedIterator for LinesAny<'a> { } } +#[unstable(feature = "fused", issue = "35602")] +#[allow(deprecated)] +impl<'a> FusedIterator for LinesAny<'a> {} + /* Section: Comparing strings */ @@ -1059,18 +1205,7 @@ Section: Comparing strings #[lang = "str_eq"] #[inline] fn eq_slice(a: &str, b: &str) -> bool { - a.len() == b.len() && unsafe { cmp_slice(a, b, a.len()) == 0 } -} - -/// Bytewise slice comparison. -/// NOTE: This uses the system's memcmp, which is currently dramatically -/// faster than comparing each byte in a loop. 
-#[inline] -unsafe fn cmp_slice(a: &str, b: &str, len: usize) -> i32 { - // NOTE: In theory n should be libc::size_t and not usize, but libc is not available here - #[allow(improper_ctypes)] - extern { fn memcmp(s1: *const i8, s2: *const i8, n: usize) -> i32; } - memcmp(a.as_ptr() as *const i8, b.as_ptr() as *const i8, len) + a.as_bytes() == b.as_bytes() } /* @@ -1213,22 +1348,6 @@ static UTF8_CHAR_WIDTH: [u8; 256] = [ 4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF ]; -/// Struct that contains a `char` and the index of the first byte of -/// the next `char` in a string. This can be used as a data structure -/// for iterating over the UTF-8 bytes of a string. -#[derive(Copy, Clone)] -#[unstable(feature = "str_char", - reason = "existence of this struct is uncertain as it is frequently \ - able to be replaced with char.len_utf8() and/or \ - char/char_indices iterators", - issue = "27754")] -pub struct CharRange { - /// Current `char` - pub ch: char, - /// Index of the first byte of the next `char` - pub next: usize, -} - /// Mask of the value bits of a continuation byte const CONT_MASK: u8 = 0b0011_1111; /// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte @@ -1239,28 +1358,15 @@ Section: Trait implementations */ mod traits { - use cmp::{self, Ordering, Ord, PartialEq, PartialOrd, Eq}; - use cmp::Ordering::{Less, Greater}; - use iter::Iterator; - use option::Option; - use option::Option::Some; + use cmp::Ordering; use ops; - use str::{StrExt, eq_slice}; + use str::eq_slice; #[stable(feature = "rust1", since = "1.0.0")] impl Ord for str { #[inline] fn cmp(&self, other: &str) -> Ordering { - let cmp = unsafe { - super::cmp_slice(self, other, cmp::min(self.len(), other.len())) - }; - if cmp == 0 { - self.len().cmp(&other.len()) - } else if cmp < 0 { - Less - } else { - Greater - } + self.as_bytes().cmp(other.as_bytes()) } } @@ -1285,13 +1391,19 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[begin .. end]`. 
+ /// /// Returns a slice of the given string from the byte range /// [`begin`..`end`). /// /// This operation is `O(1)`. /// - /// Panics when `begin` and `end` do not point to valid characters - /// or point beyond the last character of the string. + /// # Panics + /// + /// Panics if `begin` or `end` does not point to the starting + /// byte offset of a character (as defined by `is_char_boundary`). + /// Requires that `begin <= end` and `end <= len` where `len` is the + /// length of the string. /// /// # Examples /// @@ -1327,8 +1439,20 @@ mod traits { } } + /// Implements mutable substring slicing with syntax + /// `&mut self[begin .. end]`. + /// /// Returns a mutable slice of the given string from the byte range /// [`begin`..`end`). + /// + /// This operation is `O(1)`. + /// + /// # Panics + /// + /// Panics if `begin` or `end` does not point to the starting + /// byte offset of a character (as defined by `is_char_boundary`). + /// Requires that `begin <= end` and `end <= len` where `len` is the + /// length of the string. #[stable(feature = "derefmut_for_string", since = "1.2.0")] impl ops::IndexMut> for str { #[inline] @@ -1344,13 +1468,12 @@ mod traits { } } - /// Returns a slice of the string from the beginning to byte - /// `end`. + /// Implements substring slicing with syntax `&self[.. end]`. /// - /// Equivalent to `self[0 .. end]`. + /// Returns a slice of the string from the beginning to byte offset + /// `end`. /// - /// Panics when `end` does not point to a valid character, or is - /// out of bounds. + /// Equivalent to `&self[0 .. end]`. #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index> for str { type Output = str; @@ -1366,8 +1489,12 @@ mod traits { } } - /// Returns a mutable slice of the string from the beginning to byte + /// Implements mutable substring slicing with syntax `&mut self[.. end]`. + /// + /// Returns a mutable slice of the string from the beginning to byte offset /// `end`. 
+ /// + /// Equivalent to `&mut self[0 .. end]`. #[stable(feature = "derefmut_for_string", since = "1.2.0")] impl ops::IndexMut> for str { #[inline] @@ -1381,12 +1508,12 @@ mod traits { } } - /// Returns a slice of the string from `begin` to its end. + /// Implements substring slicing with syntax `&self[begin ..]`. /// - /// Equivalent to `self[begin .. self.len()]`. + /// Returns a slice of the string from byte offset `begin` + /// to the end of the string. /// - /// Panics when `begin` does not point to a valid character, or is - /// out of bounds. + /// Equivalent to `&self[begin .. len]`. #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index> for str { type Output = str; @@ -1402,7 +1529,12 @@ mod traits { } } - /// Returns a slice of the string from `begin` to its end. + /// Implements mutable substring slicing with syntax `&mut self[begin ..]`. + /// + /// Returns a mutable slice of the string from byte offset `begin` + /// to the end of the string. + /// + /// Equivalent to `&mut self[begin .. len]`. #[stable(feature = "derefmut_for_string", since = "1.2.0")] impl ops::IndexMut> for str { #[inline] @@ -1417,6 +1549,12 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[..]`. + /// + /// Returns a slice of the whole string. This operation can + /// never panic. + /// + /// Equivalent to `&self[0 .. len]`. #[stable(feature = "rust1", since = "1.0.0")] impl ops::Index for str { type Output = str; @@ -1427,6 +1565,12 @@ mod traits { } } + /// Implements mutable substring slicing with syntax `&mut self[..]`. + /// + /// Returns a mutable slice of the whole string. This operation can + /// never panic. + /// + /// Equivalent to `&mut self[0 .. len]`. 
#[stable(feature = "derefmut_for_string", since = "1.2.0")] impl ops::IndexMut for str { #[inline] @@ -1434,6 +1578,60 @@ mod traits { self } } + + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + impl ops::Index> for str { + type Output = str; + + #[inline] + fn index(&self, index: ops::RangeInclusive) -> &str { + match index { + ops::RangeInclusive::Empty { .. } => "", + ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() => + panic!("attempted to index slice up to maximum usize"), + ops::RangeInclusive::NonEmpty { start, end } => + self.index(start .. end+1) + } + } + } + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + impl ops::Index> for str { + type Output = str; + + #[inline] + fn index(&self, index: ops::RangeToInclusive) -> &str { + self.index(0...index.end) + } + } + + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + impl ops::IndexMut> for str { + #[inline] + fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut str { + match index { + ops::RangeInclusive::Empty { .. } => &mut self[0..0], // `&mut ""` doesn't work + ops::RangeInclusive::NonEmpty { end, .. } if end == usize::max_value() => + panic!("attempted to index str up to maximum usize"), + ops::RangeInclusive::NonEmpty { start, end } => + self.index_mut(start .. 
end+1) + } + } + } + #[unstable(feature = "inclusive_range", + reason = "recently added, follows RFC", + issue = "28237")] + impl ops::IndexMut> for str { + #[inline] + fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut str { + self.index_mut(0...index.end) + } + } } /// Methods for string slices @@ -1441,7 +1639,7 @@ mod traits { #[doc(hidden)] #[unstable(feature = "core_str_ext", reason = "stable interface provided by `impl str` in later crates", - issue = "27701")] + issue = "32110")] pub trait StrExt { // NB there are no docs here are they're all located on the StrExt trait in // libcollections, not here. @@ -1502,39 +1700,8 @@ pub trait StrExt { #[stable(feature = "core", since = "1.6.0")] fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: ReverseSearcher<'a>; - #[unstable(feature = "str_char", - reason = "it is unclear whether this method pulls its weight \ - with the existence of the char_indices iterator or \ - this method may want to be replaced with checked \ - slicing", - issue = "27754")] + #[stable(feature = "is_char_boundary", since = "1.9.0")] fn is_char_boundary(&self, index: usize) -> bool; - #[unstable(feature = "str_char", - reason = "often replaced by char_indices, this method may \ - be removed in favor of just char_at() or eventually \ - removed altogether", - issue = "27754")] - fn char_range_at(&self, start: usize) -> CharRange; - #[unstable(feature = "str_char", - reason = "often replaced by char_indices, this method may \ - be removed in favor of just char_at_reverse() or \ - eventually removed altogether", - issue = "27754")] - fn char_range_at_reverse(&self, start: usize) -> CharRange; - #[unstable(feature = "str_char", - reason = "frequently replaced by the chars() iterator, this \ - method may be removed or possibly renamed in the \ - future; it is normally replaced by chars/char_indices \ - iterators or by getting the first char from a \ - subslice", - issue = "27754")] - fn 
char_at(&self, i: usize) -> char; - #[unstable(feature = "str_char", - reason = "see char_at for more details, but reverse semantics \ - are also somewhat unclear, especially with which \ - cases generate panics", - issue = "27754")] - fn char_at_reverse(&self, i: usize) -> char; #[stable(feature = "core", since = "1.6.0")] fn as_bytes(&self) -> &[u8]; #[stable(feature = "core", since = "1.6.0")] @@ -1547,12 +1714,6 @@ pub trait StrExt { fn split_at(&self, mid: usize) -> (&str, &str); #[stable(feature = "core", since = "1.6.0")] fn split_at_mut(&mut self, mid: usize) -> (&mut str, &mut str); - #[unstable(feature = "str_char", - reason = "awaiting conventions about shifting and slices and \ - may not be warranted with the existence of the chars \ - and/or char_indices iterators", - issue = "27754")] - fn slice_shift_char(&self) -> Option<(char, &str)>; #[stable(feature = "core", since = "1.6.0")] fn as_ptr(&self) -> *const u8; #[stable(feature = "core", since = "1.6.0")] @@ -1563,12 +1724,30 @@ pub trait StrExt { fn parse(&self) -> Result; } +// truncate `&str` to length at most equal to `max` +// return `true` if it were truncated, and the new str. +fn truncate_to_char_boundary(s: &str, mut max: usize) -> (bool, &str) { + if max >= s.len() { + (false, s) + } else { + while !s.is_char_boundary(max) { + max -= 1; + } + (true, &s[..max]) + } +} + #[inline(never)] #[cold] fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! 
{ - assert!(begin <= end); - panic!("index {} and/or {} in `{}` do not lie on character boundary", - begin, end, s); + const MAX_DISPLAY_LENGTH: usize = 256; + let (truncated, s) = truncate_to_char_boundary(s, MAX_DISPLAY_LENGTH); + let ellipsis = if truncated { "[...]" } else { "" }; + + assert!(begin <= end, "begin <= end ({} <= {}) when slicing `{}`{}", + begin, end, s, ellipsis); + panic!("index {} and/or {} in `{}`{} do not lie on character boundary", + begin, end, s, ellipsis); } #[stable(feature = "core", since = "1.6.0")] @@ -1677,18 +1856,16 @@ impl StrExt for str { #[inline] unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str { - mem::transmute(Slice { - data: self.as_ptr().offset(begin as isize), - len: end - begin, - }) + let ptr = self.as_ptr().offset(begin as isize); + let len = end - begin; + from_utf8_unchecked(slice::from_raw_parts(ptr, len)) } #[inline] unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str { - mem::transmute(Slice { - data: self.as_ptr().offset(begin as isize), - len: end - begin, - }) + let ptr = self.as_ptr().offset(begin as isize); + let len = end - begin; + mem::transmute(slice::from_raw_parts_mut(ptr as *mut u8, len)) } #[inline] @@ -1712,7 +1889,7 @@ impl StrExt for str { let mut matcher = pat.into_searcher(self); if let Some((a, b)) = matcher.next_reject() { i = a; - j = b; // Rember earliest known match, correct it below if + j = b; // Remember earliest known match, correct it below if // last match is different } if let Some((_, b)) = matcher.next_reject_back() { @@ -1754,60 +1931,17 @@ impl StrExt for str { #[inline] fn is_char_boundary(&self, index: usize) -> bool { - if index == self.len() { return true; } + // 0 and len are always ok. + // Test for 0 explicitly so that it can optimize out the check + // easily and skip reading string data for that case. 
+ if index == 0 || index == self.len() { return true; } match self.as_bytes().get(index) { None => false, - Some(&b) => b < 128 || b >= 192, + // This is bit magic equivalent to: b < 128 || b >= 192 + Some(&b) => (b as i8) >= -0x40, } } - #[inline] - fn char_range_at(&self, i: usize) -> CharRange { - let (c, n) = char_range_at_raw(self.as_bytes(), i); - CharRange { ch: unsafe { char::from_u32_unchecked(c) }, next: n } - } - - #[inline] - fn char_range_at_reverse(&self, start: usize) -> CharRange { - let mut prev = start; - - prev = prev.saturating_sub(1); - if self.as_bytes()[prev] < 128 { - return CharRange{ch: self.as_bytes()[prev] as char, next: prev} - } - - // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly - fn multibyte_char_range_at_reverse(s: &str, mut i: usize) -> CharRange { - // while there is a previous byte == 10...... - while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 { - i -= 1; - } - - let first= s.as_bytes()[i]; - let w = UTF8_CHAR_WIDTH[first as usize]; - assert!(w != 0); - - let mut val = utf8_first_byte(first, w as u32); - val = utf8_acc_cont_byte(val, s.as_bytes()[i + 1]); - if w > 2 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 2]); } - if w > 3 { val = utf8_acc_cont_byte(val, s.as_bytes()[i + 3]); } - - CharRange {ch: unsafe { char::from_u32_unchecked(val) }, next: i} - } - - multibyte_char_range_at_reverse(self, prev) - } - - #[inline] - fn char_at(&self, i: usize) -> char { - self.char_range_at(i).ch - } - - #[inline] - fn char_at_reverse(&self, i: usize) -> char { - self.char_range_at_reverse(i).ch - } - #[inline] fn as_bytes(&self) -> &[u8] { unsafe { mem::transmute(self) } @@ -1827,6 +1961,7 @@ impl StrExt for str { self.find(pat) } + #[inline] fn split_at(&self, mid: usize) -> (&str, &str) { // is_char_boundary checks that the index is in [0, .len()] if self.is_char_boundary(mid) { @@ -1843,34 +1978,25 @@ impl StrExt for str { // is_char_boundary checks that the index is in [0, .len()] if 
self.is_char_boundary(mid) { let len = self.len(); + let ptr = self.as_ptr() as *mut u8; unsafe { - let self2: &mut str = mem::transmute_copy(&self); - (self.slice_mut_unchecked(0, mid), - self2.slice_mut_unchecked(mid, len)) + (from_raw_parts_mut(ptr, mid), + from_raw_parts_mut(ptr.offset(mid as isize), len - mid)) } } else { slice_error_fail(self, 0, mid) } } - #[inline] - fn slice_shift_char(&self) -> Option<(char, &str)> { - if self.is_empty() { - None - } else { - let ch = self.char_at(0); - let next_s = unsafe { self.slice_unchecked(ch.len_utf8(), self.len()) }; - Some((ch, next_s)) - } - } - #[inline] fn as_ptr(&self) -> *const u8 { - self.repr().data + self as *const str as *const u8 } #[inline] - fn len(&self) -> usize { self.repr().len } + fn len(&self) -> usize { + self.as_bytes().len() + } #[inline] fn is_empty(&self) -> bool { self.len() == 0 } @@ -1887,32 +2013,8 @@ impl AsRef<[u8]> for str { } } -/// Pluck a code point out of a UTF-8-like byte slice and return the -/// index of the next code point. 
-#[inline] -fn char_range_at_raw(bytes: &[u8], i: usize) -> (u32, usize) { - if bytes[i] < 128 { - return (bytes[i] as u32, i + 1); - } - - // Multibyte case is a fn to allow char_range_at to inline cleanly - fn multibyte_char_range_at(bytes: &[u8], i: usize) -> (u32, usize) { - let first = bytes[i]; - let w = UTF8_CHAR_WIDTH[first as usize]; - assert!(w != 0); - - let mut val = utf8_first_byte(first, w as u32); - val = utf8_acc_cont_byte(val, bytes[i + 1]); - if w > 2 { val = utf8_acc_cont_byte(val, bytes[i + 2]); } - if w > 3 { val = utf8_acc_cont_byte(val, bytes[i + 3]); } - - (val, i + w as usize) - } - - multibyte_char_range_at(bytes, i) -} - #[stable(feature = "rust1", since = "1.0.0")] impl<'a> Default for &'a str { + /// Creates an empty str fn default() -> &'a str { "" } } diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index 29130100e996f..7dced2ba7514c 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -17,9 +17,8 @@ reason = "API not fully fleshed out and ready to be stabilized", issue = "27721")] -use prelude::v1::*; - use cmp; +use fmt; use usize; // Pattern @@ -275,7 +274,7 @@ impl<'a> CharEq for &'a [char] { struct CharEqPattern(C); -#[derive(Clone)] +#[derive(Clone, Debug)] struct CharEqSearcher<'a, C: CharEq> { char_eq: C, haystack: &'a str, @@ -309,9 +308,9 @@ unsafe impl<'a, C: CharEq> Searcher<'a> for CharEqSearcher<'a, C> { let s = &mut self.char_indices; // Compare lengths of the internal byte slice iterator // to find length of current char - let (pre_len, _) = s.iter.iter.size_hint(); + let pre_len = s.iter.iter.len(); if let Some((i, c)) = s.next() { - let (len, _) = s.iter.iter.size_hint(); + let len = s.iter.iter.len(); let char_len = pre_len - len; if self.char_eq.matches(c) { return SearchStep::Match(i, i + char_len); @@ -329,9 +328,9 @@ unsafe impl<'a, C: CharEq> ReverseSearcher<'a> for CharEqSearcher<'a, C> { let s = &mut self.char_indices; // Compare lengths of the internal byte slice 
iterator // to find length of current char - let (pre_len, _) = s.iter.iter.size_hint(); + let pre_len = s.iter.iter.len(); if let Some((i, c)) = s.next_back() { - let (len, _) = s.iter.iter.size_hint(); + let len = s.iter.iter.len(); let char_len = pre_len - len; if self.char_eq.matches(c) { return SearchStep::Match(i, i + char_len); @@ -415,7 +414,7 @@ macro_rules! searcher_methods { ///////////////////////////////////////////////////////////////////////////// /// Associated type for `>::Searcher`. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct CharSearcher<'a>( as Pattern<'a>>::Searcher); unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { @@ -440,7 +439,7 @@ impl<'a> Pattern<'a> for char { // Todo: Change / Remove due to ambiguity in meaning. /// Associated type for `<&[char] as Pattern<'a>>::Searcher`. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct CharSliceSearcher<'a, 'b>( as Pattern<'a>>::Searcher); unsafe impl<'a, 'b> Searcher<'a> for CharSliceSearcher<'a, 'b> { @@ -467,6 +466,17 @@ impl<'a, 'b> Pattern<'a> for &'b [char] { pub struct CharPredicateSearcher<'a, F>( as Pattern<'a>>::Searcher) where F: FnMut(char) -> bool; +impl<'a, F> fmt::Debug for CharPredicateSearcher<'a, F> + where F: FnMut(char) -> bool +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("CharPredicateSearcher") + .field("haystack", &self.0.haystack) + .field("char_indices", &self.0.char_indices) + .field("ascii_only", &self.0.ascii_only) + .finish() + } +} unsafe impl<'a, F> Searcher<'a> for CharPredicateSearcher<'a, F> where F: FnMut(char) -> bool { @@ -492,7 +502,7 @@ impl<'a, F> Pattern<'a> for F where F: FnMut(char) -> bool { ///////////////////////////////////////////////////////////////////////////// /// Delegates to the `&str` impl. 
-impl<'a, 'b> Pattern<'a> for &'b &'b str { +impl<'a, 'b, 'c> Pattern<'a> for &'c &'b str { pattern_methods!(StrSearcher<'a, 'b>, |&s| s, |s| s); } diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index 21b76c1f4bec1..c10f7e39fc39d 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -26,12 +26,13 @@ //! [1]: http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations //! //! Atomic variables are safe to share between threads (they implement `Sync`) -//! but they do not themselves provide the mechanism for sharing. The most -//! common way to share an atomic variable is to put it into an `Arc` (an +//! but they do not themselves provide the mechanism for sharing and follow the +//! [threading model](../../../std/thread/index.html#the-threading-model) of rust. +//! The most common way to share an atomic variable is to put it into an `Arc` (an //! atomically-reference-counted shared pointer). //! //! Most atomic types may be stored in static variables, initialized using -//! the provided static initializers like `INIT_ATOMIC_BOOL`. Atomic statics +//! the provided static initializers like `ATOMIC_BOOL_INIT`. Atomic statics //! are often used for lazy global initialization. //! //! @@ -48,12 +49,16 @@ //! let spinlock = Arc::new(AtomicUsize::new(1)); //! //! let spinlock_clone = spinlock.clone(); -//! thread::spawn(move|| { +//! let thread = thread::spawn(move|| { //! spinlock_clone.store(0, Ordering::SeqCst); //! }); //! //! // Wait for the other thread to release the lock //! while spinlock.load(Ordering::SeqCst) != 0 {} +//! +//! if let Err(panic) = thread.join() { +//! println!("Thread had an error: {:?}", panic); +//! } //! } //! ``` //! @@ -69,83 +74,60 @@ //! 
``` #![stable(feature = "rust1", since = "1.0.0")] +#![cfg_attr(not(target_has_atomic = "8"), allow(dead_code))] +#![cfg_attr(not(target_has_atomic = "8"), allow(unused_imports))] use self::Ordering::*; -use marker::{Send, Sync}; - use intrinsics; use cell::UnsafeCell; - -use default::Default; use fmt; /// A boolean type which can be safely shared between threads. +/// +/// This type has the same in-memory representation as a `bool`. +#[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicBool { - v: UnsafeCell, + v: UnsafeCell, } +#[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] impl Default for AtomicBool { + /// Creates an `AtomicBool` initialized to `false`. fn default() -> Self { - Self::new(Default::default()) + Self::new(false) } } // Send is implicitly implemented for AtomicBool. +#[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Sync for AtomicBool {} -/// A signed integer type which can be safely shared between threads. -#[stable(feature = "rust1", since = "1.0.0")] -pub struct AtomicIsize { - v: UnsafeCell, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Default for AtomicIsize { - fn default() -> Self { - Self::new(Default::default()) - } -} - -// Send is implicitly implemented for AtomicIsize. -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for AtomicIsize {} - -/// An unsigned integer type which can be safely shared between threads. -#[stable(feature = "rust1", since = "1.0.0")] -pub struct AtomicUsize { - v: UnsafeCell, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Default for AtomicUsize { - fn default() -> Self { - Self::new(Default::default()) - } -} - -// Send is implicitly implemented for AtomicUsize. -#[stable(feature = "rust1", since = "1.0.0")] -unsafe impl Sync for AtomicUsize {} - /// A raw pointer type which can be safely shared between threads. 
+/// +/// This type has the same in-memory representation as a `*mut T`. +#[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicPtr { p: UnsafeCell<*mut T>, } +#[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] impl Default for AtomicPtr { + /// Creates a null `AtomicPtr`. fn default() -> AtomicPtr { AtomicPtr::new(::ptr::null_mut()) } } +#[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Send for AtomicPtr {} +#[cfg(target_has_atomic = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Sync for AtomicPtr {} @@ -160,20 +142,20 @@ unsafe impl Sync for AtomicPtr {} /// Rust's memory orderings are [the same as /// LLVM's](http://llvm.org/docs/LangRef.html#memory-model-for-concurrent-operations). #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] pub enum Ordering { /// No ordering constraints, only atomic operations. Corresponds to LLVM's /// `Monotonic` ordering. #[stable(feature = "rust1", since = "1.0.0")] Relaxed, /// When coupled with a store, all previous writes become visible - /// to another thread that performs a load with `Acquire` ordering + /// to the other threads that perform a load with `Acquire` ordering /// on the same value. #[stable(feature = "rust1", since = "1.0.0")] Release, /// When coupled with a load, all subsequent loads will see data /// written before a store with `Release` ordering on the same value - /// in another thread. + /// in other threads. #[stable(feature = "rust1", since = "1.0.0")] Acquire, /// When coupled with a load, uses `Acquire` ordering, and with a store @@ -184,21 +166,18 @@ pub enum Ordering { /// sequentially consistent operations in the same order. 
#[stable(feature = "rust1", since = "1.0.0")] SeqCst, + // Prevent exhaustive matching to allow for future extension + #[doc(hidden)] + #[unstable(feature = "future_atomic_orderings", issue = "0")] + __Nonexhaustive, } /// An `AtomicBool` initialized to `false`. +#[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); -/// An `AtomicIsize` initialized to `0`. -#[stable(feature = "rust1", since = "1.0.0")] -pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0); -/// An `AtomicUsize` initialized to `0`. -#[stable(feature = "rust1", since = "1.0.0")] -pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0); - -// NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly -const UINT_TRUE: usize = !0; +#[cfg(target_has_atomic = "8")] impl AtomicBool { /// Creates a new `AtomicBool`. /// @@ -213,7 +192,49 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub const fn new(v: bool) -> AtomicBool { - AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) } + AtomicBool { v: UnsafeCell::new(v as u8) } + } + + /// Returns a mutable reference to the underlying `bool`. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// let mut some_bool = AtomicBool::new(true); + /// assert_eq!(*some_bool.get_mut(), true); + /// *some_bool.get_mut() = false; + /// assert_eq!(some_bool.load(Ordering::SeqCst), false); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn get_mut(&mut self) -> &mut bool { + unsafe { &mut *(self.v.get() as *mut bool) } + } + + /// Consumes the atomic and returns the contained value. 
+ /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicBool; + /// + /// let some_bool = AtomicBool::new(true); + /// assert_eq!(some_bool.into_inner(), true); + /// ``` + #[inline] + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn into_inner(self) -> bool { + unsafe { self.v.into_inner() != 0 } } /// Loads a value from the bool. @@ -236,7 +257,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn load(&self, order: Ordering) -> bool { - unsafe { atomic_load(self.v.get(), order) > 0 } + unsafe { atomic_load(self.v.get(), order) != 0 } } /// Stores a value into the bool. @@ -260,9 +281,9 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, val: bool, order: Ordering) { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_store(self.v.get(), val, order); } + unsafe { + atomic_store(self.v.get(), val as u8, order); + } } /// Stores a value into the bool, returning the old value. @@ -282,9 +303,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn swap(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_swap(self.v.get(), val, order) > 0 } + unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } } /// Stores a value into the `bool` if the current value is the same as the `current` value. 
@@ -311,10 +330,101 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool { - let current = if current { UINT_TRUE } else { 0 }; - let new = if new { UINT_TRUE } else { 0 }; + match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { + Ok(x) => x, + Err(x) => x, + } + } - unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) > 0 } + /// Stores a value into the `bool` if the current value is the same as the `current` value. + /// + /// The return value is a result indicating whether the new value was written and containing + /// the previous value. On success this value is guaranteed to be equal to `current`. + /// + /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this + /// operation. The first describes the required ordering if the operation succeeds while the + /// second describes the required ordering when the operation fails. The failure ordering can't + /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// let some_bool = AtomicBool::new(true); + /// + /// assert_eq!(some_bool.compare_exchange(true, + /// false, + /// Ordering::Acquire, + /// Ordering::Relaxed), + /// Ok(true)); + /// assert_eq!(some_bool.load(Ordering::Relaxed), false); + /// + /// assert_eq!(some_bool.compare_exchange(true, true, + /// Ordering::SeqCst, + /// Ordering::Acquire), + /// Err(false)); + /// assert_eq!(some_bool.load(Ordering::Relaxed), false); + /// ``` + #[inline] + #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + pub fn compare_exchange(&self, + current: bool, + new: bool, + success: Ordering, + failure: Ordering) + -> Result { + match unsafe { + atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure) + } { + Ok(x) => Ok(x != 0), + Err(x) => Err(x != 0), + } + } + + /// Stores a value into the `bool` if the current value is the same as the `current` value. + /// + /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the + /// comparison succeeds, which can result in more efficient code on some platforms. The + /// return value is a result indicating whether the new value was written and containing the + /// previous value. + /// + /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory + /// ordering of this operation. The first describes the required ordering if the operation + /// succeeds while the second describes the required ordering when the operation fails. The + /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the + /// success ordering. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicBool, Ordering}; + /// + /// let val = AtomicBool::new(false); + /// + /// let new = true; + /// let mut old = val.load(Ordering::Relaxed); + /// loop { + /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { + /// Ok(_) => break, + /// Err(x) => old = x, + /// } + /// } + /// ``` + #[inline] + #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + pub fn compare_exchange_weak(&self, + current: bool, + new: bool, + success: Ordering, + failure: Ordering) + -> Result { + match unsafe { + atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure) + } { + Ok(x) => Ok(x != 0), + Err(x) => Err(x != 0), + } } /// Logical "and" with a boolean value. @@ -344,9 +454,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_and(self.v.get(), val, order) > 0 } + unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } } /// Logical "nand" with a boolean value. @@ -377,9 +485,20 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_nand(self.v.get(), val, order) > 0 } + // We can't use atomic_nand here because it can result in a bool with + // an invalid value. This happens because the atomic operation is done + // with an 8-bit integer internally, which would set the upper 7 bits. + // So we just use a compare-exchange loop instead, which is what the + // intrinsic actually expands to anyways on many platforms. + let mut old = self.load(Relaxed); + loop { + let new = !(old && val); + match self.compare_exchange_weak(old, new, order, Relaxed) { + Ok(_) => break, + Err(x) => old = x, + } + } + old } /// Logical "or" with a boolean value. 
@@ -409,9 +528,7 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_or(self.v.get(), val, order) > 0 } + unsafe { atomic_or(self.v.get(), val as u8, order) != 0 } } /// Logical "xor" with a boolean value. @@ -441,221 +558,70 @@ impl AtomicBool { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { - let val = if val { UINT_TRUE } else { 0 }; - - unsafe { atomic_xor(self.v.get(), val, order) > 0 } + unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 } } } -impl AtomicIsize { - /// Creates a new `AtomicIsize`. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::AtomicIsize; - /// - /// let atomic_forty_two = AtomicIsize::new(42); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub const fn new(v: isize) -> AtomicIsize { - AtomicIsize {v: UnsafeCell::new(v)} - } - - /// Loads a value from the isize. - /// - /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. - /// - /// # Panics - /// - /// Panics if `order` is `Release` or `AcqRel`. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.load(Ordering::Relaxed), 5); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn load(&self, order: Ordering) -> isize { - unsafe { atomic_load(self.v.get(), order) } - } - - /// Stores a value into the isize. - /// - /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// some_isize.store(10, Ordering::Relaxed); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// ``` - /// - /// # Panics - /// - /// Panics if `order` is `Acquire` or `AcqRel`. - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn store(&self, val: isize, order: Ordering) { - unsafe { atomic_store(self.v.get(), val, order); } - } - - /// Stores a value into the isize, returning the old value. - /// - /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn swap(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_swap(self.v.get(), val, order) } - } - - /// Stores a value into the `isize` if the current value is the same as the `current` value. - /// - /// The return value is always the previous value. If it is equal to `current`, then the value - /// was updated. - /// - /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of - /// this operation. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let some_isize = AtomicIsize::new(5); - /// - /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// - /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10); - /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn compare_and_swap(&self, current: isize, new: isize, order: Ordering) -> isize { - unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) } - } - - /// Add an isize to the current value, returning the previous value. +#[cfg(target_has_atomic = "ptr")] +impl AtomicPtr { + /// Creates a new `AtomicPtr`. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// use std::sync::atomic::AtomicPtr; /// - /// let foo = AtomicIsize::new(0); - /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); - /// assert_eq!(foo.load(Ordering::SeqCst), 10); + /// let ptr = &mut 5; + /// let atomic_ptr = AtomicPtr::new(ptr); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_add(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_add(self.v.get(), val, order) } + pub const fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: UnsafeCell::new(p) } } - /// Subtract an isize from the current value, returning the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// Returns a mutable reference to the underlying pointer. 
/// - /// let foo = AtomicIsize::new(0); - /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0); - /// assert_eq!(foo.load(Ordering::SeqCst), -10); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_sub(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_sub(self.v.get(), val, order) } - } - - /// Bitwise and with the current isize, returning the previous value. + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_and(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_and(self.v.get(), val, order) } - } - - /// Bitwise or with the current isize, returning the previous value. - /// - /// # Examples + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicPtr, Ordering}; /// + /// let mut atomic_ptr = AtomicPtr::new(&mut 10); + /// *atomic_ptr.get_mut() = &mut 5; + /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5); /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; - /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_or(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_or(self.v.get(), val, order) } + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn get_mut(&mut self) -> &mut *mut T { + unsafe { &mut *self.p.get() } } - /// Bitwise xor with the current isize, returning the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// Consumes the atomic and returns the contained value. /// - /// let foo = AtomicIsize::new(0b101101); - /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_xor(&self, val: isize, order: Ordering) -> isize { - unsafe { atomic_xor(self.v.get(), val, order) } - } -} - -impl AtomicUsize { - /// Creates a new `AtomicUsize`. + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. /// /// # Examples /// /// ``` - /// use std::sync::atomic::AtomicUsize; + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicPtr; /// - /// let atomic_forty_two = AtomicUsize::new(42); + /// let atomic_ptr = AtomicPtr::new(&mut 5); + /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5); /// ``` #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub const fn new(v: usize) -> AtomicUsize { - AtomicUsize { v: UnsafeCell::new(v) } + #[unstable(feature = "atomic_access", issue = "35603")] + pub fn into_inner(self) -> *mut T { + unsafe { self.p.into_inner() } } - /// Loads a value from the usize. + /// Loads a value from the pointer. /// /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. 
/// @@ -666,31 +632,34 @@ impl AtomicUsize { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; + /// use std::sync::atomic::{AtomicPtr, Ordering}; /// - /// let some_usize = AtomicUsize::new(5); + /// let ptr = &mut 5; + /// let some_ptr = AtomicPtr::new(ptr); /// - /// assert_eq!(some_usize.load(Ordering::Relaxed), 5); + /// let value = some_ptr.load(Ordering::Relaxed); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn load(&self, order: Ordering) -> usize { - unsafe { atomic_load(self.v.get(), order) } + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T } } - /// Stores a value into the usize. + /// Stores a value into the pointer. /// /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. /// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; + /// use std::sync::atomic::{AtomicPtr, Ordering}; /// - /// let some_usize = AtomicUsize::new(5); + /// let ptr = &mut 5; + /// let some_ptr = AtomicPtr::new(ptr); /// - /// some_usize.store(10, Ordering::Relaxed); - /// assert_eq!(some_usize.load(Ordering::Relaxed), 10); + /// let other_ptr = &mut 10; + /// + /// some_ptr.store(other_ptr, Ordering::Relaxed); /// ``` /// /// # Panics @@ -698,31 +667,35 @@ impl AtomicUsize { /// Panics if `order` is `Acquire` or `AcqRel`. #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn store(&self, val: usize, order: Ordering) { - unsafe { atomic_store(self.v.get(), val, order); } + pub fn store(&self, ptr: *mut T, order: Ordering) { + unsafe { + atomic_store(self.p.get() as *mut usize, ptr as usize, order); + } } - /// Stores a value into the usize, returning the old value. + /// Stores a value into the pointer, returning the old value. /// /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. 
/// /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; + /// use std::sync::atomic::{AtomicPtr, Ordering}; /// - /// let some_usize= AtomicUsize::new(5); + /// let ptr = &mut 5; + /// let some_ptr = AtomicPtr::new(ptr); /// - /// assert_eq!(some_usize.swap(10, Ordering::Relaxed), 5); - /// assert_eq!(some_usize.load(Ordering::Relaxed), 10); + /// let other_ptr = &mut 10; + /// + /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn swap(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_swap(self.v.get(), val, order) } + pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } } - /// Stores a value into the `usize` if the current value is the same as the `current` value. + /// Stores a value into the pointer if the current value is the same as the `current` value. /// /// The return value is always the previous value. If it is equal to `current`, then the value /// was updated. @@ -733,177 +706,34 @@ impl AtomicUsize { /// # Examples /// /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let some_usize = AtomicUsize::new(5); - /// - /// assert_eq!(some_usize.compare_and_swap(5, 10, Ordering::Relaxed), 5); - /// assert_eq!(some_usize.load(Ordering::Relaxed), 10); - /// - /// assert_eq!(some_usize.compare_and_swap(6, 12, Ordering::Relaxed), 10); - /// assert_eq!(some_usize.load(Ordering::Relaxed), 10); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn compare_and_swap(&self, current: usize, new: usize, order: Ordering) -> usize { - unsafe { atomic_compare_and_swap(self.v.get(), current, new, order) } - } - - /// Add to the current usize, returning the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let foo = AtomicUsize::new(0); - /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); - /// assert_eq!(foo.load(Ordering::SeqCst), 10); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_add(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_add(self.v.get(), val, order) } - } - - /// Subtract from the current usize, returning the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let foo = AtomicUsize::new(10); - /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 10); - /// assert_eq!(foo.load(Ordering::SeqCst), 0); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_sub(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_sub(self.v.get(), val, order) } - } - - /// Bitwise and with the current usize, returning the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let foo = AtomicUsize::new(0b101101); - /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_and(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_and(self.v.get(), val, order) } - } - - /// Bitwise or with the current usize, returning the previous value. 
- /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let foo = AtomicUsize::new(0b101101); - /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_or(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_or(self.v.get(), val, order) } - } - - /// Bitwise xor with the current usize, returning the previous value. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicUsize, Ordering}; - /// - /// let foo = AtomicUsize::new(0b101101); - /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); - /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn fetch_xor(&self, val: usize, order: Ordering) -> usize { - unsafe { atomic_xor(self.v.get(), val, order) } - } -} - -impl AtomicPtr { - /// Creates a new `AtomicPtr`. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::AtomicPtr; - /// - /// let ptr = &mut 5; - /// let atomic_ptr = AtomicPtr::new(ptr); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub const fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: UnsafeCell::new(p) } - } - - /// Loads a value from the pointer. - /// - /// `load` takes an `Ordering` argument which describes the memory ordering of this operation. - /// - /// # Panics - /// - /// Panics if `order` is `Release` or `AcqRel`. 
- /// - /// # Examples - /// - /// ``` /// use std::sync::atomic::{AtomicPtr, Ordering}; /// /// let ptr = &mut 5; /// let some_ptr = AtomicPtr::new(ptr); /// - /// let value = some_ptr.load(Ordering::Relaxed); + /// let other_ptr = &mut 10; + /// let another_ptr = &mut 10; + /// + /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn load(&self, order: Ordering) -> *mut T { - unsafe { - atomic_load(self.p.get() as *mut usize, order) as *mut T + pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { + match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { + Ok(x) => x, + Err(x) => x, } } - /// Stores a value into the pointer. - /// - /// `store` takes an `Ordering` argument which describes the memory ordering of this operation. - /// - /// # Examples - /// - /// ``` - /// use std::sync::atomic::{AtomicPtr, Ordering}; - /// - /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); - /// - /// let other_ptr = &mut 10; - /// - /// some_ptr.store(other_ptr, Ordering::Relaxed); - /// ``` + /// Stores a value into the pointer if the current value is the same as the `current` value. /// - /// # Panics + /// The return value is a result indicating whether the new value was written and containing + /// the previous value. On success this value is guaranteed to be equal to `current`. /// - /// Panics if `order` is `Acquire` or `AcqRel`. - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn store(&self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); } - } - - /// Stores a value into the pointer, returning the old value. - /// - /// `swap` takes an `Ordering` argument which describes the memory ordering of this operation. 
+ /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of this + /// operation. The first describes the required ordering if the operation succeeds while the + /// second describes the required ordering when the operation fails. The failure ordering can't + /// be `Release` or `AcqRel` and must be equivalent or weaker than the success ordering. /// /// # Examples /// @@ -913,66 +743,569 @@ impl AtomicPtr { /// let ptr = &mut 5; /// let some_ptr = AtomicPtr::new(ptr); /// - /// let other_ptr = &mut 10; + /// let other_ptr = &mut 10; + /// let another_ptr = &mut 10; /// - /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed); + /// let value = some_ptr.compare_exchange(other_ptr, another_ptr, + /// Ordering::SeqCst, Ordering::Relaxed); /// ``` #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } + #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + pub fn compare_exchange(&self, + current: *mut T, + new: *mut T, + success: Ordering, + failure: Ordering) + -> Result<*mut T, *mut T> { + unsafe { + let res = atomic_compare_exchange(self.p.get() as *mut usize, + current as usize, + new as usize, + success, + failure); + match res { + Ok(x) => Ok(x as *mut T), + Err(x) => Err(x as *mut T), + } + } } /// Stores a value into the pointer if the current value is the same as the `current` value. /// - /// The return value is always the previous value. If it is equal to `current`, then the value - /// was updated. + /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the + /// comparison succeeds, which can result in more efficient code on some platforms. The + /// return value is a result indicating whether the new value was written and containing the + /// previous value. 
/// - /// `compare_and_swap` also takes an `Ordering` argument which describes the memory ordering of - /// this operation. + /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory + /// ordering of this operation. The first describes the required ordering if the operation + /// succeeds while the second describes the required ordering when the operation fails. The + /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the + /// success ordering. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicPtr, Ordering}; /// - /// let ptr = &mut 5; - /// let some_ptr = AtomicPtr::new(ptr); + /// let some_ptr = AtomicPtr::new(&mut 5); /// - /// let other_ptr = &mut 10; - /// let another_ptr = &mut 10; - /// - /// let value = some_ptr.compare_and_swap(other_ptr, another_ptr, Ordering::Relaxed); + /// let new = &mut 10; + /// let mut old = some_ptr.load(Ordering::Relaxed); + /// loop { + /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { + /// Ok(_) => break, + /// Err(x) => old = x, + /// } + /// } /// ``` #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T { + #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] + pub fn compare_exchange_weak(&self, + current: *mut T, + new: *mut T, + success: Ordering, + failure: Ordering) + -> Result<*mut T, *mut T> { unsafe { - atomic_compare_and_swap(self.p.get() as *mut usize, current as usize, - new as usize, order) as *mut T + let res = atomic_compare_exchange_weak(self.p.get() as *mut usize, + current as usize, + new as usize, + success, + failure); + match res { + Ok(x) => Ok(x as *mut T), + Err(x) => Err(x as *mut T), + } } } } +macro_rules! 
atomic_int { + ($stable:meta, + $stable_cxchg:meta, + $stable_debug:meta, + $stable_access:meta, + $int_type:ident $atomic_type:ident $atomic_init:ident) => { + /// An integer type which can be safely shared between threads. + /// + /// This type has the same in-memory representation as the underlying integer type. + #[$stable] + pub struct $atomic_type { + v: UnsafeCell<$int_type>, + } + + /// An atomic integer initialized to `0`. + #[$stable] + pub const $atomic_init: $atomic_type = $atomic_type::new(0); + + #[$stable] + impl Default for $atomic_type { + fn default() -> Self { + Self::new(Default::default()) + } + } + + #[$stable_debug] + impl fmt::Debug for $atomic_type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple(stringify!($atomic_type)) + .field(&self.load(Ordering::SeqCst)) + .finish() + } + } + + // Send is implicitly implemented. + #[$stable] + unsafe impl Sync for $atomic_type {} + + impl $atomic_type { + /// Creates a new atomic integer. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::AtomicIsize; + /// + /// let atomic_forty_two = AtomicIsize::new(42); + /// ``` + #[inline] + #[$stable] + pub const fn new(v: $int_type) -> Self { + $atomic_type {v: UnsafeCell::new(v)} + } + + /// Returns a mutable reference to the underlying integer. + /// + /// This is safe because the mutable reference guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let mut some_isize = AtomicIsize::new(10); + /// assert_eq!(*some_isize.get_mut(), 10); + /// *some_isize.get_mut() = 5; + /// assert_eq!(some_isize.load(Ordering::SeqCst), 5); + /// ``` + #[inline] + #[$stable_access] + pub fn get_mut(&mut self) -> &mut $int_type { + unsafe { &mut *self.v.get() } + } + + /// Consumes the atomic and returns the contained value. 
+ /// + /// This is safe because passing `self` by value guarantees that no other threads are + /// concurrently accessing the atomic data. + /// + /// # Examples + /// + /// ``` + /// #![feature(atomic_access)] + /// use std::sync::atomic::AtomicIsize; + /// + /// let some_isize = AtomicIsize::new(5); + /// assert_eq!(some_isize.into_inner(), 5); + /// ``` + #[inline] + #[$stable_access] + pub fn into_inner(self) -> $int_type { + unsafe { self.v.into_inner() } + } + + /// Loads a value from the atomic integer. + /// + /// `load` takes an `Ordering` argument which describes the memory ordering of this + /// operation. + /// + /// # Panics + /// + /// Panics if `order` is `Release` or `AcqRel`. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let some_isize = AtomicIsize::new(5); + /// + /// assert_eq!(some_isize.load(Ordering::Relaxed), 5); + /// ``` + #[inline] + #[$stable] + pub fn load(&self, order: Ordering) -> $int_type { + unsafe { atomic_load(self.v.get(), order) } + } + + /// Stores a value into the atomic integer. + /// + /// `store` takes an `Ordering` argument which describes the memory ordering of this + /// operation. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let some_isize = AtomicIsize::new(5); + /// + /// some_isize.store(10, Ordering::Relaxed); + /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); + /// ``` + /// + /// # Panics + /// + /// Panics if `order` is `Acquire` or `AcqRel`. + #[inline] + #[$stable] + pub fn store(&self, val: $int_type, order: Ordering) { + unsafe { atomic_store(self.v.get(), val, order); } + } + + /// Stores a value into the atomic integer, returning the old value. + /// + /// `swap` takes an `Ordering` argument which describes the memory ordering of this + /// operation. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let some_isize = AtomicIsize::new(5); + /// + /// assert_eq!(some_isize.swap(10, Ordering::Relaxed), 5); + /// ``` + #[inline] + #[$stable] + pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_swap(self.v.get(), val, order) } + } + + /// Stores a value into the atomic integer if the current value is the same as the + /// `current` value. + /// + /// The return value is always the previous value. If it is equal to `current`, then the + /// value was updated. + /// + /// `compare_and_swap` also takes an `Ordering` argument which describes the memory + /// ordering of this operation. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let some_isize = AtomicIsize::new(5); + /// + /// assert_eq!(some_isize.compare_and_swap(5, 10, Ordering::Relaxed), 5); + /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); + /// + /// assert_eq!(some_isize.compare_and_swap(6, 12, Ordering::Relaxed), 10); + /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); + /// ``` + #[inline] + #[$stable] + pub fn compare_and_swap(&self, + current: $int_type, + new: $int_type, + order: Ordering) -> $int_type { + match self.compare_exchange(current, + new, + order, + strongest_failure_ordering(order)) { + Ok(x) => x, + Err(x) => x, + } + } + + /// Stores a value into the atomic integer if the current value is the same as the + /// `current` value. + /// + /// The return value is a result indicating whether the new value was written and + /// containing the previous value. On success this value is guaranteed to be equal to + /// `current`. + /// + /// `compare_exchange` takes two `Ordering` arguments to describe the memory ordering of + /// this operation. The first describes the required ordering if the operation succeeds + /// while the second describes the required ordering when the operation fails. 
The + /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker + /// than the success ordering. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let some_isize = AtomicIsize::new(5); + /// + /// assert_eq!(some_isize.compare_exchange(5, 10, + /// Ordering::Acquire, + /// Ordering::Relaxed), + /// Ok(5)); + /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); + /// + /// assert_eq!(some_isize.compare_exchange(6, 12, + /// Ordering::SeqCst, + /// Ordering::Acquire), + /// Err(10)); + /// assert_eq!(some_isize.load(Ordering::Relaxed), 10); + /// ``` + #[inline] + #[$stable_cxchg] + pub fn compare_exchange(&self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering) -> Result<$int_type, $int_type> { + unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } + } + + /// Stores a value into the atomic integer if the current value is the same as the + /// `current` value. + /// + /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the + /// comparison succeeds, which can result in more efficient code on some platforms. The + /// return value is a result indicating whether the new value was written and containing + /// the previous value. + /// + /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory + /// ordering of this operation. The first describes the required ordering if the + /// operation succeeds while the second describes the required ordering when the + /// operation fails. The failure ordering can't be `Release` or `AcqRel` and must be + /// equivalent or weaker than the success ordering. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let val = AtomicIsize::new(4); + /// + /// let mut old = val.load(Ordering::Relaxed); + /// loop { + /// let new = old * 2; + /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { + /// Ok(_) => break, + /// Err(x) => old = x, + /// } + /// } + /// ``` + #[inline] + #[$stable_cxchg] + pub fn compare_exchange_weak(&self, + current: $int_type, + new: $int_type, + success: Ordering, + failure: Ordering) -> Result<$int_type, $int_type> { + unsafe { + atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) + } + } + + /// Add to the current value, returning the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let foo = AtomicIsize::new(0); + /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); + /// assert_eq!(foo.load(Ordering::SeqCst), 10); + /// ``` + #[inline] + #[$stable] + pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_add(self.v.get(), val, order) } + } + + /// Subtract from the current value, returning the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let foo = AtomicIsize::new(0); + /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 0); + /// assert_eq!(foo.load(Ordering::SeqCst), -10); + /// ``` + #[inline] + #[$stable] + pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_sub(self.v.get(), val, order) } + } + + /// Bitwise and with the current value, returning the previous value. 
+ /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let foo = AtomicIsize::new(0b101101); + /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); + /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); + #[inline] + #[$stable] + pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_and(self.v.get(), val, order) } + } + + /// Bitwise or with the current value, returning the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let foo = AtomicIsize::new(0b101101); + /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); + /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); + #[inline] + #[$stable] + pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_or(self.v.get(), val, order) } + } + + /// Bitwise xor with the current value, returning the previous value. + /// + /// # Examples + /// + /// ``` + /// use std::sync::atomic::{AtomicIsize, Ordering}; + /// + /// let foo = AtomicIsize::new(0b101101); + /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); + /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); + #[inline] + #[$stable] + pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { + unsafe { atomic_xor(self.v.get(), val, order) } + } + } + } +} + +#[cfg(target_has_atomic = "8")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + i8 AtomicI8 ATOMIC_I8_INIT +} +#[cfg(target_has_atomic = "8")] +atomic_int! 
{ + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + u8 AtomicU8 ATOMIC_U8_INIT +} +#[cfg(target_has_atomic = "16")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + i16 AtomicI16 ATOMIC_I16_INIT +} +#[cfg(target_has_atomic = "16")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + u16 AtomicU16 ATOMIC_U16_INIT +} +#[cfg(target_has_atomic = "32")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + i32 AtomicI32 ATOMIC_I32_INIT +} +#[cfg(target_has_atomic = "32")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + u32 AtomicU32 ATOMIC_U32_INIT +} +#[cfg(target_has_atomic = "64")] +atomic_int! { + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + i64 AtomicI64 ATOMIC_I64_INIT +} +#[cfg(target_has_atomic = "64")] +atomic_int! 
{ + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), + u64 AtomicU64 ATOMIC_U64_INIT +} +#[cfg(target_has_atomic = "ptr")] +atomic_int!{ + stable(feature = "rust1", since = "1.0.0"), + stable(feature = "extended_compare_and_swap", since = "1.10.0"), + stable(feature = "atomic_debug", since = "1.3.0"), + unstable(feature = "atomic_access", issue = "35603"), + isize AtomicIsize ATOMIC_ISIZE_INIT +} +#[cfg(target_has_atomic = "ptr")] +atomic_int!{ + stable(feature = "rust1", since = "1.0.0"), + stable(feature = "extended_compare_and_swap", since = "1.10.0"), + stable(feature = "atomic_debug", since = "1.3.0"), + unstable(feature = "atomic_access", issue = "35603"), + usize AtomicUsize ATOMIC_USIZE_INIT +} + +#[inline] +fn strongest_failure_ordering(order: Ordering) -> Ordering { + match order { + Release => Relaxed, + Relaxed => Relaxed, + SeqCst => SeqCst, + Acquire => Acquire, + AcqRel => Acquire, + __Nonexhaustive => __Nonexhaustive, + } +} + #[inline] -unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { +unsafe fn atomic_store(dst: *mut T, val: T, order: Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), - SeqCst => intrinsics::atomic_store(dst, val), + SeqCst => intrinsics::atomic_store(dst, val), Acquire => panic!("there is no such thing as an acquire store"), - AcqRel => panic!("there is no such thing as an acquire/release store"), + AcqRel => panic!("there is no such thing as an acquire/release store"), + __Nonexhaustive => panic!("invalid memory ordering"), } } #[inline] -unsafe fn atomic_load(dst: *const T, order:Ordering) -> T { +unsafe fn atomic_load(dst: *const T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => 
intrinsics::atomic_load_relaxed(dst), - SeqCst => intrinsics::atomic_load(dst), + SeqCst => intrinsics::atomic_load(dst), Release => panic!("there is no such thing as a release load"), - AcqRel => panic!("there is no such thing as an acquire/release load"), + AcqRel => panic!("there is no such thing as an acquire/release load"), + __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -981,9 +1314,10 @@ unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), - AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), - SeqCst => intrinsics::atomic_xchg(dst, val) + SeqCst => intrinsics::atomic_xchg(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -993,9 +1327,10 @@ unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), - AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), - SeqCst => intrinsics::atomic_xadd(dst, val) + SeqCst => intrinsics::atomic_xadd(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } @@ -1005,21 +1340,63 @@ unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), - AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), - SeqCst => intrinsics::atomic_xsub(dst, val) + SeqCst => intrinsics::atomic_xsub(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } #[inline] -unsafe fn atomic_compare_and_swap(dst: *mut 
T, old:T, new:T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), - Release => intrinsics::atomic_cxchg_rel(dst, old, new), - AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), - Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), - SeqCst => intrinsics::atomic_cxchg(dst, old, new), - } +unsafe fn atomic_compare_exchange(dst: *mut T, + old: T, + new: T, + success: Ordering, + failure: Ordering) + -> Result { + let (val, ok) = match (success, failure) { + (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new), + (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new), + (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new), + (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new), + (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new), + (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new), + (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new), + (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new), + (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new), + (__Nonexhaustive, _) => panic!("invalid memory ordering"), + (_, __Nonexhaustive) => panic!("invalid memory ordering"), + (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), + (_, Release) => panic!("there is no such thing as a release failure ordering"), + _ => panic!("a failure ordering can't be stronger than a success ordering"), + }; + if ok { Ok(val) } else { Err(val) } +} + +#[inline] +unsafe fn atomic_compare_exchange_weak(dst: *mut T, + old: T, + new: T, + success: Ordering, + failure: Ordering) + -> Result { + let (val, ok) = match (success, failure) { + (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new), + (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new), + (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, 
new), + (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new), + (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new), + (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new), + (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), + (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), + (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), + (__Nonexhaustive, _) => panic!("invalid memory ordering"), + (_, __Nonexhaustive) => panic!("invalid memory ordering"), + (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), + (_, Release) => panic!("there is no such thing as a release failure ordering"), + _ => panic!("a failure ordering can't be stronger than a success ordering"), + }; + if ok { Ok(val) } else { Err(val) } } #[inline] @@ -1027,48 +1404,37 @@ unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), - AcqRel => intrinsics::atomic_and_acqrel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), Relaxed => intrinsics::atomic_and_relaxed(dst, val), - SeqCst => intrinsics::atomic_and(dst, val) - } -} - -#[inline] -unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { - match order { - Acquire => intrinsics::atomic_nand_acq(dst, val), - Release => intrinsics::atomic_nand_rel(dst, val), - AcqRel => intrinsics::atomic_nand_acqrel(dst, val), - Relaxed => intrinsics::atomic_nand_relaxed(dst, val), - SeqCst => intrinsics::atomic_nand(dst, val) + SeqCst => intrinsics::atomic_and(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } - #[inline] unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), - AcqRel => 
intrinsics::atomic_or_acqrel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), Relaxed => intrinsics::atomic_or_relaxed(dst, val), - SeqCst => intrinsics::atomic_or(dst, val) + SeqCst => intrinsics::atomic_or(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } - #[inline] unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), - AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), Relaxed => intrinsics::atomic_xor_relaxed(dst, val), - SeqCst => intrinsics::atomic_xor(dst, val) + SeqCst => intrinsics::atomic_xor(dst, val), + __Nonexhaustive => panic!("invalid memory ordering"), } } - /// An atomic fence. /// /// A fence 'A' which has `Release` ordering semantics, synchronizes with a @@ -1096,26 +1462,24 @@ pub fn fence(order: Ordering) { match order { Acquire => intrinsics::atomic_fence_acq(), Release => intrinsics::atomic_fence_rel(), - AcqRel => intrinsics::atomic_fence_acqrel(), - SeqCst => intrinsics::atomic_fence(), - Relaxed => panic!("there is no such thing as a relaxed fence") + AcqRel => intrinsics::atomic_fence_acqrel(), + SeqCst => intrinsics::atomic_fence(), + Relaxed => panic!("there is no such thing as a relaxed fence"), + __Nonexhaustive => panic!("invalid memory ordering"), } } } -macro_rules! 
impl_Debug { - ($($t:ident)*) => ($( - #[stable(feature = "atomic_debug", since = "1.3.0")] - impl fmt::Debug for $t { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple(stringify!($t)).field(&self.load(Ordering::SeqCst)).finish() - } - } - )*); -} -impl_Debug!{ AtomicUsize AtomicIsize AtomicBool } +#[cfg(target_has_atomic = "8")] +#[stable(feature = "atomic_debug", since = "1.3.0")] +impl fmt::Debug for AtomicBool { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("AtomicBool").field(&self.load(Ordering::SeqCst)).finish() + } +} +#[cfg(target_has_atomic = "ptr")] #[stable(feature = "atomic_debug", since = "1.3.0")] impl fmt::Debug for AtomicPtr { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { diff --git a/src/libcore/tuple.rs b/src/libcore/tuple.rs index 4127e182e1d0f..55d55079ddc1b 100644 --- a/src/libcore/tuple.rs +++ b/src/libcore/tuple.rs @@ -8,36 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! A finite heterogeneous sequence, `(T, U, ..)` -//! -//! To access a single element of a tuple one can use the `.0` -//! field access syntax. -//! -//! Indexing starts from zero, so `.0` returns first value, `.1` -//! returns second value, and so on. In general, a tuple with *N* -//! elements has field accessors from 0 to *N* - 1. -//! -//! If every type inside a tuple implements one of the following -//! traits, then a tuple itself also implements it. -//! -//! * `Clone` -//! * `PartialEq` -//! * `Eq` -//! * `PartialOrd` -//! * `Ord` -//! * `Default` +// See src/libstd/primitive_docs.rs for documentation. -use clone::Clone; use cmp::*; use cmp::Ordering::*; -use default::Default; -use option::Option; -use option::Option::Some; - -// FIXME(#19630) Remove this work-around -macro_rules! e { - ($e:expr) => { $e } -} // macro for implementing n-ary tuple functions and operations macro_rules! tuple_impls { @@ -50,7 +24,7 @@ macro_rules! 
tuple_impls { #[stable(feature = "rust1", since = "1.0.0")] impl<$($T:Clone),+> Clone for ($($T,)+) { fn clone(&self) -> ($($T,)+) { - ($(e!(self.$idx.clone()),)+) + ($(self.$idx.clone(),)+) } } @@ -58,11 +32,11 @@ macro_rules! tuple_impls { impl<$($T:PartialEq),+> PartialEq for ($($T,)+) { #[inline] fn eq(&self, other: &($($T,)+)) -> bool { - e!($(self.$idx == other.$idx)&&+) + $(self.$idx == other.$idx)&&+ } #[inline] fn ne(&self, other: &($($T,)+)) -> bool { - e!($(self.$idx != other.$idx)||+) + $(self.$idx != other.$idx)||+ } } diff --git a/src/libcoretest/cell.rs b/src/libcoretest/cell.rs index 309a3d51c7602..a7c230ba979be 100644 --- a/src/libcoretest/cell.rs +++ b/src/libcoretest/cell.rs @@ -158,19 +158,6 @@ fn ref_map_accessor() { assert_eq!(*d, 7); } -#[test] -fn ref_filter_map_accessor() { - struct X(RefCell>); - impl X { - fn accessor(&self) -> Option> { - Ref::filter_map(self.0.borrow(), |r| r.as_ref().ok()) - } - } - let x = X(RefCell::new(Ok(7))); - let d: Ref = x.accessor().unwrap(); - assert_eq!(*d, 7); -} - #[test] fn ref_mut_map_accessor() { struct X(RefCell<(u32, char)>); @@ -189,38 +176,21 @@ fn ref_mut_map_accessor() { } #[test] -fn ref_mut_filter_map_accessor() { - struct X(RefCell>); - impl X { - fn accessor(&self) -> Option> { - RefMut::filter_map(self.0.borrow_mut(), |r| r.as_mut().ok()) - } - } - let x = X(RefCell::new(Ok(7))); - { - let mut d: RefMut = x.accessor().unwrap(); - assert_eq!(*d, 7); - *d += 1; - } - assert_eq!(*x.0.borrow(), Ok(8)); -} - -#[test] -fn as_unsafe_cell() { +fn as_ptr() { let c1: Cell = Cell::new(0); c1.set(1); - assert_eq!(1, unsafe { *c1.as_unsafe_cell().get() }); + assert_eq!(1, unsafe { *c1.as_ptr() }); let c2: Cell = Cell::new(0); - unsafe { *c2.as_unsafe_cell().get() = 1; } + unsafe { *c2.as_ptr() = 1; } assert_eq!(1, c2.get()); let r1: RefCell = RefCell::new(0); *r1.borrow_mut() = 1; - assert_eq!(1, unsafe { *r1.as_unsafe_cell().get() }); + assert_eq!(1, unsafe { *r1.as_ptr() }); let r2: RefCell = 
RefCell::new(0); - unsafe { *r2.as_unsafe_cell().get() = 1; } + unsafe { *r2.as_ptr() = 1; } assert_eq!(1, *r2.borrow()); } @@ -259,3 +229,23 @@ fn refcell_unsized() { let comp: &mut [i32] = &mut [4, 2, 5]; assert_eq!(&*cell.borrow(), comp); } + +#[test] +fn refcell_ref_coercion() { + let cell: RefCell<[i32; 3]> = RefCell::new([1, 2, 3]); + { + let mut cellref: RefMut<[i32; 3]> = cell.borrow_mut(); + cellref[0] = 4; + let mut coerced: RefMut<[i32]> = cellref; + coerced[2] = 5; + } + { + let comp: &mut [i32] = &mut [4, 2, 5]; + let cellref: Ref<[i32; 3]> = cell.borrow(); + assert_eq!(&*cellref, comp); + let coerced: Ref<[i32]> = cellref; + assert_eq!(&*coerced, comp); + } +} + + diff --git a/src/libcoretest/char.rs b/src/libcoretest/char.rs index c1f3ea42ef4f7..b4088ffbf89a9 100644 --- a/src/libcoretest/char.rs +++ b/src/libcoretest/char.rs @@ -8,6 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{char,str}; +use std::convert::TryFrom; + +#[test] +fn test_convert() { + assert_eq!(u32::from('a'), 0x61); + assert_eq!(char::from(b'\0'), '\0'); + assert_eq!(char::from(b'a'), 'a'); + assert_eq!(char::from(b'\xFF'), '\u{FF}'); + assert_eq!(char::try_from(0_u32), Ok('\0')); + assert_eq!(char::try_from(0x61_u32), Ok('a')); + assert_eq!(char::try_from(0xD7FF_u32), Ok('\u{D7FF}')); + assert!(char::try_from(0xD800_u32).is_err()); + assert!(char::try_from(0xDFFF_u32).is_err()); + assert_eq!(char::try_from(0xE000_u32), Ok('\u{E000}')); + assert_eq!(char::try_from(0x10FFFF_u32), Ok('\u{10FFFF}')); + assert!(char::try_from(0x110000_u32).is_err()); + assert!(char::try_from(0xFFFF_FFFF_u32).is_err()); +} + #[test] fn test_is_lowercase() { assert!('a'.is_lowercase()); @@ -121,6 +141,51 @@ fn test_is_digit() { assert!(!'Q'.is_numeric()); } +#[test] +fn test_escape_debug() { + fn string(c: char) -> String { + c.escape_debug().collect() + } + let s = string('\n'); + assert_eq!(s, "\\n"); + let s = string('\r'); + 
assert_eq!(s, "\\r"); + let s = string('\''); + assert_eq!(s, "\\'"); + let s = string('"'); + assert_eq!(s, "\\\""); + let s = string(' '); + assert_eq!(s, " "); + let s = string('a'); + assert_eq!(s, "a"); + let s = string('~'); + assert_eq!(s, "~"); + let s = string('é'); + assert_eq!(s, "é"); + let s = string('文'); + assert_eq!(s, "文"); + let s = string('\x00'); + assert_eq!(s, "\\u{0}"); + let s = string('\x1f'); + assert_eq!(s, "\\u{1f}"); + let s = string('\x7f'); + assert_eq!(s, "\\u{7f}"); + let s = string('\u{80}'); + assert_eq!(s, "\\u{80}"); + let s = string('\u{ff}'); + assert_eq!(s, "\u{ff}"); + let s = string('\u{11b}'); + assert_eq!(s, "\u{11b}"); + let s = string('\u{1d4b6}'); + assert_eq!(s, "\u{1d4b6}"); + let s = string('\u{200b}'); // zero width space + assert_eq!(s, "\\u{200b}"); + let s = string('\u{e000}'); // private use 1 + assert_eq!(s, "\\u{e000}"); + let s = string('\u{100000}'); // private use 2 + assert_eq!(s, "\\u{100000}"); +} + #[test] fn test_escape_default() { fn string(c: char) -> String { @@ -140,18 +205,28 @@ fn test_escape_default() { assert_eq!(s, "a"); let s = string('~'); assert_eq!(s, "~"); + let s = string('é'); + assert_eq!(s, "\\u{e9}"); let s = string('\x00'); assert_eq!(s, "\\u{0}"); let s = string('\x1f'); assert_eq!(s, "\\u{1f}"); let s = string('\x7f'); assert_eq!(s, "\\u{7f}"); + let s = string('\u{80}'); + assert_eq!(s, "\\u{80}"); let s = string('\u{ff}'); assert_eq!(s, "\\u{ff}"); let s = string('\u{11b}'); assert_eq!(s, "\\u{11b}"); let s = string('\u{1d4b6}'); assert_eq!(s, "\\u{1d4b6}"); + let s = string('\u{200b}'); // zero width space + assert_eq!(s, "\\u{200b}"); + let s = string('\u{e000}'); // private use 1 + assert_eq!(s, "\\u{e000}"); + let s = string('\u{100000}'); // private use 2 + assert_eq!(s, "\\u{100000}"); } #[test] @@ -176,8 +251,11 @@ fn test_escape_unicode() { fn test_encode_utf8() { fn check(input: char, expect: &[u8]) { let mut buf = [0; 4]; - let n = input.encode_utf8(&mut 
buf).unwrap_or(0); - assert_eq!(&buf[..n], expect); + let ptr = buf.as_ptr(); + let s = input.encode_utf8(&mut buf); + assert_eq!(s.as_ptr() as usize, ptr as usize); + assert!(str::from_utf8(s.as_bytes()).is_ok()); + assert_eq!(s.as_bytes(), expect); } check('x', &[0x78]); @@ -190,8 +268,10 @@ fn test_encode_utf8() { fn test_encode_utf16() { fn check(input: char, expect: &[u16]) { let mut buf = [0; 2]; - let n = input.encode_utf16(&mut buf).unwrap_or(0); - assert_eq!(&buf[..n], expect); + let ptr = buf.as_mut_ptr(); + let b = input.encode_utf16(&mut buf); + assert_eq!(b.as_mut_ptr() as usize, ptr as usize); + assert_eq!(b, expect); } check('x', &[0x0078]); @@ -211,7 +291,10 @@ fn test_len_utf16() { #[test] fn test_decode_utf16() { fn check(s: &[u16], expected: &[Result]) { - assert_eq!(::std::char::decode_utf16(s.iter().cloned()).collect::>(), expected); + let v = char::decode_utf16(s.iter().cloned()) + .map(|r| r.map_err(|e| e.unpaired_surrogate())) + .collect::>(); + assert_eq!(v, expected); } check(&[0xD800, 0x41, 0x42], &[Err(0xD800), Ok('A'), Ok('B')]); check(&[0xD800, 0], &[Err(0xD800), Ok('\0')]); @@ -255,4 +338,93 @@ fn ed_iterator_specializations() { assert_eq!('\''.escape_default().last(), Some('\'')); } +#[test] +fn eu_iterator_specializations() { + fn check(c: char) { + let len = c.escape_unicode().count(); + + // Check OoB + assert_eq!(c.escape_unicode().nth(len), None); + + // For all possible in-bound offsets + let mut iter = c.escape_unicode(); + for offset in 0..len { + // Check last + assert_eq!(iter.clone().last(), Some('}')); + + // Check len + assert_eq!(iter.len(), len - offset); + + // Check size_hint (= len in ExactSizeIterator) + assert_eq!(iter.size_hint(), (iter.len(), Some(iter.len()))); + + // Check counting + assert_eq!(iter.clone().count(), len - offset); + // Check nth + assert_eq!(c.escape_unicode().nth(offset), iter.next()); + } + + // Check post-last + assert_eq!(iter.clone().last(), None); + assert_eq!(iter.clone().count(), 0); + 
} + + check('\u{0}'); + check('\u{1}'); + check('\u{12}'); + check('\u{123}'); + check('\u{1234}'); + check('\u{12340}'); + check('\u{10FFFF}'); +} + +#[test] +fn test_decode_utf8() { + macro_rules! assert_decode_utf8 { + ($input_bytes: expr, $expected_str: expr) => { + let input_bytes: &[u8] = &$input_bytes; + let s = char::decode_utf8(input_bytes.iter().cloned()) + .map(|r_b| r_b.unwrap_or('\u{FFFD}')) + .collect::(); + assert_eq!(s, $expected_str, + "input bytes: {:?}, expected str: {:?}, result: {:?}", + input_bytes, $expected_str, s); + assert_eq!(String::from_utf8_lossy(&$input_bytes), $expected_str); + } + } + + assert_decode_utf8!([], ""); + assert_decode_utf8!([0x41], "A"); + assert_decode_utf8!([0xC1, 0x81], "��"); + assert_decode_utf8!([0xE2, 0x99, 0xA5], "♥"); + assert_decode_utf8!([0xE2, 0x99, 0xA5, 0x41], "♥A"); + assert_decode_utf8!([0xE2, 0x99], "�"); + assert_decode_utf8!([0xE2, 0x99, 0x41], "�A"); + assert_decode_utf8!([0xC0], "�"); + assert_decode_utf8!([0xC0, 0x41], "�A"); + assert_decode_utf8!([0x80], "�"); + assert_decode_utf8!([0x80, 0x41], "�A"); + assert_decode_utf8!([0xFE], "�"); + assert_decode_utf8!([0xFE, 0x41], "�A"); + assert_decode_utf8!([0xFF], "�"); + assert_decode_utf8!([0xFF, 0x41], "�A"); + assert_decode_utf8!([0xC0, 0x80], "��"); + + // Surrogates + assert_decode_utf8!([0xED, 0x9F, 0xBF], "\u{D7FF}"); + assert_decode_utf8!([0xED, 0xA0, 0x80], "���"); + assert_decode_utf8!([0xED, 0xBF, 0x80], "���"); + assert_decode_utf8!([0xEE, 0x80, 0x80], "\u{E000}"); + + // char::MAX + assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0xBF], "\u{10FFFF}"); + assert_decode_utf8!([0xF4, 0x8F, 0xBF, 0x41], "�A"); + assert_decode_utf8!([0xF4, 0x90, 0x80, 0x80], "����"); + + // 5 and 6 bytes sequence + // Part of the original design of UTF-8, + // but invalid now that UTF-8 is artificially restricted to match the range of UTF-16. 
+ assert_decode_utf8!([0xF8, 0x80, 0x80, 0x80, 0x80], "�����"); + assert_decode_utf8!([0xFC, 0x80, 0x80, 0x80, 0x80, 0x80], "������"); +} diff --git a/src/libcoretest/cmp.rs b/src/libcoretest/cmp.rs index 051356cad1640..e3c65ad8b33c0 100644 --- a/src/libcoretest/cmp.rs +++ b/src/libcoretest/cmp.rs @@ -41,6 +41,32 @@ fn test_ordering_order() { assert_eq!(Greater.cmp(&Less), Greater); } +#[test] +fn test_ordering_then() { + assert_eq!(Equal.then(Less), Less); + assert_eq!(Equal.then(Equal), Equal); + assert_eq!(Equal.then(Greater), Greater); + assert_eq!(Less.then(Less), Less); + assert_eq!(Less.then(Equal), Less); + assert_eq!(Less.then(Greater), Less); + assert_eq!(Greater.then(Less), Greater); + assert_eq!(Greater.then(Equal), Greater); + assert_eq!(Greater.then(Greater), Greater); +} + +#[test] +fn test_ordering_then_with() { + assert_eq!(Equal.then_with(|| Less), Less); + assert_eq!(Equal.then_with(|| Equal), Equal); + assert_eq!(Equal.then_with(|| Greater), Greater); + assert_eq!(Less.then_with(|| Less), Less); + assert_eq!(Less.then_with(|| Equal), Less); + assert_eq!(Less.then_with(|| Greater), Less); + assert_eq!(Greater.then_with(|| Less), Greater); + assert_eq!(Greater.then_with(|| Equal), Greater); + assert_eq!(Greater.then_with(|| Greater), Greater); +} + #[test] fn test_user_defined_eq() { // Our type. 
diff --git a/src/libcoretest/fmt/builders.rs b/src/libcoretest/fmt/builders.rs index 885ee3f9c3be2..e71e61bda5efd 100644 --- a/src/libcoretest/fmt/builders.rs +++ b/src/libcoretest/fmt/builders.rs @@ -53,7 +53,7 @@ mod debug_struct { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Foo") .field("bar", &true) - .field("baz", &format_args!("{}/{}", 10i32, 20i32)) + .field("baz", &format_args!("{}/{}", 10, 20)) .finish() } } @@ -75,7 +75,7 @@ mod debug_struct { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_struct("Foo") .field("bar", &true) - .field("baz", &format_args!("{}/{}", 10i32, 20i32)) + .field("baz", &format_args!("{}/{}", 10, 20)) .finish() } } @@ -150,7 +150,7 @@ mod debug_tuple { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("Foo") .field(&true) - .field(&format_args!("{}/{}", 10i32, 20i32)) + .field(&format_args!("{}/{}", 10, 20)) .finish() } } @@ -172,7 +172,7 @@ mod debug_tuple { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_tuple("Foo") .field(&true) - .field(&format_args!("{}/{}", 10i32, 20i32)) + .field(&format_args!("{}/{}", 10, 20)) .finish() } } @@ -247,7 +247,7 @@ mod debug_map { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_map() .entry(&"bar", &true) - .entry(&10i32, &format_args!("{}/{}", 10i32, 20i32)) + .entry(&10, &format_args!("{}/{}", 10, 20)) .finish() } } @@ -269,7 +269,7 @@ mod debug_map { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_map() .entry(&"bar", &true) - .entry(&10i32, &format_args!("{}/{}", 10i32, 20i32)) + .entry(&10, &format_args!("{}/{}", 10, 20)) .finish() } } @@ -348,7 +348,7 @@ mod debug_set { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_set() .entry(&true) - .entry(&format_args!("{}/{}", 10i32, 20i32)) + .entry(&format_args!("{}/{}", 10, 20)) .finish() } } @@ -370,7 +370,7 @@ mod debug_set { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { 
fmt.debug_set() .entry(&true) - .entry(&format_args!("{}/{}", 10i32, 20i32)) + .entry(&format_args!("{}/{}", 10, 20)) .finish() } } @@ -445,7 +445,7 @@ mod debug_list { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_list() .entry(&true) - .entry(&format_args!("{}/{}", 10i32, 20i32)) + .entry(&format_args!("{}/{}", 10, 20)) .finish() } } @@ -467,7 +467,7 @@ mod debug_list { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_list() .entry(&true) - .entry(&format_args!("{}/{}", 10i32, 20i32)) + .entry(&format_args!("{}/{}", 10, 20)) .finish() } } diff --git a/src/libcoretest/fmt/mod.rs b/src/libcoretest/fmt/mod.rs index 99ea39c619f77..ed33596e1c264 100644 --- a/src/libcoretest/fmt/mod.rs +++ b/src/libcoretest/fmt/mod.rs @@ -20,3 +20,11 @@ fn test_format_flags() { assert_eq!(format!("{: >3}", 'a'), " a"); } + +#[test] +fn test_pointer_formats_data_pointer() { + let b: &[u8] = b""; + let s: &str = ""; + assert_eq!(format!("{:p}", s), format!("{:p}", s.as_ptr())); + assert_eq!(format!("{:p}", b), format!("{:p}", b.as_ptr())); +} diff --git a/src/libcoretest/fmt/num.rs b/src/libcoretest/fmt/num.rs index 247c3dcb9c705..4ddedd9100486 100644 --- a/src/libcoretest/fmt/num.rs +++ b/src/libcoretest/fmt/num.rs @@ -7,7 +7,6 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use core::fmt::radix; #[test] fn test_format_int() { @@ -151,101 +150,3 @@ fn test_format_int_twos_complement() { assert!(format!("{}", i32::MIN) == "-2147483648"); assert!(format!("{}", i64::MIN) == "-9223372036854775808"); } - -#[test] -fn test_format_radix() { - assert!(format!("{:04}", radix(3, 2)) == "0011"); - assert!(format!("{}", radix(55, 36)) == "1j"); -} - -#[test] -#[should_panic] -fn test_radix_base_too_large() { - let _ = radix(55, 37); -} - -mod u32 { - use test::Bencher; - use core::fmt::radix; - use std::__rand::{thread_rng, Rng}; - use std::io::{Write, sink}; - - #[bench] - fn format_bin(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:b}", rng.gen::()) }) - } - - #[bench] - fn format_oct(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:o}", rng.gen::()) }) - } - - #[bench] - fn format_dec(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{}", rng.gen::()) }) - } - - #[bench] - fn format_hex(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:x}", rng.gen::()) }) - } - - #[bench] - fn format_show(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:?}", rng.gen::()) }) - } - - #[bench] - fn format_base_36(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{}", radix(rng.gen::(), 36)) }) - } -} - -mod i32 { - use test::Bencher; - use core::fmt::radix; - use std::__rand::{thread_rng, Rng}; - use std::io::{Write, sink}; - - #[bench] - fn format_bin(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:b}", rng.gen::()) }) - } - - #[bench] - fn format_oct(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:o}", rng.gen::()) }) - } - - #[bench] - fn format_dec(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{}", rng.gen::()) }) - } - - #[bench] - 
fn format_hex(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:x}", rng.gen::()) }) - } - - #[bench] - fn format_show(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{:?}", rng.gen::()) }) - } - - #[bench] - fn format_base_36(b: &mut Bencher) { - let mut rng = thread_rng(); - b.iter(|| { write!(&mut sink(), "{}", radix(rng.gen::(), 36)) }) - } -} diff --git a/src/libcoretest/hash/sip.rs b/src/libcoretest/hash/sip.rs index 9b6cedd25b741..fa3bfdea42df8 100644 --- a/src/libcoretest/hash/sip.rs +++ b/src/libcoretest/hash/sip.rs @@ -7,10 +7,14 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. + +#![allow(deprecated)] + use test::{Bencher, black_box}; use core::hash::{Hash, Hasher}; -use core::hash::SipHasher; +use core::hash::{SipHasher, SipHasher13, SipHasher24}; +use core::{slice, mem}; // Hash just the bytes of the slice, without length prefix struct Bytes<'a>(&'a [u8]); @@ -45,27 +49,117 @@ macro_rules! 
u8to64_le { }); } -fn hash(x: &T) -> u64 { - let mut st = SipHasher::new(); +fn hash_with(mut st: H, x: &T) -> u64 { x.hash(&mut st); st.finish() } -fn hash_with_keys(k1: u64, k2: u64, x: &T) -> u64 { - let mut st = SipHasher::new_with_keys(k1, k2); - x.hash(&mut st); - st.finish() +fn hash(x: &T) -> u64 { + hash_with(SipHasher::new(), x) } -fn hash_bytes(x: &[u8]) -> u64 { - let mut s = SipHasher::default(); +fn hash_bytes(mut s: H, x: &[u8]) -> u64 { Hasher::write(&mut s, x); s.finish() } #[test] #[allow(unused_must_use)] -fn test_siphash() { +fn test_siphash_1_3() { + let vecs : [[u8; 8]; 64] = [ + [ 0xdc, 0xc4, 0x0f, 0x05, 0x58, 0x01, 0xac, 0xab ], + [ 0x93, 0xca, 0x57, 0x7d, 0xf3, 0x9b, 0xf4, 0xc9 ], + [ 0x4d, 0xd4, 0xc7, 0x4d, 0x02, 0x9b, 0xcb, 0x82 ], + [ 0xfb, 0xf7, 0xdd, 0xe7, 0xb8, 0x0a, 0xf8, 0x8b ], + [ 0x28, 0x83, 0xd3, 0x88, 0x60, 0x57, 0x75, 0xcf ], + [ 0x67, 0x3b, 0x53, 0x49, 0x2f, 0xd5, 0xf9, 0xde ], + [ 0xa7, 0x22, 0x9f, 0xc5, 0x50, 0x2b, 0x0d, 0xc5 ], + [ 0x40, 0x11, 0xb1, 0x9b, 0x98, 0x7d, 0x92, 0xd3 ], + [ 0x8e, 0x9a, 0x29, 0x8d, 0x11, 0x95, 0x90, 0x36 ], + [ 0xe4, 0x3d, 0x06, 0x6c, 0xb3, 0x8e, 0xa4, 0x25 ], + [ 0x7f, 0x09, 0xff, 0x92, 0xee, 0x85, 0xde, 0x79 ], + [ 0x52, 0xc3, 0x4d, 0xf9, 0xc1, 0x18, 0xc1, 0x70 ], + [ 0xa2, 0xd9, 0xb4, 0x57, 0xb1, 0x84, 0xa3, 0x78 ], + [ 0xa7, 0xff, 0x29, 0x12, 0x0c, 0x76, 0x6f, 0x30 ], + [ 0x34, 0x5d, 0xf9, 0xc0, 0x11, 0xa1, 0x5a, 0x60 ], + [ 0x56, 0x99, 0x51, 0x2a, 0x6d, 0xd8, 0x20, 0xd3 ], + [ 0x66, 0x8b, 0x90, 0x7d, 0x1a, 0xdd, 0x4f, 0xcc ], + [ 0x0c, 0xd8, 0xdb, 0x63, 0x90, 0x68, 0xf2, 0x9c ], + [ 0x3e, 0xe6, 0x73, 0xb4, 0x9c, 0x38, 0xfc, 0x8f ], + [ 0x1c, 0x7d, 0x29, 0x8d, 0xe5, 0x9d, 0x1f, 0xf2 ], + [ 0x40, 0xe0, 0xcc, 0xa6, 0x46, 0x2f, 0xdc, 0xc0 ], + [ 0x44, 0xf8, 0x45, 0x2b, 0xfe, 0xab, 0x92, 0xb9 ], + [ 0x2e, 0x87, 0x20, 0xa3, 0x9b, 0x7b, 0xfe, 0x7f ], + [ 0x23, 0xc1, 0xe6, 0xda, 0x7f, 0x0e, 0x5a, 0x52 ], + [ 0x8c, 0x9c, 0x34, 0x67, 0xb2, 0xae, 0x64, 0xf4 ], + [ 0x79, 0x09, 0x5b, 0x70, 0x28, 0x59, 
0xcd, 0x45 ], + [ 0xa5, 0x13, 0x99, 0xca, 0xe3, 0x35, 0x3e, 0x3a ], + [ 0x35, 0x3b, 0xde, 0x4a, 0x4e, 0xc7, 0x1d, 0xa9 ], + [ 0x0d, 0xd0, 0x6c, 0xef, 0x02, 0xed, 0x0b, 0xfb ], + [ 0xf4, 0xe1, 0xb1, 0x4a, 0xb4, 0x3c, 0xd9, 0x88 ], + [ 0x63, 0xe6, 0xc5, 0x43, 0xd6, 0x11, 0x0f, 0x54 ], + [ 0xbc, 0xd1, 0x21, 0x8c, 0x1f, 0xdd, 0x70, 0x23 ], + [ 0x0d, 0xb6, 0xa7, 0x16, 0x6c, 0x7b, 0x15, 0x81 ], + [ 0xbf, 0xf9, 0x8f, 0x7a, 0xe5, 0xb9, 0x54, 0x4d ], + [ 0x3e, 0x75, 0x2a, 0x1f, 0x78, 0x12, 0x9f, 0x75 ], + [ 0x91, 0x6b, 0x18, 0xbf, 0xbe, 0xa3, 0xa1, 0xce ], + [ 0x06, 0x62, 0xa2, 0xad, 0xd3, 0x08, 0xf5, 0x2c ], + [ 0x57, 0x30, 0xc3, 0xa3, 0x2d, 0x1c, 0x10, 0xb6 ], + [ 0xa1, 0x36, 0x3a, 0xae, 0x96, 0x74, 0xf4, 0xb3 ], + [ 0x92, 0x83, 0x10, 0x7b, 0x54, 0x57, 0x6b, 0x62 ], + [ 0x31, 0x15, 0xe4, 0x99, 0x32, 0x36, 0xd2, 0xc1 ], + [ 0x44, 0xd9, 0x1a, 0x3f, 0x92, 0xc1, 0x7c, 0x66 ], + [ 0x25, 0x88, 0x13, 0xc8, 0xfe, 0x4f, 0x70, 0x65 ], + [ 0xa6, 0x49, 0x89, 0xc2, 0xd1, 0x80, 0xf2, 0x24 ], + [ 0x6b, 0x87, 0xf8, 0xfa, 0xed, 0x1c, 0xca, 0xc2 ], + [ 0x96, 0x21, 0x04, 0x9f, 0xfc, 0x4b, 0x16, 0xc2 ], + [ 0x23, 0xd6, 0xb1, 0x68, 0x93, 0x9c, 0x6e, 0xa1 ], + [ 0xfd, 0x14, 0x51, 0x8b, 0x9c, 0x16, 0xfb, 0x49 ], + [ 0x46, 0x4c, 0x07, 0xdf, 0xf8, 0x43, 0x31, 0x9f ], + [ 0xb3, 0x86, 0xcc, 0x12, 0x24, 0xaf, 0xfd, 0xc6 ], + [ 0x8f, 0x09, 0x52, 0x0a, 0xd1, 0x49, 0xaf, 0x7e ], + [ 0x9a, 0x2f, 0x29, 0x9d, 0x55, 0x13, 0xf3, 0x1c ], + [ 0x12, 0x1f, 0xf4, 0xa2, 0xdd, 0x30, 0x4a, 0xc4 ], + [ 0xd0, 0x1e, 0xa7, 0x43, 0x89, 0xe9, 0xfa, 0x36 ], + [ 0xe6, 0xbc, 0xf0, 0x73, 0x4c, 0xb3, 0x8f, 0x31 ], + [ 0x80, 0xe9, 0xa7, 0x70, 0x36, 0xbf, 0x7a, 0xa2 ], + [ 0x75, 0x6d, 0x3c, 0x24, 0xdb, 0xc0, 0xbc, 0xb4 ], + [ 0x13, 0x15, 0xb7, 0xfd, 0x52, 0xd8, 0xf8, 0x23 ], + [ 0x08, 0x8a, 0x7d, 0xa6, 0x4d, 0x5f, 0x03, 0x8f ], + [ 0x48, 0xf1, 0xe8, 0xb7, 0xe5, 0xd0, 0x9c, 0xd8 ], + [ 0xee, 0x44, 0xa6, 0xf7, 0xbc, 0xe6, 0xf4, 0xf6 ], + [ 0xf2, 0x37, 0x18, 0x0f, 0xd8, 0x9a, 0xc5, 0xae ], + [ 0xe0, 0x94, 0x66, 0x4b, 0x15, 0xf6, 
0xb2, 0xc3 ], + [ 0xa8, 0xb3, 0xbb, 0xb7, 0x62, 0x90, 0x19, 0x9d ] + ]; + + let k0 = 0x_07_06_05_04_03_02_01_00; + let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08; + let mut buf = Vec::new(); + let mut t = 0; + let mut state_inc = SipHasher13::new_with_keys(k0, k1); + + while t < 64 { + let vec = u8to64_le!(vecs[t], 0); + let out = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf)); + assert_eq!(vec, out); + + let full = hash_with(SipHasher13::new_with_keys(k0, k1), &Bytes(&buf)); + let i = state_inc.finish(); + + assert_eq!(full, i); + assert_eq!(full, vec); + + buf.push(t as u8); + Hasher::write(&mut state_inc, &[t as u8]); + + t += 1; + } +} + +#[test] +#[allow(unused_must_use)] +fn test_siphash_2_4() { let vecs : [[u8; 8]; 64] = [ [ 0x31, 0x0e, 0x0e, 0xdd, 0x47, 0xdb, 0x6f, 0x72, ], [ 0xfd, 0x67, 0xdc, 0x93, 0xc5, 0x39, 0xf8, 0x74, ], @@ -137,14 +231,14 @@ fn test_siphash() { let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08; let mut buf = Vec::new(); let mut t = 0; - let mut state_inc = SipHasher::new_with_keys(k0, k1); + let mut state_inc = SipHasher24::new_with_keys(k0, k1); while t < 64 { let vec = u8to64_le!(vecs[t], 0); - let out = hash_with_keys(k0, k1, &Bytes(&buf)); + let out = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf)); assert_eq!(vec, out); - let full = hash_with_keys(k0, k1, &Bytes(&buf)); + let full = hash_with(SipHasher24::new_with_keys(k0, k1), &Bytes(&buf)); let i = state_inc.finish(); assert_eq!(full, i); @@ -156,7 +250,6 @@ fn test_siphash() { t += 1; } } - #[test] #[cfg(target_arch = "arm")] fn test_hash_usize() { let val = 0xdeadbeef_deadbeef_u64; @@ -235,6 +328,26 @@ fn test_hash_no_concat_alias() { assert!(hash(&v) != hash(&w)); } +#[test] +fn test_write_short_works() { + let test_usize = 0xd0c0b0a0usize; + let mut h1 = SipHasher24::new(); + h1.write_usize(test_usize); + h1.write(b"bytes"); + h1.write(b"string"); + h1.write_u8(0xFFu8); + h1.write_u8(0x01u8); + let mut h2 = SipHasher24::new(); + h2.write(unsafe { + 
slice::from_raw_parts(&test_usize as *const _ as *const u8, + mem::size_of::()) + }); + h2.write(b"bytes"); + h2.write(b"string"); + h2.write(&[0xFFu8, 0x01u8]); + assert_eq!(h1.finish(), h2.finish()); +} + #[bench] fn bench_str_under_8_bytes(b: &mut Bencher) { let s = "foo"; @@ -289,7 +402,7 @@ fn bench_u32_keyed(b: &mut Bencher) { let k1 = black_box(0x1); let k2 = black_box(0x2); b.iter(|| { - hash_with_keys(k1, k2, &u) + hash_with(SipHasher::new_with_keys(k1, k2), &u) }); b.bytes = 8; } @@ -308,7 +421,7 @@ fn bench_u64(b: &mut Bencher) { fn bench_bytes_4(b: &mut Bencher) { let data = black_box([b' '; 4]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 4; } @@ -317,7 +430,7 @@ fn bench_bytes_4(b: &mut Bencher) { fn bench_bytes_7(b: &mut Bencher) { let data = black_box([b' '; 7]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 7; } @@ -326,7 +439,7 @@ fn bench_bytes_7(b: &mut Bencher) { fn bench_bytes_8(b: &mut Bencher) { let data = black_box([b' '; 8]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 8; } @@ -335,7 +448,7 @@ fn bench_bytes_8(b: &mut Bencher) { fn bench_bytes_a_16(b: &mut Bencher) { let data = black_box([b' '; 16]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 16; } @@ -344,7 +457,7 @@ fn bench_bytes_a_16(b: &mut Bencher) { fn bench_bytes_b_32(b: &mut Bencher) { let data = black_box([b' '; 32]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 32; } @@ -353,7 +466,7 @@ fn bench_bytes_b_32(b: &mut Bencher) { fn bench_bytes_c_128(b: &mut Bencher) { let data = black_box([b' '; 128]); b.iter(|| { - hash_bytes(&data) + hash_bytes(SipHasher::default(), &data) }); b.bytes = 128; } diff --git a/src/libcoretest/iter.rs b/src/libcoretest/iter.rs index ba308314e9e8f..274539dfa6699 100644 --- a/src/libcoretest/iter.rs +++ b/src/libcoretest/iter.rs @@ -13,6 
+13,7 @@ use core::{i8, i16, isize}; use core::usize; use test::Bencher; +use test::black_box; #[test] fn test_lt() { @@ -133,6 +134,19 @@ fn test_iterator_chain_count() { assert_eq!(zs.iter().chain(&ys).count(), 4); } +#[test] +fn test_iterator_chain_find() { + let xs = [0, 1, 2, 3, 4, 5]; + let ys = [30, 40, 50, 60]; + let mut iter = xs.iter().chain(&ys); + assert_eq!(iter.find(|&&i| i == 4), Some(&4)); + assert_eq!(iter.next(), Some(&5)); + assert_eq!(iter.find(|&&i| i == 40), Some(&40)); + assert_eq!(iter.next(), Some(&50)); + assert_eq!(iter.find(|&&i| i == 100), None); + assert_eq!(iter.next(), None); +} + #[test] fn test_filter_map() { let it = (0..).step_by(1).take(10) @@ -260,6 +274,74 @@ fn test_iterator_peekable_last() { let mut it = ys.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); assert_eq!(it.last(), Some(&0)); + + let mut it = ys.iter().peekable(); + assert_eq!(it.next(), Some(&0)); + assert_eq!(it.peek(), None); + assert_eq!(it.last(), None); +} + +/// This is an iterator that follows the Iterator contract, +/// but it is not fused. After having returned None once, it will start +/// producing elements if .next() is called again. 
+pub struct CycleIter<'a, T: 'a> { + index: usize, + data: &'a [T], +} + +pub fn cycle(data: &[T]) -> CycleIter { + CycleIter { + index: 0, + data: data, + } +} + +impl<'a, T> Iterator for CycleIter<'a, T> { + type Item = &'a T; + fn next(&mut self) -> Option { + let elt = self.data.get(self.index); + self.index += 1; + self.index %= 1 + self.data.len(); + elt + } +} + +#[test] +fn test_iterator_peekable_remember_peek_none_1() { + // Check that the loop using .peek() terminates + let data = [1, 2, 3]; + let mut iter = cycle(&data).peekable(); + + let mut n = 0; + while let Some(_) = iter.next() { + let is_the_last = iter.peek().is_none(); + assert_eq!(is_the_last, n == data.len() - 1); + n += 1; + if n > data.len() { break; } + } + assert_eq!(n, data.len()); +} + +#[test] +fn test_iterator_peekable_remember_peek_none_2() { + let data = [0]; + let mut iter = cycle(&data).peekable(); + iter.next(); + assert_eq!(iter.peek(), None); + assert_eq!(iter.last(), None); +} + +#[test] +fn test_iterator_peekable_remember_peek_none_3() { + let data = [0]; + let mut iter = cycle(&data).peekable(); + iter.peek(); + assert_eq!(iter.nth(0), Some(&0)); + + let mut iter = cycle(&data).peekable(); + iter.next(); + assert_eq!(iter.peek(), None); + assert_eq!(iter.nth(0), None); } #[test] @@ -303,6 +385,44 @@ fn test_iterator_skip() { assert_eq!(it.len(), 0); } +#[test] +fn test_iterator_skip_doubleended() { + let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; + let mut it = xs.iter().rev().skip(5); + assert_eq!(it.next(), Some(&15)); + assert_eq!(it.by_ref().rev().next(), Some(&0)); + assert_eq!(it.next(), Some(&13)); + assert_eq!(it.by_ref().rev().next(), Some(&1)); + assert_eq!(it.next(), Some(&5)); + assert_eq!(it.by_ref().rev().next(), Some(&2)); + assert_eq!(it.next(), Some(&3)); + assert_eq!(it.next(), None); + let mut it = xs.iter().rev().skip(5).rev(); + assert_eq!(it.next(), Some(&0)); + assert_eq!(it.rev().next(), Some(&15)); + let mut it_base = xs.iter(); + { + let mut 
it = it_base.by_ref().skip(5).rev(); + assert_eq!(it.next(), Some(&30)); + assert_eq!(it.next(), Some(&20)); + assert_eq!(it.next(), Some(&19)); + assert_eq!(it.next(), Some(&17)); + assert_eq!(it.next(), Some(&16)); + assert_eq!(it.next(), Some(&15)); + assert_eq!(it.next(), Some(&13)); + assert_eq!(it.next(), None); + } + // make sure the skipped parts have not been consumed + assert_eq!(it_base.next(), Some(&0)); + assert_eq!(it_base.next(), Some(&1)); + assert_eq!(it_base.next(), Some(&2)); + assert_eq!(it_base.next(), Some(&3)); + assert_eq!(it_base.next(), Some(&5)); + assert_eq!(it_base.next(), None); + let it = xs.iter().skip(5).rev(); + assert_eq!(it.last(), Some(&13)); +} + #[test] fn test_iterator_skip_nth() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; @@ -606,16 +726,28 @@ fn test_count() { assert_eq!(xs.iter().filter(|x| **x == 95).count(), 0); } +#[test] +fn test_max_by_key() { + let xs: &[isize] = &[-3, 0, 1, 5, -10]; + assert_eq!(*xs.iter().max_by_key(|x| x.abs()).unwrap(), -10); +} + #[test] fn test_max_by() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; - assert_eq!(*xs.iter().max_by(|x| x.abs()).unwrap(), -10); + assert_eq!(*xs.iter().max_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), -10); +} + +#[test] +fn test_min_by_key() { + let xs: &[isize] = &[-3, 0, 1, 5, -10]; + assert_eq!(*xs.iter().min_by_key(|x| x.abs()).unwrap(), 0); } #[test] fn test_min_by() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; - assert_eq!(*xs.iter().min_by(|x| x.abs()).unwrap(), 0); + assert_eq!(*xs.iter().min_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), 0); } #[test] @@ -639,7 +771,7 @@ fn test_rev() { #[test] fn test_cloned() { - let xs = [2u8, 4, 6, 8]; + let xs = [2, 4, 6, 8]; let mut it = xs.iter().cloned(); assert_eq!(it.len(), 4); @@ -823,8 +955,8 @@ fn test_range() { assert_eq!((-10..-1).size_hint(), (9, Some(9))); assert_eq!((-1..-10).size_hint(), (0, Some(0))); - assert_eq!((-70..58i8).size_hint(), (128, Some(128))); - assert_eq!((-128..127i8).size_hint(), (255, 
Some(255))); + assert_eq!((-70..58).size_hint(), (128, Some(128))); + assert_eq!((-128..127).size_hint(), (255, Some(255))); assert_eq!((-2..isize::MAX).size_hint(), (isize::MAX as usize + 2, Some(isize::MAX as usize + 2))); } @@ -851,15 +983,6 @@ fn test_range_step() { assert_eq!((isize::MIN..isize::MAX).step_by(1).size_hint(), (usize::MAX, Some(usize::MAX))); } -#[test] -fn test_peekable_is_empty() { - let a = [1]; - let mut it = a.iter().peekable(); - assert!( !it.is_empty() ); - it.next(); - assert!( it.is_empty() ); -} - #[test] fn test_repeat() { let mut it = repeat(42); @@ -930,6 +1053,18 @@ fn test_empty() { assert_eq!(it.next(), None); } +#[test] +fn test_chain_fold() { + let xs = [1, 2, 3]; + let ys = [1, 2, 0]; + + let mut iter = xs.iter().chain(&ys); + iter.next(); + let mut result = Vec::new(); + iter.fold((), |(), &elt| result.push(elt)); + assert_eq!(&[2, 3, 1, 2, 0], &result[..]); +} + #[bench] fn bench_rposition(b: &mut Bencher) { let it: Vec = (0..300).collect(); @@ -961,21 +1096,21 @@ fn bench_multiple_take(b: &mut Bencher) { fn scatter(x: i32) -> i32 { (x * 31) % 127 } #[bench] -fn bench_max_by(b: &mut Bencher) { +fn bench_max_by_key(b: &mut Bencher) { b.iter(|| { let it = 0..100; - it.max_by(|&x| scatter(x)) + it.max_by_key(|&x| scatter(x)) }) } // http://www.reddit.com/r/rust/comments/31syce/using_iterators_to_find_the_index_of_the_min_or/ #[bench] -fn bench_max_by2(b: &mut Bencher) { +fn bench_max_by_key2(b: &mut Bencher) { fn max_index_iter(array: &[i32]) -> usize { - array.iter().enumerate().max_by(|&(_, item)| item).unwrap().0 + array.iter().enumerate().max_by_key(|&(_, item)| item).unwrap().0 } - let mut data = vec![0i32; 1638]; + let mut data = vec![0; 1638]; data[514] = 9999; b.iter(|| max_index_iter(&data)); @@ -988,3 +1123,33 @@ fn bench_max(b: &mut Bencher) { it.map(scatter).max() }) } + +pub fn copy_zip(xs: &[u8], ys: &mut [u8]) { + for (a, b) in ys.iter_mut().zip(xs) { + *a = *b; + } +} + +pub fn add_zip(xs: &[f32], ys: &mut [f32]) 
{ + for (a, b) in ys.iter_mut().zip(xs) { + *a += *b; + } +} + +#[bench] +fn bench_zip_copy(b: &mut Bencher) { + let source = vec![0u8; 16 * 1024]; + let mut dst = black_box(vec![0u8; 16 * 1024]); + b.iter(|| { + copy_zip(&source, &mut dst) + }) +} + +#[bench] +fn bench_zip_add(b: &mut Bencher) { + let source = vec![1.; 16 * 1024]; + let mut dst = vec![0.; 16 * 1024]; + b.iter(|| { + add_zip(&source, &mut dst) + }); +} diff --git a/src/libcoretest/lib.rs b/src/libcoretest/lib.rs index 88f1835d2cce4..b8c01e570f509 100644 --- a/src/libcoretest/lib.rs +++ b/src/libcoretest/lib.rs @@ -8,42 +8,34 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(as_unsafe_cell)] +#![deny(warnings)] + #![feature(borrow_state)] #![feature(box_syntax)] #![feature(cell_extras)] +#![feature(char_escape_debug)] #![feature(const_fn)] -#![feature(core)] -#![feature(core_float)] #![feature(core_private_bignum)] #![feature(core_private_diy_float)] #![feature(dec2flt)] -#![feature(decode_utf16)] +#![feature(decode_utf8)] #![feature(fixed_size_array)] -#![feature(float_extras)] -#![feature(float_from_str_radix)] #![feature(flt2dec)] -#![feature(fmt_radix)] -#![feature(iter_arith)] -#![feature(iter_arith)] -#![feature(iter_cmp)] -#![feature(iter_order)] #![feature(libc)] #![feature(nonzero)] -#![feature(num_bits_bytes)] -#![feature(peekable_is_empty)] -#![feature(ptr_as_ref)] #![feature(rand)] -#![feature(range_inclusive)] #![feature(raw)] -#![feature(slice_bytes)] +#![feature(sip_hash_13)] #![feature(slice_patterns)] #![feature(step_by)] #![feature(test)] -#![feature(unboxed_closures)] +#![feature(try_from)] #![feature(unicode)] #![feature(unique)] -#![feature(clone_from_slice)] +#![feature(iter_max_by)] +#![feature(iter_min_by)] +#![feature(ordering_chaining)] +#![feature(result_unwrap_or_default)] extern crate core; extern crate test; diff --git a/src/libcoretest/mem.rs b/src/libcoretest/mem.rs index 5bc08376d257c..01bafe49a7acd 
100644 --- a/src/libcoretest/mem.rs +++ b/src/libcoretest/mem.rs @@ -18,6 +18,13 @@ fn size_of_basic() { assert_eq!(size_of::(), 8); } +#[test] +#[cfg(target_pointer_width = "16")] +fn size_of_16() { + assert_eq!(size_of::(), 2); + assert_eq!(size_of::<*const usize>(), 2); +} + #[test] #[cfg(target_pointer_width = "32")] fn size_of_32() { @@ -47,6 +54,13 @@ fn align_of_basic() { assert_eq!(align_of::(), 4); } +#[test] +#[cfg(target_pointer_width = "16")] +fn align_of_16() { + assert_eq!(align_of::(), 2); + assert_eq!(align_of::<*const usize>(), 2); +} + #[test] #[cfg(target_pointer_width = "32")] fn align_of_32() { diff --git a/src/libcoretest/num/dec2flt/mod.rs b/src/libcoretest/num/dec2flt/mod.rs index 7b25333e21ed2..fe6f52406fbc8 100644 --- a/src/libcoretest/num/dec2flt/mod.rs +++ b/src/libcoretest/num/dec2flt/mod.rs @@ -25,13 +25,11 @@ macro_rules! test_literal { let x64: f64 = $x; let inputs = &[stringify!($x).into(), format!("{:?}", x64), format!("{:e}", x64)]; for input in inputs { - if input != "inf" { - assert_eq!(input.parse(), Ok(x64)); - assert_eq!(input.parse(), Ok(x32)); - let neg_input = &format!("-{}", input); - assert_eq!(neg_input.parse(), Ok(-x64)); - assert_eq!(neg_input.parse(), Ok(-x32)); - } + assert_eq!(input.parse(), Ok(x64)); + assert_eq!(input.parse(), Ok(x32)); + let neg_input = &format!("-{}", input); + assert_eq!(neg_input.parse(), Ok(-x64)); + assert_eq!(neg_input.parse(), Ok(-x32)); } }) } @@ -136,6 +134,17 @@ fn massive_exponent() { assert_eq!(format!("1e{}000", max).parse(), Ok(f64::INFINITY)); } +#[test] +fn borderline_overflow() { + let mut s = "0.".to_string(); + for _ in 0..375 { + s.push('3'); + } + // At the time of this writing, this returns Err(..), but this is a bug that should be fixed. + // It makes no sense to enshrine that in a test, the important part is that it doesn't panic. 
+ let _ = s.parse::(); +} + #[bench] fn bench_0(b: &mut test::Bencher) { b.iter(|| "0.0".parse::()); diff --git a/src/libcoretest/num/dec2flt/rawfp.rs b/src/libcoretest/num/dec2flt/rawfp.rs index 4c0a403e574a3..1a3533317dae6 100644 --- a/src/libcoretest/num/dec2flt/rawfp.rs +++ b/src/libcoretest/num/dec2flt/rawfp.rs @@ -9,9 +9,24 @@ // except according to those terms. use std::f64; +use std::mem; use core::num::diy_float::Fp; use core::num::dec2flt::rawfp::{fp_to_float, prev_float, next_float, round_normal}; +fn integer_decode(f: f64) -> (u64, i16, i8) { + let bits: u64 = unsafe { mem::transmute(f) }; + let sign: i8 = if bits >> 63 == 0 { 1 } else { -1 }; + let mut exponent: i16 = ((bits >> 52) & 0x7ff) as i16; + let mantissa = if exponent == 0 { + (bits & 0xfffffffffffff) << 1 + } else { + (bits & 0xfffffffffffff) | 0x10000000000000 + }; + // Exponent bias + mantissa shift + exponent -= 1023 + 52; + (mantissa, exponent, sign) +} + #[test] fn fp_to_float_half_to_even() { fn is_normalized(sig: u64) -> bool { @@ -21,12 +36,12 @@ fn fp_to_float_half_to_even() { fn conv(sig: u64) -> u64 { // The significands are perfectly in range, so the exponent should not matter - let (m1, e1, _) = fp_to_float::(Fp { f: sig, e: 0 }).integer_decode(); + let (m1, e1, _) = integer_decode(fp_to_float::(Fp { f: sig, e: 0 })); assert_eq!(e1, 0 + 64 - 53); - let (m2, e2, _) = fp_to_float::(Fp { f: sig, e: 55 }).integer_decode(); + let (m2, e2, _) = integer_decode(fp_to_float::(Fp { f: sig, e: 55 })); assert_eq!(e2, 55 + 64 - 53); assert_eq!(m2, m1); - let (m3, e3, _) = fp_to_float::(Fp { f: sig, e: -78 }).integer_decode(); + let (m3, e3, _) = integer_decode(fp_to_float::(Fp { f: sig, e: -78 })); assert_eq!(e3, -78 + 64 - 53); assert_eq!(m3, m2); m3 @@ -65,7 +80,7 @@ const SOME_FLOATS: [f64; 9] = #[test] fn human_f64_roundtrip() { for &x in &SOME_FLOATS { - let (f, e, _) = x.integer_decode(); + let (f, e, _) = integer_decode(x); let fp = Fp { f: f, e: e}; assert_eq!(fp_to_float::(fp), x); } 
diff --git a/src/libcoretest/num/flt2dec/estimator.rs b/src/libcoretest/num/flt2dec/estimator.rs index 21260c520f623..0bca616ea9abc 100644 --- a/src/libcoretest/num/flt2dec/estimator.rs +++ b/src/libcoretest/num/flt2dec/estimator.rs @@ -8,7 +8,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::f64; +// FIXME https://github.com/kripken/emscripten/issues/4563 +// NB we have to actually not compile this test to avoid +// an undefined symbol error +#![cfg(not(target_os = "emscripten"))] + use core::num::flt2dec::estimator::*; #[test] @@ -54,7 +58,7 @@ fn test_estimate_scaling_factor() { assert_almost_eq!(estimate_scaling_factor(0x1fffffffffffff, 971), 309); for i in -1074..972 { - let expected = f64::ldexp(1.0, i).log10().ceil(); + let expected = super::ldexp_f64(1.0, i).log10().ceil(); assert_almost_eq!(estimate_scaling_factor(1, i as i16), expected as i16); } } diff --git a/src/libcoretest/num/flt2dec/mod.rs b/src/libcoretest/num/flt2dec/mod.rs index 48a5501acb791..0f4d19e709257 100644 --- a/src/libcoretest/num/flt2dec/mod.rs +++ b/src/libcoretest/num/flt2dec/mod.rs @@ -89,6 +89,17 @@ macro_rules! try_fixed { }) } +fn ldexp_f32(a: f32, b: i32) -> f32 { + ldexp_f64(a as f64, b) as f32 +} + +fn ldexp_f64(a: f64, b: i32) -> f64 { + extern { + fn ldexp(x: f64, n: i32) -> f64; + } + unsafe { ldexp(a, b) } +} + fn check_exact(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16) where T: DecodableFloat, F: FnMut(&Decoded, &mut [u8], i16) -> (usize, i16) { // use a large enough buffer @@ -100,7 +111,7 @@ fn check_exact(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16 // check significant digits for i in 1..cut.unwrap_or(expected.len() - 1) { - expected_[..i].clone_from_slice(&expected[..i]); + expected_[..i].copy_from_slice(&expected[..i]); let mut expectedk_ = expectedk; if expected[i] >= b'5' { // check if this is a rounding-to-even case. 
@@ -147,7 +158,7 @@ fn check_exact(mut f: F, v: T, vstr: &str, expected: &[u8], expectedk: i16 // check infinite zero digits if let Some(cut) = cut { for i in cut..expected.len()-1 { - expected_[..cut].clone_from_slice(&expected[..cut]); + expected_[..cut].copy_from_slice(&expected[..cut]); for c in &mut expected_[cut..i] { *c = b'0'; } try_exact!(f(&decoded) => &mut buf, &expected_[..i], expectedk; @@ -237,7 +248,7 @@ pub fn f32_shortest_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8]) // 10^8 * 0.3355443 // 10^8 * 0.33554432 // 10^8 * 0.33554436 - check_shortest!(f(f32::ldexp(1.0, 25)) => b"33554432", 8); + check_shortest!(f(ldexp_f32(1.0, 25)) => b"33554432", 8); // 10^39 * 0.340282326356119256160033759537265639424 // 10^39 * 0.34028234663852885981170418348451692544 @@ -252,13 +263,13 @@ pub fn f32_shortest_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8]) // 10^-44 * 0 // 10^-44 * 0.1401298464324817070923729583289916131280... // 10^-44 * 0.2802596928649634141847459166579832262560... - let minf32 = f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); check_shortest!(f(minf32) => b"1", -44); } pub fn f32_exact_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8], i16) -> (usize, i16) { - let minf32 = f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); check_exact!(f(0.1f32) => b"100000001490116119384765625 ", 0); check_exact!(f(0.5f32) => b"5 ", 0); @@ -336,7 +347,7 @@ pub fn f64_shortest_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8]) // 10^20 * 0.18446744073709549568 // 10^20 * 0.18446744073709551616 // 10^20 * 0.18446744073709555712 - check_shortest!(f(f64::ldexp(1.0, 64)) => b"18446744073709552", 20); + check_shortest!(f(ldexp_f64(1.0, 64)) => b"18446744073709552", 20); // pathological case: high = 10^23 (exact). tie breaking should always prefer that. 
// 10^24 * 0.099999999999999974834176 @@ -357,13 +368,13 @@ pub fn f64_shortest_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8]) // 10^-323 * 0 // 10^-323 * 0.4940656458412465441765687928682213723650... // 10^-323 * 0.9881312916824930883531375857364427447301... - let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); check_shortest!(f(minf64) => b"5", -323); } pub fn f64_exact_sanity_test(mut f: F) where F: FnMut(&Decoded, &mut [u8], i16) -> (usize, i16) { - let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); check_exact!(f(0.1f64) => b"1000000000000000055511151231257827021181", 0); check_exact!(f(0.45f64) => b"4500000000000000111022302462515654042363", 0); @@ -616,7 +627,7 @@ pub fn to_shortest_str_test(mut f_: F) assert_eq!(to_string(f, f32::MAX, Minus, 1, false), format!("34028235{:0>31}.0", "")); assert_eq!(to_string(f, f32::MAX, Minus, 8, false), format!("34028235{:0>31}.00000000", "")); - let minf32 = f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); assert_eq!(to_string(f, minf32, Minus, 0, false), format!("0.{:0>44}1", "")); assert_eq!(to_string(f, minf32, Minus, 45, false), format!("0.{:0>44}1", "")); assert_eq!(to_string(f, minf32, Minus, 46, false), format!("0.{:0>44}10", "")); @@ -628,7 +639,7 @@ pub fn to_shortest_str_test(mut f_: F) assert_eq!(to_string(f, f64::MAX, Minus, 8, false), format!("17976931348623157{:0>292}.00000000", "")); - let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); assert_eq!(to_string(f, minf64, Minus, 0, false), format!("0.{:0>323}5", "")); assert_eq!(to_string(f, minf64, Minus, 324, false), format!("0.{:0>323}5", "")); assert_eq!(to_string(f, minf64, Minus, 325, false), format!("0.{:0>323}50", "")); @@ -730,7 +741,7 @@ pub fn to_shortest_exp_str_test(mut f_: F) assert_eq!(to_string(f, f32::MAX, Minus, (-39, 38), false), "3.4028235e38"); assert_eq!(to_string(f, f32::MAX, Minus, (-38, 39), false), format!("34028235{:0>31}", "")); - let minf32 = 
f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); assert_eq!(to_string(f, minf32, Minus, ( -4, 16), false), "1e-45"); assert_eq!(to_string(f, minf32, Minus, (-44, 45), false), "1e-45"); assert_eq!(to_string(f, minf32, Minus, (-45, 44), false), format!("0.{:0>44}1", "")); @@ -742,7 +753,7 @@ pub fn to_shortest_exp_str_test(mut f_: F) assert_eq!(to_string(f, f64::MAX, Minus, (-309, 308), false), "1.7976931348623157e308"); - let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); assert_eq!(to_string(f, minf64, Minus, ( -4, 16), false), "5e-324"); assert_eq!(to_string(f, minf64, Minus, (-324, 323), false), format!("0.{:0>323}5", "")); assert_eq!(to_string(f, minf64, Minus, (-323, 324), false), "5e-324"); @@ -874,7 +885,7 @@ pub fn to_exact_exp_str_test(mut f_: F) assert_eq!(to_string(f, f32::MAX, Minus, 64, false), "3.402823466385288598117041834845169254400000000000000000000000000e38"); - let minf32 = f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); assert_eq!(to_string(f, minf32, Minus, 1, false), "1e-45"); assert_eq!(to_string(f, minf32, Minus, 2, false), "1.4e-45"); assert_eq!(to_string(f, minf32, Minus, 4, false), "1.401e-45"); @@ -914,7 +925,7 @@ pub fn to_exact_exp_str_test(mut f_: F) 0000000000000000000000000000000000000000000000000000000000000000e308"); // okay, this is becoming tough. fortunately for us, this is almost the worst case. 
- let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); assert_eq!(to_string(f, minf64, Minus, 1, false), "5e-324"); assert_eq!(to_string(f, minf64, Minus, 2, false), "4.9e-324"); assert_eq!(to_string(f, minf64, Minus, 4, false), "4.941e-324"); @@ -1120,7 +1131,7 @@ pub fn to_exact_fixed_str_test(mut f_: F) assert_eq!(to_string(f, f32::MAX, Minus, 2, false), "340282346638528859811704183484516925440.00"); - let minf32 = f32::ldexp(1.0, -149); + let minf32 = ldexp_f32(1.0, -149); assert_eq!(to_string(f, minf32, Minus, 0, false), "0"); assert_eq!(to_string(f, minf32, Minus, 1, false), "0.0"); assert_eq!(to_string(f, minf32, Minus, 2, false), "0.00"); @@ -1152,7 +1163,7 @@ pub fn to_exact_fixed_str_test(mut f_: F) 9440758685084551339423045832369032229481658085593321233482747978\ 26204144723168738177180919299881250404026184124858368.0000000000"); - let minf64 = f64::ldexp(1.0, -1074); + let minf64 = ldexp_f64(1.0, -1074); assert_eq!(to_string(f, minf64, Minus, 0, false), "0"); assert_eq!(to_string(f, minf64, Minus, 1, false), "0.0"); assert_eq!(to_string(f, minf64, Minus, 10, false), "0.0000000000"); diff --git a/src/libcoretest/num/flt2dec/strategy/dragon.rs b/src/libcoretest/num/flt2dec/strategy/dragon.rs index 79dcca7671a2d..08c2cd0a7326f 100644 --- a/src/libcoretest/num/flt2dec/strategy/dragon.rs +++ b/src/libcoretest/num/flt2dec/strategy/dragon.rs @@ -11,7 +11,6 @@ use std::prelude::v1::*; use std::{i16, f64}; use super::super::*; -use core::num::flt2dec::*; use core::num::bignum::Big32x40 as Big; use core::num::flt2dec::strategy::dragon::*; diff --git a/src/libcoretest/num/flt2dec/strategy/grisu.rs b/src/libcoretest/num/flt2dec/strategy/grisu.rs index 2d4afceda191f..311bd252353c7 100644 --- a/src/libcoretest/num/flt2dec/strategy/grisu.rs +++ b/src/libcoretest/num/flt2dec/strategy/grisu.rs @@ -10,7 +10,6 @@ use std::{i16, f64}; use super::super::*; -use core::num::flt2dec::*; use core::num::flt2dec::strategy::grisu::*; #[test] diff --git 
a/src/libcoretest/num/int_macros.rs b/src/libcoretest/num/int_macros.rs index b1c8aec3c35e9..8d791283ab87e 100644 --- a/src/libcoretest/num/int_macros.rs +++ b/src/libcoretest/num/int_macros.rs @@ -14,6 +14,8 @@ mod tests { use core::$T_i::*; use core::isize; use core::ops::{Shl, Shr, Not, BitXor, BitAnd, BitOr}; + use core::mem; + use num; #[test] @@ -85,9 +87,10 @@ mod tests { #[test] fn test_count_zeros() { - assert!(A.count_zeros() == BITS as u32 - 3); - assert!(B.count_zeros() == BITS as u32 - 2); - assert!(C.count_zeros() == BITS as u32 - 5); + let bits = mem::size_of::<$T>() * 8; + assert!(A.count_zeros() == bits as u32 - 3); + assert!(B.count_zeros() == bits as u32 - 2); + assert!(C.count_zeros() == bits as u32 - 5); } #[test] @@ -205,11 +208,11 @@ mod tests { fn test_pow() { let mut r = 2 as $T; - assert_eq!(r.pow(2u32), 4 as $T); - assert_eq!(r.pow(0u32), 1 as $T); + assert_eq!(r.pow(2), 4 as $T); + assert_eq!(r.pow(0), 1 as $T); r = -2 as $T; - assert_eq!(r.pow(2u32), 4 as $T); - assert_eq!(r.pow(3u32), -8 as $T); + assert_eq!(r.pow(2), 4 as $T); + assert_eq!(r.pow(3), -8 as $T); } } diff --git a/src/libcoretest/num/mod.rs b/src/libcoretest/num/mod.rs index fba56db32bb4c..4834c0e072c9e 100644 --- a/src/libcoretest/num/mod.rs +++ b/src/libcoretest/num/mod.rs @@ -8,10 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use core::convert::TryFrom; use core::cmp::PartialEq; use core::fmt::Debug; -use core::ops::{Add, Sub, Mul, Div, Rem}; use core::marker::Copy; +use core::ops::{Add, Sub, Mul, Div, Rem}; +use core::option::Option; +use core::option::Option::{Some, None}; #[macro_use] mod int_macros; @@ -48,169 +51,318 @@ pub fn test_num(ten: T, two: T) where assert_eq!(ten.rem(two), ten % two); } -#[cfg(test)] -mod tests { - use core::option::Option; - use core::option::Option::{Some, None}; - use core::num::Float; - - #[test] - fn from_str_issue7588() { - let u : Option = u8::from_str_radix("1000", 10).ok(); - assert_eq!(u, None); - let s : Option = i16::from_str_radix("80000", 10).ok(); - assert_eq!(s, None); - } +#[test] +fn from_str_issue7588() { + let u : Option = u8::from_str_radix("1000", 10).ok(); + assert_eq!(u, None); + let s : Option = i16::from_str_radix("80000", 10).ok(); + assert_eq!(s, None); +} - #[test] - fn test_int_from_str_overflow() { - let mut i8_val: i8 = 127; - assert_eq!("127".parse::().ok(), Some(i8_val)); - assert_eq!("128".parse::().ok(), None); +#[test] +fn test_int_from_str_overflow() { + let mut i8_val: i8 = 127; + assert_eq!("127".parse::().ok(), Some(i8_val)); + assert_eq!("128".parse::().ok(), None); - i8_val = i8_val.wrapping_add(1); - assert_eq!("-128".parse::().ok(), Some(i8_val)); - assert_eq!("-129".parse::().ok(), None); + i8_val = i8_val.wrapping_add(1); + assert_eq!("-128".parse::().ok(), Some(i8_val)); + assert_eq!("-129".parse::().ok(), None); - let mut i16_val: i16 = 32_767; - assert_eq!("32767".parse::().ok(), Some(i16_val)); - assert_eq!("32768".parse::().ok(), None); + let mut i16_val: i16 = 32_767; + assert_eq!("32767".parse::().ok(), Some(i16_val)); + assert_eq!("32768".parse::().ok(), None); - i16_val = i16_val.wrapping_add(1); - assert_eq!("-32768".parse::().ok(), Some(i16_val)); - assert_eq!("-32769".parse::().ok(), None); + i16_val = i16_val.wrapping_add(1); + assert_eq!("-32768".parse::().ok(), Some(i16_val)); + 
assert_eq!("-32769".parse::().ok(), None); - let mut i32_val: i32 = 2_147_483_647; - assert_eq!("2147483647".parse::().ok(), Some(i32_val)); - assert_eq!("2147483648".parse::().ok(), None); + let mut i32_val: i32 = 2_147_483_647; + assert_eq!("2147483647".parse::().ok(), Some(i32_val)); + assert_eq!("2147483648".parse::().ok(), None); - i32_val = i32_val.wrapping_add(1); - assert_eq!("-2147483648".parse::().ok(), Some(i32_val)); - assert_eq!("-2147483649".parse::().ok(), None); + i32_val = i32_val.wrapping_add(1); + assert_eq!("-2147483648".parse::().ok(), Some(i32_val)); + assert_eq!("-2147483649".parse::().ok(), None); - let mut i64_val: i64 = 9_223_372_036_854_775_807; - assert_eq!("9223372036854775807".parse::().ok(), Some(i64_val)); - assert_eq!("9223372036854775808".parse::().ok(), None); + let mut i64_val: i64 = 9_223_372_036_854_775_807; + assert_eq!("9223372036854775807".parse::().ok(), Some(i64_val)); + assert_eq!("9223372036854775808".parse::().ok(), None); - i64_val = i64_val.wrapping_add(1); - assert_eq!("-9223372036854775808".parse::().ok(), Some(i64_val)); - assert_eq!("-9223372036854775809".parse::().ok(), None); - } + i64_val = i64_val.wrapping_add(1); + assert_eq!("-9223372036854775808".parse::().ok(), Some(i64_val)); + assert_eq!("-9223372036854775809".parse::().ok(), None); +} + +#[test] +fn test_leading_plus() { + assert_eq!("+127".parse::().ok(), Some(127)); + assert_eq!("+9223372036854775807".parse::().ok(), Some(9223372036854775807)); +} - #[test] - fn test_leading_plus() { - assert_eq!("+127".parse::().ok(), Some(127u8)); - assert_eq!("+9223372036854775807".parse::().ok(), Some(9223372036854775807i64)); +#[test] +fn test_invalid() { + assert_eq!("--129".parse::().ok(), None); + assert_eq!("++129".parse::().ok(), None); + assert_eq!("Съешь".parse::().ok(), None); +} + +#[test] +fn test_empty() { + assert_eq!("-".parse::().ok(), None); + assert_eq!("+".parse::().ok(), None); + assert_eq!("".parse::().ok(), None); +} + +macro_rules! 
test_impl_from { + ($fn_name: ident, $Small: ty, $Large: ty) => { + #[test] + fn $fn_name() { + let small_max = <$Small>::max_value(); + let small_min = <$Small>::min_value(); + let large_max: $Large = small_max.into(); + let large_min: $Large = small_min.into(); + assert_eq!(large_max as $Small, small_max); + assert_eq!(large_min as $Small, small_min); + } } +} + +// Unsigned -> Unsigned +test_impl_from! { test_u8u16, u8, u16 } +test_impl_from! { test_u8u32, u8, u32 } +test_impl_from! { test_u8u64, u8, u64 } +test_impl_from! { test_u8usize, u8, usize } +test_impl_from! { test_u16u32, u16, u32 } +test_impl_from! { test_u16u64, u16, u64 } +test_impl_from! { test_u32u64, u32, u64 } + +// Signed -> Signed +test_impl_from! { test_i8i16, i8, i16 } +test_impl_from! { test_i8i32, i8, i32 } +test_impl_from! { test_i8i64, i8, i64 } +test_impl_from! { test_i8isize, i8, isize } +test_impl_from! { test_i16i32, i16, i32 } +test_impl_from! { test_i16i64, i16, i64 } +test_impl_from! { test_i32i64, i32, i64 } + +// Unsigned -> Signed +test_impl_from! { test_u8i16, u8, i16 } +test_impl_from! { test_u8i32, u8, i32 } +test_impl_from! { test_u8i64, u8, i64 } +test_impl_from! { test_u16i32, u16, i32 } +test_impl_from! { test_u16i64, u16, i64 } +test_impl_from! { test_u32i64, u32, i64 } + +// Signed -> Float +test_impl_from! { test_i8f32, i8, f32 } +test_impl_from! { test_i8f64, i8, f64 } +test_impl_from! { test_i16f32, i16, f32 } +test_impl_from! { test_i16f64, i16, f64 } +test_impl_from! { test_i32f64, i32, f64 } + +// Unsigned -> Float +test_impl_from! { test_u8f32, u8, f32 } +test_impl_from! { test_u8f64, u8, f64 } +test_impl_from! { test_u16f32, u16, f32 } +test_impl_from! { test_u16f64, u16, f64 } +test_impl_from! 
{ test_u32f64, u32, f64 } + +// Float -> Float +#[test] +fn test_f32f64() { + use core::f32; + + let max: f64 = f32::MAX.into(); + assert_eq!(max as f32, f32::MAX); + assert!(max.is_normal()); + + let min: f64 = f32::MIN.into(); + assert_eq!(min as f32, f32::MIN); + assert!(min.is_normal()); + + let min_positive: f64 = f32::MIN_POSITIVE.into(); + assert_eq!(min_positive as f32, f32::MIN_POSITIVE); + assert!(min_positive.is_normal()); + + let epsilon: f64 = f32::EPSILON.into(); + assert_eq!(epsilon as f32, f32::EPSILON); + assert!(epsilon.is_normal()); + + let zero: f64 = (0.0f32).into(); + assert_eq!(zero as f32, 0.0f32); + assert!(zero.is_sign_positive()); + + let neg_zero: f64 = (-0.0f32).into(); + assert_eq!(neg_zero as f32, -0.0f32); + assert!(neg_zero.is_sign_negative()); + + let infinity: f64 = f32::INFINITY.into(); + assert_eq!(infinity as f32, f32::INFINITY); + assert!(infinity.is_infinite()); + assert!(infinity.is_sign_positive()); + + let neg_infinity: f64 = f32::NEG_INFINITY.into(); + assert_eq!(neg_infinity as f32, f32::NEG_INFINITY); + assert!(neg_infinity.is_infinite()); + assert!(neg_infinity.is_sign_negative()); + + let nan: f64 = f32::NAN.into(); + assert!(nan.is_nan()); +} - #[test] - fn test_invalid() { - assert_eq!("--129".parse::().ok(), None); - assert_eq!("++129".parse::().ok(), None); - assert_eq!("Съешь".parse::().ok(), None); +macro_rules! 
test_impl_try_from_always_ok { + ($fn_name:ident, $source:ty, $target: ty) => { + #[test] + fn $fn_name() { + let max = <$source>::max_value(); + let min = <$source>::min_value(); + let zero: $source = 0; + assert_eq!(<$target as TryFrom<$source>>::try_from(max).unwrap(), + max as $target); + assert_eq!(<$target as TryFrom<$source>>::try_from(min).unwrap(), + min as $target); + assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), + zero as $target); + } } +} - #[test] - fn test_empty() { - assert_eq!("-".parse::().ok(), None); - assert_eq!("+".parse::().ok(), None); - assert_eq!("".parse::().ok(), None); +test_impl_try_from_always_ok! { test_try_u8u8, u8, u8 } +test_impl_try_from_always_ok! { test_try_u8u16, u8, u16 } +test_impl_try_from_always_ok! { test_try_u8u32, u8, u32 } +test_impl_try_from_always_ok! { test_try_u8u64, u8, u64 } +test_impl_try_from_always_ok! { test_try_u8i16, u8, i16 } +test_impl_try_from_always_ok! { test_try_u8i32, u8, i32 } +test_impl_try_from_always_ok! { test_try_u8i64, u8, i64 } + +test_impl_try_from_always_ok! { test_try_u16u16, u16, u16 } +test_impl_try_from_always_ok! { test_try_u16u32, u16, u32 } +test_impl_try_from_always_ok! { test_try_u16u64, u16, u64 } +test_impl_try_from_always_ok! { test_try_u16i32, u16, i32 } +test_impl_try_from_always_ok! { test_try_u16i64, u16, i64 } + +test_impl_try_from_always_ok! { test_try_u32u32, u32, u32 } +test_impl_try_from_always_ok! { test_try_u32u64, u32, u64 } +test_impl_try_from_always_ok! { test_try_u32i64, u32, i64 } + +test_impl_try_from_always_ok! { test_try_u64u64, u64, u64 } + +test_impl_try_from_always_ok! { test_try_i8i8, i8, i8 } +test_impl_try_from_always_ok! { test_try_i8i16, i8, i16 } +test_impl_try_from_always_ok! { test_try_i8i32, i8, i32 } +test_impl_try_from_always_ok! { test_try_i8i64, i8, i64 } + +test_impl_try_from_always_ok! { test_try_i16i16, i16, i16 } +test_impl_try_from_always_ok! { test_try_i16i32, i16, i32 } +test_impl_try_from_always_ok! 
{ test_try_i16i64, i16, i64 } + +test_impl_try_from_always_ok! { test_try_i32i32, i32, i32 } +test_impl_try_from_always_ok! { test_try_i32i64, i32, i64 } + +test_impl_try_from_always_ok! { test_try_i64i64, i64, i64 } + +macro_rules! test_impl_try_from_signed_to_unsigned_upper_ok { + ($fn_name:ident, $source:ty, $target:ty) => { + #[test] + fn $fn_name() { + let max = <$source>::max_value(); + let min = <$source>::min_value(); + let zero: $source = 0; + let neg_one: $source = -1; + assert_eq!(<$target as TryFrom<$source>>::try_from(max).unwrap(), + max as $target); + assert!(<$target as TryFrom<$source>>::try_from(min).is_err()); + assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), + zero as $target); + assert!(<$target as TryFrom<$source>>::try_from(neg_one).is_err()); + } } +} - macro_rules! test_impl_from { - ($fn_name: ident, $Small: ty, $Large: ty) => { - #[test] - fn $fn_name() { - let small_max = <$Small>::max_value(); - let small_min = <$Small>::min_value(); - let large_max: $Large = small_max.into(); - let large_min: $Large = small_min.into(); - assert_eq!(large_max as $Small, small_max); - assert_eq!(large_min as $Small, small_min); - } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u8, i8, u8 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u16, i8, u16 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u32, i8, u32 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i8u64, i8, u64 } + +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u16, i16, u16 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u32, i16, u32 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i16u64, i16, u64 } + +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u32, i32, u32 } +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i32u64, i32, u64 } + +test_impl_try_from_signed_to_unsigned_upper_ok! { test_try_i64u64, i64, u64 } + +macro_rules! 
test_impl_try_from_unsigned_to_signed_upper_err { + ($fn_name:ident, $source:ty, $target:ty) => { + #[test] + fn $fn_name() { + let max = <$source>::max_value(); + let min = <$source>::min_value(); + let zero: $source = 0; + assert!(<$target as TryFrom<$source>>::try_from(max).is_err()); + assert_eq!(<$target as TryFrom<$source>>::try_from(min).unwrap(), + min as $target); + assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), + zero as $target); } } +} - // Unsigned -> Unsigned - test_impl_from! { test_u8u16, u8, u16 } - test_impl_from! { test_u8u32, u8, u32 } - test_impl_from! { test_u8u64, u8, u64 } - test_impl_from! { test_u8usize, u8, usize } - test_impl_from! { test_u16u32, u16, u32 } - test_impl_from! { test_u16u64, u16, u64 } - test_impl_from! { test_u32u64, u32, u64 } - - // Signed -> Signed - test_impl_from! { test_i8i16, i8, i16 } - test_impl_from! { test_i8i32, i8, i32 } - test_impl_from! { test_i8i64, i8, i64 } - test_impl_from! { test_i8isize, i8, isize } - test_impl_from! { test_i16i32, i16, i32 } - test_impl_from! { test_i16i64, i16, i64 } - test_impl_from! { test_i32i64, i32, i64 } - - // Unsigned -> Signed - test_impl_from! { test_u8i16, u8, i16 } - test_impl_from! { test_u8i32, u8, i32 } - test_impl_from! { test_u8i64, u8, i64 } - test_impl_from! { test_u16i32, u16, i32 } - test_impl_from! { test_u16i64, u16, i64 } - test_impl_from! { test_u32i64, u32, i64 } - - // Signed -> Float - test_impl_from! { test_i8f32, i8, f32 } - test_impl_from! { test_i8f64, i8, f64 } - test_impl_from! { test_i16f32, i16, f32 } - test_impl_from! { test_i16f64, i16, f64 } - test_impl_from! { test_i32f64, i32, f64 } - - // Unsigned -> Float - test_impl_from! { test_u8f32, u8, f32 } - test_impl_from! { test_u8f64, u8, f64 } - test_impl_from! { test_u16f32, u16, f32 } - test_impl_from! { test_u16f64, u16, f64 } - test_impl_from! 
{ test_u32f64, u32, f64 } - - // Float -> Float - #[test] - fn test_f32f64() { - use core::f32; - - let max: f64 = f32::MAX.into(); - assert_eq!(max as f32, f32::MAX); - assert!(max.is_normal()); - - let min: f64 = f32::MIN.into(); - assert_eq!(min as f32, f32::MIN); - assert!(min.is_normal()); - - let min_positive: f64 = f32::MIN_POSITIVE.into(); - assert_eq!(min_positive as f32, f32::MIN_POSITIVE); - assert!(min_positive.is_normal()); - - let epsilon: f64 = f32::EPSILON.into(); - assert_eq!(epsilon as f32, f32::EPSILON); - assert!(epsilon.is_normal()); - - let zero: f64 = (0.0f32).into(); - assert_eq!(zero as f32, 0.0f32); - assert!(zero.is_sign_positive()); - - let neg_zero: f64 = (-0.0f32).into(); - assert_eq!(neg_zero as f32, -0.0f32); - assert!(neg_zero.is_sign_negative()); - - let infinity: f64 = f32::INFINITY.into(); - assert_eq!(infinity as f32, f32::INFINITY); - assert!(infinity.is_infinite()); - assert!(infinity.is_sign_positive()); - - let neg_infinity: f64 = f32::NEG_INFINITY.into(); - assert_eq!(neg_infinity as f32, f32::NEG_INFINITY); - assert!(neg_infinity.is_infinite()); - assert!(neg_infinity.is_sign_negative()); - - let nan: f64 = f32::NAN.into(); - assert!(nan.is_nan()); +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u8i8, u8, i8 } + +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i8, u16, i8 } +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u16i16, u16, i16 } + +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i8, u32, i8 } +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i16, u32, i16 } +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u32i32, u32, i32 } + +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i8, u64, i8 } +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i16, u64, i16 } +test_impl_try_from_unsigned_to_signed_upper_err! { test_try_u64i32, u64, i32 } +test_impl_try_from_unsigned_to_signed_upper_err! 
{ test_try_u64i64, u64, i64 } + +macro_rules! test_impl_try_from_same_sign_err { + ($fn_name:ident, $source:ty, $target:ty) => { + #[test] + fn $fn_name() { + let max = <$source>::max_value(); + let min = <$source>::min_value(); + let zero: $source = 0; + let t_max = <$target>::max_value(); + let t_min = <$target>::min_value(); + assert!(<$target as TryFrom<$source>>::try_from(max).is_err()); + if min != 0 { + assert!(<$target as TryFrom<$source>>::try_from(min).is_err()); + } + assert_eq!(<$target as TryFrom<$source>>::try_from(zero).unwrap(), + zero as $target); + assert_eq!(<$target as TryFrom<$source>>::try_from(t_max as $source) + .unwrap(), + t_max as $target); + assert_eq!(<$target as TryFrom<$source>>::try_from(t_min as $source) + .unwrap(), + t_min as $target); + } } } + +test_impl_try_from_same_sign_err! { test_try_u16u8, u16, u8 } + +test_impl_try_from_same_sign_err! { test_try_u32u8, u32, u8 } +test_impl_try_from_same_sign_err! { test_try_u32u16, u32, u16 } + +test_impl_try_from_same_sign_err! { test_try_u64u8, u64, u8 } +test_impl_try_from_same_sign_err! { test_try_u64u16, u64, u16 } +test_impl_try_from_same_sign_err! { test_try_u64u32, u64, u32 } + +test_impl_try_from_same_sign_err! { test_try_i16i8, i16, i8 } + +test_impl_try_from_same_sign_err! { test_try_i32i8, i32, i8 } +test_impl_try_from_same_sign_err! { test_try_i32i16, i32, i16 } + +test_impl_try_from_same_sign_err! { test_try_i64i8, i64, i8 } +test_impl_try_from_same_sign_err! { test_try_i64i16, i64, i16 } +test_impl_try_from_same_sign_err! 
{ test_try_i64i32, i64, i32 } diff --git a/src/libcoretest/num/uint_macros.rs b/src/libcoretest/num/uint_macros.rs index 25591db64d907..daa1cc3a7f4fb 100644 --- a/src/libcoretest/num/uint_macros.rs +++ b/src/libcoretest/num/uint_macros.rs @@ -15,6 +15,7 @@ mod tests { use num; use core::ops::{BitOr, BitAnd, BitXor, Shl, Shr, Not}; use std::str::FromStr; + use std::mem; #[test] fn test_overflows() { @@ -54,9 +55,10 @@ mod tests { #[test] fn test_count_zeros() { - assert!(A.count_zeros() == BITS as u32 - 3); - assert!(B.count_zeros() == BITS as u32 - 2); - assert!(C.count_zeros() == BITS as u32 - 5); + let bits = mem::size_of::<$T>() * 8; + assert!(A.count_zeros() == bits as u32 - 3); + assert!(B.count_zeros() == bits as u32 - 2); + assert!(C.count_zeros() == bits as u32 - 5); } #[test] diff --git a/src/libcoretest/option.rs b/src/libcoretest/option.rs index 3e564cf197061..51b0655f680f6 100644 --- a/src/libcoretest/option.rs +++ b/src/libcoretest/option.rs @@ -251,7 +251,7 @@ fn test_collect() { #[test] fn test_cloned() { - let val = 1u32; + let val = 1; let val_ref = &val; let opt_none: Option<&'static u32> = None; let opt_ref = Some(&val); @@ -263,10 +263,10 @@ fn test_cloned() { // Immutable ref works assert_eq!(opt_ref.clone(), Some(&val)); - assert_eq!(opt_ref.cloned(), Some(1u32)); + assert_eq!(opt_ref.cloned(), Some(1)); // Double Immutable ref works assert_eq!(opt_ref_ref.clone(), Some(&val_ref)); assert_eq!(opt_ref_ref.clone().cloned(), Some(&val)); - assert_eq!(opt_ref_ref.cloned().cloned(), Some(1u32)); + assert_eq!(opt_ref_ref.cloned().cloned(), Some(1)); } diff --git a/src/libcoretest/ptr.rs b/src/libcoretest/ptr.rs index 343db93d4a970..f7fe61896f85e 100644 --- a/src/libcoretest/ptr.rs +++ b/src/libcoretest/ptr.rs @@ -171,3 +171,21 @@ fn test_unsized_unique() { let zs: &mut [i32] = &mut [1, 2, 3]; assert!(ys == zs); } + +#[test] +#[allow(warnings)] +// Have a symbol for the test below. 
It doesn’t need to be an actual variadic function, match the +// ABI, or even point to an actual executable code, because the function itself is never invoked. +#[no_mangle] +pub fn test_variadic_fnptr() { + use core::hash::{Hash, SipHasher}; + extern { + fn test_variadic_fnptr(_: u64, ...) -> f64; + } + let p: unsafe extern fn(u64, ...) -> f64 = test_variadic_fnptr; + let q = p.clone(); + assert_eq!(p, q); + assert!(!(p < q)); + let mut s = SipHasher::new(); + assert_eq!(p.hash(&mut s), q.hash(&mut s)); +} diff --git a/src/libcoretest/result.rs b/src/libcoretest/result.rs index 6e9f653dcd8ac..bc2cd8bbfc651 100644 --- a/src/libcoretest/result.rs +++ b/src/libcoretest/result.rs @@ -183,3 +183,9 @@ pub fn test_iter_mut() { } assert_eq!(err, Err("error")); } + +#[test] +pub fn test_unwrap_or_default() { + assert_eq!(op1().unwrap_or_default(), 666); + assert_eq!(op2().unwrap_or_default(), 0); +} diff --git a/src/libcoretest/slice.rs b/src/libcoretest/slice.rs index d60eeb76ccd4a..ad39e6b081b42 100644 --- a/src/libcoretest/slice.rs +++ b/src/libcoretest/slice.rs @@ -11,24 +11,20 @@ use core::result::Result::{Ok, Err}; #[test] -fn binary_search_not_found() { +fn test_binary_search() { let b = [1, 2, 4, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&6)) == Ok(3)); - let b = [1, 2, 4, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&5)) == Err(3)); let b = [1, 2, 4, 6, 7, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&6)) == Ok(3)); - let b = [1, 2, 4, 6, 7, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&5)) == Err(3)); let b = [1, 2, 4, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&8)) == Ok(4)); - let b = [1, 2, 4, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&7)) == Err(4)); let b = [1, 2, 4, 6, 7, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&8)) == Ok(5)); let b = [1, 2, 4, 5, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&7)) == Err(5)); - let b = [1, 2, 4, 5, 6, 8, 9]; assert!(b.binary_search_by(|v| v.cmp(&0)) == Err(0)); let b = [1, 2, 4, 5, 6, 8]; 
assert!(b.binary_search_by(|v| v.cmp(&9)) == Err(6)); @@ -184,3 +180,47 @@ fn test_windows_last() { let c2 = v2.windows(2); assert_eq!(c2.last().unwrap()[0], 3); } + +#[test] +fn get_range() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + assert_eq!(v.get(..), Some(&[0, 1, 2, 3, 4, 5][..])); + assert_eq!(v.get(..2), Some(&[0, 1][..])); + assert_eq!(v.get(2..), Some(&[2, 3, 4, 5][..])); + assert_eq!(v.get(1..4), Some(&[1, 2, 3][..])); + assert_eq!(v.get(7..), None); + assert_eq!(v.get(7..10), None); +} + +#[test] +fn get_mut_range() { + let mut v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + assert_eq!(v.get_mut(..), Some(&mut [0, 1, 2, 3, 4, 5][..])); + assert_eq!(v.get_mut(..2), Some(&mut [0, 1][..])); + assert_eq!(v.get_mut(2..), Some(&mut [2, 3, 4, 5][..])); + assert_eq!(v.get_mut(1..4), Some(&mut [1, 2, 3][..])); + assert_eq!(v.get_mut(7..), None); + assert_eq!(v.get_mut(7..10), None); +} + +#[test] +fn get_unchecked_range() { + unsafe { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + assert_eq!(v.get_unchecked(..), &[0, 1, 2, 3, 4, 5][..]); + assert_eq!(v.get_unchecked(..2), &[0, 1][..]); + assert_eq!(v.get_unchecked(2..), &[2, 3, 4, 5][..]); + assert_eq!(v.get_unchecked(1..4), &[1, 2, 3][..]); + } +} + +#[test] +fn get_unchecked_mut_range() { + unsafe { + let v: &mut [i32] = &mut [0, 1, 2, 3, 4, 5]; + assert_eq!(v.get_unchecked_mut(..), &mut [0, 1, 2, 3, 4, 5][..]); + assert_eq!(v.get_unchecked_mut(..2), &mut [0, 1][..]); + assert_eq!(v.get_unchecked_mut(2..), &mut[2, 3, 4, 5][..]); + assert_eq!(v.get_unchecked_mut(1..4), &mut [1, 2, 3][..]); + } +} diff --git a/src/libflate/Cargo.toml b/src/libflate/Cargo.toml new file mode 100644 index 0000000000000..5423da9c81c02 --- /dev/null +++ b/src/libflate/Cargo.toml @@ -0,0 +1,14 @@ +[package] +authors = ["The Rust Project Developers"] +name = "flate" +version = "0.0.0" +build = "build.rs" + +[lib] +name = "flate" +path = "lib.rs" +crate-type = ["dylib"] + +[build-dependencies] +build_helper = { path = "../build_helper" } +gcc = 
"0.3.27" diff --git a/src/libflate/build.rs b/src/libflate/build.rs new file mode 100644 index 0000000000000..245c705dfcc2a --- /dev/null +++ b/src/libflate/build.rs @@ -0,0 +1,18 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate gcc; + +fn main() { + println!("cargo:rustc-cfg=cargobuild"); + gcc::Config::new() + .file("../rt/miniz.c") + .compile("libminiz.a"); +} diff --git a/src/libflate/lib.rs b/src/libflate/lib.rs index a60a1c67e175d..3c608ef9c9268 100644 --- a/src/libflate/lib.rs +++ b/src/libflate/lib.rs @@ -22,19 +22,16 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(libc)] #![feature(staged_api)] #![feature(unique)] -#![cfg_attr(test, feature(rustc_private, rand, vec_push_all))] - -#[cfg(test)] -#[macro_use] -extern crate log; +#![cfg_attr(test, feature(rand))] extern crate libc; -use libc::{c_void, size_t, c_int}; +use libc::{c_int, c_void, size_t}; use std::fmt; use std::ops::Deref; use std::ptr::Unique; @@ -78,6 +75,9 @@ impl Drop for Bytes { } #[link(name = "miniz", kind = "static")] +#[cfg(not(cargobuild))] +extern "C" {} + extern "C" { /// Raw miniz compression function. 
fn tdefl_compress_mem_to_heap(psrc_buf: *const c_void, @@ -94,11 +94,14 @@ extern "C" { -> *mut c_void; } -const LZ_NORM: c_int = 0x80; // LZ with 128 probes, "normal" -const TINFL_FLAG_PARSE_ZLIB_HEADER: c_int = 0x1; // parse zlib header and adler32 checksum -const TDEFL_WRITE_ZLIB_HEADER: c_int = 0x01000; // write zlib header and adler32 checksum +const LZ_FAST: c_int = 0x01; // LZ with 1 probe, "fast" +const TDEFL_GREEDY_PARSING_FLAG: c_int = 0x04000; // fast greedy parsing instead of lazy parsing -fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> Bytes { +/// Compress a buffer without writing any sort of header on the output. Fast +/// compression is used because it is almost twice as fast as default +/// compression and the compression ratio is only marginally worse. +pub fn deflate_bytes(bytes: &[u8]) -> Bytes { + let flags = LZ_FAST | TDEFL_GREEDY_PARSING_FLAG; unsafe { let mut outsz: size_t = 0; let res = tdefl_compress_mem_to_heap(bytes.as_ptr() as *const _, @@ -113,17 +116,9 @@ fn deflate_bytes_internal(bytes: &[u8], flags: c_int) -> Bytes { } } -/// Compress a buffer, without writing any sort of header on the output. -pub fn deflate_bytes(bytes: &[u8]) -> Bytes { - deflate_bytes_internal(bytes, LZ_NORM) -} - -/// Compress a buffer, using a header that zlib can understand. -pub fn deflate_bytes_zlib(bytes: &[u8]) -> Bytes { - deflate_bytes_internal(bytes, LZ_NORM | TDEFL_WRITE_ZLIB_HEADER) -} - -fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> Result { +/// Decompress a buffer without parsing any sort of header on the input. +pub fn inflate_bytes(bytes: &[u8]) -> Result { + let flags = 0; unsafe { let mut outsz: size_t = 0; let res = tinfl_decompress_mem_to_heap(bytes.as_ptr() as *const _, @@ -141,21 +136,11 @@ fn inflate_bytes_internal(bytes: &[u8], flags: c_int) -> Result { } } -/// Decompress a buffer, without parsing any sort of header on the input. 
-pub fn inflate_bytes(bytes: &[u8]) -> Result { - inflate_bytes_internal(bytes, 0) -} - -/// Decompress a buffer that starts with a zlib header. -pub fn inflate_bytes_zlib(bytes: &[u8]) -> Result { - inflate_bytes_internal(bytes, TINFL_FLAG_PARSE_ZLIB_HEADER) -} - #[cfg(test)] mod tests { #![allow(deprecated)] - use super::{inflate_bytes, deflate_bytes}; - use std::__rand::{thread_rng, Rng}; + use super::{deflate_bytes, inflate_bytes}; + use std::__rand::{Rng, thread_rng}; #[test] fn test_flate_round_trip() { @@ -169,16 +154,10 @@ mod tests { for _ in 0..20 { let mut input = vec![]; for _ in 0..2000 { - input.push_all(r.choose(&words).unwrap()); + input.extend_from_slice(r.choose(&words).unwrap()); } - debug!("de/inflate of {} bytes of random word-sequences", - input.len()); let cmp = deflate_bytes(&input); let out = inflate_bytes(&cmp).unwrap(); - debug!("{} bytes deflated to {} ({:.1}% size)", - input.len(), - cmp.len(), - 100.0 * ((cmp.len() as f64) / (input.len() as f64))); assert_eq!(&*input, &*out); } } diff --git a/src/libfmt_macros/Cargo.toml b/src/libfmt_macros/Cargo.toml new file mode 100644 index 0000000000000..b3f4d2deae2fc --- /dev/null +++ b/src/libfmt_macros/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "fmt_macros" +version = "0.0.0" + +[lib] +name = "fmt_macros" +path = "lib.rs" +crate-type = ["dylib"] diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 7a229ad522227..b179a16e55e58 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -23,6 +23,7 @@ html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(staged_api)] #![feature(unicode)] @@ -79,8 +80,6 @@ pub struct FormatSpec<'a> { /// Enum describing where an argument for a format can be located. 
#[derive(Copy, Clone, PartialEq)] pub enum Position<'a> { - /// The argument will be in the next position. This is the default. - ArgumentNext, /// The argument is located at a specific index. ArgumentIs(usize), /// The argument has a name. @@ -126,8 +125,6 @@ pub enum Count<'a> { CountIsName(&'a str), /// The count is specified by the argument at the given index. CountIsParam(usize), - /// The count is specified by the next parameter. - CountIsNextParam, /// The count is implied and cannot be explicitly specified. CountImplied, } @@ -142,7 +139,9 @@ pub struct Parser<'a> { input: &'a str, cur: iter::Peekable>, /// Error messages accumulated during parsing - pub errors: Vec, + pub errors: Vec<(string::String, Option)>, + /// Current position of implicit positional argument pointer + curarg: usize, } impl<'a> Iterator for Parser<'a> { @@ -166,7 +165,9 @@ impl<'a> Iterator for Parser<'a> { if self.consume('}') { Some(String(self.string(pos + 1))) } else { - self.err("unmatched `}` found"); + self.err_with_note("unmatched `}` found", + "if you intended to print `}`, \ + you can escape it using `}}`"); None } } @@ -185,6 +186,7 @@ impl<'a> Parser<'a> { input: s, cur: s.char_indices().peekable(), errors: vec![], + curarg: 0, } } @@ -192,7 +194,14 @@ impl<'a> Parser<'a> { /// String, but I think it does when this eventually uses conditions so it /// might as well start using it now. fn err(&mut self, msg: &str) { - self.errors.push(msg.to_owned()); + self.errors.push((msg.to_owned(), None)); + } + + /// Notifies of an error. The message doesn't actually need to be of type + /// String, but I think it does when this eventually uses conditions so it + /// might as well start using it now. + fn err_with_note(&mut self, msg: &str, note: &str) { + self.errors.push((msg.to_owned(), Some(note.to_owned()))); } /// Optionally consumes the specified character. 
If the character is not at @@ -222,7 +231,13 @@ impl<'a> Parser<'a> { self.err(&format!("expected `{:?}`, found `{:?}`", c, maybe)); } } else { - self.err(&format!("expected `{:?}` but string was terminated", c)); + let msg = &format!("expected `{:?}` but string was terminated", c); + if c == '}' { + self.err_with_note(msg, + "if you intended to print `{`, you can escape it using `{{`"); + } else { + self.err(msg); + } } } @@ -258,21 +273,40 @@ impl<'a> Parser<'a> { /// Parses an Argument structure, or what's contained within braces inside /// the format string fn argument(&mut self) -> Argument<'a> { + let pos = self.position(); + let format = self.format(); + + // Resolve position after parsing format spec. + let pos = match pos { + Some(position) => position, + None => { + let i = self.curarg; + self.curarg += 1; + ArgumentIs(i) + } + }; + Argument { - position: self.position(), - format: self.format(), + position: pos, + format: format, } } /// Parses a positional argument for a format. This could either be an /// integer index of an argument, a named argument, or a blank string. - fn position(&mut self) -> Position<'a> { + /// Returns `Some(parsed_position)` if the position is not implicitly + /// consuming a macro argument, `None` if it's the case. + fn position(&mut self) -> Option> { if let Some(i) = self.integer() { - ArgumentIs(i) + Some(ArgumentIs(i)) } else { match self.cur.peek() { - Some(&(_, c)) if c.is_alphabetic() => ArgumentNamed(self.word()), - _ => ArgumentNext, + Some(&(_, c)) if c.is_alphabetic() => Some(ArgumentNamed(self.word())), + + // This is an `ArgumentNext`. + // Record the fact and do the resolution after parsing the + // format spec, to make things like `{:.*}` work. + _ => None, } } } @@ -339,7 +373,11 @@ impl<'a> Parser<'a> { } if self.consume('.') { if self.consume('*') { - spec.precision = CountIsNextParam; + // Resolve `CountIsNextParam`. + // We can do this immediately as `position` is resolved later. 
+ let i = self.curarg; + self.curarg += 1; + spec.precision = CountIsParam(i); } else { spec.precision = self.count(); } @@ -486,7 +524,7 @@ mod tests { fn format_nothing() { same("{}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: fmtdflt(), })]); } @@ -564,7 +602,7 @@ mod tests { fn format_counts() { same("{:10s}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, @@ -576,7 +614,7 @@ mod tests { })]); same("{:10$.10s}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, @@ -588,19 +626,19 @@ mod tests { })]); same("{:.*s}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(1), format: FormatSpec { fill: None, align: AlignUnknown, flags: 0, - precision: CountIsNextParam, + precision: CountIsParam(0), width: CountImplied, ty: "s", }, })]); same("{:.10$s}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, @@ -612,7 +650,7 @@ mod tests { })]); same("{:a$.b$s}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, @@ -627,7 +665,7 @@ mod tests { fn format_flags() { same("{:-}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, @@ -639,7 +677,7 @@ mod tests { })]); same("{:+#}", &[NextArgument(Argument { - position: ArgumentNext, + position: ArgumentIs(0), format: FormatSpec { fill: None, align: AlignUnknown, diff --git a/src/libgetopts/Cargo.toml b/src/libgetopts/Cargo.toml new file mode 100644 index 0000000000000..99e3b89285888 --- /dev/null +++ b/src/libgetopts/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "getopts" +version = "0.0.0" + +[lib] +name = 
"getopts" +path = "lib.rs" +crate-type = ["dylib", "rlib"] diff --git a/src/libgetopts/lib.rs b/src/libgetopts/lib.rs index 57ce53e73b025..4d2f1b999a2ae 100644 --- a/src/libgetopts/lib.rs +++ b/src/libgetopts/lib.rs @@ -91,12 +91,6 @@ #![deny(missing_docs)] #![feature(staged_api)] -#![feature(str_char)] -#![cfg_attr(test, feature(rustc_private))] - -#[cfg(test)] -#[macro_use] -extern crate log; use self::Name::*; use self::HasArg::*; @@ -228,7 +222,7 @@ pub type Result = result::Result; impl Name { fn from_str(nm: &str) -> Name { if nm.len() == 1 { - Short(nm.char_at(0)) + Short(nm.chars().next().unwrap()) } else { Long(nm.to_owned()) } @@ -266,7 +260,7 @@ impl OptGroup { } (1, 0) => { Opt { - name: Short(short_name.char_at(0)), + name: Short(short_name.chars().next().unwrap()), hasarg: hasarg, occur: occur, aliases: Vec::new(), @@ -278,14 +272,14 @@ impl OptGroup { hasarg: hasarg, occur: occur, aliases: vec![Opt { - name: Short(short_name.char_at(0)), + name: Short(short_name.chars().next().unwrap()), hasarg: hasarg, occur: occur, aliases: Vec::new(), }], } } - (_, _) => panic!("something is wrong with the long-form opt"), + _ => panic!("something is wrong with the long-form opt"), } } } @@ -331,9 +325,8 @@ impl Matches { /// Returns the string argument supplied to one of several matching options or `None`. pub fn opts_str(&self, names: &[String]) -> Option { for nm in names { - match self.opt_val(&nm[..]) { - Some(Val(ref s)) => return Some(s.clone()), - _ => (), + if let Some(Val(ref s)) = self.opt_val(&nm[..]) { + return Some(s.clone()) } } None @@ -605,7 +598,7 @@ pub fn getopts(args: &[String], optgrps: &[OptGroup]) -> Result { let mut j = 1; names = Vec::new(); while j < curlen { - let ch = cur.char_at(j); + let ch = cur[j..].chars().next().unwrap(); let opt = Short(ch); // In a series of potential options (eg. 
-aheJ), if we @@ -1545,8 +1538,6 @@ Options: let generated_usage = usage("Usage: fruits", &optgroups); - debug!("expected: <<{}>>", expected); - debug!("generated: <<{}>>", generated_usage); assert_eq!(generated_usage, expected); } @@ -1574,8 +1565,6 @@ Options: let usage = usage("Usage: fruits", &optgroups); - debug!("expected: <<{}>>", expected); - debug!("generated: <<{}>>", usage); assert!(usage == expected) } @@ -1602,8 +1591,6 @@ Options: let usage = usage("Usage: fruits", &optgroups); - debug!("expected: <<{}>>", expected); - debug!("generated: <<{}>>", usage); assert!(usage == expected) } @@ -1618,15 +1605,13 @@ Options: let expected = "Usage: fruits -b VAL [-a VAL] [-k] [-p [VAL]] [-l VAL]..".to_string(); let generated_usage = short_usage("fruits", &optgroups); - debug!("expected: <<{}>>", expected); - debug!("generated: <<{}>>", generated_usage); assert_eq!(generated_usage, expected); } #[test] fn test_args_with_equals() { - let args = vec!("--one".to_string(), "A=B".to_string(), - "--two=C=D".to_string()); + let args = vec!["--one".to_string(), "A=B".to_string(), + "--two=C=D".to_string()]; let opts = vec![optopt("o", "one", "One", "INFO"), optopt("t", "two", "Two", "INFO")]; let matches = &match getopts(&args, &opts) { diff --git a/src/libgraphviz/Cargo.toml b/src/libgraphviz/Cargo.toml new file mode 100644 index 0000000000000..76ef3a1d188ce --- /dev/null +++ b/src/libgraphviz/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "graphviz" +version = "0.0.0" + +[lib] +name = "graphviz" +path = "lib.rs" +crate-type = ["dylib"] diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index 38b45ec0feaed..220051c9d35ea 100644 --- a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -58,11 +58,13 @@ //! struct Edges(Vec); //! //! pub fn render_to(output: &mut W) { -//! let edges = Edges(vec!((0,1), (0,2), (1,3), (2,3), (3,4), (4,4))); +//! let edges = Edges(vec![(0,1), (0,2), (1,3), (2,3), (3,4), (4,4)]); //! 
dot::render(&edges, output).unwrap() //! } //! -//! impl<'a> dot::Labeller<'a, Nd, Ed> for Edges { +//! impl<'a> dot::Labeller<'a> for Edges { +//! type Node = Nd; +//! type Edge = Ed; //! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() } //! //! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { @@ -70,7 +72,9 @@ //! } //! } //! -//! impl<'a> dot::GraphWalk<'a, Nd, Ed> for Edges { +//! impl<'a> dot::GraphWalk<'a> for Edges { +//! type Node = Nd; +//! type Edge = Ed; //! fn nodes(&self) -> dot::Nodes<'a,Nd> { //! // (assumes that |N| \approxeq |E|) //! let &Edges(ref v) = self; @@ -160,14 +164,16 @@ //! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> } //! //! pub fn render_to(output: &mut W) { -//! let nodes = vec!("{x,y}","{x}","{y}","{}"); -//! let edges = vec!((0,1), (0,2), (1,3), (2,3)); +//! let nodes = vec!["{x,y}","{x}","{y}","{}"]; +//! let edges = vec![(0,1), (0,2), (1,3), (2,3)]; //! let graph = Graph { nodes: nodes, edges: edges }; //! //! dot::render(&graph, output).unwrap() //! } //! -//! impl<'a> dot::Labeller<'a, Nd, Ed<'a>> for Graph { +//! impl<'a> dot::Labeller<'a> for Graph { +//! type Node = Nd; +//! type Edge = Ed<'a>; //! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() } //! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> { //! dot::Id::new(format!("N{}", n)).unwrap() @@ -180,7 +186,9 @@ //! } //! } //! -//! impl<'a> dot::GraphWalk<'a, Nd, Ed<'a>> for Graph { +//! impl<'a> dot::GraphWalk<'a> for Graph { +//! type Node = Nd; +//! type Edge = Ed<'a>; //! fn nodes(&self) -> dot::Nodes<'a,Nd> { (0..self.nodes.len()).collect() } //! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() } //! fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s } @@ -218,14 +226,16 @@ //! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> } //! //! pub fn render_to(output: &mut W) { -//! let nodes = vec!("{x,y}","{x}","{y}","{}"); -//! 
let edges = vec!((0,1), (0,2), (1,3), (2,3)); +//! let nodes = vec!["{x,y}","{x}","{y}","{}"]; +//! let edges = vec![(0,1), (0,2), (1,3), (2,3)]; //! let graph = Graph { nodes: nodes, edges: edges }; //! //! dot::render(&graph, output).unwrap() //! } //! -//! impl<'a> dot::Labeller<'a, Nd<'a>, Ed<'a>> for Graph { +//! impl<'a> dot::Labeller<'a> for Graph { +//! type Node = Nd<'a>; +//! type Edge = Ed<'a>; //! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() } //! fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> { //! dot::Id::new(format!("N{}", n.0)).unwrap() @@ -239,7 +249,9 @@ //! } //! } //! -//! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph { +//! impl<'a> dot::GraphWalk<'a> for Graph { +//! type Node = Nd<'a>; +//! type Edge = Ed<'a>; //! fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> { //! self.nodes.iter().map(|s| &s[..]).enumerate().collect() //! } @@ -280,6 +292,7 @@ html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(allow(unused_variables), deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(str_escape)] @@ -446,45 +459,48 @@ impl<'a> Id<'a> { /// The graph instance is responsible for providing the DOT compatible /// identifiers for the nodes and (optionally) rendered labels for the nodes and /// edges, as well as an identifier for the graph itself. -pub trait Labeller<'a,N,E> { +pub trait Labeller<'a> { + type Node; + type Edge; + /// Must return a DOT compatible identifier naming the graph. fn graph_id(&'a self) -> Id<'a>; /// Maps `n` to a unique identifier with respect to `self`. The /// implementor is responsible for ensuring that the returned name /// is a valid DOT identifier. - fn node_id(&'a self, n: &N) -> Id<'a>; + fn node_id(&'a self, n: &Self::Node) -> Id<'a>; /// Maps `n` to one of the [graphviz `shape` names][1]. If `None` /// is returned, no `shape` attribute is specified. 
/// /// [1]: http://www.graphviz.org/content/node-shapes - fn node_shape(&'a self, _node: &N) -> Option> { + fn node_shape(&'a self, _node: &Self::Node) -> Option> { None } /// Maps `n` to a label that will be used in the rendered output. /// The label need not be unique, and may be the empty string; the /// default is just the output from `node_id`. - fn node_label(&'a self, n: &N) -> LabelText<'a> { + fn node_label(&'a self, n: &Self::Node) -> LabelText<'a> { LabelStr(self.node_id(n).name) } /// Maps `e` to a label that will be used in the rendered output. /// The label need not be unique, and may be the empty string; the /// default is in fact the empty string. - fn edge_label(&'a self, e: &E) -> LabelText<'a> { + fn edge_label(&'a self, e: &Self::Edge) -> LabelText<'a> { let _ignored = e; LabelStr("".into_cow()) } /// Maps `n` to a style that will be used in the rendered output. - fn node_style(&'a self, _n: &N) -> Style { + fn node_style(&'a self, _n: &Self::Node) -> Style { Style::None } /// Maps `e` to a style that will be used in the rendered output. - fn edge_style(&'a self, _e: &E) -> Style { + fn edge_style(&'a self, _e: &Self::Edge) -> Style { Style::None } } @@ -595,15 +611,18 @@ pub type Edges<'a,E> = Cow<'a,[E]>; /// `Cow<[T]>` to leave implementors the freedom to create /// entirely new vectors or to pass back slices into internally owned /// vectors. -pub trait GraphWalk<'a, N: Clone, E: Clone> { +pub trait GraphWalk<'a> { + type Node: Clone; + type Edge: Clone; + /// Returns all the nodes in this graph. - fn nodes(&'a self) -> Nodes<'a, N>; + fn nodes(&'a self) -> Nodes<'a, Self::Node>; /// Returns all of the edges in this graph. - fn edges(&'a self) -> Edges<'a, E>; + fn edges(&'a self) -> Edges<'a, Self::Edge>; /// The source node for `edge`. - fn source(&'a self, edge: &E) -> N; + fn source(&'a self, edge: &Self::Edge) -> Self::Node; /// The target node for `edge`. 
- fn target(&'a self, edge: &E) -> N; + fn target(&'a self, edge: &Self::Edge) -> Self::Node; } #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -621,31 +640,29 @@ pub fn default_options() -> Vec { /// Renders directed graph `g` into the writer `w` in DOT syntax. /// (Simple wrapper around `render_opts` that passes a default set of options.) -pub fn render<'a, - N: Clone + 'a, - E: Clone + 'a, - G: Labeller<'a, N, E> + GraphWalk<'a, N, E>, - W: Write> - (g: &'a G, - w: &mut W) - -> io::Result<()> { +pub fn render<'a,N,E,G,W>(g: &'a G, w: &mut W) -> io::Result<()> + where N: Clone + 'a, + E: Clone + 'a, + G: Labeller<'a, Node=N, Edge=E> + GraphWalk<'a, Node=N, Edge=E>, + W: Write +{ render_opts(g, w, &[]) } /// Renders directed graph `g` into the writer `w` in DOT syntax. /// (Main entry point for the library.) -pub fn render_opts<'a, - N: Clone + 'a, - E: Clone + 'a, - G: Labeller<'a, N, E> + GraphWalk<'a, N, E>, - W: Write> - (g: &'a G, - w: &mut W, - options: &[RenderOption]) - -> io::Result<()> { +pub fn render_opts<'a, N, E, G, W>(g: &'a G, + w: &mut W, + options: &[RenderOption]) + -> io::Result<()> + where N: Clone + 'a, + E: Clone + 'a, + G: Labeller<'a, Node=N, Edge=E> + GraphWalk<'a, Node=N, Edge=E>, + W: Write +{ fn writeln(w: &mut W, arg: &[&str]) -> io::Result<()> { for &s in arg { - try!(w.write_all(s.as_bytes())); + w.write_all(s.as_bytes())?; } write!(w, "\n") } @@ -654,9 +671,9 @@ pub fn render_opts<'a, w.write_all(b" ") } - try!(writeln(w, &["digraph ", g.graph_id().as_slice(), " {"])); + writeln(w, &["digraph ", g.graph_id().as_slice(), " {"])?; for n in g.nodes().iter() { - try!(indent(w)); + indent(w)?; let id = g.node_id(n); let escaped = &g.node_label(n).to_dot_string(); @@ -685,12 +702,12 @@ pub fn render_opts<'a, } text.push(";"); - try!(writeln(w, &text)); + writeln(w, &text)?; } for e in g.edges().iter() { let escaped_label = &g.edge_label(e).to_dot_string(); - try!(indent(w)); + indent(w)?; let source = g.source(e); let target = 
g.target(e); let source_id = g.node_id(&source); @@ -712,7 +729,7 @@ pub fn render_opts<'a, } text.push(";"); - try!(writeln(w, &text)); + writeln(w, &text)?; } writeln(w, &["}"]) @@ -857,7 +874,9 @@ mod tests { Id::new(format!("N{}", *n)).unwrap() } - impl<'a> Labeller<'a, Node, &'a Edge> for LabelledGraph { + impl<'a> Labeller<'a> for LabelledGraph { + type Node = Node; + type Edge = &'a Edge; fn graph_id(&'a self) -> Id<'a> { Id::new(&self.name[..]).unwrap() } @@ -881,7 +900,9 @@ mod tests { } } - impl<'a> Labeller<'a, Node, &'a Edge> for LabelledGraphWithEscStrs { + impl<'a> Labeller<'a> for LabelledGraphWithEscStrs { + type Node = Node; + type Edge = &'a Edge; fn graph_id(&'a self) -> Id<'a> { self.graph.graph_id() } @@ -900,7 +921,9 @@ mod tests { } } - impl<'a> GraphWalk<'a, Node, &'a Edge> for LabelledGraph { + impl<'a> GraphWalk<'a> for LabelledGraph { + type Node = Node; + type Edge = &'a Edge; fn nodes(&'a self) -> Nodes<'a, Node> { (0..self.node_labels.len()).collect() } @@ -915,7 +938,9 @@ mod tests { } } - impl<'a> GraphWalk<'a, Node, &'a Edge> for LabelledGraphWithEscStrs { + impl<'a> GraphWalk<'a> for LabelledGraphWithEscStrs { + type Node = Node; + type Edge = &'a Edge; fn nodes(&'a self) -> Nodes<'a, Node> { self.graph.nodes() } @@ -934,7 +959,7 @@ mod tests { let mut writer = Vec::new(); render(&g, &mut writer).unwrap(); let mut s = String::new(); - try!(Read::read_to_string(&mut &*writer, &mut s)); + Read::read_to_string(&mut &*writer, &mut s)?; Ok(s) } diff --git a/src/liblibc b/src/liblibc index 9aa6600bd8f4e..6e8c1b490ccbe 160000 --- a/src/liblibc +++ b/src/liblibc @@ -1 +1 @@ -Subproject commit 9aa6600bd8f4e4f370a7d2fb76c4b3efc669cadf +Subproject commit 6e8c1b490ccbe5e84d248bab883515bc85394b5f diff --git a/src/liblog/Cargo.toml b/src/liblog/Cargo.toml new file mode 100644 index 0000000000000..31a862478d034 --- /dev/null +++ b/src/liblog/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "log" +version = 
"0.0.0" + +[lib] +name = "log" +path = "lib.rs" +crate-type = ["dylib", "rlib"] diff --git a/src/liblog/directive.rs b/src/liblog/directive.rs index f1ebf16737831..eb50d6e6135ef 100644 --- a/src/liblog/directive.rs +++ b/src/liblog/directive.rs @@ -22,12 +22,12 @@ pub const LOG_LEVEL_NAMES: [&'static str; 5] = ["ERROR", "WARN", "INFO", "DEBUG" /// Parse an individual log level that is either a number or a symbolic log level fn parse_log_level(level: &str) -> Option { level.parse::() - .ok() - .or_else(|| { - let pos = LOG_LEVEL_NAMES.iter().position(|&name| name.eq_ignore_ascii_case(level)); - pos.map(|p| p as u32 + 1) - }) - .map(|p| cmp::min(p, ::MAX_LOG_LEVEL)) + .ok() + .or_else(|| { + let pos = LOG_LEVEL_NAMES.iter().position(|&name| name.eq_ignore_ascii_case(level)); + pos.map(|p| p as u32 + 1) + }) + .map(|p| cmp::min(p, ::MAX_LOG_LEVEL)) } /// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=1/foo") @@ -52,32 +52,31 @@ pub fn parse_logging_spec(spec: &str) -> (Vec, Option) { continue; } let mut parts = s.split('='); - let (log_level, name) = match (parts.next(), - parts.next().map(|s| s.trim()), - parts.next()) { - (Some(part0), None, None) => { - // if the single argument is a log-level string or number, - // treat that as a global fallback - match parse_log_level(part0) { - Some(num) => (num, None), - None => (::MAX_LOG_LEVEL, Some(part0)), + let (log_level, name) = + match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { + (Some(part0), None, None) => { + // if the single argument is a log-level string or number, + // treat that as a global fallback + match parse_log_level(part0) { + Some(num) => (num, None), + None => (::MAX_LOG_LEVEL, Some(part0)), + } } - } - (Some(part0), Some(""), None) => (::MAX_LOG_LEVEL, Some(part0)), - (Some(part0), Some(part1), None) => { - match parse_log_level(part1) { - Some(num) => (num, Some(part0)), - _ => { - println!("warning: invalid logging spec '{}', ignoring it", part1); - 
continue; + (Some(part0), Some(""), None) => (::MAX_LOG_LEVEL, Some(part0)), + (Some(part0), Some(part1), None) => { + match parse_log_level(part1) { + Some(num) => (num, Some(part0)), + _ => { + println!("warning: invalid logging spec '{}', ignoring it", part1); + continue; + } } } - } - _ => { - println!("warning: invalid logging spec '{}', ignoring it", s); - continue; - } - }; + _ => { + println!("warning: invalid logging spec '{}', ignoring it", s); + continue; + } + }; dirs.push(LogDirective { name: name.map(str::to_owned), level: log_level, diff --git a/src/liblog/lib.rs b/src/liblog/lib.rs index dbd553acd68fc..517cd016e8a35 100644 --- a/src/liblog/lib.rs +++ b/src/liblog/lib.rs @@ -168,11 +168,9 @@ html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))))] #![deny(missing_docs)] +#![cfg_attr(not(stage0), deny(warnings))] -#![feature(box_syntax)] -#![feature(const_fn)] #![feature(staged_api)] -#![feature(static_mutex)] use std::cell::RefCell; use std::fmt; @@ -180,9 +178,8 @@ use std::io::{self, Stderr}; use std::io::prelude::*; use std::mem; use std::env; -use std::ptr; use std::slice; -use std::sync::{Once, StaticMutex}; +use std::sync::{Mutex, ONCE_INIT, Once}; use directive::LOG_LEVEL_NAMES; @@ -198,18 +195,13 @@ pub const MAX_LOG_LEVEL: u32 = 255; /// The default logging level of a crate if no other is specified. const DEFAULT_LOG_LEVEL: u32 = 1; -static LOCK: StaticMutex = StaticMutex::new(); +static mut LOCK: *mut Mutex<(Vec, Option)> = 0 as *mut _; /// An unsafe constant that is the maximum logging level of any module /// specified. This is the first line of defense to determining whether a /// logging statement should be run. static mut LOG_LEVEL: u32 = MAX_LOG_LEVEL; -static mut DIRECTIVES: *mut Vec = ptr::null_mut(); - -/// Optional filter. 
-static mut FILTER: *mut String = ptr::null_mut(); - /// Debug log level pub const DEBUG: u32 = 4; /// Info log level @@ -286,14 +278,10 @@ pub fn log(level: u32, loc: &'static LogLocation, args: fmt::Arguments) { // Test the literal string from args against the current filter, if there // is one. unsafe { - let _g = LOCK.lock(); - match FILTER as usize { - 0 => {} - n => { - let filter = mem::transmute::<_, &String>(n); - if !args.to_string().contains(filter) { - return; - } + let filter = (*LOCK).lock().unwrap(); + if let Some(ref filter) = filter.1 { + if !args.to_string().contains(filter) { + return; } } } @@ -301,10 +289,8 @@ pub fn log(level: u32, loc: &'static LogLocation, args: fmt::Arguments) { // Completely remove the local logger from TLS in case anyone attempts to // frob the slot while we're doing the logging. This will destroy any logger // set during logging. - let mut logger: Box = LOCAL_LOGGER.with(|s| s.borrow_mut().take()) - .unwrap_or_else(|| { - box DefaultLogger { handle: io::stderr() } - }); + let logger = LOCAL_LOGGER.with(|s| s.borrow_mut().take()); + let mut logger = logger.unwrap_or_else(|| Box::new(DefaultLogger { handle: io::stderr() })); logger.log(&LogRecord { level: LogLevel(level), args: args, @@ -362,7 +348,7 @@ pub struct LogLocation { /// module's log statement should be emitted or not. #[doc(hidden)] pub fn mod_enabled(level: u32, module: &str) -> bool { - static INIT: Once = Once::new(); + static INIT: Once = ONCE_INIT; INIT.call_once(init); // It's possible for many threads are in this function, only one of them @@ -377,10 +363,9 @@ pub fn mod_enabled(level: u32, module: &str) -> bool { // This assertion should never get tripped unless we're in an at_exit // handler after logging has been torn down and a logging attempt was made. 
- let _g = LOCK.lock(); unsafe { - assert!(DIRECTIVES as usize != 0); - enabled(level, module, (*DIRECTIVES).iter()) + let directives = (*LOCK).lock().unwrap(); + enabled(level, module, directives.0.iter()) } } @@ -421,14 +406,8 @@ fn init() { unsafe { LOG_LEVEL = max_level; - assert!(FILTER.is_null()); - match filter { - Some(f) => FILTER = Box::into_raw(box f), - None => {} - } - - assert!(DIRECTIVES.is_null()); - DIRECTIVES = Box::into_raw(box directives); + assert!(LOCK.is_null()); + LOCK = Box::into_raw(Box::new(Mutex::new((directives, filter)))); } } diff --git a/src/libpanic_abort/Cargo.toml b/src/libpanic_abort/Cargo.toml new file mode 100644 index 0000000000000..d90d2864813c9 --- /dev/null +++ b/src/libpanic_abort/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["The Rust Project Developers"] +name = "panic_abort" +version = "0.0.0" + +[lib] +path = "lib.rs" +test = false +bench = false + +[dependencies] +core = { path = "../libcore" } +libc = { path = "../rustc/libc_shim" } diff --git a/src/libpanic_abort/lib.rs b/src/libpanic_abort/lib.rs new file mode 100644 index 0000000000000..853f81ceaa9b8 --- /dev/null +++ b/src/libpanic_abort/lib.rs @@ -0,0 +1,134 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of Rust panics via process aborts +//! +//! When compared to the implementation via unwinding, this crate is *much* +//! simpler! That being said, it's not quite as versatile, but here goes! 
+ +#![no_std] +#![crate_name = "panic_abort"] +#![crate_type = "rlib"] +#![unstable(feature = "panic_abort", issue = "32837")] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/", + issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(staged_api)] + +#![panic_runtime] +#![feature(panic_runtime)] +#![cfg_attr(unix, feature(libc))] +#![cfg_attr(windows, feature(core_intrinsics))] + +// Rust's "try" function, but if we're aborting on panics we just call the +// function as there's nothing else we need to do here. +#[no_mangle] +pub unsafe extern fn __rust_maybe_catch_panic(f: fn(*mut u8), + data: *mut u8, + _data_ptr: *mut usize, + _vtable_ptr: *mut usize) -> u32 { + f(data); + 0 +} + +// "Leak" the payload and shim to the relevant abort on the platform in +// question. +// +// For Unix we just use `abort` from libc as it'll trigger debuggers, core +// dumps, etc, as one might expect. On Windows, however, the best option we have +// is the `__fastfail` intrinsics, but that's unfortunately not defined in LLVM, +// and the `RaiseFailFastException` function isn't available until Windows 7 +// which would break compat with XP. For now just use `intrinsics::abort` which +// will kill us with an illegal instruction, which will do a good enough job for +// now hopefully. +#[no_mangle] +pub unsafe extern fn __rust_start_panic(_data: usize, _vtable: usize) -> u32 { + abort(); + + #[cfg(unix)] + unsafe fn abort() -> ! { + extern crate libc; + libc::abort(); + } + + #[cfg(windows)] + unsafe fn abort() -> ! { + core::intrinsics::abort(); + } +} + +// This... is a bit of an oddity. The tl;dr; is that this is required to link +// correctly, the longer explanation is below. 
+// +// Right now the binaries of libcore/libstd that we ship are all compiled with +// `-C panic=unwind`. This is done to ensure that the binaries are maximally +// compatible with as many situations as possible. The compiler, however, +// requires a "personality function" for all functions compiled with `-C +// panic=unwind`. This personality function is hardcoded to the symbol +// `rust_eh_personality` and is defined by the `eh_personality` lang item. +// +// So... why not just define that lang item here? Good question! The way that +// panic runtimes are linked in is actually a little subtle in that they're +// "sort of" in the compiler's crate store, but only actually linked if another +// isn't actually linked. This ends up meaning that both this crate and the +// panic_unwind crate can appear in the compiler's crate store, and if both +// define the `eh_personality` lang item then that'll hit an error. +// +// To handle this the compiler only requires the `eh_personality` is defined if +// the panic runtime being linked in is the unwinding runtime, and otherwise +// it's not required to be defined (rightfully so). In this case, however, this +// library just defines this symbol so there's at least some personality +// somewhere. +// +// Essentially this symbol is just defined to get wired up to libcore/libstd +// binaries, but it should never be called as we don't link in an unwinding +// runtime at all. +pub mod personalities { + + #[no_mangle] + #[cfg(not(all(target_os = "windows", + target_env = "gnu", + target_arch = "x86_64")))] + pub extern fn rust_eh_personality() {} + + // On x86_64-pc-windows-gnu we use our own personality function that needs + // to return `ExceptionContinueSearch` as we're passing on all our frames. 
+ #[no_mangle] + #[cfg(all(target_os = "windows", + target_env = "gnu", + target_arch = "x86_64"))] + pub extern fn rust_eh_personality(_record: usize, + _frame: usize, + _context: usize, + _dispatcher: usize) -> u32 { + 1 // `ExceptionContinueSearch` + } + + // Similar to above, this corresponds to the `eh_unwind_resume` lang item + // that's only used on Windows currently. + // + // Note that we don't execute landing pads, so this is never called, so it's + // body is empty. + #[no_mangle] + #[cfg(all(target_os = "windows", target_env = "gnu"))] + pub extern fn rust_eh_unwind_resume() {} + + // These two are called by our startup objects on i686-pc-windows-gnu, but + // they don't need to do anything so the bodies are nops. + #[no_mangle] + #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))] + pub extern fn rust_eh_register_frames() {} + #[no_mangle] + #[cfg(all(target_os = "windows", target_env = "gnu", target_arch = "x86"))] + pub extern fn rust_eh_unregister_frames() {} +} diff --git a/src/libpanic_unwind/Cargo.toml b/src/libpanic_unwind/Cargo.toml new file mode 100644 index 0000000000000..90c16fff6f1f1 --- /dev/null +++ b/src/libpanic_unwind/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["The Rust Project Developers"] +name = "panic_unwind" +version = "0.0.0" + +[lib] +path = "lib.rs" +test = false +bench = false + +[dependencies] +alloc = { path = "../liballoc" } +core = { path = "../libcore" } +libc = { path = "../rustc/libc_shim" } +unwind = { path = "../libunwind" } diff --git a/src/libpanic_unwind/dwarf/eh.rs b/src/libpanic_unwind/dwarf/eh.rs new file mode 100644 index 0000000000000..e7994f4e0ef0a --- /dev/null +++ b/src/libpanic_unwind/dwarf/eh.rs @@ -0,0 +1,195 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Parsing of GCC-style Language-Specific Data Area (LSDA) +//! For details see: +//! http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html +//! http://mentorembedded.github.io/cxx-abi/exceptions.pdf +//! http://www.airs.com/blog/archives/460 +//! http://www.airs.com/blog/archives/464 +//! +//! A reference implementation may be found in the GCC source tree +//! (/libgcc/unwind-c.c as of this writing) + +#![allow(non_upper_case_globals)] +#![allow(unused)] + +use dwarf::DwarfReader; +use core::mem; + +pub const DW_EH_PE_omit: u8 = 0xFF; +pub const DW_EH_PE_absptr: u8 = 0x00; + +pub const DW_EH_PE_uleb128: u8 = 0x01; +pub const DW_EH_PE_udata2: u8 = 0x02; +pub const DW_EH_PE_udata4: u8 = 0x03; +pub const DW_EH_PE_udata8: u8 = 0x04; +pub const DW_EH_PE_sleb128: u8 = 0x09; +pub const DW_EH_PE_sdata2: u8 = 0x0A; +pub const DW_EH_PE_sdata4: u8 = 0x0B; +pub const DW_EH_PE_sdata8: u8 = 0x0C; + +pub const DW_EH_PE_pcrel: u8 = 0x10; +pub const DW_EH_PE_textrel: u8 = 0x20; +pub const DW_EH_PE_datarel: u8 = 0x30; +pub const DW_EH_PE_funcrel: u8 = 0x40; +pub const DW_EH_PE_aligned: u8 = 0x50; + +pub const DW_EH_PE_indirect: u8 = 0x80; + +#[derive(Copy, Clone)] +pub struct EHContext<'a> { + pub ip: usize, // Current instruction pointer + pub func_start: usize, // Address of the current function + pub get_text_start: &'a Fn() -> usize, // Get address of the code section + pub get_data_start: &'a Fn() -> usize, // Get address of the data section +} + +pub enum EHAction { + None, + Cleanup(usize), + Catch(usize), + Terminate, +} + +pub const USING_SJLJ_EXCEPTIONS: bool = cfg!(all(target_os = "ios", target_arch = "arm")); + +pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext) -> EHAction { + if lsda.is_null() { + return EHAction::None; + } + + let func_start = context.func_start; + let mut reader = DwarfReader::new(lsda); + + let start_encoding = 
reader.read::(); + // base address for landing pad offsets + let lpad_base = if start_encoding != DW_EH_PE_omit { + read_encoded_pointer(&mut reader, context, start_encoding) + } else { + func_start + }; + + let ttype_encoding = reader.read::(); + if ttype_encoding != DW_EH_PE_omit { + // Rust doesn't analyze exception types, so we don't care about the type table + reader.read_uleb128(); + } + + let call_site_encoding = reader.read::(); + let call_site_table_length = reader.read_uleb128(); + let action_table = reader.ptr.offset(call_site_table_length as isize); + let ip = context.ip; + + if !USING_SJLJ_EXCEPTIONS { + while reader.ptr < action_table { + let cs_start = read_encoded_pointer(&mut reader, context, call_site_encoding); + let cs_len = read_encoded_pointer(&mut reader, context, call_site_encoding); + let cs_lpad = read_encoded_pointer(&mut reader, context, call_site_encoding); + let cs_action = reader.read_uleb128(); + // Callsite table is sorted by cs_start, so if we've passed the ip, we + // may stop searching. + if ip < func_start + cs_start { + break; + } + if ip < func_start + cs_start + cs_len { + if cs_lpad == 0 { + return EHAction::None; + } else { + let lpad = lpad_base + cs_lpad; + return interpret_cs_action(cs_action, lpad); + } + } + } + // Ip is not present in the table. This should not happen... but it does: issue #35011. + // So rather than returning EHAction::Terminate, we do this. + EHAction::None + } else { + // SjLj version: + // The "IP" is an index into the call-site table, with two exceptions: + // -1 means 'no-action', and 0 means 'terminate'. + match ip as isize { + -1 => return EHAction::None, + 0 => return EHAction::Terminate, + _ => (), + } + let mut idx = ip; + loop { + let cs_lpad = reader.read_uleb128(); + let cs_action = reader.read_uleb128(); + idx -= 1; + if idx == 0 { + // Can never have null landing pad for sjlj -- that would have + // been indicated by a -1 call site index. 
+ let lpad = (cs_lpad + 1) as usize; + return interpret_cs_action(cs_action, lpad); + } + } + } +} + +fn interpret_cs_action(cs_action: u64, lpad: usize) -> EHAction { + if cs_action == 0 { + EHAction::Cleanup(lpad) + } else { + EHAction::Catch(lpad) + } +} + +#[inline] +fn round_up(unrounded: usize, align: usize) -> usize { + assert!(align.is_power_of_two()); + (unrounded + align - 1) & !(align - 1) +} + +unsafe fn read_encoded_pointer(reader: &mut DwarfReader, + context: &EHContext, + encoding: u8) + -> usize { + assert!(encoding != DW_EH_PE_omit); + + // DW_EH_PE_aligned implies it's an absolute pointer value + if encoding == DW_EH_PE_aligned { + reader.ptr = round_up(reader.ptr as usize, mem::size_of::()) as *const u8; + return reader.read::(); + } + + let mut result = match encoding & 0x0F { + DW_EH_PE_absptr => reader.read::(), + DW_EH_PE_uleb128 => reader.read_uleb128() as usize, + DW_EH_PE_udata2 => reader.read::() as usize, + DW_EH_PE_udata4 => reader.read::() as usize, + DW_EH_PE_udata8 => reader.read::() as usize, + DW_EH_PE_sleb128 => reader.read_sleb128() as usize, + DW_EH_PE_sdata2 => reader.read::() as usize, + DW_EH_PE_sdata4 => reader.read::() as usize, + DW_EH_PE_sdata8 => reader.read::() as usize, + _ => panic!(), + }; + + result += match encoding & 0x70 { + DW_EH_PE_absptr => 0, + // relative to address of the encoded value, despite the name + DW_EH_PE_pcrel => reader.ptr as usize, + DW_EH_PE_funcrel => { + assert!(context.func_start != 0); + context.func_start + } + DW_EH_PE_textrel => (*context.get_text_start)(), + DW_EH_PE_datarel => (*context.get_data_start)(), + _ => panic!(), + }; + + if encoding & DW_EH_PE_indirect != 0 { + result = *(result as *const usize); + } + + result +} diff --git a/src/libpanic_unwind/dwarf/mod.rs b/src/libpanic_unwind/dwarf/mod.rs new file mode 100644 index 0000000000000..5c05ac11d307d --- /dev/null +++ b/src/libpanic_unwind/dwarf/mod.rs @@ -0,0 +1,98 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Utilities for parsing DWARF-encoded data streams. +//! See http://www.dwarfstd.org, +//! DWARF-4 standard, Section 7 - "Data Representation" + +// This module is used only by x86_64-pc-windows-gnu for now, but we +// are compiling it everywhere to avoid regressions. +#![allow(unused)] + +pub mod eh; + +use core::mem; + +pub struct DwarfReader { + pub ptr: *const u8, +} + +#[repr(C,packed)] +struct Unaligned(T); + +impl DwarfReader { + pub fn new(ptr: *const u8) -> DwarfReader { + DwarfReader { ptr: ptr } + } + + // DWARF streams are packed, so e.g. a u32 would not necessarily be aligned + // on a 4-byte boundary. This may cause problems on platforms with strict + // alignment requirements. By wrapping data in a "packed" struct, we are + // telling the backend to generate "misalignment-safe" code. + pub unsafe fn read(&mut self) -> T { + let Unaligned(result) = *(self.ptr as *const Unaligned); + self.ptr = self.ptr.offset(mem::size_of::() as isize); + result + } + + // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable + // Length Data". 
+ pub unsafe fn read_uleb128(&mut self) -> u64 { + let mut shift: usize = 0; + let mut result: u64 = 0; + let mut byte: u8; + loop { + byte = self.read::(); + result |= ((byte & 0x7F) as u64) << shift; + shift += 7; + if byte & 0x80 == 0 { + break; + } + } + result + } + + pub unsafe fn read_sleb128(&mut self) -> i64 { + let mut shift: usize = 0; + let mut result: u64 = 0; + let mut byte: u8; + loop { + byte = self.read::(); + result |= ((byte & 0x7F) as u64) << shift; + shift += 7; + if byte & 0x80 == 0 { + break; + } + } + // sign-extend + if shift < 8 * mem::size_of::() && (byte & 0x40) != 0 { + result |= (!0 as u64) << shift; + } + result as i64 + } +} + +#[test] +fn dwarf_reader() { + let encoded: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 0xE5, 0x8E, 0x26, 0x9B, 0xF1, 0x59, 0xFF, 0xFF]; + + let mut reader = DwarfReader::new(encoded.as_ptr()); + + unsafe { + assert!(reader.read::() == u8::to_be(1u8)); + assert!(reader.read::() == u16::to_be(0x0203)); + assert!(reader.read::() == u32::to_be(0x04050607)); + + assert!(reader.read_uleb128() == 624485); + assert!(reader.read_sleb128() == -624485); + + assert!(reader.read::() == i8::to_be(-1)); + } +} diff --git a/src/libpanic_unwind/emcc.rs b/src/libpanic_unwind/emcc.rs new file mode 100644 index 0000000000000..0e48e37c92358 --- /dev/null +++ b/src/libpanic_unwind/emcc.rs @@ -0,0 +1,75 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Unwinding for emscripten +//! +//! Whereas Rust's usual unwinding implementation for Unix platforms +//! calls into the libunwind APIs directly, on emscripten we instead +//! call into the C++ unwinding APIs. This is just an expedience since +//! 
emscripten's runtime always implements those APIs and does not +//! implement libunwind. + +#![allow(private_no_mangle_fns)] + +use core::any::Any; +use core::ptr; +use alloc::boxed::Box; +use libc::{self, c_int}; +use unwind as uw; +use core::mem; + +pub fn payload() -> *mut u8 { + ptr::null_mut() +} + +pub unsafe fn cleanup(ptr: *mut u8) -> Box { + assert!(!ptr.is_null()); + let ex = ptr::read(ptr as *mut _); + __cxa_free_exception(ptr as *mut _); + ex +} + +pub unsafe fn panic(data: Box) -> u32 { + let sz = mem::size_of_val(&data); + let exception = __cxa_allocate_exception(sz); + if exception == ptr::null_mut() { + return uw::_URC_FATAL_PHASE1_ERROR as u32; + } + let exception = exception as *mut Box; + ptr::write(exception, data); + __cxa_throw(exception as *mut _, ptr::null_mut(), ptr::null_mut()); + + unreachable!() +} + +#[lang = "eh_personality"] +#[no_mangle] +unsafe extern "C" fn rust_eh_personality(version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + __gxx_personality_v0(version, actions, exception_class, exception_object, context) +} + +extern "C" { + fn __cxa_allocate_exception(thrown_size: libc::size_t) -> *mut libc::c_void; + fn __cxa_free_exception(thrown_exception: *mut libc::c_void); + fn __cxa_throw(thrown_exception: *mut libc::c_void, + tinfo: *mut libc::c_void, + dest: *mut libc::c_void); + fn __gxx_personality_v0(version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code; +} diff --git a/src/libpanic_unwind/gcc.rs b/src/libpanic_unwind/gcc.rs new file mode 100644 index 0000000000000..73264fab69c26 --- /dev/null +++ b/src/libpanic_unwind/gcc.rs @@ -0,0 +1,320 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of panics backed by libgcc/libunwind (in some form) +//! +//! For background on exception handling and stack unwinding please see +//! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and +//! documents linked from it. +//! These are also good reads: +//! http://mentorembedded.github.io/cxx-abi/abi-eh.html +//! http://monoinfinito.wordpress.com/series/exception-handling-in-c/ +//! http://www.airs.com/blog/index.php?s=exception+frames +//! +//! ## A brief summary +//! +//! Exception handling happens in two phases: a search phase and a cleanup +//! phase. +//! +//! In both phases the unwinder walks stack frames from top to bottom using +//! information from the stack frame unwind sections of the current process's +//! modules ("module" here refers to an OS module, i.e. an executable or a +//! dynamic library). +//! +//! For each stack frame, it invokes the associated "personality routine", whose +//! address is also stored in the unwind info section. +//! +//! In the search phase, the job of a personality routine is to examine +//! exception object being thrown, and to decide whether it should be caught at +//! that stack frame. Once the handler frame has been identified, cleanup phase +//! begins. +//! +//! In the cleanup phase, the unwinder invokes each personality routine again. +//! This time it decides which (if any) cleanup code needs to be run for +//! the current stack frame. If so, the control is transferred to a special +//! branch in the function body, the "landing pad", which invokes destructors, +//! frees memory, etc. At the end of the landing pad, control is transferred +//! 
back to the unwinder and unwinding resumes. +//! +//! Once stack has been unwound down to the handler frame level, unwinding stops +//! and the last personality routine transfers control to the catch block. +//! +//! ## `eh_personality` and `eh_unwind_resume` +//! +//! These language items are used by the compiler when generating unwind info. +//! The first one is the personality routine described above. The second one +//! allows compilation target to customize the process of resuming unwind at the +//! end of the landing pads. `eh_unwind_resume` is used only if +//! `custom_unwind_resume` flag in the target options is set. + +#![allow(private_no_mangle_fns)] + +use core::any::Any; +use core::ptr; +use alloc::boxed::Box; + +use unwind as uw; +use libc::{c_int, uintptr_t}; +use dwarf::eh::{self, EHContext, EHAction}; + +#[repr(C)] +struct Exception { + _uwe: uw::_Unwind_Exception, + cause: Option>, +} + +pub unsafe fn panic(data: Box) -> u32 { + let exception = Box::new(Exception { + _uwe: uw::_Unwind_Exception { + exception_class: rust_exception_class(), + exception_cleanup: exception_cleanup, + private: [0; uw::unwinder_private_data_size], + }, + cause: Some(data), + }); + let exception_param = Box::into_raw(exception) as *mut uw::_Unwind_Exception; + return uw::_Unwind_RaiseException(exception_param) as u32; + + extern "C" fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code, + exception: *mut uw::_Unwind_Exception) { + unsafe { + let _: Box = Box::from_raw(exception as *mut Exception); + } + } +} + +pub fn payload() -> *mut u8 { + ptr::null_mut() +} + +pub unsafe fn cleanup(ptr: *mut u8) -> Box { + let my_ep = ptr as *mut Exception; + let cause = (*my_ep).cause.take(); + uw::_Unwind_DeleteException(ptr as *mut _); + cause.unwrap() +} + +// Rust's exception class identifier. This is used by personality routines to +// determine whether the exception was thrown by their own runtime. 
+fn rust_exception_class() -> uw::_Unwind_Exception_Class { + // M O Z \0 R U S T -- vendor, language + 0x4d4f5a_00_52555354 +} + + +// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister() +// and TargetLowering::getExceptionSelectorRegister() for each architecture, +// then mapped to DWARF register numbers via register definition tables +// (typically RegisterInfo.td, search for "DwarfRegNum"). +// See also http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register. + +#[cfg(target_arch = "x86")] +const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX + +#[cfg(target_arch = "x86_64")] +const UNWIND_DATA_REG: (i32, i32) = (0, 1); // RAX, RDX + +#[cfg(any(target_arch = "arm", target_arch = "aarch64"))] +const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1 / X0, X1 + +#[cfg(any(target_arch = "mips", target_arch = "mips64"))] +const UNWIND_DATA_REG: (i32, i32) = (4, 5); // A0, A1 + +#[cfg(any(target_arch = "powerpc", target_arch = "powerpc64"))] +const UNWIND_DATA_REG: (i32, i32) = (3, 4); // R3, R4 / X3, X4 + +#[cfg(target_arch = "s390x")] +const UNWIND_DATA_REG: (i32, i32) = (6, 7); // R6, R7 + +// The following code is based on GCC's C and C++ personality routines. For reference, see: +// https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc +// https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c + +// The personality routine for most of our targets, except ARM, which has a slightly different ABI +// (however, iOS goes here as it uses SjLj unwinding). 
Also, the 64-bit Windows implementation +// lives in seh64_gnu.rs +#[cfg(all(any(target_os = "ios", not(target_arch = "arm"))))] +#[lang = "eh_personality"] +#[no_mangle] +#[allow(unused)] +unsafe extern "C" fn rust_eh_personality(version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + if version != 1 { + return uw::_URC_FATAL_PHASE1_ERROR; + } + let eh_action = find_eh_action(context); + if actions as i32 & uw::_UA_SEARCH_PHASE as i32 != 0 { + match eh_action { + EHAction::None | + EHAction::Cleanup(_) => return uw::_URC_CONTINUE_UNWIND, + EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND, + EHAction::Terminate => return uw::_URC_FATAL_PHASE1_ERROR, + } + } else { + match eh_action { + EHAction::None => return uw::_URC_CONTINUE_UNWIND, + EHAction::Cleanup(lpad) | + EHAction::Catch(lpad) => { + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t); + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0); + uw::_Unwind_SetIP(context, lpad); + return uw::_URC_INSTALL_CONTEXT; + } + EHAction::Terminate => return uw::_URC_FATAL_PHASE2_ERROR, + } + } +} + +// ARM EHABI personality routine. +// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038b/IHI0038B_ehabi.pdf +#[cfg(all(target_arch = "arm", not(target_os = "ios")))] +#[lang = "eh_personality"] +#[no_mangle] +unsafe extern "C" fn rust_eh_personality(state: uw::_Unwind_State, + exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + let state = state as c_int; + let action = state & uw::_US_ACTION_MASK as c_int; + let search_phase = if action == uw::_US_VIRTUAL_UNWIND_FRAME as c_int { + // Backtraces on ARM will call the personality routine with + // state == _US_VIRTUAL_UNWIND_FRAME | _US_FORCE_UNWIND. 
In those cases + // we want to continue unwinding the stack, otherwise all our backtraces + // would end at __rust_try + if state & uw::_US_FORCE_UNWIND as c_int != 0 { + return continue_unwind(exception_object, context); + } + true + } else if action == uw::_US_UNWIND_FRAME_STARTING as c_int { + false + } else if action == uw::_US_UNWIND_FRAME_RESUME as c_int { + return continue_unwind(exception_object, context); + } else { + return uw::_URC_FAILURE; + }; + + // The DWARF unwinder assumes that _Unwind_Context holds things like the function + // and LSDA pointers, however ARM EHABI places them into the exception object. + // To preserve signatures of functions like _Unwind_GetLanguageSpecificData(), which + // take only the context pointer, GCC personality routines stash a pointer to exception_object + // in the context, using location reserved for ARM's "scratch register" (r12). + uw::_Unwind_SetGR(context, + uw::UNWIND_POINTER_REG, + exception_object as uw::_Unwind_Ptr); + // ...A more principled approach would be to provide the full definition of ARM's + // _Unwind_Context in our libunwind bindings and fetch the required data from there directly, + // bypassing DWARF compatibility functions. 
+ + let eh_action = find_eh_action(context); + if search_phase { + match eh_action { + EHAction::None | + EHAction::Cleanup(_) => return continue_unwind(exception_object, context), + EHAction::Catch(_) => return uw::_URC_HANDLER_FOUND, + EHAction::Terminate => return uw::_URC_FAILURE, + } + } else { + match eh_action { + EHAction::None => return continue_unwind(exception_object, context), + EHAction::Cleanup(lpad) | + EHAction::Catch(lpad) => { + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.0, exception_object as uintptr_t); + uw::_Unwind_SetGR(context, UNWIND_DATA_REG.1, 0); + uw::_Unwind_SetIP(context, lpad); + return uw::_URC_INSTALL_CONTEXT; + } + EHAction::Terminate => return uw::_URC_FAILURE, + } + } + + // On ARM EHABI the personality routine is responsible for actually + // unwinding a single stack frame before returning (ARM EHABI Sec. 6.1). + unsafe fn continue_unwind(exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code { + if __gnu_unwind_frame(exception_object, context) == uw::_URC_NO_REASON { + uw::_URC_CONTINUE_UNWIND + } else { + uw::_URC_FAILURE + } + } + // defined in libgcc + extern "C" { + fn __gnu_unwind_frame(exception_object: *mut uw::_Unwind_Exception, + context: *mut uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code; + } +} + +unsafe fn find_eh_action(context: *mut uw::_Unwind_Context) -> EHAction { + let lsda = uw::_Unwind_GetLanguageSpecificData(context) as *const u8; + let mut ip_before_instr: c_int = 0; + let ip = uw::_Unwind_GetIPInfo(context, &mut ip_before_instr); + let eh_context = EHContext { + // The return address points 1 byte past the call instruction, + // which could be in the next IP range in LSDA range table. 
+ ip: if ip_before_instr != 0 { ip } else { ip - 1 }, + func_start: uw::_Unwind_GetRegionStart(context), + get_text_start: &|| uw::_Unwind_GetTextRelBase(context), + get_data_start: &|| uw::_Unwind_GetDataRelBase(context), + }; + eh::find_eh_action(lsda, &eh_context) +} + +// See docs in the `unwind` module. +#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] +#[lang = "eh_unwind_resume"] +#[unwind] +unsafe extern "C" fn rust_eh_unwind_resume(panic_ctx: *mut u8) -> ! { + uw::_Unwind_Resume(panic_ctx as *mut uw::_Unwind_Exception); +} + +// Frame unwind info registration +// +// Each module's image contains a frame unwind info section (usually +// ".eh_frame"). When a module is loaded/unloaded into the process, the +// unwinder must be informed about the location of this section in memory. The +// methods of achieving that vary by the platform. On some (e.g. Linux), the +// unwinder can discover unwind info sections on its own (by dynamically +// enumerating currently loaded modules via the dl_iterate_phdr() API and +// finding their ".eh_frame" sections); Others, like Windows, require modules +// to actively register their unwind info sections via unwinder API. +// +// This module defines two symbols which are referenced and called from +// rsbegin.rs to register our information with the GCC runtime. The +// implementation of stack unwinding is (for now) deferred to libgcc_eh, however +// Rust crates use these Rust-specific entry points to avoid potential clashes +// with any GCC runtime. 
+#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] +pub mod eh_frame_registry { + #[link(name = "gcc_eh")] + #[cfg(not(cargobuild))] + extern "C" {} + + extern "C" { + fn __register_frame_info(eh_frame_begin: *const u8, object: *mut u8); + fn __deregister_frame_info(eh_frame_begin: *const u8, object: *mut u8); + } + + #[no_mangle] + pub unsafe extern "C" fn rust_eh_register_frames(eh_frame_begin: *const u8, object: *mut u8) { + __register_frame_info(eh_frame_begin, object); + } + + #[no_mangle] + pub unsafe extern "C" fn rust_eh_unregister_frames(eh_frame_begin: *const u8, + object: *mut u8) { + __deregister_frame_info(eh_frame_begin, object); + } +} diff --git a/src/libpanic_unwind/lib.rs b/src/libpanic_unwind/lib.rs new file mode 100644 index 0000000000000..ff483fa823e0c --- /dev/null +++ b/src/libpanic_unwind/lib.rs @@ -0,0 +1,116 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of panics via stack unwinding +//! +//! This crate is an implementation of panics in Rust using "most native" stack +//! unwinding mechanism of the platform this is being compiled for. This +//! essentially gets categorized into three buckets currently: +//! +//! 1. MSVC targets use SEH in the `seh.rs` file. +//! 2. The 64-bit MinGW target half-uses SEH and half-use gcc-like information +//! in the `seh64_gnu.rs` module. +//! 3. All other targets use libunwind/libgcc in the `gcc/mod.rs` module. +//! +//! More documentation about each implementation can be found in the respective +//! module. 
+ +#![no_std] +#![crate_name = "panic_unwind"] +#![crate_type = "rlib"] +#![unstable(feature = "panic_unwind", issue = "32837")] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/", + issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(alloc)] +#![feature(core_intrinsics)] +#![feature(lang_items)] +#![feature(libc)] +#![feature(panic_unwind)] +#![feature(raw)] +#![feature(staged_api)] +#![feature(unwind_attributes)] +#![cfg_attr(target_env = "msvc", feature(raw))] + +#![panic_runtime] +#![feature(panic_runtime)] + +extern crate alloc; +extern crate libc; +extern crate unwind; + +use core::intrinsics; +use core::mem; +use core::raw; + +// Rust runtime's startup objects depend on these symbols, so make them public. +#[cfg(all(target_os="windows", target_arch = "x86", target_env="gnu"))] +pub use imp::eh_frame_registry::*; + +// *-pc-windows-msvc +#[cfg(target_env = "msvc")] +#[path = "seh.rs"] +mod imp; + +// x86_64-pc-windows-gnu +#[cfg(all(windows, target_arch = "x86_64", target_env = "gnu"))] +#[path = "seh64_gnu.rs"] +mod imp; + +// i686-pc-windows-gnu and all others +#[cfg(any(all(unix, not(target_os = "emscripten")), + all(windows, target_arch = "x86", target_env = "gnu")))] +#[path = "gcc.rs"] +mod imp; + +// emscripten +#[cfg(target_os = "emscripten")] +#[path = "emcc.rs"] +mod imp; + +mod dwarf; +mod windows; + +// Entry point for catching an exception, implemented using the `try` intrinsic +// in the compiler. +// +// The interaction between the `payload` function and the compiler is pretty +// hairy and tightly coupled, for more information see the compiler's +// implementation of this. 
+#[no_mangle] +pub unsafe extern "C" fn __rust_maybe_catch_panic(f: fn(*mut u8), + data: *mut u8, + data_ptr: *mut usize, + vtable_ptr: *mut usize) + -> u32 { + let mut payload = imp::payload(); + if intrinsics::try(f, data, &mut payload as *mut _ as *mut _) == 0 { + 0 + } else { + let obj = mem::transmute::<_, raw::TraitObject>(imp::cleanup(payload)); + *data_ptr = obj.data as usize; + *vtable_ptr = obj.vtable as usize; + 1 + } +} + +// Entry point for raising an exception, just delegates to the platform-specific +// implementation. +#[no_mangle] +#[unwind] +pub unsafe extern "C" fn __rust_start_panic(data: usize, vtable: usize) -> u32 { + imp::panic(mem::transmute(raw::TraitObject { + data: data as *mut (), + vtable: vtable as *mut (), + })) +} diff --git a/src/libpanic_unwind/seh.rs b/src/libpanic_unwind/seh.rs new file mode 100644 index 0000000000000..5896421493008 --- /dev/null +++ b/src/libpanic_unwind/seh.rs @@ -0,0 +1,315 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Windows SEH +//! +//! On Windows (currently only on MSVC), the default exception handling +//! mechanism is Structured Exception Handling (SEH). This is quite different +//! than Dwarf-based exception handling (e.g. what other unix platforms use) in +//! terms of compiler internals, so LLVM is required to have a good deal of +//! extra support for SEH. +//! +//! In a nutshell, what happens here is: +//! +//! 1. The `panic` function calls the standard Windows function +//! `_CxxThrowException` to throw a C++-like exception, triggering the +//! unwinding process. +//! 2. All landing pads generated by the compiler use the personality function +//! 
`__CxxFrameHandler3`, a function in the CRT, and the unwinding code in +//! Windows will use this personality function to execute all cleanup code on +//! the stack. +//! 3. All compiler-generated calls to `invoke` have a landing pad set as a +//! `cleanuppad` LLVM instruction, which indicates the start of the cleanup +//! routine. The personality (in step 2, defined in the CRT) is responsible +//! for running the cleanup routines. +//! 4. Eventually the "catch" code in the `try` intrinsic (generated by the +//! compiler) is executed and indicates that control should come back to +//! Rust. This is done via a `catchswitch` plus a `catchpad` instruction in +//! LLVM IR terms, finally returning normal control to the program with a +//! `catchret` instruction. +//! +//! Some specific differences from the gcc-based exception handling are: +//! +//! * Rust has no custom personality function, it is instead *always* +//! `__CxxFrameHandler3`. Additionally, no extra filtering is performed, so we +//! end up catching any C++ exceptions that happen to look like the kind we're +//! throwing. Note that throwing an exception into Rust is undefined behavior +//! anyway, so this should be fine. +//! * We've got some data to transmit across the unwinding boundary, +//! specifically a `Box`. Like with Dwarf exceptions +//! these two pointers are stored as a payload in the exception itself. On +//! MSVC, however, there's no need for an extra heap allocation because the +//! call stack is preserved while filter functions are being executed. This +//! means that the pointers are passed directly to `_CxxThrowException` which +//! are then recovered in the filter function to be written to the stack frame +//! of the `try` intrinsic. +//! +//! [win64]: http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx +//! 
[llvm]: http://llvm.org/docs/ExceptionHandling.html#background-on-windows-exceptions + +#![allow(bad_style)] +#![allow(private_no_mangle_fns)] + +use alloc::boxed::Box; +use core::any::Any; +use core::mem; +use core::raw; + +use windows as c; +use libc::{c_int, c_uint}; + +// First up, a whole bunch of type definitions. There's a few platform-specific +// oddities here, and a lot that's just blatantly copied from LLVM. The purpose +// of all this is to implement the `panic` function below through a call to +// `_CxxThrowException`. +// +// This function takes two arguments. The first is a pointer to the data we're +// passing in, which in this case is our trait object. Pretty easy to find! The +// next, however, is more complicated. This is a pointer to a `_ThrowInfo` +// structure, and it generally is just intended to just describe the exception +// being thrown. +// +// Currently the definition of this type [1] is a little hairy, and the main +// oddity (and difference from the online article) is that on 32-bit the +// pointers are pointers but on 64-bit the pointers are expressed as 32-bit +// offsets from the `__ImageBase` symbol. The `ptr_t` and `ptr!` macro in the +// modules below are used to express this. +// +// The maze of type definitions also closely follows what LLVM emits for this +// sort of operation. For example, if you compile this C++ code on MSVC and emit +// the LLVM IR: +// +// #include +// +// void foo() { +// uint64_t a[2] = {0, 1}; +// throw a; +// } +// +// That's essentially what we're trying to emulate. Most of the constant values +// below were just copied from LLVM, I'm at least not 100% sure what's going on +// everywhere. For example the `.PA_K\0` and `.PEA_K\0` strings below (stuck in +// the names of a few of these) I'm not actually sure what they do, but it seems +// to mirror what LLVM does! +// +// In any case, these structures are all constructed in a similar manner, and +// it's just somewhat verbose for us. 
+// +// [1]: http://www.geoffchappell.com/studies/msvc/language/predefined/ + +#[cfg(target_arch = "x86")] +#[macro_use] +mod imp { + pub type ptr_t = *mut u8; + pub const OFFSET: i32 = 4; + + pub const NAME1: [u8; 7] = [b'.', b'P', b'A', b'_', b'K', 0, 0]; + pub const NAME2: [u8; 7] = [b'.', b'P', b'A', b'X', 0, 0, 0]; + + macro_rules! ptr { + (0) => (0 as *mut u8); + ($e:expr) => ($e as *mut u8); + } +} + +#[cfg(target_arch = "x86_64")] +#[macro_use] +mod imp { + pub type ptr_t = u32; + pub const OFFSET: i32 = 8; + + pub const NAME1: [u8; 7] = [b'.', b'P', b'E', b'A', b'_', b'K', 0]; + pub const NAME2: [u8; 7] = [b'.', b'P', b'E', b'A', b'X', 0, 0]; + + extern "C" { + pub static __ImageBase: u8; + } + + macro_rules! ptr { + (0) => (0); + ($e:expr) => { + (($e as usize) - (&imp::__ImageBase as *const _ as usize)) as u32 + } + } +} + +#[repr(C)] +pub struct _ThrowInfo { + pub attribues: c_uint, + pub pnfnUnwind: imp::ptr_t, + pub pForwardCompat: imp::ptr_t, + pub pCatchableTypeArray: imp::ptr_t, +} + +#[repr(C)] +pub struct _CatchableTypeArray { + pub nCatchableTypes: c_int, + pub arrayOfCatchableTypes: [imp::ptr_t; 2], +} + +#[repr(C)] +pub struct _CatchableType { + pub properties: c_uint, + pub pType: imp::ptr_t, + pub thisDisplacement: _PMD, + pub sizeOrOffset: c_int, + pub copy_function: imp::ptr_t, +} + +#[repr(C)] +pub struct _PMD { + pub mdisp: c_int, + pub pdisp: c_int, + pub vdisp: c_int, +} + +#[repr(C)] +pub struct _TypeDescriptor { + pub pVFTable: *const u8, + pub spare: *mut u8, + pub name: [u8; 7], +} + +static mut THROW_INFO: _ThrowInfo = _ThrowInfo { + attribues: 0, + pnfnUnwind: ptr!(0), + pForwardCompat: ptr!(0), + pCatchableTypeArray: ptr!(0), +}; + +static mut CATCHABLE_TYPE_ARRAY: _CatchableTypeArray = _CatchableTypeArray { + nCatchableTypes: 2, + arrayOfCatchableTypes: [ptr!(0), ptr!(0)], +}; + +static mut CATCHABLE_TYPE1: _CatchableType = _CatchableType { + properties: 1, + pType: ptr!(0), + thisDisplacement: _PMD { + mdisp: 0, + pdisp: -1, + 
vdisp: 0, + }, + sizeOrOffset: imp::OFFSET, + copy_function: ptr!(0), +}; + +static mut CATCHABLE_TYPE2: _CatchableType = _CatchableType { + properties: 1, + pType: ptr!(0), + thisDisplacement: _PMD { + mdisp: 0, + pdisp: -1, + vdisp: 0, + }, + sizeOrOffset: imp::OFFSET, + copy_function: ptr!(0), +}; + +extern "C" { + // The leading `\x01` byte here is actually a magical signal to LLVM to + // *not* apply any other mangling like prefixing with a `_` character. + // + // This symbol is the vtable used by C++'s `std::type_info`. Objects of type + // `std::type_info`, type descriptors, have a pointer to this table. Type + // descriptors are referenced by the C++ EH structures defined above and + // that we construct below. + #[link_name = "\x01??_7type_info@@6B@"] + static TYPE_INFO_VTABLE: *const u8; +} + +// We use #[lang = "msvc_try_filter"] here as this is the type descriptor which +// we'll use in LLVM's `catchpad` instruction which ends up also being passed as +// an argument to the C++ personality function. +// +// Again, I'm not entirely sure what this is describing, it just seems to work. +#[cfg_attr(not(test), lang = "msvc_try_filter")] +static mut TYPE_DESCRIPTOR1: _TypeDescriptor = _TypeDescriptor { + pVFTable: unsafe { &TYPE_INFO_VTABLE } as *const _ as *const _, + spare: 0 as *mut _, + name: imp::NAME1, +}; + +static mut TYPE_DESCRIPTOR2: _TypeDescriptor = _TypeDescriptor { + pVFTable: unsafe { &TYPE_INFO_VTABLE } as *const _ as *const _, + spare: 0 as *mut _, + name: imp::NAME2, +}; + +pub unsafe fn panic(data: Box) -> u32 { + use core::intrinsics::atomic_store; + + // _CxxThrowException executes entirely on this stack frame, so there's no + // need to otherwise transfer `data` to the heap. We just pass a stack + // pointer to this function. + // + // The first argument is the payload being thrown (our two pointers), and + // the second argument is the type information object describing the + // exception (constructed above). 
+ let ptrs = mem::transmute::<_, raw::TraitObject>(data); + let mut ptrs = [ptrs.data as u64, ptrs.vtable as u64]; + let mut ptrs_ptr = ptrs.as_mut_ptr(); + + // This... may seems surprising, and justifiably so. On 32-bit MSVC the + // pointers between these structure are just that, pointers. On 64-bit MSVC, + // however, the pointers between structures are rather expressed as 32-bit + // offsets from `__ImageBase`. + // + // Consequently, on 32-bit MSVC we can declare all these pointers in the + // `static`s above. On 64-bit MSVC, we would have to express subtraction of + // pointers in statics, which Rust does not currently allow, so we can't + // actually do that. + // + // The next best thing, then is to fill in these structures at runtime + // (panicking is already the "slow path" anyway). So here we reinterpret all + // of these pointer fields as 32-bit integers and then store the + // relevant value into it (atomically, as concurrent panics may be + // happening). Technically the runtime will probably do a nonatomic read of + // these fields, but in theory they never read the *wrong* value so it + // shouldn't be too bad... + // + // In any case, we basically need to do something like this until we can + // express more operations in statics (and we may never be able to). 
+ atomic_store(&mut THROW_INFO.pCatchableTypeArray as *mut _ as *mut u32, + ptr!(&CATCHABLE_TYPE_ARRAY as *const _) as u32); + atomic_store(&mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[0] as *mut _ as *mut u32, + ptr!(&CATCHABLE_TYPE1 as *const _) as u32); + atomic_store(&mut CATCHABLE_TYPE_ARRAY.arrayOfCatchableTypes[1] as *mut _ as *mut u32, + ptr!(&CATCHABLE_TYPE2 as *const _) as u32); + atomic_store(&mut CATCHABLE_TYPE1.pType as *mut _ as *mut u32, + ptr!(&TYPE_DESCRIPTOR1 as *const _) as u32); + atomic_store(&mut CATCHABLE_TYPE2.pType as *mut _ as *mut u32, + ptr!(&TYPE_DESCRIPTOR2 as *const _) as u32); + + c::_CxxThrowException(&mut ptrs_ptr as *mut _ as *mut _, + &mut THROW_INFO as *mut _ as *mut _); + u32::max_value() +} + +pub fn payload() -> [u64; 2] { + [0; 2] +} + +pub unsafe fn cleanup(payload: [u64; 2]) -> Box { + mem::transmute(raw::TraitObject { + data: payload[0] as *mut _, + vtable: payload[1] as *mut _, + }) +} + +// This is required by the compiler to exist (e.g. it's a lang item), but +// it's never actually called by the compiler because __C_specific_handler +// or _except_handler3 is the personality function that is always used. +// Hence this is just an aborting stub. +#[lang = "eh_personality"] +#[cfg(not(test))] +fn rust_eh_personality() { + unsafe { ::core::intrinsics::abort() } +} diff --git a/src/libpanic_unwind/seh64_gnu.rs b/src/libpanic_unwind/seh64_gnu.rs new file mode 100644 index 0000000000000..d4906b556b31a --- /dev/null +++ b/src/libpanic_unwind/seh64_gnu.rs @@ -0,0 +1,136 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Unwinding implementation of top of native Win64 SEH, +//! 
however the unwind handler data (aka LSDA) uses GCC-compatible encoding. + +#![allow(bad_style)] +#![allow(private_no_mangle_fns)] + +use alloc::boxed::Box; + +use core::any::Any; +use core::intrinsics; +use core::ptr; +use dwarf::eh::{EHContext, EHAction, find_eh_action}; +use windows as c; + +// Define our exception codes: +// according to http://msdn.microsoft.com/en-us/library/het71c37(v=VS.80).aspx, +// [31:30] = 3 (error), 2 (warning), 1 (info), 0 (success) +// [29] = 1 (user-defined) +// [28] = 0 (reserved) +// we define bits: +// [24:27] = type +// [0:23] = magic +const ETYPE: c::DWORD = 0b1110_u32 << 28; +const MAGIC: c::DWORD = 0x525354; // "RST" + +const RUST_PANIC: c::DWORD = ETYPE | (1 << 24) | MAGIC; + +#[repr(C)] +struct PanicData { + data: Box, +} + +pub unsafe fn panic(data: Box) -> u32 { + let panic_ctx = Box::new(PanicData { data: data }); + let params = [Box::into_raw(panic_ctx) as c::ULONG_PTR]; + c::RaiseException(RUST_PANIC, + c::EXCEPTION_NONCONTINUABLE, + params.len() as c::DWORD, + ¶ms as *const c::ULONG_PTR); + u32::max_value() +} + +pub fn payload() -> *mut u8 { + ptr::null_mut() +} + +pub unsafe fn cleanup(ptr: *mut u8) -> Box { + let panic_ctx = Box::from_raw(ptr as *mut PanicData); + return panic_ctx.data; +} + +// SEH doesn't support resuming unwinds after calling a landing pad like +// libunwind does. For this reason, MSVC compiler outlines landing pads into +// separate functions that can be called directly from the personality function +// but are nevertheless able to find and modify stack frame of the "parent" +// function. +// +// Since this cannot be done with libdwarf-style landing pads, +// rust_eh_personality instead catches RUST_PANICs, runs the landing pad, then +// reraises the exception. +// +// Note that it makes certain assumptions about the exception: +// +// 1. That RUST_PANIC is non-continuable, so no lower stack frame may choose to +// resume execution. +// 2. 
That the first parameter of the exception is a pointer to an extra data +// area (PanicData). +// Since these assumptions do not generally hold true for foreign exceptions +// (system faults, C++ exceptions, etc), we make no attempt to invoke our +// landing pads (and, thus, destructors!) for anything other than RUST_PANICs. +// This is considered acceptable, because the behavior of throwing exceptions +// through a C ABI boundary is undefined. + +#[lang = "eh_personality"] +#[cfg(not(test))] +unsafe extern "C" fn rust_eh_personality(exceptionRecord: *mut c::EXCEPTION_RECORD, + establisherFrame: c::LPVOID, + contextRecord: *mut c::CONTEXT, + dispatcherContext: *mut c::DISPATCHER_CONTEXT) + -> c::EXCEPTION_DISPOSITION { + let er = &*exceptionRecord; + let dc = &*dispatcherContext; + + if er.ExceptionFlags & c::EXCEPTION_UNWIND == 0 { + // we are in the dispatch phase + if er.ExceptionCode == RUST_PANIC { + if let Some(lpad) = find_landing_pad(dc) { + c::RtlUnwindEx(establisherFrame, + lpad as c::LPVOID, + exceptionRecord, + er.ExceptionInformation[0] as c::LPVOID, // pointer to PanicData + contextRecord, + dc.HistoryTable); + } + } + } + c::ExceptionContinueSearch +} + +#[lang = "eh_unwind_resume"] +#[unwind] +unsafe extern "C" fn rust_eh_unwind_resume(panic_ctx: c::LPVOID) -> ! { + let params = [panic_ctx as c::ULONG_PTR]; + c::RaiseException(RUST_PANIC, + c::EXCEPTION_NONCONTINUABLE, + params.len() as c::DWORD, + ¶ms as *const c::ULONG_PTR); + intrinsics::abort(); +} + +unsafe fn find_landing_pad(dc: &c::DISPATCHER_CONTEXT) -> Option { + let eh_ctx = EHContext { + // The return address points 1 byte past the call instruction, + // which could be in the next IP range in LSDA range table. 
+ ip: dc.ControlPc as usize - 1, + func_start: dc.ImageBase as usize + (*dc.FunctionEntry).BeginAddress as usize, + get_text_start: &|| dc.ImageBase as usize, + get_data_start: &|| unimplemented!(), + }; + match find_eh_action(dc.HandlerData, &eh_ctx) { + EHAction::None => None, + EHAction::Cleanup(lpad) | + EHAction::Catch(lpad) => Some(lpad), + EHAction::Terminate => intrinsics::abort(), + } +} diff --git a/src/libpanic_unwind/windows.rs b/src/libpanic_unwind/windows.rs new file mode 100644 index 0000000000000..fd8429d262e6e --- /dev/null +++ b/src/libpanic_unwind/windows.rs @@ -0,0 +1,96 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(bad_style)] +#![allow(dead_code)] +#![cfg(windows)] + +use libc::{c_long, c_ulong, c_ulonglong, c_void}; + +pub type DWORD = c_ulong; +pub type LONG = c_long; +pub type ULONG_PTR = c_ulonglong; +pub type LPVOID = *mut c_void; + +pub const EXCEPTION_MAXIMUM_PARAMETERS: usize = 15; +pub const EXCEPTION_NONCONTINUABLE: DWORD = 0x1; // Noncontinuable exception +pub const EXCEPTION_UNWINDING: DWORD = 0x2; // Unwind is in progress +pub const EXCEPTION_EXIT_UNWIND: DWORD = 0x4; // Exit unwind is in progress +pub const EXCEPTION_TARGET_UNWIND: DWORD = 0x20; // Target unwind in progress +pub const EXCEPTION_COLLIDED_UNWIND: DWORD = 0x40; // Collided exception handler call +pub const EXCEPTION_UNWIND: DWORD = EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND | + EXCEPTION_TARGET_UNWIND | + EXCEPTION_COLLIDED_UNWIND; + +#[repr(C)] +pub struct EXCEPTION_RECORD { + pub ExceptionCode: DWORD, + pub ExceptionFlags: DWORD, + pub ExceptionRecord: *mut EXCEPTION_RECORD, + pub ExceptionAddress: LPVOID, + pub NumberParameters: DWORD, + pub 
ExceptionInformation: [LPVOID; EXCEPTION_MAXIMUM_PARAMETERS], +} + +#[repr(C)] +pub struct EXCEPTION_POINTERS { + pub ExceptionRecord: *mut EXCEPTION_RECORD, + pub ContextRecord: *mut CONTEXT, +} + +pub enum UNWIND_HISTORY_TABLE {} + +#[repr(C)] +pub struct RUNTIME_FUNCTION { + pub BeginAddress: DWORD, + pub EndAddress: DWORD, + pub UnwindData: DWORD, +} + +pub enum CONTEXT {} + +#[repr(C)] +pub struct DISPATCHER_CONTEXT { + pub ControlPc: LPVOID, + pub ImageBase: LPVOID, + pub FunctionEntry: *const RUNTIME_FUNCTION, + pub EstablisherFrame: LPVOID, + pub TargetIp: LPVOID, + pub ContextRecord: *const CONTEXT, + pub LanguageHandler: LPVOID, + pub HandlerData: *const u8, + pub HistoryTable: *const UNWIND_HISTORY_TABLE, +} + +#[repr(C)] +pub enum EXCEPTION_DISPOSITION { + ExceptionContinueExecution, + ExceptionContinueSearch, + ExceptionNestedException, + ExceptionCollidedUnwind, +} +pub use self::EXCEPTION_DISPOSITION::*; + +extern "system" { + #[unwind] + pub fn RaiseException(dwExceptionCode: DWORD, + dwExceptionFlags: DWORD, + nNumberOfArguments: DWORD, + lpArguments: *const ULONG_PTR); + #[unwind] + pub fn RtlUnwindEx(TargetFrame: LPVOID, + TargetIp: LPVOID, + ExceptionRecord: *const EXCEPTION_RECORD, + ReturnValue: LPVOID, + OriginalContext: *const CONTEXT, + HistoryTable: *const UNWIND_HISTORY_TABLE); + #[unwind] + pub fn _CxxThrowException(pExceptionObject: *mut c_void, pThrowInfo: *mut u8); +} diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml new file mode 100644 index 0000000000000..7ce65d0fe4dbc --- /dev/null +++ b/src/libproc_macro/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["The Rust Project Developers"] +name = "proc_macro" +version = "0.0.0" + +[lib] +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +syntax = { path = "../libsyntax" } diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs new file mode 100644 index 0000000000000..5ee9fecfb21b3 --- /dev/null +++ b/src/libproc_macro/lib.rs @@ -0,0 +1,166 @@ 
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A support library for macro authors when defining new macros. +//! +//! This library, provided by the standard distribution, provides the types +//! consumed in the interfaces of procedurally defined macro definitions. +//! Currently the primary use of this crate is to provide the ability to define +//! new custom derive modes through `#[proc_macro_derive]`. +//! +//! Added recently as part of [RFC 1681] this crate is currently *unstable* and +//! requires the `#![feature(proc_macro_lib)]` directive to use. +//! +//! [RFC 1681]: https://github.com/rust-lang/rfcs/blob/master/text/1681-macros-1.1.md +//! +//! Note that this crate is intentionally very bare-bones currently. The main +//! type, `TokenStream`, only supports `fmt::Display` and `FromStr` +//! implementations, indicating that it can only go to and come from a string. +//! This functionality is intended to be expanded over time as more surface +//! area for macro authors is stabilized. + +#![crate_name = "proc_macro"] +#![unstable(feature = "proc_macro_lib", issue = "27812")] +#![crate_type = "rlib"] +#![crate_type = "dylib"] +#![cfg_attr(not(stage0), deny(warnings))] +#![deny(missing_docs)] + +#![feature(rustc_private)] +#![feature(staged_api)] +#![feature(lang_items)] + +extern crate syntax; + +use std::fmt; +use std::str::FromStr; + +use syntax::ast; +use syntax::parse; +use syntax::ptr::P; + +/// The main type provided by this crate, representing an abstract stream of +/// tokens. +/// +/// This is both the input and output of `#[proc_macro_derive]` definitions. 
+/// Currently it's required to be a list of valid Rust items, but this +/// restriction may be lifted in the future. +/// +/// The API of this type is intentionally bare-bones, but it'll be expanded over +/// time! +pub struct TokenStream { + inner: Vec>, +} + +/// Error returned from `TokenStream::from_str`. +#[derive(Debug)] +pub struct LexError { + _inner: (), +} + +/// Permanently unstable internal implementation details of this crate. This +/// should not be used. +/// +/// These methods are used by the rest of the compiler to generate instances of +/// `TokenStream` to hand to macro definitions, as well as consume the output. +/// +/// Note that this module is also intentionally separate from the rest of the +/// crate. This allows the `#[unstable]` directive below to naturally apply to +/// all of the contents. +#[unstable(feature = "proc_macro_internals", issue = "27812")] +#[doc(hidden)] +pub mod __internal { + use std::cell::Cell; + + use syntax::ast; + use syntax::ptr::P; + use syntax::parse::ParseSess; + use super::TokenStream; + + pub fn new_token_stream(item: P) -> TokenStream { + TokenStream { inner: vec![item] } + } + + pub fn token_stream_items(stream: TokenStream) -> Vec> { + stream.inner + } + + pub trait Registry { + fn register_custom_derive(&mut self, + trait_name: &str, + expand: fn(TokenStream) -> TokenStream, + attributes: &[&'static str]); + } + + // Emulate scoped_thread_local!() here essentially + thread_local! 
{ + static CURRENT_SESS: Cell<*const ParseSess> = Cell::new(0 as *const _); + } + + pub fn set_parse_sess(sess: &ParseSess, f: F) -> R + where F: FnOnce() -> R + { + struct Reset { prev: *const ParseSess } + + impl Drop for Reset { + fn drop(&mut self) { + CURRENT_SESS.with(|p| p.set(self.prev)); + } + } + + CURRENT_SESS.with(|p| { + let _reset = Reset { prev: p.get() }; + p.set(sess); + f() + }) + } + + pub fn with_parse_sess(f: F) -> R + where F: FnOnce(&ParseSess) -> R + { + let p = CURRENT_SESS.with(|p| p.get()); + assert!(!p.is_null()); + f(unsafe { &*p }) + } +} + +impl FromStr for TokenStream { + type Err = LexError; + + fn from_str(src: &str) -> Result { + __internal::with_parse_sess(|sess| { + let src = src.to_string(); + let name = "".to_string(); + let mut parser = parse::new_parser_from_source_str(sess, name, src); + let mut ret = TokenStream { inner: Vec::new() }; + loop { + match parser.parse_item() { + Ok(Some(item)) => ret.inner.push(item), + Ok(None) => return Ok(ret), + Err(mut err) => { + err.cancel(); + return Err(LexError { _inner: () }) + } + } + } + }) + } +} + +impl fmt::Display for TokenStream { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + for item in self.inner.iter() { + let item = syntax::print::pprust::item_to_string(item); + try!(f.write_str(&item)); + try!(f.write_str("\n")); + } + Ok(()) + } +} diff --git a/src/libproc_macro_plugin/Cargo.toml b/src/libproc_macro_plugin/Cargo.toml new file mode 100644 index 0000000000000..4bc3f488d3280 --- /dev/null +++ b/src/libproc_macro_plugin/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["The Rust Project Developers"] +name = "proc_macro_plugin" +version = "0.0.0" + +[lib] +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +rustc_plugin = { path = "../librustc_plugin" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +proc_macro_tokens = { path = "../libproc_macro_tokens" } diff --git 
a/src/libproc_macro_plugin/lib.rs b/src/libproc_macro_plugin/lib.rs new file mode 100644 index 0000000000000..c45762bfb6e71 --- /dev/null +++ b/src/libproc_macro_plugin/lib.rs @@ -0,0 +1,107 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Proc_Macro +//! +//! A library for procedural macro writers. +//! +//! ## Usage +//! This crate provides the `qquote!` macro for syntax creation. +//! +//! The `qquote!` macro imports `syntax::ext::proc_macro_shim::prelude::*`, so you +//! will need to `extern crate syntax` for usage. (This is a temporary solution until more +//! of the external API in libproc_macro_tokens is stabilized to support the token construction +//! operations that the qausiquoter relies on.) The shim file also provides additional +//! operations, such as `build_block_emitter` (as used in the `cond` example below). +//! +//! ## Quasiquotation +//! +//! The quasiquoter creates output that, when run, constructs the tokenstream specified as +//! input. For example, `qquote!(5 + 5)` will produce a program, that, when run, will +//! construct the TokenStream `5 | + | 5`. +//! +//! ### Unquoting +//! +//! Unquoting is currently done as `unquote`, and works by taking the single next +//! TokenTree in the TokenStream as the unquoted term. Ergonomically, `unquote(foo)` works +//! fine, but `unquote foo` is also supported. +//! +//! A simple example might be: +//! +//!``` +//!fn double(tmp: TokenStream) -> TokenStream { +//! qquote!(unquote(tmp) * 2) +//!} +//!``` +//! +//! ### Large Example: Implementing Scheme's `cond` +//! +//! Below is the full implementation of Scheme's `cond` operator. +//! +//! ``` +//! 
fn cond_rec(input: TokenStream) -> TokenStream { +//! if input.is_empty() { return quote!(); } +//! +//! let next = input.slice(0..1); +//! let rest = input.slice_from(1..); +//! +//! let clause : TokenStream = match next.maybe_delimited() { +//! Some(ts) => ts, +//! _ => panic!("Invalid input"), +//! }; +//! +//! // clause is ([test]) [rhs] +//! if clause.len() < 2 { panic!("Invalid macro usage in cond: {:?}", clause) } +//! +//! let test: TokenStream = clause.slice(0..1); +//! let rhs: TokenStream = clause.slice_from(1..); +//! +//! if ident_eq(&test[0], str_to_ident("else")) || rest.is_empty() { +//! quote!({unquote(rhs)}) +//! } else { +//! quote!({if unquote(test) { unquote(rhs) } else { cond!(unquote(rest)) } }) +//! } +//! } +//! ``` +//! + +#![crate_name = "proc_macro_plugin"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![feature(plugin_registrar)] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(staged_api)] +#![feature(rustc_diagnostic_macros)] +#![feature(rustc_private)] + +extern crate rustc_plugin; +extern crate syntax; +extern crate syntax_pos; +extern crate proc_macro_tokens; +#[macro_use] extern crate log; + +mod qquote; + +use qquote::qquote; + +use rustc_plugin::Registry; + +// ____________________________________________________________________________________________ +// Main macro definition + +#[plugin_registrar] +pub fn plugin_registrar(reg: &mut Registry) { + reg.register_macro("qquote", qquote); +} diff --git a/src/libproc_macro_plugin/qquote.rs b/src/libproc_macro_plugin/qquote.rs new file mode 100644 index 0000000000000..1ae906e0aa4e0 --- /dev/null +++ b/src/libproc_macro_plugin/qquote.rs @@ -0,0 +1,468 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Quasiquoter +//! This file contains the implementation internals of the quasiquoter provided by `quote!`. +//! +//! ## Ouput +//! The quasiquoter produces output of the form: +//! let tmp0 = ...; +//! let tmp1 = ...; +//! ... +//! concat(from_tokens(...), concat(...)) +//! +//! To the more explicit, the quasiquoter produces a series of bindings that each +//! construct TokenStreams via constructing Tokens and using `from_tokens`, ultimately +//! invoking `concat` on these bindings (and inlined expressions) to construct a +//! TokenStream that resembles the output syntax. +//! + +use proc_macro_tokens::build::*; +use proc_macro_tokens::parse::lex; + +use qquote::int_build::*; + +use syntax::ast::Ident; +use syntax::codemap::Span; +use syntax::ext::base::*; +use syntax::ext::base; +use syntax::ext::proc_macro_shim::build_block_emitter; +use syntax::parse::token::{self, Token}; +use syntax::print::pprust; +use syntax::symbol::Symbol; +use syntax::tokenstream::{TokenTree, TokenStream}; + +// ____________________________________________________________________________________________ +// Main definition +/// The user should use the macro, not this procedure. 
+pub fn qquote<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[TokenTree]) + -> Box { + + debug!("\nTTs in: {:?}\n", pprust::tts_to_string(&tts[..])); + let output = qquoter(cx, TokenStream::from_tts(tts.clone().to_owned())); + debug!("\nQQ out: {}\n", pprust::tts_to_string(&output.to_tts()[..])); + let imports = concat(lex("use syntax::ext::proc_macro_shim::prelude::*;"), + lex("use proc_macro_tokens::prelude::*;")); + build_block_emitter(cx, sp, build_brace_delimited(concat(imports, output))) +} + +// ____________________________________________________________________________________________ +// Datatype Definitions + +#[derive(Debug)] +struct QDelimited { + delim: token::DelimToken, + open_span: Span, + tts: Vec, + close_span: Span, +} + +#[derive(Debug)] +enum QTT { + TT(TokenTree), + QDL(QDelimited), + QIdent(TokenTree), +} + +type Bindings = Vec<(Ident, TokenStream)>; + +// ____________________________________________________________________________________________ +// Quasiquoter Algorithm +// This algorithm works as follows: +// Input: TokenStream +// 1. Walk the TokenStream, gathering up the unquoted expressions and marking them separately. +// 2. Hoist any unquoted term into its own let-binding via a gensym'd identifier +// 3. Convert the body from a `complex expression` into a simplified one via `convert_complex_tts +// 4. Stitch everything together with `concat`. 
+fn qquoter<'cx>(cx: &'cx mut ExtCtxt, ts: TokenStream) -> TokenStream { + if ts.is_empty() { + return lex("TokenStream::mk_empty()"); + } + let qq_res = qquote_iter(cx, 0, ts); + let mut bindings = qq_res.0; + let body = qq_res.1; + let mut cct_res = convert_complex_tts(cx, body); + + bindings.append(&mut cct_res.0); + + if bindings.is_empty() { + cct_res.1 + } else { + debug!("BINDINGS"); + for b in bindings.clone() { + debug!("{:?} = {}", b.0, pprust::tts_to_string(&b.1.to_tts()[..])); + } + TokenStream::concat(unravel(bindings), cct_res.1) + } +} + +fn qquote_iter<'cx>(cx: &'cx mut ExtCtxt, depth: i64, ts: TokenStream) -> (Bindings, Vec) { + let mut depth = depth; + let mut bindings: Bindings = Vec::new(); + let mut output: Vec = Vec::new(); + + let mut iter = ts.iter(); + + loop { + let next = iter.next(); + if next.is_none() { + break; + } + let next = next.unwrap().clone(); + match next { + TokenTree::Token(_, Token::Ident(id)) if is_unquote(id) => { + if depth == 0 { + let exp = iter.next(); + if exp.is_none() { + break; + } // produce an error or something first + let exp = vec![exp.unwrap().to_owned()]; + debug!("RHS: {:?}", exp.clone()); + let new_id = Ident::with_empty_ctxt(Symbol::gensym("tmp")); + debug!("RHS TS: {:?}", TokenStream::from_tts(exp.clone())); + debug!("RHS TS TT: {:?}", TokenStream::from_tts(exp.clone()).to_vec()); + bindings.push((new_id, TokenStream::from_tts(exp))); + debug!("BINDINGS"); + for b in bindings.clone() { + debug!("{:?} = {}", b.0, pprust::tts_to_string(&b.1.to_tts()[..])); + } + output.push(QTT::QIdent(as_tt(Token::Ident(new_id.clone())))); + } else { + depth = depth - 1; + output.push(QTT::TT(next.clone())); + } + } + TokenTree::Token(_, Token::Ident(id)) if is_qquote(id) => { + depth = depth + 1; + } + TokenTree::Delimited(_, ref dl) => { + let br = qquote_iter(cx, depth, TokenStream::from_tts(dl.tts.clone().to_owned())); + let mut bind_ = br.0; + let res_ = br.1; + bindings.append(&mut bind_); + + let new_dl = 
QDelimited { + delim: dl.delim, + open_span: dl.open_span, + tts: res_, + close_span: dl.close_span, + }; + + output.push(QTT::QDL(new_dl)); + } + t => { + output.push(QTT::TT(t)); + } + } + } + + (bindings, output) +} + +// ____________________________________________________________________________________________ +// Turns QQTs into a TokenStream and some Bindings. +/// Construct a chain of concatenations. +fn unravel_concats(tss: Vec) -> TokenStream { + let mut pushes: Vec = + tss.into_iter().filter(|&ref ts| !ts.is_empty()).collect(); + let mut output = match pushes.pop() { + Some(ts) => ts, + None => { + return TokenStream::mk_empty(); + } + }; + + while let Some(ts) = pushes.pop() { + output = build_fn_call(Ident::from_str("concat"), + concat(concat(ts, + from_tokens(vec![Token::Comma])), + output)); + } + output +} + +/// This converts the vector of QTTs into a seet of Bindings for construction and the main +/// body as a TokenStream. +fn convert_complex_tts<'cx>(cx: &'cx mut ExtCtxt, tts: Vec) -> (Bindings, TokenStream) { + let mut pushes: Vec = Vec::new(); + let mut bindings: Bindings = Vec::new(); + + let mut iter = tts.into_iter(); + + loop { + let next = iter.next(); + if next.is_none() { + break; + } + let next = next.unwrap(); + match next { + QTT::TT(TokenTree::Token(_, t)) => { + let token_out = emit_token(t); + pushes.push(token_out); + } + // FIXME handle sequence repetition tokens + QTT::QDL(qdl) => { + debug!(" QDL: {:?} ", qdl.tts); + let new_id = Ident::with_empty_ctxt(Symbol::gensym("qdl_tmp")); + let mut cct_rec = convert_complex_tts(cx, qdl.tts); + bindings.append(&mut cct_rec.0); + bindings.push((new_id, cct_rec.1)); + + let sep = build_delim_tok(qdl.delim); + + pushes.push(build_mod_call( + vec![Ident::from_str("proc_macro_tokens"), + Ident::from_str("build"), + Ident::from_str("build_delimited")], + concat(from_tokens(vec![Token::Ident(new_id)]), concat(lex(","), sep)), + )); + } + QTT::QIdent(t) => { + 
pushes.push(TokenStream::from_tts(vec![t])); + pushes.push(TokenStream::mk_empty()); + } + _ => panic!("Unhandled case!"), + } + + } + + (bindings, unravel_concats(pushes)) +} + +// ____________________________________________________________________________________________ +// Utilities + +/// Unravels Bindings into a TokenStream of `let` declarations. +fn unravel(binds: Bindings) -> TokenStream { + let mut output = TokenStream::mk_empty(); + + for b in binds { + output = concat(output, build_let(b.0, b.1)); + } + + output +} + +/// Checks if the Ident is `unquote`. +fn is_unquote(id: Ident) -> bool { + let qq = Ident::from_str("unquote"); + id.name == qq.name // We disregard context; unquote is _reserved_ +} + +/// Checks if the Ident is `quote`. +fn is_qquote(id: Ident) -> bool { + let qq = Ident::from_str("qquote"); + id.name == qq.name // We disregard context; qquote is _reserved_ +} + +mod int_build { + use proc_macro_tokens::build::*; + use proc_macro_tokens::parse::*; + + use syntax::ast::{self, Ident}; + use syntax::codemap::{DUMMY_SP}; + use syntax::parse::token::{self, Token, Lit}; + use syntax::symbol::keywords; + use syntax::tokenstream::{TokenTree, TokenStream}; + + // ____________________________________________________________________________________________ + // Emitters + + pub fn emit_token(t: Token) -> TokenStream { + concat(lex("TokenStream::from_tokens"), + build_paren_delimited(build_vec(build_token_tt(t)))) + } + + pub fn emit_lit(l: Lit, n: Option) -> TokenStream { + let suf = match n { + Some(n) => format!("Some(ast::Name({}))", n.as_u32()), + None => "None".to_string(), + }; + + let lit = match l { + Lit::Byte(n) => format!("Lit::Byte(Symbol::intern(\"{}\"))", n.to_string()), + Lit::Char(n) => format!("Lit::Char(Symbol::intern(\"{}\"))", n.to_string()), + Lit::Float(n) => format!("Lit::Float(Symbol::intern(\"{}\"))", n.to_string()), + Lit::Str_(n) => format!("Lit::Str_(Symbol::intern(\"{}\"))", n.to_string()), + Lit::Integer(n) => 
format!("Lit::Integer(Symbol::intern(\"{}\"))", n.to_string()), + Lit::ByteStr(n) => format!("Lit::ByteStr(Symbol::intern(\"{}\"))", n.to_string()), + _ => panic!("Unsupported literal"), + }; + + let res = format!("Token::Literal({},{})", lit, suf); + debug!("{}", res); + lex(&res) + } + + // ____________________________________________________________________________________________ + // Token Builders + + pub fn build_binop_tok(bot: token::BinOpToken) -> TokenStream { + match bot { + token::BinOpToken::Plus => lex("Token::BinOp(BinOpToken::Plus)"), + token::BinOpToken::Minus => lex("Token::BinOp(BinOpToken::Minus)"), + token::BinOpToken::Star => lex("Token::BinOp(BinOpToken::Star)"), + token::BinOpToken::Slash => lex("Token::BinOp(BinOpToken::Slash)"), + token::BinOpToken::Percent => lex("Token::BinOp(BinOpToken::Percent)"), + token::BinOpToken::Caret => lex("Token::BinOp(BinOpToken::Caret)"), + token::BinOpToken::And => lex("Token::BinOp(BinOpToken::And)"), + token::BinOpToken::Or => lex("Token::BinOp(BinOpToken::Or)"), + token::BinOpToken::Shl => lex("Token::BinOp(BinOpToken::Shl)"), + token::BinOpToken::Shr => lex("Token::BinOp(BinOpToken::Shr)"), + } + } + + pub fn build_binopeq_tok(bot: token::BinOpToken) -> TokenStream { + match bot { + token::BinOpToken::Plus => lex("Token::BinOpEq(BinOpToken::Plus)"), + token::BinOpToken::Minus => lex("Token::BinOpEq(BinOpToken::Minus)"), + token::BinOpToken::Star => lex("Token::BinOpEq(BinOpToken::Star)"), + token::BinOpToken::Slash => lex("Token::BinOpEq(BinOpToken::Slash)"), + token::BinOpToken::Percent => lex("Token::BinOpEq(BinOpToken::Percent)"), + token::BinOpToken::Caret => lex("Token::BinOpEq(BinOpToken::Caret)"), + token::BinOpToken::And => lex("Token::BinOpEq(BinOpToken::And)"), + token::BinOpToken::Or => lex("Token::BinOpEq(BinOpToken::Or)"), + token::BinOpToken::Shl => lex("Token::BinOpEq(BinOpToken::Shl)"), + token::BinOpToken::Shr => lex("Token::BinOpEq(BinOpToken::Shr)"), + } + } + + pub fn 
build_delim_tok(dt: token::DelimToken) -> TokenStream { + match dt { + token::DelimToken::Paren => lex("DelimToken::Paren"), + token::DelimToken::Bracket => lex("DelimToken::Bracket"), + token::DelimToken::Brace => lex("DelimToken::Brace"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + } + } + + pub fn build_token_tt(t: Token) -> TokenStream { + match t { + Token::Eq => lex("Token::Eq"), + Token::Lt => lex("Token::Lt"), + Token::Le => lex("Token::Le"), + Token::EqEq => lex("Token::EqEq"), + Token::Ne => lex("Token::Ne"), + Token::Ge => lex("Token::Ge"), + Token::Gt => lex("Token::Gt"), + Token::AndAnd => lex("Token::AndAnd"), + Token::OrOr => lex("Token::OrOr"), + Token::Not => lex("Token::Not"), + Token::Tilde => lex("Token::Tilde"), + Token::BinOp(tok) => build_binop_tok(tok), + Token::BinOpEq(tok) => build_binopeq_tok(tok), + Token::At => lex("Token::At"), + Token::Dot => lex("Token::Dot"), + Token::DotDot => lex("Token::DotDot"), + Token::DotDotDot => lex("Token::DotDotDot"), + Token::Comma => lex("Token::Comma"), + Token::Semi => lex("Token::Semi"), + Token::Colon => lex("Token::Colon"), + Token::ModSep => lex("Token::ModSep"), + Token::RArrow => lex("Token::RArrow"), + Token::LArrow => lex("Token::LArrow"), + Token::FatArrow => lex("Token::FatArrow"), + Token::Pound => lex("Token::Pound"), + Token::Dollar => lex("Token::Dollar"), + Token::Question => lex("Token::Question"), + Token::OpenDelim(dt) => { + match dt { + token::DelimToken::Paren => lex("Token::OpenDelim(DelimToken::Paren)"), + token::DelimToken::Bracket => lex("Token::OpenDelim(DelimToken::Bracket)"), + token::DelimToken::Brace => lex("Token::OpenDelim(DelimToken::Brace)"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + } + } + Token::CloseDelim(dt) => { + match dt { + token::DelimToken::Paren => lex("Token::CloseDelim(DelimToken::Paren)"), + token::DelimToken::Bracket => lex("Token::CloseDelim(DelimToken::Bracket)"), + token::DelimToken::Brace => 
lex("Token::CloseDelim(DelimToken::Brace)"), + token::DelimToken::NoDelim => lex("DelimToken::NoDelim"), + } + } + Token::Underscore => lex("_"), + Token::Literal(lit, sfx) => emit_lit(lit, sfx), + // fix ident expansion information... somehow + Token::Ident(ident) => + lex(&format!("Token::Ident(Ident::from_str(\"{}\"))", ident.name)), + Token::Lifetime(ident) => + lex(&format!("Token::Ident(Ident::from_str(\"{}\"))", ident.name)), + _ => panic!("Unhandled case!"), + } + } + + // ____________________________________________________________________________________________ + // Conversion operators + + pub fn as_tt(t: Token) -> TokenTree { + // FIXME do something nicer with the spans + TokenTree::Token(DUMMY_SP, t) + } + + // ____________________________________________________________________________________________ + // Build Procedures + + /// Takes `input` and returns `vec![input]`. + pub fn build_vec(ts: TokenStream) -> TokenStream { + build_mac_call(Ident::from_str("vec"), ts) + // tts.clone().to_owned() + } + + /// Takes `ident` and `rhs` and produces `let ident = rhs;`. + pub fn build_let(id: Ident, tts: TokenStream) -> TokenStream { + concat(from_tokens(vec![keyword_to_token_ident(keywords::Let), + Token::Ident(id), + Token::Eq]), + concat(tts, from_tokens(vec![Token::Semi]))) + } + + /// Takes `ident ...`, and `args ...` and produces `ident::...(args ...)`. + pub fn build_mod_call(ids: Vec, args: TokenStream) -> TokenStream { + let call = from_tokens(intersperse(ids.into_iter().map(|id| Token::Ident(id)).collect(), + Token::ModSep)); + concat(call, build_paren_delimited(args)) + } + + /// Takes `ident` and `args ...` and produces `ident(args ...)`. + pub fn build_fn_call(name: Ident, args: TokenStream) -> TokenStream { + concat(from_tokens(vec![Token::Ident(name)]), build_paren_delimited(args)) + } + + /// Takes `ident` and `args ...` and produces `ident!(args ...)`. 
+ pub fn build_mac_call(name: Ident, args: TokenStream) -> TokenStream { + concat(from_tokens(vec![Token::Ident(name), Token::Not]), + build_paren_delimited(args)) + } + + // ____________________________________________________________________________________________ + // Utilities + + /// A wrapper around `TokenStream::from_tokens` to avoid extra namespace specification and + /// provide it as a generic operator. + pub fn from_tokens(tokens: Vec) -> TokenStream { + TokenStream::from_tokens(tokens) + } + + pub fn intersperse(vs: Vec, t: T) -> Vec + where T: Clone + { + if vs.len() < 2 { + return vs; + } + let mut output = vec![vs.get(0).unwrap().to_owned()]; + + for v in vs.into_iter().skip(1) { + output.push(t.clone()); + output.push(v); + } + output + } +} diff --git a/src/libproc_macro_tokens/Cargo.toml b/src/libproc_macro_tokens/Cargo.toml new file mode 100644 index 0000000000000..2b66d56759f35 --- /dev/null +++ b/src/libproc_macro_tokens/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["The Rust Project Developers"] +name = "proc_macro_tokens" +version = "0.0.0" + +[lib] +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +log = { path = "../liblog" } diff --git a/src/libproc_macro_tokens/build.rs b/src/libproc_macro_tokens/build.rs new file mode 100644 index 0000000000000..d39aba0aa7787 --- /dev/null +++ b/src/libproc_macro_tokens/build.rs @@ -0,0 +1,90 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +extern crate syntax; +extern crate syntax_pos; + +use syntax::ast::Ident; +use syntax::codemap::DUMMY_SP; +use syntax::parse::token::{self, Token}; +use syntax::symbol::keywords; +use syntax::tokenstream::{self, TokenTree, TokenStream}; +use std::rc::Rc; + +/// A wrapper around `TokenStream::concat` to avoid extra namespace specification and +/// provide TokenStream concatenation as a generic operator. +pub fn concat(ts1: TokenStream, ts2: TokenStream) -> TokenStream { + TokenStream::concat(ts1, ts2) +} + +/// Checks if two identifiers have the same name, disregarding context. This allows us to +/// fake 'reserved' keywords. +// FIXME We really want `free-identifier-=?` (a la Dybvig 1993). von Tander 2007 is +// probably the easiest way to do that. +pub fn ident_eq(tident: &TokenTree, id: Ident) -> bool { + let tid = match *tident { + TokenTree::Token(_, Token::Ident(ref id)) => id, + _ => { + return false; + } + }; + + tid.name == id.name +} + +// ____________________________________________________________________________________________ +// Conversion operators + +/// Convert a `&str` into a Token. +pub fn str_to_token_ident(s: &str) -> Token { + Token::Ident(Ident::from_str(s)) +} + +/// Converts a keyword (from `syntax::parse::token::keywords`) into a Token that +/// corresponds to it. +pub fn keyword_to_token_ident(kw: keywords::Keyword) -> Token { + Token::Ident(Ident::from_str(&kw.name().as_str()[..])) +} + +// ____________________________________________________________________________________________ +// Build Procedures + +/// Generically takes a `ts` and delimiter and returns `ts` delimited by the specified +/// delimiter. +pub fn build_delimited(ts: TokenStream, delim: token::DelimToken) -> TokenStream { + let tts = ts.to_tts(); + TokenStream::from_tts(vec![TokenTree::Delimited(DUMMY_SP, + Rc::new(tokenstream::Delimited { + delim: delim, + open_span: DUMMY_SP, + tts: tts, + close_span: DUMMY_SP, + }))]) +} + +/// Takes `ts` and returns `[ts]`. 
+pub fn build_bracket_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Bracket) +} + +/// Takes `ts` and returns `{ts}`. +pub fn build_brace_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Brace) +} + +/// Takes `ts` and returns `(ts)`. +pub fn build_paren_delimited(ts: TokenStream) -> TokenStream { + build_delimited(ts, token::DelimToken::Paren) +} + +/// Constructs `()`. +pub fn build_empty_args() -> TokenStream { + build_paren_delimited(TokenStream::mk_empty()) +} diff --git a/src/libproc_macro_tokens/lib.rs b/src/libproc_macro_tokens/lib.rs new file mode 100644 index 0000000000000..3bfa2fbb29fbd --- /dev/null +++ b/src/libproc_macro_tokens/lib.rs @@ -0,0 +1,66 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Proc_Macro +//! +//! A library for procedural macro writers. +//! +//! ## Usage +//! This crate provides the prelude (at libproc_macro_tokens::prelude), which +//! provides a number of operations: +//! - `concat`, for concatenating two TokenStreams. +//! - `ident_eq`, for checking if two identifiers are equal regardless of syntax context. +//! - `str_to_token_ident`, for converting an `&str` into a Token. +//! - `keyword_to_token_delim`, for converting a `parse::token::keywords::Keyword` into a +//! Token. +//! - `build_delimited`, for creating a new TokenStream from an existing one and a delimiter +//! by wrapping the TokenStream in the delimiter. +//! - `build_bracket_delimited`, `build_brace_delimited`, and `build_paren_delimited`, for +//! easing the above. +//! - `build_empty_args`, which returns a TokenStream containing `()`. +//! 
- `lex`, which takes an `&str` and returns the TokenStream it represents. +//! +//! ## TokenStreams +//! +//! TokenStreams serve as the basis of the macro system. They are, in essence, vectors of +//! TokenTrees, where indexing treats delimited values as a single term. That is, the term +//! `even(a+c) && even(b)` will be indexibly encoded as `even | (a+c) | even | (b)` where, +//! in reality, `(a+c)` is actually a decorated pointer to `a | + | c`. +//! +//! If a user has a TokenStream that is a single, delimited value, they can use +//! `maybe_delimited` to destruct it and receive the internal vector as a new TokenStream +//! as: +//! ``` +//! `(a+c)`.maybe_delimited() ~> Some(a | + | c)` +//! ``` +//! +//! Check the TokenStream documentation for more information; the structure also provides +//! cheap concatenation and slicing. +//! + +#![crate_name = "proc_macro_tokens"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(staged_api)] +#![feature(rustc_private)] + +extern crate syntax; +extern crate syntax_pos; +#[macro_use] extern crate log; + +pub mod build; +pub mod parse; +pub mod prelude; diff --git a/src/libproc_macro_tokens/parse.rs b/src/libproc_macro_tokens/parse.rs new file mode 100644 index 0000000000000..5ab4fcd5dab29 --- /dev/null +++ b/src/libproc_macro_tokens/parse.rs @@ -0,0 +1,26 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Parsing utilities for writing procedural macros. + +extern crate syntax; + +use syntax::parse::{ParseSess, filemap_to_tts}; +use syntax::tokenstream::TokenStream; + +/// Map a string to tts, using a made-up filename. For example, `lex("15")` will return a +/// TokenStream containing the literal 15. +pub fn lex(source_str: &str) -> TokenStream { + let ps = ParseSess::new(); + TokenStream::from_tts(filemap_to_tts(&ps, + ps.codemap().new_filemap("".to_string(), + None, + source_str.to_owned()))) +} diff --git a/src/libproc_macro_tokens/prelude.rs b/src/libproc_macro_tokens/prelude.rs new file mode 100644 index 0000000000000..4c0c8ba6c6684 --- /dev/null +++ b/src/libproc_macro_tokens/prelude.rs @@ -0,0 +1,12 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use build::*; +pub use parse::*; diff --git a/src/librand/Cargo.toml b/src/librand/Cargo.toml new file mode 100644 index 0000000000000..86b061db05413 --- /dev/null +++ b/src/librand/Cargo.toml @@ -0,0 +1,11 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rand" +version = "0.0.0" + +[lib] +name = "rand" +path = "lib.rs" + +[dependencies] +core = { path = "../libcore" } diff --git a/src/librand/chacha.rs b/src/librand/chacha.rs index cd099c69005f3..7dc0d19e6a615 100644 --- a/src/librand/chacha.rs +++ b/src/librand/chacha.rs @@ -10,7 +10,7 @@ //! The ChaCha random number generator. 
-use {Rng, SeedableRng, Rand}; +use {Rand, Rng, SeedableRng}; const KEY_WORDS: usize = 8; // 8 words for the 256-bit key const STATE_WORDS: usize = 16; @@ -216,8 +216,9 @@ mod tests { let s = ::test::rng().gen_iter::().take(8).collect::>(); let mut ra: ChaChaRng = SeedableRng::from_seed(&*s); let mut rb: ChaChaRng = SeedableRng::from_seed(&*s); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] @@ -225,8 +226,9 @@ mod tests { let seed: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; let mut ra: ChaChaRng = SeedableRng::from_seed(seed); let mut rb: ChaChaRng = SeedableRng::from_seed(seed); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] @@ -251,17 +253,17 @@ mod tests { let v = (0..16).map(|_| ra.next_u32()).collect::>(); assert_eq!(v, - vec!(0xade0b876, 0x903df1a0, 0xe56a5d40, 0x28bd8653, + vec![0xade0b876, 0x903df1a0, 0xe56a5d40, 0x28bd8653, 0xb819d2bd, 0x1aed8da0, 0xccef36a8, 0xc70d778b, 0x7c5941da, 0x8d485751, 0x3fe02477, 0x374ad8b8, - 0xf4b8436a, 0x1ca11815, 0x69b687c3, 0x8665eeb2)); + 0xf4b8436a, 0x1ca11815, 0x69b687c3, 0x8665eeb2]); let v = (0..16).map(|_| ra.next_u32()).collect::>(); assert_eq!(v, - vec!(0xbee7079f, 0x7a385155, 0x7c97ba98, 0x0d082d73, + vec![0xbee7079f, 0x7a385155, 0x7c97ba98, 0x0d082d73, 0xa0290fcb, 0x6965e348, 0x3e53c612, 0xed7aee32, 0x7621b729, 0x434ee69c, 0xb03371d5, 0xd539d874, - 0x281fed31, 0x45fb0a51, 0x1f0ae1ac, 0x6f4d794b)); + 0x281fed31, 0x45fb0a51, 0x1f0ae1ac, 0x6f4d794b]); let seed: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7]; @@ -278,10 +280,10 @@ mod tests { } assert_eq!(v, - vec!(0xf225c81a, 0x6ab1be57, 0x04d42951, 0x70858036, + vec![0xf225c81a, 0x6ab1be57, 0x04d42951, 0x70858036, 0x49884684, 0x64efec72, 0x4be2d186, 0x3615b384, 0x11cfa18e, 0xd3c50049, 0x75c775f6, 0x434c6530, - 0x2c5bad8f, 0x898881dc, 
0x5f1c86d9, 0xc1f8e7f4)); + 0x2c5bad8f, 0x898881dc, 0x5f1c86d9, 0xc1f8e7f4]); } #[test] diff --git a/src/librand/distributions/exponential.rs b/src/librand/distributions/exponential.rs index f02b945178fb9..5a8558efc0244 100644 --- a/src/librand/distributions/exponential.rs +++ b/src/librand/distributions/exponential.rs @@ -10,10 +10,11 @@ //! The exponential distribution. +#[cfg(not(test))] // only necessary for no_std use FloatMath; -use {Rng, Rand}; -use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample}; +use {Rand, Rng}; +use distributions::{IndependentSample, Sample, ziggurat, ziggurat_tables}; /// A wrapper around an `f64` to generate Exp(1) random numbers. /// @@ -87,7 +88,7 @@ impl IndependentSample for Exp { #[cfg(test)] mod tests { - use distributions::{Sample, IndependentSample}; + use distributions::{IndependentSample, Sample}; use super::Exp; #[test] diff --git a/src/librand/distributions/gamma.rs b/src/librand/distributions/gamma.rs index 8cd7ac06f991b..9ca13e85b5333 100644 --- a/src/librand/distributions/gamma.rs +++ b/src/librand/distributions/gamma.rs @@ -13,11 +13,12 @@ use self::GammaRepr::*; use self::ChiSquaredRepr::*; +#[cfg(not(test))] // only necessary for no_std use FloatMath; -use {Rng, Open01}; +use {Open01, Rng}; use super::normal::StandardNormal; -use super::{IndependentSample, Sample, Exp}; +use super::{Exp, IndependentSample, Sample}; /// The Gamma distribution `Gamma(shape, scale)` distribution. 
/// @@ -290,8 +291,8 @@ impl IndependentSample for StudentT { #[cfg(test)] mod tests { - use distributions::{Sample, IndependentSample}; - use super::{ChiSquared, StudentT, FisherF}; + use distributions::{IndependentSample, Sample}; + use super::{ChiSquared, FisherF, StudentT}; #[test] fn test_chi_squared_one() { diff --git a/src/librand/distributions/mod.rs b/src/librand/distributions/mod.rs index a54c8df2352ac..41175c81df891 100644 --- a/src/librand/distributions/mod.rs +++ b/src/librand/distributions/mod.rs @@ -17,14 +17,16 @@ //! internally. The `IndependentSample` trait is for generating values //! that do not need to record state. +#[cfg(not(test))] // only necessary for no_std use core::num::Float; + use core::marker::PhantomData; -use {Rng, Rand}; +use {Rand, Rng}; pub use self::range::Range; -pub use self::gamma::{Gamma, ChiSquared, FisherF, StudentT}; -pub use self::normal::{Normal, LogNormal}; +pub use self::gamma::{ChiSquared, FisherF, Gamma, StudentT}; +pub use self::normal::{LogNormal, Normal}; pub use self::exponential::Exp; pub mod range; @@ -235,18 +237,10 @@ fn ziggurat(rng: &mut R, // u is either U(-1, 1) or U(0, 1) depending on if this is a // symmetric distribution or not. 
- let u = if symmetric { - 2.0 * f - 1.0 - } else { - f - }; + let u = if symmetric { 2.0 * f - 1.0 } else { f }; let x = u * x_tab[i]; - let test_x = if symmetric { - x.abs() - } else { - x - }; + let test_x = if symmetric { x.abs() } else { x }; // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i]) if test_x < x_tab[i + 1] { @@ -264,8 +258,8 @@ fn ziggurat(rng: &mut R, #[cfg(test)] mod tests { - use {Rng, Rand}; - use super::{RandSample, WeightedChoice, Weighted, Sample, IndependentSample}; + use {Rand, Rng}; + use super::{IndependentSample, RandSample, Sample, Weighted, WeightedChoice}; #[derive(PartialEq, Debug)] struct ConstRand(usize); @@ -318,37 +312,37 @@ mod tests { }} } - t!(vec!(Weighted { weight: 1, item: 10 }), + t!(vec![Weighted { weight: 1, item: 10 }], [10]); // skip some - t!(vec!(Weighted { weight: 0, item: 20 }, + t!(vec![Weighted { weight: 0, item: 20 }, Weighted { weight: 2, item: 21 }, Weighted { weight: 0, item: 22 }, - Weighted { weight: 1, item: 23 }), + Weighted { weight: 1, item: 23 }], [21, 21, 23]); // different weights - t!(vec!(Weighted { weight: 4, item: 30 }, - Weighted { weight: 3, item: 31 }), + t!(vec![Weighted { weight: 4, item: 30 }, + Weighted { weight: 3, item: 31 }], [30, 30, 30, 30, 31, 31, 31]); // check that we're binary searching // correctly with some vectors of odd // length. 
- t!(vec!(Weighted { weight: 1, item: 40 }, + t!(vec![Weighted { weight: 1, item: 40 }, Weighted { weight: 1, item: 41 }, Weighted { weight: 1, item: 42 }, Weighted { weight: 1, item: 43 }, - Weighted { weight: 1, item: 44 }), + Weighted { weight: 1, item: 44 }], [40, 41, 42, 43, 44]); - t!(vec!(Weighted { weight: 1, item: 50 }, + t!(vec![Weighted { weight: 1, item: 50 }, Weighted { weight: 1, item: 51 }, Weighted { weight: 1, item: 52 }, Weighted { weight: 1, item: 53 }, Weighted { weight: 1, item: 54 }, Weighted { weight: 1, item: 55 }, - Weighted { weight: 1, item: 56 }), + Weighted { weight: 1, item: 56 }], [50, 51, 52, 53, 54, 55, 56]); } diff --git a/src/librand/distributions/normal.rs b/src/librand/distributions/normal.rs index b2ccc5eb6095b..811d5b14c7112 100644 --- a/src/librand/distributions/normal.rs +++ b/src/librand/distributions/normal.rs @@ -10,10 +10,11 @@ //! The normal and derived distributions. +#[cfg(not(test))] // only necessary for no_std use FloatMath; -use {Rng, Rand, Open01}; -use distributions::{ziggurat, ziggurat_tables, Sample, IndependentSample}; +use {Open01, Rand, Rng}; +use distributions::{IndependentSample, Sample, ziggurat, ziggurat_tables}; /// A wrapper around an `f64` to generate N(0, 1) random numbers /// (a.k.a. a standard normal, or Gaussian). @@ -144,8 +145,8 @@ impl IndependentSample for LogNormal { #[cfg(test)] mod tests { - use distributions::{Sample, IndependentSample}; - use super::{Normal, LogNormal}; + use distributions::{IndependentSample, Sample}; + use super::{LogNormal, Normal}; #[test] fn test_normal() { diff --git a/src/librand/distributions/range.rs b/src/librand/distributions/range.rs index f94ef059dae8f..ba8554a979b89 100644 --- a/src/librand/distributions/range.rs +++ b/src/librand/distributions/range.rs @@ -14,7 +14,7 @@ use core::marker::Sized; use Rng; -use distributions::{Sample, IndependentSample}; +use distributions::{IndependentSample, Sample}; /// Sample values uniformly between two bounds. 
/// @@ -148,7 +148,7 @@ float_impl! { f64 } #[cfg(test)] mod tests { - use distributions::{Sample, IndependentSample}; + use distributions::{IndependentSample, Sample}; use super::Range; #[should_panic] diff --git a/src/librand/isaac.rs b/src/librand/isaac.rs index 28eff87bde3b7..69d5015f18140 100644 --- a/src/librand/isaac.rs +++ b/src/librand/isaac.rs @@ -16,7 +16,7 @@ use core::slice; use core::iter::repeat; use core::num::Wrapping as w; -use {Rng, SeedableRng, Rand}; +use {Rand, Rng, SeedableRng}; type w32 = w; type w64 = w; @@ -591,23 +591,25 @@ mod tests { use std::prelude::v1::*; use {Rng, SeedableRng}; - use super::{IsaacRng, Isaac64Rng}; + use super::{Isaac64Rng, IsaacRng}; #[test] fn test_rng_32_rand_seeded() { let s = ::test::rng().gen_iter::().take(256).collect::>(); let mut ra: IsaacRng = SeedableRng::from_seed(&s[..]); let mut rb: IsaacRng = SeedableRng::from_seed(&s[..]); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] fn test_rng_64_rand_seeded() { let s = ::test::rng().gen_iter::().take(256).collect::>(); let mut ra: Isaac64Rng = SeedableRng::from_seed(&s[..]); let mut rb: Isaac64Rng = SeedableRng::from_seed(&s[..]); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] @@ -615,16 +617,18 @@ mod tests { let seed: &[_] = &[1, 23, 456, 7890, 12345]; let mut ra: IsaacRng = SeedableRng::from_seed(seed); let mut rb: IsaacRng = SeedableRng::from_seed(seed); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] fn test_rng_64_seeded() { let seed: &[_] = &[1, 23, 456, 7890, 12345]; let mut ra: Isaac64Rng = SeedableRng::from_seed(seed); let mut rb: Isaac64Rng = SeedableRng::from_seed(seed); - 
assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] @@ -658,8 +662,8 @@ mod tests { // Regression test that isaac is actually using the above vector let v = (0..10).map(|_| ra.next_u32()).collect::>(); assert_eq!(v, - vec!(2558573138, 873787463, 263499565, 2103644246, 3595684709, - 4203127393, 264982119, 2765226902, 2737944514, 3900253796)); + vec![2558573138, 873787463, 263499565, 2103644246, 3595684709, + 4203127393, 264982119, 2765226902, 2737944514, 3900253796]); let seed: &[_] = &[12345, 67890, 54321, 9876]; let mut rb: IsaacRng = SeedableRng::from_seed(seed); @@ -670,8 +674,8 @@ mod tests { let v = (0..10).map(|_| rb.next_u32()).collect::>(); assert_eq!(v, - vec!(3676831399, 3183332890, 2834741178, 3854698763, 2717568474, - 1576568959, 3507990155, 179069555, 141456972, 2478885421)); + vec![3676831399, 3183332890, 2834741178, 3854698763, 2717568474, + 1576568959, 3507990155, 179069555, 141456972, 2478885421]); } #[test] #[rustfmt_skip] @@ -681,10 +685,10 @@ mod tests { // Regression test that isaac is actually using the above vector let v = (0..10).map(|_| ra.next_u64()).collect::>(); assert_eq!(v, - vec!(547121783600835980, 14377643087320773276, 17351601304698403469, + vec![547121783600835980, 14377643087320773276, 17351601304698403469, 1238879483818134882, 11952566807690396487, 13970131091560099343, 4469761996653280935, 15552757044682284409, 6860251611068737823, - 13722198873481261842)); + 13722198873481261842]); let seed: &[_] = &[12345, 67890, 54321, 9876]; let mut rb: Isaac64Rng = SeedableRng::from_seed(seed); @@ -695,10 +699,10 @@ mod tests { let v = (0..10).map(|_| rb.next_u64()).collect::>(); assert_eq!(v, - vec!(18143823860592706164, 8491801882678285927, 2699425367717515619, + vec![18143823860592706164, 8491801882678285927, 2699425367717515619, 17196852593171130876, 2606123525235546165, 15790932315217671084, 596345674630742204, 
9947027391921273664, 11788097613744130851, - 10391409374914919106)); + 10391409374914919106]); } diff --git a/src/librand/lib.rs b/src/librand/lib.rs index 06f4c8dfd20a8..c31a0ed53207d 100644 --- a/src/librand/lib.rs +++ b/src/librand/lib.rs @@ -23,37 +23,34 @@ html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![no_std] #![unstable(feature = "rand", reason = "use `rand` from crates.io", issue = "27703")] -#![feature(core_float)] #![feature(core_intrinsics)] -#![feature(num_bits_bytes)] #![feature(staged_api)] #![feature(step_by)] #![feature(custom_attribute)] #![allow(unused_attributes)] -#![cfg_attr(test, feature(test, rand, rustc_private, iter_order_deprecated))] +#![cfg_attr(not(test), feature(core_float))] // only necessary for no_std +#![cfg_attr(test, feature(test, rand))] #![allow(deprecated)] #[cfg(test)] #[macro_use] extern crate std; -#[cfg(test)] -#[macro_use] -extern crate log; use core::f64; use core::intrinsics; use core::marker::PhantomData; -pub use isaac::{IsaacRng, Isaac64Rng}; +pub use isaac::{Isaac64Rng, IsaacRng}; pub use chacha::ChaChaRng; -use distributions::{Range, IndependentSample}; +use distributions::{IndependentSample, Range}; use distributions::range::SampleRange; #[cfg(test)] @@ -69,7 +66,8 @@ mod rand_impls; // needed by librand; this is necessary because librand doesn't // depend on libstd. This will go away when librand is integrated // into libstd. -trait FloatMath : Sized { +#[doc(hidden)] +trait FloatMath: Sized { fn exp(self) -> Self; fn ln(self) -> Self; fn sqrt(self) -> Self; @@ -104,14 +102,14 @@ impl FloatMath for f64 { /// A type that can be randomly generated using an `Rng`. #[doc(hidden)] -pub trait Rand : Sized { +pub trait Rand: Sized { /// Generates a random instance of this type using the specified source of /// randomness. fn rand(rng: &mut R) -> Self; } /// A random number generator. 
-pub trait Rng : Sized { +pub trait Rng: Sized { /// Return the next random u32. /// /// This rarely needs to be called directly, prefer `r.gen()` to diff --git a/src/librand/rand_impls.rs b/src/librand/rand_impls.rs index 34b7f37a6788f..b0d824da3ab47 100644 --- a/src/librand/rand_impls.rs +++ b/src/librand/rand_impls.rs @@ -11,15 +11,14 @@ //! The implementations of `Rand` for the built-in types. use core::char; -use core::isize; -use core::usize; +use core::mem; use {Rand, Rng}; impl Rand for isize { #[inline] fn rand(rng: &mut R) -> isize { - if isize::BITS == 32 { + if mem::size_of::() == 4 { rng.gen::() as isize } else { rng.gen::() as isize @@ -58,7 +57,7 @@ impl Rand for i64 { impl Rand for usize { #[inline] fn rand(rng: &mut R) -> usize { - if usize::BITS == 32 { + if mem::size_of::() == 4 { rng.gen::() as usize } else { rng.gen::() as usize @@ -145,9 +144,8 @@ impl Rand for char { // Rejection sampling. About 0.2% of numbers with at most // 21-bits are invalid codepoints (surrogates), so this // will succeed first go almost every time. - match char::from_u32(rng.next_u32() & CHAR_MASK) { - Some(c) => return c, - None => {} + if let Some(c) = char::from_u32(rng.next_u32() & CHAR_MASK) { + return c; } } } @@ -205,10 +203,6 @@ tuple_impl!{A, B, C, D, E, F, G, H, I, J, K, L} impl Rand for Option { #[inline] fn rand(rng: &mut R) -> Option { - if rng.gen() { - Some(rng.gen()) - } else { - None - } + if rng.gen() { Some(rng.gen()) } else { None } } } diff --git a/src/librand/reseeding.rs b/src/librand/reseeding.rs index db5e0213726d9..b8a65842e2ff5 100644 --- a/src/librand/reseeding.rs +++ b/src/librand/reseeding.rs @@ -83,8 +83,8 @@ impl, Rsdr: Reseeder + Default> self.bytes_generated = 0; } - /// Create a new `ReseedingRng` from the given reseeder and - /// seed. This uses a default value for `generation_threshold`. +/// Create a new `ReseedingRng` from the given reseeder and +/// seed. This uses a default value for `generation_threshold`. 
fn from_seed((rsdr, seed): (Rsdr, S)) -> ReseedingRng { ReseedingRng { rng: SeedableRng::from_seed(seed), @@ -113,6 +113,7 @@ impl Reseeder for ReseedWithDefault { } #[stable(feature = "rust1", since = "1.0.0")] impl Default for ReseedWithDefault { + /// Creates an instance of `ReseedWithDefault`. fn default() -> ReseedWithDefault { ReseedWithDefault } @@ -122,8 +123,8 @@ impl Default for ReseedWithDefault { mod tests { use std::prelude::v1::*; - use super::{ReseedingRng, ReseedWithDefault}; - use {SeedableRng, Rng}; + use super::{ReseedWithDefault, ReseedingRng}; + use {Rng, SeedableRng}; struct Counter { i: u32, @@ -137,6 +138,7 @@ mod tests { } } impl Default for Counter { + /// Constructs a `Counter` with initial value zero. fn default() -> Counter { Counter { i: 0 } } @@ -166,8 +168,9 @@ mod tests { fn test_rng_seeded() { let mut ra: MyRng = SeedableRng::from_seed((ReseedWithDefault, 2)); let mut rb: MyRng = SeedableRng::from_seed((ReseedWithDefault, 2)); - assert!(ra.gen_ascii_chars().take(100) - .eq(rb.gen_ascii_chars().take(100))); + assert!(ra.gen_ascii_chars() + .take(100) + .eq(rb.gen_ascii_chars().take(100))); } #[test] diff --git a/src/librbml/lib.rs b/src/librbml/lib.rs deleted file mode 100644 index fe15f1c53f992..0000000000000 --- a/src/librbml/lib.rs +++ /dev/null @@ -1,1535 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Really Bad Markup Language (rbml) is an internal serialization format of rustc. -//! This is not intended to be used by users. -//! -//! Originally based on the Extensible Binary Markup Language -//! (ebml; http://www.matroska.org/technical/specs/rfc/index.html), -//! 
it is now a separate format tuned for the rust object metadata. -//! -//! # Encoding -//! -//! RBML document consists of the tag, length and data. -//! The encoded data can contain multiple RBML documents concatenated. -//! -//! **Tags** are a hint for the following data. -//! Tags are a number from 0x000 to 0xfff, where 0xf0 through 0xff is reserved. -//! Tags less than 0xf0 are encoded in one literal byte. -//! Tags greater than 0xff are encoded in two big-endian bytes, -//! where the tag number is ORed with 0xf000. (E.g. tag 0x123 = `f1 23`) -//! -//! **Lengths** encode the length of the following data. -//! It is a variable-length unsigned isize, and one of the following forms: -//! -//! - `80` through `fe` for lengths up to 0x7e; -//! - `40 ff` through `7f ff` for lengths up to 0x3fff; -//! - `20 40 00` through `3f ff ff` for lengths up to 0x1fffff; -//! - `10 20 00 00` through `1f ff ff ff` for lengths up to 0xfffffff. -//! -//! The "overlong" form is allowed so that the length can be encoded -//! without the prior knowledge of the encoded data. -//! For example, the length 0 can be represented either by `80`, `40 00`, -//! `20 00 00` or `10 00 00 00`. -//! The encoder tries to minimize the length if possible. -//! Also, some predefined tags listed below are so commonly used that -//! their lengths are omitted ("implicit length"). -//! -//! **Data** can be either binary bytes or zero or more nested RBML documents. -//! Nested documents cannot overflow, and should be entirely contained -//! within a parent document. -//! -//! # Predefined Tags -//! -//! Most RBML tags are defined by the application. -//! (For the rust object metadata, see also `rustc::metadata::common`.) -//! RBML itself does define a set of predefined tags however, -//! intended for the auto-serialization implementation. -//! -//! Predefined tags with an implicit length: -//! -//! - `U8` (`00`): 1-byte unsigned integer. -//! - `U16` (`01`): 2-byte big endian unsigned integer. -//! 
- `U32` (`02`): 4-byte big endian unsigned integer. -//! - `U64` (`03`): 8-byte big endian unsigned integer. -//! Any of `U*` tags can be used to encode primitive unsigned integer types, -//! as long as it is no greater than the actual size. -//! For example, `u8` can only be represented via the `U8` tag. -//! -//! - `I8` (`04`): 1-byte signed integer. -//! - `I16` (`05`): 2-byte big endian signed integer. -//! - `I32` (`06`): 4-byte big endian signed integer. -//! - `I64` (`07`): 8-byte big endian signed integer. -//! Similar to `U*` tags. Always uses two's complement encoding. -//! -//! - `Bool` (`08`): 1-byte boolean value, `00` for false and `01` for true. -//! -//! - `Char` (`09`): 4-byte big endian Unicode scalar value. -//! Surrogate pairs or out-of-bound values are invalid. -//! -//! - `F32` (`0a`): 4-byte big endian unsigned integer representing -//! IEEE 754 binary32 floating-point format. -//! - `F64` (`0b`): 8-byte big endian unsigned integer representing -//! IEEE 754 binary64 floating-point format. -//! -//! - `Sub8` (`0c`): 1-byte unsigned integer for supplementary information. -//! - `Sub32` (`0d`): 4-byte unsigned integer for supplementary information. -//! Those two tags normally occur as the first subdocument of certain tags, -//! namely `Enum`, `Vec` and `Map`, to provide a variant or size information. -//! They can be used interchangeably. -//! -//! Predefined tags with an explicit length: -//! -//! - `Str` (`10`): A UTF-8-encoded string. -//! -//! - `Enum` (`11`): An enum. -//! The first subdocument should be `Sub*` tags with a variant ID. -//! Subsequent subdocuments, if any, encode variant arguments. -//! -//! - `Vec` (`12`): A vector (sequence). -//! - `VecElt` (`13`): A vector element. -//! The first subdocument should be `Sub*` tags with the number of elements. -//! Subsequent subdocuments should be `VecElt` tag per each element. -//! -//! - `Map` (`14`): A map (associated array). -//! - `MapKey` (`15`): A key part of the map entry. -//! 
- `MapVal` (`16`): A value part of the map entry. -//! The first subdocument should be `Sub*` tags with the number of entries. -//! Subsequent subdocuments should be an alternating sequence of -//! `MapKey` and `MapVal` tags per each entry. -//! -//! - `Opaque` (`17`): An opaque, custom-format tag. -//! Used to wrap ordinary custom tags or data in the auto-serialized context. -//! Rustc typically uses this to encode type informations. -//! -//! First 0x20 tags are reserved by RBML; custom tags start at 0x20. - -#![crate_name = "rbml"] -#![unstable(feature = "rustc_private", issue = "27812")] -#![crate_type = "rlib"] -#![crate_type = "dylib"] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", - html_playground_url = "https://play.rust-lang.org/", - test(attr(deny(warnings))))] - -#![feature(rustc_private)] -#![feature(staged_api)] - -#![cfg_attr(test, feature(test))] - -extern crate serialize; - -#[cfg(test)] -extern crate serialize as rustc_serialize; // Used by RustcEncodable - -#[macro_use] extern crate log; - -#[cfg(test)] extern crate test; - -pub mod opaque; -pub mod leb128; - -pub use self::EbmlEncoderTag::*; -pub use self::Error::*; - -use std::str; -use std::fmt; - -/// Common data structures -#[derive(Clone, Copy)] -pub struct Doc<'a> { - pub data: &'a [u8], - pub start: usize, - pub end: usize, -} - -impl<'doc> Doc<'doc> { - pub fn new(data: &'doc [u8]) -> Doc<'doc> { - Doc { data: data, start: 0, end: data.len() } - } - - pub fn get<'a>(&'a self, tag: usize) -> Doc<'a> { - reader::get_doc(*self, tag) - } - - pub fn is_empty(&self) -> bool { - self.start == self.end - } - - pub fn as_str_slice<'a>(&'a self) -> &'a str { - str::from_utf8(&self.data[self.start..self.end]).unwrap() - } - - pub fn as_str(&self) -> String { - self.as_str_slice().to_string() - } -} - -pub struct TaggedDoc<'a> { - tag: usize, 
- pub doc: Doc<'a>, -} - -#[derive(Copy, Clone, Debug)] -pub enum EbmlEncoderTag { - // tags 00..1f are reserved for auto-serialization. - // first NUM_IMPLICIT_TAGS tags are implicitly sized and lengths are not encoded. - - EsU8 = 0x00, // + 1 byte - EsU16 = 0x01, // + 2 bytes - EsU32 = 0x02, // + 4 bytes - EsU64 = 0x03, // + 8 bytes - EsI8 = 0x04, // + 1 byte - EsI16 = 0x05, // + 2 bytes - EsI32 = 0x06, // + 4 bytes - EsI64 = 0x07, // + 8 bytes - EsBool = 0x08, // + 1 byte - EsChar = 0x09, // + 4 bytes - EsF32 = 0x0a, // + 4 bytes - EsF64 = 0x0b, // + 8 bytes - EsSub8 = 0x0c, // + 1 byte - EsSub32 = 0x0d, // + 4 bytes - // 0x0e and 0x0f are reserved - - EsStr = 0x10, - EsEnum = 0x11, // encodes the variant id as the first EsSub* - EsVec = 0x12, // encodes the # of elements as the first EsSub* - EsVecElt = 0x13, - EsMap = 0x14, // encodes the # of pairs as the first EsSub* - EsMapKey = 0x15, - EsMapVal = 0x16, - EsOpaque = 0x17, -} - -const NUM_TAGS: usize = 0x1000; -const NUM_IMPLICIT_TAGS: usize = 0x0e; - -static TAG_IMPLICIT_LEN: [i8; NUM_IMPLICIT_TAGS] = [ - 1, 2, 4, 8, // EsU* - 1, 2, 4, 8, // ESI* - 1, // EsBool - 4, // EsChar - 4, 8, // EsF* - 1, 4, // EsSub* -]; - -#[derive(Debug)] -pub enum Error { - IntTooBig(usize), - InvalidTag(usize), - Expected(String), - IoError(std::io::Error), - ApplicationError(String) -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // FIXME: this should be a more useful display form - fmt::Debug::fmt(self, f) - } -} -// -------------------------------------- - -pub mod reader { - use std::char; - - use std::isize; - use std::mem::transmute; - - use serialize; - - use super::opaque; - use super::{ ApplicationError, EsVec, EsMap, EsEnum, EsSub8, EsSub32, - EsVecElt, EsMapKey, EsU64, EsU32, EsU16, EsU8, EsI64, - EsI32, EsI16, EsI8, EsBool, EsF64, EsF32, EsChar, EsStr, EsMapVal, - EsOpaque, EbmlEncoderTag, Doc, TaggedDoc, - Error, IntTooBig, InvalidTag, Expected, NUM_IMPLICIT_TAGS, 
TAG_IMPLICIT_LEN }; - - pub type DecodeResult = Result; - // rbml reading - - macro_rules! try_or { - ($e:expr, $r:expr) => ( - match $e { - Ok(e) => e, - Err(e) => { - debug!("ignored error: {:?}", e); - return $r - } - } - ) - } - - #[derive(Copy, Clone)] - pub struct Res { - pub val: usize, - pub next: usize - } - - pub fn tag_at(data: &[u8], start: usize) -> DecodeResult { - let v = data[start] as usize; - if v < 0xf0 { - Ok(Res { val: v, next: start + 1 }) - } else if v > 0xf0 { - Ok(Res { val: ((v & 0xf) << 8) | data[start + 1] as usize, next: start + 2 }) - } else { - // every tag starting with byte 0xf0 is an overlong form, which is prohibited. - Err(InvalidTag(v)) - } - } - - #[inline(never)] - fn vuint_at_slow(data: &[u8], start: usize) -> DecodeResult { - let a = data[start]; - if a & 0x80 != 0 { - return Ok(Res {val: (a & 0x7f) as usize, next: start + 1}); - } - if a & 0x40 != 0 { - return Ok(Res {val: ((a & 0x3f) as usize) << 8 | - (data[start + 1] as usize), - next: start + 2}); - } - if a & 0x20 != 0 { - return Ok(Res {val: ((a & 0x1f) as usize) << 16 | - (data[start + 1] as usize) << 8 | - (data[start + 2] as usize), - next: start + 3}); - } - if a & 0x10 != 0 { - return Ok(Res {val: ((a & 0x0f) as usize) << 24 | - (data[start + 1] as usize) << 16 | - (data[start + 2] as usize) << 8 | - (data[start + 3] as usize), - next: start + 4}); - } - Err(IntTooBig(a as usize)) - } - - pub fn vuint_at(data: &[u8], start: usize) -> DecodeResult { - if data.len() - start < 4 { - return vuint_at_slow(data, start); - } - - // Lookup table for parsing EBML Element IDs as per - // http://ebml.sourceforge.net/specs/ The Element IDs are parsed by - // reading a big endian u32 positioned at data[start]. Using the four - // most significant bits of the u32 we lookup in the table below how - // the element ID should be derived from it. 
- // - // The table stores tuples (shift, mask) where shift is the number the - // u32 should be right shifted with and mask is the value the right - // shifted value should be masked with. If for example the most - // significant bit is set this means it's a class A ID and the u32 - // should be right shifted with 24 and masked with 0x7f. Therefore we - // store (24, 0x7f) at index 0x8 - 0xF (four bit numbers where the most - // significant bit is set). - // - // By storing the number of shifts and masks in a table instead of - // checking in order if the most significant bit is set, the second - // most significant bit is set etc. we can replace up to three - // "and+branch" with a single table lookup which gives us a measured - // speedup of around 2x on x86_64. - static SHIFT_MASK_TABLE: [(usize, u32); 16] = [ - (0, 0x0), (0, 0x0fffffff), - (8, 0x1fffff), (8, 0x1fffff), - (16, 0x3fff), (16, 0x3fff), (16, 0x3fff), (16, 0x3fff), - (24, 0x7f), (24, 0x7f), (24, 0x7f), (24, 0x7f), - (24, 0x7f), (24, 0x7f), (24, 0x7f), (24, 0x7f) - ]; - - unsafe { - let ptr = data.as_ptr().offset(start as isize) as *const u32; - let val = u32::from_be(*ptr); - - let i = (val >> 28) as usize; - let (shift, mask) = SHIFT_MASK_TABLE[i]; - Ok(Res { - val: ((val >> shift) & mask) as usize, - next: start + ((32 - shift) >> 3), - }) - } - } - - pub fn tag_len_at(data: &[u8], tag: Res) -> DecodeResult { - if tag.val < NUM_IMPLICIT_TAGS && TAG_IMPLICIT_LEN[tag.val] >= 0 { - Ok(Res { val: TAG_IMPLICIT_LEN[tag.val] as usize, next: tag.next }) - } else { - vuint_at(data, tag.next) - } - } - - pub fn doc_at<'a>(data: &'a [u8], start: usize) -> DecodeResult> { - let elt_tag = try!(tag_at(data, start)); - let elt_size = try!(tag_len_at(data, elt_tag)); - let end = elt_size.next + elt_size.val; - Ok(TaggedDoc { - tag: elt_tag.val, - doc: Doc { data: data, start: elt_size.next, end: end } - }) - } - - pub fn maybe_get_doc<'a>(d: Doc<'a>, tg: usize) -> Option> { - let mut pos = d.start; - while pos < 
d.end { - let elt_tag = try_or!(tag_at(d.data, pos), None); - let elt_size = try_or!(tag_len_at(d.data, elt_tag), None); - pos = elt_size.next + elt_size.val; - if elt_tag.val == tg { - return Some(Doc { data: d.data, start: elt_size.next, - end: pos }); - } - } - None - } - - pub fn get_doc<'a>(d: Doc<'a>, tg: usize) -> Doc<'a> { - match maybe_get_doc(d, tg) { - Some(d) => d, - None => { - error!("failed to find block with tag {:?}", tg); - panic!(); - } - } - } - - pub fn docs<'a>(d: Doc<'a>) -> DocsIterator<'a> { - DocsIterator { - d: d - } - } - - pub struct DocsIterator<'a> { - d: Doc<'a>, - } - - impl<'a> Iterator for DocsIterator<'a> { - type Item = (usize, Doc<'a>); - - fn next(&mut self) -> Option<(usize, Doc<'a>)> { - if self.d.start >= self.d.end { - return None; - } - - let elt_tag = try_or!(tag_at(self.d.data, self.d.start), { - self.d.start = self.d.end; - None - }); - let elt_size = try_or!(tag_len_at(self.d.data, elt_tag), { - self.d.start = self.d.end; - None - }); - - let end = elt_size.next + elt_size.val; - let doc = Doc { - data: self.d.data, - start: elt_size.next, - end: end, - }; - - self.d.start = end; - return Some((elt_tag.val, doc)); - } - } - - pub fn tagged_docs<'a>(d: Doc<'a>, tag: usize) -> TaggedDocsIterator<'a> { - TaggedDocsIterator { - iter: docs(d), - tag: tag, - } - } - - pub struct TaggedDocsIterator<'a> { - iter: DocsIterator<'a>, - tag: usize, - } - - impl<'a> Iterator for TaggedDocsIterator<'a> { - type Item = Doc<'a>; - - fn next(&mut self) -> Option> { - while let Some((tag, doc)) = self.iter.next() { - if tag == self.tag { - return Some(doc); - } - } - None - } - } - - pub fn with_doc_data(d: Doc, f: F) -> T where - F: FnOnce(&[u8]) -> T, - { - f(&d.data[d.start..d.end]) - } - - pub fn doc_as_u8(d: Doc) -> u8 { - assert_eq!(d.end, d.start + 1); - d.data[d.start] - } - - pub fn doc_as_u64(d: Doc) -> u64 { - if d.end >= 8 { - // For performance, we read 8 big-endian bytes, - // and mask off the junk if there is any. 
This - // obviously won't work on the first 8 bytes - // of a file - we will fall of the start - // of the page and segfault. - - let mut b = [0; 8]; - b.clone_from_slice(&d.data[d.end-8..d.end]); - let data = unsafe { (*(b.as_ptr() as *const u64)).to_be() }; - let len = d.end - d.start; - if len < 8 { - data & ((1<<(len*8))-1) - } else { - data - } - } else { - let mut result = 0; - for b in &d.data[d.start..d.end] { - result = (result<<8) + (*b as u64); - } - result - } - } - - #[inline] pub fn doc_as_u16(d: Doc) -> u16 { doc_as_u64(d) as u16 } - #[inline] pub fn doc_as_u32(d: Doc) -> u32 { doc_as_u64(d) as u32 } - - #[inline] pub fn doc_as_i8(d: Doc) -> i8 { doc_as_u8(d) as i8 } - #[inline] pub fn doc_as_i16(d: Doc) -> i16 { doc_as_u16(d) as i16 } - #[inline] pub fn doc_as_i32(d: Doc) -> i32 { doc_as_u32(d) as i32 } - #[inline] pub fn doc_as_i64(d: Doc) -> i64 { doc_as_u64(d) as i64 } - - pub struct Decoder<'a> { - parent: Doc<'a>, - pos: usize, - } - - impl<'doc> Decoder<'doc> { - pub fn new(d: Doc<'doc>) -> Decoder<'doc> { - Decoder { - parent: d, - pos: d.start - } - } - - fn next_doc(&mut self, exp_tag: EbmlEncoderTag) -> DecodeResult> { - debug!(". 
next_doc(exp_tag={:?})", exp_tag); - if self.pos >= self.parent.end { - return Err(Expected(format!("no more documents in \ - current node!"))); - } - let TaggedDoc { tag: r_tag, doc: r_doc } = - try!(doc_at(self.parent.data, self.pos)); - debug!("self.parent={:?}-{:?} self.pos={:?} r_tag={:?} r_doc={:?}-{:?}", - self.parent.start, - self.parent.end, - self.pos, - r_tag, - r_doc.start, - r_doc.end); - if r_tag != (exp_tag as usize) { - return Err(Expected(format!("expected EBML doc with tag {:?} but \ - found tag {:?}", exp_tag, r_tag))); - } - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to \ - {:#x}, parent to {:#x}", - r_doc.end, self.parent.end))); - } - self.pos = r_doc.end; - Ok(r_doc) - } - - fn push_doc(&mut self, exp_tag: EbmlEncoderTag, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - let d = try!(self.next_doc(exp_tag)); - let old_parent = self.parent; - let old_pos = self.pos; - self.parent = d; - self.pos = d.start; - let r = try!(f(self)); - self.parent = old_parent; - self.pos = old_pos; - Ok(r) - } - - fn _next_sub(&mut self) -> DecodeResult { - // empty vector/map optimization - if self.parent.is_empty() { - return Ok(0); - } - - let TaggedDoc { tag: r_tag, doc: r_doc } = - try!(doc_at(self.parent.data, self.pos)); - let r = if r_tag == (EsSub8 as usize) { - doc_as_u8(r_doc) as usize - } else if r_tag == (EsSub32 as usize) { - doc_as_u32(r_doc) as usize - } else { - return Err(Expected(format!("expected EBML doc with tag {:?} or {:?} but \ - found tag {:?}", EsSub8, EsSub32, r_tag))); - }; - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to \ - {:#x}, parent to {:#x}", - r_doc.end, self.parent.end))); - } - self.pos = r_doc.end; - debug!("_next_sub result={:?}", r); - Ok(r) - } - - // variable-length unsigned integer with different tags. - // `first_tag` should be a tag for u8 or i8. 
- // `last_tag` should be the largest allowed integer tag with the matching signedness. - // all tags between them should be valid, in the order of u8, u16, u32 and u64. - fn _next_int(&mut self, - first_tag: EbmlEncoderTag, - last_tag: EbmlEncoderTag) -> DecodeResult { - if self.pos >= self.parent.end { - return Err(Expected(format!("no more documents in \ - current node!"))); - } - - let TaggedDoc { tag: r_tag, doc: r_doc } = - try!(doc_at(self.parent.data, self.pos)); - let r = if first_tag as usize <= r_tag && r_tag <= last_tag as usize { - match r_tag - first_tag as usize { - 0 => doc_as_u8(r_doc) as u64, - 1 => doc_as_u16(r_doc) as u64, - 2 => doc_as_u32(r_doc) as u64, - 3 => doc_as_u64(r_doc), - _ => unreachable!(), - } - } else { - return Err(Expected(format!("expected EBML doc with tag {:?} through {:?} but \ - found tag {:?}", first_tag, last_tag, r_tag))); - }; - if r_doc.end > self.parent.end { - return Err(Expected(format!("invalid EBML, child extends to \ - {:#x}, parent to {:#x}", - r_doc.end, self.parent.end))); - } - self.pos = r_doc.end; - debug!("_next_int({:?}, {:?}) result={:?}", first_tag, last_tag, r); - Ok(r) - } - - pub fn read_opaque(&mut self, op: F) -> DecodeResult where - F: FnOnce(&mut opaque::Decoder, Doc) -> DecodeResult, - { - let doc = try!(self.next_doc(EsOpaque)); - - let result = { - let mut opaque_decoder = opaque::Decoder::new(doc.data, - doc.start); - try!(op(&mut opaque_decoder, doc)) - }; - - Ok(result) - } - - pub fn position(&self) -> usize { - self.pos - } - - pub fn advance(&mut self, bytes: usize) { - self.pos += bytes; - } - } - - impl<'doc> serialize::Decoder for Decoder<'doc> { - type Error = Error; - fn read_nil(&mut self) -> DecodeResult<()> { Ok(()) } - - fn read_u64(&mut self) -> DecodeResult { self._next_int(EsU8, EsU64) } - fn read_u32(&mut self) -> DecodeResult { Ok(try!(self._next_int(EsU8, EsU32)) as u32) } - fn read_u16(&mut self) -> DecodeResult { Ok(try!(self._next_int(EsU8, EsU16)) as u16) } - fn 
read_u8(&mut self) -> DecodeResult { Ok(doc_as_u8(try!(self.next_doc(EsU8)))) } - fn read_uint(&mut self) -> DecodeResult { - let v = try!(self._next_int(EsU8, EsU64)); - if v > (::std::usize::MAX as u64) { - Err(IntTooBig(v as usize)) - } else { - Ok(v as usize) - } - } - - fn read_i64(&mut self) -> DecodeResult { Ok(try!(self._next_int(EsI8, EsI64)) as i64) } - fn read_i32(&mut self) -> DecodeResult { Ok(try!(self._next_int(EsI8, EsI32)) as i32) } - fn read_i16(&mut self) -> DecodeResult { Ok(try!(self._next_int(EsI8, EsI16)) as i16) } - fn read_i8(&mut self) -> DecodeResult { Ok(doc_as_u8(try!(self.next_doc(EsI8))) as i8) } - fn read_int(&mut self) -> DecodeResult { - let v = try!(self._next_int(EsI8, EsI64)) as i64; - if v > (isize::MAX as i64) || v < (isize::MIN as i64) { - debug!("FIXME \\#6122: Removing this makes this function miscompile"); - Err(IntTooBig(v as usize)) - } else { - Ok(v as isize) - } - } - - fn read_bool(&mut self) -> DecodeResult { - Ok(doc_as_u8(try!(self.next_doc(EsBool))) != 0) - } - - fn read_f64(&mut self) -> DecodeResult { - let bits = doc_as_u64(try!(self.next_doc(EsF64))); - Ok(unsafe { transmute(bits) }) - } - fn read_f32(&mut self) -> DecodeResult { - let bits = doc_as_u32(try!(self.next_doc(EsF32))); - Ok(unsafe { transmute(bits) }) - } - fn read_char(&mut self) -> DecodeResult { - Ok(char::from_u32(doc_as_u32(try!(self.next_doc(EsChar)))).unwrap()) - } - fn read_str(&mut self) -> DecodeResult { - Ok(try!(self.next_doc(EsStr)).as_str()) - } - - // Compound types: - fn read_enum(&mut self, name: &str, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_enum({})", name); - - let doc = try!(self.next_doc(EsEnum)); - - let (old_parent, old_pos) = (self.parent, self.pos); - self.parent = doc; - self.pos = self.parent.start; - - let result = try!(f(self)); - - self.parent = old_parent; - self.pos = old_pos; - Ok(result) - } - - fn read_enum_variant(&mut self, _: &[&str], - mut f: F) -> 
DecodeResult - where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult, - { - debug!("read_enum_variant()"); - let idx = try!(self._next_sub()); - debug!(" idx={}", idx); - - f(self, idx) - } - - fn read_enum_variant_arg(&mut self, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_enum_variant_arg(idx={})", idx); - f(self) - } - - fn read_enum_struct_variant(&mut self, _: &[&str], - mut f: F) -> DecodeResult - where F: FnMut(&mut Decoder<'doc>, usize) -> DecodeResult, - { - debug!("read_enum_struct_variant()"); - let idx = try!(self._next_sub()); - debug!(" idx={}", idx); - - f(self, idx) - } - - fn read_enum_struct_variant_field(&mut self, - name: &str, - idx: usize, - f: F) - -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_enum_struct_variant_arg(name={}, idx={})", name, idx); - f(self) - } - - fn read_struct(&mut self, name: &str, _: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_struct(name={})", name); - f(self) - } - - fn read_struct_field(&mut self, name: &str, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_struct_field(name={}, idx={})", name, idx); - f(self) - } - - fn read_tuple(&mut self, tuple_len: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_tuple()"); - self.read_seq(move |d, len| { - if len == tuple_len { - f(d) - } else { - Err(Expected(format!("Expected tuple of length `{}`, \ - found tuple of length `{}`", tuple_len, len))) - } - }) - } - - fn read_tuple_arg(&mut self, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_tuple_arg(idx={})", idx); - self.read_seq_elt(idx, f) - } - - fn read_tuple_struct(&mut self, name: &str, len: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - 
debug!("read_tuple_struct(name={})", name); - self.read_tuple(len, f) - } - - fn read_tuple_struct_arg(&mut self, - idx: usize, - f: F) - -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_tuple_struct_arg(idx={})", idx); - self.read_tuple_arg(idx, f) - } - - fn read_option(&mut self, mut f: F) -> DecodeResult where - F: FnMut(&mut Decoder<'doc>, bool) -> DecodeResult, - { - debug!("read_option()"); - self.read_enum("Option", move |this| { - this.read_enum_variant(&["None", "Some"], move |this, idx| { - match idx { - 0 => f(this, false), - 1 => f(this, true), - _ => { - Err(Expected(format!("Expected None or Some"))) - } - } - }) - }) - } - - fn read_seq(&mut self, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult, - { - debug!("read_seq()"); - self.push_doc(EsVec, move |d| { - let len = try!(d._next_sub()); - debug!(" len={}", len); - f(d, len) - }) - } - - fn read_seq_elt(&mut self, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_seq_elt(idx={})", idx); - self.push_doc(EsVecElt, f) - } - - fn read_map(&mut self, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>, usize) -> DecodeResult, - { - debug!("read_map()"); - self.push_doc(EsMap, move |d| { - let len = try!(d._next_sub()); - debug!(" len={}", len); - f(d, len) - }) - } - - fn read_map_elt_key(&mut self, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_map_elt_key(idx={})", idx); - self.push_doc(EsMapKey, f) - } - - fn read_map_elt_val(&mut self, idx: usize, f: F) -> DecodeResult where - F: FnOnce(&mut Decoder<'doc>) -> DecodeResult, - { - debug!("read_map_elt_val(idx={})", idx); - self.push_doc(EsMapVal, f) - } - - fn error(&mut self, err: &str) -> Error { - ApplicationError(err.to_string()) - } - } -} - -pub mod writer { - use std::mem; - use std::io::prelude::*; - use std::io::{self, SeekFrom, Cursor}; - - 
use super::opaque; - use super::{ EsVec, EsMap, EsEnum, EsSub8, EsSub32, EsVecElt, EsMapKey, - EsU64, EsU32, EsU16, EsU8, EsI64, EsI32, EsI16, EsI8, - EsBool, EsF64, EsF32, EsChar, EsStr, EsMapVal, - EsOpaque, NUM_IMPLICIT_TAGS, NUM_TAGS }; - - use serialize; - - - pub type EncodeResult = io::Result<()>; - - // rbml writing - pub struct Encoder<'a> { - pub writer: &'a mut Cursor>, - size_positions: Vec, - relax_limit: u64, // do not move encoded bytes before this position - } - - fn write_tag(w: &mut W, n: usize) -> EncodeResult { - if n < 0xf0 { - w.write_all(&[n as u8]) - } else if 0x100 <= n && n < NUM_TAGS { - w.write_all(&[0xf0 | (n >> 8) as u8, n as u8]) - } else { - Err(io::Error::new(io::ErrorKind::Other, - &format!("invalid tag: {}", n)[..])) - } - } - - fn write_sized_vuint(w: &mut W, n: usize, size: usize) -> EncodeResult { - match size { - 1 => w.write_all(&[0x80 | (n as u8)]), - 2 => w.write_all(&[0x40 | ((n >> 8) as u8), n as u8]), - 3 => w.write_all(&[0x20 | ((n >> 16) as u8), (n >> 8) as u8, - n as u8]), - 4 => w.write_all(&[0x10 | ((n >> 24) as u8), (n >> 16) as u8, - (n >> 8) as u8, n as u8]), - _ => Err(io::Error::new(io::ErrorKind::Other, - &format!("isize too big: {}", n)[..])) - } - } - - pub fn write_vuint(w: &mut W, n: usize) -> EncodeResult { - if n < 0x7f { return write_sized_vuint(w, n, 1); } - if n < 0x4000 { return write_sized_vuint(w, n, 2); } - if n < 0x200000 { return write_sized_vuint(w, n, 3); } - if n < 0x10000000 { return write_sized_vuint(w, n, 4); } - Err(io::Error::new(io::ErrorKind::Other, - &format!("isize too big: {}", n)[..])) - } - - impl<'a> Encoder<'a> { - pub fn new(w: &'a mut Cursor>) -> Encoder<'a> { - Encoder { - writer: w, - size_positions: vec!(), - relax_limit: 0, - } - } - - pub fn start_tag(&mut self, tag_id: usize) -> EncodeResult { - debug!("Start tag {:?}", tag_id); - assert!(tag_id >= NUM_IMPLICIT_TAGS); - - // Write the enum ID: - try!(write_tag(self.writer, tag_id)); - - // Write a placeholder four-byte 
size. - let cur_pos = try!(self.writer.seek(SeekFrom::Current(0))); - self.size_positions.push(cur_pos); - let zeroes: &[u8] = &[0, 0, 0, 0]; - self.writer.write_all(zeroes) - } - - pub fn end_tag(&mut self) -> EncodeResult { - let last_size_pos = self.size_positions.pop().unwrap(); - let cur_pos = try!(self.writer.seek(SeekFrom::Current(0))); - try!(self.writer.seek(SeekFrom::Start(last_size_pos))); - let size = (cur_pos - last_size_pos - 4) as usize; - - // relax the size encoding for small tags (bigger tags are costly to move). - // we should never try to move the stable positions, however. - const RELAX_MAX_SIZE: usize = 0x100; - if size <= RELAX_MAX_SIZE && last_size_pos >= self.relax_limit { - // we can't alter the buffer in place, so have a temporary buffer - let mut buf = [0u8; RELAX_MAX_SIZE]; - { - let last_size_pos = last_size_pos as usize; - let data = &self.writer.get_ref()[last_size_pos+4..cur_pos as usize]; - buf[..size].clone_from_slice(data); - } - - // overwrite the size and data and continue - try!(write_vuint(self.writer, size)); - try!(self.writer.write_all(&buf[..size])); - } else { - // overwrite the size with an overlong encoding and skip past the data - try!(write_sized_vuint(self.writer, size, 4)); - try!(self.writer.seek(SeekFrom::Start(cur_pos))); - } - - debug!("End tag (size = {:?})", size); - Ok(()) - } - - pub fn wr_tag(&mut self, tag_id: usize, blk: F) -> EncodeResult where - F: FnOnce() -> EncodeResult, - { - try!(self.start_tag(tag_id)); - try!(blk()); - self.end_tag() - } - - pub fn wr_tagged_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult { - assert!(tag_id >= NUM_IMPLICIT_TAGS); - try!(write_tag(self.writer, tag_id)); - try!(write_vuint(self.writer, b.len())); - self.writer.write_all(b) - } - - pub fn wr_tagged_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult { - let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) }; - // tagged integers are emitted in big-endian, with no - // leading zeros. 
- let leading_zero_bytes = v.leading_zeros()/8; - self.wr_tagged_bytes(tag_id, &bytes[leading_zero_bytes as usize..]) - } - - #[inline] - pub fn wr_tagged_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult { - self.wr_tagged_bytes(tag_id, &[v]) - } - - #[inline] - pub fn wr_tagged_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult { - self.wr_tagged_u64(tag_id, v as u64) - } - - #[inline] - pub fn wr_tagged_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult { - self.wr_tagged_u32(tag_id, v as u32) - } - - #[inline] - pub fn wr_tagged_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult { - self.wr_tagged_u16(tag_id, v as u16) - } - - #[inline] - pub fn wr_tagged_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult { - self.wr_tagged_bytes(tag_id, &[v as u8]) - } - - pub fn wr_tagged_str(&mut self, tag_id: usize, v: &str) -> EncodeResult { - self.wr_tagged_bytes(tag_id, v.as_bytes()) - } - - // for auto-serialization - fn wr_tagged_raw_bytes(&mut self, tag_id: usize, b: &[u8]) -> EncodeResult { - try!(write_tag(self.writer, tag_id)); - self.writer.write_all(b) - } - - fn wr_tagged_raw_u64(&mut self, tag_id: usize, v: u64) -> EncodeResult { - let bytes: [u8; 8] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u32(&mut self, tag_id: usize, v: u32) -> EncodeResult{ - let bytes: [u8; 4] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u16(&mut self, tag_id: usize, v: u16) -> EncodeResult { - let bytes: [u8; 2] = unsafe { mem::transmute(v.to_be()) }; - self.wr_tagged_raw_bytes(tag_id, &bytes) - } - - fn wr_tagged_raw_u8(&mut self, tag_id: usize, v: u8) -> EncodeResult { - 
self.wr_tagged_raw_bytes(tag_id, &[v]) - } - - fn wr_tagged_raw_i64(&mut self, tag_id: usize, v: i64) -> EncodeResult { - self.wr_tagged_raw_u64(tag_id, v as u64) - } - - fn wr_tagged_raw_i32(&mut self, tag_id: usize, v: i32) -> EncodeResult { - self.wr_tagged_raw_u32(tag_id, v as u32) - } - - fn wr_tagged_raw_i16(&mut self, tag_id: usize, v: i16) -> EncodeResult { - self.wr_tagged_raw_u16(tag_id, v as u16) - } - - fn wr_tagged_raw_i8(&mut self, tag_id: usize, v: i8) -> EncodeResult { - self.wr_tagged_raw_bytes(tag_id, &[v as u8]) - } - - pub fn wr_bytes(&mut self, b: &[u8]) -> EncodeResult { - debug!("Write {:?} bytes", b.len()); - self.writer.write_all(b) - } - - pub fn wr_str(&mut self, s: &str) -> EncodeResult { - debug!("Write str: {:?}", s); - self.writer.write_all(s.as_bytes()) - } - - /// Returns the current position while marking it stable, i.e. - /// generated bytes so far wouldn't be affected by relaxation. - pub fn mark_stable_position(&mut self) -> u64 { - let pos = self.writer.seek(SeekFrom::Current(0)).unwrap(); - if self.relax_limit < pos { - self.relax_limit = pos; - } - pos - } - } - - impl<'a> Encoder<'a> { - // used internally to emit things like the vector length and so on - fn _emit_tagged_sub(&mut self, v: usize) -> EncodeResult { - if v as u8 as usize == v { - self.wr_tagged_raw_u8(EsSub8 as usize, v as u8) - } else if v as u32 as usize == v { - self.wr_tagged_raw_u32(EsSub32 as usize, v as u32) - } else { - Err(io::Error::new(io::ErrorKind::Other, - &format!("length or variant id too big: {}", - v)[..])) - } - } - - pub fn emit_opaque(&mut self, f: F) -> EncodeResult where - F: FnOnce(&mut opaque::Encoder) -> EncodeResult, - { - try!(self.start_tag(EsOpaque as usize)); - - { - let mut opaque_encoder = opaque::Encoder::new(self.writer); - try!(f(&mut opaque_encoder)); - } - - self.mark_stable_position(); - self.end_tag() - } - } - - impl<'a> serialize::Encoder for Encoder<'a> { - type Error = io::Error; - - fn emit_nil(&mut self) -> 
EncodeResult { - Ok(()) - } - - fn emit_uint(&mut self, v: usize) -> EncodeResult { - self.emit_u64(v as u64) - } - fn emit_u64(&mut self, v: u64) -> EncodeResult { - if v as u32 as u64 == v { - self.emit_u32(v as u32) - } else { - self.wr_tagged_raw_u64(EsU64 as usize, v) - } - } - fn emit_u32(&mut self, v: u32) -> EncodeResult { - if v as u16 as u32 == v { - self.emit_u16(v as u16) - } else { - self.wr_tagged_raw_u32(EsU32 as usize, v) - } - } - fn emit_u16(&mut self, v: u16) -> EncodeResult { - if v as u8 as u16 == v { - self.emit_u8(v as u8) - } else { - self.wr_tagged_raw_u16(EsU16 as usize, v) - } - } - fn emit_u8(&mut self, v: u8) -> EncodeResult { - self.wr_tagged_raw_u8(EsU8 as usize, v) - } - - fn emit_int(&mut self, v: isize) -> EncodeResult { - self.emit_i64(v as i64) - } - fn emit_i64(&mut self, v: i64) -> EncodeResult { - if v as i32 as i64 == v { - self.emit_i32(v as i32) - } else { - self.wr_tagged_raw_i64(EsI64 as usize, v) - } - } - fn emit_i32(&mut self, v: i32) -> EncodeResult { - if v as i16 as i32 == v { - self.emit_i16(v as i16) - } else { - self.wr_tagged_raw_i32(EsI32 as usize, v) - } - } - fn emit_i16(&mut self, v: i16) -> EncodeResult { - if v as i8 as i16 == v { - self.emit_i8(v as i8) - } else { - self.wr_tagged_raw_i16(EsI16 as usize, v) - } - } - fn emit_i8(&mut self, v: i8) -> EncodeResult { - self.wr_tagged_raw_i8(EsI8 as usize, v) - } - - fn emit_bool(&mut self, v: bool) -> EncodeResult { - self.wr_tagged_raw_u8(EsBool as usize, v as u8) - } - - fn emit_f64(&mut self, v: f64) -> EncodeResult { - let bits = unsafe { mem::transmute(v) }; - self.wr_tagged_raw_u64(EsF64 as usize, bits) - } - fn emit_f32(&mut self, v: f32) -> EncodeResult { - let bits = unsafe { mem::transmute(v) }; - self.wr_tagged_raw_u32(EsF32 as usize, bits) - } - fn emit_char(&mut self, v: char) -> EncodeResult { - self.wr_tagged_raw_u32(EsChar as usize, v as u32) - } - - fn emit_str(&mut self, v: &str) -> EncodeResult { - self.wr_tagged_str(EsStr as usize, v) - } 
- - fn emit_enum(&mut self, _name: &str, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - try!(self.start_tag(EsEnum as usize)); - try!(f(self)); - self.end_tag() - } - - fn emit_enum_variant(&mut self, - _: &str, - v_id: usize, - _: usize, - f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - try!(self._emit_tagged_sub(v_id)); - f(self) - } - - fn emit_enum_variant_arg(&mut self, _: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_enum_struct_variant(&mut self, - v_name: &str, - v_id: usize, - cnt: usize, - f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum_variant(v_name, v_id, cnt, f) - } - - fn emit_enum_struct_variant_field(&mut self, - _: &str, - idx: usize, - f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum_variant_arg(idx, f) - } - - fn emit_struct(&mut self, _: &str, _len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_struct_field(&mut self, _name: &str, _: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_tuple(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq(len, f) - } - fn emit_tuple_arg(&mut self, idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq_elt(idx, f) - } - - fn emit_tuple_struct(&mut self, _: &str, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq(len, f) - } - fn emit_tuple_struct_arg(&mut self, idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq_elt(idx, f) - } - - fn emit_option(&mut self, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - 
self.emit_enum("Option", f) - } - fn emit_option_none(&mut self) -> EncodeResult { - self.emit_enum_variant("None", 0, 0, |_| Ok(())) - } - fn emit_option_some(&mut self, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - - self.emit_enum_variant("Some", 1, 1, f) - } - - fn emit_seq(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - if len == 0 { - // empty vector optimization - return self.wr_tagged_bytes(EsVec as usize, &[]); - } - - try!(self.start_tag(EsVec as usize)); - try!(self._emit_tagged_sub(len)); - try!(f(self)); - self.end_tag() - } - - fn emit_seq_elt(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - - try!(self.start_tag(EsVecElt as usize)); - try!(f(self)); - self.end_tag() - } - - fn emit_map(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - if len == 0 { - // empty map optimization - return self.wr_tagged_bytes(EsMap as usize, &[]); - } - - try!(self.start_tag(EsMap as usize)); - try!(self._emit_tagged_sub(len)); - try!(f(self)); - self.end_tag() - } - - fn emit_map_elt_key(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - - try!(self.start_tag(EsMapKey as usize)); - try!(f(self)); - self.end_tag() - } - - fn emit_map_elt_val(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - try!(self.start_tag(EsMapVal as usize)); - try!(f(self)); - self.end_tag() - } - } -} - -// ___________________________________________________________________________ -// Testing - -#[cfg(test)] -mod tests { - use super::{Doc, reader, writer}; - - use serialize::{Encodable, Decodable}; - - use std::io::Cursor; - - #[test] - fn test_vuint_at() { - let data = &[ - 0x80, - 0xff, - 0x40, 0x00, - 0x7f, 0xff, - 0x20, 0x00, 0x00, - 0x3f, 0xff, 0xff, - 0x10, 0x00, 0x00, 0x00, - 0x1f, 
0xff, 0xff, 0xff - ]; - - let mut res: reader::Res; - - // Class A - res = reader::vuint_at(data, 0).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 1); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 7) - 1); - assert_eq!(res.next, 2); - - // Class B - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 4); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 14) - 1); - assert_eq!(res.next, 6); - - // Class C - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 9); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 21) - 1); - assert_eq!(res.next, 12); - - // Class D - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, 0); - assert_eq!(res.next, 16); - res = reader::vuint_at(data, res.next).unwrap(); - assert_eq!(res.val, (1 << 28) - 1); - assert_eq!(res.next, 20); - } - - #[test] - fn test_option_int() { - fn test_v(v: Option) { - debug!("v == {:?}", v); - let mut wr = Cursor::new(Vec::new()); - { - let mut rbml_w = writer::Encoder::new(&mut wr); - let _ = v.encode(&mut rbml_w); - } - let rbml_doc = Doc::new(wr.get_ref()); - let mut deser = reader::Decoder::new(rbml_doc); - let v1 = Decodable::decode(&mut deser).unwrap(); - debug!("v1 == {:?}", v1); - assert_eq!(v, v1); - } - - test_v(Some(22)); - test_v(None); - test_v(Some(3)); - } -} - -#[cfg(test)] -mod bench { - #![allow(non_snake_case)] - use test::Bencher; - use super::reader; - - #[bench] - pub fn vuint_at_A_aligned(b: &mut Bencher) { - let data = (0..4*100).map(|i| { - match i % 2 { - 0 => 0x80, - _ => i as u8, - } - }).collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 0; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_A_unaligned(b: &mut Bencher) { - let data = (0..4*100+1).map(|i| { - match i % 2 { - 1 => 
0x80, - _ => i as u8 - } - }).collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 1; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_D_aligned(b: &mut Bencher) { - let data = (0..4*100).map(|i| { - match i % 4 { - 0 => 0x10, - 3 => i as u8, - _ => 0 - } - }).collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 0; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } - - #[bench] - pub fn vuint_at_D_unaligned(b: &mut Bencher) { - let data = (0..4*100+1).map(|i| { - match i % 4 { - 1 => 0x10, - 0 => i as u8, - _ => 0 - } - }).collect::>(); - let mut sum = 0; - b.iter(|| { - let mut i = 1; - while i < data.len() { - sum += reader::vuint_at(&data, i).unwrap().val; - i += 4; - } - }); - } -} diff --git a/src/librbml/opaque.rs b/src/librbml/opaque.rs deleted file mode 100644 index 64756090e8b48..0000000000000 --- a/src/librbml/opaque.rs +++ /dev/null @@ -1,811 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use Error as DecodeError; -use writer::EncodeResult; -use leb128::{read_signed_leb128, read_unsigned_leb128, write_signed_leb128, - write_unsigned_leb128}; -use std::io::{self, Write}; -use serialize; - -//=----------------------------------------------------------------------------- -// Encoder -//=----------------------------------------------------------------------------- - -pub struct Encoder<'a> { - pub cursor: &'a mut io::Cursor>, -} - -impl<'a> Encoder<'a> { - pub fn new(cursor: &'a mut io::Cursor>) -> Encoder<'a> { - Encoder { - cursor: cursor - } - } -} - - -macro_rules! 
write_uleb128 { - ($enc:expr, $value:expr) => {{ - let pos = $enc.cursor.position() as usize; - let bytes_written = write_unsigned_leb128($enc.cursor.get_mut(), pos, $value as u64); - $enc.cursor.set_position((pos + bytes_written) as u64); - Ok(()) - }} -} - -macro_rules! write_sleb128 { - ($enc:expr, $value:expr) => {{ - let pos = $enc.cursor.position() as usize; - let bytes_written = write_signed_leb128($enc.cursor.get_mut(), pos, $value as i64); - $enc.cursor.set_position((pos + bytes_written) as u64); - Ok(()) - }} -} - -impl<'a> serialize::Encoder for Encoder<'a> { - type Error = io::Error; - - fn emit_nil(&mut self) -> EncodeResult { - Ok(()) - } - - fn emit_uint(&mut self, v: usize) -> EncodeResult { - write_uleb128!(self, v) - } - - fn emit_u64(&mut self, v: u64) -> EncodeResult { - write_uleb128!(self, v) - } - - fn emit_u32(&mut self, v: u32) -> EncodeResult { - write_uleb128!(self, v) - } - - fn emit_u16(&mut self, v: u16) -> EncodeResult { - write_uleb128!(self, v) - } - - fn emit_u8(&mut self, v: u8) -> EncodeResult { - let _ = self.cursor.write_all(&[v]); - Ok(()) - } - - fn emit_int(&mut self, v: isize) -> EncodeResult { - write_sleb128!(self, v) - } - - fn emit_i64(&mut self, v: i64) -> EncodeResult { - write_sleb128!(self, v) - } - - fn emit_i32(&mut self, v: i32) -> EncodeResult { - write_sleb128!(self, v) - } - - fn emit_i16(&mut self, v: i16) -> EncodeResult { - write_sleb128!(self, v) - } - - fn emit_i8(&mut self, v: i8) -> EncodeResult { - let as_u8: u8 = unsafe { ::std::mem::transmute(v) }; - let _ = self.cursor.write_all(&[as_u8]); - Ok(()) - } - - fn emit_bool(&mut self, v: bool) -> EncodeResult { - self.emit_u8(if v { 1 } else { 0 }) - } - - fn emit_f64(&mut self, v: f64) -> EncodeResult { - let as_u64: u64 = unsafe { ::std::mem::transmute(v) }; - self.emit_u64(as_u64) - } - - fn emit_f32(&mut self, v: f32) -> EncodeResult { - let as_u32: u32 = unsafe { ::std::mem::transmute(v) }; - self.emit_u32(as_u32) - } - - fn emit_char(&mut self, v: 
char) -> EncodeResult { - self.emit_u32(v as u32) - } - - fn emit_str(&mut self, v: &str) -> EncodeResult { - try!(self.emit_uint(v.len())); - let _ = self.cursor.write_all(v.as_bytes()); - Ok(()) - } - - fn emit_enum(&mut self, _name: &str, f: F) -> EncodeResult - where F: FnOnce(&mut Self) -> EncodeResult { - f(self) - } - - fn emit_enum_variant(&mut self, - _v_name: &str, - v_id: usize, - _len: usize, - f: F) -> EncodeResult - where F: FnOnce(&mut Self) -> EncodeResult - { - try!(self.emit_uint(v_id)); - f(self) - } - - fn emit_enum_variant_arg(&mut self, _: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_enum_struct_variant(&mut self, - v_name: &str, - v_id: usize, - cnt: usize, - f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum_variant(v_name, v_id, cnt, f) - } - - fn emit_enum_struct_variant_field(&mut self, - _: &str, - idx: usize, - f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum_variant_arg(idx, f) - } - - fn emit_struct(&mut self, _: &str, _len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_struct_field(&mut self, _name: &str, _: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_tuple(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq(len, f) - } - - fn emit_tuple_arg(&mut self, idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq_elt(idx, f) - } - - fn emit_tuple_struct(&mut self, _: &str, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_seq(len, f) - } - - fn emit_tuple_struct_arg(&mut self, idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - 
self.emit_seq_elt(idx, f) - } - - fn emit_option(&mut self, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum("Option", f) - } - - fn emit_option_none(&mut self) -> EncodeResult { - self.emit_enum_variant("None", 0, 0, |_| Ok(())) - } - - fn emit_option_some(&mut self, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - self.emit_enum_variant("Some", 1, 1, f) - } - - fn emit_seq(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - try!(self.emit_uint(len)); - f(self) - } - - fn emit_seq_elt(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_map(&mut self, len: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - try!(self.emit_uint(len)); - f(self) - } - - fn emit_map_elt_key(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } - - fn emit_map_elt_val(&mut self, _idx: usize, f: F) -> EncodeResult where - F: FnOnce(&mut Encoder<'a>) -> EncodeResult, - { - f(self) - } -} - -impl<'a> Encoder<'a> { - pub fn position(&self) -> usize { - self.cursor.position() as usize - } - - pub fn from_rbml<'b: 'c, 'c>(rbml: &'c mut ::writer::Encoder<'b>) -> Encoder<'c> { - Encoder { - cursor: rbml.writer - } - } -} - -//=----------------------------------------------------------------------------- -// Decoder -//=----------------------------------------------------------------------------- - -pub struct Decoder<'a> { - pub data: &'a [u8], - position: usize, -} - -impl<'a> Decoder<'a> { - pub fn new(data: &'a [u8], position: usize) -> Decoder<'a> { - Decoder { - data: data, - position: position - } - } - - pub fn position(&self) -> usize { - self.position - } - - pub fn advance(&mut self, bytes: usize) { - self.position += bytes; - } -} - -macro_rules! 
read_uleb128 { - ($dec:expr, $t:ty) => ({ - let (value, bytes_read) = read_unsigned_leb128($dec.data, $dec.position); - $dec.position += bytes_read; - Ok(value as $t) - }) -} - -macro_rules! read_sleb128 { - ($dec:expr, $t:ty) => ({ - let (value, bytes_read) = read_signed_leb128($dec.data, $dec.position); - $dec.position += bytes_read; - Ok(value as $t) - }) -} - - -impl<'a> serialize::Decoder for Decoder<'a> { - type Error = DecodeError; - - fn read_nil(&mut self) -> Result<(), Self::Error> { - Ok(()) - } - - fn read_u64(&mut self) -> Result { - read_uleb128!(self, u64) - } - - fn read_u32(&mut self) -> Result { - read_uleb128!(self, u32) - } - - fn read_u16(&mut self) -> Result { - read_uleb128!(self, u16) - } - - fn read_u8(&mut self) -> Result { - let value = self.data[self.position]; - self.position += 1; - Ok(value) - } - - fn read_uint(&mut self) -> Result { - read_uleb128!(self, usize) - } - - fn read_i64(&mut self) -> Result { - read_sleb128!(self, i64) - } - - fn read_i32(&mut self) -> Result { - read_sleb128!(self, i32) - } - - fn read_i16(&mut self) -> Result { - read_sleb128!(self, i16) - } - - fn read_i8(&mut self) -> Result { - let as_u8 = self.data[self.position]; - self.position += 1; - unsafe { - Ok(::std::mem::transmute(as_u8)) - } - } - - fn read_int(&mut self) -> Result { - read_sleb128!(self, isize) - } - - fn read_bool(&mut self) -> Result { - let value = try!(self.read_u8()); - Ok(value != 0) - } - - fn read_f64(&mut self) -> Result { - let bits = try!(self.read_u64()); - Ok(unsafe { ::std::mem::transmute(bits) }) - } - - fn read_f32(&mut self) -> Result { - let bits = try!(self.read_u32()); - Ok(unsafe { ::std::mem::transmute(bits) }) - } - - fn read_char(&mut self) -> Result { - let bits = try!(self.read_u32()); - Ok(::std::char::from_u32(bits).unwrap()) - } - - fn read_str(&mut self) -> Result { - let len = try!(self.read_uint()); - let s = ::std::str::from_utf8(&self.data[self.position .. 
self.position + len]).unwrap(); - self.position += len; - Ok(s.to_string()) - } - - fn read_enum(&mut self, _name: &str, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_enum_variant(&mut self, - _: &[&str], - mut f: F) - -> Result - where F: FnMut(&mut Decoder<'a>, usize) -> Result, - { - let disr = try!(self.read_uint()); - f(self, disr) - } - - fn read_enum_variant_arg(&mut self, _idx: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_enum_struct_variant(&mut self, - _: &[&str], - mut f: F) -> Result - where F: FnMut(&mut Decoder<'a>, usize) -> Result, - { - let disr = try!(self.read_uint()); - f(self, disr) - } - - fn read_enum_struct_variant_field(&mut self, - _name: &str, - _idx: usize, - f: F) - -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_struct(&mut self, _name: &str, _: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_struct_field(&mut self, - _name: &str, - _idx: usize, f: F) - -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_tuple(&mut self, tuple_len: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - self.read_seq(move |d, len| { - if len == tuple_len { - f(d) - } else { - let err = format!("Invalid tuple length. 
Expected {}, found {}", - tuple_len, - len); - Err(DecodeError::Expected(err)) - } - }) - } - - fn read_tuple_arg(&mut self, idx: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - self.read_seq_elt(idx, f) - } - - fn read_tuple_struct(&mut self, - _name: &str, - len: usize, f: F) - -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - self.read_tuple(len, f) - } - - fn read_tuple_struct_arg(&mut self, - idx: usize, - f: F) - -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - self.read_tuple_arg(idx, f) - } - - fn read_option(&mut self, mut f: F) -> Result where - F: FnMut(&mut Decoder<'a>, bool) -> Result, - { - self.read_enum("Option", move |this| { - this.read_enum_variant(&["None", "Some"], move |this, idx| { - match idx { - 0 => f(this, false), - 1 => f(this, true), - _ => { - let msg = format!("Invalid Option index: {}", idx); - Err(DecodeError::Expected(msg)) - } - } - }) - }) - } - - fn read_seq(&mut self, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>, usize) -> Result, - { - let len = try!(self.read_uint()); - f(self, len) - } - - fn read_seq_elt(&mut self, _idx: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_map(&mut self, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>, usize) -> Result, - { - let len = try!(self.read_uint()); - f(self, len) - } - - fn read_map_elt_key(&mut self, _idx: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn read_map_elt_val(&mut self, _idx: usize, f: F) -> Result where - F: FnOnce(&mut Decoder<'a>) -> Result, - { - f(self) - } - - fn error(&mut self, err: &str) -> Self::Error { - DecodeError::ApplicationError(err.to_string()) - } -} - - -#[cfg(test)] -mod tests { - use serialize::{Encodable, Decodable}; - use std::io::{Cursor}; - use std::fmt::Debug; - use super::{Encoder, Decoder}; - - #[derive(PartialEq, Clone, Debug, RustcEncodable, RustcDecodable)] - struct Struct { - a: 
(), - b: u8, - c: u16, - d: u32, - e: u64, - f: usize, - - g: i8, - h: i16, - i: i32, - j: i64, - k: isize, - - l: char, - m: String, - n: f32, - o: f64, - p: bool, - q: Option, - } - - - fn check_round_trip(values: Vec) { - let mut cursor = Cursor::new(Vec::new()); - - for value in &values { - let mut encoder = Encoder::new(&mut cursor); - Encodable::encode(&value, &mut encoder).unwrap(); - } - - let data = cursor.into_inner(); - let mut decoder = Decoder::new(&data[..], 0); - - for value in values { - let decoded = Decodable::decode(&mut decoder).unwrap(); - assert_eq!(value, decoded); - } - } - - #[test] - fn test_unit() { - check_round_trip(vec![(), (), (), ()]); - } - - #[test] - fn test_u8() { - let mut vec = vec![]; - for i in ::std::u8::MIN .. ::std::u8::MAX { - vec.push(i); - } - check_round_trip(vec); - } - - #[test] - fn test_u16() { - for i in ::std::u16::MIN .. ::std::u16::MAX { - check_round_trip(vec![1, 2, 3, i, i, i]); - } - } - - #[test] - fn test_u32() { - check_round_trip(vec![1, 2, 3, ::std::u32::MIN, 0, 1, ::std::u32::MAX, 2, 1]); - } - - #[test] - fn test_u64() { - check_round_trip(vec![1, 2, 3, ::std::u64::MIN, 0, 1, ::std::u64::MAX, 2, 1]); - } - - #[test] - fn test_usize() { - check_round_trip(vec![1, 2, 3, ::std::usize::MIN, 0, 1, ::std::usize::MAX, 2, 1]); - } - - #[test] - fn test_i8() { - let mut vec = vec![]; - for i in ::std::i8::MIN .. ::std::i8::MAX { - vec.push(i); - } - check_round_trip(vec); - } - - #[test] - fn test_i16() { - for i in ::std::i16::MIN .. 
::std::i16::MAX { - check_round_trip(vec![-1, 2, -3, i, i, i, 2]); - } - } - - #[test] - fn test_i32() { - check_round_trip(vec![-1, 2, -3, ::std::i32::MIN, 0, 1, ::std::i32::MAX, 2, 1]); - } - - #[test] - fn test_i64() { - check_round_trip(vec![-1, 2, -3, ::std::i64::MIN, 0, 1, ::std::i64::MAX, 2, 1]); - } - - #[test] - fn test_isize() { - check_round_trip(vec![-1, 2, -3, ::std::isize::MIN, 0, 1, ::std::isize::MAX, 2, 1]); - } - - #[test] - fn test_bool() { - check_round_trip(vec![false, true, true, false, false]); - } - - #[test] - fn test_f32() { - let mut vec = vec![]; - for i in -100 .. 100 { - vec.push( (i as f32) / 3.0 ); - } - check_round_trip(vec); - } - - #[test] - fn test_f64() { - let mut vec = vec![]; - for i in -100 .. 100 { - vec.push( (i as f64) / 3.0 ); - } - check_round_trip(vec); - } - - #[test] - fn test_char() { - let vec = vec!['a', 'b', 'c', 'd', 'A', 'X', ' ', '#', 'Ö', 'Ä', 'µ', '€']; - check_round_trip(vec); - } - - #[test] - fn test_string() { - let vec = vec![ - "abcbuÖeiovÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(), - "abcbuÖganeiovÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(), - "abcbuÖganeiovÄnameÜavmpßvmea€µsbpapmaebn".to_string(), - "abcbuÖganeiovÄnameÜavmpßvmeabpnvapeapmaebn".to_string(), - "abcbuÖganeiÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(), - "abcbuÖganeiovÄnameÜavmpßvmea€µsbpmaebn".to_string(), - "abcbuÖganeiovÄnameÜavmpßvmea€µnvapeapmaebn".to_string()]; - - check_round_trip(vec); - } - - #[test] - fn test_option() { - check_round_trip(vec![Some(-1i8)]); - check_round_trip(vec![Some(-2i16)]); - check_round_trip(vec![Some(-3i32)]); - check_round_trip(vec![Some(-4i64)]); - check_round_trip(vec![Some(-5isize)]); - - let none_i8: Option = None; - check_round_trip(vec![none_i8]); - - let none_i16: Option = None; - check_round_trip(vec![none_i16]); - - let none_i32: Option = None; - check_round_trip(vec![none_i32]); - - let none_i64: Option = None; - check_round_trip(vec![none_i64]); - - let none_isize: Option = None; - 
check_round_trip(vec![none_isize]); - } - - #[test] - fn test_struct() { - check_round_trip(vec![Struct { - a: (), - b: 10, - c: 11, - d: 12, - e: 13, - f: 14, - - g: 15, - h: 16, - i: 17, - j: 18, - k: 19, - - l: 'x', - m: "abc".to_string(), - n: 20.5, - o: 21.5, - p: false, - q: None, - }]); - - check_round_trip(vec![Struct { - a: (), - b: 101, - c: 111, - d: 121, - e: 131, - f: 141, - - g: -15, - h: -16, - i: -17, - j: -18, - k: -19, - - l: 'y', - m: "def".to_string(), - n: -20.5, - o: -21.5, - p: true, - q: Some(1234567), - }]); - } - - #[derive(PartialEq, Clone, Debug, RustcEncodable, RustcDecodable)] - enum Enum { - Variant1, - Variant2(usize, f32), - Variant3 { a: i32, b: char, c: bool } - } - - #[test] - fn test_enum() { - check_round_trip(vec![Enum::Variant1, - Enum::Variant2(1, 2.5), - Enum::Variant3 { a: 3, b: 'b', c: false }, - Enum::Variant3 { a: -4, b: 'f', c: true }]); - } - - #[test] - fn test_sequence() { - let mut vec = vec![]; - for i in -100i64 .. 100i64 { - vec.push(i*100000); - } - - check_round_trip(vec![vec]); - } - - #[test] - fn test_hash_map() { - use std::collections::HashMap; - let mut map = HashMap::new(); - for i in -100i64 .. 
100i64 { - map.insert(i*100000, i*10000); - } - - check_round_trip(vec![map]); - } - - #[test] - fn test_tuples() { - check_round_trip(vec![('x', (), false, 0.5f32)]); - check_round_trip(vec![(9i8, 10u16, 1.5f64)]); - check_round_trip(vec![(-12i16, 11u8, 12usize)]); - check_round_trip(vec![(1234567isize, 100000000000000u64, 99999999999999i64)]); - check_round_trip(vec![(String::new(), "some string".to_string())]); - } -} diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml new file mode 100644 index 0000000000000..578ef68b00386 --- /dev/null +++ b/src/librustc/Cargo.toml @@ -0,0 +1,25 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc" +version = "0.0.0" + +[lib] +name = "rustc" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +arena = { path = "../libarena" } +flate = { path = "../libflate" } +fmt_macros = { path = "../libfmt_macros" } +graphviz = { path = "../libgraphviz" } +log = { path = "../liblog" } +rustc_back = { path = "../librustc_back" } +rustc_bitflags = { path = "../librustc_bitflags" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +rustc_llvm = { path = "../librustc_llvm" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc/README.md b/src/librustc/README.md index fd2e4e2fb7700..c24d3d82b2f72 100644 --- a/src/librustc/README.md +++ b/src/librustc/README.md @@ -21,7 +21,7 @@ Rustc consists of a number of crates, including `libsyntax`, (the names and divisions are not set in stone and may change; in general, a finer-grained division of crates is preferable): -- `libsyntax` contains those things concerned purely with syntax – +- [`libsyntax`][libsyntax] contains those things concerned purely with syntax – that is, the AST, parser, pretty-printer, lexer, macro expander, and utilities for traversing 
ASTs – are in a separate crate called "syntax", whose files are in `./../libsyntax`, where `.` is the @@ -32,32 +32,92 @@ in general, a finer-grained division of crates is preferable): passes, such as the type checker, borrow checker, and so forth. It is the heart of the compiler. -- `librustc_back` contains some very low-level details that are +- [`librustc_back`][back] contains some very low-level details that are specific to different LLVM targets and so forth. -- `librustc_trans` contains the code to convert from Rust IR into LLVM +- [`librustc_trans`][trans] contains the code to convert from Rust IR into LLVM IR, and then from LLVM IR into machine code, as well as the main driver that orchestrates all the other passes and various other bits of miscellany. In general it contains code that runs towards the end of the compilation process. -- `librustc_driver` invokes the compiler from `libsyntax`, then the - analysis phases from `librustc`, and finally the lowering and - codegen passes from `librustc_trans`. +- [`librustc_driver`][driver] invokes the compiler from + [`libsyntax`][libsyntax], then the analysis phases from `librustc`, and + finally the lowering and codegen passes from [`librustc_trans`][trans]. Roughly speaking the "order" of the three crates is as follows: - libsyntax -> librustc -> librustc_trans - | | - +-----------------+-------------------+ - | librustc_driver + | + +-----------------+-------------------+ + | | + libsyntax -> librustc -> librustc_trans -Modules in the rustc crate -========================== - -The rustc crate itself consists of the following submodules +The compiler process: +===================== + +The Rust compiler is comprised of six main compilation phases. + +1. Parsing input +2. Configuration & expanding (cfg rules & syntax extension expansion) +3. Running analysis passes +4. Translation to LLVM +5. LLVM passes +6. Linking + +Phase one is responsible for parsing & lexing the input to the compiler. 
The +output of this phase is an abstract syntax tree (AST). The AST at this point +includes all macro uses & attributes. This means code which will be later +expanded and/or removed due to `cfg` attributes is still present in this +version of the AST. Parsing abstracts away details about individual files which +have been read into the AST. + +Phase two handles configuration and macro expansion. You can think of this +phase as a function acting on the AST from the previous phase. The input for +this phase is the unexpanded AST from phase one, and the output is an expanded +version of the same AST. This phase will expand all macros & syntax +extensions and will evaluate all `cfg` attributes, potentially removing some +code. The resulting AST will not contain any macros or `macro_use` statements. + +The code for these first two phases is in [`libsyntax`][libsyntax]. + +After this phase, the compiler allocates IDs to each node in the AST +(technically not every node, but most of them). If we are writing out +dependencies, that happens now. + +The third phase is analysis. This is the most complex phase in the compiler, +and makes up much of the code. This phase included name resolution, type +checking, borrow checking, type & lifetime inference, trait selection, method +selection, linting and so on. Most of the error detection in the compiler comes +from this phase (with the exception of parse errors which arise during +parsing). The "output" of this phase is a set of side tables containing +semantic information about the source program. The analysis code is in +[`librustc`][rustc] and some other crates with the `librustc_` prefix. + +The fourth phase is translation. This phase translates the AST (and the side +tables from the previous phase) into LLVM IR (intermediate representation). +This is achieved by calling into the LLVM libraries. The code for this is in +[`librustc_trans`][trans]. + +Phase five runs the LLVM backend. 
This runs LLVM's optimization passes on the +generated IR and generates machine code resulting in object files. This phase +is not really part of the Rust compiler, as LLVM carries out all the work. +The interface between LLVM and Rust is in [`librustc_llvm`][llvm]. + +The final phase, phase six, links the object files into an executable. This is +again outsourced to other tools and not performed by the Rust compiler +directly. The interface is in [`librustc_back`][back] (which also contains some +things used primarily during translation). + +A module called the driver coordinates all these phases. It handles all the +highest level coordination of compilation from parsing command line arguments +all the way to invoking the linker to produce an executable. + +Modules in the librustc crate +============================= + +The librustc crate itself consists of the following submodules (mostly, but not entirely, in their own directories): - session: options and data that pertain to the compilation session as @@ -71,7 +131,7 @@ The rustc crate itself consists of the following submodules - util: ubiquitous types and helper functions - lib: bindings to LLVM -The entry-point for the compiler is main() in the librustc_driver +The entry-point for the compiler is main() in the [`librustc_driver`][driver] crate. The 3 central data structures: @@ -106,23 +166,9 @@ The 3 central data structures: Each of these is an opaque pointer to an LLVM type, manipulated through the `lib::llvm` interface. - -Control and information flow within the compiler: -------------------------------------------------- - -- main() in lib.rs assumes control on startup. Options are - parsed, platform is detected, etc. - -- `./../libsyntax/parse/parser.rs` parses the input files and produces - an AST that represents the input crate. - -- Multiple middle-end passes (`middle/resolve.rs`, `middle/typeck.rs`) - analyze the semantics of the resulting AST. 
Each pass generates new - information about the AST and stores it in various environment data - structures. The driver passes environments to each compiler pass - that needs to refer to them. - -- Finally, the `trans` module in `librustc_trans` translates the Rust - AST to LLVM bitcode in a type-directed way. When it's finished - synthesizing LLVM values, rustc asks LLVM to write them out in some - form (`.bc`, `.o`) and possibly run the system linker. +[libsyntax]: https://github.com/rust-lang/rust/tree/master/src/libsyntax/ +[trans]: https://github.com/rust-lang/rust/tree/master/src/librustc_trans/ +[llvm]: https://github.com/rust-lang/rust/tree/master/src/librustc_llvm/ +[back]: https://github.com/rust-lang/rust/tree/master/src/librustc_back/ +[rustc]: https://github.com/rust-lang/rust/tree/master/src/librustc/ +[driver]: https://github.com/rust-lang/rust/tree/master/src/librustc_driver diff --git a/src/librustc/middle/cfg/construct.rs b/src/librustc/cfg/construct.rs similarity index 77% rename from src/librustc/middle/cfg/construct.rs rename to src/librustc/cfg/construct.rs index abe8512521570..f21d98a0fc7f9 100644 --- a/src/librustc/middle/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -9,17 +9,15 @@ // except according to those terms. 
use rustc_data_structures::graph; -use middle::cfg::*; -use middle::def; -use middle::pat_util; -use middle::ty; +use cfg::*; +use ty::{self, TyCtxt}; use syntax::ast; use syntax::ptr::P; -use rustc_front::hir; +use hir::{self, PatKind}; struct CFGBuilder<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, graph: CFGGraph, fn_exit: CFGIndex, loop_scopes: Vec, @@ -32,17 +30,17 @@ struct LoopScope { break_index: CFGIndex, // where to go on a `break } -pub fn construct(tcx: &ty::ctxt, - blk: &hir::Block) -> CFG { +pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + body: &hir::Expr) -> CFG { let mut graph = graph::Graph::new(); let entry = graph.add_node(CFGNodeData::Entry); // `fn_exit` is target of return exprs, which lies somewhere - // outside input `blk`. (Distinguishing `fn_exit` and `block_exit` + // outside input `body`. (Distinguishing `fn_exit` and `body_exit` // also resolves chicken-and-egg problem that arises if you try to - // have return exprs jump to `block_exit` during construction.) + // have return exprs jump to `body_exit` during construction.) 
let fn_exit = graph.add_node(CFGNodeData::Exit); - let block_exit; + let body_exit; let mut cfg_builder = CFGBuilder { graph: graph, @@ -50,8 +48,8 @@ pub fn construct(tcx: &ty::ctxt, tcx: tcx, loop_scopes: Vec::new() }; - block_exit = cfg_builder.block(blk, entry); - cfg_builder.add_contained_edge(block_exit, fn_exit); + body_exit = cfg_builder.expr(body, entry); + cfg_builder.add_contained_edge(body_exit, fn_exit); let CFGBuilder {graph, ..} = cfg_builder; CFG {graph: graph, entry: entry, @@ -73,12 +71,12 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex { match stmt.node { hir::StmtDecl(ref decl, id) => { - let exit = self.decl(&**decl, pred); + let exit = self.decl(&decl, pred); self.add_ast_node(id, &[exit]) } hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => { - let exit = self.expr(&**expr, pred); + let exit = self.expr(&expr, pred); self.add_ast_node(id, &[exit]) } } @@ -88,7 +86,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { match decl.node { hir::DeclLocal(ref local) => { let init_exit = self.opt_expr(&local.init, pred); - self.pat(&*local.pat, init_exit) + self.pat(&local.pat, init_exit) } hir::DeclItem(_) => { @@ -99,35 +97,34 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex { match pat.node { - hir::PatIdent(_, _, None) | - hir::PatEnum(_, None) | - hir::PatQPath(..) | - hir::PatLit(..) | - hir::PatRange(..) | - hir::PatWild => { + PatKind::Binding(.., None) | + PatKind::Path(_) | + PatKind::Lit(..) | + PatKind::Range(..) 
| + PatKind::Wild => { self.add_ast_node(pat.id, &[pred]) } - hir::PatBox(ref subpat) | - hir::PatRegion(ref subpat, _) | - hir::PatIdent(_, _, Some(ref subpat)) => { - let subpat_exit = self.pat(&**subpat, pred); + PatKind::Box(ref subpat) | + PatKind::Ref(ref subpat, _) | + PatKind::Binding(.., Some(ref subpat)) => { + let subpat_exit = self.pat(&subpat, pred); self.add_ast_node(pat.id, &[subpat_exit]) } - hir::PatEnum(_, Some(ref subpats)) | - hir::PatTup(ref subpats) => { + PatKind::TupleStruct(_, ref subpats, _) | + PatKind::Tuple(ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter(), pred); self.add_ast_node(pat.id, &[pats_exit]) } - hir::PatStruct(_, ref subpats, _) => { + PatKind::Struct(_, ref subpats, _) => { let pats_exit = self.pats_all(subpats.iter().map(|f| &f.node.pat), pred); self.add_ast_node(pat.id, &[pats_exit]) } - hir::PatVec(ref pre, ref vec, ref post) => { + PatKind::Slice(ref pre, ref vec, ref post) => { let pre_exit = self.pats_all(pre.iter(), pred); let vec_exit = self.pats_all(vec.iter(), pre_exit); let post_exit = self.pats_all(post.iter(), vec_exit); @@ -140,13 +137,13 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { pats: I, pred: CFGIndex) -> CFGIndex { //! Handles case where all of the patterns must match. - pats.fold(pred, |pred, pat| self.pat(&**pat, pred)) + pats.fold(pred, |pred, pat| self.pat(&pat, pred)) } fn expr(&mut self, expr: &hir::Expr, pred: CFGIndex) -> CFGIndex { match expr.node { hir::ExprBlock(ref blk) => { - let blk_exit = self.block(&**blk, pred); + let blk_exit = self.block(&blk, pred); self.add_ast_node(expr.id, &[blk_exit]) } @@ -165,8 +162,8 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // v 3 v 4 // [..expr..] 
// - let cond_exit = self.expr(&**cond, pred); // 1 - let then_exit = self.block(&**then, cond_exit); // 2 + let cond_exit = self.expr(&cond, pred); // 1 + let then_exit = self.block(&then, cond_exit); // 2 self.add_ast_node(expr.id, &[cond_exit, then_exit]) // 3,4 } @@ -185,9 +182,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // v 4 v 5 // [..expr..] // - let cond_exit = self.expr(&**cond, pred); // 1 - let then_exit = self.block(&**then, cond_exit); // 2 - let else_exit = self.expr(&**otherwise, cond_exit); // 3 + let cond_exit = self.expr(&cond, pred); // 1 + let then_exit = self.block(&then, cond_exit); // 2 + let else_exit = self.expr(&otherwise, cond_exit); // 3 self.add_ast_node(expr.id, &[then_exit, else_exit]) // 4, 5 } @@ -211,20 +208,20 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // Is the condition considered part of the loop? let loopback = self.add_dummy_node(&[pred]); // 1 - let cond_exit = self.expr(&**cond, loopback); // 2 + let cond_exit = self.expr(&cond, loopback); // 2 let expr_exit = self.add_ast_node(expr.id, &[cond_exit]); // 3 self.loop_scopes.push(LoopScope { loop_id: expr.id, continue_index: loopback, break_index: expr_exit }); - let body_exit = self.block(&**body, cond_exit); // 4 + let body_exit = self.block(&body, cond_exit); // 4 self.add_contained_edge(body_exit, loopback); // 5 self.loop_scopes.pop(); expr_exit } - hir::ExprLoop(ref body, _) => { + hir::ExprLoop(ref body, _, _) => { // // [pred] // | @@ -246,7 +243,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { continue_index: loopback, break_index: expr_exit, }); - let body_exit = self.block(&**body, loopback); // 3 + let body_exit = self.block(&body, loopback); // 3 self.add_contained_edge(body_exit, loopback); // 4 self.loop_scopes.pop(); expr_exit @@ -256,7 +253,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.match_(expr.id, &discr, &arms, pred) } - hir::ExprBinary(op, ref l, ref r) if ::rustc_front::util::lazy_binop(op.node) => { + hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() 
=> { // // [pred] // | @@ -271,8 +268,8 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // v 3 v 4 // [..exit..] // - let l_exit = self.expr(&**l, pred); // 1 - let r_exit = self.expr(&**r, l_exit); // 2 + let l_exit = self.expr(&l, pred); // 1 + let r_exit = self.expr(&r, l_exit); // 2 self.add_ast_node(expr.id, &[l_exit, r_exit]) // 3,4 } @@ -283,51 +280,46 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.add_unreachable_node() } - hir::ExprBreak(label) => { - let loop_scope = self.find_scope(expr, label.map(|l| l.node.name)); - let b = self.add_ast_node(expr.id, &[pred]); + hir::ExprBreak(label, ref opt_expr) => { + let v = self.opt_expr(opt_expr, pred); + let loop_scope = self.find_scope(expr, label); + let b = self.add_ast_node(expr.id, &[v]); self.add_exiting_edge(expr, b, loop_scope, loop_scope.break_index); self.add_unreachable_node() } hir::ExprAgain(label) => { - let loop_scope = self.find_scope(expr, label.map(|l| l.node.name)); + let loop_scope = self.find_scope(expr, label); let a = self.add_ast_node(expr.id, &[pred]); self.add_exiting_edge(expr, a, loop_scope, loop_scope.continue_index); self.add_unreachable_node() } - hir::ExprVec(ref elems) => { - self.straightline(expr, pred, elems.iter().map(|e| &**e)) + hir::ExprArray(ref elems) => { + self.straightline(expr, pred, elems.iter().map(|e| &*e)) } hir::ExprCall(ref func, ref args) => { - self.call(expr, pred, &**func, args.iter().map(|e| &**e)) + self.call(expr, pred, &func, args.iter().map(|e| &*e)) } - hir::ExprMethodCall(_, _, ref args) => { - self.call(expr, pred, &*args[0], args[1..].iter().map(|e| &**e)) + hir::ExprMethodCall(.., ref args) => { + self.call(expr, pred, &args[0], args[1..].iter().map(|e| &*e)) } hir::ExprIndex(ref l, ref r) | - hir::ExprBinary(_, ref l, ref r) if self.tcx.is_method_call(expr.id) => { - self.call(expr, pred, &**l, Some(&**r).into_iter()) + hir::ExprBinary(_, ref l, ref r) if self.tcx.tables().is_method_call(expr.id) => { + self.call(expr, pred, &l, Some(&**r).into_iter()) } 
- hir::ExprRange(ref start, ref end) => { - let fields = start.as_ref().map(|e| &**e).into_iter() - .chain(end.as_ref().map(|e| &**e)); - self.straightline(expr, pred, fields) - } - - hir::ExprUnary(_, ref e) if self.tcx.is_method_call(expr.id) => { - self.call(expr, pred, &**e, None::.iter()) + hir::ExprUnary(_, ref e) if self.tcx.tables().is_method_call(expr.id) => { + self.call(expr, pred, &e, None::.iter()) } hir::ExprTup(ref exprs) => { - self.straightline(expr, pred, exprs.iter().map(|e| &**e)) + self.straightline(expr, pred, exprs.iter().map(|e| &*e)) } hir::ExprStruct(_, ref fields, ref base) => { @@ -359,24 +351,15 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { self.straightline(expr, pred, Some(&**e).into_iter()) } - hir::ExprInlineAsm(ref inline_asm) => { - let inputs = inline_asm.inputs.iter(); - let outputs = inline_asm.outputs.iter(); - let post_inputs = self.exprs(inputs.map(|a| { - debug!("cfg::construct InlineAsm id:{} input:{:?}", expr.id, a); - let &(_, ref expr) = a; - &**expr - }), pred); - let post_outputs = self.exprs(outputs.map(|a| { - debug!("cfg::construct InlineAsm id:{} output:{:?}", expr.id, a); - &*a.expr - }), post_inputs); - self.add_ast_node(expr.id, &[post_outputs]) + hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + let post_outputs = self.exprs(outputs.iter().map(|e| &*e), pred); + let post_inputs = self.exprs(inputs.iter().map(|e| &*e), post_outputs); + self.add_ast_node(expr.id, &[post_inputs]) } hir::ExprClosure(..) | hir::ExprLit(..) | - hir::ExprPath(..) 
=> { + hir::ExprPath(_) => { self.straightline(expr, pred, None::.iter()) } } @@ -388,14 +371,15 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { func_or_rcvr: &hir::Expr, args: I) -> CFGIndex { let method_call = ty::MethodCall::expr(call_expr.id); - let fn_ty = match self.tcx.tables.borrow().method_map.get(&method_call) { + let fn_ty = match self.tcx.tables().method_map.get(&method_call) { Some(method) => method.ty, - None => self.tcx.expr_ty_adjusted(func_or_rcvr) + None => self.tcx.tables().expr_ty_adjusted(func_or_rcvr) }; let func_or_rcvr_exit = self.expr(func_or_rcvr, pred); let ret = self.straightline(call_expr, func_or_rcvr_exit, args); - if fn_ty.fn_ret().diverges() { + // FIXME(canndrew): This is_never should probably be an is_uninhabited. + if fn_ty.fn_ret().0.is_never() { self.add_unreachable_node() } else { ret @@ -413,7 +397,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { opt_expr: &Option>, pred: CFGIndex) -> CFGIndex { //! Constructs graph for `opt_expr` evaluated, if Some - opt_expr.iter().fold(pred, |p, e| self.expr(&**e, p)) + opt_expr.iter().fold(pred, |p, e| self.expr(&e, p)) } fn straightline<'b, I: Iterator>(&mut self, @@ -461,7 +445,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { for pat in &arm.pats { // Visit the pattern, coming from the discriminant exit - let mut pat_exit = self.pat(&**pat, discr_exit); + let mut pat_exit = self.pat(&pat, discr_exit); // If there is a guard expression, handle it here if let Some(ref guard) = arm.guard { @@ -469,10 +453,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // expression to target let guard_start = self.add_dummy_node(&[pat_exit]); // Visit the guard expression - let guard_exit = self.expr(&**guard, guard_start); + let guard_exit = self.expr(&guard, guard_start); - let this_has_bindings = pat_util::pat_contains_bindings_or_wild( - &self.tcx.def_map.borrow(), &**pat); + let this_has_bindings = pat.contains_bindings_or_wild(); // If both this pattern and the previous pattern // were free of bindings, they must consist 
only @@ -552,7 +535,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn add_contained_edge(&mut self, source: CFGIndex, target: CFGIndex) { - let data = CFGEdgeData {exiting_scopes: vec!() }; + let data = CFGEdgeData {exiting_scopes: vec![] }; self.graph.add_edge(source, target, data); } @@ -561,7 +544,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { from_index: CFGIndex, to_loop: LoopScope, to_index: CFGIndex) { - let mut data = CFGEdgeData {exiting_scopes: vec!() }; + let mut data = CFGEdgeData {exiting_scopes: vec![] }; let mut scope = self.tcx.region_maps.node_extent(from_expr.id); let target_scope = self.tcx.region_maps.node_extent(to_loop.loop_id); while scope != target_scope { @@ -575,7 +558,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { _from_expr: &hir::Expr, from_index: CFGIndex) { let mut data = CFGEdgeData { - exiting_scopes: vec!(), + exiting_scopes: vec![], }; for &LoopScope { loop_id: id, .. } in self.loop_scopes.iter().rev() { data.exiting_scopes.push(id); @@ -585,25 +568,16 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { fn find_scope(&self, expr: &hir::Expr, - label: Option) -> LoopScope { - if label.is_none() { - return *self.loop_scopes.last().unwrap(); - } - - match self.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) { - Some(def::DefLabel(loop_id)) => { + label: Option) -> LoopScope { + match label { + None => *self.loop_scopes.last().unwrap(), + Some(label) => { for l in &self.loop_scopes { - if l.loop_id == loop_id { + if l.loop_id == label.loop_id { return *l; } } - self.tcx.sess.span_bug(expr.span, - &format!("no loop scope for id {}", loop_id)); - } - - r => { - self.tcx.sess.span_bug(expr.span, - &format!("bad entry `{:?}` in def_map for label", r)); + span_bug!(expr.span, "no loop scope for id {}", label.loop_id); } } } diff --git a/src/librustc/cfg/graphviz.rs b/src/librustc/cfg/graphviz.rs new file mode 100644 index 0000000000000..c651baae28224 --- /dev/null +++ b/src/librustc/cfg/graphviz.rs @@ -0,0 +1,131 @@ +// Copyright 2014 The Rust Project 
Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// This module provides linkage between rustc::middle::graph and +/// libgraphviz traits. + +// For clarity, rename the graphviz crate locally to dot. +use graphviz as dot; +use graphviz::IntoCow; + +use syntax::ast; + +use hir::map as ast_map; +use cfg; + +pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode); +pub type Edge<'a> = &'a cfg::CFGEdge; + +pub struct LabelledCFG<'a, 'ast: 'a> { + pub ast_map: &'a ast_map::Map<'ast>, + pub cfg: &'a cfg::CFG, + pub name: String, + /// `labelled_edges` controls whether we emit labels on the edges + pub labelled_edges: bool, +} + +fn replace_newline_with_backslash_l(s: String) -> String { + // Replacing newlines with \\l causes each line to be left-aligned, + // improving presentation of (long) pretty-printed expressions. + if s.contains("\n") { + let mut s = s.replace("\n", "\\l"); + // Apparently left-alignment applies to the line that precedes + // \l, not the line that follows; so, add \l at end of string + // if not already present, ensuring last line gets left-aligned + // as well. 
+ let mut last_two: Vec<_> = + s.chars().rev().take(2).collect(); + last_two.reverse(); + if last_two != ['\\', 'l'] { + s.push_str("\\l"); + } + s + } else { + s + } +} + +impl<'a, 'ast> dot::Labeller<'a> for LabelledCFG<'a, 'ast> { + type Node = Node<'a>; + type Edge = Edge<'a>; + fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new(&self.name[..]).unwrap() } + + fn node_id(&'a self, &(i,_): &Node<'a>) -> dot::Id<'a> { + dot::Id::new(format!("N{}", i.node_id())).unwrap() + } + + fn node_label(&'a self, &(i, n): &Node<'a>) -> dot::LabelText<'a> { + if i == self.cfg.entry { + dot::LabelText::LabelStr("entry".into_cow()) + } else if i == self.cfg.exit { + dot::LabelText::LabelStr("exit".into_cow()) + } else if n.data.id() == ast::DUMMY_NODE_ID { + dot::LabelText::LabelStr("(dummy_node)".into_cow()) + } else { + let s = self.ast_map.node_to_string(n.data.id()); + // left-aligns the lines + let s = replace_newline_with_backslash_l(s); + dot::LabelText::EscStr(s.into_cow()) + } + } + + fn edge_label(&self, e: &Edge<'a>) -> dot::LabelText<'a> { + let mut label = String::new(); + if !self.labelled_edges { + return dot::LabelText::EscStr(label.into_cow()); + } + let mut put_one = false; + for (i, &node_id) in e.data.exiting_scopes.iter().enumerate() { + if put_one { + label.push_str(",\\l"); + } else { + put_one = true; + } + let s = self.ast_map.node_to_string(node_id); + // left-aligns the lines + let s = replace_newline_with_backslash_l(s); + label.push_str(&format!("exiting scope_{} {}", + i, + &s[..])); + } + dot::LabelText::EscStr(label.into_cow()) + } +} + +impl<'a> dot::GraphWalk<'a> for &'a cfg::CFG { + type Node = Node<'a>; + type Edge = Edge<'a>; + fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { + let mut v = Vec::new(); + self.graph.each_node(|i, nd| { v.push((i, nd)); true }); + v.into_cow() + } + fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { + self.graph.all_edges().iter().collect() + } + fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { + let i = 
edge.source(); + (i, self.graph.node(i)) + } + fn target(&'a self, edge: &Edge<'a>) -> Node<'a> { + let i = edge.target(); + (i, self.graph.node(i)) + } +} + +impl<'a, 'ast> dot::GraphWalk<'a> for LabelledCFG<'a, 'ast> +{ + type Node = Node<'a>; + type Edge = Edge<'a>; + fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.cfg.nodes() } + fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.cfg.edges() } + fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.source(edge) } + fn target(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.target(edge) } +} diff --git a/src/librustc/cfg/mod.rs b/src/librustc/cfg/mod.rs new file mode 100644 index 0000000000000..43434b884c8d4 --- /dev/null +++ b/src/librustc/cfg/mod.rs @@ -0,0 +1,70 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Module that constructs a control-flow graph representing an item. +//! Uses `Graph` as the underlying representation. 
+ +use rustc_data_structures::graph; +use ty::TyCtxt; +use syntax::ast; +use hir; + +mod construct; +pub mod graphviz; + +pub struct CFG { + pub graph: CFGGraph, + pub entry: CFGIndex, + pub exit: CFGIndex, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum CFGNodeData { + AST(ast::NodeId), + Entry, + Exit, + Dummy, + Unreachable, +} + +impl CFGNodeData { + pub fn id(&self) -> ast::NodeId { + if let CFGNodeData::AST(id) = *self { + id + } else { + ast::DUMMY_NODE_ID + } + } +} + +#[derive(Debug)] +pub struct CFGEdgeData { + pub exiting_scopes: Vec +} + +pub type CFGIndex = graph::NodeIndex; + +pub type CFGGraph = graph::Graph; + +pub type CFGNode = graph::Node; + +pub type CFGEdge = graph::Edge; + +impl CFG { + pub fn new<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + body: &hir::Expr) -> CFG { + construct::construct(tcx, body) + } + + pub fn node_is_reachable(&self, id: ast::NodeId) -> bool { + self.graph.depth_traverse(self.entry, graph::OUTGOING) + .any(|idx| self.graph.node_data(idx).id() == id) + } +} diff --git a/src/librustc/dep_graph/README.md b/src/librustc/dep_graph/README.md index 21742d9935dc2..48f5b7ea2595d 100644 --- a/src/librustc/dep_graph/README.md +++ b/src/librustc/dep_graph/README.md @@ -51,7 +51,7 @@ could invalidate work done for other items. So, for example: not shared state, because if it changes it does not itself invalidate other functions (though it may be that it causes new monomorphizations to occur, but that's handled independently). - + Put another way: if the HIR for an item changes, we are going to recompile that item for sure. But we need the dep tracking map to tell us what *else* we have to recompile. Shared state is anything that is @@ -134,6 +134,10 @@ to read from it. Similarly, reading from the `tcache` map for item `X` (which is a `DepTrackingMap`, described below) automatically invokes `dep_graph.read(ItemSignature(X))`. 
+**Note:** adding `Hir` nodes requires a bit of caution due to the +"inlining" that old trans and constant evaluation still use. See the +section on inlining below. + To make this strategy work, a certain amount of indirection is required. For example, modules in the HIR do not have direct pointers to the items that they contain. Rather, they contain node-ids -- one @@ -177,7 +181,7 @@ reads from `item`, there would be missing edges in the graph: | ^ | | +---------------------------------+ // added by `visit_all_items_in_krate` - + In particular, the edge from `Hir(X)` to `ItemSignature(X)` is only present because we called `read` ourselves when entering the `ItemSignature(X)` task. @@ -273,8 +277,8 @@ should not exist. In contrast, using the memoized helper, you get: ... -> MapVariant(key) -> A | +----------> B - -which is much cleaner. + +which is much cleaner. **Be aware though that the closure is executed with `MapVariant(key)` pushed onto the stack as the current task!** That means that you must @@ -337,6 +341,8 @@ path is found (as demonstrated above). ### Debugging the dependency graph +#### Dumping the graph + The compiler is also capable of dumping the dependency graph for your debugging pleasure. To do so, pass the `-Z dump-dep-graph` flag. The graph will be dumped to `dep_graph.{txt,dot}` in the current @@ -388,3 +394,52 @@ This will dump out all the nodes that lead from `Hir(foo)` to `TypeckItemBody(bar)`, from which you can (hopefully) see the source of the erroneous edge. +#### Tracking down incorrect edges + +Sometimes, after you dump the dependency graph, you will find some +path that should not exist, but you will not be quite sure how it came +to be. **When the compiler is built with debug assertions,** it can +help you track that down. Simply set the `RUST_FORBID_DEP_GRAPH_EDGE` +environment variable to a filter. 
Every edge created in the dep-graph +will be tested against that filter -- if it matches, a `bug!` is +reported, so you can easily see the backtrace (`RUST_BACKTRACE=1`). + +The syntax for these filters is the same as described in the previous +section. However, note that this filter is applied to every **edge** +and doesn't handle longer paths in the graph, unlike the previous +section. + +Example: + +You find that there is a path from the `Hir` of `foo` to the type +check of `bar` and you don't think there should be. You dump the +dep-graph as described in the previous section and open `dep-graph.txt` +to see something like: + + Hir(foo) -> Collect(bar) + Collect(bar) -> TypeckItemBody(bar) + +That first edge looks suspicious to you. So you set +`RUST_FORBID_DEP_GRAPH_EDGE` to `Hir&foo -> Collect&bar`, re-run, and +then observe the backtrace. Voila, bug fixed! + +### Inlining of HIR nodes + +For the time being, at least, we still sometimes "inline" HIR nodes +from other crates into the current HIR map. This creates a weird +scenario where the same logical item (let's call it `X`) has two +def-ids: the original def-id `X` and a new, inlined one `X'`. `X'` is +in the current crate, but it's not like other HIR nodes: in +particular, when we restart compilation, it will not be available to +hash. Therefore, we do not want `Hir(X')` nodes appearing in our +graph. Instead, we want a "read" of `Hir(X')` to be represented as a +read of `MetaData(X)`, since the metadata for `X` is where the inlined +representation originated in the first place. + +To achieve this, the HIR map will detect if the def-id originates in +an inlined node and add a dependency to a suitable `MetaData` node +instead. If you are reading a HIR node and are not sure if it may be +inlined or not, you can use `tcx.map.read(node_id)` and it will detect +whether the node is inlined or not and do the right thing. You can +also use `tcx.map.is_inlined_def_id()` and +`tcx.map.is_inlined_node_id()` to test. 
diff --git a/src/librustc/dep_graph/debug.rs b/src/librustc/dep_graph/debug.rs new file mode 100644 index 0000000000000..5b15c5e67174e --- /dev/null +++ b/src/librustc/dep_graph/debug.rs @@ -0,0 +1,76 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Code for debugging the dep-graph. + +use super::dep_node::DepNode; +use std::error::Error; +use std::fmt::Debug; + +/// A dep-node filter goes from a user-defined string to a query over +/// nodes. Right now the format is like this: +/// +/// x & y & z +/// +/// where the format-string of the dep-node must contain `x`, `y`, and +/// `z`. +#[derive(Debug)] +pub struct DepNodeFilter { + text: String +} + +impl DepNodeFilter { + pub fn new(text: &str) -> Self { + DepNodeFilter { + text: text.trim().to_string() + } + } + + /// True if all nodes always pass the filter. + pub fn accepts_all(&self) -> bool { + self.text.is_empty() + } + + /// Tests whether `node` meets the filter, returning true if so. + pub fn test(&self, node: &DepNode) -> bool { + let debug_str = format!("{:?}", node); + self.text.split("&") + .map(|s| s.trim()) + .all(|f| debug_str.contains(f)) + } +} + +/// A filter like `F -> G` where `F` and `G` are valid dep-node +/// filters. This can be used to test the source/target independently. 
+pub struct EdgeFilter { + pub source: DepNodeFilter, + pub target: DepNodeFilter, +} + +impl EdgeFilter { + pub fn new(test: &str) -> Result> { + let parts: Vec<_> = test.split("->").collect(); + if parts.len() != 2 { + Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into()) + } else { + Ok(EdgeFilter { + source: DepNodeFilter::new(parts[0]), + target: DepNodeFilter::new(parts[1]), + }) + } + } + + pub fn test(&self, + source: &DepNode, + target: &DepNode) + -> bool { + self.source.test(source) && self.target.test(target) + } +} diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs new file mode 100644 index 0000000000000..e261c699b6ac6 --- /dev/null +++ b/src/librustc/dep_graph/dep_node.rs @@ -0,0 +1,251 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt::Debug; +use std::sync::Arc; + +macro_rules! try_opt { + ($e:expr) => ( + match $e { + Some(r) => r, + None => return None, + } + ) +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub enum DepNode { + // The `D` type is "how definitions are identified". + // During compilation, it is always `DefId`, but when serializing + // it is mapped to `DefPath`. + + // Represents the `Krate` as a whole (the `hir::Krate` value) (as + // distinct from the krate module). This is basically a hash of + // the entire krate, so if you read from `Krate` (e.g., by calling + // `tcx.map.krate()`), we will have to assume that any change + // means that you need to be recompiled. This is because the + // `Krate` value gives you access to all other items. 
To avoid + // this fate, do not call `tcx.map.krate()`; instead, prefer + // wrappers like `tcx.visit_all_items_in_krate()`. If there is no + // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain + // access to the krate, but you must remember to add suitable + // edges yourself for the individual items that you read. + Krate, + + // Represents the HIR node with the given node-id + Hir(D), + + // Represents the body of a function or method. The def-id is that of the + // function/method. + HirBody(D), + + // Represents the metadata for a given HIR node, typically found + // in an extern crate. + MetaData(D), + + // Represents some artifact that we save to disk. Note that these + // do not have a def-id as part of their identifier. + WorkProduct(Arc), + + // Represents different phases in the compiler. + CollectLanguageItems, + CheckStaticRecursion, + ResolveLifetimes, + RegionResolveCrate, + CheckLoops, + PluginRegistrar, + StabilityIndex, + CollectItem(D), + CollectItemSig(D), + Coherence, + EffectCheck, + Liveness, + Resolve, + EntryPoint, + CheckEntryFn, + CoherenceCheckImpl(D), + CoherenceOverlapCheck(D), + CoherenceOverlapCheckSpecial(D), + CoherenceOverlapInherentCheck(D), + CoherenceOrphanCheck(D), + Variance, + WfCheck(D), + TypeckItemType(D), + TypeckItemBody(D), + Dropck, + DropckImpl(D), + UnusedTraitCheck, + CheckConst(D), + Privacy, + IntrinsicCheck(D), + MatchCheck(D), + + // Represents the MIR for a fn; also used as the task node for + // things read/modify that MIR. + Mir(D), + + BorrowCheck(D), + RvalueCheck(D), + Reachability, + DeadCheck, + StabilityCheck(D), + LateLintCheck, + TransCrate, + TransCrateItem(D), + TransInlinedItem(D), + TransWriteMetadata, + LinkBinary, + + // Nodes representing bits of computed IR in the tcx. Each shared + // table in the tcx (or elsewhere) maps to one of these + // nodes. 
Often we map multiple tables to the same node if there + // is no point in distinguishing them (e.g., both the type and + // predicates for an item wind up in `ItemSignature`). + AssociatedItems(D), + ItemSignature(D), + SizedConstraint(D), + AssociatedItemDefIds(D), + InherentImpls(D), + + // The set of impls for a given trait. Ultimately, it would be + // nice to get more fine-grained here (e.g., to include a + // simplified type), but we can't do that until we restructure the + // HIR to distinguish the *header* of an impl from its body. This + // is because changes to the header may change the self-type of + // the impl and hence would require us to be more conservative + // than changes in the impl body. + TraitImpls(D), + + // Nodes representing caches. To properly handle a true cache, we + // don't use a DepTrackingMap, but rather we push a task node. + // Otherwise the write into the map would be incorrectly + // attributed to the first task that happened to fill the cache, + // which would yield an overly conservative dep-graph. + TraitItems(D), + ReprHints(D), + TraitSelect(Vec), +} + +impl DepNode { + /// Used in testing + pub fn from_label_string(label: &str, data: D) -> Result, ()> { + macro_rules! check { + ($($name:ident,)*) => { + match label { + $(stringify!($name) => Ok(DepNode::$name(data)),)* + _ => Err(()) + } + } + } + + if label == "Krate" { + // special case + return Ok(DepNode::Krate); + } + + check! 
{ + CollectItem, + BorrowCheck, + Hir, + HirBody, + TransCrateItem, + TypeckItemType, + TypeckItemBody, + AssociatedItems, + ItemSignature, + AssociatedItemDefIds, + InherentImpls, + TraitImpls, + ReprHints, + } + } + + pub fn map_def(&self, mut op: OP) -> Option> + where OP: FnMut(&D) -> Option, E: Clone + Debug + { + use self::DepNode::*; + + match *self { + Krate => Some(Krate), + CollectLanguageItems => Some(CollectLanguageItems), + CheckStaticRecursion => Some(CheckStaticRecursion), + ResolveLifetimes => Some(ResolveLifetimes), + RegionResolveCrate => Some(RegionResolveCrate), + CheckLoops => Some(CheckLoops), + PluginRegistrar => Some(PluginRegistrar), + StabilityIndex => Some(StabilityIndex), + Coherence => Some(Coherence), + EffectCheck => Some(EffectCheck), + Liveness => Some(Liveness), + Resolve => Some(Resolve), + EntryPoint => Some(EntryPoint), + CheckEntryFn => Some(CheckEntryFn), + Variance => Some(Variance), + Dropck => Some(Dropck), + UnusedTraitCheck => Some(UnusedTraitCheck), + Privacy => Some(Privacy), + Reachability => Some(Reachability), + DeadCheck => Some(DeadCheck), + LateLintCheck => Some(LateLintCheck), + TransCrate => Some(TransCrate), + TransWriteMetadata => Some(TransWriteMetadata), + LinkBinary => Some(LinkBinary), + + // work product names do not need to be mapped, because + // they are always absolute. 
+ WorkProduct(ref id) => Some(WorkProduct(id.clone())), + + Hir(ref d) => op(d).map(Hir), + HirBody(ref d) => op(d).map(HirBody), + MetaData(ref d) => op(d).map(MetaData), + CollectItem(ref d) => op(d).map(CollectItem), + CollectItemSig(ref d) => op(d).map(CollectItemSig), + CoherenceCheckImpl(ref d) => op(d).map(CoherenceCheckImpl), + CoherenceOverlapCheck(ref d) => op(d).map(CoherenceOverlapCheck), + CoherenceOverlapCheckSpecial(ref d) => op(d).map(CoherenceOverlapCheckSpecial), + CoherenceOverlapInherentCheck(ref d) => op(d).map(CoherenceOverlapInherentCheck), + CoherenceOrphanCheck(ref d) => op(d).map(CoherenceOrphanCheck), + WfCheck(ref d) => op(d).map(WfCheck), + TypeckItemType(ref d) => op(d).map(TypeckItemType), + TypeckItemBody(ref d) => op(d).map(TypeckItemBody), + DropckImpl(ref d) => op(d).map(DropckImpl), + CheckConst(ref d) => op(d).map(CheckConst), + IntrinsicCheck(ref d) => op(d).map(IntrinsicCheck), + MatchCheck(ref d) => op(d).map(MatchCheck), + Mir(ref d) => op(d).map(Mir), + BorrowCheck(ref d) => op(d).map(BorrowCheck), + RvalueCheck(ref d) => op(d).map(RvalueCheck), + StabilityCheck(ref d) => op(d).map(StabilityCheck), + TransCrateItem(ref d) => op(d).map(TransCrateItem), + TransInlinedItem(ref d) => op(d).map(TransInlinedItem), + AssociatedItems(ref d) => op(d).map(AssociatedItems), + ItemSignature(ref d) => op(d).map(ItemSignature), + SizedConstraint(ref d) => op(d).map(SizedConstraint), + AssociatedItemDefIds(ref d) => op(d).map(AssociatedItemDefIds), + InherentImpls(ref d) => op(d).map(InherentImpls), + TraitImpls(ref d) => op(d).map(TraitImpls), + TraitItems(ref d) => op(d).map(TraitItems), + ReprHints(ref d) => op(d).map(ReprHints), + TraitSelect(ref type_ds) => { + let type_ds = try_opt!(type_ds.iter().map(|d| op(d)).collect()); + Some(TraitSelect(type_ds)) + } + } + } +} + +/// A "work product" corresponds to a `.o` (or other) file that we +/// save in between runs. 
These ids do not have a DefId but rather +/// some independent path or string that persists between runs without +/// the need to be mapped or unmapped. (This ensures we can serialize +/// them even in the absence of a tcx.) +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct WorkProductId(pub String); + diff --git a/src/librustc/dep_graph/dep_tracking_map.rs b/src/librustc/dep_graph/dep_tracking_map.rs index c49e64f0f543b..50dfe9d22f12f 100644 --- a/src/librustc/dep_graph/dep_tracking_map.rs +++ b/src/librustc/dep_graph/dep_tracking_map.rs @@ -8,7 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc_data_structures::fnv::FnvHashMap; +use hir::def_id::DefId; +use rustc_data_structures::fx::FxHashMap; use std::cell::RefCell; use std::ops::Index; use std::hash::Hash; @@ -23,13 +24,13 @@ use super::{DepNode, DepGraph}; pub struct DepTrackingMap { phantom: PhantomData, graph: DepGraph, - map: FnvHashMap, + map: FxHashMap, } pub trait DepTrackingMapConfig { type Key: Eq + Hash + Clone; type Value: Clone; - fn to_dep_node(key: &Self::Key) -> DepNode; + fn to_dep_node(key: &Self::Key) -> DepNode; } impl DepTrackingMap { @@ -37,7 +38,7 @@ impl DepTrackingMap { DepTrackingMap { phantom: PhantomData, graph: graph, - map: FnvHashMap() + map: FxHashMap() } } @@ -60,6 +61,12 @@ impl DepTrackingMap { self.map.get(k) } + pub fn get_mut(&mut self, k: &M::Key) -> Option<&mut M::Value> { + self.read(k); + self.write(k); + self.map.get_mut(k) + } + pub fn insert(&mut self, k: M::Key, v: M::Value) -> Option { self.write(&k); self.map.insert(k, v) @@ -69,6 +76,21 @@ impl DepTrackingMap { self.read(k); self.map.contains_key(k) } + + pub fn keys(&self) -> Vec { + self.map.keys().cloned().collect() + } + + /// Append `elem` to the vector stored for `k`, creating a new vector if needed. + /// This is considered a write to `k`. 
+ pub fn push(&mut self, k: M::Key, elem: E) + where M: DepTrackingMapConfig> + { + self.write(&k); + self.map.entry(k) + .or_insert(Vec::new()) + .push(elem); + } } impl MemoizationMap for RefCell> { @@ -90,15 +112,15 @@ impl MemoizationMap for RefCell> { /// switched to `Map(key)`. Therefore, if `op` makes use of any /// HIR nodes or shared state accessed through its closure /// environment, it must explicitly register a read of that - /// state. As an example, see `type_scheme_of_item` in `collect`, + /// state. As an example, see `type_of_item` in `collect`, /// which looks something like this: /// /// ``` - /// fn type_scheme_of_item(..., item: &hir::Item) -> ty::TypeScheme<'tcx> { + /// fn type_of_item(..., item: &hir::Item) -> Ty<'tcx> { /// let item_def_id = ccx.tcx.map.local_def_id(it.id); - /// ccx.tcx.tcache.memoized(item_def_id, || { + /// ccx.tcx.item_types.memoized(item_def_id, || { /// ccx.tcx.dep_graph.read(DepNode::Hir(item_def_id)); // (*) - /// compute_type_scheme_of_item(ccx, item) + /// compute_type_of_item(ccx, item) /// }); /// } /// ``` diff --git a/src/librustc/dep_graph/edges.rs b/src/librustc/dep_graph/edges.rs index 4b25285c476c4..8657a3e5a5878 100644 --- a/src/librustc/dep_graph/edges.rs +++ b/src/librustc/dep_graph/edges.rs @@ -8,13 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use std::fmt::Debug; +use std::hash::Hash; use super::{DepGraphQuery, DepNode}; -pub struct DepGraphEdges { - nodes: Vec, - indices: FnvHashMap, - edges: FnvHashSet<(IdIndex, IdIndex)>, +pub struct DepGraphEdges { + nodes: Vec>, + indices: FxHashMap, IdIndex>, + edges: FxHashSet<(IdIndex, IdIndex)>, open_nodes: Vec, } @@ -40,22 +42,22 @@ enum OpenNode { Ignore, } -impl DepGraphEdges { - pub fn new() -> DepGraphEdges { +impl DepGraphEdges { + pub fn new() -> DepGraphEdges { DepGraphEdges { nodes: vec![], - indices: FnvHashMap(), - edges: FnvHashSet(), + indices: FxHashMap(), + edges: FxHashSet(), open_nodes: Vec::new() } } - fn id(&self, index: IdIndex) -> DepNode { - self.nodes[index.index()] + fn id(&self, index: IdIndex) -> DepNode { + self.nodes[index.index()].clone() } /// Creates a node for `id` in the graph. - fn make_node(&mut self, id: DepNode) -> IdIndex { + fn make_node(&mut self, id: DepNode) -> IdIndex { if let Some(&i) = self.indices.get(&id) { return i; } @@ -80,7 +82,7 @@ impl DepGraphEdges { assert_eq!(popped_node, OpenNode::Ignore); } - pub fn push_task(&mut self, key: DepNode) { + pub fn push_task(&mut self, key: DepNode) { let top_node = self.current_node(); let new_node = self.make_node(key); @@ -93,7 +95,7 @@ impl DepGraphEdges { } } - pub fn pop_task(&mut self, key: DepNode) { + pub fn pop_task(&mut self, key: DepNode) { let popped_node = self.open_nodes.pop().unwrap(); assert_eq!(OpenNode::Node(self.indices[&key]), popped_node); } @@ -101,7 +103,7 @@ impl DepGraphEdges { /// Indicates that the current task `C` reads `v` by adding an /// edge from `v` to `C`. If there is no current task, panics. If /// you want to suppress this edge, use `ignore`. 
- pub fn read(&mut self, v: DepNode) { + pub fn read(&mut self, v: DepNode) { let source = self.make_node(v); self.add_edge_from_current_node(|current| (source, current)) } @@ -109,7 +111,7 @@ impl DepGraphEdges { /// Indicates that the current task `C` writes `v` by adding an /// edge from `C` to `v`. If there is no current task, panics. If /// you want to suppress this edge, use `ignore`. - pub fn write(&mut self, v: DepNode) { + pub fn write(&mut self, v: DepNode) { let target = self.make_node(v); self.add_edge_from_current_node(|current| (current, target)) } @@ -122,7 +124,7 @@ impl DepGraphEdges { { match self.current_node() { Some(open_node) => self.add_edge_from_open_node(open_node, op), - None => panic!("no current node, cannot add edge into dependency graph") + None => bug!("no current node, cannot add edge into dependency graph") } } @@ -153,7 +155,7 @@ impl DepGraphEdges { } } - pub fn query(&self) -> DepGraphQuery { + pub fn query(&self) -> DepGraphQuery { let edges: Vec<_> = self.edges.iter() .map(|&(i, j)| (self.id(i), self.id(j))) .collect(); diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs new file mode 100644 index 0000000000000..2637d34c5c56e --- /dev/null +++ b/src/librustc/dep_graph/graph.rs @@ -0,0 +1,165 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use hir::def_id::DefId; +use rustc_data_structures::fx::FxHashMap; +use session::config::OutputType; +use std::cell::{Ref, RefCell}; +use std::rc::Rc; +use std::sync::Arc; + +use super::dep_node::{DepNode, WorkProductId}; +use super::query::DepGraphQuery; +use super::raii; +use super::thread::{DepGraphThreadData, DepMessage}; + +#[derive(Clone)] +pub struct DepGraph { + data: Rc +} + +struct DepGraphData { + /// We send messages to the thread to let it build up the dep-graph + /// from the current run. + thread: DepGraphThreadData, + + /// When we load, there may be `.o` files, cached mir, or other such + /// things available to us. If we find that they are not dirty, we + /// load the path to the file storing those work-products here into + /// this map. We can later look for and extract that data. + previous_work_products: RefCell, WorkProduct>>, + + /// Work-products that we generate in this run. + work_products: RefCell, WorkProduct>>, +} + +impl DepGraph { + pub fn new(enabled: bool) -> DepGraph { + DepGraph { + data: Rc::new(DepGraphData { + thread: DepGraphThreadData::new(enabled), + previous_work_products: RefCell::new(FxHashMap()), + work_products: RefCell::new(FxHashMap()), + }) + } + } + + pub fn query(&self) -> DepGraphQuery { + self.data.thread.query() + } + + pub fn in_ignore<'graph>(&'graph self) -> Option> { + raii::IgnoreTask::new(&self.data.thread) + } + + pub fn in_task<'graph>(&'graph self, key: DepNode) -> Option> { + raii::DepTask::new(&self.data.thread, key) + } + + pub fn with_ignore(&self, op: OP) -> R + where OP: FnOnce() -> R + { + let _task = self.in_ignore(); + op() + } + + pub fn with_task(&self, key: DepNode, op: OP) -> R + where OP: FnOnce() -> R + { + let _task = self.in_task(key); + op() + } + + pub fn read(&self, v: DepNode) { + if self.data.thread.is_enqueue_enabled() { + self.data.thread.enqueue(DepMessage::Read(v)); + } + } + + pub fn write(&self, v: DepNode) { + if self.data.thread.is_enqueue_enabled() { + 
self.data.thread.enqueue(DepMessage::Write(v)); + } + } + + /// Indicates that a previous work product exists for `v`. This is + /// invoked during initial start-up based on what nodes are clean + /// (and what files exist in the incr. directory). + pub fn insert_previous_work_product(&self, v: &Arc, data: WorkProduct) { + debug!("insert_previous_work_product({:?}, {:?})", v, data); + self.data.previous_work_products.borrow_mut() + .insert(v.clone(), data); + } + + /// Indicates that we created the given work-product in this run + /// for `v`. This record will be preserved and loaded in the next + /// run. + pub fn insert_work_product(&self, v: &Arc, data: WorkProduct) { + debug!("insert_work_product({:?}, {:?})", v, data); + self.data.work_products.borrow_mut() + .insert(v.clone(), data); + } + + /// Check whether a previous work product exists for `v` and, if + /// so, return the path that leads to it. Used to skip doing work. + pub fn previous_work_product(&self, v: &Arc) -> Option { + self.data.previous_work_products.borrow() + .get(v) + .cloned() + } + + /// Access the map of work-products created during this run. Only + /// used during saving of the dep-graph. + pub fn work_products(&self) -> Ref, WorkProduct>> { + self.data.work_products.borrow() + } +} + +/// A "work product" is an intermediate result that we save into the +/// incremental directory for later re-use. The primary example are +/// the object files that we save for each partition at code +/// generation time. +/// +/// Each work product is associated with a dep-node, representing the +/// process that produced the work-product. If that dep-node is found +/// to be dirty when we load up, then we will delete the work-product +/// at load time. If the work-product is found to be clean, then we +/// will keep a record in the `previous_work_products` list. +/// +/// In addition, work products have an associated hash. 
This hash is +/// an extra hash that can be used to decide if the work-product from +/// a previous compilation can be re-used (in addition to the dirty +/// edges check). +/// +/// As the primary example, consider the object files we generate for +/// each partition. In the first run, we create partitions based on +/// the symbols that need to be compiled. For each partition P, we +/// hash the symbols in P and create a `WorkProduct` record associated +/// with `DepNode::TransPartition(P)`; the hash is the set of symbols +/// in P. +/// +/// The next time we compile, if the `DepNode::TransPartition(P)` is +/// judged to be clean (which means none of the things we read to +/// generate the partition were found to be dirty), it will be loaded +/// into previous work products. We will then regenerate the set of +/// symbols in the partition P and hash them (note that new symbols +/// may be added -- for example, new monomorphizations -- even if +/// nothing in P changed!). We will compare that hash against the +/// previous hash. If it matches up, we can reuse the object file. +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct WorkProduct { + /// Extra hash used to decide if work-product is still suitable; + /// note that this is *not* a hash of the work-product itself. + /// See documentation on `WorkProduct` type for an example. + pub input_hash: u64, + + /// Saved files associated with this CGU + pub saved_files: Vec<(OutputType, String)>, +} diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 9bf0a79115e78..e365cea6d0e5e 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -8,189 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use self::thread::{DepGraphThreadData, DepMessage}; -use middle::def_id::DefId; -use middle::ty; -use middle::ty::fast_reject::SimplifiedType; -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use std::rc::Rc; - +pub mod debug; +mod dep_node; mod dep_tracking_map; mod edges; +mod graph; mod query; mod raii; +mod shadow; mod thread; - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum DepNode { - // Represents the `Krate` as a whole (the `hir::Krate` value) (as - // distinct from the krate module). This is basically a hash of - // the entire krate, so if you read from `Krate` (e.g., by calling - // `tcx.map.krate()`), we will have to assume that any change - // means that you need to be recompiled. This is because the - // `Krate` value gives you access to all other items. To avoid - // this fate, do not call `tcx.map.krate()`; instead, prefer - // wrappers like `tcx.visit_all_items_in_krate()`. If there is no - // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain - // access to the krate, but you must remember to add suitable - // edges yourself for the individual items that you read. - Krate, - - // Represents the HIR node with the given node-id - Hir(DefId), - - // Represents different phases in the compiler. - CollectItem(DefId), - Coherence, - CoherenceCheckImpl(DefId), - CoherenceOverlapCheck(DefId), - CoherenceOverlapCheckSpecial(DefId), - CoherenceOrphanCheck(DefId), - Variance, - WfCheck(DefId), - TypeckItemType(DefId), - TypeckItemBody(DefId), - Dropck, - DropckImpl(DefId), - CheckConst(DefId), - Privacy, - IntrinsicCheck(DefId), - MatchCheck(DefId), - MirMapConstruction(DefId), - BorrowCheck(DefId), - RvalueCheck(DefId), - Reachability, - DeadCheck, - StabilityCheck, - LateLintCheck, - IntrinsicUseCheck, - TransCrate, - TransCrateItem(DefId), - TransInlinedItem(DefId), - TransWriteMetadata, - - // Nodes representing bits of computed IR in the tcx. 
Each shared - // table in the tcx (or elsewhere) maps to one of these - // nodes. Often we map multiple tables to the same node if there - // is no point in distinguishing them (e.g., both the type and - // predicates for an item wind up in `ItemSignature`). Other - // times, such as `ImplItems` vs `TraitItemDefIds`, tables which - // might be mergable are kept distinct because the sets of def-ids - // to which they apply are disjoint, and hence we might as well - // have distinct labels for easier debugging. - ImplOrTraitItems(DefId), - ItemSignature(DefId), - FieldTy(DefId), - TraitItemDefIds(DefId), - InherentImpls(DefId), - ImplItems(DefId), - - // The set of impls for a given trait. Ultimately, it would be - // nice to get more fine-grained here (e.g., to include a - // simplified type), but we can't do that until we restructure the - // HIR to distinguish the *header* of an impl from its body. This - // is because changes to the header may change the self-type of - // the impl and hence would require us to be more conservative - // than changes in the impl body. - TraitImpls(DefId), - - // Nodes representing caches. To properly handle a true cache, we - // don't use a DepTrackingMap, but rather we push a task node. - // Otherwise the write into the map would be incorrectly - // attributed to the first task that happened to fill the cache, - // which would yield an overly conservative dep-graph. 
- TraitItems(DefId), - ReprHints(DefId), - TraitSelect(DefId, Option), -} - -#[derive(Clone)] -pub struct DepGraph { - data: Rc -} - -impl DepGraph { - pub fn new(enabled: bool) -> DepGraph { - DepGraph { - data: Rc::new(DepGraphThreadData::new(enabled)) - } - } - - pub fn query(&self) -> DepGraphQuery { - self.data.query() - } - - pub fn in_ignore<'graph>(&'graph self) -> raii::IgnoreTask<'graph> { - raii::IgnoreTask::new(&self.data) - } - - pub fn in_task<'graph>(&'graph self, key: DepNode) -> raii::DepTask<'graph> { - raii::DepTask::new(&self.data, key) - } - - pub fn with_ignore(&self, op: OP) -> R - where OP: FnOnce() -> R - { - let _task = self.in_ignore(); - op() - } - - pub fn with_task(&self, key: DepNode, op: OP) -> R - where OP: FnOnce() -> R - { - let _task = self.in_task(key); - op() - } - - pub fn read(&self, v: DepNode) { - self.data.enqueue(DepMessage::Read(v)); - } - - pub fn write(&self, v: DepNode) { - self.data.enqueue(DepMessage::Write(v)); - } -} +mod visit; pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig}; - +pub use self::dep_node::DepNode; +pub use self::dep_node::WorkProductId; +pub use self::graph::DepGraph; +pub use self::graph::WorkProduct; pub use self::query::DepGraphQuery; - -/// Visit all the items in the krate in some order. When visiting a -/// particular item, first create a dep-node by calling `dep_node_fn` -/// and push that onto the dep-graph stack of tasks, and also create a -/// read edge from the corresponding AST node. This is used in -/// compiler passes to automatically record the item that they are -/// working on. 
-pub fn visit_all_items_in_krate<'tcx,V,F>(tcx: &ty::ctxt<'tcx>, - mut dep_node_fn: F, - visitor: &mut V) - where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx> -{ - struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> { - tcx: &'visit ty::ctxt<'tcx>, - dep_node_fn: &'visit mut F, - visitor: &'visit mut V - } - - impl<'visit, 'tcx, F, V> Visitor<'tcx> for TrackingVisitor<'visit, 'tcx, F, V> - where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx> - { - fn visit_item(&mut self, i: &'tcx hir::Item) { - let item_def_id = self.tcx.map.local_def_id(i.id); - let task_id = (self.dep_node_fn)(item_def_id); - debug!("About to start task {:?}", task_id); - let _task = self.tcx.dep_graph.in_task(task_id); - self.tcx.dep_graph.read(DepNode::Hir(item_def_id)); - self.visitor.visit_item(i) - } - } - - let krate = tcx.dep_graph.with_ignore(|| tcx.map.krate()); - let mut tracking_visitor = TrackingVisitor { - tcx: tcx, - dep_node_fn: &mut dep_node_fn, - visitor: visitor - }; - krate.visit_all_items(&mut tracking_visitor) -} +pub use self::visit::visit_all_item_likes_in_krate; +pub use self::raii::DepTask; diff --git a/src/librustc/dep_graph/query.rs b/src/librustc/dep_graph/query.rs index 74a054acb4fa0..4c791f9655342 100644 --- a/src/librustc/dep_graph/query.rs +++ b/src/librustc/dep_graph/query.rs @@ -8,20 +8,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc_data_structures::fnv::FnvHashMap; -use rustc_data_structures::graph::{Graph, NodeIndex}; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::graph::{Direction, INCOMING, Graph, NodeIndex, OUTGOING}; +use std::fmt::Debug; +use std::hash::Hash; use super::DepNode; -pub struct DepGraphQuery { - pub graph: Graph, - pub indices: FnvHashMap, +pub struct DepGraphQuery { + pub graph: Graph, ()>, + pub indices: FxHashMap, NodeIndex>, } -impl DepGraphQuery { - pub fn new(nodes: &[DepNode], edges: &[(DepNode, DepNode)]) -> DepGraphQuery { +impl DepGraphQuery { + pub fn new(nodes: &[DepNode], + edges: &[(DepNode, DepNode)]) + -> DepGraphQuery { let mut graph = Graph::new(); - let mut indices = FnvHashMap(); + let mut indices = FxHashMap(); for node in nodes { indices.insert(node.clone(), graph.next_node_index()); graph.add_node(node.clone()); @@ -39,27 +43,52 @@ impl DepGraphQuery { } } - pub fn nodes(&self) -> Vec { + pub fn contains_node(&self, node: &DepNode) -> bool { + self.indices.contains_key(&node) + } + + pub fn nodes(&self) -> Vec<&DepNode> { self.graph.all_nodes() .iter() - .map(|n| n.data.clone()) + .map(|n| &n.data) .collect() } - pub fn edges(&self) -> Vec<(DepNode,DepNode)> { + pub fn edges(&self) -> Vec<(&DepNode,&DepNode)> { self.graph.all_edges() .iter() .map(|edge| (edge.source(), edge.target())) - .map(|(s, t)| (self.graph.node_data(s).clone(), self.graph.node_data(t).clone())) + .map(|(s, t)| (self.graph.node_data(s), + self.graph.node_data(t))) .collect() } + fn reachable_nodes(&self, node: &DepNode, direction: Direction) -> Vec<&DepNode> { + if let Some(&index) = self.indices.get(node) { + self.graph.depth_traverse(index, direction) + .map(|s| self.graph.node_data(s)) + .collect() + } else { + vec![] + } + } + /// All nodes reachable from `node`. In other words, things that /// will have to be recomputed if `node` changes. 
- pub fn dependents(&self, node: DepNode) -> Vec { + pub fn transitive_successors(&self, node: &DepNode) -> Vec<&DepNode> { + self.reachable_nodes(node, OUTGOING) + } + + /// All nodes that can reach `node`. + pub fn transitive_predecessors(&self, node: &DepNode) -> Vec<&DepNode> { + self.reachable_nodes(node, INCOMING) + } + + /// Just the outgoing edges from `node`. + pub fn immediate_successors(&self, node: &DepNode) -> Vec<&DepNode> { if let Some(&index) = self.indices.get(&node) { - self.graph.depth_traverse(index) - .map(|dependent_node| self.graph.node_data(dependent_node).clone()) + self.graph.successor_nodes(index) + .map(|s| self.graph.node_data(s)) .collect() } else { vec![] diff --git a/src/librustc/dep_graph/raii.rs b/src/librustc/dep_graph/raii.rs index dd7ff92f9c360..e39797599acfd 100644 --- a/src/librustc/dep_graph/raii.rs +++ b/src/librustc/dep_graph/raii.rs @@ -8,24 +8,32 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use hir::def_id::DefId; use super::DepNode; use super::thread::{DepGraphThreadData, DepMessage}; pub struct DepTask<'graph> { data: &'graph DepGraphThreadData, - key: DepNode, + key: Option>, } impl<'graph> DepTask<'graph> { - pub fn new(data: &'graph DepGraphThreadData, key: DepNode) -> DepTask<'graph> { - data.enqueue(DepMessage::PushTask(key)); - DepTask { data: data, key: key } + pub fn new(data: &'graph DepGraphThreadData, key: DepNode) + -> Option> { + if data.is_enqueue_enabled() { + data.enqueue(DepMessage::PushTask(key.clone())); + Some(DepTask { data: data, key: Some(key) }) + } else { + None + } } } impl<'graph> Drop for DepTask<'graph> { fn drop(&mut self) { - self.data.enqueue(DepMessage::PopTask(self.key)); + if self.data.is_enqueue_enabled() { + self.data.enqueue(DepMessage::PopTask(self.key.take().unwrap())); + } } } @@ -34,14 +42,21 @@ pub struct IgnoreTask<'graph> { } impl<'graph> IgnoreTask<'graph> { - pub fn new(data: &'graph DepGraphThreadData) -> IgnoreTask<'graph> { - data.enqueue(DepMessage::PushIgnore); - IgnoreTask { data: data } + pub fn new(data: &'graph DepGraphThreadData) -> Option> { + if data.is_enqueue_enabled() { + data.enqueue(DepMessage::PushIgnore); + Some(IgnoreTask { data: data }) + } else { + None + } } } impl<'graph> Drop for IgnoreTask<'graph> { fn drop(&mut self) { - self.data.enqueue(DepMessage::PopIgnore); + if self.data.is_enqueue_enabled() { + self.data.enqueue(DepMessage::PopIgnore); + } } } + diff --git a/src/librustc/dep_graph/shadow.rs b/src/librustc/dep_graph/shadow.rs new file mode 100644 index 0000000000000..06def4bf19af3 --- /dev/null +++ b/src/librustc/dep_graph/shadow.rs @@ -0,0 +1,150 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! The "Shadow Graph" is maintained on the main thread and which +//! tracks each message relating to the dep-graph and applies some +//! sanity checks as they go by. If an error results, it means you get +//! a nice stack-trace telling you precisely what caused the error. +//! +//! NOTE: This is a debugging facility which can potentially have non-trivial +//! runtime impact. Therefore, it is largely compiled out if +//! debug-assertions are not enabled. +//! +//! The basic sanity check, enabled if you have debug assertions +//! enabled, is that there is always a task (or ignore) on the stack +//! when you do read/write, and that the tasks are pushed/popped +//! according to a proper stack discipline. +//! +//! Optionally, if you specify RUST_FORBID_DEP_GRAPH_EDGE, you can +//! specify an edge filter to be applied to each edge as it is +//! created. See `./README.md` for details. + +use hir::def_id::DefId; +use std::cell::{BorrowState, RefCell}; +use std::env; + +use super::DepNode; +use super::thread::DepMessage; +use super::debug::EdgeFilter; + +pub struct ShadowGraph { + // if you push None onto the stack, that corresponds to an Ignore + stack: RefCell>>>, + forbidden_edge: Option, +} + +const ENABLED: bool = cfg!(debug_assertions); + +impl ShadowGraph { + pub fn new() -> Self { + let forbidden_edge = if !ENABLED { + None + } else { + match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => { + match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + } + } + Err(_) => None, + } + }; + + ShadowGraph { + stack: RefCell::new(vec![]), + forbidden_edge: forbidden_edge, + } + } + + #[inline] + pub fn enabled(&self) -> bool { + ENABLED + } + + pub fn enqueue(&self, message: &DepMessage) { + if ENABLED { + match self.stack.borrow_state() { + BorrowState::Unused => {} + _ => { + // When we apply edge filters, that 
invokes the + // Debug trait on DefIds, which in turn reads from + // various bits of state and creates reads! Ignore + // those recursive reads. + return; + } + } + + let mut stack = self.stack.borrow_mut(); + match *message { + DepMessage::Read(ref n) => self.check_edge(Some(Some(n)), top(&stack)), + DepMessage::Write(ref n) => self.check_edge(top(&stack), Some(Some(n))), + DepMessage::PushTask(ref n) => stack.push(Some(n.clone())), + DepMessage::PushIgnore => stack.push(None), + DepMessage::PopTask(ref n) => { + match stack.pop() { + Some(Some(m)) => { + if *n != m { + bug!("stack mismatch: found {:?} expected {:?}", m, n) + } + } + Some(None) => bug!("stack mismatch: found Ignore expected {:?}", n), + None => bug!("stack mismatch: found empty stack, expected {:?}", n), + } + } + DepMessage::PopIgnore => { + match stack.pop() { + Some(Some(m)) => bug!("stack mismatch: found {:?} expected ignore", m), + Some(None) => (), + None => bug!("stack mismatch: found empty stack, expected ignore"), + } + } + DepMessage::Query => (), + } + } + } + + fn check_edge(&self, + source: Option>>, + target: Option>>) { + assert!(ENABLED); + match (source, target) { + // cannot happen, one side is always Some(Some(_)) + (None, None) => unreachable!(), + + // nothing on top of the stack + (None, Some(n)) | (Some(n), None) => bug!("read/write of {:?} but no current task", n), + + // this corresponds to an Ignore being top of the stack + (Some(None), _) | (_, Some(None)) => (), + + // a task is on top of the stack + (Some(Some(source)), Some(Some(target))) => { + if let Some(ref forbidden_edge) = self.forbidden_edge { + if forbidden_edge.test(source, target) { + bug!("forbidden edge {:?} -> {:?} created", source, target) + } + } + } + } + } +} + +// Do a little juggling: we get back a reference to an option at the +// top of the stack, convert it to an optional reference. 
+fn top<'s>(stack: &'s Vec>>) -> Option>> { + stack.last() + .map(|n: &'s Option>| -> Option<&'s DepNode> { + // (*) + // (*) type annotation just there to clarify what would + // otherwise be some *really* obscure code + n.as_ref() + }) +} diff --git a/src/librustc/dep_graph/thread.rs b/src/librustc/dep_graph/thread.rs index dbc57605d71ae..9f755cf86e4e4 100644 --- a/src/librustc/dep_graph/thread.rs +++ b/src/librustc/dep_graph/thread.rs @@ -18,6 +18,7 @@ //! to accumulate more messages. This way we only ever have two vectors //! allocated (and both have a fairly large capacity). +use hir::def_id::DefId; use rustc_data_structures::veccell::VecCell; use std::sync::mpsc::{self, Sender, Receiver}; use std::thread; @@ -25,12 +26,14 @@ use std::thread; use super::DepGraphQuery; use super::DepNode; use super::edges::DepGraphEdges; +use super::shadow::ShadowGraph; +#[derive(Debug)] pub enum DepMessage { - Read(DepNode), - Write(DepNode), - PushTask(DepNode), - PopTask(DepNode), + Read(DepNode), + Write(DepNode), + PushTask(DepNode), + PopTask(DepNode), PushIgnore, PopIgnore, Query, @@ -39,6 +42,17 @@ pub enum DepMessage { pub struct DepGraphThreadData { enabled: bool, + // The "shadow graph" is a debugging aid. We give it each message + // in real time as it arrives and it checks for various errors + // (for example, a read/write when there is no current task; it + // can also apply user-defined filters; see `shadow` module for + // details). This only occurs if debug-assertions are enabled. + // + // Note that in some cases the same errors will occur when the + // data is processed off the main thread, but that's annoying + // because it lacks precision about the source of the error. 
+ shadow_graph: ShadowGraph, + // current buffer, where we accumulate messages messages: VecCell, @@ -49,7 +63,7 @@ pub struct DepGraphThreadData { swap_out: Sender>, // where to receive query results - query_in: Receiver, + query_in: Receiver>, } const INITIAL_CAPACITY: usize = 2048; @@ -59,11 +73,14 @@ impl DepGraphThreadData { let (tx1, rx1) = mpsc::channel(); let (tx2, rx2) = mpsc::channel(); let (txq, rxq) = mpsc::channel(); + if enabled { thread::spawn(move || main(rx1, tx2, txq)); } + DepGraphThreadData { enabled: enabled, + shadow_graph: ShadowGraph::new(), messages: VecCell::with_capacity(INITIAL_CAPACITY), swap_in: rx2, swap_out: tx1, @@ -71,10 +88,24 @@ impl DepGraphThreadData { } } + /// True if we are actually building the full dep-graph. + #[inline] + pub fn is_fully_enabled(&self) -> bool { + self.enabled + } + + /// True if (a) we are actually building the full dep-graph, or (b) we are + /// only enqueuing messages in order to sanity-check them (which happens + /// when debug assertions are enabled). + #[inline] + pub fn is_enqueue_enabled(&self) -> bool { + self.is_fully_enabled() || self.shadow_graph.enabled() + } + /// Sends the current batch of messages to the thread. Installs a /// new vector of messages. 
fn swap(&self) { - assert!(self.enabled, "should never swap if not enabled"); + assert!(self.is_fully_enabled(), "should never swap if not fully enabled"); // should be a buffer waiting for us (though of course we may // have to wait for depgraph thread to finish processing the @@ -89,8 +120,8 @@ impl DepGraphThreadData { self.swap_out.send(old_messages).unwrap(); } - pub fn query(&self) -> DepGraphQuery { - assert!(self.enabled, "cannot query if dep graph construction not enabled"); + pub fn query(&self) -> DepGraphQuery { + assert!(self.is_fully_enabled(), "should never query if not fully enabled"); self.enqueue(DepMessage::Query); self.swap(); self.query_in.recv().unwrap() @@ -100,11 +131,19 @@ impl DepGraphThreadData { /// the buffer is full, this may swap.) #[inline] pub fn enqueue(&self, message: DepMessage) { - if self.enabled { - let len = self.messages.push(message); - if len == INITIAL_CAPACITY { - self.swap(); - } + assert!(self.is_enqueue_enabled(), "should never enqueue if not enqueue-enabled"); + self.shadow_graph.enqueue(&message); + if self.is_fully_enabled() { + self.enqueue_enabled(message); + } + } + + // Outline this fn since I expect it may want to be inlined + // separately. + fn enqueue_enabled(&self, message: DepMessage) { + let len = self.messages.push(message); + if len == INITIAL_CAPACITY { + self.swap(); } } } @@ -112,7 +151,7 @@ impl DepGraphThreadData { /// Definition of the depgraph thread. 
pub fn main(swap_in: Receiver>, swap_out: Sender>, - query_out: Sender) { + query_out: Sender>) { let mut edges = DepGraphEdges::new(); // the compiler thread always expects a fresh buffer to be @@ -132,6 +171,9 @@ pub fn main(swap_in: Receiver>, DepMessage::Query => query_out.send(edges.query()).unwrap(), } } - swap_out.send(messages).unwrap(); + if let Err(_) = swap_out.send(messages) { + // the receiver must have been dropped already + break; + } } } diff --git a/src/librustc/dep_graph/visit.rs b/src/librustc/dep_graph/visit.rs new file mode 100644 index 0000000000000..600732fc6f70b --- /dev/null +++ b/src/librustc/dep_graph/visit.rs @@ -0,0 +1,68 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir; +use hir::def_id::DefId; +use hir::itemlikevisit::ItemLikeVisitor; +use ty::TyCtxt; + +use super::dep_node::DepNode; + +/// Visit all the items in the krate in some order. When visiting a +/// particular item, first create a dep-node by calling `dep_node_fn` +/// and push that onto the dep-graph stack of tasks, and also create a +/// read edge from the corresponding AST node. This is used in +/// compiler passes to automatically record the item that they are +/// working on. 
+pub fn visit_all_item_likes_in_krate<'a, 'tcx, V, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mut dep_node_fn: F, + visitor: &mut V) + where F: FnMut(DefId) -> DepNode, V: ItemLikeVisitor<'tcx> +{ + struct TrackingVisitor<'visit, 'tcx: 'visit, F: 'visit, V: 'visit> { + tcx: TyCtxt<'visit, 'tcx, 'tcx>, + dep_node_fn: &'visit mut F, + visitor: &'visit mut V + } + + impl<'visit, 'tcx, F, V> ItemLikeVisitor<'tcx> for TrackingVisitor<'visit, 'tcx, F, V> + where F: FnMut(DefId) -> DepNode, V: ItemLikeVisitor<'tcx> + { + fn visit_item(&mut self, i: &'tcx hir::Item) { + let item_def_id = self.tcx.map.local_def_id(i.id); + let task_id = (self.dep_node_fn)(item_def_id); + let _task = self.tcx.dep_graph.in_task(task_id.clone()); + debug!("Started task {:?}", task_id); + assert!(!self.tcx.map.is_inlined_def_id(item_def_id)); + self.tcx.dep_graph.read(DepNode::Hir(item_def_id)); + self.visitor.visit_item(i); + debug!("Ended task {:?}", task_id); + } + + fn visit_impl_item(&mut self, i: &'tcx hir::ImplItem) { + let impl_item_def_id = self.tcx.map.local_def_id(i.id); + let task_id = (self.dep_node_fn)(impl_item_def_id); + let _task = self.tcx.dep_graph.in_task(task_id.clone()); + debug!("Started task {:?}", task_id); + assert!(!self.tcx.map.is_inlined_def_id(impl_item_def_id)); + self.tcx.dep_graph.read(DepNode::Hir(impl_item_def_id)); + self.visitor.visit_impl_item(i); + debug!("Ended task {:?}", task_id); + } + } + + let krate = tcx.dep_graph.with_ignore(|| tcx.map.krate()); + let mut tracking_visitor = TrackingVisitor { + tcx: tcx, + dep_node_fn: &mut dep_node_fn, + visitor: visitor + }; + krate.visit_all_item_likes(&mut tracking_visitor) +} diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index aa2f60f71f979..ec09877ae121c 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -14,418 +14,20 @@ // Each message should start and end with a new line, and be wrapped to 80 characters. 
// In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable. register_long_diagnostics! { - -E0001: r##" -This error suggests that the expression arm corresponding to the noted pattern -will never be reached as for all possible values of the expression being -matched, one of the preceding patterns will match. - -This means that perhaps some of the preceding patterns are too general, this one -is too specific or the ordering is incorrect. - -For example, the following `match` block has too many arms: - -``` -match foo { - Some(bar) => {/* ... */} - None => {/* ... */} - _ => {/* ... */} // All possible cases have already been handled -} -``` - -`match` blocks have their patterns matched in order, so, for example, putting -a wildcard arm above a more specific arm will make the latter arm irrelevant. - -Ensure the ordering of the match arm is correct and remove any superfluous -arms. -"##, - -E0002: r##" -This error indicates that an empty match expression is invalid because the type -it is matching on is non-empty (there exist values of this type). In safe code -it is impossible to create an instance of an empty type, so empty match -expressions are almost never desired. This error is typically fixed by adding -one or more cases to the match expression. - -An example of an empty type is `enum Empty { }`. So, the following will work: - -``` -fn foo(x: Empty) { - match x { - // empty - } -} -``` - -However, this won't: - -``` -fn foo(x: Option) { - match x { - // empty - } -} -``` -"##, - -E0003: r##" -Not-a-Number (NaN) values cannot be compared for equality and hence can never -match the input to a match expression. So, the following will not compile: - -``` -const NAN: f32 = 0.0 / 0.0; - -match number { - NAN => { /* ... */ }, - // ... -} -``` - -To match against NaN values, you should instead use the `is_nan()` method in a -guard, like so: - -``` -match number { - // ... - x if x.is_nan() => { /* ... */ } - // ... 
-} -``` -"##, - -E0004: r##" -This error indicates that the compiler cannot guarantee a matching pattern for -one or more possible inputs to a match expression. Guaranteed matches are -required in order to assign values to match expressions, or alternatively, -determine the flow of execution. - -If you encounter this error you must alter your patterns so that every possible -value of the input type is matched. For types with a small number of variants -(like enums) you should probably cover all cases explicitly. Alternatively, the -underscore `_` wildcard pattern can be added after all other patterns to match -"anything else". -"##, - -E0005: r##" -Patterns used to bind names must be irrefutable, that is, they must guarantee -that a name will be extracted in all cases. If you encounter this error you -probably need to use a `match` or `if let` to deal with the possibility of -failure. -"##, - -E0007: r##" -This error indicates that the bindings in a match arm would require a value to -be moved into more than one location, thus violating unique ownership. Code like -the following is invalid as it requires the entire `Option` to be moved -into a variable called `op_string` while simultaneously requiring the inner -String to be moved into a variable called `s`. - -``` -let x = Some("s".to_string()); -match x { - op_string @ Some(s) => ... - None => ... -} -``` - -See also Error 303. -"##, - -E0008: r##" -Names bound in match arms retain their type in pattern guards. As such, if a -name is bound by move in a pattern, it should also be moved to wherever it is -referenced in the pattern guard code. Doing so however would prevent the name -from being available in the body of the match arm. Consider the following: - -``` -match Some("hi".to_string()) { - Some(s) if s.len() == 0 => // use s. - ... -} -``` - -The variable `s` has type `String`, and its use in the guard is as a variable of -type `String`. 
The guard code effectively executes in a separate scope to the -body of the arm, so the value would be moved into this anonymous scope and -therefore become unavailable in the body of the arm. Although this example seems -innocuous, the problem is most clear when considering functions that take their -argument by value. - -``` -match Some("hi".to_string()) { - Some(s) if { drop(s); false } => (), - Some(s) => // use s. - ... -} -``` - -The value would be dropped in the guard then become unavailable not only in the -body of that arm but also in all subsequent arms! The solution is to bind by -reference when using guards or refactor the entire expression, perhaps by -putting the condition inside the body of the arm. -"##, - -E0009: r##" -In a pattern, all values that don't implement the `Copy` trait have to be bound -the same way. The goal here is to avoid binding simultaneously by-move and -by-ref. - -This limitation may be removed in a future version of Rust. - -Wrong example: - -``` -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((y, ref z)) => {}, - None => panic!() -} -``` - -You have two solutions: - -Solution #1: Bind the pattern's values the same way. - -``` -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((ref y, ref z)) => {}, - // or Some((y, z)) => {} - None => panic!() -} -``` - -Solution #2: Implement the `Copy` trait for the `X` structure. - -However, please keep in mind that the first solution should be preferred. - -``` -#[derive(Clone, Copy)] -struct X { x: (), } - -let x = Some((X { x: () }, X { x: () })); -match x { - Some((y, ref z)) => {}, - None => panic!() -} -``` -"##, - -E0010: r##" -The value of statics and constants must be known at compile time, and they live -for the entire lifetime of a program. Creating a boxed value allocates memory on -the heap at runtime, and therefore cannot be done at compile time. 
Erroneous -code example: - -``` -#![feature(box_syntax)] - -const CON : Box = box 0; -``` -"##, - -E0011: r##" -Initializers for constants and statics are evaluated at compile time. -User-defined operators rely on user-defined functions, which cannot be evaluated -at compile time. - -Bad example: - -``` -use std::ops::Index; - -struct Foo { a: u8 } - -impl Index for Foo { - type Output = u8; - - fn index<'a>(&'a self, idx: u8) -> &'a u8 { &self.a } -} - -const a: Foo = Foo { a: 0u8 }; -const b: u8 = a[0]; // Index trait is defined by the user, bad! -``` - -Only operators on builtin types are allowed. - -Example: - -``` -const a: &'static [i32] = &[1, 2, 3]; -const b: i32 = a[0]; // Good! -``` -"##, - -E0013: r##" -Static and const variables can refer to other const variables. But a const -variable cannot refer to a static variable. For example, `Y` cannot refer to `X` -here: - -``` -static X: i32 = 42; -const Y: i32 = X; -``` - -To fix this, the value can be extracted as a const and then used: - -``` -const A: i32 = 42; -static X: i32 = A; -const Y: i32 = A; -``` -"##, - -E0014: r##" -Constants can only be initialized by a constant value or, in a future -version of Rust, a call to a const function. This error indicates the use -of a path (like a::b, or x) denoting something other than one of these -allowed items. Example: - -``` -const FOO: i32 = { let x = 0; x }; // 'x' isn't a constant nor a function! -``` - -To avoid it, you have to replace the non-constant value: - -``` -const FOO: i32 = { const X : i32 = 0; X }; -// or even: -const FOO: i32 = { 0 }; // but brackets are useless here -``` -"##, - -// FIXME(#24111) Change the language here when const fn stabilizes -E0015: r##" -The only functions that can be called in static or constant expressions are -`const` functions, and struct/enum constructors. `const` functions are only -available on a nightly compiler. Rust currently does not support more general -compile-time function execution. 
- -``` -const FOO: Option = Some(1); // enum constructor -struct Bar {x: u8} -const BAR: Bar = Bar {x: 1}; // struct constructor -``` - -See [RFC 911] for more details on the design of `const fn`s. - -[RFC 911]: https://github.com/rust-lang/rfcs/blob/master/text/0911-const-fn.md -"##, - -E0017: r##" -References in statics and constants may only refer to immutable values. Example: - -``` -static X: i32 = 1; -const C: i32 = 2; - -// these three are not allowed: -const CR: &'static mut i32 = &mut C; -static STATIC_REF: &'static mut i32 = &mut X; -static CONST_REF: &'static mut i32 = &mut C; -``` - -Statics are shared everywhere, and if they refer to mutable data one might -violate memory safety since holding multiple mutable references to shared data -is not allowed. - -If you really want global mutable state, try using `static mut` or a global -`UnsafeCell`. -"##, - -E0018: r##" -The value of static and const variables must be known at compile time. You -can't cast a pointer as an integer because we can't know what value the -address will take. - -However, pointers to other constants' addresses are allowed in constants, -example: - -``` -const X: u32 = 50; -const Y: *const u32 = &X; -``` - -Therefore, casting one of these non-constant pointers to an integer results -in a non-constant integer which lead to this error. Example: - -``` -const X: u32 = 1; -const Y: usize = &X as *const u32 as usize; -println!("{}", Y); -``` -"##, - -E0019: r##" -A function call isn't allowed in the const's initialization expression -because the expression's value must be known at compile-time. Example of -erroneous code: - -``` -enum Test { - V1 -} - -impl Test { - fn test(&self) -> i32 { - 12 - } -} - -fn main() { - const FOO: Test = Test::V1; - - const A: i32 = FOO.test(); // You can't call Test::func() here ! -} -``` - -Remember: you can't use a function call inside a const's initialization -expression! 
However, you can totally use it anywhere else: - -``` -fn main() { - const FOO: Test = Test::V1; - - FOO.func(); // here is good - let x = FOO.func(); // or even here! -} -``` -"##, - E0020: r##" This error indicates that an attempt was made to divide by zero (or take the remainder of a zero divisor) in a static or constant expression. Erroneous code example: -``` -const X: i32 = 42 / 0; -// error: attempted to divide by zero in a constant expression -``` -"##, - -E0030: r##" -When matching against a range, the compiler verifies that the range is -non-empty. Range patterns include both end-points, so this is equivalent to -requiring the start of the range to be less than or equal to the end of the -range. - -For example: +```compile_fail +#[deny(const_err)] -``` -match 5u32 { - // This range is ok, albeit pointless. - 1 ... 1 => ... - // This range is empty, and the compiler can tell. - 1000 ... 5 => ... -} +const X: i32 = 42 / 0; +// error: attempt to divide by zero in a constant expression ``` "##, -E0038: r####" +E0038: r##" Trait objects like `Box` can only be constructed when certain requirements are satisfied by the trait in question. @@ -455,7 +57,7 @@ trait Foo where Self: Sized { } ``` -we cannot create an object of type `Box` or `&Foo` since in this case +We cannot create an object of type `Box` or `&Foo` since in this case `Self` would not be `Sized`. Generally, `Self : Sized` is used to indicate that the trait should not be used @@ -485,12 +87,16 @@ impl Trait for u8 { ``` (Note that `&self` and `&mut self` are okay, it's additional `Self` types which -cause this problem) +cause this problem.) In such a case, the compiler cannot predict the return type of `foo()` in a situation like the following: -``` +```compile_fail +trait Trait { + fn foo(&self) -> Self; +} + fn call_foo(x: Box) { let y = x.foo(); // What type is y? // ... 
@@ -511,7 +117,7 @@ trait Trait { Now, `foo()` can no longer be called on a trait object, but you will now be allowed to make a trait object, and that will be able to call any object-safe -methods". With such a bound, one can still call `foo()` on types implementing +methods. With such a bound, one can still call `foo()` on types implementing that trait that aren't behind trait objects. ### Method has generic type parameters @@ -523,11 +129,13 @@ have: trait Trait { fn foo(&self); } + impl Trait for String { fn foo(&self) { // implementation 1 } } + impl Trait for u8 { fn foo(&self) { // implementation 2 @@ -550,7 +158,7 @@ fn foo(x: T) { } ``` -the machine code for `foo::()`, `foo::()`, `foo::()`, or any +The machine code for `foo::()`, `foo::()`, `foo::()`, or any other type substitution is different. Hence the compiler generates the implementation on-demand. If you call `foo()` with a `bool` parameter, the compiler will only generate code for `foo::()`. When we have additional @@ -572,22 +180,25 @@ trait Trait { fn foo(&self, on: T); // more methods } + impl Trait for String { fn foo(&self, on: T) { // implementation 1 } } + impl Trait for u8 { fn foo(&self, on: T) { // implementation 2 } } + // 8 more implementations ``` Now, if we have the following code: -``` +```ignore fn call_foo(thing: Box) { thing.foo(true); // this could be any one of the 8 types above thing.foo(1); @@ -595,7 +206,7 @@ fn call_foo(thing: Box) { } ``` -we don't just need to create a table of all implementations of all methods of +We don't just need to create a table of all implementations of all methods of `Trait`, we need to create such a table, for each different type fed to `foo()`. In this case this turns out to be (10 types implementing `Trait`)*(3 types being fed to `foo()`) = 30 implementations! @@ -621,7 +232,7 @@ out the methods of different types. 
### Method has no receiver Methods that do not take a `self` parameter can't be called since there won't be -a way to get a pointer to the method table for them +a way to get a pointer to the method table for them. ``` trait Foo { @@ -645,7 +256,7 @@ trait Foo { This is similar to the second sub-error, but subtler. It happens in situations like the following: -``` +```compile_fail trait Super {} trait Trait: Super { @@ -677,17 +288,17 @@ so they are forbidden when specifying supertraits. There's no easy fix for this, generally code will need to be refactored so that you no longer need to derive from `Super`. -"####, +"##, E0072: r##" When defining a recursive struct or enum, any use of the type being defined from inside the definition must occur behind a pointer (like `Box` or `&`). This is because structs and enums must have a well-defined size, and without -the pointer the size of the type would need to be unbounded. +the pointer, the size of the type would need to be unbounded. Consider the following erroneous definition of a type for a list of bytes: -``` +```compile_fail,E0072 // error, invalid recursive struct type struct ListNode { head: u8, @@ -720,7 +331,7 @@ E0109: r##" You tried to give a type parameter to a type which doesn't need it. Erroneous code example: -``` +```compile_fail,E0109 type X = u32; // error: type parameters are not allowed on this type ``` @@ -741,7 +352,7 @@ E0110: r##" You tried to give a lifetime parameter to a type which doesn't need it. Erroneous code example: -``` +```compile_fail,E0110 type X = u32<'static>; // error: lifetime parameters are not allowed on // this type ``` @@ -755,21 +366,33 @@ type X = u32; // ok! "##, E0133: r##" -Using unsafe functionality, is potentially dangerous and disallowed -by safety checks. Examples: +Unsafe code was used outside of an unsafe function or block. 
+ +Erroneous code example: + +```compile_fail,E0133 +unsafe fn f() { return; } // This is the unsafe code + +fn main() { + f(); // error: call to unsafe function requires unsafe function or block +} +``` + +Using unsafe functionality is potentially dangerous and disallowed by safety +checks. Examples: -- Dereferencing raw pointers -- Calling functions via FFI -- Calling functions marked unsafe +* Dereferencing raw pointers +* Calling functions via FFI +* Calling functions marked unsafe -These safety checks can be relaxed for a section of the code -by wrapping the unsafe instructions with an `unsafe` block. For instance: +These safety checks can be relaxed for a section of the code by wrapping the +unsafe instructions with an `unsafe` block. For instance: ``` unsafe fn f() { return; } fn main() { - unsafe { f(); } + unsafe { f(); } // ok! } ``` @@ -783,19 +406,61 @@ function `main()`. If there are multiple such functions, please rename one. "##, E0137: r##" +More than one function was declared with the `#[main]` attribute. + +Erroneous code example: + +```compile_fail,E0137 +#![feature(main)] + +#[main] +fn foo() {} + +#[main] +fn f() {} // error: multiple functions with a #[main] attribute +``` + This error indicates that the compiler found multiple functions with the `#[main]` attribute. This is an error because there must be a unique entry -point into a Rust program. +point into a Rust program. Example: + +``` +#![feature(main)] + +#[main] +fn f() {} // ok! +``` "##, E0138: r##" +More than one function was declared with the `#[start]` attribute. + +Erroneous code example: + +```compile_fail,E0138 +#![feature(start)] + +#[start] +fn foo(argc: isize, argv: *const *const u8) -> isize {} + +#[start] +fn f(argc: isize, argv: *const *const u8) -> isize {} +// error: multiple 'start' functions +``` + This error indicates that the compiler found multiple functions with the `#[start]` attribute. 
This is an error because there must be a unique entry -point into a Rust program. +point into a Rust program. Example: + +``` +#![feature(start)] + +#[start] +fn foo(argc: isize, argv: *const *const u8) -> isize { 0 } // ok! +``` "##, -// FIXME link this to the relevant turpl chapters for instilling fear of the -// transmute gods in the user +// isn't thrown anymore E0139: r##" There are various restrictions on transmuting between types in Rust; for example types being transmuted must have the same size. To apply all these restrictions, @@ -805,10 +470,12 @@ parameters are involved, this cannot always be done. So, for example, the following is not allowed: ``` -struct Foo(Vec) +use std::mem::transmute; + +struct Foo(Vec); fn foo(x: Vec) { - // we are transmuting between Vec and Foo here + // we are transmuting between Vec and Foo here let y: Foo = unsafe { transmute(x) }; // do something with y } @@ -830,9 +497,11 @@ If it's possible, hand-monomorphize the code by writing the function for each possible type substitution. It's possible to use traits to do this cleanly, for example: -``` +```ignore +struct Foo(Vec); + trait MyTransmutableType { - fn transmute(Vec) -> Foo + fn transmute(Vec) -> Foo; } impl MyTransmutableType for u8 { @@ -840,11 +509,13 @@ impl MyTransmutableType for u8 { transmute(x) } } + impl MyTransmutableType for String { fn transmute(x: Foo) -> Vec { transmute(x) } } + // ... more impls for the types you intend to transmute fn foo(x: Vec) { @@ -859,7 +530,7 @@ is a size mismatch in one of the impls. It is also possible to manually transmute: -``` +```ignore ptr::read(&v as *const _ as *const SomeType) // `v` transmuted to `SomeType` ``` @@ -868,6 +539,17 @@ call to `mem::forget(v)` in case you want to avoid destructors being called. "##, E0152: r##" +A lang item was redefined. 
+ +Erroneous code example: + +```compile_fail,E0152 +#![feature(lang_items)] + +#[lang = "panic_fmt"] +struct Foo; // error: duplicate lang item found: `panic_fmt` +``` + Lang items are already implemented in the standard library. Unless you are writing a free-standing application (e.g. a kernel), you do not need to provide them yourself. @@ -882,122 +564,11 @@ attributes: See also https://doc.rust-lang.org/book/no-stdlib.html "##, -E0158: r##" -`const` and `static` mean different things. A `const` is a compile-time -constant, an alias for a literal value. This property means you can match it -directly within a pattern. - -The `static` keyword, on the other hand, guarantees a fixed location in memory. -This does not always mean that the value is constant. For example, a global -mutex can be declared `static` as well. - -If you want to match against a `static`, consider using a guard instead: - -``` -static FORTY_TWO: i32 = 42; -match Some(42) { - Some(x) if x == FORTY_TWO => ... - ... -} -``` -"##, - -E0161: r##" -In Rust, you can only move a value when its size is known at compile time. - -To work around this restriction, consider "hiding" the value behind a reference: -either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move -it around as usual. -"##, - -E0162: r##" -An if-let pattern attempts to match the pattern, and enters the body if the -match was successful. If the match is irrefutable (when it cannot fail to -match), use a regular `let`-binding instead. For instance: - -``` -struct Irrefutable(i32); -let irr = Irrefutable(0); - -// This fails to compile because the match is irrefutable. -if let Irrefutable(x) = irr { - // This body will always be executed. - foo(x); -} - -// Try this instead: -let Irrefutable(x) = irr; -foo(x); -``` -"##, - -E0165: r##" -A while-let pattern attempts to match the pattern, and enters the body if the -match was successful. 
If the match is irrefutable (when it cannot fail to -match), use a regular `let`-binding inside a `loop` instead. For instance: - -``` -struct Irrefutable(i32); -let irr = Irrefutable(0); - -// This fails to compile because the match is irrefutable. -while let Irrefutable(x) = irr { - ... -} - -// Try this instead: -loop { - let Irrefutable(x) = irr; - ... -} -``` -"##, - -E0170: r##" -Enum variants are qualified by default. For example, given this type: - -``` -enum Method { - GET, - POST -} -``` - -you would match it using: - -``` -match m { - Method::GET => ... - Method::POST => ... -} -``` - -If you don't qualify the names, the code will bind new variables named "GET" and -"POST" instead. This behavior is likely not what you want, so `rustc` warns when -that happens. - -Qualified names are good practice, and most code works well with them. But if -you prefer them unqualified, you can import the variants into scope: - -``` -use Method::*; -enum Method { GET, POST } -``` - -If you want others to be able to import variants from your module directly, use -`pub use`: - -``` -pub use Method::*; -enum Method { GET, POST } -``` -"##, - E0229: r##" An associated type binding was done outside of the type parameter declaration and `where` clause. Erroneous code example: -``` +```compile_fail,E0229 pub trait Foo { type A; fn boo(&self) -> ::A; @@ -1017,13 +588,13 @@ fn baz(x: &>::A) {} To solve this error, please move the type bindings in the type parameter declaration: -``` +```ignore fn baz>(x: &::A) {} // ok! ``` -or in the `where` clause: +Or in the `where` clause: -``` +```ignore fn baz(x: &::A) where I: Foo {} ``` "##, @@ -1034,7 +605,7 @@ used. 
These two examples illustrate the problem: -``` +```compile_fail,E0261 // error, use of undeclared lifetime name `'a` fn foo(x: &'a str) { } @@ -1047,7 +618,7 @@ struct Foo { These can be fixed by declaring lifetime parameters: ``` -fn foo<'a>(x: &'a str) { } +fn foo<'a>(x: &'a str) {} struct Foo<'a> { x: &'a str, @@ -1060,7 +631,7 @@ Declaring certain lifetime names in parameters is disallowed. For example, because the `'static` lifetime is a special built-in lifetime name denoting the lifetime of the entire program, this is an error: -``` +```compile_fail,E0262 // error, invalid lifetime parameter name `'static` fn foo<'static>(x: &'static str) { } ``` @@ -1070,7 +641,7 @@ E0263: r##" A lifetime name cannot be declared more than once in the same scope. For example: -``` +```compile_fail,E0263 // error, lifetime name `'a` declared twice in the same scope fn foo<'a, 'b, 'a>(x: &'a str, y: &'b str) { } ``` @@ -1079,7 +650,7 @@ fn foo<'a, 'b, 'a>(x: &'a str, y: &'b str) { } E0264: r##" An unknown external lang item was used. Erroneous code example: -``` +```compile_fail,E0264 #![feature(lang_items)] extern "C" { @@ -1101,169 +672,6 @@ extern "C" { ``` "##, -E0265: r##" -This error indicates that a static or constant references itself. -All statics and constants need to resolve to a value in an acyclic manner. - -For example, neither of the following can be sensibly compiled: - -``` -const X: u32 = X; -``` - -``` -const X: u32 = Y; -const Y: u32 = X; -``` -"##, - -E0267: r##" -This error indicates the use of a loop keyword (`break` or `continue`) inside a -closure but outside of any loop. Erroneous code example: - -``` -let w = || { break; }; // error: `break` inside of a closure -``` - -`break` and `continue` keywords can be used as normal inside closures as long as -they are also contained within a loop. To halt the execution of a closure you -should instead use a return statement. 
Example: - -``` -let w = || { - for _ in 0..10 { - break; - } -}; - -w(); -``` -"##, - -E0268: r##" -This error indicates the use of a loop keyword (`break` or `continue`) outside -of a loop. Without a loop to break out of or continue in, no sensible action can -be taken. Erroneous code example: - -``` -fn some_func() { - break; // error: `break` outside of loop -} -``` - -Please verify that you are using `break` and `continue` only in loops. Example: - -``` -fn some_func() { - for _ in 0..10 { - break; // ok! - } -} -``` -"##, - -E0269: r##" -Functions must eventually return a value of their return type. For example, in -the following function - -``` -fn foo(x: u8) -> u8 { - if x > 0 { - x // alternatively, `return x` - } - // nothing here -} -``` - -if the condition is true, the value `x` is returned, but if the condition is -false, control exits the `if` block and reaches a place where nothing is being -returned. All possible control paths must eventually return a `u8`, which is not -happening here. - -An easy fix for this in a complicated function is to specify a default return -value, if possible: - -``` -fn foo(x: u8) -> u8 { - if x > 0 { - x // alternatively, `return x` - } - // lots of other if branches - 0 // return 0 if all else fails -} -``` - -It is advisable to find out what the unhandled cases are and check for them, -returning an appropriate value or panicking if necessary. -"##, - -E0270: r##" -Rust lets you define functions which are known to never return, i.e. are -'diverging', by marking its return type as `!`. - -For example, the following functions never return: - -``` -fn foo() -> ! { - loop {} -} - -fn bar() -> ! { - foo() // foo() is diverging, so this will diverge too -} - -fn baz() -> ! 
{ - panic!(); // this macro internally expands to a call to a diverging function -} - -``` - -Such functions can be used in a place where a value is expected without -returning a value of that type, for instance: - -``` -let y = match x { - 1 => 1, - 2 => 4, - _ => foo() // diverging function called here -}; -println!("{}", y) -``` - -If the third arm of the match block is reached, since `foo()` doesn't ever -return control to the match block, it is fine to use it in a place where an -integer was expected. The `match` block will never finish executing, and any -point where `y` (like the print statement) is needed will not be reached. - -However, if we had a diverging function that actually does finish execution - -``` -fn foo() -> { - loop {break;} -} -``` - -then we would have an unknown value for `y` in the following code: - -``` -let y = match x { - 1 => 1, - 2 => 4, - _ => foo() -}; -println!("{}", y); -``` - -In the previous example, the print statement was never reached when the wildcard -match arm was hit, so we were okay with `foo()` not returning an integer that we -could set to `y`. But in this example, `foo()` actually does return control, so -the print statement will be executed with an uninitialized value. - -Obviously we cannot have functions which are allowed to be used in such -positions and yet can return control. So, if you are defining a function that -returns `!`, make sure that there is no way for it to actually finish executing. -"##, - E0271: r##" This is because of a type mismatch between the associated type of some trait (e.g. `T::Bar`, where `T` implements `trait Quux { type Bar; }`) @@ -1272,18 +680,21 @@ Examples follow. 
Here is a basic example: -``` +```compile_fail,E0271 trait Trait { type AssociatedType; } + fn foo(t: T) where T: Trait { println!("in foo"); } + impl Trait for i8 { type AssociatedType = &'static str; } + foo(3_i8); ``` Here is that same example again, with some explanatory comments: -``` +```ignore trait Trait { type AssociatedType; } fn foo(t: T) where T: Trait { @@ -1321,12 +732,12 @@ foo(3_i8); Here is a more subtle instance of the same problem, that can arise with for-loops in Rust: -``` +```compile_fail let vs: Vec = vec![1, 2, 3, 4]; for v in &vs { match v { - 1 => {} - _ => {} + 1 => {}, + _ => {}, } } ``` @@ -1335,7 +746,7 @@ The above fails because of an analogous type mismatch, though may be harder to see. Again, here are some explanatory comments for the same example: -``` +```ignore { let vs = vec![1, 2, 3, 4]; @@ -1383,10 +794,13 @@ So we can fix the previous examples like this: ``` // Basic Example: trait Trait { type AssociatedType; } + fn foo(t: T) where T: Trait { println!("in foo"); } + impl Trait for i8 { type AssociatedType = &'static str; } + foo(3_i8); // For-Loop Example: @@ -1406,28 +820,30 @@ message for when a particular trait isn't implemented on a type placed in a position that needs that trait. For example, when the following code is compiled: -``` +```compile_fail +#![feature(on_unimplemented)] + fn foo>(x: T){} #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { ... } +trait Index { /* ... */ } foo(true); // `bool` does not implement `Index` ``` -there will be an error about `bool` not implementing `Index`, followed by a +There will be an error about `bool` not implementing `Index`, followed by a note saying "the type `bool` cannot be indexed by `u8`". -As you can see, you can specify type parameters in curly braces for substitution -with the actual types (using the regular format string syntax) in a given -situation. 
Furthermore, `{Self}` will substitute to the type (in this case, -`bool`) that we tried to use. +As you can see, you can specify type parameters in curly braces for +substitution with the actual types (using the regular format string syntax) in +a given situation. Furthermore, `{Self}` will substitute to the type (in this +case, `bool`) that we tried to use. This error appears when the curly braces contain an identifier which doesn't -match with any of the type parameters or the string `Self`. This might happen if -you misspelled a type parameter, or if you intended to use literal curly braces. -If it is the latter, escape the curly braces with a second curly brace of the -same type; e.g. a literal `{` is `{{` +match with any of the type parameters or the string `Self`. This might happen +if you misspelled a type parameter, or if you intended to use literal curly +braces. If it is the latter, escape the curly braces with a second curly brace +of the same type; e.g. a literal `{` is `{{`. "##, E0273: r##" @@ -1436,11 +852,13 @@ message for when a particular trait isn't implemented on a type placed in a position that needs that trait. For example, when the following code is compiled: -``` +```compile_fail +#![feature(on_unimplemented)] + fn foo>(x: T){} #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { ... } +trait Index { /* ... */ } foo(true); // `bool` does not implement `Index` ``` @@ -1448,10 +866,10 @@ foo(true); // `bool` does not implement `Index` there will be an error about `bool` not implementing `Index`, followed by a note saying "the type `bool` cannot be indexed by `u8`". -As you can see, you can specify type parameters in curly braces for substitution -with the actual types (using the regular format string syntax) in a given -situation. Furthermore, `{Self}` will substitute to the type (in this case, -`bool`) that we tried to use. 
+As you can see, you can specify type parameters in curly braces for +substitution with the actual types (using the regular format string syntax) in +a given situation. Furthermore, `{Self}` will substitute to the type (in this +case, `bool`) that we tried to use. This error appears when the curly braces do not contain an identifier. Please add one of the same name as a type parameter. If you intended to use literal @@ -1464,11 +882,13 @@ message for when a particular trait isn't implemented on a type placed in a position that needs that trait. For example, when the following code is compiled: -``` +```compile_fail +#![feature(on_unimplemented)] + fn foo>(x: T){} #[rustc_on_unimplemented = "the type `{Self}` cannot be indexed by `{Idx}`"] -trait Index { ... } +trait Index { /* ... */ } foo(true); // `bool` does not implement `Index` ``` @@ -1483,12 +903,12 @@ trait. E0275: r##" This error occurs when there was a recursive trait requirement that overflowed -before it could be evaluated. Often this means that there is unbounded recursion -in resolving some type bounds. +before it could be evaluated. Often this means that there is unbounded +recursion in resolving some type bounds. -For example, in the following code +For example, in the following code: -``` +```compile_fail,E0275 trait Foo {} struct Bar(T); @@ -1496,10 +916,10 @@ struct Bar(T); impl Foo for T where Bar: Foo {} ``` -to determine if a `T` is `Foo`, we need to check if `Bar` is `Foo`. However, -to do this check, we need to determine that `Bar>` is `Foo`. To determine -this, we check if `Bar>>` is `Foo`, and so on. This is clearly a -recursive requirement that can't be resolved directly. +To determine if a `T` is `Foo`, we need to check if `Bar` is `Foo`. However, +to do this check, we need to determine that `Bar>` is `Foo`. To +determine this, we check if `Bar>>` is `Foo`, and so on. This is +clearly a recursive requirement that can't be resolved directly. 
Consider changing your trait bounds so that they're less self-referential. "##, @@ -1508,13 +928,13 @@ E0276: r##" This error occurs when a bound in an implementation of a trait does not match the bounds specified in the original trait. For example: -``` +```compile_fail,E0276 trait Foo { - fn foo(x: T); + fn foo(x: T); } impl Foo for bool { - fn foo(x: T) where T: Copy {} + fn foo(x: T) where T: Copy {} } ``` @@ -1530,7 +950,7 @@ E0277: r##" You tried to use a type which doesn't implement some trait in a place which expected that trait. Erroneous code example: -``` +```compile_fail,E0277 // here we declare the Foo trait with a bar method trait Foo { fn bar(&self); @@ -1544,8 +964,7 @@ fn some_func(foo: T) { fn main() { // we now call the method with the i32 type, which doesn't implement // the Foo trait - some_func(5i32); // error: the trait `Foo` is not implemented for the - // type `i32` + some_func(5i32); // error: the trait bound `i32 : Foo` is not satisfied } ``` @@ -1571,6 +990,48 @@ fn main() { some_func(5i32); // ok! } ``` + +Or in a generic context, an erroneous code example would look like: + +```compile_fail,E0277 +fn some_func(foo: T) { + println!("{:?}", foo); // error: the trait `core::fmt::Debug` is not + // implemented for the type `T` +} + +fn main() { + // We now call the method with the i32 type, + // which *does* implement the Debug trait. + some_func(5i32); +} +``` + +Note that the error here is in the definition of the generic function: Although +we only call it with a parameter that does implement `Debug`, the compiler +still rejects the function: It must work with all possible input types. In +order to make this example compile, we need to restrict the generic type we're +accepting: + +``` +use std::fmt; + +// Restrict the input type to types that implement Debug. +fn some_func(foo: T) { + println!("{:?}", foo); +} + +fn main() { + // Calling the method is still fine, as i32 implements Debug. 
+ some_func(5i32); + + // This would fail to compile now: + // struct WithoutDebug; + // some_func(WithoutDebug); +} +``` + +Rust only looks at the signature of the called function, as such it must +already specify all requirements that will be used for every type parameter. "##, E0281: r##" @@ -1578,7 +1039,7 @@ You tried to supply a type which doesn't implement some trait in a location which expected that trait. This error typically occurs when working with `Fn`-based types. Erroneous code example: -``` +```compile_fail,E0281 fn foo(x: F) { } fn main() { @@ -1604,7 +1065,7 @@ parameter with a `FromIterator` bound, which for a `char` iterator is implemented by `Vec` and `String` among others. Consider the following snippet that reverses the characters of a string: -``` +```compile_fail,E0282 let x = "hello".chars().rev().collect(); ``` @@ -1641,9 +1102,9 @@ occur when a type parameter of a struct or trait cannot be inferred. In that case it is not always possible to use a type annotation, because all candidates have the same return type. For instance: -``` +```compile_fail,E0282 struct Foo { - // Some fields omitted. + num: T, } impl Foo { @@ -1667,17 +1128,19 @@ to unambiguously choose an implementation. For example: -``` +```compile_fail,E0283 trait Generator { fn create() -> u32; } struct Impl; + impl Generator for Impl { fn create() -> u32 { 1 } } struct AnotherImpl; + impl Generator for AnotherImpl { fn create() -> u32 { 2 } } @@ -1692,6 +1155,16 @@ fn main() { To resolve this error use the concrete type: ``` +trait Generator { + fn create() -> u32; +} + +struct AnotherImpl; + +impl Generator for AnotherImpl { + fn create() -> u32 { 2 } +} + fn main() { let gen1 = AnotherImpl::create(); @@ -1703,109 +1176,23 @@ fn main() { E0296: r##" This error indicates that the given recursion limit could not be parsed. 
Ensure -that the value provided is a positive integer between quotes, like so: - -``` -#![recursion_limit="1000"] -``` -"##, - -E0297: r##" -Patterns used to bind names must be irrefutable. That is, they must guarantee -that a name will be extracted in all cases. Instead of pattern matching the -loop variable, consider using a `match` or `if let` inside the loop body. For -instance: - -``` -// This fails because `None` is not covered. -for Some(x) in xs { - ... -} - -// Match inside the loop instead: -for item in xs { - match item { - Some(x) => ... - None => ... - } -} - -// Or use `if let`: -for item in xs { - if let Some(x) = item { - ... - } -} -``` -"##, +that the value provided is a positive integer between quotes. -E0301: r##" -Mutable borrows are not allowed in pattern guards, because matching cannot have -side effects. Side effects could alter the matched object or the environment -on which the match depends in such a way, that the match would not be -exhaustive. For instance, the following would not match any arm if mutable -borrows were allowed: - -``` -match Some(()) { - None => { }, - option if option.take().is_none() => { /* impossible, option is `Some` */ }, - Some(_) => { } // When the previous match failed, the option became `None`. -} -``` -"##, +Erroneous code example: -E0302: r##" -Assignments are not allowed in pattern guards, because matching cannot have -side effects. Side effects could alter the matched object or the environment -on which the match depends in such a way, that the match would not be -exhaustive. For instance, the following would not match any arm if assignments -were allowed: +```compile_fail,E0296 +#![recursion_limit] +fn main() {} ``` -match Some(()) { - None => { }, - option if { option = None; false } { }, - Some(_) => { } // When the previous match failed, the option became `None`. -} -``` -"##, -E0303: r##" -In certain cases it is possible for sub-bindings to violate memory safety. 
-Updates to the borrow checker in a future version of Rust may remove this -restriction, but for now patterns must be rewritten without sub-bindings. +And a working example: ``` -// Before. -match Some("hi".to_string()) { - ref op_string_ref @ Some(ref s) => ... - None => ... -} +#![recursion_limit="1000"] -// After. -match Some("hi".to_string()) { - Some(ref s) => { - let op_string_ref = &Some(s); - ... - } - None => ... -} +fn main() {} ``` - -The `op_string_ref` binding has type `&Option<&String>` in both cases. - -See also https://github.com/rust-lang/rust/issues/14587 -"##, - -E0306: r##" -In an array literal `[x; N]`, `N` is the number of elements in the array. This -number cannot be negative. -"##, - -E0307: r##" -The length of an array is part of its type. For this reason, this length must be -a compile-time constant. "##, E0308: r##" @@ -1817,7 +1204,7 @@ variable. For example: -``` +```compile_fail,E0308 let x: i32 = "I am not a number!"; // ~~~ ~~~~~~~~~~~~~~~~~~~~ // | | @@ -1826,30 +1213,6 @@ let x: i32 = "I am not a number!"; // | // type `i32` assigned to variable `x` ``` - -Another situation in which this occurs is when you attempt to use the `try!` -macro inside a function that does not return a `Result`: - -``` -use std::fs::File; - -fn main() { - let mut f = try!(File::create("foo.txt")); -} -``` - -This code gives an error like this: - -```text -:5:8: 6:42 error: mismatched types: - expected `()`, - found `core::result::Result<_, _>` - (expected (), - found enum `core::result::Result`) [E0308] -``` - -`try!` returns a `Result`, and so the function must. But `main()` has -`()` as its return type, hence the error. "##, E0309: r##" @@ -1858,14 +1221,17 @@ how long the data stored within them is guaranteed to be live. This lifetime must be as long as the data needs to be alive, and missing the constraint that denotes this will cause this error. 
-``` +```compile_fail,E0309 // This won't compile because T is not constrained, meaning the data // stored in it is not guaranteed to last as long as the reference struct Foo<'a, T> { foo: &'a T } +``` + +This will compile, because it has the constraint on the type parameter: -// This will compile, because it has the constraint on the type parameter +``` struct Foo<'a, T: 'a> { foo: &'a T } @@ -1878,202 +1244,110 @@ how long the data stored within them is guaranteed to be live. This lifetime must be as long as the data needs to be alive, and missing the constraint that denotes this will cause this error. -``` +```compile_fail,E0310 // This won't compile because T is not constrained to the static lifetime // the reference needs struct Foo { foo: &'static T } - -// This will compile, because it has the constraint on the type parameter -struct Foo { - foo: &'static T -} ``` -"##, - -E0378: r##" -Method calls that aren't calls to inherent `const` methods are disallowed -in statics, constants, and constant functions. -For example: +This will compile, because it has the constraint on the type parameter: ``` -const BAZ: i32 = Foo(25).bar(); // error, `bar` isn't `const` - -struct Foo(i32); - -impl Foo { - const fn foo(&self) -> i32 { - self.bar() // error, `bar` isn't `const` - } - - fn bar(&self) -> i32 { self.0 } +struct Foo { + foo: &'static T } ``` - -For more information about `const fn`'s, see [RFC 911]. - -[RFC 911]: https://github.com/rust-lang/rfcs/blob/master/text/0911-const-fn.md -"##, - -E0394: r##" -From [RFC 246]: - - > It is invalid for a static to reference another static by value. It is - > required that all references be borrowed. - -[RFC 246]: https://github.com/rust-lang/rfcs/pull/246 "##, -E0395: r##" -The value assigned to a constant expression must be known at compile time, -which is not the case when comparing raw pointers. 
Erroneous code example: - -``` -static foo: i32 = 42; -static bar: i32 = 43; - -static baz: bool = { (&foo as *const i32) == (&bar as *const i32) }; -// error: raw pointers cannot be compared in statics! -``` - -Please check that the result of the comparison can be determined at compile time -or isn't assigned to a constant expression. Example: +E0312: r##" +A lifetime of reference outlives lifetime of borrowed content. -``` -static foo: i32 = 42; -static bar: i32 = 43; +Erroneous code example: -let baz: bool = { (&foo as *const i32) == (&bar as *const i32) }; -// baz isn't a constant expression so it's ok +```compile_fail,E0312 +fn make_child<'human, 'elve>(x: &mut &'human isize, y: &mut &'elve isize) { + *x = *y; + // error: lifetime of reference outlives lifetime of borrowed content +} ``` -"##, -E0396: r##" -The value assigned to a constant expression must be known at compile time, -which is not the case when dereferencing raw pointers. Erroneous code -example: +The compiler cannot determine if the `human` lifetime will live long enough +to keep up on the elve one. To solve this error, you have to give an +explicit lifetime hierarchy: ``` -const foo: i32 = 42; -const baz: *const i32 = (&foo as *const i32); - -const deref: i32 = *baz; -// error: raw pointers cannot be dereferenced in constants +fn make_child<'human, 'elve: 'human>(x: &mut &'human isize, + y: &mut &'elve isize) { + *x = *y; // ok! +} ``` -To fix this error, please do not assign this value to a constant expression. -Example: +Or use the same lifetime for every variable: ``` -const foo: i32 = 42; -const baz: *const i32 = (&foo as *const i32); - -unsafe { let deref: i32 = *baz; } -// baz isn't a constant expression so it's ok +fn make_child<'elve>(x: &mut &'elve isize, y: &mut &'elve isize) { + *x = *y; // ok! +} ``` - -You'll also note that this assignment must be done in an unsafe block! "##, -E0397: r##" -It is not allowed for a mutable static to allocate or have destructors. 
For -example: +E0317: r##" +This error occurs when an `if` expression without an `else` block is used in a +context where a type other than `()` is expected, for example a `let` +expression: +```compile_fail,E0317 +fn main() { + let x = 5; + let a = if x == 5 { 1 }; +} ``` -// error: mutable statics are not allowed to have boxes -static mut FOO: Option> = None; -// error: mutable statics are not allowed to have destructors -static mut BAR: Option> = None; -``` +An `if` expression without an `else` block has the type `()`, so this is a type +error. To resolve it, add an `else` block having the same type as the `if` +block. "##, E0398: r##" -In Rust 1.3, the default object lifetime bounds are expected to -change, as described in RFC #1156 [1]. You are getting a warning -because the compiler thinks it is possible that this change will cause -a compilation error in your code. It is possible, though unlikely, -that this is a false alarm. - -The heart of the change is that where `&'a Box` used to -default to `&'a Box`, it now defaults to `&'a -Box` (here, `SomeTrait` is the name of some trait -type). Note that the only types which are affected are references to -boxes, like `&Box` or `&[Box]`. More common -types like `&SomeTrait` or `Box` are unaffected. - -To silence this warning, edit your code to use an explicit bound. -Most of the time, this means that you will want to change the -signature of a function that you are calling. For example, if -the error is reported on a call like `foo(x)`, and `foo` is -defined as follows: - -``` +In Rust 1.3, the default object lifetime bounds are expected to change, as +described in RFC #1156 [1]. You are getting a warning because the compiler +thinks it is possible that this change will cause a compilation error in your +code. It is possible, though unlikely, that this is a false alarm. 
+ +The heart of the change is that where `&'a Box` used to default to +`&'a Box`, it now defaults to `&'a Box` (here, +`SomeTrait` is the name of some trait type). Note that the only types which are +affected are references to boxes, like `&Box` or +`&[Box]`. More common types like `&SomeTrait` or `Box` +are unaffected. + +To silence this warning, edit your code to use an explicit bound. Most of the +time, this means that you will want to change the signature of a function that +you are calling. For example, if the error is reported on a call like `foo(x)`, +and `foo` is defined as follows: + +```ignore fn foo(arg: &Box) { ... } ``` -you might change it to: +You might change it to: -``` +```ignore fn foo<'a>(arg: &Box) { ... } ``` -This explicitly states that you expect the trait object `SomeTrait` to -contain references (with a maximum lifetime of `'a`). +This explicitly states that you expect the trait object `SomeTrait` to contain +references (with a maximum lifetime of `'a`). [1]: https://github.com/rust-lang/rfcs/pull/1156 "##, -E0400: r##" -A user-defined dereference was attempted in an invalid context. Erroneous -code example: - -``` -use std::ops::Deref; - -struct A; - -impl Deref for A { - type Target = str; - - fn deref(&self)-> &str { "foo" } -} - -const S: &'static str = &A; -// error: user-defined dereference operators are not allowed in constants - -fn main() { - let foo = S; -} -``` - -You cannot directly use a dereference operation whilst initializing a constant -or a static. To fix this error, restructure your code to avoid this dereference, -perhaps moving it inline: - -``` -use std::ops::Deref; - -struct A; - -impl Deref for A { - type Target = str; - - fn deref(&self)-> &str { "foo" } -} - -fn main() { - let foo : &str = &A; -} -``` -"##, - E0452: r##" An invalid lint attribute has been given. 
Erroneous code example: -``` +```compile_fail,E0452 #![allow(foo = "")] // error: malformed lint attribute ``` @@ -2087,140 +1361,86 @@ lint name). Ensure the attribute is of this form: ``` "##, -E0492: r##" -A borrow of a constant containing interior mutability was attempted. Erroneous -code example: - -``` -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; - -const A: AtomicUsize = ATOMIC_USIZE_INIT; -static B: &'static AtomicUsize = &A; -// error: cannot borrow a constant which contains interior mutability, create a -// static instead -``` +E0453: r##" +A lint check attribute was overruled by a `forbid` directive set as an +attribute on an enclosing scope, or on the command line with the `-F` option. -A `const` represents a constant value that should never change. If one takes -a `&` reference to the constant, then one is taking a pointer to some memory -location containing the value. Normally this is perfectly fine: most values -can't be changed via a shared `&` pointer, but interior mutability would allow -it. That is, a constant value could be mutated. On the other hand, a `static` is -explicitly a single memory location, which can be mutated at will. +Example of erroneous code: -So, in order to solve this error, either use statics which are `Sync`: +```compile_fail,E0453 +#![forbid(non_snake_case)] +#[allow(non_snake_case)] +fn main() { + let MyNumber = 2; // error: allow(non_snake_case) overruled by outer + // forbid(non_snake_case) +} ``` -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; -static A: AtomicUsize = ATOMIC_USIZE_INIT; -static B: &'static AtomicUsize = &A; // ok! -``` +The `forbid` lint setting, like `deny`, turns the corresponding compiler +warning into a hard error. Unlike `deny`, `forbid` prevents itself from being +overridden by inner attributes. 
-You can also have this error while using a cell type: +If you're sure you want to override the lint check, you can change `forbid` to +`deny` (or use `-D` instead of `-F` if the `forbid` setting was given as a +command-line option) to allow the inner lint check attribute: ``` -#![feature(const_fn)] - -use std::cell::Cell; +#![deny(non_snake_case)] -const A: Cell = Cell::new(1); -const B: &'static Cell = &A; -// error: cannot borrow a constant which contains interior mutability, create -// a static instead - -// or: -struct C { a: Cell } - -const D: C = C { a: Cell::new(1) }; -const E: &'static Cell = &D.a; // error - -// or: -const F: &'static C = &D; // error +#[allow(non_snake_case)] +fn main() { + let MyNumber = 2; // ok! +} ``` -This is because cell types do operations that are not thread-safe. Due to this, -they don't implement Sync and thus can't be placed in statics. In this -case, `StaticMutex` would work just fine, but it isn't stable yet: -https://doc.rust-lang.org/nightly/std/sync/struct.StaticMutex.html - -However, if you still wish to use these types, you can achieve this by an unsafe -wrapper: +Otherwise, edit the code to pass the lint check, and remove the overruled +attribute: ``` -#![feature(const_fn)] +#![forbid(non_snake_case)] -use std::cell::Cell; -use std::marker::Sync; - -struct NotThreadSafe { - value: Cell, +fn main() { + let my_number = 2; } - -unsafe impl Sync for NotThreadSafe {} - -static A: NotThreadSafe = NotThreadSafe { value : Cell::new(1) }; -static B: &'static NotThreadSafe = &A; // ok! ``` - -Remember this solution is unsafe! You will have to ensure that accesses to the -cell are synchronized. "##, -E0493: r##" -A type with a destructor was assigned to an invalid type of variable. Erroneous -code example: - -``` -struct Foo { - a: u32 -} - -impl Drop for Foo { - fn drop(&mut self) {} -} +E0478: r##" +A lifetime bound was not satisfied. 
-const F : Foo = Foo { a : 0 }; -// error: constants are not allowed to have destructors -static S : Foo = Foo { a : 0 }; -// error: statics are not allowed to have destructors -``` +Erroneous code example: -To solve this issue, please use a type which does allow the usage of type with -destructors. -"##, +```compile_fail,E0478 +// Check that the explicit lifetime bound (`'SnowWhite`, in this example) must +// outlive all the superbounds from the trait (`'kiss`, in this example). -E0494: r##" -A reference of an interior static was assigned to another const/static. -Erroneous code example: +trait Wedding<'t>: 't { } -``` -struct Foo { - a: u32 +struct Prince<'kiss, 'SnowWhite> { + child: Box + 'SnowWhite>, + // error: lifetime bound not satisfied } - -static S : Foo = Foo { a : 0 }; -static A : &'static u32 = &S.a; -// error: cannot refer to the interior of another static, use a -// constant instead ``` -The "base" variable has to be a const if you want another static/const variable -to refer to one of its fields. Example: +In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss` +lifetime but the declaration of the `Prince` struct doesn't enforce it. To fix +this issue, you need to specify it: ``` -struct Foo { - a: u32 -} +trait Wedding<'t>: 't { } -const S : Foo = Foo { a : 0 }; -static A : &'static u32 = &S.a; // ok! +struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'kiss must live + // longer than 'SnowWhite. + child: Box + 'SnowWhite>, // And now it's all good! +} ``` "##, E0496: r##" A lifetime name is shadowing another lifetime name. Erroneous code example: -``` +```compile_fail,E0496 struct Foo<'a> { a: &'a i32, } @@ -2253,7 +1473,7 @@ E0497: r##" A stability attribute was used outside of the standard library. 
Erroneous code example: -``` +```compile_fail #[stable] // error: stability attributes may not be used outside of the // standard library fn foo() {} @@ -2263,13 +1483,39 @@ It is not possible to use stability attributes outside of the standard library. Also, for now, it is not possible to write deprecation messages either. "##, +E0512: r##" +Transmute with two differently sized types was attempted. Erroneous code +example: + +```compile_fail,E0512 +fn takes_u8(_: u8) {} + +fn main() { + unsafe { takes_u8(::std::mem::transmute(0u16)); } + // error: transmute called with differently sized types +} +``` + +Please use types with same size or use the expected type directly. Example: + +``` +fn takes_u8(_: u8) {} + +fn main() { + unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok! + // or: + unsafe { takes_u8(0u8); } // ok! +} +``` +"##, + E0517: r##" -This error indicates that a `#[repr(..)]` attribute was placed on an unsupported -item. +This error indicates that a `#[repr(..)]` attribute was placed on an +unsupported item. Examples of erroneous code: -``` +```compile_fail,E0517 #[repr(C)] type Foo = u8; @@ -2281,29 +1527,29 @@ struct Foo {bar: bool, baz: bool} #[repr(C)] impl Foo { - ... + // ... } ``` - - The `#[repr(C)]` attribute can only be placed on structs and enums - - The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs - - The `#[repr(u8)]`, `#[repr(i16)]`, etc attributes only work on enums +* The `#[repr(C)]` attribute can only be placed on structs and enums. +* The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs. +* The `#[repr(u8)]`, `#[repr(i16)]`, etc attributes only work on enums. These attributes do not work on typedefs, since typedefs are just aliases. Representations like `#[repr(u8)]`, `#[repr(i64)]` are for selecting the -discriminant size for C-like enums (when there is no associated data, e.g. 
`enum -Color {Red, Blue, Green}`), effectively setting the size of the enum to the size -of the provided type. Such an enum can be cast to a value of the same type as -well. In short, `#[repr(u8)]` makes the enum behave like an integer with a -constrained set of allowed values. +discriminant size for C-like enums (when there is no associated data, e.g. +`enum Color {Red, Blue, Green}`), effectively setting the size of the enum to +the size of the provided type. Such an enum can be cast to a value of the same +type as well. In short, `#[repr(u8)]` makes the enum behave like an integer +with a constrained set of allowed values. Only C-like enums can be cast to numerical primitives, so this attribute will not apply to structs. `#[repr(packed)]` reduces padding to make the struct size smaller. The -representation of enums isn't strictly defined in Rust, and this attribute won't -work on enums. +representation of enums isn't strictly defined in Rust, and this attribute +won't work on enums. `#[repr(simd)]` will give a struct consisting of a homogenous series of machine types (i.e. `u8`, `i32`, etc) a representation that permits vectorization via @@ -2312,18 +1558,18 @@ single list of data. "##, E0518: r##" -This error indicates that an `#[inline(..)]` attribute was incorrectly placed on -something other than a function or method. +This error indicates that an `#[inline(..)]` attribute was incorrectly placed +on something other than a function or method. Examples of erroneous code: -``` +```compile_fail,E0518 #[inline(always)] struct Foo; #[inline(never)] impl Foo { - ... + // ... } ``` @@ -2337,37 +1583,92 @@ each method; it is not possible to annotate the entire impl with an `#[inline]` attribute. "##, +E0522: r##" +The lang attribute is intended for marking special items that are built-in to +Rust itself. 
This includes special traits (like `Copy` and `Sized`) that affect +how the compiler behaves, as well as special functions that may be automatically +invoked (such as the handler for out-of-bounds accesses when indexing a slice). +Erroneous code example: + +```compile_fail,E0522 +#![feature(lang_items)] + +#[lang = "cookie"] +fn cookie() -> ! { // error: definition of an unknown language item: `cookie` + loop {} +} +``` +"##, + +E0525: r##" +A closure was attempted to get used whereas it doesn't implement the expected +trait. + +Erroneous code example: + +```compile_fail,E0525 +struct X; + +fn foo(_: T) {} +fn bar(_: T) {} + +fn main() { + let x = X; + let closure = |_| foo(x); // error: expected a closure that implements + // the `Fn` trait, but this closure only + // implements `FnOnce` + bar(closure); +} +``` + +In the example above, `closure` is an `FnOnce` closure whereas the `bar` +function expected an `Fn` closure. In this case, it's simple to fix the issue, +you just have to implement `Copy` and `Clone` traits on `struct X` and it'll +be ok: + +``` +#[derive(Clone, Copy)] // We implement `Clone` and `Copy` traits. +struct X; + +fn foo(_: T) {} +fn bar(_: T) {} + +fn main() { + let x = X; + let closure = |_| foo(x); + bar(closure); // ok! +} +``` + +To understand better how closures work in Rust, read: +https://doc.rust-lang.org/book/closures.html +"##, + } register_diagnostics! 
{ - // E0006 // merged with E0005 +// E0006 // merged with E0005 // E0134, // E0135, E0278, // requirement is not satisfied E0279, // requirement is not satisfied E0280, // requirement is not satisfied E0284, // cannot resolve type - E0285, // overflow evaluation builtin bounds - E0298, // mismatched types between arms - E0299, // mismatched types between arms - // E0300, // unexpanded macro - // E0304, // expected signed integer constant - // E0305, // expected constant +// E0285, // overflow evaluation builtin bounds +// E0300, // unexpanded macro +// E0304, // expected signed integer constant +// E0305, // expected constant E0311, // thing may not live long enough - E0312, // lifetime of reference outlives lifetime of borrowed content E0313, // lifetime of borrowed pointer outlives lifetime of captured variable E0314, // closure outlives stack frame E0315, // cannot invoke closure outside of its lifetime E0316, // nested quantification of lifetimes - E0453, // overruled by outer forbid - E0471, // constant evaluation error: .. E0473, // dereference of reference outside its lifetime E0474, // captured variable `..` does not outlive the enclosing closure E0475, // index of slice outside its lifetime E0476, // lifetime of the source pointer does not outlive lifetime bound... E0477, // the type `..` does not fulfill the required lifetime... - E0478, // lifetime bound not satisfied E0479, // the type `..` (provided as the value of a type parameter) is... E0480, // lifetime of method receiver does not outlive the method call E0481, // lifetime of function argument does not outlive the function call @@ -2382,4 +1683,5 @@ register_diagnostics! { E0490, // a value of type `..` is borrowed for too long E0491, // in type `..`, reference has a longer lifetime than the data it... 
E0495, // cannot infer an appropriate lifetime due to conflicting requirements + E0566 // conflicting representation hints } diff --git a/src/librustc/front/check_attr.rs b/src/librustc/front/check_attr.rs deleted file mode 100644 index 27785a072a654..0000000000000 --- a/src/librustc/front/check_attr.rs +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use session::Session; - -use syntax::ast; -use syntax::attr::AttrMetaMethods; -use syntax::visit; -use syntax::visit::Visitor; - -#[derive(Copy, Clone, PartialEq)] -enum Target { - Fn, - Struct, - Enum, - Other, -} - -impl Target { - fn from_item(item: &ast::Item) -> Target { - match item.node { - ast::ItemFn(..) => Target::Fn, - ast::ItemStruct(..) => Target::Struct, - ast::ItemEnum(..) 
=> Target::Enum, - _ => Target::Other, - } - } -} - -struct CheckAttrVisitor<'a> { - sess: &'a Session, -} - -impl<'a> CheckAttrVisitor<'a> { - fn check_inline(&self, attr: &ast::Attribute, target: Target) { - if target != Target::Fn { - span_err!(self.sess, attr.span, E0518, "attribute should be applied to function"); - } - } - - fn check_repr(&self, attr: &ast::Attribute, target: Target) { - let words = match attr.meta_item_list() { - Some(words) => words, - None => { - return; - } - }; - for word in words { - let word: &str = &word.name(); - let message = match word { - "C" => { - if target != Target::Struct && target != Target::Enum { - "attribute should be applied to struct or enum" - } else { - continue - } - } - "packed" | - "simd" => { - if target != Target::Struct { - "attribute should be applied to struct" - } else { - continue - } - } - "i8" | "u8" | "i16" | "u16" | - "i32" | "u32" | "i64" | "u64" | - "isize" | "usize" => { - if target != Target::Enum { - "attribute should be applied to enum" - } else { - continue - } - } - _ => continue, - }; - span_err!(self.sess, attr.span, E0517, "{}", message); - } - } - - fn check_attribute(&self, attr: &ast::Attribute, target: Target) { - let name: &str = &attr.name(); - match name { - "inline" => self.check_inline(attr, target), - "repr" => self.check_repr(attr, target), - _ => (), - } - } -} - -impl<'a, 'v> Visitor<'v> for CheckAttrVisitor<'a> { - fn visit_item(&mut self, item: &ast::Item) { - let target = Target::from_item(item); - for attr in &item.attrs { - self.check_attribute(attr, target); - } - } -} - -pub fn check_crate(sess: &Session, krate: &ast::Crate) { - visit::walk_crate(&mut CheckAttrVisitor { sess: sess }, krate); -} diff --git a/src/librustc/front/map/blocks.rs b/src/librustc/front/map/blocks.rs deleted file mode 100644 index 0e24a4446fbe9..0000000000000 --- a/src/librustc/front/map/blocks.rs +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module provides a simplified abstraction for working with -//! code blocks identified by their integer node-id. In particular, -//! it captures a common set of attributes that all "function-like -//! things" (represented by `FnLike` instances) share. For example, -//! all `FnLike` instances have a type signature (be it explicit or -//! inferred). And all `FnLike` instances have a body, i.e. the code -//! that is run when the function-like thing it represents is invoked. -//! -//! With the above abstraction in place, one can treat the program -//! text as a collection of blocks of code (and most such blocks are -//! nested within a uniquely determined `FnLike`), and users can ask -//! for the `Code` associated with a particular NodeId. - -pub use self::Code::*; - -use front::map::{self, Node}; -use syntax::abi; -use rustc_front::hir::{Block, FnDecl}; -use syntax::ast::{Name, NodeId}; -use rustc_front::hir as ast; -use syntax::codemap::Span; -use rustc_front::intravisit::FnKind; - -/// An FnLikeNode is a Node that is like a fn, in that it has a decl -/// and a body (as well as a NodeId, a span, etc). -/// -/// More specifically, it is one of either: -/// - A function item, -/// - A closure expr (i.e. an ExprClosure), or -/// - The default implementation for a trait method. -/// -/// To construct one, use the `Code::from_node` function. -#[derive(Copy, Clone)] -pub struct FnLikeNode<'a> { node: map::Node<'a> } - -/// MaybeFnLike wraps a method that indicates if an object -/// corresponds to some FnLikeNode. -pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; } - -/// Components shared by fn-like things (fn items, methods, closures). 
-pub struct FnParts<'a> { - pub decl: &'a FnDecl, - pub body: &'a Block, - pub kind: FnKind<'a>, - pub span: Span, - pub id: NodeId, -} - -impl MaybeFnLike for ast::Item { - fn is_fn_like(&self) -> bool { - match self.node { ast::ItemFn(..) => true, _ => false, } - } -} - -impl MaybeFnLike for ast::TraitItem { - fn is_fn_like(&self) -> bool { - match self.node { ast::MethodTraitItem(_, Some(_)) => true, _ => false, } - } -} - -impl MaybeFnLike for ast::Expr { - fn is_fn_like(&self) -> bool { - match self.node { - ast::ExprClosure(..) => true, - _ => false, - } - } -} - -/// Carries either an FnLikeNode or a Block, as these are the two -/// constructs that correspond to "code" (as in, something from which -/// we can construct a control-flow graph). -#[derive(Copy, Clone)] -pub enum Code<'a> { - FnLikeCode(FnLikeNode<'a>), - BlockCode(&'a Block), -} - -impl<'a> Code<'a> { - pub fn id(&self) -> NodeId { - match *self { - FnLikeCode(node) => node.id(), - BlockCode(block) => block.id, - } - } - - /// Attempts to construct a Code from presumed FnLike or Block node input. - pub fn from_node(node: Node) -> Option { - if let map::NodeBlock(block) = node { - Some(BlockCode(block)) - } else { - FnLikeNode::from_node(node).map(|fn_like| FnLikeCode(fn_like)) - } - } -} - -/// These are all the components one can extract from a fn item for -/// use when implementing FnLikeNode operations. -struct ItemFnParts<'a> { - name: Name, - decl: &'a ast::FnDecl, - unsafety: ast::Unsafety, - constness: ast::Constness, - abi: abi::Abi, - vis: ast::Visibility, - generics: &'a ast::Generics, - body: &'a Block, - id: NodeId, - span: Span -} - -/// These are all the components one can extract from a closure expr -/// for use when implementing FnLikeNode operations. 
-struct ClosureParts<'a> { - decl: &'a FnDecl, - body: &'a Block, - id: NodeId, - span: Span -} - -impl<'a> ClosureParts<'a> { - fn new(d: &'a FnDecl, b: &'a Block, id: NodeId, s: Span) -> ClosureParts<'a> { - ClosureParts { decl: d, body: b, id: id, span: s } - } -} - -impl<'a> FnLikeNode<'a> { - /// Attempts to construct a FnLikeNode from presumed FnLike node input. - pub fn from_node(node: Node) -> Option { - let fn_like = match node { - map::NodeItem(item) => item.is_fn_like(), - map::NodeTraitItem(tm) => tm.is_fn_like(), - map::NodeImplItem(_) => true, - map::NodeExpr(e) => e.is_fn_like(), - _ => false - }; - if fn_like { - Some(FnLikeNode { - node: node - }) - } else { - None - } - } - - pub fn to_fn_parts(self) -> FnParts<'a> { - FnParts { - decl: self.decl(), - body: self.body(), - kind: self.kind(), - span: self.span(), - id: self.id(), - } - } - - pub fn body(self) -> &'a Block { - self.handle(|i: ItemFnParts<'a>| &*i.body, - |_, _, _: &'a ast::MethodSig, _, body: &'a ast::Block, _| body, - |c: ClosureParts<'a>| c.body) - } - - pub fn decl(self) -> &'a FnDecl { - self.handle(|i: ItemFnParts<'a>| &*i.decl, - |_, _, sig: &'a ast::MethodSig, _, _, _| &sig.decl, - |c: ClosureParts<'a>| c.decl) - } - - pub fn span(self) -> Span { - self.handle(|i: ItemFnParts| i.span, - |_, _, _: &'a ast::MethodSig, _, _, span| span, - |c: ClosureParts| c.span) - } - - pub fn id(self) -> NodeId { - self.handle(|i: ItemFnParts| i.id, - |id, _, _: &'a ast::MethodSig, _, _, _| id, - |c: ClosureParts| c.id) - } - - pub fn kind(self) -> FnKind<'a> { - let item = |p: ItemFnParts<'a>| -> FnKind<'a> { - FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis) - }; - let closure = |_: ClosureParts| { - FnKind::Closure - }; - let method = |_, name: Name, sig: &'a ast::MethodSig, vis, _, _| { - FnKind::Method(name, sig, vis) - }; - self.handle(item, method, closure) - } - - fn handle(self, item_fn: I, method: M, closure: C) -> A where - I: FnOnce(ItemFnParts<'a>) -> A, - 
M: FnOnce(NodeId, - Name, - &'a ast::MethodSig, - Option, - &'a ast::Block, - Span) - -> A, - C: FnOnce(ClosureParts<'a>) -> A, - { - match self.node { - map::NodeItem(i) => match i.node { - ast::ItemFn(ref decl, unsafety, constness, abi, ref generics, ref block) => - item_fn(ItemFnParts { - id: i.id, - name: i.name, - decl: &**decl, - unsafety: unsafety, - body: &**block, - generics: generics, - abi: abi, - vis: i.vis, - constness: constness, - span: i.span - }), - _ => panic!("item FnLikeNode that is not fn-like"), - }, - map::NodeTraitItem(ti) => match ti.node { - ast::MethodTraitItem(ref sig, Some(ref body)) => { - method(ti.id, ti.name, sig, None, body, ti.span) - } - _ => panic!("trait method FnLikeNode that is not fn-like"), - }, - map::NodeImplItem(ii) => { - match ii.node { - ast::ImplItemKind::Method(ref sig, ref body) => { - method(ii.id, ii.name, sig, Some(ii.vis), body, ii.span) - } - _ => { - panic!("impl method FnLikeNode that is not fn-like") - } - } - } - map::NodeExpr(e) => match e.node { - ast::ExprClosure(_, ref decl, ref block) => - closure(ClosureParts::new(&**decl, &**block, e.id, e.span)), - _ => panic!("expr FnLikeNode that is not fn-like"), - }, - _ => panic!("other FnLikeNode that is not fn-like"), - } - } -} diff --git a/src/librustc/front/map/collector.rs b/src/librustc/front/map/collector.rs deleted file mode 100644 index e85b0ec77cbbd..0000000000000 --- a/src/librustc/front/map/collector.rs +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::*; -use super::MapEntry::*; - -use rustc_front::hir::*; -use rustc_front::util; -use rustc_front::intravisit::{self, Visitor}; -use middle::def_id::{CRATE_DEF_INDEX, DefIndex}; -use std::iter::repeat; -use syntax::ast::{NodeId, CRATE_NODE_ID, DUMMY_NODE_ID}; -use syntax::codemap::Span; - -/// A Visitor that walks over an AST and collects Node's into an AST -/// Map. -pub struct NodeCollector<'ast> { - pub krate: &'ast Crate, - pub map: Vec>, - pub definitions: Definitions, - pub parent_node: NodeId, -} - -impl<'ast> NodeCollector<'ast> { - pub fn root(krate: &'ast Crate) -> NodeCollector<'ast> { - let mut collector = NodeCollector { - krate: krate, - map: vec![], - definitions: Definitions::new(), - parent_node: CRATE_NODE_ID, - }; - collector.insert_entry(CRATE_NODE_ID, RootCrate); - - let result = collector.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot); - assert_eq!(result, CRATE_DEF_INDEX); - - collector.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc); - - collector - } - - pub fn extend(krate: &'ast Crate, - parent: &'ast InlinedParent, - parent_node: NodeId, - parent_def_path: DefPath, - map: Vec>, - definitions: Definitions) - -> NodeCollector<'ast> { - let mut collector = NodeCollector { - krate: krate, - map: map, - parent_node: parent_node, - definitions: definitions, - }; - - collector.insert_entry(parent_node, RootInlinedParent(parent)); - collector.create_def(parent_node, DefPathData::InlinedRoot(parent_def_path)); - - collector - } - - fn parent_def(&self) -> Option { - let mut parent_node = Some(self.parent_node); - while let Some(p) = parent_node { - if let Some(q) = self.definitions.opt_def_index(p) { - return Some(q); - } - parent_node = self.map[p as usize].parent_node(); - } - None - } - - fn create_def(&mut self, node_id: NodeId, data: DefPathData) -> DefIndex { - let parent_def = self.parent_def(); - self.definitions.create_def_with_parent(parent_def, node_id, data) - } - - fn 
create_def_with_parent(&mut self, - parent: Option, - node_id: NodeId, - data: DefPathData) - -> DefIndex { - self.definitions.create_def_with_parent(parent, node_id, data) - } - - fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'ast>) { - debug!("ast_map: {:?} => {:?}", id, entry); - let len = self.map.len(); - if id as usize >= len { - self.map.extend(repeat(NotPresent).take(id as usize - len + 1)); - } - self.map[id as usize] = entry; - } - - fn insert_def(&mut self, id: NodeId, node: Node<'ast>, data: DefPathData) -> DefIndex { - self.insert(id, node); - self.create_def(id, data) - } - - fn insert(&mut self, id: NodeId, node: Node<'ast>) { - let entry = MapEntry::from_node(self.parent_node, node); - self.insert_entry(id, entry); - } -} - -impl<'ast> Visitor<'ast> for NodeCollector<'ast> { - /// Because we want to track parent items and so forth, enable - /// deep walking so that we walk nested items in the context of - /// their outer items. - fn visit_nested_item(&mut self, item: ItemId) { - self.visit_item(self.krate.item(item.id)) - } - - fn visit_item(&mut self, i: &'ast Item) { - // Pick the def data. This need not be unique, but the more - // information we encapsulate into - let def_data = match i.node { - ItemDefaultImpl(..) | ItemImpl(..) => DefPathData::Impl(i.name), - ItemEnum(..) | ItemStruct(..) | ItemTrait(..) => DefPathData::Type(i.name), - ItemExternCrate(..) | ItemMod(..) => DefPathData::Mod(i.name), - ItemStatic(..) | ItemConst(..) | ItemFn(..) => DefPathData::Value(i.name), - _ => DefPathData::Misc, - }; - - self.insert_def(i.id, NodeItem(i), def_data); - - let parent_node = self.parent_node; - self.parent_node = i.id; - - match i.node { - ItemImpl(..) 
=> {} - ItemEnum(ref enum_definition, _) => { - for v in &enum_definition.variants { - let variant_def_index = - self.insert_def(v.node.data.id(), - NodeVariant(v), - DefPathData::EnumVariant(v.node.name)); - - for field in v.node.data.fields() { - self.create_def_with_parent( - Some(variant_def_index), - field.node.id, - DefPathData::Field(field.node.kind)); - } - } - } - ItemForeignMod(..) => { - } - ItemStruct(ref struct_def, _) => { - // If this is a tuple-like struct, register the constructor. - if !struct_def.is_struct() { - self.insert_def(struct_def.id(), - NodeStructCtor(struct_def), - DefPathData::StructCtor); - } - - for field in struct_def.fields() { - self.create_def(field.node.id, DefPathData::Field(field.node.kind)); - } - } - ItemTrait(_, _, ref bounds, _) => { - for b in bounds.iter() { - if let TraitTyParamBound(ref t, TraitBoundModifier::None) = *b { - self.insert(t.trait_ref.ref_id, NodeItem(i)); - } - } - } - ItemUse(ref view_path) => { - match view_path.node { - ViewPathList(_, ref paths) => { - for path in paths { - self.insert(path.node.id(), NodeItem(i)); - } - } - _ => () - } - } - _ => {} - } - intravisit::walk_item(self, i); - self.parent_node = parent_node; - } - - fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) { - self.insert_def(foreign_item.id, - NodeForeignItem(foreign_item), - DefPathData::Value(foreign_item.name)); - - let parent_node = self.parent_node; - self.parent_node = foreign_item.id; - intravisit::walk_foreign_item(self, foreign_item); - self.parent_node = parent_node; - } - - fn visit_generics(&mut self, generics: &'ast Generics) { - for ty_param in generics.ty_params.iter() { - self.insert_def(ty_param.id, - NodeTyParam(ty_param), - DefPathData::TypeParam(ty_param.name)); - } - - intravisit::walk_generics(self, generics); - } - - fn visit_trait_item(&mut self, ti: &'ast TraitItem) { - let def_data = match ti.node { - MethodTraitItem(..) | ConstTraitItem(..) 
=> DefPathData::Value(ti.name), - TypeTraitItem(..) => DefPathData::Type(ti.name), - }; - - self.insert(ti.id, NodeTraitItem(ti)); - self.create_def(ti.id, def_data); - - let parent_node = self.parent_node; - self.parent_node = ti.id; - - match ti.node { - ConstTraitItem(_, Some(ref expr)) => { - self.create_def(expr.id, DefPathData::Initializer); - } - _ => { } - } - - intravisit::walk_trait_item(self, ti); - - self.parent_node = parent_node; - } - - fn visit_impl_item(&mut self, ii: &'ast ImplItem) { - let def_data = match ii.node { - ImplItemKind::Method(..) | ImplItemKind::Const(..) => DefPathData::Value(ii.name), - ImplItemKind::Type(..) => DefPathData::Type(ii.name), - }; - - self.insert_def(ii.id, NodeImplItem(ii), def_data); - - let parent_node = self.parent_node; - self.parent_node = ii.id; - - match ii.node { - ImplItemKind::Const(_, ref expr) => { - self.create_def(expr.id, DefPathData::Initializer); - } - _ => { } - } - - intravisit::walk_impl_item(self, ii); - - self.parent_node = parent_node; - } - - fn visit_pat(&mut self, pat: &'ast Pat) { - let maybe_binding = match pat.node { - PatIdent(_, id, _) => Some(id.node), - _ => None - }; - - if let Some(id) = maybe_binding { - self.insert_def(pat.id, NodeLocal(pat), DefPathData::Binding(id.name)); - } else { - self.insert(pat.id, NodePat(pat)); - } - - let parent_node = self.parent_node; - self.parent_node = pat.id; - intravisit::walk_pat(self, pat); - self.parent_node = parent_node; - } - - fn visit_expr(&mut self, expr: &'ast Expr) { - self.insert(expr.id, NodeExpr(expr)); - - match expr.node { - ExprClosure(..) 
=> { self.create_def(expr.id, DefPathData::ClosureExpr); } - _ => { } - } - - let parent_node = self.parent_node; - self.parent_node = expr.id; - intravisit::walk_expr(self, expr); - self.parent_node = parent_node; - } - - fn visit_stmt(&mut self, stmt: &'ast Stmt) { - let id = util::stmt_id(stmt); - self.insert(id, NodeStmt(stmt)); - let parent_node = self.parent_node; - self.parent_node = id; - intravisit::walk_stmt(self, stmt); - self.parent_node = parent_node; - } - - fn visit_fn(&mut self, fk: intravisit::FnKind<'ast>, fd: &'ast FnDecl, - b: &'ast Block, s: Span, id: NodeId) { - assert_eq!(self.parent_node, id); - intravisit::walk_fn(self, fk, fd, b, s); - } - - fn visit_block(&mut self, block: &'ast Block) { - self.insert(block.id, NodeBlock(block)); - let parent_node = self.parent_node; - self.parent_node = block.id; - intravisit::walk_block(self, block); - self.parent_node = parent_node; - } - - fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) { - self.insert(lifetime.id, NodeLifetime(lifetime)); - } - - fn visit_lifetime_def(&mut self, def: &'ast LifetimeDef) { - self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name)); - self.visit_lifetime(&def.lifetime); - } - - fn visit_macro_def(&mut self, macro_def: &'ast MacroDef) { - self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.name)); - } -} diff --git a/src/librustc/front/map/definitions.rs b/src/librustc/front/map/definitions.rs deleted file mode 100644 index e903fcf6a56c2..0000000000000 --- a/src/librustc/front/map/definitions.rs +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use middle::cstore::LOCAL_CRATE; -use middle::def_id::{DefId, DefIndex}; -use rustc_data_structures::fnv::FnvHashMap; -use rustc_front::hir; -use syntax::ast; -use syntax::parse::token::InternedString; -use util::nodemap::NodeMap; - -#[derive(Clone)] -pub struct Definitions { - data: Vec, - key_map: FnvHashMap, - node_map: NodeMap, -} - -/// A unique identifier that we can use to lookup a definition -/// precisely. It combines the index of the definition's parent (if -/// any) with a `DisambiguatedDefPathData`. -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct DefKey { - /// Parent path. - pub parent: Option, - - /// Identifier of this node. - pub disambiguated_data: DisambiguatedDefPathData, -} - -/// Pair of `DefPathData` and an integer disambiguator. The integer is -/// normally 0, but in the event that there are multiple defs with the -/// same `parent` and `data`, we use this field to disambiguate -/// between them. This introduces some artificial ordering dependency -/// but means that if you have (e.g.) two impls for the same type in -/// the same module, they do get distinct def-ids. -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct DisambiguatedDefPathData { - pub data: DefPathData, - pub disambiguator: u32 -} - -/// For each definition, we track the following data. A definition -/// here is defined somewhat circularly as "something with a def-id", -/// but it generally corresponds to things like structs, enums, etc. -/// There are also some rather random cases (like const initializer -/// expressions) that are mostly just leftovers. -#[derive(Clone, Debug)] -pub struct DefData { - pub key: DefKey, - - /// Local ID within the HIR. 
- pub node_id: ast::NodeId, -} - -pub type DefPath = Vec; - -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum DefPathData { - // Root: these should only be used for the root nodes, because - // they are treated specially by the `def_path` function. - CrateRoot, - InlinedRoot(DefPath), - - // Catch-all for random DefId things like DUMMY_NODE_ID - Misc, - - // Different kinds of items and item-like things: - Impl(ast::Name), - Type(ast::Name), - Mod(ast::Name), - Value(ast::Name), - MacroDef(ast::Name), - ClosureExpr, - - // Subportions of items - TypeParam(ast::Name), - LifetimeDef(ast::Name), - EnumVariant(ast::Name), - PositionalField, - Field(hir::StructFieldKind), - StructCtor, // implicit ctor for a tuple-like struct - Initializer, // initializer for a const - Binding(ast::Name), // pattern binding - - // An external crate that does not have an `extern crate` in this - // crate. - DetachedCrate(ast::Name), -} - -impl Definitions { - pub fn new() -> Definitions { - Definitions { - data: vec![], - key_map: FnvHashMap(), - node_map: NodeMap(), - } - } - - pub fn len(&self) -> usize { - self.data.len() - } - - pub fn def_key(&self, index: DefIndex) -> DefKey { - self.data[index.as_usize()].key.clone() - } - - /// Returns the path from the crate root to `index`. The root - /// nodes are not included in the path (i.e., this will be an - /// empty vector for the crate root). For an inlined item, this - /// will be the path of the item in the external crate (but the - /// path will begin with the path to the external crate). 
- pub fn def_path(&self, index: DefIndex) -> DefPath { - make_def_path(index, |p| self.def_key(p)) - } - - pub fn opt_def_index(&self, node: ast::NodeId) -> Option { - self.node_map.get(&node).cloned() - } - - pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option { - self.opt_def_index(node).map(DefId::local) - } - - pub fn as_local_node_id(&self, def_id: DefId) -> Option { - if def_id.krate == LOCAL_CRATE { - assert!(def_id.index.as_usize() < self.data.len()); - Some(self.data[def_id.index.as_usize()].node_id) - } else { - None - } - } - - pub fn create_def_with_parent(&mut self, - parent: Option, - node_id: ast::NodeId, - data: DefPathData) - -> DefIndex { - assert!(!self.node_map.contains_key(&node_id), - "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}", - node_id, - data, - self.data[self.node_map[&node_id].as_usize()]); - - // Find a unique DefKey. This basically means incrementing the disambiguator - // until we get no match. - let mut key = DefKey { - parent: parent, - disambiguated_data: DisambiguatedDefPathData { - data: data, - disambiguator: 0 - } - }; - - while self.key_map.contains_key(&key) { - key.disambiguated_data.disambiguator += 1; - } - - // Create the definition. 
- let index = DefIndex::new(self.data.len()); - self.data.push(DefData { key: key.clone(), node_id: node_id }); - self.node_map.insert(node_id, index); - self.key_map.insert(key, index); - - index - } -} - -impl DefPathData { - pub fn as_interned_str(&self) -> InternedString { - use self::DefPathData::*; - match *self { - Impl(name) | - Type(name) | - Mod(name) | - Value(name) | - MacroDef(name) | - TypeParam(name) | - LifetimeDef(name) | - EnumVariant(name) | - DetachedCrate(name) | - Binding(name) => { - name.as_str() - } - - Field(hir::StructFieldKind::NamedField(name, _)) => { - name.as_str() - } - - PositionalField | - Field(hir::StructFieldKind::UnnamedField(_)) => { - InternedString::new("") - } - - // note that this does not show up in user printouts - CrateRoot => { - InternedString::new("") - } - - // note that this does not show up in user printouts - InlinedRoot(_) => { - InternedString::new("") - } - - Misc => { - InternedString::new("?") - } - - ClosureExpr => { - InternedString::new("") - } - - StructCtor => { - InternedString::new("") - } - - Initializer => { - InternedString::new("") - } - } - } - - pub fn to_string(&self) -> String { - self.as_interned_str().to_string() - } -} - -pub fn make_def_path(start_index: DefIndex, mut get_key: FN) -> DefPath - where FN: FnMut(DefIndex) -> DefKey -{ - let mut result = vec![]; - let mut index = Some(start_index); - while let Some(p) = index { - let key = get_key(p); - match key.disambiguated_data.data { - DefPathData::CrateRoot => { - assert!(key.parent.is_none()); - break; - } - DefPathData::InlinedRoot(ref p) => { - assert!(key.parent.is_none()); - result.extend(p.iter().cloned().rev()); - break; - } - _ => { - result.push(key.disambiguated_data); - index = key.parent; - } - } - } - result.reverse(); - result -} diff --git a/src/librustc/front/map/mod.rs b/src/librustc/front/map/mod.rs deleted file mode 100644 index 7de6099544525..0000000000000 --- a/src/librustc/front/map/mod.rs +++ /dev/null @@ -1,1008 
+0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use self::Node::*; -pub use self::PathElem::*; -use self::MapEntry::*; -use self::collector::NodeCollector; -pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData, DisambiguatedDefPathData}; - -use middle::cstore::InlinedItem; -use middle::cstore::InlinedItem as II; -use middle::def_id::DefId; - -use syntax::abi; -use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID}; -use syntax::codemap::{Span, Spanned}; -use syntax::parse::token; - -use rustc_front::hir::*; -use rustc_front::fold::Folder; -use rustc_front::intravisit; -use rustc_front::print::pprust; - -use arena::TypedArena; -use std::cell::RefCell; -use std::fmt; -use std::io; -use std::iter; -use std::mem; -use std::slice; - -pub mod blocks; -mod collector; -pub mod definitions; - -#[derive(Clone, Copy, PartialEq, Debug)] -pub enum PathElem { - PathMod(Name), - PathName(Name) -} - -impl PathElem { - pub fn name(&self) -> Name { - match *self { - PathMod(name) | PathName(name) => name - } - } -} - -impl fmt::Display for PathElem { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.name()) - } -} - -#[derive(Clone)] -pub struct LinkedPathNode<'a> { - node: PathElem, - next: LinkedPath<'a>, -} - -#[derive(Copy, Clone)] -pub struct LinkedPath<'a>(Option<&'a LinkedPathNode<'a>>); - -impl<'a> LinkedPath<'a> { - pub fn empty() -> LinkedPath<'a> { - LinkedPath(None) - } - - pub fn from(node: &'a LinkedPathNode) -> LinkedPath<'a> { - LinkedPath(Some(node)) - } -} - -impl<'a> Iterator for LinkedPath<'a> { - type Item = PathElem; - - fn next(&mut self) -> Option { - match self.0 { - Some(node) => { - *self 
= node.next; - Some(node.node) - } - None => None - } - } -} - -/// The type of the iterator used by with_path. -pub type PathElems<'a, 'b> = iter::Chain>, LinkedPath<'b>>; - -pub fn path_to_string>(path: PI) -> String { - let itr = token::get_ident_interner(); - - path.fold(String::new(), |mut s, e| { - let e = itr.get(e.name()); - if !s.is_empty() { - s.push_str("::"); - } - s.push_str(&e[..]); - s - }) -} - -#[derive(Copy, Clone, Debug)] -pub enum Node<'ast> { - NodeItem(&'ast Item), - NodeForeignItem(&'ast ForeignItem), - NodeTraitItem(&'ast TraitItem), - NodeImplItem(&'ast ImplItem), - NodeVariant(&'ast Variant), - NodeExpr(&'ast Expr), - NodeStmt(&'ast Stmt), - NodeLocal(&'ast Pat), - NodePat(&'ast Pat), - NodeBlock(&'ast Block), - - /// NodeStructCtor represents a tuple struct. - NodeStructCtor(&'ast VariantData), - - NodeLifetime(&'ast Lifetime), - NodeTyParam(&'ast TyParam) -} - -/// Represents an entry and its parent NodeID. -/// The odd layout is to bring down the total size. -#[derive(Copy, Debug)] -pub enum MapEntry<'ast> { - /// Placeholder for holes in the map. - NotPresent, - - /// All the node types, with a parent ID. - EntryItem(NodeId, &'ast Item), - EntryForeignItem(NodeId, &'ast ForeignItem), - EntryTraitItem(NodeId, &'ast TraitItem), - EntryImplItem(NodeId, &'ast ImplItem), - EntryVariant(NodeId, &'ast Variant), - EntryExpr(NodeId, &'ast Expr), - EntryStmt(NodeId, &'ast Stmt), - EntryLocal(NodeId, &'ast Pat), - EntryPat(NodeId, &'ast Pat), - EntryBlock(NodeId, &'ast Block), - EntryStructCtor(NodeId, &'ast VariantData), - EntryLifetime(NodeId, &'ast Lifetime), - EntryTyParam(NodeId, &'ast TyParam), - - /// Roots for node trees. 
- RootCrate, - RootInlinedParent(&'ast InlinedParent) -} - -impl<'ast> Clone for MapEntry<'ast> { - fn clone(&self) -> MapEntry<'ast> { - *self - } -} - -#[derive(Debug)] -pub struct InlinedParent { - path: Vec, - ii: InlinedItem -} - -impl<'ast> MapEntry<'ast> { - fn from_node(p: NodeId, node: Node<'ast>) -> MapEntry<'ast> { - match node { - NodeItem(n) => EntryItem(p, n), - NodeForeignItem(n) => EntryForeignItem(p, n), - NodeTraitItem(n) => EntryTraitItem(p, n), - NodeImplItem(n) => EntryImplItem(p, n), - NodeVariant(n) => EntryVariant(p, n), - NodeExpr(n) => EntryExpr(p, n), - NodeStmt(n) => EntryStmt(p, n), - NodeLocal(n) => EntryLocal(p, n), - NodePat(n) => EntryPat(p, n), - NodeBlock(n) => EntryBlock(p, n), - NodeStructCtor(n) => EntryStructCtor(p, n), - NodeLifetime(n) => EntryLifetime(p, n), - NodeTyParam(n) => EntryTyParam(p, n), - } - } - - fn parent_node(self) -> Option { - Some(match self { - EntryItem(id, _) => id, - EntryForeignItem(id, _) => id, - EntryTraitItem(id, _) => id, - EntryImplItem(id, _) => id, - EntryVariant(id, _) => id, - EntryExpr(id, _) => id, - EntryStmt(id, _) => id, - EntryLocal(id, _) => id, - EntryPat(id, _) => id, - EntryBlock(id, _) => id, - EntryStructCtor(id, _) => id, - EntryLifetime(id, _) => id, - EntryTyParam(id, _) => id, - _ => return None - }) - } - - fn to_node(self) -> Option> { - Some(match self { - EntryItem(_, n) => NodeItem(n), - EntryForeignItem(_, n) => NodeForeignItem(n), - EntryTraitItem(_, n) => NodeTraitItem(n), - EntryImplItem(_, n) => NodeImplItem(n), - EntryVariant(_, n) => NodeVariant(n), - EntryExpr(_, n) => NodeExpr(n), - EntryStmt(_, n) => NodeStmt(n), - EntryLocal(_, n) => NodeLocal(n), - EntryPat(_, n) => NodePat(n), - EntryBlock(_, n) => NodeBlock(n), - EntryStructCtor(_, n) => NodeStructCtor(n), - EntryLifetime(_, n) => NodeLifetime(n), - EntryTyParam(_, n) => NodeTyParam(n), - _ => return None - }) - } -} - -/// Stores a crate and any number of inlined items from other crates. 
-pub struct Forest { - pub krate: Crate, - inlined_items: TypedArena -} - -impl Forest { - pub fn new(krate: Crate) -> Forest { - Forest { - krate: krate, - inlined_items: TypedArena::new() - } - } - - pub fn krate<'ast>(&'ast self) -> &'ast Crate { - &self.krate - } -} - -/// Represents a mapping from Node IDs to AST elements and their parent -/// Node IDs -#[derive(Clone)] -pub struct Map<'ast> { - /// The backing storage for all the AST nodes. - pub forest: &'ast Forest, - - /// NodeIds are sequential integers from 0, so we can be - /// super-compact by storing them in a vector. Not everything with - /// a NodeId is in the map, but empirically the occupancy is about - /// 75-80%, so there's not too much overhead (certainly less than - /// a hashmap, since they (at the time of writing) have a maximum - /// of 75% occupancy). - /// - /// Also, indexing is pretty quick when you've got a vector and - /// plain old integers. - map: RefCell>>, - - definitions: RefCell, -} - -impl<'ast> Map<'ast> { - pub fn num_local_def_ids(&self) -> usize { - self.definitions.borrow().len() - } - - pub fn def_key(&self, def_id: DefId) -> DefKey { - assert!(def_id.is_local()); - self.definitions.borrow().def_key(def_id.index) - } - - pub fn def_path_from_id(&self, id: NodeId) -> DefPath { - self.def_path(self.local_def_id(id)) - } - - pub fn def_path(&self, def_id: DefId) -> DefPath { - assert!(def_id.is_local()); - self.definitions.borrow().def_path(def_id.index) - } - - pub fn local_def_id(&self, node: NodeId) -> DefId { - self.opt_local_def_id(node).unwrap_or_else(|| { - panic!("local_def_id: no entry for `{}`, which has a map of `{:?}`", - node, self.find_entry(node)) - }) - } - - pub fn opt_local_def_id(&self, node: NodeId) -> Option { - self.definitions.borrow().opt_local_def_id(node) - } - - pub fn as_local_node_id(&self, def_id: DefId) -> Option { - self.definitions.borrow().as_local_node_id(def_id) - } - - fn entry_count(&self) -> usize { - self.map.borrow().len() - } - - fn 
find_entry(&self, id: NodeId) -> Option> { - self.map.borrow().get(id as usize).cloned() - } - - pub fn krate(&self) -> &'ast Crate { - &self.forest.krate - } - - /// Retrieve the Node corresponding to `id`, panicking if it cannot - /// be found. - pub fn get(&self, id: NodeId) -> Node<'ast> { - match self.find(id) { - Some(node) => node, - None => panic!("couldn't find node id {} in the AST map", id) - } - } - - pub fn get_if_local(&self, id: DefId) -> Option> { - self.as_local_node_id(id).map(|id| self.get(id)) - } - - /// Retrieve the Node corresponding to `id`, returning None if - /// cannot be found. - pub fn find(&self, id: NodeId) -> Option> { - self.find_entry(id).and_then(|x| x.to_node()) - } - - /// Similar to get_parent, returns the parent node id or id if there is no - /// parent. - /// This function returns the immediate parent in the AST, whereas get_parent - /// returns the enclosing item. Note that this might not be the actual parent - /// node in the AST - some kinds of nodes are not in the map and these will - /// never appear as the parent_node. So you can always walk the parent_nodes - /// from a node to the root of the ast (unless you get the same id back here - /// that can happen if the id is not in the map itself or is just weird). - pub fn get_parent_node(&self, id: NodeId) -> NodeId { - self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id) - } - - /// Check if the node is an argument. An argument is a local variable whose - /// immediate parent is an item or a closure. - pub fn is_argument(&self, id: NodeId) -> bool { - match self.find(id) { - Some(NodeLocal(_)) => (), - _ => return false, - } - match self.find(self.get_parent_node(id)) { - Some(NodeItem(_)) | - Some(NodeTraitItem(_)) | - Some(NodeImplItem(_)) => true, - Some(NodeExpr(e)) => { - match e.node { - ExprClosure(..) 
=> true, - _ => false, - } - } - _ => false, - } - } - - /// If there is some error when walking the parents (e.g., a node does not - /// have a parent in the map or a node can't be found), then we return the - /// last good node id we found. Note that reaching the crate root (id == 0), - /// is not an error, since items in the crate module have the crate root as - /// parent. - fn walk_parent_nodes(&self, start_id: NodeId, found: F) -> Result - where F: Fn(&Node<'ast>) -> bool - { - let mut id = start_id; - loop { - let parent_node = self.get_parent_node(id); - if parent_node == 0 { - return Ok(0); - } - if parent_node == id { - return Err(id); - } - - let node = self.find_entry(parent_node); - if node.is_none() { - return Err(id); - } - let node = node.unwrap().to_node(); - match node { - Some(ref node) => { - if found(node) { - return Ok(parent_node); - } - } - None => { - return Err(parent_node); - } - } - id = parent_node; - } - } - - /// Retrieve the NodeId for `id`'s parent item, or `id` itself if no - /// parent item is in this map. The "parent item" is the closest parent node - /// in the AST which is recorded by the map and is an item, either an item - /// in a module, trait, or impl. - pub fn get_parent(&self, id: NodeId) -> NodeId { - match self.walk_parent_nodes(id, |node| match *node { - NodeItem(_) | - NodeForeignItem(_) | - NodeTraitItem(_) | - NodeImplItem(_) => true, - _ => false, - }) { - Ok(id) => id, - Err(id) => id, - } - } - - /// Returns the nearest enclosing scope. A scope is an item or block. - /// FIXME it is not clear to me that all items qualify as scopes - statics - /// and associated types probably shouldn't, for example. Behaviour in this - /// regard should be expected to be highly unstable. 
- pub fn get_enclosing_scope(&self, id: NodeId) -> Option { - match self.walk_parent_nodes(id, |node| match *node { - NodeItem(_) | - NodeForeignItem(_) | - NodeTraitItem(_) | - NodeImplItem(_) | - NodeBlock(_) => true, - _ => false, - }) { - Ok(id) => Some(id), - Err(_) => None, - } - } - - pub fn get_parent_did(&self, id: NodeId) -> DefId { - let parent = self.get_parent(id); - match self.find_entry(parent) { - Some(RootInlinedParent(&InlinedParent {ii: II::TraitItem(did, _), ..})) => did, - Some(RootInlinedParent(&InlinedParent {ii: II::ImplItem(did, _), ..})) => did, - _ => self.local_def_id(parent) - } - } - - pub fn get_foreign_abi(&self, id: NodeId) -> abi::Abi { - let parent = self.get_parent(id); - let abi = match self.find_entry(parent) { - Some(EntryItem(_, i)) => { - match i.node { - ItemForeignMod(ref nm) => Some(nm.abi), - _ => None - } - } - /// Wrong but OK, because the only inlined foreign items are intrinsics. - Some(RootInlinedParent(_)) => Some(abi::RustIntrinsic), - _ => None - }; - match abi { - Some(abi) => abi, - None => panic!("expected foreign mod or inlined parent, found {}", - self.node_to_string(parent)) - } - } - - pub fn get_foreign_vis(&self, id: NodeId) -> Visibility { - let vis = self.expect_foreign_item(id).vis; - match self.find(self.get_parent(id)) { - Some(NodeItem(i)) => vis.inherit_from(i.vis), - _ => vis - } - } - - pub fn expect_item(&self, id: NodeId) -> &'ast Item { - match self.find(id) { - Some(NodeItem(item)) => item, - _ => panic!("expected item, found {}", self.node_to_string(id)) - } - } - - pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem { - match self.find(id) { - Some(NodeTraitItem(item)) => item, - _ => panic!("expected trait item, found {}", self.node_to_string(id)) - } - } - - pub fn expect_struct(&self, id: NodeId) -> &'ast VariantData { - match self.find(id) { - Some(NodeItem(i)) => { - match i.node { - ItemStruct(ref struct_def, _) => struct_def, - _ => panic!("struct ID bound to non-struct") 
- } - } - Some(NodeVariant(variant)) => { - if variant.node.data.is_struct() { - &variant.node.data - } else { - panic!("struct ID bound to enum variant that isn't struct-like") - } - } - _ => panic!(format!("expected struct, found {}", self.node_to_string(id))), - } - } - - pub fn expect_variant(&self, id: NodeId) -> &'ast Variant { - match self.find(id) { - Some(NodeVariant(variant)) => variant, - _ => panic!(format!("expected variant, found {}", self.node_to_string(id))), - } - } - - pub fn expect_foreign_item(&self, id: NodeId) -> &'ast ForeignItem { - match self.find(id) { - Some(NodeForeignItem(item)) => item, - _ => panic!("expected foreign item, found {}", self.node_to_string(id)) - } - } - - pub fn expect_expr(&self, id: NodeId) -> &'ast Expr { - match self.find(id) { - Some(NodeExpr(expr)) => expr, - _ => panic!("expected expr, found {}", self.node_to_string(id)) - } - } - - /// returns the name associated with the given NodeId's AST - pub fn get_path_elem(&self, id: NodeId) -> PathElem { - let node = self.get(id); - match node { - NodeItem(item) => { - match item.node { - ItemMod(_) | ItemForeignMod(_) => { - PathMod(item.name) - } - _ => PathName(item.name) - } - } - NodeForeignItem(i) => PathName(i.name), - NodeImplItem(ii) => PathName(ii.name), - NodeTraitItem(ti) => PathName(ti.name), - NodeVariant(v) => PathName(v.node.name), - NodeLifetime(lt) => PathName(lt.name), - NodeTyParam(tp) => PathName(tp.name), - NodeLocal(&Pat { node: PatIdent(_,l,_), .. 
}) => { - PathName(l.node.name) - }, - _ => panic!("no path elem for {:?}", node) - } - } - - pub fn with_path(&self, id: NodeId, f: F) -> T where - F: FnOnce(PathElems) -> T, - { - self.with_path_next(id, LinkedPath::empty(), f) - } - - pub fn path_to_string(&self, id: NodeId) -> String { - self.with_path(id, |path| path_to_string(path)) - } - - fn path_to_str_with_name(&self, id: NodeId, name: Name) -> String { - self.with_path(id, |path| { - path_to_string(path.chain(Some(PathName(name)))) - }) - } - - fn with_path_next(&self, id: NodeId, next: LinkedPath, f: F) -> T where - F: FnOnce(PathElems) -> T, - { - let parent = self.get_parent(id); - let parent = match self.find_entry(id) { - Some(EntryForeignItem(..)) => { - // Anonymous extern items go in the parent scope. - self.get_parent(parent) - } - // But tuple struct ctors don't have names, so use the path of its - // parent, the struct item. Similarly with closure expressions. - Some(EntryStructCtor(..)) | Some(EntryExpr(..)) => { - return self.with_path_next(parent, next, f); - } - _ => parent - }; - if parent == id { - match self.find_entry(id) { - Some(RootInlinedParent(data)) => { - f(data.path.iter().cloned().chain(next)) - } - _ => f([].iter().cloned().chain(next)) - } - } else { - self.with_path_next(parent, LinkedPath::from(&LinkedPathNode { - node: self.get_path_elem(id), - next: next - }), f) - } - } - - /// Given a node ID, get a list of attributes associated with the AST - /// corresponding to the Node ID - pub fn attrs(&self, id: NodeId) -> &'ast [ast::Attribute] { - let attrs = match self.find(id) { - Some(NodeItem(i)) => Some(&i.attrs[..]), - Some(NodeForeignItem(fi)) => Some(&fi.attrs[..]), - Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]), - Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]), - Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]), - // unit/tuple structs take the attributes straight from - // the struct definition. 
- Some(NodeStructCtor(_)) => { - return self.attrs(self.get_parent(id)); - } - _ => None - }; - attrs.unwrap_or(&[]) - } - - /// Returns an iterator that yields the node id's with paths that - /// match `parts`. (Requires `parts` is non-empty.) - /// - /// For example, if given `parts` equal to `["bar", "quux"]`, then - /// the iterator will produce node id's for items with paths - /// such as `foo::bar::quux`, `bar::quux`, `other::bar::quux`, and - /// any other such items it can find in the map. - pub fn nodes_matching_suffix<'a>(&'a self, parts: &'a [String]) - -> NodesMatchingSuffix<'a, 'ast> { - NodesMatchingSuffix { - map: self, - item_name: parts.last().unwrap(), - in_which: &parts[..parts.len() - 1], - idx: 0, - } - } - - pub fn opt_span(&self, id: NodeId) -> Option { - let sp = match self.find(id) { - Some(NodeItem(item)) => item.span, - Some(NodeForeignItem(foreign_item)) => foreign_item.span, - Some(NodeTraitItem(trait_method)) => trait_method.span, - Some(NodeImplItem(ref impl_item)) => impl_item.span, - Some(NodeVariant(variant)) => variant.span, - Some(NodeExpr(expr)) => expr.span, - Some(NodeStmt(stmt)) => stmt.span, - Some(NodeLocal(pat)) => pat.span, - Some(NodePat(pat)) => pat.span, - Some(NodeBlock(block)) => block.span, - Some(NodeStructCtor(_)) => self.expect_item(self.get_parent(id)).span, - Some(NodeTyParam(ty_param)) => ty_param.span, - _ => return None, - }; - Some(sp) - } - - pub fn span(&self, id: NodeId) -> Span { - self.opt_span(id) - .unwrap_or_else(|| panic!("AstMap.span: could not find span for id {:?}", id)) - } - - pub fn span_if_local(&self, id: DefId) -> Option { - self.as_local_node_id(id).map(|id| self.span(id)) - } - - pub fn def_id_span(&self, def_id: DefId, fallback: Span) -> Span { - if let Some(node_id) = self.as_local_node_id(def_id) { - self.opt_span(node_id).unwrap_or(fallback) - } else { - fallback - } - } - - pub fn node_to_string(&self, id: NodeId) -> String { - node_id_to_string(self, id, true) - } - - pub fn 
node_to_user_string(&self, id: NodeId) -> String { - node_id_to_string(self, id, false) - } -} - -pub struct NodesMatchingSuffix<'a, 'ast:'a> { - map: &'a Map<'ast>, - item_name: &'a String, - in_which: &'a [String], - idx: NodeId, -} - -impl<'a, 'ast> NodesMatchingSuffix<'a, 'ast> { - /// Returns true only if some suffix of the module path for parent - /// matches `self.in_which`. - /// - /// In other words: let `[x_0,x_1,...,x_k]` be `self.in_which`; - /// returns true if parent's path ends with the suffix - /// `x_0::x_1::...::x_k`. - fn suffix_matches(&self, parent: NodeId) -> bool { - let mut cursor = parent; - for part in self.in_which.iter().rev() { - let (mod_id, mod_name) = match find_first_mod_parent(self.map, cursor) { - None => return false, - Some((node_id, name)) => (node_id, name), - }; - if &part[..] != mod_name.as_str() { - return false; - } - cursor = self.map.get_parent(mod_id); - } - return true; - - // Finds the first mod in parent chain for `id`, along with - // that mod's name. - // - // If `id` itself is a mod named `m` with parent `p`, then - // returns `Some(id, m, p)`. If `id` has no mod in its parent - // chain, then returns `None`. - fn find_first_mod_parent<'a>(map: &'a Map, mut id: NodeId) -> Option<(NodeId, Name)> { - loop { - match map.find(id) { - None => return None, - Some(NodeItem(item)) if item_is_mod(&*item) => - return Some((id, item.name)), - _ => {} - } - let parent = map.get_parent(id); - if parent == id { return None } - id = parent; - } - - fn item_is_mod(item: &Item) -> bool { - match item.node { - ItemMod(_) => true, - _ => false, - } - } - } - } - - // We are looking at some node `n` with a given name and parent - // id; do their names match what I am seeking? - fn matches_names(&self, parent_of_n: NodeId, name: Name) -> bool { - name.as_str() == &self.item_name[..] 
&& - self.suffix_matches(parent_of_n) - } -} - -impl<'a, 'ast> Iterator for NodesMatchingSuffix<'a, 'ast> { - type Item = NodeId; - - fn next(&mut self) -> Option { - loop { - let idx = self.idx; - if idx as usize >= self.map.entry_count() { - return None; - } - self.idx += 1; - let name = match self.map.find_entry(idx) { - Some(EntryItem(_, n)) => n.name(), - Some(EntryForeignItem(_, n))=> n.name(), - Some(EntryTraitItem(_, n)) => n.name(), - Some(EntryImplItem(_, n)) => n.name(), - Some(EntryVariant(_, n)) => n.name(), - _ => continue, - }; - if self.matches_names(self.map.get_parent(idx), name) { - return Some(idx) - } - } - } -} - -trait Named { - fn name(&self) -> Name; -} - -impl Named for Spanned { fn name(&self) -> Name { self.node.name() } } - -impl Named for Item { fn name(&self) -> Name { self.name } } -impl Named for ForeignItem { fn name(&self) -> Name { self.name } } -impl Named for Variant_ { fn name(&self) -> Name { self.name } } -impl Named for TraitItem { fn name(&self) -> Name { self.name } } -impl Named for ImplItem { fn name(&self) -> Name { self.name } } - -pub trait FoldOps { - fn new_id(&self, id: NodeId) -> NodeId { - id - } - fn new_def_id(&self, def_id: DefId) -> DefId { - def_id - } - fn new_span(&self, span: Span) -> Span { - span - } -} - -/// A Folder that updates IDs and Span's according to fold_ops. -struct IdAndSpanUpdater { - fold_ops: F -} - -impl Folder for IdAndSpanUpdater { - fn new_id(&mut self, id: NodeId) -> NodeId { - self.fold_ops.new_id(id) - } - - fn new_span(&mut self, span: Span) -> Span { - self.fold_ops.new_span(span) - } -} - -pub fn map_crate<'ast>(forest: &'ast mut Forest) -> Map<'ast> { - let (map, definitions) = { - let mut collector = NodeCollector::root(&forest.krate); - intravisit::walk_crate(&mut collector, &forest.krate); - (collector.map, collector.definitions) - }; - - if log_enabled!(::log::DEBUG) { - // This only makes sense for ordered stores; note the - // enumerate to count the number of entries. 
- let (entries_less_1, _) = map.iter().filter(|&x| { - match *x { - NotPresent => false, - _ => true - } - }).enumerate().last().expect("AST map was empty after folding?"); - - let entries = entries_less_1 + 1; - let vector_length = map.len(); - debug!("The AST map has {} entries with a maximum of {}: occupancy {:.1}%", - entries, vector_length, (entries as f64 / vector_length as f64) * 100.); - } - - Map { - forest: forest, - map: RefCell::new(map), - definitions: RefCell::new(definitions), - } -} - -/// Used for items loaded from external crate that are being inlined into this -/// crate. -pub fn map_decoded_item<'ast, F: FoldOps>(map: &Map<'ast>, - parent_path: Vec, - parent_def_path: DefPath, - ii: InlinedItem, - fold_ops: F) - -> &'ast InlinedItem { - let mut fld = IdAndSpanUpdater { fold_ops: fold_ops }; - let ii = match ii { - II::Item(i) => II::Item(i.map(|i| fld.fold_item(i))), - II::TraitItem(d, ti) => { - II::TraitItem(fld.fold_ops.new_def_id(d), - ti.map(|ti| fld.fold_trait_item(ti))) - } - II::ImplItem(d, ii) => { - II::ImplItem(fld.fold_ops.new_def_id(d), - ii.map(|ii| fld.fold_impl_item(ii))) - } - II::Foreign(i) => II::Foreign(i.map(|i| fld.fold_foreign_item(i))) - }; - - let ii_parent = map.forest.inlined_items.alloc(InlinedParent { - path: parent_path, - ii: ii - }); - - let ii_parent_id = fld.new_id(DUMMY_NODE_ID); - let mut collector = - NodeCollector::extend( - map.krate(), - ii_parent, - ii_parent_id, - parent_def_path, - mem::replace(&mut *map.map.borrow_mut(), vec![]), - mem::replace(&mut *map.definitions.borrow_mut(), Definitions::new())); - ii_parent.ii.visit(&mut collector); - - *map.map.borrow_mut() = collector.map; - *map.definitions.borrow_mut() = collector.definitions; - - &ii_parent.ii -} - -pub trait NodePrinter { - fn print_node(&mut self, node: &Node) -> io::Result<()>; -} - -impl<'a> NodePrinter for pprust::State<'a> { - fn print_node(&mut self, node: &Node) -> io::Result<()> { - match *node { - NodeItem(a) => 
self.print_item(&*a), - NodeForeignItem(a) => self.print_foreign_item(&*a), - NodeTraitItem(a) => self.print_trait_item(a), - NodeImplItem(a) => self.print_impl_item(a), - NodeVariant(a) => self.print_variant(&*a), - NodeExpr(a) => self.print_expr(&*a), - NodeStmt(a) => self.print_stmt(&*a), - NodePat(a) => self.print_pat(&*a), - NodeBlock(a) => self.print_block(&*a), - NodeLifetime(a) => self.print_lifetime(&*a), - NodeTyParam(_) => panic!("cannot print TyParam"), - // these cases do not carry enough information in the - // ast_map to reconstruct their full structure for pretty - // printing. - NodeLocal(_) => panic!("cannot print isolated Local"), - NodeStructCtor(_) => panic!("cannot print isolated StructCtor"), - } - } -} - -fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String { - let id_str = format!(" (id={})", id); - let id_str = if include_id { &id_str[..] } else { "" }; - - match map.find(id) { - Some(NodeItem(item)) => { - let path_str = map.path_to_str_with_name(id, item.name); - let item_str = match item.node { - ItemExternCrate(..) => "extern crate", - ItemUse(..) => "use", - ItemStatic(..) => "static", - ItemConst(..) => "const", - ItemFn(..) => "fn", - ItemMod(..) => "mod", - ItemForeignMod(..) => "foreign mod", - ItemTy(..) => "ty", - ItemEnum(..) => "enum", - ItemStruct(..) => "struct", - ItemTrait(..) => "trait", - ItemImpl(..) => "impl", - ItemDefaultImpl(..) => "default impl", - }; - format!("{} {}{}", item_str, path_str, id_str) - } - Some(NodeForeignItem(item)) => { - let path_str = map.path_to_str_with_name(id, item.name); - format!("foreign item {}{}", path_str, id_str) - } - Some(NodeImplItem(ii)) => { - match ii.node { - ImplItemKind::Const(..) => { - format!("assoc const {} in {}{}", - ii.name, - map.path_to_string(id), - id_str) - } - ImplItemKind::Method(..) 
=> { - format!("method {} in {}{}", - ii.name, - map.path_to_string(id), id_str) - } - ImplItemKind::Type(_) => { - format!("assoc type {} in {}{}", - ii.name, - map.path_to_string(id), - id_str) - } - } - } - Some(NodeTraitItem(ti)) => { - let kind = match ti.node { - ConstTraitItem(..) => "assoc constant", - MethodTraitItem(..) => "trait method", - TypeTraitItem(..) => "assoc type", - }; - - format!("{} {} in {}{}", - kind, - ti.name, - map.path_to_string(id), - id_str) - } - Some(NodeVariant(ref variant)) => { - format!("variant {} in {}{}", - variant.node.name, - map.path_to_string(id), id_str) - } - Some(NodeExpr(ref expr)) => { - format!("expr {}{}", pprust::expr_to_string(&**expr), id_str) - } - Some(NodeStmt(ref stmt)) => { - format!("stmt {}{}", pprust::stmt_to_string(&**stmt), id_str) - } - Some(NodeLocal(ref pat)) => { - format!("local {}{}", pprust::pat_to_string(&**pat), id_str) - } - Some(NodePat(ref pat)) => { - format!("pat {}{}", pprust::pat_to_string(&**pat), id_str) - } - Some(NodeBlock(ref block)) => { - format!("block {}{}", pprust::block_to_string(&**block), id_str) - } - Some(NodeStructCtor(_)) => { - format!("struct_ctor {}{}", map.path_to_string(id), id_str) - } - Some(NodeLifetime(ref l)) => { - format!("lifetime {}{}", - pprust::lifetime_to_string(&**l), id_str) - } - Some(NodeTyParam(ref ty_param)) => { - format!("typaram {:?}{}", ty_param, id_str) - } - None => { - format!("unknown node{}", id_str) - } - } -} diff --git a/src/librustc/hir/check_attr.rs b/src/librustc/hir/check_attr.rs new file mode 100644 index 0000000000000..abc35634d15f4 --- /dev/null +++ b/src/librustc/hir/check_attr.rs @@ -0,0 +1,144 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use session::Session; + +use syntax::ast; +use syntax::visit; +use syntax::visit::Visitor; + +#[derive(Copy, Clone, PartialEq)] +enum Target { + Fn, + Struct, + Union, + Enum, + Other, +} + +impl Target { + fn from_item(item: &ast::Item) -> Target { + match item.node { + ast::ItemKind::Fn(..) => Target::Fn, + ast::ItemKind::Struct(..) => Target::Struct, + ast::ItemKind::Union(..) => Target::Union, + ast::ItemKind::Enum(..) => Target::Enum, + _ => Target::Other, + } + } +} + +struct CheckAttrVisitor<'a> { + sess: &'a Session, +} + +impl<'a> CheckAttrVisitor<'a> { + fn check_inline(&self, attr: &ast::Attribute, target: Target) { + if target != Target::Fn { + struct_span_err!(self.sess, attr.span, E0518, "attribute should be applied to function") + .span_label(attr.span, &format!("requires a function")) + .emit(); + } + } + + fn check_repr(&self, attr: &ast::Attribute, target: Target) { + let words = match attr.meta_item_list() { + Some(words) => words, + None => { + return; + } + }; + + let mut conflicting_reprs = 0; + for word in words { + + let name = match word.name() { + Some(word) => word, + None => continue, + }; + + let (message, label) = match &*name.as_str() { + "C" => { + conflicting_reprs += 1; + if target != Target::Struct && + target != Target::Union && + target != Target::Enum { + ("attribute should be applied to struct, enum or union", + "a struct, enum or union") + } else { + continue + } + } + "packed" => { + // Do not increment conflicting_reprs here, because "packed" + // can be used to modify another repr hint + if target != Target::Struct && + target != Target::Union { + ("attribute should be applied to struct or union", + "a struct or union") + } else { + continue + } + } + "simd" => { + conflicting_reprs += 1; + if target != Target::Struct { + ("attribute should be applied to struct", + "a struct") + } else { + continue + } + } + "i8" | "u8" | "i16" | 
"u16" | + "i32" | "u32" | "i64" | "u64" | + "isize" | "usize" => { + conflicting_reprs += 1; + if target != Target::Enum { + ("attribute should be applied to enum", + "an enum") + } else { + continue + } + } + _ => continue, + }; + struct_span_err!(self.sess, attr.span, E0517, "{}", message) + .span_label(attr.span, &format!("requires {}", label)) + .emit(); + } + if conflicting_reprs > 1 { + span_warn!(self.sess, attr.span, E0566, + "conflicting representation hints"); + } + } + + fn check_attribute(&self, attr: &ast::Attribute, target: Target) { + let name: &str = &attr.name().as_str(); + match name { + "inline" => self.check_inline(attr, target), + "repr" => self.check_repr(attr, target), + _ => (), + } + } +} + +impl<'a> Visitor for CheckAttrVisitor<'a> { + fn visit_item(&mut self, item: &ast::Item) { + let target = Target::from_item(item); + for attr in &item.attrs { + self.check_attribute(attr, target); + } + visit::walk_item(self, item); + } +} + +pub fn check_crate(sess: &Session, krate: &ast::Crate) { + visit::walk_crate(&mut CheckAttrVisitor { sess: sess }, krate); +} diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs new file mode 100644 index 0000000000000..b6fce2d6ca0be --- /dev/null +++ b/src/librustc/hir/def.rs @@ -0,0 +1,175 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::def_id::DefId; +use util::nodemap::NodeMap; +use syntax::ast; +use hir; + +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum CtorKind { + // Constructor function automatically created by a tuple struct/variant. + Fn, + // Constructor constant automatically created by a unit struct/variant. 
+ Const, + // Unusable name in value namespace created by a struct variant. + Fictive, +} + +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum Def { + // Type namespace + Mod(DefId), + Struct(DefId), // DefId refers to NodeId of the struct itself + Union(DefId), + Enum(DefId), + Variant(DefId), + Trait(DefId), + TyAlias(DefId), + AssociatedTy(DefId), + PrimTy(hir::PrimTy), + TyParam(DefId), + SelfTy(Option /* trait */, Option /* impl */), + + // Value namespace + Fn(DefId), + Const(DefId), + Static(DefId, bool /* is_mutbl */), + StructCtor(DefId, CtorKind), // DefId refers to NodeId of the struct's constructor + VariantCtor(DefId, CtorKind), + Method(DefId), + AssociatedConst(DefId), + Local(DefId), + Upvar(DefId, // def id of closed over local + usize, // index in the freevars list of the closure + ast::NodeId), // expr node that creates the closure + Label(ast::NodeId), + + // Macro namespace + Macro(DefId), + + // Both namespaces + Err, +} + +/// The result of resolving a path. +/// Before type checking completes, `depth` represents the number of +/// trailing segments which are yet unresolved. Afterwards, if there +/// were no errors, all paths should be fully resolved, with `depth` +/// set to `0` and `base_def` representing the final resolution. 
+/// +/// module::Type::AssocX::AssocY::MethodOrAssocType +/// ^~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +/// base_def depth = 3 +/// +/// ::AssocX::AssocY::MethodOrAssocType +/// ^~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~ +/// base_def depth = 2 +#[derive(Copy, Clone, Debug)] +pub struct PathResolution { + pub base_def: Def, + pub depth: usize +} + +impl PathResolution { + pub fn new(def: Def) -> PathResolution { + PathResolution { base_def: def, depth: 0 } + } + + pub fn kind_name(&self) -> &'static str { + if self.depth != 0 { + "associated item" + } else { + self.base_def.kind_name() + } + } +} + +// Definition mapping +pub type DefMap = NodeMap; +// This is the replacement export map. It maps a module to all of the exports +// within. +pub type ExportMap = NodeMap>; + +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct Export { + pub name: ast::Name, // The name of the target. + pub def: Def, // The definition of the target. +} + +impl CtorKind { + pub fn from_ast(vdata: &ast::VariantData) -> CtorKind { + match *vdata { + ast::VariantData::Tuple(..) => CtorKind::Fn, + ast::VariantData::Unit(..) => CtorKind::Const, + ast::VariantData::Struct(..) => CtorKind::Fictive, + } + } + pub fn from_hir(vdata: &hir::VariantData) -> CtorKind { + match *vdata { + hir::VariantData::Tuple(..) => CtorKind::Fn, + hir::VariantData::Unit(..) => CtorKind::Const, + hir::VariantData::Struct(..) => CtorKind::Fictive, + } + } +} + +impl Def { + pub fn def_id(&self) -> DefId { + match *self { + Def::Fn(id) | Def::Mod(id) | Def::Static(id, _) | + Def::Variant(id) | Def::VariantCtor(id, ..) | Def::Enum(id) | Def::TyAlias(id) | + Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) | + Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) | + Def::AssociatedConst(id) | Def::Local(id) | Def::Upvar(id, ..) | Def::Macro(id) => { + id + } + + Def::Label(..) | + Def::PrimTy(..) | + Def::SelfTy(..) 
| + Def::Err => { + bug!("attempted .def_id() on invalid def: {:?}", self) + } + } + } + + pub fn kind_name(&self) -> &'static str { + match *self { + Def::Fn(..) => "function", + Def::Mod(..) => "module", + Def::Static(..) => "static", + Def::Variant(..) => "variant", + Def::VariantCtor(.., CtorKind::Fn) => "tuple variant", + Def::VariantCtor(.., CtorKind::Const) => "unit variant", + Def::VariantCtor(.., CtorKind::Fictive) => "struct variant", + Def::Enum(..) => "enum", + Def::TyAlias(..) => "type alias", + Def::AssociatedTy(..) => "associated type", + Def::Struct(..) => "struct", + Def::StructCtor(.., CtorKind::Fn) => "tuple struct", + Def::StructCtor(.., CtorKind::Const) => "unit struct", + Def::StructCtor(.., CtorKind::Fictive) => bug!("impossible struct constructor"), + Def::Union(..) => "union", + Def::Trait(..) => "trait", + Def::Method(..) => "method", + Def::Const(..) => "constant", + Def::AssociatedConst(..) => "associated constant", + Def::TyParam(..) => "type parameter", + Def::PrimTy(..) => "builtin type", + Def::Local(..) => "local variable", + Def::Upvar(..) => "closure capture", + Def::Label(..) => "label", + Def::SelfTy(..) => "self type", + Def::Macro(..) => "macro", + Def::Err => "unresolved item", + } + } +} diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs new file mode 100644 index 0000000000000..d3771b1755b16 --- /dev/null +++ b/src/librustc/hir/def_id.rs @@ -0,0 +1,143 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use ty; + +use rustc_data_structures::indexed_vec::Idx; +use serialize::{self, Encoder, Decoder}; + +use std::fmt; +use std::u32; + +#[derive(Clone, Copy, Eq, Ord, PartialOrd, PartialEq, Hash, Debug)] +pub struct CrateNum(u32); + +impl Idx for CrateNum { + fn new(value: usize) -> Self { + assert!(value < (u32::MAX) as usize); + CrateNum(value as u32) + } + + fn index(self) -> usize { + self.0 as usize + } +} + +/// Item definitions in the currently-compiled crate would have the CrateNum +/// LOCAL_CRATE in their DefId. +pub const LOCAL_CRATE: CrateNum = CrateNum(0); + +/// Virtual crate for builtin macros +// FIXME(jseyfried): this is also used for custom derives until proc-macro crates get `CrateNum`s. +pub const BUILTIN_MACROS_CRATE: CrateNum = CrateNum(!0); + +impl CrateNum { + pub fn new(x: usize) -> CrateNum { + assert!(x < (u32::MAX as usize)); + CrateNum(x as u32) + } + + pub fn from_u32(x: u32) -> CrateNum { + CrateNum(x) + } + + pub fn as_usize(&self) -> usize { + self.0 as usize + } + + pub fn as_u32(&self) -> u32 { + self.0 + } +} + +impl fmt::Display for CrateNum { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.0, f) + } +} + +impl serialize::UseSpecializedEncodable for CrateNum { + fn default_encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_u32(self.0) + } +} + +impl serialize::UseSpecializedDecodable for CrateNum { + fn default_decode(d: &mut D) -> Result { + d.read_u32().map(CrateNum) + } +} + +/// A DefIndex is an index into the hir-map for a crate, identifying a +/// particular definition. It should really be considered an interned +/// shorthand for a particular DefPath. 
+#[derive(Clone, Debug, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, + RustcDecodable, Hash, Copy)] +pub struct DefIndex(u32); + +impl DefIndex { + pub fn new(x: usize) -> DefIndex { + assert!(x < (u32::MAX as usize)); + DefIndex(x as u32) + } + + pub fn from_u32(x: u32) -> DefIndex { + DefIndex(x) + } + + pub fn as_usize(&self) -> usize { + self.0 as usize + } + + pub fn as_u32(&self) -> u32 { + self.0 + } +} + +/// The crate root is always assigned index 0 by the AST Map code, +/// thanks to `NodeCollector::new`. +pub const CRATE_DEF_INDEX: DefIndex = DefIndex(0); + +/// A DefId identifies a particular *definition*, by combining a crate +/// index and a def index. +#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, RustcDecodable, Hash, Copy)] +pub struct DefId { + pub krate: CrateNum, + pub index: DefIndex, +} + +impl fmt::Debug for DefId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "DefId {{ krate: {:?}, node: {:?}", + self.krate, self.index)?; + + ty::tls::with_opt(|opt_tcx| { + if let Some(tcx) = opt_tcx { + if let Some(def_path) = tcx.opt_def_path(*self) { + write!(f, " => {}", def_path.to_string(tcx))?; + } + } + Ok(()) + })?; + + write!(f, " }}") + } +} + + +impl DefId { + pub fn local(index: DefIndex) -> DefId { + DefId { krate: LOCAL_CRATE, index: index } + } + + pub fn is_local(&self) -> bool { + self.krate == LOCAL_CRATE + } +} diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs new file mode 100644 index 0000000000000..625bde2ca8b67 --- /dev/null +++ b/src/librustc/hir/intravisit.rs @@ -0,0 +1,1113 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
HIR walker for walking the contents of nodes. +//! +//! **For an overview of the visitor strategy, see the docs on the +//! `super::itemlikevisit::ItemLikeVisitor` trait.** +//! +//! If you have decided to use this visitor, here are some general +//! notes on how to do it: +//! +//! Each overridden visit method has full control over what +//! happens with its node, it can do its own traversal of the node's children, +//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent +//! deeper traversal by doing nothing. +//! +//! When visiting the HIR, the contents of nested items are NOT visited +//! by default. This is different from the AST visitor, which does a deep walk. +//! Hence this module is called `intravisit`; see the method `visit_nested_item` +//! for more details. +//! +//! Note: it is an important invariant that the default visitor walks +//! the body of a function in "execution order" (more concretely, +//! reverse post-order with respect to the CFG implied by the AST), +//! meaning that if AST node A may execute before AST node B, then A +//! is visited first. The borrow checker in particular relies on this +//! property. 
+ +use syntax::abi::Abi; +use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute}; +use syntax::codemap::Spanned; +use syntax_pos::Span; +use hir::*; +use hir::def::Def; +use hir::map::Map; +use super::itemlikevisit::DeepVisitor; + +use std::cmp; +use std::u32; + +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum FnKind<'a> { + /// fn foo() or extern "Abi" fn foo() + ItemFn(Name, &'a Generics, Unsafety, Constness, Abi, &'a Visibility, &'a [Attribute]), + + /// fn foo(&self) + Method(Name, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]), + + /// |x, y| {} + Closure(&'a [Attribute]), +} + +impl<'a> FnKind<'a> { + pub fn attrs(&self) -> &'a [Attribute] { + match *self { + FnKind::ItemFn(.., attrs) => attrs, + FnKind::Method(.., attrs) => attrs, + FnKind::Closure(attrs) => attrs, + } + } +} + +/// Specifies what nested things a visitor wants to visit. The most +/// common choice is `OnlyBodies`, which will cause the visitor to +/// visit fn bodies for fns that it encounters, but skip over nested +/// item-like things. +/// +/// See the comments on `ItemLikeVisitor` for more details on the overall +/// visit strategy. +pub enum NestedVisitorMap<'this, 'tcx: 'this> { + /// Do not visit any nested things. When you add a new + /// "non-nested" thing, you will want to audit such uses to see if + /// they remain valid. + /// + /// Use this if you are only walking some particular kind of tree + /// (i.e., a type, or fn signature) and you don't want to thread a + /// HIR map around. + None, + + /// Do not visit nested item-like things, but visit nested things + /// that are inside of an item-like. + /// + /// **This is the most common choice.** A very commmon pattern is + /// to use `tcx.visit_all_item_likes_in_krate()` as an outer loop, + /// and to have the visitor that visits the contents of each item + /// using this setting. + OnlyBodies(&'this Map<'tcx>), + + /// Visit all nested things, including item-likes. 
+ /// + /// **This is an unusual choice.** It is used when you want to + /// process everything within their lexical context. Typically you + /// kick off the visit by doing `walk_krate()`. + All(&'this Map<'tcx>), +} + +impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> { + /// Returns the map to use for an "intra item-like" thing (if any). + /// e.g., function body. + pub fn intra(self) -> Option<&'this Map<'tcx>> { + match self { + NestedVisitorMap::None => None, + NestedVisitorMap::OnlyBodies(map) => Some(map), + NestedVisitorMap::All(map) => Some(map), + } + } + + /// Returns the map to use for an "item-like" thing (if any). + /// e.g., item, impl-item. + pub fn inter(self) -> Option<&'this Map<'tcx>> { + match self { + NestedVisitorMap::None => None, + NestedVisitorMap::OnlyBodies(_) => None, + NestedVisitorMap::All(map) => Some(map), + } + } +} + +/// Each method of the Visitor trait is a hook to be potentially +/// overridden. Each method's default implementation recursively visits +/// the substructure of the input via the corresponding `walk` method; +/// e.g. the `visit_mod` method by default calls `intravisit::walk_mod`. +/// +/// Note that this visitor does NOT visit nested items by default +/// (this is why the module is called `intravisit`, to distinguish it +/// from the AST's `visit` module, which acts differently). If you +/// simply want to visit all items in the crate in some order, you +/// should call `Crate::visit_all_items`. Otherwise, see the comment +/// on `visit_nested_item` for details on how to visit nested items. +/// +/// If you want to ensure that your code handles every variant +/// explicitly, you need to override each method. (And you also need +/// to monitor future changes to `Visitor` in case a new method with a +/// new default implementation gets introduced.) +pub trait Visitor<'v> : Sized { + /////////////////////////////////////////////////////////////////////////// + // Nested items. 
+ + /// The default versions of the `visit_nested_XXX` routines invoke + /// this method to get a map to use. By selecting an enum variant, + /// you control which kinds of nested HIR are visited; see + /// `NestedVisitorMap` for details. By "nested HIR", we are + /// referring to bits of HIR that are not directly embedded within + /// one another but rather indirectly, through a table in the + /// crate. This is done to control dependencies during incremental + /// compilation: the non-inline bits of HIR can be tracked and + /// hashed separately. + /// + /// **If for some reason you want the nested behavior, but don't + /// have a `Map` at your disposal:** then you should override the + /// `visit_nested_XXX` methods, and override this method to + /// `panic!()`. This way, if a new `visit_nested_XXX` variant is + /// added in the future, we will see the panic in your code and + /// fix it appropriately. + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v>; + + /// Invoked when a nested item is encountered. By default does + /// nothing unless you override `nested_visit_map` to return + /// `Some(_)`, in which case it will walk the item. **You probably + /// don't want to override this method** -- instead, override + /// `nested_visit_map` or use the "shallow" or "deep" visit + /// patterns described on `itemlikevisit::ItemLikeVisitor`. The only + /// reason to override this method is if you want a nested pattern + /// but cannot supply a `Map`; see `nested_visit_map` for advice. + #[allow(unused_variables)] + fn visit_nested_item(&mut self, id: ItemId) { + let opt_item = self.nested_visit_map().inter().map(|map| map.expect_item(id.id)); + if let Some(item) = opt_item { + self.visit_item(item); + } + } + + /// Like `visit_nested_item()`, but for impl items. See + /// `visit_nested_item()` for advice on when to override this + /// method.
+ #[allow(unused_variables)] + fn visit_nested_impl_item(&mut self, id: ImplItemId) { + let opt_item = self.nested_visit_map().inter().map(|map| map.impl_item(id)); + if let Some(item) = opt_item { + self.visit_impl_item(item); + } + } + + /// Invoked to visit the body of a function, method or closure. Like + /// visit_nested_item, does nothing by default unless you override + /// `nested_visit_map` to return `Some(_)`, in which case it will walk the + /// body. + fn visit_body(&mut self, id: ExprId) { + let opt_expr = self.nested_visit_map().intra().map(|map| map.expr(id)); + if let Some(expr) = opt_expr { + self.visit_expr(expr); + } + } + + /// Visit the top-level item and (optionally) nested items / impl items. See + /// `visit_nested_item` for details. + fn visit_item(&mut self, i: &'v Item) { + walk_item(self, i) + } + + /// When invoking `visit_all_item_likes()`, you need to supply an + /// item-like visitor. This method converts a "intra-visit" + /// visitor into an item-like visitor that walks the entire tree. + /// If you use this, you probably don't want to process the + /// contents of nested item-like things, since the outer loop will + /// visit them as well. + fn as_deep_visitor<'s>(&'s mut self) -> DeepVisitor<'s, Self> { + DeepVisitor::new(self) + } + + /////////////////////////////////////////////////////////////////////////// + + fn visit_id(&mut self, _node_id: NodeId) { + // Nothing to do. + } + fn visit_def_mention(&mut self, _def: Def) { + // Nothing to do. + } + fn visit_name(&mut self, _span: Span, _name: Name) { + // Nothing to do. 
+ } + fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: NodeId) { + walk_mod(self, m, n) + } + fn visit_foreign_item(&mut self, i: &'v ForeignItem) { + walk_foreign_item(self, i) + } + fn visit_local(&mut self, l: &'v Local) { + walk_local(self, l) + } + fn visit_block(&mut self, b: &'v Block) { + walk_block(self, b) + } + fn visit_stmt(&mut self, s: &'v Stmt) { + walk_stmt(self, s) + } + fn visit_arm(&mut self, a: &'v Arm) { + walk_arm(self, a) + } + fn visit_pat(&mut self, p: &'v Pat) { + walk_pat(self, p) + } + fn visit_decl(&mut self, d: &'v Decl) { + walk_decl(self, d) + } + fn visit_expr(&mut self, ex: &'v Expr) { + walk_expr(self, ex) + } + fn visit_expr_post(&mut self, _ex: &'v Expr) { + } + fn visit_ty(&mut self, t: &'v Ty) { + walk_ty(self, t) + } + fn visit_generics(&mut self, g: &'v Generics) { + walk_generics(self, g) + } + fn visit_where_predicate(&mut self, predicate: &'v WherePredicate) { + walk_where_predicate(self, predicate) + } + fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: ExprId, s: Span, id: NodeId) { + walk_fn(self, fk, fd, b, s, id) + } + fn visit_trait_item(&mut self, ti: &'v TraitItem) { + walk_trait_item(self, ti) + } + fn visit_impl_item(&mut self, ii: &'v ImplItem) { + walk_impl_item(self, ii) + } + fn visit_impl_item_ref(&mut self, ii: &'v ImplItemRef) { + walk_impl_item_ref(self, ii) + } + fn visit_trait_ref(&mut self, t: &'v TraitRef) { + walk_trait_ref(self, t) + } + fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) { + walk_ty_param_bound(self, bounds) + } + fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) { + walk_poly_trait_ref(self, t, m) + } + fn visit_variant_data(&mut self, + s: &'v VariantData, + _: Name, + _: &'v Generics, + _parent_id: NodeId, + _: Span) { + walk_struct_def(self, s) + } + fn visit_struct_field(&mut self, s: &'v StructField) { + walk_struct_field(self, s) + } + fn visit_enum_def(&mut self, + enum_definition: &'v EnumDef, + generics: &'v Generics, + 
item_id: NodeId, + _: Span) { + walk_enum_def(self, enum_definition, generics, item_id) + } + fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) { + walk_variant(self, v, g, item_id) + } + fn visit_lifetime(&mut self, lifetime: &'v Lifetime) { + walk_lifetime(self, lifetime) + } + fn visit_lifetime_def(&mut self, lifetime: &'v LifetimeDef) { + walk_lifetime_def(self, lifetime) + } + fn visit_qpath(&mut self, qpath: &'v QPath, id: NodeId, span: Span) { + walk_qpath(self, qpath, id, span) + } + fn visit_path(&mut self, path: &'v Path, _id: NodeId) { + walk_path(self, path) + } + fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) { + walk_path_segment(self, path_span, path_segment) + } + fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'v PathParameters) { + walk_path_parameters(self, path_span, path_parameters) + } + fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding) { + walk_assoc_type_binding(self, type_binding) + } + fn visit_attribute(&mut self, _attr: &'v Attribute) { + } + fn visit_macro_def(&mut self, macro_def: &'v MacroDef) { + walk_macro_def(self, macro_def) + } + fn visit_vis(&mut self, vis: &'v Visibility) { + walk_vis(self, vis) + } + fn visit_associated_item_kind(&mut self, kind: &'v AssociatedItemKind) { + walk_associated_item_kind(self, kind); + } + fn visit_defaultness(&mut self, defaultness: &'v Defaultness) { + walk_defaultness(self, defaultness); + } +} + +pub fn walk_opt_name<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_name: Option<Name>) { + if let Some(name) = opt_name { + visitor.visit_name(span, name); + } +} + +pub fn walk_opt_sp_name<'v, V: Visitor<'v>>(visitor: &mut V, opt_sp_name: &Option<Spanned<Name>>) { + if let Some(ref sp_name) = *opt_sp_name { + visitor.visit_name(sp_name.span, sp_name.node); + } +} + +/// Walks the contents of a crate. See also `Crate::visit_all_items`.
+pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) { + visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID); + walk_list!(visitor, visit_attribute, &krate.attrs); + walk_list!(visitor, visit_macro_def, &krate.exported_macros); +} + +pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) { + visitor.visit_id(macro_def.id); + visitor.visit_name(macro_def.span, macro_def.name); + walk_opt_name(visitor, macro_def.span, macro_def.imported_from); + walk_list!(visitor, visit_attribute, &macro_def.attrs); +} + +pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_node_id: NodeId) { + visitor.visit_id(mod_node_id); + for &item_id in &module.item_ids { + visitor.visit_nested_item(item_id); + } +} + +pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) { + visitor.visit_id(local.id); + visitor.visit_pat(&local.pat); + walk_list!(visitor, visit_ty, &local.ty); + walk_list!(visitor, visit_expr, &local.init); +} + +pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) { + visitor.visit_id(lifetime.id); + visitor.visit_name(lifetime.span, lifetime.name); +} + +pub fn walk_lifetime_def<'v, V: Visitor<'v>>(visitor: &mut V, lifetime_def: &'v LifetimeDef) { + visitor.visit_lifetime(&lifetime_def.lifetime); + walk_list!(visitor, visit_lifetime, &lifetime_def.bounds); +} + +pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V, + trait_ref: &'v PolyTraitRef, + _modifier: &'v TraitBoundModifier) + where V: Visitor<'v> +{ + walk_list!(visitor, visit_lifetime_def, &trait_ref.bound_lifetimes); + visitor.visit_trait_ref(&trait_ref.trait_ref); +} + +pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef) + where V: Visitor<'v> +{ + visitor.visit_id(trait_ref.ref_id); + visitor.visit_path(&trait_ref.path, trait_ref.ref_id) +} + +pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { + visitor.visit_vis(&item.vis); +
visitor.visit_name(item.span, item.name); + match item.node { + ItemExternCrate(opt_name) => { + visitor.visit_id(item.id); + walk_opt_name(visitor, item.span, opt_name) + } + ItemUse(ref path, _) => { + visitor.visit_id(item.id); + visitor.visit_path(path, item.id); + } + ItemStatic(ref typ, _, ref expr) | + ItemConst(ref typ, ref expr) => { + visitor.visit_id(item.id); + visitor.visit_ty(typ); + visitor.visit_expr(expr); + } + ItemFn(ref declaration, unsafety, constness, abi, ref generics, body_id) => { + visitor.visit_fn(FnKind::ItemFn(item.name, + generics, + unsafety, + constness, + abi, + &item.vis, + &item.attrs), + declaration, + body_id, + item.span, + item.id) + } + ItemMod(ref module) => { + // visit_mod() takes care of visiting the Item's NodeId + visitor.visit_mod(module, item.span, item.id) + } + ItemForeignMod(ref foreign_module) => { + visitor.visit_id(item.id); + walk_list!(visitor, visit_foreign_item, &foreign_module.items); + } + ItemTy(ref typ, ref type_parameters) => { + visitor.visit_id(item.id); + visitor.visit_ty(typ); + visitor.visit_generics(type_parameters) + } + ItemEnum(ref enum_definition, ref type_parameters) => { + visitor.visit_generics(type_parameters); + // visit_enum_def() takes care of visiting the Item's NodeId + visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span) + } + ItemDefaultImpl(_, ref trait_ref) => { + visitor.visit_id(item.id); + visitor.visit_trait_ref(trait_ref) + } + ItemImpl(.., ref type_parameters, ref opt_trait_reference, ref typ, ref impl_item_refs) => { + visitor.visit_id(item.id); + visitor.visit_generics(type_parameters); + walk_list!(visitor, visit_trait_ref, opt_trait_reference); + visitor.visit_ty(typ); + for impl_item_ref in impl_item_refs { + visitor.visit_impl_item_ref(impl_item_ref); + } + } + ItemStruct(ref struct_definition, ref generics) | + ItemUnion(ref struct_definition, ref generics) => { + visitor.visit_generics(generics); + visitor.visit_id(item.id); + 
visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span); + } + ItemTrait(_, ref generics, ref bounds, ref methods) => { + visitor.visit_id(item.id); + visitor.visit_generics(generics); + walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_trait_item, methods); + } + } + walk_list!(visitor, visit_attribute, &item.attrs); +} + +pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V, + enum_definition: &'v EnumDef, + generics: &'v Generics, + item_id: NodeId) { + visitor.visit_id(item_id); + walk_list!(visitor, + visit_variant, + &enum_definition.variants, + generics, + item_id); +} + +pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, + variant: &'v Variant, + generics: &'v Generics, + parent_item_id: NodeId) { + visitor.visit_name(variant.span, variant.node.name); + visitor.visit_variant_data(&variant.node.data, + variant.node.name, + generics, + parent_item_id, + variant.span); + walk_list!(visitor, visit_expr, &variant.node.disr_expr); + walk_list!(visitor, visit_attribute, &variant.node.attrs); +} + +pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) { + visitor.visit_id(typ.id); + + match typ.node { + TySlice(ref ty) => { + visitor.visit_ty(ty) + } + TyPtr(ref mutable_type) => { + visitor.visit_ty(&mutable_type.ty) + } + TyRptr(ref opt_lifetime, ref mutable_type) => { + walk_list!(visitor, visit_lifetime, opt_lifetime); + visitor.visit_ty(&mutable_type.ty) + } + TyNever => {}, + TyTup(ref tuple_element_types) => { + walk_list!(visitor, visit_ty, tuple_element_types); + } + TyBareFn(ref function_declaration) => { + walk_fn_decl(visitor, &function_declaration.decl); + walk_list!(visitor, visit_lifetime_def, &function_declaration.lifetimes); + } + TyPath(ref qpath) => { + visitor.visit_qpath(qpath, typ.id, typ.span); + } + TyObjectSum(ref ty, ref bounds) => { + visitor.visit_ty(ty); + walk_list!(visitor, visit_ty_param_bound, bounds); + } + TyArray(ref ty, ref expression) => { + 
visitor.visit_ty(ty); + visitor.visit_expr(expression) + } + TyPolyTraitRef(ref bounds) => { + walk_list!(visitor, visit_ty_param_bound, bounds); + } + TyImplTrait(ref bounds) => { + walk_list!(visitor, visit_ty_param_bound, bounds); + } + TyTypeof(ref expression) => { + visitor.visit_expr(expression) + } + TyInfer => {} + } +} + +pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath, id: NodeId, span: Span) { + match *qpath { + QPath::Resolved(ref maybe_qself, ref path) => { + if let Some(ref qself) = *maybe_qself { + visitor.visit_ty(qself); + } + visitor.visit_path(path, id) + } + QPath::TypeRelative(ref qself, ref segment) => { + visitor.visit_ty(qself); + visitor.visit_path_segment(span, segment); + } + } +} + +pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) { + visitor.visit_def_mention(path.def); + for segment in &path.segments { + visitor.visit_path_segment(path.span, segment); + } +} + +pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, + path_span: Span, + segment: &'v PathSegment) { + visitor.visit_name(path_span, segment.name); + visitor.visit_path_parameters(path_span, &segment.parameters); +} + +pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V, + _path_span: Span, + path_parameters: &'v PathParameters) { + match *path_parameters { + AngleBracketedParameters(ref data) => { + walk_list!(visitor, visit_ty, &data.types); + walk_list!(visitor, visit_lifetime, &data.lifetimes); + walk_list!(visitor, visit_assoc_type_binding, &data.bindings); + } + ParenthesizedParameters(ref data) => { + walk_list!(visitor, visit_ty, &data.inputs); + walk_list!(visitor, visit_ty, &data.output); + } + } +} + +pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V, + type_binding: &'v TypeBinding) { + visitor.visit_id(type_binding.id); + visitor.visit_name(type_binding.span, type_binding.name); + visitor.visit_ty(&type_binding.ty); +} + +pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: 
&'v Pat) { + visitor.visit_id(pattern.id); + match pattern.node { + PatKind::TupleStruct(ref qpath, ref children, _) => { + visitor.visit_qpath(qpath, pattern.id, pattern.span); + walk_list!(visitor, visit_pat, children); + } + PatKind::Path(ref qpath) => { + visitor.visit_qpath(qpath, pattern.id, pattern.span); + } + PatKind::Struct(ref qpath, ref fields, _) => { + visitor.visit_qpath(qpath, pattern.id, pattern.span); + for field in fields { + visitor.visit_name(field.span, field.node.name); + visitor.visit_pat(&field.node.pat) + } + } + PatKind::Tuple(ref tuple_elements, _) => { + walk_list!(visitor, visit_pat, tuple_elements); + } + PatKind::Box(ref subpattern) | + PatKind::Ref(ref subpattern, _) => { + visitor.visit_pat(subpattern) + } + PatKind::Binding(_, def_id, ref pth1, ref optional_subpattern) => { + visitor.visit_def_mention(Def::Local(def_id)); + visitor.visit_name(pth1.span, pth1.node); + walk_list!(visitor, visit_pat, optional_subpattern); + } + PatKind::Lit(ref expression) => visitor.visit_expr(expression), + PatKind::Range(ref lower_bound, ref upper_bound) => { + visitor.visit_expr(lower_bound); + visitor.visit_expr(upper_bound) + } + PatKind::Wild => (), + PatKind::Slice(ref prepatterns, ref slice_pattern, ref postpatterns) => { + walk_list!(visitor, visit_pat, prepatterns); + walk_list!(visitor, visit_pat, slice_pattern); + walk_list!(visitor, visit_pat, postpatterns); + } + } +} + +pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) { + visitor.visit_id(foreign_item.id); + visitor.visit_vis(&foreign_item.vis); + visitor.visit_name(foreign_item.span, foreign_item.name); + + match foreign_item.node { + ForeignItemFn(ref function_declaration, ref generics) => { + walk_fn_decl(visitor, function_declaration); + visitor.visit_generics(generics) + } + ForeignItemStatic(ref typ, _) => visitor.visit_ty(typ), + } + + walk_list!(visitor, visit_attribute, &foreign_item.attrs); +} + +pub fn walk_ty_param_bound<'v, V: 
Visitor<'v>>(visitor: &mut V, bound: &'v TyParamBound) { + match *bound { + TraitTyParamBound(ref typ, ref modifier) => { + visitor.visit_poly_trait_ref(typ, modifier); + } + RegionTyParamBound(ref lifetime) => { + visitor.visit_lifetime(lifetime); + } + } +} + +pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) { + for param in &generics.ty_params { + visitor.visit_id(param.id); + visitor.visit_name(param.span, param.name); + walk_list!(visitor, visit_ty_param_bound, &param.bounds); + walk_list!(visitor, visit_ty, &param.default); + } + walk_list!(visitor, visit_lifetime_def, &generics.lifetimes); + visitor.visit_id(generics.where_clause.id); + walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates); +} + +pub fn walk_where_predicate<'v, V: Visitor<'v>>( + visitor: &mut V, + predicate: &'v WherePredicate) +{ + match predicate { + &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty, + ref bounds, + ref bound_lifetimes, + ..}) => { + visitor.visit_ty(bounded_ty); + walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_lifetime_def, bound_lifetimes); + } + &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime, + ref bounds, + ..}) => { + visitor.visit_lifetime(lifetime); + walk_list!(visitor, visit_lifetime, bounds); + } + &WherePredicate::EqPredicate(WhereEqPredicate{id, + ref path, + ref ty, + ..}) => { + visitor.visit_id(id); + visitor.visit_path(path, id); + visitor.visit_ty(ty); + } + } +} + +pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FunctionRetTy) { + if let Return(ref output_ty) = *ret_ty { + visitor.visit_ty(output_ty) + } +} + +pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) { + for argument in &function_declaration.inputs { + visitor.visit_id(argument.id); + visitor.visit_pat(&argument.pat); + visitor.visit_ty(&argument.ty) + } + walk_fn_ret_ty(visitor, &function_declaration.output) +} +
+pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) { + for argument in &function_declaration.inputs { + visitor.visit_id(argument.id); + visitor.visit_ty(&argument.ty) + } + walk_fn_ret_ty(visitor, &function_declaration.output) +} + +pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) { + match function_kind { + FnKind::ItemFn(_, generics, ..) => { + visitor.visit_generics(generics); + } + FnKind::Method(_, sig, ..) => { + visitor.visit_generics(&sig.generics); + } + FnKind::Closure(_) => {} + } +} + +pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V, + function_kind: FnKind<'v>, + function_declaration: &'v FnDecl, + body_id: ExprId, + _span: Span, + id: NodeId) { + visitor.visit_id(id); + walk_fn_decl(visitor, function_declaration); + walk_fn_kind(visitor, function_kind); + visitor.visit_body(body_id) +} + +pub fn walk_fn_with_body<'v, V: Visitor<'v>>(visitor: &mut V, + function_kind: FnKind<'v>, + function_declaration: &'v FnDecl, + body: &'v Expr, + _span: Span, + id: NodeId) { + visitor.visit_id(id); + walk_fn_decl(visitor, function_declaration); + walk_fn_kind(visitor, function_kind); + visitor.visit_expr(body) +} + +pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) { + visitor.visit_name(trait_item.span, trait_item.name); + walk_list!(visitor, visit_attribute, &trait_item.attrs); + match trait_item.node { + ConstTraitItem(ref ty, ref default) => { + visitor.visit_id(trait_item.id); + visitor.visit_ty(ty); + walk_list!(visitor, visit_expr, default); + } + MethodTraitItem(ref sig, None) => { + visitor.visit_id(trait_item.id); + visitor.visit_generics(&sig.generics); + walk_fn_decl(visitor, &sig.decl); + } + MethodTraitItem(ref sig, Some(body_id)) => { + visitor.visit_fn(FnKind::Method(trait_item.name, + sig, + None, + &trait_item.attrs), + &sig.decl, + body_id, + trait_item.span, + trait_item.id); + } + TypeTraitItem(ref bounds, ref default) => { 
+ visitor.visit_id(trait_item.id); + walk_list!(visitor, visit_ty_param_bound, bounds); + walk_list!(visitor, visit_ty, default); + } + } +} + +pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) { + // NB: Deliberately force a compilation error if/when new fields are added. + let ImplItem { id: _, name, ref vis, ref defaultness, ref attrs, ref node, span } = *impl_item; + + visitor.visit_name(span, name); + visitor.visit_vis(vis); + visitor.visit_defaultness(defaultness); + walk_list!(visitor, visit_attribute, attrs); + match *node { + ImplItemKind::Const(ref ty, ref expr) => { + visitor.visit_id(impl_item.id); + visitor.visit_ty(ty); + visitor.visit_expr(expr); + } + ImplItemKind::Method(ref sig, body_id) => { + visitor.visit_fn(FnKind::Method(impl_item.name, + sig, + Some(&impl_item.vis), + &impl_item.attrs), + &sig.decl, + body_id, + impl_item.span, + impl_item.id); + } + ImplItemKind::Type(ref ty) => { + visitor.visit_id(impl_item.id); + visitor.visit_ty(ty); + } + } +} + +pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &'v ImplItemRef) { + // NB: Deliberately force a compilation error if/when new fields are added. 
+ let ImplItemRef { id, name, ref kind, span, ref vis, ref defaultness } = *impl_item_ref; + visitor.visit_nested_impl_item(id); + visitor.visit_name(span, name); + visitor.visit_associated_item_kind(kind); + visitor.visit_vis(vis); + visitor.visit_defaultness(defaultness); +} + + +pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) { + visitor.visit_id(struct_definition.id()); + walk_list!(visitor, visit_struct_field, struct_definition.fields()); +} + +pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) { + visitor.visit_id(struct_field.id); + visitor.visit_vis(&struct_field.vis); + visitor.visit_name(struct_field.span, struct_field.name); + visitor.visit_ty(&struct_field.ty); + walk_list!(visitor, visit_attribute, &struct_field.attrs); +} + +pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) { + visitor.visit_id(block.id); + walk_list!(visitor, visit_stmt, &block.stmts); + walk_list!(visitor, visit_expr, &block.expr); +} + +pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) { + match statement.node { + StmtDecl(ref declaration, id) => { + visitor.visit_id(id); + visitor.visit_decl(declaration) + } + StmtExpr(ref expression, id) | + StmtSemi(ref expression, id) => { + visitor.visit_id(id); + visitor.visit_expr(expression) + } + } +} + +pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) { + match declaration.node { + DeclLocal(ref local) => visitor.visit_local(local), + DeclItem(item) => visitor.visit_nested_item(item), + } +} + +pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { + visitor.visit_id(expression.id); + match expression.node { + ExprBox(ref subexpression) => { + visitor.visit_expr(subexpression) + } + ExprArray(ref subexpressions) => { + walk_list!(visitor, visit_expr, subexpressions); + } + ExprRepeat(ref element, ref count) => { + visitor.visit_expr(element); + 
visitor.visit_expr(count) + } + ExprStruct(ref qpath, ref fields, ref optional_base) => { + visitor.visit_qpath(qpath, expression.id, expression.span); + for field in fields { + visitor.visit_name(field.name.span, field.name.node); + visitor.visit_expr(&field.expr) + } + walk_list!(visitor, visit_expr, optional_base); + } + ExprTup(ref subexpressions) => { + walk_list!(visitor, visit_expr, subexpressions); + } + ExprCall(ref callee_expression, ref arguments) => { + walk_list!(visitor, visit_expr, arguments); + visitor.visit_expr(callee_expression) + } + ExprMethodCall(ref name, ref types, ref arguments) => { + visitor.visit_name(name.span, name.node); + walk_list!(visitor, visit_expr, arguments); + walk_list!(visitor, visit_ty, types); + } + ExprBinary(_, ref left_expression, ref right_expression) => { + visitor.visit_expr(left_expression); + visitor.visit_expr(right_expression) + } + ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => { + visitor.visit_expr(subexpression) + } + ExprLit(_) => {} + ExprCast(ref subexpression, ref typ) | ExprType(ref subexpression, ref typ) => { + visitor.visit_expr(subexpression); + visitor.visit_ty(typ) + } + ExprIf(ref head_expression, ref if_block, ref optional_else) => { + visitor.visit_expr(head_expression); + visitor.visit_block(if_block); + walk_list!(visitor, visit_expr, optional_else); + } + ExprWhile(ref subexpression, ref block, ref opt_sp_name) => { + visitor.visit_expr(subexpression); + visitor.visit_block(block); + walk_opt_sp_name(visitor, opt_sp_name); + } + ExprLoop(ref block, ref opt_sp_name, _) => { + visitor.visit_block(block); + walk_opt_sp_name(visitor, opt_sp_name); + } + ExprMatch(ref subexpression, ref arms, _) => { + visitor.visit_expr(subexpression); + walk_list!(visitor, visit_arm, arms); + } + ExprClosure(_, ref function_declaration, body, _fn_decl_span) => { + visitor.visit_fn(FnKind::Closure(&expression.attrs), + function_declaration, + body, + expression.span, + expression.id) + } + 
ExprBlock(ref block) => visitor.visit_block(block), + ExprAssign(ref left_hand_expression, ref right_hand_expression) => { + visitor.visit_expr(right_hand_expression); + visitor.visit_expr(left_hand_expression) + } + ExprAssignOp(_, ref left_expression, ref right_expression) => { + visitor.visit_expr(right_expression); + visitor.visit_expr(left_expression) + } + ExprField(ref subexpression, ref name) => { + visitor.visit_expr(subexpression); + visitor.visit_name(name.span, name.node); + } + ExprTupField(ref subexpression, _) => { + visitor.visit_expr(subexpression); + } + ExprIndex(ref main_expression, ref index_expression) => { + visitor.visit_expr(main_expression); + visitor.visit_expr(index_expression) + } + ExprPath(ref qpath) => { + visitor.visit_qpath(qpath, expression.id, expression.span); + } + ExprBreak(None, ref opt_expr) => { + walk_list!(visitor, visit_expr, opt_expr); + } + ExprBreak(Some(label), ref opt_expr) => { + visitor.visit_def_mention(Def::Label(label.loop_id)); + visitor.visit_name(label.span, label.name); + walk_list!(visitor, visit_expr, opt_expr); + } + ExprAgain(None) => {} + ExprAgain(Some(label)) => { + visitor.visit_def_mention(Def::Label(label.loop_id)); + visitor.visit_name(label.span, label.name); + } + ExprRet(ref optional_expression) => { + walk_list!(visitor, visit_expr, optional_expression); + } + ExprInlineAsm(_, ref outputs, ref inputs) => { + for output in outputs { + visitor.visit_expr(output) + } + for input in inputs { + visitor.visit_expr(input) + } + } + } + + visitor.visit_expr_post(expression) +} + +pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) { + walk_list!(visitor, visit_pat, &arm.pats); + walk_list!(visitor, visit_expr, &arm.guard); + visitor.visit_expr(&arm.body); + walk_list!(visitor, visit_attribute, &arm.attrs); +} + +pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) { + if let Visibility::Restricted { ref path, id } = *vis { + visitor.visit_id(id); + 
visitor.visit_path(path, id) + } +} + +pub fn walk_associated_item_kind<'v, V: Visitor<'v>>(_: &mut V, _: &'v AssociatedItemKind) { + // No visitable content here: this fn exists so you can call it if + // the right thing to do, should content be added in the future, + // would be to walk it. +} + +pub fn walk_defaultness<'v, V: Visitor<'v>>(_: &mut V, _: &'v Defaultness) { + // No visitable content here: this fn exists so you can call it if + // the right thing to do, should content be added in the future, + // would be to walk it. +} + +#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug, PartialEq, Eq)] +pub struct IdRange { + pub min: NodeId, + pub max: NodeId, +} + +impl IdRange { + pub fn max() -> IdRange { + IdRange { + min: NodeId::from_u32(u32::MAX), + max: NodeId::from_u32(u32::MIN), + } + } + + pub fn empty(&self) -> bool { + self.min >= self.max + } + + pub fn contains(&self, id: NodeId) -> bool { + id >= self.min && id < self.max + } + + pub fn add(&mut self, id: NodeId) { + self.min = cmp::min(self.min, id); + self.max = cmp::max(self.max, NodeId::from_u32(id.as_u32() + 1)); + } + +} + + +pub struct IdRangeComputingVisitor<'a, 'ast: 'a> { + result: IdRange, + map: &'a map::Map<'ast>, +} + +impl<'a, 'ast> IdRangeComputingVisitor<'a, 'ast> { + pub fn new(map: &'a map::Map<'ast>) -> IdRangeComputingVisitor<'a, 'ast> { + IdRangeComputingVisitor { result: IdRange::max(), map: map } + } + + pub fn result(&self) -> IdRange { + self.result + } +} + +impl<'a, 'ast> Visitor<'ast> for IdRangeComputingVisitor<'a, 'ast> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + NestedVisitorMap::OnlyBodies(&self.map) + } + + fn visit_id(&mut self, id: NodeId) { + self.result.add(id); + } +} + +/// Computes the id range for a single fn body, ignoring nested items. 
+pub fn compute_id_range_for_fn_body<'v>(fk: FnKind<'v>, + decl: &'v FnDecl, + body: &'v Expr, + sp: Span, + id: NodeId, + map: &map::Map<'v>) + -> IdRange { + let mut visitor = IdRangeComputingVisitor::new(map); + walk_fn_with_body(&mut visitor, fk, decl, body, sp, id); + visitor.result() +} diff --git a/src/librustc/hir/itemlikevisit.rs b/src/librustc/hir/itemlikevisit.rs new file mode 100644 index 0000000000000..71ef7131440b8 --- /dev/null +++ b/src/librustc/hir/itemlikevisit.rs @@ -0,0 +1,86 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{Item, ImplItem}; +use super::intravisit::Visitor; + +/// The "item-like visitor" visitor defines only the top-level methods +/// that can be invoked by `Crate::visit_all_item_likes()`. Whether +/// this trait is the right one to implement will depend on the +/// overall pattern you need. Here are the three available patterns, +/// in roughly the order of desirability: +/// +/// 1. **Shallow visit**: Get a simple callback for every item (or item-like thing) in the HIR. +/// - Example: find all items with a `#[foo]` attribute on them. +/// - How: Implement `ItemLikeVisitor` and call `tcx.visit_all_item_likes_in_krate()`. +/// - Pro: Efficient; just walks the lists of item-like things, not the nodes themselves. +/// - Pro: Integrates well into dependency tracking. +/// - Con: Don't get information about nesting +/// - Con: Don't have methods for specific bits of HIR, like "on +/// every expr, do this". +/// 2. **Deep visit**: Want to scan for specific kinds of HIR nodes within +/// an item, but don't care about how item-like things are nested +/// within one another. 
+/// - Example: Examine each expression to look for its type and do some check or other. +/// - How: Implement `intravisit::Visitor` and use +/// `tcx.visit_all_item_likes_in_krate(visitor.as_deep_visitor())`. Within +/// your `intravisit::Visitor` impl, implement methods like +/// `visit_expr()`; don't forget to invoke +/// `intravisit::walk_visit_expr()` to keep walking the subparts. +/// - Pro: Visitor methods for any kind of HIR node, not just item-like things. +/// - Pro: Integrates well into dependency tracking. +/// - Con: Don't get information about nesting between items +/// 3. **Nested visit**: Want to visit the whole HIR and you care about the nesting between +/// item-like things. +/// - Example: Lifetime resolution, which wants to bring lifetimes declared on the +/// impl into scope while visiting the impl-items, and then back out again. +/// - How: Implement `intravisit::Visitor` and override the +/// `visit_nested_map()` methods to return +/// `NestedVisitorMap::All`. Walk your crate with +/// `intravisit::walk_crate()` invoked on `tcx.map.krate()`. +/// - Pro: Visitor methods for any kind of HIR node, not just item-like things. +/// - Pro: Preserves nesting information +/// - Con: Does not integrate well into dependency tracking. +/// +/// Note: the methods of `ItemLikeVisitor` intentionally have no +/// defaults, so that as we expand the list of item-like things, we +/// revisit the various visitors to see if they need to change. This +/// is harder to do with `intravisit::Visitor`, so when you add a new +/// `visit_nested_foo()` method, it is recommended that you search for +/// existing `fn visit_nested` methods to see where changes are +/// needed. 
+pub trait ItemLikeVisitor<'hir> { + fn visit_item(&mut self, item: &'hir Item); + fn visit_impl_item(&mut self, impl_item: &'hir ImplItem); +} + +pub struct DeepVisitor<'v, V: 'v> { + visitor: &'v mut V, +} + +impl<'v, 'hir, V> DeepVisitor<'v, V> + where V: Visitor<'hir> + 'v +{ + pub fn new(base: &'v mut V) -> Self { + DeepVisitor { visitor: base } + } +} + +impl<'v, 'hir, V> ItemLikeVisitor<'hir> for DeepVisitor<'v, V> + where V: Visitor<'hir> +{ + fn visit_item(&mut self, item: &'hir Item) { + self.visitor.visit_item(item); + } + + fn visit_impl_item(&mut self, impl_item: &'hir ImplItem) { + self.visitor.visit_impl_item(impl_item); + } +} diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs new file mode 100644 index 0000000000000..615738277bf5c --- /dev/null +++ b/src/librustc/hir/lowering.rs @@ -0,0 +1,2180 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Lowers the AST to the HIR. +// +// Since the AST and HIR are fairly similar, this is mostly a simple procedure, +// much like a fold. Where lowering involves a bit more work things get more +// interesting and there are some invariants you should know about. These mostly +// concern spans and ids. +// +// Spans are assigned to AST nodes during parsing and then are modified during +// expansion to indicate the origin of a node and the process it went through +// being expanded. Ids are assigned to AST nodes just before lowering. +// +// For the simpler lowering steps, ids and spans should be preserved. Unlike +// expansion we do not preserve the process of lowering in the spans, so spans +// should not be modified here. 
When creating a new node (as opposed to +// 'folding' an existing one), then you create a new id using `next_id()`. +// +// You must ensure that ids are unique. That means that you should only use the +// id from an AST node in a single HIR node (you can assume that AST node ids +// are unique). Every new node must have a unique id. Avoid cloning HIR nodes. +// If you do, you must then set the new node's id to a fresh one. +// +// Spans are used for error messages and for tools to map semantics back to +// source code. It is therefore not as important with spans as ids to be strict +// about use (you can't break the compiler by screwing up a span). Obviously, a +// HIR node can only have a single span. But multiple nodes can have the same +// span and spans don't need to be kept in order, etc. Where code is preserved +// by lowering, it should have the same span as in the AST. Where HIR nodes are +// new it is probably best to give a span for the whole AST node being lowered. +// All nodes should have real spans, don't use dummy spans. Tools are likely to +// get confused if the spans from leaf AST nodes occur in multiple places +// in the HIR, especially for multiple identifiers. 
+ +use hir; +use hir::map::Definitions; +use hir::map::definitions::DefPathData; +use hir::def_id::{DefIndex, DefId}; +use hir::def::{Def, PathResolution}; +use session::Session; +use util::nodemap::NodeMap; +use rustc_data_structures::fnv::FnvHashMap; + +use std::collections::BTreeMap; +use std::iter; +use std::mem; + +use syntax::ast::*; +use syntax::errors; +use syntax::ptr::P; +use syntax::codemap::{self, respan, Spanned}; +use syntax::std_inject; +use syntax::symbol::{Symbol, keywords}; +use syntax::util::small_vector::SmallVector; +use syntax::visit::{self, Visitor}; +use syntax_pos::Span; + +pub struct LoweringContext<'a> { + crate_root: Option<&'static str>, + // Use to assign ids to hir nodes that do not directly correspond to an ast node + sess: &'a Session, + // As we walk the AST we must keep track of the current 'parent' def id (in + // the form of a DefIndex) so that if we create a new node which introduces + // a definition, then we can properly create the def id. + parent_def: Option, + exprs: FnvHashMap, + resolver: &'a mut Resolver, + + /// The items being lowered are collected here. + items: BTreeMap, + + impl_items: BTreeMap, +} + +pub trait Resolver { + // Resolve a global hir path generated by the lowerer when expanding `for`, `if let`, etc. + fn resolve_hir_path(&mut self, path: &mut hir::Path, is_value: bool); + + // Obtain the resolution for a node id + fn get_resolution(&mut self, id: NodeId) -> Option; + + // We must keep the set of definitions up to date as we add nodes that weren't in the AST. + // This should only return `None` during testing. + fn definitions(&mut self) -> &mut Definitions; +} + +pub fn lower_crate(sess: &Session, + krate: &Crate, + resolver: &mut Resolver) + -> hir::Crate { + // We're constructing the HIR here; we don't care what we will + // read, since we haven't even constructed the *input* to + // incr. comp. yet. 
+ let _ignore = sess.dep_graph.in_ignore(); + + LoweringContext { + crate_root: std_inject::injected_crate_name(krate), + sess: sess, + parent_def: None, + exprs: FnvHashMap(), + resolver: resolver, + items: BTreeMap::new(), + impl_items: BTreeMap::new(), + }.lower_crate(krate) +} + +#[derive(Copy, Clone, PartialEq, Eq)] +enum ParamMode { + /// Any path in a type context. + Explicit, + /// The `module::Type` in `module::Type::method` in an expression. + Optional +} + +impl<'a> LoweringContext<'a> { + fn lower_crate(mut self, c: &Crate) -> hir::Crate { + self.lower_items(c); + let module = self.lower_mod(&c.module); + let attrs = self.lower_attrs(&c.attrs); + let exported_macros = c.exported_macros.iter().map(|m| self.lower_macro_def(m)).collect(); + + hir::Crate { + module: module, + attrs: attrs, + span: c.span, + exported_macros: exported_macros, + items: self.items, + impl_items: self.impl_items, + exprs: mem::replace(&mut self.exprs, FnvHashMap()), + } + } + + fn lower_items(&mut self, c: &Crate) { + struct ItemLowerer<'lcx, 'interner: 'lcx> { + lctx: &'lcx mut LoweringContext<'interner>, + } + + impl<'lcx, 'interner> Visitor for ItemLowerer<'lcx, 'interner> { + fn visit_item(&mut self, item: &Item) { + let hir_item = self.lctx.lower_item(item); + self.lctx.items.insert(item.id, hir_item); + visit::walk_item(self, item); + } + + fn visit_impl_item(&mut self, item: &ImplItem) { + let id = self.lctx.lower_impl_item_ref(item).id; + let hir_item = self.lctx.lower_impl_item(item); + self.lctx.impl_items.insert(id, hir_item); + visit::walk_impl_item(self, item); + } + } + + let mut item_lowerer = ItemLowerer { lctx: self }; + visit::walk_crate(&mut item_lowerer, c); + } + + fn record_expr(&mut self, expr: hir::Expr) -> hir::ExprId { + let id = hir::ExprId(expr.id); + self.exprs.insert(id, expr); + id + } + + fn next_id(&self) -> NodeId { + self.sess.next_node_id() + } + + fn expect_full_def(&mut self, id: NodeId) -> Def { + 
self.resolver.get_resolution(id).map_or(Def::Err, |pr| { + if pr.depth != 0 { + bug!("path not fully resolved: {:?}", pr); + } + pr.base_def + }) + } + + fn diagnostic(&self) -> &errors::Handler { + self.sess.diagnostic() + } + + fn str_to_ident(&self, s: &'static str) -> Name { + Symbol::gensym(s) + } + + fn allow_internal_unstable(&self, reason: &'static str, mut span: Span) -> Span { + span.expn_id = self.sess.codemap().record_expansion(codemap::ExpnInfo { + call_site: span, + callee: codemap::NameAndSpan { + format: codemap::CompilerDesugaring(Symbol::intern(reason)), + span: Some(span), + allow_internal_unstable: true, + }, + }); + span + } + + fn with_parent_def(&mut self, parent_id: NodeId, f: F) -> T + where F: FnOnce(&mut LoweringContext) -> T + { + let old_def = self.parent_def; + self.parent_def = { + let defs = self.resolver.definitions(); + Some(defs.opt_def_index(parent_id).unwrap()) + }; + + let result = f(self); + + self.parent_def = old_def; + result + } + + fn lower_opt_sp_ident(&mut self, o_id: Option>) -> Option> { + o_id.map(|sp_ident| respan(sp_ident.span, sp_ident.node.name)) + } + + fn lower_label(&mut self, id: NodeId, label: Option>) -> Option { + label.map(|sp_ident| { + hir::Label { + span: sp_ident.span, + name: sp_ident.node.name, + loop_id: match self.expect_full_def(id) { + Def::Label(loop_id) => loop_id, + _ => DUMMY_NODE_ID + } + } + }) + } + + fn lower_attrs(&mut self, attrs: &Vec) -> hir::HirVec { + attrs.clone().into() + } + + fn lower_arm(&mut self, arm: &Arm) -> hir::Arm { + hir::Arm { + attrs: self.lower_attrs(&arm.attrs), + pats: arm.pats.iter().map(|x| self.lower_pat(x)).collect(), + guard: arm.guard.as_ref().map(|ref x| P(self.lower_expr(x))), + body: P(self.lower_expr(&arm.body)), + } + } + + fn lower_ty_binding(&mut self, b: &TypeBinding) -> hir::TypeBinding { + hir::TypeBinding { + id: b.id, + name: b.ident.name, + ty: self.lower_ty(&b.ty), + span: b.span, + } + } + + fn lower_ty(&mut self, t: &Ty) -> P { + P(hir::Ty { 
+ id: t.id, + node: match t.node { + TyKind::Infer | TyKind::ImplicitSelf => hir::TyInfer, + TyKind::Slice(ref ty) => hir::TySlice(self.lower_ty(ty)), + TyKind::Ptr(ref mt) => hir::TyPtr(self.lower_mt(mt)), + TyKind::Rptr(ref region, ref mt) => { + hir::TyRptr(self.lower_opt_lifetime(region), self.lower_mt(mt)) + } + TyKind::BareFn(ref f) => { + hir::TyBareFn(P(hir::BareFnTy { + lifetimes: self.lower_lifetime_defs(&f.lifetimes), + unsafety: self.lower_unsafety(f.unsafety), + abi: f.abi, + decl: self.lower_fn_decl(&f.decl), + })) + } + TyKind::Never => hir::TyNever, + TyKind::Tup(ref tys) => { + hir::TyTup(tys.iter().map(|ty| self.lower_ty(ty)).collect()) + } + TyKind::Paren(ref ty) => { + return self.lower_ty(ty); + } + TyKind::Path(ref qself, ref path) => { + hir::TyPath(self.lower_qpath(t.id, qself, path, ParamMode::Explicit)) + } + TyKind::ObjectSum(ref ty, ref bounds) => { + hir::TyObjectSum(self.lower_ty(ty), self.lower_bounds(bounds)) + } + TyKind::Array(ref ty, ref e) => { + hir::TyArray(self.lower_ty(ty), P(self.lower_expr(e))) + } + TyKind::Typeof(ref expr) => { + hir::TyTypeof(P(self.lower_expr(expr))) + } + TyKind::PolyTraitRef(ref bounds) => { + hir::TyPolyTraitRef(self.lower_bounds(bounds)) + } + TyKind::ImplTrait(ref bounds) => { + hir::TyImplTrait(self.lower_bounds(bounds)) + } + TyKind::Mac(_) => panic!("TyMac should have been expanded by now."), + }, + span: t.span, + }) + } + + fn lower_foreign_mod(&mut self, fm: &ForeignMod) -> hir::ForeignMod { + hir::ForeignMod { + abi: fm.abi, + items: fm.items.iter().map(|x| self.lower_foreign_item(x)).collect(), + } + } + + fn lower_variant(&mut self, v: &Variant) -> hir::Variant { + Spanned { + node: hir::Variant_ { + name: v.node.name.name, + attrs: self.lower_attrs(&v.node.attrs), + data: self.lower_variant_data(&v.node.data), + disr_expr: v.node.disr_expr.as_ref().map(|e| P(self.lower_expr(e))), + }, + span: v.span, + } + } + + fn lower_qpath(&mut self, + id: NodeId, + qself: &Option, + p: &Path, + 
param_mode: ParamMode) + -> hir::QPath { + let qself_position = qself.as_ref().map(|q| q.position); + let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty)); + + let resolution = self.resolver.get_resolution(id) + .unwrap_or(PathResolution::new(Def::Err)); + + let proj_start = p.segments.len() - resolution.depth; + let path = P(hir::Path { + global: p.global, + def: resolution.base_def, + segments: p.segments[..proj_start].iter().enumerate().map(|(i, segment)| { + let param_mode = match (qself_position, param_mode) { + (Some(j), ParamMode::Optional) if i < j => { + // This segment is part of the trait path in a + // qualified path - one of `a`, `b` or `Trait` + // in `::T::U::method`. + ParamMode::Explicit + } + _ => param_mode + }; + self.lower_path_segment(segment, param_mode) + }).collect(), + span: p.span, + }); + + // Simple case, either no projections, or only fully-qualified. + // E.g. `std::mem::size_of` or `::Item`. + if resolution.depth == 0 { + return hir::QPath::Resolved(qself, path); + } + + // Create the innermost type that we're projecting from. + let mut ty = if path.segments.is_empty() { + // If the base path is empty that means there exists a + // syntactical `Self`, e.g. `&i32` in `<&i32>::clone`. + qself.expect("missing QSelf for ::...") + } else { + // Otherwise, the base path is an implicit `Self` type path, + // e.g. `Vec` in `Vec::new` or `::Item` in + // `::Item::default`. + self.ty(p.span, hir::TyPath(hir::QPath::Resolved(qself, path))) + }; + + // Anything after the base path are associated "extensions", + // out of which all but the last one are associated types, + // e.g. for `std::vec::Vec::::IntoIter::Item::clone`: + // * base path is `std::vec::Vec` + // * "extensions" are `IntoIter`, `Item` and `clone` + // * type nodes are: + // 1. `std::vec::Vec` (created above) + // 2. `>::IntoIter` + // 3. 
`<>::IntoIter>::Item` + // * final path is `<<>::IntoIter>::Item>::clone` + for (i, segment) in p.segments.iter().enumerate().skip(proj_start) { + let segment = P(self.lower_path_segment(segment, param_mode)); + let qpath = hir::QPath::TypeRelative(ty, segment); + + // It's finished, return the extension of the right node type. + if i == p.segments.len() - 1 { + return qpath; + } + + // Wrap the associated extension in another type node. + ty = self.ty(p.span, hir::TyPath(qpath)); + } + + // Should've returned in the for loop above. + span_bug!(p.span, "lower_qpath: no final extension segment in {}..{}", + proj_start, p.segments.len()) + } + + fn lower_path_extra(&mut self, + id: NodeId, + p: &Path, + name: Option, + param_mode: ParamMode) + -> hir::Path { + hir::Path { + global: p.global, + def: self.expect_full_def(id), + segments: p.segments.iter().map(|segment| { + self.lower_path_segment(segment, param_mode) + }).chain(name.map(|name| { + hir::PathSegment { + name: name, + parameters: hir::PathParameters::none() + } + })).collect(), + span: p.span, + } + } + + fn lower_path(&mut self, + id: NodeId, + p: &Path, + param_mode: ParamMode) + -> hir::Path { + self.lower_path_extra(id, p, None, param_mode) + } + + fn lower_path_segment(&mut self, + segment: &PathSegment, + param_mode: ParamMode) + -> hir::PathSegment { + let parameters = match segment.parameters { + PathParameters::AngleBracketed(ref data) => { + let data = self.lower_angle_bracketed_parameter_data(data, param_mode); + hir::AngleBracketedParameters(data) + } + PathParameters::Parenthesized(ref data) => + hir::ParenthesizedParameters(self.lower_parenthesized_parameter_data(data)), + }; + + hir::PathSegment { + name: segment.identifier.name, + parameters: parameters, + } + } + + fn lower_angle_bracketed_parameter_data(&mut self, + data: &AngleBracketedParameterData, + param_mode: ParamMode) + -> hir::AngleBracketedParameterData { + let &AngleBracketedParameterData { ref lifetimes, ref types, ref 
bindings } = data; + hir::AngleBracketedParameterData { + lifetimes: self.lower_lifetimes(lifetimes), + types: types.iter().map(|ty| self.lower_ty(ty)).collect(), + infer_types: types.is_empty() && param_mode == ParamMode::Optional, + bindings: bindings.iter().map(|b| self.lower_ty_binding(b)).collect(), + } + } + + fn lower_parenthesized_parameter_data(&mut self, + data: &ParenthesizedParameterData) + -> hir::ParenthesizedParameterData { + let &ParenthesizedParameterData { ref inputs, ref output, span } = data; + hir::ParenthesizedParameterData { + inputs: inputs.iter().map(|ty| self.lower_ty(ty)).collect(), + output: output.as_ref().map(|ty| self.lower_ty(ty)), + span: span, + } + } + + fn lower_local(&mut self, l: &Local) -> P { + P(hir::Local { + id: l.id, + ty: l.ty.as_ref().map(|t| self.lower_ty(t)), + pat: self.lower_pat(&l.pat), + init: l.init.as_ref().map(|e| P(self.lower_expr(e))), + span: l.span, + attrs: l.attrs.clone(), + }) + } + + fn lower_mutability(&mut self, m: Mutability) -> hir::Mutability { + match m { + Mutability::Mutable => hir::MutMutable, + Mutability::Immutable => hir::MutImmutable, + } + } + + fn lower_arg(&mut self, arg: &Arg) -> hir::Arg { + hir::Arg { + id: arg.id, + pat: self.lower_pat(&arg.pat), + ty: self.lower_ty(&arg.ty), + } + } + + fn lower_fn_decl(&mut self, decl: &FnDecl) -> P { + P(hir::FnDecl { + inputs: decl.inputs.iter().map(|x| self.lower_arg(x)).collect(), + output: match decl.output { + FunctionRetTy::Ty(ref ty) => hir::Return(self.lower_ty(ty)), + FunctionRetTy::Default(span) => hir::DefaultReturn(span), + }, + variadic: decl.variadic, + }) + } + + fn lower_ty_param_bound(&mut self, tpb: &TyParamBound) -> hir::TyParamBound { + match *tpb { + TraitTyParamBound(ref ty, modifier) => { + hir::TraitTyParamBound(self.lower_poly_trait_ref(ty), + self.lower_trait_bound_modifier(modifier)) + } + RegionTyParamBound(ref lifetime) => { + hir::RegionTyParamBound(self.lower_lifetime(lifetime)) + } + } + } + + fn lower_ty_param(&mut 
self, tp: &TyParam, add_bounds: &[TyParamBound]) -> hir::TyParam { + let mut name = tp.ident.name; + + // Don't expose `Self` (recovered "keyword used as ident" parse error). + // `rustc::ty` expects `Self` to be only used for a trait's `Self`. + // Instead, use gensym("Self") to create a distinct name that looks the same. + if name == keywords::SelfType.name() { + name = Symbol::gensym("Self"); + } + + let mut bounds = self.lower_bounds(&tp.bounds); + if !add_bounds.is_empty() { + bounds = bounds.into_iter().chain(self.lower_bounds(add_bounds).into_iter()).collect(); + } + + hir::TyParam { + id: tp.id, + name: name, + bounds: bounds, + default: tp.default.as_ref().map(|x| self.lower_ty(x)), + span: tp.span, + pure_wrt_drop: tp.attrs.iter().any(|attr| attr.check_name("may_dangle")), + } + } + + fn lower_ty_params(&mut self, tps: &P<[TyParam]>, add_bounds: &NodeMap>) + -> hir::HirVec { + tps.iter().map(|tp| { + self.lower_ty_param(tp, add_bounds.get(&tp.id).map_or(&[][..], |x| &x)) + }).collect() + } + + fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime { + hir::Lifetime { + id: l.id, + name: l.name, + span: l.span, + } + } + + fn lower_lifetime_def(&mut self, l: &LifetimeDef) -> hir::LifetimeDef { + hir::LifetimeDef { + lifetime: self.lower_lifetime(&l.lifetime), + bounds: self.lower_lifetimes(&l.bounds), + pure_wrt_drop: l.attrs.iter().any(|attr| attr.check_name("may_dangle")), + } + } + + fn lower_lifetimes(&mut self, lts: &Vec) -> hir::HirVec { + lts.iter().map(|l| self.lower_lifetime(l)).collect() + } + + fn lower_lifetime_defs(&mut self, lts: &Vec) -> hir::HirVec { + lts.iter().map(|l| self.lower_lifetime_def(l)).collect() + } + + fn lower_opt_lifetime(&mut self, o_lt: &Option) -> Option { + o_lt.as_ref().map(|lt| self.lower_lifetime(lt)) + } + + fn lower_generics(&mut self, g: &Generics) -> hir::Generics { + // Collect `?Trait` bounds in where clause and move them to parameter definitions. 
+ let mut add_bounds = NodeMap(); + for pred in &g.where_clause.predicates { + if let WherePredicate::BoundPredicate(ref bound_pred) = *pred { + 'next_bound: for bound in &bound_pred.bounds { + if let TraitTyParamBound(_, TraitBoundModifier::Maybe) = *bound { + let report_error = |this: &mut Self| { + this.diagnostic().span_err(bound_pred.bounded_ty.span, + "`?Trait` bounds are only permitted at the \ + point where a type parameter is declared"); + }; + // Check if the where clause type is a plain type parameter. + match bound_pred.bounded_ty.node { + TyKind::Path(None, ref path) + if !path.global && path.segments.len() == 1 && + bound_pred.bound_lifetimes.is_empty() => { + if let Some(Def::TyParam(def_id)) = + self.resolver.get_resolution(bound_pred.bounded_ty.id) + .map(|d| d.base_def) { + if let Some(node_id) = + self.resolver.definitions().as_local_node_id(def_id) { + for ty_param in &g.ty_params { + if node_id == ty_param.id { + add_bounds.entry(ty_param.id).or_insert(Vec::new()) + .push(bound.clone()); + continue 'next_bound; + } + } + } + } + report_error(self) + } + _ => report_error(self) + } + } + } + } + } + + hir::Generics { + ty_params: self.lower_ty_params(&g.ty_params, &add_bounds), + lifetimes: self.lower_lifetime_defs(&g.lifetimes), + where_clause: self.lower_where_clause(&g.where_clause), + span: g.span, + } + } + + fn lower_where_clause(&mut self, wc: &WhereClause) -> hir::WhereClause { + hir::WhereClause { + id: wc.id, + predicates: wc.predicates + .iter() + .map(|predicate| self.lower_where_predicate(predicate)) + .collect(), + } + } + + fn lower_where_predicate(&mut self, pred: &WherePredicate) -> hir::WherePredicate { + match *pred { + WherePredicate::BoundPredicate(WhereBoundPredicate{ ref bound_lifetimes, + ref bounded_ty, + ref bounds, + span}) => { + hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { + bound_lifetimes: self.lower_lifetime_defs(bound_lifetimes), + bounded_ty: self.lower_ty(bounded_ty), + bounds: 
bounds.iter().filter_map(|bound| match *bound { + // Ignore `?Trait` bounds, they were copied into type parameters already. + TraitTyParamBound(_, TraitBoundModifier::Maybe) => None, + _ => Some(self.lower_ty_param_bound(bound)) + }).collect(), + span: span, + }) + } + WherePredicate::RegionPredicate(WhereRegionPredicate{ ref lifetime, + ref bounds, + span}) => { + hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { + span: span, + lifetime: self.lower_lifetime(lifetime), + bounds: bounds.iter().map(|bound| self.lower_lifetime(bound)).collect(), + }) + } + WherePredicate::EqPredicate(WhereEqPredicate{ id, + ref path, + ref ty, + span}) => { + hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { + id: id, + path: self.lower_path(id, path, ParamMode::Explicit), + ty: self.lower_ty(ty), + span: span, + }) + } + } + } + + fn lower_variant_data(&mut self, vdata: &VariantData) -> hir::VariantData { + match *vdata { + VariantData::Struct(ref fields, id) => { + hir::VariantData::Struct(fields.iter() + .enumerate() + .map(|f| self.lower_struct_field(f)) + .collect(), + id) + } + VariantData::Tuple(ref fields, id) => { + hir::VariantData::Tuple(fields.iter() + .enumerate() + .map(|f| self.lower_struct_field(f)) + .collect(), + id) + } + VariantData::Unit(id) => hir::VariantData::Unit(id), + } + } + + fn lower_trait_ref(&mut self, p: &TraitRef) -> hir::TraitRef { + hir::TraitRef { + path: self.lower_path(p.ref_id, &p.path, ParamMode::Explicit), + ref_id: p.ref_id, + } + } + + fn lower_poly_trait_ref(&mut self, p: &PolyTraitRef) -> hir::PolyTraitRef { + hir::PolyTraitRef { + bound_lifetimes: self.lower_lifetime_defs(&p.bound_lifetimes), + trait_ref: self.lower_trait_ref(&p.trait_ref), + span: p.span, + } + } + + fn lower_struct_field(&mut self, (index, f): (usize, &StructField)) -> hir::StructField { + hir::StructField { + span: f.span, + id: f.id, + name: f.ident.map(|ident| ident.name).unwrap_or(Symbol::intern(&index.to_string())), + vis: 
self.lower_visibility(&f.vis), + ty: self.lower_ty(&f.ty), + attrs: self.lower_attrs(&f.attrs), + } + } + + fn lower_field(&mut self, f: &Field) -> hir::Field { + hir::Field { + name: respan(f.ident.span, f.ident.node.name), + expr: P(self.lower_expr(&f.expr)), + span: f.span, + is_shorthand: f.is_shorthand, + } + } + + fn lower_mt(&mut self, mt: &MutTy) -> hir::MutTy { + hir::MutTy { + ty: self.lower_ty(&mt.ty), + mutbl: self.lower_mutability(mt.mutbl), + } + } + + fn lower_bounds(&mut self, bounds: &[TyParamBound]) -> hir::TyParamBounds { + bounds.iter().map(|bound| self.lower_ty_param_bound(bound)).collect() + } + + fn lower_block(&mut self, b: &Block) -> P { + let mut expr = None; + + let mut stmts = b.stmts.iter().flat_map(|s| self.lower_stmt(s)).collect::>(); + if let Some(last) = stmts.pop() { + if let hir::StmtExpr(e, _) = last.node { + expr = Some(e); + } else { + stmts.push(last); + } + } + + P(hir::Block { + id: b.id, + stmts: stmts.into(), + expr: expr, + rules: self.lower_block_check_mode(&b.rules), + span: b.span, + }) + } + + fn lower_item_kind(&mut self, + id: NodeId, + name: &mut Name, + attrs: &hir::HirVec, + vis: &mut hir::Visibility, + i: &ItemKind) + -> hir::Item_ { + match *i { + ItemKind::ExternCrate(string) => hir::ItemExternCrate(string), + ItemKind::Use(ref view_path) => { + let path = match view_path.node { + ViewPathSimple(_, ref path) => path, + ViewPathGlob(ref path) => path, + ViewPathList(ref path, ref path_list_idents) => { + for &Spanned { node: ref import, span } in path_list_idents { + // `use a::{self as x, b as y};` lowers to + // `use a as x; use a::b as y;` + let mut ident = import.name; + let suffix = if ident.name == keywords::SelfValue.name() { + if let Some(last) = path.segments.last() { + ident = last.identifier; + } + None + } else { + Some(ident.name) + }; + + let mut path = self.lower_path_extra(import.id, path, suffix, + ParamMode::Explicit); + path.span = span; + self.items.insert(import.id, hir::Item { + id: 
import.id, + name: import.rename.unwrap_or(ident).name, + attrs: attrs.clone(), + node: hir::ItemUse(P(path), hir::UseKind::Single), + vis: vis.clone(), + span: span, + }); + } + path + } + }; + let path = P(self.lower_path(id, path, ParamMode::Explicit)); + let kind = match view_path.node { + ViewPathSimple(ident, _) => { + *name = ident.name; + hir::UseKind::Single + } + ViewPathGlob(_) => { + hir::UseKind::Glob + } + ViewPathList(..) => { + // Privatize the degenerate import base, used only to check + // the stability of `use a::{};`, to avoid it showing up as + // a reexport by accident when `pub`, e.g. in documentation. + *vis = hir::Inherited; + hir::UseKind::ListStem + } + }; + hir::ItemUse(path, kind) + } + ItemKind::Static(ref t, m, ref e) => { + hir::ItemStatic(self.lower_ty(t), + self.lower_mutability(m), + P(self.lower_expr(e))) + } + ItemKind::Const(ref t, ref e) => { + hir::ItemConst(self.lower_ty(t), P(self.lower_expr(e))) + } + ItemKind::Fn(ref decl, unsafety, constness, abi, ref generics, ref body) => { + let body = self.lower_block(body); + let body = self.expr_block(body, ThinVec::new()); + let body_id = self.record_expr(body); + hir::ItemFn(self.lower_fn_decl(decl), + self.lower_unsafety(unsafety), + self.lower_constness(constness), + abi, + self.lower_generics(generics), + body_id) + } + ItemKind::Mod(ref m) => hir::ItemMod(self.lower_mod(m)), + ItemKind::ForeignMod(ref nm) => hir::ItemForeignMod(self.lower_foreign_mod(nm)), + ItemKind::Ty(ref t, ref generics) => { + hir::ItemTy(self.lower_ty(t), self.lower_generics(generics)) + } + ItemKind::Enum(ref enum_definition, ref generics) => { + hir::ItemEnum(hir::EnumDef { + variants: enum_definition.variants + .iter() + .map(|x| self.lower_variant(x)) + .collect(), + }, + self.lower_generics(generics)) + } + ItemKind::Struct(ref struct_def, ref generics) => { + let struct_def = self.lower_variant_data(struct_def); + hir::ItemStruct(struct_def, self.lower_generics(generics)) + } + ItemKind::Union(ref 
vdata, ref generics) => { + let vdata = self.lower_variant_data(vdata); + hir::ItemUnion(vdata, self.lower_generics(generics)) + } + ItemKind::DefaultImpl(unsafety, ref trait_ref) => { + hir::ItemDefaultImpl(self.lower_unsafety(unsafety), + self.lower_trait_ref(trait_ref)) + } + ItemKind::Impl(unsafety, polarity, ref generics, ref ifce, ref ty, ref impl_items) => { + let new_impl_items = impl_items.iter() + .map(|item| self.lower_impl_item_ref(item)) + .collect(); + let ifce = ifce.as_ref().map(|trait_ref| self.lower_trait_ref(trait_ref)); + hir::ItemImpl(self.lower_unsafety(unsafety), + self.lower_impl_polarity(polarity), + self.lower_generics(generics), + ifce, + self.lower_ty(ty), + new_impl_items) + } + ItemKind::Trait(unsafety, ref generics, ref bounds, ref items) => { + let bounds = self.lower_bounds(bounds); + let items = items.iter().map(|item| self.lower_trait_item(item)).collect(); + hir::ItemTrait(self.lower_unsafety(unsafety), + self.lower_generics(generics), + bounds, + items) + } + ItemKind::Mac(_) => panic!("Shouldn't still be around"), + } + } + + fn lower_trait_item(&mut self, i: &TraitItem) -> hir::TraitItem { + self.with_parent_def(i.id, |this| { + hir::TraitItem { + id: i.id, + name: i.ident.name, + attrs: this.lower_attrs(&i.attrs), + node: match i.node { + TraitItemKind::Const(ref ty, ref default) => { + hir::ConstTraitItem(this.lower_ty(ty), + default.as_ref().map(|x| P(this.lower_expr(x)))) + } + TraitItemKind::Method(ref sig, ref body) => { + hir::MethodTraitItem(this.lower_method_sig(sig), + body.as_ref().map(|x| { + let body = this.lower_block(x); + let expr = this.expr_block(body, ThinVec::new()); + this.record_expr(expr) + })) + } + TraitItemKind::Type(ref bounds, ref default) => { + hir::TypeTraitItem(this.lower_bounds(bounds), + default.as_ref().map(|x| this.lower_ty(x))) + } + TraitItemKind::Macro(..) 
=> panic!("Shouldn't exist any more"), + }, + span: i.span, + } + }) + } + + fn lower_impl_item(&mut self, i: &ImplItem) -> hir::ImplItem { + self.with_parent_def(i.id, |this| { + hir::ImplItem { + id: i.id, + name: i.ident.name, + attrs: this.lower_attrs(&i.attrs), + vis: this.lower_visibility(&i.vis), + defaultness: this.lower_defaultness(i.defaultness, true /* [1] */), + node: match i.node { + ImplItemKind::Const(ref ty, ref expr) => { + hir::ImplItemKind::Const(this.lower_ty(ty), P(this.lower_expr(expr))) + } + ImplItemKind::Method(ref sig, ref body) => { + let body = this.lower_block(body); + let expr = this.expr_block(body, ThinVec::new()); + let expr_id = this.record_expr(expr); + hir::ImplItemKind::Method(this.lower_method_sig(sig), expr_id) + } + ImplItemKind::Type(ref ty) => hir::ImplItemKind::Type(this.lower_ty(ty)), + ImplItemKind::Macro(..) => panic!("Shouldn't exist any more"), + }, + span: i.span, + } + }) + + // [1] since `default impl` is not yet implemented, this is always true in impls + } + + fn lower_impl_item_ref(&mut self, i: &ImplItem) -> hir::ImplItemRef { + hir::ImplItemRef { + id: hir::ImplItemId { node_id: i.id }, + name: i.ident.name, + span: i.span, + vis: self.lower_visibility(&i.vis), + defaultness: self.lower_defaultness(i.defaultness, true /* [1] */), + kind: match i.node { + ImplItemKind::Const(..) => hir::AssociatedItemKind::Const, + ImplItemKind::Type(..) => hir::AssociatedItemKind::Type, + ImplItemKind::Method(ref sig, _) => hir::AssociatedItemKind::Method { + has_self: sig.decl.get_self().is_some(), + }, + ImplItemKind::Macro(..) 
=> unimplemented!(), + }, + } + + // [1] since `default impl` is not yet implemented, this is always true in impls + } + + fn lower_mod(&mut self, m: &Mod) -> hir::Mod { + hir::Mod { + inner: m.inner, + item_ids: m.items.iter().flat_map(|x| self.lower_item_id(x)).collect(), + } + } + + fn lower_macro_def(&mut self, m: &MacroDef) -> hir::MacroDef { + hir::MacroDef { + name: m.ident.name, + attrs: self.lower_attrs(&m.attrs), + id: m.id, + span: m.span, + imported_from: m.imported_from.map(|x| x.name), + allow_internal_unstable: m.allow_internal_unstable, + body: m.body.clone().into(), + } + } + + fn lower_item_id(&mut self, i: &Item) -> SmallVector { + if let ItemKind::Use(ref view_path) = i.node { + if let ViewPathList(_, ref imports) = view_path.node { + return iter::once(i.id).chain(imports.iter().map(|import| import.node.id)) + .map(|id| hir::ItemId { id: id }).collect(); + } + } + SmallVector::one(hir::ItemId { id: i.id }) + } + + pub fn lower_item(&mut self, i: &Item) -> hir::Item { + let mut name = i.ident.name; + let attrs = self.lower_attrs(&i.attrs); + let mut vis = self.lower_visibility(&i.vis); + let node = self.with_parent_def(i.id, |this| { + this.lower_item_kind(i.id, &mut name, &attrs, &mut vis, &i.node) + }); + + hir::Item { + id: i.id, + name: name, + attrs: attrs, + node: node, + vis: vis, + span: i.span, + } + } + + fn lower_foreign_item(&mut self, i: &ForeignItem) -> hir::ForeignItem { + self.with_parent_def(i.id, |this| { + hir::ForeignItem { + id: i.id, + name: i.ident.name, + attrs: this.lower_attrs(&i.attrs), + node: match i.node { + ForeignItemKind::Fn(ref fdec, ref generics) => { + hir::ForeignItemFn(this.lower_fn_decl(fdec), this.lower_generics(generics)) + } + ForeignItemKind::Static(ref t, m) => { + hir::ForeignItemStatic(this.lower_ty(t), m) + } + }, + vis: this.lower_visibility(&i.vis), + span: i.span, + } + }) + } + + fn lower_method_sig(&mut self, sig: &MethodSig) -> hir::MethodSig { + let hir_sig = hir::MethodSig { + generics: 
self.lower_generics(&sig.generics), + abi: sig.abi, + unsafety: self.lower_unsafety(sig.unsafety), + constness: self.lower_constness(sig.constness), + decl: self.lower_fn_decl(&sig.decl), + }; + // Check for `self: _` and `self: &_` + if let Some(SelfKind::Explicit(..)) = sig.decl.get_self().map(|eself| eself.node) { + match hir_sig.decl.get_self().map(|eself| eself.node) { + Some(hir::SelfKind::Value(..)) | Some(hir::SelfKind::Region(..)) => { + self.diagnostic().span_err(sig.decl.inputs[0].ty.span, + "the type placeholder `_` is not allowed within types on item signatures"); + } + _ => {} + } + } + hir_sig + } + + fn lower_unsafety(&mut self, u: Unsafety) -> hir::Unsafety { + match u { + Unsafety::Unsafe => hir::Unsafety::Unsafe, + Unsafety::Normal => hir::Unsafety::Normal, + } + } + + fn lower_constness(&mut self, c: Spanned) -> hir::Constness { + match c.node { + Constness::Const => hir::Constness::Const, + Constness::NotConst => hir::Constness::NotConst, + } + } + + fn lower_unop(&mut self, u: UnOp) -> hir::UnOp { + match u { + UnOp::Deref => hir::UnDeref, + UnOp::Not => hir::UnNot, + UnOp::Neg => hir::UnNeg, + } + } + + fn lower_binop(&mut self, b: BinOp) -> hir::BinOp { + Spanned { + node: match b.node { + BinOpKind::Add => hir::BiAdd, + BinOpKind::Sub => hir::BiSub, + BinOpKind::Mul => hir::BiMul, + BinOpKind::Div => hir::BiDiv, + BinOpKind::Rem => hir::BiRem, + BinOpKind::And => hir::BiAnd, + BinOpKind::Or => hir::BiOr, + BinOpKind::BitXor => hir::BiBitXor, + BinOpKind::BitAnd => hir::BiBitAnd, + BinOpKind::BitOr => hir::BiBitOr, + BinOpKind::Shl => hir::BiShl, + BinOpKind::Shr => hir::BiShr, + BinOpKind::Eq => hir::BiEq, + BinOpKind::Lt => hir::BiLt, + BinOpKind::Le => hir::BiLe, + BinOpKind::Ne => hir::BiNe, + BinOpKind::Ge => hir::BiGe, + BinOpKind::Gt => hir::BiGt, + }, + span: b.span, + } + } + + fn lower_pat(&mut self, p: &Pat) -> P { + P(hir::Pat { + id: p.id, + node: match p.node { + PatKind::Wild => hir::PatKind::Wild, + PatKind::Ident(ref 
binding_mode, pth1, ref sub) => { + self.with_parent_def(p.id, |this| { + match this.resolver.get_resolution(p.id).map(|d| d.base_def) { + // `None` can occur in body-less function signatures + def @ None | def @ Some(Def::Local(_)) => { + let def_id = def.map(|d| d.def_id()).unwrap_or_else(|| { + this.resolver.definitions().local_def_id(p.id) + }); + hir::PatKind::Binding(this.lower_binding_mode(binding_mode), + def_id, + respan(pth1.span, pth1.node.name), + sub.as_ref().map(|x| this.lower_pat(x))) + } + Some(def) => { + hir::PatKind::Path(hir::QPath::Resolved(None, P(hir::Path { + span: pth1.span, + global: false, + def: def, + segments: hir_vec![ + hir::PathSegment::from_name(pth1.node.name) + ], + }))) + } + } + }) + } + PatKind::Lit(ref e) => hir::PatKind::Lit(P(self.lower_expr(e))), + PatKind::TupleStruct(ref path, ref pats, ddpos) => { + let qpath = self.lower_qpath(p.id, &None, path, ParamMode::Optional); + hir::PatKind::TupleStruct(qpath, + pats.iter().map(|x| self.lower_pat(x)).collect(), + ddpos) + } + PatKind::Path(ref qself, ref path) => { + hir::PatKind::Path(self.lower_qpath(p.id, qself, path, ParamMode::Optional)) + } + PatKind::Struct(ref path, ref fields, etc) => { + let qpath = self.lower_qpath(p.id, &None, path, ParamMode::Optional); + + let fs = fields.iter() + .map(|f| { + Spanned { + span: f.span, + node: hir::FieldPat { + name: f.node.ident.name, + pat: self.lower_pat(&f.node.pat), + is_shorthand: f.node.is_shorthand, + }, + } + }) + .collect(); + hir::PatKind::Struct(qpath, fs, etc) + } + PatKind::Tuple(ref elts, ddpos) => { + hir::PatKind::Tuple(elts.iter().map(|x| self.lower_pat(x)).collect(), ddpos) + } + PatKind::Box(ref inner) => hir::PatKind::Box(self.lower_pat(inner)), + PatKind::Ref(ref inner, mutbl) => { + hir::PatKind::Ref(self.lower_pat(inner), self.lower_mutability(mutbl)) + } + PatKind::Range(ref e1, ref e2) => { + hir::PatKind::Range(P(self.lower_expr(e1)), P(self.lower_expr(e2))) + } + PatKind::Slice(ref before, ref slice, 
ref after) => { + hir::PatKind::Slice(before.iter().map(|x| self.lower_pat(x)).collect(), + slice.as_ref().map(|x| self.lower_pat(x)), + after.iter().map(|x| self.lower_pat(x)).collect()) + } + PatKind::Mac(_) => panic!("Shouldn't exist here"), + }, + span: p.span, + }) + } + + fn lower_expr(&mut self, e: &Expr) -> hir::Expr { + hir::Expr { + id: e.id, + node: match e.node { + // Issue #22181: + // Eventually a desugaring for `box EXPR` + // (similar to the desugaring above for `in PLACE BLOCK`) + // should go here, desugaring + // + // to: + // + // let mut place = BoxPlace::make_place(); + // let raw_place = Place::pointer(&mut place); + // let value = $value; + // unsafe { + // ::std::ptr::write(raw_place, value); + // Boxed::finalize(place) + // } + // + // But for now there are type-inference issues doing that. + ExprKind::Box(ref e) => { + hir::ExprBox(P(self.lower_expr(e))) + } + + // Desugar ExprBox: `in (PLACE) EXPR` + ExprKind::InPlace(ref placer, ref value_expr) => { + // to: + // + // let p = PLACE; + // let mut place = Placer::make_place(p); + // let raw_place = Place::pointer(&mut place); + // push_unsafe!({ + // std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR )); + // InPlace::finalize(place) + // }) + let placer_expr = P(self.lower_expr(placer)); + let value_expr = P(self.lower_expr(value_expr)); + + let placer_ident = self.str_to_ident("placer"); + let place_ident = self.str_to_ident("place"); + let p_ptr_ident = self.str_to_ident("p_ptr"); + + let make_place = ["ops", "Placer", "make_place"]; + let place_pointer = ["ops", "Place", "pointer"]; + let move_val_init = ["intrinsics", "move_val_init"]; + let inplace_finalize = ["ops", "InPlace", "finalize"]; + + let unstable_span = self.allow_internal_unstable("<-", e.span); + let make_call = |this: &mut LoweringContext, p, args| { + let path = P(this.expr_std_path(unstable_span, p, ThinVec::new())); + P(this.expr_call(e.span, path, args)) + }; + + let mk_stmt_let = |this: &mut 
LoweringContext, bind, expr| { + this.stmt_let(e.span, false, bind, expr) + }; + + let mk_stmt_let_mut = |this: &mut LoweringContext, bind, expr| { + this.stmt_let(e.span, true, bind, expr) + }; + + // let placer = ; + let (s1, placer_binding) = { + mk_stmt_let(self, placer_ident, placer_expr) + }; + + // let mut place = Placer::make_place(placer); + let (s2, place_binding) = { + let placer = self.expr_ident(e.span, placer_ident, placer_binding); + let call = make_call(self, &make_place, hir_vec![placer]); + mk_stmt_let_mut(self, place_ident, call) + }; + + // let p_ptr = Place::pointer(&mut place); + let (s3, p_ptr_binding) = { + let agent = P(self.expr_ident(e.span, place_ident, place_binding)); + let args = hir_vec![self.expr_mut_addr_of(e.span, agent)]; + let call = make_call(self, &place_pointer, args); + mk_stmt_let(self, p_ptr_ident, call) + }; + + // pop_unsafe!(EXPR)); + let pop_unsafe_expr = { + self.signal_block_expr(hir_vec![], + value_expr, + e.span, + hir::PopUnsafeBlock(hir::CompilerGenerated), + ThinVec::new()) + }; + + // push_unsafe!({ + // std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR )); + // InPlace::finalize(place) + // }) + let expr = { + let ptr = self.expr_ident(e.span, p_ptr_ident, p_ptr_binding); + let call_move_val_init = + hir::StmtSemi( + make_call(self, &move_val_init, hir_vec![ptr, pop_unsafe_expr]), + self.next_id()); + let call_move_val_init = respan(e.span, call_move_val_init); + + let place = self.expr_ident(e.span, place_ident, place_binding); + let call = make_call(self, &inplace_finalize, hir_vec![place]); + P(self.signal_block_expr(hir_vec![call_move_val_init], + call, + e.span, + hir::PushUnsafeBlock(hir::CompilerGenerated), + ThinVec::new())) + }; + + let block = self.block_all(e.span, hir_vec![s1, s2, s3], Some(expr)); + // add the attributes to the outer returned expr node + return self.expr_block(P(block), e.attrs.clone()); + } + + ExprKind::Vec(ref exprs) => { + hir::ExprArray(exprs.iter().map(|x| 
self.lower_expr(x)).collect()) + } + ExprKind::Repeat(ref expr, ref count) => { + let expr = P(self.lower_expr(expr)); + let count = P(self.lower_expr(count)); + hir::ExprRepeat(expr, count) + } + ExprKind::Tup(ref elts) => { + hir::ExprTup(elts.iter().map(|x| self.lower_expr(x)).collect()) + } + ExprKind::Call(ref f, ref args) => { + let f = P(self.lower_expr(f)); + hir::ExprCall(f, args.iter().map(|x| self.lower_expr(x)).collect()) + } + ExprKind::MethodCall(i, ref tps, ref args) => { + let tps = tps.iter().map(|x| self.lower_ty(x)).collect(); + let args = args.iter().map(|x| self.lower_expr(x)).collect(); + hir::ExprMethodCall(respan(i.span, i.node.name), tps, args) + } + ExprKind::Binary(binop, ref lhs, ref rhs) => { + let binop = self.lower_binop(binop); + let lhs = P(self.lower_expr(lhs)); + let rhs = P(self.lower_expr(rhs)); + hir::ExprBinary(binop, lhs, rhs) + } + ExprKind::Unary(op, ref ohs) => { + let op = self.lower_unop(op); + let ohs = P(self.lower_expr(ohs)); + hir::ExprUnary(op, ohs) + } + ExprKind::Lit(ref l) => hir::ExprLit(P((**l).clone())), + ExprKind::Cast(ref expr, ref ty) => { + let expr = P(self.lower_expr(expr)); + hir::ExprCast(expr, self.lower_ty(ty)) + } + ExprKind::Type(ref expr, ref ty) => { + let expr = P(self.lower_expr(expr)); + hir::ExprType(expr, self.lower_ty(ty)) + } + ExprKind::AddrOf(m, ref ohs) => { + let m = self.lower_mutability(m); + let ohs = P(self.lower_expr(ohs)); + hir::ExprAddrOf(m, ohs) + } + // More complicated than you might expect because the else branch + // might be `if let`. + ExprKind::If(ref cond, ref blk, ref else_opt) => { + let else_opt = else_opt.as_ref().map(|els| { + match els.node { + ExprKind::IfLet(..) 
=> { + // wrap the if-let expr in a block + let span = els.span; + let els = P(self.lower_expr(els)); + let id = self.next_id(); + let blk = P(hir::Block { + stmts: hir_vec![], + expr: Some(els), + id: id, + rules: hir::DefaultBlock, + span: span, + }); + P(self.expr_block(blk, ThinVec::new())) + } + _ => P(self.lower_expr(els)), + } + }); + + hir::ExprIf(P(self.lower_expr(cond)), self.lower_block(blk), else_opt) + } + ExprKind::While(ref cond, ref body, opt_ident) => { + hir::ExprWhile(P(self.lower_expr(cond)), self.lower_block(body), + self.lower_opt_sp_ident(opt_ident)) + } + ExprKind::Loop(ref body, opt_ident) => { + hir::ExprLoop(self.lower_block(body), + self.lower_opt_sp_ident(opt_ident), + hir::LoopSource::Loop) + } + ExprKind::Match(ref expr, ref arms) => { + hir::ExprMatch(P(self.lower_expr(expr)), + arms.iter().map(|x| self.lower_arm(x)).collect(), + hir::MatchSource::Normal) + } + ExprKind::Closure(capture_clause, ref decl, ref body, fn_decl_span) => { + self.with_parent_def(e.id, |this| { + let expr = this.lower_expr(body); + hir::ExprClosure(this.lower_capture_clause(capture_clause), + this.lower_fn_decl(decl), + this.record_expr(expr), + fn_decl_span) + }) + } + ExprKind::Block(ref blk) => hir::ExprBlock(self.lower_block(blk)), + ExprKind::Assign(ref el, ref er) => { + hir::ExprAssign(P(self.lower_expr(el)), P(self.lower_expr(er))) + } + ExprKind::AssignOp(op, ref el, ref er) => { + hir::ExprAssignOp(self.lower_binop(op), + P(self.lower_expr(el)), + P(self.lower_expr(er))) + } + ExprKind::Field(ref el, ident) => { + hir::ExprField(P(self.lower_expr(el)), respan(ident.span, ident.node.name)) + } + ExprKind::TupField(ref el, ident) => { + hir::ExprTupField(P(self.lower_expr(el)), ident) + } + ExprKind::Index(ref el, ref er) => { + hir::ExprIndex(P(self.lower_expr(el)), P(self.lower_expr(er))) + } + ExprKind::Range(ref e1, ref e2, lims) => { + fn make_struct(this: &mut LoweringContext, + ast_expr: &Expr, + path: &[&str], + fields: &[(&str, &P)]) -> 
hir::Expr { + let struct_path = &iter::once(&"ops").chain(path).map(|s| *s) + .collect::>(); + let unstable_span = this.allow_internal_unstable("...", ast_expr.span); + + if fields.len() == 0 { + this.expr_std_path(unstable_span, struct_path, + ast_expr.attrs.clone()) + } else { + let fields = fields.into_iter().map(|&(s, e)| { + let expr = P(this.lower_expr(&e)); + let unstable_span = this.allow_internal_unstable("...", e.span); + this.field(Symbol::intern(s), expr, unstable_span) + }).collect(); + let attrs = ast_expr.attrs.clone(); + + this.expr_std_struct(unstable_span, struct_path, fields, None, attrs) + } + } + + use syntax::ast::RangeLimits::*; + + return match (e1, e2, lims) { + (&None, &None, HalfOpen) => + make_struct(self, e, &["RangeFull"], &[]), + + (&Some(ref e1), &None, HalfOpen) => + make_struct(self, e, &["RangeFrom"], + &[("start", e1)]), + + (&None, &Some(ref e2), HalfOpen) => + make_struct(self, e, &["RangeTo"], + &[("end", e2)]), + + (&Some(ref e1), &Some(ref e2), HalfOpen) => + make_struct(self, e, &["Range"], + &[("start", e1), ("end", e2)]), + + (&None, &Some(ref e2), Closed) => + make_struct(self, e, &["RangeToInclusive"], + &[("end", e2)]), + + (&Some(ref e1), &Some(ref e2), Closed) => + make_struct(self, e, &["RangeInclusive", "NonEmpty"], + &[("start", e1), ("end", e2)]), + + _ => panic!(self.diagnostic() + .span_fatal(e.span, "inclusive range with no end")), + }; + } + ExprKind::Path(ref qself, ref path) => { + hir::ExprPath(self.lower_qpath(e.id, qself, path, ParamMode::Optional)) + } + ExprKind::Break(opt_ident, ref opt_expr) => { + hir::ExprBreak(self.lower_label(e.id, opt_ident), + opt_expr.as_ref().map(|x| P(self.lower_expr(x)))) + } + ExprKind::Continue(opt_ident) => hir::ExprAgain(self.lower_label(e.id, opt_ident)), + ExprKind::Ret(ref e) => hir::ExprRet(e.as_ref().map(|x| P(self.lower_expr(x)))), + ExprKind::InlineAsm(ref asm) => { + let hir_asm = hir::InlineAsm { + inputs: asm.inputs.iter().map(|&(ref c, _)| 
c.clone()).collect(), + outputs: asm.outputs.iter().map(|out| { + hir::InlineAsmOutput { + constraint: out.constraint.clone(), + is_rw: out.is_rw, + is_indirect: out.is_indirect, + } + }).collect(), + asm: asm.asm.clone(), + asm_str_style: asm.asm_str_style, + clobbers: asm.clobbers.clone().into(), + volatile: asm.volatile, + alignstack: asm.alignstack, + dialect: asm.dialect, + expn_id: asm.expn_id, + }; + let outputs = + asm.outputs.iter().map(|out| self.lower_expr(&out.expr)).collect(); + let inputs = + asm.inputs.iter().map(|&(_, ref input)| self.lower_expr(input)).collect(); + hir::ExprInlineAsm(P(hir_asm), outputs, inputs) + } + ExprKind::Struct(ref path, ref fields, ref maybe_expr) => { + hir::ExprStruct(self.lower_qpath(e.id, &None, path, ParamMode::Optional), + fields.iter().map(|x| self.lower_field(x)).collect(), + maybe_expr.as_ref().map(|x| P(self.lower_expr(x)))) + } + ExprKind::Paren(ref ex) => { + let mut ex = self.lower_expr(ex); + // include parens in span, but only if it is a super-span. + if e.span.contains(ex.span) { + ex.span = e.span; + } + // merge attributes into the inner expression. 
+ let mut attrs = e.attrs.clone(); + attrs.extend::>(ex.attrs.into()); + ex.attrs = attrs; + return ex; + } + + // Desugar ExprIfLet + // From: `if let = []` + ExprKind::IfLet(ref pat, ref sub_expr, ref body, ref else_opt) => { + // to: + // + // match { + // => , + // [_ if => ,] + // _ => [ | ()] + // } + + // ` => ` + let pat_arm = { + let body = self.lower_block(body); + let body_expr = P(self.expr_block(body, ThinVec::new())); + let pat = self.lower_pat(pat); + self.arm(hir_vec![pat], body_expr) + }; + + // `[_ if => ,]` + let mut else_opt = else_opt.as_ref().map(|e| P(self.lower_expr(e))); + let else_if_arms = { + let mut arms = vec![]; + loop { + let else_opt_continue = else_opt.and_then(|els| { + els.and_then(|els| { + match els.node { + // else if + hir::ExprIf(cond, then, else_opt) => { + let pat_under = self.pat_wild(e.span); + arms.push(hir::Arm { + attrs: hir_vec![], + pats: hir_vec![pat_under], + guard: Some(cond), + body: P(self.expr_block(then, ThinVec::new())), + }); + else_opt.map(|else_opt| (else_opt, true)) + } + _ => Some((P(els), false)), + } + }) + }); + match else_opt_continue { + Some((e, true)) => { + else_opt = Some(e); + } + Some((e, false)) => { + else_opt = Some(e); + break; + } + None => { + else_opt = None; + break; + } + } + } + arms + }; + + let contains_else_clause = else_opt.is_some(); + + // `_ => [ | ()]` + let else_arm = { + let pat_under = self.pat_wild(e.span); + let else_expr = + else_opt.unwrap_or_else(|| self.expr_tuple(e.span, hir_vec![])); + self.arm(hir_vec![pat_under], else_expr) + }; + + let mut arms = Vec::with_capacity(else_if_arms.len() + 2); + arms.push(pat_arm); + arms.extend(else_if_arms); + arms.push(else_arm); + + let sub_expr = P(self.lower_expr(sub_expr)); + // add attributes to the outer returned expr node + return self.expr(e.span, + hir::ExprMatch(sub_expr, + arms.into(), + hir::MatchSource::IfLetDesugar { + contains_else_clause: contains_else_clause, + }), + e.attrs.clone()); + } + + // Desugar 
ExprWhileLet + // From: `[opt_ident]: while let = ` + ExprKind::WhileLet(ref pat, ref sub_expr, ref body, opt_ident) => { + // to: + // + // [opt_ident]: loop { + // match { + // => , + // _ => break + // } + // } + + // ` => ` + let pat_arm = { + let body = self.lower_block(body); + let body_expr = P(self.expr_block(body, ThinVec::new())); + let pat = self.lower_pat(pat); + self.arm(hir_vec![pat], body_expr) + }; + + // `_ => break` + let break_arm = { + let pat_under = self.pat_wild(e.span); + let break_expr = self.expr_break(e.span, ThinVec::new()); + self.arm(hir_vec![pat_under], break_expr) + }; + + // `match { ... }` + let arms = hir_vec![pat_arm, break_arm]; + let sub_expr = P(self.lower_expr(sub_expr)); + let match_expr = self.expr(e.span, + hir::ExprMatch(sub_expr, + arms, + hir::MatchSource::WhileLetDesugar), + ThinVec::new()); + + // `[opt_ident]: loop { ... }` + let loop_block = P(self.block_expr(P(match_expr))); + let loop_expr = hir::ExprLoop(loop_block, self.lower_opt_sp_ident(opt_ident), + hir::LoopSource::WhileLet); + // add attributes to the outer returned expr node + let attrs = e.attrs.clone(); + return hir::Expr { id: e.id, node: loop_expr, span: e.span, attrs: attrs }; + } + + // Desugar ExprForLoop + // From: `[opt_ident]: for in ` + ExprKind::ForLoop(ref pat, ref head, ref body, opt_ident) => { + // to: + // + // { + // let result = match ::std::iter::IntoIterator::into_iter() { + // mut iter => { + // [opt_ident]: loop { + // match ::std::iter::Iterator::next(&mut iter) { + // ::std::option::Option::Some() => , + // ::std::option::Option::None => break + // } + // } + // } + // }; + // result + // } + + // expand + let head = self.lower_expr(head); + + let iter = self.str_to_ident("iter"); + + // `::std::option::Option::Some() => ` + let pat_arm = { + let body_block = self.lower_block(body); + let body_span = body_block.span; + let body_expr = P(hir::Expr { + id: self.next_id(), + node: hir::ExprBlock(body_block), + span: body_span, + 
attrs: ThinVec::new(), + }); + let pat = self.lower_pat(pat); + let some_pat = self.pat_some(e.span, pat); + + self.arm(hir_vec![some_pat], body_expr) + }; + + // `::std::option::Option::None => break` + let break_arm = { + let break_expr = self.expr_break(e.span, ThinVec::new()); + let pat = self.pat_none(e.span); + self.arm(hir_vec![pat], break_expr) + }; + + // `mut iter` + let iter_pat = self.pat_ident_binding_mode(e.span, iter, + hir::BindByValue(hir::MutMutable)); + + // `match ::std::iter::Iterator::next(&mut iter) { ... }` + let match_expr = { + let iter = P(self.expr_ident(e.span, iter, iter_pat.id)); + let ref_mut_iter = self.expr_mut_addr_of(e.span, iter); + let next_path = &["iter", "Iterator", "next"]; + let next_path = P(self.expr_std_path(e.span, next_path, ThinVec::new())); + let next_expr = P(self.expr_call(e.span, next_path, + hir_vec![ref_mut_iter])); + let arms = hir_vec![pat_arm, break_arm]; + + P(self.expr(e.span, + hir::ExprMatch(next_expr, arms, + hir::MatchSource::ForLoopDesugar), + ThinVec::new())) + }; + + // `[opt_ident]: loop { ... }` + let loop_block = P(self.block_expr(match_expr)); + let loop_expr = hir::ExprLoop(loop_block, self.lower_opt_sp_ident(opt_ident), + hir::LoopSource::ForLoop); + let loop_expr = P(hir::Expr { + id: e.id, + node: loop_expr, + span: e.span, + attrs: ThinVec::new(), + }); + + // `mut iter => { ... }` + let iter_arm = self.arm(hir_vec![iter_pat], loop_expr); + + // `match ::std::iter::IntoIterator::into_iter() { ... 
}` + let into_iter_expr = { + let into_iter_path = &["iter", "IntoIterator", "into_iter"]; + let into_iter = P(self.expr_std_path(e.span, into_iter_path, + ThinVec::new())); + P(self.expr_call(e.span, into_iter, hir_vec![head])) + }; + + let match_expr = P(self.expr_match(e.span, + into_iter_expr, + hir_vec![iter_arm], + hir::MatchSource::ForLoopDesugar)); + + // `{ let _result = ...; _result }` + // underscore prevents an unused_variables lint if the head diverges + let result_ident = self.str_to_ident("_result"); + let (let_stmt, let_stmt_binding) = + self.stmt_let(e.span, false, result_ident, match_expr); + + let result = P(self.expr_ident(e.span, result_ident, let_stmt_binding)); + let block = P(self.block_all(e.span, hir_vec![let_stmt], Some(result))); + // add the attributes to the outer returned expr node + return self.expr_block(block, e.attrs.clone()); + } + + // Desugar ExprKind::Try + // From: `?` + ExprKind::Try(ref sub_expr) => { + // to: + // + // match Carrier::translate() { + // Ok(val) => val, + // Err(err) => return Carrier::from_error(From::from(err)) + // } + let unstable_span = self.allow_internal_unstable("?", e.span); + + // Carrier::translate() + let discr = { + // expand + let sub_expr = self.lower_expr(sub_expr); + + let path = &["ops", "Carrier", "translate"]; + let path = P(self.expr_std_path(unstable_span, path, ThinVec::new())); + P(self.expr_call(e.span, path, hir_vec![sub_expr])) + }; + + // Ok(val) => val + let ok_arm = { + let val_ident = self.str_to_ident("val"); + let val_pat = self.pat_ident(e.span, val_ident); + let val_expr = P(self.expr_ident(e.span, val_ident, val_pat.id)); + let ok_pat = self.pat_ok(e.span, val_pat); + + self.arm(hir_vec![ok_pat], val_expr) + }; + + // Err(err) => return Carrier::from_error(From::from(err)) + let err_arm = { + let err_ident = self.str_to_ident("err"); + let err_local = self.pat_ident(e.span, err_ident); + let from_expr = { + let path = &["convert", "From", "from"]; + let from = 
P(self.expr_std_path(e.span, path, ThinVec::new())); + let err_expr = self.expr_ident(e.span, err_ident, err_local.id); + + self.expr_call(e.span, from, hir_vec![err_expr]) + }; + let from_err_expr = { + let path = &["ops", "Carrier", "from_error"]; + let from_err = P(self.expr_std_path(unstable_span, path, + ThinVec::new())); + P(self.expr_call(e.span, from_err, hir_vec![from_expr])) + }; + + let ret_expr = P(self.expr(e.span, + hir::Expr_::ExprRet(Some(from_err_expr)), + ThinVec::new())); + + let err_pat = self.pat_err(e.span, err_local); + self.arm(hir_vec![err_pat], ret_expr) + }; + + return self.expr_match(e.span, discr, hir_vec![err_arm, ok_arm], + hir::MatchSource::TryDesugar); + } + + ExprKind::Mac(_) => panic!("Shouldn't exist here"), + }, + span: e.span, + attrs: e.attrs.clone(), + } + } + + fn lower_stmt(&mut self, s: &Stmt) -> SmallVector { + SmallVector::one(match s.node { + StmtKind::Local(ref l) => Spanned { + node: hir::StmtDecl(P(Spanned { + node: hir::DeclLocal(self.lower_local(l)), + span: s.span, + }), s.id), + span: s.span, + }, + StmtKind::Item(ref it) => { + // Can only use the ID once. + let mut id = Some(s.id); + return self.lower_item_id(it).into_iter().map(|item_id| Spanned { + node: hir::StmtDecl(P(Spanned { + node: hir::DeclItem(item_id), + span: s.span, + }), id.take().unwrap_or_else(|| self.next_id())), + span: s.span, + }).collect(); + } + StmtKind::Expr(ref e) => { + Spanned { + node: hir::StmtExpr(P(self.lower_expr(e)), s.id), + span: s.span, + } + } + StmtKind::Semi(ref e) => { + Spanned { + node: hir::StmtSemi(P(self.lower_expr(e)), s.id), + span: s.span, + } + } + StmtKind::Mac(..) 
=> panic!("Shouldn't exist here"), + }) + } + + fn lower_capture_clause(&mut self, c: CaptureBy) -> hir::CaptureClause { + match c { + CaptureBy::Value => hir::CaptureByValue, + CaptureBy::Ref => hir::CaptureByRef, + } + } + + fn lower_visibility(&mut self, v: &Visibility) -> hir::Visibility { + match *v { + Visibility::Public => hir::Public, + Visibility::Crate(_) => hir::Visibility::Crate, + Visibility::Restricted { ref path, id } => { + hir::Visibility::Restricted { + path: P(self.lower_path(id, path, ParamMode::Explicit)), + id: id + } + } + Visibility::Inherited => hir::Inherited, + } + } + + fn lower_defaultness(&mut self, d: Defaultness, has_value: bool) -> hir::Defaultness { + match d { + Defaultness::Default => hir::Defaultness::Default { has_value: has_value }, + Defaultness::Final => { + assert!(has_value); + hir::Defaultness::Final + } + } + } + + fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode { + match *b { + BlockCheckMode::Default => hir::DefaultBlock, + BlockCheckMode::Unsafe(u) => hir::UnsafeBlock(self.lower_unsafe_source(u)), + } + } + + fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingMode { + match *b { + BindingMode::ByRef(m) => hir::BindByRef(self.lower_mutability(m)), + BindingMode::ByValue(m) => hir::BindByValue(self.lower_mutability(m)), + } + } + + fn lower_unsafe_source(&mut self, u: UnsafeSource) -> hir::UnsafeSource { + match u { + CompilerGenerated => hir::CompilerGenerated, + UserProvided => hir::UserProvided, + } + } + + fn lower_impl_polarity(&mut self, i: ImplPolarity) -> hir::ImplPolarity { + match i { + ImplPolarity::Positive => hir::ImplPolarity::Positive, + ImplPolarity::Negative => hir::ImplPolarity::Negative, + } + } + + fn lower_trait_bound_modifier(&mut self, f: TraitBoundModifier) -> hir::TraitBoundModifier { + match f { + TraitBoundModifier::None => hir::TraitBoundModifier::None, + TraitBoundModifier::Maybe => hir::TraitBoundModifier::Maybe, + } + } + + // Helper methods 
for building HIR. + + fn arm(&mut self, pats: hir::HirVec>, expr: P) -> hir::Arm { + hir::Arm { + attrs: hir_vec![], + pats: pats, + guard: None, + body: expr, + } + } + + fn field(&mut self, name: Name, expr: P, span: Span) -> hir::Field { + hir::Field { + name: Spanned { + node: name, + span: span, + }, + span: span, + expr: expr, + is_shorthand: false, + } + } + + fn expr_break(&mut self, span: Span, attrs: ThinVec) -> P { + P(self.expr(span, hir::ExprBreak(None, None), attrs)) + } + + fn expr_call(&mut self, span: Span, e: P, args: hir::HirVec) + -> hir::Expr { + self.expr(span, hir::ExprCall(e, args), ThinVec::new()) + } + + fn expr_ident(&mut self, span: Span, id: Name, binding: NodeId) -> hir::Expr { + let def = { + let defs = self.resolver.definitions(); + Def::Local(defs.local_def_id(binding)) + }; + + let expr_path = hir::ExprPath(hir::QPath::Resolved(None, P(hir::Path { + span: span, + global: false, + def: def, + segments: hir_vec![hir::PathSegment::from_name(id)], + }))); + + self.expr(span, expr_path, ThinVec::new()) + } + + fn expr_mut_addr_of(&mut self, span: Span, e: P) -> hir::Expr { + self.expr(span, hir::ExprAddrOf(hir::MutMutable, e), ThinVec::new()) + } + + fn expr_std_path(&mut self, + span: Span, + components: &[&str], + attrs: ThinVec) + -> hir::Expr { + let path = self.std_path(span, components, true); + self.expr(span, hir::ExprPath(hir::QPath::Resolved(None, P(path))), attrs) + } + + fn expr_match(&mut self, + span: Span, + arg: P, + arms: hir::HirVec, + source: hir::MatchSource) + -> hir::Expr { + self.expr(span, hir::ExprMatch(arg, arms, source), ThinVec::new()) + } + + fn expr_block(&mut self, b: P, attrs: ThinVec) -> hir::Expr { + self.expr(b.span, hir::ExprBlock(b), attrs) + } + + fn expr_tuple(&mut self, sp: Span, exprs: hir::HirVec) -> P { + P(self.expr(sp, hir::ExprTup(exprs), ThinVec::new())) + } + + fn expr_std_struct(&mut self, + span: Span, + components: &[&str], + fields: hir::HirVec, + e: Option>, + attrs: ThinVec) -> 
hir::Expr { + let path = self.std_path(span, components, false); + let qpath = hir::QPath::Resolved(None, P(path)); + self.expr(span, hir::ExprStruct(qpath, fields, e), attrs) + } + + fn expr(&mut self, span: Span, node: hir::Expr_, attrs: ThinVec) -> hir::Expr { + hir::Expr { + id: self.next_id(), + node: node, + span: span, + attrs: attrs, + } + } + + fn stmt_let(&mut self, sp: Span, mutbl: bool, ident: Name, ex: P) + -> (hir::Stmt, NodeId) { + let pat = if mutbl { + self.pat_ident_binding_mode(sp, ident, hir::BindByValue(hir::MutMutable)) + } else { + self.pat_ident(sp, ident) + }; + let pat_id = pat.id; + let local = P(hir::Local { + pat: pat, + ty: None, + init: Some(ex), + id: self.next_id(), + span: sp, + attrs: ThinVec::new(), + }); + let decl = respan(sp, hir::DeclLocal(local)); + (respan(sp, hir::StmtDecl(P(decl), self.next_id())), pat_id) + } + + fn block_expr(&mut self, expr: P) -> hir::Block { + self.block_all(expr.span, hir::HirVec::new(), Some(expr)) + } + + fn block_all(&mut self, span: Span, stmts: hir::HirVec, expr: Option>) + -> hir::Block { + hir::Block { + stmts: stmts, + expr: expr, + id: self.next_id(), + rules: hir::DefaultBlock, + span: span, + } + } + + fn pat_ok(&mut self, span: Span, pat: P) -> P { + self.pat_std_enum(span, &["result", "Result", "Ok"], hir_vec![pat]) + } + + fn pat_err(&mut self, span: Span, pat: P) -> P { + self.pat_std_enum(span, &["result", "Result", "Err"], hir_vec![pat]) + } + + fn pat_some(&mut self, span: Span, pat: P) -> P { + self.pat_std_enum(span, &["option", "Option", "Some"], hir_vec![pat]) + } + + fn pat_none(&mut self, span: Span) -> P { + self.pat_std_enum(span, &["option", "Option", "None"], hir_vec![]) + } + + fn pat_std_enum(&mut self, + span: Span, + components: &[&str], + subpats: hir::HirVec>) + -> P { + let path = self.std_path(span, components, true); + let qpath = hir::QPath::Resolved(None, P(path)); + let pt = if subpats.is_empty() { + hir::PatKind::Path(qpath) + } else { + 
hir::PatKind::TupleStruct(qpath, subpats, None) + }; + self.pat(span, pt) + } + + fn pat_ident(&mut self, span: Span, name: Name) -> P { + self.pat_ident_binding_mode(span, name, hir::BindByValue(hir::MutImmutable)) + } + + fn pat_ident_binding_mode(&mut self, span: Span, name: Name, bm: hir::BindingMode) + -> P { + let id = self.next_id(); + let parent_def = self.parent_def; + let def_id = { + let defs = self.resolver.definitions(); + let def_path_data = DefPathData::Binding(name.as_str()); + let def_index = defs.create_def_with_parent(parent_def, id, def_path_data); + DefId::local(def_index) + }; + + P(hir::Pat { + id: id, + node: hir::PatKind::Binding(bm, + def_id, + Spanned { + span: span, + node: name, + }, + None), + span: span, + }) + } + + fn pat_wild(&mut self, span: Span) -> P { + self.pat(span, hir::PatKind::Wild) + } + + fn pat(&mut self, span: Span, pat: hir::PatKind) -> P { + P(hir::Pat { + id: self.next_id(), + node: pat, + span: span, + }) + } + + /// Given suffix ["b","c","d"], returns path `::std::b::c::d` when + /// `fld.cx.use_std`, and `::core::b::c::d` otherwise. + /// The path is also resolved according to `is_value`. 
+ fn std_path(&mut self, span: Span, components: &[&str], is_value: bool) -> hir::Path { + let idents = self.crate_root.iter().chain(components); + + let segments: Vec<_> = idents.map(|name| { + hir::PathSegment::from_name(Symbol::intern(name)) + }).collect(); + + let mut path = hir::Path { + span: span, + global: true, + def: Def::Err, + segments: segments.into(), + }; + + self.resolver.resolve_hir_path(&mut path, is_value); + path + } + + fn signal_block_expr(&mut self, + stmts: hir::HirVec, + expr: P, + span: Span, + rule: hir::BlockCheckMode, + attrs: ThinVec) + -> hir::Expr { + let id = self.next_id(); + let block = P(hir::Block { + rules: rule, + span: span, + id: id, + stmts: stmts, + expr: Some(expr), + }); + self.expr_block(block, attrs) + } + + fn ty(&mut self, span: Span, node: hir::Ty_) -> P { + P(hir::Ty { + id: self.next_id(), + node: node, + span: span, + }) + } +} diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs new file mode 100644 index 0000000000000..068e7ed8624ed --- /dev/null +++ b/src/librustc/hir/map/blocks.rs @@ -0,0 +1,278 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module provides a simplified abstraction for working with +//! code blocks identified by their integer node-id. In particular, +//! it captures a common set of attributes that all "function-like +//! things" (represented by `FnLike` instances) share. For example, +//! all `FnLike` instances have a type signature (be it explicit or +//! inferred). And all `FnLike` instances have a body, i.e. the code +//! that is run when the function-like thing it represents is invoked. +//! +//! 
With the above abstraction in place, one can treat the program +//! text as a collection of blocks of code (and most such blocks are +//! nested within a uniquely determined `FnLike`), and users can ask +//! for the `Code` associated with a particular NodeId. + +use hir as ast; +use hir::map::{self, Node}; +use hir::{Expr, FnDecl}; +use hir::intravisit::FnKind; +use syntax::abi; +use syntax::ast::{Attribute, Name, NodeId}; +use syntax_pos::Span; + +/// An FnLikeNode is a Node that is like a fn, in that it has a decl +/// and a body (as well as a NodeId, a span, etc). +/// +/// More specifically, it is one of either: +/// - A function item, +/// - A closure expr (i.e. an ExprClosure), or +/// - The default implementation for a trait method. +/// +/// To construct one, use the `Code::from_node` function. +#[derive(Copy, Clone)] +pub struct FnLikeNode<'a> { node: map::Node<'a> } + +/// MaybeFnLike wraps a method that indicates if an object +/// corresponds to some FnLikeNode. +pub trait MaybeFnLike { fn is_fn_like(&self) -> bool; } + +/// Components shared by fn-like things (fn items, methods, closures). +pub struct FnParts<'a> { + pub decl: &'a FnDecl, + pub body: ast::ExprId, + pub kind: FnKind<'a>, + pub span: Span, + pub id: NodeId, +} + +impl MaybeFnLike for ast::Item { + fn is_fn_like(&self) -> bool { + match self.node { ast::ItemFn(..) => true, _ => false, } + } +} + +impl MaybeFnLike for ast::TraitItem { + fn is_fn_like(&self) -> bool { + match self.node { ast::MethodTraitItem(_, Some(_)) => true, _ => false, } + } +} + +impl MaybeFnLike for ast::Expr { + fn is_fn_like(&self) -> bool { + match self.node { + ast::ExprClosure(..) => true, + _ => false, + } + } +} + +/// Carries either an FnLikeNode or a Expr, as these are the two +/// constructs that correspond to "code" (as in, something from which +/// we can construct a control-flow graph). 
+#[derive(Copy, Clone)] +pub enum Code<'a> { + FnLike(FnLikeNode<'a>), + Expr(&'a Expr), +} + +impl<'a> Code<'a> { + pub fn id(&self) -> NodeId { + match *self { + Code::FnLike(node) => node.id(), + Code::Expr(block) => block.id, + } + } + + /// Attempts to construct a Code from presumed FnLike or Expr node input. + pub fn from_node(map: &map::Map<'a>, id: NodeId) -> Option> { + match map.get(id) { + map::NodeBlock(_) => { + // Use the parent, hopefully an expression node. + Code::from_node(map, map.get_parent_node(id)) + } + map::NodeExpr(expr) => Some(Code::Expr(expr)), + node => FnLikeNode::from_node(node).map(Code::FnLike) + } + } +} + +/// These are all the components one can extract from a fn item for +/// use when implementing FnLikeNode operations. +struct ItemFnParts<'a> { + name: Name, + decl: &'a ast::FnDecl, + unsafety: ast::Unsafety, + constness: ast::Constness, + abi: abi::Abi, + vis: &'a ast::Visibility, + generics: &'a ast::Generics, + body: ast::ExprId, + id: NodeId, + span: Span, + attrs: &'a [Attribute], +} + +/// These are all the components one can extract from a closure expr +/// for use when implementing FnLikeNode operations. +struct ClosureParts<'a> { + decl: &'a FnDecl, + body: ast::ExprId, + id: NodeId, + span: Span, + attrs: &'a [Attribute], +} + +impl<'a> ClosureParts<'a> { + fn new(d: &'a FnDecl, b: ast::ExprId, id: NodeId, s: Span, attrs: &'a [Attribute]) -> Self { + ClosureParts { + decl: d, + body: b, + id: id, + span: s, + attrs: attrs, + } + } +} + +impl<'a> FnLikeNode<'a> { + /// Attempts to construct a FnLikeNode from presumed FnLike node input. 
+ pub fn from_node(node: Node) -> Option { + let fn_like = match node { + map::NodeItem(item) => item.is_fn_like(), + map::NodeTraitItem(tm) => tm.is_fn_like(), + map::NodeImplItem(_) => true, + map::NodeExpr(e) => e.is_fn_like(), + _ => false + }; + if fn_like { + Some(FnLikeNode { + node: node + }) + } else { + None + } + } + + pub fn to_fn_parts(self) -> FnParts<'a> { + FnParts { + decl: self.decl(), + body: self.body(), + kind: self.kind(), + span: self.span(), + id: self.id(), + } + } + + pub fn body(self) -> ast::ExprId { + self.handle(|i: ItemFnParts<'a>| i.body, + |_, _, _: &'a ast::MethodSig, _, body: ast::ExprId, _, _| body, + |c: ClosureParts<'a>| c.body) + } + + pub fn decl(self) -> &'a FnDecl { + self.handle(|i: ItemFnParts<'a>| &*i.decl, + |_, _, sig: &'a ast::MethodSig, _, _, _, _| &sig.decl, + |c: ClosureParts<'a>| c.decl) + } + + pub fn span(self) -> Span { + self.handle(|i: ItemFnParts| i.span, + |_, _, _: &'a ast::MethodSig, _, _, span, _| span, + |c: ClosureParts| c.span) + } + + pub fn id(self) -> NodeId { + self.handle(|i: ItemFnParts| i.id, + |id, _, _: &'a ast::MethodSig, _, _, _, _| id, + |c: ClosureParts| c.id) + } + + pub fn constness(self) -> ast::Constness { + match self.kind() { + FnKind::ItemFn(_, _, _, constness, ..) => { + constness + } + FnKind::Method(_, m, ..) 
=> { + m.constness + } + _ => ast::Constness::NotConst + } + } + + pub fn kind(self) -> FnKind<'a> { + let item = |p: ItemFnParts<'a>| -> FnKind<'a> { + FnKind::ItemFn(p.name, p.generics, p.unsafety, p.constness, p.abi, p.vis, p.attrs) + }; + let closure = |c: ClosureParts<'a>| { + FnKind::Closure(c.attrs) + }; + let method = |_, name: Name, sig: &'a ast::MethodSig, vis, _, _, attrs| { + FnKind::Method(name, sig, vis, attrs) + }; + self.handle(item, method, closure) + } + + fn handle(self, item_fn: I, method: M, closure: C) -> A where + I: FnOnce(ItemFnParts<'a>) -> A, + M: FnOnce(NodeId, + Name, + &'a ast::MethodSig, + Option<&'a ast::Visibility>, + ast::ExprId, + Span, + &'a [Attribute]) + -> A, + C: FnOnce(ClosureParts<'a>) -> A, + { + match self.node { + map::NodeItem(i) => match i.node { + ast::ItemFn(ref decl, unsafety, constness, abi, ref generics, block) => + item_fn(ItemFnParts { + id: i.id, + name: i.name, + decl: &decl, + unsafety: unsafety, + body: block, + generics: generics, + abi: abi, + vis: &i.vis, + constness: constness, + span: i.span, + attrs: &i.attrs, + }), + _ => bug!("item FnLikeNode that is not fn-like"), + }, + map::NodeTraitItem(ti) => match ti.node { + ast::MethodTraitItem(ref sig, Some(body)) => { + method(ti.id, ti.name, sig, None, body, ti.span, &ti.attrs) + } + _ => bug!("trait method FnLikeNode that is not fn-like"), + }, + map::NodeImplItem(ii) => { + match ii.node { + ast::ImplItemKind::Method(ref sig, body) => { + method(ii.id, ii.name, sig, Some(&ii.vis), body, ii.span, &ii.attrs) + } + _ => { + bug!("impl method FnLikeNode that is not fn-like") + } + } + }, + map::NodeExpr(e) => match e.node { + ast::ExprClosure(_, ref decl, block, _fn_decl_span) => + closure(ClosureParts::new(&decl, block, e.id, e.span, &e.attrs)), + _ => bug!("expr FnLikeNode that is not fn-like"), + }, + _ => bug!("other FnLikeNode that is not fn-like"), + } + } +} diff --git a/src/librustc/hir/map/collector.rs b/src/librustc/hir/map/collector.rs new file 
mode 100644 index 0000000000000..c46c8f044e0ff --- /dev/null +++ b/src/librustc/hir/map/collector.rs @@ -0,0 +1,256 @@ +// Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::*; + +use hir::intravisit::{Visitor, NestedVisitorMap}; +use hir::def_id::DefId; +use middle::cstore::InlinedItem; +use std::iter::repeat; +use syntax::ast::{NodeId, CRATE_NODE_ID}; +use syntax_pos::Span; + +/// A Visitor that walks over the HIR and collects Nodes into a HIR map +pub struct NodeCollector<'ast> { + /// The crate + pub krate: &'ast Crate, + /// The node map + pub map: Vec>, + /// The parent of this node + pub parent_node: NodeId, + /// If true, completely ignore nested items. We set this when loading + /// HIR from metadata, since in that case we only want the HIR for + /// one specific item (and not the ones nested inside of it). 
+ pub ignore_nested_items: bool +} + +impl<'ast> NodeCollector<'ast> { + pub fn root(krate: &'ast Crate) -> NodeCollector<'ast> { + let mut collector = NodeCollector { + krate: krate, + map: vec![], + parent_node: CRATE_NODE_ID, + ignore_nested_items: false + }; + collector.insert_entry(CRATE_NODE_ID, RootCrate); + + collector + } + + pub fn extend(krate: &'ast Crate, + parent: &'ast InlinedItem, + parent_node: NodeId, + parent_def_path: DefPath, + parent_def_id: DefId, + map: Vec>) + -> NodeCollector<'ast> { + let mut collector = NodeCollector { + krate: krate, + map: map, + parent_node: parent_node, + ignore_nested_items: true + }; + + assert_eq!(parent_def_path.krate, parent_def_id.krate); + collector.insert_entry(parent_node, RootInlinedParent(parent)); + + collector + } + + fn insert_entry(&mut self, id: NodeId, entry: MapEntry<'ast>) { + debug!("ast_map: {:?} => {:?}", id, entry); + let len = self.map.len(); + if id.as_usize() >= len { + self.map.extend(repeat(NotPresent).take(id.as_usize() - len + 1)); + } + self.map[id.as_usize()] = entry; + } + + fn insert(&mut self, id: NodeId, node: Node<'ast>) { + let entry = MapEntry::from_node(self.parent_node, node); + self.insert_entry(id, entry); + } + + fn with_parent(&mut self, parent_id: NodeId, f: F) { + let parent_node = self.parent_node; + self.parent_node = parent_id; + f(self); + self.parent_node = parent_node; + } +} + +impl<'ast> Visitor<'ast> for NodeCollector<'ast> { + /// Because we want to track parent items and so forth, enable + /// deep walking so that we walk nested items in the context of + /// their outer items. 
+ + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + panic!("visit_nested_xxx must be manually implemented in this visitor") + } + + fn visit_nested_item(&mut self, item: ItemId) { + debug!("visit_nested_item: {:?}", item); + if !self.ignore_nested_items { + self.visit_item(self.krate.item(item.id)) + } + } + + fn visit_nested_impl_item(&mut self, item_id: ImplItemId) { + self.visit_impl_item(self.krate.impl_item(item_id)) + } + + fn visit_body(&mut self, id: ExprId) { + self.visit_expr(self.krate.expr(id)) + } + + fn visit_item(&mut self, i: &'ast Item) { + debug!("visit_item: {:?}", i); + + self.insert(i.id, NodeItem(i)); + + self.with_parent(i.id, |this| { + match i.node { + ItemEnum(ref enum_definition, _) => { + for v in &enum_definition.variants { + this.insert(v.node.data.id(), NodeVariant(v)); + } + } + ItemStruct(ref struct_def, _) => { + // If this is a tuple-like struct, register the constructor. + if !struct_def.is_struct() { + this.insert(struct_def.id(), NodeStructCtor(struct_def)); + } + } + _ => {} + } + intravisit::walk_item(this, i); + }); + } + + fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) { + self.insert(foreign_item.id, NodeForeignItem(foreign_item)); + + self.with_parent(foreign_item.id, |this| { + intravisit::walk_foreign_item(this, foreign_item); + }); + } + + fn visit_generics(&mut self, generics: &'ast Generics) { + for ty_param in generics.ty_params.iter() { + self.insert(ty_param.id, NodeTyParam(ty_param)); + } + + intravisit::walk_generics(self, generics); + } + + fn visit_trait_item(&mut self, ti: &'ast TraitItem) { + self.insert(ti.id, NodeTraitItem(ti)); + + self.with_parent(ti.id, |this| { + intravisit::walk_trait_item(this, ti); + }); + } + + fn visit_impl_item(&mut self, ii: &'ast ImplItem) { + self.insert(ii.id, NodeImplItem(ii)); + + self.with_parent(ii.id, |this| { + intravisit::walk_impl_item(this, ii); + }); + } + + fn visit_pat(&mut self, pat: &'ast Pat) { + let node = if 
let PatKind::Binding(..) = pat.node { + NodeLocal(pat) + } else { + NodePat(pat) + }; + self.insert(pat.id, node); + + self.with_parent(pat.id, |this| { + intravisit::walk_pat(this, pat); + }); + } + + fn visit_expr(&mut self, expr: &'ast Expr) { + self.insert(expr.id, NodeExpr(expr)); + + self.with_parent(expr.id, |this| { + intravisit::walk_expr(this, expr); + }); + } + + fn visit_stmt(&mut self, stmt: &'ast Stmt) { + let id = stmt.node.id(); + self.insert(id, NodeStmt(stmt)); + + self.with_parent(id, |this| { + intravisit::walk_stmt(this, stmt); + }); + } + + fn visit_ty(&mut self, ty: &'ast Ty) { + self.insert(ty.id, NodeTy(ty)); + + self.with_parent(ty.id, |this| { + intravisit::walk_ty(this, ty); + }); + } + + fn visit_trait_ref(&mut self, tr: &'ast TraitRef) { + self.insert(tr.ref_id, NodeTraitRef(tr)); + + self.with_parent(tr.ref_id, |this| { + intravisit::walk_trait_ref(this, tr); + }); + } + + fn visit_fn(&mut self, fk: intravisit::FnKind<'ast>, fd: &'ast FnDecl, + b: ExprId, s: Span, id: NodeId) { + assert_eq!(self.parent_node, id); + intravisit::walk_fn(self, fk, fd, b, s, id); + } + + fn visit_block(&mut self, block: &'ast Block) { + self.insert(block.id, NodeBlock(block)); + self.with_parent(block.id, |this| { + intravisit::walk_block(this, block); + }); + } + + fn visit_lifetime(&mut self, lifetime: &'ast Lifetime) { + self.insert(lifetime.id, NodeLifetime(lifetime)); + } + + fn visit_vis(&mut self, visibility: &'ast Visibility) { + match *visibility { + Visibility::Public | + Visibility::Crate | + Visibility::Inherited => {} + Visibility::Restricted { id, .. 
} => { + self.insert(id, NodeVisibility(visibility)); + self.with_parent(id, |this| { + intravisit::walk_vis(this, visibility); + }); + } + } + } + + fn visit_macro_def(&mut self, macro_def: &'ast MacroDef) { + self.insert_entry(macro_def.id, NotPresent); + } + + fn visit_struct_field(&mut self, field: &'ast StructField) { + self.insert(field.id, NodeField(field)); + self.with_parent(field.id, |this| { + intravisit::walk_struct_field(this, field); + }); + } +} diff --git a/src/librustc/hir/map/def_collector.rs b/src/librustc/hir/map/def_collector.rs new file mode 100644 index 0000000000000..273094b735c3a --- /dev/null +++ b/src/librustc/hir/map/def_collector.rs @@ -0,0 +1,492 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::map::definitions::*; + +use hir; +use hir::intravisit::{self, Visitor, NestedVisitorMap}; +use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex}; + +use middle::cstore::InlinedItem; + +use syntax::ast::*; +use syntax::ext::hygiene::Mark; +use syntax::visit; +use syntax::symbol::{Symbol, keywords}; + +/// Creates def ids for nodes in the HIR. +pub struct DefCollector<'a> { + // If we are walking HIR (c.f., AST), we need to keep a reference to the + // crate. 
+ hir_crate: Option<&'a hir::Crate>, + definitions: &'a mut Definitions, + parent_def: Option, + pub visit_macro_invoc: Option<&'a mut FnMut(MacroInvocationData)>, +} + +pub struct MacroInvocationData { + pub mark: Mark, + pub def_index: DefIndex, + pub const_integer: bool, +} + +impl<'a> DefCollector<'a> { + pub fn new(definitions: &'a mut Definitions) -> Self { + DefCollector { + hir_crate: None, + definitions: definitions, + parent_def: None, + visit_macro_invoc: None, + } + } + + pub fn extend(parent_node: NodeId, + parent_def_path: DefPath, + parent_def_id: DefId, + definitions: &'a mut Definitions) + -> Self { + let mut collector = DefCollector::new(definitions); + + assert_eq!(parent_def_path.krate, parent_def_id.krate); + let root_path = Box::new(InlinedRootPath { + data: parent_def_path.data, + def_id: parent_def_id, + }); + + let def = collector.create_def(parent_node, DefPathData::InlinedRoot(root_path)); + collector.parent_def = Some(def); + + collector + } + + pub fn collect_root(&mut self) { + let root = self.create_def_with_parent(None, CRATE_NODE_ID, DefPathData::CrateRoot); + assert_eq!(root, CRATE_DEF_INDEX); + self.parent_def = Some(root); + + self.create_def_with_parent(Some(CRATE_DEF_INDEX), DUMMY_NODE_ID, DefPathData::Misc); + } + + pub fn walk_item(&mut self, ii: &'a InlinedItem, krate: &'a hir::Crate) { + self.hir_crate = Some(krate); + ii.visit(self); + } + + fn create_def(&mut self, node_id: NodeId, data: DefPathData) -> DefIndex { + let parent_def = self.parent_def; + debug!("create_def(node_id={:?}, data={:?}, parent_def={:?})", node_id, data, parent_def); + self.definitions.create_def_with_parent(parent_def, node_id, data) + } + + fn create_def_with_parent(&mut self, + parent: Option, + node_id: NodeId, + data: DefPathData) + -> DefIndex { + self.definitions.create_def_with_parent(parent, node_id, data) + } + + pub fn with_parent(&mut self, parent_def: DefIndex, f: F) { + let parent = self.parent_def; + self.parent_def = 
Some(parent_def); + f(self); + self.parent_def = parent; + } + + pub fn visit_ast_const_integer(&mut self, expr: &Expr) { + match expr.node { + // Find the node which will be used after lowering. + ExprKind::Paren(ref inner) => return self.visit_ast_const_integer(inner), + ExprKind::Mac(..) => return self.visit_macro_invoc(expr.id, true), + // FIXME(eddyb) Closures should have separate + // function definition IDs and expression IDs. + ExprKind::Closure(..) => return, + _ => {} + } + + self.create_def(expr.id, DefPathData::Initializer); + } + + fn visit_hir_const_integer(&mut self, expr: &hir::Expr) { + // FIXME(eddyb) Closures should have separate + // function definition IDs and expression IDs. + if let hir::ExprClosure(..) = expr.node { + return; + } + + self.create_def(expr.id, DefPathData::Initializer); + } + + fn visit_macro_invoc(&mut self, id: NodeId, const_integer: bool) { + if let Some(ref mut visit) = self.visit_macro_invoc { + visit(MacroInvocationData { + mark: Mark::from_placeholder_id(id), + const_integer: const_integer, + def_index: self.parent_def.unwrap(), + }) + } + } +} + +impl<'a> visit::Visitor for DefCollector<'a> { + fn visit_item(&mut self, i: &Item) { + debug!("visit_item: {:?}", i); + + // Pick the def data. This need not be unique, but the more + // information we encapsulate into + let def_data = match i.node { + ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => + DefPathData::Impl, + ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | ItemKind::Trait(..) | + ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) => + DefPathData::TypeNs(i.ident.name.as_str()), + ItemKind::Mod(..) if i.ident == keywords::Invalid.ident() => { + return visit::walk_item(self, i); + } + ItemKind::Mod(..) => DefPathData::Module(i.ident.name.as_str()), + ItemKind::Static(..) | ItemKind::Const(..) | ItemKind::Fn(..) => + DefPathData::ValueNs(i.ident.name.as_str()), + ItemKind::Mac(..) 
if i.id == DUMMY_NODE_ID => return, // Scope placeholder + ItemKind::Mac(..) => return self.visit_macro_invoc(i.id, false), + ItemKind::Use(ref view_path) => { + match view_path.node { + ViewPathGlob(..) => {} + + // FIXME(eddyb) Should use the real name. Which namespace? + ViewPathSimple(..) => {} + ViewPathList(_, ref imports) => { + for import in imports { + self.create_def(import.node.id, DefPathData::Misc); + } + } + } + DefPathData::Misc + } + }; + let def = self.create_def(i.id, def_data); + + self.with_parent(def, |this| { + match i.node { + ItemKind::Enum(ref enum_definition, _) => { + for v in &enum_definition.variants { + let variant_def_index = + this.create_def(v.node.data.id(), + DefPathData::EnumVariant(v.node.name.name.as_str())); + this.with_parent(variant_def_index, |this| { + for (index, field) in v.node.data.fields().iter().enumerate() { + let name = field.ident.map(|ident| ident.name) + .unwrap_or_else(|| Symbol::intern(&index.to_string())); + this.create_def(field.id, DefPathData::Field(name.as_str())); + } + + if let Some(ref expr) = v.node.disr_expr { + this.visit_ast_const_integer(expr); + } + }); + } + } + ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => { + // If this is a tuple-like struct, register the constructor. 
+ if !struct_def.is_struct() { + this.create_def(struct_def.id(), + DefPathData::StructCtor); + } + + for (index, field) in struct_def.fields().iter().enumerate() { + let name = field.ident.map(|ident| ident.name.as_str()) + .unwrap_or(Symbol::intern(&index.to_string()).as_str()); + this.create_def(field.id, DefPathData::Field(name)); + } + } + _ => {} + } + visit::walk_item(this, i); + }); + } + + fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { + let def = self.create_def(foreign_item.id, + DefPathData::ValueNs(foreign_item.ident.name.as_str())); + + self.with_parent(def, |this| { + visit::walk_foreign_item(this, foreign_item); + }); + } + + fn visit_generics(&mut self, generics: &Generics) { + for ty_param in generics.ty_params.iter() { + self.create_def(ty_param.id, DefPathData::TypeParam(ty_param.ident.name.as_str())); + } + + visit::walk_generics(self, generics); + } + + fn visit_trait_item(&mut self, ti: &TraitItem) { + let def_data = match ti.node { + TraitItemKind::Method(..) | TraitItemKind::Const(..) => + DefPathData::ValueNs(ti.ident.name.as_str()), + TraitItemKind::Type(..) => DefPathData::TypeNs(ti.ident.name.as_str()), + TraitItemKind::Macro(..) => return self.visit_macro_invoc(ti.id, false), + }; + + let def = self.create_def(ti.id, def_data); + self.with_parent(def, |this| { + if let TraitItemKind::Const(_, Some(ref expr)) = ti.node { + this.create_def(expr.id, DefPathData::Initializer); + } + + visit::walk_trait_item(this, ti); + }); + } + + fn visit_impl_item(&mut self, ii: &ImplItem) { + let def_data = match ii.node { + ImplItemKind::Method(..) | ImplItemKind::Const(..) => + DefPathData::ValueNs(ii.ident.name.as_str()), + ImplItemKind::Type(..) => DefPathData::TypeNs(ii.ident.name.as_str()), + ImplItemKind::Macro(..) 
=> return self.visit_macro_invoc(ii.id, false), + }; + + let def = self.create_def(ii.id, def_data); + self.with_parent(def, |this| { + if let ImplItemKind::Const(_, ref expr) = ii.node { + this.create_def(expr.id, DefPathData::Initializer); + } + + visit::walk_impl_item(this, ii); + }); + } + + fn visit_pat(&mut self, pat: &Pat) { + let parent_def = self.parent_def; + + match pat.node { + PatKind::Mac(..) => return self.visit_macro_invoc(pat.id, false), + PatKind::Ident(_, id, _) => { + let def = self.create_def(pat.id, DefPathData::Binding(id.node.name.as_str())); + self.parent_def = Some(def); + } + _ => {} + } + + visit::walk_pat(self, pat); + self.parent_def = parent_def; + } + + fn visit_expr(&mut self, expr: &Expr) { + let parent_def = self.parent_def; + + match expr.node { + ExprKind::Mac(..) => return self.visit_macro_invoc(expr.id, false), + ExprKind::Repeat(_, ref count) => self.visit_ast_const_integer(count), + ExprKind::Closure(..) => { + let def = self.create_def(expr.id, DefPathData::ClosureExpr); + self.parent_def = Some(def); + } + _ => {} + } + + visit::walk_expr(self, expr); + self.parent_def = parent_def; + } + + fn visit_ty(&mut self, ty: &Ty) { + match ty.node { + TyKind::Mac(..) => return self.visit_macro_invoc(ty.id, false), + TyKind::Array(_, ref length) => self.visit_ast_const_integer(length), + TyKind::ImplTrait(..) => { + self.create_def(ty.id, DefPathData::ImplTrait); + } + _ => {} + } + visit::walk_ty(self, ty); + } + + fn visit_lifetime_def(&mut self, def: &LifetimeDef) { + self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name.as_str())); + } + + fn visit_macro_def(&mut self, macro_def: &MacroDef) { + self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.ident.name.as_str())); + } + + fn visit_stmt(&mut self, stmt: &Stmt) { + match stmt.node { + StmtKind::Mac(..) 
=> self.visit_macro_invoc(stmt.id, false), + _ => visit::walk_stmt(self, stmt), + } + } +} + +// We walk the HIR rather than the AST when reading items from metadata. +impl<'ast> Visitor<'ast> for DefCollector<'ast> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + // note however that we override `visit_body` below + NestedVisitorMap::None + } + + fn visit_body(&mut self, id: hir::ExprId) { + if let Some(krate) = self.hir_crate { + self.visit_expr(krate.expr(id)); + } + } + + fn visit_item(&mut self, i: &'ast hir::Item) { + debug!("visit_item: {:?}", i); + + // Pick the def data. This need not be unique, but the more + // information we encapsulate into + let def_data = match i.node { + hir::ItemDefaultImpl(..) | hir::ItemImpl(..) => + DefPathData::Impl, + hir::ItemEnum(..) | hir::ItemStruct(..) | hir::ItemUnion(..) | + hir::ItemTrait(..) | hir::ItemExternCrate(..) | hir::ItemMod(..) | + hir::ItemForeignMod(..) | hir::ItemTy(..) => + DefPathData::TypeNs(i.name.as_str()), + hir::ItemStatic(..) | hir::ItemConst(..) | hir::ItemFn(..) => + DefPathData::ValueNs(i.name.as_str()), + hir::ItemUse(..) => DefPathData::Misc, + }; + let def = self.create_def(i.id, def_data); + + self.with_parent(def, |this| { + match i.node { + hir::ItemEnum(ref enum_definition, _) => { + for v in &enum_definition.variants { + let variant_def_index = + this.create_def(v.node.data.id(), + DefPathData::EnumVariant(v.node.name.as_str())); + + this.with_parent(variant_def_index, |this| { + for field in v.node.data.fields() { + this.create_def(field.id, + DefPathData::Field(field.name.as_str())); + } + if let Some(ref expr) = v.node.disr_expr { + this.visit_hir_const_integer(expr); + } + }); + } + } + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { + // If this is a tuple-like struct, register the constructor. 
+ if !struct_def.is_struct() { + this.create_def(struct_def.id(), + DefPathData::StructCtor); + } + + for field in struct_def.fields() { + this.create_def(field.id, DefPathData::Field(field.name.as_str())); + } + } + _ => {} + } + intravisit::walk_item(this, i); + }); + } + + fn visit_foreign_item(&mut self, foreign_item: &'ast hir::ForeignItem) { + let def = self.create_def(foreign_item.id, + DefPathData::ValueNs(foreign_item.name.as_str())); + + self.with_parent(def, |this| { + intravisit::walk_foreign_item(this, foreign_item); + }); + } + + fn visit_generics(&mut self, generics: &'ast hir::Generics) { + for ty_param in generics.ty_params.iter() { + self.create_def(ty_param.id, DefPathData::TypeParam(ty_param.name.as_str())); + } + + intravisit::walk_generics(self, generics); + } + + fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { + let def_data = match ti.node { + hir::MethodTraitItem(..) | hir::ConstTraitItem(..) => + DefPathData::ValueNs(ti.name.as_str()), + hir::TypeTraitItem(..) => DefPathData::TypeNs(ti.name.as_str()), + }; + + let def = self.create_def(ti.id, def_data); + self.with_parent(def, |this| { + if let hir::ConstTraitItem(_, Some(ref expr)) = ti.node { + this.create_def(expr.id, DefPathData::Initializer); + } + + intravisit::walk_trait_item(this, ti); + }); + } + + fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { + let def_data = match ii.node { + hir::ImplItemKind::Method(..) | hir::ImplItemKind::Const(..) => + DefPathData::ValueNs(ii.name.as_str()), + hir::ImplItemKind::Type(..) 
=> DefPathData::TypeNs(ii.name.as_str()), + }; + + let def = self.create_def(ii.id, def_data); + self.with_parent(def, |this| { + if let hir::ImplItemKind::Const(_, ref expr) = ii.node { + this.create_def(expr.id, DefPathData::Initializer); + } + + intravisit::walk_impl_item(this, ii); + }); + } + + fn visit_pat(&mut self, pat: &'ast hir::Pat) { + let parent_def = self.parent_def; + + if let hir::PatKind::Binding(_, _, name, _) = pat.node { + let def = self.create_def(pat.id, DefPathData::Binding(name.node.as_str())); + self.parent_def = Some(def); + } + + intravisit::walk_pat(self, pat); + self.parent_def = parent_def; + } + + fn visit_expr(&mut self, expr: &'ast hir::Expr) { + let parent_def = self.parent_def; + + if let hir::ExprRepeat(_, ref count) = expr.node { + self.visit_hir_const_integer(count); + } + + if let hir::ExprClosure(..) = expr.node { + let def = self.create_def(expr.id, DefPathData::ClosureExpr); + self.parent_def = Some(def); + } + + intravisit::walk_expr(self, expr); + self.parent_def = parent_def; + } + + fn visit_ty(&mut self, ty: &'ast hir::Ty) { + if let hir::TyArray(_, ref length) = ty.node { + self.visit_hir_const_integer(length); + } + if let hir::TyImplTrait(..) = ty.node { + self.create_def(ty.id, DefPathData::ImplTrait); + } + intravisit::walk_ty(self, ty); + } + + fn visit_lifetime_def(&mut self, def: &'ast hir::LifetimeDef) { + self.create_def(def.lifetime.id, DefPathData::LifetimeDef(def.lifetime.name.as_str())); + } + + fn visit_macro_def(&mut self, macro_def: &'ast hir::MacroDef) { + self.create_def(macro_def.id, DefPathData::MacroDef(macro_def.name.as_str())); + } +} diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs new file mode 100644 index 0000000000000..83d3627d8e616 --- /dev/null +++ b/src/librustc/hir/map/definitions.rs @@ -0,0 +1,380 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; +use rustc_data_structures::fx::FxHashMap; +use std::fmt::Write; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; +use syntax::ast; +use syntax::symbol::{Symbol, InternedString}; +use ty::TyCtxt; +use util::nodemap::NodeMap; + +/// The definition table containing node definitions +#[derive(Clone)] +pub struct Definitions { + data: Vec, + key_map: FxHashMap, + node_map: NodeMap, +} + +/// A unique identifier that we can use to lookup a definition +/// precisely. It combines the index of the definition's parent (if +/// any) with a `DisambiguatedDefPathData`. +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct DefKey { + /// Parent path. + pub parent: Option, + + /// Identifier of this node. + pub disambiguated_data: DisambiguatedDefPathData, +} + +/// Pair of `DefPathData` and an integer disambiguator. The integer is +/// normally 0, but in the event that there are multiple defs with the +/// same `parent` and `data`, we use this field to disambiguate +/// between them. This introduces some artificial ordering dependency +/// but means that if you have (e.g.) two impls for the same type in +/// the same module, they do get distinct def-ids. +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct DisambiguatedDefPathData { + pub data: DefPathData, + pub disambiguator: u32 +} + +/// For each definition, we track the following data. A definition +/// here is defined somewhat circularly as "something with a def-id", +/// but it generally corresponds to things like structs, enums, etc. 
+/// There are also some rather random cases (like const initializer +/// expressions) that are mostly just leftovers. +#[derive(Clone, Debug)] +pub struct DefData { + pub key: DefKey, + + /// Local ID within the HIR. + pub node_id: ast::NodeId, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct DefPath { + /// the path leading from the crate root to the item + pub data: Vec, + + /// what krate root is this path relative to? + pub krate: CrateNum, +} + +impl DefPath { + pub fn is_local(&self) -> bool { + self.krate == LOCAL_CRATE + } + + pub fn make(start_krate: CrateNum, + start_index: DefIndex, + mut get_key: FN) -> DefPath + where FN: FnMut(DefIndex) -> DefKey + { + let mut krate = start_krate; + let mut data = vec![]; + let mut index = Some(start_index); + loop { + debug!("DefPath::make: krate={:?} index={:?}", krate, index); + let p = index.unwrap(); + let key = get_key(p); + debug!("DefPath::make: key={:?}", key); + match key.disambiguated_data.data { + DefPathData::CrateRoot => { + assert!(key.parent.is_none()); + break; + } + DefPathData::InlinedRoot(ref p) => { + assert!(key.parent.is_none()); + assert!(!p.def_id.is_local()); + data.extend(p.data.iter().cloned().rev()); + krate = p.def_id.krate; + break; + } + _ => { + data.push(key.disambiguated_data); + index = key.parent; + } + } + } + data.reverse(); + DefPath { data: data, krate: krate } + } + + pub fn to_string(&self, tcx: TyCtxt) -> String { + let mut s = String::with_capacity(self.data.len() * 16); + + s.push_str(&tcx.original_crate_name(self.krate).as_str()); + s.push_str("/"); + s.push_str(&tcx.crate_disambiguator(self.krate).as_str()); + + for component in &self.data { + write!(s, + "::{}[{}]", + component.data.as_interned_str(), + component.disambiguator) + .unwrap(); + } + + s + } + + pub fn deterministic_hash(&self, tcx: TyCtxt) -> u64 { + let mut state = DefaultHasher::new(); + self.deterministic_hash_to(tcx, &mut state); + state.finish() + } + + 
pub fn deterministic_hash_to(&self, tcx: TyCtxt, state: &mut H) { + tcx.original_crate_name(self.krate).as_str().hash(state); + tcx.crate_disambiguator(self.krate).as_str().hash(state); + self.data.hash(state); + } +} + +/// Root of an inlined item. We track the `DefPath` of the item within +/// the original crate but also its def-id. This is kind of an +/// augmented version of a `DefPath` that includes a `DefId`. This is +/// all sort of ugly but the hope is that inlined items will be going +/// away soon anyway. +/// +/// Some of the constraints that led to the current approach: +/// +/// - I don't want to have a `DefId` in the main `DefPath` because +/// that gets serialized for incr. comp., and when reloaded the +/// `DefId` is no longer valid. I'd rather maintain the invariant +/// that every `DefId` is valid, and a potentially outdated `DefId` is +/// represented as a `DefPath`. +/// - (We don't serialize def-paths from inlined items, so it's ok to have one here.) +/// - We need to be able to extract the def-id from inline items to +/// make the symbol name. In theory we could retrace it from the +/// data, but the metadata doesn't have the required indices, and I +/// don't want to write the code to create one just for this. +/// - It may be that we don't actually need `data` at all. We'll have +/// to see about that. +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct InlinedRootPath { + pub data: Vec, + pub def_id: DefId, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum DefPathData { + // Root: these should only be used for the root nodes, because + // they are treated specially by the `def_path` function. 
+ /// The crate root (marker) + CrateRoot, + /// An inlined root + InlinedRoot(Box), + + // Catch-all for random DefId things like DUMMY_NODE_ID + Misc, + + // Different kinds of items and item-like things: + /// An impl + Impl, + /// Something in the type NS + TypeNs(InternedString), + /// Something in the value NS + ValueNs(InternedString), + /// A module declaration + Module(InternedString), + /// A macro rule + MacroDef(InternedString), + /// A closure expression + ClosureExpr, + + // Subportions of items + /// A type parameter (generic parameter) + TypeParam(InternedString), + /// A lifetime definition + LifetimeDef(InternedString), + /// A variant of a enum + EnumVariant(InternedString), + /// A struct field + Field(InternedString), + /// Implicit ctor for a tuple-like struct + StructCtor, + /// Initializer for a const + Initializer, + /// Pattern binding + Binding(InternedString), + /// An `impl Trait` type node. + ImplTrait +} + +impl Definitions { + /// Create new empty definition map. + pub fn new() -> Definitions { + Definitions { + data: vec![], + key_map: FxHashMap(), + node_map: NodeMap(), + } + } + + /// Get the number of definitions. + pub fn len(&self) -> usize { + self.data.len() + } + + pub fn def_key(&self, index: DefIndex) -> DefKey { + self.data[index.as_usize()].key.clone() + } + + pub fn def_index_for_def_key(&self, key: DefKey) -> Option { + self.key_map.get(&key).cloned() + } + + /// Returns the path from the crate root to `index`. The root + /// nodes are not included in the path (i.e., this will be an + /// empty vector for the crate root). For an inlined item, this + /// will be the path of the item in the external crate (but the + /// path will begin with the path to the external crate). 
+ pub fn def_path(&self, index: DefIndex) -> DefPath { + DefPath::make(LOCAL_CRATE, index, |p| self.def_key(p)) + } + + pub fn opt_def_index(&self, node: ast::NodeId) -> Option { + self.node_map.get(&node).cloned() + } + + pub fn opt_local_def_id(&self, node: ast::NodeId) -> Option { + self.opt_def_index(node).map(DefId::local) + } + + pub fn local_def_id(&self, node: ast::NodeId) -> DefId { + self.opt_local_def_id(node).unwrap() + } + + pub fn as_local_node_id(&self, def_id: DefId) -> Option { + if def_id.krate == LOCAL_CRATE { + assert!(def_id.index.as_usize() < self.data.len()); + Some(self.data[def_id.index.as_usize()].node_id) + } else { + None + } + } + + /// Add a definition with a parent definition. + pub fn create_def_with_parent(&mut self, + parent: Option, + node_id: ast::NodeId, + data: DefPathData) + -> DefIndex { + debug!("create_def_with_parent(parent={:?}, node_id={:?}, data={:?})", + parent, node_id, data); + + assert!(!self.node_map.contains_key(&node_id), + "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}", + node_id, + data, + self.data[self.node_map[&node_id].as_usize()]); + + assert!(parent.is_some() ^ match data { + DefPathData::CrateRoot | DefPathData::InlinedRoot(_) => true, + _ => false, + }); + + // Find a unique DefKey. This basically means incrementing the disambiguator + // until we get no match. + let mut key = DefKey { + parent: parent, + disambiguated_data: DisambiguatedDefPathData { + data: data, + disambiguator: 0 + } + }; + + while self.key_map.contains_key(&key) { + key.disambiguated_data.disambiguator += 1; + } + + debug!("create_def_with_parent: after disambiguation, key = {:?}", key); + + // Create the definition. 
+ let index = DefIndex::new(self.data.len()); + self.data.push(DefData { key: key.clone(), node_id: node_id }); + debug!("create_def_with_parent: node_map[{:?}] = {:?}", node_id, index); + self.node_map.insert(node_id, index); + debug!("create_def_with_parent: key_map[{:?}] = {:?}", key, index); + self.key_map.insert(key, index); + + + index + } +} + +impl DefPathData { + pub fn get_opt_name(&self) -> Option { + use self::DefPathData::*; + match *self { + TypeNs(ref name) | + ValueNs(ref name) | + Module(ref name) | + MacroDef(ref name) | + TypeParam(ref name) | + LifetimeDef(ref name) | + EnumVariant(ref name) | + Binding(ref name) | + Field(ref name) => Some(Symbol::intern(name)), + + Impl | + CrateRoot | + InlinedRoot(_) | + Misc | + ClosureExpr | + StructCtor | + Initializer | + ImplTrait => None + } + } + + pub fn as_interned_str(&self) -> InternedString { + use self::DefPathData::*; + let s = match *self { + TypeNs(ref name) | + ValueNs(ref name) | + Module(ref name) | + MacroDef(ref name) | + TypeParam(ref name) | + LifetimeDef(ref name) | + EnumVariant(ref name) | + Binding(ref name) | + Field(ref name) => { + return name.clone(); + } + + // note that this does not show up in user printouts + CrateRoot => "{{root}}", + + // note that this does not show up in user printouts + InlinedRoot(_) => "{{inlined-root}}", + + Impl => "{{impl}}", + Misc => "{{?}}", + ClosureExpr => "{{closure}}", + StructCtor => "{{constructor}}", + Initializer => "{{initializer}}", + ImplTrait => "{{impl-Trait}}", + }; + + Symbol::intern(s).as_str() + } + + pub fn to_string(&self) -> String { + self.as_interned_str().to_string() + } +} + diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs new file mode 100644 index 0000000000000..6ce6f6896df29 --- /dev/null +++ b/src/librustc/hir/map/mod.rs @@ -0,0 +1,1124 @@ +// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use self::Node::*; +use self::MapEntry::*; +use self::collector::NodeCollector; +pub use self::def_collector::{DefCollector, MacroInvocationData}; +pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData, + DisambiguatedDefPathData, InlinedRootPath}; + +use dep_graph::{DepGraph, DepNode}; + +use middle::cstore::InlinedItem; +use hir::def_id::{CRATE_DEF_INDEX, DefId, DefIndex}; + +use syntax::abi::Abi; +use syntax::ast::{self, Name, NodeId, CRATE_NODE_ID}; +use syntax::codemap::Spanned; +use syntax_pos::Span; + +use hir::*; +use hir::print as pprust; + +use arena::TypedArena; +use std::cell::RefCell; +use std::io; +use std::mem; + +pub mod blocks; +mod collector; +mod def_collector; +pub mod definitions; + +#[derive(Copy, Clone, Debug)] +pub enum Node<'ast> { + NodeItem(&'ast Item), + NodeForeignItem(&'ast ForeignItem), + NodeTraitItem(&'ast TraitItem), + NodeImplItem(&'ast ImplItem), + NodeVariant(&'ast Variant), + NodeField(&'ast StructField), + NodeExpr(&'ast Expr), + NodeStmt(&'ast Stmt), + NodeTy(&'ast Ty), + NodeTraitRef(&'ast TraitRef), + NodeLocal(&'ast Pat), + NodePat(&'ast Pat), + NodeBlock(&'ast Block), + + /// NodeStructCtor represents a tuple struct. + NodeStructCtor(&'ast VariantData), + + NodeLifetime(&'ast Lifetime), + NodeTyParam(&'ast TyParam), + NodeVisibility(&'ast Visibility), + + NodeInlinedItem(&'ast InlinedItem), +} + +/// Represents an entry and its parent NodeID. +/// The odd layout is to bring down the total size. +#[derive(Copy, Debug)] +pub enum MapEntry<'ast> { + /// Placeholder for holes in the map. + NotPresent, + + /// All the node types, with a parent ID. 
+ EntryItem(NodeId, &'ast Item), + EntryForeignItem(NodeId, &'ast ForeignItem), + EntryTraitItem(NodeId, &'ast TraitItem), + EntryImplItem(NodeId, &'ast ImplItem), + EntryVariant(NodeId, &'ast Variant), + EntryField(NodeId, &'ast StructField), + EntryExpr(NodeId, &'ast Expr), + EntryStmt(NodeId, &'ast Stmt), + EntryTy(NodeId, &'ast Ty), + EntryTraitRef(NodeId, &'ast TraitRef), + EntryLocal(NodeId, &'ast Pat), + EntryPat(NodeId, &'ast Pat), + EntryBlock(NodeId, &'ast Block), + EntryStructCtor(NodeId, &'ast VariantData), + EntryLifetime(NodeId, &'ast Lifetime), + EntryTyParam(NodeId, &'ast TyParam), + EntryVisibility(NodeId, &'ast Visibility), + + /// Roots for node trees. + RootCrate, + RootInlinedParent(&'ast InlinedItem) +} + +impl<'ast> Clone for MapEntry<'ast> { + fn clone(&self) -> MapEntry<'ast> { + *self + } +} + +impl<'ast> MapEntry<'ast> { + fn from_node(p: NodeId, node: Node<'ast>) -> MapEntry<'ast> { + match node { + NodeItem(n) => EntryItem(p, n), + NodeForeignItem(n) => EntryForeignItem(p, n), + NodeTraitItem(n) => EntryTraitItem(p, n), + NodeImplItem(n) => EntryImplItem(p, n), + NodeVariant(n) => EntryVariant(p, n), + NodeField(n) => EntryField(p, n), + NodeExpr(n) => EntryExpr(p, n), + NodeStmt(n) => EntryStmt(p, n), + NodeTy(n) => EntryTy(p, n), + NodeTraitRef(n) => EntryTraitRef(p, n), + NodeLocal(n) => EntryLocal(p, n), + NodePat(n) => EntryPat(p, n), + NodeBlock(n) => EntryBlock(p, n), + NodeStructCtor(n) => EntryStructCtor(p, n), + NodeLifetime(n) => EntryLifetime(p, n), + NodeTyParam(n) => EntryTyParam(p, n), + NodeVisibility(n) => EntryVisibility(p, n), + + NodeInlinedItem(n) => RootInlinedParent(n), + } + } + + fn parent_node(self) -> Option { + Some(match self { + EntryItem(id, _) => id, + EntryForeignItem(id, _) => id, + EntryTraitItem(id, _) => id, + EntryImplItem(id, _) => id, + EntryVariant(id, _) => id, + EntryField(id, _) => id, + EntryExpr(id, _) => id, + EntryStmt(id, _) => id, + EntryTy(id, _) => id, + EntryTraitRef(id, _) => id, + 
EntryLocal(id, _) => id, + EntryPat(id, _) => id, + EntryBlock(id, _) => id, + EntryStructCtor(id, _) => id, + EntryLifetime(id, _) => id, + EntryTyParam(id, _) => id, + EntryVisibility(id, _) => id, + + NotPresent | + RootCrate | + RootInlinedParent(_) => return None, + }) + } + + fn to_node(self) -> Option> { + Some(match self { + EntryItem(_, n) => NodeItem(n), + EntryForeignItem(_, n) => NodeForeignItem(n), + EntryTraitItem(_, n) => NodeTraitItem(n), + EntryImplItem(_, n) => NodeImplItem(n), + EntryVariant(_, n) => NodeVariant(n), + EntryField(_, n) => NodeField(n), + EntryExpr(_, n) => NodeExpr(n), + EntryStmt(_, n) => NodeStmt(n), + EntryTy(_, n) => NodeTy(n), + EntryTraitRef(_, n) => NodeTraitRef(n), + EntryLocal(_, n) => NodeLocal(n), + EntryPat(_, n) => NodePat(n), + EntryBlock(_, n) => NodeBlock(n), + EntryStructCtor(_, n) => NodeStructCtor(n), + EntryLifetime(_, n) => NodeLifetime(n), + EntryTyParam(_, n) => NodeTyParam(n), + EntryVisibility(_, n) => NodeVisibility(n), + RootInlinedParent(n) => NodeInlinedItem(n), + _ => return None + }) + } +} + +/// Stores a crate and any number of inlined items from other crates. +pub struct Forest { + krate: Crate, + pub dep_graph: DepGraph, + inlined_items: TypedArena +} + +impl Forest { + pub fn new(krate: Crate, dep_graph: &DepGraph) -> Forest { + Forest { + krate: krate, + dep_graph: dep_graph.clone(), + inlined_items: TypedArena::new() + } + } + + pub fn krate<'ast>(&'ast self) -> &'ast Crate { + self.dep_graph.read(DepNode::Krate); + &self.krate + } +} + +/// Represents a mapping from Node IDs to AST elements and their parent +/// Node IDs +#[derive(Clone)] +pub struct Map<'ast> { + /// The backing storage for all the AST nodes. + pub forest: &'ast Forest, + + /// Same as the dep_graph in forest, just available with one fewer + /// deref. This is a gratuitious micro-optimization. 
+ pub dep_graph: DepGraph, + + /// NodeIds are sequential integers from 0, so we can be + /// super-compact by storing them in a vector. Not everything with + /// a NodeId is in the map, but empirically the occupancy is about + /// 75-80%, so there's not too much overhead (certainly less than + /// a hashmap, since they (at the time of writing) have a maximum + /// of 75% occupancy). + /// + /// Also, indexing is pretty quick when you've got a vector and + /// plain old integers. + map: RefCell>>, + + definitions: RefCell, + + /// All NodeIds that are numerically greater or equal to this value come + /// from inlined items. + local_node_id_watermark: NodeId, + + /// All def-indices that are numerically greater or equal to this value come + /// from inlined items. + local_def_id_watermark: usize, +} + +impl<'ast> Map<'ast> { + pub fn is_inlined_def_id(&self, id: DefId) -> bool { + id.is_local() && id.index.as_usize() >= self.local_def_id_watermark + } + + pub fn is_inlined_node_id(&self, id: NodeId) -> bool { + id >= self.local_node_id_watermark + } + + /// Registers a read in the dependency graph of the AST node with + /// the given `id`. This needs to be called each time a public + /// function returns the HIR for a node -- in other words, when it + /// "reveals" the content of a node to the caller (who might not + /// otherwise have had access to those contents, and hence needs a + /// read recorded). If the function just returns a DefId or + /// NodeId, no actual content was returned, so no read is needed. 
+ pub fn read(&self, id: NodeId) { + self.dep_graph.read(self.dep_node(id)); + } + + fn dep_node(&self, id0: NodeId) -> DepNode { + let map = self.map.borrow(); + let mut id = id0; + if !self.is_inlined_node_id(id) { + let mut last_expr = None; + loop { + match map[id.as_usize()] { + EntryItem(_, item) => { + assert_eq!(id, item.id); + let def_id = self.local_def_id(id); + assert!(!self.is_inlined_def_id(def_id)); + + if let Some(last_id) = last_expr { + // The body of the item may have a separate dep node + // (Note that trait items don't currently have + // their own dep node, so there's also just one + // HirBody node for all the items) + if self.is_body(last_id, item) { + return DepNode::HirBody(def_id); + } + } + return DepNode::Hir(def_id); + } + + EntryImplItem(_, item) => { + let def_id = self.local_def_id(id); + assert!(!self.is_inlined_def_id(def_id)); + + if let Some(last_id) = last_expr { + // The body of the item may have a separate dep node + if self.is_impl_item_body(last_id, item) { + return DepNode::HirBody(def_id); + } + } + return DepNode::Hir(def_id); + } + + EntryForeignItem(p, _) | + EntryTraitItem(p, _) | + EntryVariant(p, _) | + EntryField(p, _) | + EntryStmt(p, _) | + EntryTy(p, _) | + EntryTraitRef(p, _) | + EntryLocal(p, _) | + EntryPat(p, _) | + EntryBlock(p, _) | + EntryStructCtor(p, _) | + EntryLifetime(p, _) | + EntryTyParam(p, _) | + EntryVisibility(p, _) => + id = p, + + EntryExpr(p, _) => { + last_expr = Some(id); + id = p; + } + + RootCrate => + return DepNode::Krate, + + RootInlinedParent(_) => + bug!("node {} has inlined ancestor but is not inlined", id0), + + NotPresent => + // Some nodes, notably struct fields, are not + // present in the map for whatever reason, but + // they *do* have def-ids. So if we encounter an + // empty hole, check for that case. 
+ return self.opt_local_def_id(id) + .map(|def_id| DepNode::Hir(def_id)) + .unwrap_or_else(|| { + bug!("Walking parents from `{}` \ + led to `NotPresent` at `{}`", + id0, id) + }), + } + } + } else { + // reading from an inlined def-id is really a read out of + // the metadata from which we loaded the item. + loop { + match map[id.as_usize()] { + EntryItem(p, _) | + EntryForeignItem(p, _) | + EntryTraitItem(p, _) | + EntryImplItem(p, _) | + EntryVariant(p, _) | + EntryField(p, _) | + EntryExpr(p, _) | + EntryStmt(p, _) | + EntryTy(p, _) | + EntryTraitRef(p, _) | + EntryLocal(p, _) | + EntryPat(p, _) | + EntryBlock(p, _) | + EntryStructCtor(p, _) | + EntryLifetime(p, _) | + EntryTyParam(p, _) | + EntryVisibility(p, _) => + id = p, + + RootInlinedParent(parent) => + return DepNode::MetaData(parent.def_id), + + RootCrate => + bug!("node {} has crate ancestor but is inlined", id0), + + NotPresent => + bug!("node {} is inlined but not present in map", id0), + } + } + } + } + + fn is_body(&self, node_id: NodeId, item: &Item) -> bool { + match item.node { + ItemFn(_, _, _, _, _, body) => body.node_id() == node_id, + // Since trait items currently don't get their own dep nodes, + // we check here whether node_id is the body of any of the items. 
+ // If they get their own dep nodes, this can go away + ItemTrait(_, _, _, ref trait_items) => { + trait_items.iter().any(|trait_item| { match trait_item.node { + MethodTraitItem(_, Some(body)) => body.node_id() == node_id, + _ => false + }}) + } + _ => false + } + } + + fn is_impl_item_body(&self, node_id: NodeId, item: &ImplItem) -> bool { + match item.node { + ImplItemKind::Method(_, body) => body.node_id() == node_id, + _ => false + } + } + + pub fn num_local_def_ids(&self) -> usize { + self.definitions.borrow().len() + } + + pub fn def_key(&self, def_id: DefId) -> DefKey { + assert!(def_id.is_local()); + self.definitions.borrow().def_key(def_id.index) + } + + pub fn def_path_from_id(&self, id: NodeId) -> Option { + self.opt_local_def_id(id).map(|def_id| { + self.def_path(def_id) + }) + } + + pub fn def_path(&self, def_id: DefId) -> DefPath { + assert!(def_id.is_local()); + self.definitions.borrow().def_path(def_id.index) + } + + pub fn def_index_for_def_key(&self, def_key: DefKey) -> Option { + self.definitions.borrow().def_index_for_def_key(def_key) + } + + pub fn local_def_id(&self, node: NodeId) -> DefId { + self.opt_local_def_id(node).unwrap_or_else(|| { + bug!("local_def_id: no entry for `{}`, which has a map of `{:?}`", + node, self.find_entry(node)) + }) + } + + pub fn opt_local_def_id(&self, node: NodeId) -> Option { + self.definitions.borrow().opt_local_def_id(node) + } + + pub fn as_local_node_id(&self, def_id: DefId) -> Option { + self.definitions.borrow().as_local_node_id(def_id) + } + + fn entry_count(&self) -> usize { + self.map.borrow().len() + } + + fn find_entry(&self, id: NodeId) -> Option> { + self.map.borrow().get(id.as_usize()).cloned() + } + + pub fn krate(&self) -> &'ast Crate { + self.forest.krate() + } + + pub fn impl_item(&self, id: ImplItemId) -> &'ast ImplItem { + self.read(id.node_id); + + // NB: intentionally bypass `self.forest.krate()` so that we + // do not trigger a read of the whole krate here + 
self.forest.krate.impl_item(id) + } + + /// Get the attributes on the krate. This is preferable to + /// invoking `krate.attrs` because it registers a tighter + /// dep-graph access. + pub fn krate_attrs(&self) -> &'ast [ast::Attribute] { + let crate_root_def_id = DefId::local(CRATE_DEF_INDEX); + self.dep_graph.read(DepNode::Hir(crate_root_def_id)); + &self.forest.krate.attrs + } + + /// Retrieve the Node corresponding to `id`, panicking if it cannot + /// be found. + pub fn get(&self, id: NodeId) -> Node<'ast> { + match self.find(id) { + Some(node) => node, // read recorded by `find` + None => bug!("couldn't find node id {} in the AST map", id) + } + } + + pub fn get_if_local(&self, id: DefId) -> Option> { + self.as_local_node_id(id).map(|id| self.get(id)) // read recorded by `get` + } + + /// Retrieve the Node corresponding to `id`, returning None if + /// cannot be found. + pub fn find(&self, id: NodeId) -> Option> { + let result = self.find_entry(id).and_then(|x| x.to_node()); + if result.is_some() { + self.read(id); + } + result + } + + /// Similar to get_parent, returns the parent node id or id if there is no + /// parent. + /// This function returns the immediate parent in the AST, whereas get_parent + /// returns the enclosing item. Note that this might not be the actual parent + /// node in the AST - some kinds of nodes are not in the map and these will + /// never appear as the parent_node. So you can always walk the parent_nodes + /// from a node to the root of the ast (unless you get the same id back here + /// that can happen if the id is not in the map itself or is just weird). + pub fn get_parent_node(&self, id: NodeId) -> NodeId { + self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id) + } + + /// Check if the node is an argument. An argument is a local variable whose + /// immediate parent is an item or a closure. 
+ pub fn is_argument(&self, id: NodeId) -> bool { + match self.find(id) { + Some(NodeLocal(_)) => (), + _ => return false, + } + match self.find(self.get_parent_node(id)) { + Some(NodeItem(_)) | + Some(NodeTraitItem(_)) | + Some(NodeImplItem(_)) => true, + Some(NodeExpr(e)) => { + match e.node { + ExprClosure(..) => true, + _ => false, + } + } + _ => false, + } + } + + /// If there is some error when walking the parents (e.g., a node does not + /// have a parent in the map or a node can't be found), then we return the + /// last good node id we found. Note that reaching the crate root (id == 0), + /// is not an error, since items in the crate module have the crate root as + /// parent. + fn walk_parent_nodes(&self, start_id: NodeId, found: F) -> Result + where F: Fn(&Node<'ast>) -> bool + { + let mut id = start_id; + loop { + let parent_node = self.get_parent_node(id); + if parent_node == CRATE_NODE_ID { + return Ok(CRATE_NODE_ID); + } + if parent_node == id { + return Err(id); + } + + let node = self.find_entry(parent_node); + if node.is_none() { + return Err(id); + } + let node = node.unwrap().to_node(); + match node { + Some(ref node) => { + if found(node) { + return Ok(parent_node); + } + } + None => { + return Err(parent_node); + } + } + id = parent_node; + } + } + + /// Retrieve the NodeId for `id`'s parent item, or `id` itself if no + /// parent item is in this map. The "parent item" is the closest parent node + /// in the AST which is recorded by the map and is an item, either an item + /// in a module, trait, or impl. + pub fn get_parent(&self, id: NodeId) -> NodeId { + match self.walk_parent_nodes(id, |node| match *node { + NodeItem(_) | + NodeForeignItem(_) | + NodeTraitItem(_) | + NodeImplItem(_) => true, + _ => false, + }) { + Ok(id) => id, + Err(id) => id, + } + } + + /// Returns the NodeId of `id`'s nearest module parent, or `id` itself if no + /// module parent is in this map. 
+ pub fn get_module_parent(&self, id: NodeId) -> NodeId { + match self.walk_parent_nodes(id, |node| match *node { + NodeItem(&Item { node: Item_::ItemMod(_), .. }) => true, + _ => false, + }) { + Ok(id) => id, + Err(id) => id, + } + } + + /// Returns the nearest enclosing scope. A scope is an item or block. + /// FIXME it is not clear to me that all items qualify as scopes - statics + /// and associated types probably shouldn't, for example. Behaviour in this + /// regard should be expected to be highly unstable. + pub fn get_enclosing_scope(&self, id: NodeId) -> Option { + match self.walk_parent_nodes(id, |node| match *node { + NodeItem(_) | + NodeForeignItem(_) | + NodeTraitItem(_) | + NodeImplItem(_) | + NodeBlock(_) => true, + _ => false, + }) { + Ok(id) => Some(id), + Err(_) => None, + } + } + + pub fn get_parent_did(&self, id: NodeId) -> DefId { + let parent = self.get_parent(id); + match self.find_entry(parent) { + Some(RootInlinedParent(ii)) => ii.def_id, + _ => self.local_def_id(parent) + } + } + + pub fn get_foreign_abi(&self, id: NodeId) -> Abi { + let parent = self.get_parent(id); + let abi = match self.find_entry(parent) { + Some(EntryItem(_, i)) => { + match i.node { + ItemForeignMod(ref nm) => Some(nm.abi), + _ => None + } + } + /// Wrong but OK, because the only inlined foreign items are intrinsics. 
+ Some(RootInlinedParent(_)) => Some(Abi::RustIntrinsic), + _ => None + }; + match abi { + Some(abi) => { + self.read(id); // reveals some of the content of a node + abi + } + None => bug!("expected foreign mod or inlined parent, found {}", + self.node_to_string(parent)) + } + } + + pub fn expect_item(&self, id: NodeId) -> &'ast Item { + match self.find(id) { // read recorded by `find` + Some(NodeItem(item)) => item, + _ => bug!("expected item, found {}", self.node_to_string(id)) + } + } + + pub fn expect_impl_item(&self, id: NodeId) -> &'ast ImplItem { + match self.find(id) { + Some(NodeImplItem(item)) => item, + _ => bug!("expected impl item, found {}", self.node_to_string(id)) + } + } + + pub fn expect_trait_item(&self, id: NodeId) -> &'ast TraitItem { + match self.find(id) { + Some(NodeTraitItem(item)) => item, + _ => bug!("expected trait item, found {}", self.node_to_string(id)) + } + } + + pub fn expect_variant_data(&self, id: NodeId) -> &'ast VariantData { + match self.find(id) { + Some(NodeItem(i)) => { + match i.node { + ItemStruct(ref struct_def, _) | + ItemUnion(ref struct_def, _) => struct_def, + _ => { + bug!("struct ID bound to non-struct {}", + self.node_to_string(id)); + } + } + } + Some(NodeStructCtor(data)) => data, + Some(NodeVariant(variant)) => &variant.node.data, + _ => { + bug!("expected struct or variant, found {}", + self.node_to_string(id)); + } + } + } + + pub fn expect_variant(&self, id: NodeId) -> &'ast Variant { + match self.find(id) { + Some(NodeVariant(variant)) => variant, + _ => bug!("expected variant, found {}", self.node_to_string(id)), + } + } + + pub fn expect_foreign_item(&self, id: NodeId) -> &'ast ForeignItem { + match self.find(id) { + Some(NodeForeignItem(item)) => item, + _ => bug!("expected foreign item, found {}", self.node_to_string(id)) + } + } + + pub fn expect_expr(&self, id: NodeId) -> &'ast Expr { + match self.find(id) { // read recorded by find + Some(NodeExpr(expr)) => expr, + _ => bug!("expected expr, found 
{}", self.node_to_string(id)) + } + } + + pub fn expect_inlined_item(&self, id: NodeId) -> &'ast InlinedItem { + match self.find_entry(id) { + Some(RootInlinedParent(inlined_item)) => inlined_item, + _ => bug!("expected inlined item, found {}", self.node_to_string(id)), + } + } + + pub fn expr(&self, id: ExprId) -> &'ast Expr { + self.expect_expr(id.node_id()) + } + + /// Returns the name associated with the given NodeId's AST. + pub fn name(&self, id: NodeId) -> Name { + match self.get(id) { + NodeItem(i) => i.name, + NodeForeignItem(i) => i.name, + NodeImplItem(ii) => ii.name, + NodeTraitItem(ti) => ti.name, + NodeVariant(v) => v.node.name, + NodeField(f) => f.name, + NodeLifetime(lt) => lt.name, + NodeTyParam(tp) => tp.name, + NodeLocal(&Pat { node: PatKind::Binding(_,_,l,_), .. }) => l.node, + NodeStructCtor(_) => self.name(self.get_parent(id)), + _ => bug!("no name for {}", self.node_to_string(id)) + } + } + + /// Given a node ID, get a list of attributes associated with the AST + /// corresponding to the Node ID + pub fn attrs(&self, id: NodeId) -> &'ast [ast::Attribute] { + self.read(id); // reveals attributes on the node + let attrs = match self.find(id) { + Some(NodeItem(i)) => Some(&i.attrs[..]), + Some(NodeForeignItem(fi)) => Some(&fi.attrs[..]), + Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]), + Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]), + Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]), + Some(NodeField(ref f)) => Some(&f.attrs[..]), + Some(NodeExpr(ref e)) => Some(&*e.attrs), + Some(NodeStmt(ref s)) => Some(s.node.attrs()), + // unit/tuple structs take the attributes straight from + // the struct definition. + Some(NodeStructCtor(_)) => { + return self.attrs(self.get_parent(id)); + } + _ => None + }; + attrs.unwrap_or(&[]) + } + + /// Returns an iterator that yields the node id's with paths that + /// match `parts`. (Requires `parts` is non-empty.) 
+ /// + /// For example, if given `parts` equal to `["bar", "quux"]`, then + /// the iterator will produce node id's for items with paths + /// such as `foo::bar::quux`, `bar::quux`, `other::bar::quux`, and + /// any other such items it can find in the map. + pub fn nodes_matching_suffix<'a>(&'a self, parts: &'a [String]) + -> NodesMatchingSuffix<'a, 'ast> { + NodesMatchingSuffix { + map: self, + item_name: parts.last().unwrap(), + in_which: &parts[..parts.len() - 1], + idx: CRATE_NODE_ID, + } + } + + pub fn span(&self, id: NodeId) -> Span { + self.read(id); // reveals span from node + match self.find_entry(id) { + Some(EntryItem(_, item)) => item.span, + Some(EntryForeignItem(_, foreign_item)) => foreign_item.span, + Some(EntryTraitItem(_, trait_method)) => trait_method.span, + Some(EntryImplItem(_, impl_item)) => impl_item.span, + Some(EntryVariant(_, variant)) => variant.span, + Some(EntryField(_, field)) => field.span, + Some(EntryExpr(_, expr)) => expr.span, + Some(EntryStmt(_, stmt)) => stmt.span, + Some(EntryTy(_, ty)) => ty.span, + Some(EntryTraitRef(_, tr)) => tr.path.span, + Some(EntryLocal(_, pat)) => pat.span, + Some(EntryPat(_, pat)) => pat.span, + Some(EntryBlock(_, block)) => block.span, + Some(EntryStructCtor(_, _)) => self.expect_item(self.get_parent(id)).span, + Some(EntryLifetime(_, lifetime)) => lifetime.span, + Some(EntryTyParam(_, ty_param)) => ty_param.span, + Some(EntryVisibility(_, &Visibility::Restricted { ref path, .. 
})) => path.span, + Some(EntryVisibility(_, v)) => bug!("unexpected Visibility {:?}", v), + + Some(RootCrate) => self.krate().span, + Some(RootInlinedParent(parent)) => parent.body.span, + Some(NotPresent) | None => { + bug!("hir::map::Map::span: id not in map: {:?}", id) + } + } + } + + pub fn span_if_local(&self, id: DefId) -> Option { + self.as_local_node_id(id).map(|id| self.span(id)) + } + + pub fn node_to_string(&self, id: NodeId) -> String { + node_id_to_string(self, id, true) + } + + pub fn node_to_user_string(&self, id: NodeId) -> String { + node_id_to_string(self, id, false) + } +} + +pub struct NodesMatchingSuffix<'a, 'ast:'a> { + map: &'a Map<'ast>, + item_name: &'a String, + in_which: &'a [String], + idx: NodeId, +} + +impl<'a, 'ast> NodesMatchingSuffix<'a, 'ast> { + /// Returns true only if some suffix of the module path for parent + /// matches `self.in_which`. + /// + /// In other words: let `[x_0,x_1,...,x_k]` be `self.in_which`; + /// returns true if parent's path ends with the suffix + /// `x_0::x_1::...::x_k`. + fn suffix_matches(&self, parent: NodeId) -> bool { + let mut cursor = parent; + for part in self.in_which.iter().rev() { + let (mod_id, mod_name) = match find_first_mod_parent(self.map, cursor) { + None => return false, + Some((node_id, name)) => (node_id, name), + }; + if mod_name != &**part { + return false; + } + cursor = self.map.get_parent(mod_id); + } + return true; + + // Finds the first mod in parent chain for `id`, along with + // that mod's name. + // + // If `id` itself is a mod named `m` with parent `p`, then + // returns `Some(id, m, p)`. If `id` has no mod in its parent + // chain, then returns `None`. 
+ fn find_first_mod_parent<'a>(map: &'a Map, mut id: NodeId) -> Option<(NodeId, Name)> { + loop { + match map.find(id) { + None => return None, + Some(NodeItem(item)) if item_is_mod(&item) => + return Some((id, item.name)), + _ => {} + } + let parent = map.get_parent(id); + if parent == id { return None } + id = parent; + } + + fn item_is_mod(item: &Item) -> bool { + match item.node { + ItemMod(_) => true, + _ => false, + } + } + } + } + + // We are looking at some node `n` with a given name and parent + // id; do their names match what I am seeking? + fn matches_names(&self, parent_of_n: NodeId, name: Name) -> bool { + name == &**self.item_name && self.suffix_matches(parent_of_n) + } +} + +impl<'a, 'ast> Iterator for NodesMatchingSuffix<'a, 'ast> { + type Item = NodeId; + + fn next(&mut self) -> Option { + loop { + let idx = self.idx; + if idx.as_usize() >= self.map.entry_count() { + return None; + } + self.idx = NodeId::from_u32(self.idx.as_u32() + 1); + let name = match self.map.find_entry(idx) { + Some(EntryItem(_, n)) => n.name(), + Some(EntryForeignItem(_, n))=> n.name(), + Some(EntryTraitItem(_, n)) => n.name(), + Some(EntryImplItem(_, n)) => n.name(), + Some(EntryVariant(_, n)) => n.name(), + Some(EntryField(_, n)) => n.name(), + _ => continue, + }; + if self.matches_names(self.map.get_parent(idx), name) { + return Some(idx) + } + } + } +} + +trait Named { + fn name(&self) -> Name; +} + +impl Named for Spanned { fn name(&self) -> Name { self.node.name() } } + +impl Named for Item { fn name(&self) -> Name { self.name } } +impl Named for ForeignItem { fn name(&self) -> Name { self.name } } +impl Named for Variant_ { fn name(&self) -> Name { self.name } } +impl Named for StructField { fn name(&self) -> Name { self.name } } +impl Named for TraitItem { fn name(&self) -> Name { self.name } } +impl Named for ImplItem { fn name(&self) -> Name { self.name } } + +pub fn map_crate<'ast>(forest: &'ast mut Forest, + definitions: Definitions) + -> Map<'ast> { + let mut 
collector = NodeCollector::root(&forest.krate); + intravisit::walk_crate(&mut collector, &forest.krate); + let map = collector.map; + + if log_enabled!(::log::DEBUG) { + // This only makes sense for ordered stores; note the + // enumerate to count the number of entries. + let (entries_less_1, _) = map.iter().filter(|&x| { + match *x { + NotPresent => false, + _ => true + } + }).enumerate().last().expect("AST map was empty after folding?"); + + let entries = entries_less_1 + 1; + let vector_length = map.len(); + debug!("The AST map has {} entries with a maximum of {}: occupancy {:.1}%", + entries, vector_length, (entries as f64 / vector_length as f64) * 100.); + } + + let local_node_id_watermark = NodeId::new(map.len()); + let local_def_id_watermark = definitions.len(); + + Map { + forest: forest, + dep_graph: forest.dep_graph.clone(), + map: RefCell::new(map), + definitions: RefCell::new(definitions), + local_node_id_watermark: local_node_id_watermark, + local_def_id_watermark: local_def_id_watermark, + } +} + +/// Used for items loaded from external crate that are being inlined into this +/// crate. 
+pub fn map_decoded_item<'ast>(map: &Map<'ast>, + parent_def_path: DefPath, + parent_def_id: DefId, + ii: InlinedItem, + ii_parent_id: NodeId) + -> &'ast InlinedItem { + let _ignore = map.forest.dep_graph.in_ignore(); + + let ii = map.forest.inlined_items.alloc(ii); + + let defs = &mut *map.definitions.borrow_mut(); + let mut def_collector = DefCollector::extend(ii_parent_id, + parent_def_path.clone(), + parent_def_id, + defs); + def_collector.walk_item(ii, map.krate()); + + let mut collector = NodeCollector::extend(map.krate(), + ii, + ii_parent_id, + parent_def_path, + parent_def_id, + mem::replace(&mut *map.map.borrow_mut(), vec![])); + ii.visit(&mut collector); + *map.map.borrow_mut() = collector.map; + + ii +} + +pub trait NodePrinter { + fn print_node(&mut self, node: &Node) -> io::Result<()>; +} + +impl<'a> NodePrinter for pprust::State<'a> { + fn print_node(&mut self, node: &Node) -> io::Result<()> { + match *node { + NodeItem(a) => self.print_item(&a), + NodeForeignItem(a) => self.print_foreign_item(&a), + NodeTraitItem(a) => self.print_trait_item(a), + NodeImplItem(a) => self.print_impl_item(a), + NodeVariant(a) => self.print_variant(&a), + NodeExpr(a) => self.print_expr(&a), + NodeStmt(a) => self.print_stmt(&a), + NodeTy(a) => self.print_type(&a), + NodeTraitRef(a) => self.print_trait_ref(&a), + NodePat(a) => self.print_pat(&a), + NodeBlock(a) => self.print_block(&a), + NodeLifetime(a) => self.print_lifetime(&a), + NodeVisibility(a) => self.print_visibility(&a), + NodeTyParam(_) => bug!("cannot print TyParam"), + NodeField(_) => bug!("cannot print StructField"), + // these cases do not carry enough information in the + // ast_map to reconstruct their full structure for pretty + // printing. 
+ NodeLocal(_) => bug!("cannot print isolated Local"), + NodeStructCtor(_) => bug!("cannot print isolated StructCtor"), + + NodeInlinedItem(_) => bug!("cannot print inlined item"), + } + } +} + +fn node_id_to_string(map: &Map, id: NodeId, include_id: bool) -> String { + let id_str = format!(" (id={})", id); + let id_str = if include_id { &id_str[..] } else { "" }; + + let path_str = || { + // This functionality is used for debugging, try to use TyCtxt to get + // the user-friendly path, otherwise fall back to stringifying DefPath. + ::ty::tls::with_opt(|tcx| { + if let Some(tcx) = tcx { + tcx.node_path_str(id) + } else if let Some(path) = map.def_path_from_id(id) { + path.data.into_iter().map(|elem| { + elem.data.to_string() + }).collect::>().join("::") + } else { + String::from("") + } + }) + }; + + match map.find(id) { + Some(NodeItem(item)) => { + let item_str = match item.node { + ItemExternCrate(..) => "extern crate", + ItemUse(..) => "use", + ItemStatic(..) => "static", + ItemConst(..) => "const", + ItemFn(..) => "fn", + ItemMod(..) => "mod", + ItemForeignMod(..) => "foreign mod", + ItemTy(..) => "ty", + ItemEnum(..) => "enum", + ItemStruct(..) => "struct", + ItemUnion(..) => "union", + ItemTrait(..) => "trait", + ItemImpl(..) => "impl", + ItemDefaultImpl(..) => "default impl", + }; + format!("{} {}{}", item_str, path_str(), id_str) + } + Some(NodeForeignItem(_)) => { + format!("foreign item {}{}", path_str(), id_str) + } + Some(NodeImplItem(ii)) => { + match ii.node { + ImplItemKind::Const(..) => { + format!("assoc const {} in {}{}", ii.name, path_str(), id_str) + } + ImplItemKind::Method(..) => { + format!("method {} in {}{}", ii.name, path_str(), id_str) + } + ImplItemKind::Type(_) => { + format!("assoc type {} in {}{}", ii.name, path_str(), id_str) + } + } + } + Some(NodeTraitItem(ti)) => { + let kind = match ti.node { + ConstTraitItem(..) => "assoc constant", + MethodTraitItem(..) => "trait method", + TypeTraitItem(..) 
=> "assoc type", + }; + + format!("{} {} in {}{}", kind, ti.name, path_str(), id_str) + } + Some(NodeVariant(ref variant)) => { + format!("variant {} in {}{}", + variant.node.name, + path_str(), id_str) + } + Some(NodeField(ref field)) => { + format!("field {} in {}{}", + field.name, + path_str(), id_str) + } + Some(NodeExpr(ref expr)) => { + format!("expr {}{}", pprust::expr_to_string(&expr), id_str) + } + Some(NodeStmt(ref stmt)) => { + format!("stmt {}{}", pprust::stmt_to_string(&stmt), id_str) + } + Some(NodeTy(ref ty)) => { + format!("type {}{}", pprust::ty_to_string(&ty), id_str) + } + Some(NodeTraitRef(ref tr)) => { + format!("trait_ref {}{}", pprust::path_to_string(&tr.path), id_str) + } + Some(NodeLocal(ref pat)) => { + format!("local {}{}", pprust::pat_to_string(&pat), id_str) + } + Some(NodePat(ref pat)) => { + format!("pat {}{}", pprust::pat_to_string(&pat), id_str) + } + Some(NodeBlock(ref block)) => { + format!("block {}{}", pprust::block_to_string(&block), id_str) + } + Some(NodeStructCtor(_)) => { + format!("struct_ctor {}{}", path_str(), id_str) + } + Some(NodeLifetime(ref l)) => { + format!("lifetime {}{}", + pprust::lifetime_to_string(&l), id_str) + } + Some(NodeTyParam(ref ty_param)) => { + format!("typaram {:?}{}", ty_param, id_str) + } + Some(NodeVisibility(ref vis)) => { + format!("visibility {:?}{}", vis, id_str) + } + Some(NodeInlinedItem(_)) => { + format!("inlined item {}", id_str) + } + None => { + format!("unknown node{}", id_str) + } + } +} diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs new file mode 100644 index 0000000000000..4fd8f96ba046a --- /dev/null +++ b/src/librustc/hir/mod.rs @@ -0,0 +1,1697 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// The Rust HIR. + +pub use self::BindingMode::*; +pub use self::BinOp_::*; +pub use self::BlockCheckMode::*; +pub use self::CaptureClause::*; +pub use self::Decl_::*; +pub use self::Expr_::*; +pub use self::FunctionRetTy::*; +pub use self::ForeignItem_::*; +pub use self::Item_::*; +pub use self::Mutability::*; +pub use self::PrimTy::*; +pub use self::Stmt_::*; +pub use self::TraitItem_::*; +pub use self::Ty_::*; +pub use self::TyParamBound::*; +pub use self::UnOp::*; +pub use self::UnsafeSource::*; +pub use self::Visibility::{Public, Inherited}; +pub use self::PathParameters::*; + +use hir::def::Def; +use hir::def_id::DefId; +use util::nodemap::{NodeMap, FxHashSet}; +use rustc_data_structures::fnv::FnvHashMap; + +use syntax_pos::{mk_sp, Span, ExpnId, DUMMY_SP}; +use syntax::codemap::{self, respan, Spanned}; +use syntax::abi::Abi; +use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, AsmDialect}; +use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem}; +use syntax::ptr::P; +use syntax::symbol::{Symbol, keywords}; +use syntax::tokenstream::TokenTree; +use syntax::util::ThinVec; + +use std::collections::BTreeMap; +use std::fmt; + +/// HIR doesn't commit to a concrete storage type and have its own alias for a vector. +/// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar +/// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead +/// of `Vec` to avoid keeping extra capacity. +pub type HirVec = P<[T]>; + +macro_rules! 
hir_vec { + ($elem:expr; $n:expr) => ( + $crate::hir::HirVec::from(vec![$elem; $n]) + ); + ($($x:expr),*) => ( + $crate::hir::HirVec::from(vec![$($x),*]) + ); + ($($x:expr,)*) => (hir_vec![$($x),*]) +} + +pub mod check_attr; +pub mod def; +pub mod def_id; +pub mod intravisit; +pub mod itemlikevisit; +pub mod lowering; +pub mod map; +pub mod pat_util; +pub mod print; +pub mod svh; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] +pub struct Lifetime { + pub id: NodeId, + pub span: Span, + pub name: Name, +} + +impl fmt::Debug for Lifetime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, + "lifetime({}: {})", + self.id, + print::lifetime_to_string(self)) + } +} + +/// A lifetime definition, eg `'a: 'b+'c+'d` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct LifetimeDef { + pub lifetime: Lifetime, + pub bounds: HirVec, + pub pure_wrt_drop: bool, +} + +/// A "Path" is essentially Rust's notion of a name; for instance: +/// std::cmp::PartialEq . It's represented as a sequence of identifiers, +/// along with a bunch of supporting information. +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] +pub struct Path { + pub span: Span, + /// A `::foo` path, is relative to the crate root rather than current + /// module (like paths in an import). + pub global: bool, + /// The definition that the path resolved to. + pub def: Def, + /// The segments in the path: the things separated by `::`. + pub segments: HirVec, +} + +impl fmt::Debug for Path { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "path({})", print::path_to_string(self)) + } +} + +impl fmt::Display for Path { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", print::path_to_string(self)) + } +} + +/// A segment of a path: an identifier, an optional lifetime, and a set of +/// types. 
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct PathSegment { + /// The identifier portion of this path segment. + pub name: Name, + + /// Type/lifetime parameters attached to this path. They come in + /// two flavors: `Path` and `Path(A,B) -> C`. Note that + /// this is more than just simple syntactic sugar; the use of + /// parens affects the region binding rules, so we preserve the + /// distinction. + pub parameters: PathParameters, +} + +impl PathSegment { + /// Convert an identifier to the corresponding segment. + pub fn from_name(name: Name) -> PathSegment { + PathSegment { + name: name, + parameters: PathParameters::none() + } + } +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum PathParameters { + /// The `<'a, A,B,C>` in `foo::bar::baz::<'a, A,B,C>` + AngleBracketedParameters(AngleBracketedParameterData), + /// The `(A,B)` and `C` in `Foo(A,B) -> C` + ParenthesizedParameters(ParenthesizedParameterData), +} + +impl PathParameters { + pub fn none() -> PathParameters { + AngleBracketedParameters(AngleBracketedParameterData { + lifetimes: HirVec::new(), + types: HirVec::new(), + infer_types: true, + bindings: HirVec::new(), + }) + } + + pub fn is_empty(&self) -> bool { + match *self { + AngleBracketedParameters(ref data) => data.is_empty(), + + // Even if the user supplied no types, something like + // `X()` is equivalent to `X<(),()>`. + ParenthesizedParameters(..) => false, + } + } + + pub fn has_lifetimes(&self) -> bool { + match *self { + AngleBracketedParameters(ref data) => !data.lifetimes.is_empty(), + ParenthesizedParameters(_) => false, + } + } + + pub fn has_types(&self) -> bool { + match *self { + AngleBracketedParameters(ref data) => !data.types.is_empty(), + ParenthesizedParameters(..) => true, + } + } + + /// Returns the types that the user wrote. Note that these do not necessarily map to the type + /// parameters in the parenthesized case. 
+ pub fn types(&self) -> HirVec<&P> { + match *self { + AngleBracketedParameters(ref data) => { + data.types.iter().collect() + } + ParenthesizedParameters(ref data) => { + data.inputs + .iter() + .chain(data.output.iter()) + .collect() + } + } + } + + pub fn lifetimes(&self) -> HirVec<&Lifetime> { + match *self { + AngleBracketedParameters(ref data) => { + data.lifetimes.iter().collect() + } + ParenthesizedParameters(_) => { + HirVec::new() + } + } + } + + pub fn bindings(&self) -> HirVec<&TypeBinding> { + match *self { + AngleBracketedParameters(ref data) => { + data.bindings.iter().collect() + } + ParenthesizedParameters(_) => { + HirVec::new() + } + } + } +} + +/// A path like `Foo<'a, T>` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct AngleBracketedParameterData { + /// The lifetime parameters for this path segment. + pub lifetimes: HirVec, + /// The type parameters for this path segment, if present. + pub types: HirVec>, + /// Whether to infer remaining type parameters, if any. + /// This only applies to expression and pattern paths, and + /// out of those only the segments with no type parameters + /// to begin with, e.g. `Vec::new` is `>::new::<..>`. + pub infer_types: bool, + /// Bindings (equality constraints) on associated types, if present. + /// E.g., `Foo`. + pub bindings: HirVec, +} + +impl AngleBracketedParameterData { + fn is_empty(&self) -> bool { + self.lifetimes.is_empty() && self.types.is_empty() && self.bindings.is_empty() + } +} + +/// A path like `Foo(A,B) -> C` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct ParenthesizedParameterData { + /// Overall span + pub span: Span, + + /// `(A,B)` + pub inputs: HirVec>, + + /// `C` + pub output: Option>, +} + +/// The AST represents all type param bounds as types. 
+/// typeck::collect::compute_bounds matches these against +/// the "special" built-in traits (see middle::lang_items) and +/// detects Copy, Send and Sync. +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum TyParamBound { + TraitTyParamBound(PolyTraitRef, TraitBoundModifier), + RegionTyParamBound(Lifetime), +} + +/// A modifier on a bound, currently this is only used for `?Sized`, where the +/// modifier is `Maybe`. Negative bounds should also be handled here. +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum TraitBoundModifier { + None, + Maybe, +} + +pub type TyParamBounds = HirVec; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct TyParam { + pub name: Name, + pub id: NodeId, + pub bounds: TyParamBounds, + pub default: Option>, + pub span: Span, + pub pure_wrt_drop: bool, +} + +/// Represents lifetimes and type parameters attached to a declaration +/// of a function, enum, trait, etc. 
+#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct Generics { + pub lifetimes: HirVec, + pub ty_params: HirVec, + pub where_clause: WhereClause, + pub span: Span, +} + +impl Generics { + pub fn empty() -> Generics { + Generics { + lifetimes: HirVec::new(), + ty_params: HirVec::new(), + where_clause: WhereClause { + id: DUMMY_NODE_ID, + predicates: HirVec::new(), + }, + span: DUMMY_SP, + } + } + + pub fn is_lt_parameterized(&self) -> bool { + !self.lifetimes.is_empty() + } + + pub fn is_type_parameterized(&self) -> bool { + !self.ty_params.is_empty() + } + + pub fn is_parameterized(&self) -> bool { + self.is_lt_parameterized() || self.is_type_parameterized() + } +} + +pub enum UnsafeGeneric { + Region(LifetimeDef, &'static str), + Type(TyParam, &'static str), +} + +impl UnsafeGeneric { + pub fn attr_name(&self) -> &'static str { + match *self { + UnsafeGeneric::Region(_, s) => s, + UnsafeGeneric::Type(_, s) => s, + } + } +} + +impl Generics { + pub fn carries_unsafe_attr(&self) -> Option { + for r in &self.lifetimes { + if r.pure_wrt_drop { + return Some(UnsafeGeneric::Region(r.clone(), "may_dangle")); + } + } + for t in &self.ty_params { + if t.pure_wrt_drop { + return Some(UnsafeGeneric::Type(t.clone(), "may_dangle")); + } + } + return None; + } +} + +/// A `where` clause in a definition +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct WhereClause { + pub id: NodeId, + pub predicates: HirVec, +} + +/// A single predicate in a `where` clause +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum WherePredicate { + /// A type binding, eg `for<'c> Foo: Send+Clone+'c` + BoundPredicate(WhereBoundPredicate), + /// A lifetime predicate, e.g. 
`'a: 'b+'c` + RegionPredicate(WhereRegionPredicate), + /// An equality predicate (unsupported) + EqPredicate(WhereEqPredicate), +} + +/// A type bound, eg `for<'c> Foo: Send+Clone+'c` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct WhereBoundPredicate { + pub span: Span, + /// Any lifetimes from a `for` binding + pub bound_lifetimes: HirVec, + /// The type being bounded + pub bounded_ty: P, + /// Trait and lifetime bounds (`Clone+Send+'static`) + pub bounds: TyParamBounds, +} + +/// A lifetime predicate, e.g. `'a: 'b+'c` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct WhereRegionPredicate { + pub span: Span, + pub lifetime: Lifetime, + pub bounds: HirVec, +} + +/// An equality predicate (unsupported), e.g. `T=int` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct WhereEqPredicate { + pub id: NodeId, + pub span: Span, + pub path: Path, + pub ty: P, +} + +pub type CrateConfig = HirVec>; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)] +pub struct Crate { + pub module: Mod, + pub attrs: HirVec, + pub span: Span, + pub exported_macros: HirVec, + + // NB: We use a BTreeMap here so that `visit_all_items` iterates + // over the ids in increasing order. In principle it should not + // matter what order we visit things in, but in *practice* it + // does, because it can affect the order in which errors are + // detected, which in turn can make compile-fail tests yield + // slightly different results. + pub items: BTreeMap, + + pub impl_items: BTreeMap, + pub exprs: FnvHashMap, +} + +impl Crate { + pub fn item(&self, id: NodeId) -> &Item { + &self.items[&id] + } + + pub fn impl_item(&self, id: ImplItemId) -> &ImplItem { + &self.impl_items[&id] + } + + /// Visits all items in the crate in some determinstic (but + /// unspecified) order. 
If you just need to process every item, + /// but don't care about nesting, this method is the best choice. + /// + /// If you do care about nesting -- usually because your algorithm + /// follows lexical scoping rules -- then you want a different + /// approach. You should override `visit_nested_item` in your + /// visitor and then call `intravisit::walk_crate` instead. + pub fn visit_all_item_likes<'hir, V>(&'hir self, visitor: &mut V) + where V: itemlikevisit::ItemLikeVisitor<'hir> + { + for (_, item) in &self.items { + visitor.visit_item(item); + } + + for (_, impl_item) in &self.impl_items { + visitor.visit_impl_item(impl_item); + } + } + + pub fn expr(&self, id: ExprId) -> &Expr { + &self.exprs[&id] + } +} + +/// A macro definition, in this crate or imported from another. +/// +/// Not parsed directly, but created on macro import or `macro_rules!` expansion. +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct MacroDef { + pub name: Name, + pub attrs: HirVec, + pub id: NodeId, + pub span: Span, + pub imported_from: Option, + pub allow_internal_unstable: bool, + pub body: HirVec, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct Block { + /// Statements in a block + pub stmts: HirVec, + /// An expression at the end of the block + /// without a semicolon, if any + pub expr: Option>, + pub id: NodeId, + /// Distinguishes between `unsafe { ... }` and `{ ... 
}` + pub rules: BlockCheckMode, + pub span: Span, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] +pub struct Pat { + pub id: NodeId, + pub node: PatKind, + pub span: Span, +} + +impl fmt::Debug for Pat { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "pat({}: {})", self.id, print::pat_to_string(self)) + } +} + +impl Pat { + // FIXME(#19596) this is a workaround, but there should be a better way + fn walk_(&self, it: &mut G) -> bool + where G: FnMut(&Pat) -> bool + { + if !it(self) { + return false; + } + + match self.node { + PatKind::Binding(.., Some(ref p)) => p.walk_(it), + PatKind::Struct(_, ref fields, _) => { + fields.iter().all(|field| field.node.pat.walk_(it)) + } + PatKind::TupleStruct(_, ref s, _) | PatKind::Tuple(ref s, _) => { + s.iter().all(|p| p.walk_(it)) + } + PatKind::Box(ref s) | PatKind::Ref(ref s, _) => { + s.walk_(it) + } + PatKind::Slice(ref before, ref slice, ref after) => { + before.iter().all(|p| p.walk_(it)) && + slice.iter().all(|p| p.walk_(it)) && + after.iter().all(|p| p.walk_(it)) + } + PatKind::Wild | + PatKind::Lit(_) | + PatKind::Range(..) | + PatKind::Binding(..) 
| + PatKind::Path(_) => { + true + } + } + } + + pub fn walk(&self, mut it: F) -> bool + where F: FnMut(&Pat) -> bool + { + self.walk_(&mut it) + } +} + +/// A single field in a struct pattern +/// +/// Patterns like the fields of Foo `{ x, ref y, ref mut z }` +/// are treated the same as` x: x, y: ref y, z: ref mut z`, +/// except is_shorthand is true +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct FieldPat { + /// The identifier for the field + pub name: Name, + /// The pattern the field is destructured to + pub pat: P, + pub is_shorthand: bool, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum BindingMode { + BindByRef(Mutability), + BindByValue(Mutability), +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum PatKind { + /// Represents a wildcard pattern (`_`) + Wild, + + /// A fresh binding `ref mut binding @ OPT_SUBPATTERN`. + /// The `DefId` is for the definition of the variable being bound. + Binding(BindingMode, DefId, Spanned, Option>), + + /// A struct or struct variant pattern, e.g. `Variant {x, y, ..}`. + /// The `bool` is `true` in the presence of a `..`. + Struct(QPath, HirVec>, bool), + + /// A tuple struct/variant pattern `Variant(x, y, .., z)`. + /// If the `..` pattern fragment is present, then `Option` denotes its position. + /// 0 <= position <= subpats.len() + TupleStruct(QPath, HirVec>, Option), + + /// A path pattern for an unit struct/variant or a (maybe-associated) constant. + Path(QPath), + + /// A tuple pattern `(a, b)`. + /// If the `..` pattern fragment is present, then `Option` denotes its position. + /// 0 <= position <= subpats.len() + Tuple(HirVec>, Option), + /// A `box` pattern + Box(P), + /// A reference pattern, e.g. `&mut (a, b)` + Ref(P, Mutability), + /// A literal + Lit(P), + /// A range pattern, e.g. 
`1...2` + Range(P, P), + /// `[a, b, ..i, y, z]` is represented as: + /// `PatKind::Slice(box [a, b], Some(i), box [y, z])` + Slice(HirVec>, Option>, HirVec>), +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum Mutability { + MutMutable, + MutImmutable, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum BinOp_ { + /// The `+` operator (addition) + BiAdd, + /// The `-` operator (subtraction) + BiSub, + /// The `*` operator (multiplication) + BiMul, + /// The `/` operator (division) + BiDiv, + /// The `%` operator (modulus) + BiRem, + /// The `&&` operator (logical and) + BiAnd, + /// The `||` operator (logical or) + BiOr, + /// The `^` operator (bitwise xor) + BiBitXor, + /// The `&` operator (bitwise and) + BiBitAnd, + /// The `|` operator (bitwise or) + BiBitOr, + /// The `<<` operator (shift left) + BiShl, + /// The `>>` operator (shift right) + BiShr, + /// The `==` operator (equality) + BiEq, + /// The `<` operator (less than) + BiLt, + /// The `<=` operator (less than or equal to) + BiLe, + /// The `!=` operator (not equal to) + BiNe, + /// The `>=` operator (greater than or equal to) + BiGe, + /// The `>` operator (greater than) + BiGt, +} + +impl BinOp_ { + pub fn as_str(self) -> &'static str { + match self { + BiAdd => "+", + BiSub => "-", + BiMul => "*", + BiDiv => "/", + BiRem => "%", + BiAnd => "&&", + BiOr => "||", + BiBitXor => "^", + BiBitAnd => "&", + BiBitOr => "|", + BiShl => "<<", + BiShr => ">>", + BiEq => "==", + BiLt => "<", + BiLe => "<=", + BiNe => "!=", + BiGe => ">=", + BiGt => ">", + } + } + + pub fn is_lazy(self) -> bool { + match self { + BiAnd | BiOr => true, + _ => false, + } + } + + pub fn is_shift(self) -> bool { + match self { + BiShl | BiShr => true, + _ => false, + } + } + + pub fn is_comparison(self) -> bool { + match self { + BiEq | BiLt | BiLe | BiNe | BiGt | BiGe => true, + BiAnd | + BiOr | + BiAdd | + BiSub | + BiMul | + BiDiv | + 
BiRem | + BiBitXor | + BiBitAnd | + BiBitOr | + BiShl | + BiShr => false, + } + } + + /// Returns `true` if the binary operator takes its arguments by value + pub fn is_by_value(self) -> bool { + !self.is_comparison() + } +} + +pub type BinOp = Spanned; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum UnOp { + /// The `*` operator for dereferencing + UnDeref, + /// The `!` operator for logical inversion + UnNot, + /// The `-` operator for negation + UnNeg, +} + +impl UnOp { + pub fn as_str(self) -> &'static str { + match self { + UnDeref => "*", + UnNot => "!", + UnNeg => "-", + } + } + + /// Returns `true` if the unary operator takes its argument by value + pub fn is_by_value(self) -> bool { + match self { + UnNeg | UnNot => true, + _ => false, + } + } +} + +/// A statement +pub type Stmt = Spanned; + +impl fmt::Debug for Stmt_ { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Sadness. + let spanned = codemap::dummy_spanned(self.clone()); + write!(f, + "stmt({}: {})", + spanned.node.id(), + print::stmt_to_string(&spanned)) + } +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] +pub enum Stmt_ { + /// Could be an item or a local (let) binding: + StmtDecl(P, NodeId), + + /// Expr without trailing semi-colon (must have unit type): + StmtExpr(P, NodeId), + + /// Expr with trailing semi-colon (may have any type): + StmtSemi(P, NodeId), +} + +impl Stmt_ { + pub fn attrs(&self) -> &[Attribute] { + match *self { + StmtDecl(ref d, _) => d.node.attrs(), + StmtExpr(ref e, _) | + StmtSemi(ref e, _) => &e.attrs, + } + } + + pub fn id(&self) -> NodeId { + match *self { + StmtDecl(_, id) => id, + StmtExpr(_, id) => id, + StmtSemi(_, id) => id, + } + } +} + +// FIXME (pending discussion of #1697, #2178...): local should really be +// a refinement on pat. 
+/// Local represents a `let` statement, e.g., `let : = ;` +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct Local { + pub pat: P, + pub ty: Option>, + /// Initializer expression to set the value, if any + pub init: Option>, + pub id: NodeId, + pub span: Span, + pub attrs: ThinVec, +} + +pub type Decl = Spanned; + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum Decl_ { + /// A local (let) binding: + DeclLocal(P), + /// An item binding: + DeclItem(ItemId), +} + +impl Decl_ { + pub fn attrs(&self) -> &[Attribute] { + match *self { + DeclLocal(ref l) => &l.attrs, + DeclItem(_) => &[] + } + } +} + +/// represents one arm of a 'match' +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct Arm { + pub attrs: HirVec, + pub pats: HirVec>, + pub guard: Option>, + pub body: P, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct Field { + pub name: Spanned, + pub expr: P, + pub span: Span, + pub is_shorthand: bool, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum BlockCheckMode { + DefaultBlock, + UnsafeBlock(UnsafeSource), + PushUnsafeBlock(UnsafeSource), + PopUnsafeBlock(UnsafeSource), +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +pub enum UnsafeSource { + CompilerGenerated, + UserProvided, +} + +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct ExprId(NodeId); + +impl ExprId { + pub fn node_id(self) -> NodeId { + self.0 + } +} + +/// An expression +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] +pub struct Expr { + pub id: NodeId, + pub span: Span, + pub node: Expr_, + pub attrs: ThinVec, +} + +impl Expr { + pub fn expr_id(&self) -> ExprId { + ExprId(self.id) + } +} + +impl fmt::Debug for Expr { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + 
write!(f, "expr({}: {})", self.id, print::expr_to_string(self)) + } +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum Expr_ { + /// A `box x` expression. + ExprBox(P), + /// An array (`[a, b, c, d]`) + ExprArray(HirVec), + /// A function call + /// + /// The first field resolves to the function itself (usually an `ExprPath`), + /// and the second field is the list of arguments + ExprCall(P, HirVec), + /// A method call (`x.foo::(a, b, c, d)`) + /// + /// The `Spanned` is the identifier for the method name. + /// The vector of `Ty`s are the ascripted type parameters for the method + /// (within the angle brackets). + /// + /// The first element of the vector of `Expr`s is the expression that + /// evaluates to the object on which the method is being called on (the + /// receiver), and the remaining elements are the rest of the arguments. + /// + /// Thus, `x.foo::(a, b, c, d)` is represented as + /// `ExprMethodCall(foo, [Bar, Baz], [x, a, b, c, d])`. + ExprMethodCall(Spanned, HirVec>, HirVec), + /// A tuple (`(a, b, c ,d)`) + ExprTup(HirVec), + /// A binary operation (For example: `a + b`, `a * b`) + ExprBinary(BinOp, P, P), + /// A unary operation (For example: `!x`, `*x`) + ExprUnary(UnOp, P), + /// A literal (For example: `1`, `"foo"`) + ExprLit(P), + /// A cast (`foo as f64`) + ExprCast(P, P), + ExprType(P, P), + /// An `if` block, with an optional else block + /// + /// `if expr { block } else { expr }` + ExprIf(P, P, Option>), + /// A while loop, with an optional label + /// + /// `'label: while expr { block }` + ExprWhile(P, P, Option>), + /// Conditionless loop (can be exited with break, continue, or return) + /// + /// `'label: loop { block }` + ExprLoop(P, Option>, LoopSource), + /// A `match` block, with a source that indicates whether or not it is + /// the result of a desugaring, and if so, which kind. + ExprMatch(P, HirVec, MatchSource), + /// A closure (for example, `move |a, b, c| {a + b + c}`). 
+ /// + /// The final span is the span of the argument block `|...|` + ExprClosure(CaptureClause, P, ExprId, Span), + /// A block (`{ ... }`) + ExprBlock(P), + + /// An assignment (`a = foo()`) + ExprAssign(P, P), + /// An assignment with an operator + /// + /// For example, `a += 1`. + ExprAssignOp(BinOp, P, P), + /// Access of a named struct field (`obj.foo`) + ExprField(P, Spanned), + /// Access of an unnamed field of a struct or tuple-struct + /// + /// For example, `foo.0`. + ExprTupField(P, Spanned), + /// An indexing operation (`foo[2]`) + ExprIndex(P, P), + + /// Path to a definition, possibly containing lifetime or type parameters. + ExprPath(QPath), + + /// A referencing operation (`&a` or `&mut a`) + ExprAddrOf(Mutability, P), + /// A `break`, with an optional label to break + ExprBreak(Option, D}` + ItemEnum(EnumDef, Generics), + /// A struct definition, e.g. `struct Foo {x: A}` + ItemStruct(VariantData, Generics), + /// A union definition, e.g. `union Foo {x: A, y: B}` + ItemUnion(VariantData, Generics), + /// Represents a Trait Declaration + ItemTrait(Unsafety, Generics, TyParamBounds, HirVec), + + // Default trait implementations + /// + /// `impl Trait for .. {}` + ItemDefaultImpl(Unsafety, TraitRef), + /// An implementation, eg `impl Trait for Foo { .. }` + ItemImpl(Unsafety, + ImplPolarity, + Generics, + Option, // (optional) trait this impl implements + P, // self + HirVec), +} + +impl Item_ { + pub fn descriptive_variant(&self) -> &str { + match *self { + ItemExternCrate(..) => "extern crate", + ItemUse(..) => "use", + ItemStatic(..) => "static item", + ItemConst(..) => "constant item", + ItemFn(..) => "function", + ItemMod(..) => "module", + ItemForeignMod(..) => "foreign module", + ItemTy(..) => "type alias", + ItemEnum(..) => "enum", + ItemStruct(..) => "struct", + ItemUnion(..) => "union", + ItemTrait(..) => "trait", + ItemImpl(..) | + ItemDefaultImpl(..) => "item", + } + } +} + +/// A reference from an impl to one of its associated items. 
This +/// contains the item's id, naturally, but also the item's name and +/// some other high-level details (like whether it is an associated +/// type or method, and whether it is public). This allows other +/// passes to find the impl they want without loading the id (which +/// means fewer edges in the incremental compilation graph). +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct ImplItemRef { + pub id: ImplItemId, + pub name: Name, + pub kind: AssociatedItemKind, + pub span: Span, + pub vis: Visibility, + pub defaultness: Defaultness, +} + +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum AssociatedItemKind { + Const, + Method { has_self: bool }, + Type, +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub struct ForeignItem { + pub name: Name, + pub attrs: HirVec, + pub node: ForeignItem_, + pub id: NodeId, + pub span: Span, + pub vis: Visibility, +} + +/// An item within an `extern` block +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +pub enum ForeignItem_ { + /// A foreign function + ForeignItemFn(P, Generics), + /// A foreign static item (`static ext: u8`), with optional mutability + /// (the boolean is true when mutable) + ForeignItemStatic(P, bool), +} + +impl ForeignItem_ { + pub fn descriptive_variant(&self) -> &str { + match *self { + ForeignItemFn(..) => "foreign function", + ForeignItemStatic(..) => "foreign static item", + } + } +} + +/// A free variable referred to in a function. +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub struct Freevar { + /// The variable being accessed free. + pub def: Def, + + // First span where it is accessed (there can be multiple). 
+ pub span: Span +} + +pub type FreevarMap = NodeMap>; + +pub type CaptureModeMap = NodeMap; + +#[derive(Clone, Debug)] +pub struct TraitCandidate { + pub def_id: DefId, + pub import_id: Option, +} + +// Trait method resolution +pub type TraitMap = NodeMap>; + +// Map from the NodeId of a glob import to a list of items which are actually +// imported. +pub type GlobMap = NodeMap>; diff --git a/src/librustc/hir/pat_util.rs b/src/librustc/hir/pat_util.rs new file mode 100644 index 0000000000000..0190e74df6953 --- /dev/null +++ b/src/librustc/hir/pat_util.rs @@ -0,0 +1,193 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::def::Def; +use hir::def_id::DefId; +use hir::{self, PatKind}; +use syntax::ast; +use syntax::codemap::Spanned; +use syntax_pos::Span; + +use std::iter::{Enumerate, ExactSizeIterator}; + +pub struct EnumerateAndAdjust { + enumerate: Enumerate, + gap_pos: usize, + gap_len: usize, +} + +impl Iterator for EnumerateAndAdjust where I: Iterator { + type Item = (usize, ::Item); + + fn next(&mut self) -> Option<(usize, ::Item)> { + self.enumerate.next().map(|(i, elem)| { + (if i < self.gap_pos { i } else { i + self.gap_len }, elem) + }) + } +} + +pub trait EnumerateAndAdjustIterator { + fn enumerate_and_adjust(self, expected_len: usize, gap_pos: Option) + -> EnumerateAndAdjust where Self: Sized; +} + +impl EnumerateAndAdjustIterator for T { + fn enumerate_and_adjust(self, expected_len: usize, gap_pos: Option) + -> EnumerateAndAdjust where Self: Sized { + let actual_len = self.len(); + EnumerateAndAdjust { + enumerate: self.enumerate(), + gap_pos: if let Some(gap_pos) = gap_pos { gap_pos } else { expected_len }, + gap_len: expected_len - 
actual_len, + } + } +} + +impl hir::Pat { + pub fn is_refutable(&self) -> bool { + match self.node { + PatKind::Lit(_) | + PatKind::Range(..) | + PatKind::Path(hir::QPath::Resolved(Some(..), _)) | + PatKind::Path(hir::QPath::TypeRelative(..)) => true, + + PatKind::Path(hir::QPath::Resolved(_, ref path)) | + PatKind::TupleStruct(hir::QPath::Resolved(_, ref path), ..) | + PatKind::Struct(hir::QPath::Resolved(_, ref path), ..) => { + match path.def { + Def::Variant(..) | Def::VariantCtor(..) => true, + _ => false + } + } + PatKind::Slice(..) => true, + _ => false + } + } + + pub fn is_const(&self) -> bool { + match self.node { + PatKind::Path(hir::QPath::TypeRelative(..)) => true, + PatKind::Path(hir::QPath::Resolved(_, ref path)) => { + match path.def { + Def::Const(..) | Def::AssociatedConst(..) => true, + _ => false + } + } + _ => false + } + } + + /// Call `f` on every "binding" in a pattern, e.g., on `a` in + /// `match foo() { Some(a) => (), None => () }` + pub fn each_binding(&self, mut f: F) + where F: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned), + { + self.walk(|p| { + if let PatKind::Binding(binding_mode, _, ref pth, _) = p.node { + f(binding_mode, p.id, p.span, pth); + } + true + }); + } + + /// Checks if the pattern contains any patterns that bind something to + /// an ident, e.g. `foo`, or `Foo(foo)` or `foo @ Bar(..)`. + pub fn contains_bindings(&self) -> bool { + let mut contains_bindings = false; + self.walk(|p| { + if let PatKind::Binding(..) = p.node { + contains_bindings = true; + false // there's at least one binding, can short circuit now. + } else { + true + } + }); + contains_bindings + } + + /// Checks if the pattern contains any patterns that bind something to + /// an ident or wildcard, e.g. `foo`, or `Foo(_)`, `foo @ Bar(..)`, + pub fn contains_bindings_or_wild(&self) -> bool { + let mut contains_bindings = false; + self.walk(|p| { + match p.node { + PatKind::Binding(..) 
| PatKind::Wild => { + contains_bindings = true; + false // there's at least one binding/wildcard, can short circuit now. + } + _ => true + } + }); + contains_bindings + } + + pub fn simple_name(&self) -> Option { + match self.node { + PatKind::Binding(hir::BindByValue(..), _, ref path1, None) => { + Some(path1.node) + } + _ => { + None + } + } + } + + /// Return variants that are necessary to exist for the pattern to match. + pub fn necessary_variants(&self) -> Vec { + let mut variants = vec![]; + self.walk(|p| { + match p.node { + PatKind::Path(hir::QPath::Resolved(_, ref path)) | + PatKind::TupleStruct(hir::QPath::Resolved(_, ref path), ..) | + PatKind::Struct(hir::QPath::Resolved(_, ref path), ..) => { + match path.def { + Def::Variant(id) | + Def::VariantCtor(id, ..) => variants.push(id), + _ => () + } + } + _ => () + } + true + }); + variants.sort(); + variants.dedup(); + variants + } + + /// Checks if the pattern contains any `ref` or `ref mut` bindings, + /// and if yes whether its containing mutable ones or just immutables ones. + pub fn contains_ref_binding(&self) -> Option { + let mut result = None; + self.each_binding(|mode, _, _, _| { + if let hir::BindingMode::BindByRef(m) = mode { + // Pick Mutable as maximum + match result { + None | Some(hir::MutImmutable) => result = Some(m), + _ => (), + } + } + }); + result + } +} + +impl hir::Arm { + /// Checks if the patterns for this arm contain any `ref` or `ref mut` + /// bindings, and if yes whether its containing mutable ones or just immutables ones. + pub fn contains_ref_binding(&self) -> Option { + self.pats.iter() + .filter_map(|pat| pat.contains_ref_binding()) + .max_by_key(|m| match *m { + hir::MutMutable => 1, + hir::MutImmutable => 0, + }) + } +} diff --git a/src/librustc/hir/print.rs b/src/librustc/hir/print.rs new file mode 100644 index 0000000000000..74920b1328076 --- /dev/null +++ b/src/librustc/hir/print.rs @@ -0,0 +1,2398 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use self::AnnNode::*; + +use syntax::abi::Abi; +use syntax::ast; +use syntax::codemap::{CodeMap, Spanned}; +use syntax::parse::token::{self, BinOpToken}; +use syntax::parse::lexer::comments; +use syntax::print::pp::{self, break_offset, word, space, hardbreak}; +use syntax::print::pp::{Breaks, eof}; +use syntax::print::pp::Breaks::{Consistent, Inconsistent}; +use syntax::print::pprust::{self as ast_pp, PrintState}; +use syntax::ptr::P; +use syntax::symbol::keywords; +use syntax_pos::{self, BytePos}; +use errors; + +use hir; +use hir::{Crate, PatKind, RegionTyParamBound, SelfKind, TraitTyParamBound, TraitBoundModifier}; + +use std::io::{self, Write, Read}; + +pub enum AnnNode<'a> { + NodeName(&'a ast::Name), + NodeBlock(&'a hir::Block), + NodeItem(&'a hir::Item), + NodeSubItem(ast::NodeId), + NodeExpr(&'a hir::Expr), + NodePat(&'a hir::Pat), +} + +pub trait PpAnn { + fn pre(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> { + Ok(()) + } + fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> { + Ok(()) + } +} + +#[derive(Copy, Clone)] +pub struct NoAnn; + +impl PpAnn for NoAnn {} + + +pub struct State<'a> { + krate: Option<&'a Crate>, + pub s: pp::Printer<'a>, + cm: Option<&'a CodeMap>, + comments: Option>, + literals: Option>, + cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral, + boxes: Vec, + ann: &'a (PpAnn + 'a), +} + +impl<'a> PrintState<'a> for State<'a> { + fn writer(&mut self) -> &mut pp::Printer<'a> { + &mut self.s + } + + fn boxes(&mut self) -> &mut Vec { + &mut self.boxes + } + + fn comments(&mut self) -> &mut Option> { + &mut self.comments + } + + fn cur_cmnt_and_lit(&mut self) -> &mut 
ast_pp::CurrentCommentAndLiteral { + &mut self.cur_cmnt_and_lit + } + + fn literals(&self) -> &Option> { + &self.literals + } +} + +pub fn rust_printer<'a>(writer: Box, krate: Option<&'a Crate>) -> State<'a> { + static NO_ANN: NoAnn = NoAnn; + rust_printer_annotated(writer, &NO_ANN, krate) +} + +pub fn rust_printer_annotated<'a>(writer: Box, + ann: &'a PpAnn, + krate: Option<&'a Crate>) + -> State<'a> { + State { + krate: krate, + s: pp::mk_printer(writer, default_columns), + cm: None, + comments: None, + literals: None, + cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral { + cur_cmnt: 0, + cur_lit: 0, + }, + boxes: Vec::new(), + ann: ann, + } +} + +#[allow(non_upper_case_globals)] +pub const indent_unit: usize = 4; + +#[allow(non_upper_case_globals)] +pub const default_columns: usize = 78; + + +/// Requires you to pass an input filename and reader so that +/// it can scan the input text for comments and literals to +/// copy forward. +pub fn print_crate<'a>(cm: &'a CodeMap, + span_diagnostic: &errors::Handler, + krate: &hir::Crate, + filename: String, + input: &mut Read, + out: Box, + ann: &'a PpAnn, + is_expanded: bool) + -> io::Result<()> { + let mut s = State::new_from_input(cm, span_diagnostic, filename, input, + out, ann, is_expanded, Some(krate)); + + // When printing the AST, we sometimes need to inject `#[no_std]` here. + // Since you can't compile the HIR, it's not necessary. 
+ + s.print_mod(&krate.module, &krate.attrs)?; + s.print_remaining_comments()?; + eof(&mut s.s) +} + +impl<'a> State<'a> { + pub fn new_from_input(cm: &'a CodeMap, + span_diagnostic: &errors::Handler, + filename: String, + input: &mut Read, + out: Box, + ann: &'a PpAnn, + is_expanded: bool, + krate: Option<&'a Crate>) + -> State<'a> { + let (cmnts, lits) = comments::gather_comments_and_literals(span_diagnostic, + filename, + input); + + State::new(cm, + out, + ann, + Some(cmnts), + // If the code is post expansion, don't use the table of + // literals, since it doesn't correspond with the literals + // in the AST anymore. + if is_expanded { + None + } else { + Some(lits) + }, + krate) + } + + pub fn new(cm: &'a CodeMap, + out: Box, + ann: &'a PpAnn, + comments: Option>, + literals: Option>, + krate: Option<&'a Crate>) + -> State<'a> { + State { + krate: krate, + s: pp::mk_printer(out, default_columns), + cm: Some(cm), + comments: comments.clone(), + literals: literals.clone(), + cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral { + cur_cmnt: 0, + cur_lit: 0, + }, + boxes: Vec::new(), + ann: ann, + } + } +} + +pub fn to_string(f: F) -> String + where F: FnOnce(&mut State) -> io::Result<()> +{ + let mut wr = Vec::new(); + { + let mut printer = rust_printer(Box::new(&mut wr), None); + f(&mut printer).unwrap(); + eof(&mut printer.s).unwrap(); + } + String::from_utf8(wr).unwrap() +} + +pub fn binop_to_string(op: BinOpToken) -> &'static str { + match op { + token::Plus => "+", + token::Minus => "-", + token::Star => "*", + token::Slash => "/", + token::Percent => "%", + token::Caret => "^", + token::And => "&", + token::Or => "|", + token::Shl => "<<", + token::Shr => ">>", + } +} + +pub fn ty_to_string(ty: &hir::Ty) -> String { + to_string(|s| s.print_type(ty)) +} + +pub fn bounds_to_string(bounds: &[hir::TyParamBound]) -> String { + to_string(|s| s.print_bounds("", bounds)) +} + +pub fn pat_to_string(pat: &hir::Pat) -> String { + to_string(|s| s.print_pat(pat)) +} + 
+pub fn arm_to_string(arm: &hir::Arm) -> String { + to_string(|s| s.print_arm(arm)) +} + +pub fn expr_to_string(e: &hir::Expr) -> String { + to_string(|s| s.print_expr(e)) +} + +pub fn lifetime_to_string(e: &hir::Lifetime) -> String { + to_string(|s| s.print_lifetime(e)) +} + +pub fn stmt_to_string(stmt: &hir::Stmt) -> String { + to_string(|s| s.print_stmt(stmt)) +} + +pub fn item_to_string(i: &hir::Item) -> String { + to_string(|s| s.print_item(i)) +} + +pub fn impl_item_to_string(i: &hir::ImplItem) -> String { + to_string(|s| s.print_impl_item(i)) +} + +pub fn trait_item_to_string(i: &hir::TraitItem) -> String { + to_string(|s| s.print_trait_item(i)) +} + +pub fn generics_to_string(generics: &hir::Generics) -> String { + to_string(|s| s.print_generics(generics)) +} + +pub fn where_clause_to_string(i: &hir::WhereClause) -> String { + to_string(|s| s.print_where_clause(i)) +} + +pub fn fn_block_to_string(p: &hir::FnDecl) -> String { + to_string(|s| s.print_fn_block_args(p)) +} + +pub fn path_to_string(p: &hir::Path) -> String { + to_string(|s| s.print_path(p, false)) +} + +pub fn qpath_to_string(p: &hir::QPath) -> String { + to_string(|s| s.print_qpath(p, false)) +} + +pub fn name_to_string(name: ast::Name) -> String { + to_string(|s| s.print_name(name)) +} + +pub fn fun_to_string(decl: &hir::FnDecl, + unsafety: hir::Unsafety, + constness: hir::Constness, + name: ast::Name, + generics: &hir::Generics) + -> String { + to_string(|s| { + s.head("")?; + s.print_fn(decl, + unsafety, + constness, + Abi::Rust, + Some(name), + generics, + &hir::Inherited)?; + s.end()?; // Close the head box + s.end() // Close the outer box + }) +} + +pub fn block_to_string(blk: &hir::Block) -> String { + to_string(|s| { + // containing cbox, will be closed by print-block at } + s.cbox(indent_unit)?; + // head-ibox, will be closed by print-block after { + s.ibox(0)?; + s.print_block(blk) + }) +} + +pub fn variant_to_string(var: &hir::Variant) -> String { + to_string(|s| 
s.print_variant(var)) +} + +pub fn arg_to_string(arg: &hir::Arg) -> String { + to_string(|s| s.print_arg(arg, false)) +} + +pub fn visibility_qualified(vis: &hir::Visibility, s: &str) -> String { + match *vis { + hir::Public => format!("pub {}", s), + hir::Visibility::Crate => format!("pub(crate) {}", s), + hir::Visibility::Restricted { ref path, .. } => format!("pub({}) {}", path, s), + hir::Inherited => s.to_string(), + } +} + +fn needs_parentheses(expr: &hir::Expr) -> bool { + match expr.node { + hir::ExprAssign(..) | + hir::ExprBinary(..) | + hir::ExprClosure(..) | + hir::ExprAssignOp(..) | + hir::ExprCast(..) | + hir::ExprType(..) => true, + _ => false, + } +} + +impl<'a> State<'a> { + pub fn cbox(&mut self, u: usize) -> io::Result<()> { + self.boxes.push(pp::Breaks::Consistent); + pp::cbox(&mut self.s, u) + } + + pub fn nbsp(&mut self) -> io::Result<()> { + word(&mut self.s, " ") + } + + pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> { + word(&mut self.s, w)?; + self.nbsp() + } + + pub fn head(&mut self, w: &str) -> io::Result<()> { + // outer-box is consistent + self.cbox(indent_unit)?; + // head-box is inconsistent + self.ibox(w.len() + 1)?; + // keyword that starts the head + if !w.is_empty() { + self.word_nbsp(w)?; + } + Ok(()) + } + + pub fn bopen(&mut self) -> io::Result<()> { + word(&mut self.s, "{")?; + self.end() // close the head-box + } + + pub fn bclose_(&mut self, span: syntax_pos::Span, indented: usize) -> io::Result<()> { + self.bclose_maybe_open(span, indented, true) + } + pub fn bclose_maybe_open(&mut self, + span: syntax_pos::Span, + indented: usize, + close_box: bool) + -> io::Result<()> { + self.maybe_print_comment(span.hi)?; + self.break_offset_if_not_bol(1, -(indented as isize))?; + word(&mut self.s, "}")?; + if close_box { + self.end()?; // close the outer-box + } + Ok(()) + } + pub fn bclose(&mut self, span: syntax_pos::Span) -> io::Result<()> { + self.bclose_(span, indent_unit) + } + + pub fn in_cbox(&self) -> bool { + match 
self.boxes.last() { + Some(&last_box) => last_box == pp::Breaks::Consistent, + None => false, + } + } + pub fn space_if_not_bol(&mut self) -> io::Result<()> { + if !self.is_bol() { + space(&mut self.s)?; + } + Ok(()) + } + pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) -> io::Result<()> { + if !self.is_bol() { + break_offset(&mut self.s, n, off) + } else { + if off != 0 && self.s.last_token().is_hardbreak_tok() { + // We do something pretty sketchy here: tuck the nonzero + // offset-adjustment we were going to deposit along with the + // break into the previous hardbreak. + self.s.replace_last_token(pp::hardbreak_tok_offset(off)); + } + Ok(()) + } + } + + // Synthesizes a comment that was not textually present in the original source + // file. + pub fn synth_comment(&mut self, text: String) -> io::Result<()> { + word(&mut self.s, "/*")?; + space(&mut self.s)?; + word(&mut self.s, &text[..])?; + space(&mut self.s)?; + word(&mut self.s, "*/") + } + + + pub fn commasep_cmnt(&mut self, + b: Breaks, + elts: &[T], + mut op: F, + mut get_span: G) + -> io::Result<()> + where F: FnMut(&mut State, &T) -> io::Result<()>, + G: FnMut(&T) -> syntax_pos::Span + { + self.rbox(0, b)?; + let len = elts.len(); + let mut i = 0; + for elt in elts { + self.maybe_print_comment(get_span(elt).hi)?; + op(self, elt)?; + i += 1; + if i < len { + word(&mut self.s, ",")?; + self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi))?; + self.space_if_not_bol()?; + } + } + self.end() + } + + pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[hir::Expr]) -> io::Result<()> { + self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&e), |e| e.span) + } + + pub fn print_mod(&mut self, _mod: &hir::Mod, attrs: &[ast::Attribute]) -> io::Result<()> { + self.print_inner_attributes(attrs)?; + for item_id in &_mod.item_ids { + self.print_item_id(item_id)?; + } + Ok(()) + } + + pub fn print_foreign_mod(&mut self, + nmod: &hir::ForeignMod, + attrs: &[ast::Attribute]) + -> 
io::Result<()> { + self.print_inner_attributes(attrs)?; + for item in &nmod.items { + self.print_foreign_item(item)?; + } + Ok(()) + } + + pub fn print_opt_lifetime(&mut self, lifetime: &Option) -> io::Result<()> { + if let Some(l) = *lifetime { + self.print_lifetime(&l)?; + self.nbsp()?; + } + Ok(()) + } + + pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> { + self.maybe_print_comment(ty.span.lo)?; + self.ibox(0)?; + match ty.node { + hir::TySlice(ref ty) => { + word(&mut self.s, "[")?; + self.print_type(&ty)?; + word(&mut self.s, "]")?; + } + hir::TyPtr(ref mt) => { + word(&mut self.s, "*")?; + match mt.mutbl { + hir::MutMutable => self.word_nbsp("mut")?, + hir::MutImmutable => self.word_nbsp("const")?, + } + self.print_type(&mt.ty)?; + } + hir::TyRptr(ref lifetime, ref mt) => { + word(&mut self.s, "&")?; + self.print_opt_lifetime(lifetime)?; + self.print_mt(mt)?; + } + hir::TyNever => { + word(&mut self.s, "!")?; + }, + hir::TyTup(ref elts) => { + self.popen()?; + self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&ty))?; + if elts.len() == 1 { + word(&mut self.s, ",")?; + } + self.pclose()?; + } + hir::TyBareFn(ref f) => { + let generics = hir::Generics { + lifetimes: f.lifetimes.clone(), + ty_params: hir::HirVec::new(), + where_clause: hir::WhereClause { + id: ast::DUMMY_NODE_ID, + predicates: hir::HirVec::new(), + }, + span: syntax_pos::DUMMY_SP, + }; + self.print_ty_fn(f.abi, f.unsafety, &f.decl, None, &generics)?; + } + hir::TyPath(ref qpath) => { + self.print_qpath(qpath, false)? 
+ } + hir::TyObjectSum(ref ty, ref bounds) => { + self.print_type(&ty)?; + self.print_bounds("+", &bounds[..])?; + } + hir::TyPolyTraitRef(ref bounds) => { + self.print_bounds("", &bounds[..])?; + } + hir::TyImplTrait(ref bounds) => { + self.print_bounds("impl ", &bounds[..])?; + } + hir::TyArray(ref ty, ref v) => { + word(&mut self.s, "[")?; + self.print_type(&ty)?; + word(&mut self.s, "; ")?; + self.print_expr(&v)?; + word(&mut self.s, "]")?; + } + hir::TyTypeof(ref e) => { + word(&mut self.s, "typeof(")?; + self.print_expr(&e)?; + word(&mut self.s, ")")?; + } + hir::TyInfer => { + word(&mut self.s, "_")?; + } + } + self.end() + } + + pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> { + self.hardbreak_if_not_bol()?; + self.maybe_print_comment(item.span.lo)?; + self.print_outer_attributes(&item.attrs)?; + match item.node { + hir::ForeignItemFn(ref decl, ref generics) => { + self.head("")?; + self.print_fn(decl, + hir::Unsafety::Normal, + hir::Constness::NotConst, + Abi::Rust, + Some(item.name), + generics, + &item.vis)?; + self.end()?; // end head-ibox + word(&mut self.s, ";")?; + self.end() // end the outer fn box + } + hir::ForeignItemStatic(ref t, m) => { + self.head(&visibility_qualified(&item.vis, "static"))?; + if m { + self.word_space("mut")?; + } + self.print_name(item.name)?; + self.word_space(":")?; + self.print_type(&t)?; + word(&mut self.s, ";")?; + self.end()?; // end the head-ibox + self.end() // end the outer cbox + } + } + } + + fn print_associated_const(&mut self, + name: ast::Name, + ty: &hir::Ty, + default: Option<&hir::Expr>, + vis: &hir::Visibility) + -> io::Result<()> { + word(&mut self.s, &visibility_qualified(vis, ""))?; + self.word_space("const")?; + self.print_name(name)?; + self.word_space(":")?; + self.print_type(ty)?; + if let Some(expr) = default { + space(&mut self.s)?; + self.word_space("=")?; + self.print_expr(expr)?; + } + word(&mut self.s, ";") + } + + fn print_associated_type(&mut self, + name: 
ast::Name, + bounds: Option<&hir::TyParamBounds>, + ty: Option<&hir::Ty>) + -> io::Result<()> { + self.word_space("type")?; + self.print_name(name)?; + if let Some(bounds) = bounds { + self.print_bounds(":", bounds)?; + } + if let Some(ty) = ty { + space(&mut self.s)?; + self.word_space("=")?; + self.print_type(ty)?; + } + word(&mut self.s, ";") + } + + pub fn print_item_id(&mut self, item_id: &hir::ItemId) -> io::Result<()> { + if let Some(krate) = self.krate { + // skip nested items if krate context was not provided + let item = &krate.items[&item_id.id]; + self.print_item(item) + } else { + Ok(()) + } + } + + pub fn print_expr_id(&mut self, expr_id: &hir::ExprId) -> io::Result<()> { + if let Some(krate) = self.krate { + let expr = &krate.exprs[expr_id]; + self.print_expr(expr) + } else { + Ok(()) + } + } + + /// Pretty-print an item + pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> { + self.hardbreak_if_not_bol()?; + self.maybe_print_comment(item.span.lo)?; + self.print_outer_attributes(&item.attrs)?; + self.ann.pre(self, NodeItem(item))?; + match item.node { + hir::ItemExternCrate(ref optional_path) => { + self.head(&visibility_qualified(&item.vis, "extern crate"))?; + if let Some(p) = *optional_path { + let val = p.as_str(); + if val.contains("-") { + self.print_string(&val, ast::StrStyle::Cooked)?; + } else { + self.print_name(p)?; + } + space(&mut self.s)?; + word(&mut self.s, "as")?; + space(&mut self.s)?; + } + self.print_name(item.name)?; + word(&mut self.s, ";")?; + self.end()?; // end inner head-block + self.end()?; // end outer head-block + } + hir::ItemUse(ref path, kind) => { + self.head(&visibility_qualified(&item.vis, "use"))?; + self.print_path(path, false)?; + + match kind { + hir::UseKind::Single => { + if path.segments.last().unwrap().name != item.name { + space(&mut self.s)?; + self.word_space("as")?; + self.print_name(item.name)?; + } + word(&mut self.s, ";")?; + } + hir::UseKind::Glob => word(&mut self.s, "::*;")?, + 
hir::UseKind::ListStem => word(&mut self.s, "::{};")? + } + self.end()?; // end inner head-block + self.end()?; // end outer head-block + } + hir::ItemStatic(ref ty, m, ref expr) => { + self.head(&visibility_qualified(&item.vis, "static"))?; + if m == hir::MutMutable { + self.word_space("mut")?; + } + self.print_name(item.name)?; + self.word_space(":")?; + self.print_type(&ty)?; + space(&mut self.s)?; + self.end()?; // end the head-ibox + + self.word_space("=")?; + self.print_expr(&expr)?; + word(&mut self.s, ";")?; + self.end()?; // end the outer cbox + } + hir::ItemConst(ref ty, ref expr) => { + self.head(&visibility_qualified(&item.vis, "const"))?; + self.print_name(item.name)?; + self.word_space(":")?; + self.print_type(&ty)?; + space(&mut self.s)?; + self.end()?; // end the head-ibox + + self.word_space("=")?; + self.print_expr(&expr)?; + word(&mut self.s, ";")?; + self.end()?; // end the outer cbox + } + hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, ref body) => { + self.head("")?; + self.print_fn(decl, + unsafety, + constness, + abi, + Some(item.name), + typarams, + &item.vis)?; + word(&mut self.s, " ")?; + self.end()?; // need to close a box + self.end()?; // need to close a box + self.print_expr_id(body)?; + } + hir::ItemMod(ref _mod) => { + self.head(&visibility_qualified(&item.vis, "mod"))?; + self.print_name(item.name)?; + self.nbsp()?; + self.bopen()?; + self.print_mod(_mod, &item.attrs)?; + self.bclose(item.span)?; + } + hir::ItemForeignMod(ref nmod) => { + self.head("extern")?; + self.word_nbsp(&nmod.abi.to_string())?; + self.bopen()?; + self.print_foreign_mod(nmod, &item.attrs)?; + self.bclose(item.span)?; + } + hir::ItemTy(ref ty, ref params) => { + self.ibox(indent_unit)?; + self.ibox(0)?; + self.word_nbsp(&visibility_qualified(&item.vis, "type"))?; + self.print_name(item.name)?; + self.print_generics(params)?; + self.end()?; // end the inner ibox + + self.print_where_clause(¶ms.where_clause)?; + space(&mut self.s)?; + 
self.word_space("=")?; + self.print_type(&ty)?; + word(&mut self.s, ";")?; + self.end()?; // end the outer ibox + } + hir::ItemEnum(ref enum_definition, ref params) => { + self.print_enum_def(enum_definition, params, item.name, item.span, &item.vis)?; + } + hir::ItemStruct(ref struct_def, ref generics) => { + self.head(&visibility_qualified(&item.vis, "struct"))?; + self.print_struct(struct_def, generics, item.name, item.span, true)?; + } + hir::ItemUnion(ref struct_def, ref generics) => { + self.head(&visibility_qualified(&item.vis, "union"))?; + self.print_struct(struct_def, generics, item.name, item.span, true)?; + } + hir::ItemDefaultImpl(unsafety, ref trait_ref) => { + self.head("")?; + self.print_visibility(&item.vis)?; + self.print_unsafety(unsafety)?; + self.word_nbsp("impl")?; + self.print_trait_ref(trait_ref)?; + space(&mut self.s)?; + self.word_space("for")?; + self.word_space("..")?; + self.bopen()?; + self.bclose(item.span)?; + } + hir::ItemImpl(unsafety, + polarity, + ref generics, + ref opt_trait, + ref ty, + ref impl_items) => { + self.head("")?; + self.print_visibility(&item.vis)?; + self.print_unsafety(unsafety)?; + self.word_nbsp("impl")?; + + if generics.is_parameterized() { + self.print_generics(generics)?; + space(&mut self.s)?; + } + + match polarity { + hir::ImplPolarity::Negative => { + word(&mut self.s, "!")?; + } + _ => {} + } + + match opt_trait { + &Some(ref t) => { + self.print_trait_ref(t)?; + space(&mut self.s)?; + self.word_space("for")?; + } + &None => {} + } + + self.print_type(&ty)?; + self.print_where_clause(&generics.where_clause)?; + + space(&mut self.s)?; + self.bopen()?; + self.print_inner_attributes(&item.attrs)?; + for impl_item in impl_items { + self.print_impl_item_ref(impl_item)?; + } + self.bclose(item.span)?; + } + hir::ItemTrait(unsafety, ref generics, ref bounds, ref trait_items) => { + self.head("")?; + self.print_visibility(&item.vis)?; + self.print_unsafety(unsafety)?; + self.word_nbsp("trait")?; + 
self.print_name(item.name)?; + self.print_generics(generics)?; + let mut real_bounds = Vec::with_capacity(bounds.len()); + for b in bounds.iter() { + if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b { + space(&mut self.s)?; + self.word_space("for ?")?; + self.print_trait_ref(&ptr.trait_ref)?; + } else { + real_bounds.push(b.clone()); + } + } + self.print_bounds(":", &real_bounds[..])?; + self.print_where_clause(&generics.where_clause)?; + word(&mut self.s, " ")?; + self.bopen()?; + for trait_item in trait_items { + self.print_trait_item(trait_item)?; + } + self.bclose(item.span)?; + } + } + self.ann.post(self, NodeItem(item)) + } + + pub fn print_trait_ref(&mut self, t: &hir::TraitRef) -> io::Result<()> { + self.print_path(&t.path, false) + } + + fn print_formal_lifetime_list(&mut self, lifetimes: &[hir::LifetimeDef]) -> io::Result<()> { + if !lifetimes.is_empty() { + word(&mut self.s, "for<")?; + let mut comma = false; + for lifetime_def in lifetimes { + if comma { + self.word_space(",")? 
+ } + self.print_lifetime_def(lifetime_def)?; + comma = true; + } + word(&mut self.s, ">")?; + } + Ok(()) + } + + fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef) -> io::Result<()> { + self.print_formal_lifetime_list(&t.bound_lifetimes)?; + self.print_trait_ref(&t.trait_ref) + } + + pub fn print_enum_def(&mut self, + enum_definition: &hir::EnumDef, + generics: &hir::Generics, + name: ast::Name, + span: syntax_pos::Span, + visibility: &hir::Visibility) + -> io::Result<()> { + self.head(&visibility_qualified(visibility, "enum"))?; + self.print_name(name)?; + self.print_generics(generics)?; + self.print_where_clause(&generics.where_clause)?; + space(&mut self.s)?; + self.print_variants(&enum_definition.variants, span) + } + + pub fn print_variants(&mut self, + variants: &[hir::Variant], + span: syntax_pos::Span) + -> io::Result<()> { + self.bopen()?; + for v in variants { + self.space_if_not_bol()?; + self.maybe_print_comment(v.span.lo)?; + self.print_outer_attributes(&v.node.attrs)?; + self.ibox(indent_unit)?; + self.print_variant(v)?; + word(&mut self.s, ",")?; + self.end()?; + self.maybe_print_trailing_comment(v.span, None)?; + } + self.bclose(span) + } + + pub fn print_visibility(&mut self, vis: &hir::Visibility) -> io::Result<()> { + match *vis { + hir::Public => self.word_nbsp("pub"), + hir::Visibility::Crate => self.word_nbsp("pub(crate)"), + hir::Visibility::Restricted { ref path, .. 
} => + self.word_nbsp(&format!("pub({})", path)), + hir::Inherited => Ok(()), + } + } + + pub fn print_struct(&mut self, + struct_def: &hir::VariantData, + generics: &hir::Generics, + name: ast::Name, + span: syntax_pos::Span, + print_finalizer: bool) + -> io::Result<()> { + self.print_name(name)?; + self.print_generics(generics)?; + if !struct_def.is_struct() { + if struct_def.is_tuple() { + self.popen()?; + self.commasep(Inconsistent, struct_def.fields(), |s, field| { + s.maybe_print_comment(field.span.lo)?; + s.print_outer_attributes(&field.attrs)?; + s.print_visibility(&field.vis)?; + s.print_type(&field.ty) + })?; + self.pclose()?; + } + self.print_where_clause(&generics.where_clause)?; + if print_finalizer { + word(&mut self.s, ";")?; + } + self.end()?; + self.end() // close the outer-box + } else { + self.print_where_clause(&generics.where_clause)?; + self.nbsp()?; + self.bopen()?; + self.hardbreak_if_not_bol()?; + + for field in struct_def.fields() { + self.hardbreak_if_not_bol()?; + self.maybe_print_comment(field.span.lo)?; + self.print_outer_attributes(&field.attrs)?; + self.print_visibility(&field.vis)?; + self.print_name(field.name)?; + self.word_nbsp(":")?; + self.print_type(&field.ty)?; + word(&mut self.s, ",")?; + } + + self.bclose(span) + } + } + + pub fn print_variant(&mut self, v: &hir::Variant) -> io::Result<()> { + self.head("")?; + let generics = hir::Generics::empty(); + self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)?; + match v.node.disr_expr { + Some(ref d) => { + space(&mut self.s)?; + self.word_space("=")?; + self.print_expr(&d) + } + _ => Ok(()), + } + } + pub fn print_method_sig(&mut self, + name: ast::Name, + m: &hir::MethodSig, + vis: &hir::Visibility) + -> io::Result<()> { + self.print_fn(&m.decl, + m.unsafety, + m.constness, + m.abi, + Some(name), + &m.generics, + vis) + } + + pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> { + self.ann.pre(self, NodeSubItem(ti.id))?; + 
self.hardbreak_if_not_bol()?; + self.maybe_print_comment(ti.span.lo)?; + self.print_outer_attributes(&ti.attrs)?; + match ti.node { + hir::ConstTraitItem(ref ty, ref default) => { + self.print_associated_const(ti.name, + &ty, + default.as_ref().map(|expr| &**expr), + &hir::Inherited)?; + } + hir::MethodTraitItem(ref sig, ref body) => { + if body.is_some() { + self.head("")?; + } + self.print_method_sig(ti.name, sig, &hir::Inherited)?; + if let Some(ref body) = *body { + self.nbsp()?; + self.end()?; // need to close a box + self.end()?; // need to close a box + self.print_expr_id(body)?; + } else { + word(&mut self.s, ";")?; + } + } + hir::TypeTraitItem(ref bounds, ref default) => { + self.print_associated_type(ti.name, + Some(bounds), + default.as_ref().map(|ty| &**ty))?; + } + } + self.ann.post(self, NodeSubItem(ti.id)) + } + + pub fn print_impl_item_ref(&mut self, item_ref: &hir::ImplItemRef) -> io::Result<()> { + if let Some(krate) = self.krate { + // skip nested items if krate context was not provided + let item = &krate.impl_item(item_ref.id); + self.print_impl_item(item) + } else { + Ok(()) + } + } + + pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> { + self.ann.pre(self, NodeSubItem(ii.id))?; + self.hardbreak_if_not_bol()?; + self.maybe_print_comment(ii.span.lo)?; + self.print_outer_attributes(&ii.attrs)?; + + match ii.defaultness { + hir::Defaultness::Default { .. 
} => self.word_nbsp("default")?, + hir::Defaultness::Final => (), + } + + match ii.node { + hir::ImplItemKind::Const(ref ty, ref expr) => { + self.print_associated_const(ii.name, &ty, Some(&expr), &ii.vis)?; + } + hir::ImplItemKind::Method(ref sig, ref body) => { + self.head("")?; + self.print_method_sig(ii.name, sig, &ii.vis)?; + self.nbsp()?; + self.end()?; // need to close a box + self.end()?; // need to close a box + self.print_expr_id(body)?; + } + hir::ImplItemKind::Type(ref ty) => { + self.print_associated_type(ii.name, None, Some(ty))?; + } + } + self.ann.post(self, NodeSubItem(ii.id)) + } + + pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> { + self.maybe_print_comment(st.span.lo)?; + match st.node { + hir::StmtDecl(ref decl, _) => { + self.print_decl(&decl)?; + } + hir::StmtExpr(ref expr, _) => { + self.space_if_not_bol()?; + self.print_expr(&expr)?; + } + hir::StmtSemi(ref expr, _) => { + self.space_if_not_bol()?; + self.print_expr(&expr)?; + word(&mut self.s, ";")?; + } + } + if stmt_ends_with_semi(&st.node) { + word(&mut self.s, ";")?; + } + self.maybe_print_trailing_comment(st.span, None) + } + + pub fn print_block(&mut self, blk: &hir::Block) -> io::Result<()> { + self.print_block_with_attrs(blk, &[]) + } + + pub fn print_block_unclosed(&mut self, blk: &hir::Block) -> io::Result<()> { + self.print_block_unclosed_indent(blk, indent_unit) + } + + pub fn print_block_unclosed_indent(&mut self, + blk: &hir::Block, + indented: usize) + -> io::Result<()> { + self.print_block_maybe_unclosed(blk, indented, &[], false) + } + + pub fn print_block_with_attrs(&mut self, + blk: &hir::Block, + attrs: &[ast::Attribute]) + -> io::Result<()> { + self.print_block_maybe_unclosed(blk, indent_unit, attrs, true) + } + + pub fn print_block_maybe_unclosed(&mut self, + blk: &hir::Block, + indented: usize, + attrs: &[ast::Attribute], + close_box: bool) + -> io::Result<()> { + match blk.rules { + hir::UnsafeBlock(..) 
=> self.word_space("unsafe")?, + hir::PushUnsafeBlock(..) => self.word_space("push_unsafe")?, + hir::PopUnsafeBlock(..) => self.word_space("pop_unsafe")?, + hir::DefaultBlock => (), + } + self.maybe_print_comment(blk.span.lo)?; + self.ann.pre(self, NodeBlock(blk))?; + self.bopen()?; + + self.print_inner_attributes(attrs)?; + + for st in &blk.stmts { + self.print_stmt(st)?; + } + match blk.expr { + Some(ref expr) => { + self.space_if_not_bol()?; + self.print_expr(&expr)?; + self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))?; + } + _ => (), + } + self.bclose_maybe_open(blk.span, indented, close_box)?; + self.ann.post(self, NodeBlock(blk)) + } + + fn print_else(&mut self, els: Option<&hir::Expr>) -> io::Result<()> { + match els { + Some(_else) => { + match _else.node { + // "another else-if" + hir::ExprIf(ref i, ref then, ref e) => { + self.cbox(indent_unit - 1)?; + self.ibox(0)?; + word(&mut self.s, " else if ")?; + self.print_expr(&i)?; + space(&mut self.s)?; + self.print_block(&then)?; + self.print_else(e.as_ref().map(|e| &**e)) + } + // "final else" + hir::ExprBlock(ref b) => { + self.cbox(indent_unit - 1)?; + self.ibox(0)?; + word(&mut self.s, " else ")?; + self.print_block(&b) + } + // BLEAH, constraints would be great here + _ => { + panic!("print_if saw if with weird alternative"); + } + } + } + _ => Ok(()), + } + } + + pub fn print_if(&mut self, + test: &hir::Expr, + blk: &hir::Block, + elseopt: Option<&hir::Expr>) + -> io::Result<()> { + self.head("if")?; + self.print_expr(test)?; + space(&mut self.s)?; + self.print_block(blk)?; + self.print_else(elseopt) + } + + pub fn print_if_let(&mut self, + pat: &hir::Pat, + expr: &hir::Expr, + blk: &hir::Block, + elseopt: Option<&hir::Expr>) + -> io::Result<()> { + self.head("if let")?; + self.print_pat(pat)?; + space(&mut self.s)?; + self.word_space("=")?; + self.print_expr(expr)?; + space(&mut self.s)?; + self.print_block(blk)?; + self.print_else(elseopt) + } + + + fn print_call_post(&mut self, args: 
&[hir::Expr]) -> io::Result<()> { + self.popen()?; + self.commasep_exprs(Inconsistent, args)?; + self.pclose() + } + + pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> { + let needs_par = needs_parentheses(expr); + if needs_par { + self.popen()?; + } + self.print_expr(expr)?; + if needs_par { + self.pclose()?; + } + Ok(()) + } + + fn print_expr_vec(&mut self, exprs: &[hir::Expr]) -> io::Result<()> { + self.ibox(indent_unit)?; + word(&mut self.s, "[")?; + self.commasep_exprs(Inconsistent, exprs)?; + word(&mut self.s, "]")?; + self.end() + } + + fn print_expr_repeat(&mut self, element: &hir::Expr, count: &hir::Expr) -> io::Result<()> { + self.ibox(indent_unit)?; + word(&mut self.s, "[")?; + self.print_expr(element)?; + self.word_space(";")?; + self.print_expr(count)?; + word(&mut self.s, "]")?; + self.end() + } + + fn print_expr_struct(&mut self, + qpath: &hir::QPath, + fields: &[hir::Field], + wth: &Option>) + -> io::Result<()> { + self.print_qpath(qpath, true)?; + word(&mut self.s, "{")?; + self.commasep_cmnt(Consistent, + &fields[..], + |s, field| { + s.ibox(indent_unit)?; + if !field.is_shorthand { + s.print_name(field.name.node)?; + s.word_space(":")?; + } + s.print_expr(&field.expr)?; + s.end() + }, + |f| f.span)?; + match *wth { + Some(ref expr) => { + self.ibox(indent_unit)?; + if !fields.is_empty() { + word(&mut self.s, ",")?; + space(&mut self.s)?; + } + word(&mut self.s, "..")?; + self.print_expr(&expr)?; + self.end()?; + } + _ => if !fields.is_empty() { + word(&mut self.s, ",")? 
+ }, + } + word(&mut self.s, "}")?; + Ok(()) + } + + fn print_expr_tup(&mut self, exprs: &[hir::Expr]) -> io::Result<()> { + self.popen()?; + self.commasep_exprs(Inconsistent, exprs)?; + if exprs.len() == 1 { + word(&mut self.s, ",")?; + } + self.pclose() + } + + fn print_expr_call(&mut self, func: &hir::Expr, args: &[hir::Expr]) -> io::Result<()> { + self.print_expr_maybe_paren(func)?; + self.print_call_post(args) + } + + fn print_expr_method_call(&mut self, + name: Spanned, + tys: &[P], + args: &[hir::Expr]) + -> io::Result<()> { + let base_args = &args[1..]; + self.print_expr(&args[0])?; + word(&mut self.s, ".")?; + self.print_name(name.node)?; + if !tys.is_empty() { + word(&mut self.s, "::<")?; + self.commasep(Inconsistent, tys, |s, ty| s.print_type(&ty))?; + word(&mut self.s, ">")?; + } + self.print_call_post(base_args) + } + + fn print_expr_binary(&mut self, + op: hir::BinOp, + lhs: &hir::Expr, + rhs: &hir::Expr) + -> io::Result<()> { + self.print_expr(lhs)?; + space(&mut self.s)?; + self.word_space(op.node.as_str())?; + self.print_expr(rhs) + } + + fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> { + word(&mut self.s, op.as_str())?; + self.print_expr_maybe_paren(expr) + } + + fn print_expr_addr_of(&mut self, + mutability: hir::Mutability, + expr: &hir::Expr) + -> io::Result<()> { + word(&mut self.s, "&")?; + self.print_mutability(mutability)?; + self.print_expr_maybe_paren(expr) + } + + pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> { + self.maybe_print_comment(expr.span.lo)?; + self.ibox(indent_unit)?; + self.ann.pre(self, NodeExpr(expr))?; + match expr.node { + hir::ExprBox(ref expr) => { + self.word_space("box")?; + self.print_expr(expr)?; + } + hir::ExprArray(ref exprs) => { + self.print_expr_vec(exprs)?; + } + hir::ExprRepeat(ref element, ref count) => { + self.print_expr_repeat(&element, &count)?; + } + hir::ExprStruct(ref qpath, ref fields, ref wth) => { + self.print_expr_struct(qpath, &fields[..], 
wth)?; + } + hir::ExprTup(ref exprs) => { + self.print_expr_tup(exprs)?; + } + hir::ExprCall(ref func, ref args) => { + self.print_expr_call(&func, args)?; + } + hir::ExprMethodCall(name, ref tys, ref args) => { + self.print_expr_method_call(name, &tys[..], args)?; + } + hir::ExprBinary(op, ref lhs, ref rhs) => { + self.print_expr_binary(op, &lhs, &rhs)?; + } + hir::ExprUnary(op, ref expr) => { + self.print_expr_unary(op, &expr)?; + } + hir::ExprAddrOf(m, ref expr) => { + self.print_expr_addr_of(m, &expr)?; + } + hir::ExprLit(ref lit) => { + self.print_literal(&lit)?; + } + hir::ExprCast(ref expr, ref ty) => { + self.print_expr(&expr)?; + space(&mut self.s)?; + self.word_space("as")?; + self.print_type(&ty)?; + } + hir::ExprType(ref expr, ref ty) => { + self.print_expr(&expr)?; + self.word_space(":")?; + self.print_type(&ty)?; + } + hir::ExprIf(ref test, ref blk, ref elseopt) => { + self.print_if(&test, &blk, elseopt.as_ref().map(|e| &**e))?; + } + hir::ExprWhile(ref test, ref blk, opt_sp_name) => { + if let Some(sp_name) = opt_sp_name { + self.print_name(sp_name.node)?; + self.word_space(":")?; + } + self.head("while")?; + self.print_expr(&test)?; + space(&mut self.s)?; + self.print_block(&blk)?; + } + hir::ExprLoop(ref blk, opt_sp_name, _) => { + if let Some(sp_name) = opt_sp_name { + self.print_name(sp_name.node)?; + self.word_space(":")?; + } + self.head("loop")?; + space(&mut self.s)?; + self.print_block(&blk)?; + } + hir::ExprMatch(ref expr, ref arms, _) => { + self.cbox(indent_unit)?; + self.ibox(4)?; + self.word_nbsp("match")?; + self.print_expr(&expr)?; + space(&mut self.s)?; + self.bopen()?; + for arm in arms { + self.print_arm(arm)?; + } + self.bclose_(expr.span, indent_unit)?; + } + hir::ExprClosure(capture_clause, ref decl, ref body, _fn_decl_span) => { + self.print_capture_clause(capture_clause)?; + + self.print_fn_block_args(&decl)?; + space(&mut self.s)?; + + // this is a bare expression + self.print_expr_id(body)?; + self.end()?; // need to close a 
box + + // a box will be closed by print_expr, but we didn't want an overall + // wrapper so we closed the corresponding opening. so create an + // empty box to satisfy the close. + self.ibox(0)?; + } + hir::ExprBlock(ref blk) => { + // containing cbox, will be closed by print-block at } + self.cbox(indent_unit)?; + // head-box, will be closed by print-block after { + self.ibox(0)?; + self.print_block(&blk)?; + } + hir::ExprAssign(ref lhs, ref rhs) => { + self.print_expr(&lhs)?; + space(&mut self.s)?; + self.word_space("=")?; + self.print_expr(&rhs)?; + } + hir::ExprAssignOp(op, ref lhs, ref rhs) => { + self.print_expr(&lhs)?; + space(&mut self.s)?; + word(&mut self.s, op.node.as_str())?; + self.word_space("=")?; + self.print_expr(&rhs)?; + } + hir::ExprField(ref expr, name) => { + self.print_expr(&expr)?; + word(&mut self.s, ".")?; + self.print_name(name.node)?; + } + hir::ExprTupField(ref expr, id) => { + self.print_expr(&expr)?; + word(&mut self.s, ".")?; + self.print_usize(id.node)?; + } + hir::ExprIndex(ref expr, ref index) => { + self.print_expr(&expr)?; + word(&mut self.s, "[")?; + self.print_expr(&index)?; + word(&mut self.s, "]")?; + } + hir::ExprPath(ref qpath) => { + self.print_qpath(qpath, true)? + } + hir::ExprBreak(opt_label, ref opt_expr) => { + word(&mut self.s, "break")?; + space(&mut self.s)?; + if let Some(label) = opt_label { + self.print_name(label.name)?; + space(&mut self.s)?; + } + if let Some(ref expr) = *opt_expr { + self.print_expr(expr)?; + space(&mut self.s)?; + } + } + hir::ExprAgain(opt_label) => { + word(&mut self.s, "continue")?; + space(&mut self.s)?; + if let Some(label) = opt_label { + self.print_name(label.name)?; + space(&mut self.s)? 
+ } + } + hir::ExprRet(ref result) => { + word(&mut self.s, "return")?; + match *result { + Some(ref expr) => { + word(&mut self.s, " ")?; + self.print_expr(&expr)?; + } + _ => (), + } + } + hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => { + word(&mut self.s, "asm!")?; + self.popen()?; + self.print_string(&a.asm.as_str(), a.asm_str_style)?; + self.word_space(":")?; + + let mut out_idx = 0; + self.commasep(Inconsistent, &a.outputs, |s, out| { + let constraint = out.constraint.as_str(); + let mut ch = constraint.chars(); + match ch.next() { + Some('=') if out.is_rw => { + s.print_string(&format!("+{}", ch.as_str()), + ast::StrStyle::Cooked)? + } + _ => s.print_string(&constraint, ast::StrStyle::Cooked)?, + } + s.popen()?; + s.print_expr(&outputs[out_idx])?; + s.pclose()?; + out_idx += 1; + Ok(()) + })?; + space(&mut self.s)?; + self.word_space(":")?; + + let mut in_idx = 0; + self.commasep(Inconsistent, &a.inputs, |s, co| { + s.print_string(&co.as_str(), ast::StrStyle::Cooked)?; + s.popen()?; + s.print_expr(&inputs[in_idx])?; + s.pclose()?; + in_idx += 1; + Ok(()) + })?; + space(&mut self.s)?; + self.word_space(":")?; + + self.commasep(Inconsistent, &a.clobbers, |s, co| { + s.print_string(&co.as_str(), ast::StrStyle::Cooked)?; + Ok(()) + })?; + + let mut options = vec![]; + if a.volatile { + options.push("volatile"); + } + if a.alignstack { + options.push("alignstack"); + } + if a.dialect == ast::AsmDialect::Intel { + options.push("intel"); + } + + if !options.is_empty() { + space(&mut self.s)?; + self.word_space(":")?; + self.commasep(Inconsistent, &options, |s, &co| { + s.print_string(co, ast::StrStyle::Cooked)?; + Ok(()) + })?; + } + + self.pclose()?; + } + } + self.ann.post(self, NodeExpr(expr))?; + self.end() + } + + pub fn print_local_decl(&mut self, loc: &hir::Local) -> io::Result<()> { + self.print_pat(&loc.pat)?; + if let Some(ref ty) = loc.ty { + self.word_space(":")?; + self.print_type(&ty)?; + } + Ok(()) + } + + pub fn print_decl(&mut self, decl: 
&hir::Decl) -> io::Result<()> { + self.maybe_print_comment(decl.span.lo)?; + match decl.node { + hir::DeclLocal(ref loc) => { + self.space_if_not_bol()?; + self.ibox(indent_unit)?; + self.word_nbsp("let")?; + + self.ibox(indent_unit)?; + self.print_local_decl(&loc)?; + self.end()?; + if let Some(ref init) = loc.init { + self.nbsp()?; + self.word_space("=")?; + self.print_expr(&init)?; + } + self.end() + } + hir::DeclItem(ref item) => { + self.print_item_id(item) + } + } + } + + pub fn print_usize(&mut self, i: usize) -> io::Result<()> { + word(&mut self.s, &i.to_string()) + } + + pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> { + word(&mut self.s, &name.as_str())?; + self.ann.post(self, NodeName(&name)) + } + + pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> { + self.print_local_decl(loc)?; + space(&mut self.s)?; + self.word_space("in")?; + self.print_expr(coll) + } + + fn print_path(&mut self, + path: &hir::Path, + colons_before_params: bool) + -> io::Result<()> { + self.maybe_print_comment(path.span.lo)?; + + let mut first = !path.global; + for segment in &path.segments { + if first { + first = false + } else { + word(&mut self.s, "::")? + } + + self.print_name(segment.name)?; + + self.print_path_parameters(&segment.parameters, colons_before_params)?; + } + + Ok(()) + } + + fn print_qpath(&mut self, + qpath: &hir::QPath, + colons_before_params: bool) + -> io::Result<()> { + match *qpath { + hir::QPath::Resolved(None, ref path) => { + self.print_path(path, colons_before_params) + } + hir::QPath::Resolved(Some(ref qself), ref path) => { + word(&mut self.s, "<")?; + self.print_type(qself)?; + space(&mut self.s)?; + self.word_space("as")?; + + let mut first = !path.global; + for segment in &path.segments[..path.segments.len() - 1] { + if first { + first = false + } else { + word(&mut self.s, "::")? 
+ } + self.print_name(segment.name)?; + self.print_path_parameters(&segment.parameters, colons_before_params)?; + } + + word(&mut self.s, ">")?; + word(&mut self.s, "::")?; + let item_segment = path.segments.last().unwrap(); + self.print_name(item_segment.name)?; + self.print_path_parameters(&item_segment.parameters, colons_before_params) + } + hir::QPath::TypeRelative(ref qself, ref item_segment) => { + word(&mut self.s, "<")?; + self.print_type(qself)?; + word(&mut self.s, ">")?; + word(&mut self.s, "::")?; + self.print_name(item_segment.name)?; + self.print_path_parameters(&item_segment.parameters, colons_before_params) + } + } + } + + fn print_path_parameters(&mut self, + parameters: &hir::PathParameters, + colons_before_params: bool) + -> io::Result<()> { + if parameters.is_empty() { + let infer_types = match *parameters { + hir::AngleBracketedParameters(ref data) => data.infer_types, + hir::ParenthesizedParameters(_) => false + }; + + // FIXME(eddyb) See the comment below about infer_types. + if !(infer_types && false) { + return Ok(()); + } + } + + if colons_before_params { + word(&mut self.s, "::")? + } + + match *parameters { + hir::AngleBracketedParameters(ref data) => { + word(&mut self.s, "<")?; + + let mut comma = false; + for lifetime in &data.lifetimes { + if comma { + self.word_space(",")? + } + self.print_lifetime(lifetime)?; + comma = true; + } + + if !data.types.is_empty() { + if comma { + self.word_space(",")? + } + self.commasep(Inconsistent, &data.types, |s, ty| s.print_type(&ty))?; + comma = true; + } + + // FIXME(eddyb) This would leak into error messages, e.g.: + // "non-exhaustive patterns: `Some::<..>(_)` not covered". + if data.infer_types && false { + if comma { + self.word_space(",")? + } + word(&mut self.s, "..")?; + comma = true; + } + + for binding in data.bindings.iter() { + if comma { + self.word_space(",")? 
+ } + self.print_name(binding.name)?; + space(&mut self.s)?; + self.word_space("=")?; + self.print_type(&binding.ty)?; + comma = true; + } + + word(&mut self.s, ">")? + } + + hir::ParenthesizedParameters(ref data) => { + word(&mut self.s, "(")?; + self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(&ty))?; + word(&mut self.s, ")")?; + + if let Some(ref ty) = data.output { + self.space_if_not_bol()?; + self.word_space("->")?; + self.print_type(&ty)?; + } + } + } + + Ok(()) + } + + pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> { + self.maybe_print_comment(pat.span.lo)?; + self.ann.pre(self, NodePat(pat))?; + // Pat isn't normalized, but the beauty of it + // is that it doesn't matter + match pat.node { + PatKind::Wild => word(&mut self.s, "_")?, + PatKind::Binding(binding_mode, _, ref path1, ref sub) => { + match binding_mode { + hir::BindByRef(mutbl) => { + self.word_nbsp("ref")?; + self.print_mutability(mutbl)?; + } + hir::BindByValue(hir::MutImmutable) => {} + hir::BindByValue(hir::MutMutable) => { + self.word_nbsp("mut")?; + } + } + self.print_name(path1.node)?; + if let Some(ref p) = *sub { + word(&mut self.s, "@")?; + self.print_pat(&p)?; + } + } + PatKind::TupleStruct(ref qpath, ref elts, ddpos) => { + self.print_qpath(qpath, true)?; + self.popen()?; + if let Some(ddpos) = ddpos { + self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(&p))?; + if ddpos != 0 { + self.word_space(",")?; + } + word(&mut self.s, "..")?; + if ddpos != elts.len() { + word(&mut self.s, ",")?; + self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(&p))?; + } + } else { + self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?; + } + self.pclose()?; + } + PatKind::Path(ref qpath) => { + self.print_qpath(qpath, true)?; + } + PatKind::Struct(ref qpath, ref fields, etc) => { + self.print_qpath(qpath, true)?; + self.nbsp()?; + self.word_space("{")?; + self.commasep_cmnt(Consistent, + &fields[..], + |s, f| { + s.cbox(indent_unit)?; + if 
!f.node.is_shorthand { + s.print_name(f.node.name)?; + s.word_nbsp(":")?; + } + s.print_pat(&f.node.pat)?; + s.end() + }, + |f| f.node.pat.span)?; + if etc { + if !fields.is_empty() { + self.word_space(",")?; + } + word(&mut self.s, "..")?; + } + space(&mut self.s)?; + word(&mut self.s, "}")?; + } + PatKind::Tuple(ref elts, ddpos) => { + self.popen()?; + if let Some(ddpos) = ddpos { + self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(&p))?; + if ddpos != 0 { + self.word_space(",")?; + } + word(&mut self.s, "..")?; + if ddpos != elts.len() { + word(&mut self.s, ",")?; + self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(&p))?; + } + } else { + self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&p))?; + if elts.len() == 1 { + word(&mut self.s, ",")?; + } + } + self.pclose()?; + } + PatKind::Box(ref inner) => { + word(&mut self.s, "box ")?; + self.print_pat(&inner)?; + } + PatKind::Ref(ref inner, mutbl) => { + word(&mut self.s, "&")?; + if mutbl == hir::MutMutable { + word(&mut self.s, "mut ")?; + } + self.print_pat(&inner)?; + } + PatKind::Lit(ref e) => self.print_expr(&e)?, + PatKind::Range(ref begin, ref end) => { + self.print_expr(&begin)?; + space(&mut self.s)?; + word(&mut self.s, "...")?; + self.print_expr(&end)?; + } + PatKind::Slice(ref before, ref slice, ref after) => { + word(&mut self.s, "[")?; + self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&p))?; + if let Some(ref p) = *slice { + if !before.is_empty() { + self.word_space(",")?; + } + if p.node != PatKind::Wild { + self.print_pat(&p)?; + } + word(&mut self.s, "..")?; + if !after.is_empty() { + self.word_space(",")?; + } + } + self.commasep(Inconsistent, &after[..], |s, p| s.print_pat(&p))?; + word(&mut self.s, "]")?; + } + } + self.ann.post(self, NodePat(pat)) + } + + fn print_arm(&mut self, arm: &hir::Arm) -> io::Result<()> { + // I have no idea why this check is necessary, but here it + // is :( + if arm.attrs.is_empty() { + space(&mut self.s)?; + } + 
self.cbox(indent_unit)?; + self.ibox(0)?; + self.print_outer_attributes(&arm.attrs)?; + let mut first = true; + for p in &arm.pats { + if first { + first = false; + } else { + space(&mut self.s)?; + self.word_space("|")?; + } + self.print_pat(&p)?; + } + space(&mut self.s)?; + if let Some(ref e) = arm.guard { + self.word_space("if")?; + self.print_expr(&e)?; + space(&mut self.s)?; + } + self.word_space("=>")?; + + match arm.body.node { + hir::ExprBlock(ref blk) => { + // the block will close the pattern's ibox + self.print_block_unclosed_indent(&blk, indent_unit)?; + + // If it is a user-provided unsafe block, print a comma after it + if let hir::UnsafeBlock(hir::UserProvided) = blk.rules { + word(&mut self.s, ",")?; + } + } + _ => { + self.end()?; // close the ibox for the pattern + self.print_expr(&arm.body)?; + word(&mut self.s, ",")?; + } + } + self.end() // close enclosing cbox + } + + fn print_explicit_self(&mut self, explicit_self: &hir::ExplicitSelf) -> io::Result<()> { + match explicit_self.node { + SelfKind::Value(m) => { + self.print_mutability(m)?; + word(&mut self.s, "self") + } + SelfKind::Region(ref lt, m) => { + word(&mut self.s, "&")?; + self.print_opt_lifetime(lt)?; + self.print_mutability(m)?; + word(&mut self.s, "self") + } + SelfKind::Explicit(ref typ, m) => { + self.print_mutability(m)?; + word(&mut self.s, "self")?; + self.word_space(":")?; + self.print_type(&typ) + } + } + } + + pub fn print_fn(&mut self, + decl: &hir::FnDecl, + unsafety: hir::Unsafety, + constness: hir::Constness, + abi: Abi, + name: Option, + generics: &hir::Generics, + vis: &hir::Visibility) + -> io::Result<()> { + self.print_fn_header_info(unsafety, constness, abi, vis)?; + + if let Some(name) = name { + self.nbsp()?; + self.print_name(name)?; + } + self.print_generics(generics)?; + self.print_fn_args_and_ret(decl)?; + self.print_where_clause(&generics.where_clause) + } + + pub fn print_fn_args_and_ret(&mut self, decl: &hir::FnDecl) -> io::Result<()> { + self.popen()?; + 
self.commasep(Inconsistent, &decl.inputs, |s, arg| s.print_arg(arg, false))?; + if decl.variadic { + word(&mut self.s, ", ...")?; + } + self.pclose()?; + + self.print_fn_output(decl) + } + + pub fn print_fn_block_args(&mut self, decl: &hir::FnDecl) -> io::Result<()> { + word(&mut self.s, "|")?; + self.commasep(Inconsistent, &decl.inputs, |s, arg| s.print_arg(arg, true))?; + word(&mut self.s, "|")?; + + if let hir::DefaultReturn(..) = decl.output { + return Ok(()); + } + + self.space_if_not_bol()?; + self.word_space("->")?; + match decl.output { + hir::Return(ref ty) => { + self.print_type(&ty)?; + self.maybe_print_comment(ty.span.lo) + } + hir::DefaultReturn(..) => unreachable!(), + } + } + + pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureClause) -> io::Result<()> { + match capture_clause { + hir::CaptureByValue => self.word_space("move"), + hir::CaptureByRef => Ok(()), + } + } + + pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> { + if !bounds.is_empty() { + word(&mut self.s, prefix)?; + let mut first = true; + for bound in bounds { + self.nbsp()?; + if first { + first = false; + } else { + self.word_space("+")?; + } + + match *bound { + TraitTyParamBound(ref tref, TraitBoundModifier::None) => { + self.print_poly_trait_ref(tref) + } + TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => { + word(&mut self.s, "?")?; + self.print_poly_trait_ref(tref) + } + RegionTyParamBound(ref lt) => { + self.print_lifetime(lt) + } + }? 
+ } + Ok(()) + } else { + Ok(()) + } + } + + pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) -> io::Result<()> { + self.print_name(lifetime.name) + } + + pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> { + self.print_lifetime(&lifetime.lifetime)?; + let mut sep = ":"; + for v in &lifetime.bounds { + word(&mut self.s, sep)?; + self.print_lifetime(v)?; + sep = "+"; + } + Ok(()) + } + + pub fn print_generics(&mut self, generics: &hir::Generics) -> io::Result<()> { + let total = generics.lifetimes.len() + generics.ty_params.len(); + if total == 0 { + return Ok(()); + } + + word(&mut self.s, "<")?; + + let mut ints = Vec::new(); + for i in 0..total { + ints.push(i); + } + + self.commasep(Inconsistent, &ints[..], |s, &idx| { + if idx < generics.lifetimes.len() { + let lifetime = &generics.lifetimes[idx]; + s.print_lifetime_def(lifetime) + } else { + let idx = idx - generics.lifetimes.len(); + let param = &generics.ty_params[idx]; + s.print_ty_param(param) + } + })?; + + word(&mut self.s, ">")?; + Ok(()) + } + + pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> { + self.print_name(param.name)?; + self.print_bounds(":", ¶m.bounds)?; + match param.default { + Some(ref default) => { + space(&mut self.s)?; + self.word_space("=")?; + self.print_type(&default) + } + _ => Ok(()), + } + } + + pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> { + if where_clause.predicates.is_empty() { + return Ok(()); + } + + space(&mut self.s)?; + self.word_space("where")?; + + for (i, predicate) in where_clause.predicates.iter().enumerate() { + if i != 0 { + self.word_space(",")?; + } + + match predicate { + &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bound_lifetimes, + ref bounded_ty, + ref bounds, + ..}) => { + self.print_formal_lifetime_list(bound_lifetimes)?; + self.print_type(&bounded_ty)?; + self.print_bounds(":", bounds)?; + } + 
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime, + ref bounds, + ..}) => { + self.print_lifetime(lifetime)?; + word(&mut self.s, ":")?; + + for (i, bound) in bounds.iter().enumerate() { + self.print_lifetime(bound)?; + + if i != 0 { + word(&mut self.s, ":")?; + } + } + } + &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref path, ref ty, ..}) => { + self.print_path(path, false)?; + space(&mut self.s)?; + self.word_space("=")?; + self.print_type(&ty)?; + } + } + } + + Ok(()) + } + + pub fn print_mutability(&mut self, mutbl: hir::Mutability) -> io::Result<()> { + match mutbl { + hir::MutMutable => self.word_nbsp("mut"), + hir::MutImmutable => Ok(()), + } + } + + pub fn print_mt(&mut self, mt: &hir::MutTy) -> io::Result<()> { + self.print_mutability(mt.mutbl)?; + self.print_type(&mt.ty) + } + + pub fn print_arg(&mut self, input: &hir::Arg, is_closure: bool) -> io::Result<()> { + self.ibox(indent_unit)?; + match input.ty.node { + hir::TyInfer if is_closure => self.print_pat(&input.pat)?, + _ => { + if let Some(eself) = input.to_self() { + self.print_explicit_self(&eself)?; + } else { + let invalid = if let PatKind::Binding(_, _, name, _) = input.pat.node { + name.node == keywords::Invalid.name() + } else { + false + }; + if !invalid { + self.print_pat(&input.pat)?; + word(&mut self.s, ":")?; + space(&mut self.s)?; + } + self.print_type(&input.ty)?; + } + } + } + self.end() + } + + pub fn print_fn_output(&mut self, decl: &hir::FnDecl) -> io::Result<()> { + if let hir::DefaultReturn(..) = decl.output { + return Ok(()); + } + + self.space_if_not_bol()?; + self.ibox(indent_unit)?; + self.word_space("->")?; + match decl.output { + hir::DefaultReturn(..) 
=> unreachable!(), + hir::Return(ref ty) => self.print_type(&ty)?, + } + self.end()?; + + match decl.output { + hir::Return(ref output) => self.maybe_print_comment(output.span.lo), + _ => Ok(()), + } + } + + pub fn print_ty_fn(&mut self, + abi: Abi, + unsafety: hir::Unsafety, + decl: &hir::FnDecl, + name: Option, + generics: &hir::Generics) + -> io::Result<()> { + self.ibox(indent_unit)?; + if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { + word(&mut self.s, "for")?; + self.print_generics(generics)?; + } + let generics = hir::Generics { + lifetimes: hir::HirVec::new(), + ty_params: hir::HirVec::new(), + where_clause: hir::WhereClause { + id: ast::DUMMY_NODE_ID, + predicates: hir::HirVec::new(), + }, + span: syntax_pos::DUMMY_SP, + }; + self.print_fn(decl, + unsafety, + hir::Constness::NotConst, + abi, + name, + &generics, + &hir::Inherited)?; + self.end() + } + + pub fn maybe_print_trailing_comment(&mut self, + span: syntax_pos::Span, + next_pos: Option) + -> io::Result<()> { + let cm = match self.cm { + Some(cm) => cm, + _ => return Ok(()), + }; + if let Some(ref cmnt) = self.next_comment() { + if (*cmnt).style != comments::Trailing { + return Ok(()); + } + let span_line = cm.lookup_char_pos(span.hi); + let comment_line = cm.lookup_char_pos((*cmnt).pos); + let mut next = (*cmnt).pos + BytePos(1); + if let Some(p) = next_pos { + next = p; + } + if span.hi < (*cmnt).pos && (*cmnt).pos < next && + span_line.line == comment_line.line { + self.print_comment(cmnt)?; + self.cur_cmnt_and_lit.cur_cmnt += 1; + } + } + Ok(()) + } + + pub fn print_remaining_comments(&mut self) -> io::Result<()> { + // If there aren't any remaining comments, then we need to manually + // make sure there is a line break at the end. 
+ if self.next_comment().is_none() { + hardbreak(&mut self.s)?; + } + loop { + match self.next_comment() { + Some(ref cmnt) => { + self.print_comment(cmnt)?; + self.cur_cmnt_and_lit.cur_cmnt += 1; + } + _ => break, + } + } + Ok(()) + } + + pub fn print_opt_abi_and_extern_if_nondefault(&mut self, + opt_abi: Option) + -> io::Result<()> { + match opt_abi { + Some(Abi::Rust) => Ok(()), + Some(abi) => { + self.word_nbsp("extern")?; + self.word_nbsp(&abi.to_string()) + } + None => Ok(()), + } + } + + pub fn print_extern_opt_abi(&mut self, opt_abi: Option) -> io::Result<()> { + match opt_abi { + Some(abi) => { + self.word_nbsp("extern")?; + self.word_nbsp(&abi.to_string()) + } + None => Ok(()), + } + } + + pub fn print_fn_header_info(&mut self, + unsafety: hir::Unsafety, + constness: hir::Constness, + abi: Abi, + vis: &hir::Visibility) + -> io::Result<()> { + word(&mut self.s, &visibility_qualified(vis, ""))?; + self.print_unsafety(unsafety)?; + + match constness { + hir::Constness::NotConst => {} + hir::Constness::Const => self.word_nbsp("const")?, + } + + if abi != Abi::Rust { + self.word_nbsp("extern")?; + self.word_nbsp(&abi.to_string())?; + } + + word(&mut self.s, "fn") + } + + pub fn print_unsafety(&mut self, s: hir::Unsafety) -> io::Result<()> { + match s { + hir::Unsafety::Normal => Ok(()), + hir::Unsafety::Unsafe => self.word_nbsp("unsafe"), + } + } +} + +// Dup'ed from parse::classify, but adapted for the HIR. +/// Does this expression require a semicolon to be treated +/// as a statement? The negation of this: 'can this expression +/// be used as a statement without a semicolon' -- is used +/// as an early-bail-out in the parser so that, for instance, +/// if true {...} else {...} +/// |x| 5 +/// isn't parsed as (if true {...} else {...} | x) | 5 +fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool { + match e.node { + hir::ExprIf(..) | + hir::ExprMatch(..) | + hir::ExprBlock(_) | + hir::ExprWhile(..) | + hir::ExprLoop(..) 
=> false, + _ => true, + } +} + +/// this statement requires a semicolon after it. +/// note that in one case (stmt_semi), we've already +/// seen the semicolon, and thus don't need another. +fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool { + match *stmt { + hir::StmtDecl(ref d, _) => { + match d.node { + hir::DeclLocal(_) => true, + hir::DeclItem(_) => false, + } + } + hir::StmtExpr(ref e, _) => { + expr_requires_semi_to_be_stmt(&e) + } + hir::StmtSemi(..) => { + false + } + } +} diff --git a/src/librustc/hir/svh.rs b/src/librustc/hir/svh.rs new file mode 100644 index 0000000000000..ae1f9d3028c2c --- /dev/null +++ b/src/librustc/hir/svh.rs @@ -0,0 +1,68 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Calculation and management of a Strict Version Hash for crates +//! +//! The SVH is used for incremental compilation to track when HIR +//! nodes have changed between compilations, and also to detect +//! mismatches where we have two versions of the same crate that were +//! compiled from distinct sources. + +use std::fmt; +use std::hash::{Hash, Hasher}; +use serialize::{Encodable, Decodable, Encoder, Decoder}; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Svh { + hash: u64, +} + +impl Svh { + /// Create a new `Svh` given the hash. If you actually want to + /// compute the SVH from some HIR, you want the `calculate_svh` + /// function found in `librustc_incremental`. 
+ pub fn new(hash: u64) -> Svh { + Svh { hash: hash } + } + + pub fn as_u64(&self) -> u64 { + self.hash + } + + pub fn to_string(&self) -> String { + format!("{:016x}", self.hash) + } +} + +impl Hash for Svh { + fn hash(&self, state: &mut H) where H: Hasher { + self.hash.to_le().hash(state); + } +} + +impl fmt::Display for Svh { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.pad(&self.to_string()) + } +} + +impl Encodable for Svh { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_u64(self.as_u64().to_le()) + } +} + +impl Decodable for Svh { + fn decode(d: &mut D) -> Result { + d.read_u64() + .map(u64::from_le) + .map(Svh::new) + } +} diff --git a/src/librustc/middle/infer/README.md b/src/librustc/infer/README.md similarity index 100% rename from src/librustc/middle/infer/README.md rename to src/librustc/infer/README.md diff --git a/src/librustc/infer/bivariate.rs b/src/librustc/infer/bivariate.rs new file mode 100644 index 0000000000000..4acb8b807d594 --- /dev/null +++ b/src/librustc/infer/bivariate.rs @@ -0,0 +1,123 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Applies the "bivariance relationship" to two types and/or regions. +//! If (A,B) are bivariant then either A <: B or B <: A. It occurs +//! when type/lifetime parameters are unconstrained. Usually this is +//! an error, but we permit it in the specific case where a type +//! parameter is constrained in a where-clause via an associated type. +//! +//! There are several ways one could implement bivariance. You could +//! just do nothing at all, for example, or you could fully verify +//! that one of the two subtyping relationships hold. We choose to +//! 
thread a middle line: we relate types up to regions, but ignore +//! all region relationships. +//! +//! At one point, handling bivariance in this fashion was necessary +//! for inference, but I'm actually not sure if that is true anymore. +//! In particular, it might be enough to say (A,B) are bivariant for +//! all (A,B). + +use super::combine::CombineFields; +use super::type_variable::{BiTo}; + +use ty::{self, Ty, TyCtxt}; +use ty::TyVar; +use ty::relate::{Relate, RelateResult, TypeRelation}; + +pub struct Bivariate<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, + a_is_expected: bool, +} + +impl<'combine, 'infcx, 'gcx, 'tcx> Bivariate<'combine, 'infcx, 'gcx, 'tcx> { + pub fn new(fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool) + -> Bivariate<'combine, 'infcx, 'gcx, 'tcx> + { + Bivariate { fields: fields, a_is_expected: a_is_expected } + } +} + +impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> + for Bivariate<'combine, 'infcx, 'gcx, 'tcx> +{ + fn tag(&self) -> &'static str { "Bivariate" } + + fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.tcx() } + + fn a_is_expected(&self) -> bool { self.a_is_expected } + + fn relate_with_variance>(&mut self, + variance: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + match variance { + // If we have Foo and Foo is invariant w/r/t A, + // and we want to assert that + // + // Foo <: Foo || + // Foo <: Foo + // + // then still A must equal B. 
+ ty::Invariant => self.relate(a, b), + + ty::Covariant => self.relate(a, b), + ty::Bivariant => self.relate(a, b), + ty::Contravariant => self.relate(a, b), + } + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("{}.tys({:?}, {:?})", self.tag(), + a, b); + if a == b { return Ok(a); } + + let infcx = self.fields.infcx; + let a = infcx.type_variables.borrow_mut().replace_if_possible(a); + let b = infcx.type_variables.borrow_mut().replace_if_possible(b); + match (&a.sty, &b.sty) { + (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { + infcx.type_variables.borrow_mut().relate_vars(a_id, BiTo, b_id); + Ok(a) + } + + (&ty::TyInfer(TyVar(a_id)), _) => { + self.fields.instantiate(b, BiTo, a_id, self.a_is_expected)?; + Ok(a) + } + + (_, &ty::TyInfer(TyVar(b_id))) => { + self.fields.instantiate(a, BiTo, b_id, self.a_is_expected)?; + Ok(a) + } + + _ => { + self.fields.infcx.super_combine_tys(self, a, b) + } + } + } + + fn regions(&mut self, a: &'tcx ty::Region, _: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + Ok(a) + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + let a1 = self.tcx().erase_late_bound_regions(a); + let b1 = self.tcx().erase_late_bound_regions(b); + let c = self.relate(&a1, &b1)?; + Ok(ty::Binder(c)) + } +} diff --git a/src/librustc/infer/combine.rs b/src/librustc/infer/combine.rs new file mode 100644 index 0000000000000..5d33d6e6d2e71 --- /dev/null +++ b/src/librustc/infer/combine.rs @@ -0,0 +1,406 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +/////////////////////////////////////////////////////////////////////////// +// # Type combining +// +// There are four type combiners: equate, sub, lub, and glb. Each +// implements the trait `Combine` and contains methods for combining +// two instances of various things and yielding a new instance. These +// combiner methods always yield a `Result`. There is a lot of +// common code for these operations, implemented as default methods on +// the `Combine` trait. +// +// Each operation may have side-effects on the inference context, +// though these can be unrolled using snapshots. On success, the +// LUB/GLB operations return the appropriate bound. The Eq and Sub +// operations generally return the first operand. +// +// ## Contravariance +// +// When you are relating two things which have a contravariant +// relationship, you should use `contratys()` or `contraregions()`, +// rather than inversing the order of arguments! This is necessary +// because the order of arguments is not relevant for LUB and GLB. It +// is also useful to track which value is the "expected" value in +// terms of error reporting. 
+ +use super::bivariate::Bivariate; +use super::equate::Equate; +use super::glb::Glb; +use super::lub::Lub; +use super::sub::Sub; +use super::InferCtxt; +use super::{MiscVariable, TypeTrace}; +use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf}; + +use ty::{IntType, UintType}; +use ty::{self, Ty, TyCtxt}; +use ty::error::TypeError; +use ty::fold::TypeFoldable; +use ty::relate::{RelateResult, TypeRelation}; +use traits::PredicateObligations; + +use syntax::ast; +use syntax::util::small_vector::SmallVector; +use syntax_pos::Span; + +#[derive(Clone)] +pub struct CombineFields<'infcx, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + pub infcx: &'infcx InferCtxt<'infcx, 'gcx, 'tcx>, + pub trace: TypeTrace<'tcx>, + pub cause: Option, + pub obligations: PredicateObligations<'tcx>, +} + +impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> { + pub fn super_combine_tys(&self, + relation: &mut R, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> + where R: TypeRelation<'infcx, 'gcx, 'tcx> + { + let a_is_expected = relation.a_is_expected(); + + match (&a.sty, &b.sty) { + // Relate integral variables to other types + (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => { + self.int_unification_table + .borrow_mut() + .unify_var_var(a_id, b_id) + .map_err(|e| int_unification_error(a_is_expected, e))?; + Ok(a) + } + (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => { + self.unify_integral_variable(a_is_expected, v_id, IntType(v)) + } + (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => { + self.unify_integral_variable(!a_is_expected, v_id, IntType(v)) + } + (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => { + self.unify_integral_variable(a_is_expected, v_id, UintType(v)) + } + (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => { + self.unify_integral_variable(!a_is_expected, v_id, UintType(v)) + } + + // Relate floating-point variables to other types + (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => { + 
self.float_unification_table + .borrow_mut() + .unify_var_var(a_id, b_id) + .map_err(|e| float_unification_error(relation.a_is_expected(), e))?; + Ok(a) + } + (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => { + self.unify_float_variable(a_is_expected, v_id, v) + } + (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => { + self.unify_float_variable(!a_is_expected, v_id, v) + } + + // All other cases of inference are errors + (&ty::TyInfer(_), _) | + (_, &ty::TyInfer(_)) => { + Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b))) + } + + + _ => { + ty::relate::super_relate_tys(relation, a, b) + } + } + } + + fn unify_integral_variable(&self, + vid_is_expected: bool, + vid: ty::IntVid, + val: ty::IntVarValue) + -> RelateResult<'tcx, Ty<'tcx>> + { + self.int_unification_table + .borrow_mut() + .unify_var_value(vid, val) + .map_err(|e| int_unification_error(vid_is_expected, e))?; + match val { + IntType(v) => Ok(self.tcx.mk_mach_int(v)), + UintType(v) => Ok(self.tcx.mk_mach_uint(v)), + } + } + + fn unify_float_variable(&self, + vid_is_expected: bool, + vid: ty::FloatVid, + val: ast::FloatTy) + -> RelateResult<'tcx, Ty<'tcx>> + { + self.float_unification_table + .borrow_mut() + .unify_var_value(vid, val) + .map_err(|e| float_unification_error(vid_is_expected, e))?; + Ok(self.tcx.mk_mach_float(val)) + } +} + +impl<'infcx, 'gcx, 'tcx> CombineFields<'infcx, 'gcx, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { + self.infcx.tcx + } + + pub fn equate<'a>(&'a mut self, a_is_expected: bool) -> Equate<'a, 'infcx, 'gcx, 'tcx> { + Equate::new(self, a_is_expected) + } + + pub fn bivariate<'a>(&'a mut self, a_is_expected: bool) -> Bivariate<'a, 'infcx, 'gcx, 'tcx> { + Bivariate::new(self, a_is_expected) + } + + pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'gcx, 'tcx> { + Sub::new(self, a_is_expected) + } + + pub fn lub<'a>(&'a mut self, a_is_expected: bool) -> Lub<'a, 'infcx, 'gcx, 'tcx> { + Lub::new(self, a_is_expected) + 
} + + pub fn glb<'a>(&'a mut self, a_is_expected: bool) -> Glb<'a, 'infcx, 'gcx, 'tcx> { + Glb::new(self, a_is_expected) + } + + pub fn instantiate(&mut self, + a_ty: Ty<'tcx>, + dir: RelationDir, + b_vid: ty::TyVid, + a_is_expected: bool) + -> RelateResult<'tcx, ()> + { + // We use SmallVector here instead of Vec because this code is hot and + // it's rare that the stack length exceeds 1. + let mut stack = SmallVector::new(); + stack.push((a_ty, dir, b_vid)); + loop { + // For each turn of the loop, we extract a tuple + // + // (a_ty, dir, b_vid) + // + // to relate. Here dir is either SubtypeOf or + // SupertypeOf. The idea is that we should ensure that + // the type `a_ty` is a subtype or supertype (respectively) of the + // type to which `b_vid` is bound. + // + // If `b_vid` has not yet been instantiated with a type + // (which is always true on the first iteration, but not + // necessarily true on later iterations), we will first + // instantiate `b_vid` with a *generalized* version of + // `a_ty`. Generalization introduces other inference + // variables wherever subtyping could occur (at time of + // this writing, this means replacing free regions with + // region variables). + let (a_ty, dir, b_vid) = match stack.pop() { + None => break, + Some(e) => e, + }; + // Get the actual variable that b_vid has been inferred to + let (b_vid, b_ty) = { + let mut variables = self.infcx.type_variables.borrow_mut(); + let b_vid = variables.root_var(b_vid); + (b_vid, variables.probe_root(b_vid)) + }; + + debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", + a_ty, + dir, + b_vid); + + // Check whether `vid` has been instantiated yet. If not, + // make a generalized form of `ty` and instantiate with + // that. + let b_ty = match b_ty { + Some(t) => t, // ...already instantiated. + None => { // ...not yet instantiated: + // Generalize type if necessary. 
+ let generalized_ty = match dir { + EqTo => self.generalize(a_ty, b_vid, false), + BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true), + }?; + debug!("instantiate(a_ty={:?}, dir={:?}, \ + b_vid={:?}, generalized_ty={:?})", + a_ty, dir, b_vid, + generalized_ty); + self.infcx.type_variables + .borrow_mut() + .instantiate_and_push( + b_vid, generalized_ty, &mut stack); + generalized_ty + } + }; + + // The original triple was `(a_ty, dir, b_vid)` -- now we have + // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`: + // + // FIXME(#16847): This code is non-ideal because all these subtype + // relations wind up attributed to the same spans. We need + // to associate causes/spans with each of the relations in + // the stack to get this right. + match dir { + BiTo => self.bivariate(a_is_expected).relate(&a_ty, &b_ty), + EqTo => self.equate(a_is_expected).relate(&a_ty, &b_ty), + SubtypeOf => self.sub(a_is_expected).relate(&a_ty, &b_ty), + SupertypeOf => self.sub(a_is_expected).relate_with_variance( + ty::Contravariant, &a_ty, &b_ty), + }?; + } + + Ok(()) + } + + /// Attempts to generalize `ty` for the type variable `for_vid`. This checks for cycle -- that + /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also + /// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok` + /// otherwise. 
+ fn generalize(&self, + ty: Ty<'tcx>, + for_vid: ty::TyVid, + make_region_vars: bool) + -> RelateResult<'tcx, Ty<'tcx>> + { + let mut generalize = Generalizer { + infcx: self.infcx, + span: self.trace.cause.span, + for_vid: for_vid, + make_region_vars: make_region_vars, + cycle_detected: false + }; + let u = ty.fold_with(&mut generalize); + if generalize.cycle_detected { + Err(TypeError::CyclicTy) + } else { + Ok(u) + } + } +} + +struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { + infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + span: Span, + for_vid: ty::TyVid, + make_region_vars: bool, + cycle_detected: bool, +} + +impl<'cx, 'gcx, 'tcx> ty::fold::TypeFolder<'gcx, 'tcx> for Generalizer<'cx, 'gcx, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + // Check to see whether the type we are genealizing references + // `vid`. At the same time, also update any type variables to + // the values that they are bound to. This is needed to truly + // check for cycles, but also just makes things readable. + // + // (In particular, you could have something like `$0 = Box<$1>` + // where `$1` has already been instantiated with `Box<$0>`) + match t.sty { + ty::TyInfer(ty::TyVar(vid)) => { + let mut variables = self.infcx.type_variables.borrow_mut(); + let vid = variables.root_var(vid); + if vid == self.for_vid { + self.cycle_detected = true; + self.tcx().types.err + } else { + match variables.probe_root(vid) { + Some(u) => { + drop(variables); + self.fold_ty(u) + } + None => t, + } + } + } + _ => { + t.super_fold_with(self) + } + } + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + // Never make variables for regions bound within the type itself, + // nor for erased regions. + ty::ReLateBound(..) | + ty::ReErased => { return r; } + + // Early-bound regions should really have been substituted away before + // we get to this point. + ty::ReEarlyBound(..) 
=> { + span_bug!( + self.span, + "Encountered early bound region when generalizing: {:?}", + r); + } + + // Always make a fresh region variable for skolemized regions; + // the higher-ranked decision procedures rely on this. + ty::ReSkolemized(..) => { } + + // For anything else, we make a region variable, unless we + // are *equating*, in which case it's just wasteful. + ty::ReEmpty | + ty::ReStatic | + ty::ReScope(..) | + ty::ReVar(..) | + ty::ReFree(..) => { + if !self.make_region_vars { + return r; + } + } + } + + // FIXME: This is non-ideal because we don't give a + // very descriptive origin for this region variable. + self.infcx.next_region_var(MiscVariable(self.span)) + } +} + +pub trait RelateResultCompare<'tcx, T> { + fn compare(&self, t: T, f: F) -> RelateResult<'tcx, T> where + F: FnOnce() -> TypeError<'tcx>; +} + +impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> { + fn compare(&self, t: T, f: F) -> RelateResult<'tcx, T> where + F: FnOnce() -> TypeError<'tcx>, + { + self.clone().and_then(|s| { + if s == t { + self.clone() + } else { + Err(f()) + } + }) + } +} + +fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue)) + -> TypeError<'tcx> +{ + let (a, b) = v; + TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b)) +} + +fn float_unification_error<'tcx>(a_is_expected: bool, + v: (ast::FloatTy, ast::FloatTy)) + -> TypeError<'tcx> +{ + let (a, b) = v; + TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b)) +} diff --git a/src/librustc/infer/equate.rs b/src/librustc/infer/equate.rs new file mode 100644 index 0000000000000..bf247acec5a2d --- /dev/null +++ b/src/librustc/infer/equate.rs @@ -0,0 +1,100 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::combine::CombineFields; +use super::{Subtype}; +use super::type_variable::{EqTo}; + +use ty::{self, Ty, TyCtxt}; +use ty::TyVar; +use ty::relate::{Relate, RelateResult, TypeRelation}; + +/// Ensures `a` is made equal to `b`. Returns `a` on success. +pub struct Equate<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, + a_is_expected: bool, +} + +impl<'combine, 'infcx, 'gcx, 'tcx> Equate<'combine, 'infcx, 'gcx, 'tcx> { + pub fn new(fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool) + -> Equate<'combine, 'infcx, 'gcx, 'tcx> + { + Equate { fields: fields, a_is_expected: a_is_expected } + } +} + +impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> + for Equate<'combine, 'infcx, 'gcx, 'tcx> +{ + fn tag(&self) -> &'static str { "Equate" } + + fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.tcx() } + + fn a_is_expected(&self) -> bool { self.a_is_expected } + + fn relate_with_variance>(&mut self, + _: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + self.relate(a, b) + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("{}.tys({:?}, {:?})", self.tag(), + a, b); + if a == b { return Ok(a); } + + let infcx = self.fields.infcx; + let a = infcx.type_variables.borrow_mut().replace_if_possible(a); + let b = infcx.type_variables.borrow_mut().replace_if_possible(b); + match (&a.sty, &b.sty) { + (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { + infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id); + Ok(a) + } + + (&ty::TyInfer(TyVar(a_id)), _) => { + self.fields.instantiate(b, EqTo, a_id, self.a_is_expected)?; + Ok(a) + } + + (_, &ty::TyInfer(TyVar(b_id))) => { + 
self.fields.instantiate(a, EqTo, b_id, self.a_is_expected)?; + Ok(a) + } + + _ => { + self.fields.infcx.super_combine_tys(self, a, b)?; + Ok(a) + } + } + } + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + debug!("{}.regions({:?}, {:?})", + self.tag(), + a, + b); + let origin = Subtype(self.fields.trace.clone()); + self.fields.infcx.region_vars.make_eqregion(origin, a, b); + Ok(a) + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + self.fields.higher_ranked_sub(a, b, self.a_is_expected)?; + self.fields.higher_ranked_sub(b, a, self.a_is_expected) + } +} diff --git a/src/librustc/infer/error_reporting.rs b/src/librustc/infer/error_reporting.rs new file mode 100644 index 0000000000000..90d752ae6ee29 --- /dev/null +++ b/src/librustc/infer/error_reporting.rs @@ -0,0 +1,2005 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Error Reporting Code for the inference engine +//! +//! Because of the way inference, and in particular region inference, +//! works, it often happens that errors are not detected until far after +//! the relevant line of code has been type-checked. Therefore, there is +//! an elaborate system to track why a particular constraint in the +//! inference graph arose so that we can explain to the user what gave +//! rise to a particular error. +//! +//! The basis of the system are the "origin" types. An "origin" is the +//! reason that a constraint or inference variable arose. There are +//! different "origin" enums for different kinds of constraints/variables +//! 
(e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has +//! a span, but also more information so that we can generate a meaningful +//! error message. +//! +//! Having a catalogue of all the different reasons an error can arise is +//! also useful for other reasons, like cross-referencing FAQs etc, though +//! we are not really taking advantage of this yet. +//! +//! # Region Inference +//! +//! Region inference is particularly tricky because it always succeeds "in +//! the moment" and simply registers a constraint. Then, at the end, we +//! can compute the full graph and report errors, so we need to be able to +//! store and later report what gave rise to the conflicting constraints. +//! +//! # Subtype Trace +//! +//! Determining whether `T1 <: T2` often involves a number of subtypes and +//! subconstraints along the way. A "TypeTrace" is an extended version +//! of an origin that traces the types and other values that were being +//! compared. It is not necessarily comprehensive (in fact, at the time of +//! this writing it only tracks the root values being compared) but I'd +//! like to extend it to include significant "waypoints". For example, if +//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2 +//! <: T4` fails, I'd like the trace to include enough information to say +//! "in the 2nd element of the tuple". Similarly, failures when comparing +//! arguments or return types in fn types should be able to cite the +//! specific position, etc. +//! +//! # Reality vs plan +//! +//! Of course, there is still a LOT of code in typeck that has yet to be +//! ported to this system, and which relies on string concatenation at the +//! time of error detection. 
+ +use self::FreshOrKept::*; + +use super::InferCtxt; +use super::TypeTrace; +use super::SubregionOrigin; +use super::RegionVariableOrigin; +use super::ValuePairs; +use super::region_inference::RegionResolutionError; +use super::region_inference::ConcreteFailure; +use super::region_inference::SubSupConflict; +use super::region_inference::GenericBoundFailure; +use super::region_inference::GenericKind; +use super::region_inference::ProcessedErrors; +use super::region_inference::ProcessedErrorOrigin; +use super::region_inference::SameRegions; + +use std::collections::HashSet; + +use hir::map as ast_map; +use hir; +use hir::print as pprust; + +use lint; +use hir::def::Def; +use hir::def_id::DefId; +use infer; +use middle::region; +use traits::{ObligationCause, ObligationCauseCode}; +use ty::{self, TyCtxt, TypeFoldable}; +use ty::{Region, ReFree}; +use ty::error::TypeError; + +use std::cell::{Cell, RefCell}; +use std::char::from_u32; +use std::fmt; +use syntax::ast; +use syntax::ptr::P; +use syntax::symbol::Symbol; +use syntax_pos::{self, Pos, Span}; +use errors::DiagnosticBuilder; + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn note_and_explain_region(self, + err: &mut DiagnosticBuilder, + prefix: &str, + region: &'tcx ty::Region, + suffix: &str) { + fn item_scope_tag(item: &hir::Item) -> &'static str { + match item.node { + hir::ItemImpl(..) => "impl", + hir::ItemStruct(..) => "struct", + hir::ItemUnion(..) => "union", + hir::ItemEnum(..) => "enum", + hir::ItemTrait(..) => "trait", + hir::ItemFn(..) => "function body", + _ => "item" + } + } + + fn explain_span<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + heading: &str, span: Span) + -> (String, Option) { + let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo); + (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()), + Some(span)) + } + + let (description, span) = match *region { + ty::ReScope(scope) => { + let new_string; + let unknown_scope = || { + format!("{}unknown scope: {:?}{}. 
Please report a bug.", + prefix, scope, suffix) + }; + let span = match scope.span(&self.region_maps, &self.map) { + Some(s) => s, + None => { + err.note(&unknown_scope()); + return; + } + }; + let tag = match self.map.find(scope.node_id(&self.region_maps)) { + Some(ast_map::NodeBlock(_)) => "block", + Some(ast_map::NodeExpr(expr)) => match expr.node { + hir::ExprCall(..) => "call", + hir::ExprMethodCall(..) => "method call", + hir::ExprMatch(.., hir::MatchSource::IfLetDesugar { .. }) => "if let", + hir::ExprMatch(.., hir::MatchSource::WhileLetDesugar) => "while let", + hir::ExprMatch(.., hir::MatchSource::ForLoopDesugar) => "for", + hir::ExprMatch(..) => "match", + _ => "expression", + }, + Some(ast_map::NodeStmt(_)) => "statement", + Some(ast_map::NodeItem(it)) => item_scope_tag(&it), + Some(_) | None => { + err.span_note(span, &unknown_scope()); + return; + } + }; + let scope_decorated_tag = match self.region_maps.code_extent_data(scope) { + region::CodeExtentData::Misc(_) => tag, + region::CodeExtentData::CallSiteScope { .. } => { + "scope of call-site for function" + } + region::CodeExtentData::ParameterScope { .. } => { + "scope of function body" + } + region::CodeExtentData::DestructionScope(_) => { + new_string = format!("destruction scope surrounding {}", tag); + &new_string[..] + } + region::CodeExtentData::Remainder(r) => { + new_string = format!("block suffix following statement {}", + r.first_statement_index); + &new_string[..] 
+ } + }; + explain_span(self, scope_decorated_tag, span) + } + + ty::ReFree(ref fr) => { + let prefix = match fr.bound_region { + ty::BrAnon(idx) => { + format!("the anonymous lifetime #{} defined on", idx + 1) + } + ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(), + _ => { + format!("the lifetime {} as defined on", + fr.bound_region) + } + }; + + match self.map.find(fr.scope.node_id(&self.region_maps)) { + Some(ast_map::NodeBlock(ref blk)) => { + let (msg, opt_span) = explain_span(self, "block", blk.span); + (format!("{} {}", prefix, msg), opt_span) + } + Some(ast_map::NodeItem(it)) => { + let tag = item_scope_tag(&it); + let (msg, opt_span) = explain_span(self, tag, it.span); + (format!("{} {}", prefix, msg), opt_span) + } + Some(_) | None => { + // this really should not happen, but it does: + // FIXME(#27942) + (format!("{} unknown free region bounded by scope {:?}", + prefix, fr.scope), None) + } + } + } + + ty::ReStatic => ("the static lifetime".to_owned(), None), + + ty::ReEmpty => ("the empty lifetime".to_owned(), None), + + ty::ReEarlyBound(ref data) => (data.name.to_string(), None), + + // FIXME(#13998) ReSkolemized should probably print like + // ReFree rather than dumping Debug output on the user. + // + // We shouldn't really be having unification failures with ReVar + // and ReLateBound though. + ty::ReSkolemized(..) | + ty::ReVar(_) | + ty::ReLateBound(..) 
| + ty::ReErased => { + (format!("lifetime {:?}", region), None) + } + }; + let message = format!("{}{}{}", prefix, description, suffix); + if let Some(span) = span { + err.span_note(span, &message); + } else { + err.note(&message); + } + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + pub fn report_region_errors(&self, + errors: &Vec>) { + debug!("report_region_errors(): {} errors to start", errors.len()); + + // try to pre-process the errors, which will group some of them + // together into a `ProcessedErrors` group: + let processed_errors = self.process_errors(errors); + let errors = processed_errors.as_ref().unwrap_or(errors); + + debug!("report_region_errors: {} errors after preprocessing", errors.len()); + + for error in errors { + debug!("report_region_errors: error = {:?}", error); + match error.clone() { + ConcreteFailure(origin, sub, sup) => { + self.report_concrete_failure(origin, sub, sup).emit(); + } + + GenericBoundFailure(kind, param_ty, sub) => { + self.report_generic_bound_failure(kind, param_ty, sub); + } + + SubSupConflict(var_origin, + sub_origin, sub_r, + sup_origin, sup_r) => { + self.report_sub_sup_conflict(var_origin, + sub_origin, sub_r, + sup_origin, sup_r); + } + + ProcessedErrors(ref origins, + ref same_regions) => { + if !same_regions.is_empty() { + self.report_processed_errors(origins, same_regions); + } + } + } + } + } + + // This method goes through all the errors and try to group certain types + // of error together, for the purpose of suggesting explicit lifetime + // parameters to the user. This is done so that we can have a more + // complete view of what lifetimes should be the same. + // If the return value is an empty vector, it means that processing + // failed (so the return value of this method should not be used). + // + // The method also attempts to weed out messages that seem like + // duplicates that will be unhelpful to the end-user. But + // obviously it never weeds out ALL errors. 
+ fn process_errors(&self, errors: &Vec>) + -> Option>> { + debug!("process_errors()"); + let mut origins = Vec::new(); + + // we collect up ConcreteFailures and SubSupConflicts that are + // relating free-regions bound on the fn-header and group them + // together into this vector + let mut same_regions = Vec::new(); + + // here we put errors that we will not be able to process nicely + let mut other_errors = Vec::new(); + + // we collect up GenericBoundFailures in here. + let mut bound_failures = Vec::new(); + + for error in errors { + // Check whether we can process this error into some other + // form; if not, fall through. + match *error { + ConcreteFailure(ref origin, sub, sup) => { + debug!("processing ConcreteFailure"); + if let SubregionOrigin::CompareImplMethodObligation { .. } = *origin { + // When comparing an impl method against a + // trait method, it is not helpful to suggest + // changes to the impl method. This is + // because the impl method signature is being + // checked using the trait's environment, so + // usually the changes we suggest would + // actually have to be applied to the *trait* + // method (and it's not clear that the trait + // method is even under the user's control). + } else if let Some(same_frs) = free_regions_from_same_fn(self.tcx, sub, sup) { + origins.push( + ProcessedErrorOrigin::ConcreteFailure( + origin.clone(), + sub, + sup)); + append_to_same_regions(&mut same_regions, &same_frs); + continue; + } + } + SubSupConflict(ref var_origin, ref sub_origin, sub, ref sup_origin, sup) => { + debug!("processing SubSupConflict sub: {:?} sup: {:?}", sub, sup); + match (sub_origin, sup_origin) { + (&SubregionOrigin::CompareImplMethodObligation { .. }, _) => { + // As above, when comparing an impl method + // against a trait method, it is not helpful + // to suggest changes to the impl method. + } + (_, &SubregionOrigin::CompareImplMethodObligation { .. }) => { + // See above. 
+ } + _ => { + if let Some(same_frs) = free_regions_from_same_fn(self.tcx, sub, sup) { + origins.push( + ProcessedErrorOrigin::VariableFailure( + var_origin.clone())); + append_to_same_regions(&mut same_regions, &same_frs); + continue; + } + } + } + } + GenericBoundFailure(ref origin, ref kind, region) => { + bound_failures.push((origin.clone(), kind.clone(), region)); + continue; + } + ProcessedErrors(..) => { + bug!("should not encounter a `ProcessedErrors` yet: {:?}", error) + } + } + + // No changes to this error. + other_errors.push(error.clone()); + } + + // ok, let's pull together the errors, sorted in an order that + // we think will help user the best + let mut processed_errors = vec![]; + + // first, put the processed errors, if any + if !same_regions.is_empty() { + let common_scope_id = same_regions[0].scope_id; + for sr in &same_regions { + // Since ProcessedErrors is used to reconstruct the function + // declaration, we want to make sure that they are, in fact, + // from the same scope + if sr.scope_id != common_scope_id { + debug!("returning empty result from process_errors because + {} != {}", sr.scope_id, common_scope_id); + return None; + } + } + assert!(origins.len() > 0); + let pe = ProcessedErrors(origins, same_regions); + debug!("errors processed: {:?}", pe); + processed_errors.push(pe); + } + + // next, put the other misc errors + processed_errors.extend(other_errors); + + // finally, put the `T: 'a` errors, but only if there were no + // other errors. otherwise, these have a very high rate of + // being unhelpful in practice. This is because they are + // basically secondary checks that test the state of the + // region graph after the rest of inference is done, and the + // other kinds of errors indicate that the region constraint + // graph is internally inconsistent, so these test results are + // likely to be meaningless. 
+ if processed_errors.is_empty() { + for (origin, kind, region) in bound_failures { + processed_errors.push(GenericBoundFailure(origin, kind, region)); + } + } + + // we should always wind up with SOME errors, unless there were no + // errors to start + assert!(if errors.len() > 0 {processed_errors.len() > 0} else {true}); + + return Some(processed_errors); + + #[derive(Debug)] + struct FreeRegionsFromSameFn { + sub_fr: ty::FreeRegion, + sup_fr: ty::FreeRegion, + scope_id: ast::NodeId + } + + impl FreeRegionsFromSameFn { + fn new(sub_fr: ty::FreeRegion, + sup_fr: ty::FreeRegion, + scope_id: ast::NodeId) + -> FreeRegionsFromSameFn { + FreeRegionsFromSameFn { + sub_fr: sub_fr, + sup_fr: sup_fr, + scope_id: scope_id + } + } + } + + fn free_regions_from_same_fn<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) + -> Option { + debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup); + let (scope_id, fr1, fr2) = match (sub, sup) { + (&ReFree(fr1), &ReFree(fr2)) => { + if fr1.scope != fr2.scope { + return None + } + assert!(fr1.scope == fr2.scope); + (fr1.scope.node_id(&tcx.region_maps), fr1, fr2) + }, + _ => return None + }; + let parent = tcx.map.get_parent(scope_id); + let parent_node = tcx.map.find(parent); + match parent_node { + Some(node) => match node { + ast_map::NodeItem(item) => match item.node { + hir::ItemFn(..) => { + Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id)) + }, + _ => None + }, + ast_map::NodeImplItem(..) | + ast_map::NodeTraitItem(..) 
=> { + Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id)) + }, + _ => None + }, + None => { + debug!("no parent node of scope_id {}", scope_id); + None + } + } + } + + fn append_to_same_regions(same_regions: &mut Vec, + same_frs: &FreeRegionsFromSameFn) { + debug!("append_to_same_regions(same_regions={:?}, same_frs={:?})", + same_regions, same_frs); + let scope_id = same_frs.scope_id; + let (sub_fr, sup_fr) = (same_frs.sub_fr, same_frs.sup_fr); + for sr in same_regions.iter_mut() { + if sr.contains(&sup_fr.bound_region) && scope_id == sr.scope_id { + sr.push(sub_fr.bound_region); + return + } + } + same_regions.push(SameRegions { + scope_id: scope_id, + regions: vec![sub_fr.bound_region, sup_fr.bound_region] + }) + } + } + + /// Adds a note if the types come from similarly named crates + fn check_and_note_conflicting_crates(&self, + err: &mut DiagnosticBuilder, + terr: &TypeError<'tcx>, + sp: Span) { + let report_path_match = |err: &mut DiagnosticBuilder, did1: DefId, did2: DefId| { + // Only external crates, if either is from a local + // module we could have false positives + if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate { + let exp_path = self.tcx.item_path_str(did1); + let found_path = self.tcx.item_path_str(did2); + // We compare strings because DefPath can be different + // for imported and non-imported crates + if exp_path == found_path { + let crate_name = self.tcx.sess.cstore.crate_name(did1.krate); + err.span_note(sp, &format!("Perhaps two different versions \ + of crate `{}` are being used?", + crate_name)); + } + } + }; + match *terr { + TypeError::Sorts(ref exp_found) => { + // if they are both "path types", there's a chance of ambiguity + // due to different versions of the same crate + match (&exp_found.expected.sty, &exp_found.found.sty) { + (&ty::TyAdt(exp_adt, _), &ty::TyAdt(found_adt, _)) => { + report_path_match(err, exp_adt.did, found_adt.did); + }, + _ => () + } + }, + TypeError::Traits(ref exp_found) => { + 
report_path_match(err, exp_found.expected, exp_found.found); + }, + _ => () // FIXME(#22750) handle traits and stuff + } + } + + fn note_error_origin(&self, + err: &mut DiagnosticBuilder<'tcx>, + cause: &ObligationCause<'tcx>) + { + match cause.code { + ObligationCauseCode::MatchExpressionArm { arm_span, source } => match source { + hir::MatchSource::IfLetDesugar {..} => { + err.span_note(arm_span, "`if let` arm with an incompatible type"); + } + _ => { + err.span_note(arm_span, "match arm with an incompatible type"); + } + }, + _ => () + } + } + + pub fn note_type_err(&self, + diag: &mut DiagnosticBuilder<'tcx>, + cause: &ObligationCause<'tcx>, + secondary_span: Option<(Span, String)>, + values: Option>, + terr: &TypeError<'tcx>) + { + let expected_found = match values { + None => None, + Some(values) => match self.values_str(&values) { + Some((expected, found)) => Some((expected, found)), + None => { + // Derived error. Cancel the emitter. + self.tcx.sess.diagnostic().cancel(diag); + return + } + } + }; + + let span = cause.span; + + if let Some((expected, found)) = expected_found { + let is_simple_error = if let &TypeError::Sorts(ref values) = terr { + values.expected.is_primitive() && values.found.is_primitive() + } else { + false + }; + + if !is_simple_error { + if expected == found { + if let &TypeError::Sorts(ref values) = terr { + diag.note_expected_found_extra( + &"type", &expected, &found, + &format!(" ({})", values.expected.sort_string(self.tcx)), + &format!(" ({})", values.found.sort_string(self.tcx))); + } else { + diag.note_expected_found(&"type", &expected, &found); + } + } else { + diag.note_expected_found(&"type", &expected, &found); + } + } + } + + diag.span_label(span, &terr); + if let Some((sp, msg)) = secondary_span { + diag.span_label(sp, &msg); + } + + self.note_error_origin(diag, &cause); + self.check_and_note_conflicting_crates(diag, terr, span); + self.tcx.note_and_explain_type_err(diag, terr, span); + } + + pub fn 
report_and_explain_type_error(&self, + trace: TypeTrace<'tcx>, + terr: &TypeError<'tcx>) + -> DiagnosticBuilder<'tcx> + { + let span = trace.cause.span; + let failure_str = trace.cause.as_failure_str(); + let mut diag = match trace.cause.code { + ObligationCauseCode::IfExpressionWithNoElse => { + struct_span_err!(self.tcx.sess, span, E0317, "{}", failure_str) + }, + _ => { + struct_span_err!(self.tcx.sess, span, E0308, "{}", failure_str) + }, + }; + self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr); + diag + } + + /// Returns a string of the form "expected `{}`, found `{}`". + fn values_str(&self, values: &ValuePairs<'tcx>) -> Option<(String, String)> { + match *values { + infer::Types(ref exp_found) => self.expected_found_str(exp_found), + infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found), + infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found), + } + } + + fn expected_found_str>( + &self, + exp_found: &ty::error::ExpectedFound) + -> Option<(String, String)> + { + let exp_found = self.resolve_type_vars_if_possible(exp_found); + if exp_found.references_error() { + return None; + } + + Some((format!("{}", exp_found.expected), format!("{}", exp_found.found))) + } + + fn report_generic_bound_failure(&self, + origin: SubregionOrigin<'tcx>, + bound_kind: GenericKind<'tcx>, + sub: &'tcx Region) + { + // FIXME: it would be better to report the first error message + // with the span of the parameter itself, rather than the span + // where the error was detected. But that span is not readily + // accessible. 
+ + let labeled_user_string = match bound_kind { + GenericKind::Param(ref p) => + format!("the parameter type `{}`", p), + GenericKind::Projection(ref p) => + format!("the associated type `{}`", p), + }; + + if let SubregionOrigin::CompareImplMethodObligation { + span, item_name, impl_item_def_id, trait_item_def_id, lint_id + } = origin { + self.report_extra_impl_obligation(span, + item_name, + impl_item_def_id, + trait_item_def_id, + &format!("`{}: {}`", bound_kind, sub), + lint_id) + .emit(); + return; + } + + let mut err = match *sub { + ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => { + // Does the required lifetime have a nice name we can print? + let mut err = struct_span_err!(self.tcx.sess, + origin.span(), + E0309, + "{} may not live long enough", + labeled_user_string); + err.help(&format!("consider adding an explicit lifetime bound `{}: {}`...", + bound_kind, + sub)); + err + } + + ty::ReStatic => { + // Does the required lifetime have a nice name we can print? + let mut err = struct_span_err!(self.tcx.sess, + origin.span(), + E0310, + "{} may not live long enough", + labeled_user_string); + err.help(&format!("consider adding an explicit lifetime \ + bound `{}: 'static`...", + bound_kind)); + err + } + + _ => { + // If not, be less specific. 
+ let mut err = struct_span_err!(self.tcx.sess, + origin.span(), + E0311, + "{} may not live long enough", + labeled_user_string); + err.help(&format!("consider adding an explicit lifetime bound for `{}`", + bound_kind)); + self.tcx.note_and_explain_region( + &mut err, + &format!("{} must be valid for ", labeled_user_string), + sub, + "..."); + err + } + }; + + self.note_region_origin(&mut err, &origin); + err.emit(); + } + + fn report_concrete_failure(&self, + origin: SubregionOrigin<'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) + -> DiagnosticBuilder<'tcx> { + match origin { + infer::Subtype(trace) => { + let terr = TypeError::RegionsDoesNotOutlive(sup, sub); + self.report_and_explain_type_error(trace, &terr) + } + infer::Reborrow(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0312, + "lifetime of reference outlives \ + lifetime of borrowed content..."); + self.tcx.note_and_explain_region(&mut err, + "...the reference is valid for ", + sub, + "..."); + self.tcx.note_and_explain_region(&mut err, + "...but the borrowed content is only valid for ", + sup, + ""); + err + } + infer::ReborrowUpvar(span, ref upvar_id) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0313, + "lifetime of borrowed pointer outlives \ + lifetime of captured variable `{}`...", + self.tcx.local_var_name_str(upvar_id.var_id)); + self.tcx.note_and_explain_region(&mut err, + "...the borrowed pointer is valid for ", + sub, + "..."); + self.tcx.note_and_explain_region(&mut err, + &format!("...but `{}` is only valid for ", + self.tcx.local_var_name_str(upvar_id.var_id)), + sup, + ""); + err + } + infer::InfStackClosure(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0314, + "closure outlives stack frame"); + self.tcx.note_and_explain_region(&mut err, + "...the closure must be valid for ", + sub, + "..."); + self.tcx.note_and_explain_region(&mut err, + "...but the closure's stack frame is only valid for ", + sup, + ""); + err + } + 
infer::InvokeClosure(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0315, + "cannot invoke closure outside of its lifetime"); + self.tcx.note_and_explain_region(&mut err, + "the closure is only valid for ", + sup, + ""); + err + } + infer::DerefPointer(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0473, + "dereference of reference outside its lifetime"); + self.tcx.note_and_explain_region(&mut err, + "the reference is only valid for ", + sup, + ""); + err + } + infer::FreeVariable(span, id) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0474, + "captured variable `{}` does not outlive the enclosing closure", + self.tcx.local_var_name_str(id)); + self.tcx.note_and_explain_region(&mut err, + "captured variable is valid for ", + sup, + ""); + self.tcx.note_and_explain_region(&mut err, + "closure is valid for ", + sub, + ""); + err + } + infer::IndexSlice(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0475, + "index of slice outside its lifetime"); + self.tcx.note_and_explain_region(&mut err, + "the slice is only valid for ", + sup, + ""); + err + } + infer::RelateObjectBound(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0476, + "lifetime of the source pointer does not outlive \ + lifetime bound of the object type"); + self.tcx.note_and_explain_region(&mut err, + "object type is valid for ", + sub, + ""); + self.tcx.note_and_explain_region(&mut err, + "source pointer is only valid for ", + sup, + ""); + err + } + infer::RelateParamBound(span, ty) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0477, + "the type `{}` does not fulfill the required lifetime", + self.ty_to_string(ty)); + self.tcx.note_and_explain_region(&mut err, + "type must outlive ", + sub, + ""); + err + } + infer::RelateRegionParamBound(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0478, + "lifetime bound not satisfied"); + self.tcx.note_and_explain_region(&mut err, + "lifetime 
parameter instantiated with ", + sup, + ""); + self.tcx.note_and_explain_region(&mut err, + "but lifetime parameter must outlive ", + sub, + ""); + err + } + infer::RelateDefaultParamBound(span, ty) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0479, + "the type `{}` (provided as the value of \ + a type parameter) is not valid at this point", + self.ty_to_string(ty)); + self.tcx.note_and_explain_region(&mut err, + "type must outlive ", + sub, + ""); + err + } + infer::CallRcvr(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0480, + "lifetime of method receiver does not outlive \ + the method call"); + self.tcx.note_and_explain_region(&mut err, + "the receiver is only valid for ", + sup, + ""); + err + } + infer::CallArg(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0481, + "lifetime of function argument does not outlive \ + the function call"); + self.tcx.note_and_explain_region(&mut err, + "the function argument is only valid for ", + sup, + ""); + err + } + infer::CallReturn(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0482, + "lifetime of return value does not outlive \ + the function call"); + self.tcx.note_and_explain_region(&mut err, + "the return value is only valid for ", + sup, + ""); + err + } + infer::Operand(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0483, + "lifetime of operand does not outlive \ + the operation"); + self.tcx.note_and_explain_region(&mut err, + "the operand is only valid for ", + sup, + ""); + err + } + infer::AddrOf(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0484, + "reference is not valid at the time of borrow"); + self.tcx.note_and_explain_region(&mut err, + "the borrow is only valid for ", + sup, + ""); + err + } + infer::AutoBorrow(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0485, + "automatically reference is not valid \ + at the time of borrow"); + self.tcx.note_and_explain_region(&mut err, 
+ "the automatic borrow is only valid for ", + sup, + ""); + err + } + infer::ExprTypeIsNotInScope(t, span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0486, + "type of expression contains references \ + that are not valid during the expression: `{}`", + self.ty_to_string(t)); + self.tcx.note_and_explain_region(&mut err, + "type is only valid for ", + sup, + ""); + err + } + infer::SafeDestructor(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0487, + "unsafe use of destructor: destructor might be called \ + while references are dead"); + // FIXME (22171): terms "super/subregion" are suboptimal + self.tcx.note_and_explain_region(&mut err, + "superregion: ", + sup, + ""); + self.tcx.note_and_explain_region(&mut err, + "subregion: ", + sub, + ""); + err + } + infer::BindingTypeIsNotValidAtDecl(span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0488, + "lifetime of variable does not enclose its declaration"); + self.tcx.note_and_explain_region(&mut err, + "the variable is only valid for ", + sup, + ""); + err + } + infer::ParameterInScope(_, span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0489, + "type/lifetime parameter not in scope here"); + self.tcx.note_and_explain_region(&mut err, + "the parameter is only valid for ", + sub, + ""); + err + } + infer::DataBorrowed(ty, span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0490, + "a value of type `{}` is borrowed for too long", + self.ty_to_string(ty)); + self.tcx.note_and_explain_region(&mut err, "the type is valid for ", sub, ""); + self.tcx.note_and_explain_region(&mut err, "but the borrow lasts for ", sup, ""); + err + } + infer::ReferenceOutlivesReferent(ty, span) => { + let mut err = struct_span_err!(self.tcx.sess, span, E0491, + "in type `{}`, reference has a longer lifetime \ + than the data it references", + self.ty_to_string(ty)); + self.tcx.note_and_explain_region(&mut err, + "the pointer is valid for ", + sub, + ""); + 
self.tcx.note_and_explain_region(&mut err, + "but the referenced data is only valid for ", + sup, + ""); + err + } + infer::CompareImplMethodObligation { span, + item_name, + impl_item_def_id, + trait_item_def_id, + lint_id } => { + self.report_extra_impl_obligation(span, + item_name, + impl_item_def_id, + trait_item_def_id, + &format!("`{}: {}`", sup, sub), + lint_id) + } + } + } + + fn report_sub_sup_conflict(&self, + var_origin: RegionVariableOrigin, + sub_origin: SubregionOrigin<'tcx>, + sub_region: &'tcx Region, + sup_origin: SubregionOrigin<'tcx>, + sup_region: &'tcx Region) { + let mut err = self.report_inference_failure(var_origin); + + self.tcx.note_and_explain_region(&mut err, + "first, the lifetime cannot outlive ", + sup_region, + "..."); + + self.note_region_origin(&mut err, &sup_origin); + + self.tcx.note_and_explain_region(&mut err, + "but, the lifetime must be valid for ", + sub_region, + "..."); + + self.note_region_origin(&mut err, &sub_origin); + err.emit(); + } + + fn report_processed_errors(&self, + origins: &[ProcessedErrorOrigin<'tcx>], + same_regions: &[SameRegions]) { + for (i, origin) in origins.iter().enumerate() { + let mut err = match *origin { + ProcessedErrorOrigin::VariableFailure(ref var_origin) => + self.report_inference_failure(var_origin.clone()), + ProcessedErrorOrigin::ConcreteFailure(ref sr_origin, sub, sup) => + self.report_concrete_failure(sr_origin.clone(), sub, sup), + }; + + // attach the suggestion to the last such error + if i == origins.len() - 1 { + self.give_suggestion(&mut err, same_regions); + } + + err.emit(); + } + } + + fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]) { + let scope_id = same_regions[0].scope_id; + let parent = self.tcx.map.get_parent(scope_id); + let parent_node = self.tcx.map.find(parent); + let taken = lifetimes_in_scope(self.tcx, scope_id); + let life_giver = LifeGiver::with_taken(&taken[..]); + let node_inner = match parent_node { + Some(ref node) => match 
*node { + ast_map::NodeItem(ref item) => { + match item.node { + hir::ItemFn(ref fn_decl, unsafety, constness, _, ref gen, _) => { + Some((fn_decl, gen, unsafety, constness, item.name, item.span)) + } + _ => None, + } + } + ast_map::NodeImplItem(item) => { + let id = self.tcx.map.get_parent(item.id); + if let Some(ast_map::NodeItem(parent_scope)) = self.tcx.map.find(id) { + if let hir::ItemImpl(_, _, _, None, _, _) = parent_scope.node { + // this impl scope implements a trait, do not recomend + // using explicit lifetimes (#37363) + return; + } + } + if let hir::ImplItemKind::Method(ref sig, _) = item.node { + Some((&sig.decl, + &sig.generics, + sig.unsafety, + sig.constness, + item.name, + item.span)) + } else { + None + } + }, + ast_map::NodeTraitItem(item) => { + match item.node { + hir::MethodTraitItem(ref sig, Some(_)) => { + Some((&sig.decl, + &sig.generics, + sig.unsafety, + sig.constness, + item.name, + item.span)) + } + _ => None, + } + } + _ => None, + }, + None => None, + }; + let (fn_decl, generics, unsafety, constness, name, span) + = node_inner.expect("expect item fn"); + let rebuilder = Rebuilder::new(self.tcx, fn_decl, generics, same_regions, &life_giver); + let (fn_decl, generics) = rebuilder.rebuild(); + self.give_expl_lifetime_param( + err, &fn_decl, unsafety, constness, name, &generics, span); + } + + pub fn issue_32330_warnings(&self, span: Span, issue32330s: &[ty::Issue32330]) { + for issue32330 in issue32330s { + match *issue32330 { + ty::Issue32330::WontChange => { } + ty::Issue32330::WillChange { fn_def_id, region_name } => { + self.tcx.sess.add_lint( + lint::builtin::HR_LIFETIME_IN_ASSOC_TYPE, + ast::CRATE_NODE_ID, + span, + format!("lifetime parameter `{0}` declared on fn `{1}` \ + appears only in the return type, \ + but here is required to be higher-ranked, \ + which means that `{0}` must appear in both \ + argument and return types", + region_name, + self.tcx.item_path_str(fn_def_id))); + } + } + } + } +} + +struct RebuildPathInfo<'a> 
{ + path: &'a hir::Path, + // indexes to insert lifetime on path.lifetimes + indexes: Vec, + // number of lifetimes we expect to see on the type referred by `path` + // (e.g., expected=1 for struct Foo<'a>) + expected: u32, + anon_nums: &'a HashSet, + region_names: &'a HashSet +} + +struct Rebuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + fn_decl: &'a hir::FnDecl, + generics: &'a hir::Generics, + same_regions: &'a [SameRegions], + life_giver: &'a LifeGiver, + cur_anon: Cell, + inserted_anons: RefCell>, +} + +enum FreshOrKept { + Fresh, + Kept +} + +impl<'a, 'gcx, 'tcx> Rebuilder<'a, 'gcx, 'tcx> { + fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, + fn_decl: &'a hir::FnDecl, + generics: &'a hir::Generics, + same_regions: &'a [SameRegions], + life_giver: &'a LifeGiver) + -> Rebuilder<'a, 'gcx, 'tcx> { + Rebuilder { + tcx: tcx, + fn_decl: fn_decl, + generics: generics, + same_regions: same_regions, + life_giver: life_giver, + cur_anon: Cell::new(0), + inserted_anons: RefCell::new(HashSet::new()), + } + } + + fn rebuild(&self) -> (hir::FnDecl, hir::Generics) { + let mut inputs = self.fn_decl.inputs.clone(); + let mut output = self.fn_decl.output.clone(); + let mut ty_params = self.generics.ty_params.clone(); + let where_clause = self.generics.where_clause.clone(); + let mut kept_lifetimes = HashSet::new(); + for sr in self.same_regions { + self.cur_anon.set(0); + self.offset_cur_anon(); + let (anon_nums, region_names) = + self.extract_anon_nums_and_names(sr); + let (lifetime, fresh_or_kept) = self.pick_lifetime(®ion_names); + match fresh_or_kept { + Kept => { kept_lifetimes.insert(lifetime.name); } + _ => () + } + inputs = self.rebuild_args_ty(&inputs[..], lifetime, + &anon_nums, ®ion_names); + output = self.rebuild_output(&output, lifetime, &anon_nums, ®ion_names); + ty_params = self.rebuild_ty_params(ty_params, lifetime, + ®ion_names); + } + let fresh_lifetimes = self.life_giver.get_generated_lifetimes(); + let all_region_names = 
self.extract_all_region_names(); + let generics = self.rebuild_generics(self.generics, + &fresh_lifetimes, + &kept_lifetimes, + &all_region_names, + ty_params, + where_clause); + let new_fn_decl = hir::FnDecl { + inputs: inputs, + output: output, + variadic: self.fn_decl.variadic + }; + (new_fn_decl, generics) + } + + fn pick_lifetime(&self, + region_names: &HashSet) + -> (hir::Lifetime, FreshOrKept) { + if !region_names.is_empty() { + // It's not necessary to convert the set of region names to a + // vector of string and then sort them. However, it makes the + // choice of lifetime name deterministic and thus easier to test. + let mut names = Vec::new(); + for rn in region_names { + let lt_name = rn.to_string(); + names.push(lt_name); + } + names.sort(); + let name = Symbol::intern(&names[0]); + return (name_to_dummy_lifetime(name), Kept); + } + return (self.life_giver.give_lifetime(), Fresh); + } + + fn extract_anon_nums_and_names(&self, same_regions: &SameRegions) + -> (HashSet, HashSet) { + let mut anon_nums = HashSet::new(); + let mut region_names = HashSet::new(); + for br in &same_regions.regions { + match *br { + ty::BrAnon(i) => { + anon_nums.insert(i); + } + ty::BrNamed(_, name, _) => { + region_names.insert(name); + } + _ => () + } + } + (anon_nums, region_names) + } + + fn extract_all_region_names(&self) -> HashSet { + let mut all_region_names = HashSet::new(); + for sr in self.same_regions { + for br in &sr.regions { + match *br { + ty::BrNamed(_, name, _) => { + all_region_names.insert(name); + } + _ => () + } + } + } + all_region_names + } + + fn inc_cur_anon(&self, n: u32) { + let anon = self.cur_anon.get(); + self.cur_anon.set(anon+n); + } + + fn offset_cur_anon(&self) { + let mut anon = self.cur_anon.get(); + while self.inserted_anons.borrow().contains(&anon) { + anon += 1; + } + self.cur_anon.set(anon); + } + + fn inc_and_offset_cur_anon(&self, n: u32) { + self.inc_cur_anon(n); + self.offset_cur_anon(); + } + + fn track_anon(&self, anon: u32) { + 
self.inserted_anons.borrow_mut().insert(anon); + } + + fn rebuild_ty_params(&self, + ty_params: hir::HirVec, + lifetime: hir::Lifetime, + region_names: &HashSet) + -> hir::HirVec { + ty_params.into_iter().map(|ty_param| { + let bounds = self.rebuild_ty_param_bounds(ty_param.bounds, + lifetime, + region_names); + hir::TyParam { + name: ty_param.name, + id: ty_param.id, + bounds: bounds, + default: ty_param.default, + span: ty_param.span, + pure_wrt_drop: ty_param.pure_wrt_drop, + } + }).collect() + } + + fn rebuild_ty_param_bounds(&self, + ty_param_bounds: hir::TyParamBounds, + lifetime: hir::Lifetime, + region_names: &HashSet) + -> hir::TyParamBounds { + ty_param_bounds.iter().map(|tpb| { + match tpb { + &hir::RegionTyParamBound(lt) => { + // FIXME -- it's unclear whether I'm supposed to + // substitute lifetime here. I suspect we need to + // be passing down a map. + hir::RegionTyParamBound(lt) + } + &hir::TraitTyParamBound(ref poly_tr, modifier) => { + let tr = &poly_tr.trait_ref; + let last_seg = tr.path.segments.last().unwrap(); + let mut insert = Vec::new(); + let lifetimes = last_seg.parameters.lifetimes(); + for (i, lt) in lifetimes.iter().enumerate() { + if region_names.contains(<.name) { + insert.push(i as u32); + } + } + let rebuild_info = RebuildPathInfo { + path: &tr.path, + indexes: insert, + expected: lifetimes.len() as u32, + anon_nums: &HashSet::new(), + region_names: region_names + }; + let new_path = self.rebuild_path(rebuild_info, lifetime); + hir::TraitTyParamBound(hir::PolyTraitRef { + bound_lifetimes: poly_tr.bound_lifetimes.clone(), + trait_ref: hir::TraitRef { + path: new_path, + ref_id: tr.ref_id, + }, + span: poly_tr.span, + }, modifier) + } + } + }).collect() + } + + fn rebuild_generics(&self, + generics: &hir::Generics, + add: &Vec, + keep: &HashSet, + remove: &HashSet, + ty_params: hir::HirVec, + where_clause: hir::WhereClause) + -> hir::Generics { + let mut lifetimes = Vec::new(); + for lt in add { + lifetimes.push(hir::LifetimeDef { + 
lifetime: *lt, + bounds: hir::HirVec::new(), + pure_wrt_drop: false, + }); + } + for lt in &generics.lifetimes { + if keep.contains(<.lifetime.name) || + !remove.contains(<.lifetime.name) { + lifetimes.push((*lt).clone()); + } + } + hir::Generics { + lifetimes: lifetimes.into(), + ty_params: ty_params, + where_clause: where_clause, + span: generics.span, + } + } + + fn rebuild_args_ty(&self, + inputs: &[hir::Arg], + lifetime: hir::Lifetime, + anon_nums: &HashSet, + region_names: &HashSet) + -> hir::HirVec { + let mut new_inputs = Vec::new(); + for arg in inputs { + let new_ty = self.rebuild_arg_ty_or_output(&arg.ty, lifetime, + anon_nums, region_names); + let possibly_new_arg = hir::Arg { + ty: new_ty, + pat: arg.pat.clone(), + id: arg.id + }; + new_inputs.push(possibly_new_arg); + } + new_inputs.into() + } + + fn rebuild_output(&self, ty: &hir::FunctionRetTy, + lifetime: hir::Lifetime, + anon_nums: &HashSet, + region_names: &HashSet) -> hir::FunctionRetTy { + match *ty { + hir::Return(ref ret_ty) => hir::Return( + self.rebuild_arg_ty_or_output(&ret_ty, lifetime, anon_nums, region_names) + ), + hir::DefaultReturn(span) => hir::DefaultReturn(span), + } + } + + fn rebuild_arg_ty_or_output(&self, + ty: &hir::Ty, + lifetime: hir::Lifetime, + anon_nums: &HashSet, + region_names: &HashSet) + -> P { + let mut new_ty = P(ty.clone()); + let mut ty_queue = vec![ty]; + while !ty_queue.is_empty() { + let cur_ty = ty_queue.remove(0); + match cur_ty.node { + hir::TyRptr(lt_opt, ref mut_ty) => { + let rebuild = match lt_opt { + Some(lt) => region_names.contains(<.name), + None => { + let anon = self.cur_anon.get(); + let rebuild = anon_nums.contains(&anon); + if rebuild { + self.track_anon(anon); + } + self.inc_and_offset_cur_anon(1); + rebuild + } + }; + if rebuild { + let to = hir::Ty { + id: cur_ty.id, + node: hir::TyRptr(Some(lifetime), mut_ty.clone()), + span: cur_ty.span + }; + new_ty = self.rebuild_ty(new_ty, P(to)); + } + ty_queue.push(&mut_ty.ty); + } + 
hir::TyPath(hir::QPath::Resolved(ref maybe_qself, ref path)) => { + match path.def { + Def::Enum(did) | Def::TyAlias(did) | + Def::Struct(did) | Def::Union(did) => { + let generics = self.tcx.item_generics(did); + + let expected = + generics.regions.len() as u32; + let lifetimes = + path.segments.last().unwrap().parameters.lifetimes(); + let mut insert = Vec::new(); + if lifetimes.is_empty() { + let anon = self.cur_anon.get(); + for (i, a) in (anon..anon+expected).enumerate() { + if anon_nums.contains(&a) { + insert.push(i as u32); + } + self.track_anon(a); + } + self.inc_and_offset_cur_anon(expected); + } else { + for (i, lt) in lifetimes.iter().enumerate() { + if region_names.contains(<.name) { + insert.push(i as u32); + } + } + } + let rebuild_info = RebuildPathInfo { + path: path, + indexes: insert, + expected: expected, + anon_nums: anon_nums, + region_names: region_names + }; + let new_path = self.rebuild_path(rebuild_info, lifetime); + let qself = maybe_qself.as_ref().map(|qself| { + self.rebuild_arg_ty_or_output(qself, lifetime, + anon_nums, region_names) + }); + let to = hir::Ty { + id: cur_ty.id, + node: hir::TyPath(hir::QPath::Resolved(qself, P(new_path))), + span: cur_ty.span + }; + new_ty = self.rebuild_ty(new_ty, P(to)); + } + _ => () + } + } + + hir::TyPtr(ref mut_ty) => { + ty_queue.push(&mut_ty.ty); + } + hir::TySlice(ref ty) | + hir::TyArray(ref ty, _) => { + ty_queue.push(&ty); + } + hir::TyTup(ref tys) => ty_queue.extend(tys.iter().map(|ty| &**ty)), + _ => {} + } + } + new_ty + } + + fn rebuild_ty(&self, + from: P, + to: P) + -> P { + + fn build_to(from: P, + to: &mut Option>) + -> P { + if Some(from.id) == to.as_ref().map(|ty| ty.id) { + return to.take().expect("`to` type found more than once during rebuild"); + } + from.map(|hir::Ty {id, node, span}| { + let new_node = match node { + hir::TyRptr(lifetime, mut_ty) => { + hir::TyRptr(lifetime, hir::MutTy { + mutbl: mut_ty.mutbl, + ty: build_to(mut_ty.ty, to), + }) + } + hir::TyPtr(mut_ty) => { + 
hir::TyPtr(hir::MutTy { + mutbl: mut_ty.mutbl, + ty: build_to(mut_ty.ty, to), + }) + } + hir::TySlice(ty) => hir::TySlice(build_to(ty, to)), + hir::TyArray(ty, e) => { + hir::TyArray(build_to(ty, to), e) + } + hir::TyTup(tys) => { + hir::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect()) + } + other => other + }; + hir::Ty { id: id, node: new_node, span: span } + }) + } + + build_to(from, &mut Some(to)) + } + + fn rebuild_path(&self, + rebuild_info: RebuildPathInfo, + lifetime: hir::Lifetime) + -> hir::Path + { + let RebuildPathInfo { + path, + indexes, + expected, + anon_nums, + region_names, + } = rebuild_info; + + let last_seg = path.segments.last().unwrap(); + let new_parameters = match last_seg.parameters { + hir::ParenthesizedParameters(..) => { + last_seg.parameters.clone() + } + + hir::AngleBracketedParameters(ref data) => { + let mut new_lts = Vec::new(); + if data.lifetimes.is_empty() { + // traverse once to see if there's a need to insert lifetime + let need_insert = (0..expected).any(|i| { + indexes.contains(&i) + }); + if need_insert { + for i in 0..expected { + if indexes.contains(&i) { + new_lts.push(lifetime); + } else { + new_lts.push(self.life_giver.give_lifetime()); + } + } + } + } else { + for (i, lt) in data.lifetimes.iter().enumerate() { + if indexes.contains(&(i as u32)) { + new_lts.push(lifetime); + } else { + new_lts.push(*lt); + } + } + } + let new_types = data.types.iter().map(|t| { + self.rebuild_arg_ty_or_output(&t, lifetime, anon_nums, region_names) + }).collect(); + let new_bindings = data.bindings.iter().map(|b| { + hir::TypeBinding { + id: b.id, + name: b.name, + ty: self.rebuild_arg_ty_or_output(&b.ty, + lifetime, + anon_nums, + region_names), + span: b.span + } + }).collect(); + hir::AngleBracketedParameters(hir::AngleBracketedParameterData { + lifetimes: new_lts.into(), + types: new_types, + infer_types: data.infer_types, + bindings: new_bindings, + }) + } + }; + let new_seg = hir::PathSegment { + name: last_seg.name, + 
parameters: new_parameters + }; + let mut new_segs = Vec::new(); + new_segs.extend_from_slice(path.segments.split_last().unwrap().1); + new_segs.push(new_seg); + hir::Path { + span: path.span, + global: path.global, + def: path.def, + segments: new_segs.into() + } + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + fn give_expl_lifetime_param(&self, + err: &mut DiagnosticBuilder, + decl: &hir::FnDecl, + unsafety: hir::Unsafety, + constness: hir::Constness, + name: ast::Name, + generics: &hir::Generics, + span: Span) { + let suggested_fn = pprust::fun_to_string(decl, unsafety, constness, name, generics); + let msg = format!("consider using an explicit lifetime \ + parameter as shown: {}", suggested_fn); + err.span_help(span, &msg[..]); + } + + fn report_inference_failure(&self, + var_origin: RegionVariableOrigin) + -> DiagnosticBuilder<'tcx> { + let br_string = |br: ty::BoundRegion| { + let mut s = br.to_string(); + if !s.is_empty() { + s.push_str(" "); + } + s + }; + let var_description = match var_origin { + infer::MiscVariable(_) => "".to_string(), + infer::PatternRegion(_) => " for pattern".to_string(), + infer::AddrOfRegion(_) => " for borrow expression".to_string(), + infer::Autoref(_) => " for autoref".to_string(), + infer::Coercion(_) => " for automatic coercion".to_string(), + infer::LateBoundRegion(_, br, infer::FnCall) => { + format!(" for lifetime parameter {}in function call", + br_string(br)) + } + infer::LateBoundRegion(_, br, infer::HigherRankedType) => { + format!(" for lifetime parameter {}in generic type", br_string(br)) + } + infer::LateBoundRegion(_, br, infer::AssocTypeProjection(type_name)) => { + format!(" for lifetime parameter {}in trait containing associated type `{}`", + br_string(br), type_name) + } + infer::EarlyBoundRegion(_, name) => { + format!(" for lifetime parameter `{}`", + name) + } + infer::BoundRegionInCoherence(name) => { + format!(" for lifetime parameter `{}` in coherence check", + name) + } + 
infer::UpvarRegion(ref upvar_id, _) => { + format!(" for capture of `{}` by closure", + self.tcx.local_var_name_str(upvar_id.var_id).to_string()) + } + }; + + struct_span_err!(self.tcx.sess, var_origin.span(), E0495, + "cannot infer an appropriate lifetime{} \ + due to conflicting requirements", + var_description) + } + + fn note_region_origin(&self, err: &mut DiagnosticBuilder, origin: &SubregionOrigin<'tcx>) { + match *origin { + infer::Subtype(ref trace) => { + if let Some((expected, found)) = self.values_str(&trace.values) { + // FIXME: do we want a "the" here? + err.span_note( + trace.cause.span, + &format!("...so that {} (expected {}, found {})", + trace.cause.as_requirement_str(), expected, found)); + } else { + // FIXME: this really should be handled at some earlier stage. Our + // handling of region checking when type errors are present is + // *terrible*. + + err.span_note( + trace.cause.span, + &format!("...so that {}", + trace.cause.as_requirement_str())); + } + } + infer::Reborrow(span) => { + err.span_note( + span, + "...so that reference does not outlive \ + borrowed content"); + } + infer::ReborrowUpvar(span, ref upvar_id) => { + err.span_note( + span, + &format!( + "...so that closure can access `{}`", + self.tcx.local_var_name_str(upvar_id.var_id) + .to_string())); + } + infer::InfStackClosure(span) => { + err.span_note( + span, + "...so that closure does not outlive its stack frame"); + } + infer::InvokeClosure(span) => { + err.span_note( + span, + "...so that closure is not invoked outside its lifetime"); + } + infer::DerefPointer(span) => { + err.span_note( + span, + "...so that pointer is not dereferenced \ + outside its lifetime"); + } + infer::FreeVariable(span, id) => { + err.span_note( + span, + &format!("...so that captured variable `{}` \ + does not outlive the enclosing closure", + self.tcx.local_var_name_str(id))); + } + infer::IndexSlice(span) => { + err.span_note( + span, + "...so that slice is not indexed outside the lifetime"); + } 
+ infer::RelateObjectBound(span) => { + err.span_note( + span, + "...so that it can be closed over into an object"); + } + infer::CallRcvr(span) => { + err.span_note( + span, + "...so that method receiver is valid for the method call"); + } + infer::CallArg(span) => { + err.span_note( + span, + "...so that argument is valid for the call"); + } + infer::CallReturn(span) => { + err.span_note( + span, + "...so that return value is valid for the call"); + } + infer::Operand(span) => { + err.span_note( + span, + "...so that operand is valid for operation"); + } + infer::AddrOf(span) => { + err.span_note( + span, + "...so that reference is valid \ + at the time of borrow"); + } + infer::AutoBorrow(span) => { + err.span_note( + span, + "...so that auto-reference is valid \ + at the time of borrow"); + } + infer::ExprTypeIsNotInScope(t, span) => { + err.span_note( + span, + &format!("...so type `{}` of expression is valid during the \ + expression", + self.ty_to_string(t))); + } + infer::BindingTypeIsNotValidAtDecl(span) => { + err.span_note( + span, + "...so that variable is valid at time of its declaration"); + } + infer::ParameterInScope(_, span) => { + err.span_note( + span, + "...so that a type/lifetime parameter is in scope here"); + } + infer::DataBorrowed(ty, span) => { + err.span_note( + span, + &format!("...so that the type `{}` is not borrowed for too long", + self.ty_to_string(ty))); + } + infer::ReferenceOutlivesReferent(ty, span) => { + err.span_note( + span, + &format!("...so that the reference type `{}` \ + does not outlive the data it points at", + self.ty_to_string(ty))); + } + infer::RelateParamBound(span, t) => { + err.span_note( + span, + &format!("...so that the type `{}` \ + will meet its required lifetime bounds", + self.ty_to_string(t))); + } + infer::RelateDefaultParamBound(span, t) => { + err.span_note( + span, + &format!("...so that type parameter \ + instantiated with `{}`, \ + will meet its declared lifetime bounds", + self.ty_to_string(t))); 
+ } + infer::RelateRegionParamBound(span) => { + err.span_note( + span, + "...so that the declared lifetime parameter bounds \ + are satisfied"); + } + infer::SafeDestructor(span) => { + err.span_note( + span, + "...so that references are valid when the destructor \ + runs"); + } + infer::CompareImplMethodObligation { span, .. } => { + err.span_note( + span, + "...so that the definition in impl matches the definition from the trait"); + } + } + } +} + +fn lifetimes_in_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + scope_id: ast::NodeId) + -> Vec { + let mut taken = Vec::new(); + let parent = tcx.map.get_parent(scope_id); + let method_id_opt = match tcx.map.find(parent) { + Some(node) => match node { + ast_map::NodeItem(item) => match item.node { + hir::ItemFn(.., ref gen, _) => { + taken.extend_from_slice(&gen.lifetimes); + None + }, + _ => None + }, + ast_map::NodeImplItem(ii) => { + match ii.node { + hir::ImplItemKind::Method(ref sig, _) => { + taken.extend_from_slice(&sig.generics.lifetimes); + Some(ii.id) + } + _ => None, + } + } + _ => None + }, + None => None + }; + if let Some(method_id) = method_id_opt { + let parent = tcx.map.get_parent(method_id); + if let Some(node) = tcx.map.find(parent) { + match node { + ast_map::NodeItem(item) => match item.node { + hir::ItemImpl(_, _, ref gen, ..) 
=> { + taken.extend_from_slice(&gen.lifetimes); + } + _ => () + }, + _ => () + } + } + } + return taken; +} + +// LifeGiver is responsible for generating fresh lifetime names +struct LifeGiver { + taken: HashSet, + counter: Cell, + generated: RefCell>, +} + +impl LifeGiver { + fn with_taken(taken: &[hir::LifetimeDef]) -> LifeGiver { + let mut taken_ = HashSet::new(); + for lt in taken { + let lt_name = lt.lifetime.name.to_string(); + taken_.insert(lt_name); + } + LifeGiver { + taken: taken_, + counter: Cell::new(0), + generated: RefCell::new(Vec::new()), + } + } + + fn inc_counter(&self) { + let c = self.counter.get(); + self.counter.set(c+1); + } + + fn give_lifetime(&self) -> hir::Lifetime { + let lifetime; + loop { + let mut s = String::from("'"); + s.push_str(&num_to_string(self.counter.get())); + if !self.taken.contains(&s) { + lifetime = name_to_dummy_lifetime(Symbol::intern(&s)); + self.generated.borrow_mut().push(lifetime); + break; + } + self.inc_counter(); + } + self.inc_counter(); + return lifetime; + + // 0 .. 25 generates a .. z, 26 .. 51 generates aa .. zz, and so on + fn num_to_string(counter: usize) -> String { + let mut s = String::new(); + let (n, r) = (counter/26 + 1, counter % 26); + let letter: char = from_u32((r+97) as u32).unwrap(); + for _ in 0..n { + s.push(letter); + } + s + } + } + + fn get_generated_lifetimes(&self) -> Vec { + self.generated.borrow().clone() + } +} + +fn name_to_dummy_lifetime(name: ast::Name) -> hir::Lifetime { + hir::Lifetime { id: ast::DUMMY_NODE_ID, + span: syntax_pos::DUMMY_SP, + name: name } +} + +impl<'tcx> ObligationCause<'tcx> { + fn as_failure_str(&self) -> &'static str { + use traits::ObligationCauseCode::*; + match self.code { + CompareImplMethodObligation { .. } => "method not compatible with trait", + MatchExpressionArm { source, .. 
} => match source { + hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types", + _ => "match arms have incompatible types", + }, + IfExpression => "if and else have incompatible types", + IfExpressionWithNoElse => "if may be missing an else clause", + EquatePredicate => "equality predicate not satisfied", + MainFunctionType => "main function has wrong type", + StartFunctionType => "start function has wrong type", + IntrinsicType => "intrinsic has wrong type", + MethodReceiver => "mismatched method receiver", + _ => "mismatched types", + } + } + + fn as_requirement_str(&self) -> &'static str { + use traits::ObligationCauseCode::*; + match self.code { + CompareImplMethodObligation { .. } => "method type is compatible with trait", + ExprAssignable => "expression is assignable", + MatchExpressionArm { source, .. } => match source { + hir::MatchSource::IfLetDesugar{..} => "`if let` arms have compatible types", + _ => "match arms have compatible types", + }, + IfExpression => "if and else have compatible types", + IfExpressionWithNoElse => "if missing an else returns ()", + EquatePredicate => "equality where clause is satisfied", + MainFunctionType => "`main` function has the correct type", + StartFunctionType => "`start` function has the correct type", + IntrinsicType => "intrinsic has the correct type", + MethodReceiver => "method receiver has the correct type", + _ => "types are compatible", + } + } +} + diff --git a/src/librustc/middle/infer/freshen.rs b/src/librustc/infer/freshen.rs similarity index 76% rename from src/librustc/middle/infer/freshen.rs rename to src/librustc/infer/freshen.rs index 76dd62383f1b1..19183892e4b0c 100644 --- a/src/librustc/middle/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -23,32 +23,34 @@ //! error messages or in any other form. Freshening is only really useful as an internal detail. //! //! __An important detail concerning regions.__ The freshener also replaces *all* regions with -//! 'static. 
The reason behind this is that, in general, we do not take region relationships into +//! 'erased. The reason behind this is that, in general, we do not take region relationships into //! account when making type-overloaded decisions. This is important because of the design of the //! region inferencer, which is not based on unification but rather on accumulating and then //! solving a set of constraints. In contrast, the type inferencer assigns a value to each type //! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type //! inferencer knows "so far". -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::fold::TypeFolder; -use std::collections::hash_map::{self, Entry}; +use ty::{self, Ty, TyCtxt, TypeFoldable}; +use ty::fold::TypeFolder; +use util::nodemap::FxHashMap; +use std::collections::hash_map::Entry; use super::InferCtxt; use super::unify_key::ToType; -pub struct TypeFreshener<'a, 'tcx:'a> { - infcx: &'a InferCtxt<'a, 'tcx>, +pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, freshen_count: u32, - freshen_map: hash_map::HashMap>, + freshen_map: FxHashMap>, } -impl<'a, 'tcx> TypeFreshener<'a, 'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> TypeFreshener<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) + -> TypeFreshener<'a, 'gcx, 'tcx> { TypeFreshener { infcx: infcx, freshen_count: 0, - freshen_map: hash_map::HashMap::new(), + freshen_map: FxHashMap(), } } @@ -59,9 +61,8 @@ impl<'a, 'tcx> TypeFreshener<'a, 'tcx> { -> Ty<'tcx> where F: FnOnce(u32) -> ty::InferTy, { - match opt_ty { - Some(ty) => { return ty.fold_with(self); } - None => { } + if let Some(ty) = opt_ty { + return ty.fold_with(self); } match self.freshen_map.entry(key) { @@ -77,13 +78,13 @@ impl<'a, 'tcx> TypeFreshener<'a, 'tcx> { } } -impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { - fn tcx<'b>(&'b self) -> &'b 
ty::ctxt<'tcx> { +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for TypeFreshener<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { ty::ReEarlyBound(..) | ty::ReLateBound(..) => { // leave bound regions alone @@ -95,9 +96,10 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { ty::ReScope(_) | ty::ReVar(_) | ty::ReSkolemized(..) | - ty::ReEmpty => { - // replace all free regions with 'static - ty::ReStatic + ty::ReEmpty | + ty::ReErased => { + // replace all free regions with 'erased + self.tcx().mk_region(ty::ReErased) } } } @@ -111,8 +113,9 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { match t.sty { ty::TyInfer(ty::TyVar(v)) => { + let opt_ty = self.infcx.type_variables.borrow_mut().probe(v); self.freshen( - self.infcx.type_variables.borrow().probe(v), + opt_ty, ty::TyVar(v), ty::FreshTy) } @@ -139,11 +142,10 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { ty::TyInfer(ty::FreshIntTy(c)) | ty::TyInfer(ty::FreshFloatTy(c)) => { if c >= self.freshen_count { - tcx.sess.bug( - &format!("Encountered a freshend type with id {} \ - but our counter is only at {}", - c, - self.freshen_count)); + bug!("Encountered a freshend type with id {} \ + but our counter is only at {}", + c, + self.freshen_count); } t } @@ -153,7 +155,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | - ty::TyEnum(..) | + ty::TyAdt(..) | ty::TyBox(..) | ty::TyStr | ty::TyError | @@ -161,13 +163,15 @@ impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> { ty::TySlice(..) | ty::TyRawPtr(..) | ty::TyRef(..) | - ty::TyBareFn(..) | - ty::TyTrait(..) | - ty::TyStruct(..) | + ty::TyFnDef(..) | + ty::TyFnPtr(_) | + ty::TyDynamic(..) | ty::TyClosure(..) | + ty::TyNever | ty::TyTuple(..) 
| ty::TyProjection(..) | - ty::TyParam(..) => { + ty::TyParam(..) | + ty::TyAnon(..) => { t.super_fold_with(self) } } diff --git a/src/librustc/infer/fudge.rs b/src/librustc/infer/fudge.rs new file mode 100644 index 0000000000000..806b94486615f --- /dev/null +++ b/src/librustc/infer/fudge.rs @@ -0,0 +1,137 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ty::{self, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder}; + +use super::InferCtxt; +use super::RegionVariableOrigin; + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + /// This rather funky routine is used while processing expected + /// types. What happens here is that we want to propagate a + /// coercion through the return type of a fn to its + /// argument. Consider the type of `Option::Some`, which is + /// basically `for fn(T) -> Option`. So if we have an + /// expression `Some(&[1, 2, 3])`, and that has the expected type + /// `Option<&[u32]>`, we would like to type check `&[1, 2, 3]` + /// with the expectation of `&[u32]`. This will cause us to coerce + /// from `&[u32; 3]` to `&[u32]` and make the users life more + /// pleasant. + /// + /// The way we do this is using `fudge_regions_if_ok`. What the + /// routine actually does is to start a snapshot and execute the + /// closure `f`. In our example above, what this closure will do + /// is to unify the expectation (`Option<&[u32]>`) with the actual + /// return type (`Option`, where `?T` represents the variable + /// instantiated for `T`). This will cause `?T` to be unified + /// with `&?a [u32]`, where `?a` is a fresh lifetime variable. The + /// input type (`?T`) is then returned by `f()`. 
+ /// + /// At this point, `fudge_regions_if_ok` will normalize all type + /// variables, converting `?T` to `&?a [u32]` and end the + /// snapshot. The problem is that we can't just return this type + /// out, because it references the region variable `?a`, and that + /// region variable was popped when we popped the snapshot. + /// + /// So what we do is to keep a list (`region_vars`, in the code below) + /// of region variables created during the snapshot (here, `?a`). We + /// fold the return value and replace any such regions with a *new* + /// region variable (e.g., `?b`) and return the result (`&?b [u32]`). + /// This can then be used as the expectation for the fn argument. + /// + /// The important point here is that, for soundness purposes, the + /// regions in question are not particularly important. We will + /// use the expected types to guide coercions, but we will still + /// type-check the resulting types from those coercions against + /// the actual types (`?T`, `Option(&self, + origin: &RegionVariableOrigin, + f: F) -> Result where + F: FnOnce() -> Result, + T: TypeFoldable<'tcx>, + { + let (region_vars, value) = self.probe(|snapshot| { + let vars_at_start = self.type_variables.borrow().num_vars(); + + match f() { + Ok(value) => { + let value = self.resolve_type_vars_if_possible(&value); + + // At this point, `value` could in principle refer + // to regions that have been created during the + // snapshot (we assert below that `f()` does not + // create any new type variables, so there + // shouldn't be any of those). Once we exit + // `probe()`, those are going to be popped, so we + // will have to eliminate any references to them. 
+ + assert_eq!(self.type_variables.borrow().num_vars(), vars_at_start, + "type variables were created during fudge_regions_if_ok"); + let region_vars = + self.region_vars.vars_created_since_snapshot( + &snapshot.region_vars_snapshot); + + Ok((region_vars, value)) + } + Err(e) => Err(e), + } + })?; + + // At this point, we need to replace any of the now-popped + // region variables that appear in `value` with a fresh region + // variable. We can't do this during the probe because they + // would just get popped then too. =) + + // Micro-optimization: if no variables have been created, then + // `value` can't refer to any of them. =) So we can just return it. + if region_vars.is_empty() { + return Ok(value); + } + + let mut fudger = RegionFudger { + infcx: self, + region_vars: ®ion_vars, + origin: origin + }; + + Ok(value.fold_with(&mut fudger)) + } +} + +pub struct RegionFudger<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + region_vars: &'a Vec, + origin: &'a RegionVariableOrigin, +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFudger<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReVar(v) if self.region_vars.contains(&v) => { + self.infcx.next_region_var(self.origin.clone()) + } + _ => { + r + } + } + } +} diff --git a/src/librustc/infer/glb.rs b/src/librustc/infer/glb.rs new file mode 100644 index 0000000000000..a5709e1880801 --- /dev/null +++ b/src/librustc/infer/glb.rs @@ -0,0 +1,92 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::combine::CombineFields; +use super::InferCtxt; +use super::lattice::{self, LatticeDir}; +use super::Subtype; + +use ty::{self, Ty, TyCtxt}; +use ty::relate::{Relate, RelateResult, TypeRelation}; + +/// "Greatest lower bound" (common subtype) +pub struct Glb<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, + a_is_expected: bool, +} + +impl<'combine, 'infcx, 'gcx, 'tcx> Glb<'combine, 'infcx, 'gcx, 'tcx> { + pub fn new(fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool) + -> Glb<'combine, 'infcx, 'gcx, 'tcx> + { + Glb { fields: fields, a_is_expected: a_is_expected } + } +} + +impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> + for Glb<'combine, 'infcx, 'gcx, 'tcx> +{ + fn tag(&self) -> &'static str { "Glb" } + + fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.tcx() } + + fn a_is_expected(&self) -> bool { self.a_is_expected } + + fn relate_with_variance>(&mut self, + variance: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + match variance { + ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b), + ty::Covariant => self.relate(a, b), + ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b), + ty::Contravariant => self.fields.lub(self.a_is_expected).relate(a, b), + } + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + lattice::super_lattice_tys(self, a, b) + } + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + debug!("{}.regions({:?}, {:?})", + self.tag(), + a, + b); + + let origin = Subtype(self.fields.trace.clone()); + Ok(self.fields.infcx.region_vars.glb_regions(origin, a, b)) + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + self.fields.higher_ranked_glb(a, b, self.a_is_expected) + } +} + +impl<'combine, 
'infcx, 'gcx, 'tcx> LatticeDir<'infcx, 'gcx, 'tcx> + for Glb<'combine, 'infcx, 'gcx, 'tcx> +{ + fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'gcx, 'tcx> { + self.fields.infcx + } + + fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { + let mut sub = self.fields.sub(self.a_is_expected); + sub.relate(&v, &a)?; + sub.relate(&v, &b)?; + Ok(()) + } +} diff --git a/src/librustc/middle/infer/higher_ranked/README.md b/src/librustc/infer/higher_ranked/README.md similarity index 100% rename from src/librustc/middle/infer/higher_ranked/README.md rename to src/librustc/infer/higher_ranked/README.md diff --git a/src/librustc/infer/higher_ranked/mod.rs b/src/librustc/infer/higher_ranked/mod.rs new file mode 100644 index 0000000000000..08e522f5fd6ee --- /dev/null +++ b/src/librustc/infer/higher_ranked/mod.rs @@ -0,0 +1,846 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Helper routines for higher-ranked things. See the `doc` module at +//! the end of the file for details. + +use super::{CombinedSnapshot, + InferCtxt, + LateBoundRegion, + HigherRankedType, + SubregionOrigin, + SkolemizationMap}; +use super::combine::CombineFields; +use super::region_inference::{TaintDirections}; + +use ty::{self, TyCtxt, Binder, TypeFoldable}; +use ty::error::TypeError; +use ty::relate::{Relate, RelateResult, TypeRelation}; +use syntax_pos::Span; +use util::nodemap::{FxHashMap, FxHashSet}; + +pub struct HrMatchResult { + pub value: U, + + /// Normally, when we do a higher-ranked match operation, we + /// expect all higher-ranked regions to be constrained as part of + /// the match operation. 
However, in the transition period for + /// #32330, it can happen that we sometimes have unconstrained + /// regions that get instantiated with fresh variables. In that + /// case, we collect the set of unconstrained bound regions here + /// and replace them with fresh variables. + pub unconstrained_regions: Vec, +} + +impl<'a, 'gcx, 'tcx> CombineFields<'a, 'gcx, 'tcx> { + pub fn higher_ranked_sub(&mut self, a: &Binder, b: &Binder, a_is_expected: bool) + -> RelateResult<'tcx, Binder> + where T: Relate<'tcx> + { + debug!("higher_ranked_sub(a={:?}, b={:?})", + a, b); + + // Rather than checking the subtype relationship between `a` and `b` + // as-is, we need to do some extra work here in order to make sure + // that function subtyping works correctly with respect to regions + // + // Note: this is a subtle algorithm. For a full explanation, + // please see the large comment at the end of the file in the (inlined) module + // `doc`. + + // Start a snapshot so we can examine "all bindings that were + // created as part of this type comparison". + return self.infcx.commit_if_ok(|snapshot| { + let span = self.trace.cause.span; + + // First, we instantiate each bound region in the subtype with a fresh + // region variable. + let (a_prime, _) = + self.infcx.replace_late_bound_regions_with_fresh_var( + span, + HigherRankedType, + a); + + // Second, we instantiate each bound region in the supertype with a + // fresh concrete region. + let (b_prime, skol_map) = + self.infcx.skolemize_late_bound_regions(b, snapshot); + + debug!("a_prime={:?}", a_prime); + debug!("b_prime={:?}", b_prime); + + // Compare types now that bound regions have been replaced. + let result = self.sub(a_is_expected).relate(&a_prime, &b_prime)?; + + // Presuming type comparison succeeds, we need to check + // that the skolemized regions do not "leak". + self.infcx.leak_check(!a_is_expected, span, &skol_map, snapshot)?; + + // We are finished with the skolemized regions now so pop + // them off. 
+ self.infcx.pop_skolemized(skol_map, snapshot); + + debug!("higher_ranked_sub: OK result={:?}", result); + + Ok(ty::Binder(result)) + }); + } + + /// The value consists of a pair `(t, u)` where `t` is the + /// *matcher* and `u` is a *value*. The idea is to find a + /// substitution `S` such that `S(t) == b`, and then return + /// `S(u)`. In other words, find values for the late-bound regions + /// in `a` that can make `t == b` and then replace the LBR in `u` + /// with those values. + /// + /// This routine is (as of this writing) used in trait matching, + /// particularly projection. + /// + /// NB. It should not happen that there are LBR appearing in `U` + /// that do not appear in `T`. If that happens, those regions are + /// unconstrained, and this routine replaces them with `'static`. + pub fn higher_ranked_match(&mut self, + span: Span, + a_pair: &Binder<(T, U)>, + b_match: &T, + a_is_expected: bool) + -> RelateResult<'tcx, HrMatchResult> + where T: Relate<'tcx>, + U: TypeFoldable<'tcx> + { + debug!("higher_ranked_match(a={:?}, b={:?})", + a_pair, b_match); + + // Start a snapshot so we can examine "all bindings that were + // created as part of this type comparison". + return self.infcx.commit_if_ok(|snapshot| { + // First, we instantiate each bound region in the matcher + // with a skolemized region. + let ((a_match, a_value), skol_map) = + self.infcx.skolemize_late_bound_regions(a_pair, snapshot); + + debug!("higher_ranked_match: a_match={:?}", a_match); + debug!("higher_ranked_match: skol_map={:?}", skol_map); + + // Equate types now that bound regions have been replaced. + self.equate(a_is_expected).relate(&a_match, &b_match)?; + + // Map each skolemized region to a vector of other regions that it + // must be equated with. (Note that this vector may include other + // skolemized regions from `skol_map`.) 
+ let skol_resolution_map: FxHashMap<_, _> = + skol_map + .iter() + .map(|(&br, &skol)| { + let tainted_regions = + self.infcx.tainted_regions(snapshot, + skol, + TaintDirections::incoming()); // [1] + + // [1] this routine executes after the skolemized + // regions have been *equated* with something + // else, so examining the incoming edges ought to + // be enough to collect all constraints + + (skol, (br, tainted_regions)) + }) + .collect(); + + // For each skolemized region, pick a representative -- which can + // be any region from the sets above, except for other members of + // `skol_map`. There should always be a representative if things + // are properly well-formed. + let mut unconstrained_regions = vec![]; + let skol_representatives: FxHashMap<_, _> = + skol_resolution_map + .iter() + .map(|(&skol, &(br, ref regions))| { + let representative = + regions.iter() + .filter(|&&r| !skol_resolution_map.contains_key(r)) + .cloned() + .next() + .unwrap_or_else(|| { // [1] + unconstrained_regions.push(br); + self.infcx.next_region_var( + LateBoundRegion(span, br, HigherRankedType)) + }); + + // [1] There should always be a representative, + // unless the higher-ranked region did not appear + // in the values being matched. We should reject + // as ill-formed cases that can lead to this, but + // right now we sometimes issue warnings (see + // #32330). + + (skol, representative) + }) + .collect(); + + // Equate all the members of each skolemization set with the + // representative. 
+ for (skol, &(_br, ref regions)) in &skol_resolution_map { + let representative = &skol_representatives[skol]; + debug!("higher_ranked_match: \ + skol={:?} representative={:?} regions={:?}", + skol, representative, regions); + for region in regions.iter() + .filter(|&r| !skol_resolution_map.contains_key(r)) + .filter(|&r| r != representative) + { + let origin = SubregionOrigin::Subtype(self.trace.clone()); + self.infcx.region_vars.make_eqregion(origin, + *representative, + *region); + } + } + + // Replace the skolemized regions appearing in value with + // their representatives + let a_value = + fold_regions_in( + self.tcx(), + &a_value, + |r, _| skol_representatives.get(&r).cloned().unwrap_or(r)); + + debug!("higher_ranked_match: value={:?}", a_value); + + // We are now done with these skolemized variables. + self.infcx.pop_skolemized(skol_map, snapshot); + + Ok(HrMatchResult { + value: a_value, + unconstrained_regions: unconstrained_regions, + }) + }); + } + + pub fn higher_ranked_lub(&mut self, a: &Binder, b: &Binder, a_is_expected: bool) + -> RelateResult<'tcx, Binder> + where T: Relate<'tcx> + { + // Start a snapshot so we can examine "all bindings that were + // created as part of this type comparison". + return self.infcx.commit_if_ok(|snapshot| { + // Instantiate each bound region with a fresh region variable. + let span = self.trace.cause.span; + let (a_with_fresh, a_map) = + self.infcx.replace_late_bound_regions_with_fresh_var( + span, HigherRankedType, a); + let (b_with_fresh, _) = + self.infcx.replace_late_bound_regions_with_fresh_var( + span, HigherRankedType, b); + + // Collect constraints. 
+ let result0 = + self.lub(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?; + let result0 = + self.infcx.resolve_type_vars_if_possible(&result0); + debug!("lub result0 = {:?}", result0); + + // Generalize the regions appearing in result0 if possible + let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); + let span = self.trace.cause.span; + let result1 = + fold_regions_in( + self.tcx(), + &result0, + |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, + &new_vars, &a_map, r)); + + debug!("lub({:?},{:?}) = {:?}", + a, + b, + result1); + + Ok(ty::Binder(result1)) + }); + + fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + snapshot: &CombinedSnapshot, + debruijn: ty::DebruijnIndex, + new_vars: &[ty::RegionVid], + a_map: &FxHashMap, + r0: &'tcx ty::Region) + -> &'tcx ty::Region { + // Regions that pre-dated the LUB computation stay as they are. + if !is_var_in_set(new_vars, r0) { + assert!(!r0.is_bound()); + debug!("generalize_region(r0={:?}): not new variable", r0); + return r0; + } + + let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both()); + + // Variables created during LUB computation which are + // *related* to regions that pre-date the LUB computation + // stay as they are. + if !tainted.iter().all(|r| is_var_in_set(new_vars, *r)) { + debug!("generalize_region(r0={:?}): \ + non-new-variables found in {:?}", + r0, tainted); + assert!(!r0.is_bound()); + return r0; + } + + // Otherwise, the variable must be associated with at + // least one of the variables representing bound regions + // in both A and B. Replace the variable with the "first" + // bound region from A that we find it to be associated + // with. 
+ for (a_br, a_r) in a_map { + if tainted.iter().any(|x| x == a_r) { + debug!("generalize_region(r0={:?}): \ + replacing with {:?}, tainted={:?}", + r0, *a_br, tainted); + return infcx.tcx.mk_region(ty::ReLateBound(debruijn, *a_br)); + } + } + + span_bug!( + span, + "region {:?} is not associated with any bound region from A!", + r0) + } + } + + pub fn higher_ranked_glb(&mut self, a: &Binder, b: &Binder, a_is_expected: bool) + -> RelateResult<'tcx, Binder> + where T: Relate<'tcx> + { + debug!("higher_ranked_glb({:?}, {:?})", + a, b); + + // Make a snapshot so we can examine "all bindings that were + // created as part of this type comparison". + return self.infcx.commit_if_ok(|snapshot| { + // Instantiate each bound region with a fresh region variable. + let (a_with_fresh, a_map) = + self.infcx.replace_late_bound_regions_with_fresh_var( + self.trace.cause.span, HigherRankedType, a); + let (b_with_fresh, b_map) = + self.infcx.replace_late_bound_regions_with_fresh_var( + self.trace.cause.span, HigherRankedType, b); + let a_vars = var_ids(self, &a_map); + let b_vars = var_ids(self, &b_map); + + // Collect constraints. 
+ let result0 = + self.glb(a_is_expected).relate(&a_with_fresh, &b_with_fresh)?; + let result0 = + self.infcx.resolve_type_vars_if_possible(&result0); + debug!("glb result0 = {:?}", result0); + + // Generalize the regions appearing in result0 if possible + let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); + let span = self.trace.cause.span; + let result1 = + fold_regions_in( + self.tcx(), + &result0, + |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, + &new_vars, + &a_map, &a_vars, &b_vars, + r)); + + debug!("glb({:?},{:?}) = {:?}", + a, + b, + result1); + + Ok(ty::Binder(result1)) + }); + + fn generalize_region<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + snapshot: &CombinedSnapshot, + debruijn: ty::DebruijnIndex, + new_vars: &[ty::RegionVid], + a_map: &FxHashMap, + a_vars: &[ty::RegionVid], + b_vars: &[ty::RegionVid], + r0: &'tcx ty::Region) + -> &'tcx ty::Region { + if !is_var_in_set(new_vars, r0) { + assert!(!r0.is_bound()); + return r0; + } + + let tainted = infcx.tainted_regions(snapshot, r0, TaintDirections::both()); + + let mut a_r = None; + let mut b_r = None; + let mut only_new_vars = true; + for r in &tainted { + if is_var_in_set(a_vars, *r) { + if a_r.is_some() { + return fresh_bound_variable(infcx, debruijn); + } else { + a_r = Some(*r); + } + } else if is_var_in_set(b_vars, *r) { + if b_r.is_some() { + return fresh_bound_variable(infcx, debruijn); + } else { + b_r = Some(*r); + } + } else if !is_var_in_set(new_vars, *r) { + only_new_vars = false; + } + } + + // NB---I do not believe this algorithm computes + // (necessarily) the GLB. As written it can + // spuriously fail. In particular, if there is a case + // like: |fn(&a)| and fn(fn(&b)), where a and b are + // free, it will return fn(&c) where c = GLB(a,b). If + // however this GLB is not defined, then the result is + // an error, even though something like + // "fn(fn(&X))" where X is bound would be a + // subtype of both of those. 
+ // + // The problem is that if we were to return a bound + // variable, we'd be computing a lower-bound, but not + // necessarily the *greatest* lower-bound. + // + // Unfortunately, this problem is non-trivial to solve, + // because we do not know at the time of computing the GLB + // whether a GLB(a,b) exists or not, because we haven't + // run region inference (or indeed, even fully computed + // the region hierarchy!). The current algorithm seems to + // works ok in practice. + + if a_r.is_some() && b_r.is_some() && only_new_vars { + // Related to exactly one bound variable from each fn: + return rev_lookup(infcx, span, a_map, a_r.unwrap()); + } else if a_r.is_none() && b_r.is_none() { + // Not related to bound variables from either fn: + assert!(!r0.is_bound()); + return r0; + } else { + // Other: + return fresh_bound_variable(infcx, debruijn); + } + } + + fn rev_lookup<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + span: Span, + a_map: &FxHashMap, + r: &'tcx ty::Region) -> &'tcx ty::Region + { + for (a_br, a_r) in a_map { + if *a_r == r { + return infcx.tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br)); + } + } + span_bug!( + span, + "could not find original bound region for {:?}", + r); + } + + fn fresh_bound_variable<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + debruijn: ty::DebruijnIndex) + -> &'tcx ty::Region { + infcx.region_vars.new_bound(debruijn) + } + } +} + +fn var_ids<'a, 'gcx, 'tcx>(fields: &CombineFields<'a, 'gcx, 'tcx>, + map: &FxHashMap) + -> Vec { + map.iter() + .map(|(_, &r)| match *r { + ty::ReVar(r) => { r } + _ => { + span_bug!( + fields.trace.cause.span, + "found non-region-vid: {:?}", + r); + } + }) + .collect() +} + +fn is_var_in_set(new_vars: &[ty::RegionVid], r: &ty::Region) -> bool { + match *r { + ty::ReVar(ref v) => new_vars.iter().any(|x| x == v), + _ => false + } +} + +fn fold_regions_in<'a, 'gcx, 'tcx, T, F>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + unbound_value: &T, + mut fldr: F) + -> T + where T: 
TypeFoldable<'tcx>, + F: FnMut(&'tcx ty::Region, ty::DebruijnIndex) -> &'tcx ty::Region, +{ + tcx.fold_regions(unbound_value, &mut false, |region, current_depth| { + // we should only be encountering "escaping" late-bound regions here, + // because the ones at the current level should have been replaced + // with fresh variables + assert!(match *region { + ty::ReLateBound(..) => false, + _ => true + }); + + fldr(region, ty::DebruijnIndex::new(current_depth)) + }) +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + fn tainted_regions(&self, + snapshot: &CombinedSnapshot, + r: &'tcx ty::Region, + directions: TaintDirections) + -> FxHashSet<&'tcx ty::Region> { + self.region_vars.tainted(&snapshot.region_vars_snapshot, r, directions) + } + + fn region_vars_confined_to_snapshot(&self, + snapshot: &CombinedSnapshot) + -> Vec + { + /*! + * Returns the set of region variables that do not affect any + * types/regions which existed before `snapshot` was + * started. This is used in the sub/lub/glb computations. The + * idea here is that when we are computing lub/glb of two + * regions, we sometimes create intermediate region variables. + * Those region variables may touch some of the skolemized or + * other "forbidden" regions we created to replace bound + * regions, but they don't really represent an "external" + * constraint. + * + * However, sometimes fresh variables are created for other + * purposes too, and those *may* represent an external + * constraint. In particular, when a type variable is + * instantiated, we create region variables for all the + * regions that appear within, and if that type variable + * pre-existed the snapshot, then those region variables + * represent external constraints. + * + * An example appears in the unit test + * `sub_free_bound_false_infer`. In this test, we want to + * know whether + * + * ```rust + * fn(_#0t) <: for<'a> fn(&'a int) + * ``` + * + * Note that the subtype has a type variable. 
Because the type + * variable can't be instantiated with a region that is bound + * in the fn signature, this comparison ought to fail. But if + * we're not careful, it will succeed. + * + * The reason is that when we walk through the subtyping + * algorith, we begin by replacing `'a` with a skolemized + * variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This + * can be made true by unifying `_#0t` with `&'1 int`. In the + * process, we create a fresh variable for the skolemized + * region, `'$2`, and hence we have that `_#0t == &'$2 + * int`. However, because `'$2` was created during the sub + * computation, if we're not careful we will erroneously + * assume it is one of the transient region variables + * representing a lub/glb internally. Not good. + * + * To prevent this, we check for type variables which were + * unified during the snapshot, and say that any region + * variable created during the snapshot but which finds its + * way into a type variable is considered to "escape" the + * snapshot. + */ + + let mut region_vars = + self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot); + + let escaping_types = + self.type_variables.borrow_mut().types_escaping_snapshot(&snapshot.type_snapshot); + + let mut escaping_region_vars = FxHashSet(); + for ty in &escaping_types { + self.tcx.collect_regions(ty, &mut escaping_region_vars); + } + + region_vars.retain(|®ion_vid| { + let r = ty::ReVar(region_vid); + !escaping_region_vars.contains(&r) + }); + + debug!("region_vars_confined_to_snapshot: region_vars={:?} escaping_types={:?}", + region_vars, + escaping_types); + + region_vars + } + + /// Replace all regions bound by `binder` with skolemized regions and + /// return a map indicating which bound-region was replaced with what + /// skolemized region. This is the first step of checking subtyping + /// when higher-ranked things are involved. + /// + /// **Important:** you must call this function from within a snapshot. 
+ /// Moreover, before committing the snapshot, you must eventually call + /// either `plug_leaks` or `pop_skolemized` to remove the skolemized + /// regions. If you rollback the snapshot (or are using a probe), then + /// the pop occurs as part of the rollback, so an explicit call is not + /// needed (but is also permitted). + /// + /// See `README.md` for more details. + pub fn skolemize_late_bound_regions(&self, + binder: &ty::Binder, + snapshot: &CombinedSnapshot) + -> (T, SkolemizationMap<'tcx>) + where T : TypeFoldable<'tcx> + { + let (result, map) = self.tcx.replace_late_bound_regions(binder, |br| { + self.region_vars.push_skolemized(br, &snapshot.region_vars_snapshot) + }); + + debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})", + binder, + result, + map); + + (result, map) + } + + /// Searches the region constriants created since `snapshot` was started + /// and checks to determine whether any of the skolemized regions created + /// in `skol_map` would "escape" -- meaning that they are related to + /// other regions in some way. If so, the higher-ranked subtyping doesn't + /// hold. See `README.md` for more details. + pub fn leak_check(&self, + overly_polymorphic: bool, + span: Span, + skol_map: &SkolemizationMap<'tcx>, + snapshot: &CombinedSnapshot) + -> RelateResult<'tcx, ()> + { + debug!("leak_check: skol_map={:?}", + skol_map); + + // ## Issue #32330 warnings + // + // When Issue #32330 is fixed, a certain number of late-bound + // regions (LBR) will become early-bound. We wish to issue + // warnings when the result of `leak_check` relies on such LBR, as + // that means that compilation will likely start to fail. + // + // Recall that when we do a "HR subtype" check, we replace all + // late-bound regions (LBR) in the subtype with fresh variables, + // and skolemize the late-bound regions in the supertype. 
If those + // skolemized regions from the supertype wind up being + // super-regions (directly or indirectly) of either + // + // - another skolemized region; or, + // - some region that pre-exists the HR subtype check + // - e.g., a region variable that is not one of those created + // to represent bound regions in the subtype + // + // then leak-check (and hence the subtype check) fails. + // + // What will change when we fix #32330 is that some of the LBR in the + // subtype may become early-bound. In that case, they would no longer be in + // the "permitted set" of variables that can be related to a skolemized + // type. + // + // So the foundation for this warning is to collect variables that we found + // to be related to a skolemized type. For each of them, we have a + // `BoundRegion` which carries a `Issue32330` flag. We check whether any of + // those flags indicate that this variable was created from a lifetime + // that will change from late- to early-bound. If so, we issue a warning + // indicating that the results of compilation may change. + // + // This is imperfect, since there are other kinds of code that will not + // compile once #32330 is fixed. However, it fixes the errors observed in + // practice on crater runs. + let mut warnings = vec![]; + + let new_vars = self.region_vars_confined_to_snapshot(snapshot); + for (&skol_br, &skol) in skol_map { + // The inputs to a skolemized variable can only + // be itself or other new variables. 
+ let incoming_taints = self.tainted_regions(snapshot, + skol, + TaintDirections::both()); + for &tainted_region in &incoming_taints { + // Each skolemized should only be relatable to itself + // or new variables: + match *tainted_region { + ty::ReVar(vid) => { + if new_vars.contains(&vid) { + warnings.extend( + match self.region_vars.var_origin(vid) { + LateBoundRegion(_, + ty::BrNamed(.., wc), + _) => Some(wc), + _ => None, + }); + continue; + } + } + _ => { + if tainted_region == skol { continue; } + } + }; + + debug!("{:?} (which replaced {:?}) is tainted by {:?}", + skol, + skol_br, + tainted_region); + + if overly_polymorphic { + debug!("Overly polymorphic!"); + return Err(TypeError::RegionsOverlyPolymorphic(skol_br, + tainted_region)); + } else { + debug!("Not as polymorphic!"); + return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br, + tainted_region)); + } + } + } + + self.issue_32330_warnings(span, &warnings); + + Ok(()) + } + + /// This code converts from skolemized regions back to late-bound + /// regions. It works by replacing each region in the taint set of a + /// skolemized region with a bound-region. The bound region will be bound + /// by the outer-most binder in `value`; the caller must ensure that there is + /// such a binder and it is the right place. + /// + /// This routine is only intended to be used when the leak-check has + /// passed; currently, it's used in the trait matching code to create + /// a set of nested obligations frmo an impl that matches against + /// something higher-ranked. More details can be found in + /// `librustc/middle/traits/README.md`. + /// + /// As a brief example, consider the obligation `for<'a> Fn(&'a int) + /// -> &'a int`, and the impl: + /// + /// impl Fn for SomethingOrOther + /// where A : Clone + /// { ... } + /// + /// Here we will have replaced `'a` with a skolemized region + /// `'0`. This means that our substitution will be `{A=>&'0 + /// int, R=>&'0 int}`. 
+ /// + /// When we apply the substitution to the bounds, we will wind up with + /// `&'0 int : Clone` as a predicate. As a last step, we then go and + /// replace `'0` with a late-bound region `'a`. The depth is matched + /// to the depth of the predicate, in this case 1, so that the final + /// predicate is `for<'a> &'a int : Clone`. + pub fn plug_leaks(&self, + skol_map: SkolemizationMap<'tcx>, + snapshot: &CombinedSnapshot, + value: T) -> T + where T : TypeFoldable<'tcx> + { + debug!("plug_leaks(skol_map={:?}, value={:?})", + skol_map, + value); + + if skol_map.is_empty() { + return value; + } + + // Compute a mapping from the "taint set" of each skolemized + // region back to the `ty::BoundRegion` that it originally + // represented. Because `leak_check` passed, we know that + // these taint sets are mutually disjoint. + let inv_skol_map: FxHashMap<&'tcx ty::Region, ty::BoundRegion> = + skol_map + .iter() + .flat_map(|(&skol_br, &skol)| { + self.tainted_regions(snapshot, skol, TaintDirections::both()) + .into_iter() + .map(move |tainted_region| (tainted_region, skol_br)) + }) + .collect(); + + debug!("plug_leaks: inv_skol_map={:?}", + inv_skol_map); + + // Remove any instantiated type variables from `value`; those can hide + // references to regions from the `fold_regions` code below. + let value = self.resolve_type_vars_if_possible(&value); + + // Map any skolemization byproducts back to a late-bound + // region. Put that late-bound region at whatever the outermost + // binder is that we encountered in `value`. The caller is + // responsible for ensuring that (a) `value` contains at least one + // binder and (b) that binder is the one we want to use. + let result = self.tcx.fold_regions(&value, &mut false, |r, current_depth| { + match inv_skol_map.get(&r) { + None => r, + Some(br) => { + // It is the responsibility of the caller to ensure + // that each skolemized region appears within a + // binder. 
In practice, this routine is only used by + // trait checking, and all of the skolemized regions + // appear inside predicates, which always have + // binders, so this assert is satisfied. + assert!(current_depth > 1); + + // since leak-check passed, this skolemized region + // should only have incoming edges from variables + // (which ought not to escape the snapshot, but we + // don't check that) or itself + assert!( + match *r { + ty::ReVar(_) => true, + ty::ReSkolemized(_, ref br1) => br == br1, + _ => false, + }, + "leak-check would have us replace {:?} with {:?}", + r, br); + + self.tcx.mk_region(ty::ReLateBound( + ty::DebruijnIndex::new(current_depth - 1), br.clone())) + } + } + }); + + self.pop_skolemized(skol_map, snapshot); + + debug!("plug_leaks: result={:?}", result); + + result + } + + /// Pops the skolemized regions found in `skol_map` from the region + /// inference context. Whenever you create skolemized regions via + /// `skolemize_late_bound_regions`, they must be popped before you + /// commit the enclosing snapshot (if you do not commit, e.g. within a + /// probe or as a result of an error, then this is not necessary, as + /// popping happens as part of the rollback). + /// + /// Note: popping also occurs implicitly as part of `leak_check`. + pub fn pop_skolemized(&self, + skol_map: SkolemizationMap<'tcx>, + snapshot: &CombinedSnapshot) + { + debug!("pop_skolemized({:?})", skol_map); + let skol_regions: FxHashSet<_> = skol_map.values().cloned().collect(); + self.region_vars.pop_skolemized(&skol_regions, &snapshot.region_vars_snapshot); + if !skol_map.is_empty() { + self.projection_cache.borrow_mut().rollback_skolemized( + &snapshot.projection_cache_snapshot); + } + } +} diff --git a/src/librustc/infer/lattice.rs b/src/librustc/infer/lattice.rs new file mode 100644 index 0000000000000..eda78428e61ad --- /dev/null +++ b/src/librustc/infer/lattice.rs @@ -0,0 +1,83 @@ +// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Lattice Variables +//! +//! This file contains generic code for operating on inference variables +//! that are characterized by an upper- and lower-bound. The logic and +//! reasoning is explained in detail in the large comment in `infer.rs`. +//! +//! The code in here is defined quite generically so that it can be +//! applied both to type variables, which represent types being inferred, +//! and fn variables, which represent function types being inferred. +//! It may eventually be applied to their types as well, who knows. +//! In some cases, the functions are also generic with respect to the +//! operation on the lattice (GLB vs LUB). +//! +//! Although all the functions are generic, we generally write the +//! comments in a way that is specific to type variables and the LUB +//! operation. It's just easier that way. +//! +//! In general all of the functions are defined parametrically +//! over a `LatticeValue`, which is a value defined with respect to +//! a lattice. + +use super::InferCtxt; + +use ty::TyVar; +use ty::{self, Ty}; +use ty::relate::{RelateResult, TypeRelation}; + +pub trait LatticeDir<'f, 'gcx: 'f+'tcx, 'tcx: 'f> : TypeRelation<'f, 'gcx, 'tcx> { + fn infcx(&self) -> &'f InferCtxt<'f, 'gcx, 'tcx>; + + // Relates the type `v` to `a` and `b` such that `v` represents + // the LUB/GLB of `a` and `b` as appropriate. 
+ fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>; +} + +pub fn super_lattice_tys<'a, 'gcx, 'tcx, L>(this: &mut L, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> + where L: LatticeDir<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a +{ + debug!("{}.lattice_tys({:?}, {:?})", + this.tag(), + a, + b); + + if a == b { + return Ok(a); + } + + let infcx = this.infcx(); + let a = infcx.type_variables.borrow_mut().replace_if_possible(a); + let b = infcx.type_variables.borrow_mut().replace_if_possible(b); + match (&a.sty, &b.sty) { + (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..))) + if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => { + let v = infcx.next_diverging_ty_var(); + this.relate_bound(v, a, b)?; + Ok(v) + } + + (&ty::TyInfer(TyVar(..)), _) | + (_, &ty::TyInfer(TyVar(..))) => { + let v = infcx.next_ty_var(); + this.relate_bound(v, a, b)?; + Ok(v) + } + + _ => { + infcx.super_combine_tys(this, a, b) + } + } +} diff --git a/src/librustc/infer/lub.rs b/src/librustc/infer/lub.rs new file mode 100644 index 0000000000000..7d352be67d32b --- /dev/null +++ b/src/librustc/infer/lub.rs @@ -0,0 +1,92 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::combine::CombineFields; +use super::InferCtxt; +use super::lattice::{self, LatticeDir}; +use super::Subtype; + +use ty::{self, Ty, TyCtxt}; +use ty::relate::{Relate, RelateResult, TypeRelation}; + +/// "Least upper bound" (common supertype) +pub struct Lub<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, + a_is_expected: bool, +} + +impl<'combine, 'infcx, 'gcx, 'tcx> Lub<'combine, 'infcx, 'gcx, 'tcx> { + pub fn new(fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool) + -> Lub<'combine, 'infcx, 'gcx, 'tcx> + { + Lub { fields: fields, a_is_expected: a_is_expected } + } +} + +impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> + for Lub<'combine, 'infcx, 'gcx, 'tcx> +{ + fn tag(&self) -> &'static str { "Lub" } + + fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.tcx() } + + fn a_is_expected(&self) -> bool { self.a_is_expected } + + fn relate_with_variance>(&mut self, + variance: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + match variance { + ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b), + ty::Covariant => self.relate(a, b), + ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b), + ty::Contravariant => self.fields.glb(self.a_is_expected).relate(a, b), + } + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + lattice::super_lattice_tys(self, a, b) + } + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + debug!("{}.regions({:?}, {:?})", + self.tag(), + a, + b); + + let origin = Subtype(self.fields.trace.clone()); + Ok(self.fields.infcx.region_vars.lub_regions(origin, a, b)) + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + self.fields.higher_ranked_lub(a, b, self.a_is_expected) + } +} + +impl<'combine, 
'infcx, 'gcx, 'tcx> LatticeDir<'infcx, 'gcx, 'tcx> + for Lub<'combine, 'infcx, 'gcx, 'tcx> +{ + fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'gcx, 'tcx> { + self.fields.infcx + } + + fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { + let mut sub = self.fields.sub(self.a_is_expected); + sub.relate(&a, &v)?; + sub.relate(&b, &v)?; + Ok(()) + } +} diff --git a/src/librustc/infer/mod.rs b/src/librustc/infer/mod.rs new file mode 100644 index 0000000000000..72ef987aefd5c --- /dev/null +++ b/src/librustc/infer/mod.rs @@ -0,0 +1,1721 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! See the Book for more information. + +pub use self::LateBoundRegionConversionTime::*; +pub use self::RegionVariableOrigin::*; +pub use self::SubregionOrigin::*; +pub use self::ValuePairs::*; +pub use ty::IntVarValue; +pub use self::freshen::TypeFreshener; +pub use self::region_inference::{GenericKind, VerifyBound}; + +use hir::def_id::DefId; +use hir; +use middle::free_region::FreeRegionMap; +use middle::mem_categorization as mc; +use middle::mem_categorization::McResult; +use middle::region::CodeExtent; +use middle::lang_items; +use mir::tcx::LvalueTy; +use ty::subst::{Kind, Subst, Substs}; +use ty::adjustment; +use ty::{TyVid, IntVid, FloatVid}; +use ty::{self, Ty, TyCtxt}; +use ty::error::{ExpectedFound, TypeError, UnconstrainedNumeric}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use ty::relate::{Relate, RelateResult, TypeRelation}; +use traits::{self, ObligationCause, PredicateObligations, Reveal}; +use rustc_data_structures::unify::{self, UnificationTable}; +use std::cell::{Cell, RefCell, Ref, RefMut}; +use 
std::fmt; +use syntax::ast; +use errors::DiagnosticBuilder; +use syntax_pos::{self, Span, DUMMY_SP}; +use util::nodemap::{FxHashMap, FxHashSet, NodeMap}; + +use self::combine::CombineFields; +use self::higher_ranked::HrMatchResult; +use self::region_inference::{RegionVarBindings, RegionSnapshot}; +use self::unify_key::ToType; + +mod bivariate; +mod combine; +mod equate; +pub mod error_reporting; +mod fudge; +mod glb; +mod higher_ranked; +pub mod lattice; +mod lub; +pub mod region_inference; +pub mod resolve; +mod freshen; +mod sub; +pub mod type_variable; +pub mod unify_key; + +#[must_use] +pub struct InferOk<'tcx, T> { + pub value: T, + pub obligations: PredicateObligations<'tcx>, +} +pub type InferResult<'tcx, T> = Result, TypeError<'tcx>>; + +pub type Bound = Option; +pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result" +pub type FixupResult = Result; // "fixup result" + +/// A version of &ty::Tables which can be global or local. +/// Only the local version supports borrow_mut. +#[derive(Copy, Clone)] +pub enum InferTables<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + Global(&'a RefCell>), + Local(&'a RefCell>) +} + +impl<'a, 'gcx, 'tcx> InferTables<'a, 'gcx, 'tcx> { + pub fn borrow(self) -> Ref<'a, ty::Tables<'tcx>> { + match self { + InferTables::Global(tables) => tables.borrow(), + InferTables::Local(tables) => tables.borrow() + } + } + + pub fn borrow_mut(self) -> RefMut<'a, ty::Tables<'tcx>> { + match self { + InferTables::Global(_) => { + bug!("InferTables: infcx.tables.borrow_mut() outside of type-checking"); + } + InferTables::Local(tables) => tables.borrow_mut() + } + } +} + +pub struct InferCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + + pub tables: InferTables<'a, 'gcx, 'tcx>, + + // Cache for projections. This cache is snapshotted along with the + // infcx. + // + // Public so that `traits::project` can use it. 
+ pub projection_cache: RefCell>, + + // We instantiate UnificationTable with bounds because the + // types that might instantiate a general type variable have an + // order, represented by its upper and lower bounds. + type_variables: RefCell>, + + // Map from integral variable to the kind of integer it represents + int_unification_table: RefCell>, + + // Map from floating variable to the kind of float it represents + float_unification_table: RefCell>, + + // For region variables. + region_vars: RegionVarBindings<'a, 'gcx, 'tcx>, + + pub parameter_environment: ty::ParameterEnvironment<'gcx>, + + /// Caches the results of trait selection. This cache is used + /// for things that have to do with the parameters in scope. + pub selection_cache: traits::SelectionCache<'tcx>, + + /// Caches the results of trait evaluation. + pub evaluation_cache: traits::EvaluationCache<'tcx>, + + // the set of predicates on which errors have been reported, to + // avoid reporting the same error twice. + pub reported_trait_errors: RefCell>>, + + // Sadly, the behavior of projection varies a bit depending on the + // stage of compilation. The specifics are given in the + // documentation for `Reveal`. + projection_mode: Reveal, + + // When an error occurs, we want to avoid reporting "derived" + // errors that are due to this original failure. Normally, we + // handle this with the `err_count_on_creation` count, which + // basically just tracks how many errors were reported when we + // started type-checking a fn and checks to see if any new errors + // have been reported since then. Not great, but it works. + // + // However, when errors originated in other passes -- notably + // resolve -- this heuristic breaks down. Therefore, we have this + // auxiliary flag that one can set whenever one creates a + // type-error that is due to an error in a prior pass. + // + // Don't read this flag directly, call `is_tainted_by_errors()` + // and `set_tainted_by_errors()`. 
+ tainted_by_errors_flag: Cell, + + // Track how many errors were reported when this infcx is created. + // If the number of errors increases, that's also a sign (line + // `tained_by_errors`) to avoid reporting certain kinds of errors. + err_count_on_creation: usize, + + // This flag is used for debugging, and is set to true if there are + // any obligations set during the current snapshot. In that case, the + // snapshot can't be rolled back. + pub obligations_in_snapshot: Cell, +} + +/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized +/// region that each late-bound region was replaced with. +pub type SkolemizationMap<'tcx> = FxHashMap; + +/// See `error_reporting.rs` for more details +#[derive(Clone, Debug)] +pub enum ValuePairs<'tcx> { + Types(ExpectedFound>), + TraitRefs(ExpectedFound>), + PolyTraitRefs(ExpectedFound>), +} + +/// The trace designates the path through inference that we took to +/// encounter an error or subtyping constraint. +/// +/// See `error_reporting.rs` for more details. +#[derive(Clone)] +pub struct TypeTrace<'tcx> { + cause: ObligationCause<'tcx>, + values: ValuePairs<'tcx>, +} + +/// The origin of a `r1 <= r2` constraint. 
+/// +/// See `error_reporting.rs` for more details +#[derive(Clone, Debug)] +pub enum SubregionOrigin<'tcx> { + // Arose from a subtyping relation + Subtype(TypeTrace<'tcx>), + + // Stack-allocated closures cannot outlive innermost loop + // or function so as to ensure we only require finite stack + InfStackClosure(Span), + + // Invocation of closure must be within its lifetime + InvokeClosure(Span), + + // Dereference of reference must be within its lifetime + DerefPointer(Span), + + // Closure bound must not outlive captured free variables + FreeVariable(Span, ast::NodeId), + + // Index into slice must be within its lifetime + IndexSlice(Span), + + // When casting `&'a T` to an `&'b Trait` object, + // relating `'a` to `'b` + RelateObjectBound(Span), + + // Some type parameter was instantiated with the given type, + // and that type must outlive some region. + RelateParamBound(Span, Ty<'tcx>), + + // The given region parameter was instantiated with a region + // that must outlive some other region. + RelateRegionParamBound(Span), + + // A bound placed on type parameters that states that must outlive + // the moment of their instantiation. + RelateDefaultParamBound(Span, Ty<'tcx>), + + // Creating a pointer `b` to contents of another reference + Reborrow(Span), + + // Creating a pointer `b` to contents of an upvar + ReborrowUpvar(Span, ty::UpvarId), + + // Data with type `Ty<'tcx>` was borrowed + DataBorrowed(Ty<'tcx>, Span), + + // (&'a &'b T) where a >= b + ReferenceOutlivesReferent(Ty<'tcx>, Span), + + // Type or region parameters must be in scope. + ParameterInScope(ParameterOrigin, Span), + + // The type T of an expression E must outlive the lifetime for E. 
+ ExprTypeIsNotInScope(Ty<'tcx>, Span), + + // A `ref b` whose region does not enclose the decl site + BindingTypeIsNotValidAtDecl(Span), + + // Regions appearing in a method receiver must outlive method call + CallRcvr(Span), + + // Regions appearing in a function argument must outlive func call + CallArg(Span), + + // Region in return type of invoked fn must enclose call + CallReturn(Span), + + // Operands must be in scope + Operand(Span), + + // Region resulting from a `&` expr must enclose the `&` expr + AddrOf(Span), + + // An auto-borrow that does not enclose the expr where it occurs + AutoBorrow(Span), + + // Region constraint arriving from destructor safety + SafeDestructor(Span), + + // Comparing the signature and requirements of an impl method against + // the containing trait. + CompareImplMethodObligation { + span: Span, + item_name: ast::Name, + impl_item_def_id: DefId, + trait_item_def_id: DefId, + + // this is `Some(_)` if this error arises from the bug fix for + // #18937. This is a temporary measure. + lint_id: Option, + }, +} + +/// Places that type/region parameters can appear. 
+#[derive(Clone, Copy, Debug)] +pub enum ParameterOrigin { + Path, // foo::bar + MethodCall, // foo.bar() <-- parameters on impl providing bar() + OverloadedOperator, // a + b when overloaded + OverloadedDeref, // *a when overloaded +} + +/// Times when we replace late-bound regions with variables: +#[derive(Clone, Copy, Debug)] +pub enum LateBoundRegionConversionTime { + /// when a fn is called + FnCall, + + /// when two higher-ranked types are compared + HigherRankedType, + + /// when projecting an associated type + AssocTypeProjection(ast::Name), +} + +/// Reasons to create a region inference variable +/// +/// See `error_reporting.rs` for more details +#[derive(Clone, Debug)] +pub enum RegionVariableOrigin { + // Region variables created for ill-categorized reasons, + // mostly indicates places in need of refactoring + MiscVariable(Span), + + // Regions created by a `&P` or `[...]` pattern + PatternRegion(Span), + + // Regions created by `&` operator + AddrOfRegion(Span), + + // Regions created as part of an autoref of a method receiver + Autoref(Span), + + // Regions created as part of an automatic coercion + Coercion(Span), + + // Region variables created as the values for early-bound regions + EarlyBoundRegion(Span, ast::Name), + + // Region variables created for bound regions + // in a function or method that is called + LateBoundRegion(Span, ty::BoundRegion, LateBoundRegionConversionTime), + + UpvarRegion(ty::UpvarId, Span), + + BoundRegionInCoherence(ast::Name), +} + +#[derive(Copy, Clone, Debug)] +pub enum FixupError { + UnresolvedIntTy(IntVid), + UnresolvedFloatTy(FloatVid), + UnresolvedTy(TyVid) +} + +impl fmt::Display for FixupError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::FixupError::*; + + match *self { + UnresolvedIntTy(_) => { + write!(f, "cannot determine the type of this integer; \ + add a suffix to specify the type explicitly") + } + UnresolvedFloatTy(_) => { + write!(f, "cannot determine the type of this number; \ 
+ add a suffix to specify the type explicitly") + } + UnresolvedTy(_) => write!(f, "unconstrained type") + } + } +} + +/// Helper type of a temporary returned by tcx.infer_ctxt(...). +/// Necessary because we can't write the following bound: +/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(InferCtxt<'b, 'gcx, 'tcx>). +pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + global_tcx: TyCtxt<'a, 'gcx, 'gcx>, + arenas: ty::CtxtArenas<'tcx>, + tables: Option>>, + param_env: Option>, + projection_mode: Reveal, +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'gcx> { + pub fn infer_ctxt(self, + tables: Option>, + param_env: Option>, + projection_mode: Reveal) + -> InferCtxtBuilder<'a, 'gcx, 'tcx> { + InferCtxtBuilder { + global_tcx: self, + arenas: ty::CtxtArenas::new(), + tables: tables.map(RefCell::new), + param_env: param_env, + projection_mode: projection_mode, + } + } + + /// Fake InferCtxt with the global tcx. Used by pre-MIR borrowck + /// for MemCategorizationContext/ExprUseVisitor. + /// If any inference functionality is used, ICEs will occur. 
+ pub fn borrowck_fake_infer_ctxt(self, param_env: ty::ParameterEnvironment<'gcx>) + -> InferCtxt<'a, 'gcx, 'gcx> { + InferCtxt { + tcx: self, + tables: InferTables::Global(&self.tables), + type_variables: RefCell::new(type_variable::TypeVariableTable::new()), + int_unification_table: RefCell::new(UnificationTable::new()), + float_unification_table: RefCell::new(UnificationTable::new()), + region_vars: RegionVarBindings::new(self), + parameter_environment: param_env, + selection_cache: traits::SelectionCache::new(), + evaluation_cache: traits::EvaluationCache::new(), + projection_cache: RefCell::new(traits::ProjectionCache::new()), + reported_trait_errors: RefCell::new(FxHashSet()), + projection_mode: Reveal::NotSpecializable, + tainted_by_errors_flag: Cell::new(false), + err_count_on_creation: self.sess.err_count(), + obligations_in_snapshot: Cell::new(false), + } + } +} + +impl<'a, 'gcx, 'tcx> InferCtxtBuilder<'a, 'gcx, 'tcx> { + pub fn enter(&'tcx mut self, f: F) -> R + where F: for<'b> FnOnce(InferCtxt<'b, 'gcx, 'tcx>) -> R + { + let InferCtxtBuilder { + global_tcx, + ref arenas, + ref tables, + ref mut param_env, + projection_mode, + } = *self; + let tables = if let Some(ref tables) = *tables { + InferTables::Local(tables) + } else { + InferTables::Global(&global_tcx.tables) + }; + let param_env = param_env.take().unwrap_or_else(|| { + global_tcx.empty_parameter_environment() + }); + global_tcx.enter_local(arenas, |tcx| f(InferCtxt { + tcx: tcx, + tables: tables, + projection_cache: RefCell::new(traits::ProjectionCache::new()), + type_variables: RefCell::new(type_variable::TypeVariableTable::new()), + int_unification_table: RefCell::new(UnificationTable::new()), + float_unification_table: RefCell::new(UnificationTable::new()), + region_vars: RegionVarBindings::new(tcx), + parameter_environment: param_env, + selection_cache: traits::SelectionCache::new(), + evaluation_cache: traits::EvaluationCache::new(), + reported_trait_errors: RefCell::new(FxHashSet()), + 
projection_mode: projection_mode, + tainted_by_errors_flag: Cell::new(false), + err_count_on_creation: tcx.sess.err_count(), + obligations_in_snapshot: Cell::new(false), + })) + } +} + +impl ExpectedFound { + fn new(a_is_expected: bool, a: T, b: T) -> Self { + if a_is_expected { + ExpectedFound {expected: a, found: b} + } else { + ExpectedFound {expected: b, found: a} + } + } +} + +impl<'tcx, T> InferOk<'tcx, T> { + pub fn unit(self) -> InferOk<'tcx, ()> { + InferOk { value: (), obligations: self.obligations } + } +} + +#[must_use = "once you start a snapshot, you should always consume it"] +pub struct CombinedSnapshot { + projection_cache_snapshot: traits::ProjectionCacheSnapshot, + type_snapshot: type_variable::Snapshot, + int_snapshot: unify::Snapshot, + float_snapshot: unify::Snapshot, + region_vars_snapshot: RegionSnapshot, + obligations_in_snapshot: bool, +} + +/// Helper trait for shortening the lifetimes inside a +/// value for post-type-checking normalization. +pub trait TransNormalize<'gcx>: TypeFoldable<'gcx> { + fn trans_normalize<'a, 'tcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Self; +} + +macro_rules! items { ($($item:item)+) => ($($item)+) } +macro_rules! 
impl_trans_normalize { + ($lt_gcx:tt, $($ty:ty),+) => { + items!($(impl<$lt_gcx> TransNormalize<$lt_gcx> for $ty { + fn trans_normalize<'a, 'tcx>(&self, + infcx: &InferCtxt<'a, $lt_gcx, 'tcx>) + -> Self { + infcx.normalize_projections_in(self) + } + })+); + } +} + +impl_trans_normalize!('gcx, + Ty<'gcx>, + &'gcx Substs<'gcx>, + ty::FnSig<'gcx>, + &'gcx ty::BareFnTy<'gcx>, + ty::ClosureSubsts<'gcx>, + ty::PolyTraitRef<'gcx>, + ty::ExistentialTraitRef<'gcx> +); + +impl<'gcx> TransNormalize<'gcx> for LvalueTy<'gcx> { + fn trans_normalize<'a, 'tcx>(&self, infcx: &InferCtxt<'a, 'gcx, 'tcx>) -> Self { + match *self { + LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.trans_normalize(infcx) }, + LvalueTy::Downcast { adt_def, substs, variant_index } => { + LvalueTy::Downcast { + adt_def: adt_def, + substs: substs.trans_normalize(infcx), + variant_index: variant_index + } + } + } + } +} + +// NOTE: Callable from trans only! +impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { + /// Currently, higher-ranked type bounds inhibit normalization. Therefore, + /// each time we erase them in translation, we need to normalize + /// the contents. 
+ pub fn erase_late_bound_regions_and_normalize(self, value: &ty::Binder) + -> T + where T: TransNormalize<'tcx> + { + assert!(!value.needs_subst()); + let value = self.erase_late_bound_regions(value); + self.normalize_associated_type(&value) + } + + pub fn normalize_associated_type(self, value: &T) -> T + where T: TransNormalize<'tcx> + { + debug!("normalize_associated_type(t={:?})", value); + + let value = self.erase_regions(value); + + if !value.has_projection_types() { + return value; + } + + self.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + value.trans_normalize(&infcx) + }) + } + + pub fn normalize_associated_type_in_env( + self, value: &T, env: &'a ty::ParameterEnvironment<'tcx> + ) -> T + where T: TransNormalize<'tcx> + { + debug!("normalize_associated_type_in_env(t={:?})", value); + + let value = self.erase_regions(value); + + if !value.has_projection_types() { + return value; + } + + self.infer_ctxt(None, Some(env.clone()), Reveal::All).enter(|infcx| { + value.trans_normalize(&infcx) + }) + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + fn normalize_projections_in(&self, value: &T) -> T::Lifted + where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + { + let mut selcx = traits::SelectionContext::new(self); + let cause = traits::ObligationCause::dummy(); + let traits::Normalized { value: result, obligations } = + traits::normalize(&mut selcx, cause, value); + + debug!("normalize_projections_in: result={:?} obligations={:?}", + result, obligations); + + let mut fulfill_cx = traits::FulfillmentContext::new(); + + for obligation in obligations { + fulfill_cx.register_predicate_obligation(self, obligation); + } + + self.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result) + } + + /// Finishes processes any obligations that remain in the + /// fulfillment context, and then returns the result with all type + /// variables removed and regions erased. 
Because this is intended + /// for use after type-check has completed, if any errors occur, + /// it will panic. It is used during normalization and other cases + /// where processing the obligations in `fulfill_cx` may cause + /// type inference variables that appear in `result` to be + /// unified, and hence we need to process those obligations to get + /// the complete picture of the type. + pub fn drain_fulfillment_cx_or_panic(&self, + span: Span, + fulfill_cx: &mut traits::FulfillmentContext<'tcx>, + result: &T) + -> T::Lifted + where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + { + debug!("drain_fulfillment_cx_or_panic()"); + + // In principle, we only need to do this so long as `result` + // contains unbound type parameters. It could be a slight + // optimization to stop iterating early. + match fulfill_cx.select_all_or_error(self) { + Ok(()) => { } + Err(errors) => { + span_bug!(span, "Encountered errors `{:?}` resolving bounds after type-checking", + errors); + } + } + + let result = self.resolve_type_vars_if_possible(result); + let result = self.tcx.erase_regions(&result); + + match self.tcx.lift_to_global(&result) { + Some(result) => result, + None => { + span_bug!(span, "Uninferred types/regions in `{:?}`", result); + } + } + } + + pub fn projection_mode(&self) -> Reveal { + self.projection_mode + } + + pub fn freshen>(&self, t: T) -> T { + t.fold_with(&mut self.freshener()) + } + + pub fn type_var_diverges(&'a self, ty: Ty) -> bool { + match ty.sty { + ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid), + _ => false + } + } + + pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'gcx, 'tcx> { + freshen::TypeFreshener::new(self) + } + + pub fn type_is_unconstrained_numeric(&'a self, ty: Ty) -> UnconstrainedNumeric { + use ty::error::UnconstrainedNumeric::Neither; + use ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; + match ty.sty { + ty::TyInfer(ty::IntVar(vid)) => { + if 
self.int_unification_table.borrow_mut().has_value(vid) { + Neither + } else { + UnconstrainedInt + } + }, + ty::TyInfer(ty::FloatVar(vid)) => { + if self.float_unification_table.borrow_mut().has_value(vid) { + Neither + } else { + UnconstrainedFloat + } + }, + _ => Neither, + } + } + + /// Returns a type variable's default fallback if any exists. A default + /// must be attached to the variable when created, if it is created + /// without a default, this will return None. + /// + /// This code does not apply to integral or floating point variables, + /// only to use declared defaults. + /// + /// See `new_ty_var_with_default` to create a type variable with a default. + /// See `type_variable::Default` for details about what a default entails. + pub fn default(&self, ty: Ty<'tcx>) -> Option> { + match ty.sty { + ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid), + _ => None + } + } + + pub fn unsolved_variables(&self) -> Vec> { + let mut variables = Vec::new(); + + let unbound_ty_vars = self.type_variables + .borrow_mut() + .unsolved_variables() + .into_iter() + .map(|t| self.tcx.mk_var(t)); + + let unbound_int_vars = self.int_unification_table + .borrow_mut() + .unsolved_variables() + .into_iter() + .map(|v| self.tcx.mk_int_var(v)); + + let unbound_float_vars = self.float_unification_table + .borrow_mut() + .unsolved_variables() + .into_iter() + .map(|v| self.tcx.mk_float_var(v)); + + variables.extend(unbound_ty_vars); + variables.extend(unbound_int_vars); + variables.extend(unbound_float_vars); + + return variables; + } + + fn combine_fields(&'a self, trace: TypeTrace<'tcx>) + -> CombineFields<'a, 'gcx, 'tcx> { + CombineFields { + infcx: self, + trace: trace, + cause: None, + obligations: PredicateObligations::new(), + } + } + + pub fn equate(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T) + -> InferResult<'tcx, T> + where T: Relate<'tcx> + { + let mut fields = self.combine_fields(trace); + let result = 
fields.equate(a_is_expected).relate(a, b); + result.map(move |t| InferOk { value: t, obligations: fields.obligations }) + } + + pub fn sub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T) + -> InferResult<'tcx, T> + where T: Relate<'tcx> + { + let mut fields = self.combine_fields(trace); + let result = fields.sub(a_is_expected).relate(a, b); + result.map(move |t| InferOk { value: t, obligations: fields.obligations }) + } + + pub fn lub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T) + -> InferResult<'tcx, T> + where T: Relate<'tcx> + { + let mut fields = self.combine_fields(trace); + let result = fields.lub(a_is_expected).relate(a, b); + result.map(move |t| InferOk { value: t, obligations: fields.obligations }) + } + + pub fn glb(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>, a: &T, b: &T) + -> InferResult<'tcx, T> + where T: Relate<'tcx> + { + let mut fields = self.combine_fields(trace); + let result = fields.glb(a_is_expected).relate(a, b); + result.map(move |t| InferOk { value: t, obligations: fields.obligations }) + } + + // Clear the "obligations in snapshot" flag, invoke the closure, + // then restore the flag to its original value. This flag is a + // debugging measure designed to detect cases where we start a + // snapshot, create type variables, register obligations involving + // those type variables in the fulfillment cx, and then have to + // unroll the snapshot, leaving "dangling type variables" behind. + // In such cases, the flag will be set by the fulfillment cx, and + // an assertion will fail when rolling the snapshot back. Very + // useful, much better than grovelling through megabytes of + // RUST_LOG output. + // + // HOWEVER, in some cases the flag is wrong. In particular, we + // sometimes create a "mini-fulfilment-cx" in which we enroll + // obligations. 
As long as this fulfillment cx is fully drained + // before we return, this is not a problem, as there won't be any + // escaping obligations in the main cx. In those cases, you can + // use this function. + pub fn save_and_restore_obligations_in_snapshot_flag(&self, func: F) -> R + where F: FnOnce(&Self) -> R + { + let flag = self.obligations_in_snapshot.get(); + self.obligations_in_snapshot.set(false); + let result = func(self); + self.obligations_in_snapshot.set(flag); + result + } + + fn start_snapshot(&self) -> CombinedSnapshot { + debug!("start_snapshot()"); + + let obligations_in_snapshot = self.obligations_in_snapshot.get(); + self.obligations_in_snapshot.set(false); + + CombinedSnapshot { + projection_cache_snapshot: self.projection_cache.borrow_mut().snapshot(), + type_snapshot: self.type_variables.borrow_mut().snapshot(), + int_snapshot: self.int_unification_table.borrow_mut().snapshot(), + float_snapshot: self.float_unification_table.borrow_mut().snapshot(), + region_vars_snapshot: self.region_vars.start_snapshot(), + obligations_in_snapshot: obligations_in_snapshot, + } + } + + fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) { + debug!("rollback_to(cause={})", cause); + let CombinedSnapshot { projection_cache_snapshot, + type_snapshot, + int_snapshot, + float_snapshot, + region_vars_snapshot, + obligations_in_snapshot } = snapshot; + + assert!(!self.obligations_in_snapshot.get()); + self.obligations_in_snapshot.set(obligations_in_snapshot); + + self.projection_cache + .borrow_mut() + .rollback_to(projection_cache_snapshot); + self.type_variables + .borrow_mut() + .rollback_to(type_snapshot); + self.int_unification_table + .borrow_mut() + .rollback_to(int_snapshot); + self.float_unification_table + .borrow_mut() + .rollback_to(float_snapshot); + self.region_vars + .rollback_to(region_vars_snapshot); + } + + fn commit_from(&self, snapshot: CombinedSnapshot) { + debug!("commit_from()"); + let CombinedSnapshot { projection_cache_snapshot, + 
type_snapshot, + int_snapshot, + float_snapshot, + region_vars_snapshot, + obligations_in_snapshot } = snapshot; + + self.obligations_in_snapshot.set(obligations_in_snapshot); + + self.projection_cache + .borrow_mut() + .commit(projection_cache_snapshot); + self.type_variables + .borrow_mut() + .commit(type_snapshot); + self.int_unification_table + .borrow_mut() + .commit(int_snapshot); + self.float_unification_table + .borrow_mut() + .commit(float_snapshot); + self.region_vars + .commit(region_vars_snapshot); + } + + /// Execute `f` and commit the bindings + pub fn commit_unconditionally(&self, f: F) -> R where + F: FnOnce() -> R, + { + debug!("commit()"); + let snapshot = self.start_snapshot(); + let r = f(); + self.commit_from(snapshot); + r + } + + /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)` + pub fn commit_if_ok(&self, f: F) -> Result where + F: FnOnce(&CombinedSnapshot) -> Result + { + debug!("commit_if_ok()"); + let snapshot = self.start_snapshot(); + let r = f(&snapshot); + debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok()); + match r { + Ok(_) => { self.commit_from(snapshot); } + Err(_) => { self.rollback_to("commit_if_ok -- error", snapshot); } + } + r + } + + // Execute `f` in a snapshot, and commit the bindings it creates + pub fn in_snapshot(&self, f: F) -> T where + F: FnOnce(&CombinedSnapshot) -> T + { + debug!("in_snapshot()"); + let snapshot = self.start_snapshot(); + let r = f(&snapshot); + self.commit_from(snapshot); + r + } + + /// Execute `f` then unroll any bindings it creates + pub fn probe(&self, f: F) -> R where + F: FnOnce(&CombinedSnapshot) -> R, + { + debug!("probe()"); + let snapshot = self.start_snapshot(); + let r = f(&snapshot); + self.rollback_to("probe", snapshot); + r + } + + pub fn add_given(&self, + sub: ty::FreeRegion, + sup: ty::RegionVid) + { + self.region_vars.add_given(sub, sup); + } + + pub fn sub_types(&self, + a_is_expected: bool, + cause: &ObligationCause<'tcx>, + a: Ty<'tcx>, + b: 
Ty<'tcx>) + -> InferResult<'tcx, ()> + { + debug!("sub_types({:?} <: {:?})", a, b); + self.commit_if_ok(|_| { + let trace = TypeTrace::types(cause, a_is_expected, a, b); + self.sub(a_is_expected, trace, &a, &b).map(|ok| ok.unit()) + }) + } + + pub fn can_sub_types(&self, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> UnitResult<'tcx> + { + self.probe(|_| { + let origin = &ObligationCause::dummy(); + let trace = TypeTrace::types(origin, true, a, b); + self.sub(true, trace, &a, &b).map(|InferOk { obligations, .. }| { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + }) + }) + } + + pub fn eq_types(&self, + a_is_expected: bool, + cause: &ObligationCause<'tcx>, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> InferResult<'tcx, ()> + { + self.commit_if_ok(|_| { + let trace = TypeTrace::types(cause, a_is_expected, a, b); + self.equate(a_is_expected, trace, &a, &b).map(|ok| ok.unit()) + }) + } + + pub fn eq_trait_refs(&self, + a_is_expected: bool, + cause: &ObligationCause<'tcx>, + a: ty::TraitRef<'tcx>, + b: ty::TraitRef<'tcx>) + -> InferResult<'tcx, ()> + { + debug!("eq_trait_refs({:?} = {:?})", a, b); + self.commit_if_ok(|_| { + let trace = TypeTrace { + cause: cause.clone(), + values: TraitRefs(ExpectedFound::new(a_is_expected, a, b)) + }; + self.equate(a_is_expected, trace, &a, &b).map(|ok| ok.unit()) + }) + } + + pub fn eq_impl_headers(&self, + a_is_expected: bool, + cause: &ObligationCause<'tcx>, + a: &ty::ImplHeader<'tcx>, + b: &ty::ImplHeader<'tcx>) + -> InferResult<'tcx, ()> + { + debug!("eq_impl_header({:?} = {:?})", a, b); + match (a.trait_ref, b.trait_ref) { + (Some(a_ref), Some(b_ref)) => self.eq_trait_refs(a_is_expected, cause, a_ref, b_ref), + (None, None) => self.eq_types(a_is_expected, cause, a.self_ty, b.self_ty), + _ => bug!("mk_eq_impl_headers given mismatched impl kinds"), + } + } + + pub fn sub_poly_trait_refs(&self, + a_is_expected: bool, + cause: ObligationCause<'tcx>, + a: ty::PolyTraitRef<'tcx>, + b: ty::PolyTraitRef<'tcx>) + -> 
InferResult<'tcx, ()> + { + debug!("sub_poly_trait_refs({:?} <: {:?})", a, b); + self.commit_if_ok(|_| { + let trace = TypeTrace { + cause: cause, + values: PolyTraitRefs(ExpectedFound::new(a_is_expected, a, b)) + }; + self.sub(a_is_expected, trace, &a, &b).map(|ok| ok.unit()) + }) + } + + pub fn sub_regions(&self, + origin: SubregionOrigin<'tcx>, + a: &'tcx ty::Region, + b: &'tcx ty::Region) { + debug!("sub_regions({:?} <: {:?})", a, b); + self.region_vars.make_subregion(origin, a, b); + } + + pub fn equality_predicate(&self, + cause: &ObligationCause<'tcx>, + predicate: &ty::PolyEquatePredicate<'tcx>) + -> InferResult<'tcx, ()> + { + self.commit_if_ok(|snapshot| { + let (ty::EquatePredicate(a, b), skol_map) = + self.skolemize_late_bound_regions(predicate, snapshot); + let cause_span = cause.span; + let eqty_ok = self.eq_types(false, cause, a, b)?; + self.leak_check(false, cause_span, &skol_map, snapshot)?; + self.pop_skolemized(skol_map, snapshot); + Ok(eqty_ok.unit()) + }) + } + + pub fn region_outlives_predicate(&self, + cause: &traits::ObligationCause<'tcx>, + predicate: &ty::PolyRegionOutlivesPredicate<'tcx>) + -> UnitResult<'tcx> + { + self.commit_if_ok(|snapshot| { + let (ty::OutlivesPredicate(r_a, r_b), skol_map) = + self.skolemize_late_bound_regions(predicate, snapshot); + let origin = + SubregionOrigin::from_obligation_cause(cause, + || RelateRegionParamBound(cause.span)); + self.sub_regions(origin, r_b, r_a); // `b : a` ==> `a <= b` + self.leak_check(false, cause.span, &skol_map, snapshot)?; + Ok(self.pop_skolemized(skol_map, snapshot)) + }) + } + + pub fn next_ty_var_id(&self, diverging: bool) -> TyVid { + self.type_variables + .borrow_mut() + .new_var(diverging, None) + } + + pub fn next_ty_var(&self) -> Ty<'tcx> { + self.tcx.mk_var(self.next_ty_var_id(false)) + } + + pub fn next_diverging_ty_var(&self) -> Ty<'tcx> { + self.tcx.mk_var(self.next_ty_var_id(true)) + } + + pub fn next_int_var_id(&self) -> IntVid { + self.int_unification_table + 
.borrow_mut() + .new_key(None) + } + + pub fn next_float_var_id(&self) -> FloatVid { + self.float_unification_table + .borrow_mut() + .new_key(None) + } + + pub fn next_region_var(&self, origin: RegionVariableOrigin) + -> &'tcx ty::Region { + self.tcx.mk_region(ty::ReVar(self.region_vars.new_region_var(origin))) + } + + /// Create a region inference variable for the given + /// region parameter definition. + pub fn region_var_for_def(&self, + span: Span, + def: &ty::RegionParameterDef) + -> &'tcx ty::Region { + self.next_region_var(EarlyBoundRegion(span, def.name)) + } + + /// Create a type inference variable for the given + /// type parameter definition. The substitutions are + /// for actual parameters that may be referred to by + /// the default of this type parameter, if it exists. + /// E.g. `struct Foo(...);` when + /// used in a path such as `Foo::::new()` will + /// use an inference variable for `C` with `[T, U]` + /// as the substitutions for the default, `(T, U)`. + pub fn type_var_for_def(&self, + span: Span, + def: &ty::TypeParameterDef<'tcx>, + substs: &[Kind<'tcx>]) + -> Ty<'tcx> { + let default = def.default.map(|default| { + type_variable::Default { + ty: default.subst_spanned(self.tcx, substs, Some(span)), + origin_span: span, + def_id: def.default_def_id + } + }); + + + let ty_var_id = self.type_variables + .borrow_mut() + .new_var(false, default); + + self.tcx.mk_var(ty_var_id) + } + + /// Given a set of generics defined on a type or impl, returns a substitution mapping each + /// type/region parameter to a fresh inference variable. 
+ pub fn fresh_substs_for_item(&self, + span: Span, + def_id: DefId) + -> &'tcx Substs<'tcx> { + Substs::for_item(self.tcx, def_id, |def, _| { + self.region_var_for_def(span, def) + }, |def, substs| { + self.type_var_for_def(span, def, substs) + }) + } + + pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> &'tcx ty::Region { + self.region_vars.new_bound(debruijn) + } + + /// True if errors have been reported since this infcx was + /// created. This is sometimes used as a heuristic to skip + /// reporting errors that often occur as a result of earlier + /// errors, but where it's hard to be 100% sure (e.g., unresolved + /// inference variables, regionck errors). + pub fn is_tainted_by_errors(&self) -> bool { + debug!("is_tainted_by_errors(err_count={}, err_count_on_creation={}, \ + tainted_by_errors_flag={})", + self.tcx.sess.err_count(), + self.err_count_on_creation, + self.tainted_by_errors_flag.get()); + + if self.tcx.sess.err_count() > self.err_count_on_creation { + return true; // errors reported since this infcx was made + } + self.tainted_by_errors_flag.get() + } + + /// Set the "tainted by errors" flag to true. We call this when we + /// observe an error from a prior pass. 
+ pub fn set_tainted_by_errors(&self) { + debug!("set_tainted_by_errors()"); + self.tainted_by_errors_flag.set(true) + } + + pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> { + match self.tables.borrow().node_types.get(&id) { + Some(&t) => t, + // FIXME + None if self.is_tainted_by_errors() => + self.tcx.types.err, + None => { + bug!("no type for node {}: {} in fcx", + id, self.tcx.map.node_to_string(id)); + } + } + } + + pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> { + match self.tables.borrow().node_types.get(&ex.id) { + Some(&t) => t, + None => { + bug!("no type for expr in fcx"); + } + } + } + + pub fn resolve_regions_and_report_errors(&self, + free_regions: &FreeRegionMap, + subject_node_id: ast::NodeId) { + let errors = self.region_vars.resolve_regions(free_regions, subject_node_id); + if !self.is_tainted_by_errors() { + // As a heuristic, just skip reporting region errors + // altogether if other errors have been reported while + // this infcx was in use. This is totally hokey but + // otherwise we have a hard time separating legit region + // errors from silly ones. + self.report_region_errors(&errors); // see error_reporting.rs + } + } + + pub fn ty_to_string(&self, t: Ty<'tcx>) -> String { + self.resolve_type_vars_if_possible(&t).to_string() + } + + pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String { + let tstrs: Vec = ts.iter().map(|t| self.ty_to_string(*t)).collect(); + format!("({})", tstrs.join(", ")) + } + + pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String { + self.resolve_type_vars_if_possible(t).to_string() + } + + pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> { + match typ.sty { + ty::TyInfer(ty::TyVar(v)) => { + // Not entirely obvious: if `typ` is a type variable, + // it can be resolved to an int/float variable, which + // can then be recursively resolved, hence the + // recursion. 
Note though that we prevent type + // variables from unifying to other type variables + // directly (though they may be embedded + // structurally), and we prevent cycles in any case, + // so this recursion should always be of very limited + // depth. + self.type_variables.borrow_mut() + .probe(v) + .map(|t| self.shallow_resolve(t)) + .unwrap_or(typ) + } + + ty::TyInfer(ty::IntVar(v)) => { + self.int_unification_table + .borrow_mut() + .probe(v) + .map(|v| v.to_type(self.tcx)) + .unwrap_or(typ) + } + + ty::TyInfer(ty::FloatVar(v)) => { + self.float_unification_table + .borrow_mut() + .probe(v) + .map(|v| v.to_type(self.tcx)) + .unwrap_or(typ) + } + + _ => { + typ + } + } + } + + pub fn resolve_type_vars_if_possible(&self, value: &T) -> T + where T: TypeFoldable<'tcx> + { + /*! + * Where possible, replaces type/int/float variables in + * `value` with their final value. Note that region variables + * are unaffected. If a type variable has not been unified, it + * is left as is. This is an idempotent operation that does + * not affect inference state in any way and so you can do it + * at will. + */ + + if !value.needs_infer() { + return value.clone(); // avoid duplicated subst-folding + } + let mut r = resolve::OpportunisticTypeResolver::new(self); + value.fold_with(&mut r) + } + + pub fn resolve_type_and_region_vars_if_possible(&self, value: &T) -> T + where T: TypeFoldable<'tcx> + { + let mut r = resolve::OpportunisticTypeAndRegionResolver::new(self); + value.fold_with(&mut r) + } + + /// Resolves all type variables in `t` and then, if any were left + /// unresolved, substitutes an error type. This is used after the + /// main checking when doing a second pass before writeback. The + /// justification is that writeback will produce an error for + /// these unconstrained type variables. 
+ fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult> { + let ty = self.resolve_type_vars_if_possible(t); + if ty.references_error() || ty.is_ty_var() { + debug!("resolve_type_vars_or_error: error from {:?}", ty); + Err(()) + } else { + Ok(ty) + } + } + + pub fn fully_resolve>(&self, value: &T) -> FixupResult { + /*! + * Attempts to resolve all type/region variables in + * `value`. Region inference must have been run already (e.g., + * by calling `resolve_regions_and_report_errors`). If some + * variable was never unified, an `Err` results. + * + * This method is idempotent, but it not typically not invoked + * except during the writeback phase. + */ + + resolve::fully_resolve(self, value) + } + + // [Note-Type-error-reporting] + // An invariant is that anytime the expected or actual type is TyError (the special + // error type, meaning that an error occurred when typechecking this expression), + // this is a derived error. The error cascaded from another error (that was already + // reported), so it's not useful to display it to the user. + // The following methods implement this logic. + // They check if either the actual or expected type is TyError, and don't print the error + // in this case. The typechecker should only ever report type errors involving mismatched + // types using one of these methods, and should not call span_err directly for such + // errors. + + pub fn type_error_message(&self, + sp: Span, + mk_msg: M, + actual_ty: Ty<'tcx>) + where M: FnOnce(String) -> String, + { + self.type_error_struct(sp, mk_msg, actual_ty).emit(); + } + + // FIXME: this results in errors without an error code. Deprecate? 
+ pub fn type_error_struct(&self, + sp: Span, + mk_msg: M, + actual_ty: Ty<'tcx>) + -> DiagnosticBuilder<'tcx> + where M: FnOnce(String) -> String, + { + self.type_error_struct_with_diag(sp, |actual_ty| { + self.tcx.sess.struct_span_err(sp, &mk_msg(actual_ty)) + }, actual_ty) + } + + pub fn type_error_struct_with_diag(&self, + sp: Span, + mk_diag: M, + actual_ty: Ty<'tcx>) + -> DiagnosticBuilder<'tcx> + where M: FnOnce(String) -> DiagnosticBuilder<'tcx>, + { + let actual_ty = self.resolve_type_vars_if_possible(&actual_ty); + debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty); + + // Don't report an error if actual type is TyError. + if actual_ty.references_error() { + return self.tcx.sess.diagnostic().struct_dummy(); + } + + mk_diag(self.ty_to_string(actual_ty)) + } + + pub fn report_mismatched_types(&self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + err: TypeError<'tcx>) { + let trace = TypeTrace::types(cause, true, expected, actual); + self.report_and_explain_type_error(trace, &err).emit(); + } + + pub fn report_conflicting_default_types(&self, + span: Span, + body_id: ast::NodeId, + expected: type_variable::Default<'tcx>, + actual: type_variable::Default<'tcx>) { + let trace = TypeTrace { + cause: ObligationCause::misc(span, body_id), + values: Types(ExpectedFound { + expected: expected.ty, + found: actual.ty + }) + }; + + self.report_and_explain_type_error( + trace, + &TypeError::TyParamDefaultMismatch(ExpectedFound { + expected: expected, + found: actual + })) + .emit(); + } + + pub fn replace_late_bound_regions_with_fresh_var( + &self, + span: Span, + lbrct: LateBoundRegionConversionTime, + value: &ty::Binder) + -> (T, FxHashMap) + where T : TypeFoldable<'tcx> + { + self.tcx.replace_late_bound_regions( + value, + |br| self.next_region_var(LateBoundRegion(span, br, lbrct))) + } + + /// Given a higher-ranked projection predicate like: + /// + /// for<'a> >::Output = &'a u32 + /// + /// and a target trait-ref like: 
+ /// + /// > + /// + /// find a substitution `S` for the higher-ranked regions (here, + /// `['a => 'x]`) such that the predicate matches the trait-ref, + /// and then return the value (here, `&'a u32`) but with the + /// substitution applied (hence, `&'x u32`). + /// + /// See `higher_ranked_match` in `higher_ranked/mod.rs` for more + /// details. + pub fn match_poly_projection_predicate(&self, + cause: ObligationCause<'tcx>, + match_a: ty::PolyProjectionPredicate<'tcx>, + match_b: ty::TraitRef<'tcx>) + -> InferResult<'tcx, HrMatchResult>> + { + let span = cause.span; + let match_trait_ref = match_a.skip_binder().projection_ty.trait_ref; + let trace = TypeTrace { + cause: cause, + values: TraitRefs(ExpectedFound::new(true, match_trait_ref, match_b)) + }; + + let match_pair = match_a.map_bound(|p| (p.projection_ty.trait_ref, p.ty)); + let mut combine = self.combine_fields(trace); + let result = combine.higher_ranked_match(span, &match_pair, &match_b, true)?; + Ok(InferOk { value: result, obligations: combine.obligations }) + } + + /// See `verify_generic_bound` method in `region_inference` + pub fn verify_generic_bound(&self, + origin: SubregionOrigin<'tcx>, + kind: GenericKind<'tcx>, + a: &'tcx ty::Region, + bound: VerifyBound<'tcx>) { + debug!("verify_generic_bound({:?}, {:?} <: {:?})", + kind, + a, + bound); + + self.region_vars.verify_generic_bound(origin, kind, a, bound); + } + + pub fn can_equate(&self, a: &T, b: &T) -> UnitResult<'tcx> + where T: Relate<'tcx> + fmt::Debug + { + debug!("can_equate({:?}, {:?})", a, b); + self.probe(|_| { + // Gin up a dummy trace, since this won't be committed + // anyhow. We should make this typetrace stuff more + // generic so we don't have to do anything quite this + // terrible. + let trace = TypeTrace::dummy(self.tcx); + self.equate(true, trace, a, b).map(|InferOk { obligations, .. 
}| { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + }) + }) + } + + pub fn node_ty(&self, id: ast::NodeId) -> McResult> { + let ty = self.node_type(id); + self.resolve_type_vars_or_error(&ty) + } + + pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult> { + let ty = self.tables.borrow().expr_ty_adjusted(expr); + self.resolve_type_vars_or_error(&ty) + } + + pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool { + let ty = self.resolve_type_vars_if_possible(&ty); + if let Some(ty) = self.tcx.lift_to_global(&ty) { + // Even if the type may have no inference variables, during + // type-checking closure types are in local tables only. + let local_closures = match self.tables { + InferTables::Local(_) => ty.has_closure_types(), + InferTables::Global(_) => false + }; + if !local_closures { + return ty.moves_by_default(self.tcx.global_tcx(), self.param_env(), span); + } + } + + let copy_def_id = self.tcx.require_lang_item(lang_items::CopyTraitLangItem); + + // this can get called from typeck (by euv), and moves_by_default + // rightly refuses to work with inference variables, but + // moves_by_default has a cache, which we want to use in other + // cases. 
+ !traits::type_known_to_meet_bound(self, ty, copy_def_id, span) + } + + pub fn node_method_ty(&self, method_call: ty::MethodCall) + -> Option> { + self.tables + .borrow() + .method_map + .get(&method_call) + .map(|method| method.ty) + .map(|ty| self.resolve_type_vars_if_possible(&ty)) + } + + pub fn node_method_id(&self, method_call: ty::MethodCall) + -> Option { + self.tables + .borrow() + .method_map + .get(&method_call) + .map(|method| method.def_id) + } + + pub fn adjustments(&self) -> Ref>> { + fn project_adjustments<'a, 'tcx>(tables: &'a ty::Tables<'tcx>) + -> &'a NodeMap> { + &tables.adjustments + } + + Ref::map(self.tables.borrow(), project_adjustments) + } + + pub fn is_method_call(&self, id: ast::NodeId) -> bool { + self.tables.borrow().method_map.contains_key(&ty::MethodCall::expr(id)) + } + + pub fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option { + self.tcx.region_maps.temporary_scope(rvalue_id) + } + + pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option> { + self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned() + } + + pub fn param_env(&self) -> &ty::ParameterEnvironment<'gcx> { + &self.parameter_environment + } + + pub fn closure_kind(&self, + def_id: DefId) + -> Option + { + if def_id.is_local() { + self.tables.borrow().closure_kinds.get(&def_id).cloned() + } else { + // During typeck, ALL closures are local. But afterwards, + // during trans, we see closure ids from other traits. + // That may require loading the closure data out of the + // cstore. 
+ Some(self.tcx.closure_kind(def_id)) + } + } + + pub fn closure_type(&self, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>) + -> ty::ClosureTy<'tcx> + { + if let InferTables::Local(tables) = self.tables { + if let Some(ty) = tables.borrow().closure_tys.get(&def_id) { + return ty.subst(self.tcx, substs.substs); + } + } + + let closure_ty = self.tcx.closure_type(def_id, substs); + closure_ty + } +} + +impl<'a, 'gcx, 'tcx> TypeTrace<'tcx> { + pub fn span(&self) -> Span { + self.cause.span + } + + pub fn types(cause: &ObligationCause<'tcx>, + a_is_expected: bool, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> TypeTrace<'tcx> { + TypeTrace { + cause: cause.clone(), + values: Types(ExpectedFound::new(a_is_expected, a, b)) + } + } + + pub fn dummy(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> TypeTrace<'tcx> { + TypeTrace { + cause: ObligationCause::dummy(), + values: Types(ExpectedFound { + expected: tcx.types.err, + found: tcx.types.err, + }) + } + } +} + +impl<'tcx> fmt::Debug for TypeTrace<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "TypeTrace({:?})", self.cause) + } +} + +impl<'tcx> SubregionOrigin<'tcx> { + pub fn span(&self) -> Span { + match *self { + Subtype(ref a) => a.span(), + InfStackClosure(a) => a, + InvokeClosure(a) => a, + DerefPointer(a) => a, + FreeVariable(a, _) => a, + IndexSlice(a) => a, + RelateObjectBound(a) => a, + RelateParamBound(a, _) => a, + RelateRegionParamBound(a) => a, + RelateDefaultParamBound(a, _) => a, + Reborrow(a) => a, + ReborrowUpvar(a, _) => a, + DataBorrowed(_, a) => a, + ReferenceOutlivesReferent(_, a) => a, + ParameterInScope(_, a) => a, + ExprTypeIsNotInScope(_, a) => a, + BindingTypeIsNotValidAtDecl(a) => a, + CallRcvr(a) => a, + CallArg(a) => a, + CallReturn(a) => a, + Operand(a) => a, + AddrOf(a) => a, + AutoBorrow(a) => a, + SafeDestructor(a) => a, + CompareImplMethodObligation { span, .. 
} => span, + } + } + + pub fn from_obligation_cause(cause: &traits::ObligationCause<'tcx>, + default: F) + -> Self + where F: FnOnce() -> Self + { + match cause.code { + traits::ObligationCauseCode::ReferenceOutlivesReferent(ref_type) => + SubregionOrigin::ReferenceOutlivesReferent(ref_type, cause.span), + + traits::ObligationCauseCode::CompareImplMethodObligation { item_name, + impl_item_def_id, + trait_item_def_id, + lint_id } => + SubregionOrigin::CompareImplMethodObligation { + span: cause.span, + item_name: item_name, + impl_item_def_id: impl_item_def_id, + trait_item_def_id: trait_item_def_id, + lint_id: lint_id, + }, + + _ => default(), + } + } +} + +impl RegionVariableOrigin { + pub fn span(&self) -> Span { + match *self { + MiscVariable(a) => a, + PatternRegion(a) => a, + AddrOfRegion(a) => a, + Autoref(a) => a, + Coercion(a) => a, + EarlyBoundRegion(a, _) => a, + LateBoundRegion(a, ..) => a, + BoundRegionInCoherence(_) => syntax_pos::DUMMY_SP, + UpvarRegion(_, a) => a + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for ValuePairs<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ValuePairs::Types(ref ef) => { + ValuePairs::Types(ef.fold_with(folder)) + } + ValuePairs::TraitRefs(ref ef) => { + ValuePairs::TraitRefs(ef.fold_with(folder)) + } + ValuePairs::PolyTraitRefs(ref ef) => { + ValuePairs::PolyTraitRefs(ef.fold_with(folder)) + } + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ValuePairs::Types(ref ef) => ef.visit_with(visitor), + ValuePairs::TraitRefs(ref ef) => ef.visit_with(visitor), + ValuePairs::PolyTraitRefs(ref ef) => ef.visit_with(visitor), + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for TypeTrace<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + TypeTrace { + cause: self.cause.fold_with(folder), + values: self.values.fold_with(folder) + } + } + + fn super_visit_with>(&self, visitor: &mut V) 
-> bool { + self.cause.visit_with(visitor) || self.values.visit_with(visitor) + } +} diff --git a/src/librustc/middle/infer/region_inference/README.md b/src/librustc/infer/region_inference/README.md similarity index 100% rename from src/librustc/middle/infer/region_inference/README.md rename to src/librustc/infer/region_inference/README.md diff --git a/src/librustc/infer/region_inference/graphviz.rs b/src/librustc/infer/region_inference/graphviz.rs new file mode 100644 index 0000000000000..95ce8d39ff488 --- /dev/null +++ b/src/librustc/infer/region_inference/graphviz.rs @@ -0,0 +1,278 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module provides linkage between libgraphviz traits and +//! `rustc::middle::typeck::infer::region_inference`, generating a +//! rendering of the graph represented by the list of `Constraint` +//! instances (which make up the edges of the graph), as well as the +//! origin for each constraint (which are attached to the labels on +//! each edge). + +/// For clarity, rename the graphviz crate locally to dot. 
+use graphviz as dot; + +use ty::{self, TyCtxt}; +use middle::region::CodeExtent; +use super::Constraint; +use infer::SubregionOrigin; +use infer::region_inference::RegionVarBindings; +use util::nodemap::{FxHashMap, FxHashSet}; + +use std::borrow::Cow; +use std::collections::hash_map::Entry::Vacant; +use std::env; +use std::fs::File; +use std::io; +use std::io::prelude::*; +use std::sync::atomic::{AtomicBool, Ordering}; +use syntax::ast; + +fn print_help_message() { + println!("\ +-Z print-region-graph by default prints a region constraint graph for every \n\ +function body, to the path `/tmp/constraints.nodeXXX.dot`, where the XXX is \n\ +replaced with the node id of the function under analysis. \n\ + \n\ +To select one particular function body, set `RUST_REGION_GRAPH_NODE=XXX`, \n\ +where XXX is the node id desired. \n\ + \n\ +To generate output to some path other than the default \n\ +`/tmp/constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`; \n\ +occurrences of the character `%` in the requested path will be replaced with\n\ +the node id of the function under analysis. \n\ + \n\ +(Since you requested help via RUST_REGION_GRAPH=help, no region constraint \n\ +graphs will be printed. 
\n\ +"); +} + +pub fn maybe_print_constraints_for<'a, 'gcx, 'tcx>( + region_vars: &RegionVarBindings<'a, 'gcx, 'tcx>, + subject_node: ast::NodeId) +{ + let tcx = region_vars.tcx; + + if !region_vars.tcx.sess.opts.debugging_opts.print_region_graph { + return; + } + + let requested_node = env::var("RUST_REGION_GRAPH_NODE") + .ok().and_then(|s| s.parse().map(ast::NodeId::new).ok()); + + if requested_node.is_some() && requested_node != Some(subject_node) { + return; + } + + let requested_output = env::var("RUST_REGION_GRAPH"); + debug!("requested_output: {:?} requested_node: {:?}", + requested_output, + requested_node); + + let output_path = { + let output_template = match requested_output { + Ok(ref s) if s == "help" => { + static PRINTED_YET: AtomicBool = AtomicBool::new(false); + if !PRINTED_YET.load(Ordering::SeqCst) { + print_help_message(); + PRINTED_YET.store(true, Ordering::SeqCst); + } + return; + } + + Ok(other_path) => other_path, + Err(_) => "/tmp/constraints.node%.dot".to_string(), + }; + + if output_template.is_empty() { + bug!("empty string provided as RUST_REGION_GRAPH"); + } + + if output_template.contains('%') { + let mut new_str = String::new(); + for c in output_template.chars() { + if c == '%' { + new_str.push_str(&subject_node.to_string()); + } else { + new_str.push(c); + } + } + new_str + } else { + output_template + } + }; + + let constraints = &*region_vars.constraints.borrow(); + match dump_region_constraints_to(tcx, constraints, &output_path) { + Ok(()) => {} + Err(e) => { + let msg = format!("io error dumping region constraints: {}", e); + region_vars.tcx.sess.err(&msg) + } + } +} + +struct ConstraintGraph<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + graph_name: String, + map: &'a FxHashMap, SubregionOrigin<'tcx>>, + node_ids: FxHashMap, +} + +#[derive(Clone, Hash, PartialEq, Eq, Debug, Copy)] +enum Node { + RegionVid(ty::RegionVid), + Region(ty::Region), +} + +// type Edge = Constraint; +#[derive(Clone, PartialEq, Eq, 
Debug, Copy)] +enum Edge<'tcx> { + Constraint(Constraint<'tcx>), + EnclScope(CodeExtent, CodeExtent), +} + +impl<'a, 'gcx, 'tcx> ConstraintGraph<'a, 'gcx, 'tcx> { + fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, + name: String, + map: &'a ConstraintMap<'tcx>) + -> ConstraintGraph<'a, 'gcx, 'tcx> { + let mut i = 0; + let mut node_ids = FxHashMap(); + { + let mut add_node = |node| { + if let Vacant(e) = node_ids.entry(node) { + e.insert(i); + i += 1; + } + }; + + for (n1, n2) in map.keys().map(|c| constraint_to_nodes(c)) { + add_node(n1); + add_node(n2); + } + + tcx.region_maps.each_encl_scope(|sub, sup| { + add_node(Node::Region(ty::ReScope(*sub))); + add_node(Node::Region(ty::ReScope(*sup))); + }); + } + + ConstraintGraph { + tcx: tcx, + graph_name: name, + map: map, + node_ids: node_ids, + } + } +} + +impl<'a, 'gcx, 'tcx> dot::Labeller<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { + type Node = Node; + type Edge = Edge<'tcx>; + fn graph_id(&self) -> dot::Id { + dot::Id::new(&*self.graph_name).unwrap() + } + fn node_id(&self, n: &Node) -> dot::Id { + let node_id = match self.node_ids.get(n) { + Some(node_id) => node_id, + None => bug!("no node_id found for node: {:?}", n), + }; + let name = || format!("node_{}", node_id); + match dot::Id::new(name()) { + Ok(id) => id, + Err(_) => { + bug!("failed to create graphviz node identified by {}", name()); + } + } + } + fn node_label(&self, n: &Node) -> dot::LabelText { + match *n { + Node::RegionVid(n_vid) => dot::LabelText::label(format!("{:?}", n_vid)), + Node::Region(n_rgn) => dot::LabelText::label(format!("{:?}", n_rgn)), + } + } + fn edge_label(&self, e: &Edge) -> dot::LabelText { + match *e { + Edge::Constraint(ref c) => + dot::LabelText::label(format!("{:?}", self.map.get(c).unwrap())), + Edge::EnclScope(..) 
=> dot::LabelText::label(format!("(enclosed)")), + } + } +} + +fn constraint_to_nodes(c: &Constraint) -> (Node, Node) { + match *c { + Constraint::ConstrainVarSubVar(rv_1, rv_2) => + (Node::RegionVid(rv_1), Node::RegionVid(rv_2)), + Constraint::ConstrainRegSubVar(r_1, rv_2) => + (Node::Region(*r_1), Node::RegionVid(rv_2)), + Constraint::ConstrainVarSubReg(rv_1, r_2) => + (Node::RegionVid(rv_1), Node::Region(*r_2)), + Constraint::ConstrainRegSubReg(r_1, r_2) => + (Node::Region(*r_1), Node::Region(*r_2)), + } +} + +fn edge_to_nodes(e: &Edge) -> (Node, Node) { + match *e { + Edge::Constraint(ref c) => constraint_to_nodes(c), + Edge::EnclScope(sub, sup) => { + (Node::Region(ty::ReScope(sub)), + Node::Region(ty::ReScope(sup))) + } + } +} + +impl<'a, 'gcx, 'tcx> dot::GraphWalk<'a> for ConstraintGraph<'a, 'gcx, 'tcx> { + type Node = Node; + type Edge = Edge<'tcx>; + fn nodes(&self) -> dot::Nodes { + let mut set = FxHashSet(); + for node in self.node_ids.keys() { + set.insert(*node); + } + debug!("constraint graph has {} nodes", set.len()); + set.into_iter().collect() + } + fn edges(&self) -> dot::Edges> { + debug!("constraint graph has {} edges", self.map.len()); + let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect(); + self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup))); + debug!("region graph has {} edges", v.len()); + Cow::Owned(v) + } + fn source(&self, edge: &Edge<'tcx>) -> Node { + let (n1, _) = edge_to_nodes(edge); + debug!("edge {:?} has source {:?}", edge, n1); + n1 + } + fn target(&self, edge: &Edge<'tcx>) -> Node { + let (_, n2) = edge_to_nodes(edge); + debug!("edge {:?} has target {:?}", edge, n2); + n2 + } +} + +pub type ConstraintMap<'tcx> = FxHashMap, SubregionOrigin<'tcx>>; + +fn dump_region_constraints_to<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + map: &ConstraintMap<'tcx>, + path: &str) + -> io::Result<()> { + debug!("dump_region_constraints map (len: {}) path: {}", + map.len(), + path); + let g 
= ConstraintGraph::new(tcx, format!("region_constraints"), map); + debug!("dump_region_constraints calling render"); + let mut v = Vec::new(); + dot::render(&g, &mut v).unwrap(); + File::create(path).and_then(|mut f| f.write_all(&v)) +} diff --git a/src/librustc/infer/region_inference/mod.rs b/src/librustc/infer/region_inference/mod.rs new file mode 100644 index 0000000000000..af6f2c50e72fc --- /dev/null +++ b/src/librustc/infer/region_inference/mod.rs @@ -0,0 +1,1652 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! See README.md + +pub use self::Constraint::*; +pub use self::UndoLogEntry::*; +pub use self::CombineMapType::*; +pub use self::RegionResolutionError::*; +pub use self::VarValue::*; + +use super::{RegionVariableOrigin, SubregionOrigin, MiscVariable}; +use super::unify_key; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::graph::{self, Direction, NodeIndex, OUTGOING}; +use rustc_data_structures::unify::{self, UnificationTable}; +use middle::free_region::FreeRegionMap; +use ty::{self, Ty, TyCtxt}; +use ty::{BoundRegion, Region, RegionVid}; +use ty::{ReEmpty, ReStatic, ReFree, ReEarlyBound, ReErased}; +use ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh}; + +use std::cell::{Cell, RefCell}; +use std::cmp::Ordering::{self, Less, Greater, Equal}; +use std::fmt; +use std::mem; +use std::u32; +use syntax::ast; + +mod graphviz; + +// A constraint that influences the inference process. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum Constraint<'tcx> { + // One region variable is subregion of another + ConstrainVarSubVar(RegionVid, RegionVid), + + // Concrete region is subregion of region variable + ConstrainRegSubVar(&'tcx Region, RegionVid), + + // Region variable is subregion of concrete region. This does not + // directly affect inference, but instead is checked after + // inference is complete. + ConstrainVarSubReg(RegionVid, &'tcx Region), + + // A constraint where neither side is a variable. This does not + // directly affect inference, but instead is checked after + // inference is complete. + ConstrainRegSubReg(&'tcx Region, &'tcx Region), +} + +// VerifyGenericBound(T, _, R, RS): The parameter type `T` (or +// associated type) must outlive the region `R`. `T` is known to +// outlive `RS`. Therefore verify that `R <= RS[i]` for some +// `i`. Inference variables may be involved (but this verification +// step doesn't influence inference). +#[derive(Debug)] +pub struct Verify<'tcx> { + kind: GenericKind<'tcx>, + origin: SubregionOrigin<'tcx>, + region: &'tcx Region, + bound: VerifyBound<'tcx>, +} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum GenericKind<'tcx> { + Param(ty::ParamTy), + Projection(ty::ProjectionTy<'tcx>), +} + +// When we introduce a verification step, we wish to test that a +// particular region (let's call it `'min`) meets some bound. +// The bound is described the by the following grammar: +#[derive(Debug)] +pub enum VerifyBound<'tcx> { + // B = exists {R} --> some 'r in {R} must outlive 'min + // + // Put another way, the subject value is known to outlive all + // regions in {R}, so if any of those outlives 'min, then the + // bound is met. + AnyRegion(Vec<&'tcx Region>), + + // B = forall {R} --> all 'r in {R} must outlive 'min + // + // Put another way, the subject value is known to outlive some + // region in {R}, so if all of those outlives 'min, then the bound + // is met. 
+ AllRegions(Vec<&'tcx Region>), + + // B = exists {B} --> 'min must meet some bound b in {B} + AnyBound(Vec>), + + // B = forall {B} --> 'min must meet all bounds b in {B} + AllBounds(Vec>), +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct TwoRegions<'tcx> { + a: &'tcx Region, + b: &'tcx Region, +} + +#[derive(Copy, Clone, PartialEq)] +pub enum UndoLogEntry<'tcx> { + /// Pushed when we start a snapshot. + OpenSnapshot, + + /// Replaces an `OpenSnapshot` when a snapshot is committed, but + /// that snapshot is not the root. If the root snapshot is + /// unrolled, all nested snapshots must be committed. + CommitedSnapshot, + + /// We added `RegionVid` + AddVar(RegionVid), + + /// We added the given `constraint` + AddConstraint(Constraint<'tcx>), + + /// We added the given `verify` + AddVerify(usize), + + /// We added the given `given` + AddGiven(ty::FreeRegion, ty::RegionVid), + + /// We added a GLB/LUB "combinaton variable" + AddCombination(CombineMapType, TwoRegions<'tcx>), + + /// During skolemization, we sometimes purge entries from the undo + /// log in a kind of minisnapshot (unlike other snapshots, this + /// purging actually takes place *on success*). In that case, we + /// replace the corresponding entry with `Noop` so as to avoid the + /// need to do a bunch of swapping. (We can't use `swap_remove` as + /// the order of the vector is important.) + Purged, +} + +#[derive(Copy, Clone, PartialEq)] +pub enum CombineMapType { + Lub, + Glb, +} + +#[derive(Clone, Debug)] +pub enum RegionResolutionError<'tcx> { + /// `ConcreteFailure(o, a, b)`: + /// + /// `o` requires that `a <= b`, but this does not hold + ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region), + + /// `GenericBoundFailure(p, s, a) + /// + /// The parameter/associated-type `p` must be known to outlive the lifetime + /// `a` (but none of the known bounds are sufficient). 
+ GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, &'tcx Region), + + /// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`: + /// + /// Could not infer a value for `v` because `sub_r <= v` (due to + /// `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and + /// `sub_r <= sup_r` does not hold. + SubSupConflict(RegionVariableOrigin, + SubregionOrigin<'tcx>, + &'tcx Region, + SubregionOrigin<'tcx>, + &'tcx Region), + + /// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive + /// more specific errors message by suggesting to the user where they + /// should put a lifetime. In those cases we process and put those errors + /// into `ProcessedErrors` before we do any reporting. + ProcessedErrors(Vec>, + Vec), +} + +#[derive(Clone, Debug)] +pub enum ProcessedErrorOrigin<'tcx> { + ConcreteFailure(SubregionOrigin<'tcx>, &'tcx Region, &'tcx Region), + VariableFailure(RegionVariableOrigin), +} + +/// SameRegions is used to group regions that we think are the same and would +/// like to indicate so to the user. +/// For example, the following function +/// ``` +/// struct Foo { bar: i32 } +/// fn foo2<'a, 'b>(x: &'a Foo) -> &'b i32 { +/// &x.bar +/// } +/// ``` +/// would report an error because we expect 'a and 'b to match, and so we group +/// 'a and 'b together inside a SameRegions struct +#[derive(Clone, Debug)] +pub struct SameRegions { + pub scope_id: ast::NodeId, + pub regions: Vec, +} + +impl SameRegions { + pub fn contains(&self, other: &BoundRegion) -> bool { + self.regions.contains(other) + } + + pub fn push(&mut self, other: BoundRegion) { + self.regions.push(other); + } +} + +pub type CombineMap<'tcx> = FxHashMap, RegionVid>; + +pub struct RegionVarBindings<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + var_origins: RefCell>, + + // Constraints of the form `A <= B` introduced by the region + // checker. Here at least one of `A` and `B` must be a region + // variable. 
+ constraints: RefCell, SubregionOrigin<'tcx>>>, + + // A "verify" is something that we need to verify after inference is + // done, but which does not directly affect inference in any way. + // + // An example is a `A <= B` where neither `A` nor `B` are + // inference variables. + verifys: RefCell>>, + + // A "given" is a relationship that is known to hold. In particular, + // we often know from closure fn signatures that a particular free + // region must be a subregion of a region variable: + // + // foo.iter().filter(<'a> |x: &'a &'b T| ...) + // + // In situations like this, `'b` is in fact a region variable + // introduced by the call to `iter()`, and `'a` is a bound region + // on the closure (as indicated by the `<'a>` prefix). If we are + // naive, we wind up inferring that `'b` must be `'static`, + // because we require that it be greater than `'a` and we do not + // know what `'a` is precisely. + // + // This hashmap is used to avoid that naive scenario. Basically we + // record the fact that `'a <= 'b` is implied by the fn signature, + // and then ignore the constraint when solving equations. This is + // a bit of a hack but seems to work. + givens: RefCell>, + + lubs: RefCell>, + glbs: RefCell>, + skolemization_count: Cell, + bound_count: Cell, + + // The undo log records actions that might later be undone. + // + // Note: when the undo_log is empty, we are not actively + // snapshotting. When the `start_snapshot()` method is called, we + // push an OpenSnapshot entry onto the list to indicate that we + // are now actively snapshotting. The reason for this is that + // otherwise we end up adding entries for things like the lower + // bound on a variable and so forth, which can never be rolled + // back. + undo_log: RefCell>>, + unification_table: RefCell>, + + // This contains the results of inference. It begins as an empty + // option and only acquires a value after inference is complete. 
+ values: RefCell>>>, +} + +pub struct RegionSnapshot { + length: usize, + region_snapshot: unify::Snapshot, + skolemization_count: u32, +} + +/// When working with skolemized regions, we often wish to find all of +/// the regions that are either reachable from a skolemized region, or +/// which can reach a skolemized region, or both. We call such regions +/// *tained* regions. This struct allows you to decide what set of +/// tainted regions you want. +#[derive(Debug)] +pub struct TaintDirections { + incoming: bool, + outgoing: bool, +} + +impl TaintDirections { + pub fn incoming() -> Self { + TaintDirections { incoming: true, outgoing: false } + } + + pub fn outgoing() -> Self { + TaintDirections { incoming: false, outgoing: true } + } + + pub fn both() -> Self { + TaintDirections { incoming: true, outgoing: true } + } +} + +struct TaintSet<'tcx> { + directions: TaintDirections, + regions: FxHashSet<&'tcx ty::Region> +} + +impl<'a, 'gcx, 'tcx> TaintSet<'tcx> { + fn new(directions: TaintDirections, + initial_region: &'tcx ty::Region) + -> Self { + let mut regions = FxHashSet(); + regions.insert(initial_region); + TaintSet { directions: directions, regions: regions } + } + + fn fixed_point(&mut self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + undo_log: &[UndoLogEntry<'tcx>], + verifys: &[Verify<'tcx>]) { + let mut prev_len = 0; + while prev_len < self.len() { + debug!("tainted: prev_len = {:?} new_len = {:?}", + prev_len, self.len()); + + prev_len = self.len(); + + for undo_entry in undo_log { + match undo_entry { + &AddConstraint(ConstrainVarSubVar(a, b)) => { + self.add_edge(tcx.mk_region(ReVar(a)), + tcx.mk_region(ReVar(b))); + } + &AddConstraint(ConstrainRegSubVar(a, b)) => { + self.add_edge(a, tcx.mk_region(ReVar(b))); + } + &AddConstraint(ConstrainVarSubReg(a, b)) => { + self.add_edge(tcx.mk_region(ReVar(a)), b); + } + &AddConstraint(ConstrainRegSubReg(a, b)) => { + self.add_edge(a, b); + } + &AddGiven(a, b) => { + self.add_edge(tcx.mk_region(ReFree(a)), + 
tcx.mk_region(ReVar(b))); + } + &AddVerify(i) => { + verifys[i].bound.for_each_region(&mut |b| { + self.add_edge(verifys[i].region, b); + }); + } + &Purged | + &AddCombination(..) | + &AddVar(..) | + &OpenSnapshot | + &CommitedSnapshot => {} + } + } + } + } + + fn into_set(self) -> FxHashSet<&'tcx ty::Region> { + self.regions + } + + fn len(&self) -> usize { + self.regions.len() + } + + fn add_edge(&mut self, + source: &'tcx ty::Region, + target: &'tcx ty::Region) { + if self.directions.incoming { + if self.regions.contains(&target) { + self.regions.insert(source); + } + } + + if self.directions.outgoing { + if self.regions.contains(&source) { + self.regions.insert(target); + } + } + } +} + +impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> RegionVarBindings<'a, 'gcx, 'tcx> { + RegionVarBindings { + tcx: tcx, + var_origins: RefCell::new(Vec::new()), + values: RefCell::new(None), + constraints: RefCell::new(FxHashMap()), + verifys: RefCell::new(Vec::new()), + givens: RefCell::new(FxHashSet()), + lubs: RefCell::new(FxHashMap()), + glbs: RefCell::new(FxHashMap()), + skolemization_count: Cell::new(0), + bound_count: Cell::new(0), + undo_log: RefCell::new(Vec::new()), + unification_table: RefCell::new(UnificationTable::new()), + } + } + + fn in_snapshot(&self) -> bool { + !self.undo_log.borrow().is_empty() + } + + pub fn start_snapshot(&self) -> RegionSnapshot { + let length = self.undo_log.borrow().len(); + debug!("RegionVarBindings: start_snapshot({})", length); + self.undo_log.borrow_mut().push(OpenSnapshot); + RegionSnapshot { + length: length, + region_snapshot: self.unification_table.borrow_mut().snapshot(), + skolemization_count: self.skolemization_count.get(), + } + } + + pub fn commit(&self, snapshot: RegionSnapshot) { + debug!("RegionVarBindings: commit({})", snapshot.length); + assert!(self.undo_log.borrow().len() > snapshot.length); + assert!((*self.undo_log.borrow())[snapshot.length] == OpenSnapshot); + 
assert!(self.skolemization_count.get() == snapshot.skolemization_count, + "failed to pop skolemized regions: {} now vs {} at start", + self.skolemization_count.get(), + snapshot.skolemization_count); + + let mut undo_log = self.undo_log.borrow_mut(); + if snapshot.length == 0 { + undo_log.truncate(0); + } else { + (*undo_log)[snapshot.length] = CommitedSnapshot; + } + self.unification_table.borrow_mut().commit(snapshot.region_snapshot); + } + + pub fn rollback_to(&self, snapshot: RegionSnapshot) { + debug!("RegionVarBindings: rollback_to({:?})", snapshot); + let mut undo_log = self.undo_log.borrow_mut(); + assert!(undo_log.len() > snapshot.length); + assert!((*undo_log)[snapshot.length] == OpenSnapshot); + while undo_log.len() > snapshot.length + 1 { + self.rollback_undo_entry(undo_log.pop().unwrap()); + } + let c = undo_log.pop().unwrap(); + assert!(c == OpenSnapshot); + self.skolemization_count.set(snapshot.skolemization_count); + self.unification_table.borrow_mut() + .rollback_to(snapshot.region_snapshot); + } + + pub fn rollback_undo_entry(&self, undo_entry: UndoLogEntry<'tcx>) { + match undo_entry { + OpenSnapshot => { + panic!("Failure to observe stack discipline"); + } + Purged | CommitedSnapshot => { + // nothing to do here + } + AddVar(vid) => { + let mut var_origins = self.var_origins.borrow_mut(); + var_origins.pop().unwrap(); + assert_eq!(var_origins.len(), vid.index as usize); + } + AddConstraint(ref constraint) => { + self.constraints.borrow_mut().remove(constraint); + } + AddVerify(index) => { + self.verifys.borrow_mut().pop(); + assert_eq!(self.verifys.borrow().len(), index); + } + AddGiven(sub, sup) => { + self.givens.borrow_mut().remove(&(sub, sup)); + } + AddCombination(Glb, ref regions) => { + self.glbs.borrow_mut().remove(regions); + } + AddCombination(Lub, ref regions) => { + self.lubs.borrow_mut().remove(regions); + } + } + } + + pub fn num_vars(&self) -> u32 { + let len = self.var_origins.borrow().len(); + // enforce no overflow + 
assert!(len as u32 as usize == len); + len as u32 + } + + pub fn new_region_var(&self, origin: RegionVariableOrigin) -> RegionVid { + let vid = RegionVid { index: self.num_vars() }; + self.var_origins.borrow_mut().push(origin.clone()); + + let u_vid = self.unification_table.borrow_mut().new_key( + unify_key::RegionVidKey { min_vid: vid } + ); + assert_eq!(vid, u_vid); + if self.in_snapshot() { + self.undo_log.borrow_mut().push(AddVar(vid)); + } + debug!("created new region variable {:?} with origin {:?}", + vid, + origin); + return vid; + } + + pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin { + self.var_origins.borrow()[vid.index as usize].clone() + } + + /// Creates a new skolemized region. Skolemized regions are fresh + /// regions used when performing higher-ranked computations. They + /// must be used in a very particular way and are never supposed + /// to "escape" out into error messages or the code at large. + /// + /// The idea is to always create a snapshot. Skolemized regions + /// can be created in the context of this snapshot, but before the + /// snapshot is committed or rolled back, they must be popped + /// (using `pop_skolemized_regions`), so that their numbers can be + /// recycled. Normally you don't have to think about this: you use + /// the APIs in `higher_ranked/mod.rs`, such as + /// `skolemize_late_bound_regions` and `plug_leaks`, which will + /// guide you on this path (ensure that the `SkolemizationMap` is + /// consumed and you are good). There are also somewhat extensive + /// comments in `higher_ranked/README.md`. + /// + /// The `snapshot` argument to this function is not really used; + /// it's just there to make it explicit which snapshot bounds the + /// skolemized region that results. It should always be the top-most snapshot. 
+ pub fn push_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) + -> &'tcx Region { + assert!(self.in_snapshot()); + assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot); + + let sc = self.skolemization_count.get(); + self.skolemization_count.set(sc + 1); + self.tcx.mk_region(ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br)) + } + + /// Removes all the edges to/from the skolemized regions that are + /// in `skols`. This is used after a higher-ranked operation + /// completes to remove all trace of the skolemized regions + /// created in that time. + pub fn pop_skolemized(&self, + skols: &FxHashSet<&'tcx ty::Region>, + snapshot: &RegionSnapshot) { + debug!("pop_skolemized_regions(skols={:?})", skols); + + assert!(self.in_snapshot()); + assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot); + assert!(self.skolemization_count.get() as usize >= skols.len(), + "popping more skolemized variables than actually exist, \ + sc now = {}, skols.len = {}", + self.skolemization_count.get(), + skols.len()); + + let last_to_pop = self.skolemization_count.get(); + let first_to_pop = last_to_pop - (skols.len() as u32); + + assert!(first_to_pop >= snapshot.skolemization_count, + "popping more regions than snapshot contains, \ + sc now = {}, sc then = {}, skols.len = {}", + self.skolemization_count.get(), + snapshot.skolemization_count, + skols.len()); + debug_assert! 
{ + skols.iter() + .all(|&k| match *k { + ty::ReSkolemized(index, _) => + index.index >= first_to_pop && + index.index < last_to_pop, + _ => + false + }), + "invalid skolemization keys or keys out of range ({}..{}): {:?}", + snapshot.skolemization_count, + self.skolemization_count.get(), + skols + } + + let mut undo_log = self.undo_log.borrow_mut(); + + let constraints_to_kill: Vec = + undo_log.iter() + .enumerate() + .rev() + .filter(|&(_, undo_entry)| kill_constraint(skols, undo_entry)) + .map(|(index, _)| index) + .collect(); + + for index in constraints_to_kill { + let undo_entry = mem::replace(&mut undo_log[index], Purged); + self.rollback_undo_entry(undo_entry); + } + + self.skolemization_count.set(snapshot.skolemization_count); + return; + + fn kill_constraint<'tcx>(skols: &FxHashSet<&'tcx ty::Region>, + undo_entry: &UndoLogEntry<'tcx>) + -> bool { + match undo_entry { + &AddConstraint(ConstrainVarSubVar(..)) => + false, + &AddConstraint(ConstrainRegSubVar(a, _)) => + skols.contains(&a), + &AddConstraint(ConstrainVarSubReg(_, b)) => + skols.contains(&b), + &AddConstraint(ConstrainRegSubReg(a, b)) => + skols.contains(&a) || skols.contains(&b), + &AddGiven(..) => + false, + &AddVerify(_) => + false, + &AddCombination(_, ref two_regions) => + skols.contains(&two_regions.a) || + skols.contains(&two_regions.b), + &AddVar(..) | + &OpenSnapshot | + &Purged | + &CommitedSnapshot => + false, + } + } + + } + + pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> &'tcx Region { + // Creates a fresh bound variable for use in GLB computations. + // See discussion of GLB computation in the large comment at + // the top of this file for more details. + // + // This computation is potentially wrong in the face of + // rollover. 
It's conceivable, if unlikely, that one might + // wind up with accidental capture for nested functions in + // that case, if the outer function had bound regions created + // a very long time before and the inner function somehow + // wound up rolling over such that supposedly fresh + // identifiers were in fact shadowed. For now, we just assert + // that there is no rollover -- eventually we should try to be + // robust against this possibility, either by checking the set + // of bound identifiers that appear in a given expression and + // ensure that we generate one that is distinct, or by + // changing the representation of bound regions in a fn + // declaration + + let sc = self.bound_count.get(); + self.bound_count.set(sc + 1); + + if sc >= self.bound_count.get() { + bug!("rollover in RegionInference new_bound()"); + } + + self.tcx.mk_region(ReLateBound(debruijn, BrFresh(sc))) + } + + fn values_are_none(&self) -> bool { + self.values.borrow().is_none() + } + + fn add_constraint(&self, constraint: Constraint<'tcx>, origin: SubregionOrigin<'tcx>) { + // cannot add constraints once regions are resolved + assert!(self.values_are_none()); + + debug!("RegionVarBindings: add_constraint({:?})", constraint); + + if self.constraints.borrow_mut().insert(constraint, origin).is_none() { + if self.in_snapshot() { + self.undo_log.borrow_mut().push(AddConstraint(constraint)); + } + } + } + + fn add_verify(&self, verify: Verify<'tcx>) { + // cannot add verifys once regions are resolved + assert!(self.values_are_none()); + + debug!("RegionVarBindings: add_verify({:?})", verify); + + // skip no-op cases known to be satisfied + match verify.bound { + VerifyBound::AllBounds(ref bs) if bs.len() == 0 => { return; } + _ => { } + } + + let mut verifys = self.verifys.borrow_mut(); + let index = verifys.len(); + verifys.push(verify); + if self.in_snapshot() { + self.undo_log.borrow_mut().push(AddVerify(index)); + } + } + + pub fn add_given(&self, sub: ty::FreeRegion, sup: 
ty::RegionVid) { + // cannot add givens once regions are resolved + assert!(self.values_are_none()); + + let mut givens = self.givens.borrow_mut(); + if givens.insert((sub, sup)) { + debug!("add_given({:?} <= {:?})", sub, sup); + + self.undo_log.borrow_mut().push(AddGiven(sub, sup)); + } + } + + pub fn make_eqregion(&self, + origin: SubregionOrigin<'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) { + if sub != sup { + // Eventually, it would be nice to add direct support for + // equating regions. + self.make_subregion(origin.clone(), sub, sup); + self.make_subregion(origin, sup, sub); + + if let (ty::ReVar(sub), ty::ReVar(sup)) = (*sub, *sup) { + self.unification_table.borrow_mut().union(sub, sup); + } + } + } + + pub fn make_subregion(&self, + origin: SubregionOrigin<'tcx>, + sub: &'tcx Region, + sup: &'tcx Region) { + // cannot add constraints once regions are resolved + assert!(self.values_are_none()); + + debug!("RegionVarBindings: make_subregion({:?}, {:?}) due to {:?}", + sub, + sup, + origin); + + match (sub, sup) { + (&ReEarlyBound(..), _) | + (&ReLateBound(..), _) | + (_, &ReEarlyBound(..)) | + (_, &ReLateBound(..)) => { + span_bug!(origin.span(), + "cannot relate bound region: {:?} <= {:?}", + sub, + sup); + } + (_, &ReStatic) => { + // all regions are subregions of static, so we can ignore this + } + (&ReVar(sub_id), &ReVar(sup_id)) => { + self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin); + } + (_, &ReVar(sup_id)) => { + self.add_constraint(ConstrainRegSubVar(sub, sup_id), origin); + } + (&ReVar(sub_id), _) => { + self.add_constraint(ConstrainVarSubReg(sub_id, sup), origin); + } + _ => { + self.add_constraint(ConstrainRegSubReg(sub, sup), origin); + } + } + } + + /// See `Verify::VerifyGenericBound` + pub fn verify_generic_bound(&self, + origin: SubregionOrigin<'tcx>, + kind: GenericKind<'tcx>, + sub: &'tcx Region, + bound: VerifyBound<'tcx>) { + self.add_verify(Verify { + kind: kind, + origin: origin, + region: sub, + bound: bound + }); 
+ } + + pub fn lub_regions(&self, + origin: SubregionOrigin<'tcx>, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { + // cannot add constraints once regions are resolved + assert!(self.values_are_none()); + + debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b); + match (a, b) { + (r @ &ReStatic, _) | (_, r @ &ReStatic) => { + r // nothing lives longer than static + } + + _ if a == b => { + a // LUB(a,a) = a + } + + _ => { + self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| { + this.make_subregion(origin.clone(), old_r, new_r) + }) + } + } + } + + pub fn glb_regions(&self, + origin: SubregionOrigin<'tcx>, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { + // cannot add constraints once regions are resolved + assert!(self.values_are_none()); + + debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b); + match (a, b) { + (&ReStatic, r) | (r, &ReStatic) => { + r // static lives longer than everything else + } + + _ if a == b => { + a // GLB(a,a) = a + } + + _ => { + self.combine_vars(Glb, a, b, origin.clone(), |this, old_r, new_r| { + this.make_subregion(origin.clone(), new_r, old_r) + }) + } + } + } + + pub fn resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region { + match *self.values.borrow() { + None => { + span_bug!((*self.var_origins.borrow())[rid.index as usize].span(), + "attempt to resolve region variable before values have \ + been computed!") + } + Some(ref values) => { + let r = lookup(self.tcx, values, rid); + debug!("resolve_var({:?}) = {:?}", rid, r); + r + } + } + } + + pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> &'tcx ty::Region { + let vid = self.unification_table.borrow_mut().find_value(rid).min_vid; + self.tcx.mk_region(ty::ReVar(vid)) + } + + fn combine_map(&self, t: CombineMapType) -> &RefCell> { + match t { + Glb => &self.glbs, + Lub => &self.lubs, + } + } + + pub fn combine_vars(&self, + t: CombineMapType, + a: &'tcx Region, + b: &'tcx Region, + origin: SubregionOrigin<'tcx>, + mut 
relate: F) + -> &'tcx Region + where F: FnMut(&RegionVarBindings<'a, 'gcx, 'tcx>, &'tcx Region, &'tcx Region) + { + let vars = TwoRegions { a: a, b: b }; + if let Some(&c) = self.combine_map(t).borrow().get(&vars) { + return self.tcx.mk_region(ReVar(c)); + } + let c = self.new_region_var(MiscVariable(origin.span())); + self.combine_map(t).borrow_mut().insert(vars, c); + if self.in_snapshot() { + self.undo_log.borrow_mut().push(AddCombination(t, vars)); + } + relate(self, a, self.tcx.mk_region(ReVar(c))); + relate(self, b, self.tcx.mk_region(ReVar(c))); + debug!("combine_vars() c={:?}", c); + self.tcx.mk_region(ReVar(c)) + } + + pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec { + self.undo_log.borrow()[mark.length..] + .iter() + .filter_map(|&elt| { + match elt { + AddVar(vid) => Some(vid), + _ => None, + } + }) + .collect() + } + + /// Computes all regions that have been related to `r0` since the + /// mark `mark` was made---`r0` itself will be the first + /// entry. The `directions` parameter controls what kind of + /// relations are considered. For example, one can say that only + /// "incoming" edges to `r0` are desired, in which case one will + /// get the set of regions `{r|r <= r0}`. This is used when + /// checking whether skolemized regions are being improperly + /// related to other regions. + pub fn tainted(&self, + mark: &RegionSnapshot, + r0: &'tcx Region, + directions: TaintDirections) + -> FxHashSet<&'tcx ty::Region> { + debug!("tainted(mark={:?}, r0={:?}, directions={:?})", + mark, r0, directions); + + // `result_set` acts as a worklist: we explore all outgoing + // edges and add any new regions we find to result_set. This + // is not a terribly efficient implementation. 
+ let mut taint_set = TaintSet::new(directions, r0); + taint_set.fixed_point(self.tcx, + &self.undo_log.borrow()[mark.length..], + &self.verifys.borrow()); + debug!("tainted: result={:?}", taint_set.regions); + return taint_set.into_set(); + } + + /// This function performs the actual region resolution. It must be + /// called after all constraints have been added. It performs a + /// fixed-point iteration to find region values which satisfy all + /// constraints, assuming such values can be found; if they cannot, + /// errors are reported. + pub fn resolve_regions(&self, + free_regions: &FreeRegionMap, + subject_node: ast::NodeId) + -> Vec> { + debug!("RegionVarBindings: resolve_regions()"); + let mut errors = vec![]; + let v = self.infer_variable_values(free_regions, &mut errors, subject_node); + *self.values.borrow_mut() = Some(v); + errors + } + + fn lub_concrete_regions(&self, + free_regions: &FreeRegionMap, + a: &'tcx Region, + b: &'tcx Region) + -> &'tcx Region { + match (a, b) { + (&ReLateBound(..), _) | + (_, &ReLateBound(..)) | + (&ReEarlyBound(..), _) | + (_, &ReEarlyBound(..)) | + (&ReErased, _) | + (_, &ReErased) => { + bug!("cannot relate region: LUB({:?}, {:?})", a, b); + } + + (r @ &ReStatic, _) | (_, r @ &ReStatic) => { + r // nothing lives longer than static + } + + (&ReEmpty, r) | (r, &ReEmpty) => { + r // everything lives longer than empty + } + + (&ReVar(v_id), _) | (_, &ReVar(v_id)) => { + span_bug!((*self.var_origins.borrow())[v_id.index as usize].span(), + "lub_concrete_regions invoked with non-concrete \ + regions: {:?}, {:?}", + a, + b); + } + + (&ReFree(fr), &ReScope(s_id)) | + (&ReScope(s_id), &ReFree(fr)) => { + // A "free" region can be interpreted as "some region + // at least as big as the block fr.scope_id". 
So, we can + // reasonably compare free regions and scopes: + let r_id = self.tcx.region_maps.nearest_common_ancestor(fr.scope, s_id); + + if r_id == fr.scope { + // if the free region's scope `fr.scope_id` is bigger than + // the scope region `s_id`, then the LUB is the free + // region itself: + self.tcx.mk_region(ReFree(fr)) + } else { + // otherwise, we don't know what the free region is, + // so we must conservatively say the LUB is static: + self.tcx.mk_region(ReStatic) + } + } + + (&ReScope(a_id), &ReScope(b_id)) => { + // The region corresponding to an outer block is a + // subtype of the region corresponding to an inner + // block. + self.tcx.mk_region(ReScope( + self.tcx.region_maps.nearest_common_ancestor(a_id, b_id))) + } + + (&ReFree(a_fr), &ReFree(b_fr)) => { + self.tcx.mk_region(free_regions.lub_free_regions(a_fr, b_fr)) + } + + // For these types, we cannot define any additional + // relationship: + (&ReSkolemized(..), _) | + (_, &ReSkolemized(..)) => { + if a == b { + a + } else { + self.tcx.mk_region(ReStatic) + } + } + } + } +} + +// ______________________________________________________________________ + +#[derive(Copy, Clone, Debug)] +pub enum VarValue<'tcx> { + Value(&'tcx Region), + ErrorValue, +} + +struct RegionAndOrigin<'tcx> { + region: &'tcx Region, + origin: SubregionOrigin<'tcx>, +} + +type RegionGraph<'tcx> = graph::Graph<(), Constraint<'tcx>>; + +impl<'a, 'gcx, 'tcx> RegionVarBindings<'a, 'gcx, 'tcx> { + fn infer_variable_values(&self, + free_regions: &FreeRegionMap, + errors: &mut Vec>, + subject: ast::NodeId) + -> Vec> { + let mut var_data = self.construct_var_data(); + + // Dorky hack to cause `dump_constraints` to only get called + // if debug mode is enabled: + debug!("----() End constraint listing (subject={}) {:?}---", + subject, + self.dump_constraints(subject)); + graphviz::maybe_print_constraints_for(self, subject); + + let graph = self.construct_graph(); + self.expand_givens(&graph); + self.expansion(free_regions, &mut 
var_data); + self.collect_errors(free_regions, &mut var_data, errors); + self.collect_var_errors(free_regions, &var_data, &graph, errors); + var_data + } + + fn construct_var_data(&self) -> Vec> { + (0..self.num_vars() as usize) + .map(|_| Value(self.tcx.mk_region(ty::ReEmpty))) + .collect() + } + + fn dump_constraints(&self, subject: ast::NodeId) { + debug!("----() Start constraint listing (subject={}) ()----", + subject); + for (idx, (constraint, _)) in self.constraints.borrow().iter().enumerate() { + debug!("Constraint {} => {:?}", idx, constraint); + } + } + + fn expand_givens(&self, graph: &RegionGraph) { + // Givens are a kind of horrible hack to account for + // constraints like 'c <= '0 that are known to hold due to + // closure signatures (see the comment above on the `givens` + // field). They should go away. But until they do, the role + // of this fn is to account for the transitive nature: + // + // Given 'c <= '0 + // and '0 <= '1 + // then 'c <= '1 + + let mut givens = self.givens.borrow_mut(); + let seeds: Vec<_> = givens.iter().cloned().collect(); + for (fr, vid) in seeds { + let seed_index = NodeIndex(vid.index as usize); + for succ_index in graph.depth_traverse(seed_index, OUTGOING) { + let succ_index = succ_index.0 as u32; + if succ_index < self.num_vars() { + let succ_vid = RegionVid { index: succ_index }; + givens.insert((fr, succ_vid)); + } + } + } + } + + fn expansion(&self, free_regions: &FreeRegionMap, var_values: &mut [VarValue<'tcx>]) { + self.iterate_until_fixed_point("Expansion", |constraint, origin| { + debug!("expansion: constraint={:?} origin={:?}", + constraint, origin); + match *constraint { + ConstrainRegSubVar(a_region, b_vid) => { + let b_data = &mut var_values[b_vid.index as usize]; + self.expand_node(free_regions, a_region, b_vid, b_data) + } + ConstrainVarSubVar(a_vid, b_vid) => { + match var_values[a_vid.index as usize] { + ErrorValue => false, + Value(a_region) => { + let b_node = &mut var_values[b_vid.index as usize]; + 
self.expand_node(free_regions, a_region, b_vid, b_node) + } + } + } + ConstrainRegSubReg(..) | + ConstrainVarSubReg(..) => { + // These constraints are checked after expansion + // is done, in `collect_errors`. + false + } + } + }) + } + + fn expand_node(&self, + free_regions: &FreeRegionMap, + a_region: &'tcx Region, + b_vid: RegionVid, + b_data: &mut VarValue<'tcx>) + -> bool { + debug!("expand_node({:?}, {:?} == {:?})", + a_region, + b_vid, + b_data); + + // Check if this relationship is implied by a given. + match *a_region { + ty::ReFree(fr) => { + if self.givens.borrow().contains(&(fr, b_vid)) { + debug!("given"); + return false; + } + } + _ => {} + } + + match *b_data { + Value(cur_region) => { + let lub = self.lub_concrete_regions(free_regions, a_region, cur_region); + if lub == cur_region { + return false; + } + + debug!("Expanding value of {:?} from {:?} to {:?}", + b_vid, + cur_region, + lub); + + *b_data = Value(lub); + return true; + } + + ErrorValue => { + return false; + } + } + } + + /// After expansion is complete, go and check upper bounds (i.e., + /// cases where the region cannot grow larger than a fixed point) + /// and check that they are satisfied. + fn collect_errors(&self, + free_regions: &FreeRegionMap, + var_data: &mut Vec>, + errors: &mut Vec>) { + let constraints = self.constraints.borrow(); + for (constraint, origin) in constraints.iter() { + debug!("collect_errors: constraint={:?} origin={:?}", + constraint, origin); + match *constraint { + ConstrainRegSubVar(..) | + ConstrainVarSubVar(..) => { + // Expansion will ensure that these constraints hold. Ignore. 
+ } + + ConstrainRegSubReg(sub, sup) => { + if free_regions.is_subregion_of(self.tcx, sub, sup) { + continue; + } + + debug!("collect_errors: region error at {:?}: \ + cannot verify that {:?} <= {:?}", + origin, + sub, + sup); + + errors.push(ConcreteFailure((*origin).clone(), sub, sup)); + } + + ConstrainVarSubReg(a_vid, b_region) => { + let a_data = &mut var_data[a_vid.index as usize]; + debug!("contraction: {:?} == {:?}, {:?}", + a_vid, + a_data, + b_region); + + let a_region = match *a_data { + ErrorValue => continue, + Value(a_region) => a_region, + }; + + // Do not report these errors immediately: + // instead, set the variable value to error and + // collect them later. + if !free_regions.is_subregion_of(self.tcx, a_region, b_region) { + debug!("collect_errors: region error at {:?}: \ + cannot verify that {:?}={:?} <= {:?}", + origin, + a_vid, + a_region, + b_region); + *a_data = ErrorValue; + } + } + } + } + + for verify in self.verifys.borrow().iter() { + debug!("collect_errors: verify={:?}", verify); + let sub = normalize(self.tcx, var_data, verify.region); + if verify.bound.is_met(self.tcx, free_regions, var_data, sub) { + continue; + } + + debug!("collect_errors: region error at {:?}: \ + cannot verify that {:?} <= {:?}", + verify.origin, + verify.region, + verify.bound); + + errors.push(GenericBoundFailure(verify.origin.clone(), + verify.kind.clone(), + sub)); + } + } + + /// Go over the variables that were declared to be error variables + /// and create a `RegionResolutionError` for each of them. + fn collect_var_errors(&self, + free_regions: &FreeRegionMap, + var_data: &[VarValue<'tcx>], + graph: &RegionGraph<'tcx>, + errors: &mut Vec>) { + debug!("collect_var_errors"); + + // This is the best way that I have found to suppress + // duplicate and related errors. Basically we keep a set of + // flags for every node. 
Whenever an error occurs, we will + // walk some portion of the graph looking to find pairs of + // conflicting regions to report to the user. As we walk, we + // trip the flags from false to true, and if we find that + // we've already reported an error involving any particular + // node we just stop and don't report the current error. The + // idea is to report errors that derive from independent + // regions of the graph, but not those that derive from + // overlapping locations. + let mut dup_vec = vec![u32::MAX; self.num_vars() as usize]; + + for idx in 0..self.num_vars() as usize { + match var_data[idx] { + Value(_) => { + /* Inference successful */ + } + ErrorValue => { + /* Inference impossible, this value contains + inconsistent constraints. + + I think that in this case we should report an + error now---unlike the case above, we can't + wait to see whether the user needs the result + of this variable. The reason is that the mere + existence of this variable implies that the + region graph is inconsistent, whether or not it + is used. + + For example, we may have created a region + variable that is the GLB of two other regions + which do not have a GLB. Even if that variable + is not used, it implies that those two regions + *should* have a GLB. + + At least I think this is true. It may be that + the mere existence of a conflict in a region variable + that is not used is not a problem, so if this rule + starts to create problems we'll have to revisit + this portion of the code and think hard about it. 
=) */ + + let node_vid = RegionVid { index: idx as u32 }; + self.collect_error_for_expanding_node(free_regions, + graph, + &mut dup_vec, + node_vid, + errors); + } + } + } + } + + fn construct_graph(&self) -> RegionGraph<'tcx> { + let num_vars = self.num_vars(); + + let constraints = self.constraints.borrow(); + + let mut graph = graph::Graph::new(); + + for _ in 0..num_vars { + graph.add_node(()); + } + + // Issue #30438: two distinct dummy nodes, one for incoming + // edges (dummy_source) and another for outgoing edges + // (dummy_sink). In `dummy -> a -> b -> dummy`, using one + // dummy node leads one to think (erroneously) there exists a + // path from `b` to `a`. Two dummy nodes sidesteps the issue. + let dummy_source = graph.add_node(()); + let dummy_sink = graph.add_node(()); + + for (constraint, _) in constraints.iter() { + match *constraint { + ConstrainVarSubVar(a_id, b_id) => { + graph.add_edge(NodeIndex(a_id.index as usize), + NodeIndex(b_id.index as usize), + *constraint); + } + ConstrainRegSubVar(_, b_id) => { + graph.add_edge(dummy_source, NodeIndex(b_id.index as usize), *constraint); + } + ConstrainVarSubReg(a_id, _) => { + graph.add_edge(NodeIndex(a_id.index as usize), dummy_sink, *constraint); + } + ConstrainRegSubReg(..) => { + // this would be an edge from `dummy_source` to + // `dummy_sink`; just ignore it. + } + } + } + + return graph; + } + + fn collect_error_for_expanding_node(&self, + free_regions: &FreeRegionMap, + graph: &RegionGraph<'tcx>, + dup_vec: &mut [u32], + node_idx: RegionVid, + errors: &mut Vec>) { + // Errors in expanding nodes result from a lower-bound that is + // not contained by an upper-bound. 
+ let (mut lower_bounds, lower_dup) = self.collect_concrete_regions(graph, + node_idx, + graph::INCOMING, + dup_vec); + let (mut upper_bounds, upper_dup) = self.collect_concrete_regions(graph, + node_idx, + graph::OUTGOING, + dup_vec); + + if lower_dup || upper_dup { + return; + } + + // We place free regions first because we are special casing + // SubSupConflict(ReFree, ReFree) when reporting error, and so + // the user will more likely get a specific suggestion. + fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering { + match (a.region, b.region) { + (&ReFree(..), &ReFree(..)) => Equal, + (&ReFree(..), _) => Less, + (_, &ReFree(..)) => Greater, + (..) => Equal, + } + } + lower_bounds.sort_by(|a, b| free_regions_first(a, b)); + upper_bounds.sort_by(|a, b| free_regions_first(a, b)); + + for lower_bound in &lower_bounds { + for upper_bound in &upper_bounds { + if !free_regions.is_subregion_of(self.tcx, lower_bound.region, upper_bound.region) { + let origin = (*self.var_origins.borrow())[node_idx.index as usize].clone(); + debug!("region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \ + sup: {:?}", + origin, + node_idx, + lower_bound.region, + upper_bound.region); + errors.push(SubSupConflict(origin, + lower_bound.origin.clone(), + lower_bound.region, + upper_bound.origin.clone(), + upper_bound.region)); + return; + } + } + } + + span_bug!((*self.var_origins.borrow())[node_idx.index as usize].span(), + "collect_error_for_expanding_node() could not find \ + error for var {:?}, lower_bounds={:?}, \ + upper_bounds={:?}", + node_idx, + lower_bounds, + upper_bounds); + } + + fn collect_concrete_regions(&self, + graph: &RegionGraph<'tcx>, + orig_node_idx: RegionVid, + dir: Direction, + dup_vec: &mut [u32]) + -> (Vec>, bool) { + struct WalkState<'tcx> { + set: FxHashSet, + stack: Vec, + result: Vec>, + dup_found: bool, + } + let mut state = WalkState { + set: FxHashSet(), + stack: vec![orig_node_idx], + result: Vec::new(), + dup_found: 
false, + }; + state.set.insert(orig_node_idx); + + // to start off the process, walk the source node in the + // direction specified + process_edges(self, &mut state, graph, orig_node_idx, dir); + + while !state.stack.is_empty() { + let node_idx = state.stack.pop().unwrap(); + + // check whether we've visited this node on some previous walk + if dup_vec[node_idx.index as usize] == u32::MAX { + dup_vec[node_idx.index as usize] = orig_node_idx.index; + } else if dup_vec[node_idx.index as usize] != orig_node_idx.index { + state.dup_found = true; + } + + debug!("collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})", + orig_node_idx, + node_idx); + + process_edges(self, &mut state, graph, node_idx, dir); + } + + let WalkState {result, dup_found, ..} = state; + return (result, dup_found); + + fn process_edges<'a, 'gcx, 'tcx>(this: &RegionVarBindings<'a, 'gcx, 'tcx>, + state: &mut WalkState<'tcx>, + graph: &RegionGraph<'tcx>, + source_vid: RegionVid, + dir: Direction) { + debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir); + + let source_node_index = NodeIndex(source_vid.index as usize); + for (_, edge) in graph.adjacent_edges(source_node_index, dir) { + match edge.data { + ConstrainVarSubVar(from_vid, to_vid) => { + let opp_vid = if from_vid == source_vid { + to_vid + } else { + from_vid + }; + if state.set.insert(opp_vid) { + state.stack.push(opp_vid); + } + } + + ConstrainRegSubVar(region, _) | + ConstrainVarSubReg(_, region) => { + state.result.push(RegionAndOrigin { + region: region, + origin: this.constraints.borrow().get(&edge.data).unwrap().clone(), + }); + } + + ConstrainRegSubReg(..) 
=> { + panic!("cannot reach reg-sub-reg edge in region inference \ + post-processing") + } + } + } + } + } + + fn iterate_until_fixed_point(&self, tag: &str, mut body: F) + where F: FnMut(&Constraint<'tcx>, &SubregionOrigin<'tcx>) -> bool + { + let mut iteration = 0; + let mut changed = true; + while changed { + changed = false; + iteration += 1; + debug!("---- {} Iteration {}{}", "#", tag, iteration); + for (constraint, origin) in self.constraints.borrow().iter() { + let edge_changed = body(constraint, origin); + if edge_changed { + debug!("Updated due to constraint {:?}", constraint); + changed = true; + } + } + } + debug!("---- {} Complete after {} iteration(s)", tag, iteration); + } + +} + +fn normalize<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + values: &Vec>, + r: &'tcx ty::Region) + -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => lookup(tcx, values, rid), + _ => r, + } +} + +fn lookup<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + values: &Vec>, + rid: ty::RegionVid) + -> &'tcx ty::Region { + match values[rid.index as usize] { + Value(r) => r, + ErrorValue => tcx.mk_region(ReStatic), // Previously reported error. 
+ } +} + +impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin) + } +} + +impl fmt::Debug for RegionSnapshot { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RegionSnapshot(length={},skolemization={})", + self.length, self.skolemization_count) + } +} + +impl<'tcx> fmt::Debug for GenericKind<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + GenericKind::Param(ref p) => write!(f, "{:?}", p), + GenericKind::Projection(ref p) => write!(f, "{:?}", p), + } + } +} + +impl<'tcx> fmt::Display for GenericKind<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + GenericKind::Param(ref p) => write!(f, "{}", p), + GenericKind::Projection(ref p) => write!(f, "{}", p), + } + } +} + +impl<'a, 'gcx, 'tcx> GenericKind<'tcx> { + pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + match *self { + GenericKind::Param(ref p) => p.to_ty(tcx), + GenericKind::Projection(ref p) => tcx.mk_projection(p.trait_ref.clone(), p.item_name), + } + } +} + +impl<'a, 'gcx, 'tcx> VerifyBound<'tcx> { + fn for_each_region(&self, f: &mut FnMut(&'tcx ty::Region)) { + match self { + &VerifyBound::AnyRegion(ref rs) | + &VerifyBound::AllRegions(ref rs) => for &r in rs { + f(r); + }, + + &VerifyBound::AnyBound(ref bs) | + &VerifyBound::AllBounds(ref bs) => for b in bs { + b.for_each_region(f); + }, + } + } + + pub fn must_hold(&self) -> bool { + match self { + &VerifyBound::AnyRegion(ref bs) => bs.contains(&&ty::ReStatic), + &VerifyBound::AllRegions(ref bs) => bs.is_empty(), + &VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()), + &VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()), + } + } + + pub fn cannot_hold(&self) -> bool { + match self { + &VerifyBound::AnyRegion(ref bs) => bs.is_empty(), + &VerifyBound::AllRegions(ref bs) => bs.contains(&&ty::ReEmpty), + 
&VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()), + &VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()), + } + } + + pub fn or(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> { + if self.must_hold() || vb.cannot_hold() { + self + } else if self.cannot_hold() || vb.must_hold() { + vb + } else { + VerifyBound::AnyBound(vec![self, vb]) + } + } + + pub fn and(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> { + if self.must_hold() && vb.must_hold() { + self + } else if self.cannot_hold() && vb.cannot_hold() { + self + } else { + VerifyBound::AllBounds(vec![self, vb]) + } + } + + fn is_met(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + free_regions: &FreeRegionMap, + var_values: &Vec>, + min: &'tcx ty::Region) + -> bool { + match self { + &VerifyBound::AnyRegion(ref rs) => + rs.iter() + .map(|&r| normalize(tcx, var_values, r)) + .any(|r| free_regions.is_subregion_of(tcx, min, r)), + + &VerifyBound::AllRegions(ref rs) => + rs.iter() + .map(|&r| normalize(tcx, var_values, r)) + .all(|r| free_regions.is_subregion_of(tcx, min, r)), + + &VerifyBound::AnyBound(ref bs) => + bs.iter() + .any(|b| b.is_met(tcx, free_regions, var_values, min)), + + &VerifyBound::AllBounds(ref bs) => + bs.iter() + .all(|b| b.is_met(tcx, free_regions, var_values, min)), + } + } +} diff --git a/src/librustc/infer/resolve.rs b/src/librustc/infer/resolve.rs new file mode 100644 index 0000000000000..357a03a2ffd7c --- /dev/null +++ b/src/librustc/infer/resolve.rs @@ -0,0 +1,147 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::{InferCtxt, FixupError, FixupResult}; +use ty::{self, Ty, TyCtxt, TypeFoldable}; +use ty::fold::TypeFolder; + +/////////////////////////////////////////////////////////////////////////// +// OPPORTUNISTIC TYPE RESOLVER + +/// The opportunistic type resolver can be used at any time. It simply replaces +/// type variables that have been unified with the things they have +/// been unified with (similar to `shallow_resolve`, but deep). This is +/// useful for printing messages etc but also required at various +/// points for correctness. +pub struct OpportunisticTypeResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx, 'tcx> OpportunisticTypeResolver<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self { + OpportunisticTypeResolver { infcx: infcx } + } +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeResolver<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if !t.has_infer_types() { + t // micro-optimize -- if there is nothing in this type that this fold affects... + } else { + let t0 = self.infcx.shallow_resolve(t); + t0.super_fold_with(self) + } + } +} + +/// The opportunistic type and region resolver is similar to the +/// opportunistic type resolver, but also opportunistly resolves +/// regions. It is useful for canonicalization. 
+pub struct OpportunisticTypeAndRegionResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) -> Self { + OpportunisticTypeAndRegionResolver { infcx: infcx } + } +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpportunisticTypeAndRegionResolver<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if !t.needs_infer() { + t // micro-optimize -- if there is nothing in this type that this fold affects... + } else { + let t0 = self.infcx.shallow_resolve(t); + t0.super_fold_with(self) + } + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid), + _ => r, + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// FULL TYPE RESOLUTION + +/// Full type resolution replaces all type and region variables with +/// their concrete results. If any variable cannot be replaced (never unified, etc) +/// then an `Err` result is returned. +pub fn fully_resolve<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + value: &T) -> FixupResult + where T : TypeFoldable<'tcx> +{ + let mut full_resolver = FullTypeResolver { infcx: infcx, err: None }; + let result = value.fold_with(&mut full_resolver); + match full_resolver.err { + None => Ok(result), + Some(e) => Err(e), + } +} + +// N.B. This type is not public because the protocol around checking the +// `err` field is not enforcable otherwise. 
+struct FullTypeResolver<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + err: Option, +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for FullTypeResolver<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if !t.needs_infer() { + t // micro-optimize -- if there is nothing in this type that this fold affects... + } else { + let t = self.infcx.shallow_resolve(t); + match t.sty { + ty::TyInfer(ty::TyVar(vid)) => { + self.err = Some(FixupError::UnresolvedTy(vid)); + self.tcx().types.err + } + ty::TyInfer(ty::IntVar(vid)) => { + self.err = Some(FixupError::UnresolvedIntTy(vid)); + self.tcx().types.err + } + ty::TyInfer(ty::FloatVar(vid)) => { + self.err = Some(FixupError::UnresolvedFloatTy(vid)); + self.tcx().types.err + } + ty::TyInfer(_) => { + bug!("Unexpected type in full type resolver: {:?}", t); + } + _ => { + t.super_fold_with(self) + } + } + } + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid), + _ => r, + } + } +} diff --git a/src/librustc/infer/sub.rs b/src/librustc/infer/sub.rs new file mode 100644 index 0000000000000..dae30ea97c80d --- /dev/null +++ b/src/librustc/infer/sub.rs @@ -0,0 +1,130 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::SubregionOrigin; +use super::combine::CombineFields; +use super::type_variable::{SubtypeOf, SupertypeOf}; + +use ty::{self, Ty, TyCtxt}; +use ty::TyVar; +use ty::relate::{Cause, Relate, RelateResult, TypeRelation}; +use std::mem; + +/// Ensures `a` is made a subtype of `b`. 
Returns `a` on success. +pub struct Sub<'combine, 'infcx: 'combine, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> { + fields: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, + a_is_expected: bool, +} + +impl<'combine, 'infcx, 'gcx, 'tcx> Sub<'combine, 'infcx, 'gcx, 'tcx> { + pub fn new(f: &'combine mut CombineFields<'infcx, 'gcx, 'tcx>, a_is_expected: bool) + -> Sub<'combine, 'infcx, 'gcx, 'tcx> + { + Sub { fields: f, a_is_expected: a_is_expected } + } + + fn with_expected_switched R>(&mut self, f: F) -> R { + self.a_is_expected = !self.a_is_expected; + let result = f(self); + self.a_is_expected = !self.a_is_expected; + result + } +} + +impl<'combine, 'infcx, 'gcx, 'tcx> TypeRelation<'infcx, 'gcx, 'tcx> + for Sub<'combine, 'infcx, 'gcx, 'tcx> +{ + fn tag(&self) -> &'static str { "Sub" } + fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> { self.fields.infcx.tcx } + fn a_is_expected(&self) -> bool { self.a_is_expected } + + fn with_cause(&mut self, cause: Cause, f: F) -> R + where F: FnOnce(&mut Self) -> R + { + debug!("sub with_cause={:?}", cause); + let old_cause = mem::replace(&mut self.fields.cause, Some(cause)); + let r = f(self); + debug!("sub old_cause={:?}", old_cause); + self.fields.cause = old_cause; + r + } + + fn relate_with_variance>(&mut self, + variance: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + match variance { + ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b), + ty::Covariant => self.relate(a, b), + ty::Bivariant => self.fields.bivariate(self.a_is_expected).relate(a, b), + ty::Contravariant => self.with_expected_switched(|this| { this.relate(b, a) }), + } + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("{}.tys({:?}, {:?})", self.tag(), a, b); + + if a == b { return Ok(a); } + + let infcx = self.fields.infcx; + let a = infcx.type_variables.borrow_mut().replace_if_possible(a); + let b = infcx.type_variables.borrow_mut().replace_if_possible(b); + match (&a.sty, &b.sty) { + 
(&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { + infcx.type_variables + .borrow_mut() + .relate_vars(a_id, SubtypeOf, b_id); + Ok(a) + } + (&ty::TyInfer(TyVar(a_id)), _) => { + self.fields + .instantiate(b, SupertypeOf, a_id, !self.a_is_expected)?; + Ok(a) + } + (_, &ty::TyInfer(TyVar(b_id))) => { + self.fields.instantiate(a, SubtypeOf, b_id, self.a_is_expected)?; + Ok(a) + } + + (&ty::TyError, _) | (_, &ty::TyError) => { + infcx.set_tainted_by_errors(); + Ok(self.tcx().types.err) + } + + _ => { + self.fields.infcx.super_combine_tys(self, a, b)?; + Ok(a) + } + } + } + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + debug!("{}.regions({:?}, {:?}) self.cause={:?}", + self.tag(), a, b, self.fields.cause); + + // FIXME -- we have more fine-grained information available + // from the "cause" field, we could perhaps give more tailored + // error messages. + let origin = SubregionOrigin::Subtype(self.fields.trace.clone()); + self.fields.infcx.region_vars.make_subregion(origin, a, b); + + Ok(a) + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + self.fields.higher_ranked_sub(a, b, self.a_is_expected) + } +} diff --git a/src/librustc/infer/type_variable.rs b/src/librustc/infer/type_variable.rs new file mode 100644 index 0000000000000..804765ec8811e --- /dev/null +++ b/src/librustc/infer/type_variable.rs @@ -0,0 +1,342 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub use self::RelationDir::*; +use self::TypeVariableValue::*; +use self::UndoEntry::*; +use hir::def_id::{DefId}; +use syntax::util::small_vector::SmallVector; +use syntax_pos::Span; +use ty::{self, Ty}; + +use std::cmp::min; +use std::marker::PhantomData; +use std::mem; +use std::u32; +use rustc_data_structures::snapshot_vec as sv; +use rustc_data_structures::unify as ut; + +pub struct TypeVariableTable<'tcx> { + values: sv::SnapshotVec>, + eq_relations: ut::UnificationTable, +} + +struct TypeVariableData<'tcx> { + value: TypeVariableValue<'tcx>, + diverging: bool +} + +enum TypeVariableValue<'tcx> { + Known(Ty<'tcx>), + Bounded { + relations: Vec, + default: Option> + } +} + +// We will use this to store the required information to recapitulate what happened when +// an error occurs. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct Default<'tcx> { + pub ty: Ty<'tcx>, + /// The span where the default was incurred + pub origin_span: Span, + /// The definition that the default originates from + pub def_id: DefId +} + +pub struct Snapshot { + snapshot: sv::Snapshot, + eq_snapshot: ut::Snapshot, +} + +enum UndoEntry<'tcx> { + // The type of the var was specified. 
+ SpecifyVar(ty::TyVid, Vec, Option>), + Relate(ty::TyVid, ty::TyVid), + RelateRange(ty::TyVid, usize), +} + +struct Delegate<'tcx>(PhantomData<&'tcx ()>); + +type Relation = (RelationDir, ty::TyVid); + +#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] +pub enum RelationDir { + SubtypeOf, SupertypeOf, EqTo, BiTo +} + +impl RelationDir { + fn opposite(self) -> RelationDir { + match self { + SubtypeOf => SupertypeOf, + SupertypeOf => SubtypeOf, + EqTo => EqTo, + BiTo => BiTo, + } + } +} + +impl<'tcx> TypeVariableTable<'tcx> { + pub fn new() -> TypeVariableTable<'tcx> { + TypeVariableTable { + values: sv::SnapshotVec::new(), + eq_relations: ut::UnificationTable::new(), + } + } + + fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec { + relations(self.values.get_mut(a.index as usize)) + } + + pub fn default(&self, vid: ty::TyVid) -> Option> { + match &self.values.get(vid.index as usize).value { + &Known(_) => None, + &Bounded { ref default, .. } => default.clone() + } + } + + pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool { + self.values.get(vid.index as usize).diverging + } + + /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`. + /// + /// Precondition: neither `a` nor `b` are known. 
+ pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) { + let a = self.root_var(a); + let b = self.root_var(b); + if a != b { + if dir == EqTo { + // a and b must be equal which we mark in the unification table + let root = self.eq_relations.union(a, b); + // In addition to being equal, all relations from the variable which is no longer + // the root must be added to the root so they are not forgotten as the other + // variable should no longer be referenced (other than to get the root) + let other = if a == root { b } else { a }; + let count = { + let (relations, root_relations) = if other.index < root.index { + let (pre, post) = self.values.split_at_mut(root.index as usize); + (relations(&mut pre[other.index as usize]), relations(&mut post[0])) + } else { + let (pre, post) = self.values.split_at_mut(other.index as usize); + (relations(&mut post[0]), relations(&mut pre[root.index as usize])) + }; + root_relations.extend_from_slice(relations); + relations.len() + }; + self.values.record(RelateRange(root, count)); + } else { + self.relations(a).push((dir, b)); + self.relations(b).push((dir.opposite(), a)); + self.values.record(Relate(a, b)); + } + } + } + + /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the + /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)` + /// where `vid1` is some other variable id. 
+ /// + /// Precondition: `vid` must be a root in the unification table + pub fn instantiate_and_push( + &mut self, + vid: ty::TyVid, + ty: Ty<'tcx>, + stack: &mut SmallVector<(Ty<'tcx>, RelationDir, ty::TyVid)>) + { + debug_assert!(self.root_var(vid) == vid); + let old_value = { + let value_ptr = &mut self.values.get_mut(vid.index as usize).value; + mem::replace(value_ptr, Known(ty)) + }; + + let (relations, default) = match old_value { + Bounded { relations, default } => (relations, default), + Known(_) => bug!("Asked to instantiate variable that is \ + already instantiated") + }; + + for &(dir, vid) in &relations { + stack.push((ty, dir, vid)); + } + + self.values.record(SpecifyVar(vid, relations, default)); + } + + pub fn new_var(&mut self, + diverging: bool, + default: Option>) -> ty::TyVid { + self.eq_relations.new_key(()); + let index = self.values.push(TypeVariableData { + value: Bounded { relations: vec![], default: default }, + diverging: diverging + }); + let v = ty::TyVid { index: index as u32 }; + debug!("new_var() -> {:?}", v); + v + } + + pub fn num_vars(&self) -> usize { + self.values.len() + } + + pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid { + self.eq_relations.find(vid) + } + + pub fn probe(&mut self, vid: ty::TyVid) -> Option> { + let vid = self.root_var(vid); + self.probe_root(vid) + } + + /// Retrieves the type of `vid` given that it is currently a root in the unification table + pub fn probe_root(&mut self, vid: ty::TyVid) -> Option> { + debug_assert!(self.root_var(vid) == vid); + match self.values.get(vid.index as usize).value { + Bounded { .. 
} => None, + Known(t) => Some(t) + } + } + + pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + match t.sty { + ty::TyInfer(ty::TyVar(v)) => { + match self.probe(v) { + None => t, + Some(u) => u + } + } + _ => t, + } + } + + pub fn snapshot(&mut self) -> Snapshot { + Snapshot { + snapshot: self.values.start_snapshot(), + eq_snapshot: self.eq_relations.snapshot(), + } + } + + pub fn rollback_to(&mut self, s: Snapshot) { + debug!("rollback_to{:?}", { + for action in self.values.actions_since_snapshot(&s.snapshot) { + match *action { + sv::UndoLog::NewElem(index) => { + debug!("inference variable _#{}t popped", index) + } + _ => { } + } + } + }); + + self.values.rollback_to(s.snapshot); + self.eq_relations.rollback_to(s.eq_snapshot); + } + + pub fn commit(&mut self, s: Snapshot) { + self.values.commit(s.snapshot); + self.eq_relations.commit(s.eq_snapshot); + } + + pub fn types_escaping_snapshot(&mut self, s: &Snapshot) -> Vec> { + /*! + * Find the set of type variables that existed *before* `s` + * but which have only been unified since `s` started, and + * return the types with which they were unified. So if we had + * a type variable `V0`, then we started the snapshot, then we + * created a type variable `V1`, unifed `V0` with `T0`, and + * unified `V1` with `T1`, this function would return `{T0}`. + */ + + let mut new_elem_threshold = u32::MAX; + let mut escaping_types = Vec::new(); + let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot); + debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len()); + for action in actions_since_snapshot { + match *action { + sv::UndoLog::NewElem(index) => { + // if any new variables were created during the + // snapshot, remember the lower index (which will + // always be the first one we see). Note that this + // action must precede those variables being + // specified. 
+ new_elem_threshold = min(new_elem_threshold, index as u32); + debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold); + } + + sv::UndoLog::Other(SpecifyVar(vid, ..)) => { + if vid.index < new_elem_threshold { + // quick check to see if this variable was + // created since the snapshot started or not. + let escaping_type = match self.values.get(vid.index as usize).value { + Bounded { .. } => bug!(), + Known(ty) => ty, + }; + escaping_types.push(escaping_type); + } + debug!("SpecifyVar({:?}) new_elem_threshold={}", vid, new_elem_threshold); + } + + _ => { } + } + } + + escaping_types + } + + pub fn unsolved_variables(&mut self) -> Vec { + (0..self.values.len()) + .filter_map(|i| { + let vid = ty::TyVid { index: i as u32 }; + if self.probe(vid).is_some() { + None + } else { + Some(vid) + } + }) + .collect() + } +} + +impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> { + type Value = TypeVariableData<'tcx>; + type Undo = UndoEntry<'tcx>; + + fn reverse(values: &mut Vec>, action: UndoEntry<'tcx>) { + match action { + SpecifyVar(vid, relations, default) => { + values[vid.index as usize].value = Bounded { + relations: relations, + default: default + }; + } + + Relate(a, b) => { + relations(&mut (*values)[a.index as usize]).pop(); + relations(&mut (*values)[b.index as usize]).pop(); + } + + RelateRange(i, n) => { + let relations = relations(&mut (*values)[i.index as usize]); + for _ in 0..n { + relations.pop(); + } + } + } + } +} + +fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec { + match v.value { + Known(_) => bug!("var_sub_var: variable is known"), + Bounded { ref mut relations, .. 
} => relations + } +} diff --git a/src/librustc/middle/infer/unify_key.rs b/src/librustc/infer/unify_key.rs similarity index 77% rename from src/librustc/middle/infer/unify_key.rs rename to src/librustc/infer/unify_key.rs index c83231930f502..d7e3a53ff25c9 100644 --- a/src/librustc/middle/infer/unify_key.rs +++ b/src/librustc/infer/unify_key.rs @@ -9,11 +9,11 @@ // except according to those terms. use syntax::ast; -use middle::ty::{self, IntVarValue, Ty}; +use ty::{self, IntVarValue, Ty, TyCtxt}; use rustc_data_structures::unify::{Combine, UnifyKey}; -pub trait ToType<'tcx> { - fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx>; +pub trait ToType { + fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>; } impl UnifyKey for ty::IntVid { @@ -50,8 +50,8 @@ impl UnifyKey for ty::RegionVid { fn tag(_: Option) -> &'static str { "RegionVid" } } -impl<'tcx> ToType<'tcx> for IntVarValue { - fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> { +impl ToType for IntVarValue { + fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match *self { ty::IntType(i) => tcx.mk_mach_int(i), ty::UintType(i) => tcx.mk_mach_uint(i), @@ -68,8 +68,15 @@ impl UnifyKey for ty::FloatVid { fn tag(_: Option) -> &'static str { "FloatVid" } } -impl<'tcx> ToType<'tcx> for ast::FloatTy { - fn to_type(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> { +impl ToType for ast::FloatTy { + fn to_type<'a, 'gcx, 'tcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { tcx.mk_mach_float(*self) } } + +impl UnifyKey for ty::TyVid { + type Value = (); + fn index(&self) -> u32 { self.index } + fn from_index(i: u32) -> ty::TyVid { ty::TyVid { index: i } } + fn tag(_: Option) -> &'static str { "TyVid" } +} diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 501a03f128664..7c26b710a53cb 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -19,32 +19,29 @@ #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = 
"https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(associated_consts)] +#![feature(borrow_state)] #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(cell_extras)] #![feature(collections)] +#![feature(conservative_impl_trait)] #![feature(const_fn)] -#![feature(enumset)] -#![feature(hashmap_hasher)] -#![feature(iter_arith)] +#![feature(core_intrinsics)] +#![cfg_attr(stage0, feature(item_like_imports))] #![feature(libc)] #![feature(nonzero)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] -#![feature(scoped_tls)] #![feature(slice_patterns)] #![feature(staged_api)] -#![feature(str_char)] -#![feature(time2)] +#![feature(unboxed_closures)] #![cfg_attr(test, feature(test))] -#![allow(trivial_casts)] - extern crate arena; extern crate core; extern crate flate; @@ -52,15 +49,16 @@ extern crate fmt_macros; extern crate getopts; extern crate graphviz; extern crate libc; -extern crate rbml; -extern crate rustc_llvm; +extern crate rustc_llvm as llvm; extern crate rustc_back; -extern crate rustc_front; extern crate rustc_data_structures; extern crate serialize; extern crate collections; +extern crate rustc_const_math; +extern crate rustc_errors as errors; #[macro_use] extern crate log; #[macro_use] extern crate syntax; +#[macro_use] extern crate syntax_pos; #[macro_use] #[no_link] extern crate rustc_bitflags; extern crate serialize as rustc_serialize; // used by deriving @@ -68,8 +66,6 @@ extern crate serialize as rustc_serialize; // used by deriving #[cfg(test)] extern crate test; -pub use rustc_llvm as llvm; - #[macro_use] mod macros; @@ -77,70 +73,43 @@ mod macros; // registered before they are used. 
pub mod diagnostics; -pub mod back { - pub use rustc_back::abi; - pub use rustc_back::rpath; - pub use rustc_back::svh; -} - +pub mod cfg; pub mod dep_graph; - -pub mod front { - pub mod check_attr; - pub mod map; -} +pub mod hir; +pub mod infer; +pub mod lint; pub mod middle { pub mod astconv_util; pub mod expr_use_visitor; // STAGE0: increase glitch immunity - pub mod cfg; - pub mod check_const; - pub mod check_static_recursion; - pub mod check_loop; - pub mod check_match; - pub mod check_rvalues; - pub mod const_eval; + pub mod const_val; + pub mod const_qualif; pub mod cstore; pub mod dataflow; pub mod dead; - pub mod def; - pub mod def_id; pub mod dependency_format; pub mod effect; pub mod entry; pub mod free_region; pub mod intrinsicck; - pub mod infer; - pub mod implicator; pub mod lang_items; pub mod liveness; pub mod mem_categorization; - pub mod pat_util; pub mod privacy; pub mod reachable; pub mod region; pub mod recursion_limit; pub mod resolve_lifetime; pub mod stability; - pub mod subst; - pub mod traits; - pub mod ty; pub mod weak_lang_items; } -pub mod mir { - pub mod repr; - pub mod tcx; - pub mod visit; -} - +pub mod mir; pub mod session; - -pub mod lint; +pub mod traits; +pub mod ty; pub mod util { - pub use rustc_back::sha2; - pub mod common; pub mod ppaux; pub mod nodemap; @@ -148,10 +117,6 @@ pub mod util { pub mod fs; } -pub mod lib { - pub use llvm; -} - // A private module so that macro-expanded idents like // `::rustc::lint::Lint` will also work in `rustc` itself. // diff --git a/src/librustc/lint/builtin.rs b/src/librustc/lint/builtin.rs index 3676e23064687..78d5067b273a5 100644 --- a/src/librustc/lint/builtin.rs +++ b/src/librustc/lint/builtin.rs @@ -94,12 +94,6 @@ declare_lint! { "unknown crate type found in #[crate_type] directive" } -declare_lint! { - pub VARIANT_SIZE_DIFFERENCES, - Allow, - "detects enums with widely varying variant sizes" -} - declare_lint! { pub FAT_PTR_TRANSMUTES, Allow, @@ -124,16 +118,29 @@ declare_lint! 
{ "detect private items in public interfaces not caught by the old implementation" } +declare_lint! { + pub INACCESSIBLE_EXTERN_CRATE, + Deny, + "use of inaccessible extern crate erroneously allowed" +} + declare_lint! { pub INVALID_TYPE_PARAM_DEFAULT, - Warn, + Deny, "type parameter default erroneously allowed in invalid location" } declare_lint! { - pub MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT, - Warn, - "unit struct or enum variant erroneously allowed to match via path::ident(..)" + pub ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN, + Deny, + "floating-point constants cannot be used in patterns" +} + +declare_lint! { + pub ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN, + Deny, + "constants of struct or enum type can only be used in a pattern if \ + the struct or enum has `#[derive(PartialEq, Eq)]`" } declare_lint! { @@ -142,6 +149,74 @@ declare_lint! { "uses of #[derive] with raw pointers are rarely correct" } +declare_lint! { + pub TRANSMUTE_FROM_FN_ITEM_TYPES, + Deny, + "transmute from function item type to pointer-sized type erroneously allowed" +} + +declare_lint! { + pub HR_LIFETIME_IN_ASSOC_TYPE, + Deny, + "binding for associated type references higher-ranked lifetime \ + that does not appear in the trait input types" +} + +declare_lint! { + pub OVERLAPPING_INHERENT_IMPLS, + Deny, + "two overlapping inherent impls define an item with the same name were erroneously allowed" +} + +declare_lint! { + pub RENAMED_AND_REMOVED_LINTS, + Warn, + "lints that have been renamed or removed" +} + +declare_lint! { + pub SUPER_OR_SELF_IN_GLOBAL_PATH, + Deny, + "detects super or self keywords at the beginning of global path" +} + +declare_lint! { + pub LIFETIME_UNDERSCORE, + Deny, + "lifetimes or labels named `'_` were erroneously allowed" +} + +declare_lint! { + pub SAFE_EXTERN_STATICS, + Warn, + "safe access to extern statics was erroneously allowed" +} + +declare_lint! 
{ + pub PATTERNS_IN_FNS_WITHOUT_BODY, + Warn, + "patterns in functions without body were erroneously allowed" +} + +declare_lint! { + pub EXTRA_REQUIREMENT_IN_IMPL, + Warn, + "detects extra requirements in impls that were erroneously allowed" +} + +declare_lint! { + pub LEGACY_DIRECTORY_OWNERSHIP, + Warn, + "non-inline, non-`#[path]` modules (e.g. `mod foo;`) were erroneously allowed in some files \ + not named `mod.rs`" +} + +declare_lint! { + pub DEPRECATED, + Warn, + "detects use of deprecated items" +} + /// Does nothing as a lint pass, but registers some `Lint`s /// which are used by other parts of the compiler. #[derive(Copy, Clone)] @@ -162,15 +237,27 @@ impl LintPass for HardwiredLints { UNUSED_FEATURES, STABLE_FEATURES, UNKNOWN_CRATE_TYPES, - VARIANT_SIZE_DIFFERENCES, FAT_PTR_TRANSMUTES, TRIVIAL_CASTS, TRIVIAL_NUMERIC_CASTS, PRIVATE_IN_PUBLIC, + INACCESSIBLE_EXTERN_CRATE, INVALID_TYPE_PARAM_DEFAULT, - MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT, + ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN, + ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN, CONST_ERR, - RAW_POINTER_DERIVE + RAW_POINTER_DERIVE, + TRANSMUTE_FROM_FN_ITEM_TYPES, + OVERLAPPING_INHERENT_IMPLS, + RENAMED_AND_REMOVED_LINTS, + SUPER_OR_SELF_IN_GLOBAL_PATH, + HR_LIFETIME_IN_ASSOC_TYPE, + LIFETIME_UNDERSCORE, + SAFE_EXTERN_STATICS, + PATTERNS_IN_FNS_WITHOUT_BODY, + EXTRA_REQUIREMENT_IN_IMPL, + LEGACY_DIRECTORY_OWNERSHIP, + DEPRECATED ) } } diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index c41a361fcc309..fba4f35074dbc 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -27,28 +27,24 @@ use self::TargetLint::*; use dep_graph::DepNode; use middle::privacy::AccessLevels; -use middle::ty; +use ty::TyCtxt; use session::{config, early_error, Session}; -use lint::{Level, LevelSource, Lint, LintId, LintArray, LintPass}; -use lint::{EarlyLintPass, EarlyLintPassObject, LateLintPass, LateLintPassObject}; +use lint::{Level, LevelSource, Lint, LintId, LintPass, LintSource}; 
+use lint::{EarlyLintPassObject, LateLintPassObject}; use lint::{Default, CommandLine, Node, Allow, Warn, Deny, Forbid}; use lint::builtin; -use util::nodemap::FnvHashMap; +use util::nodemap::FxHashMap; -use std::cell::RefCell; use std::cmp; use std::default::Default as StdDefault; use std::mem; -use syntax::ast_util::{self, IdVisitingOperation}; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; -use syntax::parse::token::InternedString; +use std::fmt; +use syntax::attr; use syntax::ast; -use syntax::attr::ThinAttributesExt; -use rustc_front::hir; -use rustc_front::util; -use rustc_front::intravisit as hir_visit; +use syntax_pos::{MultiSpan, Span}; +use errors::{self, Diagnostic, DiagnosticBuilder}; +use hir; +use hir::intravisit as hir_visit; use syntax::visit as ast_visit; /// Information about the registered lints. @@ -67,23 +63,63 @@ pub struct LintStore { late_passes: Option>, /// Lints indexed by name. - by_name: FnvHashMap, + by_name: FxHashMap, /// Current levels of each lint, and where they were set. - levels: FnvHashMap, + levels: FxHashMap, /// Map of registered lint groups to what lints they expand to. The bool /// is true if the lint group was added by a plugin. - lint_groups: FnvHashMap<&'static str, (Vec, bool)>, + lint_groups: FxHashMap<&'static str, (Vec, bool)>, /// Extra info for future incompatibility lints, descibing the /// issue or RFC that caused the incompatibility. - future_incompatible: FnvHashMap, + future_incompatible: FxHashMap, /// Maximum level a lint can be lint_cap: Option, } +/// When you call `add_lint` on the session, you wind up storing one +/// of these, which records a "potential lint" at a particular point. +#[derive(PartialEq)] +pub struct EarlyLint { + /// what lint is this? 
(e.g., `dead_code`) + pub id: LintId, + + /// the main message + pub diagnostic: Diagnostic, +} + +impl fmt::Debug for EarlyLint { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EarlyLint") + .field("id", &self.id) + .field("span", &self.diagnostic.span) + .field("diagnostic", &self.diagnostic) + .finish() + } +} + +pub trait IntoEarlyLint { + fn into_early_lint(self, id: LintId) -> EarlyLint; +} + +impl<'a, S: Into> IntoEarlyLint for (S, &'a str) { + fn into_early_lint(self, id: LintId) -> EarlyLint { + let (span, msg) = self; + let mut diagnostic = Diagnostic::new(errors::Level::Warning, msg); + diagnostic.set_span(span); + EarlyLint { id: id, diagnostic: diagnostic } + } +} + +impl IntoEarlyLint for Diagnostic { + fn into_early_lint(self, id: LintId) -> EarlyLint { + EarlyLint { id: id, diagnostic: self } + } +} + /// Extra information for a future incompatibility lint. See the call /// to `register_future_incompatible` in `librustc_lint/lib.rs` for /// guidelines. @@ -131,13 +167,13 @@ impl LintStore { pub fn new() -> LintStore { LintStore { - lints: vec!(), - early_passes: Some(vec!()), - late_passes: Some(vec!()), - by_name: FnvHashMap(), - levels: FnvHashMap(), - future_incompatible: FnvHashMap(), - lint_groups: FnvHashMap(), + lints: vec![], + early_passes: Some(vec![]), + late_passes: Some(vec![]), + by_name: FxHashMap(), + levels: FxHashMap(), + future_incompatible: FxHashMap(), + lint_groups: FxHashMap(), lint_cap: None, } } @@ -183,7 +219,7 @@ impl LintStore { // We load builtin lints first, so a duplicate is a compiler bug. // Use early_error when handling -W help with no crate. (None, _) => early_error(config::ErrorOutputType::default(), &msg[..]), - (Some(sess), false) => sess.bug(&msg[..]), + (Some(_), false) => bug!("{}", msg), // A duplicate name from a plugin is a user error. (Some(sess), true) => sess.err(&msg[..]), @@ -221,7 +257,7 @@ impl LintStore { // We load builtin lints first, so a duplicate is a compiler bug. 
// Use early_error when handling -W help with no crate. (None, _) => early_error(config::ErrorOutputType::default(), &msg[..]), - (Some(sess), false) => sess.bug(&msg[..]), + (Some(_), false) => bug!("{}", msg), // A duplicate name from a plugin is a user error. (Some(sess), true) => sess.err(&msg[..]), @@ -232,7 +268,7 @@ impl LintStore { pub fn register_renamed(&mut self, old_name: &str, new_name: &str) { let target = match self.by_name.get(new_name) { Some(&Id(lint_id)) => lint_id.clone(), - _ => panic!("invalid lint renaming of {} to {}", old_name, new_name) + _ => bug!("invalid lint renaming of {} to {}", old_name, new_name) }; self.by_name.insert(old_name.to_string(), Renamed(new_name.to_string(), target)); } @@ -267,8 +303,8 @@ impl LintStore { Err(FindLintError::Removed) => { } Err(_) => { match self.lint_groups.iter().map(|(&x, pair)| (x, pair.0.clone())) - .collect::>>() + .collect::>>() .get(&lint_name[..]) { Some(v) => { v.iter() @@ -298,7 +334,7 @@ impl LintStore { /// Context for lint checking after type checking. pub struct LateContext<'a, 'tcx: 'a> { /// Type context we're checking in. - pub tcx: &'a ty::ctxt<'tcx>, + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, /// The crate being checked. pub krate: &'a hir::Crate, @@ -313,10 +349,6 @@ pub struct LateContext<'a, 'tcx: 'a> { /// levels, this stack keeps track of the previous lint levels of whatever /// was modified. level_stack: Vec<(LintId, LevelSource)>, - - /// Level of lints for certain NodeIds, stored here because the body of - /// the lint needs to run in trans. - node_levels: RefCell>, } /// Context for lint checking of the AST, after expansion, before lowering to @@ -351,9 +383,8 @@ macro_rules! run_lints { ($cx:expr, $f:ident, $ps:ident, $($args:expr),*) => ({ /// Parse the lint attributes into a vector, with `Err`s for malformed lint /// attributes. Writing this as an iterator is an enormous mess. // See also the hir version just below. 
-pub fn gather_attrs(attrs: &[ast::Attribute]) - -> Vec> { - let mut out = vec!(); +pub fn gather_attrs(attrs: &[ast::Attribute]) -> Vec> { + let mut out = vec![]; for attr in attrs { let r = gather_attr(attr); out.extend(r.into_iter()); @@ -361,31 +392,26 @@ pub fn gather_attrs(attrs: &[ast::Attribute]) out } -pub fn gather_attr(attr: &ast::Attribute) - -> Vec> { - let mut out = vec!(); +pub fn gather_attr(attr: &ast::Attribute) -> Vec> { + let mut out = vec![]; - let level = match Level::from_str(&attr.name()) { + let level = match Level::from_str(&attr.name().as_str()) { None => return out, Some(lvl) => lvl, }; attr::mark_used(attr); - let meta = &attr.node.value; - let metas = match meta.node { - ast::MetaList(_, ref metas) => metas, - _ => { - out.push(Err(meta.span)); - return out; - } + let meta = &attr.value; + let metas = if let Some(metas) = meta.meta_item_list() { + metas + } else { + out.push(Err(meta.span)); + return out; }; - for meta in metas { - out.push(match meta.node { - ast::MetaWord(ref lint_name) => Ok((lint_name.clone(), level, meta.span)), - _ => Err(meta.span), - }); + for li in metas { + out.push(li.word().map_or(Err(li.span), |word| Ok((word.name(), level, word.span)))); } out @@ -398,22 +424,24 @@ pub fn gather_attr(attr: &ast::Attribute) /// in trans that run after the main lint pass is finished. Most /// lints elsewhere in the compiler should call /// `Session::add_lint()` instead. 
-pub fn raw_emit_lint(sess: &Session, - lints: &LintStore, - lint: &'static Lint, - lvlsrc: LevelSource, - span: Option, - msg: &str) { +pub fn raw_emit_lint>(sess: &Session, + lints: &LintStore, + lint: &'static Lint, + lvlsrc: LevelSource, + span: Option, + msg: &str) { raw_struct_lint(sess, lints, lint, lvlsrc, span, msg).emit(); } -pub fn raw_struct_lint<'a>(sess: &'a Session, - lints: &LintStore, - lint: &'static Lint, - lvlsrc: LevelSource, - span: Option, - msg: &str) - -> DiagnosticBuilder<'a> { +pub fn raw_struct_lint<'a, S>(sess: &'a Session, + lints: &LintStore, + lint: &'static Lint, + lvlsrc: LevelSource, + span: Option, + msg: &str) + -> DiagnosticBuilder<'a> + where S: Into +{ let (mut level, source) = lvlsrc; if level == Allow { return sess.diagnostic().struct_dummy(); @@ -430,7 +458,7 @@ pub fn raw_struct_lint<'a>(sess: &'a Session, format!("{} [-{} {}]", msg, match level { Warn => 'W', Deny => 'D', Forbid => 'F', - Allow => panic!() + Allow => bug!() }, name.replace("_", "-")) }, Node(src) => { @@ -447,7 +475,7 @@ pub fn raw_struct_lint<'a>(sess: &'a Session, (Warn, None) => sess.struct_warn(&msg[..]), (Deny, Some(sp)) => sess.struct_span_err(sp, &msg[..]), (Deny, None) => sess.struct_err(&msg[..]), - _ => sess.bug("impossible level in raw_emit_lint"), + _ => bug!("impossible level in raw_emit_lint"), }; // Check for future incompatibility lints and issue a stronger warning. 
@@ -457,17 +485,12 @@ pub fn raw_struct_lint<'a>(sess: &'a Session, it will become a hard error in a future release!"); let citation = format!("for more information, see {}", future_incompatible.reference); - if let Some(sp) = span { - err.fileline_warn(sp, &explanation); - err.fileline_note(sp, &citation); - } else { - err.warn(&explanation); - err.note(&citation); - } + err.warn(&explanation); + err.note(&citation); } if let Some(span) = def { - err.span_note(span, "lint level defined here"); + sess.diag_span_note_once(&mut err, lint, span, "lint level defined here"); } err @@ -489,15 +512,23 @@ pub trait LintContext: Sized { fn level_src(&self, lint: &'static Lint) -> Option { self.lints().levels.get(&LintId::of(lint)).map(|ls| match ls { - &(Warn, src) => { + &(Warn, _) => { let lint_id = LintId::of(builtin::WARNINGS); - (self.lints().get_level_source(lint_id).0, src) + let warn_src = self.lints().get_level_source(lint_id); + if warn_src.0 != Warn { + warn_src + } else { + *ls + } } _ => *ls }) } - fn lookup_and_emit(&self, lint: &'static Lint, span: Option, msg: &str) { + fn lookup_and_emit>(&self, + lint: &'static Lint, + span: Option, + msg: &str) { let (level, src) = match self.level_src(lint) { None => return, Some(pair) => pair, @@ -506,11 +537,11 @@ pub trait LintContext: Sized { raw_emit_lint(&self.sess(), self.lints(), lint, (level, src), span, msg); } - fn lookup(&self, - lint: &'static Lint, - span: Option, - msg: &str) - -> DiagnosticBuilder { + fn lookup>(&self, + lint: &'static Lint, + span: Option, + msg: &str) + -> DiagnosticBuilder { let (level, src) = match self.level_src(lint) { None => return self.sess().diagnostic().struct_dummy(), Some(pair) => pair, @@ -520,15 +551,24 @@ pub trait LintContext: Sized { } /// Emit a lint at the appropriate level, for a particular span. 
- fn span_lint(&self, lint: &'static Lint, span: Span, msg: &str) { + fn span_lint>(&self, lint: &'static Lint, span: S, msg: &str) { self.lookup_and_emit(lint, Some(span), msg); } - fn struct_span_lint(&self, - lint: &'static Lint, - span: Span, - msg: &str) - -> DiagnosticBuilder { + fn early_lint(&self, early_lint: EarlyLint) { + let span = early_lint.diagnostic.span.primary_span().expect("early lint w/o primary span"); + let mut err = self.struct_span_lint(early_lint.id.lint, + span, + &early_lint.diagnostic.message); + err.copy_details_not_message(&early_lint.diagnostic); + err.emit(); + } + + fn struct_span_lint>(&self, + lint: &'static Lint, + span: S, + msg: &str) + -> DiagnosticBuilder { self.lookup(lint, Some(span), msg) } @@ -538,7 +578,7 @@ pub trait LintContext: Sized { let mut err = self.lookup(lint, Some(span), msg); if self.current_level(lint) != Level::Allow { if note_span == span { - err.fileline_note(note_span, note); + err.note(note); } else { err.span_note(note_span, note); } @@ -559,7 +599,7 @@ pub trait LintContext: Sized { /// Emit a lint at the appropriate level, with no associated span. 
fn lint(&self, lint: &'static Lint, msg: &str) { - self.lookup_and_emit(lint, None, msg); + self.lookup_and_emit(lint, None as Option, msg); } /// Merge the lints specified by any lint attributes into the @@ -584,10 +624,10 @@ pub trait LintContext: Sized { continue; } Ok((lint_name, level, span)) => { - match self.lints().find_lint(&lint_name, &self.sess(), Some(span)) { + match self.lints().find_lint(&lint_name.as_str(), &self.sess(), Some(span)) { Ok(lint_id) => vec![(lint_id, level, span)], Err(FindLintError::NotFound) => { - match self.lints().lint_groups.get(&lint_name[..]) { + match self.lints().lint_groups.get(&*lint_name.as_str()) { Some(&(ref v, _)) => v.iter() .map(|lint_id: &LintId| (*lint_id, level, span)) @@ -606,13 +646,24 @@ pub trait LintContext: Sized { }; for (lint_id, level, span) in v { - let now = self.lints().get_level_source(lint_id).0; + let (now, now_source) = self.lints().get_level_source(lint_id); if now == Forbid && level != Forbid { - let lint_name = lint_id.as_str(); - span_err!(self.sess(), span, E0453, - "{}({}) overruled by outer forbid({})", - level.as_str(), lint_name, - lint_name); + let lint_name = lint_id.to_string(); + let mut diag_builder = struct_span_err!(self.sess(), span, E0453, + "{}({}) overruled by outer forbid({})", + level.as_str(), lint_name, + lint_name); + diag_builder.span_label(span, &format!("overruled by previous forbid")); + match now_source { + LintSource::Default => &mut diag_builder, + LintSource::Node(forbid_source_span) => { + diag_builder.span_label(forbid_source_span, + &format!("`forbid` level set here")) + }, + LintSource::CommandLine => { + diag_builder.note("`forbid` lint level was set on command line") + } + }.emit() } else if now != level { let src = self.lints().get_level_source(lint_id).1; self.level_stack().push((lint_id, (now, src))); @@ -649,20 +700,10 @@ impl<'a> EarlyContext<'a> { level_stack: vec![], } } - - fn visit_ids(&mut self, f: F) - where F: FnOnce(&mut ast_util::IdVisitor) - { - 
let mut v = ast_util::IdVisitor { - operation: self, - visited_outermost: false, - }; - f(&mut v); - } } impl<'a, 'tcx> LateContext<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>, + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: &'a hir::Crate, access_levels: &'a AccessLevels) -> LateContext<'a, 'tcx> { // We want to own the lint store, so move it out of the session. @@ -675,14 +716,15 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> { access_levels: access_levels, lints: lint_store, level_stack: vec![], - node_levels: RefCell::new(FnvHashMap()), } } - fn visit_ids(&mut self, f: F) - where F: FnOnce(&mut util::IdVisitor) + fn visit_ids<'b, F: 'b>(&'b mut self, f: F) + where F: FnOnce(&mut IdVisitor<'b, 'a, 'tcx>) { - let mut v = util::IdVisitor::new(self); + let mut v = IdVisitor::<'b, 'a, 'tcx> { + cx: self + }; f(&mut v); } } @@ -735,7 +777,7 @@ impl<'a> LintContext for EarlyContext<'a> { } fn enter_attrs(&mut self, attrs: &[ast::Attribute]) { - debug!("early context: exit_attrs({:?})", attrs); + debug!("early context: enter_attrs({:?})", attrs); run_lints!(self, enter_lint_attrs, early_passes, attrs); } @@ -745,42 +787,44 @@ impl<'a> LintContext for EarlyContext<'a> { } } -impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> { +impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { /// Because lints are scoped lexically, we want to walk nested /// items in the context of the outer item, so enable /// deep-walking. 
- fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> hir_visit::NestedVisitorMap<'this, 'tcx> { + hir_visit::NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, it: &hir::Item) { + fn visit_item(&mut self, it: &'tcx hir::Item) { self.with_lint_attrs(&it.attrs, |cx| { run_lints!(cx, check_item, late_passes, it); cx.visit_ids(|v| v.visit_item(it)); hir_visit::walk_item(cx, it); + run_lints!(cx, check_item_post, late_passes, it); }) } - fn visit_foreign_item(&mut self, it: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem) { self.with_lint_attrs(&it.attrs, |cx| { run_lints!(cx, check_foreign_item, late_passes, it); hir_visit::walk_foreign_item(cx, it); + run_lints!(cx, check_foreign_item_post, late_passes, it); }) } - fn visit_pat(&mut self, p: &hir::Pat) { + fn visit_pat(&mut self, p: &'tcx hir::Pat) { run_lints!(self, check_pat, late_passes, p); hir_visit::walk_pat(self, p); } - fn visit_expr(&mut self, e: &hir::Expr) { - self.with_lint_attrs(e.attrs.as_attr_slice(), |cx| { + fn visit_expr(&mut self, e: &'tcx hir::Expr) { + self.with_lint_attrs(&e.attrs, |cx| { run_lints!(cx, check_expr, late_passes, e); hir_visit::walk_expr(cx, e); }) } - fn visit_stmt(&mut self, s: &hir::Stmt) { + fn visit_stmt(&mut self, s: &'tcx hir::Stmt) { // statement attributes are actually just attributes on one of // - item // - local @@ -790,16 +834,18 @@ impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> { hir_visit::walk_stmt(self, s); } - fn visit_fn(&mut self, fk: hir_visit::FnKind<'v>, decl: &'v hir::FnDecl, - body: &'v hir::Block, span: Span, id: ast::NodeId) { + fn visit_fn(&mut self, fk: hir_visit::FnKind<'tcx>, decl: &'tcx hir::FnDecl, + body_id: hir::ExprId, span: Span, id: ast::NodeId) { + let body = self.tcx.map.expr(body_id); run_lints!(self, check_fn, late_passes, fk, decl, body, span, id); - 
hir_visit::walk_fn(self, fk, decl, body, span); + hir_visit::walk_fn(self, fk, decl, body_id, span, id); + run_lints!(self, check_fn_post, late_passes, fk, decl, body, span, id); } fn visit_variant_data(&mut self, - s: &hir::VariantData, + s: &'tcx hir::VariantData, name: ast::Name, - g: &hir::Generics, + g: &'tcx hir::Generics, item_id: ast::NodeId, _: Span) { run_lints!(self, check_struct_def, late_passes, s, name, g, item_id); @@ -807,14 +853,17 @@ impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> { run_lints!(self, check_struct_def_post, late_passes, s, name, g, item_id); } - fn visit_struct_field(&mut self, s: &hir::StructField) { - self.with_lint_attrs(&s.node.attrs, |cx| { + fn visit_struct_field(&mut self, s: &'tcx hir::StructField) { + self.with_lint_attrs(&s.attrs, |cx| { run_lints!(cx, check_struct_field, late_passes, s); hir_visit::walk_struct_field(cx, s); }) } - fn visit_variant(&mut self, v: &hir::Variant, g: &hir::Generics, item_id: ast::NodeId) { + fn visit_variant(&mut self, + v: &'tcx hir::Variant, + g: &'tcx hir::Generics, + item_id: ast::NodeId) { self.with_lint_attrs(&v.node.attrs, |cx| { run_lints!(cx, check_variant, late_passes, v, g); hir_visit::walk_variant(cx, v, g, item_id); @@ -822,7 +871,7 @@ impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> { }) } - fn visit_ty(&mut self, t: &hir::Ty) { + fn visit_ty(&mut self, t: &'tcx hir::Ty) { run_lints!(self, check_ty, late_passes, t); hir_visit::walk_ty(self, t); } @@ -831,93 +880,87 @@ impl<'a, 'tcx, 'v> hir_visit::Visitor<'v> for LateContext<'a, 'tcx> { run_lints!(self, check_name, late_passes, sp, name); } - fn visit_mod(&mut self, m: &hir::Mod, s: Span, n: ast::NodeId) { + fn visit_mod(&mut self, m: &'tcx hir::Mod, s: Span, n: ast::NodeId) { run_lints!(self, check_mod, late_passes, m, s, n); - hir_visit::walk_mod(self, m); + hir_visit::walk_mod(self, m, n); + run_lints!(self, check_mod_post, late_passes, m, s, n); } - fn visit_local(&mut self, l: 
&hir::Local) { - self.with_lint_attrs(l.attrs.as_attr_slice(), |cx| { + fn visit_local(&mut self, l: &'tcx hir::Local) { + self.with_lint_attrs(&l.attrs, |cx| { run_lints!(cx, check_local, late_passes, l); hir_visit::walk_local(cx, l); }) } - fn visit_block(&mut self, b: &hir::Block) { + fn visit_block(&mut self, b: &'tcx hir::Block) { run_lints!(self, check_block, late_passes, b); hir_visit::walk_block(self, b); + run_lints!(self, check_block_post, late_passes, b); } - fn visit_arm(&mut self, a: &hir::Arm) { + fn visit_arm(&mut self, a: &'tcx hir::Arm) { run_lints!(self, check_arm, late_passes, a); hir_visit::walk_arm(self, a); } - fn visit_decl(&mut self, d: &hir::Decl) { + fn visit_decl(&mut self, d: &'tcx hir::Decl) { run_lints!(self, check_decl, late_passes, d); hir_visit::walk_decl(self, d); } - fn visit_expr_post(&mut self, e: &hir::Expr) { + fn visit_expr_post(&mut self, e: &'tcx hir::Expr) { run_lints!(self, check_expr_post, late_passes, e); } - fn visit_generics(&mut self, g: &hir::Generics) { + fn visit_generics(&mut self, g: &'tcx hir::Generics) { run_lints!(self, check_generics, late_passes, g); hir_visit::walk_generics(self, g); } - fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) { + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { self.with_lint_attrs(&trait_item.attrs, |cx| { run_lints!(cx, check_trait_item, late_passes, trait_item); - cx.visit_ids(|v| v.visit_trait_item(trait_item)); + cx.visit_ids(|v| hir_visit::walk_trait_item(v, trait_item)); hir_visit::walk_trait_item(cx, trait_item); + run_lints!(cx, check_trait_item_post, late_passes, trait_item); }); } - fn visit_impl_item(&mut self, impl_item: &hir::ImplItem) { + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { self.with_lint_attrs(&impl_item.attrs, |cx| { run_lints!(cx, check_impl_item, late_passes, impl_item); - cx.visit_ids(|v| v.visit_impl_item(impl_item)); + cx.visit_ids(|v| hir_visit::walk_impl_item(v, impl_item)); 
hir_visit::walk_impl_item(cx, impl_item); + run_lints!(cx, check_impl_item_post, late_passes, impl_item); }); } - fn visit_lifetime(&mut self, lt: &hir::Lifetime) { + fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) { run_lints!(self, check_lifetime, late_passes, lt); } - fn visit_lifetime_def(&mut self, lt: &hir::LifetimeDef) { + fn visit_lifetime_def(&mut self, lt: &'tcx hir::LifetimeDef) { run_lints!(self, check_lifetime_def, late_passes, lt); } - fn visit_explicit_self(&mut self, es: &hir::ExplicitSelf) { - run_lints!(self, check_explicit_self, late_passes, es); - hir_visit::walk_explicit_self(self, es); - } - - fn visit_path(&mut self, p: &hir::Path, id: ast::NodeId) { + fn visit_path(&mut self, p: &'tcx hir::Path, id: ast::NodeId) { run_lints!(self, check_path, late_passes, p, id); hir_visit::walk_path(self, p); } - fn visit_path_list_item(&mut self, prefix: &hir::Path, item: &hir::PathListItem) { - run_lints!(self, check_path_list_item, late_passes, item); - hir_visit::walk_path_list_item(self, prefix, item); - } - fn visit_attribute(&mut self, attr: &ast::Attribute) { check_lint_name_attribute(self, attr); run_lints!(self, check_attribute, late_passes, attr); } } -impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { +impl<'a> ast_visit::Visitor for EarlyContext<'a> { fn visit_item(&mut self, it: &ast::Item) { self.with_lint_attrs(&it.attrs, |cx| { run_lints!(cx, check_item, early_passes, it); - cx.visit_ids(|v| v.visit_item(it)); ast_visit::walk_item(cx, it); + run_lints!(cx, check_item_post, early_passes, it); }) } @@ -925,6 +968,7 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { self.with_lint_attrs(&it.attrs, |cx| { run_lints!(cx, check_foreign_item, early_passes, it); ast_visit::walk_foreign_item(cx, it); + run_lints!(cx, check_foreign_item_post, early_passes, it); }) } @@ -934,8 +978,10 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { } fn visit_expr(&mut self, e: &ast::Expr) { - run_lints!(self, check_expr, 
early_passes, e); - ast_visit::walk_expr(self, e); + self.with_lint_attrs(&e.attrs, |cx| { + run_lints!(cx, check_expr, early_passes, e); + ast_visit::walk_expr(cx, e); + }) } fn visit_stmt(&mut self, s: &ast::Stmt) { @@ -943,10 +989,11 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { ast_visit::walk_stmt(self, s); } - fn visit_fn(&mut self, fk: ast_visit::FnKind<'v>, decl: &'v ast::FnDecl, - body: &'v ast::Block, span: Span, id: ast::NodeId) { - run_lints!(self, check_fn, early_passes, fk, decl, body, span, id); - ast_visit::walk_fn(self, fk, decl, body, span); + fn visit_fn(&mut self, fk: ast_visit::FnKind, decl: &ast::FnDecl, + span: Span, id: ast::NodeId) { + run_lints!(self, check_fn, early_passes, fk, decl, span, id); + ast_visit::walk_fn(self, fk, decl, span); + run_lints!(self, check_fn_post, early_passes, fk, decl, span, id); } fn visit_variant_data(&mut self, @@ -961,7 +1008,7 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { } fn visit_struct_field(&mut self, s: &ast::StructField) { - self.with_lint_attrs(&s.node.attrs, |cx| { + self.with_lint_attrs(&s.attrs, |cx| { run_lints!(cx, check_struct_field, early_passes, s); ast_visit::walk_struct_field(cx, s); }) @@ -987,16 +1034,20 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { fn visit_mod(&mut self, m: &ast::Mod, s: Span, n: ast::NodeId) { run_lints!(self, check_mod, early_passes, m, s, n); ast_visit::walk_mod(self, m); + run_lints!(self, check_mod_post, early_passes, m, s, n); } fn visit_local(&mut self, l: &ast::Local) { - run_lints!(self, check_local, early_passes, l); - ast_visit::walk_local(self, l); + self.with_lint_attrs(&l.attrs, |cx| { + run_lints!(cx, check_local, early_passes, l); + ast_visit::walk_local(cx, l); + }) } fn visit_block(&mut self, b: &ast::Block) { run_lints!(self, check_block, early_passes, b); ast_visit::walk_block(self, b); + run_lints!(self, check_block_post, early_passes, b); } fn visit_arm(&mut self, a: &ast::Arm) { @@ -1004,11 +1055,6 
@@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { ast_visit::walk_arm(self, a); } - fn visit_decl(&mut self, d: &ast::Decl) { - run_lints!(self, check_decl, early_passes, d); - ast_visit::walk_decl(self, d); - } - fn visit_expr_post(&mut self, e: &ast::Expr) { run_lints!(self, check_expr_post, early_passes, e); } @@ -1021,16 +1067,16 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) { self.with_lint_attrs(&trait_item.attrs, |cx| { run_lints!(cx, check_trait_item, early_passes, trait_item); - cx.visit_ids(|v| v.visit_trait_item(trait_item)); ast_visit::walk_trait_item(cx, trait_item); + run_lints!(cx, check_trait_item_post, early_passes, trait_item); }); } fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) { self.with_lint_attrs(&impl_item.attrs, |cx| { run_lints!(cx, check_impl_item, early_passes, impl_item); - cx.visit_ids(|v| v.visit_impl_item(impl_item)); ast_visit::walk_impl_item(cx, impl_item); + run_lints!(cx, check_impl_item_post, early_passes, impl_item); }); } @@ -1042,11 +1088,6 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { run_lints!(self, check_lifetime_def, early_passes, lt); } - fn visit_explicit_self(&mut self, es: &ast::ExplicitSelf) { - run_lints!(self, check_explicit_self, early_passes, es); - ast_visit::walk_explicit_self(self, es); - } - fn visit_path(&mut self, p: &ast::Path, id: ast::NodeId) { run_lints!(self, check_path, early_passes, p, id); ast_visit::walk_path(self, p); @@ -1062,72 +1103,42 @@ impl<'a, 'v> ast_visit::Visitor<'v> for EarlyContext<'a> { } } +struct IdVisitor<'a, 'b: 'a, 'tcx: 'a+'b> { + cx: &'a mut LateContext<'b, 'tcx> +} + // Output any lints that were previously added to the session. 
-impl<'a, 'tcx> IdVisitingOperation for LateContext<'a, 'tcx> { - fn visit_id(&mut self, id: ast::NodeId) { - match self.sess().lints.borrow_mut().remove(&id) { - None => {} - Some(lints) => { - debug!("LateContext::visit_id: id={:?} lints={:?}", id, lints); - for (lint_id, span, msg) in lints { - self.span_lint(lint_id.lint, span, &msg[..]) - } - } - } +impl<'a, 'b, 'tcx> hir_visit::Visitor<'tcx> for IdVisitor<'a, 'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> hir_visit::NestedVisitorMap<'this, 'tcx> { + hir_visit::NestedVisitorMap::OnlyBodies(&self.cx.tcx.map) } -} -impl<'a> IdVisitingOperation for EarlyContext<'a> { + fn visit_id(&mut self, id: ast::NodeId) { - match self.sess.lints.borrow_mut().remove(&id) { - None => {} - Some(lints) => { - for (lint_id, span, msg) in lints { - self.span_lint(lint_id.lint, span, &msg[..]) - } + if let Some(lints) = self.cx.sess().lints.borrow_mut().remove(&id) { + debug!("LateContext::visit_id: id={:?} lints={:?}", id, lints); + for early_lint in lints { + self.cx.early_lint(early_lint); } } } -} - -// This lint pass is defined here because it touches parts of the `LateContext` -// that we don't want to expose. It records the lint level at certain AST -// nodes, so that the variant size difference check in trans can call -// `raw_emit_lint`. - -pub struct GatherNodeLevels; -impl LintPass for GatherNodeLevels { - fn get_lints(&self) -> LintArray { - lint_array!() + fn visit_trait_item(&mut self, _ti: &'tcx hir::TraitItem) { + // Do not recurse into trait or impl items automatically. These are + // processed separately by calling hir_visit::walk_trait_item() } -} -impl LateLintPass for GatherNodeLevels { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { - match it.node { - hir::ItemEnum(..) 
=> { - let lint_id = LintId::of(builtin::VARIANT_SIZE_DIFFERENCES); - let lvlsrc = cx.lints.get_level_source(lint_id); - match lvlsrc { - (lvl, _) if lvl != Allow => { - cx.node_levels.borrow_mut() - .insert((it.id, lint_id), lvlsrc); - }, - _ => { } - } - }, - _ => { } - } + fn visit_impl_item(&mut self, _ii: &'tcx hir::ImplItem) { + // See visit_trait_item() } } -enum CheckLintNameResult<'a> { +enum CheckLintNameResult { Ok, // Lint doesn't exist NoLint, - // The lint is either renamed or removed and a warning was - // generated in the DiagnosticBuilder - Mentioned(DiagnosticBuilder<'a>) + // The lint is either renamed or removed. This is the warning + // message. + Warning(String) } /// Checks the name of a lint for its existence, and whether it was @@ -1137,27 +1148,18 @@ enum CheckLintNameResult<'a> { /// it emits non-fatal warnings and there are *two* lint passes that /// inspect attributes, this is only run from the late pass to avoid /// printing duplicate warnings. -fn check_lint_name<'a>(sess: &'a Session, - lint_cx: &LintStore, - lint_name: &str, - span: Option) -> CheckLintNameResult<'a> { +fn check_lint_name(lint_cx: &LintStore, + lint_name: &str) -> CheckLintNameResult { match lint_cx.by_name.get(lint_name) { Some(&Renamed(ref new_name, _)) => { - let warning = format!("lint {} has been renamed to {}", - lint_name, new_name); - let db = match span { - Some(span) => sess.struct_span_warn(span, &warning[..]), - None => sess.struct_warn(&warning[..]), - }; - CheckLintNameResult::Mentioned(db) + CheckLintNameResult::Warning( + format!("lint {} has been renamed to {}", lint_name, new_name) + ) }, Some(&Removed(ref reason)) => { - let warning = format!("lint {} has been removed: {}", lint_name, reason); - let db = match span { - Some(span) => sess.struct_span_warn(span, &warning[..]), - None => sess.struct_warn(&warning[..]) - }; - CheckLintNameResult::Mentioned(db) + CheckLintNameResult::Warning( + format!("lint {} has been removed: {}", lint_name, reason) 
+ ) }, None => { match lint_cx.lint_groups.get(lint_name) { @@ -1186,10 +1188,11 @@ fn check_lint_name_attribute(cx: &LateContext, attr: &ast::Attribute) { continue; } Ok((lint_name, _, span)) => { - match check_lint_name(&cx.tcx.sess, &cx.lints, &lint_name[..], Some(span)) { + match check_lint_name(&cx.lints, &lint_name.as_str()) { CheckLintNameResult::Ok => (), - CheckLintNameResult::Mentioned(mut db) => { - db.emit(); + CheckLintNameResult::Warning(ref msg) => { + cx.span_lint(builtin::RENAMED_AND_REMOVED_LINTS, + span, msg); } CheckLintNameResult::NoLint => { cx.span_lint(builtin::UNKNOWN_LINTS, span, @@ -1205,9 +1208,11 @@ fn check_lint_name_attribute(cx: &LateContext, attr: &ast::Attribute) { // Checks the validity of lint names derived from the command line fn check_lint_name_cmdline(sess: &Session, lint_cx: &LintStore, lint_name: &str, level: Level) { - let db = match check_lint_name(sess, lint_cx, lint_name, None) { + let db = match check_lint_name(lint_cx, lint_name) { CheckLintNameResult::Ok => None, - CheckLintNameResult::Mentioned(db) => Some(db), + CheckLintNameResult::Warning(ref msg) => { + Some(sess.struct_warn(msg)) + }, CheckLintNameResult::NoLint => { Some(sess.struct_err(&format!("unknown lint: `{}`", lint_name))) } @@ -1231,7 +1236,8 @@ fn check_lint_name_cmdline(sess: &Session, lint_cx: &LintStore, /// Perform lint checking on a crate. /// /// Consumes the `lint_store` field of the `Session`. -pub fn check_crate(tcx: &ty::ctxt, access_levels: &AccessLevels) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &AccessLevels) { let _task = tcx.dep_graph.in_task(DepNode::LateLintCheck); let krate = tcx.map.krate(); @@ -1239,7 +1245,6 @@ pub fn check_crate(tcx: &ty::ctxt, access_levels: &AccessLevels) { // Visit the whole crate. 
cx.with_lint_attrs(&krate.attrs, |cx| { - cx.visit_id(ast::CRATE_NODE_ID); cx.visit_ids(|v| { hir_visit::walk_crate(v, krate); }); @@ -1249,19 +1254,22 @@ pub fn check_crate(tcx: &ty::ctxt, access_levels: &AccessLevels) { run_lints!(cx, check_crate, late_passes, krate); hir_visit::walk_crate(cx, krate); + + run_lints!(cx, check_crate_post, late_passes, krate); }); // If we missed any lints added to the session, then there's a bug somewhere // in the iteration code. for (id, v) in tcx.sess.lints.borrow().iter() { - for &(lint, span, ref msg) in v { - tcx.sess.span_bug(span, - &format!("unprocessed lint {} at {}: {}", - lint.as_str(), tcx.map.node_to_string(*id), *msg)) + for early_lint in v { + span_bug!(early_lint.diagnostic.span.clone(), + "unprocessed lint {:?} at {}", + early_lint, tcx.map.node_to_string(*id)); } } - *tcx.node_lint_levels.borrow_mut() = cx.node_levels.into_inner(); + // Put the lint store back in the session. + mem::replace(&mut *tcx.sess.lint_store.borrow_mut(), cx.lints); } pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) { @@ -1269,17 +1277,20 @@ pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) { // Visit the whole crate. cx.with_lint_attrs(&krate.attrs, |cx| { - cx.visit_id(ast::CRATE_NODE_ID); - cx.visit_ids(|v| { - v.visited_outermost = true; - ast_visit::walk_crate(v, krate); - }); + // Lints may be assigned to the whole crate. + if let Some(lints) = cx.sess.lints.borrow_mut().remove(&ast::CRATE_NODE_ID) { + for early_lint in lints { + cx.early_lint(early_lint); + } + } // since the root module isn't visited as an item (because it isn't an // item), warn for it here. run_lints!(cx, check_crate, early_passes, krate); ast_visit::walk_crate(cx, krate); + + run_lints!(cx, check_crate_post, early_passes, krate); }); // Put the lint store back in the session. 
@@ -1288,10 +1299,8 @@ pub fn check_ast_crate(sess: &Session, krate: &ast::Crate) { // If we missed any lints added to the session, then there's a bug somewhere // in the iteration code. for (_, v) in sess.lints.borrow().iter() { - for &(lint, span, ref msg) in v { - sess.span_bug(span, - &format!("unprocessed lint {}: {}", - lint.as_str(), *msg)) + for early_lint in v { + span_bug!(early_lint.diagnostic.span.clone(), "unprocessed lint {:?}", early_lint); } } } diff --git a/src/librustc/lint/mod.rs b/src/librustc/lint/mod.rs index 6061525ef398c..4e06e0abf0148 100644 --- a/src/librustc/lint/mod.rs +++ b/src/librustc/lint/mod.rs @@ -33,15 +33,15 @@ pub use self::LintSource::*; use std::hash; use std::ascii::AsciiExt; -use syntax::codemap::Span; -use rustc_front::intravisit::FnKind; +use syntax_pos::Span; +use hir::intravisit::FnKind; use syntax::visit as ast_visit; use syntax::ast; -use rustc_front::hir; +use hir; pub use lint::context::{LateContext, EarlyContext, LintContext, LintStore, raw_emit_lint, check_crate, check_ast_crate, gather_attrs, - raw_struct_lint, GatherNodeLevels, FutureIncompatibleInfo}; + raw_struct_lint, FutureIncompatibleInfo, EarlyLint, IntoEarlyLint}; /// Specification of a single lint. 
#[derive(Copy, Clone, Debug)] @@ -132,11 +132,16 @@ pub trait LintPass { pub trait LateLintPass: LintPass { fn check_name(&mut self, _: &LateContext, _: Span, _: ast::Name) { } fn check_crate(&mut self, _: &LateContext, _: &hir::Crate) { } + fn check_crate_post(&mut self, _: &LateContext, _: &hir::Crate) { } fn check_mod(&mut self, _: &LateContext, _: &hir::Mod, _: Span, _: ast::NodeId) { } + fn check_mod_post(&mut self, _: &LateContext, _: &hir::Mod, _: Span, _: ast::NodeId) { } fn check_foreign_item(&mut self, _: &LateContext, _: &hir::ForeignItem) { } + fn check_foreign_item_post(&mut self, _: &LateContext, _: &hir::ForeignItem) { } fn check_item(&mut self, _: &LateContext, _: &hir::Item) { } + fn check_item_post(&mut self, _: &LateContext, _: &hir::Item) { } fn check_local(&mut self, _: &LateContext, _: &hir::Local) { } fn check_block(&mut self, _: &LateContext, _: &hir::Block) { } + fn check_block_post(&mut self, _: &LateContext, _: &hir::Block) { } fn check_stmt(&mut self, _: &LateContext, _: &hir::Stmt) { } fn check_arm(&mut self, _: &LateContext, _: &hir::Arm) { } fn check_pat(&mut self, _: &LateContext, _: &hir::Pat) { } @@ -146,9 +151,13 @@ pub trait LateLintPass: LintPass { fn check_ty(&mut self, _: &LateContext, _: &hir::Ty) { } fn check_generics(&mut self, _: &LateContext, _: &hir::Generics) { } fn check_fn(&mut self, _: &LateContext, - _: FnKind, _: &hir::FnDecl, _: &hir::Block, _: Span, _: ast::NodeId) { } + _: FnKind, _: &hir::FnDecl, _: &hir::Expr, _: Span, _: ast::NodeId) { } + fn check_fn_post(&mut self, _: &LateContext, + _: FnKind, _: &hir::FnDecl, _: &hir::Expr, _: Span, _: ast::NodeId) { } fn check_trait_item(&mut self, _: &LateContext, _: &hir::TraitItem) { } + fn check_trait_item_post(&mut self, _: &LateContext, _: &hir::TraitItem) { } fn check_impl_item(&mut self, _: &LateContext, _: &hir::ImplItem) { } + fn check_impl_item_post(&mut self, _: &LateContext, _: &hir::ImplItem) { } fn check_struct_def(&mut self, _: &LateContext, _: 
&hir::VariantData, _: ast::Name, _: &hir::Generics, _: ast::NodeId) { } fn check_struct_def_post(&mut self, _: &LateContext, @@ -158,9 +167,7 @@ pub trait LateLintPass: LintPass { fn check_variant_post(&mut self, _: &LateContext, _: &hir::Variant, _: &hir::Generics) { } fn check_lifetime(&mut self, _: &LateContext, _: &hir::Lifetime) { } fn check_lifetime_def(&mut self, _: &LateContext, _: &hir::LifetimeDef) { } - fn check_explicit_self(&mut self, _: &LateContext, _: &hir::ExplicitSelf) { } fn check_path(&mut self, _: &LateContext, _: &hir::Path, _: ast::NodeId) { } - fn check_path_list_item(&mut self, _: &LateContext, _: &hir::PathListItem) { } fn check_attribute(&mut self, _: &LateContext, _: &ast::Attribute) { } /// Called when entering a syntax node that can have lint attributes such @@ -174,23 +181,31 @@ pub trait LateLintPass: LintPass { pub trait EarlyLintPass: LintPass { fn check_ident(&mut self, _: &EarlyContext, _: Span, _: ast::Ident) { } fn check_crate(&mut self, _: &EarlyContext, _: &ast::Crate) { } + fn check_crate_post(&mut self, _: &EarlyContext, _: &ast::Crate) { } fn check_mod(&mut self, _: &EarlyContext, _: &ast::Mod, _: Span, _: ast::NodeId) { } + fn check_mod_post(&mut self, _: &EarlyContext, _: &ast::Mod, _: Span, _: ast::NodeId) { } fn check_foreign_item(&mut self, _: &EarlyContext, _: &ast::ForeignItem) { } + fn check_foreign_item_post(&mut self, _: &EarlyContext, _: &ast::ForeignItem) { } fn check_item(&mut self, _: &EarlyContext, _: &ast::Item) { } + fn check_item_post(&mut self, _: &EarlyContext, _: &ast::Item) { } fn check_local(&mut self, _: &EarlyContext, _: &ast::Local) { } fn check_block(&mut self, _: &EarlyContext, _: &ast::Block) { } + fn check_block_post(&mut self, _: &EarlyContext, _: &ast::Block) { } fn check_stmt(&mut self, _: &EarlyContext, _: &ast::Stmt) { } fn check_arm(&mut self, _: &EarlyContext, _: &ast::Arm) { } fn check_pat(&mut self, _: &EarlyContext, _: &ast::Pat) { } - fn check_decl(&mut self, _: &EarlyContext, _: 
&ast::Decl) { } fn check_expr(&mut self, _: &EarlyContext, _: &ast::Expr) { } fn check_expr_post(&mut self, _: &EarlyContext, _: &ast::Expr) { } fn check_ty(&mut self, _: &EarlyContext, _: &ast::Ty) { } fn check_generics(&mut self, _: &EarlyContext, _: &ast::Generics) { } fn check_fn(&mut self, _: &EarlyContext, - _: ast_visit::FnKind, _: &ast::FnDecl, _: &ast::Block, _: Span, _: ast::NodeId) { } + _: ast_visit::FnKind, _: &ast::FnDecl, _: Span, _: ast::NodeId) { } + fn check_fn_post(&mut self, _: &EarlyContext, + _: ast_visit::FnKind, _: &ast::FnDecl, _: Span, _: ast::NodeId) { } fn check_trait_item(&mut self, _: &EarlyContext, _: &ast::TraitItem) { } + fn check_trait_item_post(&mut self, _: &EarlyContext, _: &ast::TraitItem) { } fn check_impl_item(&mut self, _: &EarlyContext, _: &ast::ImplItem) { } + fn check_impl_item_post(&mut self, _: &EarlyContext, _: &ast::ImplItem) { } fn check_struct_def(&mut self, _: &EarlyContext, _: &ast::VariantData, _: ast::Ident, _: &ast::Generics, _: ast::NodeId) { } fn check_struct_def_post(&mut self, _: &EarlyContext, @@ -200,7 +215,6 @@ pub trait EarlyLintPass: LintPass { fn check_variant_post(&mut self, _: &EarlyContext, _: &ast::Variant, _: &ast::Generics) { } fn check_lifetime(&mut self, _: &EarlyContext, _: &ast::Lifetime) { } fn check_lifetime_def(&mut self, _: &EarlyContext, _: &ast::LifetimeDef) { } - fn check_explicit_self(&mut self, _: &EarlyContext, _: &ast::ExplicitSelf) { } fn check_path(&mut self, _: &EarlyContext, _: &ast::Path, _: ast::NodeId) { } fn check_path_list_item(&mut self, _: &EarlyContext, _: &ast::PathListItem) { } fn check_attribute(&mut self, _: &EarlyContext, _: &ast::Attribute) { } @@ -248,13 +262,13 @@ impl LintId { } /// Get the name of the lint. - pub fn as_str(&self) -> String { + pub fn to_string(&self) -> String { self.lint.name_lower() } } /// Setting for how to handle a lint. 
-#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug)] +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash)] pub enum Level { Allow, Warn, Deny, Forbid } @@ -299,5 +313,4 @@ pub enum LintSource { pub type LevelSource = (Level, LintSource); pub mod builtin; - mod context; diff --git a/src/librustc/macros.rs b/src/librustc/macros.rs index ed764ebd9f95d..76dca1bb5b649 100644 --- a/src/librustc/macros.rs +++ b/src/librustc/macros.rs @@ -44,3 +44,18 @@ macro_rules! enum_from_u32 { } } } + +#[macro_export] +macro_rules! bug { + () => ( bug!("impossible case reached") ); + ($($message:tt)*) => ({ + $crate::session::bug_fmt(file!(), line!(), format_args!($($message)*)) + }) +} + +#[macro_export] +macro_rules! span_bug { + ($span:expr, $($message:tt)*) => ({ + $crate::session::span_bug_fmt(file!(), line!(), $span, format_args!($($message)*)) + }) +} diff --git a/src/librustc/middle/astconv_util.rs b/src/librustc/middle/astconv_util.rs index 2bf749d93cec4..3418034b069d6 100644 --- a/src/librustc/middle/astconv_util.rs +++ b/src/librustc/middle/astconv_util.rs @@ -14,70 +14,70 @@ * Almost certainly this could (and should) be refactored out of existence. 
*/ -use middle::def; -use middle::ty::{self, Ty}; +use hir; +use hir::def::Def; +use ty::{Ty, TyCtxt}; -use syntax::codemap::Span; -use rustc_front::hir as ast; +use syntax_pos::Span; -pub fn prohibit_type_params(tcx: &ty::ctxt, segments: &[ast::PathSegment]) { - for segment in segments { - for typ in segment.parameters.types() { - span_err!(tcx.sess, typ.span, E0109, - "type parameters are not allowed on this type"); - break; - } - for lifetime in segment.parameters.lifetimes() { - span_err!(tcx.sess, lifetime.span, E0110, - "lifetime parameters are not allowed on this type"); - break; - } - for binding in segment.parameters.bindings() { - prohibit_projection(tcx, binding.span); - break; +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn prohibit_type_params(self, segments: &[hir::PathSegment]) { + for segment in segments { + for typ in segment.parameters.types() { + struct_span_err!(self.sess, typ.span, E0109, + "type parameters are not allowed on this type") + .span_label(typ.span, &format!("type parameter not allowed")) + .emit(); + break; + } + for lifetime in segment.parameters.lifetimes() { + struct_span_err!(self.sess, lifetime.span, E0110, + "lifetime parameters are not allowed on this type") + .span_label(lifetime.span, + &format!("lifetime parameter not allowed on this type")) + .emit(); + break; + } + for binding in segment.parameters.bindings() { + self.prohibit_projection(binding.span); + break; + } } } -} -pub fn prohibit_projection(tcx: &ty::ctxt, span: Span) -{ - span_err!(tcx.sess, span, E0229, - "associated type bindings are not allowed here"); -} + pub fn prohibit_projection(self, span: Span) + { + let mut err = struct_span_err!(self.sess, span, E0229, + "associated type bindings are not allowed here"); + err.span_label(span, &format!("associate type not allowed here")).emit(); + } -pub fn prim_ty_to_ty<'tcx>(tcx: &ty::ctxt<'tcx>, - segments: &[ast::PathSegment], - nty: ast::PrimTy) - -> Ty<'tcx> { - prohibit_type_params(tcx, segments); - 
match nty { - ast::TyBool => tcx.types.bool, - ast::TyChar => tcx.types.char, - ast::TyInt(it) => tcx.mk_mach_int(it), - ast::TyUint(uit) => tcx.mk_mach_uint(uit), - ast::TyFloat(ft) => tcx.mk_mach_float(ft), - ast::TyStr => tcx.mk_str() + pub fn prim_ty_to_ty(self, + segments: &[hir::PathSegment], + nty: hir::PrimTy) + -> Ty<'tcx> { + self.prohibit_type_params(segments); + match nty { + hir::TyBool => self.types.bool, + hir::TyChar => self.types.char, + hir::TyInt(it) => self.mk_mach_int(it), + hir::TyUint(uit) => self.mk_mach_uint(uit), + hir::TyFloat(ft) => self.mk_mach_float(ft), + hir::TyStr => self.mk_str() + } } -} -/// If a type in the AST is a primitive type, return the ty::Ty corresponding -/// to it. -pub fn ast_ty_to_prim_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ast_ty: &ast::Ty) - -> Option> { - if let ast::TyPath(None, ref path) = ast_ty.node { - let def = match tcx.def_map.borrow().get(&ast_ty.id) { - None => { - tcx.sess.span_bug(ast_ty.span, - &format!("unbound path {:?}", path)) + /// If a type in the AST is a primitive type, return the ty::Ty corresponding + /// to it. + pub fn ast_ty_to_prim_ty(self, ast_ty: &hir::Ty) -> Option> { + if let hir::TyPath(hir::QPath::Resolved(None, ref path)) = ast_ty.node { + if let Def::PrimTy(nty) = path.def { + Some(self.prim_ty_to_ty(&path.segments, nty)) + } else { + None } - Some(d) => d.full_def() - }; - if let def::DefPrimTy(nty) = def { - Some(prim_ty_to_ty(tcx, &path.segments, nty)) } else { None } - } else { - None } } diff --git a/src/librustc/middle/cfg/graphviz.rs b/src/librustc/middle/cfg/graphviz.rs deleted file mode 100644 index e807092507082..0000000000000 --- a/src/librustc/middle/cfg/graphviz.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -/// This module provides linkage between rustc::middle::graph and -/// libgraphviz traits. - -// For clarity, rename the graphviz crate locally to dot. -use graphviz as dot; -use graphviz::IntoCow; - -use syntax::ast; - -use front::map as ast_map; -use middle::cfg; - -pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode); -pub type Edge<'a> = &'a cfg::CFGEdge; - -pub struct LabelledCFG<'a, 'ast: 'a> { - pub ast_map: &'a ast_map::Map<'ast>, - pub cfg: &'a cfg::CFG, - pub name: String, - /// `labelled_edges` controls whether we emit labels on the edges - pub labelled_edges: bool, -} - -fn replace_newline_with_backslash_l(s: String) -> String { - // Replacing newlines with \\l causes each line to be left-aligned, - // improving presentation of (long) pretty-printed expressions. - if s.contains("\n") { - let mut s = s.replace("\n", "\\l"); - // Apparently left-alignment applies to the line that precedes - // \l, not the line that follows; so, add \l at end of string - // if not already present, ensuring last line gets left-aligned - // as well. 
- let mut last_two: Vec<_> = - s.chars().rev().take(2).collect(); - last_two.reverse(); - if last_two != ['\\', 'l'] { - s.push_str("\\l"); - } - s - } else { - s - } -} - -impl<'a, 'ast> dot::Labeller<'a, Node<'a>, Edge<'a>> for LabelledCFG<'a, 'ast> { - fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new(&self.name[..]).unwrap() } - - fn node_id(&'a self, &(i,_): &Node<'a>) -> dot::Id<'a> { - dot::Id::new(format!("N{}", i.node_id())).unwrap() - } - - fn node_label(&'a self, &(i, n): &Node<'a>) -> dot::LabelText<'a> { - if i == self.cfg.entry { - dot::LabelText::LabelStr("entry".into_cow()) - } else if i == self.cfg.exit { - dot::LabelText::LabelStr("exit".into_cow()) - } else if n.data.id() == ast::DUMMY_NODE_ID { - dot::LabelText::LabelStr("(dummy_node)".into_cow()) - } else { - let s = self.ast_map.node_to_string(n.data.id()); - // left-aligns the lines - let s = replace_newline_with_backslash_l(s); - dot::LabelText::EscStr(s.into_cow()) - } - } - - fn edge_label(&self, e: &Edge<'a>) -> dot::LabelText<'a> { - let mut label = String::new(); - if !self.labelled_edges { - return dot::LabelText::EscStr(label.into_cow()); - } - let mut put_one = false; - for (i, &node_id) in e.data.exiting_scopes.iter().enumerate() { - if put_one { - label.push_str(",\\l"); - } else { - put_one = true; - } - let s = self.ast_map.node_to_string(node_id); - // left-aligns the lines - let s = replace_newline_with_backslash_l(s); - label.push_str(&format!("exiting scope_{} {}", - i, - &s[..])); - } - dot::LabelText::EscStr(label.into_cow()) - } -} - -impl<'a> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for &'a cfg::CFG { - fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { - let mut v = Vec::new(); - self.graph.each_node(|i, nd| { v.push((i, nd)); true }); - v.into_cow() - } - fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { - self.graph.all_edges().iter().collect() - } - fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { - let i = edge.source(); - (i, self.graph.node(i)) - } - fn target(&'a 
self, edge: &Edge<'a>) -> Node<'a> { - let i = edge.target(); - (i, self.graph.node(i)) - } -} - -impl<'a, 'ast> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for LabelledCFG<'a, 'ast> -{ - fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.cfg.nodes() } - fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.cfg.edges() } - fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.source(edge) } - fn target(&'a self, edge: &Edge<'a>) -> Node<'a> { self.cfg.target(edge) } -} diff --git a/src/librustc/middle/cfg/mod.rs b/src/librustc/middle/cfg/mod.rs deleted file mode 100644 index ac84d3dec94e2..0000000000000 --- a/src/librustc/middle/cfg/mod.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Module that constructs a control-flow graph representing an item. -//! Uses `Graph` as the underlying representation. 
- -use rustc_data_structures::graph; -use middle::ty; -use syntax::ast; -use rustc_front::hir; - -mod construct; -pub mod graphviz; - -pub struct CFG { - pub graph: CFGGraph, - pub entry: CFGIndex, - pub exit: CFGIndex, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -pub enum CFGNodeData { - AST(ast::NodeId), - Entry, - Exit, - Dummy, - Unreachable, -} - -impl CFGNodeData { - pub fn id(&self) -> ast::NodeId { - if let CFGNodeData::AST(id) = *self { - id - } else { - ast::DUMMY_NODE_ID - } - } -} - -#[derive(Debug)] -pub struct CFGEdgeData { - pub exiting_scopes: Vec -} - -pub type CFGIndex = graph::NodeIndex; - -pub type CFGGraph = graph::Graph; - -pub type CFGNode = graph::Node; - -pub type CFGEdge = graph::Edge; - -impl CFG { - pub fn new(tcx: &ty::ctxt, - blk: &hir::Block) -> CFG { - construct::construct(tcx, blk) - } - - pub fn node_is_reachable(&self, id: ast::NodeId) -> bool { - self.graph.depth_traverse(self.entry) - .any(|idx| self.graph.node_data(idx).id() == id) - } -} diff --git a/src/librustc/middle/check_const.rs b/src/librustc/middle/check_const.rs deleted file mode 100644 index 5822b3dc5e954..0000000000000 --- a/src/librustc/middle/check_const.rs +++ /dev/null @@ -1,949 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Verifies that the types and values of const and static items -// are safe. 
The rules enforced by this module are: -// -// - For each *mutable* static item, it checks that its **type**: -// - doesn't have a destructor -// - doesn't own a box -// -// - For each *immutable* static item, it checks that its **value**: -// - doesn't own a box -// - doesn't contain a struct literal or a call to an enum variant / struct constructor where -// - the type of the struct/enum has a dtor -// -// Rules Enforced Elsewhere: -// - It's not possible to take the address of a static item with unsafe interior. This is enforced -// by borrowck::gather_loans - -use dep_graph::DepNode; -use middle::ty::cast::{CastKind}; -use middle::const_eval::{self, ConstEvalErr}; -use middle::const_eval::ErrKind::IndexOpFeatureGated; -use middle::const_eval::EvalHint::ExprTypeChecked; -use middle::def; -use middle::def_id::DefId; -use middle::expr_use_visitor as euv; -use middle::infer; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; -use middle::traits; -use middle::ty::{self, Ty}; -use util::nodemap::NodeMap; - -use rustc_front::hir; -use syntax::ast; -use syntax::codemap::Span; -use syntax::feature_gate::UnstableFeatures; -use rustc_front::intravisit::{self, FnKind, Visitor}; - -use std::collections::hash_map::Entry; -use std::cmp::Ordering; - -// Const qualification, from partial to completely promotable. -bitflags! { - #[derive(RustcEncodable, RustcDecodable)] - flags ConstQualif: u8 { - // Inner mutability (can not be placed behind a reference) or behind - // &mut in a non-global expression. Can be copied from static memory. - const MUTABLE_MEM = 1 << 0, - // Constant value with a type that implements Drop. Can be copied - // from static memory, similar to MUTABLE_MEM. - const NEEDS_DROP = 1 << 1, - // Even if the value can be placed in static memory, copying it from - // there is more expensive than in-place instantiation, and/or it may - // be too large. This applies to [T; N] and everything containing it. 
- // N.B.: references need to clear this flag to not end up on the stack. - const PREFER_IN_PLACE = 1 << 2, - // May use more than 0 bytes of memory, doesn't impact the constness - // directly, but is not allowed to be borrowed mutably in a constant. - const NON_ZERO_SIZED = 1 << 3, - // Actually borrowed, has to always be in static memory. Does not - // propagate, and requires the expression to behave like a 'static - // lvalue. The set of expressions with this flag is the minimum - // that have to be promoted. - const HAS_STATIC_BORROWS = 1 << 4, - // Invalid const for miscellaneous reasons (e.g. not implemented). - const NOT_CONST = 1 << 5, - - // Borrowing the expression won't produce &'static T if any of these - // bits are set, though the value could be copied from static memory - // if `NOT_CONST` isn't set. - const NON_STATIC_BORROWS = ConstQualif::MUTABLE_MEM.bits | - ConstQualif::NEEDS_DROP.bits | - ConstQualif::NOT_CONST.bits - } -} - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -enum Mode { - Const, - ConstFn, - Static, - StaticMut, - - // An expression that occurs outside of any constant context - // (i.e. `const`, `static`, array lengths, etc.). The value - // can be variable at runtime, but will be promotable to - // static memory if we can prove it is actually constant. 
- Var, -} - -struct CheckCrateVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - mode: Mode, - qualif: ConstQualif, - rvalue_borrows: NodeMap -} - -impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> { - fn with_mode(&mut self, mode: Mode, f: F) -> R where - F: FnOnce(&mut CheckCrateVisitor<'a, 'tcx>) -> R, - { - let (old_mode, old_qualif) = (self.mode, self.qualif); - self.mode = mode; - self.qualif = ConstQualif::empty(); - let r = f(self); - self.mode = old_mode; - self.qualif = old_qualif; - r - } - - fn with_euv<'b, F, R>(&'b mut self, item_id: Option, f: F) -> R where - F: for<'t> FnOnce(&mut euv::ExprUseVisitor<'b, 't, 'b, 'tcx>) -> R, - { - let param_env = match item_id { - Some(item_id) => ty::ParameterEnvironment::for_item(self.tcx, item_id), - None => self.tcx.empty_parameter_environment() - }; - - let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env)); - - f(&mut euv::ExprUseVisitor::new(self, &infcx)) - } - - fn global_expr(&mut self, mode: Mode, expr: &hir::Expr) -> ConstQualif { - assert!(mode != Mode::Var); - match self.tcx.const_qualif_map.borrow_mut().entry(expr.id) { - Entry::Occupied(entry) => return *entry.get(), - Entry::Vacant(entry) => { - // Prevent infinite recursion on re-entry. - entry.insert(ConstQualif::empty()); - } - } - self.with_mode(mode, |this| { - this.with_euv(None, |euv| euv.consume_expr(expr)); - this.visit_expr(expr); - this.qualif - }) - } - - fn fn_like(&mut self, - fk: FnKind, - fd: &hir::FnDecl, - b: &hir::Block, - s: Span, - fn_id: ast::NodeId) - -> ConstQualif { - match self.tcx.const_qualif_map.borrow_mut().entry(fn_id) { - Entry::Occupied(entry) => return *entry.get(), - Entry::Vacant(entry) => { - // Prevent infinite recursion on re-entry. 
- entry.insert(ConstQualif::empty()); - } - } - - let mode = match fk { - FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _) => { - Mode::ConstFn - } - FnKind::Method(_, m, _) => { - if m.constness == hir::Constness::Const { - Mode::ConstFn - } else { - Mode::Var - } - } - _ => Mode::Var - }; - - let qualif = self.with_mode(mode, |this| { - this.with_euv(Some(fn_id), |euv| euv.walk_fn(fd, b)); - intravisit::walk_fn(this, fk, fd, b, s); - this.qualif - }); - - // Keep only bits that aren't affected by function body (NON_ZERO_SIZED), - // and bits that don't change semantics, just optimizations (PREFER_IN_PLACE). - let qualif = qualif & (ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE); - - self.tcx.const_qualif_map.borrow_mut().insert(fn_id, qualif); - qualif - } - - fn add_qualif(&mut self, qualif: ConstQualif) { - self.qualif = self.qualif | qualif; - } - - /// Returns true if the call is to a const fn or method. - fn handle_const_fn_call(&mut self, - expr: &hir::Expr, - def_id: DefId, - ret_ty: Ty<'tcx>) - -> bool { - if let Some(fn_like) = const_eval::lookup_const_fn_by_id(self.tcx, def_id) { - if - // we are in a static/const initializer - self.mode != Mode::Var && - - // feature-gate is not enabled - !self.tcx.sess.features.borrow().const_fn && - - // this doesn't come from a macro that has #[allow_internal_unstable] - !self.tcx.sess.codemap().span_allows_unstable(expr.span) - { - let mut err = self.tcx.sess.struct_span_err( - expr.span, - "const fns are an unstable feature"); - fileline_help!( - &mut err, - expr.span, - "in Nightly builds, add `#![feature(const_fn)]` to the crate \ - attributes to enable"); - err.emit(); - } - - let qualif = self.fn_like(fn_like.kind(), - fn_like.decl(), - fn_like.body(), - fn_like.span(), - fn_like.id()); - self.add_qualif(qualif); - - if ret_ty.type_contents(self.tcx).interior_unsafe() { - self.add_qualif(ConstQualif::MUTABLE_MEM); - } - - true - } else { - false - } - } - - fn record_borrow(&mut self, id: 
ast::NodeId, mutbl: hir::Mutability) { - match self.rvalue_borrows.entry(id) { - Entry::Occupied(mut entry) => { - // Merge the two borrows, taking the most demanding - // one, mutability-wise. - if mutbl == hir::MutMutable { - entry.insert(mutbl); - } - } - Entry::Vacant(entry) => { - entry.insert(mutbl); - } - } - } - - fn msg(&self) -> &'static str { - match self.mode { - Mode::Const => "constant", - Mode::ConstFn => "constant function", - Mode::StaticMut | Mode::Static => "static", - Mode::Var => unreachable!(), - } - } - - fn check_static_mut_type(&self, e: &hir::Expr) { - let node_ty = self.tcx.node_id_to_type(e.id); - let tcontents = node_ty.type_contents(self.tcx); - - let suffix = if tcontents.has_dtor() { - "destructors" - } else if tcontents.owns_owned() { - "boxes" - } else { - return - }; - - span_err!(self.tcx.sess, e.span, E0397, - "mutable statics are not allowed to have {}", suffix); - } - - fn check_static_type(&self, e: &hir::Expr) { - let ty = self.tcx.node_id_to_type(e.id); - let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None); - let cause = traits::ObligationCause::new(e.span, e.id, traits::SharedStatic); - let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut(); - fulfill_cx.register_builtin_bound(&infcx, ty, ty::BoundSync, cause); - match fulfill_cx.select_all_or_error(&infcx) { - Ok(()) => { }, - Err(ref errors) => { - traits::report_fulfillment_errors(&infcx, errors); - } - } - } -} - -impl<'a, 'tcx, 'v> Visitor<'v> for CheckCrateVisitor<'a, 'tcx> { - fn visit_item(&mut self, i: &hir::Item) { - debug!("visit_item(item={})", self.tcx.map.node_to_string(i.id)); - assert_eq!(self.mode, Mode::Var); - match i.node { - hir::ItemStatic(_, hir::MutImmutable, ref expr) => { - self.check_static_type(&**expr); - self.global_expr(Mode::Static, &**expr); - } - hir::ItemStatic(_, hir::MutMutable, ref expr) => { - self.check_static_mut_type(&**expr); - self.global_expr(Mode::StaticMut, &**expr); - } - hir::ItemConst(_, ref expr) => { - 
self.global_expr(Mode::Const, &**expr); - } - hir::ItemEnum(ref enum_definition, _) => { - for var in &enum_definition.variants { - if let Some(ref ex) = var.node.disr_expr { - self.global_expr(Mode::Const, &**ex); - } - } - } - _ => { - intravisit::walk_item(self, i); - } - } - } - - fn visit_trait_item(&mut self, t: &'v hir::TraitItem) { - match t.node { - hir::ConstTraitItem(_, ref default) => { - if let Some(ref expr) = *default { - self.global_expr(Mode::Const, &*expr); - } else { - intravisit::walk_trait_item(self, t); - } - } - _ => self.with_mode(Mode::Var, |v| intravisit::walk_trait_item(v, t)), - } - } - - fn visit_impl_item(&mut self, i: &'v hir::ImplItem) { - match i.node { - hir::ImplItemKind::Const(_, ref expr) => { - self.global_expr(Mode::Const, &*expr); - } - _ => self.with_mode(Mode::Var, |v| intravisit::walk_impl_item(v, i)), - } - } - - fn visit_fn(&mut self, - fk: FnKind<'v>, - fd: &'v hir::FnDecl, - b: &'v hir::Block, - s: Span, - fn_id: ast::NodeId) { - self.fn_like(fk, fd, b, s, fn_id); - } - - fn visit_pat(&mut self, p: &hir::Pat) { - match p.node { - hir::PatLit(ref lit) => { - self.global_expr(Mode::Const, &**lit); - } - hir::PatRange(ref start, ref end) => { - self.global_expr(Mode::Const, &**start); - self.global_expr(Mode::Const, &**end); - - match const_eval::compare_lit_exprs(self.tcx, start, end) { - Some(Ordering::Less) | - Some(Ordering::Equal) => {} - Some(Ordering::Greater) => { - span_err!(self.tcx.sess, start.span, E0030, - "lower range bound must be less than or equal to upper"); - } - None => { - self.tcx.sess.delay_span_bug(start.span, - "non-constant path in constant expr"); - } - } - } - _ => intravisit::walk_pat(self, p) - } - } - - fn visit_block(&mut self, block: &hir::Block) { - // Check all statements in the block - for stmt in &block.stmts { - match stmt.node { - hir::StmtDecl(ref decl, _) => { - match decl.node { - hir::DeclLocal(_) => {}, - // Item statements are allowed - hir::DeclItem(_) => continue - } - } - 
hir::StmtExpr(_, _) => {}, - hir::StmtSemi(_, _) => {}, - } - self.add_qualif(ConstQualif::NOT_CONST); - // anything else should have been caught by check_const_fn - assert_eq!(self.mode, Mode::Var); - } - intravisit::walk_block(self, block); - } - - fn visit_expr(&mut self, ex: &hir::Expr) { - let mut outer = self.qualif; - self.qualif = ConstQualif::empty(); - - let node_ty = self.tcx.node_id_to_type(ex.id); - check_expr(self, ex, node_ty); - check_adjustments(self, ex); - - // Special-case some expressions to avoid certain flags bubbling up. - match ex.node { - hir::ExprCall(ref callee, ref args) => { - for arg in args { - self.visit_expr(&**arg) - } - - let inner = self.qualif; - self.visit_expr(&**callee); - // The callee's size doesn't count in the call. - let added = self.qualif - inner; - self.qualif = inner | (added - ConstQualif::NON_ZERO_SIZED); - } - hir::ExprRepeat(ref element, _) => { - self.visit_expr(&**element); - // The count is checked elsewhere (typeck). - let count = match node_ty.sty { - ty::TyArray(_, n) => n, - _ => unreachable!() - }; - // [element; 0] is always zero-sized. - if count == 0 { - self.qualif.remove(ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE); - } - } - hir::ExprMatch(ref discr, ref arms, _) => { - // Compute the most demanding borrow from all the arms' - // patterns and set that on the discriminator. - let mut borrow = None; - for pat in arms.iter().flat_map(|arm| &arm.pats) { - let pat_borrow = self.rvalue_borrows.remove(&pat.id); - match (borrow, pat_borrow) { - (None, _) | (_, Some(hir::MutMutable)) => { - borrow = pat_borrow; - } - _ => {} - } - } - if let Some(mutbl) = borrow { - self.record_borrow(discr.id, mutbl); - } - intravisit::walk_expr(self, ex); - } - // Division by zero and overflow checking. 
- hir::ExprBinary(op, _, _) => { - intravisit::walk_expr(self, ex); - let div_or_rem = op.node == hir::BiDiv || op.node == hir::BiRem; - match node_ty.sty { - ty::TyUint(_) | ty::TyInt(_) if div_or_rem => { - if !self.qualif.intersects(ConstQualif::NOT_CONST) { - match const_eval::eval_const_expr_partial( - self.tcx, ex, ExprTypeChecked, None) { - Ok(_) => {} - Err(ConstEvalErr { kind: IndexOpFeatureGated, ..}) => {}, - Err(msg) => { - self.tcx.sess.add_lint(::lint::builtin::CONST_ERR, ex.id, - msg.span, - msg.description().into_owned()) - } - } - } - } - _ => {} - } - } - _ => intravisit::walk_expr(self, ex) - } - - // Handle borrows on (or inside the autorefs of) this expression. - match self.rvalue_borrows.remove(&ex.id) { - Some(hir::MutImmutable) => { - // Constants cannot be borrowed if they contain interior mutability as - // it means that our "silent insertion of statics" could change - // initializer values (very bad). - // If the type doesn't have interior mutability, then `ConstQualif::MUTABLE_MEM` has - // propagated from another error, so erroring again would be just noise. - let tc = node_ty.type_contents(self.tcx); - if self.qualif.intersects(ConstQualif::MUTABLE_MEM) && tc.interior_unsafe() { - outer = outer | ConstQualif::NOT_CONST; - if self.mode != Mode::Var { - span_err!(self.tcx.sess, ex.span, E0492, - "cannot borrow a constant which contains \ - interior mutability, create a static instead"); - } - } - // If the reference has to be 'static, avoid in-place initialization - // as that will end up pointing to the stack instead. - if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { - self.qualif = self.qualif - ConstQualif::PREFER_IN_PLACE; - self.add_qualif(ConstQualif::HAS_STATIC_BORROWS); - } - } - Some(hir::MutMutable) => { - // `&mut expr` means expr could be mutated, unless it's zero-sized. 
- if self.qualif.intersects(ConstQualif::NON_ZERO_SIZED) { - if self.mode == Mode::Var { - outer = outer | ConstQualif::NOT_CONST; - self.add_qualif(ConstQualif::MUTABLE_MEM); - } else { - span_err!(self.tcx.sess, ex.span, E0017, - "references in {}s may only refer \ - to immutable values", self.msg()) - } - } - if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { - self.add_qualif(ConstQualif::HAS_STATIC_BORROWS); - } - } - None => {} - } - self.tcx.const_qualif_map.borrow_mut().insert(ex.id, self.qualif); - // Don't propagate certain flags. - self.qualif = outer | (self.qualif - ConstQualif::HAS_STATIC_BORROWS); - } -} - -/// This function is used to enforce the constraints on -/// const/static items. It walks through the *value* -/// of the item walking down the expression and evaluating -/// every nested expression. If the expression is not part -/// of a const/static item, it is qualified for promotion -/// instead of producing errors. -fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, - e: &hir::Expr, node_ty: Ty<'tcx>) { - match node_ty.sty { - ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor() => { - v.add_qualif(ConstQualif::NEEDS_DROP); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0493, - "{}s are not allowed to have destructors", - v.msg()); - } - } - _ => {} - } - - let method_call = ty::MethodCall::expr(e.id); - match e.node { - hir::ExprUnary(..) | - hir::ExprBinary(..) | - hir::ExprIndex(..) 
if v.tcx.tables.borrow().method_map.contains_key(&method_call) => { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0011, - "user-defined operators are not allowed in {}s", v.msg()); - } - } - hir::ExprBox(_) => { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0010, - "allocations are not allowed in {}s", v.msg()); - } - } - hir::ExprUnary(op, ref inner) => { - match v.tcx.node_id_to_type(inner.id).sty { - ty::TyRawPtr(_) => { - assert!(op == hir::UnDeref); - - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0396, - "raw pointers cannot be dereferenced in {}s", v.msg()); - } - } - _ => {} - } - } - hir::ExprBinary(op, ref lhs, _) => { - match v.tcx.node_id_to_type(lhs.id).sty { - ty::TyRawPtr(_) => { - assert!(op.node == hir::BiEq || op.node == hir::BiNe || - op.node == hir::BiLe || op.node == hir::BiLt || - op.node == hir::BiGe || op.node == hir::BiGt); - - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0395, - "raw pointers cannot be compared in {}s", v.msg()); - } - } - _ => {} - } - } - hir::ExprCast(ref from, _) => { - debug!("Checking const cast(id={})", from.id); - match v.tcx.cast_kinds.borrow().get(&from.id) { - None => v.tcx.sess.span_bug(e.span, "no kind for cast"), - Some(&CastKind::PtrAddrCast) | Some(&CastKind::FnPtrAddrCast) => { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0018, - "raw pointers cannot be cast to integers in {}s", v.msg()); - } - } - _ => {} - } - } - hir::ExprPath(..) => { - let def = v.tcx.def_map.borrow().get(&e.id).map(|d| d.full_def()); - match def { - Some(def::DefVariant(_, _, _)) => { - // Count the discriminator or function pointer. - v.add_qualif(ConstQualif::NON_ZERO_SIZED); - } - Some(def::DefStruct(_)) => { - if let ty::TyBareFn(..) 
= node_ty.sty { - // Count the function pointer. - v.add_qualif(ConstQualif::NON_ZERO_SIZED); - } - } - Some(def::DefFn(..)) | Some(def::DefMethod(..)) => { - // Count the function pointer. - v.add_qualif(ConstQualif::NON_ZERO_SIZED); - } - Some(def::DefStatic(..)) => { - match v.mode { - Mode::Static | Mode::StaticMut => {} - Mode::Const | Mode::ConstFn => { - span_err!(v.tcx.sess, e.span, E0013, - "{}s cannot refer to other statics, insert \ - an intermediate constant instead", v.msg()); - } - Mode::Var => v.add_qualif(ConstQualif::NOT_CONST) - } - } - Some(def::DefConst(did)) | - Some(def::DefAssociatedConst(did)) => { - if let Some(expr) = const_eval::lookup_const_by_id(v.tcx, did, - Some(e.id), - None) { - let inner = v.global_expr(Mode::Const, expr); - v.add_qualif(inner); - } - } - Some(def::DefLocal(..)) if v.mode == Mode::ConstFn => { - // Sadly, we can't determine whether the types are zero-sized. - v.add_qualif(ConstQualif::NOT_CONST | ConstQualif::NON_ZERO_SIZED); - } - def => { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - debug!("(checking const) found bad def: {:?}", def); - span_err!(v.tcx.sess, e.span, E0014, - "paths in {}s may only refer to constants \ - or functions", v.msg()); - } - } - } - } - hir::ExprCall(ref callee, _) => { - let mut callee = &**callee; - loop { - callee = match callee.node { - hir::ExprBlock(ref block) => match block.expr { - Some(ref tail) => &**tail, - None => break - }, - _ => break - }; - } - let def = v.tcx.def_map.borrow().get(&callee.id).map(|d| d.full_def()); - let is_const = match def { - Some(def::DefStruct(..)) => true, - Some(def::DefVariant(..)) => { - // Count the discriminator. 
- v.add_qualif(ConstQualif::NON_ZERO_SIZED); - true - } - Some(def::DefFn(did, _)) => { - v.handle_const_fn_call(e, did, node_ty) - } - Some(def::DefMethod(did)) => { - match v.tcx.impl_or_trait_item(did).container() { - ty::ImplContainer(_) => { - v.handle_const_fn_call(e, did, node_ty) - } - ty::TraitContainer(_) => false - } - } - _ => false - }; - if !is_const { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - // FIXME(#24111) Remove this check when const fn stabilizes - let (msg, note) = - if let UnstableFeatures::Disallow = v.tcx.sess.opts.unstable_features { - (format!("function calls in {}s are limited to \ - struct and enum constructors", - v.msg()), - Some("a limited form of compile-time function \ - evaluation is available on a nightly \ - compiler via `const fn`")) - } else { - (format!("function calls in {}s are limited \ - to constant functions, \ - struct and enum constructors", - v.msg()), - None) - }; - let mut err = struct_span_err!(v.tcx.sess, e.span, E0015, "{}", msg); - if let Some(note) = note { - err.span_note(e.span, note); - } - err.emit(); - } - } - } - hir::ExprMethodCall(..) => { - let method = v.tcx.tables.borrow().method_map[&method_call]; - let is_const = match v.tcx.impl_or_trait_item(method.def_id).container() { - ty::ImplContainer(_) => v.handle_const_fn_call(e, method.def_id, node_ty), - ty::TraitContainer(_) => false - }; - if !is_const { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0378, - "method calls in {}s are limited to \ - constant inherent methods", v.msg()); - } - } - } - hir::ExprStruct(..) => { - let did = v.tcx.def_map.borrow().get(&e.id).map(|def| def.def_id()); - if did == v.tcx.lang_items.unsafe_cell_type() { - v.add_qualif(ConstQualif::MUTABLE_MEM); - } - } - - hir::ExprLit(_) | - hir::ExprAddrOf(..) => { - v.add_qualif(ConstQualif::NON_ZERO_SIZED); - } - - hir::ExprRepeat(..) 
=> { - v.add_qualif(ConstQualif::PREFER_IN_PLACE); - } - - hir::ExprClosure(..) => { - // Paths in constant contexts cannot refer to local variables, - // as there are none, and thus closures can't have upvars there. - if v.tcx.with_freevars(e.id, |fv| !fv.is_empty()) { - assert!(v.mode == Mode::Var, - "global closures can't capture anything"); - v.add_qualif(ConstQualif::NOT_CONST); - } - } - - hir::ExprBlock(_) | - hir::ExprIndex(..) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprVec(_) | - hir::ExprType(..) | - hir::ExprTup(..) => {} - - // Conditional control flow (possible to implement). - hir::ExprMatch(..) | - hir::ExprIf(..) | - - // Loops (not very meaningful in constants). - hir::ExprWhile(..) | - hir::ExprLoop(..) | - - // More control flow (also not very meaningful). - hir::ExprBreak(_) | - hir::ExprAgain(_) | - hir::ExprRet(_) | - - // Miscellaneous expressions that could be implemented. - hir::ExprRange(..) | - - // Expressions with side-effects. - hir::ExprAssign(..) | - hir::ExprAssignOp(..) | - hir::ExprInlineAsm(_) => { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0019, - "{} contains unimplemented expression type", v.msg()); - } - } - } -} - -/// Check the adjustments of an expression -fn check_adjustments<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr) { - match v.tcx.tables.borrow().adjustments.get(&e.id) { - None | - Some(&ty::adjustment::AdjustReifyFnPointer) | - Some(&ty::adjustment::AdjustUnsafeFnPointer) => {} - - Some(&ty::adjustment::AdjustDerefRef( - ty::adjustment::AutoDerefRef { autoderefs, .. 
} - )) => { - if (0..autoderefs as u32).any(|autoderef| { - v.tcx.is_overloaded_autoderef(e.id, autoderef) - }) { - v.add_qualif(ConstQualif::NOT_CONST); - if v.mode != Mode::Var { - span_err!(v.tcx.sess, e.span, E0400, - "user-defined dereference operators are not allowed in {}s", - v.msg()); - } - } - } - } -} - -pub fn check_crate(tcx: &ty::ctxt) { - tcx.visit_all_items_in_krate(DepNode::CheckConst, &mut CheckCrateVisitor { - tcx: tcx, - mode: Mode::Var, - qualif: ConstQualif::NOT_CONST, - rvalue_borrows: NodeMap() - }); - tcx.sess.abort_if_errors(); -} - -impl<'a, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'tcx> { - fn consume(&mut self, - _consume_id: ast::NodeId, - consume_span: Span, - cmt: mc::cmt, - _mode: euv::ConsumeMode) { - let mut cur = &cmt; - loop { - match cur.cat { - Categorization::StaticItem => { - if self.mode != Mode::Var { - // statics cannot be consumed by value at any time, that would imply - // that they're an initializer (what a const is for) or kept in sync - // over time (not feasible), so deny it outright. - span_err!(self.tcx.sess, consume_span, E0394, - "cannot refer to other statics by value, use the \ - address-of operator or a constant instead"); - } - break; - } - Categorization::Deref(ref cmt, _, _) | - Categorization::Downcast(ref cmt, _) | - Categorization::Interior(ref cmt, _) => cur = cmt, - - Categorization::Rvalue(..) | - Categorization::Upvar(..) | - Categorization::Local(..) => break - } - } - } - fn borrow(&mut self, - borrow_id: ast::NodeId, - borrow_span: Span, - cmt: mc::cmt<'tcx>, - _loan_region: ty::Region, - bk: ty::BorrowKind, - loan_cause: euv::LoanCause) - { - // Kind of hacky, but we allow Unsafe coercions in constants. - // These occur when we convert a &T or *T to a *U, as well as - // when making a thin pointer (e.g., `*T`) into a fat pointer - // (e.g., `*Trait`). 
- match loan_cause { - euv::LoanCause::AutoUnsafe => { - return; - } - _ => { } - } - - let mut cur = &cmt; - let mut is_interior = false; - loop { - match cur.cat { - Categorization::Rvalue(..) => { - if loan_cause == euv::MatchDiscriminant { - // Ignore the dummy immutable borrow created by EUV. - break; - } - let mutbl = bk.to_mutbl_lossy(); - if mutbl == hir::MutMutable && self.mode == Mode::StaticMut { - // Mutable slices are the only `&mut` allowed in - // globals, but only in `static mut`, nowhere else. - // FIXME: This exception is really weird... there isn't - // any fundamental reason to restrict this based on - // type of the expression. `&mut [1]` has exactly the - // same representation as &mut 1. - match cmt.ty.sty { - ty::TyArray(_, _) | ty::TySlice(_) => break, - _ => {} - } - } - self.record_borrow(borrow_id, mutbl); - break; - } - Categorization::StaticItem => { - if is_interior && self.mode != Mode::Var { - // Borrowed statics can specifically *only* have their address taken, - // not any number of other borrows such as borrowing fields, reading - // elements of an array, etc. - span_err!(self.tcx.sess, borrow_span, E0494, - "cannot refer to the interior of another \ - static, use a constant instead"); - } - break; - } - Categorization::Deref(ref cmt, _, _) | - Categorization::Downcast(ref cmt, _) | - Categorization::Interior(ref cmt, _) => { - is_interior = true; - cur = cmt; - } - - Categorization::Upvar(..) | - Categorization::Local(..) 
=> break - } - } - } - - fn decl_without_init(&mut self, - _id: ast::NodeId, - _span: Span) {} - fn mutate(&mut self, - _assignment_id: ast::NodeId, - _assignment_span: Span, - _assignee_cmt: mc::cmt, - _mode: euv::MutateMode) {} - - fn matched_pat(&mut self, - _: &hir::Pat, - _: mc::cmt, - _: euv::MatchMode) {} - - fn consume_pat(&mut self, - _consume_pat: &hir::Pat, - _cmt: mc::cmt, - _mode: euv::ConsumeMode) {} -} diff --git a/src/librustc/middle/check_loop.rs b/src/librustc/middle/check_loop.rs deleted file mode 100644 index 22e9df63d0185..0000000000000 --- a/src/librustc/middle/check_loop.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -use self::Context::*; - -use session::Session; - -use syntax::codemap::Span; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; - -#[derive(Clone, Copy, PartialEq)] -enum Context { - Normal, Loop, Closure -} - -#[derive(Copy, Clone)] -struct CheckLoopVisitor<'a> { - sess: &'a Session, - cx: Context -} - -pub fn check_crate(sess: &Session, krate: &hir::Crate) { - krate.visit_all_items(&mut CheckLoopVisitor { sess: sess, cx: Normal }); -} - -impl<'a, 'v> Visitor<'v> for CheckLoopVisitor<'a> { - fn visit_item(&mut self, i: &hir::Item) { - self.with_context(Normal, |v| intravisit::walk_item(v, i)); - } - - fn visit_expr(&mut self, e: &hir::Expr) { - match e.node { - hir::ExprWhile(ref e, ref b, _) => { - self.visit_expr(&**e); - self.with_context(Loop, |v| v.visit_block(&**b)); - } - hir::ExprLoop(ref b, _) => { - self.with_context(Loop, |v| v.visit_block(&**b)); - } - hir::ExprClosure(_, _, ref b) => { - self.with_context(Closure, |v| v.visit_block(&**b)); - } - 
hir::ExprBreak(_) => self.require_loop("break", e.span), - hir::ExprAgain(_) => self.require_loop("continue", e.span), - _ => intravisit::walk_expr(self, e) - } - } -} - -impl<'a> CheckLoopVisitor<'a> { - fn with_context(&mut self, cx: Context, f: F) where - F: FnOnce(&mut CheckLoopVisitor<'a>), - { - let old_cx = self.cx; - self.cx = cx; - f(self); - self.cx = old_cx; - } - - fn require_loop(&self, name: &str, span: Span) { - match self.cx { - Loop => {} - Closure => { - span_err!(self.sess, span, E0267, - "`{}` inside of a closure", name); - } - Normal => { - span_err!(self.sess, span, E0268, - "`{}` outside of loop", name); - } - } - } -} diff --git a/src/librustc/middle/check_match.rs b/src/librustc/middle/check_match.rs deleted file mode 100644 index 8e5c5788201cc..0000000000000 --- a/src/librustc/middle/check_match.rs +++ /dev/null @@ -1,1200 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::Constructor::*; -use self::Usefulness::*; -use self::WitnessPreference::*; - -use dep_graph::DepNode; -use middle::const_eval::{compare_const_vals, ConstVal}; -use middle::const_eval::{eval_const_expr, eval_const_expr_partial}; -use middle::const_eval::{const_expr_to_pat, lookup_const_by_id}; -use middle::const_eval::EvalHint::ExprTypeChecked; -use middle::def::*; -use middle::def_id::{DefId}; -use middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor}; -use middle::expr_use_visitor::{LoanCause, MutateMode}; -use middle::expr_use_visitor as euv; -use middle::infer; -use middle::mem_categorization::{cmt}; -use middle::pat_util::*; -use middle::ty::*; -use middle::ty; -use std::cmp::Ordering; -use std::fmt; -use std::iter::{FromIterator, IntoIterator, repeat}; - -use rustc_front::hir; -use rustc_front::hir::Pat; -use rustc_front::intravisit::{self, Visitor, FnKind}; -use rustc_front::util as front_util; -use rustc_back::slice; - -use syntax::ast::{self, DUMMY_NODE_ID, NodeId}; -use syntax::ast_util; -use syntax::codemap::{Span, Spanned, DUMMY_SP}; -use rustc_front::fold::{Folder, noop_fold_pat}; -use rustc_front::print::pprust::pat_to_string; -use syntax::ptr::P; -use util::nodemap::FnvHashMap; - -pub const DUMMY_WILD_PAT: &'static Pat = &Pat { - id: DUMMY_NODE_ID, - node: hir::PatWild, - span: DUMMY_SP -}; - -struct Matrix<'a>(Vec>); - -/// Pretty-printer for matrices of patterns, example: -/// ++++++++++++++++++++++++++ -/// + _ + [] + -/// ++++++++++++++++++++++++++ -/// + true + [First] + -/// ++++++++++++++++++++++++++ -/// + true + [Second(true)] + -/// ++++++++++++++++++++++++++ -/// + false + [_] + -/// ++++++++++++++++++++++++++ -/// + _ + [_, _, ..tail] + -/// ++++++++++++++++++++++++++ -impl<'a> fmt::Debug for Matrix<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "\n")); - - let &Matrix(ref m) = self; - let pretty_printed_matrix: Vec> = m.iter().map(|row| { - row.iter() - .map(|&pat| 
pat_to_string(&*pat)) - .collect::>() - }).collect(); - - let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0); - assert!(m.iter().all(|row| row.len() == column_count)); - let column_widths: Vec = (0..column_count).map(|col| { - pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0) - }).collect(); - - let total_width = column_widths.iter().cloned().sum::() + column_count * 3 + 1; - let br = repeat('+').take(total_width).collect::(); - try!(write!(f, "{}\n", br)); - for row in pretty_printed_matrix { - try!(write!(f, "+")); - for (column, pat_str) in row.into_iter().enumerate() { - try!(write!(f, " ")); - try!(write!(f, "{:1$}", pat_str, column_widths[column])); - try!(write!(f, " +")); - } - try!(write!(f, "\n")); - try!(write!(f, "{}\n", br)); - } - Ok(()) - } -} - -impl<'a> FromIterator> for Matrix<'a> { - fn from_iter>>(iter: T) -> Matrix<'a> { - Matrix(iter.into_iter().collect()) - } -} - -//NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv -pub struct MatchCheckCtxt<'a, 'tcx: 'a> { - pub tcx: &'a ty::ctxt<'tcx>, - pub param_env: ParameterEnvironment<'a, 'tcx>, -} - -#[derive(Clone, PartialEq)] -pub enum Constructor { - /// The constructor of all patterns that don't vary by constructor, - /// e.g. struct patterns and fixed-length arrays. - Single, - /// Enum variants. - Variant(DefId), - /// Literal values. - ConstantValue(ConstVal), - /// Ranges of literal values (2..5). - ConstantRange(ConstVal, ConstVal), - /// Array patterns of length n. - Slice(usize), - /// Array patterns with a subslice. 
- SliceWithSubslice(usize, usize) -} - -#[derive(Clone, PartialEq)] -enum Usefulness { - Useful, - UsefulWithWitness(Vec>), - NotUseful -} - -#[derive(Copy, Clone)] -enum WitnessPreference { - ConstructWitness, - LeaveOutWitness -} - -impl<'a, 'tcx, 'v> Visitor<'v> for MatchCheckCtxt<'a, 'tcx> { - fn visit_expr(&mut self, ex: &hir::Expr) { - check_expr(self, ex); - } - fn visit_local(&mut self, l: &hir::Local) { - check_local(self, l); - } - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, s: Span, n: NodeId) { - check_fn(self, fk, fd, b, s, n); - } -} - -pub fn check_crate(tcx: &ty::ctxt) { - tcx.visit_all_items_in_krate(DepNode::MatchCheck, &mut MatchCheckCtxt { - tcx: tcx, - param_env: tcx.empty_parameter_environment(), - }); - tcx.sess.abort_if_errors(); -} - -fn check_expr(cx: &mut MatchCheckCtxt, ex: &hir::Expr) { - intravisit::walk_expr(cx, ex); - match ex.node { - hir::ExprMatch(ref scrut, ref arms, source) => { - for arm in arms { - // First, check legality of move bindings. - check_legality_of_move_bindings(cx, - arm.guard.is_some(), - &arm.pats); - - // Second, if there is a guard on each arm, make sure it isn't - // assigning or borrowing anything mutably. - match arm.guard { - Some(ref guard) => check_for_mutation_in_guard(cx, &**guard), - None => {} - } - } - - let mut static_inliner = StaticInliner::new(cx.tcx, None); - let inlined_arms = arms.iter().map(|arm| { - (arm.pats.iter().map(|pat| { - static_inliner.fold_pat((*pat).clone()) - }).collect(), arm.guard.as_ref().map(|e| &**e)) - }).collect::>, Option<&hir::Expr>)>>(); - - // Bail out early if inlining failed. - if static_inliner.failed { - return; - } - - for pat in inlined_arms - .iter() - .flat_map(|&(ref pats, _)| pats) { - // Third, check legality of move bindings. - check_legality_of_bindings_in_at_patterns(cx, &**pat); - - // Fourth, check if there are any references to NaN that we should warn about. 
- check_for_static_nan(cx, &**pat); - - // Fifth, check if for any of the patterns that match an enumerated type - // are bindings with the same name as one of the variants of said type. - check_for_bindings_named_the_same_as_variants(cx, &**pat); - } - - // Fourth, check for unreachable arms. - check_arms(cx, &inlined_arms[..], source); - - // Finally, check if the whole match expression is exhaustive. - // Check for empty enum, because is_useful only works on inhabited types. - let pat_ty = cx.tcx.node_id_to_type(scrut.id); - if inlined_arms.is_empty() { - if !pat_ty.is_empty(cx.tcx) { - // We know the type is inhabited, so this must be wrong - let mut err = struct_span_err!(cx.tcx.sess, ex.span, E0002, - "non-exhaustive patterns: type {} is non-empty", - pat_ty); - span_help!(&mut err, ex.span, - "Please ensure that all possible cases are being handled; \ - possibly adding wildcards or more match arms."); - err.emit(); - } - // If the type *is* empty, it's vacuously exhaustive - return; - } - - let matrix: Matrix = inlined_arms - .iter() - .filter(|&&(_, guard)| guard.is_none()) - .flat_map(|arm| &arm.0) - .map(|pat| vec![&**pat]) - .collect(); - check_exhaustive(cx, ex.span, &matrix, source); - }, - _ => () - } -} - -fn check_for_bindings_named_the_same_as_variants(cx: &MatchCheckCtxt, pat: &Pat) { - front_util::walk_pat(pat, |p| { - match p.node { - hir::PatIdent(hir::BindByValue(hir::MutImmutable), ident, None) => { - let pat_ty = cx.tcx.pat_ty(p); - if let ty::TyEnum(edef, _) = pat_ty.sty { - let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def()); - if let Some(DefLocal(..)) = def { - if edef.variants.iter().any(|variant| - variant.name == ident.node.unhygienic_name - && variant.kind() == VariantKind::Unit - ) { - let ty_path = cx.tcx.item_path_str(edef.did); - let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, - "pattern binding `{}` is named the same as one \ - of the variants of the type `{}`", - ident.node, ty_path); - 
fileline_help!(err, p.span, - "if you meant to match on a variant, \ - consider making the path in the pattern qualified: `{}::{}`", - ty_path, ident.node); - err.emit(); - } - } - } - } - _ => () - } - true - }); -} - -// Check that we do not match against a static NaN (#6804) -fn check_for_static_nan(cx: &MatchCheckCtxt, pat: &Pat) { - front_util::walk_pat(pat, |p| { - if let hir::PatLit(ref expr) = p.node { - match eval_const_expr_partial(cx.tcx, &**expr, ExprTypeChecked, None) { - Ok(ConstVal::Float(f)) if f.is_nan() => { - span_warn!(cx.tcx.sess, p.span, E0003, - "unmatchable NaN in pattern, \ - use the is_nan method in a guard instead"); - } - Ok(_) => {} - - Err(err) => { - let mut diag = struct_span_err!(cx.tcx.sess, err.span, E0471, - "constant evaluation error: {}", - err.description()); - if !p.span.contains(err.span) { - diag.span_note(p.span, "in pattern here"); - } - diag.emit(); - } - } - } - true - }); -} - -// Check for unreachable patterns -fn check_arms(cx: &MatchCheckCtxt, - arms: &[(Vec>, Option<&hir::Expr>)], - source: hir::MatchSource) { - let mut seen = Matrix(vec![]); - let mut printed_if_let_err = false; - for &(ref pats, guard) in arms { - for pat in pats { - let v = vec![&**pat]; - - match is_useful(cx, &seen, &v[..], LeaveOutWitness) { - NotUseful => { - match source { - hir::MatchSource::IfLetDesugar { .. } => { - if printed_if_let_err { - // we already printed an irrefutable if-let pattern error. - // We don't want two, that's just confusing. 
- } else { - // find the first arm pattern so we can use its span - let &(ref first_arm_pats, _) = &arms[0]; - let first_pat = &first_arm_pats[0]; - let span = first_pat.span; - span_err!(cx.tcx.sess, span, E0162, "irrefutable if-let pattern"); - printed_if_let_err = true; - } - }, - - hir::MatchSource::WhileLetDesugar => { - // find the first arm pattern so we can use its span - let &(ref first_arm_pats, _) = &arms[0]; - let first_pat = &first_arm_pats[0]; - let span = first_pat.span; - span_err!(cx.tcx.sess, span, E0165, "irrefutable while-let pattern"); - }, - - hir::MatchSource::ForLoopDesugar => { - // this is a bug, because on `match iter.next()` we cover - // `Some()` and `None`. It's impossible to have an unreachable - // pattern - // (see libsyntax/ext/expand.rs for the full expansion of a for loop) - cx.tcx.sess.span_bug(pat.span, "unreachable for-loop pattern") - }, - - hir::MatchSource::Normal => { - span_err!(cx.tcx.sess, pat.span, E0001, "unreachable pattern") - }, - } - } - Useful => (), - UsefulWithWitness(_) => unreachable!() - } - if guard.is_none() { - let Matrix(mut rows) = seen; - rows.push(v); - seen = Matrix(rows); - } - } - } -} - -fn raw_pat<'a>(p: &'a Pat) -> &'a Pat { - match p.node { - hir::PatIdent(_, _, Some(ref s)) => raw_pat(&**s), - _ => p - } -} - -fn check_exhaustive(cx: &MatchCheckCtxt, sp: Span, matrix: &Matrix, source: hir::MatchSource) { - match is_useful(cx, matrix, &[DUMMY_WILD_PAT], ConstructWitness) { - UsefulWithWitness(pats) => { - let witness = match &pats[..] { - [ref witness] => &**witness, - [] => DUMMY_WILD_PAT, - _ => unreachable!() - }; - match source { - hir::MatchSource::ForLoopDesugar => { - // `witness` has the form `Some()`, peel off the `Some` - let witness = match witness.node { - hir::PatEnum(_, Some(ref pats)) => match &pats[..] 
{ - [ref pat] => &**pat, - _ => unreachable!(), - }, - _ => unreachable!(), - }; - - span_err!(cx.tcx.sess, sp, E0297, - "refutable pattern in `for` loop binding: \ - `{}` not covered", - pat_to_string(witness)); - }, - _ => { - span_err!(cx.tcx.sess, sp, E0004, - "non-exhaustive patterns: `{}` not covered", - pat_to_string(witness) - ); - }, - } - } - NotUseful => { - // This is good, wildcard pattern isn't reachable - }, - _ => unreachable!() - } -} - -fn const_val_to_expr(value: &ConstVal) -> P { - let node = match value { - &ConstVal::Bool(b) => ast::LitBool(b), - _ => unreachable!() - }; - P(hir::Expr { - id: 0, - node: hir::ExprLit(P(Spanned { node: node, span: DUMMY_SP })), - span: DUMMY_SP, - attrs: None, - }) -} - -pub struct StaticInliner<'a, 'tcx: 'a> { - pub tcx: &'a ty::ctxt<'tcx>, - pub failed: bool, - pub renaming_map: Option<&'a mut FnvHashMap<(NodeId, Span), NodeId>>, -} - -impl<'a, 'tcx> StaticInliner<'a, 'tcx> { - pub fn new<'b>(tcx: &'b ty::ctxt<'tcx>, - renaming_map: Option<&'b mut FnvHashMap<(NodeId, Span), NodeId>>) - -> StaticInliner<'b, 'tcx> { - StaticInliner { - tcx: tcx, - failed: false, - renaming_map: renaming_map - } - } -} - -struct RenamingRecorder<'map> { - substituted_node_id: NodeId, - origin_span: Span, - renaming_map: &'map mut FnvHashMap<(NodeId, Span), NodeId> -} - -impl<'map> ast_util::IdVisitingOperation for RenamingRecorder<'map> { - fn visit_id(&mut self, node_id: NodeId) { - let key = (node_id, self.origin_span); - self.renaming_map.insert(key, self.substituted_node_id); - } -} - -impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> { - fn fold_pat(&mut self, pat: P) -> P { - return match pat.node { - hir::PatIdent(..) | hir::PatEnum(..) | hir::PatQPath(..) 
=> { - let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()); - match def { - Some(DefAssociatedConst(did)) | - Some(DefConst(did)) => match lookup_const_by_id(self.tcx, did, - Some(pat.id), None) { - Some(const_expr) => { - const_expr_to_pat(self.tcx, const_expr, pat.span).map(|new_pat| { - - if let Some(ref mut renaming_map) = self.renaming_map { - // Record any renamings we do here - record_renamings(const_expr, &pat, renaming_map); - } - - new_pat - }) - } - None => { - self.failed = true; - span_err!(self.tcx.sess, pat.span, E0158, - "statics cannot be referenced in patterns"); - pat - } - }, - _ => noop_fold_pat(pat, self) - } - } - _ => noop_fold_pat(pat, self) - }; - - fn record_renamings(const_expr: &hir::Expr, - substituted_pat: &hir::Pat, - renaming_map: &mut FnvHashMap<(NodeId, Span), NodeId>) { - let mut renaming_recorder = RenamingRecorder { - substituted_node_id: substituted_pat.id, - origin_span: substituted_pat.span, - renaming_map: renaming_map, - }; - - let mut id_visitor = front_util::IdVisitor::new(&mut renaming_recorder); - - id_visitor.visit_expr(const_expr); - } - } -} - -/// Constructs a partial witness for a pattern given a list of -/// patterns expanded by the specialization step. -/// -/// When a pattern P is discovered to be useful, this function is used bottom-up -/// to reconstruct a complete witness, e.g. a pattern P' that covers a subset -/// of values, V, where each value in that set is not covered by any previously -/// used patterns and is covered by the pattern P'. 
Examples: -/// -/// left_ty: tuple of 3 elements -/// pats: [10, 20, _] => (10, 20, _) -/// -/// left_ty: struct X { a: (bool, &'static str), b: usize} -/// pats: [(false, "foo"), 42] => X { a: (false, "foo"), b: 42 } -fn construct_witness<'a,'tcx>(cx: &MatchCheckCtxt<'a,'tcx>, ctor: &Constructor, - pats: Vec<&Pat>, left_ty: Ty<'tcx>) -> P { - let pats_len = pats.len(); - let mut pats = pats.into_iter().map(|p| P((*p).clone())); - let pat = match left_ty.sty { - ty::TyTuple(_) => hir::PatTup(pats.collect()), - - ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => { - let v = adt.variant_of_ctor(ctor); - if let VariantKind::Struct = v.kind() { - let field_pats: hir::HirVec<_> = v.fields.iter() - .zip(pats) - .filter(|&(_, ref pat)| pat.node != hir::PatWild) - .map(|(field, pat)| Spanned { - span: DUMMY_SP, - node: hir::FieldPat { - name: field.name, - pat: pat, - is_shorthand: false, - } - }).collect(); - let has_more_fields = field_pats.len() < pats_len; - hir::PatStruct(def_to_path(cx.tcx, v.did), field_pats, has_more_fields) - } else { - hir::PatEnum(def_to_path(cx.tcx, v.did), Some(pats.collect())) - } - } - - ty::TyRef(_, ty::TypeAndMut { ty, mutbl }) => { - match ty.sty { - ty::TyArray(_, n) => match ctor { - &Single => { - assert_eq!(pats_len, n); - hir::PatVec(pats.collect(), None, hir::HirVec::new()) - }, - _ => unreachable!() - }, - ty::TySlice(_) => match ctor { - &Slice(n) => { - assert_eq!(pats_len, n); - hir::PatVec(pats.collect(), None, hir::HirVec::new()) - }, - _ => unreachable!() - }, - ty::TyStr => hir::PatWild, - - _ => { - assert_eq!(pats_len, 1); - hir::PatRegion(pats.nth(0).unwrap(), mutbl) - } - } - } - - ty::TyArray(_, len) => { - assert_eq!(pats_len, len); - hir::PatVec(pats.collect(), None, hir::HirVec::new()) - } - - _ => { - match *ctor { - ConstantValue(ref v) => hir::PatLit(const_val_to_expr(v)), - _ => hir::PatWild, - } - } - }; - - P(hir::Pat { - id: 0, - node: pat, - span: DUMMY_SP - }) -} - -impl<'tcx, 'container> ty::AdtDefData<'tcx, 
'container> { - fn variant_of_ctor(&self, - ctor: &Constructor) - -> &VariantDefData<'tcx, 'container> { - match ctor { - &Variant(vid) => self.variant_with_id(vid), - _ => self.struct_variant() - } - } -} - -fn missing_constructor(cx: &MatchCheckCtxt, &Matrix(ref rows): &Matrix, - left_ty: Ty, max_slice_length: usize) -> Option { - let used_constructors: Vec = rows.iter() - .flat_map(|row| pat_constructors(cx, row[0], left_ty, max_slice_length)) - .collect(); - all_constructors(cx, left_ty, max_slice_length) - .into_iter() - .find(|c| !used_constructors.contains(c)) -} - -/// This determines the set of all possible constructors of a pattern matching -/// values of type `left_ty`. For vectors, this would normally be an infinite set -/// but is instead bounded by the maximum fixed length of slice patterns in -/// the column of patterns being analyzed. -fn all_constructors(_cx: &MatchCheckCtxt, left_ty: Ty, - max_slice_length: usize) -> Vec { - match left_ty.sty { - ty::TyBool => - [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(), - - ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty { - ty::TySlice(_) => - (0..max_slice_length+1).map(|length| Slice(length)).collect(), - _ => vec![Single] - }, - - ty::TyEnum(def, _) => def.variants.iter().map(|v| Variant(v.did)).collect(), - _ => vec![Single] - } -} - -// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html -// -// Whether a vector `v` of patterns is 'useful' in relation to a set of such -// vectors `m` is defined as there being a set of inputs that will match `v` -// but not any of the sets in `m`. -// -// This is used both for reachability checking (if a pattern isn't useful in -// relation to preceding patterns, it is not reachable) and exhaustiveness -// checking (if a wildcard pattern is useful in relation to a matrix, the -// matrix isn't exhaustive). - -// Note: is_useful doesn't work on empty types, as the paper notes. -// So it assumes that v is non-empty. 
-fn is_useful(cx: &MatchCheckCtxt, - matrix: &Matrix, - v: &[&Pat], - witness: WitnessPreference) - -> Usefulness { - let &Matrix(ref rows) = matrix; - debug!("{:?}", matrix); - if rows.is_empty() { - return match witness { - ConstructWitness => UsefulWithWitness(vec!()), - LeaveOutWitness => Useful - }; - } - if rows[0].is_empty() { - return NotUseful; - } - assert!(rows.iter().all(|r| r.len() == v.len())); - let real_pat = match rows.iter().find(|r| (*r)[0].id != DUMMY_NODE_ID) { - Some(r) => raw_pat(r[0]), - None if v.is_empty() => return NotUseful, - None => v[0] - }; - let left_ty = if real_pat.id == DUMMY_NODE_ID { - cx.tcx.mk_nil() - } else { - let left_ty = cx.tcx.pat_ty(&*real_pat); - - match real_pat.node { - hir::PatIdent(hir::BindByRef(..), _, _) => { - left_ty.builtin_deref(false, NoPreference).unwrap().ty - } - _ => left_ty, - } - }; - - let max_slice_length = rows.iter().filter_map(|row| match row[0].node { - hir::PatVec(ref before, _, ref after) => Some(before.len() + after.len()), - _ => None - }).max().map_or(0, |v| v + 1); - - let constructors = pat_constructors(cx, v[0], left_ty, max_slice_length); - if constructors.is_empty() { - match missing_constructor(cx, matrix, left_ty, max_slice_length) { - None => { - all_constructors(cx, left_ty, max_slice_length).into_iter().map(|c| { - match is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness) { - UsefulWithWitness(pats) => UsefulWithWitness({ - let arity = constructor_arity(cx, &c, left_ty); - let mut result = { - let pat_slice = &pats[..]; - let subpats: Vec<_> = (0..arity).map(|i| { - pat_slice.get(i).map_or(DUMMY_WILD_PAT, |p| &**p) - }).collect(); - vec![construct_witness(cx, &c, subpats, left_ty)] - }; - result.extend(pats.into_iter().skip(arity)); - result - }), - result => result - } - }).find(|result| result != &NotUseful).unwrap_or(NotUseful) - }, - - Some(constructor) => { - let matrix = rows.iter().filter_map(|r| { - if pat_is_binding_or_wild(&cx.tcx.def_map.borrow(), 
raw_pat(r[0])) { - Some(r[1..].to_vec()) - } else { - None - } - }).collect(); - match is_useful(cx, &matrix, &v[1..], witness) { - UsefulWithWitness(pats) => { - let arity = constructor_arity(cx, &constructor, left_ty); - let wild_pats = vec![DUMMY_WILD_PAT; arity]; - let enum_pat = construct_witness(cx, &constructor, wild_pats, left_ty); - let mut new_pats = vec![enum_pat]; - new_pats.extend(pats); - UsefulWithWitness(new_pats) - }, - result => result - } - } - } - } else { - constructors.into_iter().map(|c| - is_useful_specialized(cx, matrix, v, c.clone(), left_ty, witness) - ).find(|result| result != &NotUseful).unwrap_or(NotUseful) - } -} - -fn is_useful_specialized(cx: &MatchCheckCtxt, &Matrix(ref m): &Matrix, - v: &[&Pat], ctor: Constructor, lty: Ty, - witness: WitnessPreference) -> Usefulness { - let arity = constructor_arity(cx, &ctor, lty); - let matrix = Matrix(m.iter().filter_map(|r| { - specialize(cx, &r[..], &ctor, 0, arity) - }).collect()); - match specialize(cx, v, &ctor, 0, arity) { - Some(v) => is_useful(cx, &matrix, &v[..], witness), - None => NotUseful - } -} - -/// Determines the constructors that the given pattern can be specialized to. -/// -/// In most cases, there's only one constructor that a specific pattern -/// represents, such as a specific enum variant or a specific literal value. -/// Slice patterns, however, can match slices of different lengths. For instance, -/// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on. -/// -/// On the other hand, a wild pattern and an identifier pattern cannot be -/// specialized in any way. -fn pat_constructors(cx: &MatchCheckCtxt, p: &Pat, - left_ty: Ty, max_slice_length: usize) -> Vec { - let pat = raw_pat(p); - match pat.node { - hir::PatIdent(..) 
=> - match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => - cx.tcx.sess.span_bug(pat.span, "const pattern should've \ - been rewritten"), - Some(DefStruct(_)) => vec!(Single), - Some(DefVariant(_, id, _)) => vec!(Variant(id)), - _ => vec!() - }, - hir::PatEnum(..) => - match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => - cx.tcx.sess.span_bug(pat.span, "const pattern should've \ - been rewritten"), - Some(DefVariant(_, id, _)) => vec!(Variant(id)), - _ => vec!(Single) - }, - hir::PatQPath(..) => - cx.tcx.sess.span_bug(pat.span, "const pattern should've \ - been rewritten"), - hir::PatStruct(..) => - match cx.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => - cx.tcx.sess.span_bug(pat.span, "const pattern should've \ - been rewritten"), - Some(DefVariant(_, id, _)) => vec!(Variant(id)), - _ => vec!(Single) - }, - hir::PatLit(ref expr) => - vec!(ConstantValue(eval_const_expr(cx.tcx, &**expr))), - hir::PatRange(ref lo, ref hi) => - vec!(ConstantRange(eval_const_expr(cx.tcx, &**lo), eval_const_expr(cx.tcx, &**hi))), - hir::PatVec(ref before, ref slice, ref after) => - match left_ty.sty { - ty::TyArray(_, _) => vec!(Single), - _ => if slice.is_some() { - (before.len() + after.len()..max_slice_length+1) - .map(|length| Slice(length)) - .collect() - } else { - vec!(Slice(before.len() + after.len())) - } - }, - hir::PatBox(_) | hir::PatTup(_) | hir::PatRegion(..) => - vec!(Single), - hir::PatWild => - vec!(), - } -} - -/// This computes the arity of a constructor. The arity of a constructor -/// is how many subpattern patterns of that constructor should be expanded to. -/// -/// For instance, a tuple pattern (_, 42, Some([])) has the arity of 3. -/// A struct pattern's arity is the number of fields it contains, etc. 
-pub fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize { - match ty.sty { - ty::TyTuple(ref fs) => fs.len(), - ty::TyBox(_) => 1, - ty::TyRef(_, ty::TypeAndMut { ty, .. }) => match ty.sty { - ty::TySlice(_) => match *ctor { - Slice(length) => length, - ConstantValue(_) => 0, - _ => unreachable!() - }, - ty::TyStr => 0, - _ => 1 - }, - ty::TyEnum(adt, _) | ty::TyStruct(adt, _) => { - adt.variant_of_ctor(ctor).fields.len() - } - ty::TyArray(_, n) => n, - _ => 0 - } -} - -fn range_covered_by_constructor(ctor: &Constructor, - from: &ConstVal, to: &ConstVal) -> Option { - let (c_from, c_to) = match *ctor { - ConstantValue(ref value) => (value, value), - ConstantRange(ref from, ref to) => (from, to), - Single => return Some(true), - _ => unreachable!() - }; - let cmp_from = compare_const_vals(c_from, from); - let cmp_to = compare_const_vals(c_to, to); - match (cmp_from, cmp_to) { - (Some(cmp_from), Some(cmp_to)) => { - Some(cmp_from != Ordering::Less && cmp_to != Ordering::Greater) - } - _ => None - } -} - -/// This is the main specialization step. It expands the first pattern in the given row -/// into `arity` patterns based on the constructor. For most patterns, the step is trivial, -/// for instance tuple patterns are flattened and box patterns expand into their inner pattern. -/// -/// OTOH, slice patterns with a subslice pattern (..tail) can be expanded into multiple -/// different patterns. -/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing -/// fields filled with wild patterns. 
-pub fn specialize<'a>(cx: &MatchCheckCtxt, r: &[&'a Pat], - constructor: &Constructor, col: usize, arity: usize) -> Option> { - let &Pat { - id: pat_id, ref node, span: pat_span - } = raw_pat(r[col]); - let head: Option> = match *node { - hir::PatWild => - Some(vec![DUMMY_WILD_PAT; arity]), - - hir::PatIdent(_, _, _) => { - let opt_def = cx.tcx.def_map.borrow().get(&pat_id).map(|d| d.full_def()); - match opt_def { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => - cx.tcx.sess.span_bug(pat_span, "const pattern should've \ - been rewritten"), - Some(DefVariant(_, id, _)) => if *constructor == Variant(id) { - Some(vec!()) - } else { - None - }, - _ => Some(vec![DUMMY_WILD_PAT; arity]) - } - } - - hir::PatEnum(_, ref args) => { - let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def(); - match def { - DefConst(..) | DefAssociatedConst(..) => - cx.tcx.sess.span_bug(pat_span, "const pattern should've \ - been rewritten"), - DefVariant(_, id, _) if *constructor != Variant(id) => None, - DefVariant(..) | DefStruct(..) 
=> { - Some(match args { - &Some(ref args) => args.iter().map(|p| &**p).collect(), - &None => vec![DUMMY_WILD_PAT; arity], - }) - } - _ => None - } - } - - hir::PatQPath(_, _) => { - cx.tcx.sess.span_bug(pat_span, "const pattern should've \ - been rewritten") - } - - hir::PatStruct(_, ref pattern_fields, _) => { - let def = cx.tcx.def_map.borrow().get(&pat_id).unwrap().full_def(); - let adt = cx.tcx.node_id_to_type(pat_id).ty_adt_def().unwrap(); - let variant = adt.variant_of_ctor(constructor); - let def_variant = adt.variant_of_def(def); - if variant.did == def_variant.did { - Some(variant.fields.iter().map(|sf| { - match pattern_fields.iter().find(|f| f.node.name == sf.name) { - Some(ref f) => &*f.node.pat, - _ => DUMMY_WILD_PAT - } - }).collect()) - } else { - None - } - } - - hir::PatTup(ref args) => - Some(args.iter().map(|p| &**p).collect()), - - hir::PatBox(ref inner) | hir::PatRegion(ref inner, _) => - Some(vec![&**inner]), - - hir::PatLit(ref expr) => { - let expr_value = eval_const_expr(cx.tcx, &**expr); - match range_covered_by_constructor(constructor, &expr_value, &expr_value) { - Some(true) => Some(vec![]), - Some(false) => None, - None => { - span_err!(cx.tcx.sess, pat_span, E0298, "mismatched types between arms"); - None - } - } - } - - hir::PatRange(ref from, ref to) => { - let from_value = eval_const_expr(cx.tcx, &**from); - let to_value = eval_const_expr(cx.tcx, &**to); - match range_covered_by_constructor(constructor, &from_value, &to_value) { - Some(true) => Some(vec![]), - Some(false) => None, - None => { - span_err!(cx.tcx.sess, pat_span, E0299, "mismatched types between arms"); - None - } - } - } - - hir::PatVec(ref before, ref slice, ref after) => { - match *constructor { - // Fixed-length vectors. 
- Single => { - let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect(); - pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len())); - pats.extend(after.iter().map(|p| &**p)); - Some(pats) - }, - Slice(length) if before.len() + after.len() <= length && slice.is_some() => { - let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect(); - pats.extend(repeat(DUMMY_WILD_PAT).take(arity - before.len() - after.len())); - pats.extend(after.iter().map(|p| &**p)); - Some(pats) - }, - Slice(length) if before.len() + after.len() == length => { - let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect(); - pats.extend(after.iter().map(|p| &**p)); - Some(pats) - }, - SliceWithSubslice(prefix, suffix) - if before.len() == prefix - && after.len() == suffix - && slice.is_some() => { - let mut pats: Vec<&Pat> = before.iter().map(|p| &**p).collect(); - pats.extend(after.iter().map(|p| &**p)); - Some(pats) - } - _ => None - } - } - }; - head.map(|mut head| { - head.extend_from_slice(&r[..col]); - head.extend_from_slice(&r[col + 1..]); - head - }) -} - -fn check_local(cx: &mut MatchCheckCtxt, loc: &hir::Local) { - intravisit::walk_local(cx, loc); - - let pat = StaticInliner::new(cx.tcx, None).fold_pat(loc.pat.clone()); - check_irrefutable(cx, &pat, false); - - // Check legality of move bindings and `@` patterns. 
- check_legality_of_move_bindings(cx, false, slice::ref_slice(&loc.pat)); - check_legality_of_bindings_in_at_patterns(cx, &*loc.pat); -} - -fn check_fn(cx: &mut MatchCheckCtxt, - kind: FnKind, - decl: &hir::FnDecl, - body: &hir::Block, - sp: Span, - fn_id: NodeId) { - match kind { - FnKind::Closure => {} - _ => cx.param_env = ParameterEnvironment::for_item(cx.tcx, fn_id), - } - - intravisit::walk_fn(cx, kind, decl, body, sp); - - for input in &decl.inputs { - check_irrefutable(cx, &input.pat, true); - check_legality_of_move_bindings(cx, false, slice::ref_slice(&input.pat)); - check_legality_of_bindings_in_at_patterns(cx, &*input.pat); - } -} - -fn check_irrefutable(cx: &MatchCheckCtxt, pat: &Pat, is_fn_arg: bool) { - let origin = if is_fn_arg { - "function argument" - } else { - "local binding" - }; - - is_refutable(cx, pat, |uncovered_pat| { - span_err!(cx.tcx.sess, pat.span, E0005, - "refutable pattern in {}: `{}` not covered", - origin, - pat_to_string(uncovered_pat), - ); - }); -} - -fn is_refutable(cx: &MatchCheckCtxt, pat: &Pat, refutable: F) -> Option where - F: FnOnce(&Pat) -> A, -{ - let pats = Matrix(vec!(vec!(pat))); - match is_useful(cx, &pats, &[DUMMY_WILD_PAT], ConstructWitness) { - UsefulWithWitness(pats) => { - assert_eq!(pats.len(), 1); - Some(refutable(&*pats[0])) - }, - NotUseful => None, - Useful => unreachable!() - } -} - -// Legality of move bindings checking -fn check_legality_of_move_bindings(cx: &MatchCheckCtxt, - has_guard: bool, - pats: &[P]) { - let tcx = cx.tcx; - let def_map = &tcx.def_map; - let mut by_ref_span = None; - for pat in pats { - pat_bindings(def_map, &**pat, |bm, _, span, _path| { - match bm { - hir::BindByRef(_) => { - by_ref_span = Some(span); - } - hir::BindByValue(_) => { - } - } - }) - } - - let check_move = |p: &Pat, sub: Option<&Pat>| { - // check legality of moving out of the enum - - // x @ Foo(..) is legal, but x @ Foo(y) isn't. 
- if sub.map_or(false, |p| pat_contains_bindings(&def_map.borrow(), &*p)) { - span_err!(cx.tcx.sess, p.span, E0007, "cannot bind by-move with sub-bindings"); - } else if has_guard { - span_err!(cx.tcx.sess, p.span, E0008, "cannot bind by-move into a pattern guard"); - } else if by_ref_span.is_some() { - let mut err = struct_span_err!(cx.tcx.sess, p.span, E0009, - "cannot bind by-move and by-ref in the same pattern"); - span_note!(&mut err, by_ref_span.unwrap(), "by-ref binding occurs here"); - err.emit(); - } - }; - - for pat in pats { - front_util::walk_pat(&**pat, |p| { - if pat_is_binding(&def_map.borrow(), &*p) { - match p.node { - hir::PatIdent(hir::BindByValue(_), _, ref sub) => { - let pat_ty = tcx.node_id_to_type(p.id); - //FIXME: (@jroesch) this code should be floated up as well - let infcx = infer::new_infer_ctxt(cx.tcx, - &cx.tcx.tables, - Some(cx.param_env.clone())); - if infcx.type_moves_by_default(pat_ty, pat.span) { - check_move(p, sub.as_ref().map(|p| &**p)); - } - } - hir::PatIdent(hir::BindByRef(_), _, _) => { - } - _ => { - cx.tcx.sess.span_bug( - p.span, - &format!("binding pattern {} is not an \ - identifier: {:?}", - p.id, - p.node)); - } - } - } - true - }); - } -} - -/// Ensures that a pattern guard doesn't borrow by mutable reference or -/// assign. 
-fn check_for_mutation_in_guard<'a, 'tcx>(cx: &'a MatchCheckCtxt<'a, 'tcx>, - guard: &hir::Expr) { - let mut checker = MutationChecker { - cx: cx, - }; - - let infcx = infer::new_infer_ctxt(cx.tcx, - &cx.tcx.tables, - Some(checker.cx.param_env.clone())); - - let mut visitor = ExprUseVisitor::new(&mut checker, &infcx); - visitor.walk_expr(guard); -} - -struct MutationChecker<'a, 'tcx: 'a> { - cx: &'a MatchCheckCtxt<'a, 'tcx>, -} - -impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { - fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {} - fn consume(&mut self, _: NodeId, _: Span, _: cmt, _: ConsumeMode) {} - fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {} - fn borrow(&mut self, - _: NodeId, - span: Span, - _: cmt, - _: Region, - kind: BorrowKind, - _: LoanCause) { - match kind { - MutBorrow => { - span_err!(self.cx.tcx.sess, span, E0301, - "cannot mutably borrow in a pattern guard") - } - ImmBorrow | UniqueImmBorrow => {} - } - } - fn decl_without_init(&mut self, _: NodeId, _: Span) {} - fn mutate(&mut self, _: NodeId, span: Span, _: cmt, mode: MutateMode) { - match mode { - MutateMode::JustWrite | MutateMode::WriteAndRead => { - span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard") - } - MutateMode::Init => {} - } - } -} - -/// Forbids bindings in `@` patterns. This is necessary for memory safety, -/// because of the way rvalues are handled in the borrow check. (See issue -/// #14587.) 
-fn check_legality_of_bindings_in_at_patterns(cx: &MatchCheckCtxt, pat: &Pat) { - AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat); -} - -struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> { - cx: &'a MatchCheckCtxt<'b, 'tcx>, - bindings_allowed: bool -} - -impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> { - fn visit_pat(&mut self, pat: &Pat) { - if !self.bindings_allowed && pat_is_binding(&self.cx.tcx.def_map.borrow(), pat) { - span_err!(self.cx.tcx.sess, pat.span, E0303, - "pattern bindings are not allowed \ - after an `@`"); - } - - match pat.node { - hir::PatIdent(_, _, Some(_)) => { - let bindings_were_allowed = self.bindings_allowed; - self.bindings_allowed = false; - intravisit::walk_pat(self, pat); - self.bindings_allowed = bindings_were_allowed; - } - _ => intravisit::walk_pat(self, pat), - } - } -} diff --git a/src/librustc/middle/check_rvalues.rs b/src/librustc/middle/check_rvalues.rs deleted file mode 100644 index 5ead8fb95f8eb..0000000000000 --- a/src/librustc/middle/check_rvalues.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Checks that all rvalues in a crate have statically known size. check_crate -// is the public starting point. 
- -use dep_graph::DepNode; -use middle::expr_use_visitor as euv; -use middle::infer; -use middle::mem_categorization as mc; -use middle::ty::ParameterEnvironment; -use middle::ty; - -use rustc_front::hir; -use rustc_front::intravisit; -use syntax::ast; -use syntax::codemap::Span; - -pub fn check_crate(tcx: &ty::ctxt) { - let mut rvcx = RvalueContext { tcx: tcx }; - tcx.visit_all_items_in_krate(DepNode::RvalueCheck, &mut rvcx); -} - -struct RvalueContext<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, -} - -impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for RvalueContext<'a, 'tcx> { - fn visit_fn(&mut self, - fk: intravisit::FnKind<'v>, - fd: &'v hir::FnDecl, - b: &'v hir::Block, - s: Span, - fn_id: ast::NodeId) { - { - // FIXME (@jroesch) change this to be an inference context - let param_env = ParameterEnvironment::for_item(self.tcx, fn_id); - let infcx = infer::new_infer_ctxt(self.tcx, - &self.tcx.tables, - Some(param_env.clone())); - let mut delegate = RvalueContextDelegate { tcx: self.tcx, param_env: ¶m_env }; - let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx); - euv.walk_fn(fd, b); - } - intravisit::walk_fn(self, fk, fd, b, s) - } -} - -struct RvalueContextDelegate<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - param_env: &'a ty::ParameterEnvironment<'a,'tcx>, -} - -impl<'a, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'tcx> { - fn consume(&mut self, - _: ast::NodeId, - span: Span, - cmt: mc::cmt<'tcx>, - _: euv::ConsumeMode) { - debug!("consume; cmt: {:?}; type: {:?}", *cmt, cmt.ty); - if !cmt.ty.is_sized(self.param_env, span) { - span_err!(self.tcx.sess, span, E0161, - "cannot move a value of type {0}: the size of {0} cannot be statically determined", - cmt.ty); - } - } - - fn matched_pat(&mut self, - _matched_pat: &hir::Pat, - _cmt: mc::cmt, - _mode: euv::MatchMode) {} - - fn consume_pat(&mut self, - _consume_pat: &hir::Pat, - _cmt: mc::cmt, - _mode: euv::ConsumeMode) { - } - - fn borrow(&mut self, - _borrow_id: ast::NodeId, - _borrow_span: 
Span, - _cmt: mc::cmt, - _loan_region: ty::Region, - _bk: ty::BorrowKind, - _loan_cause: euv::LoanCause) { - } - - fn decl_without_init(&mut self, - _id: ast::NodeId, - _span: Span) { - } - - fn mutate(&mut self, - _assignment_id: ast::NodeId, - _assignment_span: Span, - _assignee_cmt: mc::cmt, - _mode: euv::MutateMode) { - } -} diff --git a/src/librustc/middle/check_static_recursion.rs b/src/librustc/middle/check_static_recursion.rs deleted file mode 100644 index 0882f3f1137ec..0000000000000 --- a/src/librustc/middle/check_static_recursion.rs +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This compiler pass detects constants that refer to themselves -// recursively. - -use front::map as ast_map; -use session::Session; -use middle::def::{DefStatic, DefConst, DefAssociatedConst, DefVariant, DefMap}; -use util::nodemap::NodeMap; - -use syntax::{ast}; -use syntax::codemap::Span; -use syntax::feature_gate::{GateIssue, emit_feature_err}; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; - -use std::cell::RefCell; - -struct CheckCrateVisitor<'a, 'ast: 'a> { - sess: &'a Session, - def_map: &'a DefMap, - ast_map: &'a ast_map::Map<'ast>, - // `discriminant_map` is a cache that associates the `NodeId`s of local - // variant definitions with the discriminant expression that applies to - // each one. If the variant uses the default values (starting from `0`), - // then `None` is stored. - discriminant_map: RefCell>>, -} - -impl<'a, 'ast: 'a> Visitor<'ast> for CheckCrateVisitor<'a, 'ast> { - fn visit_item(&mut self, it: &'ast hir::Item) { - match it.node { - hir::ItemStatic(..) | - hir::ItemConst(..) 
=> { - let mut recursion_visitor = - CheckItemRecursionVisitor::new(self, &it.span); - recursion_visitor.visit_item(it); - }, - hir::ItemEnum(ref enum_def, ref generics) => { - // We could process the whole enum, but handling the variants - // with discriminant expressions one by one gives more specific, - // less redundant output. - for variant in &enum_def.variants { - if let Some(_) = variant.node.disr_expr { - let mut recursion_visitor = - CheckItemRecursionVisitor::new(self, &variant.span); - recursion_visitor.populate_enum_discriminants(enum_def); - recursion_visitor.visit_variant(variant, generics, it.id); - } - } - } - _ => {} - } - intravisit::walk_item(self, it) - } - - fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { - match ti.node { - hir::ConstTraitItem(_, ref default) => { - if let Some(_) = *default { - let mut recursion_visitor = - CheckItemRecursionVisitor::new(self, &ti.span); - recursion_visitor.visit_trait_item(ti); - } - } - _ => {} - } - intravisit::walk_trait_item(self, ti) - } - - fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { - match ii.node { - hir::ImplItemKind::Const(..) 
=> { - let mut recursion_visitor = - CheckItemRecursionVisitor::new(self, &ii.span); - recursion_visitor.visit_impl_item(ii); - } - _ => {} - } - intravisit::walk_impl_item(self, ii) - } -} - -pub fn check_crate<'ast>(sess: &Session, - krate: &'ast hir::Crate, - def_map: &DefMap, - ast_map: &ast_map::Map<'ast>) { - let mut visitor = CheckCrateVisitor { - sess: sess, - def_map: def_map, - ast_map: ast_map, - discriminant_map: RefCell::new(NodeMap()), - }; - sess.abort_if_new_errors(|| { - krate.visit_all_items(&mut visitor); - }); -} - -struct CheckItemRecursionVisitor<'a, 'ast: 'a> { - root_span: &'a Span, - sess: &'a Session, - ast_map: &'a ast_map::Map<'ast>, - def_map: &'a DefMap, - discriminant_map: &'a RefCell>>, - idstack: Vec, -} - -impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> { - fn new(v: &'a CheckCrateVisitor<'a, 'ast>, span: &'a Span) - -> CheckItemRecursionVisitor<'a, 'ast> { - CheckItemRecursionVisitor { - root_span: span, - sess: v.sess, - ast_map: v.ast_map, - def_map: v.def_map, - discriminant_map: &v.discriminant_map, - idstack: Vec::new(), - } - } - fn with_item_id_pushed(&mut self, id: ast::NodeId, f: F) - where F: Fn(&mut Self) { - if self.idstack.iter().any(|&x| x == id) { - let any_static = self.idstack.iter().any(|&x| { - if let ast_map::NodeItem(item) = self.ast_map.get(x) { - if let hir::ItemStatic(..) = item.node { - true - } else { - false - } - } else { - false - } - }); - if any_static { - if !self.sess.features.borrow().static_recursion { - emit_feature_err(&self.sess.parse_sess.span_diagnostic, - "static_recursion", - *self.root_span, GateIssue::Language, "recursive static"); - } - } else { - span_err!(self.sess, *self.root_span, E0265, "recursive constant"); - } - return; - } - self.idstack.push(id); - f(self); - self.idstack.pop(); - } - // If a variant has an expression specifying its discriminant, then it needs - // to be checked just like a static or constant. 
However, if there are more - // variants with no explicitly specified discriminant, those variants will - // increment the same expression to get their values. - // - // So for every variant, we need to track whether there is an expression - // somewhere in the enum definition that controls its discriminant. We do - // this by starting from the end and searching backward. - fn populate_enum_discriminants(&self, enum_definition: &'ast hir::EnumDef) { - // Get the map, and return if we already processed this enum or if it - // has no variants. - let mut discriminant_map = self.discriminant_map.borrow_mut(); - match enum_definition.variants.first() { - None => { return; } - Some(variant) if discriminant_map.contains_key(&variant.node.data.id()) => { - return; - } - _ => {} - } - - // Go through all the variants. - let mut variant_stack: Vec = Vec::new(); - for variant in enum_definition.variants.iter().rev() { - variant_stack.push(variant.node.data.id()); - // When we find an expression, every variant currently on the stack - // is affected by that expression. - if let Some(ref expr) = variant.node.disr_expr { - for id in &variant_stack { - discriminant_map.insert(*id, Some(expr)); - } - variant_stack.clear() - } - } - // If we are at the top, that always starts at 0, so any variant on the - // stack has a default value and does not need to be checked. 
- for id in &variant_stack { - discriminant_map.insert(*id, None); - } - } -} - -impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> { - fn visit_item(&mut self, it: &'ast hir::Item) { - self.with_item_id_pushed(it.id, |v| intravisit::walk_item(v, it)); - } - - fn visit_enum_def(&mut self, enum_definition: &'ast hir::EnumDef, - generics: &'ast hir::Generics, item_id: ast::NodeId, _: Span) { - self.populate_enum_discriminants(enum_definition); - intravisit::walk_enum_def(self, enum_definition, generics, item_id); - } - - fn visit_variant(&mut self, variant: &'ast hir::Variant, - _: &'ast hir::Generics, _: ast::NodeId) { - let variant_id = variant.node.data.id(); - let maybe_expr; - if let Some(get_expr) = self.discriminant_map.borrow().get(&variant_id) { - // This is necessary because we need to let the `discriminant_map` - // borrow fall out of scope, so that we can reborrow farther down. - maybe_expr = (*get_expr).clone(); - } else { - self.sess.span_bug(variant.span, - "`check_static_recursion` attempted to visit \ - variant with unknown discriminant") - } - // If `maybe_expr` is `None`, that's because no discriminant is - // specified that affects this variant. Thus, no risk of recursion. - if let Some(expr) = maybe_expr { - self.with_item_id_pushed(expr.id, |v| intravisit::walk_expr(v, expr)); - } - } - - fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { - self.with_item_id_pushed(ti.id, |v| intravisit::walk_trait_item(v, ti)); - } - - fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { - self.with_item_id_pushed(ii.id, |v| intravisit::walk_impl_item(v, ii)); - } - - fn visit_expr(&mut self, e: &'ast hir::Expr) { - match e.node { - hir::ExprPath(..) 
=> { - match self.def_map.get(&e.id).map(|d| d.base_def) { - Some(DefStatic(def_id, _)) | - Some(DefAssociatedConst(def_id)) | - Some(DefConst(def_id)) => { - if let Some(node_id) = self.ast_map.as_local_node_id(def_id) { - match self.ast_map.get(node_id) { - ast_map::NodeItem(item) => - self.visit_item(item), - ast_map::NodeTraitItem(item) => - self.visit_trait_item(item), - ast_map::NodeImplItem(item) => - self.visit_impl_item(item), - ast_map::NodeForeignItem(_) => {}, - _ => { - self.sess.span_bug( - e.span, - &format!("expected item, found {}", - self.ast_map.node_to_string(node_id))); - } - } - } - } - // For variants, we only want to check expressions that - // affect the specific variant used, but we need to check - // the whole enum definition to see what expression that - // might be (if any). - Some(DefVariant(enum_id, variant_id, false)) => { - if let Some(enum_node_id) = self.ast_map.as_local_node_id(enum_id) { - if let hir::ItemEnum(ref enum_def, ref generics) = - self.ast_map.expect_item(enum_node_id).node - { - self.populate_enum_discriminants(enum_def); - let enum_id = self.ast_map.as_local_node_id(enum_id).unwrap(); - let variant_id = self.ast_map.as_local_node_id(variant_id).unwrap(); - let variant = self.ast_map.expect_variant(variant_id); - self.visit_variant(variant, generics, enum_id); - } else { - self.sess.span_bug(e.span, - "`check_static_recursion` found \ - non-enum in DefVariant"); - } - } - } - _ => () - } - }, - _ => () - } - intravisit::walk_expr(self, e); - } -} diff --git a/src/librustc/middle/const_eval.rs b/src/librustc/middle/const_eval.rs deleted file mode 100644 index eae2aa9cb7e73..0000000000000 --- a/src/librustc/middle/const_eval.rs +++ /dev/null @@ -1,1442 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//#![allow(non_camel_case_types)] - -use self::ConstVal::*; -use self::ErrKind::*; -use self::EvalHint::*; - -use front::map as ast_map; -use front::map::blocks::FnLikeNode; -use middle::cstore::{self, CrateStore, InlinedItem}; -use middle::{def, infer, subst, traits}; -use middle::subst::Subst; -use middle::def_id::DefId; -use middle::pat_util::def_to_path; -use middle::ty::{self, Ty}; -use middle::astconv_util::ast_ty_to_prim_ty; -use util::num::ToPrimitive; -use util::nodemap::NodeMap; - -use graphviz::IntoCow; -use syntax::{ast, abi}; -use rustc_front::hir::Expr; -use rustc_front::hir; -use rustc_front::intravisit::FnKind; -use syntax::codemap::Span; -use syntax::parse::token::InternedString; -use syntax::ptr::P; -use syntax::codemap; - -use std::borrow::Cow; -use std::cmp::Ordering; -use std::collections::hash_map::Entry::Vacant; -use std::hash; -use std::mem::transmute; -use std::{i8, i16, i32, i64, u8, u16, u32, u64}; -use std::rc::Rc; - -fn lookup_variant_by_id<'a>(tcx: &'a ty::ctxt, - enum_def: DefId, - variant_def: DefId) - -> Option<&'a Expr> { - fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId) - -> Option<&'a Expr> { - for variant in variants { - if variant.node.data.id() == id { - return variant.node.disr_expr.as_ref().map(|e| &**e); - } - } - None - } - - if let Some(enum_node_id) = tcx.map.as_local_node_id(enum_def) { - let variant_node_id = tcx.map.as_local_node_id(variant_def).unwrap(); - match tcx.map.find(enum_node_id) { - None => None, - Some(ast_map::NodeItem(it)) => match it.node { - hir::ItemEnum(hir::EnumDef { ref variants }, _) => { - variant_expr(variants, variant_node_id) - } - _ => None - }, - Some(_) => None - } - } else { - None - } -} - -/// * `def_id` is the id of the constant. 
-/// * `maybe_ref_id` is the id of the expr referencing the constant. -/// * `param_substs` is the monomorphization substitution for the expression. -/// -/// `maybe_ref_id` and `param_substs` are optional and are used for -/// finding substitutions in associated constants. This generally -/// happens in late/trans const evaluation. -pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>, - def_id: DefId, - maybe_ref_id: Option, - param_substs: Option<&'tcx subst::Substs<'tcx>>) - -> Option<&'tcx Expr> { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - match tcx.map.find(node_id) { - None => None, - Some(ast_map::NodeItem(it)) => match it.node { - hir::ItemConst(_, ref const_expr) => { - Some(&*const_expr) - } - _ => None - }, - Some(ast_map::NodeTraitItem(ti)) => match ti.node { - hir::ConstTraitItem(_, _) => { - match maybe_ref_id { - // If we have a trait item, and we know the expression - // that's the source of the obligation to resolve it, - // `resolve_trait_associated_const` will select an impl - // or the default. - Some(ref_id) => { - let trait_id = tcx.trait_of_item(def_id) - .unwrap(); - let mut substs = tcx.node_id_item_substs(ref_id) - .substs; - if let Some(param_substs) = param_substs { - substs = substs.subst(tcx, param_substs); - } - resolve_trait_associated_const(tcx, ti, trait_id, - substs) - } - // Technically, without knowing anything about the - // expression that generates the obligation, we could - // still return the default if there is one. However, - // it's safer to return `None` than to return some value - // that may differ from what you would get from - // correctly selecting an impl. 
- None => None - } - } - _ => None - }, - Some(ast_map::NodeImplItem(ii)) => match ii.node { - hir::ImplItemKind::Const(_, ref expr) => { - Some(&*expr) - } - _ => None - }, - Some(_) => None - } - } else { - match tcx.extern_const_statics.borrow().get(&def_id) { - Some(&ast::DUMMY_NODE_ID) => return None, - Some(&expr_id) => { - return Some(tcx.map.expect_expr(expr_id)); - } - None => {} - } - let mut used_ref_id = false; - let expr_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) { - cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node { - hir::ItemConst(_, ref const_expr) => Some(const_expr.id), - _ => None - }, - cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node { - hir::ConstTraitItem(_, _) => { - used_ref_id = true; - match maybe_ref_id { - // As mentioned in the comments above for in-crate - // constants, we only try to find the expression for - // a trait-associated const if the caller gives us - // the expression that refers to it. - Some(ref_id) => { - let mut substs = tcx.node_id_item_substs(ref_id) - .substs; - if let Some(param_substs) = param_substs { - substs = substs.subst(tcx, param_substs); - } - resolve_trait_associated_const(tcx, ti, trait_id, - substs).map(|e| e.id) - } - None => None - } - } - _ => None - }, - cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref ii)) => match ii.node { - hir::ImplItemKind::Const(_, ref expr) => Some(expr.id), - _ => None - }, - _ => None - }; - // If we used the reference expression, particularly to choose an impl - // of a trait-associated const, don't cache that, because the next - // lookup with the same def_id may yield a different result. 
- if !used_ref_id { - tcx.extern_const_statics - .borrow_mut().insert(def_id, - expr_id.unwrap_or(ast::DUMMY_NODE_ID)); - } - expr_id.map(|id| tcx.map.expect_expr(id)) - } -} - -fn inline_const_fn_from_external_crate(tcx: &ty::ctxt, def_id: DefId) - -> Option { - match tcx.extern_const_fns.borrow().get(&def_id) { - Some(&ast::DUMMY_NODE_ID) => return None, - Some(&fn_id) => return Some(fn_id), - None => {} - } - - if !tcx.sess.cstore.is_const_fn(def_id) { - tcx.extern_const_fns.borrow_mut().insert(def_id, ast::DUMMY_NODE_ID); - return None; - } - - let fn_id = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) { - cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => Some(item.id), - cstore::FoundAst::Found(&InlinedItem::ImplItem(_, ref item)) => Some(item.id), - _ => None - }; - tcx.extern_const_fns.borrow_mut().insert(def_id, - fn_id.unwrap_or(ast::DUMMY_NODE_ID)); - fn_id -} - -pub fn lookup_const_fn_by_id<'tcx>(tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> Option> -{ - let fn_id = if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - node_id - } else { - if let Some(fn_id) = inline_const_fn_from_external_crate(tcx, def_id) { - fn_id - } else { - return None; - } - }; - - let fn_like = match FnLikeNode::from_node(tcx.map.get(fn_id)) { - Some(fn_like) => fn_like, - None => return None - }; - - match fn_like.kind() { - FnKind::ItemFn(_, _, _, hir::Constness::Const, _, _) => { - Some(fn_like) - } - FnKind::Method(_, m, _) => { - if m.constness == hir::Constness::Const { - Some(fn_like) - } else { - None - } - } - _ => None - } -} - -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] -pub enum ConstVal { - Float(f64), - Int(i64), - Uint(u64), - Str(InternedString), - ByteStr(Rc>), - Bool(bool), - Struct(ast::NodeId), - Tuple(ast::NodeId), - Function(DefId), - Array(ast::NodeId, u64), - Repeat(ast::NodeId, u64), -} - -impl hash::Hash for ConstVal { - fn hash(&self, state: &mut H) { - match *self { - Float(a) => unsafe { transmute::<_,u64>(a) 
}.hash(state), - Int(a) => a.hash(state), - Uint(a) => a.hash(state), - Str(ref a) => a.hash(state), - ByteStr(ref a) => a.hash(state), - Bool(a) => a.hash(state), - Struct(a) => a.hash(state), - Tuple(a) => a.hash(state), - Function(a) => a.hash(state), - Array(a, n) => { a.hash(state); n.hash(state) }, - Repeat(a, n) => { a.hash(state); n.hash(state) }, - } - } -} - -/// Note that equality for `ConstVal` means that the it is the same -/// constant, not that the rust values are equal. In particular, `NaN -/// == NaN` (at least if it's the same NaN; distinct encodings for NaN -/// are considering unequal). -impl PartialEq for ConstVal { - fn eq(&self, other: &ConstVal) -> bool { - match (self, other) { - (&Float(a), &Float(b)) => unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)}, - (&Int(a), &Int(b)) => a == b, - (&Uint(a), &Uint(b)) => a == b, - (&Str(ref a), &Str(ref b)) => a == b, - (&ByteStr(ref a), &ByteStr(ref b)) => a == b, - (&Bool(a), &Bool(b)) => a == b, - (&Struct(a), &Struct(b)) => a == b, - (&Tuple(a), &Tuple(b)) => a == b, - (&Function(a), &Function(b)) => a == b, - (&Array(a, an), &Array(b, bn)) => (a == b) && (an == bn), - (&Repeat(a, an), &Repeat(b, bn)) => (a == b) && (an == bn), - _ => false, - } - } -} - -impl Eq for ConstVal { } - -impl ConstVal { - pub fn description(&self) -> &'static str { - match *self { - Float(_) => "float", - Int(i) if i < 0 => "negative integer", - Int(_) => "positive integer", - Uint(_) => "unsigned integer", - Str(_) => "string literal", - ByteStr(_) => "byte string literal", - Bool(_) => "boolean", - Struct(_) => "struct", - Tuple(_) => "tuple", - Function(_) => "function definition", - Array(..) => "array", - Repeat(..) 
=> "repeat", - } - } -} - -pub fn const_expr_to_pat(tcx: &ty::ctxt, expr: &Expr, span: Span) -> P { - let pat = match expr.node { - hir::ExprTup(ref exprs) => - hir::PatTup(exprs.iter().map(|expr| const_expr_to_pat(tcx, &**expr, span)).collect()), - - hir::ExprCall(ref callee, ref args) => { - let def = *tcx.def_map.borrow().get(&callee.id).unwrap(); - if let Vacant(entry) = tcx.def_map.borrow_mut().entry(expr.id) { - entry.insert(def); - } - let path = match def.full_def() { - def::DefStruct(def_id) => def_to_path(tcx, def_id), - def::DefVariant(_, variant_did, _) => def_to_path(tcx, variant_did), - def::DefFn(..) => return P(hir::Pat { - id: expr.id, - node: hir::PatLit(P(expr.clone())), - span: span, - }), - _ => unreachable!() - }; - let pats = args.iter().map(|expr| const_expr_to_pat(tcx, &**expr, span)).collect(); - hir::PatEnum(path, Some(pats)) - } - - hir::ExprStruct(ref path, ref fields, None) => { - let field_pats = fields.iter().map(|field| codemap::Spanned { - span: codemap::DUMMY_SP, - node: hir::FieldPat { - name: field.name.node, - pat: const_expr_to_pat(tcx, &*field.expr, span), - is_shorthand: false, - }, - }).collect(); - hir::PatStruct(path.clone(), field_pats, false) - } - - hir::ExprVec(ref exprs) => { - let pats = exprs.iter().map(|expr| const_expr_to_pat(tcx, &**expr, span)).collect(); - hir::PatVec(pats, None, hir::HirVec::new()) - } - - hir::ExprPath(_, ref path) => { - let opt_def = tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()); - match opt_def { - Some(def::DefStruct(..)) => - hir::PatStruct(path.clone(), hir::HirVec::new(), false), - Some(def::DefVariant(..)) => - hir::PatEnum(path.clone(), None), - Some(def::DefConst(def_id)) | - Some(def::DefAssociatedConst(def_id)) => { - let expr = lookup_const_by_id(tcx, def_id, Some(expr.id), None).unwrap(); - return const_expr_to_pat(tcx, expr, span); - }, - _ => unreachable!(), - } - } - - _ => hir::PatLit(P(expr.clone())) - }; - P(hir::Pat { id: expr.id, node: pat, span: span }) -} - 
-pub fn eval_const_expr(tcx: &ty::ctxt, e: &Expr) -> ConstVal { - match eval_const_expr_partial(tcx, e, ExprTypeChecked, None) { - Ok(r) => r, - Err(s) => tcx.sess.span_fatal(s.span, &s.description()) - } -} - -pub type FnArgMap<'a> = Option<&'a NodeMap>; - -#[derive(Clone)] -pub struct ConstEvalErr { - pub span: Span, - pub kind: ErrKind, -} - -#[derive(Clone)] -pub enum ErrKind { - CannotCast, - CannotCastTo(&'static str), - InvalidOpForInts(hir::BinOp_), - InvalidOpForUInts(hir::BinOp_), - InvalidOpForBools(hir::BinOp_), - InvalidOpForFloats(hir::BinOp_), - InvalidOpForIntUint(hir::BinOp_), - InvalidOpForUintInt(hir::BinOp_), - NegateOn(ConstVal), - NotOn(ConstVal), - CallOn(ConstVal), - - NegateWithOverflow(i64), - AddiWithOverflow(i64, i64), - SubiWithOverflow(i64, i64), - MuliWithOverflow(i64, i64), - AdduWithOverflow(u64, u64), - SubuWithOverflow(u64, u64), - MuluWithOverflow(u64, u64), - DivideByZero, - DivideWithOverflow, - ModuloByZero, - ModuloWithOverflow, - ShiftLeftWithOverflow, - ShiftRightWithOverflow, - MissingStructField, - NonConstPath, - UnimplementedConstVal(&'static str), - UnresolvedPath, - ExpectedConstTuple, - ExpectedConstStruct, - TupleIndexOutOfBounds, - IndexedNonVec, - IndexNegative, - IndexNotInt, - IndexOutOfBounds, - RepeatCountNotNatural, - RepeatCountNotInt, - - MiscBinaryOp, - MiscCatchAll, - - IndexOpFeatureGated, -} - -impl ConstEvalErr { - pub fn description(&self) -> Cow { - use self::ErrKind::*; - - match self.kind { - CannotCast => "can't cast this type".into_cow(), - CannotCastTo(s) => format!("can't cast this type to {}", s).into_cow(), - InvalidOpForInts(_) => "can't do this op on signed integrals".into_cow(), - InvalidOpForUInts(_) => "can't do this op on unsigned integrals".into_cow(), - InvalidOpForBools(_) => "can't do this op on bools".into_cow(), - InvalidOpForFloats(_) => "can't do this op on floats".into_cow(), - InvalidOpForIntUint(..) 
=> "can't do this op on an isize and usize".into_cow(), - InvalidOpForUintInt(..) => "can't do this op on a usize and isize".into_cow(), - NegateOn(ref const_val) => format!("negate on {}", const_val.description()).into_cow(), - NotOn(ref const_val) => format!("not on {}", const_val.description()).into_cow(), - CallOn(ref const_val) => format!("call on {}", const_val.description()).into_cow(), - - NegateWithOverflow(..) => "attempted to negate with overflow".into_cow(), - AddiWithOverflow(..) => "attempted to add with overflow".into_cow(), - SubiWithOverflow(..) => "attempted to sub with overflow".into_cow(), - MuliWithOverflow(..) => "attempted to mul with overflow".into_cow(), - AdduWithOverflow(..) => "attempted to add with overflow".into_cow(), - SubuWithOverflow(..) => "attempted to sub with overflow".into_cow(), - MuluWithOverflow(..) => "attempted to mul with overflow".into_cow(), - DivideByZero => "attempted to divide by zero".into_cow(), - DivideWithOverflow => "attempted to divide with overflow".into_cow(), - ModuloByZero => "attempted remainder with a divisor of zero".into_cow(), - ModuloWithOverflow => "attempted remainder with overflow".into_cow(), - ShiftLeftWithOverflow => "attempted left shift with overflow".into_cow(), - ShiftRightWithOverflow => "attempted right shift with overflow".into_cow(), - MissingStructField => "nonexistent struct field".into_cow(), - NonConstPath => "non-constant path in constant expression".into_cow(), - UnimplementedConstVal(what) => - format!("unimplemented constant expression: {}", what).into_cow(), - UnresolvedPath => "unresolved path in constant expression".into_cow(), - ExpectedConstTuple => "expected constant tuple".into_cow(), - ExpectedConstStruct => "expected constant struct".into_cow(), - TupleIndexOutOfBounds => "tuple index out of bounds".into_cow(), - IndexedNonVec => "indexing is only supported for arrays".into_cow(), - IndexNegative => "indices must be non-negative integers".into_cow(), - IndexNotInt => 
"indices must be integers".into_cow(), - IndexOutOfBounds => "array index out of bounds".into_cow(), - RepeatCountNotNatural => "repeat count must be a natural number".into_cow(), - RepeatCountNotInt => "repeat count must be integers".into_cow(), - - MiscBinaryOp => "bad operands for binary".into_cow(), - MiscCatchAll => "unsupported constant expr".into_cow(), - IndexOpFeatureGated => "the index operation on const values is unstable".into_cow(), - } - } -} - -pub type EvalResult = Result; -pub type CastResult = Result; - -// FIXME: Long-term, this enum should go away: trying to evaluate -// an expression which hasn't been type-checked is a recipe for -// disaster. That said, it's not clear how to fix ast_ty_to_ty -// to avoid the ordering issue. - -/// Hint to determine how to evaluate constant expressions which -/// might not be type-checked. -#[derive(Copy, Clone, Debug)] -pub enum EvalHint<'tcx> { - /// We have a type-checked expression. - ExprTypeChecked, - /// We have an expression which hasn't been type-checked, but we have - /// an idea of what the type will be because of the context. For example, - /// the length of an array is always `usize`. (This is referred to as - /// a hint because it isn't guaranteed to be consistent with what - /// type-checking would compute.) - UncheckedExprHint(Ty<'tcx>), - /// We have an expression which has not yet been type-checked, and - /// and we have no clue what the type will be. 
- UncheckedExprNoHint, -} - -impl<'tcx> EvalHint<'tcx> { - fn erase_hint(&self) -> EvalHint<'tcx> { - match *self { - ExprTypeChecked => ExprTypeChecked, - UncheckedExprHint(_) | UncheckedExprNoHint => UncheckedExprNoHint, - } - } - fn checked_or(&self, ty: Ty<'tcx>) -> EvalHint<'tcx> { - match *self { - ExprTypeChecked => ExprTypeChecked, - _ => UncheckedExprHint(ty), - } - } -} - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum IntTy { I8, I16, I32, I64 } -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum UintTy { U8, U16, U32, U64 } - -impl IntTy { - pub fn from(tcx: &ty::ctxt, t: ast::IntTy) -> IntTy { - let t = if let ast::TyIs = t { - tcx.sess.target.int_type - } else { - t - }; - match t { - ast::TyIs => unreachable!(), - ast::TyI8 => IntTy::I8, - ast::TyI16 => IntTy::I16, - ast::TyI32 => IntTy::I32, - ast::TyI64 => IntTy::I64, - } - } -} - -impl UintTy { - pub fn from(tcx: &ty::ctxt, t: ast::UintTy) -> UintTy { - let t = if let ast::TyUs = t { - tcx.sess.target.uint_type - } else { - t - }; - match t { - ast::TyUs => unreachable!(), - ast::TyU8 => UintTy::U8, - ast::TyU16 => UintTy::U16, - ast::TyU32 => UintTy::U32, - ast::TyU64 => UintTy::U64, - } - } -} - -macro_rules! signal { - ($e:expr, $exn:expr) => { - return Err(ConstEvalErr { span: $e.span, kind: $exn }) - } -} - -// The const_{int,uint}_checked_{neg,add,sub,mul,div,shl,shr} family -// of functions catch and signal overflow errors during constant -// evaluation. -// -// They all take the operator's arguments (`a` and `b` if binary), the -// overall expression (`e`) and, if available, whole expression's -// concrete type (`opt_ety`). -// -// If the whole expression's concrete type is None, then this is a -// constant evaluation happening before type check (e.g. in the check -// to confirm that a pattern range's left-side is not greater than its -// right-side). 
We do not do arithmetic modulo the type's bitwidth in -// such a case; we just do 64-bit arithmetic and assume that later -// passes will do it again with the type information, and thus do the -// overflow checks then. - -pub fn const_int_checked_neg<'a>( - a: i64, e: &'a Expr, opt_ety: Option) -> EvalResult { - - let (min,max) = match opt_ety { - // (-i8::MIN is itself not an i8, etc, but this is an easy way - // to allow literals to pass the check. Of course that does - // not work for i64::MIN.) - Some(IntTy::I8) => (-(i8::MAX as i64), -(i8::MIN as i64)), - Some(IntTy::I16) => (-(i16::MAX as i64), -(i16::MIN as i64)), - Some(IntTy::I32) => (-(i32::MAX as i64), -(i32::MIN as i64)), - None | Some(IntTy::I64) => (-i64::MAX, -(i64::MIN+1)), - }; - - let oflo = a < min || a > max; - if oflo { - signal!(e, NegateWithOverflow(a)); - } else { - Ok(Int(-a)) - } -} - -pub fn const_uint_checked_neg<'a>( - a: u64, _e: &'a Expr, _opt_ety: Option) -> EvalResult { - // This always succeeds, and by definition, returns `(!a)+1`. - Ok(Uint((!a).wrapping_add(1))) -} - -fn const_uint_not(a: u64, opt_ety: Option) -> ConstVal { - let mask = match opt_ety { - Some(UintTy::U8) => u8::MAX as u64, - Some(UintTy::U16) => u16::MAX as u64, - Some(UintTy::U32) => u32::MAX as u64, - None | Some(UintTy::U64) => u64::MAX, - }; - Uint(!a & mask) -} - -macro_rules! 
overflow_checking_body { - ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident, - lhs: $to_8_lhs:ident $to_16_lhs:ident $to_32_lhs:ident, - rhs: $to_8_rhs:ident $to_16_rhs:ident $to_32_rhs:ident $to_64_rhs:ident, - $EnumTy:ident $T8: ident $T16: ident $T32: ident $T64: ident, - $result_type: ident) => { { - let (a,b,opt_ety) = ($a,$b,$ety); - match opt_ety { - Some($EnumTy::$T8) => match (a.$to_8_lhs(), b.$to_8_rhs()) { - (Some(a), Some(b)) => { - let (a, oflo) = a.$overflowing_op(b); - (a as $result_type, oflo) - } - (None, _) | (_, None) => (0, true) - }, - Some($EnumTy::$T16) => match (a.$to_16_lhs(), b.$to_16_rhs()) { - (Some(a), Some(b)) => { - let (a, oflo) = a.$overflowing_op(b); - (a as $result_type, oflo) - } - (None, _) | (_, None) => (0, true) - }, - Some($EnumTy::$T32) => match (a.$to_32_lhs(), b.$to_32_rhs()) { - (Some(a), Some(b)) => { - let (a, oflo) = a.$overflowing_op(b); - (a as $result_type, oflo) - } - (None, _) | (_, None) => (0, true) - }, - None | Some($EnumTy::$T64) => match b.$to_64_rhs() { - Some(b) => a.$overflowing_op(b), - None => (0, true), - } - } - } } -} - -macro_rules! int_arith_body { - ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => { - overflow_checking_body!( - $a, $b, $ety, $overflowing_op, - lhs: to_i8 to_i16 to_i32, - rhs: to_i8 to_i16 to_i32 to_i64, IntTy I8 I16 I32 I64, i64) - } -} - -macro_rules! uint_arith_body { - ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => { - overflow_checking_body!( - $a, $b, $ety, $overflowing_op, - lhs: to_u8 to_u16 to_u32, - rhs: to_u8 to_u16 to_u32 to_u64, UintTy U8 U16 U32 U64, u64) - } -} - -macro_rules! int_shift_body { - ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => { - overflow_checking_body!( - $a, $b, $ety, $overflowing_op, - lhs: to_i8 to_i16 to_i32, - rhs: to_u32 to_u32 to_u32 to_u32, IntTy I8 I16 I32 I64, i64) - } -} - -macro_rules! 
uint_shift_body { - ($a:ident, $b:ident, $ety:ident, $overflowing_op:ident) => { - overflow_checking_body!( - $a, $b, $ety, $overflowing_op, - lhs: to_u8 to_u16 to_u32, - rhs: to_u32 to_u32 to_u32 to_u32, UintTy U8 U16 U32 U64, u64) - } -} - -macro_rules! pub_fn_checked_op { - {$fn_name:ident ($a:ident : $a_ty:ty, $b:ident : $b_ty:ty,.. $WhichTy:ident) { - $ret_oflo_body:ident $overflowing_op:ident - $const_ty:ident $signal_exn:expr - }} => { - pub fn $fn_name<'a>($a: $a_ty, - $b: $b_ty, - e: &'a Expr, - opt_ety: Option<$WhichTy>) -> EvalResult { - let (ret, oflo) = $ret_oflo_body!($a, $b, opt_ety, $overflowing_op); - if !oflo { Ok($const_ty(ret)) } else { signal!(e, $signal_exn) } - } - } -} - -pub_fn_checked_op!{ const_int_checked_add(a: i64, b: i64,.. IntTy) { - int_arith_body overflowing_add Int AddiWithOverflow(a, b) -}} - -pub_fn_checked_op!{ const_int_checked_sub(a: i64, b: i64,.. IntTy) { - int_arith_body overflowing_sub Int SubiWithOverflow(a, b) -}} - -pub_fn_checked_op!{ const_int_checked_mul(a: i64, b: i64,.. IntTy) { - int_arith_body overflowing_mul Int MuliWithOverflow(a, b) -}} - -pub fn const_int_checked_div<'a>( - a: i64, b: i64, e: &'a Expr, opt_ety: Option) -> EvalResult { - if b == 0 { signal!(e, DivideByZero); } - let (ret, oflo) = int_arith_body!(a, b, opt_ety, overflowing_div); - if !oflo { Ok(Int(ret)) } else { signal!(e, DivideWithOverflow) } -} - -pub fn const_int_checked_rem<'a>( - a: i64, b: i64, e: &'a Expr, opt_ety: Option) -> EvalResult { - if b == 0 { signal!(e, ModuloByZero); } - let (ret, oflo) = int_arith_body!(a, b, opt_ety, overflowing_rem); - if !oflo { Ok(Int(ret)) } else { signal!(e, ModuloWithOverflow) } -} - -pub_fn_checked_op!{ const_int_checked_shl(a: i64, b: i64,.. IntTy) { - int_shift_body overflowing_shl Int ShiftLeftWithOverflow -}} - -pub_fn_checked_op!{ const_int_checked_shl_via_uint(a: i64, b: u64,.. 
IntTy) { - int_shift_body overflowing_shl Int ShiftLeftWithOverflow -}} - -pub_fn_checked_op!{ const_int_checked_shr(a: i64, b: i64,.. IntTy) { - int_shift_body overflowing_shr Int ShiftRightWithOverflow -}} - -pub_fn_checked_op!{ const_int_checked_shr_via_uint(a: i64, b: u64,.. IntTy) { - int_shift_body overflowing_shr Int ShiftRightWithOverflow -}} - -pub_fn_checked_op!{ const_uint_checked_add(a: u64, b: u64,.. UintTy) { - uint_arith_body overflowing_add Uint AdduWithOverflow(a, b) -}} - -pub_fn_checked_op!{ const_uint_checked_sub(a: u64, b: u64,.. UintTy) { - uint_arith_body overflowing_sub Uint SubuWithOverflow(a, b) -}} - -pub_fn_checked_op!{ const_uint_checked_mul(a: u64, b: u64,.. UintTy) { - uint_arith_body overflowing_mul Uint MuluWithOverflow(a, b) -}} - -pub fn const_uint_checked_div<'a>( - a: u64, b: u64, e: &'a Expr, opt_ety: Option) -> EvalResult { - if b == 0 { signal!(e, DivideByZero); } - let (ret, oflo) = uint_arith_body!(a, b, opt_ety, overflowing_div); - if !oflo { Ok(Uint(ret)) } else { signal!(e, DivideWithOverflow) } -} - -pub fn const_uint_checked_rem<'a>( - a: u64, b: u64, e: &'a Expr, opt_ety: Option) -> EvalResult { - if b == 0 { signal!(e, ModuloByZero); } - let (ret, oflo) = uint_arith_body!(a, b, opt_ety, overflowing_rem); - if !oflo { Ok(Uint(ret)) } else { signal!(e, ModuloWithOverflow) } -} - -pub_fn_checked_op!{ const_uint_checked_shl(a: u64, b: u64,.. UintTy) { - uint_shift_body overflowing_shl Uint ShiftLeftWithOverflow -}} - -pub_fn_checked_op!{ const_uint_checked_shl_via_int(a: u64, b: i64,.. UintTy) { - uint_shift_body overflowing_shl Uint ShiftLeftWithOverflow -}} - -pub_fn_checked_op!{ const_uint_checked_shr(a: u64, b: u64,.. UintTy) { - uint_shift_body overflowing_shr Uint ShiftRightWithOverflow -}} - -pub_fn_checked_op!{ const_uint_checked_shr_via_int(a: u64, b: i64,.. 
UintTy) { - uint_shift_body overflowing_shr Uint ShiftRightWithOverflow -}} - -/// Evaluate a constant expression in a context where the expression isn't -/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked, -/// but a few places need to evaluate constants during type-checking, like -/// computing the length of an array. (See also the FIXME above EvalHint.) -pub fn eval_const_expr_partial<'tcx>(tcx: &ty::ctxt<'tcx>, - e: &Expr, - ty_hint: EvalHint<'tcx>, - fn_args: FnArgMap) -> EvalResult { - // Try to compute the type of the expression based on the EvalHint. - // (See also the definition of EvalHint, and the FIXME above EvalHint.) - let ety = match ty_hint { - ExprTypeChecked => { - // After type-checking, expr_ty is guaranteed to succeed. - Some(tcx.expr_ty(e)) - } - UncheckedExprHint(ty) => { - // Use the type hint; it's not guaranteed to be right, but it's - // usually good enough. - Some(ty) - } - UncheckedExprNoHint => { - // This expression might not be type-checked, and we have no hint. - // Try to query the context for a type anyway; we might get lucky - // (for example, if the expression was imported from another crate). - tcx.expr_ty_opt(e) - } - }; - - // If type of expression itself is int or uint, normalize in these - // bindings so that isize/usize is mapped to a type with an - // inherently known bitwidth. 
- let expr_int_type = ety.and_then(|ty| { - if let ty::TyInt(t) = ty.sty { - Some(IntTy::from(tcx, t)) } else { None } - }); - let expr_uint_type = ety.and_then(|ty| { - if let ty::TyUint(t) = ty.sty { - Some(UintTy::from(tcx, t)) } else { None } - }); - - let result = match e.node { - hir::ExprUnary(hir::UnNeg, ref inner) => { - match try!(eval_const_expr_partial(tcx, &**inner, ty_hint, fn_args)) { - Float(f) => Float(-f), - Int(n) => try!(const_int_checked_neg(n, e, expr_int_type)), - Uint(i) => { - try!(const_uint_checked_neg(i, e, expr_uint_type)) - } - const_val => signal!(e, NegateOn(const_val)), - } - } - hir::ExprUnary(hir::UnNot, ref inner) => { - match try!(eval_const_expr_partial(tcx, &**inner, ty_hint, fn_args)) { - Int(i) => Int(!i), - Uint(i) => const_uint_not(i, expr_uint_type), - Bool(b) => Bool(!b), - const_val => signal!(e, NotOn(const_val)), - } - } - hir::ExprBinary(op, ref a, ref b) => { - let b_ty = match op.node { - hir::BiShl | hir::BiShr => ty_hint.checked_or(tcx.types.usize), - _ => ty_hint - }; - match (try!(eval_const_expr_partial(tcx, &**a, ty_hint, fn_args)), - try!(eval_const_expr_partial(tcx, &**b, b_ty, fn_args))) { - (Float(a), Float(b)) => { - match op.node { - hir::BiAdd => Float(a + b), - hir::BiSub => Float(a - b), - hir::BiMul => Float(a * b), - hir::BiDiv => Float(a / b), - hir::BiRem => Float(a % b), - hir::BiEq => Bool(a == b), - hir::BiLt => Bool(a < b), - hir::BiLe => Bool(a <= b), - hir::BiNe => Bool(a != b), - hir::BiGe => Bool(a >= b), - hir::BiGt => Bool(a > b), - _ => signal!(e, InvalidOpForFloats(op.node)), - } - } - (Int(a), Int(b)) => { - match op.node { - hir::BiAdd => try!(const_int_checked_add(a,b,e,expr_int_type)), - hir::BiSub => try!(const_int_checked_sub(a,b,e,expr_int_type)), - hir::BiMul => try!(const_int_checked_mul(a,b,e,expr_int_type)), - hir::BiDiv => try!(const_int_checked_div(a,b,e,expr_int_type)), - hir::BiRem => try!(const_int_checked_rem(a,b,e,expr_int_type)), - hir::BiBitAnd => Int(a & b), - 
hir::BiBitOr => Int(a | b), - hir::BiBitXor => Int(a ^ b), - hir::BiShl => try!(const_int_checked_shl(a,b,e,expr_int_type)), - hir::BiShr => try!(const_int_checked_shr(a,b,e,expr_int_type)), - hir::BiEq => Bool(a == b), - hir::BiLt => Bool(a < b), - hir::BiLe => Bool(a <= b), - hir::BiNe => Bool(a != b), - hir::BiGe => Bool(a >= b), - hir::BiGt => Bool(a > b), - _ => signal!(e, InvalidOpForInts(op.node)), - } - } - (Uint(a), Uint(b)) => { - match op.node { - hir::BiAdd => try!(const_uint_checked_add(a,b,e,expr_uint_type)), - hir::BiSub => try!(const_uint_checked_sub(a,b,e,expr_uint_type)), - hir::BiMul => try!(const_uint_checked_mul(a,b,e,expr_uint_type)), - hir::BiDiv => try!(const_uint_checked_div(a,b,e,expr_uint_type)), - hir::BiRem => try!(const_uint_checked_rem(a,b,e,expr_uint_type)), - hir::BiBitAnd => Uint(a & b), - hir::BiBitOr => Uint(a | b), - hir::BiBitXor => Uint(a ^ b), - hir::BiShl => try!(const_uint_checked_shl(a,b,e,expr_uint_type)), - hir::BiShr => try!(const_uint_checked_shr(a,b,e,expr_uint_type)), - hir::BiEq => Bool(a == b), - hir::BiLt => Bool(a < b), - hir::BiLe => Bool(a <= b), - hir::BiNe => Bool(a != b), - hir::BiGe => Bool(a >= b), - hir::BiGt => Bool(a > b), - _ => signal!(e, InvalidOpForUInts(op.node)), - } - } - // shifts can have any integral type as their rhs - (Int(a), Uint(b)) => { - match op.node { - hir::BiShl => try!(const_int_checked_shl_via_uint(a,b,e,expr_int_type)), - hir::BiShr => try!(const_int_checked_shr_via_uint(a,b,e,expr_int_type)), - _ => signal!(e, InvalidOpForIntUint(op.node)), - } - } - (Uint(a), Int(b)) => { - match op.node { - hir::BiShl => try!(const_uint_checked_shl_via_int(a,b,e,expr_uint_type)), - hir::BiShr => try!(const_uint_checked_shr_via_int(a,b,e,expr_uint_type)), - _ => signal!(e, InvalidOpForUintInt(op.node)), - } - } - (Bool(a), Bool(b)) => { - Bool(match op.node { - hir::BiAnd => a && b, - hir::BiOr => a || b, - hir::BiBitXor => a ^ b, - hir::BiBitAnd => a & b, - hir::BiBitOr => a | b, - hir::BiEq 
=> a == b, - hir::BiNe => a != b, - _ => signal!(e, InvalidOpForBools(op.node)), - }) - } - - _ => signal!(e, MiscBinaryOp), - } - } - hir::ExprCast(ref base, ref target_ty) => { - let ety = ety.or_else(|| ast_ty_to_prim_ty(tcx, &**target_ty)) - .unwrap_or_else(|| { - tcx.sess.span_fatal(target_ty.span, - "target type not found for const cast") - }); - - let base_hint = if let ExprTypeChecked = ty_hint { - ExprTypeChecked - } else { - // FIXME (#23833): the type-hint can cause problems, - // e.g. `(i8::MAX + 1_i8) as u32` feeds in `u32` as result - // type to the sum, and thus no overflow is signaled. - match tcx.expr_ty_opt(&base) { - Some(t) => UncheckedExprHint(t), - None => ty_hint - } - }; - - let val = try!(eval_const_expr_partial(tcx, &**base, base_hint, fn_args)); - match cast_const(tcx, val, ety) { - Ok(val) => val, - Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }), - } - } - hir::ExprPath(..) => { - let opt_def = if let Some(def) = tcx.def_map.borrow().get(&e.id) { - // After type-checking, def_map contains definition of the - // item referred to by the path. During type-checking, it - // can contain the raw output of path resolution, which - // might be a partially resolved path. - // FIXME: There's probably a better way to make sure we don't - // panic here. 
- if def.depth != 0 { - signal!(e, UnresolvedPath); - } - Some(def.full_def()) - } else { - None - }; - let (const_expr, const_ty) = match opt_def { - Some(def::DefConst(def_id)) => { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - match tcx.map.find(node_id) { - Some(ast_map::NodeItem(it)) => match it.node { - hir::ItemConst(ref ty, ref expr) => { - (Some(&**expr), Some(&**ty)) - } - _ => (None, None) - }, - _ => (None, None) - } - } else { - (lookup_const_by_id(tcx, def_id, Some(e.id), None), None) - } - } - Some(def::DefAssociatedConst(def_id)) => { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - match tcx.impl_or_trait_item(def_id).container() { - ty::TraitContainer(trait_id) => match tcx.map.find(node_id) { - Some(ast_map::NodeTraitItem(ti)) => match ti.node { - hir::ConstTraitItem(ref ty, _) => { - if let ExprTypeChecked = ty_hint { - let substs = tcx.node_id_item_substs(e.id).substs; - (resolve_trait_associated_const(tcx, - ti, - trait_id, - substs), - Some(&**ty)) - } else { - (None, None) - } - } - _ => (None, None) - }, - _ => (None, None) - }, - ty::ImplContainer(_) => match tcx.map.find(node_id) { - Some(ast_map::NodeImplItem(ii)) => match ii.node { - hir::ImplItemKind::Const(ref ty, ref expr) => { - (Some(&**expr), Some(&**ty)) - } - _ => (None, None) - }, - _ => (None, None) - }, - } - } else { - (lookup_const_by_id(tcx, def_id, Some(e.id), None), None) - } - } - Some(def::DefVariant(enum_def, variant_def, _)) => { - (lookup_variant_by_id(tcx, enum_def, variant_def), None) - } - Some(def::DefStruct(_)) => { - return Ok(ConstVal::Struct(e.id)) - } - Some(def::DefLocal(_, id)) => { - debug!("DefLocal({:?}): {:?}", id, fn_args); - if let Some(val) = fn_args.and_then(|args| args.get(&id)) { - return Ok(val.clone()); - } else { - (None, None) - } - }, - Some(def::DefMethod(id)) | Some(def::DefFn(id, _)) => return Ok(Function(id)), - _ => (None, None) - }; - let const_expr = match const_expr { - Some(actual_e) => actual_e, - None 
=> signal!(e, NonConstPath) - }; - let item_hint = if let UncheckedExprNoHint = ty_hint { - match const_ty { - Some(ty) => match ast_ty_to_prim_ty(tcx, ty) { - Some(ty) => UncheckedExprHint(ty), - None => UncheckedExprNoHint - }, - None => UncheckedExprNoHint - } - } else { - ty_hint - }; - try!(eval_const_expr_partial(tcx, const_expr, item_hint, fn_args)) - } - hir::ExprCall(ref callee, ref args) => { - let sub_ty_hint = ty_hint.erase_hint(); - let callee_val = try!(eval_const_expr_partial(tcx, callee, sub_ty_hint, fn_args)); - let (decl, block, constness) = try!(get_fn_def(tcx, e, callee_val)); - match (ty_hint, constness) { - (ExprTypeChecked, _) => { - // no need to check for constness... either check_const - // already forbids this or we const eval over whatever - // we want - }, - (_, hir::Constness::Const) => { - // we don't know much about the function, so we force it to be a const fn - // so compilation will fail later in case the const fn's body is not const - }, - _ => signal!(e, NonConstPath), - } - assert_eq!(decl.inputs.len(), args.len()); - - let mut call_args = NodeMap(); - for (arg, arg_expr) in decl.inputs.iter().zip(args.iter()) { - let arg_val = try!(eval_const_expr_partial( - tcx, - arg_expr, - sub_ty_hint, - fn_args - )); - debug!("const call arg: {:?}", arg); - let old = call_args.insert(arg.pat.id, arg_val); - assert!(old.is_none()); - } - let result = block.expr.as_ref().unwrap(); - debug!("const call({:?})", call_args); - try!(eval_const_expr_partial(tcx, &**result, ty_hint, Some(&call_args))) - }, - hir::ExprLit(ref lit) => lit_to_const(&**lit, ety), - hir::ExprBlock(ref block) => { - match block.expr { - Some(ref expr) => try!(eval_const_expr_partial(tcx, &**expr, ty_hint, fn_args)), - None => unreachable!(), - } - } - hir::ExprType(ref e, _) => try!(eval_const_expr_partial(tcx, &**e, ty_hint, fn_args)), - hir::ExprTup(_) => Tuple(e.id), - hir::ExprStruct(..) 
=> Struct(e.id), - hir::ExprIndex(ref arr, ref idx) => { - if !tcx.sess.features.borrow().const_indexing { - signal!(e, IndexOpFeatureGated); - } - let arr_hint = ty_hint.erase_hint(); - let arr = try!(eval_const_expr_partial(tcx, arr, arr_hint, fn_args)); - let idx_hint = ty_hint.checked_or(tcx.types.usize); - let idx = match try!(eval_const_expr_partial(tcx, idx, idx_hint, fn_args)) { - Int(i) if i >= 0 => i as u64, - Int(_) => signal!(idx, IndexNegative), - Uint(i) => i, - _ => signal!(idx, IndexNotInt), - }; - match arr { - Array(_, n) if idx >= n => signal!(e, IndexOutOfBounds), - Array(v, _) => if let hir::ExprVec(ref v) = tcx.map.expect_expr(v).node { - try!(eval_const_expr_partial(tcx, &*v[idx as usize], ty_hint, fn_args)) - } else { - unreachable!() - }, - - Repeat(_, n) if idx >= n => signal!(e, IndexOutOfBounds), - Repeat(elem, _) => try!(eval_const_expr_partial( - tcx, - &*tcx.map.expect_expr(elem), - ty_hint, - fn_args, - )), - - ByteStr(ref data) if idx as usize >= data.len() - => signal!(e, IndexOutOfBounds), - ByteStr(data) => Uint(data[idx as usize] as u64), - - Str(ref s) if idx as usize >= s.len() - => signal!(e, IndexOutOfBounds), - Str(_) => unimplemented!(), // there's no const_char type - _ => signal!(e, IndexedNonVec), - } - } - hir::ExprVec(ref v) => Array(e.id, v.len() as u64), - hir::ExprRepeat(_, ref n) => { - let len_hint = ty_hint.checked_or(tcx.types.usize); - Repeat( - e.id, - match try!(eval_const_expr_partial(tcx, &**n, len_hint, fn_args)) { - Int(i) if i >= 0 => i as u64, - Int(_) => signal!(e, RepeatCountNotNatural), - Uint(i) => i, - _ => signal!(e, RepeatCountNotInt), - }, - ) - }, - hir::ExprTupField(ref base, index) => { - let base_hint = ty_hint.erase_hint(); - let c = try!(eval_const_expr_partial(tcx, base, base_hint, fn_args)); - if let Tuple(tup_id) = c { - if let hir::ExprTup(ref fields) = tcx.map.expect_expr(tup_id).node { - if index.node < fields.len() { - return eval_const_expr_partial(tcx, &fields[index.node], 
base_hint, fn_args) - } else { - signal!(e, TupleIndexOutOfBounds); - } - } else { - unreachable!() - } - } else { - signal!(base, ExpectedConstTuple); - } - } - hir::ExprField(ref base, field_name) => { - let base_hint = ty_hint.erase_hint(); - // Get the base expression if it is a struct and it is constant - let c = try!(eval_const_expr_partial(tcx, base, base_hint, fn_args)); - if let Struct(struct_id) = c { - if let hir::ExprStruct(_, ref fields, _) = tcx.map.expect_expr(struct_id).node { - // Check that the given field exists and evaluate it - // if the idents are compared run-pass/issue-19244 fails - if let Some(f) = fields.iter().find(|f| f.name.node - == field_name.node) { - return eval_const_expr_partial(tcx, &*f.expr, base_hint, fn_args) - } else { - signal!(e, MissingStructField); - } - } else { - unreachable!() - } - } else { - signal!(base, ExpectedConstStruct); - } - } - _ => signal!(e, MiscCatchAll) - }; - - Ok(result) -} - -fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>, - ti: &'tcx hir::TraitItem, - trait_id: DefId, - rcvr_substs: subst::Substs<'tcx>) - -> Option<&'tcx Expr> -{ - let subst::SeparateVecsPerParamSpace { - types: rcvr_type, - selfs: rcvr_self, - fns: _, - } = rcvr_substs.types.split(); - let trait_substs = - subst::Substs::erased(subst::VecPerParamSpace::new(rcvr_type, - rcvr_self, - Vec::new())); - let trait_substs = tcx.mk_substs(trait_substs); - debug!("resolve_trait_associated_const: trait_substs={:?}", - trait_substs); - let trait_ref = ty::Binder(ty::TraitRef { def_id: trait_id, - substs: trait_substs }); - - tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id()); - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None); - - let mut selcx = traits::SelectionContext::new(&infcx); - let obligation = traits::Obligation::new(traits::ObligationCause::dummy(), - trait_ref.to_poly_trait_predicate()); - let selection = match selcx.select(&obligation) { - Ok(Some(vtable)) => vtable, - // Still 
ambiguous, so give up and let the caller decide whether this - // expression is really needed yet. Some associated constant values - // can't be evaluated until monomorphization is done in trans. - Ok(None) => { - return None - } - Err(_) => { - return None - } - }; - - match selection { - traits::VtableImpl(ref impl_data) => { - match tcx.associated_consts(impl_data.impl_def_id) - .iter().find(|ic| ic.name == ti.name) { - Some(ic) => lookup_const_by_id(tcx, ic.def_id, None, None), - None => match ti.node { - hir::ConstTraitItem(_, Some(ref expr)) => Some(&*expr), - _ => None, - }, - } - } - _ => { - tcx.sess.span_bug( - ti.span, - "resolve_trait_associated_const: unexpected vtable type") - } - } -} - -fn cast_const<'tcx>(tcx: &ty::ctxt<'tcx>, val: ConstVal, ty: Ty) -> CastResult { - macro_rules! convert_val { - ($intermediate_ty:ty, $const_type:ident, $target_ty:ty) => { - match val { - Bool(b) => Ok($const_type(b as u64 as $intermediate_ty as $target_ty)), - Uint(u) => Ok($const_type(u as $intermediate_ty as $target_ty)), - Int(i) => Ok($const_type(i as $intermediate_ty as $target_ty)), - Float(f) => Ok($const_type(f as $intermediate_ty as $target_ty)), - _ => Err(ErrKind::CannotCastTo(stringify!($const_type))), - } - } - } - - // Issue #23890: If isize/usize, then dispatch to appropriate target representation type - match (&ty.sty, tcx.sess.target.int_type, tcx.sess.target.uint_type) { - (&ty::TyInt(ast::TyIs), ast::TyI32, _) => return convert_val!(i32, Int, i64), - (&ty::TyInt(ast::TyIs), ast::TyI64, _) => return convert_val!(i64, Int, i64), - (&ty::TyInt(ast::TyIs), _, _) => panic!("unexpected target.int_type"), - - (&ty::TyUint(ast::TyUs), _, ast::TyU32) => return convert_val!(u32, Uint, u64), - (&ty::TyUint(ast::TyUs), _, ast::TyU64) => return convert_val!(u64, Uint, u64), - (&ty::TyUint(ast::TyUs), _, _) => panic!("unexpected target.uint_type"), - - _ => {} - } - - match ty.sty { - ty::TyInt(ast::TyIs) => unreachable!(), - ty::TyUint(ast::TyUs) => 
unreachable!(), - - ty::TyInt(ast::TyI8) => convert_val!(i8, Int, i64), - ty::TyInt(ast::TyI16) => convert_val!(i16, Int, i64), - ty::TyInt(ast::TyI32) => convert_val!(i32, Int, i64), - ty::TyInt(ast::TyI64) => convert_val!(i64, Int, i64), - - ty::TyUint(ast::TyU8) => convert_val!(u8, Uint, u64), - ty::TyUint(ast::TyU16) => convert_val!(u16, Uint, u64), - ty::TyUint(ast::TyU32) => convert_val!(u32, Uint, u64), - ty::TyUint(ast::TyU64) => convert_val!(u64, Uint, u64), - - ty::TyFloat(ast::TyF32) => convert_val!(f32, Float, f64), - ty::TyFloat(ast::TyF64) => convert_val!(f64, Float, f64), - _ => Err(ErrKind::CannotCast), - } -} - -fn lit_to_const(lit: &ast::Lit, ty_hint: Option) -> ConstVal { - match lit.node { - ast::LitStr(ref s, _) => Str((*s).clone()), - ast::LitByteStr(ref data) => { - ByteStr(data.clone()) - } - ast::LitByte(n) => Uint(n as u64), - ast::LitChar(n) => Uint(n as u64), - ast::LitInt(n, ast::SignedIntLit(_, ast::Plus)) => Int(n as i64), - ast::LitInt(n, ast::UnsuffixedIntLit(ast::Plus)) => { - match ty_hint.map(|ty| &ty.sty) { - Some(&ty::TyUint(_)) => Uint(n), - _ => Int(n as i64) - } - } - ast::LitInt(n, ast::SignedIntLit(_, ast::Minus)) | - ast::LitInt(n, ast::UnsuffixedIntLit(ast::Minus)) => Int(-(n as i64)), - ast::LitInt(n, ast::UnsignedIntLit(_)) => Uint(n), - ast::LitFloat(ref n, _) | - ast::LitFloatUnsuffixed(ref n) => { - Float(n.parse::().unwrap() as f64) - } - ast::LitBool(b) => Bool(b) - } -} - -pub fn compare_const_vals(a: &ConstVal, b: &ConstVal) -> Option { - Some(match (a, b) { - (&Int(a), &Int(b)) => a.cmp(&b), - (&Uint(a), &Uint(b)) => a.cmp(&b), - (&Float(a), &Float(b)) => { - // This is pretty bad but it is the existing behavior. 
- if a == b { - Ordering::Equal - } else if a < b { - Ordering::Less - } else { - Ordering::Greater - } - } - (&Str(ref a), &Str(ref b)) => a.cmp(b), - (&Bool(a), &Bool(b)) => a.cmp(&b), - (&ByteStr(ref a), &ByteStr(ref b)) => a.cmp(b), - _ => return None - }) -} - -pub fn compare_lit_exprs<'tcx>(tcx: &ty::ctxt<'tcx>, - a: &Expr, - b: &Expr) -> Option { - let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked, None) { - Ok(a) => a, - Err(e) => { - tcx.sess.span_err(a.span, &e.description()); - return None; - } - }; - let b = match eval_const_expr_partial(tcx, b, ExprTypeChecked, None) { - Ok(b) => b, - Err(e) => { - tcx.sess.span_err(b.span, &e.description()); - return None; - } - }; - compare_const_vals(&a, &b) -} - - -// returns Err if callee is not `Function` -// `e` is only used for error reporting/spans -fn get_fn_def<'a>(tcx: &'a ty::ctxt, - e: &hir::Expr, - callee: ConstVal) - -> Result<(&'a hir::FnDecl, &'a hir::Block, hir::Constness), ConstEvalErr> { - let did = match callee { - Function(did) => did, - callee => signal!(e, CallOn(callee)), - }; - debug!("fn call: {:?}", tcx.map.get_if_local(did)); - match tcx.map.get_if_local(did) { - None => signal!(e, UnimplementedConstVal("calling non-local const fn")), // non-local - Some(ast_map::NodeItem(it)) => match it.node { - hir::ItemFn( - ref decl, - hir::Unsafety::Normal, - constness, - abi::Abi::Rust, - _, // ducktype generics? types are funky in const_eval - ref block, - ) => Ok((&**decl, &**block, constness)), - _ => signal!(e, NonConstPath), - }, - Some(ast_map::NodeImplItem(it)) => match it.node { - hir::ImplItemKind::Method( - hir::MethodSig { - ref decl, - unsafety: hir::Unsafety::Normal, - constness, - abi: abi::Abi::Rust, - .. // ducktype generics? 
types are funky in const_eval - }, - ref block, - ) => Ok((decl, block, constness)), - _ => signal!(e, NonConstPath), - }, - Some(ast_map::NodeTraitItem(..)) => signal!(e, NonConstPath), - Some(_) => signal!(e, UnimplementedConstVal("calling struct, tuple or variant")), - } -} diff --git a/src/librustc/middle/const_qualif.rs b/src/librustc/middle/const_qualif.rs new file mode 100644 index 0000000000000..ec98637922ee3 --- /dev/null +++ b/src/librustc/middle/const_qualif.rs @@ -0,0 +1,44 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Const qualification, from partial to completely promotable. +bitflags! { + #[derive(RustcEncodable, RustcDecodable)] + flags ConstQualif: u8 { + // Inner mutability (can not be placed behind a reference) or behind + // &mut in a non-global expression. Can be copied from static memory. + const MUTABLE_MEM = 1 << 0, + // Constant value with a type that implements Drop. Can be copied + // from static memory, similar to MUTABLE_MEM. + const NEEDS_DROP = 1 << 1, + // Even if the value can be placed in static memory, copying it from + // there is more expensive than in-place instantiation, and/or it may + // be too large. This applies to [T; N] and everything containing it. + // N.B.: references need to clear this flag to not end up on the stack. + const PREFER_IN_PLACE = 1 << 2, + // May use more than 0 bytes of memory, doesn't impact the constness + // directly, but is not allowed to be borrowed mutably in a constant. + const NON_ZERO_SIZED = 1 << 3, + // Actually borrowed, has to always be in static memory. Does not + // propagate, and requires the expression to behave like a 'static + // lvalue. 
The set of expressions with this flag is the minimum + // that have to be promoted. + const HAS_STATIC_BORROWS = 1 << 4, + // Invalid const for miscellaneous reasons (e.g. not implemented). + const NOT_CONST = 1 << 5, + + // Borrowing the expression won't produce &'static T if any of these + // bits are set, though the value could be copied from static memory + // if `NOT_CONST` isn't set. + const NON_STATIC_BORROWS = ConstQualif::MUTABLE_MEM.bits | + ConstQualif::NEEDS_DROP.bits | + ConstQualif::NOT_CONST.bits + } +} diff --git a/src/librustc/middle/const_val.rs b/src/librustc/middle/const_val.rs new file mode 100644 index 0000000000000..9677082a43a3c --- /dev/null +++ b/src/librustc/middle/const_val.rs @@ -0,0 +1,54 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax::symbol::InternedString; +use syntax::ast; +use std::rc::Rc; +use hir::def_id::DefId; +use rustc_const_math::*; +use self::ConstVal::*; + +#[derive(Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq)] +pub enum ConstVal { + Float(ConstFloat), + Integral(ConstInt), + Str(InternedString), + ByteStr(Rc>), + Bool(bool), + Struct(ast::NodeId), + Tuple(ast::NodeId), + Function(DefId), + Array(ast::NodeId, u64), + Repeat(ast::NodeId, u64), + Char(char), + /// A value that only occurs in case `eval_const_expr` reported an error. You should never + /// handle this case. Its sole purpose is to allow more errors to be reported instead of + /// causing a fatal error. 
+ Dummy, +} + +impl ConstVal { + pub fn description(&self) -> &'static str { + match *self { + Float(f) => f.description(), + Integral(i) => i.description(), + Str(_) => "string literal", + ByteStr(_) => "byte string literal", + Bool(_) => "boolean", + Struct(_) => "struct", + Tuple(_) => "tuple", + Function(_) => "function definition", + Array(..) => "array", + Repeat(..) => "repeat", + Char(..) => "char", + Dummy => "dummy value", + } + } +} diff --git a/src/librustc/middle/cstore.rs b/src/librustc/middle/cstore.rs index 380f543f969f0..822fb4d6770f0 100644 --- a/src/librustc/middle/cstore.rs +++ b/src/librustc/middle/cstore.rs @@ -22,267 +22,379 @@ // are *mostly* used as a part of that interface, but these should // probably get a better home if someone can find one. -use back::svh::Svh; -use front::map as hir_map; -use middle::def; +use hir::def::{self, Def}; +use hir::def_id::{CrateNum, DefId, DefIndex}; +use hir::map as hir_map; +use hir::map::definitions::{Definitions, DefKey}; +use hir::svh::Svh; use middle::lang_items; -use middle::ty::{self, Ty}; -use middle::def_id::{DefId, DefIndex}; -use mir::repr::Mir; +use ty::{self, Ty, TyCtxt}; +use mir::Mir; use session::Session; use session::search_paths::PathKind; -use util::nodemap::{FnvHashMap, NodeMap, NodeSet}; -use std::any::Any; -use std::cell::RefCell; -use std::rc::Rc; +use util::nodemap::{NodeSet, DefIdMap}; use std::path::PathBuf; +use std::rc::Rc; use syntax::ast; -use syntax::ast_util::{IdVisitingOperation}; use syntax::attr; -use syntax::codemap::Span; +use syntax::ext::base::SyntaxExtension; use syntax::ptr::P; +use syntax::symbol::Symbol; +use syntax_pos::Span; use rustc_back::target::Target; -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use rustc_front::util::IdVisitor; +use hir; +use hir::intravisit::Visitor; +use rustc_back::PanicStrategy; -pub use self::DefLike::{DlDef, DlField, DlImpl}; pub use self::NativeLibraryKind::{NativeStatic, NativeFramework, NativeUnknown}; // lonely 
orphan structs and enums looking for a better home #[derive(Clone, Debug)] pub struct LinkMeta { - pub crate_name: String, + pub crate_name: Symbol, pub crate_hash: Svh, } -// Where a crate came from on the local filesystem. One of these two options +// Where a crate came from on the local filesystem. One of these three options // must be non-None. #[derive(PartialEq, Clone, Debug)] pub struct CrateSource { pub dylib: Option<(PathBuf, PathKind)>, pub rlib: Option<(PathBuf, PathKind)>, - pub cnum: ast::CrateNum, + pub rmeta: Option<(PathBuf, PathKind)>, +} + +#[derive(RustcEncodable, RustcDecodable, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)] +pub enum DepKind { + /// A dependency that is only used for its macros, none of which are visible from other crates. + /// These are included in the metadata only as placeholders and are ignored when decoding. + UnexportedMacrosOnly, + /// A dependency that is only used for its macros. + MacrosOnly, + /// A dependency that is always injected into the dependency list and so + /// doesn't need to be linked to an rlib, e.g. the injected allocator. + Implicit, + /// A dependency that is required by an rlib version of this crate. + /// Ordinary `extern crate`s result in `Explicit` dependencies. 
+ Explicit, +} + +impl DepKind { + pub fn macros_only(self) -> bool { + match self { + DepKind::UnexportedMacrosOnly | DepKind::MacrosOnly => true, + DepKind::Implicit | DepKind::Explicit => false, + } + } +} + +#[derive(PartialEq, Clone, Debug)] +pub enum LibSource { + Some(PathBuf), + MetadataOnly, + None, +} + +impl LibSource { + pub fn is_some(&self) -> bool { + if let LibSource::Some(_) = *self { + true + } else { + false + } + } + + pub fn option(&self) -> Option { + match *self { + LibSource::Some(ref p) => Some(p.clone()), + LibSource::MetadataOnly | LibSource::None => None, + } + } } -#[derive(Copy, Debug, PartialEq, Clone)] +#[derive(Copy, Debug, PartialEq, Clone, RustcEncodable, RustcDecodable)] pub enum LinkagePreference { RequireDynamic, RequireStatic, } -enum_from_u32! { - #[derive(Copy, Clone, PartialEq)] - pub enum NativeLibraryKind { - NativeStatic, // native static library (.a archive) - NativeFramework, // OSX-specific - NativeUnknown, // default way to specify a dynamic library - } +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub enum NativeLibraryKind { + NativeStatic, // native static library (.a archive) + NativeFramework, // OSX-specific + NativeUnknown, // default way to specify a dynamic library } -// Something that a name can resolve to. -#[derive(Copy, Clone, Debug)] -pub enum DefLike { - DlDef(def::Def), - DlImpl(DefId), - DlField +#[derive(Clone, Hash, RustcEncodable, RustcDecodable)] +pub struct NativeLibrary { + pub kind: NativeLibraryKind, + pub name: Symbol, + pub cfg: Option, } /// The data we save and restore about an inlined item or method. This is not /// part of the AST that we parse from a file, but it becomes part of the tree /// that we trans. 
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum InlinedItem { - Item(P), - TraitItem(DefId /* impl id */, P), - ImplItem(DefId /* impl id */, P), - Foreign(P), +pub struct InlinedItem { + pub def_id: DefId, + pub body: P, + pub const_fn_args: Vec>, +} + +/// A borrowed version of `hir::InlinedItem`. This is what's encoded when saving +/// a crate; it then gets read as an InlinedItem. +#[derive(Clone, PartialEq, Eq, RustcEncodable, Hash, Debug)] +pub struct InlinedItemRef<'a> { + pub def_id: DefId, + pub body: &'a hir::Expr, + pub const_fn_args: Vec>, } -/// A borrowed version of `hir::InlinedItem`. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum InlinedItemRef<'a> { - Item(&'a hir::Item), - TraitItem(DefId, &'a hir::TraitItem), - ImplItem(DefId, &'a hir::ImplItem), - Foreign(&'a hir::ForeignItem) +fn get_fn_args(decl: &hir::FnDecl) -> Vec> { + decl.inputs.iter().map(|arg| match arg.pat.node { + hir::PatKind::Binding(_, def_id, _, _) => Some(def_id), + _ => None + }).collect() } -/// Item definitions in the currently-compiled crate would have the CrateNum -/// LOCAL_CRATE in their DefId. 
-pub const LOCAL_CRATE: ast::CrateNum = 0; +impl<'a> InlinedItemRef<'a> { + pub fn from_item<'b, 'tcx>(def_id: DefId, + item: &'a hir::Item, + tcx: TyCtxt<'b, 'a, 'tcx>) + -> InlinedItemRef<'a> { + let (body, args) = match item.node { + hir::ItemFn(ref decl, _, _, _, _, body_id) => + (tcx.map.expr(body_id), get_fn_args(decl)), + hir::ItemConst(_, ref body) => (&**body, Vec::new()), + _ => bug!("InlinedItemRef::from_item wrong kind") + }; + InlinedItemRef { + def_id: def_id, + body: body, + const_fn_args: args + } + } -pub struct ChildItem { - pub def: DefLike, - pub name: ast::Name, - pub vis: hir::Visibility + pub fn from_trait_item(def_id: DefId, + item: &'a hir::TraitItem, + _tcx: TyCtxt) + -> InlinedItemRef<'a> { + let (body, args) = match item.node { + hir::ConstTraitItem(_, Some(ref body)) => + (&**body, Vec::new()), + hir::ConstTraitItem(_, None) => { + bug!("InlinedItemRef::from_trait_item called for const without body") + }, + _ => bug!("InlinedItemRef::from_trait_item wrong kind") + }; + InlinedItemRef { + def_id: def_id, + body: body, + const_fn_args: args + } + } + + pub fn from_impl_item<'b, 'tcx>(def_id: DefId, + item: &'a hir::ImplItem, + tcx: TyCtxt<'b, 'a, 'tcx>) + -> InlinedItemRef<'a> { + let (body, args) = match item.node { + hir::ImplItemKind::Method(ref sig, body_id) => + (tcx.map.expr(body_id), get_fn_args(&sig.decl)), + hir::ImplItemKind::Const(_, ref body) => + (&**body, Vec::new()), + _ => bug!("InlinedItemRef::from_impl_item wrong kind") + }; + InlinedItemRef { + def_id: def_id, + body: body, + const_fn_args: args + } + } + + pub fn visit(&self, visitor: &mut V) + where V: Visitor<'a> + { + visitor.visit_expr(&self.body); + } } -pub enum FoundAst<'ast> { - Found(&'ast InlinedItem), - FoundParent(DefId, &'ast InlinedItem), - NotFound, +impl InlinedItem { + pub fn visit<'ast,V>(&'ast self, visitor: &mut V) + where V: Visitor<'ast> + { + visitor.visit_expr(&self.body); + } +} + +pub enum LoadedMacro { + MacroRules(ast::MacroDef), + 
ProcMacro(Rc), +} + +#[derive(Copy, Clone, Debug)] +pub struct ExternCrate { + /// def_id of an `extern crate` in the current crate that caused + /// this crate to be loaded; note that there could be multiple + /// such ids + pub def_id: DefId, + + /// span of the extern crate that caused this to be loaded + pub span: Span, + + /// If true, then this crate is the crate named by the extern + /// crate referenced above. If false, then this crate is a dep + /// of the crate. + pub direct: bool, + + /// Number of links to reach the extern crate `def_id` + /// declaration; used to select the extern crate with the shortest + /// path + pub path_len: usize, } /// A store of Rust crates, through with their metadata /// can be accessed. -/// -/// The `: Any` bound is a temporary measure that allows access -/// to the backing `rustc_metadata::cstore::CStore` object. It -/// will be removed in the near future - if you need to access -/// internal APIs, please tell us. -pub trait CrateStore<'tcx> : Any { +pub trait CrateStore<'tcx> { // item info + fn describe_def(&self, def: DefId) -> Option; + fn def_span(&self, sess: &Session, def: DefId) -> Span; fn stability(&self, def: DefId) -> Option; fn deprecation(&self, def: DefId) -> Option; - fn closure_kind(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> ty::ClosureKind; - fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> ty::ClosureTy<'tcx>; - fn item_variances(&self, def: DefId) -> ty::ItemVariances; - fn repr_attrs(&self, def: DefId) -> Vec; - fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::TypeScheme<'tcx>; - fn item_path(&self, def: DefId) -> Vec; - fn extern_item_path(&self, def: DefId) -> Vec; - fn item_name(&self, def: DefId) -> ast::Name; - fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx>; - fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx>; + fn visibility(&self, def: DefId) -> ty::Visibility; + fn 
closure_kind(&self, def_id: DefId) -> ty::ClosureKind; + fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> ty::ClosureTy<'tcx>; + fn item_variances(&self, def: DefId) -> Vec; + fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Ty<'tcx>; + fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap>; + fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::GenericPredicates<'tcx>; + fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::GenericPredicates<'tcx>; + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx>; fn item_attrs(&self, def_id: DefId) -> Vec; - fn item_symbol(&self, def: DefId) -> String; - fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx>; - fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx>; - fn method_arg_names(&self, did: DefId) -> Vec; + fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef; + fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> &'tcx ty::AdtDef; + fn fn_arg_names(&self, did: DefId) -> Vec; fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec; // trait info - fn implementations_of_trait(&self, def_id: DefId) -> Vec; - fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>>; - fn trait_item_def_ids(&self, def: DefId) - -> Vec; + fn implementations_of_trait(&self, filter: Option) -> Vec; // impl info - fn impl_items(&self, impl_def_id: DefId) -> Vec; - fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option>; - fn impl_polarity(&self, def: DefId) -> Option; + fn associated_item_def_ids(&self, def_id: DefId) -> Vec; + fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option>; + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity; fn custom_coerce_unsized_kind(&self, def: DefId) -> Option; - fn 
associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>>; + fn impl_parent(&self, impl_def_id: DefId) -> Option; // trait/impl-item info - fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> Option; - fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx>; + fn trait_of_item(&self, def_id: DefId) -> Option; + fn associated_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option; // flags fn is_const_fn(&self, did: DefId) -> bool; fn is_defaulted_trait(&self, did: DefId) -> bool; - fn is_impl(&self, did: DefId) -> bool; fn is_default_impl(&self, impl_did: DefId) -> bool; - fn is_extern_fn(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool; - fn is_static(&self, did: DefId) -> bool; - fn is_static_method(&self, did: DefId) -> bool; + fn is_foreign_item(&self, did: DefId) -> bool; fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool; - fn is_typedef(&self, did: DefId) -> bool; // crate metadata - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)>; - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)>; - fn missing_lang_items(&self, cnum: ast::CrateNum) -> Vec; - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool; - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool; - fn is_allocator(&self, cnum: ast::CrateNum) -> bool; - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec; - fn crate_name(&self, cnum: ast::CrateNum) -> String; - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh; - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap>; - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option; - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)>; - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec; + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)>; + fn dep_kind(&self, cnum: CrateNum) -> DepKind; + fn 
export_macros(&self, cnum: CrateNum); + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)>; + fn missing_lang_items(&self, cnum: CrateNum) -> Vec; + fn is_staged_api(&self, cnum: CrateNum) -> bool; + fn is_allocator(&self, cnum: CrateNum) -> bool; + fn is_panic_runtime(&self, cnum: CrateNum) -> bool; + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool; + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy; + fn extern_crate(&self, cnum: CrateNum) -> Option; + /// The name of the crate as it is referred to in source code of the current + /// crate. + fn crate_name(&self, cnum: CrateNum) -> Symbol; + /// The name of the crate as it is stored in the crate's metadata. + fn original_crate_name(&self, cnum: CrateNum) -> Symbol; + fn crate_hash(&self, cnum: CrateNum) -> Svh; + fn crate_disambiguator(&self, cnum: CrateNum) -> Symbol; + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option; + fn native_libraries(&self, cnum: CrateNum) -> Vec; + fn reachable_ids(&self, cnum: CrateNum) -> Vec; + fn is_no_builtins(&self, cnum: CrateNum) -> bool; // resolve - fn def_path(&self, def: DefId) -> hir_map::DefPath; - fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option; + fn def_index_for_def_key(&self, + cnum: CrateNum, + def: DefKey) + -> Option; + fn def_key(&self, def: DefId) -> hir_map::DefKey; + fn relative_def_path(&self, def: DefId) -> Option; fn struct_field_names(&self, def: DefId) -> Vec; - fn item_children(&self, did: DefId) -> Vec; - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec; + fn item_children(&self, did: DefId) -> Vec; + fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro; // misc. 
metadata - fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> FoundAst<'tcx>; - fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option>; + fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option<(&'tcx InlinedItem, ast::NodeId)>; + fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option; + fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option; + + fn get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Mir<'tcx>; + fn is_item_mir_available(&self, def: DefId) -> bool; + // This is basically a 1-based range of ints, which is a little // silly - I may fix that. - fn crates(&self) -> Vec; - fn used_libraries(&self) -> Vec<(String, NativeLibraryKind)>; + fn crates(&self) -> Vec; + fn used_libraries(&self) -> Vec; fn used_link_args(&self) -> Vec; // utility functions fn metadata_filename(&self) -> &str; fn metadata_section_name(&self, target: &Target) -> &str; - fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec; - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)>; - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource; - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option; - fn encode_metadata(&self, - tcx: &ty::ctxt<'tcx>, - reexports: &def::ExportMap, - item_symbols: &RefCell>, - link_meta: &LinkMeta, - reachable: &NodeSet, - mir_map: &NodeMap>, - krate: &hir::Crate) -> Vec; + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)>; + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource; + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option; + fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + reexports: &def::ExportMap, + link_meta: &LinkMeta, + reachable: &NodeSet) -> Vec; fn metadata_encoding_version(&self) -> &[u8]; } -impl InlinedItem { - pub fn visit<'ast,V>(&'ast self, visitor: &mut V) - where V: Visitor<'ast> - { - match *self 
{ - InlinedItem::Item(ref i) => visitor.visit_item(&**i), - InlinedItem::Foreign(ref i) => visitor.visit_foreign_item(&**i), - InlinedItem::TraitItem(_, ref ti) => visitor.visit_trait_item(ti), - InlinedItem::ImplItem(_, ref ii) => visitor.visit_impl_item(ii), - } - } - - pub fn visit_ids(&self, operation: &mut O) { - let mut id_visitor = IdVisitor::new(operation); - self.visit(&mut id_visitor); - } -} - // FIXME: find a better place for this? pub fn validate_crate_name(sess: Option<&Session>, s: &str, sp: Option) { - let say = |s: &str| { - match (sp, sess) { - (_, None) => panic!("{}", s), - (Some(sp), Some(sess)) => sess.span_err(sp, s), - (None, Some(sess)) => sess.err(s), + let mut err_count = 0; + { + let mut say = |s: &str| { + match (sp, sess) { + (_, None) => bug!("{}", s), + (Some(sp), Some(sess)) => sess.span_err(sp, s), + (None, Some(sess)) => sess.err(s), + } + err_count += 1; + }; + if s.is_empty() { + say("crate name must not be empty"); + } + for c in s.chars() { + if c.is_alphanumeric() { continue } + if c == '_' { continue } + say(&format!("invalid character `{}` in crate name: `{}`", c, s)); } - }; - if s.is_empty() { - say("crate name must not be empty"); - } - for c in s.chars() { - if c.is_alphanumeric() { continue } - if c == '_' { continue } - say(&format!("invalid character `{}` in crate name: `{}`", c, s)); } - match sess { - Some(sess) => sess.abort_if_errors(), - None => {} + + if err_count > 0 { + sess.unwrap().abort_if_errors(); } } @@ -292,267 +404,148 @@ pub struct DummyCrateStore; #[allow(unused_variables)] impl<'tcx> CrateStore<'tcx> for DummyCrateStore { // item info - fn stability(&self, def: DefId) -> Option { unimplemented!() } - fn deprecation(&self, def: DefId) -> Option { unimplemented!() } - fn closure_kind(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> ty::ClosureKind { unimplemented!() } - fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> ty::ClosureTy<'tcx> { unimplemented!() } - fn item_variances(&self, 
def: DefId) -> ty::ItemVariances { unimplemented!() } - fn repr_attrs(&self, def: DefId) -> Vec { unimplemented!() } - fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::TypeScheme<'tcx> { unimplemented!() } - fn item_path(&self, def: DefId) -> Vec { unimplemented!() } - fn extern_item_path(&self, def: DefId) -> Vec { unimplemented!() } - fn item_name(&self, def: DefId) -> ast::Name { unimplemented!() } - fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx> { unimplemented!() } - fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx> { unimplemented!() } - fn item_attrs(&self, def_id: DefId) -> Vec { unimplemented!() } - fn item_symbol(&self, def: DefId) -> String { unimplemented!() } - fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId)-> ty::TraitDef<'tcx> - { unimplemented!() } - fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx> - { unimplemented!() } - fn method_arg_names(&self, did: DefId) -> Vec { unimplemented!() } + fn describe_def(&self, def: DefId) -> Option { bug!("describe_def") } + fn def_span(&self, sess: &Session, def: DefId) -> Span { bug!("def_span") } + fn stability(&self, def: DefId) -> Option { bug!("stability") } + fn deprecation(&self, def: DefId) -> Option { bug!("deprecation") } + fn visibility(&self, def: DefId) -> ty::Visibility { bug!("visibility") } + fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { bug!("closure_kind") } + fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> ty::ClosureTy<'tcx> { bug!("closure_ty") } + fn item_variances(&self, def: DefId) -> Vec { bug!("item_variances") } + fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Ty<'tcx> { bug!("item_type") } + fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap> { + bug!("visible_parent_map") + } + fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> 
ty::GenericPredicates<'tcx> { bug!("item_predicates") } + fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::GenericPredicates<'tcx> { bug!("item_super_predicates") } + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx> { bug!("item_generics") } + fn item_attrs(&self, def_id: DefId) -> Vec { bug!("item_attrs") } + fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)-> ty::TraitDef + { bug!("trait_def") } + fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> &'tcx ty::AdtDef + { bug!("adt_def") } + fn fn_arg_names(&self, did: DefId) -> Vec { bug!("fn_arg_names") } fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec { vec![] } // trait info - fn implementations_of_trait(&self, def_id: DefId) -> Vec { vec![] } - fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>> { unimplemented!() } - fn trait_item_def_ids(&self, def: DefId) - -> Vec { unimplemented!() } + fn implementations_of_trait(&self, filter: Option) -> Vec { vec![] } + fn def_index_for_def_key(&self, + cnum: CrateNum, + def: DefKey) + -> Option { + None + } // impl info - fn impl_items(&self, impl_def_id: DefId) -> Vec - { unimplemented!() } - fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option> { unimplemented!() } - fn impl_polarity(&self, def: DefId) -> Option { unimplemented!() } + fn associated_item_def_ids(&self, def_id: DefId) -> Vec + { bug!("associated_items") } + fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option> { bug!("impl_trait_ref") } + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity { bug!("impl_polarity") } fn custom_coerce_unsized_kind(&self, def: DefId) -> Option - { unimplemented!() } - fn associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>> { unimplemented!() } + { bug!("custom_coerce_unsized_kind") } + fn impl_parent(&self, def: DefId) -> Option { bug!("impl_parent") } // 
trait/impl-item info - fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) - -> Option { unimplemented!() } - fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx> { unimplemented!() } + fn trait_of_item(&self, def_id: DefId) -> Option { bug!("trait_of_item") } + fn associated_item<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option { bug!("associated_item") } // flags - fn is_const_fn(&self, did: DefId) -> bool { unimplemented!() } - fn is_defaulted_trait(&self, did: DefId) -> bool { unimplemented!() } - fn is_impl(&self, did: DefId) -> bool { unimplemented!() } - fn is_default_impl(&self, impl_did: DefId) -> bool { unimplemented!() } - fn is_extern_fn(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool { unimplemented!() } - fn is_static(&self, did: DefId) -> bool { unimplemented!() } - fn is_static_method(&self, did: DefId) -> bool { unimplemented!() } + fn is_const_fn(&self, did: DefId) -> bool { bug!("is_const_fn") } + fn is_defaulted_trait(&self, did: DefId) -> bool { bug!("is_defaulted_trait") } + fn is_default_impl(&self, impl_did: DefId) -> bool { bug!("is_default_impl") } + fn is_foreign_item(&self, did: DefId) -> bool { bug!("is_foreign_item") } fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool { false } - fn is_typedef(&self, did: DefId) -> bool { unimplemented!() } // crate metadata - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)> - { unimplemented!() } - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)> - { unimplemented!() } - fn missing_lang_items(&self, cnum: ast::CrateNum) -> Vec - { unimplemented!() } - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool { unimplemented!() } - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool { unimplemented!() } - fn is_allocator(&self, cnum: ast::CrateNum) -> bool { unimplemented!() } - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec - { 
unimplemented!() } - fn crate_name(&self, cnum: ast::CrateNum) -> String { unimplemented!() } - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh { unimplemented!() } - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap> - { unimplemented!() } - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option - { unimplemented!() } - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)> - { unimplemented!() } - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec { unimplemented!() } + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)> + { bug!("dylib_dependency_formats") } + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> + { bug!("lang_items") } + fn missing_lang_items(&self, cnum: CrateNum) -> Vec + { bug!("missing_lang_items") } + fn is_staged_api(&self, cnum: CrateNum) -> bool { bug!("is_staged_api") } + fn dep_kind(&self, cnum: CrateNum) -> DepKind { bug!("is_explicitly_linked") } + fn export_macros(&self, cnum: CrateNum) { bug!("export_macros") } + fn is_allocator(&self, cnum: CrateNum) -> bool { bug!("is_allocator") } + fn is_panic_runtime(&self, cnum: CrateNum) -> bool { bug!("is_panic_runtime") } + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { bug!("is_compiler_builtins") } + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { + bug!("panic_strategy") + } + fn extern_crate(&self, cnum: CrateNum) -> Option { bug!("extern_crate") } + fn crate_name(&self, cnum: CrateNum) -> Symbol { bug!("crate_name") } + fn original_crate_name(&self, cnum: CrateNum) -> Symbol { + bug!("original_crate_name") + } + fn crate_hash(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") } + fn crate_disambiguator(&self, cnum: CrateNum) + -> Symbol { bug!("crate_disambiguator") } + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option + { bug!("plugin_registrar_fn") } + fn native_libraries(&self, cnum: CrateNum) -> Vec + { bug!("native_libraries") } + fn 
reachable_ids(&self, cnum: CrateNum) -> Vec { bug!("reachable_ids") } + fn is_no_builtins(&self, cnum: CrateNum) -> bool { bug!("is_no_builtins") } // resolve - fn def_path(&self, def: DefId) -> hir_map::DefPath { unimplemented!() } - fn tuple_struct_definition_if_ctor(&self, did: DefId) -> Option - { unimplemented!() } - fn struct_field_names(&self, def: DefId) -> Vec { unimplemented!() } - fn item_children(&self, did: DefId) -> Vec { unimplemented!() } - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec - { unimplemented!() } - - // misc. metadata - fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> FoundAst<'tcx> { unimplemented!() } - fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option> { unimplemented!() } - - // This is basically a 1-based range of ints, which is a little - // silly - I may fix that. - fn crates(&self) -> Vec { vec![] } - fn used_libraries(&self) -> Vec<(String, NativeLibraryKind)> { vec![] } - fn used_link_args(&self) -> Vec { vec![] } - - // utility functions - fn metadata_filename(&self) -> &str { unimplemented!() } - fn metadata_section_name(&self, target: &Target) -> &str { unimplemented!() } - fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec - { unimplemented!() } - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)> - { vec![] } - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource { unimplemented!() } - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { None } - fn encode_metadata(&self, - tcx: &ty::ctxt<'tcx>, - reexports: &def::ExportMap, - item_symbols: &RefCell>, - link_meta: &LinkMeta, - reachable: &NodeSet, - mir_map: &NodeMap>, - krate: &hir::Crate) -> Vec { vec![] } - fn metadata_encoding_version(&self) -> &[u8] { unimplemented!() } -} - - -/// Metadata encoding and decoding can make use of thread-local encoding and -/// decoding contexts. 
These allow implementers of serialize::Encodable and -/// Decodable to access information and datastructures that would otherwise not -/// be available to them. For example, we can automatically translate def-id and -/// span information during decoding because the decoding context knows which -/// crate the data is decoded from. Or it allows to make ty::Ty decodable -/// because the context has access to the ty::ctxt that is needed for creating -/// ty::Ty instances. -/// -/// Note, however, that this only works for RBML-based encoding and decoding at -/// the moment. -pub mod tls { - use rbml::opaque::Encoder as OpaqueEncoder; - use rbml::opaque::Decoder as OpaqueDecoder; - use serialize; - use std::mem; - use middle::ty::{self, Ty}; - use middle::subst::Substs; - use middle::def_id::DefId; - - pub trait EncodingContext<'tcx> { - fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>; - fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: Ty<'tcx>); - fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>); + fn def_key(&self, def: DefId) -> hir_map::DefKey { bug!("def_key") } + fn relative_def_path(&self, def: DefId) -> Option { + bug!("relative_def_path") } + fn struct_field_names(&self, def: DefId) -> Vec { bug!("struct_field_names") } + fn item_children(&self, did: DefId) -> Vec { bug!("item_children") } + fn load_macro(&self, did: DefId, sess: &Session) -> LoadedMacro { bug!("load_macro") } - /// Marker type used for the scoped TLS slot. - /// The type context cannot be used directly because the scoped TLS - /// in libstd doesn't allow types generic over lifetimes. - struct TlsPayload; - - scoped_thread_local!(static TLS_ENCODING: TlsPayload); - - /// Execute f after pushing the given EncodingContext onto the TLS stack. 
- pub fn enter_encoding_context<'tcx, F, R>(ecx: &EncodingContext<'tcx>, - encoder: &mut OpaqueEncoder, - f: F) -> R - where F: FnOnce(&EncodingContext<'tcx>, &mut OpaqueEncoder) -> R - { - let tls_payload = (ecx as *const _, encoder as *mut _); - let tls_ptr = &tls_payload as *const _ as *const TlsPayload; - TLS_ENCODING.set(unsafe { &*tls_ptr }, || f(ecx, encoder)) + // misc. metadata + fn maybe_get_item_ast<'a>(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option<(&'tcx InlinedItem, ast::NodeId)> { + bug!("maybe_get_item_ast") } - - /// Execute f with access to the thread-local encoding context and - /// rbml encoder. This function will panic if the encoder passed in and the - /// context encoder are not the same. - /// - /// Note that this method is 'practically' safe due to its checking that the - /// encoder passed in is the same as the one in TLS, but it would still be - /// possible to construct cases where the EncodingContext is exchanged - /// while the same encoder is used, thus working with a wrong context. - pub fn with_encoding_context<'tcx, E, F, R>(encoder: &mut E, f: F) -> R - where F: FnOnce(&EncodingContext<'tcx>, &mut OpaqueEncoder) -> R, - E: serialize::Encoder - { - unsafe { - unsafe_with_encoding_context(|ecx, tls_encoder| { - assert!(encoder as *mut _ as usize == tls_encoder as *mut _ as usize); - - let ecx: &EncodingContext<'tcx> = mem::transmute(ecx); - - f(ecx, tls_encoder) - }) - } + fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option { + bug!("local_node_for_inlined_defid") } - - /// Execute f with access to the thread-local encoding context and - /// rbml encoder. 
- pub unsafe fn unsafe_with_encoding_context(f: F) -> R - where F: FnOnce(&EncodingContext, &mut OpaqueEncoder) -> R - { - TLS_ENCODING.with(|tls| { - let tls_payload = (tls as *const TlsPayload) - as *mut (&EncodingContext, &mut OpaqueEncoder); - f((*tls_payload).0, (*tls_payload).1) - }) + fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option { + bug!("defid_for_inlined_node") } - pub trait DecodingContext<'tcx> { - fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>; - fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx>; - fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx>; - fn translate_def_id(&self, def_id: DefId) -> DefId; + fn get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Mir<'tcx> { bug!("get_item_mir") } + fn is_item_mir_available(&self, def: DefId) -> bool { + bug!("is_item_mir_available") } - scoped_thread_local!(static TLS_DECODING: TlsPayload); - - /// Execute f after pushing the given DecodingContext onto the TLS stack. - pub fn enter_decoding_context<'tcx, F, R>(dcx: &DecodingContext<'tcx>, - decoder: &mut OpaqueDecoder, - f: F) -> R - where F: FnOnce(&DecodingContext<'tcx>, &mut OpaqueDecoder) -> R - { - let tls_payload = (dcx as *const _, decoder as *mut _); - let tls_ptr = &tls_payload as *const _ as *const TlsPayload; - TLS_DECODING.set(unsafe { &*tls_ptr }, || f(dcx, decoder)) + // This is basically a 1-based range of ints, which is a little + // silly - I may fix that. + fn crates(&self) -> Vec { vec![] } + fn used_libraries(&self) -> Vec { + vec![] } + fn used_link_args(&self) -> Vec { vec![] } - /// Execute f with access to the thread-local decoding context and - /// rbml decoder. This function will panic if the decoder passed in and the - /// context decoder are not the same. 
- /// - /// Note that this method is 'practically' safe due to its checking that the - /// decoder passed in is the same as the one in TLS, but it would still be - /// possible to construct cases where the DecodingContext is exchanged - /// while the same decoder is used, thus working with a wrong context. - pub fn with_decoding_context<'decoder, 'tcx, D, F, R>(d: &'decoder mut D, f: F) -> R - where D: serialize::Decoder, - F: FnOnce(&DecodingContext<'tcx>, - &mut OpaqueDecoder) -> R, - 'tcx: 'decoder - { - unsafe { - unsafe_with_decoding_context(|dcx, decoder| { - assert!((d as *mut _ as usize) == (decoder as *mut _ as usize)); - - let dcx: &DecodingContext<'tcx> = mem::transmute(dcx); - - f(dcx, decoder) - }) - } - } + // utility functions + fn metadata_filename(&self) -> &str { bug!("metadata_filename") } + fn metadata_section_name(&self, target: &Target) -> &str { bug!("metadata_section_name") } + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> + { vec![] } + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource { bug!("used_crate_source") } + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { None } + fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + reexports: &def::ExportMap, + link_meta: &LinkMeta, + reachable: &NodeSet) -> Vec { vec![] } + fn metadata_encoding_version(&self) -> &[u8] { bug!("metadata_encoding_version") } +} - /// Execute f with access to the thread-local decoding context and - /// rbml decoder. 
- pub unsafe fn unsafe_with_decoding_context(f: F) -> R - where F: FnOnce(&DecodingContext, &mut OpaqueDecoder) -> R - { - TLS_DECODING.with(|tls| { - let tls_payload = (tls as *const TlsPayload) - as *mut (&DecodingContext, &mut OpaqueDecoder); - f((*tls_payload).0, (*tls_payload).1) - }) - } +pub trait CrateLoader { + fn process_item(&mut self, item: &ast::Item, defs: &Definitions); + fn postprocess(&mut self, krate: &ast::Crate); } diff --git a/src/librustc/middle/dataflow.rs b/src/librustc/middle/dataflow.rs index e9029958880bd..f7a34c43cccbd 100644 --- a/src/librustc/middle/dataflow.rs +++ b/src/librustc/middle/dataflow.rs @@ -14,20 +14,19 @@ //! and thus uses bitvectors. Your job is simply to specify the so-called //! GEN and KILL bits for each expression. -use middle::cfg; -use middle::cfg::CFGIndex; -use middle::ty; +use cfg; +use cfg::CFGIndex; +use ty::TyCtxt; use std::io; use std::mem; use std::usize; use syntax::ast; -use syntax::ast_util::IdRange; use syntax::print::pp; use syntax::print::pprust::PrintState; use util::nodemap::NodeMap; -use rustc_front::hir; -use rustc_front::intravisit; -use rustc_front::print::pprust; +use hir; +use hir::intravisit::{self, IdRange}; +use hir::print as pprust; #[derive(Copy, Clone, Debug)] @@ -38,7 +37,7 @@ pub enum EntryOrExit { #[derive(Clone)] pub struct DataFlowContext<'a, 'tcx: 'a, O> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, /// a name for the analysis using this dataflow instance analysis_name: &'static str, @@ -113,10 +112,10 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O ps: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { let id = match node { - pprust::NodeName(_) => 0, + pprust::NodeName(_) => ast::CRATE_NODE_ID, pprust::NodeExpr(expr) => expr.id, pprust::NodeBlock(blk) => blk.id, - pprust::NodeItem(_) | pprust::NodeSubItem(_) => 0, + pprust::NodeItem(_) | pprust::NodeSubItem(_) => ast::CRATE_NODE_ID, pprust::NodePat(pat) => pat.id }; @@ 
-152,10 +151,10 @@ impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O "".to_string() }; - try!(ps.synth_comment( + ps.synth_comment( format!("id {}: {}{}{}{}", id, entry_str, - gens_str, action_kills_str, scope_kills_str))); - try!(pp::space(&mut ps.s)); + gens_str, action_kills_str, scope_kills_str))?; + pp::space(&mut ps.s)?; } Ok(()) } @@ -169,9 +168,8 @@ fn build_nodeid_to_index(decl: Option<&hir::FnDecl>, // into cfg itself? i.e. introduce a fn-based flow-graph in // addition to the current block-based flow-graph, rather than // have to put traversals like this here? - match decl { - None => {} - Some(decl) => add_entries_from_fn_decl(&mut index, decl, cfg.entry) + if let Some(decl) = decl { + add_entries_from_fn_decl(&mut index, decl, cfg.entry); } cfg.graph.each_node(|node_idx, node| { @@ -195,6 +193,10 @@ fn build_nodeid_to_index(decl: Option<&hir::FnDecl>, let mut formals = Formals { entry: entry, index: index }; intravisit::walk_fn_decl(&mut formals, decl); impl<'a, 'v> intravisit::Visitor<'v> for Formals<'a> { + fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'v> { + panic!("should not encounter fn bodies or items") + } + fn visit_pat(&mut self, p: &hir::Pat) { self.index.entry(p.id).or_insert(vec![]).push(self.entry); intravisit::walk_pat(self, p) @@ -223,7 +225,7 @@ pub enum KillFrom { } impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { - pub fn new(tcx: &'a ty::ctxt<'tcx>, + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, analysis_name: &'static str, decl: Option<&hir::FnDecl>, cfg: &cfg::CFG, @@ -489,7 +491,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { let bits = &mut self.scope_kills[start.. 
end]; debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [before]", self.analysis_name, flow_exit, mut_bits_to_string(bits)); - bits.clone_from_slice(&orig_kills[..]); + bits.copy_from_slice(&orig_kills[..]); debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [after]", self.analysis_name, flow_exit, mut_bits_to_string(bits)); } @@ -500,7 +502,7 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { // ^^^^^^^^^^^^^ only needed for pretty printing - pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &hir::Block) { + pub fn propagate(&mut self, cfg: &cfg::CFG, body: &hir::Expr) { //! Performs the data flow analysis. if self.bits_per_id == 0 { @@ -526,18 +528,17 @@ impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> { debug!("Dataflow result for {}:", self.analysis_name); debug!("{}", { let mut v = Vec::new(); - self.pretty_print_to(box &mut v, blk).unwrap(); - println!("{}", String::from_utf8(v).unwrap()); - "" + self.pretty_print_to(box &mut v, body).unwrap(); + String::from_utf8(v).unwrap() }); } fn pretty_print_to<'b>(&self, wr: Box, - blk: &hir::Block) -> io::Result<()> { + body: &hir::Expr) -> io::Result<()> { let mut ps = pprust::rust_printer_annotated(wr, self, None); - try!(ps.cbox(pprust::indent_unit)); - try!(ps.ibox(0)); - try!(ps.print_block(blk)); + ps.cbox(pprust::indent_unit)?; + ps.ibox(0)?; + ps.print_expr(body)?; pp::eof(&mut ps.s) } } @@ -557,7 +558,7 @@ impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> { let (start, end) = self.dfcx.compute_id_range(node_index); // Initialize local bitvector with state on-entry. - in_out.clone_from_slice(&self.dfcx.on_entry[start.. end]); + in_out.copy_from_slice(&self.dfcx.on_entry[start.. end]); // Compute state on-exit by applying transfer function to // state on-entry. 
@@ -654,7 +655,7 @@ fn set_bit(words: &mut [usize], bit: usize) -> bool { let word = bit / usize_bits; let bit_in_word = bit % usize_bits; let bit_mask = 1 << bit_in_word; - debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, word); + debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask); let oldv = words[word]; let newv = oldv | bit_mask; words[word] = newv; @@ -662,8 +663,8 @@ fn set_bit(words: &mut [usize], bit: usize) -> bool { } fn bit_str(bit: usize) -> String { - let byte = bit >> 8; - let lobits = 1 << (bit & 0xFF); + let byte = bit >> 3; + let lobits = 1 << (bit & 0b111); format!("[{}:{}-{:02x}]", bit, byte, lobits) } diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 1386ef91c70bf..1bf6b837fd998 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -13,23 +13,28 @@ // from live codes are live, and everything else is dead. use dep_graph::DepNode; -use front::map as ast_map; -use rustc_front::hir; -use rustc_front::intravisit::{self, Visitor}; - -use middle::{def, pat_util, privacy, ty}; -use middle::def_id::{DefId}; +use hir::map as ast_map; +use hir::{self, PatKind}; +use hir::intravisit::{self, Visitor, NestedVisitorMap}; +use hir::itemlikevisit::ItemLikeVisitor; + +use middle::privacy; +use ty::{self, TyCtxt}; +use hir::def::Def; +use hir::def_id::{DefId}; use lint; +use util::nodemap::FxHashSet; -use std::collections::HashSet; use syntax::{ast, codemap}; -use syntax::attr::{self, AttrMetaMethods}; +use syntax::attr; +use syntax_pos; // Any local node that may call something in its body block should be // explored. For example, if it's a live NodeItem that is a // function, then we should explore its block to check for codes that // may need to be marked as live. 
-fn should_explore(tcx: &ty::ctxt, node_id: ast::NodeId) -> bool { +fn should_explore<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + node_id: ast::NodeId) -> bool { match tcx.map.find(node_id) { Some(ast_map::NodeItem(..)) | Some(ast_map::NodeImplItem(..)) | @@ -43,8 +48,8 @@ fn should_explore(tcx: &ty::ctxt, node_id: ast::NodeId) -> bool { struct MarkSymbolVisitor<'a, 'tcx: 'a> { worklist: Vec, - tcx: &'a ty::ctxt<'tcx>, - live_symbols: Box>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + live_symbols: Box>, struct_has_extern_repr: bool, ignore_non_const_paths: bool, inherited_pub_visibility: bool, @@ -52,12 +57,12 @@ struct MarkSymbolVisitor<'a, 'tcx: 'a> { } impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>, + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, worklist: Vec) -> MarkSymbolVisitor<'a, 'tcx> { MarkSymbolVisitor { worklist: worklist, tcx: tcx, - live_symbols: box HashSet::new(), + live_symbols: box FxHashSet(), struct_has_extern_repr: false, ignore_non_const_paths: false, inherited_pub_visibility: false, @@ -81,68 +86,73 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } } - fn lookup_and_handle_definition(&mut self, id: &ast::NodeId) { - use middle::ty::TypeVariants::{TyEnum, TyStruct}; - + fn handle_definition(&mut self, id: ast::NodeId, def: Def) { // If `bar` is a trait item, make sure to mark Foo as alive in `Foo::bar` - self.tcx.tables.borrow().item_substs.get(id) - .and_then(|substs| substs.substs.self_ty()) - .map(|ty| match ty.sty { - TyEnum(tyid, _) | TyStruct(tyid, _) => self.check_def_id(tyid.did), - _ => (), - }); - - self.tcx.def_map.borrow().get(id).map(|def| { - match def.full_def() { - def::DefConst(_) | def::DefAssociatedConst(..) => { - self.check_def_id(def.def_id()); + match def { + Def::AssociatedTy(..) 
| Def::Method(_) | Def::AssociatedConst(_) + if self.tcx.trait_of_item(def.def_id()).is_some() => { + if let Some(substs) = self.tcx.tables().item_substs.get(&id) { + if let ty::TyAdt(tyid, _) = substs.substs.type_at(0).sty { + self.check_def_id(tyid.did); + } } - _ if self.ignore_non_const_paths => (), - def::DefPrimTy(_) => (), - def::DefSelfTy(..) => (), - def::DefVariant(enum_id, variant_id, _) => { + } + _ => {} + } + + match def { + Def::Const(_) | Def::AssociatedConst(..) => { + self.check_def_id(def.def_id()); + } + _ if self.ignore_non_const_paths => (), + Def::PrimTy(..) | Def::SelfTy(..) => (), + Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => { + if let Some(enum_id) = self.tcx.parent_def_id(variant_id) { self.check_def_id(enum_id); - if !self.ignore_variant_stack.contains(&variant_id) { - self.check_def_id(variant_id); - } } - _ => { - self.check_def_id(def.def_id()); + if !self.ignore_variant_stack.contains(&variant_id) { + self.check_def_id(variant_id); } } - }); + _ => { + self.check_def_id(def.def_id()); + } + } } fn lookup_and_handle_method(&mut self, id: ast::NodeId) { let method_call = ty::MethodCall::expr(id); - let method = self.tcx.tables.borrow().method_map[&method_call]; + let method = self.tcx.tables().method_map[&method_call]; self.check_def_id(method.def_id); } fn handle_field_access(&mut self, lhs: &hir::Expr, name: ast::Name) { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(lhs).sty { - self.insert_def_id(def.struct_variant().field_named(name).did); - } else { - self.tcx.sess.span_bug(lhs.span, "named field access on non-struct") + match self.tcx.tables().expr_ty_adjusted(lhs).sty { + ty::TyAdt(def, _) => { + self.insert_def_id(def.struct_variant().field_named(name).did); + } + _ => span_bug!(lhs.span, "named field access on non-ADT"), } } fn handle_tup_field_access(&mut self, lhs: &hir::Expr, idx: usize) { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(lhs).sty { - 
self.insert_def_id(def.struct_variant().fields[idx].did); + match self.tcx.tables().expr_ty_adjusted(lhs).sty { + ty::TyAdt(def, _) => { + self.insert_def_id(def.struct_variant().fields[idx].did); + } + ty::TyTuple(..) => {} + _ => span_bug!(lhs.span, "numeric field access on non-ADT"), } } - fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, + fn handle_field_pattern_match(&mut self, lhs: &hir::Pat, def: Def, pats: &[codemap::Spanned]) { - let def = self.tcx.def_map.borrow().get(&lhs.id).unwrap().full_def(); - let pat_ty = self.tcx.node_id_to_type(lhs.id); - let variant = match pat_ty.sty { - ty::TyStruct(adt, _) | ty::TyEnum(adt, _) => adt.variant_of_def(def), - _ => self.tcx.sess.span_bug(lhs.span, "non-ADT in struct pattern") + let variant = match self.tcx.tables().node_id_to_type(lhs.id).sty { + ty::TyAdt(adt, _) => adt.variant_of_def(def), + _ => span_bug!(lhs.span, "non-ADT in struct pattern") }; for pat in pats { - if let hir::PatWild = pat.node.pat.node { + if let PatKind::Wild = pat.node.pat.node { continue; } self.insert_def_id(variant.field_named(pat.node.name).did); @@ -150,7 +160,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn mark_live_symbols(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FxHashSet(); while !self.worklist.is_empty() { let id = self.worklist.pop().unwrap(); if scanned.contains(&id) { @@ -158,17 +168,14 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } scanned.insert(id); - match self.tcx.map.find(id) { - Some(ref node) => { - self.live_symbols.insert(id); - self.visit_node(node); - } - None => (), + if let Some(ref node) = self.tcx.map.find(id) { + self.live_symbols.insert(id); + self.visit_node(node); } } } - fn visit_node(&mut self, node: &ast_map::Node) { + fn visit_node(&mut self, node: &ast_map::Node<'tcx>) { let had_extern_repr = self.struct_has_extern_repr; self.struct_has_extern_repr = false; let had_inherited_pub_visibility = self.inherited_pub_visibility; @@ -176,23 +183,23 @@ impl<'a, 
'tcx> MarkSymbolVisitor<'a, 'tcx> { match *node { ast_map::NodeItem(item) => { match item.node { - hir::ItemStruct(..) => { + hir::ItemStruct(..) | hir::ItemUnion(..) => { self.struct_has_extern_repr = item.attrs.iter().any(|attr| { attr::find_repr_attrs(self.tcx.sess.diagnostic(), attr) .contains(&attr::ReprExtern) }); - intravisit::walk_item(self, &*item); + intravisit::walk_item(self, &item); } hir::ItemEnum(..) => { self.inherited_pub_visibility = item.vis == hir::Public; - intravisit::walk_item(self, &*item); + intravisit::walk_item(self, &item); } hir::ItemFn(..) | hir::ItemTy(..) | hir::ItemStatic(..) | hir::ItemConst(..) => { - intravisit::walk_item(self, &*item); + intravisit::walk_item(self, &item); } _ => () } @@ -204,7 +211,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { intravisit::walk_impl_item(self, impl_item); } ast_map::NodeForeignItem(foreign_item) => { - intravisit::walk_foreign_item(self, &*foreign_item); + intravisit::walk_foreign_item(self, &foreign_item); } _ => () } @@ -213,33 +220,37 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } } -impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for MarkSymbolVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } - fn visit_variant_data(&mut self, def: &hir::VariantData, _: ast::Name, - _: &hir::Generics, _: ast::NodeId, _: codemap::Span) { + fn visit_variant_data(&mut self, def: &'tcx hir::VariantData, _: ast::Name, + _: &hir::Generics, _: ast::NodeId, _: syntax_pos::Span) { let has_extern_repr = self.struct_has_extern_repr; let inherited_pub_visibility = self.inherited_pub_visibility; let live_fields = def.fields().iter().filter(|f| { - has_extern_repr || inherited_pub_visibility || match f.node.kind { - hir::NamedField(_, hir::Public) => true, - _ => false - } + has_extern_repr || inherited_pub_visibility || f.vis == hir::Public }); - 
self.live_symbols.extend(live_fields.map(|f| f.node.id)); + self.live_symbols.extend(live_fields.map(|f| f.id)); intravisit::walk_struct_def(self, def); } - fn visit_expr(&mut self, expr: &hir::Expr) { + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { match expr.node { + hir::ExprPath(ref qpath @ hir::QPath::TypeRelative(..)) => { + let def = self.tcx.tables().qpath_def(qpath, expr.id); + self.handle_definition(expr.id, def); + } hir::ExprMethodCall(..) => { self.lookup_and_handle_method(expr.id); } hir::ExprField(ref lhs, ref name) => { - self.handle_field_access(&**lhs, name.node); + self.handle_field_access(&lhs, name.node); } hir::ExprTupField(ref lhs, idx) => { - self.handle_tup_field_access(&**lhs, idx.node); + self.handle_tup_field_access(&lhs, idx.node); } _ => () } @@ -247,16 +258,15 @@ impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> { intravisit::walk_expr(self, expr); } - fn visit_arm(&mut self, arm: &hir::Arm) { + fn visit_arm(&mut self, arm: &'tcx hir::Arm) { if arm.pats.len() == 1 { - let pat = &*arm.pats[0]; - let variants = pat_util::necessary_variants(&self.tcx.def_map.borrow(), pat); + let variants = arm.pats[0].necessary_variants(); // Inside the body, ignore constructions of variants // necessary for the pattern to match. Those construction sites // can't be reached unless the variant is constructed elsewhere. 
let len = self.ignore_variant_stack.len(); - self.ignore_variant_stack.extend_from_slice(&*variants); + self.ignore_variant_stack.extend_from_slice(&variants); intravisit::walk_arm(self, arm); self.ignore_variant_stack.truncate(len); } else { @@ -264,15 +274,14 @@ impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> { } } - fn visit_pat(&mut self, pat: &hir::Pat) { - let def_map = &self.tcx.def_map; + fn visit_pat(&mut self, pat: &'tcx hir::Pat) { match pat.node { - hir::PatStruct(_, ref fields, _) => { - self.handle_field_pattern_match(pat, fields); + PatKind::Struct(hir::QPath::Resolved(_, ref path), ref fields, _) => { + self.handle_field_pattern_match(pat, path.def, fields); } - _ if pat_util::pat_is_const(&def_map.borrow(), pat) => { - // it might be the only use of a const - self.lookup_and_handle_definition(&pat.id) + PatKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => { + let def = self.tcx.tables().qpath_def(qpath, pat.id); + self.handle_definition(pat.id, def); } _ => () } @@ -282,15 +291,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for MarkSymbolVisitor<'a, 'tcx> { self.ignore_non_const_paths = false; } - fn visit_path(&mut self, path: &hir::Path, id: ast::NodeId) { - self.lookup_and_handle_definition(&id); + fn visit_path(&mut self, path: &'tcx hir::Path, id: ast::NodeId) { + self.handle_definition(id, path.def); intravisit::walk_path(self, path); } - - fn visit_path_list_item(&mut self, path: &hir::Path, item: &hir::PathListItem) { - self.lookup_and_handle_definition(&item.node.id()); - intravisit::walk_path_list_item(self, path, item); - } } fn has_allow_dead_code_or_lang_attr(attrs: &[ast::Attribute]) -> bool { @@ -301,8 +305,7 @@ fn has_allow_dead_code_or_lang_attr(attrs: &[ast::Attribute]) -> bool { let dead_code = lint::builtin::DEAD_CODE.name_lower(); for attr in lint::gather_attrs(attrs) { match attr { - Ok((ref name, lint::Allow, _)) - if &name[..] 
== dead_code => return true, + Ok((name, lint::Allow, _)) if name == &*dead_code => return true, _ => (), } } @@ -322,11 +325,12 @@ fn has_allow_dead_code_or_lang_attr(attrs: &[ast::Attribute]) -> bool { // or // 2) We are not sure to be live or not // * Implementation of a trait method -struct LifeSeeder { - worklist: Vec +struct LifeSeeder<'k> { + worklist: Vec, + krate: &'k hir::Crate, } -impl<'v> Visitor<'v> for LifeSeeder { +impl<'v, 'k> ItemLikeVisitor<'v> for LifeSeeder<'k> { fn visit_item(&mut self, item: &hir::Item) { let allow_dead_code = has_allow_dead_code_or_lang_attr(&item.attrs); if allow_dead_code { @@ -337,7 +341,7 @@ impl<'v> Visitor<'v> for LifeSeeder { self.worklist.extend(enum_def.variants.iter() .map(|variant| variant.node.data.id())); } - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { match trait_item.node { hir::ConstTraitItem(_, Some(_)) | @@ -350,52 +354,52 @@ impl<'v> Visitor<'v> for LifeSeeder { } } } - hir::ItemImpl(_, _, _, ref opt_trait, _, ref impl_items) => { - for impl_item in impl_items { - match impl_item.node { - hir::ImplItemKind::Const(..) | - hir::ImplItemKind::Method(..) 
=> { - if opt_trait.is_some() || - has_allow_dead_code_or_lang_attr(&impl_item.attrs) { - self.worklist.push(impl_item.id); - } - } - hir::ImplItemKind::Type(_) => {} + hir::ItemImpl(.., ref opt_trait, _, ref impl_item_refs) => { + for impl_item_ref in impl_item_refs { + let impl_item = self.krate.impl_item(impl_item_ref.id); + if opt_trait.is_some() || + has_allow_dead_code_or_lang_attr(&impl_item.attrs) { + self.worklist.push(impl_item_ref.id.node_id); } } } _ => () } } + + fn visit_impl_item(&mut self, _item: &hir::ImplItem) { + // ignore: we are handling this in `visit_item` above + } } -fn create_and_seed_worklist(tcx: &ty::ctxt, - access_levels: &privacy::AccessLevels, - krate: &hir::Crate) -> Vec { +fn create_and_seed_worklist<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &privacy::AccessLevels, + krate: &hir::Crate) + -> Vec { let mut worklist = Vec::new(); for (id, _) in &access_levels.map { worklist.push(*id); } // Seed entry point - match *tcx.sess.entry_fn.borrow() { - Some((id, _)) => worklist.push(id), - None => () + if let Some((id, _)) = *tcx.sess.entry_fn.borrow() { + worklist.push(id); } // Seed implemented trait items let mut life_seeder = LifeSeeder { - worklist: worklist + worklist: worklist, + krate: krate, }; - krate.visit_all_items(&mut life_seeder); + krate.visit_all_item_likes(&mut life_seeder); return life_seeder.worklist; } -fn find_live(tcx: &ty::ctxt, - access_levels: &privacy::AccessLevels, - krate: &hir::Crate) - -> Box> { +fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &privacy::AccessLevels, + krate: &hir::Crate) + -> Box> { let worklist = create_and_seed_worklist(tcx, access_levels, krate); let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist); symbol_visitor.mark_live_symbols(); @@ -412,8 +416,8 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option { } struct DeadVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - live_symbols: Box>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + live_symbols: Box>, } 
impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { @@ -423,24 +427,24 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { | hir::ItemConst(..) | hir::ItemFn(..) | hir::ItemEnum(..) - | hir::ItemStruct(..) => true, + | hir::ItemStruct(..) + | hir::ItemUnion(..) => true, _ => false }; let ctor_id = get_struct_ctor_id(item); should_warn && !self.symbol_is_live(item.id, ctor_id) } - fn should_warn_about_field(&mut self, node: &hir::StructField_) -> bool { - let is_named = node.name().is_some(); - let field_type = self.tcx.node_id_to_type(node.id); + fn should_warn_about_field(&mut self, field: &hir::StructField) -> bool { + let field_type = self.tcx.item_type(self.tcx.map.local_def_id(field.id)); let is_marker_field = match field_type.ty_to_def_id() { - Some(def_id) => self.tcx.lang_items.items().any(|(_, item)| *item == Some(def_id)), + Some(def_id) => self.tcx.lang_items.items().iter().any(|item| *item == Some(def_id)), _ => false }; - is_named - && !self.symbol_is_live(node.id, None) + !field.is_positional() + && !self.symbol_is_live(field.id, None) && !is_marker_field - && !has_allow_dead_code_or_lang_attr(&node.attrs) + && !has_allow_dead_code_or_lang_attr(&field.attrs) } fn should_warn_about_variant(&mut self, variant: &hir::Variant_) -> bool { @@ -470,17 +474,13 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { // This is done to handle the case where, for example, the static // method of a private type is used, but the type itself is never // called directly. 
- let impl_items = self.tcx.impl_items.borrow(); - match self.tcx.inherent_impls.borrow().get(&self.tcx.map.local_def_id(id)) { - None => (), - Some(impl_list) => { - for impl_did in impl_list.iter() { - for item_did in impl_items.get(impl_did).unwrap().iter() { - if let Some(item_node_id) = - self.tcx.map.as_local_node_id(item_did.def_id()) { - if self.live_symbols.contains(&item_node_id) { - return true; - } + if let Some(impl_list) = + self.tcx.inherent_impls.borrow().get(&self.tcx.map.local_def_id(id)) { + for &impl_did in impl_list.iter() { + for &item_did in &self.tcx.associated_item_def_ids(impl_did)[..] { + if let Some(item_node_id) = self.tcx.map.as_local_node_id(item_did) { + if self.live_symbols.contains(&item_node_id) { + return true; } } } @@ -491,11 +491,10 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { fn warn_dead_code(&mut self, id: ast::NodeId, - span: codemap::Span, + span: syntax_pos::Span, name: ast::Name, node_type: &str) { - let name = name.as_str(); - if !name.starts_with("_") { + if !name.as_str().starts_with("_") { self.tcx .sess .add_lint(lint::builtin::DEAD_CODE, @@ -506,16 +505,16 @@ impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { } } -impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for DeadVisitor<'a, 'tcx> { /// Walk nested items in place so that we don't report dead-code /// on inner functions when the outer function is already getting /// an error. We could do this also by checking the parents, but /// this is how the code is setup and it seems harmless enough. 
- fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, item: &hir::Item) { + fn visit_item(&mut self, item: &'tcx hir::Item) { if self.should_warn_about_item(item) { self.warn_dead_code( item.id, @@ -529,7 +528,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { } } - fn visit_variant(&mut self, variant: &hir::Variant, g: &hir::Generics, id: ast::NodeId) { + fn visit_variant(&mut self, + variant: &'tcx hir::Variant, + g: &'tcx hir::Generics, + id: ast::NodeId) { if self.should_warn_about_variant(&variant.node) { self.warn_dead_code(variant.node.data.id(), variant.span, variant.node.name, "variant"); @@ -538,23 +540,23 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { } } - fn visit_foreign_item(&mut self, fi: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, fi: &'tcx hir::ForeignItem) { if !self.symbol_is_live(fi.id, None) { self.warn_dead_code(fi.id, fi.span, fi.name, fi.node.descriptive_variant()); } intravisit::walk_foreign_item(self, fi); } - fn visit_struct_field(&mut self, field: &hir::StructField) { - if self.should_warn_about_field(&field.node) { - self.warn_dead_code(field.node.id, field.span, - field.node.name().unwrap(), "struct field"); + fn visit_struct_field(&mut self, field: &'tcx hir::StructField) { + if self.should_warn_about_field(&field) { + self.warn_dead_code(field.id, field.span, + field.name, "field"); } intravisit::walk_struct_field(self, field); } - fn visit_impl_item(&mut self, impl_item: &hir::ImplItem) { + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { match impl_item.node { hir::ImplItemKind::Const(_, ref expr) => { if !self.symbol_is_live(impl_item.id, None) { @@ -563,25 +565,25 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { } intravisit::walk_expr(self, expr) } - 
hir::ImplItemKind::Method(_, ref body) => { + hir::ImplItemKind::Method(_, body_id) => { if !self.symbol_is_live(impl_item.id, None) { self.warn_dead_code(impl_item.id, impl_item.span, impl_item.name, "method"); } - intravisit::walk_block(self, body) + self.visit_body(body_id) } hir::ImplItemKind::Type(..) => {} } } // Overwrite so that we don't warn the trait item itself. - fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) { + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { match trait_item.node { - hir::ConstTraitItem(_, Some(ref expr)) => { - intravisit::walk_expr(self, expr) + hir::ConstTraitItem(_, Some(ref body)) => { + intravisit::walk_expr(self, body) } - hir::MethodTraitItem(_, Some(ref body)) => { - intravisit::walk_block(self, body) + hir::MethodTraitItem(_, Some(body_id)) => { + self.visit_body(body_id) } hir::ConstTraitItem(_, None) | hir::MethodTraitItem(_, None) | @@ -590,7 +592,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for DeadVisitor<'a, 'tcx> { } } -pub fn check_crate(tcx: &ty::ctxt, access_levels: &privacy::AccessLevels) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &privacy::AccessLevels) { let _task = tcx.dep_graph.in_task(DepNode::DeadCheck); let krate = tcx.map.krate(); let live_symbols = find_live(tcx, access_levels, krate); diff --git a/src/librustc/middle/def.rs b/src/librustc/middle/def.rs deleted file mode 100644 index 9ef2828c947aa..0000000000000 --- a/src/librustc/middle/def.rs +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::Def::*; - -use middle::def_id::DefId; -use middle::privacy::LastPrivate; -use middle::subst::ParamSpace; -use util::nodemap::NodeMap; -use syntax::ast; -use rustc_front::hir; - -#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Def { - DefFn(DefId, bool /* is_ctor */), - DefSelfTy(Option, // trait id - Option<(ast::NodeId, ast::NodeId)>), // (impl id, self type id) - DefMod(DefId), - DefForeignMod(DefId), - DefStatic(DefId, bool /* is_mutbl */), - DefConst(DefId), - DefAssociatedConst(DefId), - DefLocal(DefId, // def id of variable - ast::NodeId), // node id of variable - DefVariant(DefId /* enum */, DefId /* variant */, bool /* is_structure */), - DefTy(DefId, bool /* is_enum */), - DefAssociatedTy(DefId /* trait */, DefId), - DefTrait(DefId), - DefPrimTy(hir::PrimTy), - DefTyParam(ParamSpace, u32, DefId, ast::Name), - DefUpvar(DefId, // def id of closed over local - ast::NodeId, // node id of closed over local - usize, // index in the freevars list of the closure - ast::NodeId), // expr node that creates the closure - - /// Note that if it's a tuple struct's definition, the node id of the DefId - /// may either refer to the item definition's id or the VariantData.ctor_id. - /// - /// The cases that I have encountered so far are (this is not exhaustive): - /// - If it's a ty_path referring to some tuple struct, then DefMap maps - /// it to a def whose id is the item definition's id. - /// - If it's an ExprPath referring to some tuple struct, then DefMap maps - /// it to a def whose id is the VariantData.ctor_id. - DefStruct(DefId), - DefLabel(ast::NodeId), - DefMethod(DefId), - DefErr, -} - -/// The result of resolving a path. -/// Before type checking completes, `depth` represents the number of -/// trailing segments which are yet unresolved. Afterwards, if there -/// were no errors, all paths should be fully resolved, with `depth` -/// set to `0` and `base_def` representing the final resolution. 
-/// -/// module::Type::AssocX::AssocY::MethodOrAssocType -/// ^~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -/// base_def depth = 3 -/// -/// ::AssocX::AssocY::MethodOrAssocType -/// ^~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~ -/// base_def depth = 2 -#[derive(Copy, Clone, Debug)] -pub struct PathResolution { - pub base_def: Def, - pub last_private: LastPrivate, - pub depth: usize -} - -impl PathResolution { - /// Get the definition, if fully resolved, otherwise panic. - pub fn full_def(&self) -> Def { - if self.depth != 0 { - panic!("path not fully resolved: {:?}", self); - } - self.base_def - } - - /// Get the DefId, if fully resolved, otherwise panic. - pub fn def_id(&self) -> DefId { - self.full_def().def_id() - } - - pub fn new(base_def: Def, - last_private: LastPrivate, - depth: usize) - -> PathResolution { - PathResolution { - base_def: base_def, - last_private: last_private, - depth: depth, - } - } -} - -// Definition mapping -pub type DefMap = NodeMap; -// This is the replacement export map. It maps a module to all of the exports -// within. -pub type ExportMap = NodeMap>; - -#[derive(Copy, Clone)] -pub struct Export { - pub name: ast::Name, // The name of the target. - pub def_id: DefId, // The definition of the target. -} - -impl Def { - pub fn var_id(&self) -> ast::NodeId { - match *self { - DefLocal(_, id) | - DefUpvar(_, id, _, _) => { - id - } - - DefFn(..) | DefMod(..) | DefForeignMod(..) | DefStatic(..) | - DefVariant(..) | DefTy(..) | DefAssociatedTy(..) | - DefTyParam(..) | DefStruct(..) | DefTrait(..) | - DefMethod(..) | DefConst(..) | DefAssociatedConst(..) | - DefPrimTy(..) | DefLabel(..) | DefSelfTy(..) 
| DefErr => { - panic!("attempted .def_id() on invalid {:?}", self) - } - } - } - - pub fn def_id(&self) -> DefId { - match *self { - DefFn(id, _) | DefMod(id) | DefForeignMod(id) | DefStatic(id, _) | - DefVariant(_, id, _) | DefTy(id, _) | DefAssociatedTy(_, id) | - DefTyParam(_, _, id, _) | DefStruct(id) | DefTrait(id) | - DefMethod(id) | DefConst(id) | DefAssociatedConst(id) | - DefLocal(id, _) | DefUpvar(id, _, _, _) => { - id - } - - DefLabel(..) | - DefPrimTy(..) | - DefSelfTy(..) | - DefErr => { - panic!("attempted .def_id() on invalid def: {:?}", self) - } - } - } - - pub fn variant_def_ids(&self) -> Option<(DefId, DefId)> { - match *self { - DefVariant(enum_id, var_id, _) => { - Some((enum_id, var_id)) - } - _ => None - } - } -} diff --git a/src/librustc/middle/def_id.rs b/src/librustc/middle/def_id.rs deleted file mode 100644 index 4d0005f47c4f2..0000000000000 --- a/src/librustc/middle/def_id.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::cstore::LOCAL_CRATE; -use middle::ty; -use syntax::ast::CrateNum; -use std::fmt; -use std::u32; - -/// A DefIndex is an index into the hir-map for a crate, identifying a -/// particular definition. It should really be considered an interned -/// shorthand for a particular DefPath. 
-#[derive(Clone, Debug, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, - RustcDecodable, Hash, Copy)] -pub struct DefIndex(u32); - -impl DefIndex { - pub fn new(x: usize) -> DefIndex { - assert!(x < (u32::MAX as usize)); - DefIndex(x as u32) - } - - pub fn from_u32(x: u32) -> DefIndex { - DefIndex(x) - } - - pub fn as_usize(&self) -> usize { - self.0 as usize - } - - pub fn as_u32(&self) -> u32 { - self.0 - } -} - -/// The crate root is always assigned index 0 by the AST Map code, -/// thanks to `NodeCollector::new`. -pub const CRATE_DEF_INDEX: DefIndex = DefIndex(0); - -/// A DefId identifies a particular *definition*, by combining a crate -/// index and a def index. -#[derive(Clone, Eq, Ord, PartialOrd, PartialEq, RustcEncodable, - RustcDecodable, Hash, Copy)] -pub struct DefId { - pub krate: CrateNum, - pub index: DefIndex, -} - -impl fmt::Debug for DefId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "DefId {{ krate: {:?}, node: {:?}", - self.krate, self.index)); - - // Unfortunately, there seems to be no way to attempt to print - // a path for a def-id, so I'll just make a best effort for now - // and otherwise fallback to just printing the crate/node pair - if self.is_local() { // (1) - // (1) side-step fact that not all external things have paths at - // the moment, such as type parameters - try!(ty::tls::with_opt(|opt_tcx| { - if let Some(tcx) = opt_tcx { - try!(write!(f, " => {}", tcx.item_path_str(*self))); - } - Ok(()) - })); - } - - write!(f, " }}") - } -} - - -impl DefId { - pub fn local(index: DefIndex) -> DefId { - DefId { krate: LOCAL_CRATE, index: index } - } - - pub fn is_local(&self) -> bool { - self.krate == LOCAL_CRATE - } -} diff --git a/src/librustc/middle/dependency_format.rs b/src/librustc/middle/dependency_format.rs index aac6f1edc051d..075b3d7a8e90c 100644 --- a/src/librustc/middle/dependency_format.rs +++ b/src/librustc/middle/dependency_format.rs @@ -61,13 +61,14 @@ //! 
Additionally, the algorithm is geared towards finding *any* solution rather //! than finding a number of solutions (there are normally quite a few). -use syntax::ast; +use hir::def_id::CrateNum; use session; use session::config; -use middle::cstore::CrateStore; +use middle::cstore::DepKind; use middle::cstore::LinkagePreference::{self, RequireStatic, RequireDynamic}; -use util::nodemap::FnvHashMap; +use util::nodemap::FxHashMap; +use rustc_back::PanicStrategy; /// A list of dependencies for a certain crate type. /// @@ -80,7 +81,7 @@ pub type DependencyList = Vec; /// A mapping of all required dependencies for a particular flavor of output. /// /// This is local to the tcx, and is generally relevant to one session. -pub type Dependencies = FnvHashMap; +pub type Dependencies = FxHashMap; #[derive(Copy, Clone, PartialEq, Debug)] pub enum Linkage { @@ -106,24 +107,24 @@ fn calculate_type(sess: &session::Session, // If the global prefer_dynamic switch is turned off, first attempt // static linkage (this can fail). config::CrateTypeExecutable if !sess.opts.cg.prefer_dynamic => { - match attempt_static(sess) { - Some(v) => return v, - None => {} + if let Some(v) = attempt_static(sess) { + return v; } } // No linkage happens with rlibs, we just needed the metadata (which we // got long ago), so don't bother with anything. - config::CrateTypeRlib => return Vec::new(), + config::CrateTypeRlib | config::CrateTypeMetadata => return Vec::new(), - // Staticlibs must have all static dependencies. If any fail to be - // found, we generate some nice pretty errors. - config::CrateTypeStaticlib => { - match attempt_static(sess) { - Some(v) => return v, - None => {} + // Staticlibs and cdylibs must have all static dependencies. If any fail + // to be found, we generate some nice pretty errors. 
+ config::CrateTypeStaticlib | + config::CrateTypeCdylib => { + if let Some(v) = attempt_static(sess) { + return v; } for cnum in sess.cstore.crates() { + if sess.cstore.dep_kind(cnum).macros_only() { continue } let src = sess.cstore.used_crate_source(cnum); if src.rlib.is_some() { continue } sess.err(&format!("dependency `{}` not found in rlib format", @@ -136,22 +137,27 @@ fn calculate_type(sess: &session::Session, // to try to eagerly statically link all dependencies. This is normally // done for end-product dylibs, not intermediate products. config::CrateTypeDylib if !sess.opts.cg.prefer_dynamic => { - match attempt_static(sess) { - Some(v) => return v, - None => {} + if let Some(v) = attempt_static(sess) { + return v; } } - // Everything else falls through below - config::CrateTypeExecutable | config::CrateTypeDylib => {}, + // Everything else falls through below. This will happen either with the + // `-C prefer-dynamic` or because we're a proc-macro crate. Note that + // proc-macro crates are required to be dylibs, and they're currently + // required to link to libsyntax as well. + config::CrateTypeExecutable | + config::CrateTypeDylib | + config::CrateTypeProcMacro => {}, } - let mut formats = FnvHashMap(); + let mut formats = FxHashMap(); // Sweep all crates for found dylibs. Add all dylibs, as well as their // dependencies, ensuring there are no conflicts. The only valid case for a // dependency to be relied upon twice is for both cases to rely on a dylib. for cnum in sess.cstore.crates() { + if sess.cstore.dep_kind(cnum).macros_only() { continue } let name = sess.cstore.crate_name(cnum); let src = sess.cstore.used_crate_source(cnum); if src.dylib.is_some() { @@ -167,9 +173,9 @@ fn calculate_type(sess: &session::Session, } // Collect what we've got so far in the return vector. 
- let last_crate = sess.cstore.crates().len() as ast::CrateNum; + let last_crate = sess.cstore.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { - match formats.get(&cnum) { + match formats.get(&CrateNum::new(cnum)) { Some(&RequireDynamic) => Linkage::Dynamic, Some(&RequireStatic) => Linkage::IncludedFromDylib, None => Linkage::NotLinked, @@ -185,19 +191,24 @@ fn calculate_type(sess: &session::Session, let src = sess.cstore.used_crate_source(cnum); if src.dylib.is_none() && !formats.contains_key(&cnum) && - sess.cstore.is_explicitly_linked(cnum) { - assert!(src.rlib.is_some()); + sess.cstore.dep_kind(cnum) == DepKind::Explicit { + assert!(src.rlib.is_some() || src.rmeta.is_some()); info!("adding staticlib: {}", sess.cstore.crate_name(cnum)); add_library(sess, cnum, RequireStatic, &mut formats); - ret[cnum as usize - 1] = Linkage::Static; + ret[cnum.as_usize() - 1] = Linkage::Static; } } // We've gotten this far because we're emitting some form of a final - // artifact which means that we're going to need an allocator of some form. - // No allocator may have been required or linked so far, so activate one - // here if one isn't set. - activate_allocator(sess, &mut ret); + // artifact which means that we may need to inject dependencies of some + // form. + // + // Things like allocators and panic runtimes may not have been activated + // quite yet, so do so here. + activate_injected_dep(sess.injected_allocator.get(), &mut ret, + &|cnum| sess.cstore.is_allocator(cnum)); + activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret, + &|cnum| sess.cstore.is_panic_runtime(cnum)); // When dylib B links to dylib A, then when using B we must also link to A. // It could be the case, however, that the rlib for A is present (hence we @@ -206,7 +217,7 @@ fn calculate_type(sess: &session::Session, // For situations like this, we perform one last pass over the dependencies, // making sure that everything is available in the requested format. 
for (cnum, kind) in ret.iter().enumerate() { - let cnum = (cnum + 1) as ast::CrateNum; + let cnum = CrateNum::new(cnum + 1); let src = sess.cstore.used_crate_source(cnum); match *kind { Linkage::NotLinked | @@ -230,9 +241,9 @@ fn calculate_type(sess: &session::Session, } fn add_library(sess: &session::Session, - cnum: ast::CrateNum, + cnum: CrateNum, link: LinkagePreference, - m: &mut FnvHashMap) { + m: &mut FxHashMap) { match m.get(&cnum) { Some(&link2) => { // If the linkages differ, then we'd have two copies of the library @@ -262,49 +273,51 @@ fn attempt_static(sess: &session::Session) -> Option { // All crates are available in an rlib format, so we're just going to link // everything in explicitly so long as it's actually required. - let last_crate = sess.cstore.crates().len() as ast::CrateNum; + let last_crate = sess.cstore.crates().len(); let mut ret = (1..last_crate+1).map(|cnum| { - if sess.cstore.is_explicitly_linked(cnum) { + if sess.cstore.dep_kind(CrateNum::new(cnum)) == DepKind::Explicit { Linkage::Static } else { Linkage::NotLinked } }).collect::>(); - // Our allocator may not have been activated as it's not flagged with - // explicitly_linked, so flag it here if necessary. - activate_allocator(sess, &mut ret); + // Our allocator/panic runtime may not have been linked above if it wasn't + // explicitly linked, which is the case for any injected dependency. Handle + // that here and activate them. + activate_injected_dep(sess.injected_allocator.get(), &mut ret, + &|cnum| sess.cstore.is_allocator(cnum)); + activate_injected_dep(sess.injected_panic_runtime.get(), &mut ret, + &|cnum| sess.cstore.is_panic_runtime(cnum)); Some(ret) } // Given a list of how to link upstream dependencies so far, ensure that an -// allocator is activated. This will not do anything if one was transitively -// included already (e.g. via a dylib or explicitly so). +// injected dependency is activated. This will not do anything if one was +// transitively included already (e.g. 
via a dylib or explicitly so). // -// If an allocator was not found then we're guaranteed the metadata::creader -// module has injected an allocator dependency (not listed as a required -// dependency) in the session's `injected_allocator` field. If this field is not -// set then this compilation doesn't actually need an allocator and we can also -// skip this step entirely. -fn activate_allocator(sess: &session::Session, list: &mut DependencyList) { - let mut allocator_found = false; +// If an injected dependency was not found then we're guaranteed the +// metadata::creader module has injected that dependency (not listed as +// a required dependency) in one of the session's field. If this field is not +// set then this compilation doesn't actually need the dependency and we can +// also skip this step entirely. +fn activate_injected_dep(injected: Option, + list: &mut DependencyList, + replaces_injected: &Fn(CrateNum) -> bool) { for (i, slot) in list.iter().enumerate() { - let cnum = (i + 1) as ast::CrateNum; - if !sess.cstore.is_allocator(cnum) { + let cnum = CrateNum::new(i + 1); + if !replaces_injected(cnum) { continue } - if let Linkage::NotLinked = *slot { - continue + if *slot != Linkage::NotLinked { + return } - allocator_found = true; } - if !allocator_found { - if let Some(injected_allocator) = sess.injected_allocator.get() { - let idx = injected_allocator as usize - 1; - assert_eq!(list[idx], Linkage::NotLinked); - list[idx] = Linkage::Static; - } + if let Some(injected) = injected { + let idx = injected.as_usize() - 1; + assert_eq!(list[idx], Linkage::NotLinked); + list[idx] = Linkage::Static; } } @@ -315,21 +328,75 @@ fn verify_ok(sess: &session::Session, list: &[Linkage]) { return } let mut allocator = None; + let mut panic_runtime = None; for (i, linkage) in list.iter().enumerate() { - let cnum = (i + 1) as ast::CrateNum; - if !sess.cstore.is_allocator(cnum) { - continue - } if let Linkage::NotLinked = *linkage { continue } - if let Some(prev_alloc) = 
allocator { - let prev_name = sess.cstore.crate_name(prev_alloc); - let cur_name = sess.cstore.crate_name(cnum); - sess.err(&format!("cannot link together two \ - allocators: {} and {}", - prev_name, cur_name)); + let cnum = CrateNum::new(i + 1); + if sess.cstore.is_allocator(cnum) { + if let Some(prev) = allocator { + let prev_name = sess.cstore.crate_name(prev); + let cur_name = sess.cstore.crate_name(cnum); + sess.err(&format!("cannot link together two \ + allocators: {} and {}", + prev_name, cur_name)); + } + allocator = Some(cnum); + } + + if sess.cstore.is_panic_runtime(cnum) { + if let Some((prev, _)) = panic_runtime { + let prev_name = sess.cstore.crate_name(prev); + let cur_name = sess.cstore.crate_name(cnum); + sess.err(&format!("cannot link together two \ + panic runtimes: {} and {}", + prev_name, cur_name)); + } + panic_runtime = Some((cnum, sess.cstore.panic_strategy(cnum))); + } + } + + // If we found a panic runtime, then we know by this point that it's the + // only one, but we perform validation here that all the panic strategy + // compilation modes for the whole DAG are valid. + if let Some((cnum, found_strategy)) = panic_runtime { + let desired_strategy = sess.panic_strategy(); + + // First up, validate that our selected panic runtime is indeed exactly + // our same strategy. + if found_strategy != desired_strategy { + sess.err(&format!("the linked panic runtime `{}` is \ + not compiled with this crate's \ + panic strategy `{}`", + sess.cstore.crate_name(cnum), + desired_strategy.desc())); + } + + // Next up, verify that all other crates are compatible with this panic + // strategy. If the dep isn't linked, we ignore it, and if our strategy + // is abort then it's compatible with everything. Otherwise all crates' + // panic strategy must match our own. 
+ for (i, linkage) in list.iter().enumerate() { + if let Linkage::NotLinked = *linkage { + continue + } + if desired_strategy == PanicStrategy::Abort { + continue + } + let cnum = CrateNum::new(i + 1); + let found_strategy = sess.cstore.panic_strategy(cnum); + if desired_strategy == found_strategy { + continue + } + + sess.err(&format!("the crate `{}` is compiled with the \ + panic strategy `{}` which is \ + incompatible with this crate's \ + strategy of `{}`", + sess.cstore.crate_name(cnum), + found_strategy.desc(), + desired_strategy.desc())); } - allocator = Some(cnum); } } diff --git a/src/librustc/middle/effect.rs b/src/librustc/middle/effect.rs index 822faae772611..2ec7aa4c4d903 100644 --- a/src/librustc/middle/effect.rs +++ b/src/librustc/middle/effect.rs @@ -12,15 +12,16 @@ //! `unsafe`. use self::RootUnsafeContext::*; -use middle::def; -use middle::ty::{self, Ty}; -use middle::ty::MethodCall; +use dep_graph::DepNode; +use ty::{self, Ty, TyCtxt}; +use ty::MethodCall; +use lint; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::hir; -use rustc_front::intravisit; -use rustc_front::intravisit::{FnKind, Visitor}; +use syntax_pos::Span; +use hir::{self, PatKind}; +use hir::def::Def; +use hir::intravisit::{self, FnKind, Visitor, NestedVisitorMap}; #[derive(Copy, Clone)] struct UnsafeContext { @@ -43,27 +44,39 @@ enum RootUnsafeContext { fn type_is_unsafe_function(ty: Ty) -> bool { match ty.sty { - ty::TyBareFn(_, ref f) => f.unsafety == hir::Unsafety::Unsafe, + ty::TyFnDef(.., ref f) | + ty::TyFnPtr(ref f) => f.unsafety == hir::Unsafety::Unsafe, _ => false, } } struct EffectCheckVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Whether we're in an unsafe context. 
unsafe_context: UnsafeContext, } impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> { - fn require_unsafe(&mut self, span: Span, description: &str) { + fn require_unsafe_ext(&mut self, node_id: ast::NodeId, span: Span, + description: &str, is_lint: bool) { if self.unsafe_context.push_unsafe_count > 0 { return; } match self.unsafe_context.root { SafeContext => { - // Report an error. - span_err!(self.tcx.sess, span, E0133, - "{} requires unsafe function or block", - description); + if is_lint { + self.tcx.sess.add_lint(lint::builtin::SAFE_EXTERN_STATICS, + node_id, + span, + format!("{} requires unsafe function or \ + block (error E0133)", description)); + } else { + // Report an error. + struct_span_err!( + self.tcx.sess, span, E0133, + "{} requires unsafe function or block", description) + .span_label(span, &description) + .emit(); + } } UnsafeBlock(block_id) => { // OK, but record this. @@ -73,16 +86,24 @@ impl<'a, 'tcx> EffectCheckVisitor<'a, 'tcx> { UnsafeFn => {} } } + + fn require_unsafe(&mut self, span: Span, description: &str) { + self.require_unsafe_ext(ast::DUMMY_NODE_ID, span, description, false) + } } -impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { - fn visit_fn(&mut self, fn_kind: FnKind<'v>, fn_decl: &'v hir::FnDecl, - block: &'v hir::Block, span: Span, _: ast::NodeId) { +impl<'a, 'tcx> Visitor<'tcx> for EffectCheckVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_fn(&mut self, fn_kind: FnKind<'tcx>, fn_decl: &'tcx hir::FnDecl, + body_id: hir::ExprId, span: Span, id: ast::NodeId) { let (is_item_fn, is_unsafe_fn) = match fn_kind { - FnKind::ItemFn(_, _, unsafety, _, _, _) => + FnKind::ItemFn(_, _, unsafety, ..) => (true, unsafety == hir::Unsafety::Unsafe), - FnKind::Method(_, sig, _) => + FnKind::Method(_, sig, ..) 
=> (true, sig.unsafety == hir::Unsafety::Unsafe), _ => (false, false), }; @@ -94,12 +115,12 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { self.unsafe_context = UnsafeContext::new(SafeContext) } - intravisit::walk_fn(self, fn_kind, fn_decl, block, span); + intravisit::walk_fn(self, fn_kind, fn_decl, body_id, span, id); self.unsafe_context = old_unsafe_context } - fn visit_block(&mut self, block: &hir::Block) { + fn visit_block(&mut self, block: &'tcx hir::Block) { let old_unsafe_context = self.unsafe_context; match block.rules { hir::UnsafeBlock(source) => { @@ -130,7 +151,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { self.unsafe_context.push_unsafe_count = self.unsafe_context.push_unsafe_count.checked_sub(1).unwrap(); } - hir::DefaultBlock | hir::PushUnstableBlock | hir:: PopUnstableBlock => {} + hir::DefaultBlock => {} } intravisit::walk_block(self, block); @@ -138,11 +159,11 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { self.unsafe_context = old_unsafe_context } - fn visit_expr(&mut self, expr: &hir::Expr) { + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { match expr.node { - hir::ExprMethodCall(_, _, _) => { + hir::ExprMethodCall(..) 
=> { let method_call = MethodCall::expr(expr.id); - let base_type = self.tcx.tables.borrow().method_map[&method_call].ty; + let base_type = self.tcx.tables().method_map[&method_call].ty; debug!("effect: method call case, base type is {:?}", base_type); if type_is_unsafe_function(base_type) { @@ -151,7 +172,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { } } hir::ExprCall(ref base, _) => { - let base_type = self.tcx.expr_ty_adjusted(base); + let base_type = self.tcx.tables().expr_ty_adjusted(base); debug!("effect: call case, base type is {:?}", base_type); if type_is_unsafe_function(base_type) { @@ -159,7 +180,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { } } hir::ExprUnary(hir::UnDeref, ref base) => { - let base_type = self.tcx.expr_ty_adjusted(base); + let base_type = self.tcx.tables().expr_ty_adjusted(base); debug!("effect: unary case, base type is {:?}", base_type); if let ty::TyRawPtr(_) = base_type.sty { @@ -169,9 +190,24 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { hir::ExprInlineAsm(..) => { self.require_unsafe(expr.span, "use of inline assembly"); } - hir::ExprPath(..) => { - if let def::DefStatic(_, true) = self.tcx.resolve_expr(expr) { - self.require_unsafe(expr.span, "use of mutable static"); + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + if let Def::Static(def_id, mutbl) = path.def { + if mutbl { + self.require_unsafe(expr.span, "use of mutable static"); + } else if match self.tcx.map.get_if_local(def_id) { + Some(hir::map::NodeForeignItem(..)) => true, + Some(..) => false, + None => self.tcx.sess.cstore.is_foreign_item(def_id), + } { + self.require_unsafe_ext(expr.id, expr.span, "use of extern static", true); + } + } + } + hir::ExprField(ref base_expr, field) => { + if let ty::TyAdt(adt, ..) 
= self.tcx.tables().expr_ty_adjusted(base_expr).sty { + if adt.is_union() { + self.require_unsafe(field.span, "access to union field"); + } } } _ => {} @@ -179,13 +215,29 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EffectCheckVisitor<'a, 'tcx> { intravisit::walk_expr(self, expr); } + + fn visit_pat(&mut self, pat: &'tcx hir::Pat) { + if let PatKind::Struct(_, ref fields, _) = pat.node { + if let ty::TyAdt(adt, ..) = self.tcx.tables().pat_ty(pat).sty { + if adt.is_union() { + for field in fields { + self.require_unsafe(field.span, "matching on union field"); + } + } + } + } + + intravisit::walk_pat(self, pat); + } } -pub fn check_crate(tcx: &ty::ctxt) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let _task = tcx.dep_graph.in_task(DepNode::EffectCheck); + let mut visitor = EffectCheckVisitor { tcx: tcx, unsafe_context: UnsafeContext::new(SafeContext), }; - tcx.map.krate().visit_all_items(&mut visitor); + tcx.map.krate().visit_all_item_likes(&mut visitor.as_deep_visitor()); } diff --git a/src/librustc/middle/entry.rs b/src/librustc/middle/entry.rs index 2d096f66e09f6..e927843a984b8 100644 --- a/src/librustc/middle/entry.rs +++ b/src/librustc/middle/entry.rs @@ -9,15 +9,16 @@ // except according to those terms. 
-use front::map as ast_map; -use middle::def_id::{CRATE_DEF_INDEX}; +use dep_graph::DepNode; +use hir::map as ast_map; +use hir::def_id::{CRATE_DEF_INDEX}; use session::{config, Session}; use syntax::ast::NodeId; use syntax::attr; -use syntax::codemap::Span; use syntax::entry::EntryPointType; -use rustc_front::hir::{Item, ItemFn}; -use rustc_front::intravisit::Visitor; +use syntax_pos::Span; +use hir::{Item, ItemFn, ImplItem}; +use hir::itemlikevisit::ItemLikeVisitor; struct EntryContext<'a, 'tcx: 'a> { session: &'a Session, @@ -38,16 +39,23 @@ struct EntryContext<'a, 'tcx: 'a> { non_main_fns: Vec<(NodeId, Span)> , } -impl<'a, 'tcx> Visitor<'tcx> for EntryContext<'a, 'tcx> { +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for EntryContext<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx Item) { let def_id = self.map.local_def_id(item.id); let def_key = self.map.def_key(def_id); let at_root = def_key.parent == Some(CRATE_DEF_INDEX); find_item(item, self, at_root); } + + + fn visit_impl_item(&mut self, _impl_item: &'tcx ImplItem) { + // entry fn is never an impl item + } } pub fn find_entry_point(session: &Session, ast_map: &ast_map::Map) { + let _task = ast_map.dep_graph.in_task(DepNode::EntryPoint); + let any_exe = session.crate_types.borrow().iter().any(|ty| { *ty == config::CrateTypeExecutable }); @@ -71,7 +79,7 @@ pub fn find_entry_point(session: &Session, ast_map: &ast_map::Map) { non_main_fns: Vec::new(), }; - ast_map.krate().visit_all_items(&mut ctxt); + ast_map.krate().visit_all_item_likes(&mut ctxt); configure_main(&mut ctxt); } @@ -85,7 +93,7 @@ fn entry_point_type(item: &Item, at_root: bool) -> EntryPointType { EntryPointType::Start } else if attr::contains_name(&item.attrs, "main") { EntryPointType::MainAttr - } else if item.name.as_str() == "main" { + } else if item.name == "main" { if at_root { // This is a top-level function so can be 'main' EntryPointType::MainNamed @@ -118,16 +126,24 @@ fn find_item(item: &Item, ctxt: &mut EntryContext, at_root: bool) { if 
ctxt.attr_main_fn.is_none() { ctxt.attr_main_fn = Some((item.id, item.span)); } else { - span_err!(ctxt.session, item.span, E0137, - "multiple functions with a #[main] attribute"); + struct_span_err!(ctxt.session, item.span, E0137, + "multiple functions with a #[main] attribute") + .span_label(item.span, &format!("additional #[main] function")) + .span_label(ctxt.attr_main_fn.unwrap().1, &format!("first #[main] function")) + .emit(); } }, EntryPointType::Start => { if ctxt.start_fn.is_none() { ctxt.start_fn = Some((item.id, item.span)); } else { - span_err!(ctxt.session, item.span, E0138, - "multiple 'start' functions"); + struct_span_err!( + ctxt.session, item.span, E0138, + "multiple 'start' functions") + .span_label(ctxt.start_fn.unwrap().1, + &format!("previous `start` function here")) + .span_label(item.span, &format!("multiple `start` functions")) + .emit(); } }, EntryPointType::None => () diff --git a/src/librustc/middle/expr_use_visitor.rs b/src/librustc/middle/expr_use_visitor.rs index e746f3ac57914..b3e61f1e57067 100644 --- a/src/librustc/middle/expr_use_visitor.rs +++ b/src/librustc/middle/expr_use_visitor.rs @@ -19,18 +19,17 @@ pub use self::MatchMode::*; use self::TrackMatchMode::*; use self::OverloadedCallType::*; -use middle::{def, pat_util}; -use middle::def_id::{DefId}; -use middle::infer; +use hir::def::Def; +use hir::def_id::{DefId}; +use infer::InferCtxt; use middle::mem_categorization as mc; -use middle::ty; -use middle::ty::adjustment; +use ty::{self, TyCtxt, adjustment}; -use rustc_front::hir; +use hir::{self, PatKind}; use syntax::ast; use syntax::ptr::P; -use syntax::codemap::Span; +use syntax_pos::Span; /////////////////////////////////////////////////////////////////////////// // The Delegate trait @@ -76,7 +75,7 @@ pub trait Delegate<'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: LoanCause); @@ -209,8 +208,7 @@ enum 
OverloadedCallType { } impl OverloadedCallType { - fn from_trait_id(tcx: &ty::ctxt, trait_id: DefId) - -> OverloadedCallType { + fn from_trait_id(tcx: TyCtxt, trait_id: DefId) -> OverloadedCallType { for &(maybe_function_trait, overloaded_call_type) in &[ (tcx.lang_items.fn_once_trait(), FnOnceOverloadedCall), (tcx.lang_items.fn_mut_trait(), FnMutOverloadedCall), @@ -224,13 +222,12 @@ impl OverloadedCallType { } } - tcx.sess.bug("overloaded call didn't map to known function trait") + bug!("overloaded call didn't map to known function trait") } - fn from_method_id(tcx: &ty::ctxt, method_id: DefId) - -> OverloadedCallType { - let method = tcx.impl_or_trait_item(method_id); - OverloadedCallType::from_trait_id(tcx, method.container().id()) + fn from_method_id(tcx: TyCtxt, method_id: DefId) -> OverloadedCallType { + let method = tcx.associated_item(method_id); + OverloadedCallType::from_trait_id(tcx, method.container.id()) } } @@ -241,10 +238,9 @@ impl OverloadedCallType { // mem_categorization, it requires a TYPER, which is a type that // supplies types from the tree. After type checking is complete, you // can just use the tcx as the typer. 
-pub struct ExprUseVisitor<'d, 't, 'a: 't, 'tcx:'a+'d> { - typer: &'t infer::InferCtxt<'a, 'tcx>, - mc: mc::MemCategorizationContext<'t, 'a, 'tcx>, - delegate: &'d mut Delegate<'tcx>, +pub struct ExprUseVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>, + delegate: &'a mut Delegate<'tcx>, } // If the TYPER results in an error, it's because the type check @@ -272,42 +268,51 @@ enum PassArgs { ByRef, } -impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { - pub fn new(delegate: &'d mut (Delegate<'tcx>+'d), - typer: &'t infer::InferCtxt<'a, 'tcx>) - -> ExprUseVisitor<'d,'t,'a,'tcx> where 'tcx:'a+'d +impl<'a, 'gcx, 'tcx> ExprUseVisitor<'a, 'gcx, 'tcx> { + pub fn new(delegate: &'a mut (Delegate<'tcx>+'a), + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) + -> Self { - let mc: mc::MemCategorizationContext<'t, 'a, 'tcx> = - mc::MemCategorizationContext::new(typer); - ExprUseVisitor { typer: typer, mc: mc, delegate: delegate } + ExprUseVisitor::with_options(delegate, infcx, mc::MemCategorizationOptions::default()) + } + + pub fn with_options(delegate: &'a mut (Delegate<'tcx>+'a), + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + options: mc::MemCategorizationOptions) + -> Self + { + ExprUseVisitor { + mc: mc::MemCategorizationContext::with_options(infcx, options), + delegate: delegate + } } pub fn walk_fn(&mut self, decl: &hir::FnDecl, - body: &hir::Block) { + body: &hir::Expr) { self.walk_arg_patterns(decl, body); - self.walk_block(body); + self.consume_expr(body); } fn walk_arg_patterns(&mut self, decl: &hir::FnDecl, - body: &hir::Block) { + body: &hir::Expr) { for arg in &decl.inputs { - let arg_ty = return_if_err!(self.typer.node_ty(arg.pat.id)); + let arg_ty = return_if_err!(self.mc.infcx.node_ty(arg.pat.id)); - let fn_body_scope = self.tcx().region_maps.node_extent(body.id); + let fn_body_scope_r = self.tcx().node_scope_region(body.id); let arg_cmt = self.mc.cat_rvalue( arg.id, arg.pat.span, - ty::ReScope(fn_body_scope), // Args live only as long 
as the fn body. + fn_body_scope_r, // Args live only as long as the fn body. arg_ty); - self.walk_irrefutable_pat(arg_cmt, &*arg.pat); + self.walk_irrefutable_pat(arg_cmt, &arg.pat); } } - fn tcx(&self) -> &'t ty::ctxt<'tcx> { - self.typer.tcx + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.mc.infcx.tcx } fn delegate_consume(&mut self, @@ -317,13 +322,13 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { debug!("delegate_consume(consume_id={}, cmt={:?})", consume_id, cmt); - let mode = copy_or_move(self.typer, &cmt, DirectRefMove); + let mode = copy_or_move(self.mc.infcx, &cmt, DirectRefMove); self.delegate.consume(consume_id, consume_span, cmt, mode); } - fn consume_exprs(&mut self, exprs: &[P]) { + fn consume_exprs(&mut self, exprs: &[hir::Expr]) { for expr in exprs { - self.consume_expr(&**expr); + self.consume_expr(&expr); } } @@ -346,7 +351,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { fn borrow_expr(&mut self, expr: &hir::Expr, - r: ty::Region, + r: &'tcx ty::Region, bk: ty::BorrowKind, cause: LoanCause) { debug!("borrow_expr(expr={:?}, r={:?}, bk={:?})", @@ -368,52 +373,47 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { self.walk_adjustment(expr); match expr.node { - hir::ExprPath(..) => { } + hir::ExprPath(_) => { } hir::ExprType(ref subexpr, _) => { - self.walk_expr(&**subexpr) + self.walk_expr(&subexpr) } hir::ExprUnary(hir::UnDeref, ref base) => { // *base - if !self.walk_overloaded_operator(expr, &**base, Vec::new(), PassArgs::ByRef) { - self.select_from_expr(&**base); + if !self.walk_overloaded_operator(expr, &base, Vec::new(), PassArgs::ByRef) { + self.select_from_expr(&base); } } hir::ExprField(ref base, _) => { // base.f - self.select_from_expr(&**base); + self.select_from_expr(&base); } hir::ExprTupField(ref base, _) => { // base. 
- self.select_from_expr(&**base); + self.select_from_expr(&base); } hir::ExprIndex(ref lhs, ref rhs) => { // lhs[rhs] if !self.walk_overloaded_operator(expr, - &**lhs, - vec![&**rhs], + &lhs, + vec![&rhs], PassArgs::ByValue) { - self.select_from_expr(&**lhs); - self.consume_expr(&**rhs); + self.select_from_expr(&lhs); + self.consume_expr(&rhs); } } - hir::ExprRange(ref start, ref end) => { - start.as_ref().map(|e| self.consume_expr(&**e)); - end.as_ref().map(|e| self.consume_expr(&**e)); - } - hir::ExprCall(ref callee, ref args) => { // callee(args) - self.walk_callee(expr, &**callee); + self.walk_callee(expr, &callee); self.consume_exprs(args); } - hir::ExprMethodCall(_, _, ref args) => { // callee.m(args) + hir::ExprMethodCall(.., ref args) => { // callee.m(args) self.consume_exprs(args); } hir::ExprStruct(_, ref fields, ref opt_with) => { - self.walk_struct_expr(expr, fields, opt_with); + self.walk_struct_expr(fields, opt_with); } hir::ExprTup(ref exprs) => { @@ -421,16 +421,17 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { } hir::ExprIf(ref cond_expr, ref then_blk, ref opt_else_expr) => { - self.consume_expr(&**cond_expr); - self.walk_block(&**then_blk); + self.consume_expr(&cond_expr); + self.walk_block(&then_blk); if let Some(ref else_expr) = *opt_else_expr { - self.consume_expr(&**else_expr); + self.consume_expr(&else_expr); } } hir::ExprMatch(ref discr, ref arms, _) => { - let discr_cmt = return_if_err!(self.mc.cat_expr(&**discr)); - self.borrow_expr(&**discr, ty::ReEmpty, ty::ImmBorrow, MatchDiscriminant); + let discr_cmt = return_if_err!(self.mc.cat_expr(&discr)); + let r = self.tcx().mk_region(ty::ReEmpty); + self.borrow_expr(&discr, r, ty::ImmBorrow, MatchDiscriminant); // treatment of the discriminant is handled while walking the arms. 
for arm in arms { @@ -440,153 +441,151 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { } } - hir::ExprVec(ref exprs) => { + hir::ExprArray(ref exprs) => { self.consume_exprs(exprs); } hir::ExprAddrOf(m, ref base) => { // &base // make sure that the thing we are pointing out stays valid // for the lifetime `scope_r` of the resulting ptr: - let expr_ty = return_if_err!(self.typer.node_ty(expr.id)); - if let ty::TyRef(&r, _) = expr_ty.sty { + let expr_ty = return_if_err!(self.mc.infcx.node_ty(expr.id)); + if let ty::TyRef(r, _) = expr_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); - self.borrow_expr(&**base, r, bk, AddrOf); + self.borrow_expr(&base, r, bk, AddrOf); } } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - self.consume_expr(&**input); - } - - for output in &ia.outputs { - if output.is_indirect { - self.consume_expr(&*output.expr); + hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + for (o, output) in ia.outputs.iter().zip(outputs) { + if o.is_indirect { + self.consume_expr(output); } else { - self.mutate_expr(expr, &*output.expr, - if output.is_rw { + self.mutate_expr(expr, output, + if o.is_rw { MutateMode::WriteAndRead } else { MutateMode::JustWrite }); } } + self.consume_exprs(inputs); } - hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprLit(..) 
=> {} - hir::ExprLoop(ref blk, _) => { - self.walk_block(&**blk); + hir::ExprLoop(ref blk, _, _) => { + self.walk_block(&blk); } hir::ExprWhile(ref cond_expr, ref blk, _) => { - self.consume_expr(&**cond_expr); - self.walk_block(&**blk); + self.consume_expr(&cond_expr); + self.walk_block(&blk); } hir::ExprUnary(op, ref lhs) => { - let pass_args = if ::rustc_front::util::is_by_value_unop(op) { + let pass_args = if op.is_by_value() { PassArgs::ByValue } else { PassArgs::ByRef }; - if !self.walk_overloaded_operator(expr, &**lhs, Vec::new(), pass_args) { - self.consume_expr(&**lhs); + if !self.walk_overloaded_operator(expr, &lhs, Vec::new(), pass_args) { + self.consume_expr(&lhs); } } hir::ExprBinary(op, ref lhs, ref rhs) => { - let pass_args = if ::rustc_front::util::is_by_value_binop(op.node) { + let pass_args = if op.node.is_by_value() { PassArgs::ByValue } else { PassArgs::ByRef }; - if !self.walk_overloaded_operator(expr, &**lhs, vec![&**rhs], pass_args) { - self.consume_expr(&**lhs); - self.consume_expr(&**rhs); + if !self.walk_overloaded_operator(expr, &lhs, vec![&rhs], pass_args) { + self.consume_expr(&lhs); + self.consume_expr(&rhs); } } hir::ExprBlock(ref blk) => { - self.walk_block(&**blk); + self.walk_block(&blk); } - hir::ExprRet(ref opt_expr) => { + hir::ExprBreak(_, ref opt_expr) | hir::ExprRet(ref opt_expr) => { if let Some(ref expr) = *opt_expr { - self.consume_expr(&**expr); + self.consume_expr(&expr); } } hir::ExprAssign(ref lhs, ref rhs) => { - self.mutate_expr(expr, &**lhs, MutateMode::JustWrite); - self.consume_expr(&**rhs); + self.mutate_expr(expr, &lhs, MutateMode::JustWrite); + self.consume_expr(&rhs); } hir::ExprCast(ref base, _) => { - self.consume_expr(&**base); + self.consume_expr(&base); } hir::ExprAssignOp(op, ref lhs, ref rhs) => { // NB All our assignment operations take the RHS by value - assert!(::rustc_front::util::is_by_value_binop(op.node)); + assert!(op.node.is_by_value()); if !self.walk_overloaded_operator(expr, lhs, vec![rhs], 
PassArgs::ByValue) { - self.mutate_expr(expr, &**lhs, MutateMode::WriteAndRead); - self.consume_expr(&**rhs); + self.mutate_expr(expr, &lhs, MutateMode::WriteAndRead); + self.consume_expr(&rhs); } } hir::ExprRepeat(ref base, ref count) => { - self.consume_expr(&**base); - self.consume_expr(&**count); + self.consume_expr(&base); + self.consume_expr(&count); } - hir::ExprClosure(..) => { - self.walk_captures(expr) + hir::ExprClosure(.., fn_decl_span) => { + self.walk_captures(expr, fn_decl_span) } hir::ExprBox(ref base) => { - self.consume_expr(&**base); + self.consume_expr(&base); } } } fn walk_callee(&mut self, call: &hir::Expr, callee: &hir::Expr) { - let callee_ty = return_if_err!(self.typer.expr_ty_adjusted(callee)); + let callee_ty = return_if_err!(self.mc.infcx.expr_ty_adjusted(callee)); debug!("walk_callee: callee={:?} callee_ty={:?}", callee, callee_ty); - let call_scope = self.tcx().region_maps.node_extent(call.id); match callee_ty.sty { - ty::TyBareFn(..) => { + ty::TyFnDef(..) | ty::TyFnPtr(_) => { self.consume_expr(callee); } ty::TyError => { } _ => { let overloaded_call_type = - match self.typer.node_method_id(ty::MethodCall::expr(call.id)) { + match self.mc.infcx.node_method_id(ty::MethodCall::expr(call.id)) { Some(method_id) => { OverloadedCallType::from_method_id(self.tcx(), method_id) } None => { - self.tcx().sess.span_bug( + span_bug!( callee.span, - &format!("unexpected callee type {}", callee_ty)) + "unexpected callee type {}", + callee_ty) } }; match overloaded_call_type { FnMutOverloadedCall => { + let call_scope_r = self.tcx().node_scope_region(call.id); self.borrow_expr(callee, - ty::ReScope(call_scope), + call_scope_r, ty::MutBorrow, ClosureInvocation); } FnOverloadedCall => { + let call_scope_r = self.tcx().node_scope_region(call.id); self.borrow_expr(callee, - ty::ReScope(call_scope), + call_scope_r, ty::ImmBorrow, ClosureInvocation); } @@ -601,7 +600,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { hir::StmtDecl(ref decl, _) => { 
match decl.node { hir::DeclLocal(ref local) => { - self.walk_local(&**local); + self.walk_local(&local); } hir::DeclItem(_) => { @@ -613,7 +612,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { hir::StmtExpr(ref expr, _) | hir::StmtSemi(ref expr, _) => { - self.consume_expr(&**expr); + self.consume_expr(&expr); } } } @@ -622,8 +621,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { match local.init { None => { let delegate = &mut self.delegate; - pat_util::pat_bindings(&self.typer.tcx.def_map, &*local.pat, - |_, id, span, _| { + local.pat.each_binding(|_, id, span, _| { delegate.decl_without_init(id, span); }) } @@ -633,9 +631,9 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { // initializers are considered // "assigns", which is handled by // `walk_pat`: - self.walk_expr(&**expr); - let init_cmt = return_if_err!(self.mc.cat_expr(&**expr)); - self.walk_irrefutable_pat(init_cmt, &*local.pat); + self.walk_expr(&expr); + let init_cmt = return_if_err!(self.mc.cat_expr(&expr)); + self.walk_irrefutable_pat(init_cmt, &local.pat); } } } @@ -650,17 +648,16 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { } if let Some(ref tail_expr) = blk.expr { - self.consume_expr(&**tail_expr); + self.consume_expr(&tail_expr); } } fn walk_struct_expr(&mut self, - _expr: &hir::Expr, fields: &[hir::Field], opt_with: &Option>) { // Consume the expressions supplying values for each field. for field in fields { - self.consume_expr(&*field.expr); + self.consume_expr(&field.expr); } let with_expr = match *opt_with { @@ -668,40 +665,43 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { None => { return; } }; - let with_cmt = return_if_err!(self.mc.cat_expr(&*with_expr)); + let with_cmt = return_if_err!(self.mc.cat_expr(&with_expr)); // Select just those fields of the `with` // expression that will actually be used - if let ty::TyStruct(def, substs) = with_cmt.ty.sty { - // Consume those fields of the with expression that are needed. 
- for with_field in &def.struct_variant().fields { - if !contains_field_named(with_field, fields) { - let cmt_field = self.mc.cat_field( - &*with_expr, - with_cmt.clone(), - with_field.name, - with_field.ty(self.tcx(), substs) - ); - self.delegate_consume(with_expr.id, with_expr.span, cmt_field); + match with_cmt.ty.sty { + ty::TyAdt(adt, substs) if adt.is_struct() => { + // Consume those fields of the with expression that are needed. + for with_field in &adt.struct_variant().fields { + if !contains_field_named(with_field, fields) { + let cmt_field = self.mc.cat_field( + &*with_expr, + with_cmt.clone(), + with_field.name, + with_field.ty(self.tcx(), substs) + ); + self.delegate_consume(with_expr.id, with_expr.span, cmt_field); + } } } - } else { - // the base expression should always evaluate to a - // struct; however, when EUV is run during typeck, it - // may not. This will generate an error earlier in typeck, - // so we can just ignore it. - if !self.tcx().sess.has_errors() { - self.tcx().sess.span_bug( - with_expr.span, - "with expression doesn't evaluate to a struct"); + _ => { + // the base expression should always evaluate to a + // struct; however, when EUV is run during typeck, it + // may not. This will generate an error earlier in typeck, + // so we can just ignore it. + if !self.tcx().sess.has_errors() { + span_bug!( + with_expr.span, + "with expression doesn't evaluate to a struct"); + } } - }; + } // walk the with expression so that complex expressions // are properly handled. self.walk_expr(with_expr); - fn contains_field_named(field: ty::FieldDef, + fn contains_field_named(field: &ty::FieldDef, fields: &[hir::Field]) -> bool { @@ -714,22 +714,37 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { // consumed or borrowed as part of the automatic adjustment // process. 
fn walk_adjustment(&mut self, expr: &hir::Expr) { - let typer = self.typer; + let infcx = self.mc.infcx; //NOTE(@jroesch): mixed RefCell borrow causes crash - let adj = typer.adjustments().get(&expr.id).map(|x| x.clone()); + let adj = infcx.adjustments().get(&expr.id).map(|x| x.clone()); if let Some(adjustment) = adj { - match adjustment { - adjustment::AdjustReifyFnPointer | - adjustment::AdjustUnsafeFnPointer => { + match adjustment.kind { + adjustment::Adjust::NeverToAny | + adjustment::Adjust::ReifyFnPointer | + adjustment::Adjust::UnsafeFnPointer | + adjustment::Adjust::MutToConstPointer => { // Creating a closure/fn-pointer or unsizing consumes // the input and stores it into the resulting rvalue. - debug!("walk_adjustment(AdjustReifyFnPointer|AdjustUnsafeFnPointer)"); + debug!("walk_adjustment: trivial adjustment"); let cmt_unadjusted = return_if_err!(self.mc.cat_expr_unadjusted(expr)); self.delegate_consume(expr.id, expr.span, cmt_unadjusted); } - adjustment::AdjustDerefRef(ref adj) => { - self.walk_autoderefref(expr, adj); + adjustment::Adjust::DerefRef { autoderefs, autoref, unsize } => { + debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment); + + self.walk_autoderefs(expr, autoderefs); + + let cmt_derefd = + return_if_err!(self.mc.cat_expr_autoderefd(expr, autoderefs)); + + let cmt_refd = + self.walk_autoref(expr, cmt_derefd, autoref); + + if unsize { + // Unsizing consumes the thin pointer and produces a fat one. 
+ self.delegate_consume(expr.id, expr.span, cmt_refd); + } } } } @@ -745,52 +760,27 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { for i in 0..autoderefs { let deref_id = ty::MethodCall::autoderef(expr.id, i as u32); - match self.typer.node_method_ty(deref_id) { - None => {} - Some(method_ty) => { - let cmt = return_if_err!(self.mc.cat_expr_autoderefd(expr, i)); - - // the method call infrastructure should have - // replaced all late-bound regions with variables: - let self_ty = method_ty.fn_sig().input(0); - let self_ty = self.tcx().no_late_bound_regions(&self_ty).unwrap(); - - let (m, r) = match self_ty.sty { - ty::TyRef(r, ref m) => (m.mutbl, r), - _ => self.tcx().sess.span_bug(expr.span, - &format!("bad overloaded deref type {:?}", - method_ty)) - }; - let bk = ty::BorrowKind::from_mutbl(m); - self.delegate.borrow(expr.id, expr.span, cmt, - *r, bk, AutoRef); - } + if let Some(method_ty) = self.mc.infcx.node_method_ty(deref_id) { + let cmt = return_if_err!(self.mc.cat_expr_autoderefd(expr, i)); + + // the method call infrastructure should have + // replaced all late-bound regions with variables: + let self_ty = method_ty.fn_sig().input(0); + let self_ty = self.tcx().no_late_bound_regions(&self_ty).unwrap(); + + let (m, r) = match self_ty.sty { + ty::TyRef(r, ref m) => (m.mutbl, r), + _ => span_bug!(expr.span, + "bad overloaded deref type {:?}", + method_ty) + }; + let bk = ty::BorrowKind::from_mutbl(m); + self.delegate.borrow(expr.id, expr.span, cmt, + r, bk, AutoRef); } } } - fn walk_autoderefref(&mut self, - expr: &hir::Expr, - adj: &adjustment::AutoDerefRef<'tcx>) { - debug!("walk_autoderefref expr={:?} adj={:?}", - expr, - adj); - - self.walk_autoderefs(expr, adj.autoderefs); - - let cmt_derefd = - return_if_err!(self.mc.cat_expr_autoderefd(expr, adj.autoderefs)); - - let cmt_refd = - self.walk_autoref(expr, cmt_derefd, adj.autoref); - - if adj.unsize.is_some() { - // Unsizing consumes the thin pointer and produces a fat one. 
- self.delegate_consume(expr.id, expr.span, cmt_refd); - } - } - - /// Walks the autoref `opt_autoref` applied to the autoderef'd /// `expr`. `cmt_derefd` is the mem-categorized form of `expr` /// after all relevant autoderefs have occurred. Because AutoRefs @@ -802,7 +792,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { fn walk_autoref(&mut self, expr: &hir::Expr, cmt_base: mc::cmt<'tcx>, - opt_autoref: Option>) + opt_autoref: Option>) -> mc::cmt<'tcx> { debug!("walk_autoref(expr.id={} cmt_derefd={:?} opt_autoref={:?})", @@ -821,16 +811,16 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { }; match *autoref { - adjustment::AutoPtr(r, m) => { + adjustment::AutoBorrow::Ref(r, m) => { self.delegate.borrow(expr.id, expr.span, cmt_base, - *r, + r, ty::BorrowKind::from_mutbl(m), AutoRef); } - adjustment::AutoUnsafe(m) => { + adjustment::AutoBorrow::RawPtr(m) => { debug!("walk_autoref: expr.id={} cmt_base={:?}", expr.id, cmt_base); @@ -838,7 +828,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { // Converting from a &T to *T (or &mut T to *mut T) is // treated as borrowing it for the enclosing temporary // scope. - let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id)); + let r = self.tcx().node_scope_region(expr.id); self.delegate.borrow(expr.id, expr.span, @@ -871,7 +861,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { pass_args: PassArgs) -> bool { - if !self.typer.is_method_call(expr.id) { + if !self.mc.infcx.is_method_call(expr.id) { return false; } @@ -893,7 +883,7 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { // methods are implicitly autoref'd which sadly does not use // adjustments, so we must hardcode the borrow here. 
- let r = ty::ReScope(self.tcx().region_maps.node_extent(expr.id)); + let r = self.tcx().node_scope_region(expr.id); let bk = ty::ImmBorrow; for &arg in &rhs { @@ -905,21 +895,21 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { fn arm_move_mode(&mut self, discr_cmt: mc::cmt<'tcx>, arm: &hir::Arm) -> TrackMatchMode { let mut mode = Unknown; for pat in &arm.pats { - self.determine_pat_move_mode(discr_cmt.clone(), &**pat, &mut mode); + self.determine_pat_move_mode(discr_cmt.clone(), &pat, &mut mode); } mode } fn walk_arm(&mut self, discr_cmt: mc::cmt<'tcx>, arm: &hir::Arm, mode: MatchMode) { for pat in &arm.pats { - self.walk_pat(discr_cmt.clone(), &**pat, mode); + self.walk_pat(discr_cmt.clone(), &pat, mode); } if let Some(ref guard) = arm.guard { - self.consume_expr(&**guard); + self.consume_expr(&guard); } - self.consume_expr(&*arm.body); + self.consume_expr(&arm.body); } /// Walks a pat that occurs in isolation (i.e. top-level of fn @@ -941,24 +931,16 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { debug!("determine_pat_move_mode cmt_discr={:?} pat={:?}", cmt_discr, pat); return_if_err!(self.mc.cat_pattern(cmt_discr, pat, |_mc, cmt_pat, pat| { - let tcx = self.tcx(); - let def_map = &self.tcx().def_map; - if pat_util::pat_is_binding(&def_map.borrow(), pat) { - match pat.node { - hir::PatIdent(hir::BindByRef(_), _, _) => - mode.lub(BorrowingMatch), - hir::PatIdent(hir::BindByValue(_), _, _) => { - match copy_or_move(self.typer, &cmt_pat, PatBindingMove) { - Copy => mode.lub(CopyingMatch), - Move(_) => mode.lub(MovingMatch), - } - } - _ => { - tcx.sess.span_bug( - pat.span, - "binding pattern not an identifier"); + match pat.node { + PatKind::Binding(hir::BindByRef(..), ..) => + mode.lub(BorrowingMatch), + PatKind::Binding(hir::BindByValue(..), ..) => { + match copy_or_move(self.mc.infcx, &cmt_pat, PatBindingMove) { + Copy => mode.lub(CopyingMatch), + Move(..) 
=> mode.lub(MovingMatch), } } + _ => {} } })); } @@ -966,96 +948,40 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { /// The core driver for walking a pattern; `match_mode` must be /// established up front, e.g. via `determine_pat_move_mode` (see /// also `walk_irrefutable_pat` for patterns that stand alone). - fn walk_pat(&mut self, - cmt_discr: mc::cmt<'tcx>, - pat: &hir::Pat, - match_mode: MatchMode) { - debug!("walk_pat cmt_discr={:?} pat={:?}", cmt_discr, - pat); + fn walk_pat(&mut self, cmt_discr: mc::cmt<'tcx>, pat: &hir::Pat, match_mode: MatchMode) { + debug!("walk_pat cmt_discr={:?} pat={:?}", cmt_discr, pat); + let tcx = &self.tcx(); let mc = &self.mc; - let typer = self.typer; - let def_map = &self.tcx().def_map; + let infcx = self.mc.infcx; let delegate = &mut self.delegate; return_if_err!(mc.cat_pattern(cmt_discr.clone(), pat, |mc, cmt_pat, pat| { - if pat_util::pat_is_binding(&def_map.borrow(), pat) { - let tcx = typer.tcx; - - debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", - cmt_pat, - pat, - match_mode); + if let PatKind::Binding(bmode, def_id, ..) = pat.node { + debug!("binding cmt_pat={:?} pat={:?} match_mode={:?}", cmt_pat, pat, match_mode); // pat_ty: the type of the binding being produced. - let pat_ty = return_if_err!(typer.node_ty(pat.id)); + let pat_ty = return_if_err!(infcx.node_ty(pat.id)); // Each match binding is effectively an assignment to the // binding being produced. - let def = def_map.borrow().get(&pat.id).unwrap().full_def(); - match mc.cat_def(pat.id, pat.span, pat_ty, def) { - Ok(binding_cmt) => { - delegate.mutate(pat.id, pat.span, binding_cmt, MutateMode::Init); - } - Err(_) => { } + let def = Def::Local(def_id); + if let Ok(binding_cmt) = mc.cat_def(pat.id, pat.span, pat_ty, def) { + delegate.mutate(pat.id, pat.span, binding_cmt, MutateMode::Init); } // It is also a borrow or copy/move of the value being matched. 
- match pat.node { - hir::PatIdent(hir::BindByRef(m), _, _) => { - if let ty::TyRef(&r, _) = pat_ty.sty { + match bmode { + hir::BindByRef(m) => { + if let ty::TyRef(r, _) = pat_ty.sty { let bk = ty::BorrowKind::from_mutbl(m); - delegate.borrow(pat.id, pat.span, cmt_pat, - r, bk, RefBinding); + delegate.borrow(pat.id, pat.span, cmt_pat, r, bk, RefBinding); } } - hir::PatIdent(hir::BindByValue(_), _, _) => { - let mode = copy_or_move(typer, &cmt_pat, PatBindingMove); + hir::BindByValue(..) => { + let mode = copy_or_move(infcx, &cmt_pat, PatBindingMove); debug!("walk_pat binding consuming pat"); delegate.consume_pat(pat, cmt_pat, mode); } - _ => { - tcx.sess.span_bug( - pat.span, - "binding pattern not an identifier"); - } - } - } else { - match pat.node { - hir::PatVec(_, Some(ref slice_pat), _) => { - // The `slice_pat` here creates a slice into - // the original vector. This is effectively a - // borrow of the elements of the vector being - // matched. - - let (slice_cmt, slice_mutbl, slice_r) = - return_if_err!(mc.cat_slice_pattern(cmt_pat, &**slice_pat)); - - // Note: We declare here that the borrow - // occurs upon entering the `[...]` - // pattern. This implies that something like - // `[a; b]` where `a` is a move is illegal, - // because the borrow is already in effect. - // In fact such a move would be safe-ish, but - // it effectively *requires* that we use the - // nulling out semantics to indicate when a - // value has been moved, which we are trying - // to move away from. Otherwise, how can we - // indicate that the first element in the - // vector has been moved? Eventually, we - // could perhaps modify this rule to permit - // `[..a, b]` where `b` is a move, because in - // that case we can adjust the length of the - // original vec accordingly, but we'd have to - // make trans do the right thing, and it would - // only work for `Box<[T]>`s. It seems simpler - // to just require that people call - // `vec.pop()` or `vec.unshift()`. 
- let slice_bk = ty::BorrowKind::from_mutbl(slice_mutbl); - delegate.borrow(pat.id, pat.span, - slice_cmt, slice_r, - slice_bk, RefBinding); - } - _ => { } } } })); @@ -1065,123 +991,58 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { // to the above loop's visit of than the bindings that form // the leaves of the pattern tree structure. return_if_err!(mc.cat_pattern(cmt_discr, pat, |mc, cmt_pat, pat| { - let def_map = def_map.borrow(); - let tcx = typer.tcx; - - match pat.node { - hir::PatEnum(_, _) | hir::PatQPath(..) | - hir::PatIdent(_, _, None) | hir::PatStruct(..) => { - match def_map.get(&pat.id).map(|d| d.full_def()) { - None => { - // no definition found: pat is not a - // struct or enum pattern. - } - - Some(def::DefVariant(enum_did, variant_did, _is_struct)) => { - let downcast_cmt = - if tcx.lookup_adt_def(enum_did).is_univariant() { - cmt_pat - } else { - let cmt_pat_ty = cmt_pat.ty; - mc.cat_downcast(pat, cmt_pat, cmt_pat_ty, variant_did) - }; - - debug!("variant downcast_cmt={:?} pat={:?}", - downcast_cmt, - pat); - - delegate.matched_pat(pat, downcast_cmt, match_mode); - } - - Some(def::DefStruct(..)) | Some(def::DefTy(_, false)) => { - // A struct (in either the value or type - // namespace; we encounter the former on - // e.g. patterns for unit structs). - - debug!("struct cmt_pat={:?} pat={:?}", - cmt_pat, - pat); - - delegate.matched_pat(pat, cmt_pat, match_mode); - } - - Some(def::DefConst(..)) | - Some(def::DefAssociatedConst(..)) | - Some(def::DefLocal(..)) => { - // This is a leaf (i.e. identifier binding - // or constant value to match); thus no - // `matched_pat` call. - } - - Some(def @ def::DefTy(_, true)) => { - // An enum's type -- should never be in a - // pattern. - - if !tcx.sess.has_errors() { - let msg = format!("Pattern has unexpected type: {:?} and type {:?}", - def, - cmt_pat.ty); - tcx.sess.span_bug(pat.span, &msg) - } - } - - Some(def) => { - // Remaining cases are e.g. 
DefFn, to - // which identifiers within patterns - // should not resolve. However, we do - // encouter this when using the - // expr-use-visitor during typeck. So just - // ignore it, an error should have been - // reported. - - if !tcx.sess.has_errors() { - let msg = format!("Pattern has unexpected def: {:?} and type {:?}", - def, - cmt_pat.ty); - tcx.sess.span_bug(pat.span, &msg[..]) - } - } - } - } + let qpath = match pat.node { + PatKind::Path(ref qpath) | + PatKind::TupleStruct(ref qpath, ..) | + PatKind::Struct(ref qpath, ..) => qpath, + _ => return + }; + let def = tcx.tables().qpath_def(qpath, pat.id); + match def { + Def::Variant(variant_did) | + Def::VariantCtor(variant_did, ..) => { + let enum_did = tcx.parent_def_id(variant_did).unwrap(); + let downcast_cmt = if tcx.lookup_adt_def(enum_did).is_univariant() { + cmt_pat + } else { + let cmt_pat_ty = cmt_pat.ty; + mc.cat_downcast(pat, cmt_pat, cmt_pat_ty, variant_did) + }; - hir::PatIdent(_, _, Some(_)) => { - // Do nothing; this is a binding (not an enum - // variant or struct), and the cat_pattern call - // will visit the substructure recursively. + debug!("variant downcast_cmt={:?} pat={:?}", downcast_cmt, pat); + delegate.matched_pat(pat, downcast_cmt, match_mode); } - - hir::PatWild | hir::PatTup(..) | hir::PatBox(..) | - hir::PatRegion(..) | hir::PatLit(..) | hir::PatRange(..) | - hir::PatVec(..) => { - // Similarly, each of these cases does not - // correspond to an enum variant or struct, so we - // do not do any `matched_pat` calls for these - // cases either. + Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) 
=> { + debug!("struct cmt_pat={:?} pat={:?}", cmt_pat, pat); + delegate.matched_pat(pat, cmt_pat, match_mode); } + _ => {} } })); } - fn walk_captures(&mut self, closure_expr: &hir::Expr) { + fn walk_captures(&mut self, closure_expr: &hir::Expr, fn_decl_span: Span) { debug!("walk_captures({:?})", closure_expr); self.tcx().with_freevars(closure_expr.id, |freevars| { for freevar in freevars { - let id_var = freevar.def.var_id(); + let def_id = freevar.def.def_id(); + let id_var = self.tcx().map.as_local_node_id(def_id).unwrap(); let upvar_id = ty::UpvarId { var_id: id_var, closure_expr_id: closure_expr.id }; - let upvar_capture = self.typer.upvar_capture(upvar_id).unwrap(); + let upvar_capture = self.mc.infcx.upvar_capture(upvar_id).unwrap(); let cmt_var = return_if_err!(self.cat_captured_var(closure_expr.id, - closure_expr.span, + fn_decl_span, freevar.def)); match upvar_capture { ty::UpvarCapture::ByValue => { - let mode = copy_or_move(self.typer, &cmt_var, CaptureMove); + let mode = copy_or_move(self.mc.infcx, &cmt_var, CaptureMove); self.delegate.consume(closure_expr.id, freevar.span, cmt_var, mode); } ty::UpvarCapture::ByRef(upvar_borrow) => { self.delegate.borrow(closure_expr.id, - closure_expr.span, + fn_decl_span, cmt_var, upvar_borrow.region, upvar_borrow.kind, @@ -1195,22 +1056,22 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> { fn cat_captured_var(&mut self, closure_id: ast::NodeId, closure_span: Span, - upvar_def: def::Def) + upvar_def: Def) -> mc::McResult> { // Create the cmt for the variable being borrowed, from the // caller's perspective - let var_id = upvar_def.var_id(); - let var_ty = try!(self.typer.node_ty(var_id)); + let var_id = self.tcx().map.as_local_node_id(upvar_def.def_id()).unwrap(); + let var_ty = self.mc.infcx.node_ty(var_id)?; self.mc.cat_def(closure_id, closure_span, var_ty, upvar_def) } } -fn copy_or_move<'a, 'tcx>(typer: &infer::InferCtxt<'a, 'tcx>, - cmt: &mc::cmt<'tcx>, - move_reason: MoveReason) - -> ConsumeMode +fn 
copy_or_move<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + cmt: &mc::cmt<'tcx>, + move_reason: MoveReason) + -> ConsumeMode { - if typer.type_moves_by_default(cmt.ty, cmt.span) { + if infcx.type_moves_by_default(cmt.ty, cmt.span) { Move(move_reason) } else { Copy diff --git a/src/librustc/middle/free_region.rs b/src/librustc/middle/free_region.rs index face6d629340d..bd35bfc9829a5 100644 --- a/src/librustc/middle/free_region.rs +++ b/src/librustc/middle/free_region.rs @@ -15,8 +15,8 @@ //! `TransitiveRelation` type and use that to decide when one free //! region outlives another and so forth. -use middle::ty::{self, FreeRegion, Region}; -use middle::ty::wf::ImpliedBound; +use ty::{self, TyCtxt, FreeRegion, Region}; +use ty::wf::ImpliedBound; use rustc_data_structures::transitive_relation::TransitiveRelation; #[derive(Clone)] @@ -37,7 +37,7 @@ impl FreeRegionMap { for implied_bound in implied_bounds { debug!("implied bound: {:?}", implied_bound); match *implied_bound { - ImpliedBound::RegionSubRegion(ty::ReFree(free_a), ty::ReFree(free_b)) => { + ImpliedBound::RegionSubRegion(&ty::ReFree(free_a), &ty::ReFree(free_b)) => { self.relate_free_regions(free_a, free_b); } ImpliedBound::RegionSubRegion(..) | @@ -48,9 +48,8 @@ impl FreeRegionMap { } } - pub fn relate_free_regions_from_predicates<'tcx>(&mut self, - tcx: &ty::ctxt<'tcx>, - predicates: &[ty::Predicate<'tcx>]) { + pub fn relate_free_regions_from_predicates(&mut self, + predicates: &[ty::Predicate]) { debug!("relate_free_regions_from_predicates(predicates={:?})", predicates); for predicate in predicates { match *predicate { @@ -59,23 +58,23 @@ impl FreeRegionMap { ty::Predicate::Equate(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | ty::Predicate::TypeOutlives(..) 
=> { // No region bounds here } ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(r_a, r_b))) => { match (r_a, r_b) { - (ty::ReStatic, ty::ReFree(_)) => {}, - (ty::ReFree(fr_a), ty::ReStatic) => self.relate_to_static(fr_a), - (ty::ReFree(fr_a), ty::ReFree(fr_b)) => { + (&ty::ReStatic, &ty::ReFree(_)) => {}, + (&ty::ReFree(fr_a), &ty::ReStatic) => self.relate_to_static(fr_a), + (&ty::ReFree(fr_a), &ty::ReFree(fr_b)) => { // Record that `'a:'b`. Or, put another way, `'b <= 'a`. self.relate_free_regions(fr_b, fr_a); } _ => { // All named regions are instantiated with free regions. - tcx.sess.bug( - &format!("record_region_bounds: non free region: {:?} / {:?}", - r_a, - r_b)); + bug!("record_region_bounds: non free region: {:?} / {:?}", + r_a, + r_b); } } } @@ -121,27 +120,27 @@ impl FreeRegionMap { /// Determines whether one region is a subregion of another. This is intended to run *after /// inference* and sadly the logic is somewhat duplicated with the code in infer.rs. pub fn is_subregion_of(&self, - tcx: &ty::ctxt, - sub_region: ty::Region, - super_region: ty::Region) + tcx: TyCtxt, + sub_region: &ty::Region, + super_region: &ty::Region) -> bool { let result = sub_region == super_region || { match (sub_region, super_region) { - (ty::ReEmpty, _) | - (_, ty::ReStatic) => + (&ty::ReEmpty, _) | + (_, &ty::ReStatic) => true, - (ty::ReScope(sub_scope), ty::ReScope(super_scope)) => + (&ty::ReScope(sub_scope), &ty::ReScope(super_scope)) => tcx.region_maps.is_subscope_of(sub_scope, super_scope), - (ty::ReScope(sub_scope), ty::ReFree(fr)) => + (&ty::ReScope(sub_scope), &ty::ReFree(fr)) => tcx.region_maps.is_subscope_of(sub_scope, fr.scope) || self.is_static(fr), - (ty::ReFree(sub_fr), ty::ReFree(super_fr)) => + (&ty::ReFree(sub_fr), &ty::ReFree(super_fr)) => self.sub_free_region(sub_fr, super_fr), - (ty::ReStatic, ty::ReFree(sup_fr)) => + (&ty::ReStatic, &ty::ReFree(sup_fr)) => self.is_static(sup_fr), _ => diff --git a/src/librustc/middle/implicator.rs 
b/src/librustc/middle/implicator.rs deleted file mode 100644 index d25084bbdffb5..0000000000000 --- a/src/librustc/middle/implicator.rs +++ /dev/null @@ -1,454 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// #![warn(deprecated_mode)] - -use middle::def_id::DefId; -use middle::infer::{InferCtxt, GenericKind}; -use middle::subst::Substs; -use middle::traits; -use middle::ty::{self, ToPredicate, Ty}; -use middle::ty::fold::{TypeFoldable, TypeFolder}; - -use syntax::ast; -use syntax::codemap::Span; - -use util::common::ErrorReported; -use util::nodemap::FnvHashSet; - -// Helper functions related to manipulating region types. - -#[derive(Debug)] -pub enum Implication<'tcx> { - RegionSubRegion(Option>, ty::Region, ty::Region), - RegionSubGeneric(Option>, ty::Region, GenericKind<'tcx>), - Predicate(DefId, ty::Predicate<'tcx>), -} - -struct Implicator<'a, 'tcx: 'a> { - infcx: &'a InferCtxt<'a,'tcx>, - body_id: ast::NodeId, - stack: Vec<(ty::Region, Option>)>, - span: Span, - out: Vec>, - visited: FnvHashSet>, -} - -/// This routine computes the well-formedness constraints that must hold for the type `ty` to -/// appear in a context with lifetime `outer_region` -pub fn implications<'a,'tcx>( - infcx: &'a InferCtxt<'a,'tcx>, - body_id: ast::NodeId, - ty: Ty<'tcx>, - outer_region: ty::Region, - span: Span) - -> Vec> -{ - debug!("implications(body_id={}, ty={:?}, outer_region={:?})", - body_id, - ty, - outer_region); - - let mut stack = Vec::new(); - stack.push((outer_region, None)); - let mut wf = Implicator { infcx: infcx, - body_id: body_id, - span: span, - stack: stack, - out: Vec::new(), - visited: FnvHashSet() }; - wf.accumulate_from_ty(ty); - 
debug!("implications: out={:?}", wf.out); - wf.out -} - -impl<'a, 'tcx> Implicator<'a, 'tcx> { - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.infcx.tcx - } - - fn accumulate_from_ty(&mut self, ty: Ty<'tcx>) { - debug!("accumulate_from_ty(ty={:?})", - ty); - - // When expanding out associated types, we can visit a cyclic - // set of types. Issue #23003. - if !self.visited.insert(ty) { - return; - } - - match ty.sty { - ty::TyBool | - ty::TyChar | - ty::TyInt(..) | - ty::TyUint(..) | - ty::TyFloat(..) | - ty::TyBareFn(..) | - ty::TyError | - ty::TyStr => { - // No borrowed content reachable here. - } - - ty::TyClosure(_, ref substs) => { - // FIXME(#27086). We do not accumulate from substs, since they - // don't represent reachable data. This means that, in - // practice, some of the lifetime parameters might not - // be in scope when the body runs, so long as there is - // no reachable data with that lifetime. For better or - // worse, this is consistent with fn types, however, - // which can also encapsulate data in this fashion - // (though it's somewhat harder, and typically - // requires virtual dispatch). - // - // Note that changing this (in a naive way, at least) - // causes regressions for what appears to be perfectly - // reasonable code like this: - // - // ``` - // fn foo<'a>(p: &Data<'a>) { - // bar(|q: &mut Parser| q.read_addr()) - // } - // fn bar(p: Box) { - // } - // ``` - // - // Note that `p` (and `'a`) are not used in the - // closure at all, but to meet the requirement that - // the closure type `C: 'static` (so it can be coerced - // to the object type), we get the requirement that - // `'a: 'static` since `'a` appears in the closure - // type `C`. - // - // A smarter fix might "prune" unused `func_substs` -- - // this would avoid breaking simple examples like - // this, but would still break others (which might - // indeed be invalid, depending on your POV). 
Pruning - // would be a subtle process, since we have to see - // what func/type parameters are used and unused, - // taking into consideration UFCS and so forth. - - for &upvar_ty in &substs.upvar_tys { - self.accumulate_from_ty(upvar_ty); - } - } - - ty::TyTrait(ref t) => { - let required_region_bounds = - object_region_bounds(self.tcx(), &t.principal, t.bounds.builtin_bounds); - self.accumulate_from_object_ty(ty, t.bounds.region_bound, required_region_bounds) - } - - ty::TyEnum(def, substs) | - ty::TyStruct(def, substs) => { - let item_scheme = def.type_scheme(self.tcx()); - self.accumulate_from_adt(ty, def.did, &item_scheme.generics, substs) - } - - ty::TyArray(t, _) | - ty::TySlice(t) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) | - ty::TyBox(t) => { - self.accumulate_from_ty(t) - } - - ty::TyRef(r_b, mt) => { - self.accumulate_from_rptr(ty, *r_b, mt.ty); - } - - ty::TyParam(p) => { - self.push_param_constraint_from_top(p); - } - - ty::TyProjection(ref data) => { - // `>::Name` - - self.push_projection_constraint_from_top(data); - } - - ty::TyTuple(ref tuptys) => { - for &tupty in tuptys { - self.accumulate_from_ty(tupty); - } - } - - ty::TyInfer(_) => { - // This should not happen, BUT: - // - // Currently we uncover region relationships on - // entering the fn check. We should do this after - // the fn check, then we can call this case a bug(). - } - } - } - - fn accumulate_from_rptr(&mut self, - ty: Ty<'tcx>, - r_b: ty::Region, - ty_b: Ty<'tcx>) { - // We are walking down a type like this, and current - // position is indicated by caret: - // - // &'a &'b ty_b - // ^ - // - // At this point, top of stack will be `'a`. We must - // require that `'a <= 'b`. - - self.push_region_constraint_from_top(r_b); - - // Now we push `'b` onto the stack, because it must - // constrain any borrowed content we find within `T`. 
- - self.stack.push((r_b, Some(ty))); - self.accumulate_from_ty(ty_b); - self.stack.pop().unwrap(); - } - - /// Pushes a constraint that `r_b` must outlive the top region on the stack. - fn push_region_constraint_from_top(&mut self, - r_b: ty::Region) { - - // Indicates that we have found borrowed content with a lifetime - // of at least `r_b`. This adds a constraint that `r_b` must - // outlive the region `r_a` on top of the stack. - // - // As an example, imagine walking a type like: - // - // &'a &'b T - // ^ - // - // when we hit the inner pointer (indicated by caret), `'a` will - // be on top of stack and `'b` will be the lifetime of the content - // we just found. So we add constraint that `'a <= 'b`. - - let &(r_a, opt_ty) = self.stack.last().unwrap(); - self.push_sub_region_constraint(opt_ty, r_a, r_b); - } - - /// Pushes a constraint that `r_a <= r_b`, due to `opt_ty` - fn push_sub_region_constraint(&mut self, - opt_ty: Option>, - r_a: ty::Region, - r_b: ty::Region) { - self.out.push(Implication::RegionSubRegion(opt_ty, r_a, r_b)); - } - - /// Pushes a constraint that `param_ty` must outlive the top region on the stack. - fn push_param_constraint_from_top(&mut self, - param_ty: ty::ParamTy) { - let &(region, opt_ty) = self.stack.last().unwrap(); - self.push_param_constraint(region, opt_ty, param_ty); - } - - /// Pushes a constraint that `projection_ty` must outlive the top region on the stack. 
- fn push_projection_constraint_from_top(&mut self, - projection_ty: &ty::ProjectionTy<'tcx>) { - let &(region, opt_ty) = self.stack.last().unwrap(); - self.out.push(Implication::RegionSubGeneric( - opt_ty, region, GenericKind::Projection(projection_ty.clone()))); - } - - /// Pushes a constraint that `region <= param_ty`, due to `opt_ty` - fn push_param_constraint(&mut self, - region: ty::Region, - opt_ty: Option>, - param_ty: ty::ParamTy) { - self.out.push(Implication::RegionSubGeneric( - opt_ty, region, GenericKind::Param(param_ty))); - } - - fn accumulate_from_adt(&mut self, - ty: Ty<'tcx>, - def_id: DefId, - _generics: &ty::Generics<'tcx>, - substs: &Substs<'tcx>) - { - let predicates = - self.tcx().lookup_predicates(def_id).instantiate(self.tcx(), substs); - let predicates = match self.fully_normalize(&predicates) { - Ok(predicates) => predicates, - Err(ErrorReported) => { return; } - }; - - for predicate in predicates.predicates.as_slice() { - match *predicate { - ty::Predicate::Trait(..) => { } - ty::Predicate::Equate(..) => { } - ty::Predicate::Projection(..) 
=> { } - ty::Predicate::RegionOutlives(ref data) => { - match self.tcx().no_late_bound_regions(data) { - None => { } - Some(ty::OutlivesPredicate(r_a, r_b)) => { - self.push_sub_region_constraint(Some(ty), r_b, r_a); - } - } - } - ty::Predicate::TypeOutlives(ref data) => { - match self.tcx().no_late_bound_regions(data) { - None => { } - Some(ty::OutlivesPredicate(ty_a, r_b)) => { - self.stack.push((r_b, Some(ty))); - self.accumulate_from_ty(ty_a); - self.stack.pop().unwrap(); - } - } - } - ty::Predicate::ObjectSafe(_) | - ty::Predicate::WellFormed(_) => { - } - } - } - - let obligations = predicates.predicates - .into_iter() - .map(|pred| Implication::Predicate(def_id, pred)); - self.out.extend(obligations); - - let variances = self.tcx().item_variances(def_id); - self.accumulate_from_substs(substs, Some(&variances)); - } - - fn accumulate_from_substs(&mut self, - substs: &Substs<'tcx>, - variances: Option<&ty::ItemVariances>) - { - let mut tmp_variances = None; - let variances = variances.unwrap_or_else(|| { - tmp_variances = Some(ty::ItemVariances { - types: substs.types.map(|_| ty::Variance::Invariant), - regions: substs.regions().map(|_| ty::Variance::Invariant), - }); - tmp_variances.as_ref().unwrap() - }); - - for (®ion, &variance) in substs.regions().iter().zip(&variances.regions) { - match variance { - ty::Contravariant | ty::Invariant => { - // If any data with this lifetime is reachable - // within, it must be at least contravariant. - self.push_region_constraint_from_top(region) - } - ty::Covariant | ty::Bivariant => { } - } - } - - for (&ty, &variance) in substs.types.iter().zip(&variances.types) { - match variance { - ty::Covariant | ty::Invariant => { - // If any data of this type is reachable within, - // it must be at least covariant. 
- self.accumulate_from_ty(ty); - } - ty::Contravariant | ty::Bivariant => { } - } - } - } - - fn accumulate_from_object_ty(&mut self, - ty: Ty<'tcx>, - region_bound: ty::Region, - required_region_bounds: Vec) - { - // Imagine a type like this: - // - // trait Foo { } - // trait Bar<'c> : 'c { } - // - // &'b (Foo+'c+Bar<'d>) - // ^ - // - // In this case, the following relationships must hold: - // - // 'b <= 'c - // 'd <= 'c - // - // The first conditions is due to the normal region pointer - // rules, which say that a reference cannot outlive its - // referent. - // - // The final condition may be a bit surprising. In particular, - // you may expect that it would have been `'c <= 'd`, since - // usually lifetimes of outer things are conservative - // approximations for inner things. However, it works somewhat - // differently with trait objects: here the idea is that if the - // user specifies a region bound (`'c`, in this case) it is the - // "master bound" that *implies* that bounds from other traits are - // all met. (Remember that *all bounds* in a type like - // `Foo+Bar+Zed` must be met, not just one, hence if we write - // `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and - // 'y.) - // - // Note: in fact we only permit builtin traits, not `Bar<'d>`, I - // am looking forward to the future here. 
- - // The content of this object type must outlive - // `bounds.region_bound`: - let r_c = region_bound; - self.push_region_constraint_from_top(r_c); - - // And then, in turn, to be well-formed, the - // `region_bound` that user specified must imply the - // region bounds required from all of the trait types: - for &r_d in &required_region_bounds { - // Each of these is an instance of the `'c <= 'b` - // constraint above - self.out.push(Implication::RegionSubRegion(Some(ty), r_d, r_c)); - } - } - - fn fully_normalize(&self, value: &T) -> Result - where T : TypeFoldable<'tcx> - { - let value = - traits::fully_normalize(self.infcx, - traits::ObligationCause::misc(self.span, self.body_id), - value); - match value { - Ok(value) => Ok(value), - Err(errors) => { - // I don't like reporting these errors here, but I - // don't know where else to report them just now. And - // I don't really expect errors to arise here - // frequently. I guess the best option would be to - // propagate them out. - traits::report_fulfillment_errors(self.infcx, &errors); - Err(ErrorReported) - } - } - } -} - -/// Given an object type like `SomeTrait+Send`, computes the lifetime -/// bounds that must hold on the elided self type. These are derived -/// from the declarations of `SomeTrait`, `Send`, and friends -- if -/// they declare `trait SomeTrait : 'static`, for example, then -/// `'static` would appear in the list. The hard work is done by -/// `ty::required_region_bounds`, see that for more information. -pub fn object_region_bounds<'tcx>( - tcx: &ty::ctxt<'tcx>, - principal: &ty::PolyTraitRef<'tcx>, - others: ty::BuiltinBounds) - -> Vec -{ - // Since we don't actually *know* the self type for an object, - // this "open(err)" serves as a kind of dummy standin -- basically - // a skolemized type. - let open_ty = tcx.mk_infer(ty::FreshTy(0)); - - // Note that we preserve the overall binding levels here. 
- assert!(!open_ty.has_escaping_regions()); - let substs = tcx.mk_substs(principal.0.substs.with_self_ty(open_ty)); - let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs))); - - let mut predicates = others.to_predicates(tcx, open_ty); - predicates.extend(trait_refs.iter().map(|t| t.to_predicate())); - - tcx.required_region_bounds(open_ty, predicates) -} diff --git a/src/librustc/middle/infer/bivariate.rs b/src/librustc/middle/infer/bivariate.rs deleted file mode 100644 index 2d9432b75e719..0000000000000 --- a/src/librustc/middle/infer/bivariate.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Applies the "bivariance relationship" to two types and/or regions. -//! If (A,B) are bivariant then either A <: B or B <: A. It occurs -//! when type/lifetime parameters are unconstrained. Usually this is -//! an error, but we permit it in the specific case where a type -//! parameter is constrained in a where-clause via an associated type. -//! -//! There are several ways one could implement bivariance. You could -//! just do nothing at all, for example, or you could fully verify -//! that one of the two subtyping relationships hold. We choose to -//! thread a middle line: we relate types up to regions, but ignore -//! all region relationships. -//! -//! At one point, handling bivariance in this fashion was necessary -//! for inference, but I'm actually not sure if that is true anymore. -//! In particular, it might be enough to say (A,B) are bivariant for -//! all (A,B). 
- -use super::combine::{self, CombineFields}; -use super::type_variable::{BiTo}; - -use middle::ty::{self, Ty}; -use middle::ty::TyVar; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; - -pub struct Bivariate<'a, 'tcx: 'a> { - fields: CombineFields<'a, 'tcx> -} - -impl<'a, 'tcx> Bivariate<'a, 'tcx> { - pub fn new(fields: CombineFields<'a, 'tcx>) -> Bivariate<'a, 'tcx> { - Bivariate { fields: fields } - } -} - -impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Bivariate<'a, 'tcx> { - fn tag(&self) -> &'static str { "Bivariate" } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() } - - fn a_is_expected(&self) -> bool { self.fields.a_is_expected } - - fn relate_with_variance>(&mut self, - variance: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - match variance { - // If we have Foo and Foo is invariant w/r/t A, - // and we want to assert that - // - // Foo <: Foo || - // Foo <: Foo - // - // then still A must equal B. - ty::Invariant => self.relate(a, b), - - ty::Covariant => self.relate(a, b), - ty::Bivariant => self.relate(a, b), - ty::Contravariant => self.relate(a, b), - } - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - debug!("{}.tys({:?}, {:?})", self.tag(), - a, b); - if a == b { return Ok(a); } - - let infcx = self.fields.infcx; - let a = infcx.type_variables.borrow().replace_if_possible(a); - let b = infcx.type_variables.borrow().replace_if_possible(b); - match (&a.sty, &b.sty) { - (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { - infcx.type_variables.borrow_mut().relate_vars(a_id, BiTo, b_id); - Ok(a) - } - - (&ty::TyInfer(TyVar(a_id)), _) => { - try!(self.fields.instantiate(b, BiTo, a_id)); - Ok(a) - } - - (_, &ty::TyInfer(TyVar(b_id))) => { - try!(self.fields.instantiate(a, BiTo, b_id)); - Ok(a) - } - - _ => { - combine::super_combine_tys(self.fields.infcx, self, a, b) - } - } - } - - fn regions(&mut self, a: ty::Region, _: ty::Region) -> RelateResult<'tcx, ty::Region> { - 
Ok(a) - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a,'tcx> - { - let a1 = self.tcx().erase_late_bound_regions(a); - let b1 = self.tcx().erase_late_bound_regions(b); - let c = try!(self.relate(&a1, &b1)); - Ok(ty::Binder(c)) - } -} diff --git a/src/librustc/middle/infer/combine.rs b/src/librustc/middle/infer/combine.rs deleted file mode 100644 index faf1bdb0ce504..0000000000000 --- a/src/librustc/middle/infer/combine.rs +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/////////////////////////////////////////////////////////////////////////// -// # Type combining -// -// There are four type combiners: equate, sub, lub, and glb. Each -// implements the trait `Combine` and contains methods for combining -// two instances of various things and yielding a new instance. These -// combiner methods always yield a `Result`. There is a lot of -// common code for these operations, implemented as default methods on -// the `Combine` trait. -// -// Each operation may have side-effects on the inference context, -// though these can be unrolled using snapshots. On success, the -// LUB/GLB operations return the appropriate bound. The Eq and Sub -// operations generally return the first operand. -// -// ## Contravariance -// -// When you are relating two things which have a contravariant -// relationship, you should use `contratys()` or `contraregions()`, -// rather than inversing the order of arguments! This is necessary -// because the order of arguments is not relevant for LUB and GLB. 
It -// is also useful to track which value is the "expected" value in -// terms of error reporting. - -use super::bivariate::Bivariate; -use super::equate::Equate; -use super::glb::Glb; -use super::lub::Lub; -use super::sub::Sub; -use super::{InferCtxt}; -use super::{MiscVariable, TypeTrace}; -use super::type_variable::{RelationDir, BiTo, EqTo, SubtypeOf, SupertypeOf}; - -use middle::ty::{IntType, UintType}; -use middle::ty::{self, Ty}; -use middle::ty::error::TypeError; -use middle::ty::fold::{TypeFolder, TypeFoldable}; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; - -use syntax::ast; -use syntax::codemap::Span; - -#[derive(Clone)] -pub struct CombineFields<'a, 'tcx: 'a> { - pub infcx: &'a InferCtxt<'a, 'tcx>, - pub a_is_expected: bool, - pub trace: TypeTrace<'tcx>, - pub cause: Option, -} - -pub fn super_combine_tys<'a,'tcx:'a,R>(infcx: &InferCtxt<'a, 'tcx>, - relation: &mut R, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> RelateResult<'tcx, Ty<'tcx>> - where R: TypeRelation<'a,'tcx> -{ - let a_is_expected = relation.a_is_expected(); - - match (&a.sty, &b.sty) { - // Relate integral variables to other types - (&ty::TyInfer(ty::IntVar(a_id)), &ty::TyInfer(ty::IntVar(b_id))) => { - try!(infcx.int_unification_table - .borrow_mut() - .unify_var_var(a_id, b_id) - .map_err(|e| int_unification_error(a_is_expected, e))); - Ok(a) - } - (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyInt(v)) => { - unify_integral_variable(infcx, a_is_expected, v_id, IntType(v)) - } - (&ty::TyInt(v), &ty::TyInfer(ty::IntVar(v_id))) => { - unify_integral_variable(infcx, !a_is_expected, v_id, IntType(v)) - } - (&ty::TyInfer(ty::IntVar(v_id)), &ty::TyUint(v)) => { - unify_integral_variable(infcx, a_is_expected, v_id, UintType(v)) - } - (&ty::TyUint(v), &ty::TyInfer(ty::IntVar(v_id))) => { - unify_integral_variable(infcx, !a_is_expected, v_id, UintType(v)) - } - - // Relate floating-point variables to other types - (&ty::TyInfer(ty::FloatVar(a_id)), &ty::TyInfer(ty::FloatVar(b_id))) => { - 
try!(infcx.float_unification_table - .borrow_mut() - .unify_var_var(a_id, b_id) - .map_err(|e| float_unification_error(relation.a_is_expected(), e))); - Ok(a) - } - (&ty::TyInfer(ty::FloatVar(v_id)), &ty::TyFloat(v)) => { - unify_float_variable(infcx, a_is_expected, v_id, v) - } - (&ty::TyFloat(v), &ty::TyInfer(ty::FloatVar(v_id))) => { - unify_float_variable(infcx, !a_is_expected, v_id, v) - } - - // All other cases of inference are errors - (&ty::TyInfer(_), _) | - (_, &ty::TyInfer(_)) => { - Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b))) - } - - - _ => { - ty::relate::super_relate_tys(relation, a, b) - } - } -} - -fn unify_integral_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - vid_is_expected: bool, - vid: ty::IntVid, - val: ty::IntVarValue) - -> RelateResult<'tcx, Ty<'tcx>> -{ - try!(infcx - .int_unification_table - .borrow_mut() - .unify_var_value(vid, val) - .map_err(|e| int_unification_error(vid_is_expected, e))); - match val { - IntType(v) => Ok(infcx.tcx.mk_mach_int(v)), - UintType(v) => Ok(infcx.tcx.mk_mach_uint(v)), - } -} - -fn unify_float_variable<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - vid_is_expected: bool, - vid: ty::FloatVid, - val: ast::FloatTy) - -> RelateResult<'tcx, Ty<'tcx>> -{ - try!(infcx - .float_unification_table - .borrow_mut() - .unify_var_value(vid, val) - .map_err(|e| float_unification_error(vid_is_expected, e))); - Ok(infcx.tcx.mk_mach_float(val)) -} - -impl<'a, 'tcx> CombineFields<'a, 'tcx> { - pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.infcx.tcx - } - - pub fn switch_expected(&self) -> CombineFields<'a, 'tcx> { - CombineFields { - a_is_expected: !self.a_is_expected, - ..(*self).clone() - } - } - - pub fn equate(&self) -> Equate<'a, 'tcx> { - Equate::new(self.clone()) - } - - pub fn bivariate(&self) -> Bivariate<'a, 'tcx> { - Bivariate::new(self.clone()) - } - - pub fn sub(&self) -> Sub<'a, 'tcx> { - Sub::new(self.clone()) - } - - pub fn lub(&self) -> Lub<'a, 'tcx> { - Lub::new(self.clone()) - } - - pub fn 
glb(&self) -> Glb<'a, 'tcx> { - Glb::new(self.clone()) - } - - pub fn instantiate(&self, - a_ty: Ty<'tcx>, - dir: RelationDir, - b_vid: ty::TyVid) - -> RelateResult<'tcx, ()> - { - let mut stack = Vec::new(); - stack.push((a_ty, dir, b_vid)); - loop { - // For each turn of the loop, we extract a tuple - // - // (a_ty, dir, b_vid) - // - // to relate. Here dir is either SubtypeOf or - // SupertypeOf. The idea is that we should ensure that - // the type `a_ty` is a subtype or supertype (respectively) of the - // type to which `b_vid` is bound. - // - // If `b_vid` has not yet been instantiated with a type - // (which is always true on the first iteration, but not - // necessarily true on later iterations), we will first - // instantiate `b_vid` with a *generalized* version of - // `a_ty`. Generalization introduces other inference - // variables wherever subtyping could occur (at time of - // this writing, this means replacing free regions with - // region variables). - let (a_ty, dir, b_vid) = match stack.pop() { - None => break, - Some(e) => e, - }; - - debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", - a_ty, - dir, - b_vid); - - // Check whether `vid` has been instantiated yet. If not, - // make a generalized form of `ty` and instantiate with - // that. - let b_ty = self.infcx.type_variables.borrow().probe(b_vid); - let b_ty = match b_ty { - Some(t) => t, // ...already instantiated. - None => { // ...not yet instantiated: - // Generalize type if necessary. 
- let generalized_ty = try!(match dir { - EqTo => self.generalize(a_ty, b_vid, false), - BiTo | SupertypeOf | SubtypeOf => self.generalize(a_ty, b_vid, true), - }); - debug!("instantiate(a_ty={:?}, dir={:?}, \ - b_vid={:?}, generalized_ty={:?})", - a_ty, dir, b_vid, - generalized_ty); - self.infcx.type_variables - .borrow_mut() - .instantiate_and_push( - b_vid, generalized_ty, &mut stack); - generalized_ty - } - }; - - // The original triple was `(a_ty, dir, b_vid)` -- now we have - // resolved `b_vid` to `b_ty`, so apply `(a_ty, dir, b_ty)`: - // - // FIXME(#16847): This code is non-ideal because all these subtype - // relations wind up attributed to the same spans. We need - // to associate causes/spans with each of the relations in - // the stack to get this right. - try!(match dir { - BiTo => self.bivariate().relate(&a_ty, &b_ty), - EqTo => self.equate().relate(&a_ty, &b_ty), - SubtypeOf => self.sub().relate(&a_ty, &b_ty), - SupertypeOf => self.sub().relate_with_variance(ty::Contravariant, &a_ty, &b_ty), - }); - } - - Ok(()) - } - - /// Attempts to generalize `ty` for the type variable `for_vid`. This checks for cycle -- that - /// is, whether the type `ty` references `for_vid`. If `make_region_vars` is true, it will also - /// replace all regions with fresh variables. Returns `TyError` in the case of a cycle, `Ok` - /// otherwise. 
- fn generalize(&self, - ty: Ty<'tcx>, - for_vid: ty::TyVid, - make_region_vars: bool) - -> RelateResult<'tcx, Ty<'tcx>> - { - let mut generalize = Generalizer { - infcx: self.infcx, - span: self.trace.origin.span(), - for_vid: for_vid, - make_region_vars: make_region_vars, - cycle_detected: false - }; - let u = ty.fold_with(&mut generalize); - if generalize.cycle_detected { - Err(TypeError::CyclicTy) - } else { - Ok(u) - } - } -} - -struct Generalizer<'cx, 'tcx:'cx> { - infcx: &'cx InferCtxt<'cx, 'tcx>, - span: Span, - for_vid: ty::TyVid, - make_region_vars: bool, - cycle_detected: bool, -} - -impl<'cx, 'tcx> ty::fold::TypeFolder<'tcx> for Generalizer<'cx, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.infcx.tcx - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - // Check to see whether the type we are genealizing references - // `vid`. At the same time, also update any type variables to - // the values that they are bound to. This is needed to truly - // check for cycles, but also just makes things readable. - // - // (In particular, you could have something like `$0 = Box<$1>` - // where `$1` has already been instantiated with `Box<$0>`) - match t.sty { - ty::TyInfer(ty::TyVar(vid)) => { - if vid == self.for_vid { - self.cycle_detected = true; - self.tcx().types.err - } else { - match self.infcx.type_variables.borrow().probe(vid) { - Some(u) => self.fold_ty(u), - None => t, - } - } - } - _ => { - t.super_fold_with(self) - } - } - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - // Never make variables for regions bound within the type itself. - ty::ReLateBound(..) => { return r; } - - // Early-bound regions should really have been substituted away before - // we get to this point. - ty::ReEarlyBound(..) 
=> { - self.tcx().sess.span_bug( - self.span, - &format!("Encountered early bound region when generalizing: {:?}", - r)); - } - - // Always make a fresh region variable for skolemized regions; - // the higher-ranked decision procedures rely on this. - ty::ReSkolemized(..) => { } - - // For anything else, we make a region variable, unless we - // are *equating*, in which case it's just wasteful. - ty::ReEmpty | - ty::ReStatic | - ty::ReScope(..) | - ty::ReVar(..) | - ty::ReFree(..) => { - if !self.make_region_vars { - return r; - } - } - } - - // FIXME: This is non-ideal because we don't give a - // very descriptive origin for this region variable. - self.infcx.next_region_var(MiscVariable(self.span)) - } -} - -pub trait RelateResultCompare<'tcx, T> { - fn compare(&self, t: T, f: F) -> RelateResult<'tcx, T> where - F: FnOnce() -> TypeError<'tcx>; -} - -impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> { - fn compare(&self, t: T, f: F) -> RelateResult<'tcx, T> where - F: FnOnce() -> TypeError<'tcx>, - { - self.clone().and_then(|s| { - if s == t { - self.clone() - } else { - Err(f()) - } - }) - } -} - -fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue)) - -> TypeError<'tcx> -{ - let (a, b) = v; - TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b)) -} - -fn float_unification_error<'tcx>(a_is_expected: bool, - v: (ast::FloatTy, ast::FloatTy)) - -> TypeError<'tcx> -{ - let (a, b) = v; - TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b)) -} diff --git a/src/librustc/middle/infer/equate.rs b/src/librustc/middle/infer/equate.rs deleted file mode 100644 index d1dad4921ae21..0000000000000 --- a/src/librustc/middle/infer/equate.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::combine::{self, CombineFields}; -use super::higher_ranked::HigherRankedRelations; -use super::{Subtype}; -use super::type_variable::{EqTo}; - -use middle::ty::{self, Ty}; -use middle::ty::TyVar; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; - -/// Ensures `a` is made equal to `b`. Returns `a` on success. -pub struct Equate<'a, 'tcx: 'a> { - fields: CombineFields<'a, 'tcx> -} - -impl<'a, 'tcx> Equate<'a, 'tcx> { - pub fn new(fields: CombineFields<'a, 'tcx>) -> Equate<'a, 'tcx> { - Equate { fields: fields } - } -} - -impl<'a, 'tcx> TypeRelation<'a,'tcx> for Equate<'a, 'tcx> { - fn tag(&self) -> &'static str { "Equate" } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() } - - fn a_is_expected(&self) -> bool { self.fields.a_is_expected } - - fn relate_with_variance>(&mut self, - _: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - self.relate(a, b) - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - debug!("{}.tys({:?}, {:?})", self.tag(), - a, b); - if a == b { return Ok(a); } - - let infcx = self.fields.infcx; - let a = infcx.type_variables.borrow().replace_if_possible(a); - let b = infcx.type_variables.borrow().replace_if_possible(b); - match (&a.sty, &b.sty) { - (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { - infcx.type_variables.borrow_mut().relate_vars(a_id, EqTo, b_id); - Ok(a) - } - - (&ty::TyInfer(TyVar(a_id)), _) => { - try!(self.fields.instantiate(b, EqTo, a_id)); - Ok(a) - } - - (_, &ty::TyInfer(TyVar(b_id))) => { - try!(self.fields.instantiate(a, EqTo, b_id)); - Ok(a) - } - - _ => { - try!(combine::super_combine_tys(self.fields.infcx, self, a, b)); - Ok(a) - } - } - } - - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { - 
debug!("{}.regions({:?}, {:?})", - self.tag(), - a, - b); - let origin = Subtype(self.fields.trace.clone()); - self.fields.infcx.region_vars.make_eqregion(origin, a, b); - Ok(a) - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a, 'tcx> - { - try!(self.fields.higher_ranked_sub(a, b)); - self.fields.higher_ranked_sub(b, a) - } -} diff --git a/src/librustc/middle/infer/error_reporting.rs b/src/librustc/middle/infer/error_reporting.rs deleted file mode 100644 index 5cc848d2ca3d1..0000000000000 --- a/src/librustc/middle/infer/error_reporting.rs +++ /dev/null @@ -1,1989 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Error Reporting Code for the inference engine -//! -//! Because of the way inference, and in particular region inference, -//! works, it often happens that errors are not detected until far after -//! the relevant line of code has been type-checked. Therefore, there is -//! an elaborate system to track why a particular constraint in the -//! inference graph arose so that we can explain to the user what gave -//! rise to a particular error. -//! -//! The basis of the system are the "origin" types. An "origin" is the -//! reason that a constraint or inference variable arose. There are -//! different "origin" enums for different kinds of constraints/variables -//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has -//! a span, but also more information so that we can generate a meaningful -//! error message. -//! -//! Having a catalogue of all the different reasons an error can arise is -//! 
also useful for other reasons, like cross-referencing FAQs etc, though -//! we are not really taking advantage of this yet. -//! -//! # Region Inference -//! -//! Region inference is particularly tricky because it always succeeds "in -//! the moment" and simply registers a constraint. Then, at the end, we -//! can compute the full graph and report errors, so we need to be able to -//! store and later report what gave rise to the conflicting constraints. -//! -//! # Subtype Trace -//! -//! Determining whether `T1 <: T2` often involves a number of subtypes and -//! subconstraints along the way. A "TypeTrace" is an extended version -//! of an origin that traces the types and other values that were being -//! compared. It is not necessarily comprehensive (in fact, at the time of -//! this writing it only tracks the root values being compared) but I'd -//! like to extend it to include significant "waypoints". For example, if -//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2 -//! <: T4` fails, I'd like the trace to include enough information to say -//! "in the 2nd element of the tuple". Similarly, failures when comparing -//! arguments or return types in fn types should be able to cite the -//! specific position, etc. -//! -//! # Reality vs plan -//! -//! Of course, there is still a LOT of code in typeck that has yet to be -//! ported to this system, and which relies on string concatenation at the -//! time of error detection. 
- -use self::FreshOrKept::*; - -use super::InferCtxt; -use super::TypeTrace; -use super::SubregionOrigin; -use super::RegionVariableOrigin; -use super::ValuePairs; -use super::region_inference::RegionResolutionError; -use super::region_inference::ConcreteFailure; -use super::region_inference::SubSupConflict; -use super::region_inference::GenericBoundFailure; -use super::region_inference::GenericKind; -use super::region_inference::ProcessedErrors; -use super::region_inference::SameRegions; - -use std::collections::HashSet; - -use front::map as ast_map; -use rustc_front::hir; -use rustc_front::print::pprust; - -use middle::cstore::CrateStore; -use middle::def; -use middle::def_id::DefId; -use middle::infer::{self, TypeOrigin}; -use middle::region; -use middle::subst; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::{Region, ReFree}; -use middle::ty::error::TypeError; - -use std::cell::{Cell, RefCell}; -use std::char::from_u32; -use std::fmt; -use syntax::ast; -use syntax::errors::DiagnosticBuilder; -use syntax::codemap::{self, Pos, Span}; -use syntax::parse::token; -use syntax::ptr::P; - -impl<'tcx> ty::ctxt<'tcx> { - pub fn note_and_explain_region(&self, - err: &mut DiagnosticBuilder, - prefix: &str, - region: ty::Region, - suffix: &str) { - fn item_scope_tag(item: &hir::Item) -> &'static str { - match item.node { - hir::ItemImpl(..) => "impl", - hir::ItemStruct(..) => "struct", - hir::ItemEnum(..) => "enum", - hir::ItemTrait(..) => "trait", - hir::ItemFn(..) => "function body", - _ => "item" - } - } - - fn explain_span(tcx: &ty::ctxt, heading: &str, span: Span) - -> (String, Option) { - let lo = tcx.sess.codemap().lookup_char_pos_adj(span.lo); - (format!("the {} at {}:{}", heading, lo.line, lo.col.to_usize()), - Some(span)) - } - - let (description, span) = match region { - ty::ReScope(scope) => { - let new_string; - let unknown_scope = || { - format!("{}unknown scope: {:?}{}. 
Please report a bug.", - prefix, scope, suffix) - }; - let span = match scope.span(&self.region_maps, &self.map) { - Some(s) => s, - None => { - err.note(&unknown_scope()); - return; - } - }; - let tag = match self.map.find(scope.node_id(&self.region_maps)) { - Some(ast_map::NodeBlock(_)) => "block", - Some(ast_map::NodeExpr(expr)) => match expr.node { - hir::ExprCall(..) => "call", - hir::ExprMethodCall(..) => "method call", - hir::ExprMatch(_, _, hir::MatchSource::IfLetDesugar { .. }) => "if let", - hir::ExprMatch(_, _, hir::MatchSource::WhileLetDesugar) => "while let", - hir::ExprMatch(_, _, hir::MatchSource::ForLoopDesugar) => "for", - hir::ExprMatch(..) => "match", - _ => "expression", - }, - Some(ast_map::NodeStmt(_)) => "statement", - Some(ast_map::NodeItem(it)) => item_scope_tag(&*it), - Some(_) | None => { - err.span_note(span, &unknown_scope()); - return; - } - }; - let scope_decorated_tag = match self.region_maps.code_extent_data(scope) { - region::CodeExtentData::Misc(_) => tag, - region::CodeExtentData::CallSiteScope { .. } => { - "scope of call-site for function" - } - region::CodeExtentData::ParameterScope { .. } => { - "scope of parameters for function" - } - region::CodeExtentData::DestructionScope(_) => { - new_string = format!("destruction scope surrounding {}", tag); - &new_string[..] - } - region::CodeExtentData::Remainder(r) => { - new_string = format!("block suffix following statement {}", - r.first_statement_index); - &new_string[..] 
- } - }; - explain_span(self, scope_decorated_tag, span) - } - - ty::ReFree(ref fr) => { - let prefix = match fr.bound_region { - ty::BrAnon(idx) => { - format!("the anonymous lifetime #{} defined on", idx + 1) - } - ty::BrFresh(_) => "an anonymous lifetime defined on".to_owned(), - _ => { - format!("the lifetime {} as defined on", - fr.bound_region) - } - }; - - match self.map.find(fr.scope.node_id(&self.region_maps)) { - Some(ast_map::NodeBlock(ref blk)) => { - let (msg, opt_span) = explain_span(self, "block", blk.span); - (format!("{} {}", prefix, msg), opt_span) - } - Some(ast_map::NodeItem(it)) => { - let tag = item_scope_tag(&*it); - let (msg, opt_span) = explain_span(self, tag, it.span); - (format!("{} {}", prefix, msg), opt_span) - } - Some(_) | None => { - // this really should not happen, but it does: - // FIXME(#27942) - (format!("{} unknown free region bounded by scope {:?}", - prefix, fr.scope), None) - } - } - } - - ty::ReStatic => ("the static lifetime".to_owned(), None), - - ty::ReEmpty => ("the empty lifetime".to_owned(), None), - - ty::ReEarlyBound(ref data) => (data.name.to_string(), None), - - // FIXME(#13998) ReSkolemized should probably print like - // ReFree rather than dumping Debug output on the user. - // - // We shouldn't really be having unification failures with ReVar - // and ReLateBound though. - ty::ReSkolemized(..) | ty::ReVar(_) | ty::ReLateBound(..) 
=> { - (format!("lifetime {:?}", region), None) - } - }; - let message = format!("{}{}{}", prefix, description, suffix); - if let Some(span) = span { - err.span_note(span, &message); - } else { - err.note(&message); - } - } -} - -pub trait ErrorReporting<'tcx> { - fn report_region_errors(&self, - errors: &Vec>); - - fn process_errors(&self, errors: &Vec>) - -> Vec>; - - fn report_type_error(&self, - trace: TypeTrace<'tcx>, - terr: &TypeError<'tcx>) - -> DiagnosticBuilder<'tcx>; - - fn check_and_note_conflicting_crates(&self, - err: &mut DiagnosticBuilder, - terr: &TypeError<'tcx>, - sp: Span); - - fn report_and_explain_type_error(&self, - trace: TypeTrace<'tcx>, - terr: &TypeError<'tcx>); - - fn values_str(&self, values: &ValuePairs<'tcx>) -> Option; - - fn expected_found_str + TypeFoldable<'tcx>>( - &self, - exp_found: &ty::error::ExpectedFound) - -> Option; - - fn report_concrete_failure(&self, - origin: SubregionOrigin<'tcx>, - sub: Region, - sup: Region); - - fn report_generic_bound_failure(&self, - origin: SubregionOrigin<'tcx>, - kind: GenericKind<'tcx>, - sub: Region); - - fn report_sub_sup_conflict(&self, - var_origin: RegionVariableOrigin, - sub_origin: SubregionOrigin<'tcx>, - sub_region: Region, - sup_origin: SubregionOrigin<'tcx>, - sup_region: Region); - - fn report_processed_errors(&self, - var_origin: &[RegionVariableOrigin], - trace_origin: &[(TypeTrace<'tcx>, TypeError<'tcx>)], - same_regions: &[SameRegions]); - - fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]); -} - -trait ErrorReportingHelpers<'tcx> { - fn report_inference_failure(&self, - var_origin: RegionVariableOrigin) - -> DiagnosticBuilder<'tcx>; - - fn note_region_origin(&self, - err: &mut DiagnosticBuilder, - origin: &SubregionOrigin<'tcx>); - - fn give_expl_lifetime_param(&self, - err: &mut DiagnosticBuilder, - decl: &hir::FnDecl, - unsafety: hir::Unsafety, - constness: hir::Constness, - name: ast::Name, - opt_explicit_self: 
Option<&hir::ExplicitSelf_>, - generics: &hir::Generics, - span: Span); -} - -impl<'a, 'tcx> ErrorReporting<'tcx> for InferCtxt<'a, 'tcx> { - fn report_region_errors(&self, - errors: &Vec>) { - let p_errors = self.process_errors(errors); - let errors = if p_errors.is_empty() { errors } else { &p_errors }; - for error in errors { - match error.clone() { - ConcreteFailure(origin, sub, sup) => { - self.report_concrete_failure(origin, sub, sup); - } - - GenericBoundFailure(kind, param_ty, sub) => { - self.report_generic_bound_failure(kind, param_ty, sub); - } - - SubSupConflict(var_origin, - sub_origin, sub_r, - sup_origin, sup_r) => { - self.report_sub_sup_conflict(var_origin, - sub_origin, sub_r, - sup_origin, sup_r); - } - - ProcessedErrors(ref var_origins, - ref trace_origins, - ref same_regions) => { - if !same_regions.is_empty() { - self.report_processed_errors(&var_origins[..], - &trace_origins[..], - &same_regions[..]); - } - } - } - } - } - - // This method goes through all the errors and try to group certain types - // of error together, for the purpose of suggesting explicit lifetime - // parameters to the user. This is done so that we can have a more - // complete view of what lifetimes should be the same. 
- // If the return value is an empty vector, it means that processing - // failed (so the return value of this method should not be used) - fn process_errors(&self, errors: &Vec>) - -> Vec> { - debug!("process_errors()"); - let mut var_origins = Vec::new(); - let mut trace_origins = Vec::new(); - let mut same_regions = Vec::new(); - let mut processed_errors = Vec::new(); - for error in errors { - match error.clone() { - ConcreteFailure(origin, sub, sup) => { - debug!("processing ConcreteFailure"); - let trace = match origin { - infer::Subtype(trace) => Some(trace), - _ => None, - }; - match free_regions_from_same_fn(self.tcx, sub, sup) { - Some(ref same_frs) if trace.is_some() => { - let trace = trace.unwrap(); - let terr = TypeError::RegionsDoesNotOutlive(sup, - sub); - trace_origins.push((trace, terr)); - append_to_same_regions(&mut same_regions, same_frs); - } - _ => processed_errors.push((*error).clone()), - } - } - SubSupConflict(var_origin, _, sub_r, _, sup_r) => { - debug!("processing SubSupConflict sub: {:?} sup: {:?}", sub_r, sup_r); - match free_regions_from_same_fn(self.tcx, sub_r, sup_r) { - Some(ref same_frs) => { - var_origins.push(var_origin); - append_to_same_regions(&mut same_regions, same_frs); - } - None => processed_errors.push((*error).clone()), - } - } - _ => () // This shouldn't happen - } - } - if !same_regions.is_empty() { - let common_scope_id = same_regions[0].scope_id; - for sr in &same_regions { - // Since ProcessedErrors is used to reconstruct the function - // declaration, we want to make sure that they are, in fact, - // from the same scope - if sr.scope_id != common_scope_id { - debug!("returning empty result from process_errors because - {} != {}", sr.scope_id, common_scope_id); - return vec!(); - } - } - let pe = ProcessedErrors(var_origins, trace_origins, same_regions); - debug!("errors processed: {:?}", pe); - processed_errors.push(pe); - } - return processed_errors; - - - struct FreeRegionsFromSameFn { - sub_fr: ty::FreeRegion, 
- sup_fr: ty::FreeRegion, - scope_id: ast::NodeId - } - - impl FreeRegionsFromSameFn { - fn new(sub_fr: ty::FreeRegion, - sup_fr: ty::FreeRegion, - scope_id: ast::NodeId) - -> FreeRegionsFromSameFn { - FreeRegionsFromSameFn { - sub_fr: sub_fr, - sup_fr: sup_fr, - scope_id: scope_id - } - } - } - - fn free_regions_from_same_fn(tcx: &ty::ctxt, - sub: Region, - sup: Region) - -> Option { - debug!("free_regions_from_same_fn(sub={:?}, sup={:?})", sub, sup); - let (scope_id, fr1, fr2) = match (sub, sup) { - (ReFree(fr1), ReFree(fr2)) => { - if fr1.scope != fr2.scope { - return None - } - assert!(fr1.scope == fr2.scope); - (fr1.scope.node_id(&tcx.region_maps), fr1, fr2) - }, - _ => return None - }; - let parent = tcx.map.get_parent(scope_id); - let parent_node = tcx.map.find(parent); - match parent_node { - Some(node) => match node { - ast_map::NodeItem(item) => match item.node { - hir::ItemFn(..) => { - Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id)) - }, - _ => None - }, - ast_map::NodeImplItem(..) | - ast_map::NodeTraitItem(..) 
=> { - Some(FreeRegionsFromSameFn::new(fr1, fr2, scope_id)) - }, - _ => None - }, - None => { - debug!("no parent node of scope_id {}", scope_id); - None - } - } - } - - fn append_to_same_regions(same_regions: &mut Vec, - same_frs: &FreeRegionsFromSameFn) { - let scope_id = same_frs.scope_id; - let (sub_fr, sup_fr) = (same_frs.sub_fr, same_frs.sup_fr); - for sr in &mut *same_regions { - if sr.contains(&sup_fr.bound_region) - && scope_id == sr.scope_id { - sr.push(sub_fr.bound_region); - return - } - } - same_regions.push(SameRegions { - scope_id: scope_id, - regions: vec!(sub_fr.bound_region, sup_fr.bound_region) - }) - } - } - - fn report_type_error(&self, - trace: TypeTrace<'tcx>, - terr: &TypeError<'tcx>) - -> DiagnosticBuilder<'tcx> { - let expected_found_str = match self.values_str(&trace.values) { - Some(v) => v, - None => { - return self.tcx.sess.diagnostic().struct_dummy(); /* derived error */ - } - }; - - let is_simple_error = if let &TypeError::Sorts(ref values) = terr { - values.expected.is_primitive() && values.found.is_primitive() - } else { - false - }; - - let expected_found_str = if is_simple_error { - expected_found_str - } else { - format!("{} ({})", expected_found_str, terr) - }; - - let mut err = struct_span_err!(self.tcx.sess, - trace.origin.span(), - E0308, - "{}: {}", - trace.origin, - expected_found_str); - - self.check_and_note_conflicting_crates(&mut err, terr, trace.origin.span()); - - match trace.origin { - TypeOrigin::MatchExpressionArm(_, arm_span, source) => match source { - hir::MatchSource::IfLetDesugar{..} => { - err.span_note(arm_span, "`if let` arm with an incompatible type"); - } - _ => { - err.span_note(arm_span, "match arm with an incompatible type"); - } - }, - _ => () - } - err - } - - /// Adds a note if the types come from similarly named crates - fn check_and_note_conflicting_crates(&self, - err: &mut DiagnosticBuilder, - terr: &TypeError<'tcx>, - sp: Span) { - let report_path_match = |err: &mut DiagnosticBuilder, did1: 
DefId, did2: DefId| { - // Only external crates, if either is from a local - // module we could have false positives - if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate { - let exp_path = self.tcx.with_path(did1, - |p| p.map(|x| x.to_string()) - .collect::>()); - let found_path = self.tcx.with_path(did2, - |p| p.map(|x| x.to_string()) - .collect::>()); - // We compare strings because PathMod and PathName can be different - // for imported and non-imported crates - if exp_path == found_path { - let crate_name = self.tcx.sess.cstore.crate_name(did1.krate); - err.span_note(sp, &format!("Perhaps two different versions \ - of crate `{}` are being used?", - crate_name)); - } - } - }; - match *terr { - TypeError::Sorts(ref exp_found) => { - // if they are both "path types", there's a chance of ambiguity - // due to different versions of the same crate - match (&exp_found.expected.sty, &exp_found.found.sty) { - (&ty::TyEnum(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) | - (&ty::TyStruct(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) | - (&ty::TyEnum(ref exp_adt, _), &ty::TyStruct(ref found_adt, _)) | - (&ty::TyStruct(ref exp_adt, _), &ty::TyEnum(ref found_adt, _)) => { - report_path_match(err, exp_adt.did, found_adt.did); - }, - _ => () - } - }, - TypeError::Traits(ref exp_found) => { - report_path_match(err, exp_found.expected, exp_found.found); - }, - _ => () // FIXME(#22750) handle traits and stuff - } - } - - fn report_and_explain_type_error(&self, - trace: TypeTrace<'tcx>, - terr: &TypeError<'tcx>) { - let span = trace.origin.span(); - let mut err = self.report_type_error(trace, terr); - self.tcx.note_and_explain_type_err(&mut err, terr, span); - err.emit(); - } - - /// Returns a string of the form "expected `{}`, found `{}`", or None if this is a derived - /// error. 
- fn values_str(&self, values: &ValuePairs<'tcx>) -> Option { - match *values { - infer::Types(ref exp_found) => self.expected_found_str(exp_found), - infer::TraitRefs(ref exp_found) => self.expected_found_str(exp_found), - infer::PolyTraitRefs(ref exp_found) => self.expected_found_str(exp_found) - } - } - - fn expected_found_str + TypeFoldable<'tcx>>( - &self, - exp_found: &ty::error::ExpectedFound) - -> Option - { - let expected = exp_found.expected.resolve(self); - if expected.references_error() { - return None; - } - - let found = exp_found.found.resolve(self); - if found.references_error() { - return None; - } - - Some(format!("expected `{}`, found `{}`", - expected, - found)) - } - - fn report_generic_bound_failure(&self, - origin: SubregionOrigin<'tcx>, - bound_kind: GenericKind<'tcx>, - sub: Region) - { - // FIXME: it would be better to report the first error message - // with the span of the parameter itself, rather than the span - // where the error was detected. But that span is not readily - // accessible. - - let labeled_user_string = match bound_kind { - GenericKind::Param(ref p) => - format!("the parameter type `{}`", p), - GenericKind::Projection(ref p) => - format!("the associated type `{}`", p), - }; - - let mut err = match sub { - ty::ReFree(ty::FreeRegion {bound_region: ty::BrNamed(..), ..}) => { - // Does the required lifetime have a nice name we can print? - let mut err = struct_span_err!(self.tcx.sess, - origin.span(), - E0309, - "{} may not live long enough", - labeled_user_string); - err.fileline_help(origin.span(), - &format!("consider adding an explicit lifetime bound `{}: {}`...", - bound_kind, - sub)); - err - } - - ty::ReStatic => { - // Does the required lifetime have a nice name we can print? 
- let mut err = struct_span_err!(self.tcx.sess, - origin.span(), - E0310, - "{} may not live long enough", - labeled_user_string); - err.fileline_help(origin.span(), - &format!("consider adding an explicit lifetime \ - bound `{}: 'static`...", - bound_kind)); - err - } - - _ => { - // If not, be less specific. - let mut err = struct_span_err!(self.tcx.sess, - origin.span(), - E0311, - "{} may not live long enough", - labeled_user_string); - err.fileline_help(origin.span(), - &format!("consider adding an explicit lifetime bound for `{}`", - bound_kind)); - self.tcx.note_and_explain_region( - &mut err, - &format!("{} must be valid for ", labeled_user_string), - sub, - "..."); - err - } - }; - - self.note_region_origin(&mut err, &origin); - err.emit(); - } - - fn report_concrete_failure(&self, - origin: SubregionOrigin<'tcx>, - sub: Region, - sup: Region) { - match origin { - infer::Subtype(trace) => { - let terr = TypeError::RegionsDoesNotOutlive(sup, sub); - self.report_and_explain_type_error(trace, &terr); - } - infer::Reborrow(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0312, - "lifetime of reference outlines \ - lifetime of borrowed content..."); - self.tcx.note_and_explain_region(&mut err, - "...the reference is valid for ", - sub, - "..."); - self.tcx.note_and_explain_region(&mut err, - "...but the borrowed content is only valid for ", - sup, - ""); - err.emit(); - } - infer::ReborrowUpvar(span, ref upvar_id) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0313, - "lifetime of borrowed pointer outlives \ - lifetime of captured variable `{}`...", - self.tcx.local_var_name_str(upvar_id.var_id)); - self.tcx.note_and_explain_region(&mut err, - "...the borrowed pointer is valid for ", - sub, - "..."); - self.tcx.note_and_explain_region(&mut err, - &format!("...but `{}` is only valid for ", - self.tcx.local_var_name_str(upvar_id.var_id)), - sup, - ""); - err.emit(); - } - infer::InfStackClosure(span) => { - let mut err = 
struct_span_err!(self.tcx.sess, span, E0314, - "closure outlives stack frame"); - self.tcx.note_and_explain_region(&mut err, - "...the closure must be valid for ", - sub, - "..."); - self.tcx.note_and_explain_region(&mut err, - "...but the closure's stack frame is only valid for ", - sup, - ""); - err.emit(); - } - infer::InvokeClosure(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0315, - "cannot invoke closure outside of its lifetime"); - self.tcx.note_and_explain_region(&mut err, - "the closure is only valid for ", - sup, - ""); - err.emit(); - } - infer::DerefPointer(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0473, - "dereference of reference outside its lifetime"); - self.tcx.note_and_explain_region(&mut err, - "the reference is only valid for ", - sup, - ""); - err.emit(); - } - infer::FreeVariable(span, id) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0474, - "captured variable `{}` does not outlive the enclosing closure", - self.tcx.local_var_name_str(id)); - self.tcx.note_and_explain_region(&mut err, - "captured variable is valid for ", - sup, - ""); - self.tcx.note_and_explain_region(&mut err, - "closure is valid for ", - sub, - ""); - err.emit(); - } - infer::IndexSlice(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0475, - "index of slice outside its lifetime"); - self.tcx.note_and_explain_region(&mut err, - "the slice is only valid for ", - sup, - ""); - err.emit(); - } - infer::RelateObjectBound(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0476, - "lifetime of the source pointer does not outlive \ - lifetime bound of the object type"); - self.tcx.note_and_explain_region(&mut err, - "object type is valid for ", - sub, - ""); - self.tcx.note_and_explain_region(&mut err, - "source pointer is only valid for ", - sup, - ""); - err.emit(); - } - infer::RelateParamBound(span, ty) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0477, - "the type `{}` 
does not fulfill the required lifetime", - self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, - "type must outlive ", - sub, - ""); - err.emit(); - } - infer::RelateRegionParamBound(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0478, - "lifetime bound not satisfied"); - self.tcx.note_and_explain_region(&mut err, - "lifetime parameter instantiated with ", - sup, - ""); - self.tcx.note_and_explain_region(&mut err, - "but lifetime parameter must outlive ", - sub, - ""); - err.emit(); - } - infer::RelateDefaultParamBound(span, ty) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0479, - "the type `{}` (provided as the value of \ - a type parameter) is not valid at this point", - self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, - "type must outlive ", - sub, - ""); - err.emit(); - } - infer::CallRcvr(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0480, - "lifetime of method receiver does not outlive \ - the method call"); - self.tcx.note_and_explain_region(&mut err, - "the receiver is only valid for ", - sup, - ""); - err.emit(); - } - infer::CallArg(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0481, - "lifetime of function argument does not outlive \ - the function call"); - self.tcx.note_and_explain_region(&mut err, - "the function argument is only valid for ", - sup, - ""); - err.emit(); - } - infer::CallReturn(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0482, - "lifetime of return value does not outlive \ - the function call"); - self.tcx.note_and_explain_region(&mut err, - "the return value is only valid for ", - sup, - ""); - err.emit(); - } - infer::Operand(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0483, - "lifetime of operand does not outlive \ - the operation"); - self.tcx.note_and_explain_region(&mut err, - "the operand is only valid for ", - sup, - ""); - err.emit(); - } - infer::AddrOf(span) => { - let mut 
err = struct_span_err!(self.tcx.sess, span, E0484, - "reference is not valid at the time of borrow"); - self.tcx.note_and_explain_region(&mut err, - "the borrow is only valid for ", - sup, - ""); - err.emit(); - } - infer::AutoBorrow(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0485, - "automatically reference is not valid \ - at the time of borrow"); - self.tcx.note_and_explain_region(&mut err, - "the automatic borrow is only valid for ", - sup, - ""); - err.emit(); - } - infer::ExprTypeIsNotInScope(t, span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0486, - "type of expression contains references \ - that are not valid during the expression: `{}`", - self.ty_to_string(t)); - self.tcx.note_and_explain_region(&mut err, - "type is only valid for ", - sup, - ""); - err.emit(); - } - infer::SafeDestructor(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0487, - "unsafe use of destructor: destructor might be called \ - while references are dead"); - // FIXME (22171): terms "super/subregion" are suboptimal - self.tcx.note_and_explain_region(&mut err, - "superregion: ", - sup, - ""); - self.tcx.note_and_explain_region(&mut err, - "subregion: ", - sub, - ""); - err.emit(); - } - infer::BindingTypeIsNotValidAtDecl(span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0488, - "lifetime of variable does not enclose its declaration"); - self.tcx.note_and_explain_region(&mut err, - "the variable is only valid for ", - sup, - ""); - err.emit(); - } - infer::ParameterInScope(_, span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0489, - "type/lifetime parameter not in scope here"); - self.tcx.note_and_explain_region(&mut err, - "the parameter is only valid for ", - sub, - ""); - err.emit(); - } - infer::DataBorrowed(ty, span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0490, - "a value of type `{}` is borrowed for too long", - self.ty_to_string(ty)); - 
self.tcx.note_and_explain_region(&mut err, "the type is valid for ", sub, ""); - self.tcx.note_and_explain_region(&mut err, "but the borrow lasts for ", sup, ""); - err.emit(); - } - infer::ReferenceOutlivesReferent(ty, span) => { - let mut err = struct_span_err!(self.tcx.sess, span, E0491, - "in type `{}`, reference has a longer lifetime \ - than the data it references", - self.ty_to_string(ty)); - self.tcx.note_and_explain_region(&mut err, - "the pointer is valid for ", - sub, - ""); - self.tcx.note_and_explain_region(&mut err, - "but the referenced data is only valid for ", - sup, - ""); - err.emit(); - } - } - } - - fn report_sub_sup_conflict(&self, - var_origin: RegionVariableOrigin, - sub_origin: SubregionOrigin<'tcx>, - sub_region: Region, - sup_origin: SubregionOrigin<'tcx>, - sup_region: Region) { - let mut err = self.report_inference_failure(var_origin); - - self.tcx.note_and_explain_region(&mut err, - "first, the lifetime cannot outlive ", - sup_region, - "..."); - - self.note_region_origin(&mut err, &sup_origin); - - self.tcx.note_and_explain_region(&mut err, - "but, the lifetime must be valid for ", - sub_region, - "..."); - - self.note_region_origin(&mut err, &sub_origin); - err.emit(); - } - - fn report_processed_errors(&self, - var_origins: &[RegionVariableOrigin], - trace_origins: &[(TypeTrace<'tcx>, TypeError<'tcx>)], - same_regions: &[SameRegions]) { - for (i, vo) in var_origins.iter().enumerate() { - let mut err = self.report_inference_failure(vo.clone()); - if i == var_origins.len() - 1 { - self.give_suggestion(&mut err, same_regions); - } - err.emit(); - } - - for &(ref trace, ref terr) in trace_origins { - self.report_and_explain_type_error(trace.clone(), terr); - } - } - - fn give_suggestion(&self, err: &mut DiagnosticBuilder, same_regions: &[SameRegions]) { - let scope_id = same_regions[0].scope_id; - let parent = self.tcx.map.get_parent(scope_id); - let parent_node = self.tcx.map.find(parent); - let taken = lifetimes_in_scope(self.tcx, 
scope_id); - let life_giver = LifeGiver::with_taken(&taken[..]); - let node_inner = match parent_node { - Some(ref node) => match *node { - ast_map::NodeItem(ref item) => { - match item.node { - hir::ItemFn(ref fn_decl, unsafety, constness, _, ref gen, _) => { - Some((fn_decl, gen, unsafety, constness, - item.name, None, item.span)) - }, - _ => None - } - } - ast_map::NodeImplItem(item) => { - match item.node { - hir::ImplItemKind::Method(ref sig, _) => { - Some((&sig.decl, - &sig.generics, - sig.unsafety, - sig.constness, - item.name, - Some(&sig.explicit_self.node), - item.span)) - } - _ => None, - } - }, - ast_map::NodeTraitItem(item) => { - match item.node { - hir::MethodTraitItem(ref sig, Some(_)) => { - Some((&sig.decl, - &sig.generics, - sig.unsafety, - sig.constness, - item.name, - Some(&sig.explicit_self.node), - item.span)) - } - _ => None - } - } - _ => None - }, - None => None - }; - let (fn_decl, generics, unsafety, constness, name, expl_self, span) - = node_inner.expect("expect item fn"); - let rebuilder = Rebuilder::new(self.tcx, fn_decl, expl_self, - generics, same_regions, &life_giver); - let (fn_decl, expl_self, generics) = rebuilder.rebuild(); - self.give_expl_lifetime_param(err, &fn_decl, unsafety, constness, name, - expl_self.as_ref(), &generics, span); - } -} - -struct RebuildPathInfo<'a> { - path: &'a hir::Path, - // indexes to insert lifetime on path.lifetimes - indexes: Vec, - // number of lifetimes we expect to see on the type referred by `path` - // (e.g., expected=1 for struct Foo<'a>) - expected: u32, - anon_nums: &'a HashSet, - region_names: &'a HashSet -} - -struct Rebuilder<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - fn_decl: &'a hir::FnDecl, - expl_self_opt: Option<&'a hir::ExplicitSelf_>, - generics: &'a hir::Generics, - same_regions: &'a [SameRegions], - life_giver: &'a LifeGiver, - cur_anon: Cell, - inserted_anons: RefCell>, -} - -enum FreshOrKept { - Fresh, - Kept -} - -impl<'a, 'tcx> Rebuilder<'a, 'tcx> { - fn new(tcx: &'a 
ty::ctxt<'tcx>, - fn_decl: &'a hir::FnDecl, - expl_self_opt: Option<&'a hir::ExplicitSelf_>, - generics: &'a hir::Generics, - same_regions: &'a [SameRegions], - life_giver: &'a LifeGiver) - -> Rebuilder<'a, 'tcx> { - Rebuilder { - tcx: tcx, - fn_decl: fn_decl, - expl_self_opt: expl_self_opt, - generics: generics, - same_regions: same_regions, - life_giver: life_giver, - cur_anon: Cell::new(0), - inserted_anons: RefCell::new(HashSet::new()), - } - } - - fn rebuild(&self) - -> (hir::FnDecl, Option, hir::Generics) { - let mut expl_self_opt = self.expl_self_opt.cloned(); - let mut inputs = self.fn_decl.inputs.clone(); - let mut output = self.fn_decl.output.clone(); - let mut ty_params = self.generics.ty_params.clone(); - let where_clause = self.generics.where_clause.clone(); - let mut kept_lifetimes = HashSet::new(); - for sr in self.same_regions { - self.cur_anon.set(0); - self.offset_cur_anon(); - let (anon_nums, region_names) = - self.extract_anon_nums_and_names(sr); - let (lifetime, fresh_or_kept) = self.pick_lifetime(®ion_names); - match fresh_or_kept { - Kept => { kept_lifetimes.insert(lifetime.name); } - _ => () - } - expl_self_opt = self.rebuild_expl_self(expl_self_opt, lifetime, - &anon_nums, ®ion_names); - inputs = self.rebuild_args_ty(&inputs[..], lifetime, - &anon_nums, ®ion_names); - output = self.rebuild_output(&output, lifetime, &anon_nums, ®ion_names); - ty_params = self.rebuild_ty_params(ty_params, lifetime, - ®ion_names); - } - let fresh_lifetimes = self.life_giver.get_generated_lifetimes(); - let all_region_names = self.extract_all_region_names(); - let generics = self.rebuild_generics(self.generics, - &fresh_lifetimes, - &kept_lifetimes, - &all_region_names, - ty_params, - where_clause); - let new_fn_decl = hir::FnDecl { - inputs: inputs, - output: output, - variadic: self.fn_decl.variadic - }; - (new_fn_decl, expl_self_opt, generics) - } - - fn pick_lifetime(&self, - region_names: &HashSet) - -> (hir::Lifetime, FreshOrKept) { - if 
!region_names.is_empty() { - // It's not necessary to convert the set of region names to a - // vector of string and then sort them. However, it makes the - // choice of lifetime name deterministic and thus easier to test. - let mut names = Vec::new(); - for rn in region_names { - let lt_name = rn.to_string(); - names.push(lt_name); - } - names.sort(); - let name = token::intern(&names[0]); - return (name_to_dummy_lifetime(name), Kept); - } - return (self.life_giver.give_lifetime(), Fresh); - } - - fn extract_anon_nums_and_names(&self, same_regions: &SameRegions) - -> (HashSet, HashSet) { - let mut anon_nums = HashSet::new(); - let mut region_names = HashSet::new(); - for br in &same_regions.regions { - match *br { - ty::BrAnon(i) => { - anon_nums.insert(i); - } - ty::BrNamed(_, name) => { - region_names.insert(name); - } - _ => () - } - } - (anon_nums, region_names) - } - - fn extract_all_region_names(&self) -> HashSet { - let mut all_region_names = HashSet::new(); - for sr in self.same_regions { - for br in &sr.regions { - match *br { - ty::BrNamed(_, name) => { - all_region_names.insert(name); - } - _ => () - } - } - } - all_region_names - } - - fn inc_cur_anon(&self, n: u32) { - let anon = self.cur_anon.get(); - self.cur_anon.set(anon+n); - } - - fn offset_cur_anon(&self) { - let mut anon = self.cur_anon.get(); - while self.inserted_anons.borrow().contains(&anon) { - anon += 1; - } - self.cur_anon.set(anon); - } - - fn inc_and_offset_cur_anon(&self, n: u32) { - self.inc_cur_anon(n); - self.offset_cur_anon(); - } - - fn track_anon(&self, anon: u32) { - self.inserted_anons.borrow_mut().insert(anon); - } - - fn rebuild_ty_params(&self, - ty_params: hir::HirVec, - lifetime: hir::Lifetime, - region_names: &HashSet) - -> hir::HirVec { - ty_params.iter().map(|ty_param| { - let bounds = self.rebuild_ty_param_bounds(ty_param.bounds.clone(), - lifetime, - region_names); - hir::TyParam { - name: ty_param.name, - id: ty_param.id, - bounds: bounds, - default: 
ty_param.default.clone(), - span: ty_param.span, - } - }).collect() - } - - fn rebuild_ty_param_bounds(&self, - ty_param_bounds: hir::TyParamBounds, - lifetime: hir::Lifetime, - region_names: &HashSet) - -> hir::TyParamBounds { - ty_param_bounds.iter().map(|tpb| { - match tpb { - &hir::RegionTyParamBound(lt) => { - // FIXME -- it's unclear whether I'm supposed to - // substitute lifetime here. I suspect we need to - // be passing down a map. - hir::RegionTyParamBound(lt) - } - &hir::TraitTyParamBound(ref poly_tr, modifier) => { - let tr = &poly_tr.trait_ref; - let last_seg = tr.path.segments.last().unwrap(); - let mut insert = Vec::new(); - let lifetimes = last_seg.parameters.lifetimes(); - for (i, lt) in lifetimes.iter().enumerate() { - if region_names.contains(<.name) { - insert.push(i as u32); - } - } - let rebuild_info = RebuildPathInfo { - path: &tr.path, - indexes: insert, - expected: lifetimes.len() as u32, - anon_nums: &HashSet::new(), - region_names: region_names - }; - let new_path = self.rebuild_path(rebuild_info, lifetime); - hir::TraitTyParamBound(hir::PolyTraitRef { - bound_lifetimes: poly_tr.bound_lifetimes.clone(), - trait_ref: hir::TraitRef { - path: new_path, - ref_id: tr.ref_id, - }, - span: poly_tr.span, - }, modifier) - } - } - }).collect() - } - - fn rebuild_expl_self(&self, - expl_self_opt: Option, - lifetime: hir::Lifetime, - anon_nums: &HashSet, - region_names: &HashSet) - -> Option { - match expl_self_opt { - Some(ref expl_self) => match *expl_self { - hir::SelfRegion(lt_opt, muta, id) => match lt_opt { - Some(lt) => if region_names.contains(<.name) { - return Some(hir::SelfRegion(Some(lifetime), muta, id)); - }, - None => { - let anon = self.cur_anon.get(); - self.inc_and_offset_cur_anon(1); - if anon_nums.contains(&anon) { - self.track_anon(anon); - return Some(hir::SelfRegion(Some(lifetime), muta, id)); - } - } - }, - _ => () - }, - None => () - } - expl_self_opt - } - - fn rebuild_generics(&self, - generics: &hir::Generics, - add: 
&Vec, - keep: &HashSet, - remove: &HashSet, - ty_params: hir::HirVec, - where_clause: hir::WhereClause) - -> hir::Generics { - let mut lifetimes = Vec::new(); - for lt in add { - lifetimes.push(hir::LifetimeDef { lifetime: *lt, - bounds: hir::HirVec::new() }); - } - for lt in &generics.lifetimes { - if keep.contains(<.lifetime.name) || - !remove.contains(<.lifetime.name) { - lifetimes.push((*lt).clone()); - } - } - hir::Generics { - lifetimes: lifetimes.into(), - ty_params: ty_params, - where_clause: where_clause, - } - } - - fn rebuild_args_ty(&self, - inputs: &[hir::Arg], - lifetime: hir::Lifetime, - anon_nums: &HashSet, - region_names: &HashSet) - -> hir::HirVec { - let mut new_inputs = Vec::new(); - for arg in inputs { - let new_ty = self.rebuild_arg_ty_or_output(&*arg.ty, lifetime, - anon_nums, region_names); - let possibly_new_arg = hir::Arg { - ty: new_ty, - pat: arg.pat.clone(), - id: arg.id - }; - new_inputs.push(possibly_new_arg); - } - new_inputs.into() - } - - fn rebuild_output(&self, ty: &hir::FunctionRetTy, - lifetime: hir::Lifetime, - anon_nums: &HashSet, - region_names: &HashSet) -> hir::FunctionRetTy { - match *ty { - hir::Return(ref ret_ty) => hir::Return( - self.rebuild_arg_ty_or_output(&**ret_ty, lifetime, anon_nums, region_names) - ), - hir::DefaultReturn(span) => hir::DefaultReturn(span), - hir::NoReturn(span) => hir::NoReturn(span) - } - } - - fn rebuild_arg_ty_or_output(&self, - ty: &hir::Ty, - lifetime: hir::Lifetime, - anon_nums: &HashSet, - region_names: &HashSet) - -> P { - let mut new_ty = P(ty.clone()); - let mut ty_queue = vec!(ty); - while !ty_queue.is_empty() { - let cur_ty = ty_queue.remove(0); - match cur_ty.node { - hir::TyRptr(lt_opt, ref mut_ty) => { - let rebuild = match lt_opt { - Some(lt) => region_names.contains(<.name), - None => { - let anon = self.cur_anon.get(); - let rebuild = anon_nums.contains(&anon); - if rebuild { - self.track_anon(anon); - } - self.inc_and_offset_cur_anon(1); - rebuild - } - }; - if rebuild { - 
let to = hir::Ty { - id: cur_ty.id, - node: hir::TyRptr(Some(lifetime), mut_ty.clone()), - span: cur_ty.span - }; - new_ty = self.rebuild_ty(new_ty, P(to)); - } - ty_queue.push(&*mut_ty.ty); - } - hir::TyPath(ref maybe_qself, ref path) => { - let a_def = match self.tcx.def_map.borrow().get(&cur_ty.id) { - None => { - self.tcx - .sess - .fatal(&format!( - "unbound path {}", - pprust::path_to_string(path))) - } - Some(d) => d.full_def() - }; - match a_def { - def::DefTy(did, _) | def::DefStruct(did) => { - let generics = self.tcx.lookup_item_type(did).generics; - - let expected = - generics.regions.len(subst::TypeSpace) as u32; - let lifetimes = - path.segments.last().unwrap().parameters.lifetimes(); - let mut insert = Vec::new(); - if lifetimes.is_empty() { - let anon = self.cur_anon.get(); - for (i, a) in (anon..anon+expected).enumerate() { - if anon_nums.contains(&a) { - insert.push(i as u32); - } - self.track_anon(a); - } - self.inc_and_offset_cur_anon(expected); - } else { - for (i, lt) in lifetimes.iter().enumerate() { - if region_names.contains(<.name) { - insert.push(i as u32); - } - } - } - let rebuild_info = RebuildPathInfo { - path: path, - indexes: insert, - expected: expected, - anon_nums: anon_nums, - region_names: region_names - }; - let new_path = self.rebuild_path(rebuild_info, lifetime); - let qself = maybe_qself.as_ref().map(|qself| { - hir::QSelf { - ty: self.rebuild_arg_ty_or_output(&qself.ty, lifetime, - anon_nums, region_names), - position: qself.position - } - }); - let to = hir::Ty { - id: cur_ty.id, - node: hir::TyPath(qself, new_path), - span: cur_ty.span - }; - new_ty = self.rebuild_ty(new_ty, P(to)); - } - _ => () - } - - } - - hir::TyPtr(ref mut_ty) => { - ty_queue.push(&*mut_ty.ty); - } - hir::TyVec(ref ty) | - hir::TyFixedLengthVec(ref ty, _) => { - ty_queue.push(&**ty); - } - hir::TyTup(ref tys) => ty_queue.extend(tys.iter().map(|ty| &**ty)), - _ => {} - } - } - new_ty - } - - fn rebuild_ty(&self, - from: P, - to: P) - -> P { - - fn 
build_to(from: P, - to: &mut Option>) - -> P { - if Some(from.id) == to.as_ref().map(|ty| ty.id) { - return to.take().expect("`to` type found more than once during rebuild"); - } - from.map(|hir::Ty {id, node, span}| { - let new_node = match node { - hir::TyRptr(lifetime, mut_ty) => { - hir::TyRptr(lifetime, hir::MutTy { - mutbl: mut_ty.mutbl, - ty: build_to(mut_ty.ty, to), - }) - } - hir::TyPtr(mut_ty) => { - hir::TyPtr(hir::MutTy { - mutbl: mut_ty.mutbl, - ty: build_to(mut_ty.ty, to), - }) - } - hir::TyVec(ty) => hir::TyVec(build_to(ty, to)), - hir::TyFixedLengthVec(ty, e) => { - hir::TyFixedLengthVec(build_to(ty, to), e) - } - hir::TyTup(tys) => { - hir::TyTup(tys.into_iter().map(|ty| build_to(ty, to)).collect()) - } - other => other - }; - hir::Ty { id: id, node: new_node, span: span } - }) - } - - build_to(from, &mut Some(to)) - } - - fn rebuild_path(&self, - rebuild_info: RebuildPathInfo, - lifetime: hir::Lifetime) - -> hir::Path - { - let RebuildPathInfo { - path, - indexes, - expected, - anon_nums, - region_names, - } = rebuild_info; - - let last_seg = path.segments.last().unwrap(); - let new_parameters = match last_seg.parameters { - hir::ParenthesizedParameters(..) 
=> { - last_seg.parameters.clone() - } - - hir::AngleBracketedParameters(ref data) => { - let mut new_lts = Vec::new(); - if data.lifetimes.is_empty() { - // traverse once to see if there's a need to insert lifetime - let need_insert = (0..expected).any(|i| { - indexes.contains(&i) - }); - if need_insert { - for i in 0..expected { - if indexes.contains(&i) { - new_lts.push(lifetime); - } else { - new_lts.push(self.life_giver.give_lifetime()); - } - } - } - } else { - for (i, lt) in data.lifetimes.iter().enumerate() { - if indexes.contains(&(i as u32)) { - new_lts.push(lifetime); - } else { - new_lts.push(*lt); - } - } - } - let new_types = data.types.iter().map(|t| { - self.rebuild_arg_ty_or_output(&**t, lifetime, anon_nums, region_names) - }).collect(); - let new_bindings = data.bindings.iter().map(|b| { - hir::TypeBinding { - id: b.id, - name: b.name, - ty: self.rebuild_arg_ty_or_output(&*b.ty, - lifetime, - anon_nums, - region_names), - span: b.span - } - }).collect(); - hir::AngleBracketedParameters(hir::AngleBracketedParameterData { - lifetimes: new_lts.into(), - types: new_types, - bindings: new_bindings, - }) - } - }; - let new_seg = hir::PathSegment { - identifier: last_seg.identifier, - parameters: new_parameters - }; - let mut new_segs = Vec::new(); - new_segs.extend_from_slice(path.segments.split_last().unwrap().1); - new_segs.push(new_seg); - hir::Path { - span: path.span, - global: path.global, - segments: new_segs.into() - } - } -} - -impl<'a, 'tcx> ErrorReportingHelpers<'tcx> for InferCtxt<'a, 'tcx> { - fn give_expl_lifetime_param(&self, - err: &mut DiagnosticBuilder, - decl: &hir::FnDecl, - unsafety: hir::Unsafety, - constness: hir::Constness, - name: ast::Name, - opt_explicit_self: Option<&hir::ExplicitSelf_>, - generics: &hir::Generics, - span: Span) { - let suggested_fn = pprust::fun_to_string(decl, unsafety, constness, name, - opt_explicit_self, generics); - let msg = format!("consider using an explicit lifetime \ - parameter as shown: {}", 
suggested_fn); - err.span_help(span, &msg[..]); - } - - fn report_inference_failure(&self, - var_origin: RegionVariableOrigin) - -> DiagnosticBuilder<'tcx> { - let br_string = |br: ty::BoundRegion| { - let mut s = br.to_string(); - if !s.is_empty() { - s.push_str(" "); - } - s - }; - let var_description = match var_origin { - infer::MiscVariable(_) => "".to_string(), - infer::PatternRegion(_) => " for pattern".to_string(), - infer::AddrOfRegion(_) => " for borrow expression".to_string(), - infer::Autoref(_) => " for autoref".to_string(), - infer::Coercion(_) => " for automatic coercion".to_string(), - infer::LateBoundRegion(_, br, infer::FnCall) => { - format!(" for lifetime parameter {}in function call", - br_string(br)) - } - infer::LateBoundRegion(_, br, infer::HigherRankedType) => { - format!(" for lifetime parameter {}in generic type", br_string(br)) - } - infer::LateBoundRegion(_, br, infer::AssocTypeProjection(type_name)) => { - format!(" for lifetime parameter {}in trait containing associated type `{}`", - br_string(br), type_name) - } - infer::EarlyBoundRegion(_, name) => { - format!(" for lifetime parameter `{}`", - name) - } - infer::BoundRegionInCoherence(name) => { - format!(" for lifetime parameter `{}` in coherence check", - name) - } - infer::UpvarRegion(ref upvar_id, _) => { - format!(" for capture of `{}` by closure", - self.tcx.local_var_name_str(upvar_id.var_id).to_string()) - } - }; - - struct_span_err!(self.tcx.sess, var_origin.span(), E0495, - "cannot infer an appropriate lifetime{} \ - due to conflicting requirements", - var_description) - } - - fn note_region_origin(&self, err: &mut DiagnosticBuilder, origin: &SubregionOrigin<'tcx>) { - match *origin { - infer::Subtype(ref trace) => { - let desc = match trace.origin { - TypeOrigin::Misc(_) => { - "types are compatible" - } - TypeOrigin::MethodCompatCheck(_) => { - "method type is compatible with trait" - } - TypeOrigin::ExprAssignable(_) => { - "expression is assignable" - } - 
TypeOrigin::RelateTraitRefs(_) => { - "traits are compatible" - } - TypeOrigin::RelateSelfType(_) => { - "self type matches impl self type" - } - TypeOrigin::RelateOutputImplTypes(_) => { - "trait type parameters matches those \ - specified on the impl" - } - TypeOrigin::MatchExpressionArm(_, _, _) => { - "match arms have compatible types" - } - TypeOrigin::IfExpression(_) => { - "if and else have compatible types" - } - TypeOrigin::IfExpressionWithNoElse(_) => { - "if may be missing an else clause" - } - TypeOrigin::RangeExpression(_) => { - "start and end of range have compatible types" - } - TypeOrigin::EquatePredicate(_) => { - "equality where clause is satisfied" - } - }; - - match self.values_str(&trace.values) { - Some(values_str) => { - err.span_note( - trace.origin.span(), - &format!("...so that {} ({})", - desc, values_str)); - } - None => { - // Really should avoid printing this error at - // all, since it is derived, but that would - // require more refactoring than I feel like - // doing right now. 
- nmatsakis - err.span_note( - trace.origin.span(), - &format!("...so that {}", desc)); - } - } - } - infer::Reborrow(span) => { - err.span_note( - span, - "...so that reference does not outlive \ - borrowed content"); - } - infer::ReborrowUpvar(span, ref upvar_id) => { - err.span_note( - span, - &format!( - "...so that closure can access `{}`", - self.tcx.local_var_name_str(upvar_id.var_id) - .to_string())); - } - infer::InfStackClosure(span) => { - err.span_note( - span, - "...so that closure does not outlive its stack frame"); - } - infer::InvokeClosure(span) => { - err.span_note( - span, - "...so that closure is not invoked outside its lifetime"); - } - infer::DerefPointer(span) => { - err.span_note( - span, - "...so that pointer is not dereferenced \ - outside its lifetime"); - } - infer::FreeVariable(span, id) => { - err.span_note( - span, - &format!("...so that captured variable `{}` \ - does not outlive the enclosing closure", - self.tcx.local_var_name_str(id))); - } - infer::IndexSlice(span) => { - err.span_note( - span, - "...so that slice is not indexed outside the lifetime"); - } - infer::RelateObjectBound(span) => { - err.span_note( - span, - "...so that it can be closed over into an object"); - } - infer::CallRcvr(span) => { - err.span_note( - span, - "...so that method receiver is valid for the method call"); - } - infer::CallArg(span) => { - err.span_note( - span, - "...so that argument is valid for the call"); - } - infer::CallReturn(span) => { - err.span_note( - span, - "...so that return value is valid for the call"); - } - infer::Operand(span) => { - err.span_note( - span, - "...so that operand is valid for operation"); - } - infer::AddrOf(span) => { - err.span_note( - span, - "...so that reference is valid \ - at the time of borrow"); - } - infer::AutoBorrow(span) => { - err.span_note( - span, - "...so that auto-reference is valid \ - at the time of borrow"); - } - infer::ExprTypeIsNotInScope(t, span) => { - err.span_note( - span, - 
&format!("...so type `{}` of expression is valid during the \ - expression", - self.ty_to_string(t))); - } - infer::BindingTypeIsNotValidAtDecl(span) => { - err.span_note( - span, - "...so that variable is valid at time of its declaration"); - } - infer::ParameterInScope(_, span) => { - err.span_note( - span, - "...so that a type/lifetime parameter is in scope here"); - } - infer::DataBorrowed(ty, span) => { - err.span_note( - span, - &format!("...so that the type `{}` is not borrowed for too long", - self.ty_to_string(ty))); - } - infer::ReferenceOutlivesReferent(ty, span) => { - err.span_note( - span, - &format!("...so that the reference type `{}` \ - does not outlive the data it points at", - self.ty_to_string(ty))); - } - infer::RelateParamBound(span, t) => { - err.span_note( - span, - &format!("...so that the type `{}` \ - will meet its required lifetime bounds", - self.ty_to_string(t))); - } - infer::RelateDefaultParamBound(span, t) => { - err.span_note( - span, - &format!("...so that type parameter \ - instantiated with `{}`, \ - will meet its declared lifetime bounds", - self.ty_to_string(t))); - } - infer::RelateRegionParamBound(span) => { - err.span_note( - span, - "...so that the declared lifetime parameter bounds \ - are satisfied"); - } - infer::SafeDestructor(span) => { - err.span_note( - span, - "...so that references are valid when the destructor \ - runs"); - } - } - } -} - -pub trait Resolvable<'tcx> { - fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Self; -} - -impl<'tcx> Resolvable<'tcx> for Ty<'tcx> { - fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) -> Ty<'tcx> { - infcx.resolve_type_vars_if_possible(self) - } -} - -impl<'tcx> Resolvable<'tcx> for ty::TraitRef<'tcx> { - fn resolve<'a>(&self, infcx: &InferCtxt<'a, 'tcx>) - -> ty::TraitRef<'tcx> { - infcx.resolve_type_vars_if_possible(self) - } -} - -impl<'tcx> Resolvable<'tcx> for ty::PolyTraitRef<'tcx> { - fn resolve<'a>(&self, - infcx: &InferCtxt<'a, 'tcx>) - -> 
ty::PolyTraitRef<'tcx> - { - infcx.resolve_type_vars_if_possible(self) - } -} - -fn lifetimes_in_scope(tcx: &ty::ctxt, - scope_id: ast::NodeId) - -> Vec { - let mut taken = Vec::new(); - let parent = tcx.map.get_parent(scope_id); - let method_id_opt = match tcx.map.find(parent) { - Some(node) => match node { - ast_map::NodeItem(item) => match item.node { - hir::ItemFn(_, _, _, _, ref gen, _) => { - taken.extend_from_slice(&gen.lifetimes); - None - }, - _ => None - }, - ast_map::NodeImplItem(ii) => { - match ii.node { - hir::ImplItemKind::Method(ref sig, _) => { - taken.extend_from_slice(&sig.generics.lifetimes); - Some(ii.id) - } - _ => None, - } - } - _ => None - }, - None => None - }; - if method_id_opt.is_some() { - let method_id = method_id_opt.unwrap(); - let parent = tcx.map.get_parent(method_id); - match tcx.map.find(parent) { - Some(node) => match node { - ast_map::NodeItem(item) => match item.node { - hir::ItemImpl(_, _, ref gen, _, _, _) => { - taken.extend_from_slice(&gen.lifetimes); - } - _ => () - }, - _ => () - }, - None => () - } - } - return taken; -} - -// LifeGiver is responsible for generating fresh lifetime names -struct LifeGiver { - taken: HashSet, - counter: Cell, - generated: RefCell>, -} - -impl LifeGiver { - fn with_taken(taken: &[hir::LifetimeDef]) -> LifeGiver { - let mut taken_ = HashSet::new(); - for lt in taken { - let lt_name = lt.lifetime.name.to_string(); - taken_.insert(lt_name); - } - LifeGiver { - taken: taken_, - counter: Cell::new(0), - generated: RefCell::new(Vec::new()), - } - } - - fn inc_counter(&self) { - let c = self.counter.get(); - self.counter.set(c+1); - } - - fn give_lifetime(&self) -> hir::Lifetime { - let lifetime; - loop { - let mut s = String::from("'"); - s.push_str(&num_to_string(self.counter.get())); - if !self.taken.contains(&s) { - lifetime = name_to_dummy_lifetime(token::intern(&s[..])); - self.generated.borrow_mut().push(lifetime); - break; - } - self.inc_counter(); - } - self.inc_counter(); - return 
lifetime; - - // 0 .. 25 generates a .. z, 26 .. 51 generates aa .. zz, and so on - fn num_to_string(counter: usize) -> String { - let mut s = String::new(); - let (n, r) = (counter/26 + 1, counter % 26); - let letter: char = from_u32((r+97) as u32).unwrap(); - for _ in 0..n { - s.push(letter); - } - s - } - } - - fn get_generated_lifetimes(&self) -> Vec { - self.generated.borrow().clone() - } -} - -fn name_to_dummy_lifetime(name: ast::Name) -> hir::Lifetime { - hir::Lifetime { id: ast::DUMMY_NODE_ID, - span: codemap::DUMMY_SP, - name: name } -} diff --git a/src/librustc/middle/infer/glb.rs b/src/librustc/middle/infer/glb.rs deleted file mode 100644 index 0035f31e8db94..0000000000000 --- a/src/librustc/middle/infer/glb.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::combine::CombineFields; -use super::higher_ranked::HigherRankedRelations; -use super::InferCtxt; -use super::lattice::{self, LatticeDir}; -use super::Subtype; - -use middle::ty::{self, Ty}; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; - -/// "Greatest lower bound" (common subtype) -pub struct Glb<'a, 'tcx: 'a> { - fields: CombineFields<'a, 'tcx> -} - -impl<'a, 'tcx> Glb<'a, 'tcx> { - pub fn new(fields: CombineFields<'a, 'tcx>) -> Glb<'a, 'tcx> { - Glb { fields: fields } - } -} - -impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Glb<'a, 'tcx> { - fn tag(&self) -> &'static str { "Glb" } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() } - - fn a_is_expected(&self) -> bool { self.fields.a_is_expected } - - fn relate_with_variance>(&mut self, - variance: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - match variance { - ty::Invariant => self.fields.equate().relate(a, b), - ty::Covariant => self.relate(a, b), - ty::Bivariant => self.fields.bivariate().relate(a, b), - ty::Contravariant => self.fields.lub().relate(a, b), - } - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - lattice::super_lattice_tys(self, a, b) - } - - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { - debug!("{}.regions({:?}, {:?})", - self.tag(), - a, - b); - - let origin = Subtype(self.fields.trace.clone()); - Ok(self.fields.infcx.region_vars.glb_regions(origin, a, b)) - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a, 'tcx> - { - self.fields.higher_ranked_glb(a, b) - } -} - -impl<'a, 'tcx> LatticeDir<'a,'tcx> for Glb<'a, 'tcx> { - fn infcx(&self) -> &'a InferCtxt<'a,'tcx> { - self.fields.infcx - } - - fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { - let mut sub = self.fields.sub(); - try!(sub.relate(&v, &a)); - try!(sub.relate(&v, &b)); - Ok(()) - } -} diff --git 
a/src/librustc/middle/infer/higher_ranked/mod.rs b/src/librustc/middle/infer/higher_ranked/mod.rs deleted file mode 100644 index e8f542db933cb..0000000000000 --- a/src/librustc/middle/infer/higher_ranked/mod.rs +++ /dev/null @@ -1,614 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Helper routines for higher-ranked things. See the `doc` module at -//! the end of the file for details. - -use super::{CombinedSnapshot, InferCtxt, HigherRankedType, SkolemizationMap}; -use super::combine::CombineFields; - -use middle::ty::{self, Binder, TypeFoldable}; -use middle::ty::error::TypeError; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; -use syntax::codemap::Span; -use util::nodemap::{FnvHashMap, FnvHashSet}; - -pub trait HigherRankedRelations<'a,'tcx> { - fn higher_ranked_sub(&self, a: &Binder, b: &Binder) -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx>; - - fn higher_ranked_lub(&self, a: &Binder, b: &Binder) -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx>; - - fn higher_ranked_glb(&self, a: &Binder, b: &Binder) -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx>; -} - -trait InferCtxtExt { - fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec; - - fn region_vars_confined_to_snapshot(&self, - snapshot: &CombinedSnapshot) - -> Vec; -} - -impl<'a,'tcx> HigherRankedRelations<'a,'tcx> for CombineFields<'a,'tcx> { - fn higher_ranked_sub(&self, a: &Binder, b: &Binder) - -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx> - { - debug!("higher_ranked_sub(a={:?}, b={:?})", - a, b); - - // Rather than checking the subtype relationship between `a` and `b` - // as-is, we need to do some extra work 
here in order to make sure - // that function subtyping works correctly with respect to regions - // - // Note: this is a subtle algorithm. For a full explanation, - // please see the large comment at the end of the file in the (inlined) module - // `doc`. - - // Start a snapshot so we can examine "all bindings that were - // created as part of this type comparison". - return self.infcx.commit_if_ok(|snapshot| { - // First, we instantiate each bound region in the subtype with a fresh - // region variable. - let (a_prime, _) = - self.infcx.replace_late_bound_regions_with_fresh_var( - self.trace.origin.span(), - HigherRankedType, - a); - - // Second, we instantiate each bound region in the supertype with a - // fresh concrete region. - let (b_prime, skol_map) = - self.infcx.skolemize_late_bound_regions(b, snapshot); - - debug!("a_prime={:?}", a_prime); - debug!("b_prime={:?}", b_prime); - - // Compare types now that bound regions have been replaced. - let result = try!(self.sub().relate(&a_prime, &b_prime)); - - // Presuming type comparison succeeds, we need to check - // that the skolemized regions do not "leak". - match leak_check(self.infcx, &skol_map, snapshot) { - Ok(()) => { } - Err((skol_br, tainted_region)) => { - if self.a_is_expected { - debug!("Not as polymorphic!"); - return Err(TypeError::RegionsInsufficientlyPolymorphic(skol_br, - tainted_region)); - } else { - debug!("Overly polymorphic!"); - return Err(TypeError::RegionsOverlyPolymorphic(skol_br, - tainted_region)); - } - } - } - - debug!("higher_ranked_sub: OK result={:?}", - result); - - Ok(ty::Binder(result)) - }); - } - - fn higher_ranked_lub(&self, a: &Binder, b: &Binder) -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx> - { - // Start a snapshot so we can examine "all bindings that were - // created as part of this type comparison". - return self.infcx.commit_if_ok(|snapshot| { - // Instantiate each bound region with a fresh region variable. 
- let span = self.trace.origin.span(); - let (a_with_fresh, a_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - span, HigherRankedType, a); - let (b_with_fresh, _) = - self.infcx.replace_late_bound_regions_with_fresh_var( - span, HigherRankedType, b); - - // Collect constraints. - let result0 = - try!(self.lub().relate(&a_with_fresh, &b_with_fresh)); - let result0 = - self.infcx.resolve_type_vars_if_possible(&result0); - debug!("lub result0 = {:?}", result0); - - // Generalize the regions appearing in result0 if possible - let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); - let span = self.trace.origin.span(); - let result1 = - fold_regions_in( - self.tcx(), - &result0, - |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, - &new_vars, &a_map, r)); - - debug!("lub({:?},{:?}) = {:?}", - a, - b, - result1); - - Ok(ty::Binder(result1)) - }); - - fn generalize_region(infcx: &InferCtxt, - span: Span, - snapshot: &CombinedSnapshot, - debruijn: ty::DebruijnIndex, - new_vars: &[ty::RegionVid], - a_map: &FnvHashMap, - r0: ty::Region) - -> ty::Region { - // Regions that pre-dated the LUB computation stay as they are. - if !is_var_in_set(new_vars, r0) { - assert!(!r0.is_bound()); - debug!("generalize_region(r0={:?}): not new variable", r0); - return r0; - } - - let tainted = infcx.tainted_regions(snapshot, r0); - - // Variables created during LUB computation which are - // *related* to regions that pre-date the LUB computation - // stay as they are. - if !tainted.iter().all(|r| is_var_in_set(new_vars, *r)) { - debug!("generalize_region(r0={:?}): \ - non-new-variables found in {:?}", - r0, tainted); - assert!(!r0.is_bound()); - return r0; - } - - // Otherwise, the variable must be associated with at - // least one of the variables representing bound regions - // in both A and B. Replace the variable with the "first" - // bound region from A that we find it to be associated - // with. 
- for (a_br, a_r) in a_map { - if tainted.iter().any(|x| x == a_r) { - debug!("generalize_region(r0={:?}): \ - replacing with {:?}, tainted={:?}", - r0, *a_br, tainted); - return ty::ReLateBound(debruijn, *a_br); - } - } - - infcx.tcx.sess.span_bug( - span, - &format!("region {:?} is not associated with \ - any bound region from A!", - r0)) - } - } - - fn higher_ranked_glb(&self, a: &Binder, b: &Binder) -> RelateResult<'tcx, Binder> - where T: Relate<'a,'tcx> - { - debug!("higher_ranked_glb({:?}, {:?})", - a, b); - - // Make a snapshot so we can examine "all bindings that were - // created as part of this type comparison". - return self.infcx.commit_if_ok(|snapshot| { - // Instantiate each bound region with a fresh region variable. - let (a_with_fresh, a_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - self.trace.origin.span(), HigherRankedType, a); - let (b_with_fresh, b_map) = - self.infcx.replace_late_bound_regions_with_fresh_var( - self.trace.origin.span(), HigherRankedType, b); - let a_vars = var_ids(self, &a_map); - let b_vars = var_ids(self, &b_map); - - // Collect constraints. 
- let result0 = - try!(self.glb().relate(&a_with_fresh, &b_with_fresh)); - let result0 = - self.infcx.resolve_type_vars_if_possible(&result0); - debug!("glb result0 = {:?}", result0); - - // Generalize the regions appearing in result0 if possible - let new_vars = self.infcx.region_vars_confined_to_snapshot(snapshot); - let span = self.trace.origin.span(); - let result1 = - fold_regions_in( - self.tcx(), - &result0, - |r, debruijn| generalize_region(self.infcx, span, snapshot, debruijn, - &new_vars, - &a_map, &a_vars, &b_vars, - r)); - - debug!("glb({:?},{:?}) = {:?}", - a, - b, - result1); - - Ok(ty::Binder(result1)) - }); - - fn generalize_region(infcx: &InferCtxt, - span: Span, - snapshot: &CombinedSnapshot, - debruijn: ty::DebruijnIndex, - new_vars: &[ty::RegionVid], - a_map: &FnvHashMap, - a_vars: &[ty::RegionVid], - b_vars: &[ty::RegionVid], - r0: ty::Region) -> ty::Region { - if !is_var_in_set(new_vars, r0) { - assert!(!r0.is_bound()); - return r0; - } - - let tainted = infcx.tainted_regions(snapshot, r0); - - let mut a_r = None; - let mut b_r = None; - let mut only_new_vars = true; - for r in &tainted { - if is_var_in_set(a_vars, *r) { - if a_r.is_some() { - return fresh_bound_variable(infcx, debruijn); - } else { - a_r = Some(*r); - } - } else if is_var_in_set(b_vars, *r) { - if b_r.is_some() { - return fresh_bound_variable(infcx, debruijn); - } else { - b_r = Some(*r); - } - } else if !is_var_in_set(new_vars, *r) { - only_new_vars = false; - } - } - - // NB---I do not believe this algorithm computes - // (necessarily) the GLB. As written it can - // spuriously fail. In particular, if there is a case - // like: |fn(&a)| and fn(fn(&b)), where a and b are - // free, it will return fn(&c) where c = GLB(a,b). If - // however this GLB is not defined, then the result is - // an error, even though something like - // "fn(fn(&X))" where X is bound would be a - // subtype of both of those. 
- // - // The problem is that if we were to return a bound - // variable, we'd be computing a lower-bound, but not - // necessarily the *greatest* lower-bound. - // - // Unfortunately, this problem is non-trivial to solve, - // because we do not know at the time of computing the GLB - // whether a GLB(a,b) exists or not, because we haven't - // run region inference (or indeed, even fully computed - // the region hierarchy!). The current algorithm seems to - // works ok in practice. - - if a_r.is_some() && b_r.is_some() && only_new_vars { - // Related to exactly one bound variable from each fn: - return rev_lookup(infcx, span, a_map, a_r.unwrap()); - } else if a_r.is_none() && b_r.is_none() { - // Not related to bound variables from either fn: - assert!(!r0.is_bound()); - return r0; - } else { - // Other: - return fresh_bound_variable(infcx, debruijn); - } - } - - fn rev_lookup(infcx: &InferCtxt, - span: Span, - a_map: &FnvHashMap, - r: ty::Region) -> ty::Region - { - for (a_br, a_r) in a_map { - if *a_r == r { - return ty::ReLateBound(ty::DebruijnIndex::new(1), *a_br); - } - } - infcx.tcx.sess.span_bug( - span, - &format!("could not find original bound region for {:?}", r)); - } - - fn fresh_bound_variable(infcx: &InferCtxt, debruijn: ty::DebruijnIndex) -> ty::Region { - infcx.region_vars.new_bound(debruijn) - } - } -} - -fn var_ids<'a, 'tcx>(fields: &CombineFields<'a, 'tcx>, - map: &FnvHashMap) - -> Vec { - map.iter() - .map(|(_, r)| match *r { - ty::ReVar(r) => { r } - r => { - fields.tcx().sess.span_bug( - fields.trace.origin.span(), - &format!("found non-region-vid: {:?}", r)); - } - }) - .collect() -} - -fn is_var_in_set(new_vars: &[ty::RegionVid], r: ty::Region) -> bool { - match r { - ty::ReVar(ref v) => new_vars.iter().any(|x| x == v), - _ => false - } -} - -fn fold_regions_in<'tcx, T, F>(tcx: &ty::ctxt<'tcx>, - unbound_value: &T, - mut fldr: F) - -> T - where T: TypeFoldable<'tcx>, - F: FnMut(ty::Region, ty::DebruijnIndex) -> ty::Region, -{ - 
tcx.fold_regions(unbound_value, &mut false, |region, current_depth| { - // we should only be encountering "escaping" late-bound regions here, - // because the ones at the current level should have been replaced - // with fresh variables - assert!(match region { - ty::ReLateBound(..) => false, - _ => true - }); - - fldr(region, ty::DebruijnIndex::new(current_depth)) - }) -} - -impl<'a,'tcx> InferCtxtExt for InferCtxt<'a,'tcx> { - fn tainted_regions(&self, snapshot: &CombinedSnapshot, r: ty::Region) -> Vec { - self.region_vars.tainted(&snapshot.region_vars_snapshot, r) - } - - fn region_vars_confined_to_snapshot(&self, - snapshot: &CombinedSnapshot) - -> Vec - { - /*! - * Returns the set of region variables that do not affect any - * types/regions which existed before `snapshot` was - * started. This is used in the sub/lub/glb computations. The - * idea here is that when we are computing lub/glb of two - * regions, we sometimes create intermediate region variables. - * Those region variables may touch some of the skolemized or - * other "forbidden" regions we created to replace bound - * regions, but they don't really represent an "external" - * constraint. - * - * However, sometimes fresh variables are created for other - * purposes too, and those *may* represent an external - * constraint. In particular, when a type variable is - * instantiated, we create region variables for all the - * regions that appear within, and if that type variable - * pre-existed the snapshot, then those region variables - * represent external constraints. - * - * An example appears in the unit test - * `sub_free_bound_false_infer`. In this test, we want to - * know whether - * - * ```rust - * fn(_#0t) <: for<'a> fn(&'a int) - * ``` - * - * Note that the subtype has a type variable. Because the type - * variable can't be instantiated with a region that is bound - * in the fn signature, this comparison ought to fail. But if - * we're not careful, it will succeed. 
- * - * The reason is that when we walk through the subtyping - * algorith, we begin by replacing `'a` with a skolemized - * variable `'1`. We then have `fn(_#0t) <: fn(&'1 int)`. This - * can be made true by unifying `_#0t` with `&'1 int`. In the - * process, we create a fresh variable for the skolemized - * region, `'$2`, and hence we have that `_#0t == &'$2 - * int`. However, because `'$2` was created during the sub - * computation, if we're not careful we will erroneously - * assume it is one of the transient region variables - * representing a lub/glb internally. Not good. - * - * To prevent this, we check for type variables which were - * unified during the snapshot, and say that any region - * variable created during the snapshot but which finds its - * way into a type variable is considered to "escape" the - * snapshot. - */ - - let mut region_vars = - self.region_vars.vars_created_since_snapshot(&snapshot.region_vars_snapshot); - - let escaping_types = - self.type_variables.borrow().types_escaping_snapshot(&snapshot.type_snapshot); - - let mut escaping_region_vars = FnvHashSet(); - for ty in &escaping_types { - self.tcx.collect_regions(ty, &mut escaping_region_vars); - } - - region_vars.retain(|®ion_vid| { - let r = ty::ReVar(region_vid); - !escaping_region_vars.contains(&r) - }); - - debug!("region_vars_confined_to_snapshot: region_vars={:?} escaping_types={:?}", - region_vars, - escaping_types); - - region_vars - } -} - -pub fn skolemize_late_bound_regions<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, - binder: &ty::Binder, - snapshot: &CombinedSnapshot) - -> (T, SkolemizationMap) - where T : TypeFoldable<'tcx> -{ - /*! - * Replace all regions bound by `binder` with skolemized regions and - * return a map indicating which bound-region was replaced with what - * skolemized region. This is the first step of checking subtyping - * when higher-ranked things are involved. See `README.md` for more - * details. 
- */ - - let (result, map) = infcx.tcx.replace_late_bound_regions(binder, |br| { - infcx.region_vars.new_skolemized(br, &snapshot.region_vars_snapshot) - }); - - debug!("skolemize_bound_regions(binder={:?}, result={:?}, map={:?})", - binder, - result, - map); - - (result, map) -} - -pub fn leak_check<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - skol_map: &SkolemizationMap, - snapshot: &CombinedSnapshot) - -> Result<(),(ty::BoundRegion,ty::Region)> -{ - /*! - * Searches the region constriants created since `snapshot` was started - * and checks to determine whether any of the skolemized regions created - * in `skol_map` would "escape" -- meaning that they are related to - * other regions in some way. If so, the higher-ranked subtyping doesn't - * hold. See `README.md` for more details. - */ - - debug!("leak_check: skol_map={:?}", - skol_map); - - let new_vars = infcx.region_vars_confined_to_snapshot(snapshot); - for (&skol_br, &skol) in skol_map { - let tainted = infcx.tainted_regions(snapshot, skol); - for &tainted_region in &tainted { - // Each skolemized should only be relatable to itself - // or new variables: - match tainted_region { - ty::ReVar(vid) => { - if new_vars.iter().any(|&x| x == vid) { continue; } - } - _ => { - if tainted_region == skol { continue; } - } - }; - - debug!("{:?} (which replaced {:?}) is tainted by {:?}", - skol, - skol_br, - tainted_region); - - // A is not as polymorphic as B: - return Err((skol_br, tainted_region)); - } - } - Ok(()) -} - -/// This code converts from skolemized regions back to late-bound -/// regions. It works by replacing each region in the taint set of a -/// skolemized region with a bound-region. The bound region will be bound -/// by the outer-most binder in `value`; the caller must ensure that there is -/// such a binder and it is the right place. 
-/// -/// This routine is only intended to be used when the leak-check has -/// passed; currently, it's used in the trait matching code to create -/// a set of nested obligations frmo an impl that matches against -/// something higher-ranked. More details can be found in -/// `librustc/middle/traits/README.md`. -/// -/// As a brief example, consider the obligation `for<'a> Fn(&'a int) -/// -> &'a int`, and the impl: -/// -/// impl Fn for SomethingOrOther -/// where A : Clone -/// { ... } -/// -/// Here we will have replaced `'a` with a skolemized region -/// `'0`. This means that our substitution will be `{A=>&'0 -/// int, R=>&'0 int}`. -/// -/// When we apply the substitution to the bounds, we will wind up with -/// `&'0 int : Clone` as a predicate. As a last step, we then go and -/// replace `'0` with a late-bound region `'a`. The depth is matched -/// to the depth of the predicate, in this case 1, so that the final -/// predicate is `for<'a> &'a int : Clone`. -pub fn plug_leaks<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, - skol_map: SkolemizationMap, - snapshot: &CombinedSnapshot, - value: &T) - -> T - where T : TypeFoldable<'tcx> -{ - debug_assert!(leak_check(infcx, &skol_map, snapshot).is_ok()); - - debug!("plug_leaks(skol_map={:?}, value={:?})", - skol_map, - value); - - // Compute a mapping from the "taint set" of each skolemized - // region back to the `ty::BoundRegion` that it originally - // represented. Because `leak_check` passed, we know that - // these taint sets are mutually disjoint. - let inv_skol_map: FnvHashMap = - skol_map - .into_iter() - .flat_map(|(skol_br, skol)| { - infcx.tainted_regions(snapshot, skol) - .into_iter() - .map(move |tainted_region| (tainted_region, skol_br)) - }) - .collect(); - - debug!("plug_leaks: inv_skol_map={:?}", - inv_skol_map); - - // Remove any instantiated type variables from `value`; those can hide - // references to regions from the `fold_regions` code below. 
- let value = infcx.resolve_type_vars_if_possible(value); - - // Map any skolemization byproducts back to a late-bound - // region. Put that late-bound region at whatever the outermost - // binder is that we encountered in `value`. The caller is - // responsible for ensuring that (a) `value` contains at least one - // binder and (b) that binder is the one we want to use. - let result = infcx.tcx.fold_regions(&value, &mut false, |r, current_depth| { - match inv_skol_map.get(&r) { - None => r, - Some(br) => { - // It is the responsibility of the caller to ensure - // that each skolemized region appears within a - // binder. In practice, this routine is only used by - // trait checking, and all of the skolemized regions - // appear inside predicates, which always have - // binders, so this assert is satisfied. - assert!(current_depth > 1); - - ty::ReLateBound(ty::DebruijnIndex::new(current_depth - 1), br.clone()) - } - } - }); - - debug!("plug_leaks: result={:?}", - result); - - result -} diff --git a/src/librustc/middle/infer/lattice.rs b/src/librustc/middle/infer/lattice.rs deleted file mode 100644 index 2a560ec8a1d23..0000000000000 --- a/src/librustc/middle/infer/lattice.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Lattice Variables -//! -//! This file contains generic code for operating on inference variables -//! that are characterized by an upper- and lower-bound. The logic and -//! reasoning is explained in detail in the large comment in `infer.rs`. -//! -//! The code in here is defined quite generically so that it can be -//! applied both to type variables, which represent types being inferred, -//! 
and fn variables, which represent function types being inferred. -//! It may eventually be applied to their types as well, who knows. -//! In some cases, the functions are also generic with respect to the -//! operation on the lattice (GLB vs LUB). -//! -//! Although all the functions are generic, we generally write the -//! comments in a way that is specific to type variables and the LUB -//! operation. It's just easier that way. -//! -//! In general all of the functions are defined parametrically -//! over a `LatticeValue`, which is a value defined with respect to -//! a lattice. - -use super::combine; -use super::InferCtxt; - -use middle::ty::TyVar; -use middle::ty::{self, Ty}; -use middle::ty::relate::{RelateResult, TypeRelation}; - -pub trait LatticeDir<'f,'tcx> : TypeRelation<'f,'tcx> { - fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>; - - // Relates the type `v` to `a` and `b` such that `v` represents - // the LUB/GLB of `a` and `b` as appropriate. - fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>; -} - -pub fn super_lattice_tys<'a,'tcx,L:LatticeDir<'a,'tcx>>(this: &mut L, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> RelateResult<'tcx, Ty<'tcx>> - where 'tcx: 'a -{ - debug!("{}.lattice_tys({:?}, {:?})", - this.tag(), - a, - b); - - if a == b { - return Ok(a); - } - - let infcx = this.infcx(); - let a = infcx.type_variables.borrow().replace_if_possible(a); - let b = infcx.type_variables.borrow().replace_if_possible(b); - match (&a.sty, &b.sty) { - (&ty::TyInfer(TyVar(..)), &ty::TyInfer(TyVar(..))) - if infcx.type_var_diverges(a) && infcx.type_var_diverges(b) => { - let v = infcx.next_diverging_ty_var(); - try!(this.relate_bound(v, a, b)); - Ok(v) - } - - (&ty::TyInfer(TyVar(..)), _) | - (_, &ty::TyInfer(TyVar(..))) => { - let v = infcx.next_ty_var(); - try!(this.relate_bound(v, a, b)); - Ok(v) - } - - _ => { - combine::super_combine_tys(this.infcx(), this, a, b) - } - } -} diff --git a/src/librustc/middle/infer/lub.rs 
b/src/librustc/middle/infer/lub.rs deleted file mode 100644 index 238dad65ef0d9..0000000000000 --- a/src/librustc/middle/infer/lub.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::combine::CombineFields; -use super::higher_ranked::HigherRankedRelations; -use super::InferCtxt; -use super::lattice::{self, LatticeDir}; -use super::Subtype; - -use middle::ty::{self, Ty}; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; - -/// "Least upper bound" (common supertype) -pub struct Lub<'a, 'tcx: 'a> { - fields: CombineFields<'a, 'tcx> -} - -impl<'a, 'tcx> Lub<'a, 'tcx> { - pub fn new(fields: CombineFields<'a, 'tcx>) -> Lub<'a, 'tcx> { - Lub { fields: fields } - } -} - -impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Lub<'a, 'tcx> { - fn tag(&self) -> &'static str { "Lub" } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.tcx() } - - fn a_is_expected(&self) -> bool { self.fields.a_is_expected } - - fn relate_with_variance>(&mut self, - variance: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - match variance { - ty::Invariant => self.fields.equate().relate(a, b), - ty::Covariant => self.relate(a, b), - ty::Bivariant => self.fields.bivariate().relate(a, b), - ty::Contravariant => self.fields.glb().relate(a, b), - } - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - lattice::super_lattice_tys(self, a, b) - } - - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { - debug!("{}.regions({:?}, {:?})", - self.tag(), - a, - b); - - let origin = Subtype(self.fields.trace.clone()); - Ok(self.fields.infcx.region_vars.lub_regions(origin, 
a, b)) - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a, 'tcx> - { - self.fields.higher_ranked_lub(a, b) - } -} - -impl<'a, 'tcx> LatticeDir<'a,'tcx> for Lub<'a, 'tcx> { - fn infcx(&self) -> &'a InferCtxt<'a,'tcx> { - self.fields.infcx - } - - fn relate_bound(&self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> { - let mut sub = self.fields.sub(); - try!(sub.relate(&a, &v)); - try!(sub.relate(&b, &v)); - Ok(()) - } -} diff --git a/src/librustc/middle/infer/mod.rs b/src/librustc/middle/infer/mod.rs deleted file mode 100644 index 15e368812f25d..0000000000000 --- a/src/librustc/middle/infer/mod.rs +++ /dev/null @@ -1,1639 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! See the Book for more information. 
- -pub use self::LateBoundRegionConversionTime::*; -pub use self::RegionVariableOrigin::*; -pub use self::SubregionOrigin::*; -pub use self::ValuePairs::*; -pub use middle::ty::IntVarValue; -pub use self::freshen::TypeFreshener; -pub use self::region_inference::{GenericKind, VerifyBound}; - -use middle::def_id::DefId; -use rustc_front::hir; -use middle::free_region::FreeRegionMap; -use middle::mem_categorization as mc; -use middle::mem_categorization::McResult; -use middle::region::CodeExtent; -use middle::subst; -use middle::subst::Substs; -use middle::subst::Subst; -use middle::traits; -use middle::ty::adjustment; -use middle::ty::{TyVid, IntVid, FloatVid}; -use middle::ty::{self, Ty}; -use middle::ty::error::{ExpectedFound, TypeError, UnconstrainedNumeric}; -use middle::ty::fold::{TypeFolder, TypeFoldable}; -use middle::ty::relate::{Relate, RelateResult, TypeRelation}; -use rustc_data_structures::unify::{self, UnificationTable}; -use std::cell::{RefCell, Ref}; -use std::fmt; -use syntax::ast; -use syntax::codemap; -use syntax::codemap::{Span, DUMMY_SP}; -use syntax::errors::DiagnosticBuilder; -use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap}; - -use self::combine::CombineFields; -use self::region_inference::{RegionVarBindings, RegionSnapshot}; -use self::error_reporting::ErrorReporting; -use self::unify_key::ToType; - -pub mod bivariate; -pub mod combine; -pub mod equate; -pub mod error_reporting; -pub mod glb; -mod higher_ranked; -pub mod lattice; -pub mod lub; -pub mod region_inference; -pub mod resolve; -mod freshen; -pub mod sub; -pub mod type_variable; -pub mod unify_key; - -pub type Bound = Option; -pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result" -pub type FixupResult = Result; // "fixup result" - -pub struct InferCtxt<'a, 'tcx: 'a> { - pub tcx: &'a ty::ctxt<'tcx>, - - pub tables: &'a RefCell>, - - // We instantiate UnificationTable with bounds because the - // types that might instantiate a general type variable have an - // 
order, represented by its upper and lower bounds. - type_variables: RefCell>, - - // Map from integral variable to the kind of integer it represents - int_unification_table: RefCell>, - - // Map from floating variable to the kind of float it represents - float_unification_table: RefCell>, - - // For region variables. - region_vars: RegionVarBindings<'a, 'tcx>, - - pub parameter_environment: ty::ParameterEnvironment<'a, 'tcx>, - - pub fulfillment_cx: RefCell>, - - // the set of predicates on which errors have been reported, to - // avoid reporting the same error twice. - pub reported_trait_errors: RefCell>>, - - // This is a temporary field used for toggling on normalization in the inference context, - // as we move towards the approach described here: - // https://internals.rust-lang.org/t/flattening-the-contexts-for-fun-and-profit/2293 - // At a point sometime in the future normalization will be done by the typing context - // directly. - normalize: bool, - - err_count_on_creation: usize, -} - -/// A map returned by `skolemize_late_bound_regions()` indicating the skolemized -/// region that each late-bound region was replaced with. -pub type SkolemizationMap = FnvHashMap; - -/// Why did we require that the two types be related? -/// -/// See `error_reporting.rs` for more details -#[derive(Clone, Copy, Debug)] -pub enum TypeOrigin { - // Not yet categorized in a better way - Misc(Span), - - // Checking that method of impl is compatible with trait - MethodCompatCheck(Span), - - // Checking that this expression can be assigned where it needs to be - // FIXME(eddyb) #11161 is the original Expr required? 
- ExprAssignable(Span), - - // Relating trait refs when resolving vtables - RelateTraitRefs(Span), - - // Relating self types when resolving vtables - RelateSelfType(Span), - - // Relating trait type parameters to those found in impl etc - RelateOutputImplTypes(Span), - - // Computing common supertype in the arms of a match expression - MatchExpressionArm(Span, Span, hir::MatchSource), - - // Computing common supertype in an if expression - IfExpression(Span), - - // Computing common supertype of an if expression with no else counter-part - IfExpressionWithNoElse(Span), - - // Computing common supertype in a range expression - RangeExpression(Span), - - // `where a == b` - EquatePredicate(Span), -} - -impl TypeOrigin { - fn as_str(&self) -> &'static str { - match self { - &TypeOrigin::Misc(_) | - &TypeOrigin::RelateSelfType(_) | - &TypeOrigin::RelateOutputImplTypes(_) | - &TypeOrigin::ExprAssignable(_) => "mismatched types", - &TypeOrigin::RelateTraitRefs(_) => "mismatched traits", - &TypeOrigin::MethodCompatCheck(_) => "method not compatible with trait", - &TypeOrigin::MatchExpressionArm(_, _, source) => match source { - hir::MatchSource::IfLetDesugar{..} => "`if let` arms have incompatible types", - _ => "match arms have incompatible types", - }, - &TypeOrigin::IfExpression(_) => "if and else have incompatible types", - &TypeOrigin::IfExpressionWithNoElse(_) => "if may be missing an else clause", - &TypeOrigin::RangeExpression(_) => "start and end of range have incompatible types", - &TypeOrigin::EquatePredicate(_) => "equality predicate not satisfied", - } - } -} - -impl fmt::Display for TypeOrigin { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(),fmt::Error> { - fmt::Display::fmt(self.as_str(), f) - } -} - -/// See `error_reporting.rs` for more details -#[derive(Clone, Debug)] -pub enum ValuePairs<'tcx> { - Types(ExpectedFound>), - TraitRefs(ExpectedFound>), - PolyTraitRefs(ExpectedFound>), -} - -/// The trace designates the path through inference that we 
took to -/// encounter an error or subtyping constraint. -/// -/// See `error_reporting.rs` for more details. -#[derive(Clone)] -pub struct TypeTrace<'tcx> { - origin: TypeOrigin, - values: ValuePairs<'tcx>, -} - -/// The origin of a `r1 <= r2` constraint. -/// -/// See `error_reporting.rs` for more details -#[derive(Clone, Debug)] -pub enum SubregionOrigin<'tcx> { - // Arose from a subtyping relation - Subtype(TypeTrace<'tcx>), - - // Stack-allocated closures cannot outlive innermost loop - // or function so as to ensure we only require finite stack - InfStackClosure(Span), - - // Invocation of closure must be within its lifetime - InvokeClosure(Span), - - // Dereference of reference must be within its lifetime - DerefPointer(Span), - - // Closure bound must not outlive captured free variables - FreeVariable(Span, ast::NodeId), - - // Index into slice must be within its lifetime - IndexSlice(Span), - - // When casting `&'a T` to an `&'b Trait` object, - // relating `'a` to `'b` - RelateObjectBound(Span), - - // Some type parameter was instantiated with the given type, - // and that type must outlive some region. - RelateParamBound(Span, Ty<'tcx>), - - // The given region parameter was instantiated with a region - // that must outlive some other region. - RelateRegionParamBound(Span), - - // A bound placed on type parameters that states that must outlive - // the moment of their instantiation. - RelateDefaultParamBound(Span, Ty<'tcx>), - - // Creating a pointer `b` to contents of another reference - Reborrow(Span), - - // Creating a pointer `b` to contents of an upvar - ReborrowUpvar(Span, ty::UpvarId), - - // Data with type `Ty<'tcx>` was borrowed - DataBorrowed(Ty<'tcx>, Span), - - // (&'a &'b T) where a >= b - ReferenceOutlivesReferent(Ty<'tcx>, Span), - - // Type or region parameters must be in scope. - ParameterInScope(ParameterOrigin, Span), - - // The type T of an expression E must outlive the lifetime for E. 
- ExprTypeIsNotInScope(Ty<'tcx>, Span), - - // A `ref b` whose region does not enclose the decl site - BindingTypeIsNotValidAtDecl(Span), - - // Regions appearing in a method receiver must outlive method call - CallRcvr(Span), - - // Regions appearing in a function argument must outlive func call - CallArg(Span), - - // Region in return type of invoked fn must enclose call - CallReturn(Span), - - // Operands must be in scope - Operand(Span), - - // Region resulting from a `&` expr must enclose the `&` expr - AddrOf(Span), - - // An auto-borrow that does not enclose the expr where it occurs - AutoBorrow(Span), - - // Region constraint arriving from destructor safety - SafeDestructor(Span), -} - -/// Places that type/region parameters can appear. -#[derive(Clone, Copy, Debug)] -pub enum ParameterOrigin { - Path, // foo::bar - MethodCall, // foo.bar() <-- parameters on impl providing bar() - OverloadedOperator, // a + b when overloaded - OverloadedDeref, // *a when overloaded -} - -/// Times when we replace late-bound regions with variables: -#[derive(Clone, Copy, Debug)] -pub enum LateBoundRegionConversionTime { - /// when a fn is called - FnCall, - - /// when two higher-ranked types are compared - HigherRankedType, - - /// when projecting an associated type - AssocTypeProjection(ast::Name), -} - -/// Reasons to create a region inference variable -/// -/// See `error_reporting.rs` for more details -#[derive(Clone, Debug)] -pub enum RegionVariableOrigin { - // Region variables created for ill-categorized reasons, - // mostly indicates places in need of refactoring - MiscVariable(Span), - - // Regions created by a `&P` or `[...]` pattern - PatternRegion(Span), - - // Regions created by `&` operator - AddrOfRegion(Span), - - // Regions created as part of an autoref of a method receiver - Autoref(Span), - - // Regions created as part of an automatic coercion - Coercion(Span), - - // Region variables created as the values for early-bound regions - EarlyBoundRegion(Span, 
ast::Name), - - // Region variables created for bound regions - // in a function or method that is called - LateBoundRegion(Span, ty::BoundRegion, LateBoundRegionConversionTime), - - UpvarRegion(ty::UpvarId, Span), - - BoundRegionInCoherence(ast::Name), -} - -#[derive(Copy, Clone, Debug)] -pub enum FixupError { - UnresolvedIntTy(IntVid), - UnresolvedFloatTy(FloatVid), - UnresolvedTy(TyVid) -} - -pub fn fixup_err_to_string(f: FixupError) -> String { - use self::FixupError::*; - - match f { - UnresolvedIntTy(_) => { - "cannot determine the type of this integer; add a suffix to \ - specify the type explicitly".to_string() - } - UnresolvedFloatTy(_) => { - "cannot determine the type of this number; add a suffix to specify \ - the type explicitly".to_string() - } - UnresolvedTy(_) => "unconstrained type".to_string(), - } -} - -pub fn new_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>, - tables: &'a RefCell>, - param_env: Option>) - -> InferCtxt<'a, 'tcx> { - InferCtxt { - tcx: tcx, - tables: tables, - type_variables: RefCell::new(type_variable::TypeVariableTable::new()), - int_unification_table: RefCell::new(UnificationTable::new()), - float_unification_table: RefCell::new(UnificationTable::new()), - region_vars: RegionVarBindings::new(tcx), - parameter_environment: param_env.unwrap_or(tcx.empty_parameter_environment()), - fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()), - reported_trait_errors: RefCell::new(FnvHashSet()), - normalize: false, - err_count_on_creation: tcx.sess.err_count() - } -} - -pub fn normalizing_infer_ctxt<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>, - tables: &'a RefCell>) - -> InferCtxt<'a, 'tcx> { - let mut infcx = new_infer_ctxt(tcx, tables, None); - infcx.normalize = true; - infcx -} - -/// Computes the least upper-bound of `a` and `b`. If this is not possible, reports an error and -/// returns ty::err. 
-pub fn common_supertype<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - origin: TypeOrigin, - a_is_expected: bool, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> Ty<'tcx> -{ - debug!("common_supertype({:?}, {:?})", - a, b); - - let trace = TypeTrace { - origin: origin, - values: Types(expected_found(a_is_expected, a, b)) - }; - - let result = cx.commit_if_ok(|_| cx.lub(a_is_expected, trace.clone()).relate(&a, &b)); - match result { - Ok(t) => t, - Err(ref err) => { - cx.report_and_explain_type_error(trace, err); - cx.tcx.types.err - } - } -} - -pub fn mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - a_is_expected: bool, - origin: TypeOrigin, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> UnitResult<'tcx> -{ - debug!("mk_subty({:?} <: {:?})", a, b); - cx.sub_types(a_is_expected, origin, a, b) -} - -pub fn can_mk_subty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> UnitResult<'tcx> { - debug!("can_mk_subty({:?} <: {:?})", a, b); - cx.probe(|_| { - let trace = TypeTrace { - origin: TypeOrigin::Misc(codemap::DUMMY_SP), - values: Types(expected_found(true, a, b)) - }; - cx.sub(true, trace).relate(&a, &b).map(|_| ()) - }) -} - -pub fn can_mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) - -> UnitResult<'tcx> -{ - cx.can_equate(&a, &b) -} - -pub fn mk_subr<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - origin: SubregionOrigin<'tcx>, - a: ty::Region, - b: ty::Region) { - debug!("mk_subr({:?} <: {:?})", a, b); - let snapshot = cx.region_vars.start_snapshot(); - cx.region_vars.make_subregion(origin, a, b); - cx.region_vars.commit(snapshot); -} - -pub fn mk_eqty<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - a_is_expected: bool, - origin: TypeOrigin, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> UnitResult<'tcx> -{ - debug!("mk_eqty({:?} <: {:?})", a, b); - cx.commit_if_ok(|_| cx.eq_types(a_is_expected, origin, a, b)) -} - -pub fn mk_eq_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - a_is_expected: bool, - origin: TypeOrigin, - a: ty::TraitRef<'tcx>, - b: ty::TraitRef<'tcx>) - -> 
UnitResult<'tcx> -{ - debug!("mk_eq_trait_refs({:?} <: {:?})", - a, b); - cx.commit_if_ok(|_| cx.eq_trait_refs(a_is_expected, origin, a.clone(), b.clone())) -} - -pub fn mk_sub_poly_trait_refs<'a, 'tcx>(cx: &InferCtxt<'a, 'tcx>, - a_is_expected: bool, - origin: TypeOrigin, - a: ty::PolyTraitRef<'tcx>, - b: ty::PolyTraitRef<'tcx>) - -> UnitResult<'tcx> -{ - debug!("mk_sub_poly_trait_refs({:?} <: {:?})", - a, b); - cx.commit_if_ok(|_| cx.sub_poly_trait_refs(a_is_expected, origin, a.clone(), b.clone())) -} - -fn expected_found(a_is_expected: bool, - a: T, - b: T) - -> ExpectedFound -{ - if a_is_expected { - ExpectedFound {expected: a, found: b} - } else { - ExpectedFound {expected: b, found: a} - } -} - -#[must_use = "once you start a snapshot, you should always consume it"] -pub struct CombinedSnapshot { - type_snapshot: type_variable::Snapshot, - int_snapshot: unify::Snapshot, - float_snapshot: unify::Snapshot, - region_vars_snapshot: RegionSnapshot, -} - -pub fn normalize_associated_type<'tcx,T>(tcx: &ty::ctxt<'tcx>, value: &T) -> T - where T : TypeFoldable<'tcx> -{ - debug!("normalize_associated_type(t={:?})", value); - - let value = tcx.erase_regions(value); - - if !value.has_projection_types() { - return value; - } - - let infcx = new_infer_ctxt(tcx, &tcx.tables, None); - let mut selcx = traits::SelectionContext::new(&infcx); - let cause = traits::ObligationCause::dummy(); - let traits::Normalized { value: result, obligations } = - traits::normalize(&mut selcx, cause, &value); - - debug!("normalize_associated_type: result={:?} obligations={:?}", - result, - obligations); - - let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut(); - - for obligation in obligations { - fulfill_cx.register_predicate_obligation(&infcx, obligation); - } - - drain_fulfillment_cx_or_panic(DUMMY_SP, &infcx, &mut fulfill_cx, &result) -} - -pub fn drain_fulfillment_cx_or_panic<'a,'tcx,T>(span: Span, - infcx: &InferCtxt<'a,'tcx>, - fulfill_cx: &mut traits::FulfillmentContext<'tcx>, - 
result: &T) - -> T - where T : TypeFoldable<'tcx> -{ - match drain_fulfillment_cx(infcx, fulfill_cx, result) { - Ok(v) => v, - Err(errors) => { - infcx.tcx.sess.span_bug( - span, - &format!("Encountered errors `{:?}` fulfilling during trans", - errors)); - } - } -} - -/// Finishes processes any obligations that remain in the fulfillment -/// context, and then "freshens" and returns `result`. This is -/// primarily used during normalization and other cases where -/// processing the obligations in `fulfill_cx` may cause type -/// inference variables that appear in `result` to be unified, and -/// hence we need to process those obligations to get the complete -/// picture of the type. -pub fn drain_fulfillment_cx<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, - fulfill_cx: &mut traits::FulfillmentContext<'tcx>, - result: &T) - -> Result>> - where T : TypeFoldable<'tcx> -{ - debug!("drain_fulfillment_cx(result={:?})", - result); - - // In principle, we only need to do this so long as `result` - // contains unbound type parameters. It could be a slight - // optimization to stop iterating early. 
- match fulfill_cx.select_all_or_error(infcx) { - Ok(()) => { } - Err(errors) => { - return Err(errors); - } - } - - let result = infcx.resolve_type_vars_if_possible(result); - Ok(infcx.tcx.erase_regions(&result)) -} - -impl<'a, 'tcx> InferCtxt<'a, 'tcx> { - pub fn freshen>(&self, t: T) -> T { - t.fold_with(&mut self.freshener()) - } - - pub fn type_var_diverges(&'a self, ty: Ty) -> bool { - match ty.sty { - ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().var_diverges(vid), - _ => false - } - } - - pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> { - freshen::TypeFreshener::new(self) - } - - pub fn type_is_unconstrained_numeric(&'a self, ty: Ty) -> UnconstrainedNumeric { - use middle::ty::error::UnconstrainedNumeric::Neither; - use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; - match ty.sty { - ty::TyInfer(ty::IntVar(vid)) => { - if self.int_unification_table.borrow_mut().has_value(vid) { - Neither - } else { - UnconstrainedInt - } - }, - ty::TyInfer(ty::FloatVar(vid)) => { - if self.float_unification_table.borrow_mut().has_value(vid) { - Neither - } else { - UnconstrainedFloat - } - }, - _ => Neither, - } - } - - /// Returns a type variable's default fallback if any exists. A default - /// must be attached to the variable when created, if it is created - /// without a default, this will return None. - /// - /// This code does not apply to integral or floating point variables, - /// only to use declared defaults. - /// - /// See `new_ty_var_with_default` to create a type variable with a default. - /// See `type_variable::Default` for details about what a default entails. 
- pub fn default(&self, ty: Ty<'tcx>) -> Option> { - match ty.sty { - ty::TyInfer(ty::TyVar(vid)) => self.type_variables.borrow().default(vid), - _ => None - } - } - - pub fn unsolved_variables(&self) -> Vec> { - let mut variables = Vec::new(); - - let unbound_ty_vars = self.type_variables - .borrow() - .unsolved_variables() - .into_iter() - .map(|t| self.tcx.mk_var(t)); - - let unbound_int_vars = self.int_unification_table - .borrow_mut() - .unsolved_variables() - .into_iter() - .map(|v| self.tcx.mk_int_var(v)); - - let unbound_float_vars = self.float_unification_table - .borrow_mut() - .unsolved_variables() - .into_iter() - .map(|v| self.tcx.mk_float_var(v)); - - variables.extend(unbound_ty_vars); - variables.extend(unbound_int_vars); - variables.extend(unbound_float_vars); - - return variables; - } - - fn combine_fields(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>) - -> CombineFields<'a, 'tcx> { - CombineFields {infcx: self, - a_is_expected: a_is_expected, - trace: trace, - cause: None} - } - - // public so that it can be used from the rustc_driver unit tests - pub fn equate(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>) - -> equate::Equate<'a, 'tcx> - { - self.combine_fields(a_is_expected, trace).equate() - } - - // public so that it can be used from the rustc_driver unit tests - pub fn sub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>) - -> sub::Sub<'a, 'tcx> - { - self.combine_fields(a_is_expected, trace).sub() - } - - // public so that it can be used from the rustc_driver unit tests - pub fn lub(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>) - -> lub::Lub<'a, 'tcx> - { - self.combine_fields(a_is_expected, trace).lub() - } - - // public so that it can be used from the rustc_driver unit tests - pub fn glb(&'a self, a_is_expected: bool, trace: TypeTrace<'tcx>) - -> glb::Glb<'a, 'tcx> - { - self.combine_fields(a_is_expected, trace).glb() - } - - fn start_snapshot(&self) -> CombinedSnapshot { - CombinedSnapshot { - type_snapshot: 
self.type_variables.borrow_mut().snapshot(), - int_snapshot: self.int_unification_table.borrow_mut().snapshot(), - float_snapshot: self.float_unification_table.borrow_mut().snapshot(), - region_vars_snapshot: self.region_vars.start_snapshot(), - } - } - - fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot) { - debug!("rollback_to(cause={})", cause); - let CombinedSnapshot { type_snapshot, - int_snapshot, - float_snapshot, - region_vars_snapshot } = snapshot; - - self.type_variables - .borrow_mut() - .rollback_to(type_snapshot); - self.int_unification_table - .borrow_mut() - .rollback_to(int_snapshot); - self.float_unification_table - .borrow_mut() - .rollback_to(float_snapshot); - self.region_vars - .rollback_to(region_vars_snapshot); - } - - fn commit_from(&self, snapshot: CombinedSnapshot) { - debug!("commit_from!"); - let CombinedSnapshot { type_snapshot, - int_snapshot, - float_snapshot, - region_vars_snapshot } = snapshot; - - self.type_variables - .borrow_mut() - .commit(type_snapshot); - self.int_unification_table - .borrow_mut() - .commit(int_snapshot); - self.float_unification_table - .borrow_mut() - .commit(float_snapshot); - self.region_vars - .commit(region_vars_snapshot); - } - - /// Execute `f` and commit the bindings - pub fn commit_unconditionally(&self, f: F) -> R where - F: FnOnce() -> R, - { - debug!("commit()"); - let snapshot = self.start_snapshot(); - let r = f(); - self.commit_from(snapshot); - r - } - - /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)` - pub fn commit_if_ok(&self, f: F) -> Result where - F: FnOnce(&CombinedSnapshot) -> Result - { - debug!("commit_if_ok()"); - let snapshot = self.start_snapshot(); - let r = f(&snapshot); - debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok()); - match r { - Ok(_) => { self.commit_from(snapshot); } - Err(_) => { self.rollback_to("commit_if_ok -- error", snapshot); } - } - r - } - - /// Execute `f` and commit only the region bindings if successful. 
- /// The function f must be very careful not to leak any non-region - /// variables that get created. - pub fn commit_regions_if_ok(&self, f: F) -> Result where - F: FnOnce() -> Result - { - debug!("commit_regions_if_ok()"); - let CombinedSnapshot { type_snapshot, - int_snapshot, - float_snapshot, - region_vars_snapshot } = self.start_snapshot(); - - let r = self.commit_if_ok(|_| f()); - - debug!("commit_regions_if_ok: rolling back everything but regions"); - - // Roll back any non-region bindings - they should be resolved - // inside `f`, with, e.g. `resolve_type_vars_if_possible`. - self.type_variables - .borrow_mut() - .rollback_to(type_snapshot); - self.int_unification_table - .borrow_mut() - .rollback_to(int_snapshot); - self.float_unification_table - .borrow_mut() - .rollback_to(float_snapshot); - - // Commit region vars that may escape through resolved types. - self.region_vars - .commit(region_vars_snapshot); - - r - } - - /// Execute `f` then unroll any bindings it creates - pub fn probe(&self, f: F) -> R where - F: FnOnce(&CombinedSnapshot) -> R, - { - debug!("probe()"); - let snapshot = self.start_snapshot(); - let r = f(&snapshot); - self.rollback_to("probe", snapshot); - r - } - - pub fn add_given(&self, - sub: ty::FreeRegion, - sup: ty::RegionVid) - { - self.region_vars.add_given(sub, sup); - } - - pub fn sub_types(&self, - a_is_expected: bool, - origin: TypeOrigin, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> UnitResult<'tcx> - { - debug!("sub_types({:?} <: {:?})", a, b); - self.commit_if_ok(|_| { - let trace = TypeTrace::types(origin, a_is_expected, a, b); - self.sub(a_is_expected, trace).relate(&a, &b).map(|_| ()) - }) - } - - pub fn eq_types(&self, - a_is_expected: bool, - origin: TypeOrigin, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> UnitResult<'tcx> - { - self.commit_if_ok(|_| { - let trace = TypeTrace::types(origin, a_is_expected, a, b); - self.equate(a_is_expected, trace).relate(&a, &b).map(|_| ()) - }) - } - - pub fn eq_trait_refs(&self, - a_is_expected: 
bool, - origin: TypeOrigin, - a: ty::TraitRef<'tcx>, - b: ty::TraitRef<'tcx>) - -> UnitResult<'tcx> - { - debug!("eq_trait_refs({:?} <: {:?})", - a, - b); - self.commit_if_ok(|_| { - let trace = TypeTrace { - origin: origin, - values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone())) - }; - self.equate(a_is_expected, trace).relate(&a, &b).map(|_| ()) - }) - } - - pub fn sub_poly_trait_refs(&self, - a_is_expected: bool, - origin: TypeOrigin, - a: ty::PolyTraitRef<'tcx>, - b: ty::PolyTraitRef<'tcx>) - -> UnitResult<'tcx> - { - debug!("sub_poly_trait_refs({:?} <: {:?})", - a, - b); - self.commit_if_ok(|_| { - let trace = TypeTrace { - origin: origin, - values: PolyTraitRefs(expected_found(a_is_expected, a.clone(), b.clone())) - }; - self.sub(a_is_expected, trace).relate(&a, &b).map(|_| ()) - }) - } - - pub fn skolemize_late_bound_regions(&self, - value: &ty::Binder, - snapshot: &CombinedSnapshot) - -> (T, SkolemizationMap) - where T : TypeFoldable<'tcx> - { - /*! See `higher_ranked::skolemize_late_bound_regions` */ - - higher_ranked::skolemize_late_bound_regions(self, value, snapshot) - } - - pub fn leak_check(&self, - skol_map: &SkolemizationMap, - snapshot: &CombinedSnapshot) - -> UnitResult<'tcx> - { - /*! See `higher_ranked::leak_check` */ - - match higher_ranked::leak_check(self, skol_map, snapshot) { - Ok(()) => Ok(()), - Err((br, r)) => Err(TypeError::RegionsInsufficientlyPolymorphic(br, r)) - } - } - - pub fn plug_leaks(&self, - skol_map: SkolemizationMap, - snapshot: &CombinedSnapshot, - value: &T) - -> T - where T : TypeFoldable<'tcx> - { - /*! 
See `higher_ranked::plug_leaks` */ - - higher_ranked::plug_leaks(self, skol_map, snapshot, value) - } - - pub fn equality_predicate(&self, - span: Span, - predicate: &ty::PolyEquatePredicate<'tcx>) - -> UnitResult<'tcx> { - self.commit_if_ok(|snapshot| { - let (ty::EquatePredicate(a, b), skol_map) = - self.skolemize_late_bound_regions(predicate, snapshot); - let origin = TypeOrigin::EquatePredicate(span); - let () = try!(mk_eqty(self, false, origin, a, b)); - self.leak_check(&skol_map, snapshot) - }) - } - - pub fn region_outlives_predicate(&self, - span: Span, - predicate: &ty::PolyRegionOutlivesPredicate) - -> UnitResult<'tcx> { - self.commit_if_ok(|snapshot| { - let (ty::OutlivesPredicate(r_a, r_b), skol_map) = - self.skolemize_late_bound_regions(predicate, snapshot); - let origin = RelateRegionParamBound(span); - let () = mk_subr(self, origin, r_b, r_a); // `b : a` ==> `a <= b` - self.leak_check(&skol_map, snapshot) - }) - } - - pub fn next_ty_var_id(&self, diverging: bool) -> TyVid { - self.type_variables - .borrow_mut() - .new_var(diverging, None) - } - - pub fn next_ty_var(&self) -> Ty<'tcx> { - self.tcx.mk_var(self.next_ty_var_id(false)) - } - - pub fn next_ty_var_with_default(&self, - default: Option>) -> Ty<'tcx> { - let ty_var_id = self.type_variables - .borrow_mut() - .new_var(false, default); - - self.tcx.mk_var(ty_var_id) - } - - pub fn next_diverging_ty_var(&self) -> Ty<'tcx> { - self.tcx.mk_var(self.next_ty_var_id(true)) - } - - pub fn next_ty_vars(&self, n: usize) -> Vec> { - (0..n).map(|_i| self.next_ty_var()).collect() - } - - pub fn next_int_var_id(&self) -> IntVid { - self.int_unification_table - .borrow_mut() - .new_key(None) - } - - pub fn next_float_var_id(&self) -> FloatVid { - self.float_unification_table - .borrow_mut() - .new_key(None) - } - - pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region { - ty::ReVar(self.region_vars.new_region_var(origin)) - } - - pub fn region_vars_for_defs(&self, - span: Span, - defs: 
&[ty::RegionParameterDef]) - -> Vec { - defs.iter() - .map(|d| self.next_region_var(EarlyBoundRegion(span, d.name))) - .collect() - } - - // We have to take `&mut Substs` in order to provide the correct substitutions for defaults - // along the way, for this reason we don't return them. - pub fn type_vars_for_defs(&self, - span: Span, - space: subst::ParamSpace, - substs: &mut Substs<'tcx>, - defs: &[ty::TypeParameterDef<'tcx>]) { - - let mut vars = Vec::with_capacity(defs.len()); - - for def in defs.iter() { - let default = def.default.map(|default| { - type_variable::Default { - ty: default.subst_spanned(self.tcx, substs, Some(span)), - origin_span: span, - def_id: def.default_def_id - } - }); - - let ty_var = self.next_ty_var_with_default(default); - substs.types.push(space, ty_var); - vars.push(ty_var) - } - } - - /// Given a set of generics defined on a type or impl, returns a substitution mapping each - /// type/region parameter to a fresh inference variable. - pub fn fresh_substs_for_generics(&self, - span: Span, - generics: &ty::Generics<'tcx>) - -> subst::Substs<'tcx> - { - let type_params = subst::VecPerParamSpace::empty(); - - let region_params = - generics.regions.map( - |d| self.next_region_var(EarlyBoundRegion(span, d.name))); - - let mut substs = subst::Substs::new(type_params, region_params); - - for space in subst::ParamSpace::all().iter() { - self.type_vars_for_defs( - span, - *space, - &mut substs, - generics.types.get_slice(*space)); - } - - return substs; - } - - /// Given a set of generics defined on a trait, returns a substitution mapping each output - /// type/region parameter to a fresh inference variable, and mapping the self type to - /// `self_ty`. 
- pub fn fresh_substs_for_trait(&self, - span: Span, - generics: &ty::Generics<'tcx>, - self_ty: Ty<'tcx>) - -> subst::Substs<'tcx> - { - - assert!(generics.types.len(subst::SelfSpace) == 1); - assert!(generics.types.len(subst::FnSpace) == 0); - assert!(generics.regions.len(subst::SelfSpace) == 0); - assert!(generics.regions.len(subst::FnSpace) == 0); - - let type_params = Vec::new(); - - let region_param_defs = generics.regions.get_slice(subst::TypeSpace); - let regions = self.region_vars_for_defs(span, region_param_defs); - - let mut substs = subst::Substs::new_trait(type_params, regions, self_ty); - - let type_parameter_defs = generics.types.get_slice(subst::TypeSpace); - self.type_vars_for_defs(span, subst::TypeSpace, &mut substs, type_parameter_defs); - - return substs; - } - - pub fn fresh_bound_region(&self, debruijn: ty::DebruijnIndex) -> ty::Region { - self.region_vars.new_bound(debruijn) - } - - /// Apply `adjustment` to the type of `expr` - pub fn adjust_expr_ty(&self, - expr: &hir::Expr, - adjustment: Option<&adjustment::AutoAdjustment<'tcx>>) - -> Ty<'tcx> - { - let raw_ty = self.expr_ty(expr); - let raw_ty = self.shallow_resolve(raw_ty); - let resolve_ty = |ty: Ty<'tcx>| self.resolve_type_vars_if_possible(&ty); - raw_ty.adjust(self.tcx, - expr.span, - expr.id, - adjustment, - |method_call| self.tables - .borrow() - .method_map - .get(&method_call) - .map(|method| resolve_ty(method.ty))) - } - - pub fn node_type(&self, id: ast::NodeId) -> Ty<'tcx> { - match self.tables.borrow().node_types.get(&id) { - Some(&t) => t, - // FIXME - None if self.tcx.sess.err_count() - self.err_count_on_creation != 0 => - self.tcx.types.err, - None => { - self.tcx.sess.bug( - &format!("no type for node {}: {} in fcx", - id, self.tcx.map.node_to_string(id))); - } - } - } - - pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> { - match self.tables.borrow().node_types.get(&ex.id) { - Some(&t) => t, - None => { - self.tcx.sess.bug("no type for expr in fcx"); - } - } - } - - pub 
fn resolve_regions_and_report_errors(&self, - free_regions: &FreeRegionMap, - subject_node_id: ast::NodeId) { - let errors = self.region_vars.resolve_regions(free_regions, subject_node_id); - self.report_region_errors(&errors); // see error_reporting.rs - } - - pub fn ty_to_string(&self, t: Ty<'tcx>) -> String { - self.resolve_type_vars_if_possible(&t).to_string() - } - - pub fn tys_to_string(&self, ts: &[Ty<'tcx>]) -> String { - let tstrs: Vec = ts.iter().map(|t| self.ty_to_string(*t)).collect(); - format!("({})", tstrs.join(", ")) - } - - pub fn trait_ref_to_string(&self, t: &ty::TraitRef<'tcx>) -> String { - self.resolve_type_vars_if_possible(t).to_string() - } - - pub fn shallow_resolve(&self, typ: Ty<'tcx>) -> Ty<'tcx> { - match typ.sty { - ty::TyInfer(ty::TyVar(v)) => { - // Not entirely obvious: if `typ` is a type variable, - // it can be resolved to an int/float variable, which - // can then be recursively resolved, hence the - // recursion. Note though that we prevent type - // variables from unifying to other type variables - // directly (though they may be embedded - // structurally), and we prevent cycles in any case, - // so this recursion should always be of very limited - // depth. - self.type_variables.borrow() - .probe(v) - .map(|t| self.shallow_resolve(t)) - .unwrap_or(typ) - } - - ty::TyInfer(ty::IntVar(v)) => { - self.int_unification_table - .borrow_mut() - .probe(v) - .map(|v| v.to_type(self.tcx)) - .unwrap_or(typ) - } - - ty::TyInfer(ty::FloatVar(v)) => { - self.float_unification_table - .borrow_mut() - .probe(v) - .map(|v| v.to_type(self.tcx)) - .unwrap_or(typ) - } - - _ => { - typ - } - } - } - - pub fn resolve_type_vars_if_possible(&self, value: &T) -> T - where T: TypeFoldable<'tcx> - { - /*! - * Where possible, replaces type/int/float variables in - * `value` with their final value. Note that region variables - * are unaffected. If a type variable has not been unified, it - * is left as is. 
This is an idempotent operation that does - * not affect inference state in any way and so you can do it - * at will. - */ - - if !value.needs_infer() { - return value.clone(); // avoid duplicated subst-folding - } - let mut r = resolve::OpportunisticTypeResolver::new(self); - value.fold_with(&mut r) - } - - pub fn resolve_type_and_region_vars_if_possible(&self, value: &T) -> T - where T: TypeFoldable<'tcx> - { - let mut r = resolve::OpportunisticTypeAndRegionResolver::new(self); - value.fold_with(&mut r) - } - - /// Resolves all type variables in `t` and then, if any were left - /// unresolved, substitutes an error type. This is used after the - /// main checking when doing a second pass before writeback. The - /// justification is that writeback will produce an error for - /// these unconstrained type variables. - fn resolve_type_vars_or_error(&self, t: &Ty<'tcx>) -> mc::McResult> { - let ty = self.resolve_type_vars_if_possible(t); - if ty.references_error() || ty.is_ty_var() { - debug!("resolve_type_vars_or_error: error from {:?}", ty); - Err(()) - } else { - Ok(ty) - } - } - - pub fn fully_resolve>(&self, value: &T) -> FixupResult { - /*! - * Attempts to resolve all type/region variables in - * `value`. Region inference must have been run already (e.g., - * by calling `resolve_regions_and_report_errors`). If some - * variable was never unified, an `Err` results. - * - * This method is idempotent, but it not typically not invoked - * except during the writeback phase. - */ - - resolve::fully_resolve(self, value) - } - - // [Note-Type-error-reporting] - // An invariant is that anytime the expected or actual type is TyError (the special - // error type, meaning that an error occurred when typechecking this expression), - // this is a derived error. The error cascaded from another error (that was already - // reported), so it's not useful to display it to the user. 
- // The following four methods -- type_error_message_str, type_error_message_str_with_expected, - // type_error_message, and report_mismatched_types -- implement this logic. - // They check if either the actual or expected type is TyError, and don't print the error - // in this case. The typechecker should only ever report type errors involving mismatched - // types using one of these four methods, and should not call span_err directly for such - // errors. - pub fn type_error_message_str(&self, - sp: Span, - mk_msg: M, - actual_ty: String, - err: Option<&TypeError<'tcx>>) - where M: FnOnce(Option, String) -> String, - { - self.type_error_message_str_with_expected(sp, mk_msg, None, actual_ty, err) - } - - pub fn type_error_struct_str(&self, - sp: Span, - mk_msg: M, - actual_ty: String, - err: Option<&TypeError<'tcx>>) - -> DiagnosticBuilder<'tcx> - where M: FnOnce(Option, String) -> String, - { - self.type_error_struct_str_with_expected(sp, mk_msg, None, actual_ty, err) - } - - pub fn type_error_message_str_with_expected(&self, - sp: Span, - mk_msg: M, - expected_ty: Option>, - actual_ty: String, - err: Option<&TypeError<'tcx>>) - where M: FnOnce(Option, String) -> String, - { - self.type_error_struct_str_with_expected(sp, mk_msg, expected_ty, actual_ty, err) - .emit(); - } - - pub fn type_error_struct_str_with_expected(&self, - sp: Span, - mk_msg: M, - expected_ty: Option>, - actual_ty: String, - err: Option<&TypeError<'tcx>>) - -> DiagnosticBuilder<'tcx> - where M: FnOnce(Option, String) -> String, - { - debug!("hi! 
expected_ty = {:?}, actual_ty = {}", expected_ty, actual_ty); - - let resolved_expected = expected_ty.map(|e_ty| self.resolve_type_vars_if_possible(&e_ty)); - - if !resolved_expected.references_error() { - let error_str = err.map_or("".to_string(), |t_err| { - format!(" ({})", t_err) - }); - - let mut db = self.tcx.sess.struct_span_err(sp, &format!("{}{}", - mk_msg(resolved_expected.map(|t| self.ty_to_string(t)), actual_ty), - error_str)); - - if let Some(err) = err { - self.tcx.note_and_explain_type_err(&mut db, err, sp); - } - db - } else { - self.tcx.sess.diagnostic().struct_dummy() - } - } - - pub fn type_error_message(&self, - sp: Span, - mk_msg: M, - actual_ty: Ty<'tcx>, - err: Option<&TypeError<'tcx>>) - where M: FnOnce(String) -> String, - { - self.type_error_struct(sp, mk_msg, actual_ty, err).emit(); - } - - pub fn type_error_struct(&self, - sp: Span, - mk_msg: M, - actual_ty: Ty<'tcx>, - err: Option<&TypeError<'tcx>>) - -> DiagnosticBuilder<'tcx> - where M: FnOnce(String) -> String, - { - let actual_ty = self.resolve_type_vars_if_possible(&actual_ty); - - // Don't report an error if actual type is TyError. 
- if actual_ty.references_error() { - return self.tcx.sess.diagnostic().struct_dummy(); - } - - self.type_error_struct_str(sp, - move |_e, a| { mk_msg(a) }, - self.ty_to_string(actual_ty), err) - } - - pub fn report_mismatched_types(&self, - span: Span, - expected: Ty<'tcx>, - actual: Ty<'tcx>, - err: &TypeError<'tcx>) { - let trace = TypeTrace { - origin: TypeOrigin::Misc(span), - values: Types(ExpectedFound { - expected: expected, - found: actual - }) - }; - self.report_and_explain_type_error(trace, err); - } - - pub fn report_conflicting_default_types(&self, - span: Span, - expected: type_variable::Default<'tcx>, - actual: type_variable::Default<'tcx>) { - let trace = TypeTrace { - origin: TypeOrigin::Misc(span), - values: Types(ExpectedFound { - expected: expected.ty, - found: actual.ty - }) - }; - - self.report_and_explain_type_error(trace, - &TypeError::TyParamDefaultMismatch(ExpectedFound { - expected: expected, - found: actual - })); - } - - pub fn replace_late_bound_regions_with_fresh_var( - &self, - span: Span, - lbrct: LateBoundRegionConversionTime, - value: &ty::Binder) - -> (T, FnvHashMap) - where T : TypeFoldable<'tcx> - { - self.tcx.replace_late_bound_regions( - value, - |br| self.next_region_var(LateBoundRegion(span, br, lbrct))) - } - - /// See `verify_generic_bound` method in `region_inference` - pub fn verify_generic_bound(&self, - origin: SubregionOrigin<'tcx>, - kind: GenericKind<'tcx>, - a: ty::Region, - bound: VerifyBound) { - debug!("verify_generic_bound({:?}, {:?} <: {:?})", - kind, - a, - bound); - - self.region_vars.verify_generic_bound(origin, kind, a, bound); - } - - pub fn can_equate<'b,T>(&'b self, a: &T, b: &T) -> UnitResult<'tcx> - where T: Relate<'b,'tcx> + fmt::Debug - { - debug!("can_equate({:?}, {:?})", a, b); - self.probe(|_| { - // Gin up a dummy trace, since this won't be committed - // anyhow. We should make this typetrace stuff more - // generic so we don't have to do anything quite this - // terrible. 
- let e = self.tcx.types.err; - let trace = TypeTrace { - origin: TypeOrigin::Misc(codemap::DUMMY_SP), - values: Types(expected_found(true, e, e)) - }; - self.equate(true, trace).relate(a, b) - }).map(|_| ()) - } - - pub fn node_ty(&self, id: ast::NodeId) -> McResult> { - let ty = self.node_type(id); - self.resolve_type_vars_or_error(&ty) - } - - pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult> { - let ty = self.adjust_expr_ty(expr, self.tables.borrow().adjustments.get(&expr.id)); - self.resolve_type_vars_or_error(&ty) - } - - pub fn tables_are_tcx_tables(&self) -> bool { - let tables: &RefCell = &self.tables; - let tcx_tables: &RefCell = &self.tcx.tables; - tables as *const _ == tcx_tables as *const _ - } - - pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool { - let ty = self.resolve_type_vars_if_possible(&ty); - if ty.needs_infer() || - (ty.has_closure_types() && !self.tables_are_tcx_tables()) { - // this can get called from typeck (by euv), and moves_by_default - // rightly refuses to work with inference variables, but - // moves_by_default has a cache, which we want to use in other - // cases. 
- !traits::type_known_to_meet_builtin_bound(self, ty, ty::BoundCopy, span) - } else { - ty.moves_by_default(&self.parameter_environment, span) - } - } - - pub fn node_method_ty(&self, method_call: ty::MethodCall) - -> Option> { - self.tables - .borrow() - .method_map - .get(&method_call) - .map(|method| method.ty) - .map(|ty| self.resolve_type_vars_if_possible(&ty)) - } - - pub fn node_method_id(&self, method_call: ty::MethodCall) - -> Option { - self.tables - .borrow() - .method_map - .get(&method_call) - .map(|method| method.def_id) - } - - pub fn adjustments(&self) -> Ref>> { - fn project_adjustments<'a, 'tcx>(tables: &'a ty::Tables<'tcx>) - -> &'a NodeMap> { - &tables.adjustments - } - - Ref::map(self.tables.borrow(), project_adjustments) - } - - pub fn is_method_call(&self, id: ast::NodeId) -> bool { - self.tables.borrow().method_map.contains_key(&ty::MethodCall::expr(id)) - } - - pub fn temporary_scope(&self, rvalue_id: ast::NodeId) -> Option { - self.tcx.region_maps.temporary_scope(rvalue_id) - } - - pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option { - self.tables.borrow().upvar_capture_map.get(&upvar_id).cloned() - } - - pub fn param_env<'b>(&'b self) -> &'b ty::ParameterEnvironment<'b,'tcx> { - &self.parameter_environment - } - - pub fn closure_kind(&self, - def_id: DefId) - -> Option - { - if def_id.is_local() { - self.tables.borrow().closure_kinds.get(&def_id).cloned() - } else { - // During typeck, ALL closures are local. But afterwards, - // during trans, we see closure ids from other traits. - // That may require loading the closure data out of the - // cstore. 
- Some(ty::Tables::closure_kind(&self.tables, self.tcx, def_id)) - } - } - - pub fn closure_type(&self, - def_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> ty::ClosureTy<'tcx> - { - let closure_ty = - ty::Tables::closure_type(self.tables, - self.tcx, - def_id, - substs); - - if self.normalize { - normalize_associated_type(&self.tcx, &closure_ty) - } else { - closure_ty - } - } -} - -impl<'tcx> TypeTrace<'tcx> { - pub fn span(&self) -> Span { - self.origin.span() - } - - pub fn types(origin: TypeOrigin, - a_is_expected: bool, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> TypeTrace<'tcx> { - TypeTrace { - origin: origin, - values: Types(expected_found(a_is_expected, a, b)) - } - } - - pub fn dummy(tcx: &ty::ctxt<'tcx>) -> TypeTrace<'tcx> { - TypeTrace { - origin: TypeOrigin::Misc(codemap::DUMMY_SP), - values: Types(ExpectedFound { - expected: tcx.types.err, - found: tcx.types.err, - }) - } - } -} - -impl<'tcx> fmt::Debug for TypeTrace<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TypeTrace({:?})", self.origin) - } -} - -impl TypeOrigin { - pub fn span(&self) -> Span { - match *self { - TypeOrigin::MethodCompatCheck(span) => span, - TypeOrigin::ExprAssignable(span) => span, - TypeOrigin::Misc(span) => span, - TypeOrigin::RelateTraitRefs(span) => span, - TypeOrigin::RelateSelfType(span) => span, - TypeOrigin::RelateOutputImplTypes(span) => span, - TypeOrigin::MatchExpressionArm(match_span, _, _) => match_span, - TypeOrigin::IfExpression(span) => span, - TypeOrigin::IfExpressionWithNoElse(span) => span, - TypeOrigin::RangeExpression(span) => span, - TypeOrigin::EquatePredicate(span) => span, - } - } -} - -impl<'tcx> SubregionOrigin<'tcx> { - pub fn span(&self) -> Span { - match *self { - Subtype(ref a) => a.span(), - InfStackClosure(a) => a, - InvokeClosure(a) => a, - DerefPointer(a) => a, - FreeVariable(a, _) => a, - IndexSlice(a) => a, - RelateObjectBound(a) => a, - RelateParamBound(a, _) => a, - RelateRegionParamBound(a) => a, - 
RelateDefaultParamBound(a, _) => a, - Reborrow(a) => a, - ReborrowUpvar(a, _) => a, - DataBorrowed(_, a) => a, - ReferenceOutlivesReferent(_, a) => a, - ParameterInScope(_, a) => a, - ExprTypeIsNotInScope(_, a) => a, - BindingTypeIsNotValidAtDecl(a) => a, - CallRcvr(a) => a, - CallArg(a) => a, - CallReturn(a) => a, - Operand(a) => a, - AddrOf(a) => a, - AutoBorrow(a) => a, - SafeDestructor(a) => a, - } - } -} - -impl RegionVariableOrigin { - pub fn span(&self) -> Span { - match *self { - MiscVariable(a) => a, - PatternRegion(a) => a, - AddrOfRegion(a) => a, - Autoref(a) => a, - Coercion(a) => a, - EarlyBoundRegion(a, _) => a, - LateBoundRegion(a, _, _) => a, - BoundRegionInCoherence(_) => codemap::DUMMY_SP, - UpvarRegion(_, a) => a - } - } -} diff --git a/src/librustc/middle/infer/region_inference/graphviz.rs b/src/librustc/middle/infer/region_inference/graphviz.rs deleted file mode 100644 index 439274cd47696..0000000000000 --- a/src/librustc/middle/infer/region_inference/graphviz.rs +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module provides linkage between libgraphviz traits and -//! `rustc::middle::typeck::infer::region_inference`, generating a -//! rendering of the graph represented by the list of `Constraint` -//! instances (which make up the edges of the graph), as well as the -//! origin for each constraint (which are attached to the labels on -//! each edge). - -/// For clarity, rename the graphviz crate locally to dot. 
-use graphviz as dot; - -use middle::ty; -use middle::region::CodeExtent; -use super::Constraint; -use middle::infer::SubregionOrigin; -use middle::infer::region_inference::RegionVarBindings; -use util::nodemap::{FnvHashMap, FnvHashSet}; - -use std::borrow::Cow; -use std::collections::hash_map::Entry::Vacant; -use std::env; -use std::fs::File; -use std::io; -use std::io::prelude::*; -use std::sync::atomic::{AtomicBool, Ordering}; -use syntax::ast; - -fn print_help_message() { - println!("\ --Z print-region-graph by default prints a region constraint graph for every \n\ -function body, to the path `/tmp/constraints.nodeXXX.dot`, where the XXX is \n\ -replaced with the node id of the function under analysis. \n\ - \n\ -To select one particular function body, set `RUST_REGION_GRAPH_NODE=XXX`, \n\ -where XXX is the node id desired. \n\ - \n\ -To generate output to some path other than the default \n\ -`/tmp/constraints.nodeXXX.dot`, set `RUST_REGION_GRAPH=/path/desired.dot`; \n\ -occurrences of the character `%` in the requested path will be replaced with\n\ -the node id of the function under analysis. \n\ - \n\ -(Since you requested help via RUST_REGION_GRAPH=help, no region constraint \n\ -graphs will be printed. 
\n\ -"); -} - -pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a, 'tcx>, - subject_node: ast::NodeId) { - let tcx = region_vars.tcx; - - if !region_vars.tcx.sess.opts.debugging_opts.print_region_graph { - return; - } - - let requested_node: Option = env::var("RUST_REGION_GRAPH_NODE") - .ok() - .and_then(|s| s.parse().ok()); - - if requested_node.is_some() && requested_node != Some(subject_node) { - return; - } - - let requested_output = env::var("RUST_REGION_GRAPH"); - debug!("requested_output: {:?} requested_node: {:?}", - requested_output, - requested_node); - - let output_path = { - let output_template = match requested_output { - Ok(ref s) if &**s == "help" => { - static PRINTED_YET: AtomicBool = AtomicBool::new(false); - if !PRINTED_YET.load(Ordering::SeqCst) { - print_help_message(); - PRINTED_YET.store(true, Ordering::SeqCst); - } - return; - } - - Ok(other_path) => other_path, - Err(_) => "/tmp/constraints.node%.dot".to_string(), - }; - - if output_template.is_empty() { - tcx.sess.bug("empty string provided as RUST_REGION_GRAPH"); - } - - if output_template.contains('%') { - let mut new_str = String::new(); - for c in output_template.chars() { - if c == '%' { - new_str.push_str(&subject_node.to_string()); - } else { - new_str.push(c); - } - } - new_str - } else { - output_template - } - }; - - let constraints = &*region_vars.constraints.borrow(); - match dump_region_constraints_to(tcx, constraints, &output_path) { - Ok(()) => {} - Err(e) => { - let msg = format!("io error dumping region constraints: {}", e); - region_vars.tcx.sess.err(&msg) - } - } -} - -struct ConstraintGraph<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - graph_name: String, - map: &'a FnvHashMap>, - node_ids: FnvHashMap, -} - -#[derive(Clone, Hash, PartialEq, Eq, Debug, Copy)] -enum Node { - RegionVid(ty::RegionVid), - Region(ty::Region), -} - -// type Edge = Constraint; -#[derive(Clone, PartialEq, Eq, Debug, Copy)] -enum Edge { - Constraint(Constraint), - 
EnclScope(CodeExtent, CodeExtent), -} - -impl<'a, 'tcx> ConstraintGraph<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>, - name: String, - map: &'a ConstraintMap<'tcx>) - -> ConstraintGraph<'a, 'tcx> { - let mut i = 0; - let mut node_ids = FnvHashMap(); - { - let mut add_node = |node| { - if let Vacant(e) = node_ids.entry(node) { - e.insert(i); - i += 1; - } - }; - - for (n1, n2) in map.keys().map(|c| constraint_to_nodes(c)) { - add_node(n1); - add_node(n2); - } - - tcx.region_maps.each_encl_scope(|sub, sup| { - add_node(Node::Region(ty::ReScope(*sub))); - add_node(Node::Region(ty::ReScope(*sup))); - }); - } - - ConstraintGraph { - tcx: tcx, - graph_name: name, - map: map, - node_ids: node_ids, - } - } -} - -impl<'a, 'tcx> dot::Labeller<'a, Node, Edge> for ConstraintGraph<'a, 'tcx> { - fn graph_id(&self) -> dot::Id { - dot::Id::new(&*self.graph_name).unwrap() - } - fn node_id(&self, n: &Node) -> dot::Id { - let node_id = match self.node_ids.get(n) { - Some(node_id) => node_id, - None => panic!("no node_id found for node: {:?}", n), - }; - let name = || format!("node_{}", node_id); - match dot::Id::new(name()) { - Ok(id) => id, - Err(_) => { - panic!("failed to create graphviz node identified by {}", name()); - } - } - } - fn node_label(&self, n: &Node) -> dot::LabelText { - match *n { - Node::RegionVid(n_vid) => dot::LabelText::label(format!("{:?}", n_vid)), - Node::Region(n_rgn) => dot::LabelText::label(format!("{:?}", n_rgn)), - } - } - fn edge_label(&self, e: &Edge) -> dot::LabelText { - match *e { - Edge::Constraint(ref c) => - dot::LabelText::label(format!("{:?}", self.map.get(c).unwrap())), - Edge::EnclScope(..) 
=> dot::LabelText::label(format!("(enclosed)")), - } - } -} - -fn constraint_to_nodes(c: &Constraint) -> (Node, Node) { - match *c { - Constraint::ConstrainVarSubVar(rv_1, rv_2) => - (Node::RegionVid(rv_1), Node::RegionVid(rv_2)), - Constraint::ConstrainRegSubVar(r_1, rv_2) => (Node::Region(r_1), Node::RegionVid(rv_2)), - Constraint::ConstrainVarSubReg(rv_1, r_2) => (Node::RegionVid(rv_1), Node::Region(r_2)), - } -} - -fn edge_to_nodes(e: &Edge) -> (Node, Node) { - match *e { - Edge::Constraint(ref c) => constraint_to_nodes(c), - Edge::EnclScope(sub, sup) => { - (Node::Region(ty::ReScope(sub)), - Node::Region(ty::ReScope(sup))) - } - } -} - -impl<'a, 'tcx> dot::GraphWalk<'a, Node, Edge> for ConstraintGraph<'a, 'tcx> { - fn nodes(&self) -> dot::Nodes { - let mut set = FnvHashSet(); - for node in self.node_ids.keys() { - set.insert(*node); - } - debug!("constraint graph has {} nodes", set.len()); - set.into_iter().collect() - } - fn edges(&self) -> dot::Edges { - debug!("constraint graph has {} edges", self.map.len()); - let mut v: Vec<_> = self.map.keys().map(|e| Edge::Constraint(*e)).collect(); - self.tcx.region_maps.each_encl_scope(|sub, sup| v.push(Edge::EnclScope(*sub, *sup))); - debug!("region graph has {} edges", v.len()); - Cow::Owned(v) - } - fn source(&self, edge: &Edge) -> Node { - let (n1, _) = edge_to_nodes(edge); - debug!("edge {:?} has source {:?}", edge, n1); - n1 - } - fn target(&self, edge: &Edge) -> Node { - let (_, n2) = edge_to_nodes(edge); - debug!("edge {:?} has target {:?}", edge, n2); - n2 - } -} - -pub type ConstraintMap<'tcx> = FnvHashMap>; - -fn dump_region_constraints_to<'a, 'tcx: 'a>(tcx: &'a ty::ctxt<'tcx>, - map: &ConstraintMap<'tcx>, - path: &str) - -> io::Result<()> { - debug!("dump_region_constraints map (len: {}) path: {}", - map.len(), - path); - let g = ConstraintGraph::new(tcx, format!("region_constraints"), map); - debug!("dump_region_constraints calling render"); - let mut v = Vec::new(); - dot::render(&g, &mut v).unwrap(); - 
File::create(path).and_then(|mut f| f.write_all(&v)) -} diff --git a/src/librustc/middle/infer/region_inference/mod.rs b/src/librustc/middle/infer/region_inference/mod.rs deleted file mode 100644 index 2c2b69ff85b4e..0000000000000 --- a/src/librustc/middle/infer/region_inference/mod.rs +++ /dev/null @@ -1,1441 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! See README.md - -pub use self::Constraint::*; -pub use self::Verify::*; -pub use self::UndoLogEntry::*; -pub use self::CombineMapType::*; -pub use self::RegionResolutionError::*; -pub use self::VarValue::*; - -use super::{RegionVariableOrigin, SubregionOrigin, TypeTrace, MiscVariable}; -use super::unify_key; - -use rustc_data_structures::graph::{self, Direction, NodeIndex}; -use rustc_data_structures::unify::{self, UnificationTable}; -use middle::free_region::FreeRegionMap; -use middle::ty::{self, Ty}; -use middle::ty::{BoundRegion, Region, RegionVid}; -use middle::ty::{ReEmpty, ReStatic, ReFree, ReEarlyBound}; -use middle::ty::{ReLateBound, ReScope, ReVar, ReSkolemized, BrFresh}; -use middle::ty::error::TypeError; -use util::common::indenter; -use util::nodemap::{FnvHashMap, FnvHashSet}; - -use std::cell::{Cell, RefCell}; -use std::cmp::Ordering::{self, Less, Greater, Equal}; -use std::fmt; -use std::u32; -use syntax::ast; - -mod graphviz; - -// A constraint that influences the inference process. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum Constraint { - // One region variable is subregion of another - ConstrainVarSubVar(RegionVid, RegionVid), - - // Concrete region is subregion of region variable - ConstrainRegSubVar(Region, RegionVid), - - // Region variable is subregion of concrete region - // - // FIXME(#29436) -- should be remove in favor of a Verify - ConstrainVarSubReg(RegionVid, Region), -} - -// Something we have to verify after region inference is done, but -// which does not directly influence the inference process -pub enum Verify<'tcx> { - // VerifyRegSubReg(a, b): Verify that `a <= b`. Neither `a` nor - // `b` are inference variables. - VerifyRegSubReg(SubregionOrigin<'tcx>, Region, Region), - - // VerifyGenericBound(T, _, R, RS): The parameter type `T` (or - // associated type) must outlive the region `R`. `T` is known to - // outlive `RS`. Therefore verify that `R <= RS[i]` for some - // `i`. Inference variables may be involved (but this verification - // step doesn't influence inference). - VerifyGenericBound(GenericKind<'tcx>, SubregionOrigin<'tcx>, Region, VerifyBound), -} - -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum GenericKind<'tcx> { - Param(ty::ParamTy), - Projection(ty::ProjectionTy<'tcx>), -} - -// When we introduce a verification step, we wish to test that a -// particular region (let's call it `'min`) meets some bound. -// The bound is described the by the following grammar: -#[derive(Debug)] -pub enum VerifyBound { - // B = exists {R} --> some 'r in {R} must outlive 'min - // - // Put another way, the subject value is known to outlive all - // regions in {R}, so if any of those outlives 'min, then the - // bound is met. - AnyRegion(Vec), - - // B = forall {R} --> all 'r in {R} must outlive 'min - // - // Put another way, the subject value is known to outlive some - // region in {R}, so if all of those outlives 'min, then the bound - // is met. 
- AllRegions(Vec), - - // B = exists {B} --> 'min must meet some bound b in {B} - AnyBound(Vec), - - // B = forall {B} --> 'min must meet all bounds b in {B} - AllBounds(Vec), -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub struct TwoRegions { - a: Region, - b: Region, -} - -#[derive(Copy, Clone, PartialEq)] -pub enum UndoLogEntry { - OpenSnapshot, - CommitedSnapshot, - AddVar(RegionVid), - AddConstraint(Constraint), - AddVerify(usize), - AddGiven(ty::FreeRegion, ty::RegionVid), - AddCombination(CombineMapType, TwoRegions), -} - -#[derive(Copy, Clone, PartialEq)] -pub enum CombineMapType { - Lub, - Glb, -} - -#[derive(Clone, Debug)] -pub enum RegionResolutionError<'tcx> { - /// `ConcreteFailure(o, a, b)`: - /// - /// `o` requires that `a <= b`, but this does not hold - ConcreteFailure(SubregionOrigin<'tcx>, Region, Region), - - /// `GenericBoundFailure(p, s, a) - /// - /// The parameter/associated-type `p` must be known to outlive the lifetime - /// `a` (but none of the known bounds are sufficient). - GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region), - - /// `SubSupConflict(v, sub_origin, sub_r, sup_origin, sup_r)`: - /// - /// Could not infer a value for `v` because `sub_r <= v` (due to - /// `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and - /// `sub_r <= sup_r` does not hold. - SubSupConflict(RegionVariableOrigin, - SubregionOrigin<'tcx>, - Region, - SubregionOrigin<'tcx>, - Region), - - /// For subsets of `ConcreteFailure` and `SubSupConflict`, we can derive - /// more specific errors message by suggesting to the user where they - /// should put a lifetime. In those cases we process and put those errors - /// into `ProcessedErrors` before we do any reporting. - ProcessedErrors(Vec, - Vec<(TypeTrace<'tcx>, TypeError<'tcx>)>, - Vec), -} - -/// SameRegions is used to group regions that we think are the same and would -/// like to indicate so to the user. 
-/// For example, the following function -/// ``` -/// struct Foo { bar: i32 } -/// fn foo2<'a, 'b>(x: &'a Foo) -> &'b i32 { -/// &x.bar -/// } -/// ``` -/// would report an error because we expect 'a and 'b to match, and so we group -/// 'a and 'b together inside a SameRegions struct -#[derive(Clone, Debug)] -pub struct SameRegions { - pub scope_id: ast::NodeId, - pub regions: Vec, -} - -impl SameRegions { - pub fn contains(&self, other: &BoundRegion) -> bool { - self.regions.contains(other) - } - - pub fn push(&mut self, other: BoundRegion) { - self.regions.push(other); - } -} - -pub type CombineMap = FnvHashMap; - -pub struct RegionVarBindings<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - var_origins: RefCell>, - - // Constraints of the form `A <= B` introduced by the region - // checker. Here at least one of `A` and `B` must be a region - // variable. - constraints: RefCell>>, - - // A "verify" is something that we need to verify after inference is - // done, but which does not directly affect inference in any way. - // - // An example is a `A <= B` where neither `A` nor `B` are - // inference variables. - verifys: RefCell>>, - - // A "given" is a relationship that is known to hold. In particular, - // we often know from closure fn signatures that a particular free - // region must be a subregion of a region variable: - // - // foo.iter().filter(<'a> |x: &'a &'b T| ...) - // - // In situations like this, `'b` is in fact a region variable - // introduced by the call to `iter()`, and `'a` is a bound region - // on the closure (as indicated by the `<'a>` prefix). If we are - // naive, we wind up inferring that `'b` must be `'static`, - // because we require that it be greater than `'a` and we do not - // know what `'a` is precisely. - // - // This hashmap is used to avoid that naive scenario. Basically we - // record the fact that `'a <= 'b` is implied by the fn signature, - // and then ignore the constraint when solving equations. 
This is - // a bit of a hack but seems to work. - givens: RefCell>, - - lubs: RefCell, - glbs: RefCell, - skolemization_count: Cell, - bound_count: Cell, - - // The undo log records actions that might later be undone. - // - // Note: when the undo_log is empty, we are not actively - // snapshotting. When the `start_snapshot()` method is called, we - // push an OpenSnapshot entry onto the list to indicate that we - // are now actively snapshotting. The reason for this is that - // otherwise we end up adding entries for things like the lower - // bound on a variable and so forth, which can never be rolled - // back. - undo_log: RefCell>, - unification_table: RefCell>, - - // This contains the results of inference. It begins as an empty - // option and only acquires a value after inference is complete. - values: RefCell>>, -} - -pub struct RegionSnapshot { - length: usize, - region_snapshot: unify::Snapshot, - skolemization_count: u32, -} - -impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> { - pub fn new(tcx: &'a ty::ctxt<'tcx>) -> RegionVarBindings<'a, 'tcx> { - RegionVarBindings { - tcx: tcx, - var_origins: RefCell::new(Vec::new()), - values: RefCell::new(None), - constraints: RefCell::new(FnvHashMap()), - verifys: RefCell::new(Vec::new()), - givens: RefCell::new(FnvHashSet()), - lubs: RefCell::new(FnvHashMap()), - glbs: RefCell::new(FnvHashMap()), - skolemization_count: Cell::new(0), - bound_count: Cell::new(0), - undo_log: RefCell::new(Vec::new()), - unification_table: RefCell::new(UnificationTable::new()), - } - } - - fn in_snapshot(&self) -> bool { - !self.undo_log.borrow().is_empty() - } - - pub fn start_snapshot(&self) -> RegionSnapshot { - let length = self.undo_log.borrow().len(); - debug!("RegionVarBindings: start_snapshot({})", length); - self.undo_log.borrow_mut().push(OpenSnapshot); - RegionSnapshot { - length: length, - region_snapshot: self.unification_table.borrow_mut().snapshot(), - skolemization_count: self.skolemization_count.get(), - } - } - - pub fn 
commit(&self, snapshot: RegionSnapshot) { - debug!("RegionVarBindings: commit({})", snapshot.length); - assert!(self.undo_log.borrow().len() > snapshot.length); - assert!((*self.undo_log.borrow())[snapshot.length] == OpenSnapshot); - - let mut undo_log = self.undo_log.borrow_mut(); - if snapshot.length == 0 { - undo_log.truncate(0); - } else { - (*undo_log)[snapshot.length] = CommitedSnapshot; - } - self.skolemization_count.set(snapshot.skolemization_count); - self.unification_table.borrow_mut().commit(snapshot.region_snapshot); - } - - pub fn rollback_to(&self, snapshot: RegionSnapshot) { - debug!("RegionVarBindings: rollback_to({:?})", snapshot); - let mut undo_log = self.undo_log.borrow_mut(); - assert!(undo_log.len() > snapshot.length); - assert!((*undo_log)[snapshot.length] == OpenSnapshot); - while undo_log.len() > snapshot.length + 1 { - match undo_log.pop().unwrap() { - OpenSnapshot => { - panic!("Failure to observe stack discipline"); - } - CommitedSnapshot => {} - AddVar(vid) => { - let mut var_origins = self.var_origins.borrow_mut(); - var_origins.pop().unwrap(); - assert_eq!(var_origins.len(), vid.index as usize); - } - AddConstraint(ref constraint) => { - self.constraints.borrow_mut().remove(constraint); - } - AddVerify(index) => { - self.verifys.borrow_mut().pop(); - assert_eq!(self.verifys.borrow().len(), index); - } - AddGiven(sub, sup) => { - self.givens.borrow_mut().remove(&(sub, sup)); - } - AddCombination(Glb, ref regions) => { - self.glbs.borrow_mut().remove(regions); - } - AddCombination(Lub, ref regions) => { - self.lubs.borrow_mut().remove(regions); - } - } - } - let c = undo_log.pop().unwrap(); - assert!(c == OpenSnapshot); - self.skolemization_count.set(snapshot.skolemization_count); - self.unification_table.borrow_mut() - .rollback_to(snapshot.region_snapshot); - } - - pub fn num_vars(&self) -> u32 { - let len = self.var_origins.borrow().len(); - // enforce no overflow - assert!(len as u32 as usize == len); - len as u32 - } - - pub fn 
new_region_var(&self, origin: RegionVariableOrigin) -> RegionVid { - let vid = RegionVid { index: self.num_vars() }; - self.var_origins.borrow_mut().push(origin.clone()); - - let u_vid = self.unification_table.borrow_mut().new_key( - unify_key::RegionVidKey { min_vid: vid } - ); - assert_eq!(vid, u_vid); - if self.in_snapshot() { - self.undo_log.borrow_mut().push(AddVar(vid)); - } - debug!("created new region variable {:?} with origin {:?}", - vid, - origin); - return vid; - } - - /// Creates a new skolemized region. Skolemized regions are fresh - /// regions used when performing higher-ranked computations. They - /// must be used in a very particular way and are never supposed - /// to "escape" out into error messages or the code at large. - /// - /// The idea is to always create a snapshot. Skolemized regions - /// can be created in the context of this snapshot, but once the - /// snapshot is committed or rolled back, their numbers will be - /// recycled, so you must be finished with them. See the extensive - /// comments in `higher_ranked.rs` to see how it works (in - /// particular, the subtyping comparison). - /// - /// The `snapshot` argument to this function is not really used; - /// it's just there to make it explicit which snapshot bounds the - /// skolemized region that results. - pub fn new_skolemized(&self, br: ty::BoundRegion, snapshot: &RegionSnapshot) -> Region { - assert!(self.in_snapshot()); - assert!(self.undo_log.borrow()[snapshot.length] == OpenSnapshot); - - let sc = self.skolemization_count.get(); - self.skolemization_count.set(sc + 1); - ReSkolemized(ty::SkolemizedRegionVid { index: sc }, br) - } - - pub fn new_bound(&self, debruijn: ty::DebruijnIndex) -> Region { - // Creates a fresh bound variable for use in GLB computations. - // See discussion of GLB computation in the large comment at - // the top of this file for more details. - // - // This computation is potentially wrong in the face of - // rollover. 
It's conceivable, if unlikely, that one might - // wind up with accidental capture for nested functions in - // that case, if the outer function had bound regions created - // a very long time before and the inner function somehow - // wound up rolling over such that supposedly fresh - // identifiers were in fact shadowed. For now, we just assert - // that there is no rollover -- eventually we should try to be - // robust against this possibility, either by checking the set - // of bound identifiers that appear in a given expression and - // ensure that we generate one that is distinct, or by - // changing the representation of bound regions in a fn - // declaration - - let sc = self.bound_count.get(); - self.bound_count.set(sc + 1); - - if sc >= self.bound_count.get() { - self.tcx.sess.bug("rollover in RegionInference new_bound()"); - } - - ReLateBound(debruijn, BrFresh(sc)) - } - - fn values_are_none(&self) -> bool { - self.values.borrow().is_none() - } - - fn add_constraint(&self, constraint: Constraint, origin: SubregionOrigin<'tcx>) { - // cannot add constraints once regions are resolved - assert!(self.values_are_none()); - - debug!("RegionVarBindings: add_constraint({:?})", constraint); - - if self.constraints.borrow_mut().insert(constraint, origin).is_none() { - if self.in_snapshot() { - self.undo_log.borrow_mut().push(AddConstraint(constraint)); - } - } - } - - fn add_verify(&self, verify: Verify<'tcx>) { - // cannot add verifys once regions are resolved - assert!(self.values_are_none()); - - debug!("RegionVarBindings: add_verify({:?})", verify); - - // skip no-op cases known to be satisfied - match verify { - VerifyGenericBound(_, _, _, VerifyBound::AllBounds(ref bs)) if bs.len() == 0 => { - return; - } - _ => {} - } - - let mut verifys = self.verifys.borrow_mut(); - let index = verifys.len(); - verifys.push(verify); - if self.in_snapshot() { - self.undo_log.borrow_mut().push(AddVerify(index)); - } - } - - pub fn add_given(&self, sub: ty::FreeRegion, sup: 
ty::RegionVid) { - // cannot add givens once regions are resolved - assert!(self.values_are_none()); - - let mut givens = self.givens.borrow_mut(); - if givens.insert((sub, sup)) { - debug!("add_given({:?} <= {:?})", sub, sup); - - self.undo_log.borrow_mut().push(AddGiven(sub, sup)); - } - } - - pub fn make_eqregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) { - if sub != sup { - // Eventually, it would be nice to add direct support for - // equating regions. - self.make_subregion(origin.clone(), sub, sup); - self.make_subregion(origin, sup, sub); - - if let (ty::ReVar(sub), ty::ReVar(sup)) = (sub, sup) { - self.unification_table.borrow_mut().union(sub, sup); - } - } - } - - pub fn make_subregion(&self, origin: SubregionOrigin<'tcx>, sub: Region, sup: Region) { - // cannot add constraints once regions are resolved - assert!(self.values_are_none()); - - debug!("RegionVarBindings: make_subregion({:?}, {:?}) due to {:?}", - sub, - sup, - origin); - - match (sub, sup) { - (ReEarlyBound(..), _) | - (ReLateBound(..), _) | - (_, ReEarlyBound(..)) | - (_, ReLateBound(..)) => { - self.tcx.sess.span_bug(origin.span(), - &format!("cannot relate bound region: {:?} <= {:?}", - sub, - sup)); - } - (_, ReStatic) => { - // all regions are subregions of static, so we can ignore this - } - (ReVar(sub_id), ReVar(sup_id)) => { - self.add_constraint(ConstrainVarSubVar(sub_id, sup_id), origin); - } - (r, ReVar(sup_id)) => { - self.add_constraint(ConstrainRegSubVar(r, sup_id), origin); - } - (ReVar(sub_id), r) => { - self.add_constraint(ConstrainVarSubReg(sub_id, r), origin); - } - _ => { - self.add_verify(VerifyRegSubReg(origin, sub, sup)); - } - } - } - - /// See `Verify::VerifyGenericBound` - pub fn verify_generic_bound(&self, - origin: SubregionOrigin<'tcx>, - kind: GenericKind<'tcx>, - sub: Region, - bound: VerifyBound) { - self.add_verify(VerifyGenericBound(kind, origin, sub, bound)); - } - - pub fn lub_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, 
b: Region) -> Region { - // cannot add constraints once regions are resolved - assert!(self.values_are_none()); - - debug!("RegionVarBindings: lub_regions({:?}, {:?})", a, b); - match (a, b) { - (ReStatic, _) | (_, ReStatic) => { - ReStatic // nothing lives longer than static - } - - _ => { - self.combine_vars(Lub, a, b, origin.clone(), |this, old_r, new_r| { - this.make_subregion(origin.clone(), old_r, new_r) - }) - } - } - } - - pub fn glb_regions(&self, origin: SubregionOrigin<'tcx>, a: Region, b: Region) -> Region { - // cannot add constraints once regions are resolved - assert!(self.values_are_none()); - - debug!("RegionVarBindings: glb_regions({:?}, {:?})", a, b); - match (a, b) { - (ReStatic, r) | (r, ReStatic) => { - // static lives longer than everything else - r - } - - _ => { - self.combine_vars(Glb, a, b, origin.clone(), |this, old_r, new_r| { - this.make_subregion(origin.clone(), new_r, old_r) - }) - } - } - } - - pub fn resolve_var(&self, rid: RegionVid) -> ty::Region { - match *self.values.borrow() { - None => { - self.tcx.sess.span_bug((*self.var_origins.borrow())[rid.index as usize].span(), - "attempt to resolve region variable before values have \ - been computed!") - } - Some(ref values) => { - let r = lookup(values, rid); - debug!("resolve_var({:?}) = {:?}", rid, r); - r - } - } - } - - pub fn opportunistic_resolve_var(&self, rid: RegionVid) -> ty::Region { - ty::ReVar(self.unification_table.borrow_mut().find_value(rid).min_vid) - } - - fn combine_map(&self, t: CombineMapType) -> &RefCell { - match t { - Glb => &self.glbs, - Lub => &self.lubs, - } - } - - pub fn combine_vars(&self, - t: CombineMapType, - a: Region, - b: Region, - origin: SubregionOrigin<'tcx>, - mut relate: F) - -> Region - where F: FnMut(&RegionVarBindings<'a, 'tcx>, Region, Region) - { - let vars = TwoRegions { a: a, b: b }; - match self.combine_map(t).borrow().get(&vars) { - Some(&c) => { - return ReVar(c); - } - None => {} - } - let c = 
self.new_region_var(MiscVariable(origin.span())); - self.combine_map(t).borrow_mut().insert(vars, c); - if self.in_snapshot() { - self.undo_log.borrow_mut().push(AddCombination(t, vars)); - } - relate(self, a, ReVar(c)); - relate(self, b, ReVar(c)); - debug!("combine_vars() c={:?}", c); - ReVar(c) - } - - pub fn vars_created_since_snapshot(&self, mark: &RegionSnapshot) -> Vec { - self.undo_log.borrow()[mark.length..] - .iter() - .filter_map(|&elt| { - match elt { - AddVar(vid) => Some(vid), - _ => None, - } - }) - .collect() - } - - /// Computes all regions that have been related to `r0` in any way since the mark `mark` was - /// made---`r0` itself will be the first entry. This is used when checking whether skolemized - /// regions are being improperly related to other regions. - pub fn tainted(&self, mark: &RegionSnapshot, r0: Region) -> Vec { - debug!("tainted(mark={:?}, r0={:?})", mark, r0); - let _indenter = indenter(); - - // `result_set` acts as a worklist: we explore all outgoing - // edges and add any new regions we find to result_set. This - // is not a terribly efficient implementation. 
- let mut result_set = vec![r0]; - let mut result_index = 0; - while result_index < result_set.len() { - // nb: can't use usize::range() here because result_set grows - let r = result_set[result_index]; - debug!("result_index={}, r={:?}", result_index, r); - - for undo_entry in self.undo_log.borrow()[mark.length..].iter() { - match undo_entry { - &AddConstraint(ConstrainVarSubVar(a, b)) => { - consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), ReVar(b)); - } - &AddConstraint(ConstrainRegSubVar(a, b)) => { - consider_adding_bidirectional_edges(&mut result_set, r, a, ReVar(b)); - } - &AddConstraint(ConstrainVarSubReg(a, b)) => { - consider_adding_bidirectional_edges(&mut result_set, r, ReVar(a), b); - } - &AddGiven(a, b) => { - consider_adding_bidirectional_edges(&mut result_set, - r, - ReFree(a), - ReVar(b)); - } - &AddVerify(i) => { - match (*self.verifys.borrow())[i] { - VerifyRegSubReg(_, a, b) => { - consider_adding_bidirectional_edges(&mut result_set, r, a, b); - } - VerifyGenericBound(_, _, a, ref bound) => { - bound.for_each_region(&mut |b| { - consider_adding_bidirectional_edges(&mut result_set, r, a, b) - }); - } - } - } - &AddCombination(..) | - &AddVar(..) | - &OpenSnapshot | - &CommitedSnapshot => {} - } - } - - result_index += 1; - } - - return result_set; - - fn consider_adding_bidirectional_edges(result_set: &mut Vec, - r: Region, - r1: Region, - r2: Region) { - consider_adding_directed_edge(result_set, r, r1, r2); - consider_adding_directed_edge(result_set, r, r2, r1); - } - - fn consider_adding_directed_edge(result_set: &mut Vec, - r: Region, - r1: Region, - r2: Region) { - if r == r1 { - // Clearly, this is potentially inefficient. - if !result_set.iter().any(|x| *x == r2) { - result_set.push(r2); - } - } - } - } - - /// This function performs the actual region resolution. It must be - /// called after all constraints have been added. 
It performs a - /// fixed-point iteration to find region values which satisfy all - /// constraints, assuming such values can be found; if they cannot, - /// errors are reported. - pub fn resolve_regions(&self, - free_regions: &FreeRegionMap, - subject_node: ast::NodeId) - -> Vec> { - debug!("RegionVarBindings: resolve_regions()"); - let mut errors = vec![]; - let v = self.infer_variable_values(free_regions, &mut errors, subject_node); - *self.values.borrow_mut() = Some(v); - errors - } - - fn lub_concrete_regions(&self, free_regions: &FreeRegionMap, a: Region, b: Region) -> Region { - match (a, b) { - (ReLateBound(..), _) | - (_, ReLateBound(..)) | - (ReEarlyBound(..), _) | - (_, ReEarlyBound(..)) => { - self.tcx.sess.bug(&format!("cannot relate bound region: LUB({:?}, {:?})", a, b)); - } - - (ReStatic, _) | (_, ReStatic) => { - ReStatic // nothing lives longer than static - } - - (ReEmpty, r) | (r, ReEmpty) => { - r // everything lives longer than empty - } - - (ReVar(v_id), _) | (_, ReVar(v_id)) => { - self.tcx.sess.span_bug((*self.var_origins.borrow())[v_id.index as usize].span(), - &format!("lub_concrete_regions invoked with non-concrete \ - regions: {:?}, {:?}", - a, - b)); - } - - (ReFree(ref fr), ReScope(s_id)) | - (ReScope(s_id), ReFree(ref fr)) => { - let f = ReFree(*fr); - // A "free" region can be interpreted as "some region - // at least as big as the block fr.scope_id". 
So, we can - // reasonably compare free regions and scopes: - let r_id = self.tcx.region_maps.nearest_common_ancestor(fr.scope, s_id); - - if r_id == fr.scope { - // if the free region's scope `fr.scope_id` is bigger than - // the scope region `s_id`, then the LUB is the free - // region itself: - f - } else { - // otherwise, we don't know what the free region is, - // so we must conservatively say the LUB is static: - ReStatic - } - } - - (ReScope(a_id), ReScope(b_id)) => { - // The region corresponding to an outer block is a - // subtype of the region corresponding to an inner - // block. - ReScope(self.tcx.region_maps.nearest_common_ancestor(a_id, b_id)) - } - - (ReFree(a_fr), ReFree(b_fr)) => { - free_regions.lub_free_regions(a_fr, b_fr) - } - - // For these types, we cannot define any additional - // relationship: - (ReSkolemized(..), _) | - (_, ReSkolemized(..)) => { - if a == b { - a - } else { - ReStatic - } - } - } - } -} - -// ______________________________________________________________________ - -#[derive(Copy, Clone, Debug)] -pub enum VarValue { - Value(Region), - ErrorValue, -} - -struct VarData { - value: VarValue, -} - -struct RegionAndOrigin<'tcx> { - region: Region, - origin: SubregionOrigin<'tcx>, -} - -type RegionGraph = graph::Graph<(), Constraint>; - -impl<'a, 'tcx> RegionVarBindings<'a, 'tcx> { - fn infer_variable_values(&self, - free_regions: &FreeRegionMap, - errors: &mut Vec>, - subject: ast::NodeId) - -> Vec { - let mut var_data = self.construct_var_data(); - - // Dorky hack to cause `dump_constraints` to only get called - // if debug mode is enabled: - debug!("----() End constraint listing (subject={}) {:?}---", - subject, - self.dump_constraints(subject)); - graphviz::maybe_print_constraints_for(self, subject); - - let graph = self.construct_graph(); - self.expand_givens(&graph); - self.expansion(free_regions, &mut var_data); - self.contraction(free_regions, &mut var_data); - let values = 
self.extract_values_and_collect_conflicts(free_regions, - &var_data, - &graph, - errors); - self.collect_concrete_region_errors(free_regions, &values, errors); - values - } - - fn construct_var_data(&self) -> Vec { - (0..self.num_vars() as usize) - .map(|_| VarData { value: Value(ty::ReEmpty) }) - .collect() - } - - fn dump_constraints(&self, subject: ast::NodeId) { - debug!("----() Start constraint listing (subject={}) ()----", - subject); - for (idx, (constraint, _)) in self.constraints.borrow().iter().enumerate() { - debug!("Constraint {} => {:?}", idx, constraint); - } - } - - fn expand_givens(&self, graph: &RegionGraph) { - // Givens are a kind of horrible hack to account for - // constraints like 'c <= '0 that are known to hold due to - // closure signatures (see the comment above on the `givens` - // field). They should go away. But until they do, the role - // of this fn is to account for the transitive nature: - // - // Given 'c <= '0 - // and '0 <= '1 - // then 'c <= '1 - - let mut givens = self.givens.borrow_mut(); - let seeds: Vec<_> = givens.iter().cloned().collect(); - for (fr, vid) in seeds { - let seed_index = NodeIndex(vid.index as usize); - for succ_index in graph.depth_traverse(seed_index) { - let succ_index = succ_index.0 as u32; - if succ_index < self.num_vars() { - let succ_vid = RegionVid { index: succ_index }; - givens.insert((fr, succ_vid)); - } - } - } - } - - fn expansion(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) { - self.iterate_until_fixed_point("Expansion", |constraint| { - debug!("expansion: constraint={:?} origin={:?}", - constraint, - self.constraints - .borrow() - .get(constraint) - .unwrap()); - match *constraint { - ConstrainRegSubVar(a_region, b_vid) => { - let b_data = &mut var_data[b_vid.index as usize]; - self.expand_node(free_regions, a_region, b_vid, b_data) - } - ConstrainVarSubVar(a_vid, b_vid) => { - match var_data[a_vid.index as usize].value { - ErrorValue => false, - Value(a_region) => { - let 
b_node = &mut var_data[b_vid.index as usize]; - self.expand_node(free_regions, a_region, b_vid, b_node) - } - } - } - ConstrainVarSubReg(..) => { - // This is a contraction constraint. Ignore it. - false - } - } - }) - } - - fn expand_node(&self, - free_regions: &FreeRegionMap, - a_region: Region, - b_vid: RegionVid, - b_data: &mut VarData) - -> bool { - debug!("expand_node({:?}, {:?} == {:?})", - a_region, - b_vid, - b_data.value); - - // Check if this relationship is implied by a given. - match a_region { - ty::ReFree(fr) => { - if self.givens.borrow().contains(&(fr, b_vid)) { - debug!("given"); - return false; - } - } - _ => {} - } - - match b_data.value { - Value(cur_region) => { - let lub = self.lub_concrete_regions(free_regions, a_region, cur_region); - if lub == cur_region { - return false; - } - - debug!("Expanding value of {:?} from {:?} to {:?}", - b_vid, - cur_region, - lub); - - b_data.value = Value(lub); - return true; - } - - ErrorValue => { - return false; - } - } - } - - // FIXME(#29436) -- this fn would just go away if we removed ConstrainVarSubReg - fn contraction(&self, free_regions: &FreeRegionMap, var_data: &mut [VarData]) { - self.iterate_until_fixed_point("Contraction", |constraint| { - debug!("contraction: constraint={:?} origin={:?}", - constraint, - self.constraints - .borrow() - .get(constraint) - .unwrap()); - match *constraint { - ConstrainRegSubVar(..) | - ConstrainVarSubVar(..) => { - // Expansion will ensure that these constraints hold. Ignore. 
- } - ConstrainVarSubReg(a_vid, b_region) => { - let a_data = &mut var_data[a_vid.index as usize]; - debug!("contraction: {:?} == {:?}, {:?}", - a_vid, - a_data.value, - b_region); - - let a_region = match a_data.value { - ErrorValue => return false, - Value(a_region) => a_region, - }; - - if !free_regions.is_subregion_of(self.tcx, a_region, b_region) { - debug!("Setting {:?} to ErrorValue: {:?} not subregion of {:?}", - a_vid, - a_region, - b_region); - a_data.value = ErrorValue; - } - } - } - - false - }) - } - - fn collect_concrete_region_errors(&self, - free_regions: &FreeRegionMap, - values: &Vec, - errors: &mut Vec>) { - let mut reg_reg_dups = FnvHashSet(); - for verify in self.verifys.borrow().iter() { - match *verify { - VerifyRegSubReg(ref origin, sub, sup) => { - if free_regions.is_subregion_of(self.tcx, sub, sup) { - continue; - } - - if !reg_reg_dups.insert((sub, sup)) { - continue; - } - - debug!("region inference error at {:?}: {:?} <= {:?} is not true", - origin, - sub, - sup); - - errors.push(ConcreteFailure((*origin).clone(), sub, sup)); - } - - VerifyGenericBound(ref kind, ref origin, sub, ref bound) => { - let sub = normalize(values, sub); - if bound.is_met(self.tcx, free_regions, values, sub) { - continue; - } - - debug!("region inference error at {:?}: verifying {:?} <= {:?}", - origin, - sub, - bound); - - errors.push(GenericBoundFailure((*origin).clone(), kind.clone(), sub)); - } - } - } - } - - fn extract_values_and_collect_conflicts(&self, - free_regions: &FreeRegionMap, - var_data: &[VarData], - graph: &RegionGraph, - errors: &mut Vec>) - -> Vec { - debug!("extract_values_and_collect_conflicts()"); - - // This is the best way that I have found to suppress - // duplicate and related errors. Basically we keep a set of - // flags for every node. Whenever an error occurs, we will - // walk some portion of the graph looking to find pairs of - // conflicting regions to report to the user. 
As we walk, we - // trip the flags from false to true, and if we find that - // we've already reported an error involving any particular - // node we just stop and don't report the current error. The - // idea is to report errors that derive from independent - // regions of the graph, but not those that derive from - // overlapping locations. - let mut dup_vec = vec![u32::MAX; self.num_vars() as usize]; - - for idx in 0..self.num_vars() as usize { - match var_data[idx].value { - Value(_) => { - /* Inference successful */ - } - ErrorValue => { - /* Inference impossible, this value contains - inconsistent constraints. - - I think that in this case we should report an - error now---unlike the case above, we can't - wait to see whether the user needs the result - of this variable. The reason is that the mere - existence of this variable implies that the - region graph is inconsistent, whether or not it - is used. - - For example, we may have created a region - variable that is the GLB of two other regions - which do not have a GLB. Even if that variable - is not used, it implies that those two regions - *should* have a GLB. - - At least I think this is true. It may be that - the mere existence of a conflict in a region variable - that is not used is not a problem, so if this rule - starts to create problems we'll have to revisit - this portion of the code and think hard about it. 
=) */ - - let node_vid = RegionVid { index: idx as u32 }; - self.collect_error_for_expanding_node(free_regions, - graph, - &mut dup_vec, - node_vid, - errors); - } - } - } - - (0..self.num_vars() as usize).map(|idx| var_data[idx].value).collect() - } - - fn construct_graph(&self) -> RegionGraph { - let num_vars = self.num_vars(); - - let constraints = self.constraints.borrow(); - - let mut graph = graph::Graph::new(); - - for _ in 0..num_vars { - graph.add_node(()); - } - let dummy_idx = graph.add_node(()); - - for (constraint, _) in constraints.iter() { - match *constraint { - ConstrainVarSubVar(a_id, b_id) => { - graph.add_edge(NodeIndex(a_id.index as usize), - NodeIndex(b_id.index as usize), - *constraint); - } - ConstrainRegSubVar(_, b_id) => { - graph.add_edge(dummy_idx, NodeIndex(b_id.index as usize), *constraint); - } - ConstrainVarSubReg(a_id, _) => { - graph.add_edge(NodeIndex(a_id.index as usize), dummy_idx, *constraint); - } - } - } - - return graph; - } - - fn collect_error_for_expanding_node(&self, - free_regions: &FreeRegionMap, - graph: &RegionGraph, - dup_vec: &mut [u32], - node_idx: RegionVid, - errors: &mut Vec>) { - // Errors in expanding nodes result from a lower-bound that is - // not contained by an upper-bound. - let (mut lower_bounds, lower_dup) = self.collect_concrete_regions(graph, - node_idx, - graph::INCOMING, - dup_vec); - let (mut upper_bounds, upper_dup) = self.collect_concrete_regions(graph, - node_idx, - graph::OUTGOING, - dup_vec); - - if lower_dup || upper_dup { - return; - } - - // We place free regions first because we are special casing - // SubSupConflict(ReFree, ReFree) when reporting error, and so - // the user will more likely get a specific suggestion. 
- fn free_regions_first(a: &RegionAndOrigin, b: &RegionAndOrigin) -> Ordering { - match (a.region, b.region) { - (ReFree(..), ReFree(..)) => Equal, - (ReFree(..), _) => Less, - (_, ReFree(..)) => Greater, - (_, _) => Equal, - } - } - lower_bounds.sort_by(|a, b| free_regions_first(a, b)); - upper_bounds.sort_by(|a, b| free_regions_first(a, b)); - - for lower_bound in &lower_bounds { - for upper_bound in &upper_bounds { - if !free_regions.is_subregion_of(self.tcx, lower_bound.region, upper_bound.region) { - let origin = (*self.var_origins.borrow())[node_idx.index as usize].clone(); - debug!("region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \ - sup: {:?}", - origin, - node_idx, - lower_bound.region, - upper_bound.region); - errors.push(SubSupConflict(origin, - lower_bound.origin.clone(), - lower_bound.region, - upper_bound.origin.clone(), - upper_bound.region)); - return; - } - } - } - - self.tcx.sess.span_bug((*self.var_origins.borrow())[node_idx.index as usize].span(), - &format!("collect_error_for_expanding_node() could not find \ - error for var {:?}, lower_bounds={:?}, \ - upper_bounds={:?}", - node_idx, - lower_bounds, - upper_bounds)); - } - - fn collect_concrete_regions(&self, - graph: &RegionGraph, - orig_node_idx: RegionVid, - dir: Direction, - dup_vec: &mut [u32]) - -> (Vec>, bool) { - struct WalkState<'tcx> { - set: FnvHashSet, - stack: Vec, - result: Vec>, - dup_found: bool, - } - let mut state = WalkState { - set: FnvHashSet(), - stack: vec![orig_node_idx], - result: Vec::new(), - dup_found: false, - }; - state.set.insert(orig_node_idx); - - // to start off the process, walk the source node in the - // direction specified - process_edges(self, &mut state, graph, orig_node_idx, dir); - - while !state.stack.is_empty() { - let node_idx = state.stack.pop().unwrap(); - - // check whether we've visited this node on some previous walk - if dup_vec[node_idx.index as usize] == u32::MAX { - dup_vec[node_idx.index as usize] = orig_node_idx.index; - 
} else if dup_vec[node_idx.index as usize] != orig_node_idx.index { - state.dup_found = true; - } - - debug!("collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})", - orig_node_idx, - node_idx); - - // figure out the direction from which this node takes its - // values, and search for concrete regions etc in that direction - let dir = graph::INCOMING; - process_edges(self, &mut state, graph, node_idx, dir); - } - - let WalkState {result, dup_found, ..} = state; - return (result, dup_found); - - fn process_edges<'a, 'tcx>(this: &RegionVarBindings<'a, 'tcx>, - state: &mut WalkState<'tcx>, - graph: &RegionGraph, - source_vid: RegionVid, - dir: Direction) { - debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir); - - let source_node_index = NodeIndex(source_vid.index as usize); - for (_, edge) in graph.adjacent_edges(source_node_index, dir) { - match edge.data { - ConstrainVarSubVar(from_vid, to_vid) => { - let opp_vid = if from_vid == source_vid { - to_vid - } else { - from_vid - }; - if state.set.insert(opp_vid) { - state.stack.push(opp_vid); - } - } - - ConstrainRegSubVar(region, _) | - ConstrainVarSubReg(_, region) => { - state.result.push(RegionAndOrigin { - region: region, - origin: this.constraints.borrow().get(&edge.data).unwrap().clone(), - }); - } - } - } - } - } - - fn iterate_until_fixed_point(&self, tag: &str, mut body: F) - where F: FnMut(&Constraint) -> bool - { - let mut iteration = 0; - let mut changed = true; - while changed { - changed = false; - iteration += 1; - debug!("---- {} Iteration {}{}", "#", tag, iteration); - for (constraint, _) in self.constraints.borrow().iter() { - let edge_changed = body(constraint); - if edge_changed { - debug!("Updated due to constraint {:?}", constraint); - changed = true; - } - } - } - debug!("---- {} Complete after {} iteration(s)", tag, iteration); - } - -} - -impl<'tcx> fmt::Debug for Verify<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - VerifyRegSubReg(_, 
ref a, ref b) => { - write!(f, "VerifyRegSubReg({:?}, {:?})", a, b) - } - VerifyGenericBound(_, ref p, ref a, ref bs) => { - write!(f, "VerifyGenericBound({:?}, {:?}, {:?})", p, a, bs) - } - } - } -} - -fn normalize(values: &Vec, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => lookup(values, rid), - _ => r, - } -} - -fn lookup(values: &Vec, rid: ty::RegionVid) -> ty::Region { - match values[rid.index as usize] { - Value(r) => r, - ErrorValue => ReStatic, // Previously reported error. - } -} - -impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin) - } -} - -impl fmt::Debug for RegionSnapshot { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionSnapshot(length={},skolemization={})", - self.length, self.skolemization_count) - } -} - -impl<'tcx> fmt::Debug for GenericKind<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - GenericKind::Param(ref p) => write!(f, "{:?}", p), - GenericKind::Projection(ref p) => write!(f, "{:?}", p), - } - } -} - -impl<'tcx> fmt::Display for GenericKind<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - GenericKind::Param(ref p) => write!(f, "{}", p), - GenericKind::Projection(ref p) => write!(f, "{}", p), - } - } -} - -impl<'tcx> GenericKind<'tcx> { - pub fn to_ty(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> { - match *self { - GenericKind::Param(ref p) => p.to_ty(tcx), - GenericKind::Projection(ref p) => tcx.mk_projection(p.trait_ref.clone(), p.item_name), - } - } -} - -impl VerifyBound { - fn for_each_region(&self, f: &mut FnMut(ty::Region)) { - match self { - &VerifyBound::AnyRegion(ref rs) | - &VerifyBound::AllRegions(ref rs) => for &r in rs { - f(r); - }, - - &VerifyBound::AnyBound(ref bs) | - &VerifyBound::AllBounds(ref bs) => for b in bs { - b.for_each_region(f); - }, - } - } - - pub fn must_hold(&self) -> bool { - 
match self { - &VerifyBound::AnyRegion(ref bs) => bs.contains(&ty::ReStatic), - &VerifyBound::AllRegions(ref bs) => bs.is_empty(), - &VerifyBound::AnyBound(ref bs) => bs.iter().any(|b| b.must_hold()), - &VerifyBound::AllBounds(ref bs) => bs.iter().all(|b| b.must_hold()), - } - } - - pub fn cannot_hold(&self) -> bool { - match self { - &VerifyBound::AnyRegion(ref bs) => bs.is_empty(), - &VerifyBound::AllRegions(ref bs) => bs.contains(&ty::ReEmpty), - &VerifyBound::AnyBound(ref bs) => bs.iter().all(|b| b.cannot_hold()), - &VerifyBound::AllBounds(ref bs) => bs.iter().any(|b| b.cannot_hold()), - } - } - - pub fn or(self, vb: VerifyBound) -> VerifyBound { - if self.must_hold() || vb.cannot_hold() { - self - } else if self.cannot_hold() || vb.must_hold() { - vb - } else { - VerifyBound::AnyBound(vec![self, vb]) - } - } - - pub fn and(self, vb: VerifyBound) -> VerifyBound { - if self.must_hold() && vb.must_hold() { - self - } else if self.cannot_hold() && vb.cannot_hold() { - self - } else { - VerifyBound::AllBounds(vec![self, vb]) - } - } - - fn is_met<'tcx>(&self, - tcx: &ty::ctxt<'tcx>, - free_regions: &FreeRegionMap, - var_values: &Vec, - min: ty::Region) - -> bool { - match self { - &VerifyBound::AnyRegion(ref rs) => - rs.iter() - .map(|&r| normalize(var_values, r)) - .any(|r| free_regions.is_subregion_of(tcx, min, r)), - - &VerifyBound::AllRegions(ref rs) => - rs.iter() - .map(|&r| normalize(var_values, r)) - .all(|r| free_regions.is_subregion_of(tcx, min, r)), - - &VerifyBound::AnyBound(ref bs) => - bs.iter() - .any(|b| b.is_met(tcx, free_regions, var_values, min)), - - &VerifyBound::AllBounds(ref bs) => - bs.iter() - .all(|b| b.is_met(tcx, free_regions, var_values, min)), - } - } -} diff --git a/src/librustc/middle/infer/resolve.rs b/src/librustc/middle/infer/resolve.rs deleted file mode 100644 index c68d0a9fa5683..0000000000000 --- a/src/librustc/middle/infer/resolve.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::{InferCtxt, FixupError, FixupResult}; -use middle::ty::{self, Ty, TypeFoldable}; - -/////////////////////////////////////////////////////////////////////////// -// OPPORTUNISTIC TYPE RESOLVER - -/// The opportunistic type resolver can be used at any time. It simply replaces -/// type variables that have been unified with the things they have -/// been unified with (similar to `shallow_resolve`, but deep). This is -/// useful for printing messages etc but also required at various -/// points for correctness. -pub struct OpportunisticTypeResolver<'a, 'tcx:'a> { - infcx: &'a InferCtxt<'a, 'tcx>, -} - -impl<'a, 'tcx> OpportunisticTypeResolver<'a, 'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> OpportunisticTypeResolver<'a, 'tcx> { - OpportunisticTypeResolver { infcx: infcx } - } -} - -impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeResolver<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.infcx.tcx - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.has_infer_types() { - t // micro-optimize -- if there is nothing in this type that this fold affects... - } else { - let t0 = self.infcx.shallow_resolve(t); - t0.super_fold_with(self) - } - } -} - -/// The opportunistic type and region resolver is similar to the -/// opportunistic type resolver, but also opportunistly resolves -/// regions. It is useful for canonicalization. 
-pub struct OpportunisticTypeAndRegionResolver<'a, 'tcx:'a> { - infcx: &'a InferCtxt<'a, 'tcx>, -} - -impl<'a, 'tcx> OpportunisticTypeAndRegionResolver<'a, 'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self { - OpportunisticTypeAndRegionResolver { infcx: infcx } - } -} - -impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for OpportunisticTypeAndRegionResolver<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.infcx.tcx - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.needs_infer() { - t // micro-optimize -- if there is nothing in this type that this fold affects... - } else { - let t0 = self.infcx.shallow_resolve(t); - t0.super_fold_with(self) - } - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => self.infcx.region_vars.opportunistic_resolve_var(rid), - _ => r, - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// FULL TYPE RESOLUTION - -/// Full type resolution replaces all type and region variables with -/// their concrete results. If any variable cannot be replaced (never unified, etc) -/// then an `Err` result is returned. -pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a,'tcx>, value: &T) -> FixupResult - where T : TypeFoldable<'tcx> -{ - let mut full_resolver = FullTypeResolver { infcx: infcx, err: None }; - let result = value.fold_with(&mut full_resolver); - match full_resolver.err { - None => Ok(result), - Some(e) => Err(e), - } -} - -// N.B. This type is not public because the protocol around checking the -// `err` field is not enforcable otherwise. -struct FullTypeResolver<'a, 'tcx:'a> { - infcx: &'a InferCtxt<'a, 'tcx>, - err: Option, -} - -impl<'a, 'tcx> ty::fold::TypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.infcx.tcx - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.needs_infer() { - t // micro-optimize -- if there is nothing in this type that this fold affects... 
- } else { - let t = self.infcx.shallow_resolve(t); - match t.sty { - ty::TyInfer(ty::TyVar(vid)) => { - self.err = Some(FixupError::UnresolvedTy(vid)); - self.tcx().types.err - } - ty::TyInfer(ty::IntVar(vid)) => { - self.err = Some(FixupError::UnresolvedIntTy(vid)); - self.tcx().types.err - } - ty::TyInfer(ty::FloatVar(vid)) => { - self.err = Some(FixupError::UnresolvedFloatTy(vid)); - self.tcx().types.err - } - ty::TyInfer(_) => { - self.infcx.tcx.sess.bug( - &format!("Unexpected type in full type resolver: {:?}", - t)); - } - _ => { - t.super_fold_with(self) - } - } - } - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReVar(rid) => self.infcx.region_vars.resolve_var(rid), - _ => r, - } - } -} diff --git a/src/librustc/middle/infer/sub.rs b/src/librustc/middle/infer/sub.rs deleted file mode 100644 index 2cd686fde156e..0000000000000 --- a/src/librustc/middle/infer/sub.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::combine::{self, CombineFields}; -use super::higher_ranked::HigherRankedRelations; -use super::SubregionOrigin; -use super::type_variable::{SubtypeOf, SupertypeOf}; - -use middle::ty::{self, Ty}; -use middle::ty::TyVar; -use middle::ty::relate::{Cause, Relate, RelateResult, TypeRelation}; -use std::mem; - -/// Ensures `a` is made a subtype of `b`. Returns `a` on success. 
-pub struct Sub<'a, 'tcx: 'a> { - fields: CombineFields<'a, 'tcx>, -} - -impl<'a, 'tcx> Sub<'a, 'tcx> { - pub fn new(f: CombineFields<'a, 'tcx>) -> Sub<'a, 'tcx> { - Sub { fields: f } - } -} - -impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Sub<'a, 'tcx> { - fn tag(&self) -> &'static str { "Sub" } - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.fields.infcx.tcx } - fn a_is_expected(&self) -> bool { self.fields.a_is_expected } - - fn with_cause(&mut self, cause: Cause, f: F) -> R - where F: FnOnce(&mut Self) -> R - { - debug!("sub with_cause={:?}", cause); - let old_cause = mem::replace(&mut self.fields.cause, Some(cause)); - let r = f(self); - debug!("sub old_cause={:?}", old_cause); - self.fields.cause = old_cause; - r - } - - fn relate_with_variance>(&mut self, - variance: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - match variance { - ty::Invariant => self.fields.equate().relate(a, b), - ty::Covariant => self.relate(a, b), - ty::Bivariant => self.fields.bivariate().relate(a, b), - ty::Contravariant => self.fields.switch_expected().sub().relate(b, a), - } - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - debug!("{}.tys({:?}, {:?})", self.tag(), a, b); - - if a == b { return Ok(a); } - - let infcx = self.fields.infcx; - let a = infcx.type_variables.borrow().replace_if_possible(a); - let b = infcx.type_variables.borrow().replace_if_possible(b); - match (&a.sty, &b.sty) { - (&ty::TyInfer(TyVar(a_id)), &ty::TyInfer(TyVar(b_id))) => { - infcx.type_variables - .borrow_mut() - .relate_vars(a_id, SubtypeOf, b_id); - Ok(a) - } - (&ty::TyInfer(TyVar(a_id)), _) => { - try!(self.fields - .switch_expected() - .instantiate(b, SupertypeOf, a_id)); - Ok(a) - } - (_, &ty::TyInfer(TyVar(b_id))) => { - try!(self.fields.instantiate(a, SubtypeOf, b_id)); - Ok(a) - } - - (&ty::TyError, _) | (_, &ty::TyError) => { - Ok(self.tcx().types.err) - } - - _ => { - try!(combine::super_combine_tys(self.fields.infcx, self, a, b)); - Ok(a) - } - } 
- } - - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { - debug!("{}.regions({:?}, {:?}) self.cause={:?}", - self.tag(), a, b, self.fields.cause); - // FIXME -- we have more fine-grained information available - // from the "cause" field, we could perhaps give more tailored - // error messages. - let origin = SubregionOrigin::Subtype(self.fields.trace.clone()); - self.fields.infcx.region_vars.make_subregion(origin, a, b); - Ok(a) - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a,'tcx> - { - self.fields.higher_ranked_sub(a, b) - } -} diff --git a/src/librustc/middle/infer/type_variable.rs b/src/librustc/middle/infer/type_variable.rs deleted file mode 100644 index e4af098c2a42d..0000000000000 --- a/src/librustc/middle/infer/type_variable.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use self::RelationDir::*; -use self::TypeVariableValue::*; -use self::UndoEntry::*; -use middle::def_id::{DefId}; -use middle::ty::{self, Ty}; -use syntax::codemap::Span; - -use std::cmp::min; -use std::marker::PhantomData; -use std::mem; -use std::u32; -use rustc_data_structures::snapshot_vec as sv; - -pub struct TypeVariableTable<'tcx> { - values: sv::SnapshotVec>, -} - -struct TypeVariableData<'tcx> { - value: TypeVariableValue<'tcx>, - diverging: bool -} - -enum TypeVariableValue<'tcx> { - Known(Ty<'tcx>), - Bounded { - relations: Vec, - default: Option> - } -} - -// We will use this to store the required information to recapitulate what happened when -// an error occurs. 
-#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct Default<'tcx> { - pub ty: Ty<'tcx>, - /// The span where the default was incurred - pub origin_span: Span, - /// The definition that the default originates from - pub def_id: DefId -} - -pub struct Snapshot { - snapshot: sv::Snapshot -} - -enum UndoEntry<'tcx> { - // The type of the var was specified. - SpecifyVar(ty::TyVid, Vec, Option>), - Relate(ty::TyVid, ty::TyVid), -} - -struct Delegate<'tcx>(PhantomData<&'tcx ()>); - -type Relation = (RelationDir, ty::TyVid); - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum RelationDir { - SubtypeOf, SupertypeOf, EqTo, BiTo -} - -impl RelationDir { - fn opposite(self) -> RelationDir { - match self { - SubtypeOf => SupertypeOf, - SupertypeOf => SubtypeOf, - EqTo => EqTo, - BiTo => BiTo, - } - } -} - -impl<'tcx> TypeVariableTable<'tcx> { - pub fn new() -> TypeVariableTable<'tcx> { - TypeVariableTable { values: sv::SnapshotVec::new() } - } - - fn relations<'a>(&'a mut self, a: ty::TyVid) -> &'a mut Vec { - relations(self.values.get_mut(a.index as usize)) - } - - pub fn default(&self, vid: ty::TyVid) -> Option> { - match &self.values.get(vid.index as usize).value { - &Known(_) => None, - &Bounded { ref default, .. } => default.clone() - } - } - - pub fn var_diverges<'a>(&'a self, vid: ty::TyVid) -> bool { - self.values.get(vid.index as usize).diverging - } - - /// Records that `a <: b`, `a :> b`, or `a == b`, depending on `dir`. - /// - /// Precondition: neither `a` nor `b` are known. - pub fn relate_vars(&mut self, a: ty::TyVid, dir: RelationDir, b: ty::TyVid) { - if a != b { - self.relations(a).push((dir, b)); - self.relations(b).push((dir.opposite(), a)); - self.values.record(Relate(a, b)); - } - } - - /// Instantiates `vid` with the type `ty` and then pushes an entry onto `stack` for each of the - /// relations of `vid` to other variables. The relations will have the form `(ty, dir, vid1)` - /// where `vid1` is some other variable id. 
- pub fn instantiate_and_push( - &mut self, - vid: ty::TyVid, - ty: Ty<'tcx>, - stack: &mut Vec<(Ty<'tcx>, RelationDir, ty::TyVid)>) - { - let old_value = { - let value_ptr = &mut self.values.get_mut(vid.index as usize).value; - mem::replace(value_ptr, Known(ty)) - }; - - let (relations, default) = match old_value { - Bounded { relations, default } => (relations, default), - Known(_) => panic!("Asked to instantiate variable that is \ - already instantiated") - }; - - for &(dir, vid) in &relations { - stack.push((ty, dir, vid)); - } - - self.values.record(SpecifyVar(vid, relations, default)); - } - - pub fn new_var(&mut self, - diverging: bool, - default: Option>) -> ty::TyVid { - let index = self.values.push(TypeVariableData { - value: Bounded { relations: vec![], default: default }, - diverging: diverging - }); - ty::TyVid { index: index as u32 } - } - - pub fn probe(&self, vid: ty::TyVid) -> Option> { - match self.values.get(vid.index as usize).value { - Bounded { .. } => None, - Known(t) => Some(t) - } - } - - pub fn replace_if_possible(&self, t: Ty<'tcx>) -> Ty<'tcx> { - match t.sty { - ty::TyInfer(ty::TyVar(v)) => { - match self.probe(v) { - None => t, - Some(u) => u - } - } - _ => t, - } - } - - pub fn snapshot(&mut self) -> Snapshot { - Snapshot { snapshot: self.values.start_snapshot() } - } - - pub fn rollback_to(&mut self, s: Snapshot) { - self.values.rollback_to(s.snapshot); - } - - pub fn commit(&mut self, s: Snapshot) { - self.values.commit(s.snapshot); - } - - pub fn types_escaping_snapshot(&self, s: &Snapshot) -> Vec> { - /*! - * Find the set of type variables that existed *before* `s` - * but which have only been unified since `s` started, and - * return the types with which they were unified. So if we had - * a type variable `V0`, then we started the snapshot, then we - * created a type variable `V1`, unifed `V0` with `T0`, and - * unified `V1` with `T1`, this function would return `{T0}`. 
- */ - - let mut new_elem_threshold = u32::MAX; - let mut escaping_types = Vec::new(); - let actions_since_snapshot = self.values.actions_since_snapshot(&s.snapshot); - debug!("actions_since_snapshot.len() = {}", actions_since_snapshot.len()); - for action in actions_since_snapshot { - match *action { - sv::UndoLog::NewElem(index) => { - // if any new variables were created during the - // snapshot, remember the lower index (which will - // always be the first one we see). Note that this - // action must precede those variables being - // specified. - new_elem_threshold = min(new_elem_threshold, index as u32); - debug!("NewElem({}) new_elem_threshold={}", index, new_elem_threshold); - } - - sv::UndoLog::Other(SpecifyVar(vid, _, _)) => { - if vid.index < new_elem_threshold { - // quick check to see if this variable was - // created since the snapshot started or not. - let escaping_type = self.probe(vid).unwrap(); - escaping_types.push(escaping_type); - } - debug!("SpecifyVar({:?}) new_elem_threshold={}", vid, new_elem_threshold); - } - - _ => { } - } - } - - escaping_types - } - - pub fn unsolved_variables(&self) -> Vec { - self.values - .iter() - .enumerate() - .filter_map(|(i, value)| match &value.value { - &TypeVariableValue::Known(_) => None, - &TypeVariableValue::Bounded { .. 
} => Some(ty::TyVid { index: i as u32 }) - }) - .collect() - } -} - -impl<'tcx> sv::SnapshotVecDelegate for Delegate<'tcx> { - type Value = TypeVariableData<'tcx>; - type Undo = UndoEntry<'tcx>; - - fn reverse(values: &mut Vec>, action: UndoEntry<'tcx>) { - match action { - SpecifyVar(vid, relations, default) => { - values[vid.index as usize].value = Bounded { - relations: relations, - default: default - }; - } - - Relate(a, b) => { - relations(&mut (*values)[a.index as usize]).pop(); - relations(&mut (*values)[b.index as usize]).pop(); - } - } - } -} - -fn relations<'a>(v: &'a mut TypeVariableData) -> &'a mut Vec { - match v.value { - Known(_) => panic!("var_sub_var: variable is known"), - Bounded { ref mut relations, .. } => relations - } -} diff --git a/src/librustc/middle/intrinsicck.rs b/src/librustc/middle/intrinsicck.rs index 69b952ca1f3fc..6896c69d7db92 100644 --- a/src/librustc/middle/intrinsicck.rs +++ b/src/librustc/middle/intrinsicck.rs @@ -9,263 +9,187 @@ // except according to those terms. 
use dep_graph::DepNode; -use middle::def::DefFn; -use middle::def_id::DefId; -use middle::subst::{Subst, Substs, EnumeratedItems}; -use middle::ty::{TransmuteRestriction, ctxt, TyBareFn}; -use middle::ty::{self, Ty, TypeFoldable}; - -use std::fmt; - -use syntax::abi::RustIntrinsic; +use hir::def::Def; +use hir::def_id::DefId; +use infer::InferCtxt; +use traits::Reveal; +use ty::{self, Ty, TyCtxt}; +use ty::layout::{LayoutError, Pointer, SizeSkeleton}; + +use syntax::abi::Abi::RustIntrinsic; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::intravisit::{self, Visitor, FnKind}; -use rustc_front::hir; +use syntax_pos::Span; +use hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; +use hir; -pub fn check_crate(tcx: &ctxt) { - let mut visitor = IntrinsicCheckingVisitor { - tcx: tcx, - param_envs: Vec::new(), - dummy_sized_ty: tcx.types.isize, - dummy_unsized_ty: tcx.mk_slice(tcx.types.isize), +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut visitor = ItemVisitor { + tcx: tcx }; - tcx.visit_all_items_in_krate(DepNode::IntrinsicCheck, &mut visitor); + tcx.visit_all_item_likes_in_krate(DepNode::IntrinsicCheck, &mut visitor.as_deep_visitor()); } -struct IntrinsicCheckingVisitor<'a, 'tcx: 'a> { - tcx: &'a ctxt<'tcx>, +struct ItemVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx> +} - // As we traverse the AST, we keep a stack of the parameter - // environments for each function we encounter. When we find a - // call to `transmute`, we can check it in the context of the top - // of the stack (which ought not to be empty). 
- param_envs: Vec>, +impl<'a, 'tcx> ItemVisitor<'a, 'tcx> { + fn visit_const(&mut self, item_id: ast::NodeId, expr: &'tcx hir::Expr) { + let param_env = ty::ParameterEnvironment::for_item(self.tcx, item_id); + self.tcx.infer_ctxt(None, Some(param_env), Reveal::All).enter(|infcx| { + let mut visitor = ExprVisitor { + infcx: &infcx + }; + visitor.visit_expr(expr); + }); + } +} - // Dummy sized/unsized types that use to substitute for type - // parameters in order to estimate how big a type will be for any - // possible instantiation of the type parameters in scope. See - // `check_transmute` for more details. - dummy_sized_ty: Ty<'tcx>, - dummy_unsized_ty: Ty<'tcx>, +struct ExprVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx> } -impl<'a, 'tcx> IntrinsicCheckingVisitor<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> { fn def_id_is_transmute(&self, def_id: DefId) -> bool { - let intrinsic = match self.tcx.lookup_item_type(def_id).ty.sty { - ty::TyBareFn(_, ref bfty) => bfty.abi == RustIntrinsic, + let intrinsic = match self.infcx.tcx.item_type(def_id).sty { + ty::TyFnDef(.., ref bfty) => bfty.abi == RustIntrinsic, _ => return false }; - intrinsic && self.tcx.item_name(def_id).as_str() == "transmute" + intrinsic && self.infcx.tcx.item_name(def_id) == "transmute" } - fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>, id: ast::NodeId) { - // Find the parameter environment for the most recent function that - // we entered. + fn check_transmute(&self, span: Span, from: Ty<'gcx>, to: Ty<'gcx>, id: ast::NodeId) { + let sk_from = SizeSkeleton::compute(from, self.infcx); + let sk_to = SizeSkeleton::compute(to, self.infcx); - let param_env = match self.param_envs.last() { - Some(p) => p, - None => { - self.tcx.sess.span_bug( - span, - "transmute encountered outside of any fn"); + // Check for same size using the skeletons. 
+ if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) { + if sk_from.same_size(sk_to) { + return; } - }; - // Simple case: no type parameters involved. - if - !from.has_param_types() && !from.has_self_ty() && - !to.has_param_types() && !to.has_self_ty() - { - let restriction = TransmuteRestriction { - span: span, - original_from: from, - original_to: to, - substituted_from: from, - substituted_to: to, - id: id, - }; - self.push_transmute_restriction(restriction); - return; + match (&from.sty, sk_to) { + (&ty::TyFnDef(..), SizeSkeleton::Known(size_to)) + if size_to == Pointer.size(&self.infcx.tcx.data_layout) => { + // FIXME #19925 Remove this warning after a release cycle. + let msg = format!("`{}` is now zero-sized and has to be cast \ + to a pointer before transmuting to `{}`", + from, to); + self.infcx.tcx.sess.add_lint( + ::lint::builtin::TRANSMUTE_FROM_FN_ITEM_TYPES, id, span, msg); + return; + } + _ => {} + } } - // The rules around type parameters are a bit subtle. We are - // checking these rules before monomorphization, so there may - // be unsubstituted type parameters present in the - // types. Obviously we cannot create LLVM types for those. - // However, if a type parameter appears only indirectly (i.e., - // through a pointer), it does not necessarily affect the - // size, so that should be allowed. The only catch is that we - // DO want to be careful around unsized type parameters, since - // fat pointers have a different size than a thin pointer, and - // hence `&T` and `&U` have different sizes if `T : Sized` but - // `U : Sized` does not hold. - // - // However, it's not as simple as checking whether `T : - // Sized`, because even if `T : Sized` does not hold, that - // just means that `T` *may* not be sized. After all, even a - // type parameter `T: ?Sized` could be bound to a sized - // type. (Issue #20116) - // - // To handle this, we first check for "interior" type - // parameters, which are always illegal. 
If there are none of - // those, then we know that the only way that all type - // parameters `T` are referenced indirectly, e.g. via a - // pointer type like `&T`. In that case, we only care whether - // `T` is sized or not, because that influences whether `&T` - // is a thin or fat pointer. - // - // One could imagine establishing a sophisticated constraint - // system to ensure that the transmute is legal, but instead - // we do something brutally dumb. We just substitute dummy - // sized or unsized types for every type parameter in scope, - // exhaustively checking all possible combinations. Here are some examples: - // - // ``` - // fn foo() { - // // T=int, U=int - // } - // - // fn bar() { - // // T=int, U=int - // // T=[int], U=int - // } - // - // fn baz() { - // // T=int, U=int - // // T=[int], U=int - // // T=int, U=[int] - // // T=[int], U=[int] - // } - // ``` - // - // In all cases, we keep the original unsubstituted types - // around for error reporting. - - let from_tc = from.type_contents(self.tcx); - let to_tc = to.type_contents(self.tcx); - if from_tc.interior_param() || to_tc.interior_param() { - span_err!(self.tcx.sess, span, E0139, - "cannot transmute to or from a type that contains \ - unsubstituted type parameters"); - return; - } + // Try to display a sensible error with as much information as possible. + let skeleton_string = |ty: Ty<'gcx>, sk| { + match sk { + Ok(SizeSkeleton::Known(size)) => { + format!("{} bits", size.bits()) + } + Ok(SizeSkeleton::Pointer { tail, .. 
}) => { + format!("pointer to {}", tail) + } + Err(LayoutError::Unknown(bad)) => { + if bad == ty { + format!("size can vary") + } else { + format!("size can vary because of {}", bad) + } + } + Err(err) => err.to_string() + } + }; - let mut substs = param_env.free_substs.clone(); - self.with_each_combination( - span, - param_env, - param_env.free_substs.types.iter_enumerated(), - &mut substs, - &mut |substs| { - let restriction = TransmuteRestriction { - span: span, - original_from: from, - original_to: to, - substituted_from: from.subst(self.tcx, substs), - substituted_to: to.subst(self.tcx, substs), - id: id, - }; - self.push_transmute_restriction(restriction); - }); + struct_span_err!(self.infcx.tcx.sess, span, E0512, + "transmute called with differently sized types: \ + {} ({}) to {} ({})", + from, skeleton_string(from, sk_from), + to, skeleton_string(to, sk_to)) + .span_label(span, + &format!("transmuting between {} and {}", + skeleton_string(from, sk_from), + skeleton_string(to, sk_to))) + .emit(); } +} - fn with_each_combination(&self, - span: Span, - param_env: &ty::ParameterEnvironment<'a,'tcx>, - mut types_in_scope: EnumeratedItems>, - substs: &mut Substs<'tcx>, - callback: &mut FnMut(&Substs<'tcx>)) - { - // This parameter invokes `callback` many times with different - // substitutions that replace all the parameters in scope with - // either `int` or `[int]`, depending on whether the type - // parameter is known to be sized. See big comment above for - // an explanation of why this is a reasonable thing to do. 
- - match types_in_scope.next() { - None => { - debug!("with_each_combination(substs={:?})", - substs); - - callback(substs); - } - - Some((space, index, ¶m_ty)) => { - debug!("with_each_combination: space={:?}, index={}, param_ty={:?}", - space, index, param_ty); +impl<'a, 'tcx> Visitor<'tcx> for ItemVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } - if !param_ty.is_sized(param_env, span) { - debug!("with_each_combination: param_ty is not known to be sized"); + // const, static and N in [T; N]. + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { + self.tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let mut visitor = ExprVisitor { + infcx: &infcx + }; + visitor.visit_expr(expr); + }); + } - substs.types.get_mut_slice(space)[index] = self.dummy_unsized_ty; - self.with_each_combination(span, param_env, types_in_scope.clone(), - substs, callback); - } + fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) { + if let hir::ConstTraitItem(_, Some(ref expr)) = item.node { + self.visit_const(item.id, expr); + } else { + intravisit::walk_trait_item(self, item); + } + } - substs.types.get_mut_slice(space)[index] = self.dummy_sized_ty; - self.with_each_combination(span, param_env, types_in_scope, - substs, callback); - } + fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) { + if let hir::ImplItemKind::Const(_, ref expr) = item.node { + self.visit_const(item.id, expr); + } else { + intravisit::walk_impl_item(self, item); } } - fn push_transmute_restriction(&self, restriction: TransmuteRestriction<'tcx>) { - debug!("Pushing transmute restriction: {:?}", restriction); - self.tcx.transmute_restrictions.borrow_mut().push(restriction); + fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, id: ast::NodeId) { + if let FnKind::Closure(..) 
= fk { + span_bug!(s, "intrinsicck: closure outside of function") + } + let param_env = ty::ParameterEnvironment::for_item(self.tcx, id); + self.tcx.infer_ctxt(None, Some(param_env), Reveal::All).enter(|infcx| { + let mut visitor = ExprVisitor { + infcx: &infcx + }; + visitor.visit_fn(fk, fd, b, s, id); + }); } } -impl<'a, 'tcx, 'v> Visitor<'v> for IntrinsicCheckingVisitor<'a, 'tcx> { - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, s: Span, id: ast::NodeId) { - match fk { - FnKind::ItemFn(..) | FnKind::Method(..) => { - let param_env = ty::ParameterEnvironment::for_item(self.tcx, id); - self.param_envs.push(param_env); - intravisit::walk_fn(self, fk, fd, b, s); - self.param_envs.pop(); - } - FnKind::Closure => { - intravisit::walk_fn(self, fk, fd, b, s); - } - } +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for ExprVisitor<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.infcx.tcx.map) } - fn visit_expr(&mut self, expr: &hir::Expr) { - if let hir::ExprPath(..) 
= expr.node { - match self.tcx.resolve_expr(expr) { - DefFn(did, _) if self.def_id_is_transmute(did) => { - let typ = self.tcx.node_id_to_type(expr.id); - match typ.sty { - TyBareFn(_, ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { - if let ty::FnConverging(to) = bare_fn_ty.sig.0.output { - let from = bare_fn_ty.sig.0.inputs[0]; - self.check_transmute(expr.span, from, to, expr.id); - } - } - _ => { - self.tcx - .sess - .span_bug(expr.span, "transmute wasn't a bare fn?!"); - } + fn visit_expr(&mut self, expr: &'gcx hir::Expr) { + let def = if let hir::ExprPath(ref qpath) = expr.node { + self.infcx.tcx.tables().qpath_def(qpath, expr.id) + } else { + Def::Err + }; + match def { + Def::Fn(did) if self.def_id_is_transmute(did) => { + let typ = self.infcx.tcx.tables().node_id_to_type(expr.id); + match typ.sty { + ty::TyFnDef(.., ref bare_fn_ty) if bare_fn_ty.abi == RustIntrinsic => { + let from = bare_fn_ty.sig.0.inputs[0]; + let to = bare_fn_ty.sig.0.output; + self.check_transmute(expr.span, from, to, expr.id); + } + _ => { + span_bug!(expr.span, "transmute wasn't a bare fn?!"); } } - _ => {} } + _ => {} } intravisit::walk_expr(self, expr); } } - -impl<'tcx> fmt::Debug for TransmuteRestriction<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TransmuteRestriction(id={}, original=({:?},{:?}), substituted=({:?},{:?}))", - self.id, - self.original_from, - self.original_to, - self.substituted_from, - self.substituted_to) - } -} diff --git a/src/librustc/middle/lang_items.rs b/src/librustc/middle/lang_items.rs index ec55daca9ecdf..1efc211b8c35b 100644 --- a/src/librustc/middle/lang_items.rs +++ b/src/librustc/middle/lang_items.rs @@ -21,34 +21,29 @@ pub use self::LangItem::*; -use front::map as hir_map; +use dep_graph::DepNode; +use hir::map as hir_map; use session::Session; -use middle::cstore::CrateStore; -use middle::def_id::DefId; -use middle::ty; +use hir::def_id::DefId; +use ty; use middle::weak_lang_items; -use 
util::nodemap::FnvHashMap; +use util::nodemap::FxHashMap; use syntax::ast; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::{DUMMY_SP, Span}; -use syntax::parse::token::InternedString; -use rustc_front::intravisit::Visitor; -use rustc_front::hir; - -use std::iter::Enumerate; -use std::slice; +use syntax::symbol::Symbol; +use hir::itemlikevisit::ItemLikeVisitor; +use hir; // The actual lang items defined come at the end of this file in one handy table. // So you probably just want to nip down to the end. -macro_rules! lets_do_this { +macro_rules! language_item_table { ( $( $variant:ident, $name:expr, $method:ident; )* ) => { enum_from_u32! { - #[derive(Copy, Clone, PartialEq, Eq, Hash)] + #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum LangItem { $($variant,)* } @@ -64,13 +59,13 @@ impl LanguageItems { fn foo(_: LangItem) -> Option { None } LanguageItems { - items: vec!($(foo($variant)),*), + items: vec![$(foo($variant)),*], missing: Vec::new(), } } - pub fn items<'a>(&'a self) -> Enumerate>> { - self.items.iter().enumerate() + pub fn items(&self) -> &[Option] { + &*self.items } pub fn item_name(index: usize) -> &'static str { @@ -95,36 +90,11 @@ impl LanguageItems { self.require(OwnedBoxLangItem) } - pub fn from_builtin_kind(&self, bound: ty::BuiltinBound) - -> Result - { - match bound { - ty::BoundSend => self.require(SendTraitLangItem), - ty::BoundSized => self.require(SizedTraitLangItem), - ty::BoundCopy => self.require(CopyTraitLangItem), - ty::BoundSync => self.require(SyncTraitLangItem), - } - } - - pub fn to_builtin_kind(&self, id: DefId) -> Option { - if Some(id) == self.send_trait() { - Some(ty::BoundSend) - } else if Some(id) == self.sized_trait() { - Some(ty::BoundSized) - } else if Some(id) == self.copy_trait() { - Some(ty::BoundCopy) - } else if Some(id) == self.sync_trait() { - Some(ty::BoundSync) - } else { - None - } - } - pub fn fn_trait_kind(&self, id: DefId) -> Option { let def_id_kinds = [ - 
(self.fn_trait(), ty::FnClosureKind), - (self.fn_mut_trait(), ty::FnMutClosureKind), - (self.fn_once_trait(), ty::FnOnceClosureKind), + (self.fn_trait(), ty::ClosureKind::Fn), + (self.fn_mut_trait(), ty::ClosureKind::FnMut), + (self.fn_once_trait(), ty::ClosureKind::FnOnce), ]; for &(opt_def_id, kind) in &def_id_kinds { @@ -151,25 +121,34 @@ struct LanguageItemCollector<'a, 'tcx: 'a> { session: &'a Session, - item_refs: FnvHashMap<&'static str, usize>, + item_refs: FxHashMap<&'static str, usize>, } -impl<'a, 'v, 'tcx> Visitor<'v> for LanguageItemCollector<'a, 'tcx> { +impl<'a, 'v, 'tcx> ItemLikeVisitor<'v> for LanguageItemCollector<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { if let Some(value) = extract(&item.attrs) { - let item_index = self.item_refs.get(&value[..]).cloned(); + let item_index = self.item_refs.get(&*value.as_str()).cloned(); if let Some(item_index) = item_index { - self.collect_item(item_index, self.ast_map.local_def_id(item.id), item.span) + self.collect_item(item_index, self.ast_map.local_def_id(item.id)) + } else { + let span = self.ast_map.span(item.id); + span_err!(self.session, span, E0522, + "definition of an unknown language item: `{}`.", + value); } } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + // at present, lang items are always items, not impl items + } } impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { pub fn new(session: &'a Session, ast_map: &'a hir_map::Map<'tcx>) -> LanguageItemCollector<'a, 'tcx> { - let mut item_refs = FnvHashMap(); + let mut item_refs = FxHashMap(); $( item_refs.insert($name, $variant as usize); )* @@ -182,14 +161,34 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { } pub fn collect_item(&mut self, item_index: usize, - item_def_id: DefId, span: Span) { + item_def_id: DefId) { // Check for duplicates. 
match self.items.items[item_index] { Some(original_def_id) if original_def_id != item_def_id => { - span_err!(self.session, span, E0152, - "duplicate entry for `{}`", LanguageItems::item_name(item_index)); + let cstore = &self.session.cstore; + let name = LanguageItems::item_name(item_index); + let mut err = match self.ast_map.span_if_local(item_def_id) { + Some(span) => struct_span_err!( + self.session, + span, + E0152, + "duplicate lang item found: `{}`.", + name), + None => self.session.struct_err(&format!( + "duplicate lang item in crate `{}`: `{}`.", + cstore.crate_name(item_def_id.krate), + name)), + }; + if let Some(span) = self.ast_map.span_if_local(original_def_id) { + span_note!(&mut err, span, + "first defined here."); + } else { + err.note(&format!("first defined in crate `{}`.", + cstore.crate_name(original_def_id.krate))); + } + err.emit(); } - Some(_) | None => { + _ => { // OK. } } @@ -199,31 +198,30 @@ impl<'a, 'tcx> LanguageItemCollector<'a, 'tcx> { } pub fn collect_local_language_items(&mut self, krate: &hir::Crate) { - krate.visit_all_items(self); + krate.visit_all_item_likes(self); } pub fn collect_external_language_items(&mut self) { let cstore = &self.session.cstore; + for cnum in cstore.crates() { for (index, item_index) in cstore.lang_items(cnum) { let def_id = DefId { krate: cnum, index: index }; - self.collect_item(item_index, def_id, DUMMY_SP); + self.collect_item(item_index, def_id); } } } pub fn collect(&mut self, krate: &hir::Crate) { - self.collect_local_language_items(krate); self.collect_external_language_items(); + self.collect_local_language_items(krate); } } -pub fn extract(attrs: &[ast::Attribute]) -> Option { +pub fn extract(attrs: &[ast::Attribute]) -> Option { for attribute in attrs { match attribute.value_str() { - Some(ref value) if attribute.check_name("lang") => { - return Some(value.clone()); - } + Some(value) if attribute.check_name("lang") => return Some(value), _ => {} } } @@ -234,12 +232,12 @@ pub fn extract(attrs: 
&[ast::Attribute]) -> Option { pub fn collect_language_items(session: &Session, map: &hir_map::Map) -> LanguageItems { + let _task = map.dep_graph.in_task(DepNode::CollectLanguageItems); let krate: &hir::Crate = map.krate(); let mut collector = LanguageItemCollector::new(session, map); collector.collect(krate); let LanguageItemCollector { mut items, .. } = collector; weak_lang_items::check_crate(krate, session, &mut items); - session.abort_if_errors(); items } @@ -247,7 +245,7 @@ pub fn collect_language_items(session: &Session, } } -lets_do_this! { +language_item_table! { // Variant name, Name, Method name; CharImplItem, "char", char_impl; StrImplItem, "str", str_impl; @@ -301,10 +299,6 @@ lets_do_this! { ShrAssignTraitLangItem, "shr_assign", shr_assign_trait; IndexTraitLangItem, "index", index_trait; IndexMutTraitLangItem, "index_mut", index_mut_trait; - RangeStructLangItem, "range", range_struct; - RangeFromStructLangItem, "range_from", range_from_struct; - RangeToStructLangItem, "range_to", range_to_struct; - RangeFullStructLangItem, "range_full", range_full_struct; UnsafeCellTypeLangItem, "unsafe_cell", unsafe_cell_type; @@ -335,12 +329,12 @@ lets_do_this! { ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn; ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn; + BoxFreeFnLangItem, "box_free", box_free_fn; StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn; StartFnLangItem, "start", start_fn; EhPersonalityLangItem, "eh_personality", eh_personality; - EhPersonalityCatchLangItem, "eh_personality_catch", eh_personality_catch; EhUnwindResumeLangItem, "eh_unwind_resume", eh_unwind_resume; MSVCTryFilterLangItem, "msvc_try_filter", msvc_try_filter; @@ -362,3 +356,11 @@ lets_do_this! 
{ DebugTraitLangItem, "debug_trait", debug_trait; } + +impl<'a, 'tcx, 'gcx> ty::TyCtxt<'a, 'tcx, 'gcx> { + pub fn require_lang_item(&self, lang_item: LangItem) -> DefId { + self.lang_items.require(lang_item).unwrap_or_else(|msg| { + self.sess.fatal(&msg) + }) + } +} diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs index 29299f01ed36f..445aed8f97d60 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -109,9 +109,11 @@ use self::LoopKind::*; use self::LiveNodeKind::*; use self::VarKind::*; -use middle::def::*; -use middle::pat_util; -use middle::ty; +use dep_graph::DepNode; +use hir::def::*; +use ty::{self, TyCtxt, ParameterEnvironment}; +use traits::{self, Reveal}; +use ty::subst::Subst; use lint; use util::nodemap::NodeMap; @@ -120,14 +122,13 @@ use std::io::prelude::*; use std::io; use std::rc::Rc; use syntax::ast::{self, NodeId}; -use syntax::codemap::{BytePos, original_sp, Span}; -use syntax::parse::token::special_idents; -use syntax::ptr::P; +use syntax::symbol::keywords; +use syntax_pos::Span; -use rustc_front::hir::Expr; -use rustc_front::hir; -use rustc_front::print::pprust::{expr_to_string, block_to_string}; -use rustc_front::intravisit::{self, Visitor, FnKind}; +use hir::Expr; +use hir; +use hir::print::{expr_to_string, block_to_string}; +use hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; /// For use with `propagate_through_loop`. 
enum LoopKind<'a> { @@ -165,8 +166,8 @@ enum LiveNodeKind { ExitNode } -fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &ty::ctxt) -> String { - let cm = cx.sess.codemap(); +fn live_node_kind_to_string(lnk: LiveNodeKind, tcx: TyCtxt) -> String { + let cm = tcx.sess.codemap(); match lnk { FreeVarNode(s) => { format!("Free var node [{}]", cm.span_to_string(s)) @@ -181,18 +182,23 @@ fn live_node_kind_to_string(lnk: LiveNodeKind, cx: &ty::ctxt) -> String { } } -impl<'a, 'tcx, 'v> Visitor<'v> for IrMaps<'a, 'tcx> { - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, s: Span, id: NodeId) { +impl<'a, 'tcx> Visitor<'tcx> for IrMaps<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, id: NodeId) { visit_fn(self, fk, fd, b, s, id); } - fn visit_local(&mut self, l: &hir::Local) { visit_local(self, l); } - fn visit_expr(&mut self, ex: &Expr) { visit_expr(self, ex); } - fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); } + fn visit_local(&mut self, l: &'tcx hir::Local) { visit_local(self, l); } + fn visit_expr(&mut self, ex: &'tcx Expr) { visit_expr(self, ex); } + fn visit_arm(&mut self, a: &'tcx hir::Arm) { visit_arm(self, a); } } -pub fn check_crate(tcx: &ty::ctxt) { - tcx.map.krate().visit_all_items(&mut IrMaps::new(tcx)); +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let _task = tcx.dep_graph.in_task(DepNode::Liveness); + tcx.map.krate().visit_all_item_likes(&mut IrMaps::new(tcx).as_deep_visitor()); tcx.sess.abort_if_errors(); } @@ -258,7 +264,7 @@ enum VarKind { } struct IrMaps<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, num_live_nodes: usize, num_vars: usize, @@ -270,7 +276,7 @@ struct IrMaps<'a, 'tcx: 'a> { } impl<'a, 'tcx> IrMaps<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>) -> IrMaps<'a, 'tcx> { + 
fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> IrMaps<'a, 'tcx> { IrMaps { tcx: tcx, num_live_nodes: 0, @@ -320,13 +326,10 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { fn variable(&self, node_id: NodeId, span: Span) -> Variable { match self.variable_map.get(&node_id) { - Some(&var) => var, - None => { - self.tcx - .sess - .span_bug(span, &format!("no variable registered for id {}", - node_id)); - } + Some(&var) => var, + None => { + span_bug!(span, "no variable registered for id {}", node_id); + } } } @@ -349,28 +352,32 @@ impl<'a, 'tcx> IrMaps<'a, 'tcx> { } } -impl<'a, 'tcx, 'v> Visitor<'v> for Liveness<'a, 'tcx> { - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, s: Span, n: NodeId) { - check_fn(self, fk, fd, b, s, n); +impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.ir.tcx.map) } - fn visit_local(&mut self, l: &hir::Local) { + + fn visit_fn(&mut self, _: FnKind<'tcx>, _: &'tcx hir::FnDecl, + _: hir::ExprId, _: Span, _: NodeId) { + // do not check contents of nested fns + } + fn visit_local(&mut self, l: &'tcx hir::Local) { check_local(self, l); } - fn visit_expr(&mut self, ex: &Expr) { + fn visit_expr(&mut self, ex: &'tcx Expr) { check_expr(self, ex); } - fn visit_arm(&mut self, a: &hir::Arm) { + fn visit_arm(&mut self, a: &'tcx hir::Arm) { check_arm(self, a); } } -fn visit_fn(ir: &mut IrMaps, - fk: FnKind, - decl: &hir::FnDecl, - body: &hir::Block, - sp: Span, - id: ast::NodeId) { +fn visit_fn<'a, 'tcx: 'a>(ir: &mut IrMaps<'a, 'tcx>, + fk: FnKind<'tcx>, + decl: &'tcx hir::FnDecl, + body_id: hir::ExprId, + sp: Span, + id: ast::NodeId) { debug!("visit_fn"); // swap in a new set of IR maps for this function body: @@ -379,9 +386,7 @@ fn visit_fn(ir: &mut IrMaps, debug!("creating fn_maps: {:?}", &fn_maps as *const IrMaps); for arg in &decl.inputs { - pat_util::pat_bindings(&ir.tcx.def_map, - &*arg.pat, - |_bm, arg_id, _x, 
path1| { + arg.pat.each_binding(|_bm, arg_id, _x, path1| { debug!("adding argument {}", arg_id); let name = path1.node; fn_maps.add_variable(Arg(arg_id, name)); @@ -390,7 +395,7 @@ fn visit_fn(ir: &mut IrMaps, // gather up the various local variables, significant expressions, // and so forth: - intravisit::walk_fn(&mut fn_maps, fk, decl, body, sp); + intravisit::walk_fn(&mut fn_maps, fk, decl, body_id, sp, id); // Special nodes and variables: // - exit_ln represents the end of the fn, either by return or panic @@ -403,18 +408,20 @@ fn visit_fn(ir: &mut IrMaps, clean_exit_var: fn_maps.add_variable(CleanExit) }; + let body = ir.tcx.map.expr(body_id); + // compute liveness let mut lsets = Liveness::new(&mut fn_maps, specials); - let entry_ln = lsets.compute(decl, body); + let entry_ln = lsets.compute(body); // check for various error conditions - lsets.visit_block(body); + lsets.visit_expr(body); lsets.check_ret(id, sp, fk, entry_ln, body); lsets.warn_about_unused_args(decl, entry_ln); } -fn visit_local(ir: &mut IrMaps, local: &hir::Local) { - pat_util::pat_bindings(&ir.tcx.def_map, &*local.pat, |_, p_id, sp, path1| { +fn visit_local<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, local: &'tcx hir::Local) { + local.pat.each_binding(|_, p_id, sp, path1| { debug!("adding local variable {}", p_id); let name = path1.node; ir.add_live_node_for_node(p_id, VarDefNode(sp)); @@ -426,9 +433,9 @@ fn visit_local(ir: &mut IrMaps, local: &hir::Local) { intravisit::walk_local(ir, local); } -fn visit_arm(ir: &mut IrMaps, arm: &hir::Arm) { +fn visit_arm<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, arm: &'tcx hir::Arm) { for pat in &arm.pats { - pat_util::pat_bindings(&ir.tcx.def_map, &**pat, |bm, p_id, sp, path1| { + pat.each_binding(|bm, p_id, sp, path1| { debug!("adding local variable {} from match with bm {:?}", p_id, bm); let name = path1.node; @@ -442,13 +449,12 @@ fn visit_arm(ir: &mut IrMaps, arm: &hir::Arm) { intravisit::walk_arm(ir, arm); } -fn visit_expr(ir: &mut IrMaps, expr: &Expr) { +fn 
visit_expr<'a, 'tcx>(ir: &mut IrMaps<'a, 'tcx>, expr: &'tcx Expr) { match expr.node { // live nodes required for uses or definitions of variables: - hir::ExprPath(..) => { - let def = ir.tcx.def_map.borrow().get(&expr.id).unwrap().full_def(); - debug!("expr {}: path that leads to {:?}", expr.id, def); - if let DefLocal(..) = def { + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + debug!("expr {}: path that leads to {:?}", expr.id, path.def); + if let Def::Local(..) = path.def { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); } intravisit::walk_expr(ir, expr); @@ -465,7 +471,8 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) { let mut call_caps = Vec::new(); ir.tcx.with_freevars(expr.id, |freevars| { for fv in freevars { - if let DefLocal(_, rv) = fv.def { + if let Def::Local(def_id) = fv.def { + let rv = ir.tcx.map.as_local_node_id(def_id).unwrap(); let fv_ln = ir.add_live_node(FreeVarNode(fv.span)); call_caps.push(CaptureInfo {ln: fv_ln, var_nid: rv}); @@ -482,21 +489,21 @@ fn visit_expr(ir: &mut IrMaps, expr: &Expr) { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } - hir::ExprBinary(op, _, _) if ::rustc_front::util::lazy_binop(op.node) => { + hir::ExprBinary(op, ..) if op.node.is_lazy() => { ir.add_live_node_for_node(expr.id, ExprNode(expr.span)); intravisit::walk_expr(ir, expr); } // otherwise, live nodes are not required: hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) | - hir::ExprVec(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) | + hir::ExprArray(..) | hir::ExprCall(..) | hir::ExprMethodCall(..) | hir::ExprTup(..) | hir::ExprBinary(..) | hir::ExprAddrOf(..) | - hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprBreak(_) | + hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprBreak(..) | hir::ExprAgain(_) | hir::ExprLit(_) | hir::ExprRet(..) | hir::ExprBlock(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) | hir::ExprStruct(..) | hir::ExprRepeat(..) | hir::ExprInlineAsm(..) 
| hir::ExprBox(..) | - hir::ExprRange(..) | hir::ExprType(..) => { + hir::ExprType(..) | hir::ExprPath(hir::QPath::TypeRelative(..)) => { intravisit::walk_expr(ir, expr); } } @@ -573,10 +580,10 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // above and the propagation code below; the two sets of // code have to agree about which AST nodes are worth // creating liveness nodes for. - self.ir.tcx.sess.span_bug( + span_bug!( span, - &format!("no live node registered for node {}", - node_id)); + "no live node registered for node {}", + node_id); } } } @@ -588,7 +595,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn pat_bindings(&mut self, pat: &hir::Pat, mut f: F) where F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId), { - pat_util::pat_bindings(&self.ir.tcx.def_map, pat, |_bm, p_id, sp, _n| { + pat.each_binding(|_bm, p_id, sp, _n| { let ln = self.live_node(p_id, sp); let var = self.variable(p_id, sp); f(self, ln, var, sp, p_id); @@ -598,11 +605,8 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn arm_pats_bindings(&mut self, pat: Option<&hir::Pat>, f: F) where F: FnMut(&mut Liveness<'a, 'tcx>, LiveNode, Variable, Span, NodeId), { - match pat { - Some(pat) => { - self.pat_bindings(pat, f); - } - None => {} + if let Some(pat) = pat { + self.pat_bindings(pat, f); } } @@ -681,32 +685,23 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { for var_idx in 0..self.ir.num_vars { let idx = node_base_idx + var_idx; if test(idx).is_valid() { - try!(write!(wr, " {:?}", Variable(var_idx))); + write!(wr, " {:?}", Variable(var_idx))?; } } Ok(()) } fn find_loop_scope(&self, - opt_label: Option, - id: NodeId, + opt_label: Option, sp: Span) -> NodeId { match opt_label { - Some(_) => { - // Refers to a labeled loop. 
Use the results of resolve - // to find with one - match self.ir.tcx.def_map.borrow().get(&id).map(|d| d.full_def()) { - Some(DefLabel(loop_id)) => loop_id, - _ => self.ir.tcx.sess.span_bug(sp, "label on break/loop \ - doesn't refer to a loop") - } - } + Some(label) => label.loop_id, None => { - // Vanilla 'break' or 'loop', so use the enclosing + // Vanilla 'break' or 'continue', so use the enclosing // loop scope if self.loop_scope.is_empty() { - self.ir.tcx.sess.span_bug(sp, "break outside loop"); + span_bug!(sp, "break outside loop"); } else { *self.loop_scope.last().unwrap() } @@ -823,17 +818,23 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // _______________________________________________________________________ - fn compute(&mut self, decl: &hir::FnDecl, body: &hir::Block) -> LiveNode { + fn compute(&mut self, body: &hir::Expr) -> LiveNode { // if there is a `break` or `again` at the top level, then it's // effectively a return---this only occurs in `for` loops, // where the body is really a closure. - debug!("compute: using id for block, {}", block_to_string(body)); + debug!("compute: using id for body, {}", expr_to_string(body)); let exit_ln = self.s.exit_ln; - let entry_ln: LiveNode = - self.with_loop_nodes(body.id, exit_ln, exit_ln, - |this| this.propagate_through_fn_block(decl, body)); + let entry_ln: LiveNode = self.with_loop_nodes(body.id, exit_ln, exit_ln, |this| { + // the fallthrough exit is only for those cases where we do not + // explicitly return: + let s = this.s; + this.init_from_succ(s.fallthrough_ln, s.exit_ln); + this.acc(s.fallthrough_ln, s.clean_exit_var, ACC_READ); + + this.propagate_through_expr(body, s.fallthrough_ln) + }); // hack to skip the loop unless debug! 
is enabled: debug!("^^ liveness computation results for body {} (entry={:?})", @@ -848,20 +849,6 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { entry_ln } - fn propagate_through_fn_block(&mut self, _: &hir::FnDecl, blk: &hir::Block) - -> LiveNode { - // the fallthrough exit is only for those cases where we do not - // explicitly return: - let s = self.s; - self.init_from_succ(s.fallthrough_ln, s.exit_ln); - if blk.expr.is_none() { - self.acc(s.fallthrough_ln, s.no_ret_var, ACC_READ) - } - self.acc(s.fallthrough_ln, s.clean_exit_var, ACC_READ); - - self.propagate_through_block(blk, s.fallthrough_ln) - } - fn propagate_through_block(&mut self, blk: &hir::Block, succ: LiveNode) -> LiveNode { let succ = self.propagate_through_opt_expr(blk.expr.as_ref().map(|e| &**e), succ); @@ -874,11 +861,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { -> LiveNode { match stmt.node { hir::StmtDecl(ref decl, _) => { - self.propagate_through_decl(&**decl, succ) + self.propagate_through_decl(&decl, succ) } hir::StmtExpr(ref expr, _) | hir::StmtSemi(ref expr, _) => { - self.propagate_through_expr(&**expr, succ) + self.propagate_through_expr(&expr, succ) } } } @@ -887,7 +874,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { -> LiveNode { match decl.node { hir::DeclLocal(ref local) => { - self.propagate_through_local(&**local, succ) + self.propagate_through_local(&local, succ) } hir::DeclItem(_) => succ, } @@ -910,13 +897,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // once at the func header but otherwise equivalent. 
let succ = self.propagate_through_opt_expr(local.init.as_ref().map(|e| &**e), succ); - self.define_bindings_in_pat(&*local.pat, succ) + self.define_bindings_in_pat(&local.pat, succ) } - fn propagate_through_exprs(&mut self, exprs: &[P], succ: LiveNode) + fn propagate_through_exprs(&mut self, exprs: &[Expr], succ: LiveNode) -> LiveNode { exprs.iter().rev().fold(succ, |succ, expr| { - self.propagate_through_expr(&**expr, succ) + self.propagate_through_expr(&expr, succ) }) } @@ -934,19 +921,19 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { match expr.node { // Interesting cases with control flow or which gen/kill - hir::ExprPath(..) => { - self.access_path(expr, succ, ACC_READ | ACC_USE) + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + self.access_path(expr.id, path, succ, ACC_READ | ACC_USE) } hir::ExprField(ref e, _) => { - self.propagate_through_expr(&**e, succ) + self.propagate_through_expr(&e, succ) } hir::ExprTupField(ref e, _) => { - self.propagate_through_expr(&**e, succ) + self.propagate_through_expr(&e, succ) } - hir::ExprClosure(_, _, ref blk) => { + hir::ExprClosure(.., blk_id, _) => { debug!("{} is an ExprClosure", expr_to_string(expr)); @@ -955,14 +942,14 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { loop. The next-node for a continue is the top of this loop. */ let node = self.live_node(expr.id, expr.span); - self.with_loop_nodes(blk.id, succ, node, |this| { + self.with_loop_nodes(blk_id.node_id(), succ, node, |this| { // the construction of a closure itself is not important, // but we have to consider the closed over variables. 
let caps = match this.ir.capture_info_map.get(&expr.id) { Some(caps) => caps.clone(), None => { - this.ir.tcx.sess.span_bug(expr.span, "no registered caps"); + span_bug!(expr.span, "no registered caps"); } }; caps.iter().rev().fold(succ, |succ, cap| { @@ -989,21 +976,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // ( succ ) // let else_ln = self.propagate_through_opt_expr(els.as_ref().map(|e| &**e), succ); - let then_ln = self.propagate_through_block(&**then, succ); + let then_ln = self.propagate_through_block(&then, succ); let ln = self.live_node(expr.id, expr.span); self.init_from_succ(ln, else_ln); self.merge_from_succ(ln, then_ln, false); - self.propagate_through_expr(&**cond, ln) + self.propagate_through_expr(&cond, ln) } hir::ExprWhile(ref cond, ref blk, _) => { - self.propagate_through_loop(expr, WhileLoop(&**cond), &**blk, succ) + self.propagate_through_loop(expr, WhileLoop(&cond), &blk, succ) } // Note that labels have been resolved, so we don't need to look // at the label ident - hir::ExprLoop(ref blk, _) => { - self.propagate_through_loop(expr, LoopLoop, &**blk, succ) + hir::ExprLoop(ref blk, _, _) => { + self.propagate_through_loop(expr, LoopLoop, &blk, succ) } hir::ExprMatch(ref e, ref arms, _) => { @@ -1026,7 +1013,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { let mut first_merge = true; for arm in arms { let body_succ = - self.propagate_through_expr(&*arm.body, succ); + self.propagate_through_expr(&arm.body, succ); let guard_succ = self.propagate_through_opt_expr(arm.guard.as_ref().map(|e| &**e), body_succ); // only consider the first pattern; any later patterns must have @@ -1038,7 +1025,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.merge_from_succ(ln, arm_succ, first_merge); first_merge = false; }; - self.propagate_through_expr(&**e, ln) + self.propagate_through_expr(&e, ln) } hir::ExprRet(ref o_e) => { @@ -1047,114 +1034,115 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_opt_expr(o_e.as_ref().map(|e| &**e), exit_ln) } - 
hir::ExprBreak(opt_label) => { + hir::ExprBreak(opt_label, ref opt_expr) => { // Find which label this break jumps to - let sc = self.find_loop_scope(opt_label.map(|l| l.node.name), expr.id, expr.span); + let sc = self.find_loop_scope(opt_label, expr.span); // Now that we know the label we're going to, // look it up in the break loop nodes table match self.break_ln.get(&sc) { - Some(&b) => b, - None => self.ir.tcx.sess.span_bug(expr.span, - "break to unknown label") + Some(&b) => self.propagate_through_opt_expr(opt_expr.as_ref().map(|e| &**e), b), + None => span_bug!(expr.span, "break to unknown label") } } hir::ExprAgain(opt_label) => { // Find which label this expr continues to - let sc = self.find_loop_scope(opt_label.map(|l| l.node.name), expr.id, expr.span); + let sc = self.find_loop_scope(opt_label, expr.span); // Now that we know the label we're going to, // look it up in the continue loop nodes table match self.cont_ln.get(&sc) { Some(&b) => b, - None => self.ir.tcx.sess.span_bug(expr.span, - "loop to unknown label") + None => span_bug!(expr.span, "continue to unknown label") } } hir::ExprAssign(ref l, ref r) => { // see comment on lvalues in // propagate_through_lvalue_components() - let succ = self.write_lvalue(&**l, succ, ACC_WRITE); - let succ = self.propagate_through_lvalue_components(&**l, succ); - self.propagate_through_expr(&**r, succ) + let succ = self.write_lvalue(&l, succ, ACC_WRITE); + let succ = self.propagate_through_lvalue_components(&l, succ); + self.propagate_through_expr(&r, succ) } hir::ExprAssignOp(_, ref l, ref r) => { - // see comment on lvalues in - // propagate_through_lvalue_components() - let succ = self.write_lvalue(&**l, succ, ACC_WRITE|ACC_READ); - let succ = self.propagate_through_expr(&**r, succ); - self.propagate_through_lvalue_components(&**l, succ) + // an overloaded assign op is like a method call + if self.ir.tcx.tables().is_method_call(expr.id) { + let succ = self.propagate_through_expr(&l, succ); + 
self.propagate_through_expr(&r, succ) + } else { + // see comment on lvalues in + // propagate_through_lvalue_components() + let succ = self.write_lvalue(&l, succ, ACC_WRITE|ACC_READ); + let succ = self.propagate_through_expr(&r, succ); + self.propagate_through_lvalue_components(&l, succ) + } } // Uninteresting cases: just propagate in rev exec order - hir::ExprVec(ref exprs) => { - self.propagate_through_exprs(&exprs[..], succ) + hir::ExprArray(ref exprs) => { + self.propagate_through_exprs(exprs, succ) } hir::ExprRepeat(ref element, ref count) => { - let succ = self.propagate_through_expr(&**count, succ); - self.propagate_through_expr(&**element, succ) + let succ = self.propagate_through_expr(&count, succ); + self.propagate_through_expr(&element, succ) } hir::ExprStruct(_, ref fields, ref with_expr) => { let succ = self.propagate_through_opt_expr(with_expr.as_ref().map(|e| &**e), succ); fields.iter().rev().fold(succ, |succ, field| { - self.propagate_through_expr(&*field.expr, succ) + self.propagate_through_expr(&field.expr, succ) }) } hir::ExprCall(ref f, ref args) => { - let diverges = !self.ir.tcx.is_method_call(expr.id) && - self.ir.tcx.expr_ty_adjusted(&**f).fn_ret().diverges(); + // FIXME(canndrew): This is_never should really be an is_uninhabited + let diverges = !self.ir.tcx.tables().is_method_call(expr.id) && + self.ir.tcx.tables().expr_ty_adjusted(&f).fn_ret().0.is_never(); let succ = if diverges { self.s.exit_ln } else { succ }; - let succ = self.propagate_through_exprs(&args[..], succ); - self.propagate_through_expr(&**f, succ) + let succ = self.propagate_through_exprs(args, succ); + self.propagate_through_expr(&f, succ) } - hir::ExprMethodCall(_, _, ref args) => { + hir::ExprMethodCall(.., ref args) => { let method_call = ty::MethodCall::expr(expr.id); - let method_ty = self.ir.tcx.tables.borrow().method_map[&method_call].ty; - let succ = if method_ty.fn_ret().diverges() { + let method_ty = self.ir.tcx.tables().method_map[&method_call].ty; + // 
FIXME(canndrew): This is_never should really be an is_uninhabited + let succ = if method_ty.fn_ret().0.is_never() { self.s.exit_ln } else { succ }; - self.propagate_through_exprs(&args[..], succ) + self.propagate_through_exprs(args, succ) } hir::ExprTup(ref exprs) => { - self.propagate_through_exprs(&exprs[..], succ) + self.propagate_through_exprs(exprs, succ) } - hir::ExprBinary(op, ref l, ref r) if ::rustc_front::util::lazy_binop(op.node) => { - let r_succ = self.propagate_through_expr(&**r, succ); + hir::ExprBinary(op, ref l, ref r) if op.node.is_lazy() => { + let r_succ = self.propagate_through_expr(&r, succ); let ln = self.live_node(expr.id, expr.span); self.init_from_succ(ln, succ); self.merge_from_succ(ln, r_succ, false); - self.propagate_through_expr(&**l, ln) + self.propagate_through_expr(&l, ln) } hir::ExprIndex(ref l, ref r) | hir::ExprBinary(_, ref l, ref r) => { - let r_succ = self.propagate_through_expr(&**r, succ); - self.propagate_through_expr(&**l, r_succ) - } - - hir::ExprRange(ref e1, ref e2) => { - let succ = e2.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ)); - e1.as_ref().map_or(succ, |e| self.propagate_through_expr(&**e, succ)) + let r_succ = self.propagate_through_expr(&r, succ); + self.propagate_through_expr(&l, r_succ) } hir::ExprBox(ref e) | @@ -1162,36 +1150,32 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { hir::ExprCast(ref e, _) | hir::ExprType(ref e, _) | hir::ExprUnary(_, ref e) => { - self.propagate_through_expr(&**e, succ) + self.propagate_through_expr(&e, succ) } - hir::ExprInlineAsm(ref ia) => { - - let succ = ia.outputs.iter().rev().fold(succ, - |succ, out| { - // see comment on lvalues - // in propagate_through_lvalue_components() - if out.is_indirect { - self.propagate_through_expr(&*out.expr, succ) - } else { - let acc = if out.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; - let succ = self.write_lvalue(&*out.expr, succ, acc); - self.propagate_through_lvalue_components(&*out.expr, succ) - } + 
hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| { + // see comment on lvalues + // in propagate_through_lvalue_components() + if o.is_indirect { + self.propagate_through_expr(output, succ) + } else { + let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; + let succ = self.write_lvalue(output, succ, acc); + self.propagate_through_lvalue_components(output, succ) } - ); + }); + // Inputs are executed first. Propagate last because of rev order - ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| { - self.propagate_through_expr(&**expr, succ) - }) + self.propagate_through_exprs(inputs, succ) } - hir::ExprLit(..) => { + hir::ExprLit(..) | hir::ExprPath(hir::QPath::TypeRelative(..)) => { succ } hir::ExprBlock(ref blk) => { - self.propagate_through_block(&**blk, succ) + self.propagate_through_block(&blk, succ) } } } @@ -1250,9 +1234,9 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // just ignore such cases and treat them as reads. match expr.node { - hir::ExprPath(..) => succ, - hir::ExprField(ref e, _) => self.propagate_through_expr(&**e, succ), - hir::ExprTupField(ref e, _) => self.propagate_through_expr(&**e, succ), + hir::ExprPath(_) => succ, + hir::ExprField(ref e, _) => self.propagate_through_expr(&e, succ), + hir::ExprTupField(ref e, _) => self.propagate_through_expr(&e, succ), _ => self.propagate_through_expr(expr, succ) } } @@ -1261,8 +1245,8 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn write_lvalue(&mut self, expr: &Expr, succ: LiveNode, acc: u32) -> LiveNode { match expr.node { - hir::ExprPath(..) 
=> { - self.access_path(expr, succ, acc) + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + self.access_path(expr.id, path, succ, acc) } // We do not track other lvalues, so just propagate through @@ -1273,14 +1257,15 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { } } - fn access_path(&mut self, expr: &Expr, succ: LiveNode, acc: u32) + fn access_path(&mut self, id: NodeId, path: &hir::Path, succ: LiveNode, acc: u32) -> LiveNode { - match self.ir.tcx.def_map.borrow().get(&expr.id).unwrap().full_def() { - DefLocal(_, nid) => { - let ln = self.live_node(expr.id, expr.span); + match path.def { + Def::Local(def_id) => { + let nid = self.ir.tcx.map.as_local_node_id(def_id).unwrap(); + let ln = self.live_node(id, path.span); if acc != 0 { self.init_from_succ(ln, succ); - let var = self.variable(nid, expr.span); + let var = self.variable(nid, path.span); self.acc(ln, var, acc); } ln @@ -1334,7 +1319,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { let cond_ln = match kind { LoopLoop => ln, - WhileLoop(ref cond) => self.propagate_through_expr(&**cond, ln), + WhileLoop(ref cond) => self.propagate_through_expr(&cond, ln), }; let body_ln = self.with_loop_nodes(expr.id, succ, ln, |this| { this.propagate_through_block(body, cond_ln) @@ -1347,7 +1332,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { let new_cond_ln = match kind { LoopLoop => ln, WhileLoop(ref cond) => { - self.propagate_through_expr(&**cond, ln) + self.propagate_through_expr(&cond, ln) } }; assert!(cond_ln == new_cond_ln); @@ -1379,13 +1364,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { // _______________________________________________________________________ // Checking for error conditions -fn check_local(this: &mut Liveness, local: &hir::Local) { +fn check_local<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, local: &'tcx hir::Local) { match local.init { Some(_) => { - this.warn_about_unused_or_dead_vars_in_pat(&*local.pat); + this.warn_about_unused_or_dead_vars_in_pat(&local.pat); }, None => { - this.pat_bindings(&*local.pat, |this, ln, 
var, sp, id| { + this.pat_bindings(&local.pat, |this, ln, var, sp, id| { this.warn_about_unused(sp, id, ln, var); }) } @@ -1394,7 +1379,7 @@ fn check_local(this: &mut Liveness, local: &hir::Local) { intravisit::walk_local(this, local); } -fn check_arm(this: &mut Liveness, arm: &hir::Arm) { +fn check_arm<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, arm: &'tcx hir::Arm) { // only consider the first pattern; any later patterns must have // the same bindings, and we also consider the first pattern to be // the "authoritative" set of ids @@ -1404,31 +1389,33 @@ fn check_arm(this: &mut Liveness, arm: &hir::Arm) { intravisit::walk_arm(this, arm); } -fn check_expr(this: &mut Liveness, expr: &Expr) { +fn check_expr<'a, 'tcx>(this: &mut Liveness<'a, 'tcx>, expr: &'tcx Expr) { match expr.node { hir::ExprAssign(ref l, _) => { - this.check_lvalue(&**l); + this.check_lvalue(&l); intravisit::walk_expr(this, expr); } hir::ExprAssignOp(_, ref l, _) => { - this.check_lvalue(&**l); + if !this.ir.tcx.tables().is_method_call(expr.id) { + this.check_lvalue(&l); + } intravisit::walk_expr(this, expr); } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - this.visit_expr(&**input); + hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => { + for input in inputs { + this.visit_expr(input); } // Output operands must be lvalues - for out in &ia.outputs { - if !out.is_indirect { - this.check_lvalue(&*out.expr); + for (o, output) in ia.outputs.iter().zip(outputs) { + if !o.is_indirect { + this.check_lvalue(output); } - this.visit_expr(&*out.expr); + this.visit_expr(output); } intravisit::walk_expr(this, expr); @@ -1438,106 +1425,69 @@ fn check_expr(this: &mut Liveness, expr: &Expr) { hir::ExprCall(..) | hir::ExprMethodCall(..) | hir::ExprIf(..) | hir::ExprMatch(..) | hir::ExprWhile(..) | hir::ExprLoop(..) | hir::ExprIndex(..) | hir::ExprField(..) | hir::ExprTupField(..) | - hir::ExprVec(..) | hir::ExprTup(..) | hir::ExprBinary(..) | + hir::ExprArray(..) | hir::ExprTup(..) 
| hir::ExprBinary(..) | hir::ExprCast(..) | hir::ExprUnary(..) | hir::ExprRet(..) | hir::ExprBreak(..) | hir::ExprAgain(..) | hir::ExprLit(_) | hir::ExprBlock(..) | hir::ExprAddrOf(..) | hir::ExprStruct(..) | hir::ExprRepeat(..) | - hir::ExprClosure(..) | hir::ExprPath(..) | hir::ExprBox(..) | - hir::ExprRange(..) | hir::ExprType(..) => { + hir::ExprClosure(..) | hir::ExprPath(_) | + hir::ExprBox(..) | hir::ExprType(..) => { intravisit::walk_expr(this, expr); } } } -fn check_fn(_v: &Liveness, - _fk: FnKind, - _decl: &hir::FnDecl, - _body: &hir::Block, - _sp: Span, - _id: NodeId) { - // do not check contents of nested fns -} - impl<'a, 'tcx> Liveness<'a, 'tcx> { - fn fn_ret(&self, id: NodeId) -> ty::PolyFnOutput<'tcx> { - let fn_ty = self.ir.tcx.node_id_to_type(id); - match fn_ty.sty { - ty::TyClosure(closure_def_id, ref substs) => - self.ir.tcx.closure_type(closure_def_id, substs).sig.output(), - _ => fn_ty.fn_ret() - } - } - fn check_ret(&self, id: NodeId, sp: Span, - _fk: FnKind, + fk: FnKind, entry_ln: LiveNode, - body: &hir::Block) + body: &hir::Expr) { + let fn_ty = if let FnKind::Closure(_) = fk { + self.ir.tcx.tables().node_id_to_type(id) + } else { + self.ir.tcx.item_type(self.ir.tcx.map.local_def_id(id)) + }; + let fn_ret = match fn_ty.sty { + ty::TyClosure(closure_def_id, substs) => + self.ir.tcx.closure_type(closure_def_id, substs).sig.output(), + _ => fn_ty.fn_ret() + }; + // within the fn body, late-bound regions are liberated // and must outlive the *call-site* of the function. let fn_ret = self.ir.tcx.liberate_late_bound_regions( self.ir.tcx.region_maps.call_site_extent(id, body.id), - &self.fn_ret(id)); - - match fn_ret { - ty::FnConverging(t_ret) - if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() => { - - if t_ret.is_nil() { - // for nil return types, it is ok to not return a value expl. 
- } else { - let ends_with_stmt = match body.expr { - None if !body.stmts.is_empty() => - match body.stmts.first().unwrap().node { - hir::StmtSemi(ref e, _) => { - self.ir.tcx.expr_ty(&**e) == t_ret - }, - _ => false - }, - _ => false - }; - let mut err = struct_span_err!(self.ir.tcx.sess, - sp, - E0269, - "not all control paths return a value"); - if ends_with_stmt { - let last_stmt = body.stmts.first().unwrap(); - let original_span = original_sp(self.ir.tcx.sess.codemap(), - last_stmt.span, sp); - let span_semicolon = Span { - lo: original_span.hi - BytePos(1), - hi: original_span.hi, - expn_id: original_span.expn_id - }; - err.span_help(span_semicolon, "consider removing this semicolon:"); - } - err.emit(); - } + &fn_ret); + + if !fn_ret.is_never() && self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() { + let param_env = ParameterEnvironment::for_item(self.ir.tcx, id); + let t_ret_subst = fn_ret.subst(self.ir.tcx, ¶m_env.free_substs); + let is_nil = self.ir.tcx.infer_ctxt(None, Some(param_env), + Reveal::All).enter(|infcx| { + let cause = traits::ObligationCause::dummy(); + traits::fully_normalize(&infcx, cause, &t_ret_subst).unwrap().is_nil() + }); + + // for nil return types, it is ok to not return a value expl. + if !is_nil { + span_bug!(sp, "not all control paths return a value"); } - ty::FnDiverging - if self.live_on_entry(entry_ln, self.s.clean_exit_var).is_some() => { - span_err!(self.ir.tcx.sess, sp, E0270, - "computation may converge in a function marked as diverging"); - } - - _ => {} } } - fn check_lvalue(&mut self, expr: &Expr) { + fn check_lvalue(&mut self, expr: &'tcx Expr) { match expr.node { - hir::ExprPath(..) => { - if let DefLocal(_, nid) = self.ir.tcx.def_map.borrow().get(&expr.id) - .unwrap() - .full_def() { + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + if let Def::Local(def_id) = path.def { // Assignment to an immutable variable or argument: only legal // if there is no later assignment. 
If this local is actually // mutable, then check for a reassignment to flag the mutability // as being used. + let nid = self.ir.tcx.map.as_local_node_id(def_id).unwrap(); let ln = self.live_node(expr.id, expr.span); let var = self.variable(nid, expr.span); self.warn_about_dead_assign(expr.span, expr.id, ln, var); @@ -1562,13 +1512,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { fn warn_about_unused_args(&self, decl: &hir::FnDecl, entry_ln: LiveNode) { for arg in &decl.inputs { - pat_util::pat_bindings(&self.ir.tcx.def_map, - &*arg.pat, - |_bm, p_id, sp, path1| { + arg.pat.each_binding(|_bm, p_id, sp, path1| { let var = self.variable(p_id, sp); // Ignore unused self. let name = path1.node; - if name != special_idents::self_.name { + if name != keywords::SelfValue.name() { if !self.warn_about_unused(sp, p_id, entry_ln, var) { if self.live_on_entry(entry_ln, var).is_none() { self.report_dead_assign(p_id, sp, var, true); diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 1eb5efa0bda44..4c3b102e54039 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -67,33 +67,33 @@ pub use self::ElementKind::*; pub use self::MutabilityCategory::*; pub use self::AliasableReason::*; pub use self::Note::*; -pub use self::deref_kind::*; use self::Aliasability::*; -use middle::def_id::DefId; -use front::map as ast_map; -use middle::infer; -use middle::check_const; -use middle::def; -use middle::ty::adjustment; -use middle::ty::{self, Ty}; - -use rustc_front::hir::{MutImmutable, MutMutable}; -use rustc_front::hir; +use hir::def_id::DefId; +use hir::map as ast_map; +use infer::InferCtxt; +use middle::const_qualif::ConstQualif; +use hir::def::{Def, CtorKind}; +use ty::adjustment; +use ty::{self, Ty, TyCtxt}; + +use hir::{MutImmutable, MutMutable, PatKind}; +use hir::pat_util::EnumerateAndAdjustIterator; +use hir; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; use 
std::fmt; use std::rc::Rc; #[derive(Clone, PartialEq)] pub enum Categorization<'tcx> { - Rvalue(ty::Region), // temporary val, argument is its scope + Rvalue(&'tcx ty::Region), // temporary val, argument is its scope StaticItem, Upvar(Upvar), // upvar referenced by closure env Local(ast::NodeId), // local variable - Deref(cmt<'tcx>, usize, PointerKind), // deref of a ptr + Deref(cmt<'tcx>, usize, PointerKind<'tcx>), // deref of a ptr Interior(cmt<'tcx>, InteriorKind), // something interior: field, tuple, etc Downcast(cmt<'tcx>, DefId), // selects a particular enum variant (*1) @@ -109,18 +109,18 @@ pub struct Upvar { // different kinds of pointers: #[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub enum PointerKind { +pub enum PointerKind<'tcx> { /// `Box` Unique, /// `&T` - BorrowedPtr(ty::BorrowKind, ty::Region), + BorrowedPtr(ty::BorrowKind, &'tcx ty::Region), /// `*T` UnsafePtr(hir::Mutability), /// Implicit deref of the `&T` that results from an overloaded index `[]`. - Implicit(ty::BorrowKind, ty::Region), + Implicit(ty::BorrowKind, &'tcx ty::Region), } // We use the term "interior" to mean "something reachable from the @@ -194,52 +194,6 @@ pub struct cmt_<'tcx> { pub type cmt<'tcx> = Rc>; -// We pun on *T to mean both actual deref of a ptr as well -// as accessing of components: -#[derive(Copy, Clone)] -pub enum deref_kind { - deref_ptr(PointerKind), - deref_interior(InteriorKind), -} - -type DerefKindContext = Option; - -// Categorizes a derefable type. Note that we include vectors and strings as -// derefable (we model an index as the combination of a deref and then a -// pointer adjustment). -fn deref_kind(t: Ty, context: DerefKindContext) -> McResult { - match t.sty { - ty::TyBox(_) => { - Ok(deref_ptr(Unique)) - } - - ty::TyRef(r, mt) => { - let kind = ty::BorrowKind::from_mutbl(mt.mutbl); - Ok(deref_ptr(BorrowedPtr(kind, *r))) - } - - ty::TyRawPtr(ref mt) => { - Ok(deref_ptr(UnsafePtr(mt.mutbl))) - } - - ty::TyEnum(..) | - ty::TyStruct(..) 
=> { // newtype - Ok(deref_interior(InteriorField(PositionalField(0)))) - } - - ty::TyArray(_, _) | ty::TySlice(_) | ty::TyStr => { - // no deref of indexed content without supplying InteriorOffsetKind - if let Some(context) = context { - Ok(deref_interior(InteriorElement(context, element_kind(t)))) - } else { - Err(()) - } - } - - _ => Err(()), - } -} - pub trait ast_node { fn id(&self) -> ast::NodeId; fn span(&self) -> Span; @@ -256,8 +210,20 @@ impl ast_node for hir::Pat { } #[derive(Copy, Clone)] -pub struct MemCategorizationContext<'t, 'a: 't, 'tcx : 'a> { - pub typer: &'t infer::InferCtxt<'a, 'tcx>, +pub struct MemCategorizationContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + options: MemCategorizationOptions, +} + +#[derive(Copy, Clone, Default)] +pub struct MemCategorizationOptions { + // If true, then when analyzing a closure upvar, if the closure + // has a missing kind, we treat it like a Fn closure. When false, + // we ICE if the closure has a missing kind. Should be false + // except during closure kind inference. It is used by the + // mem-categorization code to be able to have stricter assertions + // (which are always true except during upvar inference). + pub during_closure_kind_inference: bool, } pub type McResult = Result; @@ -302,19 +268,19 @@ impl MutabilityCategory { ret } - fn from_local(tcx: &ty::ctxt, id: ast::NodeId) -> MutabilityCategory { + fn from_local(tcx: TyCtxt, id: ast::NodeId) -> MutabilityCategory { let ret = match tcx.map.get(id) { ast_map::NodeLocal(p) => match p.node { - hir::PatIdent(bind_mode, _, _) => { + PatKind::Binding(bind_mode, ..) 
=> { if bind_mode == hir::BindByValue(hir::MutMutable) { McDeclared } else { McImmutable } } - _ => tcx.sess.span_bug(p.span, "expected identifier pattern") + _ => span_bug!(p.span, "expected identifier pattern") }, - _ => tcx.sess.span_bug(tcx.map.span(id), "expected identifier pattern") + _ => span_bug!(tcx.map.span(id), "expected identifier pattern") }; debug!("MutabilityCategory::{}(tcx, id={:?}) => {:?}", "from_local", id, ret); @@ -358,17 +324,27 @@ impl MutabilityCategory { } } -impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { - pub fn new(typer: &'t infer::InferCtxt<'a, 'tcx>) -> MemCategorizationContext<'t, 'a, 'tcx> { - MemCategorizationContext { typer: typer } +impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>) + -> MemCategorizationContext<'a, 'gcx, 'tcx> { + MemCategorizationContext::with_options(infcx, MemCategorizationOptions::default()) + } + + pub fn with_options(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + options: MemCategorizationOptions) + -> MemCategorizationContext<'a, 'gcx, 'tcx> { + MemCategorizationContext { + infcx: infcx, + options: options, + } } - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.typer.tcx + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.infcx.tcx } fn expr_ty(&self, expr: &hir::Expr) -> McResult> { - match self.typer.node_ty(expr.id) { + match self.infcx.node_ty(expr.id) { Ok(t) => Ok(t), Err(()) => { debug!("expr_ty({:?}) yielded Err", expr); @@ -378,25 +354,21 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { } fn expr_ty_adjusted(&self, expr: &hir::Expr) -> McResult> { - let unadjusted_ty = try!(self.expr_ty(expr)); - Ok(unadjusted_ty.adjust( - self.tcx(), expr.span, expr.id, - self.typer.adjustments().get(&expr.id), - |method_call| self.typer.node_method_ty(method_call))) + self.infcx.expr_ty_adjusted(expr) } fn node_ty(&self, id: ast::NodeId) -> McResult> { - self.typer.node_ty(id) + self.infcx.node_ty(id) } fn pat_ty(&self, pat: 
&hir::Pat) -> McResult> { - let base_ty = try!(self.typer.node_ty(pat.id)); + let base_ty = self.infcx.node_ty(pat.id)?; // FIXME (Issue #18207): This code detects whether we are // looking at a `ref x`, and if so, figures out what the type // *being borrowed* is. But ideally we would put in a more // fundamental fix to this conflated use of the node id. let ret_ty = match pat.node { - hir::PatIdent(hir::BindByRef(_), _, _) => { + PatKind::Binding(hir::BindByRef(_), ..) => { // a bind-by-ref means that the base_ty will be the type of the ident itself, // but what we want here is the type of the underlying value being borrowed. // So peel off one-level, turning the &T into T. @@ -413,29 +385,33 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { } pub fn cat_expr(&self, expr: &hir::Expr) -> McResult> { - match self.typer.adjustments().get(&expr.id) { + match self.infcx.adjustments().get(&expr.id) { None => { // No adjustments. self.cat_expr_unadjusted(expr) } Some(adjustment) => { - match *adjustment { - adjustment::AdjustDerefRef( - adjustment::AutoDerefRef { - autoref: None, unsize: None, autoderefs, ..}) => { + match adjustment.kind { + adjustment::Adjust::DerefRef { + autoderefs, + autoref: None, + unsize: false + } => { // Equivalent to *expr or something similar. self.cat_expr_autoderefd(expr, autoderefs) } - adjustment::AdjustReifyFnPointer | - adjustment::AdjustUnsafeFnPointer | - adjustment::AdjustDerefRef(_) => { + adjustment::Adjust::NeverToAny | + adjustment::Adjust::ReifyFnPointer | + adjustment::Adjust::UnsafeFnPointer | + adjustment::Adjust::MutToConstPointer | + adjustment::Adjust::DerefRef {..} => { debug!("cat_expr({:?}): {:?}", adjustment, expr); // Result is an rvalue. 
- let expr_ty = try!(self.expr_ty_adjusted(expr)); + let expr_ty = self.expr_ty_adjusted(expr)?; Ok(self.cat_rvalue_node(expr.id(), expr.span(), expr_ty)) } } @@ -447,12 +423,12 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { expr: &hir::Expr, autoderefs: usize) -> McResult> { - let mut cmt = try!(self.cat_expr_unadjusted(expr)); + let mut cmt = self.cat_expr_unadjusted(expr)?; debug!("cat_expr_autoderefd: autoderefs={}, cmt={:?}", autoderefs, cmt); for deref in 1..autoderefs + 1 { - cmt = try!(self.cat_deref(expr, cmt, deref, None)); + cmt = self.cat_deref(expr, cmt, deref)?; } return Ok(cmt); } @@ -460,15 +436,15 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { pub fn cat_expr_unadjusted(&self, expr: &hir::Expr) -> McResult> { debug!("cat_expr: id={} expr={:?}", expr.id, expr); - let expr_ty = try!(self.expr_ty(expr)); + let expr_ty = self.expr_ty(expr)?; match expr.node { hir::ExprUnary(hir::UnDeref, ref e_base) => { - let base_cmt = try!(self.cat_expr(&**e_base)); - self.cat_deref(expr, base_cmt, 0, None) + let base_cmt = self.cat_expr(&e_base)?; + self.cat_deref(expr, base_cmt, 0) } hir::ExprField(ref base, f_name) => { - let base_cmt = try!(self.cat_expr(&**base)); + let base_cmt = self.cat_expr(&base)?; debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.id, expr, @@ -477,14 +453,13 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { } hir::ExprTupField(ref base, idx) => { - let base_cmt = try!(self.cat_expr(&**base)); + let base_cmt = self.cat_expr(&base)?; Ok(self.cat_tup_field(expr, base_cmt, idx.node, expr_ty)) } hir::ExprIndex(ref base, _) => { let method_call = ty::MethodCall::expr(expr.id()); - let context = InteriorOffsetKind::Index; - match self.typer.node_method_ty(method_call) { + match self.infcx.node_method_ty(method_call) { Some(method_ty) => { // If this is an index implemented by a method call, then it // will include an implicit deref of the result. 
@@ -505,29 +480,29 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { // is an rvalue. That is what we will be // dereferencing. let base_cmt = self.cat_rvalue_node(expr.id(), expr.span(), ret_ty); - self.cat_deref_common(expr, base_cmt, 1, elem_ty, Some(context), true) + Ok(self.cat_deref_common(expr, base_cmt, 1, elem_ty, true)) } None => { - self.cat_index(expr, try!(self.cat_expr(&**base)), context) + self.cat_index(expr, self.cat_expr(&base)?, InteriorOffsetKind::Index) } } } - hir::ExprPath(..) => { - let def = self.tcx().def_map.borrow().get(&expr.id).unwrap().full_def(); + hir::ExprPath(ref qpath) => { + let def = self.infcx.tables.borrow().qpath_def(qpath, expr.id); self.cat_def(expr.id, expr.span, expr_ty, def) } hir::ExprType(ref e, _) => { - self.cat_expr(&**e) + self.cat_expr(&e) } hir::ExprAddrOf(..) | hir::ExprCall(..) | hir::ExprAssign(..) | hir::ExprAssignOp(..) | hir::ExprClosure(..) | hir::ExprRet(..) | - hir::ExprUnary(..) | hir::ExprRange(..) | + hir::ExprUnary(..) | hir::ExprMethodCall(..) | hir::ExprCast(..) | - hir::ExprVec(..) | hir::ExprTup(..) | hir::ExprIf(..) | + hir::ExprArray(..) | hir::ExprTup(..) | hir::ExprIf(..) | hir::ExprBinary(..) | hir::ExprWhile(..) | hir::ExprBlock(..) | hir::ExprLoop(..) | hir::ExprMatch(..) | hir::ExprLit(..) | hir::ExprBreak(..) | @@ -542,32 +517,18 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { id: ast::NodeId, span: Span, expr_ty: Ty<'tcx>, - def: def::Def) + def: Def) -> McResult> { debug!("cat_def: id={} expr={:?} def={:?}", id, expr_ty, def); match def { - def::DefStruct(..) | def::DefVariant(..) | def::DefConst(..) | - def::DefAssociatedConst(..) | def::DefFn(..) | def::DefMethod(..) => { + Def::StructCtor(..) | Def::VariantCtor(..) | Def::Const(..) | + Def::AssociatedConst(..) | Def::Fn(..) | Def::Method(..) => { Ok(self.cat_rvalue_node(id, span, expr_ty)) } - def::DefMod(_) | def::DefForeignMod(_) | - def::DefTrait(_) | def::DefTy(..) 
| def::DefPrimTy(_) | - def::DefTyParam(..) | - def::DefLabel(_) | def::DefSelfTy(..) | - def::DefAssociatedTy(..) => { - Ok(Rc::new(cmt_ { - id:id, - span:span, - cat:Categorization::StaticItem, - mutbl: McImmutable, - ty:expr_ty, - note: NoteNone - })) - } - def::DefStatic(_, mutbl) => { + Def::Static(_, mutbl) => { Ok(Rc::new(cmt_ { id:id, span:span, @@ -578,32 +539,45 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { })) } - def::DefUpvar(_, var_id, _, fn_node_id) => { - let ty = try!(self.node_ty(fn_node_id)); + Def::Upvar(def_id, _, fn_node_id) => { + let var_id = self.tcx().map.as_local_node_id(def_id).unwrap(); + let ty = self.node_ty(fn_node_id)?; match ty.sty { ty::TyClosure(closure_id, _) => { - match self.typer.closure_kind(closure_id) { + match self.infcx.closure_kind(closure_id) { Some(kind) => { self.cat_upvar(id, span, var_id, fn_node_id, kind) } None => { - self.tcx().sess.span_bug( - span, - &*format!("No closure kind for {:?}", closure_id)); + if !self.options.during_closure_kind_inference { + span_bug!( + span, + "No closure kind for {:?}", + closure_id); + } + + // during closure kind inference, we + // don't know the closure kind yet, but + // it's ok because we detect that we are + // accessing an upvar and handle that + // case specially anyhow. Use Fn + // arbitrarily. 
+ self.cat_upvar(id, span, var_id, fn_node_id, ty::ClosureKind::Fn) } } } _ => { - self.tcx().sess.span_bug( + span_bug!( span, - &format!("Upvar of non-closure {} - {:?}", - fn_node_id, - ty)); + "Upvar of non-closure {} - {:?}", + fn_node_id, + ty); } } } - def::DefLocal(_, vid) => { + Def::Local(def_id) => { + let vid = self.tcx().map.as_local_node_id(def_id).unwrap(); Ok(Rc::new(cmt_ { id: id, span: span, @@ -614,7 +588,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { })) } - def::DefErr => panic!("DefErr in memory categorization") + def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def) } } @@ -653,7 +627,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { let upvar_id = ty::UpvarId { var_id: var_id, closure_expr_id: fn_node_id }; - let var_ty = try!(self.node_ty(var_id)); + let var_ty = self.node_ty(var_id)?; // Mutability of original variable itself let var_mutbl = MutabilityCategory::from_local(self.tcx(), var_id); @@ -674,13 +648,13 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { // conceptually a `&mut` or `&` reference, so we have to add a // deref. let cmt_result = match kind { - ty::FnOnceClosureKind => { + ty::ClosureKind::FnOnce => { cmt_result } - ty::FnMutClosureKind => { + ty::ClosureKind::FnMut => { self.env_deref(id, span, upvar_id, var_mutbl, ty::MutBorrow, cmt_result) } - ty::FnClosureKind => { + ty::ClosureKind::Fn => { self.env_deref(id, span, upvar_id, var_mutbl, ty::ImmBorrow, cmt_result) } }; @@ -690,7 +664,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { // for that. 
let upvar_id = ty::UpvarId { var_id: var_id, closure_expr_id: fn_node_id }; - let upvar_capture = self.typer.upvar_capture(upvar_id).unwrap(); + let upvar_capture = self.infcx.upvar_capture(upvar_id).unwrap(); let cmt_result = match upvar_capture { ty::UpvarCapture::ByValue => { cmt_result @@ -727,23 +701,23 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { let fn_body_id = { let fn_expr = match self.tcx().map.find(upvar_id.closure_expr_id) { Some(ast_map::NodeExpr(e)) => e, - _ => unreachable!() + _ => bug!() }; match fn_expr.node { - hir::ExprClosure(_, _, ref body) => body.id, - _ => unreachable!() + hir::ExprClosure(.., body_id, _) => body_id.node_id(), + _ => bug!() } }; // Region of environment pointer - let env_region = ty::ReFree(ty::FreeRegion { + let env_region = self.tcx().mk_region(ty::ReFree(ty::FreeRegion { // The environment of a closure is guaranteed to // outlive any bindings introduced in the body of the // closure itself. scope: self.tcx().region_maps.item_extent(fn_body_id), bound_region: ty::BrEnv - }); + })); let env_ptr = BorrowedPtr(env_borrow_kind, env_region); @@ -787,11 +761,11 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { /// Returns the lifetime of a temporary created by expr with id `id`. /// This could be `'static` if `id` is part of a constant expression. 
- pub fn temporary_scope(&self, id: ast::NodeId) -> ty::Region { - match self.typer.temporary_scope(id) { + pub fn temporary_scope(&self, id: ast::NodeId) -> &'tcx ty::Region { + self.tcx().mk_region(match self.infcx.temporary_scope(id) { Some(scope) => ty::ReScope(scope), None => ty::ReStatic - } + }) } pub fn cat_rvalue_node(&self, @@ -800,22 +774,22 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { expr_ty: Ty<'tcx>) -> cmt<'tcx> { let qualif = self.tcx().const_qualif_map.borrow().get(&id).cloned() - .unwrap_or(check_const::ConstQualif::NOT_CONST); + .unwrap_or(ConstQualif::NOT_CONST); // Only promote `[T; 0]` before an RFC for rvalue promotions // is accepted. let qualif = match expr_ty.sty { ty::TyArray(_, 0) => qualif, - _ => check_const::ConstQualif::NOT_CONST + _ => ConstQualif::NOT_CONST }; // Compute maximum lifetime of this rvalue. This is 'static if // we can promote to a constant, otherwise equal to enclosing temp // lifetime. - let re = if qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS) { + let re = if qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { self.temporary_scope(id) } else { - ty::ReStatic + self.tcx().mk_region(ty::ReStatic) }; let ret = self.cat_rvalue(id, span, re, expr_ty); debug!("cat_rvalue_node ret {:?}", ret); @@ -825,7 +799,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { pub fn cat_rvalue(&self, cmt_id: ast::NodeId, span: Span, - temp_scope: ty::Region, + temp_scope: &'tcx ty::Region, expr_ty: Ty<'tcx>) -> cmt<'tcx> { let ret = Rc::new(cmt_ { id:cmt_id, @@ -878,14 +852,13 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { fn cat_deref(&self, node: &N, base_cmt: cmt<'tcx>, - deref_cnt: usize, - deref_context: DerefKindContext) + deref_cnt: usize) -> McResult> { let method_call = ty::MethodCall { expr_id: node.id(), autoderef: deref_cnt as u32 }; - let method_ty = self.typer.node_method_ty(method_call); + let method_ty = self.infcx.node_method_ty(method_call); 
debug!("cat_deref: method_call={:?} method_ty={:?}", method_call, method_ty.map(|ty| ty)); @@ -893,7 +866,7 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { let base_cmt = match method_ty { Some(method_ty) => { let ref_ty = - self.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap(); + self.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap(); self.cat_rvalue_node(node.id(), node.span(), ref_ty) } None => base_cmt @@ -901,12 +874,9 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { let base_cmt_ty = base_cmt.ty; match base_cmt_ty.builtin_deref(true, ty::NoPreference) { Some(mt) => { - let ret = self.cat_deref_common(node, base_cmt, deref_cnt, - mt.ty, - deref_context, - /* implicit: */ false); + let ret = self.cat_deref_common(node, base_cmt, deref_cnt, mt.ty, false); debug!("cat_deref ret {:?}", ret); - ret + Ok(ret) } None => { debug!("Explicit deref of non-derefable type: {:?}", @@ -921,40 +891,29 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { base_cmt: cmt<'tcx>, deref_cnt: usize, deref_ty: Ty<'tcx>, - deref_context: DerefKindContext, implicit: bool) - -> McResult> + -> cmt<'tcx> { - let (m, cat) = match try!(deref_kind(base_cmt.ty, deref_context)) { - deref_ptr(ptr) => { - let ptr = if implicit { - match ptr { - BorrowedPtr(bk, r) => Implicit(bk, r), - _ => self.tcx().sess.span_bug(node.span(), - "Implicit deref of non-borrowed pointer") - } - } else { - ptr - }; - // for unique ptrs, we inherit mutability from the - // owning reference. - (MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr), - Categorization::Deref(base_cmt, deref_cnt, ptr)) - } - deref_interior(interior) => { - (base_cmt.mutbl.inherit(), Categorization::Interior(base_cmt, interior)) + let ptr = match base_cmt.ty.sty { + ty::TyBox(..) 
=> Unique, + ty::TyRawPtr(ref mt) => UnsafePtr(mt.mutbl), + ty::TyRef(r, mt) => { + let bk = ty::BorrowKind::from_mutbl(mt.mutbl); + if implicit { Implicit(bk, r) } else { BorrowedPtr(bk, r) } } + ref ty => bug!("unexpected type in cat_deref_common: {:?}", ty) }; let ret = Rc::new(cmt_ { id: node.id(), span: node.span(), - cat: cat, - mutbl: m, + // For unique ptrs, we inherit mutability from the owning reference. + mutbl: MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr), + cat: Categorization::Deref(base_cmt, deref_cnt, ptr), ty: deref_ty, note: NoteNone }); debug!("cat_deref_common ret {:?}", ret); - Ok(ret) + ret } pub fn cat_index(&self, @@ -980,20 +939,19 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { //! - `base_cmt`: the cmt of `elt` let method_call = ty::MethodCall::expr(elt.id()); - let method_ty = self.typer.node_method_ty(method_call); + let method_ty = self.infcx.node_method_ty(method_call); - let element_ty = match method_ty { + let (element_ty, element_kind) = match method_ty { Some(method_ty) => { let ref_ty = self.overloaded_method_return_ty(method_ty); base_cmt = self.cat_rvalue_node(elt.id(), elt.span(), ref_ty); - // FIXME(#20649) -- why are we using the `self_ty` as the element type...? 
- let self_ty = method_ty.fn_sig().input(0); - self.tcx().no_late_bound_regions(&self_ty).unwrap() + (ref_ty.builtin_deref(false, ty::NoPreference).unwrap().ty, + ElementKind::OtherElement) } None => { match base_cmt.ty.builtin_index() { - Some(ty) => ty, + Some(ty) => (ty, ElementKind::VecElement), None => { return Err(()); } @@ -1001,106 +959,11 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { } }; - let m = base_cmt.mutbl.inherit(); - let ret = interior(elt, base_cmt.clone(), base_cmt.ty, - m, context, element_ty); + let interior_elem = InteriorElement(context, element_kind); + let ret = + self.cat_imm_interior(elt, base_cmt.clone(), element_ty, interior_elem); debug!("cat_index ret {:?}", ret); return Ok(ret); - - fn interior<'tcx, N: ast_node>(elt: &N, - of_cmt: cmt<'tcx>, - vec_ty: Ty<'tcx>, - mutbl: MutabilityCategory, - context: InteriorOffsetKind, - element_ty: Ty<'tcx>) -> cmt<'tcx> - { - let interior_elem = InteriorElement(context, element_kind(vec_ty)); - Rc::new(cmt_ { - id:elt.id(), - span:elt.span(), - cat:Categorization::Interior(of_cmt, interior_elem), - mutbl:mutbl, - ty:element_ty, - note: NoteNone - }) - } - } - - // Takes either a vec or a reference to a vec and returns the cmt for the - // underlying vec. - fn deref_vec(&self, - elt: &N, - base_cmt: cmt<'tcx>, - context: InteriorOffsetKind) - -> McResult> - { - let ret = match try!(deref_kind(base_cmt.ty, Some(context))) { - deref_ptr(ptr) => { - // for unique ptrs, we inherit mutability from the - // owning reference. 
- let m = MutabilityCategory::from_pointer_kind(base_cmt.mutbl, ptr); - - // the deref is explicit in the resulting cmt - Rc::new(cmt_ { - id:elt.id(), - span:elt.span(), - cat:Categorization::Deref(base_cmt.clone(), 0, ptr), - mutbl:m, - ty: match base_cmt.ty.builtin_deref(false, ty::NoPreference) { - Some(mt) => mt.ty, - None => self.tcx().sess.bug("Found non-derefable type") - }, - note: NoteNone - }) - } - - deref_interior(_) => { - base_cmt - } - }; - debug!("deref_vec ret {:?}", ret); - Ok(ret) - } - - /// Given a pattern P like: `[_, ..Q, _]`, where `vec_cmt` is the cmt for `P`, `slice_pat` is - /// the pattern `Q`, returns: - /// - /// * a cmt for `Q` - /// * the mutability and region of the slice `Q` - /// - /// These last two bits of info happen to be things that borrowck needs. - pub fn cat_slice_pattern(&self, - vec_cmt: cmt<'tcx>, - slice_pat: &hir::Pat) - -> McResult<(cmt<'tcx>, hir::Mutability, ty::Region)> { - let slice_ty = try!(self.node_ty(slice_pat.id)); - let (slice_mutbl, slice_r) = vec_slice_info(self.tcx(), - slice_pat, - slice_ty); - let context = InteriorOffsetKind::Pattern; - let cmt_vec = try!(self.deref_vec(slice_pat, vec_cmt, context)); - let cmt_slice = try!(self.cat_index(slice_pat, cmt_vec, context)); - return Ok((cmt_slice, slice_mutbl, slice_r)); - - /// In a pattern like [a, b, ..c], normally `c` has slice type, but if you have [a, b, - /// ..ref c], then the type of `ref c` will be `&&[]`, so to extract the slice details we - /// have to recurse through rptrs. 
- fn vec_slice_info(tcx: &ty::ctxt, - pat: &hir::Pat, - slice_ty: Ty) - -> (hir::Mutability, ty::Region) { - match slice_ty.sty { - ty::TyRef(r, ref mt) => match mt.ty.sty { - ty::TySlice(_) => (mt.mutbl, *r), - _ => vec_slice_info(tcx, pat, mt.ty), - }, - - _ => { - tcx.sess.span_bug(pat.span, - "type of slice pattern is not a slice"); - } - } - } } pub fn cat_imm_interior(&self, @@ -1140,15 +1003,14 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { } pub fn cat_pattern(&self, cmt: cmt<'tcx>, pat: &hir::Pat, mut op: F) -> McResult<()> - where F: FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &hir::Pat), + where F: FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat), { self.cat_pattern_(cmt, pat, &mut op) } // FIXME(#19596) This is a workaround, but there should be a better way to do this - fn cat_pattern_(&self, cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F) - -> McResult<()> - where F : FnMut(&MemCategorizationContext<'t, 'a, 'tcx>, cmt<'tcx>, &hir::Pat), + fn cat_pattern_(&self, cmt: cmt<'tcx>, pat: &hir::Pat, op: &mut F) -> McResult<()> + where F : FnMut(&MemCategorizationContext<'a, 'gcx, 'tcx>, cmt<'tcx>, &hir::Pat) { // Here, `cmt` is the categorization for the value being // matched and pat is the pattern it is being matched against. @@ -1195,143 +1057,118 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { // step out of sync again. So you'll see below that we always // get the type of the *subpattern* and use that. 
- debug!("cat_pattern: {:?} cmt={:?}", - pat, - cmt); - - (*op)(self, cmt.clone(), pat); - - let opt_def = if let Some(path_res) = self.tcx().def_map.borrow().get(&pat.id) { - if path_res.depth != 0 || path_res.base_def == def::DefErr { - // Since patterns can be associated constants - // which are resolved during typeck, we might have - // some unresolved patterns reaching this stage - // without aborting - return Err(()); - } - Some(path_res.full_def()) - } else { - None - }; - - // Note: This goes up here (rather than within the PatEnum arm - // alone) because struct patterns can refer to struct types or - // to struct variants within enums. - let cmt = match opt_def { - Some(def::DefVariant(enum_did, variant_did, _)) - // univariant enums do not need downcasts - if !self.tcx().lookup_adt_def(enum_did).is_univariant() => { - self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did) + debug!("cat_pattern: {:?} cmt={:?}", pat, cmt); + + op(self, cmt.clone(), pat); + + // Note: This goes up here (rather than within the PatKind::TupleStruct arm + // alone) because PatKind::Struct can also refer to variants. + let cmt = match pat.node { + PatKind::Path(hir::QPath::Resolved(_, ref path)) | + PatKind::TupleStruct(hir::QPath::Resolved(_, ref path), ..) | + PatKind::Struct(hir::QPath::Resolved(_, ref path), ..) => { + match path.def { + Def::Err => return Err(()), + Def::Variant(variant_did) | + Def::VariantCtor(variant_did, ..) => { + // univariant enums do not need downcasts + let enum_did = self.tcx().parent_def_id(variant_did).unwrap(); + if !self.tcx().lookup_adt_def(enum_did).is_univariant() { + self.cat_downcast(pat, cmt.clone(), cmt.ty, variant_did) + } else { + cmt + } + } + _ => cmt } + } _ => cmt }; match pat.node { - hir::PatWild => { - // _ - } - - hir::PatEnum(_, None) => { - // variant(..) 
- } - hir::PatEnum(_, Some(ref subpats)) => { - match opt_def { - Some(def::DefVariant(..)) => { - // variant(x, y, z) - for (i, subpat) in subpats.iter().enumerate() { - let subpat_ty = try!(self.pat_ty(&**subpat)); // see (*2) - - let subcmt = - self.cat_imm_interior( - pat, cmt.clone(), subpat_ty, - InteriorField(PositionalField(i))); - - try!(self.cat_pattern_(subcmt, &**subpat, op)); - } + PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => { + let def = self.infcx.tables.borrow().qpath_def(qpath, pat.id); + let expected_len = match def { + Def::VariantCtor(def_id, CtorKind::Fn) => { + let enum_def = self.tcx().parent_def_id(def_id).unwrap(); + self.tcx().lookup_adt_def(enum_def).variant_with_id(def_id).fields.len() } - Some(def::DefStruct(..)) => { - for (i, subpat) in subpats.iter().enumerate() { - let subpat_ty = try!(self.pat_ty(&**subpat)); // see (*2) - let cmt_field = - self.cat_imm_interior( - pat, cmt.clone(), subpat_ty, - InteriorField(PositionalField(i))); - try!(self.cat_pattern_(cmt_field, &**subpat, op)); - } - } - Some(def::DefConst(..)) | Some(def::DefAssociatedConst(..)) => { - for subpat in subpats { - try!(self.cat_pattern_(cmt.clone(), &**subpat, op)); + Def::StructCtor(_, CtorKind::Fn) => { + match self.pat_ty(&pat)?.sty { + ty::TyAdt(adt_def, _) => { + adt_def.struct_variant().fields.len() + } + ref ty => { + span_bug!(pat.span, "tuple struct pattern unexpected type {:?}", ty); + } } } - _ => { - self.tcx().sess.span_bug( - pat.span, - &format!("enum pattern didn't resolve to enum or struct {:?}", opt_def)); + def => { + span_bug!(pat.span, "tuple struct pattern didn't resolve \ + to variant or struct {:?}", def); } - } - } - - hir::PatQPath(..) 
=> { - // Lone constant: ignore - } - - hir::PatIdent(_, _, Some(ref subpat)) => { - try!(self.cat_pattern_(cmt, &**subpat, op)); - } + }; - hir::PatIdent(_, _, None) => { - // nullary variant or identifier: ignore + for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) { + let subpat_ty = self.pat_ty(&subpat)?; // see (*2) + let subcmt = self.cat_imm_interior(pat, cmt.clone(), subpat_ty, + InteriorField(PositionalField(i))); + self.cat_pattern_(subcmt, &subpat, op)?; + } } - hir::PatStruct(_, ref field_pats, _) => { + PatKind::Struct(_, ref field_pats, _) => { // {f1: p1, ..., fN: pN} for fp in field_pats { - let field_ty = try!(self.pat_ty(&*fp.node.pat)); // see (*2) + let field_ty = self.pat_ty(&fp.node.pat)?; // see (*2) let cmt_field = self.cat_field(pat, cmt.clone(), fp.node.name, field_ty); - try!(self.cat_pattern_(cmt_field, &*fp.node.pat, op)); + self.cat_pattern_(cmt_field, &fp.node.pat, op)?; } } - hir::PatTup(ref subpats) => { + PatKind::Binding(.., Some(ref subpat)) => { + self.cat_pattern_(cmt, &subpat, op)?; + } + + PatKind::Tuple(ref subpats, ddpos) => { // (p1, ..., pN) - for (i, subpat) in subpats.iter().enumerate() { - let subpat_ty = try!(self.pat_ty(&**subpat)); // see (*2) - let subcmt = - self.cat_imm_interior( - pat, cmt.clone(), subpat_ty, - InteriorField(PositionalField(i))); - try!(self.cat_pattern_(subcmt, &**subpat, op)); + let expected_len = match self.pat_ty(&pat)?.sty { + ty::TyTuple(ref tys) => tys.len(), + ref ty => span_bug!(pat.span, "tuple pattern unexpected type {:?}", ty), + }; + for (i, subpat) in subpats.iter().enumerate_and_adjust(expected_len, ddpos) { + let subpat_ty = self.pat_ty(&subpat)?; // see (*2) + let subcmt = self.cat_imm_interior(pat, cmt.clone(), subpat_ty, + InteriorField(PositionalField(i))); + self.cat_pattern_(subcmt, &subpat, op)?; } } - hir::PatBox(ref subpat) | hir::PatRegion(ref subpat, _) => { + PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) => { // box p1, &p1, &mut p1. 
we can ignore the mutability of - // PatRegion since that information is already contained + // PatKind::Ref since that information is already contained // in the type. - let subcmt = try!(self.cat_deref(pat, cmt, 0, None)); - try!(self.cat_pattern_(subcmt, &**subpat, op)); + let subcmt = self.cat_deref(pat, cmt, 0)?; + self.cat_pattern_(subcmt, &subpat, op)?; } - hir::PatVec(ref before, ref slice, ref after) => { - let context = InteriorOffsetKind::Pattern; - let vec_cmt = try!(self.deref_vec(pat, cmt, context)); - let elt_cmt = try!(self.cat_index(pat, vec_cmt, context)); - for before_pat in before { - try!(self.cat_pattern_(elt_cmt.clone(), &**before_pat, op)); - } - if let Some(ref slice_pat) = *slice { - let slice_ty = try!(self.pat_ty(&**slice_pat)); - let slice_cmt = self.cat_rvalue_node(pat.id(), pat.span(), slice_ty); - try!(self.cat_pattern_(slice_cmt, &**slice_pat, op)); - } - for after_pat in after { - try!(self.cat_pattern_(elt_cmt.clone(), &**after_pat, op)); - } + PatKind::Slice(ref before, ref slice, ref after) => { + let context = InteriorOffsetKind::Pattern; + let elt_cmt = self.cat_index(pat, cmt, context)?; + for before_pat in before { + self.cat_pattern_(elt_cmt.clone(), &before_pat, op)?; + } + if let Some(ref slice_pat) = *slice { + self.cat_pattern_(elt_cmt.clone(), &slice_pat, op)?; + } + for after_pat in after { + self.cat_pattern_(elt_cmt.clone(), &after_pat, op)?; + } } - hir::PatLit(_) | hir::PatRange(_, _) => { - /*always ok*/ + PatKind::Path(_) | PatKind::Binding(.., None) | + PatKind::Lit(..) | PatKind::Range(..) | PatKind::Wild => { + // always ok } } @@ -1349,7 +1186,6 @@ impl<'t, 'a,'tcx> MemCategorizationContext<'t, 'a, 'tcx> { // to skip past the binder. self.tcx().no_late_bound_regions(&method_ty.fn_ret()) .unwrap() - .unwrap() // overloaded ops do not diverge, either } } @@ -1380,9 +1216,9 @@ impl<'tcx> cmt_<'tcx> { Categorization::Rvalue(..) | Categorization::StaticItem | Categorization::Local(..) 
| - Categorization::Deref(_, _, UnsafePtr(..)) | - Categorization::Deref(_, _, BorrowedPtr(..)) | - Categorization::Deref(_, _, Implicit(..)) | + Categorization::Deref(.., UnsafePtr(..)) | + Categorization::Deref(.., BorrowedPtr(..)) | + Categorization::Deref(.., Implicit(..)) | Categorization::Upvar(..) => { Rc::new((*self).clone()) } @@ -1395,8 +1231,7 @@ impl<'tcx> cmt_<'tcx> { } /// Returns `FreelyAliasable(_)` if this lvalue represents a freely aliasable pointer type. - pub fn freely_aliasable(&self, ctxt: &ty::ctxt<'tcx>) - -> Aliasability { + pub fn freely_aliasable(&self) -> Aliasability { // Maybe non-obvious: copied upvars can only be considered // non-aliasable in once closures, since any other kind can be // aliased and eventually recused. @@ -1409,11 +1244,11 @@ impl<'tcx> cmt_<'tcx> { Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, _) => { // Aliasability depends on base cmt - b.freely_aliasable(ctxt) + b.freely_aliasable() } Categorization::Deref(ref b, _, Unique) => { - let sub = b.freely_aliasable(ctxt); + let sub = b.freely_aliasable(); if b.mutbl.is_mutable() { // Aliasability depends on base cmt alone sub @@ -1426,7 +1261,7 @@ impl<'tcx> cmt_<'tcx> { Categorization::Rvalue(..) | Categorization::Local(..) | Categorization::Upvar(..) | - Categorization::Deref(_, _, UnsafePtr(..)) => { // yes, it's aliasable, but... + Categorization::Deref(.., UnsafePtr(..)) => { // yes, it's aliasable, but... NonAliasable } @@ -1455,14 +1290,14 @@ impl<'tcx> cmt_<'tcx> { match self.note { NoteClosureEnv(..) | NoteUpvarRef(..) => { Some(match self.cat { - Categorization::Deref(ref inner, _, _) => { + Categorization::Deref(ref inner, ..) => { match inner.cat { - Categorization::Deref(ref inner, _, _) => inner.clone(), + Categorization::Deref(ref inner, ..) => inner.clone(), Categorization::Upvar(..) 
=> inner.clone(), - _ => unreachable!() + _ => bug!() } } - _ => unreachable!() + _ => bug!() }) } NoteNone => None @@ -1470,7 +1305,7 @@ impl<'tcx> cmt_<'tcx> { } - pub fn descriptive_string(&self, tcx: &ty::ctxt) -> String { + pub fn descriptive_string(&self, tcx: TyCtxt) -> String { match self.cat { Categorization::StaticItem => { "static item".to_string() @@ -1485,13 +1320,13 @@ impl<'tcx> cmt_<'tcx> { "local variable".to_string() } } - Categorization::Deref(_, _, pk) => { + Categorization::Deref(.., pk) => { let upvar = self.upvar(); match upvar.as_ref().map(|i| &i.cat) { Some(&Categorization::Upvar(ref var)) => { var.to_string() } - Some(_) => unreachable!(), + Some(_) => bug!(), None => { match pk { Implicit(..) => { @@ -1586,7 +1421,7 @@ pub fn ptr_sigil(ptr: PointerKind) -> &'static str { } } -impl fmt::Debug for PointerKind { +impl<'tcx> fmt::Debug for PointerKind<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Unique => write!(f, "Box"), @@ -1617,18 +1452,6 @@ impl fmt::Debug for InteriorKind { } } -fn element_kind(t: Ty) -> ElementKind { - match t.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyBox(ty) => match ty.sty { - ty::TySlice(_) => VecElement, - _ => OtherElement - }, - ty::TyArray(..) 
| ty::TySlice(_) => VecElement, - _ => OtherElement - } -} - impl fmt::Debug for Upvar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}/{:?}", self.id, self.kind) @@ -1638,9 +1461,9 @@ impl fmt::Debug for Upvar { impl fmt::Display for Upvar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let kind = match self.kind { - ty::FnClosureKind => "Fn", - ty::FnMutClosureKind => "FnMut", - ty::FnOnceClosureKind => "FnOnce", + ty::ClosureKind::Fn => "Fn", + ty::ClosureKind::FnMut => "FnMut", + ty::ClosureKind::FnOnce => "FnOnce", }; write!(f, "captured outer variable in an `{}` closure", kind) } diff --git a/src/librustc/middle/pat_util.rs b/src/librustc/middle/pat_util.rs deleted file mode 100644 index 1284e9fd1454e..0000000000000 --- a/src/librustc/middle/pat_util.rs +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::def::*; -use middle::def_id::DefId; -use middle::ty; -use util::nodemap::FnvHashMap; - -use syntax::ast; -use rustc_front::hir; -use rustc_front::util::walk_pat; -use syntax::codemap::{respan, Span, Spanned, DUMMY_SP}; - -use std::cell::RefCell; - -pub type PatIdMap = FnvHashMap; - -// This is used because same-named variables in alternative patterns need to -// use the NodeId of their namesake in the first pattern. -pub fn pat_id_map(dm: &RefCell, pat: &hir::Pat) -> PatIdMap { - let mut map = FnvHashMap(); - pat_bindings(dm, pat, |_bm, p_id, _s, path1| { - map.insert(path1.node, p_id); - }); - map -} - -pub fn pat_is_refutable(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatLit(_) | hir::PatRange(_, _) | hir::PatQPath(..) 
=> true, - hir::PatEnum(_, _) | - hir::PatIdent(_, _, None) | - hir::PatStruct(..) => { - match dm.get(&pat.id).map(|d| d.full_def()) { - Some(DefVariant(..)) => true, - _ => false - } - } - hir::PatVec(_, _, _) => true, - _ => false - } -} - -pub fn pat_is_variant_or_struct(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatEnum(_, _) | - hir::PatIdent(_, _, None) | - hir::PatStruct(..) => { - match dm.get(&pat.id).map(|d| d.full_def()) { - Some(DefVariant(..)) | Some(DefStruct(..)) => true, - _ => false - } - } - _ => false - } -} - -pub fn pat_is_const(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatIdent(_, _, None) | hir::PatEnum(..) | hir::PatQPath(..) => { - match dm.get(&pat.id).map(|d| d.full_def()) { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => true, - _ => false - } - } - _ => false - } -} - -// Same as above, except that partially-resolved defs cause `false` to be -// returned instead of a panic. -pub fn pat_is_resolved_const(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatIdent(_, _, None) | hir::PatEnum(..) | hir::PatQPath(..) => { - match dm.get(&pat.id) - .and_then(|d| if d.depth == 0 { Some(d.base_def) } - else { None } ) { - Some(DefConst(..)) | Some(DefAssociatedConst(..)) => true, - _ => false - } - } - _ => false - } -} - -pub fn pat_is_binding(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatIdent(..) => { - !pat_is_variant_or_struct(dm, pat) && - !pat_is_const(dm, pat) - } - _ => false - } -} - -pub fn pat_is_binding_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool { - match pat.node { - hir::PatIdent(..) 
=> pat_is_binding(dm, pat), - hir::PatWild => true, - _ => false - } -} - -/// Call `it` on every "binding" in a pattern, e.g., on `a` in -/// `match foo() { Some(a) => (), None => () }` -pub fn pat_bindings(dm: &RefCell, pat: &hir::Pat, mut it: I) where - I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned), -{ - walk_pat(pat, |p| { - match p.node { - hir::PatIdent(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => { - it(binding_mode, p.id, p.span, &respan(pth.span, pth.node.name)); - } - _ => {} - } - true - }); -} -pub fn pat_bindings_ident(dm: &RefCell, pat: &hir::Pat, mut it: I) where - I: FnMut(hir::BindingMode, ast::NodeId, Span, &Spanned), -{ - walk_pat(pat, |p| { - match p.node { - hir::PatIdent(binding_mode, ref pth, _) if pat_is_binding(&dm.borrow(), p) => { - it(binding_mode, p.id, p.span, &respan(pth.span, pth.node)); - } - _ => {} - } - true - }); -} - -/// Checks if the pattern contains any patterns that bind something to -/// an ident, e.g. `foo`, or `Foo(foo)` or `foo @ Bar(..)`. -pub fn pat_contains_bindings(dm: &DefMap, pat: &hir::Pat) -> bool { - let mut contains_bindings = false; - walk_pat(pat, |p| { - if pat_is_binding(dm, p) { - contains_bindings = true; - false // there's at least one binding, can short circuit now. - } else { - true - } - }); - contains_bindings -} - -/// Checks if the pattern contains any `ref` or `ref mut` bindings, -/// and if yes wether its containing mutable ones or just immutables ones. 
-pub fn pat_contains_ref_binding(dm: &RefCell, pat: &hir::Pat) -> Option { - let mut result = None; - pat_bindings(dm, pat, |mode, _, _, _| { - match mode { - hir::BindingMode::BindByRef(m) => { - // Pick Mutable as maximum - match result { - None | Some(hir::MutImmutable) => result = Some(m), - _ => (), - } - } - hir::BindingMode::BindByValue(_) => { } - } - }); - result -} - -/// Checks if the patterns for this arm contain any `ref` or `ref mut` -/// bindings, and if yes wether its containing mutable ones or just immutables ones. -pub fn arm_contains_ref_binding(dm: &RefCell, arm: &hir::Arm) -> Option { - arm.pats.iter() - .filter_map(|pat| pat_contains_ref_binding(dm, pat)) - .max_by_key(|m| match *m { - hir::MutMutable => 1, - hir::MutImmutable => 0, - }) -} - -/// Checks if the pattern contains any patterns that bind something to -/// an ident or wildcard, e.g. `foo`, or `Foo(_)`, `foo @ Bar(..)`, -pub fn pat_contains_bindings_or_wild(dm: &DefMap, pat: &hir::Pat) -> bool { - let mut contains_bindings = false; - walk_pat(pat, |p| { - if pat_is_binding_or_wild(dm, p) { - contains_bindings = true; - false // there's at least one binding/wildcard, can short circuit now. - } else { - true - } - }); - contains_bindings -} - -pub fn simple_name<'a>(pat: &'a hir::Pat) -> Option { - match pat.node { - hir::PatIdent(hir::BindByValue(_), ref path1, None) => { - Some(path1.node.name) - } - _ => { - None - } - } -} - -pub fn def_to_path(tcx: &ty::ctxt, id: DefId) -> hir::Path { - tcx.with_path(id, |path| hir::Path { - global: false, - segments: path.last().map(|elem| hir::PathSegment { - identifier: hir::Ident::from_name(elem.name()), - parameters: hir::PathParameters::none(), - }).into_iter().collect(), - span: DUMMY_SP, - }) -} - -/// Return variants that are necessary to exist for the pattern to match. 
-pub fn necessary_variants(dm: &DefMap, pat: &hir::Pat) -> Vec { - let mut variants = vec![]; - walk_pat(pat, |p| { - match p.node { - hir::PatEnum(_, _) | - hir::PatIdent(_, _, None) | - hir::PatStruct(..) => { - match dm.get(&p.id) { - Some(&PathResolution { base_def: DefVariant(_, id, _), .. }) => { - variants.push(id); - } - _ => () - } - } - _ => () - } - true - }); - variants.sort(); - variants.dedup(); - variants -} diff --git a/src/librustc/middle/privacy.rs b/src/librustc/middle/privacy.rs index f464ea58c2d19..1376886968f74 100644 --- a/src/librustc/middle/privacy.rs +++ b/src/librustc/middle/privacy.rs @@ -12,24 +12,19 @@ //! outside their scopes. This pass will also generate a set of exported items //! which are available for use externally when compiled as a library. -pub use self::PrivateDep::*; -pub use self::ImportUse::*; -pub use self::LastPrivate::*; - -use middle::def_id::DefId; -use util::nodemap::{DefIdSet, FnvHashMap}; +use util::nodemap::{DefIdSet, FxHashMap}; use std::hash::Hash; +use std::fmt; use syntax::ast::NodeId; // Accessibility levels, sorted in ascending order -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum AccessLevel { // Exported items + items participating in various kinds of public interfaces, // but not directly nameable. For example, if function `fn f() -> T {...}` is - // public, then type `T` is exported. Its values can be obtained by other crates - // even if the type itseld is not nameable. - // FIXME: Mostly unimplemented. Only `type` aliases export items currently. + // public, then type `T` is reachable. Its values can be obtained by other crates + // even if the type itself is not nameable. 
Reachable, // Public items + items accessible to other crates with help of `pub use` reexports Exported, @@ -40,7 +35,7 @@ pub enum AccessLevel { // Accessibility levels for reachable HIR nodes #[derive(Clone)] pub struct AccessLevels { - pub map: FnvHashMap + pub map: FxHashMap } impl AccessLevels { @@ -61,42 +56,12 @@ impl Default for AccessLevels { } } +impl fmt::Debug for AccessLevels { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.map, f) + } +} + /// A set containing all exported definitions from external crates. /// The set does not contain any entries from local crates. pub type ExternalExports = DefIdSet; - -#[derive(Copy, Clone, Debug)] -pub enum LastPrivate { - LastMod(PrivateDep), - // `use` directives (imports) can refer to two separate definitions in the - // type and value namespaces. We record here the last private node for each - // and whether the import is in fact used for each. - // If the Option fields are None, it means there is no definition - // in that namespace. - LastImport{value_priv: Option, - value_used: ImportUse, - type_priv: Option, - type_used: ImportUse}, -} - -#[derive(Copy, Clone, Debug)] -pub enum PrivateDep { - AllPublic, - DependsOn(DefId), -} - -// How an import is used. -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum ImportUse { - Unused, // The import is not used. - Used, // The import is used. -} - -impl LastPrivate { - pub fn or(self, other: LastPrivate) -> LastPrivate { - match (self, other) { - (me, LastMod(AllPublic)) => me, - (_, other) => other, - } - } -} diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 738440adf416d..9798b2d587dbf 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -16,21 +16,21 @@ // reachable as well. 
use dep_graph::DepNode; -use front::map as ast_map; -use middle::def; -use middle::def_id::DefId; -use middle::ty; +use hir::map as ast_map; +use hir::def::Def; +use hir::def_id::DefId; +use ty::{self, TyCtxt}; use middle::privacy; use session::config; -use util::nodemap::NodeSet; +use util::nodemap::{NodeSet, FxHashSet}; -use std::collections::HashSet; -use syntax::abi; +use syntax::abi::Abi; use syntax::ast; use syntax::attr; -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use rustc_front::intravisit; +use hir; +use hir::intravisit::{Visitor, NestedVisitorMap}; +use hir::itemlikevisit::ItemLikeVisitor; +use hir::intravisit; // Returns true if the given set of generics implies that the item it's // associated with must be inlined. @@ -47,17 +47,18 @@ fn item_might_be_inlined(item: &hir::Item) -> bool { } match item.node { - hir::ItemImpl(_, _, ref generics, _, _, _) | - hir::ItemFn(_, _, _, _, ref generics, _) => { + hir::ItemImpl(_, _, ref generics, ..) | + hir::ItemFn(.., ref generics, _) => { generics_require_inlining(generics) } _ => false, } } -fn method_might_be_inlined(tcx: &ty::ctxt, sig: &hir::MethodSig, - impl_item: &hir::ImplItem, - impl_src: DefId) -> bool { +fn method_might_be_inlined<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + sig: &hir::MethodSig, + impl_item: &hir::ImplItem, + impl_src: DefId) -> bool { if attr::requests_inline(&impl_item.attrs) || generics_require_inlining(&sig.generics) { return true @@ -65,19 +66,19 @@ fn method_might_be_inlined(tcx: &ty::ctxt, sig: &hir::MethodSig, if let Some(impl_node_id) = tcx.map.as_local_node_id(impl_src) { match tcx.map.find(impl_node_id) { Some(ast_map::NodeItem(item)) => - item_might_be_inlined(&*item), + item_might_be_inlined(&item), Some(..) 
| None => - tcx.sess.span_bug(impl_item.span, "impl did is not an item") + span_bug!(impl_item.span, "impl did is not an item") } } else { - tcx.sess.span_bug(impl_item.span, "found a foreign impl as a parent of a local method") + span_bug!(impl_item.span, "found a foreign impl as a parent of a local method") } } // Information needed while computing reachability. struct ReachableContext<'a, 'tcx: 'a> { // The type context. - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, // The set of items which must be exported in the linkage sense. reachable_symbols: NodeSet, // A worklist of item IDs. Each item ID in this worklist will be inlined @@ -87,54 +88,46 @@ struct ReachableContext<'a, 'tcx: 'a> { any_library: bool, } -impl<'a, 'tcx, 'v> Visitor<'v> for ReachableContext<'a, 'tcx> { - fn visit_expr(&mut self, expr: &hir::Expr) { - match expr.node { - hir::ExprPath(..) => { - let def = match self.tcx.def_map.borrow().get(&expr.id) { - Some(d) => d.full_def(), - None => { - self.tcx.sess.span_bug(expr.span, - "def ID not in def map?!") - } - }; - - let def_id = def.def_id(); - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { - if self.def_id_represents_local_inlined_item(def_id) { - self.worklist.push(node_id); - } else { - match def { - // If this path leads to a constant, then we need to - // recurse into the constant to continue finding - // items that are reachable. - def::DefConst(..) | def::DefAssociatedConst(..) => { - self.worklist.push(node_id); - } +impl<'a, 'tcx> Visitor<'tcx> for ReachableContext<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } - // If this wasn't a static, then the destination is - // surely reachable. 
- _ => { - self.reachable_symbols.insert(node_id); - } - } - } - } + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { + let def = match expr.node { + hir::ExprPath(ref qpath) => { + Some(self.tcx.tables().qpath_def(qpath, expr.id)) } hir::ExprMethodCall(..) => { let method_call = ty::MethodCall::expr(expr.id); let def_id = self.tcx.tables.borrow().method_map[&method_call].def_id; + Some(Def::Method(def_id)) + } + _ => None + }; - // Mark the trait item (and, possibly, its default impl) as reachable - // Or mark inherent impl item as reachable - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { - if self.def_id_represents_local_inlined_item(def_id) { - self.worklist.push(node_id) + if let Some(def) = def { + let def_id = def.def_id(); + if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { + if self.def_id_represents_local_inlined_item(def_id) { + self.worklist.push(node_id); + } else { + match def { + // If this path leads to a constant, then we need to + // recurse into the constant to continue finding + // items that are reachable. + Def::Const(..) | Def::AssociatedConst(..) => { + self.worklist.push(node_id); + } + + // If this wasn't a static, then the destination is + // surely reachable. + _ => { + self.reachable_symbols.insert(node_id); + } } - self.reachable_symbols.insert(node_id); } } - _ => {} } intravisit::walk_expr(self, expr) @@ -143,9 +136,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ReachableContext<'a, 'tcx> { impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Creates a new reachability computation context. 
- fn new(tcx: &'a ty::ctxt<'tcx>) -> ReachableContext<'a, 'tcx> { + fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ReachableContext<'a, 'tcx> { let any_library = tcx.sess.crate_types.borrow().iter().any(|ty| { - *ty != config::CrateTypeExecutable + *ty == config::CrateTypeRlib || *ty == config::CrateTypeDylib || + *ty == config::CrateTypeProcMacro || *ty == config::CrateTypeMetadata }); ReachableContext { tcx: tcx, @@ -166,7 +160,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match self.tcx.map.find(node_id) { Some(ast_map::NodeItem(item)) => { match item.node { - hir::ItemFn(..) => item_might_be_inlined(&*item), + hir::ItemFn(..) => item_might_be_inlined(&item), _ => false, } } @@ -193,7 +187,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // does too. let impl_node_id = self.tcx.map.as_local_node_id(impl_did).unwrap(); match self.tcx.map.expect_item(impl_node_id).node { - hir::ItemImpl(_, _, ref generics, _, _, _) => { + hir::ItemImpl(_, _, ref generics, ..) => { generics_require_inlining(generics) } _ => false @@ -210,7 +204,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Step 2: Mark all symbols that the symbols on the worklist touch. fn propagate(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FxHashSet(); loop { let search_item = match self.worklist.pop() { Some(item) => item, @@ -226,19 +220,21 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { } } - fn propagate_node(&mut self, node: &ast_map::Node, + fn propagate_node(&mut self, node: &ast_map::Node<'tcx>, search_item: ast::NodeId) { if !self.any_library { - // If we are building an executable, then there's no need to flag - // anything as external except for `extern fn` types. These - // functions may still participate in some form of native interface, - // but all other rust-only interfaces can be private (they will not - // participate in linkage after this product is produced) + // If we are building an executable, only explicitly extern + // types need to be exported. 
if let ast_map::NodeItem(item) = *node { - if let hir::ItemFn(_, _, _, abi, _, _) = item.node { - if abi != abi::Rust { - self.reachable_symbols.insert(search_item); - } + let reachable = if let hir::ItemFn(.., abi, _, _) = item.node { + abi != Abi::Rust + } else { + false + }; + let is_extern = attr::contains_extern_indicator(&self.tcx.sess.diagnostic(), + &item.attrs); + if reachable || is_extern { + self.reachable_symbols.insert(search_item); } } } else { @@ -252,9 +248,9 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { match *node { ast_map::NodeItem(item) => { match item.node { - hir::ItemFn(_, _, _, _, _, ref search_block) => { - if item_might_be_inlined(&*item) { - intravisit::walk_block(self, &**search_block) + hir::ItemFn(.., body) => { + if item_might_be_inlined(&item) { + self.visit_body(body); } } @@ -262,18 +258,18 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // unconditionally, so we need to make sure that their // contents are also reachable. hir::ItemConst(_, ref init) => { - self.visit_expr(&**init); + self.visit_expr(&init); } // These are normal, nothing reachable about these // inherently and their children are already in the // worklist, as determined by the privacy pass - hir::ItemExternCrate(_) | hir::ItemUse(_) | - hir::ItemTy(..) | hir::ItemStatic(_, _, _) | + hir::ItemExternCrate(_) | hir::ItemUse(..) | + hir::ItemTy(..) | hir::ItemStatic(..) | hir::ItemMod(..) | hir::ItemForeignMod(..) | hir::ItemImpl(..) | hir::ItemTrait(..) | hir::ItemStruct(..) | hir::ItemEnum(..) | - hir::ItemDefaultImpl(..) => {} + hir::ItemUnion(..) | hir::ItemDefaultImpl(..) 
=> {} } } ast_map::NodeTraitItem(trait_method) => { @@ -282,11 +278,11 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { hir::MethodTraitItem(_, None) => { // Keep going, nothing to get exported } - hir::ConstTraitItem(_, Some(ref expr)) => { - self.visit_expr(&*expr); + hir::ConstTraitItem(_, Some(ref body)) => { + self.visit_expr(body); } - hir::MethodTraitItem(_, Some(ref body)) => { - intravisit::walk_block(self, body); + hir::MethodTraitItem(_, Some(body_id)) => { + self.visit_body(body_id); } hir::TypeTraitItem(..) => {} } @@ -294,12 +290,12 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { ast_map::NodeImplItem(impl_item) => { match impl_item.node { hir::ImplItemKind::Const(_, ref expr) => { - self.visit_expr(&*expr); + self.visit_expr(&expr); } - hir::ImplItemKind::Method(ref sig, ref body) => { + hir::ImplItemKind::Method(ref sig, body) => { let did = self.tcx.map.get_parent_did(search_item); if method_might_be_inlined(self.tcx, sig, impl_item, did) { - intravisit::walk_block(self, body) + self.visit_body(body) } } hir::ImplItemKind::Type(_) => {} @@ -308,14 +304,12 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Nothing to recurse on for these ast_map::NodeForeignItem(_) | ast_map::NodeVariant(_) | - ast_map::NodeStructCtor(_) => {} + ast_map::NodeStructCtor(_) | + ast_map::NodeField(_) | + ast_map::NodeTy(_) => {} _ => { - self.tcx - .sess - .bug(&format!("found unexpected thingy in worklist: {}", - self.tcx - .map - .node_to_string(search_item))) + bug!("found unexpected thingy in worklist: {}", + self.tcx.map.node_to_string(search_item)) } } } @@ -334,22 +328,26 @@ struct CollectPrivateImplItemsVisitor<'a> { worklist: &'a mut Vec, } -impl<'a, 'v> Visitor<'v> for CollectPrivateImplItemsVisitor<'a> { +impl<'a, 'v> ItemLikeVisitor<'v> for CollectPrivateImplItemsVisitor<'a> { fn visit_item(&mut self, item: &hir::Item) { // We need only trait impls here, not inherent impls, and only non-exported ones - if let hir::ItemImpl(_, _, _, Some(_), _, ref impl_items) = 
item.node { + if let hir::ItemImpl(.., Some(_), _, ref impl_item_refs) = item.node { if !self.access_levels.is_reachable(item.id) { - for impl_item in impl_items { - self.worklist.push(impl_item.id); + for impl_item_ref in impl_item_refs { + self.worklist.push(impl_item_ref.id.node_id); } } } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + // processed in visit_item above + } } -pub fn find_reachable(tcx: &ty::ctxt, - access_levels: &privacy::AccessLevels) - -> NodeSet { +pub fn find_reachable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &privacy::AccessLevels) + -> NodeSet { let _task = tcx.dep_graph.in_task(DepNode::Reachability); let mut reachable_context = ReachableContext::new(tcx); @@ -362,7 +360,7 @@ pub fn find_reachable(tcx: &ty::ctxt, for (id, _) in &access_levels.map { reachable_context.worklist.push(*id); } - for (_, item) in tcx.lang_items.items() { + for item in tcx.lang_items.items().iter() { if let Some(did) = *item { if let Some(node_id) = tcx.map.as_local_node_id(did) { reachable_context.worklist.push(node_id); @@ -374,7 +372,7 @@ pub fn find_reachable(tcx: &ty::ctxt, access_levels: access_levels, worklist: &mut reachable_context.worklist, }; - tcx.map.krate().visit_all_items(&mut collect_private_impl_items); + tcx.map.krate().visit_all_item_likes(&mut collect_private_impl_items); } // Step 2: Mark all symbols that the symbols on the worklist touch. 
diff --git a/src/librustc/middle/recursion_limit.rs b/src/librustc/middle/recursion_limit.rs index 7dcd358165c92..6c87f750376fa 100644 --- a/src/librustc/middle/recursion_limit.rs +++ b/src/librustc/middle/recursion_limit.rs @@ -17,22 +17,32 @@ use session::Session; use syntax::ast; -use syntax::attr::AttrMetaMethods; -pub fn update_recursion_limit(sess: &Session, krate: &ast::Crate) { +use std::cell::Cell; + +pub fn update_limits(sess: &Session, krate: &ast::Crate) { + update_limit(sess, krate, &sess.recursion_limit, "recursion_limit", + "recursion limit"); + update_limit(sess, krate, &sess.type_length_limit, "type_length_limit", + "type length limit"); +} + +fn update_limit(sess: &Session, krate: &ast::Crate, limit: &Cell, + name: &str, description: &str) { for attr in &krate.attrs { - if !attr.check_name("recursion_limit") { + if !attr.check_name(name) { continue; } if let Some(s) = attr.value_str() { - if let Some(n) = s.parse().ok() { - sess.recursion_limit.set(n); + if let Some(n) = s.as_str().parse().ok() { + limit.set(n); return; } } - span_err!(sess, attr.span, E0296, "malformed recursion limit attribute, \ - expected #![recursion_limit=\"N\"]"); + span_err!(sess, attr.span, E0296, + "malformed {} attribute, expected #![{}=\"N\"]", + description, name); } } diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 543b218a2bc05..b1e35e54eb9bb 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -14,25 +14,25 @@ //! region parameterized. //! //! Most of the documentation on regions can be found in -//! `middle/typeck/infer/region_inference.rs` +//! 
`middle/infer/region_inference/README.md` -use front::map as ast_map; +use dep_graph::DepNode; +use hir::map as ast_map; use session::Session; -use util::nodemap::{FnvHashMap, NodeMap, NodeSet}; -use middle::cstore::InlinedItem; -use middle::ty::{self, Ty}; +use util::nodemap::{FxHashMap, NodeMap, NodeSet}; +use ty; use std::cell::RefCell; use std::collections::hash_map::Entry; use std::fmt; use std::mem; -use syntax::codemap::{self, Span}; +use syntax::codemap; use syntax::ast::{self, NodeId}; +use syntax_pos::Span; -use rustc_front::hir; -use rustc_front::intravisit::{self, Visitor, FnKind}; -use rustc_front::hir::{Block, Item, FnDecl, Arm, Pat, Stmt, Expr, Local}; -use rustc_front::util::stmt_id; +use hir; +use hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; +use hir::{Block, Item, FnDecl, Arm, Pat, PatKind, Stmt, Expr, Local}; #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy)] @@ -40,15 +40,16 @@ pub struct CodeExtent(u32); impl fmt::Debug for CodeExtent { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "CodeExtent({:?}", self.0)); + write!(f, "CodeExtent({:?}", self.0)?; - try!(ty::tls::with_opt(|opt_tcx| { + ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { - let data = tcx.region_maps.code_extents.borrow()[self.0 as usize]; - try!(write!(f, "/{:?}", data)); + if let Some(data) = tcx.region_maps.code_extents.borrow().get(self.0 as usize) { + write!(f, "/{:?}", data)?; + } } Ok(()) - })); + })?; write!(f, ")") } @@ -235,7 +236,7 @@ impl CodeExtent { // (This is the special case aluded to in the // doc-comment for this method) let stmt_span = blk.stmts[r.first_statement_index as usize].span; - Some(Span { lo: stmt_span.hi, ..blk.span }) + Some(Span { lo: stmt_span.hi, hi: blk.span.hi, expn_id: stmt_span.expn_id }) } } } @@ -250,7 +251,7 @@ impl CodeExtent { /// The region maps encode information about region relationships. 
pub struct RegionMaps { code_extents: RefCell>, - code_extent_interner: RefCell>, + code_extent_interner: RefCell>, /// `scope_map` maps from a scope id to the enclosing scope id; /// this is usually corresponding to the lexical nesting, though /// in the case of closures the parent scope is the innermost @@ -279,7 +280,7 @@ pub struct RegionMaps { /// hierarchy based on their lexical mapping. This is used to /// handle the relationships between regions in a fn and in a /// closure defined by that fn. See the "Modeling closures" - /// section of the README in middle::infer::region_inference for + /// section of the README in infer::region_inference for /// more details. fn_tree: RefCell>, } @@ -290,7 +291,7 @@ pub struct Context { /// of the innermost fn body. Each fn forms its own disjoint tree /// in the region hierarchy. These fn bodies are themselves /// arranged into a tree. See the "Modeling closures" section of - /// the README in middle::infer::region_inference for more + /// the README in infer::region_inference for more /// details. root_id: Option, @@ -301,7 +302,7 @@ pub struct Context { parent: CodeExtent } -struct RegionResolutionVisitor<'a> { +struct RegionResolutionVisitor<'ast: 'a, 'a> { sess: &'a Session, // Generated maps: @@ -309,6 +310,8 @@ struct RegionResolutionVisitor<'a> { cx: Context, + map: &'a ast_map::Map<'ast>, + /// `terminating_scopes` is a set containing the ids of each /// statement, or conditional/repeating expression. 
These scopes /// are calling "terminating scopes" because, when attempting to @@ -342,7 +345,7 @@ impl RegionMaps { pub fn lookup_code_extent(&self, e: CodeExtentData) -> CodeExtent { match self.code_extent_interner.borrow().get(&e) { Some(&d) => d, - None => panic!("unknown code extent {:?}", e) + None => bug!("unknown code extent {:?}", e) } } pub fn node_extent(&self, n: ast::NodeId) -> CodeExtent { @@ -384,11 +387,11 @@ impl RegionMaps { } Entry::Vacant(v) => { if self.code_extents.borrow().len() > 0xffffffffusize { - unreachable!() // should pass a sess, - // but this isn't the only place + bug!() // should pass a sess, + // but this isn't the only place } let idx = CodeExtent(self.code_extents.borrow().len() as u32); - info!("CodeExtent({}) = {:?} [parent={}]", idx.0, e, parent.0); + debug!("CodeExtent({}) = {:?} [parent={}]", idx.0, e, parent.0); self.code_extents.borrow_mut().push(e); self.scope_map.borrow_mut().push(parent); *v.insert(idx) @@ -459,7 +462,7 @@ impl RegionMaps { self.scope_map.borrow()[id.0 as usize].into_option() } - #[allow(dead_code)] // used in middle::cfg + #[allow(dead_code)] // used in cfg pub fn encl_scope(&self, id: CodeExtent) -> CodeExtent { //! Returns the narrowest scope that encloses `id`, if any. self.opt_encl_scope(id).unwrap() @@ -469,7 +472,7 @@ impl RegionMaps { pub fn var_scope(&self, var_id: ast::NodeId) -> CodeExtent { match self.var_map.borrow().get(&var_id) { Some(&r) => r, - None => { panic!("no enclosing scope for id {:?}", var_id); } + None => { bug!("no enclosing scope for id {:?}", var_id); } } } @@ -477,12 +480,9 @@ impl RegionMaps { //! 
Returns the scope when temp created by expr_id will be cleaned up // check for a designated rvalue scope - match self.rvalue_scopes.borrow().get(&expr_id) { - Some(&s) => { - debug!("temporary_scope({:?}) = {:?} [custom]", expr_id, s); - return Some(s); - } - None => { } + if let Some(&s) = self.rvalue_scopes.borrow().get(&expr_id) { + debug!("temporary_scope({:?}) = {:?} [custom]", expr_id, s); + return Some(s); } let scope_map : &[CodeExtent] = &self.scope_map.borrow(); @@ -492,12 +492,7 @@ impl RegionMaps { // if there's one. Static items, for instance, won't // have an enclosing scope, hence no scope will be // returned. - let expr_extent = self.node_extent(expr_id); - // For some reason, the expr's scope itself is skipped here. - let mut id = match scope_map[expr_extent.0 as usize].into_option() { - Some(i) => i, - _ => return None - }; + let mut id = self.node_extent(expr_id); while let Some(p) = scope_map[id.0 as usize].into_option() { match code_extents[p.0 as usize] { @@ -586,7 +581,7 @@ impl RegionMaps { // different functions. Compare those fn for lexical // nesting. The reasoning behind this is subtle. See the // "Modeling closures" section of the README in - // middle::infer::region_inference for more details. + // infer::region_inference for more details. 
let a_root_scope = self.code_extent_data(a_ancestors[a_index]); let b_root_scope = self.code_extent_data(a_ancestors[a_index]); return match (a_root_scope, b_root_scope) { @@ -600,12 +595,12 @@ impl RegionMaps { scope_a } else { // neither fn encloses the other - unreachable!() + bug!() } } _ => { // root ids are always Misc right now - unreachable!() + bug!() } }; } @@ -667,7 +662,7 @@ fn record_var_lifetime(visitor: &mut RegionResolutionVisitor, } } -fn resolve_block(visitor: &mut RegionResolutionVisitor, blk: &hir::Block) { +fn resolve_block<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, blk: &'tcx hir::Block) { debug!("resolve_block(blk.id={:?})", blk.id); let prev_cx = visitor.cx; @@ -738,7 +733,7 @@ fn resolve_block(visitor: &mut RegionResolutionVisitor, blk: &hir::Block) { visitor.cx = prev_cx; } -fn resolve_arm(visitor: &mut RegionResolutionVisitor, arm: &hir::Arm) { +fn resolve_arm<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, arm: &'tcx hir::Arm) { visitor.terminating_scopes.insert(arm.body.id); if let Some(ref expr) = arm.guard { @@ -748,23 +743,19 @@ fn resolve_arm(visitor: &mut RegionResolutionVisitor, arm: &hir::Arm) { intravisit::walk_arm(visitor, arm); } -fn resolve_pat(visitor: &mut RegionResolutionVisitor, pat: &hir::Pat) { +fn resolve_pat<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, pat: &'tcx hir::Pat) { visitor.new_node_extent(pat.id); - // If this is a binding (or maybe a binding, I'm too lazy to check - // the def map) then record the lifetime of that binding. - match pat.node { - hir::PatIdent(..) => { - record_var_lifetime(visitor, pat.id, pat.span); - } - _ => { } + // If this is a binding then record the lifetime of that binding. + if let PatKind::Binding(..) 
= pat.node { + record_var_lifetime(visitor, pat.id, pat.span); } intravisit::walk_pat(visitor, pat); } -fn resolve_stmt(visitor: &mut RegionResolutionVisitor, stmt: &hir::Stmt) { - let stmt_id = stmt_id(stmt); +fn resolve_stmt<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, stmt: &'tcx hir::Stmt) { + let stmt_id = stmt.node.id(); debug!("resolve_stmt(stmt.id={:?})", stmt_id); // Every statement will clean up the temporaries created during @@ -781,7 +772,7 @@ fn resolve_stmt(visitor: &mut RegionResolutionVisitor, stmt: &hir::Stmt) { visitor.cx.parent = prev_parent; } -fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &hir::Expr) { +fn resolve_expr<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, expr: &'tcx hir::Expr) { debug!("resolve_expr(expr.id={:?})", expr.id); let expr_extent = visitor.new_node_extent_with_dtor(expr.id); @@ -805,7 +796,8 @@ fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &hir::Expr) { terminating(r.id); } - hir::ExprIf(_, ref then, Some(ref otherwise)) => { + hir::ExprIf(ref expr, ref then, Some(ref otherwise)) => { + terminating(expr.id); terminating(then.id); terminating(otherwise.id); } @@ -815,7 +807,7 @@ fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &hir::Expr) { terminating(then.id); } - hir::ExprLoop(ref body, _) => { + hir::ExprLoop(ref body, _, _) => { terminating(body.id); } @@ -858,7 +850,8 @@ fn resolve_expr(visitor: &mut RegionResolutionVisitor, expr: &hir::Expr) { visitor.cx = prev_cx; } -fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { +fn resolve_local<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, + local: &'tcx hir::Local) { debug!("resolve_local(local.id={:?},local.init={:?})", local.id,local.init.is_some()); @@ -930,19 +923,15 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { // // FIXME(#6308) -- Note that `[]` patterns work more smoothly post-DST. 
- match local.init { - Some(ref expr) => { - record_rvalue_scope_if_borrow_expr(visitor, &**expr, blk_scope); + if let Some(ref expr) = local.init { + record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope); - let is_borrow = - if let Some(ref ty) = local.ty { is_borrowed_ty(&**ty) } else { false }; + let is_borrow = + if let Some(ref ty) = local.ty { is_borrowed_ty(&ty) } else { false }; - if is_binding_pat(&*local.pat) || is_borrow { - record_rvalue_scope(visitor, &**expr, blk_scope); - } + if is_binding_pat(&local.pat) || is_borrow { + record_rvalue_scope(visitor, &expr, blk_scope); } - - None => { } } intravisit::walk_local(visitor, local); @@ -957,25 +946,25 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { /// | box P& fn is_binding_pat(pat: &hir::Pat) -> bool { match pat.node { - hir::PatIdent(hir::BindByRef(_), _, _) => true, + PatKind::Binding(hir::BindByRef(_), ..) => true, - hir::PatStruct(_, ref field_pats, _) => { - field_pats.iter().any(|fp| is_binding_pat(&*fp.node.pat)) + PatKind::Struct(_, ref field_pats, _) => { + field_pats.iter().any(|fp| is_binding_pat(&fp.node.pat)) } - hir::PatVec(ref pats1, ref pats2, ref pats3) => { - pats1.iter().any(|p| is_binding_pat(&**p)) || - pats2.iter().any(|p| is_binding_pat(&**p)) || - pats3.iter().any(|p| is_binding_pat(&**p)) + PatKind::Slice(ref pats1, ref pats2, ref pats3) => { + pats1.iter().any(|p| is_binding_pat(&p)) || + pats2.iter().any(|p| is_binding_pat(&p)) || + pats3.iter().any(|p| is_binding_pat(&p)) } - hir::PatEnum(_, Some(ref subpats)) | - hir::PatTup(ref subpats) => { - subpats.iter().any(|p| is_binding_pat(&**p)) + PatKind::TupleStruct(_, ref subpats, _) | + PatKind::Tuple(ref subpats, _) => { + subpats.iter().any(|p| is_binding_pat(&p)) } - hir::PatBox(ref subpat) => { - is_binding_pat(&**subpat) + PatKind::Box(ref subpat) => { + is_binding_pat(&subpat) } _ => false, @@ -1005,36 +994,32 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: 
&hir::Local) { blk_id: CodeExtent) { match expr.node { hir::ExprAddrOf(_, ref subexpr) => { - record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id); - record_rvalue_scope(visitor, &**subexpr, blk_id); + record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id); + record_rvalue_scope(visitor, &subexpr, blk_id); } hir::ExprStruct(_, ref fields, _) => { for field in fields { record_rvalue_scope_if_borrow_expr( - visitor, &*field.expr, blk_id); + visitor, &field.expr, blk_id); } } - hir::ExprVec(ref subexprs) | + hir::ExprArray(ref subexprs) | hir::ExprTup(ref subexprs) => { for subexpr in subexprs { record_rvalue_scope_if_borrow_expr( - visitor, &**subexpr, blk_id); + visitor, &subexpr, blk_id); } } hir::ExprCast(ref subexpr, _) => { - record_rvalue_scope_if_borrow_expr(visitor, &**subexpr, blk_id) + record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id) } hir::ExprBlock(ref block) => { - match block.expr { - Some(ref subexpr) => { - record_rvalue_scope_if_borrow_expr( - visitor, &**subexpr, blk_id); - } - None => { } + if let Some(ref subexpr) = block.expr { + record_rvalue_scope_if_borrow_expr( + visitor, &subexpr, blk_id); } } - _ => { - } + _ => {} } } @@ -1071,7 +1056,7 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { hir::ExprField(ref subexpr, _) | hir::ExprTupField(ref subexpr, _) | hir::ExprIndex(ref subexpr, _) => { - expr = &**subexpr; + expr = &subexpr; } _ => { return; @@ -1081,7 +1066,11 @@ fn resolve_local(visitor: &mut RegionResolutionVisitor, local: &hir::Local) { } } -fn resolve_item(visitor: &mut RegionResolutionVisitor, item: &hir::Item) { +fn resolve_item_like<'a, 'tcx, F>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, + id: ast::NodeId, + walk: F) + where F: FnOnce(&mut RegionResolutionVisitor<'tcx, 'a>) +{ // Items create a new outer block scope as far as we're concerned. 
let prev_cx = visitor.cx; let prev_ts = mem::replace(&mut visitor.terminating_scopes, NodeSet()); @@ -1090,44 +1079,44 @@ fn resolve_item(visitor: &mut RegionResolutionVisitor, item: &hir::Item) { var_parent: ROOT_CODE_EXTENT, parent: ROOT_CODE_EXTENT }; - intravisit::walk_item(visitor, item); - visitor.create_item_scope_if_needed(item.id); + walk(visitor); + visitor.create_item_scope_if_needed(id); visitor.cx = prev_cx; visitor.terminating_scopes = prev_ts; } -fn resolve_fn(visitor: &mut RegionResolutionVisitor, - kind: FnKind, - decl: &hir::FnDecl, - body: &hir::Block, - sp: Span, - id: ast::NodeId) { +fn resolve_fn<'a, 'tcx>(visitor: &mut RegionResolutionVisitor<'tcx, 'a>, + kind: FnKind<'tcx>, + decl: &'tcx hir::FnDecl, + body_id: hir::ExprId, + sp: Span, + id: ast::NodeId) { debug!("region::resolve_fn(id={:?}, \ span={:?}, \ body.id={:?}, \ cx.parent={:?})", id, visitor.sess.codemap().span_to_string(sp), - body.id, + body_id, visitor.cx.parent); visitor.cx.parent = visitor.new_code_extent( - CodeExtentData::CallSiteScope { fn_id: id, body_id: body.id }); + CodeExtentData::CallSiteScope { fn_id: id, body_id: body_id.node_id() }); let fn_decl_scope = visitor.new_code_extent( - CodeExtentData::ParameterScope { fn_id: id, body_id: body.id }); + CodeExtentData::ParameterScope { fn_id: id, body_id: body_id.node_id() }); if let Some(root_id) = visitor.cx.root_id { - visitor.region_maps.record_fn_parent(body.id, root_id); + visitor.region_maps.record_fn_parent(body_id.node_id(), root_id); } let outer_cx = visitor.cx; let outer_ts = mem::replace(&mut visitor.terminating_scopes, NodeSet()); - visitor.terminating_scopes.insert(body.id); + visitor.terminating_scopes.insert(body_id.node_id()); // The arguments and `self` are parented to the fn. 
visitor.cx = Context { - root_id: Some(body.id), + root_id: Some(body_id.node_id()), parent: ROOT_CODE_EXTENT, var_parent: fn_decl_scope, }; @@ -1137,18 +1126,18 @@ fn resolve_fn(visitor: &mut RegionResolutionVisitor, // The body of the every fn is a root scope. visitor.cx = Context { - root_id: Some(body.id), + root_id: Some(body_id.node_id()), parent: fn_decl_scope, var_parent: fn_decl_scope }; - visitor.visit_block(body); + visitor.visit_body(body_id); // Restore context we had at the start. visitor.cx = outer_cx; visitor.terminating_scopes = outer_ts; } -impl<'a> RegionResolutionVisitor<'a> { +impl<'ast, 'a> RegionResolutionVisitor<'ast, 'a> { /// Records the current parent (if any) as the parent of `child_scope`. fn new_code_extent(&mut self, child_scope: CodeExtentData) -> CodeExtent { self.region_maps.intern_code_extent(child_scope, self.cx.parent) @@ -1184,50 +1173,55 @@ impl<'a> RegionResolutionVisitor<'a> { } } -impl<'a, 'v> Visitor<'v> for RegionResolutionVisitor<'a> { - fn visit_block(&mut self, b: &Block) { +impl<'ast, 'a> Visitor<'ast> for RegionResolutionVisitor<'ast, 'a> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + NestedVisitorMap::OnlyBodies(&self.map) + } + + fn visit_block(&mut self, b: &'ast Block) { resolve_block(self, b); } - fn visit_item(&mut self, i: &Item) { - resolve_item(self, i); + fn visit_item(&mut self, i: &'ast Item) { + resolve_item_like(self, i.id, |this| intravisit::walk_item(this, i)); } - fn visit_impl_item(&mut self, ii: &hir::ImplItem) { - intravisit::walk_impl_item(self, ii); - self.create_item_scope_if_needed(ii.id); + fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { + resolve_item_like(self, ii.id, |this| intravisit::walk_impl_item(this, ii)); } - fn visit_trait_item(&mut self, ti: &hir::TraitItem) { - intravisit::walk_trait_item(self, ti); - self.create_item_scope_if_needed(ti.id); + fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { + resolve_item_like(self, ti.id, 
|this| intravisit::walk_trait_item(this, ti)); } - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, - b: &'v Block, s: Span, n: NodeId) { + fn visit_fn(&mut self, fk: FnKind<'ast>, fd: &'ast FnDecl, + b: hir::ExprId, s: Span, n: NodeId) { resolve_fn(self, fk, fd, b, s, n); } - fn visit_arm(&mut self, a: &Arm) { + fn visit_arm(&mut self, a: &'ast Arm) { resolve_arm(self, a); } - fn visit_pat(&mut self, p: &Pat) { + fn visit_pat(&mut self, p: &'ast Pat) { resolve_pat(self, p); } - fn visit_stmt(&mut self, s: &Stmt) { + fn visit_stmt(&mut self, s: &'ast Stmt) { resolve_stmt(self, s); } - fn visit_expr(&mut self, ex: &Expr) { + fn visit_expr(&mut self, ex: &'ast Expr) { resolve_expr(self, ex); } - fn visit_local(&mut self, l: &Local) { + fn visit_local(&mut self, l: &'ast Local) { resolve_local(self, l); } } -pub fn resolve_crate(sess: &Session, krate: &hir::Crate) -> RegionMaps { +pub fn resolve_crate(sess: &Session, map: &ast_map::Map) -> RegionMaps { + let _task = map.dep_graph.in_task(DepNode::RegionResolveCrate); + let krate = map.krate(); + let maps = RegionMaps { code_extents: RefCell::new(vec![]), - code_extent_interner: RefCell::new(FnvHashMap()), + code_extent_interner: RefCell::new(FxHashMap()), scope_map: RefCell::new(vec![]), var_map: RefCell::new(NodeMap()), rvalue_scopes: RefCell::new(NodeMap()), @@ -1243,6 +1237,7 @@ pub fn resolve_crate(sess: &Session, krate: &hir::Crate) -> RegionMaps { let mut visitor = RegionResolutionVisitor { sess: sess, region_maps: &maps, + map: map, cx: Context { root_id: None, parent: ROOT_CODE_EXTENT, @@ -1250,23 +1245,7 @@ pub fn resolve_crate(sess: &Session, krate: &hir::Crate) -> RegionMaps { }, terminating_scopes: NodeSet() }; - krate.visit_all_items(&mut visitor); + krate.visit_all_item_likes(&mut visitor.as_deep_visitor()); } return maps; } - -pub fn resolve_inlined_item(sess: &Session, - region_maps: &RegionMaps, - item: &InlinedItem) { - let mut visitor = RegionResolutionVisitor { - sess: sess, - region_maps: 
region_maps, - cx: Context { - root_id: None, - parent: ROOT_CODE_EXTENT, - var_parent: ROOT_CODE_EXTENT - }, - terminating_scopes: NodeSet() - }; - item.visit(&mut visitor); -} diff --git a/src/librustc/middle/resolve_lifetime.rs b/src/librustc/middle/resolve_lifetime.rs index 2c74f3a82e414..c5b03a4a32add 100644 --- a/src/librustc/middle/resolve_lifetime.rs +++ b/src/librustc/middle/resolve_lifetime.rs @@ -18,27 +18,28 @@ pub use self::DefRegion::*; use self::ScopeChain::*; +use dep_graph::DepNode; +use hir::map::Map; use session::Session; -use middle::def::{self, DefMap}; +use hir::def::Def; +use hir::def_id::DefId; use middle::region; -use middle::subst; -use middle::ty; -use std::fmt; +use ty; use std::mem::replace; use syntax::ast; -use syntax::codemap::Span; -use syntax::parse::token::special_idents; +use syntax::symbol::keywords; +use syntax_pos::Span; use util::nodemap::NodeMap; -use rustc_front::hir; -use rustc_front::print::pprust::lifetime_to_string; -use rustc_front::intravisit::{self, Visitor, FnKind}; +use rustc_data_structures::fx::FxHashSet; +use hir; +use hir::print::lifetime_to_string; +use hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; #[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] pub enum DefRegion { DefStaticRegion, - DefEarlyBoundRegion(/* space */ subst::ParamSpace, - /* index */ u32, + DefEarlyBoundRegion(/* index */ u32, /* lifetime decl */ ast::NodeId), DefLateBoundRegion(ty::DebruijnIndex, /* lifetime decl */ ast::NodeId), @@ -48,13 +49,22 @@ pub enum DefRegion { // Maps the id of each lifetime reference to the lifetime decl // that it corresponds to. 
-pub type NamedRegionMap = NodeMap; +pub struct NamedRegionMap { + // maps from every use of a named (not anonymous) lifetime to a + // `DefRegion` describing how that region is bound + pub defs: NodeMap, + + // the set of lifetime def ids that are late-bound; late-bound ids + // are named regions appearing in fn arguments that do not appear + // in where-clauses + pub late_bound: NodeMap, +} -struct LifetimeContext<'a> { +struct LifetimeContext<'a, 'tcx: 'a> { sess: &'a Session, - named_region_map: &'a mut NamedRegionMap, + hir_map: &'a Map<'tcx>, + map: &'a mut NamedRegionMap, scope: Scope<'a>, - def_map: &'a DefMap, // Deep breath. Our representation for poly trait refs contains a single // binder and thus we only allow a single level of quantification. However, // the syntax of Rust permits quantification in two places, e.g., `T: for <'a> Foo<'a>` @@ -76,10 +86,14 @@ struct LifetimeContext<'a> { labels_in_fn: Vec<(ast::Name, Span)>, } +#[derive(PartialEq, Debug)] enum ScopeChain<'a> { - /// EarlyScope(i, ['a, 'b, ...], s) extends s with early-bound - /// lifetimes, assigning indexes 'a => i, 'b => i+1, ... etc. - EarlyScope(subst::ParamSpace, &'a [hir::LifetimeDef], Scope<'a>), + /// EarlyScope(['a, 'b, ...], start, s) extends s with early-bound + /// lifetimes, with consecutive parameter indices from `start`. + /// That is, 'a has index `start`, 'b has index `start + 1`, etc. + /// Indices before `start` correspond to other generic parameters + /// of a parent item (trait/impl of a method), or `Self` in traits. + EarlyScope(&'a [hir::LifetimeDef], u32, Scope<'a>), /// LateScope(['a, 'b, ...], s) extends s with late-bound /// lifetimes introduced by the declaration binder_id. 
LateScope(&'a [hir::LifetimeDef], Scope<'a>), @@ -93,24 +107,38 @@ type Scope<'a> = &'a ScopeChain<'a>; static ROOT_SCOPE: ScopeChain<'static> = RootScope; -pub fn krate(sess: &Session, krate: &hir::Crate, def_map: &DefMap) -> NamedRegionMap { - let mut named_region_map = NodeMap(); - sess.abort_if_new_errors(|| { - krate.visit_all_items(&mut LifetimeContext { +pub fn krate(sess: &Session, + hir_map: &Map) + -> Result { + let _task = hir_map.dep_graph.in_task(DepNode::ResolveLifetimes); + let krate = hir_map.krate(); + let mut map = NamedRegionMap { + defs: NodeMap(), + late_bound: NodeMap(), + }; + sess.track_errors(|| { + intravisit::walk_crate(&mut LifetimeContext { sess: sess, - named_region_map: &mut named_region_map, + hir_map: hir_map, + map: &mut map, scope: &ROOT_SCOPE, - def_map: def_map, trait_ref_hack: false, labels_in_fn: vec![], - }); - }); - named_region_map + }, krate); + })?; + Ok(map) } -impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { - fn visit_item(&mut self, item: &hir::Item) { - assert!(self.labels_in_fn.is_empty()); +impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { + // Override the nested functions -- lifetimes follow lexical scope, + // so it's convenient to walk the tree in lexical order. + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.hir_map) + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + // Save labels for nested items. + let saved_labels_in_fn = replace(&mut self.labels_in_fn, vec![]); // Items always introduce a new root scope self.with(RootScope, |_, this| { @@ -120,7 +148,7 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { intravisit::walk_item(this, item); } hir::ItemExternCrate(_) | - hir::ItemUse(_) | + hir::ItemUse(..) | hir::ItemMod(..) | hir::ItemDefaultImpl(..) | hir::ItemForeignMod(..) 
| @@ -132,12 +160,17 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { hir::ItemTy(_, ref generics) | hir::ItemEnum(_, ref generics) | hir::ItemStruct(_, ref generics) | - hir::ItemTrait(_, ref generics, _, _) | - hir::ItemImpl(_, _, ref generics, _, _, _) => { + hir::ItemUnion(_, ref generics) | + hir::ItemTrait(_, ref generics, ..) | + hir::ItemImpl(_, _, ref generics, ..) => { // These kinds of items have only early bound lifetime parameters. let lifetimes = &generics.lifetimes; - let early_scope = EarlyScope(subst::TypeSpace, lifetimes, &ROOT_SCOPE); - this.with(early_scope, |old_scope, this| { + let start = if let hir::ItemTrait(..) = item.node { + 1 // Self comes before lifetimes + } else { + 0 + }; + this.with(EarlyScope(lifetimes, start, &ROOT_SCOPE), |old_scope, this| { this.check_lifetime_defs(old_scope, lifetimes); intravisit::walk_item(this, item); }); @@ -146,10 +179,10 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { }); // Done traversing the item; remove any labels it created - self.labels_in_fn.truncate(0); + self.labels_in_fn = saved_labels_in_fn; } - fn visit_foreign_item(&mut self, item: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { // Items save/restore the set of labels. This way inner items // can freely reuse names, be they loop labels or lifetimes. 
let saved = replace(&mut self.labels_in_fn, vec![]); @@ -157,8 +190,8 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { // Items always introduce a new root scope self.with(RootScope, |_, this| { match item.node { - hir::ForeignItemFn(_, ref generics) => { - this.visit_early_late(subst::FnSpace, generics, |this| { + hir::ForeignItemFn(ref decl, ref generics) => { + this.visit_early_late(item.id, decl, generics, |this| { intravisit::walk_foreign_item(this, item); }) } @@ -172,26 +205,33 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { replace(&mut self.labels_in_fn, saved); } - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, s: Span, fn_id: ast::NodeId) { + fn visit_fn(&mut self, fk: FnKind<'tcx>, decl: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, fn_id: ast::NodeId) { match fk { - FnKind::ItemFn(_, generics, _, _, _, _) => { - self.visit_early_late(subst::FnSpace, generics, |this| { - this.add_scope_and_walk_fn(fk, fd, b, s, fn_id) + FnKind::ItemFn(_, generics, ..) => { + self.visit_early_late(fn_id,decl, generics, |this| { + this.add_scope_and_walk_fn(fk, decl, b, s, fn_id) }) } - FnKind::Method(_, sig, _) => { - self.visit_early_late(subst::FnSpace, &sig.generics, |this| { - this.add_scope_and_walk_fn(fk, fd, b, s, fn_id) - }) + FnKind::Method(_, sig, ..) => { + self.visit_early_late( + fn_id, + decl, + &sig.generics, + |this| this.add_scope_and_walk_fn(fk, decl, b, s, fn_id)); } - FnKind::Closure => { - self.add_scope_and_walk_fn(fk, fd, b, s, fn_id) + FnKind::Closure(_) => { + // Closures have their own set of labels, save labels just + // like for foreign items above. 
+ let saved = replace(&mut self.labels_in_fn, vec![]); + let result = self.add_scope_and_walk_fn(fk, decl, b, s, fn_id); + replace(&mut self.labels_in_fn, saved); + result } } } - fn visit_ty(&mut self, ty: &hir::Ty) { + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { match ty.node { hir::TyBareFn(ref c) => { self.with(LateScope(&c.lifetimes, self.scope), |old_scope, this| { @@ -201,11 +241,11 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { intravisit::walk_ty(this, ty); }); } - hir::TyPath(None, ref path) => { + hir::TyPath(hir::QPath::Resolved(None, ref path)) => { // if this path references a trait, then this will resolve to // a trait ref, which introduces a binding scope. - match self.def_map.get(&ty.id).map(|d| (d.base_def, d.depth)) { - Some((def::DefTrait(..), 0)) => { + match path.def { + Def::Trait(..) => { self.with(LateScope(&[], self.scope), |_, this| { this.visit_path(path, ty.id); }); @@ -221,14 +261,15 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { } } - fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) { + fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { // We reset the labels on every trait item, so that different // methods in an impl can reuse label names. 
let saved = replace(&mut self.labels_in_fn, vec![]); if let hir::MethodTraitItem(ref sig, None) = trait_item.node { self.visit_early_late( - subst::FnSpace, &sig.generics, + trait_item.id, + &sig.decl, &sig.generics, |this| intravisit::walk_trait_item(this, trait_item)) } else { intravisit::walk_trait_item(self, trait_item); @@ -237,20 +278,19 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { replace(&mut self.labels_in_fn, saved); } - fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) { - if lifetime_ref.name == special_idents::static_lifetime.name { + fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) { + if lifetime_ref.name == keywords::StaticLifetime.name() { self.insert_lifetime(lifetime_ref, DefStaticRegion); return; } self.resolve_lifetime_ref(lifetime_ref); } - fn visit_generics(&mut self, generics: &hir::Generics) { + fn visit_generics(&mut self, generics: &'tcx hir::Generics) { for ty_param in generics.ty_params.iter() { walk_list!(self, visit_ty_param_bound, &ty_param.bounds); - match ty_param.default { - Some(ref ty) => self.visit_ty(&**ty), - None => {} + if let Some(ref ty) = ty_param.default { + self.visit_ty(&ty); } } for predicate in &generics.where_clause.predicates { @@ -264,13 +304,13 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { let result = self.with(LateScope(bound_lifetimes, self.scope), |old_scope, this| { this.check_lifetime_defs(old_scope, bound_lifetimes); - this.visit_ty(&**bounded_ty); + this.visit_ty(&bounded_ty); walk_list!(this, visit_ty_param_bound, bounds); }); self.trait_ref_hack = false; result } else { - self.visit_ty(&**bounded_ty); + self.visit_ty(&bounded_ty); walk_list!(self, visit_ty_param_bound, bounds); } } @@ -288,20 +328,19 @@ impl<'a, 'v> Visitor<'v> for LifetimeContext<'a> { ref ty, .. 
}) => { self.visit_path(path, id); - self.visit_ty(&**ty); + self.visit_ty(&ty); } } } } fn visit_poly_trait_ref(&mut self, - trait_ref: &hir::PolyTraitRef, - _modifier: &hir::TraitBoundModifier) { + trait_ref: &'tcx hir::PolyTraitRef, + _modifier: &'tcx hir::TraitBoundModifier) { debug!("visit_poly_trait_ref trait_ref={:?}", trait_ref); if !self.trait_ref_hack || !trait_ref.bound_lifetimes.is_empty() { if self.trait_ref_hack { - println!("{:?}", trait_ref.span); span_err!(self.sess, trait_ref.span, E0316, "nested quantification of lifetimes"); } @@ -360,16 +399,15 @@ fn signal_shadowing_problem(sess: &Session, name: ast::Name, orig: Original, sha {} name that is already in scope", shadower.kind.desc(), name, orig.kind.desc())) }; - err.span_note(orig.span, - &format!("shadowed {} `{}` declared here", - orig.kind.desc(), name)); + err.span_label(orig.span, &"first declared here"); + err.span_label(shadower.span, + &format!("lifetime {} already in scope", name)); err.emit(); } // Adds all labels in `b` to `ctxt.labels_in_fn`, signalling a warning // if one of the label shadows a lifetime or another label. 
-fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { - +fn extract_labels(ctxt: &mut LifetimeContext, b: hir::ExprId) { struct GatherLabels<'a> { sess: &'a Session, scope: Scope<'a>, @@ -381,10 +419,14 @@ fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { scope: ctxt.scope, labels_in_fn: &mut ctxt.labels_in_fn, }; - gather.visit_block(b); + gather.visit_expr(ctxt.hir_map.expr(b)); return; impl<'v, 'a> Visitor<'v> for GatherLabels<'a> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None + } + fn visit_expr(&mut self, ex: &'v hir::Expr) { // do not recurse into closures defined in the block // since they are treated as separate fns from the POV of @@ -392,23 +434,23 @@ fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { if let hir::ExprClosure(..) = ex.node { return } - if let Some(label) = expression_label(ex) { + if let Some((label, label_span)) = expression_label(ex) { for &(prior, prior_span) in &self.labels_in_fn[..] 
{ // FIXME (#24278): non-hygienic comparison if label == prior { signal_shadowing_problem(self.sess, label, original_label(prior_span), - shadower_label(ex.span)); + shadower_label(label_span)); } } check_if_label_shadows_lifetime(self.sess, self.scope, label, - ex.span); + label_span); - self.labels_in_fn.push((label, ex.span)); + self.labels_in_fn.push((label, label_span)); } intravisit::walk_expr(self, ex) } @@ -418,10 +460,10 @@ fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { } } - fn expression_label(ex: &hir::Expr) -> Option { + fn expression_label(ex: &hir::Expr) -> Option<(ast::Name, Span)> { match ex.node { - hir::ExprWhile(_, _, Some(label)) | - hir::ExprLoop(_, Some(label)) => Some(label.unhygienic_name), + hir::ExprWhile(.., Some(label)) | + hir::ExprLoop(_, Some(label), _) => Some((label.node, label.span)), _ => None, } } @@ -435,7 +477,7 @@ fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { FnScope { s, .. } => { scope = s; } RootScope => { return; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { for lifetime_def in lifetimes { // FIXME (#24278): non-hygienic comparison @@ -455,25 +497,23 @@ fn extract_labels<'v, 'a>(ctxt: &mut LifetimeContext<'a>, b: &'v hir::Block) { } } -impl<'a> LifetimeContext<'a> { - fn add_scope_and_walk_fn<'b>(&mut self, - fk: FnKind, - fd: &hir::FnDecl, - fb: &'b hir::Block, - _span: Span, - fn_id: ast::NodeId) { - +impl<'a, 'tcx> LifetimeContext<'a, 'tcx> { + fn add_scope_and_walk_fn(&mut self, + fk: FnKind<'tcx>, + fd: &'tcx hir::FnDecl, + fb: hir::ExprId, + _span: Span, + fn_id: ast::NodeId) { match fk { - FnKind::ItemFn(_, generics, _, _, _, _) => { + FnKind::ItemFn(_, generics, ..) => { intravisit::walk_fn_decl(self, fd); self.visit_generics(generics); } - FnKind::Method(_, sig, _) => { + FnKind::Method(_, sig, ..) 
=> { intravisit::walk_fn_decl(self, fd); self.visit_generics(&sig.generics); - self.visit_explicit_self(&sig.explicit_self); } - FnKind::Closure => { + FnKind::Closure(_) => { intravisit::walk_fn_decl(self, fd); } } @@ -482,19 +522,26 @@ impl<'a> LifetimeContext<'a> { // `self.labels_in_fn`. extract_labels(self, fb); - self.with(FnScope { fn_id: fn_id, body_id: fb.id, s: self.scope }, - |_old_scope, this| this.visit_block(fb)) + self.with(FnScope { fn_id: fn_id, body_id: fb.node_id(), s: self.scope }, + |_old_scope, this| this.visit_body(fb)) + } + + // FIXME(#37666) this works around a limitation in the region inferencer + fn hack(&mut self, f: F) where + F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>), + { + f(self) } fn with(&mut self, wrap_scope: ScopeChain, f: F) where - F: FnOnce(Scope, &mut LifetimeContext), + F: for<'b> FnOnce(Scope, &mut LifetimeContext<'b, 'tcx>), { - let LifetimeContext {sess, ref mut named_region_map, ..} = *self; + let LifetimeContext {sess, hir_map, ref mut map, ..} = *self; let mut this = LifetimeContext { sess: sess, - named_region_map: *named_region_map, + hir_map: hir_map, + map: *map, scope: &wrap_scope, - def_map: self.def_map, trait_ref_hack: self.trait_ref_hack, labels_in_fn: self.labels_in_fn.clone(), }; @@ -522,23 +569,44 @@ impl<'a> LifetimeContext<'a> { /// bound lifetimes are resolved by name and associated with a binder id (`binder_id`), so the /// ordering is not important there. 
fn visit_early_late(&mut self, - early_space: subst::ParamSpace, - generics: &hir::Generics, + fn_id: ast::NodeId, + decl: &'tcx hir::FnDecl, + generics: &'tcx hir::Generics, walk: F) where - F: FnOnce(&mut LifetimeContext), + F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>), { - let referenced_idents = early_bound_lifetime_names(generics); - - debug!("visit_early_late: referenced_idents={:?}", - referenced_idents); - - let (early, late): (Vec<_>, _) = generics.lifetimes.iter().cloned().partition( - |l| referenced_idents.iter().any(|&i| i == l.lifetime.name)); + let fn_def_id = self.hir_map.local_def_id(fn_id); + insert_late_bound_lifetimes(self.map, + fn_def_id, + decl, + generics); + + let (late, early): (Vec<_>, _) = + generics.lifetimes + .iter() + .cloned() + .partition(|l| self.map.late_bound.contains_key(&l.lifetime.id)); + + // Find the start of nested early scopes, e.g. in methods. + let mut start = 0; + if let EarlyScope(..) = *self.scope { + let parent = self.hir_map.expect_item(self.hir_map.get_parent(fn_id)); + if let hir::ItemTrait(..) = parent.node { + start += 1; // Self comes first. + } + match parent.node { + hir::ItemTrait(_, ref generics, ..) | + hir::ItemImpl(_, _, ref generics, ..) 
=> { + start += generics.lifetimes.len() + generics.ty_params.len(); + } + _ => {} + } + } - self.with(EarlyScope(early_space, &early, self.scope), move |old_scope, this| { + self.with(EarlyScope(&early, start as u32, self.scope), move |old_scope, this| { this.with(LateScope(&late, this.scope), move |_, this| { this.check_lifetime_defs(old_scope, &generics.lifetimes); - walk(this); + this.hack(walk); // FIXME(#37666) workaround in place of `walk(this)` }); }); } @@ -565,11 +633,11 @@ impl<'a> LifetimeContext<'a> { break; } - EarlyScope(space, lifetimes, s) => { + EarlyScope(lifetimes, start, s) => { match search_lifetimes(lifetimes, lifetime_ref) { Some((index, lifetime_def)) => { let decl_id = lifetime_def.id; - let def = DefEarlyBoundRegion(space, index, decl_id); + let def = DefEarlyBoundRegion(start + index, decl_id); self.insert_lifetime(lifetime_ref, def); return; } @@ -631,7 +699,7 @@ impl<'a> LifetimeContext<'a> { break; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { search_result = search_lifetimes(lifetimes, lifetime_ref); if search_result.is_some() { @@ -656,20 +724,24 @@ impl<'a> LifetimeContext<'a> { } fn unresolved_lifetime_ref(&self, lifetime_ref: &hir::Lifetime) { - span_err!(self.sess, lifetime_ref.span, E0261, - "use of undeclared lifetime name `{}`", - lifetime_ref.name); + struct_span_err!(self.sess, lifetime_ref.span, E0261, + "use of undeclared lifetime name `{}`", lifetime_ref.name) + .span_label(lifetime_ref.span, &format!("undeclared lifetime")) + .emit(); } fn check_lifetime_defs(&mut self, old_scope: Scope, lifetimes: &[hir::LifetimeDef]) { for i in 0..lifetimes.len() { let lifetime_i = &lifetimes[i]; - let special_idents = [special_idents::static_lifetime]; for lifetime in lifetimes { - if special_idents.iter().any(|&i| i.name == lifetime.lifetime.name) { - span_err!(self.sess, lifetime.lifetime.span, E0262, - "invalid lifetime parameter name: `{}`", lifetime.lifetime.name); + if 
lifetime.lifetime.name == keywords::StaticLifetime.name() { + let lifetime = lifetime.lifetime; + let mut err = struct_span_err!(self.sess, lifetime.span, E0262, + "invalid lifetime parameter name: `{}`", lifetime.name); + err.span_label(lifetime.span, + &format!("{} is a reserved lifetime name", lifetime.name)); + err.emit(); } } @@ -678,10 +750,14 @@ impl<'a> LifetimeContext<'a> { let lifetime_j = &lifetimes[j]; if lifetime_i.lifetime.name == lifetime_j.lifetime.name { - span_err!(self.sess, lifetime_j.lifetime.span, E0263, - "lifetime name `{}` declared twice in \ - the same scope", - lifetime_j.lifetime.name); + struct_span_err!(self.sess, lifetime_j.lifetime.span, E0263, + "lifetime name `{}` declared twice in the same scope", + lifetime_j.lifetime.name) + .span_label(lifetime_j.lifetime.span, + &format!("declared twice")) + .span_label(lifetime_i.lifetime.span, + &format!("previous declaration here")) + .emit(); } } @@ -719,7 +795,7 @@ impl<'a> LifetimeContext<'a> { return; } - EarlyScope(_, lifetimes, s) | + EarlyScope(lifetimes, _, s) | LateScope(lifetimes, s) => { if let Some((_, lifetime_def)) = search_lifetimes(lifetimes, lifetime) { signal_shadowing_problem( @@ -740,16 +816,17 @@ impl<'a> LifetimeContext<'a> { lifetime_ref: &hir::Lifetime, def: DefRegion) { if lifetime_ref.id == ast::DUMMY_NODE_ID { - self.sess.span_bug(lifetime_ref.span, - "lifetime reference not renumbered, \ - probably a bug in syntax::fold"); + span_bug!(lifetime_ref.span, + "lifetime reference not renumbered, \ + probably a bug in syntax::fold"); } - debug!("lifetime_ref={:?} id={:?} resolved to {:?}", - lifetime_to_string(lifetime_ref), - lifetime_ref.id, - def); - self.named_region_map.insert(lifetime_ref.id, def); + debug!("lifetime_ref={:?} id={:?} resolved to {:?} span={:?}", + lifetime_to_string(lifetime_ref), + lifetime_ref.id, + def, + self.sess.codemap().span_to_string(lifetime_ref.span)); + self.map.defs.insert(lifetime_ref.id, def); } } @@ -766,106 +843,159 @@ fn 
search_lifetimes<'a>(lifetimes: &'a [hir::LifetimeDef], /////////////////////////////////////////////////////////////////////////// -pub fn early_bound_lifetimes<'a>(generics: &'a hir::Generics) -> Vec { - let referenced_idents = early_bound_lifetime_names(generics); - if referenced_idents.is_empty() { - return Vec::new(); +/// Detects late-bound lifetimes and inserts them into +/// `map.late_bound`. +/// +/// A region declared on a fn is **late-bound** if: +/// - it is constrained by an argument type; +/// - it does not appear in a where-clause. +/// +/// "Constrained" basically means that it appears in any type but +/// not amongst the inputs to a projection. In other words, `<&'a +/// T as Trait<''b>>::Foo` does not constrain `'a` or `'b`. +fn insert_late_bound_lifetimes(map: &mut NamedRegionMap, + fn_def_id: DefId, + decl: &hir::FnDecl, + generics: &hir::Generics) { + debug!("insert_late_bound_lifetimes(decl={:?}, generics={:?})", decl, generics); + + let mut constrained_by_input = ConstrainedCollector { regions: FxHashSet() }; + for arg in &decl.inputs { + constrained_by_input.visit_ty(&arg.ty); } - generics.lifetimes.iter() - .filter(|l| referenced_idents.iter().any(|&i| i == l.lifetime.name)) - .cloned() - .collect() -} + let mut appears_in_output = AllCollector { + regions: FxHashSet(), + impl_trait: false + }; + intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output); + + debug!("insert_late_bound_lifetimes: constrained_by_input={:?}", + constrained_by_input.regions); + + // Walk the lifetimes that appear in where clauses. + // + // Subtle point: because we disallow nested bindings, we can just + // ignore binders here and scrape up all names we see. 
+ let mut appears_in_where_clause = AllCollector { + regions: FxHashSet(), + impl_trait: false + }; + for ty_param in generics.ty_params.iter() { + walk_list!(&mut appears_in_where_clause, + visit_ty_param_bound, + &ty_param.bounds); + } + walk_list!(&mut appears_in_where_clause, + visit_where_predicate, + &generics.where_clause.predicates); + for lifetime_def in &generics.lifetimes { + if !lifetime_def.bounds.is_empty() { + // `'a: 'b` means both `'a` and `'b` are referenced + appears_in_where_clause.visit_lifetime_def(lifetime_def); + } + } -/// Given a set of generic declarations, returns a list of names containing all early bound -/// lifetime names for those generics. (In fact, this list may also contain other names.) -fn early_bound_lifetime_names(generics: &hir::Generics) -> Vec { - // Create two lists, dividing the lifetimes into early/late bound. - // Initially, all of them are considered late, but we will move - // things from late into early as we go if we find references to - // them. - let mut early_bound = Vec::new(); - let mut late_bound = generics.lifetimes.iter() - .map(|l| l.lifetime.name) - .collect(); - - // Any lifetime that appears in a type bound is early. - { - let mut collector = - FreeLifetimeCollector { early_bound: &mut early_bound, - late_bound: &mut late_bound }; - for ty_param in generics.ty_params.iter() { - walk_list!(&mut collector, visit_ty_param_bound, &ty_param.bounds); + debug!("insert_late_bound_lifetimes: appears_in_where_clause={:?}", + appears_in_where_clause.regions); + + // Late bound regions are those that: + // - appear in the inputs + // - do not appear in the where-clauses + // - are not implicitly captured by `impl Trait` + for lifetime in &generics.lifetimes { + let name = lifetime.lifetime.name; + + // appears in the where clauses? early-bound. + if appears_in_where_clause.regions.contains(&name) { continue; } + + // any `impl Trait` in the return type? early-bound. 
+ if appears_in_output.impl_trait { continue; } + + // does not appear in the inputs, but appears in the return + // type? eventually this will be early-bound, but for now we + // just mark it so we can issue warnings. + let constrained_by_input = constrained_by_input.regions.contains(&name); + let appears_in_output = appears_in_output.regions.contains(&name); + let will_change = !constrained_by_input && appears_in_output; + let issue_32330 = if will_change { + ty::Issue32330::WillChange { + fn_def_id: fn_def_id, + region_name: name, + } + } else { + ty::Issue32330::WontChange + }; + + debug!("insert_late_bound_lifetimes: \ + lifetime {:?} with id {:?} is late-bound ({:?}", + lifetime.lifetime.name, lifetime.lifetime.id, issue_32330); + + let prev = map.late_bound.insert(lifetime.lifetime.id, issue_32330); + assert!(prev.is_none(), "visited lifetime {:?} twice", lifetime.lifetime.id); + } + + return; + + struct ConstrainedCollector { + regions: FxHashSet, + } + + impl<'v> Visitor<'v> for ConstrainedCollector { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None } - for predicate in &generics.where_clause.predicates { - match predicate { - &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bounds, - ref bounded_ty, - ..}) => { - collector.visit_ty(&**bounded_ty); - walk_list!(&mut collector, visit_ty_param_bound, bounds); + + fn visit_ty(&mut self, ty: &'v hir::Ty) { + match ty.node { + hir::TyPath(hir::QPath::Resolved(Some(_), _)) | + hir::TyPath(hir::QPath::TypeRelative(..)) => { + // ignore lifetimes appearing in associated type + // projections, as they are not *constrained* + // (defined above) } - &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime, - ref bounds, - ..}) => { - collector.visit_lifetime(lifetime); - for bound in bounds { - collector.visit_lifetime(bound); + hir::TyPath(hir::QPath::Resolved(None, ref path)) => { + // consider only the lifetimes on the 
final + // segment; I am not sure it's even currently + // valid to have them elsewhere, but even if it + // is, those would be potentially inputs to + // projections + if let Some(last_segment) = path.segments.last() { + self.visit_path_segment(path.span, last_segment); } } - &hir::WherePredicate::EqPredicate(_) => unimplemented!() + + _ => { + intravisit::walk_ty(self, ty); + } } } - } - // Any lifetime that either has a bound or is referenced by a - // bound is early. - for lifetime_def in &generics.lifetimes { - if !lifetime_def.bounds.is_empty() { - shuffle(&mut early_bound, &mut late_bound, - lifetime_def.lifetime.name); - for bound in &lifetime_def.bounds { - shuffle(&mut early_bound, &mut late_bound, - bound.name); - } + fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) { + self.regions.insert(lifetime_ref.name); } } - return early_bound; - struct FreeLifetimeCollector<'a> { - early_bound: &'a mut Vec, - late_bound: &'a mut Vec, + struct AllCollector { + regions: FxHashSet, + impl_trait: bool } - impl<'a, 'v> Visitor<'v> for FreeLifetimeCollector<'a> { - fn visit_lifetime(&mut self, lifetime_ref: &hir::Lifetime) { - shuffle(self.early_bound, self.late_bound, - lifetime_ref.name); + impl<'v> Visitor<'v> for AllCollector { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None } - } - fn shuffle(early_bound: &mut Vec, - late_bound: &mut Vec, - name: ast::Name) { - match late_bound.iter().position(|n| *n == name) { - Some(index) => { - late_bound.swap_remove(index); - early_bound.push(name); - } - None => { } + fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) { + self.regions.insert(lifetime_ref.name); } - } -} -impl<'a> fmt::Debug for ScopeChain<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - EarlyScope(space, defs, _) => write!(fmt, "EarlyScope({:?}, {:?})", space, defs), - LateScope(defs, _) => write!(fmt, "LateScope({:?})", defs), - FnScope { fn_id, 
body_id, s: _ } => write!(fmt, "FnScope({:?}, {:?})", fn_id, body_id), - RootScope => write!(fmt, "RootScope"), + fn visit_ty(&mut self, ty: &hir::Ty) { + if let hir::TyImplTrait(_) = ty.node { + self.impl_trait = true; + } + intravisit::walk_ty(self, ty); } } } diff --git a/src/librustc/middle/stability.rs b/src/librustc/middle/stability.rs index 87bc8bb885584..f3890f1c3b7e3 100644 --- a/src/librustc/middle/stability.rs +++ b/src/librustc/middle/stability.rs @@ -14,24 +14,23 @@ pub use self::StabilityLevel::*; use dep_graph::DepNode; -use session::Session; +use hir::map as hir_map; use lint; -use middle::cstore::{CrateStore, LOCAL_CRATE}; -use middle::def; -use middle::def_id::{CRATE_DEF_INDEX, DefId}; -use middle::ty; +use hir::def::Def; +use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, DefIndex, LOCAL_CRATE}; +use ty::TyCtxt; use middle::privacy::AccessLevels; -use syntax::parse::token::InternedString; -use syntax::codemap::{Span, DUMMY_SP}; +use syntax::symbol::Symbol; +use syntax_pos::{Span, DUMMY_SP}; use syntax::ast; use syntax::ast::{NodeId, Attribute}; -use syntax::feature_gate::{GateIssue, emit_feature_err}; -use syntax::attr::{self, Stability, Deprecation, AttrMetaMethods}; -use util::nodemap::{DefIdMap, FnvHashSet, FnvHashMap}; +use syntax::feature_gate::{GateIssue, emit_feature_err, find_lang_feature_accepted_version}; +use syntax::attr::{self, Stability, Deprecation}; +use util::nodemap::{DefIdMap, FxHashSet, FxHashMap}; -use rustc_front::hir; -use rustc_front::hir::{Crate, Item, Generics, StructField, Variant}; -use rustc_front::intravisit::{self, Visitor}; +use hir; +use hir::{Item, Generics, StructField, Variant}; +use hir::intravisit::{self, Visitor, NestedVisitorMap}; use std::mem::replace; use std::cmp::Ordering; @@ -58,24 +57,63 @@ enum AnnotationKind { Container, } +/// An entry in the `depr_map`. +#[derive(Clone)] +pub struct DeprecationEntry { + /// The metadata of the attribute associated with this entry. 
+ pub attr: Deprecation, + /// The def id where the attr was originally attached. `None` for non-local + /// `DefId`'s. + origin: Option, +} + +impl DeprecationEntry { + fn local(attr: Deprecation, id: DefId) -> DeprecationEntry { + assert!(id.is_local()); + DeprecationEntry { + attr: attr, + origin: Some(id.index), + } + } + + fn external(attr: Deprecation) -> DeprecationEntry { + DeprecationEntry { + attr: attr, + origin: None, + } + } + + pub fn same_origin(&self, other: &DeprecationEntry) -> bool { + match (self.origin, other.origin) { + (Some(o1), Some(o2)) => o1 == o2, + _ => false + } + } +} + /// A stability index, giving the stability level for items and methods. pub struct Index<'tcx> { /// This is mostly a cache, except the stabilities of local items /// are filled by the annotator. stab_map: DefIdMap>, - depr_map: DefIdMap>, + depr_map: DefIdMap>, /// Maps for each crate whether it is part of the staged API. - staged_api: FnvHashMap + staged_api: FxHashMap, + + /// Features enabled for this crate. + active_features: FxHashSet, + + /// Features used by this crate. Updated before and during typeck. + used_features: FxHashMap } // A private tree-walker for producing an Index. struct Annotator<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, index: &'a mut Index<'tcx>, parent_stab: Option<&'tcx Stability>, - parent_depr: Option, - access_levels: &'a AccessLevels, + parent_depr: Option, in_trait_impl: bool, } @@ -84,7 +122,7 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { // stability. The stability is recorded in the index and used as the parent. 
fn annotate(&mut self, id: NodeId, attrs: &[Attribute], item_sp: Span, kind: AnnotationKind, visit_children: F) - where F: FnOnce(&mut Annotator) + where F: FnOnce(&mut Self) { if self.index.staged_api[&LOCAL_CRATE] && self.tcx.sess.features.borrow().staged_api { debug!("annotate(id = {:?}, attrs = {:?})", id, attrs); @@ -115,10 +153,11 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { // Check if deprecated_since < stable_since. If it is, // this is *almost surely* an accident. - if let (&Some(attr::RustcDeprecation {since: ref dep_since, ..}), - &attr::Stable {since: ref stab_since}) = (&stab.rustc_depr, &stab.level) { + if let (&Some(attr::RustcDeprecation {since: dep_since, ..}), + &attr::Stable {since: stab_since}) = (&stab.rustc_depr, &stab.level) { // Explicit version of iter::order::lt to handle parse errors properly - for (dep_v, stab_v) in dep_since.split(".").zip(stab_since.split(".")) { + for (dep_v, stab_v) in + dep_since.as_str().split(".").zip(stab_since.as_str().split(".")) { if let (Ok(dep_v), Ok(stab_v)) = (dep_v.parse::(), stab_v.parse()) { match dep_v.cmp(&stab_v) { Ordering::Less => { @@ -147,20 +186,12 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { self.parent_stab = orig_parent_stab; } else { debug!("annotate: not found, parent = {:?}", self.parent_stab); - let mut is_error = kind == AnnotationKind::Required && - self.access_levels.is_reachable(id) && - !self.tcx.sess.opts.test; if let Some(stab) = self.parent_stab { if stab.level.is_unstable() { let def_id = self.tcx.map.local_def_id(id); self.index.stab_map.insert(def_id, Some(stab)); - is_error = false; } } - if is_error { - self.tcx.sess.span_err(item_sp, "This node does not have \ - a stability attribute"); - } visit_children(self); } } else { @@ -181,14 +212,15 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { // `Deprecation` is just two pointers, no need to intern it let def_id = self.tcx.map.local_def_id(id); - self.index.depr_map.insert(def_id, Some(depr.clone())); + let depr_entry = 
Some(DeprecationEntry::local(depr, def_id)); + self.index.depr_map.insert(def_id, depr_entry.clone()); - let orig_parent_depr = replace(&mut self.parent_depr, Some(depr)); + let orig_parent_depr = replace(&mut self.parent_depr, depr_entry); visit_children(self); self.parent_depr = orig_parent_depr; - } else if let Some(depr) = self.parent_depr.clone() { + } else if let parent_depr @ Some(_) = self.parent_depr.clone() { let def_id = self.tcx.map.local_def_id(id); - self.index.depr_map.insert(def_id, Some(depr)); + self.index.depr_map.insert(def_id, parent_depr); visit_children(self); } else { visit_children(self); @@ -197,15 +229,15 @@ impl<'a, 'tcx: 'a> Annotator<'a, 'tcx> { } } -impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> { /// Because stability levels are scoped lexically, we want to walk /// nested items in the context of the outer item, so enable /// deep-walking. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, i: &Item) { + fn visit_item(&mut self, i: &'tcx Item) { let orig_in_trait_impl = self.in_trait_impl; let mut kind = AnnotationKind::Required; match i.node { @@ -213,11 +245,11 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> { // they don't have their own stability. They still can be annotated as unstable // and propagate this unstability to children, but this annotation is completely // optional. They inherit stability from their parents when unannotated. - hir::ItemImpl(_, _, _, None, _, _) | hir::ItemForeignMod(..) => { + hir::ItemImpl(.., None, _, _) | hir::ItemForeignMod(..) 
=> { self.in_trait_impl = false; kind = AnnotationKind::Container; } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { self.in_trait_impl = true; } hir::ItemStruct(ref sd, _) => { @@ -234,13 +266,13 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> { self.in_trait_impl = orig_in_trait_impl; } - fn visit_trait_item(&mut self, ti: &hir::TraitItem) { + fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { self.annotate(ti.id, &ti.attrs, ti.span, AnnotationKind::Required, |v| { intravisit::walk_trait_item(v, ti); }); } - fn visit_impl_item(&mut self, ii: &hir::ImplItem) { + fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { let kind = if self.in_trait_impl { AnnotationKind::Prohibited } else { @@ -251,47 +283,127 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Annotator<'a, 'tcx> { }); } - fn visit_variant(&mut self, var: &Variant, g: &'v Generics, item_id: NodeId) { + fn visit_variant(&mut self, var: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) { self.annotate(var.node.data.id(), &var.node.attrs, var.span, AnnotationKind::Required, |v| { intravisit::walk_variant(v, var, g, item_id); }) } - fn visit_struct_field(&mut self, s: &StructField) { - self.annotate(s.node.id, &s.node.attrs, s.span, AnnotationKind::Required, |v| { + fn visit_struct_field(&mut self, s: &'tcx StructField) { + self.annotate(s.id, &s.attrs, s.span, AnnotationKind::Required, |v| { intravisit::walk_struct_field(v, s); }); } - fn visit_foreign_item(&mut self, i: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem) { self.annotate(i.id, &i.attrs, i.span, AnnotationKind::Required, |v| { intravisit::walk_foreign_item(v, i); }); } - fn visit_macro_def(&mut self, md: &'v hir::MacroDef) { + fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) { if md.imported_from.is_none() { self.annotate(md.id, &md.attrs, md.span, AnnotationKind::Required, |_| {}); } } } -impl<'tcx> Index<'tcx> { +struct MissingStabilityAnnotations<'a, 'tcx: 
'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &'a AccessLevels, +} + +impl<'a, 'tcx: 'a> MissingStabilityAnnotations<'a, 'tcx> { + fn check_missing_stability(&self, id: NodeId, span: Span) { + let def_id = self.tcx.map.local_def_id(id); + let is_error = !self.tcx.sess.opts.test && + !self.tcx.stability.borrow().stab_map.contains_key(&def_id) && + self.access_levels.is_reachable(id); + if is_error { + self.tcx.sess.span_err(span, "This node does not have a stability attribute"); + } + } +} + +impl<'a, 'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_item(&mut self, i: &'tcx Item) { + match i.node { + // Inherent impls and foreign modules serve only as containers for other items, + // they don't have their own stability. They still can be annotated as unstable + // and propagate this unstability to children, but this annotation is completely + // optional. They inherit stability from their parents when unannotated. + hir::ItemImpl(.., None, _, _) | hir::ItemForeignMod(..) 
=> {} + + _ => self.check_missing_stability(i.id, i.span) + } + + intravisit::walk_item(self, i) + } + + fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { + self.check_missing_stability(ti.id, ti.span); + intravisit::walk_trait_item(self, ti); + } + + fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { + let impl_def_id = self.tcx.map.local_def_id(self.tcx.map.get_parent(ii.id)); + if self.tcx.impl_trait_ref(impl_def_id).is_none() { + self.check_missing_stability(ii.id, ii.span); + } + intravisit::walk_impl_item(self, ii); + } + + fn visit_variant(&mut self, var: &'tcx Variant, g: &'tcx Generics, item_id: NodeId) { + self.check_missing_stability(var.node.data.id(), var.span); + intravisit::walk_variant(self, var, g, item_id); + } + + fn visit_struct_field(&mut self, s: &'tcx StructField) { + self.check_missing_stability(s.id, s.span); + intravisit::walk_struct_field(self, s); + } + + fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem) { + self.check_missing_stability(i.id, i.span); + intravisit::walk_foreign_item(self, i); + } + + fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) { + if md.imported_from.is_none() { + self.check_missing_stability(md.id, md.span); + } + } +} + +impl<'a, 'tcx> Index<'tcx> { /// Construct the stability index for a crate being compiled. 
- pub fn build(&mut self, tcx: &ty::ctxt<'tcx>, krate: &Crate, access_levels: &AccessLevels) { + pub fn build(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features; + + // Put the active features into a map for quick lookup + self.active_features = active_lib_features.iter().map(|&(ref s, _)| s.clone()).collect(); + + let _task = tcx.dep_graph.in_task(DepNode::StabilityIndex); + let krate = tcx.map.krate(); let mut annotator = Annotator { tcx: tcx, index: self, parent_stab: None, parent_depr: None, - access_levels: access_levels, in_trait_impl: false, }; annotator.annotate(ast::CRATE_NODE_ID, &krate.attrs, krate.span, AnnotationKind::Required, |v| intravisit::walk_crate(v, krate)); } - pub fn new(krate: &Crate) -> Index<'tcx> { + pub fn new(hir_map: &hir_map::Map) -> Index<'tcx> { + let _task = hir_map.dep_graph.in_task(DepNode::StabilityIndex); + let krate = hir_map.krate(); + let mut is_staged_api = false; for attr in &krate.attrs { if attr.name() == "stable" || attr.name() == "unstable" { @@ -300,418 +412,273 @@ impl<'tcx> Index<'tcx> { } } - let mut staged_api = FnvHashMap(); + let mut staged_api = FxHashMap(); staged_api.insert(LOCAL_CRATE, is_staged_api); Index { staged_api: staged_api, stab_map: DefIdMap(), depr_map: DefIdMap(), + active_features: FxHashSet(), + used_features: FxHashMap(), } } } /// Cross-references the feature names of unstable APIs with enabled -/// features and possibly prints errors. Returns a list of all -/// features used. 
-pub fn check_unstable_api_usage(tcx: &ty::ctxt) - -> FnvHashMap { - let _task = tcx.dep_graph.in_task(DepNode::StabilityCheck); - let ref active_lib_features = tcx.sess.features.borrow().declared_lib_features; - - // Put the active features into a map for quick lookup - let active_features = active_lib_features.iter().map(|&(ref s, _)| s.clone()).collect(); - - let mut checker = Checker { - tcx: tcx, - active_features: active_features, - used_features: FnvHashMap(), - in_skip_block: 0, - }; - intravisit::walk_crate(&mut checker, tcx.map.krate()); - - checker.used_features +/// features and possibly prints errors. +pub fn check_unstable_api_usage<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut checker = Checker { tcx: tcx }; + tcx.visit_all_item_likes_in_krate(DepNode::StabilityCheck, &mut checker.as_deep_visitor()); } struct Checker<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - active_features: FnvHashSet, - used_features: FnvHashMap, - // Within a block where feature gate checking can be skipped. - in_skip_block: u32, + tcx: TyCtxt<'a, 'tcx, 'tcx>, } -impl<'a, 'tcx> Checker<'a, 'tcx> { - fn check(&mut self, id: DefId, span: Span, - stab: &Option<&Stability>, _depr: &Option) { - if !is_staged_api(self.tcx, id) { +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn check_stability(self, def_id: DefId, id: NodeId, span: Span) { + if self.sess.codemap().span_allows_unstable(span) { + debug!("stability: \ + skipping span={:?} since it is internal", span); return; } + + let lint_deprecated = |note: Option| { + let msg = if let Some(note) = note { + format!("use of deprecated item: {}", note) + } else { + format!("use of deprecated item") + }; + + self.sess.add_lint(lint::builtin::DEPRECATED, id, span, msg); + }; + + // Deprecated attributes apply in-crate and cross-crate. 
+ if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) { + let skip = if id == ast::DUMMY_NODE_ID { + true + } else { + let parent_def_id = self.map.local_def_id(self.map.get_parent(id)); + self.lookup_deprecation_entry(parent_def_id).map_or(false, |parent_depr| { + parent_depr.same_origin(&depr_entry) + }) + }; + + if !skip { + lint_deprecated(depr_entry.attr.note); + } + } + + let is_staged_api = *self.stability.borrow_mut().staged_api.entry(def_id.krate) + .or_insert_with(|| self.sess.cstore.is_staged_api(def_id.krate)); + if !is_staged_api { + return; + } + + let stability = self.lookup_stability(def_id); + debug!("stability: \ + inspecting def_id={:?} span={:?} of stability={:?}", def_id, span, stability); + + if let Some(&Stability{rustc_depr: Some(attr::RustcDeprecation { reason, .. }), ..}) + = stability { + if id != ast::DUMMY_NODE_ID { + lint_deprecated(Some(reason)); + } + } + // Only the cross-crate scenario matters when checking unstable APIs - let cross_crate = !id.is_local(); + let cross_crate = !def_id.is_local(); if !cross_crate { return } - // We don't need to check for stability - presumably compiler generated code. - if self.in_skip_block > 0 { - return; + if let Some(&Stability { ref level, ref feature, .. }) = stability { + self.stability.borrow_mut().used_features.insert(feature.clone(), level.clone()); } - match *stab { + match stability { Some(&Stability { level: attr::Unstable {ref reason, issue}, ref feature, .. 
}) => { - self.used_features.insert(feature.clone(), Unstable); - - if !self.active_features.contains(feature) { + if !self.stability.borrow().active_features.contains(feature) { let msg = match *reason { Some(ref r) => format!("use of unstable library feature '{}': {}", - &feature, &r), + &feature.as_str(), &r), None => format!("use of unstable library feature '{}'", &feature) }; - emit_feature_err(&self.tcx.sess.parse_sess.span_diagnostic, - &feature, span, GateIssue::Library(Some(issue)), &msg); + emit_feature_err(&self.sess.parse_sess, &feature.as_str(), span, + GateIssue::Library(Some(issue)), &msg); } } - Some(&Stability { ref level, ref feature, .. }) => { - self.used_features.insert(feature.clone(), StabilityLevel::from_attr_level(level)); - + Some(_) => { // Stable APIs are always ok to call and deprecated APIs are - // handled by a lint. + // handled by the lint emitting logic above. } None => { // This is an 'unmarked' API, which should not exist // in the standard library. - if self.tcx.sess.features.borrow().unmarked_api { - self.tcx.sess.struct_span_warn(span, "use of unmarked library feature") - .span_note(span, "this is either a bug in the library you are \ - using or a bug in the compiler - please \ - report it in both places") - .emit() + if self.sess.features.borrow().unmarked_api { + self.sess.struct_span_warn(span, "use of unmarked library feature") + .span_note(span, "this is either a bug in the library you are \ + using or a bug in the compiler - please \ + report it in both places") + .emit() } else { - self.tcx.sess.struct_span_err(span, "use of unmarked library feature") - .span_note(span, "this is either a bug in the library you are \ - using or a bug in the compiler - please \ - report it in both places") - .span_note(span, "use #![feature(unmarked_api)] in the \ - crate attributes to override this") - .emit() + self.sess.struct_span_err(span, "use of unmarked library feature") + .span_note(span, "this is either a bug in the library you 
are \ + using or a bug in the compiler - please \ + report it in both places") + .span_note(span, "use #![feature(unmarked_api)] in the \ + crate attributes to override this") + .emit() } } } } } -impl<'a, 'v, 'tcx> Visitor<'v> for Checker<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { /// Because stability levels are scoped lexically, we want to walk /// nested items in the context of the outer item, so enable /// deep-walking. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) - } - - fn visit_item(&mut self, item: &hir::Item) { - // When compiling with --test we don't enforce stability on the - // compiler-generated test module, demarcated with `DUMMY_SP` plus the - // name `__test` - if item.span == DUMMY_SP && item.name.as_str() == "__test" { return } - - check_item(self.tcx, item, true, - &mut |id, sp, stab, depr| self.check(id, sp, stab, depr)); - intravisit::walk_item(self, item); - } - - fn visit_expr(&mut self, ex: &hir::Expr) { - check_expr(self.tcx, ex, - &mut |id, sp, stab, depr| self.check(id, sp, stab, depr)); - intravisit::walk_expr(self, ex); - } - - fn visit_path(&mut self, path: &hir::Path, id: ast::NodeId) { - check_path(self.tcx, path, id, - &mut |id, sp, stab, depr| self.check(id, sp, stab, depr)); - intravisit::walk_path(self, path) - } - - fn visit_path_list_item(&mut self, prefix: &hir::Path, item: &hir::PathListItem) { - check_path_list_item(self.tcx, item, - &mut |id, sp, stab, depr| self.check(id, sp, stab, depr)); - intravisit::walk_path_list_item(self, prefix, item) - } - - fn visit_pat(&mut self, pat: &hir::Pat) { - check_pat(self.tcx, pat, - &mut |id, sp, stab, depr| self.check(id, sp, stab, depr)); - intravisit::walk_pat(self, pat) - } - - fn visit_block(&mut self, b: &hir::Block) { - let old_skip_count = self.in_skip_block; - match b.rules { - hir::BlockCheckMode::PushUnstableBlock => { - self.in_skip_block += 1; - } - hir::BlockCheckMode::PopUnstableBlock 
=> { - self.in_skip_block = self.in_skip_block.checked_sub(1).unwrap(); + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + match item.node { + hir::ItemExternCrate(_) => { + // compiler-generated `extern crate` items have a dummy span. + if item.span == DUMMY_SP { return } + + let cnum = match self.tcx.sess.cstore.extern_mod_stmt_cnum(item.id) { + Some(cnum) => cnum, + None => return, + }; + let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; + self.tcx.check_stability(def_id, item.id, item.span); } - _ => {} - } - intravisit::walk_block(self, b); - self.in_skip_block = old_skip_count; - } -} - -/// Helper for discovering nodes to check for stability -pub fn check_item(tcx: &ty::ctxt, item: &hir::Item, warn_about_defns: bool, - cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - match item.node { - hir::ItemExternCrate(_) => { - // compiler-generated `extern crate` items have a dummy span. - if item.span == DUMMY_SP { return } - - let cnum = match tcx.sess.cstore.extern_mod_stmt_cnum(item.id) { - Some(cnum) => cnum, - None => return, - }; - let id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; - maybe_do_stability_check(tcx, id, item.span, cb); - } - - // For implementations of traits, check the stability of each item - // individually as it's possible to have a stable trait with unstable - // items. 
- hir::ItemImpl(_, _, _, Some(ref t), _, ref impl_items) => { - let trait_did = tcx.def_map.borrow().get(&t.ref_id).unwrap().def_id(); - let trait_items = tcx.trait_items(trait_did); - - for impl_item in impl_items { - let item = trait_items.iter().find(|item| { - item.name() == impl_item.name - }).unwrap(); - if warn_about_defns { - maybe_do_stability_check(tcx, item.def_id(), impl_item.span, cb); - } - } - } - - _ => (/* pass */) - } -} -/// Helper for discovering nodes to check for stability -pub fn check_expr(tcx: &ty::ctxt, e: &hir::Expr, - cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - let span; - let id = match e.node { - hir::ExprMethodCall(i, _, _) => { - span = i.span; - let method_call = ty::MethodCall::expr(e.id); - tcx.tables.borrow().method_map[&method_call].def_id - } - hir::ExprField(ref base_e, ref field) => { - span = field.span; - match tcx.expr_ty_adjusted(base_e).sty { - ty::TyStruct(def, _) => def.struct_variant().field_named(field.node).did, - _ => tcx.sess.span_bug(e.span, - "stability::check_expr: named field access on non-struct") - } - } - hir::ExprTupField(ref base_e, ref field) => { - span = field.span; - match tcx.expr_ty_adjusted(base_e).sty { - ty::TyStruct(def, _) => def.struct_variant().fields[field.node].did, - ty::TyTuple(..) => return, - _ => tcx.sess.span_bug(e.span, - "stability::check_expr: unnamed field access on \ - something other than a tuple or struct") - } - } - hir::ExprStruct(_, ref expr_fields, _) => { - let type_ = tcx.expr_ty(e); - match type_.sty { - ty::TyStruct(def, _) => { - // check the stability of each field that appears - // in the construction expression. - for field in expr_fields { - let did = def.struct_variant() - .field_named(field.name.node) - .did; - maybe_do_stability_check(tcx, did, field.span, cb); + // For implementations of traits, check the stability of each item + // individually as it's possible to have a stable trait with unstable + // items. 
+ hir::ItemImpl(.., Some(ref t), _, ref impl_item_refs) => { + if let Def::Trait(trait_did) = t.path.def { + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); + let trait_item_def_id = self.tcx.associated_items(trait_did) + .find(|item| item.name == impl_item.name).map(|item| item.def_id); + if let Some(def_id) = trait_item_def_id { + // Pass `DUMMY_NODE_ID` to skip deprecation warnings. + self.tcx.check_stability(def_id, ast::DUMMY_NODE_ID, impl_item.span); + } } - - // we're done. - return - } - // we don't look at stability attributes on - // struct-like enums (yet...), but it's definitely not - // a bug to have construct one. - ty::TyEnum(..) => return, - _ => { - tcx.sess.span_bug(e.span, - &format!("stability::check_expr: struct construction \ - of non-struct, type {:?}", - type_)); } } - } - _ => return - }; - maybe_do_stability_check(tcx, id, span, cb); -} - -pub fn check_path(tcx: &ty::ctxt, path: &hir::Path, id: ast::NodeId, - cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - match tcx.def_map.borrow().get(&id).map(|d| d.full_def()) { - Some(def::DefPrimTy(..)) => {} - Some(def::DefSelfTy(..)) => {} - Some(def) => { - maybe_do_stability_check(tcx, def.def_id(), path.span, cb); + _ => (/* pass */) } - None => {} + intravisit::walk_item(self, item); } -} -pub fn check_path_list_item(tcx: &ty::ctxt, item: &hir::PathListItem, - cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - match tcx.def_map.borrow().get(&item.node.id()).map(|d| d.full_def()) { - Some(def::DefPrimTy(..)) => {} - Some(def) => { - maybe_do_stability_check(tcx, def.def_id(), item.span, cb); + fn visit_path(&mut self, path: &'tcx hir::Path, id: ast::NodeId) { + match path.def { + Def::PrimTy(..) | Def::SelfTy(..) 
| Def::Err => {} + _ => self.tcx.check_stability(path.def.def_id(), id, path.span) } - None => {} + intravisit::walk_path(self, path) } } -pub fn check_pat(tcx: &ty::ctxt, pat: &hir::Pat, - cb: &mut FnMut(DefId, Span, &Option<&Stability>, &Option)) { - debug!("check_pat(pat = {:?})", pat); - if is_internal(tcx, pat.span) { return; } - - let v = match tcx.pat_ty_opt(pat) { - Some(&ty::TyS { sty: ty::TyStruct(def, _), .. }) => def.struct_variant(), - Some(_) | None => return, - }; - match pat.node { - // Foo(a, b, c) - // A Variant(..) pattern `hir::PatEnum(_, None)` doesn't have to be recursed into. - hir::PatEnum(_, Some(ref pat_fields)) => { - for (field, struct_field) in pat_fields.iter().zip(&v.fields) { - maybe_do_stability_check(tcx, struct_field.did, field.span, cb) - } - } - // Foo { a, b, c } - hir::PatStruct(_, ref pat_fields, _) => { - for field in pat_fields { - let did = v.field_named(field.node.name).did; - maybe_do_stability_check(tcx, did, field.span, cb); - } +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Lookup the stability for a node, loading external crate + /// metadata as necessary. + pub fn lookup_stability(self, id: DefId) -> Option<&'gcx Stability> { + if let Some(st) = self.stability.borrow().stab_map.get(&id) { + return *st; } - // everything else is fine. 
- _ => {} - } -} - -fn maybe_do_stability_check(tcx: &ty::ctxt, id: DefId, span: Span, - cb: &mut FnMut(DefId, Span, - &Option<&Stability>, &Option)) { - if is_internal(tcx, span) { - debug!("maybe_do_stability_check: \ - skipping span={:?} since it is internal", span); - return; - } - let (stability, deprecation) = if is_staged_api(tcx, id) { - (lookup_stability(tcx, id), None) - } else { - (None, lookup_deprecation(tcx, id)) - }; - debug!("maybe_do_stability_check: \ - inspecting id={:?} span={:?} of stability={:?}", id, span, stability); - cb(id, span, &stability, &deprecation); -} - -fn is_internal(tcx: &ty::ctxt, span: Span) -> bool { - tcx.sess.codemap().span_allows_unstable(span) -} -fn is_staged_api(tcx: &ty::ctxt, id: DefId) -> bool { - match tcx.trait_item_of_item(id) { - Some(ty::MethodTraitItemId(trait_method_id)) - if trait_method_id != id => { - is_staged_api(tcx, trait_method_id) - } - _ => { - *tcx.stability.borrow_mut().staged_api.entry(id.krate).or_insert_with( - || tcx.sess.cstore.is_staged_api(id.krate)) - } + let st = self.lookup_stability_uncached(id); + self.stability.borrow_mut().stab_map.insert(id, st); + st } -} -/// Lookup the stability for a node, loading external crate -/// metadata as necessary. 
-pub fn lookup_stability<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'tcx Stability> { - if let Some(st) = tcx.stability.borrow().stab_map.get(&id) { - return *st; + pub fn lookup_deprecation(self, id: DefId) -> Option { + self.lookup_deprecation_entry(id).map(|depr| depr.attr) } - let st = lookup_stability_uncached(tcx, id); - tcx.stability.borrow_mut().stab_map.insert(id, st); - st -} + pub fn lookup_deprecation_entry(self, id: DefId) -> Option { + if let Some(depr) = self.stability.borrow().depr_map.get(&id) { + return depr.clone(); + } -pub fn lookup_deprecation<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option { - if let Some(depr) = tcx.stability.borrow().depr_map.get(&id) { - return depr.clone(); + let depr = self.lookup_deprecation_uncached(id); + self.stability.borrow_mut().depr_map.insert(id, depr.clone()); + depr } - let depr = lookup_deprecation_uncached(tcx, id); - tcx.stability.borrow_mut().depr_map.insert(id, depr.clone()); - depr -} - -fn lookup_stability_uncached<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option<&'tcx Stability> { - debug!("lookup(id={:?})", id); - if id.is_local() { - None // The stability cache is filled partially lazily - } else { - tcx.sess.cstore.stability(id).map(|st| tcx.intern_stability(st)) + fn lookup_stability_uncached(self, id: DefId) -> Option<&'gcx Stability> { + debug!("lookup(id={:?})", id); + if id.is_local() { + None // The stability cache is filled partially lazily + } else { + self.sess.cstore.stability(id).map(|st| self.intern_stability(st)) + } } -} -fn lookup_deprecation_uncached<'tcx>(tcx: &ty::ctxt<'tcx>, id: DefId) -> Option { - debug!("lookup(id={:?})", id); - if id.is_local() { - None // The stability cache is filled partially lazily - } else { - tcx.sess.cstore.deprecation(id) + fn lookup_deprecation_uncached(self, id: DefId) -> Option { + debug!("lookup(id={:?})", id); + if id.is_local() { + None // The stability cache is filled partially lazily + } else { + 
self.sess.cstore.deprecation(id).map(DeprecationEntry::external) + } } } /// Given the list of enabled features that were not language features (i.e. that /// were expected to be library features), and the list of features used from /// libraries, identify activated features that don't exist and error about them. -pub fn check_unused_or_stable_features(sess: &Session, - lib_features_used: &FnvHashMap) { +pub fn check_unused_or_stable_features<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &AccessLevels) { + let sess = &tcx.sess; + + if tcx.stability.borrow().staged_api[&LOCAL_CRATE] && tcx.sess.features.borrow().staged_api { + let _task = tcx.dep_graph.in_task(DepNode::StabilityIndex); + let krate = tcx.map.krate(); + let mut missing = MissingStabilityAnnotations { + tcx: tcx, + access_levels: access_levels, + }; + missing.check_missing_stability(ast::CRATE_NODE_ID, krate.span); + intravisit::walk_crate(&mut missing, krate); + krate.visit_all_item_likes(&mut missing.as_deep_visitor()); + } + let ref declared_lib_features = sess.features.borrow().declared_lib_features; - let mut remaining_lib_features: FnvHashMap + let mut remaining_lib_features: FxHashMap = declared_lib_features.clone().into_iter().collect(); - let stable_msg = "this feature is stable. attribute no longer needed"; + fn format_stable_since_msg(version: &str) -> String { + format!("this feature has been stable since {}. 
Attribute no longer needed", version) + } - for &span in &sess.features.borrow().declared_stable_lang_features { + for &(ref stable_lang_feature, span) in &sess.features.borrow().declared_stable_lang_features { + let version = find_lang_feature_accepted_version(&stable_lang_feature.as_str()) + .expect("unexpectedly couldn't find version feature was stabilized"); sess.add_lint(lint::builtin::STABLE_FEATURES, ast::CRATE_NODE_ID, span, - stable_msg.to_string()); + format_stable_since_msg(version)); } - for (used_lib_feature, level) in lib_features_used { + let index = tcx.stability.borrow(); + for (used_lib_feature, level) in &index.used_features { match remaining_lib_features.remove(used_lib_feature) { Some(span) => { - if *level == Stable { + if let &attr::StabilityLevel::Stable { since: ref version } = level { sess.add_lint(lint::builtin::STABLE_FEATURES, ast::CRATE_NODE_ID, span, - stable_msg.to_string()); + format_stable_since_msg(&version.as_str())); } } None => ( /* used but undeclared, handled during the previous ast visit */ ) diff --git a/src/librustc/middle/subst.rs b/src/librustc/middle/subst.rs deleted file mode 100644 index 61f7b2db4c432..0000000000000 --- a/src/librustc/middle/subst.rs +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Type substitutions. 
- -pub use self::ParamSpace::*; -pub use self::RegionSubsts::*; - -use middle::cstore; -use middle::ty::{self, Ty}; -use middle::ty::fold::{TypeFoldable, TypeFolder}; - -use serialize::{Encodable, Encoder, Decodable, Decoder}; -use std::fmt; -use std::iter::IntoIterator; -use std::slice::Iter; -use std::vec::{Vec, IntoIter}; -use syntax::codemap::{Span, DUMMY_SP}; - -/////////////////////////////////////////////////////////////////////////// - -/// A substitution mapping type/region parameters to new values. We -/// identify each in-scope parameter by an *index* and a *parameter -/// space* (which indices where the parameter is defined; see -/// `ParamSpace`). -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct Substs<'tcx> { - pub types: VecPerParamSpace>, - pub regions: RegionSubsts, -} - -/// Represents the values to use when substituting lifetime parameters. -/// If the value is `ErasedRegions`, then this subst is occurring during -/// trans, and all region parameters will be replaced with `ty::ReStatic`. 
-#[derive(Clone, PartialEq, Eq, Hash)] -pub enum RegionSubsts { - ErasedRegions, - NonerasedRegions(VecPerParamSpace) -} - -impl<'tcx> Substs<'tcx> { - pub fn new(t: VecPerParamSpace>, - r: VecPerParamSpace) - -> Substs<'tcx> - { - Substs { types: t, regions: NonerasedRegions(r) } - } - - pub fn new_type(t: Vec>, - r: Vec) - -> Substs<'tcx> - { - Substs::new(VecPerParamSpace::new(t, Vec::new(), Vec::new()), - VecPerParamSpace::new(r, Vec::new(), Vec::new())) - } - - pub fn new_trait(t: Vec>, - r: Vec, - s: Ty<'tcx>) - -> Substs<'tcx> - { - Substs::new(VecPerParamSpace::new(t, vec!(s), Vec::new()), - VecPerParamSpace::new(r, Vec::new(), Vec::new())) - } - - pub fn erased(t: VecPerParamSpace>) -> Substs<'tcx> - { - Substs { types: t, regions: ErasedRegions } - } - - pub fn empty() -> Substs<'tcx> { - Substs { - types: VecPerParamSpace::empty(), - regions: NonerasedRegions(VecPerParamSpace::empty()), - } - } - - pub fn trans_empty() -> Substs<'tcx> { - Substs { - types: VecPerParamSpace::empty(), - regions: ErasedRegions - } - } - - pub fn is_noop(&self) -> bool { - let regions_is_noop = match self.regions { - ErasedRegions => false, // may be used to canonicalize - NonerasedRegions(ref regions) => regions.is_empty(), - }; - - regions_is_noop && self.types.is_empty() - } - - pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> { - *self.types.get(ty_param_def.space, ty_param_def.index as usize) - } - - pub fn self_ty(&self) -> Option> { - self.types.get_self().cloned() - } - - pub fn with_self_ty(&self, self_ty: Ty<'tcx>) -> Substs<'tcx> { - assert!(self.self_ty().is_none()); - let mut s = (*self).clone(); - s.types.push(SelfSpace, self_ty); - s - } - - pub fn erase_regions(self) -> Substs<'tcx> { - let Substs { types, regions: _ } = self; - Substs { types: types, regions: ErasedRegions } - } - - /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method - /// to easily access the set of region substitutions. 
- pub fn regions<'a>(&'a self) -> &'a VecPerParamSpace { - match self.regions { - ErasedRegions => panic!("Erased regions only expected in trans"), - NonerasedRegions(ref r) => r - } - } - - /// Since ErasedRegions are only to be used in trans, most of the compiler can use this method - /// to easily access the set of region substitutions. - pub fn mut_regions<'a>(&'a mut self) -> &'a mut VecPerParamSpace { - match self.regions { - ErasedRegions => panic!("Erased regions only expected in trans"), - NonerasedRegions(ref mut r) => r - } - } - - pub fn with_method(self, - m_types: Vec>, - m_regions: Vec) - -> Substs<'tcx> - { - let Substs { types, regions } = self; - let types = types.with_vec(FnSpace, m_types); - let regions = regions.map(|r| r.with_vec(FnSpace, m_regions)); - Substs { types: types, regions: regions } - } - - pub fn method_to_trait(self) -> Substs<'tcx> { - let Substs { mut types, regions } = self; - types.truncate(FnSpace, 0); - let regions = regions.map(|mut r| { r.truncate(FnSpace, 0); r }); - Substs { types: types, regions: regions } - } -} - -impl<'tcx> Encodable for Substs<'tcx> { - - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - cstore::tls::with_encoding_context(s, |ecx, rbml_w| { - ecx.encode_substs(rbml_w, self); - Ok(()) - }) - } -} - -impl<'tcx> Decodable for Substs<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - Ok(dcx.decode_substs(rbml_r)) - }) - } -} - -impl<'tcx> Decodable for &'tcx Substs<'tcx> { - fn decode(d: &mut D) -> Result<&'tcx Substs<'tcx>, D::Error> { - let substs = cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - let substs = dcx.decode_substs(rbml_r); - dcx.tcx().mk_substs(substs) - }); - - Ok(substs) - } -} - -impl RegionSubsts { - pub fn map(self, op: F) -> RegionSubsts where - F: FnOnce(VecPerParamSpace) -> VecPerParamSpace, - { - match self { - ErasedRegions => ErasedRegions, - NonerasedRegions(r) => NonerasedRegions(op(r)) - } - } - - 
pub fn is_erased(&self) -> bool { - match *self { - ErasedRegions => true, - NonerasedRegions(_) => false, - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// ParamSpace - -#[derive(PartialOrd, Ord, PartialEq, Eq, Copy, - Clone, Hash, RustcEncodable, RustcDecodable, Debug)] -pub enum ParamSpace { - TypeSpace, // Type parameters attached to a type definition, trait, or impl - SelfSpace, // Self parameter on a trait - FnSpace, // Type parameters attached to a method or fn -} - -impl ParamSpace { - pub fn all() -> [ParamSpace; 3] { - [TypeSpace, SelfSpace, FnSpace] - } - - pub fn to_uint(self) -> usize { - match self { - TypeSpace => 0, - SelfSpace => 1, - FnSpace => 2, - } - } - - pub fn from_uint(u: usize) -> ParamSpace { - match u { - 0 => TypeSpace, - 1 => SelfSpace, - 2 => FnSpace, - _ => panic!("Invalid ParamSpace: {}", u) - } - } -} - -/// Vector of things sorted by param space. Used to keep -/// the set of things declared on the type, self, or method -/// distinct. -#[derive(PartialEq, Eq, Clone, Hash, RustcEncodable, RustcDecodable)] -pub struct VecPerParamSpace { - // This was originally represented as a tuple with one Vec for - // each variant of ParamSpace, and that remains the abstraction - // that it provides to its clients. - // - // Here is how the representation corresponds to the abstraction - // i.e. the "abstraction function" AF: - // - // AF(self) = (self.content[..self.type_limit], - // self.content[self.type_limit..self.self_limit], - // self.content[self.self_limit..]) - type_limit: usize, - self_limit: usize, - content: Vec, -} - -/// The `split` function converts one `VecPerParamSpace` into this -/// `SeparateVecsPerParamSpace` structure. 
-pub struct SeparateVecsPerParamSpace { - pub types: Vec, - pub selfs: Vec, - pub fns: Vec, -} - -impl fmt::Debug for VecPerParamSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "[{:?};{:?};{:?}]", - self.get_slice(TypeSpace), - self.get_slice(SelfSpace), - self.get_slice(FnSpace)) - } -} - -impl VecPerParamSpace { - fn limits(&self, space: ParamSpace) -> (usize, usize) { - match space { - TypeSpace => (0, self.type_limit), - SelfSpace => (self.type_limit, self.self_limit), - FnSpace => (self.self_limit, self.content.len()), - } - } - - pub fn empty() -> VecPerParamSpace { - VecPerParamSpace { - type_limit: 0, - self_limit: 0, - content: Vec::new() - } - } - - pub fn params_from_type(types: Vec) -> VecPerParamSpace { - VecPerParamSpace::empty().with_vec(TypeSpace, types) - } - - /// `t` is the type space. - /// `s` is the self space. - /// `f` is the fn space. - pub fn new(t: Vec, s: Vec, f: Vec) -> VecPerParamSpace { - let type_limit = t.len(); - let self_limit = type_limit + s.len(); - - let mut content = t; - content.extend(s); - content.extend(f); - - VecPerParamSpace { - type_limit: type_limit, - self_limit: self_limit, - content: content, - } - } - - fn new_internal(content: Vec, type_limit: usize, self_limit: usize) - -> VecPerParamSpace - { - VecPerParamSpace { - type_limit: type_limit, - self_limit: self_limit, - content: content, - } - } - - /// Appends `value` to the vector associated with `space`. - /// - /// Unlike the `push` method in `Vec`, this should not be assumed - /// to be a cheap operation (even when amortized over many calls). - pub fn push(&mut self, space: ParamSpace, value: T) { - let (_, limit) = self.limits(space); - match space { - TypeSpace => { self.type_limit += 1; self.self_limit += 1; } - SelfSpace => { self.self_limit += 1; } - FnSpace => { } - } - self.content.insert(limit, value); - } - - /// Appends `values` to the vector associated with `space`. 
- /// - /// Unlike the `extend` method in `Vec`, this should not be assumed - /// to be a cheap operation (even when amortized over many calls). - pub fn extend>(&mut self, space: ParamSpace, values: I) { - // This could be made more efficient, obviously. - for item in values { - self.push(space, item); - } - } - - pub fn pop(&mut self, space: ParamSpace) -> Option { - let (start, limit) = self.limits(space); - if start == limit { - None - } else { - match space { - TypeSpace => { self.type_limit -= 1; self.self_limit -= 1; } - SelfSpace => { self.self_limit -= 1; } - FnSpace => {} - } - if self.content.is_empty() { - None - } else { - Some(self.content.remove(limit - 1)) - } - } - } - - pub fn truncate(&mut self, space: ParamSpace, len: usize) { - // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n). - while self.len(space) > len { - self.pop(space); - } - } - - pub fn replace(&mut self, space: ParamSpace, elems: Vec) { - // FIXME (#15435): slow; O(n^2); could enhance vec to make it O(n). - self.truncate(space, 0); - for t in elems { - self.push(space, t); - } - } - - pub fn get_self<'a>(&'a self) -> Option<&'a T> { - let v = self.get_slice(SelfSpace); - assert!(v.len() <= 1); - if v.is_empty() { None } else { Some(&v[0]) } - } - - pub fn len(&self, space: ParamSpace) -> usize { - self.get_slice(space).len() - } - - pub fn is_empty_in(&self, space: ParamSpace) -> bool { - self.len(space) == 0 - } - - pub fn get_slice<'a>(&'a self, space: ParamSpace) -> &'a [T] { - let (start, limit) = self.limits(space); - &self.content[start.. limit] - } - - pub fn get_mut_slice<'a>(&'a mut self, space: ParamSpace) -> &'a mut [T] { - let (start, limit) = self.limits(space); - &mut self.content[start.. 
limit] - } - - pub fn opt_get<'a>(&'a self, - space: ParamSpace, - index: usize) - -> Option<&'a T> { - let v = self.get_slice(space); - if index < v.len() { Some(&v[index]) } else { None } - } - - pub fn get<'a>(&'a self, space: ParamSpace, index: usize) -> &'a T { - &self.get_slice(space)[index] - } - - pub fn iter<'a>(&'a self) -> Iter<'a,T> { - self.content.iter() - } - - pub fn into_iter(self) -> IntoIter { - self.content.into_iter() - } - - pub fn iter_enumerated<'a>(&'a self) -> EnumeratedItems<'a,T> { - EnumeratedItems::new(self) - } - - pub fn as_slice(&self) -> &[T] { - &self.content - } - - pub fn into_vec(self) -> Vec { - self.content - } - - pub fn all_vecs

(&self, mut pred: P) -> bool where - P: FnMut(&[T]) -> bool, - { - let spaces = [TypeSpace, SelfSpace, FnSpace]; - spaces.iter().all(|&space| { pred(self.get_slice(space)) }) - } - - pub fn all

(&self, pred: P) -> bool where P: FnMut(&T) -> bool { - self.iter().all(pred) - } - - pub fn any

(&self, pred: P) -> bool where P: FnMut(&T) -> bool { - self.iter().any(pred) - } - - pub fn is_empty(&self) -> bool { - self.all_vecs(|v| v.is_empty()) - } - - pub fn map(&self, pred: P) -> VecPerParamSpace where P: FnMut(&T) -> U { - let result = self.iter().map(pred).collect(); - VecPerParamSpace::new_internal(result, - self.type_limit, - self.self_limit) - } - - pub fn map_enumerated(&self, pred: P) -> VecPerParamSpace where - P: FnMut((ParamSpace, usize, &T)) -> U, - { - let result = self.iter_enumerated().map(pred).collect(); - VecPerParamSpace::new_internal(result, - self.type_limit, - self.self_limit) - } - - pub fn split(self) -> SeparateVecsPerParamSpace { - let VecPerParamSpace { type_limit, self_limit, content } = self; - - let mut content_iter = content.into_iter(); - - SeparateVecsPerParamSpace { - types: content_iter.by_ref().take(type_limit).collect(), - selfs: content_iter.by_ref().take(self_limit - type_limit).collect(), - fns: content_iter.collect() - } - } - - pub fn with_vec(mut self, space: ParamSpace, vec: Vec) - -> VecPerParamSpace - { - assert!(self.is_empty_in(space)); - self.replace(space, vec); - self - } -} - -#[derive(Clone)] -pub struct EnumeratedItems<'a,T:'a> { - vec: &'a VecPerParamSpace, - space_index: usize, - elem_index: usize -} - -impl<'a,T> EnumeratedItems<'a,T> { - fn new(v: &'a VecPerParamSpace) -> EnumeratedItems<'a,T> { - let mut result = EnumeratedItems { vec: v, space_index: 0, elem_index: 0 }; - result.adjust_space(); - result - } - - fn adjust_space(&mut self) { - let spaces = ParamSpace::all(); - while - self.space_index < spaces.len() && - self.elem_index >= self.vec.len(spaces[self.space_index]) - { - self.space_index += 1; - self.elem_index = 0; - } - } -} - -impl<'a,T> Iterator for EnumeratedItems<'a,T> { - type Item = (ParamSpace, usize, &'a T); - - fn next(&mut self) -> Option<(ParamSpace, usize, &'a T)> { - let spaces = ParamSpace::all(); - if self.space_index < spaces.len() { - let space = 
spaces[self.space_index]; - let index = self.elem_index; - let item = self.vec.get(space, index); - - self.elem_index += 1; - self.adjust_space(); - - Some((space, index, item)) - } else { - None - } - } -} - -impl IntoIterator for VecPerParamSpace { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - self.into_vec().into_iter() - } -} - -impl<'a,T> IntoIterator for &'a VecPerParamSpace { - type Item = &'a T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.as_slice().into_iter() - } -} - - -/////////////////////////////////////////////////////////////////////////// -// Public trait `Subst` -// -// Just call `foo.subst(tcx, substs)` to perform a substitution across -// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when -// there is more information available (for better errors). - -pub trait Subst<'tcx> : Sized { - fn subst(&self, tcx: &ty::ctxt<'tcx>, substs: &Substs<'tcx>) -> Self { - self.subst_spanned(tcx, substs, None) - } - - fn subst_spanned(&self, tcx: &ty::ctxt<'tcx>, - substs: &Substs<'tcx>, - span: Option) - -> Self; -} - -impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T { - fn subst_spanned(&self, - tcx: &ty::ctxt<'tcx>, - substs: &Substs<'tcx>, - span: Option) - -> T - { - let mut folder = SubstFolder { tcx: tcx, - substs: substs, - span: span, - root_ty: None, - ty_stack_depth: 0, - region_binders_passed: 0 }; - (*self).fold_with(&mut folder) - } -} - -/////////////////////////////////////////////////////////////////////////// -// The actual substitution engine itself is a type folder. - -struct SubstFolder<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - substs: &'a Substs<'tcx>, - - // The location for which the substitution is performed, if available. - span: Option, - - // The root type that is being substituted, if available. 
- root_ty: Option>, - - // Depth of type stack - ty_stack_depth: usize, - - // Number of region binders we have passed through while doing the substitution - region_binders_passed: u32, -} - -impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx } - - fn enter_region_binder(&mut self) { - self.region_binders_passed += 1; - } - - fn exit_region_binder(&mut self) { - self.region_binders_passed -= 1; - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - // Note: This routine only handles regions that are bound on - // type declarations and other outer declarations, not those - // bound in *fn types*. Region substitution of the bound - // regions that appear in a function signature is done using - // the specialized routine `ty::replace_late_regions()`. - match r { - ty::ReEarlyBound(data) => { - match self.substs.regions { - ErasedRegions => ty::ReStatic, - NonerasedRegions(ref regions) => - match regions.opt_get(data.space, data.index as usize) { - Some(&r) => { - self.shift_region_through_binders(r) - } - None => { - let span = self.span.unwrap_or(DUMMY_SP); - self.tcx().sess.span_bug( - span, - &format!("Type parameter out of range \ - when substituting in region {} (root type={:?}) \ - (space={:?}, index={})", - data.name, - self.root_ty, - data.space, - data.index)); - } - } - } - } - _ => r - } - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.needs_subst() { - return t; - } - - // track the root type we were asked to substitute - let depth = self.ty_stack_depth; - if depth == 0 { - self.root_ty = Some(t); - } - self.ty_stack_depth += 1; - - let t1 = match t.sty { - ty::TyParam(p) => { - self.ty_for_param(p, t) - } - _ => { - t.super_fold_with(self) - } - }; - - assert_eq!(depth + 1, self.ty_stack_depth); - self.ty_stack_depth -= 1; - if depth == 0 { - self.root_ty = None; - } - - return t1; - } -} - -impl<'a,'tcx> SubstFolder<'a,'tcx> { - fn ty_for_param(&self, p: ty::ParamTy, 
source_ty: Ty<'tcx>) -> Ty<'tcx> { - // Look up the type in the substitutions. It really should be in there. - let opt_ty = self.substs.types.opt_get(p.space, p.idx as usize); - let ty = match opt_ty { - Some(t) => *t, - None => { - let span = self.span.unwrap_or(DUMMY_SP); - self.tcx().sess.span_bug( - span, - &format!("Type parameter `{:?}` ({:?}/{:?}/{}) out of range \ - when substituting (root type={:?}) substs={:?}", - p, - source_ty, - p.space, - p.idx, - self.root_ty, - self.substs)); - } - }; - - self.shift_regions_through_binders(ty) - } - - /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs - /// when we are substituting a type with escaping regions into a context where we have passed - /// through region binders. That's quite a mouthful. Let's see an example: - /// - /// ``` - /// type Func = fn(A); - /// type MetaFunc = for<'a> fn(Func<&'a int>) - /// ``` - /// - /// The type `MetaFunc`, when fully expanded, will be - /// - /// for<'a> fn(fn(&'a int)) - /// ^~ ^~ ^~~ - /// | | | - /// | | DebruijnIndex of 2 - /// Binders - /// - /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the - /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip - /// over the inner binder (remember that we count Debruijn indices from 1). However, in the - /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a - /// debruijn index of 1. It's only during the substitution that we can see we must increase the - /// depth by 1 to account for the binder that we passed through. 
- /// - /// As a second example, consider this twist: - /// - /// ``` - /// type FuncTuple = (A,fn(A)); - /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>) - /// ``` - /// - /// Here the final type will be: - /// - /// for<'a> fn((&'a int, fn(&'a int))) - /// ^~~ ^~~ - /// | | - /// DebruijnIndex of 1 | - /// DebruijnIndex of 2 - /// - /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the - /// first case we do not increase the Debruijn index and in the second case we do. The reason - /// is that only in the second case have we passed through a fn binder. - fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})", - ty, self.region_binders_passed, ty.has_escaping_regions()); - - if self.region_binders_passed == 0 || !ty.has_escaping_regions() { - return ty; - } - - let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty); - debug!("shift_regions: shifted result = {:?}", result); - - result - } - - fn shift_region_through_binders(&self, region: ty::Region) -> ty::Region { - ty::fold::shift_region(region, self.region_binders_passed) - } -} diff --git a/src/librustc/middle/traits/README.md b/src/librustc/middle/traits/README.md deleted file mode 100644 index 92982af92dcfe..0000000000000 --- a/src/librustc/middle/traits/README.md +++ /dev/null @@ -1,430 +0,0 @@ -# TRAIT RESOLUTION - -This document describes the general process and points out some non-obvious -things. - -## Major concepts - -Trait resolution is the process of pairing up an impl with each -reference to a trait. So, for example, if there is a generic function like: - - fn clone_slice(x: &[T]) -> Vec { ... 
} - -and then a call to that function: - - let v: Vec = clone_slice([1, 2, 3]) - -it is the job of trait resolution to figure out (in which case) -whether there exists an impl of `isize : Clone` - -Note that in some cases, like generic functions, we may not be able to -find a specific impl, but we can figure out that the caller must -provide an impl. To see what I mean, consider the body of `clone_slice`: - - fn clone_slice(x: &[T]) -> Vec { - let mut v = Vec::new(); - for e in &x { - v.push((*e).clone()); // (*) - } - } - -The line marked `(*)` is only legal if `T` (the type of `*e`) -implements the `Clone` trait. Naturally, since we don't know what `T` -is, we can't find the specific impl; but based on the bound `T:Clone`, -we can say that there exists an impl which the caller must provide. - -We use the term *obligation* to refer to a trait reference in need of -an impl. - -## Overview - -Trait resolution consists of three major parts: - -- SELECTION: Deciding how to resolve a specific obligation. For - example, selection might decide that a specific obligation can be - resolved by employing an impl which matches the self type, or by - using a parameter bound. In the case of an impl, Selecting one - obligation can create *nested obligations* because of where clauses - on the impl itself. It may also require evaluating those nested - obligations to resolve ambiguities. - -- FULFILLMENT: The fulfillment code is what tracks that obligations - are completely fulfilled. Basically it is a worklist of obligations - to be selected: once selection is successful, the obligation is - removed from the worklist and any nested obligations are enqueued. - -- COHERENCE: The coherence checks are intended to ensure that there - are never overlapping impls, where two impls could be used with - equal precedence. - -## Selection - -Selection is the process of deciding whether an obligation can be -resolved and, if so, how it is to be resolved (via impl, where clause, etc). 
-The main interface is the `select()` function, which takes an obligation -and returns a `SelectionResult`. There are three possible outcomes: - -- `Ok(Some(selection))` -- yes, the obligation can be resolved, and - `selection` indicates how. If the impl was resolved via an impl, - then `selection` may also indicate nested obligations that are required - by the impl. - -- `Ok(None)` -- we are not yet sure whether the obligation can be - resolved or not. This happens most commonly when the obligation - contains unbound type variables. - -- `Err(err)` -- the obligation definitely cannot be resolved due to a - type error, or because there are no impls that could possibly apply, - etc. - -The basic algorithm for selection is broken into two big phases: -candidate assembly and confirmation. - -### Candidate assembly - -Searches for impls/where-clauses/etc that might -possibly be used to satisfy the obligation. Each of those is called -a candidate. To avoid ambiguity, we want to find exactly one -candidate that is definitively applicable. In some cases, we may not -know whether an impl/where-clause applies or not -- this occurs when -the obligation contains unbound inference variables. - -The basic idea for candidate assembly is to do a first pass in which -we identify all possible candidates. During this pass, all that we do -is try and unify the type parameters. (In particular, we ignore any -nested where clauses.) Presuming that this unification succeeds, the -impl is added as a candidate. - -Once this first pass is done, we can examine the set of candidates. If -it is a singleton set, then we are done: this is the only impl in -scope that could possibly apply. Otherwise, we can winnow down the set -of candidates by using where clauses and other conditions. If this -reduced set yields a single, unambiguous entry, we're good to go, -otherwise the result is considered ambiguous. 
- -#### The basic process: Inferring based on the impls we see - -This process is easier if we work through some examples. Consider -the following trait: - -``` -trait Convert { - fn convert(&self) -> Target; -} -``` - -This trait just has one method. It's about as simple as it gets. It -converts from the (implicit) `Self` type to the `Target` type. If we -wanted to permit conversion between `isize` and `usize`, we might -implement `Convert` like so: - -```rust -impl Convert for isize { ... } // isize -> usize -impl Convert for usize { ... } // usize -> isize -``` - -Now imagine there is some code like the following: - -```rust -let x: isize = ...; -let y = x.convert(); -``` - -The call to convert will generate a trait reference `Convert<$Y> for -isize`, where `$Y` is the type variable representing the type of -`y`. When we match this against the two impls we can see, we will find -that only one remains: `Convert for isize`. Therefore, we can -select this impl, which will cause the type of `$Y` to be unified to -`usize`. (Note that while assembling candidates, we do the initial -unifications in a transaction, so that they don't affect one another.) - -There are tests to this effect in src/test/run-pass: - - traits-multidispatch-infer-convert-source-and-target.rs - traits-multidispatch-infer-convert-target.rs - -#### Winnowing: Resolving ambiguities - -But what happens if there are multiple impls where all the types -unify? Consider this example: - -```rust -trait Get { - fn get(&self) -> Self; -} - -impl Get for T { - fn get(&self) -> T { *self } -} - -impl Get for Box { - fn get(&self) -> Box { box get_it(&**self) } -} -``` - -What happens when we invoke `get_it(&box 1_u16)`, for example? In this -case, the `Self` type is `Box` -- that unifies with both impls, -because the first applies to all types, and the second to all -boxes. In the olden days we'd have called this ambiguous. 
But what we -do now is do a second *winnowing* pass that considers where clauses -and attempts to remove candidates -- in this case, the first impl only -applies if `Box : Copy`, which doesn't hold. After winnowing, -then, we are left with just one candidate, so we can proceed. There is -a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`. - -#### Matching - -The subroutines that decide whether a particular impl/where-clause/etc -applies to a particular obligation. At the moment, this amounts to -unifying the self types, but in the future we may also recursively -consider some of the nested obligations, in the case of an impl. - -#### Lifetimes and selection - -Because of how that lifetime inference works, it is not possible to -give back immediate feedback as to whether a unification or subtype -relationship between lifetimes holds or not. Therefore, lifetime -matching is *not* considered during selection. This is reflected in -the fact that subregion assignment is infallible. This may yield -lifetime constraints that will later be found to be in error (in -contrast, the non-lifetime-constraints have already been checked -during selection and can never cause an error, though naturally they -may lead to other errors downstream). - -#### Where clauses - -Besides an impl, the other major way to resolve an obligation is via a -where clause. The selection process is always given a *parameter -environment* which contains a list of where clauses, which are -basically obligations that can assume are satisfiable. We will iterate -over that list and check whether our current obligation can be found -in that list, and if so it is considered satisfied. More precisely, we -want to check whether there is a where-clause obligation that is for -the same trait (or some subtrait) and for which the self types match, -using the definition of *matching* given above. - -Consider this simple example: - - trait A1 { ... } - trait A2 : A1 { ... } - - trait B { ... 
} - - fn foo { ... } - -Clearly we can use methods offered by `A1`, `A2`, or `B` within the -body of `foo`. In each case, that will incur an obligation like `X : -A1` or `X : A2`. The parameter environment will contain two -where-clauses, `X : A2` and `X : B`. For each obligation, then, we -search this list of where-clauses. To resolve an obligation `X:A1`, -we would note that `X:A2` implies that `X:A1`. - -### Confirmation - -Confirmation unifies the output type parameters of the trait with the -values found in the obligation, possibly yielding a type error. If we -return to our example of the `Convert` trait from the previous -section, confirmation is where an error would be reported, because the -impl specified that `T` would be `usize`, but the obligation reported -`char`. Hence the result of selection would be an error. - -### Selection during translation - -During type checking, we do not store the results of trait selection. -We simply wish to verify that trait selection will succeed. Then -later, at trans time, when we have all concrete types available, we -can repeat the trait selection. In this case, we do not consider any -where-clauses to be in scope. We know that therefore each resolution -will resolve to a particular impl. - -One interesting twist has to do with nested obligations. In general, in trans, -we only need to do a "shallow" selection for an obligation. That is, we wish to -identify which impl applies, but we do not (yet) need to decide how to select -any nested obligations. Nonetheless, we *do* currently do a complete resolution, -and that is because it can sometimes inform the results of type inference. That is, -we do not have the full substitutions in terms of the type variables of the impl available -to us, so we must run trait selection to figure everything out. - -Here is an example: - - trait Foo { ... } - impl> Foo for Vec { ... } - - impl Bar for isize { ... 
} - -After one shallow round of selection for an obligation like `Vec -: Foo`, we would know which impl we want, and we would know that -`T=isize`, but we do not know the type of `U`. We must select the -nested obligation `isize : Bar` to find out that `U=usize`. - -It would be good to only do *just as much* nested resolution as -necessary. Currently, though, we just do a full resolution. - -# Higher-ranked trait bounds - -One of the more subtle concepts at work are *higher-ranked trait -bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`. -Let's walk through how selection on higher-ranked trait references -works. - -## Basic matching and skolemization leaks - -Let's walk through the test `compile-fail/hrtb-just-for-static.rs` to see -how it works. The test starts with the trait `Foo`: - -```rust -trait Foo { - fn foo(&self, x: X) { } -} -``` - -Let's say we have a function `want_hrtb` that wants a type which -implements `Foo<&'a isize>` for any `'a`: - -```rust -fn want_hrtb() where T : for<'a> Foo<&'a isize> { ... } -``` - -Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any -`'a`: - -```rust -struct AnyInt; -impl<'a> Foo<&'a isize> for AnyInt { } -``` - -And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the -answer to be yes. The algorithm for figuring it out is closely related -to the subtyping for higher-ranked types (which is described in -`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that -I recommend you read). - -1. Skolemize the obligation. -2. Match the impl against the skolemized obligation. -3. Check for skolemization leaks. - -[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/ - -So let's work through our example. The first thing we would do is to -skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0` -represents skolemized region #0). 
Note that now have no quantifiers; -in terms of the compiler type, this changes from a `ty::PolyTraitRef` -to a `TraitRef`. We would then create the `TraitRef` from the impl, -using fresh variables for it's bound regions (and thus getting -`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next -we relate the two trait refs, yielding a graph with the constraint -that `'0 == '$a`. Finally, we check for skolemization "leaks" -- a -leak is basically any attempt to relate a skolemized region to another -skolemized region, or to any region that pre-existed the impl match. -The leak check is done by searching from the skolemized region to find -the set of regions that it is related to in any way. This is called -the "taint" set. To pass the check, that set must consist *solely* of -itself and region variables from the impl. If the taint set includes -any other region, then the match is a failure. In this case, the taint -set for `'0` is `{'0, '$a}`, and hence the check will succeed. - -Let's consider a failure case. Imagine we also have a struct - -```rust -struct StaticInt; -impl Foo<&'static isize> for StaticInt; -``` - -We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be -considered unsatisfied. The check begins just as before. `'a` is -skolemized to `'0` and the impl trait reference is instantiated to -`Foo<&'static isize>`. When we relate those two, we get a constraint -like `'static == '0`. This means that the taint set for `'0` is `{'0, -'static}`, which fails the leak check. - -## Higher-ranked trait obligations - -Once the basic matching is done, we get to another interesting topic: -how to deal with impl obligations. I'll work through a simple example -here. 
Imagine we have the traits `Foo` and `Bar` and an associated impl: - -``` -trait Foo { - fn foo(&self, x: X) { } -} - -trait Bar { - fn bar(&self, x: X) { } -} - -impl Foo for F - where F : Bar -{ -} -``` - -Now let's say we have a obligation `for<'a> Foo<&'a isize>` and we match -this impl. What obligation is generated as a result? We want to get -`for<'a> Bar<&'a isize>`, but how does that happen? - -After the matching, we are in a position where we have a skolemized -substitution like `X => &'0 isize`. If we apply this substitution to the -impl obligations, we get `F : Bar<&'0 isize>`. Obviously this is not -directly usable because the skolemized region `'0` cannot leak out of -our computation. - -What we do is to create an inverse mapping from the taint set of `'0` -back to the original bound region (`'a`, here) that `'0` resulted -from. (This is done in `higher_ranked::plug_leaks`). We know that the -leak check passed, so this taint set consists solely of the skolemized -region itself plus various intermediate region variables. We then walk -the trait-reference and convert every region in that taint set back to -a late-bound region, so in this case we'd wind up with `for<'a> F : -Bar<&'a isize>`. - -# Caching and subtle considerations therewith - -In general we attempt to cache the results of trait selection. This -is a somewhat complex process. Part of the reason for this is that we -want to be able to cache results even when all the types in the trait -reference are not fully known. In that case, it may happen that the -trait selection process is also influencing type variables, so we have -to be able to not only cache the *result* of the selection process, -but *replay* its effects on the type variables. - -## An example - -The high-level idea of how the cache works is that we first replace -all unbound inference variables with skolemized versions. 
Therefore, -if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound -inference variable, we might replace it with `usize : Foo<%0>`, where -`%n` is a skolemized type. We would then look this up in the cache. -If we found a hit, the hit would tell us the immediate next step to -take in the selection process: i.e., apply impl #22, or apply where -clause `X : Foo`. Let's say in this case there is no hit. -Therefore, we search through impls and where clauses and so forth, and -we come to the conclusion that the only possible impl is this one, -with def-id 22: - - impl Foo for usize { ... } // Impl #22 - -We would then record in the cache `usize : Foo<%0> ==> -ImplCandidate(22)`. Next we would confirm `ImplCandidate(22)`, which -would (as a side-effect) unify `$1` with `isize`. - -Now, at some later time, we might come along and see a `usize : -Foo<$3>`. When skolemized, this would yield `usize : Foo<%0>`, just as -before, and hence the cache lookup would succeed, yielding -`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would -(as a side-effect) unify `$3` with `isize`. - -## Where clauses and the local vs global cache - -One subtle interaction is that the results of trait lookup will vary -depending on what where clauses are in scope. Therefore, we actually -have *two* caches, a local and a global cache. The local cache is -attached to the `ParameterEnvironment` and the global cache attached -to the `tcx`. We use the local cache whenever the result might depend -on the where clauses that are in scope. The determination of which -cache to use is done by the method `pick_candidate_cache` in -`select.rs`. At the moment, we use a very simple, conservative rule: -if there are any where-clauses in scope, then we use the local cache. -We used to try and draw finer-grained distinctions, but that led to a -serious of annoying and weird bugs like #22019 and #18290. 
This simple -rule seems to be pretty clearly safe and also still retains a very -high hit rate (~95% when compiling rustc). diff --git a/src/librustc/middle/traits/coherence.rs b/src/librustc/middle/traits/coherence.rs deleted file mode 100644 index 0f95aa74b6fd7..0000000000000 --- a/src/librustc/middle/traits/coherence.rs +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! See `README.md` for high-level documentation - -use super::Normalized; -use super::SelectionContext; -use super::ObligationCause; -use super::PredicateObligation; -use super::project; -use super::util; - -use middle::cstore::LOCAL_CRATE; -use middle::def_id::DefId; -use middle::subst::{Subst, Substs, TypeSpace}; -use middle::ty::{self, Ty}; -use middle::infer::{self, InferCtxt, TypeOrigin}; -use syntax::codemap::{DUMMY_SP, Span}; - -#[derive(Copy, Clone)] -struct InferIsLocal(bool); - -/// If there are types that satisfy both impls, returns a `TraitRef` -/// with those types substituted (by updating the given `infcx`) -pub fn overlapping_impls<'cx, 'tcx>(infcx: &InferCtxt<'cx, 'tcx>, - impl1_def_id: DefId, - impl2_def_id: DefId) - -> Option> -{ - debug!("impl_can_satisfy(\ - impl1_def_id={:?}, \ - impl2_def_id={:?})", - impl1_def_id, - impl2_def_id); - - let selcx = &mut SelectionContext::intercrate(infcx); - overlap(selcx, impl1_def_id, impl2_def_id) -} - -/// Can both impl `a` and impl `b` be satisfied by a common type (including -/// `where` clauses)? If so, returns a `TraitRef` that unifies the two impls. 
-fn overlap<'cx, 'tcx>(selcx: &mut SelectionContext<'cx, 'tcx>, - a_def_id: DefId, - b_def_id: DefId) - -> Option> -{ - debug!("overlap(a_def_id={:?}, b_def_id={:?})", - a_def_id, - b_def_id); - - let (a_trait_ref, a_obligations) = impl_trait_ref_and_oblig(selcx, - a_def_id, - util::fresh_type_vars_for_impl); - - let (b_trait_ref, b_obligations) = impl_trait_ref_and_oblig(selcx, - b_def_id, - util::fresh_type_vars_for_impl); - - debug!("overlap: a_trait_ref={:?} a_obligations={:?}", a_trait_ref, a_obligations); - - debug!("overlap: b_trait_ref={:?} b_obligations={:?}", b_trait_ref, b_obligations); - - // Do `a` and `b` unify? If not, no overlap. - if let Err(_) = infer::mk_eq_trait_refs(selcx.infcx(), - true, - TypeOrigin::Misc(DUMMY_SP), - a_trait_ref, - b_trait_ref) { - return None; - } - - debug!("overlap: unification check succeeded"); - - // Are any of the obligations unsatisfiable? If so, no overlap. - let infcx = selcx.infcx(); - let opt_failing_obligation = - a_obligations.iter() - .chain(&b_obligations) - .map(|o| infcx.resolve_type_vars_if_possible(o)) - .find(|o| !selcx.evaluate_obligation(o)); - - if let Some(failing_obligation) = opt_failing_obligation { - debug!("overlap: obligation unsatisfiable {:?}", failing_obligation); - return None - } - - Some(selcx.infcx().resolve_type_vars_if_possible(&a_trait_ref)) -} - -pub fn trait_ref_is_knowable<'tcx>(tcx: &ty::ctxt<'tcx>, trait_ref: &ty::TraitRef<'tcx>) -> bool -{ - debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref); - - // if the orphan rules pass, that means that no ancestor crate can - // impl this, so it's up to us. 
- if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() { - debug!("trait_ref_is_knowable: orphan check passed"); - return true; - } - - // if the trait is not marked fundamental, then it's always possible that - // an ancestor crate will impl this in the future, if they haven't - // already - if - trait_ref.def_id.krate != LOCAL_CRATE && - !tcx.has_attr(trait_ref.def_id, "fundamental") - { - debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); - return false; - } - - // find out when some downstream (or cousin) crate could impl this - // trait-ref, presuming that all the parameters were instantiated - // with downstream types. If not, then it could only be - // implemented by an upstream crate, which means that the impl - // must be visible to us, and -- since the trait is fundamental - // -- we can test. - orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() -} - -type SubstsFn = for<'a,'tcx> fn(infcx: &InferCtxt<'a, 'tcx>, - span: Span, - impl_def_id: DefId) - -> Substs<'tcx>; - -/// Instantiate fresh variables for all bound parameters of the impl -/// and return the impl trait ref with those variables substituted. 
-fn impl_trait_ref_and_oblig<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, - impl_def_id: DefId, - substs_fn: SubstsFn) - -> (ty::TraitRef<'tcx>, - Vec>) -{ - let impl_substs = - &substs_fn(selcx.infcx(), DUMMY_SP, impl_def_id); - let impl_trait_ref = - selcx.tcx().impl_trait_ref(impl_def_id).unwrap(); - let impl_trait_ref = - impl_trait_ref.subst(selcx.tcx(), impl_substs); - let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = - project::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref); - - let predicates = selcx.tcx().lookup_predicates(impl_def_id); - let predicates = predicates.instantiate(selcx.tcx(), impl_substs); - let Normalized { value: predicates, obligations: normalization_obligations2 } = - project::normalize(selcx, ObligationCause::dummy(), &predicates); - let impl_obligations = - util::predicates_for_generics(ObligationCause::dummy(), 0, &predicates); - - let impl_obligations: Vec<_> = - impl_obligations.into_iter() - .chain(normalization_obligations1) - .chain(normalization_obligations2) - .collect(); - - (impl_trait_ref, impl_obligations) -} - -pub enum OrphanCheckErr<'tcx> { - NoLocalInputType, - UncoveredTy(Ty<'tcx>), -} - -/// Checks the coherence orphan rules. `impl_def_id` should be the -/// def-id of a trait impl. To pass, either the trait must be local, or else -/// two conditions must be satisfied: -/// -/// 1. All type parameters in `Self` must be "covered" by some local type constructor. -/// 2. Some local type must appear in `Self`. -pub fn orphan_check<'tcx>(tcx: &ty::ctxt<'tcx>, - impl_def_id: DefId) - -> Result<(), OrphanCheckErr<'tcx>> -{ - debug!("orphan_check({:?})", impl_def_id); - - // We only except this routine to be invoked on implementations - // of a trait, not inherent implementations. - let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); - debug!("orphan_check: trait_ref={:?}", trait_ref); - - // If the *trait* is local to the crate, ok. 
- if trait_ref.def_id.is_local() { - debug!("trait {:?} is local to current crate", - trait_ref.def_id); - return Ok(()); - } - - orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) -} - -fn orphan_check_trait_ref<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_ref: &ty::TraitRef<'tcx>, - infer_is_local: InferIsLocal) - -> Result<(), OrphanCheckErr<'tcx>> -{ - debug!("orphan_check_trait_ref(trait_ref={:?}, infer_is_local={})", - trait_ref, infer_is_local.0); - - // First, create an ordered iterator over all the type parameters to the trait, with the self - // type appearing first. - let input_tys = Some(trait_ref.self_ty()); - let input_tys = input_tys.iter().chain(trait_ref.substs.types.get_slice(TypeSpace)); - - // Find the first input type that either references a type parameter OR - // some local type. - for input_ty in input_tys { - if ty_is_local(tcx, input_ty, infer_is_local) { - debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); - - // First local input type. Check that there are no - // uncovered type parameters. - let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local); - for uncovered_ty in uncovered_tys { - if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) { - debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); - return Err(OrphanCheckErr::UncoveredTy(param)); - } - } - - // OK, found local type, all prior types upheld invariant. - return Ok(()); - } - - // Otherwise, enforce invariant that there are no type - // parameters reachable. - if !infer_is_local.0 { - if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) { - debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); - return Err(OrphanCheckErr::UncoveredTy(param)); - } - } - } - - // If we exit above loop, never found a local type. 
- debug!("orphan_check_trait_ref: no local type"); - return Err(OrphanCheckErr::NoLocalInputType); -} - -fn uncovered_tys<'tcx>(tcx: &ty::ctxt<'tcx>, - ty: Ty<'tcx>, - infer_is_local: InferIsLocal) - -> Vec> -{ - if ty_is_local_constructor(tcx, ty, infer_is_local) { - vec![] - } else if fundamental_ty(tcx, ty) { - ty.walk_shallow() - .flat_map(|t| uncovered_tys(tcx, t, infer_is_local)) - .collect() - } else { - vec![ty] - } -} - -fn is_type_parameter<'tcx>(ty: Ty<'tcx>) -> bool { - match ty.sty { - // FIXME(#20590) straighten story about projection types - ty::TyProjection(..) | ty::TyParam(..) => true, - _ => false, - } -} - -fn ty_is_local<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, infer_is_local: InferIsLocal) -> bool -{ - ty_is_local_constructor(tcx, ty, infer_is_local) || - fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local)) -} - -fn fundamental_ty<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool -{ - match ty.sty { - ty::TyBox(..) | ty::TyRef(..) => - true, - ty::TyEnum(def, _) | ty::TyStruct(def, _) => - def.is_fundamental(), - ty::TyTrait(ref data) => - tcx.has_attr(data.principal_def_id(), "fundamental"), - _ => - false - } -} - -fn ty_is_local_constructor<'tcx>(tcx: &ty::ctxt<'tcx>, - ty: Ty<'tcx>, - infer_is_local: InferIsLocal) - -> bool -{ - debug!("ty_is_local_constructor({:?})", ty); - - match ty.sty { - ty::TyBool | - ty::TyChar | - ty::TyInt(..) | - ty::TyUint(..) | - ty::TyFloat(..) | - ty::TyStr | - ty::TyBareFn(..) | - ty::TyArray(..) | - ty::TySlice(..) | - ty::TyRawPtr(..) | - ty::TyRef(..) | - ty::TyTuple(..) | - ty::TyParam(..) | - ty::TyProjection(..) => { - false - } - - ty::TyInfer(..) 
=> { - infer_is_local.0 - } - - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { - def.did.is_local() - } - - ty::TyBox(_) => { // Box - let krate = tcx.lang_items.owned_box().map(|d| d.krate); - krate == Some(LOCAL_CRATE) - } - - ty::TyTrait(ref tt) => { - tt.principal_def_id().is_local() - } - - ty::TyError => { - true - } - - ty::TyClosure(..) => { - tcx.sess.bug( - &format!("ty_is_local invoked on unexpected type: {:?}", - ty)) - } - } -} diff --git a/src/librustc/middle/traits/error_reporting.rs b/src/librustc/middle/traits/error_reporting.rs deleted file mode 100644 index d09bbc37fe468..0000000000000 --- a/src/librustc/middle/traits/error_reporting.rs +++ /dev/null @@ -1,807 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::{ - FulfillmentError, - FulfillmentErrorCode, - MismatchedProjectionTypes, - Obligation, - ObligationCauseCode, - OutputTypeParameterMismatch, - TraitNotObjectSafe, - PredicateObligation, - SelectionError, - ObjectSafetyViolation, - MethodViolationCode, - object_safety_violations, -}; - -use fmt_macros::{Parser, Piece, Position}; -use middle::def_id::DefId; -use middle::infer::InferCtxt; -use middle::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, Ty, TypeFoldable}; -use middle::ty::fast_reject; -use util::nodemap::{FnvHashMap, FnvHashSet}; - -use std::cmp; -use std::fmt; -use syntax::attr::{AttributeMethods, AttrMetaMethods}; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; - -#[derive(Debug, PartialEq, Eq, Hash)] -pub struct TraitErrorKey<'tcx> { - span: Span, - predicate: ty::Predicate<'tcx> -} - -impl<'tcx> TraitErrorKey<'tcx> { - fn from_error<'a>(infcx: &InferCtxt<'a, 'tcx>, - e: &FulfillmentError<'tcx>) -> Self { - let predicate = - infcx.resolve_type_vars_if_possible(&e.obligation.predicate); - TraitErrorKey { - span: e.obligation.cause.span, - predicate: infcx.tcx.erase_regions(&predicate) - } - } -} - -pub fn report_fulfillment_errors<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - errors: &Vec>) { - for error in errors { - report_fulfillment_error(infcx, error); - } -} - -fn report_fulfillment_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - error: &FulfillmentError<'tcx>) { - let error_key = TraitErrorKey::from_error(infcx, error); - debug!("report_fulfillment_errors({:?}) - key={:?}", - error, error_key); - if !infcx.reported_trait_errors.borrow_mut().insert(error_key) { - debug!("report_fulfillment_errors: skipping duplicate"); - return; - } - match error.code { - FulfillmentErrorCode::CodeSelectionError(ref e) => { - report_selection_error(infcx, &error.obligation, e); - } - FulfillmentErrorCode::CodeProjectionError(ref e) => { - report_projection_error(infcx, &error.obligation, e); - } - FulfillmentErrorCode::CodeAmbiguity 
=> { - maybe_report_ambiguity(infcx, &error.obligation); - } - } -} - -pub fn report_projection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - obligation: &PredicateObligation<'tcx>, - error: &MismatchedProjectionTypes<'tcx>) -{ - let predicate = - infcx.resolve_type_vars_if_possible(&obligation.predicate); - - // The TyError created by normalize_to_error can end up being unified - // into all obligations: for example, if our obligation is something - // like `$X = <() as Foo<$X>>::Out` and () does not implement Foo<_>, - // then $X will be unified with TyError, but the error still needs to be - // reported. - if !infcx.tcx.sess.has_errors() || !predicate.references_error() { - let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0271, - "type mismatch resolving `{}`: {}", - predicate, - error.err); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } -} - -fn report_on_unimplemented<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - trait_ref: &TraitRef<'tcx>, - span: Span) -> Option { - let def_id = trait_ref.def_id; - let mut report = None; - for item in infcx.tcx.get_attrs(def_id).iter() { - if item.check_name("rustc_on_unimplemented") { - let err_sp = item.meta().span.substitute_dummy(span); - let def = infcx.tcx.lookup_trait_def(def_id); - let trait_str = def.trait_ref.to_string(); - if let Some(ref istring) = item.value_str() { - let mut generic_map = def.generics.types.iter_enumerated() - .map(|(param, i, gen)| { - (gen.name.as_str().to_string(), - trait_ref.substs.types.get(param, i) - .to_string()) - }).collect::>(); - generic_map.insert("Self".to_string(), - trait_ref.self_ty().to_string()); - let parser = Parser::new(&istring); - let mut errored = false; - let err: String = parser.filter_map(|p| { - match p { - Piece::String(s) => Some(s), - Piece::NextArgument(a) => match a.position { - Position::ArgumentNamed(s) => match generic_map.get(s) { - Some(val) => Some(val), - None => { - span_err!(infcx.tcx.sess, err_sp, E0272, - 
"the #[rustc_on_unimplemented] \ - attribute on \ - trait definition for {} refers to \ - non-existent type parameter {}", - trait_str, s); - errored = true; - None - } - }, - _ => { - span_err!(infcx.tcx.sess, err_sp, E0273, - "the #[rustc_on_unimplemented] \ - attribute on \ - trait definition for {} must have named \ - format arguments, \ - eg `#[rustc_on_unimplemented = \ - \"foo {{T}}\"]`", - trait_str); - errored = true; - None - } - } - } - }).collect(); - // Report only if the format string checks out - if !errored { - report = Some(err); - } - } else { - span_err!(infcx.tcx.sess, err_sp, E0274, - "the #[rustc_on_unimplemented] attribute on \ - trait definition for {} must have a value, \ - eg `#[rustc_on_unimplemented = \"foo\"]`", - trait_str); - } - break; - } - } - report -} - -/// Reports that an overflow has occurred and halts compilation. We -/// halt compilation unconditionally because it is important that -/// overflows never be masked -- they basically represent computations -/// whose result could not be truly determined and thus we can't say -/// if the program type checks or not -- and they are unusual -/// occurrences in any case. -pub fn report_overflow_error<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, - obligation: &Obligation<'tcx, T>, - suggest_increasing_limit: bool) - -> ! - where T: fmt::Display + TypeFoldable<'tcx> -{ - let predicate = - infcx.resolve_type_vars_if_possible(&obligation.predicate); - let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0275, - "overflow evaluating the requirement `{}`", - predicate); - - if suggest_increasing_limit { - suggest_new_overflow_limit(infcx.tcx, &mut err, obligation.cause.span); - } - - note_obligation_cause(infcx, &mut err, obligation); - - err.emit(); - infcx.tcx.sess.abort_if_errors(); - unreachable!(); -} - -/// Reports that a cycle was detected which led to overflow and halts -/// compilation. 
This is equivalent to `report_overflow_error` except -/// that we can give a more helpful error message (and, in particular, -/// we do not suggest increasing the overflow limit, which is not -/// going to help). -pub fn report_overflow_error_cycle<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - cycle: &Vec>) - -> ! -{ - assert!(cycle.len() > 1); - - debug!("report_overflow_error_cycle(cycle length = {})", cycle.len()); - - let cycle = infcx.resolve_type_vars_if_possible(cycle); - - debug!("report_overflow_error_cycle: cycle={:?}", cycle); - - assert_eq!(&cycle[0].predicate, &cycle.last().unwrap().predicate); - - try_report_overflow_error_type_of_infinite_size(infcx, &cycle); - report_overflow_error(infcx, &cycle[0], false); -} - -/// If a cycle results from evaluated whether something is Sized, that -/// is a particular special case that always results from a struct or -/// enum definition that lacks indirection (e.g., `struct Foo { x: Foo -/// }`). We wish to report a targeted error for this case. -pub fn try_report_overflow_error_type_of_infinite_size<'a, 'tcx>( - infcx: &InferCtxt<'a, 'tcx>, - cycle: &[PredicateObligation<'tcx>]) -{ - let sized_trait = match infcx.tcx.lang_items.sized_trait() { - Some(v) => v, - None => return, - }; - let top_is_sized = { - match cycle[0].predicate { - ty::Predicate::Trait(ref data) => data.def_id() == sized_trait, - _ => false, - } - }; - if !top_is_sized { - return; - } - - // The only way to have a type of infinite size is to have, - // somewhere, a struct/enum type involved. Identify all such types - // and report the cycle to the user. - - let struct_enum_tys: Vec<_> = - cycle.iter() - .flat_map(|obligation| match obligation.predicate { - ty::Predicate::Trait(ref data) => { - assert_eq!(data.def_id(), sized_trait); - let self_ty = data.skip_binder().trait_ref.self_ty(); // (*) - // (*) ok to skip binder because this is just - // error reporting and regions don't really - // matter - match self_ty.sty { - ty::TyEnum(..) 
| ty::TyStruct(..) => Some(self_ty), - _ => None, - } - } - _ => { - infcx.tcx.sess.span_bug(obligation.cause.span, - &format!("Sized cycle involving non-trait-ref: {:?}", - obligation.predicate)); - } - }) - .collect(); - - assert!(!struct_enum_tys.is_empty()); - - // This is a bit tricky. We want to pick a "main type" in the - // listing that is local to the current crate, so we can give a - // good span to the user. But it might not be the first one in our - // cycle list. So find the first one that is local and then - // rotate. - let (main_index, main_def_id) = - struct_enum_tys.iter() - .enumerate() - .filter_map(|(index, ty)| match ty.sty { - ty::TyEnum(adt_def, _) | ty::TyStruct(adt_def, _) - if adt_def.did.is_local() => - Some((index, adt_def.did)), - _ => - None, - }) - .next() - .unwrap(); // should always be SOME local type involved! - - // Rotate so that the "main" type is at index 0. - let struct_enum_tys: Vec<_> = - struct_enum_tys.iter() - .cloned() - .skip(main_index) - .chain(struct_enum_tys.iter().cloned().take(main_index)) - .collect(); - - let tcx = infcx.tcx; - let mut err = recursive_type_with_infinite_size_error(tcx, main_def_id); - let len = struct_enum_tys.len(); - if len > 2 { - let span = tcx.map.span_if_local(main_def_id).unwrap(); - err.fileline_note(span, - &format!("type `{}` is embedded within `{}`...", - struct_enum_tys[0], - struct_enum_tys[1])); - for &next_ty in &struct_enum_tys[1..len-1] { - err.fileline_note(span, - &format!("...which in turn is embedded within `{}`...", next_ty)); - } - err.fileline_note(span, - &format!("...which in turn is embedded within `{}`, \ - completing the cycle.", - struct_enum_tys[len-1])); - } - err.emit(); - infcx.tcx.sess.abort_if_errors(); - unreachable!(); -} - -pub fn recursive_type_with_infinite_size_error<'tcx>(tcx: &ty::ctxt<'tcx>, - type_def_id: DefId) - -> DiagnosticBuilder<'tcx> -{ - assert!(type_def_id.is_local()); - let span = tcx.map.span_if_local(type_def_id).unwrap(); - let mut err 
= struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size", - tcx.item_path_str(type_def_id)); - err.fileline_help(span, &format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \ - at some point to make `{}` representable", - tcx.item_path_str(type_def_id))); - err -} - -pub fn report_selection_error<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - obligation: &PredicateObligation<'tcx>, - error: &SelectionError<'tcx>) -{ - match *error { - SelectionError::Unimplemented => { - if let ObligationCauseCode::CompareImplMethodObligation = obligation.cause.code { - span_err!( - infcx.tcx.sess, obligation.cause.span, E0276, - "the requirement `{}` appears on the impl \ - method but not on the corresponding trait method", - obligation.predicate); - } else { - match obligation.predicate { - ty::Predicate::Trait(ref trait_predicate) => { - let trait_predicate = - infcx.resolve_type_vars_if_possible(trait_predicate); - - if !infcx.tcx.sess.has_errors() || !trait_predicate.references_error() { - let trait_ref = trait_predicate.to_poly_trait_ref(); - let mut err = struct_span_err!( - infcx.tcx.sess, obligation.cause.span, E0277, - "the trait `{}` is not implemented for the type `{}`", - trait_ref, trait_ref.self_ty()); - - // Check if it has a custom "#[rustc_on_unimplemented]" - // error message, report with that message if it does - let custom_note = report_on_unimplemented(infcx, &trait_ref.0, - obligation.cause.span); - if let Some(s) = custom_note { - err.fileline_note(obligation.cause.span, &s); - } else { - let simp = fast_reject::simplify_type(infcx.tcx, - trait_ref.self_ty(), - true); - let mut impl_candidates = Vec::new(); - let trait_def = infcx.tcx.lookup_trait_def(trait_ref.def_id()); - - match simp { - Some(simp) => trait_def.for_each_impl(infcx.tcx, |def_id| { - let imp = infcx.tcx.impl_trait_ref(def_id).unwrap(); - let imp_simp = fast_reject::simplify_type(infcx.tcx, - imp.self_ty(), - true); - if let Some(imp_simp) = imp_simp { - if simp != 
imp_simp { - return; - } - } - impl_candidates.push(imp); - }), - None => trait_def.for_each_impl(infcx.tcx, |def_id| { - impl_candidates.push( - infcx.tcx.impl_trait_ref(def_id).unwrap()); - }) - }; - - if impl_candidates.len() > 0 { - err.fileline_help( - obligation.cause.span, - &format!("the following implementations were found:")); - - let end = cmp::min(4, impl_candidates.len()); - for candidate in &impl_candidates[0..end] { - err.fileline_help(obligation.cause.span, - &format!(" {:?}", candidate)); - } - if impl_candidates.len() > 4 { - err.fileline_help(obligation.cause.span, - &format!("and {} others", - impl_candidates.len()-4)); - } - } - } - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - }, - ty::Predicate::Equate(ref predicate) => { - let predicate = infcx.resolve_type_vars_if_possible(predicate); - let err = infcx.equality_predicate(obligation.cause.span, - &predicate).err().unwrap(); - let mut err = struct_span_err!( - infcx.tcx.sess, obligation.cause.span, E0278, - "the requirement `{}` is not satisfied (`{}`)", - predicate, - err); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - - ty::Predicate::RegionOutlives(ref predicate) => { - let predicate = infcx.resolve_type_vars_if_possible(predicate); - let err = infcx.region_outlives_predicate(obligation.cause.span, - &predicate).err().unwrap(); - let mut err = struct_span_err!( - infcx.tcx.sess, obligation.cause.span, E0279, - "the requirement `{}` is not satisfied (`{}`)", - predicate, - err); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - - ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) 
=> { - let predicate = - infcx.resolve_type_vars_if_possible(&obligation.predicate); - let mut err = struct_span_err!( - infcx.tcx.sess, obligation.cause.span, E0280, - "the requirement `{}` is not satisfied", - predicate); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - - ty::Predicate::ObjectSafe(trait_def_id) => { - let violations = object_safety_violations( - infcx.tcx, trait_def_id); - let mut err = report_object_safety_error(infcx.tcx, - obligation.cause.span, - trait_def_id, - violations); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - - ty::Predicate::WellFormed(ty) => { - // WF predicates cannot themselves make - // errors. They can only block due to - // ambiguity; otherwise, they always - // degenerate into other obligations - // (which may fail). - infcx.tcx.sess.span_bug( - obligation.cause.span, - &format!("WF predicate not satisfied for {:?}", ty)); - } - } - } - } - - OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => { - let expected_trait_ref = infcx.resolve_type_vars_if_possible(&*expected_trait_ref); - let actual_trait_ref = infcx.resolve_type_vars_if_possible(&*actual_trait_ref); - if !actual_trait_ref.self_ty().references_error() { - let mut err = struct_span_err!( - infcx.tcx.sess, obligation.cause.span, E0281, - "type mismatch: the type `{}` implements the trait `{}`, \ - but the trait `{}` is required ({})", - expected_trait_ref.self_ty(), - expected_trait_ref, - actual_trait_ref, - e); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - } - - TraitNotObjectSafe(did) => { - let violations = object_safety_violations(infcx.tcx, did); - let mut err = report_object_safety_error(infcx.tcx, obligation.cause.span, did, - violations); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - } -} - -pub fn report_object_safety_error<'tcx>(tcx: &ty::ctxt<'tcx>, - span: Span, - trait_def_id: DefId, - violations: Vec) - -> 
DiagnosticBuilder<'tcx> -{ - let mut err = struct_span_err!( - tcx.sess, span, E0038, - "the trait `{}` cannot be made into an object", - tcx.item_path_str(trait_def_id)); - - let mut reported_violations = FnvHashSet(); - for violation in violations { - if !reported_violations.insert(violation.clone()) { - continue; - } - match violation { - ObjectSafetyViolation::SizedSelf => { - err.fileline_note( - span, - "the trait cannot require that `Self : Sized`"); - } - - ObjectSafetyViolation::SupertraitSelf => { - err.fileline_note( - span, - "the trait cannot use `Self` as a type parameter \ - in the supertrait listing"); - } - - ObjectSafetyViolation::Method(method, - MethodViolationCode::StaticMethod) => { - err.fileline_note( - span, - &format!("method `{}` has no receiver", - method.name)); - } - - ObjectSafetyViolation::Method(method, - MethodViolationCode::ReferencesSelf) => { - err.fileline_note( - span, - &format!("method `{}` references the `Self` type \ - in its arguments or return type", - method.name)); - } - - ObjectSafetyViolation::Method(method, - MethodViolationCode::Generic) => { - err.fileline_note( - span, - &format!("method `{}` has generic type parameters", - method.name)); - } - } - } - err -} - -pub fn maybe_report_ambiguity<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - obligation: &PredicateObligation<'tcx>) { - // Unable to successfully determine, probably means - // insufficient type information, but could mean - // ambiguous impls. The latter *ought* to be a - // coherence violation, so we don't report it here. 
- - let predicate = infcx.resolve_type_vars_if_possible(&obligation.predicate); - - debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})", - predicate, - obligation); - - match predicate { - ty::Predicate::Trait(ref data) => { - let trait_ref = data.to_poly_trait_ref(); - let self_ty = trait_ref.self_ty(); - let all_types = &trait_ref.substs().types; - if all_types.references_error() { - } else { - // Typically, this ambiguity should only happen if - // there are unresolved type inference variables - // (otherwise it would suggest a coherence - // failure). But given #21974 that is not necessarily - // the case -- we can have multiple where clauses that - // are only distinguished by a region, which results - // in an ambiguity even when all types are fully - // known, since we don't dispatch based on region - // relationships. - - // This is kind of a hack: it frequently happens that some earlier - // error prevents types from being fully inferred, and then we get - // a bunch of uninteresting errors saying something like " doesn't implement Sized". It may even be true that we - // could just skip over all checks where the self-ty is an - // inference variable, but I was afraid that there might be an - // inference variable created, registered as an obligation, and - // then never forced by writeback, and hence by skipping here we'd - // be ignoring the fact that we don't KNOW the type works - // out. Though even that would probably be harmless, given that - // we're only talking about builtin traits, which are known to be - // inhabited. But in any case I just threw in this check for - // has_errors() to be sure that compilation isn't happening - // anyway. In that case, why inundate the user. 
- if !infcx.tcx.sess.has_errors() { - if - infcx.tcx.lang_items.sized_trait() - .map_or(false, |sized_id| sized_id == trait_ref.def_id()) - { - need_type_info(infcx, obligation.cause.span, self_ty); - } else { - let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0283, - "type annotations required: \ - cannot resolve `{}`", - predicate); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - } - } - } - - ty::Predicate::WellFormed(ty) => { - // Same hacky approach as above to avoid deluging user - // with error messages. - if !ty.references_error() && !infcx.tcx.sess.has_errors() { - need_type_info(infcx, obligation.cause.span, ty); - } - } - - _ => { - if !infcx.tcx.sess.has_errors() { - let mut err = struct_span_err!(infcx.tcx.sess, obligation.cause.span, E0284, - "type annotations required: cannot resolve `{}`", - predicate); - note_obligation_cause(infcx, &mut err, obligation); - err.emit(); - } - } - } -} - -fn need_type_info<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - span: Span, - ty: Ty<'tcx>) -{ - span_err!(infcx.tcx.sess, span, E0282, - "unable to infer enough type information about `{}`; \ - type annotations or generic parameter binding required", - ty); -} - -fn note_obligation_cause<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, - err: &mut DiagnosticBuilder, - obligation: &Obligation<'tcx, T>) - where T: fmt::Display -{ - note_obligation_cause_code(infcx, - err, - &obligation.predicate, - obligation.cause.span, - &obligation.cause.code); -} - -fn note_obligation_cause_code<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, - err: &mut DiagnosticBuilder, - predicate: &T, - cause_span: Span, - cause_code: &ObligationCauseCode<'tcx>) - where T: fmt::Display -{ - let tcx = infcx.tcx; - match *cause_code { - ObligationCauseCode::MiscObligation => { } - ObligationCauseCode::SliceOrArrayElem => { - err.fileline_note( - cause_span, - "slice and array elements must have `Sized` type"); - } - ObligationCauseCode::ProjectionWf(data) => { - 
err.fileline_note( - cause_span, - &format!("required so that the projection `{}` is well-formed", - data)); - } - ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => { - err.fileline_note( - cause_span, - &format!("required so that reference `{}` does not outlive its referent", - ref_ty)); - } - ObligationCauseCode::ItemObligation(item_def_id) => { - let item_name = tcx.item_path_str(item_def_id); - err.fileline_note( - cause_span, - &format!("required by `{}`", item_name)); - } - ObligationCauseCode::ObjectCastObligation(object_ty) => { - err.fileline_note( - cause_span, - &format!( - "required for the cast to the object type `{}`", - infcx.ty_to_string(object_ty))); - } - ObligationCauseCode::RepeatVec => { - err.fileline_note( - cause_span, - "the `Copy` trait is required because the \ - repeated element will be copied"); - } - ObligationCauseCode::VariableType(_) => { - err.fileline_note( - cause_span, - "all local variables must have a statically known size"); - } - ObligationCauseCode::ReturnType => { - err.fileline_note( - cause_span, - "the return type of a function must have a \ - statically known size"); - } - ObligationCauseCode::AssignmentLhsSized => { - err.fileline_note( - cause_span, - "the left-hand-side of an assignment must have a statically known size"); - } - ObligationCauseCode::StructInitializerSized => { - err.fileline_note( - cause_span, - "structs must have a statically known size to be initialized"); - } - ObligationCauseCode::ClosureCapture(var_id, _, builtin_bound) => { - let def_id = tcx.lang_items.from_builtin_kind(builtin_bound).unwrap(); - let trait_name = tcx.item_path_str(def_id); - let name = tcx.local_var_name_str(var_id); - err.fileline_note( - cause_span, - &format!("the closure that captures `{}` requires that all captured variables \ - implement the trait `{}`", - name, - trait_name)); - } - ObligationCauseCode::FieldSized => { - err.fileline_note( - cause_span, - "only the last field of a struct or enum variant \ - 
may have a dynamically sized type"); - } - ObligationCauseCode::SharedStatic => { - err.fileline_note( - cause_span, - "shared static variables must have a type that implements `Sync`"); - } - ObligationCauseCode::BuiltinDerivedObligation(ref data) => { - let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref); - err.fileline_note( - cause_span, - &format!("required because it appears within the type `{}`", - parent_trait_ref.0.self_ty())); - let parent_predicate = parent_trait_ref.to_predicate(); - note_obligation_cause_code(infcx, - err, - &parent_predicate, - cause_span, - &*data.parent_code); - } - ObligationCauseCode::ImplDerivedObligation(ref data) => { - let parent_trait_ref = infcx.resolve_type_vars_if_possible(&data.parent_trait_ref); - err.fileline_note( - cause_span, - &format!("required because of the requirements on the impl of `{}` for `{}`", - parent_trait_ref, - parent_trait_ref.0.self_ty())); - let parent_predicate = parent_trait_ref.to_predicate(); - note_obligation_cause_code(infcx, - err, - &parent_predicate, - cause_span, - &*data.parent_code); - } - ObligationCauseCode::CompareImplMethodObligation => { - err.fileline_note( - cause_span, - &format!("the requirement `{}` appears on the impl method \ - but not on the corresponding trait method", - predicate)); - } - } -} - -fn suggest_new_overflow_limit(tcx: &ty::ctxt, err:&mut DiagnosticBuilder, span: Span) { - let current_limit = tcx.sess.recursion_limit.get(); - let suggested_limit = current_limit * 2; - err.fileline_note( - span, - &format!( - "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate", - suggested_limit)); -} diff --git a/src/librustc/middle/traits/fulfill.rs b/src/librustc/middle/traits/fulfill.rs deleted file mode 100644 index bdf1c4645c0ed..0000000000000 --- a/src/librustc/middle/traits/fulfill.rs +++ /dev/null @@ -1,636 +0,0 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::infer::InferCtxt; -use middle::ty::{self, Ty, TypeFoldable}; -use rustc_data_structures::obligation_forest::{Backtrace, ObligationForest, Error}; -use std::iter; -use syntax::ast; -use util::common::ErrorReported; -use util::nodemap::{FnvHashMap, FnvHashSet, NodeMap}; - -use super::CodeAmbiguity; -use super::CodeProjectionError; -use super::CodeSelectionError; -use super::is_object_safe; -use super::FulfillmentError; -use super::FulfillmentErrorCode; -use super::ObligationCause; -use super::PredicateObligation; -use super::project; -use super::report_overflow_error_cycle; -use super::select::SelectionContext; -use super::Unimplemented; -use super::util::predicate_for_builtin_bound; - -pub struct FulfilledPredicates<'tcx> { - set: FnvHashSet> -} - -/// The fulfillment context is used to drive trait resolution. It -/// consists of a list of obligations that must be (eventually) -/// satisfied. The job is to track which are satisfied, which yielded -/// errors, and which are still pending. At any point, users can call -/// `select_where_possible`, and the fulfilment context will try to do -/// selection, retaining only those obligations that remain -/// ambiguous. This may be helpful in pushing type inference -/// along. Once all type inference constraints have been generated, the -/// method `select_all_or_error` can be used to report any remaining -/// ambiguous cases as errors. -pub struct FulfillmentContext<'tcx> { - // a simple cache that aims to cache *exact duplicate obligations* - // and avoid adding them twice. 
This serves a different purpose - // than the `SelectionCache`: it avoids duplicate errors and - // permits recursive obligations, which are often generated from - // traits like `Send` et al. - // - // Note that because of type inference, a predicate can still - // occur twice in the predicates list, for example when 2 - // initially-distinct type variables are unified after being - // inserted. Deduplicating the predicate set on selection had a - // significant performance cost the last time I checked. - duplicate_set: FulfilledPredicates<'tcx>, - - // A list of all obligations that have been registered with this - // fulfillment context. - predicates: ObligationForest>, - - // A set of constraints that regionck must validate. Each - // constraint has the form `T:'a`, meaning "some type `T` must - // outlive the lifetime 'a". These constraints derive from - // instantiated type parameters. So if you had a struct defined - // like - // - // struct Foo { ... } - // - // then in some expression `let x = Foo { ... }` it will - // instantiate the type parameter `T` with a fresh type `$0`. At - // the same time, it will record a region obligation of - // `$0:'static`. This will get checked later by regionck. (We - // can't generally check these things right away because we have - // to wait until types are resolved.) - // - // These are stored in a map keyed to the id of the innermost - // enclosing fn body / static initializer expression. This is - // because the location where the obligation was incurred can be - // relevant with respect to which sublifetime assumptions are in - // place. The reason that we store under the fn-id, and not - // something more fine-grained, is so that it is easier for - // regionck to be sure that it has found *all* the region - // obligations (otherwise, it's easy to fail to walk to a - // particular node-id). 
- region_obligations: NodeMap>>, -} - -#[derive(Clone)] -pub struct RegionObligation<'tcx> { - pub sub_region: ty::Region, - pub sup_type: Ty<'tcx>, - pub cause: ObligationCause<'tcx>, -} - -#[derive(Clone, Debug)] -pub struct PendingPredicateObligation<'tcx> { - pub obligation: PredicateObligation<'tcx>, - pub stalled_on: Vec>, -} - -impl<'tcx> FulfillmentContext<'tcx> { - /// Creates a new fulfillment context. - pub fn new() -> FulfillmentContext<'tcx> { - FulfillmentContext { - duplicate_set: FulfilledPredicates::new(), - predicates: ObligationForest::new(), - region_obligations: NodeMap(), - } - } - - /// "Normalize" a projection type `::X` by - /// creating a fresh type variable `$0` as well as a projection - /// predicate `::X == $0`. When the - /// inference engine runs, it will attempt to find an impl of - /// `SomeTrait` or a where clause that lets us unify `$0` with - /// something concrete. If this fails, we'll unify `$0` with - /// `projection_ty` again. - pub fn normalize_projection_type<'a>(&mut self, - infcx: &InferCtxt<'a,'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>) - -> Ty<'tcx> - { - debug!("normalize_associated_type(projection_ty={:?})", - projection_ty); - - assert!(!projection_ty.has_escaping_regions()); - - // FIXME(#20304) -- cache - - let mut selcx = SelectionContext::new(infcx); - let normalized = project::normalize_projection_type(&mut selcx, projection_ty, cause, 0); - - for obligation in normalized.obligations { - self.register_predicate_obligation(infcx, obligation); - } - - debug!("normalize_associated_type: result={:?}", normalized.value); - - normalized.value - } - - pub fn register_builtin_bound<'a>(&mut self, - infcx: &InferCtxt<'a,'tcx>, - ty: Ty<'tcx>, - builtin_bound: ty::BuiltinBound, - cause: ObligationCause<'tcx>) - { - match predicate_for_builtin_bound(infcx.tcx, cause, builtin_bound, 0, ty) { - Ok(predicate) => { - self.register_predicate_obligation(infcx, predicate); - } - 
Err(ErrorReported) => { } - } - } - - pub fn register_region_obligation<'a>(&mut self, - t_a: Ty<'tcx>, - r_b: ty::Region, - cause: ObligationCause<'tcx>) - { - register_region_obligation(t_a, r_b, cause, &mut self.region_obligations); - } - - pub fn register_predicate_obligation<'a>(&mut self, - infcx: &InferCtxt<'a,'tcx>, - obligation: PredicateObligation<'tcx>) - { - // this helps to reduce duplicate errors, as well as making - // debug output much nicer to read and so on. - let obligation = infcx.resolve_type_vars_if_possible(&obligation); - - assert!(!obligation.has_escaping_regions()); - - if self.is_duplicate_or_add(infcx.tcx, &obligation.predicate) { - debug!("register_predicate({:?}) -- already seen, skip", obligation); - return; - } - - debug!("register_predicate({:?})", obligation); - let obligation = PendingPredicateObligation { - obligation: obligation, - stalled_on: vec![] - }; - self.predicates.push_root(obligation); - } - - pub fn region_obligations(&self, - body_id: ast::NodeId) - -> &[RegionObligation<'tcx>] - { - match self.region_obligations.get(&body_id) { - None => Default::default(), - Some(vec) => vec, - } - } - - pub fn select_all_or_error<'a>(&mut self, - infcx: &InferCtxt<'a,'tcx>) - -> Result<(),Vec>> - { - try!(self.select_where_possible(infcx)); - let errors: Vec<_> = - self.predicates.to_errors(CodeAmbiguity) - .into_iter() - .map(|e| to_fulfillment_error(e)) - .collect(); - if errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } - - pub fn select_where_possible<'a>(&mut self, - infcx: &InferCtxt<'a,'tcx>) - -> Result<(),Vec>> - { - let mut selcx = SelectionContext::new(infcx); - self.select(&mut selcx) - } - - pub fn pending_obligations(&self) -> Vec> { - self.predicates.pending_obligations() - } - - fn is_duplicate_or_add(&mut self, - tcx: &ty::ctxt<'tcx>, - predicate: &ty::Predicate<'tcx>) - -> bool { - // For "global" predicates -- that is, predicates that don't - // involve type parameters, inference variables, or 
regions - // other than 'static -- we can check the cache in the tcx, - // which allows us to leverage work from other threads. Note - // that we don't add anything to this cache yet (unlike the - // local cache). This is because the tcx cache maintains the - // invariant that it only contains things that have been - // proven, and we have not yet proven that `predicate` holds. - if predicate.is_global() && tcx.fulfilled_predicates.borrow().is_duplicate(predicate) { - return true; - } - - // If `predicate` is not global, or not present in the tcx - // cache, we can still check for it in our local cache and add - // it if not present. Note that if we find this predicate in - // the local cache we can stop immediately, without reporting - // any errors, even though we don't know yet if it is - // true. This is because, while we don't yet know if the - // predicate holds, we know that this same fulfillment context - // already is in the process of finding out. - self.duplicate_set.is_duplicate_or_add(predicate) - } - - /// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it - /// only attempts to select obligations that haven't been seen before. - fn select<'a>(&mut self, - selcx: &mut SelectionContext<'a, 'tcx>) - -> Result<(),Vec>> - { - debug!("select(obligation-forest-size={})", self.predicates.len()); - - let mut errors = Vec::new(); - - loop { - debug!("select_where_possible: starting another iteration"); - - // Process pending obligations. - let outcome = { - let region_obligations = &mut self.region_obligations; - self.predicates.process_obligations( - |obligation, backtrace| process_predicate(selcx, - obligation, - backtrace, - region_obligations)) - }; - - debug!("select_where_possible: outcome={:?}", outcome); - - // these are obligations that were proven to be true. 
- for pending_obligation in outcome.completed { - let predicate = &pending_obligation.obligation.predicate; - if predicate.is_global() { - selcx.tcx().fulfilled_predicates.borrow_mut() - .is_duplicate_or_add(predicate); - } - } - - errors.extend( - outcome.errors.into_iter() - .map(|e| to_fulfillment_error(e))); - - // If nothing new was added, no need to keep looping. - if outcome.stalled { - break; - } - } - - debug!("select({} predicates remaining, {} errors) done", - self.predicates.len(), errors.len()); - - if errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } -} - -/// Like `process_predicate1`, but wrap result into a pending predicate. -fn process_predicate<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, - pending_obligation: &mut PendingPredicateObligation<'tcx>, - backtrace: Backtrace>, - region_obligations: &mut NodeMap>>) - -> Result>>, - FulfillmentErrorCode<'tcx>> -{ - match process_predicate1(selcx, pending_obligation, backtrace, region_obligations) { - Ok(Some(v)) => { - // FIXME(#30977) the right thing to do here, I think, is to permit - // DAGs. That is, we should detect whenever this predicate - // has appeared somewhere in the current tree./ If it's a - // parent, that's a cycle, and we should either error out - // or consider it ok. But if it's NOT a parent, we can - // ignore it, since it will be proven (or not) separately. - // However, this is a touch tricky, so I'm doing something - // a bit hackier for now so that the `huge-struct.rs` passes. - - let retain_vec: Vec<_> = { - let mut dedup = FnvHashSet(); - v.iter() - .map(|o| { - // Screen out obligations that we know globally - // are true. This should really be the DAG check - // mentioned above. - if - o.predicate.is_global() && - selcx.tcx().fulfilled_predicates.borrow().is_duplicate(&o.predicate) - { - return false; - } - - // If we see two siblings that are exactly the - // same, no need to add them twice. 
- if !dedup.insert(&o.predicate) { - return false; - } - - true - }) - .collect() - }; - - let pending_predicate_obligations = - v.into_iter() - .zip(retain_vec) - .flat_map(|(o, retain)| { - if retain { - Some(PendingPredicateObligation { - obligation: o, - stalled_on: vec![] - }) - } else { - None - } - }) - .collect(); - - Ok(Some(pending_predicate_obligations)) - } - Ok(None) => Ok(None), - Err(e) => Err(e) - } -} - -/// Processes a predicate obligation and returns either: -/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true -/// - `Ok(None)` if we don't have enough info to be sure -/// - `Err` if the predicate does not hold -fn process_predicate1<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, - pending_obligation: &mut PendingPredicateObligation<'tcx>, - backtrace: Backtrace>, - region_obligations: &mut NodeMap>>) - -> Result>>, - FulfillmentErrorCode<'tcx>> -{ - // if we were stalled on some unresolved variables, first check - // whether any of them have been resolved; if not, don't bother - // doing more work yet - if !pending_obligation.stalled_on.is_empty() { - if pending_obligation.stalled_on.iter().all(|&ty| { - let resolved_ty = selcx.infcx().resolve_type_vars_if_possible(&ty); - resolved_ty == ty // nothing changed here - }) { - debug!("process_predicate: pending obligation {:?} still stalled on {:?}", - selcx.infcx().resolve_type_vars_if_possible(&pending_obligation.obligation), - pending_obligation.stalled_on); - return Ok(None); - } - pending_obligation.stalled_on = vec![]; - } - - let obligation = &pending_obligation.obligation; - - // If we exceed the recursion limit, take a moment to look for a - // cycle so we can give a better error report from here, where we - // have more context. 
- let recursion_limit = selcx.tcx().sess.recursion_limit.get(); - if obligation.recursion_depth >= recursion_limit { - if let Some(cycle) = scan_for_cycle(obligation, &backtrace) { - report_overflow_error_cycle(selcx.infcx(), &cycle); - } - } - - match obligation.predicate { - ty::Predicate::Trait(ref data) => { - if coinductive_match(selcx, obligation, data, &backtrace) { - return Ok(Some(vec![])); - } - - let trait_obligation = obligation.with(data.clone()); - match selcx.select(&trait_obligation) { - Ok(Some(vtable)) => { - Ok(Some(vtable.nested_obligations())) - } - Ok(None) => { - // This is a bit subtle: for the most part, the - // only reason we can fail to make progress on - // trait selection is because we don't have enough - // information about the types in the trait. One - // exception is that we sometimes haven't decided - // what kind of closure a closure is. *But*, in - // that case, it turns out, the type of the - // closure will also change, because the closure - // also includes references to its upvars as part - // of its type, and those types are resolved at - // the same time. 
- pending_obligation.stalled_on = - data.skip_binder() // ok b/c this check doesn't care about regions - .input_types() - .iter() - .map(|t| selcx.infcx().resolve_type_vars_if_possible(t)) - .filter(|t| t.has_infer_types()) - .flat_map(|t| t.walk()) - .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false }) - .collect(); - - debug!("process_predicate: pending obligation {:?} now stalled on {:?}", - selcx.infcx().resolve_type_vars_if_possible(obligation), - pending_obligation.stalled_on); - - Ok(None) - } - Err(selection_err) => { - Err(CodeSelectionError(selection_err)) - } - } - } - - ty::Predicate::Equate(ref binder) => { - match selcx.infcx().equality_predicate(obligation.cause.span, binder) { - Ok(()) => Ok(Some(Vec::new())), - Err(_) => Err(CodeSelectionError(Unimplemented)), - } - } - - ty::Predicate::RegionOutlives(ref binder) => { - match selcx.infcx().region_outlives_predicate(obligation.cause.span, binder) { - Ok(()) => Ok(Some(Vec::new())), - Err(_) => Err(CodeSelectionError(Unimplemented)), - } - } - - ty::Predicate::TypeOutlives(ref binder) => { - // Check if there are higher-ranked regions. - match selcx.tcx().no_late_bound_regions(binder) { - // If there are, inspect the underlying type further. - None => { - // Convert from `Binder>` to `Binder`. - let binder = binder.map_bound_ref(|pred| pred.0); - - // Check if the type has any bound regions. - match selcx.tcx().no_late_bound_regions(&binder) { - // If so, this obligation is an error (for now). Eventually we should be - // able to support additional cases here, like `for<'a> &'a str: 'a`. - None => { - Err(CodeSelectionError(Unimplemented)) - } - // Otherwise, we have something of the form - // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`. - Some(t_a) => { - register_region_obligation(t_a, ty::ReStatic, - obligation.cause.clone(), - region_obligations); - Ok(Some(vec![])) - } - } - } - // If there aren't, register the obligation. 
- Some(ty::OutlivesPredicate(t_a, r_b)) => { - register_region_obligation(t_a, r_b, - obligation.cause.clone(), - region_obligations); - Ok(Some(vec![])) - } - } - } - - ty::Predicate::Projection(ref data) => { - let project_obligation = obligation.with(data.clone()); - match project::poly_project_and_unify_type(selcx, &project_obligation) { - Ok(v) => Ok(v), - Err(e) => Err(CodeProjectionError(e)) - } - } - - ty::Predicate::ObjectSafe(trait_def_id) => { - if !is_object_safe(selcx.tcx(), trait_def_id) { - Err(CodeSelectionError(Unimplemented)) - } else { - Ok(Some(Vec::new())) - } - } - - ty::Predicate::WellFormed(ty) => { - Ok(ty::wf::obligations(selcx.infcx(), obligation.cause.body_id, - ty, obligation.cause.span)) - } - } -} - -/// For defaulted traits, we use a co-inductive strategy to solve, so -/// that recursion is ok. This routine returns true if the top of the -/// stack (`top_obligation` and `top_data`): -/// - is a defaulted trait, and -/// - it also appears in the backtrace at some position `X`; and, -/// - all the predicates at positions `X..` between `X` an the top are -/// also defaulted traits. -fn coinductive_match<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, - top_obligation: &PredicateObligation<'tcx>, - top_data: &ty::PolyTraitPredicate<'tcx>, - backtrace: &Backtrace>) - -> bool -{ - if selcx.tcx().trait_has_default_impl(top_data.def_id()) { - debug!("coinductive_match: top_data={:?}", top_data); - for bt_obligation in backtrace.clone() { - debug!("coinductive_match: bt_obligation={:?}", bt_obligation); - - // *Everything* in the backtrace must be a defaulted trait. - match bt_obligation.obligation.predicate { - ty::Predicate::Trait(ref data) => { - if !selcx.tcx().trait_has_default_impl(data.def_id()) { - debug!("coinductive_match: trait does not have default impl"); - break; - } - } - _ => { break; } - } - - // And we must find a recursive match. 
- if bt_obligation.obligation.predicate == top_obligation.predicate { - debug!("coinductive_match: found a match in the backtrace"); - return true; - } - } - } - - false -} - -fn scan_for_cycle<'a,'tcx>(top_obligation: &PredicateObligation<'tcx>, - backtrace: &Backtrace>) - -> Option>> -{ - let mut map = FnvHashMap(); - let all_obligations = - || iter::once(top_obligation) - .chain(backtrace.clone() - .map(|p| &p.obligation)); - for (index, bt_obligation) in all_obligations().enumerate() { - if let Some(&start) = map.get(&bt_obligation.predicate) { - // Found a cycle starting at position `start` and running - // until the current position (`index`). - return Some(all_obligations().skip(start).take(index - start + 1).cloned().collect()); - } else { - map.insert(bt_obligation.predicate.clone(), index); - } - } - None -} - -fn register_region_obligation<'tcx>(t_a: Ty<'tcx>, - r_b: ty::Region, - cause: ObligationCause<'tcx>, - region_obligations: &mut NodeMap>>) -{ - let region_obligation = RegionObligation { sup_type: t_a, - sub_region: r_b, - cause: cause }; - - debug!("register_region_obligation({:?}, cause={:?})", - region_obligation, region_obligation.cause); - - region_obligations.entry(region_obligation.cause.body_id) - .or_insert(vec![]) - .push(region_obligation); - -} - -impl<'tcx> FulfilledPredicates<'tcx> { - pub fn new() -> FulfilledPredicates<'tcx> { - FulfilledPredicates { - set: FnvHashSet() - } - } - - pub fn is_duplicate(&self, key: &ty::Predicate<'tcx>) -> bool { - self.set.contains(key) - } - - fn is_duplicate_or_add(&mut self, key: &ty::Predicate<'tcx>) -> bool { - !self.set.insert(key.clone()) - } -} - -fn to_fulfillment_error<'tcx>( - error: Error, FulfillmentErrorCode<'tcx>>) - -> FulfillmentError<'tcx> -{ - let obligation = error.backtrace.into_iter().next().unwrap().obligation; - FulfillmentError::new(obligation, error.error) -} diff --git a/src/librustc/middle/traits/mod.rs b/src/librustc/middle/traits/mod.rs deleted file mode 100644 index 
8fecffcea9fe4..0000000000000 --- a/src/librustc/middle/traits/mod.rs +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Trait Resolution. See the Book for more. - -pub use self::SelectionError::*; -pub use self::FulfillmentErrorCode::*; -pub use self::Vtable::*; -pub use self::ObligationCauseCode::*; - -use dep_graph::DepNode; -use middle::def_id::DefId; -use middle::free_region::FreeRegionMap; -use middle::subst; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::fast_reject; -use middle::infer::{self, fixup_err_to_string, InferCtxt}; - -use std::rc::Rc; -use syntax::ast; -use syntax::codemap::{Span, DUMMY_SP}; - -pub use self::error_reporting::TraitErrorKey; -pub use self::error_reporting::recursive_type_with_infinite_size_error; -pub use self::error_reporting::report_fulfillment_errors; -pub use self::error_reporting::report_overflow_error; -pub use self::error_reporting::report_overflow_error_cycle; -pub use self::error_reporting::report_selection_error; -pub use self::error_reporting::report_object_safety_error; -pub use self::coherence::orphan_check; -pub use self::coherence::overlapping_impls; -pub use self::coherence::OrphanCheckErr; -pub use self::fulfill::{FulfillmentContext, FulfilledPredicates, RegionObligation}; -pub use self::project::MismatchedProjectionTypes; -pub use self::project::normalize; -pub use self::project::Normalized; -pub use self::object_safety::is_object_safe; -pub use self::object_safety::astconv_object_safety_violations; -pub use self::object_safety::object_safety_violations; -pub use self::object_safety::ObjectSafetyViolation; -pub use self::object_safety::MethodViolationCode; -pub use 
self::object_safety::is_vtable_safe_method; -pub use self::select::EvaluationCache; -pub use self::select::SelectionContext; -pub use self::select::SelectionCache; -pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch}; -pub use self::select::{MethodMatchedData}; // intentionally don't export variants -pub use self::util::elaborate_predicates; -pub use self::util::get_vtable_index_of_object_method; -pub use self::util::trait_ref_for_builtin_bound; -pub use self::util::predicate_for_trait_def; -pub use self::util::supertraits; -pub use self::util::Supertraits; -pub use self::util::supertrait_def_ids; -pub use self::util::SupertraitDefIds; -pub use self::util::transitive_bounds; -pub use self::util::upcast; - -mod coherence; -mod error_reporting; -mod fulfill; -mod project; -mod object_safety; -mod select; -mod structural_impls; -mod util; - -/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for -/// which the vtable must be found. The process of finding a vtable is -/// called "resolving" the `Obligation`. This process consists of -/// either identifying an `impl` (e.g., `impl Eq for int`) that -/// provides the required vtable, or else finding a bound that is in -/// scope. The eventual result is usually a `Selection` (defined below). -#[derive(Clone, PartialEq, Eq)] -pub struct Obligation<'tcx, T> { - pub cause: ObligationCause<'tcx>, - pub recursion_depth: usize, - pub predicate: T, -} - -pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>; -pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>; - -/// Why did we incur this obligation? Used for error reporting. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ObligationCause<'tcx> { - pub span: Span, - - // The id of the fn body that triggered this obligation. 
This is - // used for region obligations to determine the precise - // environment in which the region obligation should be evaluated - // (in particular, closures can add new assumptions). See the - // field `region_obligations` of the `FulfillmentContext` for more - // information. - pub body_id: ast::NodeId, - - pub code: ObligationCauseCode<'tcx> -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum ObligationCauseCode<'tcx> { - /// Not well classified or should be obvious from span. - MiscObligation, - - /// This is the trait reference from the given projection - SliceOrArrayElem, - - /// This is the trait reference from the given projection - ProjectionWf(ty::ProjectionTy<'tcx>), - - /// In an impl of trait X for type Y, type Y must - /// also implement all supertraits of X. - ItemObligation(DefId), - - /// A type like `&'a T` is WF only if `T: 'a`. - ReferenceOutlivesReferent(Ty<'tcx>), - - /// Obligation incurred due to an object cast. - ObjectCastObligation(/* Object type */ Ty<'tcx>), - - /// Various cases where expressions must be sized/copy/etc: - AssignmentLhsSized, // L = X implies that L is Sized - StructInitializerSized, // S { ... } must be Sized - VariableType(ast::NodeId), // Type of each variable must be Sized - ReturnType, // Return type must be Sized - RepeatVec, // [T,..n] --> T must be Copy - - // Captures of variable the given id by a closure (span is the - // span of the closure) - ClosureCapture(ast::NodeId, Span, ty::BuiltinBound), - - // Types of fields (other than the last) in a struct must be sized. - FieldSized, - - // static items must have `Sync` type - SharedStatic, - - BuiltinDerivedObligation(DerivedObligationCause<'tcx>), - - ImplDerivedObligation(DerivedObligationCause<'tcx>), - - CompareImplMethodObligation, -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct DerivedObligationCause<'tcx> { - /// The trait reference of the parent obligation that led to the - /// current obligation. 
Note that only trait obligations lead to - /// derived obligations, so we just store the trait reference here - /// directly. - parent_trait_ref: ty::PolyTraitRef<'tcx>, - - /// The parent trait had this cause - parent_code: Rc> -} - -pub type Obligations<'tcx, O> = Vec>; -pub type PredicateObligations<'tcx> = Vec>; -pub type TraitObligations<'tcx> = Vec>; - -pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>; - -#[derive(Clone,Debug)] -pub enum SelectionError<'tcx> { - Unimplemented, - OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>, - ty::PolyTraitRef<'tcx>, - ty::error::TypeError<'tcx>), - TraitNotObjectSafe(DefId), -} - -pub struct FulfillmentError<'tcx> { - pub obligation: PredicateObligation<'tcx>, - pub code: FulfillmentErrorCode<'tcx> -} - -#[derive(Clone)] -pub enum FulfillmentErrorCode<'tcx> { - CodeSelectionError(SelectionError<'tcx>), - CodeProjectionError(MismatchedProjectionTypes<'tcx>), - CodeAmbiguity, -} - -/// When performing resolution, it is typically the case that there -/// can be one of three outcomes: -/// -/// - `Ok(Some(r))`: success occurred with result `r` -/// - `Ok(None)`: could not definitely determine anything, usually due -/// to inconclusive type inference. -/// - `Err(e)`: error `e` occurred -pub type SelectionResult<'tcx, T> = Result, SelectionError<'tcx>>; - -/// Given the successful resolution of an obligation, the `Vtable` -/// indicates where the vtable comes from. Note that while we call this -/// a "vtable", it does not necessarily indicate dynamic dispatch at -/// runtime. `Vtable` instances just tell the compiler where to find -/// methods, but in generic code those methods are typically statically -/// dispatched -- only when an object is constructed is a `Vtable` -/// instance reified into an actual vtable. -/// -/// For example, the vtable may be tied to a specific impl (case A), -/// or it may be relative to some bound that is in scope (case B). -/// -/// -/// ``` -/// impl Clone for Option { ... 
} // Impl_1 -/// impl Clone for Box { ... } // Impl_2 -/// impl Clone for int { ... } // Impl_3 -/// -/// fn foo(concrete: Option>, -/// param: T, -/// mixed: Option) { -/// -/// // Case A: Vtable points at a specific impl. Only possible when -/// // type is concretely known. If the impl itself has bounded -/// // type parameters, Vtable will carry resolutions for those as well: -/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])]) -/// -/// // Case B: Vtable must be provided by caller. This applies when -/// // type is a type parameter. -/// param.clone(); // VtableParam -/// -/// // Case C: A mix of cases A and B. -/// mixed.clone(); // Vtable(Impl_1, [VtableParam]) -/// } -/// ``` -/// -/// ### The type parameter `N` -/// -/// See explanation on `VtableImplData`. -#[derive(Clone)] -pub enum Vtable<'tcx, N> { - /// Vtable identifying a particular impl. - VtableImpl(VtableImplData<'tcx, N>), - - /// Vtable for default trait implementations - /// This carries the information and nested obligations with regards - /// to a default implementation for a trait `Trait`. The nested obligations - /// ensure the trait implementation holds for all the constituent types. - VtableDefaultImpl(VtableDefaultImplData), - - /// Successful resolution to an obligation provided by the caller - /// for some type parameter. The `Vec` represents the - /// obligations incurred from normalizing the where-clause (if - /// any). - VtableParam(Vec), - - /// Virtual calls through an object - VtableObject(VtableObjectData<'tcx>), - - /// Successful resolution for a builtin trait. - VtableBuiltin(VtableBuiltinData), - - /// Vtable automatically generated for a closure. The def ID is the ID - /// of the closure expression. This is a `VtableImpl` in spirit, but the - /// impl is generated by the compiler and does not appear in the source. - VtableClosure(VtableClosureData<'tcx, N>), - - /// Same as above, but for a fn pointer type with the given signature. 
- VtableFnPointer(ty::Ty<'tcx>), -} - -/// Identifies a particular impl in the source, along with a set of -/// substitutions from the impl's type/lifetime parameters. The -/// `nested` vector corresponds to the nested obligations attached to -/// the impl's type parameters. -/// -/// The type parameter `N` indicates the type used for "nested -/// obligations" that are required by the impl. During type check, this -/// is `Obligation`, as one might expect. During trans, however, this -/// is `()`, because trans only requires a shallow resolution of an -/// impl, and nested obligations are satisfied later. -#[derive(Clone, PartialEq, Eq)] -pub struct VtableImplData<'tcx, N> { - pub impl_def_id: DefId, - pub substs: subst::Substs<'tcx>, - pub nested: Vec -} - -#[derive(Clone, PartialEq, Eq)] -pub struct VtableClosureData<'tcx, N> { - pub closure_def_id: DefId, - pub substs: ty::ClosureSubsts<'tcx>, - /// Nested obligations. This can be non-empty if the closure - /// signature contains associated types. - pub nested: Vec -} - -#[derive(Clone)] -pub struct VtableDefaultImplData { - pub trait_def_id: DefId, - pub nested: Vec -} - -#[derive(Clone)] -pub struct VtableBuiltinData { - pub nested: Vec -} - -/// A vtable for some object-safe trait `Foo` automatically derived -/// for the object type `Foo`. -#[derive(PartialEq,Eq,Clone)] -pub struct VtableObjectData<'tcx> { - /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`. - pub upcast_trait_ref: ty::PolyTraitRef<'tcx>, - - /// The vtable is formed by concatenating together the method lists of - /// the base object trait and all supertraits; this is the start of - /// `upcast_trait_ref`'s methods in that vtable. - pub vtable_base: usize -} - -/// Creates predicate obligations from the generic bounds. 
-pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, - generic_bounds: &ty::InstantiatedPredicates<'tcx>) - -> PredicateObligations<'tcx> -{ - util::predicates_for_generics(cause, 0, generic_bounds) -} - -/// Determines whether the type `ty` is known to meet `bound` and -/// returns true if so. Returns false if `ty` either does not meet -/// `bound` or is not known to meet bound (note that this is -/// conservative towards *no impl*, which is the opposite of the -/// `evaluate` methods). -pub fn type_known_to_meet_builtin_bound<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - ty: Ty<'tcx>, - bound: ty::BuiltinBound, - span: Span) - -> bool -{ - debug!("type_known_to_meet_builtin_bound(ty={:?}, bound={:?})", - ty, - bound); - - let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID); - let obligation = - util::predicate_for_builtin_bound(infcx.tcx, cause, bound, 0, ty); - let obligation = match obligation { - Ok(o) => o, - Err(..) => return false - }; - let result = SelectionContext::new(infcx) - .evaluate_obligation_conservatively(&obligation); - debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} => {:?}", - ty, bound, result); - - if result && (ty.has_infer_types() || ty.has_closure_types()) { - // Because of inference "guessing", selection can sometimes claim - // to succeed while the success requires a guess. To ensure - // this function's result remains infallible, we must confirm - // that guess. While imperfect, I believe this is sound. - - let mut fulfill_cx = FulfillmentContext::new(); - - // We can use a dummy node-id here because we won't pay any mind - // to region obligations that arise (there shouldn't really be any - // anyhow). - let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID); - - fulfill_cx.register_builtin_bound(infcx, ty, bound, cause); - - // Note: we only assume something is `Copy` if we can - // *definitively* show that it implements `Copy`. Otherwise, - // assume it is move; linear is always ok. 
- match fulfill_cx.select_all_or_error(infcx) { - Ok(()) => { - debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} success", - ty, - bound); - true - } - Err(e) => { - debug!("type_known_to_meet_builtin_bound: ty={:?} bound={:?} errors={:?}", - ty, - bound, - e); - false - } - } - } else { - result - } -} - -// FIXME: this is gonna need to be removed ... -/// Normalizes the parameter environment, reporting errors if they occur. -pub fn normalize_param_env_or_error<'a,'tcx>(unnormalized_env: ty::ParameterEnvironment<'a,'tcx>, - cause: ObligationCause<'tcx>) - -> ty::ParameterEnvironment<'a,'tcx> -{ - // I'm not wild about reporting errors here; I'd prefer to - // have the errors get reported at a defined place (e.g., - // during typeck). Instead I have all parameter - // environments, in effect, going through this function - // and hence potentially reporting errors. This ensurse of - // course that we never forget to normalize (the - // alternative seemed like it would involve a lot of - // manual invocations of this fn -- and then we'd have to - // deal with the errors at each of those sites). - // - // In any case, in practice, typeck constructs all the - // parameter environments once for every fn as it goes, - // and errors will get reported then; so after typeck we - // can be sure that no errors should occur. - - let tcx = unnormalized_env.tcx; - let span = cause.span; - let body_id = cause.body_id; - - debug!("normalize_param_env_or_error(unnormalized_env={:?})", - unnormalized_env); - - let predicates: Vec<_> = - util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone()) - .filter(|p| !p.is_global()) // (*) - .collect(); - - // (*) Any predicate like `i32: Trait` or whatever doesn't - // need to be in the *environment* to be proven, so screen those - // out. This is important for the soundness of inter-fn - // caching. 
Note though that we should probably check that these - // predicates hold at the point where the environment is - // constructed, but I am not currently doing so out of laziness. - // -nmatsakis - - debug!("normalize_param_env_or_error: elaborated-predicates={:?}", - predicates); - - let elaborated_env = unnormalized_env.with_caller_bounds(predicates); - - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(elaborated_env)); - let predicates = match fully_normalize(&infcx, - cause, - &infcx.parameter_environment.caller_bounds) { - Ok(predicates) => predicates, - Err(errors) => { - report_fulfillment_errors(&infcx, &errors); - return infcx.parameter_environment; // an unnormalized env is better than nothing - } - }; - - debug!("normalize_param_env_or_error: normalized predicates={:?}", - predicates); - - let free_regions = FreeRegionMap::new(); - infcx.resolve_regions_and_report_errors(&free_regions, body_id); - let predicates = match infcx.fully_resolve(&predicates) { - Ok(predicates) => predicates, - Err(fixup_err) => { - // If we encounter a fixup error, it means that some type - // variable wound up unconstrained. I actually don't know - // if this can happen, and I certainly don't expect it to - // happen often, but if it did happen it probably - // represents a legitimate failure due to some kind of - // unconstrained variable, and it seems better not to ICE, - // all things considered. 
- let err_msg = fixup_err_to_string(fixup_err); - tcx.sess.span_err(span, &err_msg); - return infcx.parameter_environment; // an unnormalized env is better than nothing - } - }; - - debug!("normalize_param_env_or_error: resolved predicates={:?}", - predicates); - - infcx.parameter_environment.with_caller_bounds(predicates) -} - -pub fn fully_normalize<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, - cause: ObligationCause<'tcx>, - value: &T) - -> Result>> - where T : TypeFoldable<'tcx> -{ - debug!("fully_normalize(value={:?})", value); - - let mut selcx = &mut SelectionContext::new(infcx); - // FIXME (@jroesch) ISSUE 26721 - // I'm not sure if this is a bug or not, needs further investigation. - // It appears that by reusing the fulfillment_cx here we incur more - // obligations and later trip an asssertion on regionck.rs line 337. - // - // The two possibilities I see is: - // - normalization is not actually fully happening and we - // have a bug else where - // - we are adding a duplicate bound into the list causing - // its size to change. - // - // I think we should probably land this refactor and then come - // back to this is a follow-up patch. 
- let mut fulfill_cx = FulfillmentContext::new(); - - let Normalized { value: normalized_value, obligations } = - project::normalize(selcx, cause, value); - debug!("fully_normalize: normalized_value={:?} obligations={:?}", - normalized_value, - obligations); - for obligation in obligations { - fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation); - } - - debug!("fully_normalize: select_all_or_error start"); - match fulfill_cx.select_all_or_error(infcx) { - Ok(()) => { } - Err(e) => { - debug!("fully_normalize: error={:?}", e); - return Err(e); - } - } - debug!("fully_normalize: select_all_or_error complete"); - let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value); - debug!("fully_normalize: resolved_value={:?}", resolved_value); - Ok(resolved_value) -} - -impl<'tcx,O> Obligation<'tcx,O> { - pub fn new(cause: ObligationCause<'tcx>, - trait_ref: O) - -> Obligation<'tcx, O> - { - Obligation { cause: cause, - recursion_depth: 0, - predicate: trait_ref } - } - - fn with_depth(cause: ObligationCause<'tcx>, - recursion_depth: usize, - trait_ref: O) - -> Obligation<'tcx, O> - { - Obligation { cause: cause, - recursion_depth: recursion_depth, - predicate: trait_ref } - } - - pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> { - Obligation::new(ObligationCause::misc(span, body_id), trait_ref) - } - - pub fn with

(&self, value: P) -> Obligation<'tcx,P> { - Obligation { cause: self.cause.clone(), - recursion_depth: self.recursion_depth, - predicate: value } - } -} - -impl<'tcx> ObligationCause<'tcx> { - pub fn new(span: Span, - body_id: ast::NodeId, - code: ObligationCauseCode<'tcx>) - -> ObligationCause<'tcx> { - ObligationCause { span: span, body_id: body_id, code: code } - } - - pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> { - ObligationCause { span: span, body_id: body_id, code: MiscObligation } - } - - pub fn dummy() -> ObligationCause<'tcx> { - ObligationCause { span: DUMMY_SP, body_id: 0, code: MiscObligation } - } -} - -impl<'tcx, N> Vtable<'tcx, N> { - pub fn nested_obligations(self) -> Vec { - match self { - VtableImpl(i) => i.nested, - VtableParam(n) => n, - VtableBuiltin(i) => i.nested, - VtableDefaultImpl(d) => d.nested, - VtableClosure(c) => c.nested, - VtableObject(_) | VtableFnPointer(..) => vec![] - } - } - - pub fn map(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M { - match self { - VtableImpl(i) => VtableImpl(VtableImplData { - impl_def_id: i.impl_def_id, - substs: i.substs, - nested: i.nested.into_iter().map(f).collect() - }), - VtableParam(n) => VtableParam(n.into_iter().map(f).collect()), - VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData { - nested: i.nested.into_iter().map(f).collect() - }), - VtableObject(o) => VtableObject(o), - VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData { - trait_def_id: d.trait_def_id, - nested: d.nested.into_iter().map(f).collect() - }), - VtableFnPointer(f) => VtableFnPointer(f), - VtableClosure(c) => VtableClosure(VtableClosureData { - closure_def_id: c.closure_def_id, - substs: c.substs, - nested: c.nested.into_iter().map(f).collect(), - }) - } - } -} - -impl<'tcx> FulfillmentError<'tcx> { - fn new(obligation: PredicateObligation<'tcx>, - code: FulfillmentErrorCode<'tcx>) - -> FulfillmentError<'tcx> - { - FulfillmentError { obligation: obligation, code: code } - } -} - 
-impl<'tcx> TraitObligation<'tcx> { - /// Creates the dep-node for selecting/evaluating this trait reference. - fn dep_node(&self, tcx: &ty::ctxt<'tcx>) -> DepNode { - let simplified_ty = - fast_reject::simplify_type(tcx, - self.predicate.skip_binder().self_ty(), // (*) - true); - - // (*) skip_binder is ok because `simplify_type` doesn't care about regions - - DepNode::TraitSelect(self.predicate.def_id(), simplified_ty) - } - - fn self_ty(&self) -> ty::Binder> { - ty::Binder(self.predicate.skip_binder().self_ty()) - } -} diff --git a/src/librustc/middle/traits/object_safety.rs b/src/librustc/middle/traits/object_safety.rs deleted file mode 100644 index 0e4a42bd15134..0000000000000 --- a/src/librustc/middle/traits/object_safety.rs +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! "Object safety" refers to the ability for a trait to be converted -//! to an object. In general, traits may only be converted to an -//! object if all of their methods meet certain criteria. In particular, -//! they must: -//! -//! - have a suitable receiver from which we can extract a vtable; -//! - not reference the erased type `Self` except for in this receiver; -//! 
- not have generic type parameters - -use super::supertraits; -use super::elaborate_predicates; - -use middle::def_id::DefId; -use middle::subst::{self, SelfSpace, TypeSpace}; -use middle::traits; -use middle::ty::{self, ToPolyTraitRef, Ty, TypeFoldable}; -use std::rc::Rc; -use syntax::ast; - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum ObjectSafetyViolation<'tcx> { - /// Self : Sized declared on the trait - SizedSelf, - - /// Supertrait reference references `Self` an in illegal location - /// (e.g. `trait Foo : Bar`) - SupertraitSelf, - - /// Method has something illegal - Method(Rc>, MethodViolationCode), -} - -/// Reasons a method might not be object-safe. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] -pub enum MethodViolationCode { - /// e.g., `fn foo()` - StaticMethod, - - /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self` - ReferencesSelf, - - /// e.g., `fn foo()` - Generic, -} - -pub fn is_object_safe<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> bool -{ - // Because we query yes/no results frequently, we keep a cache: - let def = tcx.lookup_trait_def(trait_def_id); - - let result = def.object_safety().unwrap_or_else(|| { - let result = object_safety_violations(tcx, trait_def_id).is_empty(); - - // Record just a yes/no result in the cache; this is what is - // queried most frequently. Note that this may overwrite a - // previous result, but always with the same thing. - def.set_object_safety(result); - - result - }); - - debug!("is_object_safe({:?}) = {}", trait_def_id, result); - - result -} - -/// Returns the object safety violations that affect -/// astconv - currently, Self in supertraits. This is needed -/// because `object_safety_violations` can't be used during -/// type collection. 
-pub fn astconv_object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> Vec> -{ - let mut violations = vec![]; - - if supertraits_reference_self(tcx, trait_def_id) { - violations.push(ObjectSafetyViolation::SupertraitSelf); - } - - debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}", - trait_def_id, - violations); - - violations -} - -pub fn object_safety_violations<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> Vec> -{ - traits::supertrait_def_ids(tcx, trait_def_id) - .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id)) - .collect() -} - -fn object_safety_violations_for_trait<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> Vec> -{ - // Check methods for violations. - let mut violations: Vec<_> = - tcx.trait_items(trait_def_id).iter() - .filter_map(|item| { - match *item { - ty::MethodTraitItem(ref m) => { - object_safety_violation_for_method(tcx, trait_def_id, &**m) - .map(|code| ObjectSafetyViolation::Method(m.clone(), code)) - } - _ => None, - } - }) - .collect(); - - // Check the trait itself. - if trait_has_sized_self(tcx, trait_def_id) { - violations.push(ObjectSafetyViolation::SizedSelf); - } - if supertraits_reference_self(tcx, trait_def_id) { - violations.push(ObjectSafetyViolation::SupertraitSelf); - } - - debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}", - trait_def_id, - violations); - - violations -} - -pub fn supertraits_reference_self<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> bool -{ - let trait_def = tcx.lookup_trait_def(trait_def_id); - let trait_ref = trait_def.trait_ref.clone(); - let trait_ref = trait_ref.to_poly_trait_ref(); - let predicates = tcx.lookup_super_predicates(trait_def_id); - predicates - .predicates - .into_iter() - .map(|predicate| predicate.subst_supertrait(tcx, &trait_ref)) - .any(|predicate| { - match predicate { - ty::Predicate::Trait(ref data) => { - // In the case of a trait predicate, we can skip the "self" type. 
- data.0.trait_ref.substs.types.get_slice(TypeSpace) - .iter() - .cloned() - .any(|t| t.has_self_ty()) - } - ty::Predicate::Projection(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::TypeOutlives(..) | - ty::Predicate::RegionOutlives(..) | - ty::Predicate::Equate(..) => { - false - } - } - }) -} - -fn trait_has_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId) - -> bool -{ - let trait_def = tcx.lookup_trait_def(trait_def_id); - let trait_predicates = tcx.lookup_predicates(trait_def_id); - generics_require_sized_self(tcx, &trait_def.generics, &trait_predicates) -} - -fn generics_require_sized_self<'tcx>(tcx: &ty::ctxt<'tcx>, - generics: &ty::Generics<'tcx>, - predicates: &ty::GenericPredicates<'tcx>) - -> bool -{ - let sized_def_id = match tcx.lang_items.sized_trait() { - Some(def_id) => def_id, - None => { return false; /* No Sized trait, can't require it! */ } - }; - - // Search for a predicate like `Self : Sized` amongst the trait bounds. - let free_substs = tcx.construct_free_substs(generics, - tcx.region_maps.node_extent(ast::DUMMY_NODE_ID)); - let predicates = predicates.instantiate(tcx, &free_substs).predicates.into_vec(); - elaborate_predicates(tcx, predicates) - .any(|predicate| { - match predicate { - ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => { - trait_pred.0.self_ty().is_self() - } - ty::Predicate::Projection(..) | - ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) | - ty::Predicate::RegionOutlives(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::TypeOutlives(..) => { - false - } - } - }) -} - -/// Returns `Some(_)` if this method makes the containing trait not object safe. -fn object_safety_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - method: &ty::Method<'tcx>) - -> Option -{ - // Any method that has a `Self : Sized` requisite is otherwise - // exempt from the regulations. 
- if generics_require_sized_self(tcx, &method.generics, &method.predicates) { - return None; - } - - virtual_call_violation_for_method(tcx, trait_def_id, method) -} - -/// We say a method is *vtable safe* if it can be invoked on a trait -/// object. Note that object-safe traits can have some -/// non-vtable-safe methods, so long as they require `Self:Sized` or -/// otherwise ensure that they cannot be used when `Self=Trait`. -pub fn is_vtable_safe_method<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - method: &ty::Method<'tcx>) - -> bool -{ - virtual_call_violation_for_method(tcx, trait_def_id, method).is_none() -} - -/// Returns `Some(_)` if this method cannot be called on a trait -/// object; this does not necessarily imply that the enclosing trait -/// is not object safe, because the method might have a where clause -/// `Self:Sized`. -fn virtual_call_violation_for_method<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - method: &ty::Method<'tcx>) - -> Option -{ - // The method's first parameter must be something that derefs (or - // autorefs) to `&self`. For now, we only accept `self`, `&self` - // and `Box`. - match method.explicit_self { - ty::ExplicitSelfCategory::Static => { - return Some(MethodViolationCode::StaticMethod); - } - - ty::ExplicitSelfCategory::ByValue | - ty::ExplicitSelfCategory::ByReference(..) | - ty::ExplicitSelfCategory::ByBox => { - } - } - - // The `Self` type is erased, so it should not appear in list of - // arguments or return type apart from the receiver. - let ref sig = method.fty.sig; - for &input_ty in &sig.0.inputs[1..] { - if contains_illegal_self_type_reference(tcx, trait_def_id, input_ty) { - return Some(MethodViolationCode::ReferencesSelf); - } - } - if let ty::FnConverging(result_type) = sig.0.output { - if contains_illegal_self_type_reference(tcx, trait_def_id, result_type) { - return Some(MethodViolationCode::ReferencesSelf); - } - } - - // We can't monomorphize things like `fn foo(...)`. 
- if !method.generics.types.is_empty_in(subst::FnSpace) { - return Some(MethodViolationCode::Generic); - } - - None -} - -fn contains_illegal_self_type_reference<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - ty: Ty<'tcx>) - -> bool -{ - // This is somewhat subtle. In general, we want to forbid - // references to `Self` in the argument and return types, - // since the value of `Self` is erased. However, there is one - // exception: it is ok to reference `Self` in order to access - // an associated type of the current trait, since we retain - // the value of those associated types in the object type - // itself. - // - // ```rust - // trait SuperTrait { - // type X; - // } - // - // trait Trait : SuperTrait { - // type Y; - // fn foo(&self, x: Self) // bad - // fn foo(&self) -> Self // bad - // fn foo(&self) -> Option // bad - // fn foo(&self) -> Self::Y // OK, desugars to next example - // fn foo(&self) -> ::Y // OK - // fn foo(&self) -> Self::X // OK, desugars to next example - // fn foo(&self) -> ::X // OK - // } - // ``` - // - // However, it is not as simple as allowing `Self` in a projected - // type, because there are illegal ways to use `Self` as well: - // - // ```rust - // trait Trait : SuperTrait { - // ... - // fn foo(&self) -> ::X; - // } - // ``` - // - // Here we will not have the type of `X` recorded in the - // object type, and we cannot resolve `Self as SomeOtherTrait` - // without knowing what `Self` is. - - let mut supertraits: Option>> = None; - let mut error = false; - ty.maybe_walk(|ty| { - match ty.sty { - ty::TyParam(ref param_ty) => { - if param_ty.space == SelfSpace { - error = true; - } - - false // no contained types to walk - } - - ty::TyProjection(ref data) => { - // This is a projected type `::X`. - - // Compute supertraits of current trait lazily. 
- if supertraits.is_none() { - let trait_def = tcx.lookup_trait_def(trait_def_id); - let trait_ref = ty::Binder(trait_def.trait_ref.clone()); - supertraits = Some(traits::supertraits(tcx, trait_ref).collect()); - } - - // Determine whether the trait reference `Foo as - // SomeTrait` is in fact a supertrait of the - // current trait. In that case, this type is - // legal, because the type `X` will be specified - // in the object type. Note that we can just use - // direct equality here because all of these types - // are part of the formal parameter listing, and - // hence there should be no inference variables. - let projection_trait_ref = ty::Binder(data.trait_ref.clone()); - let is_supertrait_of_current_trait = - supertraits.as_ref().unwrap().contains(&projection_trait_ref); - - if is_supertrait_of_current_trait { - false // do not walk contained types, do not report error, do collect $200 - } else { - true // DO walk contained types, POSSIBLY reporting an error - } - } - - _ => true, // walk contained types, if any - } - }); - - error -} diff --git a/src/librustc/middle/traits/project.rs b/src/librustc/middle/traits/project.rs deleted file mode 100644 index c363425db85b0..0000000000000 --- a/src/librustc/middle/traits/project.rs +++ /dev/null @@ -1,983 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Code for projecting associated types out of trait references. 
- -use super::elaborate_predicates; -use super::report_overflow_error; -use super::Obligation; -use super::ObligationCause; -use super::PredicateObligation; -use super::SelectionContext; -use super::SelectionError; -use super::VtableClosureData; -use super::VtableImplData; -use super::util; - -use middle::infer::{self, TypeOrigin}; -use middle::subst::Subst; -use middle::ty::{self, ToPredicate, ToPolyTraitRef, Ty}; -use middle::ty::fold::{TypeFoldable, TypeFolder}; -use syntax::parse::token; -use util::common::FN_OUTPUT_NAME; - -pub type PolyProjectionObligation<'tcx> = - Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>; - -pub type ProjectionObligation<'tcx> = - Obligation<'tcx, ty::ProjectionPredicate<'tcx>>; - -pub type ProjectionTyObligation<'tcx> = - Obligation<'tcx, ty::ProjectionTy<'tcx>>; - -/// When attempting to resolve `::Name` ... -#[derive(Debug)] -pub enum ProjectionTyError<'tcx> { - /// ...we found multiple sources of information and couldn't resolve the ambiguity. - TooManyCandidates, - - /// ...an error occurred matching `T : TraitRef` - TraitSelectionError(SelectionError<'tcx>), -} - -#[derive(Clone)] -pub struct MismatchedProjectionTypes<'tcx> { - pub err: ty::error::TypeError<'tcx> -} - -#[derive(PartialEq, Eq, Debug)] -enum ProjectionTyCandidate<'tcx> { - // from a where-clause in the env or object type - ParamEnv(ty::PolyProjectionPredicate<'tcx>), - - // from the definition of `Trait` when you have something like <::B as Trait2>::C - TraitDef(ty::PolyProjectionPredicate<'tcx>), - - // defined in an impl - Impl(VtableImplData<'tcx, PredicateObligation<'tcx>>), - - // closure return type - Closure(VtableClosureData<'tcx, PredicateObligation<'tcx>>), - - // fn pointer return type - FnPointer(Ty<'tcx>), -} - -struct ProjectionTyCandidateSet<'tcx> { - vec: Vec>, - ambiguous: bool -} - -/// Evaluates constraints of the form: -/// -/// for<...> ::U == V -/// -/// If successful, this may result in additional obligations. 
-pub fn poly_project_and_unify_type<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &PolyProjectionObligation<'tcx>) - -> Result>>, MismatchedProjectionTypes<'tcx>> -{ - debug!("poly_project_and_unify_type(obligation={:?})", - obligation); - - let infcx = selcx.infcx(); - infcx.commit_if_ok(|snapshot| { - let (skol_predicate, skol_map) = - infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot); - - let skol_obligation = obligation.with(skol_predicate); - match project_and_unify_type(selcx, &skol_obligation) { - Ok(result) => { - match infcx.leak_check(&skol_map, snapshot) { - Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, &result)), - Err(e) => Err(MismatchedProjectionTypes { err: e }), - } - } - Err(e) => { - Err(e) - } - } - }) -} - -/// Evaluates constraints of the form: -/// -/// ::U == V -/// -/// If successful, this may result in additional obligations. -fn project_and_unify_type<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionObligation<'tcx>) - -> Result>>, MismatchedProjectionTypes<'tcx>> -{ - debug!("project_and_unify_type(obligation={:?})", - obligation); - - let Normalized { value: normalized_ty, obligations } = - match opt_normalize_projection_type(selcx, - obligation.predicate.projection_ty.clone(), - obligation.cause.clone(), - obligation.recursion_depth) { - Some(n) => n, - None => { - consider_unification_despite_ambiguity(selcx, obligation); - return Ok(None); - } - }; - - debug!("project_and_unify_type: normalized_ty={:?} obligations={:?}", - normalized_ty, - obligations); - - let infcx = selcx.infcx(); - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - match infer::mk_eqty(infcx, true, origin, normalized_ty, obligation.predicate.ty) { - Ok(()) => Ok(Some(obligations)), - Err(err) => Err(MismatchedProjectionTypes { err: err }), - } -} - -fn consider_unification_despite_ambiguity<'cx,'tcx>(selcx: &mut SelectionContext<'cx,'tcx>, - obligation: 
&ProjectionObligation<'tcx>) { - debug!("consider_unification_despite_ambiguity(obligation={:?})", - obligation); - - let def_id = obligation.predicate.projection_ty.trait_ref.def_id; - match selcx.tcx().lang_items.fn_trait_kind(def_id) { - Some(_) => { } - None => { return; } - } - - let infcx = selcx.infcx(); - let self_ty = obligation.predicate.projection_ty.trait_ref.self_ty(); - let self_ty = infcx.shallow_resolve(self_ty); - debug!("consider_unification_despite_ambiguity: self_ty.sty={:?}", - self_ty.sty); - match self_ty.sty { - ty::TyClosure(closure_def_id, ref substs) => { - let closure_typer = selcx.closure_typer(); - let closure_type = closure_typer.closure_type(closure_def_id, substs); - let ty::Binder((_, ret_type)) = - util::closure_trait_ref_and_return_type(infcx.tcx, - def_id, - self_ty, - &closure_type.sig, - util::TupleArgumentsFlag::No); - // We don't have to normalize the return type here - this is only - // reached for TyClosure: Fn inputs where the closure kind is - // still unknown, which should only occur in typeck where the - // closure type is already normalized. - let (ret_type, _) = - infcx.replace_late_bound_regions_with_fresh_var( - obligation.cause.span, - infer::AssocTypeProjection(obligation.predicate.projection_ty.item_name), - &ty::Binder(ret_type)); - - debug!("consider_unification_despite_ambiguity: ret_type={:?}", - ret_type); - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - let obligation_ty = obligation.predicate.ty; - match infer::mk_eqty(infcx, true, origin, obligation_ty, ret_type) { - Ok(()) => { } - Err(_) => { /* ignore errors */ } - } - } - _ => { } - } -} - -/// Normalizes any associated type projections in `value`, replacing -/// them with a fully resolved type where possible. The return value -/// combines the normalized result and any additional obligations that -/// were incurred as result. 
-pub fn normalize<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>, - cause: ObligationCause<'tcx>, - value: &T) - -> Normalized<'tcx, T> - where T : TypeFoldable<'tcx> -{ - normalize_with_depth(selcx, cause, 0, value) -} - -/// As `normalize`, but with a custom depth. -pub fn normalize_with_depth<'a,'b,'tcx,T>(selcx: &'a mut SelectionContext<'b,'tcx>, - cause: ObligationCause<'tcx>, - depth: usize, - value: &T) - -> Normalized<'tcx, T> - where T : TypeFoldable<'tcx> -{ - let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth); - let result = normalizer.fold(value); - - Normalized { - value: result, - obligations: normalizer.obligations, - } -} - -struct AssociatedTypeNormalizer<'a,'b:'a,'tcx:'b> { - selcx: &'a mut SelectionContext<'b,'tcx>, - cause: ObligationCause<'tcx>, - obligations: Vec>, - depth: usize, -} - -impl<'a,'b,'tcx> AssociatedTypeNormalizer<'a,'b,'tcx> { - fn new(selcx: &'a mut SelectionContext<'b,'tcx>, - cause: ObligationCause<'tcx>, - depth: usize) - -> AssociatedTypeNormalizer<'a,'b,'tcx> - { - AssociatedTypeNormalizer { - selcx: selcx, - cause: cause, - obligations: vec!(), - depth: depth, - } - } - - fn fold>(&mut self, value: &T) -> T { - let value = self.selcx.infcx().resolve_type_vars_if_possible(value); - - if !value.has_projection_types() { - value.clone() - } else { - value.fold_with(self) - } - } -} - -impl<'a,'b,'tcx> TypeFolder<'tcx> for AssociatedTypeNormalizer<'a,'b,'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.selcx.tcx() - } - - fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - // We don't want to normalize associated types that occur inside of region - // binders, because they may contain bound regions, and we can't cope with that. - // - // Example: - // - // for<'a> fn(>::A) - // - // Instead of normalizing `>::A` here, we'll - // normalize it when we instantiate those bound regions (which - // should occur eventually). 
- - let ty = ty.super_fold_with(self); - match ty.sty { - ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*) - - // (*) This is kind of hacky -- we need to be able to - // handle normalization within binders because - // otherwise we wind up a need to normalize when doing - // trait matching (since you can have a trait - // obligation like `for<'a> T::B : Fn(&'a int)`), but - // we can't normalize with bound regions in scope. So - // far now we just ignore binders but only normalize - // if all bound regions are gone (and then we still - // have to renormalize whenever we instantiate a - // binder). It would be better to normalize in a - // binding-aware fashion. - - let Normalized { value: ty, obligations } = - normalize_projection_type(self.selcx, - data.clone(), - self.cause.clone(), - self.depth); - self.obligations.extend(obligations); - ty - } - - _ => { - ty - } - } - } -} - -#[derive(Clone)] -pub struct Normalized<'tcx,T> { - pub value: T, - pub obligations: Vec>, -} - -pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>; - -impl<'tcx,T> Normalized<'tcx,T> { - pub fn with(self, value: U) -> Normalized<'tcx,U> { - Normalized { value: value, obligations: self.obligations } - } -} - -/// The guts of `normalize`: normalize a specific projection like `::Item`. The result is always a type (and possibly -/// additional obligations). If ambiguity arises, which implies that -/// there are unresolved type variables in the projection, we will -/// substitute a fresh type variable `$X` and generate a new -/// obligation `::Item == $X` for later. 
-pub fn normalize_projection_type<'a,'b,'tcx>( - selcx: &'a mut SelectionContext<'b,'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>, - depth: usize) - -> NormalizedTy<'tcx> -{ - opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth) - .unwrap_or_else(move || { - // if we bottom out in ambiguity, create a type variable - // and a deferred predicate to resolve this when more type - // information is available. - - let ty_var = selcx.infcx().next_ty_var(); - let projection = ty::Binder(ty::ProjectionPredicate { - projection_ty: projection_ty, - ty: ty_var - }); - let obligation = Obligation::with_depth( - cause, depth + 1, projection.to_predicate()); - Normalized { - value: ty_var, - obligations: vec!(obligation) - } - }) -} - -/// The guts of `normalize`: normalize a specific projection like `::Item`. The result is always a type (and possibly -/// additional obligations). Returns `None` in the case of ambiguity, -/// which indicates that there are unbound type variables. 
-fn opt_normalize_projection_type<'a,'b,'tcx>( - selcx: &'a mut SelectionContext<'b,'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>, - depth: usize) - -> Option> -{ - debug!("normalize_projection_type(\ - projection_ty={:?}, \ - depth={})", - projection_ty, - depth); - - let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone()); - match project_type(selcx, &obligation) { - Ok(ProjectedTy::Progress(projected_ty, mut obligations)) => { - // if projection succeeded, then what we get out of this - // is also non-normalized (consider: it was derived from - // an impl, where-clause etc) and hence we must - // re-normalize it - - debug!("normalize_projection_type: projected_ty={:?} depth={} obligations={:?}", - projected_ty, - depth, - obligations); - - if projected_ty.has_projection_types() { - let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth+1); - let normalized_ty = normalizer.fold(&projected_ty); - - debug!("normalize_projection_type: normalized_ty={:?} depth={}", - normalized_ty, - depth); - - obligations.extend(normalizer.obligations); - Some(Normalized { - value: normalized_ty, - obligations: obligations, - }) - } else { - Some(Normalized { - value: projected_ty, - obligations: obligations, - }) - } - } - Ok(ProjectedTy::NoProgress(projected_ty)) => { - debug!("normalize_projection_type: projected_ty={:?} no progress", - projected_ty); - Some(Normalized { - value: projected_ty, - obligations: vec!() - }) - } - Err(ProjectionTyError::TooManyCandidates) => { - debug!("normalize_projection_type: too many candidates"); - None - } - Err(ProjectionTyError::TraitSelectionError(_)) => { - debug!("normalize_projection_type: ERROR"); - // if we got an error processing the `T as Trait` part, - // just return `ty::err` but add the obligation `T : - // Trait`, which when processed will cause the error to be - // reported later - - Some(normalize_to_error(selcx, projection_ty, cause, depth)) - } - } 
-} - -/// If we are projecting `::Item`, but `T: Trait` does not -/// hold. In various error cases, we cannot generate a valid -/// normalized projection. Therefore, we create an inference variable -/// return an associated obligation that, when fulfilled, will lead to -/// an error. -/// -/// Note that we used to return `TyError` here, but that was quite -/// dubious -- the premise was that an error would *eventually* be -/// reported, when the obligation was processed. But in general once -/// you see a `TyError` you are supposed to be able to assume that an -/// error *has been* reported, so that you can take whatever heuristic -/// paths you want to take. To make things worse, it was possible for -/// cycles to arise, where you basically had a setup like ` -/// as Trait>::Foo == $0`. Here, normalizing ` as -/// Trait>::Foo> to `[type error]` would lead to an obligation of -/// ` as Trait>::Foo`. We are supposed to report -/// an error for this obligation, but we legitimately should not, -/// because it contains `[type error]`. Yuck! (See issue #29857 for -/// one case where this arose.) -fn normalize_to_error<'a,'tcx>(selcx: &mut SelectionContext<'a,'tcx>, - projection_ty: ty::ProjectionTy<'tcx>, - cause: ObligationCause<'tcx>, - depth: usize) - -> NormalizedTy<'tcx> -{ - let trait_ref = projection_ty.trait_ref.to_poly_trait_ref(); - let trait_obligation = Obligation { cause: cause, - recursion_depth: depth, - predicate: trait_ref.to_predicate() }; - let new_value = selcx.infcx().next_ty_var(); - Normalized { - value: new_value, - obligations: vec!(trait_obligation) - } -} - -enum ProjectedTy<'tcx> { - Progress(Ty<'tcx>, Vec>), - NoProgress(Ty<'tcx>), -} - -/// Compute the result of a projection type (if we can). 
-fn project_type<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>) - -> Result, ProjectionTyError<'tcx>> -{ - debug!("project(obligation={:?})", - obligation); - - let recursion_limit = selcx.tcx().sess.recursion_limit.get(); - if obligation.recursion_depth >= recursion_limit { - debug!("project: overflow!"); - report_overflow_error(selcx.infcx(), &obligation, true); - } - - let obligation_trait_ref = - selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate.trait_ref); - - debug!("project: obligation_trait_ref={:?}", obligation_trait_ref); - - if obligation_trait_ref.references_error() { - return Ok(ProjectedTy::Progress(selcx.tcx().types.err, vec!())); - } - - let mut candidates = ProjectionTyCandidateSet { - vec: Vec::new(), - ambiguous: false, - }; - - assemble_candidates_from_param_env(selcx, - obligation, - &obligation_trait_ref, - &mut candidates); - - assemble_candidates_from_trait_def(selcx, - obligation, - &obligation_trait_ref, - &mut candidates); - - if let Err(e) = assemble_candidates_from_impls(selcx, - obligation, - &obligation_trait_ref, - &mut candidates) { - return Err(ProjectionTyError::TraitSelectionError(e)); - } - - debug!("{} candidates, ambiguous={}", - candidates.vec.len(), - candidates.ambiguous); - - // Inherent ambiguity that prevents us from even enumerating the - // candidates. - if candidates.ambiguous { - return Err(ProjectionTyError::TooManyCandidates); - } - - // Drop duplicates. - // - // Note: `candidates.vec` seems to be on the critical path of the - // compiler. Replacing it with an hash set was also tried, which would - // render the following dedup unnecessary. It led to cleaner code but - // prolonged compiling time of `librustc` from 5m30s to 6m in one test, or - // ~9% performance lost. 
- if candidates.vec.len() > 1 { - let mut i = 0; - while i < candidates.vec.len() { - let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]); - if has_dup { - candidates.vec.swap_remove(i); - } else { - i += 1; - } - } - } - - // Prefer where-clauses. As in select, if there are multiple - // candidates, we prefer where-clause candidates over impls. This - // may seem a bit surprising, since impls are the source of - // "truth" in some sense, but in fact some of the impls that SEEM - // applicable are not, because of nested obligations. Where - // clauses are the safer choice. See the comment on - // `select::SelectionCandidate` and #21974 for more details. - if candidates.vec.len() > 1 { - debug!("retaining param-env candidates only from {:?}", candidates.vec); - candidates.vec.retain(|c| match *c { - ProjectionTyCandidate::ParamEnv(..) => true, - ProjectionTyCandidate::Impl(..) | - ProjectionTyCandidate::Closure(..) | - ProjectionTyCandidate::TraitDef(..) | - ProjectionTyCandidate::FnPointer(..) => false, - }); - debug!("resulting candidate set: {:?}", candidates.vec); - if candidates.vec.len() != 1 { - return Err(ProjectionTyError::TooManyCandidates); - } - } - - assert!(candidates.vec.len() <= 1); - - match candidates.vec.pop() { - Some(candidate) => { - let (ty, obligations) = confirm_candidate(selcx, obligation, candidate); - Ok(ProjectedTy::Progress(ty, obligations)) - } - None => { - Ok(ProjectedTy::NoProgress(selcx.tcx().mk_projection( - obligation.predicate.trait_ref.clone(), - obligation.predicate.item_name))) - } - } -} - -/// The first thing we have to do is scan through the parameter -/// environment to see whether there are any projection predicates -/// there that can answer this question. 
-fn assemble_candidates_from_param_env<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>, - candidate_set: &mut ProjectionTyCandidateSet<'tcx>) -{ - debug!("assemble_candidates_from_param_env(..)"); - let env_predicates = selcx.param_env().caller_bounds.iter().cloned(); - assemble_candidates_from_predicates(selcx, - obligation, - obligation_trait_ref, - candidate_set, - ProjectionTyCandidate::ParamEnv, - env_predicates); -} - -/// In the case of a nested projection like <::FooT as Bar>::BarT, we may find -/// that the definition of `Foo` has some clues: -/// -/// ``` -/// trait Foo { -/// type FooT : Bar -/// } -/// ``` -/// -/// Here, for example, we could conclude that the result is `i32`. -fn assemble_candidates_from_trait_def<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>, - candidate_set: &mut ProjectionTyCandidateSet<'tcx>) -{ - debug!("assemble_candidates_from_trait_def(..)"); - - // Check whether the self-type is itself a projection. - let trait_ref = match obligation_trait_ref.self_ty().sty { - ty::TyProjection(ref data) => data.trait_ref.clone(), - ty::TyInfer(ty::TyVar(_)) => { - // If the self-type is an inference variable, then it MAY wind up - // being a projected type, so induce an ambiguity. - candidate_set.ambiguous = true; - return; - } - _ => { return; } - }; - - // If so, extract what we know from the trait and try to come up with a good answer. 
- let trait_predicates = selcx.tcx().lookup_predicates(trait_ref.def_id); - let bounds = trait_predicates.instantiate(selcx.tcx(), trait_ref.substs); - let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates.into_vec()); - assemble_candidates_from_predicates(selcx, - obligation, - obligation_trait_ref, - candidate_set, - ProjectionTyCandidate::TraitDef, - bounds) -} - -fn assemble_candidates_from_predicates<'cx,'tcx,I>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>, - candidate_set: &mut ProjectionTyCandidateSet<'tcx>, - ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>, - env_predicates: I) - where I: Iterator> -{ - debug!("assemble_candidates_from_predicates(obligation={:?})", - obligation); - let infcx = selcx.infcx(); - for predicate in env_predicates { - debug!("assemble_candidates_from_predicates: predicate={:?}", - predicate); - match predicate { - ty::Predicate::Projection(ref data) => { - let same_name = data.item_name() == obligation.predicate.item_name; - - let is_match = same_name && infcx.probe(|_| { - let origin = TypeOrigin::Misc(obligation.cause.span); - let data_poly_trait_ref = - data.to_poly_trait_ref(); - let obligation_poly_trait_ref = - obligation_trait_ref.to_poly_trait_ref(); - infcx.sub_poly_trait_refs(false, - origin, - data_poly_trait_ref, - obligation_poly_trait_ref).is_ok() - }); - - debug!("assemble_candidates_from_predicates: candidate={:?} \ - is_match={} same_name={}", - data, is_match, same_name); - - if is_match { - candidate_set.vec.push(ctor(data.clone())); - } - } - _ => { } - } - } -} - -fn assemble_candidates_from_object_type<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>, - candidate_set: &mut ProjectionTyCandidateSet<'tcx>) -{ - let self_ty = obligation_trait_ref.self_ty(); - let object_ty = 
selcx.infcx().shallow_resolve(self_ty); - debug!("assemble_candidates_from_object_type(object_ty={:?})", - object_ty); - let data = match object_ty.sty { - ty::TyTrait(ref data) => data, - _ => { - selcx.tcx().sess.span_bug( - obligation.cause.span, - &format!("assemble_candidates_from_object_type called with non-object: {:?}", - object_ty)); - } - }; - let projection_bounds = data.projection_bounds_with_self_ty(selcx.tcx(), object_ty); - let env_predicates = projection_bounds.iter() - .map(|p| p.to_predicate()) - .collect(); - let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates); - assemble_candidates_from_predicates(selcx, - obligation, - obligation_trait_ref, - candidate_set, - ProjectionTyCandidate::ParamEnv, - env_predicates) -} - -fn assemble_candidates_from_impls<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - obligation_trait_ref: &ty::TraitRef<'tcx>, - candidate_set: &mut ProjectionTyCandidateSet<'tcx>) - -> Result<(), SelectionError<'tcx>> -{ - // If we are resolving `>::Item == Type`, - // start out by selecting the predicate `T as TraitRef<...>`: - let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); - let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); - let vtable = match selcx.select(&trait_obligation) { - Ok(Some(vtable)) => vtable, - Ok(None) => { - candidate_set.ambiguous = true; - return Ok(()); - } - Err(e) => { - debug!("assemble_candidates_from_impls: selection error {:?}", - e); - return Err(e); - } - }; - - match vtable { - super::VtableImpl(data) => { - debug!("assemble_candidates_from_impls: impl candidate {:?}", - data); - - candidate_set.vec.push( - ProjectionTyCandidate::Impl(data)); - } - super::VtableObject(_) => { - assemble_candidates_from_object_type( - selcx, obligation, obligation_trait_ref, candidate_set); - } - super::VtableClosure(data) => { - candidate_set.vec.push( - ProjectionTyCandidate::Closure(data)); - } - 
super::VtableFnPointer(fn_type) => { - candidate_set.vec.push( - ProjectionTyCandidate::FnPointer(fn_type)); - } - super::VtableParam(..) => { - // This case tell us nothing about the value of an - // associated type. Consider: - // - // ``` - // trait SomeTrait { type Foo; } - // fn foo(...) { } - // ``` - // - // If the user writes `::Foo`, then the `T - // : SomeTrait` binding does not help us decide what the - // type `Foo` is (at least, not more specifically than - // what we already knew). - // - // But wait, you say! What about an example like this: - // - // ``` - // fn bar>(...) { ... } - // ``` - // - // Doesn't the `T : Sometrait` predicate help - // resolve `T::Foo`? And of course it does, but in fact - // that single predicate is desugared into two predicates - // in the compiler: a trait predicate (`T : SomeTrait`) and a - // projection. And the projection where clause is handled - // in `assemble_candidates_from_param_env`. - } - super::VtableDefaultImpl(..) | - super::VtableBuiltin(..) => { - // These traits have no associated types. 
- selcx.tcx().sess.span_bug( - obligation.cause.span, - &format!("Cannot project an associated type from `{:?}`", - vtable)); - } - } - - Ok(()) -} - -fn confirm_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - candidate: ProjectionTyCandidate<'tcx>) - -> (Ty<'tcx>, Vec>) -{ - debug!("confirm_candidate(candidate={:?}, obligation={:?})", - candidate, - obligation); - - match candidate { - ProjectionTyCandidate::ParamEnv(poly_projection) | - ProjectionTyCandidate::TraitDef(poly_projection) => { - confirm_param_env_candidate(selcx, obligation, poly_projection) - } - - ProjectionTyCandidate::Impl(impl_vtable) => { - confirm_impl_candidate(selcx, obligation, impl_vtable) - } - - ProjectionTyCandidate::Closure(closure_vtable) => { - confirm_closure_candidate(selcx, obligation, closure_vtable) - } - - ProjectionTyCandidate::FnPointer(fn_type) => { - confirm_fn_pointer_candidate(selcx, obligation, fn_type) - } - } -} - -fn confirm_fn_pointer_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - fn_type: Ty<'tcx>) - -> (Ty<'tcx>, Vec>) -{ - let fn_type = selcx.infcx().shallow_resolve(fn_type); - let sig = fn_type.fn_sig(); - confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes) -} - -fn confirm_closure_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>) - -> (Ty<'tcx>, Vec>) -{ - let closure_typer = selcx.closure_typer(); - let closure_type = closure_typer.closure_type(vtable.closure_def_id, &vtable.substs); - let Normalized { - value: closure_type, - mut obligations - } = normalize_with_depth(selcx, - obligation.cause.clone(), - obligation.recursion_depth+1, - &closure_type); - let (ty, mut cc_obligations) = confirm_callable_candidate(selcx, - obligation, - &closure_type.sig, - util::TupleArgumentsFlag::No); - 
obligations.append(&mut cc_obligations); - (ty, obligations) -} - -fn confirm_callable_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - fn_sig: &ty::PolyFnSig<'tcx>, - flag: util::TupleArgumentsFlag) - -> (Ty<'tcx>, Vec>) -{ - let tcx = selcx.tcx(); - - debug!("confirm_callable_candidate({:?},{:?})", - obligation, - fn_sig); - - // the `Output` associated type is declared on `FnOnce` - let fn_once_def_id = tcx.lang_items.fn_once_trait().unwrap(); - - // Note: we unwrap the binder here but re-create it below (1) - let ty::Binder((trait_ref, ret_type)) = - util::closure_trait_ref_and_return_type(tcx, - fn_once_def_id, - obligation.predicate.trait_ref.self_ty(), - fn_sig, - flag); - - let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here - projection_ty: ty::ProjectionTy { - trait_ref: trait_ref, - item_name: token::intern(FN_OUTPUT_NAME), - }, - ty: ret_type - }); - - confirm_param_env_candidate(selcx, obligation, predicate) -} - -fn confirm_param_env_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - poly_projection: ty::PolyProjectionPredicate<'tcx>) - -> (Ty<'tcx>, Vec>) -{ - let infcx = selcx.infcx(); - - let projection = - infcx.replace_late_bound_regions_with_fresh_var( - obligation.cause.span, - infer::LateBoundRegionConversionTime::HigherRankedType, - &poly_projection).0; - - assert_eq!(projection.projection_ty.item_name, - obligation.predicate.item_name); - - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - match infcx.eq_trait_refs(false, - origin, - obligation.predicate.trait_ref.clone(), - projection.projection_ty.trait_ref.clone()) { - Ok(()) => { } - Err(e) => { - selcx.tcx().sess.span_bug( - obligation.cause.span, - &format!("Failed to unify `{:?}` and `{:?}` in projection: {}", - obligation, - projection, - e)); - } - } - - (projection.ty, vec!()) -} - -fn 
confirm_impl_candidate<'cx,'tcx>( - selcx: &mut SelectionContext<'cx,'tcx>, - obligation: &ProjectionTyObligation<'tcx>, - impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>) - -> (Ty<'tcx>, Vec>) -{ - // there don't seem to be nicer accessors to these: - let impl_or_trait_items_map = selcx.tcx().impl_or_trait_items.borrow(); - - // Look for the associated type in the impl - for impl_item in &selcx.tcx().impl_items.borrow()[&impl_vtable.impl_def_id] { - if let ty::TypeTraitItem(ref assoc_ty) = impl_or_trait_items_map[&impl_item.def_id()] { - if assoc_ty.name == obligation.predicate.item_name { - return (assoc_ty.ty.unwrap().subst(selcx.tcx(), &impl_vtable.substs), - impl_vtable.nested); - } - } - } - - // It is not in the impl - get the default from the trait. - let trait_ref = obligation.predicate.trait_ref; - for trait_item in selcx.tcx().trait_items(trait_ref.def_id).iter() { - if let &ty::TypeTraitItem(ref assoc_ty) = trait_item { - if assoc_ty.name == obligation.predicate.item_name { - if let Some(ty) = assoc_ty.ty { - return (ty.subst(selcx.tcx(), trait_ref.substs), - impl_vtable.nested); - } else { - // This means that the impl is missing a - // definition for the associated type. This error - // ought to be reported by the type checker method - // `check_impl_items_against_trait`, so here we - // just return TyError. - debug!("confirm_impl_candidate: no associated type {:?} for {:?}", - assoc_ty.name, - trait_ref); - return (selcx.tcx().types.err, vec!()); - } - } - } - } - - selcx.tcx().sess.span_bug(obligation.cause.span, - &format!("No associated type for {:?}", - trait_ref)); -} diff --git a/src/librustc/middle/traits/select.rs b/src/librustc/middle/traits/select.rs deleted file mode 100644 index 75992b6849b01..0000000000000 --- a/src/librustc/middle/traits/select.rs +++ /dev/null @@ -1,3004 +0,0 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! See `README.md` for high-level documentation - -pub use self::MethodMatchResult::*; -pub use self::MethodMatchedData::*; -use self::SelectionCandidate::*; -use self::BuiltinBoundConditions::*; -use self::EvaluationResult::*; - -use super::coherence; -use super::DerivedObligationCause; -use super::project; -use super::project::{normalize_with_depth, Normalized}; -use super::{PredicateObligation, TraitObligation, ObligationCause}; -use super::report_overflow_error; -use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation}; -use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch}; -use super::{ObjectCastObligation, Obligation}; -use super::TraitNotObjectSafe; -use super::Selection; -use super::SelectionResult; -use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, - VtableFnPointer, VtableObject, VtableDefaultImpl}; -use super::{VtableImplData, VtableObjectData, VtableBuiltinData, - VtableClosureData, VtableDefaultImplData}; -use super::object_safety; -use super::util; - -use middle::def_id::DefId; -use middle::infer; -use middle::infer::{InferCtxt, TypeFreshener, TypeOrigin}; -use middle::subst::{Subst, Substs, TypeSpace}; -use middle::ty::{self, ToPredicate, ToPolyTraitRef, Ty, TypeFoldable}; -use middle::ty::fast_reject; -use middle::ty::relate::TypeRelation; - -use std::cell::RefCell; -use std::fmt; -use std::rc::Rc; -use syntax::abi; -use rustc_front::hir; -use util::common::ErrorReported; -use util::nodemap::FnvHashMap; - -pub struct SelectionContext<'cx, 'tcx:'cx> { - infcx: &'cx InferCtxt<'cx, 'tcx>, - - /// Freshener used specifically for skolemizing entries on the - /// obligation stack. 
This ensures that all entries on the stack - /// at one time will have the same set of skolemized entries, - /// which is important for checking for trait bounds that - /// recursively require themselves. - freshener: TypeFreshener<'cx, 'tcx>, - - /// If true, indicates that the evaluation should be conservative - /// and consider the possibility of types outside this crate. - /// This comes up primarily when resolving ambiguity. Imagine - /// there is some trait reference `$0 : Bar` where `$0` is an - /// inference variable. If `intercrate` is true, then we can never - /// say for sure that this reference is not implemented, even if - /// there are *no impls at all for `Bar`*, because `$0` could be - /// bound to some type that in a downstream crate that implements - /// `Bar`. This is the suitable mode for coherence. Elsewhere, - /// though, we set this to false, because we are only interested - /// in types that the user could actually have written --- in - /// other words, we consider `$0 : Bar` to be unimplemented if - /// there is no type that the user could *actually name* that - /// would satisfy it. This avoids crippling inference, basically. - - intercrate: bool, -} - -// A stack that walks back up the stack frame. -struct TraitObligationStack<'prev, 'tcx: 'prev> { - obligation: &'prev TraitObligation<'tcx>, - - /// Trait ref from `obligation` but skolemized with the - /// selection-context's freshener. Used to check for recursion. 
- fresh_trait_ref: ty::PolyTraitRef<'tcx>, - - previous: TraitObligationStackList<'prev, 'tcx>, -} - -#[derive(Clone)] -pub struct SelectionCache<'tcx> { - hashmap: RefCell, - SelectionResult<'tcx, SelectionCandidate<'tcx>>>>, -} - -pub enum MethodMatchResult { - MethodMatched(MethodMatchedData), - MethodAmbiguous(/* list of impls that could apply */ Vec), - MethodDidNotMatch, -} - -#[derive(Copy, Clone, Debug)] -pub enum MethodMatchedData { - // In the case of a precise match, we don't really need to store - // how the match was found. So don't. - PreciseMethodMatch, - - // In the case of a coercion, we need to know the precise impl so - // that we can determine the type to which things were coerced. - CoerciveMethodMatch(/* impl we matched */ DefId) -} - -/// The selection process begins by considering all impls, where -/// clauses, and so forth that might resolve an obligation. Sometimes -/// we'll be able to say definitively that (e.g.) an impl does not -/// apply to the obligation: perhaps it is defined for `usize` but the -/// obligation is for `int`. In that case, we drop the impl out of the -/// list. But the other cases are considered *candidates*. -/// -/// For selection to succeed, there must be exactly one matching -/// candidate. If the obligation is fully known, this is guaranteed -/// by coherence. However, if the obligation contains type parameters -/// or variables, there may be multiple such impls. -/// -/// It is not a real problem if multiple matching impls exist because -/// of type variables - it just means the obligation isn't sufficiently -/// elaborated. In that case we report an ambiguity, and the caller can -/// try again after more type information has been gathered or report a -/// "type annotations required" error. 
-/// -/// However, with type parameters, this can be a real problem - type -/// parameters don't unify with regular types, but they *can* unify -/// with variables from blanket impls, and (unless we know its bounds -/// will always be satisfied) picking the blanket impl will be wrong -/// for at least *some* substitutions. To make this concrete, if we have -/// -/// trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; } -/// impl AsDebug for T { -/// type Out = T; -/// fn debug(self) -> fmt::Debug { self } -/// } -/// fn foo(t: T) { println!("{:?}", ::debug(t)); } -/// -/// we can't just use the impl to resolve the obligation -/// - a type from another crate (that doesn't implement fmt::Debug) could -/// implement AsDebug. -/// -/// Because where-clauses match the type exactly, multiple clauses can -/// only match if there are unresolved variables, and we can mostly just -/// report this ambiguity in that case. This is still a problem - we can't -/// *do anything* with ambiguities that involve only regions. This is issue -/// #21974. -/// -/// If a single where-clause matches and there are no inference -/// variables left, then it definitely matches and we can just select -/// it. -/// -/// In fact, we even select the where-clause when the obligation contains -/// inference variables. The can lead to inference making "leaps of logic", -/// for example in this situation: -/// -/// pub trait Foo { fn foo(&self) -> T; } -/// impl Foo<()> for T { fn foo(&self) { } } -/// impl Foo for bool { fn foo(&self) -> bool { *self } } -/// -/// pub fn foo(t: T) where T: Foo { -/// println!("{:?}", >::foo(&t)); -/// } -/// fn main() { foo(false); } -/// -/// Here the obligation > can be matched by both the blanket -/// impl and the where-clause. We select the where-clause and unify $0=bool, -/// so the program prints "false". However, if the where-clause is omitted, -/// the blanket impl is selected, we unify $0=(), and the program prints -/// "()". 
-/// -/// Exactly the same issues apply to projection and object candidates, except -/// that we can have both a projection candidate and a where-clause candidate -/// for the same obligation. In that case either would do (except that -/// different "leaps of logic" would occur if inference variables are -/// present), and we just pick the where-clause. This is, for example, -/// required for associated types to work in default impls, as the bounds -/// are visible both as projection bounds and as where-clauses from the -/// parameter environment. -#[derive(PartialEq,Eq,Debug,Clone)] -enum SelectionCandidate<'tcx> { - BuiltinCandidate(ty::BuiltinBound), - ParamCandidate(ty::PolyTraitRef<'tcx>), - ImplCandidate(DefId), - DefaultImplCandidate(DefId), - DefaultImplObjectCandidate(DefId), - - /// This is a trait matching with a projected type as `Self`, and - /// we found an applicable bound in the trait definition. - ProjectionCandidate, - - /// Implementation of a `Fn`-family trait by one of the - /// anonymous types generated for a `||` expression. - ClosureCandidate(/* closure */ DefId, &'tcx ty::ClosureSubsts<'tcx>), - - /// Implementation of a `Fn`-family trait by one of the anonymous - /// types generated for a fn pointer type (e.g., `fn(int)->int`) - FnPointerCandidate, - - ObjectCandidate, - - BuiltinObjectCandidate, - - BuiltinUnsizeCandidate, -} - -struct SelectionCandidateSet<'tcx> { - // a list of candidates that definitely apply to the current - // obligation (meaning: types unify). - vec: Vec>, - - // if this is true, then there were candidates that might or might - // not have applied, but we couldn't tell. This occurs when some - // of the input types are type variables, in which case there are - // various "builtin" rules that might or might not trigger. 
- ambiguous: bool, -} - -enum BuiltinBoundConditions<'tcx> { - If(ty::Binder>>), - ParameterBuiltin, - AmbiguousBuiltin -} - -#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] -/// The result of trait evaluation. The order is important -/// here as the evaluation of a list is the maximum of the -/// evaluations. -enum EvaluationResult { - /// Evaluation successful - EvaluatedToOk, - /// Evaluation failed because of recursion - treated as ambiguous - EvaluatedToUnknown, - /// Evaluation is known to be ambiguous - EvaluatedToAmbig, - /// Evaluation failed - EvaluatedToErr, -} - -#[derive(Clone)] -pub struct EvaluationCache<'tcx> { - hashmap: RefCell, EvaluationResult>> -} - -impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { - pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) - -> SelectionContext<'cx, 'tcx> { - SelectionContext { - infcx: infcx, - freshener: infcx.freshener(), - intercrate: false, - } - } - - pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) - -> SelectionContext<'cx, 'tcx> { - SelectionContext { - infcx: infcx, - freshener: infcx.freshener(), - intercrate: true, - } - } - - pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> { - self.infcx - } - - pub fn tcx(&self) -> &'cx ty::ctxt<'tcx> { - self.infcx.tcx - } - - pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'cx, 'tcx> { - self.infcx.param_env() - } - - pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> { - self.infcx - } - - /////////////////////////////////////////////////////////////////////////// - // Selection - // - // The selection phase tries to identify *how* an obligation will - // be resolved. For example, it will identify which impl or - // parameter bound is to be used. The process can be inconclusive - // if the self type in the obligation is not fully inferred. Selection - // can result in an error in one of two ways: - // - // 1. If no applicable impl or parameter bound can be found. - // 2. 
If the output type parameters in the obligation do not match - // those specified by the impl/bound. For example, if the obligation - // is `Vec:Iterable`, but the impl specifies - // `impl Iterable for Vec`, than an error would result. - - /// Attempts to satisfy the obligation. If successful, this will affect the surrounding - /// type environment by performing unification. - pub fn select(&mut self, obligation: &TraitObligation<'tcx>) - -> SelectionResult<'tcx, Selection<'tcx>> { - debug!("select({:?})", obligation); - assert!(!obligation.predicate.has_escaping_regions()); - - let dep_node = obligation.dep_node(self.tcx()); - let _task = self.tcx().dep_graph.in_task(dep_node); - - let stack = self.push_stack(TraitObligationStackList::empty(), obligation); - match try!(self.candidate_from_obligation(&stack)) { - None => { - self.consider_unification_despite_ambiguity(obligation); - Ok(None) - } - Some(candidate) => Ok(Some(try!(self.confirm_candidate(obligation, candidate)))), - } - } - - /// In the particular case of unboxed closure obligations, we can - /// sometimes do some amount of unification for the - /// argument/return types even though we can't yet fully match obligation. - /// The particular case we are interesting in is an obligation of the form: - /// - /// C : FnFoo - /// - /// where `C` is an unboxed closure type and `FnFoo` is one of the - /// `Fn` traits. Because we know that users cannot write impls for closure types - /// themselves, the only way that `C : FnFoo` can fail to match is under two - /// conditions: - /// - /// 1. The closure kind for `C` is not yet known, because inference isn't complete. - /// 2. The closure kind for `C` *is* known, but doesn't match what is needed. - /// For example, `C` may be a `FnOnce` closure, but a `Fn` closure is needed. - /// - /// In either case, we always know what argument types are - /// expected by `C`, no matter what kind of `Fn` trait it - /// eventually matches. 
So we can go ahead and unify the argument - /// types, even though the end result is ambiguous. - /// - /// Note that this is safe *even if* the trait would never be - /// matched (case 2 above). After all, in that case, an error will - /// result, so it kind of doesn't matter what we do --- unifying - /// the argument types can only be helpful to the user, because - /// once they patch up the kind of closure that is expected, the - /// argment types won't really change. - fn consider_unification_despite_ambiguity(&mut self, obligation: &TraitObligation<'tcx>) { - // Is this a `C : FnFoo(...)` trait reference for some trait binding `FnFoo`? - match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { - Some(_) => { } - None => { return; } - } - - // Is the self-type a closure type? We ignore bindings here - // because if it is a closure type, it must be a closure type from - // within this current fn, and hence none of the higher-ranked - // lifetimes can appear inside the self-type. - let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); - let (closure_def_id, substs) = match self_ty.sty { - ty::TyClosure(id, ref substs) => (id, substs), - _ => { return; } - }; - assert!(!substs.has_escaping_regions()); - - // It is OK to call the unnormalized variant here - this is only - // reached for TyClosure: Fn inputs where the closure kind is - // still unknown, which should only occur in typeck where the - // closure type is already normalized. - let closure_trait_ref = self.closure_trait_ref_unnormalized(obligation, - closure_def_id, - substs); - - match self.confirm_poly_trait_refs(obligation.cause.clone(), - obligation.predicate.to_poly_trait_ref(), - closure_trait_ref) { - Ok(()) => { } - Err(_) => { /* Silently ignore errors. 
*/ } - } - } - - /////////////////////////////////////////////////////////////////////////// - // EVALUATION - // - // Tests whether an obligation can be selected or whether an impl - // can be applied to particular types. It skips the "confirmation" - // step and hence completely ignores output type parameters. - // - // The result is "true" if the obligation *may* hold and "false" if - // we can be sure it does not. - - - /// Evaluates whether the obligation `obligation` can be satisfied (by any means). - pub fn evaluate_obligation(&mut self, - obligation: &PredicateObligation<'tcx>) - -> bool - { - debug!("evaluate_obligation({:?})", - obligation); - - self.infcx.probe(|_| { - self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) - .may_apply() - }) - } - - /// Evaluates whether the obligation `obligation` can be satisfied, - /// and returns `false` if not certain. However, this is not entirely - /// accurate if inference variables are involved. - pub fn evaluate_obligation_conservatively(&mut self, - obligation: &PredicateObligation<'tcx>) - -> bool - { - debug!("evaluate_obligation_conservatively({:?})", - obligation); - - self.infcx.probe(|_| { - self.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) - == EvaluatedToOk - }) - } - - /// Evaluates the predicates in `predicates` recursively. Note that - /// this applies projections in the predicates, and therefore - /// is run within an inference probe. 
- fn evaluate_predicates_recursively<'a,'o,I>(&mut self, - stack: TraitObligationStackList<'o, 'tcx>, - predicates: I) - -> EvaluationResult - where I : Iterator>, 'tcx:'a - { - let mut result = EvaluatedToOk; - for obligation in predicates { - let eval = self.evaluate_predicate_recursively(stack, obligation); - debug!("evaluate_predicate_recursively({:?}) = {:?}", - obligation, eval); - match eval { - EvaluatedToErr => { return EvaluatedToErr; } - EvaluatedToAmbig => { result = EvaluatedToAmbig; } - EvaluatedToUnknown => { - if result < EvaluatedToUnknown { - result = EvaluatedToUnknown; - } - } - EvaluatedToOk => { } - } - } - result - } - - fn evaluate_predicate_recursively<'o>(&mut self, - previous_stack: TraitObligationStackList<'o, 'tcx>, - obligation: &PredicateObligation<'tcx>) - -> EvaluationResult - { - debug!("evaluate_predicate_recursively({:?})", - obligation); - - // Check the cache from the tcx of predicates that we know - // have been proven elsewhere. This cache only contains - // predicates that are global in scope and hence unaffected by - // the current environment. - if self.tcx().fulfilled_predicates.borrow().is_duplicate(&obligation.predicate) { - return EvaluatedToOk; - } - - match obligation.predicate { - ty::Predicate::Trait(ref t) => { - assert!(!t.has_escaping_regions()); - let obligation = obligation.with(t.clone()); - self.evaluate_obligation_recursively(previous_stack, &obligation) - } - - ty::Predicate::Equate(ref p) => { - // does this code ever run? - match self.infcx.equality_predicate(obligation.cause.span, p) { - Ok(()) => EvaluatedToOk, - Err(_) => EvaluatedToErr - } - } - - ty::Predicate::WellFormed(ty) => { - match ty::wf::obligations(self.infcx, obligation.cause.body_id, - ty, obligation.cause.span) { - Some(obligations) => - self.evaluate_predicates_recursively(previous_stack, obligations.iter()), - None => - EvaluatedToAmbig, - } - } - - ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) 
=> { - // we do not consider region relationships when - // evaluating trait matches - EvaluatedToOk - } - - ty::Predicate::ObjectSafe(trait_def_id) => { - if object_safety::is_object_safe(self.tcx(), trait_def_id) { - EvaluatedToOk - } else { - EvaluatedToErr - } - } - - ty::Predicate::Projection(ref data) => { - let project_obligation = obligation.with(data.clone()); - match project::poly_project_and_unify_type(self, &project_obligation) { - Ok(Some(subobligations)) => { - self.evaluate_predicates_recursively(previous_stack, - subobligations.iter()) - } - Ok(None) => { - EvaluatedToAmbig - } - Err(_) => { - EvaluatedToErr - } - } - } - } - } - - fn evaluate_obligation_recursively<'o>(&mut self, - previous_stack: TraitObligationStackList<'o, 'tcx>, - obligation: &TraitObligation<'tcx>) - -> EvaluationResult - { - debug!("evaluate_obligation_recursively({:?})", - obligation); - - let stack = self.push_stack(previous_stack, obligation); - let fresh_trait_ref = stack.fresh_trait_ref; - if let Some(result) = self.check_evaluation_cache(fresh_trait_ref) { - debug!("CACHE HIT: EVAL({:?})={:?}", - fresh_trait_ref, - result); - return result; - } - - let result = self.evaluate_stack(&stack); - - debug!("CACHE MISS: EVAL({:?})={:?}", - fresh_trait_ref, - result); - self.insert_evaluation_cache(fresh_trait_ref, result); - - result - } - - fn evaluate_stack<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>) - -> EvaluationResult - { - // In intercrate mode, whenever any of the types are unbound, - // there can always be an impl. Even if there are no impls in - // this crate, perhaps the type would be unified with - // something from another crate that does provide an impl. - // - // In intracrate mode, we must still be conservative. The reason is - // that we want to avoid cycles. Imagine an impl like: - // - // impl Eq for Vec - // - // and a trait reference like `$0 : Eq` where `$0` is an - // unbound variable. 
When we evaluate this trait-reference, we - // will unify `$0` with `Vec<$1>` (for some fresh variable - // `$1`), on the condition that `$1 : Eq`. We will then wind - // up with many candidates (since that are other `Eq` impls - // that apply) and try to winnow things down. This results in - // a recursive evaluation that `$1 : Eq` -- as you can - // imagine, this is just where we started. To avoid that, we - // check for unbound variables and return an ambiguous (hence possible) - // match if we've seen this trait before. - // - // This suffices to allow chains like `FnMut` implemented in - // terms of `Fn` etc, but we could probably make this more - // precise still. - let input_types = stack.fresh_trait_ref.0.input_types(); - let unbound_input_types = input_types.iter().any(|ty| ty.is_fresh()); - if unbound_input_types && self.intercrate { - debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", - stack.fresh_trait_ref); - return EvaluatedToAmbig; - } - if unbound_input_types && - stack.iter().skip(1).any( - |prev| self.match_fresh_trait_refs(&stack.fresh_trait_ref, - &prev.fresh_trait_ref)) - { - debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up", - stack.fresh_trait_ref); - return EvaluatedToUnknown; - } - - // If there is any previous entry on the stack that precisely - // matches this obligation, then we can assume that the - // obligation is satisfied for now (still all other conditions - // must be met of course). One obvious case this comes up is - // marker traits like `Send`. Think of a linked list: - // - // struct List { data: T, next: Option>> { - // - // `Box>` will be `Send` if `T` is `Send` and - // `Option>>` is `Send`, and in turn - // `Option>>` is `Send` if `Box>` is - // `Send`. - // - // Note that we do this comparison using the `fresh_trait_ref` - // fields. 
Because these have all been skolemized using - // `self.freshener`, we can be sure that (a) this will not - // affect the inferencer state and (b) that if we see two - // skolemized types with the same index, they refer to the - // same unbound type variable. - if - stack.iter() - .skip(1) // skip top-most frame - .any(|prev| stack.fresh_trait_ref == prev.fresh_trait_ref) - { - debug!("evaluate_stack({:?}) --> recursive", - stack.fresh_trait_ref); - return EvaluatedToOk; - } - - match self.candidate_from_obligation(stack) { - Ok(Some(c)) => self.evaluate_candidate(stack, &c), - Ok(None) => EvaluatedToAmbig, - Err(..) => EvaluatedToErr - } - } - - /// Further evaluate `candidate` to decide whether all type parameters match and whether nested - /// obligations are met. Returns true if `candidate` remains viable after this further - /// scrutiny. - fn evaluate_candidate<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>, - candidate: &SelectionCandidate<'tcx>) - -> EvaluationResult - { - debug!("evaluate_candidate: depth={} candidate={:?}", - stack.obligation.recursion_depth, candidate); - let result = self.infcx.probe(|_| { - let candidate = (*candidate).clone(); - match self.confirm_candidate(stack.obligation, candidate) { - Ok(selection) => { - self.evaluate_predicates_recursively( - stack.list(), - selection.nested_obligations().iter()) - } - Err(..) 
=> EvaluatedToErr - } - }); - debug!("evaluate_candidate: depth={} result={:?}", - stack.obligation.recursion_depth, result); - result - } - - fn pick_evaluation_cache(&self) -> &EvaluationCache<'tcx> { - // see comment in `pick_candidate_cache` - if self.intercrate || - !self.param_env().caller_bounds.is_empty() - { - &self.param_env().evaluation_cache - } else - { - &self.tcx().evaluation_cache - } - } - - fn check_evaluation_cache(&self, trait_ref: ty::PolyTraitRef<'tcx>) - -> Option - { - let cache = self.pick_evaluation_cache(); - cache.hashmap.borrow().get(&trait_ref).cloned() - } - - fn insert_evaluation_cache(&mut self, - trait_ref: ty::PolyTraitRef<'tcx>, - result: EvaluationResult) - { - // Avoid caching results that depend on more than just the trait-ref: - // The stack can create EvaluatedToUnknown, and closure signatures - // being yet uninferred can create "spurious" EvaluatedToAmbig - // and EvaluatedToOk. - if result == EvaluatedToUnknown || - ((result == EvaluatedToAmbig || result == EvaluatedToOk) - && trait_ref.has_closure_types()) - { - return; - } - - let cache = self.pick_evaluation_cache(); - cache.hashmap.borrow_mut().insert(trait_ref, result); - } - - /////////////////////////////////////////////////////////////////////////// - // CANDIDATE ASSEMBLY - // - // The selection process begins by examining all in-scope impls, - // caller obligations, and so forth and assembling a list of - // candidates. See `README.md` and the `Candidate` type for more - // details. - - fn candidate_from_obligation<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>) - -> SelectionResult<'tcx, SelectionCandidate<'tcx>> - { - // Watch out for overflow. This intentionally bypasses (and does - // not update) the cache. - let recursion_limit = self.infcx.tcx.sess.recursion_limit.get(); - if stack.obligation.recursion_depth >= recursion_limit { - report_overflow_error(self.infcx(), &stack.obligation, true); - } - - // Check the cache. 
Note that we skolemize the trait-ref - // separately rather than using `stack.fresh_trait_ref` -- this - // is because we want the unbound variables to be replaced - // with fresh skolemized types starting from index 0. - let cache_fresh_trait_pred = - self.infcx.freshen(stack.obligation.predicate.clone()); - debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", - cache_fresh_trait_pred, - stack); - assert!(!stack.obligation.predicate.has_escaping_regions()); - - match self.check_candidate_cache(&cache_fresh_trait_pred) { - Some(c) => { - debug!("CACHE HIT: SELECT({:?})={:?}", - cache_fresh_trait_pred, - c); - return c; - } - None => { } - } - - // If no match, compute result and insert into cache. - let candidate = self.candidate_from_obligation_no_cache(stack); - - if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) { - debug!("CACHE MISS: SELECT({:?})={:?}", - cache_fresh_trait_pred, candidate); - self.insert_candidate_cache(cache_fresh_trait_pred, candidate.clone()); - } - - candidate - } - - fn candidate_from_obligation_no_cache<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>) - -> SelectionResult<'tcx, SelectionCandidate<'tcx>> - { - if stack.obligation.predicate.references_error() { - // If we encounter a `TyError`, we generally prefer the - // most "optimistic" result in response -- that is, the - // one least likely to report downstream errors. But - // because this routine is shared by coherence and by - // trait selection, there isn't an obvious "right" choice - // here in that respect, so we opt to just return - // ambiguity and let the upstream clients sort it out. 
- return Ok(None); - } - - if !self.is_knowable(stack) { - debug!("intercrate not knowable"); - return Ok(None); - } - - let candidate_set = try!(self.assemble_candidates(stack)); - - if candidate_set.ambiguous { - debug!("candidate set contains ambig"); - return Ok(None); - } - - let mut candidates = candidate_set.vec; - - debug!("assembled {} candidates for {:?}: {:?}", - candidates.len(), - stack, - candidates); - - // At this point, we know that each of the entries in the - // candidate set is *individually* applicable. Now we have to - // figure out if they contain mutual incompatibilities. This - // frequently arises if we have an unconstrained input type -- - // for example, we are looking for $0:Eq where $0 is some - // unconstrained type variable. In that case, we'll get a - // candidate which assumes $0 == int, one that assumes $0 == - // usize, etc. This spells an ambiguity. - - // If there is more than one candidate, first winnow them down - // by considering extra conditions (nested obligations and so - // forth). We don't winnow if there is exactly one - // candidate. This is a relatively minor distinction but it - // can lead to better inference and error-reporting. An - // example would be if there was an impl: - // - // impl Vec { fn push_clone(...) { ... } } - // - // and we were to see some code `foo.push_clone()` where `boo` - // is a `Vec` and `Bar` does not implement `Clone`. If - // we were to winnow, we'd wind up with zero candidates. - // Instead, we select the right impl now but report `Bar does - // not implement Clone`. - if candidates.len() > 1 { - candidates.retain(|c| self.evaluate_candidate(stack, c).may_apply()) - } - - // If there are STILL multiple candidate, we can further reduce - // the list by dropping duplicates. 
- if candidates.len() > 1 { - let mut i = 0; - while i < candidates.len() { - let is_dup = - (0..candidates.len()) - .filter(|&j| i != j) - .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i], - &candidates[j])); - if is_dup { - debug!("Dropping candidate #{}/{}: {:?}", - i, candidates.len(), candidates[i]); - candidates.swap_remove(i); - } else { - debug!("Retaining candidate #{}/{}: {:?}", - i, candidates.len(), candidates[i]); - i += 1; - } - } - } - - // If there are *STILL* multiple candidates, give up and - // report ambiguity. - if candidates.len() > 1 { - debug!("multiple matches, ambig"); - return Ok(None); - } - - - // If there are *NO* candidates, that there are no impls -- - // that we know of, anyway. Note that in the case where there - // are unbound type variables within the obligation, it might - // be the case that you could still satisfy the obligation - // from another crate by instantiating the type variables with - // a type from another crate that does have an impl. This case - // is checked for in `evaluate_stack` (and hence users - // who might care about this case, like coherence, should use - // that function). - if candidates.is_empty() { - return Err(Unimplemented); - } - - // Just one candidate left. 
- let candidate = candidates.pop().unwrap(); - - match candidate { - ImplCandidate(def_id) => { - match self.tcx().trait_impl_polarity(def_id) { - Some(hir::ImplPolarity::Negative) => return Err(Unimplemented), - _ => {} - } - } - _ => {} - } - - Ok(Some(candidate)) - } - - fn is_knowable<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>) - -> bool - { - debug!("is_knowable(intercrate={})", self.intercrate); - - if !self.intercrate { - return true; - } - - let obligation = &stack.obligation; - let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); - - // ok to skip binder because of the nature of the - // trait-ref-is-knowable check, which does not care about - // bound regions - let trait_ref = &predicate.skip_binder().trait_ref; - - coherence::trait_ref_is_knowable(self.tcx(), trait_ref) - } - - fn pick_candidate_cache(&self) -> &SelectionCache<'tcx> { - // If there are any where-clauses in scope, then we always use - // a cache local to this particular scope. Otherwise, we - // switch to a global cache. We used to try and draw - // finer-grained distinctions, but that led to a serious of - // annoying and weird bugs like #22019 and #18290. This simple - // rule seems to be pretty clearly safe and also still retains - // a very high hit rate (~95% when compiling rustc). - if !self.param_env().caller_bounds.is_empty() { - return &self.param_env().selection_cache; - } - - // Avoid using the master cache during coherence and just rely - // on the local cache. This effectively disables caching - // during coherence. It is really just a simplification to - // avoid us having to fear that coherence results "pollute" - // the master cache. Since coherence executes pretty quickly, - // it's not worth going to more trouble to increase the - // hit-rate I don't think. - if self.intercrate { - return &self.param_env().selection_cache; - } - - // Otherwise, we can use the global cache. 
- &self.tcx().selection_cache - } - - fn check_candidate_cache(&mut self, - cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>) - -> Option>> - { - let cache = self.pick_candidate_cache(); - let hashmap = cache.hashmap.borrow(); - hashmap.get(&cache_fresh_trait_pred.0.trait_ref).cloned() - } - - fn insert_candidate_cache(&mut self, - cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, - candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>) - { - let cache = self.pick_candidate_cache(); - let mut hashmap = cache.hashmap.borrow_mut(); - hashmap.insert(cache_fresh_trait_pred.0.trait_ref.clone(), candidate); - } - - fn should_update_candidate_cache(&mut self, - cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>, - candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>) - -> bool - { - // In general, it's a good idea to cache results, even - // ambiguous ones, to save us some trouble later. But we have - // to be careful not to cache results that could be - // invalidated later by advances in inference. Normally, this - // is not an issue, because any inference variables whose - // types are not yet bound are "freshened" in the cache key, - // which means that if we later get the same request once that - // type variable IS bound, we'll have a different cache key. - // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is - // not yet known, we may cache the result as `None`. But if - // later `_#0t` is bound to `Bar`, then when we freshen we'll - // have `Vec : Foo` as the cache key. - // - // HOWEVER, it CAN happen that we get an ambiguity result in - // one particular case around closures where the cache key - // would not change. That is when the precise types of the - // upvars that a closure references have not yet been figured - // out (i.e., because it is not yet known if they are captured - // by ref, and if by ref, what kind of ref). In these cases, - // when matching a builtin bound, we will yield back an - // ambiguous result. 
But the *cache key* is just the closure type, - // it doesn't capture the state of the upvar computation. - // - // To avoid this trap, just don't cache ambiguous results if - // the self-type contains no inference byproducts (that really - // shouldn't happen in other circumstances anyway, given - // coherence). - - match *candidate { - Ok(Some(_)) | Err(_) => true, - Ok(None) => { - cache_fresh_trait_pred.0.trait_ref.substs.types.has_infer_types() - } - } - } - - fn assemble_candidates<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>) - -> Result, SelectionError<'tcx>> - { - let TraitObligationStack { obligation, .. } = *stack; - let ref obligation = Obligation { - cause: obligation.cause.clone(), - recursion_depth: obligation.recursion_depth, - predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate) - }; - - if obligation.predicate.skip_binder().self_ty().is_ty_var() { - // FIXME(#20297): Self is a type variable (e.g. `_: AsRef`). - // - // This is somewhat problematic, as the current scheme can't really - // handle it turning to be a projection. This does end up as truly - // ambiguous in most cases anyway. - // - // Until this is fixed, take the fast path out - this also improves - // performance by preventing assemble_candidates_from_impls from - // matching every impl for this trait. - return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true }); - } - - let mut candidates = SelectionCandidateSet { - vec: Vec::new(), - ambiguous: false - }; - - // Other bounds. Consider both in-scope bounds from fn decl - // and applicable impls. There is a certain set of precedence rules here. - - match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) { - Some(ty::BoundCopy) => { - debug!("obligation self ty is {:?}", - obligation.predicate.0.self_ty()); - - // User-defined copy impls are permitted, but only for - // structs and enums. 
- try!(self.assemble_candidates_from_impls(obligation, &mut candidates)); - - // For other types, we'll use the builtin rules. - try!(self.assemble_builtin_bound_candidates(ty::BoundCopy, - obligation, - &mut candidates)); - } - Some(bound @ ty::BoundSized) => { - // Sized is never implementable by end-users, it is - // always automatically computed. - try!(self.assemble_builtin_bound_candidates(bound, - obligation, - &mut candidates)); - } - - None if self.tcx().lang_items.unsize_trait() == - Some(obligation.predicate.def_id()) => { - self.assemble_candidates_for_unsizing(obligation, &mut candidates); - } - - Some(ty::BoundSend) | - Some(ty::BoundSync) | - None => { - try!(self.assemble_closure_candidates(obligation, &mut candidates)); - try!(self.assemble_fn_pointer_candidates(obligation, &mut candidates)); - try!(self.assemble_candidates_from_impls(obligation, &mut candidates)); - self.assemble_candidates_from_object_ty(obligation, &mut candidates); - } - } - - self.assemble_candidates_from_projected_tys(obligation, &mut candidates); - try!(self.assemble_candidates_from_caller_bounds(stack, &mut candidates)); - // Default implementations have lower priority, so we only - // consider triggering a default if there is no other impl that can apply. - if candidates.vec.is_empty() { - try!(self.assemble_candidates_from_default_impls(obligation, &mut candidates)); - } - debug!("candidate list size: {}", candidates.vec.len()); - Ok(candidates) - } - - fn assemble_candidates_from_projected_tys(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - { - debug!("assemble_candidates_for_projected_tys({:?})", obligation); - - // FIXME(#20297) -- just examining the self-type is very simplistic - - // before we go into the whole skolemization thing, just - // quickly check if the self-type is a projection at all. 
- let trait_def_id = match obligation.predicate.0.trait_ref.self_ty().sty { - ty::TyProjection(ref data) => data.trait_ref.def_id, - ty::TyInfer(ty::TyVar(_)) => { - self.tcx().sess.span_bug(obligation.cause.span, - "Self=_ should have been handled by assemble_candidates"); - } - _ => { return; } - }; - - debug!("assemble_candidates_for_projected_tys: trait_def_id={:?}", - trait_def_id); - - let result = self.infcx.probe(|snapshot| { - self.match_projection_obligation_against_bounds_from_trait(obligation, - snapshot) - }); - - if result { - candidates.vec.push(ProjectionCandidate); - } - } - - fn match_projection_obligation_against_bounds_from_trait( - &mut self, - obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) - -> bool - { - let poly_trait_predicate = - self.infcx().resolve_type_vars_if_possible(&obligation.predicate); - let (skol_trait_predicate, skol_map) = - self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot); - debug!("match_projection_obligation_against_bounds_from_trait: \ - skol_trait_predicate={:?} skol_map={:?}", - skol_trait_predicate, - skol_map); - - let projection_trait_ref = match skol_trait_predicate.trait_ref.self_ty().sty { - ty::TyProjection(ref data) => &data.trait_ref, - _ => { - self.tcx().sess.span_bug( - obligation.cause.span, - &format!("match_projection_obligation_against_bounds_from_trait() called \ - but self-ty not a projection: {:?}", - skol_trait_predicate.trait_ref.self_ty())); - } - }; - debug!("match_projection_obligation_against_bounds_from_trait: \ - projection_trait_ref={:?}", - projection_trait_ref); - - let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id); - let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs); - debug!("match_projection_obligation_against_bounds_from_trait: \ - bounds={:?}", - bounds); - - let matching_bound = - util::elaborate_predicates(self.tcx(), bounds.predicates.into_vec()) - .filter_to_traits() 
- .find( - |bound| self.infcx.probe( - |_| self.match_projection(obligation, - bound.clone(), - skol_trait_predicate.trait_ref.clone(), - &skol_map, - snapshot))); - - debug!("match_projection_obligation_against_bounds_from_trait: \ - matching_bound={:?}", - matching_bound); - match matching_bound { - None => false, - Some(bound) => { - // Repeat the successful match, if any, this time outside of a probe. - let result = self.match_projection(obligation, - bound, - skol_trait_predicate.trait_ref.clone(), - &skol_map, - snapshot); - assert!(result); - true - } - } - } - - fn match_projection(&mut self, - obligation: &TraitObligation<'tcx>, - trait_bound: ty::PolyTraitRef<'tcx>, - skol_trait_ref: ty::TraitRef<'tcx>, - skol_map: &infer::SkolemizationMap, - snapshot: &infer::CombinedSnapshot) - -> bool - { - assert!(!skol_trait_ref.has_escaping_regions()); - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - match self.infcx.sub_poly_trait_refs(false, - origin, - trait_bound.clone(), - ty::Binder(skol_trait_ref.clone())) { - Ok(()) => { } - Err(_) => { return false; } - } - - self.infcx.leak_check(skol_map, snapshot).is_ok() - } - - /// Given an obligation like ``, search the obligations that the caller - /// supplied to find out whether it is listed among them. - /// - /// Never affects inference environment. 
- fn assemble_candidates_from_caller_bounds<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(),SelectionError<'tcx>> - { - debug!("assemble_candidates_from_caller_bounds({:?})", - stack.obligation); - - let all_bounds = - self.param_env().caller_bounds - .iter() - .filter_map(|o| o.to_opt_poly_trait_ref()); - - let matching_bounds = - all_bounds.filter( - |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply()); - - let param_candidates = - matching_bounds.map(|bound| ParamCandidate(bound)); - - candidates.vec.extend(param_candidates); - - Ok(()) - } - - fn evaluate_where_clause<'o>(&mut self, - stack: &TraitObligationStack<'o, 'tcx>, - where_clause_trait_ref: ty::PolyTraitRef<'tcx>) - -> EvaluationResult - { - self.infcx().probe(move |_| { - match self.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { - Ok(obligations) => { - self.evaluate_predicates_recursively(stack.list(), obligations.iter()) - } - Err(()) => EvaluatedToErr - } - }) - } - - /// Check for the artificial impl that the compiler will create for an obligation like `X : - /// FnMut<..>` where `X` is a closure type. - /// - /// Note: the type parameters on a closure candidate are modeled as *output* type - /// parameters and hence do not affect whether this trait is a match or not. They will be - /// unified during the confirmation step. 
- fn assemble_closure_candidates(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(),SelectionError<'tcx>> - { - let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { - Some(k) => k, - None => { return Ok(()); } - }; - - // ok to skip binder because the substs on closure types never - // touch bound regions, they just capture the in-scope - // type/region parameters - let self_ty = *obligation.self_ty().skip_binder(); - let (closure_def_id, substs) = match self_ty.sty { - ty::TyClosure(id, ref substs) => (id, substs), - ty::TyInfer(ty::TyVar(_)) => { - debug!("assemble_unboxed_closure_candidates: ambiguous self-type"); - candidates.ambiguous = true; - return Ok(()); - } - _ => { return Ok(()); } - }; - - debug!("assemble_unboxed_candidates: self_ty={:?} kind={:?} obligation={:?}", - self_ty, - kind, - obligation); - - match self.infcx.closure_kind(closure_def_id) { - Some(closure_kind) => { - debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind); - if closure_kind.extends(kind) { - candidates.vec.push(ClosureCandidate(closure_def_id, substs)); - } - } - None => { - debug!("assemble_unboxed_candidates: closure_kind not yet known"); - candidates.ambiguous = true; - } - } - - Ok(()) - } - - /// Implement one of the `Fn()` family for a fn pointer. - fn assemble_fn_pointer_candidates(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(),SelectionError<'tcx>> - { - // We provide impl of all fn traits for fn pointers. 
- if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() { - return Ok(()); - } - - // ok to skip binder because what we are inspecting doesn't involve bound regions - let self_ty = *obligation.self_ty().skip_binder(); - match self_ty.sty { - ty::TyInfer(ty::TyVar(_)) => { - debug!("assemble_fn_pointer_candidates: ambiguous self-type"); - candidates.ambiguous = true; // could wind up being a fn() type - } - - // provide an impl, but only for suitable `fn` pointers - ty::TyBareFn(_, &ty::BareFnTy { - unsafety: hir::Unsafety::Normal, - abi: abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: _, - output: ty::FnConverging(_), - variadic: false - }) - }) => { - candidates.vec.push(FnPointerCandidate); - } - - _ => { } - } - - Ok(()) - } - - /// Search for impls that might apply to `obligation`. - fn assemble_candidates_from_impls(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(), SelectionError<'tcx>> - { - debug!("assemble_candidates_from_impls(obligation={:?})", obligation); - - let def = self.tcx().lookup_trait_def(obligation.predicate.def_id()); - - def.for_each_relevant_impl( - self.tcx(), - obligation.predicate.0.trait_ref.self_ty(), - |impl_def_id| { - self.infcx.probe(|snapshot| { - if let Ok(_) = self.match_impl(impl_def_id, obligation, snapshot) { - candidates.vec.push(ImplCandidate(impl_def_id)); - } - }); - } - ); - - Ok(()) - } - - fn assemble_candidates_from_default_impls(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(), SelectionError<'tcx>> - { - // OK to skip binder here because the tests we do below do not involve bound regions - let self_ty = *obligation.self_ty().skip_binder(); - debug!("assemble_candidates_from_default_impls(self_ty={:?})", self_ty); - - let def_id = obligation.predicate.def_id(); - - if self.tcx().trait_has_default_impl(def_id) { - match self_ty.sty { - ty::TyTrait(..) 
=> { - // For object types, we don't know what the closed - // over types are. For most traits, this means we - // conservatively say nothing; a candidate may be - // added by `assemble_candidates_from_object_ty`. - // However, for the kind of magic reflect trait, - // we consider it to be implemented even for - // object types, because it just lets you reflect - // onto the object type, not into the object's - // interior. - if self.tcx().has_attr(def_id, "rustc_reflect_like") { - candidates.vec.push(DefaultImplObjectCandidate(def_id)); - } - } - ty::TyParam(..) | - ty::TyProjection(..) => { - // In these cases, we don't know what the actual - // type is. Therefore, we cannot break it down - // into its constituent types. So we don't - // consider the `..` impl but instead just add no - // candidates: this means that typeck will only - // succeed if there is another reason to believe - // that this obligation holds. That could be a - // where-clause or, in the case of an object type, - // it could be that the object type lists the - // trait (e.g. `Foo+Send : Send`). See - // `compile-fail/typeck-default-trait-impl-send-param.rs` - // for an example of a test case that exercises - // this path. - } - ty::TyInfer(ty::TyVar(_)) => { - // the defaulted impl might apply, we don't know - candidates.ambiguous = true; - } - _ => { - candidates.vec.push(DefaultImplCandidate(def_id.clone())) - } - } - } - - Ok(()) - } - - /// Search for impls that might apply to `obligation`. - fn assemble_candidates_from_object_ty(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - { - debug!("assemble_candidates_from_object_ty(self_ty={:?})", - obligation.self_ty().skip_binder()); - - // Object-safety candidates are only applicable to object-safe - // traits. 
Including this check is useful because it helps - // inference in cases of traits like `BorrowFrom`, which are - // not object-safe, and which rely on being able to infer the - // self-type from one of the other inputs. Without this check, - // these cases wind up being considered ambiguous due to a - // (spurious) ambiguity introduced here. - let predicate_trait_ref = obligation.predicate.to_poly_trait_ref(); - if !object_safety::is_object_safe(self.tcx(), predicate_trait_ref.def_id()) { - return; - } - - self.infcx.commit_if_ok(|snapshot| { - let (self_ty, _) = - self.infcx().skolemize_late_bound_regions(&obligation.self_ty(), snapshot); - let poly_trait_ref = match self_ty.sty { - ty::TyTrait(ref data) => { - match self.tcx().lang_items.to_builtin_kind(obligation.predicate.def_id()) { - Some(bound @ ty::BoundSend) | Some(bound @ ty::BoundSync) => { - if data.bounds.builtin_bounds.contains(&bound) { - debug!("assemble_candidates_from_object_ty: matched builtin bound, \ - pushing candidate"); - candidates.vec.push(BuiltinObjectCandidate); - return Ok(()); - } - } - _ => {} - } - - data.principal_trait_ref_with_self_ty(self.tcx(), self_ty) - } - ty::TyInfer(ty::TyVar(_)) => { - debug!("assemble_candidates_from_object_ty: ambiguous"); - candidates.ambiguous = true; // could wind up being an object type - return Ok(()); - } - _ => { - return Ok(()); - } - }; - - debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}", - poly_trait_ref); - - // Count only those upcast versions that match the trait-ref - // we are looking for. Specifically, do not only check for the - // correct trait, but also the correct type parameters. - // For example, we may be trying to upcast `Foo` to `Bar`, - // but `Foo` is declared as `trait Foo : Bar`. 
- let upcast_trait_refs = - util::supertraits(self.tcx(), poly_trait_ref) - .filter(|upcast_trait_ref| { - self.infcx.probe(|_| { - let upcast_trait_ref = upcast_trait_ref.clone(); - self.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok() - }) - }) - .count(); - - if upcast_trait_refs > 1 { - // can be upcast in many ways; need more type information - candidates.ambiguous = true; - } else if upcast_trait_refs == 1 { - candidates.vec.push(ObjectCandidate); - } - - Ok::<(),()>(()) - }).unwrap(); - } - - /// Search for unsizing that might apply to `obligation`. - fn assemble_candidates_for_unsizing(&mut self, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) { - // We currently never consider higher-ranked obligations e.g. - // `for<'a> &'a T: Unsize` to be implemented. This is not - // because they are a priori invalid, and we could potentially add support - // for them later, it's just that there isn't really a strong need for it. - // A `T: Unsize` obligation is always used as part of a `T: CoerceUnsize` - // impl, and those are generally applied to concrete types. - // - // That said, one might try to write a fn with a where clause like - // for<'a> Foo<'a, T>: Unsize> - // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`. - // Still, you'd be more likely to write that where clause as - // T: Trait - // so it seems ok if we (conservatively) fail to accept that `Unsize` - // obligation above. Should be possible to extend this in the future. - let source = match self.tcx().no_late_bound_regions(&obligation.self_ty()) { - Some(t) => t, - None => { - // Don't add any candidates if there are bound regions. - return; - } - }; - let target = obligation.predicate.0.input_types()[0]; - - debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})", - source, target); - - let may_apply = match (&source.sty, &target.sty) { - // Trait+Kx+'a -> Trait+Ky+'b (upcasts). 
- (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => { - // Upcasts permit two things: - // - // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo` - // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b` - // - // Note that neither of these changes requires any - // change at runtime. Eventually this will be - // generalized. - // - // We always upcast when we can because of reason - // #2 (region bounds). - data_a.principal.def_id() == data_a.principal.def_id() && - data_a.bounds.builtin_bounds.is_superset(&data_b.bounds.builtin_bounds) - } - - // T -> Trait. - (_, &ty::TyTrait(_)) => true, - - // Ambiguous handling is below T -> Trait, because inference - // variables can still implement Unsize and nested - // obligations will have the final say (likely deferred). - (&ty::TyInfer(ty::TyVar(_)), _) | - (_, &ty::TyInfer(ty::TyVar(_))) => { - debug!("assemble_candidates_for_unsizing: ambiguous"); - candidates.ambiguous = true; - false - } - - // [T; n] -> [T]. - (&ty::TyArray(_, _), &ty::TySlice(_)) => true, - - // Struct -> Struct. - (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => { - def_id_a == def_id_b - } - - _ => false - }; - - if may_apply { - candidates.vec.push(BuiltinUnsizeCandidate); - } - } - - /////////////////////////////////////////////////////////////////////////// - // WINNOW - // - // Winnowing is the process of attempting to resolve ambiguity by - // probing further. During the winnowing process, we unify all - // type variables (ignoring skolemization) and then we also - // attempt to evaluate recursive bounds to see if they are - // satisfied. - - /// Returns true if `candidate_i` should be dropped in favor of - /// `candidate_j`. Generally speaking we will drop duplicate - /// candidates and prefer where-clause candidates. - /// Returns true if `victim` should be dropped in favor of - /// `other`. Generally speaking we will drop duplicate - /// candidates and prefer where-clause candidates. 
- /// - /// See the comment for "SelectionCandidate" for more details. - fn candidate_should_be_dropped_in_favor_of<'o>(&mut self, - victim: &SelectionCandidate<'tcx>, - other: &SelectionCandidate<'tcx>) - -> bool - { - if victim == other { - return true; - } - - match other { - &ObjectCandidate | - &ParamCandidate(_) | &ProjectionCandidate => match victim { - &DefaultImplCandidate(..) => { - self.tcx().sess.bug( - "default implementations shouldn't be recorded \ - when there are other valid candidates"); - } - &ImplCandidate(..) | - &ClosureCandidate(..) | - &FnPointerCandidate | - &BuiltinObjectCandidate | - &BuiltinUnsizeCandidate | - &DefaultImplObjectCandidate(..) | - &BuiltinCandidate(..) => { - // We have a where-clause so don't go around looking - // for impls. - true - } - &ObjectCandidate | - &ProjectionCandidate => { - // Arbitrarily give param candidates priority - // over projection and object candidates. - true - }, - &ParamCandidate(..) => false, - }, - _ => false - } - } - - /////////////////////////////////////////////////////////////////////////// - // BUILTIN BOUNDS - // - // These cover the traits that are built-in to the language - // itself. This includes `Copy` and `Sized` for sure. For the - // moment, it also includes `Send` / `Sync` and a few others, but - // those will hopefully change to library-defined traits in the - // future. 
- - fn assemble_builtin_bound_candidates<'o>(&mut self, - bound: ty::BuiltinBound, - obligation: &TraitObligation<'tcx>, - candidates: &mut SelectionCandidateSet<'tcx>) - -> Result<(),SelectionError<'tcx>> - { - match self.builtin_bound(bound, obligation) { - Ok(If(..)) => { - debug!("builtin_bound: bound={:?}", - bound); - candidates.vec.push(BuiltinCandidate(bound)); - Ok(()) - } - Ok(ParameterBuiltin) => { Ok(()) } - Ok(AmbiguousBuiltin) => { - debug!("assemble_builtin_bound_candidates: ambiguous builtin"); - Ok(candidates.ambiguous = true) - } - Err(e) => { Err(e) } - } - } - - fn builtin_bound(&mut self, - bound: ty::BuiltinBound, - obligation: &TraitObligation<'tcx>) - -> Result,SelectionError<'tcx>> - { - // Note: these tests operate on types that may contain bound - // regions. To be proper, we ought to skolemize here, but we - // forego the skolemization and defer it until the - // confirmation step. - - let self_ty = self.infcx.shallow_resolve(obligation.predicate.0.self_ty()); - return match self_ty.sty { - ty::TyInfer(ty::IntVar(_)) | - ty::TyInfer(ty::FloatVar(_)) | - ty::TyUint(_) | - ty::TyInt(_) | - ty::TyBool | - ty::TyFloat(_) | - ty::TyBareFn(..) | - ty::TyChar => { - // safe for everything - ok_if(Vec::new()) - } - - ty::TyBox(_) => { // Box - match bound { - ty::BoundCopy => Err(Unimplemented), - - ty::BoundSized => ok_if(Vec::new()), - - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - } - } - - ty::TyRawPtr(..) 
=> { // *const T, *mut T - match bound { - ty::BoundCopy | ty::BoundSized => ok_if(Vec::new()), - - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - } - } - - ty::TyTrait(ref data) => { - match bound { - ty::BoundSized => Err(Unimplemented), - ty::BoundCopy => { - if data.bounds.builtin_bounds.contains(&bound) { - ok_if(Vec::new()) - } else { - // Recursively check all supertraits to find out if any further - // bounds are required and thus we must fulfill. - let principal = - data.principal_trait_ref_with_self_ty(self.tcx(), - self.tcx().types.err); - let copy_def_id = obligation.predicate.def_id(); - for tr in util::supertraits(self.tcx(), principal) { - if tr.def_id() == copy_def_id { - return ok_if(Vec::new()) - } - } - - Err(Unimplemented) - } - } - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - } - } - - ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl }) => { - // &mut T or &T - match bound { - ty::BoundCopy => { - match mutbl { - // &mut T is affine and hence never `Copy` - hir::MutMutable => Err(Unimplemented), - - // &T is always copyable - hir::MutImmutable => ok_if(Vec::new()), - } - } - - ty::BoundSized => ok_if(Vec::new()), - - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - } - } - - ty::TyArray(element_ty, _) => { - // [T; n] - match bound { - ty::BoundCopy => ok_if(vec![element_ty]), - ty::BoundSized => ok_if(Vec::new()), - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - } - } - - ty::TyStr | ty::TySlice(_) => { - match bound { - ty::BoundSync | ty::BoundSend => { - self.tcx().sess.bug("Send/Sync shouldn't occur in builtin_bounds()"); - } - - ty::BoundCopy | ty::BoundSized => Err(Unimplemented), - } - } - - // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet - ty::TyTuple(ref tys) => 
ok_if(tys.clone()), - - ty::TyClosure(_, ref substs) => { - // FIXME -- This case is tricky. In the case of by-ref - // closures particularly, we need the results of - // inference to decide how to reflect the type of each - // upvar (the upvar may have type `T`, but the runtime - // type could be `&mut`, `&`, or just `T`). For now, - // though, we'll do this unsoundly and assume that all - // captures are by value. Really what we ought to do - // is reserve judgement and then intertwine this - // analysis with closure inference. - - // Unboxed closures shouldn't be - // implicitly copyable - if bound == ty::BoundCopy { - return Ok(ParameterBuiltin); - } - - // Upvars are always local variables or references to - // local variables, and local variables cannot be - // unsized, so the closure struct as a whole must be - // Sized. - if bound == ty::BoundSized { - return ok_if(Vec::new()); - } - - ok_if(substs.upvar_tys.clone()) - } - - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { - let types: Vec = def.all_fields().map(|f| { - f.ty(self.tcx(), substs) - }).collect(); - nominal(bound, types) - } - - ty::TyProjection(_) | ty::TyParam(_) => { - // Note: A type parameter is only considered to meet a - // particular bound if there is a where clause telling - // us that it does, and that case is handled by - // `assemble_candidates_from_caller_bounds()`. - Ok(ParameterBuiltin) - } - - ty::TyInfer(ty::TyVar(_)) => { - // Unbound type variable. Might or might not have - // applicable impls and so forth, depending on what - // those type variables wind up being bound to. 
- debug!("assemble_builtin_bound_candidates: ambiguous builtin"); - Ok(AmbiguousBuiltin) - } - - ty::TyError => ok_if(Vec::new()), - - ty::TyInfer(ty::FreshTy(_)) - | ty::TyInfer(ty::FreshIntTy(_)) - | ty::TyInfer(ty::FreshFloatTy(_)) => { - self.tcx().sess.bug( - &format!( - "asked to assemble builtin bounds of unexpected type: {:?}", - self_ty)); - } - }; - - fn ok_if<'tcx>(v: Vec>) - -> Result, SelectionError<'tcx>> { - Ok(If(ty::Binder(v))) - } - - fn nominal<'cx, 'tcx>(bound: ty::BuiltinBound, - types: Vec>) - -> Result, SelectionError<'tcx>> - { - // First check for markers and other nonsense. - match bound { - // Fallback to whatever user-defined impls exist in this case. - ty::BoundCopy => Ok(ParameterBuiltin), - - // Sized if all the component types are sized. - ty::BoundSized => ok_if(types), - - // Shouldn't be coming through here. - ty::BoundSend | ty::BoundSync => unreachable!(), - } - } - } - - /// For default impls, we need to break apart a type into its - /// "constituent types" -- meaning, the types that it contains. - /// - /// Here are some (simple) examples: - /// - /// ``` - /// (i32, u32) -> [i32, u32] - /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32] - /// Bar where struct Bar { x: T, y: u32 } -> [i32, u32] - /// Zed where enum Zed { A(T), B(u32) } -> [i32, u32] - /// ``` - fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec> { - match t.sty { - ty::TyUint(_) | - ty::TyInt(_) | - ty::TyBool | - ty::TyFloat(_) | - ty::TyBareFn(..) | - ty::TyStr | - ty::TyError | - ty::TyInfer(ty::IntVar(_)) | - ty::TyInfer(ty::FloatVar(_)) | - ty::TyChar => { - Vec::new() - } - - ty::TyTrait(..) | - ty::TyParam(..) | - ty::TyProjection(..) 
| - ty::TyInfer(ty::TyVar(_)) | - ty::TyInfer(ty::FreshTy(_)) | - ty::TyInfer(ty::FreshIntTy(_)) | - ty::TyInfer(ty::FreshFloatTy(_)) => { - self.tcx().sess.bug( - &format!( - "asked to assemble constituent types of unexpected type: {:?}", - t)); - } - - ty::TyBox(referent_ty) => { // Box - vec![referent_ty] - } - - ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) | - ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => { - vec![element_ty] - }, - - ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => { - vec![element_ty] - } - - ty::TyTuple(ref tys) => { - // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet - tys.clone() - } - - ty::TyClosure(_, ref substs) => { - // FIXME(#27086). We are invariant w/r/t our - // substs.func_substs, but we don't see them as - // constituent types; this seems RIGHT but also like - // something that a normal type couldn't simulate. Is - // this just a gap with the way that PhantomData and - // OIBIT interact? That is, there is no way to say - // "make me invariant with respect to this TYPE, but - // do not act as though I can reach it" - substs.upvar_tys.clone() - } - - // for `PhantomData`, we pass `T` - ty::TyStruct(def, substs) if def.is_phantom_data() => { - substs.types.get_slice(TypeSpace).to_vec() - } - - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { - def.all_fields() - .map(|f| f.ty(self.tcx(), substs)) - .collect() - } - } - } - - fn collect_predicates_for_types(&mut self, - obligation: &TraitObligation<'tcx>, - trait_def_id: DefId, - types: ty::Binder>>) - -> Vec> - { - let derived_cause = match self.tcx().lang_items.to_builtin_kind(trait_def_id) { - Some(_) => { - self.derived_cause(obligation, BuiltinDerivedObligation) - }, - None => { - self.derived_cause(obligation, ImplDerivedObligation) - } - }; - - // Because the types were potentially derived from - // higher-ranked obligations they may reference late-bound - // regions. 
For example, `for<'a> Foo<&'a int> : Copy` would - // yield a type like `for<'a> &'a int`. In general, we - // maintain the invariant that we never manipulate bound - // regions, so we have to process these bound regions somehow. - // - // The strategy is to: - // - // 1. Instantiate those regions to skolemized regions (e.g., - // `for<'a> &'a int` becomes `&0 int`. - // 2. Produce something like `&'0 int : Copy` - // 3. Re-bind the regions back to `for<'a> &'a int : Copy` - - // Move the binder into the individual types - let bound_types: Vec>> = - types.skip_binder() - .iter() - .map(|&nested_ty| ty::Binder(nested_ty)) - .collect(); - - // For each type, produce a vector of resulting obligations - let obligations: Result>, _> = bound_types.iter().map(|nested_ty| { - self.infcx.commit_if_ok(|snapshot| { - let (skol_ty, skol_map) = - self.infcx().skolemize_late_bound_regions(nested_ty, snapshot); - let Normalized { value: normalized_ty, mut obligations } = - project::normalize_with_depth(self, - obligation.cause.clone(), - obligation.recursion_depth + 1, - &skol_ty); - let skol_obligation = - util::predicate_for_trait_def(self.tcx(), - derived_cause.clone(), - trait_def_id, - obligation.recursion_depth + 1, - normalized_ty, - vec![]); - obligations.push(skol_obligation); - Ok(self.infcx().plug_leaks(skol_map, snapshot, &obligations)) - }) - }).collect(); - - // Flatten those vectors (couldn't do it above due `collect`) - match obligations { - Ok(obligations) => obligations.into_iter().flat_map(|o| o).collect(), - Err(ErrorReported) => Vec::new(), - } - } - - /////////////////////////////////////////////////////////////////////////// - // CONFIRMATION - // - // Confirmation unifies the output type parameters of the trait - // with the values found in the obligation, possibly yielding a - // type error. See `README.md` for more details. 
- - fn confirm_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - candidate: SelectionCandidate<'tcx>) - -> Result,SelectionError<'tcx>> - { - debug!("confirm_candidate({:?}, {:?})", - obligation, - candidate); - - match candidate { - BuiltinCandidate(builtin_bound) => { - Ok(VtableBuiltin( - try!(self.confirm_builtin_candidate(obligation, builtin_bound)))) - } - - ParamCandidate(param) => { - let obligations = self.confirm_param_candidate(obligation, param); - Ok(VtableParam(obligations)) - } - - DefaultImplCandidate(trait_def_id) => { - let data = self.confirm_default_impl_candidate(obligation, trait_def_id); - Ok(VtableDefaultImpl(data)) - } - - DefaultImplObjectCandidate(trait_def_id) => { - let data = self.confirm_default_impl_object_candidate(obligation, trait_def_id); - Ok(VtableDefaultImpl(data)) - } - - ImplCandidate(impl_def_id) => { - let vtable_impl = - try!(self.confirm_impl_candidate(obligation, impl_def_id)); - Ok(VtableImpl(vtable_impl)) - } - - ClosureCandidate(closure_def_id, substs) => { - let vtable_closure = - try!(self.confirm_closure_candidate(obligation, closure_def_id, substs)); - Ok(VtableClosure(vtable_closure)) - } - - BuiltinObjectCandidate => { - // This indicates something like `(Trait+Send) : - // Send`. In this case, we know that this holds - // because that's what the object type is telling us, - // and there's really no additional obligations to - // prove and no types in particular to unify etc. 
- Ok(VtableParam(Vec::new())) - } - - ObjectCandidate => { - let data = self.confirm_object_candidate(obligation); - Ok(VtableObject(data)) - } - - FnPointerCandidate => { - let fn_type = - try!(self.confirm_fn_pointer_candidate(obligation)); - Ok(VtableFnPointer(fn_type)) - } - - ProjectionCandidate => { - self.confirm_projection_candidate(obligation); - Ok(VtableParam(Vec::new())) - } - - BuiltinUnsizeCandidate => { - let data = try!(self.confirm_builtin_unsize_candidate(obligation)); - Ok(VtableBuiltin(data)) - } - } - } - - fn confirm_projection_candidate(&mut self, - obligation: &TraitObligation<'tcx>) - { - let _: Result<(),()> = - self.infcx.commit_if_ok(|snapshot| { - let result = - self.match_projection_obligation_against_bounds_from_trait(obligation, - snapshot); - assert!(result); - Ok(()) - }); - } - - fn confirm_param_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - param: ty::PolyTraitRef<'tcx>) - -> Vec> - { - debug!("confirm_param_candidate({:?},{:?})", - obligation, - param); - - // During evaluation, we already checked that this - // where-clause trait-ref could be unified with the obligation - // trait-ref. Repeat that unification now without any - // transactional boundary; it should not fail. 
- match self.match_where_clause_trait_ref(obligation, param.clone()) { - Ok(obligations) => obligations, - Err(()) => { - self.tcx().sess.bug( - &format!("Where clause `{:?}` was applicable to `{:?}` but now is not", - param, - obligation)); - } - } - } - - fn confirm_builtin_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - bound: ty::BuiltinBound) - -> Result>, - SelectionError<'tcx>> - { - debug!("confirm_builtin_candidate({:?})", - obligation); - - match try!(self.builtin_bound(bound, obligation)) { - If(nested) => Ok(self.vtable_builtin_data(obligation, bound, nested)), - AmbiguousBuiltin | ParameterBuiltin => { - self.tcx().sess.span_bug( - obligation.cause.span, - &format!("builtin bound for {:?} was ambig", - obligation)); - } - } - } - - fn vtable_builtin_data(&mut self, - obligation: &TraitObligation<'tcx>, - bound: ty::BuiltinBound, - nested: ty::Binder>>) - -> VtableBuiltinData> - { - debug!("vtable_builtin_data(obligation={:?}, bound={:?}, nested={:?})", - obligation, bound, nested); - - let trait_def = match self.tcx().lang_items.from_builtin_kind(bound) { - Ok(def_id) => def_id, - Err(_) => { - self.tcx().sess.bug("builtin trait definition not found"); - } - }; - - let obligations = self.collect_predicates_for_types(obligation, trait_def, nested); - - debug!("vtable_builtin_data: obligations={:?}", - obligations); - - VtableBuiltinData { nested: obligations } - } - - /// This handles the case where a `impl Foo for ..` impl is being used. - /// The idea is that the impl applies to `X : Foo` if the following conditions are met: - /// - /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds - /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds. 
- fn confirm_default_impl_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - trait_def_id: DefId) - -> VtableDefaultImplData> - { - debug!("confirm_default_impl_candidate({:?}, {:?})", - obligation, - trait_def_id); - - // binder is moved below - let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); - let types = self.constituent_types_for_ty(self_ty); - self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types)) - } - - fn confirm_default_impl_object_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - trait_def_id: DefId) - -> VtableDefaultImplData> - { - debug!("confirm_default_impl_object_candidate({:?}, {:?})", - obligation, - trait_def_id); - - assert!(self.tcx().has_attr(trait_def_id, "rustc_reflect_like")); - - // OK to skip binder, it is reintroduced below - let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); - match self_ty.sty { - ty::TyTrait(ref data) => { - // OK to skip the binder, it is reintroduced below - let input_types = data.principal.skip_binder().substs.types.get_slice(TypeSpace); - let assoc_types = data.bounds.projection_bounds - .iter() - .map(|pb| pb.skip_binder().ty); - let all_types: Vec<_> = input_types.iter().cloned() - .chain(assoc_types) - .collect(); - - // reintroduce the two binding levels we skipped, then flatten into one - let all_types = ty::Binder(ty::Binder(all_types)); - let all_types = self.tcx().flatten_late_bound_regions(&all_types); - - self.vtable_default_impl(obligation, trait_def_id, all_types) - } - _ => { - self.tcx().sess.bug( - &format!( - "asked to confirm default object implementation for non-object type: {:?}", - self_ty)); - } - } - } - - /// See `confirm_default_impl_candidate` - fn vtable_default_impl(&mut self, - obligation: &TraitObligation<'tcx>, - trait_def_id: DefId, - nested: ty::Binder>>) - -> VtableDefaultImplData> - { - debug!("vtable_default_impl_data: nested={:?}", nested); - - let mut obligations = 
self.collect_predicates_for_types(obligation, - trait_def_id, - nested); - - let trait_obligations: Result,()> = self.infcx.commit_if_ok(|snapshot| { - let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); - let (trait_ref, skol_map) = - self.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot); - Ok(self.impl_or_trait_obligations(obligation.cause.clone(), - obligation.recursion_depth + 1, - trait_def_id, - &trait_ref.substs, - skol_map, - snapshot)) - }); - - // no Errors in that code above - obligations.append(&mut trait_obligations.unwrap()); - - debug!("vtable_default_impl_data: obligations={:?}", obligations); - - VtableDefaultImplData { - trait_def_id: trait_def_id, - nested: obligations - } - } - - fn confirm_impl_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - impl_def_id: DefId) - -> Result>, - SelectionError<'tcx>> - { - debug!("confirm_impl_candidate({:?},{:?})", - obligation, - impl_def_id); - - // First, create the substitutions by matching the impl again, - // this time not in a probe. 
- self.infcx.commit_if_ok(|snapshot| { - let (substs, skol_map) = - self.rematch_impl(impl_def_id, obligation, - snapshot); - debug!("confirm_impl_candidate substs={:?}", substs); - Ok(self.vtable_impl(impl_def_id, substs, obligation.cause.clone(), - obligation.recursion_depth + 1, skol_map, snapshot)) - }) - } - - fn vtable_impl(&mut self, - impl_def_id: DefId, - mut substs: Normalized<'tcx, Substs<'tcx>>, - cause: ObligationCause<'tcx>, - recursion_depth: usize, - skol_map: infer::SkolemizationMap, - snapshot: &infer::CombinedSnapshot) - -> VtableImplData<'tcx, PredicateObligation<'tcx>> - { - debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})", - impl_def_id, - substs, - recursion_depth, - skol_map); - - let mut impl_obligations = - self.impl_or_trait_obligations(cause, - recursion_depth, - impl_def_id, - &substs.value, - skol_map, - snapshot); - - debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}", - impl_def_id, - impl_obligations); - - // Because of RFC447, the impl-trait-ref and obligations - // are sufficient to determine the impl substs, without - // relying on projections in the impl-trait-ref. - // - // e.g. `impl> Foo<::T> for V` - impl_obligations.append(&mut substs.obligations); - - VtableImplData { impl_def_id: impl_def_id, - substs: substs.value, - nested: impl_obligations } - } - - fn confirm_object_candidate(&mut self, - obligation: &TraitObligation<'tcx>) - -> VtableObjectData<'tcx> - { - debug!("confirm_object_candidate({:?})", - obligation); - - // FIXME skipping binder here seems wrong -- we should - // probably flatten the binder from the obligation and the - // binder from the object. Have to try to make a broken test - // case that results. 
-nmatsakis - let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); - let poly_trait_ref = match self_ty.sty { - ty::TyTrait(ref data) => { - data.principal_trait_ref_with_self_ty(self.tcx(), self_ty) - } - _ => { - self.tcx().sess.span_bug(obligation.cause.span, - "object candidate with non-object"); - } - }; - - let mut upcast_trait_ref = None; - let vtable_base; - - { - // We want to find the first supertrait in the list of - // supertraits that we can unify with, and do that - // unification. We know that there is exactly one in the list - // where we can unify because otherwise select would have - // reported an ambiguity. (When we do find a match, also - // record it for later.) - let nonmatching = - util::supertraits(self.tcx(), poly_trait_ref) - .take_while(|&t| { - match - self.infcx.commit_if_ok( - |_| self.match_poly_trait_ref(obligation, t)) - { - Ok(_) => { upcast_trait_ref = Some(t); false } - Err(_) => { true } - } - }); - - // Additionally, for each of the nonmatching predicates that - // we pass over, we sum up the set of number of vtable - // entries, so that we can compute the offset for the selected - // trait. 
- vtable_base = - nonmatching.map(|t| util::count_own_vtable_entries(self.tcx(), t)) - .sum(); - - } - - VtableObjectData { - upcast_trait_ref: upcast_trait_ref.unwrap(), - vtable_base: vtable_base, - } - } - - fn confirm_fn_pointer_candidate(&mut self, - obligation: &TraitObligation<'tcx>) - -> Result,SelectionError<'tcx>> - { - debug!("confirm_fn_pointer_candidate({:?})", - obligation); - - // ok to skip binder; it is reintroduced below - let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); - let sig = self_ty.fn_sig(); - let trait_ref = - util::closure_trait_ref_and_return_type(self.tcx(), - obligation.predicate.def_id(), - self_ty, - sig, - util::TupleArgumentsFlag::Yes) - .map_bound(|(trait_ref, _)| trait_ref); - - try!(self.confirm_poly_trait_refs(obligation.cause.clone(), - obligation.predicate.to_poly_trait_ref(), - trait_ref)); - Ok(self_ty) - } - - fn confirm_closure_candidate(&mut self, - obligation: &TraitObligation<'tcx>, - closure_def_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> Result>, - SelectionError<'tcx>> - { - debug!("confirm_closure_candidate({:?},{:?},{:?})", - obligation, - closure_def_id, - substs); - - let Normalized { - value: trait_ref, - obligations - } = self.closure_trait_ref(obligation, closure_def_id, substs); - - debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", - closure_def_id, - trait_ref, - obligations); - - try!(self.confirm_poly_trait_refs(obligation.cause.clone(), - obligation.predicate.to_poly_trait_ref(), - trait_ref)); - - Ok(VtableClosureData { - closure_def_id: closure_def_id, - substs: substs.clone(), - nested: obligations - }) - } - - /// In the case of closure types and fn pointers, - /// we currently treat the input type parameters on the trait as - /// outputs. This means that when we have a match we have only - /// considered the self type, so we have to go back and make sure - /// to relate the argument types too. 
This is kind of wrong, but - /// since we control the full set of impls, also not that wrong, - /// and it DOES yield better error messages (since we don't report - /// errors as if there is no applicable impl, but rather report - /// errors are about mismatched argument types. - /// - /// Here is an example. Imagine we have a closure expression - /// and we desugared it so that the type of the expression is - /// `Closure`, and `Closure` expects an int as argument. Then it - /// is "as if" the compiler generated this impl: - /// - /// impl Fn(int) for Closure { ... } - /// - /// Now imagine our obligation is `Fn(usize) for Closure`. So far - /// we have matched the self-type `Closure`. At this point we'll - /// compare the `int` to `usize` and generate an error. - /// - /// Note that this checking occurs *after* the impl has selected, - /// because these output type parameters should not affect the - /// selection of the impl. Therefore, if there is a mismatch, we - /// report an error to the user. - fn confirm_poly_trait_refs(&mut self, - obligation_cause: ObligationCause, - obligation_trait_ref: ty::PolyTraitRef<'tcx>, - expected_trait_ref: ty::PolyTraitRef<'tcx>) - -> Result<(), SelectionError<'tcx>> - { - let origin = TypeOrigin::RelateOutputImplTypes(obligation_cause.span); - - let obligation_trait_ref = obligation_trait_ref.clone(); - match self.infcx.sub_poly_trait_refs(false, - origin, - expected_trait_ref.clone(), - obligation_trait_ref.clone()) { - Ok(()) => Ok(()), - Err(e) => Err(OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) - } - } - - fn confirm_builtin_unsize_candidate(&mut self, - obligation: &TraitObligation<'tcx>,) - -> Result>, - SelectionError<'tcx>> { - let tcx = self.tcx(); - - // assemble_candidates_for_unsizing should ensure there are no late bound - // regions here. See the comment there for more details. 
- let source = self.infcx.shallow_resolve( - tcx.no_late_bound_regions(&obligation.self_ty()).unwrap()); - let target = self.infcx.shallow_resolve(obligation.predicate.0.input_types()[0]); - - debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})", - source, target); - - let mut nested = vec![]; - match (&source.sty, &target.sty) { - // Trait+Kx+'a -> Trait+Ky+'b (upcasts). - (&ty::TyTrait(ref data_a), &ty::TyTrait(ref data_b)) => { - // See assemble_candidates_for_unsizing for more info. - let bounds = ty::ExistentialBounds { - region_bound: data_b.bounds.region_bound, - builtin_bounds: data_b.bounds.builtin_bounds, - projection_bounds: data_a.bounds.projection_bounds.clone(), - }; - - let new_trait = tcx.mk_trait(data_a.principal.clone(), bounds); - let origin = TypeOrigin::Misc(obligation.cause.span); - if self.infcx.sub_types(false, origin, new_trait, target).is_err() { - return Err(Unimplemented); - } - - // Register one obligation for 'a: 'b. - let cause = ObligationCause::new(obligation.cause.span, - obligation.cause.body_id, - ObjectCastObligation(target)); - let outlives = ty::OutlivesPredicate(data_a.bounds.region_bound, - data_b.bounds.region_bound); - nested.push(Obligation::with_depth(cause, - obligation.recursion_depth + 1, - ty::Binder(outlives).to_predicate())); - } - - // T -> Trait. - (_, &ty::TyTrait(ref data)) => { - let object_did = data.principal_def_id(); - if !object_safety::is_object_safe(tcx, object_did) { - return Err(TraitNotObjectSafe(object_did)); - } - - let cause = ObligationCause::new(obligation.cause.span, - obligation.cause.body_id, - ObjectCastObligation(target)); - let mut push = |predicate| { - nested.push(Obligation::with_depth(cause.clone(), - obligation.recursion_depth + 1, - predicate)); - }; - - // Create the obligation for casting from T to Trait. - push(data.principal_trait_ref_with_self_ty(tcx, source).to_predicate()); - - // We can only make objects from sized types. 
- let mut builtin_bounds = data.bounds.builtin_bounds; - builtin_bounds.insert(ty::BoundSized); - - // Create additional obligations for all the various builtin - // bounds attached to the object cast. (In other words, if the - // object type is Foo+Send, this would create an obligation - // for the Send check.) - for bound in &builtin_bounds { - if let Ok(tr) = util::trait_ref_for_builtin_bound(tcx, bound, source) { - push(tr.to_predicate()); - } else { - return Err(Unimplemented); - } - } - - // Create obligations for the projection predicates. - for bound in data.projection_bounds_with_self_ty(tcx, source) { - push(bound.to_predicate()); - } - - // If the type is `Foo+'a`, ensures that the type - // being cast to `Foo+'a` outlives `'a`: - let outlives = ty::OutlivesPredicate(source, - data.bounds.region_bound); - push(ty::Binder(outlives).to_predicate()); - } - - // [T; n] -> [T]. - (&ty::TyArray(a, _), &ty::TySlice(b)) => { - let origin = TypeOrigin::Misc(obligation.cause.span); - if self.infcx.sub_types(false, origin, a, b).is_err() { - return Err(Unimplemented); - } - } - - // Struct -> Struct. - (&ty::TyStruct(def, substs_a), &ty::TyStruct(_, substs_b)) => { - let fields = def - .all_fields() - .map(|f| f.unsubst_ty()) - .collect::>(); - - // The last field of the structure has to exist and contain type parameters. - let field = if let Some(&field) = fields.last() { - field - } else { - return Err(Unimplemented); - }; - let mut ty_params = vec![]; - for ty in field.walk() { - if let ty::TyParam(p) = ty.sty { - assert!(p.space == TypeSpace); - let idx = p.idx as usize; - if !ty_params.contains(&idx) { - ty_params.push(idx); - } - } - } - if ty_params.is_empty() { - return Err(Unimplemented); - } - - // Replace type parameters used in unsizing with - // TyError and ensure they do not affect any other fields. - // This could be checked after type collection for any struct - // with a potentially unsized trailing field. 
- let mut new_substs = substs_a.clone(); - for &i in &ty_params { - new_substs.types.get_mut_slice(TypeSpace)[i] = tcx.types.err; - } - for &ty in fields.split_last().unwrap().1 { - if ty.subst(tcx, &new_substs).references_error() { - return Err(Unimplemented); - } - } - - // Extract Field and Field from Struct and Struct. - let inner_source = field.subst(tcx, substs_a); - let inner_target = field.subst(tcx, substs_b); - - // Check that the source structure with the target's - // type parameters is a subtype of the target. - for &i in &ty_params { - let param_b = *substs_b.types.get(TypeSpace, i); - new_substs.types.get_mut_slice(TypeSpace)[i] = param_b; - } - let new_struct = tcx.mk_struct(def, tcx.mk_substs(new_substs)); - let origin = TypeOrigin::Misc(obligation.cause.span); - if self.infcx.sub_types(false, origin, new_struct, target).is_err() { - return Err(Unimplemented); - } - - // Construct the nested Field: Unsize> predicate. - nested.push(util::predicate_for_trait_def(tcx, - obligation.cause.clone(), - obligation.predicate.def_id(), - obligation.recursion_depth + 1, - inner_source, - vec![inner_target])); - } - - _ => unreachable!() - }; - - Ok(VtableBuiltinData { nested: nested }) - } - - /////////////////////////////////////////////////////////////////////////// - // Matching - // - // Matching is a common path used for both evaluation and - // confirmation. It basically unifies types that appear in impls - // and traits. This does affect the surrounding environment; - // therefore, when used during evaluation, match routines must be - // run inside of a `probe()` so that their side-effects are - // contained. 
- - fn rematch_impl(&mut self, - impl_def_id: DefId, - obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) - -> (Normalized<'tcx, Substs<'tcx>>, infer::SkolemizationMap) - { - match self.match_impl(impl_def_id, obligation, snapshot) { - Ok((substs, skol_map)) => (substs, skol_map), - Err(()) => { - self.tcx().sess.bug( - &format!("Impl {:?} was matchable against {:?} but now is not", - impl_def_id, - obligation)); - } - } - } - - fn match_impl(&mut self, - impl_def_id: DefId, - obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot) - -> Result<(Normalized<'tcx, Substs<'tcx>>, - infer::SkolemizationMap), ()> - { - let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); - - // Before we create the substitutions and everything, first - // consider a "quick reject". This avoids creating more types - // and so forth that we need to. - if self.fast_reject_trait_refs(obligation, &impl_trait_ref) { - return Err(()); - } - - let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions( - &obligation.predicate, - snapshot); - let skol_obligation_trait_ref = skol_obligation.trait_ref; - - let impl_substs = util::fresh_type_vars_for_impl(self.infcx, - obligation.cause.span, - impl_def_id); - - let impl_trait_ref = impl_trait_ref.subst(self.tcx(), - &impl_substs); - - let impl_trait_ref = - project::normalize_with_depth(self, - obligation.cause.clone(), - obligation.recursion_depth + 1, - &impl_trait_ref); - - debug!("match_impl(impl_def_id={:?}, obligation={:?}, \ - impl_trait_ref={:?}, skol_obligation_trait_ref={:?})", - impl_def_id, - obligation, - impl_trait_ref, - skol_obligation_trait_ref); - - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - if let Err(e) = self.infcx.eq_trait_refs(false, - origin, - impl_trait_ref.value.clone(), - skol_obligation_trait_ref) { - debug!("match_impl: failed eq_trait_refs due to `{}`", e); - return Err(()); - } - - if let Err(e) = 
self.infcx.leak_check(&skol_map, snapshot) { - debug!("match_impl: failed leak check due to `{}`", e); - return Err(()); - } - - debug!("match_impl: success impl_substs={:?}", impl_substs); - Ok((Normalized { - value: impl_substs, - obligations: impl_trait_ref.obligations - }, skol_map)) - } - - fn fast_reject_trait_refs(&mut self, - obligation: &TraitObligation, - impl_trait_ref: &ty::TraitRef) - -> bool - { - // We can avoid creating type variables and doing the full - // substitution if we find that any of the input types, when - // simplified, do not match. - - obligation.predicate.0.input_types().iter() - .zip(impl_trait_ref.input_types()) - .any(|(&obligation_ty, &impl_ty)| { - let simplified_obligation_ty = - fast_reject::simplify_type(self.tcx(), obligation_ty, true); - let simplified_impl_ty = - fast_reject::simplify_type(self.tcx(), impl_ty, false); - - simplified_obligation_ty.is_some() && - simplified_impl_ty.is_some() && - simplified_obligation_ty != simplified_impl_ty - }) - } - - /// Normalize `where_clause_trait_ref` and try to match it against - /// `obligation`. If successful, return any predicates that - /// result from the normalization. Normalization is necessary - /// because where-clauses are stored in the parameter environment - /// unnormalized. - fn match_where_clause_trait_ref(&mut self, - obligation: &TraitObligation<'tcx>, - where_clause_trait_ref: ty::PolyTraitRef<'tcx>) - -> Result>,()> - { - try!(self.match_poly_trait_ref(obligation, where_clause_trait_ref)); - Ok(Vec::new()) - } - - /// Returns `Ok` if `poly_trait_ref` being true implies that the - /// obligation is satisfied. 
- fn match_poly_trait_ref(&self, - obligation: &TraitObligation<'tcx>, - poly_trait_ref: ty::PolyTraitRef<'tcx>) - -> Result<(),()> - { - debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}", - obligation, - poly_trait_ref); - - let origin = TypeOrigin::RelateOutputImplTypes(obligation.cause.span); - match self.infcx.sub_poly_trait_refs(false, - origin, - poly_trait_ref, - obligation.predicate.to_poly_trait_ref()) { - Ok(()) => Ok(()), - Err(_) => Err(()), - } - } - - /////////////////////////////////////////////////////////////////////////// - // Miscellany - - fn match_fresh_trait_refs(&self, - previous: &ty::PolyTraitRef<'tcx>, - current: &ty::PolyTraitRef<'tcx>) - -> bool - { - let mut matcher = ty::_match::Match::new(self.tcx()); - matcher.relate(previous, current).is_ok() - } - - fn push_stack<'o,'s:'o>(&mut self, - previous_stack: TraitObligationStackList<'s, 'tcx>, - obligation: &'o TraitObligation<'tcx>) - -> TraitObligationStack<'o, 'tcx> - { - let fresh_trait_ref = - obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener); - - TraitObligationStack { - obligation: obligation, - fresh_trait_ref: fresh_trait_ref, - previous: previous_stack, - } - } - - fn closure_trait_ref_unnormalized(&mut self, - obligation: &TraitObligation<'tcx>, - closure_def_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> ty::PolyTraitRef<'tcx> - { - let closure_type = self.infcx.closure_type(closure_def_id, substs); - let ty::Binder((trait_ref, _)) = - util::closure_trait_ref_and_return_type(self.tcx(), - obligation.predicate.def_id(), - obligation.predicate.0.self_ty(), // (1) - &closure_type.sig, - util::TupleArgumentsFlag::No); - // (1) Feels icky to skip the binder here, but OTOH we know - // that the self-type is an unboxed closure type and hence is - // in fact unparameterized (or at least does not reference any - // regions bound in the obligation). Still probably some - // refactoring could make this nicer. 
- - ty::Binder(trait_ref) - } - - fn closure_trait_ref(&mut self, - obligation: &TraitObligation<'tcx>, - closure_def_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> Normalized<'tcx, ty::PolyTraitRef<'tcx>> - { - let trait_ref = self.closure_trait_ref_unnormalized( - obligation, closure_def_id, substs); - - // A closure signature can contain associated types which - // must be normalized. - normalize_with_depth(self, - obligation.cause.clone(), - obligation.recursion_depth+1, - &trait_ref) - } - - /// Returns the obligations that are implied by instantiating an - /// impl or trait. The obligations are substituted and fully - /// normalized. This is used when confirming an impl or default - /// impl. - fn impl_or_trait_obligations(&mut self, - cause: ObligationCause<'tcx>, - recursion_depth: usize, - def_id: DefId, // of impl or trait - substs: &Substs<'tcx>, // for impl or trait - skol_map: infer::SkolemizationMap, - snapshot: &infer::CombinedSnapshot) - -> Vec> - { - debug!("impl_or_trait_obligations(def_id={:?})", def_id); - let tcx = self.tcx(); - - // To allow for one-pass evaluation of the nested obligation, - // each predicate must be preceded by the obligations required - // to normalize it. - // for example, if we have: - // impl> Foo for V where U::Item: Copy - // the impl will have the following predicates: - // ::Item = U, - // U: Iterator, U: Sized, - // V: Iterator, V: Sized, - // ::Item: Copy - // When we substitute, say, `V => IntoIter, U => $0`, the last - // obligation will normalize to `<$0 as Iterator>::Item = $1` and - // `$1: Copy`, so we must ensure the obligations are emitted in - // that order. 
- let predicates = tcx - .lookup_predicates(def_id) - .predicates.iter() - .flat_map(|predicate| { - let predicate = - normalize_with_depth(self, cause.clone(), recursion_depth, - &predicate.subst(tcx, substs)); - predicate.obligations.into_iter().chain( - Some(Obligation { - cause: cause.clone(), - recursion_depth: recursion_depth, - predicate: predicate.value - })) - }).collect(); - self.infcx().plug_leaks(skol_map, snapshot, &predicates) - } - - #[allow(unused_comparisons)] - fn derived_cause(&self, - obligation: &TraitObligation<'tcx>, - variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>) - -> ObligationCause<'tcx> - { - /*! - * Creates a cause for obligations that are derived from - * `obligation` by a recursive search (e.g., for a builtin - * bound, or eventually a `impl Foo for ..`). If `obligation` - * is itself a derived obligation, this is just a clone, but - * otherwise we create a "derived obligation" cause so as to - * keep track of the original root obligation for error - * reporting. - */ - - // NOTE(flaper87): As of now, it keeps track of the whole error - // chain. Ideally, we should have a way to configure this either - // by using -Z verbose or just a CLI argument. 
- if obligation.recursion_depth >= 0 { - let derived_cause = DerivedObligationCause { - parent_trait_ref: obligation.predicate.to_poly_trait_ref(), - parent_code: Rc::new(obligation.cause.code.clone()) - }; - let derived_code = variant(derived_cause); - ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code) - } else { - obligation.cause.clone() - } - } -} - -impl<'tcx> SelectionCache<'tcx> { - pub fn new() -> SelectionCache<'tcx> { - SelectionCache { - hashmap: RefCell::new(FnvHashMap()) - } - } -} - -impl<'tcx> EvaluationCache<'tcx> { - pub fn new() -> EvaluationCache<'tcx> { - EvaluationCache { - hashmap: RefCell::new(FnvHashMap()) - } - } -} - -impl<'o,'tcx> TraitObligationStack<'o,'tcx> { - fn list(&'o self) -> TraitObligationStackList<'o,'tcx> { - TraitObligationStackList::with(self) - } - - fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> { - self.list() - } -} - -#[derive(Copy, Clone)] -struct TraitObligationStackList<'o,'tcx:'o> { - head: Option<&'o TraitObligationStack<'o,'tcx>> -} - -impl<'o,'tcx> TraitObligationStackList<'o,'tcx> { - fn empty() -> TraitObligationStackList<'o,'tcx> { - TraitObligationStackList { head: None } - } - - fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> { - TraitObligationStackList { head: Some(r) } - } -} - -impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{ - type Item = &'o TraitObligationStack<'o,'tcx>; - - fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> { - match self.head { - Some(o) => { - *self = o.previous; - Some(o) - } - None => None - } - } -} - -impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TraitObligationStack({:?})", self.obligation) - } -} - -impl EvaluationResult { - fn may_apply(&self) -> bool { - match *self { - EvaluatedToOk | - EvaluatedToAmbig | - EvaluatedToUnknown => true, - - EvaluatedToErr => false - } - } -} - -impl 
MethodMatchResult { - pub fn may_apply(&self) -> bool { - match *self { - MethodMatched(_) => true, - MethodAmbiguous(_) => true, - MethodDidNotMatch => false, - } - } -} diff --git a/src/librustc/middle/traits/structural_impls.rs b/src/librustc/middle/traits/structural_impls.rs deleted file mode 100644 index 453420e2a54dc..0000000000000 --- a/src/librustc/middle/traits/structural_impls.rs +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::traits; -use middle::traits::project::Normalized; -use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; - -use std::fmt; - -// structural impls for the structs in middle::traits - -impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Normalized({:?},{:?})", - self.value, - self.obligations) - } -} - -impl<'tcx> fmt::Debug for traits::RegionObligation<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionObligation(sub_region={:?}, sup_type={:?})", - self.sub_region, - self.sup_type) - } -} -impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Obligation(predicate={:?},depth={})", - self.predicate, - self.recursion_depth) - } -} - -impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - super::VtableImpl(ref v) => - write!(f, "{:?}", v), - - super::VtableDefaultImpl(ref t) => - write!(f, "{:?}", t), - - super::VtableClosure(ref d) => - write!(f, "{:?}", d), - - super::VtableFnPointer(ref d) 
=> - write!(f, "VtableFnPointer({:?})", d), - - super::VtableObject(ref d) => - write!(f, "{:?}", d), - - super::VtableParam(ref n) => - write!(f, "VtableParam({:?})", n), - - super::VtableBuiltin(ref d) => - write!(f, "{:?}", d) - } - } -} - -impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})", - self.impl_def_id, - self.substs, - self.nested) - } -} - -impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", - self.closure_def_id, - self.substs, - self.nested) - } -} - -impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableBuiltin(nested={:?})", self.nested) - } -} - -impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableDefaultImplData { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableDefaultImplData(trait_def_id={:?}, nested={:?})", - self.trait_def_id, - self.nested) - } -} - -impl<'tcx> fmt::Debug for traits::VtableObjectData<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "VtableObject(upcast={:?}, vtable_base={})", - self.upcast_trait_ref, - self.vtable_base) - } -} - -impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "FulfillmentError({:?},{:?})", - self.obligation, - self.code) - } -} - -impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - super::CodeSelectionError(ref e) => write!(f, "{:?}", e), - super::CodeProjectionError(ref e) => write!(f, "{:?}", e), - super::CodeAmbiguity => write!(f, "Ambiguity") - } - } -} - -impl<'tcx> fmt::Debug for 
traits::MismatchedProjectionTypes<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "MismatchedProjectionTypes({:?})", self.err) - } -} - -impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> -{ - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::Obligation { - cause: self.cause.clone(), - recursion_depth: self.recursion_depth, - predicate: self.predicate.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.predicate.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::VtableImplData { - impl_def_id: self.impl_def_id, - substs: self.substs.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) || self.nested.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::VtableClosureData { - closure_def_id: self.closure_def_id, - substs: self.substs.fold_with(folder), - nested: self.nested.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) || self.nested.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableDefaultImplData { - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::VtableDefaultImplData { - trait_def_id: self.trait_def_id, - nested: self.nested.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.nested.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData { - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::VtableBuiltinData { - nested: 
self.nested.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.nested.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - traits::VtableObjectData { - upcast_trait_ref: self.upcast_trait_ref.fold_with(folder), - vtable_base: self.vtable_base - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.upcast_trait_ref.visit_with(visitor) - } -} - -impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)), - traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)), - traits::VtableClosure(ref d) => { - traits::VtableClosure(d.fold_with(folder)) - } - traits::VtableFnPointer(ref d) => { - traits::VtableFnPointer(d.fold_with(folder)) - } - traits::VtableParam(ref n) => traits::VtableParam(n.fold_with(folder)), - traits::VtableBuiltin(ref d) => traits::VtableBuiltin(d.fold_with(folder)), - traits::VtableObject(ref d) => traits::VtableObject(d.fold_with(folder)), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - traits::VtableImpl(ref v) => v.visit_with(visitor), - traits::VtableDefaultImpl(ref t) => t.visit_with(visitor), - traits::VtableClosure(ref d) => d.visit_with(visitor), - traits::VtableFnPointer(ref d) => d.visit_with(visitor), - traits::VtableParam(ref n) => n.visit_with(visitor), - traits::VtableBuiltin(ref d) => d.visit_with(visitor), - traits::VtableObject(ref d) => d.visit_with(visitor), - } - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - Normalized { - value: self.value.fold_with(folder), - obligations: self.obligations.fold_with(folder), - } - } - - fn super_visit_with>(&self, 
visitor: &mut V) -> bool { - self.value.visit_with(visitor) || self.obligations.visit_with(visitor) - } -} diff --git a/src/librustc/middle/traits/util.rs b/src/librustc/middle/traits/util.rs deleted file mode 100644 index c50c9e9765d25..0000000000000 --- a/src/librustc/middle/traits/util.rs +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::def_id::DefId; -use middle::infer::InferCtxt; -use middle::subst::Substs; -use middle::ty::{self, Ty, ToPredicate, ToPolyTraitRef}; -use syntax::codemap::Span; -use util::common::ErrorReported; -use util::nodemap::FnvHashSet; - -use super::{Obligation, ObligationCause, PredicateObligation}; - -struct PredicateSet<'a,'tcx:'a> { - tcx: &'a ty::ctxt<'tcx>, - set: FnvHashSet>, -} - -impl<'a,'tcx> PredicateSet<'a,'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>) -> PredicateSet<'a,'tcx> { - PredicateSet { tcx: tcx, set: FnvHashSet() } - } - - fn insert(&mut self, pred: &ty::Predicate<'tcx>) -> bool { - // We have to be careful here because we want - // - // for<'a> Foo<&'a int> - // - // and - // - // for<'b> Foo<&'b int> - // - // to be considered equivalent. So normalize all late-bound - // regions before we throw things into the underlying set. 
- let normalized_pred = match *pred { - ty::Predicate::Trait(ref data) => - ty::Predicate::Trait(self.tcx.anonymize_late_bound_regions(data)), - - ty::Predicate::Equate(ref data) => - ty::Predicate::Equate(self.tcx.anonymize_late_bound_regions(data)), - - ty::Predicate::RegionOutlives(ref data) => - ty::Predicate::RegionOutlives(self.tcx.anonymize_late_bound_regions(data)), - - ty::Predicate::TypeOutlives(ref data) => - ty::Predicate::TypeOutlives(self.tcx.anonymize_late_bound_regions(data)), - - ty::Predicate::Projection(ref data) => - ty::Predicate::Projection(self.tcx.anonymize_late_bound_regions(data)), - - ty::Predicate::WellFormed(data) => - ty::Predicate::WellFormed(data), - - ty::Predicate::ObjectSafe(data) => - ty::Predicate::ObjectSafe(data), - }; - self.set.insert(normalized_pred) - } -} - -/////////////////////////////////////////////////////////////////////////// -// `Elaboration` iterator -/////////////////////////////////////////////////////////////////////////// - -/// "Elaboration" is the process of identifying all the predicates that -/// are implied by a source predicate. Currently this basically means -/// walking the "supertraits" and other similar assumptions. For -/// example, if we know that `T : Ord`, the elaborator would deduce -/// that `T : PartialOrd` holds as well. Similarly, if we have `trait -/// Foo : 'static`, and we know that `T : Foo`, then we know that `T : -/// 'static`. 
-pub struct Elaborator<'cx, 'tcx:'cx> { - tcx: &'cx ty::ctxt<'tcx>, - stack: Vec>, - visited: PredicateSet<'cx,'tcx>, -} - -pub fn elaborate_trait_ref<'cx, 'tcx>( - tcx: &'cx ty::ctxt<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>) - -> Elaborator<'cx, 'tcx> -{ - elaborate_predicates(tcx, vec![trait_ref.to_predicate()]) -} - -pub fn elaborate_trait_refs<'cx, 'tcx>( - tcx: &'cx ty::ctxt<'tcx>, - trait_refs: &[ty::PolyTraitRef<'tcx>]) - -> Elaborator<'cx, 'tcx> -{ - let predicates = trait_refs.iter() - .map(|trait_ref| trait_ref.to_predicate()) - .collect(); - elaborate_predicates(tcx, predicates) -} - -pub fn elaborate_predicates<'cx, 'tcx>( - tcx: &'cx ty::ctxt<'tcx>, - mut predicates: Vec>) - -> Elaborator<'cx, 'tcx> -{ - let mut visited = PredicateSet::new(tcx); - predicates.retain(|pred| visited.insert(pred)); - Elaborator { tcx: tcx, stack: predicates, visited: visited } -} - -impl<'cx, 'tcx> Elaborator<'cx, 'tcx> { - pub fn filter_to_traits(self) -> FilterToTraits> { - FilterToTraits::new(self) - } - - fn push(&mut self, predicate: &ty::Predicate<'tcx>) { - match *predicate { - ty::Predicate::Trait(ref data) => { - // Predicates declared on the trait. - let predicates = self.tcx.lookup_super_predicates(data.def_id()); - - let mut predicates: Vec<_> = - predicates.predicates - .iter() - .map(|p| p.subst_supertrait(self.tcx, &data.to_poly_trait_ref())) - .collect(); - - debug!("super_predicates: data={:?} predicates={:?}", - data, predicates); - - // Only keep those bounds that we haven't already - // seen. This is necessary to prevent infinite - // recursion in some cases. One common case is when - // people define `trait Sized: Sized { }` rather than `trait - // Sized { }`. - predicates.retain(|r| self.visited.insert(r)); - - self.stack.extend(predicates); - } - ty::Predicate::WellFormed(..) => { - // Currently, we do not elaborate WF predicates, - // although we easily could. - } - ty::Predicate::ObjectSafe(..) 
=> { - // Currently, we do not elaborate object-safe - // predicates. - } - ty::Predicate::Equate(..) => { - // Currently, we do not "elaborate" predicates like - // `X == Y`, though conceivably we might. For example, - // `&X == &Y` implies that `X == Y`. - } - ty::Predicate::Projection(..) => { - // Nothing to elaborate in a projection predicate. - } - ty::Predicate::RegionOutlives(..) | - ty::Predicate::TypeOutlives(..) => { - // Currently, we do not "elaborate" predicates like - // `'a : 'b` or `T : 'a`. We could conceivably do - // more here. For example, - // - // &'a int : 'b - // - // implies that - // - // 'a : 'b - // - // and we could get even more if we took WF - // constraints into account. For example, - // - // &'a &'b int : 'c - // - // implies that - // - // 'b : 'a - // 'a : 'c - } - } - } -} - -impl<'cx, 'tcx> Iterator for Elaborator<'cx, 'tcx> { - type Item = ty::Predicate<'tcx>; - - fn next(&mut self) -> Option> { - // Extract next item from top-most stack frame, if any. - let next_predicate = match self.stack.pop() { - Some(predicate) => predicate, - None => { - // No more stack frames. Done. 
- return None; - } - }; - self.push(&next_predicate); - return Some(next_predicate); - } -} - -/////////////////////////////////////////////////////////////////////////// -// Supertrait iterator -/////////////////////////////////////////////////////////////////////////// - -pub type Supertraits<'cx, 'tcx> = FilterToTraits>; - -pub fn supertraits<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>) - -> Supertraits<'cx, 'tcx> -{ - elaborate_trait_ref(tcx, trait_ref).filter_to_traits() -} - -pub fn transitive_bounds<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>, - bounds: &[ty::PolyTraitRef<'tcx>]) - -> Supertraits<'cx, 'tcx> -{ - elaborate_trait_refs(tcx, bounds).filter_to_traits() -} - -/////////////////////////////////////////////////////////////////////////// -// Iterator over def-ids of supertraits - -pub struct SupertraitDefIds<'cx, 'tcx:'cx> { - tcx: &'cx ty::ctxt<'tcx>, - stack: Vec, - visited: FnvHashSet, -} - -pub fn supertrait_def_ids<'cx, 'tcx>(tcx: &'cx ty::ctxt<'tcx>, - trait_def_id: DefId) - -> SupertraitDefIds<'cx, 'tcx> -{ - SupertraitDefIds { - tcx: tcx, - stack: vec![trait_def_id], - visited: Some(trait_def_id).into_iter().collect(), - } -} - -impl<'cx, 'tcx> Iterator for SupertraitDefIds<'cx, 'tcx> { - type Item = DefId; - - fn next(&mut self) -> Option { - let def_id = match self.stack.pop() { - Some(def_id) => def_id, - None => { return None; } - }; - - let predicates = self.tcx.lookup_super_predicates(def_id); - let visited = &mut self.visited; - self.stack.extend( - predicates.predicates - .iter() - .filter_map(|p| p.to_opt_poly_trait_ref()) - .map(|t| t.def_id()) - .filter(|&super_def_id| visited.insert(super_def_id))); - Some(def_id) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Other -/////////////////////////////////////////////////////////////////////////// - -/// A filter around an iterator of predicates that makes it yield up -/// just trait references. 
-pub struct FilterToTraits { - base_iterator: I -} - -impl FilterToTraits { - fn new(base: I) -> FilterToTraits { - FilterToTraits { base_iterator: base } - } -} - -impl<'tcx,I:Iterator>> Iterator for FilterToTraits { - type Item = ty::PolyTraitRef<'tcx>; - - fn next(&mut self) -> Option> { - loop { - match self.base_iterator.next() { - None => { - return None; - } - Some(ty::Predicate::Trait(data)) => { - return Some(data.to_poly_trait_ref()); - } - Some(_) => { - } - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Other -/////////////////////////////////////////////////////////////////////////// - -// determine the `self` type, using fresh variables for all variables -// declared on the impl declaration e.g., `impl for Box<[(A,B)]>` -// would return ($0, $1) where $0 and $1 are freshly instantiated type -// variables. -pub fn fresh_type_vars_for_impl<'a, 'tcx>(infcx: &InferCtxt<'a, 'tcx>, - span: Span, - impl_def_id: DefId) - -> Substs<'tcx> -{ - let tcx = infcx.tcx; - let impl_generics = tcx.lookup_item_type(impl_def_id).generics; - infcx.fresh_substs_for_generics(span, &impl_generics) -} - -/// See `super::obligations_for_generics` -pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, - recursion_depth: usize, - generic_bounds: &ty::InstantiatedPredicates<'tcx>) - -> Vec> -{ - debug!("predicates_for_generics(generic_bounds={:?})", - generic_bounds); - - generic_bounds.predicates.iter().map(|predicate| { - Obligation { cause: cause.clone(), - recursion_depth: recursion_depth, - predicate: predicate.clone() } - }).collect() -} - -pub fn trait_ref_for_builtin_bound<'tcx>( - tcx: &ty::ctxt<'tcx>, - builtin_bound: ty::BuiltinBound, - param_ty: Ty<'tcx>) - -> Result, ErrorReported> -{ - match tcx.lang_items.from_builtin_kind(builtin_bound) { - Ok(def_id) => { - Ok(ty::TraitRef { - def_id: def_id, - substs: tcx.mk_substs(Substs::empty().with_self_ty(param_ty)) - }) - } - Err(e) => { - tcx.sess.err(&e); - 
Err(ErrorReported) - } - } -} - - -pub fn predicate_for_trait_ref<'tcx>( - cause: ObligationCause<'tcx>, - trait_ref: ty::TraitRef<'tcx>, - recursion_depth: usize) - -> PredicateObligation<'tcx> -{ - Obligation { - cause: cause, - recursion_depth: recursion_depth, - predicate: trait_ref.to_predicate(), - } -} - -pub fn predicate_for_trait_def<'tcx>( - tcx: &ty::ctxt<'tcx>, - cause: ObligationCause<'tcx>, - trait_def_id: DefId, - recursion_depth: usize, - param_ty: Ty<'tcx>, - ty_params: Vec>) - -> PredicateObligation<'tcx> -{ - let trait_ref = ty::TraitRef { - def_id: trait_def_id, - substs: tcx.mk_substs(Substs::new_trait(ty_params, vec![], param_ty)) - }; - predicate_for_trait_ref(cause, trait_ref, recursion_depth) -} - -pub fn predicate_for_builtin_bound<'tcx>( - tcx: &ty::ctxt<'tcx>, - cause: ObligationCause<'tcx>, - builtin_bound: ty::BuiltinBound, - recursion_depth: usize, - param_ty: Ty<'tcx>) - -> Result, ErrorReported> -{ - let trait_ref = try!(trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty)); - Ok(predicate_for_trait_ref(cause, trait_ref, recursion_depth)) -} - -/// Cast a trait reference into a reference to one of its super -/// traits; returns `None` if `target_trait_def_id` is not a -/// supertrait. -pub fn upcast<'tcx>(tcx: &ty::ctxt<'tcx>, - source_trait_ref: ty::PolyTraitRef<'tcx>, - target_trait_def_id: DefId) - -> Vec> -{ - if source_trait_ref.def_id() == target_trait_def_id { - return vec![source_trait_ref]; // shorcut the most common case - } - - supertraits(tcx, source_trait_ref) - .filter(|r| r.def_id() == target_trait_def_id) - .collect() -} - -/// Given a trait `trait_ref`, returns the number of vtable entries -/// that come from `trait_ref`, excluding its supertraits. Used in -/// computing the vtable base for an upcast trait of a trait object. 
-pub fn count_own_vtable_entries<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>) - -> usize { - let mut entries = 0; - // Count number of methods and add them to the total offset. - // Skip over associated types and constants. - for trait_item in &tcx.trait_items(trait_ref.def_id())[..] { - if let ty::MethodTraitItem(_) = *trait_item { - entries += 1; - } - } - entries -} - -/// Given an upcast trait object described by `object`, returns the -/// index of the method `method_def_id` (which should be part of -/// `object.upcast_trait_ref`) within the vtable for `object`. -pub fn get_vtable_index_of_object_method<'tcx>(tcx: &ty::ctxt<'tcx>, - object: &super::VtableObjectData<'tcx>, - method_def_id: DefId) -> usize { - // Count number of methods preceding the one we are selecting and - // add them to the total offset. - // Skip over associated types and constants. - let mut entries = object.vtable_base; - for trait_item in &tcx.trait_items(object.upcast_trait_ref.def_id())[..] { - if trait_item.def_id() == method_def_id { - // The item with the ID we were given really ought to be a method. 
- assert!(match *trait_item { - ty::MethodTraitItem(_) => true, - _ => false - }); - - return entries; - } - if let ty::MethodTraitItem(_) = *trait_item { - entries += 1; - } - } - - tcx.sess.bug(&format!("get_vtable_index_of_object_method: {:?} was not found", - method_def_id)); -} - -pub enum TupleArgumentsFlag { Yes, No } - -pub fn closure_trait_ref_and_return_type<'tcx>( - tcx: &ty::ctxt<'tcx>, - fn_trait_def_id: DefId, - self_ty: Ty<'tcx>, - sig: &ty::PolyFnSig<'tcx>, - tuple_arguments: TupleArgumentsFlag) - -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)> -{ - let arguments_tuple = match tuple_arguments { - TupleArgumentsFlag::No => sig.0.inputs[0], - TupleArgumentsFlag::Yes => tcx.mk_tup(sig.0.inputs.to_vec()), - }; - let trait_substs = Substs::new_trait(vec![arguments_tuple], vec![], self_ty); - let trait_ref = ty::TraitRef { - def_id: fn_trait_def_id, - substs: tcx.mk_substs(trait_substs), - }; - ty::Binder((trait_ref, sig.0.output.unwrap_or(tcx.mk_nil()))) -} diff --git a/src/librustc/middle/ty/_match.rs b/src/librustc/middle/ty/_match.rs deleted file mode 100644 index 5a3ad9095ad2c..0000000000000 --- a/src/librustc/middle/ty/_match.rs +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::ty::{self, Ty}; -use middle::ty::error::TypeError; -use middle::ty::relate::{self, Relate, TypeRelation, RelateResult}; - -/// A type "A" *matches* "B" if the fresh types in B could be -/// substituted with values so as to make it equal to A. Matching is -/// intended to be used only on freshened types, and it basically -/// indicates if the non-freshened versions of A and B could have been -/// unified. 
-/// -/// It is only an approximation. If it yields false, unification would -/// definitely fail, but a true result doesn't mean unification would -/// succeed. This is because we don't track the "side-constraints" on -/// type variables, nor do we track if the same freshened type appears -/// more than once. To some extent these approximations could be -/// fixed, given effort. -/// -/// Like subtyping, matching is really a binary relation, so the only -/// important thing about the result is Ok/Err. Also, matching never -/// affects any type variables or unification state. -pub struct Match<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx> -} - -impl<'a, 'tcx> Match<'a, 'tcx> { - pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> { - Match { tcx: tcx } - } -} - -impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> { - fn tag(&self) -> &'static str { "Match" } - fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx } - fn a_is_expected(&self) -> bool { true } // irrelevant - - fn relate_with_variance>(&mut self, - _: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T> - { - self.relate(a, b) - } - - fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> { - debug!("{}.regions({:?}, {:?})", - self.tag(), - a, - b); - Ok(a) - } - - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { - debug!("{}.tys({:?}, {:?})", self.tag(), - a, b); - if a == b { return Ok(a); } - - match (&a.sty, &b.sty) { - (_, &ty::TyInfer(ty::FreshTy(_))) | - (_, &ty::TyInfer(ty::FreshIntTy(_))) | - (_, &ty::TyInfer(ty::FreshFloatTy(_))) => { - Ok(a) - } - - (&ty::TyInfer(_), _) | - (_, &ty::TyInfer(_)) => { - Err(TypeError::Sorts(relate::expected_found(self, &a, &b))) - } - - (&ty::TyError, _) | (_, &ty::TyError) => { - Ok(self.tcx().types.err) - } - - _ => { - relate::super_relate_tys(self, a, b) - } - } - } - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a,'tcx> - { - 
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder())))) - } -} diff --git a/src/librustc/middle/ty/adjustment.rs b/src/librustc/middle/ty/adjustment.rs deleted file mode 100644 index 6cab0baa55325..0000000000000 --- a/src/librustc/middle/ty/adjustment.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub use self::AutoAdjustment::*; -pub use self::AutoRef::*; - -use middle::ty::{self, Ty, TypeAndMut, TypeFoldable}; -use middle::ty::LvaluePreference::{NoPreference}; - -use syntax::ast; -use syntax::codemap::Span; - -use rustc_front::hir; - -#[derive(Copy, Clone)] -pub enum AutoAdjustment<'tcx> { - AdjustReifyFnPointer, // go from a fn-item type to a fn-pointer type - AdjustUnsafeFnPointer, // go from a safe fn pointer to an unsafe fn pointer - AdjustDerefRef(AutoDerefRef<'tcx>), -} - -/// Represents coercing a pointer to a different kind of pointer - where 'kind' -/// here means either or both of raw vs borrowed vs unique and fat vs thin. -/// -/// We transform pointers by following the following steps in order: -/// 1. Deref the pointer `self.autoderefs` times (may be 0). -/// 2. If `autoref` is `Some(_)`, then take the address and produce either a -/// `&` or `*` pointer. -/// 3. If `unsize` is `Some(_)`, then apply the unsize transformation, -/// which will do things like convert thin pointers to fat -/// pointers, or convert structs containing thin pointers to -/// structs containing fat pointers, or convert between fat -/// pointers. We don't store the details of how the transform is -/// done (in fact, we don't know that, because it might depend on -/// the precise type parameters). 
We just store the target -/// type. Trans figures out what has to be done at monomorphization -/// time based on the precise source/target type at hand. -/// -/// To make that more concrete, here are some common scenarios: -/// -/// 1. The simplest cases are where the pointer is not adjusted fat vs thin. -/// Here the pointer will be dereferenced N times (where a dereference can -/// happen to raw or borrowed pointers or any smart pointer which implements -/// Deref, including Box<_>). The number of dereferences is given by -/// `autoderefs`. It can then be auto-referenced zero or one times, indicated -/// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is -/// None. -/// -/// 2. A thin-to-fat coercon involves unsizing the underlying data. We start -/// with a thin pointer, deref a number of times, unsize the underlying data, -/// then autoref. The 'unsize' phase may change a fixed length array to a -/// dynamically sized one, a concrete object to a trait object, or statically -/// sized struct to a dyncamically sized one. E.g., &[i32; 4] -> &[i32] is -/// represented by: -/// -/// ``` -/// AutoDerefRef { -/// autoderefs: 1, // &[i32; 4] -> [i32; 4] -/// autoref: Some(AutoPtr), // [i32] -> &[i32] -/// unsize: Some([i32]), // [i32; 4] -> [i32] -/// } -/// ``` -/// -/// Note that for a struct, the 'deep' unsizing of the struct is not recorded. -/// E.g., `struct Foo { x: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]> -/// The autoderef and -ref are the same as in the above example, but the type -/// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about -/// the underlying conversions from `[i32; 4]` to `[i32]`. -/// -/// 3. Coercing a `Box` to `Box` is an interesting special case. In -/// that case, we have the pointer we need coming in, so there are no -/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation. 
-/// At some point, of course, `Box` should move out of the compiler, in which -/// case this is analogous to transformating a struct. E.g., Box<[i32; 4]> -> -/// Box<[i32]> is represented by: -/// -/// ``` -/// AutoDerefRef { -/// autoderefs: 0, -/// autoref: None, -/// unsize: Some(Box<[i32]>), -/// } -/// ``` -#[derive(Copy, Clone)] -pub struct AutoDerefRef<'tcx> { - /// Step 1. Apply a number of dereferences, producing an lvalue. - pub autoderefs: usize, - - /// Step 2. Optionally produce a pointer/reference from the value. - pub autoref: Option>, - - /// Step 3. Unsize a pointer/reference value, e.g. `&[T; n]` to - /// `&[T]`. The stored type is the target pointer type. Note that - /// the source could be a thin or fat pointer. - pub unsize: Option>, -} - -impl<'tcx> AutoAdjustment<'tcx> { - pub fn is_identity(&self) -> bool { - match *self { - AdjustReifyFnPointer | - AdjustUnsafeFnPointer => false, - AdjustDerefRef(ref r) => r.is_identity(), - } - } -} -impl<'tcx> AutoDerefRef<'tcx> { - pub fn is_identity(&self) -> bool { - self.autoderefs == 0 && self.unsize.is_none() && self.autoref.is_none() - } -} - - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum AutoRef<'tcx> { - /// Convert from T to &T. - AutoPtr(&'tcx ty::Region, hir::Mutability), - - /// Convert from T to *T. - /// Value to thin pointer. - AutoUnsafe(hir::Mutability), -} - -#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)] -pub enum CustomCoerceUnsized { - /// Records the index of the field being coerced. 
- Struct(usize) -} - -impl<'tcx> ty::TyS<'tcx> { - /// See `expr_ty_adjusted` - pub fn adjust(&'tcx self, cx: &ty::ctxt<'tcx>, - span: Span, - expr_id: ast::NodeId, - adjustment: Option<&AutoAdjustment<'tcx>>, - mut method_type: F) - -> Ty<'tcx> where - F: FnMut(ty::MethodCall) -> Option>, - { - if let ty::TyError = self.sty { - return self; - } - - return match adjustment { - Some(adjustment) => { - match *adjustment { - AdjustReifyFnPointer => { - match self.sty { - ty::TyBareFn(Some(_), b) => { - cx.mk_fn(None, b) - } - _ => { - cx.sess.bug( - &format!("AdjustReifyFnPointer adjustment on non-fn-item: \ - {:?}", self)); - } - } - } - - AdjustUnsafeFnPointer => { - match self.sty { - ty::TyBareFn(None, b) => cx.safe_to_unsafe_fn_ty(b), - ref b => { - cx.sess.bug( - &format!("AdjustReifyFnPointer adjustment on non-fn-item: \ - {:?}", - b)); - } - } - } - - AdjustDerefRef(ref adj) => { - let mut adjusted_ty = self; - - if !adjusted_ty.references_error() { - for i in 0..adj.autoderefs { - adjusted_ty = - adjusted_ty.adjust_for_autoderef(cx, - expr_id, - span, - i as u32, - &mut method_type); - } - } - - if let Some(target) = adj.unsize { - target - } else { - adjusted_ty.adjust_for_autoref(cx, adj.autoref) - } - } - } - } - None => self - }; - } - - pub fn adjust_for_autoderef(&'tcx self, - cx: &ty::ctxt<'tcx>, - expr_id: ast::NodeId, - expr_span: Span, - autoderef: u32, // how many autoderefs so far? - mut method_type: F) - -> Ty<'tcx> where - F: FnMut(ty::MethodCall) -> Option>, - { - let method_call = ty::MethodCall::autoderef(expr_id, autoderef); - let mut adjusted_ty = self; - if let Some(method_ty) = method_type(method_call) { - // Method calls always have all late-bound regions - // fully instantiated. 
- let fn_ret = cx.no_late_bound_regions(&method_ty.fn_ret()).unwrap(); - adjusted_ty = fn_ret.unwrap(); - } - match adjusted_ty.builtin_deref(true, NoPreference) { - Some(mt) => mt.ty, - None => { - cx.sess.span_bug( - expr_span, - &format!("the {}th autoderef failed: {}", - autoderef, - adjusted_ty) - ); - } - } - } - - pub fn adjust_for_autoref(&'tcx self, cx: &ty::ctxt<'tcx>, - autoref: Option>) - -> Ty<'tcx> { - match autoref { - None => self, - Some(AutoPtr(r, m)) => { - cx.mk_ref(r, TypeAndMut { ty: self, mutbl: m }) - } - Some(AutoUnsafe(m)) => { - cx.mk_ptr(TypeAndMut { ty: self, mutbl: m }) - } - } - } -} diff --git a/src/librustc/middle/ty/context.rs b/src/librustc/middle/ty/context.rs deleted file mode 100644 index d1504d25288a8..0000000000000 --- a/src/librustc/middle/ty/context.rs +++ /dev/null @@ -1,1052 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
type context book-keeping - -// FIXME: (@jroesch) @eddyb should remove this when he renames ctxt -#![allow(non_camel_case_types)] - -use dep_graph::{DepGraph, DepTrackingMap}; -use front::map as ast_map; -use session::Session; -use lint; -use middle; -use middle::cstore::CrateStore; -use middle::def::DefMap; -use middle::def_id::DefId; -use middle::free_region::FreeRegionMap; -use middle::region::RegionMaps; -use middle::resolve_lifetime; -use middle::stability; -use middle::subst::{self, Subst, Substs}; -use middle::traits; -use middle::ty::{self, TraitRef, Ty, TypeAndMut}; -use middle::ty::{TyS, TypeVariants}; -use middle::ty::{AdtDef, ClosureSubsts, ExistentialBounds, Region}; -use middle::ty::{FreevarMap}; -use middle::ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, TraitTy}; -use middle::ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; -use middle::ty::TypeVariants::*; -use middle::ty::maps; -use util::common::MemoizationMap; -use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet}; -use util::nodemap::FnvHashMap; - -use arena::TypedArena; -use std::borrow::Borrow; -use std::cell::{Cell, RefCell, Ref}; -use std::hash::{Hash, Hasher}; -use std::rc::Rc; -use syntax::abi; -use syntax::ast::{self, Name, NodeId}; -use syntax::attr; -use syntax::parse::token::special_idents; - -use rustc_front::hir; - -/// Internal storage -pub struct CtxtArenas<'tcx> { - // internings - type_: TypedArena>, - substs: TypedArena>, - bare_fn: TypedArena>, - region: TypedArena, - stability: TypedArena, - - // references - trait_defs: TypedArena>, - adt_defs: TypedArena>, -} - -impl<'tcx> CtxtArenas<'tcx> { - pub fn new() -> CtxtArenas<'tcx> { - CtxtArenas { - type_: TypedArena::new(), - substs: TypedArena::new(), - bare_fn: TypedArena::new(), - region: TypedArena::new(), - stability: TypedArena::new(), - - trait_defs: TypedArena::new(), - adt_defs: TypedArena::new() - } - } -} - -pub struct CommonTypes<'tcx> { - pub bool: Ty<'tcx>, - pub char: Ty<'tcx>, - pub isize: Ty<'tcx>, - 
pub i8: Ty<'tcx>, - pub i16: Ty<'tcx>, - pub i32: Ty<'tcx>, - pub i64: Ty<'tcx>, - pub usize: Ty<'tcx>, - pub u8: Ty<'tcx>, - pub u16: Ty<'tcx>, - pub u32: Ty<'tcx>, - pub u64: Ty<'tcx>, - pub f32: Ty<'tcx>, - pub f64: Ty<'tcx>, - pub err: Ty<'tcx>, -} - -pub struct Tables<'tcx> { - /// Stores the types for various nodes in the AST. Note that this table - /// is not guaranteed to be populated until after typeck. See - /// typeck::check::fn_ctxt for details. - pub node_types: NodeMap>, - - /// Stores the type parameters which were substituted to obtain the type - /// of this node. This only applies to nodes that refer to entities - /// parameterized by type parameters, such as generic fns, types, or - /// other items. - pub item_substs: NodeMap>, - - pub adjustments: NodeMap>, - - pub method_map: ty::MethodMap<'tcx>, - - /// Borrows - pub upvar_capture_map: ty::UpvarCaptureMap, - - /// Records the type of each closure. The def ID is the ID of the - /// expression defining the closure. - pub closure_tys: DefIdMap>, - - /// Records the type of each closure. The def ID is the ID of the - /// expression defining the closure. - pub closure_kinds: DefIdMap, - - /// For each fn, records the "liberated" types of its arguments - /// and return type. Liberated means that all bound regions - /// (including late-bound regions) are replaced with free - /// equivalents. This table is not used in trans (since regions - /// are erased there) and hence is not serialized to metadata. 
- pub liberated_fn_sigs: NodeMap>, -} - -impl<'tcx> Tables<'tcx> { - pub fn empty() -> Tables<'tcx> { - Tables { - node_types: FnvHashMap(), - item_substs: NodeMap(), - adjustments: NodeMap(), - method_map: FnvHashMap(), - upvar_capture_map: FnvHashMap(), - closure_tys: DefIdMap(), - closure_kinds: DefIdMap(), - liberated_fn_sigs: NodeMap(), - } - } - - pub fn closure_kind(this: &RefCell, - tcx: &ty::ctxt<'tcx>, - def_id: DefId) - -> ty::ClosureKind { - // If this is a local def-id, it should be inserted into the - // tables by typeck; else, it will be retreived from - // the external crate metadata. - if let Some(&kind) = this.borrow().closure_kinds.get(&def_id) { - return kind; - } - - let kind = tcx.sess.cstore.closure_kind(tcx, def_id); - this.borrow_mut().closure_kinds.insert(def_id, kind); - kind - } - - pub fn closure_type(this: &RefCell, - tcx: &ty::ctxt<'tcx>, - def_id: DefId, - substs: &ClosureSubsts<'tcx>) - -> ty::ClosureTy<'tcx> - { - // If this is a local def-id, it should be inserted into the - // tables by typeck; else, it will be retreived from - // the external crate metadata. 
- if let Some(ty) = this.borrow().closure_tys.get(&def_id) { - return ty.subst(tcx, &substs.func_substs); - } - - let ty = tcx.sess.cstore.closure_ty(tcx, def_id); - this.borrow_mut().closure_tys.insert(def_id, ty.clone()); - ty.subst(tcx, &substs.func_substs) - } -} - -impl<'tcx> CommonTypes<'tcx> { - fn new(arena: &'tcx TypedArena>, - interner: &RefCell, Ty<'tcx>>>) - -> CommonTypes<'tcx> - { - let mk = |sty| ctxt::intern_ty(arena, interner, sty); - CommonTypes { - bool: mk(TyBool), - char: mk(TyChar), - err: mk(TyError), - isize: mk(TyInt(ast::TyIs)), - i8: mk(TyInt(ast::TyI8)), - i16: mk(TyInt(ast::TyI16)), - i32: mk(TyInt(ast::TyI32)), - i64: mk(TyInt(ast::TyI64)), - usize: mk(TyUint(ast::TyUs)), - u8: mk(TyUint(ast::TyU8)), - u16: mk(TyUint(ast::TyU16)), - u32: mk(TyUint(ast::TyU32)), - u64: mk(TyUint(ast::TyU64)), - f32: mk(TyFloat(ast::TyF32)), - f64: mk(TyFloat(ast::TyF64)), - } - } -} - -/// The data structure to keep track of all the information that typechecker -/// generates so that so that it can be reused and doesn't have to be redone -/// later on. -pub struct ctxt<'tcx> { - /// The arenas that types etc are allocated from. - arenas: &'tcx CtxtArenas<'tcx>, - - /// Specifically use a speedy hash algorithm for this hash map, it's used - /// quite often. - // FIXME(eddyb) use a FnvHashSet> when equivalent keys can - // queried from a HashSet. - interner: RefCell, Ty<'tcx>>>, - - // FIXME as above, use a hashset if equivalent elements can be queried. - substs_interner: RefCell, &'tcx Substs<'tcx>>>, - bare_fn_interner: RefCell, &'tcx BareFnTy<'tcx>>>, - region_interner: RefCell>, - stability_interner: RefCell>, - - pub dep_graph: DepGraph, - - /// Common types, pre-interned for your convenience. 
- pub types: CommonTypes<'tcx>, - - pub sess: &'tcx Session, - pub def_map: RefCell, - - pub named_region_map: resolve_lifetime::NamedRegionMap, - - pub region_maps: RegionMaps, - - // For each fn declared in the local crate, type check stores the - // free-region relationships that were deduced from its where - // clauses and parameter types. These are then read-again by - // borrowck. (They are not used during trans, and hence are not - // serialized or needed for cross-crate fns.) - free_region_maps: RefCell>, - // FIXME: jroesch make this a refcell - - pub tables: RefCell>, - - /// Maps from a trait item to the trait item "descriptor" - pub impl_or_trait_items: RefCell>>, - - /// Maps from a trait def-id to a list of the def-ids of its trait items - pub trait_item_def_ids: RefCell>>, - - /// A cache for the trait_items() routine; note that the routine - /// itself pushes the `TraitItems` dependency node. - trait_items_cache: RefCell>>, - - pub impl_trait_refs: RefCell>>, - pub trait_defs: RefCell>>, - pub adt_defs: RefCell>>, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to its - /// associated predicates. - pub predicates: RefCell>>, - - /// Maps from the def-id of a trait to the list of - /// super-predicates. This is a subset of the full list of - /// predicates. We store these in a separate map because we must - /// evaluate them even during type conversion, often before the - /// full predicates are available (note that supertraits have - /// additional acyclicity requirements). - pub super_predicates: RefCell>>, - - pub map: ast_map::Map<'tcx>, - - // Records the free variables refrenced by every closure - // expression. Do not track deps for this, just recompute it from - // scratch every time. - pub freevars: RefCell, - - // Records the type of every item. - pub tcache: RefCell>>, - - // Internal cache for metadata decoding. No need to track deps on this. - pub rcache: RefCell>>, - - // Cache for the type-contents routine. 
FIXME -- track deps? - pub tc_cache: RefCell, ty::contents::TypeContents>>, - - // Cache for various types within a method body and so forth. - // - // FIXME this should be made local to typeck, but it is currently used by one lint - pub ast_ty_to_ty_cache: RefCell>>, - - // FIXME no dep tracking, but we should be able to remove this - pub ty_param_defs: RefCell>>, - - // FIXME dep tracking -- should be harmless enough - pub normalized_cache: RefCell, Ty<'tcx>>>, - - pub lang_items: middle::lang_items::LanguageItems, - - /// Maps from def-id of a type or region parameter to its - /// (inferred) variance. - pub item_variance_map: RefCell>>, - - /// True if the variance has been computed yet; false otherwise. - pub variance_computed: Cell, - - /// Maps a DefId of a type to a list of its inherent impls. - /// Contains implementations of methods that are inherent to a type. - /// Methods in these implementations don't need to be exported. - pub inherent_impls: RefCell>>, - - /// Maps a DefId of an impl to a list of its items. - /// Note that this contains all of the impls that we know about, - /// including ones in other crates. It's not clear that this is the best - /// way to do it. - pub impl_items: RefCell>>, - - /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not - /// present in this set can be warned about. - pub used_unsafe: RefCell, - - /// Set of nodes which mark locals as mutable which end up getting used at - /// some point. Local variable definitions not in this set can be warned - /// about. - pub used_mut_nodes: RefCell, - - /// The set of external nominal types whose implementations have been read. - /// This is used for lazy resolution of methods. - pub populated_external_types: RefCell, - - /// The set of external primitive types whose implementations have been read. - /// FIXME(arielb1): why is this separate from populated_external_types? 
- pub populated_external_primitive_impls: RefCell, - - /// These caches are used by const_eval when decoding external constants. - pub extern_const_statics: RefCell>, - pub extern_const_fns: RefCell>, - - pub node_lint_levels: RefCell>, - - /// The types that must be asserted to be the same size for `transmute` - /// to be valid. We gather up these restrictions in the intrinsicck pass - /// and check them in trans. - pub transmute_restrictions: RefCell>>, - - /// Maps any item's def-id to its stability index. - pub stability: RefCell>, - - /// Caches the results of trait selection. This cache is used - /// for things that do not have to do with the parameters in scope. - pub selection_cache: traits::SelectionCache<'tcx>, - - /// Caches the results of trait evaluation. This cache is used - /// for things that do not have to do with the parameters in scope. - /// Merge this with `selection_cache`? - pub evaluation_cache: traits::EvaluationCache<'tcx>, - - /// A set of predicates that have been fulfilled *somewhere*. - /// This is used to avoid duplicate work. Predicates are only - /// added to this set when they mention only "global" names - /// (i.e., no type or lifetime parameters). - pub fulfilled_predicates: RefCell>, - - /// Caches the representation hints for struct definitions. - repr_hint_cache: RefCell>>, - - /// Maps Expr NodeId's to their constant qualification. - pub const_qualif_map: RefCell>, - - /// Caches CoerceUnsized kinds for impls on custom types. - pub custom_coerce_unsized_kinds: RefCell>, - - /// Maps a cast expression to its kind. This is keyed on the - /// *from* expression of the cast, not the cast itself. - pub cast_kinds: RefCell>, - - /// Maps Fn items to a collection of fragment infos. - /// - /// The main goal is to identify data (each of which may be moved - /// or assigned) whose subparts are not moved nor assigned - /// (i.e. 
their state is *unfragmented*) and corresponding ast - /// nodes where the path to that data is moved or assigned. - /// - /// In the long term, unfragmented values will have their - /// destructor entirely driven by a single stack-local drop-flag, - /// and their parents, the collections of the unfragmented values - /// (or more simply, "fragmented values"), are mapped to the - /// corresponding collections of stack-local drop-flags. - /// - /// (However, in the short term that is not the case; e.g. some - /// unfragmented paths still need to be zeroed, namely when they - /// reference parent data from an outer scope that was not - /// entirely moved, and therefore that needs to be zeroed so that - /// we do not get double-drop when we hit the end of the parent - /// scope.) - /// - /// Also: currently the table solely holds keys for node-ids of - /// unfragmented values (see `FragmentInfo` enum definition), but - /// longer-term we will need to also store mappings from - /// fragmented data to the set of unfragmented pieces that - /// constitute it. 
- pub fragment_infos: RefCell>>, -} - -impl<'tcx> ctxt<'tcx> { - pub fn type_parameter_def(&self, - node_id: NodeId) - -> ty::TypeParameterDef<'tcx> - { - self.ty_param_defs.borrow().get(&node_id).unwrap().clone() - } - - pub fn node_types(&self) -> Ref>> { - fn projection<'a, 'tcx>(tables: &'a Tables<'tcx>) -> &'a NodeMap> { - &tables.node_types - } - - Ref::map(self.tables.borrow(), projection) - } - - pub fn node_type_insert(&self, id: NodeId, ty: Ty<'tcx>) { - self.tables.borrow_mut().node_types.insert(id, ty); - } - - pub fn intern_trait_def(&self, def: ty::TraitDef<'tcx>) - -> &'tcx ty::TraitDef<'tcx> { - let did = def.trait_ref.def_id; - let interned = self.arenas.trait_defs.alloc(def); - if let Some(prev) = self.trait_defs.borrow_mut().insert(did, interned) { - self.sess.bug(&format!("Tried to overwrite interned TraitDef: {:?}", - prev)) - } - interned - } - - pub fn alloc_trait_def(&self, def: ty::TraitDef<'tcx>) - -> &'tcx ty::TraitDef<'tcx> { - self.arenas.trait_defs.alloc(def) - } - - pub fn intern_adt_def(&self, - did: DefId, - kind: ty::AdtKind, - variants: Vec>) - -> ty::AdtDefMaster<'tcx> { - let def = ty::AdtDefData::new(self, did, kind, variants); - let interned = self.arenas.adt_defs.alloc(def); - // this will need a transmute when reverse-variance is removed - if let Some(prev) = self.adt_defs.borrow_mut().insert(did, interned) { - self.sess.bug(&format!("Tried to overwrite interned AdtDef: {:?}", - prev)) - } - interned - } - - pub fn intern_stability(&self, stab: attr::Stability) -> &'tcx attr::Stability { - if let Some(st) = self.stability_interner.borrow().get(&stab) { - return st; - } - - let interned = self.arenas.stability.alloc(stab); - if let Some(prev) = self.stability_interner - .borrow_mut() - .insert(interned, interned) { - self.sess.bug(&format!("Tried to overwrite interned Stability: {:?}", - prev)) - } - interned - } - - pub fn store_free_region_map(&self, id: NodeId, map: FreeRegionMap) { - if 
self.free_region_maps.borrow_mut().insert(id, map).is_some() { - self.sess.bug(&format!("Tried to overwrite interned FreeRegionMap for NodeId {:?}", - id)) - } - } - - pub fn free_region_map(&self, id: NodeId) -> FreeRegionMap { - self.free_region_maps.borrow()[&id].clone() - } - - pub fn lift>(&self, value: &T) -> Option { - value.lift_to_tcx(self) - } - - /// Create a type context and call the closure with a `&ty::ctxt` reference - /// to the context. The closure enforces that the type context and any interned - /// value (types, substs, etc.) can only be used while `ty::tls` has a valid - /// reference to the context, to allow formatting values that need it. - pub fn create_and_enter(s: &'tcx Session, - arenas: &'tcx CtxtArenas<'tcx>, - def_map: RefCell, - named_region_map: resolve_lifetime::NamedRegionMap, - map: ast_map::Map<'tcx>, - freevars: FreevarMap, - region_maps: RegionMaps, - lang_items: middle::lang_items::LanguageItems, - stability: stability::Index<'tcx>, - f: F) -> R - where F: FnOnce(&ctxt<'tcx>) -> R - { - let interner = RefCell::new(FnvHashMap()); - let common_types = CommonTypes::new(&arenas.type_, &interner); - let dep_graph = DepGraph::new(s.opts.incremental_compilation); - tls::enter(ctxt { - arenas: arenas, - interner: interner, - substs_interner: RefCell::new(FnvHashMap()), - bare_fn_interner: RefCell::new(FnvHashMap()), - region_interner: RefCell::new(FnvHashMap()), - stability_interner: RefCell::new(FnvHashMap()), - dep_graph: dep_graph.clone(), - types: common_types, - named_region_map: named_region_map, - region_maps: region_maps, - free_region_maps: RefCell::new(FnvHashMap()), - item_variance_map: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - variance_computed: Cell::new(false), - sess: s, - def_map: def_map, - tables: RefCell::new(Tables::empty()), - impl_trait_refs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - trait_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - adt_defs: 
RefCell::new(DepTrackingMap::new(dep_graph.clone())), - predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - super_predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - fulfilled_predicates: RefCell::new(traits::FulfilledPredicates::new()), - map: map, - freevars: RefCell::new(freevars), - tcache: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - rcache: RefCell::new(FnvHashMap()), - tc_cache: RefCell::new(FnvHashMap()), - ast_ty_to_ty_cache: RefCell::new(NodeMap()), - impl_or_trait_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - trait_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - trait_items_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - ty_param_defs: RefCell::new(NodeMap()), - normalized_cache: RefCell::new(FnvHashMap()), - lang_items: lang_items, - inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - impl_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - used_unsafe: RefCell::new(NodeSet()), - used_mut_nodes: RefCell::new(NodeSet()), - populated_external_types: RefCell::new(DefIdSet()), - populated_external_primitive_impls: RefCell::new(DefIdSet()), - extern_const_statics: RefCell::new(DefIdMap()), - extern_const_fns: RefCell::new(DefIdMap()), - node_lint_levels: RefCell::new(FnvHashMap()), - transmute_restrictions: RefCell::new(Vec::new()), - stability: RefCell::new(stability), - selection_cache: traits::SelectionCache::new(), - evaluation_cache: traits::EvaluationCache::new(), - repr_hint_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())), - const_qualif_map: RefCell::new(NodeMap()), - custom_coerce_unsized_kinds: RefCell::new(DefIdMap()), - cast_kinds: RefCell::new(NodeMap()), - fragment_infos: RefCell::new(DefIdMap()), - }, f) - } -} - -/// A trait implemented for all X<'a> types which can be safely and -/// efficiently converted to X<'tcx> as long as they are part of the -/// provided ty::ctxt<'tcx>. 
-/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx> -/// by looking them up in their respective interners. -/// None is returned if the value or one of the components is not part -/// of the provided context. -/// For Ty, None can be returned if either the type interner doesn't -/// contain the TypeVariants key or if the address of the interned -/// pointer differs. The latter case is possible if a primitive type, -/// e.g. `()` or `u8`, was interned in a different context. -pub trait Lift<'tcx> { - type Lifted; - fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option; -} - -impl<'a, 'tcx> Lift<'tcx> for Ty<'a> { - type Lifted = Ty<'tcx>; - fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option> { - if let Some(&ty) = tcx.interner.borrow().get(&self.sty) { - if *self as *const _ == ty as *const _ { - return Some(ty); - } - } - None - } -} - -impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { - type Lifted = &'tcx Substs<'tcx>; - fn lift_to_tcx(&self, tcx: &ctxt<'tcx>) -> Option<&'tcx Substs<'tcx>> { - if let Some(&substs) = tcx.substs_interner.borrow().get(*self) { - if *self as *const _ == substs as *const _ { - return Some(substs); - } - } - None - } -} - - -pub mod tls { - use middle::ty; - - use std::fmt; - use syntax::codemap; - - /// Marker type used for the scoped TLS slot. - /// The type context cannot be used directly because the scoped TLS - /// in libstd doesn't allow types generic over lifetimes. 
- struct ThreadLocalTyCx; - - scoped_thread_local!(static TLS_TCX: ThreadLocalTyCx); - - fn span_debug(span: codemap::Span, f: &mut fmt::Formatter) -> fmt::Result { - with(|tcx| { - write!(f, "{}", tcx.sess.codemap().span_to_string(span)) - }) - } - - pub fn enter<'tcx, F: FnOnce(&ty::ctxt<'tcx>) -> R, R>(tcx: ty::ctxt<'tcx>, f: F) -> R { - codemap::SPAN_DEBUG.with(|span_dbg| { - let original_span_debug = span_dbg.get(); - span_dbg.set(span_debug); - let tls_ptr = &tcx as *const _ as *const ThreadLocalTyCx; - let result = TLS_TCX.set(unsafe { &*tls_ptr }, || f(&tcx)); - span_dbg.set(original_span_debug); - result - }) - } - - pub fn with R, R>(f: F) -> R { - TLS_TCX.with(|tcx| f(unsafe { &*(tcx as *const _ as *const ty::ctxt) })) - } - - pub fn with_opt) -> R, R>(f: F) -> R { - if TLS_TCX.is_set() { - with(|v| f(Some(v))) - } else { - f(None) - } - } -} - -macro_rules! sty_debug_print { - ($ctxt: expr, $($variant: ident),*) => {{ - // curious inner module to allow variant names to be used as - // variable names. - #[allow(non_snake_case)] - mod inner { - use middle::ty; - #[derive(Copy, Clone)] - struct DebugStat { - total: usize, - region_infer: usize, - ty_infer: usize, - both_infer: usize, - } - - pub fn go(tcx: &ty::ctxt) { - let mut total = DebugStat { - total: 0, - region_infer: 0, ty_infer: 0, both_infer: 0, - }; - $(let mut $variant = total;)* - - - for (_, t) in tcx.interner.borrow().iter() { - let variant = match t.sty { - ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) | - ty::TyFloat(..) | ty::TyStr => continue, - ty::TyError => /* unimportant */ continue, - $(ty::$variant(..) 
=> &mut $variant,)* - }; - let region = t.flags.get().intersects(ty::TypeFlags::HAS_RE_INFER); - let ty = t.flags.get().intersects(ty::TypeFlags::HAS_TY_INFER); - - variant.total += 1; - total.total += 1; - if region { total.region_infer += 1; variant.region_infer += 1 } - if ty { total.ty_infer += 1; variant.ty_infer += 1 } - if region && ty { total.both_infer += 1; variant.both_infer += 1 } - } - println!("Ty interner total ty region both"); - $(println!(" {:18}: {uses:6} {usespc:4.1}%, \ -{ty:4.1}% {region:5.1}% {both:4.1}%", - stringify!($variant), - uses = $variant.total, - usespc = $variant.total as f64 * 100.0 / total.total as f64, - ty = $variant.ty_infer as f64 * 100.0 / total.total as f64, - region = $variant.region_infer as f64 * 100.0 / total.total as f64, - both = $variant.both_infer as f64 * 100.0 / total.total as f64); - )* - println!(" total {uses:6} \ -{ty:4.1}% {region:5.1}% {both:4.1}%", - uses = total.total, - ty = total.ty_infer as f64 * 100.0 / total.total as f64, - region = total.region_infer as f64 * 100.0 / total.total as f64, - both = total.both_infer as f64 * 100.0 / total.total as f64) - } - } - - inner::go($ctxt) - }} -} - -impl<'tcx> ctxt<'tcx> { - pub fn print_debug_stats(&self) { - sty_debug_print!( - self, - TyEnum, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyBareFn, TyTrait, - TyStruct, TyClosure, TyTuple, TyParam, TyInfer, TyProjection); - - println!("Substs interner: #{}", self.substs_interner.borrow().len()); - println!("BareFnTy interner: #{}", self.bare_fn_interner.borrow().len()); - println!("Region interner: #{}", self.region_interner.borrow().len()); - println!("Stability interner: #{}", self.stability_interner.borrow().len()); - } -} - - -/// An entry in the type interner. -pub struct InternedTy<'tcx> { - ty: Ty<'tcx> -} - -// NB: An InternedTy compares and hashes as a sty. 
-impl<'tcx> PartialEq for InternedTy<'tcx> { - fn eq(&self, other: &InternedTy<'tcx>) -> bool { - self.ty.sty == other.ty.sty - } -} - -impl<'tcx> Eq for InternedTy<'tcx> {} - -impl<'tcx> Hash for InternedTy<'tcx> { - fn hash(&self, s: &mut H) { - self.ty.sty.hash(s) - } -} - -impl<'tcx> Borrow> for InternedTy<'tcx> { - fn borrow<'a>(&'a self) -> &'a TypeVariants<'tcx> { - &self.ty.sty - } -} - -fn bound_list_is_sorted(bounds: &[ty::PolyProjectionPredicate]) -> bool { - bounds.is_empty() || - bounds[1..].iter().enumerate().all( - |(index, bound)| bounds[index].sort_key() <= bound.sort_key()) -} - -impl<'tcx> ctxt<'tcx> { - // Type constructors - pub fn mk_substs(&self, substs: Substs<'tcx>) -> &'tcx Substs<'tcx> { - if let Some(substs) = self.substs_interner.borrow().get(&substs) { - return *substs; - } - - let substs = self.arenas.substs.alloc(substs); - self.substs_interner.borrow_mut().insert(substs, substs); - substs - } - - /// Create an unsafe fn ty based on a safe fn ty. - pub fn safe_to_unsafe_fn_ty(&self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> { - assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal); - let unsafe_fn_ty_a = self.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: bare_fn.abi, - sig: bare_fn.sig.clone() - }); - self.mk_fn(None, unsafe_fn_ty_a) - } - - pub fn mk_bare_fn(&self, bare_fn: BareFnTy<'tcx>) -> &'tcx BareFnTy<'tcx> { - if let Some(bare_fn) = self.bare_fn_interner.borrow().get(&bare_fn) { - return *bare_fn; - } - - let bare_fn = self.arenas.bare_fn.alloc(bare_fn); - self.bare_fn_interner.borrow_mut().insert(bare_fn, bare_fn); - bare_fn - } - - pub fn mk_region(&self, region: Region) -> &'tcx Region { - if let Some(region) = self.region_interner.borrow().get(®ion) { - return *region; - } - - let region = self.arenas.region.alloc(region); - self.region_interner.borrow_mut().insert(region, region); - region - } - - fn intern_ty(type_arena: &'tcx TypedArena>, - interner: &RefCell, Ty<'tcx>>>, - st: TypeVariants<'tcx>) - -> 
Ty<'tcx> { - let ty: Ty /* don't be &mut TyS */ = { - let mut interner = interner.borrow_mut(); - match interner.get(&st) { - Some(ty) => return *ty, - _ => () - } - - let flags = super::flags::FlagComputation::for_sty(&st); - - let ty = match () { - () => type_arena.alloc(TyS { sty: st, - flags: Cell::new(flags.flags), - region_depth: flags.depth, }), - }; - - interner.insert(InternedTy { ty: ty }, ty); - ty - }; - - debug!("Interned type: {:?} Pointer: {:?}", - ty, ty as *const TyS); - ty - } - - // Interns a type/name combination, stores the resulting box in cx.interner, - // and returns the box as cast to an unsafe ptr (see comments for Ty above). - pub fn mk_ty(&self, st: TypeVariants<'tcx>) -> Ty<'tcx> { - ctxt::intern_ty(&self.arenas.type_, &self.interner, st) - } - - pub fn mk_mach_int(&self, tm: ast::IntTy) -> Ty<'tcx> { - match tm { - ast::TyIs => self.types.isize, - ast::TyI8 => self.types.i8, - ast::TyI16 => self.types.i16, - ast::TyI32 => self.types.i32, - ast::TyI64 => self.types.i64, - } - } - - pub fn mk_mach_uint(&self, tm: ast::UintTy) -> Ty<'tcx> { - match tm { - ast::TyUs => self.types.usize, - ast::TyU8 => self.types.u8, - ast::TyU16 => self.types.u16, - ast::TyU32 => self.types.u32, - ast::TyU64 => self.types.u64, - } - } - - pub fn mk_mach_float(&self, tm: ast::FloatTy) -> Ty<'tcx> { - match tm { - ast::TyF32 => self.types.f32, - ast::TyF64 => self.types.f64, - } - } - - pub fn mk_str(&self) -> Ty<'tcx> { - self.mk_ty(TyStr) - } - - pub fn mk_static_str(&self) -> Ty<'tcx> { - self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str()) - } - - pub fn mk_enum(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { - // take a copy of substs so that we own the vectors inside - self.mk_ty(TyEnum(def, substs)) - } - - pub fn mk_box(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ty(TyBox(ty)) - } - - pub fn mk_ptr(&self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { - self.mk_ty(TyRawPtr(tm)) - } - - pub fn mk_ref(&self, r: &'tcx Region, tm: 
TypeAndMut<'tcx>) -> Ty<'tcx> { - self.mk_ty(TyRef(r, tm)) - } - - pub fn mk_mut_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable}) - } - - pub fn mk_imm_ref(&self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) - } - - pub fn mk_mut_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable}) - } - - pub fn mk_imm_ptr(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) - } - - pub fn mk_nil_ptr(&self) -> Ty<'tcx> { - self.mk_imm_ptr(self.mk_nil()) - } - - pub fn mk_array(&self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> { - self.mk_ty(TyArray(ty, n)) - } - - pub fn mk_slice(&self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.mk_ty(TySlice(ty)) - } - - pub fn mk_tup(&self, ts: Vec>) -> Ty<'tcx> { - self.mk_ty(TyTuple(ts)) - } - - pub fn mk_nil(&self) -> Ty<'tcx> { - self.mk_tup(Vec::new()) - } - - pub fn mk_bool(&self) -> Ty<'tcx> { - self.mk_ty(TyBool) - } - - pub fn mk_fn(&self, - opt_def_id: Option, - fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> { - self.mk_ty(TyBareFn(opt_def_id, fty)) - } - - pub fn mk_ctor_fn(&self, - def_id: DefId, - input_tys: &[Ty<'tcx>], - output: Ty<'tcx>) -> Ty<'tcx> { - let input_args = input_tys.iter().cloned().collect(); - self.mk_fn(Some(def_id), self.mk_bare_fn(BareFnTy { - unsafety: hir::Unsafety::Normal, - abi: abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: input_args, - output: ty::FnConverging(output), - variadic: false - }) - })) - } - - pub fn mk_trait(&self, - principal: ty::PolyTraitRef<'tcx>, - bounds: ExistentialBounds<'tcx>) - -> Ty<'tcx> - { - assert!(bound_list_is_sorted(&bounds.projection_bounds)); - - let inner = box TraitTy { - principal: principal, - bounds: bounds - }; - self.mk_ty(TyTrait(inner)) - } - - pub fn mk_projection(&self, - trait_ref: TraitRef<'tcx>, - item_name: Name) - -> Ty<'tcx> { - // take a copy of substs so 
that we own the vectors inside - let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name }; - self.mk_ty(TyProjection(inner)) - } - - pub fn mk_struct(&self, def: AdtDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { - // take a copy of substs so that we own the vectors inside - self.mk_ty(TyStruct(def, substs)) - } - - pub fn mk_closure(&self, - closure_id: DefId, - substs: &'tcx Substs<'tcx>, - tys: Vec>) - -> Ty<'tcx> { - self.mk_closure_from_closure_substs(closure_id, Box::new(ClosureSubsts { - func_substs: substs, - upvar_tys: tys - })) - } - - pub fn mk_closure_from_closure_substs(&self, - closure_id: DefId, - closure_substs: Box>) - -> Ty<'tcx> { - self.mk_ty(TyClosure(closure_id, closure_substs)) - } - - pub fn mk_var(&self, v: TyVid) -> Ty<'tcx> { - self.mk_infer(TyVar(v)) - } - - pub fn mk_int_var(&self, v: IntVid) -> Ty<'tcx> { - self.mk_infer(IntVar(v)) - } - - pub fn mk_float_var(&self, v: FloatVid) -> Ty<'tcx> { - self.mk_infer(FloatVar(v)) - } - - pub fn mk_infer(&self, it: InferTy) -> Ty<'tcx> { - self.mk_ty(TyInfer(it)) - } - - pub fn mk_param(&self, - space: subst::ParamSpace, - index: u32, - name: Name) -> Ty<'tcx> { - self.mk_ty(TyParam(ParamTy { space: space, idx: index, name: name })) - } - - pub fn mk_self_type(&self) -> Ty<'tcx> { - self.mk_param(subst::SelfSpace, 0, special_idents::type_self.name) - } - - pub fn mk_param_from_def(&self, def: &ty::TypeParameterDef) -> Ty<'tcx> { - self.mk_param(def.space, def.index, def.name) - } - - pub fn trait_items(&self, trait_did: DefId) -> Rc>> { - self.trait_items_cache.memoize(trait_did, || { - let def_ids = self.trait_item_def_ids(trait_did); - Rc::new(def_ids.iter() - .map(|d| self.impl_or_trait_item(d.def_id())) - .collect()) - }) - } - - /// Obtain the representation annotation for a struct definition. 
- pub fn lookup_repr_hints(&self, did: DefId) -> Rc> { - self.repr_hint_cache.memoize(did, || { - Rc::new(if did.is_local() { - self.get_attrs(did).iter().flat_map(|meta| { - attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter() - }).collect() - } else { - self.sess.cstore.repr_attrs(did) - }) - }) - } -} diff --git a/src/librustc/middle/ty/flags.rs b/src/librustc/middle/ty/flags.rs deleted file mode 100644 index a0b03fe8126dd..0000000000000 --- a/src/librustc/middle/ty/flags.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::subst; -use middle::ty::{self, Ty, TypeFlags, TypeFoldable}; - -pub struct FlagComputation { - pub flags: TypeFlags, - - // maximum depth of any bound region that we have seen thus far - pub depth: u32, -} - -impl FlagComputation { - fn new() -> FlagComputation { - FlagComputation { flags: TypeFlags::empty(), depth: 0 } - } - - pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation { - let mut result = FlagComputation::new(); - result.add_sty(st); - result - } - - fn add_flags(&mut self, flags: TypeFlags) { - self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS); - } - - fn add_depth(&mut self, depth: u32) { - if depth > self.depth { - self.depth = depth; - } - } - - /// Adds the flags/depth from a set of types that appear within the current type, but within a - /// region binder. - fn add_bound_computation(&mut self, computation: &FlagComputation) { - self.add_flags(computation.flags); - - // The types that contributed to `computation` occurred within - // a region binder, so subtract one from the region depth - // within when adding the depth to `self`. 
- let depth = computation.depth; - if depth > 0 { - self.add_depth(depth - 1); - } - } - - fn add_sty(&mut self, st: &ty::TypeVariants) { - match st { - &ty::TyBool | - &ty::TyChar | - &ty::TyInt(_) | - &ty::TyFloat(_) | - &ty::TyUint(_) | - &ty::TyStr => { - } - - // You might think that we could just return TyError for - // any type containing TyError as a component, and get - // rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with - // the exception of function types that return bot). - // But doing so caused sporadic memory corruption, and - // neither I (tjc) nor nmatsakis could figure out why, - // so we're doing it this way. - &ty::TyError => { - self.add_flags(TypeFlags::HAS_TY_ERR) - } - - &ty::TyParam(ref p) => { - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); - if p.space == subst::SelfSpace { - self.add_flags(TypeFlags::HAS_SELF); - } else { - self.add_flags(TypeFlags::HAS_PARAMS); - } - } - - &ty::TyClosure(_, ref substs) => { - self.add_flags(TypeFlags::HAS_TY_CLOSURE); - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); - self.add_substs(&substs.func_substs); - self.add_tys(&substs.upvar_tys); - } - - &ty::TyInfer(_) => { - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right? 
- self.add_flags(TypeFlags::HAS_TY_INFER) - } - - &ty::TyEnum(_, substs) | &ty::TyStruct(_, substs) => { - self.add_substs(substs); - } - - &ty::TyProjection(ref data) => { - self.add_flags(TypeFlags::HAS_PROJECTION); - self.add_projection_ty(data); - } - - &ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { - let mut computation = FlagComputation::new(); - computation.add_substs(principal.0.substs); - for projection_bound in &bounds.projection_bounds { - let mut proj_computation = FlagComputation::new(); - proj_computation.add_projection_predicate(&projection_bound.0); - self.add_bound_computation(&proj_computation); - } - self.add_bound_computation(&computation); - - self.add_bounds(bounds); - } - - &ty::TyBox(tt) | &ty::TyArray(tt, _) | &ty::TySlice(tt) => { - self.add_ty(tt) - } - - &ty::TyRawPtr(ref m) => { - self.add_ty(m.ty); - } - - &ty::TyRef(r, ref m) => { - self.add_region(*r); - self.add_ty(m.ty); - } - - &ty::TyTuple(ref ts) => { - self.add_tys(&ts[..]); - } - - &ty::TyBareFn(_, ref f) => { - self.add_fn_sig(&f.sig); - } - } - } - - fn add_ty(&mut self, ty: Ty) { - self.add_flags(ty.flags.get()); - self.add_depth(ty.region_depth); - } - - fn add_tys(&mut self, tys: &[Ty]) { - for &ty in tys { - self.add_ty(ty); - } - } - - fn add_fn_sig(&mut self, fn_sig: &ty::PolyFnSig) { - let mut computation = FlagComputation::new(); - - computation.add_tys(&fn_sig.0.inputs); - - if let ty::FnConverging(output) = fn_sig.0.output { - computation.add_ty(output); - } - - self.add_bound_computation(&computation); - } - - fn add_region(&mut self, r: ty::Region) { - match r { - ty::ReVar(..) | - ty::ReSkolemized(..) => { self.add_flags(TypeFlags::HAS_RE_INFER); } - ty::ReLateBound(debruijn, _) => { self.add_depth(debruijn.depth); } - ty::ReEarlyBound(..) 
=> { self.add_flags(TypeFlags::HAS_RE_EARLY_BOUND); } - ty::ReStatic => {} - _ => { self.add_flags(TypeFlags::HAS_FREE_REGIONS); } - } - - if !r.is_global() { - self.add_flags(TypeFlags::HAS_LOCAL_NAMES); - } - } - - fn add_projection_predicate(&mut self, projection_predicate: &ty::ProjectionPredicate) { - self.add_projection_ty(&projection_predicate.projection_ty); - self.add_ty(projection_predicate.ty); - } - - fn add_projection_ty(&mut self, projection_ty: &ty::ProjectionTy) { - self.add_substs(projection_ty.trait_ref.substs); - } - - fn add_substs(&mut self, substs: &subst::Substs) { - self.add_tys(substs.types.as_slice()); - match substs.regions { - subst::ErasedRegions => {} - subst::NonerasedRegions(ref regions) => { - for &r in regions { - self.add_region(r); - } - } - } - } - - fn add_bounds(&mut self, bounds: &ty::ExistentialBounds) { - self.add_region(bounds.region_bound); - } -} diff --git a/src/librustc/middle/ty/fold.rs b/src/librustc/middle/ty/fold.rs deleted file mode 100644 index da0245a8d2520..0000000000000 --- a/src/librustc/middle/ty/fold.rs +++ /dev/null @@ -1,638 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Generalized type folding mechanism. The setup is a bit convoluted -//! but allows for convenient usage. Let T be an instance of some -//! "foldable type" (one which implements `TypeFoldable`) and F be an -//! instance of a "folder" (a type which implements `TypeFolder`). Then -//! the setup is intended to be: -//! -//! T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F) -//! -//! This way, when you define a new folder F, you can override -//! 
`fold_T()` to customize the behavior, and invoke `T.super_fold_with()` -//! to get the original behavior. Meanwhile, to actually fold -//! something, you can just write `T.fold_with(F)`, which is -//! convenient. (Note that `fold_with` will also transparently handle -//! things like a `Vec` where T is foldable and so on.) -//! -//! In this ideal setup, the only function that actually *does* -//! anything is `T.super_fold_with()`, which traverses the type `T`. -//! Moreover, `T.super_fold_with()` should only ever call `T.fold_with()`. -//! -//! In some cases, we follow a degenerate pattern where we do not have -//! a `fold_T` method. Instead, `T.fold_with` traverses the structure directly. -//! This is suboptimal because the behavior cannot be overridden, but it's -//! much less work to implement. If you ever *do* need an override that -//! doesn't exist, it's not hard to convert the degenerate pattern into the -//! proper thing. -//! -//! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using similar setup: -//! T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V). -//! These methods return true to indicate that the visitor has found what it is looking for -//! and does not need to visit anything else. - -use middle::region; -use middle::subst; -use middle::ty::adjustment; -use middle::ty::{self, Binder, Ty, TypeFlags}; - -use std::fmt; -use util::nodemap::{FnvHashMap, FnvHashSet}; - -/// The TypeFoldable trait is implemented for every type that can be folded. -/// Basically, every type that has a corresponding method in TypeFolder. 
-pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { - fn super_fold_with>(&self, folder: &mut F) -> Self; - fn fold_with>(&self, folder: &mut F) -> Self { - self.super_fold_with(folder) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool; - fn visit_with>(&self, visitor: &mut V) -> bool { - self.super_visit_with(visitor) - } - - fn has_regions_escaping_depth(&self, depth: u32) -> bool { - self.visit_with(&mut HasEscapingRegionsVisitor { depth: depth }) - } - fn has_escaping_regions(&self) -> bool { - self.has_regions_escaping_depth(0) - } - - fn has_type_flags(&self, flags: TypeFlags) -> bool { - self.visit_with(&mut HasTypeFlagsVisitor { flags: flags }) - } - fn has_projection_types(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_PROJECTION) - } - fn references_error(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_ERR) - } - fn has_param_types(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_PARAMS) - } - fn has_self_ty(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_SELF) - } - fn has_infer_types(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_INFER) - } - fn needs_infer(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER) - } - fn needs_subst(&self) -> bool { - self.has_type_flags(TypeFlags::NEEDS_SUBST) - } - fn has_closure_types(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_CLOSURE) - } - fn has_erasable_regions(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND | - TypeFlags::HAS_RE_INFER | - TypeFlags::HAS_FREE_REGIONS) - } - /// Indicates whether this value references only 'global' - /// types/lifetimes that are the same regardless of what fn we are - /// in. This is used for caching. Errs on the side of returning - /// false. - fn is_global(&self) -> bool { - !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES) - } -} - -/// The TypeFolder trait defines the actual *folding*. There is a -/// method defined for every foldable type. 
Each of these has a -/// default implementation that does an "identity" fold. Within each -/// identity fold, it should invoke `foo.fold_with(self)` to fold each -/// sub-item. -pub trait TypeFolder<'tcx> : Sized { - fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>; - - /// Invoked by the `super_*` routines when we enter a region - /// binding level (for example, when entering a function - /// signature). This is used by clients that want to track the - /// Debruijn index nesting level. - fn enter_region_binder(&mut self) { } - - /// Invoked by the `super_*` routines when we exit a region - /// binding level. This is used by clients that want to - /// track the Debruijn index nesting level. - fn exit_region_binder(&mut self) { } - - fn fold_binder(&mut self, t: &Binder) -> Binder - where T : TypeFoldable<'tcx> - { - // FIXME(#20526) this should replace `enter_region_binder`/`exit_region_binder`. - t.super_fold_with(self) - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - t.super_fold_with(self) - } - - fn fold_mt(&mut self, t: &ty::TypeAndMut<'tcx>) -> ty::TypeAndMut<'tcx> { - t.super_fold_with(self) - } - - fn fold_trait_ref(&mut self, t: &ty::TraitRef<'tcx>) -> ty::TraitRef<'tcx> { - t.super_fold_with(self) - } - - fn fold_substs(&mut self, - substs: &subst::Substs<'tcx>) - -> subst::Substs<'tcx> { - substs.super_fold_with(self) - } - - fn fold_fn_sig(&mut self, - sig: &ty::FnSig<'tcx>) - -> ty::FnSig<'tcx> { - sig.super_fold_with(self) - } - - fn fold_output(&mut self, - output: &ty::FnOutput<'tcx>) - -> ty::FnOutput<'tcx> { - output.super_fold_with(self) - } - - fn fold_bare_fn_ty(&mut self, - fty: &ty::BareFnTy<'tcx>) - -> ty::BareFnTy<'tcx> - { - fty.super_fold_with(self) - } - - fn fold_closure_ty(&mut self, - fty: &ty::ClosureTy<'tcx>) - -> ty::ClosureTy<'tcx> { - fty.super_fold_with(self) - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - r.super_fold_with(self) - } - - fn fold_existential_bounds(&mut self, s: 
&ty::ExistentialBounds<'tcx>) - -> ty::ExistentialBounds<'tcx> { - s.super_fold_with(self) - } - - fn fold_autoref(&mut self, ar: &adjustment::AutoRef<'tcx>) - -> adjustment::AutoRef<'tcx> { - ar.super_fold_with(self) - } -} - -pub trait TypeVisitor<'tcx> : Sized { - fn enter_region_binder(&mut self) { } - fn exit_region_binder(&mut self) { } - - fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { - t.super_visit_with(self) - } - - fn visit_region(&mut self, r: ty::Region) -> bool { - r.super_visit_with(self) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Some sample folders - -pub struct BottomUpFolder<'a, 'tcx: 'a, F> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> { - pub tcx: &'a ty::ctxt<'tcx>, - pub fldop: F, -} - -impl<'a, 'tcx, F> TypeFolder<'tcx> for BottomUpFolder<'a, 'tcx, F> where - F: FnMut(Ty<'tcx>) -> Ty<'tcx>, -{ - fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx } - - fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - let t1 = ty.super_fold_with(self); - (self.fldop)(t1) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Region folder - -impl<'tcx> ty::ctxt<'tcx> { - /// Collects the free and escaping regions in `value` into `region_set`. Returns - /// whether any late-bound regions were skipped - pub fn collect_regions(&self, - value: &T, - region_set: &mut FnvHashSet) - -> bool - where T : TypeFoldable<'tcx> - { - let mut have_bound_regions = false; - self.fold_regions(value, &mut have_bound_regions, - |r, d| { region_set.insert(r.from_depth(d)); r }); - have_bound_regions - } - - /// Folds the escaping and free regions in `value` using `f`, and - /// sets `skipped_regions` to true if any late-bound region was found - /// and skipped. 
- pub fn fold_regions(&self, - value: &T, - skipped_regions: &mut bool, - mut f: F) - -> T - where F : FnMut(ty::Region, u32) -> ty::Region, - T : TypeFoldable<'tcx>, - { - value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f)) - } -} - -/// Folds over the substructure of a type, visiting its component -/// types and all regions that occur *free* within it. -/// -/// That is, `Ty` can contain function or method types that bind -/// regions at the call site (`ReLateBound`), and occurrences of -/// regions (aka "lifetimes") that are bound within a type are not -/// visited by this folder; only regions that occur free will be -/// visited by `fld_r`. - -pub struct RegionFolder<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - skipped_regions: &'a mut bool, - current_depth: u32, - fld_r: &'a mut (FnMut(ty::Region, u32) -> ty::Region + 'a), -} - -impl<'a, 'tcx> RegionFolder<'a, 'tcx> { - pub fn new(tcx: &'a ty::ctxt<'tcx>, - skipped_regions: &'a mut bool, - fld_r: &'a mut F) -> RegionFolder<'a, 'tcx> - where F : FnMut(ty::Region, u32) -> ty::Region - { - RegionFolder { - tcx: tcx, - skipped_regions: skipped_regions, - current_depth: 1, - fld_r: fld_r, - } - } -} - -impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> -{ - fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx } - - fn enter_region_binder(&mut self) { - self.current_depth += 1; - } - - fn exit_region_binder(&mut self) { - self.current_depth -= 1; - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => { - debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})", - r, self.current_depth); - *self.skipped_regions = true; - r - } - _ => { - debug!("RegionFolder.fold_region({:?}) folding free region (current_depth={})", - r, self.current_depth); - (self.fld_r)(r, self.current_depth) - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// 
Late-bound region replacer - -// Replaces the escaping regions in a type. - -struct RegionReplacer<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - current_depth: u32, - fld_r: &'a mut (FnMut(ty::BoundRegion) -> ty::Region + 'a), - map: FnvHashMap -} - -impl<'tcx> ty::ctxt<'tcx> { - pub fn replace_late_bound_regions(&self, - value: &Binder, - mut f: F) - -> (T, FnvHashMap) - where F : FnMut(ty::BoundRegion) -> ty::Region, - T : TypeFoldable<'tcx>, - { - debug!("replace_late_bound_regions({:?})", value); - let mut replacer = RegionReplacer::new(self, &mut f); - let result = value.skip_binder().fold_with(&mut replacer); - (result, replacer.map) - } - - - /// Replace any late-bound regions bound in `value` with free variants attached to scope-id - /// `scope_id`. - pub fn liberate_late_bound_regions(&self, - all_outlive_scope: region::CodeExtent, - value: &Binder) - -> T - where T : TypeFoldable<'tcx> - { - self.replace_late_bound_regions(value, |br| { - ty::ReFree(ty::FreeRegion{scope: all_outlive_scope, bound_region: br}) - }).0 - } - - /// Flattens two binding levels into one. So `for<'a> for<'b> Foo` - /// becomes `for<'a,'b> Foo`. - pub fn flatten_late_bound_regions(&self, bound2_value: &Binder>) - -> Binder - where T: TypeFoldable<'tcx> - { - let bound0_value = bound2_value.skip_binder().skip_binder(); - let value = self.fold_regions(bound0_value, &mut false, - |region, current_depth| { - match region { - ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => { - // should be true if no escaping regions from bound2_value - assert!(debruijn.depth - current_depth <= 1); - ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br) - } - _ => { - region - } - } - }); - Binder(value) - } - - pub fn no_late_bound_regions(&self, value: &Binder) -> Option - where T : TypeFoldable<'tcx> - { - if value.0.has_escaping_regions() { - None - } else { - Some(value.0.clone()) - } - } - - /// Replace any late-bound regions bound in `value` with `'static`. 
Useful in trans but also - /// method lookup and a few other places where precise region relationships are not required. - pub fn erase_late_bound_regions(&self, value: &Binder) -> T - where T : TypeFoldable<'tcx> - { - self.replace_late_bound_regions(value, |_| ty::ReStatic).0 - } - - /// Rewrite any late-bound regions so that they are anonymous. Region numbers are - /// assigned starting at 1 and increasing monotonically in the order traversed - /// by the fold operation. - /// - /// The chief purpose of this function is to canonicalize regions so that two - /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become - /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and - /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization. - pub fn anonymize_late_bound_regions(&self, sig: &Binder) -> Binder - where T : TypeFoldable<'tcx>, - { - let mut counter = 0; - Binder(self.replace_late_bound_regions(sig, |_| { - counter += 1; - ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter)) - }).0) - } -} - -impl<'a, 'tcx> RegionReplacer<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>, fld_r: &'a mut F) -> RegionReplacer<'a, 'tcx> - where F : FnMut(ty::BoundRegion) -> ty::Region - { - RegionReplacer { - tcx: tcx, - current_depth: 1, - fld_r: fld_r, - map: FnvHashMap() - } - } -} - -impl<'a, 'tcx> TypeFolder<'tcx> for RegionReplacer<'a, 'tcx> -{ - fn tcx(&self) -> &ty::ctxt<'tcx> { self.tcx } - - fn enter_region_binder(&mut self) { - self.current_depth += 1; - } - - fn exit_region_binder(&mut self) { - self.current_depth -= 1; - } - - fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { - if !t.has_regions_escaping_depth(self.current_depth-1) { - return t; - } - - t.super_fold_with(self) - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - match r { - ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { - debug!("RegionReplacer.fold_region({:?}) folding region 
(current_depth={})", - r, self.current_depth); - let fld_r = &mut self.fld_r; - let region = *self.map.entry(br).or_insert_with(|| fld_r(br)); - if let ty::ReLateBound(debruijn1, br) = region { - // If the callback returns a late-bound region, - // that region should always use depth 1. Then we - // adjust it to the correct depth. - assert_eq!(debruijn1.depth, 1); - ty::ReLateBound(debruijn, br) - } else { - region - } - } - r => r - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Region eraser - -impl<'tcx> ty::ctxt<'tcx> { - /// Returns an equivalent value with all free regions removed (note - /// that late-bound regions remain, because they are important for - /// subtyping, but they are anonymized and normalized as well).. - pub fn erase_regions(&self, value: &T) -> T - where T : TypeFoldable<'tcx> - { - let value1 = value.fold_with(&mut RegionEraser(self)); - debug!("erase_regions({:?}) = {:?}", - value, value1); - return value1; - - struct RegionEraser<'a, 'tcx: 'a>(&'a ty::ctxt<'tcx>); - - impl<'a, 'tcx> TypeFolder<'tcx> for RegionEraser<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { self.0 } - - fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { - match self.tcx().normalized_cache.borrow().get(&ty).cloned() { - None => {} - Some(u) => return u - } - - let t_norm = ty.super_fold_with(self); - self.tcx().normalized_cache.borrow_mut().insert(ty, t_norm); - return t_norm; - } - - fn fold_binder(&mut self, t: &ty::Binder) -> ty::Binder - where T : TypeFoldable<'tcx> - { - let u = self.tcx().anonymize_late_bound_regions(t); - u.super_fold_with(self) - } - - fn fold_region(&mut self, r: ty::Region) -> ty::Region { - // because late-bound regions affect subtyping, we can't - // erase the bound/free distinction, but we can replace - // all free regions with 'static. - // - // Note that we *CAN* replace early-bound regions -- the - // type system never "sees" those, they get substituted - // away. 
In trans, they will always be erased to 'static - // whenever a substitution occurs. - match r { - ty::ReLateBound(..) => r, - _ => ty::ReStatic - } - } - - fn fold_substs(&mut self, - substs: &subst::Substs<'tcx>) - -> subst::Substs<'tcx> { - subst::Substs { regions: subst::ErasedRegions, - types: substs.types.fold_with(self) } - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Region shifter -// -// Shifts the De Bruijn indices on all escaping bound regions by a -// fixed amount. Useful in substitution or when otherwise introducing -// a binding level that is not intended to capture the existing bound -// regions. See comment on `shift_regions_through_binders` method in -// `subst.rs` for more details. - -pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region { - match region { - ty::ReLateBound(debruijn, br) => { - ty::ReLateBound(debruijn.shifted(amount), br) - } - _ => { - region - } - } -} - -pub fn shift_regions<'tcx, T:TypeFoldable<'tcx>>(tcx: &ty::ctxt<'tcx>, - amount: u32, value: &T) -> T { - debug!("shift_regions(value={:?}, amount={})", - value, amount); - - value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| { - shift_region(region, amount) - })) -} - -/// An "escaping region" is a bound region whose binder is not part of `t`. -/// -/// So, for example, consider a type like the following, which has two binders: -/// -/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize)) -/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope -/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope -/// -/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the -/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner -/// fn type*, that type has an escaping region: `'a`. -/// -/// Note that what I'm calling an "escaping region" is often just called a "free region". 
However, -/// we already use the term "free region". It refers to the regions that we use to represent bound -/// regions on a fn definition while we are typechecking its body. -/// -/// To clarify, conceptually there is no particular difference between an "escaping" region and a -/// "free" region. However, there is a big difference in practice. Basically, when "entering" a -/// binding level, one is generally required to do some sort of processing to a bound region, such -/// as replacing it with a fresh/skolemized region, or making an entry in the environment to -/// represent the scope to which it is attached, etc. An escaping region represents a bound region -/// for which this processing has not yet been done. -struct HasEscapingRegionsVisitor { - depth: u32, -} - -impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { - fn enter_region_binder(&mut self) { - self.depth += 1; - } - - fn exit_region_binder(&mut self) { - self.depth -= 1; - } - - fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { - t.region_depth > self.depth - } - - fn visit_region(&mut self, r: ty::Region) -> bool { - r.escapes_depth(self.depth) - } -} - -struct HasTypeFlagsVisitor { - flags: ty::TypeFlags, -} - -impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { - fn visit_ty(&mut self, t: Ty) -> bool { - t.flags.get().intersects(self.flags) - } - - fn visit_region(&mut self, r: ty::Region) -> bool { - if self.flags.intersects(ty::TypeFlags::HAS_LOCAL_NAMES) { - // does this represent a region that cannot be named - // in a global way? used in fulfillment caching. - match r { - ty::ReStatic | ty::ReEmpty => {} - _ => return true, - } - } - if self.flags.intersects(ty::TypeFlags::HAS_RE_INFER) { - match r { - ty::ReVar(_) | ty::ReSkolemized(..) 
=> { return true } - _ => {} - } - } - false - } -} diff --git a/src/librustc/middle/ty/ivar.rs b/src/librustc/middle/ty/ivar.rs deleted file mode 100644 index ffc12aa5aea19..0000000000000 --- a/src/librustc/middle/ty/ivar.rs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use dep_graph::DepNode; -use middle::ty::{Ty, TyS}; -use middle::ty::tls; - -use rustc_data_structures::ivar; - -use std::fmt; -use std::marker::PhantomData; -use core::nonzero::NonZero; - -/// An IVar that contains a Ty. 'lt is a (reverse-variant) upper bound -/// on the lifetime of the IVar. This is required because of variance -/// problems: the IVar needs to be variant with respect to 'tcx (so -/// it can be referred to from Ty) but can only be modified if its -/// lifetime is exactly 'tcx. -/// -/// Safety invariants: -/// (A) self.0, if fulfilled, is a valid Ty<'tcx> -/// (B) no aliases to this value with a 'tcx longer than this -/// value's 'lt exist -/// -/// Dependency tracking: each ivar does not know what node in the -/// dependency graph it is associated with, so when you get/fulfill -/// you must supply a `DepNode` id. This should always be the same id! -/// -/// NonZero is used rather than Unique because Unique isn't Copy. 
-pub struct TyIVar<'tcx, 'lt: 'tcx>(ivar::Ivar>>, - PhantomData)->TyS<'tcx>>); - -impl<'tcx, 'lt> TyIVar<'tcx, 'lt> { - #[inline] - pub fn new() -> Self { - // Invariant (A) satisfied because the IVar is unfulfilled - // Invariant (B) because 'lt : 'tcx - TyIVar(ivar::Ivar::new(), PhantomData) - } - - #[inline] - pub fn get(&self, dep_node: DepNode) -> Option> { - tls::with(|tcx| tcx.dep_graph.read(dep_node)); - self.untracked_get() - } - - #[inline] - fn untracked_get(&self) -> Option> { - match self.0.get() { - None => None, - // valid because of invariant (A) - Some(v) => Some(unsafe { &*(*v as *const TyS<'tcx>) }) - } - } - - #[inline] - pub fn unwrap(&self, dep_node: DepNode) -> Ty<'tcx> { - self.get(dep_node).unwrap() - } - - pub fn fulfill(&self, dep_node: DepNode, value: Ty<'lt>) { - tls::with(|tcx| tcx.dep_graph.write(dep_node)); - - // Invariant (A) is fulfilled, because by (B), every alias - // of this has a 'tcx longer than 'lt. - let value: *const TyS<'lt> = value; - // FIXME(27214): unneeded [as *const ()] - let value = value as *const () as *const TyS<'static>; - self.0.fulfill(unsafe { NonZero::new(value) }) - } -} - -impl<'tcx, 'lt> fmt::Debug for TyIVar<'tcx, 'lt> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.untracked_get() { - Some(val) => write!(f, "TyIVar({:?})", val), - None => f.write_str("TyIVar()") - } - } -} diff --git a/src/librustc/middle/ty/maps.rs b/src/librustc/middle/ty/maps.rs deleted file mode 100644 index 7d5276f379ffe..0000000000000 --- a/src/librustc/middle/ty/maps.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dep_graph::{DepNode, DepTrackingMapConfig}; -use middle::def_id::DefId; -use middle::ty; -use std::marker::PhantomData; -use std::rc::Rc; -use syntax::attr; - -macro_rules! dep_map_ty { - ($ty_name:ident : $node_name:ident ($key:ty) -> $value:ty) => { - pub struct $ty_name<'tcx> { - data: PhantomData<&'tcx ()> - } - - impl<'tcx> DepTrackingMapConfig for $ty_name<'tcx> { - type Key = $key; - type Value = $value; - fn to_dep_node(key: &$key) -> DepNode { DepNode::$node_name(*key) } - } - } -} - -dep_map_ty! { ImplOrTraitItems: ImplOrTraitItems(DefId) -> ty::ImplOrTraitItem<'tcx> } -dep_map_ty! { Tcache: ItemSignature(DefId) -> ty::TypeScheme<'tcx> } -dep_map_ty! { Predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } -dep_map_ty! { SuperPredicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } -dep_map_ty! { TraitItemDefIds: TraitItemDefIds(DefId) -> Rc> } -dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option> } -dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef<'tcx> } -dep_map_ty! { AdtDefs: ItemSignature(DefId) -> ty::AdtDefMaster<'tcx> } -dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc } -dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Rc> } -dep_map_ty! { ImplItems: ImplItems(DefId) -> Vec } -dep_map_ty! { TraitItems: TraitItems(DefId) -> Rc>> } -dep_map_ty! { ReprHints: ReprHints(DefId) -> Rc> } diff --git a/src/librustc/middle/ty/mod.rs b/src/librustc/middle/ty/mod.rs deleted file mode 100644 index b902a46fea314..0000000000000 --- a/src/librustc/middle/ty/mod.rs +++ /dev/null @@ -1,2665 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::ImplOrTraitItemId::*; -pub use self::ClosureKind::*; -pub use self::Variance::*; -pub use self::DtorKind::*; -pub use self::ImplOrTraitItemContainer::*; -pub use self::BorrowKind::*; -pub use self::ImplOrTraitItem::*; -pub use self::IntVarValue::*; -pub use self::LvaluePreference::*; -pub use self::fold::TypeFoldable; - -use dep_graph::{self, DepNode}; -use front::map as ast_map; -use front::map::LinkedPath; -use middle; -use middle::cstore::{self, CrateStore, LOCAL_CRATE}; -use middle::def::{self, ExportMap}; -use middle::def_id::DefId; -use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; -use middle::region::{CodeExtent}; -use middle::subst::{self, Subst, Substs, VecPerParamSpace}; -use middle::traits; -use middle::ty; -use middle::ty::fold::TypeFolder; -use middle::ty::walk::TypeWalker; -use util::common::MemoizationMap; -use util::nodemap::{NodeMap, NodeSet}; -use util::nodemap::FnvHashMap; - -use serialize::{Encodable, Encoder, Decodable, Decoder}; -use std::borrow::{Borrow, Cow}; -use std::cell::Cell; -use std::hash::{Hash, Hasher}; -use std::iter; -use std::rc::Rc; -use std::slice; -use std::vec::IntoIter; -use std::collections::{HashMap, HashSet}; -use syntax::ast::{self, CrateNum, Name, NodeId}; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::{DUMMY_SP, Span}; -use syntax::parse::token::InternedString; - -use rustc_front::hir; -use rustc_front::hir::{ItemImpl, ItemTrait}; -use rustc_front::intravisit::Visitor; - -pub use self::sty::{Binder, DebruijnIndex}; -pub use self::sty::{BuiltinBound, BuiltinBounds, ExistentialBounds}; -pub use self::sty::{BareFnTy, FnSig, PolyFnSig, FnOutput, PolyFnOutput}; -pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, TraitTy}; -pub use self::sty::{ClosureSubsts, TypeAndMut}; -pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef}; -pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; -pub use self::sty::{TyVid, IntVid, 
FloatVid, RegionVid, SkolemizedRegionVid}; -pub use self::sty::BoundRegion::*; -pub use self::sty::FnOutput::*; -pub use self::sty::InferTy::*; -pub use self::sty::Region::*; -pub use self::sty::TypeVariants::*; - -pub use self::sty::BuiltinBound::Send as BoundSend; -pub use self::sty::BuiltinBound::Sized as BoundSized; -pub use self::sty::BuiltinBound::Copy as BoundCopy; -pub use self::sty::BuiltinBound::Sync as BoundSync; - -pub use self::contents::TypeContents; -pub use self::context::{ctxt, tls}; -pub use self::context::{CtxtArenas, Lift, Tables}; - -pub use self::trait_def::{TraitDef, TraitFlags}; - -pub mod adjustment; -pub mod cast; -pub mod error; -pub mod fast_reject; -pub mod fold; -pub mod _match; -pub mod maps; -pub mod outlives; -pub mod relate; -pub mod trait_def; -pub mod walk; -pub mod wf; -pub mod util; - -mod contents; -mod context; -mod flags; -mod ivar; -mod structural_impls; -mod sty; - -pub type Disr = u64; -pub const INITIAL_DISCRIMINANT_VALUE: Disr = 0; - -// Data types - -/// The complete set of all analyses described in this module. This is -/// produced by the driver and fed to trans and later passes. -pub struct CrateAnalysis<'a> { - pub export_map: ExportMap, - pub access_levels: middle::privacy::AccessLevels, - pub reachable: NodeSet, - pub name: &'a str, - pub glob_map: Option, -} - -#[derive(Copy, Clone)] -pub enum DtorKind { - NoDtor, - TraitDtor(bool) -} - -impl DtorKind { - pub fn is_present(&self) -> bool { - match *self { - TraitDtor(..) 
=> true, - _ => false - } - } - - pub fn has_drop_flag(&self) -> bool { - match self { - &NoDtor => false, - &TraitDtor(flag) => flag - } - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum ImplOrTraitItemContainer { - TraitContainer(DefId), - ImplContainer(DefId), -} - -impl ImplOrTraitItemContainer { - pub fn id(&self) -> DefId { - match *self { - TraitContainer(id) => id, - ImplContainer(id) => id, - } - } -} - -#[derive(Clone)] -pub enum ImplOrTraitItem<'tcx> { - ConstTraitItem(Rc>), - MethodTraitItem(Rc>), - TypeTraitItem(Rc>), -} - -impl<'tcx> ImplOrTraitItem<'tcx> { - fn id(&self) -> ImplOrTraitItemId { - match *self { - ConstTraitItem(ref associated_const) => { - ConstTraitItemId(associated_const.def_id) - } - MethodTraitItem(ref method) => MethodTraitItemId(method.def_id), - TypeTraitItem(ref associated_type) => { - TypeTraitItemId(associated_type.def_id) - } - } - } - - pub fn def_id(&self) -> DefId { - match *self { - ConstTraitItem(ref associated_const) => associated_const.def_id, - MethodTraitItem(ref method) => method.def_id, - TypeTraitItem(ref associated_type) => associated_type.def_id, - } - } - - pub fn name(&self) -> Name { - match *self { - ConstTraitItem(ref associated_const) => associated_const.name, - MethodTraitItem(ref method) => method.name, - TypeTraitItem(ref associated_type) => associated_type.name, - } - } - - pub fn vis(&self) -> hir::Visibility { - match *self { - ConstTraitItem(ref associated_const) => associated_const.vis, - MethodTraitItem(ref method) => method.vis, - TypeTraitItem(ref associated_type) => associated_type.vis, - } - } - - pub fn container(&self) -> ImplOrTraitItemContainer { - match *self { - ConstTraitItem(ref associated_const) => associated_const.container, - MethodTraitItem(ref method) => method.container, - TypeTraitItem(ref associated_type) => associated_type.container, - } - } - - pub fn as_opt_method(&self) -> Option>> { - match *self { - MethodTraitItem(ref m) => Some((*m).clone()), - _ => 
None, - } - } -} - -#[derive(Clone, Copy, Debug)] -pub enum ImplOrTraitItemId { - ConstTraitItemId(DefId), - MethodTraitItemId(DefId), - TypeTraitItemId(DefId), -} - -impl ImplOrTraitItemId { - pub fn def_id(&self) -> DefId { - match *self { - ConstTraitItemId(def_id) => def_id, - MethodTraitItemId(def_id) => def_id, - TypeTraitItemId(def_id) => def_id, - } - } -} - -#[derive(Clone, Debug)] -pub struct Method<'tcx> { - pub name: Name, - pub generics: Generics<'tcx>, - pub predicates: GenericPredicates<'tcx>, - pub fty: BareFnTy<'tcx>, - pub explicit_self: ExplicitSelfCategory, - pub vis: hir::Visibility, - pub def_id: DefId, - pub container: ImplOrTraitItemContainer, -} - -impl<'tcx> Method<'tcx> { - pub fn new(name: Name, - generics: ty::Generics<'tcx>, - predicates: GenericPredicates<'tcx>, - fty: BareFnTy<'tcx>, - explicit_self: ExplicitSelfCategory, - vis: hir::Visibility, - def_id: DefId, - container: ImplOrTraitItemContainer) - -> Method<'tcx> { - Method { - name: name, - generics: generics, - predicates: predicates, - fty: fty, - explicit_self: explicit_self, - vis: vis, - def_id: def_id, - container: container, - } - } - - pub fn container_id(&self) -> DefId { - match self.container { - TraitContainer(id) => id, - ImplContainer(id) => id, - } - } -} - -impl<'tcx> PartialEq for Method<'tcx> { - #[inline] - fn eq(&self, other: &Self) -> bool { self.def_id == other.def_id } -} - -impl<'tcx> Eq for Method<'tcx> {} - -impl<'tcx> Hash for Method<'tcx> { - #[inline] - fn hash(&self, s: &mut H) { - self.def_id.hash(s) - } -} - -#[derive(Clone, Copy, Debug)] -pub struct AssociatedConst<'tcx> { - pub name: Name, - pub ty: Ty<'tcx>, - pub vis: hir::Visibility, - pub def_id: DefId, - pub container: ImplOrTraitItemContainer, - pub has_value: bool -} - -#[derive(Clone, Copy, Debug)] -pub struct AssociatedType<'tcx> { - pub name: Name, - pub ty: Option>, - pub vis: hir::Visibility, - pub def_id: DefId, - pub container: ImplOrTraitItemContainer, -} - -#[derive(Clone, 
PartialEq, RustcDecodable, RustcEncodable)] -pub struct ItemVariances { - pub types: VecPerParamSpace, - pub regions: VecPerParamSpace, -} - -#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)] -pub enum Variance { - Covariant, // T <: T iff A <: B -- e.g., function return type - Invariant, // T <: T iff B == A -- e.g., type of mutable cell - Contravariant, // T <: T iff B <: A -- e.g., function param type - Bivariant, // T <: T -- e.g., unused type parameter -} - -#[derive(Clone, Copy, Debug)] -pub struct MethodCallee<'tcx> { - /// Impl method ID, for inherent methods, or trait method ID, otherwise. - pub def_id: DefId, - pub ty: Ty<'tcx>, - pub substs: &'tcx subst::Substs<'tcx> -} - -/// With method calls, we store some extra information in -/// side tables (i.e method_map). We use -/// MethodCall as a key to index into these tables instead of -/// just directly using the expression's NodeId. The reason -/// for this being that we may apply adjustments (coercions) -/// with the resulting expression also needing to use the -/// side tables. The problem with this is that we don't -/// assign a separate NodeId to this new expression -/// and so it would clash with the base expression if both -/// needed to add to the side tables. Thus to disambiguate -/// we also keep track of whether there's an adjustment in -/// our key. 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub struct MethodCall { - pub expr_id: NodeId, - pub autoderef: u32 -} - -impl MethodCall { - pub fn expr(id: NodeId) -> MethodCall { - MethodCall { - expr_id: id, - autoderef: 0 - } - } - - pub fn autoderef(expr_id: NodeId, autoderef: u32) -> MethodCall { - MethodCall { - expr_id: expr_id, - autoderef: 1 + autoderef - } - } -} - -// maps from an expression id that corresponds to a method call to the details -// of the method to be invoked -pub type MethodMap<'tcx> = FnvHashMap>; - -// Contains information needed to resolve types and (in the future) look up -// the types of AST nodes. -#[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub struct CReaderCacheKey { - pub cnum: CrateNum, - pub pos: usize, -} - -/// A restriction that certain types must be the same size. The use of -/// `transmute` gives rise to these restrictions. These generally -/// cannot be checked until trans; therefore, each call to `transmute` -/// will push one or more such restriction into the -/// `transmute_restrictions` vector during `intrinsicck`. They are -/// then checked during `trans` by the fn `check_intrinsics`. -#[derive(Copy, Clone)] -pub struct TransmuteRestriction<'tcx> { - /// The span whence the restriction comes. - pub span: Span, - - /// The type being transmuted from. - pub original_from: Ty<'tcx>, - - /// The type being transmuted to. - pub original_to: Ty<'tcx>, - - /// The type being transmuted from, with all type parameters - /// substituted for an arbitrary representative. Not to be shown - /// to the end user. - pub substituted_from: Ty<'tcx>, - - /// The type being transmuted to, with all type parameters - /// substituted for an arbitrary representative. Not to be shown - /// to the end user. - pub substituted_to: Ty<'tcx>, - - /// NodeId of the transmute intrinsic. - pub id: NodeId, -} - -/// Describes the fragment-state associated with a NodeId. 
-/// -/// Currently only unfragmented paths have entries in the table, -/// but longer-term this enum is expected to expand to also -/// include data for fragmented paths. -#[derive(Copy, Clone, Debug)] -pub enum FragmentInfo { - Moved { var: NodeId, move_expr: NodeId }, - Assigned { var: NodeId, assign_expr: NodeId, assignee_id: NodeId }, -} - -// Flags that we track on types. These flags are propagated upwards -// through the type during type construction, so that we can quickly -// check whether the type has various kinds of types in it without -// recursing over the type itself. -bitflags! { - flags TypeFlags: u32 { - const HAS_PARAMS = 1 << 0, - const HAS_SELF = 1 << 1, - const HAS_TY_INFER = 1 << 2, - const HAS_RE_INFER = 1 << 3, - const HAS_RE_EARLY_BOUND = 1 << 4, - const HAS_FREE_REGIONS = 1 << 5, - const HAS_TY_ERR = 1 << 6, - const HAS_PROJECTION = 1 << 7, - const HAS_TY_CLOSURE = 1 << 8, - - // true if there are "names" of types and regions and so forth - // that are local to a particular fn - const HAS_LOCAL_NAMES = 1 << 9, - - const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | - TypeFlags::HAS_SELF.bits | - TypeFlags::HAS_RE_EARLY_BOUND.bits, - - // Flags representing the nominal content of a type, - // computed by FlagsComputation. If you add a new nominal - // flag, it should be added here too. 
- const NOMINAL_FLAGS = TypeFlags::HAS_PARAMS.bits | - TypeFlags::HAS_SELF.bits | - TypeFlags::HAS_TY_INFER.bits | - TypeFlags::HAS_RE_INFER.bits | - TypeFlags::HAS_RE_EARLY_BOUND.bits | - TypeFlags::HAS_FREE_REGIONS.bits | - TypeFlags::HAS_TY_ERR.bits | - TypeFlags::HAS_PROJECTION.bits | - TypeFlags::HAS_TY_CLOSURE.bits | - TypeFlags::HAS_LOCAL_NAMES.bits, - - // Caches for type_is_sized, type_moves_by_default - const SIZEDNESS_CACHED = 1 << 16, - const IS_SIZED = 1 << 17, - const MOVENESS_CACHED = 1 << 18, - const MOVES_BY_DEFAULT = 1 << 19, - } -} - -pub struct TyS<'tcx> { - pub sty: TypeVariants<'tcx>, - pub flags: Cell, - - // the maximal depth of any bound regions appearing in this type. - region_depth: u32, -} - -impl<'tcx> PartialEq for TyS<'tcx> { - #[inline] - fn eq(&self, other: &TyS<'tcx>) -> bool { - // (self as *const _) == (other as *const _) - (self as *const TyS<'tcx>) == (other as *const TyS<'tcx>) - } -} -impl<'tcx> Eq for TyS<'tcx> {} - -impl<'tcx> Hash for TyS<'tcx> { - fn hash(&self, s: &mut H) { - (self as *const TyS).hash(s) - } -} - -pub type Ty<'tcx> = &'tcx TyS<'tcx>; - -impl<'tcx> Encodable for Ty<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - cstore::tls::with_encoding_context(s, |ecx, rbml_w| { - ecx.encode_ty(rbml_w, *self); - Ok(()) - }) - } -} - -impl<'tcx> Decodable for Ty<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - cstore::tls::with_decoding_context(d, |dcx, rbml_r| { - Ok(dcx.decode_ty(rbml_r)) - }) - } -} - - -/// Upvars do not get their own node-id. Instead, we use the pair of -/// the original var id (that is, the root variable that is referenced -/// by the upvar) and the id of the closure expression. -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct UpvarId { - pub var_id: NodeId, - pub closure_expr_id: NodeId, -} - -#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)] -pub enum BorrowKind { - /// Data must be immutable and is aliasable. 
- ImmBorrow, - - /// Data must be immutable but not aliasable. This kind of borrow - /// cannot currently be expressed by the user and is used only in - /// implicit closure bindings. It is needed when you the closure - /// is borrowing or mutating a mutable referent, e.g.: - /// - /// let x: &mut isize = ...; - /// let y = || *x += 5; - /// - /// If we were to try to translate this closure into a more explicit - /// form, we'd encounter an error with the code as written: - /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } - /// - /// This is then illegal because you cannot mutate a `&mut` found - /// in an aliasable location. To solve, you'd have to translate with - /// an `&mut` borrow: - /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } - /// - /// Now the assignment to `**env.x` is legal, but creating a - /// mutable pointer to `x` is not because `x` is not mutable. We - /// could fix this by declaring `x` as `let mut x`. This is ok in - /// user code, if awkward, but extra weird for closures, since the - /// borrow is hidden. - /// - /// So we introduce a "unique imm" borrow -- the referent is - /// immutable, but not aliasable. This solves the problem. For - /// simplicity, we don't give users the way to express this - /// borrow, it's just used when translating closures. - UniqueImmBorrow, - - /// Data is mutable and not aliasable. - MutBorrow -} - -/// Information describing the capture of an upvar. This is computed -/// during `typeck`, specifically by `regionck`. -#[derive(PartialEq, Clone, Debug, Copy)] -pub enum UpvarCapture { - /// Upvar is captured by value. 
This is always true when the - /// closure is labeled `move`, but can also be true in other cases - /// depending on inference. - ByValue, - - /// Upvar is captured by reference. - ByRef(UpvarBorrow), -} - -#[derive(PartialEq, Clone, Copy)] -pub struct UpvarBorrow { - /// The kind of borrow: by-ref upvars have access to shared - /// immutable borrows, which are not part of the normal language - /// syntax. - pub kind: BorrowKind, - - /// Region of the resulting reference. - pub region: ty::Region, -} - -pub type UpvarCaptureMap = FnvHashMap; - -#[derive(Copy, Clone)] -pub struct ClosureUpvar<'tcx> { - pub def: def::Def, - pub span: Span, - pub ty: Ty<'tcx>, -} - -#[derive(Clone, Copy, PartialEq)] -pub enum IntVarValue { - IntType(ast::IntTy), - UintType(ast::UintTy), -} - -/// Default region to use for the bound of objects that are -/// supplied as the value for this type parameter. This is derived -/// from `T:'a` annotations appearing in the type definition. If -/// this is `None`, then the default is inherited from the -/// surrounding context. See RFC #599 for details. -#[derive(Copy, Clone)] -pub enum ObjectLifetimeDefault { - /// Require an explicit annotation. Occurs when multiple - /// `T:'a` constraints are found. - Ambiguous, - - /// Use the base default, typically 'static, but in a fn body it is a fresh variable - BaseDefault, - - /// Use the given region as the default. 
- Specific(Region), -} - -#[derive(Clone)] -pub struct TypeParameterDef<'tcx> { - pub name: Name, - pub def_id: DefId, - pub space: subst::ParamSpace, - pub index: u32, - pub default_def_id: DefId, // for use in error reporing about defaults - pub default: Option>, - pub object_lifetime_default: ObjectLifetimeDefault, -} - -#[derive(Clone)] -pub struct RegionParameterDef { - pub name: Name, - pub def_id: DefId, - pub space: subst::ParamSpace, - pub index: u32, - pub bounds: Vec, -} - -impl RegionParameterDef { - pub fn to_early_bound_region(&self) -> ty::Region { - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: self.space, - index: self.index, - name: self.name, - }) - } - pub fn to_bound_region(&self) -> ty::BoundRegion { - ty::BoundRegion::BrNamed(self.def_id, self.name) - } -} - -/// Information about the formal type/lifetime parameters associated -/// with an item or method. Analogous to hir::Generics. -#[derive(Clone, Debug)] -pub struct Generics<'tcx> { - pub types: VecPerParamSpace>, - pub regions: VecPerParamSpace, -} - -impl<'tcx> Generics<'tcx> { - pub fn empty() -> Generics<'tcx> { - Generics { - types: VecPerParamSpace::empty(), - regions: VecPerParamSpace::empty(), - } - } - - pub fn is_empty(&self) -> bool { - self.types.is_empty() && self.regions.is_empty() - } - - pub fn has_type_params(&self, space: subst::ParamSpace) -> bool { - !self.types.is_empty_in(space) - } - - pub fn has_region_params(&self, space: subst::ParamSpace) -> bool { - !self.regions.is_empty_in(space) - } -} - -/// Bounds on generics. 
-#[derive(Clone)] -pub struct GenericPredicates<'tcx> { - pub predicates: VecPerParamSpace>, -} - -impl<'tcx> GenericPredicates<'tcx> { - pub fn empty() -> GenericPredicates<'tcx> { - GenericPredicates { - predicates: VecPerParamSpace::empty(), - } - } - - pub fn instantiate(&self, tcx: &ctxt<'tcx>, substs: &Substs<'tcx>) - -> InstantiatedPredicates<'tcx> { - InstantiatedPredicates { - predicates: self.predicates.subst(tcx, substs), - } - } - - pub fn instantiate_supertrait(&self, - tcx: &ctxt<'tcx>, - poly_trait_ref: &ty::PolyTraitRef<'tcx>) - -> InstantiatedPredicates<'tcx> - { - InstantiatedPredicates { - predicates: self.predicates.map(|pred| pred.subst_supertrait(tcx, poly_trait_ref)) - } - } -} - -#[derive(Clone, PartialEq, Eq, Hash)] -pub enum Predicate<'tcx> { - /// Corresponds to `where Foo : Bar`. `Foo` here would be - /// the `Self` type of the trait reference and `A`, `B`, and `C` - /// would be the parameters in the `TypeSpace`. - Trait(PolyTraitPredicate<'tcx>), - - /// where `T1 == T2`. - Equate(PolyEquatePredicate<'tcx>), - - /// where 'a : 'b - RegionOutlives(PolyRegionOutlivesPredicate), - - /// where T : 'a - TypeOutlives(PolyTypeOutlivesPredicate<'tcx>), - - /// where ::Name == X, approximately. - /// See `ProjectionPredicate` struct for details. - Projection(PolyProjectionPredicate<'tcx>), - - /// no syntax: T WF - WellFormed(Ty<'tcx>), - - /// trait must be object-safe - ObjectSafe(DefId), -} - -impl<'tcx> Predicate<'tcx> { - /// Performs a substitution suitable for going from a - /// poly-trait-ref to supertraits that must hold if that - /// poly-trait-ref holds. This is slightly different from a normal - /// substitution in terms of what happens with bound regions. See - /// lengthy comment below for details. - pub fn subst_supertrait(&self, - tcx: &ctxt<'tcx>, - trait_ref: &ty::PolyTraitRef<'tcx>) - -> ty::Predicate<'tcx> - { - // The interaction between HRTB and supertraits is not entirely - // obvious. 
Let me walk you (and myself) through an example. - // - // Let's start with an easy case. Consider two traits: - // - // trait Foo<'a> : Bar<'a,'a> { } - // trait Bar<'b,'c> { } - // - // Now, if we have a trait reference `for<'x> T : Foo<'x>`, then - // we can deduce that `for<'x> T : Bar<'x,'x>`. Basically, if we - // knew that `Foo<'x>` (for any 'x) then we also know that - // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from - // normal substitution. - // - // In terms of why this is sound, the idea is that whenever there - // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>` - // holds. So if there is an impl of `T:Foo<'a>` that applies to - // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all - // `'a`. - // - // Another example to be careful of is this: - // - // trait Foo1<'a> : for<'b> Bar1<'a,'b> { } - // trait Bar1<'b,'c> { } - // - // Here, if we have `for<'x> T : Foo1<'x>`, then what do we know? - // The answer is that we know `for<'x,'b> T : Bar1<'x,'b>`. The - // reason is similar to the previous example: any impl of - // `T:Foo1<'x>` must show that `for<'b> T : Bar1<'x, 'b>`. So - // basically we would want to collapse the bound lifetimes from - // the input (`trait_ref`) and the supertraits. - // - // To achieve this in practice is fairly straightforward. Let's - // consider the more complicated scenario: - // - // - We start out with `for<'x> T : Foo1<'x>`. In this case, `'x` - // has a De Bruijn index of 1. We want to produce `for<'x,'b> T : Bar1<'x,'b>`, - // where both `'x` and `'b` would have a DB index of 1. - // The substitution from the input trait-ref is therefore going to be - // `'a => 'x` (where `'x` has a DB index of 1). - // - The super-trait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an - // early-bound parameter and `'b' is a late-bound parameter with a - // DB index of 1. 
- // - If we replace `'a` with `'x` from the input, it too will have - // a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>` - // just as we wanted. - // - // There is only one catch. If we just apply the substitution `'a - // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will - // adjust the DB index because we substituting into a binder (it - // tries to be so smart...) resulting in `for<'x> for<'b> - // Bar1<'x,'b>` (we have no syntax for this, so use your - // imagination). Basically the 'x will have DB index of 2 and 'b - // will have DB index of 1. Not quite what we want. So we apply - // the substitution to the *contents* of the trait reference, - // rather than the trait reference itself (put another way, the - // substitution code expects equal binding levels in the values - // from the substitution and the value being substituted into, and - // this trick achieves that). - - let substs = &trait_ref.0.substs; - match *self { - Predicate::Trait(ty::Binder(ref data)) => - Predicate::Trait(ty::Binder(data.subst(tcx, substs))), - Predicate::Equate(ty::Binder(ref data)) => - Predicate::Equate(ty::Binder(data.subst(tcx, substs))), - Predicate::RegionOutlives(ty::Binder(ref data)) => - Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))), - Predicate::TypeOutlives(ty::Binder(ref data)) => - Predicate::TypeOutlives(ty::Binder(data.subst(tcx, substs))), - Predicate::Projection(ty::Binder(ref data)) => - Predicate::Projection(ty::Binder(data.subst(tcx, substs))), - Predicate::WellFormed(data) => - Predicate::WellFormed(data.subst(tcx, substs)), - Predicate::ObjectSafe(trait_def_id) => - Predicate::ObjectSafe(trait_def_id), - } - } -} - -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct TraitPredicate<'tcx> { - pub trait_ref: TraitRef<'tcx> -} -pub type PolyTraitPredicate<'tcx> = ty::Binder>; - -impl<'tcx> TraitPredicate<'tcx> { - pub fn def_id(&self) -> DefId { - self.trait_ref.def_id - } - - pub fn input_types(&self) -> &[Ty<'tcx>] { - 
self.trait_ref.substs.types.as_slice() - } - - pub fn self_ty(&self) -> Ty<'tcx> { - self.trait_ref.self_ty() - } -} - -impl<'tcx> PolyTraitPredicate<'tcx> { - pub fn def_id(&self) -> DefId { - self.0.def_id() - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1` -pub type PolyEquatePredicate<'tcx> = ty::Binder>; - -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct OutlivesPredicate(pub A, pub B); // `A : B` -pub type PolyOutlivesPredicate = ty::Binder>; -pub type PolyRegionOutlivesPredicate = PolyOutlivesPredicate; -pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, ty::Region>; - -/// This kind of predicate has no *direct* correspondent in the -/// syntax, but it roughly corresponds to the syntactic forms: -/// -/// 1. `T : TraitRef<..., Item=Type>` -/// 2. `>::Item == Type` (NYI) -/// -/// In particular, form #1 is "desugared" to the combination of a -/// normal trait predicate (`T : TraitRef<...>`) and one of these -/// predicates. Form #2 is a broader form in that it also permits -/// equality between arbitrary types. Processing an instance of Form -/// #2 eventually yields one of these `ProjectionPredicate` -/// instances to normalize the LHS. 
-#[derive(Clone, PartialEq, Eq, Hash)] -pub struct ProjectionPredicate<'tcx> { - pub projection_ty: ProjectionTy<'tcx>, - pub ty: Ty<'tcx>, -} - -pub type PolyProjectionPredicate<'tcx> = Binder>; - -impl<'tcx> PolyProjectionPredicate<'tcx> { - pub fn item_name(&self) -> Name { - self.0.projection_ty.item_name // safe to skip the binder to access a name - } - - pub fn sort_key(&self) -> (DefId, Name) { - self.0.projection_ty.sort_key() - } -} - -pub trait ToPolyTraitRef<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>; -} - -impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { - assert!(!self.has_escaping_regions()); - ty::Binder(self.clone()) - } -} - -impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { - self.map_bound_ref(|trait_pred| trait_pred.trait_ref.clone()) - } -} - -impl<'tcx> ToPolyTraitRef<'tcx> for PolyProjectionPredicate<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { - // Note: unlike with TraitRef::to_poly_trait_ref(), - // self.0.trait_ref is permitted to have escaping regions. - // This is because here `self` has a `Binder` and so does our - // return value, so we are preserving the number of binding - // levels. - ty::Binder(self.0.projection_ty.trait_ref.clone()) - } -} - -pub trait ToPredicate<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx>; -} - -impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - // we're about to add a binder, so let's check that we don't - // accidentally capture anything, or else that might be some - // weird debruijn accounting. 
- assert!(!self.has_escaping_regions()); - - ty::Predicate::Trait(ty::Binder(ty::TraitPredicate { - trait_ref: self.clone() - })) - } -} - -impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - ty::Predicate::Trait(self.to_poly_trait_predicate()) - } -} - -impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - Predicate::Equate(self.clone()) - } -} - -impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate { - fn to_predicate(&self) -> Predicate<'tcx> { - Predicate::RegionOutlives(self.clone()) - } -} - -impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - Predicate::TypeOutlives(self.clone()) - } -} - -impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> { - fn to_predicate(&self) -> Predicate<'tcx> { - Predicate::Projection(self.clone()) - } -} - -impl<'tcx> Predicate<'tcx> { - /// Iterates over the types in this predicate. Note that in all - /// cases this is skipping over a binder, so late-bound regions - /// with depth 0 are bound by the predicate. - pub fn walk_tys(&self) -> IntoIter> { - let vec: Vec<_> = match *self { - ty::Predicate::Trait(ref data) => { - data.0.trait_ref.substs.types.as_slice().to_vec() - } - ty::Predicate::Equate(ty::Binder(ref data)) => { - vec![data.0, data.1] - } - ty::Predicate::TypeOutlives(ty::Binder(ref data)) => { - vec![data.0] - } - ty::Predicate::RegionOutlives(..) 
=> { - vec![] - } - ty::Predicate::Projection(ref data) => { - let trait_inputs = data.0.projection_ty.trait_ref.substs.types.as_slice(); - trait_inputs.iter() - .cloned() - .chain(Some(data.0.ty)) - .collect() - } - ty::Predicate::WellFormed(data) => { - vec![data] - } - ty::Predicate::ObjectSafe(_trait_def_id) => { - vec![] - } - }; - - // The only reason to collect into a vector here is that I was - // too lazy to make the full (somewhat complicated) iterator - // type that would be needed here. But I wanted this fn to - // return an iterator conceptually, rather than a `Vec`, so as - // to be closer to `Ty::walk`. - vec.into_iter() - } - - pub fn to_opt_poly_trait_ref(&self) -> Option> { - match *self { - Predicate::Trait(ref t) => { - Some(t.to_poly_trait_ref()) - } - Predicate::Projection(..) | - Predicate::Equate(..) | - Predicate::RegionOutlives(..) | - Predicate::WellFormed(..) | - Predicate::ObjectSafe(..) | - Predicate::TypeOutlives(..) => { - None - } - } - } -} - -/// Represents the bounds declared on a particular set of type -/// parameters. Should eventually be generalized into a flag list of -/// where clauses. You can obtain a `InstantiatedPredicates` list from a -/// `GenericPredicates` by using the `instantiate` method. Note that this method -/// reflects an important semantic invariant of `InstantiatedPredicates`: while -/// the `GenericPredicates` are expressed in terms of the bound type -/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance -/// represented a set of bounds for some particular instantiation, -/// meaning that the generic parameters have been substituted with -/// their values. -/// -/// Example: -/// -/// struct Foo> { ... } -/// -/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like -/// `[[], [U:Bar]]`. Now if there were some particular reference -/// like `Foo`, then the `InstantiatedPredicates` would be `[[], -/// [usize:Bar]]`. 
-#[derive(Clone)] -pub struct InstantiatedPredicates<'tcx> { - pub predicates: VecPerParamSpace>, -} - -impl<'tcx> InstantiatedPredicates<'tcx> { - pub fn empty() -> InstantiatedPredicates<'tcx> { - InstantiatedPredicates { predicates: VecPerParamSpace::empty() } - } - - pub fn is_empty(&self) -> bool { - self.predicates.is_empty() - } -} - -impl<'tcx> TraitRef<'tcx> { - pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> { - TraitRef { def_id: def_id, substs: substs } - } - - pub fn self_ty(&self) -> Ty<'tcx> { - self.substs.self_ty().unwrap() - } - - pub fn input_types(&self) -> &[Ty<'tcx>] { - // Select only the "input types" from a trait-reference. For - // now this is all the types that appear in the - // trait-reference, but it should eventually exclude - // associated types. - self.substs.types.as_slice() - } -} - -/// When type checking, we use the `ParameterEnvironment` to track -/// details about the type/lifetime parameters that are in scope. -/// It primarily stores the bounds information. -/// -/// Note: This information might seem to be redundant with the data in -/// `tcx.ty_param_defs`, but it is not. That table contains the -/// parameter definitions from an "outside" perspective, but this -/// struct will contain the bounds for a parameter as seen from inside -/// the function body. Currently the only real distinction is that -/// bound lifetime parameters are replaced with free ones, but in the -/// future I hope to refine the representation of types so as to make -/// more distinctions clearer. -#[derive(Clone)] -pub struct ParameterEnvironment<'a, 'tcx:'a> { - pub tcx: &'a ctxt<'tcx>, - - /// See `construct_free_substs` for details. - pub free_substs: Substs<'tcx>, - - /// Each type parameter has an implicit region bound that - /// indicates it must outlive at least the function body (the user - /// may specify stronger requirements). This field indicates the - /// region of the callee. 
- pub implicit_region_bound: ty::Region, - - /// Obligations that the caller must satisfy. This is basically - /// the set of bounds on the in-scope type parameters, translated - /// into Obligations, and elaborated and normalized. - pub caller_bounds: Vec>, - - /// Caches the results of trait selection. This cache is used - /// for things that have to do with the parameters in scope. - pub selection_cache: traits::SelectionCache<'tcx>, - - /// Caches the results of trait evaluation. - pub evaluation_cache: traits::EvaluationCache<'tcx>, - - /// Scope that is attached to free regions for this scope. This - /// is usually the id of the fn body, but for more abstract scopes - /// like structs we often use the node-id of the struct. - /// - /// FIXME(#3696). It would be nice to refactor so that free - /// regions don't have this implicit scope and instead introduce - /// relationships in the environment. - pub free_id_outlive: CodeExtent, -} - -impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> { - pub fn with_caller_bounds(&self, - caller_bounds: Vec>) - -> ParameterEnvironment<'a,'tcx> - { - ParameterEnvironment { - tcx: self.tcx, - free_substs: self.free_substs.clone(), - implicit_region_bound: self.implicit_region_bound, - caller_bounds: caller_bounds, - selection_cache: traits::SelectionCache::new(), - evaluation_cache: traits::EvaluationCache::new(), - free_id_outlive: self.free_id_outlive, - } - } - - pub fn for_item(cx: &'a ctxt<'tcx>, id: NodeId) -> ParameterEnvironment<'a, 'tcx> { - match cx.map.find(id) { - Some(ast_map::NodeImplItem(ref impl_item)) => { - match impl_item.node { - hir::ImplItemKind::Type(_) => { - // associated types don't have their own entry (for some reason), - // so for now just grab environment for the impl - let impl_id = cx.map.get_parent(id); - let impl_def_id = cx.map.local_def_id(impl_id); - let scheme = cx.lookup_item_type(impl_def_id); - let predicates = cx.lookup_predicates(impl_def_id); - 
cx.construct_parameter_environment(impl_item.span, - &scheme.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - hir::ImplItemKind::Const(_, _) => { - let def_id = cx.map.local_def_id(id); - let scheme = cx.lookup_item_type(def_id); - let predicates = cx.lookup_predicates(def_id); - cx.construct_parameter_environment(impl_item.span, - &scheme.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - hir::ImplItemKind::Method(_, ref body) => { - let method_def_id = cx.map.local_def_id(id); - match cx.impl_or_trait_item(method_def_id) { - MethodTraitItem(ref method_ty) => { - let method_generics = &method_ty.generics; - let method_bounds = &method_ty.predicates; - cx.construct_parameter_environment( - impl_item.span, - method_generics, - method_bounds, - cx.region_maps.call_site_extent(id, body.id)) - } - _ => { - cx.sess - .bug("ParameterEnvironment::for_item(): \ - got non-method item from impl method?!") - } - } - } - } - } - Some(ast_map::NodeTraitItem(trait_item)) => { - match trait_item.node { - hir::TypeTraitItem(..) => { - // associated types don't have their own entry (for some reason), - // so for now just grab environment for the trait - let trait_id = cx.map.get_parent(id); - let trait_def_id = cx.map.local_def_id(trait_id); - let trait_def = cx.lookup_trait_def(trait_def_id); - let predicates = cx.lookup_predicates(trait_def_id); - cx.construct_parameter_environment(trait_item.span, - &trait_def.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - hir::ConstTraitItem(..) => { - let def_id = cx.map.local_def_id(id); - let scheme = cx.lookup_item_type(def_id); - let predicates = cx.lookup_predicates(def_id); - cx.construct_parameter_environment(trait_item.span, - &scheme.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - hir::MethodTraitItem(_, ref body) => { - // Use call-site for extent (unless this is a - // trait method with no default; then fallback - // to the method id). 
- let method_def_id = cx.map.local_def_id(id); - match cx.impl_or_trait_item(method_def_id) { - MethodTraitItem(ref method_ty) => { - let method_generics = &method_ty.generics; - let method_bounds = &method_ty.predicates; - let extent = if let Some(ref body) = *body { - // default impl: use call_site extent as free_id_outlive bound. - cx.region_maps.call_site_extent(id, body.id) - } else { - // no default impl: use item extent as free_id_outlive bound. - cx.region_maps.item_extent(id) - }; - cx.construct_parameter_environment( - trait_item.span, - method_generics, - method_bounds, - extent) - } - _ => { - cx.sess - .bug("ParameterEnvironment::for_item(): \ - got non-method item from provided \ - method?!") - } - } - } - } - } - Some(ast_map::NodeItem(item)) => { - match item.node { - hir::ItemFn(_, _, _, _, _, ref body) => { - // We assume this is a function. - let fn_def_id = cx.map.local_def_id(id); - let fn_scheme = cx.lookup_item_type(fn_def_id); - let fn_predicates = cx.lookup_predicates(fn_def_id); - - cx.construct_parameter_environment(item.span, - &fn_scheme.generics, - &fn_predicates, - cx.region_maps.call_site_extent(id, - body.id)) - } - hir::ItemEnum(..) | - hir::ItemStruct(..) | - hir::ItemImpl(..) | - hir::ItemConst(..) | - hir::ItemStatic(..) => { - let def_id = cx.map.local_def_id(id); - let scheme = cx.lookup_item_type(def_id); - let predicates = cx.lookup_predicates(def_id); - cx.construct_parameter_environment(item.span, - &scheme.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - hir::ItemTrait(..) 
=> { - let def_id = cx.map.local_def_id(id); - let trait_def = cx.lookup_trait_def(def_id); - let predicates = cx.lookup_predicates(def_id); - cx.construct_parameter_environment(item.span, - &trait_def.generics, - &predicates, - cx.region_maps.item_extent(id)) - } - _ => { - cx.sess.span_bug(item.span, - "ParameterEnvironment::from_item(): - can't create a parameter \ - environment for this kind of item") - } - } - } - Some(ast_map::NodeExpr(..)) => { - // This is a convenience to allow closures to work. - ParameterEnvironment::for_item(cx, cx.map.get_parent(id)) - } - _ => { - cx.sess.bug(&format!("ParameterEnvironment::from_item(): \ - `{}` is not an item", - cx.map.node_to_string(id))) - } - } - } -} - -/// A "type scheme", in ML terminology, is a type combined with some -/// set of generic types that the type is, well, generic over. In Rust -/// terms, it is the "type" of a fn item or struct -- this type will -/// include various generic parameters that must be substituted when -/// the item/struct is referenced. That is called converting the type -/// scheme to a monotype. -/// -/// - `generics`: the set of type parameters and their bounds -/// - `ty`: the base types, which may reference the parameters defined -/// in `generics` -/// -/// Note that TypeSchemes are also sometimes called "polytypes" (and -/// in fact this struct used to carry that name, so you may find some -/// stray references in a comment or something). We try to reserve the -/// "poly" prefix to refer to higher-ranked things, as in -/// `PolyTraitRef`. -/// -/// Note that each item also comes with predicates, see -/// `lookup_predicates`. -#[derive(Clone, Debug)] -pub struct TypeScheme<'tcx> { - pub generics: Generics<'tcx>, - pub ty: Ty<'tcx>, -} - -bitflags! { - flags AdtFlags: u32 { - const NO_ADT_FLAGS = 0, - const IS_ENUM = 1 << 0, - const IS_DTORCK = 1 << 1, // is this a dtorck type? 
- const IS_DTORCK_VALID = 1 << 2, - const IS_PHANTOM_DATA = 1 << 3, - const IS_SIMD = 1 << 4, - const IS_FUNDAMENTAL = 1 << 5, - const IS_NO_DROP_FLAG = 1 << 6, - } -} - -pub type AdtDef<'tcx> = &'tcx AdtDefData<'tcx, 'static>; -pub type VariantDef<'tcx> = &'tcx VariantDefData<'tcx, 'static>; -pub type FieldDef<'tcx> = &'tcx FieldDefData<'tcx, 'static>; - -// See comment on AdtDefData for explanation -pub type AdtDefMaster<'tcx> = &'tcx AdtDefData<'tcx, 'tcx>; -pub type VariantDefMaster<'tcx> = &'tcx VariantDefData<'tcx, 'tcx>; -pub type FieldDefMaster<'tcx> = &'tcx FieldDefData<'tcx, 'tcx>; - -pub struct VariantDefData<'tcx, 'container: 'tcx> { - /// The variant's DefId. If this is a tuple-like struct, - /// this is the DefId of the struct's ctor. - pub did: DefId, - pub name: Name, // struct's name if this is a struct - pub disr_val: Disr, - pub fields: Vec>, - pub kind: VariantKind, -} - -pub struct FieldDefData<'tcx, 'container: 'tcx> { - /// The field's DefId. NOTE: the fields of tuple-like enum variants - /// are not real items, and don't have entries in tcache etc. - pub did: DefId, - /// special_idents::unnamed_field.name - /// if this is a tuple-like field - pub name: Name, - pub vis: hir::Visibility, - /// TyIVar is used here to allow for variance (see the doc at - /// AdtDefData). - /// - /// Note: direct accesses to `ty` must also add dep edges. - ty: ivar::TyIVar<'tcx, 'container> -} - -/// The definition of an abstract data type - a struct or enum. -/// -/// These are all interned (by intern_adt_def) into the adt_defs -/// table. -/// -/// Because of the possibility of nested tcx-s, this type -/// needs 2 lifetimes: the traditional variant lifetime ('tcx) -/// bounding the lifetime of the inner types is of course necessary. -/// However, it is not sufficient - types from a child tcx must -/// not be leaked into the master tcx by being stored in an AdtDefData. 
-/// -/// The 'container lifetime ensures that by outliving the container -/// tcx and preventing shorter-lived types from being inserted. When -/// write access is not needed, the 'container lifetime can be -/// erased to 'static, which can be done by the AdtDef wrapper. -pub struct AdtDefData<'tcx, 'container: 'tcx> { - pub did: DefId, - pub variants: Vec>, - destructor: Cell>, - flags: Cell, -} - -impl<'tcx, 'container> PartialEq for AdtDefData<'tcx, 'container> { - // AdtDefData are always interned and this is part of TyS equality - #[inline] - fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ } -} - -impl<'tcx, 'container> Eq for AdtDefData<'tcx, 'container> {} - -impl<'tcx, 'container> Hash for AdtDefData<'tcx, 'container> { - #[inline] - fn hash(&self, s: &mut H) { - (self as *const AdtDefData).hash(s) - } -} - -impl<'tcx> Encodable for AdtDef<'tcx> { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - self.did.encode(s) - } -} - -impl<'tcx> Decodable for AdtDef<'tcx> { - fn decode(d: &mut D) -> Result, D::Error> { - let def_id: DefId = try!{ Decodable::decode(d) }; - - cstore::tls::with_decoding_context(d, |dcx, _| { - let def_id = dcx.translate_def_id(def_id); - Ok(dcx.tcx().lookup_adt_def(def_id)) - }) - } -} - - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum AdtKind { Struct, Enum } - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum VariantKind { Struct, Tuple, Unit } - -impl<'tcx, 'container> AdtDefData<'tcx, 'container> { - fn new(tcx: &ctxt<'tcx>, - did: DefId, - kind: AdtKind, - variants: Vec>) -> Self { - let mut flags = AdtFlags::NO_ADT_FLAGS; - let attrs = tcx.get_attrs(did); - if attr::contains_name(&attrs, "fundamental") { - flags = flags | AdtFlags::IS_FUNDAMENTAL; - } - if attr::contains_name(&attrs, "unsafe_no_drop_flag") { - flags = flags | AdtFlags::IS_NO_DROP_FLAG; - } - if tcx.lookup_simd(did) { - flags = flags | AdtFlags::IS_SIMD; - } - if Some(did) == tcx.lang_items.phantom_data() { - flags 
= flags | AdtFlags::IS_PHANTOM_DATA; - } - if let AdtKind::Enum = kind { - flags = flags | AdtFlags::IS_ENUM; - } - AdtDefData { - did: did, - variants: variants, - flags: Cell::new(flags), - destructor: Cell::new(None) - } - } - - fn calculate_dtorck(&'tcx self, tcx: &ctxt<'tcx>) { - if tcx.is_adt_dtorck(self) { - self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK); - } - self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID) - } - - /// Returns the kind of the ADT - Struct or Enum. - #[inline] - pub fn adt_kind(&self) -> AdtKind { - if self.flags.get().intersects(AdtFlags::IS_ENUM) { - AdtKind::Enum - } else { - AdtKind::Struct - } - } - - /// Returns whether this is a dtorck type. If this returns - /// true, this type being safe for destruction requires it to be - /// alive; Otherwise, only the contents are required to be. - #[inline] - pub fn is_dtorck(&'tcx self, tcx: &ctxt<'tcx>) -> bool { - if !self.flags.get().intersects(AdtFlags::IS_DTORCK_VALID) { - self.calculate_dtorck(tcx) - } - self.flags.get().intersects(AdtFlags::IS_DTORCK) - } - - /// Returns whether this type is #[fundamental] for the purposes - /// of coherence checking. - #[inline] - pub fn is_fundamental(&self) -> bool { - self.flags.get().intersects(AdtFlags::IS_FUNDAMENTAL) - } - - #[inline] - pub fn is_simd(&self) -> bool { - self.flags.get().intersects(AdtFlags::IS_SIMD) - } - - /// Returns true if this is PhantomData. - #[inline] - pub fn is_phantom_data(&self) -> bool { - self.flags.get().intersects(AdtFlags::IS_PHANTOM_DATA) - } - - /// Returns whether this type has a destructor. - pub fn has_dtor(&self) -> bool { - match self.dtor_kind() { - NoDtor => false, - TraitDtor(..) => true - } - } - - /// Asserts this is a struct and returns the struct's unique - /// variant. 
- pub fn struct_variant(&self) -> &VariantDefData<'tcx, 'container> { - assert!(self.adt_kind() == AdtKind::Struct); - &self.variants[0] - } - - #[inline] - pub fn type_scheme(&self, tcx: &ctxt<'tcx>) -> TypeScheme<'tcx> { - tcx.lookup_item_type(self.did) - } - - #[inline] - pub fn predicates(&self, tcx: &ctxt<'tcx>) -> GenericPredicates<'tcx> { - tcx.lookup_predicates(self.did) - } - - /// Returns an iterator over all fields contained - /// by this ADT. - #[inline] - pub fn all_fields(&self) -> - iter::FlatMap< - slice::Iter>, - slice::Iter>, - for<'s> fn(&'s VariantDefData<'tcx, 'container>) - -> slice::Iter<'s, FieldDefData<'tcx, 'container>> - > { - self.variants.iter().flat_map(VariantDefData::fields_iter) - } - - #[inline] - pub fn is_empty(&self) -> bool { - self.variants.is_empty() - } - - #[inline] - pub fn is_univariant(&self) -> bool { - self.variants.len() == 1 - } - - pub fn is_payloadfree(&self) -> bool { - !self.variants.is_empty() && - self.variants.iter().all(|v| v.fields.is_empty()) - } - - pub fn variant_with_id(&self, vid: DefId) -> &VariantDefData<'tcx, 'container> { - self.variants - .iter() - .find(|v| v.did == vid) - .expect("variant_with_id: unknown variant") - } - - pub fn variant_index_with_id(&self, vid: DefId) -> usize { - self.variants - .iter() - .position(|v| v.did == vid) - .expect("variant_index_with_id: unknown variant") - } - - pub fn variant_of_def(&self, def: def::Def) -> &VariantDefData<'tcx, 'container> { - match def { - def::DefVariant(_, vid, _) => self.variant_with_id(vid), - def::DefStruct(..) | def::DefTy(..) 
=> self.struct_variant(), - _ => panic!("unexpected def {:?} in variant_of_def", def) - } - } - - pub fn destructor(&self) -> Option { - self.destructor.get() - } - - pub fn set_destructor(&self, dtor: DefId) { - self.destructor.set(Some(dtor)); - } - - pub fn dtor_kind(&self) -> DtorKind { - match self.destructor.get() { - Some(_) => { - TraitDtor(!self.flags.get().intersects(AdtFlags::IS_NO_DROP_FLAG)) - } - None => NoDtor, - } - } -} - -impl<'tcx, 'container> VariantDefData<'tcx, 'container> { - #[inline] - fn fields_iter(&self) -> slice::Iter> { - self.fields.iter() - } - - pub fn kind(&self) -> VariantKind { - self.kind - } - - pub fn is_tuple_struct(&self) -> bool { - self.kind() == VariantKind::Tuple - } - - #[inline] - pub fn find_field_named(&self, - name: ast::Name) - -> Option<&FieldDefData<'tcx, 'container>> { - self.fields.iter().find(|f| f.name == name) - } - - #[inline] - pub fn index_of_field_named(&self, - name: ast::Name) - -> Option { - self.fields.iter().position(|f| f.name == name) - } - - #[inline] - pub fn field_named(&self, name: ast::Name) -> &FieldDefData<'tcx, 'container> { - self.find_field_named(name).unwrap() - } -} - -impl<'tcx, 'container> FieldDefData<'tcx, 'container> { - pub fn new(did: DefId, - name: Name, - vis: hir::Visibility) -> Self { - FieldDefData { - did: did, - name: name, - vis: vis, - ty: ivar::TyIVar::new() - } - } - - pub fn ty(&self, tcx: &ctxt<'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> { - self.unsubst_ty().subst(tcx, subst) - } - - pub fn unsubst_ty(&self) -> Ty<'tcx> { - self.ty.unwrap(DepNode::FieldTy(self.did)) - } - - pub fn fulfill_ty(&self, ty: Ty<'container>) { - self.ty.fulfill(DepNode::FieldTy(self.did), ty); - } -} - -/// Records the substitutions used to translate the polytype for an -/// item into the monotype of an item reference. 
-#[derive(Clone)] -pub struct ItemSubsts<'tcx> { - pub substs: Substs<'tcx>, -} - -#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)] -pub enum ClosureKind { - // Warning: Ordering is significant here! The ordering is chosen - // because the trait Fn is a subtrait of FnMut and so in turn, and - // hence we order it so that Fn < FnMut < FnOnce. - FnClosureKind, - FnMutClosureKind, - FnOnceClosureKind, -} - -impl ClosureKind { - pub fn trait_did(&self, cx: &ctxt) -> DefId { - let result = match *self { - FnClosureKind => cx.lang_items.require(FnTraitLangItem), - FnMutClosureKind => { - cx.lang_items.require(FnMutTraitLangItem) - } - FnOnceClosureKind => { - cx.lang_items.require(FnOnceTraitLangItem) - } - }; - match result { - Ok(trait_did) => trait_did, - Err(err) => cx.sess.fatal(&err[..]), - } - } - - /// True if this a type that impls this closure kind - /// must also implement `other`. - pub fn extends(self, other: ty::ClosureKind) -> bool { - match (self, other) { - (FnClosureKind, FnClosureKind) => true, - (FnClosureKind, FnMutClosureKind) => true, - (FnClosureKind, FnOnceClosureKind) => true, - (FnMutClosureKind, FnMutClosureKind) => true, - (FnMutClosureKind, FnOnceClosureKind) => true, - (FnOnceClosureKind, FnOnceClosureKind) => true, - _ => false, - } - } -} - -impl<'tcx> TyS<'tcx> { - /// Iterator that walks `self` and any types reachable from - /// `self`, in depth-first order. Note that just walks the types - /// that appear in `self`, it does not descend into the fields of - /// structs or variants. For example: - /// - /// ```notrust - /// isize => { isize } - /// Foo> => { Foo>, Bar, isize } - /// [isize] => { [isize], isize } - /// ``` - pub fn walk(&'tcx self) -> TypeWalker<'tcx> { - TypeWalker::new(self) - } - - /// Iterator that walks the immediate children of `self`. Hence - /// `Foo, u32>` yields the sequence `[Bar, u32]` - /// (but not `i32`, like `walk`). 
- pub fn walk_shallow(&'tcx self) -> IntoIter> { - walk::walk_shallow(self) - } - - /// Walks `ty` and any types appearing within `ty`, invoking the - /// callback `f` on each type. If the callback returns false, then the - /// children of the current type are ignored. - /// - /// Note: prefer `ty.walk()` where possible. - pub fn maybe_walk(&'tcx self, mut f: F) - where F : FnMut(Ty<'tcx>) -> bool - { - let mut walker = self.walk(); - while let Some(ty) = walker.next() { - if !f(ty) { - walker.skip_current_subtree(); - } - } - } -} - -impl<'tcx> ItemSubsts<'tcx> { - pub fn empty() -> ItemSubsts<'tcx> { - ItemSubsts { substs: Substs::empty() } - } - - pub fn is_noop(&self) -> bool { - self.substs.is_noop() - } -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum LvaluePreference { - PreferMutLvalue, - NoPreference -} - -impl LvaluePreference { - pub fn from_mutbl(m: hir::Mutability) -> Self { - match m { - hir::MutMutable => PreferMutLvalue, - hir::MutImmutable => NoPreference, - } - } -} - -/// Helper for looking things up in the various maps that are populated during -/// typeck::collect (e.g., `cx.impl_or_trait_items`, `cx.tcache`, etc). All of -/// these share the pattern that if the id is local, it should have been loaded -/// into the map by the `typeck::collect` phase. If the def-id is external, -/// then we have to go consult the crate loading code (and cache the result for -/// the future). 
-fn lookup_locally_or_in_crate_store(descr: &str, - def_id: DefId, - map: &M, - load_external: F) - -> M::Value where - M: MemoizationMap, - F: FnOnce() -> M::Value, -{ - map.memoize(def_id, || { - if def_id.is_local() { - panic!("No def'n found for {:?} in tcx.{}", def_id, descr); - } - load_external() - }) -} - -impl BorrowKind { - pub fn from_mutbl(m: hir::Mutability) -> BorrowKind { - match m { - hir::MutMutable => MutBorrow, - hir::MutImmutable => ImmBorrow, - } - } - - /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow - /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a - /// mutability that is stronger than necessary so that it at least *would permit* the borrow in - /// question. - pub fn to_mutbl_lossy(self) -> hir::Mutability { - match self { - MutBorrow => hir::MutMutable, - ImmBorrow => hir::MutImmutable, - - // We have no type corresponding to a unique imm borrow, so - // use `&mut`. It gives all the capabilities of an `&uniq` - // and hence is a safe "over approximation". - UniqueImmBorrow => hir::MutMutable, - } - } - - pub fn to_user_str(&self) -> &'static str { - match *self { - MutBorrow => "mutable", - ImmBorrow => "immutable", - UniqueImmBorrow => "uniquely immutable", - } - } -} - -impl<'tcx> ctxt<'tcx> { - pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> { - match self.node_id_to_type_opt(id) { - Some(ty) => ty, - None => self.sess.bug( - &format!("node_id_to_type: no type for node `{}`", - self.map.node_to_string(id))) - } - } - - pub fn node_id_to_type_opt(&self, id: NodeId) -> Option> { - self.tables.borrow().node_types.get(&id).cloned() - } - - pub fn node_id_item_substs(&self, id: NodeId) -> ItemSubsts<'tcx> { - match self.tables.borrow().item_substs.get(&id) { - None => ItemSubsts::empty(), - Some(ts) => ts.clone(), - } - } - - // Returns the type of a pattern as a monotype. Like @expr_ty, this function - // doesn't provide type parameter substitutions. 
- pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> { - self.node_id_to_type(pat.id) - } - pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option> { - self.node_id_to_type_opt(pat.id) - } - - // Returns the type of an expression as a monotype. - // - // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in - // some cases, we insert `AutoAdjustment` annotations such as auto-deref or - // auto-ref. The type returned by this function does not consider such - // adjustments. See `expr_ty_adjusted()` instead. - // - // NB (2): This type doesn't provide type parameter substitutions; e.g. if you - // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize" - // instead of "fn(ty) -> T with T = isize". - pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> { - self.node_id_to_type(expr.id) - } - - pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option> { - self.node_id_to_type_opt(expr.id) - } - - /// Returns the type of `expr`, considering any `AutoAdjustment` - /// entry recorded for that expression. - /// - /// It would almost certainly be better to store the adjusted ty in with - /// the `AutoAdjustment`, but I opted not to do this because it would - /// require serializing and deserializing the type and, although that's not - /// hard to do, I just hate that code so much I didn't want to touch it - /// unless it was to fix it properly, which seemed a distraction from the - /// thread at hand! 
-nmatsakis - pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> { - self.expr_ty(expr) - .adjust(self, expr.span, expr.id, - self.tables.borrow().adjustments.get(&expr.id), - |method_call| { - self.tables.borrow().method_map.get(&method_call).map(|method| method.ty) - }) - } - - pub fn expr_span(&self, id: NodeId) -> Span { - match self.map.find(id) { - Some(ast_map::NodeExpr(e)) => { - e.span - } - Some(f) => { - self.sess.bug(&format!("Node id {} is not an expr: {:?}", - id, f)); - } - None => { - self.sess.bug(&format!("Node id {} is not present \ - in the node map", id)); - } - } - } - - pub fn local_var_name_str(&self, id: NodeId) -> InternedString { - match self.map.find(id) { - Some(ast_map::NodeLocal(pat)) => { - match pat.node { - hir::PatIdent(_, ref path1, _) => path1.node.name.as_str(), - _ => { - self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, pat)); - }, - } - }, - r => self.sess.bug(&format!("Variable id {} maps to {:?}, not local", id, r)), - } - } - - pub fn resolve_expr(&self, expr: &hir::Expr) -> def::Def { - match self.def_map.borrow().get(&expr.id) { - Some(def) => def.full_def(), - None => { - self.sess.span_bug(expr.span, &format!( - "no def-map entry for expr {}", expr.id)); - } - } - } - - pub fn expr_is_lval(&self, expr: &hir::Expr) -> bool { - match expr.node { - hir::ExprPath(..) => { - // We can't use resolve_expr here, as this needs to run on broken - // programs. We don't need to through - associated items are all - // rvalues. - match self.def_map.borrow().get(&expr.id) { - Some(&def::PathResolution { - base_def: def::DefStatic(..), .. - }) | Some(&def::PathResolution { - base_def: def::DefUpvar(..), .. - }) | Some(&def::PathResolution { - base_def: def::DefLocal(..), .. - }) => { - true - } - Some(&def::PathResolution { base_def: def::DefErr, .. })=> true, - Some(..) 
=> false, - None => self.sess.span_bug(expr.span, &format!( - "no def for path {}", expr.id)) - } - } - - hir::ExprType(ref e, _) => { - self.expr_is_lval(e) - } - - hir::ExprUnary(hir::UnDeref, _) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) => { - true - } - - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprStruct(..) | - hir::ExprRange(..) | - hir::ExprTup(..) | - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprClosure(..) | - hir::ExprBlock(..) | - hir::ExprRepeat(..) | - hir::ExprVec(..) | - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprAssign(..) | - hir::ExprInlineAsm(..) | - hir::ExprAssignOp(..) | - hir::ExprLit(_) | - hir::ExprUnary(..) | - hir::ExprBox(..) | - hir::ExprAddrOf(..) | - hir::ExprBinary(..) | - hir::ExprCast(..) => { - false - } - } - } - - pub fn provided_trait_methods(&self, id: DefId) -> Vec>> { - if let Some(id) = self.map.as_local_node_id(id) { - if let ItemTrait(_, _, _, ref ms) = self.map.expect_item(id).node { - ms.iter().filter_map(|ti| { - if let hir::MethodTraitItem(_, Some(_)) = ti.node { - match self.impl_or_trait_item(self.map.local_def_id(ti.id)) { - MethodTraitItem(m) => Some(m), - _ => { - self.sess.bug("provided_trait_methods(): \ - non-method item found from \ - looking up provided method?!") - } - } - } else { - None - } - }).collect() - } else { - self.sess.bug(&format!("provided_trait_methods: `{:?}` is not a trait", id)) - } - } else { - self.sess.cstore.provided_trait_methods(self, id) - } - } - - pub fn associated_consts(&self, id: DefId) -> Vec>> { - if let Some(id) = self.map.as_local_node_id(id) { - match self.map.expect_item(id).node { - ItemTrait(_, _, _, ref tis) => { - tis.iter().filter_map(|ti| { - if let hir::ConstTraitItem(_, _) = ti.node { - match self.impl_or_trait_item(self.map.local_def_id(ti.id)) { - ConstTraitItem(ac) => Some(ac), - _ => { - self.sess.bug("associated_consts(): \ - 
non-const item found from \ - looking up a constant?!") - } - } - } else { - None - } - }).collect() - } - ItemImpl(_, _, _, _, _, ref iis) => { - iis.iter().filter_map(|ii| { - if let hir::ImplItemKind::Const(_, _) = ii.node { - match self.impl_or_trait_item(self.map.local_def_id(ii.id)) { - ConstTraitItem(ac) => Some(ac), - _ => { - self.sess.bug("associated_consts(): \ - non-const item found from \ - looking up a constant?!") - } - } - } else { - None - } - }).collect() - } - _ => { - self.sess.bug(&format!("associated_consts: `{:?}` is not a trait \ - or impl", id)) - } - } - } else { - self.sess.cstore.associated_consts(self, id) - } - } - - pub fn trait_impl_polarity(&self, id: DefId) -> Option { - if let Some(id) = self.map.as_local_node_id(id) { - match self.map.find(id) { - Some(ast_map::NodeItem(item)) => { - match item.node { - hir::ItemImpl(_, polarity, _, _, _, _) => Some(polarity), - _ => None - } - } - _ => None - } - } else { - self.sess.cstore.impl_polarity(id) - } - } - - pub fn custom_coerce_unsized_kind(&self, did: DefId) -> adjustment::CustomCoerceUnsized { - self.custom_coerce_unsized_kinds.memoize(did, || { - let (kind, src) = if did.krate != LOCAL_CRATE { - (self.sess.cstore.custom_coerce_unsized_kind(did), "external") - } else { - (None, "local") - }; - - match kind { - Some(kind) => kind, - None => { - self.sess.bug(&format!("custom_coerce_unsized_kind: \ - {} impl `{}` is missing its kind", - src, self.item_path_str(did))); - } - } - }) - } - - pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> { - lookup_locally_or_in_crate_store( - "impl_or_trait_items", id, &self.impl_or_trait_items, - || self.sess.cstore.impl_or_trait_item(self, id)) - } - - pub fn trait_item_def_ids(&self, id: DefId) -> Rc> { - lookup_locally_or_in_crate_store( - "trait_item_def_ids", id, &self.trait_item_def_ids, - || Rc::new(self.sess.cstore.trait_item_def_ids(id))) - } - - /// Returns the trait-ref corresponding to a given impl, or None if it is 
- /// an inherent impl. - pub fn impl_trait_ref(&self, id: DefId) -> Option> { - lookup_locally_or_in_crate_store( - "impl_trait_refs", id, &self.impl_trait_refs, - || self.sess.cstore.impl_trait_ref(self, id)) - } - - /// Returns whether this DefId refers to an impl - pub fn is_impl(&self, id: DefId) -> bool { - if let Some(id) = self.map.as_local_node_id(id) { - if let Some(ast_map::NodeItem( - &hir::Item { node: hir::ItemImpl(..), .. })) = self.map.find(id) { - true - } else { - false - } - } else { - self.sess.cstore.is_impl(id) - } - } - - pub fn trait_ref_to_def_id(&self, tr: &hir::TraitRef) -> DefId { - self.def_map.borrow().get(&tr.ref_id).expect("no def-map entry for trait").def_id() - } - - pub fn item_path_str(&self, id: DefId) -> String { - self.with_path(id, |path| ast_map::path_to_string(path)) - } - - pub fn def_path(&self, id: DefId) -> ast_map::DefPath { - if id.is_local() { - self.map.def_path(id) - } else { - self.sess.cstore.def_path(id) - } - } - - pub fn with_path(&self, id: DefId, f: F) -> T where - F: FnOnce(ast_map::PathElems) -> T, - { - if let Some(id) = self.map.as_local_node_id(id) { - self.map.with_path(id, f) - } else { - f(self.sess.cstore.item_path(id).iter().cloned().chain(LinkedPath::empty())) - } - } - - pub fn item_name(&self, id: DefId) -> ast::Name { - if let Some(id) = self.map.as_local_node_id(id) { - self.map.get_path_elem(id).name() - } else { - self.sess.cstore.item_name(id) - } - } - - // Register a given item type - pub fn register_item_type(&self, did: DefId, ty: TypeScheme<'tcx>) { - self.tcache.borrow_mut().insert(did, ty); - } - - // If the given item is in an external crate, looks up its type and adds it to - // the type cache. Returns the type parameters and type. - pub fn lookup_item_type(&self, did: DefId) -> TypeScheme<'tcx> { - lookup_locally_or_in_crate_store( - "tcache", did, &self.tcache, - || self.sess.cstore.item_type(self, did)) - } - - /// Given the did of a trait, returns its canonical trait ref. 
- pub fn lookup_trait_def(&self, did: DefId) -> &'tcx TraitDef<'tcx> { - lookup_locally_or_in_crate_store( - "trait_defs", did, &self.trait_defs, - || self.alloc_trait_def(self.sess.cstore.trait_def(self, did)) - ) - } - - /// Given the did of an ADT, return a master reference to its - /// definition. Unless you are planning on fulfilling the ADT's fields, - /// use lookup_adt_def instead. - pub fn lookup_adt_def_master(&self, did: DefId) -> AdtDefMaster<'tcx> { - lookup_locally_or_in_crate_store( - "adt_defs", did, &self.adt_defs, - || self.sess.cstore.adt_def(self, did) - ) - } - - /// Given the did of an ADT, return a reference to its definition. - pub fn lookup_adt_def(&self, did: DefId) -> AdtDef<'tcx> { - // when reverse-variance goes away, a transmute:: - // woud be needed here. - self.lookup_adt_def_master(did) - } - - /// Given the did of an item, returns its full set of predicates. - pub fn lookup_predicates(&self, did: DefId) -> GenericPredicates<'tcx> { - lookup_locally_or_in_crate_store( - "predicates", did, &self.predicates, - || self.sess.cstore.item_predicates(self, did)) - } - - /// Given the did of a trait, returns its superpredicates. - pub fn lookup_super_predicates(&self, did: DefId) -> GenericPredicates<'tcx> { - lookup_locally_or_in_crate_store( - "super_predicates", did, &self.super_predicates, - || self.sess.cstore.item_super_predicates(self, did)) - } - - /// If `type_needs_drop` returns true, then `ty` is definitely - /// non-copy and *might* have a destructor attached; if it returns - /// false, then `ty` definitely has no destructor (i.e. no drop glue). - /// - /// (Note that this implies that if `ty` has a destructor attached, - /// then `type_needs_drop` will definitely return `true` for `ty`.) - pub fn type_needs_drop_given_env<'a>(&self, - ty: Ty<'tcx>, - param_env: &ty::ParameterEnvironment<'a,'tcx>) -> bool { - // Issue #22536: We first query type_moves_by_default. 
It sees a - // normalized version of the type, and therefore will definitely - // know whether the type implements Copy (and thus needs no - // cleanup/drop/zeroing) ... - let implements_copy = !ty.moves_by_default(param_env, DUMMY_SP); - - if implements_copy { return false; } - - // ... (issue #22536 continued) but as an optimization, still use - // prior logic of asking if the `needs_drop` bit is set; we need - // not zero non-Copy types if they have no destructor. - - // FIXME(#22815): Note that calling `ty::type_contents` is a - // conservative heuristic; it may report that `needs_drop` is set - // when actual type does not actually have a destructor associated - // with it. But since `ty` absolutely did not have the `Copy` - // bound attached (see above), it is sound to treat it as having a - // destructor (e.g. zero its memory on move). - - let contents = ty.type_contents(self); - debug!("type_needs_drop ty={:?} contents={:?}", ty, contents); - contents.needs_drop(self) - } - - /// Get the attributes of a definition. 
- pub fn get_attrs(&self, did: DefId) -> Cow<'tcx, [ast::Attribute]> { - if let Some(id) = self.map.as_local_node_id(did) { - Cow::Borrowed(self.map.attrs(id)) - } else { - Cow::Owned(self.sess.cstore.item_attrs(did)) - } - } - - /// Determine whether an item is annotated with an attribute - pub fn has_attr(&self, did: DefId, attr: &str) -> bool { - self.get_attrs(did).iter().any(|item| item.check_name(attr)) - } - - /// Determine whether an item is annotated with `#[repr(packed)]` - pub fn lookup_packed(&self, did: DefId) -> bool { - self.lookup_repr_hints(did).contains(&attr::ReprPacked) - } - - /// Determine whether an item is annotated with `#[simd]` - pub fn lookup_simd(&self, did: DefId) -> bool { - self.has_attr(did, "simd") - || self.lookup_repr_hints(did).contains(&attr::ReprSimd) - } - - pub fn item_variances(&self, item_id: DefId) -> Rc { - lookup_locally_or_in_crate_store( - "item_variance_map", item_id, &self.item_variance_map, - || Rc::new(self.sess.cstore.item_variances(item_id))) - } - - pub fn trait_has_default_impl(&self, trait_def_id: DefId) -> bool { - self.populate_implementations_for_trait_if_necessary(trait_def_id); - - let def = self.lookup_trait_def(trait_def_id); - def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL) - } - - /// Records a trait-to-implementation mapping. - pub fn record_trait_has_default_impl(&self, trait_def_id: DefId) { - let def = self.lookup_trait_def(trait_def_id); - def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL) - } - - /// Load primitive inherent implementations if necessary - pub fn populate_implementations_for_primitive_if_necessary(&self, - primitive_def_id: DefId) { - if primitive_def_id.is_local() { - return - } - - // The primitive is not local, hence we are reading this out - // of metadata. 
- let _ignore = self.dep_graph.in_ignore(); - - if self.populated_external_primitive_impls.borrow().contains(&primitive_def_id) { - return - } - - debug!("populate_implementations_for_primitive_if_necessary: searching for {:?}", - primitive_def_id); - - let impl_items = self.sess.cstore.impl_items(primitive_def_id); - - // Store the implementation info. - self.impl_items.borrow_mut().insert(primitive_def_id, impl_items); - self.populated_external_primitive_impls.borrow_mut().insert(primitive_def_id); - } - - /// Populates the type context with all the inherent implementations for - /// the given type if necessary. - pub fn populate_inherent_implementations_for_type_if_necessary(&self, - type_id: DefId) { - if type_id.is_local() { - return - } - - // The type is not local, hence we are reading this out of - // metadata and don't need to track edges. - let _ignore = self.dep_graph.in_ignore(); - - if self.populated_external_types.borrow().contains(&type_id) { - return - } - - debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}", - type_id); - - let inherent_impls = self.sess.cstore.inherent_implementations_for_type(type_id); - for &impl_def_id in &inherent_impls { - // Store the implementation info. - let impl_items = self.sess.cstore.impl_items(impl_def_id); - self.impl_items.borrow_mut().insert(impl_def_id, impl_items); - } - - self.inherent_impls.borrow_mut().insert(type_id, Rc::new(inherent_impls)); - self.populated_external_types.borrow_mut().insert(type_id); - } - - /// Populates the type context with all the implementations for the given - /// trait if necessary. - pub fn populate_implementations_for_trait_if_necessary(&self, trait_id: DefId) { - if trait_id.is_local() { - return - } - - // The type is not local, hence we are reading this out of - // metadata and don't need to track edges. 
- let _ignore = self.dep_graph.in_ignore(); - - let def = self.lookup_trait_def(trait_id); - if def.flags.get().intersects(TraitFlags::IMPLS_VALID) { - return; - } - - debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def); - - if self.sess.cstore.is_defaulted_trait(trait_id) { - self.record_trait_has_default_impl(trait_id); - } - - for impl_def_id in self.sess.cstore.implementations_of_trait(trait_id) { - let impl_items = self.sess.cstore.impl_items(impl_def_id); - let trait_ref = self.impl_trait_ref(impl_def_id).unwrap(); - // Record the trait->implementation mapping. - def.record_impl(self, impl_def_id, trait_ref); - - // For any methods that use a default implementation, add them to - // the map. This is a bit unfortunate. - for impl_item_def_id in &impl_items { - let method_def_id = impl_item_def_id.def_id(); - // load impl items eagerly for convenience - // FIXME: we may want to load these lazily - self.impl_or_trait_item(method_def_id); - } - - // Store the implementation info. - self.impl_items.borrow_mut().insert(impl_def_id, impl_items); - } - - def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID); - } - - pub fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind { - Tables::closure_kind(&self.tables, self, def_id) - } - - pub fn closure_type(&self, - def_id: DefId, - substs: &ClosureSubsts<'tcx>) - -> ty::ClosureTy<'tcx> - { - Tables::closure_type(&self.tables, self, def_id, substs) - } - - /// Given the def_id of an impl, return the def_id of the trait it implements. - /// If it implements no trait, return `None`. - pub fn trait_id_of_impl(&self, def_id: DefId) -> Option { - self.impl_trait_ref(def_id).map(|tr| tr.def_id) - } - - /// If the given def ID describes a method belonging to an impl, return the - /// ID of the impl that the method belongs to. Otherwise, return `None`. 
- pub fn impl_of_method(&self, def_id: DefId) -> Option { - if def_id.krate != LOCAL_CRATE { - return match self.sess.cstore.impl_or_trait_item(self, def_id).container() { - TraitContainer(_) => None, - ImplContainer(def_id) => Some(def_id), - }; - } - match self.impl_or_trait_items.borrow().get(&def_id).cloned() { - Some(trait_item) => { - match trait_item.container() { - TraitContainer(_) => None, - ImplContainer(def_id) => Some(def_id), - } - } - None => None - } - } - - /// If the given def ID describes an item belonging to a trait (either a - /// default method or an implementation of a trait method), return the ID of - /// the trait that the method belongs to. Otherwise, return `None`. - pub fn trait_of_item(&self, def_id: DefId) -> Option { - if def_id.krate != LOCAL_CRATE { - return self.sess.cstore.trait_of_item(self, def_id); - } - match self.impl_or_trait_items.borrow().get(&def_id).cloned() { - Some(impl_or_trait_item) => { - match impl_or_trait_item.container() { - TraitContainer(def_id) => Some(def_id), - ImplContainer(def_id) => self.trait_id_of_impl(def_id), - } - } - None => None - } - } - - /// If the given def ID describes an item belonging to a trait, (either a - /// default method or an implementation of a trait method), return the ID of - /// the method inside trait definition (this means that if the given def ID - /// is already that of the original trait method, then the return value is - /// the same). - /// Otherwise, return `None`. 
- pub fn trait_item_of_item(&self, def_id: DefId) -> Option { - let impl_item = match self.impl_or_trait_items.borrow().get(&def_id) { - Some(m) => m.clone(), - None => return None, - }; - let name = impl_item.name(); - match self.trait_of_item(def_id) { - Some(trait_did) => { - self.trait_items(trait_did).iter() - .find(|item| item.name() == name) - .map(|item| item.id()) - } - None => None - } - } - - /// Construct a parameter environment suitable for static contexts or other contexts where there - /// are no free type/lifetime parameters in scope. - pub fn empty_parameter_environment<'a>(&'a self) - -> ParameterEnvironment<'a,'tcx> { - - // for an empty parameter environment, there ARE no free - // regions, so it shouldn't matter what we use for the free id - let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID); - ty::ParameterEnvironment { tcx: self, - free_substs: Substs::empty(), - caller_bounds: Vec::new(), - implicit_region_bound: ty::ReEmpty, - selection_cache: traits::SelectionCache::new(), - evaluation_cache: traits::EvaluationCache::new(), - free_id_outlive: free_id_outlive } - } - - /// Constructs and returns a substitution that can be applied to move from - /// the "outer" view of a type or method to the "inner" view. - /// In general, this means converting from bound parameters to - /// free parameters. Since we currently represent bound/free type - /// parameters in the same way, this only has an effect on regions. 
- pub fn construct_free_substs(&self, generics: &Generics<'tcx>, - free_id_outlive: CodeExtent) -> Substs<'tcx> { - // map T => T - let mut types = VecPerParamSpace::empty(); - for def in generics.types.as_slice() { - debug!("construct_parameter_environment(): push_types_from_defs: def={:?}", - def); - types.push(def.space, self.mk_param_from_def(def)); - } - - // map bound 'a => free 'a - let mut regions = VecPerParamSpace::empty(); - for def in generics.regions.as_slice() { - let region = - ReFree(FreeRegion { scope: free_id_outlive, - bound_region: BrNamed(def.def_id, def.name) }); - debug!("push_region_params {:?}", region); - regions.push(def.space, region); - } - - Substs { - types: types, - regions: subst::NonerasedRegions(regions) - } - } - - /// See `ParameterEnvironment` struct def'n for details. - /// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)` - /// for the `free_id_outlive` parameter. (But note that that is not always quite right.) - pub fn construct_parameter_environment<'a>(&'a self, - span: Span, - generics: &ty::Generics<'tcx>, - generic_predicates: &ty::GenericPredicates<'tcx>, - free_id_outlive: CodeExtent) - -> ParameterEnvironment<'a, 'tcx> - { - // - // Construct the free substs. - // - - let free_substs = self.construct_free_substs(generics, free_id_outlive); - - // - // Compute the bounds on Self and the type parameters. - // - - let bounds = generic_predicates.instantiate(self, &free_substs); - let bounds = self.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds)); - let predicates = bounds.predicates.into_vec(); - - // Finally, we have to normalize the bounds in the environment, in - // case they contain any associated type projections. This process - // can yield errors if the put in illegal associated types, like - // `::Bar` where `i32` does not implement `Foo`. 
We - // report these errors right here; this doesn't actually feel - // right to me, because constructing the environment feels like a - // kind of a "idempotent" action, but I'm not sure where would be - // a better place. In practice, we construct environments for - // every fn once during type checking, and we'll abort if there - // are any errors at that point, so after type checking you can be - // sure that this will succeed without errors anyway. - // - - let unnormalized_env = ty::ParameterEnvironment { - tcx: self, - free_substs: free_substs, - implicit_region_bound: ty::ReScope(free_id_outlive), - caller_bounds: predicates, - selection_cache: traits::SelectionCache::new(), - evaluation_cache: traits::EvaluationCache::new(), - free_id_outlive: free_id_outlive, - }; - - let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps)); - traits::normalize_param_env_or_error(unnormalized_env, cause) - } - - pub fn is_method_call(&self, expr_id: NodeId) -> bool { - self.tables.borrow().method_map.contains_key(&MethodCall::expr(expr_id)) - } - - pub fn is_overloaded_autoderef(&self, expr_id: NodeId, autoderefs: u32) -> bool { - self.tables.borrow().method_map.contains_key(&MethodCall::autoderef(expr_id, - autoderefs)) - } - - pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option { - Some(self.tables.borrow().upvar_capture_map.get(&upvar_id).unwrap().clone()) - } - - - pub fn visit_all_items_in_krate(&self, - dep_node_fn: F, - visitor: &mut V) - where F: FnMut(DefId) -> DepNode, V: Visitor<'tcx> - { - dep_graph::visit_all_items_in_krate(self, dep_node_fn, visitor); - } -} - -/// The category of explicit self. -#[derive(Clone, Copy, Eq, PartialEq, Debug)] -pub enum ExplicitSelfCategory { - Static, - ByValue, - ByReference(Region, hir::Mutability), - ByBox, -} - -/// A free variable referred to in a function. -#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] -pub struct Freevar { - /// The variable being accessed free. 
- pub def: def::Def, - - // First span where it is accessed (there can be multiple). - pub span: Span -} - -pub type FreevarMap = NodeMap>; - -pub type CaptureModeMap = NodeMap; - -// Trait method resolution -pub type TraitMap = NodeMap>; - -// Map from the NodeId of a glob import to a list of items which are actually -// imported. -pub type GlobMap = HashMap>; - -impl<'tcx> ctxt<'tcx> { - pub fn with_freevars(&self, fid: NodeId, f: F) -> T where - F: FnOnce(&[Freevar]) -> T, - { - match self.freevars.borrow().get(&fid) { - None => f(&[]), - Some(d) => f(&d[..]) - } - } - - pub fn make_substs_for_receiver_types(&self, - trait_ref: &ty::TraitRef<'tcx>, - method: &ty::Method<'tcx>) - -> subst::Substs<'tcx> - { - /*! - * Substitutes the values for the receiver's type parameters - * that are found in method, leaving the method's type parameters - * intact. - */ - - let meth_tps: Vec = - method.generics.types.get_slice(subst::FnSpace) - .iter() - .map(|def| self.mk_param_from_def(def)) - .collect(); - let meth_regions: Vec = - method.generics.regions.get_slice(subst::FnSpace) - .iter() - .map(|def| def.to_early_bound_region()) - .collect(); - trait_ref.substs.clone().with_method(meth_tps, meth_regions) - } -} diff --git a/src/librustc/middle/ty/outlives.rs b/src/librustc/middle/ty/outlives.rs deleted file mode 100644 index fc20c1bcb85fe..0000000000000 --- a/src/librustc/middle/ty/outlives.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// The outlines relation `T: 'a` or `'a: 'b`. This code frequently -// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that -// RFC for reference. 
- -use middle::infer::InferCtxt; -use middle::ty::{self, Ty, TypeFoldable}; - -#[derive(Debug)] -pub enum Component<'tcx> { - Region(ty::Region), - Param(ty::ParamTy), - UnresolvedInferenceVariable(ty::InferTy), - - // Projections like `T::Foo` are tricky because a constraint like - // `T::Foo: 'a` can be satisfied in so many ways. There may be a - // where-clause that says `T::Foo: 'a`, or the defining trait may - // include a bound like `type Foo: 'static`, or -- in the most - // conservative way -- we can prove that `T: 'a` (more generally, - // that all components in the projection outlive `'a`). This code - // is not in a position to judge which is the best technique, so - // we just product the projection as a component and leave it to - // the consumer to decide (but see `EscapingProjection` below). - Projection(ty::ProjectionTy<'tcx>), - - // In the case where a projection has escaping regions -- meaning - // regions bound within the type itself -- we always use - // the most conservative rule, which requires that all components - // outlive the bound. So for example if we had a type like this: - // - // for<'a> Trait1< >::Foo > - // ~~~~~~~~~~~~~~~~~~~~~~~~~ - // - // then the inner projection (underlined) has an escaping region - // `'a`. We consider that outer trait `'c` to meet a bound if `'b` - // outlives `'b: 'c`, and we don't consider whether the trait - // declares that `Foo: 'static` etc. Therefore, we just return the - // free components of such a projection (in this case, `'b`). - // - // However, in the future, we may want to get smarter, and - // actually return a "higher-ranked projection" here. Therefore, - // we mark that these components are part of an escaping - // projection, so that implied bounds code can avoid relying on - // them. This gives us room to improve the regionck reasoning in - // the future without breaking backwards compat. 
- EscapingProjection(Vec>), -} - -/// Returns all the things that must outlive `'a` for the condition -/// `ty0: 'a` to hold. -pub fn components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - ty0: Ty<'tcx>) - -> Vec> { - let mut components = vec![]; - compute_components(infcx, ty0, &mut components); - debug!("components({:?}) = {:?}", ty0, components); - components -} - -fn compute_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - ty: Ty<'tcx>, - out: &mut Vec>) { - // Descend through the types, looking for the various "base" - // components and collecting them into `out`. This is not written - // with `collect()` because of the need to sometimes skip subtrees - // in the `subtys` iterator (e.g., when encountering a - // projection). - match ty.sty { - ty::TyClosure(_, ref substs) => { - // FIXME(#27086). We do not accumulate from substs, since they - // don't represent reachable data. This means that, in - // practice, some of the lifetime parameters might not - // be in scope when the body runs, so long as there is - // no reachable data with that lifetime. For better or - // worse, this is consistent with fn types, however, - // which can also encapsulate data in this fashion - // (though it's somewhat harder, and typically - // requires virtual dispatch). - // - // Note that changing this (in a naive way, at least) - // causes regressions for what appears to be perfectly - // reasonable code like this: - // - // ``` - // fn foo<'a>(p: &Data<'a>) { - // bar(|q: &mut Parser| q.read_addr()) - // } - // fn bar(p: Box) { - // } - // ``` - // - // Note that `p` (and `'a`) are not used in the - // closure at all, but to meet the requirement that - // the closure type `C: 'static` (so it can be coerced - // to the object type), we get the requirement that - // `'a: 'static` since `'a` appears in the closure - // type `C`. 
- // - // A smarter fix might "prune" unused `func_substs` -- - // this would avoid breaking simple examples like - // this, but would still break others (which might - // indeed be invalid, depending on your POV). Pruning - // would be a subtle process, since we have to see - // what func/type parameters are used and unused, - // taking into consideration UFCS and so forth. - - for &upvar_ty in &substs.upvar_tys { - compute_components(infcx, upvar_ty, out); - } - } - - // OutlivesTypeParameterEnv -- the actual checking that `X:'a` - // is implied by the environment is done in regionck. - ty::TyParam(p) => { - out.push(Component::Param(p)); - } - - // For projections, we prefer to generate an obligation like - // `>::Foo: 'a`, because this gives the - // regionck more ways to prove that it holds. However, - // regionck is not (at least currently) prepared to deal with - // higher-ranked regions that may appear in the - // trait-ref. Therefore, if we see any higher-ranke regions, - // we simply fallback to the most restrictive rule, which - // requires that `Pi: 'a` for all `i`. - ty::TyProjection(ref data) => { - if !data.has_escaping_regions() { - // best case: no escaping regions, so push the - // projection and skip the subtree (thus generating no - // constraints for Pi). This defers the choice between - // the rules OutlivesProjectionEnv, - // OutlivesProjectionTraitDef, and - // OutlivesProjectionComponents to regionck. - out.push(Component::Projection(*data)); - } else { - // fallback case: hard code - // OutlivesProjectionComponents. Continue walking - // through and constrain Pi. - let subcomponents = capture_components(infcx, ty); - out.push(Component::EscapingProjection(subcomponents)); - } - } - - // If we encounter an inference variable, try to resolve it - // and proceed with resolved version. If we cannot resolve it, - // then record the unresolved variable as a component. 
- ty::TyInfer(_) => { - let ty = infcx.resolve_type_vars_if_possible(&ty); - if let ty::TyInfer(infer_ty) = ty.sty { - out.push(Component::UnresolvedInferenceVariable(infer_ty)); - } else { - compute_components(infcx, ty, out); - } - } - - // Most types do not introduce any region binders, nor - // involve any other subtle cases, and so the WF relation - // simply constraints any regions referenced directly by - // the type and then visits the types that are lexically - // contained within. (The comments refer to relevant rules - // from RFC1214.) - ty::TyBool | // OutlivesScalar - ty::TyChar | // OutlivesScalar - ty::TyInt(..) | // OutlivesScalar - ty::TyUint(..) | // OutlivesScalar - ty::TyFloat(..) | // OutlivesScalar - ty::TyEnum(..) | // OutlivesNominalType - ty::TyStruct(..) | // OutlivesNominalType - ty::TyBox(..) | // OutlivesNominalType (ish) - ty::TyStr | // OutlivesScalar (ish) - ty::TyArray(..) | // ... - ty::TySlice(..) | // ... - ty::TyRawPtr(..) | // ... - ty::TyRef(..) | // OutlivesReference - ty::TyTuple(..) | // ... - ty::TyBareFn(..) | // OutlivesFunction (*) - ty::TyTrait(..) | // OutlivesObject, OutlivesFragment (*) - ty::TyError => { - // (*) Bare functions and traits are both binders. In the - // RFC, this means we would add the bound regions to the - // "bound regions list". In our representation, no such - // list is maintained explicitly, because bound regions - // themselves can be readily identified. 
- - push_region_constraints(out, ty.regions()); - for subty in ty.walk_shallow() { - compute_components(infcx, subty, out); - } - } - } -} - -fn capture_components<'a,'tcx>(infcx: &InferCtxt<'a,'tcx>, - ty: Ty<'tcx>) - -> Vec> { - let mut temp = vec![]; - push_region_constraints(&mut temp, ty.regions()); - for subty in ty.walk_shallow() { - compute_components(infcx, subty, &mut temp); - } - temp -} - -fn push_region_constraints<'tcx>(out: &mut Vec>, regions: Vec) { - for r in regions { - if !r.is_bound() { - out.push(Component::Region(r)); - } - } -} diff --git a/src/librustc/middle/ty/relate.rs b/src/librustc/middle/ty/relate.rs deleted file mode 100644 index 46bc13bd5988b..0000000000000 --- a/src/librustc/middle/ty/relate.rs +++ /dev/null @@ -1,684 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Generalized type relating mechanism. A type relation R relates a -//! pair of values (A, B). A and B are usually types or regions but -//! can be other things. Examples of type relations are subtyping, -//! type equality, etc. - -use middle::def_id::DefId; -use middle::subst::{ErasedRegions, NonerasedRegions, ParamSpace, Substs}; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::error::{ExpectedFound, TypeError}; -use std::rc::Rc; -use syntax::abi; -use rustc_front::hir as ast; - -pub type RelateResult<'tcx, T> = Result>; - -#[derive(Clone, Debug)] -pub enum Cause { - ExistentialRegionBound, // relating an existential region bound -} - -pub trait TypeRelation<'a,'tcx> : Sized { - fn tcx(&self) -> &'a ty::ctxt<'tcx>; - - /// Returns a static string we can use for printouts. 
- fn tag(&self) -> &'static str; - - /// Returns true if the value `a` is the "expected" type in the - /// relation. Just affects error messages. - fn a_is_expected(&self) -> bool; - - fn with_cause(&mut self, _cause: Cause, f: F) -> R - where F: FnOnce(&mut Self) -> R - { - f(self) - } - - /// Generic relation routine suitable for most anything. - fn relate>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> { - Relate::relate(self, a, b) - } - - /// Relete elements of two slices pairwise. - fn relate_zip>(&mut self, a: &[T], b: &[T]) -> RelateResult<'tcx, Vec> { - assert_eq!(a.len(), b.len()); - a.iter().zip(b).map(|(a, b)| self.relate(a, b)).collect() - } - - /// Switch variance for the purpose of relating `a` and `b`. - fn relate_with_variance>(&mut self, - variance: ty::Variance, - a: &T, - b: &T) - -> RelateResult<'tcx, T>; - - // Overrideable relations. You shouldn't typically call these - // directly, instead call `relate()`, which in turn calls - // these. This is both more uniform but also allows us to add - // additional hooks for other types in the future if needed - // without making older code, which called `relate`, obsolete. 
- - fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) - -> RelateResult<'tcx, Ty<'tcx>>; - - fn regions(&mut self, a: ty::Region, b: ty::Region) - -> RelateResult<'tcx, ty::Region>; - - fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where T: Relate<'a,'tcx>; -} - -pub trait Relate<'a,'tcx>: TypeFoldable<'tcx> { - fn relate>(relation: &mut R, - a: &Self, - b: &Self) - -> RelateResult<'tcx, Self>; -} - -/////////////////////////////////////////////////////////////////////////// -// Relate impls - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TypeAndMut<'tcx> { - fn relate(relation: &mut R, - a: &ty::TypeAndMut<'tcx>, - b: &ty::TypeAndMut<'tcx>) - -> RelateResult<'tcx, ty::TypeAndMut<'tcx>> - where R: TypeRelation<'a,'tcx> - { - debug!("{}.mts({:?}, {:?})", - relation.tag(), - a, - b); - if a.mutbl != b.mutbl { - Err(TypeError::Mutability) - } else { - let mutbl = a.mutbl; - let variance = match mutbl { - ast::MutImmutable => ty::Covariant, - ast::MutMutable => ty::Invariant, - }; - let ty = try!(relation.relate_with_variance(variance, &a.ty, &b.ty)); - Ok(ty::TypeAndMut {ty: ty, mutbl: mutbl}) - } - } -} - -// substitutions are not themselves relatable without more context, -// but they is an important subroutine for things that ARE relatable, -// like traits etc. 
-fn relate_item_substs<'a,'tcx:'a,R>(relation: &mut R, - item_def_id: DefId, - a_subst: &Substs<'tcx>, - b_subst: &Substs<'tcx>) - -> RelateResult<'tcx, Substs<'tcx>> - where R: TypeRelation<'a,'tcx> -{ - debug!("substs: item_def_id={:?} a_subst={:?} b_subst={:?}", - item_def_id, - a_subst, - b_subst); - - let variances; - let opt_variances = if relation.tcx().variance_computed.get() { - variances = relation.tcx().item_variances(item_def_id); - Some(&*variances) - } else { - None - }; - relate_substs(relation, opt_variances, a_subst, b_subst) -} - -fn relate_substs<'a,'tcx:'a,R>(relation: &mut R, - variances: Option<&ty::ItemVariances>, - a_subst: &Substs<'tcx>, - b_subst: &Substs<'tcx>) - -> RelateResult<'tcx, Substs<'tcx>> - where R: TypeRelation<'a,'tcx> -{ - let mut substs = Substs::empty(); - - for &space in &ParamSpace::all() { - let a_tps = a_subst.types.get_slice(space); - let b_tps = b_subst.types.get_slice(space); - let t_variances = variances.map(|v| v.types.get_slice(space)); - let tps = try!(relate_type_params(relation, t_variances, a_tps, b_tps)); - substs.types.replace(space, tps); - } - - match (&a_subst.regions, &b_subst.regions) { - (&ErasedRegions, _) | (_, &ErasedRegions) => { - substs.regions = ErasedRegions; - } - - (&NonerasedRegions(ref a), &NonerasedRegions(ref b)) => { - for &space in &ParamSpace::all() { - let a_regions = a.get_slice(space); - let b_regions = b.get_slice(space); - let r_variances = variances.map(|v| v.regions.get_slice(space)); - let regions = try!(relate_region_params(relation, - r_variances, - a_regions, - b_regions)); - substs.mut_regions().replace(space, regions); - } - } - } - - Ok(substs) -} - -fn relate_type_params<'a,'tcx:'a,R>(relation: &mut R, - variances: Option<&[ty::Variance]>, - a_tys: &[Ty<'tcx>], - b_tys: &[Ty<'tcx>]) - -> RelateResult<'tcx, Vec>> - where R: TypeRelation<'a,'tcx> -{ - if a_tys.len() != b_tys.len() { - return Err(TypeError::TyParamSize(expected_found(relation, - &a_tys.len(), - 
&b_tys.len()))); - } - - (0 .. a_tys.len()) - .map(|i| { - let a_ty = a_tys[i]; - let b_ty = b_tys[i]; - let v = variances.map_or(ty::Invariant, |v| v[i]); - relation.relate_with_variance(v, &a_ty, &b_ty) - }) - .collect() -} - -fn relate_region_params<'a,'tcx:'a,R>(relation: &mut R, - variances: Option<&[ty::Variance]>, - a_rs: &[ty::Region], - b_rs: &[ty::Region]) - -> RelateResult<'tcx, Vec> - where R: TypeRelation<'a,'tcx> -{ - let num_region_params = a_rs.len(); - - debug!("relate_region_params(a_rs={:?}, \ - b_rs={:?}, variances={:?})", - a_rs, - b_rs, - variances); - - assert_eq!(num_region_params, - variances.map_or(num_region_params, - |v| v.len())); - - assert_eq!(num_region_params, b_rs.len()); - - (0..a_rs.len()) - .map(|i| { - let a_r = a_rs[i]; - let b_r = b_rs[i]; - let variance = variances.map_or(ty::Invariant, |v| v[i]); - relation.relate_with_variance(variance, &a_r, &b_r) - }) - .collect() -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BareFnTy<'tcx> { - fn relate(relation: &mut R, - a: &ty::BareFnTy<'tcx>, - b: &ty::BareFnTy<'tcx>) - -> RelateResult<'tcx, ty::BareFnTy<'tcx>> - where R: TypeRelation<'a,'tcx> - { - let unsafety = try!(relation.relate(&a.unsafety, &b.unsafety)); - let abi = try!(relation.relate(&a.abi, &b.abi)); - let sig = try!(relation.relate(&a.sig, &b.sig)); - Ok(ty::BareFnTy {unsafety: unsafety, - abi: abi, - sig: sig}) - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::FnSig<'tcx> { - fn relate(relation: &mut R, - a: &ty::FnSig<'tcx>, - b: &ty::FnSig<'tcx>) - -> RelateResult<'tcx, ty::FnSig<'tcx>> - where R: TypeRelation<'a,'tcx> - { - if a.variadic != b.variadic { - return Err(TypeError::VariadicMismatch( - expected_found(relation, &a.variadic, &b.variadic))); - } - - let inputs = try!(relate_arg_vecs(relation, - &a.inputs, - &b.inputs)); - - let output = try!(match (a.output, b.output) { - (ty::FnConverging(a_ty), ty::FnConverging(b_ty)) => - Ok(ty::FnConverging(try!(relation.relate(&a_ty, &b_ty)))), - (ty::FnDiverging, 
ty::FnDiverging) => - Ok(ty::FnDiverging), - (a, b) => - Err(TypeError::ConvergenceMismatch( - expected_found(relation, &(a != ty::FnDiverging), &(b != ty::FnDiverging)))), - }); - - return Ok(ty::FnSig {inputs: inputs, - output: output, - variadic: a.variadic}); - } -} - -fn relate_arg_vecs<'a,'tcx:'a,R>(relation: &mut R, - a_args: &[Ty<'tcx>], - b_args: &[Ty<'tcx>]) - -> RelateResult<'tcx, Vec>> - where R: TypeRelation<'a,'tcx> -{ - if a_args.len() != b_args.len() { - return Err(TypeError::ArgCount); - } - - a_args.iter().zip(b_args) - .map(|(a, b)| relation.relate_with_variance(ty::Contravariant, a, b)) - .collect() -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ast::Unsafety { - fn relate(relation: &mut R, - a: &ast::Unsafety, - b: &ast::Unsafety) - -> RelateResult<'tcx, ast::Unsafety> - where R: TypeRelation<'a,'tcx> - { - if a != b { - Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b))) - } else { - Ok(*a) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for abi::Abi { - fn relate(relation: &mut R, - a: &abi::Abi, - b: &abi::Abi) - -> RelateResult<'tcx, abi::Abi> - where R: TypeRelation<'a,'tcx> - { - if a == b { - Ok(*a) - } else { - Err(TypeError::AbiMismatch(expected_found(relation, a, b))) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionTy<'tcx> { - fn relate(relation: &mut R, - a: &ty::ProjectionTy<'tcx>, - b: &ty::ProjectionTy<'tcx>) - -> RelateResult<'tcx, ty::ProjectionTy<'tcx>> - where R: TypeRelation<'a,'tcx> - { - if a.item_name != b.item_name { - Err(TypeError::ProjectionNameMismatched( - expected_found(relation, &a.item_name, &b.item_name))) - } else { - let trait_ref = try!(relation.relate(&a.trait_ref, &b.trait_ref)); - Ok(ty::ProjectionTy { trait_ref: trait_ref, item_name: a.item_name }) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ProjectionPredicate<'tcx> { - fn relate(relation: &mut R, - a: &ty::ProjectionPredicate<'tcx>, - b: &ty::ProjectionPredicate<'tcx>) - -> RelateResult<'tcx, 
ty::ProjectionPredicate<'tcx>> - where R: TypeRelation<'a,'tcx> - { - let projection_ty = try!(relation.relate(&a.projection_ty, &b.projection_ty)); - let ty = try!(relation.relate(&a.ty, &b.ty)); - Ok(ty::ProjectionPredicate { projection_ty: projection_ty, ty: ty }) - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for Vec> { - fn relate(relation: &mut R, - a: &Vec>, - b: &Vec>) - -> RelateResult<'tcx, Vec>> - where R: TypeRelation<'a,'tcx> - { - // To be compatible, `a` and `b` must be for precisely the - // same set of traits and item names. We always require that - // projection bounds lists are sorted by trait-def-id and item-name, - // so we can just iterate through the lists pairwise, so long as they are the - // same length. - if a.len() != b.len() { - Err(TypeError::ProjectionBoundsLength(expected_found(relation, &a.len(), &b.len()))) - } else { - a.iter().zip(b) - .map(|(a, b)| relation.relate(a, b)) - .collect() - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ExistentialBounds<'tcx> { - fn relate(relation: &mut R, - a: &ty::ExistentialBounds<'tcx>, - b: &ty::ExistentialBounds<'tcx>) - -> RelateResult<'tcx, ty::ExistentialBounds<'tcx>> - where R: TypeRelation<'a,'tcx> - { - let r = - try!(relation.with_cause( - Cause::ExistentialRegionBound, - |relation| relation.relate_with_variance(ty::Contravariant, - &a.region_bound, - &b.region_bound))); - let nb = try!(relation.relate(&a.builtin_bounds, &b.builtin_bounds)); - let pb = try!(relation.relate(&a.projection_bounds, &b.projection_bounds)); - Ok(ty::ExistentialBounds { region_bound: r, - builtin_bounds: nb, - projection_bounds: pb }) - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::BuiltinBounds { - fn relate(relation: &mut R, - a: &ty::BuiltinBounds, - b: &ty::BuiltinBounds) - -> RelateResult<'tcx, ty::BuiltinBounds> - where R: TypeRelation<'a,'tcx> - { - // Two sets of builtin bounds are only relatable if they are - // precisely the same (but see the coercion code). 
- if a != b { - Err(TypeError::BuiltinBoundsMismatch(expected_found(relation, a, b))) - } else { - Ok(*a) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::TraitRef<'tcx> { - fn relate(relation: &mut R, - a: &ty::TraitRef<'tcx>, - b: &ty::TraitRef<'tcx>) - -> RelateResult<'tcx, ty::TraitRef<'tcx>> - where R: TypeRelation<'a,'tcx> - { - // Different traits cannot be related - if a.def_id != b.def_id { - Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id))) - } else { - let substs = try!(relate_item_substs(relation, a.def_id, a.substs, b.substs)); - Ok(ty::TraitRef { def_id: a.def_id, substs: relation.tcx().mk_substs(substs) }) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for Ty<'tcx> { - fn relate(relation: &mut R, - a: &Ty<'tcx>, - b: &Ty<'tcx>) - -> RelateResult<'tcx, Ty<'tcx>> - where R: TypeRelation<'a,'tcx> - { - relation.tys(a, b) - } -} - -/// The main "type relation" routine. Note that this does not handle -/// inference artifacts, so you should filter those out before calling -/// it. -pub fn super_relate_tys<'a,'tcx:'a,R>(relation: &mut R, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> RelateResult<'tcx, Ty<'tcx>> - where R: TypeRelation<'a,'tcx> -{ - let tcx = relation.tcx(); - let a_sty = &a.sty; - let b_sty = &b.sty; - debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty); - match (a_sty, b_sty) { - (&ty::TyInfer(_), _) | - (_, &ty::TyInfer(_)) => - { - // The caller should handle these cases! 
- tcx.sess.bug("var types encountered in super_relate_tys") - } - - (&ty::TyError, _) | (_, &ty::TyError) => - { - Ok(tcx.types.err) - } - - (&ty::TyChar, _) | - (&ty::TyBool, _) | - (&ty::TyInt(_), _) | - (&ty::TyUint(_), _) | - (&ty::TyFloat(_), _) | - (&ty::TyStr, _) - if a == b => - { - Ok(a) - } - - (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p)) - if a_p.idx == b_p.idx && a_p.space == b_p.space => - { - Ok(a) - } - - (&ty::TyEnum(a_def, a_substs), &ty::TyEnum(b_def, b_substs)) - if a_def == b_def => - { - let substs = try!(relate_item_substs(relation, a_def.did, a_substs, b_substs)); - Ok(tcx.mk_enum(a_def, tcx.mk_substs(substs))) - } - - (&ty::TyTrait(ref a_), &ty::TyTrait(ref b_)) => - { - let principal = try!(relation.relate(&a_.principal, &b_.principal)); - let bounds = try!(relation.relate(&a_.bounds, &b_.bounds)); - Ok(tcx.mk_trait(principal, bounds)) - } - - (&ty::TyStruct(a_def, a_substs), &ty::TyStruct(b_def, b_substs)) - if a_def == b_def => - { - let substs = try!(relate_item_substs(relation, a_def.did, a_substs, b_substs)); - Ok(tcx.mk_struct(a_def, tcx.mk_substs(substs))) - } - - (&ty::TyClosure(a_id, ref a_substs), - &ty::TyClosure(b_id, ref b_substs)) - if a_id == b_id => - { - // All TyClosure types with the same id represent - // the (anonymous) type of the same closure expression. So - // all of their regions should be equated. 
- let substs = try!(relation.relate(a_substs, b_substs)); - Ok(tcx.mk_closure_from_closure_substs(a_id, substs)) - } - - (&ty::TyBox(a_inner), &ty::TyBox(b_inner)) => - { - let typ = try!(relation.relate(&a_inner, &b_inner)); - Ok(tcx.mk_box(typ)) - } - - (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) => - { - let mt = try!(relation.relate(a_mt, b_mt)); - Ok(tcx.mk_ptr(mt)) - } - - (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) => - { - let r = try!(relation.relate_with_variance(ty::Contravariant, a_r, b_r)); - let mt = try!(relation.relate(a_mt, b_mt)); - Ok(tcx.mk_ref(tcx.mk_region(r), mt)) - } - - (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) => - { - let t = try!(relation.relate(&a_t, &b_t)); - if sz_a == sz_b { - Ok(tcx.mk_array(t, sz_a)) - } else { - Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b))) - } - } - - (&ty::TySlice(a_t), &ty::TySlice(b_t)) => - { - let t = try!(relation.relate(&a_t, &b_t)); - Ok(tcx.mk_slice(t)) - } - - (&ty::TyTuple(ref as_), &ty::TyTuple(ref bs)) => - { - if as_.len() == bs.len() { - let ts = try!(as_.iter().zip(bs) - .map(|(a, b)| relation.relate(a, b)) - .collect::>()); - Ok(tcx.mk_tup(ts)) - } else if !(as_.is_empty() || bs.is_empty()) { - Err(TypeError::TupleSize( - expected_found(relation, &as_.len(), &bs.len()))) - } else { - Err(TypeError::Sorts(expected_found(relation, &a, &b))) - } - } - - (&ty::TyBareFn(a_opt_def_id, a_fty), &ty::TyBareFn(b_opt_def_id, b_fty)) - if a_opt_def_id == b_opt_def_id => - { - let fty = try!(relation.relate(a_fty, b_fty)); - Ok(tcx.mk_fn(a_opt_def_id, tcx.mk_bare_fn(fty))) - } - - (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) => - { - let projection_ty = try!(relation.relate(a_data, b_data)); - Ok(tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name)) - } - - _ => - { - Err(TypeError::Sorts(expected_found(relation, &a, &b))) - } - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::ClosureSubsts<'tcx> { - fn relate(relation: &mut 
R, - a: &ty::ClosureSubsts<'tcx>, - b: &ty::ClosureSubsts<'tcx>) - -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> - where R: TypeRelation<'a,'tcx> - { - let func_substs = try!(relate_substs(relation, None, a.func_substs, b.func_substs)); - let upvar_tys = try!(relation.relate_zip(&a.upvar_tys, &b.upvar_tys)); - Ok(ty::ClosureSubsts { func_substs: relation.tcx().mk_substs(func_substs), - upvar_tys: upvar_tys }) - } -} - -impl<'a,'tcx:'a> Relate<'a,'tcx> for ty::Region { - fn relate(relation: &mut R, - a: &ty::Region, - b: &ty::Region) - -> RelateResult<'tcx, ty::Region> - where R: TypeRelation<'a,'tcx> - { - relation.regions(*a, *b) - } -} - -impl<'a,'tcx:'a,T> Relate<'a,'tcx> for ty::Binder - where T: Relate<'a,'tcx> -{ - fn relate(relation: &mut R, - a: &ty::Binder, - b: &ty::Binder) - -> RelateResult<'tcx, ty::Binder> - where R: TypeRelation<'a,'tcx> - { - relation.binders(a, b) - } -} - -impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Rc - where T: Relate<'a,'tcx> -{ - fn relate(relation: &mut R, - a: &Rc, - b: &Rc) - -> RelateResult<'tcx, Rc> - where R: TypeRelation<'a,'tcx> - { - let a: &T = a; - let b: &T = b; - Ok(Rc::new(try!(relation.relate(a, b)))) - } -} - -impl<'a,'tcx:'a,T> Relate<'a,'tcx> for Box - where T: Relate<'a,'tcx> -{ - fn relate(relation: &mut R, - a: &Box, - b: &Box) - -> RelateResult<'tcx, Box> - where R: TypeRelation<'a,'tcx> - { - let a: &T = a; - let b: &T = b; - Ok(Box::new(try!(relation.relate(a, b)))) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Error handling - -pub fn expected_found<'a,'tcx:'a,R,T>(relation: &mut R, - a: &T, - b: &T) - -> ExpectedFound - where R: TypeRelation<'a,'tcx>, T: Clone -{ - expected_found_bool(relation.a_is_expected(), a, b) -} - -pub fn expected_found_bool(a_is_expected: bool, - a: &T, - b: &T) - -> ExpectedFound - where T: Clone -{ - let a = a.clone(); - let b = b.clone(); - if a_is_expected { - ExpectedFound {expected: a, found: b} - } else { - ExpectedFound {expected: 
b, found: a} - } -} diff --git a/src/librustc/middle/ty/structural_impls.rs b/src/librustc/middle/ty/structural_impls.rs deleted file mode 100644 index 01b2bd36b4f07..0000000000000 --- a/src/librustc/middle/ty/structural_impls.rs +++ /dev/null @@ -1,794 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::subst::{self, VecPerParamSpace}; -use middle::traits; -use middle::ty::{self, Lift, TraitRef, Ty}; -use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; - -use std::rc::Rc; -use syntax::abi; -use syntax::ptr::P; - -use rustc_front::hir; - -/////////////////////////////////////////////////////////////////////////// -// Lift implementations - -impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { - type Lifted = (A::Lifted, B::Lifted); - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option { - tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b))) - } -} - -impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] { - type Lifted = Vec; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option { - // type annotation needed to inform `projection_must_outlive` - let mut result : Vec<>::Lifted> - = Vec::with_capacity(self.len()); - for x in self { - if let Some(value) = tcx.lift(x) { - result.push(value); - } else { - return None; - } - } - Some(result) - } -} - -impl<'tcx> Lift<'tcx> for ty::Region { - type Lifted = Self; - fn lift_to_tcx(&self, _: &ty::ctxt<'tcx>) -> Option { - Some(*self) - } -} - -impl<'a, 'tcx> Lift<'tcx> for TraitRef<'a> { - type Lifted = TraitRef<'tcx>; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option> { - tcx.lift(&self.substs).map(|substs| TraitRef { - def_id: self.def_id, - substs: substs - 
}) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { - type Lifted = ty::TraitPredicate<'tcx>; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option> { - tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate { - trait_ref: trait_ref - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> { - type Lifted = ty::EquatePredicate<'tcx>; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option> { - tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b)) - } -} - -impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate { - type Lifted = ty::OutlivesPredicate; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option { - tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b)) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> { - type Lifted = ty::ProjectionPredicate<'tcx>; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option> { - tcx.lift(&(self.projection_ty.trait_ref, self.ty)).map(|(trait_ref, ty)| { - ty::ProjectionPredicate { - projection_ty: ty::ProjectionTy { - trait_ref: trait_ref, - item_name: self.projection_ty.item_name - }, - ty: ty - } - }) - } -} - -impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder { - type Lifted = ty::Binder; - fn lift_to_tcx(&self, tcx: &ty::ctxt<'tcx>) -> Option { - tcx.lift(&self.0).map(|x| ty::Binder(x)) - } -} - -/////////////////////////////////////////////////////////////////////////// -// TypeFoldable implementations. -// -// Ideally, each type should invoke `folder.fold_foo(self)` and -// nothing else. In some cases, though, we haven't gotten around to -// adding methods on the `folder` yet, and thus the folding is -// hard-coded here. This is less-flexible, because folders cannot -// override the behavior, but there are a lot of random types and one -// can easily refactor the folding into the TypeFolder trait as -// needed. - -macro_rules! 
CopyImpls { - ($($ty:ty),+) => { - $( - impl<'tcx> TypeFoldable<'tcx> for $ty { - fn super_fold_with>(&self, _: &mut F) -> $ty { - *self - } - - fn super_visit_with>(&self, _: &mut F) -> bool { - false - } - } - )+ - } -} - -CopyImpls! { (), hir::Unsafety, abi::Abi } - -impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { - fn super_fold_with>(&self, folder: &mut F) -> (T, U) { - (self.0.fold_with(folder), self.1.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.as_ref().map(|t| t.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|t| t.visit_with(visitor)) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc { - fn super_fold_with>(&self, folder: &mut F) -> Self { - Rc::new((**self).fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - (**self).visit_with(visitor) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let content: T = (**self).fold_with(folder); - box content - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - (**self).visit_with(visitor) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect() - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|t| t.visit_with(visitor)) - } -} - -impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { - fn super_fold_with>(&self, folder: &mut F) -> Self { - folder.enter_region_binder(); - let result = ty::Binder(self.0.fold_with(folder)); - folder.exit_region_binder(); - result - } - - fn fold_with>(&self, 
folder: &mut F) -> Self { - folder.fold_binder(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - visitor.enter_region_binder(); - if self.0.visit_with(visitor) { return true } - visitor.exit_region_binder(); - false - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for P<[T]> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - self.iter().map(|t| t.fold_with(folder)).collect() - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.iter().any(|t| t.visit_with(visitor)) - } -} - -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for VecPerParamSpace { - fn super_fold_with>(&self, folder: &mut F) -> Self { - - // Things in the Fn space take place under an additional level - // of region binding relative to the other spaces. This is - // because those entries are attached to a method, and methods - // always introduce a level of region binding. - - let result = self.map_enumerated(|(space, index, elem)| { - if space == subst::FnSpace && index == 0 { - // enter new level when/if we reach the first thing in fn space - folder.enter_region_binder(); - } - elem.fold_with(folder) - }); - if result.len(subst::FnSpace) > 0 { - // if there was anything in fn space, exit the region binding level - folder.exit_region_binder(); - } - result - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - let mut entered_region_binder = false; - let result = self.iter_enumerated().any(|(space, index, t)| { - if space == subst::FnSpace && index == 0 { - visitor.enter_region_binder(); - entered_region_binder = true; - } - t.visit_with(visitor) - }); - if entered_region_binder { - visitor.exit_region_binder(); - } - result - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TraitTy<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::TraitTy { - principal: self.principal.fold_with(folder), - bounds: self.bounds.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - 
self.principal.visit_with(visitor) || self.bounds.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let sty = match self.sty { - ty::TyBox(typ) => ty::TyBox(typ.fold_with(folder)), - ty::TyRawPtr(ref tm) => ty::TyRawPtr(tm.fold_with(folder)), - ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz), - ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)), - ty::TyEnum(tid, ref substs) => { - let substs = substs.fold_with(folder); - ty::TyEnum(tid, folder.tcx().mk_substs(substs)) - } - ty::TyTrait(ref trait_ty) => ty::TyTrait(trait_ty.fold_with(folder)), - ty::TyTuple(ref ts) => ty::TyTuple(ts.fold_with(folder)), - ty::TyBareFn(opt_def_id, ref f) => { - let bfn = f.fold_with(folder); - ty::TyBareFn(opt_def_id, folder.tcx().mk_bare_fn(bfn)) - } - ty::TyRef(r, ref tm) => { - let r = r.fold_with(folder); - ty::TyRef(folder.tcx().mk_region(r), tm.fold_with(folder)) - } - ty::TyStruct(did, ref substs) => { - let substs = substs.fold_with(folder); - ty::TyStruct(did, folder.tcx().mk_substs(substs)) - } - ty::TyClosure(did, ref substs) => { - ty::TyClosure(did, substs.fold_with(folder)) - } - ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)), - ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) | - ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) | - ty::TyParam(..) 
=> self.sty.clone(), - }; - folder.tcx().mk_ty(sty) - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_ty(*self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match self.sty { - ty::TyBox(typ) => typ.visit_with(visitor), - ty::TyRawPtr(ref tm) => tm.visit_with(visitor), - ty::TyArray(typ, _sz) => typ.visit_with(visitor), - ty::TySlice(typ) => typ.visit_with(visitor), - ty::TyEnum(_tid, ref substs) => substs.visit_with(visitor), - ty::TyTrait(ref trait_ty) => trait_ty.visit_with(visitor), - ty::TyTuple(ref ts) => ts.visit_with(visitor), - ty::TyBareFn(_opt_def_id, ref f) => f.visit_with(visitor), - ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor), - ty::TyStruct(_did, ref substs) => substs.visit_with(visitor), - ty::TyClosure(_did, ref substs) => substs.visit_with(visitor), - ty::TyProjection(ref data) => data.visit_with(visitor), - ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) | - ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) | - ty::TyParam(..) 
=> false, - } - } - - fn visit_with>(&self, visitor: &mut V) -> bool { - visitor.visit_ty(self) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::BareFnTy<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::BareFnTy { sig: self.sig.fold_with(folder), - abi: self.abi, - unsafety: self.unsafety } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_bare_fn_ty(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.sig.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ClosureTy<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ClosureTy { - sig: self.sig.fold_with(folder), - unsafety: self.unsafety, - abi: self.abi, - } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_closure_ty(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.sig.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_mt(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::FnOutput<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - ty::FnConverging(ref ty) => ty::FnConverging(ty.fold_with(folder)), - ty::FnDiverging => ty::FnDiverging - } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_output(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::FnConverging(ref ty) => ty.visit_with(visitor), - ty::FnDiverging => false, - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::FnSig { inputs: self.inputs.fold_with(folder), - output: self.output.fold_with(folder), - variadic: self.variadic } - } - - fn 
fold_with>(&self, folder: &mut F) -> Self { - folder.fold_fn_sig(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.inputs.visit_with(visitor) || self.output.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let substs = self.substs.fold_with(folder); - ty::TraitRef { - def_id: self.def_id, - substs: folder.tcx().mk_substs(substs), - } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_trait_ref(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::Region { - fn super_fold_with>(&self, _folder: &mut F) -> Self { - *self - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_region(*self) - } - - fn super_visit_with>(&self, _visitor: &mut V) -> bool { - false - } - - fn visit_with>(&self, visitor: &mut V) -> bool { - visitor.visit_region(*self) - } -} - -impl<'tcx> TypeFoldable<'tcx> for subst::Substs<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let regions = match self.regions { - subst::ErasedRegions => subst::ErasedRegions, - subst::NonerasedRegions(ref regions) => { - subst::NonerasedRegions(regions.fold_with(folder)) - } - }; - - subst::Substs { regions: regions, - types: self.types.fold_with(folder) } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_substs(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.types.visit_with(visitor) || match self.regions { - subst::ErasedRegions => false, - subst::NonerasedRegions(ref regions) => regions.visit_with(visitor), - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - let func_substs = self.func_substs.fold_with(folder); - ty::ClosureSubsts { - func_substs: folder.tcx().mk_substs(func_substs), - upvar_tys: 
self.upvar_tys.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.func_substs.visit_with(visitor) || self.upvar_tys.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ItemSubsts { - substs: self.substs.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.substs.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoRef<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - ty::adjustment::AutoPtr(r, m) => { - let r = r.fold_with(folder); - ty::adjustment::AutoPtr(folder.tcx().mk_region(r), m) - } - ty::adjustment::AutoUnsafe(m) => ty::adjustment::AutoUnsafe(m) - } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_autoref(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::adjustment::AutoPtr(r, _m) => r.visit_with(visitor), - ty::adjustment::AutoUnsafe(_m) => false, - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::BuiltinBounds { - fn super_fold_with>(&self, _folder: &mut F) -> Self { - *self - } - - fn super_visit_with>(&self, _visitor: &mut V) -> bool { - false - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialBounds<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ExistentialBounds { - region_bound: self.region_bound.fold_with(folder), - builtin_bounds: self.builtin_bounds, - projection_bounds: self.projection_bounds.fold_with(folder), - } - } - - fn fold_with>(&self, folder: &mut F) -> Self { - folder.fold_existential_bounds(self) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.region_bound.visit_with(visitor) || self.projection_bounds.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::TypeParameterDef { - name: 
self.name, - def_id: self.def_id, - space: self.space, - index: self.index, - default: self.default.fold_with(folder), - default_def_id: self.default_def_id, - object_lifetime_default: self.object_lifetime_default.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.default.visit_with(visitor) || - self.object_lifetime_default.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - ty::ObjectLifetimeDefault::Ambiguous => - ty::ObjectLifetimeDefault::Ambiguous, - - ty::ObjectLifetimeDefault::BaseDefault => - ty::ObjectLifetimeDefault::BaseDefault, - - ty::ObjectLifetimeDefault::Specific(r) => - ty::ObjectLifetimeDefault::Specific(r.fold_with(folder)), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::ObjectLifetimeDefault::Specific(r) => r.visit_with(visitor), - _ => false, - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::RegionParameterDef { - name: self.name, - def_id: self.def_id, - space: self.space, - index: self.index, - bounds: self.bounds.fold_with(folder) - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.bounds.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::Generics { - types: self.types.fold_with(folder), - regions: self.regions.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.types.visit_with(visitor) || self.regions.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::GenericPredicates { - predicates: self.predicates.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - 
self.predicates.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - match *self { - ty::Predicate::Trait(ref a) => - ty::Predicate::Trait(a.fold_with(folder)), - ty::Predicate::Equate(ref binder) => - ty::Predicate::Equate(binder.fold_with(folder)), - ty::Predicate::RegionOutlives(ref binder) => - ty::Predicate::RegionOutlives(binder.fold_with(folder)), - ty::Predicate::TypeOutlives(ref binder) => - ty::Predicate::TypeOutlives(binder.fold_with(folder)), - ty::Predicate::Projection(ref binder) => - ty::Predicate::Projection(binder.fold_with(folder)), - ty::Predicate::WellFormed(data) => - ty::Predicate::WellFormed(data.fold_with(folder)), - ty::Predicate::ObjectSafe(trait_def_id) => - ty::Predicate::ObjectSafe(trait_def_id), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ty::Predicate::Trait(ref a) => a.visit_with(visitor), - ty::Predicate::Equate(ref binder) => binder.visit_with(visitor), - ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor), - ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor), - ty::Predicate::Projection(ref binder) => binder.visit_with(visitor), - ty::Predicate::WellFormed(data) => data.visit_with(visitor), - ty::Predicate::ObjectSafe(_trait_def_id) => false, - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ProjectionPredicate { - projection_ty: self.projection_ty.fold_with(folder), - ty: self.ty.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.projection_ty.visit_with(visitor) || self.ty.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ProjectionTy { - trait_ref: self.trait_ref.fold_with(folder), - item_name: self.item_name, - } - } - - fn 
super_visit_with>(&self, visitor: &mut V) -> bool { - self.trait_ref.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::InstantiatedPredicates<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::InstantiatedPredicates { - predicates: self.predicates.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.predicates.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::EquatePredicate(self.0.fold_with(folder), - self.1.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::TraitPredicate { - trait_ref: self.trait_ref.fold_with(folder) - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.trait_ref.visit_with(visitor) - } -} - -impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate - where T : TypeFoldable<'tcx>, - U : TypeFoldable<'tcx>, -{ - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::OutlivesPredicate(self.0.fold_with(folder), - self.1.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ClosureUpvar { - def: self.def, - span: self.span, - ty: self.ty.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.ty.visit_with(visitor) - } -} - -impl<'a, 'tcx> TypeFoldable<'tcx> for ty::ParameterEnvironment<'a, 'tcx> where 'tcx: 'a { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::ParameterEnvironment { - tcx: self.tcx, - free_substs: self.free_substs.fold_with(folder), - 
implicit_region_bound: self.implicit_region_bound.fold_with(folder), - caller_bounds: self.caller_bounds.fold_with(folder), - selection_cache: traits::SelectionCache::new(), - evaluation_cache: traits::EvaluationCache::new(), - free_id_outlive: self.free_id_outlive, - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.free_substs.visit_with(visitor) || - self.implicit_region_bound.visit_with(visitor) || - self.caller_bounds.visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::TypeScheme<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - ty::TypeScheme { - generics: self.generics.fold_with(folder), - ty: self.ty.fold_with(folder), - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.generics.visit_with(visitor) || self.ty.visit_with(visitor) - } -} diff --git a/src/librustc/middle/ty/sty.rs b/src/librustc/middle/ty/sty.rs deleted file mode 100644 index 6d40d377b7852..0000000000000 --- a/src/librustc/middle/ty/sty.rs +++ /dev/null @@ -1,1246 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
This module contains TypeVariants and its major components - -use middle::cstore; -use middle::def_id::DefId; -use middle::region; -use middle::subst::{self, Substs}; -use middle::traits; -use middle::ty::{self, AdtDef, ToPredicate, TypeFlags, Ty, TyS, TypeFoldable}; -use util::common::ErrorReported; - -use collections::enum_set::{self, EnumSet, CLike}; -use std::fmt; -use std::ops; -use std::mem; -use syntax::abi; -use syntax::ast::{self, Name}; -use syntax::parse::token::special_idents; - -use serialize::{Decodable, Decoder}; - -use rustc_front::hir; - -use self::FnOutput::*; -use self::InferTy::*; -use self::TypeVariants::*; - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub struct TypeAndMut<'tcx> { - pub ty: Ty<'tcx>, - pub mutbl: hir::Mutability, -} - -#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable, Copy)] -/// A "free" region `fr` can be interpreted as "some region -/// at least as big as the scope `fr.scope`". -pub struct FreeRegion { - pub scope: region::CodeExtent, - pub bound_region: BoundRegion -} - -#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable, Copy)] -pub enum BoundRegion { - /// An anonymous region parameter for a given fn (&T) - BrAnon(u32), - - /// Named region parameters for functions (a in &'a T) - /// - /// The def-id is needed to distinguish free regions in - /// the event of shadowing. - BrNamed(DefId, Name), - - /// Fresh bound identifiers created during GLB computations. - BrFresh(u32), - - // Anonymous region for the implicit env pointer parameter - // to a closure - BrEnv -} - -// NB: If you change this, you'll probably want to change the corresponding -// AST structure in libsyntax/ast.rs as well. -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum TypeVariants<'tcx> { - /// The primitive boolean type. Written as `bool`. - TyBool, - - /// The primitive character type; holds a Unicode scalar value - /// (a non-surrogate code point). 
Written as `char`. - TyChar, - - /// A primitive signed integer type. For example, `i32`. - TyInt(ast::IntTy), - - /// A primitive unsigned integer type. For example, `u32`. - TyUint(ast::UintTy), - - /// A primitive floating-point type. For example, `f64`. - TyFloat(ast::FloatTy), - - /// An enumerated type, defined with `enum`. - /// - /// Substs here, possibly against intuition, *may* contain `TyParam`s. - /// That is, even after substitution it is possible that there are type - /// variables. This happens when the `TyEnum` corresponds to an enum - /// definition and not a concrete use of it. To get the correct `TyEnum` - /// from the tcx, use the `NodeId` from the `ast::Ty` and look it up in - /// the `ast_ty_to_ty_cache`. This is probably true for `TyStruct` as - /// well. - TyEnum(AdtDef<'tcx>, &'tcx Substs<'tcx>), - - /// A structure type, defined with `struct`. - /// - /// See warning about substitutions for enumerated types. - TyStruct(AdtDef<'tcx>, &'tcx Substs<'tcx>), - - /// `Box`; this is nominally a struct in the documentation, but is - /// special-cased internally. For example, it is possible to implicitly - /// move the contents of a box out of that box, and methods of any type - /// can have type `Box`. - TyBox(Ty<'tcx>), - - /// The pointee of a string slice. Written as `str`. - TyStr, - - /// An array with the given length. Written as `[T; n]`. - TyArray(Ty<'tcx>, usize), - - /// The pointee of an array slice. Written as `[T]`. - TySlice(Ty<'tcx>), - - /// A raw pointer. Written as `*mut T` or `*const T` - TyRawPtr(TypeAndMut<'tcx>), - - /// A reference; a pointer with an associated lifetime. Written as - /// `&a mut T` or `&'a T`. - TyRef(&'tcx Region, TypeAndMut<'tcx>), - - /// If the def-id is Some(_), then this is the type of a specific - /// fn item. Otherwise, if None(_), it is a fn pointer type. 
- /// - /// FIXME: Conflating function pointers and the type of a - /// function is probably a terrible idea; a function pointer is a - /// value with a specific type, but a function can be polymorphic - /// or dynamically dispatched. - TyBareFn(Option, &'tcx BareFnTy<'tcx>), - - /// A trait, defined with `trait`. - TyTrait(Box>), - - /// The anonymous type of a closure. Used to represent the type of - /// `|a| a`. - TyClosure(DefId, Box>), - - /// A tuple type. For example, `(i32, bool)`. - TyTuple(Vec>), - - /// The projection of an associated type. For example, - /// `>::N`. - TyProjection(ProjectionTy<'tcx>), - - /// A type parameter; for example, `T` in `fn f(x: T) {} - TyParam(ParamTy), - - /// A type variable used during type-checking. - TyInfer(InferTy), - - /// A placeholder for a type which could not be computed; this is - /// propagated to avoid useless error messages. - TyError, -} - -/// A closure can be modeled as a struct that looks like: -/// -/// struct Closure<'l0...'li, T0...Tj, U0...Uk> { -/// upvar0: U0, -/// ... -/// upvark: Uk -/// } -/// -/// where 'l0...'li and T0...Tj are the lifetime and type parameters -/// in scope on the function that defined the closure, and U0...Uk are -/// type parameters representing the types of its upvars (borrowed, if -/// appropriate). -/// -/// So, for example, given this function: -/// -/// fn foo<'a, T>(data: &'a mut T) { -/// do(|| data.count += 1) -/// } -/// -/// the type of the closure would be something like: -/// -/// struct Closure<'a, T, U0> { -/// data: U0 -/// } -/// -/// Note that the type of the upvar is not specified in the struct. -/// You may wonder how the impl would then be able to use the upvar, -/// if it doesn't know it's type? The answer is that the impl is -/// (conceptually) not fully generic over Closure but rather tied to -/// instances with the expected upvar types: -/// -/// impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> { -/// ... 
-/// } -/// -/// You can see that the *impl* fully specified the type of the upvar -/// and thus knows full well that `data` has type `&'b mut &'a mut T`. -/// (Here, I am assuming that `data` is mut-borrowed.) -/// -/// Now, the last question you may ask is: Why include the upvar types -/// as extra type parameters? The reason for this design is that the -/// upvar types can reference lifetimes that are internal to the -/// creating function. In my example above, for example, the lifetime -/// `'b` represents the extent of the closure itself; this is some -/// subset of `foo`, probably just the extent of the call to the to -/// `do()`. If we just had the lifetime/type parameters from the -/// enclosing function, we couldn't name this lifetime `'b`. Note that -/// there can also be lifetimes in the types of the upvars themselves, -/// if one of them happens to be a reference to something that the -/// creating fn owns. -/// -/// OK, you say, so why not create a more minimal set of parameters -/// that just includes the extra lifetime parameters? The answer is -/// primarily that it would be hard --- we don't know at the time when -/// we create the closure type what the full types of the upvars are, -/// nor do we know which are borrowed and which are not. In this -/// design, we can just supply a fresh type parameter and figure that -/// out later. -/// -/// All right, you say, but why include the type parameters from the -/// original function then? The answer is that trans may need them -/// when monomorphizing, and they may not appear in the upvars. A -/// closure could capture no variables but still make use of some -/// in-scope type parameter with a bound (e.g., if our example above -/// had an extra `U: Default`, and the closure called `U::default()`). -/// -/// There is another reason. This design (implicitly) prohibits -/// closures from capturing themselves (except via a trait -/// object). 
This simplifies closure inference considerably, since it -/// means that when we infer the kind of a closure or its upvars, we -/// don't have to handle cycles where the decisions we make for -/// closure C wind up influencing the decisions we ought to make for -/// closure C (which would then require fixed point iteration to -/// handle). Plus it fixes an ICE. :P -#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] -pub struct ClosureSubsts<'tcx> { - /// Lifetime and type parameters from the enclosing function. - /// These are separated out because trans wants to pass them around - /// when monomorphizing. - pub func_substs: &'tcx Substs<'tcx>, - - /// The types of the upvars. The list parallels the freevars and - /// `upvar_borrows` lists. These are kept distinct so that we can - /// easily index into them. - pub upvar_tys: Vec> -} - -impl<'tcx> Decodable for &'tcx ClosureSubsts<'tcx> { - fn decode(s: &mut S) -> Result<&'tcx ClosureSubsts<'tcx>, S::Error> { - let closure_substs = try! { Decodable::decode(s) }; - let dummy_def_id: DefId = unsafe { mem::zeroed() }; - - cstore::tls::with_decoding_context(s, |dcx, _| { - // Intern the value - let ty = dcx.tcx().mk_closure_from_closure_substs(dummy_def_id, - Box::new(closure_substs)); - match ty.sty { - TyClosure(_, ref closure_substs) => Ok(&**closure_substs), - _ => unreachable!() - } - }) - } -} - -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct TraitTy<'tcx> { - pub principal: ty::PolyTraitRef<'tcx>, - pub bounds: ExistentialBounds<'tcx>, -} - -impl<'tcx> TraitTy<'tcx> { - pub fn principal_def_id(&self) -> DefId { - self.principal.0.def_id - } - - /// Object types don't have a self-type specified. Therefore, when - /// we convert the principal trait-ref into a normal trait-ref, - /// you must give *some* self-type. A common choice is `mk_err()` - /// or some skolemized type. 
- pub fn principal_trait_ref_with_self_ty(&self, - tcx: &ty::ctxt<'tcx>, - self_ty: Ty<'tcx>) - -> ty::PolyTraitRef<'tcx> - { - // otherwise the escaping regions would be captured by the binder - assert!(!self_ty.has_escaping_regions()); - - ty::Binder(TraitRef { - def_id: self.principal.0.def_id, - substs: tcx.mk_substs(self.principal.0.substs.with_self_ty(self_ty)), - }) - } - - pub fn projection_bounds_with_self_ty(&self, - tcx: &ty::ctxt<'tcx>, - self_ty: Ty<'tcx>) - -> Vec> - { - // otherwise the escaping regions would be captured by the binders - assert!(!self_ty.has_escaping_regions()); - - self.bounds.projection_bounds.iter() - .map(|in_poly_projection_predicate| { - let in_projection_ty = &in_poly_projection_predicate.0.projection_ty; - let substs = tcx.mk_substs(in_projection_ty.trait_ref.substs.with_self_ty(self_ty)); - let trait_ref = ty::TraitRef::new(in_projection_ty.trait_ref.def_id, - substs); - let projection_ty = ty::ProjectionTy { - trait_ref: trait_ref, - item_name: in_projection_ty.item_name - }; - ty::Binder(ty::ProjectionPredicate { - projection_ty: projection_ty, - ty: in_poly_projection_predicate.0.ty - }) - }) - .collect() - } -} - -/// A complete reference to a trait. These take numerous guises in syntax, -/// but perhaps the most recognizable form is in a where clause: -/// -/// T : Foo -/// -/// This would be represented by a trait-reference where the def-id is the -/// def-id for the trait `Foo` and the substs defines `T` as parameter 0 in the -/// `SelfSpace` and `U` as parameter 0 in the `TypeSpace`. -/// -/// Trait references also appear in object types like `Foo`, but in -/// that case the `Self` parameter is absent from the substitutions. -/// -/// Note that a `TraitRef` introduces a level of region binding, to -/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a -/// U>` or higher-ranked object types. 
-#[derive(Copy, Clone, PartialEq, Eq, Hash)] -pub struct TraitRef<'tcx> { - pub def_id: DefId, - pub substs: &'tcx Substs<'tcx>, -} - -pub type PolyTraitRef<'tcx> = Binder>; - -impl<'tcx> PolyTraitRef<'tcx> { - pub fn self_ty(&self) -> Ty<'tcx> { - self.0.self_ty() - } - - pub fn def_id(&self) -> DefId { - self.0.def_id - } - - pub fn substs(&self) -> &'tcx Substs<'tcx> { - // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> - self.0.substs - } - - pub fn input_types(&self) -> &[Ty<'tcx>] { - // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> - self.0.input_types() - } - - pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> { - // Note that we preserve binding levels - Binder(ty::TraitPredicate { trait_ref: self.0.clone() }) - } -} - -/// Binder is a binder for higher-ranked lifetimes. It is part of the -/// compiler's representation for things like `for<'a> Fn(&'a isize)` -/// (which would be represented by the type `PolyTraitRef == -/// Binder`). Note that when we skolemize, instantiate, -/// erase, or otherwise "discharge" these bound regions, we change the -/// type from `Binder` to just `T` (see -/// e.g. `liberate_late_bound_regions`). -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct Binder(pub T); - -impl Binder { - /// Skips the binder and returns the "bound" value. This is a - /// risky thing to do because it's easy to get confused about - /// debruijn indices and the like. It is usually better to - /// discharge the binder using `no_late_bound_regions` or - /// `replace_late_bound_regions` or something like - /// that. `skip_binder` is only valid when you are either - /// extracting data that has nothing to do with bound regions, you - /// are doing some sort of test that does not involve bound - /// regions, or you are being very careful about your depth - /// accounting. 
- /// - /// Some examples where `skip_binder` is reasonable: - /// - extracting the def-id from a PolyTraitRef; - /// - comparing the self type of a PolyTraitRef to see if it is equal to - /// a type parameter `X`, since the type `X` does not reference any regions - pub fn skip_binder(&self) -> &T { - &self.0 - } - - pub fn as_ref(&self) -> Binder<&T> { - ty::Binder(&self.0) - } - - pub fn map_bound_ref(&self, f: F) -> Binder - where F: FnOnce(&T) -> U - { - self.as_ref().map_bound(f) - } - - pub fn map_bound(self, f: F) -> Binder - where F: FnOnce(T) -> U - { - ty::Binder(f(self.0)) - } -} - -impl fmt::Debug for TypeFlags { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.bits) - } -} - -/// Represents the projection of an associated type. In explicit UFCS -/// form this would be written `>::N`. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub struct ProjectionTy<'tcx> { - /// The trait reference `T as Trait<..>`. - pub trait_ref: ty::TraitRef<'tcx>, - - /// The name `N` of the associated type. 
- pub item_name: Name, -} - -impl<'tcx> ProjectionTy<'tcx> { - pub fn sort_key(&self) -> (DefId, Name) { - (self.trait_ref.def_id, self.item_name) - } -} - -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct BareFnTy<'tcx> { - pub unsafety: hir::Unsafety, - pub abi: abi::Abi, - pub sig: PolyFnSig<'tcx>, -} - -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct ClosureTy<'tcx> { - pub unsafety: hir::Unsafety, - pub abi: abi::Abi, - pub sig: PolyFnSig<'tcx>, -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] -pub enum FnOutput<'tcx> { - FnConverging(Ty<'tcx>), - FnDiverging -} - -impl<'tcx> FnOutput<'tcx> { - pub fn diverges(&self) -> bool { - *self == FnDiverging - } - - pub fn unwrap(self) -> Ty<'tcx> { - match self { - ty::FnConverging(t) => t, - ty::FnDiverging => unreachable!() - } - } - - pub fn unwrap_or(self, def: Ty<'tcx>) -> Ty<'tcx> { - match self { - ty::FnConverging(t) => t, - ty::FnDiverging => def - } - } -} - -pub type PolyFnOutput<'tcx> = Binder>; - -impl<'tcx> PolyFnOutput<'tcx> { - pub fn diverges(&self) -> bool { - self.0.diverges() - } -} - -/// Signature of a function type, which I have arbitrarily -/// decided to use to refer to the input/output types. -/// -/// - `inputs` is the list of arguments and their modes. -/// - `output` is the return type. -/// - `variadic` indicates whether this is a variadic function. 
(only true for foreign fns) -#[derive(Clone, PartialEq, Eq, Hash)] -pub struct FnSig<'tcx> { - pub inputs: Vec>, - pub output: FnOutput<'tcx>, - pub variadic: bool -} - -pub type PolyFnSig<'tcx> = Binder>; - -impl<'tcx> PolyFnSig<'tcx> { - pub fn inputs(&self) -> ty::Binder>> { - self.map_bound_ref(|fn_sig| fn_sig.inputs.clone()) - } - pub fn input(&self, index: usize) -> ty::Binder> { - self.map_bound_ref(|fn_sig| fn_sig.inputs[index]) - } - pub fn output(&self) -> ty::Binder> { - self.map_bound_ref(|fn_sig| fn_sig.output.clone()) - } - pub fn variadic(&self) -> bool { - self.skip_binder().variadic - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct ParamTy { - pub space: subst::ParamSpace, - pub idx: u32, - pub name: Name, -} - -impl ParamTy { - pub fn new(space: subst::ParamSpace, - index: u32, - name: Name) - -> ParamTy { - ParamTy { space: space, idx: index, name: name } - } - - pub fn for_self() -> ParamTy { - ParamTy::new(subst::SelfSpace, 0, special_idents::type_self.name) - } - - pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy { - ParamTy::new(def.space, def.index, def.name) - } - - pub fn to_ty<'tcx>(self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> { - tcx.mk_param(self.space, self.idx, self.name) - } - - pub fn is_self(&self) -> bool { - self.space == subst::SelfSpace && self.idx == 0 - } -} - -/// A [De Bruijn index][dbi] is a standard means of representing -/// regions (and perhaps later types) in a higher-ranked setting. In -/// particular, imagine a type like this: -/// -/// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) -/// ^ ^ | | | -/// | | | | | -/// | +------------+ 1 | | -/// | | | -/// +--------------------------------+ 2 | -/// | | -/// +------------------------------------------+ 1 -/// -/// In this type, there are two binders (the outer fn and the inner -/// fn). We need to be able to determine, for any given region, which -/// fn type it is bound by, the inner or the outer one. 
There are -/// various ways you can do this, but a De Bruijn index is one of the -/// more convenient and has some nice properties. The basic idea is to -/// count the number of binders, inside out. Some examples should help -/// clarify what I mean. -/// -/// Let's start with the reference type `&'b isize` that is the first -/// argument to the inner function. This region `'b` is assigned a De -/// Bruijn index of 1, meaning "the innermost binder" (in this case, a -/// fn). The region `'a` that appears in the second argument type (`&'a -/// isize`) would then be assigned a De Bruijn index of 2, meaning "the -/// second-innermost binder". (These indices are written on the arrays -/// in the diagram). -/// -/// What is interesting is that De Bruijn index attached to a particular -/// variable will vary depending on where it appears. For example, -/// the final type `&'a char` also refers to the region `'a` declared on -/// the outermost fn. But this time, this reference is not nested within -/// any other binders (i.e., it is not an argument to the inner fn, but -/// rather the outer one). Therefore, in this case, it is assigned a -/// De Bruijn index of 1, because the innermost binder in that location -/// is the outer fn. -/// -/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index -#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, Copy)] -pub struct DebruijnIndex { - // We maintain the invariant that this is never 0. So 1 indicates - // the innermost binder. To ensure this, create with `DebruijnIndex::new`. - pub depth: u32, -} - -/// Representation of regions. -/// -/// Unlike types, most region variants are "fictitious", not concrete, -/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only -/// ones representing concrete regions. -/// -/// ## Bound Regions -/// -/// These are regions that are stored behind a binder and must be substituted -/// with some concrete region before being used. 
There are 2 kind of -/// bound regions: early-bound, which are bound in a TypeScheme/TraitDef, -/// and are substituted by a Substs, and late-bound, which are part of -/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by -/// the likes of `liberate_late_bound_regions`. The distinction exists -/// because higher-ranked lifetimes aren't supported in all places. See [1][2]. -/// -/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild" -/// outside their binder, e.g. in types passed to type inference, and -/// should first be substituted (by skolemized regions, free regions, -/// or region variables). -/// -/// ## Skolemized and Free Regions -/// -/// One often wants to work with bound regions without knowing their precise -/// identity. For example, when checking a function, the lifetime of a borrow -/// can end up being assigned to some region parameter. In these cases, -/// it must be ensured that bounds on the region can't be accidentally -/// assumed without being checked. -/// -/// The process of doing that is called "skolemization". The bound regions -/// are replaced by skolemized markers, which don't satisfy any relation -/// not explicity provided. -/// -/// There are 2 kinds of skolemized regions in rustc: `ReFree` and -/// `ReSkolemized`. When checking an item's body, `ReFree` is supposed -/// to be used. These also support explicit bounds: both the internally-stored -/// *scope*, which the region is assumed to outlive, as well as other -/// relations stored in the `FreeRegionMap`. Note that these relations -/// aren't checked when you `make_subregion` (or `mk_eqty`), only by -/// `resolve_regions_and_report_errors`. -/// -/// When working with higher-ranked types, some region relations aren't -/// yet known, so you can't just call `resolve_regions_and_report_errors`. -/// `ReSkolemized` is designed for this purpose. 
In these contexts, -/// there's also the risk that some inference variable laying around will -/// get unified with your skolemized region: if you want to check whether -/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a` -/// with a skolemized region `'%a`, the variable `'_` would just be -/// instantiated to the skolemized region `'%a`, which is wrong because -/// the inference variable is supposed to satisfy the relation -/// *for every value of the skolemized region*. To ensure that doesn't -/// happen, you can use `leak_check`. This is more clearly explained -/// by infer/higher_ranked/README.md. -/// -/// [1] http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ -/// [2] http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ -#[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable)] -pub enum Region { - // Region bound in a type or fn declaration which will be - // substituted 'early' -- that is, at the same time when type - // parameters are substituted. - ReEarlyBound(EarlyBoundRegion), - - // Region bound in a function scope, which will be substituted when the - // function is called. - ReLateBound(DebruijnIndex, BoundRegion), - - /// When checking a function body, the types of all arguments and so forth - /// that refer to bound region parameters are modified to refer to free - /// region parameters. - ReFree(FreeRegion), - - /// A concrete region naming some statically determined extent - /// (e.g. an expression or sequence of statements) within the - /// current function. - ReScope(region::CodeExtent), - - /// Static data that has an "infinite" lifetime. Top in the region lattice. - ReStatic, - - /// A region variable. Should not exist after typeck. - ReVar(RegionVid), - - /// A skolemized region - basically the higher-ranked version of ReFree. - /// Should not exist after typeck. 
- ReSkolemized(SkolemizedRegionVid, BoundRegion), - - /// Empty lifetime is for data that is never accessed. - /// Bottom in the region lattice. We treat ReEmpty somewhat - /// specially; at least right now, we do not generate instances of - /// it during the GLB computations, but rather - /// generate an error instead. This is to improve error messages. - /// The only way to get an instance of ReEmpty is to have a region - /// variable with no constraints. - ReEmpty, -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] -pub struct EarlyBoundRegion { - pub space: subst::ParamSpace, - pub index: u32, - pub name: Name, -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct TyVid { - pub index: u32 -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct IntVid { - pub index: u32 -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub struct FloatVid { - pub index: u32 -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] -pub struct RegionVid { - pub index: u32 -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct SkolemizedRegionVid { - pub index: u32 -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash)] -pub enum InferTy { - TyVar(TyVid), - IntVar(IntVid), - FloatVar(FloatVid), - - /// A `FreshTy` is one that is generated as a replacement for an - /// unbound type variable. This is convenient for caching etc. See - /// `middle::infer::freshen` for more details. - FreshTy(u32), - FreshIntTy(u32), - FreshFloatTy(u32) -} - -/// Bounds suitable for an existentially quantified type parameter -/// such as those that appear in object types or closure types. 
-#[derive(PartialEq, Eq, Hash, Clone)] -pub struct ExistentialBounds<'tcx> { - pub region_bound: ty::Region, - pub builtin_bounds: BuiltinBounds, - pub projection_bounds: Vec>, -} - -impl<'tcx> ExistentialBounds<'tcx> { - pub fn new(region_bound: ty::Region, - builtin_bounds: BuiltinBounds, - projection_bounds: Vec>) - -> Self { - let mut projection_bounds = projection_bounds; - projection_bounds.sort_by(|a, b| a.sort_key().cmp(&b.sort_key())); - ExistentialBounds { - region_bound: region_bound, - builtin_bounds: builtin_bounds, - projection_bounds: projection_bounds - } - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub struct BuiltinBounds(EnumSet); - -impl BuiltinBounds { - pub fn empty() -> BuiltinBounds { - BuiltinBounds(EnumSet::new()) - } - - pub fn iter(&self) -> enum_set::Iter { - self.into_iter() - } - - pub fn to_predicates<'tcx>(&self, - tcx: &ty::ctxt<'tcx>, - self_ty: Ty<'tcx>) -> Vec> { - self.iter().filter_map(|builtin_bound| - match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, self_ty) { - Ok(trait_ref) => Some(trait_ref.to_predicate()), - Err(ErrorReported) => { None } - } - ).collect() - } -} - -impl ops::Deref for BuiltinBounds { - type Target = EnumSet; - fn deref(&self) -> &Self::Target { &self.0 } -} - -impl ops::DerefMut for BuiltinBounds { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } -} - -impl<'a> IntoIterator for &'a BuiltinBounds { - type Item = BuiltinBound; - type IntoIter = enum_set::Iter; - fn into_iter(self) -> Self::IntoIter { - (**self).into_iter() - } -} - -#[derive(Clone, RustcEncodable, PartialEq, Eq, RustcDecodable, Hash, - Debug, Copy)] -#[repr(usize)] -pub enum BuiltinBound { - Send, - Sized, - Copy, - Sync, -} - -impl CLike for BuiltinBound { - fn to_usize(&self) -> usize { - *self as usize - } - fn from_usize(v: usize) -> BuiltinBound { - unsafe { mem::transmute(v) } - } -} - -impl<'tcx> ty::ctxt<'tcx> { - pub fn try_add_builtin_trait(&self, - trait_def_id: DefId, - 
builtin_bounds: &mut EnumSet) - -> bool - { - //! Checks whether `trait_ref` refers to one of the builtin - //! traits, like `Send`, and adds the corresponding - //! bound to the set `builtin_bounds` if so. Returns true if `trait_ref` - //! is a builtin trait. - - match self.lang_items.to_builtin_kind(trait_def_id) { - Some(bound) => { builtin_bounds.insert(bound); true } - None => false - } - } -} - -impl DebruijnIndex { - pub fn new(depth: u32) -> DebruijnIndex { - assert!(depth > 0); - DebruijnIndex { depth: depth } - } - - pub fn shifted(&self, amount: u32) -> DebruijnIndex { - DebruijnIndex { depth: self.depth + amount } - } -} - -// Region utilities -impl Region { - pub fn is_bound(&self) -> bool { - match *self { - ty::ReEarlyBound(..) => true, - ty::ReLateBound(..) => true, - _ => false - } - } - - pub fn needs_infer(&self) -> bool { - match *self { - ty::ReVar(..) | ty::ReSkolemized(..) => true, - _ => false - } - } - - pub fn escapes_depth(&self, depth: u32) -> bool { - match *self { - ty::ReLateBound(debruijn, _) => debruijn.depth > depth, - _ => false, - } - } - - /// Returns the depth of `self` from the (1-based) binding level `depth` - pub fn from_depth(&self, depth: u32) -> Region { - match *self { - ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex { - depth: debruijn.depth - (depth - 1) - }, r), - r => r - } - } -} - -// Type utilities -impl<'tcx> TyS<'tcx> { - pub fn as_opt_param_ty(&self) -> Option { - match self.sty { - ty::TyParam(ref d) => Some(d.clone()), - _ => None, - } - } - - pub fn is_nil(&self) -> bool { - match self.sty { - TyTuple(ref tys) => tys.is_empty(), - _ => false - } - } - - pub fn is_empty(&self, _cx: &ty::ctxt) -> bool { - // FIXME(#24885): be smarter here - match self.sty { - TyEnum(def, _) | TyStruct(def, _) => def.is_empty(), - _ => false - } - } - - pub fn is_primitive(&self) -> bool { - match self.sty { - TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true, - _ => false, - } - } - - pub fn 
is_ty_var(&self) -> bool { - match self.sty { - TyInfer(TyVar(_)) => true, - _ => false - } - } - - pub fn is_phantom_data(&self) -> bool { - if let TyStruct(def, _) = self.sty { - def.is_phantom_data() - } else { - false - } - } - - pub fn is_bool(&self) -> bool { self.sty == TyBool } - - pub fn is_param(&self, space: subst::ParamSpace, index: u32) -> bool { - match self.sty { - ty::TyParam(ref data) => data.space == space && data.idx == index, - _ => false, - } - } - - pub fn is_self(&self) -> bool { - match self.sty { - TyParam(ref p) => p.space == subst::SelfSpace, - _ => false - } - } - - fn is_slice(&self) -> bool { - match self.sty { - TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty { - TySlice(_) | TyStr => true, - _ => false, - }, - _ => false - } - } - - pub fn is_structural(&self) -> bool { - match self.sty { - TyStruct(..) | TyTuple(_) | TyEnum(..) | - TyArray(..) | TyClosure(..) => true, - _ => self.is_slice() | self.is_trait() - } - } - - #[inline] - pub fn is_simd(&self) -> bool { - match self.sty { - TyStruct(def, _) => def.is_simd(), - _ => false - } - } - - pub fn sequence_element_type(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> { - match self.sty { - TyArray(ty, _) | TySlice(ty) => ty, - TyStr => cx.mk_mach_uint(ast::TyU8), - _ => cx.sess.bug(&format!("sequence_element_type called on non-sequence value: {}", - self)), - } - } - - pub fn simd_type(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> { - match self.sty { - TyStruct(def, substs) => { - def.struct_variant().fields[0].ty(cx, substs) - } - _ => panic!("simd_type called on invalid type") - } - } - - pub fn simd_size(&self, _cx: &ty::ctxt) -> usize { - match self.sty { - TyStruct(def, _) => def.struct_variant().fields.len(), - _ => panic!("simd_size called on invalid type") - } - } - - pub fn is_region_ptr(&self) -> bool { - match self.sty { - TyRef(..) 
=> true, - _ => false - } - } - - pub fn is_unsafe_ptr(&self) -> bool { - match self.sty { - TyRawPtr(_) => return true, - _ => return false - } - } - - pub fn is_unique(&self) -> bool { - match self.sty { - TyBox(_) => true, - _ => false - } - } - - /* - A scalar type is one that denotes an atomic datum, with no sub-components. - (A TyRawPtr is scalar because it represents a non-managed pointer, so its - contents are abstract to rustc.) - */ - pub fn is_scalar(&self) -> bool { - match self.sty { - TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) | - TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) | - TyBareFn(..) | TyRawPtr(_) => true, - _ => false - } - } - - /// Returns true if this type is a floating point type and false otherwise. - pub fn is_floating_point(&self) -> bool { - match self.sty { - TyFloat(_) | - TyInfer(FloatVar(_)) => true, - _ => false, - } - } - - pub fn is_trait(&self) -> bool { - match self.sty { - TyTrait(..) => true, - _ => false - } - } - - pub fn is_integral(&self) -> bool { - match self.sty { - TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true, - _ => false - } - } - - pub fn is_fresh(&self) -> bool { - match self.sty { - TyInfer(FreshTy(_)) => true, - TyInfer(FreshIntTy(_)) => true, - TyInfer(FreshFloatTy(_)) => true, - _ => false - } - } - - pub fn is_uint(&self) -> bool { - match self.sty { - TyInfer(IntVar(_)) | TyUint(ast::TyUs) => true, - _ => false - } - } - - pub fn is_char(&self) -> bool { - match self.sty { - TyChar => true, - _ => false - } - } - - pub fn is_bare_fn(&self) -> bool { - match self.sty { - TyBareFn(..) 
=> true, - _ => false - } - } - - pub fn is_bare_fn_item(&self) -> bool { - match self.sty { - TyBareFn(Some(_), _) => true, - _ => false - } - } - - pub fn is_fp(&self) -> bool { - match self.sty { - TyInfer(FloatVar(_)) | TyFloat(_) => true, - _ => false - } - } - - pub fn is_numeric(&self) -> bool { - self.is_integral() || self.is_fp() - } - - pub fn is_signed(&self) -> bool { - match self.sty { - TyInt(_) => true, - _ => false - } - } - - pub fn is_machine(&self) -> bool { - match self.sty { - TyInt(ast::TyIs) | TyUint(ast::TyUs) => false, - TyInt(..) | TyUint(..) | TyFloat(..) => true, - _ => false - } - } - - // Returns the type and mutability of *ty. - // - // The parameter `explicit` indicates if this is an *explicit* dereference. - // Some types---notably unsafe ptrs---can only be dereferenced explicitly. - pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference) - -> Option> - { - match self.sty { - TyBox(ty) => { - Some(TypeAndMut { - ty: ty, - mutbl: if pref == ty::PreferMutLvalue { - hir::MutMutable - } else { - hir::MutImmutable - }, - }) - }, - TyRef(_, mt) => Some(mt), - TyRawPtr(mt) if explicit => Some(mt), - _ => None - } - } - - // Returns the type of ty[i] - pub fn builtin_index(&self) -> Option> { - match self.sty { - TyArray(ty, _) | TySlice(ty) => Some(ty), - _ => None - } - } - - pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> { - match self.sty { - TyBareFn(_, ref f) => &f.sig, - _ => panic!("Ty::fn_sig() called on non-fn type: {:?}", self) - } - } - - /// Returns the ABI of the given function. - pub fn fn_abi(&self) -> abi::Abi { - match self.sty { - TyBareFn(_, ref f) => f.abi, - _ => panic!("Ty::fn_abi() called on non-fn type"), - } - } - - // Type accessors for substructures of types - pub fn fn_args(&self) -> ty::Binder>> { - self.fn_sig().inputs() - } - - pub fn fn_ret(&self) -> Binder> { - self.fn_sig().output() - } - - pub fn is_fn(&self) -> bool { - match self.sty { - TyBareFn(..) 
=> true, - _ => false - } - } - - pub fn ty_to_def_id(&self) -> Option { - match self.sty { - TyTrait(ref tt) => Some(tt.principal_def_id()), - TyStruct(def, _) | - TyEnum(def, _) => Some(def.did), - TyClosure(id, _) => Some(id), - _ => None - } - } - - pub fn ty_adt_def(&self) -> Option> { - match self.sty { - TyStruct(adt, _) | TyEnum(adt, _) => Some(adt), - _ => None - } - } - - /// Returns the regions directly referenced from this type (but - /// not types reachable from this type via `walk_tys`). This - /// ignores late-bound regions binders. - pub fn regions(&self) -> Vec { - match self.sty { - TyRef(region, _) => { - vec![*region] - } - TyTrait(ref obj) => { - let mut v = vec![obj.bounds.region_bound]; - v.extend_from_slice(obj.principal.skip_binder() - .substs.regions().as_slice()); - v - } - TyEnum(_, substs) | - TyStruct(_, substs) => { - substs.regions().as_slice().to_vec() - } - TyClosure(_, ref substs) => { - substs.func_substs.regions().as_slice().to_vec() - } - TyProjection(ref data) => { - data.trait_ref.substs.regions().as_slice().to_vec() - } - TyBareFn(..) | - TyBool | - TyChar | - TyInt(_) | - TyUint(_) | - TyFloat(_) | - TyBox(_) | - TyStr | - TyArray(_, _) | - TySlice(_) | - TyRawPtr(_) | - TyTuple(_) | - TyParam(_) | - TyInfer(_) | - TyError => { - vec![] - } - } - } -} diff --git a/src/librustc/middle/ty/trait_def.rs b/src/librustc/middle/ty/trait_def.rs deleted file mode 100644 index db001ce2c446c..0000000000000 --- a/src/librustc/middle/ty/trait_def.rs +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dep_graph::DepNode; -use middle::def_id::DefId; -use middle::ty; -use middle::ty::fast_reject; -use middle::ty::Ty; -use std::borrow::{Borrow}; -use std::cell::{Cell, Ref, RefCell}; -use syntax::ast::Name; -use rustc_front::hir; -use util::nodemap::FnvHashMap; - -/// As `TypeScheme` but for a trait ref. -pub struct TraitDef<'tcx> { - pub unsafety: hir::Unsafety, - - /// If `true`, then this trait had the `#[rustc_paren_sugar]` - /// attribute, indicating that it should be used with `Foo()` - /// sugar. This is a temporary thing -- eventually any trait wil - /// be usable with the sugar (or without it). - pub paren_sugar: bool, - - /// Generic type definitions. Note that `Self` is listed in here - /// as having a single bound, the trait itself (e.g., in the trait - /// `Eq`, there is a single bound `Self : Eq`). This is so that - /// default methods get to assume that the `Self` parameters - /// implements the trait. - pub generics: ty::Generics<'tcx>, - - pub trait_ref: ty::TraitRef<'tcx>, - - /// A list of the associated types defined in this trait. Useful - /// for resolving `X::Foo` type markers. - pub associated_type_names: Vec, - - // Impls of this trait. To allow for quicker lookup, the impls are indexed - // by a simplified version of their Self type: impls with a simplifiable - // Self are stored in nonblanket_impls keyed by it, while all other impls - // are stored in blanket_impls. - // - // These lists are tracked by `DepNode::TraitImpls`; we don't use - // a DepTrackingMap but instead have the `TraitDef` insert the - // required reads/writes. - - /// Impls of the trait. - nonblanket_impls: RefCell< - FnvHashMap> - >, - - /// Blanket impls associated with the trait. 
- blanket_impls: RefCell>, - - /// Various flags - pub flags: Cell -} - -impl<'tcx> TraitDef<'tcx> { - pub fn new(unsafety: hir::Unsafety, - paren_sugar: bool, - generics: ty::Generics<'tcx>, - trait_ref: ty::TraitRef<'tcx>, - associated_type_names: Vec) - -> TraitDef<'tcx> { - TraitDef { - paren_sugar: paren_sugar, - unsafety: unsafety, - generics: generics, - trait_ref: trait_ref, - associated_type_names: associated_type_names, - nonblanket_impls: RefCell::new(FnvHashMap()), - blanket_impls: RefCell::new(vec![]), - flags: Cell::new(ty::TraitFlags::NO_TRAIT_FLAGS) - } - } - - pub fn def_id(&self) -> DefId { - self.trait_ref.def_id - } - - // returns None if not yet calculated - pub fn object_safety(&self) -> Option { - if self.flags.get().intersects(TraitFlags::OBJECT_SAFETY_VALID) { - Some(self.flags.get().intersects(TraitFlags::IS_OBJECT_SAFE)) - } else { - None - } - } - - pub fn set_object_safety(&self, is_safe: bool) { - assert!(self.object_safety().map(|cs| cs == is_safe).unwrap_or(true)); - self.flags.set( - self.flags.get() | if is_safe { - TraitFlags::OBJECT_SAFETY_VALID | TraitFlags::IS_OBJECT_SAFE - } else { - TraitFlags::OBJECT_SAFETY_VALID - } - ); - } - - fn write_trait_impls(&self, tcx: &ty::ctxt<'tcx>) { - tcx.dep_graph.write(DepNode::TraitImpls(self.trait_ref.def_id)); - } - - fn read_trait_impls(&self, tcx: &ty::ctxt<'tcx>) { - tcx.dep_graph.read(DepNode::TraitImpls(self.trait_ref.def_id)); - } - - /// Records a trait-to-implementation mapping. - pub fn record_impl(&self, - tcx: &ty::ctxt<'tcx>, - impl_def_id: DefId, - impl_trait_ref: ty::TraitRef<'tcx>) { - debug!("TraitDef::record_impl for {:?}, from {:?}", - self, impl_trait_ref); - - // Record the write into the impl set, but only for local - // impls: external impls are handled differently. 
- if impl_def_id.is_local() { - self.write_trait_impls(tcx); - } - - // We don't want to borrow_mut after we already populated all impls, - // so check if an impl is present with an immutable borrow first. - if let Some(sty) = fast_reject::simplify_type(tcx, - impl_trait_ref.self_ty(), false) { - if let Some(is) = self.nonblanket_impls.borrow().get(&sty) { - if is.contains(&impl_def_id) { - return // duplicate - skip - } - } - - self.nonblanket_impls.borrow_mut().entry(sty).or_insert(vec![]).push(impl_def_id) - } else { - if self.blanket_impls.borrow().contains(&impl_def_id) { - return // duplicate - skip - } - self.blanket_impls.borrow_mut().push(impl_def_id) - } - } - - pub fn for_each_impl(&self, tcx: &ty::ctxt<'tcx>, mut f: F) { - self.read_trait_impls(tcx); - - tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id); - - for &impl_def_id in self.blanket_impls.borrow().iter() { - f(impl_def_id); - } - - for v in self.nonblanket_impls.borrow().values() { - for &impl_def_id in v { - f(impl_def_id); - } - } - } - - /// Iterate over every impl that could possibly match the - /// self-type `self_ty`. - pub fn for_each_relevant_impl(&self, - tcx: &ty::ctxt<'tcx>, - self_ty: Ty<'tcx>, - mut f: F) - { - self.read_trait_impls(tcx); - - tcx.populate_implementations_for_trait_if_necessary(self.trait_ref.def_id); - - for &impl_def_id in self.blanket_impls.borrow().iter() { - f(impl_def_id); - } - - // simplify_type(.., false) basically replaces type parameters and - // projections with infer-variables. This is, of course, done on - // the impl trait-ref when it is instantiated, but not on the - // predicate trait-ref which is passed here. 
- // - // for example, if we match `S: Copy` against an impl like - // `impl Copy for Option`, we replace the type variable - // in `Option` with an infer variable, to `Option<_>` (this - // doesn't actually change fast_reject output), but we don't - // replace `S` with anything - this impl of course can't be - // selected, and as there are hundreds of similar impls, - // considering them would significantly harm performance. - if let Some(simp) = fast_reject::simplify_type(tcx, self_ty, true) { - if let Some(impls) = self.nonblanket_impls.borrow().get(&simp) { - for &impl_def_id in impls { - f(impl_def_id); - } - } - } else { - for v in self.nonblanket_impls.borrow().values() { - for &impl_def_id in v { - f(impl_def_id); - } - } - } - } - - pub fn borrow_impl_lists<'s>(&'s self, tcx: &ty::ctxt<'tcx>) - -> (Ref<'s, Vec>, - Ref<'s, FnvHashMap>>) { - self.read_trait_impls(tcx); - (self.blanket_impls.borrow(), self.nonblanket_impls.borrow()) - } - -} - -bitflags! { - flags TraitFlags: u32 { - const NO_TRAIT_FLAGS = 0, - const HAS_DEFAULT_IMPL = 1 << 0, - const IS_OBJECT_SAFE = 1 << 1, - const OBJECT_SAFETY_VALID = 1 << 2, - const IMPLS_VALID = 1 << 3, - } -} - diff --git a/src/librustc/middle/ty/util.rs b/src/librustc/middle/ty/util.rs deleted file mode 100644 index 03145951367f9..0000000000000 --- a/src/librustc/middle/ty/util.rs +++ /dev/null @@ -1,893 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! misc. 
type-system utilities too small to deserve their own file - -use back::svh::Svh; -use middle::const_eval::{self, ConstVal, ErrKind}; -use middle::const_eval::EvalHint::UncheckedExprHint; -use middle::def_id::DefId; -use middle::subst::{self, Subst, Substs}; -use middle::infer; -use middle::pat_util; -use middle::traits; -use middle::ty::{self, Ty, TypeAndMut, TypeFlags, TypeFoldable}; -use middle::ty::{Disr, ParameterEnvironment}; -use middle::ty::TypeVariants::*; -use util::num::ToPrimitive; - -use std::cmp; -use std::hash::{Hash, SipHasher, Hasher}; -use std::rc::Rc; -use syntax::ast::{self, Name}; -use syntax::attr::{self, AttrMetaMethods, SignedInt, UnsignedInt}; -use syntax::codemap::Span; - -use rustc_front::hir; - -pub trait IntTypeExt { - fn to_ty<'tcx>(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx>; - fn i64_to_disr(&self, val: i64) -> Option; - fn u64_to_disr(&self, val: u64) -> Option; - fn disr_incr(&self, val: Disr) -> Option; - fn disr_string(&self, val: Disr) -> String; - fn disr_wrap_incr(&self, val: Option) -> Disr; -} - -impl IntTypeExt for attr::IntType { - fn to_ty<'tcx>(&self, cx: &ty::ctxt<'tcx>) -> Ty<'tcx> { - match *self { - SignedInt(ast::TyI8) => cx.types.i8, - SignedInt(ast::TyI16) => cx.types.i16, - SignedInt(ast::TyI32) => cx.types.i32, - SignedInt(ast::TyI64) => cx.types.i64, - SignedInt(ast::TyIs) => cx.types.isize, - UnsignedInt(ast::TyU8) => cx.types.u8, - UnsignedInt(ast::TyU16) => cx.types.u16, - UnsignedInt(ast::TyU32) => cx.types.u32, - UnsignedInt(ast::TyU64) => cx.types.u64, - UnsignedInt(ast::TyUs) => cx.types.usize, - } - } - - fn i64_to_disr(&self, val: i64) -> Option { - match *self { - SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr), - SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr), - SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr), - SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr), - UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr), - UnsignedInt(ast::TyU16) => val.to_u16() 
.map(|v| v as Disr), - UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr), - UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr), - - UnsignedInt(ast::TyUs) | - SignedInt(ast::TyIs) => unreachable!(), - } - } - - fn u64_to_disr(&self, val: u64) -> Option { - match *self { - SignedInt(ast::TyI8) => val.to_i8() .map(|v| v as Disr), - SignedInt(ast::TyI16) => val.to_i16() .map(|v| v as Disr), - SignedInt(ast::TyI32) => val.to_i32() .map(|v| v as Disr), - SignedInt(ast::TyI64) => val.to_i64() .map(|v| v as Disr), - UnsignedInt(ast::TyU8) => val.to_u8() .map(|v| v as Disr), - UnsignedInt(ast::TyU16) => val.to_u16() .map(|v| v as Disr), - UnsignedInt(ast::TyU32) => val.to_u32() .map(|v| v as Disr), - UnsignedInt(ast::TyU64) => val.to_u64() .map(|v| v as Disr), - - UnsignedInt(ast::TyUs) | - SignedInt(ast::TyIs) => unreachable!(), - } - } - - fn disr_incr(&self, val: Disr) -> Option { - macro_rules! add1 { - ($e:expr) => { $e.and_then(|v|v.checked_add(1)).map(|v| v as Disr) } - } - match *self { - // SignedInt repr means we *want* to reinterpret the bits - // treating the highest bit of Disr as a sign-bit, so - // cast to i64 before range-checking. - SignedInt(ast::TyI8) => add1!((val as i64).to_i8()), - SignedInt(ast::TyI16) => add1!((val as i64).to_i16()), - SignedInt(ast::TyI32) => add1!((val as i64).to_i32()), - SignedInt(ast::TyI64) => add1!(Some(val as i64)), - - UnsignedInt(ast::TyU8) => add1!(val.to_u8()), - UnsignedInt(ast::TyU16) => add1!(val.to_u16()), - UnsignedInt(ast::TyU32) => add1!(val.to_u32()), - UnsignedInt(ast::TyU64) => add1!(Some(val)), - - UnsignedInt(ast::TyUs) | - SignedInt(ast::TyIs) => unreachable!(), - } - } - - // This returns a String because (1.) it is only used for - // rendering an error message and (2.) a string can represent the - // full range from `i64::MIN` through `u64::MAX`. 
- fn disr_string(&self, val: Disr) -> String { - match *self { - SignedInt(ast::TyI8) => format!("{}", val as i8 ), - SignedInt(ast::TyI16) => format!("{}", val as i16), - SignedInt(ast::TyI32) => format!("{}", val as i32), - SignedInt(ast::TyI64) => format!("{}", val as i64), - UnsignedInt(ast::TyU8) => format!("{}", val as u8 ), - UnsignedInt(ast::TyU16) => format!("{}", val as u16), - UnsignedInt(ast::TyU32) => format!("{}", val as u32), - UnsignedInt(ast::TyU64) => format!("{}", val as u64), - - UnsignedInt(ast::TyUs) | - SignedInt(ast::TyIs) => unreachable!(), - } - } - - fn disr_wrap_incr(&self, val: Option) -> Disr { - macro_rules! add1 { - ($e:expr) => { ($e).wrapping_add(1) as Disr } - } - let val = val.unwrap_or(ty::INITIAL_DISCRIMINANT_VALUE); - match *self { - SignedInt(ast::TyI8) => add1!(val as i8 ), - SignedInt(ast::TyI16) => add1!(val as i16), - SignedInt(ast::TyI32) => add1!(val as i32), - SignedInt(ast::TyI64) => add1!(val as i64), - UnsignedInt(ast::TyU8) => add1!(val as u8 ), - UnsignedInt(ast::TyU16) => add1!(val as u16), - UnsignedInt(ast::TyU32) => add1!(val as u32), - UnsignedInt(ast::TyU64) => add1!(val as u64), - - UnsignedInt(ast::TyUs) | - SignedInt(ast::TyIs) => unreachable!(), - } - } -} - - -#[derive(Copy, Clone)] -pub enum CopyImplementationError { - InfrigingField(Name), - InfrigingVariant(Name), - NotAnAdt, - HasDestructor -} - -/// Describes whether a type is representable. For types that are not -/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to -/// distinguish between types that are recursive with themselves and types that -/// contain a different recursive type. These cases can therefore be treated -/// differently when reporting errors. -/// -/// The ordering of the cases is significant. They are sorted so that cmp::max -/// will keep the "more erroneous" of two values. 
-#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] -pub enum Representability { - Representable, - ContainsRecursive, - SelfRecursive, -} - -impl<'a, 'tcx> ParameterEnvironment<'a, 'tcx> { - pub fn can_type_implement_copy(&self, self_type: Ty<'tcx>, span: Span) - -> Result<(),CopyImplementationError> { - let tcx = self.tcx; - - // FIXME: (@jroesch) float this code up - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(self.clone())); - - let adt = match self_type.sty { - ty::TyStruct(struct_def, substs) => { - for field in struct_def.all_fields() { - let field_ty = field.ty(tcx, substs); - if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingField( - field.name)) - } - } - struct_def - } - ty::TyEnum(enum_def, substs) => { - for variant in &enum_def.variants { - for field in &variant.fields { - let field_ty = field.ty(tcx, substs); - if infcx.type_moves_by_default(field_ty, span) { - return Err(CopyImplementationError::InfrigingVariant( - variant.name)) - } - } - } - enum_def - } - _ => return Err(CopyImplementationError::NotAnAdt), - }; - - if adt.has_dtor() { - return Err(CopyImplementationError::HasDestructor) - } - - Ok(()) - } -} - -impl<'tcx> ty::ctxt<'tcx> { - pub fn pat_contains_ref_binding(&self, pat: &hir::Pat) -> Option { - pat_util::pat_contains_ref_binding(&self.def_map, pat) - } - - pub fn arm_contains_ref_binding(&self, arm: &hir::Arm) -> Option { - pat_util::arm_contains_ref_binding(&self.def_map, arm) - } - - /// Returns the type of element at index `i` in tuple or tuple-like type `t`. - /// For an enum `t`, `variant` is None only if `t` is a univariant enum. 
- pub fn positional_element_ty(&self, - ty: Ty<'tcx>, - i: usize, - variant: Option) -> Option> { - match (&ty.sty, variant) { - (&TyStruct(def, substs), None) => { - def.struct_variant().fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyEnum(def, substs), Some(vid)) => { - def.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyEnum(def, substs), None) => { - assert!(def.is_univariant()); - def.variants[0].fields.get(i).map(|f| f.ty(self, substs)) - } - (&TyTuple(ref v), None) => v.get(i).cloned(), - _ => None - } - } - - /// Returns the type of element at field `n` in struct or struct-like type `t`. - /// For an enum `t`, `variant` must be some def id. - pub fn named_element_ty(&self, - ty: Ty<'tcx>, - n: Name, - variant: Option) -> Option> { - match (&ty.sty, variant) { - (&TyStruct(def, substs), None) => { - def.struct_variant().find_field_named(n).map(|f| f.ty(self, substs)) - } - (&TyEnum(def, substs), Some(vid)) => { - def.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs)) - } - _ => return None - } - } - - /// Returns `(normalized_type, ty)`, where `normalized_type` is the - /// IntType representation of one of {i64,i32,i16,i8,u64,u32,u16,u8}, - /// and `ty` is the original type (i.e. may include `isize` or - /// `usize`). - pub fn enum_repr_type(&self, opt_hint: Option<&attr::ReprAttr>) - -> (attr::IntType, Ty<'tcx>) { - let repr_type = match opt_hint { - // Feed in the given type - Some(&attr::ReprInt(_, int_t)) => int_t, - // ... but provide sensible default if none provided - // - // NB. Historically `fn enum_variants` generate i64 here, while - // rustc_typeck::check would generate isize. 
- _ => SignedInt(ast::TyIs), - }; - - let repr_type_ty = repr_type.to_ty(self); - let repr_type = match repr_type { - SignedInt(ast::TyIs) => - SignedInt(self.sess.target.int_type), - UnsignedInt(ast::TyUs) => - UnsignedInt(self.sess.target.uint_type), - other => other - }; - - (repr_type, repr_type_ty) - } - - /// Returns the deeply last field of nested structures, or the same type, - /// if not a structure at all. Corresponds to the only possible unsized - /// field, and its type can be used to determine unsizing strategy. - pub fn struct_tail(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> { - while let TyStruct(def, substs) = ty.sty { - match def.struct_variant().fields.last() { - Some(f) => ty = f.ty(self, substs), - None => break - } - } - ty - } - - /// Same as applying struct_tail on `source` and `target`, but only - /// keeps going as long as the two types are instances of the same - /// structure definitions. - /// For `(Foo>, Foo)`, the result will be `(Foo, Trait)`, - /// whereas struct_tail produces `T`, and `Trait`, respectively. - pub fn struct_lockstep_tails(&self, - source: Ty<'tcx>, - target: Ty<'tcx>) - -> (Ty<'tcx>, Ty<'tcx>) { - let (mut a, mut b) = (source, target); - while let (&TyStruct(a_def, a_substs), &TyStruct(b_def, b_substs)) = (&a.sty, &b.sty) { - if a_def != b_def { - break; - } - if let Some(f) = a_def.struct_variant().fields.last() { - a = f.ty(self, a_substs); - b = f.ty(self, b_substs); - } else { - break; - } - } - (a, b) - } - - /// Returns the repeat count for a repeating vector expression. 
- pub fn eval_repeat_count(&self, count_expr: &hir::Expr) -> usize { - let hint = UncheckedExprHint(self.types.usize); - match const_eval::eval_const_expr_partial(self, count_expr, hint, None) { - Ok(val) => { - let found = match val { - ConstVal::Uint(count) => return count as usize, - ConstVal::Int(count) if count >= 0 => return count as usize, - const_val => const_val.description(), - }; - span_err!(self.sess, count_expr.span, E0306, - "expected positive integer for repeat count, found {}", - found); - } - Err(err) => { - let err_msg = match count_expr.node { - hir::ExprPath(None, hir::Path { - global: false, - ref segments, - .. - }) if segments.len() == 1 => - format!("found variable"), - _ => match err.kind { - ErrKind::MiscCatchAll => format!("but found {}", err.description()), - _ => format!("but {}", err.description()) - } - }; - span_err!(self.sess, count_expr.span, E0307, - "expected constant integer for repeat count, {}", err_msg); - } - } - 0 - } - - /// Given a set of predicates that apply to an object type, returns - /// the region bounds that the (erased) `Self` type must - /// outlive. Precisely *because* the `Self` type is erased, the - /// parameter `erased_self_ty` must be supplied to indicate what type - /// has been used to represent `Self` in the predicates - /// themselves. This should really be a unique type; `FreshTy(0)` is a - /// popular choice. - /// - /// NB: in some cases, particularly around higher-ranked bounds, - /// this function returns a kind of conservative approximation. - /// That is, all regions returned by this function are definitely - /// required, but there may be other region bounds that are not - /// returned, as well as requirements like `for<'a> T: 'a`. - /// - /// Requires that trait definitions have been processed so that we can - /// elaborate predicates and walk supertraits. 
- pub fn required_region_bounds(&self, - erased_self_ty: Ty<'tcx>, - predicates: Vec>) - -> Vec { - debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})", - erased_self_ty, - predicates); - - assert!(!erased_self_ty.has_escaping_regions()); - - traits::elaborate_predicates(self, predicates) - .filter_map(|predicate| { - match predicate { - ty::Predicate::Projection(..) | - ty::Predicate::Trait(..) | - ty::Predicate::Equate(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::RegionOutlives(..) => { - None - } - ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => { - // Search for a bound of the form `erased_self_ty - // : 'a`, but be wary of something like `for<'a> - // erased_self_ty : 'a` (we interpret a - // higher-ranked bound like that as 'static, - // though at present the code in `fulfill.rs` - // considers such bounds to be unsatisfiable, so - // it's kind of a moot point since you could never - // construct such an object, but this seems - // correct even if that code changes). - if t == erased_self_ty && !r.has_escaping_regions() { - Some(r) - } else { - None - } - } - } - }) - .collect() - } - - /// Creates a hash of the type `Ty` which will be the same no matter what crate - /// context it's calculated within. This is used by the `type_id` intrinsic. - pub fn hash_crate_independent(&self, ty: Ty<'tcx>, svh: &Svh) -> u64 { - let mut state = SipHasher::new(); - helper(self, ty, svh, &mut state); - return state.finish(); - - fn helper<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, svh: &Svh, - state: &mut SipHasher) { - macro_rules! byte { ($b:expr) => { ($b as u8).hash(state) } } - macro_rules! hash { ($e:expr) => { $e.hash(state) } } - - let region = |state: &mut SipHasher, r: ty::Region| { - match r { - ty::ReStatic => {} - ty::ReLateBound(db, ty::BrAnon(i)) => { - db.hash(state); - i.hash(state); - } - ty::ReEmpty | - ty::ReEarlyBound(..) | - ty::ReLateBound(..) | - ty::ReFree(..) 
| - ty::ReScope(..) | - ty::ReVar(..) | - ty::ReSkolemized(..) => { - tcx.sess.bug("unexpected region found when hashing a type") - } - } - }; - let did = |state: &mut SipHasher, did: DefId| { - let h = if did.is_local() { - svh.clone() - } else { - tcx.sess.cstore.crate_hash(did.krate) - }; - h.as_str().hash(state); - did.index.hash(state); - }; - let mt = |state: &mut SipHasher, mt: TypeAndMut| { - mt.mutbl.hash(state); - }; - let fn_sig = |state: &mut SipHasher, sig: &ty::Binder>| { - let sig = tcx.anonymize_late_bound_regions(sig).0; - for a in &sig.inputs { helper(tcx, *a, svh, state); } - if let ty::FnConverging(output) = sig.output { - helper(tcx, output, svh, state); - } - }; - ty.maybe_walk(|ty| { - match ty.sty { - TyBool => byte!(2), - TyChar => byte!(3), - TyInt(i) => { - byte!(4); - hash!(i); - } - TyUint(u) => { - byte!(5); - hash!(u); - } - TyFloat(f) => { - byte!(6); - hash!(f); - } - TyStr => { - byte!(7); - } - TyEnum(d, _) => { - byte!(8); - did(state, d.did); - } - TyBox(_) => { - byte!(9); - } - TyArray(_, n) => { - byte!(10); - n.hash(state); - } - TySlice(_) => { - byte!(11); - } - TyRawPtr(m) => { - byte!(12); - mt(state, m); - } - TyRef(r, m) => { - byte!(13); - region(state, *r); - mt(state, m); - } - TyBareFn(opt_def_id, ref b) => { - byte!(14); - hash!(opt_def_id); - hash!(b.unsafety); - hash!(b.abi); - fn_sig(state, &b.sig); - return false; - } - TyTrait(ref data) => { - byte!(17); - did(state, data.principal_def_id()); - hash!(data.bounds); - - let principal = tcx.anonymize_late_bound_regions(&data.principal).0; - for subty in &principal.substs.types { - helper(tcx, subty, svh, state); - } - - return false; - } - TyStruct(d, _) => { - byte!(18); - did(state, d.did); - } - TyTuple(ref inner) => { - byte!(19); - hash!(inner.len()); - } - TyParam(p) => { - byte!(20); - hash!(p.space); - hash!(p.idx); - hash!(p.name.as_str()); - } - TyInfer(_) => unreachable!(), - TyError => byte!(21), - TyClosure(d, _) => { - byte!(22); - did(state, d); - 
} - TyProjection(ref data) => { - byte!(23); - did(state, data.trait_ref.def_id); - hash!(data.item_name.as_str()); - } - } - true - }); - } - } - - /// Returns true if this ADT is a dtorck type. - /// - /// Invoking the destructor of a dtorck type during usual cleanup - /// (e.g. the glue emitted for stack unwinding) requires all - /// lifetimes in the type-structure of `adt` to strictly outlive - /// the adt value itself. - /// - /// If `adt` is not dtorck, then the adt's destructor can be - /// invoked even when there are lifetimes in the type-structure of - /// `adt` that do not strictly outlive the adt value itself. - /// (This allows programs to make cyclic structures without - /// resorting to unasfe means; see RFCs 769 and 1238). - pub fn is_adt_dtorck(&self, adt: ty::AdtDef<'tcx>) -> bool { - let dtor_method = match adt.destructor() { - Some(dtor) => dtor, - None => return false - }; - - // RFC 1238: if the destructor method is tagged with the - // attribute `unsafe_destructor_blind_to_params`, then the - // compiler is being instructed to *assume* that the - // destructor will not access borrowed data, - // even if such data is otherwise reachable. - // - // Such access can be in plain sight (e.g. dereferencing - // `*foo.0` of `Foo<'a>(&'a u32)`) or indirectly hidden - // (e.g. calling `foo.0.clone()` of `Foo`). - return !self.has_attr(dtor_method, "unsafe_destructor_blind_to_params"); - } -} - -#[derive(Debug)] -pub struct ImplMethod<'tcx> { - pub method: Rc>, - pub substs: Substs<'tcx>, - pub is_provided: bool -} - -impl<'tcx> ty::ctxt<'tcx> { - #[inline(never)] // is this perfy enough? 
- pub fn get_impl_method(&self, - impl_def_id: DefId, - substs: Substs<'tcx>, - name: Name) - -> ImplMethod<'tcx> - { - // there don't seem to be nicer accessors to these: - let impl_or_trait_items_map = self.impl_or_trait_items.borrow(); - - for impl_item in &self.impl_items.borrow()[&impl_def_id] { - if let ty::MethodTraitItem(ref meth) = - impl_or_trait_items_map[&impl_item.def_id()] { - if meth.name == name { - return ImplMethod { - method: meth.clone(), - substs: substs, - is_provided: false - } - } - } - } - - // It is not in the impl - get the default from the trait. - let trait_ref = self.impl_trait_ref(impl_def_id).unwrap(); - for trait_item in self.trait_items(trait_ref.def_id).iter() { - if let &ty::MethodTraitItem(ref meth) = trait_item { - if meth.name == name { - let impl_to_trait_substs = self - .make_substs_for_receiver_types(&trait_ref, meth); - return ImplMethod { - method: meth.clone(), - substs: impl_to_trait_substs.subst(self, &substs), - is_provided: true - } - } - } - } - - self.sess.bug(&format!("method {:?} not found in {:?}", - name, impl_def_id)) - } -} - -impl<'tcx> ty::TyS<'tcx> { - fn impls_bound<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>, - bound: ty::BuiltinBound, - span: Span) - -> bool - { - let tcx = param_env.tcx; - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env.clone())); - - let is_impld = traits::type_known_to_meet_builtin_bound(&infcx, - self, bound, span); - - debug!("Ty::impls_bound({:?}, {:?}) = {:?}", - self, bound, is_impld); - - is_impld - } - - // FIXME (@jroesch): I made this public to use it, not sure if should be private - pub fn moves_by_default<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>, - span: Span) -> bool { - if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) { - return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT); - } - - assert!(!self.needs_infer()); - - // Fast-path for primitive types - let result = match self.sty { - TyBool | TyChar | 
TyInt(..) | TyUint(..) | TyFloat(..) | - TyRawPtr(..) | TyBareFn(..) | TyRef(_, TypeAndMut { - mutbl: hir::MutImmutable, .. - }) => Some(false), - - TyStr | TyBox(..) | TyRef(_, TypeAndMut { - mutbl: hir::MutMutable, .. - }) => Some(true), - - TyArray(..) | TySlice(_) | TyTrait(..) | TyTuple(..) | - TyClosure(..) | TyEnum(..) | TyStruct(..) | - TyProjection(..) | TyParam(..) | TyInfer(..) | TyError => None - }.unwrap_or_else(|| !self.impls_bound(param_env, ty::BoundCopy, span)); - - if !self.has_param_types() && !self.has_self_ty() { - self.flags.set(self.flags.get() | if result { - TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT - } else { - TypeFlags::MOVENESS_CACHED - }); - } - - result - } - - #[inline] - pub fn is_sized<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>, - span: Span) -> bool - { - if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) { - return self.flags.get().intersects(TypeFlags::IS_SIZED); - } - - self.is_sized_uncached(param_env, span) - } - - fn is_sized_uncached<'a>(&'tcx self, param_env: &ParameterEnvironment<'a,'tcx>, - span: Span) -> bool { - assert!(!self.needs_infer()); - - // Fast-path for primitive types - let result = match self.sty { - TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | - TyBox(..) | TyRawPtr(..) | TyRef(..) | TyBareFn(..) | - TyArray(..) | TyTuple(..) | TyClosure(..) => Some(true), - - TyStr | TyTrait(..) | TySlice(_) => Some(false), - - TyEnum(..) | TyStruct(..) | TyProjection(..) | TyParam(..) | - TyInfer(..) | TyError => None - }.unwrap_or_else(|| self.impls_bound(param_env, ty::BoundSized, span)); - - if !self.has_param_types() && !self.has_self_ty() { - self.flags.set(self.flags.get() | if result { - TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED - } else { - TypeFlags::SIZEDNESS_CACHED - }); - } - - result - } - - - /// Check whether a type is representable. This means it cannot contain unboxed - /// structural recursion. This check is needed for structs and enums. 
- pub fn is_representable(&'tcx self, cx: &ty::ctxt<'tcx>, sp: Span) -> Representability { - - // Iterate until something non-representable is found - fn find_nonrepresentable<'tcx, It: Iterator>>(cx: &ty::ctxt<'tcx>, - sp: Span, - seen: &mut Vec>, - iter: It) - -> Representability { - iter.fold(Representability::Representable, - |r, ty| cmp::max(r, is_type_structurally_recursive(cx, sp, seen, ty))) - } - - fn are_inner_types_recursive<'tcx>(cx: &ty::ctxt<'tcx>, sp: Span, - seen: &mut Vec>, ty: Ty<'tcx>) - -> Representability { - match ty.sty { - TyTuple(ref ts) => { - find_nonrepresentable(cx, sp, seen, ts.iter().cloned()) - } - // Fixed-length vectors. - // FIXME(#11924) Behavior undecided for zero-length vectors. - TyArray(ty, _) => { - is_type_structurally_recursive(cx, sp, seen, ty) - } - TyStruct(def, substs) | TyEnum(def, substs) => { - find_nonrepresentable(cx, - sp, - seen, - def.all_fields().map(|f| f.ty(cx, substs))) - } - TyClosure(..) => { - // this check is run on type definitions, so we don't expect - // to see closure types - cx.sess.bug(&format!("requires check invoked on inapplicable type: {:?}", ty)) - } - _ => Representability::Representable, - } - } - - fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool { - match ty.sty { - TyStruct(ty_def, _) | TyEnum(ty_def, _) => { - ty_def == def - } - _ => false - } - } - - fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match (&a.sty, &b.sty) { - (&TyStruct(did_a, ref substs_a), &TyStruct(did_b, ref substs_b)) | - (&TyEnum(did_a, ref substs_a), &TyEnum(did_b, ref substs_b)) => { - if did_a != did_b { - return false; - } - - let types_a = substs_a.types.get_slice(subst::TypeSpace); - let types_b = substs_b.types.get_slice(subst::TypeSpace); - - let mut pairs = types_a.iter().zip(types_b); - - pairs.all(|(&a, &b)| same_type(a, b)) - } - _ => { - a == b - } - } - } - - // Does the type `ty` directly (without indirection through a pointer) - // contain any types on stack 
`seen`? - fn is_type_structurally_recursive<'tcx>(cx: &ty::ctxt<'tcx>, - sp: Span, - seen: &mut Vec>, - ty: Ty<'tcx>) -> Representability { - debug!("is_type_structurally_recursive: {:?}", ty); - - match ty.sty { - TyStruct(def, _) | TyEnum(def, _) => { - { - // Iterate through stack of previously seen types. - let mut iter = seen.iter(); - - // The first item in `seen` is the type we are actually curious about. - // We want to return SelfRecursive if this type contains itself. - // It is important that we DON'T take generic parameters into account - // for this check, so that Bar in this example counts as SelfRecursive: - // - // struct Foo; - // struct Bar { x: Bar } - - match iter.next() { - Some(&seen_type) => { - if same_struct_or_enum(seen_type, def) { - debug!("SelfRecursive: {:?} contains {:?}", - seen_type, - ty); - return Representability::SelfRecursive; - } - } - None => {} - } - - // We also need to know whether the first item contains other types - // that are structurally recursive. If we don't catch this case, we - // will recurse infinitely for some inputs. - // - // It is important that we DO take generic parameters into account - // here, so that code like this is considered SelfRecursive, not - // ContainsRecursive: - // - // struct Foo { Option> } - - for &seen_type in iter { - if same_type(ty, seen_type) { - debug!("ContainsRecursive: {:?} contains {:?}", - seen_type, - ty); - return Representability::ContainsRecursive; - } - } - } - - // For structs and enums, track all previously seen types by pushing them - // onto the 'seen' stack. - seen.push(ty); - let out = are_inner_types_recursive(cx, sp, seen, ty); - seen.pop(); - out - } - _ => { - // No need to push in other cases. 
- are_inner_types_recursive(cx, sp, seen, ty) - } - } - } - - debug!("is_type_representable: {:?}", self); - - // To avoid a stack overflow when checking an enum variant or struct that - // contains a different, structurally recursive type, maintain a stack - // of seen types and check recursion for each of them (issues #3008, #3779). - let mut seen: Vec = Vec::new(); - let r = is_type_structurally_recursive(cx, sp, &mut seen, self); - debug!("is_type_representable: {:?} is {:?}", self, r); - r - } -} diff --git a/src/librustc/middle/ty/walk.rs b/src/librustc/middle/ty/walk.rs deleted file mode 100644 index 81cad4486904b..0000000000000 --- a/src/librustc/middle/ty/walk.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! An iterator over the type substructure. -//! WARNING: this does not keep track of the region depth. - -use middle::ty::{self, Ty}; -use std::iter::Iterator; -use std::vec::IntoIter; - -pub struct TypeWalker<'tcx> { - stack: Vec>, - last_subtree: usize, -} - -impl<'tcx> TypeWalker<'tcx> { - pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> { - TypeWalker { stack: vec!(ty), last_subtree: 1, } - } - - /// Skips the subtree of types corresponding to the last type - /// returned by `next()`. - /// - /// Example: Imagine you are walking `Foo, usize>`. 
- /// - /// ``` - /// let mut iter: TypeWalker = ...; - /// iter.next(); // yields Foo - /// iter.next(); // yields Bar - /// iter.skip_current_subtree(); // skips int - /// iter.next(); // yields usize - /// ``` - pub fn skip_current_subtree(&mut self) { - self.stack.truncate(self.last_subtree); - } -} - -impl<'tcx> Iterator for TypeWalker<'tcx> { - type Item = Ty<'tcx>; - - fn next(&mut self) -> Option> { - debug!("next(): stack={:?}", self.stack); - match self.stack.pop() { - None => { - return None; - } - Some(ty) => { - self.last_subtree = self.stack.len(); - push_subtypes(&mut self.stack, ty); - debug!("next: stack={:?}", self.stack); - Some(ty) - } - } - } -} - -pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> IntoIter> { - let mut stack = vec![]; - push_subtypes(&mut stack, ty); - stack.into_iter() -} - -fn push_subtypes<'tcx>(stack: &mut Vec>, parent_ty: Ty<'tcx>) { - match parent_ty.sty { - ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | - ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyError => { - } - ty::TyBox(ty) | ty::TyArray(ty, _) | ty::TySlice(ty) => { - stack.push(ty); - } - ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => { - stack.push(mt.ty); - } - ty::TyProjection(ref data) => { - push_reversed(stack, data.trait_ref.substs.types.as_slice()); - } - ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { - push_reversed(stack, principal.substs().types.as_slice()); - push_reversed(stack, &bounds.projection_bounds.iter().map(|pred| { - pred.0.ty - }).collect::>()); - } - ty::TyEnum(_, ref substs) | - ty::TyStruct(_, ref substs) => { - push_reversed(stack, substs.types.as_slice()); - } - ty::TyClosure(_, ref substs) => { - push_reversed(stack, substs.func_substs.types.as_slice()); - push_reversed(stack, &substs.upvar_tys); - } - ty::TyTuple(ref ts) => { - push_reversed(stack, ts); - } - ty::TyBareFn(_, ref ft) => { - push_sig_subtypes(stack, &ft.sig); - } - } -} - -fn push_sig_subtypes<'tcx>(stack: &mut Vec>, sig: 
&ty::PolyFnSig<'tcx>) { - match sig.0.output { - ty::FnConverging(output) => { stack.push(output); } - ty::FnDiverging => { } - } - push_reversed(stack, &sig.0.inputs); -} - -fn push_reversed<'tcx>(stack: &mut Vec>, tys: &[Ty<'tcx>]) { - // We push slices on the stack in reverse order so as to - // maintain a pre-order traversal. As of the time of this - // writing, the fact that the traversal is pre-order is not - // known to be significant to any code, but it seems like the - // natural order one would expect (basically, the order of the - // types as they are written). - for &ty in tys.iter().rev() { - stack.push(ty); - } -} diff --git a/src/librustc/middle/weak_lang_items.rs b/src/librustc/middle/weak_lang_items.rs index 6059d7ee74e39..c6df1497e681d 100644 --- a/src/librustc/middle/weak_lang_items.rs +++ b/src/librustc/middle/weak_lang_items.rs @@ -12,15 +12,15 @@ use session::config; use session::Session; -use middle::cstore::CrateStore; use middle::lang_items; +use rustc_back::PanicStrategy; use syntax::ast; -use syntax::codemap::Span; -use syntax::parse::token::InternedString; -use rustc_front::intravisit::Visitor; -use rustc_front::intravisit; -use rustc_front::hir; +use syntax::symbol::Symbol; +use syntax_pos::Span; +use hir::intravisit::{Visitor, NestedVisitorMap}; +use hir::intravisit; +use hir; use std::collections::HashSet; @@ -50,15 +50,15 @@ pub fn check_crate(krate: &hir::Crate, { let mut cx = Context { sess: sess, items: items }; - krate.visit_all_items(&mut cx); + krate.visit_all_item_likes(&mut cx.as_deep_visitor()); } verify(sess, items); } -pub fn link_name(attrs: &[ast::Attribute]) -> Option { +pub fn link_name(attrs: &[ast::Attribute]) -> Option { lang_items::extract(attrs).and_then(|name| { - $(if &name[..] 
== stringify!($name) { - Some(InternedString::new(stringify!($sym))) + $(if name == stringify!($name) { + Some(Symbol::intern(stringify!($sym))) } else)* { None } @@ -71,12 +71,17 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { let needs_check = sess.crate_types.borrow().iter().any(|kind| { match *kind { config::CrateTypeDylib | + config::CrateTypeProcMacro | + config::CrateTypeCdylib | config::CrateTypeExecutable | config::CrateTypeStaticlib => true, - config::CrateTypeRlib => false, + config::CrateTypeRlib | + config::CrateTypeMetadata => false, } }); - if !needs_check { return } + if !needs_check { + return + } let mut missing = HashSet::new(); for cnum in sess.cstore.crates() { @@ -85,8 +90,19 @@ fn verify(sess: &Session, items: &lang_items::LanguageItems) { } } + // If we're not compiling with unwinding, we won't actually need these + // symbols. Other panic runtimes ensure that the relevant symbols are + // available to link things together, but they're never exercised. 
+ let mut whitelisted = HashSet::new(); + if sess.panic_strategy() != PanicStrategy::Unwind { + whitelisted.insert(lang_items::EhPersonalityLangItem); + whitelisted.insert(lang_items::EhUnwindResumeLangItem); + } + $( - if missing.contains(&lang_items::$item) && items.$name().is_none() { + if missing.contains(&lang_items::$item) && + !whitelisted.contains(&lang_items::$item) && + items.$name().is_none() { sess.err(&format!("language item required, but not found: `{}`", stringify!($name))); @@ -109,10 +125,13 @@ impl<'a> Context<'a> { } impl<'a, 'v> Visitor<'v> for Context<'a> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None + } + fn visit_foreign_item(&mut self, i: &hir::ForeignItem) { - match lang_items::extract(&i.attrs) { - None => {} - Some(lang_item) => self.register(&lang_item, i.span), + if let Some(lang_item) = lang_items::extract(&i.attrs) { + self.register(&lang_item.as_str(), i.span); } intravisit::walk_foreign_item(self, i) } diff --git a/src/librustc/mir/cache.rs b/src/librustc/mir/cache.rs new file mode 100644 index 0000000000000..bc9bbebb1796a --- /dev/null +++ b/src/librustc/mir/cache.rs @@ -0,0 +1,69 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::cell::{Ref, RefCell}; +use rustc_data_structures::indexed_vec::IndexVec; + +use mir::{Mir, BasicBlock}; + +use rustc_serialize as serialize; + +#[derive(Clone, Debug)] +pub struct Cache { + predecessors: RefCell>>> +} + + +impl serialize::Encodable for Cache { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + serialize::Encodable::encode(&(), s) + } +} + +impl serialize::Decodable for Cache { + fn decode(d: &mut D) -> Result { + serialize::Decodable::decode(d).map(|_v: ()| Self::new()) + } +} + + +impl Cache { + pub fn new() -> Self { + Cache { + predecessors: RefCell::new(None) + } + } + + pub fn invalidate(&self) { + // FIXME: consider being more fine-grained + *self.predecessors.borrow_mut() = None; + } + + pub fn predecessors(&self, mir: &Mir) -> Ref>> { + if self.predecessors.borrow().is_none() { + *self.predecessors.borrow_mut() = Some(calculate_predecessors(mir)); + } + + Ref::map(self.predecessors.borrow(), |p| p.as_ref().unwrap()) + } +} + +fn calculate_predecessors(mir: &Mir) -> IndexVec> { + let mut result = IndexVec::from_elem(vec![], mir.basic_blocks()); + for (bb, data) in mir.basic_blocks().iter_enumerated() { + if let Some(ref term) = data.terminator { + for &tgt in term.successors().iter() { + result[tgt].push(bb); + } + } + } + + result +} diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs new file mode 100644 index 0000000000000..3cd3580473292 --- /dev/null +++ b/src/librustc/mir/mod.rs @@ -0,0 +1,1339 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use graphviz::IntoCow; +use middle::const_val::ConstVal; +use rustc_const_math::{ConstUsize, ConstInt, ConstMathErr}; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc_data_structures::control_flow_graph::dominators::{Dominators, dominators}; +use rustc_data_structures::control_flow_graph::{GraphPredecessors, GraphSuccessors}; +use rustc_data_structures::control_flow_graph::ControlFlowGraph; +use hir::def::CtorKind; +use hir::def_id::DefId; +use ty::subst::Substs; +use ty::{self, AdtDef, ClosureSubsts, Region, Ty}; +use util::ppaux; +use rustc_back::slice; +use hir::InlineAsm; +use std::ascii; +use std::borrow::{Cow}; +use std::cell::Ref; +use std::fmt::{self, Debug, Formatter, Write}; +use std::{iter, u32}; +use std::ops::{Index, IndexMut}; +use std::vec::IntoIter; +use syntax::ast::{self, Name}; +use syntax_pos::Span; + +mod cache; +pub mod tcx; +pub mod visit; +pub mod transform; +pub mod traversal; + +macro_rules! newtype_index { + ($name:ident, $debug_name:expr) => ( + #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, + RustcEncodable, RustcDecodable)] + pub struct $name(u32); + + impl Idx for $name { + fn new(value: usize) -> Self { + assert!(value < (u32::MAX) as usize); + $name(value as u32) + } + fn index(self) -> usize { + self.0 as usize + } + } + + impl Debug for $name { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "{}{}", $debug_name, self.0) + } + } + ) +} + +/// Lowered representation of a single function. +// Do not implement clone for Mir, its easy to do so accidently and its kind of expensive. +#[derive(RustcEncodable, RustcDecodable, Debug)] +pub struct Mir<'tcx> { + /// List of basic blocks. References to basic block use a newtyped index type `BasicBlock` + /// that indexes into this vector. + basic_blocks: IndexVec>, + + /// List of visibility (lexical) scopes; these are referenced by statements + /// and used (eventually) for debuginfo. Indexed by a `VisibilityScope`. 
+ pub visibility_scopes: IndexVec, + + /// Rvalues promoted from this function, such as borrows of constants. + /// Each of them is the Mir of a constant with the fn's type parameters + /// in scope, but a separate set of locals. + pub promoted: IndexVec>, + + /// Return type of the function. + pub return_ty: Ty<'tcx>, + + /// Declarations of locals. + /// + /// The first local is the return value pointer, followed by `arg_count` + /// locals for the function arguments, followed by any user-declared + /// variables and temporaries. + pub local_decls: IndexVec>, + + /// Number of arguments this function takes. + /// + /// Starting at local 1, `arg_count` locals will be provided by the caller + /// and can be assumed to be initialized. + /// + /// If this MIR was built for a constant, this will be 0. + pub arg_count: usize, + + /// Names and capture modes of all the closure upvars, assuming + /// the first argument is either the closure or a reference to it. + pub upvar_decls: Vec, + + /// Mark an argument local (which must be a tuple) as getting passed as + /// its individual components at the LLVM level. + /// + /// This is used for the "rust-call" ABI. 
+ pub spread_arg: Option, + + /// A span representing this MIR, for error reporting + pub span: Span, + + /// A cache for various calculations + cache: cache::Cache +} + +/// where execution begins +pub const START_BLOCK: BasicBlock = BasicBlock(0); + +impl<'tcx> Mir<'tcx> { + pub fn new(basic_blocks: IndexVec>, + visibility_scopes: IndexVec, + promoted: IndexVec>, + return_ty: Ty<'tcx>, + local_decls: IndexVec>, + arg_count: usize, + upvar_decls: Vec, + span: Span) -> Self + { + // We need `arg_count` locals, and one for the return pointer + assert!(local_decls.len() >= arg_count + 1, + "expected at least {} locals, got {}", arg_count + 1, local_decls.len()); + assert_eq!(local_decls[RETURN_POINTER].ty, return_ty); + + Mir { + basic_blocks: basic_blocks, + visibility_scopes: visibility_scopes, + promoted: promoted, + return_ty: return_ty, + local_decls: local_decls, + arg_count: arg_count, + upvar_decls: upvar_decls, + spread_arg: None, + span: span, + cache: cache::Cache::new() + } + } + + #[inline] + pub fn basic_blocks(&self) -> &IndexVec> { + &self.basic_blocks + } + + #[inline] + pub fn basic_blocks_mut(&mut self) -> &mut IndexVec> { + self.cache.invalidate(); + &mut self.basic_blocks + } + + #[inline] + pub fn predecessors(&self) -> Ref>> { + self.cache.predecessors(self) + } + + #[inline] + pub fn predecessors_for(&self, bb: BasicBlock) -> Ref> { + Ref::map(self.predecessors(), |p| &p[bb]) + } + + #[inline] + pub fn dominators(&self) -> Dominators { + dominators(self) + } + + #[inline] + pub fn local_kind(&self, local: Local) -> LocalKind { + let index = local.0 as usize; + if index == 0 { + debug_assert!(self.local_decls[local].mutability == Mutability::Mut, + "return pointer should be mutable"); + + LocalKind::ReturnPointer + } else if index < self.arg_count + 1 { + LocalKind::Arg + } else if self.local_decls[local].name.is_some() { + LocalKind::Var + } else { + debug_assert!(self.local_decls[local].mutability == Mutability::Mut, + "temp should be 
mutable"); + + LocalKind::Temp + } + } + + /// Returns an iterator over all temporaries. + #[inline] + pub fn temps_iter<'a>(&'a self) -> impl Iterator + 'a { + (self.arg_count+1..self.local_decls.len()).filter_map(move |index| { + let local = Local::new(index); + if self.local_decls[local].source_info.is_none() { + Some(local) + } else { + None + } + }) + } + + /// Returns an iterator over all user-declared locals. + #[inline] + pub fn vars_iter<'a>(&'a self) -> impl Iterator + 'a { + (self.arg_count+1..self.local_decls.len()).filter_map(move |index| { + let local = Local::new(index); + if self.local_decls[local].source_info.is_none() { + None + } else { + Some(local) + } + }) + } + + /// Returns an iterator over all function arguments. + #[inline] + pub fn args_iter(&self) -> impl Iterator { + let arg_count = self.arg_count; + (1..arg_count+1).map(Local::new) + } + + /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all + /// locals that are neither arguments nor the return pointer). + #[inline] + pub fn vars_and_temps_iter(&self) -> impl Iterator { + let arg_count = self.arg_count; + let local_count = self.local_decls.len(); + (arg_count+1..local_count).map(Local::new) + } + + /// Changes a statement to a nop. This is both faster than deleting instructions and avoids + /// invalidating statement indices in `Location`s. 
+ pub fn make_statement_nop(&mut self, location: Location) { + let block = &mut self[location.block]; + debug_assert!(location.statement_index < block.statements.len()); + block.statements[location.statement_index].make_nop() + } +} + +impl<'tcx> Index for Mir<'tcx> { + type Output = BasicBlockData<'tcx>; + + #[inline] + fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> { + &self.basic_blocks()[index] + } +} + +impl<'tcx> IndexMut for Mir<'tcx> { + #[inline] + fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> { + &mut self.basic_blocks_mut()[index] + } +} + +/// Grouped information about the source code origin of a MIR entity. +/// Intended to be inspected by diagnostics and debuginfo. +/// Most passes can work with it as a whole, within a single function. +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub struct SourceInfo { + /// Source span for the AST pertaining to this MIR entity. + pub span: Span, + + /// The lexical visibility scope, i.e. which bindings can be seen. + pub scope: VisibilityScope +} + +/////////////////////////////////////////////////////////////////////////// +// Mutability and borrow kinds + +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum Mutability { + Mut, + Not, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum BorrowKind { + /// Data must be immutable and is aliasable. + Shared, + + /// Data must be immutable but not aliasable. This kind of borrow + /// cannot currently be expressed by the user and is used only in + /// implicit closure bindings. 
It is needed when you the closure + /// is borrowing or mutating a mutable referent, e.g.: + /// + /// let x: &mut isize = ...; + /// let y = || *x += 5; + /// + /// If we were to try to translate this closure into a more explicit + /// form, we'd encounter an error with the code as written: + /// + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// + /// This is then illegal because you cannot mutate a `&mut` found + /// in an aliasable location. To solve, you'd have to translate with + /// an `&mut` borrow: + /// + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// + /// Now the assignment to `**env.x` is legal, but creating a + /// mutable pointer to `x` is not because `x` is not mutable. We + /// could fix this by declaring `x` as `let mut x`. This is ok in + /// user code, if awkward, but extra weird for closures, since the + /// borrow is hidden. + /// + /// So we introduce a "unique imm" borrow -- the referent is + /// immutable, but not aliasable. This solves the problem. For + /// simplicity, we don't give users the way to express this + /// borrow, it's just used when translating closures. + Unique, + + /// Data is mutable and not aliasable. + Mut, +} + +/////////////////////////////////////////////////////////////////////////// +// Variables and temps + +newtype_index!(Local, "_"); + +pub const RETURN_POINTER: Local = Local(0); + +/// Classifies locals into categories. See `Mir::local_kind`. +#[derive(PartialEq, Eq, Debug)] +pub enum LocalKind { + /// User-declared variable binding + Var, + /// Compiler-introduced temporary + Temp, + /// Function argument + Arg, + /// Location of function's return value + ReturnPointer, +} + +/// A MIR local. 
+/// +/// This can be a binding declared by the user, a temporary inserted by the compiler, a function +/// argument, or the return pointer. +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct LocalDecl<'tcx> { + /// `let mut x` vs `let x`. + /// + /// Temporaries and the return pointer are always mutable. + pub mutability: Mutability, + + /// Type of this local. + pub ty: Ty<'tcx>, + + /// Name of the local, used in debuginfo and pretty-printing. + /// + /// Note that function arguments can also have this set to `Some(_)` + /// to generate better debuginfo. + pub name: Option, + + /// For user-declared variables, stores their source information. + /// + /// For temporaries, this is `None`. + /// + /// This is the primary way to differentiate between user-declared + /// variables and compiler-generated temporaries. + pub source_info: Option, +} + +impl<'tcx> LocalDecl<'tcx> { + /// Create a new `LocalDecl` for a temporary. + #[inline] + pub fn new_temp(ty: Ty<'tcx>) -> Self { + LocalDecl { + mutability: Mutability::Mut, + ty: ty, + name: None, + source_info: None, + } + } + + /// Builds a `LocalDecl` for the return pointer. + /// + /// This must be inserted into the `local_decls` list as the first local. + #[inline] + pub fn new_return_pointer(return_ty: Ty) -> LocalDecl { + LocalDecl { + mutability: Mutability::Mut, + ty: return_ty, + source_info: None, + name: None, // FIXME maybe we do want some name here? + } + } +} + +/// A closure capture, with its name and mode. +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct UpvarDecl { + pub debug_name: Name, + + /// If true, the capture is behind a reference. 
+ pub by_ref: bool +} + +/////////////////////////////////////////////////////////////////////////// +// BasicBlock + +newtype_index!(BasicBlock, "bb"); + +/////////////////////////////////////////////////////////////////////////// +// BasicBlockData and Terminator + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct BasicBlockData<'tcx> { + /// List of statements in this block. + pub statements: Vec>, + + /// Terminator for this block. + /// + /// NB. This should generally ONLY be `None` during construction. + /// Therefore, you should generally access it via the + /// `terminator()` or `terminator_mut()` methods. The only + /// exception is that certain passes, such as `simplify_cfg`, swap + /// out the terminator temporarily with `None` while they continue + /// to recurse over the set of basic blocks. + pub terminator: Option>, + + /// If true, this block lies on an unwind path. This is used + /// during trans where distinct kinds of basic blocks may be + /// generated (particularly for MSVC cleanup). Unwind blocks must + /// only branch to other unwind blocks. 
+ pub is_cleanup: bool, +} + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct Terminator<'tcx> { + pub source_info: SourceInfo, + pub kind: TerminatorKind<'tcx> +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub enum TerminatorKind<'tcx> { + /// block should have one successor in the graph; we jump there + Goto { + target: BasicBlock, + }, + + /// jump to branch 0 if this lvalue evaluates to true + If { + cond: Operand<'tcx>, + targets: (BasicBlock, BasicBlock), + }, + + /// lvalue evaluates to some enum; jump depending on the branch + Switch { + discr: Lvalue<'tcx>, + adt_def: &'tcx AdtDef, + targets: Vec, + }, + + /// operand evaluates to an integer; jump depending on its value + /// to one of the targets, and otherwise fallback to `otherwise` + SwitchInt { + /// discriminant value being tested + discr: Lvalue<'tcx>, + + /// type of value being tested + switch_ty: Ty<'tcx>, + + /// Possible values. The locations to branch to in each case + /// are found in the corresponding indices from the `targets` vector. + values: Vec, + + /// Possible branch sites. The length of this vector should be + /// equal to the length of the `values` vector plus 1 -- the + /// extra item is the block to branch to if none of the values + /// fit. + targets: Vec, + }, + + /// Indicates that the landing pad is finished and unwinding should + /// continue. Emitted by build::scope::diverge_cleanup. + Resume, + + /// Indicates a normal return. The return pointer lvalue should + /// have been filled in by now. This should occur at most once. + Return, + + /// Indicates a terminator that can never be reached. 
+ Unreachable, + + /// Drop the Lvalue + Drop { + location: Lvalue<'tcx>, + target: BasicBlock, + unwind: Option + }, + + /// Drop the Lvalue and assign the new value over it + DropAndReplace { + location: Lvalue<'tcx>, + value: Operand<'tcx>, + target: BasicBlock, + unwind: Option, + }, + + /// Block ends with a call of a converging function + Call { + /// The function that’s being called + func: Operand<'tcx>, + /// Arguments the function is called with + args: Vec>, + /// Destination for the return value. If some, the call is converging. + destination: Option<(Lvalue<'tcx>, BasicBlock)>, + /// Cleanups to be done if the call unwinds. + cleanup: Option + }, + + /// Jump to the target if the condition has the expected value, + /// otherwise panic with a message and a cleanup target. + Assert { + cond: Operand<'tcx>, + expected: bool, + msg: AssertMessage<'tcx>, + target: BasicBlock, + cleanup: Option + } +} + +impl<'tcx> Terminator<'tcx> { + pub fn successors(&self) -> Cow<[BasicBlock]> { + self.kind.successors() + } + + pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + self.kind.successors_mut() + } +} + +impl<'tcx> TerminatorKind<'tcx> { + pub fn successors(&self) -> Cow<[BasicBlock]> { + use self::TerminatorKind::*; + match *self { + Goto { target: ref b } => slice::ref_slice(b).into_cow(), + If { targets: (b1, b2), .. } => vec![b1, b2].into_cow(), + Switch { targets: ref b, .. } => b[..].into_cow(), + SwitchInt { targets: ref b, .. } => b[..].into_cow(), + Resume => (&[]).into_cow(), + Return => (&[]).into_cow(), + Unreachable => (&[]).into_cow(), + Call { destination: Some((_, t)), cleanup: Some(c), .. } => vec![t, c].into_cow(), + Call { destination: Some((_, ref t)), cleanup: None, .. } => + slice::ref_slice(t).into_cow(), + Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(), + Call { destination: None, cleanup: None, .. } => (&[]).into_cow(), + DropAndReplace { target, unwind: Some(unwind), .. 
} | + Drop { target, unwind: Some(unwind), .. } => { + vec![target, unwind].into_cow() + } + DropAndReplace { ref target, unwind: None, .. } | + Drop { ref target, unwind: None, .. } => { + slice::ref_slice(target).into_cow() + } + Assert { target, cleanup: Some(unwind), .. } => vec![target, unwind].into_cow(), + Assert { ref target, .. } => slice::ref_slice(target).into_cow(), + } + } + + // FIXME: no mootable cow. I’m honestly not sure what a “cow” between `&mut [BasicBlock]` and + // `Vec<&mut BasicBlock>` would look like in the first place. + pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> { + use self::TerminatorKind::*; + match *self { + Goto { target: ref mut b } => vec![b], + If { targets: (ref mut b1, ref mut b2), .. } => vec![b1, b2], + Switch { targets: ref mut b, .. } => b.iter_mut().collect(), + SwitchInt { targets: ref mut b, .. } => b.iter_mut().collect(), + Resume => Vec::new(), + Return => Vec::new(), + Unreachable => Vec::new(), + Call { destination: Some((_, ref mut t)), cleanup: Some(ref mut c), .. } => vec![t, c], + Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t], + Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c], + Call { destination: None, cleanup: None, .. } => vec![], + DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } | + Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind], + DropAndReplace { ref mut target, unwind: None, .. } | + Drop { ref mut target, unwind: None, .. } => { + vec![target] + } + Assert { ref mut target, cleanup: Some(ref mut unwind), .. } => vec![target, unwind], + Assert { ref mut target, .. } => vec![target] + } + } +} + +impl<'tcx> BasicBlockData<'tcx> { + pub fn new(terminator: Option>) -> BasicBlockData<'tcx> { + BasicBlockData { + statements: vec![], + terminator: terminator, + is_cleanup: false, + } + } + + /// Accessor for terminator. 
+ /// + /// Terminator may not be None after construction of the basic block is complete. This accessor + /// provides a convenience way to reach the terminator. + pub fn terminator(&self) -> &Terminator<'tcx> { + self.terminator.as_ref().expect("invalid terminator state") + } + + pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> { + self.terminator.as_mut().expect("invalid terminator state") + } +} + +impl<'tcx> Debug for TerminatorKind<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + self.fmt_head(fmt)?; + let successors = self.successors(); + let labels = self.fmt_successor_labels(); + assert_eq!(successors.len(), labels.len()); + + match successors.len() { + 0 => Ok(()), + + 1 => write!(fmt, " -> {:?}", successors[0]), + + _ => { + write!(fmt, " -> [")?; + for (i, target) in successors.iter().enumerate() { + if i > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{}: {:?}", labels[i], target)?; + } + write!(fmt, "]") + } + + } + } +} + +impl<'tcx> TerminatorKind<'tcx> { + /// Write the "head" part of the terminator; that is, its name and the data it uses to pick the + /// successor basic block, if any. The only information not inlcuded is the list of possible + /// successors, which may be rendered differently between the text and the graphviz format. + pub fn fmt_head(&self, fmt: &mut W) -> fmt::Result { + use self::TerminatorKind::*; + match *self { + Goto { .. } => write!(fmt, "goto"), + If { cond: ref lv, .. } => write!(fmt, "if({:?})", lv), + Switch { discr: ref lv, .. } => write!(fmt, "switch({:?})", lv), + SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), + Return => write!(fmt, "return"), + Resume => write!(fmt, "resume"), + Unreachable => write!(fmt, "unreachable"), + Drop { ref location, .. } => write!(fmt, "drop({:?})", location), + DropAndReplace { ref location, ref value, .. } => + write!(fmt, "replace({:?} <- {:?})", location, value), + Call { ref func, ref args, ref destination, .. 
} => { + if let Some((ref destination, _)) = *destination { + write!(fmt, "{:?} = ", destination)?; + } + write!(fmt, "{:?}(", func)?; + for (index, arg) in args.iter().enumerate() { + if index > 0 { + write!(fmt, ", ")?; + } + write!(fmt, "{:?}", arg)?; + } + write!(fmt, ")") + } + Assert { ref cond, expected, ref msg, .. } => { + write!(fmt, "assert(")?; + if !expected { + write!(fmt, "!")?; + } + write!(fmt, "{:?}, ", cond)?; + + match *msg { + AssertMessage::BoundsCheck { ref len, ref index } => { + write!(fmt, "{:?}, {:?}, {:?}", + "index out of bounds: the len is {} but the index is {}", + len, index)?; + } + AssertMessage::Math(ref err) => { + write!(fmt, "{:?}", err.description())?; + } + } + + write!(fmt, ")") + } + } + } + + /// Return the list of labels for the edges to the successor basic blocks. + pub fn fmt_successor_labels(&self) -> Vec> { + use self::TerminatorKind::*; + match *self { + Return | Resume | Unreachable => vec![], + Goto { .. } => vec!["".into()], + If { .. } => vec!["true".into(), "false".into()], + Switch { ref adt_def, .. } => { + adt_def.variants + .iter() + .map(|variant| variant.name.to_string().into()) + .collect() + } + SwitchInt { ref values, .. } => { + values.iter() + .map(|const_val| { + let mut buf = String::new(); + fmt_const_val(&mut buf, const_val).unwrap(); + buf.into() + }) + .chain(iter::once(String::from("otherwise").into())) + .collect() + } + Call { destination: Some(_), cleanup: Some(_), .. } => + vec!["return".into_cow(), "unwind".into_cow()], + Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()], + Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()], + Call { destination: None, cleanup: None, .. } => vec![], + DropAndReplace { unwind: None, .. } | + Drop { unwind: None, .. } => vec!["return".into_cow()], + DropAndReplace { unwind: Some(_), .. } | + Drop { unwind: Some(_), .. 
} => { + vec!["return".into_cow(), "unwind".into_cow()] + } + Assert { cleanup: None, .. } => vec!["".into()], + Assert { .. } => + vec!["success".into_cow(), "unwind".into_cow()] + } + } +} + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum AssertMessage<'tcx> { + BoundsCheck { + len: Operand<'tcx>, + index: Operand<'tcx> + }, + Math(ConstMathErr) +} + +/////////////////////////////////////////////////////////////////////////// +// Statements + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct Statement<'tcx> { + pub source_info: SourceInfo, + pub kind: StatementKind<'tcx>, +} + +impl<'tcx> Statement<'tcx> { + /// Changes a statement to a nop. This is both faster than deleting instructions and avoids + /// invalidating statement indices in `Location`s. + pub fn make_nop(&mut self) { + self.kind = StatementKind::Nop + } +} + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum StatementKind<'tcx> { + /// Write the RHS Rvalue to the LHS Lvalue. + Assign(Lvalue<'tcx>, Rvalue<'tcx>), + + /// Write the discriminant for a variant to the enum Lvalue. + SetDiscriminant { lvalue: Lvalue<'tcx>, variant_index: usize }, + + /// Start a live range for the storage of the local. + StorageLive(Lvalue<'tcx>), + + /// End the current live range for the storage of the local. + StorageDead(Lvalue<'tcx>), + + /// No-op. Useful for deleting instructions without affecting statement indices. 
+ Nop, +} + +impl<'tcx> Debug for Statement<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::StatementKind::*; + match self.kind { + Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), + StorageLive(ref lv) => write!(fmt, "StorageLive({:?})", lv), + StorageDead(ref lv) => write!(fmt, "StorageDead({:?})", lv), + SetDiscriminant{lvalue: ref lv, variant_index: index} => { + write!(fmt, "discriminant({:?}) = {:?}", lv, index) + } + Nop => write!(fmt, "nop"), + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Lvalues + +/// A path to a value; something that can be evaluated without +/// changing or disturbing program state. +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] +pub enum Lvalue<'tcx> { + /// local variable + Local(Local), + + /// static or static mut variable + Static(DefId), + + /// projection out of an lvalue (access a field, deref a pointer, etc) + Projection(Box>), +} + +/// The `Projection` data structure defines things of the form `B.x` +/// or `*B` or `B[index]`. Note that it is parameterized because it is +/// shared between `Constant` and `Lvalue`. See the aliases +/// `LvalueProjection` etc below. +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct Projection<'tcx, B, V> { + pub base: B, + pub elem: ProjectionElem<'tcx, V>, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum ProjectionElem<'tcx, V> { + Deref, + Field(Field, Ty<'tcx>), + Index(V), + + /// These indices are generated by slice patterns. 
Easiest to explain + /// by example: + /// + /// ``` + /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false }, + /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false }, + /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true }, + /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true }, + /// ``` + ConstantIndex { + /// index or -index (in Python terms), depending on from_end + offset: u32, + /// thing being indexed must be at least this long + min_length: u32, + /// counting backwards from end? + from_end: bool, + }, + + /// These indices are generated by slice patterns. + /// + /// slice[from:-to] in Python terms. + Subslice { + from: u32, + to: u32, + }, + + /// "Downcast" to a variant of an ADT. Currently, we only introduce + /// this for ADTs with more than one variant. It may be better to + /// just introduce it always, or always for enums. + Downcast(&'tcx AdtDef, usize), +} + +/// Alias for projections as they appear in lvalues, where the base is an lvalue +/// and the index is an operand. +pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>>; + +/// Alias for projections as they appear in lvalues, where the base is an lvalue +/// and the index is an operand. 
+pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>>; + +newtype_index!(Field, "field"); + +impl<'tcx> Lvalue<'tcx> { + pub fn field(self, f: Field, ty: Ty<'tcx>) -> Lvalue<'tcx> { + self.elem(ProjectionElem::Field(f, ty)) + } + + pub fn deref(self) -> Lvalue<'tcx> { + self.elem(ProjectionElem::Deref) + } + + pub fn index(self, index: Operand<'tcx>) -> Lvalue<'tcx> { + self.elem(ProjectionElem::Index(index)) + } + + pub fn elem(self, elem: LvalueElem<'tcx>) -> Lvalue<'tcx> { + Lvalue::Projection(Box::new(LvalueProjection { + base: self, + elem: elem, + })) + } +} + +impl<'tcx> Debug for Lvalue<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::Lvalue::*; + + match *self { + Local(id) => write!(fmt, "{:?}", id), + Static(def_id) => + write!(fmt, "{}", ty::tls::with(|tcx| tcx.item_path_str(def_id))), + Projection(ref data) => + match data.elem { + ProjectionElem::Downcast(ref adt_def, index) => + write!(fmt, "({:?} as {})", data.base, adt_def.variants[index].name), + ProjectionElem::Deref => + write!(fmt, "(*{:?})", data.base), + ProjectionElem::Field(field, ty) => + write!(fmt, "({:?}.{:?}: {:?})", data.base, field.index(), ty), + ProjectionElem::Index(ref index) => + write!(fmt, "{:?}[{:?}]", data.base, index), + ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => + write!(fmt, "{:?}[{:?} of {:?}]", data.base, offset, min_length), + ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => + write!(fmt, "{:?}[-{:?} of {:?}]", data.base, offset, min_length), + ProjectionElem::Subslice { from, to } if to == 0 => + write!(fmt, "{:?}[{:?}:]", data.base, from), + ProjectionElem::Subslice { from, to } if from == 0 => + write!(fmt, "{:?}[:-{:?}]", data.base, to), + ProjectionElem::Subslice { from, to } => + write!(fmt, "{:?}[{:?}:-{:?}]", data.base, + from, to), + + }, + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Scopes + +newtype_index!(VisibilityScope, 
"scope"); +pub const ARGUMENT_VISIBILITY_SCOPE : VisibilityScope = VisibilityScope(0); + +#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct VisibilityScopeData { + pub span: Span, + pub parent_scope: Option, +} + +/////////////////////////////////////////////////////////////////////////// +// Operands + +/// These are values that can appear inside an rvalue (or an index +/// lvalue). They are intentionally limited to prevent rvalues from +/// being nested in one another. +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] +pub enum Operand<'tcx> { + Consume(Lvalue<'tcx>), + Constant(Constant<'tcx>), +} + +impl<'tcx> Debug for Operand<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::Operand::*; + match *self { + Constant(ref a) => write!(fmt, "{:?}", a), + Consume(ref lv) => write!(fmt, "{:?}", lv), + } + } +} + +/////////////////////////////////////////////////////////////////////////// +/// Rvalues + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub enum Rvalue<'tcx> { + /// x (either a move or copy, depending on type of x) + Use(Operand<'tcx>), + + /// [x; 32] + Repeat(Operand<'tcx>, TypedConstVal<'tcx>), + + /// &x or &mut x + Ref(&'tcx Region, BorrowKind, Lvalue<'tcx>), + + /// length of a [X] or [X;n] value + Len(Lvalue<'tcx>), + + Cast(CastKind, Operand<'tcx>, Ty<'tcx>), + + BinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>), + CheckedBinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>), + + UnaryOp(UnOp, Operand<'tcx>), + + /// Creates an *uninitialized* Box + Box(Ty<'tcx>), + + /// Create an aggregate value, like a tuple or struct. This is + /// only needed because we want to distinguish `dest = Foo { x: + /// ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case + /// that `Foo` has a destructor. These rvalues can be optimized + /// away after type-checking and before lowering. 
+ Aggregate(AggregateKind<'tcx>, Vec>), + + InlineAsm { + asm: InlineAsm, + outputs: Vec>, + inputs: Vec> + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum CastKind { + Misc, + + /// Convert unique, zero-sized type for a fn to fn() + ReifyFnPointer, + + /// Convert safe fn() to unsafe fn() + UnsafeFnPointer, + + /// "Unsize" -- convert a thin-or-fat pointer to a fat pointer. + /// trans must figure out the details once full monomorphization + /// is known. For example, this could be used to cast from a + /// `&[i32;N]` to a `&[i32]`, or a `Box` to a `Box` + /// (presuming `T: Trait`). + Unsize, +} + +#[derive(Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum AggregateKind<'tcx> { + Array, + Tuple, + /// The second field is variant number (discriminant), it's equal to 0 + /// for struct and union expressions. The fourth field is active field + /// number and is present only for union expressions. + Adt(&'tcx AdtDef, usize, &'tcx Substs<'tcx>, Option), + Closure(DefId, ClosureSubsts<'tcx>), +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum BinOp { + /// The `+` operator (addition) + Add, + /// The `-` operator (subtraction) + Sub, + /// The `*` operator (multiplication) + Mul, + /// The `/` operator (division) + Div, + /// The `%` operator (modulus) + Rem, + /// The `^` operator (bitwise xor) + BitXor, + /// The `&` operator (bitwise and) + BitAnd, + /// The `|` operator (bitwise or) + BitOr, + /// The `<<` operator (shift left) + Shl, + /// The `>>` operator (shift right) + Shr, + /// The `==` operator (equality) + Eq, + /// The `<` operator (less than) + Lt, + /// The `<=` operator (less than or equal to) + Le, + /// The `!=` operator (not equal to) + Ne, + /// The `>=` operator (greater than or equal to) + Ge, + /// The `>` operator (greater than) + Gt, +} + +impl BinOp { + pub fn is_checkable(self) -> bool { + use self::BinOp::*; + match self { + Add | 
Sub | Mul | Shl | Shr => true, + _ => false + } + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +pub enum UnOp { + /// The `!` operator for logical inversion + Not, + /// The `-` operator for negation + Neg, +} + +impl<'tcx> Debug for Rvalue<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::Rvalue::*; + + match *self { + Use(ref lvalue) => write!(fmt, "{:?}", lvalue), + Repeat(ref a, ref b) => write!(fmt, "[{:?}; {:?}]", a, b), + Len(ref a) => write!(fmt, "Len({:?})", a), + Cast(ref kind, ref lv, ref ty) => write!(fmt, "{:?} as {:?} ({:?})", lv, ty, kind), + BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b), + CheckedBinaryOp(ref op, ref a, ref b) => { + write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b) + } + UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a), + Box(ref t) => write!(fmt, "Box({:?})", t), + InlineAsm { ref asm, ref outputs, ref inputs } => { + write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs) + } + + Ref(_, borrow_kind, ref lv) => { + let kind_str = match borrow_kind { + BorrowKind::Shared => "", + BorrowKind::Mut | BorrowKind::Unique => "mut ", + }; + write!(fmt, "&{}{:?}", kind_str, lv) + } + + Aggregate(ref kind, ref lvs) => { + fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result { + let mut tuple_fmt = fmt.debug_tuple(""); + for lv in lvs { + tuple_fmt.field(lv); + } + tuple_fmt.finish() + } + + match *kind { + AggregateKind::Array => write!(fmt, "{:?}", lvs), + + AggregateKind::Tuple => { + match lvs.len() { + 0 => write!(fmt, "()"), + 1 => write!(fmt, "({:?},)", lvs[0]), + _ => fmt_tuple(fmt, lvs), + } + } + + AggregateKind::Adt(adt_def, variant, substs, _) => { + let variant_def = &adt_def.variants[variant]; + + ppaux::parameterized(fmt, substs, variant_def.did, &[])?; + + match variant_def.ctor_kind { + CtorKind::Const => Ok(()), + CtorKind::Fn => fmt_tuple(fmt, lvs), + CtorKind::Fictive => { + let mut struct_fmt = 
fmt.debug_struct(""); + for (field, lv) in variant_def.fields.iter().zip(lvs) { + struct_fmt.field(&field.name.as_str(), lv); + } + struct_fmt.finish() + } + } + } + + AggregateKind::Closure(def_id, _) => ty::tls::with(|tcx| { + if let Some(node_id) = tcx.map.as_local_node_id(def_id) { + let name = format!("[closure@{:?}]", tcx.map.span(node_id)); + let mut struct_fmt = fmt.debug_struct(&name); + + tcx.with_freevars(node_id, |freevars| { + for (freevar, lv) in freevars.iter().zip(lvs) { + let def_id = freevar.def.def_id(); + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); + let var_name = tcx.local_var_name_str(var_id); + struct_fmt.field(&var_name, lv); + } + }); + + struct_fmt.finish() + } else { + write!(fmt, "[closure]") + } + }), + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +/// Constants +/// +/// Two constants are equal if they are the same constant. Note that +/// this does not necessarily mean that they are "==" in Rust -- in +/// particular one must be wary of `NaN`! + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct Constant<'tcx> { + pub span: Span, + pub ty: Ty<'tcx>, + pub literal: Literal<'tcx>, +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct TypedConstVal<'tcx> { + pub ty: Ty<'tcx>, + pub span: Span, + pub value: ConstUsize, +} + +impl<'tcx> Debug for TypedConstVal<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "const {}", ConstInt::Usize(self.value)) + } +} + +newtype_index!(Promoted, "promoted"); + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum Literal<'tcx> { + Item { + def_id: DefId, + substs: &'tcx Substs<'tcx>, + }, + Value { + value: ConstVal, + }, + Promoted { + // Index into the `promoted` vector of `Mir`. 
+ index: Promoted + }, +} + +impl<'tcx> Debug for Constant<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + write!(fmt, "{:?}", self.literal) + } +} + +impl<'tcx> Debug for Literal<'tcx> { + fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { + use self::Literal::*; + match *self { + Item { def_id, substs } => { + ppaux::parameterized(fmt, substs, def_id, &[]) + } + Value { ref value } => { + write!(fmt, "const ")?; + fmt_const_val(fmt, value) + } + Promoted { index } => { + write!(fmt, "{:?}", index) + } + } + } +} + +/// Write a `ConstVal` in a way closer to the original source code than the `Debug` output. +fn fmt_const_val(fmt: &mut W, const_val: &ConstVal) -> fmt::Result { + use middle::const_val::ConstVal::*; + match *const_val { + Float(f) => write!(fmt, "{:?}", f), + Integral(n) => write!(fmt, "{}", n), + Str(ref s) => write!(fmt, "{:?}", s), + ByteStr(ref bytes) => { + let escaped: String = bytes + .iter() + .flat_map(|&ch| ascii::escape_default(ch).map(|c| c as char)) + .collect(); + write!(fmt, "b\"{}\"", escaped) + } + Bool(b) => write!(fmt, "{:?}", b), + Function(def_id) => write!(fmt, "{}", item_path_str(def_id)), + Struct(node_id) | Tuple(node_id) | Array(node_id, _) | Repeat(node_id, _) => + write!(fmt, "{}", node_to_string(node_id)), + Char(c) => write!(fmt, "{:?}", c), + Dummy => bug!(), + } +} + +fn node_to_string(node_id: ast::NodeId) -> String { + ty::tls::with(|tcx| tcx.map.node_to_user_string(node_id)) +} + +fn item_path_str(def_id: DefId) -> String { + ty::tls::with(|tcx| tcx.item_path_str(def_id)) +} + +impl<'tcx> ControlFlowGraph for Mir<'tcx> { + + type Node = BasicBlock; + + fn num_nodes(&self) -> usize { self.basic_blocks.len() } + + fn start_node(&self) -> Self::Node { START_BLOCK } + + fn predecessors<'graph>(&'graph self, node: Self::Node) + -> >::Iter + { + self.predecessors_for(node).clone().into_iter() + } + fn successors<'graph>(&'graph self, node: Self::Node) + -> >::Iter + { + 
self.basic_blocks[node].terminator().successors().into_owned().into_iter() + } +} + +impl<'a, 'b> GraphPredecessors<'b> for Mir<'a> { + type Item = BasicBlock; + type Iter = IntoIter; +} + +impl<'a, 'b> GraphSuccessors<'b> for Mir<'a> { + type Item = BasicBlock; + type Iter = IntoIter; +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +pub struct Location { + /// the location is within this block + pub block: BasicBlock, + + /// the location is the start of the this statement; or, if `statement_index` + /// == num-statements, then the start of the terminator. + pub statement_index: usize, +} + +impl fmt::Debug for Location { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{:?}[{}]", self.block, self.statement_index) + } +} + +impl Location { + pub fn dominates(&self, other: &Location, dominators: &Dominators) -> bool { + if self.block == other.block { + self.statement_index <= other.statement_index + } else { + dominators.is_dominated_by(other.block, self.block) + } + } +} diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs deleted file mode 100644 index 8496f606b7b87..0000000000000 --- a/src/librustc/mir/repr.rs +++ /dev/null @@ -1,963 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use middle::const_eval::ConstVal; -use middle::def_id::DefId; -use middle::subst::Substs; -use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty}; -use rustc_back::slice; -use rustc_data_structures::tuple_slice::TupleSlice; -use rustc_front::hir::InlineAsm; -use syntax::ast::{self, Name}; -use syntax::codemap::Span; -use graphviz::IntoCow; -use std::ascii; -use std::borrow::Cow; -use std::fmt::{self, Debug, Formatter, Write}; -use std::{iter, u32}; -use std::ops::{Index, IndexMut}; - -/// Lowered representation of a single function. -#[derive(RustcEncodable, RustcDecodable)] -pub struct Mir<'tcx> { - /// List of basic blocks. References to basic block use a newtyped index type `BasicBlock` - /// that indexes into this vector. - pub basic_blocks: Vec>, - - /// Return type of the function. - pub return_ty: FnOutput<'tcx>, - - /// Variables: these are stack slots corresponding to user variables. They may be - /// assigned many times. - pub var_decls: Vec>, - - /// Args: these are stack slots corresponding to the input arguments. - pub arg_decls: Vec>, - - /// Temp declarations: stack slots that for temporaries created by - /// the compiler. These are assigned once, but they are not SSA - /// values in that it is possible to borrow them and mutate them - /// through the resulting reference. 
- pub temp_decls: Vec>, -} - -/// where execution begins -pub const START_BLOCK: BasicBlock = BasicBlock(0); - -/// where execution ends, on normal return -pub const END_BLOCK: BasicBlock = BasicBlock(1); - -impl<'tcx> Mir<'tcx> { - pub fn all_basic_blocks(&self) -> Vec { - (0..self.basic_blocks.len()) - .map(|i| BasicBlock::new(i)) - .collect() - } - - pub fn basic_block_data(&self, bb: BasicBlock) -> &BasicBlockData<'tcx> { - &self.basic_blocks[bb.index()] - } - - pub fn basic_block_data_mut(&mut self, bb: BasicBlock) -> &mut BasicBlockData<'tcx> { - &mut self.basic_blocks[bb.index()] - } -} - -impl<'tcx> Index for Mir<'tcx> { - type Output = BasicBlockData<'tcx>; - - #[inline] - fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> { - self.basic_block_data(index) - } -} - -impl<'tcx> IndexMut for Mir<'tcx> { - #[inline] - fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> { - self.basic_block_data_mut(index) - } -} - -/////////////////////////////////////////////////////////////////////////// -// Mutability and borrow kinds - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum Mutability { - Mut, - Not, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum BorrowKind { - /// Data must be immutable and is aliasable. - Shared, - - /// Data must be immutable but not aliasable. This kind of borrow - /// cannot currently be expressed by the user and is used only in - /// implicit closure bindings. 
It is needed when you the closure - /// is borrowing or mutating a mutable referent, e.g.: - /// - /// let x: &mut isize = ...; - /// let y = || *x += 5; - /// - /// If we were to try to translate this closure into a more explicit - /// form, we'd encounter an error with the code as written: - /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } - /// - /// This is then illegal because you cannot mutate a `&mut` found - /// in an aliasable location. To solve, you'd have to translate with - /// an `&mut` borrow: - /// - /// struct Env { x: & &mut isize } - /// let x: &mut isize = ...; - /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x - /// fn fn_ptr(env: &mut Env) { **env.x += 5; } - /// - /// Now the assignment to `**env.x` is legal, but creating a - /// mutable pointer to `x` is not because `x` is not mutable. We - /// could fix this by declaring `x` as `let mut x`. This is ok in - /// user code, if awkward, but extra weird for closures, since the - /// borrow is hidden. - /// - /// So we introduce a "unique imm" borrow -- the referent is - /// immutable, but not aliasable. This solves the problem. For - /// simplicity, we don't give users the way to express this - /// borrow, it's just used when translating closures. - Unique, - - /// Data is mutable and not aliasable. - Mut, -} - -/////////////////////////////////////////////////////////////////////////// -// Variables and temps - -// A "variable" is a binding declared by the user as part of the fn -// decl, a let, etc. -#[derive(RustcEncodable, RustcDecodable)] -pub struct VarDecl<'tcx> { - pub mutability: Mutability, - pub name: Name, - pub ty: Ty<'tcx>, -} - -// A "temp" is a temporary that we place on the stack. They are -// anonymous, always mutable, and have only a type. 
-#[derive(RustcEncodable, RustcDecodable)] -pub struct TempDecl<'tcx> { - pub ty: Ty<'tcx>, -} - -// A "arg" is one of the function's formal arguments. These are -// anonymous and distinct from the bindings that the user declares. -// -// For example, in this function: -// -// ``` -// fn foo((x, y): (i32, u32)) { ... } -// ``` -// -// there is only one argument, of type `(i32, u32)`, but two bindings -// (`x` and `y`). -#[derive(RustcEncodable, RustcDecodable)] -pub struct ArgDecl<'tcx> { - pub ty: Ty<'tcx>, -} - -/////////////////////////////////////////////////////////////////////////// -// BasicBlock - -/// The index of a particular basic block. The index is into the `basic_blocks` -/// list of the `Mir`. -/// -/// (We use a `u32` internally just to save memory.) -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub struct BasicBlock(u32); - -impl BasicBlock { - pub fn new(index: usize) -> BasicBlock { - assert!(index < (u32::MAX as usize)); - BasicBlock(index as u32) - } - - /// Extract the index. 
- pub fn index(self) -> usize { - self.0 as usize - } -} - -impl Debug for BasicBlock { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "bb{}", self.0) - } -} - -/////////////////////////////////////////////////////////////////////////// -// BasicBlock and Terminator - -#[derive(Debug, RustcEncodable, RustcDecodable)] -pub struct BasicBlockData<'tcx> { - pub statements: Vec>, - pub terminator: Option>, - pub is_cleanup: bool, -} - -#[derive(RustcEncodable, RustcDecodable)] -pub enum Terminator<'tcx> { - /// block should have one successor in the graph; we jump there - Goto { - target: BasicBlock, - }, - - /// jump to branch 0 if this lvalue evaluates to true - If { - cond: Operand<'tcx>, - targets: (BasicBlock, BasicBlock), - }, - - /// lvalue evaluates to some enum; jump depending on the branch - Switch { - discr: Lvalue<'tcx>, - adt_def: AdtDef<'tcx>, - targets: Vec, - }, - - /// operand evaluates to an integer; jump depending on its value - /// to one of the targets, and otherwise fallback to `otherwise` - SwitchInt { - /// discriminant value being tested - discr: Lvalue<'tcx>, - - /// type of value being tested - switch_ty: Ty<'tcx>, - - /// Possible values. The locations to branch to in each case - /// are found in the corresponding indices from the `targets` vector. - values: Vec, - - /// Possible branch sites. The length of this vector should be - /// equal to the length of the `values` vector plus 1 -- the - /// extra item is the block to branch to if none of the values - /// fit. - targets: Vec, - }, - - /// Indicates that the landing pad is finished and unwinding should - /// continue. Emitted by build::scope::diverge_cleanup. - Resume, - - /// Indicates a normal return. The ReturnPointer lvalue should - /// have been filled in by now. This should only occur in the - /// `END_BLOCK`. 
- Return, - - /// Block ends with a call of a converging function - Call { - /// The function that’s being called - func: Operand<'tcx>, - /// Arguments the function is called with - args: Vec>, - /// The kind of call with associated information - kind: CallKind<'tcx>, - }, -} - -#[derive(Clone, RustcEncodable, RustcDecodable)] -pub enum CallKind<'tcx> { - /// Diverging function without associated cleanup - Diverging, - /// Diverging function with associated cleanup - DivergingCleanup(BasicBlock), - /// Converging function without associated cleanup - Converging { - /// Destination where the call result is written - destination: Lvalue<'tcx>, - /// Block to branch into on successful return - target: BasicBlock, - }, - ConvergingCleanup { - /// Destination where the call result is written - destination: Lvalue<'tcx>, - /// First target is branched to on successful return. - /// Second block contains the cleanups to do on unwind. - targets: (BasicBlock, BasicBlock) - } -} - -impl<'tcx> CallKind<'tcx> { - pub fn successors(&self) -> &[BasicBlock] { - match *self { - CallKind::Diverging => &[], - CallKind::DivergingCleanup(ref b) | - CallKind::Converging { target: ref b, .. } => slice::ref_slice(b), - CallKind::ConvergingCleanup { ref targets, .. } => targets.as_slice(), - } - } - - pub fn successors_mut(&mut self) -> &mut [BasicBlock] { - match *self { - CallKind::Diverging => &mut [], - CallKind::DivergingCleanup(ref mut b) | - CallKind::Converging { target: ref mut b, .. } => slice::mut_ref_slice(b), - CallKind::ConvergingCleanup { ref mut targets, .. } => targets.as_mut_slice(), - } - } - - pub fn destination(&self) -> Option<&Lvalue<'tcx>> { - match *self { - CallKind::Converging { ref destination, .. } | - CallKind::ConvergingCleanup { ref destination, .. 
} => Some(destination), - CallKind::Diverging | - CallKind::DivergingCleanup(_) => None - } - } - - pub fn destination_mut(&mut self) -> Option<&mut Lvalue<'tcx>> { - match *self { - CallKind::Converging { ref mut destination, .. } | - CallKind::ConvergingCleanup { ref mut destination, .. } => Some(destination), - CallKind::Diverging | - CallKind::DivergingCleanup(_) => None - } - } -} - -impl<'tcx> Terminator<'tcx> { - pub fn successors(&self) -> &[BasicBlock] { - use self::Terminator::*; - match *self { - Goto { target: ref b } => slice::ref_slice(b), - If { targets: ref b, .. } => b.as_slice(), - Switch { targets: ref b, .. } => b, - SwitchInt { targets: ref b, .. } => b, - Resume => &[], - Return => &[], - Call { ref kind, .. } => kind.successors(), - } - } - - pub fn successors_mut(&mut self) -> &mut [BasicBlock] { - use self::Terminator::*; - match *self { - Goto { target: ref mut b } => slice::mut_ref_slice(b), - If { targets: ref mut b, .. } => b.as_mut_slice(), - Switch { targets: ref mut b, .. } => b, - SwitchInt { targets: ref mut b, .. } => b, - Resume => &mut [], - Return => &mut [], - Call { ref mut kind, .. } => kind.successors_mut(), - } - } -} - -impl<'tcx> BasicBlockData<'tcx> { - pub fn new(terminator: Option>) -> BasicBlockData<'tcx> { - BasicBlockData { - statements: vec![], - terminator: terminator, - is_cleanup: false, - } - } - - /// Accessor for terminator. - /// - /// Terminator may not be None after construction of the basic block is complete. This accessor - /// provides a convenience way to reach the terminator. 
- pub fn terminator(&self) -> &Terminator<'tcx> { - self.terminator.as_ref().expect("invalid terminator state") - } - - pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> { - self.terminator.as_mut().expect("invalid terminator state") - } -} - -impl<'tcx> Debug for Terminator<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - try!(self.fmt_head(fmt)); - let successors = self.successors(); - let labels = self.fmt_successor_labels(); - assert_eq!(successors.len(), labels.len()); - - match successors.len() { - 0 => Ok(()), - - 1 => write!(fmt, " -> {:?}", successors[0]), - - _ => { - try!(write!(fmt, " -> [")); - for (i, target) in successors.iter().enumerate() { - if i > 0 { - try!(write!(fmt, ", ")); - } - try!(write!(fmt, "{}: {:?}", labels[i], target)); - } - write!(fmt, "]") - } - - } - } -} - -impl<'tcx> Terminator<'tcx> { - /// Write the "head" part of the terminator; that is, its name and the data it uses to pick the - /// successor basic block, if any. The only information not inlcuded is the list of possible - /// successors, which may be rendered differently between the text and the graphviz format. - pub fn fmt_head(&self, fmt: &mut W) -> fmt::Result { - use self::Terminator::*; - match *self { - Goto { .. } => write!(fmt, "goto"), - If { cond: ref lv, .. } => write!(fmt, "if({:?})", lv), - Switch { discr: ref lv, .. } => write!(fmt, "switch({:?})", lv), - SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv), - Return => write!(fmt, "return"), - Resume => write!(fmt, "resume"), - Call { ref kind, ref func, ref args } => { - if let Some(destination) = kind.destination() { - try!(write!(fmt, "{:?} = ", destination)); - } - try!(write!(fmt, "{:?}(", func)); - for (index, arg) in args.iter().enumerate() { - if index > 0 { - try!(write!(fmt, ", ")); - } - try!(write!(fmt, "{:?}", arg)); - } - write!(fmt, ")") - } - } - } - - /// Return the list of labels for the edges to the successor basic blocks. 
- pub fn fmt_successor_labels(&self) -> Vec> { - use self::Terminator::*; - match *self { - Return | Resume => vec![], - Goto { .. } => vec!["".into_cow()], - If { .. } => vec!["true".into_cow(), "false".into_cow()], - Switch { ref adt_def, .. } => { - adt_def.variants - .iter() - .map(|variant| variant.name.to_string().into_cow()) - .collect() - } - SwitchInt { ref values, .. } => { - values.iter() - .map(|const_val| { - let mut buf = String::new(); - fmt_const_val(&mut buf, const_val).unwrap(); - buf.into_cow() - }) - .chain(iter::once(String::from("otherwise").into_cow())) - .collect() - } - Call { ref kind, .. } => match *kind { - CallKind::Diverging => - vec![], - CallKind::DivergingCleanup(..) => - vec!["unwind".into_cow()], - CallKind::Converging { .. } => - vec!["return".into_cow()], - CallKind::ConvergingCleanup { .. } => - vec!["return".into_cow(), "unwind".into_cow()], - }, - } - } -} - - -/////////////////////////////////////////////////////////////////////////// -// Statements - -#[derive(RustcEncodable, RustcDecodable)] -pub struct Statement<'tcx> { - pub span: Span, - pub kind: StatementKind<'tcx>, -} - -#[derive(Debug, RustcEncodable, RustcDecodable)] -pub enum StatementKind<'tcx> { - Assign(Lvalue<'tcx>, Rvalue<'tcx>), - Drop(DropKind, Lvalue<'tcx>), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum DropKind { - Free, // free a partially constructed box, should go away eventually - Deep -} - -impl<'tcx> Debug for Statement<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::StatementKind::*; - match self.kind { - Assign(ref lv, ref rv) => write!(fmt, "{:?} = {:?}", lv, rv), - Drop(DropKind::Free, ref lv) => write!(fmt, "free {:?}", lv), - Drop(DropKind::Deep, ref lv) => write!(fmt, "drop {:?}", lv), - } - } -} -/////////////////////////////////////////////////////////////////////////// -// Lvalues - -/// A path to a value; something that can be evaluated without -/// changing or 
disturbing program state. -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] -pub enum Lvalue<'tcx> { - /// local variable declared by the user - Var(u32), - - /// temporary introduced during lowering into MIR - Temp(u32), - - /// formal parameter of the function; note that these are NOT the - /// bindings that the user declares, which are vars - Arg(u32), - - /// static or static mut variable - Static(DefId), - - /// the return pointer of the fn - ReturnPointer, - - /// projection out of an lvalue (access a field, deref a pointer, etc) - Projection(Box>), -} - -/// The `Projection` data structure defines things of the form `B.x` -/// or `*B` or `B[index]`. Note that it is parameterized because it is -/// shared between `Constant` and `Lvalue`. See the aliases -/// `LvalueProjection` etc below. -#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)] -pub struct Projection<'tcx, B, V> { - pub base: B, - pub elem: ProjectionElem<'tcx, V>, -} - -#[derive(Clone, Debug, PartialEq, RustcEncodable, RustcDecodable)] -pub enum ProjectionElem<'tcx, V> { - Deref, - Field(Field), - Index(V), - - // These indices are generated by slice patterns. Easiest to explain - // by example: - // - // ``` - // [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false }, - // [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false }, - // [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true }, - // [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true }, - // ``` - ConstantIndex { - offset: u32, // index or -index (in Python terms), depending on from_end - min_length: u32, // thing being indexed must be at least this long - from_end: bool, // counting backwards from end? - }, - - // "Downcast" to a variant of an ADT. Currently, we only introduce - // this for ADTs with more than one variant. It may be better to - // just introduce it always, or always for enums. 
- Downcast(AdtDef<'tcx>, usize), -} - -/// Alias for projections as they appear in lvalues, where the base is an lvalue -/// and the index is an operand. -pub type LvalueProjection<'tcx> = Projection<'tcx, Lvalue<'tcx>, Operand<'tcx>>; - -/// Alias for projections as they appear in lvalues, where the base is an lvalue -/// and the index is an operand. -pub type LvalueElem<'tcx> = ProjectionElem<'tcx, Operand<'tcx>>; - -/// Index into the list of fields found in a `VariantDef` -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct Field(u32); - -impl Field { - pub fn new(value: usize) -> Field { - assert!(value < (u32::MAX) as usize); - Field(value as u32) - } - - pub fn index(self) -> usize { - self.0 as usize - } -} - -impl<'tcx> Lvalue<'tcx> { - pub fn field(self, f: Field) -> Lvalue<'tcx> { - self.elem(ProjectionElem::Field(f)) - } - - pub fn deref(self) -> Lvalue<'tcx> { - self.elem(ProjectionElem::Deref) - } - - pub fn index(self, index: Operand<'tcx>) -> Lvalue<'tcx> { - self.elem(ProjectionElem::Index(index)) - } - - pub fn elem(self, elem: LvalueElem<'tcx>) -> Lvalue<'tcx> { - Lvalue::Projection(Box::new(LvalueProjection { - base: self, - elem: elem, - })) - } -} - -impl<'tcx> Debug for Lvalue<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::Lvalue::*; - - match *self { - Var(id) => - write!(fmt, "var{:?}", id), - Arg(id) => - write!(fmt, "arg{:?}", id), - Temp(id) => - write!(fmt, "tmp{:?}", id), - Static(def_id) => - write!(fmt, "{}", ty::tls::with(|tcx| tcx.item_path_str(def_id))), - ReturnPointer => - write!(fmt, "return"), - Projection(ref data) => - match data.elem { - ProjectionElem::Downcast(ref adt_def, index) => - write!(fmt, "({:?} as {})", data.base, adt_def.variants[index].name), - ProjectionElem::Deref => - write!(fmt, "(*{:?})", data.base), - ProjectionElem::Field(field) => - write!(fmt, "{:?}.{:?}", data.base, field.index()), - ProjectionElem::Index(ref index) => - write!(fmt, 
"{:?}[{:?}]", data.base, index), - ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => - write!(fmt, "{:?}[{:?} of {:?}]", data.base, offset, min_length), - ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => - write!(fmt, "{:?}[-{:?} of {:?}]", data.base, offset, min_length), - }, - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Operands -// -// These are values that can appear inside an rvalue (or an index -// lvalue). They are intentionally limited to prevent rvalues from -// being nested in one another. - -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] -pub enum Operand<'tcx> { - Consume(Lvalue<'tcx>), - Constant(Constant<'tcx>), -} - -impl<'tcx> Debug for Operand<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::Operand::*; - match *self { - Constant(ref a) => write!(fmt, "{:?}", a), - Consume(ref lv) => write!(fmt, "{:?}", lv), - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Rvalues - -#[derive(Clone, RustcEncodable, RustcDecodable)] -pub enum Rvalue<'tcx> { - // x (either a move or copy, depending on type of x) - Use(Operand<'tcx>), - - // [x; 32] - Repeat(Operand<'tcx>, Constant<'tcx>), - - // &x or &mut x - Ref(Region, BorrowKind, Lvalue<'tcx>), - - // length of a [X] or [X;n] value - Len(Lvalue<'tcx>), - - Cast(CastKind, Operand<'tcx>, Ty<'tcx>), - - BinaryOp(BinOp, Operand<'tcx>, Operand<'tcx>), - - UnaryOp(UnOp, Operand<'tcx>), - - // Creates an *uninitialized* Box - Box(Ty<'tcx>), - - // Create an aggregate value, like a tuple or struct. This is - // only needed because we want to distinguish `dest = Foo { x: - // ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case - // that `Foo` has a destructor. These rvalues can be optimized - // away after type-checking and before lowering. 
- Aggregate(AggregateKind<'tcx>, Vec>), - - // Generates a slice of the form `&input[from_start..L-from_end]` - // where `L` is the length of the slice. This is only created by - // slice pattern matching, so e.g. a pattern of the form `[x, y, - // .., z]` might create a slice with `from_start=2` and - // `from_end=1`. - Slice { - input: Lvalue<'tcx>, - from_start: usize, - from_end: usize, - }, - - InlineAsm(InlineAsm), -} - -#[derive(Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum CastKind { - Misc, - - /// Convert unique, zero-sized type for a fn to fn() - ReifyFnPointer, - - /// Convert safe fn() to unsafe fn() - UnsafeFnPointer, - - /// "Unsize" -- convert a thin-or-fat pointer to a fat pointer. - /// trans must figure out the details once full monomorphization - /// is known. For example, this could be used to cast from a - /// `&[i32;N]` to a `&[i32]`, or a `Box` to a `Box` - /// (presuming `T: Trait`). - Unsize, -} - -#[derive(Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum AggregateKind<'tcx> { - Vec, - Tuple, - Adt(AdtDef<'tcx>, usize, &'tcx Substs<'tcx>), - Closure(DefId, &'tcx ClosureSubsts<'tcx>), -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum BinOp { - /// The `+` operator (addition) - Add, - /// The `-` operator (subtraction) - Sub, - /// The `*` operator (multiplication) - Mul, - /// The `/` operator (division) - Div, - /// The `%` operator (modulus) - Rem, - /// The `^` operator (bitwise xor) - BitXor, - /// The `&` operator (bitwise and) - BitAnd, - /// The `|` operator (bitwise or) - BitOr, - /// The `<<` operator (shift left) - Shl, - /// The `>>` operator (shift right) - Shr, - /// The `==` operator (equality) - Eq, - /// The `<` operator (less than) - Lt, - /// The `<=` operator (less than or equal to) - Le, - /// The `!=` operator (not equal to) - Ne, - /// The `>=` operator (greater than or equal to) - Ge, - /// The `>` operator (greater than) - Gt, 
-} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] -pub enum UnOp { - /// The `!` operator for logical inversion - Not, - /// The `-` operator for negation - Neg, -} - -impl<'tcx> Debug for Rvalue<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::Rvalue::*; - - match *self { - Use(ref lvalue) => write!(fmt, "{:?}", lvalue), - Repeat(ref a, ref b) => write!(fmt, "[{:?}; {:?}]", a, b), - Len(ref a) => write!(fmt, "Len({:?})", a), - Cast(ref kind, ref lv, ref ty) => write!(fmt, "{:?} as {:?} ({:?})", lv, ty, kind), - BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b), - UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a), - Box(ref t) => write!(fmt, "Box({:?})", t), - InlineAsm(ref asm) => write!(fmt, "InlineAsm({:?})", asm), - Slice { ref input, from_start, from_end } => - write!(fmt, "{:?}[{:?}..-{:?}]", input, from_start, from_end), - - Ref(_, borrow_kind, ref lv) => { - let kind_str = match borrow_kind { - BorrowKind::Shared => "", - BorrowKind::Mut | BorrowKind::Unique => "mut ", - }; - write!(fmt, "&{}{:?}", kind_str, lv) - } - - Aggregate(ref kind, ref lvs) => { - use self::AggregateKind::*; - - fn fmt_tuple(fmt: &mut Formatter, name: &str, lvs: &[Operand]) -> fmt::Result { - let mut tuple_fmt = fmt.debug_tuple(name); - for lv in lvs { - tuple_fmt.field(lv); - } - tuple_fmt.finish() - } - - match *kind { - Vec => write!(fmt, "{:?}", lvs), - - Tuple => { - match lvs.len() { - 0 => write!(fmt, "()"), - 1 => write!(fmt, "({:?},)", lvs[0]), - _ => fmt_tuple(fmt, "", lvs), - } - } - - Adt(adt_def, variant, _) => { - let variant_def = &adt_def.variants[variant]; - let name = ty::tls::with(|tcx| tcx.item_path_str(variant_def.did)); - - match variant_def.kind() { - ty::VariantKind::Unit => write!(fmt, "{}", name), - ty::VariantKind::Tuple => fmt_tuple(fmt, &name, lvs), - ty::VariantKind::Struct => { - let mut struct_fmt = fmt.debug_struct(&name); - for (field, lv) in 
variant_def.fields.iter().zip(lvs) { - struct_fmt.field(&field.name.as_str(), lv); - } - struct_fmt.finish() - } - } - } - - Closure(def_id, _) => ty::tls::with(|tcx| { - if let Some(node_id) = tcx.map.as_local_node_id(def_id) { - let name = format!("[closure@{:?}]", tcx.map.span(node_id)); - let mut struct_fmt = fmt.debug_struct(&name); - - tcx.with_freevars(node_id, |freevars| { - for (freevar, lv) in freevars.iter().zip(lvs) { - let var_name = tcx.local_var_name_str(freevar.def.var_id()); - struct_fmt.field(&var_name, lv); - } - }); - - struct_fmt.finish() - } else { - write!(fmt, "[closure]") - } - }), - } - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Constants -// -// Two constants are equal if they are the same constant. Note that -// this does not necessarily mean that they are "==" in Rust -- in -// particular one must be wary of `NaN`! - -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] -pub struct Constant<'tcx> { - pub span: Span, - pub ty: Ty<'tcx>, - pub literal: Literal<'tcx>, -} - -#[derive(Clone, Copy, Debug, PartialEq, RustcEncodable, RustcDecodable)] -pub enum ItemKind { - Constant, - /// This is any sort of callable (usually those that have a type of `fn(…) -> …`). This - /// includes functions, constructors, but not methods which have their own ItemKind. - Function, - Method, -} - -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)] -pub enum Literal<'tcx> { - Item { - def_id: DefId, - kind: ItemKind, - substs: &'tcx Substs<'tcx>, - }, - Value { - value: ConstVal, - }, -} - -impl<'tcx> Debug for Constant<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - write!(fmt, "{:?}", self.literal) - } -} - -impl<'tcx> Debug for Literal<'tcx> { - fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { - use self::Literal::*; - match *self { - Item { def_id, .. 
} => - write!(fmt, "{}", item_path_str(def_id)), - Value { ref value } => { - try!(write!(fmt, "const ")); - fmt_const_val(fmt, value) - } - } - } -} - -/// Write a `ConstVal` in a way closer to the original source code than the `Debug` output. -fn fmt_const_val(fmt: &mut W, const_val: &ConstVal) -> fmt::Result { - use middle::const_eval::ConstVal::*; - match *const_val { - Float(f) => write!(fmt, "{:?}", f), - Int(n) => write!(fmt, "{:?}", n), - Uint(n) => write!(fmt, "{:?}", n), - Str(ref s) => write!(fmt, "{:?}", s), - ByteStr(ref bytes) => { - let escaped: String = bytes - .iter() - .flat_map(|&ch| ascii::escape_default(ch).map(|c| c as char)) - .collect(); - write!(fmt, "b\"{}\"", escaped) - } - Bool(b) => write!(fmt, "{:?}", b), - Function(def_id) => write!(fmt, "{}", item_path_str(def_id)), - Struct(node_id) | Tuple(node_id) | Array(node_id, _) | Repeat(node_id, _) => - write!(fmt, "{}", node_to_string(node_id)), - } -} - -fn node_to_string(node_id: ast::NodeId) -> String { - ty::tls::with(|tcx| tcx.map.node_to_user_string(node_id)) -} - -fn item_path_str(def_id: DefId) -> String { - ty::tls::with(|tcx| tcx.item_path_str(def_id)) -} diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index 45cc0b8b413ea..03530945e046d 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -13,10 +13,11 @@ * building is complete. */ -use mir::repr::*; -use middle::subst::Substs; -use middle::ty::{self, AdtDef, Ty}; -use rustc_front::hir; +use mir::*; +use ty::subst::{Subst, Substs}; +use ty::{self, AdtDef, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use hir; #[derive(Copy, Clone, Debug)] pub enum LvalueTy<'tcx> { @@ -24,44 +25,63 @@ pub enum LvalueTy<'tcx> { Ty { ty: Ty<'tcx> }, /// Downcast to a particular variant of an enum. 
- Downcast { adt_def: AdtDef<'tcx>, + Downcast { adt_def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>, variant_index: usize }, } -impl<'tcx> LvalueTy<'tcx> { +impl<'a, 'gcx, 'tcx> LvalueTy<'tcx> { pub fn from_ty(ty: Ty<'tcx>) -> LvalueTy<'tcx> { LvalueTy::Ty { ty: ty } } - pub fn to_ty(&self, tcx: &ty::ctxt<'tcx>) -> Ty<'tcx> { + pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match *self { LvalueTy::Ty { ty } => ty, LvalueTy::Downcast { adt_def, substs, variant_index: _ } => - tcx.mk_enum(adt_def, substs), + tcx.mk_adt(adt_def, substs), } } - pub fn projection_ty(self, - tcx: &ty::ctxt<'tcx>, + pub fn projection_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, elem: &LvalueElem<'tcx>) -> LvalueTy<'tcx> { match *elem { - ProjectionElem::Deref => + ProjectionElem::Deref => { + let ty = self.to_ty(tcx) + .builtin_deref(true, ty::LvaluePreference::NoPreference) + .unwrap_or_else(|| { + bug!("deref projection of non-dereferencable ty {:?}", self) + }) + .ty; LvalueTy::Ty { - ty: self.to_ty(tcx).builtin_deref(true, ty::LvaluePreference::NoPreference) - .unwrap() - .ty - }, + ty: ty, + } + } ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } => LvalueTy::Ty { ty: self.to_ty(tcx).builtin_index().unwrap() }, + ProjectionElem::Subslice { from, to } => { + let ty = self.to_ty(tcx); + LvalueTy::Ty { + ty: match ty.sty { + ty::TyArray(inner, size) => { + tcx.mk_array(inner, size-(from as usize)-(to as usize)) + } + ty::TySlice(..) 
=> ty, + _ => { + bug!("cannot subslice non-array type: `{:?}`", self) + } + } + } + } ProjectionElem::Downcast(adt_def1, index) => match self.to_ty(tcx).sty { - ty::TyEnum(adt_def, substs) => { + ty::TyAdt(adt_def, substs) => { + assert!(adt_def.is_enum()); assert!(index < adt_def.variants.len()); assert_eq!(adt_def, adt_def1); LvalueTy::Downcast { adt_def: adt_def, @@ -69,87 +89,148 @@ impl<'tcx> LvalueTy<'tcx> { variant_index: index } } _ => { - tcx.sess.bug(&format!("cannot downcast non-enum type: `{:?}`", self)) + bug!("cannot downcast non-ADT type: `{:?}`", self) } }, - ProjectionElem::Field(field) => { - let field_ty = match self { - LvalueTy::Ty { ty } => match ty.sty { - ty::TyStruct(adt_def, substs) => - adt_def.struct_variant().fields[field.index()].ty(tcx, substs), - ty::TyTuple(ref tys) => - tys[field.index()], - ty::TyClosure(_, ref closure_substs) => - closure_substs.upvar_tys[field.index()], - _ => - tcx.sess.bug(&format!("cannot get field of type: `{:?}`", ty)), - }, - LvalueTy::Downcast { adt_def, substs, variant_index } => - adt_def.variants[variant_index].fields[field.index()].ty(tcx, substs), - }; - LvalueTy::Ty { ty: field_ty } + ProjectionElem::Field(_, fty) => LvalueTy::Ty { ty: fty } + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for LvalueTy<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.fold_with(folder) }, + LvalueTy::Downcast { adt_def, substs, variant_index } => { + LvalueTy::Downcast { + adt_def: adt_def, + substs: substs.fold_with(folder), + variant_index: variant_index + } } } } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + LvalueTy::Ty { ty } => ty.visit_with(visitor), + LvalueTy::Downcast { substs, .. 
} => substs.visit_with(visitor) + } + } +} + +impl<'tcx> Lvalue<'tcx> { + pub fn ty<'a, 'gcx>(&self, mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> LvalueTy<'tcx> { + match *self { + Lvalue::Local(index) => + LvalueTy::Ty { ty: mir.local_decls[index].ty }, + Lvalue::Static(def_id) => + LvalueTy::Ty { ty: tcx.item_type(def_id) }, + Lvalue::Projection(ref proj) => + proj.base.ty(mir, tcx).projection_ty(tcx, &proj.elem), + } + } } -impl<'tcx> Mir<'tcx> { - pub fn operand_ty(&self, - tcx: &ty::ctxt<'tcx>, - operand: &Operand<'tcx>) - -> Ty<'tcx> +impl<'tcx> Rvalue<'tcx> { + pub fn ty<'a, 'gcx>(&self, mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option> { - match *operand { - Operand::Consume(ref l) => self.lvalue_ty(tcx, l).to_ty(tcx), - Operand::Constant(ref c) => c.ty, + match self { + &Rvalue::Use(ref operand) => Some(operand.ty(mir, tcx)), + &Rvalue::Repeat(ref operand, ref count) => { + let op_ty = operand.ty(mir, tcx); + let count = count.value.as_u64(tcx.sess.target.uint_type); + assert_eq!(count as usize as u64, count); + Some(tcx.mk_array(op_ty, count as usize)) + } + &Rvalue::Ref(reg, bk, ref lv) => { + let lv_ty = lv.ty(mir, tcx).to_ty(tcx); + Some(tcx.mk_ref(reg, + ty::TypeAndMut { + ty: lv_ty, + mutbl: bk.to_mutbl_lossy() + } + )) + } + &Rvalue::Len(..) 
=> Some(tcx.types.usize), + &Rvalue::Cast(.., ty) => Some(ty), + &Rvalue::BinaryOp(op, ref lhs, ref rhs) => { + let lhs_ty = lhs.ty(mir, tcx); + let rhs_ty = rhs.ty(mir, tcx); + Some(op.ty(tcx, lhs_ty, rhs_ty)) + } + &Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { + let lhs_ty = lhs.ty(mir, tcx); + let rhs_ty = rhs.ty(mir, tcx); + let ty = op.ty(tcx, lhs_ty, rhs_ty); + let ty = tcx.intern_tup(&[ty, tcx.types.bool]); + Some(ty) + } + &Rvalue::UnaryOp(_, ref operand) => { + Some(operand.ty(mir, tcx)) + } + &Rvalue::Box(t) => { + Some(tcx.mk_box(t)) + } + &Rvalue::Aggregate(ref ak, ref ops) => { + match *ak { + AggregateKind::Array => { + if let Some(operand) = ops.get(0) { + let ty = operand.ty(mir, tcx); + Some(tcx.mk_array(ty, ops.len())) + } else { + None + } + } + AggregateKind::Tuple => { + Some(tcx.mk_tup( + ops.iter().map(|op| op.ty(mir, tcx)) + )) + } + AggregateKind::Adt(def, _, substs, _) => { + Some(tcx.item_type(def.did).subst(tcx, substs)) + } + AggregateKind::Closure(did, substs) => { + Some(tcx.mk_closure_from_closure_substs(did, substs)) + } + } + } + &Rvalue::InlineAsm { .. } => None + } + } +} + +impl<'tcx> Operand<'tcx> { + pub fn ty<'a, 'gcx>(&self, mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + match self { + &Operand::Consume(ref l) => l.ty(mir, tcx).to_ty(tcx), + &Operand::Constant(ref c) => c.ty, } } +} - pub fn binop_ty(&self, - tcx: &ty::ctxt<'tcx>, - op: BinOp, +impl<'tcx> BinOp { + pub fn ty<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>) - -> Ty<'tcx> - { + -> Ty<'tcx> { // FIXME: handle SIMD correctly - match op { - BinOp::Add | BinOp::Sub | BinOp::Mul | BinOp::Div | BinOp::Rem | - BinOp::BitXor | BinOp::BitAnd | BinOp::BitOr => { + match self { + &BinOp::Add | &BinOp::Sub | &BinOp::Mul | &BinOp::Div | &BinOp::Rem | + &BinOp::BitXor | &BinOp::BitAnd | &BinOp::BitOr => { // these should be integers or floats of the same size. 
assert_eq!(lhs_ty, rhs_ty); lhs_ty } - BinOp::Shl | BinOp::Shr => { + &BinOp::Shl | &BinOp::Shr => { lhs_ty // lhs_ty can be != rhs_ty } - BinOp::Eq | BinOp::Lt | BinOp::Le | - BinOp::Ne | BinOp::Ge | BinOp::Gt => { + &BinOp::Eq | &BinOp::Lt | &BinOp::Le | + &BinOp::Ne | &BinOp::Ge | &BinOp::Gt => { tcx.types.bool } } } - - pub fn lvalue_ty(&self, - tcx: &ty::ctxt<'tcx>, - lvalue: &Lvalue<'tcx>) - -> LvalueTy<'tcx> - { - match *lvalue { - Lvalue::Var(index) => - LvalueTy::Ty { ty: self.var_decls[index as usize].ty }, - Lvalue::Temp(index) => - LvalueTy::Ty { ty: self.temp_decls[index as usize].ty }, - Lvalue::Arg(index) => - LvalueTy::Ty { ty: self.arg_decls[index as usize].ty }, - Lvalue::Static(def_id) => - LvalueTy::Ty { ty: tcx.lookup_item_type(def_id).ty }, - Lvalue::ReturnPointer => - LvalueTy::Ty { ty: self.return_ty.unwrap() }, - Lvalue::Projection(ref proj) => - self.lvalue_ty(tcx, &proj.base).projection_ty(tcx, &proj.elem) - } - } } impl BorrowKind { diff --git a/src/librustc/mir/transform.rs b/src/librustc/mir/transform.rs new file mode 100644 index 0000000000000..3576ae662a005 --- /dev/null +++ b/src/librustc/mir/transform.rs @@ -0,0 +1,193 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use dep_graph::DepNode; +use hir; +use hir::map::DefPathData; +use mir::{Mir, Promoted}; +use ty::TyCtxt; +use syntax::ast::NodeId; +use util::common::time; + +use std::borrow::Cow; +use std::fmt; + +/// Where a specific Mir comes from. +#[derive(Debug, Copy, Clone)] +pub enum MirSource { + /// Functions and methods. + Fn(NodeId), + + /// Constants and associated constants. + Const(NodeId), + + /// Initializer of a `static` item. 
+ Static(NodeId, hir::Mutability), + + /// Promoted rvalues within a function. + Promoted(NodeId, Promoted) +} + +impl<'a, 'tcx> MirSource { + pub fn from_node(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: NodeId) -> MirSource { + use hir::*; + + // Handle constants in enum discriminants, types, and repeat expressions. + let def_id = tcx.map.local_def_id(id); + let def_key = tcx.def_key(def_id); + if def_key.disambiguated_data.data == DefPathData::Initializer { + return MirSource::Const(id); + } + + match tcx.map.get(id) { + map::NodeItem(&Item { node: ItemConst(..), .. }) | + map::NodeTraitItem(&TraitItem { node: ConstTraitItem(..), .. }) | + map::NodeImplItem(&ImplItem { node: ImplItemKind::Const(..), .. }) => { + MirSource::Const(id) + } + map::NodeItem(&Item { node: ItemStatic(_, m, _), .. }) => { + MirSource::Static(id, m) + } + // Default to function if it's not a constant or static. + _ => MirSource::Fn(id) + } + } + + pub fn item_id(&self) -> NodeId { + match *self { + MirSource::Fn(id) | + MirSource::Const(id) | + MirSource::Static(id, _) | + MirSource::Promoted(id, _) => id + } + } +} + +/// Various information about pass. +pub trait Pass { + // fn should_run(Session) to check if pass should run? + fn name<'a>(&self) -> Cow<'static, str> { + let name = unsafe { ::std::intrinsics::type_name::() }; + if let Some(tail) = name.rfind(":") { + Cow::from(&name[tail+1..]) + } else { + Cow::from(name) + } + } + fn disambiguator<'a>(&'a self) -> Option> { None } +} + +/// A pass which inspects the whole Mir map. +pub trait MirMapPass<'tcx>: Pass { + fn run_pass<'a>( + &mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + hooks: &mut [Box MirPassHook<'s>>]); +} + +pub trait MirPassHook<'tcx>: Pass { + fn on_mir_pass<'a>( + &mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &Mir<'tcx>, + pass: &Pass, + is_after: bool + ); +} + +/// A pass which inspects Mir of functions in isolation. 
+pub trait MirPass<'tcx>: Pass { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>); +} + +impl<'tcx, T: MirPass<'tcx>> MirMapPass<'tcx> for T { + fn run_pass<'a>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + hooks: &mut [Box MirPassHook<'s>>]) + { + let def_ids = tcx.mir_map.borrow().keys(); + for def_id in def_ids { + if !def_id.is_local() { + continue; + } + + let _task = tcx.dep_graph.in_task(DepNode::Mir(def_id)); + let mir = &mut tcx.mir_map.borrow()[&def_id].borrow_mut(); + tcx.dep_graph.write(DepNode::Mir(def_id)); + + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, false); + } + MirPass::run_pass(self, tcx, src, mir); + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, true); + } + + for (i, mir) in mir.promoted.iter_enumerated_mut() { + let src = MirSource::Promoted(id, i); + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, false); + } + MirPass::run_pass(self, tcx, src, mir); + for hook in &mut *hooks { + hook.on_mir_pass(tcx, src, mir, self, true); + } + } + } + } +} + +/// A manager for MIR passes. +pub struct Passes { + passes: Vec MirMapPass<'tcx>>>, + pass_hooks: Vec MirPassHook<'tcx>>>, + plugin_passes: Vec MirMapPass<'tcx>>> +} + +impl<'a, 'tcx> Passes { + pub fn new() -> Passes { + let passes = Passes { + passes: Vec::new(), + pass_hooks: Vec::new(), + plugin_passes: Vec::new() + }; + passes + } + + pub fn run_passes(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let Passes { ref mut passes, ref mut plugin_passes, ref mut pass_hooks } = *self; + for pass in plugin_passes.iter_mut().chain(passes.iter_mut()) { + time(tcx.sess.time_passes(), &*pass.name(), + || pass.run_pass(tcx, pass_hooks)); + } + } + + /// Pushes a built-in pass. + pub fn push_pass(&mut self, pass: Box MirMapPass<'b>>) { + self.passes.push(pass); + } + + /// Pushes a pass hook. 
+ pub fn push_hook(&mut self, hook: Box MirPassHook<'b>>) { + self.pass_hooks.push(hook); + } +} + +/// Copies the plugin passes. +impl ::std::iter::Extend MirMapPass<'a>>> for Passes { + fn extend MirMapPass<'a>>>>(&mut self, it: I) { + self.plugin_passes.extend(it); + } +} diff --git a/src/librustc/mir/traversal.rs b/src/librustc/mir/traversal.rs new file mode 100644 index 0000000000000..6057e7ec7e0f5 --- /dev/null +++ b/src/librustc/mir/traversal.rs @@ -0,0 +1,279 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::vec; + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::Idx; + +use super::*; + +/// Preorder traversal of a graph. +/// +/// Preorder traversal is when each node is visited before an of it's +/// successors +/// +/// ```text +/// +/// A +/// / \ +/// / \ +/// B C +/// \ / +/// \ / +/// D +/// ``` +/// +/// A preorder traversal of this graph is either `A B D C` or `A C D B` +#[derive(Clone)] +pub struct Preorder<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + visited: BitVector, + worklist: Vec, +} + +impl<'a, 'tcx> Preorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> { + let worklist = vec![root]; + + Preorder { + mir: mir, + visited: BitVector::new(mir.basic_blocks().len()), + worklist: worklist + } + } +} + +pub fn preorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Preorder<'a, 'tcx> { + Preorder::new(mir, START_BLOCK) +} + +impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { + type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + + fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + while let Some(idx) = self.worklist.pop() { + if 
!self.visited.insert(idx.index()) { + continue; + } + + let data = &self.mir[idx]; + + if let Some(ref term) = data.terminator { + for &succ in term.successors().iter() { + self.worklist.push(succ); + } + } + + return Some((idx, data)); + } + + None + } +} + +/// Postorder traversal of a graph. +/// +/// Postorder traversal is when each node is visited after all of it's +/// successors, except when the successor is only reachable by a back-edge +/// +/// +/// ```text +/// +/// A +/// / \ +/// / \ +/// B C +/// \ / +/// \ / +/// D +/// ``` +/// +/// A Postorder traversal of this graph is `D B C A` or `D C B A` +pub struct Postorder<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + visited: BitVector, + visit_stack: Vec<(BasicBlock, vec::IntoIter)> +} + +impl<'a, 'tcx> Postorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> Postorder<'a, 'tcx> { + let mut po = Postorder { + mir: mir, + visited: BitVector::new(mir.basic_blocks().len()), + visit_stack: Vec::new() + }; + + + let data = &po.mir[root]; + + if let Some(ref term) = data.terminator { + po.visited.insert(root.index()); + + let succs = term.successors().into_owned().into_iter(); + + po.visit_stack.push((root, succs)); + po.traverse_successor(); + } + + po + } + + fn traverse_successor(&mut self) { + // This is quite a complex loop due to 1. the borrow checker not liking it much + // and 2. what exactly is going on is not clear + // + // It does the actual traversal of the graph, while the `next` method on the iterator + // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and + // iterators over the sucessors of those nodes. Each iteration attempts to get the next + // node from the top of the stack, then pushes that node and an iterator over the + // successors to the top of the stack. This loop only grows `visit_stack`, stopping when + // we reach a child that has no children that we haven't already visited. 
+ // + // For a graph that looks like this: + // + // A + // / \ + // / \ + // B C + // | | + // | | + // D | + // \ / + // \ / + // E + // + // The state of the stack starts out with just the root node (`A` in this case); + // [(A, [B, C])] + // + // When the first call to `traverse_sucessor` happens, the following happens: + // + // [(B, [D]), // `B` taken from the successors of `A`, pushed to the + // // top of the stack along with the successors of `B` + // (A, [C])] + // + // [(D, [E]), // `D` taken from successors of `B`, pushed to stack + // (B, []), + // (A, [C])] + // + // [(E, []), // `E` taken from successors of `D`, pushed to stack + // (D, []), + // (B, []), + // (A, [C])] + // + // Now that the top of the stack has no successors we can traverse, each item will + // be popped off during iteration until we get back to `A`. This yeilds [E, D, B]. + // + // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but + // since we've already visited `E`, that child isn't added to the stack. 
The last + // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A] + loop { + let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() { + if let Some(bb) = iter.next() { + bb + } else { + break; + } + } else { + break; + }; + + if self.visited.insert(bb.index()) { + if let Some(ref term) = self.mir[bb].terminator { + let succs = term.successors().into_owned().into_iter(); + self.visit_stack.push((bb, succs)); + } + } + } + } +} + +pub fn postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> Postorder<'a, 'tcx> { + Postorder::new(mir, START_BLOCK) +} + +impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { + type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + + fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + let next = self.visit_stack.pop(); + if next.is_some() { + self.traverse_successor(); + } + + next.map(|(bb, _)| (bb, &self.mir[bb])) + } +} + +/// Reverse postorder traversal of a graph +/// +/// Reverse postorder is the reverse order of a postorder traversal. +/// This is different to a preorder traversal and represents a natural +/// linearisation of control-flow. +/// +/// ```text +/// +/// A +/// / \ +/// / \ +/// B C +/// \ / +/// \ / +/// D +/// ``` +/// +/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D` +/// Note that for a graph containing no loops (i.e. A DAG), this is equivalent to +/// a topological sort. +/// +/// Construction of a `ReversePostorder` traversal requires doing a full +/// postorder traversal of the graph, therefore this traversal should be +/// constructed as few times as possible. 
Use the `reset` method to be able +/// to re-use the traversal +#[derive(Clone)] +pub struct ReversePostorder<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + blocks: Vec, + idx: usize +} + +impl<'a, 'tcx> ReversePostorder<'a, 'tcx> { + pub fn new(mir: &'a Mir<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> { + let blocks : Vec<_> = Postorder::new(mir, root).map(|(bb, _)| bb).collect(); + + let len = blocks.len(); + + ReversePostorder { + mir: mir, + blocks: blocks, + idx: len + } + } + + pub fn reset(&mut self) { + self.idx = self.blocks.len(); + } +} + + +pub fn reverse_postorder<'a, 'tcx>(mir: &'a Mir<'tcx>) -> ReversePostorder<'a, 'tcx> { + ReversePostorder::new(mir, START_BLOCK) +} + +impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> { + type Item = (BasicBlock, &'a BasicBlockData<'tcx>); + + fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { + if self.idx == 0 { return None; } + self.idx -= 1; + + self.blocks.get(self.idx).map(|&bb| (bb, &self.mir[bb])) + } +} diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 7c8ea22de8e92..b5da304a10986 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -8,11 +8,77 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def_id::DefId; -use middle::ty::Region; -use mir::repr::*; +use middle::const_val::ConstVal; +use hir::def_id::DefId; +use ty::subst::Substs; +use ty::{ClosureSubsts, Region, Ty}; +use mir::*; +use rustc_const_math::ConstUsize; use rustc_data_structures::tuple_slice::TupleSlice; -use syntax::codemap::Span; +use rustc_data_structures::indexed_vec::Idx; +use syntax_pos::Span; + +// # The MIR Visitor +// +// ## Overview +// +// There are two visitors, one for immutable and one for mutable references, +// but both are generated by the following macro. 
The code is written according +// to the following conventions: +// +// - introduce a `visit_foo` and a `super_foo` method for every MIR type +// - `visit_foo`, by default, calls `super_foo` +// - `super_foo`, by default, destructures the `foo` and calls `visit_foo` +// +// This allows you as a user to override `visit_foo` for types are +// interested in, and invoke (within that method) call +// `self.super_foo` to get the default behavior. Just as in an OO +// language, you should never call `super` methods ordinarily except +// in that circumstance. +// +// For the most part, we do not destructure things external to the +// MIR, e.g. types, spans, etc, but simply visit them and stop. This +// avoids duplication with other visitors like `TypeFoldable`. +// +// ## Updating +// +// The code is written in a very deliberate style intended to minimize +// the chance of things being overlooked. You'll notice that we always +// use pattern matching to reference fields and we ensure that all +// matches are exhaustive. +// +// For example, the `super_basic_block_data` method begins like this: +// +// ```rust +// fn super_basic_block_data(&mut self, +// block: BasicBlock, +// data: & $($mutability)* BasicBlockData<'tcx>) { +// let BasicBlockData { +// ref $($mutability)* statements, +// ref $($mutability)* terminator, +// is_cleanup: _ +// } = *data; +// +// for statement in statements { +// self.visit_statement(block, statement); +// } +// +// ... +// } +// ``` +// +// Here we used `let BasicBlockData { } = *data` deliberately, +// rather than writing `data.statements` in the body. This is because if one +// adds a new field to `BasicBlockData`, one will be forced to revise this code, +// and hence one will (hopefully) invoke the correct visit methods (if any). +// +// For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS. +// That means you never write `..` to skip over fields, nor do you write `_` +// to skip over variants in a `match`. 
+// +// The only place that `_` is acceptable is to match a field (or +// variant argument) that does not require visiting, as in +// `is_cleanup` above. macro_rules! make_mir_visitor { ($visitor_trait_name:ident, $($mutability:ident)*) => { @@ -30,39 +96,77 @@ macro_rules! make_mir_visitor { self.super_basic_block_data(block, data); } + fn visit_visibility_scope_data(&mut self, + scope_data: & $($mutability)* VisibilityScopeData) { + self.super_visibility_scope_data(scope_data); + } + fn visit_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>) { - self.super_statement(block, statement); + statement: & $($mutability)* Statement<'tcx>, + location: Location) { + self.super_statement(block, statement, location); } fn visit_assign(&mut self, block: BasicBlock, lvalue: & $($mutability)* Lvalue<'tcx>, - rvalue: & $($mutability)* Rvalue<'tcx>) { - self.super_assign(block, lvalue, rvalue); + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { + self.super_assign(block, lvalue, rvalue, location); } fn visit_terminator(&mut self, block: BasicBlock, - terminator: & $($mutability)* Terminator<'tcx>) { - self.super_terminator(block, terminator); + terminator: & $($mutability)* Terminator<'tcx>, + location: Location) { + self.super_terminator(block, terminator, location); + } + + fn visit_terminator_kind(&mut self, + block: BasicBlock, + kind: & $($mutability)* TerminatorKind<'tcx>, + location: Location) { + self.super_terminator_kind(block, kind, location); + } + + fn visit_assert_message(&mut self, + msg: & $($mutability)* AssertMessage<'tcx>, + location: Location) { + self.super_assert_message(msg, location); } fn visit_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>) { - self.super_rvalue(rvalue); + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { + self.super_rvalue(rvalue, location); } fn visit_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>) { - self.super_operand(operand); + operand: 
& $($mutability)* Operand<'tcx>, + location: Location) { + self.super_operand(operand, location); } fn visit_lvalue(&mut self, lvalue: & $($mutability)* Lvalue<'tcx>, - context: LvalueContext) { - self.super_lvalue(lvalue, context); + context: LvalueContext<'tcx>, + location: Location) { + self.super_lvalue(lvalue, context, location); + } + + fn visit_projection(&mut self, + lvalue: & $($mutability)* LvalueProjection<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + self.super_projection(lvalue, context, location); + } + + fn visit_projection_elem(&mut self, + lvalue: & $($mutability)* LvalueElem<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + self.super_projection_elem(lvalue, context, location); } fn visit_branch(&mut self, @@ -72,17 +176,20 @@ macro_rules! make_mir_visitor { } fn visit_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>) { - self.super_constant(constant); + constant: & $($mutability)* Constant<'tcx>, + location: Location) { + self.super_constant(constant, location); } fn visit_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>) { - self.super_literal(literal); + literal: & $($mutability)* Literal<'tcx>, + location: Location) { + self.super_literal(literal, location); } fn visit_def_id(&mut self, - def_id: & $($mutability)* DefId) { + def_id: & $($mutability)* DefId, + _: Location) { self.super_def_id(def_id); } @@ -91,241 +198,491 @@ macro_rules! 
make_mir_visitor { self.super_span(span); } + fn visit_source_info(&mut self, + source_info: & $($mutability)* SourceInfo) { + self.super_source_info(source_info); + } + + fn visit_ty(&mut self, + ty: & $($mutability)* Ty<'tcx>) { + self.super_ty(ty); + } + + fn visit_substs(&mut self, + substs: & $($mutability)* &'tcx Substs<'tcx>) { + self.super_substs(substs); + } + + fn visit_closure_substs(&mut self, + substs: & $($mutability)* ClosureSubsts<'tcx>) { + self.super_closure_substs(substs); + } + + fn visit_const_val(&mut self, + const_val: & $($mutability)* ConstVal, + _: Location) { + self.super_const_val(const_val); + } + + fn visit_const_usize(&mut self, + const_usize: & $($mutability)* ConstUsize, + _: Location) { + self.super_const_usize(const_usize); + } + + fn visit_typed_const_val(&mut self, + val: & $($mutability)* TypedConstVal<'tcx>, + location: Location) { + self.super_typed_const_val(val, location); + } + + fn visit_local_decl(&mut self, + local_decl: & $($mutability)* LocalDecl<'tcx>) { + self.super_local_decl(local_decl); + } + + fn visit_visibility_scope(&mut self, + scope: & $($mutability)* VisibilityScope) { + self.super_visibility_scope(scope); + } + // The `super_xxx` methods comprise the default behavior and are - // not meant to be overidden. + // not meant to be overridden. 
fn super_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { - for block in mir.all_basic_blocks() { - let data = & $($mutability)* mir[block]; - self.visit_basic_block_data(block, data); + for index in 0..mir.basic_blocks().len() { + let block = BasicBlock::new(index); + self.visit_basic_block_data(block, &$($mutability)* mir[block]); + } + + for scope in &$($mutability)* mir.visibility_scopes { + self.visit_visibility_scope_data(scope); + } + + self.visit_ty(&$($mutability)* mir.return_ty); + + for local_decl in &$($mutability)* mir.local_decls { + self.visit_local_decl(local_decl); } + + self.visit_span(&$($mutability)* mir.span); } fn super_basic_block_data(&mut self, block: BasicBlock, data: & $($mutability)* BasicBlockData<'tcx>) { - for statement in & $($mutability)* data.statements { - self.visit_statement(block, statement); + let BasicBlockData { + ref $($mutability)* statements, + ref $($mutability)* terminator, + is_cleanup: _ + } = *data; + + let mut index = 0; + for statement in statements { + let location = Location { block: block, statement_index: index }; + self.visit_statement(block, statement, location); + index += 1; } - if let Some(ref $($mutability)* terminator) = data.terminator { - self.visit_terminator(block, terminator); + if let Some(ref $($mutability)* terminator) = *terminator { + let location = Location { block: block, statement_index: index }; + self.visit_terminator(block, terminator, location); + } + } + + fn super_visibility_scope_data(&mut self, + scope_data: & $($mutability)* VisibilityScopeData) { + let VisibilityScopeData { + ref $($mutability)* span, + ref $($mutability)* parent_scope, + } = *scope_data; + + self.visit_span(span); + if let Some(ref $($mutability)* parent_scope) = *parent_scope { + self.visit_visibility_scope(parent_scope); } } fn super_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>) { - self.visit_span(& $($mutability)* statement.span); - - match statement.kind { + 
statement: & $($mutability)* Statement<'tcx>, + location: Location) { + let Statement { + ref $($mutability)* source_info, + ref $($mutability)* kind, + } = *statement; + + self.visit_source_info(source_info); + match *kind { StatementKind::Assign(ref $($mutability)* lvalue, ref $($mutability)* rvalue) => { - self.visit_assign(block, lvalue, rvalue); + self.visit_assign(block, lvalue, rvalue, location); + } + StatementKind::SetDiscriminant{ ref $($mutability)* lvalue, .. } => { + self.visit_lvalue(lvalue, LvalueContext::Store, location); } - StatementKind::Drop(_, ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::Drop); + StatementKind::StorageLive(ref $($mutability)* lvalue) => { + self.visit_lvalue(lvalue, LvalueContext::StorageLive, location); } + StatementKind::StorageDead(ref $($mutability)* lvalue) => { + self.visit_lvalue(lvalue, LvalueContext::StorageDead, location); + } + StatementKind::Nop => {} } } fn super_assign(&mut self, _block: BasicBlock, lvalue: &$($mutability)* Lvalue<'tcx>, - rvalue: &$($mutability)* Rvalue<'tcx>) { - self.visit_lvalue(lvalue, LvalueContext::Store); - self.visit_rvalue(rvalue); + rvalue: &$($mutability)* Rvalue<'tcx>, + location: Location) { + self.visit_lvalue(lvalue, LvalueContext::Store, location); + self.visit_rvalue(rvalue, location); } fn super_terminator(&mut self, block: BasicBlock, - terminator: &$($mutability)* Terminator<'tcx>) { - match *terminator { - Terminator::Goto { target } => { + terminator: &$($mutability)* Terminator<'tcx>, + location: Location) { + let Terminator { + ref $($mutability)* source_info, + ref $($mutability)* kind, + } = *terminator; + + self.visit_source_info(source_info); + self.visit_terminator_kind(block, kind, location); + } + + fn super_terminator_kind(&mut self, + block: BasicBlock, + kind: & $($mutability)* TerminatorKind<'tcx>, + source_location: Location) { + match *kind { + TerminatorKind::Goto { target } => { self.visit_branch(block, target); } - 
Terminator::If { ref $($mutability)* cond, - ref $($mutability)* targets } => { - self.visit_operand(cond); + TerminatorKind::If { ref $($mutability)* cond, + ref $($mutability)* targets } => { + self.visit_operand(cond, source_location); for &target in targets.as_slice() { self.visit_branch(block, target); } } - Terminator::Switch { ref $($mutability)* discr, - adt_def: _, - ref targets } => { - self.visit_lvalue(discr, LvalueContext::Inspect); + TerminatorKind::Switch { ref $($mutability)* discr, + adt_def: _, + ref targets } => { + self.visit_lvalue(discr, LvalueContext::Inspect, source_location); for &target in targets { self.visit_branch(block, target); } } - Terminator::SwitchInt { ref $($mutability)* discr, - switch_ty: _, - values: _, - ref targets } => { - self.visit_lvalue(discr, LvalueContext::Inspect); + TerminatorKind::SwitchInt { ref $($mutability)* discr, + ref $($mutability)* switch_ty, + ref $($mutability)* values, + ref targets } => { + self.visit_lvalue(discr, LvalueContext::Inspect, source_location); + self.visit_ty(switch_ty); + for value in values { + self.visit_const_val(value, source_location); + } for &target in targets { self.visit_branch(block, target); } } - Terminator::Resume | - Terminator::Return => { + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::Unreachable => { + } + + TerminatorKind::Drop { ref $($mutability)* location, + target, + unwind } => { + self.visit_lvalue(location, LvalueContext::Drop, source_location); + self.visit_branch(block, target); + unwind.map(|t| self.visit_branch(block, t)); + } + + TerminatorKind::DropAndReplace { ref $($mutability)* location, + ref $($mutability)* value, + target, + unwind } => { + self.visit_lvalue(location, LvalueContext::Drop, source_location); + self.visit_operand(value, source_location); + self.visit_branch(block, target); + unwind.map(|t| self.visit_branch(block, t)); } - Terminator::Call { ref $($mutability)* func, - ref $($mutability)* args, - ref 
$($mutability)* kind } => { - self.visit_operand(func); + TerminatorKind::Call { ref $($mutability)* func, + ref $($mutability)* args, + ref $($mutability)* destination, + cleanup } => { + self.visit_operand(func, source_location); for arg in args { - self.visit_operand(arg); + self.visit_operand(arg, source_location); } - match *kind { - CallKind::Converging { - ref $($mutability)* destination, - .. - } | - CallKind::ConvergingCleanup { - ref $($mutability)* destination, - .. - } => { - self.visit_lvalue(destination, LvalueContext::Store); - } - CallKind::Diverging | - CallKind::DivergingCleanup(_) => {} - } - for &target in kind.successors() { + if let Some((ref $($mutability)* destination, target)) = *destination { + self.visit_lvalue(destination, LvalueContext::Call, source_location); self.visit_branch(block, target); } + cleanup.map(|t| self.visit_branch(block, t)); + } + + TerminatorKind::Assert { ref $($mutability)* cond, + expected: _, + ref $($mutability)* msg, + target, + cleanup } => { + self.visit_operand(cond, source_location); + self.visit_assert_message(msg, source_location); + self.visit_branch(block, target); + cleanup.map(|t| self.visit_branch(block, t)); + } + } + } + + fn super_assert_message(&mut self, + msg: & $($mutability)* AssertMessage<'tcx>, + location: Location) { + match *msg { + AssertMessage::BoundsCheck { + ref $($mutability)* len, + ref $($mutability)* index + } => { + self.visit_operand(len, location); + self.visit_operand(index, location); } + AssertMessage::Math(_) => {} } } fn super_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>) { + rvalue: & $($mutability)* Rvalue<'tcx>, + location: Location) { match *rvalue { Rvalue::Use(ref $($mutability)* operand) => { - self.visit_operand(operand); + self.visit_operand(operand, location); } Rvalue::Repeat(ref $($mutability)* value, - ref $($mutability)* len) => { - self.visit_operand(value); - self.visit_constant(len); + ref $($mutability)* typed_const_val) => { + 
self.visit_operand(value, location); + self.visit_typed_const_val(typed_const_val, location); } Rvalue::Ref(r, bk, ref $($mutability)* path) => { self.visit_lvalue(path, LvalueContext::Borrow { region: r, kind: bk - }); + }, location); } Rvalue::Len(ref $($mutability)* path) => { - self.visit_lvalue(path, LvalueContext::Inspect); + self.visit_lvalue(path, LvalueContext::Inspect, location); } - Rvalue::Cast(_, ref $($mutability)* operand, _) => { - self.visit_operand(operand); + Rvalue::Cast(_cast_kind, + ref $($mutability)* operand, + ref $($mutability)* ty) => { + self.visit_operand(operand, location); + self.visit_ty(ty); } - Rvalue::BinaryOp(_, + Rvalue::BinaryOp(_bin_op, + ref $($mutability)* lhs, + ref $($mutability)* rhs) | + Rvalue::CheckedBinaryOp(_bin_op, ref $($mutability)* lhs, ref $($mutability)* rhs) => { - self.visit_operand(lhs); - self.visit_operand(rhs); + self.visit_operand(lhs, location); + self.visit_operand(rhs, location); } - Rvalue::UnaryOp(_, ref $($mutability)* op) => { - self.visit_operand(op); + Rvalue::UnaryOp(_un_op, ref $($mutability)* op) => { + self.visit_operand(op, location); } - Rvalue::Box(_) => { + Rvalue::Box(ref $($mutability)* ty) => { + self.visit_ty(ty); } Rvalue::Aggregate(ref $($mutability)* kind, ref $($mutability)* operands) => { match *kind { - AggregateKind::Closure(ref $($mutability)* def_id, _) => { - self.visit_def_id(def_id); + AggregateKind::Array => { + } + AggregateKind::Tuple => { + } + AggregateKind::Adt(_adt_def, + _variant_index, + ref $($mutability)* substs, + _active_field_index) => { + self.visit_substs(substs); + } + AggregateKind::Closure(ref $($mutability)* def_id, + ref $($mutability)* closure_substs) => { + self.visit_def_id(def_id, location); + self.visit_closure_substs(closure_substs); } - _ => { /* nothing to do */ } } - for operand in & $($mutability)* operands[..] 
{ - self.visit_operand(operand); + for operand in operands { + self.visit_operand(operand, location); } } - Rvalue::Slice { ref $($mutability)* input, - from_start, - from_end } => { - self.visit_lvalue(input, LvalueContext::Slice { - from_start: from_start, - from_end: from_end, - }); - } - - Rvalue::InlineAsm(_) => { + Rvalue::InlineAsm { ref $($mutability)* outputs, + ref $($mutability)* inputs, + asm: _ } => { + for output in & $($mutability)* outputs[..] { + self.visit_lvalue(output, LvalueContext::Store, location); + } + for input in & $($mutability)* inputs[..] { + self.visit_operand(input, location); + } } } } fn super_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>) { + operand: & $($mutability)* Operand<'tcx>, + location: Location) { match *operand { Operand::Consume(ref $($mutability)* lvalue) => { - self.visit_lvalue(lvalue, LvalueContext::Consume); + self.visit_lvalue(lvalue, LvalueContext::Consume, location); } Operand::Constant(ref $($mutability)* constant) => { - self.visit_constant(constant); + self.visit_constant(constant, location); } } } fn super_lvalue(&mut self, lvalue: & $($mutability)* Lvalue<'tcx>, - _context: LvalueContext) { + context: LvalueContext<'tcx>, + location: Location) { match *lvalue { - Lvalue::Var(_) | - Lvalue::Temp(_) | - Lvalue::Arg(_) | - Lvalue::ReturnPointer => { + Lvalue::Local(_) => { } Lvalue::Static(ref $($mutability)* def_id) => { - self.visit_def_id(def_id); + self.visit_def_id(def_id, location); } Lvalue::Projection(ref $($mutability)* proj) => { - self.visit_lvalue(& $($mutability)* proj.base, - LvalueContext::Projection); + self.visit_projection(proj, context, location); } } } + fn super_projection(&mut self, + proj: & $($mutability)* LvalueProjection<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + let Projection { + ref $($mutability)* base, + ref $($mutability)* elem, + } = *proj; + let context = if context.is_mutating_use() { + LvalueContext::Projection(Mutability::Mut) + } else 
{ + LvalueContext::Projection(Mutability::Not) + }; + self.visit_lvalue(base, context, location); + self.visit_projection_elem(elem, context, location); + } + + fn super_projection_elem(&mut self, + proj: & $($mutability)* LvalueElem<'tcx>, + _context: LvalueContext<'tcx>, + location: Location) { + match *proj { + ProjectionElem::Deref => { + } + ProjectionElem::Subslice { from: _, to: _ } => { + } + ProjectionElem::Field(_field, ref $($mutability)* ty) => { + self.visit_ty(ty); + } + ProjectionElem::Index(ref $($mutability)* operand) => { + self.visit_operand(operand, location); + } + ProjectionElem::ConstantIndex { offset: _, + min_length: _, + from_end: _ } => { + } + ProjectionElem::Downcast(_adt_def, _variant_index) => { + } + } + } + + fn super_local_decl(&mut self, + local_decl: & $($mutability)* LocalDecl<'tcx>) { + let LocalDecl { + mutability: _, + ref $($mutability)* ty, + name: _, + ref $($mutability)* source_info, + } = *local_decl; + + self.visit_ty(ty); + if let Some(ref $($mutability)* info) = *source_info { + self.visit_source_info(info); + } + } + + fn super_visibility_scope(&mut self, + _scope: & $($mutability)* VisibilityScope) { + } + fn super_branch(&mut self, _source: BasicBlock, _target: BasicBlock) { } fn super_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>) { - self.visit_span(& $($mutability)* constant.span); - self.visit_literal(& $($mutability)* constant.literal); + constant: & $($mutability)* Constant<'tcx>, + location: Location) { + let Constant { + ref $($mutability)* span, + ref $($mutability)* ty, + ref $($mutability)* literal, + } = *constant; + + self.visit_span(span); + self.visit_ty(ty); + self.visit_literal(literal, location); + } + + fn super_typed_const_val(&mut self, + constant: & $($mutability)* TypedConstVal<'tcx>, + location: Location) { + let TypedConstVal { + ref $($mutability)* span, + ref $($mutability)* ty, + ref $($mutability)* value, + } = *constant; + + self.visit_span(span); + 
self.visit_ty(ty); + self.visit_const_usize(value, location); } fn super_literal(&mut self, - literal: & $($mutability)* Literal<'tcx>) { + literal: & $($mutability)* Literal<'tcx>, + location: Location) { match *literal { - Literal::Item { ref $($mutability)* def_id, .. } => { - self.visit_def_id(def_id); - }, - Literal::Value { .. } => { - // Nothing to do + Literal::Item { ref $($mutability)* def_id, + ref $($mutability)* substs } => { + self.visit_def_id(def_id, location); + self.visit_substs(substs); } + Literal::Value { ref $($mutability)* value } => { + self.visit_const_val(value, location); + } + Literal::Promoted { index: _ } => {} } } @@ -334,6 +691,47 @@ macro_rules! make_mir_visitor { fn super_span(&mut self, _span: & $($mutability)* Span) { } + + fn super_source_info(&mut self, source_info: & $($mutability)* SourceInfo) { + let SourceInfo { + ref $($mutability)* span, + ref $($mutability)* scope, + } = *source_info; + + self.visit_span(span); + self.visit_visibility_scope(scope); + } + + fn super_ty(&mut self, _ty: & $($mutability)* Ty<'tcx>) { + } + + fn super_substs(&mut self, _substs: & $($mutability)* &'tcx Substs<'tcx>) { + } + + fn super_closure_substs(&mut self, + _substs: & $($mutability)* ClosureSubsts<'tcx>) { + } + + fn super_const_val(&mut self, _substs: & $($mutability)* ConstVal) { + } + + fn super_const_usize(&mut self, _substs: & $($mutability)* ConstUsize) { + } + + // Convenience methods + + fn visit_location(&mut self, mir: & $($mutability)* Mir<'tcx>, location: Location) { + let basic_block = & $($mutability)* mir[location.block]; + if basic_block.statements.len() == location.statement_index { + if let Some(ref $($mutability)* terminator) = basic_block.terminator { + self.visit_terminator(location.block, terminator, location) + } + } else { + let statement = & $($mutability)* + basic_block.statements[location.statement_index]; + self.visit_statement(location.block, statement, location) + } + } } } } @@ -341,11 +739,14 @@ 
macro_rules! make_mir_visitor { make_mir_visitor!(Visitor,); make_mir_visitor!(MutVisitor,mut); -#[derive(Copy, Clone, Debug)] -pub enum LvalueContext { - // Appears as LHS of an assignment or as dest of a call +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum LvalueContext<'tcx> { + // Appears as LHS of an assignment Store, + // Dest of a call + Call, + // Being dropped Drop, @@ -353,14 +754,93 @@ pub enum LvalueContext { Inspect, // Being borrowed - Borrow { region: Region, kind: BorrowKind }, - - // Being sliced -- this should be same as being borrowed, probably - Slice { from_start: usize, from_end: usize }, - - // Used as base for another lvalue, e.g. `x` in `x.y` - Projection, + Borrow { region: &'tcx Region, kind: BorrowKind }, + + // Used as base for another lvalue, e.g. `x` in `x.y`. + // + // The `Mutability` argument specifies whether the projection is being performed in order to + // (potentially) mutate the lvalue. For example, the projection `x.y` is marked as a mutation + // in these cases: + // + // x.y = ...; + // f(&mut x.y); + // + // But not in these cases: + // + // z = x.y; + // f(&x.y); + Projection(Mutability), // Consumed as part of an operand Consume, + + // Starting and ending a storage live range + StorageLive, + StorageDead, +} + +impl<'tcx> LvalueContext<'tcx> { + /// Returns true if this lvalue context represents a drop. + pub fn is_drop(&self) -> bool { + match *self { + LvalueContext::Drop => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage live or storage dead marker. + pub fn is_storage_marker(&self) -> bool { + match *self { + LvalueContext::StorageLive | LvalueContext::StorageDead => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage live marker. 
+ pub fn is_storage_live_marker(&self) -> bool { + match *self { + LvalueContext::StorageLive => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a storage dead marker. + pub fn is_storage_dead_marker(&self) -> bool { + match *self { + LvalueContext::StorageDead => true, + _ => false, + } + } + + /// Returns true if this lvalue context represents a use that potentially changes the value. + pub fn is_mutating_use(&self) -> bool { + match *self { + LvalueContext::Store | LvalueContext::Call | + LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | + LvalueContext::Projection(Mutability::Mut) | + LvalueContext::Drop => true, + LvalueContext::Inspect | + LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | + LvalueContext::Borrow { kind: BorrowKind::Unique, .. } | + LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume | + LvalueContext::StorageLive | LvalueContext::StorageDead => false, + } + } + + /// Returns true if this lvalue context represents a use that does not change the value. + pub fn is_nonmutating_use(&self) -> bool { + match *self { + LvalueContext::Inspect | LvalueContext::Borrow { kind: BorrowKind::Shared, .. } | + LvalueContext::Borrow { kind: BorrowKind::Unique, .. } | + LvalueContext::Projection(Mutability::Not) | LvalueContext::Consume => true, + LvalueContext::Borrow { kind: BorrowKind::Mut, .. } | LvalueContext::Store | + LvalueContext::Call | LvalueContext::Projection(Mutability::Mut) | + LvalueContext::Drop | LvalueContext::StorageLive | LvalueContext::StorageDead => false, + } + } + + pub fn is_use(&self) -> bool { + self.is_mutating_use() || self.is_nonmutating_use() + } } + diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs new file mode 100644 index 0000000000000..8308c54d70bf4 --- /dev/null +++ b/src/librustc/session/code_stats.rs @@ -0,0 +1,173 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ty::AdtKind; +use ty::layout::{Align, Size}; + +use rustc_data_structures::fx::{FxHashSet}; + +use std::cmp::{self, Ordering}; + +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct VariantInfo { + pub name: Option, + pub kind: SizeKind, + pub size: u64, + pub align: u64, + pub fields: Vec, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum SizeKind { Exact, Min } + +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct FieldInfo { + pub name: String, + pub offset: u64, + pub size: u64, + pub align: u64, +} + +impl From for DataTypeKind { + fn from(kind: AdtKind) -> Self { + match kind { + AdtKind::Struct => DataTypeKind::Struct, + AdtKind::Enum => DataTypeKind::Enum, + AdtKind::Union => DataTypeKind::Union, + } + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum DataTypeKind { + Struct, + Union, + Enum, + Closure, +} + +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct TypeSizeInfo { + pub kind: DataTypeKind, + pub type_description: String, + pub align: u64, + pub overall_size: u64, + pub opt_discr_size: Option, + pub variants: Vec, +} + +#[derive(PartialEq, Eq, Debug)] +pub struct CodeStats { + type_sizes: FxHashSet, +} + +impl CodeStats { + pub fn new() -> Self { CodeStats { type_sizes: FxHashSet() } } + + pub fn record_type_size(&mut self, + kind: DataTypeKind, + type_desc: S, + align: Align, + overall_size: Size, + opt_discr_size: Option, + variants: Vec) { + let info = TypeSizeInfo { + kind: kind, + type_description: type_desc.to_string(), + align: align.abi(), + overall_size: overall_size.bytes(), + opt_discr_size: opt_discr_size.map(|s| s.bytes()), + variants: variants, + }; + 
self.type_sizes.insert(info); + } + + pub fn print_type_sizes(&self) { + let mut sorted: Vec<_> = self.type_sizes.iter().collect(); + + // Primary sort: large-to-small. + // Secondary sort: description (dictionary order) + sorted.sort_by(|info1, info2| { + // (reversing cmp order to get large-to-small ordering) + match info2.overall_size.cmp(&info1.overall_size) { + Ordering::Equal => info1.type_description.cmp(&info2.type_description), + other => other, + } + }); + + for info in &sorted { + println!("print-type-size type: `{}`: {} bytes, alignment: {} bytes", + info.type_description, info.overall_size, info.align); + let indent = " "; + + let discr_size = if let Some(discr_size) = info.opt_discr_size { + println!("print-type-size {}discriminant: {} bytes", + indent, discr_size); + discr_size + } else { + 0 + }; + + // We start this at discr_size (rather than 0) because + // things like C-enums do not have variants but we still + // want the max_variant_size at the end of the loop below + // to reflect the presence of the discriminant. 
+ let mut max_variant_size = discr_size; + + let struct_like = match info.kind { + DataTypeKind::Struct | DataTypeKind::Closure => true, + DataTypeKind::Enum | DataTypeKind::Union => false, + }; + for (i, variant_info) in info.variants.iter().enumerate() { + let VariantInfo { ref name, kind: _, align: _, size, ref fields } = *variant_info; + let indent = if !struct_like { + let name = match name.as_ref() { + Some(name) => format!("{}", name), + None => format!("{}", i), + }; + println!("print-type-size {}variant `{}`: {} bytes", + indent, name, size - discr_size); + " " + } else { + assert!(i < 1); + " " + }; + max_variant_size = cmp::max(max_variant_size, size); + + let mut min_offset = discr_size; + for field in fields { + let FieldInfo { ref name, offset, size, align } = *field; + + // Include field alignment in output only if it caused padding injection + if min_offset != offset { + let pad = offset - min_offset; + println!("print-type-size {}padding: {} bytes", + indent, pad); + println!("print-type-size {}field `.{}`: {} bytes, alignment: {} bytes", + indent, name, size, align); + } else { + println!("print-type-size {}field `.{}`: {} bytes", + indent, name, size); + } + + min_offset = offset + size; + } + } + + assert!(max_variant_size <= info.overall_size, + "max_variant_size {} !<= {} overall_size", + max_variant_size, info.overall_size); + if max_variant_size < info.overall_size { + println!("print-type-size {}end padding: {} bytes", + indent, info.overall_size - max_variant_size); + } + } + } +} diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index 1a99aba591a8c..79c0ad0d24209 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -19,48 +19,56 @@ pub use self::DebugInfoLevel::*; use session::{early_error, early_warn, Session}; use session::search_paths::SearchPaths; +use rustc_back::PanicStrategy; use rustc_back::target::Target; use lint; use middle::cstore; use syntax::ast::{self, IntTy, 
UintTy}; -use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::errors::{ColorConfig, Handler}; use syntax::parse; -use syntax::parse::token::InternedString; +use syntax::symbol::Symbol; use syntax::feature_gate::UnstableFeatures; +use errors::{ColorConfig, FatalError, Handler}; + use getopts; -use std::collections::HashMap; -use std::env; +use std::collections::{BTreeMap, BTreeSet}; +use std::collections::btree_map::Iter as BTreeMapIter; +use std::collections::btree_map::Keys as BTreeMapKeysIter; +use std::collections::btree_map::Values as BTreeMapValuesIter; + use std::fmt; +use std::hash::Hasher; +use std::collections::hash_map::DefaultHasher; +use std::collections::HashSet; +use std::iter::FromIterator; use std::path::PathBuf; -use llvm; - pub struct Config { pub target: Target, pub int_type: IntTy, pub uint_type: UintTy, } -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Hash)] pub enum OptLevel { No, // -O0 Less, // -O1 Default, // -O2 - Aggressive // -O3 + Aggressive, // -O3 + Size, // -Os + SizeMin, // -Oz } -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Hash)] pub enum DebugInfoLevel { NoDebugInfo, LimitedDebugInfo, FullDebugInfo, } -#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, + RustcEncodable, RustcDecodable)] pub enum OutputType { Bitcode, Assembly, @@ -70,18 +78,6 @@ pub enum OutputType { DepInfo, } -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum ErrorOutputType { - HumanReadable(ColorConfig), - Json, -} - -impl Default for ErrorOutputType { - fn default() -> ErrorOutputType { - ErrorOutputType::HumanReadable(ColorConfig::Auto) - } -} - impl OutputType { fn is_compatible_with_codegen_units_and_single_output_file(&self) -> bool { match *self { @@ -104,67 +100,226 @@ impl OutputType { OutputType::DepInfo => "dep-info", } } + + pub fn extension(&self) -> &'static str { + match *self { + OutputType::Bitcode => "bc", + 
OutputType::Assembly => "s", + OutputType::LlvmAssembly => "ll", + OutputType::Object => "o", + OutputType::DepInfo => "d", + OutputType::Exe => "", + } + } } -#[derive(Clone)] -pub struct Options { - // The crate config requested for the session, which may be combined - // with additional crate configurations during the compile process - pub crate_types: Vec, - - pub gc: bool, - pub optimize: OptLevel, - pub debug_assertions: bool, - pub debuginfo: DebugInfoLevel, - pub lint_opts: Vec<(String, lint::Level)>, - pub lint_cap: Option, - pub describe_lints: bool, - pub output_types: HashMap>, - // This was mutable for rustpkg, which updates search paths based on the - // parsed code. It remains mutable in case its replacements wants to use - // this. - pub search_paths: SearchPaths, - pub libs: Vec<(String, cstore::NativeLibraryKind)>, - pub maybe_sysroot: Option, - pub target_triple: String, - // User-specified cfg meta items. The compiler itself will add additional - // items to the crate config, and during parsing the entire crate config - // will be added to the crate AST node. This should not be used for - // anything except building the full crate config prior to parsing. - pub cfg: ast::CrateConfig, - pub test: bool, - pub parse_only: bool, - pub no_trans: bool, - pub error_format: ErrorOutputType, - pub treat_err_as_bug: bool, - pub incremental_compilation: bool, - pub dump_dep_graph: bool, - pub no_analysis: bool, - pub debugging_opts: DebuggingOptions, - pub prints: Vec, - pub cg: CodegenOptions, - pub externs: HashMap>, - pub crate_name: Option, - /// An optional name to use as the crate for std during std injection, - /// written `extern crate std = "name"`. Default to "std". Used by - /// out-of-tree drivers. 
- pub alt_std_name: Option, - /// Indicates how the compiler should treat unstable features - pub unstable_features: UnstableFeatures +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ErrorOutputType { + HumanReadable(ColorConfig), + Json, +} + +impl Default for ErrorOutputType { + fn default() -> ErrorOutputType { + ErrorOutputType::HumanReadable(ColorConfig::Auto) + } +} + +// Use tree-based collections to cheaply get a deterministic Hash implementation. +// DO NOT switch BTreeMap out for an unsorted container type! That would break +// dependency tracking for commandline arguments. +#[derive(Clone, Hash)] +pub struct OutputTypes(BTreeMap>); + +impl OutputTypes { + pub fn new(entries: &[(OutputType, Option)]) -> OutputTypes { + OutputTypes(BTreeMap::from_iter(entries.iter() + .map(|&(k, ref v)| (k, v.clone())))) + } + + pub fn get(&self, key: &OutputType) -> Option<&Option> { + self.0.get(key) + } + + pub fn contains_key(&self, key: &OutputType) -> bool { + self.0.contains_key(key) + } + + pub fn keys<'a>(&'a self) -> BTreeMapKeysIter<'a, OutputType, Option> { + self.0.keys() + } + + pub fn values<'a>(&'a self) -> BTreeMapValuesIter<'a, OutputType, Option> { + self.0.values() + } +} + + +// Use tree-based collections to cheaply get a deterministic Hash implementation. +// DO NOT switch BTreeMap or BTreeSet out for an unsorted container type! That +// would break dependency tracking for commandline arguments. +#[derive(Clone, Hash)] +pub struct Externs(BTreeMap>); + +impl Externs { + pub fn new(data: BTreeMap>) -> Externs { + Externs(data) + } + + pub fn get(&self, key: &str) -> Option<&BTreeSet> { + self.0.get(key) + } + + pub fn iter<'a>(&'a self) -> BTreeMapIter<'a, String, BTreeSet> { + self.0.iter() + } } +macro_rules! 
hash_option { + ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, [UNTRACKED]) => ({}); + ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, [TRACKED]) => ({ + if $sub_hashes.insert(stringify!($opt_name), + $opt_expr as &dep_tracking::DepTrackingHash).is_some() { + bug!("Duplicate key in CLI DepTrackingHash: {}", stringify!($opt_name)) + } + }); + ($opt_name:ident, + $opt_expr:expr, + $sub_hashes:expr, + [UNTRACKED_WITH_WARNING $warn_val:expr, $warn_text:expr, $error_format:expr]) => ({ + if *$opt_expr == $warn_val { + early_warn($error_format, $warn_text) + } + }); +} + +macro_rules! top_level_options { + (pub struct Options { $( + $opt:ident : $t:ty [$dep_tracking_marker:ident $($warn_val:expr, $warn_text:expr)*], + )* } ) => ( + #[derive(Clone)] + pub struct Options { + $(pub $opt: $t),* + } + + impl Options { + pub fn dep_tracking_hash(&self) -> u64 { + let mut sub_hashes = BTreeMap::new(); + $({ + hash_option!($opt, + &self.$opt, + &mut sub_hashes, + [$dep_tracking_marker $($warn_val, + $warn_text, + self.error_format)*]); + })* + let mut hasher = DefaultHasher::new(); + dep_tracking::stable_hash(sub_hashes, + &mut hasher, + self.error_format); + hasher.finish() + } + } + ); +} + +// The top-level commandline options struct +// +// For each option, one has to specify how it behaves with regard to the +// dependency tracking system of incremental compilation. This is done via the +// square-bracketed directive after the field type. The options are: +// +// [TRACKED] +// A change in the given field will cause the compiler to completely clear the +// incremental compilation cache before proceeding. +// +// [UNTRACKED] +// Incremental compilation is not influenced by this option. +// +// [UNTRACKED_WITH_WARNING(val, warning)] +// The option is incompatible with incremental compilation in some way. If it +// has the value `val`, the string `warning` is emitted as a warning. 
+// +// If you add a new option to this struct or one of the sub-structs like +// CodegenOptions, think about how it influences incremental compilation. If in +// doubt, specify [TRACKED], which is always "correct" but might lead to +// unnecessary re-compilation. +top_level_options!( + pub struct Options { + // The crate config requested for the session, which may be combined + // with additional crate configurations during the compile process + crate_types: Vec [TRACKED], + optimize: OptLevel [TRACKED], + // Include the debug_assertions flag into dependency tracking, since it + // can influence whether overflow checks are done or not. + debug_assertions: bool [TRACKED], + debuginfo: DebugInfoLevel [TRACKED], + lint_opts: Vec<(String, lint::Level)> [TRACKED], + lint_cap: Option [TRACKED], + describe_lints: bool [UNTRACKED], + output_types: OutputTypes [TRACKED], + // FIXME(mw): We track this for now but it actually doesn't make too + // much sense: The search path can stay the same while the + // things discovered there might have changed on disk. + search_paths: SearchPaths [TRACKED], + libs: Vec<(String, cstore::NativeLibraryKind)> [TRACKED], + maybe_sysroot: Option [TRACKED], + + target_triple: String [TRACKED], + + test: bool [TRACKED], + error_format: ErrorOutputType [UNTRACKED], + mir_opt_level: usize [TRACKED], + + // if Some, enable incremental compilation, using the given + // directory to store intermediate results + incremental: Option [UNTRACKED], + + debugging_opts: DebuggingOptions [TRACKED], + prints: Vec [UNTRACKED], + cg: CodegenOptions [TRACKED], + // FIXME(mw): We track this for now but it actually doesn't make too + // much sense: The value of this option can stay the same + // while the files they refer to might have changed on disk. + externs: Externs [TRACKED], + crate_name: Option [TRACKED], + // An optional name to use as the crate for std during std injection, + // written `extern crate std = "name"`. Default to "std". 
Used by + // out-of-tree drivers. + alt_std_name: Option [TRACKED], + // Indicates how the compiler should treat unstable features + unstable_features: UnstableFeatures [TRACKED], + + // Indicates whether this run of the compiler is actually rustdoc. This + // is currently just a hack and will be removed eventually, so please + // try to not rely on this too much. + actually_rustdoc: bool [TRACKED], + } +); + #[derive(Clone, PartialEq, Eq)] pub enum PrintRequest { FileNames, Sysroot, CrateName, + Cfg, + TargetList, + TargetCPUs, + TargetFeatures, + RelocationModels, + CodeModels, + TargetSpec, } pub enum Input { /// Load source from file File(PathBuf), - /// The string is the source - Str(String) + Str { + /// String that is shown in place of a filename + name: String, + /// Anonymous source string + input: String, + }, } impl Input { @@ -172,7 +327,7 @@ impl Input { match *self { Input::File(ref ifile) => ifile.file_stem().unwrap() .to_str().unwrap().to_string(), - Input::Str(_) => "rust_out".to_string(), + Input::Str { .. } => "rust_out".to_string(), } } } @@ -183,26 +338,65 @@ pub struct OutputFilenames { pub out_filestem: String, pub single_output_file: Option, pub extra: String, - pub outputs: HashMap>, + pub outputs: OutputTypes, } +/// Codegen unit names generated by the numbered naming scheme will contain this +/// marker right before the index of the codegen unit. +pub const NUMBERED_CODEGEN_UNIT_MARKER: &'static str = ".cgu-"; + impl OutputFilenames { pub fn path(&self, flavor: OutputType) -> PathBuf { self.outputs.get(&flavor).and_then(|p| p.to_owned()) .or_else(|| self.single_output_file.clone()) - .unwrap_or_else(|| self.temp_path(flavor)) + .unwrap_or_else(|| self.temp_path(flavor, None)) + } + + /// Get the path where a compilation artifact of the given type for the + /// given codegen unit should be placed on disk. If codegen_unit_name is + /// None, a path distinct from those of any codegen unit will be generated. 
+ pub fn temp_path(&self, + flavor: OutputType, + codegen_unit_name: Option<&str>) + -> PathBuf { + let extension = flavor.extension(); + self.temp_path_ext(extension, codegen_unit_name) } - pub fn temp_path(&self, flavor: OutputType) -> PathBuf { + /// Like temp_path, but also supports things where there is no corresponding + /// OutputType, like no-opt-bitcode or lto-bitcode. + pub fn temp_path_ext(&self, + ext: &str, + codegen_unit_name: Option<&str>) + -> PathBuf { let base = self.out_directory.join(&self.filestem()); - match flavor { - OutputType::Bitcode => base.with_extension("bc"), - OutputType::Assembly => base.with_extension("s"), - OutputType::LlvmAssembly => base.with_extension("ll"), - OutputType::Object => base.with_extension("o"), - OutputType::DepInfo => base.with_extension("d"), - OutputType::Exe => base, + + let mut extension = String::new(); + + if let Some(codegen_unit_name) = codegen_unit_name { + if codegen_unit_name.contains(NUMBERED_CODEGEN_UNIT_MARKER) { + // If we use the numbered naming scheme for modules, we don't want + // the files to look like ... + // but simply .. + let marker_offset = codegen_unit_name.rfind(NUMBERED_CODEGEN_UNIT_MARKER) + .unwrap(); + let index_offset = marker_offset + NUMBERED_CODEGEN_UNIT_MARKER.len(); + extension.push_str(&codegen_unit_name[index_offset .. 
]); + } else { + extension.push_str(codegen_unit_name); + }; + } + + if !ext.is_empty() { + if !extension.is_empty() { + extension.push_str("."); + } + + extension.push_str(ext); } + + let path = base.with_extension(&extension[..]); + path } pub fn with_extension(&self, extension: &str) -> PathBuf { @@ -231,34 +425,43 @@ pub fn host_triple() -> &'static str { pub fn basic_options() -> Options { Options { crate_types: Vec::new(), - gc: false, optimize: OptLevel::No, debuginfo: NoDebugInfo, lint_opts: Vec::new(), lint_cap: None, describe_lints: false, - output_types: HashMap::new(), + output_types: OutputTypes(BTreeMap::new()), search_paths: SearchPaths::new(), maybe_sysroot: None, target_triple: host_triple().to_string(), - cfg: Vec::new(), test: false, - parse_only: false, - no_trans: false, - treat_err_as_bug: false, - incremental_compilation: false, - dump_dep_graph: false, - no_analysis: false, + mir_opt_level: 1, + incremental: None, debugging_opts: basic_debugging_options(), prints: Vec::new(), cg: basic_codegen_options(), error_format: ErrorOutputType::default(), - externs: HashMap::new(), + externs: Externs(BTreeMap::new()), crate_name: None, alt_std_name: None, libs: Vec::new(), unstable_features: UnstableFeatures::Disallow, debug_assertions: true, + actually_rustdoc: false, + } +} + +impl Options { + /// True if there is a reason to build the dep graph. + pub fn build_dep_graph(&self) -> bool { + self.incremental.is_some() || + self.debugging_opts.dump_dep_graph || + self.debugging_opts.query_dep_graph + } + + pub fn single_codegen_unit(&self) -> bool { + self.incremental.is_none() || + self.cg.codegen_units == 1 } } @@ -279,9 +482,12 @@ pub enum CrateType { CrateTypeDylib, CrateTypeRlib, CrateTypeStaticlib, + CrateTypeCdylib, + CrateTypeProcMacro, + CrateTypeMetadata, } -#[derive(Clone)] +#[derive(Clone, Hash)] pub enum Passes { SomePasses(Vec), AllPasses, @@ -310,7 +516,12 @@ macro_rules! 
options { ($struct_name:ident, $setter_name:ident, $defaultfn:ident, $buildfn:ident, $prefix:expr, $outputname:expr, $stat:ident, $mod_desc:ident, $mod_set:ident, - $($opt:ident : $t:ty = ($init:expr, $parse:ident, $desc:expr)),* ,) => + $($opt:ident : $t:ty = ( + $init:expr, + $parse:ident, + [$dep_tracking_marker:ident $(($dep_warn_val:expr, $dep_warn_text:expr))*], + $desc:expr) + ),* ,) => ( #[derive(Clone)] pub struct $struct_name { $(pub $opt: $t),* } @@ -348,7 +559,7 @@ macro_rules! options { value, $outputname, key, type_desc)) } - (None, None) => unreachable!() + (None, None) => bug!() } } found = true; @@ -362,6 +573,22 @@ macro_rules! options { return op; } + impl<'a> dep_tracking::DepTrackingHash for $struct_name { + + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { + let mut sub_hashes = BTreeMap::new(); + $({ + hash_option!($opt, + &self.$opt, + &mut sub_hashes, + [$dep_tracking_marker $($dep_warn_val, + $dep_warn_text, + error_format)*]); + })* + dep_tracking::stable_hash(sub_hashes, hasher, error_format); + } + } + pub type $setter_name = fn(&mut $struct_name, v: Option<&str>) -> bool; pub const $stat: &'static [(&'static str, $setter_name, Option<&'static str>, &'static str)] = @@ -373,6 +600,7 @@ macro_rules! options { pub const parse_opt_bool: Option<&'static str> = Some("one of: `y`, `yes`, `on`, `n`, `no`, or `off`"); pub const parse_string: Option<&'static str> = Some("a string"); + pub const parse_string_push: Option<&'static str> = Some("a string"); pub const parse_opt_string: Option<&'static str> = Some("a string"); pub const parse_list: Option<&'static str> = Some("a space-separated list of strings"); pub const parse_opt_list: Option<&'static str> = Some("a space-separated list of strings"); @@ -381,11 +609,14 @@ macro_rules! 
options { Some("a space-separated list of passes, or `all`"); pub const parse_opt_uint: Option<&'static str> = Some("a number"); + pub const parse_panic_strategy: Option<&'static str> = + Some("either `panic` or `abort`"); } #[allow(dead_code)] mod $mod_set { use super::{$struct_name, Passes, SomePasses, AllPasses}; + use rustc_back::PanicStrategy; $( pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool { @@ -433,6 +664,13 @@ macro_rules! options { } } + fn parse_string_push(slot: &mut Vec, v: Option<&str>) -> bool { + match v { + Some(s) => { slot.push(s.to_string()); true }, + None => false, + } + } + fn parse_list(slot: &mut Vec, v: Option<&str>) -> bool { match v { @@ -479,7 +717,7 @@ macro_rules! options { true } v => { - let mut passes = vec!(); + let mut passes = vec![]; if parse_list(&mut passes, v) { *slot = SomePasses(passes); true @@ -489,160 +727,205 @@ macro_rules! options { } } } + + fn parse_panic_strategy(slot: &mut Option, v: Option<&str>) -> bool { + match v { + Some("unwind") => *slot = Some(PanicStrategy::Unwind), + Some("abort") => *slot = Some(PanicStrategy::Abort), + _ => return false + } + true + } } ) } options! 
{CodegenOptions, CodegenSetter, basic_codegen_options, build_codegen_options, "C", "codegen", CG_OPTIONS, cg_type_desc, cgsetters, - ar: Option = (None, parse_opt_string, + ar: Option = (None, parse_opt_string, [UNTRACKED], "tool to assemble archives with"), - linker: Option = (None, parse_opt_string, + linker: Option = (None, parse_opt_string, [UNTRACKED], "system linker to link outputs with"), - link_args: Option> = (None, parse_opt_list, + link_arg: Vec = (vec![], parse_string_push, [UNTRACKED], + "a single extra argument to pass to the linker (can be used several times)"), + link_args: Option> = (None, parse_opt_list, [UNTRACKED], "extra arguments to pass to the linker (space separated)"), - lto: bool = (false, parse_bool, + link_dead_code: bool = (false, parse_bool, [UNTRACKED], + "don't let linker strip dead code (turning it on can be used for code coverage)"), + lto: bool = (false, parse_bool, [TRACKED], "perform LLVM link-time optimizations"), - target_cpu: Option = (None, parse_opt_string, - "select target processor (llc -mcpu=help for details)"), - target_feature: String = ("".to_string(), parse_string, - "target specific attributes (llc -mattr=help for details)"), - passes: Vec = (Vec::new(), parse_list, + target_cpu: Option = (None, parse_opt_string, [TRACKED], + "select target processor (rustc --print target-cpus for details)"), + target_feature: String = ("".to_string(), parse_string, [TRACKED], + "target specific attributes (rustc --print target-features for details)"), + passes: Vec = (Vec::new(), parse_list, [TRACKED], "a list of extra LLVM passes to run (space separated)"), - llvm_args: Vec = (Vec::new(), parse_list, + llvm_args: Vec = (Vec::new(), parse_list, [TRACKED], "a list of arguments to pass to llvm (space separated)"), - save_temps: bool = (false, parse_bool, + save_temps: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, + "`-C save-temps` might not produce all requested temporary products \ + when incremental compilation is 
enabled.")], "save all temporary output files during compilation"), - rpath: bool = (false, parse_bool, + rpath: bool = (false, parse_bool, [UNTRACKED], "set rpath values in libs/exes"), - no_prepopulate_passes: bool = (false, parse_bool, + no_prepopulate_passes: bool = (false, parse_bool, [TRACKED], "don't pre-populate the pass manager with a list of passes"), - no_vectorize_loops: bool = (false, parse_bool, + no_vectorize_loops: bool = (false, parse_bool, [TRACKED], "don't run the loop vectorization optimization passes"), - no_vectorize_slp: bool = (false, parse_bool, + no_vectorize_slp: bool = (false, parse_bool, [TRACKED], "don't run LLVM's SLP vectorization pass"), - soft_float: bool = (false, parse_bool, - "generate software floating point library calls"), - prefer_dynamic: bool = (false, parse_bool, + soft_float: bool = (false, parse_bool, [TRACKED], + "use soft float ABI (*eabihf targets only)"), + prefer_dynamic: bool = (false, parse_bool, [TRACKED], "prefer dynamic linking to static linking"), - no_integrated_as: bool = (false, parse_bool, + no_integrated_as: bool = (false, parse_bool, [TRACKED], "use an external assembler rather than LLVM's integrated one"), - no_redzone: Option = (None, parse_opt_bool, + no_redzone: Option = (None, parse_opt_bool, [TRACKED], "disable the use of the redzone"), - relocation_model: Option = (None, parse_opt_string, - "choose the relocation model to use (llc -relocation-model for details)"), - code_model: Option = (None, parse_opt_string, - "choose the code model to use (llc -code-model for details)"), - metadata: Vec = (Vec::new(), parse_list, + relocation_model: Option = (None, parse_opt_string, [TRACKED], + "choose the relocation model to use (rustc --print relocation-models for details)"), + code_model: Option = (None, parse_opt_string, [TRACKED], + "choose the code model to use (rustc --print code-models for details)"), + metadata: Vec = (Vec::new(), parse_list, [TRACKED], "metadata to mangle symbol names with"), - 
extra_filename: String = ("".to_string(), parse_string, + extra_filename: String = ("".to_string(), parse_string, [UNTRACKED], "extra data to put in each output filename"), - codegen_units: usize = (1, parse_uint, + codegen_units: usize = (1, parse_uint, [UNTRACKED], "divide crate into N units to optimize in parallel"), - remark: Passes = (SomePasses(Vec::new()), parse_passes, + remark: Passes = (SomePasses(Vec::new()), parse_passes, [UNTRACKED], "print remarks for these optimization passes (space separated, or \"all\")"), - no_stack_check: bool = (false, parse_bool, - "disable checks for stack exhaustion (a memory-safety hazard!)"), - debuginfo: Option = (None, parse_opt_uint, + no_stack_check: bool = (false, parse_bool, [UNTRACKED], + "the --no-stack-check flag is deprecated and does nothing"), + debuginfo: Option = (None, parse_opt_uint, [TRACKED], "debug info emission level, 0 = no debug info, 1 = line tables only, \ 2 = full debug info with variable and type information"), - opt_level: Option = (None, parse_opt_uint, - "optimize with possible levels 0-3"), - debug_assertions: Option = (None, parse_opt_bool, + opt_level: Option = (None, parse_opt_string, [TRACKED], + "optimize with possible levels 0-3, s, or z"), + debug_assertions: Option = (None, parse_opt_bool, [TRACKED], "explicitly enable the cfg(debug_assertions) directive"), - inline_threshold: Option = (None, parse_opt_uint, + inline_threshold: Option = (None, parse_opt_uint, [TRACKED], "set the inlining threshold for"), + panic: Option = (None, parse_panic_strategy, + [TRACKED], "panic strategy to compile crate with"), } - options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, build_debugging_options, "Z", "debugging", DB_OPTIONS, db_type_desc, dbsetters, - verbose: bool = (false, parse_bool, + verbose: bool = (false, parse_bool, [UNTRACKED], "in general, enable more debug printouts"), - time_passes: bool = (false, parse_bool, + time_passes: bool = (false, parse_bool, [UNTRACKED], "measure time of each rustc pass"), count_llvm_insns: bool = (false, parse_bool, + [UNTRACKED_WITH_WARNING(true, + "The output generated by `-Z count_llvm_insns` might not be reliable \ + when used with incremental compilation")], "count where LLVM instrs originate"), - time_llvm_passes: bool = (false, parse_bool, + time_llvm_passes: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, + "The output of `-Z time-llvm-passes` will only reflect timings of \ + re-translated modules when used with incremental compilation" )], "measure time of each LLVM pass"), - input_stats: bool = (false, parse_bool, + input_stats: bool = (false, parse_bool, [UNTRACKED], "gather statistics about the input"), - trans_stats: bool = (false, parse_bool, + trans_stats: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, + "The output of `-Z trans-stats` might not be accurate when incremental \ + compilation is enabled")], "gather trans statistics"), - asm_comments: bool = (false, parse_bool, + asm_comments: bool = (false, parse_bool, [TRACKED], "generate comments into the assembly (may change behavior)"), - no_verify: bool = (false, parse_bool, + no_verify: bool = (false, parse_bool, [TRACKED], "skip LLVM verification"), - borrowck_stats: bool = (false, parse_bool, + borrowck_stats: bool = (false, parse_bool, [UNTRACKED], "gather borrowck statistics"), - no_landing_pads: bool = (false, parse_bool, + no_landing_pads: bool = (false, parse_bool, [TRACKED], "omit landing pads for unwinding"), - debug_llvm: bool = (false, parse_bool, + debug_llvm: bool = (false, parse_bool, [UNTRACKED], "enable debug output from LLVM"), - 
count_type_sizes: bool = (false, parse_bool, - "count the sizes of aggregate types"), - meta_stats: bool = (false, parse_bool, + meta_stats: bool = (false, parse_bool, [UNTRACKED], "gather metadata statistics"), - print_link_args: bool = (false, parse_bool, + print_link_args: bool = (false, parse_bool, [UNTRACKED], "print the arguments passed to the linker"), - gc: bool = (false, parse_bool, - "garbage collect shared data (experimental)"), - print_llvm_passes: bool = (false, parse_bool, + print_llvm_passes: bool = (false, parse_bool, [UNTRACKED], "prints the llvm optimization passes being run"), - ast_json: bool = (false, parse_bool, + ast_json: bool = (false, parse_bool, [UNTRACKED], "print the AST as JSON and halt"), - ast_json_noexpand: bool = (false, parse_bool, + ast_json_noexpand: bool = (false, parse_bool, [UNTRACKED], "print the pre-expansion AST as JSON and halt"), - ls: bool = (false, parse_bool, + ls: bool = (false, parse_bool, [UNTRACKED], "list the symbols defined by a library crate"), - save_analysis: bool = (false, parse_bool, - "write syntax and type analysis information in addition to normal output"), - print_move_fragments: bool = (false, parse_bool, + save_analysis: bool = (false, parse_bool, [UNTRACKED], + "write syntax and type analysis (in JSON format) information, in \ + addition to normal output"), + save_analysis_csv: bool = (false, parse_bool, [UNTRACKED], + "write syntax and type analysis (in CSV format) information, in addition to normal output"), + save_analysis_api: bool = (false, parse_bool, [UNTRACKED], + "write syntax and type analysis information for opaque libraries (in JSON format), \ + in addition to normal output"), + print_move_fragments: bool = (false, parse_bool, [UNTRACKED], "print out move-fragment data for every fn"), - flowgraph_print_loans: bool = (false, parse_bool, + flowgraph_print_loans: bool = (false, parse_bool, [UNTRACKED], "include loan analysis data in --unpretty flowgraph output"), - flowgraph_print_moves: 
bool = (false, parse_bool, + flowgraph_print_moves: bool = (false, parse_bool, [UNTRACKED], "include move analysis data in --unpretty flowgraph output"), - flowgraph_print_assigns: bool = (false, parse_bool, + flowgraph_print_assigns: bool = (false, parse_bool, [UNTRACKED], "include assignment analysis data in --unpretty flowgraph output"), - flowgraph_print_all: bool = (false, parse_bool, + flowgraph_print_all: bool = (false, parse_bool, [UNTRACKED], "include all dataflow analysis data in --unpretty flowgraph output"), - print_region_graph: bool = (false, parse_bool, + print_region_graph: bool = (false, parse_bool, [UNTRACKED], "prints region inference graph. \ Use with RUST_REGION_GRAPH=help for more info"), - parse_only: bool = (false, parse_bool, + parse_only: bool = (false, parse_bool, [UNTRACKED], "parse only; do not compile, assemble, or link"), - no_trans: bool = (false, parse_bool, + no_trans: bool = (false, parse_bool, [TRACKED], "run all passes except translation; no output"), - treat_err_as_bug: bool = (false, parse_bool, + treat_err_as_bug: bool = (false, parse_bool, [TRACKED], "treat all errors that occur as bugs"), - incr_comp: bool = (false, parse_bool, + continue_parse_after_error: bool = (false, parse_bool, [TRACKED], + "attempt to recover from parse errors (experimental)"), + incremental: Option = (None, parse_opt_string, [UNTRACKED], "enable incremental compilation (experimental)"), - dump_dep_graph: bool = (false, parse_bool, + incremental_info: bool = (false, parse_bool, [UNTRACKED], + "print high-level information about incremental reuse (or the lack thereof)"), + incremental_dump_hash: bool = (false, parse_bool, [UNTRACKED], + "dump hash information in textual format to stdout"), + dump_dep_graph: bool = (false, parse_bool, [UNTRACKED], "dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv)"), - no_analysis: bool = (false, parse_bool, + query_dep_graph: bool = (false, parse_bool, [UNTRACKED], + "enable queries of the 
dependency graph for regression testing"), + no_analysis: bool = (false, parse_bool, [UNTRACKED], "parse and expand the source, but run no analysis"), - extra_plugins: Vec = (Vec::new(), parse_list, + extra_plugins: Vec = (Vec::new(), parse_list, [TRACKED], "load extra plugins"), - unstable_options: bool = (false, parse_bool, + unstable_options: bool = (false, parse_bool, [UNTRACKED], "adds unstable command line options to rustc interface"), - print_enum_sizes: bool = (false, parse_bool, - "print the size of enums and their variants"), - force_overflow_checks: Option = (None, parse_opt_bool, + force_overflow_checks: Option = (None, parse_opt_bool, [TRACKED], "force overflow checks on or off"), - force_dropflag_checks: Option = (None, parse_opt_bool, - "force drop flag checks on or off"), - trace_macros: bool = (false, parse_bool, + trace_macros: bool = (false, parse_bool, [UNTRACKED], "for every macro invocation, print its name and arguments"), - enable_nonzeroing_move_hints: bool = (false, parse_bool, + debug_macros: bool = (false, parse_bool, [TRACKED], + "emit line numbers debug info inside macros"), + enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED], "force nonzeroing move optimization on"), - keep_mtwt_tables: bool = (false, parse_bool, - "don't clear the resolution tables after analysis"), - keep_ast: bool = (false, parse_bool, + keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED], + "don't clear the hygiene data after analysis"), + keep_ast: bool = (false, parse_bool, [UNTRACKED], "keep the AST after lowering it to HIR"), - show_span: Option = (None, parse_opt_string, + show_span: Option = (None, parse_opt_string, [TRACKED], "show spans for compiler debugging (expr|pat|ty)"), + print_type_sizes: bool = (false, parse_bool, [UNTRACKED], + "print layout information for each type encountered"), + print_trans_items: Option = (None, parse_opt_string, [UNTRACKED], + "print the result of the translation item collection pass"), + 
mir_opt_level: Option = (None, parse_opt_uint, [TRACKED], + "set the MIR optimization level (0-3)"), + dump_mir: Option = (None, parse_opt_string, [UNTRACKED], + "dump MIR state at various points in translation"), + dump_mir_dir: Option = (None, parse_opt_string, [UNTRACKED], + "the directory the MIR is dumped into"), + perf_stats: bool = (false, parse_bool, [UNTRACKED], + "print some performance-related statistics"), + hir_stats: bool = (false, parse_bool, [UNTRACKED], + "print some statistics about AST and HIR"), } pub fn default_lib_output() -> CrateType { @@ -650,78 +933,84 @@ pub fn default_lib_output() -> CrateType { } pub fn default_configuration(sess: &Session) -> ast::CrateConfig { - use syntax::parse::token::intern_and_get_ident as intern; - let end = &sess.target.target.target_endian; let arch = &sess.target.target.arch; let wordsz = &sess.target.target.target_pointer_width; let os = &sess.target.target.target_os; let env = &sess.target.target.target_env; let vendor = &sess.target.target.target_vendor; + let max_atomic_width = sess.target.target.max_atomic_width(); let fam = if let Some(ref fam) = sess.target.target.options.target_family { - intern(fam) + Symbol::intern(fam) } else if sess.target.target.options.is_like_windows { - InternedString::new("windows") + Symbol::intern("windows") } else { - InternedString::new("unix") + Symbol::intern("unix") }; - let mk = attr::mk_name_value_item_str; - let mut ret = vec![ // Target bindings. - mk(InternedString::new("target_os"), intern(os)), - mk(InternedString::new("target_family"), fam.clone()), - mk(InternedString::new("target_arch"), intern(arch)), - mk(InternedString::new("target_endian"), intern(end)), - mk(InternedString::new("target_pointer_width"), intern(wordsz)), - mk(InternedString::new("target_env"), intern(env)), - mk(InternedString::new("target_vendor"), intern(vendor)), - ]; - match &fam[..] 
{ - "windows" | "unix" => ret.push(attr::mk_word_item(fam)), - _ => (), + let mut ret = HashSet::new(); + // Target bindings. + ret.insert((Symbol::intern("target_os"), Some(Symbol::intern(os)))); + ret.insert((Symbol::intern("target_family"), Some(fam))); + ret.insert((Symbol::intern("target_arch"), Some(Symbol::intern(arch)))); + ret.insert((Symbol::intern("target_endian"), Some(Symbol::intern(end)))); + ret.insert((Symbol::intern("target_pointer_width"), Some(Symbol::intern(wordsz)))); + ret.insert((Symbol::intern("target_env"), Some(Symbol::intern(env)))); + ret.insert((Symbol::intern("target_vendor"), Some(Symbol::intern(vendor)))); + if fam == "windows" || fam == "unix" { + ret.insert((fam, None)); } if sess.target.target.options.has_elf_tls { - ret.push(attr::mk_word_item(InternedString::new("target_thread_local"))); + ret.insert((Symbol::intern("target_thread_local"), None)); + } + for &i in &[8, 16, 32, 64, 128] { + if i <= max_atomic_width { + let s = i.to_string(); + ret.insert((Symbol::intern("target_has_atomic"), Some(Symbol::intern(&s)))); + if &s == wordsz { + ret.insert((Symbol::intern("target_has_atomic"), Some(Symbol::intern("ptr")))); + } + } } if sess.opts.debug_assertions { - ret.push(attr::mk_word_item(InternedString::new("debug_assertions"))); + ret.insert((Symbol::intern("debug_assertions"), None)); } - return ret; -} - -pub fn append_configuration(cfg: &mut ast::CrateConfig, - name: InternedString) { - if !cfg.iter().any(|mi| mi.name() == name) { - cfg.push(attr::mk_word_item(name)) + if sess.opts.crate_types.contains(&CrateTypeProcMacro) { + ret.insert((Symbol::intern("proc_macro"), None)); } + return ret; } -pub fn build_configuration(sess: &Session) -> ast::CrateConfig { +pub fn build_configuration(sess: &Session, + mut user_cfg: ast::CrateConfig) + -> ast::CrateConfig { // Combine the configuration requested by the session (command line) with // some default and generated configuration items let default_cfg = 
default_configuration(sess); - let mut user_cfg = sess.opts.cfg.clone(); // If the user wants a test runner, then add the test cfg if sess.opts.test { - append_configuration(&mut user_cfg, InternedString::new("test")) + user_cfg.insert((Symbol::intern("test"), None)); } - let mut v = user_cfg.into_iter().collect::>(); - v.extend_from_slice(&default_cfg[..]); - v + user_cfg.extend(default_cfg.iter().cloned()); + user_cfg } pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { let target = match Target::search(&opts.target_triple) { Ok(t) => t, Err(e) => { - panic!(sp.fatal(&format!("Error loading target specification: {}", e))); + sp.struct_fatal(&format!("Error loading target specification: {}", e)) + .help("Use `--print target-list` for a list of built-in targets") + .emit(); + panic!(FatalError); } }; let (int_type, uint_type) = match &target.target_pointer_width[..] { - "32" => (ast::TyI32, ast::TyU32), - "64" => (ast::TyI64, ast::TyU64), + "16" => (ast::IntTy::I16, ast::UintTy::U16), + "32" => (ast::IntTy::I32, ast::UintTy::U32), + "64" => (ast::IntTy::I64, ast::UintTy::U64), w => panic!(sp.fatal(&format!("target specification was invalid: \ unrecognized target-pointer-width {}", w))), }; @@ -733,24 +1022,20 @@ pub fn build_target_config(opts: &Options, sp: &Handler) -> Config { } } -/// Returns the "short" subset of the stable rustc command line options. -pub fn short_optgroups() -> Vec { - rustc_short_optgroups().into_iter() - .filter(|g|g.is_stable()) - .map(|g|g.opt_group) - .collect() -} - -/// Returns all of the stable rustc command line options. 
-pub fn optgroups() -> Vec { - rustc_optgroups().into_iter() - .filter(|g|g.is_stable()) - .map(|g|g.opt_group) - .collect() -} - #[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum OptionStability { Stable, Unstable } +pub enum OptionStability { + Stable, + + // FIXME: historically there were some options which were either `-Z` or + // required the `-Z unstable-options` flag, which were all intended + // to be unstable. Unfortunately we didn't actually gate usage of + // these options on the stable compiler, so we still allow them there + // today. There are some warnings printed out about this in the + // driver. + UnstableButNotReally, + + Unstable, +} #[derive(Clone, PartialEq, Eq)] pub struct RustcOptGroup { @@ -763,13 +1048,21 @@ impl RustcOptGroup { self.stability == OptionStability::Stable } - fn stable(g: getopts::OptGroup) -> RustcOptGroup { + pub fn stable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup { opt_group: g, stability: OptionStability::Stable } } - fn unstable(g: getopts::OptGroup) -> RustcOptGroup { + #[allow(dead_code)] // currently we have no "truly unstable" options + pub fn unstable(g: getopts::OptGroup) -> RustcOptGroup { RustcOptGroup { opt_group: g, stability: OptionStability::Unstable } } + + fn unstable_bnr(g: getopts::OptGroup) -> RustcOptGroup { + RustcOptGroup { + opt_group: g, + stability: OptionStability::UnstableButNotReally, + } + } } // The `opt` local module holds wrappers around the `getopts` API that @@ -791,73 +1084,109 @@ mod opt { fn stable(g: getopts::OptGroup) -> R { RustcOptGroup::stable(g) } fn unstable(g: getopts::OptGroup) -> R { RustcOptGroup::unstable(g) } + fn unstable_bnr(g: getopts::OptGroup) -> R { RustcOptGroup::unstable_bnr(g) } - // FIXME (pnkfelix): We default to stable since the current set of - // options is defacto stable. However, it would be good to revise the - // code so that a stable option is the thing that takes extra effort - // to encode. 
- - pub fn opt(a: S, b: S, c: S, d: S) -> R { stable(getopts::optopt(a, b, c, d)) } - pub fn multi(a: S, b: S, c: S, d: S) -> R { stable(getopts::optmulti(a, b, c, d)) } - pub fn flag(a: S, b: S, c: S) -> R { stable(getopts::optflag(a, b, c)) } - pub fn flagopt(a: S, b: S, c: S, d: S) -> R { stable(getopts::optflagopt(a, b, c, d)) } - pub fn flagmulti(a: S, b: S, c: S) -> R { stable(getopts::optflagmulti(a, b, c)) } + pub fn opt_s(a: S, b: S, c: S, d: S) -> R { + stable(getopts::optopt(a, b, c, d)) + } + pub fn multi_s(a: S, b: S, c: S, d: S) -> R { + stable(getopts::optmulti(a, b, c, d)) + } + pub fn flag_s(a: S, b: S, c: S) -> R { + stable(getopts::optflag(a, b, c)) + } + pub fn flagopt_s(a: S, b: S, c: S, d: S) -> R { + stable(getopts::optflagopt(a, b, c, d)) + } + pub fn flagmulti_s(a: S, b: S, c: S) -> R { + stable(getopts::optflagmulti(a, b, c)) + } + pub fn opt(a: S, b: S, c: S, d: S) -> R { + unstable(getopts::optopt(a, b, c, d)) + } + pub fn multi(a: S, b: S, c: S, d: S) -> R { + unstable(getopts::optmulti(a, b, c, d)) + } + pub fn flag(a: S, b: S, c: S) -> R { + unstable(getopts::optflag(a, b, c)) + } + pub fn flagopt(a: S, b: S, c: S, d: S) -> R { + unstable(getopts::optflagopt(a, b, c, d)) + } + pub fn flagmulti(a: S, b: S, c: S) -> R { + unstable(getopts::optflagmulti(a, b, c)) + } - pub fn opt_u(a: S, b: S, c: S, d: S) -> R { unstable(getopts::optopt(a, b, c, d)) } - pub fn multi_u(a: S, b: S, c: S, d: S) -> R { unstable(getopts::optmulti(a, b, c, d)) } - pub fn flag_u(a: S, b: S, c: S) -> R { unstable(getopts::optflag(a, b, c)) } - pub fn flagopt_u(a: S, b: S, c: S, d: S) -> R { unstable(getopts::optflagopt(a, b, c, d)) } - pub fn flagmulti_u(a: S, b: S, c: S) -> R { unstable(getopts::optflagmulti(a, b, c)) } + // Do not use these functions for any new options added to the compiler, all + // new options should use the `*_u` variants above to be truly unstable. 
+ pub fn opt_ubnr(a: S, b: S, c: S, d: S) -> R { + unstable_bnr(getopts::optopt(a, b, c, d)) + } + pub fn multi_ubnr(a: S, b: S, c: S, d: S) -> R { + unstable_bnr(getopts::optmulti(a, b, c, d)) + } + pub fn flag_ubnr(a: S, b: S, c: S) -> R { + unstable_bnr(getopts::optflag(a, b, c)) + } + pub fn flagopt_ubnr(a: S, b: S, c: S, d: S) -> R { + unstable_bnr(getopts::optflagopt(a, b, c, d)) + } + pub fn flagmulti_ubnr(a: S, b: S, c: S) -> R { + unstable_bnr(getopts::optflagmulti(a, b, c)) + } } /// Returns the "short" subset of the rustc command line options, /// including metadata for each option, such as whether the option is /// part of the stable long-term interface for rustc. pub fn rustc_short_optgroups() -> Vec { + let mut print_opts = vec!["crate-name", "file-names", "sysroot", "cfg", + "target-list", "target-cpus", "target-features", + "relocation-models", "code-models"]; + if nightly_options::is_nightly_build() { + print_opts.push("target-spec-json"); + } + vec![ - opt::flag("h", "help", "Display this message"), - opt::multi("", "cfg", "Configure the compilation environment", "SPEC"), - opt::multi("L", "", "Add a directory to the library search path", - "[KIND=]PATH"), - opt::multi("l", "", "Link the generated crate(s) to the specified native - library NAME. The optional KIND can be one of, + opt::flag_s("h", "help", "Display this message"), + opt::multi_s("", "cfg", "Configure the compilation environment", "SPEC"), + opt::multi_s("L", "", "Add a directory to the library search path. The + optional KIND can be one of dependency, crate, native, + framework or all (the default).", "[KIND=]PATH"), + opt::multi_s("l", "", "Link the generated crate(s) to the specified native + library NAME. The optional KIND can be one of static, dylib, or framework. 
If omitted, dylib is assumed.", "[KIND=]NAME"), - opt::multi("", "crate-type", "Comma separated list of types of crates + opt::multi_s("", "crate-type", "Comma separated list of types of crates for the compiler to emit", - "[bin|lib|rlib|dylib|staticlib]"), - opt::opt("", "crate-name", "Specify the name of the crate being built", + "[bin|lib|rlib|dylib|cdylib|staticlib|metadata]"), + opt::opt_s("", "crate-name", "Specify the name of the crate being built", "NAME"), - opt::multi("", "emit", "Comma separated list of types of output for \ + opt::multi_s("", "emit", "Comma separated list of types of output for \ the compiler to emit", "[asm|llvm-bc|llvm-ir|obj|link|dep-info]"), - opt::multi("", "print", "Comma separated list of compiler information to \ - print on stdout", - "[crate-name|file-names|sysroot]"), - opt::flagmulti("g", "", "Equivalent to -C debuginfo=2"), - opt::flagmulti("O", "", "Equivalent to -C opt-level=2"), - opt::opt("o", "", "Write output to ", "FILENAME"), - opt::opt("", "out-dir", "Write output to compiler-chosen filename \ + opt::multi_s("", "print", "Comma separated list of compiler information to \ + print on stdout", &print_opts.join("|")), + opt::flagmulti_s("g", "", "Equivalent to -C debuginfo=2"), + opt::flagmulti_s("O", "", "Equivalent to -C opt-level=2"), + opt::opt_s("o", "", "Write output to ", "FILENAME"), + opt::opt_s("", "out-dir", "Write output to compiler-chosen filename \ in

", "DIR"), - opt::opt("", "explain", "Provide a detailed explanation of an error \ + opt::opt_s("", "explain", "Provide a detailed explanation of an error \ message", "OPT"), - opt::flag("", "test", "Build a test harness"), - opt::opt("", "target", "Target triple cpu-manufacturer-kernel[-os] \ - to compile for (see chapter 3.4 of \ - http://www.sourceware.org/autobook/ - for details)", - "TRIPLE"), - opt::multi("W", "warn", "Set lint warnings", "OPT"), - opt::multi("A", "allow", "Set lint allowed", "OPT"), - opt::multi("D", "deny", "Set lint denied", "OPT"), - opt::multi("F", "forbid", "Set lint forbidden", "OPT"), - opt::multi("", "cap-lints", "Set the most restrictive lint level. \ + opt::flag_s("", "test", "Build a test harness"), + opt::opt_s("", "target", "Target triple for which the code is compiled", "TARGET"), + opt::multi_s("W", "warn", "Set lint warnings", "OPT"), + opt::multi_s("A", "allow", "Set lint allowed", "OPT"), + opt::multi_s("D", "deny", "Set lint denied", "OPT"), + opt::multi_s("F", "forbid", "Set lint forbidden", "OPT"), + opt::multi_s("", "cap-lints", "Set the most restrictive lint level. 
\ More restrictive lints are capped at this \ level", "LEVEL"), - opt::multi("C", "codegen", "Set a codegen option", "OPT[=VALUE]"), - opt::flag("V", "version", "Print version info and exit"), - opt::flag("v", "verbose", "Use verbose output"), + opt::multi_s("C", "codegen", "Set a codegen option", "OPT[=VALUE]"), + opt::flag_s("V", "version", "Print version info and exit"), + opt::flag_s("v", "verbose", "Use verbose output"), ] } @@ -867,31 +1196,40 @@ pub fn rustc_short_optgroups() -> Vec { pub fn rustc_optgroups() -> Vec { let mut opts = rustc_short_optgroups(); opts.extend_from_slice(&[ - opt::multi("", "extern", "Specify where an external rust library is \ - located", - "NAME=PATH"), - opt::opt("", "sysroot", "Override the system root", "PATH"), - opt::multi("Z", "", "Set internal debugging options", "FLAG"), - opt::opt_u("", "error-format", "How errors and other messages are produced", "human|json"), - opt::opt("", "color", "Configure coloring of output: - auto = colorize, if output goes to a tty (default); - always = always colorize output; - never = never colorize output", "auto|always|never"), - - opt::flagopt_u("", "pretty", - "Pretty-print the input instead of compiling; - valid types are: `normal` (un-annotated source), - `expanded` (crates expanded), or - `expanded,identified` (fully parenthesized, AST nodes with IDs).", - "TYPE"), - opt::flagopt_u("", "unpretty", - "Present the input source, unstable (and less-pretty) variants; - valid types are any of the types for `--pretty`, as well as: - `flowgraph=` (graphviz formatted flowgraph for node), - `everybody_loops` (all function bodies replaced with `loop {}`), - `hir` (the HIR), `hir,identified`, or - `hir,typed` (HIR with types for each node).", - "TYPE"), + opt::multi_s("", "extern", "Specify where an external rust library is located", + "NAME=PATH"), + opt::opt_s("", "sysroot", "Override the system root", "PATH"), + opt::multi_ubnr("Z", "", "Set internal debugging options", "FLAG"), + opt::opt_s("", 
"error-format", + "How errors and other messages are produced", + "human|json"), + opt::opt_s("", "color", "Configure coloring of output: + auto = colorize, if output goes to a tty (default); + always = always colorize output; + never = never colorize output", "auto|always|never"), + + opt::flagopt_ubnr("", "pretty", + "Pretty-print the input instead of compiling; + valid types are: `normal` (un-annotated source), + `expanded` (crates expanded), or + `expanded,identified` (fully parenthesized, AST nodes with IDs).", + "TYPE"), + opt::flagopt_ubnr("", "unpretty", + "Present the input source, unstable (and less-pretty) variants; + valid types are any of the types for `--pretty`, as well as: + `flowgraph=` (graphviz formatted flowgraph for node), + `everybody_loops` (all function bodies replaced with `loop {}`), + `hir` (the HIR), `hir,identified`, or + `hir,typed` (HIR with types for each node).", + "TYPE"), + + // new options here should **not** use the `_ubnr` functions, all new + // unstable options should use the short variants to indicate that they + // are truly unstable. All `_ubnr` flags are just that way because they + // were so historically. + // + // You may also wish to keep this comment at the bottom of this list to + // ensure that others see it. 
]); opts } @@ -899,14 +1237,26 @@ pub fn rustc_optgroups() -> Vec { // Convert strings provided as --cfg [cfgspec] into a crate_cfg pub fn parse_cfgspecs(cfgspecs: Vec ) -> ast::CrateConfig { cfgspecs.into_iter().map(|s| { - parse::parse_meta_from_source_str("cfgspec".to_string(), - s.to_string(), - Vec::new(), - &parse::ParseSess::new()) + let sess = parse::ParseSess::new(); + let mut parser = + parse::new_parser_from_source_str(&sess, "cfgspec".to_string(), s.to_string()); + + let meta_item = panictry!(parser.parse_meta_item()); + + if !parser.reader.is_eof() { + early_error(ErrorOutputType::default(), &format!("invalid --cfg argument: {}", s)) + } else if meta_item.is_meta_item_list() { + let msg = + format!("invalid predicate in --cfg command line argument: `{}`", meta_item.name()); + early_error(ErrorOutputType::default(), &msg) + } + + (meta_item.name(), meta_item.value_str()) }).collect::() } -pub fn build_session_options(matches: &getopts::Matches) -> Options { +pub fn build_session_options_and_crate_config(matches: &getopts::Matches) + -> (Options, ast::CrateConfig) { let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { Some("auto") => ColorConfig::Auto, Some("always") => ColorConfig::Always, @@ -930,24 +1280,24 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { Some("human") => ErrorOutputType::HumanReadable(color), Some("json") => ErrorOutputType::Json, - None => ErrorOutputType::default(), + None => ErrorOutputType::HumanReadable(color), Some(arg) => { - early_error(ErrorOutputType::default(), &format!("argument for --error-format must \ - be human or json (instead was \ - `{}`)", - arg)) + early_error(ErrorOutputType::HumanReadable(color), + &format!("argument for --error-format must be human or json (instead \ + was `{}`)", + arg)) } } } else { - ErrorOutputType::default() + ErrorOutputType::HumanReadable(color) }; let unparsed_crate_types = matches.opt_strs("crate-type"); let crate_types = 
parse_crate_types_from_list(unparsed_crate_types) .unwrap_or_else(|e| early_error(error_format, &e[..])); - let mut lint_opts = vec!(); + let mut lint_opts = vec![]; let mut describe_lints = false; for &level in &[lint::Allow, lint::Warn, lint::Deny, lint::Forbid] { @@ -968,19 +1318,10 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { let debugging_opts = build_debugging_options(matches, error_format); - let parse_only = debugging_opts.parse_only; - let no_trans = debugging_opts.no_trans; - let treat_err_as_bug = debugging_opts.treat_err_as_bug; - let incremental_compilation = debugging_opts.incr_comp; - let dump_dep_graph = debugging_opts.dump_dep_graph; - let no_analysis = debugging_opts.no_analysis; - - if debugging_opts.debug_llvm { - unsafe { llvm::LLVMSetDebug(1); } - } + let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1); - let mut output_types = HashMap::new(); - if !debugging_opts.parse_only && !no_trans { + let mut output_types = BTreeMap::new(); + if !debugging_opts.parse_only { for list in matches.opt_strs("emit") { for output_type in list.split(',') { let mut parts = output_type.splitn(2, '='); @@ -1026,6 +1367,28 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { } } + if cg.codegen_units < 1 { + early_error(error_format, "Value for codegen units must be a positive nonzero integer"); + } + + let mut prints = Vec::::new(); + if cg.target_cpu.as_ref().map_or(false, |s| s == "help") { + prints.push(PrintRequest::TargetCPUs); + cg.target_cpu = None; + }; + if cg.target_feature == "help" { + prints.push(PrintRequest::TargetFeatures); + cg.target_feature = "".to_string(); + } + if cg.relocation_model.as_ref().map_or(false, |s| s == "help") { + prints.push(PrintRequest::RelocationModels); + cg.relocation_model = None; + } + if cg.code_model.as_ref().map_or(false, |s| s == "help") { + prints.push(PrintRequest::CodeModels); + cg.code_model = None; + } + let cg = cg; let sysroot_opt = 
matches.opt_str("sysroot").map(|m| PathBuf::from(&m)); @@ -1038,13 +1401,20 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { } OptLevel::Default } else { - match cg.opt_level { - None => OptLevel::No, - Some(0) => OptLevel::No, - Some(1) => OptLevel::Less, - Some(2) => OptLevel::Default, - Some(3) => OptLevel::Aggressive, - Some(arg) => { + match (cg.opt_level.as_ref().map(String::as_ref), + nightly_options::is_nightly_build()) { + (None, _) => OptLevel::No, + (Some("0"), _) => OptLevel::No, + (Some("1"), _) => OptLevel::Less, + (Some("2"), _) => OptLevel::Default, + (Some("3"), _) => OptLevel::Aggressive, + (Some("s"), true) => OptLevel::Size, + (Some("z"), true) => OptLevel::SizeMin, + (Some("s"), false) | (Some("z"), false) => { + early_error(error_format, &format!("the optimizations s or z are only \ + accepted on the nightly compiler")); + }, + (Some(arg), _) => { early_error(error_format, &format!("optimization level needs to be \ between 0-3 (instead was `{}`)", arg)); @@ -1053,7 +1423,6 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { } }; let debug_assertions = cg.debug_assertions.unwrap_or(opt_level == OptLevel::No); - let gc = debugging_opts.gc; let debuginfo = if matches.opt_present("g") { if cg.debuginfo.is_some() { early_error(error_format, "-g and -C debuginfo both provided"); @@ -1097,23 +1466,31 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { let cfg = parse_cfgspecs(matches.opt_strs("cfg")); let test = matches.opt_present("test"); - let prints = matches.opt_strs("print").into_iter().map(|s| { + prints.extend(matches.opt_strs("print").into_iter().map(|s| { match &*s { "crate-name" => PrintRequest::CrateName, "file-names" => PrintRequest::FileNames, "sysroot" => PrintRequest::Sysroot, + "cfg" => PrintRequest::Cfg, + "target-list" => PrintRequest::TargetList, + "target-cpus" => PrintRequest::TargetCPUs, + "target-features" => PrintRequest::TargetFeatures, + "relocation-models" 
=> PrintRequest::RelocationModels, + "code-models" => PrintRequest::CodeModels, + "target-spec-json" if nightly_options::is_unstable_enabled(matches) + => PrintRequest::TargetSpec, req => { early_error(error_format, &format!("unknown print request `{}`", req)) } } - }).collect::>(); + })); if !cg.remark.is_empty() && debuginfo == NoDebugInfo { early_warn(error_format, "-C remark will not show source locations without \ --debuginfo"); } - let mut externs = HashMap::new(); + let mut externs = BTreeMap::new(); for arg in &matches.opt_strs("extern") { let mut parts = arg.splitn(2, '='); let name = match parts.next() { @@ -1125,61 +1502,45 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { None => early_error(error_format, "--extern value must be of the format `foo=bar`"), }; - externs.entry(name.to_string()).or_insert(vec![]).push(location.to_string()); + externs.entry(name.to_string()) + .or_insert_with(BTreeSet::new) + .insert(location.to_string()); } let crate_name = matches.opt_str("crate-name"); - Options { + let incremental = debugging_opts.incremental.as_ref().map(|m| PathBuf::from(m)); + + (Options { crate_types: crate_types, - gc: gc, optimize: opt_level, debuginfo: debuginfo, lint_opts: lint_opts, lint_cap: lint_cap, describe_lints: describe_lints, - output_types: output_types, + output_types: OutputTypes(output_types), search_paths: search_paths, maybe_sysroot: sysroot_opt, target_triple: target, - cfg: cfg, test: test, - parse_only: parse_only, - no_trans: no_trans, - treat_err_as_bug: treat_err_as_bug, - incremental_compilation: incremental_compilation || dump_dep_graph, - dump_dep_graph: dump_dep_graph, - no_analysis: no_analysis, + mir_opt_level: mir_opt_level, + incremental: incremental, debugging_opts: debugging_opts, prints: prints, cg: cg, error_format: error_format, - externs: externs, + externs: Externs(externs), crate_name: crate_name, alt_std_name: None, libs: libs, - unstable_features: get_unstable_features_setting(), + 
unstable_features: UnstableFeatures::from_environment(), debug_assertions: debug_assertions, - } -} - -pub fn get_unstable_features_setting() -> UnstableFeatures { - // Whether this is a feature-staged build, i.e. on the beta or stable channel - let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); - // The secret key needed to get through the rustc build itself by - // subverting the unstable features lints - let bootstrap_secret_key = option_env!("CFG_BOOTSTRAP_KEY"); - // The matching key to the above, only known by the build system - let bootstrap_provided_key = env::var("RUSTC_BOOTSTRAP_KEY").ok(); - match (disable_unstable_features, bootstrap_secret_key, bootstrap_provided_key) { - (_, Some(ref s), Some(ref p)) if s == p => UnstableFeatures::Cheat, - (true, _, _) => UnstableFeatures::Disallow, - (false, _, _) => UnstableFeatures::Allow - } + actually_rustdoc: false, + }, + cfg) } pub fn parse_crate_types_from_list(list_list: Vec) -> Result, String> { - let mut crate_types: Vec = Vec::new(); for unparsed_crate_type in &list_list { for part in unparsed_crate_type.split(',') { @@ -1188,7 +1549,10 @@ pub fn parse_crate_types_from_list(list_list: Vec) -> Result CrateTypeRlib, "staticlib" => CrateTypeStaticlib, "dylib" => CrateTypeDylib, + "cdylib" => CrateTypeCdylib, "bin" => CrateTypeExecutable, + "proc-macro" => CrateTypeProcMacro, + "metadata" => CrateTypeMetadata, _ => { return Err(format!("unknown crate type: `{}`", part)); @@ -1203,48 +1567,255 @@ pub fn parse_crate_types_from_list(list_list: Vec) -> Result bool { + is_nightly_build() && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options") + } + + pub fn is_nightly_build() -> bool { + UnstableFeatures::from_environment().is_nightly_build() + } + + pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) { + let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options"); + let really_allows_unstable_options = 
UnstableFeatures::from_environment() + .is_nightly_build(); + + for opt in flags.iter() { + if opt.stability == OptionStability::Stable { + continue + } + let opt_name = if opt.opt_group.long_name.is_empty() { + &opt.opt_group.short_name + } else { + &opt.opt_group.long_name + }; + if !matches.opt_present(opt_name) { + continue + } + if opt_name != "Z" && !has_z_unstable_option { + early_error(ErrorOutputType::default(), + &format!("the `-Z unstable-options` flag must also be passed to enable \ + the flag `{}`", + opt_name)); + } + if really_allows_unstable_options { + continue + } + match opt.stability { + OptionStability::Unstable => { + let msg = format!("the option `{}` is only accepted on the \ + nightly compiler", opt_name); + early_error(ErrorOutputType::default(), &msg); + } + OptionStability::UnstableButNotReally => { + let msg = format!("the option `{}` is unstable and should \ + only be used on the nightly compiler, but \ + it is currently accepted for backwards \ + compatibility; this will soon change, \ + see issue #31847 for more details", + opt_name); + early_warn(ErrorOutputType::default(), &msg); + } + OptionStability::Stable => {} + } + } + } +} + impl fmt::Display for CrateType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CrateTypeExecutable => "bin".fmt(f), CrateTypeDylib => "dylib".fmt(f), CrateTypeRlib => "rlib".fmt(f), - CrateTypeStaticlib => "staticlib".fmt(f) + CrateTypeStaticlib => "staticlib".fmt(f), + CrateTypeCdylib => "cdylib".fmt(f), + CrateTypeProcMacro => "proc-macro".fmt(f), + CrateTypeMetadata => "metadata".fmt(f), + } + } +} + +/// Commandline arguments passed to the compiler have to be incorporated with +/// the dependency tracking system for incremental compilation. This module +/// provides some utilities to make this more convenient. 
+/// +/// The values of all commandline arguments that are relevant for dependency +/// tracking are hashed into a single value that determines whether the +/// incremental compilation cache can be re-used or not. This hashing is done +/// via the DepTrackingHash trait defined below, since the standard Hash +/// implementation might not be suitable (e.g. arguments are stored in a Vec, +/// the hash of which is order dependent, but we might not want the order of +/// arguments to make a difference for the hash). +/// +/// However, since the value provided by Hash::hash often *is* suitable, +/// especially for primitive types, there is the +/// impl_dep_tracking_hash_via_hash!() macro that allows to simply reuse the +/// Hash implementation for DepTrackingHash. It's important though that +/// we have an opt-in scheme here, so one is hopefully forced to think about +/// how the hash should be calculated when adding a new commandline argument. +mod dep_tracking { + use lint; + use middle::cstore; + use session::search_paths::{PathKind, SearchPaths}; + use std::collections::BTreeMap; + use std::hash::Hash; + use std::path::PathBuf; + use std::collections::hash_map::DefaultHasher; + use super::{Passes, CrateType, OptLevel, DebugInfoLevel, + OutputTypes, Externs, ErrorOutputType}; + use syntax::feature_gate::UnstableFeatures; + use rustc_back::PanicStrategy; + + pub trait DepTrackingHash { + fn hash(&self, &mut DefaultHasher, ErrorOutputType); + } + + macro_rules! impl_dep_tracking_hash_via_hash { + ($t:ty) => ( + impl DepTrackingHash for $t { + fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) { + Hash::hash(self, hasher); + } + } + ) + } + + macro_rules! 
impl_dep_tracking_hash_for_sortable_vec_of { + ($t:ty) => ( + impl DepTrackingHash for Vec<$t> { + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { + let mut elems: Vec<&$t> = self.iter().collect(); + elems.sort(); + Hash::hash(&elems.len(), hasher); + for (index, elem) in elems.iter().enumerate() { + Hash::hash(&index, hasher); + DepTrackingHash::hash(*elem, hasher, error_format); + } + } + } + ); + } + + impl_dep_tracking_hash_via_hash!(bool); + impl_dep_tracking_hash_via_hash!(usize); + impl_dep_tracking_hash_via_hash!(String); + impl_dep_tracking_hash_via_hash!(lint::Level); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(Option); + impl_dep_tracking_hash_via_hash!(CrateType); + impl_dep_tracking_hash_via_hash!(PanicStrategy); + impl_dep_tracking_hash_via_hash!(Passes); + impl_dep_tracking_hash_via_hash!(OptLevel); + impl_dep_tracking_hash_via_hash!(DebugInfoLevel); + impl_dep_tracking_hash_via_hash!(UnstableFeatures); + impl_dep_tracking_hash_via_hash!(Externs); + impl_dep_tracking_hash_via_hash!(OutputTypes); + impl_dep_tracking_hash_via_hash!(cstore::NativeLibraryKind); + + impl_dep_tracking_hash_for_sortable_vec_of!(String); + impl_dep_tracking_hash_for_sortable_vec_of!(CrateType); + impl_dep_tracking_hash_for_sortable_vec_of!((String, lint::Level)); + impl_dep_tracking_hash_for_sortable_vec_of!((String, cstore::NativeLibraryKind)); + + impl DepTrackingHash for SearchPaths { + fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType) { + let mut elems: Vec<_> = self + .iter(PathKind::All) + .collect(); + elems.sort(); + Hash::hash(&elems, hasher); + } + } + + impl DepTrackingHash for (T1, T2) + where T1: DepTrackingHash, + T2: DepTrackingHash + { + fn hash(&self, hasher: &mut DefaultHasher, error_format: ErrorOutputType) { 
+ Hash::hash(&0, hasher); + DepTrackingHash::hash(&self.0, hasher, error_format); + Hash::hash(&1, hasher); + DepTrackingHash::hash(&self.1, hasher, error_format); + } + } + + // This is a stable hash because BTreeMap is a sorted container + pub fn stable_hash(sub_hashes: BTreeMap<&'static str, &DepTrackingHash>, + hasher: &mut DefaultHasher, + error_format: ErrorOutputType) { + for (key, sub_hash) in sub_hashes { + // Using Hash::hash() instead of DepTrackingHash::hash() is fine for + // the keys, as they are just plain strings + Hash::hash(&key.len(), hasher); + Hash::hash(key, hasher); + sub_hash.hash(hasher, error_format); } } } #[cfg(test)] mod tests { - use middle::cstore::DummyCrateStore; - use session::config::{build_configuration, optgroups, build_session_options}; + use dep_graph::DepGraph; + use errors; + use getopts::{getopts, OptGroup}; + use lint; + use middle::cstore::{self, DummyCrateStore}; + use session::config::{build_configuration, build_session_options_and_crate_config}; use session::build_session; - + use std::collections::{BTreeMap, BTreeSet}; + use std::iter::FromIterator; + use std::path::PathBuf; use std::rc::Rc; - use getopts::getopts; - use syntax::attr; - use syntax::attr::AttrMetaMethods; - use syntax::diagnostics; + use super::{OutputType, OutputTypes, Externs}; + use rustc_back::PanicStrategy; + use syntax::symbol::Symbol; + + fn optgroups() -> Vec { + super::rustc_optgroups().into_iter() + .map(|a| a.opt_group) + .collect() + } + + fn mk_map(entries: Vec<(K, V)>) -> BTreeMap { + BTreeMap::from_iter(entries.into_iter()) + } + + fn mk_set(entries: Vec) -> BTreeSet { + BTreeSet::from_iter(entries.into_iter()) + } // When the user supplies --test we should implicitly supply --cfg test #[test] fn test_switch_implies_cfg_test() { + let dep_graph = DepGraph::new(false); let matches = &match getopts(&["--test".to_string()], &optgroups()) { Ok(m) => m, Err(f) => panic!("test_switch_implies_cfg_test: {}", f) }; - let registry = 
diagnostics::registry::Registry::new(&[]); - let sessopts = build_session_options(matches); - let sess = build_session(sessopts, None, registry, Rc::new(DummyCrateStore)); - let cfg = build_configuration(&sess); - assert!((attr::contains_name(&cfg[..], "test"))); + let registry = errors::registry::Registry::new(&[]); + let (sessopts, cfg) = build_session_options_and_crate_config(matches); + let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); + let cfg = build_configuration(&sess, cfg); + assert!(cfg.contains(&(Symbol::intern("test"), None))); } // When the user supplies --test and --cfg test, don't implicitly add // another --cfg test #[test] fn test_switch_implies_cfg_test_unless_cfg_test() { + let dep_graph = DepGraph::new(false); let matches = &match getopts(&["--test".to_string(), "--cfg=test".to_string()], &optgroups()) { @@ -1253,25 +1824,26 @@ mod tests { panic!("test_switch_implies_cfg_test_unless_cfg_test: {}", f) } }; - let registry = diagnostics::registry::Registry::new(&[]); - let sessopts = build_session_options(matches); - let sess = build_session(sessopts, None, registry, + let registry = errors::registry::Registry::new(&[]); + let (sessopts, cfg) = build_session_options_and_crate_config(matches); + let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); - let cfg = build_configuration(&sess); - let mut test_items = cfg.iter().filter(|m| m.name() == "test"); + let cfg = build_configuration(&sess, cfg); + let mut test_items = cfg.iter().filter(|&&(name, _)| name == "test"); assert!(test_items.next().is_some()); assert!(test_items.next().is_none()); } #[test] fn test_can_print_warnings() { + let dep_graph = DepGraph::new(false); { let matches = getopts(&[ "-Awarnings".to_string() ], &optgroups()).unwrap(); - let registry = diagnostics::registry::Registry::new(&[]); - let sessopts = build_session_options(&matches); - let sess = build_session(sessopts, None, registry, + let registry 
= errors::registry::Registry::new(&[]); + let (sessopts, _) = build_session_options_and_crate_config(&matches); + let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); assert!(!sess.diagnostic().can_emit_warnings); } @@ -1281,9 +1853,9 @@ mod tests { "-Awarnings".to_string(), "-Dwarnings".to_string() ], &optgroups()).unwrap(); - let registry = diagnostics::registry::Registry::new(&[]); - let sessopts = build_session_options(&matches); - let sess = build_session(sessopts, None, registry, + let registry = errors::registry::Registry::new(&[]); + let (sessopts, _) = build_session_options_and_crate_config(&matches); + let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); assert!(sess.diagnostic().can_emit_warnings); } @@ -1292,11 +1864,586 @@ mod tests { let matches = getopts(&[ "-Adead_code".to_string() ], &optgroups()).unwrap(); - let registry = diagnostics::registry::Registry::new(&[]); - let sessopts = build_session_options(&matches); - let sess = build_session(sessopts, None, registry, + let registry = errors::registry::Registry::new(&[]); + let (sessopts, _) = build_session_options_and_crate_config(&matches); + let sess = build_session(sessopts, &dep_graph, None, registry, Rc::new(DummyCrateStore)); assert!(sess.diagnostic().can_emit_warnings); } } + + #[test] + fn test_output_types_tracking_hash_different_paths() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + v1.output_types = OutputTypes::new(&[(OutputType::Exe, + Some(PathBuf::from("./some/thing")))]); + v2.output_types = OutputTypes::new(&[(OutputType::Exe, + Some(PathBuf::from("/some/thing")))]); + v3.output_types = OutputTypes::new(&[(OutputType::Exe, None)]); + + assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); + assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash()); + + // Check clone + 
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_output_types_tracking_hash_different_construction_order() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + + v1.output_types = OutputTypes::new(&[ + (OutputType::Exe, Some(PathBuf::from("./some/thing"))), + (OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))), + ]); + + v2.output_types = OutputTypes::new(&[ + (OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))), + (OutputType::Exe, Some(PathBuf::from("./some/thing"))), + ]); + + assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + } + + #[test] + fn test_externs_tracking_hash_different_values() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + v1.externs = Externs::new(mk_map(vec![ + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + (String::from("d"), mk_set(vec![String::from("e"), + String::from("f")])), + ])); + + v2.externs = Externs::new(mk_map(vec![ + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + (String::from("X"), mk_set(vec![String::from("e"), + String::from("f")])), + ])); + + v3.externs = Externs::new(mk_map(vec![ + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + (String::from("d"), mk_set(vec![String::from("X"), + String::from("f")])), + ])); + + assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); + assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), 
v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_externs_tracking_hash_different_construction_order() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + v1.externs = Externs::new(mk_map(vec![ + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + (String::from("d"), mk_set(vec![String::from("e"), + String::from("f")])), + ])); + + v2.externs = Externs::new(mk_map(vec![ + (String::from("d"), mk_set(vec![String::from("e"), + String::from("f")])), + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + ])); + + v3.externs = Externs::new(mk_map(vec![ + (String::from("a"), mk_set(vec![String::from("b"), + String::from("c")])), + (String::from("d"), mk_set(vec![String::from("f"), + String::from("e")])), + ])); + + assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash()); + assert_eq!(v1.dep_tracking_hash(), v3.dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v3.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_lints_tracking_hash_different_values() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + v1.lint_opts = vec![(String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Deny), + (String::from("d"), lint::Forbid)]; + + v2.lint_opts = vec![(String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("X"), lint::Deny), + (String::from("d"), lint::Forbid)]; + + v3.lint_opts = vec![(String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Forbid), + (String::from("d"), 
lint::Deny)]; + + assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); + assert!(v2.dep_tracking_hash() != v3.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_lints_tracking_hash_different_construction_order() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + + v1.lint_opts = vec![(String::from("a"), lint::Allow), + (String::from("b"), lint::Warn), + (String::from("c"), lint::Deny), + (String::from("d"), lint::Forbid)]; + + v2.lint_opts = vec![(String::from("a"), lint::Allow), + (String::from("c"), lint::Deny), + (String::from("b"), lint::Warn), + (String::from("d"), lint::Forbid)]; + + assert_eq!(v1.dep_tracking_hash(), v2.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + } + + #[test] + fn test_search_paths_tracking_hash_different_values() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + let mut v4 = super::basic_options(); + let mut v5 = super::basic_options(); + + // Reference + v1.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v1.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v1.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v1.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v1.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + // Native changed + v2.search_paths.add_path("native=XXX", super::ErrorOutputType::Json); + v2.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v2.search_paths.add_path("dependency=ghi", 
super::ErrorOutputType::Json); + v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + // Crate changed + v2.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v2.search_paths.add_path("crate=XXX", super::ErrorOutputType::Json); + v2.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + // Dependency changed + v3.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v3.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v3.search_paths.add_path("dependency=XXX", super::ErrorOutputType::Json); + v3.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v3.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + // Framework changed + v4.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v4.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v4.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v4.search_paths.add_path("framework=XXX", super::ErrorOutputType::Json); + v4.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + // All changed + v5.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v5.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v5.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v5.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v5.search_paths.add_path("all=XXX", super::ErrorOutputType::Json); + + assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v4.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v5.dep_tracking_hash()); + + // Check clone + 
assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + assert_eq!(v4.dep_tracking_hash(), v4.clone().dep_tracking_hash()); + assert_eq!(v5.dep_tracking_hash(), v5.clone().dep_tracking_hash()); + } + + #[test] + fn test_search_paths_tracking_hash_different_order() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + let mut v4 = super::basic_options(); + + // Reference + v1.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v1.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v1.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v1.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v1.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + v2.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v2.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v2.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v2.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v2.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + v3.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v3.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + v3.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v3.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + v3.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + + v4.search_paths.add_path("all=mno", super::ErrorOutputType::Json); + v4.search_paths.add_path("native=abc", super::ErrorOutputType::Json); + v4.search_paths.add_path("crate=def", super::ErrorOutputType::Json); + v4.search_paths.add_path("dependency=ghi", super::ErrorOutputType::Json); + 
v4.search_paths.add_path("framework=jkl", super::ErrorOutputType::Json); + + assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() == v4.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + assert_eq!(v4.dep_tracking_hash(), v4.clone().dep_tracking_hash()); + } + + #[test] + fn test_native_libs_tracking_hash_different_values() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + // Reference + v1.libs = vec![(String::from("a"), cstore::NativeStatic), + (String::from("b"), cstore::NativeFramework), + (String::from("c"), cstore::NativeUnknown)]; + + // Change label + v2.libs = vec![(String::from("a"), cstore::NativeStatic), + (String::from("X"), cstore::NativeFramework), + (String::from("c"), cstore::NativeUnknown)]; + + // Change kind + v3.libs = vec![(String::from("a"), cstore::NativeStatic), + (String::from("b"), cstore::NativeStatic), + (String::from("c"), cstore::NativeUnknown)]; + + assert!(v1.dep_tracking_hash() != v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() != v3.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_native_libs_tracking_hash_different_order() { + let mut v1 = super::basic_options(); + let mut v2 = super::basic_options(); + let mut v3 = super::basic_options(); + + // Reference + v1.libs = vec![(String::from("a"), cstore::NativeStatic), + (String::from("b"), cstore::NativeFramework), + (String::from("c"), cstore::NativeUnknown)]; + + v2.libs = 
vec![(String::from("b"), cstore::NativeFramework), + (String::from("a"), cstore::NativeStatic), + (String::from("c"), cstore::NativeUnknown)]; + + v3.libs = vec![(String::from("c"), cstore::NativeUnknown), + (String::from("a"), cstore::NativeStatic), + (String::from("b"), cstore::NativeFramework)]; + + assert!(v1.dep_tracking_hash() == v2.dep_tracking_hash()); + assert!(v1.dep_tracking_hash() == v3.dep_tracking_hash()); + assert!(v2.dep_tracking_hash() == v3.dep_tracking_hash()); + + // Check clone + assert_eq!(v1.dep_tracking_hash(), v1.clone().dep_tracking_hash()); + assert_eq!(v2.dep_tracking_hash(), v2.clone().dep_tracking_hash()); + assert_eq!(v3.dep_tracking_hash(), v3.clone().dep_tracking_hash()); + } + + #[test] + fn test_codegen_options_tracking_hash() { + let reference = super::basic_options(); + let mut opts = super::basic_options(); + + // Make sure the changing an [UNTRACKED] option leaves the hash unchanged + opts.cg.ar = Some(String::from("abc")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.linker = Some(String::from("linker")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.link_args = Some(vec![String::from("abc"), String::from("def")]); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.link_dead_code = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.rpath = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.extra_filename = String::from("extra-filename"); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.codegen_units = 42; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.remark = super::SomePasses(vec![String::from("pass1"), + String::from("pass2")]); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + opts.cg.save_temps = true; + assert_eq!(reference.dep_tracking_hash(), 
opts.dep_tracking_hash()); + + + // Make sure changing a [TRACKED] option changes the hash + opts = reference.clone(); + opts.cg.lto = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.target_cpu = Some(String::from("abc")); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.target_feature = String::from("all the features, all of them"); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.passes = vec![String::from("1"), String::from("2")]; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.llvm_args = vec![String::from("1"), String::from("2")]; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.no_prepopulate_passes = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.no_vectorize_loops = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.no_vectorize_slp = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.soft_float = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.prefer_dynamic = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.no_integrated_as = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.no_redzone = Some(true); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.relocation_model = Some(String::from("relocation model")); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = 
reference.clone(); + opts.cg.code_model = Some(String::from("code model")); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.metadata = vec![String::from("A"), String::from("B")]; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.debuginfo = Some(0xdeadbeef); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.debuginfo = Some(0xba5eba11); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.debug_assertions = Some(true); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.inline_threshold = Some(0xf007ba11); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.panic = Some(PanicStrategy::Abort); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + } + + #[test] + fn test_debugging_options_tracking_hash() { + let reference = super::basic_options(); + let mut opts = super::basic_options(); + + // Make sure the changing an [UNTRACKED] option leaves the hash unchanged + opts.debugging_opts.verbose = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.time_passes = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.count_llvm_insns = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.time_llvm_passes = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.input_stats = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.trans_stats = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.borrowck_stats = true; + 
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.debug_llvm = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.meta_stats = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.print_link_args = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.print_llvm_passes = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.ast_json = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.ast_json_noexpand = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.ls = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.save_analysis = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.save_analysis_csv = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.save_analysis_api = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.print_move_fragments = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.flowgraph_print_loans = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.flowgraph_print_moves = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.flowgraph_print_assigns = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.flowgraph_print_all = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.print_region_graph = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.parse_only = true; + 
assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.incremental = Some(String::from("abc")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.dump_dep_graph = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.query_dep_graph = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.no_analysis = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.unstable_options = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.trace_macros = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.keep_hygiene_data = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.keep_ast = true; + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.print_trans_items = Some(String::from("abc")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.dump_mir = Some(String::from("abc")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + opts.debugging_opts.dump_mir_dir = Some(String::from("abc")); + assert_eq!(reference.dep_tracking_hash(), opts.dep_tracking_hash()); + + // Make sure changing a [TRACKED] option changes the hash + opts = reference.clone(); + opts.debugging_opts.asm_comments = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.no_verify = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.no_landing_pads = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.no_trans = true; + 
assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.treat_err_as_bug = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.continue_parse_after_error = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.extra_plugins = vec![String::from("plugin1"), String::from("plugin2")]; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.force_overflow_checks = Some(true); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.enable_nonzeroing_move_hints = true; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.show_span = Some(String::from("abc")); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.mir_opt_level = Some(1); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + } } diff --git a/src/librustc/session/filesearch.rs b/src/librustc/session/filesearch.rs index 09c6b54d99cf8..82c2425aead73 100644 --- a/src/librustc/session/filesearch.rs +++ b/src/librustc/session/filesearch.rs @@ -12,10 +12,10 @@ pub use self::FileMatch::*; +use std::borrow::Cow; use std::collections::HashSet; use std::env; use std::fs; -use std::io::prelude::*; use std::path::{Path, PathBuf}; use session::search_paths::{SearchPaths, PathKind}; @@ -68,33 +68,32 @@ impl<'a> FileSearch<'a> { { self.for_each_lib_search_path(|lib_search_path, kind| { debug!("searching {}", lib_search_path.display()); - match fs::read_dir(lib_search_path) { - Ok(files) => { - let files = files.filter_map(|p| p.ok().map(|s| s.path())) - .collect::>(); - fn is_rlib(p: &Path) -> bool { - p.extension().and_then(|s| 
s.to_str()) == Some("rlib") + let files = match fs::read_dir(lib_search_path) { + Ok(files) => files, + Err(..) => return, + }; + let files = files.filter_map(|p| p.ok().map(|s| s.path())) + .collect::>(); + fn is_rlib(p: &Path) -> bool { + p.extension() == Some("rlib".as_ref()) + } + // Reading metadata out of rlibs is faster, and if we find both + // an rlib and a dylib we only read one of the files of + // metadata, so in the name of speed, bring all rlib files to + // the front of the search list. + let files1 = files.iter().filter(|p| is_rlib(p)); + let files2 = files.iter().filter(|p| !is_rlib(p)); + for path in files1.chain(files2) { + debug!("testing {}", path.display()); + let maybe_picked = pick(path, kind); + match maybe_picked { + FileMatches => { + debug!("picked {}", path.display()); } - // Reading metadata out of rlibs is faster, and if we find both - // an rlib and a dylib we only read one of the files of - // metadata, so in the name of speed, bring all rlib files to - // the front of the search list. - let files1 = files.iter().filter(|p| is_rlib(p)); - let files2 = files.iter().filter(|p| !is_rlib(p)); - for path in files1.chain(files2) { - debug!("testing {}", path.display()); - let maybe_picked = pick(path, kind); - match maybe_picked { - FileMatches => { - debug!("picked {}", path.display()); - } - FileDoesntMatch => { - debug!("rejected {}", path.display()); - } - } + FileDoesntMatch => { + debug!("rejected {}", path.display()); } } - Err(..) => (), } }); } @@ -124,8 +123,8 @@ impl<'a> FileSearch<'a> { // Returns a list of directories where target-specific tool binaries are located. 
pub fn get_tools_search_paths(&self) -> Vec { let mut p = PathBuf::from(self.sysroot); - p.push(&find_libdir(self.sysroot)); - p.push(&rustlibdir()); + p.push(find_libdir(self.sysroot).as_ref()); + p.push(RUST_LIB_DIR); p.push(&self.triple); p.push("bin"); vec![p] @@ -133,9 +132,9 @@ impl<'a> FileSearch<'a> { } pub fn relative_target_lib_path(sysroot: &Path, target_triple: &str) -> PathBuf { - let mut p = PathBuf::from(&find_libdir(sysroot)); + let mut p = PathBuf::from(find_libdir(sysroot).as_ref()); assert!(p.is_relative()); - p.push(&rustlibdir()); + p.push(RUST_LIB_DIR); p.push(target_triple); p.push("lib"); p @@ -155,19 +154,19 @@ pub fn get_or_default_sysroot() -> PathBuf { // gcc chokes on verbatim paths which fs::canonicalize generates // so we try to avoid those kinds of paths. Ok(canon) => Some(rustcfs::fix_windows_verbatim_for_gcc(&canon)), - Err(e) => panic!("failed to get realpath: {}", e), + Err(e) => bug!("failed to get realpath: {}", e), } }) } match canonicalize(env::current_exe().ok()) { Some(mut p) => { p.pop(); p.pop(); p } - None => panic!("can't determine value for sysroot") + None => bug!("can't determine value for sysroot") } } // The name of the directory rustc expects libraries to be located. -fn find_libdir(sysroot: &Path) -> String { +fn find_libdir(sysroot: &Path) -> Cow<'static, str> { // FIXME: This is a quick hack to make the rustc binary able to locate // Rust libraries in Linux environments where libraries might be installed // to lib64/lib32. This would be more foolproof by basing the sysroot off @@ -177,31 +176,23 @@ fn find_libdir(sysroot: &Path) -> String { // "lib" (i.e. non-default), this value is used (see issue #16552). 
match option_env!("CFG_LIBDIR_RELATIVE") { - Some(libdir) if libdir != "lib" => return libdir.to_string(), - _ => if sysroot.join(&primary_libdir_name()).join(&rustlibdir()).exists() { - return primary_libdir_name(); + Some(libdir) if libdir != "lib" => return libdir.into(), + _ => if sysroot.join(PRIMARY_LIB_DIR).join(RUST_LIB_DIR).exists() { + return PRIMARY_LIB_DIR.into(); } else { - return secondary_libdir_name(); + return SECONDARY_LIB_DIR.into(); } } #[cfg(target_pointer_width = "64")] - fn primary_libdir_name() -> String { - "lib64".to_string() - } + const PRIMARY_LIB_DIR: &'static str = "lib64"; #[cfg(target_pointer_width = "32")] - fn primary_libdir_name() -> String { - "lib32".to_string() - } + const PRIMARY_LIB_DIR: &'static str = "lib32"; - fn secondary_libdir_name() -> String { - "lib".to_string() - } + const SECONDARY_LIB_DIR: &'static str = "lib"; } // The name of rustc's own place to organize libraries. // Used to be "rustc", now the default is "rustlib" -pub fn rustlibdir() -> String { - "rustlib".to_string() -} +const RUST_LIB_DIR: &'static str = "rustlib"; diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 2f3af1c0d09b5..91765e68ae6e1 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -8,32 +8,51 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+pub use self::code_stats::{CodeStats, DataTypeKind, FieldInfo}; +pub use self::code_stats::{SizeKind, TypeSizeInfo, VariantInfo}; + +use dep_graph::DepGraph; +use hir::def_id::{CrateNum, DefIndex}; +use hir::svh::Svh; use lint; use middle::cstore::CrateStore; use middle::dependency_format; use session::search_paths::PathKind; -use util::nodemap::{NodeMap, FnvHashMap}; - -use syntax::ast::{NodeId, NodeIdAssigner, Name}; -use syntax::codemap::Span; -use syntax::errors::{self, DiagnosticBuilder}; -use syntax::errors::emitter::{Emitter, BasicEmitter, EmitterWriter}; -use syntax::errors::json::JsonEmitter; -use syntax::diagnostics; +use session::config::DebugInfoLevel; +use ty::tls; +use util::nodemap::{NodeMap, FxHashMap, FxHashSet}; +use util::common::duration_to_secs_str; +use mir::transform as mir_pass; + +use syntax::ast::NodeId; +use errors::{self, DiagnosticBuilder}; +use errors::emitter::{Emitter, EmitterWriter}; +use syntax::json::JsonEmitter; use syntax::feature_gate; use syntax::parse; use syntax::parse::ParseSess; +use syntax::symbol::Symbol; use syntax::{ast, codemap}; use syntax::feature_gate::AttributeType; +use syntax_pos::{Span, MultiSpan}; +use rustc_back::PanicStrategy; use rustc_back::target::Target; +use rustc_data_structures::flock; +use llvm; use std::path::{Path, PathBuf}; -use std::cell::{Cell, RefCell}; -use std::collections::HashSet; +use std::cell::{self, Cell, RefCell}; +use std::collections::HashMap; use std::env; +use std::ffi::CString; +use std::io::Write; use std::rc::Rc; +use std::fmt; +use std::time::Duration; +use libc::c_int; +mod code_stats; pub mod config; pub mod filesearch; pub mod search_paths; @@ -41,15 +60,17 @@ pub mod search_paths; // Represents the data associated with a compilation // session for a single crate. 
pub struct Session { + pub dep_graph: DepGraph, pub target: config::Config, pub host: Target, pub opts: config::Options, pub cstore: Rc CrateStore<'a>>, pub parse_sess: ParseSess, // For a library crate, this is always none - pub entry_fn: RefCell>, + pub entry_fn: RefCell>, pub entry_type: Cell>, pub plugin_registrar_fn: Cell>, + pub derive_registrar_fn: Cell>, pub default_sysroot: Option, // The name of the root source file of the crate, in the local file system. // The path is always expected to be absolute. `None` means that there is no @@ -57,112 +78,140 @@ pub struct Session { pub local_crate_source_file: Option, pub working_dir: PathBuf, pub lint_store: RefCell, - pub lints: RefCell>>, + pub lints: RefCell>>, + /// Set of (LintId, span, message) tuples tracking lint (sub)diagnostics + /// that have been set once, but should not be set again, in order to avoid + /// redundantly verbose output (Issue #24690). + pub one_time_diagnostics: RefCell>, pub plugin_llvm_passes: RefCell>, + pub mir_passes: RefCell, pub plugin_attributes: RefCell>, pub crate_types: RefCell>, pub dependency_formats: RefCell, - pub crate_metadata: RefCell>, + // The crate_disambiguator is constructed out of all the `-C metadata` + // arguments passed to the compiler. Its value together with the crate-name + // forms a unique global identifier for the crate. It is used to allow + // multiple crates with the same name to coexist. See the + // trans::back::symbol_names module for more information. + pub crate_disambiguator: RefCell, pub features: RefCell, /// The maximum recursion limit for potentially infinitely recursive /// operations such as auto-dereference and monomorphization. pub recursion_limit: Cell, - /// The metadata::creader module may inject an allocator dependency if it - /// didn't already find one, and this tracks what was injected. - pub injected_allocator: Cell>, + /// The maximum length of types during monomorphization. 
+ pub type_length_limit: Cell, + + /// The metadata::creader module may inject an allocator/panic_runtime + /// dependency if it didn't already find one, and this tracks what was + /// injected. + pub injected_allocator: Cell>, + pub injected_panic_runtime: Cell>, + + /// Map from imported macro spans (which consist of + /// the localized span for the macro body) to the + /// macro name and defintion span in the source crate. + pub imported_macro_spans: RefCell>, - /// Names of all bang-style macros and syntax extensions - /// available in this crate - pub available_macros: RefCell>, + incr_comp_session: RefCell, + + /// Some measurements that are being gathered during compilation. + pub perf_stats: PerfStats, + + /// Data about code being compiled, gathered during compilation. + pub code_stats: RefCell, next_node_id: Cell, } +pub struct PerfStats { + // The accumulated time needed for computing the SVH of the crate + pub svh_time: Cell, + // The accumulated time spent on computing incr. comp. hashes + pub incr_comp_hashes_time: Cell, + // The number of incr. comp. 
hash computations performed + pub incr_comp_hashes_count: Cell, + // The number of bytes hashed when computing ICH values + pub incr_comp_bytes_hashed: Cell, + // The accumulated time spent on computing symbol hashes + pub symbol_hash_time: Cell, +} + impl Session { - pub fn struct_span_warn<'a>(&'a self, - sp: Span, - msg: &str) - -> DiagnosticBuilder<'a> { + pub fn local_crate_disambiguator(&self) -> Symbol { + *self.crate_disambiguator.borrow() + } + pub fn struct_span_warn<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_warn(sp, msg) } - pub fn struct_span_warn_with_code<'a>(&'a self, - sp: Span, - msg: &str, - code: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_warn_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_warn_with_code(sp, msg, code) } pub fn struct_warn<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { self.diagnostic().struct_warn(msg) } - pub fn struct_span_err<'a>(&'a self, - sp: Span, - msg: &str) - -> DiagnosticBuilder<'a> { - match split_msg_into_multilines(msg) { - Some(ref msg) => self.diagnostic().struct_span_err(sp, msg), - None => self.diagnostic().struct_span_err(sp, msg), - } + pub fn struct_span_err<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + self.diagnostic().struct_span_err(sp, msg) } - pub fn struct_span_err_with_code<'a>(&'a self, - sp: Span, - msg: &str, - code: &str) - -> DiagnosticBuilder<'a> { - match split_msg_into_multilines(msg) { - Some(ref msg) => self.diagnostic().struct_span_err_with_code(sp, msg, code), - None => self.diagnostic().struct_span_err_with_code(sp, msg, code), - } + pub fn struct_span_err_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { + self.diagnostic().struct_span_err_with_code(sp, msg, code) } pub fn struct_err<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { 
self.diagnostic().struct_err(msg) } - pub fn struct_span_fatal<'a>(&'a self, - sp: Span, - msg: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_fatal<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_fatal(sp, msg) } - pub fn struct_span_fatal_with_code<'a>(&'a self, - sp: Span, - msg: &str, - code: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_fatal_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { self.diagnostic().struct_span_fatal_with_code(sp, msg, code) } pub fn struct_fatal<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { self.diagnostic().struct_fatal(msg) } - pub fn span_fatal(&self, sp: Span, msg: &str) -> ! { + pub fn span_fatal>(&self, sp: S, msg: &str) -> ! { panic!(self.diagnostic().span_fatal(sp, msg)) } - pub fn span_fatal_with_code(&self, sp: Span, msg: &str, code: &str) -> ! { + pub fn span_fatal_with_code>(&self, sp: S, msg: &str, code: &str) -> ! { panic!(self.diagnostic().span_fatal_with_code(sp, msg, code)) } pub fn fatal(&self, msg: &str) -> ! 
{ panic!(self.diagnostic().fatal(msg)) } - pub fn span_err_or_warn(&self, is_warning: bool, sp: Span, msg: &str) { + pub fn span_err_or_warn>(&self, is_warning: bool, sp: S, msg: &str) { if is_warning { self.span_warn(sp, msg); } else { self.span_err(sp, msg); } } - pub fn span_err(&self, sp: Span, msg: &str) { - match split_msg_into_multilines(msg) { - Some(msg) => self.diagnostic().span_err(sp, &msg), - None => self.diagnostic().span_err(sp, msg) - } + pub fn span_err>(&self, sp: S, msg: &str) { + self.diagnostic().span_err(sp, msg) } - pub fn span_err_with_code(&self, sp: Span, msg: &str, code: &str) { - match split_msg_into_multilines(msg) { - Some(msg) => self.diagnostic().span_err_with_code(sp, &msg, code), - None => self.diagnostic().span_err_with_code(sp, msg, code) - } + pub fn span_err_with_code>(&self, sp: S, msg: &str, code: &str) { + self.diagnostic().span_err_with_code(sp, &msg, code) } pub fn err(&self, msg: &str) { self.diagnostic().err(msg) @@ -176,100 +225,130 @@ impl Session { pub fn abort_if_errors(&self) { self.diagnostic().abort_if_errors(); } - pub fn abort_if_new_errors(&self, mut f: F) - where F: FnMut() + pub fn track_errors(&self, f: F) -> Result + where F: FnOnce() -> T { - let count = self.err_count(); - f(); - if self.err_count() > count { - self.abort_if_errors(); + let old_count = self.err_count(); + let result = f(); + let errors = self.err_count() - old_count; + if errors == 0 { + Ok(result) + } else { + Err(errors) } } - pub fn span_warn(&self, sp: Span, msg: &str) { + pub fn span_warn>(&self, sp: S, msg: &str) { self.diagnostic().span_warn(sp, msg) } - pub fn span_warn_with_code(&self, sp: Span, msg: &str, code: &str) { + pub fn span_warn_with_code>(&self, sp: S, msg: &str, code: &str) { self.diagnostic().span_warn_with_code(sp, msg, code) } pub fn warn(&self, msg: &str) { self.diagnostic().warn(msg) } - pub fn opt_span_warn(&self, opt_sp: Option, msg: &str) { + pub fn opt_span_warn>(&self, opt_sp: Option, msg: &str) { match 
opt_sp { Some(sp) => self.span_warn(sp, msg), None => self.warn(msg), } } - pub fn opt_span_bug(&self, opt_sp: Option, msg: &str) -> ! { - match opt_sp { - Some(sp) => self.span_bug(sp, msg), - None => self.bug(msg), - } - } /// Delay a span_bug() call until abort_if_errors() - pub fn delay_span_bug(&self, sp: Span, msg: &str) { + pub fn delay_span_bug>(&self, sp: S, msg: &str) { self.diagnostic().delay_span_bug(sp, msg) } - pub fn span_bug(&self, sp: Span, msg: &str) -> ! { - self.diagnostic().span_bug(sp, msg) - } - pub fn bug(&self, msg: &str) -> ! { - self.diagnostic().bug(msg) - } pub fn note_without_error(&self, msg: &str) { self.diagnostic().note_without_error(msg) } - pub fn span_note_without_error(&self, sp: Span, msg: &str) { + pub fn span_note_without_error>(&self, sp: S, msg: &str) { self.diagnostic().span_note_without_error(sp, msg) } - pub fn span_unimpl(&self, sp: Span, msg: &str) -> ! { + pub fn span_unimpl>(&self, sp: S, msg: &str) -> ! { self.diagnostic().span_unimpl(sp, msg) } pub fn unimpl(&self, msg: &str) -> ! 
{ self.diagnostic().unimpl(msg) } - pub fn add_lint(&self, - lint: &'static lint::Lint, - id: ast::NodeId, - sp: Span, - msg: String) { + pub fn add_lint>(&self, + lint: &'static lint::Lint, + id: ast::NodeId, + sp: S, + msg: String) + { + self.add_lint_diagnostic(lint, id, (sp, &msg[..])) + } + + pub fn add_lint_diagnostic(&self, + lint: &'static lint::Lint, + id: ast::NodeId, + msg: M) + where M: lint::IntoEarlyLint, + { let lint_id = lint::LintId::of(lint); let mut lints = self.lints.borrow_mut(); - match lints.get_mut(&id) { - Some(arr) => { arr.push((lint_id, sp, msg)); return; } - None => {} + let early_lint = msg.into_early_lint(lint_id); + if let Some(arr) = lints.get_mut(&id) { + if !arr.contains(&early_lint) { + arr.push(early_lint); + } + return; } - lints.insert(id, vec!((lint_id, sp, msg))); + lints.insert(id, vec![early_lint]); } - pub fn reserve_node_ids(&self, count: ast::NodeId) -> ast::NodeId { + pub fn reserve_node_ids(&self, count: usize) -> ast::NodeId { let id = self.next_node_id.get(); - match id.checked_add(count) { - Some(next) => self.next_node_id.set(next), - None => self.bug("Input too large, ran out of node ids!") + match id.as_usize().checked_add(count) { + Some(next) => { + self.next_node_id.set(ast::NodeId::new(next)); + } + None => bug!("Input too large, ran out of node ids!") } id } + pub fn next_node_id(&self) -> NodeId { + self.reserve_node_ids(1) + } pub fn diagnostic<'a>(&'a self) -> &'a errors::Handler { &self.parse_sess.span_diagnostic } + + /// Analogous to calling `.span_note` on the given DiagnosticBuilder, but + /// deduplicates on lint ID, span, and message for this `Session` if we're + /// not outputting in JSON mode. 
+ // + // FIXME: if the need arises for one-time diagnostics other than + // `span_note`, we almost certainly want to generalize this + // "check/insert-into the one-time diagnostics map, then set message if + // it's not already there" code to accomodate all of them + pub fn diag_span_note_once<'a, 'b>(&'a self, + diag_builder: &'b mut DiagnosticBuilder<'a>, + lint: &'static lint::Lint, span: Span, message: &str) { + match self.opts.error_format { + // when outputting JSON for tool consumption, the tool might want + // the duplicates + config::ErrorOutputType::Json => { + diag_builder.span_note(span, &message); + }, + _ => { + let lint_id = lint::LintId::of(lint); + let id_span_message = (lint_id, span, message.to_owned()); + let fresh = self.one_time_diagnostics.borrow_mut().insert(id_span_message); + if fresh { + diag_builder.span_note(span, &message); + } + } + } + } + pub fn codemap<'a>(&'a self) -> &'a codemap::CodeMap { self.parse_sess.codemap() } - // This exists to help with refactoring to eliminate impossible - // cases later on - pub fn impossible_case(&self, sp: Span, msg: &str) -> ! { - self.span_bug(sp, &format!("impossible case reached: {}", msg)); - } pub fn verbose(&self) -> bool { self.opts.debugging_opts.verbose } pub fn time_passes(&self) -> bool { self.opts.debugging_opts.time_passes } pub fn count_llvm_insns(&self) -> bool { self.opts.debugging_opts.count_llvm_insns } - pub fn count_type_sizes(&self) -> bool { - self.opts.debugging_opts.count_type_sizes - } pub fn time_llvm_passes(&self) -> bool { self.opts.debugging_opts.time_llvm_passes } @@ -284,18 +363,39 @@ impl Session { pub fn lto(&self) -> bool { self.opts.cg.lto } + /// Returns the panic strategy for this compile session. If the user explicitly selected one + /// using '-C panic', use that, otherwise use the panic strategy defined by the target. 
+ pub fn panic_strategy(&self) -> PanicStrategy { + self.opts.cg.panic.unwrap_or(self.target.target.options.panic_strategy) + } pub fn no_landing_pads(&self) -> bool { - self.opts.debugging_opts.no_landing_pads + self.opts.debugging_opts.no_landing_pads || self.panic_strategy() == PanicStrategy::Abort } pub fn unstable_options(&self) -> bool { self.opts.debugging_opts.unstable_options } - pub fn print_enum_sizes(&self) -> bool { - self.opts.debugging_opts.print_enum_sizes - } pub fn nonzeroing_move_hints(&self) -> bool { self.opts.debugging_opts.enable_nonzeroing_move_hints } + + pub fn must_not_eliminate_frame_pointers(&self) -> bool { + self.opts.debuginfo != DebugInfoLevel::NoDebugInfo || + !self.target.target.options.eliminate_frame_pointer + } + + /// Returns the symbol name for the registrar function, + /// given the crate Svh and the function DefIndex. + pub fn generate_plugin_registrar_symbol(&self, svh: &Svh, index: DefIndex) + -> String { + format!("__rustc_plugin_registrar__{}_{}", svh, index.as_usize()) + } + + pub fn generate_derive_registrar_symbol(&self, + svh: &Svh, + index: DefIndex) -> String { + format!("__rustc_derive_registrar__{}_{}", svh, index.as_usize()) + } + pub fn sysroot<'a>(&'a self) -> &'a Path { match self.opts.maybe_sysroot { Some (ref sysroot) => sysroot, @@ -316,84 +416,117 @@ impl Session { &self.opts.search_paths, kind) } -} -impl NodeIdAssigner for Session { - fn next_node_id(&self) -> NodeId { - self.reserve_node_ids(1) + pub fn init_incr_comp_session(&self, + session_dir: PathBuf, + lock_file: flock::Lock) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::NotInitialized = *incr_comp_session { } else { + bug!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session) + } + + *incr_comp_session = IncrCompSession::Active { + session_directory: session_dir, + lock_file: lock_file, + }; } - fn peek_node_id(&self) -> NodeId { - self.next_node_id.get().checked_add(1).unwrap() + 
pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::Active { .. } = *incr_comp_session { } else { + bug!("Trying to finalize IncrCompSession `{:?}`", *incr_comp_session) + } + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::Finalized { + session_directory: new_directory_path, + }; + } + + pub fn mark_incr_comp_session_as_invalid(&self) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + let session_directory = match *incr_comp_session { + IncrCompSession::Active { ref session_directory, .. } => { + session_directory.clone() + } + _ => bug!("Trying to invalidate IncrCompSession `{:?}`", + *incr_comp_session), + }; + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { + session_directory: session_directory + }; + } + + pub fn incr_comp_session_dir(&self) -> cell::Ref { + let incr_comp_session = self.incr_comp_session.borrow(); + cell::Ref::map(incr_comp_session, |incr_comp_session| { + match *incr_comp_session { + IncrCompSession::NotInitialized => { + bug!("Trying to get session directory from IncrCompSession `{:?}`", + *incr_comp_session) + } + IncrCompSession::Active { ref session_directory, .. 
} | + IncrCompSession::Finalized { ref session_directory } | + IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => { + session_directory + } + } + }) + } + + pub fn incr_comp_session_dir_opt(&self) -> Option> { + if self.opts.incremental.is_some() { + Some(self.incr_comp_session_dir()) + } else { + None + } } -} -fn split_msg_into_multilines(msg: &str) -> Option { - // Conditions for enabling multi-line errors: - if !msg.contains("mismatched types") && - !msg.contains("type mismatch resolving") && - !msg.contains("if and else have incompatible types") && - !msg.contains("if may be missing an else clause") && - !msg.contains("match arms have incompatible types") && - !msg.contains("structure constructor specifies a structure of type") && - !msg.contains("has an incompatible type for trait") { - return None - } - let first = msg.match_indices("expected").filter(|s| { - s.0 > 0 && (msg.char_at_reverse(s.0) == ' ' || - msg.char_at_reverse(s.0) == '(') - }).map(|(a, b)| (a - 1, a + b.len())); - let second = msg.match_indices("found").filter(|s| { - msg.char_at_reverse(s.0) == ' ' - }).map(|(a, b)| (a - 1, a + b.len())); - - let mut new_msg = String::new(); - let mut head = 0; - - // Insert `\n` before expected and found. - for (pos1, pos2) in first.zip(second) { - new_msg = new_msg + - // A `(` may be preceded by a space and it should be trimmed - msg[head..pos1.0].trim_right() + // prefix - "\n" + // insert before first - &msg[pos1.0..pos1.1] + // insert what first matched - &msg[pos1.1..pos2.0] + // between matches - "\n " + // insert before second - // 123 - // `expected` is 3 char longer than `found`. To align the types, - // `found` gets 3 spaces prepended. - &msg[pos2.0..pos2.1]; // insert what second matched - - head = pos2.1; - } - - let mut tail = &msg[head..]; - let third = tail.find("(values differ") - .or(tail.find("(lifetime")) - .or(tail.find("(cyclic type of infinite size")); - // Insert `\n` before any remaining messages which match. 
- if let Some(pos) = third { - // The end of the message may just be wrapped in `()` without - // `expected`/`found`. Push this also to a new line and add the - // final tail after. - new_msg = new_msg + - // `(` is usually preceded by a space and should be trimmed. - tail[..pos].trim_right() + // prefix - "\n" + // insert before paren - &tail[pos..]; // append the tail - - tail = ""; - } - - new_msg.push_str(tail); - return Some(new_msg); + pub fn print_perf_stats(&self) { + println!("Total time spent computing SVHs: {}", + duration_to_secs_str(self.perf_stats.svh_time.get())); + println!("Total time spent computing incr. comp. hashes: {}", + duration_to_secs_str(self.perf_stats.incr_comp_hashes_time.get())); + println!("Total number of incr. comp. hashes computed: {}", + self.perf_stats.incr_comp_hashes_count.get()); + println!("Total number of bytes hashed for incr. comp.: {}", + self.perf_stats.incr_comp_bytes_hashed.get()); + println!("Average bytes hashed per incr. comp. HIR node: {}", + self.perf_stats.incr_comp_bytes_hashed.get() / + self.perf_stats.incr_comp_hashes_count.get()); + println!("Total time spent computing symbol hashes: {}", + duration_to_secs_str(self.perf_stats.symbol_hash_time.get())); + } } pub fn build_session(sopts: config::Options, + dep_graph: &DepGraph, local_crate_source_file: Option, - registry: diagnostics::registry::Registry, + registry: errors::registry::Registry, cstore: Rc CrateStore<'a>>) -> Session { + build_session_with_codemap(sopts, + dep_graph, + local_crate_source_file, + registry, + cstore, + Rc::new(codemap::CodeMap::new()), + None) +} + +pub fn build_session_with_codemap(sopts: config::Options, + dep_graph: &DepGraph, + local_crate_source_file: Option, + registry: errors::registry::Registry, + cstore: Rc CrateStore<'a>>, + codemap: Rc, + emitter_dest: Option>) + -> Session { // FIXME: This is not general enough to make the warning lint completely override // normal diagnostic warnings, since the warning lint can also 
be denied and changed // later via the source code. @@ -403,16 +536,23 @@ pub fn build_session(sopts: config::Options, .map(|&(_, ref level)| *level != lint::Allow) .last() .unwrap_or(true); - let treat_err_as_bug = sopts.treat_err_as_bug; + let treat_err_as_bug = sopts.debugging_opts.treat_err_as_bug; - let codemap = Rc::new(codemap::CodeMap::new()); - let emitter: Box = match sopts.error_format { - config::ErrorOutputType::HumanReadable(color_config) => { - Box::new(EmitterWriter::stderr(color_config, Some(registry), codemap.clone())) + let emitter: Box = match (sopts.error_format, emitter_dest) { + (config::ErrorOutputType::HumanReadable(color_config), None) => { + Box::new(EmitterWriter::stderr(color_config, + Some(codemap.clone()))) + } + (config::ErrorOutputType::HumanReadable(_), Some(dst)) => { + Box::new(EmitterWriter::new(dst, + Some(codemap.clone()))) } - config::ErrorOutputType::Json => { + (config::ErrorOutputType::Json, None) => { Box::new(JsonEmitter::stderr(Some(registry), codemap.clone())) } + (config::ErrorOutputType::Json, Some(dst)) => { + Box::new(JsonEmitter::new(dst, Some(registry), codemap.clone())) + } }; let diagnostic_handler = @@ -420,10 +560,16 @@ pub fn build_session(sopts: config::Options, treat_err_as_bug, emitter); - build_session_(sopts, local_crate_source_file, diagnostic_handler, codemap, cstore) + build_session_(sopts, + dep_graph, + local_crate_source_file, + diagnostic_handler, + codemap, + cstore) } pub fn build_session_(sopts: config::Options, + dep_graph: &DepGraph, local_crate_source_file: Option, span_diagnostic: errors::Handler, codemap: Rc, @@ -452,6 +598,7 @@ pub fn build_session_(sopts: config::Options, ); let sess = Session { + dep_graph: dep_graph.clone(), target: target_cfg, host: host, opts: sopts, @@ -461,43 +608,181 @@ pub fn build_session_(sopts: config::Options, entry_fn: RefCell::new(None), entry_type: Cell::new(None), plugin_registrar_fn: Cell::new(None), + derive_registrar_fn: Cell::new(None), 
default_sysroot: default_sysroot, local_crate_source_file: local_crate_source_file, working_dir: env::current_dir().unwrap(), lint_store: RefCell::new(lint::LintStore::new()), lints: RefCell::new(NodeMap()), + one_time_diagnostics: RefCell::new(FxHashSet()), plugin_llvm_passes: RefCell::new(Vec::new()), + mir_passes: RefCell::new(mir_pass::Passes::new()), plugin_attributes: RefCell::new(Vec::new()), crate_types: RefCell::new(Vec::new()), - dependency_formats: RefCell::new(FnvHashMap()), - crate_metadata: RefCell::new(Vec::new()), + dependency_formats: RefCell::new(FxHashMap()), + crate_disambiguator: RefCell::new(Symbol::intern("")), features: RefCell::new(feature_gate::Features::new()), recursion_limit: Cell::new(64), - next_node_id: Cell::new(1), + type_length_limit: Cell::new(1048576), + next_node_id: Cell::new(NodeId::new(1)), injected_allocator: Cell::new(None), - available_macros: RefCell::new(HashSet::new()), + injected_panic_runtime: Cell::new(None), + imported_macro_spans: RefCell::new(HashMap::new()), + incr_comp_session: RefCell::new(IncrCompSession::NotInitialized), + perf_stats: PerfStats { + svh_time: Cell::new(Duration::from_secs(0)), + incr_comp_hashes_time: Cell::new(Duration::from_secs(0)), + incr_comp_hashes_count: Cell::new(0), + incr_comp_bytes_hashed: Cell::new(0), + symbol_hash_time: Cell::new(Duration::from_secs(0)), + }, + code_stats: RefCell::new(CodeStats::new()), }; + init_llvm(&sess); + sess } +/// Holds data on the current incremental compilation session, if there is one. +#[derive(Debug)] +pub enum IncrCompSession { + // This is the state the session will be in until the incr. comp. dir is + // needed. + NotInitialized, + // This is the state during which the session directory is private and can + // be modified. + Active { + session_directory: PathBuf, + lock_file: flock::Lock, + }, + // This is the state after the session directory has been finalized. In this + // state, the contents of the directory must not be modified any more. 
+ Finalized { + session_directory: PathBuf, + }, + // This is an error state that is reached when some compilation error has + // occurred. It indicates that the contents of the session directory must + // not be used, since they might be invalid. + InvalidBecauseOfErrors { + session_directory: PathBuf, + } +} + +fn init_llvm(sess: &Session) { + unsafe { + // Before we touch LLVM, make sure that multithreading is enabled. + use std::sync::Once; + static INIT: Once = Once::new(); + static mut POISONED: bool = false; + INIT.call_once(|| { + if llvm::LLVMStartMultithreaded() != 1 { + // use an extra bool to make sure that all future usage of LLVM + // cannot proceed despite the Once not running more than once. + POISONED = true; + } + + configure_llvm(sess); + }); + + if POISONED { + bug!("couldn't enable multi-threaded LLVM"); + } + } +} + +unsafe fn configure_llvm(sess: &Session) { + let mut llvm_c_strs = Vec::new(); + let mut llvm_args = Vec::new(); + + { + let mut add = |arg: &str| { + let s = CString::new(arg).unwrap(); + llvm_args.push(s.as_ptr()); + llvm_c_strs.push(s); + }; + add("rustc"); // fake program name + if sess.time_llvm_passes() { add("-time-passes"); } + if sess.print_llvm_passes() { add("-debug-pass=Structure"); } + + for arg in &sess.opts.cg.llvm_args { + add(&(*arg)); + } + } + + llvm::LLVMInitializePasses(); + + llvm::initialize_available_targets(); + + llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, + llvm_args.as_ptr()); +} + pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! 
{ - let mut emitter: Box = match output { + let emitter: Box = match output { config::ErrorOutputType::HumanReadable(color_config) => { - Box::new(BasicEmitter::stderr(color_config)) + Box::new(EmitterWriter::stderr(color_config, + None)) } config::ErrorOutputType::Json => Box::new(JsonEmitter::basic()), }; - emitter.emit(None, msg, None, errors::Level::Fatal); + let handler = errors::Handler::with_emitter(true, false, emitter); + handler.emit(&MultiSpan::new(), msg, errors::Level::Fatal); panic!(errors::FatalError); } pub fn early_warn(output: config::ErrorOutputType, msg: &str) { - let mut emitter: Box = match output { + let emitter: Box = match output { config::ErrorOutputType::HumanReadable(color_config) => { - Box::new(BasicEmitter::stderr(color_config)) + Box::new(EmitterWriter::stderr(color_config, + None)) } config::ErrorOutputType::Json => Box::new(JsonEmitter::basic()), }; - emitter.emit(None, msg, None, errors::Level::Warning); + let handler = errors::Handler::with_emitter(true, false, emitter); + handler.emit(&MultiSpan::new(), msg, errors::Level::Warning); +} + +// Err(0) means compilation was stopped, but no errors were found. +// This would be better as a dedicated enum, but using try! is so convenient. +pub type CompileResult = Result<(), usize>; + +pub fn compile_result_from_err_count(err_count: usize) -> CompileResult { + if err_count == 0 { + Ok(()) + } else { + Err(err_count) + } +} + +#[cold] +#[inline(never)] +pub fn bug_fmt(file: &'static str, line: u32, args: fmt::Arguments) -> ! { + // this wrapper mostly exists so I don't have to write a fully + // qualified path of None:: inside the bug!() macro definition + opt_span_bug_fmt(file, line, None::, args); +} + +#[cold] +#[inline(never)] +pub fn span_bug_fmt>(file: &'static str, + line: u32, + span: S, + args: fmt::Arguments) -> ! { + opt_span_bug_fmt(file, line, Some(span), args); +} + +fn opt_span_bug_fmt>(file: &'static str, + line: u32, + span: Option, + args: fmt::Arguments) -> !
{ + tls::with_opt(move |tcx| { + let msg = format!("{}:{}: {}", file, line, args); + match (tcx, span) { + (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg), + (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg), + (None, _) => panic!(msg) + } + }); + unreachable!(); } diff --git a/src/librustc/session/search_paths.rs b/src/librustc/session/search_paths.rs index 3c6cd26bef6ce..5bbc6841693ea 100644 --- a/src/librustc/session/search_paths.rs +++ b/src/librustc/session/search_paths.rs @@ -22,7 +22,7 @@ pub struct Iter<'a> { iter: slice::Iter<'a, (PathKind, PathBuf)>, } -#[derive(Eq, PartialEq, Clone, Copy, Debug)] +#[derive(Eq, PartialEq, Clone, Copy, Debug, PartialOrd, Ord, Hash)] pub enum PathKind { Native, Crate, diff --git a/src/librustc/traits/README.md b/src/librustc/traits/README.md new file mode 100644 index 0000000000000..ff72f9dd07e36 --- /dev/null +++ b/src/librustc/traits/README.md @@ -0,0 +1,470 @@ +# TRAIT RESOLUTION + +This document describes the general process and points out some non-obvious +things. + +## Major concepts + +Trait resolution is the process of pairing up an impl with each +reference to a trait. So, for example, if there is a generic function like: + + fn clone_slice(x: &[T]) -> Vec { ... } + +and then a call to that function: + + let v: Vec = clone_slice([1, 2, 3]) + +it is the job of trait resolution to figure out (in which case) +whether there exists an impl of `isize : Clone` + +Note that in some cases, like generic functions, we may not be able to +find a specific impl, but we can figure out that the caller must +provide an impl. To see what I mean, consider the body of `clone_slice`: + + fn clone_slice(x: &[T]) -> Vec { + let mut v = Vec::new(); + for e in &x { + v.push((*e).clone()); // (*) + } + } + +The line marked `(*)` is only legal if `T` (the type of `*e`) +implements the `Clone` trait. 
Naturally, since we don't know what `T` +is, we can't find the specific impl; but based on the bound `T:Clone`, +we can say that there exists an impl which the caller must provide. + +We use the term *obligation* to refer to a trait reference in need of +an impl. + +## Overview + +Trait resolution consists of three major parts: + +- SELECTION: Deciding how to resolve a specific obligation. For + example, selection might decide that a specific obligation can be + resolved by employing an impl which matches the self type, or by + using a parameter bound. In the case of an impl, Selecting one + obligation can create *nested obligations* because of where clauses + on the impl itself. It may also require evaluating those nested + obligations to resolve ambiguities. + +- FULFILLMENT: The fulfillment code is what tracks that obligations + are completely fulfilled. Basically it is a worklist of obligations + to be selected: once selection is successful, the obligation is + removed from the worklist and any nested obligations are enqueued. + +- COHERENCE: The coherence checks are intended to ensure that there + are never overlapping impls, where two impls could be used with + equal precedence. + +## Selection + +Selection is the process of deciding whether an obligation can be +resolved and, if so, how it is to be resolved (via impl, where clause, etc). +The main interface is the `select()` function, which takes an obligation +and returns a `SelectionResult`. There are three possible outcomes: + +- `Ok(Some(selection))` -- yes, the obligation can be resolved, and + `selection` indicates how. If the impl was resolved via an impl, + then `selection` may also indicate nested obligations that are required + by the impl. + +- `Ok(None)` -- we are not yet sure whether the obligation can be + resolved or not. This happens most commonly when the obligation + contains unbound type variables. 
+ +- `Err(err)` -- the obligation definitely cannot be resolved due to a + type error, or because there are no impls that could possibly apply, + etc. + +The basic algorithm for selection is broken into two big phases: +candidate assembly and confirmation. + +### Candidate assembly + +Searches for impls/where-clauses/etc that might +possibly be used to satisfy the obligation. Each of those is called +a candidate. To avoid ambiguity, we want to find exactly one +candidate that is definitively applicable. In some cases, we may not +know whether an impl/where-clause applies or not -- this occurs when +the obligation contains unbound inference variables. + +The basic idea for candidate assembly is to do a first pass in which +we identify all possible candidates. During this pass, all that we do +is try and unify the type parameters. (In particular, we ignore any +nested where clauses.) Presuming that this unification succeeds, the +impl is added as a candidate. + +Once this first pass is done, we can examine the set of candidates. If +it is a singleton set, then we are done: this is the only impl in +scope that could possibly apply. Otherwise, we can winnow down the set +of candidates by using where clauses and other conditions. If this +reduced set yields a single, unambiguous entry, we're good to go, +otherwise the result is considered ambiguous. + +#### The basic process: Inferring based on the impls we see + +This process is easier if we work through some examples. Consider +the following trait: + +``` +trait Convert { + fn convert(&self) -> Target; +} +``` + +This trait just has one method. It's about as simple as it gets. It +converts from the (implicit) `Self` type to the `Target` type. If we +wanted to permit conversion between `isize` and `usize`, we might +implement `Convert` like so: + +```rust +impl Convert for isize { ... } // isize -> usize +impl Convert for usize { ... 
} // usize -> isize +``` + +Now imagine there is some code like the following: + +```rust +let x: isize = ...; +let y = x.convert(); +``` + +The call to convert will generate a trait reference `Convert<$Y> for +isize`, where `$Y` is the type variable representing the type of +`y`. When we match this against the two impls we can see, we will find +that only one remains: `Convert for isize`. Therefore, we can +select this impl, which will cause the type of `$Y` to be unified to +`usize`. (Note that while assembling candidates, we do the initial +unifications in a transaction, so that they don't affect one another.) + +There are tests to this effect in src/test/run-pass: + + traits-multidispatch-infer-convert-source-and-target.rs + traits-multidispatch-infer-convert-target.rs + +#### Winnowing: Resolving ambiguities + +But what happens if there are multiple impls where all the types +unify? Consider this example: + +```rust +trait Get { + fn get(&self) -> Self; +} + +impl Get for T { + fn get(&self) -> T { *self } +} + +impl Get for Box { + fn get(&self) -> Box { box get_it(&**self) } +} +``` + +What happens when we invoke `get_it(&box 1_u16)`, for example? In this +case, the `Self` type is `Box` -- that unifies with both impls, +because the first applies to all types, and the second to all +boxes. In the olden days we'd have called this ambiguous. But what we +do now is do a second *winnowing* pass that considers where clauses +and attempts to remove candidates -- in this case, the first impl only +applies if `Box : Copy`, which doesn't hold. After winnowing, +then, we are left with just one candidate, so we can proceed. There is +a test of this in `src/test/run-pass/traits-conditional-dispatch.rs`. + +#### Matching + +The subroutines that decide whether a particular impl/where-clause/etc +applies to a particular obligation. 
At the moment, this amounts to +unifying the self types, but in the future we may also recursively +consider some of the nested obligations, in the case of an impl. + +#### Lifetimes and selection + +Because of how that lifetime inference works, it is not possible to +give back immediate feedback as to whether a unification or subtype +relationship between lifetimes holds or not. Therefore, lifetime +matching is *not* considered during selection. This is reflected in +the fact that subregion assignment is infallible. This may yield +lifetime constraints that will later be found to be in error (in +contrast, the non-lifetime-constraints have already been checked +during selection and can never cause an error, though naturally they +may lead to other errors downstream). + +#### Where clauses + +Besides an impl, the other major way to resolve an obligation is via a +where clause. The selection process is always given a *parameter +environment* which contains a list of where clauses, which are +basically obligations that can assume are satisfiable. We will iterate +over that list and check whether our current obligation can be found +in that list, and if so it is considered satisfied. More precisely, we +want to check whether there is a where-clause obligation that is for +the same trait (or some subtrait) and for which the self types match, +using the definition of *matching* given above. + +Consider this simple example: + + trait A1 { ... } + trait A2 : A1 { ... } + + trait B { ... } + + fn foo { ... } + +Clearly we can use methods offered by `A1`, `A2`, or `B` within the +body of `foo`. In each case, that will incur an obligation like `X : +A1` or `X : A2`. The parameter environment will contain two +where-clauses, `X : A2` and `X : B`. For each obligation, then, we +search this list of where-clauses. To resolve an obligation `X:A1`, +we would note that `X:A2` implies that `X:A1`. 
+ +### Confirmation + +Confirmation unifies the output type parameters of the trait with the +values found in the obligation, possibly yielding a type error. If we +return to our example of the `Convert` trait from the previous +section, confirmation is where an error would be reported, because the +impl specified that `T` would be `usize`, but the obligation reported +`char`. Hence the result of selection would be an error. + +### Selection during translation + +During type checking, we do not store the results of trait selection. +We simply wish to verify that trait selection will succeed. Then +later, at trans time, when we have all concrete types available, we +can repeat the trait selection. In this case, we do not consider any +where-clauses to be in scope. We know that therefore each resolution +will resolve to a particular impl. + +One interesting twist has to do with nested obligations. In general, in trans, +we only need to do a "shallow" selection for an obligation. That is, we wish to +identify which impl applies, but we do not (yet) need to decide how to select +any nested obligations. Nonetheless, we *do* currently do a complete resolution, +and that is because it can sometimes inform the results of type inference. That is, +we do not have the full substitutions in terms of the type variables of the impl available +to us, so we must run trait selection to figure everything out. + +Here is an example: + + trait Foo { ... } + impl> Foo for Vec { ... } + + impl Bar for isize { ... } + +After one shallow round of selection for an obligation like `Vec +: Foo`, we would know which impl we want, and we would know that +`T=isize`, but we do not know the type of `U`. We must select the +nested obligation `isize : Bar` to find out that `U=usize`. + +It would be good to only do *just as much* nested resolution as +necessary. Currently, though, we just do a full resolution. 
+ +# Higher-ranked trait bounds + +One of the more subtle concepts at work is *higher-ranked trait +bounds*. An example of such a bound is `for<'a> MyTrait<&'a isize>`. +Let's walk through how selection on higher-ranked trait references +works. + +## Basic matching and skolemization leaks + +Let's walk through the test `compile-fail/hrtb-just-for-static.rs` to see +how it works. The test starts with the trait `Foo`: + +```rust +trait Foo { + fn foo(&self, x: X) { } +} +``` + +Let's say we have a function `want_hrtb` that wants a type which +implements `Foo<&'a isize>` for any `'a`: + +```rust +fn want_hrtb() where T : for<'a> Foo<&'a isize> { ... } +``` + +Now we have a struct `AnyInt` that implements `Foo<&'a isize>` for any +`'a`: + +```rust +struct AnyInt; +impl<'a> Foo<&'a isize> for AnyInt { } +``` + +And the question is, does `AnyInt : for<'a> Foo<&'a isize>`? We want the +answer to be yes. The algorithm for figuring it out is closely related +to the subtyping for higher-ranked types (which is described in +`middle::infer::higher_ranked::doc`, but also in a [paper by SPJ] that +I recommend you read). + +1. Skolemize the obligation. +2. Match the impl against the skolemized obligation. +3. Check for skolemization leaks. + +[paper by SPJ]: http://research.microsoft.com/en-us/um/people/simonpj/papers/higher-rank/ + +So let's work through our example. The first thing we would do is to +skolemize the obligation, yielding `AnyInt : Foo<&'0 isize>` (here `'0` +represents skolemized region #0). Note that we now have no quantifiers; +in terms of the compiler type, this changes from a `ty::PolyTraitRef` +to a `TraitRef`. We would then create the `TraitRef` from the impl, +using fresh variables for its bound regions (and thus getting +`Foo<&'$a isize>`, where `'$a` is the inference variable for `'a`). Next +we relate the two trait refs, yielding a graph with the constraint +that `'0 == '$a`.
Finally, we check for skolemization "leaks" -- a +leak is basically any attempt to relate a skolemized region to another +skolemized region, or to any region that pre-existed the impl match. +The leak check is done by searching from the skolemized region to find +the set of regions that it is related to in any way. This is called +the "taint" set. To pass the check, that set must consist *solely* of +itself and region variables from the impl. If the taint set includes +any other region, then the match is a failure. In this case, the taint +set for `'0` is `{'0, '$a}`, and hence the check will succeed. + +Let's consider a failure case. Imagine we also have a struct + +```rust +struct StaticInt; +impl Foo<&'static isize> for StaticInt; +``` + +We want the obligation `StaticInt : for<'a> Foo<&'a isize>` to be +considered unsatisfied. The check begins just as before. `'a` is +skolemized to `'0` and the impl trait reference is instantiated to +`Foo<&'static isize>`. When we relate those two, we get a constraint +like `'static == '0`. This means that the taint set for `'0` is `{'0, +'static}`, which fails the leak check. + +## Higher-ranked trait obligations + +Once the basic matching is done, we get to another interesting topic: +how to deal with impl obligations. I'll work through a simple example +here. Imagine we have the traits `Foo` and `Bar` and an associated impl: + +``` +trait Foo { + fn foo(&self, x: X) { } +} + +trait Bar { + fn bar(&self, x: X) { } +} + +impl Foo for F + where F : Bar +{ +} +``` + +Now let's say we have an obligation `for<'a> Foo<&'a isize>` and we match +this impl. What obligation is generated as a result? We want to get +`for<'a> Bar<&'a isize>`, but how does that happen? + +After the matching, we are in a position where we have a skolemized +substitution like `X => &'0 isize`. If we apply this substitution to the +impl obligations, we get `F : Bar<&'0 isize>`.
Obviously this is not +directly usable because the skolemized region `'0` cannot leak out of +our computation. + +What we do is to create an inverse mapping from the taint set of `'0` +back to the original bound region (`'a`, here) that `'0` resulted +from. (This is done in `higher_ranked::plug_leaks`). We know that the +leak check passed, so this taint set consists solely of the skolemized +region itself plus various intermediate region variables. We then walk +the trait-reference and convert every region in that taint set back to +a late-bound region, so in this case we'd wind up with `for<'a> F : +Bar<&'a isize>`. + +# Caching and subtle considerations therewith + +In general we attempt to cache the results of trait selection. This +is a somewhat complex process. Part of the reason for this is that we +want to be able to cache results even when all the types in the trait +reference are not fully known. In that case, it may happen that the +trait selection process is also influencing type variables, so we have +to be able to not only cache the *result* of the selection process, +but *replay* its effects on the type variables. + +## An example + +The high-level idea of how the cache works is that we first replace +all unbound inference variables with skolemized versions. Therefore, +if we had a trait reference `usize : Foo<$1>`, where `$n` is an unbound +inference variable, we might replace it with `usize : Foo<%0>`, where +`%n` is a skolemized type. We would then look this up in the cache. +If we found a hit, the hit would tell us the immediate next step to +take in the selection process: i.e., apply impl #22, or apply where +clause `X : Foo`. Let's say in this case there is no hit. +Therefore, we search through impls and where clauses and so forth, and +we come to the conclusion that the only possible impl is this one, +with def-id 22: + + impl Foo for usize { ... } // Impl #22 + +We would then record in the cache `usize : Foo<%0> ==> +ImplCandidate(22)`. 
Next we would confirm `ImplCandidate(22)`, which +would (as a side-effect) unify `$1` with `isize`. + +Now, at some later time, we might come along and see a `usize : +Foo<$3>`. When skolemized, this would yield `usize : Foo<%0>`, just as +before, and hence the cache lookup would succeed, yielding +`ImplCandidate(22)`. We would confirm `ImplCandidate(22)` which would +(as a side-effect) unify `$3` with `isize`. + +## Where clauses and the local vs global cache + +One subtle interaction is that the results of trait lookup will vary +depending on what where clauses are in scope. Therefore, we actually +have *two* caches, a local and a global cache. The local cache is +attached to the `ParameterEnvironment` and the global cache attached +to the `tcx`. We use the local cache whenever the result might depend +on the where clauses that are in scope. The determination of which +cache to use is done by the method `pick_candidate_cache` in +`select.rs`. At the moment, we use a very simple, conservative rule: +if there are any where-clauses in scope, then we use the local cache. +We used to try and draw finer-grained distinctions, but that led to a +series of annoying and weird bugs like #22019 and #18290. This simple +rule seems to be pretty clearly safe and also still retains a very +high hit rate (~95% when compiling rustc). + +# Specialization + +Defined in the `specialize` module. + +The basic strategy is to build up a *specialization graph* during +coherence checking. Insertion into the graph locates the right place +to put an impl in the specialization hierarchy; if there is no right +place (due to partial overlap but no containment), you get an overlap +error. Specialization is consulted when selecting an impl (of course), +and the graph is consulted when propagating defaults down the +specialization hierarchy. + +You might expect that the specialization graph would be used during +selection -- i.e., when actually performing specialization.
This is +not done for two reasons: + +- It's merely an optimization: given a set of candidates that apply, + we can determine the most specialized one by comparing them directly + for specialization, rather than consulting the graph. Given that we + also cache the results of selection, the benefit of this + optimization is questionable. + +- To build the specialization graph in the first place, we need to use + selection (because we need to determine whether one impl specializes + another). Dealing with this reentrancy would require some additional + mode switch for selection. Given that there seems to be no strong + reason to use the graph anyway, we stick with a simpler approach in + selection, and use the graph only for propagating default + implementations. + +Trait impl selection can succeed even when multiple impls can apply, +as long as they are part of the same specialization family. In that +case, it returns a *single* impl on success -- this is the most +specialized impl *known* to apply. However, if there are any inference +variables in play, the returned impl may not be the actual impl we +will use at trans time. Thus, we take special care to avoid projecting +associated types unless either (1) the associated type does not use +`default` and thus cannot be overridden or (2) all input types are +known concretely. diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs new file mode 100644 index 0000000000000..58cb52e897786 --- /dev/null +++ b/src/librustc/traits/coherence.rs @@ -0,0 +1,285 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
See `README.md` for high-level documentation + +use super::{SelectionContext, Obligation, ObligationCause}; + +use hir::def_id::{DefId, LOCAL_CRATE}; +use ty::{self, Ty, TyCtxt}; + +use infer::{InferCtxt, InferOk}; + +#[derive(Copy, Clone)] +struct InferIsLocal(bool); + +/// If there are types that satisfy both impls, returns a suitably-freshened +/// `ImplHeader` with those types substituted +pub fn overlapping_impls<'cx, 'gcx, 'tcx>(infcx: &InferCtxt<'cx, 'gcx, 'tcx>, + impl1_def_id: DefId, + impl2_def_id: DefId) + -> Option> +{ + debug!("impl_can_satisfy(\ + impl1_def_id={:?}, \ + impl2_def_id={:?})", + impl1_def_id, + impl2_def_id); + + let selcx = &mut SelectionContext::intercrate(infcx); + overlap(selcx, impl1_def_id, impl2_def_id) +} + +/// Can both impl `a` and impl `b` be satisfied by a common type (including +/// `where` clauses)? If so, returns an `ImplHeader` that unifies the two impls. +fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + a_def_id: DefId, + b_def_id: DefId) + -> Option> +{ + debug!("overlap(a_def_id={:?}, b_def_id={:?})", + a_def_id, + b_def_id); + + let a_impl_header = ty::ImplHeader::with_fresh_ty_vars(selcx, a_def_id); + let b_impl_header = ty::ImplHeader::with_fresh_ty_vars(selcx, b_def_id); + + debug!("overlap: a_impl_header={:?}", a_impl_header); + debug!("overlap: b_impl_header={:?}", b_impl_header); + + // Do `a` and `b` unify? If not, no overlap. + match selcx.infcx().eq_impl_headers(true, + &ObligationCause::dummy(), + &a_impl_header, + &b_impl_header) { + Ok(InferOk { obligations, .. }) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + } + Err(_) => return None + } + + debug!("overlap: unification check succeeded"); + + // Are any of the obligations unsatisfiable? If so, no overlap. 
+ let infcx = selcx.infcx(); + let opt_failing_obligation = + a_impl_header.predicates + .iter() + .chain(&b_impl_header.predicates) + .map(|p| infcx.resolve_type_vars_if_possible(p)) + .map(|p| Obligation { cause: ObligationCause::dummy(), + recursion_depth: 0, + predicate: p }) + .find(|o| !selcx.evaluate_obligation(o)); + + if let Some(failing_obligation) = opt_failing_obligation { + debug!("overlap: obligation unsatisfiable {:?}", failing_obligation); + return None + } + + Some(selcx.infcx().resolve_type_vars_if_possible(&a_impl_header)) +} + +pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: &ty::TraitRef<'tcx>) -> bool +{ + debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref); + + // if the orphan rules pass, that means that no ancestor crate can + // impl this, so it's up to us. + if orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(false)).is_ok() { + debug!("trait_ref_is_knowable: orphan check passed"); + return true; + } + + // if the trait is not marked fundamental, then it's always possible that + // an ancestor crate will impl this in the future, if they haven't + // already + if + trait_ref.def_id.krate != LOCAL_CRATE && + !tcx.has_attr(trait_ref.def_id, "fundamental") + { + debug!("trait_ref_is_knowable: trait is neither local nor fundamental"); + return false; + } + + // find out when some downstream (or cousin) crate could impl this + // trait-ref, presuming that all the parameters were instantiated + // with downstream types. If not, then it could only be + // implemented by an upstream crate, which means that the impl + // must be visible to us, and -- since the trait is fundamental + // -- we can test. + orphan_check_trait_ref(tcx, trait_ref, InferIsLocal(true)).is_err() +} + +pub enum OrphanCheckErr<'tcx> { + NoLocalInputType, + UncoveredTy(Ty<'tcx>), +} + +/// Checks the coherence orphan rules. `impl_def_id` should be the +/// def-id of a trait impl. 
To pass, either the trait must be local, or else +/// two conditions must be satisfied: +/// +/// 1. All type parameters in `Self` must be "covered" by some local type constructor. +/// 2. Some local type must appear in `Self`. +pub fn orphan_check<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId) + -> Result<(), OrphanCheckErr<'tcx>> +{ + debug!("orphan_check({:?})", impl_def_id); + + // We only expect this routine to be invoked on implementations + // of a trait, not inherent implementations. + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); + debug!("orphan_check: trait_ref={:?}", trait_ref); + + // If the *trait* is local to the crate, ok. + if trait_ref.def_id.is_local() { + debug!("trait {:?} is local to current crate", + trait_ref.def_id); + return Ok(()); + } + + orphan_check_trait_ref(tcx, &trait_ref, InferIsLocal(false)) +} + +fn orphan_check_trait_ref<'tcx>(tcx: TyCtxt, + trait_ref: &ty::TraitRef<'tcx>, + infer_is_local: InferIsLocal) + -> Result<(), OrphanCheckErr<'tcx>> +{ + debug!("orphan_check_trait_ref(trait_ref={:?}, infer_is_local={})", + trait_ref, infer_is_local.0); + + // First, create an ordered iterator over all the type parameters to the trait, with the self + // type appearing first. + // Find the first input type that either references a type parameter OR + // some local type. + for input_ty in trait_ref.input_types() { + if ty_is_local(tcx, input_ty, infer_is_local) { + debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); + + // First local input type. Check that there are no + // uncovered type parameters. + let uncovered_tys = uncovered_tys(tcx, input_ty, infer_is_local); + for uncovered_ty in uncovered_tys { + if let Some(param) = uncovered_ty.walk().find(|t| is_type_parameter(t)) { + debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); + return Err(OrphanCheckErr::UncoveredTy(param)); + } + } + + // OK, found local type, all prior types upheld invariant.
+ return Ok(()); + } + + // Otherwise, enforce invariant that there are no type + // parameters reachable. + if !infer_is_local.0 { + if let Some(param) = input_ty.walk().find(|t| is_type_parameter(t)) { + debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); + return Err(OrphanCheckErr::UncoveredTy(param)); + } + } + } + + // If we exit above loop, never found a local type. + debug!("orphan_check_trait_ref: no local type"); + return Err(OrphanCheckErr::NoLocalInputType); +} + +fn uncovered_tys<'tcx>(tcx: TyCtxt, ty: Ty<'tcx>, infer_is_local: InferIsLocal) + -> Vec> { + if ty_is_local_constructor(tcx, ty, infer_is_local) { + vec![] + } else if fundamental_ty(tcx, ty) { + ty.walk_shallow() + .flat_map(|t| uncovered_tys(tcx, t, infer_is_local)) + .collect() + } else { + vec![ty] + } +} + +fn is_type_parameter(ty: Ty) -> bool { + match ty.sty { + // FIXME(#20590) straighten story about projection types + ty::TyProjection(..) | ty::TyParam(..) => true, + _ => false, + } +} + +fn ty_is_local(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal) -> bool { + ty_is_local_constructor(tcx, ty, infer_is_local) || + fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, infer_is_local)) +} + +fn fundamental_ty(tcx: TyCtxt, ty: Ty) -> bool { + match ty.sty { + ty::TyBox(..) | ty::TyRef(..) => true, + ty::TyAdt(def, _) => def.is_fundamental(), + ty::TyDynamic(ref data, ..) => { + data.principal().map_or(false, |p| tcx.has_attr(p.def_id(), "fundamental")) + } + _ => false + } +} + +fn ty_is_local_constructor(tcx: TyCtxt, ty: Ty, infer_is_local: InferIsLocal)-> bool { + debug!("ty_is_local_constructor({:?})", ty); + + match ty.sty { + ty::TyBool | + ty::TyChar | + ty::TyInt(..) | + ty::TyUint(..) | + ty::TyFloat(..) | + ty::TyStr | + ty::TyFnDef(..) | + ty::TyFnPtr(_) | + ty::TyArray(..) | + ty::TySlice(..) | + ty::TyRawPtr(..) | + ty::TyRef(..) | + ty::TyNever | + ty::TyTuple(..) | + ty::TyParam(..) | + ty::TyProjection(..) 
=> { + false + } + + ty::TyInfer(..) => { + infer_is_local.0 + } + + ty::TyAdt(def, _) => { + def.did.is_local() + } + + ty::TyBox(_) => { // Box + let krate = tcx.lang_items.owned_box().map(|d| d.krate); + krate == Some(LOCAL_CRATE) + } + + ty::TyDynamic(ref tt, ..) => { + tt.principal().map_or(false, |p| p.def_id().is_local()) + } + + ty::TyError => { + true + } + + ty::TyClosure(..) | ty::TyAnon(..) => { + bug!("ty_is_local invoked on unexpected type: {:?}", ty) + } + } +} diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs new file mode 100644 index 0000000000000..76a5e2764f264 --- /dev/null +++ b/src/librustc/traits/error_reporting.rs @@ -0,0 +1,953 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::{ + FulfillmentError, + FulfillmentErrorCode, + MismatchedProjectionTypes, + Obligation, + ObligationCause, + ObligationCauseCode, + OutputTypeParameterMismatch, + TraitNotObjectSafe, + PredicateObligation, + SelectionContext, + SelectionError, + ObjectSafetyViolation, + MethodViolationCode, +}; + +use fmt_macros::{Parser, Piece, Position}; +use hir::def_id::DefId; +use infer::{self, InferCtxt}; +use rustc::lint::builtin::EXTRA_REQUIREMENT_IN_IMPL; +use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; +use ty::error::ExpectedFound; +use ty::fast_reject; +use ty::fold::TypeFolder; +use ty::subst::Subst; +use util::nodemap::{FxHashMap, FxHashSet}; + +use std::cmp; +use std::fmt; +use syntax::ast; +use syntax_pos::Span; +use errors::DiagnosticBuilder; + +#[derive(Debug, PartialEq, Eq, Hash)] +pub struct TraitErrorKey<'tcx> { + span: Span, + predicate: ty::Predicate<'tcx> +} + +impl<'a, 'gcx, 'tcx> TraitErrorKey<'tcx> { + fn from_error(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + e: &FulfillmentError<'tcx>) -> Self { + let predicate = + infcx.resolve_type_vars_if_possible(&e.obligation.predicate); + TraitErrorKey { + span: e.obligation.cause.span, + predicate: infcx.tcx.erase_regions(&predicate) + } + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + pub fn report_fulfillment_errors(&self, errors: &Vec>) { + for error in errors { + self.report_fulfillment_error(error); + } + } + + fn report_fulfillment_error(&self, + error: &FulfillmentError<'tcx>) { + let error_key = TraitErrorKey::from_error(self, error); + debug!("report_fulfillment_errors({:?}) - key={:?}", + error, error_key); + if !self.reported_trait_errors.borrow_mut().insert(error_key) { + debug!("report_fulfillment_errors: skipping duplicate"); + return; + } + match error.code { + FulfillmentErrorCode::CodeSelectionError(ref e) => { + self.report_selection_error(&error.obligation, e); + } + FulfillmentErrorCode::CodeProjectionError(ref e) => { + 
self.report_projection_error(&error.obligation, e); + } + FulfillmentErrorCode::CodeAmbiguity => { + self.maybe_report_ambiguity(&error.obligation); + } + } + } + + fn report_projection_error(&self, + obligation: &PredicateObligation<'tcx>, + error: &MismatchedProjectionTypes<'tcx>) + { + let predicate = + self.resolve_type_vars_if_possible(&obligation.predicate); + + if predicate.references_error() { + return + } + + self.probe(|_| { + let err_buf; + let mut err = &error.err; + let mut values = None; + + // try to find the mismatched types to report the error with. + // + // this can fail if the problem was higher-ranked, in which + // cause I have no idea for a good error message. + if let ty::Predicate::Projection(ref data) = predicate { + let mut selcx = SelectionContext::new(self); + let (data, _) = self.replace_late_bound_regions_with_fresh_var( + obligation.cause.span, + infer::LateBoundRegionConversionTime::HigherRankedType, + data); + let normalized = super::normalize_projection_type( + &mut selcx, + data.projection_ty, + obligation.cause.clone(), + 0 + ); + if let Err(error) = self.eq_types( + false, &obligation.cause, + data.ty, normalized.value + ) { + values = Some(infer::ValuePairs::Types(ExpectedFound { + expected: normalized.value, + found: data.ty, + })); + err_buf = error; + err = &err_buf; + } + } + + let mut diag = struct_span_err!( + self.tcx.sess, obligation.cause.span, E0271, + "type mismatch resolving `{}`", predicate + ); + self.note_type_err(&mut diag, &obligation.cause, None, values, err); + self.note_obligation_cause(&mut diag, obligation); + diag.emit(); + }); + } + + fn fuzzy_match_tys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { + /// returns the fuzzy category of a given type, or None + /// if the type can be equated to any type. + fn type_category<'tcx>(t: Ty<'tcx>) -> Option { + match t.sty { + ty::TyBool => Some(0), + ty::TyChar => Some(1), + ty::TyStr => Some(2), + ty::TyInt(..) | ty::TyUint(..) 
| ty::TyInfer(ty::IntVar(..)) => Some(3), + ty::TyFloat(..) | ty::TyInfer(ty::FloatVar(..)) => Some(4), + ty::TyBox(..) | ty::TyRef(..) | ty::TyRawPtr(..) => Some(5), + ty::TyArray(..) | ty::TySlice(..) => Some(6), + ty::TyFnDef(..) | ty::TyFnPtr(..) => Some(7), + ty::TyDynamic(..) => Some(8), + ty::TyClosure(..) => Some(9), + ty::TyTuple(..) => Some(10), + ty::TyProjection(..) => Some(11), + ty::TyParam(..) => Some(12), + ty::TyAnon(..) => Some(13), + ty::TyNever => Some(14), + ty::TyAdt(adt, ..) => match adt.adt_kind() { + AdtKind::Struct => Some(15), + AdtKind::Union => Some(16), + AdtKind::Enum => Some(17), + }, + ty::TyInfer(..) | ty::TyError => None + } + } + + match (type_category(a), type_category(b)) { + (Some(cat_a), Some(cat_b)) => match (&a.sty, &b.sty) { + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => def_a == def_b, + _ => cat_a == cat_b + }, + // infer and error can be equated to all types + _ => true + } + } + + fn impl_similar_to(&self, + trait_ref: ty::PolyTraitRef<'tcx>, + obligation: &PredicateObligation<'tcx>) + -> Option + { + let tcx = self.tcx; + + let trait_ref = tcx.erase_late_bound_regions(&trait_ref); + let trait_self_ty = trait_ref.self_ty(); + + let mut self_match_impls = vec![]; + let mut fuzzy_match_impls = vec![]; + + self.tcx.lookup_trait_def(trait_ref.def_id) + .for_each_relevant_impl(self.tcx, trait_self_ty, |def_id| { + let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id); + let impl_trait_ref = tcx + .impl_trait_ref(def_id) + .unwrap() + .subst(tcx, impl_substs); + + let impl_self_ty = impl_trait_ref.self_ty(); + + if let Ok(..) 
= self.can_equate(&trait_self_ty, &impl_self_ty) { + self_match_impls.push(def_id); + + if trait_ref.substs.types().skip(1) + .zip(impl_trait_ref.substs.types().skip(1)) + .all(|(u,v)| self.fuzzy_match_tys(u, v)) + { + fuzzy_match_impls.push(def_id); + } + } + }); + + let impl_def_id = if self_match_impls.len() == 1 { + self_match_impls[0] + } else if fuzzy_match_impls.len() == 1 { + fuzzy_match_impls[0] + } else { + return None + }; + + if tcx.has_attr(impl_def_id, "rustc_on_unimplemented") { + Some(impl_def_id) + } else { + None + } + } + + fn on_unimplemented_note(&self, + trait_ref: ty::PolyTraitRef<'tcx>, + obligation: &PredicateObligation<'tcx>) -> Option { + let def_id = self.impl_similar_to(trait_ref, obligation) + .unwrap_or(trait_ref.def_id()); + let trait_ref = trait_ref.skip_binder(); + + let span = obligation.cause.span; + let mut report = None; + for item in self.tcx.get_attrs(def_id).iter() { + if item.check_name("rustc_on_unimplemented") { + let err_sp = item.meta().span.substitute_dummy(span); + let trait_str = self.tcx.item_path_str(trait_ref.def_id); + if let Some(istring) = item.value_str() { + let istring = &*istring.as_str(); + let generics = self.tcx.item_generics(trait_ref.def_id); + let generic_map = generics.types.iter().map(|param| { + (param.name.as_str().to_string(), + trait_ref.substs.type_for_def(param).to_string()) + }).collect::>(); + let parser = Parser::new(istring); + let mut errored = false; + let err: String = parser.filter_map(|p| { + match p { + Piece::String(s) => Some(s), + Piece::NextArgument(a) => match a.position { + Position::ArgumentNamed(s) => match generic_map.get(s) { + Some(val) => Some(val), + None => { + span_err!(self.tcx.sess, err_sp, E0272, + "the #[rustc_on_unimplemented] \ + attribute on \ + trait definition for {} refers to \ + non-existent type parameter {}", + trait_str, s); + errored = true; + None + } + }, + _ => { + span_err!(self.tcx.sess, err_sp, E0273, + "the #[rustc_on_unimplemented] attribute \ + 
on trait definition for {} must have \ + named format arguments, eg \ + `#[rustc_on_unimplemented = \ + \"foo {{T}}\"]`", trait_str); + errored = true; + None + } + } + } + }).collect(); + // Report only if the format string checks out + if !errored { + report = Some(err); + } + } else { + span_err!(self.tcx.sess, err_sp, E0274, + "the #[rustc_on_unimplemented] attribute on \ + trait definition for {} must have a value, \ + eg `#[rustc_on_unimplemented = \"foo\"]`", + trait_str); + } + break; + } + } + report + } + + fn find_similar_impl_candidates(&self, + trait_ref: ty::PolyTraitRef<'tcx>) + -> Vec> + { + let simp = fast_reject::simplify_type(self.tcx, + trait_ref.skip_binder().self_ty(), + true); + let mut impl_candidates = Vec::new(); + let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id()); + + match simp { + Some(simp) => trait_def.for_each_impl(self.tcx, |def_id| { + let imp = self.tcx.impl_trait_ref(def_id).unwrap(); + let imp_simp = fast_reject::simplify_type(self.tcx, + imp.self_ty(), + true); + if let Some(imp_simp) = imp_simp { + if simp != imp_simp { + return; + } + } + impl_candidates.push(imp); + }), + None => trait_def.for_each_impl(self.tcx, |def_id| { + impl_candidates.push( + self.tcx.impl_trait_ref(def_id).unwrap()); + }) + }; + impl_candidates + } + + fn report_similar_impl_candidates(&self, + trait_ref: ty::PolyTraitRef<'tcx>, + err: &mut DiagnosticBuilder) + { + let simp = fast_reject::simplify_type(self.tcx, + trait_ref.skip_binder().self_ty(), + true); + let mut impl_candidates = Vec::new(); + let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id()); + + match simp { + Some(simp) => trait_def.for_each_impl(self.tcx, |def_id| { + let imp = self.tcx.impl_trait_ref(def_id).unwrap(); + let imp_simp = fast_reject::simplify_type(self.tcx, + imp.self_ty(), + true); + if let Some(imp_simp) = imp_simp { + if simp != imp_simp { + return; + } + } + impl_candidates.push(imp); + }), + None => trait_def.for_each_impl(self.tcx, |def_id| { + 
impl_candidates.push( + self.tcx.impl_trait_ref(def_id).unwrap()); + }) + }; + + if impl_candidates.is_empty() { + return; + } + + err.help(&format!("the following implementations were found:")); + + let end = cmp::min(4, impl_candidates.len()); + for candidate in &impl_candidates[0..end] { + err.help(&format!(" {:?}", candidate)); + } + if impl_candidates.len() > 4 { + err.help(&format!("and {} others", impl_candidates.len()-4)); + } + } + + /// Reports that an overflow has occurred and halts compilation. We + /// halt compilation unconditionally because it is important that + /// overflows never be masked -- they basically represent computations + /// whose result could not be truly determined and thus we can't say + /// if the program type checks or not -- and they are unusual + /// occurrences in any case. + pub fn report_overflow_error(&self, + obligation: &Obligation<'tcx, T>, + suggest_increasing_limit: bool) -> ! + where T: fmt::Display + TypeFoldable<'tcx> + { + let predicate = + self.resolve_type_vars_if_possible(&obligation.predicate); + let mut err = struct_span_err!(self.tcx.sess, obligation.cause.span, E0275, + "overflow evaluating the requirement `{}`", + predicate); + + if suggest_increasing_limit { + self.suggest_new_overflow_limit(&mut err); + } + + self.note_obligation_cause(&mut err, obligation); + + err.emit(); + self.tcx.sess.abort_if_errors(); + bug!(); + } + + /// Reports that a cycle was detected which led to overflow and halts + /// compilation. This is equivalent to `report_overflow_error` except + /// that we can give a more helpful error message (and, in particular, + /// we do not suggest increasing the overflow limit, which is not + /// going to help). + pub fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> ! 
{ + let cycle = self.resolve_type_vars_if_possible(&cycle.to_owned()); + assert!(cycle.len() > 0); + + debug!("report_overflow_error_cycle: cycle={:?}", cycle); + + self.report_overflow_error(&cycle[0], false); + } + + pub fn report_extra_impl_obligation(&self, + error_span: Span, + item_name: ast::Name, + _impl_item_def_id: DefId, + trait_item_def_id: DefId, + requirement: &fmt::Display, + lint_id: Option) // (*) + -> DiagnosticBuilder<'tcx> + { + // (*) This parameter is temporary and used only for phasing + // in the bug fix to #18937. If it is `Some`, it has a kind of + // weird effect -- the diagnostic is reported as a lint, and + // the builder which is returned is marked as canceled. + + let mut err = + struct_span_err!(self.tcx.sess, + error_span, + E0276, + "impl has stricter requirements than trait"); + + if let Some(trait_item_span) = self.tcx.map.span_if_local(trait_item_def_id) { + err.span_label(trait_item_span, + &format!("definition of `{}` from trait", item_name)); + } + + err.span_label( + error_span, + &format!("impl has extra requirement {}", requirement)); + + if let Some(node_id) = lint_id { + self.tcx.sess.add_lint_diagnostic(EXTRA_REQUIREMENT_IN_IMPL, + node_id, + (*err).clone()); + err.cancel(); + } + + err + } + + pub fn report_selection_error(&self, + obligation: &PredicateObligation<'tcx>, + error: &SelectionError<'tcx>) + { + let span = obligation.cause.span; + let mut err = match *error { + SelectionError::Unimplemented => { + if let ObligationCauseCode::CompareImplMethodObligation { + item_name, impl_item_def_id, trait_item_def_id, lint_id + } = obligation.cause.code { + self.report_extra_impl_obligation( + span, + item_name, + impl_item_def_id, + trait_item_def_id, + &format!("`{}`", obligation.predicate), + lint_id) + .emit(); + return; + } else { + match obligation.predicate { + ty::Predicate::Trait(ref trait_predicate) => { + let trait_predicate = + self.resolve_type_vars_if_possible(trait_predicate); + + if 
self.tcx.sess.has_errors() && trait_predicate.references_error() { + return; + } else { + let trait_ref = trait_predicate.to_poly_trait_ref(); + + let mut err = struct_span_err!(self.tcx.sess, span, E0277, + "the trait bound `{}` is not satisfied", + trait_ref.to_predicate()); + err.span_label(span, &format!("the trait `{}` is not implemented \ + for `{}`", + trait_ref, + trait_ref.self_ty())); + + // Try to report a help message + + if !trait_ref.has_infer_types() && + self.predicate_can_apply(trait_ref) { + // If a where-clause may be useful, remind the + // user that they can add it. + // + // don't display an on-unimplemented note, as + // these notes will often be of the form + // "the type `T` can't be frobnicated" + // which is somewhat confusing. + err.help(&format!("consider adding a `where {}` bound", + trait_ref.to_predicate())); + } else if let Some(s) = self.on_unimplemented_note(trait_ref, + obligation) { + // If it has a custom "#[rustc_on_unimplemented]" + // error message, let's display it! + err.note(&s); + } else { + // If we can't show anything useful, try to find + // similar impls. + let impl_candidates = + self.find_similar_impl_candidates(trait_ref); + if impl_candidates.len() > 0 { + self.report_similar_impl_candidates(trait_ref, &mut err); + } + } + err + } + } + + ty::Predicate::Equate(ref predicate) => { + let predicate = self.resolve_type_vars_if_possible(predicate); + let err = self.equality_predicate(&obligation.cause, + &predicate).err().unwrap(); + struct_span_err!(self.tcx.sess, span, E0278, + "the requirement `{}` is not satisfied (`{}`)", + predicate, err) + } + + ty::Predicate::RegionOutlives(ref predicate) => { + let predicate = self.resolve_type_vars_if_possible(predicate); + let err = self.region_outlives_predicate(&obligation.cause, + &predicate).err().unwrap(); + struct_span_err!(self.tcx.sess, span, E0279, + "the requirement `{}` is not satisfied (`{}`)", + predicate, err) + } + + ty::Predicate::Projection(..) 
| ty::Predicate::TypeOutlives(..) => { + let predicate = + self.resolve_type_vars_if_possible(&obligation.predicate); + struct_span_err!(self.tcx.sess, span, E0280, + "the requirement `{}` is not satisfied", + predicate) + } + + ty::Predicate::ObjectSafe(trait_def_id) => { + let violations = self.tcx.object_safety_violations(trait_def_id); + self.tcx.report_object_safety_error(span, + trait_def_id, + violations) + } + + ty::Predicate::ClosureKind(closure_def_id, kind) => { + let found_kind = self.closure_kind(closure_def_id).unwrap(); + let closure_span = self.tcx.map.span_if_local(closure_def_id).unwrap(); + let mut err = struct_span_err!( + self.tcx.sess, closure_span, E0525, + "expected a closure that implements the `{}` trait, \ + but this closure only implements `{}`", + kind, + found_kind); + err.span_note( + obligation.cause.span, + &format!("the requirement to implement \ + `{}` derives from here", kind)); + err.emit(); + return; + } + + ty::Predicate::WellFormed(ty) => { + // WF predicates cannot themselves make + // errors. They can only block due to + // ambiguity; otherwise, they always + // degenerate into other obligations + // (which may fail). 
+ span_bug!(span, "WF predicate not satisfied for {:?}", ty); + } + } + } + } + + OutputTypeParameterMismatch(ref expected_trait_ref, ref actual_trait_ref, ref e) => { + let expected_trait_ref = self.resolve_type_vars_if_possible(&*expected_trait_ref); + let actual_trait_ref = self.resolve_type_vars_if_possible(&*actual_trait_ref); + if actual_trait_ref.self_ty().references_error() { + return; + } + struct_span_err!(self.tcx.sess, span, E0281, + "type mismatch: the type `{}` implements the trait `{}`, \ + but the trait `{}` is required ({})", + expected_trait_ref.self_ty(), + expected_trait_ref, + actual_trait_ref, + e) + } + + TraitNotObjectSafe(did) => { + let violations = self.tcx.object_safety_violations(did); + self.tcx.report_object_safety_error(span, did, + violations) + } + }; + self.note_obligation_cause(&mut err, obligation); + err.emit(); + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn recursive_type_with_infinite_size_error(self, + type_def_id: DefId) + -> DiagnosticBuilder<'tcx> + { + assert!(type_def_id.is_local()); + let span = self.map.span_if_local(type_def_id).unwrap(); + let mut err = struct_span_err!(self.sess, span, E0072, + "recursive type `{}` has infinite size", + self.item_path_str(type_def_id)); + err.span_label(span, &format!("recursive type has infinite size")); + err.help(&format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \ + at some point to make `{}` representable", + self.item_path_str(type_def_id))); + err + } + + pub fn report_object_safety_error(self, + span: Span, + trait_def_id: DefId, + violations: Vec) + -> DiagnosticBuilder<'tcx> + { + let trait_str = self.item_path_str(trait_def_id); + let mut err = struct_span_err!( + self.sess, span, E0038, + "the trait `{}` cannot be made into an object", + trait_str); + err.span_label(span, &format!( + "the trait `{}` cannot be made into an object", trait_str + )); + + let mut reported_violations = FxHashSet(); + for violation in violations { + if 
!reported_violations.insert(violation.clone()) { + continue; + } + let buf; + let note = match violation { + ObjectSafetyViolation::SizedSelf => { + "the trait cannot require that `Self : Sized`" + } + + ObjectSafetyViolation::SupertraitSelf => { + "the trait cannot use `Self` as a type parameter \ + in the supertrait listing" + } + + ObjectSafetyViolation::Method(name, + MethodViolationCode::StaticMethod) => { + buf = format!("method `{}` has no receiver", name); + &buf + } + + ObjectSafetyViolation::Method(name, + MethodViolationCode::ReferencesSelf) => { + buf = format!("method `{}` references the `Self` type \ + in its arguments or return type", + name); + &buf + } + + ObjectSafetyViolation::Method(name, + MethodViolationCode::Generic) => { + buf = format!("method `{}` has generic type parameters", name); + &buf + } + }; + err.note(note); + } + err + } +} + +impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { + fn maybe_report_ambiguity(&self, obligation: &PredicateObligation<'tcx>) { + // Unable to successfully determine, probably means + // insufficient type information, but could mean + // ambiguous impls. The latter *ought* to be a + // coherence violation, so we don't report it here. + + let predicate = self.resolve_type_vars_if_possible(&obligation.predicate); + + debug!("maybe_report_ambiguity(predicate={:?}, obligation={:?})", + predicate, + obligation); + + // Ambiguity errors are often caused as fallout from earlier + // errors. So just ignore them if this infcx is tainted. + if self.is_tainted_by_errors() { + return; + } + + match predicate { + ty::Predicate::Trait(ref data) => { + let trait_ref = data.to_poly_trait_ref(); + let self_ty = trait_ref.self_ty(); + if predicate.references_error() { + } else { + // Typically, this ambiguity should only happen if + // there are unresolved type inference variables + // (otherwise it would suggest a coherence + // failure). 
But given #21974 that is not necessarily + // the case -- we can have multiple where clauses that + // are only distinguished by a region, which results + // in an ambiguity even when all types are fully + // known, since we don't dispatch based on region + // relationships. + + // This is kind of a hack: it frequently happens that some earlier + // error prevents types from being fully inferred, and then we get + // a bunch of uninteresting errors saying something like " doesn't implement Sized". It may even be true that we + // could just skip over all checks where the self-ty is an + // inference variable, but I was afraid that there might be an + // inference variable created, registered as an obligation, and + // then never forced by writeback, and hence by skipping here we'd + // be ignoring the fact that we don't KNOW the type works + // out. Though even that would probably be harmless, given that + // we're only talking about builtin traits, which are known to be + // inhabited. But in any case I just threw in this check for + // has_errors() to be sure that compilation isn't happening + // anyway. In that case, why inundate the user. + if !self.tcx.sess.has_errors() { + if + self.tcx.lang_items.sized_trait() + .map_or(false, |sized_id| sized_id == trait_ref.def_id()) + { + self.need_type_info(obligation.cause.span, self_ty); + } else { + let mut err = struct_span_err!(self.tcx.sess, + obligation.cause.span, E0283, + "type annotations required: \ + cannot resolve `{}`", + predicate); + self.note_obligation_cause(&mut err, obligation); + err.emit(); + } + } + } + } + + ty::Predicate::WellFormed(ty) => { + // Same hacky approach as above to avoid deluging user + // with error messages. 
+ if !ty.references_error() && !self.tcx.sess.has_errors() { + self.need_type_info(obligation.cause.span, ty); + } + } + + _ => { + if !self.tcx.sess.has_errors() { + let mut err = struct_span_err!(self.tcx.sess, + obligation.cause.span, E0284, + "type annotations required: \ + cannot resolve `{}`", + predicate); + self.note_obligation_cause(&mut err, obligation); + err.emit(); + } + } + } + } + + /// Returns whether the trait predicate may apply for *some* assignment + /// to the type parameters. + fn predicate_can_apply(&self, pred: ty::PolyTraitRef<'tcx>) -> bool { + struct ParamToVarFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + var_map: FxHashMap, Ty<'tcx>> + } + + impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for ParamToVarFolder<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.infcx.tcx } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + if let ty::TyParam(..) = ty.sty { + let infcx = self.infcx; + self.var_map.entry(ty).or_insert_with(|| infcx.next_ty_var()) + } else { + ty.super_fold_with(self) + } + } + } + + self.probe(|_| { + let mut selcx = SelectionContext::new(self); + + let cleaned_pred = pred.fold_with(&mut ParamToVarFolder { + infcx: self, + var_map: FxHashMap() + }); + + let cleaned_pred = super::project::normalize( + &mut selcx, + ObligationCause::dummy(), + &cleaned_pred + ).value; + + let obligation = Obligation::new( + ObligationCause::dummy(), + cleaned_pred.to_predicate() + ); + + selcx.evaluate_obligation(&obligation) + }) + } + + + fn need_type_info(&self, span: Span, ty: Ty<'tcx>) { + let mut err = struct_span_err!(self.tcx.sess, span, E0282, + "unable to infer enough type information about `{}`", + ty); + err.note("type annotations or generic parameter binding required"); + err.span_label(span, &format!("cannot infer type for `{}`", ty)); + err.emit() + } + + fn note_obligation_cause(&self, + err: &mut DiagnosticBuilder, + obligation: &Obligation<'tcx, T>) + where T: 
fmt::Display + { + self.note_obligation_cause_code(err, + &obligation.predicate, + &obligation.cause.code); + } + + fn note_obligation_cause_code(&self, + err: &mut DiagnosticBuilder, + predicate: &T, + cause_code: &ObligationCauseCode<'tcx>) + where T: fmt::Display + { + let tcx = self.tcx; + match *cause_code { + ObligationCauseCode::ExprAssignable | + ObligationCauseCode::MatchExpressionArm { .. } | + ObligationCauseCode::IfExpression | + ObligationCauseCode::IfExpressionWithNoElse | + ObligationCauseCode::EquatePredicate | + ObligationCauseCode::MainFunctionType | + ObligationCauseCode::StartFunctionType | + ObligationCauseCode::IntrinsicType | + ObligationCauseCode::MethodReceiver | + ObligationCauseCode::MiscObligation => { + } + ObligationCauseCode::SliceOrArrayElem => { + err.note("slice and array elements must have `Sized` type"); + } + ObligationCauseCode::TupleElem => { + err.note("tuple elements must have `Sized` type"); + } + ObligationCauseCode::ProjectionWf(data) => { + err.note(&format!("required so that the projection `{}` is well-formed", + data)); + } + ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => { + err.note(&format!("required so that reference `{}` does not outlive its referent", + ref_ty)); + } + ObligationCauseCode::ObjectTypeBound(object_ty, region) => { + err.note(&format!("required so that the lifetime bound of `{}` for `{}` \ + is satisfied", + region, object_ty)); + } + ObligationCauseCode::ItemObligation(item_def_id) => { + let item_name = tcx.item_path_str(item_def_id); + err.note(&format!("required by `{}`", item_name)); + } + ObligationCauseCode::ObjectCastObligation(object_ty) => { + err.note(&format!("required for the cast to the object type `{}`", + self.ty_to_string(object_ty))); + } + ObligationCauseCode::RepeatVec => { + err.note("the `Copy` trait is required because the \ + repeated element will be copied"); + } + ObligationCauseCode::VariableType(_) => { + err.note("all local variables must have a statically 
known size"); + } + ObligationCauseCode::ReturnType => { + err.note("the return type of a function must have a \ + statically known size"); + } + ObligationCauseCode::AssignmentLhsSized => { + err.note("the left-hand-side of an assignment must have a statically known size"); + } + ObligationCauseCode::StructInitializerSized => { + err.note("structs must have a statically known size to be initialized"); + } + ObligationCauseCode::FieldSized => { + err.note("only the last field of a struct may have a dynamically sized type"); + } + ObligationCauseCode::ConstSized => { + err.note("constant expressions must have a statically known size"); + } + ObligationCauseCode::SharedStatic => { + err.note("shared static variables must have a type that implements `Sync`"); + } + ObligationCauseCode::BuiltinDerivedObligation(ref data) => { + let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref); + err.note(&format!("required because it appears within the type `{}`", + parent_trait_ref.0.self_ty())); + let parent_predicate = parent_trait_ref.to_predicate(); + self.note_obligation_cause_code(err, + &parent_predicate, + &data.parent_code); + } + ObligationCauseCode::ImplDerivedObligation(ref data) => { + let parent_trait_ref = self.resolve_type_vars_if_possible(&data.parent_trait_ref); + err.note( + &format!("required because of the requirements on the impl of `{}` for `{}`", + parent_trait_ref, + parent_trait_ref.0.self_ty())); + let parent_predicate = parent_trait_ref.to_predicate(); + self.note_obligation_cause_code(err, + &parent_predicate, + &data.parent_code); + } + ObligationCauseCode::CompareImplMethodObligation { .. 
} => { + err.note( + &format!("the requirement `{}` appears on the impl method \ + but not on the corresponding trait method", + predicate)); + } + } + } + + fn suggest_new_overflow_limit(&self, err: &mut DiagnosticBuilder) { + let current_limit = self.tcx.sess.recursion_limit.get(); + let suggested_limit = current_limit * 2; + err.note(&format!( + "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate", + suggested_limit)); + } +} diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs new file mode 100644 index 0000000000000..23c28037a3c2d --- /dev/null +++ b/src/librustc/traits/fulfill.rs @@ -0,0 +1,739 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use dep_graph::DepGraph; +use infer::{InferCtxt, InferOk}; +use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, TyCtxt, ToPredicate}; +use ty::subst::Subst; +use rustc_data_structures::obligation_forest::{ObligationForest, Error}; +use rustc_data_structures::obligation_forest::{ForestObligation, ObligationProcessor}; +use std::marker::PhantomData; +use std::mem; +use syntax::ast; +use util::nodemap::{FxHashSet, NodeMap}; +use hir::def_id::DefId; + +use super::CodeAmbiguity; +use super::CodeProjectionError; +use super::CodeSelectionError; +use super::{FulfillmentError, FulfillmentErrorCode, SelectionError}; +use super::{ObligationCause, BuiltinDerivedObligation}; +use super::{PredicateObligation, TraitObligation, Obligation}; +use super::project; +use super::select::SelectionContext; +use super::Unimplemented; + +impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> { + type Predicate = ty::Predicate<'tcx>; + + fn as_predicate(&self) -> &Self::Predicate { &self.obligation.predicate } +} + +pub struct GlobalFulfilledPredicates<'tcx> { + set: FxHashSet>, + dep_graph: DepGraph, +} + +/// The fulfillment context is used to drive trait resolution. It +/// consists of a list of obligations that must be (eventually) +/// satisfied. The job is to track which are satisfied, which yielded +/// errors, and which are still pending. At any point, users can call +/// `select_where_possible`, and the fulfilment context will try to do +/// selection, retaining only those obligations that remain +/// ambiguous. This may be helpful in pushing type inference +/// along. Once all type inference constraints have been generated, the +/// method `select_all_or_error` can be used to report any remaining +/// ambiguous cases as errors. + +pub struct FulfillmentContext<'tcx> { + // A list of all obligations that have been registered with this + // fulfillment context. + predicates: ObligationForest>, + + // A set of constraints that regionck must validate. 
Each + // constraint has the form `T:'a`, meaning "some type `T` must + // outlive the lifetime 'a". These constraints derive from + // instantiated type parameters. So if you had a struct defined + // like + // + // struct Foo { ... } + // + // then in some expression `let x = Foo { ... }` it will + // instantiate the type parameter `T` with a fresh type `$0`. At + // the same time, it will record a region obligation of + // `$0:'static`. This will get checked later by regionck. (We + // can't generally check these things right away because we have + // to wait until types are resolved.) + // + // These are stored in a map keyed to the id of the innermost + // enclosing fn body / static initializer expression. This is + // because the location where the obligation was incurred can be + // relevant with respect to which sublifetime assumptions are in + // place. The reason that we store under the fn-id, and not + // something more fine-grained, is so that it is easier for + // regionck to be sure that it has found *all* the region + // obligations (otherwise, it's easy to fail to walk to a + // particular node-id). + region_obligations: NodeMap>>, + + // A list of obligations that need to be deferred to + // a later time for them to be properly fulfilled. + deferred_obligations: Vec>, +} + +#[derive(Clone)] +pub struct RegionObligation<'tcx> { + pub sub_region: &'tcx ty::Region, + pub sup_type: Ty<'tcx>, + pub cause: ObligationCause<'tcx>, +} + +#[derive(Clone, Debug)] +pub struct PendingPredicateObligation<'tcx> { + pub obligation: PredicateObligation<'tcx>, + pub stalled_on: Vec>, +} + +/// An obligation which cannot be fulfilled in the context +/// it was registered in, such as auto trait obligations on +/// `impl Trait`, which require the concrete type to be +/// available, only guaranteed after finishing type-checking. 
+#[derive(Clone, Debug)] +pub struct DeferredObligation<'tcx> { + pub predicate: ty::PolyTraitPredicate<'tcx>, + pub cause: ObligationCause<'tcx> +} + +impl<'a, 'gcx, 'tcx> DeferredObligation<'tcx> { + /// If possible, create a `DeferredObligation` from + /// a trait predicate which had failed selection, + /// but could succeed later. + pub fn from_select_error(tcx: TyCtxt<'a, 'gcx, 'tcx>, + obligation: &TraitObligation<'tcx>, + selection_err: &SelectionError<'tcx>) + -> Option> { + if let Unimplemented = *selection_err { + if DeferredObligation::must_defer(tcx, &obligation.predicate) { + return Some(DeferredObligation { + predicate: obligation.predicate.clone(), + cause: obligation.cause.clone() + }); + } + } + + None + } + + /// Returns true if the given trait predicate can be + /// fulfilled at a later time. + pub fn must_defer(tcx: TyCtxt<'a, 'gcx, 'tcx>, + predicate: &ty::PolyTraitPredicate<'tcx>) + -> bool { + // Auto trait obligations on `impl Trait`. + if tcx.trait_has_default_impl(predicate.def_id()) { + let substs = predicate.skip_binder().trait_ref.substs; + if substs.types().count() == 1 && substs.regions().next().is_none() { + if let ty::TyAnon(..) = predicate.skip_binder().self_ty().sty { + return true; + } + } + } + + false + } + + /// If possible, return the nested obligations required + /// to fulfill this obligation. + pub fn try_select(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) + -> Option>> { + if let ty::TyAnon(def_id, substs) = self.predicate.skip_binder().self_ty().sty { + let ty = if def_id.is_local() { + tcx.item_types.borrow().get(&def_id).cloned() + } else { + Some(tcx.item_type(def_id)) + }; + // We can resolve the `impl Trait` to its concrete type. 
+ if let Some(concrete_ty) = ty.subst(tcx, substs) { + let predicate = ty::TraitRef { + def_id: self.predicate.def_id(), + substs: tcx.mk_substs_trait(concrete_ty, &[]) + }.to_predicate(); + + let original_obligation = Obligation::new(self.cause.clone(), + self.predicate.clone()); + let cause = original_obligation.derived_cause(BuiltinDerivedObligation); + return Some(vec![Obligation::new(cause, predicate)]); + } + } + + None + } + + /// Return the `PredicateObligation` this was created from. + pub fn to_obligation(&self) -> PredicateObligation<'tcx> { + let predicate = ty::Predicate::Trait(self.predicate.clone()); + Obligation::new(self.cause.clone(), predicate) + } + + /// Return an error as if this obligation had failed. + pub fn to_error(&self) -> FulfillmentError<'tcx> { + FulfillmentError::new(self.to_obligation(), CodeSelectionError(Unimplemented)) + } +} + +impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { + /// Creates a new fulfillment context. + pub fn new() -> FulfillmentContext<'tcx> { + FulfillmentContext { + predicates: ObligationForest::new(), + region_obligations: NodeMap(), + deferred_obligations: vec![], + } + } + + /// "Normalize" a projection type `::X` by + /// creating a fresh type variable `$0` as well as a projection + /// predicate `::X == $0`. When the + /// inference engine runs, it will attempt to find an impl of + /// `SomeTrait` or a where clause that lets us unify `$0` with + /// something concrete. If this fails, we'll unify `$0` with + /// `projection_ty` again. 
+ pub fn normalize_projection_type(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>) + -> Ty<'tcx> + { + debug!("normalize_projection_type(projection_ty={:?})", + projection_ty); + + assert!(!projection_ty.has_escaping_regions()); + + // FIXME(#20304) -- cache + + let mut selcx = SelectionContext::new(infcx); + let normalized = project::normalize_projection_type(&mut selcx, projection_ty, cause, 0); + + for obligation in normalized.obligations { + self.register_predicate_obligation(infcx, obligation); + } + + debug!("normalize_projection_type: result={:?}", normalized.value); + + normalized.value + } + + pub fn register_bound(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'tcx>, + def_id: DefId, + cause: ObligationCause<'tcx>) + { + let trait_ref = ty::TraitRef { + def_id: def_id, + substs: infcx.tcx.mk_substs_trait(ty, &[]), + }; + self.register_predicate_obligation(infcx, Obligation { + cause: cause, + recursion_depth: 0, + predicate: trait_ref.to_predicate() + }); + } + + pub fn register_region_obligation(&mut self, + t_a: Ty<'tcx>, + r_b: &'tcx ty::Region, + cause: ObligationCause<'tcx>) + { + register_region_obligation(t_a, r_b, cause, &mut self.region_obligations); + } + + pub fn register_predicate_obligation(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + obligation: PredicateObligation<'tcx>) + { + // this helps to reduce duplicate errors, as well as making + // debug output much nicer to read and so on. 
+ let obligation = infcx.resolve_type_vars_if_possible(&obligation); + + debug!("register_predicate_obligation(obligation={:?})", obligation); + + infcx.obligations_in_snapshot.set(true); + + if infcx.tcx.fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) { + debug!("register_predicate_obligation: duplicate"); + return + } + + self.predicates.register_obligation(PendingPredicateObligation { + obligation: obligation, + stalled_on: vec![] + }); + } + + pub fn region_obligations(&self, + body_id: ast::NodeId) + -> &[RegionObligation<'tcx>] + { + match self.region_obligations.get(&body_id) { + None => Default::default(), + Some(vec) => vec, + } + } + + pub fn select_all_or_error(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result<(),Vec>> + { + self.select_where_possible(infcx)?; + + // Fail all of the deferred obligations that haven't + // been otherwise removed from the context. + let deferred_errors = self.deferred_obligations.iter() + .map(|d| d.to_error()); + + let errors: Vec<_> = + self.predicates.to_errors(CodeAmbiguity) + .into_iter() + .map(|e| to_fulfillment_error(e)) + .chain(deferred_errors) + .collect(); + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } + + pub fn select_where_possible(&mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result<(),Vec>> + { + let mut selcx = SelectionContext::new(infcx); + self.select(&mut selcx) + } + + pub fn pending_obligations(&self) -> Vec> { + self.predicates.pending_obligations() + } + + pub fn take_deferred_obligations(&mut self) -> Vec> { + mem::replace(&mut self.deferred_obligations, vec![]) + } + + /// Attempts to select obligations using `selcx`. If `only_new_obligations` is true, then it + /// only attempts to select obligations that haven't been seen before. 
+ fn select(&mut self, selcx: &mut SelectionContext<'a, 'gcx, 'tcx>) + -> Result<(),Vec>> { + debug!("select(obligation-forest-size={})", self.predicates.len()); + + let mut errors = Vec::new(); + + loop { + debug!("select: starting another iteration"); + + // Process pending obligations. + let outcome = self.predicates.process_obligations(&mut FulfillProcessor { + selcx: selcx, + region_obligations: &mut self.region_obligations, + deferred_obligations: &mut self.deferred_obligations + }); + debug!("select: outcome={:?}", outcome); + + // these are obligations that were proven to be true. + for pending_obligation in outcome.completed { + let predicate = &pending_obligation.obligation.predicate; + selcx.tcx().fulfilled_predicates.borrow_mut() + .add_if_global(selcx.tcx(), predicate); + } + + errors.extend( + outcome.errors.into_iter() + .map(|e| to_fulfillment_error(e))); + + // If nothing new was added, no need to keep looping. + if outcome.stalled { + break; + } + } + + debug!("select({} predicates remaining, {} errors) done", + self.predicates.len(), errors.len()); + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } +} + +struct FulfillProcessor<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> { + selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + region_obligations: &'a mut NodeMap>>, + deferred_obligations: &'a mut Vec> +} + +impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, 'tcx> { + type Obligation = PendingPredicateObligation<'tcx>; + type Error = FulfillmentErrorCode<'tcx>; + + fn process_obligation(&mut self, + obligation: &mut Self::Obligation) + -> Result>, Self::Error> + { + process_predicate(self.selcx, + obligation, + self.region_obligations, + self.deferred_obligations) + .map(|os| os.map(|os| os.into_iter().map(|o| PendingPredicateObligation { + obligation: o, + stalled_on: vec![] + }).collect())) + } + + fn process_backedge<'c, I>(&mut self, cycle: I, + _marker: PhantomData<&'c PendingPredicateObligation<'tcx>>) + 
where I: Clone + Iterator>, + { + if coinductive_match(self.selcx, cycle.clone()) { + debug!("process_child_obligations: coinductive match"); + } else { + let cycle : Vec<_> = cycle.map(|c| c.obligation.clone()).collect(); + self.selcx.infcx().report_overflow_error_cycle(&cycle); + } + } +} + +/// Return the set of type variables contained in a trait ref +fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, + t: ty::PolyTraitRef<'tcx>) -> Vec> +{ + t.skip_binder() // ok b/c this check doesn't care about regions + .input_types() + .map(|t| selcx.infcx().resolve_type_vars_if_possible(&t)) + .filter(|t| t.has_infer_types()) + .flat_map(|t| t.walk()) + .filter(|t| match t.sty { ty::TyInfer(_) => true, _ => false }) + .collect() +} + +/// Processes a predicate obligation and returns either: +/// - `Ok(Some(v))` if the predicate is true, presuming that `v` are also true +/// - `Ok(None)` if we don't have enough info to be sure +/// - `Err` if the predicate does not hold +fn process_predicate<'a, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, + pending_obligation: &mut PendingPredicateObligation<'tcx>, + region_obligations: &mut NodeMap>>, + deferred_obligations: &mut Vec>) + -> Result>>, + FulfillmentErrorCode<'tcx>> +{ + // if we were stalled on some unresolved variables, first check + // whether any of them have been resolved; if not, don't bother + // doing more work yet + if !pending_obligation.stalled_on.is_empty() { + if pending_obligation.stalled_on.iter().all(|&ty| { + let resolved_ty = selcx.infcx().shallow_resolve(&ty); + resolved_ty == ty // nothing changed here + }) { + debug!("process_predicate: pending obligation {:?} still stalled on {:?}", + selcx.infcx().resolve_type_vars_if_possible(&pending_obligation.obligation), + pending_obligation.stalled_on); + return Ok(None); + } + pending_obligation.stalled_on = vec![]; + } + + let obligation = &mut pending_obligation.obligation; + + if 
obligation.predicate.has_infer_types() { + obligation.predicate = selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate); + } + + match obligation.predicate { + ty::Predicate::Trait(ref data) => { + if selcx.tcx().fulfilled_predicates.borrow().check_duplicate_trait(data) { + return Ok(Some(vec![])); + } + + let trait_obligation = obligation.with(data.clone()); + match selcx.select(&trait_obligation) { + Ok(Some(vtable)) => { + debug!("selecting trait `{:?}` at depth {} yielded Ok(Some)", + data, obligation.recursion_depth); + Ok(Some(vtable.nested_obligations())) + } + Ok(None) => { + debug!("selecting trait `{:?}` at depth {} yielded Ok(None)", + data, obligation.recursion_depth); + + // This is a bit subtle: for the most part, the + // only reason we can fail to make progress on + // trait selection is because we don't have enough + // information about the types in the trait. One + // exception is that we sometimes haven't decided + // what kind of closure a closure is. *But*, in + // that case, it turns out, the type of the + // closure will also change, because the closure + // also includes references to its upvars as part + // of its type, and those types are resolved at + // the same time. 
+ // + // FIXME(#32286) logic seems false if no upvars + pending_obligation.stalled_on = + trait_ref_type_vars(selcx, data.to_poly_trait_ref()); + + debug!("process_predicate: pending obligation {:?} now stalled on {:?}", + selcx.infcx().resolve_type_vars_if_possible(obligation), + pending_obligation.stalled_on); + + Ok(None) + } + Err(selection_err) => { + info!("selecting trait `{:?}` at depth {} yielded Err", + data, obligation.recursion_depth); + + let defer = DeferredObligation::from_select_error(selcx.tcx(), + &trait_obligation, + &selection_err); + if let Some(deferred_obligation) = defer { + if let Some(nested) = deferred_obligation.try_select(selcx.tcx()) { + Ok(Some(nested)) + } else { + // Pretend that the obligation succeeded, + // but record it for later. + deferred_obligations.push(deferred_obligation); + Ok(Some(vec![])) + } + } else { + Err(CodeSelectionError(selection_err)) + } + } + } + } + + ty::Predicate::Equate(ref binder) => { + match selcx.infcx().equality_predicate(&obligation.cause, binder) { + Ok(InferOk { obligations, value: () }) => { + Ok(Some(obligations)) + }, + Err(_) => Err(CodeSelectionError(Unimplemented)), + } + } + + ty::Predicate::RegionOutlives(ref binder) => { + match selcx.infcx().region_outlives_predicate(&obligation.cause, binder) { + Ok(()) => Ok(Some(Vec::new())), + Err(_) => Err(CodeSelectionError(Unimplemented)), + } + } + + ty::Predicate::TypeOutlives(ref binder) => { + // Check if there are higher-ranked regions. + match selcx.tcx().no_late_bound_regions(binder) { + // If there are, inspect the underlying type further. + None => { + // Convert from `Binder>` to `Binder`. + let binder = binder.map_bound_ref(|pred| pred.0); + + // Check if the type has any bound regions. + match selcx.tcx().no_late_bound_regions(&binder) { + // If so, this obligation is an error (for now). Eventually we should be + // able to support additional cases here, like `for<'a> &'a str: 'a`. 
+ None => { + Err(CodeSelectionError(Unimplemented)) + } + // Otherwise, we have something of the form + // `for<'a> T: 'a where 'a not in T`, which we can treat as `T: 'static`. + Some(t_a) => { + let r_static = selcx.tcx().mk_region(ty::ReStatic); + register_region_obligation(t_a, r_static, + obligation.cause.clone(), + region_obligations); + Ok(Some(vec![])) + } + } + } + // If there aren't, register the obligation. + Some(ty::OutlivesPredicate(t_a, r_b)) => { + register_region_obligation(t_a, r_b, + obligation.cause.clone(), + region_obligations); + Ok(Some(vec![])) + } + } + } + + ty::Predicate::Projection(ref data) => { + let project_obligation = obligation.with(data.clone()); + match project::poly_project_and_unify_type(selcx, &project_obligation) { + Ok(None) => { + pending_obligation.stalled_on = + trait_ref_type_vars(selcx, data.to_poly_trait_ref()); + Ok(None) + } + Ok(v) => Ok(v), + Err(e) => Err(CodeProjectionError(e)) + } + } + + ty::Predicate::ObjectSafe(trait_def_id) => { + if !selcx.tcx().is_object_safe(trait_def_id) { + Err(CodeSelectionError(Unimplemented)) + } else { + Ok(Some(Vec::new())) + } + } + + ty::Predicate::ClosureKind(closure_def_id, kind) => { + match selcx.infcx().closure_kind(closure_def_id) { + Some(closure_kind) => { + if closure_kind.extends(kind) { + Ok(Some(vec![])) + } else { + Err(CodeSelectionError(Unimplemented)) + } + } + None => { + Ok(None) + } + } + } + + ty::Predicate::WellFormed(ty) => { + match ty::wf::obligations(selcx.infcx(), obligation.cause.body_id, + ty, obligation.cause.span) { + None => { + pending_obligation.stalled_on = vec![ty]; + Ok(None) + } + s => Ok(s) + } + } + } +} + +/// For defaulted traits, we use a co-inductive strategy to solve, so +/// that recursion is ok. 
This routine returns true if the top of the +/// stack (`cycle[0]`): +/// - is a defaulted trait, and +/// - it also appears in the backtrace at some position `X`; and, +/// - all the predicates at positions `X..` between `X` an the top are +/// also defaulted traits. +fn coinductive_match<'a,'c,'gcx,'tcx,I>(selcx: &mut SelectionContext<'a,'gcx,'tcx>, + cycle: I) -> bool + where I: Iterator>, + 'tcx: 'c +{ + let mut cycle = cycle; + cycle + .all(|bt_obligation| { + let result = coinductive_obligation(selcx, &bt_obligation.obligation); + debug!("coinductive_match: bt_obligation={:?} coinductive={}", + bt_obligation, result); + result + }) +} + +fn coinductive_obligation<'a,'gcx,'tcx>(selcx: &SelectionContext<'a,'gcx,'tcx>, + obligation: &PredicateObligation<'tcx>) + -> bool { + match obligation.predicate { + ty::Predicate::Trait(ref data) => { + selcx.tcx().trait_has_default_impl(data.def_id()) + } + _ => { + false + } + } +} + +fn register_region_obligation<'tcx>(t_a: Ty<'tcx>, + r_b: &'tcx ty::Region, + cause: ObligationCause<'tcx>, + region_obligations: &mut NodeMap>>) +{ + let region_obligation = RegionObligation { sup_type: t_a, + sub_region: r_b, + cause: cause }; + + debug!("register_region_obligation({:?}, cause={:?})", + region_obligation, region_obligation.cause); + + region_obligations.entry(region_obligation.cause.body_id) + .or_insert(vec![]) + .push(region_obligation); + +} + +impl<'a, 'gcx, 'tcx> GlobalFulfilledPredicates<'gcx> { + pub fn new(dep_graph: DepGraph) -> GlobalFulfilledPredicates<'gcx> { + GlobalFulfilledPredicates { + set: FxHashSet(), + dep_graph: dep_graph, + } + } + + pub fn check_duplicate(&self, key: &ty::Predicate<'tcx>) -> bool { + if let ty::Predicate::Trait(ref data) = *key { + self.check_duplicate_trait(data) + } else { + false + } + } + + pub fn check_duplicate_trait(&self, data: &ty::PolyTraitPredicate<'tcx>) -> bool { + // For the global predicate registry, when we find a match, it + // may have been computed by some other 
task, so we want to + // add a read from the node corresponding to the predicate + // processing to make sure we get the transitive dependencies. + if self.set.contains(data) { + debug_assert!(data.is_global()); + self.dep_graph.read(data.dep_node()); + debug!("check_duplicate: global predicate `{:?}` already proved elsewhere", data); + + true + } else { + false + } + } + + fn add_if_global(&mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>, key: &ty::Predicate<'tcx>) { + if let ty::Predicate::Trait(ref data) = *key { + // We only add things to the global predicate registry + // after the current task has proved them, and hence + // already has the required read edges, so we don't need + // to add any more edges here. + if data.is_global() { + // Don't cache predicates which were fulfilled + // by deferring them for later fulfillment. + if DeferredObligation::must_defer(tcx, data) { + return; + } + + if let Some(data) = tcx.lift_to_global(data) { + if self.set.insert(data.clone()) { + debug!("add_if_global: global predicate `{:?}` added", data); + } + } + } + } + } +} + +fn to_fulfillment_error<'tcx>( + error: Error, FulfillmentErrorCode<'tcx>>) + -> FulfillmentError<'tcx> +{ + let obligation = error.backtrace.into_iter().next().unwrap().obligation; + FulfillmentError::new(obligation, error.error) +} diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs new file mode 100644 index 0000000000000..5c5bf130c3ba4 --- /dev/null +++ b/src/librustc/traits/mod.rs @@ -0,0 +1,765 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Trait Resolution. See README.md for an overview of how this works. 
+ +pub use self::SelectionError::*; +pub use self::FulfillmentErrorCode::*; +pub use self::Vtable::*; +pub use self::ObligationCauseCode::*; + +use hir; +use hir::def_id::DefId; +use middle::free_region::FreeRegionMap; +use ty::subst::Substs; +use ty::{self, Ty, TyCtxt, TypeFoldable, ToPredicate}; +use infer::InferCtxt; + +use std::rc::Rc; +use syntax::ast; +use syntax_pos::{Span, DUMMY_SP}; + +pub use self::error_reporting::TraitErrorKey; +pub use self::coherence::orphan_check; +pub use self::coherence::overlapping_impls; +pub use self::coherence::OrphanCheckErr; +pub use self::fulfill::{FulfillmentContext, GlobalFulfilledPredicates, RegionObligation}; +pub use self::fulfill::DeferredObligation; +pub use self::project::MismatchedProjectionTypes; +pub use self::project::{normalize, normalize_projection_type, Normalized}; +pub use self::project::{ProjectionCache, ProjectionCacheSnapshot, Reveal}; +pub use self::object_safety::ObjectSafetyViolation; +pub use self::object_safety::MethodViolationCode; +pub use self::select::{EvaluationCache, SelectionContext, SelectionCache}; +pub use self::select::{MethodMatchResult, MethodMatched, MethodAmbiguous, MethodDidNotMatch}; +pub use self::select::{MethodMatchedData}; // intentionally don't export variants +pub use self::specialize::{OverlapError, specialization_graph, specializes, translate_substs}; +pub use self::specialize::{SpecializesCache, find_method}; +pub use self::util::elaborate_predicates; +pub use self::util::supertraits; +pub use self::util::Supertraits; +pub use self::util::supertrait_def_ids; +pub use self::util::SupertraitDefIds; +pub use self::util::transitive_bounds; + +mod coherence; +mod error_reporting; +mod fulfill; +mod project; +mod object_safety; +mod select; +mod specialize; +mod structural_impls; +mod util; + +/// An `Obligation` represents some trait reference (e.g. `int:Eq`) for +/// which the vtable must be found. The process of finding a vtable is +/// called "resolving" the `Obligation`. 
This process consists of +/// either identifying an `impl` (e.g., `impl Eq for int`) that +/// provides the required vtable, or else finding a bound that is in +/// scope. The eventual result is usually a `Selection` (defined below). +#[derive(Clone, PartialEq, Eq)] +pub struct Obligation<'tcx, T> { + pub cause: ObligationCause<'tcx>, + pub recursion_depth: usize, + pub predicate: T, +} + +pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>; +pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>; + +/// Why did we incur this obligation? Used for error reporting. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ObligationCause<'tcx> { + pub span: Span, + + // The id of the fn body that triggered this obligation. This is + // used for region obligations to determine the precise + // environment in which the region obligation should be evaluated + // (in particular, closures can add new assumptions). See the + // field `region_obligations` of the `FulfillmentContext` for more + // information. + pub body_id: ast::NodeId, + + pub code: ObligationCauseCode<'tcx> +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum ObligationCauseCode<'tcx> { + /// Not well classified or should be obvious from span. + MiscObligation, + + /// A slice or array is WF only if `T: Sized` + SliceOrArrayElem, + + /// A tuple is WF only if its middle elements are Sized + TupleElem, + + /// This is the trait reference from the given projection + ProjectionWf(ty::ProjectionTy<'tcx>), + + /// In an impl of trait X for type Y, type Y must + /// also implement all supertraits of X. + ItemObligation(DefId), + + /// A type like `&'a T` is WF only if `T: 'a`. + ReferenceOutlivesReferent(Ty<'tcx>), + + /// A type like `Box + 'b>` is WF only if `'b: 'a`. + ObjectTypeBound(Ty<'tcx>, &'tcx ty::Region), + + /// Obligation incurred due to an object cast. 
+ ObjectCastObligation(/* Object type */ Ty<'tcx>), + + /// Various cases where expressions must be sized/copy/etc: + AssignmentLhsSized, // L = X implies that L is Sized + StructInitializerSized, // S { ... } must be Sized + VariableType(ast::NodeId), // Type of each variable must be Sized + ReturnType, // Return type must be Sized + RepeatVec, // [T,..n] --> T must be Copy + + // Types of fields (other than the last) in a struct must be sized. + FieldSized, + + // Constant expressions must be sized. + ConstSized, + + // static items must have `Sync` type + SharedStatic, + + BuiltinDerivedObligation(DerivedObligationCause<'tcx>), + + ImplDerivedObligation(DerivedObligationCause<'tcx>), + + // error derived when matching traits/impls; see ObligationCause for more details + CompareImplMethodObligation { + item_name: ast::Name, + impl_item_def_id: DefId, + trait_item_def_id: DefId, + lint_id: Option, + }, + + // Checking that this expression can be assigned where it needs to be + // FIXME(eddyb) #11161 is the original Expr required? + ExprAssignable, + + // Computing common supertype in the arms of a match expression + MatchExpressionArm { arm_span: Span, + source: hir::MatchSource }, + + // Computing common supertype in an if expression + IfExpression, + + // Computing common supertype of an if expression with no else counter-part + IfExpressionWithNoElse, + + // `where a == b` + EquatePredicate, + + // `main` has wrong type + MainFunctionType, + + // `start` has wrong type + StartFunctionType, + + // intrinsic has wrong type + IntrinsicType, + + // method receiver + MethodReceiver, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct DerivedObligationCause<'tcx> { + /// The trait reference of the parent obligation that led to the + /// current obligation. Note that only trait obligations lead to + /// derived obligations, so we just store the trait reference here + /// directly. 
+ parent_trait_ref: ty::PolyTraitRef<'tcx>, + + /// The parent trait had this cause + parent_code: Rc> +} + +pub type Obligations<'tcx, O> = Vec>; +pub type PredicateObligations<'tcx> = Vec>; +pub type TraitObligations<'tcx> = Vec>; + +pub type Selection<'tcx> = Vtable<'tcx, PredicateObligation<'tcx>>; + +#[derive(Clone,Debug)] +pub enum SelectionError<'tcx> { + Unimplemented, + OutputTypeParameterMismatch(ty::PolyTraitRef<'tcx>, + ty::PolyTraitRef<'tcx>, + ty::error::TypeError<'tcx>), + TraitNotObjectSafe(DefId), +} + +pub struct FulfillmentError<'tcx> { + pub obligation: PredicateObligation<'tcx>, + pub code: FulfillmentErrorCode<'tcx> +} + +#[derive(Clone)] +pub enum FulfillmentErrorCode<'tcx> { + CodeSelectionError(SelectionError<'tcx>), + CodeProjectionError(MismatchedProjectionTypes<'tcx>), + CodeAmbiguity, +} + +/// When performing resolution, it is typically the case that there +/// can be one of three outcomes: +/// +/// - `Ok(Some(r))`: success occurred with result `r` +/// - `Ok(None)`: could not definitely determine anything, usually due +/// to inconclusive type inference. +/// - `Err(e)`: error `e` occurred +pub type SelectionResult<'tcx, T> = Result, SelectionError<'tcx>>; + +/// Given the successful resolution of an obligation, the `Vtable` +/// indicates where the vtable comes from. Note that while we call this +/// a "vtable", it does not necessarily indicate dynamic dispatch at +/// runtime. `Vtable` instances just tell the compiler where to find +/// methods, but in generic code those methods are typically statically +/// dispatched -- only when an object is constructed is a `Vtable` +/// instance reified into an actual vtable. +/// +/// For example, the vtable may be tied to a specific impl (case A), +/// or it may be relative to some bound that is in scope (case B). +/// +/// +/// ``` +/// impl Clone for Option { ... } // Impl_1 +/// impl Clone for Box { ... } // Impl_2 +/// impl Clone for int { ... 
} // Impl_3 +/// +/// fn foo(concrete: Option>, +/// param: T, +/// mixed: Option) { +/// +/// // Case A: Vtable points at a specific impl. Only possible when +/// // type is concretely known. If the impl itself has bounded +/// // type parameters, Vtable will carry resolutions for those as well: +/// concrete.clone(); // Vtable(Impl_1, [Vtable(Impl_2, [Vtable(Impl_3)])]) +/// +/// // Case B: Vtable must be provided by caller. This applies when +/// // type is a type parameter. +/// param.clone(); // VtableParam +/// +/// // Case C: A mix of cases A and B. +/// mixed.clone(); // Vtable(Impl_1, [VtableParam]) +/// } +/// ``` +/// +/// ### The type parameter `N` +/// +/// See explanation on `VtableImplData`. +#[derive(Clone)] +pub enum Vtable<'tcx, N> { + /// Vtable identifying a particular impl. + VtableImpl(VtableImplData<'tcx, N>), + + /// Vtable for default trait implementations + /// This carries the information and nested obligations with regards + /// to a default implementation for a trait `Trait`. The nested obligations + /// ensure the trait implementation holds for all the constituent types. + VtableDefaultImpl(VtableDefaultImplData), + + /// Successful resolution to an obligation provided by the caller + /// for some type parameter. The `Vec` represents the + /// obligations incurred from normalizing the where-clause (if + /// any). + VtableParam(Vec), + + /// Virtual calls through an object + VtableObject(VtableObjectData<'tcx, N>), + + /// Successful resolution for a builtin trait. + VtableBuiltin(VtableBuiltinData), + + /// Vtable automatically generated for a closure. The def ID is the ID + /// of the closure expression. This is a `VtableImpl` in spirit, but the + /// impl is generated by the compiler and does not appear in the source. + VtableClosure(VtableClosureData<'tcx, N>), + + /// Same as above, but for a fn pointer type with the given signature. 
+ VtableFnPointer(VtableFnPointerData<'tcx, N>), +} + +/// Identifies a particular impl in the source, along with a set of +/// substitutions from the impl's type/lifetime parameters. The +/// `nested` vector corresponds to the nested obligations attached to +/// the impl's type parameters. +/// +/// The type parameter `N` indicates the type used for "nested +/// obligations" that are required by the impl. During type check, this +/// is `Obligation`, as one might expect. During trans, however, this +/// is `()`, because trans only requires a shallow resolution of an +/// impl, and nested obligations are satisfied later. +#[derive(Clone, PartialEq, Eq)] +pub struct VtableImplData<'tcx, N> { + pub impl_def_id: DefId, + pub substs: &'tcx Substs<'tcx>, + pub nested: Vec +} + +#[derive(Clone, PartialEq, Eq)] +pub struct VtableClosureData<'tcx, N> { + pub closure_def_id: DefId, + pub substs: ty::ClosureSubsts<'tcx>, + /// Nested obligations. This can be non-empty if the closure + /// signature contains associated types. + pub nested: Vec +} + +#[derive(Clone)] +pub struct VtableDefaultImplData { + pub trait_def_id: DefId, + pub nested: Vec +} + +#[derive(Clone)] +pub struct VtableBuiltinData { + pub nested: Vec +} + +/// A vtable for some object-safe trait `Foo` automatically derived +/// for the object type `Foo`. +#[derive(PartialEq,Eq,Clone)] +pub struct VtableObjectData<'tcx, N> { + /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`. + pub upcast_trait_ref: ty::PolyTraitRef<'tcx>, + + /// The vtable is formed by concatenating together the method lists of + /// the base object trait and all supertraits; this is the start of + /// `upcast_trait_ref`'s methods in that vtable. + pub vtable_base: usize, + + pub nested: Vec, +} + +#[derive(Clone, PartialEq, Eq)] +pub struct VtableFnPointerData<'tcx, N> { + pub fn_ty: ty::Ty<'tcx>, + pub nested: Vec +} + +/// Creates predicate obligations from the generic bounds. 
+pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, + generic_bounds: &ty::InstantiatedPredicates<'tcx>) + -> PredicateObligations<'tcx> +{ + util::predicates_for_generics(cause, 0, generic_bounds) +} + +/// Determines whether the type `ty` is known to meet `bound` and +/// returns true if so. Returns false if `ty` either does not meet +/// `bound` or is not known to meet bound (note that this is +/// conservative towards *no impl*, which is the opposite of the +/// `evaluate` methods). +pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'tcx>, + def_id: DefId, + span: Span) +-> bool +{ + debug!("type_known_to_meet_bound(ty={:?}, bound={:?})", + ty, + infcx.tcx.item_path_str(def_id)); + + let trait_ref = ty::TraitRef { + def_id: def_id, + substs: infcx.tcx.mk_substs_trait(ty, &[]), + }; + let obligation = Obligation { + cause: ObligationCause::misc(span, ast::DUMMY_NODE_ID), + recursion_depth: 0, + predicate: trait_ref.to_predicate(), + }; + + let result = SelectionContext::new(infcx) + .evaluate_obligation_conservatively(&obligation); + debug!("type_known_to_meet_ty={:?} bound={} => {:?}", + ty, infcx.tcx.item_path_str(def_id), result); + + if result && (ty.has_infer_types() || ty.has_closure_types()) { + // Because of inference "guessing", selection can sometimes claim + // to succeed while the success requires a guess. To ensure + // this function's result remains infallible, we must confirm + // that guess. While imperfect, I believe this is sound. + + let mut fulfill_cx = FulfillmentContext::new(); + + // We can use a dummy node-id here because we won't pay any mind + // to region obligations that arise (there shouldn't really be any + // anyhow). + let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID); + + fulfill_cx.register_bound(infcx, ty, def_id, cause); + + // Note: we only assume something is `Copy` if we can + // *definitively* show that it implements `Copy`. 
Otherwise, + // assume it is move; linear is always ok. + match fulfill_cx.select_all_or_error(infcx) { + Ok(()) => { + debug!("type_known_to_meet_bound: ty={:?} bound={} success", + ty, + infcx.tcx.item_path_str(def_id)); + true + } + Err(e) => { + debug!("type_known_to_meet_bound: ty={:?} bound={} errors={:?}", + ty, + infcx.tcx.item_path_str(def_id), + e); + false + } + } + } else { + result + } +} + +// FIXME: this is gonna need to be removed ... +/// Normalizes the parameter environment, reporting errors if they occur. +pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + unnormalized_env: ty::ParameterEnvironment<'tcx>, + cause: ObligationCause<'tcx>) + -> ty::ParameterEnvironment<'tcx> +{ + // I'm not wild about reporting errors here; I'd prefer to + // have the errors get reported at a defined place (e.g., + // during typeck). Instead I have all parameter + // environments, in effect, going through this function + // and hence potentially reporting errors. This ensurse of + // course that we never forget to normalize (the + // alternative seemed like it would involve a lot of + // manual invocations of this fn -- and then we'd have to + // deal with the errors at each of those sites). + // + // In any case, in practice, typeck constructs all the + // parameter environments once for every fn as it goes, + // and errors will get reported then; so after typeck we + // can be sure that no errors should occur. + + let span = cause.span; + let body_id = cause.body_id; + + debug!("normalize_param_env_or_error(unnormalized_env={:?})", + unnormalized_env); + + let predicates: Vec<_> = + util::elaborate_predicates(tcx, unnormalized_env.caller_bounds.clone()) + .filter(|p| !p.is_global()) // (*) + .collect(); + + // (*) Any predicate like `i32: Trait` or whatever doesn't + // need to be in the *environment* to be proven, so screen those + // out. This is important for the soundness of inter-fn + // caching. 
Note though that we should probably check that these + // predicates hold at the point where the environment is + // constructed, but I am not currently doing so out of laziness. + // -nmatsakis + + debug!("normalize_param_env_or_error: elaborated-predicates={:?}", + predicates); + + let elaborated_env = unnormalized_env.with_caller_bounds(predicates); + + tcx.infer_ctxt(None, Some(elaborated_env), Reveal::NotSpecializable).enter(|infcx| { + let predicates = match fully_normalize(&infcx, cause, + &infcx.parameter_environment.caller_bounds) { + Ok(predicates) => predicates, + Err(errors) => { + infcx.report_fulfillment_errors(&errors); + // An unnormalized env is better than nothing. + return infcx.parameter_environment; + } + }; + + debug!("normalize_param_env_or_error: normalized predicates={:?}", + predicates); + + let free_regions = FreeRegionMap::new(); + infcx.resolve_regions_and_report_errors(&free_regions, body_id); + let predicates = match infcx.fully_resolve(&predicates) { + Ok(predicates) => predicates, + Err(fixup_err) => { + // If we encounter a fixup error, it means that some type + // variable wound up unconstrained. I actually don't know + // if this can happen, and I certainly don't expect it to + // happen often, but if it did happen it probably + // represents a legitimate failure due to some kind of + // unconstrained variable, and it seems better not to ICE, + // all things considered. + tcx.sess.span_err(span, &fixup_err.to_string()); + // An unnormalized env is better than nothing. 
+ return infcx.parameter_environment; + } + }; + + let predicates = match tcx.lift_to_global(&predicates) { + Some(predicates) => predicates, + None => return infcx.parameter_environment + }; + + debug!("normalize_param_env_or_error: resolved predicates={:?}", + predicates); + + infcx.parameter_environment.with_caller_bounds(predicates) + }) +} + +pub fn fully_normalize<'a, 'gcx, 'tcx, T>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + value: &T) + -> Result>> + where T : TypeFoldable<'tcx> +{ + debug!("fully_normalize(value={:?})", value); + + let mut selcx = &mut SelectionContext::new(infcx); + // FIXME (@jroesch) ISSUE 26721 + // I'm not sure if this is a bug or not, needs further investigation. + // It appears that by reusing the fulfillment_cx here we incur more + // obligations and later trip an asssertion on regionck.rs line 337. + // + // The two possibilities I see is: + // - normalization is not actually fully happening and we + // have a bug else where + // - we are adding a duplicate bound into the list causing + // its size to change. + // + // I think we should probably land this refactor and then come + // back to this is a follow-up patch. 
+ let mut fulfill_cx = FulfillmentContext::new(); + + let Normalized { value: normalized_value, obligations } = + project::normalize(selcx, cause, value); + debug!("fully_normalize: normalized_value={:?} obligations={:?}", + normalized_value, + obligations); + for obligation in obligations { + fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation); + } + + debug!("fully_normalize: select_all_or_error start"); + match fulfill_cx.select_all_or_error(infcx) { + Ok(()) => { } + Err(e) => { + debug!("fully_normalize: error={:?}", e); + return Err(e); + } + } + debug!("fully_normalize: select_all_or_error complete"); + let resolved_value = infcx.resolve_type_vars_if_possible(&normalized_value); + debug!("fully_normalize: resolved_value={:?}", resolved_value); + Ok(resolved_value) +} + +/// Normalizes the predicates and checks whether they hold. If this +/// returns false, then either normalize encountered an error or one +/// of the predicates did not hold. Used when creating vtables to +/// check for unsatisfiable methods. +pub fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + predicates: Vec>) + -> bool +{ + debug!("normalize_and_test_predicates(predicates={:?})", + predicates); + + tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let mut selcx = SelectionContext::new(&infcx); + let mut fulfill_cx = FulfillmentContext::new(); + let cause = ObligationCause::dummy(); + let Normalized { value: predicates, obligations } = + normalize(&mut selcx, cause.clone(), &predicates); + for obligation in obligations { + fulfill_cx.register_predicate_obligation(&infcx, obligation); + } + for predicate in predicates { + let obligation = Obligation::new(cause.clone(), predicate); + fulfill_cx.register_predicate_obligation(&infcx, obligation); + } + + fulfill_cx.select_all_or_error(&infcx).is_ok() + }) +} + +/// Given a trait `trait_ref`, iterates the vtable entries +/// that come from `trait_ref`, including its supertraits. 
+#[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. +pub fn get_vtable_methods<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>) + -> impl Iterator)>> + 'a +{ + debug!("get_vtable_methods({:?})", trait_ref); + + supertraits(tcx, trait_ref).flat_map(move |trait_ref| { + tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id()); + + let trait_methods = tcx.associated_items(trait_ref.def_id()) + .filter(|item| item.kind == ty::AssociatedKind::Method); + + // Now list each method's DefId and Substs (for within its trait). + // If the method can never be called from this object, produce None. + trait_methods.map(move |trait_method| { + debug!("get_vtable_methods: trait_method={:?}", trait_method); + let def_id = trait_method.def_id; + + // Some methods cannot be called on an object; skip those. + if !tcx.is_vtable_safe_method(trait_ref.def_id(), &trait_method) { + debug!("get_vtable_methods: not vtable safe"); + return None; + } + + // the method may have some early-bound lifetimes, add + // regions for those + let substs = Substs::for_item(tcx, def_id, + |_, _| tcx.mk_region(ty::ReErased), + |def, _| trait_ref.substs().type_for_def(def)); + + // It's possible that the method relies on where clauses that + // do not hold for this particular set of type parameters. + // Note that this method could then never be called, so we + // do not want to try and trans it, in that case (see #23435). 
+ let predicates = tcx.item_predicates(def_id).instantiate_own(tcx, substs); + if !normalize_and_test_predicates(tcx, predicates.predicates) { + debug!("get_vtable_methods: predicates do not hold"); + return None; + } + + Some((def_id, substs)) + }) + }) +} + +impl<'tcx,O> Obligation<'tcx,O> { + pub fn new(cause: ObligationCause<'tcx>, + trait_ref: O) + -> Obligation<'tcx, O> + { + Obligation { cause: cause, + recursion_depth: 0, + predicate: trait_ref } + } + + fn with_depth(cause: ObligationCause<'tcx>, + recursion_depth: usize, + trait_ref: O) + -> Obligation<'tcx, O> + { + Obligation { cause: cause, + recursion_depth: recursion_depth, + predicate: trait_ref } + } + + pub fn misc(span: Span, body_id: ast::NodeId, trait_ref: O) -> Obligation<'tcx, O> { + Obligation::new(ObligationCause::misc(span, body_id), trait_ref) + } + + pub fn with

where P: FnMut(&hir::Expr_) -> bool { - p: P, - flag: bool, -} - -impl<'v, P> Visitor<'v> for LoopQueryVisitor

where P: FnMut(&hir::Expr_) -> bool { - fn visit_expr(&mut self, e: &hir::Expr) { - self.flag |= (self.p)(&e.node); - match e.node { - // Skip inner loops, since a break in the inner loop isn't a - // break inside the outer loop - hir::ExprLoop(..) | hir::ExprWhile(..) => {} - _ => intravisit::walk_expr(self, e) - } - } -} - -// Takes a predicate p, returns true iff p is true for any subexpressions -// of b -- skipping any inner loops (loop, while, loop_body) -pub fn loop_query

(b: &hir::Block, p: P) -> bool where P: FnMut(&hir::Expr_) -> bool { - let mut v = LoopQueryVisitor { - p: p, - flag: false, - }; - intravisit::walk_block(&mut v, b); - return v.flag; -} - -struct BlockQueryVisitor

where P: FnMut(&hir::Expr) -> bool { - p: P, - flag: bool, -} - -impl<'v, P> Visitor<'v> for BlockQueryVisitor

where P: FnMut(&hir::Expr) -> bool { - fn visit_expr(&mut self, e: &hir::Expr) { - self.flag |= (self.p)(e); - intravisit::walk_expr(self, e) - } -} - -// Takes a predicate p, returns true iff p is true for any subexpressions -// of b -- skipping any inner loops (loop, while, loop_body) -pub fn block_query

(b: &hir::Block, p: P) -> bool where P: FnMut(&hir::Expr) -> bool { - let mut v = BlockQueryVisitor { - p: p, - flag: false, - }; - intravisit::walk_block(&mut v, &*b); - return v.flag; -} - pub trait MemoizationMap { type Key: Clone; type Value: Clone; @@ -217,7 +198,7 @@ pub trait MemoizationMap { } impl MemoizationMap for RefCell> - where K: Hash+Eq+Clone, V: Clone, S: HashState + where K: Hash+Eq+Clone, V: Clone, S: BuildHasher { type Key = K; type Value = V; @@ -248,3 +229,17 @@ pub fn path2cstr(p: &Path) -> CString { pub fn path2cstr(p: &Path) -> CString { CString::new(p.to_str().unwrap()).unwrap() } + + +#[test] +fn test_to_readable_str() { + assert_eq!("0", to_readable_str(0)); + assert_eq!("1", to_readable_str(1)); + assert_eq!("99", to_readable_str(99)); + assert_eq!("999", to_readable_str(999)); + assert_eq!("1_000", to_readable_str(1_000)); + assert_eq!("1_001", to_readable_str(1_001)); + assert_eq!("999_999", to_readable_str(999_999)); + assert_eq!("1_000_000", to_readable_str(1_000_000)); + assert_eq!("1_234_567", to_readable_str(1_234_567)); +} diff --git a/src/librustc/util/fs.rs b/src/librustc/util/fs.rs index 4936e049ef2ee..c290d8f893e9e 100644 --- a/src/librustc/util/fs.rs +++ b/src/librustc/util/fs.rs @@ -10,6 +10,8 @@ use std::path::{self, Path, PathBuf}; use std::ffi::OsString; +use std::fs; +use std::io; // Unfortunately, on windows, it looks like msvcrt.dll is silently translating // verbatim paths under the hood to non-verbatim paths! This manifests itself as @@ -53,3 +55,50 @@ pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf { _ => p.to_path_buf(), } } + +pub enum LinkOrCopy { + Link, + Copy +} + +/// Copy `p` into `q`, preferring to use hard-linking if possible. If +/// `q` already exists, it is removed first. +/// The result indicates which of the two operations has been performed. 
+pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result { + let p = p.as_ref(); + let q = q.as_ref(); + if q.exists() { + fs::remove_file(&q)?; + } + + match fs::hard_link(p, q) { + Ok(()) => Ok(LinkOrCopy::Link), + Err(_) => { + match fs::copy(p, q) { + Ok(_) => Ok(LinkOrCopy::Copy), + Err(e) => Err(e) + } + } + } +} + +// Like std::fs::create_dir_all, except handles concurrent calls among multiple +// threads or processes. +pub fn create_dir_racy(path: &Path) -> io::Result<()> { + match fs::create_dir(path) { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} + Err(e) => return Err(e), + } + match path.parent() { + Some(p) => try!(create_dir_racy(p)), + None => return Err(io::Error::new(io::ErrorKind::Other, + "failed to create whole tree")), + } + match fs::create_dir(path) { + Ok(()) => Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), + Err(e) => Err(e), + } +} diff --git a/src/librustc/util/nodemap.rs b/src/librustc/util/nodemap.rs index 4a45797602d81..b03011fcb216d 100644 --- a/src/librustc/util/nodemap.rs +++ b/src/librustc/util/nodemap.rs @@ -12,20 +12,20 @@ #![allow(non_snake_case)] -use middle::def_id::DefId; +use hir::def_id::DefId; use syntax::ast; -pub use rustc_data_structures::fnv::FnvHashMap; -pub use rustc_data_structures::fnv::FnvHashSet; +pub use rustc_data_structures::fx::FxHashMap; +pub use rustc_data_structures::fx::FxHashSet; -pub type NodeMap = FnvHashMap; -pub type DefIdMap = FnvHashMap; +pub type NodeMap = FxHashMap; +pub type DefIdMap = FxHashMap; -pub type NodeSet = FnvHashSet; -pub type DefIdSet = FnvHashSet; +pub type NodeSet = FxHashSet; +pub type DefIdSet = FxHashSet; -pub fn NodeMap() -> NodeMap { FnvHashMap() } -pub fn DefIdMap() -> DefIdMap { FnvHashMap() } -pub fn NodeSet() -> NodeSet { FnvHashSet() } -pub fn DefIdSet() -> DefIdSet { FnvHashSet() } +pub fn NodeMap() -> NodeMap { FxHashMap() } 
+pub fn DefIdMap() -> DefIdMap { FxHashMap() } +pub fn NodeSet() -> NodeSet { FxHashSet() } +pub fn DefIdSet() -> DefIdSet { FxHashSet() } diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs index 77e39bba54afc..b4c87e0ce426e 100644 --- a/src/librustc/util/ppaux.rs +++ b/src/librustc/util/ppaux.rs @@ -8,22 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. - -use middle::def_id::DefId; -use middle::subst::{self, Subst}; -use middle::ty::{BrAnon, BrEnv, BrFresh, BrNamed}; -use middle::ty::{TyBool, TyChar, TyStruct, TyEnum}; -use middle::ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyBareFn}; -use middle::ty::{TyParam, TyRawPtr, TyRef, TyTuple}; -use middle::ty::TyClosure; -use middle::ty::{TyBox, TyTrait, TyInt, TyUint, TyInfer}; -use middle::ty::{self, Ty, TypeFoldable}; - +use hir::def_id::DefId; +use hir::map::definitions::DefPathData; +use ty::subst::{self, Subst}; +use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; +use ty::{TyBool, TyChar, TyAdt}; +use ty::{TyError, TyStr, TyArray, TySlice, TyFloat, TyFnDef, TyFnPtr}; +use ty::{TyParam, TyRawPtr, TyRef, TyNever, TyTuple}; +use ty::{TyClosure, TyProjection, TyAnon}; +use ty::{TyBox, TyDynamic, TyInt, TyUint, TyInfer}; +use ty::{self, Ty, TyCtxt, TypeFoldable}; + +use std::cell::Cell; use std::fmt; -use syntax::{abi}; -use syntax::parse::token; +use std::usize; + +use syntax::abi::Abi; use syntax::ast::CRATE_NODE_ID; -use rustc_front::hir; +use syntax::symbol::Symbol; +use hir; pub fn verbose() -> bool { ty::tls::with(|tcx| tcx.sess.verbose()) @@ -32,166 +35,221 @@ pub fn verbose() -> bool { fn fn_sig(f: &mut fmt::Formatter, inputs: &[Ty], variadic: bool, - output: ty::FnOutput) + output: Ty) -> fmt::Result { - try!(write!(f, "(")); + write!(f, "(")?; let mut inputs = inputs.iter(); if let Some(&ty) = inputs.next() { - try!(write!(f, "{}", ty)); + write!(f, "{}", ty)?; for &ty in inputs { - try!(write!(f, ", {}", ty)); + write!(f, ", {}", 
ty)?; } if variadic { - try!(write!(f, ", ...")); + write!(f, ", ...")?; } } - try!(write!(f, ")")); - - match output { - ty::FnConverging(ty) => { - if !ty.is_nil() { - try!(write!(f, " -> {}", ty)); - } - Ok(()) - } - ty::FnDiverging => { - write!(f, " -> !") - } + write!(f, ")")?; + if !output.is_nil() { + write!(f, " -> {}", output)?; } + + Ok(()) } -fn parameterized(f: &mut fmt::Formatter, +pub fn parameterized(f: &mut fmt::Formatter, substs: &subst::Substs, - did: DefId, - projections: &[ty::ProjectionPredicate], - get_generics: GG) - -> fmt::Result - where GG: for<'tcx> FnOnce(&ty::ctxt<'tcx>) -> ty::Generics<'tcx> -{ - let (fn_trait_kind, verbose) = try!(ty::tls::with(|tcx| { - try!(write!(f, "{}", tcx.item_path_str(did))); - Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose())) - })); - - let mut empty = true; - let mut start_or_continue = |f: &mut fmt::Formatter, start: &str, cont: &str| { - if empty { - empty = false; - write!(f, "{}", start) - } else { - write!(f, "{}", cont) - } + mut did: DefId, + projections: &[ty::ProjectionPredicate]) + -> fmt::Result { + let key = ty::tls::with(|tcx| tcx.def_key(did)); + let mut item_name = if let Some(name) = key.disambiguated_data.data.get_opt_name() { + Some(name) + } else { + did.index = key.parent.unwrap_or_else( + || bug!("finding type for {:?}, encountered def-id {:?} with no parent", + did, did)); + parameterized(f, substs, did, projections)?; + return write!(f, "::{}", key.disambiguated_data.data.as_interned_str()); }; - if verbose { - match substs.regions { - subst::ErasedRegions => { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "..")); - } - subst::NonerasedRegions(ref regions) => { - for region in regions { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "{:?}", region)); + let mut verbose = false; + let mut num_supplied_defaults = 0; + let mut has_self = false; + let mut num_regions = 0; + let mut num_types = 0; + let mut is_value_path = false; + let fn_trait_kind = 
ty::tls::with(|tcx| { + // Unfortunately, some kinds of items (e.g., closures) don't have + // generics. So walk back up the find the closest parent that DOES + // have them. + let mut item_def_id = did; + loop { + let key = tcx.def_key(item_def_id); + match key.disambiguated_data.data { + DefPathData::TypeNs(_) => { + break; + } + DefPathData::ValueNs(_) | DefPathData::EnumVariant(_) => { + is_value_path = true; + break; + } + _ => { + // if we're making a symbol for something, there ought + // to be a value or type-def or something in there + // *somewhere* + item_def_id.index = key.parent.unwrap_or_else(|| { + bug!("finding type for {:?}, encountered def-id {:?} with no \ + parent", did, item_def_id); + }); } } } - for &ty in &substs.types { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "{}", ty)); + let mut generics = tcx.item_generics(item_def_id); + let mut path_def_id = did; + verbose = tcx.sess.verbose(); + has_self = generics.has_self; + + let mut child_types = 0; + if let Some(def_id) = generics.parent { + // Methods. + assert!(is_value_path); + child_types = generics.types.len(); + generics = tcx.item_generics(def_id); + num_regions = generics.regions.len(); + num_types = generics.types.len(); + + if has_self { + write!(f, "<{} as ", substs.type_at(0))?; + } + + path_def_id = def_id; + } else { + item_name = None; + + if is_value_path { + // Functions. + assert_eq!(has_self, false); + } else { + // Types and traits. 
+ num_regions = generics.regions.len(); + num_types = generics.types.len(); + } } - for projection in projections { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "{}={}", - projection.projection_ty.item_name, - projection.ty)); + + if !verbose { + if generics.types.last().map_or(false, |def| def.default.is_some()) { + if let Some(substs) = tcx.lift(&substs) { + let tps = substs.types().rev().skip(child_types); + for (def, actual) in generics.types.iter().rev().zip(tps) { + if def.default.subst(tcx, substs) != Some(actual) { + break; + } + num_supplied_defaults += 1; + } + } + } } - return start_or_continue(f, "", ">"); - } - if fn_trait_kind.is_some() && projections.len() == 1 { + write!(f, "{}", tcx.item_path_str(path_def_id))?; + Ok(tcx.lang_items.fn_trait_kind(path_def_id)) + })?; + + if !verbose && fn_trait_kind.is_some() && projections.len() == 1 { let projection_ty = projections[0].ty; - if let TyTuple(ref args) = substs.types.get_slice(subst::TypeSpace)[0].sty { - return fn_sig(f, args, false, ty::FnConverging(projection_ty)); + if let TyTuple(ref args) = substs.type_at(1).sty { + return fn_sig(f, args, false, projection_ty); } } - match substs.regions { - subst::ErasedRegions => { } - subst::NonerasedRegions(ref regions) => { - for &r in regions { - try!(start_or_continue(f, "<", ", ")); - let s = r.to_string(); + let empty = Cell::new(true); + let start_or_continue = |f: &mut fmt::Formatter, start: &str, cont: &str| { + if empty.get() { + empty.set(false); + write!(f, "{}", start) + } else { + write!(f, "{}", cont) + } + }; + + let print_regions = |f: &mut fmt::Formatter, start: &str, skip, count| { + // Don't print any regions if they're all erased. 
+ let regions = || substs.regions().skip(skip).take(count); + if regions().all(|r: &ty::Region| *r == ty::ReErased) { + return Ok(()); + } + + for region in regions() { + let region: &ty::Region = region; + start_or_continue(f, start, ", ")?; + if verbose { + write!(f, "{:?}", region)?; + } else { + let s = region.to_string(); if s.is_empty() { // This happens when the value of the region // parameter is not easily serialized. This may be // because the user omitted it in the first place, // or because it refers to some block in the code, // etc. I'm not sure how best to serialize this. - try!(write!(f, "'_")); + write!(f, "'_")?; } else { - try!(write!(f, "{}", s)); + write!(f, "{}", s)?; } } } - } - // It is important to execute this conditionally, only if -Z - // verbose is false. Otherwise, debug logs can sometimes cause - // ICEs trying to fetch the generics early in the pipeline. This - // is kind of a hacky workaround in that -Z verbose is required to - // avoid those ICEs. - let tps = substs.types.get_slice(subst::TypeSpace); - let num_defaults = ty::tls::with(|tcx| { - let generics = get_generics(tcx); - - let has_self = substs.self_ty().is_some(); - let ty_params = generics.types.get_slice(subst::TypeSpace); - if ty_params.last().map_or(false, |def| def.default.is_some()) { - let substs = tcx.lift(&substs); - ty_params.iter().zip(tps).rev().take_while(|&(def, &actual)| { - match def.default { - Some(default) => { - if !has_self && default.has_self_ty() { - // In an object type, there is no `Self`, and - // thus if the default value references Self, - // the user will be required to give an - // explicit value. We can't even do the - // substitution below to check without causing - // an ICE. (#18956). 
- false - } else { - let default = tcx.lift(&default); - substs.and_then(|substs| default.subst(tcx, substs)) == Some(actual) - } - } - None => false - } - }).count() - } else { - 0 - } - }); + Ok(()) + }; + + print_regions(f, "<", 0, num_regions)?; - for &ty in &tps[..tps.len() - num_defaults] { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "{}", ty)); + let tps = substs.types().take(num_types - num_supplied_defaults) + .skip(has_self as usize); + + for ty in tps { + start_or_continue(f, "<", ", ")?; + write!(f, "{}", ty)?; } for projection in projections { - try!(start_or_continue(f, "<", ", ")); - try!(write!(f, "{}={}", - projection.projection_ty.item_name, - projection.ty)); + start_or_continue(f, "<", ", ")?; + write!(f, "{}={}", + projection.projection_ty.item_name, + projection.ty)?; } - start_or_continue(f, "", ">") + start_or_continue(f, "", ">")?; + + // For values, also print their name and type parameters. + if is_value_path { + empty.set(true); + + if has_self { + write!(f, ">")?; + } + + if let Some(item_name) = item_name { + write!(f, "::{}", item_name)?; + } + + print_regions(f, "::<", num_regions, usize::MAX)?; + + // FIXME: consider being smart with defaults here too + for ty in substs.types().skip(num_types) { + start_or_continue(f, "::<", ", ")?; + write!(f, "{}", ty)?; + } + + start_or_continue(f, "", ">")?; + } + + Ok(()) } -fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter, - tcx: &ty::ctxt<'tcx>, - original: &ty::Binder, - lifted: Option>) -> fmt::Result +fn in_binder<'a, 'gcx, 'tcx, T, U>(f: &mut fmt::Formatter, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + original: &ty::Binder, + lifted: Option>) -> fmt::Result where T: fmt::Display, U: fmt::Display + TypeFoldable<'tcx> { // Replace any anonymous late-bound regions with named @@ -217,89 +275,53 @@ fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter, let new_value = tcx.replace_late_bound_regions(&value, |br| { let _ = start_or_continue(f, "for<", ", "); - 
ty::ReLateBound(ty::DebruijnIndex::new(1), match br { - ty::BrNamed(_, name) => { + let br = match br { + ty::BrNamed(_, name, _) => { let _ = write!(f, "{}", name); br } ty::BrAnon(_) | ty::BrFresh(_) | ty::BrEnv => { - let name = token::intern("'r"); + let name = Symbol::intern("'r"); let _ = write!(f, "{}", name); - ty::BrNamed(tcx.map.local_def_id(CRATE_NODE_ID), name) + ty::BrNamed(tcx.map.local_def_id(CRATE_NODE_ID), + name, + ty::Issue32330::WontChange) } - }) + }; + tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), br)) }).0; - try!(start_or_continue(f, "", "> ")); + start_or_continue(f, "", "> ")?; write!(f, "{}", new_value) } -/// This curious type is here to help pretty-print trait objects. In -/// a trait object, the projections are stored separately from the -/// main trait bound, but in fact we want to package them together -/// when printing out; they also have separate binders, but we want -/// them to share a binder when we print them out. (And the binder -/// pretty-printing logic is kind of clever and we don't want to -/// reproduce it.) So we just repackage up the structure somewhat. -/// -/// Right now there is only one trait in an object that can have -/// projection bounds, so we just stuff them altogether. But in -/// reality we should eventually sort things out better. 
-#[derive(Clone, Debug)] -struct TraitAndProjections<'tcx>(ty::TraitRef<'tcx>, Vec>); - -impl<'tcx> TypeFoldable<'tcx> for TraitAndProjections<'tcx> { - fn super_fold_with>(&self, folder: &mut F) -> Self { - TraitAndProjections(self.0.fold_with(folder), self.1.fold_with(folder)) - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - self.0.visit_with(visitor) || self.1.visit_with(visitor) - } -} - -impl<'tcx> fmt::Display for TraitAndProjections<'tcx> { +impl<'tcx> fmt::Display for &'tcx ty::Slice> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self; - parameterized(f, trait_ref.substs, - trait_ref.def_id, - projection_bounds, - |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone()) - } -} - -impl<'tcx> fmt::Display for ty::TraitTy<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let bounds = &self.bounds; - // Generate the main trait ref, including associated types. - try!(ty::tls::with(|tcx| { - let principal = tcx.lift(&self.principal.0) - .expect("could not lift TraitRef for printing"); - let projections = tcx.lift(&bounds.projection_bounds[..]) - .expect("could not lift projections for printing"); - let projections = projections.into_iter().map(|p| p.0).collect(); - - let tap = ty::Binder(TraitAndProjections(principal, projections)); - in_binder(f, tcx, &ty::Binder(""), Some(tap)) - })); - - // Builtin bounds. - for bound in &bounds.builtin_bounds { - try!(write!(f, " + {:?}", bound)); - } + ty::tls::with(|tcx| { + // Use a type that can't appear in defaults of type parameters. 
+ let dummy_self = tcx.mk_infer(ty::FreshTy(0)); + + if let Some(p) = self.principal() { + let principal = tcx.lift(&p).expect("could not lift TraitRef for printing") + .with_self_ty(tcx, dummy_self); + let projections = self.projection_bounds().map(|p| { + tcx.lift(&p) + .expect("could not lift projection for printing") + .with_self_ty(tcx, dummy_self) + }).collect::>(); + parameterized(f, principal.substs, principal.def_id, &projections)?; + } - // FIXME: It'd be nice to compute from context when this bound - // is implied, but that's non-trivial -- we'd perhaps have to - // use thread-local data of some kind? There are also - // advantages to just showing the region, since it makes - // people aware that it's there. - let bound = bounds.region_bound.to_string(); - if !bound.is_empty() { - try!(write!(f, " + {}", bound)); - } + // Builtin bounds. + for did in self.auto_traits() { + write!(f, " + {}", tcx.item_path_str(did))?; + } + + Ok(()) + })?; Ok(()) } @@ -307,19 +329,19 @@ impl<'tcx> fmt::Display for ty::TraitTy<'tcx> { impl<'tcx> fmt::Debug for ty::TypeParameterDef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TypeParameterDef({}, {:?}, {:?}/{})", + write!(f, "TypeParameterDef({}, {:?}, {})", self.name, self.def_id, - self.space, self.index) + self.index) } } -impl fmt::Debug for ty::RegionParameterDef { +impl<'tcx> fmt::Debug for ty::RegionParameterDef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "RegionParameterDef({}, {:?}, {:?}/{}, {:?})", + write!(f, "RegionParameterDef({}, {:?}, {}, {:?})", self.name, self.def_id, - self.space, self.index, + self.index, self.bounds) } } @@ -338,83 +360,53 @@ impl<'tcx> fmt::Display for ty::TypeAndMut<'tcx> { } } -impl<'tcx> fmt::Debug for subst::Substs<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Substs[types={:?}, regions={:?}]", - self.types, self.regions) - } -} - impl<'tcx> fmt::Debug for ty::ItemSubsts<'tcx> { fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ItemSubsts({:?})", self.substs) } } -impl fmt::Debug for subst::RegionSubsts { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - subst::ErasedRegions => write!(f, "erased"), - subst::NonerasedRegions(ref regions) => write!(f, "{:?}", regions) - } - } -} - impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // when printing out the debug representation, we don't need // to enumerate the `for<...>` etc because the debruijn index // tells you everything you need to know. - match self.substs.self_ty() { - None => write!(f, "{}", *self), - Some(self_ty) => write!(f, "<{:?} as {}>", self_ty, *self) - } + write!(f, "<{:?} as {}>", self.self_ty(), *self) } } -impl<'tcx> fmt::Debug for ty::TraitDef<'tcx> { +impl<'tcx> fmt::Debug for ty::ExistentialTraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TraitDef(generics={:?}, trait_ref={:?})", - self.generics, self.trait_ref) + ty::tls::with(|tcx| { + let dummy_self = tcx.mk_infer(ty::FreshTy(0)); + + let trait_ref = tcx.lift(&ty::Binder(*self)) + .expect("could not lift TraitRef for printing") + .with_self_ty(tcx, dummy_self).0; + parameterized(f, trait_ref.substs, trait_ref.def_id, &[]) + }) } } -impl<'tcx, 'container> fmt::Debug for ty::AdtDefData<'tcx, 'container> { +impl fmt::Debug for ty::TraitDef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| { - write!(f, "{}", tcx.item_path_str(self.did)) + write!(f, "{}", tcx.item_path_str(self.def_id)) }) } } -impl<'tcx> fmt::Debug for ty::adjustment::AutoAdjustment<'tcx> { +impl fmt::Debug for ty::AdtDef { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ty::adjustment::AdjustReifyFnPointer => { - write!(f, "AdjustReifyFnPointer") - } - ty::adjustment::AdjustUnsafeFnPointer => { - write!(f, "AdjustUnsafeFnPointer") - } - ty::adjustment::AdjustDerefRef(ref data) 
=> { - write!(f, "{:?}", data) - } - } + ty::tls::with(|tcx| { + write!(f, "{}", tcx.item_path_str(self.did)) + }) } } -impl<'tcx> fmt::Debug for ty::adjustment::AutoDerefRef<'tcx> { +impl<'tcx> fmt::Debug for ty::adjustment::Adjustment<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "AutoDerefRef({}, unsize={:?}, {:?})", - self.autoderefs, self.unsize, self.autoref) - } -} - -impl<'tcx> fmt::Debug for ty::TraitTy<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TraitTy({:?},{:?})", - self.principal, - self.bounds) + write!(f, "{:?} -> {}", self.kind, self.target) } } @@ -430,6 +422,9 @@ impl<'tcx> fmt::Debug for ty::Predicate<'tcx> { ty::Predicate::ObjectSafe(trait_def_id) => { write!(f, "ObjectSafe({:?})", trait_def_id) } + ty::Predicate::ClosureKind(closure_def_id, kind) => { + write!(f, "ClosureKind({:?}, {:?})", closure_def_id, kind) + } } } } @@ -441,7 +436,7 @@ impl fmt::Display for ty::BoundRegion { } match *self { - BrNamed(_, name) => write!(f, "{}", name), + BrNamed(_, name, _) => write!(f, "{}", name), BrAnon(_) | BrFresh(_) | BrEnv => Ok(()) } } @@ -452,8 +447,9 @@ impl fmt::Debug for ty::BoundRegion { match *self { BrAnon(n) => write!(f, "BrAnon({:?})", n), BrFresh(n) => write!(f, "BrFresh({:?})", n), - BrNamed(did, name) => { - write!(f, "BrNamed({:?}, {:?})", did, name) + BrNamed(did, name, issue32330) => { + write!(f, "BrNamed({:?}:{:?}, {:?}, {:?})", + did.krate, did.index, name, issue32330) } BrEnv => "BrEnv".fmt(f), } @@ -464,8 +460,7 @@ impl fmt::Debug for ty::Region { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ty::ReEarlyBound(ref data) => { - write!(f, "ReEarlyBound({:?}, {}, {})", - data.space, + write!(f, "ReEarlyBound({}, {})", data.index, data.name) } @@ -492,7 +487,9 @@ impl fmt::Debug for ty::Region { write!(f, "ReSkolemized({}, {:?})", id.index, bound_region) } - ty::ReEmpty => write!(f, "ReEmpty") + ty::ReEmpty => write!(f, "ReEmpty"), + + ty::ReErased 
=> write!(f, "ReErased") } } } @@ -514,7 +511,7 @@ impl<'tcx> fmt::Debug for ty::ClosureUpvar<'tcx> { } } -impl<'a, 'tcx> fmt::Debug for ty::ParameterEnvironment<'a, 'tcx> { +impl<'tcx> fmt::Debug for ty::ParameterEnvironment<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ParameterEnvironment(\ free_substs={:?}, \ @@ -526,7 +523,7 @@ impl<'a, 'tcx> fmt::Debug for ty::ParameterEnvironment<'a, 'tcx> { } } -impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault { +impl<'tcx> fmt::Debug for ty::ObjectLifetimeDefault<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { ty::ObjectLifetimeDefault::Ambiguous => write!(f, "Ambiguous"), @@ -556,7 +553,8 @@ impl fmt::Display for ty::Region { write!(f, "{}", br) } ty::ReScope(_) | - ty::ReVar(_) => Ok(()), + ty::ReVar(_) | + ty::ReErased => Ok(()), ty::ReStatic => write!(f, "'static"), ty::ReEmpty => write!(f, "'"), } @@ -581,13 +579,6 @@ impl fmt::Debug for ty::Variance { } } -impl fmt::Debug for ty::ItemVariances { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "ItemVariances(types={:?}, regions={:?})", - self.types, self.regions) - } -} - impl<'tcx> fmt::Debug for ty::GenericPredicates<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "GenericPredicates({:?})", self.predicates) @@ -601,70 +592,13 @@ impl<'tcx> fmt::Debug for ty::InstantiatedPredicates<'tcx> { } } -impl<'tcx> fmt::Debug for ty::ImplOrTraitItem<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "ImplOrTraitItem(")); - try!(match *self { - ty::ImplOrTraitItem::MethodTraitItem(ref i) => write!(f, "{:?}", i), - ty::ImplOrTraitItem::ConstTraitItem(ref i) => write!(f, "{:?}", i), - ty::ImplOrTraitItem::TypeTraitItem(ref i) => write!(f, "{:?}", i), - }); - write!(f, ")") - } -} - impl<'tcx> fmt::Display for ty::FnSig<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "fn")); + write!(f, "fn")?; fn_sig(f, 
&self.inputs, self.variadic, self.output) } } -impl<'tcx> fmt::Debug for ty::ExistentialBounds<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut empty = true; - let mut maybe_continue = |f: &mut fmt::Formatter| { - if empty { - empty = false; - Ok(()) - } else { - write!(f, " + ") - } - }; - - let region_str = format!("{:?}", self.region_bound); - if !region_str.is_empty() { - try!(maybe_continue(f)); - try!(write!(f, "{}", region_str)); - } - - for bound in &self.builtin_bounds { - try!(maybe_continue(f)); - try!(write!(f, "{:?}", bound)); - } - - for projection_bound in &self.projection_bounds { - try!(maybe_continue(f)); - try!(write!(f, "{:?}", projection_bound)); - } - - Ok(()) - } -} - -impl fmt::Display for ty::BuiltinBounds { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut bounds = self.iter(); - if let Some(bound) = bounds.next() { - try!(write!(f, "{:?}", bound)); - for bound in bounds { - try!(write!(f, " + {:?}", bound)); - } - } - Ok(()) - } -} - impl fmt::Debug for ty::TyVid { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "_#{}t", self.index) @@ -728,6 +662,12 @@ impl fmt::Debug for ty::IntVarValue { } }*/ +impl<'tcx> fmt::Display for ty::Binder<&'tcx ty::Slice>> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) + } +} + impl<'tcx> fmt::Display for ty::Binder> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) @@ -752,13 +692,14 @@ impl<'tcx> fmt::Display for ty::Binder> { } } -impl<'tcx> fmt::Display for ty::Binder, ty::Region>> { +impl<'tcx> fmt::Display for ty::Binder, &'tcx ty::Region>> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) } } -impl fmt::Display for ty::Binder> { +impl<'tcx> fmt::Display for ty::Binder> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 
ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) } @@ -766,8 +707,7 @@ impl fmt::Display for ty::Binder> impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - parameterized(f, self.substs, self.def_id, &[], - |tcx| tcx.lookup_trait_def(self.def_id).generics.clone()) + parameterized(f, self.substs, self.def_id, &[]) } } @@ -787,88 +727,135 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> { }, tm.ty) } TyRef(r, ref tm) => { - try!(write!(f, "&")); + write!(f, "&")?; let s = r.to_string(); - try!(write!(f, "{}", s)); + write!(f, "{}", s)?; if !s.is_empty() { - try!(write!(f, " ")); + write!(f, " ")?; } write!(f, "{}", tm) } + TyNever => write!(f, "!"), TyTuple(ref tys) => { - try!(write!(f, "(")); + write!(f, "(")?; let mut tys = tys.iter(); if let Some(&ty) = tys.next() { - try!(write!(f, "{},", ty)); + write!(f, "{},", ty)?; if let Some(&ty) = tys.next() { - try!(write!(f, " {}", ty)); + write!(f, " {}", ty)?; for &ty in tys { - try!(write!(f, ", {}", ty)); + write!(f, ", {}", ty)?; } } } write!(f, ")") } - TyBareFn(opt_def_id, ref bare_fn) => { + TyFnDef(def_id, substs, ref bare_fn) => { if bare_fn.unsafety == hir::Unsafety::Unsafe { - try!(write!(f, "unsafe ")); + write!(f, "unsafe ")?; } - if bare_fn.abi != abi::Rust { - try!(write!(f, "extern {} ", bare_fn.abi)); + if bare_fn.abi != Abi::Rust { + write!(f, "extern {} ", bare_fn.abi)?; } - try!(write!(f, "{}", bare_fn.sig.0)); + write!(f, "{} {{", bare_fn.sig.0)?; + parameterized(f, substs, def_id, &[])?; + write!(f, "}}") + } + TyFnPtr(ref bare_fn) => { + if bare_fn.unsafety == hir::Unsafety::Unsafe { + write!(f, "unsafe ")?; + } - if let Some(def_id) = opt_def_id { - try!(write!(f, " {{{}}}", ty::tls::with(|tcx| { - tcx.item_path_str(def_id) - }))); + if bare_fn.abi != Abi::Rust { + write!(f, "extern {} ", bare_fn.abi)?; } - Ok(()) + + write!(f, "{}", bare_fn.sig.0) } TyInfer(infer_ty) => write!(f, "{}", infer_ty), TyError => write!(f, "[type 
error]"), TyParam(ref param_ty) => write!(f, "{}", param_ty), - TyEnum(def, substs) | TyStruct(def, substs) => { + TyAdt(def, substs) => { ty::tls::with(|tcx| { if def.did.is_local() && - !tcx.tcache.borrow().contains_key(&def.did) { + !tcx.item_types.borrow().contains_key(&def.did) { write!(f, "{}<..>", tcx.item_path_str(def.did)) } else { - parameterized(f, substs, def.did, &[], - |tcx| tcx.lookup_item_type(def.did).generics) + parameterized(f, substs, def.did, &[]) } }) } - TyTrait(ref data) => write!(f, "{}", data), - ty::TyProjection(ref data) => write!(f, "{}", data), + TyDynamic(data, r) => { + write!(f, "{}", data)?; + let r = r.to_string(); + if !r.is_empty() { + write!(f, " + {}", r) + } else { + Ok(()) + } + } + TyProjection(ref data) => write!(f, "{}", data), + TyAnon(def_id, substs) => { + ty::tls::with(|tcx| { + // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, + // by looking up the projections associated with the def_id. + let item_predicates = tcx.item_predicates(def_id); + let substs = tcx.lift(&substs).unwrap_or_else(|| { + tcx.intern_substs(&[]) + }); + let bounds = item_predicates.instantiate(tcx, substs); + + let mut first = true; + let mut is_sized = false; + write!(f, "impl")?; + for predicate in bounds.predicates { + if let Some(trait_ref) = predicate.to_opt_poly_trait_ref() { + // Don't print +Sized, but rather +?Sized if absent. 
+ if Some(trait_ref.def_id()) == tcx.lang_items.sized_trait() { + is_sized = true; + continue; + } + + write!(f, "{}{}", if first { " " } else { "+" }, trait_ref)?; + first = false; + } + } + if !is_sized { + write!(f, "{}?Sized", if first { " " } else { "+" })?; + } + Ok(()) + }) + } TyStr => write!(f, "str"), - TyClosure(did, ref substs) => ty::tls::with(|tcx| { - try!(write!(f, "[closure")); + TyClosure(did, substs) => ty::tls::with(|tcx| { + let upvar_tys = substs.upvar_tys(did, tcx); + write!(f, "[closure")?; if let Some(node_id) = tcx.map.as_local_node_id(did) { - try!(write!(f, "@{:?}", tcx.map.span(node_id))); + write!(f, "@{:?}", tcx.map.span(node_id))?; let mut sep = " "; - try!(tcx.with_freevars(node_id, |freevars| { - for (freevar, upvar_ty) in freevars.iter().zip(&substs.upvar_tys) { - let node_id = freevar.def.var_id(); - try!(write!(f, + tcx.with_freevars(node_id, |freevars| { + for (freevar, upvar_ty) in freevars.iter().zip(upvar_tys) { + let def_id = freevar.def.def_id(); + let node_id = tcx.map.as_local_node_id(def_id).unwrap(); + write!(f, "{}{}:{}", sep, tcx.local_var_name_str(node_id), - upvar_ty)); + upvar_ty)?; sep = ", "; } Ok(()) - })) + })? } else { // cross-crate closure types should only be // visible in trans bug reports, I imagine. 
- try!(write!(f, "@{:?}", did)); + write!(f, "@{:?}", did)?; let mut sep = " "; - for (index, upvar_ty) in substs.upvar_tys.iter().enumerate() { - try!(write!(f, "{}{}:{}", sep, index, upvar_ty)); + for (index, upvar_ty) in upvar_tys.enumerate() { + write!(f, "{}{}:{}", sep, index, upvar_ty)?; sep = ", "; } } @@ -896,7 +883,7 @@ impl fmt::Debug for ty::UpvarId { } } -impl fmt::Debug for ty::UpvarBorrow { +impl<'tcx> fmt::Debug for ty::UpvarBorrow<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "UpvarBorrow({:?}, {:?})", self.kind, self.region) @@ -910,7 +897,9 @@ impl fmt::Display for ty::InferTy { ty::TyVar(ref vid) if print_var_ids => write!(f, "{:?}", vid), ty::IntVar(ref vid) if print_var_ids => write!(f, "{:?}", vid), ty::FloatVar(ref vid) if print_var_ids => write!(f, "{:?}", vid), - ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => write!(f, "_"), + ty::TyVar(_) => write!(f, "_"), + ty::IntVar(_) => write!(f, "{}", "{integer}"), + ty::FloatVar(_) => write!(f, "{}", "{float}"), ty::FreshTy(v) => write!(f, "FreshTy({})", v), ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v), ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v) @@ -918,20 +907,6 @@ impl fmt::Display for ty::InferTy { } } -impl fmt::Display for ty::ExplicitSelfCategory { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.write_str(match *self { - ty::ExplicitSelfCategory::Static => "static", - ty::ExplicitSelfCategory::ByValue => "self", - ty::ExplicitSelfCategory::ByReference(_, hir::MutMutable) => { - "&mut self" - } - ty::ExplicitSelfCategory::ByReference(_, hir::MutImmutable) => "&self", - ty::ExplicitSelfCategory::ByBox => "Box", - }) - } -} - impl fmt::Display for ty::ParamTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.name) @@ -940,7 +915,7 @@ impl fmt::Display for ty::ParamTy { impl fmt::Debug for ty::ParamTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}/{:?}.{}", self, self.space, 
self.idx) + write!(f, "{}/#{}", self, self.idx) } } @@ -967,9 +942,7 @@ impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> { impl<'tcx> fmt::Display for ty::TraitPredicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{} : {}", - self.trait_ref.self_ty(), - self.trait_ref) + write!(f, "{}: {}", self.trait_ref.self_ty(), self.trait_ref) } } @@ -997,6 +970,16 @@ impl<'tcx> fmt::Display for ty::ProjectionTy<'tcx> { } } +impl fmt::Display for ty::ClosureKind { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ty::ClosureKind::Fn => write!(f, "Fn"), + ty::ClosureKind::FnMut => write!(f, "FnMut"), + ty::ClosureKind::FnOnce => write!(f, "FnOnce"), + } + } +} + impl<'tcx> fmt::Display for ty::Predicate<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -1010,6 +993,11 @@ impl<'tcx> fmt::Display for ty::Predicate<'tcx> { ty::tls::with(|tcx| { write!(f, "the trait `{}` is object-safe", tcx.item_path_str(trait_def_id)) }), + ty::Predicate::ClosureKind(closure_def_id, kind) => + ty::tls::with(|tcx| { + write!(f, "the closure `{}` implements the trait `{}`", + tcx.item_path_str(closure_def_id), kind) + }), } } } diff --git a/src/librustc_back/Cargo.toml b/src/librustc_back/Cargo.toml new file mode 100644 index 0000000000000..85e861b405a9f --- /dev/null +++ b/src/librustc_back/Cargo.toml @@ -0,0 +1,17 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_back" +version = "0.0.0" + +[lib] +name = "rustc_back" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +syntax = { path = "../libsyntax" } +serialize = { path = "../libserialize" } +log = { path = "../liblog" } + +[features] +jemalloc = [] diff --git a/src/librustc_back/abi.rs b/src/librustc_back/abi.rs deleted file mode 100644 index c3a3a8d582aff..0000000000000 --- a/src/librustc_back/abi.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -pub const BOX_FIELD_DROP_GLUE: usize = 1; -pub const BOX_FIELD_BODY: usize = 4; - -/// The first half of a fat pointer. -/// - For a closure, this is the code address. -/// - For an object or trait instance, this is the address of the box. -/// - For a slice, this is the base address. -pub const FAT_PTR_ADDR: usize = 0; - -/// The second half of a fat pointer. -/// - For a closure, this is the address of the environment. -/// - For an object or trait instance, this is the address of the vtable. -/// - For a slice, this is the length. -pub const FAT_PTR_EXTRA: usize = 1; diff --git a/src/librustc_back/dynamic_lib.rs b/src/librustc_back/dynamic_lib.rs new file mode 100644 index 0000000000000..38e60060925e6 --- /dev/null +++ b/src/librustc_back/dynamic_lib.rs @@ -0,0 +1,318 @@ +// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Dynamic library facilities. +//! +//! A simple wrapper over the platform's dynamic library facilities + +use std::env; +use std::ffi::{CString, OsString}; +use std::path::{Path, PathBuf}; + +pub struct DynamicLibrary { + handle: *mut u8 +} + +impl Drop for DynamicLibrary { + fn drop(&mut self) { + unsafe { + dl::close(self.handle) + } + } +} + +impl DynamicLibrary { + /// Lazily open a dynamic library. 
When passed None it gives a + /// handle to the calling process + pub fn open(filename: Option<&Path>) -> Result { + let maybe_library = dl::open(filename.map(|path| path.as_os_str())); + + // The dynamic library must not be constructed if there is + // an error opening the library so the destructor does not + // run. + match maybe_library { + Err(err) => Err(err), + Ok(handle) => Ok(DynamicLibrary { handle: handle }) + } + } + + /// Prepends a path to this process's search path for dynamic libraries + pub fn prepend_search_path(path: &Path) { + let mut search_path = DynamicLibrary::search_path(); + search_path.insert(0, path.to_path_buf()); + env::set_var(DynamicLibrary::envvar(), &DynamicLibrary::create_path(&search_path)); + } + + /// From a slice of paths, create a new vector which is suitable to be an + /// environment variable for this platforms dylib search path. + pub fn create_path(path: &[PathBuf]) -> OsString { + let mut newvar = OsString::new(); + for (i, path) in path.iter().enumerate() { + if i > 0 { newvar.push(DynamicLibrary::separator()); } + newvar.push(path); + } + return newvar; + } + + /// Returns the environment variable for this process's dynamic library + /// search path + pub fn envvar() -> &'static str { + if cfg!(windows) { + "PATH" + } else if cfg!(target_os = "macos") { + "DYLD_LIBRARY_PATH" + } else { + "LD_LIBRARY_PATH" + } + } + + fn separator() -> &'static str { + if cfg!(windows) { ";" } else { ":" } + } + + /// Returns the current search path for dynamic libraries being used by this + /// process + pub fn search_path() -> Vec { + match env::var_os(DynamicLibrary::envvar()) { + Some(var) => env::split_paths(&var).collect(), + None => Vec::new(), + } + } + + /// Accesses the value at the symbol of the dynamic library. 
+ pub unsafe fn symbol(&self, symbol: &str) -> Result<*mut T, String> { + // This function should have a lifetime constraint of 'a on + // T but that feature is still unimplemented + + let raw_string = CString::new(symbol).unwrap(); + let maybe_symbol_value = dl::symbol(self.handle, raw_string.as_ptr()); + + // The value must not be constructed if there is an error so + // the destructor does not run. + match maybe_symbol_value { + Err(err) => Err(err), + Ok(symbol_value) => Ok(symbol_value as *mut T) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use libc; + use std::mem; + + #[test] + fn test_loading_cosine() { + if cfg!(windows) { + return + } + + // The math library does not need to be loaded since it is already + // statically linked in + let libm = match DynamicLibrary::open(None) { + Err(error) => panic!("Could not load self as module: {}", error), + Ok(libm) => libm + }; + + let cosine: extern fn(libc::c_double) -> libc::c_double = unsafe { + match libm.symbol("cos") { + Err(error) => panic!("Could not load function cos: {}", error), + Ok(cosine) => mem::transmute::<*mut u8, _>(cosine) + } + }; + + let argument = 0.0; + let expected_result = 1.0; + let result = cosine(argument); + if result != expected_result { + panic!("cos({}) != {} but equaled {} instead", argument, + expected_result, result) + } + } + + #[test] + fn test_errors_do_not_crash() { + use std::path::Path; + + if !cfg!(unix) { + return + } + + // Open /dev/null as a library to get an error, and make sure + // that only causes an error, and not a crash. 
+ let path = Path::new("/dev/null"); + match DynamicLibrary::open(Some(&path)) { + Err(_) => {} + Ok(_) => panic!("Successfully opened the empty library.") + } + } +} + +#[cfg(unix)] +mod dl { + use libc; + use std::ffi::{CStr, OsStr, CString}; + use std::os::unix::prelude::*; + use std::ptr; + use std::str; + + pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { + check_for_errors_in(|| { + unsafe { + match filename { + Some(filename) => open_external(filename), + None => open_internal(), + } + } + }) + } + + const LAZY: libc::c_int = 1; + + unsafe fn open_external(filename: &OsStr) -> *mut u8 { + let s = CString::new(filename.as_bytes()).unwrap(); + libc::dlopen(s.as_ptr(), LAZY) as *mut u8 + } + + unsafe fn open_internal() -> *mut u8 { + libc::dlopen(ptr::null(), LAZY) as *mut u8 + } + + pub fn check_for_errors_in(f: F) -> Result where + F: FnOnce() -> T, + { + use std::sync::{Mutex, Once, ONCE_INIT}; + static INIT: Once = ONCE_INIT; + static mut LOCK: *mut Mutex<()> = 0 as *mut _; + unsafe { + INIT.call_once(|| { + LOCK = Box::into_raw(Box::new(Mutex::new(()))); + }); + // dlerror isn't thread safe, so we need to lock around this entire + // sequence + let _guard = (*LOCK).lock(); + let _old_error = libc::dlerror(); + + let result = f(); + + let last_error = libc::dlerror() as *const _; + let ret = if ptr::null() == last_error { + Ok(result) + } else { + let s = CStr::from_ptr(last_error).to_bytes(); + Err(str::from_utf8(s).unwrap().to_owned()) + }; + + ret + } + } + + pub unsafe fn symbol(handle: *mut u8, + symbol: *const libc::c_char) + -> Result<*mut u8, String> { + check_for_errors_in(|| { + libc::dlsym(handle as *mut libc::c_void, symbol) as *mut u8 + }) + } + pub unsafe fn close(handle: *mut u8) { + libc::dlclose(handle as *mut libc::c_void); () + } +} + +#[cfg(windows)] +mod dl { + use std::ffi::OsStr; + use std::io; + use std::os::windows::prelude::*; + use std::ptr; + + use libc::{c_uint, c_void, c_char}; + + type DWORD = u32; + type 
HMODULE = *mut u8; + type BOOL = i32; + type LPCWSTR = *const u16; + type LPCSTR = *const i8; + + extern "system" { + fn SetThreadErrorMode(dwNewMode: DWORD, + lpOldMode: *mut DWORD) -> c_uint; + fn LoadLibraryW(name: LPCWSTR) -> HMODULE; + fn GetModuleHandleExW(dwFlags: DWORD, + name: LPCWSTR, + handle: *mut HMODULE) -> BOOL; + fn GetProcAddress(handle: HMODULE, + name: LPCSTR) -> *mut c_void; + fn FreeLibrary(handle: HMODULE) -> BOOL; + } + + pub fn open(filename: Option<&OsStr>) -> Result<*mut u8, String> { + // disable "dll load failed" error dialog. + let prev_error_mode = unsafe { + // SEM_FAILCRITICALERRORS 0x01 + let new_error_mode = 1; + let mut prev_error_mode = 0; + let result = SetThreadErrorMode(new_error_mode, + &mut prev_error_mode); + if result == 0 { + return Err(io::Error::last_os_error().to_string()) + } + prev_error_mode + }; + + let result = match filename { + Some(filename) => { + let filename_str: Vec<_> = + filename.encode_wide().chain(Some(0)).collect(); + let result = unsafe { + LoadLibraryW(filename_str.as_ptr()) + }; + ptr_result(result) + } + None => { + let mut handle = ptr::null_mut(); + let succeeded = unsafe { + GetModuleHandleExW(0 as DWORD, ptr::null(), &mut handle) + }; + if succeeded == 0 { + Err(io::Error::last_os_error().to_string()) + } else { + Ok(handle as *mut u8) + } + } + }; + + unsafe { + SetThreadErrorMode(prev_error_mode, ptr::null_mut()); + } + + result + } + + pub unsafe fn symbol(handle: *mut u8, + symbol: *const c_char) + -> Result<*mut u8, String> { + let ptr = GetProcAddress(handle as HMODULE, symbol) as *mut u8; + ptr_result(ptr) + } + + pub unsafe fn close(handle: *mut u8) { + FreeLibrary(handle as HMODULE); + } + + fn ptr_result(ptr: *mut T) -> Result<*mut T, String> { + if ptr.is_null() { + Err(io::Error::last_os_error().to_string()) + } else { + Ok(ptr) + } + } +} diff --git a/src/librustc_back/lib.rs b/src/librustc_back/lib.rs index 746d3ba07d601..3dc577b3c647a 100644 --- a/src/librustc_back/lib.rs +++ 
b/src/librustc_back/lib.rs @@ -28,26 +28,50 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(box_syntax)] +#![feature(const_fn)] #![feature(libc)] #![feature(rand)] #![feature(rustc_private)] #![feature(staged_api)] -#![feature(step_by)] -#![cfg_attr(test, feature(test, rand))] +#![cfg_attr(test, feature(rand))] extern crate syntax; extern crate libc; extern crate serialize; -extern crate rustc_llvm; -extern crate rustc_front; #[macro_use] extern crate log; -pub mod abi; +extern crate serialize as rustc_serialize; // used by deriving + pub mod tempdir; -pub mod rpath; -pub mod sha2; -pub mod svh; pub mod target; pub mod slice; +pub mod dynamic_lib; + +use serialize::json::{Json, ToJson}; + +#[derive(Clone, Copy, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)] +pub enum PanicStrategy { + Unwind, + Abort, +} + +impl PanicStrategy { + pub fn desc(&self) -> &str { + match *self { + PanicStrategy::Unwind => "unwind", + PanicStrategy::Abort => "abort", + } + } +} + +impl ToJson for PanicStrategy { + fn to_json(&self) -> Json { + match *self { + PanicStrategy::Abort => "abort".to_json(), + PanicStrategy::Unwind => "unwind".to_json(), + } + } +} diff --git a/src/librustc_back/sha2.rs b/src/librustc_back/sha2.rs deleted file mode 100644 index 840f9abce9363..0000000000000 --- a/src/librustc_back/sha2.rs +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
This module implements only the Sha256 function since that is all that is needed for internal -//! use. This implementation is not intended for external use or for any use where security is -//! important. - -use serialize::hex::ToHex; - -/// Write a u32 into a vector, which must be 4 bytes long. The value is written in big-endian -/// format. -fn write_u32_be(dst: &mut[u8], input: u32) { - dst[0] = (input >> 24) as u8; - dst[1] = (input >> 16) as u8; - dst[2] = (input >> 8) as u8; - dst[3] = input as u8; -} - -/// Read the value of a vector of bytes as a u32 value in big-endian format. -fn read_u32_be(input: &[u8]) -> u32 { - return - (input[0] as u32) << 24 | - (input[1] as u32) << 16 | - (input[2] as u32) << 8 | - (input[3] as u32); -} - -/// Read a vector of bytes into a vector of u32s. The values are read in big-endian format. -fn read_u32v_be(dst: &mut[u32], input: &[u8]) { - assert!(dst.len() * 4 == input.len()); - let mut pos = 0; - for chunk in input.chunks(4) { - dst[pos] = read_u32_be(chunk); - pos += 1; - } -} - -trait ToBits { - /// Convert the value in bytes to the number of bits, a tuple where the 1st item is the - /// high-order value and the 2nd item is the low order value. - fn to_bits(self) -> (Self, Self); -} - -impl ToBits for u64 { - fn to_bits(self) -> (u64, u64) { - return (self >> 61, self << 3); - } -} - -/// Adds the specified number of bytes to the bit count. panic!() if this would cause numeric -/// overflow. -fn add_bytes_to_bits(bits: u64, bytes: u64) -> u64 { - let (new_high_bits, new_low_bits) = bytes.to_bits(); - - if new_high_bits > 0 { - panic!("numeric overflow occurred.") - } - - match bits.checked_add(new_low_bits) { - Some(x) => return x, - None => panic!("numeric overflow occurred.") - } -} - -/// A FixedBuffer, likes its name implies, is a fixed size buffer. When the buffer becomes full, it -/// must be processed. The input() method takes care of processing and then clearing the buffer -/// automatically. 
However, other methods do not and require the caller to process the buffer. Any -/// method that modifies the buffer directory or provides the caller with bytes that can be modified -/// results in those bytes being marked as used by the buffer. -trait FixedBuffer { - /// Input a vector of bytes. If the buffer becomes full, process it with the provided - /// function and then clear the buffer. - fn input(&mut self, input: &[u8], func: F) where - F: FnMut(&[u8]); - - /// Reset the buffer. - fn reset(&mut self); - - /// Zero the buffer up until the specified index. The buffer position currently must not be - /// greater than that index. - fn zero_until(&mut self, idx: usize); - - /// Get a slice of the buffer of the specified size. There must be at least that many bytes - /// remaining in the buffer. - fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8]; - - /// Get the current buffer. The buffer must already be full. This clears the buffer as well. - fn full_buffer<'s>(&'s mut self) -> &'s [u8]; - - /// Get the current position of the buffer. - fn position(&self) -> usize; - - /// Get the number of bytes remaining in the buffer until it is full. - fn remaining(&self) -> usize; - - /// Get the size of the buffer - fn size(&self) -> usize; -} - -/// A FixedBuffer of 64 bytes useful for implementing Sha256 which has a 64 byte blocksize. -struct FixedBuffer64 { - buffer: [u8; 64], - buffer_idx: usize, -} - -impl FixedBuffer64 { - /// Create a new FixedBuffer64 - fn new() -> FixedBuffer64 { - return FixedBuffer64 { - buffer: [0; 64], - buffer_idx: 0 - }; - } -} - -impl FixedBuffer for FixedBuffer64 { - fn input(&mut self, input: &[u8], mut func: F) where - F: FnMut(&[u8]), - { - let mut i = 0; - - let size = self.size(); - - // If there is already data in the buffer, copy as much as we can into it and process - // the data if the buffer becomes full. 
- if self.buffer_idx != 0 { - let buffer_remaining = size - self.buffer_idx; - if input.len() >= buffer_remaining { - self.buffer[self.buffer_idx..size] - .clone_from_slice(&input[..buffer_remaining]); - self.buffer_idx = 0; - func(&self.buffer); - i += buffer_remaining; - } else { - self.buffer[self.buffer_idx..self.buffer_idx + input.len()] - .clone_from_slice(input); - self.buffer_idx += input.len(); - return; - } - } - - // While we have at least a full buffer size chunk's worth of data, process that data - // without copying it into the buffer - while input.len() - i >= size { - func(&input[i..i + size]); - i += size; - } - - // Copy any input data into the buffer. At this point in the method, the amount of - // data left in the input vector will be less than the buffer size and the buffer will - // be empty. - let input_remaining = input.len() - i; - self.buffer[..input_remaining].clone_from_slice(&input[i..]); - self.buffer_idx += input_remaining; - } - - fn reset(&mut self) { - self.buffer_idx = 0; - } - - fn zero_until(&mut self, idx: usize) { - assert!(idx >= self.buffer_idx); - for slot in self.buffer[self.buffer_idx..idx].iter_mut() { - *slot = 0; - } - self.buffer_idx = idx; - } - - fn next<'s>(&'s mut self, len: usize) -> &'s mut [u8] { - self.buffer_idx += len; - return &mut self.buffer[self.buffer_idx - len..self.buffer_idx]; - } - - fn full_buffer<'s>(&'s mut self) -> &'s [u8] { - assert!(self.buffer_idx == 64); - self.buffer_idx = 0; - return &self.buffer[..64]; - } - - fn position(&self) -> usize { self.buffer_idx } - - fn remaining(&self) -> usize { 64 - self.buffer_idx } - - fn size(&self) -> usize { 64 } -} - -/// The StandardPadding trait adds a method useful for Sha256 to a FixedBuffer struct. -trait StandardPadding { - /// Add padding to the buffer. The buffer must not be full when this method is called and is - /// guaranteed to have exactly rem remaining bytes when it returns. 
If there are not at least - /// rem bytes available, the buffer will be zero padded, processed, cleared, and then filled - /// with zeros again until only rem bytes are remaining. - fn standard_padding(&mut self, rem: usize, func: F) where F: FnMut(&[u8]); -} - -impl StandardPadding for T { - fn standard_padding(&mut self, rem: usize, mut func: F) where F: FnMut(&[u8]) { - let size = self.size(); - - self.next(1)[0] = 128; - - if self.remaining() < rem { - self.zero_until(size); - func(self.full_buffer()); - } - - self.zero_until(size - rem); - } -} - -/// The Digest trait specifies an interface common to digest functions, such as SHA-1 and the SHA-2 -/// family of digest functions. -pub trait Digest { - /// Provide message data. - /// - /// # Arguments - /// - /// * input - A vector of message data - fn input(&mut self, input: &[u8]); - - /// Retrieve the digest result. This method may be called multiple times. - /// - /// # Arguments - /// - /// * out - the vector to hold the result. Must be large enough to contain output_bits(). - fn result(&mut self, out: &mut [u8]); - - /// Reset the digest. This method must be called after result() and before supplying more - /// data. - fn reset(&mut self); - - /// Get the output size in bits. - fn output_bits(&self) -> usize; - - /// Convenience function that feeds a string into a digest. - /// - /// # Arguments - /// - /// * `input` The string to feed into the digest - fn input_str(&mut self, input: &str) { - self.input(input.as_bytes()); - } - - /// Convenience function that retrieves the result of a digest as a - /// newly allocated vec of bytes. - fn result_bytes(&mut self) -> Vec { - let mut buf = vec![0; (self.output_bits()+7)/8]; - self.result(&mut buf); - buf - } - - /// Convenience function that retrieves the result of a digest as a - /// String in hexadecimal format. 
- fn result_str(&mut self) -> String { - self.result_bytes().to_hex().to_string() - } -} - -// A structure that represents that state of a digest computation for the SHA-2 512 family of digest -// functions -struct Engine256State { - h0: u32, - h1: u32, - h2: u32, - h3: u32, - h4: u32, - h5: u32, - h6: u32, - h7: u32, -} - -impl Engine256State { - fn new(h: &[u32; 8]) -> Engine256State { - return Engine256State { - h0: h[0], - h1: h[1], - h2: h[2], - h3: h[3], - h4: h[4], - h5: h[5], - h6: h[6], - h7: h[7] - }; - } - - fn reset(&mut self, h: &[u32; 8]) { - self.h0 = h[0]; - self.h1 = h[1]; - self.h2 = h[2]; - self.h3 = h[3]; - self.h4 = h[4]; - self.h5 = h[5]; - self.h6 = h[6]; - self.h7 = h[7]; - } - - fn process_block(&mut self, data: &[u8]) { - fn ch(x: u32, y: u32, z: u32) -> u32 { - ((x & y) ^ ((!x) & z)) - } - - fn maj(x: u32, y: u32, z: u32) -> u32 { - ((x & y) ^ (x & z) ^ (y & z)) - } - - fn sum0(x: u32) -> u32 { - ((x >> 2) | (x << 30)) ^ ((x >> 13) | (x << 19)) ^ ((x >> 22) | (x << 10)) - } - - fn sum1(x: u32) -> u32 { - ((x >> 6) | (x << 26)) ^ ((x >> 11) | (x << 21)) ^ ((x >> 25) | (x << 7)) - } - - fn sigma0(x: u32) -> u32 { - ((x >> 7) | (x << 25)) ^ ((x >> 18) | (x << 14)) ^ (x >> 3) - } - - fn sigma1(x: u32) -> u32 { - ((x >> 17) | (x << 15)) ^ ((x >> 19) | (x << 13)) ^ (x >> 10) - } - - let mut a = self.h0; - let mut b = self.h1; - let mut c = self.h2; - let mut d = self.h3; - let mut e = self.h4; - let mut f = self.h5; - let mut g = self.h6; - let mut h = self.h7; - - let mut w = [0; 64]; - - // Sha-512 and Sha-256 use basically the same calculations which are implemented - // by these macros. Inlining the calculations seems to result in better generated code. - macro_rules! schedule_round { ($t:expr) => ( - w[$t] = sigma1(w[$t - 2]).wrapping_add(w[$t - 7]) - .wrapping_add(sigma0(w[$t - 15])).wrapping_add(w[$t - 16]); - ) - } - - macro_rules! 
sha2_round { - ($A:ident, $B:ident, $C:ident, $D:ident, - $E:ident, $F:ident, $G:ident, $H:ident, $K:ident, $t:expr) => ( - { - $H = $H.wrapping_add(sum1($E)).wrapping_add(ch($E, $F, $G)) - .wrapping_add($K[$t]).wrapping_add(w[$t]); - $D = $D.wrapping_add($H); - $H = $H.wrapping_add(sum0($A)).wrapping_add(maj($A, $B, $C)); - } - ) - } - - read_u32v_be(&mut w[0..16], data); - - // Putting the message schedule inside the same loop as the round calculations allows for - // the compiler to generate better code. - for t in (0..48).step_by(8) { - schedule_round!(t + 16); - schedule_round!(t + 17); - schedule_round!(t + 18); - schedule_round!(t + 19); - schedule_round!(t + 20); - schedule_round!(t + 21); - schedule_round!(t + 22); - schedule_round!(t + 23); - - sha2_round!(a, b, c, d, e, f, g, h, K32, t); - sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); - sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); - sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); - sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); - sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); - sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); - sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); - } - - for t in (48..64).step_by(8) { - sha2_round!(a, b, c, d, e, f, g, h, K32, t); - sha2_round!(h, a, b, c, d, e, f, g, K32, t + 1); - sha2_round!(g, h, a, b, c, d, e, f, K32, t + 2); - sha2_round!(f, g, h, a, b, c, d, e, K32, t + 3); - sha2_round!(e, f, g, h, a, b, c, d, K32, t + 4); - sha2_round!(d, e, f, g, h, a, b, c, K32, t + 5); - sha2_round!(c, d, e, f, g, h, a, b, K32, t + 6); - sha2_round!(b, c, d, e, f, g, h, a, K32, t + 7); - } - - self.h0 = self.h0.wrapping_add(a); - self.h1 = self.h1.wrapping_add(b); - self.h2 = self.h2.wrapping_add(c); - self.h3 = self.h3.wrapping_add(d); - self.h4 = self.h4.wrapping_add(e); - self.h5 = self.h5.wrapping_add(f); - self.h6 = self.h6.wrapping_add(g); - self.h7 = self.h7.wrapping_add(h); - } -} - -static K32: [u32; 64] = [ - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 -]; - -// A structure that keeps track of the state of the Sha-256 operation and contains the logic -// necessary to perform the final calculations. -struct Engine256 { - length_bits: u64, - buffer: FixedBuffer64, - state: Engine256State, - finished: bool, -} - -impl Engine256 { - fn new(h: &[u32; 8]) -> Engine256 { - return Engine256 { - length_bits: 0, - buffer: FixedBuffer64::new(), - state: Engine256State::new(h), - finished: false - } - } - - fn reset(&mut self, h: &[u32; 8]) { - self.length_bits = 0; - self.buffer.reset(); - self.state.reset(h); - self.finished = false; - } - - fn input(&mut self, input: &[u8]) { - assert!(!self.finished); - // Assumes that input.len() can be converted to u64 without overflow - self.length_bits = add_bytes_to_bits(self.length_bits, input.len() as u64); - let self_state = &mut self.state; - self.buffer.input(input, |input: &[u8]| { self_state.process_block(input) }); - } - - fn finish(&mut self) { - if self.finished { - return; - } - - let self_state = &mut self.state; - self.buffer.standard_padding(8, |input: &[u8]| { self_state.process_block(input) }); - write_u32_be(self.buffer.next(4), (self.length_bits >> 32) as u32 ); - write_u32_be(self.buffer.next(4), self.length_bits as u32); - 
self_state.process_block(self.buffer.full_buffer()); - - self.finished = true; - } -} - -/// The SHA-256 hash algorithm -pub struct Sha256 { - engine: Engine256 -} - -impl Sha256 { - /// Construct a new instance of a SHA-256 digest. - /// Do not – under any circumstances – use this where timing attacks might be possible! - pub fn new() -> Sha256 { - Sha256 { - engine: Engine256::new(&H256) - } - } -} - -impl Digest for Sha256 { - fn input(&mut self, d: &[u8]) { - self.engine.input(d); - } - - fn result(&mut self, out: &mut [u8]) { - self.engine.finish(); - - write_u32_be(&mut out[0..4], self.engine.state.h0); - write_u32_be(&mut out[4..8], self.engine.state.h1); - write_u32_be(&mut out[8..12], self.engine.state.h2); - write_u32_be(&mut out[12..16], self.engine.state.h3); - write_u32_be(&mut out[16..20], self.engine.state.h4); - write_u32_be(&mut out[20..24], self.engine.state.h5); - write_u32_be(&mut out[24..28], self.engine.state.h6); - write_u32_be(&mut out[28..32], self.engine.state.h7); - } - - fn reset(&mut self) { - self.engine.reset(&H256); - } - - fn output_bits(&self) -> usize { 256 } -} - -static H256: [u32; 8] = [ - 0x6a09e667, - 0xbb67ae85, - 0x3c6ef372, - 0xa54ff53a, - 0x510e527f, - 0x9b05688c, - 0x1f83d9ab, - 0x5be0cd19 -]; - -#[cfg(test)] -mod tests { - #![allow(deprecated)] - extern crate rand; - - use self::rand::Rng; - use self::rand::isaac::IsaacRng; - use serialize::hex::FromHex; - use std::u64; - use super::{Digest, Sha256, FixedBuffer}; - - // A normal addition - no overflow occurs - #[test] - fn test_add_bytes_to_bits_ok() { - assert!(super::add_bytes_to_bits(100, 10) == 180); - } - - // A simple failure case - adding 1 to the max value - #[test] - #[should_panic] - fn test_add_bytes_to_bits_overflow() { - super::add_bytes_to_bits(u64::MAX, 1); - } - - struct Test { - input: String, - output_str: String, - } - - fn test_hash(sh: &mut D, tests: &[Test]) { - // Test that it works when accepting the message all at once - for t in tests { - 
sh.reset(); - sh.input_str(&t.input); - let out_str = sh.result_str(); - assert!(out_str == t.output_str); - } - - // Test that it works when accepting the message in pieces - for t in tests { - sh.reset(); - let len = t.input.len(); - let mut left = len; - while left > 0 { - let take = (left + 1) / 2; - sh.input_str(&t.input[len - left..take + len - left]); - left = left - take; - } - let out_str = sh.result_str(); - assert!(out_str == t.output_str); - } - } - - #[test] - fn test_sha256() { - // Examples from wikipedia - let wikipedia_tests = vec!( - Test { - input: "".to_string(), - output_str: "e3b0c44298fc1c149afb\ - f4c8996fb92427ae41e4649b934ca495991b7852b855".to_string() - }, - Test { - input: "The quick brown fox jumps over the lazy \ - dog".to_string(), - output_str: "d7a8fbb307d7809469ca\ - 9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592".to_string() - }, - Test { - input: "The quick brown fox jumps over the lazy \ - dog.".to_string(), - output_str: "ef537f25c895bfa78252\ - 6529a9b63d97aa631564d5d789c2b765448c8635fb6c".to_string() - }); - - let tests = wikipedia_tests; - - let mut sh: Box<_> = box Sha256::new(); - - test_hash(&mut *sh, &tests); - } - - /// Feed 1,000,000 'a's into the digest with varying input sizes and check that the result is - /// correct. 
- fn test_digest_1million_random(digest: &mut D, blocksize: usize, expected: &str) { - let total_size = 1000000; - let buffer = vec![b'a'; blocksize * 2]; - let mut rng = IsaacRng::new_unseeded(); - let mut count = 0; - - digest.reset(); - - while count < total_size { - let next: usize = rng.gen_range(0, 2 * blocksize + 1); - let remaining = total_size - count; - let size = if next > remaining { remaining } else { next }; - digest.input(&buffer[..size]); - count += size; - } - - let result_str = digest.result_str(); - let result_bytes = digest.result_bytes(); - - assert_eq!(expected, result_str); - - let expected_vec: Vec = expected.from_hex() - .unwrap() - .into_iter() - .collect(); - assert_eq!(expected_vec, result_bytes); - } - - #[test] - fn test_1million_random_sha256() { - let mut sh = Sha256::new(); - test_digest_1million_random( - &mut sh, - 64, - "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0"); - } -} - -#[cfg(test)] -mod bench { - extern crate test; - use self::test::Bencher; - use super::{Sha256, FixedBuffer, Digest}; - - #[bench] - pub fn sha256_10(b: &mut Bencher) { - let mut sh = Sha256::new(); - let bytes = [1; 10]; - b.iter(|| { - sh.input(&bytes); - }); - b.bytes = bytes.len() as u64; - } - - #[bench] - pub fn sha256_1k(b: &mut Bencher) { - let mut sh = Sha256::new(); - let bytes = [1; 1024]; - b.iter(|| { - sh.input(&bytes); - }); - b.bytes = bytes.len() as u64; - } - - #[bench] - pub fn sha256_64k(b: &mut Bencher) { - let mut sh = Sha256::new(); - let bytes = [1; 65536]; - b.iter(|| { - sh.input(&bytes); - }); - b.bytes = bytes.len() as u64; - } -} diff --git a/src/librustc_back/svh.rs b/src/librustc_back/svh.rs deleted file mode 100644 index 2532882d0127d..0000000000000 --- a/src/librustc_back/svh.rs +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Calculation and management of a Strict Version Hash for crates -//! -//! # Today's ABI problem -//! -//! In today's implementation of rustc, it is incredibly difficult to achieve -//! forward binary compatibility without resorting to C-like interfaces. Within -//! rust code itself, abi details such as symbol names suffer from a variety of -//! unrelated factors to code changing such as the "def id drift" problem. This -//! ends up yielding confusing error messages about metadata mismatches and -//! such. -//! -//! The core of this problem is when an upstream dependency changes and -//! downstream dependents are not recompiled. This causes compile errors because -//! the upstream crate's metadata has changed but the downstream crates are -//! still referencing the older crate's metadata. -//! -//! This problem exists for many reasons, the primary of which is that rust does -//! not currently support forwards ABI compatibility (in place upgrades of a -//! crate). -//! -//! # SVH and how it alleviates the problem -//! -//! With all of this knowledge on hand, this module contains the implementation -//! of a notion of a "Strict Version Hash" for a crate. This is essentially a -//! hash of all contents of a crate which can somehow be exposed to downstream -//! crates. -//! -//! This hash is currently calculated by just hashing the AST, but this is -//! obviously wrong (doc changes should not result in an incompatible ABI). -//! Implementation-wise, this is required at this moment in time. -//! -//! By encoding this strict version hash into all crate's metadata, stale crates -//! can be detected immediately and error'd about by rustc itself. -//! -//! # Relevant links -//! -//! 
Original issue: https://github.com/rust-lang/rust/issues/10207 - -use std::fmt; -use std::hash::{Hash, SipHasher, Hasher}; -use rustc_front::hir; -use rustc_front::intravisit as visit; - -#[derive(Clone, PartialEq, Debug)] -pub struct Svh { - hash: String, -} - -impl Svh { - pub fn new(hash: &str) -> Svh { - assert!(hash.len() == 16); - Svh { hash: hash.to_string() } - } - - pub fn as_str<'a>(&'a self) -> &'a str { - &self.hash - } - - pub fn calculate(metadata: &Vec, krate: &hir::Crate) -> Svh { - // FIXME (#14132): This is better than it used to be, but it still not - // ideal. We now attempt to hash only the relevant portions of the - // Crate AST as well as the top-level crate attributes. (However, - // the hashing of the crate attributes should be double-checked - // to ensure it is not incorporating implementation artifacts into - // the hash that are not otherwise visible.) - - // FIXME: this should use SHA1, not SipHash. SipHash is not built to - // avoid collisions. - let mut state = SipHasher::new(); - - for data in metadata { - data.hash(&mut state); - } - - { - let mut visit = svh_visitor::make(&mut state, krate); - visit::walk_crate(&mut visit, krate); - } - - // FIXME (#14132): This hash is still sensitive to e.g. the - // spans of the crate Attributes and their underlying - // MetaItems; we should make ContentHashable impl for those - // types and then use hash_content. But, since all crate - // attributes should appear near beginning of the file, it is - // not such a big deal to be sensitive to their spans for now. - // - // We hash only the MetaItems instead of the entire Attribute - // to avoid hashing the AttrId - for attr in &krate.attrs { - attr.node.value.hash(&mut state); - } - - let hash = state.finish(); - return Svh { - hash: (0..64).step_by(4).map(|i| hex(hash >> i)).collect() - }; - - fn hex(b: u64) -> char { - let b = (b & 0xf) as u8; - let b = match b { - 0 ... 
9 => '0' as u8 + b, - _ => 'a' as u8 + b - 10, - }; - b as char - } - } -} - -impl fmt::Display for Svh { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(self.as_str()) - } -} - -// FIXME (#14132): Even this SVH computation still has implementation -// artifacts: namely, the order of item declaration will affect the -// hash computation, but for many kinds of items the order of -// declaration should be irrelevant to the ABI. - -mod svh_visitor { - pub use self::SawExprComponent::*; - pub use self::SawStmtComponent::*; - use self::SawAbiComponent::*; - use syntax::ast::{self, Name, NodeId}; - use syntax::codemap::Span; - use syntax::parse::token; - use rustc_front::intravisit as visit; - use rustc_front::intravisit::{Visitor, FnKind}; - use rustc_front::hir::*; - use rustc_front::hir; - - use std::hash::{Hash, SipHasher}; - - pub struct StrictVersionHashVisitor<'a> { - pub krate: &'a Crate, - pub st: &'a mut SipHasher, - } - - pub fn make<'a>(st: &'a mut SipHasher, krate: &'a Crate) -> StrictVersionHashVisitor<'a> { - StrictVersionHashVisitor { st: st, krate: krate } - } - - // To off-load the bulk of the hash-computation on #[derive(Hash)], - // we define a set of enums corresponding to the content that our - // crate visitor will encounter as it traverses the ast. - // - // The important invariant is that all of the Saw*Component enums - // do not carry any Spans, Names, or Idents. - // - // Not carrying any Names/Idents is the important fix for problem - // noted on PR #13948: using the ident.name as the basis for a - // hash leads to unstable SVH, because ident.name is just an index - // into intern table (i.e. essentially a random address), not - // computed from the name content. - // - // With the below enums, the SVH computation is not sensitive to - // artifacts of how rustc was invoked nor of how the source code - // was laid out. (Or at least it is *less* sensitive.) 
- - // This enum represents the different potential bits of code the - // visitor could encounter that could affect the ABI for the crate, - // and assigns each a distinct tag to feed into the hash computation. - #[derive(Hash)] - enum SawAbiComponent<'a> { - - // FIXME (#14132): should we include (some function of) - // ident.ctxt as well? - SawIdent(token::InternedString), - SawStructDef(token::InternedString), - - SawLifetime(token::InternedString), - SawLifetimeDef(token::InternedString), - - SawMod, - SawForeignItem, - SawItem, - SawDecl, - SawTy, - SawGenerics, - SawFn, - SawTraitItem, - SawImplItem, - SawStructField, - SawVariant, - SawExplicitSelf, - SawPath, - SawBlock, - SawPat, - SawLocal, - SawArm, - SawExpr(SawExprComponent<'a>), - SawStmt(SawStmtComponent), - } - - /// SawExprComponent carries all of the information that we want - /// to include in the hash that *won't* be covered by the - /// subsequent recursive traversal of the expression's - /// substructure by the visitor. - /// - /// We know every Expr_ variant is covered by a variant because - /// `fn saw_expr` maps each to some case below. Ensuring that - /// each variant carries an appropriate payload has to be verified - /// by hand. - /// - /// (However, getting that *exactly* right is not so important - /// because the SVH is just a developer convenience; there is no - /// guarantee of collision-freedom, hash collisions are just - /// (hopefully) unlikely.) 
- #[derive(Hash)] - pub enum SawExprComponent<'a> { - - SawExprLoop(Option), - SawExprField(token::InternedString), - SawExprTupField(usize), - SawExprBreak(Option), - SawExprAgain(Option), - - SawExprBox, - SawExprVec, - SawExprCall, - SawExprMethodCall, - SawExprTup, - SawExprBinary(hir::BinOp_), - SawExprUnary(hir::UnOp), - SawExprLit(ast::Lit_), - SawExprCast, - SawExprType, - SawExprIf, - SawExprWhile, - SawExprMatch, - SawExprClosure, - SawExprBlock, - SawExprAssign, - SawExprAssignOp(hir::BinOp_), - SawExprIndex, - SawExprRange, - SawExprPath(Option), - SawExprAddrOf(hir::Mutability), - SawExprRet, - SawExprInlineAsm(&'a hir::InlineAsm), - SawExprStruct, - SawExprRepeat, - } - - fn saw_expr<'a>(node: &'a Expr_) -> SawExprComponent<'a> { - match *node { - ExprBox(..) => SawExprBox, - ExprVec(..) => SawExprVec, - ExprCall(..) => SawExprCall, - ExprMethodCall(..) => SawExprMethodCall, - ExprTup(..) => SawExprTup, - ExprBinary(op, _, _) => SawExprBinary(op.node), - ExprUnary(op, _) => SawExprUnary(op), - ExprLit(ref lit) => SawExprLit(lit.node.clone()), - ExprCast(..) => SawExprCast, - ExprType(..) => SawExprType, - ExprIf(..) => SawExprIf, - ExprWhile(..) => SawExprWhile, - ExprLoop(_, id) => SawExprLoop(id.map(|id| id.name.as_str())), - ExprMatch(..) => SawExprMatch, - ExprClosure(..) => SawExprClosure, - ExprBlock(..) => SawExprBlock, - ExprAssign(..) => SawExprAssign, - ExprAssignOp(op, _, _) => SawExprAssignOp(op.node), - ExprField(_, name) => SawExprField(name.node.as_str()), - ExprTupField(_, id) => SawExprTupField(id.node), - ExprIndex(..) => SawExprIndex, - ExprRange(..) => SawExprRange, - ExprPath(ref qself, _) => SawExprPath(qself.as_ref().map(|q| q.position)), - ExprAddrOf(m, _) => SawExprAddrOf(m), - ExprBreak(id) => SawExprBreak(id.map(|id| id.node.name.as_str())), - ExprAgain(id) => SawExprAgain(id.map(|id| id.node.name.as_str())), - ExprRet(..) => SawExprRet, - ExprInlineAsm(ref asm) => SawExprInlineAsm(asm), - ExprStruct(..) 
=> SawExprStruct, - ExprRepeat(..) => SawExprRepeat, - } - } - - /// SawStmtComponent is analogous to SawExprComponent, but for statements. - #[derive(Hash)] - pub enum SawStmtComponent { - SawStmtDecl, - SawStmtExpr, - SawStmtSemi, - } - - fn saw_stmt(node: &Stmt_) -> SawStmtComponent { - match *node { - StmtDecl(..) => SawStmtDecl, - StmtExpr(..) => SawStmtExpr, - StmtSemi(..) => SawStmtSemi, - } - } - - impl<'a> Visitor<'a> for StrictVersionHashVisitor<'a> { - fn visit_nested_item(&mut self, item: ItemId) { - self.visit_item(self.krate.item(item.id)) - } - - fn visit_variant_data(&mut self, s: &'a VariantData, name: Name, - g: &'a Generics, _: NodeId, _: Span) { - SawStructDef(name.as_str()).hash(self.st); - visit::walk_generics(self, g); - visit::walk_struct_def(self, s) - } - - fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) { - SawVariant.hash(self.st); - // walk_variant does not call walk_generics, so do it here. - visit::walk_generics(self, g); - visit::walk_variant(self, v, g, item_id) - } - - // All of the remaining methods just record (in the hash - // SipHasher) that the visitor saw that particular variant - // (with its payload), and continue walking as the default - // visitor would. - // - // Some of the implementations have some notes as to how one - // might try to make their SVH computation less discerning - // (e.g. by incorporating reachability analysis). But - // currently all of their implementations are uniform and - // uninteresting. - // - // (If you edit a method such that it deviates from the - // pattern, please move that method up above this comment.) 
- - fn visit_name(&mut self, _: Span, name: Name) { - SawIdent(name.as_str()).hash(self.st); - } - - fn visit_lifetime(&mut self, l: &'a Lifetime) { - SawLifetime(l.name.as_str()).hash(self.st); - } - - fn visit_lifetime_def(&mut self, l: &'a LifetimeDef) { - SawLifetimeDef(l.lifetime.name.as_str()).hash(self.st); - } - - // We do recursively walk the bodies of functions/methods - // (rather than omitting their bodies from the hash) since - // monomorphization and cross-crate inlining generally implies - // that a change to a crate body will require downstream - // crates to be recompiled. - fn visit_expr(&mut self, ex: &'a Expr) { - SawExpr(saw_expr(&ex.node)).hash(self.st); visit::walk_expr(self, ex) - } - - fn visit_stmt(&mut self, s: &'a Stmt) { - SawStmt(saw_stmt(&s.node)).hash(self.st); visit::walk_stmt(self, s) - } - - fn visit_foreign_item(&mut self, i: &'a ForeignItem) { - // FIXME (#14132) ideally we would incorporate privacy (or - // perhaps reachability) somewhere here, so foreign items - // that do not leak into downstream crates would not be - // part of the ABI. - SawForeignItem.hash(self.st); visit::walk_foreign_item(self, i) - } - - fn visit_item(&mut self, i: &'a Item) { - // FIXME (#14132) ideally would incorporate reachability - // analysis somewhere here, so items that never leak into - // downstream crates (e.g. via monomorphisation or - // inlining) would not be part of the ABI. 
- SawItem.hash(self.st); visit::walk_item(self, i) - } - - fn visit_mod(&mut self, m: &'a Mod, _s: Span, _n: NodeId) { - SawMod.hash(self.st); visit::walk_mod(self, m) - } - - fn visit_decl(&mut self, d: &'a Decl) { - SawDecl.hash(self.st); visit::walk_decl(self, d) - } - - fn visit_ty(&mut self, t: &'a Ty) { - SawTy.hash(self.st); visit::walk_ty(self, t) - } - - fn visit_generics(&mut self, g: &'a Generics) { - SawGenerics.hash(self.st); visit::walk_generics(self, g) - } - - fn visit_fn(&mut self, fk: FnKind<'a>, fd: &'a FnDecl, - b: &'a Block, s: Span, _: NodeId) { - SawFn.hash(self.st); visit::walk_fn(self, fk, fd, b, s) - } - - fn visit_trait_item(&mut self, ti: &'a TraitItem) { - SawTraitItem.hash(self.st); visit::walk_trait_item(self, ti) - } - - fn visit_impl_item(&mut self, ii: &'a ImplItem) { - SawImplItem.hash(self.st); visit::walk_impl_item(self, ii) - } - - fn visit_struct_field(&mut self, s: &'a StructField) { - SawStructField.hash(self.st); visit::walk_struct_field(self, s) - } - - fn visit_explicit_self(&mut self, es: &'a ExplicitSelf) { - SawExplicitSelf.hash(self.st); visit::walk_explicit_self(self, es) - } - - fn visit_path(&mut self, path: &'a Path, _: ast::NodeId) { - SawPath.hash(self.st); visit::walk_path(self, path) - } - - fn visit_path_list_item(&mut self, prefix: &'a Path, item: &'a PathListItem) { - SawPath.hash(self.st); visit::walk_path_list_item(self, prefix, item) - } - - fn visit_block(&mut self, b: &'a Block) { - SawBlock.hash(self.st); visit::walk_block(self, b) - } - - fn visit_pat(&mut self, p: &'a Pat) { - SawPat.hash(self.st); visit::walk_pat(self, p) - } - - fn visit_local(&mut self, l: &'a Local) { - SawLocal.hash(self.st); visit::walk_local(self, l) - } - - fn visit_arm(&mut self, a: &'a Arm) { - SawArm.hash(self.st); visit::walk_arm(self, a) - } - } -} diff --git a/src/librustc_back/target/aarch64_apple_ios.rs b/src/librustc_back/target/aarch64_apple_ios.rs index e1242560e62c7..5ef79359140f7 100644 --- 
a/src/librustc_back/target/aarch64_apple_ios.rs +++ b/src/librustc_back/target/aarch64_apple_ios.rs @@ -8,14 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::{Target, TargetOptions}; +use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let base = opts(Arch::Arm64)?; + Ok(Target { llvm_target: "arm64-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "ios".to_string(), target_env: "".to_string(), @@ -23,7 +25,9 @@ pub fn target() -> Target { options: TargetOptions { features: "+neon,+fp-armv8,+cyclone".to_string(), eliminate_frame_pointer: false, - .. opts(Arch::Arm64) + max_atomic_width: Some(128), + abi_blacklist: super::arm_base::abi_blacklist(), + .. base }, - } + }) } diff --git a/src/librustc_back/target/aarch64_linux_android.rs b/src/librustc_back/target/aarch64_linux_android.rs index c6901a4cc4270..140195c780b9c 100644 --- a/src/librustc_back/target/aarch64_linux_android.rs +++ b/src/librustc_back/target/aarch64_linux_android.rs @@ -8,17 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let mut base = super::android_base::opts(); + base.max_atomic_width = Some(128); + // As documented in http://developer.android.com/ndk/guides/cpu-features.html + // the neon (ASIMD) and FP must exist on all android aarch64 targets. 
+ base.features = "+neon,+fp-armv8".to_string(); + Ok(Target { llvm_target: "aarch64-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "android".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), - options: super::android_base::opts(), - } + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) } diff --git a/src/librustc_back/target/aarch64_unknown_fuchsia.rs b/src/librustc_back/target/aarch64_unknown_fuchsia.rs new file mode 100644 index 0000000000000..6ba1732e67f79 --- /dev/null +++ b/src/librustc_back/target/aarch64_unknown_fuchsia.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::fuchsia_base::opts(); + base.max_atomic_width = Some(128); + + Ok(Target { + llvm_target: "aarch64-unknown-fuchsia".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), + arch: "aarch64".to_string(), + target_os: "fuchsia".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. 
base + }, + }) +} diff --git a/src/librustc_back/target/aarch64_unknown_linux_gnu.rs b/src/librustc_back/target/aarch64_unknown_linux_gnu.rs index 51abab6609a86..5f6335d405f5e 100644 --- a/src/librustc_back/target/aarch64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/aarch64_unknown_linux_gnu.rs @@ -8,18 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - let base = super::linux_base::opts(); - Target { +pub fn target() -> TargetResult { + let mut base = super::linux_base::opts(); + base.max_atomic_width = Some(128); + + // see #36994 + base.exe_allocation_crate = "alloc_system".to_string(); + + Ok(Target { llvm_target: "aarch64-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), target_env: "gnu".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(), arch: "aarch64".to_string(), target_os: "linux".to_string(), target_vendor: "unknown".to_string(), - options: base, - } + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. 
base + }, + }) } diff --git a/src/librustc_back/target/apple_base.rs b/src/librustc_back/target/apple_base.rs index ffcb6f971ae25..70c7ea99e13d1 100644 --- a/src/librustc_back/target/apple_base.rs +++ b/src/librustc_back/target/apple_base.rs @@ -33,9 +33,8 @@ pub fn opts() -> TargetOptions { }).unwrap_or((10, 7)); TargetOptions { - // OSX has -dead_strip, which doesn't rely on ffunction_sections + // OSX has -dead_strip, which doesn't rely on function_sections function_sections: false, - linker: "cc".to_string(), dynamic_linking: true, executables: true, is_like_osx: true, diff --git a/src/librustc_back/target/apple_ios_base.rs b/src/librustc_back/target/apple_ios_base.rs index d182fd9605640..17492b8bdcb64 100644 --- a/src/librustc_back/target/apple_ios_base.rs +++ b/src/librustc_back/target/apple_ios_base.rs @@ -36,7 +36,7 @@ impl Arch { } } -pub fn get_sdk_root(sdk_name: &str) -> String { +pub fn get_sdk_root(sdk_name: &str) -> Result { let res = Command::new("xcrun") .arg("--show-sdk-path") .arg("-sdk") @@ -55,12 +55,12 @@ pub fn get_sdk_root(sdk_name: &str) -> String { }); match res { - Ok(output) => output.trim().to_string(), - Err(e) => panic!("failed to get {} SDK path: {}", sdk_name, e) + Ok(output) => Ok(output.trim().to_string()), + Err(e) => Err(format!("failed to get {} SDK path: {}", sdk_name, e)) } } -fn pre_link_args(arch: Arch) -> Vec { +fn build_pre_link_args(arch: Arch) -> Result, String> { let sdk_name = match arch { Armv7 | Armv7s | Arm64 => "iphoneos", I386 | X86_64 => "iphonesimulator" @@ -68,8 +68,10 @@ fn pre_link_args(arch: Arch) -> Vec { let arch_name = arch.to_string(); - vec!["-arch".to_string(), arch_name.to_string(), - "-Wl,-syslibroot".to_string(), get_sdk_root(sdk_name)] + let sdk_root = get_sdk_root(sdk_name)?; + + Ok(vec!["-arch".to_string(), arch_name.to_string(), + "-Wl,-syslibroot".to_string(), sdk_root]) } fn target_cpu(arch: Arch) -> String { @@ -82,13 +84,14 @@ fn target_cpu(arch: Arch) -> String { }.to_string() } -pub fn 
opts(arch: Arch) -> TargetOptions { - TargetOptions { +pub fn opts(arch: Arch) -> Result { + let pre_link_args = build_pre_link_args(arch)?; + Ok(TargetOptions { cpu: target_cpu(arch), dynamic_linking: false, executables: true, - pre_link_args: pre_link_args(arch), + pre_link_args: pre_link_args, has_elf_tls: false, .. super::apple_base::opts() - } + }) } diff --git a/src/librustc_back/target/arm_base.rs b/src/librustc_back/target/arm_base.rs new file mode 100644 index 0000000000000..ad132c27cb841 --- /dev/null +++ b/src/librustc_back/target/arm_base.rs @@ -0,0 +1,16 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax::abi::Abi; + +// All the calling conventions trigger an assertion(Unsupported calling convention) in llvm on arm +pub fn abi_blacklist() -> Vec { + vec![Abi::Stdcall, Abi::Fastcall, Abi::Vectorcall, Abi::Win64, Abi::SysV64] +} diff --git a/src/librustc_back/target/arm_linux_androideabi.rs b/src/librustc_back/target/arm_linux_androideabi.rs index 732f1a353a8bd..c7d2df4344cb1 100644 --- a/src/librustc_back/target/arm_linux_androideabi.rs +++ b/src/librustc_back/target/arm_linux_androideabi.rs @@ -8,20 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::android_base::opts(); - base.features = "+v7".to_string(); + base.features = "+v7,+vfp3,+d16".to_string(); + base.max_atomic_width = Some(64); - Target { + Ok(Target { llvm_target: "arm-linux-androideabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "android".to_string(), - target_env: "gnu".to_string(), + target_env: "".to_string(), target_vendor: "unknown".to_string(), - options: base, - } + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) } diff --git a/src/librustc_back/target/arm_unknown_linux_gnueabi.rs b/src/librustc_back/target/arm_unknown_linux_gnueabi.rs index 7c35b43fd4b75..77d35edfbd09c 100644 --- a/src/librustc_back/target/arm_unknown_linux_gnueabi.rs +++ b/src/librustc_back/target/arm_unknown_linux_gnueabi.rs @@ -8,22 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::{Target, TargetOptions}; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - let base = super::linux_base::opts(); - Target { +pub fn target() -> TargetResult { + let mut base = super::linux_base::opts(); + base.max_atomic_width = Some(64); + Ok(Target { llvm_target: "arm-unknown-linux-gnueabi".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), - target_env: "gnueabi".to_string(), + target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: TargetOptions { features: "+v6".to_string(), + abi_blacklist: super::arm_base::abi_blacklist(), .. 
base }, - } + }) } diff --git a/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs b/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs index a99ec45996c2e..b183412be1934 100644 --- a/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs +++ b/src/librustc_back/target/arm_unknown_linux_gnueabihf.rs @@ -8,22 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::{Target, TargetOptions}; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - let base = super::linux_base::opts(); - Target { +pub fn target() -> TargetResult { + let mut base = super::linux_base::opts(); + base.max_atomic_width = Some(64); + Ok(Target { llvm_target: "arm-unknown-linux-gnueabihf".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), arch: "arm".to_string(), target_os: "linux".to_string(), - target_env: "gnueabihf".to_string(), + target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: TargetOptions { features: "+v6,+vfp2".to_string(), + abi_blacklist: super::arm_base::abi_blacklist(), .. base } - } + }) } diff --git a/src/librustc_back/target/arm_unknown_linux_musleabi.rs b/src/librustc_back/target/arm_unknown_linux_musleabi.rs new file mode 100644 index 0000000000000..261d4353c7a09 --- /dev/null +++ b/src/librustc_back/target/arm_unknown_linux_musleabi.rs @@ -0,0 +1,37 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + + // Most of these settings are copied from the arm_unknown_linux_gnueabi + // target. + base.features = "+v6".to_string(); + base.max_atomic_width = Some(64); + Ok(Target { + // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it + // to determine the calling convention and float ABI, and it doesn't + // support the "musleabi" value. + llvm_target: "arm-unknown-linux-gnueabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) +} diff --git a/src/librustc_back/target/arm_unknown_linux_musleabihf.rs b/src/librustc_back/target/arm_unknown_linux_musleabihf.rs new file mode 100644 index 0000000000000..1443dcf5bad41 --- /dev/null +++ b/src/librustc_back/target/arm_unknown_linux_musleabihf.rs @@ -0,0 +1,37 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + + // Most of these settings are copied from the arm_unknown_linux_gnueabihf + // target. + base.features = "+v6,+vfp2".to_string(); + base.max_atomic_width = Some(64); + Ok(Target { + // It's important we use "gnueabihf" and not "musleabihf" here. 
LLVM + // uses it to determine the calling convention and float ABI, and it + // doesn't support the "musleabihf" value. + llvm_target: "arm-unknown-linux-gnueabihf".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) +} diff --git a/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs b/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs new file mode 100644 index 0000000000000..37216e20762d4 --- /dev/null +++ b/src/librustc_back/target/armv5te_unknown_linux_gnueabi.rs @@ -0,0 +1,34 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let base = super::linux_base::opts(); + Ok(Target { + llvm_target: "armv5te-unknown-linux-gnueabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + + options: TargetOptions { + features: "+soft-float".to_string(), + // No atomic instructions on ARMv5 + max_atomic_width: Some(0), + abi_blacklist: super::arm_base::abi_blacklist(), + .. 
base + } + }) +} + diff --git a/src/librustc_back/target/armv7_apple_ios.rs b/src/librustc_back/target/armv7_apple_ios.rs index d30648002912e..9e9c443930624 100644 --- a/src/librustc_back/target/armv7_apple_ios.rs +++ b/src/librustc_back/target/armv7_apple_ios.rs @@ -8,21 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::{Target, TargetOptions}; +use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let base = opts(Arch::Armv7)?; + Ok(Target { llvm_target: "armv7-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(), arch: "arm".to_string(), target_os: "ios".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), options: TargetOptions { features: "+v7,+vfp3,+neon".to_string(), - .. opts(Arch::Armv7) + max_atomic_width: Some(64), + abi_blacklist: super::arm_base::abi_blacklist(), + .. base } - } + }) } diff --git a/src/librustc_back/target/armv7_linux_androideabi.rs b/src/librustc_back/target/armv7_linux_androideabi.rs new file mode 100644 index 0000000000000..42f0deaa3fbff --- /dev/null +++ b/src/librustc_back/target/armv7_linux_androideabi.rs @@ -0,0 +1,32 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::android_base::opts(); + base.features = "+v7,+thumb2,+vfp3,+d16".to_string(); + base.max_atomic_width = Some(64); + + Ok(Target { + llvm_target: "armv7-none-linux-android".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "android".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) +} diff --git a/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs new file mode 100644 index 0000000000000..96ccedd5bea5c --- /dev/null +++ b/src/librustc_back/target/armv7_unknown_linux_gnueabihf.rs @@ -0,0 +1,35 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let base = super::linux_base::opts(); + Ok(Target { + llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + + options: TargetOptions { + // Info about features at https://wiki.debian.org/ArmHardFloatPort + features: "+v7,+vfp3,+d16,+thumb2,-neon".to_string(), + cpu: "generic".to_string(), + max_atomic_width: Some(64), + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + } + }) +} + diff --git a/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs b/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs new file mode 100644 index 0000000000000..8f66e6a4f58d4 --- /dev/null +++ b/src/librustc_back/target/armv7_unknown_linux_musleabihf.rs @@ -0,0 +1,38 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + + // Most of these settings are copied from the armv7_unknown_linux_gnueabihf + // target. + base.features = "+v7,+vfp3,+neon".to_string(); + base.cpu = "cortex-a8".to_string(); + base.max_atomic_width = Some(64); + Ok(Target { + // It's important we use "gnueabihf" and not "musleabihf" here. LLVM + // uses it to determine the calling convention and float ABI, and LLVM + // doesn't support the "musleabihf" value. 
+ llvm_target: "armv7-unknown-linux-gnueabihf".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + abi_blacklist: super::arm_base::abi_blacklist(), + .. base + }, + }) +} diff --git a/src/librustc_back/target/armv7s_apple_ios.rs b/src/librustc_back/target/armv7s_apple_ios.rs index 66ec6efca0e67..6edde6e73efd3 100644 --- a/src/librustc_back/target/armv7s_apple_ios.rs +++ b/src/librustc_back/target/armv7s_apple_ios.rs @@ -8,21 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::{Target, TargetOptions}; +use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let base = opts(Arch::Armv7s)?; + Ok(Target { llvm_target: "armv7s-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:o-p:32:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".to_string(), arch: "arm".to_string(), target_os: "ios".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), options: TargetOptions { features: "+v7,+vfp4,+neon".to_string(), - .. opts(Arch::Armv7s) + max_atomic_width: Some(64), + abi_blacklist: super::arm_base::abi_blacklist(), + .. base } - } + }) } diff --git a/src/librustc_back/target/asmjs_unknown_emscripten.rs b/src/librustc_back/target/asmjs_unknown_emscripten.rs new file mode 100644 index 0000000000000..d86a9b093272e --- /dev/null +++ b/src/librustc_back/target/asmjs_unknown_emscripten.rs @@ -0,0 +1,39 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{Target, TargetOptions}; + +pub fn target() -> Result { + let opts = TargetOptions { + linker: "emcc".to_string(), + ar: "emar".to_string(), + + dynamic_linking: false, + executables: true, + exe_suffix: ".js".to_string(), + linker_is_gnu: true, + allow_asm: false, + obj_is_bitcode: true, + max_atomic_width: Some(32), + post_link_args: vec!["-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()], + .. Default::default() + }; + Ok(Target { + llvm_target: "asmjs-unknown-emscripten".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + target_os: "emscripten".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(), + arch: "asmjs".to_string(), + options: opts, + }) +} diff --git a/src/librustc_back/target/bitrig_base.rs b/src/librustc_back/target/bitrig_base.rs index 2b84244cda4a2..7baf80066b274 100644 --- a/src/librustc_back/target/bitrig_base.rs +++ b/src/librustc_back/target/bitrig_base.rs @@ -13,13 +13,11 @@ use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { - linker: "cc".to_string(), dynamic_linking: true, executables: true, linker_is_gnu: true, has_rpath: true, position_independent_executables: true, - archive_format: "gnu".to_string(), exe_allocation_crate: "alloc_system".to_string(), .. 
Default::default() diff --git a/src/librustc_back/target/dragonfly_base.rs b/src/librustc_back/target/dragonfly_base.rs index b78fdc9f59ba1..7555181a15cf2 100644 --- a/src/librustc_back/target/dragonfly_base.rs +++ b/src/librustc_back/target/dragonfly_base.rs @@ -13,20 +13,21 @@ use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { - linker: "cc".to_string(), dynamic_linking: true, executables: true, linker_is_gnu: true, has_rpath: true, - pre_link_args: vec!( + pre_link_args: vec![ // GNU-style linkers will use this to omit linking to libraries // which don't actually fulfill any relocations, but only for // libraries which follow this flag. Thus, use it before // specifying libraries to link to. "-Wl,--as-needed".to_string(), - ), + + // Always enable NX protection when it is available + "-Wl,-z,noexecstack".to_string(), + ], position_independent_executables: true, - archive_format: "gnu".to_string(), exe_allocation_crate: super::maybe_jemalloc(), .. Default::default() } diff --git a/src/librustc_back/target/freebsd_base.rs b/src/librustc_back/target/freebsd_base.rs index e955f8c302bd6..7555181a15cf2 100644 --- a/src/librustc_back/target/freebsd_base.rs +++ b/src/librustc_back/target/freebsd_base.rs @@ -13,13 +13,22 @@ use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { - linker: "cc".to_string(), dynamic_linking: true, executables: true, + linker_is_gnu: true, has_rpath: true, - archive_format: "gnu".to_string(), - exe_allocation_crate: super::maybe_jemalloc(), + pre_link_args: vec![ + // GNU-style linkers will use this to omit linking to libraries + // which don't actually fulfill any relocations, but only for + // libraries which follow this flag. Thus, use it before + // specifying libraries to link to. 
+ "-Wl,--as-needed".to_string(), + // Always enable NX protection when it is available + "-Wl,-z,noexecstack".to_string(), + ], + position_independent_executables: true, + exe_allocation_crate: super::maybe_jemalloc(), .. Default::default() } } diff --git a/src/librustc_back/target/fuchsia_base.rs b/src/librustc_back/target/fuchsia_base.rs new file mode 100644 index 0000000000000..69546684cb70b --- /dev/null +++ b/src/librustc_back/target/fuchsia_base.rs @@ -0,0 +1,39 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetOptions; +use std::default::Default; + +pub fn opts() -> TargetOptions { + TargetOptions { + dynamic_linking: true, + executables: true, + linker_is_gnu: true, + has_rpath: true, + pre_link_args: vec![ + // We want to be able to strip as much executable code as possible + // from the linker command line, and this flag indicates to the + // linker that it can avoid linking in dynamic libraries that don't + // actually satisfy any symbols up to that point (as with many other + // resolutions the linker does). This option only applies to all + // following libraries so we're sure to pass it as one of the first + // arguments. + // FIXME: figure out whether these linker args are desirable + //"-Wl,--as-needed".to_string(), + + // Always enable NX protection when it is available + //"-Wl,-z,noexecstack".to_string(), + ], + position_independent_executables: true, + exe_allocation_crate: "alloc_system".to_string(), + has_elf_tls: true, + .. 
Default::default() + } +} diff --git a/src/librustc_back/target/haiku_base.rs b/src/librustc_back/target/haiku_base.rs new file mode 100644 index 0000000000000..5e319ba1838a0 --- /dev/null +++ b/src/librustc_back/target/haiku_base.rs @@ -0,0 +1,23 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetOptions; +use std::default::Default; + +pub fn opts() -> TargetOptions { + TargetOptions { + linker: "cc".to_string(), + dynamic_linking: true, + executables: true, + has_rpath: true, + linker_is_gnu: true, + .. Default::default() + } +} diff --git a/src/librustc_back/target/i386_apple_ios.rs b/src/librustc_back/target/i386_apple_ios.rs index 52b5901192c65..319ada4f8e17c 100644 --- a/src/librustc_back/target/i386_apple_ios.rs +++ b/src/librustc_back/target/i386_apple_ios.rs @@ -8,18 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let base = opts(Arch::I386)?; + Ok(Target { llvm_target: "i386-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "ios".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), - options: opts(Arch::I386) - } + options: TargetOptions { + max_atomic_width: Some(64), + .. 
base + } + }) } diff --git a/src/librustc_back/target/i586_pc_windows_msvc.rs b/src/librustc_back/target/i586_pc_windows_msvc.rs new file mode 100644 index 0000000000000..9b88cde598937 --- /dev/null +++ b/src/librustc_back/target/i586_pc_windows_msvc.rs @@ -0,0 +1,18 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetResult; + +pub fn target() -> TargetResult { + let mut base = super::i686_pc_windows_msvc::target()?; + base.options.cpu = "pentium".to_string(); + base.llvm_target = "i586-pc-windows-msvc".to_string(); + Ok(base) +} diff --git a/src/librustc_back/target/i586_unknown_linux_gnu.rs b/src/librustc_back/target/i586_unknown_linux_gnu.rs new file mode 100644 index 0000000000000..40fb4a67acdf1 --- /dev/null +++ b/src/librustc_back/target/i586_unknown_linux_gnu.rs @@ -0,0 +1,18 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::TargetResult; + +pub fn target() -> TargetResult { + let mut base = super::i686_unknown_linux_gnu::target()?; + base.options.cpu = "pentium".to_string(); + base.llvm_target = "i586-unknown-linux-gnu".to_string(); + Ok(base) +} diff --git a/src/librustc_back/target/i686_apple_darwin.rs b/src/librustc_back/target/i686_apple_darwin.rs index 98f4654ecab41..d3b09d9a0f112 100644 --- a/src/librustc_back/target/i686_apple_darwin.rs +++ b/src/librustc_back/target/i686_apple_darwin.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::apple_base::opts(); base.cpu = "yonah".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m32".to_string()); - Target { + Ok(Target { llvm_target: "i686-apple-darwin".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "macos".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_linux_android.rs b/src/librustc_back/target/i686_linux_android.rs index f548fdad3cbed..a2c007d496960 100644 --- a/src/librustc_back/target/i686_linux_android.rs +++ b/src/librustc_back/target/i686_linux_android.rs @@ -8,20 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::android_base::opts(); - base.cpu = "pentium4".to_string(); - Target { + base.max_atomic_width = Some(64); + + // http://developer.android.com/ndk/guides/abis.html#x86 + base.cpu = "pentiumpro".to_string(); + base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".to_string(); + + Ok(Target { llvm_target: "i686-linux-android".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "android".to_string(), - target_env: "gnu".to_string(), + target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_pc_windows_gnu.rs b/src/librustc_back/target/i686_pc_windows_gnu.rs index fa12bbd89323c..0c2c5433e6c41 100644 --- a/src/librustc_back/target/i686_pc_windows_gnu.rs +++ b/src/librustc_back/target/i686_pc_windows_gnu.rs @@ -8,24 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::windows_base::opts(); base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); // Mark all dynamic libraries and executables as compatible with the larger 4GiB address // space available to x86 Windows binaries on x86_64. 
base.pre_link_args.push("-Wl,--large-address-aware".to_string()); - Target { + Ok(Target { llvm_target: "i686-pc-windows-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(), arch: "x86".to_string(), target_os: "windows".to_string(), target_env: "gnu".to_string(), target_vendor: "pc".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_pc_windows_msvc.rs b/src/librustc_back/target/i686_pc_windows_msvc.rs index 96b2d37ab2088..2290d2057f130 100644 --- a/src/librustc_back/target/i686_pc_windows_msvc.rs +++ b/src/librustc_back/target/i686_pc_windows_msvc.rs @@ -8,20 +8,31 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::windows_msvc_base::opts(); base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); - Target { + // Mark all dynamic libraries and executables as compatible with the larger 4GiB address + // space available to x86 Windows binaries on x86_64. + base.pre_link_args.push("/LARGEADDRESSAWARE".to_string()); + + // Ensure the linker will only produce an image if it can also produce a table of + // the image's safe exception handlers. 
+ // https://msdn.microsoft.com/en-us/library/9a89h429.aspx + base.pre_link_args.push("/SAFESEH".to_string()); + + Ok(Target { llvm_target: "i686-pc-windows-msvc".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32".to_string(), arch: "x86".to_string(), target_os: "windows".to_string(), target_env: "msvc".to_string(), target_vendor: "pc".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_unknown_dragonfly.rs b/src/librustc_back/target/i686_unknown_dragonfly.rs index 32a15b9f2d4d1..d8f8431e66e7f 100644 --- a/src/librustc_back/target/i686_unknown_dragonfly.rs +++ b/src/librustc_back/target/i686_unknown_dragonfly.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::dragonfly_base::opts(); base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m32".to_string()); - Target { + Ok(Target { llvm_target: "i686-unknown-dragonfly".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "dragonfly".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_unknown_freebsd.rs b/src/librustc_back/target/i686_unknown_freebsd.rs index 812ba11cd796b..ddbc74f25c9cd 100644 --- a/src/librustc_back/target/i686_unknown_freebsd.rs +++ b/src/librustc_back/target/i686_unknown_freebsd.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::freebsd_base::opts(); base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m32".to_string()); - Target { + Ok(Target { llvm_target: "i686-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "freebsd".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_unknown_haiku.rs b/src/librustc_back/target/i686_unknown_haiku.rs new file mode 100644 index 0000000000000..9078206c9e069 --- /dev/null +++ b/src/librustc_back/target/i686_unknown_haiku.rs @@ -0,0 +1,30 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::haiku_base::opts(); + base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); + base.pre_link_args.push("-m32".to_string()); + + Ok(Target { + llvm_target: "i686-unknown-haiku".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), + arch: "x86".to_string(), + target_os: "haiku".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/i686_unknown_linux_gnu.rs b/src/librustc_back/target/i686_unknown_linux_gnu.rs index ac2af0c64fd6a..bf9c28b0c10e5 100644 --- a/src/librustc_back/target/i686_unknown_linux_gnu.rs +++ b/src/librustc_back/target/i686_unknown_linux_gnu.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m32".to_string()); - Target { + Ok(Target { llvm_target: "i686-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), arch: "x86".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/i686_unknown_linux_musl.rs b/src/librustc_back/target/i686_unknown_linux_musl.rs new file mode 100644 index 0000000000000..3d563fa6e5d97 --- /dev/null +++ b/src/librustc_back/target/i686_unknown_linux_musl.rs @@ -0,0 +1,31 @@ +// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); + base.cpu = "pentium4".to_string(); + base.max_atomic_width = Some(64); + base.pre_link_args.push("-m32".to_string()); + base.pre_link_args.push("-Wl,-melf_i386".to_string()); + + Ok(Target { + llvm_target: "i686-unknown-linux-musl".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128".to_string(), + arch: "x86".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/le32_unknown_nacl.rs b/src/librustc_back/target/le32_unknown_nacl.rs index a5daebafda8f5..891e7dda14a2a 100644 --- a/src/librustc_back/target/le32_unknown_nacl.rs +++ b/src/librustc_back/target/le32_unknown_nacl.rs @@ -8,34 +8,34 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::{Target, TargetOptions}; +use super::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let opts = TargetOptions { linker: "pnacl-clang".to_string(), ar: "pnacl-ar".to_string(), - pre_link_args: vec!("--pnacl-exceptions=sjlj".to_string(), + pre_link_args: vec!["--pnacl-exceptions=sjlj".to_string(), "--target=le32-unknown-nacl".to_string(), - "-Wl,--start-group".to_string()), - post_link_args: vec!("-Wl,--end-group".to_string()), + "-Wl,--start-group".to_string()], + post_link_args: vec!["-Wl,--end-group".to_string()], dynamic_linking: false, executables: true, exe_suffix: ".pexe".to_string(), - no_compiler_rt: false, linker_is_gnu: true, allow_asm: false, - archive_format: "gnu".to_string(), + max_atomic_width: Some(32), .. Default::default() }; - Target { + Ok(Target { llvm_target: "le32-unknown-nacl".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), target_os: "nacl".to_string(), target_env: "newlib".to_string(), target_vendor: "unknown".to_string(), + data_layout: "e-i64:64:64-p:32:32:32-v128:32:32".to_string(), arch: "le32".to_string(), options: opts, - } + }) } diff --git a/src/librustc_back/target/linux_base.rs b/src/librustc_back/target/linux_base.rs index 0efcf73ee8680..d1ab71e41404e 100644 --- a/src/librustc_back/target/linux_base.rs +++ b/src/librustc_back/target/linux_base.rs @@ -26,9 +26,11 @@ pub fn opts() -> TargetOptions { // following libraries so we're sure to pass it as one of the first // arguments. "-Wl,--as-needed".to_string(), + + // Always enable NX protection when it is available + "-Wl,-z,noexecstack".to_string(), ], position_independent_executables: true, - archive_format: "gnu".to_string(), exe_allocation_crate: super::maybe_jemalloc(), has_elf_tls: true, .. 
Default::default() diff --git a/src/librustc_back/target/linux_musl_base.rs b/src/librustc_back/target/linux_musl_base.rs new file mode 100644 index 0000000000000..18cca425a32c8 --- /dev/null +++ b/src/librustc_back/target/linux_musl_base.rs @@ -0,0 +1,73 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetOptions; + +pub fn opts() -> TargetOptions { + let mut base = super::linux_base::opts(); + + // Make sure that the linker/gcc really don't pull in anything, including + // default objects, libs, etc. + base.pre_link_args.push("-nostdlib".to_string()); + + // At least when this was tested, the linker would not add the + // `GNU_EH_FRAME` program header to executables generated, which is required + // when unwinding to locate the unwinding information. I'm not sure why this + // argument is *not* necessary for normal builds, but it can't hurt! + base.pre_link_args.push("-Wl,--eh-frame-hdr".to_string()); + + // There's a whole bunch of circular dependencies when dealing with MUSL + // unfortunately. To put this in perspective libc is statically linked to + // liblibc and libunwind is statically linked to libstd: + // + // * libcore depends on `fmod` which is in libc (transitively in liblibc). + // liblibc, however, depends on libcore. + // * compiler-rt has personality symbols that depend on libunwind, but + // libunwind is in libstd which depends on compiler-rt. + // + // Recall that linkers discard libraries and object files as much as + // possible, and with all the static linking and archives flying around with + // MUSL the linker is super aggressively stripping out objects. 
For example + // the first case has fmod stripped from liblibc (it's in its own object + // file) so it's not there when libcore needs it. In the second example all + // the unused symbols from libunwind are stripped (each is in its own object + // file in libstd) before we end up linking compiler-rt which depends on + // those symbols. + // + // To deal with these circular dependencies we just force the compiler to + // link everything as a group, not stripping anything out until everything + // is processed. The linker will still perform a pass to strip out object + // files but it won't do so until all objects/archives have been processed. + base.pre_link_args.push("-Wl,-(".to_string()); + base.post_link_args.push("-Wl,-)".to_string()); + + // When generating a statically linked executable there's generally some + // small setup needed which is listed in these files. These are provided by + // a musl toolchain and are linked by default by the `musl-gcc` script. Note + // that `gcc` also does this by default, it just uses some different files. + // + // Each target directory for musl has these object files included in it so + // they'll be included from there. + base.pre_link_objects_exe.push("crt1.o".to_string()); + base.pre_link_objects_exe.push("crti.o".to_string()); + base.post_link_objects.push("crtn.o".to_string()); + + // MUSL support doesn't currently include dynamic linking, so there's no + // need for dylibs or rpath business. Additionally `-pie` is incompatible + // with `-static`, so we can't pass `-pie`. 
+ base.dynamic_linking = false; + base.has_rpath = false; + base.position_independent_executables = false; + + // These targets statically link libc by default + base.crt_static_default = true; + + base +} diff --git a/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs new file mode 100644 index 0000000000000..c284840ecb4bd --- /dev/null +++ b/src/librustc_back/target/mips64_unknown_linux_gnuabi64.rs @@ -0,0 +1,35 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips64-unknown-linux-gnuabi64".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), + arch: "mips64".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + // NOTE(mips64r2) matches C toolchain + cpu: "mips64r2".to_string(), + features: "+mips64r2".to_string(), + max_atomic_width: Some(64), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs new file mode 100644 index 0000000000000..17895836fe87b --- /dev/null +++ b/src/librustc_back/target/mips64el_unknown_linux_gnuabi64.rs @@ -0,0 +1,35 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips64el-unknown-linux-gnuabi64".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".to_string(), + arch: "mips64".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + // NOTE(mips64r2) matches C toolchain + cpu: "mips64r2".to_string(), + features: "+mips64r2".to_string(), + max_atomic_width: Some(64), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mips_unknown_linux_gnu.rs b/src/librustc_back/target/mips_unknown_linux_gnu.rs index 357499c48ec7a..a6d8fae2536ca 100644 --- a/src/librustc_back/target/mips_unknown_linux_gnu.rs +++ b/src/librustc_back/target/mips_unknown_linux_gnu.rs @@ -8,17 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + Ok(Target { llvm_target: "mips-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), + data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), - options: super::linux_base::opts() - } + options: TargetOptions { + cpu: "mips32r2".to_string(), + features: "+mips32r2".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) } diff --git a/src/librustc_back/target/mips_unknown_linux_musl.rs b/src/librustc_back/target/mips_unknown_linux_musl.rs new file mode 100644 index 0000000000000..e4a6d2a55d981 --- /dev/null +++ b/src/librustc_back/target/mips_unknown_linux_musl.rs @@ -0,0 +1,34 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips-unknown-linux-musl".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + cpu: "mips32r2".to_string(), + features: "+mips32r2,+soft-float".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + } + }) +} diff --git a/src/librustc_back/target/mips_unknown_linux_uclibc.rs b/src/librustc_back/target/mips_unknown_linux_uclibc.rs new file mode 100644 index 0000000000000..ccc64ea393b78 --- /dev/null +++ b/src/librustc_back/target/mips_unknown_linux_uclibc.rs @@ -0,0 +1,34 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mips-unknown-linux-uclibc".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + cpu: "mips32r2".to_string(), + features: "+mips32r2,+soft-float".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mipsel_unknown_linux_gnu.rs b/src/librustc_back/target/mipsel_unknown_linux_gnu.rs index 3d0088add0d53..9b8b1d5713f1d 100644 --- a/src/librustc_back/target/mipsel_unknown_linux_gnu.rs +++ b/src/librustc_back/target/mipsel_unknown_linux_gnu.rs @@ -8,18 +8,28 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetOptions, TargetResult}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + Ok(Target { llvm_target: "mipsel-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "32".to_string(), + data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), arch: "mips".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), - options: super::linux_base::opts() - } + options: TargetOptions { + cpu: "mips32".to_string(), + features: "+mips32".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) } diff --git a/src/librustc_back/target/mipsel_unknown_linux_musl.rs b/src/librustc_back/target/mipsel_unknown_linux_musl.rs new file mode 100644 index 0000000000000..5693bddd0488a --- /dev/null +++ b/src/librustc_back/target/mipsel_unknown_linux_musl.rs @@ -0,0 +1,34 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mipsel-unknown-linux-musl".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "musl".to_string(), + target_vendor: "unknown".to_string(), + options: TargetOptions { + cpu: "mips32".to_string(), + features: "+mips32,+soft-float".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + } + }) +} diff --git a/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs new file mode 100644 index 0000000000000..3acade5a47444 --- /dev/null +++ b/src/librustc_back/target/mipsel_unknown_linux_uclibc.rs @@ -0,0 +1,35 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "mipsel-unknown-linux-uclibc".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".to_string(), + arch: "mips".to_string(), + target_os: "linux".to_string(), + target_env: "uclibc".to_string(), + target_vendor: "unknown".to_string(), + + options: TargetOptions { + cpu: "mips32".to_string(), + features: "+mips32,+soft-float".to_string(), + max_atomic_width: Some(32), + + // see #36994 + exe_allocation_crate: "alloc_system".to_string(), + + ..super::linux_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/mod.rs b/src/librustc_back/target/mod.rs index 51149101e0c2d..f195ccb3f4292 100644 --- a/src/librustc_back/target/mod.rs +++ b/src/librustc_back/target/mod.rs @@ -27,8 +27,7 @@ //! rustc will search each directory in the environment variable //! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will //! be loaded. If no file is found in any of those directories, a fatal error -//! will be given. `RUST_TARGET_PATH` includes `/etc/rustc` as its last entry, -//! to be searched by default. +//! will be given. //! //! Projects defining their own targets should use //! `--target=path/to/my-awesome-platform.json` instead of adding to @@ -40,32 +39,177 @@ //! this module defines the format the JSON file should take, though each //! underscore in the field names should be replaced with a hyphen (`-`) in the //! JSON file. Some fields are required in every target specification, such as -//! `data-layout`, `llvm-target`, `target-endian`, `target-pointer-width`, and -//! `arch`. In general, options passed to rustc with `-C` override the target's -//! settings, though `target-feature` and `link-args` will *add* to the list -//! specified by the target, rather than replace. +//! 
`llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`, +//! `arch`, and `os`. In general, options passed to rustc with `-C` override +//! the target's settings, though `target-feature` and `link-args` will *add* +//! to the list specified by the target, rather than replace. -use serialize::json::Json; +use serialize::json::{Json, ToJson}; +use std::collections::BTreeMap; use std::default::Default; use std::io::prelude::*; -use syntax::abi; +use syntax::abi::{Abi, lookup as lookup_abi}; + +use PanicStrategy; mod android_base; mod apple_base; mod apple_ios_base; +mod arm_base; mod bitrig_base; mod dragonfly_base; mod freebsd_base; +mod haiku_base; mod linux_base; +mod linux_musl_base; mod openbsd_base; mod netbsd_base; +mod solaris_base; mod windows_base; mod windows_msvc_base; +mod thumb_base; +mod fuchsia_base; + +pub type TargetResult = Result; + +macro_rules! supported_targets { + ( $(($triple:expr, $module:ident),)+ ) => ( + $(mod $module;)* + + /// List of supported targets + const TARGETS: &'static [&'static str] = &[$($triple),*]; + + fn load_specific(target: &str) -> TargetResult { + match target { + $( + $triple => { + let mut t = $module::target()?; + t.options.is_builtin = true; + + // round-trip through the JSON parser to ensure at + // run-time that the parser works correctly + t = Target::from_json(t.to_json())?; + debug!("Got builtin target: {:?}", t); + Ok(t) + }, + )+ + _ => Err(format!("Unable to find target: {}", target)) + } + } + + pub fn get_targets() -> Box> { + Box::new(TARGETS.iter().filter_map(|t| -> Option { + load_specific(t) + .and(Ok(t.to_string())) + .ok() + })) + } + + #[cfg(test)] + mod test_json_encode_decode { + use serialize::json::ToJson; + use super::Target; + $(use super::$module;)* + + $( + #[test] + fn $module() { + // Grab the TargetResult struct. 
If we successfully retrieved + // a Target, then the test JSON encoding/decoding can run for this + // Target on this testing platform (i.e., checking the iOS targets + // only on a Mac test platform). + let _ = $module::target().map(|original| { + let as_json = original.to_json(); + let parsed = Target::from_json(as_json).unwrap(); + assert_eq!(original, parsed); + }); + } + )* + } + ) +} + +supported_targets! { + ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu), + ("i686-unknown-linux-gnu", i686_unknown_linux_gnu), + ("i586-unknown-linux-gnu", i586_unknown_linux_gnu), + ("mips-unknown-linux-gnu", mips_unknown_linux_gnu), + ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64), + ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64), + ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu), + ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu), + ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu), + ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu), + ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu), + ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi), + ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf), + ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi), + ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf), + ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi), + ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf), + ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf), + ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu), + ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl), + ("i686-unknown-linux-musl", i686_unknown_linux_musl), + ("mips-unknown-linux-musl", mips_unknown_linux_musl), + ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl), + ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc), + ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc), + + ("i686-linux-android", 
i686_linux_android), + ("arm-linux-androideabi", arm_linux_androideabi), + ("armv7-linux-androideabi", armv7_linux_androideabi), + ("aarch64-linux-android", aarch64_linux_android), + + ("i686-unknown-freebsd", i686_unknown_freebsd), + ("x86_64-unknown-freebsd", x86_64_unknown_freebsd), + + ("i686-unknown-dragonfly", i686_unknown_dragonfly), + ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly), + + ("x86_64-unknown-bitrig", x86_64_unknown_bitrig), + ("x86_64-unknown-openbsd", x86_64_unknown_openbsd), + ("x86_64-unknown-netbsd", x86_64_unknown_netbsd), + ("x86_64-rumprun-netbsd", x86_64_rumprun_netbsd), + + ("i686-unknown-haiku", i686_unknown_haiku), + ("x86_64-unknown-haiku", x86_64_unknown_haiku), + + ("x86_64-apple-darwin", x86_64_apple_darwin), + ("i686-apple-darwin", i686_apple_darwin), + + ("aarch64-unknown-fuchsia", aarch64_unknown_fuchsia), + ("x86_64-unknown-fuchsia", x86_64_unknown_fuchsia), + + ("i386-apple-ios", i386_apple_ios), + ("x86_64-apple-ios", x86_64_apple_ios), + ("aarch64-apple-ios", aarch64_apple_ios), + ("armv7-apple-ios", armv7_apple_ios), + ("armv7s-apple-ios", armv7s_apple_ios), + + ("x86_64-sun-solaris", x86_64_sun_solaris), + + ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu), + ("i686-pc-windows-gnu", i686_pc_windows_gnu), + + ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc), + ("i686-pc-windows-msvc", i686_pc_windows_msvc), + ("i586-pc-windows-msvc", i586_pc_windows_msvc), + + ("le32-unknown-nacl", le32_unknown_nacl), + ("asmjs-unknown-emscripten", asmjs_unknown_emscripten), + ("wasm32-unknown-emscripten", wasm32_unknown_emscripten), + + ("thumbv6m-none-eabi", thumbv6m_none_eabi), + ("thumbv7m-none-eabi", thumbv7m_none_eabi), + ("thumbv7em-none-eabi", thumbv7em_none_eabi), + ("thumbv7em-none-eabihf", thumbv7em_none_eabihf), +} /// Everything `rustc` knows about how to compile for a specific target. /// /// Every field here must be specified, and has no default value. 
-#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] pub struct Target { /// Target triple to pass to LLVM. pub llvm_target: String, @@ -79,9 +223,11 @@ pub struct Target { pub target_env: String, /// Vendor name to use for conditional compilation. pub target_vendor: String, - /// Architecture to use for ABI considerations. Valid options: "x86", "x86_64", "arm", - /// "aarch64", "mips", "powerpc", "powerpc64" and "powerpc64le". "mips" includes "mipsel". + /// Architecture to use for ABI considerations. Valid options: "x86", + /// "x86_64", "arm", "aarch64", "mips", "powerpc", and "powerpc64". pub arch: String, + /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM. + pub data_layout: String, /// Optional settings with defaults. pub options: TargetOptions, } @@ -90,10 +236,11 @@ pub struct Target { /// /// This has an implementation of `Default`, see each field for what the default is. In general, /// these try to take "minimal defaults" that don't assume anything about the runtime they run in. -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] pub struct TargetOptions { - /// [Data layout](http://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM. - pub data_layout: Option, + /// Whether the target is built-in or loaded from a custom target specification. + pub is_builtin: bool, + /// Linker to invoke. Defaults to "cc". pub linker: String, /// Archive utility to use when managing archives. Defaults to "ar". @@ -118,7 +265,7 @@ pub struct TargetOptions { pub post_link_args: Vec, /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults - /// to "default". + /// to "generic". pub cpu: String, /// Default target features to pass to LLVM. These features will *always* be /// passed, and cannot be disabled even via `-C`. Corresponds to `llc @@ -155,6 +302,10 @@ pub struct TargetOptions { /// Whether the target toolchain is like OSX's. 
Only useful for compiling against iOS/OS X, in /// particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false. pub is_like_osx: bool, + /// Whether the target toolchain is like Solaris's. + /// Only useful for compiling against Illumos/Solaris, + /// as they have a different set of linker flags. Defaults to false. + pub is_like_solaris: bool, /// Whether the target toolchain is like Windows'. Only useful for compiling against Windows, /// only really used for figuring out how to find libraries, since Windows uses its own /// library naming convention. Defaults to false. @@ -165,11 +316,15 @@ pub struct TargetOptions { pub is_like_android: bool, /// Whether the linker support GNU-like arguments such as -O. Defaults to false. pub linker_is_gnu: bool, + /// The MinGW toolchain has a known issue that prevents it from correctly + /// handling COFF object files with more than 2^15 sections. Since each weak + /// symbol needs its own COMDAT section, weak linkage implies a large + /// number sections that easily exceeds the given limit for larger + /// codebases. Consequently we want a way to disallow weak linkage on some + /// platforms. + pub allows_weak_linkage: bool, /// Whether the linker support rpaths or not. Defaults to false. pub has_rpath: bool, - /// Whether to disable linking to compiler-rt. Defaults to false, as LLVM - /// will emit references to the functions that compiler-rt provides. - pub no_compiler_rt: bool, /// Whether to disable linking to the default libraries, typically corresponds /// to `-nodefaultlibs`. Defaults to true. pub no_default_libraries: bool, @@ -199,6 +354,28 @@ pub struct TargetOptions { /// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for /// this target. pub has_elf_tls: bool, + // This is mainly for easy compatibility with emscripten. + // If we give emcc .o files that are actually .bc files it + // will 'just work'. 
+ pub obj_is_bitcode: bool, + + // LLVM can't produce object files for this target. Instead, we'll make LLVM + // emit assembly and then use `gcc` to turn that assembly into an object + // file + pub no_integrated_as: bool, + + /// Don't use this field; instead use the `.max_atomic_width()` method. + pub max_atomic_width: Option, + + /// Panic strategy: "unwind" or "abort" + pub panic_strategy: PanicStrategy, + + /// A blacklist of ABIs unsupported by the current target. Note that generic + /// ABIs are considered to be supported on all platforms and cannot be blacklisted. + pub abi_blacklist: Vec, + + /// Whether or not the CRT is statically linked by default. + pub crt_static_default: bool, } impl Default for TargetOptions { @@ -206,7 +383,7 @@ impl Default for TargetOptions { /// incomplete, and if used for compilation, will certainly not work. fn default() -> TargetOptions { TargetOptions { - data_layout: None, + is_builtin: false, linker: option_env!("CFG_DEFAULT_LINKER").unwrap_or("cc").to_string(), ar: option_env!("CFG_DEFAULT_AR").unwrap_or("ar").to_string(), pre_link_args: Vec::new(), @@ -227,54 +404,76 @@ impl Default for TargetOptions { staticlib_suffix: ".a".to_string(), target_family: None, is_like_osx: false, + is_like_solaris: false, is_like_windows: false, is_like_android: false, is_like_msvc: false, linker_is_gnu: false, + allows_weak_linkage: true, has_rpath: false, - no_compiler_rt: false, no_default_libraries: true, position_independent_executables: false, pre_link_objects_exe: Vec::new(), pre_link_objects_dll: Vec::new(), post_link_objects: Vec::new(), late_link_args: Vec::new(), - archive_format: String::new(), + archive_format: "gnu".to_string(), custom_unwind_resume: false, lib_allocation_crate: "alloc_system".to_string(), exe_allocation_crate: "alloc_system".to_string(), allow_asm: true, has_elf_tls: false, + obj_is_bitcode: false, + no_integrated_as: false, + max_atomic_width: None, + panic_strategy: PanicStrategy::Unwind, + abi_blacklist: 
vec![], + crt_static_default: false, } } } impl Target { /// Given a function ABI, turn "System" into the correct ABI for this target. - pub fn adjust_abi(&self, abi: abi::Abi) -> abi::Abi { + pub fn adjust_abi(&self, abi: Abi) -> Abi { match abi { - abi::System => { + Abi::System => { if self.options.is_like_windows && self.arch == "x86" { - abi::Stdcall + Abi::Stdcall } else { - abi::C + Abi::C } }, abi => abi } } + /// Maximum integer size in bits that this target can perform atomic + /// operations on. + pub fn max_atomic_width(&self) -> u64 { + self.options.max_atomic_width.unwrap_or(self.target_pointer_width.parse().unwrap()) + } + + pub fn is_abi_supported(&self, abi: Abi) -> bool { + abi.generic() || !self.options.abi_blacklist.contains(&abi) + } + /// Load a target descriptor from a JSON object. - pub fn from_json(obj: Json) -> Target { - // this is 1. ugly, 2. error prone. + pub fn from_json(obj: Json) -> TargetResult { + // While ugly, this code must remain this way to retain + // compatibility with existing JSON fields and the internal + // expected naming of the Target and TargetOptions structs. + // To ensure compatibility is retained, the built-in targets + // are round-tripped through this code to catch cases where + // the JSON parser is not updated to match the structs. 
let get_req_field = |name: &str| { match obj.find(name) .map(|s| s.as_string()) .and_then(|os| os.map(|s| s.to_string())) { - Some(val) => val, + Some(val) => Ok(val), None => { - panic!("Field {} in target specification is required", name) + return Err(format!("Field {} in target specification is required", name)) } } }; @@ -286,11 +485,12 @@ impl Target { }; let mut base = Target { - llvm_target: get_req_field("llvm-target"), - target_endian: get_req_field("target-endian"), - target_pointer_width: get_req_field("target-pointer-width"), - arch: get_req_field("arch"), - target_os: get_req_field("os"), + llvm_target: get_req_field("llvm-target")?, + target_endian: get_req_field("target-endian")?, + target_pointer_width: get_req_field("target-pointer-width")?, + data_layout: get_req_field("data-layout")?, + arch: get_req_field("arch")?, + target_os: get_req_field("os")?, target_env: get_opt_field("env", ""), target_vendor: get_opt_field("vendor", "unknown"), options: Default::default(), @@ -308,6 +508,25 @@ impl Target { .map(|o| o.as_boolean() .map(|s| base.options.$key_name = s)); } ); + ($key_name:ident, Option) => ( { + let name = (stringify!($key_name)).replace("_", "-"); + obj.find(&name[..]) + .map(|o| o.as_u64() + .map(|s| base.options.$key_name = Some(s))); + } ); + ($key_name:ident, PanicStrategy) => ( { + let name = (stringify!($key_name)).replace("_", "-"); + obj.find(&name[..]).and_then(|o| o.as_string().and_then(|s| { + match s { + "unwind" => base.options.$key_name = PanicStrategy::Unwind, + "abort" => base.options.$key_name = PanicStrategy::Abort, + _ => return Some(Err(format!("'{}' is not a valid value for \ + panic-strategy. 
Use 'unwind' or 'abort'.", + s))), + } + Some(Ok(())) + })).unwrap_or(Ok(())) + } ); ($key_name:ident, list) => ( { let name = (stringify!($key_name)).replace("_", "-"); obj.find(&name[..]).map(|o| o.as_array() @@ -326,37 +545,69 @@ impl Target { } ); } - key!(cpu); - key!(ar); + key!(is_builtin, bool); key!(linker); + key!(ar); + key!(pre_link_args, list); + key!(pre_link_objects_exe, list); + key!(pre_link_objects_dll, list); + key!(late_link_args, list); + key!(post_link_objects, list); + key!(post_link_args, list); + key!(cpu); + key!(features); + key!(dynamic_linking, bool); + key!(executables, bool); key!(relocation_model); key!(code_model); + key!(disable_redzone, bool); + key!(eliminate_frame_pointer, bool); + key!(function_sections, bool); key!(dll_prefix); key!(dll_suffix); key!(exe_suffix); key!(staticlib_prefix); key!(staticlib_suffix); - key!(features); - key!(data_layout, optional); - key!(dynamic_linking, bool); - key!(executables, bool); - key!(disable_redzone, bool); - key!(eliminate_frame_pointer, bool); - key!(function_sections, bool); key!(target_family, optional); key!(is_like_osx, bool); + key!(is_like_solaris, bool); key!(is_like_windows, bool); + key!(is_like_msvc, bool); + key!(is_like_android, bool); key!(linker_is_gnu, bool); + key!(allows_weak_linkage, bool); key!(has_rpath, bool); - key!(no_compiler_rt, bool); key!(no_default_libraries, bool); - key!(pre_link_args, list); - key!(post_link_args, list); + key!(position_independent_executables, bool); key!(archive_format); key!(allow_asm, bool); key!(custom_unwind_resume, bool); + key!(lib_allocation_crate); + key!(exe_allocation_crate); + key!(has_elf_tls, bool); + key!(obj_is_bitcode, bool); + key!(no_integrated_as, bool); + key!(max_atomic_width, Option); + try!(key!(panic_strategy, PanicStrategy)); + key!(crt_static_default, bool); + + if let Some(array) = obj.find("abi-blacklist").and_then(Json::as_array) { + for name in array.iter().filter_map(|abi| abi.as_string()) { + match 
lookup_abi(name) { + Some(abi) => { + if abi.generic() { + return Err(format!("The ABI \"{}\" is considered to be supported on \ + all targets and cannot be blacklisted", abi)) + } - base + base.options.abi_blacklist.push(abi) + } + None => return Err(format!("Unknown ABI \"{}\" in target specification", name)) + } + } + } + + Ok(base) } /// Search RUST_TARGET_PATH for a JSON file specifying the given target @@ -374,86 +625,18 @@ impl Target { use serialize::json; fn load_file(path: &Path) -> Result { - let mut f = try!(File::open(path).map_err(|e| e.to_string())); + let mut f = File::open(path).map_err(|e| e.to_string())?; let mut contents = Vec::new(); - try!(f.read_to_end(&mut contents).map_err(|e| e.to_string())); - let obj = try!(json::from_reader(&mut &contents[..]) - .map_err(|e| e.to_string())); - Ok(Target::from_json(obj)) + f.read_to_end(&mut contents).map_err(|e| e.to_string())?; + let obj = json::from_reader(&mut &contents[..]) + .map_err(|e| e.to_string())?; + Target::from_json(obj) } - // this would use a match if stringify! were allowed in pattern position - macro_rules! 
load_specific { - ( $($name:ident),+ ) => ( - { - $(mod $name;)* - let target = target.replace("-", "_"); - if false { } - $( - else if target == stringify!($name) { - let t = $name::target(); - debug!("Got builtin target: {:?}", t); - return Ok(t); - } - )* - else if target == "x86_64-w64-mingw32" { - let t = x86_64_pc_windows_gnu::target(); - return Ok(t); - } else if target == "i686-w64-mingw32" { - let t = i686_pc_windows_gnu::target(); - return Ok(t); - } - } - ) + if let Ok(t) = load_specific(target) { + return Ok(t) } - load_specific!( - x86_64_unknown_linux_gnu, - i686_unknown_linux_gnu, - mips_unknown_linux_gnu, - mipsel_unknown_linux_gnu, - powerpc_unknown_linux_gnu, - powerpc64_unknown_linux_gnu, - powerpc64le_unknown_linux_gnu, - arm_unknown_linux_gnueabi, - arm_unknown_linux_gnueabihf, - aarch64_unknown_linux_gnu, - x86_64_unknown_linux_musl, - - i686_linux_android, - arm_linux_androideabi, - aarch64_linux_android, - - i686_unknown_freebsd, - x86_64_unknown_freebsd, - - i686_unknown_dragonfly, - x86_64_unknown_dragonfly, - - x86_64_unknown_bitrig, - x86_64_unknown_openbsd, - x86_64_unknown_netbsd, - x86_64_rumprun_netbsd, - - x86_64_apple_darwin, - i686_apple_darwin, - - i386_apple_ios, - x86_64_apple_ios, - aarch64_apple_ios, - armv7_apple_ios, - armv7s_apple_ios, - - x86_64_pc_windows_gnu, - i686_pc_windows_gnu, - - x86_64_pc_windows_msvc, - i686_pc_windows_msvc, - - le32_unknown_nacl - ); - - let path = Path::new(target); if path.is_file() { @@ -482,10 +665,107 @@ impl Target { } } +impl ToJson for Target { + fn to_json(&self) -> Json { + let mut d = BTreeMap::new(); + let default: TargetOptions = Default::default(); + + macro_rules! target_val { + ($attr:ident) => ( { + let name = (stringify!($attr)).replace("_", "-"); + d.insert(name.to_string(), self.$attr.to_json()); + } ); + ($attr:ident, $key_name:expr) => ( { + let name = $key_name; + d.insert(name.to_string(), self.$attr.to_json()); + } ); + } + + macro_rules! 
target_option_val { + ($attr:ident) => ( { + let name = (stringify!($attr)).replace("_", "-"); + if default.$attr != self.options.$attr { + d.insert(name.to_string(), self.options.$attr.to_json()); + } + } ); + ($attr:ident, $key_name:expr) => ( { + let name = $key_name; + if default.$attr != self.options.$attr { + d.insert(name.to_string(), self.options.$attr.to_json()); + } + } ); + } + + target_val!(llvm_target); + target_val!(target_endian); + target_val!(target_pointer_width); + target_val!(arch); + target_val!(target_os, "os"); + target_val!(target_env, "env"); + target_val!(target_vendor, "vendor"); + target_val!(arch); + target_val!(data_layout); + + target_option_val!(is_builtin); + target_option_val!(linker); + target_option_val!(ar); + target_option_val!(pre_link_args); + target_option_val!(pre_link_objects_exe); + target_option_val!(pre_link_objects_dll); + target_option_val!(late_link_args); + target_option_val!(post_link_objects); + target_option_val!(post_link_args); + target_option_val!(cpu); + target_option_val!(features); + target_option_val!(dynamic_linking); + target_option_val!(executables); + target_option_val!(relocation_model); + target_option_val!(code_model); + target_option_val!(disable_redzone); + target_option_val!(eliminate_frame_pointer); + target_option_val!(function_sections); + target_option_val!(dll_prefix); + target_option_val!(dll_suffix); + target_option_val!(exe_suffix); + target_option_val!(staticlib_prefix); + target_option_val!(staticlib_suffix); + target_option_val!(target_family); + target_option_val!(is_like_osx); + target_option_val!(is_like_solaris); + target_option_val!(is_like_windows); + target_option_val!(is_like_msvc); + target_option_val!(is_like_android); + target_option_val!(linker_is_gnu); + target_option_val!(allows_weak_linkage); + target_option_val!(has_rpath); + target_option_val!(no_default_libraries); + target_option_val!(position_independent_executables); + target_option_val!(archive_format); + 
target_option_val!(allow_asm); + target_option_val!(custom_unwind_resume); + target_option_val!(lib_allocation_crate); + target_option_val!(exe_allocation_crate); + target_option_val!(has_elf_tls); + target_option_val!(obj_is_bitcode); + target_option_val!(no_integrated_as); + target_option_val!(max_atomic_width); + target_option_val!(panic_strategy); + target_option_val!(crt_static_default); + + if default.abi_blacklist != self.options.abi_blacklist { + d.insert("abi-blacklist".to_string(), self.options.abi_blacklist.iter() + .map(Abi::name).map(|name| name.to_json()) + .collect::>().to_json()); + } + + Json::Object(d) + } +} + fn maybe_jemalloc() -> String { - if cfg!(disable_jemalloc) { - "alloc_system".to_string() - } else { + if cfg!(feature = "jemalloc") { "alloc_jemalloc".to_string() + } else { + "alloc_system".to_string() } } diff --git a/src/librustc_back/target/netbsd_base.rs b/src/librustc_back/target/netbsd_base.rs index 361f71f699591..6e038a7ed56ee 100644 --- a/src/librustc_back/target/netbsd_base.rs +++ b/src/librustc_back/target/netbsd_base.rs @@ -13,20 +13,21 @@ use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { - linker: "cc".to_string(), dynamic_linking: true, executables: true, linker_is_gnu: true, has_rpath: true, - pre_link_args: vec!( + pre_link_args: vec![ // GNU-style linkers will use this to omit linking to libraries // which don't actually fulfill any relocations, but only for // libraries which follow this flag. Thus, use it before // specifying libraries to link to. "-Wl,--as-needed".to_string(), - ), + + // Always enable NX protection when it is available + "-Wl,-z,noexecstack".to_string(), + ], position_independent_executables: true, - archive_format: "gnu".to_string(), .. 
Default::default() } } diff --git a/src/librustc_back/target/openbsd_base.rs b/src/librustc_back/target/openbsd_base.rs index 2e4aa27cf889d..90e6631841bef 100644 --- a/src/librustc_back/target/openbsd_base.rs +++ b/src/librustc_back/target/openbsd_base.rs @@ -13,20 +13,21 @@ use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { - linker: "cc".to_string(), dynamic_linking: true, executables: true, linker_is_gnu: true, has_rpath: true, - pre_link_args: vec!( + pre_link_args: vec![ // GNU-style linkers will use this to omit linking to libraries // which don't actually fulfill any relocations, but only for // libraries which follow this flag. Thus, use it before // specifying libraries to link to. "-Wl,--as-needed".to_string(), - ), + + // Always enable NX protection when it is available + "-Wl,-z,noexecstack".to_string(), + ], position_independent_executables: true, - archive_format: "gnu".to_string(), exe_allocation_crate: "alloc_system".to_string(), .. Default::default() } diff --git a/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs index 83970e73b933c..909c5488dcb70 100644 --- a/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc64_unknown_linux_gnu.rs @@ -8,20 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); + base.cpu = "ppc64".to_string(); base.pre_link_args.push("-m64".to_string()); + base.max_atomic_width = Some(64); - Target { + // see #36994 + base.exe_allocation_crate = "alloc_system".to_string(); + + Ok(Target { llvm_target: "powerpc64-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "64".to_string(), + data_layout: "E-m:e-i64:64-n32:64".to_string(), arch: "powerpc64".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs index 0f5252fdc23a5..a692346ca0ffe 100644 --- a/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc64le_unknown_linux_gnu.rs @@ -8,20 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); + base.cpu = "ppc64le".to_string(); base.pre_link_args.push("-m64".to_string()); + base.max_atomic_width = Some(64); - Target { + // see #36994 + base.exe_allocation_crate = "alloc_system".to_string(); + + Ok(Target { llvm_target: "powerpc64le-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), - arch: "powerpc64le".to_string(), + data_layout: "e-m:e-i64:64-n32:64".to_string(), + arch: "powerpc64".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/powerpc_unknown_linux_gnu.rs b/src/librustc_back/target/powerpc_unknown_linux_gnu.rs index 6664abf5458b7..284772c43319a 100644 --- a/src/librustc_back/target/powerpc_unknown_linux_gnu.rs +++ b/src/librustc_back/target/powerpc_unknown_linux_gnu.rs @@ -8,20 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); base.pre_link_args.push("-m32".to_string()); + base.max_atomic_width = Some(32); - Target { + // see #36994 + base.exe_allocation_crate = "alloc_system".to_string(); + + Ok(Target { llvm_target: "powerpc-unknown-linux-gnu".to_string(), target_endian: "big".to_string(), target_pointer_width: "32".to_string(), + data_layout: "E-m:e-p:32:32-i64:64-n32".to_string(), arch: "powerpc".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/s390x_unknown_linux_gnu.rs b/src/librustc_back/target/s390x_unknown_linux_gnu.rs new file mode 100644 index 0000000000000..6e2dd6cd67c93 --- /dev/null +++ b/src/librustc_back/target/s390x_unknown_linux_gnu.rs @@ -0,0 +1,34 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::linux_base::opts(); + // z10 is the oldest CPU supported by LLVM + base.cpu = "z10".to_string(); + // FIXME: The data_layout string below and the ABI implementation in + // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI. + // Pass the -vector feature string to LLVM to respect this assumption. 
+ base.features = "-vector".to_string(); + base.max_atomic_width = Some(64); + + Ok(Target { + llvm_target: "s390x-unknown-linux-gnu".to_string(), + target_endian: "big".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".to_string(), + arch: "s390x".to_string(), + target_os: "linux".to_string(), + target_env: "gnu".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/solaris_base.rs b/src/librustc_back/target/solaris_base.rs new file mode 100644 index 0000000000000..a7af0462e570f --- /dev/null +++ b/src/librustc_back/target/solaris_base.rs @@ -0,0 +1,24 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use target::TargetOptions; +use std::default::Default; + +pub fn opts() -> TargetOptions { + TargetOptions { + dynamic_linking: true, + executables: true, + has_rpath: true, + is_like_solaris: true, + exe_allocation_crate: super::maybe_jemalloc(), + + .. Default::default() + } +} diff --git a/src/librustc_back/target/thumb_base.rs b/src/librustc_back/target/thumb_base.rs new file mode 100644 index 0000000000000..6bb496649a858 --- /dev/null +++ b/src/librustc_back/target/thumb_base.rs @@ -0,0 +1,58 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// These 4 `thumbv*` targets cover the ARM Cortex-M family of processors which are widely used in +// microcontrollers. Namely, all these processors: +// +// - Cortex-M0 +// - Cortex-M0+ +// - Cortex-M1 +// - Cortex-M3 +// - Cortex-M4(F) +// - Cortex-M7(F) +// +// We have opted for 4 targets instead of one target per processor (e.g. `cortex-m0`, `cortex-m3`, +// etc) because the differences between some processors like the cortex-m0 and cortex-m1 are almost +// non-existent from the POV of codegen so it doesn't make sense to have separate targets for them. +// And if differences exist between two processors under the same target, rustc flags can be used to +// optimize for one processor or the other. +// +// Also, we have not chosen a single target (`arm-none-eabi`) like GCC does because this makes +// difficult to integrate Rust code and C code. Targeting the Cortex-M4 requires different gcc flags +// than the ones you would use for the Cortex-M0 and with a single target it'd be impossible to +// differentiate one processor from the other. +// +// About arm vs thumb in the name. The Cortex-M devices only support the Thumb instruction set, +// which is more compact (higher code density), and not the ARM instruction set. That's why LLVM +// triples use thumb instead of arm. We follow suit because having thumb in the name let us +// differentiate these targets from our other `arm(v7)-*-*-gnueabi(hf)` targets in the context of +// build scripts / gcc flags. 
+ +use PanicStrategy; +use std::default::Default; +use target::TargetOptions; + +pub fn opts() -> TargetOptions { + // See rust-lang/rfcs#1645 for a discussion about these defaults + TargetOptions { + executables: true, + // In 99%+ of cases, we want to use the `arm-none-eabi-gcc` compiler (there aren't many + // options around) + linker: "arm-none-eabi-gcc".to_string(), + // Because these devices have very little resources having an unwinder is too onerous so we + // default to "abort" because the "unwind" strategy is very rare. + panic_strategy: PanicStrategy::Abort, + // Similarly, one almost always never wants to use relocatable code because of the extra + // costs it involves. + relocation_model: "static".to_string(), + abi_blacklist: super::arm_base::abi_blacklist(), + .. Default::default() + } +} diff --git a/src/librustc_back/target/thumbv6m_none_eabi.rs b/src/librustc_back/target/thumbv6m_none_eabi.rs new file mode 100644 index 0000000000000..6c22f98538459 --- /dev/null +++ b/src/librustc_back/target/thumbv6m_none_eabi.rs @@ -0,0 +1,36 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Targets the Cortex-M0, Cortex-M0+ and Cortex-M1 processors (ARMv6-M architecture) + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "thumbv6m-none-eabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "none".to_string(), + target_env: "".to_string(), + target_vendor: "".to_string(), + + options: TargetOptions { + // The ARMv6-M architecture doesn't support unaligned loads/stores so we disable them + // with +strict-align. + features: "+strict-align".to_string(), + // There are no atomic instructions available in the instruction set of the ARMv6-M + // architecture + max_atomic_width: Some(0), + .. super::thumb_base::opts() + } + }) +} diff --git a/src/librustc_back/target/thumbv7em_none_eabi.rs b/src/librustc_back/target/thumbv7em_none_eabi.rs new file mode 100644 index 0000000000000..ddad4e3624f3c --- /dev/null +++ b/src/librustc_back/target/thumbv7em_none_eabi.rs @@ -0,0 +1,40 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Targets the Cortex-M4 and Cortex-M7 processors (ARMv7E-M) +// +// This target assumes that the device doesn't have a FPU (Floating Point Unit) and lowers all the +// floating point operations to software routines (intrinsics). +// +// As such, this target uses the "soft" calling convention (ABI) where floating point values are +// passed to/from subroutines via general purpose registers (R0, R1, etc.). 
+// +// To opt-in to hardware accelerated floating point operations, you can use, for example, +// `-C target-feature=+vfp4` or `-C target-cpu=cortex-m4`. + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "thumbv7em-none-eabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "none".to_string(), + target_env: "".to_string(), + target_vendor: "".to_string(), + + options: TargetOptions { + max_atomic_width: Some(32), + .. super::thumb_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/thumbv7em_none_eabihf.rs b/src/librustc_back/target/thumbv7em_none_eabihf.rs new file mode 100644 index 0000000000000..a9fac48e8e5ac --- /dev/null +++ b/src/librustc_back/target/thumbv7em_none_eabihf.rs @@ -0,0 +1,49 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Targets the Cortex-M4F and Cortex-M7F processors (ARMv7E-M) +// +// This target assumes that the device does have a FPU (Floating Point Unit) and lowers all (single +// precision) floating point operations to hardware instructions. +// +// Additionally, this target uses the "hard" floating convention (ABI) where floating point values +// are passed to/from subroutines via FPU registers (S0, S1, D0, D1, etc.). +// +// To opt into double precision hardware support, use the `-C target-feature=-fp-only-sp` flag. 
+ +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "thumbv7em-none-eabihf".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "none".to_string(), + target_env: "".to_string(), + target_vendor: "".to_string(), + + options: TargetOptions { + // `+vfp4` is the lowest common denominator between the Cortex-M4 (vfp4-16) and the + // Cortex-M7 (vfp5) + // `+d16` both the Cortex-M4 and the Cortex-M7 only have 16 double-precision registers + // available + // `+fp-only-sp` The Cortex-M4 only supports single precision floating point operations + // whereas in the Cortex-M7 double precision is optional + // + // Reference: + // ARMv7-M Architecture Reference Manual - A2.5 The optional floating-point extension + features: "+vfp4,+d16,+fp-only-sp".to_string(), + max_atomic_width: Some(32), + .. super::thumb_base::opts() + } + }) +} diff --git a/src/librustc_back/target/thumbv7m_none_eabi.rs b/src/librustc_back/target/thumbv7m_none_eabi.rs new file mode 100644 index 0000000000000..ed61dd0459b4d --- /dev/null +++ b/src/librustc_back/target/thumbv7m_none_eabi.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Targets the Cortex-M3 processor (ARMv7-M) + +use target::{Target, TargetOptions, TargetResult}; + +pub fn target() -> TargetResult { + Ok(Target { + llvm_target: "thumbv7m-none-eabi".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + data_layout: "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64".to_string(), + arch: "arm".to_string(), + target_os: "none".to_string(), + target_env: "".to_string(), + target_vendor: "".to_string(), + + options: TargetOptions { + max_atomic_width: Some(32), + .. super::thumb_base::opts() + }, + }) +} diff --git a/src/librustc_back/target/wasm32_unknown_emscripten.rs b/src/librustc_back/target/wasm32_unknown_emscripten.rs new file mode 100644 index 0000000000000..77ab4fcae7008 --- /dev/null +++ b/src/librustc_back/target/wasm32_unknown_emscripten.rs @@ -0,0 +1,42 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{Target, TargetOptions}; + +pub fn target() -> Result { + let opts = TargetOptions { + linker: "emcc".to_string(), + ar: "emar".to_string(), + + dynamic_linking: false, + executables: true, + // Today emcc emits two files - a .js file to bootstrap and + // possibly interpret the wasm, and a .wasm file + exe_suffix: ".js".to_string(), + linker_is_gnu: true, + allow_asm: false, + obj_is_bitcode: true, + max_atomic_width: Some(32), + post_link_args: vec!["-s".to_string(), "BINARYEN=1".to_string(), + "-s".to_string(), "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()], + .. 
Default::default() + }; + Ok(Target { + llvm_target: "asmjs-unknown-emscripten".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "32".to_string(), + target_os: "emscripten".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + data_layout: "e-p:32:32-i64:64-v128:32:128-n32-S128".to_string(), + arch: "wasm32".to_string(), + options: opts, + }) +} diff --git a/src/librustc_back/target/windows_base.rs b/src/librustc_back/target/windows_base.rs index 634a63cf0bb34..19ca0df51b9dc 100644 --- a/src/librustc_back/target/windows_base.rs +++ b/src/librustc_back/target/windows_base.rs @@ -25,8 +25,8 @@ pub fn opts() -> TargetOptions { staticlib_suffix: ".lib".to_string(), no_default_libraries: true, is_like_windows: true, - archive_format: "gnu".to_string(), - pre_link_args: vec!( + allows_weak_linkage: false, + pre_link_args: vec![ // And here, we see obscure linker flags #45. On windows, it has been // found to be necessary to have this flag to compile liblibc. // @@ -63,28 +63,27 @@ pub fn opts() -> TargetOptions { // Do not use the standard system startup files or libraries when linking "-nostdlib".to_string(), - ), - pre_link_objects_exe: vec!( + ], + pre_link_objects_exe: vec![ "crt2.o".to_string(), // mingw C runtime initialization for executables "rsbegin.o".to_string(), // Rust compiler runtime initialization, see rsbegin.rs - ), - pre_link_objects_dll: vec!( + ], + pre_link_objects_dll: vec![ "dllcrt2.o".to_string(), // mingw C runtime initialization for dlls "rsbegin.o".to_string(), - ), - late_link_args: vec!( + ], + late_link_args: vec![ "-lmingwex".to_string(), "-lmingw32".to_string(), "-lgcc".to_string(), // alas, mingw* libraries above depend on libgcc "-lmsvcrt".to_string(), "-luser32".to_string(), "-lkernel32".to_string(), - ), - post_link_objects: vec!( + ], + post_link_objects: vec![ "rsend.o".to_string() - ), + ], custom_unwind_resume: true, - exe_allocation_crate: super::maybe_jemalloc(), .. 
Default::default() } diff --git a/src/librustc_back/target/windows_msvc_base.rs b/src/librustc_back/target/windows_msvc_base.rs index fb88ce158e4b3..84e22e84fdb58 100644 --- a/src/librustc_back/target/windows_msvc_base.rs +++ b/src/librustc_back/target/windows_msvc_base.rs @@ -59,7 +59,6 @@ pub fn opts() -> TargetOptions { "/NOLOGO".to_string(), "/NXCOMPAT".to_string(), ], - archive_format: "gnu".to_string(), exe_allocation_crate: "alloc_system".to_string(), .. Default::default() diff --git a/src/librustc_back/target/x86_64_apple_darwin.rs b/src/librustc_back/target/x86_64_apple_darwin.rs index 3e19e1482909e..b3c1561dbcc0b 100644 --- a/src/librustc_back/target/x86_64_apple_darwin.rs +++ b/src/librustc_back/target/x86_64_apple_darwin.rs @@ -8,22 +8,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::apple_base::opts(); base.cpu = "core2".to_string(); + base.max_atomic_width = Some(128); // core2 support cmpxchg16b base.eliminate_frame_pointer = false; base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-apple-darwin".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "macos".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_apple_ios.rs b/src/librustc_back/target/x86_64_apple_ios.rs index 63234c0baee8c..7a58bb34ce7f6 100644 --- a/src/librustc_back/target/x86_64_apple_ios.rs +++ b/src/librustc_back/target/x86_64_apple_ios.rs @@ -8,18 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetOptions, TargetResult}; use super::apple_ios_base::{opts, Arch}; -pub fn target() -> Target { - Target { +pub fn target() -> TargetResult { + let base = opts(Arch::X86_64)?; + Ok(Target { llvm_target: "x86_64-apple-ios".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "ios".to_string(), target_env: "".to_string(), target_vendor: "apple".to_string(), - options: opts(Arch::X86_64) - } + options: TargetOptions { + max_atomic_width: Some(64), + .. base + } + }) } diff --git a/src/librustc_back/target/x86_64_pc_windows_gnu.rs b/src/librustc_back/target/x86_64_pc_windows_gnu.rs index 3e8438539156f..321585cd65eb3 100644 --- a/src/librustc_back/target/x86_64_pc_windows_gnu.rs +++ b/src/librustc_back/target/x86_64_pc_windows_gnu.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::windows_base::opts(); base.cpu = "x86-64".to_string(); base.pre_link_args.push("-m64".to_string()); + base.max_atomic_width = Some(64); - Target { + Ok(Target { llvm_target: "x86_64-pc-windows-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "windows".to_string(), target_env: "gnu".to_string(), target_vendor: "pc".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_pc_windows_msvc.rs b/src/librustc_back/target/x86_64_pc_windows_msvc.rs index 5030a1ff4483b..ea8909d213e80 100644 --- a/src/librustc_back/target/x86_64_pc_windows_msvc.rs +++ b/src/librustc_back/target/x86_64_pc_windows_msvc.rs @@ -8,21 +8,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::windows_msvc_base::opts(); base.cpu = "x86-64".to_string(); - base.custom_unwind_resume = true; + base.max_atomic_width = Some(64); - Target { + Ok(Target { llvm_target: "x86_64-pc-windows-msvc".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:w-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "windows".to_string(), target_env: "msvc".to_string(), target_vendor: "pc".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_rumprun_netbsd.rs b/src/librustc_back/target/x86_64_rumprun_netbsd.rs index d63ad53cc2bb9..3313721439696 100644 --- a/src/librustc_back/target/x86_64_rumprun_netbsd.rs +++ b/src/librustc_back/target/x86_64_rumprun_netbsd.rs @@ -8,28 +8,32 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::netbsd_base::opts(); + base.cpu = "x86-64".to_string(); base.pre_link_args.push("-m64".to_string()); base.linker = "x86_64-rumprun-netbsd-gcc".to_string(); base.ar = "x86_64-rumprun-netbsd-ar".to_string(); + base.max_atomic_width = Some(64); base.dynamic_linking = false; base.has_rpath = false; base.position_independent_executables = false; base.disable_redzone = true; base.no_default_libraries = false; + base.exe_allocation_crate = "alloc_system".to_string(); - Target { + Ok(Target { llvm_target: "x86_64-rumprun-netbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "netbsd".to_string(), target_env: "".to_string(), target_vendor: "rumprun".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_sun_solaris.rs b/src/librustc_back/target/x86_64_sun_solaris.rs new file mode 100644 index 0000000000000..8e4fd94e7bce4 --- /dev/null +++ b/src/librustc_back/target/x86_64_sun_solaris.rs @@ -0,0 +1,30 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::solaris_base::opts(); + base.pre_link_args.push("-m64".to_string()); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); + + Ok(Target { + llvm_target: "x86_64-pc-solaris".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "solaris".to_string(), + target_env: "".to_string(), + target_vendor: "sun".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/x86_64_unknown_bitrig.rs b/src/librustc_back/target/x86_64_unknown_bitrig.rs index 04456b1b2714a..eda16c29466b5 100644 --- a/src/librustc_back/target/x86_64_unknown_bitrig.rs +++ b/src/librustc_back/target/x86_64_unknown_bitrig.rs @@ -8,20 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::bitrig_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-bitrig".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "bitrig".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_dragonfly.rs b/src/librustc_back/target/x86_64_unknown_dragonfly.rs index 62654176aa486..194efb8fc2322 100644 --- a/src/librustc_back/target/x86_64_unknown_dragonfly.rs +++ b/src/librustc_back/target/x86_64_unknown_dragonfly.rs @@ -8,21 +8,23 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::dragonfly_base::opts(); base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-dragonfly".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "dragonfly".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_freebsd.rs b/src/librustc_back/target/x86_64_unknown_freebsd.rs index 888b7f58bffca..b127bee163b86 100644 --- a/src/librustc_back/target/x86_64_unknown_freebsd.rs +++ b/src/librustc_back/target/x86_64_unknown_freebsd.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::freebsd_base::opts(); base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-freebsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "freebsd".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_fuchsia.rs b/src/librustc_back/target/x86_64_unknown_fuchsia.rs new file mode 100644 index 0000000000000..08fe17a556ecc --- /dev/null +++ b/src/librustc_back/target/x86_64_unknown_fuchsia.rs @@ -0,0 +1,30 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::fuchsia_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); + base.pre_link_args.push("-m64".to_string()); + + Ok(Target { + llvm_target: "x86_64-unknown-fuchsia".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "fuchsia".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/x86_64_unknown_haiku.rs b/src/librustc_back/target/x86_64_unknown_haiku.rs new file mode 100644 index 0000000000000..7cf0599037c1e --- /dev/null +++ b/src/librustc_back/target/x86_64_unknown_haiku.rs @@ -0,0 +1,30 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use target::{Target, TargetResult}; + +pub fn target() -> TargetResult { + let mut base = super::haiku_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); + base.pre_link_args.push("-m64".to_string()); + + Ok(Target { + llvm_target: "x86_64-unknown-haiku".to_string(), + target_endian: "little".to_string(), + target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), + arch: "x86_64".to_string(), + target_os: "haiku".to_string(), + target_env: "".to_string(), + target_vendor: "unknown".to_string(), + options: base, + }) +} diff --git a/src/librustc_back/target/x86_64_unknown_linux_gnu.rs b/src/librustc_back/target/x86_64_unknown_linux_gnu.rs index e3ccd9c4c7e7d..f95bcb556e57f 100644 --- a/src/librustc_back/target/x86_64_unknown_linux_gnu.rs +++ b/src/librustc_back/target/x86_64_unknown_linux_gnu.rs @@ -8,21 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::linux_base::opts(); base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-linux-gnu".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "linux".to_string(), target_env: "gnu".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_linux_musl.rs b/src/librustc_back/target/x86_64_unknown_linux_musl.rs index dafbb924a9ca5..c3bf9dcca6ee4 100644 --- a/src/librustc_back/target/x86_64_unknown_linux_musl.rs +++ b/src/librustc_back/target/x86_64_unknown_linux_musl.rs @@ -8,75 +8,23 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { - let mut base = super::linux_base::opts(); +pub fn target() -> TargetResult { + let mut base = super::linux_musl_base::opts(); base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - // Make sure that the linker/gcc really don't pull in anything, including - // default objects, libs, etc. - base.pre_link_args.push("-nostdlib".to_string()); - base.pre_link_args.push("-static".to_string()); - - // At least when this was tested, the linker would not add the - // `GNU_EH_FRAME` program header to executables generated, which is required - // when unwinding to locate the unwinding information. I'm not sure why this - // argument is *not* necessary for normal builds, but it can't hurt! - base.pre_link_args.push("-Wl,--eh-frame-hdr".to_string()); - - // There's a whole bunch of circular dependencies when dealing with MUSL - // unfortunately. To put this in perspective libc is statically linked to - // liblibc and libunwind is statically linked to libstd: - // - // * libcore depends on `fmod` which is in libc (transitively in liblibc). - // liblibc, however, depends on libcore. - // * compiler-rt has personality symbols that depend on libunwind, but - // libunwind is in libstd which depends on compiler-rt. - // - // Recall that linkers discard libraries and object files as much as - // possible, and with all the static linking and archives flying around with - // MUSL the linker is super aggressively stripping out objects. For example - // the first case has fmod stripped from liblibc (it's in its own object - // file) so it's not there when libcore needs it. 
In the second example all - // the unused symbols from libunwind are stripped (each is in its own object - // file in libstd) before we end up linking compiler-rt which depends on - // those symbols. - // - // To deal with these circular dependencies we just force the compiler to - // link everything as a group, not stripping anything out until everything - // is processed. The linker will still perform a pass to strip out object - // files but it won't do so until all objects/archives have been processed. - base.pre_link_args.push("-Wl,-(".to_string()); - base.post_link_args.push("-Wl,-)".to_string()); - - // When generating a statically linked executable there's generally some - // small setup needed which is listed in these files. These are provided by - // a musl toolchain and are linked by default by the `musl-gcc` script. Note - // that `gcc` also does this by default, it just uses some different files. - // - // Each target directory for musl has these object files included in it so - // they'll be included from there. - base.pre_link_objects_exe.push("crt1.o".to_string()); - base.pre_link_objects_exe.push("crti.o".to_string()); - base.post_link_objects.push("crtn.o".to_string()); - - // MUSL support doesn't currently include dynamic linking, so there's no - // need for dylibs or rpath business. Additionally `-pie` is incompatible - // with `-static`, so we can't pass `-pie`. 
- base.dynamic_linking = false; - base.has_rpath = false; - base.position_independent_executables = false; - - Target { + Ok(Target { llvm_target: "x86_64-unknown-linux-musl".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "linux".to_string(), target_env: "musl".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_netbsd.rs b/src/librustc_back/target/x86_64_unknown_netbsd.rs index 4101fabe73480..87a7c184644d5 100644 --- a/src/librustc_back/target/x86_64_unknown_netbsd.rs +++ b/src/librustc_back/target/x86_64_unknown_netbsd.rs @@ -8,20 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::netbsd_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-netbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "netbsd".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/target/x86_64_unknown_openbsd.rs b/src/librustc_back/target/x86_64_unknown_openbsd.rs index 07a1e137b4196..e9d645b0d38f2 100644 --- a/src/librustc_back/target/x86_64_unknown_openbsd.rs +++ b/src/librustc_back/target/x86_64_unknown_openbsd.rs @@ -8,20 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use target::Target; +use target::{Target, TargetResult}; -pub fn target() -> Target { +pub fn target() -> TargetResult { let mut base = super::openbsd_base::opts(); + base.cpu = "x86-64".to_string(); + base.max_atomic_width = Some(64); base.pre_link_args.push("-m64".to_string()); - Target { + Ok(Target { llvm_target: "x86_64-unknown-openbsd".to_string(), target_endian: "little".to_string(), target_pointer_width: "64".to_string(), + data_layout: "e-m:e-i64:64-f80:128-n8:16:32:64-S128".to_string(), arch: "x86_64".to_string(), target_os: "openbsd".to_string(), target_env: "".to_string(), target_vendor: "unknown".to_string(), options: base, - } + }) } diff --git a/src/librustc_back/tempdir.rs b/src/librustc_back/tempdir.rs index 04739c7418cd0..e3e89223f2d65 100644 --- a/src/librustc_back/tempdir.rs +++ b/src/librustc_back/tempdir.rs @@ -45,7 +45,7 @@ impl TempDir { let storage; let mut tmpdir = tmpdir; if !tmpdir.is_absolute() { - let cur_dir = try!(env::current_dir()); + let cur_dir = env::current_dir()?; storage = cur_dir.join(tmpdir); tmpdir = &storage; // return TempDir::new_in(&cur_dir.join(tmpdir), prefix); diff --git a/src/librustc_bitflags/Cargo.toml b/src/librustc_bitflags/Cargo.toml new file mode 100644 index 0000000000000..d82a72994ca6f --- /dev/null +++ b/src/librustc_bitflags/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_bitflags" +version = "0.0.0" + +[lib] +name = "rustc_bitflags" +path = "lib.rs" +doctest = false diff --git a/src/librustc_bitflags/lib.rs b/src/librustc_bitflags/lib.rs index e2a929f58e14d..e65d112430a16 100644 --- a/src/librustc_bitflags/lib.rs +++ b/src/librustc_bitflags/lib.rs @@ -15,6 +15,7 @@ #![crate_type = "rlib"] #![no_std] #![unstable(feature = "rustc_private", issue = "27812")] +#![cfg_attr(not(stage0), deny(warnings))] //! A typesafe bitmask flag generator. @@ -200,7 +201,7 @@ macro_rules! 
bitflags { !(*self & other).is_empty() } - /// Returns `true` all of the flags in `other` are contained within `self`. + /// Returns `true` if all of the flags in `other` are contained within `self`. #[inline] pub fn contains(&self, other: $BitFlags) -> bool { (*self & other) == other @@ -290,8 +291,9 @@ macro_rules! bitflags { #[cfg(test)] #[allow(non_upper_case_globals)] mod tests { - use std::hash::{Hasher, Hash, SipHasher}; - use std::option::Option::{Some, None}; + use std::hash::{Hash, Hasher}; + use std::collections::hash_map::DefaultHasher; + use std::option::Option::{None, Some}; bitflags! { #[doc = "> The first principle is that you must not fool yourself — and"] @@ -491,7 +493,7 @@ mod tests { } fn hash(t: &T) -> u64 { - let mut s = SipHasher::new(); + let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } diff --git a/src/librustc_borrowck/Cargo.toml b/src/librustc_borrowck/Cargo.toml new file mode 100644 index 0000000000000..d53318f176848 --- /dev/null +++ b/src/librustc_borrowck/Cargo.toml @@ -0,0 +1,20 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_borrowck" +version = "0.0.0" + +[lib] +name = "rustc_borrowck" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +log = { path = "../liblog" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +graphviz = { path = "../libgraphviz" } +rustc = { path = "../librustc" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_mir = { path = "../librustc_mir" } +rustc_errors = { path = "../librustc_errors" } diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 5e8495ceddd97..5ed628d7dcae5 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -22,14 +22,13 @@ use borrowck::*; use borrowck::InteriorKind::{InteriorElement, InteriorField}; use rustc::middle::expr_use_visitor as euv; use 
rustc::middle::expr_use_visitor::MutateMode; -use rustc::middle::infer; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; -use rustc::middle::ty; +use rustc::ty::{self, TyCtxt}; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::hir; +use syntax_pos::Span; +use rustc::hir; use std::rc::Rc; @@ -51,13 +50,13 @@ fn owned_ptr_base_path<'a, 'tcx>(loan_path: &'a LoanPath<'tcx>) -> &'a LoanPath< match loan_path.kind { LpVar(_) | LpUpvar(_) => None, LpExtend(ref lp_base, _, LpDeref(mc::Unique)) => { - match helper(&**lp_base) { + match helper(&lp_base) { v @ Some(_) => v, - None => Some(&**lp_base) + None => Some(&lp_base) } } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => helper(&**lp_base) + LpExtend(ref lp_base, ..) => helper(&lp_base) } } } @@ -81,7 +80,7 @@ fn owned_ptr_base_path_rc<'tcx>(loan_path: &Rc>) -> Rc helper(lp_base) + LpExtend(ref lp_base, ..) => helper(lp_base) } } } @@ -91,7 +90,7 @@ struct CheckLoanCtxt<'a, 'tcx: 'a> { dfcx_loans: &'a LoanDataFlow<'a, 'tcx>, move_data: &'a move_data::FlowedMoveData<'a, 'tcx>, all_loans: &'a [Loan<'tcx>], - param_env: &'a ty::ParameterEnvironment<'a, 'tcx>, + param_env: &'a ty::ParameterEnvironment<'tcx>, } impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { @@ -127,7 +126,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: euv::LoanCause) { @@ -136,15 +135,12 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { borrow_id, cmt, loan_region, bk, loan_cause); - match opt_loan_path(&cmt) { - Some(lp) => { - let moved_value_use_kind = match loan_cause { - euv::ClosureCapture(_) => MovedInCapture, - _ => MovedInUse, - }; - self.check_if_path_is_moved(borrow_id, borrow_span, moved_value_use_kind, &lp); - } - None => { } + if let 
Some(lp) = opt_loan_path(&cmt) { + let moved_value_use_kind = match loan_cause { + euv::ClosureCapture(_) => MovedInCapture, + _ => MovedInUse, + }; + self.check_if_path_is_moved(borrow_id, borrow_span, moved_value_use_kind, &lp); } self.check_for_conflicting_loans(borrow_id); @@ -159,33 +155,29 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { debug!("mutate(assignment_id={}, assignee_cmt={:?})", assignment_id, assignee_cmt); - match opt_loan_path(&assignee_cmt) { - Some(lp) => { - match mode { - MutateMode::Init | MutateMode::JustWrite => { - // In a case like `path = 1`, then path does not - // have to be *FULLY* initialized, but we still - // must be careful lest it contains derefs of - // pointers. - self.check_if_assigned_path_is_moved(assignee_cmt.id, - assignment_span, - MovedInUse, - &lp); - } - MutateMode::WriteAndRead => { - // In a case like `path += 1`, then path must be - // fully initialized, since we will read it before - // we write it. - self.check_if_path_is_moved(assignee_cmt.id, - assignment_span, - MovedInUse, - &lp); - } + if let Some(lp) = opt_loan_path(&assignee_cmt) { + match mode { + MutateMode::Init | MutateMode::JustWrite => { + // In a case like `path = 1`, then path does not + // have to be *FULLY* initialized, but we still + // must be careful lest it contains derefs of + // pointers. + self.check_if_assigned_path_is_moved(assignee_cmt.id, + assignment_span, + MovedInUse, + &lp); + } + MutateMode::WriteAndRead => { + // In a case like `path += 1`, then path must be + // fully initialized, since we will read it before + // we write it. 
+ self.check_if_path_is_moved(assignee_cmt.id, + assignment_span, + MovedInUse, + &lp); } } - None => { } } - self.check_assignment(assignment_id, assignment_span, assignee_cmt); } @@ -198,12 +190,11 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, all_loans: &[Loan<'tcx>], fn_id: ast::NodeId, decl: &hir::FnDecl, - body: &hir::Block) { + body: &hir::Expr) { debug!("check_loans(body id={})", body.id); let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id); - let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env)); - + let infcx = bccx.tcx.borrowck_fake_infer_ctxt(param_env); let mut clcx = CheckLoanCtxt { bccx: bccx, dfcx_loans: dfcx_loans, @@ -211,11 +202,7 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, all_loans: all_loans, param_env: &infcx.parameter_environment }; - - { - let mut euv = euv::ExprUseVisitor::new(&mut clcx, &infcx); - euv.walk_fn(decl, body); - } + euv::ExprUseVisitor::new(&mut clcx, &infcx).walk_fn(decl, body); } #[derive(PartialEq)] @@ -231,7 +218,7 @@ fn compatible_borrow_kinds(borrow_kind1: ty::BorrowKind, } impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { - pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx } + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.bccx.tcx } pub fn each_issued_loan(&self, node: ast::NodeId, mut op: F) -> bool where F: FnMut(&Loan<'tcx>) -> bool, @@ -318,8 +305,8 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { break; } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => { - loan_path = &**lp_base; + LpExtend(ref lp_base, ..) 
=> { + loan_path = &lp_base; } } @@ -442,23 +429,25 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // borrow prevents subsequent moves, borrows, or modification of `x` until the // borrow ends - let common = new_loan.loan_path.common(&*old_loan.loan_path); - let (nl, ol, new_loan_msg, old_loan_msg) = - if new_loan.loan_path.has_fork(&*old_loan.loan_path) && common.is_some() { + let common = new_loan.loan_path.common(&old_loan.loan_path); + let (nl, ol, new_loan_msg, old_loan_msg) = { + if new_loan.loan_path.has_fork(&old_loan.loan_path) && common.is_some() { let nl = self.bccx.loan_path_to_string(&common.unwrap()); let ol = nl.clone(); - let new_loan_msg = format!(" (here through borrowing `{}`)", + let new_loan_msg = format!(" (via `{}`)", self.bccx.loan_path_to_string( - &*new_loan.loan_path)); - let old_loan_msg = format!(" (through borrowing `{}`)", + &new_loan.loan_path)); + let old_loan_msg = format!(" (via `{}`)", self.bccx.loan_path_to_string( - &*old_loan.loan_path)); + &old_loan.loan_path)); (nl, ol, new_loan_msg, old_loan_msg) } else { - (self.bccx.loan_path_to_string(&*new_loan.loan_path), - self.bccx.loan_path_to_string(&*old_loan.loan_path), - String::new(), String::new()) - }; + (self.bccx.loan_path_to_string(&new_loan.loan_path), + self.bccx.loan_path_to_string(&old_loan.loan_path), + String::new(), + String::new()) + } + }; let ol_pronoun = if new_loan.loan_path == old_loan.loan_path { "it".to_string() @@ -466,102 +455,133 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { format!("`{}`", ol) }; + // We want to assemble all the relevant locations for the error. + // + // 1. Where did the new loan occur. + // - if due to closure creation, where was the variable used in closure? + // 2. Where did old loan occur. + // 3. Where does old loan expire. 
+ + let previous_end_span = + self.tcx().map.span(old_loan.kill_scope.node_id(&self.tcx().region_maps)) + .end_point(); + let mut err = match (new_loan.kind, old_loan.kind) { (ty::MutBorrow, ty::MutBorrow) => { - struct_span_err!(self.bccx, new_loan.span, E0499, - "cannot borrow `{}`{} as mutable \ - more than once at a time", - nl, new_loan_msg) + let mut err = struct_span_err!(self.bccx, new_loan.span, E0499, + "cannot borrow `{}`{} as mutable \ + more than once at a time", + nl, new_loan_msg); + err.span_label( + old_loan.span, + &format!("first mutable borrow occurs here{}", old_loan_msg)); + err.span_label( + new_loan.span, + &format!("second mutable borrow occurs here{}", new_loan_msg)); + err.span_label( + previous_end_span, + &format!("first borrow ends here")); + err + } + + (ty::UniqueImmBorrow, ty::UniqueImmBorrow) => { + let mut err = struct_span_err!(self.bccx, new_loan.span, E0524, + "two closures require unique access to `{}` \ + at the same time", + nl); + err.span_label( + old_loan.span, + &format!("first closure is constructed here")); + err.span_label( + new_loan.span, + &format!("second closure is constructed here")); + err.span_label( + previous_end_span, + &format!("borrow from first closure ends here")); + err } (ty::UniqueImmBorrow, _) => { - struct_span_err!(self.bccx, new_loan.span, E0500, - "closure requires unique access to `{}` \ - but {} is already borrowed{}", - nl, ol_pronoun, old_loan_msg) + let mut err = struct_span_err!(self.bccx, new_loan.span, E0500, + "closure requires unique access to `{}` \ + but {} is already borrowed{}", + nl, ol_pronoun, old_loan_msg); + err.span_label( + new_loan.span, + &format!("closure construction occurs here{}", new_loan_msg)); + err.span_label( + old_loan.span, + &format!("borrow occurs here{}", old_loan_msg)); + err.span_label( + previous_end_span, + &format!("borrow ends here")); + err } (_, ty::UniqueImmBorrow) => { - struct_span_err!(self.bccx, new_loan.span, E0501, - "cannot borrow `{}`{} as {} 
because \ - previous closure requires unique access", - nl, new_loan_msg, new_loan.kind.to_user_str()) + let mut err = struct_span_err!(self.bccx, new_loan.span, E0501, + "cannot borrow `{}`{} as {} because \ + previous closure requires unique access", + nl, new_loan_msg, new_loan.kind.to_user_str()); + err.span_label( + new_loan.span, + &format!("borrow occurs here{}", new_loan_msg)); + err.span_label( + old_loan.span, + &format!("closure construction occurs here{}", old_loan_msg)); + err.span_label( + previous_end_span, + &format!("borrow from closure ends here")); + err } - (_, _) => { - struct_span_err!(self.bccx, new_loan.span, E0502, - "cannot borrow `{}`{} as {} because \ - {} is also borrowed as {}{}", - nl, - new_loan_msg, + (..) => { + let mut err = struct_span_err!(self.bccx, new_loan.span, E0502, + "cannot borrow `{}`{} as {} because \ + {} is also borrowed as {}{}", + nl, + new_loan_msg, + new_loan.kind.to_user_str(), + ol_pronoun, + old_loan.kind.to_user_str(), + old_loan_msg); + err.span_label( + new_loan.span, + &format!("{} borrow occurs here{}", new_loan.kind.to_user_str(), - ol_pronoun, + new_loan_msg)); + err.span_label( + old_loan.span, + &format!("{} borrow occurs here{}", old_loan.kind.to_user_str(), - old_loan_msg) + old_loan_msg)); + err.span_label( + previous_end_span, + &format!("{} borrow ends here", + old_loan.kind.to_user_str())); + err } }; match new_loan.cause { euv::ClosureCapture(span) => { - err.span_note( + err.span_label( span, - &format!("borrow occurs due to use of `{}` in closure", - nl)); + &format!("borrow occurs due to use of `{}` in closure", nl)); } _ => { } } - let rule_summary = match old_loan.kind { - ty::MutBorrow => { - format!("the mutable borrow prevents subsequent \ - moves, borrows, or modification of `{0}` \ - until the borrow ends", - ol) - } - - ty::ImmBorrow => { - format!("the immutable borrow prevents subsequent \ - moves or mutable borrows of `{0}` \ - until the borrow ends", - ol) - } - - 
ty::UniqueImmBorrow => { - format!("the unique capture prevents subsequent \ - moves or borrows of `{0}` \ - until the borrow ends", - ol) - } - }; - - let borrow_summary = match old_loan.cause { - euv::ClosureCapture(_) => { - format!("previous borrow of `{}` occurs here{} due to \ - use in closure", - ol, old_loan_msg) - } - - euv::OverloadedOperator | - euv::AddrOf | - euv::AutoRef | - euv::AutoUnsafe | - euv::ClosureInvocation | - euv::ForLoop | - euv::RefBinding | - euv::MatchDiscriminant => { - format!("previous borrow of `{}` occurs here{}", - ol, old_loan_msg) + match old_loan.cause { + euv::ClosureCapture(span) => { + err.span_label( + span, + &format!("previous borrow occurs due to use of `{}` in closure", + ol)); } - }; - - err.span_note( - old_loan.span, - &format!("{}; {}", borrow_summary, rule_summary)); + _ => { } + } - let old_loan_span = self.tcx().map.span( - old_loan.kill_scope.node_id(&self.tcx().region_maps)); - err.span_end_note(old_loan_span, - "previous borrow ends here"); err.emit(); return false; } @@ -574,39 +594,36 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { span: Span, cmt: mc::cmt<'tcx>, mode: euv::ConsumeMode) { - match opt_loan_path(&cmt) { - Some(lp) => { - let moved_value_use_kind = match mode { - euv::Copy => { - self.check_for_copy_of_frozen_path(id, span, &*lp); - MovedInUse - } - euv::Move(_) => { - match self.move_data.kind_of_move_of_path(id, &lp) { - None => { - // Sometimes moves don't have a move kind; - // this either means that the original move - // was from something illegal to move, - // or was moved from referent of an unsafe - // pointer or something like that. 
+ if let Some(lp) = opt_loan_path(&cmt) { + let moved_value_use_kind = match mode { + euv::Copy => { + self.check_for_copy_of_frozen_path(id, span, &lp); + MovedInUse + } + euv::Move(_) => { + match self.move_data.kind_of_move_of_path(id, &lp) { + None => { + // Sometimes moves don't have a move kind; + // this either means that the original move + // was from something illegal to move, + // or was moved from referent of an unsafe + // pointer or something like that. + MovedInUse + } + Some(move_kind) => { + self.check_for_move_of_borrowed_path(id, span, + &lp, move_kind); + if move_kind == move_data::Captured { + MovedInCapture + } else { MovedInUse } - Some(move_kind) => { - self.check_for_move_of_borrowed_path(id, span, - &*lp, move_kind); - if move_kind == move_data::Captured { - MovedInCapture - } else { - MovedInUse - } - } } } - }; + } + }; - self.check_if_path_is_moved(id, span, moved_value_use_kind, &lp); - } - None => { } + self.check_if_path_is_moved(id, span, moved_value_use_kind, &lp); } } @@ -620,10 +637,13 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { struct_span_err!(self.bccx, span, E0503, "cannot use `{}` because it was mutably borrowed", &self.bccx.loan_path_to_string(copy_path)) - .span_note(loan_span, + .span_label(loan_span, &format!("borrow of `{}` occurs here", - &self.bccx.loan_path_to_string(&*loan_path)) + &self.bccx.loan_path_to_string(&loan_path)) ) + .span_label(span, + &format!("use of borrowed `{}`", + &self.bccx.loan_path_to_string(&loan_path))) .emit(); } } @@ -641,23 +661,41 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { UseOk => { } UseWhileBorrowed(loan_path, loan_span) => { let mut err = match move_kind { - move_data::Captured => - struct_span_err!(self.bccx, span, E0504, + move_data::Captured => { + let mut err = struct_span_err!(self.bccx, span, E0504, "cannot move `{}` into closure because it is borrowed", - &self.bccx.loan_path_to_string(move_path)), + &self.bccx.loan_path_to_string(move_path)); + err.span_label( + loan_span, + 
&format!("borrow of `{}` occurs here", + &self.bccx.loan_path_to_string(&loan_path)) + ); + err.span_label( + span, + &format!("move into closure occurs here") + ); + err + } move_data::Declared | move_data::MoveExpr | - move_data::MovePat => - struct_span_err!(self.bccx, span, E0505, + move_data::MovePat => { + let mut err = struct_span_err!(self.bccx, span, E0505, "cannot move out of `{}` because it is borrowed", - &self.bccx.loan_path_to_string(move_path)) + &self.bccx.loan_path_to_string(move_path)); + err.span_label( + loan_span, + &format!("borrow of `{}` occurs here", + &self.bccx.loan_path_to_string(&loan_path)) + ); + err.span_label( + span, + &format!("move out of `{}` occurs here", + &self.bccx.loan_path_to_string(move_path)) + ); + err + } }; - err.span_note( - loan_span, - &format!("borrow of `{}` occurs here", - &self.bccx.loan_path_to_string(&*loan_path)) - ); err.emit(); } } @@ -706,7 +744,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { self.bccx.report_use_of_moved_value( span, use_kind, - &**lp, + &lp, the_move, moved_lp, self.param_env); @@ -748,7 +786,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } LpExtend(ref lp_base, _, LpInterior(_, InteriorField(_))) => { match lp_base.to_type().sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) if def.has_dtor() => { + ty::TyAdt(def, _) if def.has_dtor() => { // In the case where the owner implements drop, then // the path must be initialized to prevent a case of // partial reinitialization @@ -760,7 +798,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { self.bccx .report_partial_reinitialization_of_uninitialized_structure( span, - &*loan_path); + &loan_path); false }); return; @@ -790,8 +828,8 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // Check that we don't invalidate any outstanding loans if let Some(loan_path) = opt_loan_path(&assignee_cmt) { let scope = self.tcx().region_maps.node_extent(assignment_id); - self.each_in_scope_loan_affecting_path(scope, &*loan_path, |loan| { - 
self.report_illegal_mutation(assignment_span, &*loan_path, loan); + self.each_in_scope_loan_affecting_path(scope, &loan_path, |loan| { + self.report_illegal_mutation(assignment_span, &loan_path, loan); false }); } @@ -807,7 +845,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { } else { self.bccx.report_reassigned_immutable_variable( assignment_span, - &*lp, + &lp, assign); } false @@ -823,9 +861,12 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { struct_span_err!(self.bccx, span, E0506, "cannot assign to `{}` because it is borrowed", self.bccx.loan_path_to_string(loan_path)) - .span_note(loan.span, + .span_label(loan.span, &format!("borrow of `{}` occurs here", self.bccx.loan_path_to_string(loan_path))) + .span_label(span, + &format!("assignment to borrowed `{}` occurs here", + self.bccx.loan_path_to_string(loan_path))) .emit(); } } diff --git a/src/librustc_borrowck/borrowck/fragments.rs b/src/librustc_borrowck/borrowck/fragments.rs index c5e2b69683b10..b0a1b3498545f 100644 --- a/src/librustc_borrowck/borrowck/fragments.rs +++ b/src/librustc_borrowck/borrowck/fragments.rs @@ -20,15 +20,14 @@ use borrowck::LoanPathKind::{LpVar, LpUpvar, LpDowncast, LpExtend}; use borrowck::LoanPathElem::{LpDeref, LpInterior}; use borrowck::move_data::InvalidMovePathIndex; use borrowck::move_data::{MoveData, MovePathIndex}; -use rustc::middle::def_id::{DefId}; -use rustc::middle::ty; +use rustc::hir::def_id::{DefId}; +use rustc::ty::{self, AdtKind, TyCtxt}; use rustc::middle::mem_categorization as mc; use std::mem; use std::rc::Rc; use syntax::ast; -use syntax::codemap::Span; -use syntax::attr::AttrMetaMethods; +use syntax_pos::{Span, DUMMY_SP}; #[derive(PartialEq, Eq, PartialOrd, Ord)] enum Fragment { @@ -199,10 +198,10 @@ impl FragmentSets { } } -pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>, - tcx: &ty::ctxt<'tcx>, - sp: Span, - id: ast::NodeId) { +pub fn instrument_move_fragments<'a, 'tcx>(this: &MoveData<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + sp: Span, + id: 
ast::NodeId) { let span_err = tcx.map.attrs(id).iter() .any(|a| a.check_name("rustc_move_fragments")); let print = tcx.sess.opts.debugging_opts.print_move_fragments; @@ -245,7 +244,7 @@ pub fn instrument_move_fragments<'tcx>(this: &MoveData<'tcx>, /// /// Note: "left-over fragments" means paths that were not directly referenced in moves nor /// assignments, but must nonetheless be tracked as potential drop obligations. -pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) { +pub fn fixup_fragment_sets<'a, 'tcx>(this: &MoveData<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) { let mut fragments = this.fragments.borrow_mut(); @@ -346,11 +345,11 @@ pub fn fixup_fragment_sets<'tcx>(this: &MoveData<'tcx>, tcx: &ty::ctxt<'tcx>) { /// Adds all of the precisely-tracked siblings of `lp` as potential move paths of interest. For /// example, if `lp` represents `s.x.j`, then adds moves paths for `s.x.i` and `s.x.k`, the /// siblings of `s.x.j`. -fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>, - tcx: &ty::ctxt<'tcx>, - gathered_fragments: &mut Vec, - lp: Rc>, - origin_id: Option) { +fn add_fragment_siblings<'a, 'tcx>(this: &MoveData<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + gathered_fragments: &mut Vec, + lp: Rc>, + origin_id: Option) { match lp.kind { LpVar(_) | LpUpvar(..) => {} // Local variables have no siblings. @@ -366,9 +365,9 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>, } // *LV for unsafe and borrowed pointers do not consume their loan path, so stop here. - LpExtend(_, _, LpDeref(mc::UnsafePtr(..))) | - LpExtend(_, _, LpDeref(mc::Implicit(..))) | - LpExtend(_, _, LpDeref(mc::BorrowedPtr(..))) => {} + LpExtend(.., LpDeref(mc::UnsafePtr(..))) | + LpExtend(.., LpDeref(mc::Implicit(..))) | + LpExtend(.., LpDeref(mc::BorrowedPtr(..))) => {} // FIXME (pnkfelix): LV[j] should be tracked, at least in the // sense of we will track the remaining drop obligation of the @@ -379,7 +378,7 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>, // bind. 
// // Anyway, for now: LV[j] is not tracked precisely - LpExtend(_, _, LpInterior(_, InteriorElement(..))) => { + LpExtend(.., LpInterior(_, InteriorElement(..))) => { let mp = this.move_path(tcx, lp.clone()); gathered_fragments.push(AllButOneFrom(mp)); } @@ -405,16 +404,16 @@ fn add_fragment_siblings<'tcx>(this: &MoveData<'tcx>, /// We have determined that `origin_lp` destructures to LpExtend(parent, original_field_name). /// Based on this, add move paths for all of the siblings of `origin_lp`. -fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>, - tcx: &ty::ctxt<'tcx>, - gathered_fragments: &mut Vec, - parent_lp: &Rc>, - mc: mc::MutabilityCategory, - origin_field_name: &mc::FieldName, - origin_lp: &Rc>, - origin_id: Option, - enum_variant_info: Option<(DefId, - Rc>)>) { +fn add_fragment_siblings_for_extension<'a, 'tcx>(this: &MoveData<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + gathered_fragments: &mut Vec, + parent_lp: &Rc>, + mc: mc::MutabilityCategory, + origin_field_name: &mc::FieldName, + origin_lp: &Rc>, + origin_id: Option, + enum_variant_info: Option<(DefId, + Rc>)>) { let parent_ty = parent_lp.to_type(); let mut add_fragment_sibling_local = |field_name, variant_did| { @@ -423,13 +422,13 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>, variant_did); }; - match (&parent_ty.sty, enum_variant_info) { - (&ty::TyTuple(ref v), None) => { + match parent_ty.sty { + ty::TyTuple(ref v) => { let tuple_idx = match *origin_field_name { mc::PositionalField(tuple_idx) => tuple_idx, mc::NamedField(_) => - panic!("tuple type {:?} should not have named fields.", - parent_ty), + bug!("tuple type {:?} should not have named fields.", + parent_ty), }; let tuple_len = v.len(); for i in 0..tuple_len { @@ -439,78 +438,83 @@ fn add_fragment_siblings_for_extension<'tcx>(this: &MoveData<'tcx>, } } - (&ty::TyStruct(def, _), None) => { - match *origin_field_name { - mc::NamedField(ast_name) => { - for f in &def.struct_variant().fields { - if f.name == 
ast_name { - continue; + ty::TyAdt(def, ..) => match def.adt_kind() { + AdtKind::Struct => { + match *origin_field_name { + mc::NamedField(ast_name) => { + for f in &def.struct_variant().fields { + if f.name == ast_name { + continue; + } + let field_name = mc::NamedField(f.name); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::NamedField(f.name); - add_fragment_sibling_local(field_name, None); } - } - mc::PositionalField(tuple_idx) => { - for (i, _f) in def.struct_variant().fields.iter().enumerate() { - if i == tuple_idx { - continue + mc::PositionalField(tuple_idx) => { + for (i, _f) in def.struct_variant().fields.iter().enumerate() { + if i == tuple_idx { + continue + } + let field_name = mc::PositionalField(i); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::PositionalField(i); - add_fragment_sibling_local(field_name, None); } } } - } - - (&ty::TyEnum(def, _), ref enum_variant_info) => { - let variant = match *enum_variant_info { - Some((vid, ref _lp2)) => def.variant_with_id(vid), - None => { - assert!(def.is_univariant()); - &def.variants[0] - } - }; - match *origin_field_name { - mc::NamedField(ast_name) => { - for field in &variant.fields { - if field.name == ast_name { - continue; + AdtKind::Union => { + // Do nothing, all union fields are moved/assigned together. 
+ } + AdtKind::Enum => { + let variant = match enum_variant_info { + Some((vid, ref _lp2)) => def.variant_with_id(vid), + None => { + assert!(def.is_univariant()); + &def.variants[0] + } + }; + match *origin_field_name { + mc::NamedField(ast_name) => { + for field in &variant.fields { + if field.name == ast_name { + continue; + } + let field_name = mc::NamedField(field.name); + add_fragment_sibling_local(field_name, Some(variant.did)); } - let field_name = mc::NamedField(field.name); - add_fragment_sibling_local(field_name, Some(variant.did)); } - } - mc::PositionalField(tuple_idx) => { - for (i, _f) in variant.fields.iter().enumerate() { - if tuple_idx == i { - continue; + mc::PositionalField(tuple_idx) => { + for (i, _f) in variant.fields.iter().enumerate() { + if tuple_idx == i { + continue; + } + let field_name = mc::PositionalField(i); + add_fragment_sibling_local(field_name, None); } - let field_name = mc::PositionalField(i); - add_fragment_sibling_local(field_name, None); } } } - } + }, - ref sty_and_variant_info => { - let msg = format!("type {:?} ({:?}) is not fragmentable", - parent_ty, sty_and_variant_info); - let opt_span = origin_id.and_then(|id|tcx.map.opt_span(id)); - tcx.sess.opt_span_bug(opt_span, &msg[..]) + ref ty => { + let span = origin_id.map_or(DUMMY_SP, |id| tcx.map.span(id)); + span_bug!(span, + "type {:?} ({:?}) is not fragmentable", + parent_ty, ty); } } } /// Adds the single sibling `LpExtend(parent, new_field_name)` of `origin_lp` (the original /// loan-path). 
-fn add_fragment_sibling_core<'tcx>(this: &MoveData<'tcx>, - tcx: &ty::ctxt<'tcx>, - gathered_fragments: &mut Vec, - parent: Rc>, - mc: mc::MutabilityCategory, - new_field_name: mc::FieldName, - origin_lp: &Rc>, - enum_variant_did: Option) -> MovePathIndex { +fn add_fragment_sibling_core<'a, 'tcx>(this: &MoveData<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + gathered_fragments: &mut Vec, + parent: Rc>, + mc: mc::MutabilityCategory, + new_field_name: mc::FieldName, + origin_lp: &Rc>, + enum_variant_did: Option) + -> MovePathIndex { let opt_variant_did = match parent.kind { LpDowncast(_, variant_did) => Some(variant_did), LpVar(..) | LpUpvar(..) | LpExtend(..) => enum_variant_did, diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 90c9361623252..2c277c04a52e3 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -18,12 +18,12 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::mem_categorization::InteriorOffsetKind as Kind; -use rustc::middle::ty; +use rustc::ty; use std::rc::Rc; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::hir; +use syntax_pos::Span; +use rustc::hir::{self, PatKind}; struct GatherMoveInfo<'tcx> { id: ast::NodeId, @@ -37,7 +37,7 @@ pub fn gather_decl<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, decl_id: ast::NodeId, _decl_span: Span, var_id: ast::NodeId) { - let ty = bccx.tcx.node_id_to_type(var_id); + let ty = bccx.tcx.tables().node_id_to_type(var_id); let loan_path = Rc::new(LoanPath::new(LpVar(var_id), ty)); move_data.add_move(bccx.tcx, loan_path, decl_id, Declared); } @@ -78,8 +78,8 @@ pub fn gather_match_variant<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, LpDowncast(ref base_lp, _) => move_data.add_variant_match( tcx, lp.clone(), move_pat.id, 
base_lp.clone(), mode), - _ => panic!("should only call gather_match_variant \ - for cat_downcast cmt"), + _ => bug!("should only call gather_match_variant \ + for cat_downcast cmt"), } } None => { @@ -98,9 +98,9 @@ pub fn gather_move_from_pat<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_pat: &hir::Pat, cmt: mc::cmt<'tcx>) { let pat_span_path_opt = match move_pat.node { - hir::PatIdent(_, ref path1, _) => { + PatKind::Binding(_, _, ref path1, _) => { Some(MoveSpanAndPath{span: move_pat.span, - name: path1.node.name}) + name: path1.node}) }, _ => None, }; @@ -122,15 +122,12 @@ fn gather_move<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, let potentially_illegal_move = check_and_get_illegal_move_origin(bccx, &move_info.cmt); - match potentially_illegal_move { - Some(illegal_move_origin) => { - debug!("illegal_move_origin={:?}", illegal_move_origin); - let error = MoveError::with_move_info(illegal_move_origin, - move_info.span_path_opt); - move_error_collector.add_error(error); - return - } - None => () + if let Some(illegal_move_origin) = potentially_illegal_move { + debug!("illegal_move_origin={:?}", illegal_move_origin); + let error = MoveError::with_move_info(illegal_move_origin, + move_info.span_path_opt); + move_error_collector.add_error(error); + return; } match opt_loan_path(&move_info.cmt) { @@ -164,9 +161,9 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, cmt: &mc::cmt<'tcx>) -> Option> { match cmt.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem => { Some(cmt.clone()) } @@ -181,13 +178,14 @@ fn check_and_get_illegal_move_origin<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, Categorization::Interior(ref b, mc::InteriorField(_)) | 
Categorization::Interior(ref b, mc::InteriorElement(Kind::Pattern, _)) => { match b.ty.sty { - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { + ty::TyAdt(def, _) => { if def.has_dtor() { Some(cmt.clone()) } else { check_and_get_illegal_move_origin(bccx, b) } } + ty::TySlice(..) => Some(cmt.clone()), _ => { check_and_get_illegal_move_origin(bccx, b) } diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index 84dce6d35702a..5970d6e4f2f65 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -16,10 +16,10 @@ use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; -use rustc::middle::ty; +use rustc::ty; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; type R = Result<(),()>; @@ -28,7 +28,7 @@ pub fn guarantee_lifetime<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span: Span, cause: euv::LoanCause, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, _: ty::BorrowKind) -> Result<(),()> { //! Reports error if `loan_region` is larger than S @@ -56,7 +56,7 @@ struct GuaranteeLifetimeContext<'a, 'tcx: 'a> { span: Span, cause: euv::LoanCause, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cmt_original: mc::cmt<'tcx> } @@ -74,9 +74,9 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { Categorization::Rvalue(..) | Categorization::Local(..) | // L-Local Categorization::Upvar(..) 
| - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | // L-Deref-Borrowed - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) => { + Categorization::Deref(.., mc::BorrowedPtr(..)) | // L-Deref-Borrowed + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) => { self.check_scope(self.scope(cmt)) } @@ -92,17 +92,17 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } } - fn check_scope(&self, max_scope: ty::Region) -> R { + fn check_scope(&self, max_scope: &'tcx ty::Region) -> R { //! Reports an error if `loan_region` is larger than `max_scope` if !self.bccx.is_subregion_of(self.loan_region, max_scope) { - Err(self.report_error(err_out_of_scope(max_scope, self.loan_region))) + Err(self.report_error(err_out_of_scope(max_scope, self.loan_region, self.cause))) } else { Ok(()) } } - fn scope(&self, cmt: &mc::cmt) -> ty::Region { + fn scope(&self, cmt: &mc::cmt<'tcx>) -> &'tcx ty::Region { //! Returns the maximal region scope for the which the //! lvalue `cmt` is guaranteed to be valid without any //! rooting etc, and presuming `cmt` is not mutated. @@ -112,19 +112,18 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { temp_scope } Categorization::Upvar(..) 
=> { - ty::ReScope(self.item_scope) - } - Categorization::StaticItem => { - ty::ReStatic + self.bccx.tcx.mk_region(ty::ReScope(self.item_scope)) } Categorization::Local(local_id) => { - ty::ReScope(self.bccx.tcx.region_maps.var_scope(local_id)) + self.bccx.tcx.mk_region(ty::ReScope( + self.bccx.tcx.region_maps.var_scope(local_id))) } - Categorization::Deref(_, _, mc::UnsafePtr(..)) => { - ty::ReStatic + Categorization::StaticItem | + Categorization::Deref(.., mc::UnsafePtr(..)) => { + self.bccx.tcx.mk_region(ty::ReStatic) } - Categorization::Deref(_, _, mc::BorrowedPtr(_, r)) | - Categorization::Deref(_, _, mc::Implicit(_, r)) => { + Categorization::Deref(.., mc::BorrowedPtr(_, r)) | + Categorization::Deref(.., mc::Implicit(_, r)) => { r } Categorization::Downcast(ref cmt, _) | @@ -135,7 +134,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { } } - fn report_error(&self, code: bckerr_code) { + fn report_error(&self, code: bckerr_code<'tcx>) { self.bccx.report(BckError { cmt: self.cmt_original.clone(), span: self.span, cause: BorrowViolation(self.cause), diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index 8cf10cb9b05a0..5d59b58b847d9 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -19,19 +19,18 @@ use borrowck::*; use borrowck::move_data::MoveData; use rustc::middle::expr_use_visitor as euv; -use rustc::middle::infer; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; -use rustc::middle::ty; +use rustc::ty::{self, TyCtxt}; use syntax::ast; -use syntax::codemap::Span; use syntax::ast::NodeId; -use rustc_front::hir; -use rustc_front::hir::Expr; -use rustc_front::intravisit; -use rustc_front::intravisit::Visitor; +use syntax_pos::Span; +use rustc::hir; +use rustc::hir::Expr; +use rustc::hir::intravisit; +use rustc::hir::intravisit::{Visitor, 
NestedVisitorMap}; use self::restrictions::RestrictionResult; @@ -43,7 +42,7 @@ mod move_error; pub fn gather_loans_in_fn<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, fn_id: NodeId, decl: &hir::FnDecl, - body: &hir::Block) + body: &hir::Expr) -> (Vec>, move_data::MoveData<'tcx>) { let mut glcx = GatherLoanCtxt { @@ -55,11 +54,8 @@ pub fn gather_loans_in_fn<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, }; let param_env = ty::ParameterEnvironment::for_item(bccx.tcx, fn_id); - let infcx = infer::new_infer_ctxt(bccx.tcx, &bccx.tcx.tables, Some(param_env)); - { - let mut euv = euv::ExprUseVisitor::new(&mut glcx, &infcx); - euv.walk_fn(decl, body); - } + let infcx = bccx.tcx.borrowck_fake_infer_ctxt(param_env); + euv::ExprUseVisitor::new(&mut glcx, &infcx).walk_fn(decl, body); glcx.report_potential_errors(); let GatherLoanCtxt { all_loans, move_data, .. } = glcx; @@ -134,7 +130,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { borrow_id: ast::NodeId, borrow_span: Span, cmt: mc::cmt<'tcx>, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, bk: ty::BorrowKind, loan_cause: euv::LoanCause) { @@ -176,7 +172,7 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, req_kind: ty::BorrowKind) -> Result<(),()> { - let aliasability = cmt.freely_aliasable(bccx.tcx); + let aliasability = cmt.freely_aliasable(); debug!("check_aliasability aliasability={:?} req_kind={:?}", aliasability, req_kind); @@ -209,7 +205,7 @@ fn check_aliasability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, alias_cause); Err(()) } - (_, _) => { + (..) => { Ok(()) } } @@ -253,7 +249,7 @@ fn check_mutability<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, } impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { - pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.bccx.tcx } + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.bccx.tcx } /// Guarantees that `cmt` is assignable, or reports an error. 
fn guarantee_assignment_valid(&mut self, @@ -311,7 +307,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { borrow_span: Span, cmt: mc::cmt<'tcx>, req_kind: ty::BorrowKind, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cause: euv::LoanCause) { debug!("guarantee_valid(borrow_id={}, cmt={:?}, \ req_mutbl={:?}, loan_region={:?})", @@ -322,7 +318,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { // a loan for the empty region can never be dereferenced, so // it is always safe - if loan_region == ty::ReEmpty { + if *loan_region == ty::ReEmpty { return; } @@ -362,7 +358,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } RestrictionResult::SafeIf(loan_path, restricted_paths) => { - let loan_scope = match loan_region { + let loan_scope = match *loan_region { ty::ReScope(scope) => scope, ty::ReFree(ref fr) => fr.scope, @@ -373,11 +369,12 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { ty::ReLateBound(..) | ty::ReEarlyBound(..) | ty::ReVar(..) | - ty::ReSkolemized(..) => { - self.tcx().sess.span_bug( + ty::ReSkolemized(..) 
| + ty::ReErased => { + span_bug!( cmt.span, - &format!("invalid borrow lifetime: {:?}", - loan_region)); + "invalid borrow lifetime: {:?}", + loan_region); } }; debug!("loan_scope = {:?}", loan_scope); @@ -386,11 +383,11 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { let gen_scope = self.compute_gen_scope(borrow_scope, loan_scope); debug!("gen_scope = {:?}", gen_scope); - let kill_scope = self.compute_kill_scope(loan_scope, &*loan_path); + let kill_scope = self.compute_kill_scope(loan_scope, &loan_path); debug!("kill_scope = {:?}", kill_scope); if req_kind == ty::MutBorrow { - self.mark_loan_path_as_mutated(&*loan_path); + self.mark_loan_path_as_mutated(&loan_path); } Loan { @@ -452,7 +449,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { LpDowncast(ref base, _) | LpExtend(ref base, mc::McInherited, _) | LpExtend(ref base, mc::McDeclared, _) => { - self.mark_loan_path_as_mutated(&**base); + self.mark_loan_path_as_mutated(&base); } LpExtend(_, mc::McImmutable, _) => { // Nothing to do. @@ -520,19 +517,27 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { /// sure the loans being taken are sound. 
struct StaticInitializerCtxt<'a, 'tcx: 'a> { bccx: &'a BorrowckCtxt<'a, 'tcx>, + item_id: ast::NodeId } -impl<'a, 'tcx, 'v> Visitor<'v> for StaticInitializerCtxt<'a, 'tcx> { - fn visit_expr(&mut self, ex: &Expr) { +impl<'a, 'tcx> Visitor<'tcx> for StaticInitializerCtxt<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.bccx.tcx.map) + } + + fn visit_expr(&mut self, ex: &'tcx Expr) { if let hir::ExprAddrOf(mutbl, ref base) = ex.node { - let infcx = infer::new_infer_ctxt(self.bccx.tcx, &self.bccx.tcx.tables, None); + let param_env = ty::ParameterEnvironment::for_item(self.bccx.tcx, + self.item_id); + let infcx = self.bccx.tcx.borrowck_fake_infer_ctxt(param_env); let mc = mc::MemCategorizationContext::new(&infcx); - let base_cmt = mc.cat_expr(&**base).unwrap(); + let base_cmt = mc.cat_expr(&base).unwrap(); let borrow_kind = ty::BorrowKind::from_mutbl(mutbl); // Check that we don't allow borrows of unsafe static items. - if check_aliasability(self.bccx, ex.span, - BorrowViolation(euv::AddrOf), - base_cmt, borrow_kind).is_err() { + let err = check_aliasability(self.bccx, ex.span, + BorrowViolation(euv::AddrOf), + base_cmt, borrow_kind).is_err(); + if err { return; // reported an error, no sense in reporting more. 
} } @@ -541,12 +546,15 @@ impl<'a, 'tcx, 'v> Visitor<'v> for StaticInitializerCtxt<'a, 'tcx> { } } -pub fn gather_loans_in_static_initializer(bccx: &mut BorrowckCtxt, expr: &hir::Expr) { +pub fn gather_loans_in_static_initializer<'a, 'tcx>(bccx: &mut BorrowckCtxt<'a, 'tcx>, + item_id: ast::NodeId, + expr: &'tcx hir::Expr) { debug!("gather_loans_in_static_initializer(expr={:?})", expr); let mut sicx = StaticInitializerCtxt { - bccx: bccx + bccx: bccx, + item_id: item_id }; sicx.visit_expr(expr); diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index 4cb9673785ecb..47f8d978704f4 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -12,11 +12,10 @@ use borrowck::BorrowckCtxt; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::mem_categorization::InteriorOffsetKind as Kind; -use rustc::middle::ty; +use rustc::ty; use syntax::ast; -use syntax::codemap; -use syntax::errors::DiagnosticBuilder; -use rustc_front::hir; +use syntax_pos; +use errors::DiagnosticBuilder; pub struct MoveErrorCollector<'tcx> { errors: Vec> @@ -56,7 +55,7 @@ impl<'tcx> MoveError<'tcx> { #[derive(Clone)] pub struct MoveSpanAndPath { - pub span: codemap::Span, + pub span: syntax_pos::Span, pub name: ast::Name, } @@ -72,7 +71,7 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, let mut err = report_cannot_move_out_of(bccx, error.move_from.clone()); let mut is_first_note = true; for move_to in &error.move_to_places { - note_move_destination(&mut err, move_to.span, + err = note_move_destination(err, move_to.span, move_to.name, is_first_note); is_first_note = false; } @@ -93,7 +92,7 @@ fn group_errors_with_same_origin<'tcx>(errors: &Vec>) let move_from_id = error.move_from.id; debug!("append_to_grouped_errors(move_from_id={})", move_from_id); let move_to = if 
error.move_to.is_some() { - vec!(error.move_to.clone().unwrap()) + vec![error.move_to.clone().unwrap()] } else { Vec::new() }; @@ -117,68 +116,73 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_from: mc::cmt<'tcx>) -> DiagnosticBuilder<'a> { match move_from.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) | - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem => { - struct_span_err!(bccx, move_from.span, E0507, + let mut err = struct_span_err!(bccx, move_from.span, E0507, "cannot move out of {}", - move_from.descriptive_string(bccx.tcx)) + move_from.descriptive_string(bccx.tcx)); + err.span_label( + move_from.span, + &format!("cannot move out of {}", move_from.descriptive_string(bccx.tcx)) + ); + err } - Categorization::Interior(ref b, mc::InteriorElement(Kind::Index, _)) => { - let expr = bccx.tcx.map.expect_expr(move_from.id); - if let hir::ExprIndex(..) 
= expr.node { - struct_span_err!(bccx, move_from.span, E0508, - "cannot move out of type `{}`, \ - a non-copy fixed-size array", - b.ty) - } else { - bccx.span_bug(move_from.span, "this path should not cause illegal move"); - unreachable!(); + Categorization::Interior(ref b, mc::InteriorElement(ik, _)) => { + match (&b.ty.sty, ik) { + (&ty::TySlice(..), _) | + (_, Kind::Index) => { + let mut err = struct_span_err!(bccx, move_from.span, E0508, + "cannot move out of type `{}`, \ + a non-copy array", + b.ty); + err.span_label(move_from.span, &format!("cannot move out of here")); + err + } + (_, Kind::Pattern) => { + span_bug!(move_from.span, "this path should not cause illegal move"); + } } } Categorization::Downcast(ref b, _) | Categorization::Interior(ref b, mc::InteriorField(_)) => { match b.ty.sty { - ty::TyStruct(def, _) | - ty::TyEnum(def, _) if def.has_dtor() => { - struct_span_err!(bccx, move_from.span, E0509, - "cannot move out of type `{}`, \ - which defines the `Drop` trait", - b.ty) + ty::TyAdt(def, _) if def.has_dtor() => { + let mut err = struct_span_err!(bccx, move_from.span, E0509, + "cannot move out of type `{}`, \ + which implements the `Drop` trait", + b.ty); + err.span_label(move_from.span, &format!("cannot move out of here")); + err }, _ => { - bccx.span_bug(move_from.span, "this path should not cause illegal move"); - unreachable!(); + span_bug!(move_from.span, "this path should not cause illegal move"); } } } _ => { - bccx.span_bug(move_from.span, "this path should not cause illegal move"); - unreachable!(); + span_bug!(move_from.span, "this path should not cause illegal move"); } } } -fn note_move_destination(err: &mut DiagnosticBuilder, - move_to_span: codemap::Span, +fn note_move_destination(mut err: DiagnosticBuilder, + move_to_span: syntax_pos::Span, pat_name: ast::Name, - is_first_note: bool) { + is_first_note: bool) -> DiagnosticBuilder { if is_first_note { - err.span_note( - move_to_span, - "attempting to move value to here"); - 
err.fileline_help( + err.span_label( move_to_span, - &format!("to prevent the move, \ - use `ref {0}` or `ref mut {0}` to capture value by \ - reference", + &format!("hint: to prevent move, use `ref {0}` or `ref mut {0}`", pat_name)); + err } else { - err.span_note(move_to_span, - &format!("and here (use `ref {0}` or `ref mut {0}`)", + err.span_label(move_to_span, + &format!("...and here (use `ref {0}` or `ref mut {0}`)", pat_name)); + err } } diff --git a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs index 2a0d8ef276648..fdcefdc0d4307 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs @@ -14,8 +14,8 @@ use borrowck::*; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; -use rustc::middle::ty; -use syntax::codemap::Span; +use rustc::ty; +use syntax_pos::Span; use borrowck::ToInteriorKind; @@ -31,7 +31,7 @@ pub fn compute_restrictions<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, span: Span, cause: euv::LoanCause, cmt: mc::cmt<'tcx>, - loan_region: ty::Region) + loan_region: &'tcx ty::Region) -> RestrictionResult<'tcx> { let ctxt = RestrictionsContext { bccx: bccx, @@ -49,7 +49,7 @@ pub fn compute_restrictions<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, struct RestrictionsContext<'a, 'tcx: 'a> { bccx: &'a BorrowckCtxt<'a, 'tcx>, span: Span, - loan_region: ty::Region, + loan_region: &'tcx ty::Region, cause: euv::LoanCause, } @@ -89,7 +89,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { self.restrict(cmt_base) } - Categorization::Interior(cmt_base, i) => { + Categorization::Interior(cmt_base, interior) => { // R-Field // // Overwriting the base would not change the type of @@ -99,8 +99,34 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { Categorization::Downcast(_, variant_id) => Some(variant_id), _ => None }; + let 
interior = interior.cleaned(); + let base_ty = cmt_base.ty; let result = self.restrict(cmt_base); - self.extend(result, &cmt, LpInterior(opt_variant_id, i.cleaned())) + // Borrowing one union field automatically borrows all its fields. + match base_ty.sty { + ty::TyAdt(adt_def, _) if adt_def.is_union() => match result { + RestrictionResult::Safe => RestrictionResult::Safe, + RestrictionResult::SafeIf(base_lp, mut base_vec) => { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + cmt.ty + } else { + self.bccx.tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), cmt.mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + base_vec.push(sibling_lp); + } + + let lp = new_lp(LpExtend(base_lp, cmt.mutbl, + LpInterior(opt_variant_id, interior))); + RestrictionResult::SafeIf(lp, base_vec) + } + }, + _ => self.extend(result, &cmt, LpInterior(opt_variant_id, interior)) + } } Categorization::StaticItem => { @@ -157,7 +183,7 @@ impl<'a, 'tcx> RestrictionsContext<'a, 'tcx> { fn extend(&self, result: RestrictionResult<'tcx>, cmt: &mc::cmt<'tcx>, - elem: LoanPathElem) -> RestrictionResult<'tcx> { + elem: LoanPathElem<'tcx>) -> RestrictionResult<'tcx> { match result { RestrictionResult::Safe => RestrictionResult::Safe, RestrictionResult::SafeIf(base_lp, mut base_vec) => { diff --git a/src/librustc_borrowck/borrowck/mir/abs_domain.rs b/src/librustc_borrowck/borrowck/mir/abs_domain.rs new file mode 100644 index 0000000000000..5e61c2ec7a292 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/abs_domain.rs @@ -0,0 +1,62 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The move-analysis portion of borrowck needs to work in an abstract +//! domain of lifted Lvalues. Most of the Lvalue variants fall into a +//! one-to-one mapping between the concrete and abstract (e.g. a +//! field-deref on a local-variable, `x.field`, has the same meaning +//! in both domains). Indexed-Projections are the exception: `a[x]` +//! needs to be treated as mapping to the same move path as `a[y]` as +//! well as `a[13]`, et cetera. +//! +//! (In theory the analysis could be extended to work with sets of +//! paths, so that `a[0]` and `a[13]` could be kept distinct, while +//! `a[x]` would still overlap them both. But that is not this +//! representation does today.) + +use rustc::mir::LvalueElem; +use rustc::mir::{Operand, ProjectionElem}; + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct AbstractOperand; +pub type AbstractElem<'tcx> = + ProjectionElem<'tcx, AbstractOperand>; + +pub trait Lift { + type Abstract; + fn lift(&self) -> Self::Abstract; +} +impl<'tcx> Lift for Operand<'tcx> { + type Abstract = AbstractOperand; + fn lift(&self) -> Self::Abstract { AbstractOperand } +} +impl<'tcx> Lift for LvalueElem<'tcx> { + type Abstract = AbstractElem<'tcx>; + fn lift(&self) -> Self::Abstract { + match *self { + ProjectionElem::Deref => + ProjectionElem::Deref, + ProjectionElem::Field(ref f, ty) => + ProjectionElem::Field(f.clone(), ty.clone()), + ProjectionElem::Index(ref i) => + ProjectionElem::Index(i.lift()), + ProjectionElem::Subslice {from, to} => + ProjectionElem::Subslice { from: from, to: to }, + ProjectionElem::ConstantIndex {offset,min_length,from_end} => + ProjectionElem::ConstantIndex { + offset: offset, + min_length: min_length, + from_end: from_end + }, + ProjectionElem::Downcast(a, u) => + ProjectionElem::Downcast(a.clone(), 
u.clone()), + } + } +} diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs b/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs new file mode 100644 index 0000000000000..8461f6d061a55 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/dataflow/graphviz.rs @@ -0,0 +1,344 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Hook into libgraphviz for rendering dataflow graphs for MIR. + +use syntax::ast::NodeId; +use rustc::mir::{BasicBlock, Mir}; +use rustc_data_structures::bitslice::bits_to_string; +use rustc_data_structures::indexed_set::{IdxSet}; +use rustc_data_structures::indexed_vec::Idx; + +use dot; +use dot::IntoCow; + +use std::fmt::Debug; +use std::fs::File; +use std::io; +use std::io::prelude::*; +use std::marker::PhantomData; +use std::mem; +use std::path::Path; + +use super::super::MoveDataParamEnv; +use super::super::MirBorrowckCtxtPreDataflow; +use super::{BitDenotation, DataflowState}; + +impl DataflowState { + fn each_bit(&self, ctxt: &O::Ctxt, words: &IdxSet, mut f: F) + where F: FnMut(O::Idx) { + //! Helper for iterating over the bits in a bitvector. + + let bits_per_block = self.operator.bits_per_block(ctxt); + let usize_bits: usize = mem::size_of::() * 8; + + for (word_index, &word) in words.words().iter().enumerate() { + if word != 0 { + let base_index = word_index * usize_bits; + for offset in 0..usize_bits { + let bit = 1 << offset; + if (word & bit) != 0 { + // NB: we round up the total number of bits + // that we store in any given bit set so that + // it is an even multiple of usize::BITS. 
This + // means that there may be some stray bits at + // the end that do not correspond to any + // actual value; that's why we first check + // that we are in range of bits_per_block. + let bit_index = base_index + offset as usize; + if bit_index >= bits_per_block { + return; + } else { + f(O::Idx::new(bit_index)); + } + } + } + } + } + } + + pub fn interpret_set<'c, P>(&self, + ctxt: &'c O::Ctxt, + words: &IdxSet, + render_idx: &P) + -> Vec<&'c Debug> + where P: for <'b> Fn(&'b O::Ctxt, O::Idx) -> &'b Debug + { + let mut v = Vec::new(); + self.each_bit(ctxt, words, |i| { + v.push(render_idx(ctxt, i)); + }); + v + } +} + +pub trait MirWithFlowState<'tcx> { + type BD: BitDenotation>; + fn node_id(&self) -> NodeId; + fn mir(&self) -> &Mir<'tcx>; + fn analysis_ctxt(&self) -> &::Ctxt; + fn flow_state(&self) -> &DataflowState; +} + +impl<'a, 'tcx: 'a, BD> MirWithFlowState<'tcx> for MirBorrowckCtxtPreDataflow<'a, 'tcx, BD> + where 'tcx: 'a, BD: BitDenotation> +{ + type BD = BD; + fn node_id(&self) -> NodeId { self.node_id } + fn mir(&self) -> &Mir<'tcx> { self.flow_state.mir() } + fn analysis_ctxt(&self) -> &BD::Ctxt { &self.flow_state.ctxt } + fn flow_state(&self) -> &DataflowState { &self.flow_state.flow_state } +} + +struct Graph<'a, 'tcx, MWF:'a, P> where + MWF: MirWithFlowState<'tcx> +{ + mbcx: &'a MWF, + phantom: PhantomData<&'tcx ()>, + render_idx: P, +} + +pub fn print_borrowck_graph_to<'a, 'tcx, BD, P>( + mbcx: &MirBorrowckCtxtPreDataflow<'a, 'tcx, BD>, + path: &Path, + render_idx: P) + -> io::Result<()> + where BD: BitDenotation>, + P: for <'b> Fn(&'b BD::Ctxt, BD::Idx) -> &'b Debug +{ + let g = Graph { mbcx: mbcx, phantom: PhantomData, render_idx: render_idx }; + let mut v = Vec::new(); + dot::render(&g, &mut v)?; + debug!("print_borrowck_graph_to path: {} node_id: {}", + path.display(), mbcx.node_id); + File::create(path).and_then(|mut f| f.write_all(&v)) +} + +pub type Node = BasicBlock; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub struct Edge { 
source: BasicBlock, index: usize } + +fn outgoing(mir: &Mir, bb: BasicBlock) -> Vec { + let succ_len = mir[bb].terminator().successors().len(); + (0..succ_len).map(|index| Edge { source: bb, index: index}).collect() +} + +impl<'a, 'tcx, MWF, P> dot::Labeller<'a> for Graph<'a, 'tcx, MWF, P> + where MWF: MirWithFlowState<'tcx>, + P: for <'b> Fn(&'b ::Ctxt, + ::Idx) + -> &'b Debug, +{ + type Node = Node; + type Edge = Edge; + fn graph_id(&self) -> dot::Id { + dot::Id::new(format!("graph_for_node_{}", + self.mbcx.node_id())) + .unwrap() + } + + fn node_id(&self, n: &Node) -> dot::Id { + dot::Id::new(format!("bb_{}", n.index())) + .unwrap() + } + + fn node_label(&self, n: &Node) -> dot::LabelText { + // A standard MIR label, as generated by write_node_label, is + // presented in a single column in a table. + // + // The code below does a bunch of formatting work to format a + // node (i.e. MIR basic-block) label with extra + // dataflow-enriched information. In particular, the goal is + // to add extra columns that present the three dataflow + // bitvectors, and the data those bitvectors represent. 
+ // + // It presents it in the following format (where I am + // presenting the table rendering via ASCII art, one line per + // row of the table, and a chunk size of 3 rather than 5): + // + // ------ ----------------------- ------------ -------------------- + // [e1, e3, e4] + // [e8, e9] "= ENTRY:" + // ------ ----------------------- ------------ -------------------- + // Left + // Most + // Column + // Is + // Just + // Normal + // Series + // Of + // MIR + // Stmts + // ------ ----------------------- ------------ -------------------- + // [g1, g4, g5] "= GEN:" + // ------ ----------------------- ------------ -------------------- + // "KILL:" "=" [k1, k3, k8] + // [k9] + // ------ ----------------------- ------------ -------------------- + // + // (In addition, the added dataflow is rendered with a colored + // background just so it will stand out compared to the + // statements.) + let mut v = Vec::new(); + let i = n.index(); + let chunk_size = 5; + const BG_FLOWCONTENT: &'static str = r#"bgcolor="pink""#; + const ALIGN_RIGHT: &'static str = r#"align="right""#; + const FACE_MONOSPACE: &'static str = r#"FACE="Courier""#; + fn chunked_present_left(w: &mut W, + interpreted: &[&Debug], + chunk_size: usize) + -> io::Result<()> + { + // This function may emit a sequence of 's, but it + // always finishes with an (unfinished) + // + // + // Thus, after being called, one should finish both the + // pending as well as the itself. 
+ let mut seen_one = false; + for c in interpreted.chunks(chunk_size) { + if seen_one { + // if not the first row, finish off the previous row + write!(w, "")?; + } + write!(w, "{objs:?}", + bg = BG_FLOWCONTENT, + align = ALIGN_RIGHT, + objs = c)?; + seen_one = true; + } + if !seen_one { + write!(w, "[]", + bg = BG_FLOWCONTENT, + align = ALIGN_RIGHT)?; + } + Ok(()) + } + ::rustc_mir::graphviz::write_node_label( + *n, self.mbcx.mir(), &mut v, 4, + |w| { + let ctxt = self.mbcx.analysis_ctxt(); + let flow = self.mbcx.flow_state(); + let entry_interp = flow.interpret_set(ctxt, + flow.sets.on_entry_set_for(i), + &self.render_idx); + chunked_present_left(w, &entry_interp[..], chunk_size)?; + let bits_per_block = flow.sets.bits_per_block(); + let entry = flow.sets.on_entry_set_for(i); + debug!("entry set for i={i} bits_per_block: {bpb} entry: {e:?} interp: {ei:?}", + i=i, e=entry, bpb=bits_per_block, ei=entry_interp); + write!(w, "= ENTRY:{entrybits:?}\ + ", + bg = BG_FLOWCONTENT, + face = FACE_MONOSPACE, + entrybits=bits_to_string(entry.words(), bits_per_block)) + }, + |w| { + let ctxt = self.mbcx.analysis_ctxt(); + let flow = self.mbcx.flow_state(); + let gen_interp = + flow.interpret_set(ctxt, flow.sets.gen_set_for(i), &self.render_idx); + let kill_interp = + flow.interpret_set(ctxt, flow.sets.kill_set_for(i), &self.render_idx); + chunked_present_left(w, &gen_interp[..], chunk_size)?; + let bits_per_block = flow.sets.bits_per_block(); + { + let gen = flow.sets.gen_set_for(i); + debug!("gen set for i={i} bits_per_block: {bpb} gen: {g:?} interp: {gi:?}", + i=i, g=gen, bpb=bits_per_block, gi=gen_interp); + write!(w, " = GEN:{genbits:?}\ + ", + bg = BG_FLOWCONTENT, + face = FACE_MONOSPACE, + genbits=bits_to_string(gen.words(), bits_per_block))?; + } + + { + let kill = flow.sets.kill_set_for(i); + debug!("kill set for i={i} bits_per_block: {bpb} kill: {k:?} interp: {ki:?}", + i=i, k=kill, bpb=bits_per_block, ki=kill_interp); + write!(w, "KILL:\ + {killbits:?}", + bg = 
BG_FLOWCONTENT, + align = ALIGN_RIGHT, + face = FACE_MONOSPACE, + killbits=bits_to_string(kill.words(), bits_per_block))?; + } + + // (chunked_present_right) + let mut seen_one = false; + for k in kill_interp.chunks(chunk_size) { + if !seen_one { + // continuation of row; this is fourth + write!(w, "= {kill:?}", + bg = BG_FLOWCONTENT, + kill=k)?; + } else { + // new row, with indent of three 's + write!(w, "{kill:?}", + bg = BG_FLOWCONTENT, + kill=k)?; + } + seen_one = true; + } + if !seen_one { + write!(w, "= []", + bg = BG_FLOWCONTENT)?; + } + + Ok(()) + }) + .unwrap(); + dot::LabelText::html(String::from_utf8(v).unwrap()) + } + + fn node_shape(&self, _n: &Node) -> Option { + Some(dot::LabelText::label("none")) + } +} + +impl<'a, 'tcx, MWF, P> dot::GraphWalk<'a> for Graph<'a, 'tcx, MWF, P> + where MWF: MirWithFlowState<'tcx> +{ + type Node = Node; + type Edge = Edge; + fn nodes(&self) -> dot::Nodes { + self.mbcx.mir() + .basic_blocks() + .indices() + .collect::>() + .into_cow() + } + + fn edges(&self) -> dot::Edges { + let mir = self.mbcx.mir(); + // base initial capacity on assumption every block has at + // least one outgoing edge (Which should be true for all + // blocks but one, the exit-block). + let mut edges = Vec::with_capacity(mir.basic_blocks().len()); + for bb in mir.basic_blocks().indices() { + let outgoing = outgoing(mir, bb); + edges.extend(outgoing.into_iter()); + } + edges.into_cow() + } + + fn source(&self, edge: &Edge) -> Node { + edge.source + } + + fn target(&self, edge: &Edge) -> Node { + let mir = self.mbcx.mir(); + mir[edge.source].terminator().successors()[edge.index] + } +} diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs new file mode 100644 index 0000000000000..fcb453d81aa77 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/dataflow/impls.rs @@ -0,0 +1,571 @@ +// Copyright 2012-2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::TyCtxt; +use rustc::mir::{self, Mir, Location}; +use rustc_data_structures::bitslice::BitSlice; // adds set_bit/get_bit to &[usize] bitvector rep. +use rustc_data_structures::bitslice::{BitwiseOperator}; +use rustc_data_structures::indexed_set::{IdxSet}; +use rustc_data_structures::indexed_vec::Idx; + +use super::super::gather_moves::{MoveOutIndex, MovePathIndex}; +use super::super::MoveDataParamEnv; +use super::super::DropFlagState; +use super::super::drop_flag_effects_for_function_entry; +use super::super::drop_flag_effects_for_location; +use super::super::on_lookup_result_bits; + +use super::{BitDenotation, BlockSets, DataflowOperator}; + +// Dataflow analyses are built upon some interpretation of the +// bitvectors attached to each basic block, represented via a +// zero-sized structure. + +/// `MaybeInitializedLvals` tracks all l-values that might be +/// initialized upon reaching a particular point in the control flow +/// for a function. +/// +/// For example, in code like the following, we have corresponding +/// dataflow information shown in the right-hand comments. 
+/// +/// ```rust +/// struct S; +/// fn foo(pred: bool) { // maybe-init: +/// // {} +/// let a = S; let b = S; let c; let d; // {a, b} +/// +/// if pred { +/// drop(a); // { b} +/// b = S; // { b} +/// +/// } else { +/// drop(b); // {a} +/// d = S; // {a, d} +/// +/// } // {a, b, d} +/// +/// c = S; // {a, b, c, d} +/// } +/// ``` +/// +/// To determine whether an l-value *must* be initialized at a +/// particular control-flow point, one can take the set-difference +/// between this data and the data from `MaybeUninitializedLvals` at the +/// corresponding control-flow point. +/// +/// Similarly, at a given `drop` statement, the set-intersection +/// between this data and `MaybeUninitializedLvals` yields the set of +/// l-values that would require a dynamic drop-flag at that statement. +pub struct MaybeInitializedLvals<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, +} + +impl<'a, 'tcx: 'a> MaybeInitializedLvals<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { + MaybeInitializedLvals { tcx: tcx, mir: mir } + } +} + +/// `MaybeUninitializedLvals` tracks all l-values that might be +/// uninitialized upon reaching a particular point in the control flow +/// for a function. +/// +/// For example, in code like the following, we have corresponding +/// dataflow information shown in the right-hand comments. +/// +/// ```rust +/// struct S; +/// fn foo(pred: bool) { // maybe-uninit: +/// // {a, b, c, d} +/// let a = S; let b = S; let c; let d; // { c, d} +/// +/// if pred { +/// drop(a); // {a, c, d} +/// b = S; // {a, c, d} +/// +/// } else { +/// drop(b); // { b, c, d} +/// d = S; // { b, c } +/// +/// } // {a, b, c, d} +/// +/// c = S; // {a, b, d} +/// } +/// ``` +/// +/// To determine whether an l-value *must* be uninitialized at a +/// particular control-flow point, one can take the set-difference +/// between this data and the data from `MaybeInitializedLvals` at the +/// corresponding control-flow point. 
+/// +/// Similarly, at a given `drop` statement, the set-intersection +/// between this data and `MaybeInitializedLvals` yields the set of +/// l-values that would require a dynamic drop-flag at that statement. +pub struct MaybeUninitializedLvals<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, +} + +impl<'a, 'tcx: 'a> MaybeUninitializedLvals<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { + MaybeUninitializedLvals { tcx: tcx, mir: mir } + } +} + +/// `DefinitelyInitializedLvals` tracks all l-values that are definitely +/// initialized upon reaching a particular point in the control flow +/// for a function. +/// +/// FIXME: Note that once flow-analysis is complete, this should be +/// the set-complement of MaybeUninitializedLvals; thus we can get rid +/// of one or the other of these two. I'm inclined to get rid of +/// MaybeUninitializedLvals, simply because the sets will tend to be +/// smaller in this analysis and thus easier for humans to process +/// when debugging. +/// +/// For example, in code like the following, we have corresponding +/// dataflow information shown in the right-hand comments. +/// +/// ```rust +/// struct S; +/// fn foo(pred: bool) { // definite-init: +/// // { } +/// let a = S; let b = S; let c; let d; // {a, b } +/// +/// if pred { +/// drop(a); // { b, } +/// b = S; // { b, } +/// +/// } else { +/// drop(b); // {a, } +/// d = S; // {a, d} +/// +/// } // { } +/// +/// c = S; // { c } +/// } +/// ``` +/// +/// To determine whether an l-value *may* be uninitialized at a +/// particular control-flow point, one can take the set-complement +/// of this data. +/// +/// Similarly, at a given `drop` statement, the set-difference between +/// this data and `MaybeInitializedLvals` yields the set of l-values +/// that would require a dynamic drop-flag at that statement. 
+pub struct DefinitelyInitializedLvals<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, +} + +impl<'a, 'tcx: 'a> DefinitelyInitializedLvals<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { + DefinitelyInitializedLvals { tcx: tcx, mir: mir } + } +} + +/// `MovingOutStatements` tracks the statements that perform moves out +/// of particular l-values. More precisely, it tracks whether the +/// *effect* of such moves (namely, the uninitialization of the +/// l-value in question) can reach some point in the control-flow of +/// the function, or if that effect is "killed" by some intervening +/// operation reinitializing that l-value. +/// +/// The resulting dataflow is a more enriched version of +/// `MaybeUninitializedLvals`. Both structures on their own only tell +/// you if an l-value *might* be uninitialized at a given point in the +/// control flow. But `MovingOutStatements` also includes the added +/// data of *which* particular statement causing the deinitialization +/// that the borrow checker's error meessage may need to report. 
+#[allow(dead_code)] +pub struct MovingOutStatements<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, +} + +impl<'a, 'tcx> MaybeInitializedLvals<'a, 'tcx> { + fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + state: DropFlagState) + { + match state { + DropFlagState::Absent => sets.kill(&path), + DropFlagState::Present => sets.gen(&path), + } + } +} + +impl<'a, 'tcx> MaybeUninitializedLvals<'a, 'tcx> { + fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + state: DropFlagState) + { + match state { + DropFlagState::Absent => sets.gen(&path), + DropFlagState::Present => sets.kill(&path), + } + } +} + +impl<'a, 'tcx> DefinitelyInitializedLvals<'a, 'tcx> { + fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + state: DropFlagState) + { + match state { + DropFlagState::Absent => sets.kill(&path), + DropFlagState::Present => sets.gen(&path), + } + } +} + +impl<'a, 'tcx> BitDenotation for MaybeInitializedLvals<'a, 'tcx> { + type Idx = MovePathIndex; + type Ctxt = MoveDataParamEnv<'tcx>; + fn name() -> &'static str { "maybe_init" } + fn bits_per_block(&self, ctxt: &Self::Ctxt) -> usize { + ctxt.move_data.move_paths.len() + } + + fn start_block_effect(&self, ctxt: &Self::Ctxt, sets: &mut BlockSets) + { + drop_flag_effects_for_function_entry( + self.tcx, self.mir, ctxt, + |path, s| { + assert!(s == DropFlagState::Present); + sets.on_entry.add(&path); + }); + } + + fn statement_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: idx }, + |path, s| Self::update_bits(sets, path, s) + ) + } + + fn terminator_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + statements_len: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: statements_len }, + |path, s| Self::update_bits(sets, path, s) + ) + } 
+ + fn propagate_call_return(&self, + ctxt: &Self::Ctxt, + in_out: &mut IdxSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_lval: &mir::Lvalue) { + // when a call returns successfully, that means we need to set + // the bits for that dest_lval to 1 (initialized). + on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.add(&mpi); }); + } +} + +impl<'a, 'tcx> BitDenotation for MaybeUninitializedLvals<'a, 'tcx> { + type Idx = MovePathIndex; + type Ctxt = MoveDataParamEnv<'tcx>; + fn name() -> &'static str { "maybe_uninit" } + fn bits_per_block(&self, ctxt: &Self::Ctxt) -> usize { + ctxt.move_data.move_paths.len() + } + + // sets on_entry bits for Arg lvalues + fn start_block_effect(&self, ctxt: &Self::Ctxt, sets: &mut BlockSets) { + // set all bits to 1 (uninit) before gathering counterevidence + for e in sets.on_entry.words_mut() { *e = !0; } + + drop_flag_effects_for_function_entry( + self.tcx, self.mir, ctxt, + |path, s| { + assert!(s == DropFlagState::Present); + sets.on_entry.remove(&path); + }); + } + + fn statement_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: idx }, + |path, s| Self::update_bits(sets, path, s) + ) + } + + fn terminator_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + statements_len: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: statements_len }, + |path, s| Self::update_bits(sets, path, s) + ) + } + + fn propagate_call_return(&self, + ctxt: &Self::Ctxt, + in_out: &mut IdxSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_lval: &mir::Lvalue) { + // when a call returns successfully, that means we need to set + // the bits for that dest_lval to 0 (initialized). 
+ on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.remove(&mpi); }); + } +} + +impl<'a, 'tcx> BitDenotation for DefinitelyInitializedLvals<'a, 'tcx> { + type Idx = MovePathIndex; + type Ctxt = MoveDataParamEnv<'tcx>; + fn name() -> &'static str { "definite_init" } + fn bits_per_block(&self, ctxt: &Self::Ctxt) -> usize { + ctxt.move_data.move_paths.len() + } + + // sets on_entry bits for Arg lvalues + fn start_block_effect(&self, ctxt: &Self::Ctxt, sets: &mut BlockSets) { + for e in sets.on_entry.words_mut() { *e = 0; } + + drop_flag_effects_for_function_entry( + self.tcx, self.mir, ctxt, + |path, s| { + assert!(s == DropFlagState::Present); + sets.on_entry.add(&path); + }); + } + + fn statement_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: idx }, + |path, s| Self::update_bits(sets, path, s) + ) + } + + fn terminator_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + statements_len: usize) + { + drop_flag_effects_for_location( + self.tcx, self.mir, ctxt, + Location { block: bb, statement_index: statements_len }, + |path, s| Self::update_bits(sets, path, s) + ) + } + + fn propagate_call_return(&self, + ctxt: &Self::Ctxt, + in_out: &mut IdxSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_lval: &mir::Lvalue) { + // when a call returns successfully, that means we need to set + // the bits for that dest_lval to 1 (initialized). 
+ on_lookup_result_bits(self.tcx, self.mir, &ctxt.move_data, + ctxt.move_data.rev_lookup.find(dest_lval), + |mpi| { in_out.add(&mpi); }); + } +} + +impl<'a, 'tcx> BitDenotation for MovingOutStatements<'a, 'tcx> { + type Idx = MoveOutIndex; + type Ctxt = MoveDataParamEnv<'tcx>; + fn name() -> &'static str { "moving_out" } + fn bits_per_block(&self, ctxt: &Self::Ctxt) -> usize { + ctxt.move_data.moves.len() + } + + fn start_block_effect(&self,_move_data: &Self::Ctxt, _sets: &mut BlockSets) { + // no move-statements have been executed prior to function + // execution, so this method has no effect on `_sets`. + } + fn statement_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx: usize) { + let (tcx, mir, move_data) = (self.tcx, self.mir, &ctxt.move_data); + let stmt = &mir[bb].statements[idx]; + let loc_map = &move_data.loc_map; + let path_map = &move_data.path_map; + let rev_lookup = &move_data.rev_lookup; + + let loc = Location { block: bb, statement_index: idx }; + debug!("stmt {:?} at loc {:?} moves out of move_indexes {:?}", + stmt, loc, &loc_map[loc]); + for move_index in &loc_map[loc] { + // Every path deinitialized by a *particular move* + // has corresponding bit, "gen'ed" (i.e. set) + // here, in dataflow vector + zero_to_one(sets.gen_set.words_mut(), *move_index); + } + let bits_per_block = self.bits_per_block(ctxt); + match stmt.kind { + mir::StatementKind::SetDiscriminant { .. } => { + span_bug!(stmt.source_info.span, "SetDiscriminant should not exist in borrowck"); + } + mir::StatementKind::Assign(ref lvalue, _) => { + // assigning into this `lvalue` kills all + // MoveOuts from it, and *also* all MoveOuts + // for children and associated fragment sets. 
+ on_lookup_result_bits(tcx, + mir, + move_data, + rev_lookup.find(lvalue), + |mpi| for moi in &path_map[mpi] { + assert!(moi.index() < bits_per_block); + sets.kill_set.add(&moi); + }); + } + mir::StatementKind::StorageLive(_) | + mir::StatementKind::StorageDead(_) | + mir::StatementKind::Nop => {} + } + } + + fn terminator_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + statements_len: usize) + { + let (mir, move_data) = (self.mir, &ctxt.move_data); + let term = mir[bb].terminator(); + let loc_map = &move_data.loc_map; + let loc = Location { block: bb, statement_index: statements_len }; + debug!("terminator {:?} at loc {:?} moves out of move_indexes {:?}", + term, loc, &loc_map[loc]); + let bits_per_block = self.bits_per_block(ctxt); + for move_index in &loc_map[loc] { + assert!(move_index.index() < bits_per_block); + zero_to_one(sets.gen_set.words_mut(), *move_index); + } + } + + fn propagate_call_return(&self, + ctxt: &Self::Ctxt, + in_out: &mut IdxSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_lval: &mir::Lvalue) { + let move_data = &ctxt.move_data; + let bits_per_block = self.bits_per_block(ctxt); + + let path_map = &move_data.path_map; + on_lookup_result_bits(self.tcx, + self.mir, + move_data, + move_data.rev_lookup.find(dest_lval), + |mpi| for moi in &path_map[mpi] { + assert!(moi.index() < bits_per_block); + in_out.remove(&moi); + }); + } +} + +fn zero_to_one(bitvec: &mut [usize], move_index: MoveOutIndex) { + let retval = bitvec.set_bit(move_index.index()); + assert!(retval); +} + +impl<'a, 'tcx> BitwiseOperator for MovingOutStatements<'a, 'tcx> { + #[inline] + fn join(&self, pred1: usize, pred2: usize) -> usize { + pred1 | pred2 // moves from both preds are in scope + } +} + +impl<'a, 'tcx> BitwiseOperator for MaybeInitializedLvals<'a, 'tcx> { + #[inline] + fn join(&self, pred1: usize, pred2: usize) -> usize { + pred1 | pred2 // "maybe" means we union effects of both preds + } +} + +impl<'a, 'tcx> 
BitwiseOperator for MaybeUninitializedLvals<'a, 'tcx> { + #[inline] + fn join(&self, pred1: usize, pred2: usize) -> usize { + pred1 | pred2 // "maybe" means we union effects of both preds + } +} + +impl<'a, 'tcx> BitwiseOperator for DefinitelyInitializedLvals<'a, 'tcx> { + #[inline] + fn join(&self, pred1: usize, pred2: usize) -> usize { + pred1 & pred2 // "definitely" means we intersect effects of both preds + } +} + +// The way that dataflow fixed point iteration works, you want to +// start at bottom and work your way to a fixed point. Control-flow +// merges will apply the `join` operator to each block entry's current +// state (which starts at that bottom value). +// +// This means, for propagation across the graph, that you either want +// to start at all-zeroes and then use Union as your merge when +// propagating, or you start at all-ones and then use Intersect as +// your merge when propagating. + +impl<'a, 'tcx> DataflowOperator for MovingOutStatements<'a, 'tcx> { + #[inline] + fn bottom_value() -> bool { + false // bottom = no loans in scope by default + } +} + +impl<'a, 'tcx> DataflowOperator for MaybeInitializedLvals<'a, 'tcx> { + #[inline] + fn bottom_value() -> bool { + false // bottom = uninitialized + } +} + +impl<'a, 'tcx> DataflowOperator for MaybeUninitializedLvals<'a, 'tcx> { + #[inline] + fn bottom_value() -> bool { + false // bottom = initialized (start_block_effect counters this at outset) + } +} + +impl<'a, 'tcx> DataflowOperator for DefinitelyInitializedLvals<'a, 'tcx> { + #[inline] + fn bottom_value() -> bool { + true // bottom = initialized (start_block_effect counters this at outset) + } +} diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs new file mode 100644 index 0000000000000..51817afbfeafd --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/dataflow/mod.rs @@ -0,0 +1,504 @@ +// Copyright 2012-2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc_data_structures::indexed_set::{IdxSet, IdxSetBuf}; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::bitslice::{bitwise, BitwiseOperator}; + +use rustc::ty::TyCtxt; +use rustc::mir::{self, Mir}; + +use std::fmt::Debug; +use std::io; +use std::mem; +use std::path::PathBuf; +use std::usize; + +use super::MirBorrowckCtxtPreDataflow; +use super::MoveDataParamEnv; + +pub use self::sanity_check::sanity_check_via_rustc_peek; +pub use self::impls::{MaybeInitializedLvals, MaybeUninitializedLvals}; +pub use self::impls::{DefinitelyInitializedLvals, MovingOutStatements}; + +mod graphviz; +mod sanity_check; +mod impls; + +pub trait Dataflow { + fn dataflow

(&mut self, p: P) where P: Fn(&BD::Ctxt, BD::Idx) -> &Debug; +} + +impl<'a, 'tcx: 'a, BD> Dataflow for MirBorrowckCtxtPreDataflow<'a, 'tcx, BD> + where BD: BitDenotation> + DataflowOperator +{ + fn dataflow

(&mut self, p: P) where P: Fn(&BD::Ctxt, BD::Idx) -> &Debug { + self.flow_state.build_sets(); + self.pre_dataflow_instrumentation(|c,i| p(c,i)).unwrap(); + self.flow_state.propagate(); + self.post_dataflow_instrumentation(|c,i| p(c,i)).unwrap(); + } +} + +struct PropagationContext<'b, 'a: 'b, 'tcx: 'a, O> + where O: 'b + BitDenotation, O::Ctxt: 'a +{ + builder: &'b mut DataflowAnalysis<'a, 'tcx, O>, + changed: bool, +} + +impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> + where BD: BitDenotation + DataflowOperator +{ + fn propagate(&mut self) { + let mut temp = IdxSetBuf::new_empty(self.flow_state.sets.bits_per_block); + let mut propcx = PropagationContext { + builder: self, + changed: true, + }; + while propcx.changed { + propcx.changed = false; + propcx.reset(&mut temp); + propcx.walk_cfg(&mut temp); + } + } + + fn build_sets(&mut self) { + // First we need to build the entry-, gen- and kill-sets. The + // gather_moves information provides a high-level mapping from + // mir-locations to the MoveOuts (and those correspond + // directly to gen-sets here). But we still need to figure out + // the kill-sets. 
+ + { + let sets = &mut self.flow_state.sets.for_block(mir::START_BLOCK.index()); + self.flow_state.operator.start_block_effect(&self.ctxt, sets); + } + + for (bb, data) in self.mir.basic_blocks().iter_enumerated() { + let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data; + + let sets = &mut self.flow_state.sets.for_block(bb.index()); + for j_stmt in 0..statements.len() { + self.flow_state.operator.statement_effect(&self.ctxt, sets, bb, j_stmt); + } + + if terminator.is_some() { + let stmts_len = statements.len(); + self.flow_state.operator.terminator_effect(&self.ctxt, sets, bb, stmts_len); + } + } + } +} + +impl<'b, 'a: 'b, 'tcx: 'a, BD> PropagationContext<'b, 'a, 'tcx, BD> + where BD: BitDenotation + DataflowOperator +{ + fn reset(&mut self, bits: &mut IdxSet) { + let e = if BD::bottom_value() {!0} else {0}; + for b in bits.words_mut() { + *b = e; + } + } + + fn walk_cfg(&mut self, in_out: &mut IdxSet) { + let mir = self.builder.mir; + for (bb_idx, bb_data) in mir.basic_blocks().iter().enumerate() { + let builder = &mut self.builder; + { + let sets = builder.flow_state.sets.for_block(bb_idx); + debug_assert!(in_out.words().len() == sets.on_entry.words().len()); + in_out.clone_from(sets.on_entry); + in_out.union(sets.gen_set); + in_out.subtract(sets.kill_set); + } + builder.propagate_bits_into_graph_successors_of( + in_out, &mut self.changed, (mir::BasicBlock::new(bb_idx), bb_data)); + } + } +} + +fn dataflow_path(context: &str, prepost: &str, path: &str) -> PathBuf { + format!("{}_{}", context, prepost); + let mut path = PathBuf::from(path); + let new_file_name = { + let orig_file_name = path.file_name().unwrap().to_str().unwrap(); + format!("{}_{}", context, orig_file_name) + }; + path.set_file_name(new_file_name); + path +} + +impl<'a, 'tcx: 'a, BD> MirBorrowckCtxtPreDataflow<'a, 'tcx, BD> + where BD: BitDenotation> +{ + fn pre_dataflow_instrumentation

(&self, p: P) -> io::Result<()> + where P: Fn(&BD::Ctxt, BD::Idx) -> &Debug + { + if let Some(ref path_str) = self.print_preflow_to { + let path = dataflow_path(BD::name(), "preflow", path_str); + graphviz::print_borrowck_graph_to(self, &path, p) + } else { + Ok(()) + } + } + + fn post_dataflow_instrumentation

(&self, p: P) -> io::Result<()> + where P: Fn(&BD::Ctxt, BD::Idx) -> &Debug + { + if let Some(ref path_str) = self.print_postflow_to { + let path = dataflow_path(BD::name(), "postflow", path_str); + graphviz::print_borrowck_graph_to(self, &path, p) + } else{ + Ok(()) + } + } +} + +/// Maps each block to a set of bits +#[derive(Debug)] +struct Bits { + bits: IdxSetBuf, +} + +impl Clone for Bits { + fn clone(&self) -> Self { Bits { bits: self.bits.clone() } } +} + +impl Bits { + fn new(bits: IdxSetBuf) -> Self { + Bits { bits: bits } + } +} + +pub struct DataflowAnalysis<'a, 'tcx: 'a, O> + where O: BitDenotation, O::Ctxt: 'a +{ + flow_state: DataflowState, + mir: &'a Mir<'tcx>, + ctxt: &'a O::Ctxt, +} + +impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O> + where O: BitDenotation +{ + pub fn results(self) -> DataflowResults { + DataflowResults(self.flow_state) + } + + pub fn mir(&self) -> &'a Mir<'tcx> { self.mir } +} + +pub struct DataflowResults(DataflowState) where O: BitDenotation; + +impl DataflowResults { + pub fn sets(&self) -> &AllSets { + &self.0.sets + } +} + +// FIXME: This type shouldn't be public, but the graphviz::MirWithFlowState trait +// references it in a method signature. Look into using `pub(crate)` to address this. +pub struct DataflowState +{ + /// All the sets for the analysis. (Factored into its + /// own structure so that we can borrow it mutably + /// on its own separate from other fields.) + pub sets: AllSets, + + /// operator used to initialize, combine, and interpret bits. + operator: O, +} + +#[derive(Debug)] +pub struct AllSets { + /// Analysis bitwidth for each block. + bits_per_block: usize, + + /// Number of words associated with each block entry + /// equal to bits_per_block / usize::BITS, rounded up. + words_per_block: usize, + + /// For each block, bits generated by executing the statements in + /// the block. (For comparison, the Terminator for each block is + /// handled in a flow-specific manner during propagation.) 
+ gen_sets: Bits, + + /// For each block, bits killed by executing the statements in the + /// block. (For comparison, the Terminator for each block is + /// handled in a flow-specific manner during propagation.) + kill_sets: Bits, + + /// For each block, bits valid on entry to the block. + on_entry_sets: Bits, +} + +pub struct BlockSets<'a, E: Idx> { + on_entry: &'a mut IdxSet, + gen_set: &'a mut IdxSet, + kill_set: &'a mut IdxSet, +} + +impl<'a, E:Idx> BlockSets<'a, E> { + fn gen(&mut self, e: &E) { + self.gen_set.add(e); + self.kill_set.remove(e); + } + fn kill(&mut self, e: &E) { + self.gen_set.remove(e); + self.kill_set.add(e); + } +} + +impl AllSets { + pub fn bits_per_block(&self) -> usize { self.bits_per_block } + pub fn for_block(&mut self, block_idx: usize) -> BlockSets { + let offset = self.words_per_block * block_idx; + let range = E::new(offset)..E::new(offset + self.words_per_block); + BlockSets { + on_entry: self.on_entry_sets.bits.range_mut(&range), + gen_set: self.gen_sets.bits.range_mut(&range), + kill_set: self.kill_sets.bits.range_mut(&range), + } + } + + fn lookup_set_for<'a>(&self, sets: &'a Bits, block_idx: usize) -> &'a IdxSet { + let offset = self.words_per_block * block_idx; + let range = E::new(offset)..E::new(offset + self.words_per_block); + sets.bits.range(&range) + } + pub fn gen_set_for(&self, block_idx: usize) -> &IdxSet { + self.lookup_set_for(&self.gen_sets, block_idx) + } + pub fn kill_set_for(&self, block_idx: usize) -> &IdxSet { + self.lookup_set_for(&self.kill_sets, block_idx) + } + pub fn on_entry_set_for(&self, block_idx: usize) -> &IdxSet { + self.lookup_set_for(&self.on_entry_sets, block_idx) + } +} + +/// Parameterization for the precise form of data flow that is used. +pub trait DataflowOperator: BitwiseOperator { + /// Specifies the initial value for each bit in the `on_entry` set + fn bottom_value() -> bool; +} + +pub trait BitDenotation { + /// Specifies what index type is used to access the bitvector. 
+ type Idx: Idx; + + /// Specifies what, if any, separate context needs to be supplied for methods below. + type Ctxt; + + /// A name describing the dataflow analysis that this + /// BitDenotation is supporting. The name should be something + /// suitable for plugging in as part of a filename e.g. avoid + /// space-characters or other things that tend to look bad on a + /// file system, like slashes or periods. It is also better for + /// the name to be reasonably short, again because it will be + /// plugged into a filename. + fn name() -> &'static str; + + /// Size of each bitvector allocated for each block in the analysis. + fn bits_per_block(&self, &Self::Ctxt) -> usize; + + /// Mutates the block-sets (the flow sets for the given + /// basic block) according to the effects that have been + /// established *prior* to entering the start block. + /// + /// (For example, establishing the call arguments.) + /// + /// (Typically this should only modify `sets.on_entry`, since the + /// gen and kill sets should reflect the effects of *executing* + /// the start block itself.) + fn start_block_effect(&self, ctxt: &Self::Ctxt, sets: &mut BlockSets); + + /// Mutates the block-sets (the flow sets for the given + /// basic block) according to the effects of evaluating statement. + /// + /// This is used, in particular, for building up the + /// "transfer-function" represnting the overall-effect of the + /// block, represented via GEN and KILL sets. + /// + /// The statement is identified as `bb_data[idx_stmt]`, where + /// `bb_data` is the sequence of statements identifed by `bb` in + /// the MIR. + fn statement_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx_stmt: usize); + + /// Mutates the block-sets (the flow sets for the given + /// basic block) according to the effects of evaluating + /// the terminator. 
+ /// + /// This is used, in particular, for building up the + /// "transfer-function" represnting the overall-effect of the + /// block, represented via GEN and KILL sets. + /// + /// The effects applied here cannot depend on which branch the + /// terminator took. + fn terminator_effect(&self, + ctxt: &Self::Ctxt, + sets: &mut BlockSets, + bb: mir::BasicBlock, + idx_term: usize); + + /// Mutates the block-sets according to the (flow-dependent) + /// effect of a successful return from a Call terminator. + /// + /// If basic-block BB_x ends with a call-instruction that, upon + /// successful return, flows to BB_y, then this method will be + /// called on the exit flow-state of BB_x in order to set up the + /// entry flow-state of BB_y. + /// + /// This is used, in particular, as a special case during the + /// "propagate" loop where all of the basic blocks are repeatedly + /// visited. Since the effects of a Call terminator are + /// flow-dependent, the current MIR cannot encode them via just + /// GEN and KILL sets attached to the block, and so instead we add + /// this extra machinery to represent the flow-dependent effect. + /// + /// FIXME: Right now this is a bit of a wart in the API. It might + /// be better to represent this as an additional gen- and + /// kill-sets associated with each edge coming out of the basic + /// block. 
+ fn propagate_call_return(&self, + ctxt: &Self::Ctxt, + in_out: &mut IdxSet, + call_bb: mir::BasicBlock, + dest_bb: mir::BasicBlock, + dest_lval: &mir::Lvalue); +} + +impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> + where D: BitDenotation + DataflowOperator +{ + pub fn new(_tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, + ctxt: &'a D::Ctxt, + denotation: D) -> Self { + let bits_per_block = denotation.bits_per_block(&ctxt); + let usize_bits = mem::size_of::() * 8; + let words_per_block = (bits_per_block + usize_bits - 1) / usize_bits; + + // (now rounded up to multiple of word size) + let bits_per_block = words_per_block * usize_bits; + + let num_blocks = mir.basic_blocks().len(); + let num_overall = num_blocks * bits_per_block; + + let zeroes = Bits::new(IdxSetBuf::new_empty(num_overall)); + let on_entry = Bits::new(if D::bottom_value() { + IdxSetBuf::new_filled(num_overall) + } else { + IdxSetBuf::new_empty(num_overall) + }); + + DataflowAnalysis { + ctxt: ctxt, + mir: mir, + flow_state: DataflowState { + sets: AllSets { + bits_per_block: bits_per_block, + words_per_block: words_per_block, + gen_sets: zeroes.clone(), + kill_sets: zeroes, + on_entry_sets: on_entry, + }, + operator: denotation, + }, + } + + } +} + +impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> + where D: BitDenotation + DataflowOperator +{ + /// Propagates the bits of `in_out` into all the successors of `bb`, + /// using bitwise operator denoted by `self.operator`. + /// + /// For most blocks, this is entirely uniform. However, for blocks + /// that end with a call terminator, the effect of the call on the + /// dataflow state may depend on whether the call returned + /// successfully or unwound. + /// + /// To reflect this, the `propagate_call_return` method of the + /// `BitDenotation` mutates `in_out` when propagating `in_out` via + /// a call terminator; such mutation is performed *last*, to + /// ensure its side-effects do not leak elsewhere (e.g. into + /// unwind target). 
+ fn propagate_bits_into_graph_successors_of( + &mut self, + in_out: &mut IdxSet, + changed: &mut bool, + (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData)) + { + match bb_data.terminator().kind { + mir::TerminatorKind::Return | + mir::TerminatorKind::Resume | + mir::TerminatorKind::Unreachable => {} + mir::TerminatorKind::Goto { ref target } | + mir::TerminatorKind::Assert { ref target, cleanup: None, .. } | + mir::TerminatorKind::Drop { ref target, location: _, unwind: None } | + mir::TerminatorKind::DropAndReplace { + ref target, value: _, location: _, unwind: None + } => { + self.propagate_bits_into_entry_set_for(in_out, changed, target); + } + mir::TerminatorKind::Assert { ref target, cleanup: Some(ref unwind), .. } | + mir::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } | + mir::TerminatorKind::DropAndReplace { + ref target, value: _, location: _, unwind: Some(ref unwind) + } => { + self.propagate_bits_into_entry_set_for(in_out, changed, target); + self.propagate_bits_into_entry_set_for(in_out, changed, unwind); + } + mir::TerminatorKind::If { ref targets, .. } => { + self.propagate_bits_into_entry_set_for(in_out, changed, &targets.0); + self.propagate_bits_into_entry_set_for(in_out, changed, &targets.1); + } + mir::TerminatorKind::Switch { ref targets, .. } | + mir::TerminatorKind::SwitchInt { ref targets, .. } => { + for target in targets { + self.propagate_bits_into_entry_set_for(in_out, changed, target); + } + } + mir::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => { + if let Some(ref unwind) = *cleanup { + self.propagate_bits_into_entry_set_for(in_out, changed, unwind); + } + if let Some((ref dest_lval, ref dest_bb)) = *destination { + // N.B.: This must be done *last*, after all other + // propagation, as documented in comment above. 
+ self.flow_state.operator.propagate_call_return( + &self.ctxt, in_out, bb, *dest_bb, dest_lval); + self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb); + } + } + } + } + + fn propagate_bits_into_entry_set_for(&mut self, + in_out: &IdxSet, + changed: &mut bool, + bb: &mir::BasicBlock) { + let entry_set = self.flow_state.sets.for_block(bb.index()).on_entry; + let set_changed = bitwise(entry_set.words_mut(), + in_out.words(), + &self.flow_state.operator); + if set_changed { + *changed = true; + } + } +} diff --git a/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs new file mode 100644 index 0000000000000..916d17dcc91de --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/dataflow/sanity_check.rs @@ -0,0 +1,181 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax::abi::{Abi}; +use syntax::ast; +use syntax_pos::Span; + +use rustc::ty::{self, TyCtxt}; +use rustc::mir::{self, Mir}; +use rustc_data_structures::indexed_vec::Idx; + +use super::super::gather_moves::{MovePathIndex, LookupResult}; +use super::super::MoveDataParamEnv; +use super::BitDenotation; +use super::DataflowResults; + +/// This function scans `mir` for all calls to the intrinsic +/// `rustc_peek` that have the expression form `rustc_peek(&expr)`. +/// +/// For each such call, determines what the dataflow bit-state is for +/// the L-value corresponding to `expr`; if the bit-state is a 1, then +/// that call to `rustc_peek` is ignored by the sanity check. If the +/// bit-state is a 0, then this pass emits a error message saying +/// "rustc_peek: bit not set". 
+/// +/// The intention is that one can write unit tests for dataflow by +/// putting code into a compile-fail test and using `rustc_peek` to +/// make observations about the results of dataflow static analyses. +/// +/// (If there are any calls to `rustc_peek` that do not match the +/// expression form above, then that emits an error as well, but those +/// errors are not intended to be used for unit tests.) +pub fn sanity_check_via_rustc_peek<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + id: ast::NodeId, + _attributes: &[ast::Attribute], + flow_ctxt: &O::Ctxt, + results: &DataflowResults) + where O: BitDenotation, Idx=MovePathIndex> +{ + debug!("sanity_check_via_rustc_peek id: {:?}", id); + // FIXME: this is not DRY. Figure out way to abstract this and + // `dataflow::build_sets`. (But note it is doing non-standard + // stuff, so such generalization may not be realistic.) + + for bb in mir.basic_blocks().indices() { + each_block(tcx, mir, flow_ctxt, results, bb); + } +} + +fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + ctxt: &O::Ctxt, + results: &DataflowResults, + bb: mir::BasicBlock) where + O: BitDenotation, Idx=MovePathIndex> +{ + let move_data = &ctxt.move_data; + let mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = mir[bb]; + + let (args, span) = match is_rustc_peek(tcx, terminator) { + Some(args_and_span) => args_and_span, + None => return, + }; + assert!(args.len() == 1); + let peek_arg_lval = match args[0] { + mir::Operand::Consume(ref lval @ mir::Lvalue::Local(_)) => Some(lval), + _ => None, + }; + + let peek_arg_lval = match peek_arg_lval { + Some(arg) => arg, + None => { + tcx.sess.diagnostic().span_err( + span, "dataflow::sanity_check cannot feed a non-temp to rustc_peek."); + return; + } + }; + + let mut entry = results.0.sets.on_entry_set_for(bb.index()).to_owned(); + let mut gen = results.0.sets.gen_set_for(bb.index()).to_owned(); + let mut kill = 
results.0.sets.kill_set_for(bb.index()).to_owned(); + + // Emulate effect of all statements in the block up to (but not + // including) the borrow within `peek_arg_lval`. Do *not* include + // call to `peek_arg_lval` itself (since we are peeking the state + // of the argument at time immediate preceding Call to + // `rustc_peek`). + + let mut sets = super::BlockSets { on_entry: &mut entry, + gen_set: &mut gen, + kill_set: &mut kill }; + + for (j, stmt) in statements.iter().enumerate() { + debug!("rustc_peek: ({:?},{}) {:?}", bb, j, stmt); + let (lvalue, rvalue) = match stmt.kind { + mir::StatementKind::Assign(ref lvalue, ref rvalue) => { + (lvalue, rvalue) + } + mir::StatementKind::StorageLive(_) | + mir::StatementKind::StorageDead(_) | + mir::StatementKind::Nop => continue, + mir::StatementKind::SetDiscriminant{ .. } => + span_bug!(stmt.source_info.span, + "sanity_check should run before Deaggregator inserts SetDiscriminant"), + }; + + if lvalue == peek_arg_lval { + if let mir::Rvalue::Ref(_, mir::BorrowKind::Shared, ref peeking_at_lval) = *rvalue { + // Okay, our search is over. + match move_data.rev_lookup.find(peeking_at_lval) { + LookupResult::Exact(peek_mpi) => { + let bit_state = sets.on_entry.contains(&peek_mpi); + debug!("rustc_peek({:?} = &{:?}) bit_state: {}", + lvalue, peeking_at_lval, bit_state); + if !bit_state { + tcx.sess.span_err(span, "rustc_peek: bit not set"); + } + } + LookupResult::Parent(..) => { + tcx.sess.span_err(span, "rustc_peek: argument untracked"); + } + } + return; + } else { + // Our search should have been over, but the input + // does not match expectations of `rustc_peek` for + // this sanity_check. 
+ let msg = "rustc_peek: argument expression \ + must be immediate borrow of form `&expr`"; + tcx.sess.span_err(span, msg); + } + } + + let lhs_mpi = move_data.rev_lookup.find(lvalue); + + debug!("rustc_peek: computing effect on lvalue: {:?} ({:?}) in stmt: {:?}", + lvalue, lhs_mpi, stmt); + // reset GEN and KILL sets before emulating their effect. + for e in sets.gen_set.words_mut() { *e = 0; } + for e in sets.kill_set.words_mut() { *e = 0; } + results.0.operator.statement_effect(ctxt, &mut sets, bb, j); + sets.on_entry.union(sets.gen_set); + sets.on_entry.subtract(sets.kill_set); + } + + tcx.sess.span_err(span, &format!("rustc_peek: MIR did not match \ + anticipated pattern; note that \ + rustc_peek expects input of \ + form `&expr`")); +} + +fn is_rustc_peek<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + terminator: &'a Option>) + -> Option<(&'a [mir::Operand<'tcx>], Span)> { + if let Some(mir::Terminator { ref kind, source_info, .. }) = *terminator { + if let mir::TerminatorKind::Call { func: ref oper, ref args, .. } = *kind + { + if let mir::Operand::Constant(ref func) = *oper + { + if let ty::TyFnDef(def_id, _, &ty::BareFnTy { abi, .. }) = func.ty.sty + { + let name = tcx.item_name(def_id); + if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { + if name == "rustc_peek" { + return Some((args, source_info.span)); + } + } + } + } + } + } + return None; +} diff --git a/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs new file mode 100644 index 0000000000000..4f49bfc9725b3 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/elaborate_drops.rs @@ -0,0 +1,1030 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +use super::gather_moves::{MoveData, MovePathIndex, LookupResult}; +use super::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; +use super::dataflow::{DataflowResults}; +use super::{drop_flag_effects_for_location, on_all_children_bits}; +use super::on_lookup_result_bits; +use super::{DropFlagState, MoveDataParamEnv}; +use super::patch::MirPatch; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::{Kind, Subst, Substs}; +use rustc::mir::*; +use rustc::mir::transform::{Pass, MirPass, MirSource}; +use rustc::middle::const_val::ConstVal; +use rustc::middle::lang_items; +use rustc::util::nodemap::FxHashMap; +use rustc_data_structures::indexed_set::IdxSetBuf; +use rustc_data_structures::indexed_vec::Idx; +use syntax_pos::Span; + +use std::fmt; +use std::iter; +use std::u32; + +pub struct ElaborateDrops; + +impl<'tcx> MirPass<'tcx> for ElaborateDrops { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>) + { + debug!("elaborate_drops({:?} @ {:?})", src, mir.span); + match src { + MirSource::Fn(..) 
=> {}, + _ => return + } + let id = src.item_id(); + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + let move_data = MoveData::gather_moves(mir, tcx, ¶m_env); + let elaborate_patch = { + let mir = &*mir; + let env = MoveDataParamEnv { + move_data: move_data, + param_env: param_env + }; + let flow_inits = + super::do_dataflow(tcx, mir, id, &[], &env, + MaybeInitializedLvals::new(tcx, mir)); + let flow_uninits = + super::do_dataflow(tcx, mir, id, &[], &env, + MaybeUninitializedLvals::new(tcx, mir)); + + ElaborateDropsCtxt { + tcx: tcx, + mir: mir, + env: &env, + flow_inits: flow_inits, + flow_uninits: flow_uninits, + drop_flags: FxHashMap(), + patch: MirPatch::new(mir), + }.elaborate() + }; + elaborate_patch.apply(mir); + } +} + +impl Pass for ElaborateDrops {} + +struct InitializationData { + live: IdxSetBuf, + dead: IdxSetBuf +} + +impl InitializationData { + fn apply_location<'a,'tcx>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + env: &MoveDataParamEnv<'tcx>, + loc: Location) + { + drop_flag_effects_for_location(tcx, mir, env, loc, |path, df| { + debug!("at location {:?}: setting {:?} to {:?}", + loc, path, df); + match df { + DropFlagState::Present => { + self.live.add(&path); + self.dead.remove(&path); + } + DropFlagState::Absent => { + self.dead.add(&path); + self.live.remove(&path); + } + } + }); + } + + fn state(&self, path: MovePathIndex) -> (bool, bool) { + (self.live.contains(&path), self.dead.contains(&path)) + } +} + +impl fmt::Debug for InitializationData { + fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + Ok(()) + } +} + +struct ElaborateDropsCtxt<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &'a Mir<'tcx>, + env: &'a MoveDataParamEnv<'tcx>, + flow_inits: DataflowResults>, + flow_uninits: DataflowResults>, + drop_flags: FxHashMap, + patch: MirPatch<'tcx>, +} + +#[derive(Copy, Clone, Debug)] +struct DropCtxt<'a, 'tcx: 'a> { + source_info: SourceInfo, + is_cleanup: bool, + + init_data: &'a 
InitializationData, + + lvalue: &'a Lvalue<'tcx>, + path: MovePathIndex, + succ: BasicBlock, + unwind: Option +} + +impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { + fn move_data(&self) -> &'b MoveData<'tcx> { &self.env.move_data } + fn param_env(&self) -> &'b ty::ParameterEnvironment<'tcx> { + &self.env.param_env + } + + fn initialization_data_at(&self, loc: Location) -> InitializationData { + let mut data = InitializationData { + live: self.flow_inits.sets().on_entry_set_for(loc.block.index()) + .to_owned(), + dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index()) + .to_owned(), + }; + for stmt in 0..loc.statement_index { + data.apply_location(self.tcx, self.mir, self.env, + Location { block: loc.block, statement_index: stmt }); + } + data + } + + fn create_drop_flag(&mut self, index: MovePathIndex) { + let tcx = self.tcx; + let patch = &mut self.patch; + self.drop_flags.entry(index).or_insert_with(|| { + patch.new_temp(tcx.types.bool) + }); + } + + fn drop_flag(&mut self, index: MovePathIndex) -> Option> { + self.drop_flags.get(&index).map(|t| Lvalue::Local(*t)) + } + + /// create a patch that elaborates all drops in the input + /// MIR. + fn elaborate(mut self) -> MirPatch<'tcx> + { + self.collect_drop_flags(); + + self.elaborate_drops(); + + self.drop_flags_on_init(); + self.drop_flags_for_fn_rets(); + self.drop_flags_for_args(); + self.drop_flags_for_locs(); + + self.patch + } + + fn path_needs_drop(&self, path: MovePathIndex) -> bool + { + let lvalue = &self.move_data().move_paths[path].lvalue; + let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + debug!("path_needs_drop({:?}, {:?} : {:?})", path, lvalue, ty); + + self.tcx.type_needs_drop_given_env(ty, self.param_env()) + } + + fn collect_drop_flags(&mut self) + { + for (bb, data) in self.mir.basic_blocks().iter_enumerated() { + let terminator = data.terminator(); + let location = match terminator.kind { + TerminatorKind::Drop { ref location, .. 
} | + TerminatorKind::DropAndReplace { ref location, .. } => location, + _ => continue + }; + + let init_data = self.initialization_data_at(Location { + block: bb, + statement_index: data.statements.len() + }); + + let path = self.move_data().rev_lookup.find(location); + debug!("collect_drop_flags: {:?}, lv {:?} ({:?})", + bb, location, path); + + let path = match path { + LookupResult::Exact(e) => e, + LookupResult::Parent(None) => continue, + LookupResult::Parent(Some(parent)) => { + let (_maybe_live, maybe_dead) = init_data.state(parent); + if maybe_dead { + span_bug!(terminator.source_info.span, + "drop of untracked, uninitialized value {:?}, lv {:?} ({:?})", + bb, location, path); + } + continue + } + }; + + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { + if self.path_needs_drop(child) { + let (maybe_live, maybe_dead) = init_data.state(child); + debug!("collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}", + child, location, path, (maybe_live, maybe_dead)); + if maybe_live && maybe_dead { + self.create_drop_flag(child) + } + } + }); + } + } + + fn elaborate_drops(&mut self) + { + for (bb, data) in self.mir.basic_blocks().iter_enumerated() { + let loc = Location { block: bb, statement_index: data.statements.len() }; + let terminator = data.terminator(); + + let resume_block = self.patch.resume_block(); + match terminator.kind { + TerminatorKind::Drop { ref location, target, unwind } => { + let init_data = self.initialization_data_at(loc); + match self.move_data().rev_lookup.find(location) { + LookupResult::Exact(path) => { + self.elaborate_drop(&DropCtxt { + source_info: terminator.source_info, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: if data.is_cleanup { + None + } else { + Some(Option::unwrap_or(unwind, resume_block)) + } + }, bb); + } + LookupResult::Parent(..) 
=> { + span_bug!(terminator.source_info.span, + "drop of untracked value {:?}", bb); + } + } + } + TerminatorKind::DropAndReplace { ref location, ref value, + target, unwind } => + { + assert!(!data.is_cleanup); + + self.elaborate_replace( + loc, + location, value, + target, unwind + ); + } + _ => continue + } + } + } + + /// Elaborate a MIR `replace` terminator. This instruction + /// is not directly handled by translation, and therefore + /// must be desugared. + /// + /// The desugaring drops the location if needed, and then writes + /// the value (including setting the drop flag) over it in *both* arms. + /// + /// The `replace` terminator can also be called on lvalues that + /// are not tracked by elaboration (for example, + /// `replace x[i] <- tmp0`). The borrow checker requires that + /// these locations are initialized before the assignment, + /// so we just generate an unconditional drop. + fn elaborate_replace( + &mut self, + loc: Location, + location: &Lvalue<'tcx>, + value: &Operand<'tcx>, + target: BasicBlock, + unwind: Option) + { + let bb = loc.block; + let data = &self.mir[bb]; + let terminator = data.terminator(); + + let assign = Statement { + kind: StatementKind::Assign(location.clone(), Rvalue::Use(value.clone())), + source_info: terminator.source_info + }; + + let unwind = unwind.unwrap_or(self.patch.resume_block()); + let unwind = self.patch.new_block(BasicBlockData { + statements: vec![assign.clone()], + terminator: Some(Terminator { + kind: TerminatorKind::Goto { target: unwind }, + ..*terminator + }), + is_cleanup: true + }); + + let target = self.patch.new_block(BasicBlockData { + statements: vec![assign], + terminator: Some(Terminator { + kind: TerminatorKind::Goto { target: target }, + ..*terminator + }), + is_cleanup: data.is_cleanup, + }); + + match self.move_data().rev_lookup.find(location) { + LookupResult::Exact(path) => { + debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path); + let init_data = 
self.initialization_data_at(loc); + + self.elaborate_drop(&DropCtxt { + source_info: terminator.source_info, + is_cleanup: data.is_cleanup, + init_data: &init_data, + lvalue: location, + path: path, + succ: target, + unwind: Some(unwind) + }, bb); + on_all_children_bits(self.tcx, self.mir, self.move_data(), path, |child| { + self.set_drop_flag(Location { block: target, statement_index: 0 }, + child, DropFlagState::Present); + self.set_drop_flag(Location { block: unwind, statement_index: 0 }, + child, DropFlagState::Present); + }); + } + LookupResult::Parent(parent) => { + // drop and replace behind a pointer/array/whatever. The location + // must be initialized. + debug!("elaborate_drop_and_replace({:?}) - untracked {:?}", terminator, parent); + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: location.clone(), + target: target, + unwind: Some(unwind) + }); + } + } + } + + /// This elaborates a single drop instruction, located at `bb`, and + /// patches over it. + /// + /// The elaborated drop checks the drop flags to only drop what + /// is initialized. + /// + /// In addition, the relevant drop flags also need to be cleared + /// to avoid double-drops. However, in the middle of a complex + /// drop, one must avoid clearing some of the flags before they + /// are read, as that would cause a memory leak. + /// + /// In particular, when dropping an ADT, multiple fields may be + /// joined together under the `rest` subpath. They are all controlled + /// by the primary drop flag, but only the last rest-field dropped + /// should clear it (and it must also not clear anything else). + /// + /// FIXME: I think we should just control the flags externally + /// and then we do not need this machinery. 
+ fn elaborate_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, bb: BasicBlock) { + debug!("elaborate_drop({:?})", c); + + let mut some_live = false; + let mut some_dead = false; + let mut children_count = 0; + on_all_children_bits( + self.tcx, self.mir, self.move_data(), + c.path, |child| { + if self.path_needs_drop(child) { + let (live, dead) = c.init_data.state(child); + debug!("elaborate_drop: state({:?}) = {:?}", + child, (live, dead)); + some_live |= live; + some_dead |= dead; + children_count += 1; + } + }); + + debug!("elaborate_drop({:?}): live - {:?}", c, + (some_live, some_dead)); + match (some_live, some_dead) { + (false, false) | (false, true) => { + // dead drop - patch it out + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: c.succ + }); + } + (true, false) => { + // static drop - just set the flag + self.patch.patch_terminator(bb, TerminatorKind::Drop { + location: c.lvalue.clone(), + target: c.succ, + unwind: c.unwind + }); + self.drop_flags_for_drop(c, bb); + } + (true, true) => { + // dynamic drop + let drop_bb = if children_count == 1 || self.must_complete_drop(c) { + self.conditional_drop(c) + } else { + self.open_drop(c) + }; + self.patch.patch_terminator(bb, TerminatorKind::Goto { + target: drop_bb + }); + } + } + } + + /// Return the lvalue and move path for each field of `variant`, + /// (the move path is `None` if the field is a rest field). + fn move_paths_for_fields(&self, + base_lv: &Lvalue<'tcx>, + variant_path: MovePathIndex, + variant: &'tcx ty::VariantDef, + substs: &'tcx Substs<'tcx>) + -> Vec<(Lvalue<'tcx>, Option)> + { + variant.fields.iter().enumerate().map(|(i, f)| { + let subpath = + super::move_path_children_matching(self.move_data(), variant_path, |p| { + match p { + &Projection { + elem: ProjectionElem::Field(idx, _), .. 
+ } => idx.index() == i, + _ => false + } + }); + + let field_ty = + self.tcx.normalize_associated_type_in_env( + &f.ty(self.tcx, substs), + self.param_env() + ); + (base_lv.clone().field(Field::new(i), field_ty), subpath) + }).collect() + } + + /// Create one-half of the drop ladder for a list of fields, and return + /// the list of steps in it in reverse order. + /// + /// `unwind_ladder` is such a list of steps in reverse order, + /// which is called instead of the next step if the drop unwinds + /// (the first field is never reached). If it is `None`, all + /// unwind targets are left blank. + fn drop_halfladder<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + unwind_ladder: Option>, + succ: BasicBlock, + fields: &[(Lvalue<'tcx>, Option)], + is_cleanup: bool) + -> Vec + { + let mut succ = succ; + let mut unwind_succ = if is_cleanup { + None + } else { + c.unwind + }; + let mut update_drop_flag = true; + + fields.iter().rev().enumerate().map(|(i, &(ref lv, path))| { + let drop_block = match path { + Some(path) => { + debug!("drop_ladder: for std field {} ({:?})", i, lv); + + self.elaborated_drop_block(&DropCtxt { + source_info: c.source_info, + is_cleanup: is_cleanup, + init_data: c.init_data, + lvalue: lv, + path: path, + succ: succ, + unwind: unwind_succ, + }) + } + None => { + debug!("drop_ladder: for rest field {} ({:?})", i, lv); + + let blk = self.complete_drop(&DropCtxt { + source_info: c.source_info, + is_cleanup: is_cleanup, + init_data: c.init_data, + lvalue: lv, + path: c.path, + succ: succ, + unwind: unwind_succ, + }, update_drop_flag); + + // the drop flag has been updated - updating + // it again would clobber it. 
+ update_drop_flag = false; + + blk + } + }; + + succ = drop_block; + unwind_succ = unwind_ladder.as_ref().map(|p| p[i]); + + drop_block + }).collect() + } + + /// Create a full drop ladder, consisting of 2 connected half-drop-ladders + /// + /// For example, with 3 fields, the drop ladder is + /// + /// .d0: + /// ELAB(drop location.0 [target=.d1, unwind=.c1]) + /// .d1: + /// ELAB(drop location.1 [target=.d2, unwind=.c2]) + /// .d2: + /// ELAB(drop location.2 [target=`c.succ`, unwind=`c.unwind`]) + /// .c1: + /// ELAB(drop location.1 [target=.c2]) + /// .c2: + /// ELAB(drop location.2 [target=`c.unwind]) + fn drop_ladder<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + fields: Vec<(Lvalue<'tcx>, Option)>) + -> BasicBlock + { + debug!("drop_ladder({:?}, {:?})", c, fields); + + let mut fields = fields; + fields.retain(|&(ref lvalue, _)| { + let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + self.tcx.type_needs_drop_given_env(ty, self.param_env()) + }); + + debug!("drop_ladder - fields needing drop: {:?}", fields); + + let unwind_ladder = if c.is_cleanup { + None + } else { + Some(self.drop_halfladder(c, None, c.unwind.unwrap(), &fields, true)) + }; + + self.drop_halfladder(c, unwind_ladder, c.succ, &fields, c.is_cleanup) + .last().cloned().unwrap_or(c.succ) + } + + fn open_drop_for_tuple<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, tys: &[Ty<'tcx>]) + -> BasicBlock + { + debug!("open_drop_for_tuple({:?}, {:?})", c, tys); + + let fields = tys.iter().enumerate().map(|(i, &ty)| { + (c.lvalue.clone().field(Field::new(i), ty), + super::move_path_children_matching( + self.move_data(), c.path, |proj| match proj { + &Projection { + elem: ProjectionElem::Field(f, _), .. 
+ } => f.index() == i, + _ => false + } + )) + }).collect(); + + self.drop_ladder(c, fields) + } + + fn open_drop_for_box<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, ty: Ty<'tcx>) + -> BasicBlock + { + debug!("open_drop_for_box({:?}, {:?})", c, ty); + + let interior_path = super::move_path_children_matching( + self.move_data(), c.path, |proj| match proj { + &Projection { elem: ProjectionElem::Deref, .. } => true, + _ => false + }).unwrap(); + + let interior = c.lvalue.clone().deref(); + let inner_c = DropCtxt { + lvalue: &interior, + unwind: c.unwind.map(|u| { + self.box_free_block(c, ty, u, true) + }), + succ: self.box_free_block(c, ty, c.succ, c.is_cleanup), + path: interior_path, + ..*c + }; + + self.elaborated_drop_block(&inner_c) + } + + fn open_drop_for_variant<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + drop_block: &mut Option, + adt: &'tcx ty::AdtDef, + substs: &'tcx Substs<'tcx>, + variant_index: usize) + -> BasicBlock + { + let subpath = super::move_path_children_matching( + self.move_data(), c.path, |proj| match proj { + &Projection { + elem: ProjectionElem::Downcast(_, idx), .. 
+ } => idx == variant_index, + _ => false + }); + + if let Some(variant_path) = subpath { + let base_lv = c.lvalue.clone().elem( + ProjectionElem::Downcast(adt, variant_index) + ); + let fields = self.move_paths_for_fields( + &base_lv, + variant_path, + &adt.variants[variant_index], + substs); + self.drop_ladder(c, fields) + } else { + // variant not found - drop the entire enum + if let None = *drop_block { + *drop_block = Some(self.complete_drop(c, true)); + } + return drop_block.unwrap(); + } + } + + fn open_drop_for_adt<'a>(&mut self, c: &DropCtxt<'a, 'tcx>, + adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) + -> BasicBlock { + debug!("open_drop_for_adt({:?}, {:?}, {:?})", c, adt, substs); + + let mut drop_block = None; + + match adt.variants.len() { + 1 => { + let fields = self.move_paths_for_fields( + c.lvalue, + c.path, + &adt.variants[0], + substs + ); + self.drop_ladder(c, fields) + } + _ => { + let variant_drops : Vec = + (0..adt.variants.len()).map(|i| { + self.open_drop_for_variant(c, &mut drop_block, + adt, substs, i) + }).collect(); + + // If there are multiple variants, then if something + // is present within the enum the discriminant, tracked + // by the rest path, must be initialized. + // + // Additionally, we do not want to switch on the + // discriminant after it is free-ed, because that + // way lies only trouble. + + let switch_block = self.new_block( + c, c.is_cleanup, TerminatorKind::Switch { + discr: c.lvalue.clone(), + adt_def: adt, + targets: variant_drops + }); + + self.drop_flag_test_block(c, switch_block) + } + } + } + + /// The slow-path - create an "open", elaborated drop for a type + /// which is moved-out-of only partially, and patch `bb` to a jump + /// to it. This must not be called on ADTs with a destructor, + /// as these can't be moved-out-of, except for `Box`, which is + /// special-cased. 
+ /// + /// This creates a "drop ladder" that drops the needed fields of the + /// ADT, both in the success case or if one of the destructors fail. + fn open_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + match ty.sty { + ty::TyAdt(def, substs) => { + self.open_drop_for_adt(c, def, substs) + } + ty::TyClosure(def_id, substs) => { + let tys : Vec<_> = substs.upvar_tys(def_id, self.tcx).collect(); + self.open_drop_for_tuple(c, &tys) + } + ty::TyTuple(tys) => { + self.open_drop_for_tuple(c, tys) + } + ty::TyBox(ty) => { + self.open_drop_for_box(c, ty) + } + _ => bug!("open drop from non-ADT `{:?}`", ty) + } + } + + /// Return a basic block that drop an lvalue using the context + /// and path in `c`. If `update_drop_flag` is true, also + /// clear `c`. + /// + /// if FLAG(c.path) + /// if(update_drop_flag) FLAG(c.path) = false + /// drop(c.lv) + fn complete_drop<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + update_drop_flag: bool) + -> BasicBlock + { + debug!("complete_drop({:?},{:?})", c, update_drop_flag); + + let drop_block = self.drop_block(c); + if update_drop_flag { + self.set_drop_flag( + Location { block: drop_block, statement_index: 0 }, + c.path, + DropFlagState::Absent + ); + } + + self.drop_flag_test_block(c, drop_block) + } + + /// Create a simple conditional drop. 
+ /// + /// if FLAG(c.lv) + /// FLAGS(c.lv) = false + /// drop(c.lv) + fn conditional_drop<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) + -> BasicBlock + { + debug!("conditional_drop({:?})", c); + let drop_bb = self.drop_block(c); + self.drop_flags_for_drop(c, drop_bb); + + self.drop_flag_test_block(c, drop_bb) + } + + fn new_block<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + is_cleanup: bool, + k: TerminatorKind<'tcx>) + -> BasicBlock + { + self.patch.new_block(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + source_info: c.source_info, kind: k + }), + is_cleanup: is_cleanup + }) + } + + fn elaborated_drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + debug!("elaborated_drop_block({:?})", c); + let blk = self.drop_block(c); + self.elaborate_drop(c, blk); + blk + } + + fn drop_flag_test_block<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + on_set: BasicBlock) + -> BasicBlock { + self.drop_flag_test_block_with_succ(c, c.is_cleanup, on_set, c.succ) + } + + fn drop_flag_test_block_with_succ<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + is_cleanup: bool, + on_set: BasicBlock, + on_unset: BasicBlock) + -> BasicBlock + { + let (maybe_live, maybe_dead) = c.init_data.state(c.path); + debug!("drop_flag_test_block({:?},{:?},{:?}) - {:?}", + c, is_cleanup, on_set, (maybe_live, maybe_dead)); + + match (maybe_live, maybe_dead) { + (false, _) => on_unset, + (true, false) => on_set, + (true, true) => { + let flag = self.drop_flag(c.path).unwrap(); + self.new_block(c, is_cleanup, TerminatorKind::If { + cond: Operand::Consume(flag), + targets: (on_set, on_unset) + }) + } + } + } + + fn drop_block<'a>(&mut self, c: &DropCtxt<'a, 'tcx>) -> BasicBlock { + self.new_block(c, c.is_cleanup, TerminatorKind::Drop { + location: c.lvalue.clone(), + target: c.succ, + unwind: c.unwind + }) + } + + fn box_free_block<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + ty: Ty<'tcx>, + target: BasicBlock, + is_cleanup: bool + ) -> BasicBlock { + let block = 
self.unelaborated_free_block(c, ty, target, is_cleanup); + self.drop_flag_test_block_with_succ(c, is_cleanup, block, target) + } + + fn unelaborated_free_block<'a>( + &mut self, + c: &DropCtxt<'a, 'tcx>, + ty: Ty<'tcx>, + target: BasicBlock, + is_cleanup: bool + ) -> BasicBlock { + let mut statements = vec![]; + if let Some(&flag) = self.drop_flags.get(&c.path) { + statements.push(Statement { + source_info: c.source_info, + kind: StatementKind::Assign( + Lvalue::Local(flag), + self.constant_bool(c.source_info.span, false) + ) + }); + } + + let tcx = self.tcx; + let unit_temp = Lvalue::Local(self.patch.new_temp(tcx.mk_nil())); + let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); + let substs = tcx.mk_substs(iter::once(Kind::from(ty))); + let fty = tcx.item_type(free_func).subst(tcx, substs); + + self.patch.new_block(BasicBlockData { + statements: statements, + terminator: Some(Terminator { + source_info: c.source_info, kind: TerminatorKind::Call { + func: Operand::Constant(Constant { + span: c.source_info.span, + ty: fty, + literal: Literal::Item { + def_id: free_func, + substs: substs + } + }), + args: vec![Operand::Consume(c.lvalue.clone())], + destination: Some((unit_temp, target)), + cleanup: None + } + }), + is_cleanup: is_cleanup + }) + } + + fn must_complete_drop<'a>(&self, c: &DropCtxt<'a, 'tcx>) -> bool { + // if we have a destructor, we must *not* split the drop. + + // dataflow can create unneeded children in some cases + // - be sure to ignore them. + + let ty = c.lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + + match ty.sty { + ty::TyAdt(def, _) => { + if def.has_dtor() { + self.tcx.sess.span_warn( + c.source_info.span, + &format!("dataflow bug??? 
moving out of type with dtor {:?}", + c)); + true + } else { + false + } + } + _ => false + } + } + + fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> { + Rvalue::Use(Operand::Constant(Constant { + span: span, + ty: self.tcx.types.bool, + literal: Literal::Value { value: ConstVal::Bool(val) } + })) + } + + fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) { + if let Some(&flag) = self.drop_flags.get(&path) { + let span = self.patch.source_info_for_location(self.mir, loc).span; + let val = self.constant_bool(span, val.value()); + self.patch.add_assign(loc, Lvalue::Local(flag), val); + } + } + + fn drop_flags_on_init(&mut self) { + let loc = Location { block: START_BLOCK, statement_index: 0 }; + let span = self.patch.source_info_for_location(self.mir, loc).span; + let false_ = self.constant_bool(span, false); + for flag in self.drop_flags.values() { + self.patch.add_assign(loc, Lvalue::Local(*flag), false_.clone()); + } + } + + fn drop_flags_for_fn_rets(&mut self) { + for (bb, data) in self.mir.basic_blocks().iter_enumerated() { + if let TerminatorKind::Call { + destination: Some((ref lv, tgt)), cleanup: Some(_), .. + } = data.terminator().kind { + assert!(!self.patch.is_patched(bb)); + + let loc = Location { block: tgt, statement_index: 0 }; + let path = self.move_data().rev_lookup.find(lv); + on_lookup_result_bits( + self.tcx, self.mir, self.move_data(), path, + |child| self.set_drop_flag(loc, child, DropFlagState::Present) + ); + } + } + } + + fn drop_flags_for_args(&mut self) { + let loc = Location { block: START_BLOCK, statement_index: 0 }; + super::drop_flag_effects_for_function_entry( + self.tcx, self.mir, self.env, |path, ds| { + self.set_drop_flag(loc, path, ds); + } + ) + } + + fn drop_flags_for_locs(&mut self) { + // We intentionally iterate only over the *old* basic blocks. 
+ // + // Basic blocks created by drop elaboration update their + // drop flags by themselves, to avoid the drop flags being + // clobbered before they are read. + + for (bb, data) in self.mir.basic_blocks().iter_enumerated() { + debug!("drop_flags_for_locs({:?})", data); + for i in 0..(data.statements.len()+1) { + debug!("drop_flag_for_locs: stmt {}", i); + let mut allow_initializations = true; + if i == data.statements.len() { + match data.terminator().kind { + TerminatorKind::Drop { .. } => { + // drop elaboration should handle that by itself + continue + } + TerminatorKind::DropAndReplace { .. } => { + // this contains the move of the source and + // the initialization of the destination. We + // only want the former - the latter is handled + // by the elaboration code and must be done + // *after* the destination is dropped. + assert!(self.patch.is_patched(bb)); + allow_initializations = false; + } + _ => { + assert!(!self.patch.is_patched(bb)); + } + } + } + let loc = Location { block: bb, statement_index: i }; + super::drop_flag_effects_for_location( + self.tcx, self.mir, self.env, loc, |path, ds| { + if ds == DropFlagState::Absent || allow_initializations { + self.set_drop_flag(loc, path, ds) + } + } + ) + } + + // There may be a critical edge after this call, + // so mark the return as initialized *before* the + // call. + if let TerminatorKind::Call { + destination: Some((ref lv, _)), cleanup: None, .. 
+ } = data.terminator().kind { + assert!(!self.patch.is_patched(bb)); + + let loc = Location { block: bb, statement_index: data.statements.len() }; + let path = self.move_data().rev_lookup.find(lv); + on_lookup_result_bits( + self.tcx, self.mir, self.move_data(), path, + |child| self.set_drop_flag(loc, child, DropFlagState::Present) + ); + } + } + } + + fn drop_flags_for_drop<'a>(&mut self, + c: &DropCtxt<'a, 'tcx>, + bb: BasicBlock) + { + let loc = self.patch.terminator_loc(self.mir, bb); + on_all_children_bits( + self.tcx, self.mir, self.move_data(), c.path, + |child| self.set_drop_flag(loc, child, DropFlagState::Absent) + ); + } +} diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs new file mode 100644 index 0000000000000..02064b52cb1fb --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs @@ -0,0 +1,524 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +use rustc::ty::{self, TyCtxt, ParameterEnvironment}; +use rustc::mir::*; +use rustc::util::nodemap::FxHashMap; +use rustc_data_structures::indexed_vec::{IndexVec}; + +use syntax::codemap::DUMMY_SP; + +use std::collections::hash_map::Entry; +use std::fmt; +use std::mem; +use std::ops::{Index, IndexMut}; + +use super::abs_domain::{AbstractElem, Lift}; + +// This submodule holds some newtype'd Index wrappers that are using +// NonZero to ensure that Option occupies only a single word. +// They are in a submodule to impose privacy restrictions; namely, to +// ensure that other code does not accidentally access `index.0` +// (which is likely to yield a subtle off-by-one error). 
+mod indexes { + use std::fmt; + use core::nonzero::NonZero; + use rustc_data_structures::indexed_vec::Idx; + + macro_rules! new_index { + ($Index:ident, $debug_name:expr) => { + #[derive(Copy, Clone, PartialEq, Eq, Hash)] + pub struct $Index(NonZero); + + impl Idx for $Index { + fn new(idx: usize) -> Self { + unsafe { $Index(NonZero::new(idx + 1)) } + } + fn index(self) -> usize { + *self.0 - 1 + } + } + + impl fmt::Debug for $Index { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{}{}", $debug_name, self.index()) + } + } + } + } + + /// Index into MovePathData.move_paths + new_index!(MovePathIndex, "mp"); + + /// Index into MoveData.moves. + new_index!(MoveOutIndex, "mo"); +} + +pub use self::indexes::MovePathIndex; +pub use self::indexes::MoveOutIndex; + +impl self::indexes::MoveOutIndex { + pub fn move_path_index(&self, move_data: &MoveData) -> MovePathIndex { + move_data.moves[*self].path + } +} + +/// `MovePath` is a canonicalized representation of a path that is +/// moved or assigned to. +/// +/// It follows a tree structure. +/// +/// Given `struct X { m: M, n: N }` and `x: X`, moves like `drop x.m;` +/// move *out* of the l-value `x.m`. +/// +/// The MovePaths representing `x.m` and `x.n` are siblings (that is, +/// one of them will link to the other via the `next_sibling` field, +/// and the other will have no entry in its `next_sibling` field), and +/// they both have the MovePath representing `x` as their parent. 
+#[derive(Clone)] +pub struct MovePath<'tcx> { + pub next_sibling: Option, + pub first_child: Option, + pub parent: Option, + pub lvalue: Lvalue<'tcx>, +} + +impl<'tcx> fmt::Debug for MovePath<'tcx> { + fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + write!(w, "MovePath {{")?; + if let Some(parent) = self.parent { + write!(w, " parent: {:?},", parent)?; + } + if let Some(first_child) = self.first_child { + write!(w, " first_child: {:?},", first_child)?; + } + if let Some(next_sibling) = self.next_sibling { + write!(w, " next_sibling: {:?}", next_sibling)?; + } + write!(w, " lvalue: {:?} }}", self.lvalue) + } +} + +#[derive(Debug)] +pub struct MoveData<'tcx> { + pub move_paths: IndexVec>, + pub moves: IndexVec, + /// Each Location `l` is mapped to the MoveOut's that are effects + /// of executing the code at `l`. (There can be multiple MoveOut's + /// for a given `l` because each MoveOut is associated with one + /// particular path being moved.) + pub loc_map: LocationMap>, + pub path_map: IndexVec>, + pub rev_lookup: MovePathLookup<'tcx>, +} + +#[derive(Debug)] +pub struct LocationMap { + /// Location-indexed (BasicBlock for outer index, index within BB + /// for inner index) map. + map: IndexVec>, +} + +impl Index for LocationMap { + type Output = T; + fn index(&self, index: Location) -> &Self::Output { + &self.map[index.block][index.statement_index] + } +} + +impl IndexMut for LocationMap { + fn index_mut(&mut self, index: Location) -> &mut Self::Output { + &mut self.map[index.block][index.statement_index] + } +} + +impl LocationMap where T: Default + Clone { + fn new(mir: &Mir) -> Self { + LocationMap { + map: mir.basic_blocks().iter().map(|block| { + vec![T::default(); block.statements.len()+1] + }).collect() + } + } +} + +/// `MoveOut` represents a point in a program that moves out of some +/// L-value; i.e., "creates" uninitialized memory. 
+/// +/// With respect to dataflow analysis: +/// - Generated by moves and declaration of uninitialized variables. +/// - Killed by assignments to the memory. +#[derive(Copy, Clone)] +pub struct MoveOut { + /// path being moved + pub path: MovePathIndex, + /// location of move + pub source: Location, +} + +impl fmt::Debug for MoveOut { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "{:?}@{:?}", self.path, self.source) + } +} + +/// Tables mapping from an l-value to its MovePathIndex. +#[derive(Debug)] +pub struct MovePathLookup<'tcx> { + locals: IndexVec, + + /// projections are made from a base-lvalue and a projection + /// elem. The base-lvalue will have a unique MovePathIndex; we use + /// the latter as the index into the outer vector (narrowing + /// subsequent search so that it is solely relative to that + /// base-lvalue). For the remaining lookup, we map the projection + /// elem to the associated MovePathIndex. + projections: FxHashMap<(MovePathIndex, AbstractElem<'tcx>), MovePathIndex> +} + +struct MoveDataBuilder<'a, 'tcx: 'a> { + mir: &'a Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &'a ParameterEnvironment<'tcx>, + data: MoveData<'tcx>, +} + +pub enum MovePathError { + IllegalMove, + UnionMove { path: MovePathIndex }, +} + +impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { + fn new(mir: &'a Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &'a ParameterEnvironment<'tcx>) + -> Self { + let mut move_paths = IndexVec::new(); + let mut path_map = IndexVec::new(); + + MoveDataBuilder { + mir: mir, + tcx: tcx, + param_env: param_env, + data: MoveData { + moves: IndexVec::new(), + loc_map: LocationMap::new(mir), + rev_lookup: MovePathLookup { + locals: mir.local_decls.indices().map(Lvalue::Local).map(|v| { + Self::new_move_path(&mut move_paths, &mut path_map, None, v) + }).collect(), + projections: FxHashMap(), + }, + move_paths: move_paths, + path_map: path_map, + } + } + } + + fn new_move_path(move_paths: &mut IndexVec>, + 
path_map: &mut IndexVec>, + parent: Option, + lvalue: Lvalue<'tcx>) + -> MovePathIndex + { + let move_path = move_paths.push(MovePath { + next_sibling: None, + first_child: None, + parent: parent, + lvalue: lvalue + }); + + if let Some(parent) = parent { + let next_sibling = + mem::replace(&mut move_paths[parent].first_child, Some(move_path)); + move_paths[move_path].next_sibling = next_sibling; + } + + let path_map_ent = path_map.push(vec![]); + assert_eq!(path_map_ent, move_path); + move_path + } + + /// This creates a MovePath for a given lvalue, returning a `MovePathError` + /// if that lvalue can't be moved from. + /// + /// NOTE: lvalues behind references *do not* get a move path, which is + /// problematic for borrowck. + /// + /// Maybe we should have separate "borrowck" and "moveck" modes. + fn move_path_for(&mut self, lval: &Lvalue<'tcx>) + -> Result + { + debug!("lookup({:?})", lval); + match *lval { + Lvalue::Local(local) => Ok(self.data.rev_lookup.locals[local]), + // error: can't move out of a static + Lvalue::Static(..) => Err(MovePathError::IllegalMove), + Lvalue::Projection(ref proj) => { + self.move_path_for_projection(lval, proj) + } + } + } + + fn create_move_path(&mut self, lval: &Lvalue<'tcx>) { + // This is an assignment, not a move, so this not being a valid + // move path is OK. + let _ = self.move_path_for(lval); + } + + fn move_path_for_projection(&mut self, + lval: &Lvalue<'tcx>, + proj: &LvalueProjection<'tcx>) + -> Result + { + let base = try!(self.move_path_for(&proj.base)); + let lv_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + match lv_ty.sty { + // error: can't move out of borrowed content + ty::TyRef(..) | ty::TyRawPtr(..) 
=> return Err(MovePathError::IllegalMove), + // error: can't move out of struct with destructor + ty::TyAdt(adt, _) if adt.has_dtor() => + return Err(MovePathError::IllegalMove), + // move out of union - always move the entire union + ty::TyAdt(adt, _) if adt.is_union() => + return Err(MovePathError::UnionMove { path: base }), + // error: can't move out of a slice + ty::TySlice(..) => + return Err(MovePathError::IllegalMove), + ty::TyArray(..) => match proj.elem { + // error: can't move out of an array + ProjectionElem::Index(..) => return Err(MovePathError::IllegalMove), + _ => { + // FIXME: still badly broken + } + }, + _ => {} + }; + match self.data.rev_lookup.projections.entry((base, proj.elem.lift())) { + Entry::Occupied(ent) => Ok(*ent.get()), + Entry::Vacant(ent) => { + let path = Self::new_move_path( + &mut self.data.move_paths, + &mut self.data.path_map, + Some(base), + lval.clone() + ); + ent.insert(path); + Ok(path) + } + } + } + + fn finalize(self) -> MoveData<'tcx> { + debug!("{}", { + debug!("moves for {:?}:", self.mir.span); + for (j, mo) in self.data.moves.iter_enumerated() { + debug!(" {:?} = {:?}", j, mo); + } + debug!("move paths for {:?}:", self.mir.span); + for (j, path) in self.data.move_paths.iter_enumerated() { + debug!(" {:?} = {:?}", j, path); + } + "done dumping moves" + }); + self.data + } +} + +#[derive(Copy, Clone, Debug)] +pub enum LookupResult { + Exact(MovePathIndex), + Parent(Option) +} + +impl<'tcx> MovePathLookup<'tcx> { + // Unlike the builder `fn move_path_for` below, this lookup + // alternative will *not* create a MovePath on the fly for an + // unknown l-value, but will rather return the nearest available + // parent. + pub fn find(&self, lval: &Lvalue<'tcx>) -> LookupResult { + match *lval { + Lvalue::Local(local) => LookupResult::Exact(self.locals[local]), + Lvalue::Static(..) 
=> LookupResult::Parent(None), + Lvalue::Projection(ref proj) => { + match self.find(&proj.base) { + LookupResult::Exact(base_path) => { + match self.projections.get(&(base_path, proj.elem.lift())) { + Some(&subpath) => LookupResult::Exact(subpath), + None => LookupResult::Parent(Some(base_path)) + } + } + inexact => inexact + } + } + } + } +} + +impl<'a, 'tcx> MoveData<'tcx> { + pub fn gather_moves(mir: &Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>) + -> Self { + gather_moves(mir, tcx, param_env) + } +} + +fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>) + -> MoveData<'tcx> { + let mut builder = MoveDataBuilder::new(mir, tcx, param_env); + + for (bb, block) in mir.basic_blocks().iter_enumerated() { + for (i, stmt) in block.statements.iter().enumerate() { + let source = Location { block: bb, statement_index: i }; + builder.gather_statement(source, stmt); + } + + let terminator_loc = Location { + block: bb, + statement_index: block.statements.len() + }; + builder.gather_terminator(terminator_loc, block.terminator()); + } + + builder.finalize() +} + +impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> { + fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) { + debug!("gather_statement({:?}, {:?})", loc, stmt); + match stmt.kind { + StatementKind::Assign(ref lval, ref rval) => { + self.create_move_path(lval); + self.gather_rvalue(loc, rval); + } + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) => {} + StatementKind::SetDiscriminant{ .. 
} => { + span_bug!(stmt.source_info.span, + "SetDiscriminant should not exist during borrowck"); + } + StatementKind::Nop => {} + } + } + + fn gather_rvalue(&mut self, loc: Location, rvalue: &Rvalue<'tcx>) { + match *rvalue { + Rvalue::Use(ref operand) | + Rvalue::Repeat(ref operand, _) | + Rvalue::Cast(_, ref operand, _) | + Rvalue::UnaryOp(_, ref operand) => { + self.gather_operand(loc, operand) + } + Rvalue::BinaryOp(ref _binop, ref lhs, ref rhs) | + Rvalue::CheckedBinaryOp(ref _binop, ref lhs, ref rhs) => { + self.gather_operand(loc, lhs); + self.gather_operand(loc, rhs); + } + Rvalue::Aggregate(ref _kind, ref operands) => { + for operand in operands { + self.gather_operand(loc, operand); + } + } + Rvalue::Ref(..) | + Rvalue::Len(..) | + Rvalue::InlineAsm { .. } => {} + Rvalue::Box(..) => { + // This returns an rvalue with uninitialized contents. We can't + // move out of it here because it is an rvalue - assignments always + // completely initialize their lvalue. + // + // However, this does not matter - MIR building is careful to + // only emit a shallow free for the partially-initialized + // temporary. + // + // In any case, if we want to fix this, we have to register a + // special move and change the `statement_effect` functions. + } + } + } + + fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) { + debug!("gather_terminator({:?}, {:?})", loc, term); + match term.kind { + TerminatorKind::Goto { target: _ } | + TerminatorKind::Resume | + TerminatorKind::Unreachable => { } + + TerminatorKind::Return => { + self.gather_move(loc, &Lvalue::Local(RETURN_POINTER)); + } + + TerminatorKind::If { .. } | + TerminatorKind::Assert { .. } | + TerminatorKind::SwitchInt { .. } | + TerminatorKind::Switch { .. } => { + // branching terminators - these don't move anything + } + + TerminatorKind::Drop { ref location, target: _, unwind: _ } => { + self.gather_move(loc, location); + } + TerminatorKind::DropAndReplace { ref location, ref value, .. 
} => { + self.create_move_path(location); + self.gather_operand(loc, value); + } + TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => { + self.gather_operand(loc, func); + for arg in args { + self.gather_operand(loc, arg); + } + if let Some((ref destination, _bb)) = *destination { + self.create_move_path(destination); + } + } + } + } + + fn gather_operand(&mut self, loc: Location, operand: &Operand<'tcx>) { + match *operand { + Operand::Constant(..) => {} // not-a-move + Operand::Consume(ref lval) => { // a move + self.gather_move(loc, lval); + } + } + } + + fn gather_move(&mut self, loc: Location, lval: &Lvalue<'tcx>) { + debug!("gather_move({:?}, {:?})", loc, lval); + + let lv_ty = lval.ty(self.mir, self.tcx).to_ty(self.tcx); + if !lv_ty.moves_by_default(self.tcx, self.param_env, DUMMY_SP) { + debug!("gather_move({:?}, {:?}) - {:?} is Copy. skipping", loc, lval, lv_ty); + return + } + + let path = match self.move_path_for(lval) { + Ok(path) | Err(MovePathError::UnionMove { path }) => path, + Err(MovePathError::IllegalMove) => { + // Moving out of a bad path. Eventually, this should be a MIR + // borrowck error instead of a bug. + span_bug!(self.mir.span, + "Broken MIR: moving out of lvalue {:?}: {:?} at {:?}", + lval, lv_ty, loc); + } + }; + let move_out = self.data.moves.push(MoveOut { path: path, source: loc }); + + debug!("gather_move({:?}, {:?}): adding move {:?} of {:?}", + loc, lval, move_out, path); + + self.data.path_map[path].push(move_out); + self.data.loc_map[loc].push(move_out); + } +} diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs new file mode 100644 index 0000000000000..9035c2ab3c236 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/mod.rs @@ -0,0 +1,407 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use borrowck::BorrowckCtxt; + +use syntax::ast::{self, MetaItem}; +use syntax_pos::{Span, DUMMY_SP}; + +use rustc::hir; +use rustc::hir::intravisit::{FnKind}; + +use rustc::mir::{self, BasicBlock, BasicBlockData, Mir, Statement, Terminator, Location}; +use rustc::session::Session; +use rustc::ty::{self, TyCtxt}; + +mod abs_domain; +pub mod elaborate_drops; +mod dataflow; +mod gather_moves; +mod patch; +// mod graphviz; + +use self::dataflow::{BitDenotation}; +use self::dataflow::{DataflowOperator}; +use self::dataflow::{Dataflow, DataflowAnalysis, DataflowResults}; +use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals}; +use self::dataflow::{DefinitelyInitializedLvals}; +use self::gather_moves::{MoveData, MovePathIndex, LookupResult}; + +fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option { + for attr in attrs { + if attr.check_name("rustc_mir") { + let items = attr.meta_item_list(); + for item in items.iter().flat_map(|l| l.iter()) { + match item.meta_item() { + Some(mi) if mi.check_name(name) => return Some(mi.clone()), + _ => continue + } + } + } + } + return None; +} + +pub struct MoveDataParamEnv<'tcx> { + move_data: MoveData<'tcx>, + param_env: ty::ParameterEnvironment<'tcx>, +} + +pub fn borrowck_mir(bcx: &mut BorrowckCtxt, + fk: FnKind, + _decl: &hir::FnDecl, + body: &hir::Expr, + _sp: Span, + id: ast::NodeId, + attributes: &[ast::Attribute]) { + match fk { + FnKind::ItemFn(name, ..) | + FnKind::Method(name, ..) 
=> { + debug!("borrowck_mir({}) UNIMPLEMENTED", name); + } + FnKind::Closure(_) => { + debug!("borrowck_mir closure (body.id={}) UNIMPLEMENTED", body.id); + } + } + + let tcx = bcx.tcx; + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + + let mir = &tcx.item_mir(tcx.map.local_def_id(id)); + + let move_data = MoveData::gather_moves(mir, tcx, ¶m_env); + let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env }; + let flow_inits = + do_dataflow(tcx, mir, id, attributes, &mdpe, MaybeInitializedLvals::new(tcx, mir)); + let flow_uninits = + do_dataflow(tcx, mir, id, attributes, &mdpe, MaybeUninitializedLvals::new(tcx, mir)); + let flow_def_inits = + do_dataflow(tcx, mir, id, attributes, &mdpe, DefinitelyInitializedLvals::new(tcx, mir)); + + if has_rustc_mir_with(attributes, "rustc_peek_maybe_init").is_some() { + dataflow::sanity_check_via_rustc_peek(bcx.tcx, mir, id, attributes, &mdpe, &flow_inits); + } + if has_rustc_mir_with(attributes, "rustc_peek_maybe_uninit").is_some() { + dataflow::sanity_check_via_rustc_peek(bcx.tcx, mir, id, attributes, &mdpe, &flow_uninits); + } + if has_rustc_mir_with(attributes, "rustc_peek_definite_init").is_some() { + dataflow::sanity_check_via_rustc_peek(bcx.tcx, mir, id, attributes, &mdpe, &flow_def_inits); + } + + if has_rustc_mir_with(attributes, "stop_after_dataflow").is_some() { + bcx.tcx.sess.fatal("stop_after_dataflow ended compilation"); + } + + let mut mbcx = MirBorrowckCtxt { + bcx: bcx, + mir: mir, + node_id: id, + move_data: mdpe.move_data, + flow_inits: flow_inits, + flow_uninits: flow_uninits, + }; + + for bb in mir.basic_blocks().indices() { + mbcx.process_basic_block(bb); + } + + debug!("borrowck_mir done"); +} + +fn do_dataflow<'a, 'tcx, BD>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + node_id: ast::NodeId, + attributes: &[ast::Attribute], + ctxt: &BD::Ctxt, + bd: BD) -> DataflowResults + where BD: BitDenotation> + DataflowOperator +{ + let name_found = |sess: &Session, attrs: 
&[ast::Attribute], name| -> Option { + if let Some(item) = has_rustc_mir_with(attrs, name) { + if let Some(s) = item.value_str() { + return Some(s.to_string()) + } else { + sess.span_err( + item.span, + &format!("{} attribute requires a path", item.name())); + return None; + } + } + return None; + }; + + let print_preflow_to = + name_found(tcx.sess, attributes, "borrowck_graphviz_preflow"); + let print_postflow_to = + name_found(tcx.sess, attributes, "borrowck_graphviz_postflow"); + + let mut mbcx = MirBorrowckCtxtPreDataflow { + node_id: node_id, + print_preflow_to: print_preflow_to, + print_postflow_to: print_postflow_to, + flow_state: DataflowAnalysis::new(tcx, mir, ctxt, bd), + }; + + mbcx.dataflow(|ctxt, i| &ctxt.move_data.move_paths[i]); + mbcx.flow_state.results() +} + + +pub struct MirBorrowckCtxtPreDataflow<'a, 'tcx: 'a, BD> + where BD: BitDenotation, BD::Ctxt: 'a +{ + node_id: ast::NodeId, + flow_state: DataflowAnalysis<'a, 'tcx, BD>, + print_preflow_to: Option, + print_postflow_to: Option, +} + +#[allow(dead_code)] +pub struct MirBorrowckCtxt<'b, 'a: 'b, 'tcx: 'a> { + bcx: &'b mut BorrowckCtxt<'a, 'tcx>, + mir: &'b Mir<'tcx>, + node_id: ast::NodeId, + move_data: MoveData<'tcx>, + flow_inits: DataflowResults>, + flow_uninits: DataflowResults> +} + +impl<'b, 'a: 'b, 'tcx: 'a> MirBorrowckCtxt<'b, 'a, 'tcx> { + fn process_basic_block(&mut self, bb: BasicBlock) { + let BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = + self.mir[bb]; + for stmt in statements { + self.process_statement(bb, stmt); + } + + self.process_terminator(bb, terminator); + } + + fn process_statement(&mut self, bb: BasicBlock, stmt: &Statement<'tcx>) { + debug!("MirBorrowckCtxt::process_statement({:?}, {:?}", bb, stmt); + } + + fn process_terminator(&mut self, bb: BasicBlock, term: &Option>) { + debug!("MirBorrowckCtxt::process_terminator({:?}, {:?})", bb, term); + } +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +enum DropFlagState { + Present, // i.e. 
initialized + Absent, // i.e. deinitialized or "moved" +} + +impl DropFlagState { + fn value(self) -> bool { + match self { + DropFlagState::Present => true, + DropFlagState::Absent => false + } + } +} + +fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>, + path: MovePathIndex, + mut cond: F) + -> Option + where F: FnMut(&mir::LvalueProjection<'tcx>) -> bool +{ + let mut next_child = move_data.move_paths[path].first_child; + while let Some(child_index) = next_child { + match move_data.move_paths[child_index].lvalue { + mir::Lvalue::Projection(ref proj) => { + if cond(proj) { + return Some(child_index) + } + } + _ => {} + } + next_child = move_data.move_paths[child_index].next_sibling; + } + + None +} + +/// When enumerating the child fragments of a path, don't recurse into +/// paths (1.) past arrays, slices, and pointers, nor (2.) into a type +/// that implements `Drop`. +/// +/// Lvalues behind references or arrays are not tracked by elaboration +/// and are always assumed to be initialized when accessible. As +/// references and indexes can be reseated, trying to track them can +/// only lead to trouble. +/// +/// Lvalues behind ADT's with a Drop impl are not tracked by +/// elaboration since they can never have a drop-flag state that +/// differs from that of the parent with the Drop impl. +/// +/// In both cases, the contents can only be accessed if and only if +/// their parents are initialized. This implies for example that there +/// is no need to maintain separate drop flags to track such state. +/// +/// FIXME: we have to do something for moving slice patterns. +fn lvalue_contents_drop_state_cannot_differ<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + lv: &mir::Lvalue<'tcx>) -> bool { + let ty = lv.ty(mir, tcx).to_ty(tcx); + match ty.sty { + ty::TyArray(..) | ty::TySlice(..) | ty::TyRef(..) | ty::TyRawPtr(..) 
=> { + debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} refd => true", + lv, ty); + true + } + ty::TyAdt(def, _) if def.has_dtor() || def.is_union() => { + debug!("lvalue_contents_drop_state_cannot_differ lv: {:?} ty: {:?} Drop => true", + lv, ty); + true + } + _ => { + false + } + } +} + +fn on_lookup_result_bits<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + move_data: &MoveData<'tcx>, + lookup_result: LookupResult, + each_child: F) + where F: FnMut(MovePathIndex) +{ + match lookup_result { + LookupResult::Parent(..) => { + // access to untracked value - do not touch children + } + LookupResult::Exact(e) => { + on_all_children_bits(tcx, mir, move_data, e, each_child) + } + } +} + +fn on_all_children_bits<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + move_data: &MoveData<'tcx>, + move_path_index: MovePathIndex, + mut each_child: F) + where F: FnMut(MovePathIndex) +{ + fn is_terminal_path<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + move_data: &MoveData<'tcx>, + path: MovePathIndex) -> bool + { + lvalue_contents_drop_state_cannot_differ( + tcx, mir, &move_data.move_paths[path].lvalue) + } + + fn on_all_children_bits<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + move_data: &MoveData<'tcx>, + move_path_index: MovePathIndex, + each_child: &mut F) + where F: FnMut(MovePathIndex) + { + each_child(move_path_index); + + if is_terminal_path(tcx, mir, move_data, move_path_index) { + return + } + + let mut next_child_index = move_data.move_paths[move_path_index].first_child; + while let Some(child_index) = next_child_index { + on_all_children_bits(tcx, mir, move_data, child_index, each_child); + next_child_index = move_data.move_paths[child_index].next_sibling; + } + } + on_all_children_bits(tcx, mir, move_data, move_path_index, &mut each_child); +} + +fn drop_flag_effects_for_function_entry<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + ctxt: 
&MoveDataParamEnv<'tcx>, + mut callback: F) + where F: FnMut(MovePathIndex, DropFlagState) +{ + let move_data = &ctxt.move_data; + for arg in mir.args_iter() { + let lvalue = mir::Lvalue::Local(arg); + let lookup_result = move_data.rev_lookup.find(&lvalue); + on_lookup_result_bits(tcx, mir, move_data, + lookup_result, + |moi| callback(moi, DropFlagState::Present)); + } +} + +fn drop_flag_effects_for_location<'a, 'tcx, F>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &Mir<'tcx>, + ctxt: &MoveDataParamEnv<'tcx>, + loc: Location, + mut callback: F) + where F: FnMut(MovePathIndex, DropFlagState) +{ + let move_data = &ctxt.move_data; + let param_env = &ctxt.param_env; + debug!("drop_flag_effects_for_location({:?})", loc); + + // first, move out of the RHS + for mi in &move_data.loc_map[loc] { + let path = mi.move_path_index(move_data); + debug!("moving out of path {:?}", move_data.move_paths[path]); + + // don't move out of non-Copy things + let lvalue = &move_data.move_paths[path].lvalue; + let ty = lvalue.ty(mir, tcx).to_ty(tcx); + if !ty.moves_by_default(tcx, param_env, DUMMY_SP) { + continue; + } + + on_all_children_bits(tcx, mir, move_data, + path, + |moi| callback(moi, DropFlagState::Absent)) + } + + let block = &mir[loc.block]; + match block.statements.get(loc.statement_index) { + Some(stmt) => match stmt.kind { + mir::StatementKind::SetDiscriminant{ .. } => { + span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck"); + } + mir::StatementKind::Assign(ref lvalue, _) => { + debug!("drop_flag_effects: assignment {:?}", stmt); + on_lookup_result_bits(tcx, mir, move_data, + move_data.rev_lookup.find(lvalue), + |moi| callback(moi, DropFlagState::Present)) + } + mir::StatementKind::StorageLive(_) | + mir::StatementKind::StorageDead(_) | + mir::StatementKind::Nop => {} + }, + None => { + debug!("drop_flag_effects: replace {:?}", block.terminator()); + match block.terminator().kind { + mir::TerminatorKind::DropAndReplace { ref location, .. 
} => { + on_lookup_result_bits(tcx, mir, move_data, + move_data.rev_lookup.find(location), + |moi| callback(moi, DropFlagState::Present)) + } + _ => { + // other terminators do not contain move-ins + } + } + } + } +} diff --git a/src/librustc_borrowck/borrowck/mir/patch.rs b/src/librustc_borrowck/borrowck/mir/patch.rs new file mode 100644 index 0000000000000..19f240da73059 --- /dev/null +++ b/src/librustc_borrowck/borrowck/mir/patch.rs @@ -0,0 +1,178 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::Ty; +use rustc::mir::*; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +/// This struct represents a patch to MIR, which can add +/// new statements and basic blocks and patch over block +/// terminators. +pub struct MirPatch<'tcx> { + patch_map: IndexVec>>, + new_blocks: Vec>, + new_statements: Vec<(Location, StatementKind<'tcx>)>, + new_locals: Vec>, + resume_block: BasicBlock, + next_local: usize, +} + +impl<'tcx> MirPatch<'tcx> { + pub fn new(mir: &Mir<'tcx>) -> Self { + let mut result = MirPatch { + patch_map: IndexVec::from_elem(None, mir.basic_blocks()), + new_blocks: vec![], + new_statements: vec![], + new_locals: vec![], + next_local: mir.local_decls.len(), + resume_block: START_BLOCK + }; + + // make sure the MIR we create has a resume block. It is + // completely legal to convert jumps to the resume block + // to jumps to None, but we occasionally have to add + // instructions just before that. 
+ + let mut resume_block = None; + let mut resume_stmt_block = None; + for (bb, block) in mir.basic_blocks().iter_enumerated() { + if let TerminatorKind::Resume = block.terminator().kind { + if block.statements.len() > 0 { + resume_stmt_block = Some(bb); + } else { + resume_block = Some(bb); + } + break + } + } + let resume_block = resume_block.unwrap_or_else(|| { + result.new_block(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + source_info: SourceInfo { + span: mir.span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + kind: TerminatorKind::Resume + }), + is_cleanup: true + })}); + result.resume_block = resume_block; + if let Some(resume_stmt_block) = resume_stmt_block { + result.patch_terminator(resume_stmt_block, TerminatorKind::Goto { + target: resume_block + }); + } + result + } + + pub fn resume_block(&self) -> BasicBlock { + self.resume_block + } + + pub fn is_patched(&self, bb: BasicBlock) -> bool { + self.patch_map[bb].is_some() + } + + pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: BasicBlock) -> Location { + let offset = match bb.index().checked_sub(mir.basic_blocks().len()) { + Some(index) => self.new_blocks[index].statements.len(), + None => mir[bb].statements.len() + }; + Location { + block: bb, + statement_index: offset + } + } + + pub fn new_temp(&mut self, ty: Ty<'tcx>) -> Local { + let index = self.next_local; + self.next_local += 1; + self.new_locals.push(LocalDecl::new_temp(ty)); + Local::new(index as usize) + } + + pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock { + let block = BasicBlock::new(self.patch_map.len()); + debug!("MirPatch: new_block: {:?}: {:?}", block, data); + self.new_blocks.push(data); + self.patch_map.push(None); + block + } + + pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) { + assert!(self.patch_map[block].is_none()); + debug!("MirPatch: patch_terminator({:?}, {:?})", block, new); + self.patch_map[block] = Some(new); + } + + pub fn 
add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) { + debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt); + self.new_statements.push((loc, stmt)); + } + + pub fn add_assign(&mut self, loc: Location, lv: Lvalue<'tcx>, rv: Rvalue<'tcx>) { + self.add_statement(loc, StatementKind::Assign(lv, rv)); + } + + pub fn apply(self, mir: &mut Mir<'tcx>) { + debug!("MirPatch: {:?} new temps, starting from index {}: {:?}", + self.new_locals.len(), mir.local_decls.len(), self.new_locals); + debug!("MirPatch: {} new blocks, starting from index {}", + self.new_blocks.len(), mir.basic_blocks().len()); + mir.basic_blocks_mut().extend(self.new_blocks); + mir.local_decls.extend(self.new_locals); + for (src, patch) in self.patch_map.into_iter_enumerated() { + if let Some(patch) = patch { + debug!("MirPatch: patching block {:?}", src); + mir[src].terminator_mut().kind = patch; + } + } + + let mut new_statements = self.new_statements; + new_statements.sort_by(|u,v| u.0.cmp(&v.0)); + + let mut delta = 0; + let mut last_bb = START_BLOCK; + for (mut loc, stmt) in new_statements { + if loc.block != last_bb { + delta = 0; + last_bb = loc.block; + } + debug!("MirPatch: adding statement {:?} at loc {:?}+{}", + stmt, loc, delta); + loc.statement_index += delta; + let source_info = Self::source_info_for_index( + &mir[loc.block], loc + ); + mir[loc.block].statements.insert( + loc.statement_index, Statement { + source_info: source_info, + kind: stmt + }); + delta += 1; + } + } + + pub fn source_info_for_index(data: &BasicBlockData, loc: Location) -> SourceInfo { + match data.statements.get(loc.statement_index) { + Some(stmt) => stmt.source_info, + None => data.terminator().source_info + } + } + + pub fn source_info_for_location(&self, mir: &Mir, loc: Location) -> SourceInfo { + let data = match loc.block.index().checked_sub(mir.basic_blocks().len()) { + Some(new) => &self.new_blocks[new], + None => &mir[loc.block] + }; + Self::source_info_for_index(data, loc) + } +} diff --git 
a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index 631149e69d77e..ecf5c3ef176e5 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -18,36 +18,36 @@ pub use self::bckerr_code::*; pub use self::AliasableViolationKind::*; pub use self::MovedValueUseKind::*; +pub use self::mir::elaborate_drops::ElaborateDrops; + use self::InteriorKind::*; use rustc::dep_graph::DepNode; -use rustc::front::map as hir_map; -use rustc::front::map::blocks::FnParts; -use rustc::middle::cfg; +use rustc::hir::map as hir_map; +use rustc::hir::map::blocks::{FnParts, FnLikeNode}; +use rustc::cfg; use rustc::middle::dataflow::DataFlowContext; use rustc::middle::dataflow::BitwiseOperator; use rustc::middle::dataflow::DataFlowOperator; use rustc::middle::dataflow::KillFrom; -use rustc::middle::def_id::DefId; +use rustc::hir::def_id::DefId; use rustc::middle::expr_use_visitor as euv; use rustc::middle::free_region::FreeRegionMap; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; -use rustc::middle::ty::{self, Ty}; +use rustc::ty::{self, TyCtxt}; use std::fmt; use std::mem; use std::rc::Rc; +use std::hash::{Hash, Hasher}; use syntax::ast; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; +use syntax_pos::{MultiSpan, Span}; +use errors::DiagnosticBuilder; -use rustc_front::hir; -use rustc_front::hir::{FnDecl, Block}; -use rustc_front::intravisit; -use rustc_front::intravisit::{Visitor, FnKind}; -use rustc_front::util as hir_util; +use rustc::hir; +use rustc::hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; pub mod check_loans; @@ -55,50 +55,54 @@ pub mod gather_loans; pub mod move_data; +mod mir; + #[derive(Clone, Copy)] pub struct LoanDataFlowOperator; pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator>; -impl<'a, 'tcx, 'v> Visitor<'v> for BorrowckCtxt<'a, 'tcx> { - fn visit_fn(&mut 
self, fk: FnKind<'v>, fd: &'v FnDecl, - b: &'v Block, s: Span, id: ast::NodeId) { +impl<'a, 'tcx> Visitor<'tcx> for BorrowckCtxt<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, id: ast::NodeId) { match fk { FnKind::ItemFn(..) | FnKind::Method(..) => { - let new_free_region_map = self.tcx.free_region_map(id); - let old_free_region_map = - mem::replace(&mut self.free_region_map, new_free_region_map); - borrowck_fn(self, fk, fd, b, s, id); - self.free_region_map = old_free_region_map; + self.with_temp_region_map(id, |this| { + borrowck_fn(this, fk, fd, b, s, id, fk.attrs()) + }); } - FnKind::Closure => { - borrowck_fn(self, fk, fd, b, s, id); + FnKind::Closure(..) => { + borrowck_fn(self, fk, fd, b, s, id, fk.attrs()); } } } - fn visit_item(&mut self, item: &hir::Item) { + fn visit_item(&mut self, item: &'tcx hir::Item) { borrowck_item(self, item); } - fn visit_trait_item(&mut self, ti: &hir::TraitItem) { + fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { if let hir::ConstTraitItem(_, Some(ref expr)) = ti.node { - gather_loans::gather_loans_in_static_initializer(self, &*expr); + gather_loans::gather_loans_in_static_initializer(self, ti.id, &expr); } intravisit::walk_trait_item(self, ti); } - fn visit_impl_item(&mut self, ii: &hir::ImplItem) { + fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { if let hir::ImplItemKind::Const(_, ref expr) = ii.node { - gather_loans::gather_loans_in_static_initializer(self, &*expr); + gather_loans::gather_loans_in_static_initializer(self, ii.id, &expr); } intravisit::walk_impl_item(self, ii); } } -pub fn check_crate(tcx: &ty::ctxt) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let mut bccx = BorrowckCtxt { tcx: tcx, free_region_map: FreeRegionMap::new(), @@ -110,7 +114,7 @@ pub fn check_crate(tcx: &ty::ctxt) { } }; - 
tcx.visit_all_items_in_krate(DepNode::BorrowCheck, &mut bccx); + tcx.visit_all_item_likes_in_krate(DepNode::BorrowCheck, &mut bccx.as_deep_visitor()); if tcx.sess.borrowck_stats() { println!("--- borrowck stats ---"); @@ -131,15 +135,15 @@ pub fn check_crate(tcx: &ty::ctxt) { } } -fn borrowck_item(this: &mut BorrowckCtxt, item: &hir::Item) { +fn borrowck_item<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, item: &'tcx hir::Item) { // Gather loans for items. Note that we don't need // to check loans for single expressions. The check // loan step is intended for things that have a data // flow dependent conditions. match item.node { - hir::ItemStatic(_, _, ref ex) | + hir::ItemStatic(.., ref ex) | hir::ItemConst(_, ref ex) => { - gather_loans::gather_loans_in_static_initializer(this, &**ex); + gather_loans::gather_loans_in_static_initializer(this, item.id, &ex); } _ => { } } @@ -154,13 +158,23 @@ pub struct AnalysisData<'a, 'tcx: 'a> { pub move_data: move_data::FlowedMoveData<'a, 'tcx>, } -fn borrowck_fn(this: &mut BorrowckCtxt, - fk: FnKind, - decl: &hir::FnDecl, - body: &hir::Block, - sp: Span, - id: ast::NodeId) { +fn borrowck_fn<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, + fk: FnKind<'tcx>, + decl: &'tcx hir::FnDecl, + body_id: hir::ExprId, + sp: Span, + id: ast::NodeId, + attributes: &[ast::Attribute]) { debug!("borrowck_fn(id={})", id); + + let body = this.tcx.map.expr(body_id); + + if attributes.iter().any(|item| item.check_name("rustc_mir_borrowck")) { + this.with_temp_region_map(id, |this| { + mir::borrowck_mir(this, fk, decl, body, sp, id, attributes) + }); + } + let cfg = cfg::CFG::new(this.tcx, body); let AnalysisData { all_loans, loans: loan_dfcx, @@ -183,21 +197,21 @@ fn borrowck_fn(this: &mut BorrowckCtxt, decl, body); - intravisit::walk_fn(this, fk, decl, body, sp); + intravisit::walk_fn(this, fk, decl, body_id, sp, id); } fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, - fk: FnKind, - decl: &hir::FnDecl, + fk: 
FnKind<'tcx>, + decl: &'tcx hir::FnDecl, cfg: &cfg::CFG, - body: &hir::Block, + body: &'tcx hir::Expr, sp: Span, id: ast::NodeId) -> AnalysisData<'a, 'tcx> { // Check the body of fn items. let tcx = this.tcx; - let id_range = hir_util::compute_id_range_for_fn_body(fk, decl, body, sp, id); + let id_range = intravisit::compute_id_range_for_fn_body(fk, decl, body, sp, id, &tcx.map); let (all_loans, move_data) = gather_loans::gather_loans_in_fn(this, id, decl, body); @@ -232,8 +246,8 @@ fn build_borrowck_dataflow_data<'a, 'tcx>(this: &mut BorrowckCtxt<'a, 'tcx>, /// Accessor for introspective clients inspecting `AnalysisData` and /// the `BorrowckCtxt` itself , e.g. the flowgraph visualizer. pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( - tcx: &'a ty::ctxt<'tcx>, - fn_parts: FnParts<'a>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn_parts: FnParts<'tcx>, cfg: &cfg::CFG) -> (BorrowckCtxt<'a, 'tcx>, AnalysisData<'a, 'tcx>) { @@ -249,11 +263,13 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( } }; + let body = tcx.map.expr(fn_parts.body); + let dataflow_data = build_borrowck_dataflow_data(&mut bccx, fn_parts.kind, - &*fn_parts.decl, + &fn_parts.decl, cfg, - &*fn_parts.body, + body, fn_parts.span, fn_parts.id); @@ -264,7 +280,7 @@ pub fn build_borrowck_dataflow_data_for_fn<'a, 'tcx>( // Type definitions pub struct BorrowckCtxt<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, // Hacky. As we visit various fns, we have to load up the // free-region map for each one. 
This map is computed by during @@ -282,6 +298,7 @@ pub struct BorrowckCtxt<'a, 'tcx: 'a> { stats: BorrowStats } +#[derive(Clone)] struct BorrowStats { loaned_paths_same: usize, loaned_paths_imm: usize, @@ -289,8 +306,6 @@ struct BorrowStats { guaranteed_paths: usize } -pub type BckResult<'tcx, T> = Result>; - /////////////////////////////////////////////////////////////////////////// // Loans and loan paths @@ -323,7 +338,7 @@ impl<'tcx> Loan<'tcx> { } } -#[derive(Eq, Hash)] +#[derive(Eq)] pub struct LoanPath<'tcx> { kind: LoanPathKind<'tcx>, ty: ty::Ty<'tcx>, @@ -331,10 +346,13 @@ pub struct LoanPath<'tcx> { impl<'tcx> PartialEq for LoanPath<'tcx> { fn eq(&self, that: &LoanPath<'tcx>) -> bool { - let r = self.kind == that.kind; - debug_assert!(self.ty == that.ty || !r, - "Somehow loan paths are equal though their tys are not."); - r + self.kind == that.kind + } +} + +impl<'tcx> Hash for LoanPath<'tcx> { + fn hash(&self, state: &mut H) { + self.kind.hash(state); } } @@ -343,7 +361,7 @@ pub enum LoanPathKind<'tcx> { LpVar(ast::NodeId), // `x` in README.md LpUpvar(ty::UpvarId), // `x` captured by-value into closure LpDowncast(Rc>, DefId), // `x` downcast to particular enum variant - LpExtend(Rc>, mc::MutabilityCategory, LoanPathElem) + LpExtend(Rc>, mc::MutabilityCategory, LoanPathElem<'tcx>) } impl<'tcx> LoanPath<'tcx> { @@ -388,28 +406,28 @@ impl ToInteriorKind for mc::InteriorKind { // `enum E { X { foo: u32 }, Y { foo: u32 }}` // each `foo` is qualified by the definitition id of the variant (`X` or `Y`). 
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum LoanPathElem { - LpDeref(mc::PointerKind), +pub enum LoanPathElem<'tcx> { + LpDeref(mc::PointerKind<'tcx>), LpInterior(Option, InteriorKind), } pub fn closure_to_block(closure_id: ast::NodeId, - tcx: &ty::ctxt) -> ast::NodeId { + tcx: TyCtxt) -> ast::NodeId { match tcx.map.get(closure_id) { hir_map::NodeExpr(expr) => match expr.node { - hir::ExprClosure(_, _, ref block) => { - block.id + hir::ExprClosure(.., body_id, _) => { + body_id.node_id() } _ => { - panic!("encountered non-closure id: {}", closure_id) + bug!("encountered non-closure id: {}", closure_id) } }, - _ => panic!("encountered non-expr id: {}", closure_id) + _ => bug!("encountered non-expr id: {}", closure_id) } } -impl<'tcx> LoanPath<'tcx> { - pub fn kill_scope(&self, tcx: &ty::ctxt<'tcx>) -> region::CodeExtent { +impl<'a, 'tcx> LoanPath<'tcx> { + pub fn kill_scope(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> region::CodeExtent { match self.kind { LpVar(local_id) => tcx.region_maps.var_scope(local_id), LpUpvar(upvar_id) => { @@ -417,7 +435,7 @@ impl<'tcx> LoanPath<'tcx> { tcx.region_maps.node_extent(block_id) } LpDowncast(ref base, _) | - LpExtend(ref base, _, _) => base.kill_scope(tcx), + LpExtend(ref base, ..) 
=> base.kill_scope(tcx), } } @@ -426,12 +444,12 @@ impl<'tcx> LoanPath<'tcx> { (&LpExtend(ref base, _, LpInterior(opt_variant_id, id)), &LpExtend(ref base2, _, LpInterior(opt_variant_id2, id2))) => if id == id2 && opt_variant_id == opt_variant_id2 { - base.has_fork(&**base2) + base.has_fork(&base2) } else { true }, (&LpExtend(ref base, _, LpDeref(_)), _) => base.has_fork(other), - (_, &LpExtend(ref base, _, LpDeref(_))) => self.has_fork(&**base), + (_, &LpExtend(ref base, _, LpDeref(_))) => self.has_fork(&base), _ => false, } } @@ -439,7 +457,7 @@ impl<'tcx> LoanPath<'tcx> { fn depth(&self) -> usize { match self.kind { LpExtend(ref base, _, LpDeref(_)) => base.depth(), - LpExtend(ref base, _, LpInterior(_, _)) => base.depth() + 1, + LpExtend(ref base, _, LpInterior(..)) => base.depth() + 1, _ => 0, } } @@ -449,11 +467,9 @@ impl<'tcx> LoanPath<'tcx> { (&LpExtend(ref base, a, LpInterior(opt_variant_id, id)), &LpExtend(ref base2, _, LpInterior(opt_variant_id2, id2))) => { if id == id2 && opt_variant_id == opt_variant_id2 { - base.common(&**base2).map(|x| { + base.common(&base2).map(|x| { let xd = x.depth(); if base.depth() == xd && base2.depth() == xd { - assert_eq!(base.ty, base2.ty); - assert_eq!(self.ty, other.ty); LoanPath { kind: LpExtend(Rc::new(x), a, LpInterior(opt_variant_id, id)), ty: self.ty, @@ -463,14 +479,13 @@ impl<'tcx> LoanPath<'tcx> { } }) } else { - base.common(&**base2) + base.common(&base2) } } (&LpExtend(ref base, _, LpDeref(_)), _) => base.common(other), - (_, &LpExtend(ref other, _, LpDeref(_))) => self.common(&**other), + (_, &LpExtend(ref other, _, LpDeref(_))) => self.common(&other), (&LpVar(id), &LpVar(id2)) => { if id == id2 { - assert_eq!(self.ty, other.ty); Some(LoanPath { kind: LpVar(id), ty: self.ty }) } else { None @@ -478,7 +493,6 @@ impl<'tcx> LoanPath<'tcx> { } (&LpUpvar(id), &LpUpvar(id2)) => { if id == id2 { - assert_eq!(self.ty, other.ty); Some(LoanPath { kind: LpUpvar(id), ty: self.ty }) } else { None @@ -542,10 +556,11 @@ pub 
fn opt_loan_path<'tcx>(cmt: &mc::cmt<'tcx>) -> Option>> { // Errors that can occur #[derive(PartialEq)] -pub enum bckerr_code { +pub enum bckerr_code<'tcx> { err_mutbl, - err_out_of_scope(ty::Region, ty::Region), // superscope, subscope - err_borrowed_pointer_too_short(ty::Region, ty::Region), // loan, ptr + /// superscope, subscope, loan cause + err_out_of_scope(&'tcx ty::Region, &'tcx ty::Region, euv::LoanCause), + err_borrowed_pointer_too_short(&'tcx ty::Region, &'tcx ty::Region), // loan, ptr } // Combination of an error code and the categorization of the expression @@ -555,7 +570,7 @@ pub struct BckError<'tcx> { span: Span, cause: AliasableViolationKind, cmt: mc::cmt<'tcx>, - code: bckerr_code + code: bckerr_code<'tcx> } #[derive(Copy, Clone, Debug, PartialEq)] @@ -574,7 +589,16 @@ pub enum MovedValueUseKind { // Misc impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { - pub fn is_subregion_of(&self, r_sub: ty::Region, r_sup: ty::Region) + fn with_temp_region_map(&mut self, id: ast::NodeId, f: F) + where F: for <'b> FnOnce(&'b mut BorrowckCtxt<'a, 'tcx>) + { + let new_free_region_map = self.tcx.free_region_map(id); + let old_free_region_map = mem::replace(&mut self.free_region_map, new_free_region_map); + f(self); + self.free_region_map = old_free_region_map; + } + + pub fn is_subregion_of(&self, r_sub: &'tcx ty::Region, r_sup: &'tcx ty::Region) -> bool { self.free_region_map.is_subregion_of(self.tcx, r_sub, r_sup) @@ -583,9 +607,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { pub fn report(&self, err: BckError<'tcx>) { // Catch and handle some particular cases. 
match (&err.code, &err.cause) { - (&err_out_of_scope(ty::ReScope(_), ty::ReStatic), + (&err_out_of_scope(&ty::ReScope(_), &ty::ReStatic, _), &BorrowViolation(euv::ClosureCapture(span))) | - (&err_out_of_scope(ty::ReScope(_), ty::ReFree(..)), + (&err_out_of_scope(&ty::ReScope(_), &ty::ReFree(..), _), &BorrowViolation(euv::ClosureCapture(span))) => { return self.report_out_of_scope_escaping_closure_capture(&err, span); } @@ -593,36 +617,39 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } // General fallback. + let span = err.span.clone(); let mut db = self.struct_span_err( err.span, &self.bckerr_to_string(&err)); - self.note_and_explain_bckerr(&mut db, err); + self.note_and_explain_bckerr(&mut db, err, span); db.emit(); } - pub fn report_use_of_moved_value<'b>(&self, - use_span: Span, - use_kind: MovedValueUseKind, - lp: &LoanPath<'tcx>, - the_move: &move_data::Move, - moved_lp: &LoanPath<'tcx>, - param_env: &ty::ParameterEnvironment<'b,'tcx>) { - let verb = match use_kind { - MovedInUse => "use", - MovedInCapture => "capture", + pub fn report_use_of_moved_value(&self, + use_span: Span, + use_kind: MovedValueUseKind, + lp: &LoanPath<'tcx>, + the_move: &move_data::Move, + moved_lp: &LoanPath<'tcx>, + _param_env: &ty::ParameterEnvironment<'tcx>) { + let (verb, verb_participle) = match use_kind { + MovedInUse => ("use", "used"), + MovedInCapture => ("capture", "captured"), }; - let (ol, moved_lp_msg, mut err) = match the_move.kind { + let (_ol, _moved_lp_msg, mut err) = match the_move.kind { move_data::Declared => { - let err = struct_span_err!( + // If this is an uninitialized variable, just emit a simple warning + // and return. 
+ struct_span_err!( self.tcx.sess, use_span, E0381, "{} of possibly uninitialized variable: `{}`", verb, - self.loan_path_to_string(lp)); - - (self.loan_path_to_string(moved_lp), - String::new(), - err) + self.loan_path_to_string(lp)) + .span_label(use_span, &format!("use of possibly uninitialized `{}`", + self.loan_path_to_string(lp))) + .emit(); + return; } _ => { // If moved_lp is something like `x.a`, and lp is something like `x.b`, we would @@ -661,124 +688,54 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.tcx.sess, use_span, E0382, "{} of {}moved value: `{}`", verb, msg, nl); - (ol, moved_lp_msg, err) + (ol, moved_lp_msg, err)} + }; + + // Get type of value and span where it was previously + // moved. + let (move_span, move_note) = match the_move.kind { + move_data::Declared => { + unreachable!(); } + + move_data::MoveExpr | + move_data::MovePat => + (self.tcx.map.span(the_move.id), ""), + + move_data::Captured => + (match self.tcx.map.expect_expr(the_move.id).node { + hir::ExprClosure(.., fn_decl_span) => fn_decl_span, + ref r => bug!("Captured({}) maps to non-closure: {:?}", + the_move.id, r), + }, " (into closure)"), }; - match the_move.kind { - move_data::Declared => {} + // Annotate the use and the move in the span. Watch out for + // the case where the use and the move are the same. This + // means the use is in a loop. 
+ err = if use_span == move_span { + err.span_label( + use_span, + &format!("value moved{} here in previous iteration of loop", + move_note)); + err + } else { + err.span_label(use_span, &format!("value {} here after move", verb_participle)) + .span_label(move_span, &format!("value moved{} here", move_note)); + err + }; - move_data::MoveExpr => { - let (expr_ty, expr_span) = match self.tcx - .map - .find(the_move.id) { - Some(hir_map::NodeExpr(expr)) => { - (self.tcx.expr_ty_adjusted(&*expr), expr.span) - } - r => { - self.tcx.sess.bug(&format!("MoveExpr({}) maps to \ - {:?}, not Expr", - the_move.id, - r)) - } - }; - let (suggestion, _) = - move_suggestion(param_env, expr_span, expr_ty, ("moved by default", "")); - // If the two spans are the same, it's because the expression will be evaluated - // multiple times. Avoid printing the same span and adjust the wording so it makes - // more sense that it's from multiple evalutations. - if expr_span == use_span { - err.note( - &format!("`{}` was previously moved here{} because it has type `{}`, \ - which is {}", - ol, - moved_lp_msg, - expr_ty, - suggestion)); - } else { - err.span_note( - expr_span, - &format!("`{}` moved here{} because it has type `{}`, which is {}", - ol, - moved_lp_msg, - expr_ty, - suggestion)); - } - } + err.note(&format!("move occurs because `{}` has type `{}`, \ + which does not implement the `Copy` trait", + self.loan_path_to_string(moved_lp), + moved_lp.ty)); - move_data::MovePat => { - let pat_ty = self.tcx.node_id_to_type(the_move.id); - let span = self.tcx.map.span(the_move.id); - err.span_note(span, - &format!("`{}` moved here{} because it has type `{}`, \ - which is moved by default", - ol, - moved_lp_msg, - pat_ty)); - match self.tcx.sess.codemap().span_to_snippet(span) { - Ok(string) => { - err.span_suggestion( - span, - &format!("if you would like to borrow the value instead, \ - use a `ref` binding as shown:"), - format!("ref {}", string)); - }, - Err(_) => { - 
err.fileline_help(span, - "use `ref` to override"); - }, - } - } + // Note: we used to suggest adding a `ref binding` or calling + // `clone` but those suggestions have been removed because + // they are often not what you actually want to do, and were + // not considered particularly helpful. - move_data::Captured => { - let (expr_ty, expr_span) = match self.tcx - .map - .find(the_move.id) { - Some(hir_map::NodeExpr(expr)) => { - (self.tcx.expr_ty_adjusted(&*expr), expr.span) - } - r => { - self.tcx.sess.bug(&format!("Captured({}) maps to \ - {:?}, not Expr", - the_move.id, - r)) - } - }; - let (suggestion, help) = - move_suggestion(param_env, - expr_span, - expr_ty, - ("moved by default", - "make a copy and capture that instead to override")); - err.span_note( - expr_span, - &format!("`{}` moved into closure environment here{} because it \ - has type `{}`, which is {}", - ol, - moved_lp_msg, - moved_lp.ty, - suggestion)); - err.fileline_help(expr_span, help); - } - } err.emit(); - - fn move_suggestion<'a,'tcx>(param_env: &ty::ParameterEnvironment<'a,'tcx>, - span: Span, - ty: Ty<'tcx>, - default_msgs: (&'static str, &'static str)) - -> (&'static str, &'static str) { - match ty.sty { - _ => { - if ty.moves_by_default(param_env, span) { - ("non-copyable", - "perhaps you meant to use `clone()`?") - } else { - default_msgs - } - } - } - } } pub fn report_partial_reinitialization_of_uninitialized_structure( @@ -796,56 +753,57 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { lp: &LoanPath<'tcx>, assign: &move_data::Assignment) { - struct_span_err!( + let mut err = struct_span_err!( self.tcx.sess, span, E0384, "re-assignment of immutable variable `{}`", - self.loan_path_to_string(lp)) - .span_note(assign.span, "prior assignment occurs here") - .emit(); + self.loan_path_to_string(lp)); + err.span_label(span, &format!("re-assignment of immutable variable")); + if span != assign.span { + err.span_label(assign.span, &format!("first assignment to `{}`", + 
self.loan_path_to_string(lp))); + } + err.emit(); } pub fn span_err(&self, s: Span, m: &str) { self.tcx.sess.span_err(s, m); } - pub fn struct_span_err(&self, s: Span, m: &str) -> DiagnosticBuilder<'a> { + pub fn struct_span_err>(&self, s: S, m: &str) + -> DiagnosticBuilder<'a> { self.tcx.sess.struct_span_err(s, m) } - pub fn struct_span_err_with_code(&self, - s: Span, - msg: &str, - code: &str) - -> DiagnosticBuilder<'a> { + pub fn struct_span_err_with_code>(&self, + s: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { self.tcx.sess.struct_span_err_with_code(s, msg, code) } - pub fn span_err_with_code(&self, s: Span, msg: &str, code: &str) { + pub fn span_err_with_code>(&self, s: S, msg: &str, code: &str) { self.tcx.sess.span_err_with_code(s, msg, code); } - pub fn span_bug(&self, s: Span, m: &str) { - self.tcx.sess.span_bug(s, m); - } - pub fn bckerr_to_string(&self, err: &BckError<'tcx>) -> String { match err.code { err_mutbl => { let descr = match err.cmt.note { mc::NoteClosureEnv(_) | mc::NoteUpvarRef(_) => { - self.cmt_to_string(&*err.cmt) + self.cmt_to_string(&err.cmt) } _ => match opt_loan_path(&err.cmt) { None => { format!("{} {}", err.cmt.mutbl.to_user_str(), - self.cmt_to_string(&*err.cmt)) + self.cmt_to_string(&err.cmt)) } Some(lp) => { format!("{} {} `{}`", err.cmt.mutbl.to_user_str(), - self.cmt_to_string(&*err.cmt), - self.loan_path_to_string(&*lp)) + self.cmt_to_string(&err.cmt), + self.loan_path_to_string(&lp)) } } }; @@ -867,7 +825,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { format!("cannot borrow {} as mutable", descr) } BorrowViolation(euv::ClosureInvocation) => { - self.tcx.sess.span_bug(err.span, + span_bug!(err.span, "err_mutbl with a closure invocation"); } } @@ -876,7 +834,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { let msg = match opt_loan_path(&err.cmt) { None => "borrowed value".to_string(), Some(lp) => { - format!("`{}`", self.loan_path_to_string(&*lp)) + format!("`{}`", self.loan_path_to_string(&lp)) } }; format!("{} does 
not live long enough", msg) @@ -949,20 +907,23 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } mc::AliasableStatic | mc::AliasableStaticMut => { - struct_span_err!( + let mut err = struct_span_err!( self.tcx.sess, span, E0388, - "{} in a static location", prefix) + "{} in a static location", prefix); + err.span_label(span, &format!("cannot write data in a static definition")); + err } mc::AliasableBorrowed => { - struct_span_err!( + let mut e = struct_span_err!( self.tcx.sess, span, E0389, - "{} in a `&` reference", prefix) + "{} in a `&` reference", prefix); + e.span_label(span, &"assignment into an immutable reference"); + e } }; if is_closure { - err.fileline_help(span, - "closures behind references must be called via `&mut`"); + err.help("closures behind references must be called via `&mut`"); } err.emit(); } @@ -984,76 +945,145 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { but it borrows {}, \ which is owned by the current function", cmt_path_or_string) - .span_note(capture_span, + .span_label(capture_span, &format!("{} is borrowed here", cmt_path_or_string)) + .span_label(err.span, + &format!("may outlive borrowed value {}", + cmt_path_or_string)) .span_suggestion(err.span, &format!("to force the closure to take ownership of {} \ (and any other referenced variables), \ use the `move` keyword, as shown:", - cmt_path_or_string), + cmt_path_or_string), suggestion) .emit(); } - pub fn note_and_explain_bckerr(&self, db: &mut DiagnosticBuilder, err: BckError<'tcx>) { - let code = err.code; - match code { - err_mutbl => { - match err.cmt.note { - mc::NoteClosureEnv(upvar_id) | mc::NoteUpvarRef(upvar_id) => { - // If this is an `Fn` closure, it simply can't mutate upvars. - // If it's an `FnMut` closure, the original variable was declared immutable. - // We need to determine which is the case here. - let kind = match err.cmt.upvar().unwrap().cat { - Categorization::Upvar(mc::Upvar { kind, .. 
}) => kind, - _ => unreachable!() - }; - if kind == ty::FnClosureKind { - db.span_help( - self.tcx.map.span(upvar_id.closure_expr_id), - "consider changing this closure to take \ - self by mutable reference"); + fn region_end_span(&self, region: &'tcx ty::Region) -> Option { + match *region { + ty::ReScope(scope) => { + match scope.span(&self.tcx.region_maps, &self.tcx.map) { + Some(s) => { + Some(s.end_point()) + } + None => { + None + } + } + } + _ => None + } + } + + pub fn note_and_explain_bckerr(&self, db: &mut DiagnosticBuilder, err: BckError<'tcx>, + error_span: Span) { + match err.code { + err_mutbl => self.note_and_explain_mutbl_error(db, &err, &error_span), + err_out_of_scope(super_scope, sub_scope, cause) => { + let (value_kind, value_msg) = match err.cmt.cat { + mc::Categorization::Rvalue(_) => + ("temporary value", "temporary value created here"), + _ => + ("borrowed value", "borrow occurs here") + }; + + let is_closure = match cause { + euv::ClosureCapture(s) => { + // The primary span starts out as the closure creation point. + // Change the primary span here to highlight the use of the variable + // in the closure, because it seems more natural. Highlight + // closure creation point as a secondary span. 
+ match db.span.primary_span() { + Some(primary) => { + db.span = MultiSpan::from_span(s); + db.span_label(primary, &format!("capture occurs here")); + db.span_label(s, &"does not live long enough"); + true + } + None => false } } _ => { - if let Categorization::Local(local_id) = err.cmt.cat { - let span = self.tcx.map.span(local_id); - if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(span) { - db.span_suggestion( - span, - &format!("to make the {} mutable, use `mut` as shown:", - self.cmt_to_string(&err.cmt)), - format!("mut {}", snippet)); + db.span_label(error_span, &"does not live long enough"); + false + } + }; + + let sub_span = self.region_end_span(sub_scope); + let super_span = self.region_end_span(super_scope); + + match (sub_span, super_span) { + (Some(s1), Some(s2)) if s1 == s2 => { + if !is_closure { + db.span = MultiSpan::from_span(s1); + db.span_label(error_span, &value_msg); + let msg = match opt_loan_path(&err.cmt) { + None => value_kind.to_string(), + Some(lp) => { + format!("`{}`", self.loan_path_to_string(&lp)) + } + }; + db.span_label(s1, + &format!("{} dropped here while still borrowed", msg)); + } else { + db.span_label(s1, &format!("{} dropped before borrower", value_kind)); + } + db.note("values in a scope are dropped in the opposite order \ + they are created"); + } + (Some(s1), Some(s2)) if !is_closure => { + db.span = MultiSpan::from_span(s2); + db.span_label(error_span, &value_msg); + let msg = match opt_loan_path(&err.cmt) { + None => value_kind.to_string(), + Some(lp) => { + format!("`{}`", self.loan_path_to_string(&lp)) + } + }; + db.span_label(s2, &format!("{} dropped here while still borrowed", msg)); + db.span_label(s1, &format!("{} needs to live until here", value_kind)); + } + _ => { + match sub_span { + Some(s) => { + db.span_label(s, &format!("{} needs to live until here", + value_kind)); + } + None => { + self.tcx.note_and_explain_region( + db, + "borrowed value must be valid for ", + sub_scope, + "..."); + } + } + 
match super_span { + Some(s) => { + db.span_label(s, &format!("{} only lives until here", value_kind)); + } + None => { + self.tcx.note_and_explain_region( + db, + "...but borrowed value is only valid for ", + super_scope, + ""); } } } } - } - err_out_of_scope(super_scope, sub_scope) => { - self.tcx.note_and_explain_region( - db, - "reference must be valid for ", - sub_scope, - "..."); - self.tcx.note_and_explain_region( - db, - "...but borrowed value is only valid for ", - super_scope, - ""); - if let Some(span) = statement_scope_span(self.tcx, super_scope) { - db.span_help(span, - "consider using a `let` binding to increase its lifetime"); + if let Some(_) = statement_scope_span(self.tcx, super_scope) { + db.note("consider using a `let` binding to increase its lifetime"); } } err_borrowed_pointer_too_short(loan_scope, ptr_scope) => { let descr = match opt_loan_path(&err.cmt) { Some(lp) => { - format!("`{}`", self.loan_path_to_string(&*lp)) + format!("`{}`", self.loan_path_to_string(&lp)) } - None => self.cmt_to_string(&*err.cmt), + None => self.cmt_to_string(&err.cmt), }; self.tcx.note_and_explain_region( db, @@ -1070,6 +1100,86 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } + fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder, err: &BckError<'tcx>, + error_span: &Span) { + match err.cmt.note { + mc::NoteClosureEnv(upvar_id) | mc::NoteUpvarRef(upvar_id) => { + // If this is an `Fn` closure, it simply can't mutate upvars. + // If it's an `FnMut` closure, the original variable was declared immutable. + // We need to determine which is the case here. + let kind = match err.cmt.upvar().unwrap().cat { + Categorization::Upvar(mc::Upvar { kind, .. }) => kind, + _ => bug!() + }; + if kind == ty::ClosureKind::Fn { + db.span_help(self.tcx.map.span(upvar_id.closure_expr_id), + "consider changing this closure to take \ + self by mutable reference"); + } + } + _ => { + if let Categorization::Deref(ref inner_cmt, ..) 
= err.cmt.cat { + if let Categorization::Local(local_id) = inner_cmt.cat { + let parent = self.tcx.map.get_parent_node(local_id); + let opt_fn_decl = FnLikeNode::from_node(self.tcx.map.get(parent)) + .map(|fn_like| fn_like.decl()); + + if let Some(fn_decl) = opt_fn_decl { + if let Some(ref arg) = fn_decl.inputs.iter() + .find(|ref arg| arg.pat.id == local_id) { + if let hir::TyRptr( + opt_lifetime, + hir::MutTy{mutbl: hir::Mutability::MutImmutable, ref ty}) = + arg.ty.node { + if let Some(lifetime) = opt_lifetime { + if let Ok(snippet) = self.tcx.sess.codemap() + .span_to_snippet(ty.span) { + if let Ok(lifetime_snippet) = self.tcx.sess.codemap() + .span_to_snippet(lifetime.span) { + db.span_label(arg.ty.span, + &format!("use `&{} mut {}` \ + here to make mutable", + lifetime_snippet, + snippet)); + } + } + } + else if let Ok(snippet) = self.tcx.sess.codemap() + .span_to_snippet(arg.ty.span) { + if snippet.starts_with("&") { + db.span_label(arg.ty.span, + &format!("use `{}` here to make mutable", + snippet.replace("&", "&mut "))); + } + } + } + } + } + } + } else if let Categorization::Local(local_id) = err.cmt.cat { + let span = self.tcx.map.span(local_id); + if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(span) { + if snippet.starts_with("ref mut ") || snippet.starts_with("&mut ") { + db.span_label(*error_span, &format!("cannot reborrow mutably")); + db.span_label(*error_span, &format!("try removing `&mut` here")); + } else { + if snippet.starts_with("ref ") { + db.span_label(span, &format!("use `{}` here to make mutable", + snippet.replace("ref ", "ref mut "))); + } else if snippet != "self" { + db.span_label(span, + &format!("use `mut {}` here to make mutable", + snippet)); + } + db.span_label(*error_span, &format!("cannot borrow mutably")); + } + } else { + db.span_label(*error_span, &format!("cannot borrow mutably")); + } + } + } + } + } pub fn append_loan_path_to_string(&self, loan_path: &LoanPath<'tcx>, out: &mut String) { @@ -1081,15 +1191,14 
@@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { LpDowncast(ref lp_base, variant_def_id) => { out.push('('); - self.append_loan_path_to_string(&**lp_base, out); + self.append_loan_path_to_string(&lp_base, out); out.push_str(DOWNCAST_PRINTED_OPERATOR); out.push_str(&self.tcx.item_path_str(variant_def_id)); out.push(')'); } - LpExtend(ref lp_base, _, LpInterior(_, InteriorField(fname))) => { - self.append_autoderefd_loan_path_to_string(&**lp_base, out); + self.append_autoderefd_loan_path_to_string(&lp_base, out); match fname { mc::NamedField(fname) => { out.push('.'); @@ -1103,13 +1212,13 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } LpExtend(ref lp_base, _, LpInterior(_, InteriorElement(..))) => { - self.append_autoderefd_loan_path_to_string(&**lp_base, out); + self.append_autoderefd_loan_path_to_string(&lp_base, out); out.push_str("[..]"); } LpExtend(ref lp_base, _, LpDeref(_)) => { out.push('*'); - self.append_loan_path_to_string(&**lp_base, out); + self.append_loan_path_to_string(&lp_base, out); } } } @@ -1122,18 +1231,18 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { // For a path like `(*x).f` or `(*x)[3]`, autoderef // rules would normally allow users to omit the `*x`. // So just serialize such paths to `x.f` or x[3]` respectively. - self.append_autoderefd_loan_path_to_string(&**lp_base, out) + self.append_autoderefd_loan_path_to_string(&lp_base, out) } LpDowncast(ref lp_base, variant_def_id) => { out.push('('); - self.append_autoderefd_loan_path_to_string(&**lp_base, out); + self.append_autoderefd_loan_path_to_string(&lp_base, out); out.push(':'); out.push_str(&self.tcx.item_path_str(variant_def_id)); out.push(')'); } - LpVar(..) | LpUpvar(..) | LpExtend(_, _, LpInterior(..)) => { + LpVar(..) | LpUpvar(..) 
| LpExtend(.., LpInterior(..)) => { self.append_loan_path_to_string(loan_path, out) } } @@ -1157,8 +1266,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } -fn statement_scope_span(tcx: &ty::ctxt, region: ty::Region) -> Option { - match region { +fn statement_scope_span(tcx: TyCtxt, region: &ty::Region) -> Option { + match *region { ty::ReScope(scope) => { match tcx.map.find(scope.node_id(&tcx.region_maps)) { Some(hir_map::NodeStmt(stmt)) => Some(stmt.span), diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 735e618cc732b..32bda5e11620a 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -14,23 +14,24 @@ pub use self::MoveKind::*; use borrowck::*; -use rustc::middle::cfg; +use rustc::cfg; use rustc::middle::dataflow::DataFlowContext; use rustc::middle::dataflow::BitwiseOperator; use rustc::middle::dataflow::DataFlowOperator; use rustc::middle::dataflow::KillFrom; use rustc::middle::expr_use_visitor as euv; use rustc::middle::expr_use_visitor::MutateMode; -use rustc::middle::ty; -use rustc::util::nodemap::{FnvHashMap, NodeSet}; +use rustc::middle::mem_categorization as mc; +use rustc::ty::{self, TyCtxt}; +use rustc::util::nodemap::{FxHashMap, NodeSet}; use std::cell::RefCell; use std::rc::Rc; use std::usize; use syntax::ast; -use syntax::ast_util; -use syntax::codemap::Span; -use rustc_front::hir; +use syntax_pos::Span; +use rustc::hir; +use rustc::hir::intravisit::IdRange; #[path="fragments.rs"] pub mod fragments; @@ -40,7 +41,7 @@ pub struct MoveData<'tcx> { pub paths: RefCell>>, /// Cache of loan path to move path index, for easy lookup. - pub path_map: RefCell>, MovePathIndex>>, + pub path_map: RefCell>, MovePathIndex>>, /// Each move or uninitialized variable gets an entry here. 
pub moves: RefCell>, @@ -196,7 +197,7 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { LpVar(_) | LpUpvar(_) => { true } - LpExtend(_, _, LpInterior(_, InteriorKind::InteriorElement(..))) => { + LpExtend(.., LpInterior(_, InteriorKind::InteriorElement(..))) => { // Paths involving element accesses a[i] do not refer to a unique // location, as there is no accurate tracking of the indices. // @@ -206,17 +207,17 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { false } LpDowncast(ref lp_base, _) | - LpExtend(ref lp_base, _, _) => { - loan_path_is_precise(&**lp_base) + LpExtend(ref lp_base, ..) => { + loan_path_is_precise(&lp_base) } } } -impl<'tcx> MoveData<'tcx> { +impl<'a, 'tcx> MoveData<'tcx> { pub fn new() -> MoveData<'tcx> { MoveData { paths: RefCell::new(Vec::new()), - path_map: RefCell::new(FnvHashMap()), + path_map: RefCell::new(FxHashMap()), moves: RefCell::new(Vec::new()), path_assignments: RefCell::new(Vec::new()), var_assignments: RefCell::new(Vec::new()), @@ -272,14 +273,10 @@ impl<'tcx> MoveData<'tcx> { /// Returns the existing move path index for `lp`, if any, and otherwise adds a new index for /// `lp` and any of its base paths that do not yet have an index. - pub fn move_path(&self, - tcx: &ty::ctxt<'tcx>, + pub fn move_path(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>) -> MovePathIndex { - match self.path_map.borrow().get(&lp) { - Some(&index) => { - return index; - } - None => {} + if let Some(&index) = self.path_map.borrow().get(&lp) { + return index; } let index = match lp.kind { @@ -298,7 +295,7 @@ impl<'tcx> MoveData<'tcx> { } LpDowncast(ref base, _) | - LpExtend(ref base, _, _) => { + LpExtend(ref base, ..) 
=> { let parent_index = self.move_path(tcx, base.clone()); let index = MovePathIndex(self.paths.borrow().len()); @@ -334,7 +331,7 @@ impl<'tcx> MoveData<'tcx> { fn existing_base_paths(&self, lp: &Rc>) -> Vec { - let mut result = vec!(); + let mut result = vec![]; self.add_existing_base_paths(lp, &mut result); result } @@ -354,7 +351,7 @@ impl<'tcx> MoveData<'tcx> { match lp.kind { LpVar(..) | LpUpvar(..) => { } LpDowncast(ref b, _) | - LpExtend(ref b, _, _) => { + LpExtend(ref b, ..) => { self.add_existing_base_paths(b, result); } } @@ -364,11 +361,38 @@ impl<'tcx> MoveData<'tcx> { } /// Adds a new move entry for a move of `lp` that occurs at location `id` with kind `kind`. - pub fn add_move(&self, - tcx: &ty::ctxt<'tcx>, + pub fn add_move(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>, id: ast::NodeId, kind: MoveKind) { + // Moving one union field automatically moves all its fields. + if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { + if let ty::TyAdt(adt_def, _) = base_lp.ty.sty { + if adt_def.is_union() { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + lp.ty + } else { + tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + self.add_move_helper(tcx, sibling_lp, id, kind); + } + return; + } + } + } + + self.add_move_helper(tcx, lp.clone(), id, kind); + } + + fn add_move_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + lp: Rc>, + id: ast::NodeId, + kind: MoveKind) { debug!("add_move(lp={:?}, id={}, kind={:?})", lp, id, @@ -392,13 +416,43 @@ impl<'tcx> MoveData<'tcx> { /// Adds a new record for an assignment to `lp` that occurs at location `id` with the given /// `span`. 
- pub fn add_assignment(&self, - tcx: &ty::ctxt<'tcx>, + pub fn add_assignment(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>, assign_id: ast::NodeId, span: Span, assignee_id: ast::NodeId, mode: euv::MutateMode) { + // Assigning to one union field automatically assigns to all its fields. + if let LpExtend(ref base_lp, mutbl, LpInterior(opt_variant_id, interior)) = lp.kind { + if let ty::TyAdt(adt_def, _) = base_lp.ty.sty { + if adt_def.is_union() { + for field in &adt_def.struct_variant().fields { + let field = InteriorKind::InteriorField(mc::NamedField(field.name)); + let field_ty = if field == interior { + lp.ty + } else { + tcx.types.err // Doesn't matter + }; + let sibling_lp_kind = LpExtend(base_lp.clone(), mutbl, + LpInterior(opt_variant_id, field)); + let sibling_lp = Rc::new(LoanPath::new(sibling_lp_kind, field_ty)); + self.add_assignment_helper(tcx, sibling_lp, assign_id, + span, assignee_id, mode); + } + return; + } + } + } + + self.add_assignment_helper(tcx, lp.clone(), assign_id, span, assignee_id, mode); + } + + fn add_assignment_helper(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + lp: Rc>, + assign_id: ast::NodeId, + span: Span, + assignee_id: ast::NodeId, + mode: euv::MutateMode) { debug!("add_assignment(lp={:?}, assign_id={}, assignee_id={}", lp, assign_id, assignee_id); @@ -437,8 +491,7 @@ impl<'tcx> MoveData<'tcx> { /// variant `lp`, that occurs at location `pattern_id`. (One /// should be able to recover the span info from the /// `pattern_id` and the ast_map, I think.) 
- pub fn add_variant_match(&self, - tcx: &ty::ctxt<'tcx>, + pub fn add_variant_match(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, lp: Rc>, pattern_id: ast::NodeId, base_lp: Rc>, @@ -461,7 +514,7 @@ impl<'tcx> MoveData<'tcx> { self.variant_matches.borrow_mut().push(variant_match); } - fn fixup_fragment_sets(&self, tcx: &ty::ctxt<'tcx>) { + fn fixup_fragment_sets(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { fragments::fixup_fragment_sets(self, tcx) } @@ -470,8 +523,7 @@ impl<'tcx> MoveData<'tcx> { /// Moves are generated by moves and killed by assignments and /// scoping. Assignments are generated by assignment to variables and /// killed by scoping. See `README.md` for more details. - fn add_gen_kills(&self, - tcx: &ty::ctxt<'tcx>, + fn add_gen_kills(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, dfcx_moves: &mut MoveDataFlow, dfcx_assign: &mut AssignDataFlow) { for (i, the_move) in self.moves.borrow().iter().enumerate() { @@ -515,7 +567,7 @@ impl<'tcx> MoveData<'tcx> { assignment_index); } LpExtend(..) => { - tcx.sess.bug("var assignment for non var path"); + bug!("var assignment for non var path"); } } } @@ -587,7 +639,7 @@ impl<'tcx> MoveData<'tcx> { // assignment referring to another location. 
let loan_path = self.path_loan_path(path); - if loan_path_is_precise(&*loan_path) { + if loan_path_is_precise(&loan_path) { self.each_applicable_move(path, |move_index| { debug!("kill_moves add_kill {:?} kill_id={} move_index={}", kill_kind, kill_id, move_index.get()); @@ -600,11 +652,11 @@ impl<'tcx> MoveData<'tcx> { impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { pub fn new(move_data: MoveData<'tcx>, - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, cfg: &cfg::CFG, - id_range: ast_util::IdRange, + id_range: IdRange, decl: &hir::FnDecl, - body: &hir::Block) + body: &hir::Expr) -> FlowedMoveData<'a, 'tcx> { let mut dfcx_moves = DataFlowContext::new(tcx, @@ -700,7 +752,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { if base_indices.iter().any(|x| x == &moved_path) { // Scenario 1 or 2: `loan_path` or some base path of // `loan_path` was moved. - if !f(the_move, &*self.move_data.path_loan_path(moved_path)) { + if !f(the_move, &self.move_data.path_loan_path(moved_path)) { ret = false; } } else { @@ -710,7 +762,7 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { // Scenario 3: some extension of `loan_path` // was moved f(the_move, - &*self.move_data.path_loan_path(moved_path)) + &self.move_data.path_loan_path(moved_path)) } else { true } diff --git a/src/librustc_borrowck/diagnostics.rs b/src/librustc_borrowck/diagnostics.rs index 1f5824f82e486..88f739d1c74bb 100644 --- a/src/librustc_borrowck/diagnostics.rs +++ b/src/librustc_borrowck/diagnostics.rs @@ -17,7 +17,7 @@ This error occurs when an attempt is made to use data captured by a closure, when that data may no longer exist. It's most commonly seen when attempting to return a closure: -``` +```compile_fail,E0373 fn foo() -> Box u32> { let x = 0u32; Box::new(|y| x + y) @@ -26,11 +26,12 @@ fn foo() -> Box u32> { Notice that `x` is stack-allocated by `foo()`. By default, Rust captures closed-over data by reference. This means that once `foo()` returns, `x` no -longer exists. 
An attempt to access `x` within the closure would thus be unsafe. +longer exists. An attempt to access `x` within the closure would thus be +unsafe. Another situation where this might be encountered is when spawning threads: -``` +```compile_fail,E0373 fn foo() { let x = 0u32; let y = 1u32; @@ -65,21 +66,29 @@ about safety. E0381: r##" It is not allowed to use or capture an uninitialized variable. For example: -``` +```compile_fail,E0381 fn main() { let x: i32; let y = x; // error, use of possibly uninitialized variable +} ``` To fix this, ensure that any declared variables are initialized before being -used. +used. Example: + +``` +fn main() { + let x: i32 = 0; + let y = x; // ok! +} +``` "##, E0382: r##" This error occurs when an attempt is made to use a variable after its contents have been moved elsewhere. For example: -``` +```compile_fail,E0382 struct MyStruct { s: u32 } fn main() { @@ -124,7 +133,7 @@ fn main() { let mut x = Rc::new(RefCell::new(MyStruct{ s: 5u32 })); let y = x.clone(); x.borrow_mut().s = 6; - println!("{}", x.borrow.s); + println!("{}", x.borrow().s); } ``` @@ -144,7 +153,11 @@ structure that is currently uninitialized. For example, this can happen when a drop has taken place: -``` +```ignore +struct Foo { + a: u32, +} + let mut x = Foo { a: 1 }; drop(x); // `x` is now uninitialized x.a = 2; // error, partial reinitialization of uninitialized structure `t` @@ -153,6 +166,10 @@ x.a = 2; // error, partial reinitialization of uninitialized structure `t` This error can be fixed by fully reinitializing the structure in question: ``` +struct Foo { + a: u32, +} + let mut x = Foo { a: 1 }; drop(x); x = Foo { a: 2 }; @@ -163,8 +180,8 @@ E0384: r##" This error occurs when an attempt is made to reassign an immutable variable. For example: -``` -fn main(){ +```compile_fail,E0384 +fn main() { let x = 3; x = 5; // error, reassignment of immutable variable } @@ -174,7 +191,7 @@ By default, variables in Rust are immutable. 
To fix this error, add the keyword `mut` after the keyword `let` when declaring the variable. For example: ``` -fn main(){ +fn main() { let mut x = 3; x = 5; } @@ -187,7 +204,7 @@ reference stored inside an immutable container. For example, this can happen when storing a `&mut` inside an immutable `Box`: -``` +```compile_fail,E0386 let mut x: i64 = 1; let y: Box<_> = Box::new(&mut x); **y = 2; // error, cannot assign to data in an immutable container @@ -201,10 +218,12 @@ let mut y: Box<_> = Box::new(&mut x); **y = 2; ``` -It can also be fixed by using a type with interior mutability, such as `Cell` or -`RefCell`: +It can also be fixed by using a type with interior mutability, such as `Cell` +or `RefCell`: ``` +use std::cell::Cell; + let x: i64 = 1; let y: Box> = Box::new(Cell::new(x)); y.set(2); @@ -215,12 +234,12 @@ E0387: r##" This error occurs when an attempt is made to mutate or mutably reference data that a closure has captured immutably. Examples of this error are shown below: -``` +```compile_fail,E0387 // Accepts a function or a closure that captures its environment immutably. // Closures passed to foo will not be able to mutate their closed-over state. fn foo(f: F) { } -// Attempts to mutate closed-over data. Error message reads: +// Attempts to mutate closed-over data. Error message reads: // `cannot assign to data in a captured outer variable...` fn mutable() { let mut x = 0u32; @@ -248,12 +267,14 @@ fn foo(f: F) { } ``` Alternatively, we can consider using the `Cell` and `RefCell` types to achieve -interior mutability through a shared reference. Our example's `mutable` function -could be redefined as below: +interior mutability through a shared reference. 
Our example's `mutable` +function could be redefined as below: ``` use std::cell::Cell; +fn foo(f: F) { } + fn mutable() { let x = Cell::new(0u32); foo(|| x.set(2)); @@ -265,10 +286,98 @@ You can read more about cell types in the API documentation: https://doc.rust-lang.org/std/cell/ "##, +E0388: r##" +A mutable borrow was attempted in a static location. + +Erroneous code example: + +```compile_fail,E0388 +static X: i32 = 1; + +static STATIC_REF: &'static mut i32 = &mut X; +// error: cannot borrow data mutably in a static location + +const CONST_REF: &'static mut i32 = &mut X; +// error: cannot borrow data mutably in a static location +``` + +To fix this error, you have to use constant borrow: + +``` +static X: i32 = 1; + +static STATIC_REF: &'static i32 = &X; +``` +"##, + +E0389: r##" +An attempt was made to mutate data using a non-mutable reference. This +commonly occurs when attempting to assign to a non-mutable reference of a +mutable reference (`&(&mut T)`). + +Example of erroneous code: + +```compile_fail,E0389 +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy = FancyNum{ num: 5 }; + let fancy_ref = &(&mut fancy); + fancy_ref.num = 6; // error: cannot assign to data in a `&` reference + println!("{}", fancy_ref.num); +} +``` + +Here, `&mut fancy` is mutable, but `&(&mut fancy)` is not. Creating an +immutable reference to a value borrows it immutably. There can be multiple +references of type `&(&mut T)` that point to the same value, so they must be +immutable to prevent multiple mutable references to the same value. + +To fix this, either remove the outer reference: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy = FancyNum{ num: 5 }; + + let fancy_ref = &mut fancy; + // `fancy_ref` is now &mut FancyNum, rather than &(&mut FancyNum) + + fancy_ref.num = 6; // No error! 
+ + println!("{}", fancy_ref.num); +} +``` + +Or make the outer reference mutable: + +``` +struct FancyNum { + num: u8 +} + +fn main() { + let mut fancy = FancyNum{ num: 5 }; + + let fancy_ref = &mut (&mut fancy); + // `fancy_ref` is now &mut(&mut FancyNum), rather than &(&mut FancyNum) + + fancy_ref.num = 6; // No error! + + println!("{}", fancy_ref.num); +} +``` +"##, + E0499: r##" A variable was borrowed as mutable more than once. Erroneous code example: -``` +```compile_fail,E0499 let mut i = 0; let mut x = &mut i; let mut a = &mut i; @@ -293,10 +402,488 @@ let c = &i; // still ok! ``` "##, +E0500: r##" +A borrowed variable was used in another closure. Example of erroneous code: + +```compile_fail +fn you_know_nothing(jon_snow: &mut i32) { + let nights_watch = || { + *jon_snow = 2; + }; + let starks = || { + *jon_snow = 3; // error: closure requires unique access to `jon_snow` + // but it is already borrowed + }; +} +``` + +In here, `jon_snow` is already borrowed by the `nights_watch` closure, so it +cannot be borrowed by the `starks` closure at the same time. To fix this issue, +you can put the closure in its own scope: + +``` +fn you_know_nothing(jon_snow: &mut i32) { + { + let nights_watch = || { + *jon_snow = 2; + }; + } // At this point, `jon_snow` is free. + let starks = || { + *jon_snow = 3; + }; +} +``` + +Or, if the type implements the `Clone` trait, you can clone it between +closures: + +``` +fn you_know_nothing(jon_snow: &mut i32) { + let mut jon_copy = jon_snow.clone(); + let nights_watch = || { + jon_copy = 2; + }; + let starks = || { + *jon_snow = 3; + }; +} +``` +"##, + +E0501: r##" +This error indicates that a mutable variable is being used while it is still +captured by a closure. Because the closure has borrowed the variable, it is not +available for use until the closure goes out of scope. + +Note that a capture will either move or borrow a variable, but in this +situation, the closure is borrowing the variable. 
Take a look at +http://rustbyexample.com/fn/closures/capture.html for more information about +capturing. + +Example of erroneous code: + +```compile_fail,E0501 +fn inside_closure(x: &mut i32) { + // Actions which require unique access +} + +fn outside_closure(x: &mut i32) { + // Actions which require unique access +} + +fn foo(a: &mut i32) { + let bar = || { + inside_closure(a) + }; + outside_closure(a); // error: cannot borrow `*a` as mutable because previous + // closure requires unique access. +} +``` + +To fix this error, you can place the closure in its own scope: + +``` +fn inside_closure(x: &mut i32) {} +fn outside_closure(x: &mut i32) {} + +fn foo(a: &mut i32) { + { + let bar = || { + inside_closure(a) + }; + } // borrow on `a` ends. + outside_closure(a); // ok! +} +``` + +Or you can pass the variable as a parameter to the closure: + +``` +fn inside_closure(x: &mut i32) {} +fn outside_closure(x: &mut i32) {} + +fn foo(a: &mut i32) { + let bar = |s: &mut i32| { + inside_closure(s) + }; + outside_closure(a); + bar(a); +} +``` + +It may be possible to define the closure later: + +``` +fn inside_closure(x: &mut i32) {} +fn outside_closure(x: &mut i32) {} + +fn foo(a: &mut i32) { + outside_closure(a); + let bar = || { + inside_closure(a) + }; +} +``` +"##, + +E0502: r##" +This error indicates that you are trying to borrow a variable as mutable when it +has already been borrowed as immutable. + +Example of erroneous code: + +```compile_fail,E0502 +fn bar(x: &mut i32) {} +fn foo(a: &mut i32) { + let ref y = a; // a is borrowed as immutable. + bar(a); // error: cannot borrow `*a` as mutable because `a` is also borrowed + // as immutable +} +``` + +To fix this error, ensure that you don't have any other references to the +variable before trying to access it mutably: + +``` +fn bar(x: &mut i32) {} +fn foo(a: &mut i32) { + bar(a); + let ref y = a; // ok! 
+} +``` + +For more information on the rust ownership system, take a look at +https://doc.rust-lang.org/stable/book/references-and-borrowing.html. +"##, + +E0503: r##" +A value was used after it was mutably borrowed. + +Example of erroneous code: + +```compile_fail,E0503 +fn main() { + let mut value = 3; + // Create a mutable borrow of `value`. This borrow + // lives until the end of this function. + let _borrow = &mut value; + let _sum = value + 1; // error: cannot use `value` because + // it was mutably borrowed +} +``` + +In this example, `value` is mutably borrowed by `borrow` and cannot be +used to calculate `sum`. This is not possible because this would violate +Rust's mutability rules. + +You can fix this error by limiting the scope of the borrow: + +``` +fn main() { + let mut value = 3; + // By creating a new block, you can limit the scope + // of the reference. + { + let _borrow = &mut value; // Use `_borrow` inside this block. + } + // The block has ended and with it the borrow. + // You can now use `value` again. + let _sum = value + 1; +} +``` + +Or by cloning `value` before borrowing it: + +``` +fn main() { + let mut value = 3; + // We clone `value`, creating a copy. + let value_cloned = value.clone(); + // The mutable borrow is a reference to `value` and + // not to `value_cloned`... + let _borrow = &mut value; + // ... which means we can still use `value_cloned`, + let _sum = value_cloned + 1; + // even though the borrow only ends here. +} +``` + +You can find more information about borrowing in the rust-book: +http://doc.rust-lang.org/stable/book/references-and-borrowing.html +"##, + +E0504: r##" +This error occurs when an attempt is made to move a borrowed variable into a +closure. 
+ +Example of erroneous code: + +```compile_fail,E0504 +struct FancyNum { + num: u8, +} + +fn main() { + let fancy_num = FancyNum { num: 5 }; + let fancy_ref = &fancy_num; + + let x = move || { + println!("child function: {}", fancy_num.num); + // error: cannot move `fancy_num` into closure because it is borrowed + }; + + x(); + println!("main function: {}", fancy_ref.num); +} +``` + +Here, `fancy_num` is borrowed by `fancy_ref` and so cannot be moved into +the closure `x`. There is no way to move a value into a closure while it is +borrowed, as that would invalidate the borrow. + +If the closure can't outlive the value being moved, try using a reference +rather than moving: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let fancy_num = FancyNum { num: 5 }; + let fancy_ref = &fancy_num; + + let x = move || { + // fancy_ref is usable here because it doesn't move `fancy_num` + println!("child function: {}", fancy_ref.num); + }; + + x(); + + println!("main function: {}", fancy_num.num); +} +``` + +If the value has to be borrowed and then moved, try limiting the lifetime of +the borrow using a scoped block: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let fancy_num = FancyNum { num: 5 }; + + { + let fancy_ref = &fancy_num; + println!("main function: {}", fancy_ref.num); + // `fancy_ref` goes out of scope here + } + + let x = move || { + // `fancy_num` can be moved now (no more references exist) + println!("child function: {}", fancy_num.num); + }; + + x(); +} +``` + +If the lifetime of a reference isn't enough, such as in the case of threading, +consider using an `Arc` to create a reference-counted value: + +``` +use std::sync::Arc; +use std::thread; + +struct FancyNum { + num: u8, +} + +fn main() { + let fancy_ref1 = Arc::new(FancyNum { num: 5 }); + let fancy_ref2 = fancy_ref1.clone(); + + let x = thread::spawn(move || { + // `fancy_ref1` can be moved and has a `'static` lifetime + println!("child thread: {}", fancy_ref1.num); + }); + + 
x.join().expect("child thread should finish"); + println!("main thread: {}", fancy_ref2.num); +} +``` +"##, + +E0505: r##" +A value was moved out while it was still borrowed. + +Erroneous code example: + +```compile_fail,E0505 +struct Value {} + +fn eat(val: Value) {} + +fn main() { + let x = Value{}; + { + let _ref_to_val: &Value = &x; + eat(x); + } +} +``` + +Here, the function `eat` takes the ownership of `x`. However, +`x` cannot be moved because it was borrowed to `_ref_to_val`. +To fix that you can do few different things: + +* Try to avoid moving the variable. +* Release borrow before move. +* Implement the `Copy` trait on the type. + +Examples: + +``` +struct Value {} + +fn eat(val: &Value) {} + +fn main() { + let x = Value{}; + { + let _ref_to_val: &Value = &x; + eat(&x); // pass by reference, if it's possible + } +} +``` + +Or: + +``` +struct Value {} + +fn eat(val: Value) {} + +fn main() { + let x = Value{}; + { + let _ref_to_val: &Value = &x; + } + eat(x); // release borrow and then move it. +} +``` + +Or: + +``` +#[derive(Clone, Copy)] // implement Copy trait +struct Value {} + +fn eat(val: Value) {} + +fn main() { + let x = Value{}; + { + let _ref_to_val: &Value = &x; + eat(x); // it will be copied here. + } +} +``` + +You can find more information about borrowing in the rust-book: +http://doc.rust-lang.org/stable/book/references-and-borrowing.html +"##, + +E0506: r##" +This error occurs when an attempt is made to assign to a borrowed value. + +Example of erroneous code: + +```compile_fail,E0506 +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy_num = FancyNum { num: 5 }; + let fancy_ref = &fancy_num; + fancy_num = FancyNum { num: 6 }; + // error: cannot assign to `fancy_num` because it is borrowed + + println!("Num: {}, Ref: {}", fancy_num.num, fancy_ref.num); +} +``` + +Because `fancy_ref` still holds a reference to `fancy_num`, `fancy_num` can't +be assigned to a new value as it would invalidate the reference. 
+ +Alternatively, we can move out of `fancy_num` into a second `fancy_num`: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy_num = FancyNum { num: 5 }; + let moved_num = fancy_num; + fancy_num = FancyNum { num: 6 }; + + println!("Num: {}, Moved num: {}", fancy_num.num, moved_num.num); +} +``` + +If the value has to be borrowed, try limiting the lifetime of the borrow using +a scoped block: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy_num = FancyNum { num: 5 }; + + { + let fancy_ref = &fancy_num; + println!("Ref: {}", fancy_ref.num); + } + + // Works because `fancy_ref` is no longer in scope + fancy_num = FancyNum { num: 6 }; + println!("Num: {}", fancy_num.num); +} +``` + +Or by moving the reference into a function: + +``` +struct FancyNum { + num: u8, +} + +fn main() { + let mut fancy_num = FancyNum { num: 5 }; + + print_fancy_ref(&fancy_num); + + // Works because function borrow has ended + fancy_num = FancyNum { num: 6 }; + println!("Num: {}", fancy_num.num); +} + +fn print_fancy_ref(fancy_ref: &FancyNum){ + println!("Ref: {}", fancy_ref.num); +} +``` +"##, + E0507: r##" You tried to move out of a value which was borrowed. Erroneous code example: -``` +```compile_fail,E0507 use std::cell::RefCell; struct TheDarkKnight; @@ -377,23 +964,179 @@ fn main() { } ``` +Moving out of a member of a mutably borrowed struct is fine if you put something +back. `mem::replace` can be used for that: + +```ignore +struct TheDarkKnight; + +impl TheDarkKnight { + fn nothing_is_true(self) {} +} + +struct Batcave { + knight: TheDarkKnight +} + +fn main() { + use std::mem; + + let mut cave = Batcave { + knight: TheDarkKnight + }; + let borrowed = &mut cave; + + borrowed.knight.nothing_is_true(); // E0507 + mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok! 
+} +``` + You can find more information about borrowing in the rust-book: http://doc.rust-lang.org/stable/book/references-and-borrowing.html "##, +E0508: r##" +A value was moved out of a non-copy fixed-size array. + +Example of erroneous code: + +```compile_fail,E0508 +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + let _value = array[0]; // error: cannot move out of type `[NonCopy; 1]`, + // a non-copy fixed-size array +} +``` + +The first element was moved out of the array, but this is not +possible because `NonCopy` does not implement the `Copy` trait. + +Consider borrowing the element instead of moving it: + +``` +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + let _value = &array[0]; // Borrowing is allowed, unlike moving. +} +``` + +Alternatively, if your type implements `Clone` and you need to own the value, +consider borrowing and then cloning: + +``` +#[derive(Clone)] +struct NonCopy; + +fn main() { + let array = [NonCopy; 1]; + // Now you can clone the array element. + let _value = array[0].clone(); +} +``` +"##, + +E0509: r##" +This error occurs when an attempt is made to move out of a value whose type +implements the `Drop` trait. + +Example of erroneous code: + +```compile_fail,E0509 +struct FancyNum { + num: usize +} + +struct DropStruct { + fancy: FancyNum +} + +impl Drop for DropStruct { + fn drop(&mut self) { + // Destruct DropStruct, possibly using FancyNum + } +} + +fn main() { + let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; + let fancy_field = drop_struct.fancy; // Error E0509 + println!("Fancy: {}", fancy_field.num); + // implicit call to `drop_struct.drop()` as drop_struct goes out of scope +} +``` + +Here, we tried to move a field out of a struct of type `DropStruct` which +implements the `Drop` trait. However, a struct cannot be dropped if one or +more of its fields have been moved. + +Structs implementing the `Drop` trait have an implicit destructor that gets +called when they go out of scope. 
This destructor may use the fields of the +struct, so moving out of the struct could make it impossible to run the +destructor. Therefore, we must think of all values whose type implements the +`Drop` trait as single units whose fields cannot be moved. + +This error can be fixed by creating a reference to the fields of a struct, +enum, or tuple using the `ref` keyword: + +``` +struct FancyNum { + num: usize +} + +struct DropStruct { + fancy: FancyNum +} + +impl Drop for DropStruct { + fn drop(&mut self) { + // Destruct DropStruct, possibly using FancyNum + } +} + +fn main() { + let drop_struct = DropStruct{fancy: FancyNum{num: 5}}; + let ref fancy_field = drop_struct.fancy; // No more errors! + println!("Fancy: {}", fancy_field.num); + // implicit call to `drop_struct.drop()` as drop_struct goes out of scope +} +``` + +Note that this technique can also be used in the arms of a match expression: + +``` +struct FancyNum { + num: usize +} + +enum DropEnum { + Fancy(FancyNum) +} + +impl Drop for DropEnum { + fn drop(&mut self) { + // Destruct DropEnum, possibly using FancyNum + } +} + +fn main() { + // Creates and enum of type `DropEnum`, which implements `Drop` + let drop_enum = DropEnum::Fancy(FancyNum{num: 10}); + match drop_enum { + // Creates a reference to the inside of `DropEnum::Fancy` + DropEnum::Fancy(ref fancy_field) => // No error! + println!("It was fancy-- {}!", fancy_field.num), + } + // implicit call to `drop_enum.drop()` as drop_enum goes out of scope +} +``` +"##, + } register_diagnostics! { E0385, // {} in an aliasable location - E0388, // {} in a static location - E0389, // {} in a `&` reference - E0500, // closure requires unique access to `..` but .. is already borrowed - E0501, // cannot borrow `..`.. as .. because previous closure requires unique access - E0502, // cannot borrow `..`.. as .. because .. is also borrowed as ... 
- E0503, // cannot use `..` because it was mutably borrowed - E0504, // cannot move `..` into closure because it is borrowed - E0505, // cannot move out of `..` because it is borrowed - E0506, // cannot assign to `..` because it is borrowed - E0508, // cannot move out of type `..`, a non-copy fixed-size array - E0509, // cannot move out of type `..`, which defines the `Drop` trait + E0524, // two closures require unique access to `..` at the same time } diff --git a/src/librustc_borrowck/graphviz.rs b/src/librustc_borrowck/graphviz.rs index eb63f572649a3..0da9525efd856 100644 --- a/src/librustc_borrowck/graphviz.rs +++ b/src/librustc_borrowck/graphviz.rs @@ -14,13 +14,13 @@ pub use self::Variant::*; -pub use rustc::middle::cfg::graphviz::{Node, Edge}; -use rustc::middle::cfg::graphviz as cfg_dot; +pub use rustc::cfg::graphviz::{Node, Edge}; +use rustc::cfg::graphviz as cfg_dot; use borrowck; use borrowck::{BorrowckCtxt, LoanPath}; use dot; -use rustc::middle::cfg::CFGIndex; +use rustc::cfg::CFGIndex; use rustc::middle::dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit}; use std::rc::Rc; use dot::IntoCow; @@ -87,7 +87,7 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> { if saw_some { set.push_str(", "); } - let loan_str = self.borrowck_ctxt.loan_path_to_string(&*lp); + let loan_str = self.borrowck_ctxt.loan_path_to_string(&lp); set.push_str(&loan_str[..]); saw_some = true; true @@ -129,7 +129,9 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> { } } -impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> { +impl<'a, 'tcx> dot::Labeller<'a> for DataflowLabeller<'a, 'tcx> { + type Node = Node<'a>; + type Edge = Edge<'a>; fn graph_id(&'a self) -> dot::Id<'a> { self.inner.graph_id() } fn node_id(&'a self, n: &Node<'a>) -> dot::Id<'a> { self.inner.node_id(n) } fn node_label(&'a self, n: &Node<'a>) -> dot::LabelText<'a> { @@ -143,7 +145,9 @@ impl<'a, 'tcx> dot::Labeller<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 't fn edge_label(&'a self, 
e: &Edge<'a>) -> dot::LabelText<'a> { self.inner.edge_label(e) } } -impl<'a, 'tcx> dot::GraphWalk<'a, Node<'a>, Edge<'a>> for DataflowLabeller<'a, 'tcx> { +impl<'a, 'tcx> dot::GraphWalk<'a> for DataflowLabeller<'a, 'tcx> { + type Node = Node<'a>; + type Edge = Edge<'a>; fn nodes(&'a self) -> dot::Nodes<'a, Node<'a>> { self.inner.nodes() } fn edges(&'a self) -> dot::Edges<'a, Edge<'a>> { self.inner.edges() } fn source(&'a self, edge: &Edge<'a>) -> Node<'a> { self.inner.source(edge) } diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index d730b383a8049..1ff232da427fc 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -15,6 +15,7 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![allow(non_camel_case_types)] @@ -22,19 +23,25 @@ #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] - +#![feature(associated_consts)] +#![feature(nonzero)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_errors as errors; // for "clarity", rename the graphviz crate to dot; graphviz within `borrowck` // refers to the borrowck-specific graphviz adapter traits. extern crate graphviz as dot; +#[macro_use] extern crate rustc; -extern crate rustc_front; +extern crate rustc_data_structures; +extern crate rustc_mir; +extern crate core; // for NonZero pub use borrowck::check_crate; pub use borrowck::build_borrowck_dataflow_data_for_fn; -pub use borrowck::{AnalysisData, BorrowckCtxt}; +pub use borrowck::{AnalysisData, BorrowckCtxt, ElaborateDrops}; // NB: This module needs to be declared first so diagnostics are // registered before they are used. 
diff --git a/src/librustc_const_eval/Cargo.toml b/src/librustc_const_eval/Cargo.toml new file mode 100644 index 0000000000000..0e5cbce8639be --- /dev/null +++ b/src/librustc_const_eval/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_const_eval" +version = "0.0.0" + +[lib] +name = "rustc_const_eval" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +arena = { path = "../libarena" } +log = { path = "../liblog" } +serialize = { path = "../libserialize" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +syntax = { path = "../libsyntax" } +graphviz = { path = "../libgraphviz" } +syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs new file mode 100644 index 0000000000000..23771f4bae3dc --- /dev/null +++ b/src/librustc_const_eval/_match.rs @@ -0,0 +1,856 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use self::Constructor::*; +use self::Usefulness::*; +use self::WitnessPreference::*; + +use rustc::middle::const_val::ConstVal; +use eval::{compare_const_vals}; + +use rustc_const_math::ConstInt; + +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::indexed_vec::Idx; + +use pattern::{FieldPattern, Pattern, PatternKind}; +use pattern::{PatternFoldable, PatternFolder}; + +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; + +use rustc::hir; +use rustc::hir::def::CtorKind; +use rustc::hir::{Pat, PatKind}; +use rustc::util::common::ErrorReported; + +use syntax::ast::{self, DUMMY_NODE_ID}; +use syntax::codemap::Spanned; +use syntax::ptr::P; +use syntax_pos::{Span, DUMMY_SP}; + +use arena::TypedArena; + +use std::cmp::{self, Ordering}; +use std::fmt; +use std::iter::{FromIterator, IntoIterator, repeat}; + +pub fn expand_pattern<'a, 'tcx>(cx: &MatchCheckCtxt<'a, 'tcx>, pat: Pattern<'tcx>) + -> &'a Pattern<'tcx> +{ + cx.pattern_arena.alloc(LiteralExpander.fold_pattern(&pat)) +} + +struct LiteralExpander; +impl<'tcx> PatternFolder<'tcx> for LiteralExpander { + fn fold_pattern(&mut self, pat: &Pattern<'tcx>) -> Pattern<'tcx> { + match (&pat.ty.sty, &*pat.kind) { + (&ty::TyRef(_, mt), &PatternKind::Constant { ref value }) => { + Pattern { + ty: pat.ty, + span: pat.span, + kind: box PatternKind::Deref { + subpattern: Pattern { + ty: mt.ty, + span: pat.span, + kind: box PatternKind::Constant { value: value.clone() }, + } + } + } + } + (_, &PatternKind::Binding { subpattern: Some(ref s), .. }) => { + s.fold_with(self) + } + _ => pat.super_fold_with(self) + } + } +} + +pub const DUMMY_WILD_PAT: &'static Pat = &Pat { + id: DUMMY_NODE_ID, + node: PatKind::Wild, + span: DUMMY_SP +}; + +impl<'tcx> Pattern<'tcx> { + fn is_wildcard(&self) -> bool { + match *self.kind { + PatternKind::Binding { subpattern: None, .. 
} | PatternKind::Wild => + true, + _ => false + } + } +} + +pub struct Matrix<'a, 'tcx: 'a>(Vec>>); + +impl<'a, 'tcx> Matrix<'a, 'tcx> { + pub fn empty() -> Self { + Matrix(vec![]) + } + + pub fn push(&mut self, row: Vec<&'a Pattern<'tcx>>) { + self.0.push(row) + } +} + +/// Pretty-printer for matrices of patterns, example: +/// ++++++++++++++++++++++++++ +/// + _ + [] + +/// ++++++++++++++++++++++++++ +/// + true + [First] + +/// ++++++++++++++++++++++++++ +/// + true + [Second(true)] + +/// ++++++++++++++++++++++++++ +/// + false + [_] + +/// ++++++++++++++++++++++++++ +/// + _ + [_, _, ..tail] + +/// ++++++++++++++++++++++++++ +impl<'a, 'tcx> fmt::Debug for Matrix<'a, 'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "\n")?; + + let &Matrix(ref m) = self; + let pretty_printed_matrix: Vec> = m.iter().map(|row| { + row.iter().map(|pat| format!("{:?}", pat)).collect() + }).collect(); + + let column_count = m.iter().map(|row| row.len()).max().unwrap_or(0); + assert!(m.iter().all(|row| row.len() == column_count)); + let column_widths: Vec = (0..column_count).map(|col| { + pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0) + }).collect(); + + let total_width = column_widths.iter().cloned().sum::() + column_count * 3 + 1; + let br = repeat('+').take(total_width).collect::(); + write!(f, "{}\n", br)?; + for row in pretty_printed_matrix { + write!(f, "+")?; + for (column, pat_str) in row.into_iter().enumerate() { + write!(f, " ")?; + write!(f, "{:1$}", pat_str, column_widths[column])?; + write!(f, " +")?; + } + write!(f, "\n")?; + write!(f, "{}\n", br)?; + } + Ok(()) + } +} + +impl<'a, 'tcx> FromIterator>> for Matrix<'a, 'tcx> { + fn from_iter>>>(iter: T) -> Self + { + Matrix(iter.into_iter().collect()) + } +} + +//NOTE: appears to be the only place other then InferCtxt to contain a ParamEnv +pub struct MatchCheckCtxt<'a, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + /// A wild pattern with an error type - it exists to 
avoid having to normalize + /// associated types to get field types. + pub wild_pattern: &'a Pattern<'tcx>, + pub pattern_arena: &'a TypedArena>, + pub byte_array_map: FxHashMap<*const Pattern<'tcx>, Vec<&'a Pattern<'tcx>>>, +} + +impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { + pub fn create_and_enter( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + f: F) -> R + where F: for<'b> FnOnce(MatchCheckCtxt<'b, 'tcx>) -> R + { + let wild_pattern = Pattern { + ty: tcx.types.err, + span: DUMMY_SP, + kind: box PatternKind::Wild + }; + + let pattern_arena = TypedArena::new(); + + f(MatchCheckCtxt { + tcx: tcx, + wild_pattern: &wild_pattern, + pattern_arena: &pattern_arena, + byte_array_map: FxHashMap(), + }) + } + + // convert a byte-string pattern to a list of u8 patterns. + fn lower_byte_str_pattern(&mut self, pat: &'a Pattern<'tcx>) -> Vec<&'a Pattern<'tcx>> { + let pattern_arena = &*self.pattern_arena; + let tcx = self.tcx; + self.byte_array_map.entry(pat).or_insert_with(|| { + match pat.kind { + box PatternKind::Constant { + value: ConstVal::ByteStr(ref data) + } => { + data.iter().map(|c| &*pattern_arena.alloc(Pattern { + ty: tcx.types.u8, + span: pat.span, + kind: box PatternKind::Constant { + value: ConstVal::Integral(ConstInt::U8(*c)) + } + })).collect() + } + _ => span_bug!(pat.span, "unexpected byte array pattern {:?}", pat) + } + }).clone() + } +} + +#[derive(Clone, Debug, PartialEq)] +pub enum Constructor { + /// The constructor of all patterns that don't vary by constructor, + /// e.g. struct patterns and fixed-length arrays. + Single, + /// Enum variants. + Variant(DefId), + /// Literal values. + ConstantValue(ConstVal), + /// Ranges of literal values (2..5). + ConstantRange(ConstVal, ConstVal), + /// Array patterns of length n. 
+ Slice(usize), +} + +impl<'tcx> Constructor { + fn variant_for_adt(&self, adt: &'tcx ty::AdtDef) -> &'tcx ty::VariantDef { + match self { + &Variant(vid) => adt.variant_with_id(vid), + &Single => { + assert_eq!(adt.variants.len(), 1); + &adt.variants[0] + } + _ => bug!("bad constructor {:?} for adt {:?}", self, adt) + } + } +} + +#[derive(Clone, PartialEq)] +pub enum Usefulness { + Useful, + UsefulWithWitness(Vec), + NotUseful +} + +#[derive(Copy, Clone)] +pub enum WitnessPreference { + ConstructWitness, + LeaveOutWitness +} + +#[derive(Copy, Clone, Debug)] +struct PatternContext<'tcx> { + ty: Ty<'tcx>, + max_slice_length: usize, +} + + +fn const_val_to_expr(value: &ConstVal) -> P { + let node = match value { + &ConstVal::Bool(b) => ast::LitKind::Bool(b), + _ => bug!() + }; + P(hir::Expr { + id: DUMMY_NODE_ID, + node: hir::ExprLit(P(Spanned { node: node, span: DUMMY_SP })), + span: DUMMY_SP, + attrs: ast::ThinVec::new(), + }) +} + +/// A stack of patterns in reverse order of construction +#[derive(Clone, PartialEq, Eq)] +pub struct Witness(Vec>); + +impl Witness { + pub fn single_pattern(&self) -> &Pat { + assert_eq!(self.0.len(), 1); + &self.0[0] + } + + fn push_wild_constructor<'a, 'tcx>( + mut self, + cx: &MatchCheckCtxt<'a, 'tcx>, + ctor: &Constructor, + ty: Ty<'tcx>) + -> Self + { + let arity = constructor_arity(cx, ctor, ty); + self.0.extend(repeat(DUMMY_WILD_PAT).take(arity).map(|p| P(p.clone()))); + self.apply_constructor(cx, ctor, ty) + } + + + /// Constructs a partial witness for a pattern given a list of + /// patterns expanded by the specialization step. + /// + /// When a pattern P is discovered to be useful, this function is used bottom-up + /// to reconstruct a complete witness, e.g. a pattern P' that covers a subset + /// of values, V, where each value in that set is not covered by any previously + /// used patterns and is covered by the pattern P'. 
Examples: + /// + /// left_ty: tuple of 3 elements + /// pats: [10, 20, _] => (10, 20, _) + /// + /// left_ty: struct X { a: (bool, &'static str), b: usize} + /// pats: [(false, "foo"), 42] => X { a: (false, "foo"), b: 42 } + fn apply_constructor<'a, 'tcx>( + mut self, + cx: &MatchCheckCtxt<'a,'tcx>, + ctor: &Constructor, + ty: Ty<'tcx>) + -> Self + { + let arity = constructor_arity(cx, ctor, ty); + let pat = { + let len = self.0.len(); + let mut pats = self.0.drain(len-arity..).rev(); + + match ty.sty { + ty::TyTuple(..) => PatKind::Tuple(pats.collect(), None), + + ty::TyAdt(adt, _) => { + let v = ctor.variant_for_adt(adt); + let qpath = hir::QPath::Resolved(None, P(hir::Path { + span: DUMMY_SP, + global: false, + def: Def::Err, + segments: vec![hir::PathSegment::from_name(v.name)].into(), + })); + match v.ctor_kind { + CtorKind::Fictive => { + let field_pats: hir::HirVec<_> = v.fields.iter() + .zip(pats) + .filter(|&(_, ref pat)| pat.node != PatKind::Wild) + .map(|(field, pat)| Spanned { + span: DUMMY_SP, + node: hir::FieldPat { + name: field.name, + pat: pat, + is_shorthand: false, + } + }).collect(); + let has_more_fields = field_pats.len() < arity; + PatKind::Struct(qpath, field_pats, has_more_fields) + } + CtorKind::Fn => { + PatKind::TupleStruct(qpath, pats.collect(), None) + } + CtorKind::Const => PatKind::Path(qpath) + } + } + + ty::TyRef(_, ty::TypeAndMut { mutbl, .. }) => { + PatKind::Ref(pats.nth(0).unwrap(), mutbl) + } + + ty::TySlice(_) | ty::TyArray(..) => { + PatKind::Slice(pats.collect(), None, hir::HirVec::new()) + } + + _ => { + match *ctor { + ConstantValue(ref v) => PatKind::Lit(const_val_to_expr(v)), + _ => PatKind::Wild, + } + } + } + }; + + self.0.push(P(hir::Pat { + id: DUMMY_NODE_ID, + node: pat, + span: DUMMY_SP + })); + + self + } +} + +/// Return the set of constructors from the same type as the first column of `matrix`, +/// that are matched only by wildcard patterns from that first column. 
+/// +/// Therefore, if there is some pattern that is unmatched by `matrix`, it will +/// still be unmatched if the first constructor is replaced by any of the constructors +/// in the return value. +fn missing_constructors(cx: &mut MatchCheckCtxt, + matrix: &Matrix, + pcx: PatternContext) -> Vec { + let used_constructors: Vec = + matrix.0.iter() + .flat_map(|row| pat_constructors(cx, row[0], pcx).unwrap_or(vec![])) + .collect(); + debug!("used_constructors = {:?}", used_constructors); + all_constructors(cx, pcx).into_iter() + .filter(|c| !used_constructors.contains(c)) + .collect() +} + +/// This determines the set of all possible constructors of a pattern matching +/// values of type `left_ty`. For vectors, this would normally be an infinite set +/// +/// This intentionally does not list ConstantValue specializations for +/// non-booleans, because we currently assume that there is always a +/// "non-standard constant" that matches. See issue #12483. +/// +/// but is instead bounded by the maximum fixed length of slice patterns in +/// the column of patterns being analyzed. +fn all_constructors(_cx: &mut MatchCheckCtxt, pcx: PatternContext) -> Vec { + match pcx.ty.sty { + ty::TyBool => + [true, false].iter().map(|b| ConstantValue(ConstVal::Bool(*b))).collect(), + ty::TySlice(_) => + (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect(), + ty::TyArray(_, length) => vec![Slice(length)], + ty::TyAdt(def, _) if def.is_enum() && def.variants.len() > 1 => + def.variants.iter().map(|v| Variant(v.did)).collect(), + _ => vec![Single] + } +} + +fn max_slice_length<'a, 'tcx, I>( + _cx: &mut MatchCheckCtxt<'a, 'tcx>, + patterns: I) -> usize + where I: Iterator> +{ + // The exhaustiveness-checking paper does not include any details on + // checking variable-length slice patterns. However, they are matched + // by an infinite collection of fixed-length array patterns. + // + // Checking the infinite set directly would take an infinite amount + // of time. 
However, it turns out that for each finite set of + // patterns `P`, all sufficiently large array lengths are equivalent: + // + // Each slice `s` with a "sufficiently-large" length `l ≥ L` that applies + // to exactly the subset `Pₜ` of `P` can be transformed to a slice + // `sₘ` for each sufficiently-large length `m` that applies to exactly + // the same subset of `P`. + // + // Because of that, each witness for reachability-checking from one + // of the sufficiently-large lengths can be transformed to an + // equally-valid witness from any other length, so we only have + // to check slice lengths from the "minimal sufficiently-large length" + // and below. + // + // Note that the fact that there is a *single* `sₘ` for each `m` + // not depending on the specific pattern in `P` is important: if + // you look at the pair of patterns + // `[true, ..]` + // `[.., false]` + // Then any slice of length ≥1 that matches one of these two + // patterns can be be trivially turned to a slice of any + // other length ≥1 that matches them and vice-versa - for + // but the slice from length 2 `[false, true]` that matches neither + // of these patterns can't be turned to a slice from length 1 that + // matches neither of these patterns, so we have to consider + // slices from length 2 there. + // + // Now, to see that that length exists and find it, observe that slice + // patterns are either "fixed-length" patterns (`[_, _, _]`) or + // "variable-length" patterns (`[_, .., _]`). + // + // For fixed-length patterns, all slices with lengths *longer* than + // the pattern's length have the same outcome (of not matching), so + // as long as `L` is greater than the pattern's length we can pick + // any `sₘ` from that length and get the same result. + // + // For variable-length patterns, the situation is more complicated, + // because as seen above the precise value of `sₘ` matters. 
+ // + // However, for each variable-length pattern `p` with a prefix of length + // `plₚ` and suffix of length `slₚ`, only the first `plₚ` and the last + // `slₚ` elements are examined. + // + // Therefore, as long as `L` is positive (to avoid concerns about empty + // types), all elements after the maximum prefix length and before + // the maximum suffix length are not examined by any variable-length + // pattern, and therefore can be added/removed without affecting + // them - creating equivalent patterns from any sufficiently-large + // length. + // + // Of course, if fixed-length patterns exist, we must be sure + // that our length is large enough to miss them all, so + // we can pick `L = max(FIXED_LEN+1 ∪ {max(PREFIX_LEN) + max(SUFFIX_LEN)})` + // + // for example, with the above pair of patterns, all elements + // but the first and last can be added/removed, so any + // witness of length ≥2 (say, `[false, false, true]`) can be + // turned to a witness from any other length ≥2. + + let mut max_prefix_len = 0; + let mut max_suffix_len = 0; + let mut max_fixed_len = 0; + + for row in patterns { + match *row.kind { + PatternKind::Constant { value: ConstVal::ByteStr(ref data) } => { + max_fixed_len = cmp::max(max_fixed_len, data.len()); + } + PatternKind::Slice { ref prefix, slice: None, ref suffix } => { + let fixed_len = prefix.len() + suffix.len(); + max_fixed_len = cmp::max(max_fixed_len, fixed_len); + } + PatternKind::Slice { ref prefix, slice: Some(_), ref suffix } => { + max_prefix_len = cmp::max(max_prefix_len, prefix.len()); + max_suffix_len = cmp::max(max_suffix_len, suffix.len()); + } + _ => {} + } + } + + cmp::max(max_fixed_len + 1, max_prefix_len + max_suffix_len) +} + +/// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html +/// +/// Whether a vector `v` of patterns is 'useful' in relation to a set of such +/// vectors `m` is defined as there being a set of inputs that will match `v` +/// but not any of the sets in `m`. 
+/// +/// This is used both for reachability checking (if a pattern isn't useful in +/// relation to preceding patterns, it is not reachable) and exhaustiveness +/// checking (if a wildcard pattern is useful in relation to a matrix, the +/// matrix isn't exhaustive). +/// +/// Note: is_useful doesn't work on empty types, as the paper notes. +/// So it assumes that v is non-empty. +pub fn is_useful<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + matrix: &Matrix<'a, 'tcx>, + v: &[&'a Pattern<'tcx>], + witness: WitnessPreference) + -> Usefulness { + let &Matrix(ref rows) = matrix; + debug!("is_useful({:?}, {:?})", matrix, v); + if rows.is_empty() { + return match witness { + ConstructWitness => UsefulWithWitness(vec![Witness( + repeat(DUMMY_WILD_PAT).take(v.len()).map(|p| P(p.clone())).collect() + )]), + LeaveOutWitness => Useful + }; + } + if rows[0].is_empty() { + return NotUseful; + } + + let &Matrix(ref rows) = matrix; + assert!(rows.iter().all(|r| r.len() == v.len())); + + + let pcx = PatternContext { + ty: rows.iter().map(|r| r[0].ty).find(|ty| !ty.references_error()) + .unwrap_or(v[0].ty), + max_slice_length: max_slice_length(cx, rows.iter().map(|r| r[0]).chain(Some(v[0]))) + }; + + debug!("is_useful_expand_first_col: pcx={:?}, expanding {:?}", pcx, v[0]); + + if let Some(constructors) = pat_constructors(cx, v[0], pcx) { + debug!("is_useful - expanding constructors: {:?}", constructors); + constructors.into_iter().map(|c| + is_useful_specialized(cx, matrix, v, c.clone(), pcx.ty, witness) + ).find(|result| result != &NotUseful).unwrap_or(NotUseful) + } else { + debug!("is_useful - expanding wildcard"); + let constructors = missing_constructors(cx, matrix, pcx); + debug!("is_useful - missing_constructors = {:?}", constructors); + if constructors.is_empty() { + all_constructors(cx, pcx).into_iter().map(|c| { + is_useful_specialized(cx, matrix, v, c.clone(), pcx.ty, witness) + }).find(|result| result != &NotUseful).unwrap_or(NotUseful) + } else { + let matrix = 
rows.iter().filter_map(|r| { + if r[0].is_wildcard() { + Some(r[1..].to_vec()) + } else { + None + } + }).collect(); + match is_useful(cx, &matrix, &v[1..], witness) { + UsefulWithWitness(pats) => { + let cx = &*cx; + UsefulWithWitness(pats.into_iter().flat_map(|witness| { + constructors.iter().map(move |ctor| { + witness.clone().push_wild_constructor(cx, ctor, pcx.ty) + }) + }).collect()) + } + result => result + } + } + } +} + +fn is_useful_specialized<'a, 'tcx>( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + &Matrix(ref m): &Matrix<'a, 'tcx>, + v: &[&'a Pattern<'tcx>], + ctor: Constructor, + lty: Ty<'tcx>, + witness: WitnessPreference) -> Usefulness +{ + let arity = constructor_arity(cx, &ctor, lty); + let matrix = Matrix(m.iter().flat_map(|r| { + specialize(cx, &r[..], &ctor, 0, arity) + }).collect()); + match specialize(cx, v, &ctor, 0, arity) { + Some(v) => match is_useful(cx, &matrix, &v[..], witness) { + UsefulWithWitness(witnesses) => UsefulWithWitness( + witnesses.into_iter() + .map(|witness| witness.apply_constructor(cx, &ctor, lty)) + .collect() + ), + result => result + }, + None => NotUseful + } +} + +/// Determines the constructors that the given pattern can be specialized to. +/// +/// In most cases, there's only one constructor that a specific pattern +/// represents, such as a specific enum variant or a specific literal value. +/// Slice patterns, however, can match slices of different lengths. For instance, +/// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on. +/// +/// Returns None in case of a catch-all, which can't be specialized. +fn pat_constructors(_cx: &mut MatchCheckCtxt, + pat: &Pattern, + pcx: PatternContext) + -> Option> +{ + match *pat.kind { + PatternKind::Binding { .. } | PatternKind::Wild => + None, + PatternKind::Leaf { .. } | PatternKind::Deref { .. } => + Some(vec![Single]), + PatternKind::Variant { adt_def, variant_index, .. 
} => + Some(vec![Variant(adt_def.variants[variant_index].did)]), + PatternKind::Constant { ref value } => + Some(vec![ConstantValue(value.clone())]), + PatternKind::Range { ref lo, ref hi } => + Some(vec![ConstantRange(lo.clone(), hi.clone())]), + PatternKind::Array { .. } => match pcx.ty.sty { + ty::TyArray(_, length) => Some(vec![Slice(length)]), + _ => span_bug!(pat.span, "bad ty {:?} for array pattern", pcx.ty) + }, + PatternKind::Slice { ref prefix, ref slice, ref suffix } => { + let pat_len = prefix.len() + suffix.len(); + if slice.is_some() { + Some((pat_len..pcx.max_slice_length+1).map(Slice).collect()) + } else { + Some(vec![Slice(pat_len)]) + } + } + } +} + +/// This computes the arity of a constructor. The arity of a constructor +/// is how many subpattern patterns of that constructor should be expanded to. +/// +/// For instance, a tuple pattern (_, 42, Some([])) has the arity of 3. +/// A struct pattern's arity is the number of fields it contains, etc. +fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> usize { + debug!("constructor_arity({:?}, {:?})", ctor, ty); + match ty.sty { + ty::TyTuple(ref fs) => fs.len(), + ty::TyBox(_) => 1, + ty::TySlice(..) | ty::TyArray(..) => match *ctor { + Slice(length) => length, + ConstantValue(_) => 0, + _ => bug!("bad slice pattern {:?} {:?}", ctor, ty) + }, + ty::TyRef(..) 
=> 1, + ty::TyAdt(adt, _) => { + ctor.variant_for_adt(adt).fields.len() + } + _ => 0 + } +} + +fn slice_pat_covered_by_constructor(_tcx: TyCtxt, _span: Span, + ctor: &Constructor, + prefix: &[Pattern], + slice: &Option, + suffix: &[Pattern]) + -> Result { + let data = match *ctor { + ConstantValue(ConstVal::ByteStr(ref data)) => data, + _ => bug!() + }; + + let pat_len = prefix.len() + suffix.len(); + if data.len() < pat_len || (slice.is_none() && data.len() > pat_len) { + return Ok(false); + } + + for (ch, pat) in + data[..prefix.len()].iter().zip(prefix).chain( + data[data.len()-suffix.len()..].iter().zip(suffix)) + { + match pat.kind { + box PatternKind::Constant { ref value } => match *value { + ConstVal::Integral(ConstInt::U8(u)) => { + if u != *ch { + return Ok(false); + } + }, + _ => span_bug!(pat.span, "bad const u8 {:?}", value) + }, + _ => {} + } + } + + Ok(true) +} + +fn range_covered_by_constructor(tcx: TyCtxt, span: Span, + ctor: &Constructor, + from: &ConstVal, to: &ConstVal) + -> Result { + let (c_from, c_to) = match *ctor { + ConstantValue(ref value) => (value, value), + ConstantRange(ref from, ref to) => (from, to), + Single => return Ok(true), + _ => bug!() + }; + let cmp_from = compare_const_vals(tcx, span, c_from, from)?; + let cmp_to = compare_const_vals(tcx, span, c_to, to)?; + Ok(cmp_from != Ordering::Less && cmp_to != Ordering::Greater) +} + +fn patterns_for_variant<'a, 'tcx>( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + subpatterns: &'a [FieldPattern<'tcx>], + arity: usize) + -> Vec<&'a Pattern<'tcx>> +{ + let mut result = vec![cx.wild_pattern; arity]; + + for subpat in subpatterns { + result[subpat.field.index()] = &subpat.pattern; + } + + debug!("patterns_for_variant({:?}, {:?}) = {:?}", subpatterns, arity, result); + result +} + +/// This is the main specialization step. It expands the first pattern in the given row +/// into `arity` patterns based on the constructor. 
For most patterns, the step is trivial, +/// for instance tuple patterns are flattened and box patterns expand into their inner pattern. +/// +/// OTOH, slice patterns with a subslice pattern (..tail) can be expanded into multiple +/// different patterns. +/// Structure patterns with a partial wild pattern (Foo { a: 42, .. }) have their missing +/// fields filled with wild patterns. +fn specialize<'a, 'tcx>( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + r: &[&'a Pattern<'tcx>], + constructor: &Constructor, col: usize, arity: usize) + -> Option>> +{ + let pat = &r[col]; + + let head: Option> = match *pat.kind { + PatternKind::Binding { .. } | PatternKind::Wild => + Some(vec![cx.wild_pattern; arity]), + + PatternKind::Variant { adt_def, variant_index, ref subpatterns } => { + let ref variant = adt_def.variants[variant_index]; + if *constructor == Variant(variant.did) { + Some(patterns_for_variant(cx, subpatterns, arity)) + } else { + None + } + } + + PatternKind::Leaf { ref subpatterns } => Some(patterns_for_variant(cx, subpatterns, arity)), + PatternKind::Deref { ref subpattern } => Some(vec![subpattern]), + + PatternKind::Constant { ref value } => { + match *constructor { + Slice(..) 
=> match *value { + ConstVal::ByteStr(ref data) => { + if arity == data.len() { + Some(cx.lower_byte_str_pattern(pat)) + } else { + None + } + } + _ => span_bug!(pat.span, + "unexpected const-val {:?} with ctor {:?}", value, constructor) + }, + _ => { + match range_covered_by_constructor( + cx.tcx, pat.span, constructor, value, value + ) { + Ok(true) => Some(vec![]), + Ok(false) => None, + Err(ErrorReported) => None, + } + } + } + } + + PatternKind::Range { ref lo, ref hi } => { + match range_covered_by_constructor( + cx.tcx, pat.span, constructor, lo, hi + ) { + Ok(true) => Some(vec![]), + Ok(false) => None, + Err(ErrorReported) => None, + } + } + + PatternKind::Array { ref prefix, ref slice, ref suffix } | + PatternKind::Slice { ref prefix, ref slice, ref suffix } => { + match *constructor { + Slice(..) => { + let pat_len = prefix.len() + suffix.len(); + if let Some(slice_count) = arity.checked_sub(pat_len) { + if slice_count == 0 || slice.is_some() { + Some( + prefix.iter().chain( + repeat(cx.wild_pattern).take(slice_count).chain( + suffix.iter() + )).collect()) + } else { + None + } + } else { + None + } + } + ConstantValue(..) => { + match slice_pat_covered_by_constructor( + cx.tcx, pat.span, constructor, prefix, slice, suffix + ) { + Ok(true) => Some(vec![]), + Ok(false) => None, + Err(ErrorReported) => None + } + } + _ => span_bug!(pat.span, + "unexpected ctor {:?} for slice pat", constructor) + } + } + }; + debug!("specialize({:?}, {:?}) = {:?}", r[col], arity, head); + + head.map(|mut head| { + head.extend_from_slice(&r[..col]); + head.extend_from_slice(&r[col + 1..]); + head + }) +} diff --git a/src/librustc_const_eval/check_match.rs b/src/librustc_const_eval/check_match.rs new file mode 100644 index 0000000000000..786b59e818da2 --- /dev/null +++ b/src/librustc_const_eval/check_match.rs @@ -0,0 +1,592 @@ +// Copyright 2012-2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use _match::{MatchCheckCtxt, Matrix, expand_pattern, is_useful}; +use _match::{DUMMY_WILD_PAT}; +use _match::Usefulness::*; +use _match::WitnessPreference::*; + +use pattern::{Pattern, PatternContext, PatternError}; + +use eval::report_const_eval_err; + +use rustc::dep_graph::DepNode; + +use rustc::middle::expr_use_visitor::{ConsumeMode, Delegate, ExprUseVisitor}; +use rustc::middle::expr_use_visitor::{LoanCause, MutateMode}; +use rustc::middle::expr_use_visitor as euv; +use rustc::middle::mem_categorization::{cmt}; +use rustc::session::Session; +use rustc::traits::Reveal; +use rustc::ty::{self, TyCtxt}; +use rustc_errors::DiagnosticBuilder; + +use rustc::hir::def::*; +use rustc::hir::intravisit::{self, Visitor, FnKind, NestedVisitorMap}; +use rustc::hir::print::pat_to_string; +use rustc::hir::{self, Pat, PatKind}; + +use rustc_back::slice; + +use syntax::ast; +use syntax::ptr::P; +use syntax_pos::Span; + +struct OuterVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> } + +impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::None + } + + fn visit_expr(&mut self, _expr: &'tcx hir::Expr) { + return // const, static and N in [T; N] - shouldn't contain anything + } + + fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) { + if let hir::ConstTraitItem(..) = item.node { + return // nothing worth match checking in a constant + } else { + intravisit::walk_trait_item(self, item); + } + } + + fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) { + if let hir::ImplItemKind::Const(..) 
= item.node { + return // nothing worth match checking in a constant + } else { + intravisit::walk_impl_item(self, item); + } + } + + fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, id: ast::NodeId) { + if let FnKind::Closure(..) = fk { + span_bug!(s, "check_match: closure outside of function") + } + + MatchVisitor { + tcx: self.tcx, + param_env: &ty::ParameterEnvironment::for_item(self.tcx, id) + }.visit_fn(fk, fd, b, s, id); + } +} + +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + tcx.visit_all_item_likes_in_krate(DepNode::MatchCheck, + &mut OuterVisitor { tcx: tcx }.as_deep_visitor()); + tcx.sess.abort_if_errors(); +} + +fn create_e0004<'a>(sess: &'a Session, sp: Span, error_message: String) -> DiagnosticBuilder<'a> { + struct_span_err!(sess, sp, E0004, "{}", &error_message) +} + +struct MatchVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &'a ty::ParameterEnvironment<'tcx> +} + +impl<'a, 'tcx> Visitor<'tcx> for MatchVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_expr(&mut self, ex: &'tcx hir::Expr) { + intravisit::walk_expr(self, ex); + + match ex.node { + hir::ExprMatch(ref scrut, ref arms, source) => { + self.check_match(scrut, arms, source, ex.span); + } + _ => {} + } + } + + fn visit_local(&mut self, loc: &'tcx hir::Local) { + intravisit::walk_local(self, loc); + + self.check_irrefutable(&loc.pat, false); + + // Check legality of move bindings and `@` patterns. 
+ self.check_patterns(false, slice::ref_slice(&loc.pat)); + } + + fn visit_fn(&mut self, fk: FnKind<'tcx>, fd: &'tcx hir::FnDecl, + b: hir::ExprId, s: Span, n: ast::NodeId) { + intravisit::walk_fn(self, fk, fd, b, s, n); + + for input in &fd.inputs { + self.check_irrefutable(&input.pat, true); + self.check_patterns(false, slice::ref_slice(&input.pat)); + } + } +} + +impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { + fn check_patterns(&self, has_guard: bool, pats: &[P]) { + check_legality_of_move_bindings(self, has_guard, pats); + for pat in pats { + check_legality_of_bindings_in_at_patterns(self, pat); + } + } + + fn report_inlining_errors(&self, patcx: PatternContext, pat_span: Span) { + for error in patcx.errors { + match error { + PatternError::BadConstInPattern(span, def_id) => { + self.tcx.sess.span_err( + span, + &format!("constants of the type `{}` \ + cannot be used in patterns", + self.tcx.item_path_str(def_id))); + } + PatternError::StaticInPattern(span) => { + span_err!(self.tcx.sess, span, E0158, + "statics cannot be referenced in patterns"); + } + PatternError::ConstEval(err) => { + report_const_eval_err(self.tcx, &err, pat_span, "pattern").emit(); + } + } + } + } + + fn check_match( + &self, + scrut: &hir::Expr, + arms: &[hir::Arm], + source: hir::MatchSource, + span: Span) + { + for arm in arms { + // First, check legality of move bindings. + self.check_patterns(arm.guard.is_some(), &arm.pats); + + // Second, if there is a guard on each arm, make sure it isn't + // assigning or borrowing anything mutably. + if let Some(ref guard) = arm.guard { + check_for_mutation_in_guard(self, &guard); + } + + // Third, perform some lints. 
+ for pat in &arm.pats { + check_for_bindings_named_the_same_as_variants(self, pat); + } + } + + MatchCheckCtxt::create_and_enter(self.tcx, |ref mut cx| { + let mut have_errors = false; + + let inlined_arms : Vec<(Vec<_>, _)> = arms.iter().map(|arm| ( + arm.pats.iter().map(|pat| { + let mut patcx = PatternContext::new(self.tcx); + let pattern = expand_pattern(cx, patcx.lower_pattern(&pat)); + if !patcx.errors.is_empty() { + self.report_inlining_errors(patcx, pat.span); + have_errors = true; + } + (pattern, &**pat) + }).collect(), + arm.guard.as_ref().map(|e| &**e) + )).collect(); + + // Bail out early if inlining failed. + if have_errors { + return; + } + + // Fourth, check for unreachable arms. + check_arms(cx, &inlined_arms, source); + + // Finally, check if the whole match expression is exhaustive. + // Check for empty enum, because is_useful only works on inhabited types. + let pat_ty = self.tcx.tables().node_id_to_type(scrut.id); + if inlined_arms.is_empty() { + if !pat_ty.is_uninhabited(Some(scrut.id), self.tcx) { + // We know the type is inhabited, so this must be wrong + let mut err = create_e0004(self.tcx.sess, span, + format!("non-exhaustive patterns: type {} \ + is non-empty", + pat_ty)); + span_help!(&mut err, span, + "Please ensure that all possible cases are being handled; \ + possibly adding wildcards or more match arms."); + err.emit(); + } + // If the type *is* uninhabited, it's vacuously exhaustive + return; + } + + let matrix: Matrix = inlined_arms + .iter() + .filter(|&&(_, guard)| guard.is_none()) + .flat_map(|arm| &arm.0) + .map(|pat| vec![pat.0]) + .collect(); + check_exhaustive(cx, scrut.span, &matrix, source); + }) + } + + fn check_irrefutable(&self, pat: &Pat, is_fn_arg: bool) { + let origin = if is_fn_arg { + "function argument" + } else { + "local binding" + }; + + MatchCheckCtxt::create_and_enter(self.tcx, |ref mut cx| { + let mut patcx = PatternContext::new(self.tcx); + let pats : Matrix = vec![vec![ + expand_pattern(cx, 
patcx.lower_pattern(pat)) + ]].into_iter().collect(); + + let witness = match is_useful(cx, &pats, &[cx.wild_pattern], ConstructWitness) { + UsefulWithWitness(witness) => witness, + NotUseful => return, + Useful => bug!() + }; + + let pattern_string = pat_to_string(witness[0].single_pattern()); + let mut diag = struct_span_err!( + self.tcx.sess, pat.span, E0005, + "refutable pattern in {}: `{}` not covered", + origin, pattern_string + ); + diag.span_label(pat.span, &format!("pattern `{}` not covered", pattern_string)); + diag.emit(); + }); + } +} + +fn check_for_bindings_named_the_same_as_variants(cx: &MatchVisitor, pat: &Pat) { + pat.walk(|p| { + if let PatKind::Binding(hir::BindByValue(hir::MutImmutable), _, name, None) = p.node { + let pat_ty = cx.tcx.tables().pat_ty(p); + if let ty::TyAdt(edef, _) = pat_ty.sty { + if edef.is_enum() && edef.variants.iter().any(|variant| { + variant.name == name.node && variant.ctor_kind == CtorKind::Const + }) { + let ty_path = cx.tcx.item_path_str(edef.did); + let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, + "pattern binding `{}` is named the same as one \ + of the variants of the type `{}`", + name.node, ty_path); + help!(err, + "if you meant to match on a variant, \ + consider making the path in the pattern qualified: `{}::{}`", + ty_path, name.node); + err.emit(); + } + } + } + true + }); +} + +/// Checks for common cases of "catchall" patterns that may not be intended as such. 
+fn pat_is_catchall(pat: &Pat) -> bool { + match pat.node { + PatKind::Binding(.., None) => true, + PatKind::Binding(.., Some(ref s)) => pat_is_catchall(s), + PatKind::Ref(ref s, _) => pat_is_catchall(s), + PatKind::Tuple(ref v, _) => v.iter().all(|p| { + pat_is_catchall(&p) + }), + _ => false + } +} + +// Check for unreachable patterns +fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + arms: &[(Vec<(&'a Pattern<'tcx>, &hir::Pat)>, Option<&hir::Expr>)], + source: hir::MatchSource) +{ + let mut seen = Matrix::empty(); + let mut catchall = None; + let mut printed_if_let_err = false; + for &(ref pats, guard) in arms { + for &(pat, hir_pat) in pats { + let v = vec![pat]; + + match is_useful(cx, &seen, &v[..], LeaveOutWitness) { + NotUseful => { + match source { + hir::MatchSource::IfLetDesugar { .. } => { + if printed_if_let_err { + // we already printed an irrefutable if-let pattern error. + // We don't want two, that's just confusing. + } else { + // find the first arm pattern so we can use its span + let &(ref first_arm_pats, _) = &arms[0]; + let first_pat = &first_arm_pats[0]; + let span = first_pat.0.span; + struct_span_err!(cx.tcx.sess, span, E0162, + "irrefutable if-let pattern") + .span_label(span, &format!("irrefutable pattern")) + .emit(); + printed_if_let_err = true; + } + }, + + hir::MatchSource::WhileLetDesugar => { + // find the first arm pattern so we can use its span + let &(ref first_arm_pats, _) = &arms[0]; + let first_pat = &first_arm_pats[0]; + let span = first_pat.0.span; + struct_span_err!(cx.tcx.sess, span, E0165, + "irrefutable while-let pattern") + .span_label(span, &format!("irrefutable pattern")) + .emit(); + }, + + hir::MatchSource::ForLoopDesugar => { + // this is a bug, because on `match iter.next()` we cover + // `Some()` and `None`. 
It's impossible to have an unreachable + // pattern + // (see libsyntax/ext/expand.rs for the full expansion of a for loop) + span_bug!(pat.span, "unreachable for-loop pattern") + }, + + hir::MatchSource::Normal => { + let mut err = struct_span_err!(cx.tcx.sess, pat.span, E0001, + "unreachable pattern"); + err.span_label(pat.span, &"this is an unreachable pattern"); + // if we had a catchall pattern, hint at that + if let Some(catchall) = catchall { + err.span_note(catchall, "this pattern matches any value"); + } + err.emit(); + }, + + hir::MatchSource::TryDesugar => { + span_bug!(pat.span, "unreachable try pattern") + }, + } + } + Useful => (), + UsefulWithWitness(_) => bug!() + } + if guard.is_none() { + seen.push(v); + if catchall.is_none() && pat_is_catchall(hir_pat) { + catchall = Some(pat.span); + } + } + } + } +} + +fn check_exhaustive<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, + sp: Span, + matrix: &Matrix<'a, 'tcx>, + source: hir::MatchSource) { + match is_useful(cx, matrix, &[cx.wild_pattern], ConstructWitness) { + UsefulWithWitness(pats) => { + let witnesses = if pats.is_empty() { + vec![DUMMY_WILD_PAT] + } else { + pats.iter().map(|w| w.single_pattern()).collect() + }; + match source { + hir::MatchSource::ForLoopDesugar => { + // `witnesses[0]` has the form `Some()`, peel off the `Some` + let witness = match witnesses[0].node { + PatKind::TupleStruct(_, ref pats, _) => match &pats[..] 
{ + &[ref pat] => &**pat, + _ => bug!(), + }, + _ => bug!(), + }; + let pattern_string = pat_to_string(witness); + struct_span_err!(cx.tcx.sess, sp, E0297, + "refutable pattern in `for` loop binding: \ + `{}` not covered", + pattern_string) + .span_label(sp, &format!("pattern `{}` not covered", pattern_string)) + .emit(); + }, + _ => { + let pattern_strings: Vec<_> = witnesses.iter().map(|w| { + pat_to_string(w) + }).collect(); + const LIMIT: usize = 3; + let joined_patterns = match pattern_strings.len() { + 0 => bug!(), + 1 => format!("`{}`", pattern_strings[0]), + 2...LIMIT => { + let (tail, head) = pattern_strings.split_last().unwrap(); + format!("`{}`", head.join("`, `") + "` and `" + tail) + }, + _ => { + let (head, tail) = pattern_strings.split_at(LIMIT); + format!("`{}` and {} more", head.join("`, `"), tail.len()) + } + }; + + let label_text = match pattern_strings.len(){ + 1 => format!("pattern {} not covered", joined_patterns), + _ => format!("patterns {} not covered", joined_patterns) + }; + create_e0004(cx.tcx.sess, sp, + format!("non-exhaustive patterns: {} not covered", + joined_patterns)) + .span_label(sp, &label_text) + .emit(); + }, + } + } + NotUseful => { + // This is good, wildcard pattern isn't reachable + }, + _ => bug!() + } +} + +// Legality of move bindings checking +fn check_legality_of_move_bindings(cx: &MatchVisitor, + has_guard: bool, + pats: &[P]) { + let mut by_ref_span = None; + for pat in pats { + pat.each_binding(|bm, _, span, _path| { + if let hir::BindByRef(..) = bm { + by_ref_span = Some(span); + } + }) + } + + let check_move = |p: &Pat, sub: Option<&Pat>| { + // check legality of moving out of the enum + + // x @ Foo(..) is legal, but x @ Foo(y) isn't. 
+ if sub.map_or(false, |p| p.contains_bindings()) { + struct_span_err!(cx.tcx.sess, p.span, E0007, + "cannot bind by-move with sub-bindings") + .span_label(p.span, &format!("binds an already bound by-move value by moving it")) + .emit(); + } else if has_guard { + struct_span_err!(cx.tcx.sess, p.span, E0008, + "cannot bind by-move into a pattern guard") + .span_label(p.span, &format!("moves value into pattern guard")) + .emit(); + } else if by_ref_span.is_some() { + struct_span_err!(cx.tcx.sess, p.span, E0009, + "cannot bind by-move and by-ref in the same pattern") + .span_label(p.span, &format!("by-move pattern here")) + .span_label(by_ref_span.unwrap(), &format!("both by-ref and by-move used")) + .emit(); + } + }; + + for pat in pats { + pat.walk(|p| { + if let PatKind::Binding(hir::BindByValue(..), _, _, ref sub) = p.node { + let pat_ty = cx.tcx.tables().node_id_to_type(p.id); + if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { + check_move(p, sub.as_ref().map(|p| &**p)); + } + } + true + }); + } +} + +/// Ensures that a pattern guard doesn't borrow by mutable reference or +/// assign. +/// +/// FIXME: this should be done by borrowck. 
+fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Expr) { + cx.tcx.infer_ctxt(None, Some(cx.param_env.clone()), + Reveal::NotSpecializable).enter(|infcx| { + let mut checker = MutationChecker { + cx: cx, + }; + let mut visitor = ExprUseVisitor::new(&mut checker, &infcx); + visitor.walk_expr(guard); + }); +} + +struct MutationChecker<'a, 'gcx: 'a> { + cx: &'a MatchVisitor<'a, 'gcx>, +} + +impl<'a, 'gcx, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'gcx> { + fn matched_pat(&mut self, _: &Pat, _: cmt, _: euv::MatchMode) {} + fn consume(&mut self, _: ast::NodeId, _: Span, _: cmt, _: ConsumeMode) {} + fn consume_pat(&mut self, _: &Pat, _: cmt, _: ConsumeMode) {} + fn borrow(&mut self, + _: ast::NodeId, + span: Span, + _: cmt, + _: &'tcx ty::Region, + kind:ty:: BorrowKind, + _: LoanCause) { + match kind { + ty::MutBorrow => { + struct_span_err!(self.cx.tcx.sess, span, E0301, + "cannot mutably borrow in a pattern guard") + .span_label(span, &format!("borrowed mutably in pattern guard")) + .emit(); + } + ty::ImmBorrow | ty::UniqueImmBorrow => {} + } + } + fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} + fn mutate(&mut self, _: ast::NodeId, span: Span, _: cmt, mode: MutateMode) { + match mode { + MutateMode::JustWrite | MutateMode::WriteAndRead => { + struct_span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard") + .span_label(span, &format!("assignment in pattern guard")) + .emit(); + } + MutateMode::Init => {} + } + } +} + +/// Forbids bindings in `@` patterns. This is necessary for memory safety, +/// because of the way rvalues are handled in the borrow check. (See issue +/// #14587.) 
+fn check_legality_of_bindings_in_at_patterns(cx: &MatchVisitor, pat: &Pat) { + AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat); +} + +struct AtBindingPatternVisitor<'a, 'b:'a, 'tcx:'b> { + cx: &'a MatchVisitor<'b, 'tcx>, + bindings_allowed: bool +} + +impl<'a, 'b, 'tcx, 'v> Visitor<'v> for AtBindingPatternVisitor<'a, 'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None + } + + fn visit_pat(&mut self, pat: &Pat) { + match pat.node { + PatKind::Binding(.., ref subpat) => { + if !self.bindings_allowed { + struct_span_err!(self.cx.tcx.sess, pat.span, E0303, + "pattern bindings are not allowed after an `@`") + .span_label(pat.span, &format!("not allowed after `@`")) + .emit(); + } + + if subpat.is_some() { + let bindings_were_allowed = self.bindings_allowed; + self.bindings_allowed = false; + intravisit::walk_pat(self, pat); + self.bindings_allowed = bindings_were_allowed; + } + } + _ => intravisit::walk_pat(self, pat), + } + } +} diff --git a/src/librustc_const_eval/diagnostics.rs b/src/librustc_const_eval/diagnostics.rs new file mode 100644 index 0000000000000..83b0d9dec6d90 --- /dev/null +++ b/src/librustc_const_eval/diagnostics.rs @@ -0,0 +1,600 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +// Error messages for EXXXX errors. +// Each message should start and end with a new line, and be wrapped to 80 characters. +// In vim you can `:set tw=80` and use `gq` to wrap paragraphs. Use `:set tw=0` to disable. +register_long_diagnostics! 
{ + +E0001: r##" +This error suggests that the expression arm corresponding to the noted pattern +will never be reached as for all possible values of the expression being +matched, one of the preceding patterns will match. + +This means that perhaps some of the preceding patterns are too general, this +one is too specific or the ordering is incorrect. + +For example, the following `match` block has too many arms: + +```compile_fail,E0001 +match Some(0) { + Some(bar) => {/* ... */} + None => {/* ... */} + _ => {/* ... */} // All possible cases have already been handled +} +``` + +`match` blocks have their patterns matched in order, so, for example, putting +a wildcard arm above a more specific arm will make the latter arm irrelevant. + +Ensure the ordering of the match arm is correct and remove any superfluous +arms. +"##, + +E0002: r##" +## Note: this error code is no longer emitted by the compiler. + +This error indicates that an empty match expression is invalid because the type +it is matching on is non-empty (there exist values of this type). In safe code +it is impossible to create an instance of an empty type, so empty match +expressions are almost never desired. This error is typically fixed by adding +one or more cases to the match expression. + +An example of an empty type is `enum Empty { }`. So, the following will work: + +``` +enum Empty {} + +fn foo(x: Empty) { + match x { + // empty + } +} +``` + +However, this won't: + +```compile_fail +fn foo(x: Option) { + match x { + // empty + } +} +``` +"##, + +E0003: r##" +## Note: this error code is no longer emitted by the compiler. + +Not-a-Number (NaN) values cannot be compared for equality and hence can never +match the input to a match expression. So, the following will not compile: + +```compile_fail +const NAN: f32 = 0.0 / 0.0; + +let number = 0.1f32; + +match number { + NAN => { /* ... 
*/ }, + _ => {} +} +``` + +To match against NaN values, you should instead use the `is_nan()` method in a +guard, like so: + +``` +let number = 0.1f32; + +match number { + x if x.is_nan() => { /* ... */ } + _ => {} +} +``` +"##, + +E0004: r##" +This error indicates that the compiler cannot guarantee a matching pattern for +one or more possible inputs to a match expression. Guaranteed matches are +required in order to assign values to match expressions, or alternatively, +determine the flow of execution. Erroneous code example: + +```compile_fail,E0004 +enum Terminator { + HastaLaVistaBaby, + TalkToMyHand, +} + +let x = Terminator::HastaLaVistaBaby; + +match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered + Terminator::TalkToMyHand => {} +} +``` + +If you encounter this error you must alter your patterns so that every possible +value of the input type is matched. For types with a small number of variants +(like enums) you should probably cover all cases explicitly. Alternatively, the +underscore `_` wildcard pattern can be added after all other patterns to match +"anything else". Example: + +``` +enum Terminator { + HastaLaVistaBaby, + TalkToMyHand, +} + +let x = Terminator::HastaLaVistaBaby; + +match x { + Terminator::TalkToMyHand => {} + Terminator::HastaLaVistaBaby => {} +} + +// or: + +match x { + Terminator::TalkToMyHand => {} + _ => {} +} +``` +"##, + +E0005: r##" +Patterns used to bind names must be irrefutable, that is, they must guarantee +that a name will be extracted in all cases. Erroneous code example: + +```compile_fail,E0005 +let x = Some(1); +let Some(y) = x; +// error: refutable pattern in local binding: `None` not covered +``` + +If you encounter this error you probably need to use a `match` or `if let` to +deal with the possibility of failure. 
Example: + +``` +let x = Some(1); + +match x { + Some(y) => { + // do something + }, + None => {} +} + +// or: + +if let Some(y) = x { + // do something +} +``` +"##, + +E0007: r##" +This error indicates that the bindings in a match arm would require a value to +be moved into more than one location, thus violating unique ownership. Code +like the following is invalid as it requires the entire `Option` to be +moved into a variable called `op_string` while simultaneously requiring the +inner `String` to be moved into a variable called `s`. + +```compile_fail,E0007 +let x = Some("s".to_string()); + +match x { + op_string @ Some(s) => {}, // error: cannot bind by-move with sub-bindings + None => {}, +} +``` + +See also the error E0303. +"##, + +E0008: r##" +Names bound in match arms retain their type in pattern guards. As such, if a +name is bound by move in a pattern, it should also be moved to wherever it is +referenced in the pattern guard code. Doing so however would prevent the name +from being available in the body of the match arm. Consider the following: + +```compile_fail,E0008 +match Some("hi".to_string()) { + Some(s) if s.len() == 0 => {}, // use s. + _ => {}, +} +``` + +The variable `s` has type `String`, and its use in the guard is as a variable of +type `String`. The guard code effectively executes in a separate scope to the +body of the arm, so the value would be moved into this anonymous scope and +therefore becomes unavailable in the body of the arm. + +The problem above can be solved by using the `ref` keyword. 
+ +``` +match Some("hi".to_string()) { + Some(ref s) if s.len() == 0 => {}, + _ => {}, +} +``` + +Though this example seems innocuous and easy to solve, the problem becomes clear +when it encounters functions which consume the value: + +```compile_fail,E0008 +struct A{} + +impl A { + fn consume(self) -> usize { + 0 + } +} + +fn main() { + let a = Some(A{}); + match a { + Some(y) if y.consume() > 0 => {} + _ => {} + } +} +``` + +In this situation, even the `ref` keyword cannot solve it, since borrowed +content cannot be moved. This problem cannot be solved generally. If the value +can be cloned, here is a not-so-specific solution: + +``` +#[derive(Clone)] +struct A{} + +impl A { + fn consume(self) -> usize { + 0 + } +} + +fn main() { + let a = Some(A{}); + match a{ + Some(ref y) if y.clone().consume() > 0 => {} + _ => {} + } +} +``` + +If the value will be consumed in the pattern guard, using its clone will not +move its ownership, so the code works. +"##, + +E0009: r##" +In a pattern, all values that don't implement the `Copy` trait have to be bound +the same way. The goal here is to avoid binding simultaneously by-move and +by-ref. + +This limitation may be removed in a future version of Rust. + +Erroneous code example: + +```compile_fail,E0009 +struct X { x: (), } + +let x = Some((X { x: () }, X { x: () })); +match x { + Some((y, ref z)) => {}, // error: cannot bind by-move and by-ref in the + // same pattern + None => panic!() +} +``` + +You have two solutions: + +Solution #1: Bind the pattern's values the same way. + +``` +struct X { x: (), } + +let x = Some((X { x: () }, X { x: () })); +match x { + Some((ref y, ref z)) => {}, + // or Some((y, z)) => {} + None => panic!() +} +``` + +Solution #2: Implement the `Copy` trait for the `X` structure. + +However, please keep in mind that the first solution should be preferred. 
+ +``` +#[derive(Clone, Copy)] +struct X { x: (), } + +let x = Some((X { x: () }, X { x: () })); +match x { + Some((y, ref z)) => {}, + None => panic!() +} +``` +"##, + +E0158: r##" +`const` and `static` mean different things. A `const` is a compile-time +constant, an alias for a literal value. This property means you can match it +directly within a pattern. + +The `static` keyword, on the other hand, guarantees a fixed location in memory. +This does not always mean that the value is constant. For example, a global +mutex can be declared `static` as well. + +If you want to match against a `static`, consider using a guard instead: + +``` +static FORTY_TWO: i32 = 42; + +match Some(42) { + Some(x) if x == FORTY_TWO => {} + _ => {} +} +``` +"##, + +E0162: r##" +An if-let pattern attempts to match the pattern, and enters the body if the +match was successful. If the match is irrefutable (when it cannot fail to +match), use a regular `let`-binding instead. For instance: + +```compile_fail,E0162 +struct Irrefutable(i32); +let irr = Irrefutable(0); + +// This fails to compile because the match is irrefutable. +if let Irrefutable(x) = irr { + // This body will always be executed. + // ... +} +``` + +Try this instead: + +``` +struct Irrefutable(i32); +let irr = Irrefutable(0); + +let Irrefutable(x) = irr; +println!("{}", x); +``` +"##, + +E0165: r##" +A while-let pattern attempts to match the pattern, and enters the body if the +match was successful. If the match is irrefutable (when it cannot fail to +match), use a regular `let`-binding inside a `loop` instead. For instance: + +```compile_fail,E0165 +struct Irrefutable(i32); +let irr = Irrefutable(0); + +// This fails to compile because the match is irrefutable. +while let Irrefutable(x) = irr { + // ... +} +``` + +Try this instead: + +```no_run +struct Irrefutable(i32); +let irr = Irrefutable(0); + +loop { + let Irrefutable(x) = irr; + // ... +} +``` +"##, + +E0170: r##" +Enum variants are qualified by default. 
For example, given this type: + +``` +enum Method { + GET, + POST, +} +``` + +You would match it using: + +``` +enum Method { + GET, + POST, +} + +let m = Method::GET; + +match m { + Method::GET => {}, + Method::POST => {}, +} +``` + +If you don't qualify the names, the code will bind new variables named "GET" and +"POST" instead. This behavior is likely not what you want, so `rustc` warns when +that happens. + +Qualified names are good practice, and most code works well with them. But if +you prefer them unqualified, you can import the variants into scope: + +```ignore +use Method::*; +enum Method { GET, POST } +``` + +If you want others to be able to import variants from your module directly, use +`pub use`: + +```ignore +pub use Method::*; +enum Method { GET, POST } +``` +"##, + + +E0297: r##" +Patterns used to bind names must be irrefutable. That is, they must guarantee +that a name will be extracted in all cases. Instead of pattern matching the +loop variable, consider using a `match` or `if let` inside the loop body. For +instance: + +```compile_fail,E0297 +let xs : Vec> = vec![Some(1), None]; + +// This fails because `None` is not covered. +for Some(x) in xs { + // ... +} +``` + +Match inside the loop instead: + +``` +let xs : Vec> = vec![Some(1), None]; + +for item in xs { + match item { + Some(x) => {}, + None => {}, + } +} +``` + +Or use `if let`: + +``` +let xs : Vec> = vec![Some(1), None]; + +for item in xs { + if let Some(x) = item { + // ... + } +} +``` +"##, + +E0301: r##" +Mutable borrows are not allowed in pattern guards, because matching cannot have +side effects. Side effects could alter the matched object or the environment +on which the match depends in such a way, that the match would not be +exhaustive. 
For instance, the following would not match any arm if mutable +borrows were allowed: + +```compile_fail,E0301 +match Some(()) { + None => { }, + option if option.take().is_none() => { + /* impossible, option is `Some` */ + }, + Some(_) => { } // When the previous match failed, the option became `None`. +} +``` +"##, + +E0302: r##" +Assignments are not allowed in pattern guards, because matching cannot have +side effects. Side effects could alter the matched object or the environment +on which the match depends in such a way, that the match would not be +exhaustive. For instance, the following would not match any arm if assignments +were allowed: + +```compile_fail,E0302 +match Some(()) { + None => { }, + option if { option = None; false } => { }, + Some(_) => { } // When the previous match failed, the option became `None`. +} +``` +"##, + +E0303: r##" +In certain cases it is possible for sub-bindings to violate memory safety. +Updates to the borrow checker in a future version of Rust may remove this +restriction, but for now patterns must be rewritten without sub-bindings. + +Before: + +```compile_fail,E0303 +match Some("hi".to_string()) { + ref op_string_ref @ Some(s) => {}, + None => {}, +} +``` + +After: + +``` +match Some("hi".to_string()) { + Some(ref s) => { + let op_string_ref = &Some(s); + // ... + }, + None => {}, +} +``` + +The `op_string_ref` binding has type `&Option<&String>` in both cases. + +See also https://github.com/rust-lang/rust/issues/14587 +"##, + +E0080: r##" +This error indicates that the compiler was unable to sensibly evaluate an +constant expression that had to be evaluated. Attempting to divide by 0 +or causing integer overflow are two ways to induce this error. For example: + +```compile_fail,E0080 +enum Enum { + X = (1 << 500), + Y = (1 / 0) +} +``` + +Ensure that the expressions given can be evaluated as the desired integer type. 
+See the FFI section of the Reference for more information about using a custom +integer type: + +https://doc.rust-lang.org/reference.html#ffi-attributes +"##, + + +E0306: r##" +In an array literal `[x; N]`, `N` is the number of elements in the array. This +must be an unsigned integer. Erroneous code example: + +```compile_fail,E0306 +let x = [0i32; true]; // error: expected positive integer for repeat count, + // found boolean +``` + +Working example: + +``` +let x = [0i32; 2]; +``` +"##, +} + + +register_diagnostics! { + E0298, // cannot compare constants +// E0299, // mismatched types between arms +// E0471, // constant evaluation error (in pattern) +} diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs new file mode 100644 index 0000000000000..9fcab1239899f --- /dev/null +++ b/src/librustc_const_eval/eval.rs @@ -0,0 +1,1384 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//#![allow(non_camel_case_types)] + +use rustc::middle::const_val::ConstVal::*; +use rustc::middle::const_val::ConstVal; +use self::ErrKind::*; +use self::EvalHint::*; + +use rustc::hir::map as ast_map; +use rustc::hir::map::blocks::FnLikeNode; +use rustc::middle::cstore::InlinedItem; +use rustc::traits; +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def_id::DefId; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::util::IntTypeExt; +use rustc::ty::subst::Substs; +use rustc::traits::Reveal; +use rustc::util::common::ErrorReported; +use rustc::util::nodemap::DefIdMap; +use rustc::lint; + +use graphviz::IntoCow; +use syntax::ast; +use rustc::hir::{Expr, PatKind}; +use rustc::hir; +use syntax::ptr::P; +use syntax::codemap; +use syntax::attr::IntType; +use syntax_pos::{self, Span}; + +use std::borrow::Cow; +use std::cmp::Ordering; + +use rustc_const_math::*; +use rustc_errors::DiagnosticBuilder; + +macro_rules! math { + ($e:expr, $op:expr) => { + match $op { + Ok(val) => val, + Err(e) => signal!($e, Math(e)), + } + } +} + +fn lookup_variant_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + variant_def: DefId) + -> Option<&'tcx Expr> { + fn variant_expr<'a>(variants: &'a [hir::Variant], id: ast::NodeId) + -> Option<&'a Expr> { + for variant in variants { + if variant.node.data.id() == id { + return variant.node.disr_expr.as_ref().map(|e| &**e); + } + } + None + } + + if let Some(variant_node_id) = tcx.map.as_local_node_id(variant_def) { + let enum_node_id = tcx.map.get_parent(variant_node_id); + match tcx.map.find(enum_node_id) { + None => None, + Some(ast_map::NodeItem(it)) => match it.node { + hir::ItemEnum(hir::EnumDef { ref variants }, _) => { + variant_expr(variants, variant_node_id) + } + _ => None + }, + Some(_) => None + } + } else { + None + } +} + +/// * `def_id` is the id of the constant. +/// * `substs` is the monomorphized substitutions for the expression. +/// +/// `substs` is optional and is used for associated constants. 
+/// This generally happens in late/trans const evaluation. +pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: Option<&'tcx Substs<'tcx>>) + -> Option<(&'tcx Expr, Option>)> { + if let Some(node_id) = tcx.map.as_local_node_id(def_id) { + match tcx.map.find(node_id) { + None => None, + Some(ast_map::NodeItem(it)) => match it.node { + hir::ItemConst(ref ty, ref const_expr) => { + Some((&const_expr, tcx.ast_ty_to_prim_ty(ty))) + } + _ => None + }, + Some(ast_map::NodeTraitItem(ti)) => match ti.node { + hir::ConstTraitItem(ref ty, ref expr_option) => { + if let Some(substs) = substs { + // If we have a trait item and the substitutions for it, + // `resolve_trait_associated_const` will select an impl + // or the default. + let trait_id = tcx.map.get_parent(node_id); + let trait_id = tcx.map.local_def_id(trait_id); + let default_value = expr_option.as_ref() + .map(|expr| (&**expr, tcx.ast_ty_to_prim_ty(ty))); + resolve_trait_associated_const(tcx, def_id, default_value, trait_id, substs) + } else { + // Technically, without knowing anything about the + // expression that generates the obligation, we could + // still return the default if there is one. However, + // it's safer to return `None` than to return some value + // that may differ from what you would get from + // correctly selecting an impl. + None + } + } + _ => None + }, + Some(ast_map::NodeImplItem(ii)) => match ii.node { + hir::ImplItemKind::Const(ref ty, ref expr) => { + Some((&expr, tcx.ast_ty_to_prim_ty(ty))) + } + _ => None + }, + Some(_) => None + } + } else { + match tcx.extern_const_statics.borrow().get(&def_id) { + Some(&None) => return None, + Some(&Some((expr_id, ty))) => { + return Some((tcx.map.expect_expr(expr_id), ty)); + } + None => {} + } + let mut used_substs = false; + let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) { + Some((&InlinedItem { body: ref const_expr, .. 
}, _)) => { + Some((&**const_expr, Some(tcx.sess.cstore.item_type(tcx, def_id)))) + } + _ => None + }; + let expr_ty = match tcx.sess.cstore.describe_def(def_id) { + Some(Def::AssociatedConst(_)) => { + let trait_id = tcx.sess.cstore.trait_of_item(def_id); + // As mentioned in the comments above for in-crate + // constants, we only try to find the expression for a + // trait-associated const if the caller gives us the + // substitutions for the reference to it. + if let Some(trait_id) = trait_id { + used_substs = true; + + if let Some(substs) = substs { + resolve_trait_associated_const(tcx, def_id, expr_ty, trait_id, substs) + } else { + None + } + } else { + expr_ty + } + }, + Some(Def::Const(..)) => expr_ty, + _ => None + }; + // If we used the substitutions, particularly to choose an impl + // of a trait-associated const, don't cache that, because the next + // lookup with the same def_id may yield a different result. + if !used_substs { + tcx.extern_const_statics + .borrow_mut() + .insert(def_id, expr_ty.map(|(e, t)| (e.id, t))); + } + expr_ty + } +} + +fn inline_const_fn_from_external_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> Option { + match tcx.extern_const_fns.borrow().get(&def_id) { + Some(&ast::DUMMY_NODE_ID) => return None, + Some(&fn_id) => return Some(fn_id), + None => {} + } + + if !tcx.sess.cstore.is_const_fn(def_id) { + tcx.extern_const_fns.borrow_mut().insert(def_id, ast::DUMMY_NODE_ID); + return None; + } + + let fn_id = tcx.sess.cstore.maybe_get_item_ast(tcx, def_id).map(|t| t.1); + tcx.extern_const_fns.borrow_mut().insert(def_id, + fn_id.unwrap_or(ast::DUMMY_NODE_ID)); + fn_id +} + +pub enum ConstFnNode<'tcx> { + Local(FnLikeNode<'tcx>), + Inlined(&'tcx InlinedItem) +} + +pub fn lookup_const_fn_by_id<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> Option> +{ + let fn_id = if let Some(node_id) = tcx.map.as_local_node_id(def_id) { + node_id + } else { + if let Some(fn_id) = 
inline_const_fn_from_external_crate(tcx, def_id) { + if let ast_map::NodeInlinedItem(ii) = tcx.map.get(fn_id) { + return Some(ConstFnNode::Inlined(ii)); + } else { + bug!("Got const fn from external crate, but it's not inlined") + } + } else { + return None; + } + }; + + let fn_like = match FnLikeNode::from_node(tcx.map.get(fn_id)) { + Some(fn_like) => fn_like, + None => return None + }; + + if fn_like.constness() == hir::Constness::Const { + Some(ConstFnNode::Local(fn_like)) + } else { + None + } +} + +pub fn const_expr_to_pat<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + expr: &Expr, + pat_id: ast::NodeId, + span: Span) + -> Result, DefId> { + let pat_ty = tcx.tables().expr_ty(expr); + debug!("expr={:?} pat_ty={:?} pat_id={}", expr, pat_ty, pat_id); + match pat_ty.sty { + ty::TyFloat(_) => { + tcx.sess.add_lint( + lint::builtin::ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN, + pat_id, + span, + format!("floating point constants cannot be used in patterns")); + } + ty::TyAdt(adt_def, _) if adt_def.is_union() => { + // Matching on union fields is unsafe, we can't hide it in constants + tcx.sess.span_err(span, "cannot use unions in constant patterns"); + } + ty::TyAdt(adt_def, _) => { + if !tcx.has_attr(adt_def.did, "structural_match") { + tcx.sess.add_lint( + lint::builtin::ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN, + pat_id, + span, + format!("to use a constant of type `{}` \ + in a pattern, \ + `{}` must be annotated with `#[derive(PartialEq, Eq)]`", + tcx.item_path_str(adt_def.did), + tcx.item_path_str(adt_def.did))); + } + } + _ => { } + } + let pat = match expr.node { + hir::ExprTup(ref exprs) => + PatKind::Tuple(exprs.iter() + .map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span)) + .collect::>()?, None), + + hir::ExprCall(ref callee, ref args) => { + let qpath = match callee.node { + hir::ExprPath(ref qpath) => qpath, + _ => bug!() + }; + let def = tcx.tables().qpath_def(qpath, callee.id); + let ctor_path = if let hir::QPath::Resolved(_, ref path) = *qpath { + match 
def { + Def::StructCtor(_, CtorKind::Fn) | + Def::VariantCtor(_, CtorKind::Fn) => { + Some(path.clone()) + } + _ => None + } + } else { + None + }; + match (def, ctor_path) { + (Def::Fn(..), None) | (Def::Method(..), None) => { + PatKind::Lit(P(expr.clone())) + } + (_, Some(ctor_path)) => { + let pats = args.iter() + .map(|expr| const_expr_to_pat(tcx, expr, pat_id, span)) + .collect::>()?; + PatKind::TupleStruct(hir::QPath::Resolved(None, ctor_path), pats, None) + } + _ => bug!() + } + } + + hir::ExprStruct(ref qpath, ref fields, None) => { + let field_pats = + fields.iter() + .map(|field| Ok(codemap::Spanned { + span: syntax_pos::DUMMY_SP, + node: hir::FieldPat { + name: field.name.node, + pat: const_expr_to_pat(tcx, &field.expr, pat_id, span)?, + is_shorthand: false, + }, + })) + .collect::>()?; + PatKind::Struct(qpath.clone(), field_pats, false) + } + + hir::ExprArray(ref exprs) => { + let pats = exprs.iter() + .map(|expr| const_expr_to_pat(tcx, &expr, pat_id, span)) + .collect::>()?; + PatKind::Slice(pats, None, hir::HirVec::new()) + } + + hir::ExprPath(ref qpath) => { + let def = tcx.tables().qpath_def(qpath, expr.id); + match def { + Def::StructCtor(_, CtorKind::Const) | + Def::VariantCtor(_, CtorKind::Const) => { + match expr.node { + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + PatKind::Path(hir::QPath::Resolved(None, path.clone())) + } + _ => bug!() + } + } + Def::Const(def_id) | Def::AssociatedConst(def_id) => { + let substs = Some(tcx.tables().node_id_item_substs(expr.id) + .unwrap_or_else(|| tcx.intern_substs(&[]))); + let (expr, _ty) = lookup_const_by_id(tcx, def_id, substs).unwrap(); + return const_expr_to_pat(tcx, expr, pat_id, span); + }, + _ => bug!(), + } + } + + _ => PatKind::Lit(P(expr.clone())) + }; + Ok(P(hir::Pat { id: expr.id, node: pat, span: span })) +} + +pub fn report_const_eval_err<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + err: &ConstEvalErr, + primary_span: Span, + primary_kind: &str) + -> DiagnosticBuilder<'tcx> +{ + let 
mut err = err; + while let &ConstEvalErr { kind: ErroneousReferencedConstant(box ref i_err), .. } = err { + err = i_err; + } + + let mut diag = struct_span_err!(tcx.sess, err.span, E0080, "constant evaluation error"); + note_const_eval_err(tcx, err, primary_span, primary_kind, &mut diag); + diag +} + +pub fn fatal_const_eval_err<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + err: &ConstEvalErr, + primary_span: Span, + primary_kind: &str) + -> ! +{ + report_const_eval_err(tcx, err, primary_span, primary_kind).emit(); + tcx.sess.abort_if_errors(); + unreachable!() +} + +pub fn note_const_eval_err<'a, 'tcx>( + _tcx: TyCtxt<'a, 'tcx, 'tcx>, + err: &ConstEvalErr, + primary_span: Span, + primary_kind: &str, + diag: &mut DiagnosticBuilder) +{ + match err.description() { + ConstEvalErrDescription::Simple(message) => { + diag.span_label(err.span, &message); + } + } + + if !primary_span.contains(err.span) { + diag.span_note(primary_span, + &format!("for {} here", primary_kind)); + } +} + +pub fn eval_const_expr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + e: &Expr) -> ConstVal { + match eval_const_expr_checked(tcx, e) { + Ok(r) => r, + // non-const path still needs to be a fatal error, because enums are funky + Err(s) => { + report_const_eval_err(tcx, &s, e.span, "expression").emit(); + match s.kind { + NonConstPath | + UnimplementedConstVal(_) => tcx.sess.abort_if_errors(), + _ => {} + } + Dummy + }, + } +} + +pub fn eval_const_expr_checked<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + e: &Expr) -> EvalResult +{ + eval_const_expr_partial(tcx, e, ExprTypeChecked, None) +} + +pub type FnArgMap<'a> = Option<&'a DefIdMap>; + +#[derive(Clone, Debug)] +pub struct ConstEvalErr { + pub span: Span, + pub kind: ErrKind, +} + +#[derive(Clone, Debug)] +pub enum ErrKind { + CannotCast, + CannotCastTo(&'static str), + InvalidOpForInts(hir::BinOp_), + InvalidOpForBools(hir::BinOp_), + InvalidOpForFloats(hir::BinOp_), + InvalidOpForIntUint(hir::BinOp_), + InvalidOpForUintInt(hir::BinOp_), + 
NegateOn(ConstVal), + NotOn(ConstVal), + CallOn(ConstVal), + + MissingStructField, + NonConstPath, + UnimplementedConstVal(&'static str), + UnresolvedPath, + ExpectedConstTuple, + ExpectedConstStruct, + TupleIndexOutOfBounds, + IndexedNonVec, + IndexNegative, + IndexNotInt, + IndexOutOfBounds { len: u64, index: u64 }, + RepeatCountNotNatural, + RepeatCountNotInt, + + MiscBinaryOp, + MiscCatchAll, + + IndexOpFeatureGated, + Math(ConstMathErr), + + IntermediateUnsignedNegative, + /// Expected, Got + TypeMismatch(String, ConstInt), + + BadType(ConstVal), + ErroneousReferencedConstant(Box), + CharCast(ConstInt), +} + +impl From for ErrKind { + fn from(err: ConstMathErr) -> ErrKind { + Math(err) + } +} + +#[derive(Clone, Debug)] +pub enum ConstEvalErrDescription<'a> { + Simple(Cow<'a, str>), +} + +impl<'a> ConstEvalErrDescription<'a> { + /// Return a one-line description of the error, for lints and such + pub fn into_oneline(self) -> Cow<'a, str> { + match self { + ConstEvalErrDescription::Simple(simple) => simple, + } + } +} + +impl ConstEvalErr { + pub fn description(&self) -> ConstEvalErrDescription { + use self::ErrKind::*; + use self::ConstEvalErrDescription::*; + + macro_rules! simple { + ($msg:expr) => ({ Simple($msg.into_cow()) }); + ($fmt:expr, $($arg:tt)+) => ({ + Simple(format!($fmt, $($arg)+).into_cow()) + }) + } + + match self.kind { + CannotCast => simple!("can't cast this type"), + CannotCastTo(s) => simple!("can't cast this type to {}", s), + InvalidOpForInts(_) => simple!("can't do this op on integrals"), + InvalidOpForBools(_) => simple!("can't do this op on bools"), + InvalidOpForFloats(_) => simple!("can't do this op on floats"), + InvalidOpForIntUint(..) => simple!("can't do this op on an isize and usize"), + InvalidOpForUintInt(..) 
=> simple!("can't do this op on a usize and isize"), + NegateOn(ref const_val) => simple!("negate on {}", const_val.description()), + NotOn(ref const_val) => simple!("not on {}", const_val.description()), + CallOn(ref const_val) => simple!("call on {}", const_val.description()), + + MissingStructField => simple!("nonexistent struct field"), + NonConstPath => simple!("non-constant path in constant expression"), + UnimplementedConstVal(what) => + simple!("unimplemented constant expression: {}", what), + UnresolvedPath => simple!("unresolved path in constant expression"), + ExpectedConstTuple => simple!("expected constant tuple"), + ExpectedConstStruct => simple!("expected constant struct"), + TupleIndexOutOfBounds => simple!("tuple index out of bounds"), + IndexedNonVec => simple!("indexing is only supported for arrays"), + IndexNegative => simple!("indices must be non-negative integers"), + IndexNotInt => simple!("indices must be integers"), + IndexOutOfBounds { len, index } => { + simple!("index out of bounds: the len is {} but the index is {}", + len, index) + } + RepeatCountNotNatural => simple!("repeat count must be a natural number"), + RepeatCountNotInt => simple!("repeat count must be integers"), + + MiscBinaryOp => simple!("bad operands for binary"), + MiscCatchAll => simple!("unsupported constant expr"), + IndexOpFeatureGated => simple!("the index operation on const values is unstable"), + Math(ref err) => Simple(err.description().into_cow()), + + IntermediateUnsignedNegative => simple!( + "during the computation of an unsigned a negative \ + number was encountered. 
This is most likely a bug in\ + the constant evaluator"), + + TypeMismatch(ref expected, ref got) => { + simple!("expected {}, found {}", expected, got.description()) + }, + BadType(ref i) => simple!("value of wrong type: {:?}", i), + ErroneousReferencedConstant(_) => simple!("could not evaluate referenced constant"), + CharCast(ref got) => { + simple!("only `u8` can be cast as `char`, not `{}`", got.description()) + }, + } + } +} + +pub type EvalResult = Result; +pub type CastResult = Result; + +// FIXME: Long-term, this enum should go away: trying to evaluate +// an expression which hasn't been type-checked is a recipe for +// disaster. That said, it's not clear how to fix ast_ty_to_ty +// to avoid the ordering issue. + +/// Hint to determine how to evaluate constant expressions which +/// might not be type-checked. +#[derive(Copy, Clone, Debug)] +pub enum EvalHint<'tcx> { + /// We have a type-checked expression. + ExprTypeChecked, + /// We have an expression which hasn't been type-checked, but we have + /// an idea of what the type will be because of the context. For example, + /// the length of an array is always `usize`. (This is referred to as + /// a hint because it isn't guaranteed to be consistent with what + /// type-checking would compute.) + UncheckedExprHint(Ty<'tcx>), + /// We have an expression which has not yet been type-checked, and + /// and we have no clue what the type will be. + UncheckedExprNoHint, +} + +impl<'tcx> EvalHint<'tcx> { + fn erase_hint(&self) -> EvalHint<'tcx> { + match *self { + ExprTypeChecked => ExprTypeChecked, + UncheckedExprHint(_) | UncheckedExprNoHint => UncheckedExprNoHint, + } + } + fn checked_or(&self, ty: Ty<'tcx>) -> EvalHint<'tcx> { + match *self { + ExprTypeChecked => ExprTypeChecked, + _ => UncheckedExprHint(ty), + } + } +} + +macro_rules! 
signal { + ($e:expr, $exn:expr) => { + return Err(ConstEvalErr { span: $e.span, kind: $exn }) + } +} + +/// Evaluate a constant expression in a context where the expression isn't +/// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked, +/// but a few places need to evaluate constants during type-checking, like +/// computing the length of an array. (See also the FIXME above EvalHint.) +pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + e: &Expr, + ty_hint: EvalHint<'tcx>, + fn_args: FnArgMap) -> EvalResult { + // Try to compute the type of the expression based on the EvalHint. + // (See also the definition of EvalHint, and the FIXME above EvalHint.) + let ety = match ty_hint { + ExprTypeChecked => { + // After type-checking, expr_ty is guaranteed to succeed. + Some(tcx.tables().expr_ty(e)) + } + UncheckedExprHint(ty) => { + // Use the type hint; it's not guaranteed to be right, but it's + // usually good enough. + Some(ty) + } + UncheckedExprNoHint => { + // This expression might not be type-checked, and we have no hint. + // Try to query the context for a type anyway; we might get lucky + // (for example, if the expression was imported from another crate). 
+ tcx.tables().expr_ty_opt(e) + } + }; + let result = match e.node { + hir::ExprUnary(hir::UnNeg, ref inner) => { + // unary neg literals already got their sign during creation + if let hir::ExprLit(ref lit) = inner.node { + use syntax::ast::*; + use syntax::ast::LitIntType::*; + const I8_OVERFLOW: u64 = ::std::i8::MAX as u64 + 1; + const I16_OVERFLOW: u64 = ::std::i16::MAX as u64 + 1; + const I32_OVERFLOW: u64 = ::std::i32::MAX as u64 + 1; + const I64_OVERFLOW: u64 = ::std::i64::MAX as u64 + 1; + match (&lit.node, ety.map(|t| &t.sty)) { + (&LitKind::Int(I8_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I8))) | + (&LitKind::Int(I8_OVERFLOW, Signed(IntTy::I8)), _) => { + return Ok(Integral(I8(::std::i8::MIN))) + }, + (&LitKind::Int(I16_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I16))) | + (&LitKind::Int(I16_OVERFLOW, Signed(IntTy::I16)), _) => { + return Ok(Integral(I16(::std::i16::MIN))) + }, + (&LitKind::Int(I32_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I32))) | + (&LitKind::Int(I32_OVERFLOW, Signed(IntTy::I32)), _) => { + return Ok(Integral(I32(::std::i32::MIN))) + }, + (&LitKind::Int(I64_OVERFLOW, Unsuffixed), Some(&ty::TyInt(IntTy::I64))) | + (&LitKind::Int(I64_OVERFLOW, Signed(IntTy::I64)), _) => { + return Ok(Integral(I64(::std::i64::MIN))) + }, + (&LitKind::Int(n, Unsuffixed), Some(&ty::TyInt(IntTy::Is))) | + (&LitKind::Int(n, Signed(IntTy::Is)), _) => { + match tcx.sess.target.int_type { + IntTy::I16 => if n == I16_OVERFLOW { + return Ok(Integral(Isize(Is16(::std::i16::MIN)))); + }, + IntTy::I32 => if n == I32_OVERFLOW { + return Ok(Integral(Isize(Is32(::std::i32::MIN)))); + }, + IntTy::I64 => if n == I64_OVERFLOW { + return Ok(Integral(Isize(Is64(::std::i64::MIN)))); + }, + _ => bug!(), + } + }, + _ => {}, + } + } + match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? 
{ + Float(f) => Float(-f), + Integral(i) => Integral(math!(e, -i)), + const_val => signal!(e, NegateOn(const_val)), + } + } + hir::ExprUnary(hir::UnNot, ref inner) => { + match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? { + Integral(i) => Integral(math!(e, !i)), + Bool(b) => Bool(!b), + const_val => signal!(e, NotOn(const_val)), + } + } + hir::ExprUnary(hir::UnDeref, _) => signal!(e, UnimplementedConstVal("deref operation")), + hir::ExprBinary(op, ref a, ref b) => { + let b_ty = match op.node { + hir::BiShl | hir::BiShr => ty_hint.erase_hint(), + _ => ty_hint + }; + // technically, if we don't have type hints, but integral eval + // gives us a type through a type-suffix, cast or const def type + // we need to re-eval the other value of the BinOp if it was + // not inferred + match (eval_const_expr_partial(tcx, &a, ty_hint, fn_args)?, + eval_const_expr_partial(tcx, &b, b_ty, fn_args)?) { + (Float(a), Float(b)) => { + use std::cmp::Ordering::*; + match op.node { + hir::BiAdd => Float(math!(e, a + b)), + hir::BiSub => Float(math!(e, a - b)), + hir::BiMul => Float(math!(e, a * b)), + hir::BiDiv => Float(math!(e, a / b)), + hir::BiRem => Float(math!(e, a % b)), + hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal), + hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less), + hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater), + hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal), + hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less), + hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater), + _ => signal!(e, InvalidOpForFloats(op.node)), + } + } + (Integral(a), Integral(b)) => { + use std::cmp::Ordering::*; + match op.node { + hir::BiAdd => Integral(math!(e, a + b)), + hir::BiSub => Integral(math!(e, a - b)), + hir::BiMul => Integral(math!(e, a * b)), + hir::BiDiv => Integral(math!(e, a / b)), + hir::BiRem => Integral(math!(e, a % b)), + hir::BiBitAnd => Integral(math!(e, a & b)), + hir::BiBitOr => Integral(math!(e, a | b)), + hir::BiBitXor => Integral(math!(e, a ^ 
b)), + hir::BiShl => Integral(math!(e, a << b)), + hir::BiShr => Integral(math!(e, a >> b)), + hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal), + hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less), + hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater), + hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal), + hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less), + hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater), + _ => signal!(e, InvalidOpForInts(op.node)), + } + } + (Bool(a), Bool(b)) => { + Bool(match op.node { + hir::BiAnd => a && b, + hir::BiOr => a || b, + hir::BiBitXor => a ^ b, + hir::BiBitAnd => a & b, + hir::BiBitOr => a | b, + hir::BiEq => a == b, + hir::BiNe => a != b, + hir::BiLt => a < b, + hir::BiLe => a <= b, + hir::BiGe => a >= b, + hir::BiGt => a > b, + _ => signal!(e, InvalidOpForBools(op.node)), + }) + } + + _ => signal!(e, MiscBinaryOp), + } + } + hir::ExprCast(ref base, ref target_ty) => { + let ety = tcx.ast_ty_to_prim_ty(&target_ty).or(ety) + .unwrap_or_else(|| { + tcx.sess.span_fatal(target_ty.span, + "target type not found for const cast") + }); + + let base_hint = if let ExprTypeChecked = ty_hint { + ExprTypeChecked + } else { + match tcx.tables().expr_ty_opt(&base) { + Some(t) => UncheckedExprHint(t), + None => ty_hint + } + }; + + let val = match eval_const_expr_partial(tcx, &base, base_hint, fn_args) { + Ok(val) => val, + Err(ConstEvalErr { kind: ErroneousReferencedConstant( + box ConstEvalErr { kind: TypeMismatch(_, val), .. }), .. }) | + Err(ConstEvalErr { kind: TypeMismatch(_, val), .. 
}) => { + // Something like `5i8 as usize` doesn't need a type hint for the base + // instead take the type hint from the inner value + let hint = match val.int_type() { + Some(IntType::UnsignedInt(ty)) => ty_hint.checked_or(tcx.mk_mach_uint(ty)), + Some(IntType::SignedInt(ty)) => ty_hint.checked_or(tcx.mk_mach_int(ty)), + // we had a type hint, so we can't have an unknown type + None => bug!(), + }; + eval_const_expr_partial(tcx, &base, hint, fn_args)? + }, + Err(e) => return Err(e), + }; + match cast_const(tcx, val, ety) { + Ok(val) => val, + Err(kind) => return Err(ConstEvalErr { span: e.span, kind: kind }), + } + } + hir::ExprPath(ref qpath) => { + let def = tcx.tables().qpath_def(qpath, e.id); + match def { + Def::Const(def_id) | + Def::AssociatedConst(def_id) => { + let substs = if let ExprTypeChecked = ty_hint { + Some(tcx.tables().node_id_item_substs(e.id) + .unwrap_or_else(|| tcx.intern_substs(&[]))) + } else { + None + }; + if let Some((expr, ty)) = lookup_const_by_id(tcx, def_id, substs) { + let item_hint = match ty { + Some(ty) => ty_hint.checked_or(ty), + None => ty_hint, + }; + match eval_const_expr_partial(tcx, expr, item_hint, None) { + Ok(val) => val, + Err(err) => { + debug!("bad reference: {:?}, {:?}", err.description(), err.span); + signal!(e, ErroneousReferencedConstant(box err)) + }, + } + } else { + signal!(e, NonConstPath); + } + }, + Def::VariantCtor(variant_def, ..) => { + if let Some(const_expr) = lookup_variant_by_id(tcx, variant_def) { + match eval_const_expr_partial(tcx, const_expr, ty_hint, None) { + Ok(val) => val, + Err(err) => { + debug!("bad reference: {:?}, {:?}", err.description(), err.span); + signal!(e, ErroneousReferencedConstant(box err)) + }, + } + } else { + signal!(e, UnimplementedConstVal("enum variants")); + } + } + Def::StructCtor(..) 
=> { + ConstVal::Struct(e.id) + } + Def::Local(def_id) => { + debug!("Def::Local({:?}): {:?}", def_id, fn_args); + if let Some(val) = fn_args.and_then(|args| args.get(&def_id)) { + val.clone() + } else { + signal!(e, NonConstPath); + } + }, + Def::Method(id) | Def::Fn(id) => Function(id), + Def::Err => signal!(e, UnresolvedPath), + _ => signal!(e, NonConstPath), + } + } + hir::ExprCall(ref callee, ref args) => { + let sub_ty_hint = ty_hint.erase_hint(); + let callee_val = eval_const_expr_partial(tcx, callee, sub_ty_hint, fn_args)?; + let did = match callee_val { + Function(did) => did, + Struct(_) => signal!(e, UnimplementedConstVal("tuple struct constructors")), + callee => signal!(e, CallOn(callee)), + }; + let (arg_defs, body_id) = match lookup_const_fn_by_id(tcx, did) { + Some(ConstFnNode::Inlined(ii)) => (ii.const_fn_args.clone(), ii.body.expr_id()), + Some(ConstFnNode::Local(fn_like)) => + (fn_like.decl().inputs.iter() + .map(|arg| match arg.pat.node { + hir::PatKind::Binding(_, def_id, _, _) => Some(def_id), + _ => None + }).collect(), + fn_like.body()), + None => signal!(e, NonConstPath), + }; + let result = tcx.map.expr(body_id); + assert_eq!(arg_defs.len(), args.len()); + + let mut call_args = DefIdMap(); + for (arg, arg_expr) in arg_defs.into_iter().zip(args.iter()) { + let arg_hint = ty_hint.erase_hint(); + let arg_val = eval_const_expr_partial( + tcx, + arg_expr, + arg_hint, + fn_args + )?; + debug!("const call arg: {:?}", arg); + if let Some(def_id) = arg { + assert!(call_args.insert(def_id, arg_val).is_none()); + } + } + debug!("const call({:?})", call_args); + eval_const_expr_partial(tcx, &result, ty_hint, Some(&call_args))? 
+ }, + hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ety) { + Ok(val) => val, + Err(err) => signal!(e, err), + }, + hir::ExprBlock(ref block) => { + match block.expr { + Some(ref expr) => eval_const_expr_partial(tcx, &expr, ty_hint, fn_args)?, + None => signal!(e, UnimplementedConstVal("empty block")), + } + } + hir::ExprType(ref e, _) => eval_const_expr_partial(tcx, &e, ty_hint, fn_args)?, + hir::ExprTup(_) => Tuple(e.id), + hir::ExprStruct(..) => Struct(e.id), + hir::ExprIndex(ref arr, ref idx) => { + if !tcx.sess.features.borrow().const_indexing { + signal!(e, IndexOpFeatureGated); + } + let arr_hint = ty_hint.erase_hint(); + let arr = eval_const_expr_partial(tcx, arr, arr_hint, fn_args)?; + let idx_hint = ty_hint.checked_or(tcx.types.usize); + let idx = match eval_const_expr_partial(tcx, idx, idx_hint, fn_args)? { + Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type), + Integral(_) => bug!(), + _ => signal!(idx, IndexNotInt), + }; + assert_eq!(idx as usize as u64, idx); + match arr { + Array(_, n) if idx >= n => { + signal!(e, IndexOutOfBounds { len: n, index: idx }) + } + Array(v, n) => if let hir::ExprArray(ref v) = tcx.map.expect_expr(v).node { + assert_eq!(n as usize as u64, n); + eval_const_expr_partial(tcx, &v[idx as usize], ty_hint, fn_args)? + } else { + bug!() + }, + + Repeat(_, n) if idx >= n => { + signal!(e, IndexOutOfBounds { len: n, index: idx }) + } + Repeat(elem, _) => eval_const_expr_partial( + tcx, + &tcx.map.expect_expr(elem), + ty_hint, + fn_args, + )?, + + ByteStr(ref data) if idx >= data.len() as u64 => { + signal!(e, IndexOutOfBounds { len: data.len() as u64, index: idx }) + } + ByteStr(data) => { + Integral(U8(data[idx as usize])) + }, + + _ => signal!(e, IndexedNonVec), + } + } + hir::ExprArray(ref v) => Array(e.id, v.len() as u64), + hir::ExprRepeat(_, ref n) => { + let len_hint = ty_hint.checked_or(tcx.types.usize); + Repeat( + e.id, + match eval_const_expr_partial(tcx, &n, len_hint, fn_args)? 
{ + Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type), + Integral(_) => signal!(e, RepeatCountNotNatural), + _ => signal!(e, RepeatCountNotInt), + }, + ) + }, + hir::ExprTupField(ref base, index) => { + let base_hint = ty_hint.erase_hint(); + let c = eval_const_expr_partial(tcx, base, base_hint, fn_args)?; + if let Tuple(tup_id) = c { + if let hir::ExprTup(ref fields) = tcx.map.expect_expr(tup_id).node { + if index.node < fields.len() { + eval_const_expr_partial(tcx, &fields[index.node], ty_hint, fn_args)? + } else { + signal!(e, TupleIndexOutOfBounds); + } + } else { + bug!() + } + } else { + signal!(base, ExpectedConstTuple); + } + } + hir::ExprField(ref base, field_name) => { + let base_hint = ty_hint.erase_hint(); + // Get the base expression if it is a struct and it is constant + let c = eval_const_expr_partial(tcx, base, base_hint, fn_args)?; + if let Struct(struct_id) = c { + if let hir::ExprStruct(_, ref fields, _) = tcx.map.expect_expr(struct_id).node { + // Check that the given field exists and evaluate it + // if the idents are compared run-pass/issue-19244 fails + if let Some(f) = fields.iter().find(|f| f.name.node + == field_name.node) { + eval_const_expr_partial(tcx, &f.expr, ty_hint, fn_args)? + } else { + signal!(e, MissingStructField); + } + } else { + bug!() + } + } else { + signal!(base, ExpectedConstStruct); + } + } + hir::ExprAddrOf(..) 
=> signal!(e, UnimplementedConstVal("address operator")), + _ => signal!(e, MiscCatchAll) + }; + + match (ety.map(|t| &t.sty), result) { + (Some(ref ty_hint), Integral(i)) => match infer(i, tcx, ty_hint) { + Ok(inferred) => Ok(Integral(inferred)), + Err(err) => signal!(e, err), + }, + (_, result) => Ok(result), + } +} + +fn infer<'a, 'tcx>(i: ConstInt, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty_hint: &ty::TypeVariants<'tcx>) + -> Result { + use syntax::ast::*; + + match (ty_hint, i) { + (&ty::TyInt(IntTy::I8), result @ I8(_)) => Ok(result), + (&ty::TyInt(IntTy::I16), result @ I16(_)) => Ok(result), + (&ty::TyInt(IntTy::I32), result @ I32(_)) => Ok(result), + (&ty::TyInt(IntTy::I64), result @ I64(_)) => Ok(result), + (&ty::TyInt(IntTy::Is), result @ Isize(_)) => Ok(result), + + (&ty::TyUint(UintTy::U8), result @ U8(_)) => Ok(result), + (&ty::TyUint(UintTy::U16), result @ U16(_)) => Ok(result), + (&ty::TyUint(UintTy::U32), result @ U32(_)) => Ok(result), + (&ty::TyUint(UintTy::U64), result @ U64(_)) => Ok(result), + (&ty::TyUint(UintTy::Us), result @ Usize(_)) => Ok(result), + + (&ty::TyInt(IntTy::I8), Infer(i)) => Ok(I8(i as i64 as i8)), + (&ty::TyInt(IntTy::I16), Infer(i)) => Ok(I16(i as i64 as i16)), + (&ty::TyInt(IntTy::I32), Infer(i)) => Ok(I32(i as i64 as i32)), + (&ty::TyInt(IntTy::I64), Infer(i)) => Ok(I64(i as i64)), + (&ty::TyInt(IntTy::Is), Infer(i)) => { + Ok(Isize(ConstIsize::new_truncating(i as i64, tcx.sess.target.int_type))) + }, + + (&ty::TyInt(IntTy::I8), InferSigned(i)) => Ok(I8(i as i8)), + (&ty::TyInt(IntTy::I16), InferSigned(i)) => Ok(I16(i as i16)), + (&ty::TyInt(IntTy::I32), InferSigned(i)) => Ok(I32(i as i32)), + (&ty::TyInt(IntTy::I64), InferSigned(i)) => Ok(I64(i)), + (&ty::TyInt(IntTy::Is), InferSigned(i)) => { + Ok(Isize(ConstIsize::new_truncating(i, tcx.sess.target.int_type))) + }, + + (&ty::TyUint(UintTy::U8), Infer(i)) => Ok(U8(i as u8)), + (&ty::TyUint(UintTy::U16), Infer(i)) => Ok(U16(i as u16)), + (&ty::TyUint(UintTy::U32), Infer(i)) => 
Ok(U32(i as u32)), + (&ty::TyUint(UintTy::U64), Infer(i)) => Ok(U64(i)), + (&ty::TyUint(UintTy::Us), Infer(i)) => { + Ok(Usize(ConstUsize::new_truncating(i, tcx.sess.target.uint_type))) + }, + (&ty::TyUint(_), InferSigned(_)) => Err(IntermediateUnsignedNegative), + + (&ty::TyInt(ity), i) => Err(TypeMismatch(ity.to_string(), i)), + (&ty::TyUint(ity), i) => Err(TypeMismatch(ity.to_string(), i)), + + (&ty::TyAdt(adt, _), i) if adt.is_enum() => { + let hints = tcx.lookup_repr_hints(adt.did); + let int_ty = tcx.enum_repr_type(hints.iter().next()); + infer(i, tcx, &int_ty.to_ty(tcx).sty) + }, + (_, i) => Err(BadType(ConstVal::Integral(i))), + } +} + +fn resolve_trait_associated_const<'a, 'tcx: 'a>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_item_id: DefId, + default_value: Option<(&'tcx Expr, Option>)>, + trait_id: DefId, + rcvr_substs: &'tcx Substs<'tcx> +) -> Option<(&'tcx Expr, Option>)> +{ + let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, rcvr_substs)); + debug!("resolve_trait_associated_const: trait_ref={:?}", + trait_ref); + + tcx.populate_implementations_for_trait_if_necessary(trait_id); + tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { + let mut selcx = traits::SelectionContext::new(&infcx); + let obligation = traits::Obligation::new(traits::ObligationCause::dummy(), + trait_ref.to_poly_trait_predicate()); + let selection = match selcx.select(&obligation) { + Ok(Some(vtable)) => vtable, + // Still ambiguous, so give up and let the caller decide whether this + // expression is really needed yet. Some associated constant values + // can't be evaluated until monomorphization is done in trans. 
+ Ok(None) => { + return None + } + Err(_) => { + return None + } + }; + + // NOTE: this code does not currently account for specialization, but when + // it does so, it should hook into the Reveal to determine when the + // constant should resolve; this will also require plumbing through to this + // function whether we are in "trans mode" to pick the right Reveal + // when constructing the inference context above. + match selection { + traits::VtableImpl(ref impl_data) => { + let name = tcx.associated_item(trait_item_id).name; + let ac = tcx.associated_items(impl_data.impl_def_id) + .find(|item| item.kind == ty::AssociatedKind::Const && item.name == name); + match ac { + Some(ic) => lookup_const_by_id(tcx, ic.def_id, None), + None => default_value, + } + } + _ => { + bug!("resolve_trait_associated_const: unexpected vtable type") + } + } + }) +} + +fn cast_const_int<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstInt, ty: ty::Ty) -> CastResult { + let v = val.to_u64_unchecked(); + match ty.sty { + ty::TyBool if v == 0 => Ok(Bool(false)), + ty::TyBool if v == 1 => Ok(Bool(true)), + ty::TyInt(ast::IntTy::I8) => Ok(Integral(I8(v as i64 as i8))), + ty::TyInt(ast::IntTy::I16) => Ok(Integral(I16(v as i64 as i16))), + ty::TyInt(ast::IntTy::I32) => Ok(Integral(I32(v as i64 as i32))), + ty::TyInt(ast::IntTy::I64) => Ok(Integral(I64(v as i64))), + ty::TyInt(ast::IntTy::Is) => { + Ok(Integral(Isize(ConstIsize::new_truncating(v as i64, tcx.sess.target.int_type)))) + }, + ty::TyUint(ast::UintTy::U8) => Ok(Integral(U8(v as u8))), + ty::TyUint(ast::UintTy::U16) => Ok(Integral(U16(v as u16))), + ty::TyUint(ast::UintTy::U32) => Ok(Integral(U32(v as u32))), + ty::TyUint(ast::UintTy::U64) => Ok(Integral(U64(v))), + ty::TyUint(ast::UintTy::Us) => { + Ok(Integral(Usize(ConstUsize::new_truncating(v, tcx.sess.target.uint_type)))) + }, + ty::TyFloat(ast::FloatTy::F64) => match val.erase_type() { + Infer(u) => Ok(Float(F64(u as f64))), + InferSigned(i) => Ok(Float(F64(i as f64))), + _ => 
bug!("ConstInt::erase_type returned something other than Infer/InferSigned"), + }, + ty::TyFloat(ast::FloatTy::F32) => match val.erase_type() { + Infer(u) => Ok(Float(F32(u as f32))), + InferSigned(i) => Ok(Float(F32(i as f32))), + _ => bug!("ConstInt::erase_type returned something other than Infer/InferSigned"), + }, + ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting an address to a raw ptr")), + ty::TyChar => match infer(val, tcx, &ty::TyUint(ast::UintTy::U8)) { + Ok(U8(u)) => Ok(Char(u as char)), + // can only occur before typeck, typeck blocks `T as char` for `T` != `u8` + _ => Err(CharCast(val)), + }, + _ => Err(CannotCast), + } +} + +fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + val: ConstFloat, + ty: ty::Ty) -> CastResult { + match ty.sty { + ty::TyInt(_) | ty::TyUint(_) => { + let i = match val { + F32(f) if f >= 0.0 => Infer(f as u64), + FInfer { f64: f, .. } | + F64(f) if f >= 0.0 => Infer(f as u64), + + F32(f) => InferSigned(f as i64), + FInfer { f64: f, .. } | + F64(f) => InferSigned(f as i64) + }; + + if let (InferSigned(_), &ty::TyUint(_)) = (i, &ty.sty) { + return Err(CannotCast); + } + + cast_const_int(tcx, i, ty) + } + ty::TyFloat(ast::FloatTy::F64) => Ok(Float(F64(match val { + F32(f) => f as f64, + FInfer { f64: f, .. } | F64(f) => f + }))), + ty::TyFloat(ast::FloatTy::F32) => Ok(Float(F32(match val { + F64(f) => f as f32, + FInfer { f32: f, .. 
} | F32(f) => f + }))), + _ => Err(CannotCast), + } +} + +fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult { + match val { + Integral(i) => cast_const_int(tcx, i, ty), + Bool(b) => cast_const_int(tcx, Infer(b as u64), ty), + Float(f) => cast_const_float(tcx, f, ty), + Char(c) => cast_const_int(tcx, Infer(c as u64), ty), + Function(_) => Err(UnimplementedConstVal("casting fn pointers")), + ByteStr(b) => match ty.sty { + ty::TyRawPtr(_) => { + Err(ErrKind::UnimplementedConstVal("casting a bytestr to a raw ptr")) + }, + ty::TyRef(_, ty::TypeAndMut { ref ty, mutbl: hir::MutImmutable }) => match ty.sty { + ty::TyArray(ty, n) if ty == tcx.types.u8 && n == b.len() => Ok(ByteStr(b)), + ty::TySlice(_) => { + Err(ErrKind::UnimplementedConstVal("casting a bytestr to slice")) + }, + _ => Err(CannotCast), + }, + _ => Err(CannotCast), + }, + Str(s) => match ty.sty { + ty::TyRawPtr(_) => Err(ErrKind::UnimplementedConstVal("casting a str to a raw ptr")), + ty::TyRef(_, ty::TypeAndMut { ref ty, mutbl: hir::MutImmutable }) => match ty.sty { + ty::TyStr => Ok(Str(s)), + _ => Err(CannotCast), + }, + _ => Err(CannotCast), + }, + _ => Err(CannotCast), + } +} + +fn lit_to_const<'a, 'tcx>(lit: &ast::LitKind, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty_hint: Option>) + -> Result { + use syntax::ast::*; + use syntax::ast::LitIntType::*; + match *lit { + LitKind::Str(ref s, _) => Ok(Str(s.as_str())), + LitKind::ByteStr(ref data) => Ok(ByteStr(data.clone())), + LitKind::Byte(n) => Ok(Integral(U8(n))), + LitKind::Int(n, Signed(ity)) => { + infer(InferSigned(n as i64), tcx, &ty::TyInt(ity)).map(Integral) + }, + + LitKind::Int(n, Unsuffixed) => { + match ty_hint.map(|t| &t.sty) { + Some(&ty::TyInt(ity)) => { + infer(InferSigned(n as i64), tcx, &ty::TyInt(ity)).map(Integral) + }, + Some(&ty::TyUint(uty)) => { + infer(Infer(n), tcx, &ty::TyUint(uty)).map(Integral) + }, + None => Ok(Integral(Infer(n))), + Some(&ty::TyAdt(adt, _)) => { + let hints = 
tcx.lookup_repr_hints(adt.did); + let int_ty = tcx.enum_repr_type(hints.iter().next()); + infer(Infer(n), tcx, &int_ty.to_ty(tcx).sty).map(Integral) + }, + Some(ty_hint) => bug!("bad ty_hint: {:?}, {:?}", ty_hint, lit), + } + }, + LitKind::Int(n, Unsigned(ity)) => { + infer(Infer(n), tcx, &ty::TyUint(ity)).map(Integral) + }, + + LitKind::Float(n, fty) => { + parse_float(&n.as_str(), Some(fty)).map(Float) + } + LitKind::FloatUnsuffixed(n) => { + let fty_hint = match ty_hint.map(|t| &t.sty) { + Some(&ty::TyFloat(fty)) => Some(fty), + _ => None + }; + parse_float(&n.as_str(), fty_hint).map(Float) + } + LitKind::Bool(b) => Ok(Bool(b)), + LitKind::Char(c) => Ok(Char(c)), + } +} + +fn parse_float(num: &str, fty_hint: Option) + -> Result { + let val = match fty_hint { + Some(ast::FloatTy::F32) => num.parse::().map(F32), + Some(ast::FloatTy::F64) => num.parse::().map(F64), + None => { + num.parse::().and_then(|f32| { + num.parse::().map(|f64| { + FInfer { f32: f32, f64: f64 } + }) + }) + } + }; + val.map_err(|_| { + // FIXME(#31407) this is only necessary because float parsing is buggy + UnimplementedConstVal("could not evaluate float literal (see issue #31407)") + }) +} + +pub fn compare_const_vals(tcx: TyCtxt, span: Span, a: &ConstVal, b: &ConstVal) + -> Result +{ + let result = match (a, b) { + (&Integral(a), &Integral(b)) => a.try_cmp(b).ok(), + (&Float(a), &Float(b)) => a.try_cmp(b).ok(), + (&Str(ref a), &Str(ref b)) => Some(a.cmp(b)), + (&Bool(a), &Bool(b)) => Some(a.cmp(&b)), + (&ByteStr(ref a), &ByteStr(ref b)) => Some(a.cmp(b)), + (&Char(a), &Char(ref b)) => Some(a.cmp(b)), + _ => None, + }; + + match result { + Some(result) => Ok(result), + None => { + // FIXME: can this ever be reached? 
+ span_err!(tcx.sess, span, E0298, + "type mismatch comparing {} and {}", + a.description(), + b.description()); + Err(ErrorReported) + } + } +} + +pub fn compare_lit_exprs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + span: Span, + a: &Expr, + b: &Expr) -> Result { + let a = match eval_const_expr_partial(tcx, a, ExprTypeChecked, None) { + Ok(a) => a, + Err(e) => { + report_const_eval_err(tcx, &e, a.span, "expression").emit(); + return Err(ErrorReported); + } + }; + let b = match eval_const_expr_partial(tcx, b, ExprTypeChecked, None) { + Ok(b) => b, + Err(e) => { + report_const_eval_err(tcx, &e, b.span, "expression").emit(); + return Err(ErrorReported); + } + }; + compare_const_vals(tcx, span, &a, &b) +} + + +/// Returns the value of the length-valued expression +pub fn eval_length<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + count_expr: &hir::Expr, + reason: &str) + -> Result +{ + let hint = UncheckedExprHint(tcx.types.usize); + match eval_const_expr_partial(tcx, count_expr, hint, None) { + Ok(Integral(Usize(count))) => { + let val = count.as_u64(tcx.sess.target.uint_type); + assert_eq!(val as usize as u64, val); + Ok(val as usize) + }, + Ok(const_val) => { + struct_span_err!(tcx.sess, count_expr.span, E0306, + "expected `usize` for {}, found {}", + reason, + const_val.description()) + .span_label(count_expr.span, &format!("expected `usize`")) + .emit(); + + Err(ErrorReported) + } + Err(err) => { + let mut diag = report_const_eval_err( + tcx, &err, count_expr.span, reason); + + if let hir::ExprPath(hir::QPath::Resolved(None, ref path)) = count_expr.node { + if let Def::Local(..) = path.def { + diag.note(&format!("`{}` is a variable", path)); + } + } + + diag.emit(); + Err(ErrorReported) + } + } +} diff --git a/src/librustc_const_eval/lib.rs b/src/librustc_const_eval/lib.rs new file mode 100644 index 0000000000000..7a6cc4937232d --- /dev/null +++ b/src/librustc_const_eval/lib.rs @@ -0,0 +1,56 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! constant evaluation on the HIR and code to validate patterns/matches +//! +//! # Note +//! +//! This API is completely unstable and subject to change. + +#![crate_name = "rustc_const_eval"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(rustc_private)] +#![feature(staged_api)] +#![feature(rustc_diagnostic_macros)] +#![feature(slice_patterns)] +#![feature(box_patterns)] +#![feature(box_syntax)] + +extern crate arena; +#[macro_use] extern crate syntax; +#[macro_use] extern crate log; +#[macro_use] extern crate rustc; +extern crate rustc_back; +extern crate rustc_const_math; +extern crate rustc_data_structures; +extern crate rustc_errors; +extern crate graphviz; +extern crate syntax_pos; +extern crate serialize as rustc_serialize; // used by deriving + +// NB: This module needs to be declared first so diagnostics are +// registered before they are used. +pub mod diagnostics; + +mod eval; +mod _match; +pub mod check_match; +pub mod pattern; + +pub use eval::*; + +// Build the diagnostics array at the end so that the metadata includes error use sites. +__build_diagnostic_array! { librustc_const_eval, DIAGNOSTICS } diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs new file mode 100644 index 0000000000000..e93178c89c22b --- /dev/null +++ b/src/librustc_const_eval/pattern.rs @@ -0,0 +1,604 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use eval; + +use rustc::middle::const_val::ConstVal; +use rustc::mir::{Field, BorrowKind, Mutability}; +use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region}; +use rustc::hir::{self, PatKind}; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::hir::pat_util::EnumerateAndAdjustIterator; + +use rustc_data_structures::indexed_vec::Idx; + +use syntax::ast; +use syntax::ptr::P; +use syntax_pos::Span; + +#[derive(Clone, Debug)] +pub enum PatternError { + StaticInPattern(Span), + BadConstInPattern(Span, DefId), + ConstEval(eval::ConstEvalErr), +} + +#[derive(Copy, Clone, Debug)] +pub enum BindingMode<'tcx> { + ByValue, + ByRef(&'tcx Region, BorrowKind), +} + +#[derive(Clone, Debug)] +pub struct FieldPattern<'tcx> { + pub field: Field, + pub pattern: Pattern<'tcx>, +} + +#[derive(Clone, Debug)] +pub struct Pattern<'tcx> { + pub ty: Ty<'tcx>, + pub span: Span, + pub kind: Box>, +} + +#[derive(Clone, Debug)] +pub enum PatternKind<'tcx> { + Wild, + + /// x, ref x, x @ P, etc + Binding { + mutability: Mutability, + name: ast::Name, + mode: BindingMode<'tcx>, + var: ast::NodeId, + ty: Ty<'tcx>, + subpattern: Option>, + }, + + /// Foo(...) 
or Foo{...} or Foo, where `Foo` is a variant name from an adt with >1 variants + Variant { + adt_def: &'tcx AdtDef, + variant_index: usize, + subpatterns: Vec>, + }, + + /// (...), Foo(...), Foo{...}, or Foo, where `Foo` is a variant name from an adt with 1 variant + Leaf { + subpatterns: Vec>, + }, + + /// box P, &P, &mut P, etc + Deref { + subpattern: Pattern<'tcx>, + }, + + Constant { + value: ConstVal, + }, + + Range { + lo: ConstVal, + hi: ConstVal, + }, + + /// matches against a slice, checking the length and extracting elements + Slice { + prefix: Vec>, + slice: Option>, + suffix: Vec>, + }, + + /// fixed match against an array, irrefutable + Array { + prefix: Vec>, + slice: Option>, + suffix: Vec>, + }, +} + +pub struct PatternContext<'a, 'gcx: 'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub errors: Vec, +} + +impl<'a, 'gcx, 'tcx> Pattern<'tcx> { + pub fn from_hir(tcx: TyCtxt<'a, 'gcx, 'tcx>, pat: &hir::Pat) -> Self { + let mut pcx = PatternContext::new(tcx); + let result = pcx.lower_pattern(pat); + if !pcx.errors.is_empty() { + span_bug!(pat.span, "encountered errors lowering pattern: {:?}", pcx.errors) + } + debug!("Pattern::from_hir({:?}) = {:?}", pat, result); + result + } +} + +impl<'a, 'gcx, 'tcx> PatternContext<'a, 'gcx, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self { + PatternContext { tcx: tcx, errors: vec![] } + } + + pub fn lower_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { + let mut ty = self.tcx.tables().node_id_to_type(pat.id); + + let kind = match pat.node { + PatKind::Wild => PatternKind::Wild, + + PatKind::Lit(ref value) => { + match eval::eval_const_expr_checked(self.tcx.global_tcx(), value) { + Ok(value) => { + PatternKind::Constant { value: value } + } + Err(e) => { + self.errors.push(PatternError::ConstEval(e)); + PatternKind::Wild + } + } + } + + PatKind::Range(ref lo, ref hi) => { + let r_lo = eval::eval_const_expr_checked(self.tcx.global_tcx(), lo); + if let Err(ref e_lo) = r_lo { + 
self.errors.push(PatternError::ConstEval(e_lo.clone())); + } + + let r_hi = eval::eval_const_expr_checked(self.tcx.global_tcx(), hi); + if let Err(ref e_hi) = r_hi { + self.errors.push(PatternError::ConstEval(e_hi.clone())); + } + + if let (Ok(lo), Ok(hi)) = (r_lo, r_hi) { + PatternKind::Range { lo: lo, hi: hi } + } else { + PatternKind::Wild + } + } + + PatKind::Path(ref qpath) => { + let def = self.tcx.tables().qpath_def(qpath, pat.id); + match def { + Def::Const(def_id) | Def::AssociatedConst(def_id) => { + let tcx = self.tcx.global_tcx(); + let substs = tcx.tables().node_id_item_substs(pat.id) + .unwrap_or_else(|| tcx.intern_substs(&[])); + match eval::lookup_const_by_id(tcx, def_id, Some(substs)) { + Some((const_expr, _const_ty)) => { + match eval::const_expr_to_pat( + tcx, const_expr, pat.id, pat.span) + { + Ok(pat) => return self.lower_pattern(&pat), + Err(_) => { + self.errors.push(PatternError::BadConstInPattern( + pat.span, def_id)); + PatternKind::Wild + } + } + } + None => { + self.errors.push(PatternError::StaticInPattern(pat.span)); + PatternKind::Wild + } + } + } + _ => self.lower_variant_or_leaf(def, vec![]) + } + } + + PatKind::Ref(ref subpattern, _) | + PatKind::Box(ref subpattern) => { + PatternKind::Deref { subpattern: self.lower_pattern(subpattern) } + } + + PatKind::Slice(ref prefix, ref slice, ref suffix) => { + let ty = self.tcx.tables().node_id_to_type(pat.id); + match ty.sty { + ty::TyRef(_, mt) => + PatternKind::Deref { + subpattern: Pattern { + ty: mt.ty, + span: pat.span, + kind: Box::new(self.slice_or_array_pattern( + pat.span, mt.ty, prefix, slice, suffix)) + }, + }, + + ty::TySlice(..) | + ty::TyArray(..) 
=> + self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix), + + ref sty => + span_bug!( + pat.span, + "unexpanded type for vector pattern: {:?}", + sty), + } + } + + PatKind::Tuple(ref subpatterns, ddpos) => { + let ty = self.tcx.tables().node_id_to_type(pat.id); + match ty.sty { + ty::TyTuple(ref tys) => { + let subpatterns = + subpatterns.iter() + .enumerate_and_adjust(tys.len(), ddpos) + .map(|(i, subpattern)| FieldPattern { + field: Field::new(i), + pattern: self.lower_pattern(subpattern) + }) + .collect(); + + PatternKind::Leaf { subpatterns: subpatterns } + } + + ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty), + } + } + + PatKind::Binding(bm, def_id, ref ident, ref sub) => { + let id = self.tcx.map.as_local_node_id(def_id).unwrap(); + let var_ty = self.tcx.tables().node_id_to_type(pat.id); + let region = match var_ty.sty { + ty::TyRef(r, _) => Some(r), + _ => None, + }; + let (mutability, mode) = match bm { + hir::BindByValue(hir::MutMutable) => + (Mutability::Mut, BindingMode::ByValue), + hir::BindByValue(hir::MutImmutable) => + (Mutability::Not, BindingMode::ByValue), + hir::BindByRef(hir::MutMutable) => + (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Mut)), + hir::BindByRef(hir::MutImmutable) => + (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), + }; + + // A ref x pattern is the same node used for x, and as such it has + // x's type, which is &T, where we want T (the type being matched). 
+ if let hir::BindByRef(_) = bm { + if let ty::TyRef(_, mt) = ty.sty { + ty = mt.ty; + } else { + bug!("`ref {}` has wrong type {}", ident.node, ty); + } + } + + PatternKind::Binding { + mutability: mutability, + mode: mode, + name: ident.node, + var: id, + ty: var_ty, + subpattern: self.lower_opt_pattern(sub), + } + } + + PatKind::TupleStruct(ref qpath, ref subpatterns, ddpos) => { + let def = self.tcx.tables().qpath_def(qpath, pat.id); + let pat_ty = self.tcx.tables().node_id_to_type(pat.id); + let adt_def = match pat_ty.sty { + ty::TyAdt(adt_def, _) => adt_def, + _ => span_bug!(pat.span, "tuple struct pattern not applied to an ADT"), + }; + let variant_def = adt_def.variant_of_def(def); + + let subpatterns = + subpatterns.iter() + .enumerate_and_adjust(variant_def.fields.len(), ddpos) + .map(|(i, field)| FieldPattern { + field: Field::new(i), + pattern: self.lower_pattern(field), + }) + .collect(); + self.lower_variant_or_leaf(def, subpatterns) + } + + PatKind::Struct(ref qpath, ref fields, _) => { + let def = self.tcx.tables().qpath_def(qpath, pat.id); + let pat_ty = self.tcx.tables().node_id_to_type(pat.id); + let adt_def = match pat_ty.sty { + ty::TyAdt(adt_def, _) => adt_def, + _ => { + span_bug!( + pat.span, + "struct pattern not applied to an ADT"); + } + }; + let variant_def = adt_def.variant_of_def(def); + + let subpatterns = + fields.iter() + .map(|field| { + let index = variant_def.index_of_field_named(field.node.name); + let index = index.unwrap_or_else(|| { + span_bug!( + pat.span, + "no field with name {:?}", + field.node.name); + }); + FieldPattern { + field: Field::new(index), + pattern: self.lower_pattern(&field.node.pat), + } + }) + .collect(); + + self.lower_variant_or_leaf(def, subpatterns) + } + }; + + Pattern { + span: pat.span, + ty: ty, + kind: Box::new(kind), + } + } + + fn lower_patterns(&mut self, pats: &[P]) -> Vec> { + pats.iter().map(|p| self.lower_pattern(p)).collect() + } + + fn lower_opt_pattern(&mut self, pat: &Option>) -> 
Option> + { + pat.as_ref().map(|p| self.lower_pattern(p)) + } + + fn flatten_nested_slice_patterns( + &mut self, + prefix: Vec>, + slice: Option>, + suffix: Vec>) + -> (Vec>, Option>, Vec>) + { + let orig_slice = match slice { + Some(orig_slice) => orig_slice, + None => return (prefix, slice, suffix) + }; + let orig_prefix = prefix; + let orig_suffix = suffix; + + // dance because of intentional borrow-checker stupidity. + let kind = *orig_slice.kind; + match kind { + PatternKind::Slice { prefix, slice, mut suffix } | + PatternKind::Array { prefix, slice, mut suffix } => { + let mut orig_prefix = orig_prefix; + + orig_prefix.extend(prefix); + suffix.extend(orig_suffix); + + (orig_prefix, slice, suffix) + } + _ => { + (orig_prefix, Some(Pattern { + kind: box kind, ..orig_slice + }), orig_suffix) + } + } + } + + fn slice_or_array_pattern( + &mut self, + span: Span, + ty: Ty<'tcx>, + prefix: &[P], + slice: &Option>, + suffix: &[P]) + -> PatternKind<'tcx> + { + let prefix = self.lower_patterns(prefix); + let slice = self.lower_opt_pattern(slice); + let suffix = self.lower_patterns(suffix); + let (prefix, slice, suffix) = + self.flatten_nested_slice_patterns(prefix, slice, suffix); + + match ty.sty { + ty::TySlice(..) => { + // matching a slice or fixed-length array + PatternKind::Slice { prefix: prefix, slice: slice, suffix: suffix } + } + + ty::TyArray(_, len) => { + // fixed-length array + assert!(len >= prefix.len() + suffix.len()); + PatternKind::Array { prefix: prefix, slice: slice, suffix: suffix } + } + + _ => { + span_bug!(span, "bad slice pattern type {:?}", ty); + } + } + } + + fn lower_variant_or_leaf( + &mut self, + def: Def, + subpatterns: Vec>) + -> PatternKind<'tcx> + { + match def { + Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) 
=> { + let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); + let adt_def = self.tcx.lookup_adt_def(enum_id); + if adt_def.variants.len() > 1 { + PatternKind::Variant { + adt_def: adt_def, + variant_index: adt_def.variant_index_with_id(variant_id), + subpatterns: subpatterns, + } + } else { + PatternKind::Leaf { subpatterns: subpatterns } + } + } + + Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) => { + PatternKind::Leaf { subpatterns: subpatterns } + } + + _ => bug!() + } + } +} + +pub trait PatternFoldable<'tcx> : Sized { + fn fold_with>(&self, folder: &mut F) -> Self { + self.super_fold_with(folder) + } + + fn super_fold_with>(&self, folder: &mut F) -> Self; +} + +pub trait PatternFolder<'tcx> : Sized { + fn fold_pattern(&mut self, pattern: &Pattern<'tcx>) -> Pattern<'tcx> { + pattern.super_fold_with(self) + } + + fn fold_pattern_kind(&mut self, kind: &PatternKind<'tcx>) -> PatternKind<'tcx> { + kind.super_fold_with(self) + } +} + + +impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Box { + fn super_fold_with>(&self, folder: &mut F) -> Self { + let content: T = (**self).fold_with(folder); + box content + } +} + +impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Vec { + fn super_fold_with>(&self, folder: &mut F) -> Self { + self.iter().map(|t| t.fold_with(folder)).collect() + } +} + +impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Option { + fn super_fold_with>(&self, folder: &mut F) -> Self{ + self.as_ref().map(|t| t.fold_with(folder)) + } +} + +macro_rules! 
CloneImpls { + (<$lt_tcx:tt> $($ty:ty),+) => { + $( + impl<$lt_tcx> PatternFoldable<$lt_tcx> for $ty { + fn super_fold_with>(&self, _: &mut F) -> Self { + Clone::clone(self) + } + } + )+ + } +} + +CloneImpls!{ <'tcx> + Span, Field, Mutability, ast::Name, ast::NodeId, usize, ConstVal, + Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef +} + +impl<'tcx> PatternFoldable<'tcx> for FieldPattern<'tcx> { + fn super_fold_with>(&self, folder: &mut F) -> Self { + FieldPattern { + field: self.field.fold_with(folder), + pattern: self.pattern.fold_with(folder) + } + } +} + +impl<'tcx> PatternFoldable<'tcx> for Pattern<'tcx> { + fn fold_with>(&self, folder: &mut F) -> Self { + folder.fold_pattern(self) + } + + fn super_fold_with>(&self, folder: &mut F) -> Self { + Pattern { + ty: self.ty.fold_with(folder), + span: self.span.fold_with(folder), + kind: self.kind.fold_with(folder) + } + } +} + +impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { + fn fold_with>(&self, folder: &mut F) -> Self { + folder.fold_pattern_kind(self) + } + + fn super_fold_with>(&self, folder: &mut F) -> Self { + match *self { + PatternKind::Wild => PatternKind::Wild, + PatternKind::Binding { + mutability, + name, + mode, + var, + ty, + ref subpattern, + } => PatternKind::Binding { + mutability: mutability.fold_with(folder), + name: name.fold_with(folder), + mode: mode.fold_with(folder), + var: var.fold_with(folder), + ty: ty.fold_with(folder), + subpattern: subpattern.fold_with(folder), + }, + PatternKind::Variant { + adt_def, + variant_index, + ref subpatterns, + } => PatternKind::Variant { + adt_def: adt_def.fold_with(folder), + variant_index: variant_index.fold_with(folder), + subpatterns: subpatterns.fold_with(folder) + }, + PatternKind::Leaf { + ref subpatterns, + } => PatternKind::Leaf { + subpatterns: subpatterns.fold_with(folder), + }, + PatternKind::Deref { + ref subpattern, + } => PatternKind::Deref { + subpattern: subpattern.fold_with(folder), + }, + PatternKind::Constant { + ref value + } => 
PatternKind::Constant { + value: value.fold_with(folder) + }, + PatternKind::Range { + ref lo, + ref hi + } => PatternKind::Range { + lo: lo.fold_with(folder), + hi: hi.fold_with(folder) + }, + PatternKind::Slice { + ref prefix, + ref slice, + ref suffix, + } => PatternKind::Slice { + prefix: prefix.fold_with(folder), + slice: slice.fold_with(folder), + suffix: suffix.fold_with(folder) + }, + PatternKind::Array { + ref prefix, + ref slice, + ref suffix + } => PatternKind::Array { + prefix: prefix.fold_with(folder), + slice: slice.fold_with(folder), + suffix: suffix.fold_with(folder) + }, + } + } +} diff --git a/src/librustc_const_math/Cargo.toml b/src/librustc_const_math/Cargo.toml new file mode 100644 index 0000000000000..10aadabe22ed7 --- /dev/null +++ b/src/librustc_const_math/Cargo.toml @@ -0,0 +1,14 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_const_math" +version = "0.0.0" + +[lib] +name = "rustc_const_math" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } diff --git a/src/librustc_const_math/err.rs b/src/librustc_const_math/err.rs new file mode 100644 index 0000000000000..e2e30ef026c2f --- /dev/null +++ b/src/librustc_const_math/err.rs @@ -0,0 +1,85 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use syntax::ast; + +#[derive(Debug, PartialEq, Eq, Clone, RustcEncodable, RustcDecodable)] +pub enum ConstMathErr { + NotInRange, + CmpBetweenUnequalTypes, + UnequalTypes(Op), + Overflow(Op), + ShiftNegative, + DivisionByZero, + RemainderByZero, + UnsignedNegation, + ULitOutOfRange(ast::UintTy), + LitOutOfRange(ast::IntTy), +} +pub use self::ConstMathErr::*; + +#[derive(Debug, PartialEq, Eq, Clone, RustcEncodable, RustcDecodable)] +pub enum Op { + Add, + Sub, + Mul, + Div, + Rem, + Shr, + Shl, + Neg, + BitAnd, + BitOr, + BitXor, +} + +impl ConstMathErr { + pub fn description(&self) -> &'static str { + use self::Op::*; + match *self { + NotInRange => "inferred value out of range", + CmpBetweenUnequalTypes => "compared two values of different types", + UnequalTypes(Add) => "tried to add two values of different types", + UnequalTypes(Sub) => "tried to subtract two values of different types", + UnequalTypes(Mul) => "tried to multiply two values of different types", + UnequalTypes(Div) => "tried to divide two values of different types", + UnequalTypes(Rem) => { + "tried to calculate the remainder of two values of different types" + }, + UnequalTypes(BitAnd) => "tried to bitand two values of different types", + UnequalTypes(BitOr) => "tried to bitor two values of different types", + UnequalTypes(BitXor) => "tried to xor two values of different types", + UnequalTypes(_) => unreachable!(), + Overflow(Add) => "attempt to add with overflow", + Overflow(Sub) => "attempt to subtract with overflow", + Overflow(Mul) => "attempt to multiply with overflow", + Overflow(Div) => "attempt to divide with overflow", + Overflow(Rem) => "attempt to calculate the remainder with overflow", + Overflow(Neg) => "attempt to negate with overflow", + Overflow(Shr) => "attempt to shift right with overflow", + Overflow(Shl) => "attempt to shift left with overflow", + Overflow(_) => unreachable!(), + ShiftNegative => "attempt to shift by a negative amount", + DivisionByZero => "attempt to divide 
by zero", + RemainderByZero => "attempt to calculate the remainder with a divisor of zero", + UnsignedNegation => "unary negation of unsigned integer", + ULitOutOfRange(ast::UintTy::U8) => "literal out of range for u8", + ULitOutOfRange(ast::UintTy::U16) => "literal out of range for u16", + ULitOutOfRange(ast::UintTy::U32) => "literal out of range for u32", + ULitOutOfRange(ast::UintTy::U64) => "literal out of range for u64", + ULitOutOfRange(ast::UintTy::Us) => "literal out of range for usize", + LitOutOfRange(ast::IntTy::I8) => "literal out of range for i8", + LitOutOfRange(ast::IntTy::I16) => "literal out of range for i16", + LitOutOfRange(ast::IntTy::I32) => "literal out of range for i32", + LitOutOfRange(ast::IntTy::I64) => "literal out of range for i64", + LitOutOfRange(ast::IntTy::Is) => "literal out of range for isize", + } + } +} diff --git a/src/librustc_const_math/float.rs b/src/librustc_const_math/float.rs new file mode 100644 index 0000000000000..4610c183e1b1f --- /dev/null +++ b/src/librustc_const_math/float.rs @@ -0,0 +1,173 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cmp::Ordering; +use std::hash; +use std::mem::transmute; + +use super::err::*; + +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum ConstFloat { + F32(f32), + F64(f64), + + // When the type isn't known, we have to operate on both possibilities. 
+ FInfer { + f32: f32, + f64: f64 + } +} +pub use self::ConstFloat::*; + +impl ConstFloat { + /// Description of the type, not the value + pub fn description(&self) -> &'static str { + match *self { + FInfer {..} => "float", + F32(_) => "f32", + F64(_) => "f64", + } + } + + pub fn is_nan(&self) -> bool { + match *self { + F32(f) => f.is_nan(), + F64(f) => f.is_nan(), + FInfer { f32, f64 } => f32.is_nan() || f64.is_nan() + } + } + + /// Compares the values if they are of the same type + pub fn try_cmp(self, rhs: Self) -> Result { + match (self, rhs) { + (F64(a), F64(b)) | + (F64(a), FInfer { f64: b, .. }) | + (FInfer { f64: a, .. }, F64(b)) | + (FInfer { f64: a, .. }, FInfer { f64: b, .. }) => { + // This is pretty bad but it is the existing behavior. + Ok(if a == b { + Ordering::Equal + } else if a < b { + Ordering::Less + } else { + Ordering::Greater + }) + } + + (F32(a), F32(b)) | + (F32(a), FInfer { f32: b, .. }) | + (FInfer { f32: a, .. }, F32(b)) => { + Ok(if a == b { + Ordering::Equal + } else if a < b { + Ordering::Less + } else { + Ordering::Greater + }) + } + + _ => Err(CmpBetweenUnequalTypes), + } + } +} + +/// Note that equality for `ConstFloat` means that the it is the same +/// constant, not that the rust values are equal. In particular, `NaN +/// == NaN` (at least if it's the same NaN; distinct encodings for NaN +/// are considering unequal). +impl PartialEq for ConstFloat { + fn eq(&self, other: &Self) -> bool { + match (*self, *other) { + (F64(a), F64(b)) | + (F64(a), FInfer { f64: b, .. }) | + (FInfer { f64: a, .. }, F64(b)) | + (FInfer { f64: a, .. }, FInfer { f64: b, .. }) => { + unsafe{transmute::<_,u64>(a) == transmute::<_,u64>(b)} + } + (F32(a), F32(b)) => { + unsafe{transmute::<_,u32>(a) == transmute::<_,u32>(b)} + } + _ => false + } + } +} + +impl Eq for ConstFloat {} + +impl hash::Hash for ConstFloat { + fn hash(&self, state: &mut H) { + match *self { + F64(a) | FInfer { f64: a, .. 
} => { + unsafe { transmute::<_,u64>(a) }.hash(state) + } + F32(a) => { + unsafe { transmute::<_,u32>(a) }.hash(state) + } + } + } +} + +impl ::std::fmt::Display for ConstFloat { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + match *self { + FInfer { f64, .. } => write!(fmt, "{}", f64), + F32(f) => write!(fmt, "{}f32", f), + F64(f) => write!(fmt, "{}f64", f), + } + } +} + +macro_rules! derive_binop { + ($op:ident, $func:ident) => { + impl ::std::ops::$op for ConstFloat { + type Output = Result; + fn $func(self, rhs: Self) -> Result { + match (self, rhs) { + (F32(a), F32(b)) | + (F32(a), FInfer { f32: b, .. }) | + (FInfer { f32: a, .. }, F32(b)) => Ok(F32(a.$func(b))), + + (F64(a), F64(b)) | + (FInfer { f64: a, .. }, F64(b)) | + (F64(a), FInfer { f64: b, .. }) => Ok(F64(a.$func(b))), + + (FInfer { f32: a32, f64: a64 }, + FInfer { f32: b32, f64: b64 }) => Ok(FInfer { + f32: a32.$func(b32), + f64: a64.$func(b64) + }), + + _ => Err(UnequalTypes(Op::$op)), + } + } + } + } +} + +derive_binop!(Add, add); +derive_binop!(Sub, sub); +derive_binop!(Mul, mul); +derive_binop!(Div, div); +derive_binop!(Rem, rem); + +impl ::std::ops::Neg for ConstFloat { + type Output = Self; + fn neg(self) -> Self { + match self { + F32(f) => F32(-f), + F64(f) => F64(-f), + FInfer { f32, f64 } => FInfer { + f32: -f32, + f64: -f64 + } + } + } +} diff --git a/src/librustc_const_math/int.rs b/src/librustc_const_math/int.rs new file mode 100644 index 0000000000000..28a5887847252 --- /dev/null +++ b/src/librustc_const_math/int.rs @@ -0,0 +1,608 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::cmp::Ordering; +use syntax::attr::IntType; +use syntax::ast::{IntTy, UintTy}; + +use super::is::*; +use super::us::*; +use super::err::*; + +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] +pub enum ConstInt { + I8(i8), + I16(i16), + I32(i32), + I64(i64), + Isize(ConstIsize), + U8(u8), + U16(u16), + U32(u32), + U64(u64), + Usize(ConstUsize), + Infer(u64), + InferSigned(i64), +} +pub use self::ConstInt::*; + + +macro_rules! bounds { + ($($t:ident $min:ident $max:ident)*) => { + mod as_u64 { + $( + #[allow(dead_code)] + pub const $min: u64 = ::std::$t::MIN as u64; + #[allow(dead_code)] + pub const $max: u64 = ::std::$t::MAX as u64; + )* + } + mod as_i64 { + $( + #[allow(dead_code)] + pub const $min: i64 = ::std::$t::MIN as i64; + #[allow(dead_code)] + pub const $max: i64 = ::std::$t::MAX as i64; + )* + } + } +} + +bounds!{ + i8 I8MIN I8MAX i16 I16MIN I16MAX i32 I32MIN I32MAX i64 I64MIN I64MAX isize IMIN IMAX + u8 U8MIN U8MAX u16 U16MIN U16MAX u32 U32MIN U32MAX u64 U64MIN U64MAX usize UMIN UMAX +} + +impl ConstInt { + /// If either value is `Infer` or `InferSigned`, try to turn the value into the type of + /// the other value. 
If both values have no type, don't do anything + pub fn infer(self, other: Self) -> Result<(Self, Self), ConstMathErr> { + let inferred = match (self, other) { + (InferSigned(_), InferSigned(_)) + | (Infer(_), Infer(_)) => self, // no inference possible + // kindof wrong, you could have had values > I64MAX during computation of a + (Infer(a @ 0...as_u64::I64MAX), InferSigned(_)) => InferSigned(a as i64), + (Infer(_), InferSigned(_)) => return Err(ConstMathErr::NotInRange), + (_, InferSigned(_)) + | (_, Infer(_)) => return other.infer(self).map(|(b, a)| (a, b)), + + (Infer(a @ 0...as_u64::I8MAX), I8(_)) => I8(a as i64 as i8), + (Infer(a @ 0...as_u64::I16MAX), I16(_)) => I16(a as i64 as i16), + (Infer(a @ 0...as_u64::I32MAX), I32(_)) => I32(a as i64 as i32), + (Infer(a @ 0...as_u64::I64MAX), I64(_)) => I64(a as i64), + (Infer(a @ 0...as_u64::I16MAX), Isize(Is16(_))) => Isize(Is16(a as i64 as i16)), + (Infer(a @ 0...as_u64::I32MAX), Isize(Is32(_))) => Isize(Is32(a as i64 as i32)), + (Infer(a @ 0...as_u64::I64MAX), Isize(Is64(_))) => Isize(Is64(a as i64)), + (Infer(a @ 0...as_u64::U8MAX), U8(_)) => U8(a as u8), + (Infer(a @ 0...as_u64::U16MAX), U16(_)) => U16(a as u16), + (Infer(a @ 0...as_u64::U32MAX), U32(_)) => U32(a as u32), + (Infer(a), U64(_)) => U64(a), + (Infer(a @ 0...as_u64::U16MAX), Usize(Us16(_))) => Usize(Us16(a as u16)), + (Infer(a @ 0...as_u64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)), + (Infer(a), Usize(Us64(_))) => Usize(Us64(a)), + + (Infer(_), _) => return Err(ConstMathErr::NotInRange), + + (InferSigned(a @ as_i64::I8MIN...as_i64::I8MAX), I8(_)) => I8(a as i8), + (InferSigned(a @ as_i64::I16MIN...as_i64::I16MAX), I16(_)) => I16(a as i16), + (InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), I32(_)) => I32(a as i32), + (InferSigned(a), I64(_)) => I64(a), + (InferSigned(a @ as_i64::I16MIN...as_i64::I16MAX), Isize(Is16(_))) => { + Isize(Is16(a as i16)) + }, + (InferSigned(a @ as_i64::I32MIN...as_i64::I32MAX), Isize(Is32(_))) => { + Isize(Is32(a 
as i32)) + }, + (InferSigned(a), Isize(Is64(_))) => Isize(Is64(a)), + (InferSigned(a @ 0...as_i64::U8MAX), U8(_)) => U8(a as u8), + (InferSigned(a @ 0...as_i64::U16MAX), U16(_)) => U16(a as u16), + (InferSigned(a @ 0...as_i64::U32MAX), U32(_)) => U32(a as u32), + (InferSigned(a @ 0...as_i64::I64MAX), U64(_)) => U64(a as u64), + (InferSigned(a @ 0...as_i64::U16MAX), Usize(Us16(_))) => Usize(Us16(a as u16)), + (InferSigned(a @ 0...as_i64::U32MAX), Usize(Us32(_))) => Usize(Us32(a as u32)), + (InferSigned(a @ 0...as_i64::I64MAX), Usize(Us64(_))) => Usize(Us64(a as u64)), + (InferSigned(_), _) => return Err(ConstMathErr::NotInRange), + _ => self, // already known types + }; + Ok((inferred, other)) + } + + /// Turn this value into an `Infer` or an `InferSigned` + pub fn erase_type(self) -> Self { + match self { + Infer(i) => Infer(i), + InferSigned(i) if i < 0 => InferSigned(i), + I8(i) if i < 0 => InferSigned(i as i64), + I16(i) if i < 0 => InferSigned(i as i64), + I32(i) if i < 0 => InferSigned(i as i64), + I64(i) if i < 0 => InferSigned(i as i64), + Isize(Is16(i)) if i < 0 => InferSigned(i as i64), + Isize(Is32(i)) if i < 0 => InferSigned(i as i64), + Isize(Is64(i)) if i < 0 => InferSigned(i as i64), + InferSigned(i) => Infer(i as u64), + I8(i) => Infer(i as u64), + I16(i) => Infer(i as u64), + I32(i) => Infer(i as u64), + I64(i) => Infer(i as u64), + Isize(Is16(i)) => Infer(i as u64), + Isize(Is32(i)) => Infer(i as u64), + Isize(Is64(i)) => Infer(i as u64), + U8(i) => Infer(i as u64), + U16(i) => Infer(i as u64), + U32(i) => Infer(i as u64), + U64(i) => Infer(i as u64), + Usize(Us16(i)) => Infer(i as u64), + Usize(Us32(i)) => Infer(i as u64), + Usize(Us64(i)) => Infer(i), + } + } + + /// Description of the type, not the value + pub fn description(&self) -> &'static str { + match *self { + Infer(_) => "not yet inferred integral", + InferSigned(_) => "not yet inferred signed integral", + I8(_) => "i8", + I16(_) => "i16", + I32(_) => "i32", + I64(_) => "i64", + Isize(_) 
=> "isize", + U8(_) => "u8", + U16(_) => "u16", + U32(_) => "u32", + U64(_) => "u64", + Usize(_) => "usize", + } + } + + /// Erases the type and returns a u64. + /// This is not the same as `-5i8 as u64` but as `-5i8 as i64 as u64` + pub fn to_u64_unchecked(self) -> u64 { + match self.erase_type() { + ConstInt::Infer(i) => i, + ConstInt::InferSigned(i) => i as u64, + _ => unreachable!(), + } + } + + /// Converts the value to a `u32` if it's in the range 0...std::u32::MAX + pub fn to_u32(&self) -> Option { + match *self { + I8(v) if v >= 0 => Some(v as u32), + I16(v) if v >= 0 => Some(v as u32), + I32(v) if v >= 0 => Some(v as u32), + InferSigned(v) + | Isize(Is64(v)) + | I64(v) if v >= 0 && v <= ::std::u32::MAX as i64 => Some(v as u32), + Isize(Is32(v)) if v >= 0 => Some(v as u32), + Isize(Is16(v)) if v >= 0 => Some(v as u32), + U8(v) => Some(v as u32), + U16(v) => Some(v as u32), + U32(v) => Some(v), + Infer(v) + | Usize(Us64(v)) + | U64(v) if v <= ::std::u32::MAX as u64 => Some(v as u32), + Usize(Us32(v)) => Some(v), + Usize(Us16(v)) => Some(v as u32), + _ => None, + } + } + + /// Converts the value to a `u64` if it's >= 0 + pub fn to_u64(&self) -> Option { + match *self { + Infer(v) => Some(v), + InferSigned(v) if v >= 0 => Some(v as u64), + I8(v) if v >= 0 => Some(v as u64), + I16(v) if v >= 0 => Some(v as u64), + I32(v) if v >= 0 => Some(v as u64), + I64(v) if v >= 0 => Some(v as u64), + Isize(Is16(v)) if v >= 0 => Some(v as u64), + Isize(Is32(v)) if v >= 0 => Some(v as u64), + Isize(Is64(v)) if v >= 0 => Some(v as u64), + U8(v) => Some(v as u64), + U16(v) => Some(v as u64), + U32(v) => Some(v as u64), + U64(v) => Some(v), + Usize(Us16(v)) => Some(v as u64), + Usize(Us32(v)) => Some(v as u64), + Usize(Us64(v)) => Some(v), + _ => None, + } + } + + pub fn is_negative(&self) -> bool { + match *self { + I8(v) => v < 0, + I16(v) => v < 0, + I32(v) => v < 0, + I64(v) => v < 0, + Isize(Is16(v)) => v < 0, + Isize(Is32(v)) => v < 0, + Isize(Is64(v)) => v < 0, + 
InferSigned(v) => v < 0, + _ => false, + } + } + + /// Compares the values if they are of the same type + pub fn try_cmp(self, rhs: Self) -> Result<::std::cmp::Ordering, ConstMathErr> { + match self.infer(rhs)? { + (I8(a), I8(b)) => Ok(a.cmp(&b)), + (I16(a), I16(b)) => Ok(a.cmp(&b)), + (I32(a), I32(b)) => Ok(a.cmp(&b)), + (I64(a), I64(b)) => Ok(a.cmp(&b)), + (Isize(Is16(a)), Isize(Is16(b))) => Ok(a.cmp(&b)), + (Isize(Is32(a)), Isize(Is32(b))) => Ok(a.cmp(&b)), + (Isize(Is64(a)), Isize(Is64(b))) => Ok(a.cmp(&b)), + (U8(a), U8(b)) => Ok(a.cmp(&b)), + (U16(a), U16(b)) => Ok(a.cmp(&b)), + (U32(a), U32(b)) => Ok(a.cmp(&b)), + (U64(a), U64(b)) => Ok(a.cmp(&b)), + (Usize(Us16(a)), Usize(Us16(b))) => Ok(a.cmp(&b)), + (Usize(Us32(a)), Usize(Us32(b))) => Ok(a.cmp(&b)), + (Usize(Us64(a)), Usize(Us64(b))) => Ok(a.cmp(&b)), + (Infer(a), Infer(b)) => Ok(a.cmp(&b)), + (InferSigned(a), InferSigned(b)) => Ok(a.cmp(&b)), + _ => Err(CmpBetweenUnequalTypes), + } + } + + /// Adds 1 to the value and wraps around if the maximum for the type is reached + pub fn wrap_incr(self) -> Self { + macro_rules! 
add1 { + ($e:expr) => { ($e).wrapping_add(1) } + } + match self { + ConstInt::I8(i) => ConstInt::I8(add1!(i)), + ConstInt::I16(i) => ConstInt::I16(add1!(i)), + ConstInt::I32(i) => ConstInt::I32(add1!(i)), + ConstInt::I64(i) => ConstInt::I64(add1!(i)), + ConstInt::Isize(ConstIsize::Is16(i)) => ConstInt::Isize(ConstIsize::Is16(add1!(i))), + ConstInt::Isize(ConstIsize::Is32(i)) => ConstInt::Isize(ConstIsize::Is32(add1!(i))), + ConstInt::Isize(ConstIsize::Is64(i)) => ConstInt::Isize(ConstIsize::Is64(add1!(i))), + ConstInt::U8(i) => ConstInt::U8(add1!(i)), + ConstInt::U16(i) => ConstInt::U16(add1!(i)), + ConstInt::U32(i) => ConstInt::U32(add1!(i)), + ConstInt::U64(i) => ConstInt::U64(add1!(i)), + ConstInt::Usize(ConstUsize::Us16(i)) => ConstInt::Usize(ConstUsize::Us16(add1!(i))), + ConstInt::Usize(ConstUsize::Us32(i)) => ConstInt::Usize(ConstUsize::Us32(add1!(i))), + ConstInt::Usize(ConstUsize::Us64(i)) => ConstInt::Usize(ConstUsize::Us64(add1!(i))), + ConstInt::Infer(_) | ConstInt::InferSigned(_) => panic!("no type info for const int"), + } + } + + pub fn int_type(self) -> Option { + match self { + ConstInt::I8(_) => Some(IntType::SignedInt(IntTy::I8)), + ConstInt::I16(_) => Some(IntType::SignedInt(IntTy::I16)), + ConstInt::I32(_) => Some(IntType::SignedInt(IntTy::I32)), + ConstInt::I64(_) => Some(IntType::SignedInt(IntTy::I64)), + ConstInt::Isize(_) => Some(IntType::SignedInt(IntTy::Is)), + ConstInt::U8(_) => Some(IntType::UnsignedInt(UintTy::U8)), + ConstInt::U16(_) => Some(IntType::UnsignedInt(UintTy::U16)), + ConstInt::U32(_) => Some(IntType::UnsignedInt(UintTy::U32)), + ConstInt::U64(_) => Some(IntType::UnsignedInt(UintTy::U64)), + ConstInt::Usize(_) => Some(IntType::UnsignedInt(UintTy::Us)), + _ => None, + } + } +} + +impl ::std::cmp::PartialOrd for ConstInt { + fn partial_cmp(&self, other: &Self) -> Option { + self.try_cmp(*other).ok() + } +} + +impl ::std::cmp::Ord for ConstInt { + fn cmp(&self, other: &Self) -> Ordering { + self.try_cmp(*other).unwrap() + } +} 
+ +impl ::std::fmt::Display for ConstInt { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + match *self { + Infer(i) => write!(fmt, "{}", i), + InferSigned(i) => write!(fmt, "{}", i), + I8(i) => write!(fmt, "{}i8", i), + I16(i) => write!(fmt, "{}i16", i), + I32(i) => write!(fmt, "{}i32", i), + I64(i) => write!(fmt, "{}i64", i), + Isize(ConstIsize::Is64(i)) => write!(fmt, "{}isize", i), + Isize(ConstIsize::Is32(i)) => write!(fmt, "{}isize", i), + Isize(ConstIsize::Is16(i)) => write!(fmt, "{}isize", i), + U8(i) => write!(fmt, "{}u8", i), + U16(i) => write!(fmt, "{}u16", i), + U32(i) => write!(fmt, "{}u32", i), + U64(i) => write!(fmt, "{}u64", i), + Usize(ConstUsize::Us64(i)) => write!(fmt, "{}usize", i), + Usize(ConstUsize::Us32(i)) => write!(fmt, "{}usize", i), + Usize(ConstUsize::Us16(i)) => write!(fmt, "{}usize", i), + } + } +} + +macro_rules! overflowing { + ($e:expr, $err:expr) => {{ + if $e.1 { + return Err(Overflow($err)); + } else { + $e.0 + } + }} +} + +macro_rules! impl_binop { + ($op:ident, $func:ident, $checked_func:ident) => { + impl ::std::ops::$op for ConstInt { + type Output = Result; + fn $func(self, rhs: Self) -> Result { + match self.infer(rhs)? 
{ + (I8(a), I8(b)) => a.$checked_func(b).map(I8), + (I16(a), I16(b)) => a.$checked_func(b).map(I16), + (I32(a), I32(b)) => a.$checked_func(b).map(I32), + (I64(a), I64(b)) => a.$checked_func(b).map(I64), + (Isize(Is16(a)), Isize(Is16(b))) => a.$checked_func(b).map(Is16).map(Isize), + (Isize(Is32(a)), Isize(Is32(b))) => a.$checked_func(b).map(Is32).map(Isize), + (Isize(Is64(a)), Isize(Is64(b))) => a.$checked_func(b).map(Is64).map(Isize), + (U8(a), U8(b)) => a.$checked_func(b).map(U8), + (U16(a), U16(b)) => a.$checked_func(b).map(U16), + (U32(a), U32(b)) => a.$checked_func(b).map(U32), + (U64(a), U64(b)) => a.$checked_func(b).map(U64), + (Usize(Us16(a)), Usize(Us16(b))) => a.$checked_func(b).map(Us16).map(Usize), + (Usize(Us32(a)), Usize(Us32(b))) => a.$checked_func(b).map(Us32).map(Usize), + (Usize(Us64(a)), Usize(Us64(b))) => a.$checked_func(b).map(Us64).map(Usize), + (Infer(a), Infer(b)) => a.$checked_func(b).map(Infer), + (InferSigned(a), InferSigned(b)) => a.$checked_func(b).map(InferSigned), + _ => return Err(UnequalTypes(Op::$op)), + }.ok_or(Overflow(Op::$op)) + } + } + } +} + +macro_rules! derive_binop { + ($op:ident, $func:ident) => { + impl ::std::ops::$op for ConstInt { + type Output = Result; + fn $func(self, rhs: Self) -> Result { + match self.infer(rhs)? 
{ + (I8(a), I8(b)) => Ok(I8(a.$func(b))), + (I16(a), I16(b)) => Ok(I16(a.$func(b))), + (I32(a), I32(b)) => Ok(I32(a.$func(b))), + (I64(a), I64(b)) => Ok(I64(a.$func(b))), + (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a.$func(b)))), + (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a.$func(b)))), + (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a.$func(b)))), + (U8(a), U8(b)) => Ok(U8(a.$func(b))), + (U16(a), U16(b)) => Ok(U16(a.$func(b))), + (U32(a), U32(b)) => Ok(U32(a.$func(b))), + (U64(a), U64(b)) => Ok(U64(a.$func(b))), + (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a.$func(b)))), + (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a.$func(b)))), + (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a.$func(b)))), + (Infer(a), Infer(b)) => Ok(Infer(a.$func(b))), + (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a.$func(b))), + _ => Err(UnequalTypes(Op::$op)), + } + } + } + } +} + +impl_binop!(Add, add, checked_add); +impl_binop!(Sub, sub, checked_sub); +impl_binop!(Mul, mul, checked_mul); +derive_binop!(BitAnd, bitand); +derive_binop!(BitOr, bitor); +derive_binop!(BitXor, bitxor); + +fn check_division( + lhs: ConstInt, + rhs: ConstInt, + op: Op, + zerr: ConstMathErr, +) -> Result<(), ConstMathErr> { + match (lhs, rhs) { + (I8(_), I8(0)) => Err(zerr), + (I16(_), I16(0)) => Err(zerr), + (I32(_), I32(0)) => Err(zerr), + (I64(_), I64(0)) => Err(zerr), + (Isize(_), Isize(Is16(0))) => Err(zerr), + (Isize(_), Isize(Is32(0))) => Err(zerr), + (Isize(_), Isize(Is64(0))) => Err(zerr), + (InferSigned(_), InferSigned(0)) => Err(zerr), + + (U8(_), U8(0)) => Err(zerr), + (U16(_), U16(0)) => Err(zerr), + (U32(_), U32(0)) => Err(zerr), + (U64(_), U64(0)) => Err(zerr), + (Usize(_), Usize(Us16(0))) => Err(zerr), + (Usize(_), Usize(Us32(0))) => Err(zerr), + (Usize(_), Usize(Us64(0))) => Err(zerr), + (Infer(_), Infer(0)) => Err(zerr), + + (I8(::std::i8::MIN), I8(-1)) => Err(Overflow(op)), + (I16(::std::i16::MIN), I16(-1)) => Err(Overflow(op)), + 
(I32(::std::i32::MIN), I32(-1)) => Err(Overflow(op)), + (I64(::std::i64::MIN), I64(-1)) => Err(Overflow(op)), + (Isize(Is16(::std::i16::MIN)), Isize(Is16(-1))) => Err(Overflow(op)), + (Isize(Is32(::std::i32::MIN)), Isize(Is32(-1))) => Err(Overflow(op)), + (Isize(Is64(::std::i64::MIN)), Isize(Is64(-1))) => Err(Overflow(op)), + (InferSigned(::std::i64::MIN), InferSigned(-1)) => Err(Overflow(op)), + + _ => Ok(()), + } +} + +impl ::std::ops::Div for ConstInt { + type Output = Result; + fn div(self, rhs: Self) -> Result { + let (lhs, rhs) = self.infer(rhs)?; + check_division(lhs, rhs, Op::Div, DivisionByZero)?; + match (lhs, rhs) { + (I8(a), I8(b)) => Ok(I8(a/b)), + (I16(a), I16(b)) => Ok(I16(a/b)), + (I32(a), I32(b)) => Ok(I32(a/b)), + (I64(a), I64(b)) => Ok(I64(a/b)), + (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a/b))), + (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a/b))), + (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a/b))), + (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a/b)), + + (U8(a), U8(b)) => Ok(U8(a/b)), + (U16(a), U16(b)) => Ok(U16(a/b)), + (U32(a), U32(b)) => Ok(U32(a/b)), + (U64(a), U64(b)) => Ok(U64(a/b)), + (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a/b))), + (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a/b))), + (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a/b))), + (Infer(a), Infer(b)) => Ok(Infer(a/b)), + + _ => Err(UnequalTypes(Op::Div)), + } + } +} + +impl ::std::ops::Rem for ConstInt { + type Output = Result; + fn rem(self, rhs: Self) -> Result { + let (lhs, rhs) = self.infer(rhs)?; + // should INT_MIN%-1 be zero or an error? 
+ check_division(lhs, rhs, Op::Rem, RemainderByZero)?; + match (lhs, rhs) { + (I8(a), I8(b)) => Ok(I8(a%b)), + (I16(a), I16(b)) => Ok(I16(a%b)), + (I32(a), I32(b)) => Ok(I32(a%b)), + (I64(a), I64(b)) => Ok(I64(a%b)), + (Isize(Is16(a)), Isize(Is16(b))) => Ok(Isize(Is16(a%b))), + (Isize(Is32(a)), Isize(Is32(b))) => Ok(Isize(Is32(a%b))), + (Isize(Is64(a)), Isize(Is64(b))) => Ok(Isize(Is64(a%b))), + (InferSigned(a), InferSigned(b)) => Ok(InferSigned(a%b)), + + (U8(a), U8(b)) => Ok(U8(a%b)), + (U16(a), U16(b)) => Ok(U16(a%b)), + (U32(a), U32(b)) => Ok(U32(a%b)), + (U64(a), U64(b)) => Ok(U64(a%b)), + (Usize(Us16(a)), Usize(Us16(b))) => Ok(Usize(Us16(a%b))), + (Usize(Us32(a)), Usize(Us32(b))) => Ok(Usize(Us32(a%b))), + (Usize(Us64(a)), Usize(Us64(b))) => Ok(Usize(Us64(a%b))), + (Infer(a), Infer(b)) => Ok(Infer(a%b)), + + _ => Err(UnequalTypes(Op::Rem)), + } + } +} + +impl ::std::ops::Shl for ConstInt { + type Output = Result; + fn shl(self, rhs: Self) -> Result { + let b = rhs.to_u32().ok_or(ShiftNegative)?; + match self { + I8(a) => Ok(I8(overflowing!(a.overflowing_shl(b), Op::Shl))), + I16(a) => Ok(I16(overflowing!(a.overflowing_shl(b), Op::Shl))), + I32(a) => Ok(I32(overflowing!(a.overflowing_shl(b), Op::Shl))), + I64(a) => Ok(I64(overflowing!(a.overflowing_shl(b), Op::Shl))), + Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shl(b), Op::Shl)))), + Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shl(b), Op::Shl)))), + Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shl(b), Op::Shl)))), + U8(a) => Ok(U8(overflowing!(a.overflowing_shl(b), Op::Shl))), + U16(a) => Ok(U16(overflowing!(a.overflowing_shl(b), Op::Shl))), + U32(a) => Ok(U32(overflowing!(a.overflowing_shl(b), Op::Shl))), + U64(a) => Ok(U64(overflowing!(a.overflowing_shl(b), Op::Shl))), + Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shl(b), Op::Shl)))), + Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shl(b), Op::Shl)))), + Usize(Us64(a)) => 
Ok(Usize(Us64(overflowing!(a.overflowing_shl(b), Op::Shl)))), + Infer(a) => Ok(Infer(overflowing!(a.overflowing_shl(b), Op::Shl))), + InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_shl(b), Op::Shl))), + } + } +} + +impl ::std::ops::Shr for ConstInt { + type Output = Result; + fn shr(self, rhs: Self) -> Result { + let b = rhs.to_u32().ok_or(ShiftNegative)?; + match self { + I8(a) => Ok(I8(overflowing!(a.overflowing_shr(b), Op::Shr))), + I16(a) => Ok(I16(overflowing!(a.overflowing_shr(b), Op::Shr))), + I32(a) => Ok(I32(overflowing!(a.overflowing_shr(b), Op::Shr))), + I64(a) => Ok(I64(overflowing!(a.overflowing_shr(b), Op::Shr))), + Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_shr(b), Op::Shr)))), + Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_shr(b), Op::Shr)))), + Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_shr(b), Op::Shr)))), + U8(a) => Ok(U8(overflowing!(a.overflowing_shr(b), Op::Shr))), + U16(a) => Ok(U16(overflowing!(a.overflowing_shr(b), Op::Shr))), + U32(a) => Ok(U32(overflowing!(a.overflowing_shr(b), Op::Shr))), + U64(a) => Ok(U64(overflowing!(a.overflowing_shr(b), Op::Shr))), + Usize(Us16(a)) => Ok(Usize(Us16(overflowing!(a.overflowing_shr(b), Op::Shr)))), + Usize(Us32(a)) => Ok(Usize(Us32(overflowing!(a.overflowing_shr(b), Op::Shr)))), + Usize(Us64(a)) => Ok(Usize(Us64(overflowing!(a.overflowing_shr(b), Op::Shr)))), + Infer(a) => Ok(Infer(overflowing!(a.overflowing_shr(b), Op::Shr))), + InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_shr(b), Op::Shr))), + } + } +} + +impl ::std::ops::Neg for ConstInt { + type Output = Result; + fn neg(self) -> Result { + match self { + I8(a) => Ok(I8(overflowing!(a.overflowing_neg(), Op::Neg))), + I16(a) => Ok(I16(overflowing!(a.overflowing_neg(), Op::Neg))), + I32(a) => Ok(I32(overflowing!(a.overflowing_neg(), Op::Neg))), + I64(a) => Ok(I64(overflowing!(a.overflowing_neg(), Op::Neg))), + Isize(Is16(a)) => Ok(Isize(Is16(overflowing!(a.overflowing_neg(), 
Op::Neg)))), + Isize(Is32(a)) => Ok(Isize(Is32(overflowing!(a.overflowing_neg(), Op::Neg)))), + Isize(Is64(a)) => Ok(Isize(Is64(overflowing!(a.overflowing_neg(), Op::Neg)))), + U8(0) => Ok(U8(0)), + U16(0) => Ok(U16(0)), + U32(0) => Ok(U32(0)), + U64(0) => Ok(U64(0)), + Usize(Us16(0)) => Ok(Usize(Us16(0))), + Usize(Us32(0)) => Ok(Usize(Us32(0))), + Usize(Us64(0)) => Ok(Usize(Us64(0))), + U8(_) => Err(UnsignedNegation), + U16(_) => Err(UnsignedNegation), + U32(_) => Err(UnsignedNegation), + U64(_) => Err(UnsignedNegation), + Usize(_) => Err(UnsignedNegation), + Infer(a @ 0...as_u64::I64MAX) => Ok(InferSigned(-(a as i64))), + Infer(_) => Err(Overflow(Op::Neg)), + InferSigned(a) => Ok(InferSigned(overflowing!(a.overflowing_neg(), Op::Neg))), + } + } +} + +impl ::std::ops::Not for ConstInt { + type Output = Result; + fn not(self) -> Result { + match self { + I8(a) => Ok(I8(!a)), + I16(a) => Ok(I16(!a)), + I32(a) => Ok(I32(!a)), + I64(a) => Ok(I64(!a)), + Isize(Is16(a)) => Ok(Isize(Is16(!a))), + Isize(Is32(a)) => Ok(Isize(Is32(!a))), + Isize(Is64(a)) => Ok(Isize(Is64(!a))), + U8(a) => Ok(U8(!a)), + U16(a) => Ok(U16(!a)), + U32(a) => Ok(U32(!a)), + U64(a) => Ok(U64(!a)), + Usize(Us16(a)) => Ok(Usize(Us16(!a))), + Usize(Us32(a)) => Ok(Usize(Us32(!a))), + Usize(Us64(a)) => Ok(Usize(Us64(!a))), + Infer(a) => Ok(Infer(!a)), + InferSigned(a) => Ok(InferSigned(!a)), + } + } +} diff --git a/src/librustc_const_math/is.rs b/src/librustc_const_math/is.rs new file mode 100644 index 0000000000000..ef92b628523e7 --- /dev/null +++ b/src/librustc_const_math/is.rs @@ -0,0 +1,52 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use syntax::ast; +use super::err::*; + +/// Depending on the target only one variant is ever used in a compilation. +/// Anything else is an error. This invariant is checked at several locations +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] +pub enum ConstIsize { + Is16(i16), + Is32(i32), + Is64(i64), +} +pub use self::ConstIsize::*; + +impl ConstIsize { + pub fn as_i64(self, target_int_ty: ast::IntTy) -> i64 { + match (self, target_int_ty) { + (Is16(i), ast::IntTy::I16) => i as i64, + (Is32(i), ast::IntTy::I32) => i as i64, + (Is64(i), ast::IntTy::I64) => i, + _ => panic!("unable to convert self ({:?}) to target isize ({:?})", + self, target_int_ty), + } + } + pub fn new(i: i64, target_int_ty: ast::IntTy) -> Result { + match target_int_ty { + ast::IntTy::I16 if i as i16 as i64 == i => Ok(Is16(i as i16)), + ast::IntTy::I16 => Err(LitOutOfRange(ast::IntTy::Is)), + ast::IntTy::I32 if i as i32 as i64 == i => Ok(Is32(i as i32)), + ast::IntTy::I32 => Err(LitOutOfRange(ast::IntTy::Is)), + ast::IntTy::I64 => Ok(Is64(i)), + _ => unreachable!(), + } + } + pub fn new_truncating(i: i64, target_int_ty: ast::IntTy) -> Self { + match target_int_ty { + ast::IntTy::I16 => Is16(i as i16), + ast::IntTy::I32 => Is32(i as i32), + ast::IntTy::I64 => Is64(i), + _ => unreachable!(), + } + } +} diff --git a/src/librustc_const_math/lib.rs b/src/librustc_const_math/lib.rs new file mode 100644 index 0000000000000..f667ff23b27a6 --- /dev/null +++ b/src/librustc_const_math/lib.rs @@ -0,0 +1,44 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Rusty Mathematics +//! +//! # Note +//! +//! 
This API is completely unstable and subject to change. + +#![crate_name = "rustc_const_math"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + + +#![feature(rustc_private)] +#![feature(staged_api)] + +#[macro_use] extern crate log; +#[macro_use] extern crate syntax; + +extern crate serialize as rustc_serialize; // used by deriving + +mod float; +mod int; +mod us; +mod is; +mod err; + +pub use float::*; +pub use int::*; +pub use us::*; +pub use is::*; +pub use err::{ConstMathErr, Op}; diff --git a/src/librustc_const_math/us.rs b/src/librustc_const_math/us.rs new file mode 100644 index 0000000000000..bf73ff03c9895 --- /dev/null +++ b/src/librustc_const_math/us.rs @@ -0,0 +1,52 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax::ast; +use super::err::*; + +/// Depending on the target only one variant is ever used in a compilation. +/// Anything else is an error. 
This invariant is checked at several locations +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, Hash, Eq, PartialEq)] +pub enum ConstUsize { + Us16(u16), + Us32(u32), + Us64(u64), +} +pub use self::ConstUsize::*; + +impl ConstUsize { + pub fn as_u64(self, target_uint_ty: ast::UintTy) -> u64 { + match (self, target_uint_ty) { + (Us16(i), ast::UintTy::U16) => i as u64, + (Us32(i), ast::UintTy::U32) => i as u64, + (Us64(i), ast::UintTy::U64) => i, + _ => panic!("unable to convert self ({:?}) to target usize ({:?})", + self, target_uint_ty), + } + } + pub fn new(i: u64, target_uint_ty: ast::UintTy) -> Result { + match target_uint_ty { + ast::UintTy::U16 if i as u16 as u64 == i => Ok(Us16(i as u16)), + ast::UintTy::U16 => Err(ULitOutOfRange(ast::UintTy::Us)), + ast::UintTy::U32 if i as u32 as u64 == i => Ok(Us32(i as u32)), + ast::UintTy::U32 => Err(ULitOutOfRange(ast::UintTy::Us)), + ast::UintTy::U64 => Ok(Us64(i)), + _ => unreachable!(), + } + } + pub fn new_truncating(i: u64, target_uint_ty: ast::UintTy) -> Self { + match target_uint_ty { + ast::UintTy::U16 => Us16(i as u16), + ast::UintTy::U32 => Us32(i as u32), + ast::UintTy::U64 => Us64(i), + _ => unreachable!(), + } + } +} diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml new file mode 100644 index 0000000000000..e2e16059d9871 --- /dev/null +++ b/src/librustc_data_structures/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_data_structures" +version = "0.0.0" + +[lib] +name = "rustc_data_structures" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +serialize = { path = "../libserialize" } diff --git a/src/librustc_data_structures/accumulate_vec.rs b/src/librustc_data_structures/accumulate_vec.rs new file mode 100644 index 0000000000000..937cb3f600746 --- /dev/null +++ b/src/librustc_data_structures/accumulate_vec.rs @@ -0,0 +1,198 @@ +// Copyright 2016 The Rust Project 
Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A vector type intended to be used for collecting from iterators onto the stack. +//! +//! Space for up to N elements is provided on the stack. If more elements are collected, Vec is +//! used to store the values on the heap. +//! +//! The N above is determined by Array's implementor, by way of an associatated constant. + +use std::ops::{Deref, DerefMut}; +use std::iter::{self, IntoIterator, FromIterator}; +use std::slice; +use std::vec; + +use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + +use array_vec::{self, Array, ArrayVec}; + +#[derive(PartialEq, Eq, Hash, Debug)] +pub enum AccumulateVec { + Array(ArrayVec), + Heap(Vec) +} + +impl Clone for AccumulateVec + where A: Array, + A::Element: Clone { + fn clone(&self) -> Self { + match *self { + AccumulateVec::Array(ref arr) => AccumulateVec::Array(arr.clone()), + AccumulateVec::Heap(ref vec) => AccumulateVec::Heap(vec.clone()), + } + } +} + +impl AccumulateVec { + pub fn new() -> AccumulateVec { + AccumulateVec::Array(ArrayVec::new()) + } + + pub fn one(el: A::Element) -> Self { + iter::once(el).collect() + } + + pub fn many>(iter: I) -> Self { + iter.into_iter().collect() + } + + pub fn len(&self) -> usize { + match *self { + AccumulateVec::Array(ref arr) => arr.len(), + AccumulateVec::Heap(ref vec) => vec.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn pop(&mut self) -> Option { + match *self { + AccumulateVec::Array(ref mut arr) => arr.pop(), + AccumulateVec::Heap(ref mut vec) => vec.pop(), + } + } +} + +impl Deref for AccumulateVec { + type Target = [A::Element]; + fn deref(&self) -> &Self::Target { + match *self { + 
AccumulateVec::Array(ref v) => &v[..], + AccumulateVec::Heap(ref v) => &v[..], + } + } +} + +impl DerefMut for AccumulateVec { + fn deref_mut(&mut self) -> &mut [A::Element] { + match *self { + AccumulateVec::Array(ref mut v) => &mut v[..], + AccumulateVec::Heap(ref mut v) => &mut v[..], + } + } +} + +impl FromIterator for AccumulateVec { + fn from_iter(iter: I) -> AccumulateVec where I: IntoIterator { + let iter = iter.into_iter(); + if iter.size_hint().1.map_or(false, |n| n <= A::LEN) { + let mut v = ArrayVec::new(); + v.extend(iter); + AccumulateVec::Array(v) + } else { + AccumulateVec::Heap(iter.collect()) + } + } +} + +pub struct IntoIter { + repr: IntoIterRepr, +} + +enum IntoIterRepr { + Array(array_vec::Iter), + Heap(vec::IntoIter), +} + +impl Iterator for IntoIter { + type Item = A::Element; + + fn next(&mut self) -> Option { + match self.repr { + IntoIterRepr::Array(ref mut arr) => arr.next(), + IntoIterRepr::Heap(ref mut iter) => iter.next(), + } + } + + fn size_hint(&self) -> (usize, Option) { + match self.repr { + IntoIterRepr::Array(ref iter) => iter.size_hint(), + IntoIterRepr::Heap(ref iter) => iter.size_hint(), + } + } +} + +impl IntoIterator for AccumulateVec { + type Item = A::Element; + type IntoIter = IntoIter; + fn into_iter(self) -> Self::IntoIter { + IntoIter { + repr: match self { + AccumulateVec::Array(arr) => IntoIterRepr::Array(arr.into_iter()), + AccumulateVec::Heap(vec) => IntoIterRepr::Heap(vec.into_iter()), + } + } + } +} + +impl<'a, A: Array> IntoIterator for &'a AccumulateVec { + type Item = &'a A::Element; + type IntoIter = slice::Iter<'a, A::Element>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, A: Array> IntoIterator for &'a mut AccumulateVec { + type Item = &'a mut A::Element; + type IntoIter = slice::IterMut<'a, A::Element>; + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl From> for AccumulateVec { + fn from(v: Vec) -> AccumulateVec { + AccumulateVec::many(v) + } +} + +impl 
Default for AccumulateVec { + fn default() -> AccumulateVec { + AccumulateVec::new() + } +} + +impl Encodable for AccumulateVec + where A: Array, + A::Element: Encodable { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_seq(self.len(), |s| { + for (i, e) in self.iter().enumerate() { + try!(s.emit_seq_elt(i, |s| e.encode(s))); + } + Ok(()) + }) + } +} + +impl Decodable for AccumulateVec + where A: Array, + A::Element: Decodable { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_seq(|d, len| { + Ok(try!((0..len).map(|i| d.read_seq_elt(i, |d| Decodable::decode(d))).collect())) + }) + } +} + diff --git a/src/librustc_data_structures/array_vec.rs b/src/librustc_data_structures/array_vec.rs new file mode 100644 index 0000000000000..631cf2cfcf6db --- /dev/null +++ b/src/librustc_data_structures/array_vec.rs @@ -0,0 +1,235 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A stack-allocated vector, allowing storage of N elements on the stack. 
+ +use std::marker::Unsize; +use std::iter::Extend; +use std::ptr::{self, drop_in_place}; +use std::ops::{Deref, DerefMut, Range}; +use std::hash::{Hash, Hasher}; +use std::slice; +use std::fmt; +use std::mem; + +pub unsafe trait Array { + type Element; + type PartialStorage: Default + Unsize<[ManuallyDrop]>; + const LEN: usize; +} + +unsafe impl Array for [T; 1] { + type Element = T; + type PartialStorage = [ManuallyDrop; 1]; + const LEN: usize = 1; +} + +unsafe impl Array for [T; 8] { + type Element = T; + type PartialStorage = [ManuallyDrop; 8]; + const LEN: usize = 8; +} + +pub struct ArrayVec { + count: usize, + values: A::PartialStorage +} + +impl Hash for ArrayVec + where A: Array, + A::Element: Hash { + fn hash(&self, state: &mut H) where H: Hasher { + (&self[..]).hash(state); + } +} + +impl PartialEq for ArrayVec { + fn eq(&self, other: &Self) -> bool { + self == other + } +} + +impl Eq for ArrayVec {} + +impl Clone for ArrayVec + where A: Array, + A::Element: Clone { + fn clone(&self) -> Self { + let mut v = ArrayVec::new(); + v.extend(self.iter().cloned()); + v + } +} + +impl ArrayVec { + pub fn new() -> Self { + ArrayVec { + count: 0, + values: Default::default(), + } + } + + pub fn len(&self) -> usize { + self.count + } + + pub unsafe fn set_len(&mut self, len: usize) { + self.count = len; + } + + /// Panics when the stack vector is full. 
+ pub fn push(&mut self, el: A::Element) { + let arr = &mut self.values as &mut [ManuallyDrop<_>]; + arr[self.count] = ManuallyDrop { value: el }; + self.count += 1; + } + + pub fn pop(&mut self) -> Option { + if self.count > 0 { + let arr = &mut self.values as &mut [ManuallyDrop<_>]; + self.count -= 1; + unsafe { + let value = ptr::read(&arr[self.count]); + Some(value.value) + } + } else { + None + } + } +} + +impl Default for ArrayVec + where A: Array { + fn default() -> Self { + ArrayVec::new() + } +} + +impl fmt::Debug for ArrayVec + where A: Array, + A::Element: fmt::Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self[..].fmt(f) + } +} + +impl Deref for ArrayVec { + type Target = [A::Element]; + fn deref(&self) -> &Self::Target { + unsafe { + slice::from_raw_parts(&self.values as *const _ as *const A::Element, self.count) + } + } +} + +impl DerefMut for ArrayVec { + fn deref_mut(&mut self) -> &mut [A::Element] { + unsafe { + slice::from_raw_parts_mut(&mut self.values as *mut _ as *mut A::Element, self.count) + } + } +} + +impl Drop for ArrayVec { + fn drop(&mut self) { + unsafe { + drop_in_place(&mut self[..]) + } + } +} + +impl Extend for ArrayVec { + fn extend(&mut self, iter: I) where I: IntoIterator { + for el in iter { + self.push(el); + } + } +} + +pub struct Iter { + indices: Range, + store: A::PartialStorage, +} + +impl Drop for Iter { + fn drop(&mut self) { + for _ in self {} + } +} + +impl Iterator for Iter { + type Item = A::Element; + + fn next(&mut self) -> Option { + let arr = &self.store as &[ManuallyDrop<_>]; + unsafe { + self.indices.next().map(|i| ptr::read(&arr[i]).value) + } + } + + fn size_hint(&self) -> (usize, Option) { + self.indices.size_hint() + } +} + +impl IntoIterator for ArrayVec { + type Item = A::Element; + type IntoIter = Iter; + fn into_iter(self) -> Self::IntoIter { + let store = unsafe { + ptr::read(&self.values) + }; + let indices = 0..self.count; + mem::forget(self); + Iter { + indices: indices, + 
store: store, + } + } +} + +impl<'a, A: Array> IntoIterator for &'a ArrayVec { + type Item = &'a A::Element; + type IntoIter = slice::Iter<'a, A::Element>; + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, A: Array> IntoIterator for &'a mut ArrayVec { + type Item = &'a mut A::Element; + type IntoIter = slice::IterMut<'a, A::Element>; + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +// FIXME: This should use repr(transparent) from rust-lang/rfcs#1758. +#[allow(unions_with_drop_fields)] +pub union ManuallyDrop { + value: T, + #[allow(dead_code)] + empty: (), +} + +impl ManuallyDrop { + fn new() -> ManuallyDrop { + ManuallyDrop { + empty: () + } + } +} + +impl Default for ManuallyDrop { + fn default() -> Self { + ManuallyDrop::new() + } +} + diff --git a/src/librustc_data_structures/base_n.rs b/src/librustc_data_structures/base_n.rs new file mode 100644 index 0000000000000..bf3e682f86f69 --- /dev/null +++ b/src/librustc_data_structures/base_n.rs @@ -0,0 +1,64 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/// Convert unsigned integers into a string representation with some base. +/// Bases up to and including 36 can be used for case-insensitive things. 
+ +use std::str; + +pub const MAX_BASE: u64 = 64; +const BASE_64: &'static [u8; MAX_BASE as usize] = + b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@$"; + +#[inline] +pub fn push_str(mut n: u64, base: u64, output: &mut String) { + debug_assert!(base >= 2 && base <= MAX_BASE); + let mut s = [0u8; 64]; + let mut index = 0; + + loop { + s[index] = BASE_64[(n % base) as usize]; + index += 1; + n /= base; + + if n == 0 { + break; + } + } + &mut s[0..index].reverse(); + output.push_str(str::from_utf8(&s[0..index]).unwrap()); +} + +#[inline] +pub fn encode(n: u64, base: u64) -> String { + let mut s = String::with_capacity(13); + push_str(n, base, &mut s); + s +} + +#[test] +fn test_encode() { + fn test(n: u64, base: u64) { + assert_eq!(Ok(n), u64::from_str_radix(&encode(n, base)[..], base as u32)); + } + + for base in 2..37 { + test(0, base); + test(1, base); + test(35, base); + test(36, base); + test(37, base); + test(u64::max_value(), base); + + for i in 0 .. 1_000 { + test(i * 983, base); + } + } +} diff --git a/src/librustc_data_structures/bitslice.rs b/src/librustc_data_structures/bitslice.rs new file mode 100644 index 0000000000000..ba53578e57918 --- /dev/null +++ b/src/librustc_data_structures/bitslice.rs @@ -0,0 +1,142 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// FIXME: merge with `bitvec` + +use std::mem; + +pub type Word = usize; + +/// `BitSlice` provides helper methods for treating a `[Word]` +/// as a bitvector. 
+pub trait BitSlice { + fn clear_bit(&mut self, idx: usize) -> bool; + fn set_bit(&mut self, idx: usize) -> bool; + fn get_bit(&self, idx: usize) -> bool; +} + +impl BitSlice for [Word] { + /// Clears bit at `idx` to 0; returns true iff this changed `self.` + fn clear_bit(&mut self, idx: usize) -> bool { + let words = self; + debug!("clear_bit: words={} idx={}", + bits_to_string(words, words.len() * mem::size_of::()), bit_str(idx)); + let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx); + debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask); + let oldv = words[word]; + let newv = oldv & !bit_mask; + words[word] = newv; + oldv != newv + } + + /// Sets bit at `idx` to 1; returns true iff this changed `self.` + fn set_bit(&mut self, idx: usize) -> bool { + let words = self; + debug!("set_bit: words={} idx={}", + bits_to_string(words, words.len() * mem::size_of::()), bit_str(idx)); + let BitLookup { word, bit_in_word, bit_mask } = bit_lookup(idx); + debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask); + let oldv = words[word]; + let newv = oldv | bit_mask; + words[word] = newv; + oldv != newv + } + + /// Extracts value of bit at `idx` in `self`. + fn get_bit(&self, idx: usize) -> bool { + let words = self; + let BitLookup { word, bit_mask, .. } = bit_lookup(idx); + (words[word] & bit_mask) != 0 + } +} + +struct BitLookup { + /// An index of the word holding the bit in original `[Word]` of query. + word: usize, + /// Index of the particular bit within the word holding the bit. + bit_in_word: usize, + /// Word with single 1-bit set corresponding to where the bit is located. 
+ bit_mask: Word, +} + +#[inline] +fn bit_lookup(bit: usize) -> BitLookup { + let word_bits = mem::size_of::() * 8; + let word = bit / word_bits; + let bit_in_word = bit % word_bits; + let bit_mask = 1 << bit_in_word; + BitLookup { word: word, bit_in_word: bit_in_word, bit_mask: bit_mask } +} + + +fn bit_str(bit: Word) -> String { + let byte = bit >> 3; + let lobits = 1 << (bit & 0b111); + format!("[{}:{}-{:02x}]", bit, byte, lobits) +} + +pub fn bits_to_string(words: &[Word], bits: usize) -> String { + let mut result = String::new(); + let mut sep = '['; + + // Note: this is a little endian printout of bytes. + + // i tracks how many bits we have printed so far. + let mut i = 0; + for &word in words.iter() { + let mut v = word; + loop { // for each byte in `v`: + let remain = bits - i; + // If less than a byte remains, then mask just that many bits. + let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF }; + assert!(mask <= 0xFF); + let byte = v & mask; + + result.push(sep); + result.push_str(&format!("{:02x}", byte)); + + if remain <= 8 { break; } + v >>= 8; + i += 8; + sep = '-'; + } + } + result.push(']'); + return result +} + +#[inline] +pub fn bitwise(out_vec: &mut [usize], + in_vec: &[usize], + op: &Op) -> bool { + assert_eq!(out_vec.len(), in_vec.len()); + let mut changed = false; + for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) { + let old_val = *out_elt; + let new_val = op.join(old_val, *in_elt); + *out_elt = new_val; + changed |= old_val != new_val; + } + changed +} + +pub trait BitwiseOperator { + /// Applies some bit-operation pointwise to each of the bits in the two inputs. 
+ fn join(&self, pred1: usize, pred2: usize) -> usize; +} + +pub struct Union; +impl BitwiseOperator for Union { + fn join(&self, a: usize, b: usize) -> usize { a | b } +} +pub struct Subtract; +impl BitwiseOperator for Subtract { + fn join(&self, a: usize, b: usize) -> usize { a & !b } +} diff --git a/src/librustc_data_structures/bitvec.rs b/src/librustc_data_structures/bitvec.rs index f26307fd8c58e..0dab230f47a2d 100644 --- a/src/librustc_data_structures/bitvec.rs +++ b/src/librustc_data_structures/bitvec.rs @@ -8,9 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::iter::FromIterator; + /// A very simple BitVector type. +#[derive(Clone, Debug, PartialEq)] pub struct BitVector { - data: Vec + data: Vec, } impl BitVector { @@ -19,17 +22,25 @@ impl BitVector { BitVector { data: vec![0; num_words] } } + pub fn clear(&mut self) { + for p in &mut self.data { + *p = 0; + } + } + pub fn contains(&self, bit: usize) -> bool { let (word, mask) = word_mask(bit); (self.data[word] & mask) != 0 } + /// Returns true if the bit has changed. 
pub fn insert(&mut self, bit: usize) -> bool { let (word, mask) = word_mask(bit); let data = &mut self.data[word]; let value = *data; - *data = value | mask; - (value | mask) != value + let new_value = value | mask; + *data = new_value; + new_value != value } pub fn insert_all(&mut self, all: &BitVector) -> bool { @@ -38,53 +49,116 @@ impl BitVector { for (i, j) in self.data.iter_mut().zip(&all.data) { let value = *i; *i = value | *j; - if value != *i { changed = true; } + if value != *i { + changed = true; + } } changed } pub fn grow(&mut self, num_bits: usize) { let num_words = u64s(num_bits); - let extra_words = self.data.len() - num_words; - self.data.extend((0..extra_words).map(|_| 0)); + if self.data.len() < num_words { + self.data.resize(num_words, 0) + } + } + + /// Iterates over indexes of set bits in a sorted order + pub fn iter<'a>(&'a self) -> BitVectorIter<'a> { + BitVectorIter { + iter: self.data.iter(), + current: 0, + idx: 0, + } + } +} + +pub struct BitVectorIter<'a> { + iter: ::std::slice::Iter<'a, u64>, + current: u64, + idx: usize, +} + +impl<'a> Iterator for BitVectorIter<'a> { + type Item = usize; + fn next(&mut self) -> Option { + while self.current == 0 { + self.current = if let Some(&i) = self.iter.next() { + if i == 0 { + self.idx += 64; + continue; + } else { + self.idx = u64s(self.idx) * 64; + i + } + } else { + return None; + } + } + let offset = self.current.trailing_zeros() as usize; + self.current >>= offset; + self.current >>= 1; // shift otherwise overflows for 0b1000_0000_…_0000 + self.idx += offset + 1; + return Some(self.idx - 1); + } +} + +impl FromIterator for BitVector { + fn from_iter(iter: I) -> BitVector where I: IntoIterator { + let iter = iter.into_iter(); + let (len, _) = iter.size_hint(); + // Make the minimum length for the bitvector 64 bits since that's + // the smallest non-zero size anyway. 
+ let len = if len < 64 { 64 } else { len }; + let mut bv = BitVector::new(len); + for (idx, val) in iter.enumerate() { + if idx > len { + bv.grow(idx); + } + if val { + bv.insert(idx); + } + } + + bv } } -/// A "bit matrix" is basically a square matrix of booleans -/// represented as one gigantic bitvector. In other words, it is as if -/// you have N bitvectors, each of length N. Note that `elements` here is `N`/ +/// A "bit matrix" is basically a matrix of booleans represented as +/// one gigantic bitvector. In other words, it is as if you have +/// `rows` bitvectors, each of length `columns`. #[derive(Clone)] pub struct BitMatrix { - elements: usize, + columns: usize, vector: Vec, } impl BitMatrix { - // Create a new `elements x elements` matrix, initially empty. - pub fn new(elements: usize) -> BitMatrix { + // Create a new `rows x columns` matrix, initially empty. + pub fn new(rows: usize, columns: usize) -> BitMatrix { // For every element, we need one bit for every other // element. Round up to an even number of u64s. - let u64s_per_elem = u64s(elements); + let u64s_per_row = u64s(columns); BitMatrix { - elements: elements, - vector: vec![0; elements * u64s_per_elem] + columns: columns, + vector: vec![0; rows * u64s_per_row], } } - /// The range of bits for a given element. - fn range(&self, element: usize) -> (usize, usize) { - let u64s_per_elem = u64s(self.elements); - let start = element * u64s_per_elem; - (start, start + u64s_per_elem) + /// The range of bits for a given row. 
+ fn range(&self, row: usize) -> (usize, usize) { + let u64s_per_row = u64s(self.columns); + let start = row * u64s_per_row; + (start, start + u64s_per_row) } pub fn add(&mut self, source: usize, target: usize) -> bool { let (start, _) = self.range(source); let (word, mask) = word_mask(target); let mut vector = &mut self.vector[..]; - let v1 = vector[start+word]; + let v1 = vector[start + word]; let v2 = v1 | mask; - vector[start+word] = v2; + vector[start + word] = v2; v1 != v2 } @@ -95,7 +169,7 @@ impl BitMatrix { pub fn contains(&self, source: usize, target: usize) -> bool { let (start, _) = self.range(source); let (word, mask) = word_mask(target); - (self.vector[start+word] & mask) != 0 + (self.vector[start + word] & mask) != 0 } /// Returns those indices that are reachable from both `a` and @@ -105,12 +179,16 @@ impl BitMatrix { pub fn intersection(&self, a: usize, b: usize) -> Vec { let (a_start, a_end) = self.range(a); let (b_start, b_end) = self.range(b); - let mut result = Vec::with_capacity(self.elements); + let mut result = Vec::with_capacity(self.columns); for (base, (i, j)) in (a_start..a_end).zip(b_start..b_end).enumerate() { let mut v = self.vector[i] & self.vector[j]; for bit in 0..64 { - if v == 0 { break; } - if v & 0x1 != 0 { result.push(base*64 + bit); } + if v == 0 { + break; + } + if v & 0x1 != 0 { + result.push(base * 64 + bit); + } v >>= 1; } } @@ -129,9 +207,7 @@ impl BitMatrix { let (write_start, write_end) = self.range(write); let vector = &mut self.vector[..]; let mut changed = false; - for (read_index, write_index) in - (read_start..read_end).zip(write_start..write_end) - { + for (read_index, write_index) in (read_start..read_end).zip(write_start..write_end) { let v1 = vector[write_index]; let v2 = v1 | vector[read_index]; vector[write_index] = v2; @@ -139,6 +215,15 @@ impl BitMatrix { } changed } + + pub fn iter<'a>(&'a self, row: usize) -> BitVectorIter<'a> { + let (start, end) = self.range(row); + BitVectorIter { + iter: 
self.vector[start..end].iter(), + current: 0, + idx: 0, + } + } } fn u64s(elements: usize) -> usize { @@ -151,6 +236,34 @@ fn word_mask(index: usize) -> (usize, u64) { (word, mask) } +#[test] +fn bitvec_iter_works() { + let mut bitvec = BitVector::new(100); + bitvec.insert(1); + bitvec.insert(10); + bitvec.insert(19); + bitvec.insert(62); + bitvec.insert(63); + bitvec.insert(64); + bitvec.insert(65); + bitvec.insert(66); + bitvec.insert(99); + assert_eq!(bitvec.iter().collect::>(), + [1, 10, 19, 62, 63, 64, 65, 66, 99]); +} + + +#[test] +fn bitvec_iter_works_2() { + let mut bitvec = BitVector::new(319); + bitvec.insert(0); + bitvec.insert(127); + bitvec.insert(191); + bitvec.insert(255); + bitvec.insert(319); + assert_eq!(bitvec.iter().collect::>(), [0, 127, 191, 255, 319]); +} + #[test] fn union_two_vecs() { let mut vec1 = BitVector::new(65); @@ -171,20 +284,32 @@ fn union_two_vecs() { #[test] fn grow() { let mut vec1 = BitVector::new(65); - assert!(vec1.insert(3)); - assert!(!vec1.insert(3)); - assert!(vec1.insert(5)); - assert!(vec1.insert(64)); + for index in 0 .. 65 { + assert!(vec1.insert(index)); + assert!(!vec1.insert(index)); + } vec1.grow(128); - assert!(vec1.contains(3)); - assert!(vec1.contains(5)); - assert!(vec1.contains(64)); - assert!(!vec1.contains(126)); + + // Check if the bits set before growing are still set + for index in 0 .. 65 { + assert!(vec1.contains(index)); + } + + // Check if the new bits are all un-set + for index in 65 .. 128 { + assert!(!vec1.contains(index)); + } + + // Check that we can set all new bits without running out of bounds + for index in 65 .. 128 { + assert!(vec1.insert(index)); + assert!(!vec1.insert(index)); + } } #[test] fn matrix_intersection() { - let mut vec1 = BitMatrix::new(200); + let mut vec1 = BitMatrix::new(200, 200); // (*) Elements reachable from both 2 and 65. 
@@ -212,3 +337,45 @@ fn matrix_intersection() { let intersection = vec1.intersection(2, 65); assert_eq!(intersection, &[10, 64, 160]); } + +#[test] +fn matrix_iter() { + let mut matrix = BitMatrix::new(64, 100); + matrix.add(3, 22); + matrix.add(3, 75); + matrix.add(2, 99); + matrix.add(4, 0); + matrix.merge(3, 5); + + let expected = [99]; + let mut iter = expected.iter(); + for i in matrix.iter(2) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [22, 75]; + let mut iter = expected.iter(); + for i in matrix.iter(3) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [0]; + let mut iter = expected.iter(); + for i in matrix.iter(4) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); + + let expected = [22, 75]; + let mut iter = expected.iter(); + for i in matrix.iter(5) { + let j = *iter.next().unwrap(); + assert_eq!(i, j); + } + assert!(iter.next().is_none()); +} diff --git a/src/librustc_data_structures/blake2b.rs b/src/librustc_data_structures/blake2b.rs new file mode 100644 index 0000000000000..8c82c135dc426 --- /dev/null +++ b/src/librustc_data_structures/blake2b.rs @@ -0,0 +1,338 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +// An implementation of the Blake2b cryptographic hash function. +// The implementation closely follows: https://tools.ietf.org/html/rfc7693 +// +// "BLAKE2 is a cryptographic hash function faster than MD5, SHA-1, SHA-2, and +// SHA-3, yet is at least as secure as the latest standard SHA-3." 
+// according to their own website :) +// +// Indeed this implementation is two to three times as fast as our SHA-256 +// implementation. If you have the luxury of being able to use crates from +// crates.io, you can go there and find still faster implementations. + +use std::mem; +use std::slice; + +pub struct Blake2bCtx { + b: [u8; 128], + h: [u64; 8], + t: [u64; 2], + c: usize, + outlen: u16, + finalized: bool +} + +impl ::std::fmt::Debug for Blake2bCtx { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + try!(write!(fmt, "hash: ")); + for v in &self.h[..] { + try!(write!(fmt, "{:x}", v)); + } + Ok(()) + } +} + +#[inline(always)] +fn b2b_g(v: &mut [u64; 16], + a: usize, + b: usize, + c: usize, + d: usize, + x: u64, + y: u64) +{ + v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); + v[d] = (v[d] ^ v[a]).rotate_right(32); + v[c] = v[c].wrapping_add(v[d]); + v[b] = (v[b] ^ v[c]).rotate_right(24); + v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); + v[d] = (v[d] ^ v[a]).rotate_right(16); + v[c] = v[c].wrapping_add(v[d]); + v[b] = (v[b] ^ v[c]).rotate_right(63); +} + +// Initialization vector +const BLAKE2B_IV: [u64; 8] = [ + 0x6A09E667F3BCC908, 0xBB67AE8584CAA73B, + 0x3C6EF372FE94F82B, 0xA54FF53A5F1D36F1, + 0x510E527FADE682D1, 0x9B05688C2B3E6C1F, + 0x1F83D9ABFB41BD6B, 0x5BE0CD19137E2179 +]; + +fn blake2b_compress(ctx: &mut Blake2bCtx, last: bool) { + + const SIGMA: [[usize; 16]; 12] = [ + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], + [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ], + [11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ], + [7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ], + [9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ], + [2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ], + [12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ], + [13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ], + [6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ], + [10, 2, 8, 4, 7, 6, 1, 5, 
15, 11, 9, 14, 3, 12, 13, 0 ], + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ], + [14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ] + ]; + + let mut v: [u64; 16] = [ + ctx.h[0], + ctx.h[1], + ctx.h[2], + ctx.h[3], + ctx.h[4], + ctx.h[5], + ctx.h[6], + ctx.h[7], + + BLAKE2B_IV[0], + BLAKE2B_IV[1], + BLAKE2B_IV[2], + BLAKE2B_IV[3], + BLAKE2B_IV[4], + BLAKE2B_IV[5], + BLAKE2B_IV[6], + BLAKE2B_IV[7], + ]; + + v[12] ^= ctx.t[0]; // low 64 bits of offset + v[13] ^= ctx.t[1]; // high 64 bits + if last { + v[14] = !v[14]; + } + + { + // Re-interpret the input buffer in the state as u64s + let m: &mut [u64; 16] = unsafe { + let b: &mut [u8; 128] = &mut ctx.b; + ::std::mem::transmute(b) + }; + + // It's OK to modify the buffer in place since this is the last time + // this data will be accessed before it's overwritten + if cfg!(target_endian = "big") { + for word in &mut m[..] { + *word = word.to_be(); + } + } + + for i in 0 .. 12 { + b2b_g(&mut v, 0, 4, 8, 12, m[SIGMA[i][ 0]], m[SIGMA[i][ 1]]); + b2b_g(&mut v, 1, 5, 9, 13, m[SIGMA[i][ 2]], m[SIGMA[i][ 3]]); + b2b_g(&mut v, 2, 6, 10, 14, m[SIGMA[i][ 4]], m[SIGMA[i][ 5]]); + b2b_g(&mut v, 3, 7, 11, 15, m[SIGMA[i][ 6]], m[SIGMA[i][ 7]]); + b2b_g(&mut v, 0, 5, 10, 15, m[SIGMA[i][ 8]], m[SIGMA[i][ 9]]); + b2b_g(&mut v, 1, 6, 11, 12, m[SIGMA[i][10]], m[SIGMA[i][11]]); + b2b_g(&mut v, 2, 7, 8, 13, m[SIGMA[i][12]], m[SIGMA[i][13]]); + b2b_g(&mut v, 3, 4, 9, 14, m[SIGMA[i][14]], m[SIGMA[i][15]]); + } + } + + for i in 0 .. 
8 { + ctx.h[i] ^= v[i] ^ v[i + 8]; + } +} + +fn blake2b_new(outlen: usize, key: &[u8]) -> Blake2bCtx { + assert!(outlen > 0 && outlen <= 64 && key.len() <= 64); + + let mut ctx = Blake2bCtx { + b: [0; 128], + h: BLAKE2B_IV, + t: [0; 2], + c: 0, + outlen: outlen as u16, + finalized: false, + }; + + ctx.h[0] ^= 0x01010000 ^ ((key.len() << 8) as u64) ^ (outlen as u64); + + if key.len() > 0 { + blake2b_update(&mut ctx, key); + ctx.c = ctx.b.len(); + } + + ctx +} + +fn blake2b_update(ctx: &mut Blake2bCtx, mut data: &[u8]) { + assert!(!ctx.finalized, "Blake2bCtx already finalized"); + + let mut bytes_to_copy = data.len(); + let mut space_in_buffer = ctx.b.len() - ctx.c; + + while bytes_to_copy > space_in_buffer { + checked_mem_copy(data, &mut ctx.b[ctx.c .. ], space_in_buffer); + + ctx.t[0] = ctx.t[0].wrapping_add(ctx.b.len() as u64); + if ctx.t[0] < (ctx.b.len() as u64) { + ctx.t[1] += 1; + } + blake2b_compress(ctx, false); + ctx.c = 0; + + data = &data[space_in_buffer .. ]; + bytes_to_copy -= space_in_buffer; + space_in_buffer = ctx.b.len(); + } + + if bytes_to_copy > 0 { + checked_mem_copy(data, &mut ctx.b[ctx.c .. 
], bytes_to_copy); + ctx.c += bytes_to_copy; + } +} + +fn blake2b_final(ctx: &mut Blake2bCtx) +{ + assert!(!ctx.finalized, "Blake2bCtx already finalized"); + + ctx.t[0] = ctx.t[0].wrapping_add(ctx.c as u64); + if ctx.t[0] < ctx.c as u64 { + ctx.t[1] += 1; + } + + while ctx.c < 128 { + ctx.b[ctx.c] = 0; + ctx.c += 1; + } + + blake2b_compress(ctx, true); + + if cfg!(target_endian = "big") { + // Make sure that the data is in memory in little endian format, as is + // demanded by BLAKE2 + for word in &mut ctx.h { + *word = word.to_le(); + } + } + + ctx.finalized = true; +} + +#[inline(always)] +fn checked_mem_copy(from: &[T1], to: &mut [T2], byte_count: usize) { + let from_size = from.len() * mem::size_of::(); + let to_size = to.len() * mem::size_of::(); + assert!(from_size >= byte_count); + assert!(to_size >= byte_count); + let from_byte_ptr = from.as_ptr() as * const u8; + let to_byte_ptr = to.as_mut_ptr() as * mut u8; + unsafe { + ::std::ptr::copy_nonoverlapping(from_byte_ptr, to_byte_ptr, byte_count); + } +} + +pub fn blake2b(out: &mut [u8], key: &[u8], data: &[u8]) +{ + let mut ctx = blake2b_new(out.len(), key); + blake2b_update(&mut ctx, data); + blake2b_final(&mut ctx); + checked_mem_copy(&ctx.h, out, ctx.outlen as usize); +} + +pub struct Blake2bHasher(Blake2bCtx); + +impl ::std::hash::Hasher for Blake2bHasher { + fn write(&mut self, bytes: &[u8]) { + blake2b_update(&mut self.0, bytes); + } + + fn finish(&self) -> u64 { + assert!(self.0.outlen == 8, + "Hasher initialized with incompatible output length"); + u64::from_le(self.0.h[0]) + } +} + +impl Blake2bHasher { + pub fn new(outlen: usize, key: &[u8]) -> Blake2bHasher { + Blake2bHasher(blake2b_new(outlen, key)) + } + + pub fn finalize(&mut self) -> &[u8] { + if !self.0.finalized { + blake2b_final(&mut self.0); + } + debug_assert!(mem::size_of_val(&self.0.h) >= self.0.outlen as usize); + let raw_ptr = (&self.0.h[..]).as_ptr() as * const u8; + unsafe { + slice::from_raw_parts(raw_ptr, self.0.outlen as usize) + 
} + } +} + +impl ::std::fmt::Debug for Blake2bHasher { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + write!(fmt, "{:?}", self.0) + } +} + +#[cfg(test)] +fn selftest_seq(out: &mut [u8], seed: u32) +{ + let mut a: u32 = 0xDEAD4BADu32.wrapping_mul(seed); + let mut b: u32 = 1; + + for i in 0 .. out.len() { + let t: u32 = a.wrapping_add(b); + a = b; + b = t; + out[i] = ((t >> 24) & 0xFF) as u8; + } +} + +#[test] +fn blake2b_selftest() +{ + use std::hash::Hasher; + + // grand hash of hash results + const BLAKE2B_RES: [u8; 32] = [ + 0xC2, 0x3A, 0x78, 0x00, 0xD9, 0x81, 0x23, 0xBD, + 0x10, 0xF5, 0x06, 0xC6, 0x1E, 0x29, 0xDA, 0x56, + 0x03, 0xD7, 0x63, 0xB8, 0xBB, 0xAD, 0x2E, 0x73, + 0x7F, 0x5E, 0x76, 0x5A, 0x7B, 0xCC, 0xD4, 0x75 + ]; + + // parameter sets + const B2B_MD_LEN: [usize; 4] = [20, 32, 48, 64]; + const B2B_IN_LEN: [usize; 6] = [0, 3, 128, 129, 255, 1024]; + + let mut data = [0u8; 1024]; + let mut md = [0u8; 64]; + let mut key = [0u8; 64]; + + let mut hasher = Blake2bHasher::new(32, &[]); + + for i in 0 .. 4 { + let outlen = B2B_MD_LEN[i]; + for j in 0 .. 6 { + let inlen = B2B_IN_LEN[j]; + + selftest_seq(&mut data[.. inlen], inlen as u32); // unkeyed hash + blake2b(&mut md[.. outlen], &[], &data[.. inlen]); + hasher.write(&md[.. outlen]); // hash the hash + + selftest_seq(&mut key[0 .. outlen], outlen as u32); // keyed hash + blake2b(&mut md[.. outlen], &key[.. outlen], &data[.. inlen]); + hasher.write(&md[.. outlen]); // hash the hash + } + } + + // compute and compare the hash of hashes + let md = hasher.finalize(); + for i in 0 .. 
32 { + assert_eq!(md[i], BLAKE2B_RES[i]); + } +} diff --git a/src/librustc_data_structures/control_flow_graph/dominators/mod.rs b/src/librustc_data_structures/control_flow_graph/dominators/mod.rs new file mode 100644 index 0000000000000..ab675db21503e --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/dominators/mod.rs @@ -0,0 +1,284 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Algorithm citation: +//! A Simple, Fast Dominance Algorithm. +//! Keith D. Cooper, Timothy J. Harvey, and Ken Kennedy +//! Rice Computer Science TS-06-33870 +//! https://www.cs.rice.edu/~keith/EMBED/dom.pdf + +use super::ControlFlowGraph; +use super::iterate::reverse_post_order; +use super::super::indexed_vec::{IndexVec, Idx}; + +use std::fmt; + +#[cfg(test)] +mod test; + +pub fn dominators(graph: &G) -> Dominators { + let start_node = graph.start_node(); + let rpo = reverse_post_order(graph, start_node); + dominators_given_rpo(graph, &rpo) +} + +pub fn dominators_given_rpo(graph: &G, + rpo: &[G::Node]) + -> Dominators { + let start_node = graph.start_node(); + assert_eq!(rpo[0], start_node); + + // compute the post order index (rank) for each node + let mut post_order_rank: IndexVec = IndexVec::from_elem_n(usize::default(), + graph.num_nodes()); + for (index, node) in rpo.iter().rev().cloned().enumerate() { + post_order_rank[node] = index; + } + + let mut immediate_dominators: IndexVec> = + IndexVec::from_elem_n(Option::default(), graph.num_nodes()); + immediate_dominators[start_node] = Some(start_node); + + let mut changed = true; + while changed { + changed = false; + + for &node in &rpo[1..] 
{ + let mut new_idom = None; + for pred in graph.predecessors(node) { + if immediate_dominators[pred].is_some() { + // (*) + // (*) dominators for `pred` have been calculated + new_idom = intersect_opt(&post_order_rank, + &immediate_dominators, + new_idom, + Some(pred)); + } + } + + if new_idom != immediate_dominators[node] { + immediate_dominators[node] = new_idom; + changed = true; + } + } + } + + Dominators { + post_order_rank: post_order_rank, + immediate_dominators: immediate_dominators, + } +} + +fn intersect_opt(post_order_rank: &IndexVec, + immediate_dominators: &IndexVec>, + node1: Option, + node2: Option) + -> Option { + match (node1, node2) { + (None, None) => None, + (Some(n), None) | (None, Some(n)) => Some(n), + (Some(n1), Some(n2)) => Some(intersect(post_order_rank, immediate_dominators, n1, n2)), + } +} + +fn intersect(post_order_rank: &IndexVec, + immediate_dominators: &IndexVec>, + mut node1: Node, + mut node2: Node) + -> Node { + while node1 != node2 { + while post_order_rank[node1] < post_order_rank[node2] { + node1 = immediate_dominators[node1].unwrap(); + } + + while post_order_rank[node2] < post_order_rank[node1] { + node2 = immediate_dominators[node2].unwrap(); + } + } + return node1; +} + +#[derive(Clone, Debug)] +pub struct Dominators { + post_order_rank: IndexVec, + immediate_dominators: IndexVec>, +} + +impl Dominators { + pub fn is_reachable(&self, node: Node) -> bool { + self.immediate_dominators[node].is_some() + } + + pub fn immediate_dominator(&self, node: Node) -> Node { + assert!(self.is_reachable(node), "node {:?} is not reachable", node); + self.immediate_dominators[node].unwrap() + } + + pub fn dominators(&self, node: Node) -> Iter { + assert!(self.is_reachable(node), "node {:?} is not reachable", node); + Iter { + dominators: self, + node: Some(node), + } + } + + pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool { + // FIXME -- could be optimized by using post-order-rank + self.dominators(node).any(|n| n == dom) + } 
+ + pub fn mutual_dominator_node(&self, node1: Node, node2: Node) -> Node { + assert!(self.is_reachable(node1), + "node {:?} is not reachable", + node1); + assert!(self.is_reachable(node2), + "node {:?} is not reachable", + node2); + intersect::(&self.post_order_rank, + &self.immediate_dominators, + node1, + node2) + } + + pub fn mutual_dominator(&self, iter: I) -> Option + where I: IntoIterator + { + let mut iter = iter.into_iter(); + iter.next() + .map(|dom| iter.fold(dom, |dom, node| self.mutual_dominator_node(dom, node))) + } + + pub fn all_immediate_dominators(&self) -> &IndexVec> { + &self.immediate_dominators + } + + pub fn dominator_tree(&self) -> DominatorTree { + let elem: Vec = Vec::new(); + let mut children: IndexVec> = + IndexVec::from_elem_n(elem, self.immediate_dominators.len()); + let mut root = None; + for (index, immed_dom) in self.immediate_dominators.iter().enumerate() { + let node = Node::new(index); + match *immed_dom { + None => { + // node not reachable + } + Some(immed_dom) => { + if node == immed_dom { + root = Some(node); + } else { + children[immed_dom].push(node); + } + } + } + } + DominatorTree { + root: root.unwrap(), + children: children, + } + } +} + +pub struct Iter<'dom, Node: Idx + 'dom> { + dominators: &'dom Dominators, + node: Option, +} + +impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> { + type Item = Node; + + fn next(&mut self) -> Option { + if let Some(node) = self.node { + let dom = self.dominators.immediate_dominator(node); + if dom == node { + self.node = None; // reached the root + } else { + self.node = Some(dom); + } + return Some(node); + } else { + return None; + } + } +} + +pub struct DominatorTree { + root: N, + children: IndexVec>, +} + +impl DominatorTree { + pub fn root(&self) -> Node { + self.root + } + + pub fn children(&self, node: Node) -> &[Node] { + &self.children[node] + } + + pub fn iter_children_of(&self, node: Node) -> IterChildrenOf { + IterChildrenOf { + tree: self, + stack: vec![node], + } + } 
+} + +pub struct IterChildrenOf<'iter, Node: Idx + 'iter> { + tree: &'iter DominatorTree, + stack: Vec, +} + +impl<'iter, Node: Idx> Iterator for IterChildrenOf<'iter, Node> { + type Item = Node; + + fn next(&mut self) -> Option { + if let Some(node) = self.stack.pop() { + self.stack.extend(self.tree.children(node)); + Some(node) + } else { + None + } + } +} + +impl fmt::Debug for DominatorTree { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fmt::Debug::fmt(&DominatorTreeNode { + tree: self, + node: self.root, + }, + fmt) + } +} + +struct DominatorTreeNode<'tree, Node: Idx> { + tree: &'tree DominatorTree, + node: Node, +} + +impl<'tree, Node: Idx> fmt::Debug for DominatorTreeNode<'tree, Node> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + let subtrees: Vec<_> = self.tree + .children(self.node) + .iter() + .map(|&child| { + DominatorTreeNode { + tree: self.tree, + node: child, + } + }) + .collect(); + fmt.debug_tuple("") + .field(&self.node) + .field(&subtrees) + .finish() + } +} diff --git a/src/librustc_data_structures/control_flow_graph/dominators/test.rs b/src/librustc_data_structures/control_flow_graph/dominators/test.rs new file mode 100644 index 0000000000000..0af878cac2df1 --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/dominators/test.rs @@ -0,0 +1,43 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::super::test::TestGraph; + +use super::*; + +#[test] +fn diamond() { + let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]); + + let dominators = dominators(&graph); + let immediate_dominators = dominators.all_immediate_dominators(); + assert_eq!(immediate_dominators[0], Some(0)); + assert_eq!(immediate_dominators[1], Some(0)); + assert_eq!(immediate_dominators[2], Some(0)); + assert_eq!(immediate_dominators[3], Some(0)); +} + +#[test] +fn paper() { + // example from the paper: + let graph = TestGraph::new(6, + &[(6, 5), (6, 4), (5, 1), (4, 2), (4, 3), (1, 2), (2, 3), (3, 2), + (2, 1)]); + + let dominators = dominators(&graph); + let immediate_dominators = dominators.all_immediate_dominators(); + assert_eq!(immediate_dominators[0], None); // <-- note that 0 is not in graph + assert_eq!(immediate_dominators[1], Some(6)); + assert_eq!(immediate_dominators[2], Some(6)); + assert_eq!(immediate_dominators[3], Some(6)); + assert_eq!(immediate_dominators[4], Some(6)); + assert_eq!(immediate_dominators[5], Some(6)); + assert_eq!(immediate_dominators[6], Some(6)); +} diff --git a/src/librustc_data_structures/control_flow_graph/iterate/mod.rs b/src/librustc_data_structures/control_flow_graph/iterate/mod.rs new file mode 100644 index 0000000000000..11b557cbcadb9 --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/iterate/mod.rs @@ -0,0 +1,70 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::ControlFlowGraph; +use super::super::indexed_vec::IndexVec; + +#[cfg(test)] +mod test; + +pub fn post_order_from(graph: &G, start_node: G::Node) -> Vec { + post_order_from_to(graph, start_node, None) +} + +pub fn post_order_from_to(graph: &G, + start_node: G::Node, + end_node: Option) + -> Vec { + let mut visited: IndexVec = IndexVec::from_elem_n(false, graph.num_nodes()); + let mut result: Vec = Vec::with_capacity(graph.num_nodes()); + if let Some(end_node) = end_node { + visited[end_node] = true; + } + post_order_walk(graph, start_node, &mut result, &mut visited); + result +} + +fn post_order_walk(graph: &G, + node: G::Node, + result: &mut Vec, + visited: &mut IndexVec) { + if visited[node] { + return; + } + visited[node] = true; + + for successor in graph.successors(node) { + post_order_walk(graph, successor, result, visited); + } + + result.push(node); +} + +pub fn pre_order_walk(graph: &G, + node: G::Node, + result: &mut Vec, + visited: &mut IndexVec) { + if visited[node] { + return; + } + visited[node] = true; + + result.push(node); + + for successor in graph.successors(node) { + pre_order_walk(graph, successor, result, visited); + } +} + +pub fn reverse_post_order(graph: &G, start_node: G::Node) -> Vec { + let mut vec = post_order_from(graph, start_node); + vec.reverse(); + vec +} diff --git a/src/librustc_data_structures/control_flow_graph/iterate/test.rs b/src/librustc_data_structures/control_flow_graph/iterate/test.rs new file mode 100644 index 0000000000000..dca45602f17c4 --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/iterate/test.rs @@ -0,0 +1,41 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::super::test::TestGraph; +use super::super::transpose::TransposedGraph; + +use super::*; + +#[test] +fn diamond_post_order() { + let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]); + + let result = post_order_from(&graph, 0); + assert_eq!(result, vec![3, 1, 2, 0]); +} + + +#[test] +fn rev_post_order_inner_loop() { + // 0 -> 1 -> 2 -> 3 -> 5 + // ^ ^ v | + // | 6 <- 4 | + // +-----------------+ + let graph = TestGraph::new(0, + &[(0, 1), (1, 2), (2, 3), (3, 5), (3, 1), (2, 4), (4, 6), (6, 2)]); + + let rev_graph = TransposedGraph::new(&graph); + + let result = post_order_from_to(&rev_graph, 6, Some(2)); + assert_eq!(result, vec![4, 6]); + + let result = post_order_from_to(&rev_graph, 3, Some(1)); + assert_eq!(result, vec![4, 6, 2, 3]); +} diff --git a/src/librustc_data_structures/control_flow_graph/mod.rs b/src/librustc_data_structures/control_flow_graph/mod.rs new file mode 100644 index 0000000000000..eb6839df6274f --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/mod.rs @@ -0,0 +1,45 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::indexed_vec::Idx; +pub use std::slice::Iter; + +pub mod dominators; +pub mod iterate; +pub mod reachable; +mod reference; +pub mod transpose; + +#[cfg(test)] +mod test; + +pub trait ControlFlowGraph + where Self: for<'graph> GraphPredecessors<'graph, Item=::Node>, + Self: for<'graph> GraphSuccessors<'graph, Item=::Node> +{ + type Node: Idx; + + fn num_nodes(&self) -> usize; + fn start_node(&self) -> Self::Node; + fn predecessors<'graph>(&'graph self, node: Self::Node) + -> >::Iter; + fn successors<'graph>(&'graph self, node: Self::Node) + -> >::Iter; +} + +pub trait GraphPredecessors<'graph> { + type Item; + type Iter: Iterator; +} + +pub trait GraphSuccessors<'graph> { + type Item; + type Iter: Iterator; +} diff --git a/src/librustc_data_structures/control_flow_graph/reachable/mod.rs b/src/librustc_data_structures/control_flow_graph/reachable/mod.rs new file mode 100644 index 0000000000000..24210ebb95d3d --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/reachable/mod.rs @@ -0,0 +1,62 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Compute reachability using a simple dataflow propagation. +//! Store end-result in a big NxN bit matrix. 
+ +use super::ControlFlowGraph; +use super::super::bitvec::BitVector; +use super::iterate::reverse_post_order; +use super::super::indexed_vec::{IndexVec, Idx}; + +#[cfg(test)] +mod test; + +pub fn reachable(graph: &G) -> Reachability { + let reverse_post_order = reverse_post_order(graph, graph.start_node()); + reachable_given_rpo(graph, &reverse_post_order) +} + +pub fn reachable_given_rpo(graph: &G, + reverse_post_order: &[G::Node]) + -> Reachability { + let mut reachability = Reachability::new(graph); + let mut changed = true; + while changed { + changed = false; + for &node in reverse_post_order.iter().rev() { + // every node can reach itself + changed |= reachability.bits[node].insert(node.index()); + + // and every pred can reach everything node can reach + for pred in graph.predecessors(node) { + let nodes_bits = reachability.bits[node].clone(); + changed |= reachability.bits[pred].insert_all(&nodes_bits); + } + } + } + reachability +} + +pub struct Reachability { + bits: IndexVec, +} + +impl Reachability { + fn new(graph: &G) -> Self { + let num_nodes = graph.num_nodes(); + Reachability { bits: IndexVec::from_elem_n(BitVector::new(num_nodes), num_nodes) } + } + + pub fn can_reach(&self, source: Node, target: Node) -> bool { + let bit: usize = target.index(); + self.bits[source].contains(bit) + } +} diff --git a/src/librustc_data_structures/control_flow_graph/reachable/test.rs b/src/librustc_data_structures/control_flow_graph/reachable/test.rs new file mode 100644 index 0000000000000..ef45deeaafc78 --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/reachable/test.rs @@ -0,0 +1,50 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::super::test::TestGraph; + +use super::*; + +#[test] +fn test1() { + // 0 -> 1 -> 2 -> 3 + // ^ v + // 6 <- 4 -> 5 + let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 3), (2, 4), (4, 5), (4, 6), (6, 1)]); + let reachable = reachable(&graph); + assert!((0..6).all(|i| reachable.can_reach(0, i))); + assert!((1..6).all(|i| reachable.can_reach(1, i))); + assert!((1..6).all(|i| reachable.can_reach(2, i))); + assert!((1..6).all(|i| reachable.can_reach(4, i))); + assert!((1..6).all(|i| reachable.can_reach(6, i))); + assert!(reachable.can_reach(3, 3)); + assert!(!reachable.can_reach(3, 5)); + assert!(!reachable.can_reach(5, 3)); +} + +/// use bigger indices to cross between words in the bit set +#[test] +fn test2() { + // 30 -> 31 -> 32 -> 33 + // ^ v + // 36 <- 34 -> 35 + let graph = TestGraph::new(30, + &[(30, 31), (31, 32), (32, 33), (32, 34), (34, 35), (34, 36), + (36, 31)]); + let reachable = reachable(&graph); + assert!((30..36).all(|i| reachable.can_reach(30, i))); + assert!((31..36).all(|i| reachable.can_reach(31, i))); + assert!((31..36).all(|i| reachable.can_reach(32, i))); + assert!((31..36).all(|i| reachable.can_reach(34, i))); + assert!((31..36).all(|i| reachable.can_reach(36, i))); + assert!(reachable.can_reach(33, 33)); + assert!(!reachable.can_reach(33, 35)); + assert!(!reachable.can_reach(35, 33)); +} diff --git a/src/librustc_data_structures/control_flow_graph/reference.rs b/src/librustc_data_structures/control_flow_graph/reference.rs new file mode 100644 index 0000000000000..3b8b01f2ff43b --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/reference.rs @@ -0,0 +1,43 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::*; + +impl<'graph, G: ControlFlowGraph> ControlFlowGraph for &'graph G { + type Node = G::Node; + + fn num_nodes(&self) -> usize { + (**self).num_nodes() + } + + fn start_node(&self) -> Self::Node { + (**self).start_node() + } + + fn predecessors<'iter>(&'iter self, + node: Self::Node) + -> >::Iter { + (**self).predecessors(node) + } + + fn successors<'iter>(&'iter self, node: Self::Node) -> >::Iter { + (**self).successors(node) + } +} + +impl<'iter, 'graph, G: ControlFlowGraph> GraphPredecessors<'iter> for &'graph G { + type Item = G::Node; + type Iter = >::Iter; +} + +impl<'iter, 'graph, G: ControlFlowGraph> GraphSuccessors<'iter> for &'graph G { + type Item = G::Node; + type Iter = >::Iter; +} diff --git a/src/librustc_data_structures/control_flow_graph/test.rs b/src/librustc_data_structures/control_flow_graph/test.rs new file mode 100644 index 0000000000000..d48a6e684ad8e --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/test.rs @@ -0,0 +1,77 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::collections::HashMap; +use std::cmp::max; +use std::slice; +use std::iter; + +use super::{ControlFlowGraph, GraphPredecessors, GraphSuccessors}; + +pub struct TestGraph { + num_nodes: usize, + start_node: usize, + successors: HashMap>, + predecessors: HashMap>, +} + +impl TestGraph { + pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self { + let mut graph = TestGraph { + num_nodes: start_node + 1, + start_node: start_node, + successors: HashMap::new(), + predecessors: HashMap::new(), + }; + for &(source, target) in edges { + graph.num_nodes = max(graph.num_nodes, source + 1); + graph.num_nodes = max(graph.num_nodes, target + 1); + graph.successors.entry(source).or_insert(vec![]).push(target); + graph.predecessors.entry(target).or_insert(vec![]).push(source); + } + for node in 0..graph.num_nodes { + graph.successors.entry(node).or_insert(vec![]); + graph.predecessors.entry(node).or_insert(vec![]); + } + graph + } +} + +impl ControlFlowGraph for TestGraph { + type Node = usize; + + fn start_node(&self) -> usize { + self.start_node + } + + fn num_nodes(&self) -> usize { + self.num_nodes + } + + fn predecessors<'graph>(&'graph self, + node: usize) + -> >::Iter { + self.predecessors[&node].iter().cloned() + } + + fn successors<'graph>(&'graph self, node: usize) -> >::Iter { + self.successors[&node].iter().cloned() + } +} + +impl<'graph> GraphPredecessors<'graph> for TestGraph { + type Item = usize; + type Iter = iter::Cloned>; +} + +impl<'graph> GraphSuccessors<'graph> for TestGraph { + type Item = usize; + type Iter = iter::Cloned>; +} diff --git a/src/librustc_data_structures/control_flow_graph/transpose.rs b/src/librustc_data_structures/control_flow_graph/transpose.rs new file mode 100644 index 0000000000000..a1a117edb94fc --- /dev/null +++ b/src/librustc_data_structures/control_flow_graph/transpose.rs @@ -0,0 +1,64 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::*; + +pub struct TransposedGraph { + base_graph: G, + start_node: G::Node, +} + +impl TransposedGraph { + pub fn new(base_graph: G) -> Self { + let start_node = base_graph.start_node(); + Self::with_start(base_graph, start_node) + } + + pub fn with_start(base_graph: G, start_node: G::Node) -> Self { + TransposedGraph { + base_graph: base_graph, + start_node: start_node, + } + } +} + +impl ControlFlowGraph for TransposedGraph { + type Node = G::Node; + + fn num_nodes(&self) -> usize { + self.base_graph.num_nodes() + } + + fn start_node(&self) -> Self::Node { + self.start_node + } + + fn predecessors<'graph>(&'graph self, + node: Self::Node) + -> >::Iter { + self.base_graph.successors(node) + } + + fn successors<'graph>(&'graph self, + node: Self::Node) + -> >::Iter { + self.base_graph.predecessors(node) + } +} + +impl<'graph, G: ControlFlowGraph> GraphPredecessors<'graph> for TransposedGraph { + type Item = G::Node; + type Iter = >::Iter; +} + +impl<'graph, G: ControlFlowGraph> GraphSuccessors<'graph> for TransposedGraph { + type Item = G::Node; + type Iter = >::Iter; +} diff --git a/src/librustc_data_structures/flock.rs b/src/librustc_data_structures/flock.rs new file mode 100644 index 0000000000000..510c9ceef0960 --- /dev/null +++ b/src/librustc_data_structures/flock.rs @@ -0,0 +1,359 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//! Simple file-locking apis for each OS. +//! +//! This is not meant to be in the standard library, it does nothing with +//! green/native threading. This is just a bare-bones enough solution for +//! librustdoc, it is not production quality at all. + +#![allow(non_camel_case_types)] +use std::path::Path; + +pub use self::imp::Lock; + +#[cfg(unix)] +mod imp { + use std::ffi::{CString, OsStr}; + use std::os::unix::prelude::*; + use std::path::Path; + use std::io; + use libc; + + #[cfg(target_os = "linux")] + mod os { + use libc; + + pub struct flock { + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + + // not actually here, but brings in line with freebsd + pub l_sysid: libc::c_int, + } + + pub const F_RDLCK: libc::c_short = 0; + pub const F_WRLCK: libc::c_short = 1; + pub const F_UNLCK: libc::c_short = 2; + pub const F_SETLK: libc::c_int = 6; + pub const F_SETLKW: libc::c_int = 7; + } + + #[cfg(target_os = "freebsd")] + mod os { + use libc; + + pub struct flock { + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + pub l_sysid: libc::c_int, + } + + pub const F_RDLCK: libc::c_short = 1; + pub const F_UNLCK: libc::c_short = 2; + pub const F_WRLCK: libc::c_short = 3; + pub const F_SETLK: libc::c_int = 12; + pub const F_SETLKW: libc::c_int = 13; + } + + #[cfg(any(target_os = "dragonfly", + target_os = "bitrig", + target_os = "netbsd", + target_os = "openbsd"))] + mod os { + use libc; + + pub struct flock { + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + + // not actually here, but brings in line with freebsd + pub l_sysid: libc::c_int, + } + + pub const F_RDLCK: libc::c_short = 1; + pub const F_UNLCK: libc::c_short = 2; + pub const F_WRLCK: libc::c_short = 3; + pub const F_SETLK: libc::c_int = 
8; + pub const F_SETLKW: libc::c_int = 9; + } + + #[cfg(target_os = "haiku")] + mod os { + use libc; + + pub struct flock { + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + + // not actually here, but brings in line with freebsd + pub l_sysid: libc::c_int, + } + + pub const F_UNLCK: libc::c_short = 0x0200; + pub const F_WRLCK: libc::c_short = 0x0400; + pub const F_SETLK: libc::c_int = 0x0080; + pub const F_SETLKW: libc::c_int = 0x0100; + } + + #[cfg(any(target_os = "macos", target_os = "ios"))] + mod os { + use libc; + + pub struct flock { + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_pid: libc::pid_t, + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + + // not actually here, but brings in line with freebsd + pub l_sysid: libc::c_int, + } + + pub const F_RDLCK: libc::c_short = 1; + pub const F_UNLCK: libc::c_short = 2; + pub const F_WRLCK: libc::c_short = 3; + pub const F_SETLK: libc::c_int = 8; + pub const F_SETLKW: libc::c_int = 9; + } + + #[cfg(target_os = "solaris")] + mod os { + use libc; + + pub struct flock { + pub l_type: libc::c_short, + pub l_whence: libc::c_short, + pub l_start: libc::off_t, + pub l_len: libc::off_t, + pub l_sysid: libc::c_int, + pub l_pid: libc::pid_t, + } + + pub const F_RDLCK: libc::c_short = 1; + pub const F_WRLCK: libc::c_short = 2; + pub const F_UNLCK: libc::c_short = 3; + pub const F_SETLK: libc::c_int = 6; + pub const F_SETLKW: libc::c_int = 7; + } + + #[derive(Debug)] + pub struct Lock { + fd: libc::c_int, + } + + impl Lock { + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { + let os: &OsStr = p.as_ref(); + let buf = CString::new(os.as_bytes()).unwrap(); + let open_flags = if create { + libc::O_RDWR | libc::O_CREAT + } else { + libc::O_RDWR + }; + + let fd = unsafe { + libc::open(buf.as_ptr(), open_flags, + libc::S_IRWXU as libc::c_int) + }; + + if fd < 0 { + return 
Err(io::Error::last_os_error()); + } + + let lock_type = if exclusive { + os::F_WRLCK + } else { + os::F_RDLCK + }; + + let flock = os::flock { + l_start: 0, + l_len: 0, + l_pid: 0, + l_whence: libc::SEEK_SET as libc::c_short, + l_type: lock_type, + l_sysid: 0, + }; + let cmd = if wait { os::F_SETLKW } else { os::F_SETLK }; + let ret = unsafe { + libc::fcntl(fd, cmd, &flock) + }; + if ret == -1 { + let err = io::Error::last_os_error(); + unsafe { libc::close(fd); } + Err(err) + } else { + Ok(Lock { fd: fd }) + } + } + } + + impl Drop for Lock { + fn drop(&mut self) { + let flock = os::flock { + l_start: 0, + l_len: 0, + l_pid: 0, + l_whence: libc::SEEK_SET as libc::c_short, + l_type: os::F_UNLCK, + l_sysid: 0, + }; + unsafe { + libc::fcntl(self.fd, os::F_SETLK, &flock); + libc::close(self.fd); + } + } + } +} + +#[cfg(windows)] +#[allow(bad_style)] +mod imp { + use std::io; + use std::mem; + use std::os::windows::prelude::*; + use std::os::windows::raw::HANDLE; + use std::path::Path; + use std::fs::{File, OpenOptions}; + use std::os::raw::{c_ulong, c_ulonglong, c_int}; + + type DWORD = c_ulong; + type BOOL = c_int; + type ULONG_PTR = c_ulonglong; + + type LPOVERLAPPED = *mut OVERLAPPED; + const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002; + const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 0x00000001; + + const FILE_SHARE_DELETE: DWORD = 0x4; + const FILE_SHARE_READ: DWORD = 0x1; + const FILE_SHARE_WRITE: DWORD = 0x2; + + #[repr(C)] + struct OVERLAPPED { + Internal: ULONG_PTR, + InternalHigh: ULONG_PTR, + Offset: DWORD, + OffsetHigh: DWORD, + hEvent: HANDLE, + } + + extern "system" { + fn LockFileEx(hFile: HANDLE, + dwFlags: DWORD, + dwReserved: DWORD, + nNumberOfBytesToLockLow: DWORD, + nNumberOfBytesToLockHigh: DWORD, + lpOverlapped: LPOVERLAPPED) -> BOOL; + } + + #[derive(Debug)] + pub struct Lock { + _file: File, + } + + impl Lock { + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { + assert!(p.parent().unwrap().exists(), + "Parent 
directory of lock-file must exist: {}", + p.display()); + + let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE; + + let mut open_options = OpenOptions::new(); + open_options.read(true) + .share_mode(share_mode); + + if create { + open_options.create(true) + .write(true); + } + + debug!("Attempting to open lock file `{}`", p.display()); + let file = match open_options.open(p) { + Ok(file) => { + debug!("Lock file opened successfully"); + file + } + Err(err) => { + debug!("Error opening lock file: {}", err); + return Err(err) + } + }; + + let ret = unsafe { + let mut overlapped: OVERLAPPED = mem::zeroed(); + + let mut dwFlags = 0; + if !wait { + dwFlags |= LOCKFILE_FAIL_IMMEDIATELY; + } + + if exclusive { + dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; + } + + debug!("Attempting to acquire lock on lock file `{}`", + p.display()); + LockFileEx(file.as_raw_handle(), + dwFlags, + 0, + 0xFFFF_FFFF, + 0xFFFF_FFFF, + &mut overlapped) + }; + if ret == 0 { + let err = io::Error::last_os_error(); + debug!("Failed acquiring file lock: {}", err); + Err(err) + } else { + debug!("Successfully acquired lock."); + Ok(Lock { _file: file }) + } + } + } + + // Note that we don't need a Drop impl on the Windows: The file is unlocked + // automatically when it's closed. +} + +impl imp::Lock { + pub fn panicking_new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> Lock { + Lock::new(p, wait, create, exclusive).unwrap_or_else(|err| { + panic!("could not lock `{}`: {}", p.display(), err); + }) + } +} diff --git a/src/librustc_data_structures/fmt_wrap.rs b/src/librustc_data_structures/fmt_wrap.rs new file mode 100644 index 0000000000000..50fd1d802b7ff --- /dev/null +++ b/src/librustc_data_structures/fmt_wrap.rs @@ -0,0 +1,31 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt; + +// Provide some more formatting options for some data types (at the moment +// that's just `{:x}` for slices of u8). + +pub struct FmtWrap(pub T); + +impl<'a> fmt::LowerHex for FmtWrap<&'a [u8]> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + for byte in self.0.iter() { + try!(write!(formatter, "{:02x}", byte)); + } + Ok(()) + } +} + +#[test] +fn test_lower_hex() { + let bytes: &[u8] = &[0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]; + assert_eq!("0123456789abcdef", &format!("{:x}", FmtWrap(bytes))); +} diff --git a/src/librustc_data_structures/fnv.rs b/src/librustc_data_structures/fnv.rs index 77baa84c0236d..ae90c2fac8321 100644 --- a/src/librustc_data_structures/fnv.rs +++ b/src/librustc_data_structures/fnv.rs @@ -9,21 +9,20 @@ // except according to those terms. use std::collections::{HashMap, HashSet}; -use std::collections::hash_state::DefaultState; use std::default::Default; -use std::hash::{Hasher, Hash}; +use std::hash::{Hasher, Hash, BuildHasherDefault}; -pub type FnvHashMap = HashMap>; -pub type FnvHashSet = HashSet>; +pub type FnvHashMap = HashMap>; +pub type FnvHashSet = HashSet>; #[allow(non_snake_case)] pub fn FnvHashMap() -> FnvHashMap { - Default::default() + HashMap::default() } #[allow(non_snake_case)] pub fn FnvHashSet() -> FnvHashSet { - Default::default() + HashSet::default() } /// A speedy hash algorithm for node ids and def ids. The hashmap in @@ -36,10 +35,15 @@ pub fn FnvHashSet() -> FnvHashSet { pub struct FnvHasher(u64); impl Default for FnvHasher { - fn default() -> FnvHasher { FnvHasher(0xcbf29ce484222325) } + /// Creates a `FnvHasher`, with a 64-bit hex initial value. 
+ #[inline] + fn default() -> FnvHasher { + FnvHasher(0xcbf29ce484222325) + } } impl Hasher for FnvHasher { + #[inline] fn write(&mut self, bytes: &[u8]) { let FnvHasher(mut hash) = *self; for byte in bytes { @@ -48,5 +52,15 @@ impl Hasher for FnvHasher { } *self = FnvHasher(hash); } - fn finish(&self) -> u64 { self.0 } + + #[inline] + fn finish(&self) -> u64 { + self.0 + } +} + +pub fn hash(v: &T) -> u64 { + let mut state = FnvHasher::default(); + v.hash(&mut state); + state.finish() } diff --git a/src/librustc_data_structures/fx.rs b/src/librustc_data_structures/fx.rs new file mode 100644 index 0000000000000..1fb7673521d88 --- /dev/null +++ b/src/librustc_data_structures/fx.rs @@ -0,0 +1,115 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::collections::{HashMap, HashSet}; +use std::default::Default; +use std::hash::{Hasher, Hash, BuildHasherDefault}; +use std::ops::BitXor; + +pub type FxHashMap = HashMap>; +pub type FxHashSet = HashSet>; + +#[allow(non_snake_case)] +pub fn FxHashMap() -> FxHashMap { + HashMap::default() +} + +#[allow(non_snake_case)] +pub fn FxHashSet() -> FxHashSet { + HashSet::default() +} + +/// A speedy hash algorithm for use within rustc. The hashmap in libcollections +/// by default uses SipHash which isn't quite as speedy as we want. In the +/// compiler we're not really worried about DOS attempts, so we use a fast +/// non-cryptographic hash. +/// +/// This is the same as the algorithm used by Firefox -- which is a homespun +/// one not based on any widely-known algorithm -- though modified to produce +/// 64-bit hash values instead of 32-bit hash values. 
It consistently +/// out-performs an FNV-based hash within rustc itself -- the collision rate is +/// similar or slightly worse than FNV, but the speed of the hash function +/// itself is much higher because it works on up to 8 bytes at a time. +pub struct FxHasher { + hash: usize +} + +#[cfg(target_pointer_width = "32")] +const K: usize = 0x9e3779b9; +#[cfg(target_pointer_width = "64")] +const K: usize = 0x517cc1b727220a95; + +impl Default for FxHasher { + #[inline] + fn default() -> FxHasher { + FxHasher { hash: 0 } + } +} + +impl FxHasher { + #[inline] + fn add_to_hash(&mut self, i: usize) { + self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K); + } +} + +impl Hasher for FxHasher { + #[inline] + fn write(&mut self, bytes: &[u8]) { + for byte in bytes { + let i = *byte; + self.add_to_hash(i as usize); + } + } + + #[inline] + fn write_u8(&mut self, i: u8) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_u16(&mut self, i: u16) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.add_to_hash(i as usize); + } + + #[cfg(target_pointer_width = "32")] + #[inline] + fn write_u64(&mut self, i: u64) { + self.add_to_hash(i as usize); + self.add_to_hash((i >> 32) as usize); + } + + #[cfg(target_pointer_width = "64")] + #[inline] + fn write_u64(&mut self, i: u64) { + self.add_to_hash(i as usize); + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.add_to_hash(i); + } + + #[inline] + fn finish(&self) -> u64 { + self.hash as u64 + } +} + +pub fn hash(v: &T) -> u64 { + let mut state = FxHasher::default(); + v.hash(&mut state); + state.finish() +} diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index 1ea09490aed2f..f94ed6b720946 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -38,9 +38,9 @@ use snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; #[cfg(test)] mod tests; -pub struct Graph { - 
nodes: SnapshotVec> , - edges: SnapshotVec> , +pub struct Graph { + nodes: SnapshotVec>, + edges: SnapshotVec>, } pub struct Node { @@ -71,9 +71,13 @@ impl SnapshotVecDelegate for Edge { impl Debug for Edge { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { - write!(f, "Edge {{ next_edge: [{:?}, {:?}], source: {:?}, target: {:?}, data: {:?} }}", - self.next_edge[0], self.next_edge[1], self.source, - self.target, self.data) + write!(f, + "Edge {{ next_edge: [{:?}, {:?}], source: {:?}, target: {:?}, data: {:?} }}", + self.next_edge[0], + self.next_edge[1], + self.source, + self.target, + self.data) } } @@ -87,7 +91,9 @@ pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX); // Use a private field here to guarantee no more instances are created: #[derive(Copy, Clone, Debug, PartialEq)] -pub struct Direction { repr: usize } +pub struct Direction { + repr: usize, +} pub const OUTGOING: Direction = Direction { repr: 0 }; @@ -95,27 +101,30 @@ pub const INCOMING: Direction = Direction { repr: 1 }; impl NodeIndex { /// Returns unique id (unique with respect to the graph holding associated node). - pub fn node_id(&self) -> usize { self.0 } + pub fn node_id(&self) -> usize { + self.0 + } } impl EdgeIndex { /// Returns unique id (unique with respect to the graph holding associated edge). 
- pub fn edge_id(&self) -> usize { self.0 } + pub fn edge_id(&self) -> usize { + self.0 + } } -impl Graph { - pub fn new() -> Graph { +impl Graph { + pub fn new() -> Graph { Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new(), } } - /////////////////////////////////////////////////////////////////////////// - // Simple accessors + // # Simple accessors #[inline] - pub fn all_nodes<'a>(&'a self) -> &'a [Node] { + pub fn all_nodes(&self) -> &[Node] { &self.nodes } @@ -125,7 +134,7 @@ impl Graph { } #[inline] - pub fn all_edges<'a>(&'a self) -> &'a [Edge] { + pub fn all_edges(&self) -> &[Edge] { &self.edges } @@ -134,8 +143,7 @@ impl Graph { self.edges.len() } - /////////////////////////////////////////////////////////////////////////// - // Node construction + // # Node construction pub fn next_node_index(&self) -> NodeIndex { NodeIndex(self.nodes.len()) @@ -145,43 +153,37 @@ impl Graph { let idx = self.next_node_index(); self.nodes.push(Node { first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX], - data: data + data: data, }); idx } - pub fn mut_node_data<'a>(&'a mut self, idx: NodeIndex) -> &'a mut N { + pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N { &mut self.nodes[idx.0].data } - pub fn node_data<'a>(&'a self, idx: NodeIndex) -> &'a N { + pub fn node_data(&self, idx: NodeIndex) -> &N { &self.nodes[idx.0].data } - pub fn node<'a>(&'a self, idx: NodeIndex) -> &'a Node { + pub fn node(&self, idx: NodeIndex) -> &Node { &self.nodes[idx.0] } - /////////////////////////////////////////////////////////////////////////// - // Edge construction and queries + // # Edge construction and queries pub fn next_edge_index(&self) -> EdgeIndex { EdgeIndex(self.edges.len()) } - pub fn add_edge(&mut self, - source: NodeIndex, - target: NodeIndex, - data: E) -> EdgeIndex { + pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex { debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data); let idx = self.next_edge_index(); 
// read current first of the list of edges from each node - let source_first = self.nodes[source.0] - .first_edge[OUTGOING.repr]; - let target_first = self.nodes[target.0] - .first_edge[INCOMING.repr]; + let source_first = self.nodes[source.0].first_edge[OUTGOING.repr]; + let target_first = self.nodes[target.0].first_edge[INCOMING.repr]; // create the new edge, with the previous firsts from each node // as the next pointers @@ -189,7 +191,7 @@ impl Graph { next_edge: [source_first, target_first], source: source, target: target, - data: data + data: data, }); // adjust the firsts for each node target be the next object. @@ -199,15 +201,15 @@ impl Graph { return idx; } - pub fn mut_edge_data<'a>(&'a mut self, idx: EdgeIndex) -> &'a mut E { + pub fn mut_edge_data(&mut self, idx: EdgeIndex) -> &mut E { &mut self.edges[idx.0].data } - pub fn edge_data<'a>(&'a self, idx: EdgeIndex) -> &'a E { + pub fn edge_data(&self, idx: EdgeIndex) -> &E { &self.edges[idx.0].data } - pub fn edge<'a>(&'a self, idx: EdgeIndex) -> &'a Edge { + pub fn edge(&self, idx: EdgeIndex) -> &Edge { &self.edges[idx.0] } @@ -227,98 +229,165 @@ impl Graph { self.edges[edge.0].next_edge[dir.repr] } - /////////////////////////////////////////////////////////////////////////// - // Iterating over nodes, edges + // # Iterating over nodes, edges + + pub fn enumerated_nodes(&self) -> EnumeratedNodes { + EnumeratedNodes { + iter: self.nodes.iter().enumerate() + } + } + + pub fn enumerated_edges(&self) -> EnumeratedEdges { + EnumeratedEdges { + iter: self.edges.iter().enumerate() + } + } - pub fn each_node<'a, F>(&'a self, mut f: F) -> bool where - F: FnMut(NodeIndex, &'a Node) -> bool, + pub fn each_node<'a, F>(&'a self, mut f: F) -> bool + where F: FnMut(NodeIndex, &'a Node) -> bool { //! Iterates over all edges defined in the graph. 
- self.nodes.iter().enumerate().all(|(i, node)| f(NodeIndex(i), node)) + self.enumerated_nodes().all(|(node_idx, node)| f(node_idx, node)) } - pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool where - F: FnMut(EdgeIndex, &'a Edge) -> bool, + pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool + where F: FnMut(EdgeIndex, &'a Edge) -> bool { //! Iterates over all edges defined in the graph - self.edges.iter().enumerate().all(|(i, edge)| f(EdgeIndex(i), edge)) + self.enumerated_edges().all(|(edge_idx, edge)| f(edge_idx, edge)) } - pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges { + pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges { self.adjacent_edges(source, OUTGOING) } - pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges { + pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges { self.adjacent_edges(source, INCOMING) } - pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges { + pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges { let first_edge = self.node(source).first_edge[direction.repr]; - AdjacentEdges { graph: self, direction: direction, next: first_edge } + AdjacentEdges { + graph: self, + direction: direction, + next: first_edge, + } } - pub fn successor_nodes<'a>(&'a self, source: NodeIndex) -> AdjacentTargets { + pub fn successor_nodes(&self, source: NodeIndex) -> AdjacentTargets { self.outgoing_edges(source).targets() } - pub fn predecessor_nodes<'a>(&'a self, target: NodeIndex) -> AdjacentSources { + pub fn predecessor_nodes(&self, target: NodeIndex) -> AdjacentSources { self.incoming_edges(target).sources() } - /////////////////////////////////////////////////////////////////////////// - // Fixed-point iteration - // - // A common use for graphs in our compiler is to perform - // fixed-point iteration. 
In this case, each edge represents a - // constraint, and the nodes themselves are associated with - // variables or other bitsets. This method facilitates such a - // computation. - - pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F) where - F: FnMut(usize, EdgeIndex, &'a Edge) -> bool, + /// A common use for graphs in our compiler is to perform + /// fixed-point iteration. In this case, each edge represents a + /// constraint, and the nodes themselves are associated with + /// variables or other bitsets. This method facilitates such a + /// computation. + pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F) + where F: FnMut(usize, EdgeIndex, &'a Edge) -> bool { let mut iteration = 0; let mut changed = true; while changed { changed = false; iteration += 1; - for (i, edge) in self.edges.iter().enumerate() { - changed |= op(iteration, EdgeIndex(i), edge); + for (edge_index, edge) in self.enumerated_edges() { + changed |= op(iteration, edge_index, edge); } } } - pub fn depth_traverse<'a>(&'a self, start: NodeIndex) -> DepthFirstTraversal<'a, N, E> { - DepthFirstTraversal { - graph: self, - stack: vec![start], - visited: BitVector::new(self.nodes.len()), + pub fn depth_traverse<'a>(&'a self, + start: NodeIndex, + direction: Direction) + -> DepthFirstTraversal<'a, N, E> { + DepthFirstTraversal::with_start_node(self, start, direction) + } + + /// Whether or not a node can be reached from itself. + pub fn is_node_cyclic(&self, starting_node_index: NodeIndex) -> bool { + // This is similar to depth traversal below, but we + // can't use that, because depth traversal doesn't show + // the starting node a second time. + let mut visited = BitVector::new(self.len_nodes()); + let mut stack = vec![starting_node_index]; + + while let Some(current_node_index) = stack.pop() { + visited.insert(current_node_index.0); + + // Directionality doesn't change the answer, + // so just use outgoing edges. 
+ for (_, edge) in self.outgoing_edges(current_node_index) { + let target_node_index = edge.target(); + + if target_node_index == starting_node_index { + return true; + } + + if !visited.contains(target_node_index.0) { + stack.push(target_node_index); + } + } } + + false } } -/////////////////////////////////////////////////////////////////////////// -// Iterators +// # Iterators + +pub struct EnumeratedNodes<'g, N> + where N: 'g, +{ + iter: ::std::iter::Enumerate<::std::slice::Iter<'g, Node>> +} + +impl<'g, N: Debug> Iterator for EnumeratedNodes<'g, N> { + type Item = (NodeIndex, &'g Node); + + fn next(&mut self) -> Option<(NodeIndex, &'g Node)> { + self.iter.next().map(|(idx, n)| (NodeIndex(idx), n)) + } +} -pub struct AdjacentEdges<'g,N,E> - where N:'g, E:'g +pub struct EnumeratedEdges<'g, E> + where E: 'g, +{ + iter: ::std::iter::Enumerate<::std::slice::Iter<'g, Edge>> +} + +impl<'g, E: Debug> Iterator for EnumeratedEdges<'g, E> { + type Item = (EdgeIndex, &'g Edge); + + fn next(&mut self) -> Option<(EdgeIndex, &'g Edge)> { + self.iter.next().map(|(idx, e)| (EdgeIndex(idx), e)) + } +} + +pub struct AdjacentEdges<'g, N, E> + where N: 'g, + E: 'g { graph: &'g Graph, direction: Direction, next: EdgeIndex, } -impl<'g,N,E> AdjacentEdges<'g,N,E> { - fn targets(self) -> AdjacentTargets<'g,N,E> { +impl<'g, N, E> AdjacentEdges<'g, N, E> { + fn targets(self) -> AdjacentTargets<'g, N, E> { AdjacentTargets { edges: self } } - fn sources(self) -> AdjacentSources<'g,N,E> { + fn sources(self) -> AdjacentSources<'g, N, E> { AdjacentSources { edges: self } } } -impl<'g, N:Debug, E:Debug> Iterator for AdjacentEdges<'g, N, E> { +impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> { type Item = (EdgeIndex, &'g Edge); fn next(&mut self) -> Option<(EdgeIndex, &'g Edge)> { @@ -333,13 +402,14 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentEdges<'g, N, E> { } } -pub struct AdjacentTargets<'g,N:'g,E:'g> - where N:'g, E:'g +pub struct AdjacentTargets<'g, N, E> + where 
N: 'g, + E: 'g { - edges: AdjacentEdges<'g,N,E>, + edges: AdjacentEdges<'g, N, E>, } -impl<'g, N:Debug, E:Debug> Iterator for AdjacentTargets<'g, N, E> { +impl<'g, N: Debug, E: Debug> Iterator for AdjacentTargets<'g, N, E> { type Item = NodeIndex; fn next(&mut self) -> Option { @@ -347,13 +417,14 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentTargets<'g, N, E> { } } -pub struct AdjacentSources<'g,N:'g,E:'g> - where N:'g, E:'g +pub struct AdjacentSources<'g, N, E> + where N: 'g, + E: 'g { - edges: AdjacentEdges<'g,N,E>, + edges: AdjacentEdges<'g, N, E>, } -impl<'g, N:Debug, E:Debug> Iterator for AdjacentSources<'g, N, E> { +impl<'g, N: Debug, E: Debug> Iterator for AdjacentSources<'g, N, E> { type Item = NodeIndex; fn next(&mut self) -> Option { @@ -361,36 +432,72 @@ impl<'g, N:Debug, E:Debug> Iterator for AdjacentSources<'g, N, E> { } } -pub struct DepthFirstTraversal<'g, N:'g, E:'g> { +pub struct DepthFirstTraversal<'g, N, E> + where N: 'g, + E: 'g +{ graph: &'g Graph, stack: Vec, - visited: BitVector + visited: BitVector, + direction: Direction, } -impl<'g, N:Debug, E:Debug> Iterator for DepthFirstTraversal<'g, N, E> { - type Item = NodeIndex; +impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> { + pub fn new(graph: &'g Graph, direction: Direction) -> Self { + let visited = BitVector::new(graph.len_nodes()); + DepthFirstTraversal { + graph: graph, + stack: vec![], + visited: visited, + direction: direction, + } + } - fn next(&mut self) -> Option { - while let Some(idx) = self.stack.pop() { - if !self.visited.insert(idx.node_id()) { - continue; - } + pub fn with_start_node(graph: &'g Graph, + start_node: NodeIndex, + direction: Direction) + -> Self { + let mut visited = BitVector::new(graph.len_nodes()); + visited.insert(start_node.node_id()); + DepthFirstTraversal { + graph: graph, + stack: vec![start_node], + visited: visited, + direction: direction, + } + } - for (_, edge) in self.graph.outgoing_edges(idx) { - if 
!self.visited.contains(edge.target().node_id()) { - self.stack.push(edge.target()); - } - } + pub fn reset(&mut self, start_node: NodeIndex) { + self.stack.truncate(0); + self.stack.push(start_node); + self.visited.clear(); + self.visited.insert(start_node.node_id()); + } - return Some(idx); + fn visit(&mut self, node: NodeIndex) { + if self.visited.insert(node.node_id()) { + self.stack.push(node); } + } +} + +impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> { + type Item = NodeIndex; - return None; + fn next(&mut self) -> Option { + let next = self.stack.pop(); + if let Some(idx) = next { + for (_, edge) in self.graph.adjacent_edges(idx, self.direction) { + let target = edge.source_or_target(self.direction); + self.visit(target); + } + } + next } } -pub fn each_edge_index(max_edge_index: EdgeIndex, mut f: F) where - F: FnMut(EdgeIndex) -> bool, +pub fn each_edge_index(max_edge_index: EdgeIndex, mut f: F) + where F: FnMut(EdgeIndex) -> bool { let mut i = 0; let n = max_edge_index.0; diff --git a/src/librustc_data_structures/graph/tests.rs b/src/librustc_data_structures/graph/tests.rs index 33b2edd2e106d..a87410e6e1c8c 100644 --- a/src/librustc_data_structures/graph/tests.rs +++ b/src/librustc_data_structures/graph/tests.rs @@ -20,10 +20,13 @@ fn create_graph() -> TestGraph { // Create a simple graph // - // A -+> B --> C - // | | ^ - // | v | - // F D --> E + // F + // | + // V + // A --> B --> C + // | ^ + // v | + // D --> E let a = graph.add_node("A"); let b = graph.add_node("B"); @@ -42,6 +45,29 @@ fn create_graph() -> TestGraph { return graph; } +fn create_graph_with_cycle() -> TestGraph { + let mut graph = Graph::new(); + + // Create a graph with a cycle. 
+ // + // A --> B <-- + + // | | + // v | + // C --> D + + let a = graph.add_node("A"); + let b = graph.add_node("B"); + let c = graph.add_node("C"); + let d = graph.add_node("D"); + + graph.add_edge(a, b, "AB"); + graph.add_edge(b, c, "BC"); + graph.add_edge(c, d, "CD"); + graph.add_edge(d, b, "DB"); + + return graph; +} + #[test] fn each_node() { let graph = create_graph(); @@ -64,11 +90,11 @@ fn each_edge() { }); } -fn test_adjacent_edges(graph: &Graph, - start_index: NodeIndex, - start_data: N, - expected_incoming: &[(E,N)], - expected_outgoing: &[(E,N)]) { +fn test_adjacent_edges(graph: &Graph, + start_index: NodeIndex, + start_data: N, + expected_incoming: &[(E, N)], + expected_outgoing: &[(E, N)]) { assert!(graph.node_data(start_index) == &start_data); let mut counter = 0; @@ -76,7 +102,10 @@ fn test_adjacent_edges(graph: &Graph, assert!(graph.edge_data(edge_index) == &edge.data); assert!(counter < expected_incoming.len()); debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", - counter, expected_incoming[counter], edge_index, edge); + counter, + expected_incoming[counter], + edge_index, + edge); match expected_incoming[counter] { (ref e, ref n) => { assert!(e == &edge.data); @@ -93,7 +122,10 @@ fn test_adjacent_edges(graph: &Graph, assert!(graph.edge_data(edge_index) == &edge.data); assert!(counter < expected_outgoing.len()); debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}", - counter, expected_outgoing[counter], edge_index, edge); + counter, + expected_outgoing[counter], + edge_index, + edge); match expected_outgoing[counter] { (ref e, ref n) => { assert!(e == &edge.data); @@ -109,31 +141,39 @@ fn test_adjacent_edges(graph: &Graph, #[test] fn each_adjacent_from_a() { let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(0), "A", - &[], - &[("AB", "B")]); + test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]); } #[test] fn each_adjacent_from_b() { let graph = create_graph(); - test_adjacent_edges(&graph, 
NodeIndex(1), "B", - &[("FB", "F"), ("AB", "A"),], - &[("BD", "D"), ("BC", "C"),]); + test_adjacent_edges(&graph, + NodeIndex(1), + "B", + &[("FB", "F"), ("AB", "A")], + &[("BD", "D"), ("BC", "C")]); } #[test] fn each_adjacent_from_c() { let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(2), "C", - &[("EC", "E"), ("BC", "B")], - &[]); + test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]); } #[test] fn each_adjacent_from_d() { let graph = create_graph(); - test_adjacent_edges(&graph, NodeIndex(3), "D", - &[("BD", "B")], - &[("DE", "E")]); + test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]); +} + +#[test] +fn is_node_cyclic_a() { + let graph = create_graph_with_cycle(); + assert!(!graph.is_node_cyclic(NodeIndex(0))); +} + +#[test] +fn is_node_cyclic_b() { + let graph = create_graph_with_cycle(); + assert!(graph.is_node_cyclic(NodeIndex(1))); } diff --git a/src/librustc_data_structures/indexed_set.rs b/src/librustc_data_structures/indexed_set.rs new file mode 100644 index 0000000000000..2e9e054e97eaf --- /dev/null +++ b/src/librustc_data_structures/indexed_set.rs @@ -0,0 +1,156 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::ops::{Deref, DerefMut, Range}; +use bitslice::{BitSlice, Word}; +use bitslice::{bitwise, Union, Subtract}; +use indexed_vec::Idx; + +/// Represents a set (or packed family of sets), of some element type +/// E, where each E is identified by some unique index type `T`. 
+/// +/// In other words, `T` is the type used to index into the bitvector +/// this type uses to represent the set of object it holds. +pub struct IdxSetBuf { + _pd: PhantomData, + bits: Vec, +} + +impl Clone for IdxSetBuf { + fn clone(&self) -> Self { + IdxSetBuf { _pd: PhantomData, bits: self.bits.clone() } + } +} + +// pnkfelix wants to have this be `IdxSet([Word]) and then pass +// around `&mut IdxSet` or `&IdxSet`. +// +// WARNING: Mapping a `&IdxSetBuf` to `&IdxSet` (at least today) +// requires a transmute relying on representation guarantees that may +// not hold in the future. + +/// Represents a set (or packed family of sets), of some element type +/// E, where each E is identified by some unique index type `T`. +/// +/// In other words, `T` is the type used to index into the bitslice +/// this type uses to represent the set of object it holds. +pub struct IdxSet { + _pd: PhantomData, + bits: [Word], +} + +impl fmt::Debug for IdxSetBuf { + fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { self.bits.fmt(w) } +} + +impl fmt::Debug for IdxSet { + fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { self.bits.fmt(w) } +} + +impl IdxSetBuf { + fn new(init: Word, universe_size: usize) -> Self { + let bits_per_word = mem::size_of::() * 8; + let num_words = (universe_size + (bits_per_word - 1)) / bits_per_word; + IdxSetBuf { + _pd: Default::default(), + bits: vec![init; num_words], + } + } + + /// Creates set holding every element whose index falls in range 0..universe_size. + pub fn new_filled(universe_size: usize) -> Self { + Self::new(!0, universe_size) + } + + /// Creates set holding no elements. 
+ pub fn new_empty(universe_size: usize) -> Self { + Self::new(0, universe_size) + } +} + +impl IdxSet { + unsafe fn from_slice(s: &[Word]) -> &Self { + mem::transmute(s) // (see above WARNING) + } + + unsafe fn from_slice_mut(s: &mut [Word]) -> &mut Self { + mem::transmute(s) // (see above WARNING) + } +} + +impl Deref for IdxSetBuf { + type Target = IdxSet; + fn deref(&self) -> &IdxSet { + unsafe { IdxSet::from_slice(&self.bits[..]) } + } +} + +impl DerefMut for IdxSetBuf { + fn deref_mut(&mut self) -> &mut IdxSet { + unsafe { IdxSet::from_slice_mut(&mut self.bits[..]) } + } +} + +impl IdxSet { + pub fn to_owned(&self) -> IdxSetBuf { + IdxSetBuf { + _pd: Default::default(), + bits: self.bits.to_owned(), + } + } + + /// Removes `elem` from the set `self`; returns true iff this changed `self`. + pub fn remove(&mut self, elem: &T) -> bool { + self.bits.clear_bit(elem.index()) + } + + /// Adds `elem` to the set `self`; returns true iff this changed `self`. + pub fn add(&mut self, elem: &T) -> bool { + self.bits.set_bit(elem.index()) + } + + pub fn range(&self, elems: &Range) -> &Self { + let elems = elems.start.index()..elems.end.index(); + unsafe { Self::from_slice(&self.bits[elems]) } + } + + pub fn range_mut(&mut self, elems: &Range) -> &mut Self { + let elems = elems.start.index()..elems.end.index(); + unsafe { Self::from_slice_mut(&mut self.bits[elems]) } + } + + /// Returns true iff set `self` contains `elem`. + pub fn contains(&self, elem: &T) -> bool { + self.bits.get_bit(elem.index()) + } + + pub fn words(&self) -> &[Word] { + &self.bits[..] + } + + pub fn words_mut(&mut self) -> &mut [Word] { + &mut self.bits[..] 
+ } + + pub fn clone_from(&mut self, other: &IdxSet) { + self.words_mut().clone_from_slice(other.words()); + } + + pub fn union(&mut self, other: &IdxSet) -> bool { + bitwise(self.words_mut(), other.words(), &Union) + } + + pub fn subtract(&mut self, other: &IdxSet) -> bool { + bitwise(self.words_mut(), other.words(), &Subtract) + } +} diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs new file mode 100644 index 0000000000000..00cea9cbdf6b7 --- /dev/null +++ b/src/librustc_data_structures/indexed_vec.rs @@ -0,0 +1,257 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::fmt::Debug; +use std::iter::{self, FromIterator}; +use std::slice; +use std::marker::PhantomData; +use std::ops::{Index, IndexMut, Range}; +use std::fmt; +use std::vec; +use std::u32; + +use rustc_serialize as serialize; + +/// Represents some newtyped `usize` wrapper. +/// +/// (purpose: avoid mixing indexes for different bitvector domains.) 
+pub trait Idx: Copy + 'static + Eq + Debug { + fn new(usize) -> Self; + fn index(self) -> usize; +} + +impl Idx for usize { + fn new(idx: usize) -> Self { idx } + fn index(self) -> usize { self } +} + +impl Idx for u32 { + fn new(idx: usize) -> Self { assert!(idx <= u32::MAX as usize); idx as u32 } + fn index(self) -> usize { self as usize } +} + +#[derive(Clone)] +pub struct IndexVec { + pub raw: Vec, + _marker: PhantomData +} + +impl serialize::Encodable for IndexVec { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + serialize::Encodable::encode(&self.raw, s) + } +} + +impl serialize::Decodable for IndexVec { + fn decode(d: &mut D) -> Result { + serialize::Decodable::decode(d).map(|v| { + IndexVec { raw: v, _marker: PhantomData } + }) + } +} + +impl fmt::Debug for IndexVec { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.raw, fmt) + } +} + +pub type Enumerated = iter::Map, IntoIdx>; + +impl IndexVec { + #[inline] + pub fn new() -> Self { + IndexVec { raw: Vec::new(), _marker: PhantomData } + } + + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + IndexVec { raw: Vec::with_capacity(capacity), _marker: PhantomData } + } + + #[inline] + pub fn from_elem(elem: T, universe: &IndexVec) -> Self + where T: Clone + { + IndexVec { raw: vec![elem; universe.len()], _marker: PhantomData } + } + + #[inline] + pub fn from_elem_n(elem: T, n: usize) -> Self + where T: Clone + { + IndexVec { raw: vec![elem; n], _marker: PhantomData } + } + + #[inline] + pub fn push(&mut self, d: T) -> I { + let idx = I::new(self.len()); + self.raw.push(d); + idx + } + + #[inline] + pub fn len(&self) -> usize { + self.raw.len() + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.raw.is_empty() + } + + #[inline] + pub fn into_iter(self) -> vec::IntoIter { + self.raw.into_iter() + } + + #[inline] + pub fn into_iter_enumerated(self) -> Enumerated> + { + self.raw.into_iter().enumerate().map(IntoIdx { _marker: PhantomData }) + } + + 
#[inline] + pub fn iter(&self) -> slice::Iter { + self.raw.iter() + } + + #[inline] + pub fn iter_enumerated(&self) -> Enumerated> + { + self.raw.iter().enumerate().map(IntoIdx { _marker: PhantomData }) + } + + #[inline] + pub fn indices(&self) -> iter::Map, IntoIdx> { + (0..self.len()).map(IntoIdx { _marker: PhantomData }) + } + + #[inline] + pub fn iter_mut(&mut self) -> slice::IterMut { + self.raw.iter_mut() + } + + #[inline] + pub fn iter_enumerated_mut(&mut self) -> Enumerated> + { + self.raw.iter_mut().enumerate().map(IntoIdx { _marker: PhantomData }) + } + + #[inline] + pub fn last(&self) -> Option { + self.len().checked_sub(1).map(I::new) + } + + #[inline] + pub fn shrink_to_fit(&mut self) { + self.raw.shrink_to_fit() + } + + #[inline] + pub fn swap(&mut self, a: usize, b: usize) { + self.raw.swap(a, b) + } + + #[inline] + pub fn truncate(&mut self, a: usize) { + self.raw.truncate(a) + } +} + +impl Index for IndexVec { + type Output = T; + + #[inline] + fn index(&self, index: I) -> &T { + &self.raw[index.index()] + } +} + +impl IndexMut for IndexVec { + #[inline] + fn index_mut(&mut self, index: I) -> &mut T { + &mut self.raw[index.index()] + } +} + +impl Extend for IndexVec { + #[inline] + fn extend>(&mut self, iter: J) { + self.raw.extend(iter); + } +} + +impl FromIterator for IndexVec { + #[inline] + fn from_iter(iter: J) -> Self where J: IntoIterator { + IndexVec { raw: FromIterator::from_iter(iter), _marker: PhantomData } + } +} + +impl IntoIterator for IndexVec { + type Item = T; + type IntoIter = vec::IntoIter; + + #[inline] + fn into_iter(self) -> vec::IntoIter { + self.raw.into_iter() + } + +} + +impl<'a, I: Idx, T> IntoIterator for &'a IndexVec { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + #[inline] + fn into_iter(self) -> slice::Iter<'a, T> { + self.raw.iter() + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + #[inline] + fn into_iter(mut self) 
-> slice::IterMut<'a, T> { + self.raw.iter_mut() + } +} + +pub struct IntoIdx { _marker: PhantomData } +impl FnOnce<((usize, T),)> for IntoIdx { + type Output = (I, T); + + extern "rust-call" fn call_once(self, ((n, t),): ((usize, T),)) -> Self::Output { + (I::new(n), t) + } +} + +impl FnMut<((usize, T),)> for IntoIdx { + extern "rust-call" fn call_mut(&mut self, ((n, t),): ((usize, T),)) -> Self::Output { + (I::new(n), t) + } +} + +impl FnOnce<(usize,)> for IntoIdx { + type Output = I; + + extern "rust-call" fn call_once(self, (n,): (usize,)) -> Self::Output { + I::new(n) + } +} + +impl FnMut<(usize,)> for IntoIdx { + extern "rust-call" fn call_mut(&mut self, (n,): (usize,)) -> Self::Output { + I::new(n) + } +} diff --git a/src/librustc_data_structures/ivar.rs b/src/librustc_data_structures/ivar.rs index dabe1b984df2a..f842f4a41a118 100644 --- a/src/librustc_data_structures/ivar.rs +++ b/src/librustc_data_structures/ivar.rs @@ -26,14 +26,12 @@ use std::cell::Cell; /// suffices for the current purposes. 
#[derive(PartialEq)] pub struct Ivar { - data: Cell> + data: Cell>, } impl Ivar { pub fn new() -> Ivar { - Ivar { - data: Cell::new(None) - } + Ivar { data: Cell::new(None) } } pub fn get(&self) -> Option { @@ -41,8 +39,7 @@ impl Ivar { } pub fn fulfill(&self, value: T) { - assert!(self.data.get().is_none(), - "Value already set!"); + assert!(self.data.get().is_none(), "Value already set!"); self.data.set(Some(value)); } @@ -55,11 +52,11 @@ impl Ivar { } } -impl fmt::Debug for Ivar { +impl fmt::Debug for Ivar { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.get() { Some(val) => write!(f, "Ivar({:?})", val), - None => f.write_str("Ivar()") + None => f.write_str("Ivar()"), } } } @@ -68,7 +65,7 @@ impl Clone for Ivar { fn clone(&self) -> Ivar { match self.get() { Some(val) => Ivar { data: Cell::new(Some(val)) }, - None => Ivar::new() + None => Ivar::new(), } } } diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 1fbbdf17455b2..de13b9bf4be10 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -23,28 +23,50 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://www.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] -#![feature(hashmap_hasher)] #![feature(nonzero)] #![feature(rustc_private)] #![feature(staged_api)] +#![feature(unboxed_closures)] +#![feature(fn_traits)] +#![feature(untagged_unions)] +#![feature(associated_consts)] +#![feature(unsize)] +#![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] extern crate core; -#[macro_use] extern crate log; +#[macro_use] +extern crate log; extern crate serialize as rustc_serialize; // used by deriving +#[cfg(unix)] +extern crate libc; +pub mod array_vec; +pub mod accumulate_vec; +pub mod small_vec; +pub mod base_n; +pub mod bitslice; +pub mod blake2b; pub mod bitvec; +pub 
mod fmt_wrap; pub mod graph; pub mod ivar; +pub mod indexed_set; +pub mod indexed_vec; pub mod obligation_forest; +pub mod snapshot_map; pub mod snapshot_vec; pub mod transitive_relation; pub mod unify; pub mod fnv; +pub mod fx; pub mod tuple_slice; pub mod veccell; +pub mod control_flow_graph; +pub mod flock; // See comments in src/librustc/lib.rs #[doc(hidden)] diff --git a/src/librustc_data_structures/obligation_forest/README.md b/src/librustc_data_structures/obligation_forest/README.md index 1ffe07bb43b4e..982a2bacce164 100644 --- a/src/librustc_data_structures/obligation_forest/README.md +++ b/src/librustc_data_structures/obligation_forest/README.md @@ -9,15 +9,18 @@ place). `ObligationForest` supports two main public operations (there are a few others not discussed here): -1. Add a new root obligation (`push_root`). +1. Add a new root obligations (`push_tree`). 2. Process the pending obligations (`process_obligations`). When a new obligation `N` is added, it becomes the root of an -obligation tree. This tree is a singleton to start, so `N` is both the -root and the only leaf. Each time the `process_obligations` method is -called, it will invoke its callback with every pending obligation (so -that will include `N`, the first time). The callback shoud process the -obligation `O` that it is given and return one of three results: +obligation tree. This tree can also carry some per-tree state `T`, +which is given at the same time. This tree is a singleton to start, so +`N` is both the root and the only leaf. Each time the +`process_obligations` method is called, it will invoke its callback +with every pending obligation (so that will include `N`, the first +time). The callback also receives a (mutable) reference to the +per-tree state `T`. The callback should process the obligation `O` +that it is given and return one of three results: - `Ok(None)` -> ambiguous result. Obligation was neither a success nor a failure. 
It is assumed that further attempts to process the @@ -57,7 +60,7 @@ which includes three bits of information: `process_obligations` would simply yield back further ambiguous results. This is used by the `FulfillmentContext` to decide when it has reached a steady state. - + #### Snapshots The `ObligationForest` supports a limited form of snapshots; see @@ -76,5 +79,3 @@ parent and (for convenience) its root (which may be itself). It also has a current state, described by `NodeState`. After each processing step, we compress the vector to remove completed and error nodes, which aren't needed anymore. - - diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs index 0d92a2b158f82..a46238309bb46 100644 --- a/src/librustc_data_structures/obligation_forest/mod.rs +++ b/src/librustc_data_structures/obligation_forest/mod.rs @@ -15,15 +15,45 @@ //! in the first place). See README.md for a general overview of how //! to use this class. +use fx::{FxHashMap, FxHashSet}; + +use std::cell::Cell; +use std::collections::hash_map::Entry; use std::fmt::Debug; -use std::mem; +use std::hash; +use std::marker::PhantomData; mod node_index; +use self::node_index::NodeIndex; #[cfg(test)] mod test; -pub struct ObligationForest { +pub trait ForestObligation : Clone + Debug { + type Predicate : Clone + hash::Hash + Eq + Debug; + + fn as_predicate(&self) -> &Self::Predicate; +} + +pub trait ObligationProcessor { + type Obligation : ForestObligation; + type Error : Debug; + + fn process_obligation(&mut self, + obligation: &mut Self::Obligation) + -> Result>, Self::Error>; + + fn process_backedge<'c, I>(&mut self, cycle: I, + _marker: PhantomData<&'c Self::Obligation>) + where I: Clone + Iterator; +} + +struct SnapshotData { + node_len: usize, + cache_list_len: usize, +} + +pub struct ObligationForest { /// The list of obligations. 
In between calls to /// `process_obligations`, this list only contains nodes in the /// `Pending` or `Success` state (with a non-zero number of @@ -37,52 +67,76 @@ pub struct ObligationForest { /// at a higher index than its parent. This is needed by the /// backtrace iterator (which uses `split_at`). nodes: Vec>, - snapshots: Vec + /// A cache of predicates that have been successfully completed. + done_cache: FxHashSet, + /// An cache of the nodes in `nodes`, indexed by predicate. + waiting_cache: FxHashMap, + /// A list of the obligations added in snapshots, to allow + /// for their removal. + cache_list: Vec, + snapshots: Vec, + scratch: Option>, } pub struct Snapshot { len: usize, } -pub use self::node_index::NodeIndex; - +#[derive(Debug)] struct Node { - state: NodeState, + obligation: O, + state: Cell, + + /// Obligations that depend on this obligation for their + /// completion. They must all be in a non-pending state. + dependents: Vec, + /// The parent of a node - the original obligation of + /// which it is a subobligation. Except for error reporting, + /// this is just another member of `dependents`. parent: Option, - root: NodeIndex, // points to the root, which may be the current node } /// The state of one node in some tree within the forest. This /// represents the current state of processing for the obligation (of /// type `O`) associated with this node. -#[derive(Debug)] -enum NodeState { - /// Obligation not yet resolved to success or error. - Pending { obligation: O }, - - /// Obligation resolved to success; `num_incomplete_children` - /// indicates the number of children still in an "incomplete" - /// state. Incomplete means that either the child is still - /// pending, or it has children which are incomplete. (Basically, - /// there is pending work somewhere in the subtree of the child.) - /// - /// Once all children have completed, success nodes are removed - /// from the vector by the compression step. 
- Success { obligation: O, num_incomplete_children: usize }, +/// +/// Outside of ObligationForest methods, nodes should be either Pending +/// or Waiting. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum NodeState { + /// Obligations for which selection had not yet returned a + /// non-ambiguous result. + Pending, + + /// This obligation was selected successfuly, but may or + /// may not have subobligations. + Success, + + /// This obligation was selected sucessfully, but it has + /// a pending subobligation. + Waiting, + + /// This obligation, along with its subobligations, are complete, + /// and will be removed in the next collection. + Done, /// This obligation was resolved to an error. Error nodes are /// removed from the vector by the compression step. Error, + + /// This is a temporary state used in DFS loops to detect cycles, + /// it should not exist outside of these DFSes. + OnDfsStack, } #[derive(Debug)] -pub struct Outcome { +pub struct Outcome { /// Obligations that were completely evaluated, including all /// (transitive) subobligations. pub completed: Vec, /// Backtrace of obligations that were found to be in error. - pub errors: Vec>, + pub errors: Vec>, /// If true, then we saw no successful obligations, which means /// there is no point in further iteration. 
This is based on the @@ -94,16 +148,20 @@ pub struct Outcome { } #[derive(Debug, PartialEq, Eq)] -pub struct Error { +pub struct Error { pub error: E, pub backtrace: Vec, } -impl ObligationForest { +impl ObligationForest { pub fn new() -> ObligationForest { ObligationForest { nodes: vec![], - snapshots: vec![] + snapshots: vec![], + done_cache: FxHashSet(), + waiting_cache: FxHashMap(), + cache_list: vec![], + scratch: Some(vec![]), } } @@ -114,56 +172,96 @@ impl ObligationForest { } pub fn start_snapshot(&mut self) -> Snapshot { - self.snapshots.push(self.nodes.len()); + self.snapshots.push(SnapshotData { + node_len: self.nodes.len(), + cache_list_len: self.cache_list.len() + }); Snapshot { len: self.snapshots.len() } } pub fn commit_snapshot(&mut self, snapshot: Snapshot) { assert_eq!(snapshot.len, self.snapshots.len()); - let nodes_len = self.snapshots.pop().unwrap(); - assert!(self.nodes.len() >= nodes_len); + let info = self.snapshots.pop().unwrap(); + assert!(self.nodes.len() >= info.node_len); + assert!(self.cache_list.len() >= info.cache_list_len); } pub fn rollback_snapshot(&mut self, snapshot: Snapshot) { // Check that we are obeying stack discipline. assert_eq!(snapshot.len, self.snapshots.len()); - let nodes_len = self.snapshots.pop().unwrap(); + let info = self.snapshots.pop().unwrap(); - // The only action permitted while in a snapshot is to push - // new root obligations. Because no processing will have been - // done, those roots should still be in the pending state. - debug_assert!(self.nodes[nodes_len..].iter().all(|n| match n.state { - NodeState::Pending { .. } => true, - _ => false, - })); + for entry in &self.cache_list[info.cache_list_len..] { + self.done_cache.remove(entry); + self.waiting_cache.remove(entry); + } - self.nodes.truncate(nodes_len); + self.nodes.truncate(info.node_len); + self.cache_list.truncate(info.cache_list_len); } pub fn in_snapshot(&self) -> bool { !self.snapshots.is_empty() } - /// Adds a new tree to the forest. 
+ /// Registers an obligation /// - /// This CAN be done during a snapshot. - pub fn push_root(&mut self, obligation: O) { - let index = NodeIndex::new(self.nodes.len()); - self.nodes.push(Node::new(index, None, obligation)); + /// This CAN be done in a snapshot + pub fn register_obligation(&mut self, obligation: O) { + // Ignore errors here - there is no guarantee of success. + let _ = self.register_obligation_at(obligation, None); + } + + // returns Err(()) if we already know this obligation failed. + fn register_obligation_at(&mut self, obligation: O, parent: Option) + -> Result<(), ()> + { + if self.done_cache.contains(obligation.as_predicate()) { + return Ok(()) + } + + match self.waiting_cache.entry(obligation.as_predicate().clone()) { + Entry::Occupied(o) => { + debug!("register_obligation_at({:?}, {:?}) - duplicate of {:?}!", + obligation, parent, o.get()); + if let Some(parent) = parent { + if self.nodes[o.get().get()].dependents.contains(&parent) { + debug!("register_obligation_at({:?}, {:?}) - duplicate subobligation", + obligation, parent); + } else { + self.nodes[o.get().get()].dependents.push(parent); + } + } + if let NodeState::Error = self.nodes[o.get().get()].state.get() { + Err(()) + } else { + Ok(()) + } + } + Entry::Vacant(v) => { + debug!("register_obligation_at({:?}, {:?}) - ok", + obligation, parent); + v.insert(NodeIndex::new(self.nodes.len())); + self.cache_list.push(obligation.as_predicate().clone()); + self.nodes.push(Node::new(parent, obligation)); + Ok(()) + } + } } /// Convert all remaining obligations to the given error. /// /// This cannot be done during a snapshot. - pub fn to_errors(&mut self, error: E) -> Vec> { + pub fn to_errors(&mut self, error: E) -> Vec> { assert!(!self.in_snapshot()); let mut errors = vec![]; for index in 0..self.nodes.len() { - debug_assert!(!self.nodes[index].is_popped()); - self.inherit_error(index); - if let NodeState::Pending { .. 
} = self.nodes[index].state { - let backtrace = self.backtrace(index); - errors.push(Error { error: error.clone(), backtrace: backtrace }); + if let NodeState::Pending = self.nodes[index].state.get() { + let backtrace = self.error_at(index); + errors.push(Error { + error: error.clone(), + backtrace: backtrace, + }); } } let successful_obligations = self.compress(); @@ -172,21 +270,22 @@ impl ObligationForest { } /// Returns the set of obligations that are in a pending state. - pub fn pending_obligations(&self) -> Vec where O: Clone { - self.nodes.iter() - .filter_map(|n| match n.state { - NodeState::Pending { ref obligation } => Some(obligation), - _ => None, - }) - .cloned() - .collect() + pub fn pending_obligations(&self) -> Vec + where O: Clone + { + self.nodes + .iter() + .filter(|n| n.state.get() == NodeState::Pending) + .map(|n| n.obligation.clone()) + .collect() } - /// Process the obligations. + /// Perform a pass through the obligation list. This must + /// be called in a loop until `outcome.stalled` is false. /// /// This CANNOT be unrolled (presently, at least). - pub fn process_obligations(&mut self, mut action: F) -> Outcome - where E: Debug, F: FnMut(&mut O, Backtrace) -> Result>, E> + pub fn process_obligations

(&mut self, processor: &mut P) -> Outcome + where P: ObligationProcessor { debug!("process_obligations(len={})", self.nodes.len()); assert!(!self.in_snapshot()); // cannot unroll this action @@ -194,35 +293,23 @@ impl ObligationForest { let mut errors = vec![]; let mut stalled = true; - // We maintain the invariant that the list is in pre-order, so - // parents occur before their children. Also, whenever an - // error occurs, we propagate it from the child all the way to - // the root of the tree. Together, these two facts mean that - // when we visit a node, we can check if its root is in error, - // and we will find out if any prior node within this forest - // encountered an error. - for index in 0..self.nodes.len() { - debug_assert!(!self.nodes[index].is_popped()); - self.inherit_error(index); - debug!("process_obligations: node {} == {:?}", - index, self.nodes[index].state); - - let result = { - let parent = self.nodes[index].parent; - let (prefix, suffix) = self.nodes.split_at_mut(index); - let backtrace = Backtrace::new(prefix, parent); - match suffix[0].state { - NodeState::Error | - NodeState::Success { .. } => - continue, - NodeState::Pending { ref mut obligation } => - action(obligation, backtrace), + index, + self.nodes[index]); + + let result = match self.nodes[index] { + Node { state: ref _state, ref mut obligation, .. 
} + if _state.get() == NodeState::Pending => + { + processor.process_obligation(obligation) } + _ => continue }; - debug!("process_obligations: node {} got result {:?}", index, result); + debug!("process_obligations: node {} got result {:?}", + index, + result); match result { Ok(None) => { @@ -231,232 +318,330 @@ impl ObligationForest { Ok(Some(children)) => { // if we saw a Some(_) result, we are not (yet) stalled stalled = false; - self.success(index, children); + self.nodes[index].state.set(NodeState::Success); + + for child in children { + let st = self.register_obligation_at( + child, + Some(NodeIndex::new(index)) + ); + if let Err(()) = st { + // error already reported - propagate it + // to our node. + self.error_at(index); + } + } } Err(err) => { - let backtrace = self.backtrace(index); - errors.push(Error { error: err, backtrace: backtrace }); + stalled = false; + let backtrace = self.error_at(index); + errors.push(Error { + error: err, + backtrace: backtrace, + }); } } } + if stalled { + // There's no need to perform marking, cycle processing and compression when nothing + // changed. + return Outcome { + completed: vec![], + errors: errors, + stalled: stalled, + }; + } + + self.mark_as_waiting(); + self.process_cycles(processor); + // Now we have to compress the result - let successful_obligations = self.compress(); + let completed_obligations = self.compress(); debug!("process_obligations: complete"); Outcome { - completed: successful_obligations, + completed: completed_obligations, errors: errors, stalled: stalled, } } - /// Indicates that node `index` has been processed successfully, - /// yielding `children` as the derivative work. If children is an - /// empty vector, this will update the ref count on the parent of - /// `index` to indicate that a child has completed - /// successfully. Otherwise, adds new nodes to represent the child - /// work. 
- fn success(&mut self, index: usize, children: Vec) { - debug!("success(index={}, children={:?})", index, children); - - let num_incomplete_children = children.len(); - - if num_incomplete_children == 0 { - // if there is no work left to be done, decrement parent's ref count - self.update_parent(index); - } else { - // create child work - let root_index = self.nodes[index].root; - let node_index = NodeIndex::new(index); - self.nodes.extend( - children.into_iter() - .map(|o| Node::new(root_index, Some(node_index), o))); + /// Mark all NodeState::Success nodes as NodeState::Done and + /// report all cycles between them. This should be called + /// after `mark_as_waiting` marks all nodes with pending + /// subobligations as NodeState::Waiting. + fn process_cycles

(&mut self, processor: &mut P) + where P: ObligationProcessor + { + let mut stack = self.scratch.take().unwrap(); + + for index in 0..self.nodes.len() { + // For rustc-benchmarks/inflate-0.1.0 this state test is extremely + // hot and the state is almost always `Pending` or `Waiting`. It's + // a win to handle the no-op cases immediately to avoid the cost of + // the function call. + let state = self.nodes[index].state.get(); + match state { + NodeState::Waiting | NodeState::Pending | NodeState::Done | NodeState::Error => {}, + _ => self.find_cycles_from_node(&mut stack, processor, index), + } } - // change state from `Pending` to `Success`, temporarily swapping in `Error` - let state = mem::replace(&mut self.nodes[index].state, NodeState::Error); - self.nodes[index].state = match state { - NodeState::Pending { obligation } => - NodeState::Success { obligation: obligation, - num_incomplete_children: num_incomplete_children }, - NodeState::Success { .. } | - NodeState::Error => - unreachable!() - }; + self.scratch = Some(stack); } - /// Decrements the ref count on the parent of `child`; if the - /// parent's ref count then reaches zero, proceeds recursively. - fn update_parent(&mut self, child: usize) { - debug!("update_parent(child={})", child); - if let Some(parent) = self.nodes[child].parent { - let parent = parent.get(); - match self.nodes[parent].state { - NodeState::Success { ref mut num_incomplete_children, .. } => { - *num_incomplete_children -= 1; - if *num_incomplete_children > 0 { - return; + fn find_cycles_from_node

(&self, stack: &mut Vec, + processor: &mut P, index: usize) + where P: ObligationProcessor + { + let node = &self.nodes[index]; + let state = node.state.get(); + match state { + NodeState::OnDfsStack => { + let index = + stack.iter().rposition(|n| *n == index).unwrap(); + // I need a Clone closure + #[derive(Clone)] + struct GetObligation<'a, O: 'a>(&'a [Node]); + impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> { + type Output = &'a O; + extern "rust-call" fn call_once(self, args: (&'b usize,)) -> &'a O { + &self.0[*args.0].obligation + } + } + impl<'a, 'b, O> FnMut<(&'b usize,)> for GetObligation<'a, O> { + extern "rust-call" fn call_mut(&mut self, args: (&'b usize,)) -> &'a O { + &self.0[*args.0].obligation } } - _ => unreachable!(), - } - self.update_parent(parent); - } - } - /// If the root of `child` is in an error state, places `child` - /// into an error state. This is used during processing so that we - /// skip the remaining obligations from a tree once some other - /// node in the tree is found to be in error. - fn inherit_error(&mut self, child: usize) { - let root = self.nodes[child].root.get(); - if let NodeState::Error = self.nodes[root].state { - self.nodes[child].state = NodeState::Error; - } + processor.process_backedge(stack[index..].iter().map(GetObligation(&self.nodes)), + PhantomData); + } + NodeState::Success => { + node.state.set(NodeState::OnDfsStack); + stack.push(index); + if let Some(parent) = node.parent { + self.find_cycles_from_node(stack, processor, parent.get()); + } + for dependent in &node.dependents { + self.find_cycles_from_node(stack, processor, dependent.get()); + } + stack.pop(); + node.state.set(NodeState::Done); + }, + NodeState::Waiting | NodeState::Pending => { + // this node is still reachable from some pending node. We + // will get to it when they are all processed. 
+ } + NodeState::Done | NodeState::Error => { + // already processed that node + } + }; } /// Returns a vector of obligations for `p` and all of its /// ancestors, putting them into the error state in the process. - /// The fact that the root is now marked as an error is used by - /// `inherit_error` above to propagate the error state to the - /// remainder of the tree. - fn backtrace(&mut self, mut p: usize) -> Vec { + fn error_at(&mut self, p: usize) -> Vec { + let mut error_stack = self.scratch.take().unwrap(); let mut trace = vec![]; + + let mut n = p; loop { - let state = mem::replace(&mut self.nodes[p].state, NodeState::Error); - match state { - NodeState::Pending { obligation } | - NodeState::Success { obligation, .. } => { - trace.push(obligation); - } - NodeState::Error => { - // we should not encounter an error, because if - // there was an error in the ancestors, it should - // have been propagated down and we should never - // have tried to process this obligation - panic!("encountered error in node {:?} when collecting stack trace", p); - } - } + self.nodes[n].state.set(NodeState::Error); + trace.push(self.nodes[n].obligation.clone()); + error_stack.extend(self.nodes[n].dependents.iter().map(|x| x.get())); // loop to the parent - match self.nodes[p].parent { - Some(q) => { p = q.get(); } - None => { return trace; } + match self.nodes[n].parent { + Some(q) => n = q.get(), + None => break } } + + loop { + // non-standard `while let` to bypass #6393 + let i = match error_stack.pop() { + Some(i) => i, + None => break + }; + + let node = &self.nodes[i]; + + match node.state.get() { + NodeState::Error => continue, + _ => node.state.set(NodeState::Error) + } + + error_stack.extend( + node.dependents.iter().cloned().chain(node.parent).map(|x| x.get()) + ); + } + + self.scratch = Some(error_stack); + trace + } + + #[inline] + fn mark_neighbors_as_waiting_from(&self, node: &Node) { + if let Some(parent) = node.parent { + 
self.mark_as_waiting_from(&self.nodes[parent.get()]); + } + + for dependent in &node.dependents { + self.mark_as_waiting_from(&self.nodes[dependent.get()]); + } + } + + /// Marks all nodes that depend on a pending node as NodeState::Waiting. + fn mark_as_waiting(&self) { + for node in &self.nodes { + if node.state.get() == NodeState::Waiting { + node.state.set(NodeState::Success); + } + } + + for node in &self.nodes { + if node.state.get() == NodeState::Pending { + self.mark_neighbors_as_waiting_from(node); + } + } + } + + fn mark_as_waiting_from(&self, node: &Node) { + match node.state.get() { + NodeState::Waiting | NodeState::Error | NodeState::OnDfsStack => return, + NodeState::Success => node.state.set(NodeState::Waiting), + NodeState::Pending | NodeState::Done => {}, + } + + self.mark_neighbors_as_waiting_from(node); } /// Compresses the vector, removing all popped nodes. This adjusts /// the indices and hence invalidates any outstanding /// indices. Cannot be used during a transaction. + /// + /// Beforehand, all nodes must be marked as `Done` and no cycles + /// on these nodes may be present. This is done by e.g. `process_cycles`. + #[inline(never)] fn compress(&mut self) -> Vec { assert!(!self.in_snapshot()); // didn't write code to unroll this action - let mut rewrites: Vec<_> = (0..self.nodes.len()).collect(); - // Finish propagating error state. Note that in this case we - // only have to check immediate parents, rather than all - // ancestors, because all errors have already occurred that - // are going to occur. let nodes_len = self.nodes.len(); - for i in 0..nodes_len { - if !self.nodes[i].is_popped() { - self.inherit_error(i); + let mut node_rewrites: Vec<_> = self.scratch.take().unwrap(); + node_rewrites.extend(0..nodes_len); + let mut dead_nodes = 0; + + // Now move all popped nodes to the end. Try to keep the order. 
+ // + // LOOP INVARIANT: + // self.nodes[0..i - dead_nodes] are the first remaining nodes + // self.nodes[i - dead_nodes..i] are all dead + // self.nodes[i..] are unchanged + for i in 0..self.nodes.len() { + match self.nodes[i].state.get() { + NodeState::Pending | NodeState::Waiting => { + if dead_nodes > 0 { + self.nodes.swap(i, i - dead_nodes); + node_rewrites[i] -= dead_nodes; + } + } + NodeState::Done => { + self.waiting_cache.remove(self.nodes[i].obligation.as_predicate()); + // FIXME(HashMap): why can't I get my key back? + self.done_cache.insert(self.nodes[i].obligation.as_predicate().clone()); + node_rewrites[i] = nodes_len; + dead_nodes += 1; + } + NodeState::Error => { + // We *intentionally* remove the node from the cache at this point. Otherwise + // tests must come up with a different type on every type error they + // check against. + self.waiting_cache.remove(self.nodes[i].obligation.as_predicate()); + node_rewrites[i] = nodes_len; + dead_nodes += 1; + } + NodeState::OnDfsStack | NodeState::Success => unreachable!() } } - // Now go through and move all nodes that are either - // successful or which have an error over into to the end of - // the list, preserving the relative order of the survivors - // (which is important for the `inherit_error` logic). - let mut dead = 0; - for i in 0..nodes_len { - if self.nodes[i].is_popped() { - dead += 1; - } else if dead > 0 { - self.nodes.swap(i, i - dead); - rewrites[i] -= dead; - } + // No compression needed. + if dead_nodes == 0 { + node_rewrites.truncate(0); + self.scratch = Some(node_rewrites); + return vec![]; } // Pop off all the nodes we killed and extract the success // stories. - let successful = - (0 .. dead).map(|_| self.nodes.pop().unwrap()) - .flat_map(|node| match node.state { - NodeState::Error => None, - NodeState::Pending { .. 
} => unreachable!(), - NodeState::Success { obligation, num_incomplete_children } => { - assert_eq!(num_incomplete_children, 0); - Some(obligation) - } - }) - .collect(); - - // Adjust the parent indices, since we compressed things. - for node in &mut self.nodes { - if let Some(ref mut index) = node.parent { - let new_index = rewrites[index.get()]; - debug_assert!(new_index < (nodes_len - dead)); - *index = NodeIndex::new(new_index); - } - - node.root = NodeIndex::new(rewrites[node.root.get()]); - } + let successful = (0..dead_nodes) + .map(|_| self.nodes.pop().unwrap()) + .flat_map(|node| { + match node.state.get() { + NodeState::Error => None, + NodeState::Done => Some(node.obligation), + _ => unreachable!() + } + }) + .collect(); + self.apply_rewrites(&node_rewrites); + + node_rewrites.truncate(0); + self.scratch = Some(node_rewrites); successful } -} -impl Node { - fn new(root: NodeIndex, parent: Option, obligation: O) -> Node { - Node { - parent: parent, - state: NodeState::Pending { obligation: obligation }, - root: root - } - } + fn apply_rewrites(&mut self, node_rewrites: &[usize]) { + let nodes_len = node_rewrites.len(); + + for node in &mut self.nodes { + if let Some(index) = node.parent { + let new_index = node_rewrites[index.get()]; + if new_index >= nodes_len { + // parent dead due to error + node.parent = None; + } else { + node.parent = Some(NodeIndex::new(new_index)); + } + } - fn is_popped(&self) -> bool { - match self.state { - NodeState::Pending { .. } => false, - NodeState::Success { num_incomplete_children, .. 
} => num_incomplete_children == 0, - NodeState::Error => true, + let mut i = 0; + while i < node.dependents.len() { + let new_index = node_rewrites[node.dependents[i].get()]; + if new_index >= nodes_len { + node.dependents.swap_remove(i); + } else { + node.dependents[i] = NodeIndex::new(new_index); + i += 1; + } + } } - } -} -#[derive(Clone)] -pub struct Backtrace<'b, O: 'b> { - nodes: &'b [Node], - pointer: Option, -} + let mut kill_list = vec![]; + for (predicate, index) in self.waiting_cache.iter_mut() { + let new_index = node_rewrites[index.get()]; + if new_index >= nodes_len { + kill_list.push(predicate.clone()); + } else { + *index = NodeIndex::new(new_index); + } + } -impl<'b, O> Backtrace<'b, O> { - fn new(nodes: &'b [Node], pointer: Option) -> Backtrace<'b, O> { - Backtrace { nodes: nodes, pointer: pointer } + for predicate in kill_list { self.waiting_cache.remove(&predicate); } } } -impl<'b, O> Iterator for Backtrace<'b, O> { - type Item = &'b O; - - fn next(&mut self) -> Option<&'b O> { - debug!("Backtrace: self.pointer = {:?}", self.pointer); - if let Some(p) = self.pointer { - self.pointer = self.nodes[p.get()].parent; - match self.nodes[p.get()].state { - NodeState::Pending { ref obligation } | - NodeState::Success { ref obligation, .. 
} => { - Some(obligation) - } - NodeState::Error => { - panic!("Backtrace encountered an error."); - } - } - } else { - None +impl Node { + fn new(parent: Option, obligation: O) -> Node { + Node { + obligation: obligation, + parent: parent, + state: Cell::new(NodeState::Pending), + dependents: vec![], } } } diff --git a/src/librustc_data_structures/obligation_forest/node_index.rs b/src/librustc_data_structures/obligation_forest/node_index.rs index 465cee0b60cc0..1063bb3611ef3 100644 --- a/src/librustc_data_structures/obligation_forest/node_index.rs +++ b/src/librustc_data_structures/obligation_forest/node_index.rs @@ -13,19 +13,16 @@ use std::u32; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct NodeIndex { - index: NonZero + index: NonZero, } impl NodeIndex { pub fn new(value: usize) -> NodeIndex { assert!(value < (u32::MAX as usize)); - unsafe { - NodeIndex { index: NonZero::new((value as u32) + 1) } - } + unsafe { NodeIndex { index: NonZero::new((value as u32) + 1) } } } pub fn get(self) -> usize { (*self.index - 1) as usize } } - diff --git a/src/librustc_data_structures/obligation_forest/test.rs b/src/librustc_data_structures/obligation_forest/test.rs index 519b282a6a8c7..a95b2b84b34c8 100644 --- a/src/librustc_data_structures/obligation_forest/test.rs +++ b/src/librustc_data_structures/obligation_forest/test.rs @@ -8,30 +8,88 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::{ObligationForest, Outcome, Error}; +#![cfg(test)] + +use super::{ObligationForest, ObligationProcessor, Outcome, Error}; + +use std::fmt; +use std::marker::PhantomData; + +impl<'a> super::ForestObligation for &'a str { + type Predicate = &'a str; + + fn as_predicate(&self) -> &Self::Predicate { + self + } +} + +struct ClosureObligationProcessor { + process_obligation: OF, + _process_backedge: BF, + marker: PhantomData<(O, E)>, +} + +#[allow(non_snake_case)] +fn C(of: OF, bf: BF) -> ClosureObligationProcessor + where OF: FnMut(&mut O) -> Result>, &'static str>, + BF: FnMut(&[O]) +{ + ClosureObligationProcessor { + process_obligation: of, + _process_backedge: bf, + marker: PhantomData + } +} + +impl ObligationProcessor for ClosureObligationProcessor + where O: super::ForestObligation + fmt::Debug, + E: fmt::Debug, + OF: FnMut(&mut O) -> Result>, E>, + BF: FnMut(&[O]) +{ + type Obligation = O; + type Error = E; + + fn process_obligation(&mut self, + obligation: &mut Self::Obligation) + -> Result>, Self::Error> + { + (self.process_obligation)(obligation) + } + + fn process_backedge<'c, I>(&mut self, _cycle: I, + _marker: PhantomData<&'c Self::Obligation>) + where I: Clone + Iterator { + } +} + #[test] fn push_pop() { let mut forest = ObligationForest::new(); - forest.push_root("A"); - forest.push_root("B"); - forest.push_root("C"); + forest.register_obligation("A"); + forest.register_obligation("B"); + forest.register_obligation("C"); // first round, B errors out, A has subtasks, and C completes, creating this: // A |-> A.1 // |-> A.2 // |-> A.3 - let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation, _| { - match *obligation { - "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), - "B" => Err("B is for broken"), - "C" => Ok(Some(vec![])), - _ => unreachable!(), - } - }); + let Outcome { completed: ok, errors: err, .. 
} = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), + "B" => Err("B is for broken"), + "C" => Ok(Some(vec![])), + _ => unreachable!(), + } + }, |_| {})); assert_eq!(ok, vec!["C"]); - assert_eq!(err, vec![Error {error: "B is for broken", - backtrace: vec!["B"]}]); + assert_eq!(err, + vec![Error { + error: "B is for broken", + backtrace: vec!["B"], + }]); // second round: two delays, one success, creating an uneven set of subtasks: // A |-> A.1 @@ -39,9 +97,9 @@ fn push_pop() { // |-> A.3 |-> A.3.i // D |-> D.1 // |-> D.2 - forest.push_root("D"); - let Outcome { completed: ok, errors: err, .. }: Outcome<&'static str, ()> = - forest.process_obligations(|obligation, _| { + forest.register_obligation("D"); + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { match *obligation { "A.1" => Ok(None), "A.2" => Ok(None), @@ -49,38 +107,48 @@ fn push_pop() { "D" => Ok(Some(vec!["D.1", "D.2"])), _ => unreachable!(), } - }); + }, |_| {})); assert_eq!(ok, Vec::<&'static str>::new()); assert_eq!(err, Vec::new()); // third round: ok in A.1 but trigger an error in A.2. Check that it - // propagates to A.3.i, but not D.1 or D.2. + // propagates to A, but not D.1 or D.2. // D |-> D.1 |-> D.1.i // |-> D.2 |-> D.2.i - let Outcome { completed: ok, errors: err, .. } = forest.process_obligations(|obligation, _| { - match *obligation { - "A.1" => Ok(Some(vec![])), - "A.2" => Err("A is for apple"), - "D.1" => Ok(Some(vec!["D.1.i"])), - "D.2" => Ok(Some(vec!["D.2.i"])), - _ => unreachable!(), - } - }); - assert_eq!(ok, vec!["A.1"]); - assert_eq!(err, vec![Error { error: "A is for apple", - backtrace: vec!["A.2", "A"] }]); - - // fourth round: error in D.1.i that should propagate to D.2.i - let Outcome { completed: ok, errors: err, .. 
} = forest.process_obligations(|obligation, _| { - match *obligation { - "D.1.i" => Err("D is for dumb"), - _ => panic!("unexpected obligation {:?}", obligation), - } - }); - assert_eq!(ok, Vec::<&'static str>::new()); - assert_eq!(err, vec![Error { error: "D is for dumb", - backtrace: vec!["D.1.i", "D.1", "D"] }]); + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A.1" => Ok(Some(vec![])), + "A.2" => Err("A is for apple"), + "A.3.i" => Ok(Some(vec![])), + "D.1" => Ok(Some(vec!["D.1.i"])), + "D.2" => Ok(Some(vec!["D.2.i"])), + _ => unreachable!(), + } + }, |_| {})); + assert_eq!(ok, vec!["A.3", "A.1", "A.3.i"]); + assert_eq!(err, + vec![Error { + error: "A is for apple", + backtrace: vec!["A.2", "A"], + }]); + + // fourth round: error in D.1.i + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "D.1.i" => Err("D is for dumb"), + "D.2.i" => Ok(Some(vec![])), + _ => panic!("unexpected obligation {:?}", obligation), + } + }, |_| {})); + assert_eq!(ok, vec!["D.2.i", "D.2"]); + assert_eq!(err, + vec![Error { + error: "D is for dumb", + backtrace: vec!["D.1.i", "D.1", "D"], + }]); } // Test that if a tree with grandchildren succeeds, everything is @@ -94,53 +162,54 @@ fn push_pop() { #[test] fn success_in_grandchildren() { let mut forest = ObligationForest::new(); - forest.push_root("A"); + forest.register_obligation("A"); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|obligation, _| { + forest.process_obligations(&mut C(|obligation| { match *obligation { "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), _ => unreachable!(), } - }); + }, |_| {})); assert!(ok.is_empty()); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. 
} = - forest.process_obligations::<(),_>(|obligation, _| { + forest.process_obligations(&mut C(|obligation| { match *obligation { "A.1" => Ok(Some(vec![])), "A.2" => Ok(Some(vec!["A.2.i", "A.2.ii"])), "A.3" => Ok(Some(vec![])), _ => unreachable!(), } - }); + }, |_| {})); assert_eq!(ok, vec!["A.3", "A.1"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|obligation, _| { + forest.process_obligations(&mut C(|obligation| { match *obligation { "A.2.i" => Ok(Some(vec!["A.2.i.a"])), "A.2.ii" => Ok(Some(vec![])), _ => unreachable!(), } - }); + }, |_| {})); assert_eq!(ok, vec!["A.2.ii"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|obligation, _| { + forest.process_obligations(&mut C(|obligation| { match *obligation { "A.2.i.a" => Ok(Some(vec![])), _ => unreachable!(), } - }); + }, |_| {})); assert_eq!(ok, vec!["A.2.i.a", "A.2.i", "A.2", "A"]); assert!(err.is_empty()); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|_, _| unreachable!()); + forest.process_obligations(&mut C(|_| unreachable!(), |_| {})); + assert!(ok.is_empty()); assert!(err.is_empty()); } @@ -148,59 +217,244 @@ fn success_in_grandchildren() { #[test] fn to_errors_no_throw() { // check that converting multiple children with common parent (A) - // only yields one of them (and does not panic, in particular). + // yields to correct errors (and does not panic, in particular). let mut forest = ObligationForest::new(); - forest.push_root("A"); + forest.register_obligation("A"); let Outcome { completed: ok, errors: err, .. 
} = - forest.process_obligations::<(),_>(|obligation, _| { + forest.process_obligations(&mut C(|obligation| { match *obligation { "A" => Ok(Some(vec!["A.1", "A.2", "A.3"])), _ => unreachable!(), } - }); + }, |_|{})); assert_eq!(ok.len(), 0); assert_eq!(err.len(), 0); let errors = forest.to_errors(()); - assert_eq!(errors.len(), 1); + assert_eq!(errors[0].backtrace, vec!["A.1", "A"]); + assert_eq!(errors[1].backtrace, vec!["A.2", "A"]); + assert_eq!(errors[2].backtrace, vec!["A.3", "A"]); + assert_eq!(errors.len(), 3); } #[test] -fn backtrace() { - // check that converting multiple children with common parent (A) - // only yields one of them (and does not panic, in particular). - let mut forest: ObligationForest<&'static str> = ObligationForest::new(); - forest.push_root("A"); +fn diamond() { + // check that diamond dependencies are handled correctly + let mut forest = ObligationForest::new(); + forest.register_obligation("A"); let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|obligation, mut backtrace| { - assert!(backtrace.next().is_none()); + forest.process_obligations(&mut C(|obligation| { match *obligation { - "A" => Ok(Some(vec!["A.1"])), + "A" => Ok(Some(vec!["A.1", "A.2"])), _ => unreachable!(), } - }); - assert!(ok.is_empty()); - assert!(err.is_empty()); + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err.len(), 0); + let Outcome { completed: ok, errors: err, .. } = - forest.process_obligations::<(),_>(|obligation, mut backtrace| { - assert!(backtrace.next().unwrap() == &"A"); - assert!(backtrace.next().is_none()); + forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1" => Ok(Some(vec!["A.1.i"])), + "A.1" => Ok(Some(vec!["D"])), + "A.2" => Ok(Some(vec!["D"])), _ => unreachable!(), } - }); - assert!(ok.is_empty()); - assert!(err.is_empty()); + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err.len(), 0); + + let mut d_count = 0; let Outcome { completed: ok, errors: err, .. 
} = - forest.process_obligations::<(),_>(|obligation, mut backtrace| { - assert!(backtrace.next().unwrap() == &"A.1"); - assert!(backtrace.next().unwrap() == &"A"); - assert!(backtrace.next().is_none()); + forest.process_obligations(&mut C(|obligation| { match *obligation { - "A.1.i" => Ok(None), + "D" => { d_count += 1; Ok(Some(vec![])) }, _ => unreachable!(), } - }); + }, |_|{})); + assert_eq!(d_count, 1); + assert_eq!(ok, vec!["D", "A.2", "A.1", "A"]); + assert_eq!(err.len(), 0); + + let errors = forest.to_errors(()); + assert_eq!(errors.len(), 0); + + forest.register_obligation("A'"); + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A'" => Ok(Some(vec!["A'.1", "A'.2"])), + _ => unreachable!(), + } + }, |_|{})); assert_eq!(ok.len(), 0); - assert!(err.is_empty()); + assert_eq!(err.len(), 0); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A'.1" => Ok(Some(vec!["D'", "A'"])), + "A'.2" => Ok(Some(vec!["D'"])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err.len(), 0); + + let mut d_count = 0; + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "D'" => { d_count += 1; Err("operation failed") }, + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(d_count, 1); + assert_eq!(ok.len(), 0); + assert_eq!(err, vec![super::Error { + error: "operation failed", + backtrace: vec!["D'", "A'.1", "A'"] + }]); + + let errors = forest.to_errors(()); + assert_eq!(errors.len(), 0); +} + +#[test] +fn done_dependency() { + // check that the local cache works + let mut forest = ObligationForest::new(); + forest.register_obligation("A: Sized"); + forest.register_obligation("B: Sized"); + forest.register_obligation("C: Sized"); + + let Outcome { completed: ok, errors: err, .. 
} = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A: Sized" | "B: Sized" | "C: Sized" => Ok(Some(vec![])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok, vec!["C: Sized", "B: Sized", "A: Sized"]); + assert_eq!(err.len(), 0); + + forest.register_obligation("(A,B,C): Sized"); + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "(A,B,C): Sized" => Ok(Some(vec![ + "A: Sized", + "B: Sized", + "C: Sized" + ])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok, vec!["(A,B,C): Sized"]); + assert_eq!(err.len(), 0); + + +} + + +#[test] +fn orphan() { + // check that orphaned nodes are handled correctly + let mut forest = ObligationForest::new(); + forest.register_obligation("A"); + forest.register_obligation("B"); + forest.register_obligation("C1"); + forest.register_obligation("C2"); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A" => Ok(Some(vec!["D", "E"])), + "B" => Ok(None), + "C1" => Ok(Some(vec![])), + "C2" => Ok(Some(vec![])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok, vec!["C2", "C1"]); + assert_eq!(err.len(), 0); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "D" | "E" => Ok(None), + "B" => Ok(Some(vec!["D"])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err.len(), 0); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "D" => Ok(None), + "E" => Err("E is for error"), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err, vec![super::Error { + error: "E is for error", + backtrace: vec!["E", "A"] + }]); + + let Outcome { completed: ok, errors: err, .. 
} = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "D" => Err("D is dead"), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err, vec![super::Error { + error: "D is dead", + backtrace: vec!["D"] + }]); + + let errors = forest.to_errors(()); + assert_eq!(errors.len(), 0); +} + +#[test] +fn simultaneous_register_and_error() { + // check that registering a failed obligation works correctly + let mut forest = ObligationForest::new(); + forest.register_obligation("A"); + forest.register_obligation("B"); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A" => Err("An error"), + "B" => Ok(Some(vec!["A"])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err, vec![super::Error { + error: "An error", + backtrace: vec!["A"] + }]); + + let mut forest = ObligationForest::new(); + forest.register_obligation("B"); + forest.register_obligation("A"); + + let Outcome { completed: ok, errors: err, .. } = + forest.process_obligations(&mut C(|obligation| { + match *obligation { + "A" => Err("An error"), + "B" => Ok(Some(vec!["A"])), + _ => unreachable!(), + } + }, |_|{})); + assert_eq!(ok.len(), 0); + assert_eq!(err, vec![super::Error { + error: "An error", + backtrace: vec!["A"] + }]); } diff --git a/src/librustc_data_structures/small_vec.rs b/src/librustc_data_structures/small_vec.rs new file mode 100644 index 0000000000000..4e2b378602102 --- /dev/null +++ b/src/librustc_data_structures/small_vec.rs @@ -0,0 +1,215 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
A vector type intended to be used for collecting from iterators onto the stack. +//! +//! Space for up to N elements is provided on the stack. If more elements are collected, Vec is +//! used to store the values on the heap. SmallVec is similar to AccumulateVec, but adds +//! the ability to push elements. +//! +//! The N above is determined by Array's implementor, by way of an associatated constant. + +use std::ops::{Deref, DerefMut}; +use std::iter::{IntoIterator, FromIterator}; +use std::fmt::{self, Debug}; +use std::mem; +use std::ptr; + +use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; + +use accumulate_vec::{IntoIter, AccumulateVec}; +use array_vec::Array; + +pub struct SmallVec(AccumulateVec); + +impl Clone for SmallVec + where A: Array, + A::Element: Clone { + fn clone(&self) -> Self { + SmallVec(self.0.clone()) + } +} + +impl Debug for SmallVec + where A: Array + Debug, + A::Element: Debug { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("SmallVec").field(&self.0).finish() + } +} + +impl SmallVec { + pub fn new() -> Self { + SmallVec(AccumulateVec::new()) + } + + pub fn with_capacity(cap: usize) -> Self { + let mut vec = SmallVec::new(); + vec.reserve(cap); + vec + } + + pub fn one(el: A::Element) -> Self { + SmallVec(AccumulateVec::one(el)) + } + + pub fn many>(els: I) -> Self { + SmallVec(AccumulateVec::many(els)) + } + + pub fn expect_one(self, err: &'static str) -> A::Element { + assert!(self.len() == 1, err); + match self.0 { + AccumulateVec::Array(arr) => arr.into_iter().next().unwrap(), + AccumulateVec::Heap(vec) => vec.into_iter().next().unwrap(), + } + } + + /// Will reallocate onto the heap if needed. 
+ pub fn push(&mut self, el: A::Element) { + self.reserve(1); + match self.0 { + AccumulateVec::Array(ref mut array) => array.push(el), + AccumulateVec::Heap(ref mut vec) => vec.push(el), + } + } + + pub fn reserve(&mut self, n: usize) { + match self.0 { + AccumulateVec::Array(_) => { + if self.len() + n > A::LEN { + let len = self.len(); + let array = mem::replace(&mut self.0, + AccumulateVec::Heap(Vec::with_capacity(len + n))); + if let AccumulateVec::Array(array) = array { + match self.0 { + AccumulateVec::Heap(ref mut vec) => vec.extend(array), + _ => unreachable!() + } + } + } + } + AccumulateVec::Heap(ref mut vec) => vec.reserve(n) + } + } + + pub unsafe fn set_len(&mut self, len: usize) { + match self.0 { + AccumulateVec::Array(ref mut arr) => arr.set_len(len), + AccumulateVec::Heap(ref mut vec) => vec.set_len(len), + } + } + + pub fn insert(&mut self, index: usize, element: A::Element) { + let len = self.len(); + + // Reserve space for shifting elements to the right + self.reserve(1); + + assert!(index <= len); + + unsafe { + // infallible + // The spot to put the new value + { + let p = self.as_mut_ptr().offset(index as isize); + // Shift everything over to make space. (Duplicating the + // `index`th element into two consecutive places.) + ptr::copy(p, p.offset(1), len - index); + // Write it in, overwriting the first copy of the `index`th + // element. + ptr::write(p, element); + } + self.set_len(len + 1); + } + } + + pub fn truncate(&mut self, len: usize) { + unsafe { + while len < self.len() { + // Decrement len before the drop_in_place(), so a panic on Drop + // doesn't re-drop the just-failed value. 
+ let newlen = self.len() - 1; + self.set_len(newlen); + ::std::ptr::drop_in_place(self.get_unchecked_mut(newlen)); + } + } + } +} + +impl Deref for SmallVec { + type Target = AccumulateVec; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for SmallVec { + fn deref_mut(&mut self) -> &mut AccumulateVec { + &mut self.0 + } +} + +impl FromIterator for SmallVec { + fn from_iter(iter: I) -> Self where I: IntoIterator { + SmallVec(iter.into_iter().collect()) + } +} + +impl Extend for SmallVec { + fn extend>(&mut self, iter: I) { + let iter = iter.into_iter(); + self.reserve(iter.size_hint().0); + for el in iter { + self.push(el); + } + } +} + +impl IntoIterator for SmallVec { + type Item = A::Element; + type IntoIter = IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl Default for SmallVec { + fn default() -> SmallVec { + SmallVec::new() + } +} + +impl Encodable for SmallVec + where A: Array, + A::Element: Encodable { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_seq(self.len(), |s| { + for (i, e) in self.iter().enumerate() { + try!(s.emit_seq_elt(i, |s| e.encode(s))); + } + Ok(()) + }) + } +} + +impl Decodable for SmallVec + where A: Array, + A::Element: Decodable { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_seq(|d, len| { + let mut vec = SmallVec::with_capacity(len); + for i in 0..len { + vec.push(try!(d.read_seq_elt(i, |d| Decodable::decode(d)))); + } + Ok(vec) + }) + } +} diff --git a/src/librustc_data_structures/snapshot_map/mod.rs b/src/librustc_data_structures/snapshot_map/mod.rs new file mode 100644 index 0000000000000..cd7143ad3ce84 --- /dev/null +++ b/src/librustc_data_structures/snapshot_map/mod.rs @@ -0,0 +1,170 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use fx::FxHashMap; +use std::hash::Hash; +use std::ops; +use std::mem; + +#[cfg(test)] +mod test; + +pub struct SnapshotMap + where K: Hash + Clone + Eq +{ + map: FxHashMap, + undo_log: Vec>, +} + +pub struct Snapshot { + len: usize, +} + +enum UndoLog { + OpenSnapshot, + CommittedSnapshot, + Inserted(K), + Overwrite(K, V), + Noop, +} + +impl SnapshotMap + where K: Hash + Clone + Eq +{ + pub fn new() -> Self { + SnapshotMap { + map: FxHashMap(), + undo_log: vec![], + } + } + + pub fn insert(&mut self, key: K, value: V) -> bool { + match self.map.insert(key.clone(), value) { + None => { + if !self.undo_log.is_empty() { + self.undo_log.push(UndoLog::Inserted(key)); + } + true + } + Some(old_value) => { + if !self.undo_log.is_empty() { + self.undo_log.push(UndoLog::Overwrite(key, old_value)); + } + false + } + } + } + + pub fn remove(&mut self, key: K) -> bool { + match self.map.remove(&key) { + Some(old_value) => { + if !self.undo_log.is_empty() { + self.undo_log.push(UndoLog::Overwrite(key, old_value)); + } + true + } + None => false, + } + } + + pub fn get(&self, key: &K) -> Option<&V> { + self.map.get(key) + } + + pub fn snapshot(&mut self) -> Snapshot { + self.undo_log.push(UndoLog::OpenSnapshot); + let len = self.undo_log.len() - 1; + Snapshot { len: len } + } + + fn assert_open_snapshot(&self, snapshot: &Snapshot) { + assert!(snapshot.len < self.undo_log.len()); + assert!(match self.undo_log[snapshot.len] { + UndoLog::OpenSnapshot => true, + _ => false, + }); + } + + pub fn commit(&mut self, snapshot: Snapshot) { + self.assert_open_snapshot(&snapshot); + if snapshot.len == 0 { + // The root snapshot. 
+ self.undo_log.truncate(0); + } else { + self.undo_log[snapshot.len] = UndoLog::CommittedSnapshot; + } + } + + pub fn partial_rollback(&mut self, + snapshot: &Snapshot, + should_revert_key: &F) + where F: Fn(&K) -> bool + { + self.assert_open_snapshot(snapshot); + for i in (snapshot.len + 1..self.undo_log.len()).rev() { + let reverse = match self.undo_log[i] { + UndoLog::OpenSnapshot => false, + UndoLog::CommittedSnapshot => false, + UndoLog::Noop => false, + UndoLog::Inserted(ref k) => should_revert_key(k), + UndoLog::Overwrite(ref k, _) => should_revert_key(k), + }; + + if reverse { + let entry = mem::replace(&mut self.undo_log[i], UndoLog::Noop); + self.reverse(entry); + } + } + } + + pub fn rollback_to(&mut self, snapshot: Snapshot) { + self.assert_open_snapshot(&snapshot); + while self.undo_log.len() > snapshot.len + 1 { + let entry = self.undo_log.pop().unwrap(); + self.reverse(entry); + } + + let v = self.undo_log.pop().unwrap(); + assert!(match v { + UndoLog::OpenSnapshot => true, + _ => false, + }); + assert!(self.undo_log.len() == snapshot.len); + } + + fn reverse(&mut self, entry: UndoLog) { + match entry { + UndoLog::OpenSnapshot => { + panic!("cannot rollback an uncommitted snapshot"); + } + + UndoLog::CommittedSnapshot => {} + + UndoLog::Inserted(key) => { + self.map.remove(&key); + } + + UndoLog::Overwrite(key, old_value) => { + self.map.insert(key, old_value); + } + + UndoLog::Noop => {} + } + } +} + +impl<'k, K, V> ops::Index<&'k K> for SnapshotMap + where K: Hash + Clone + Eq +{ + type Output = V; + fn index(&self, key: &'k K) -> &V { + &self.map[key] + } +} diff --git a/src/librustc_data_structures/snapshot_map/test.rs b/src/librustc_data_structures/snapshot_map/test.rs new file mode 100644 index 0000000000000..4114082839b0b --- /dev/null +++ b/src/librustc_data_structures/snapshot_map/test.rs @@ -0,0 +1,50 @@ +// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::SnapshotMap; + +#[test] +fn basic() { + let mut map = SnapshotMap::new(); + map.insert(22, "twenty-two"); + let snapshot = map.snapshot(); + map.insert(22, "thirty-three"); + assert_eq!(map[&22], "thirty-three"); + map.insert(44, "fourty-four"); + assert_eq!(map[&44], "fourty-four"); + assert_eq!(map.get(&33), None); + map.rollback_to(snapshot); + assert_eq!(map[&22], "twenty-two"); + assert_eq!(map.get(&33), None); + assert_eq!(map.get(&44), None); +} + +#[test] +#[should_panic] +fn out_of_order() { + let mut map = SnapshotMap::new(); + map.insert(22, "twenty-two"); + let snapshot1 = map.snapshot(); + let _snapshot2 = map.snapshot(); + map.rollback_to(snapshot1); +} + +#[test] +fn nested_commit_then_rollback() { + let mut map = SnapshotMap::new(); + map.insert(22, "twenty-two"); + let snapshot1 = map.snapshot(); + let snapshot2 = map.snapshot(); + map.insert(22, "thirty-three"); + map.commit(snapshot2); + assert_eq!(map[&22], "thirty-three"); + map.rollback_to(snapshot1); + assert_eq!(map[&22], "twenty-two"); +} diff --git a/src/librustc_data_structures/snapshot_vec.rs b/src/librustc_data_structures/snapshot_vec.rs index 5ab740f3629aa..dac074ab91e1b 100644 --- a/src/librustc_data_structures/snapshot_vec.rs +++ b/src/librustc_data_structures/snapshot_vec.rs @@ -23,7 +23,7 @@ use self::UndoLog::*; use std::mem; use std::ops; -pub enum UndoLog { +pub enum UndoLog { /// Indicates where a snapshot started. 
OpenSnapshot, @@ -37,10 +37,10 @@ pub enum UndoLog { SetElem(usize, D::Value), /// Extensible set of actions - Other(D::Undo) + Other(D::Undo), } -pub struct SnapshotVec { +pub struct SnapshotVec { values: Vec, undo_log: Vec>, } @@ -58,7 +58,7 @@ pub trait SnapshotVecDelegate { fn reverse(values: &mut Vec, action: Self::Undo); } -impl SnapshotVec { +impl SnapshotVec { pub fn new() -> SnapshotVec { SnapshotVec { values: Vec::new(), @@ -91,14 +91,14 @@ impl SnapshotVec { len } - pub fn get<'a>(&'a self, index: usize) -> &'a D::Value { + pub fn get(&self, index: usize) -> &D::Value { &self.values[index] } /// Returns a mutable pointer into the vec; whatever changes you make here cannot be undone /// automatically, so you should be sure call `record()` with some sort of suitable undo /// action. - pub fn get_mut<'a>(&'a mut self, index: usize) -> &'a mut D::Value { + pub fn get_mut(&mut self, index: usize) -> &mut D::Value { &mut self.values[index] } @@ -117,9 +117,7 @@ impl SnapshotVec { Snapshot { length: length } } - pub fn actions_since_snapshot(&self, - snapshot: &Snapshot) - -> &[UndoLog] { + pub fn actions_since_snapshot(&self, snapshot: &Snapshot) -> &[UndoLog] { &self.undo_log[snapshot.length..] 
} @@ -128,11 +126,10 @@ impl SnapshotVec { assert!(self.undo_log.len() > snapshot.length); // Invariant established by start_snapshot(): - assert!( - match self.undo_log[snapshot.length] { - OpenSnapshot => true, - _ => false - }); + assert!(match self.undo_log[snapshot.length] { + OpenSnapshot => true, + _ => false, + }); } pub fn rollback_to(&mut self, snapshot: Snapshot) { @@ -168,7 +165,10 @@ impl SnapshotVec { } let v = self.undo_log.pop().unwrap(); - assert!(match v { OpenSnapshot => true, _ => false }); + assert!(match v { + OpenSnapshot => true, + _ => false, + }); assert!(self.undo_log.len() == snapshot.length); } @@ -188,20 +188,36 @@ impl SnapshotVec { } } -impl ops::Deref for SnapshotVec { +impl ops::Deref for SnapshotVec { type Target = [D::Value]; - fn deref(&self) -> &[D::Value] { &*self.values } + fn deref(&self) -> &[D::Value] { + &*self.values + } } -impl ops::DerefMut for SnapshotVec { - fn deref_mut(&mut self) -> &mut [D::Value] { &mut *self.values } +impl ops::DerefMut for SnapshotVec { + fn deref_mut(&mut self) -> &mut [D::Value] { + &mut *self.values + } } -impl ops::Index for SnapshotVec { +impl ops::Index for SnapshotVec { type Output = D::Value; - fn index(&self, index: usize) -> &D::Value { self.get(index) } + fn index(&self, index: usize) -> &D::Value { + self.get(index) + } +} + +impl ops::IndexMut for SnapshotVec { + fn index_mut(&mut self, index: usize) -> &mut D::Value { + self.get_mut(index) + } } -impl ops::IndexMut for SnapshotVec { - fn index_mut(&mut self, index: usize) -> &mut D::Value { self.get_mut(index) } +impl Extend for SnapshotVec { + fn extend(&mut self, iterable: T) where T: IntoIterator { + for item in iterable { + self.push(item); + } + } } diff --git a/src/librustc_data_structures/transitive_relation.rs b/src/librustc_data_structures/transitive_relation.rs index 7ea5cb8721d59..e09e260afc8d9 100644 --- a/src/librustc_data_structures/transitive_relation.rs +++ b/src/librustc_data_structures/transitive_relation.rs @@ 
-14,7 +14,7 @@ use std::fmt::Debug; use std::mem; #[derive(Clone)] -pub struct TransitiveRelation { +pub struct TransitiveRelation { // List of elements. This is used to map from a T to a usize. We // expect domain to be small so just use a linear list versus a // hashmap or something. @@ -33,7 +33,7 @@ pub struct TransitiveRelation { // are added with new elements. Perhaps better would be to ask the // user for a batch of edges to minimize this effect, but I // already wrote the code this way. :P -nmatsakis - closure: RefCell> + closure: RefCell>, } #[derive(Clone, PartialEq, PartialOrd)] @@ -45,11 +45,13 @@ struct Edge { target: Index, } -impl TransitiveRelation { +impl TransitiveRelation { pub fn new() -> TransitiveRelation { - TransitiveRelation { elements: vec![], - edges: vec![], - closure: RefCell::new(None) } + TransitiveRelation { + elements: vec![], + edges: vec![], + closure: RefCell::new(None), + } } fn index(&self, a: &T) -> Option { @@ -74,7 +76,10 @@ impl TransitiveRelation { pub fn add(&mut self, a: T, b: T) { let a = self.add_index(a); let b = self.add_index(b); - let edge = Edge { source: a, target: b }; + let edge = Edge { + source: a, + target: b, + }; if !self.edges.contains(&edge) { self.edges.push(edge); @@ -86,10 +91,8 @@ impl TransitiveRelation { /// Check whether `a < target` (transitively) pub fn contains(&self, a: &T, b: &T) -> bool { match (self.index(a), self.index(b)) { - (Some(a), Some(b)) => - self.with_closure(|closure| closure.contains(a.0, b.0)), - (None, _) | (_, None) => - false, + (Some(a), Some(b)) => self.with_closure(|closure| closure.contains(a.0, b.0)), + (None, _) | (_, None) => false, } } @@ -156,7 +159,9 @@ impl TransitiveRelation { pub fn minimal_upper_bounds(&self, a: &T, b: &T) -> Vec<&T> { let (mut a, mut b) = match (self.index(a), self.index(b)) { (Some(a), Some(b)) => (a, b), - (None, _) | (_, None) => { return vec![]; } + (None, _) | (_, None) => { + return vec![]; + } }; // in some cases, there are some 
arbitrary choices to be made; @@ -233,7 +238,7 @@ impl TransitiveRelation { .collect() } - fn with_closure(&self, op: OP) -> R + fn with_closure(&self, op: OP) -> R where OP: FnOnce(&BitMatrix) -> R { let mut closure_cell = self.closure.borrow_mut(); @@ -247,7 +252,8 @@ impl TransitiveRelation { } fn compute_closure(&self) -> BitMatrix { - let mut matrix = BitMatrix::new(self.elements.len()); + let mut matrix = BitMatrix::new(self.elements.len(), + self.elements.len()); let mut changed = true; while changed { changed = false; @@ -431,14 +437,15 @@ fn pdub_crisscross() { // b -> b1 ---+ let mut relation = TransitiveRelation::new(); - relation.add("a", "a1"); - relation.add("a", "b1"); - relation.add("b", "a1"); - relation.add("b", "b1"); + relation.add("a", "a1"); + relation.add("a", "b1"); + relation.add("b", "a1"); + relation.add("b", "b1"); relation.add("a1", "x"); relation.add("b1", "x"); - assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]); + assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), + vec![&"a1", &"b1"]); assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x")); } @@ -451,23 +458,25 @@ fn pdub_crisscross_more() { // b -> b1 -> b2 ---------+ let mut relation = TransitiveRelation::new(); - relation.add("a", "a1"); - relation.add("a", "b1"); - relation.add("b", "a1"); - relation.add("b", "b1"); + relation.add("a", "a1"); + relation.add("a", "b1"); + relation.add("b", "a1"); + relation.add("b", "b1"); - relation.add("a1", "a2"); - relation.add("a1", "b2"); - relation.add("b1", "a2"); - relation.add("b1", "b2"); + relation.add("a1", "a2"); + relation.add("a1", "b2"); + relation.add("b1", "a2"); + relation.add("b1", "b2"); relation.add("a2", "a3"); relation.add("a3", "x"); relation.add("b2", "x"); - assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"a1", &"b1"]); - assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"), vec![&"a2", &"b2"]); + assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), + vec![&"a1", 
&"b1"]); + assert_eq!(relation.minimal_upper_bounds(&"a1", &"b1"), + vec![&"a2", &"b2"]); assert_eq!(relation.postdom_upper_bound(&"a", &"b"), Some(&"x")); } @@ -479,8 +488,8 @@ fn pdub_lub() { // b -> b1 ---+ let mut relation = TransitiveRelation::new(); - relation.add("a", "a1"); - relation.add("b", "b1"); + relation.add("a", "a1"); + relation.add("b", "b1"); relation.add("a1", "x"); relation.add("b1", "x"); @@ -497,9 +506,9 @@ fn mubs_intermediate_node_on_one_side_only() { // "digraph { a -> c -> d; b -> d; }", let mut relation = TransitiveRelation::new(); - relation.add("a", "c"); - relation.add("c", "d"); - relation.add("b", "d"); + relation.add("a", "c"); + relation.add("c", "d"); + relation.add("b", "d"); assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"d"]); } @@ -516,11 +525,11 @@ fn mubs_scc_1() { // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }", let mut relation = TransitiveRelation::new(); - relation.add("a", "c"); - relation.add("c", "d"); - relation.add("d", "c"); - relation.add("a", "d"); - relation.add("b", "d"); + relation.add("a", "c"); + relation.add("c", "d"); + relation.add("d", "c"); + relation.add("a", "d"); + relation.add("b", "d"); assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]); } @@ -536,11 +545,11 @@ fn mubs_scc_2() { // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }", let mut relation = TransitiveRelation::new(); - relation.add("a", "c"); - relation.add("c", "d"); - relation.add("d", "c"); - relation.add("b", "d"); - relation.add("b", "c"); + relation.add("a", "c"); + relation.add("c", "d"); + relation.add("d", "c"); + relation.add("b", "d"); + relation.add("b", "c"); assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]); } @@ -556,12 +565,12 @@ fn mubs_scc_3() { // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }", let mut relation = TransitiveRelation::new(); - relation.add("a", "c"); - relation.add("c", "d"); - relation.add("d", "e"); - relation.add("e", "c"); - relation.add("b", 
"d"); - relation.add("b", "e"); + relation.add("a", "c"); + relation.add("c", "d"); + relation.add("d", "e"); + relation.add("e", "c"); + relation.add("b", "d"); + relation.add("b", "e"); assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]); } @@ -578,12 +587,12 @@ fn mubs_scc_4() { // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }" let mut relation = TransitiveRelation::new(); - relation.add("a", "c"); - relation.add("c", "d"); - relation.add("d", "e"); - relation.add("e", "c"); - relation.add("a", "d"); - relation.add("b", "e"); + relation.add("a", "c"); + relation.add("c", "d"); + relation.add("d", "e"); + relation.add("e", "c"); + relation.add("a", "d"); + relation.add("b", "e"); assert_eq!(relation.minimal_upper_bounds(&"a", &"b"), vec![&"c"]); } diff --git a/src/librustc_data_structures/tuple_slice.rs b/src/librustc_data_structures/tuple_slice.rs index f157d82eda12b..b7c71dd366469 100644 --- a/src/librustc_data_structures/tuple_slice.rs +++ b/src/librustc_data_structures/tuple_slice.rs @@ -36,25 +36,35 @@ macro_rules! 
impl_tuple_slice { } } -impl_tuple_slice!((T,T), 2); -impl_tuple_slice!((T,T,T), 3); -impl_tuple_slice!((T,T,T,T), 4); -impl_tuple_slice!((T,T,T,T,T), 5); -impl_tuple_slice!((T,T,T,T,T,T), 6); -impl_tuple_slice!((T,T,T,T,T,T,T), 7); -impl_tuple_slice!((T,T,T,T,T,T,T,T), 8); +impl_tuple_slice!((T, T), 2); +impl_tuple_slice!((T, T, T), 3); +impl_tuple_slice!((T, T, T, T), 4); +impl_tuple_slice!((T, T, T, T, T), 5); +impl_tuple_slice!((T, T, T, T, T, T), 6); +impl_tuple_slice!((T, T, T, T, T, T, T), 7); +impl_tuple_slice!((T, T, T, T, T, T, T, T), 8); #[test] fn test_sliced_tuples() { - let t2 = (100i32, 101i32); - assert_eq!(t2.as_slice(), &[100i32, 101i32]); + let t2 = (100, 101); + assert_eq!(t2.as_slice(), &[100, 101]); - let t3 = (102i32, 103i32, 104i32); - assert_eq!(t3.as_slice(), &[102i32, 103i32, 104i32]); + let t3 = (102, 103, 104); + assert_eq!(t3.as_slice(), &[102, 103, 104]); - let t4 = (105i32, 106i32, 107i32, 108i32); - assert_eq!(t4.as_slice(), &[105i32, 106i32, 107i32, 108i32]); + let t4 = (105, 106, 107, 108); + assert_eq!(t4.as_slice(), &[105, 106, 107, 108]); + + let t5 = (109, 110, 111, 112, 113); + assert_eq!(t5.as_slice(), &[109, 110, 111, 112, 113]); + + let t6 = (114, 115, 116, 117, 118, 119); + assert_eq!(t6.as_slice(), &[114, 115, 116, 117, 118, 119]); + + let t7 = (120, 121, 122, 123, 124, 125, 126); + assert_eq!(t7.as_slice(), &[120, 121, 122, 123, 124, 125, 126]); + + let t8 = (127, 128, 129, 130, 131, 132, 133, 134); + assert_eq!(t8.as_slice(), &[127, 128, 129, 130, 131, 132, 133, 134]); - let t5 = (109i32, 110i32, 111i32, 112i32, 113i32); - assert_eq!(t5.as_slice(), &[109i32, 110i32, 111i32, 112i32, 113i32]); } diff --git a/src/librustc_data_structures/unify/mod.rs b/src/librustc_data_structures/unify/mod.rs index c6da70eef750a..e2d3a4f453749 100644 --- a/src/librustc_data_structures/unify/mod.rs +++ b/src/librustc_data_structures/unify/mod.rs @@ -27,7 +27,7 @@ mod tests; /// /// Clients are expected to provide implementations of this 
trait; you /// can see some examples in the `test` module. -pub trait UnifyKey : Copy + Clone + Debug + PartialEq { +pub trait UnifyKey: Copy + Clone + Debug + PartialEq { type Value: Clone + PartialEq + Debug; fn index(&self) -> u32; @@ -56,21 +56,21 @@ impl Combine for () { /// time of the algorithm under control. For more information, see /// . #[derive(PartialEq,Clone,Debug)] -pub struct VarValue { - parent: K, // if equal to self, this is a root +pub struct VarValue { + parent: K, // if equal to self, this is a root value: K::Value, // value assigned (only relevant to root) - rank: u32, // max depth (only relevant to root) + rank: u32, // max depth (only relevant to root) } /// Table of unification keys and their values. -pub struct UnificationTable { +pub struct UnificationTable { /// Indicates the current value of each key. values: sv::SnapshotVec>, } /// At any time, users may snapshot a unification table. The changes /// made during the snapshot may either be *committed* or *rolled back*. -pub struct Snapshot { +pub struct Snapshot { // Link snapshot to the key type `K` of the table. marker: marker::PhantomData, snapshot: sv::Snapshot, @@ -79,15 +79,17 @@ pub struct Snapshot { #[derive(Copy, Clone)] struct Delegate(PhantomData); -impl VarValue { +impl VarValue { fn new_var(key: K, value: K::Value) -> VarValue { VarValue::new(key, value, 0) } fn new(parent: K, value: K::Value, rank: u32) -> VarValue { - VarValue { parent: parent, // this is a root - value: value, - rank: rank } + VarValue { + parent: parent, // this is a root + value: value, + rank: rank, + } } fn redirect(self, to: K) -> VarValue { @@ -95,7 +97,11 @@ impl VarValue { } fn root(self, rank: u32, value: K::Value) -> VarValue { - VarValue { rank: rank, value: value, ..self } + VarValue { + rank: rank, + value: value, + ..self + } } /// Returns the key of this node. 
Only valid if this is a root @@ -109,11 +115,7 @@ impl VarValue { } fn if_not_self(&self, key: K, self_key: K) -> Option { - if key == self_key { - None - } else { - Some(key) - } + if key == self_key { None } else { Some(key) } } } @@ -122,18 +124,18 @@ impl VarValue { // other type parameter U, and we have no way to say // Option:LatticeValue. -impl UnificationTable { +impl UnificationTable { pub fn new() -> UnificationTable { - UnificationTable { - values: sv::SnapshotVec::new() - } + UnificationTable { values: sv::SnapshotVec::new() } } /// Starts a new snapshot. Each snapshot must be either /// rolled back or committed in a "LIFO" (stack) order. pub fn snapshot(&mut self) -> Snapshot { - Snapshot { marker: marker::PhantomData::, - snapshot: self.values.start_snapshot() } + Snapshot { + marker: marker::PhantomData::, + snapshot: self.values.start_snapshot(), + } } /// Reverses all changes since the last snapshot. Also @@ -154,9 +156,7 @@ impl UnificationTable { let len = self.values.len(); let key: K = UnifyKey::from_index(len as u32); self.values.push(VarValue::new_var(key, value)); - debug!("{}: created new key: {:?}", - UnifyKey::tag(None::), - key); + debug!("{}: created new key: {:?}", UnifyKey::tag(None::), key); key } @@ -179,9 +179,7 @@ impl UnificationTable { } root } - None => { - value - } + None => value, } } @@ -195,8 +193,7 @@ impl UnificationTable { fn set(&mut self, key: K, new_value: VarValue) { assert!(self.is_root(key)); - debug!("Updating variable {:?} to {:?}", - key, new_value); + debug!("Updating variable {:?} to {:?}", key, new_value); let index = key.index() as usize; self.values.set(index, new_value); @@ -210,7 +207,7 @@ impl UnificationTable { /// really more of a building block. If the values associated with /// your key are non-trivial, you would probably prefer to call /// `unify_var_var` below. 
- fn unify(&mut self, root_a: VarValue, root_b: VarValue, new_value: K::Value) { + fn unify(&mut self, root_a: VarValue, root_b: VarValue, new_value: K::Value) -> K { debug!("unify(root_a(id={:?}, rank={:?}), root_b(id={:?}, rank={:?}))", root_a.key(), root_a.rank, @@ -220,14 +217,14 @@ impl UnificationTable { if root_a.rank > root_b.rank { // a has greater rank, so a should become b's parent, // i.e., b should redirect to a. - self.redirect_root(root_a.rank, root_b, root_a, new_value); + self.redirect_root(root_a.rank, root_b, root_a, new_value) } else if root_a.rank < root_b.rank { // b has greater rank, so a should redirect to b. - self.redirect_root(root_b.rank, root_a, root_b, new_value); + self.redirect_root(root_b.rank, root_a, root_b, new_value) } else { // If equal, redirect one to the other and increment the // other's rank. - self.redirect_root(root_a.rank + 1, root_a, root_b, new_value); + self.redirect_root(root_a.rank + 1, root_a, root_b, new_value) } } @@ -235,35 +232,38 @@ impl UnificationTable { new_rank: u32, old_root: VarValue, new_root: VarValue, - new_value: K::Value) { + new_value: K::Value) + -> K { let old_root_key = old_root.key(); let new_root_key = new_root.key(); self.set(old_root_key, old_root.redirect(new_root_key)); self.set(new_root_key, new_root.root(new_rank, new_value)); + new_root_key } } -impl sv::SnapshotVecDelegate for Delegate { +impl sv::SnapshotVecDelegate for Delegate { type Value = VarValue; type Undo = (); fn reverse(_: &mut Vec>, _: ()) {} } -/////////////////////////////////////////////////////////////////////////// -// Base union-find algorithm, where we are just making sets +// # Base union-find algorithm, where we are just making sets -impl<'tcx,K:UnifyKey> UnificationTable +impl<'tcx, K: UnifyKey> UnificationTable where K::Value: Combine { - pub fn union(&mut self, a_id: K, b_id: K) { + pub fn union(&mut self, a_id: K, b_id: K) -> K { let node_a = self.get(a_id); let node_b = self.get(b_id); let a_id = 
node_a.key(); let b_id = node_b.key(); if a_id != b_id { let new_value = node_a.value.combine(&node_b.value); - self.unify(node_a, node_b, new_value); + self.unify(node_a, node_b, new_value) + } else { + a_id } } @@ -280,35 +280,31 @@ impl<'tcx,K:UnifyKey> UnificationTable } } -/////////////////////////////////////////////////////////////////////////// +// # Non-subtyping unification +// // Code to handle keys which carry a value, like ints, // floats---anything that doesn't have a subtyping relationship we // need to worry about. -impl<'tcx,K,V> UnificationTable - where K: UnifyKey>, - V: Clone+PartialEq+Debug, +impl<'tcx, K, V> UnificationTable + where K: UnifyKey>, + V: Clone + PartialEq + Debug { - pub fn unify_var_var(&mut self, - a_id: K, - b_id: K) - -> Result<(),(V,V)> - { + pub fn unify_var_var(&mut self, a_id: K, b_id: K) -> Result { let node_a = self.get(a_id); let node_b = self.get(b_id); let a_id = node_a.key(); let b_id = node_b.key(); - if a_id == b_id { return Ok(()); } + if a_id == b_id { + return Ok(a_id); + } let combined = { match (&node_a.value, &node_b.value) { - (&None, &None) => { - None - } - (&Some(ref v), &None) | (&None, &Some(ref v)) => { - Some(v.clone()) - } + (&None, &None) => None, + (&Some(ref v), &None) | + (&None, &Some(ref v)) => Some(v.clone()), (&Some(ref v1), &Some(ref v2)) => { if *v1 != *v2 { return Err((v1.clone(), v2.clone())); @@ -323,11 +319,7 @@ impl<'tcx,K,V> UnificationTable /// Sets the value of the key `a_id` to `b`. Because simple keys do not have any subtyping /// relationships, if `a_id` already has a value, it must be the same as `b`. 
- pub fn unify_var_value(&mut self, - a_id: K, - b: V) - -> Result<(),(V,V)> - { + pub fn unify_var_value(&mut self, a_id: K, b: V) -> Result<(), (V, V)> { let mut node_a = self.get(a_id); match node_a.value { @@ -352,13 +344,19 @@ impl<'tcx,K,V> UnificationTable } pub fn probe(&mut self, a_id: K) -> Option { - self.get(a_id).value.clone() + self.get(a_id).value } pub fn unsolved_variables(&mut self) -> Vec { self.values .iter() - .filter_map(|vv| if vv.value.is_some() { None } else { Some(vv.key()) }) + .filter_map(|vv| { + if vv.value.is_some() { + None + } else { + Some(vv.key()) + } + }) .collect() } } diff --git a/src/librustc_data_structures/unify/tests.rs b/src/librustc_data_structures/unify/tests.rs index 089e629a569d3..f29a7132e831b 100644 --- a/src/librustc_data_structures/unify/tests.rs +++ b/src/librustc_data_structures/unify/tests.rs @@ -19,9 +19,15 @@ struct UnitKey(u32); impl UnifyKey for UnitKey { type Value = (); - fn index(&self) -> u32 { self.0 } - fn from_index(u: u32) -> UnitKey { UnitKey(u) } - fn tag(_: Option) -> &'static str { "UnitKey" } + fn index(&self) -> u32 { + self.0 + } + fn from_index(u: u32) -> UnitKey { + UnitKey(u) + } + fn tag(_: Option) -> &'static str { + "UnitKey" + } } #[test] @@ -45,7 +51,7 @@ fn big_array() { } for i in 1..MAX { - let l = keys[i-1]; + let l = keys[i - 1]; let r = keys[i]; ut.union(l, r); } @@ -68,7 +74,7 @@ fn big_array_bench(b: &mut Bencher) { b.iter(|| { for i in 1..MAX { - let l = keys[i-1]; + let l = keys[i - 1]; let r = keys[i]; ut.union(l, r); } @@ -90,16 +96,16 @@ fn even_odd() { keys.push(key); if i >= 2 { - ut.union(key, keys[i-2]); + ut.union(key, keys[i - 2]); } } for i in 1..MAX { - assert!(!ut.unioned(keys[i-1], keys[i])); + assert!(!ut.unioned(keys[i - 1], keys[i])); } for i in 2..MAX { - assert!(ut.unioned(keys[i-2], keys[i])); + assert!(ut.unioned(keys[i - 2], keys[i])); } } @@ -108,9 +114,15 @@ struct IntKey(u32); impl UnifyKey for IntKey { type Value = Option; - fn index(&self) -> u32 { 
self.0 } - fn from_index(u: u32) -> IntKey { IntKey(u) } - fn tag(_: Option) -> &'static str { "IntKey" } + fn index(&self) -> u32 { + self.0 + } + fn from_index(u: u32) -> IntKey { + IntKey(u) + } + fn tag(_: Option) -> &'static str { + "IntKey" + } } /// Test unifying a key whose value is `Some(_)` with a key whose value is `None`. @@ -191,4 +203,3 @@ fn unify_key_Some_x_val_x() { assert!(ut.unify_var_value(k1, 22).is_ok()); assert_eq!(ut.probe(k1), Some(22)); } - diff --git a/src/librustc_data_structures/veccell/mod.rs b/src/librustc_data_structures/veccell/mod.rs index 008642d9d6567..054eee8829a4a 100644 --- a/src/librustc_data_structures/veccell/mod.rs +++ b/src/librustc_data_structures/veccell/mod.rs @@ -12,11 +12,11 @@ use std::cell::UnsafeCell; use std::mem; pub struct VecCell { - data: UnsafeCell> + data: UnsafeCell>, } impl VecCell { - pub fn with_capacity(capacity: usize) -> VecCell{ + pub fn with_capacity(capacity: usize) -> VecCell { VecCell { data: UnsafeCell::new(Vec::with_capacity(capacity)) } } diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml new file mode 100644 index 0000000000000..99d3e155e8936 --- /dev/null +++ b/src/librustc_driver/Cargo.toml @@ -0,0 +1,38 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_driver" +version = "0.0.0" + +[lib] +name = "rustc_driver" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +arena = { path = "../libarena" } +flate = { path = "../libflate" } +graphviz = { path = "../libgraphviz" } +log = { path = "../liblog" } +proc_macro_plugin = { path = "../libproc_macro_plugin" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_borrowck = { path = "../librustc_borrowck" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +rustc_incremental = { path = "../librustc_incremental" } +rustc_lint = { path = 
"../librustc_lint" } +rustc_llvm = { path = "../librustc_llvm" } +rustc_metadata = { path = "../librustc_metadata" } +rustc_mir = { path = "../librustc_mir" } +rustc_passes = { path = "../librustc_passes" } +rustc_plugin = { path = "../librustc_plugin" } +rustc_privacy = { path = "../librustc_privacy" } +rustc_resolve = { path = "../librustc_resolve" } +rustc_save_analysis = { path = "../librustc_save_analysis" } +rustc_trans = { path = "../librustc_trans" } +rustc_typeck = { path = "../librustc_typeck" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_ext = { path = "../libsyntax_ext" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_driver/derive_registrar.rs b/src/librustc_driver/derive_registrar.rs new file mode 100644 index 0000000000000..4db620b2bec3b --- /dev/null +++ b/src/librustc_driver/derive_registrar.rs @@ -0,0 +1,41 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::dep_graph::DepNode; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::map::Map; +use rustc::hir; +use syntax::ast; +use syntax::attr; + +pub fn find(hir_map: &Map) -> Option { + let _task = hir_map.dep_graph.in_task(DepNode::PluginRegistrar); + let krate = hir_map.krate(); + + let mut finder = Finder { registrar: None }; + krate.visit_all_item_likes(&mut finder); + finder.registrar +} + +struct Finder { + registrar: Option, +} + +impl<'v> ItemLikeVisitor<'v> for Finder { + fn visit_item(&mut self, item: &hir::Item) { + if attr::contains_name(&item.attrs, "rustc_derive_registrar") { + self.registrar = Some(item.id); + } + } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} + diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index 01ffd0efbe314..069f0a89bef08 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -8,211 +8,252 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use rustc::front; -use rustc::front::map as hir_map; +use rustc::hir; +use rustc::hir::{map as hir_map, FreevarMap, TraitMap}; +use rustc::hir::lowering::lower_crate; +use rustc_data_structures::blake2b::Blake2bHasher; +use rustc_data_structures::fmt_wrap::FmtWrap; +use rustc::ty::util::ArchIndependentHasher; use rustc_mir as mir; -use rustc_mir::mir_map::MirMap; -use rustc::session::Session; -use rustc::session::config::{self, Input, OutputFilenames, OutputType}; +use rustc::session::{Session, CompileResult, compile_result_from_err_count}; +use rustc::session::config::{self, Input, OutputFilenames, OutputType, + OutputTypes}; use rustc::session::search_paths::PathKind; use rustc::lint; -use rustc::middle::{stability, ty, reachable}; -use rustc::middle::dependency_format; -use rustc::middle; +use rustc::middle::{self, dependency_format, stability, reachable}; +use rustc::middle::privacy::AccessLevels; +use rustc::ty::{self, TyCtxt}; use rustc::util::common::time; +use rustc::util::nodemap::{NodeSet, NodeMap}; use rustc_borrowck as borrowck; -use rustc_resolve as resolve; -use rustc_metadata::macro_import; -use rustc_metadata::creader::LocalCrateReader; +use rustc_incremental::{self, IncrementalHashesMap}; +use rustc_resolve::{MakeGlobMap, Resolver}; +use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; -use rustc_trans::back::link; -use rustc_trans::back::write; -use rustc_trans::trans; +use rustc_trans::back::{link, write}; +use rustc_trans as trans; use rustc_typeck as typeck; use rustc_privacy; use rustc_plugin::registry::Registry; use rustc_plugin as plugin; -use rustc_front::hir; -use rustc_front::lowering::{lower_crate, LoweringContext}; +use rustc_passes::{ast_validation, no_asm, loops, consts, rvalues, + static_recursion, hir_stats}; +use rustc_const_eval::check_match; use super::Compilation; use serialize::json; -use std::collections::HashMap; use std::env; +use std::mem; use std::ffi::{OsString, OsStr}; use std::fs; use 
std::io::{self, Write}; use std::path::{Path, PathBuf}; -use syntax::ast::{self, NodeIdAssigner}; +use syntax::{ast, diagnostics, visit}; use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::diagnostics; -use syntax::fold::Folder; -use syntax::parse; -use syntax::parse::token; +use syntax::ext::base::ExtCtxt; +use syntax::parse::{self, PResult}; +use syntax::symbol::Symbol; use syntax::util::node_count::NodeCounter; -use syntax::visit; use syntax; use syntax_ext; -pub fn compile_input(sess: Session, +use derive_registrar; + +#[derive(Clone)] +pub struct Resolutions { + pub freevars: FreevarMap, + pub trait_map: TraitMap, + pub maybe_unused_trait_imports: NodeSet, +} + +pub fn compile_input(sess: &Session, cstore: &CStore, - cfg: ast::CrateConfig, input: &Input, outdir: &Option, output: &Option, addl_plugins: Option>, - control: CompileController) { - macro_rules! controller_entry_point{($point: ident, $tsess: expr, $make_state: expr) => ({ - let state = $make_state; - (control.$point.callback)(state); - - $tsess.abort_if_errors(); - if control.$point.stop == Compilation::Stop { - return; - } - })} + control: &CompileController) -> CompileResult { + macro_rules! 
controller_entry_point { + ($point: ident, $tsess: expr, $make_state: expr, $phase_result: expr) => {{ + let state = &mut $make_state; + let phase_result: &CompileResult = &$phase_result; + if phase_result.is_ok() || control.$point.run_callback_on_error { + (control.$point.callback)(state); + } + + if control.$point.stop == Compilation::Stop { + return compile_result_from_err_count($tsess.err_count()); + } + }} + } // We need nested scopes here, because the intermediate results can keep // large chunks of memory alive and we want to free them as soon as // possible to keep the peak memory usage low - let result = { - let (outputs, expanded_crate, id) = { - let krate = phase_1_parse_input(&sess, cfg, input); + let (outputs, trans) = { + let krate = match phase_1_parse_input(sess, input) { + Ok(krate) => krate, + Err(mut parse_error) => { + parse_error.emit(); + return Err(1); + } + }; + let (krate, registry) = { + let mut compile_state = CompileState::state_after_parse(input, + sess, + outdir, + output, + krate, + &cstore); controller_entry_point!(after_parse, sess, - CompileState::state_after_parse(input, &sess, outdir, &krate)); + compile_state, + Ok(())); - let outputs = build_output_filenames(input, outdir, output, &krate.attrs, &sess); - let id = link::find_crate_name(Some(&sess), &krate.attrs, input); - let expanded_crate = match phase_2_configure_and_expand(&sess, - &cstore, - krate, - &id[..], - addl_plugins) { - None => return, - Some(k) => k, - }; + (compile_state.krate.unwrap(), compile_state.registry) + }; - (outputs, expanded_crate, id) + let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess); + let crate_name = link::find_crate_name(Some(sess), &krate.attrs, input); + let ExpansionResult { expanded_crate, defs, analysis, resolutions, mut hir_forest } = { + phase_2_configure_and_expand( + sess, &cstore, krate, registry, &crate_name, addl_plugins, control.make_glob_map, + |expanded_crate| { + let mut state = 
CompileState::state_after_expand( + input, sess, outdir, output, &cstore, expanded_crate, &crate_name, + ); + controller_entry_point!(after_expand, sess, state, Ok(())); + Ok(()) + } + )? }; - controller_entry_point!(after_expand, - sess, - CompileState::state_after_expand(input, - &sess, - outdir, - &expanded_crate, - &id[..])); - - let expanded_crate = assign_node_ids(&sess, expanded_crate); - // Lower ast -> hir. - let lcx = LoweringContext::new(&sess, Some(&expanded_crate)); - let mut hir_forest = time(sess.time_passes(), - "lowering ast -> hir", - || hir_map::Forest::new(lower_crate(&lcx, &expanded_crate))); - - // Discard MTWT tables that aren't required past lowering to HIR. - if !sess.opts.debugging_opts.keep_mtwt_tables && - !sess.opts.debugging_opts.save_analysis { - syntax::ext::mtwt::clear_tables(); - } + write_out_deps(sess, &outputs, &crate_name); let arenas = ty::CtxtArenas::new(); - let hir_map = make_map(&sess, &mut hir_forest); - write_out_deps(&sess, &outputs, &id); + // Construct the HIR map + let hir_map = time(sess.time_passes(), + "indexing hir", + || hir_map::map_crate(&mut hir_forest, defs)); - controller_entry_point!(after_write_deps, - sess, - CompileState::state_after_write_deps(input, - &sess, - outdir, - &hir_map, - &expanded_crate, - &hir_map.krate(), - &id[..], - &lcx)); + { + let _ignore = hir_map.dep_graph.in_ignore(); + controller_entry_point!(after_hir_lowering, + sess, + CompileState::state_after_hir_lowering(input, + sess, + outdir, + output, + &arenas, + &cstore, + &hir_map, + &analysis, + &resolutions, + &expanded_crate, + &hir_map.krate(), + &crate_name), + Ok(())); + } time(sess.time_passes(), "attribute checking", || { - front::check_attr::check_crate(&sess, &expanded_crate); + hir::check_attr::check_crate(sess, &expanded_crate); }); - time(sess.time_passes(), - "early lint checks", - || lint::check_ast_crate(&sess, &expanded_crate)); - - let opt_crate = if sess.opts.debugging_opts.keep_ast || - 
sess.opts.debugging_opts.save_analysis { + let opt_crate = if keep_ast(sess) { Some(&expanded_crate) } else { drop(expanded_crate); None }; - phase_3_run_analysis_passes(&sess, - &cstore, + phase_3_run_analysis_passes(sess, hir_map, + analysis, + resolutions, &arenas, - &id, - control.make_glob_map, - |tcx, mir_map, analysis| { - - { - let state = - CompileState::state_after_analysis(input, - &tcx.sess, - outdir, - opt_crate, - tcx.map.krate(), - &analysis, - &mir_map, - tcx, - &lcx, - &id); - (control.after_analysis.callback)(state); - - tcx.sess.abort_if_errors(); - if control.after_analysis.stop == Compilation::Stop { - return Err(()); - } - } - - if log_enabled!(::log::INFO) { - println!("Pre-trans"); - tcx.print_debug_stats(); - } - let trans = phase_4_translate_to_llvm(tcx, - mir_map, - analysis); - - if log_enabled!(::log::INFO) { - println!("Post-trans"); - tcx.print_debug_stats(); - } - - // Discard interned strings as they are no longer required. - token::get_ident_interner().clear(); - - Ok((outputs, trans)) - }) - }; + &crate_name, + |tcx, analysis, incremental_hashes_map, result| { + { + // Eventually, we will want to track plugins. + let _ignore = tcx.dep_graph.in_ignore(); + + let mut state = CompileState::state_after_analysis(input, + sess, + outdir, + output, + opt_crate, + tcx.map.krate(), + &analysis, + tcx, + &crate_name); + (control.after_analysis.callback)(&mut state); + + if control.after_analysis.stop == Compilation::Stop { + return result.and_then(|_| Err(0usize)); + } + } - let (outputs, trans) = if let Ok(out) = result { - out - } else { - return; + result?; + + if log_enabled!(::log::INFO) { + println!("Pre-trans"); + tcx.print_debug_stats(); + } + let trans = phase_4_translate_to_llvm(tcx, analysis, &incremental_hashes_map); + + if log_enabled!(::log::INFO) { + println!("Post-trans"); + tcx.print_debug_stats(); + } + + Ok((outputs, trans)) + })?? 
}; - phase_5_run_llvm_passes(&sess, &trans, &outputs); + if sess.opts.debugging_opts.print_type_sizes { + sess.code_stats.borrow().print_type_sizes(); + } + + let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs); controller_entry_point!(after_llvm, sess, - CompileState::state_after_llvm(input, &sess, outdir, &trans)); + CompileState::state_after_llvm(input, sess, outdir, output, &trans), + phase5_result); + phase5_result?; + + write::cleanup_llvm(&trans); + + phase_6_link_output(sess, &trans, &outputs); + + // Now that we won't touch anything in the incremental compilation directory + // any more, we can finalize it (which involves renaming it) + rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); + + if sess.opts.debugging_opts.perf_stats { + sess.print_perf_stats(); + } + + controller_entry_point!(compilation_done, + sess, + CompileState::state_when_compilation_done(input, sess, outdir, output), + Ok(())); - phase_6_link_output(&sess, &trans, &outputs); + Ok(()) +} + +fn keep_hygiene_data(sess: &Session) -> bool { + sess.opts.debugging_opts.keep_hygiene_data +} + +fn keep_ast(sess: &Session) -> bool { + sess.opts.debugging_opts.keep_ast || + sess.opts.debugging_opts.save_analysis || + sess.opts.debugging_opts.save_analysis_csv || + sess.opts.debugging_opts.save_analysis_api } /// The name used for source code that doesn't originate in a file @@ -225,7 +266,7 @@ pub fn source_name(input: &Input) -> String { match *input { // FIXME (#9639): This needs to handle non-utf8 paths Input::File(ref ifile) => ifile.to_str().unwrap().to_string(), - Input::Str(_) => anon_src(), + Input::Str { ref name, .. 
} => name.clone(), } } @@ -246,11 +287,12 @@ pub fn source_name(input: &Input) -> String { pub struct CompileController<'a> { pub after_parse: PhaseController<'a>, pub after_expand: PhaseController<'a>, - pub after_write_deps: PhaseController<'a>, + pub after_hir_lowering: PhaseController<'a>, pub after_analysis: PhaseController<'a>, pub after_llvm: PhaseController<'a>, + pub compilation_done: PhaseController<'a>, - pub make_glob_map: resolve::MakeGlobMap, + pub make_glob_map: MakeGlobMap, } impl<'a> CompileController<'a> { @@ -258,23 +300,28 @@ impl<'a> CompileController<'a> { CompileController { after_parse: PhaseController::basic(), after_expand: PhaseController::basic(), - after_write_deps: PhaseController::basic(), + after_hir_lowering: PhaseController::basic(), after_analysis: PhaseController::basic(), after_llvm: PhaseController::basic(), - make_glob_map: resolve::MakeGlobMap::No, + compilation_done: PhaseController::basic(), + make_glob_map: MakeGlobMap::No, } } } pub struct PhaseController<'a> { pub stop: Compilation, - pub callback: Box () + 'a>, + // If true then the compiler will try to run the callback even if the phase + // ends with an error. Note that this is not always possible. + pub run_callback_on_error: bool, + pub callback: Box, } impl<'a> PhaseController<'a> { pub fn basic() -> PhaseController<'a> { PhaseController { stop: Compilation::Continue, + run_callback_on_error: false, callback: box |_| {}, } } @@ -283,141 +330,176 @@ impl<'a> PhaseController<'a> { /// State that is passed to a callback. What state is available depends on when /// during compilation the callback is made. See the various constructor methods /// (`state_*`) in the impl to see which data is provided for any given entry point. 
-pub struct CompileState<'a, 'ast: 'a, 'tcx: 'a> { +pub struct CompileState<'a, 'tcx: 'a> { pub input: &'a Input, - pub session: &'a Session, - pub cfg: Option<&'a ast::CrateConfig>, - pub krate: Option<&'a ast::Crate>, + pub session: &'tcx Session, + pub krate: Option, + pub registry: Option>, + pub cstore: Option<&'a CStore>, pub crate_name: Option<&'a str>, pub output_filenames: Option<&'a OutputFilenames>, pub out_dir: Option<&'a Path>, + pub out_file: Option<&'a Path>, + pub arenas: Option<&'tcx ty::CtxtArenas<'tcx>>, pub expanded_crate: Option<&'a ast::Crate>, pub hir_crate: Option<&'a hir::Crate>, - pub ast_map: Option<&'a hir_map::Map<'ast>>, - pub mir_map: Option<&'a MirMap<'tcx>>, - pub analysis: Option<&'a ty::CrateAnalysis<'a>>, - pub tcx: Option<&'a ty::ctxt<'tcx>>, - pub lcx: Option<&'a LoweringContext<'a>>, + pub ast_map: Option<&'a hir_map::Map<'tcx>>, + pub resolutions: Option<&'a Resolutions>, + pub analysis: Option<&'a ty::CrateAnalysis<'tcx>>, + pub tcx: Option>, pub trans: Option<&'a trans::CrateTranslation>, } -impl<'a, 'ast, 'tcx> CompileState<'a, 'ast, 'tcx> { +impl<'a, 'tcx> CompileState<'a, 'tcx> { fn empty(input: &'a Input, - session: &'a Session, + session: &'tcx Session, out_dir: &'a Option) - -> CompileState<'a, 'ast, 'tcx> { + -> Self { CompileState { input: input, session: session, out_dir: out_dir.as_ref().map(|s| &**s), - cfg: None, + out_file: None, + arenas: None, krate: None, + registry: None, + cstore: None, crate_name: None, output_filenames: None, expanded_crate: None, hir_crate: None, ast_map: None, + resolutions: None, analysis: None, - mir_map: None, tcx: None, - lcx: None, trans: None, } } fn state_after_parse(input: &'a Input, - session: &'a Session, + session: &'tcx Session, out_dir: &'a Option, - krate: &'a ast::Crate) - -> CompileState<'a, 'ast, 'tcx> { - CompileState { krate: Some(krate), ..CompileState::empty(input, session, out_dir) } + out_file: &'a Option, + krate: ast::Crate, + cstore: &'a CStore) + -> Self { + 
CompileState { + // Initialize the registry before moving `krate` + registry: Some(Registry::new(&session, krate.span)), + krate: Some(krate), + cstore: Some(cstore), + out_file: out_file.as_ref().map(|s| &**s), + ..CompileState::empty(input, session, out_dir) + } } fn state_after_expand(input: &'a Input, - session: &'a Session, + session: &'tcx Session, out_dir: &'a Option, + out_file: &'a Option, + cstore: &'a CStore, expanded_crate: &'a ast::Crate, crate_name: &'a str) - -> CompileState<'a, 'ast, 'tcx> { + -> Self { CompileState { crate_name: Some(crate_name), + cstore: Some(cstore), expanded_crate: Some(expanded_crate), + out_file: out_file.as_ref().map(|s| &**s), ..CompileState::empty(input, session, out_dir) } } - fn state_after_write_deps(input: &'a Input, - session: &'a Session, - out_dir: &'a Option, - hir_map: &'a hir_map::Map<'ast>, - krate: &'a ast::Crate, - hir_crate: &'a hir::Crate, - crate_name: &'a str, - lcx: &'a LoweringContext<'a>) - -> CompileState<'a, 'ast, 'tcx> { + fn state_after_hir_lowering(input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option, + arenas: &'tcx ty::CtxtArenas<'tcx>, + cstore: &'a CStore, + hir_map: &'a hir_map::Map<'tcx>, + analysis: &'a ty::CrateAnalysis<'static>, + resolutions: &'a Resolutions, + krate: &'a ast::Crate, + hir_crate: &'a hir::Crate, + crate_name: &'a str) + -> Self { CompileState { crate_name: Some(crate_name), + arenas: Some(arenas), + cstore: Some(cstore), ast_map: Some(hir_map), - krate: Some(krate), + analysis: Some(analysis), + resolutions: Some(resolutions), + expanded_crate: Some(krate), hir_crate: Some(hir_crate), - lcx: Some(lcx), + out_file: out_file.as_ref().map(|s| &**s), ..CompileState::empty(input, session, out_dir) } } fn state_after_analysis(input: &'a Input, - session: &'a Session, + session: &'tcx Session, out_dir: &'a Option, + out_file: &'a Option, krate: Option<&'a ast::Crate>, hir_crate: &'a hir::Crate, - analysis: &'a ty::CrateAnalysis, - mir_map: 
&'a MirMap<'tcx>, - tcx: &'a ty::ctxt<'tcx>, - lcx: &'a LoweringContext<'a>, + analysis: &'a ty::CrateAnalysis<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_name: &'a str) - -> CompileState<'a, 'ast, 'tcx> { + -> Self { CompileState { analysis: Some(analysis), - mir_map: Some(mir_map), tcx: Some(tcx), - krate: krate, + expanded_crate: krate, hir_crate: Some(hir_crate), - lcx: Some(lcx), crate_name: Some(crate_name), + out_file: out_file.as_ref().map(|s| &**s), ..CompileState::empty(input, session, out_dir) } } fn state_after_llvm(input: &'a Input, - session: &'a Session, + session: &'tcx Session, out_dir: &'a Option, + out_file: &'a Option, trans: &'a trans::CrateTranslation) - -> CompileState<'a, 'ast, 'tcx> { - CompileState { trans: Some(trans), ..CompileState::empty(input, session, out_dir) } + -> Self { + CompileState { + trans: Some(trans), + out_file: out_file.as_ref().map(|s| &**s), + ..CompileState::empty(input, session, out_dir) + } + } + + fn state_when_compilation_done(input: &'a Input, + session: &'tcx Session, + out_dir: &'a Option, + out_file: &'a Option) + -> Self { + CompileState { + out_file: out_file.as_ref().map(|s| &**s), + ..CompileState::empty(input, session, out_dir) + } } } -pub fn phase_1_parse_input(sess: &Session, cfg: ast::CrateConfig, input: &Input) -> ast::Crate { - // These may be left in an incoherent state after a previous compile. - // `clear_tables` and `get_ident_interner().clear()` can be used to free - // memory, but they do not restore the initial state. 
- syntax::ext::mtwt::reset_tables(); - token::reset_ident_interner(); +pub fn phase_1_parse_input<'a>(sess: &'a Session, input: &Input) -> PResult<'a, ast::Crate> { + let continue_after_error = sess.opts.debugging_opts.continue_parse_after_error; + sess.diagnostic().set_continue_after_error(continue_after_error); let krate = time(sess.time_passes(), "parsing", || { match *input { Input::File(ref file) => { - parse::parse_crate_from_file(&(*file), cfg.clone(), &sess.parse_sess) + parse::parse_crate_from_file(file, &sess.parse_sess) } - Input::Str(ref src) => { - parse::parse_crate_from_source_str(anon_src().to_string(), - src.to_string(), - cfg.clone(), - &sess.parse_sess) + Input::Str { ref input, ref name } => { + parse::parse_crate_from_source_str(name.clone(), input.clone(), &sess.parse_sess) } } - }); + })?; + + sess.diagnostic().set_continue_after_error(true); if sess.opts.debugging_opts.ast_json_noexpand { println!("{}", json::as_json(&krate)); @@ -432,7 +514,11 @@ pub fn phase_1_parse_input(sess: &Session, cfg: ast::CrateConfig, input: &Input) syntax::show_span::run(sess.diagnostic(), s, &krate); } - krate + if sess.opts.debugging_opts.hir_stats { + hir_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS"); + } + + Ok(krate) } fn count_nodes(krate: &ast::Crate) -> usize { @@ -444,66 +530,60 @@ fn count_nodes(krate: &ast::Crate) -> usize { // For continuing compilation after a parsed crate has been // modified +pub struct ExpansionResult { + pub expanded_crate: ast::Crate, + pub defs: hir_map::Definitions, + pub analysis: ty::CrateAnalysis<'static>, + pub resolutions: Resolutions, + pub hir_forest: hir_map::Forest, +} + /// Run the "early phases" of the compiler: initial `cfg` processing, /// loading compiler plugins (including those from `addl_plugins`), /// syntax expansion, secondary `cfg` expansion, synthesis of a test -/// harness if one is to be provided and injection of a dependency on the -/// standard library and prelude. 
+/// harness if one is to be provided, injection of a dependency on the +/// standard library and prelude, and name resolution. /// /// Returns `None` if we're aborting after handling -W help. -pub fn phase_2_configure_and_expand(sess: &Session, - cstore: &CStore, - mut krate: ast::Crate, - crate_name: &str, - addl_plugins: Option>) - -> Option { +pub fn phase_2_configure_and_expand(sess: &Session, + cstore: &CStore, + krate: ast::Crate, + registry: Option, + crate_name: &str, + addl_plugins: Option>, + make_glob_map: MakeGlobMap, + after_expand: F) + -> Result + where F: FnOnce(&ast::Crate) -> CompileResult, +{ let time_passes = sess.time_passes(); - // strip before anything else because crate metadata may use #[cfg_attr] - // and so macros can depend on configuration variables, such as - // - // #[macro_use] #[cfg(foo)] - // mod bar { macro_rules! baz!(() => {{}}) } - // - // baz! should not use this definition unless foo is enabled. - - let mut feature_gated_cfgs = vec![]; - krate = time(time_passes, "configuration 1", || { - syntax::config::strip_unconfigured_items(sess.diagnostic(), krate, &mut feature_gated_cfgs) - }); + let (mut krate, features) = syntax::config::features(krate, &sess.parse_sess, sess.opts.test); + // these need to be set "early" so that expansion sees `quote` if enabled. 
+ *sess.features.borrow_mut() = features; *sess.crate_types.borrow_mut() = collect_crate_types(sess, &krate.attrs); - *sess.crate_metadata.borrow_mut() = collect_crate_metadata(sess, &krate.attrs); + *sess.crate_disambiguator.borrow_mut() = Symbol::intern(&compute_crate_disambiguator(sess)); time(time_passes, "recursion limit", || { - middle::recursion_limit::update_recursion_limit(sess, &krate); + middle::recursion_limit::update_limits(sess, &krate); }); - time(time_passes, "gated macro checking", || { - let features = syntax::feature_gate::check_crate_macros(sess.codemap(), - &sess.parse_sess.span_diagnostic, - &krate); - - // these need to be set "early" so that expansion sees `quote` if enabled. - *sess.features.borrow_mut() = features; - sess.abort_if_errors(); - }); - - krate = time(time_passes, "crate injection", || { - syntax::std_inject::maybe_inject_crates_ref(krate, sess.opts.alt_std_name.clone()) + let alt_std_name = sess.opts.alt_std_name.clone(); + syntax::std_inject::maybe_inject_crates_ref(&sess.parse_sess, krate, alt_std_name) }); - let macros = time(time_passes, - "macro loading", - || macro_import::read_macro_defs(sess, &cstore, &krate)); - let mut addl_plugins = Some(addl_plugins); let registrars = time(time_passes, "plugin loading", || { - plugin::load::load_plugins(sess, &cstore, &krate, addl_plugins.take().unwrap()) + plugin::load::load_plugins(sess, + &cstore, + &krate, + crate_name, + addl_plugins.take().unwrap()) }); - let mut registry = Registry::new(sess, &krate); + let mut registry = registry.unwrap_or(Registry::new(sess, krate.span)); time(time_passes, "plugin registration", || { if sess.features.borrow().rustc_diagnostic_macros { @@ -522,9 +602,9 @@ pub fn phase_2_configure_and_expand(sess: &Session, }); let Registry { syntax_exts, early_lint_passes, late_lint_passes, lint_groups, - llvm_passes, attributes, .. } = registry; + llvm_passes, attributes, mir_passes, .. 
} = registry; - { + sess.track_errors(|| { let mut ls = sess.lint_store.borrow_mut(); for pass in early_lint_passes { ls.register_early_pass(Some(sess), true, pass); @@ -538,18 +618,29 @@ pub fn phase_2_configure_and_expand(sess: &Session, } *sess.plugin_llvm_passes.borrow_mut() = llvm_passes; + sess.mir_passes.borrow_mut().extend(mir_passes); *sess.plugin_attributes.borrow_mut() = attributes.clone(); - } + })?; // Lint plugins are registered; now we can process command line flags. if sess.opts.describe_lints { - super::describe_lints(&*sess.lint_store.borrow(), true); - return None; + super::describe_lints(&sess.lint_store.borrow(), true); + return Err(0); } - sess.lint_store.borrow_mut().process_command_line(sess); - - // Abort if there are errors from lint processing or a plugin registrar. - sess.abort_if_errors(); + sess.track_errors(|| sess.lint_store.borrow_mut().process_command_line(sess))?; + + // Currently, we ignore the name resolution data structures for the purposes of dependency + // tracking. Instead we will run name resolution and include its output in the hash of each + // item, much like we do for macro expansion. In other words, the hash reflects not just + // its contents but the results of name resolution on those contents. Hopefully we'll push + // this back at some point. + let _ignore = sess.dep_graph.in_ignore(); + let mut crate_loader = CrateLoader::new(sess, &cstore, crate_name); + crate_loader.preprocess(&krate); + let resolver_arenas = Resolver::arenas(); + let mut resolver = + Resolver::new(sess, &krate, make_glob_map, &mut crate_loader, &resolver_arenas); + syntax_ext::register_builtins(&mut resolver, syntax_exts, sess.features.borrow().quote); krate = time(time_passes, "expansion", || { // Windows dlls do not have rpaths, so they don't know how to find their @@ -557,341 +648,434 @@ pub fn phase_2_configure_and_expand(sess: &Session, // dependent dlls. 
Note that this uses cfg!(windows) as opposed to // targ_cfg because syntax extensions are always loaded for the host // compiler, not for the target. - let mut _old_path = OsString::new(); + // + // This is somewhat of an inherently racy operation, however, as + // multiple threads calling this function could possibly continue + // extending PATH far beyond what it should. To solve this for now we + // just don't add any new elements to PATH which are already there + // within PATH. This is basically a targeted fix at #17360 for rustdoc + // which runs rustc in parallel but has been seen (#33844) to cause + // problems with PATH becoming too long. + let mut old_path = OsString::new(); if cfg!(windows) { - _old_path = env::var_os("PATH").unwrap_or(_old_path); + old_path = env::var_os("PATH").unwrap_or(old_path); let mut new_path = sess.host_filesearch(PathKind::All) .get_dylib_search_paths(); - new_path.extend(env::split_paths(&_old_path)); + for path in env::split_paths(&old_path) { + if !new_path.contains(&path) { + new_path.push(path); + } + } env::set_var("PATH", &env::join_paths(new_path).unwrap()); } let features = sess.features.borrow(); let cfg = syntax::ext::expand::ExpansionConfig { - crate_name: crate_name.to_string(), features: Some(&features), recursion_limit: sess.recursion_limit.get(), trace_mac: sess.opts.debugging_opts.trace_macros, + should_test: sess.opts.test, + ..syntax::ext::expand::ExpansionConfig::default(crate_name.to_string()) }; - let mut ecx = syntax::ext::base::ExtCtxt::new(&sess.parse_sess, - krate.config.clone(), - cfg, - &mut feature_gated_cfgs); - syntax_ext::register_builtins(&mut ecx.syntax_env); - let (ret, macro_names) = syntax::ext::expand::expand_crate(ecx, - macros, - syntax_exts, - krate); + let mut ecx = ExtCtxt::new(&sess.parse_sess, cfg, &mut resolver); + let err_count = ecx.parse_sess.span_diagnostic.err_count(); + + let krate = ecx.monotonic_expander().expand_crate(krate); + + if 
ecx.parse_sess.span_diagnostic.err_count() - ecx.resolve_err_count > err_count { + ecx.parse_sess.span_diagnostic.abort_if_errors(); + } if cfg!(windows) { - env::set_var("PATH", &_old_path); + env::set_var("PATH", &old_path); } - *sess.available_macros.borrow_mut() = macro_names; - ret + krate }); - // Needs to go *after* expansion to be able to check the results - // of macro expansion. This runs before #[cfg] to try to catch as - // much as possible (e.g. help the programmer avoid platform - // specific differences) - time(time_passes, "complete gated feature checking 1", || { - let features = syntax::feature_gate::check_crate(sess.codemap(), - &sess.parse_sess.span_diagnostic, - &krate, - &attributes, - sess.opts.unstable_features); - *sess.features.borrow_mut() = features; - sess.abort_if_errors(); - }); + krate.exported_macros = mem::replace(&mut resolver.exported_macros, Vec::new()); - // JBC: make CFG processing part of expansion to avoid this problem: - - // strip again, in case expansion added anything with a #[cfg]. - krate = time(time_passes, "configuration 2", || { - syntax::config::strip_unconfigured_items(sess.diagnostic(), krate, &mut feature_gated_cfgs) + krate = time(time_passes, "maybe building test harness", || { + syntax::test::modify_for_testing(&sess.parse_sess, + &mut resolver, + sess.opts.test, + krate, + sess.diagnostic()) }); - time(time_passes, "gated configuration checking", || { - let features = sess.features.borrow(); - feature_gated_cfgs.sort(); - feature_gated_cfgs.dedup(); - for cfg in &feature_gated_cfgs { - cfg.check_and_emit(sess.diagnostic(), &features, sess.codemap()); - } - }); + // If we're in rustdoc we're always compiling as an rlib, but that'll trip a + // bunch of checks in the `modify` function below. For now just skip this + // step entirely if we're rustdoc as it's not too useful anyway. 
+ if !sess.opts.actually_rustdoc { + krate = time(time_passes, "maybe creating a macro crate", || { + let crate_types = sess.crate_types.borrow(); + let num_crate_types = crate_types.len(); + let is_proc_macro_crate = crate_types.contains(&config::CrateTypeProcMacro); + let is_test_crate = sess.opts.test; + syntax_ext::proc_macro_registrar::modify(&sess.parse_sess, + &mut resolver, + krate, + is_proc_macro_crate, + is_test_crate, + num_crate_types, + sess.diagnostic(), + &sess.features.borrow()) + }); + } - krate = time(time_passes, "maybe building test harness", || { - syntax::test::modify_for_testing(&sess.parse_sess, &sess.opts.cfg, krate, sess.diagnostic()) - }); + if sess.opts.debugging_opts.input_stats { + println!("Post-expansion node count: {}", count_nodes(&krate)); + } - krate = time(time_passes, - "prelude injection", - || syntax::std_inject::maybe_inject_prelude(&sess.parse_sess, krate)); + if sess.opts.debugging_opts.hir_stats { + hir_stats::print_ast_stats(&krate, "POST EXPANSION AST STATS"); + } - time(time_passes, - "checking that all macro invocations are gone", - || syntax::ext::expand::check_for_macros(&sess.parse_sess, &krate)); + if sess.opts.debugging_opts.ast_json { + println!("{}", json::as_json(&krate)); + } time(time_passes, "checking for inline asm in case the target doesn't support it", - || ::rustc_passes::no_asm::check_crate(sess, &krate)); - - // One final feature gating of the true AST that gets compiled - // later, to make sure we've got everything (e.g. configuration - // can insert new attributes via `cfg_attr`) - time(time_passes, "complete gated feature checking 2", || { - let features = syntax::feature_gate::check_crate(sess.codemap(), - &sess.parse_sess.span_diagnostic, - &krate, - &attributes, - sess.opts.unstable_features); - *sess.features.borrow_mut() = features; - sess.abort_if_errors(); - }); + || no_asm::check_crate(sess, &krate)); + + // Needs to go *after* expansion to be able to check the results of macro expansion. 
+ time(time_passes, "complete gated feature checking", || { + sess.track_errors(|| { + syntax::feature_gate::check_crate(&krate, + &sess.parse_sess, + &sess.features.borrow(), + &attributes, + sess.opts.unstable_features); + }) + })?; - time(time_passes, - "const fn bodies and arguments", - || ::rustc_passes::const_fn::check_crate(sess, &krate)); + time(sess.time_passes(), + "early lint checks", + || lint::check_ast_crate(sess, &krate)); - if sess.opts.debugging_opts.input_stats { - println!("Post-expansion node count: {}", count_nodes(&krate)); - } + time(sess.time_passes(), + "AST validation", + || ast_validation::check_crate(sess, &krate)); - Some(krate) -} + time(sess.time_passes(), "name resolution", || -> CompileResult { + // Since import resolution will eventually happen in expansion, + // don't perform `after_expand` until after import resolution. + after_expand(&krate)?; -pub fn assign_node_ids(sess: &Session, krate: ast::Crate) -> ast::Crate { - struct NodeIdAssigner<'a> { - sess: &'a Session, - } + resolver.resolve_crate(&krate); + Ok(()) + })?; + + // Lower ast -> hir. + let hir_forest = time(sess.time_passes(), "lowering ast -> hir", || { + let hir_crate = lower_crate(sess, &krate, &mut resolver); - impl<'a> Folder for NodeIdAssigner<'a> { - fn new_id(&mut self, old_id: ast::NodeId) -> ast::NodeId { - assert_eq!(old_id, ast::DUMMY_NODE_ID); - self.sess.next_node_id() + if sess.opts.debugging_opts.hir_stats { + hir_stats::print_hir_stats(&hir_crate); } - } - let krate = time(sess.time_passes(), - "assigning node ids", - || NodeIdAssigner { sess: sess }.fold_crate(krate)); + hir_map::Forest::new(hir_crate, &sess.dep_graph) + }); - if sess.opts.debugging_opts.ast_json { - println!("{}", json::as_json(&krate)); + // Discard hygiene data, which isn't required past lowering to HIR. 
+ if !keep_hygiene_data(sess) { + syntax::ext::hygiene::reset_hygiene_data(); } - krate -} - -pub fn make_map<'ast>(sess: &Session, - forest: &'ast mut hir_map::Forest) - -> hir_map::Map<'ast> { - // Construct the HIR map - time(sess.time_passes(), - "indexing hir", - move || hir_map::map_crate(forest)) + Ok(ExpansionResult { + expanded_crate: krate, + defs: resolver.definitions, + analysis: ty::CrateAnalysis { + export_map: resolver.export_map, + access_levels: AccessLevels::default(), + reachable: NodeSet(), + name: crate_name.to_string(), + glob_map: if resolver.make_glob_map { Some(resolver.glob_map) } else { None }, + hir_ty_to_ty: NodeMap(), + }, + resolutions: Resolutions { + freevars: resolver.freevars, + trait_map: resolver.trait_map, + maybe_unused_trait_imports: resolver.maybe_unused_trait_imports, + }, + hir_forest: hir_forest + }) } /// Run the resolution, typechecking, region checking and other /// miscellaneous analysis passes on the crate. Return various /// structures carrying the results of the analysis. pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session, - cstore: &CStore, hir_map: hir_map::Map<'tcx>, + mut analysis: ty::CrateAnalysis<'tcx>, + resolutions: Resolutions, arenas: &'tcx ty::CtxtArenas<'tcx>, name: &str, - make_glob_map: resolve::MakeGlobMap, f: F) - -> R - where F: for<'a> FnOnce(&'a ty::ctxt<'tcx>, MirMap<'tcx>, ty::CrateAnalysis) -> R + -> Result + where F: for<'a> FnOnce(TyCtxt<'a, 'tcx, 'tcx>, + ty::CrateAnalysis<'tcx>, + IncrementalHashesMap, + CompileResult) -> R { + macro_rules! 
try_with_f { + ($e: expr, ($t: expr, $a: expr, $h: expr)) => { + match $e { + Ok(x) => x, + Err(x) => { + f($t, $a, $h, Err(x)); + return Err(x); + } + } + } + } + let time_passes = sess.time_passes(); - let krate = hir_map.krate(); - time(time_passes, - "external crate/lib resolution", - || LocalCrateReader::new(sess, cstore, &hir_map).read_crates(krate)); - - let lang_items = time(time_passes, - "language item collection", - || middle::lang_items::collect_language_items(&sess, &hir_map)); - - let resolve::CrateMap { - def_map, - freevars, - export_map, - trait_map, - external_exports, - glob_map, - } = time(time_passes, - "resolution", - || resolve::resolve_crate(sess, &hir_map, make_glob_map)); + let lang_items = time(time_passes, "language item collection", || { + sess.track_errors(|| { + middle::lang_items::collect_language_items(&sess, &hir_map) + }) + })?; let named_region_map = time(time_passes, "lifetime resolution", - || middle::resolve_lifetime::krate(sess, krate, &def_map.borrow())); + || middle::resolve_lifetime::krate(sess, &hir_map))?; time(time_passes, "looking for entry point", || middle::entry::find_entry_point(sess, &hir_map)); sess.plugin_registrar_fn.set(time(time_passes, "looking for plugin registrar", || { - plugin::build::find_plugin_registrar(sess.diagnostic(), krate) + plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) })); + sess.derive_registrar_fn.set(derive_registrar::find(&hir_map)); let region_map = time(time_passes, "region resolution", - || middle::region::resolve_crate(sess, krate)); + || middle::region::resolve_crate(sess, &hir_map)); time(time_passes, "loop checking", - || middle::check_loop::check_crate(sess, krate)); + || loops::check_crate(sess, &hir_map)); time(time_passes, - "static item recursion checking", - || middle::check_static_recursion::check_crate(sess, krate, &def_map.borrow(), &hir_map)); - - ty::ctxt::create_and_enter(sess, - arenas, - def_map, - named_region_map, - hir_map, - freevars, - 
region_map, - lang_items, - stability::Index::new(krate), - |tcx| { - // passes are timed inside typeck - typeck::check_crate(tcx, trait_map); - - time(time_passes, - "const checking", - || middle::check_const::check_crate(tcx)); - - let access_levels = - time(time_passes, "privacy checking", || { - rustc_privacy::check_crate(tcx, - &export_map, - external_exports) - }); - - // Do not move this check past lint - time(time_passes, "stability index", || { - tcx.stability.borrow_mut().build(tcx, krate, &access_levels) - }); - - time(time_passes, - "intrinsic checking", - || middle::intrinsicck::check_crate(tcx)); - - time(time_passes, - "effect checking", - || middle::effect::check_crate(tcx)); - - time(time_passes, - "match checking", - || middle::check_match::check_crate(tcx)); - - let mir_map = - time(time_passes, - "MIR dump", - || mir::mir_map::build_mir_for_crate(tcx)); - - time(time_passes, - "liveness checking", - || middle::liveness::check_crate(tcx)); - - time(time_passes, - "borrow checking", - || borrowck::check_crate(tcx)); - - time(time_passes, - "rvalue checking", - || middle::check_rvalues::check_crate(tcx)); - - // Avoid overwhelming user with errors if type checking failed. - // I'm not sure how helpful this is, to be honest, but it avoids - // a - // lot of annoying errors in the compile-fail tests (basically, - // lint warnings and so on -- kindck used to do this abort, but - // kindck is gone now). 
-nmatsakis - tcx.sess.abort_if_errors(); - - let reachable_map = - time(time_passes, - "reachability checking", - || reachable::find_reachable(tcx, &access_levels)); - - time(time_passes, "death checking", || { - middle::dead::check_crate(tcx, &access_levels); - }); - - let ref lib_features_used = - time(time_passes, - "stability checking", - || stability::check_unstable_api_usage(tcx)); - - time(time_passes, "unused lib feature checking", || { - stability::check_unused_or_stable_features(&tcx.sess, - lib_features_used) - }); - - time(time_passes, - "lint checking", - || lint::check_crate(tcx, &access_levels)); - - // The above three passes generate errors w/o aborting - tcx.sess.abort_if_errors(); - - f(tcx, - mir_map, - ty::CrateAnalysis { - export_map: export_map, - access_levels: access_levels, - reachable: reachable_map, - name: name, - glob_map: glob_map, - }) - }) + "static item recursion checking", + || static_recursion::check_crate(sess, &hir_map))?; + + let index = stability::Index::new(&hir_map); + + TyCtxt::create_and_enter(sess, + arenas, + resolutions.trait_map, + named_region_map, + hir_map, + resolutions.freevars, + resolutions.maybe_unused_trait_imports, + region_map, + lang_items, + index, + name, + |tcx| { + let incremental_hashes_map = + time(time_passes, + "compute_incremental_hashes_map", + || rustc_incremental::compute_incremental_hashes_map(tcx)); + time(time_passes, + "load_dep_graph", + || rustc_incremental::load_dep_graph(tcx, &incremental_hashes_map)); + + time(time_passes, "stability index", || { + tcx.stability.borrow_mut().build(tcx) + }); + + time(time_passes, + "stability checking", + || stability::check_unstable_api_usage(tcx)); + + // passes are timed inside typeck + analysis.hir_ty_to_ty = + try_with_f!(typeck::check_crate(tcx), (tcx, analysis, incremental_hashes_map)); + + time(time_passes, + "const checking", + || consts::check_crate(tcx)); + + analysis.access_levels = + time(time_passes, "privacy checking", || { + 
rustc_privacy::check_crate(tcx, &analysis.export_map) + }); + + time(time_passes, + "intrinsic checking", + || middle::intrinsicck::check_crate(tcx)); + + time(time_passes, + "effect checking", + || middle::effect::check_crate(tcx)); + + time(time_passes, + "match checking", + || check_match::check_crate(tcx)); + + // this must run before MIR dump, because + // "not all control paths return a value" is reported here. + // + // maybe move the check to a MIR pass? + time(time_passes, + "liveness checking", + || middle::liveness::check_crate(tcx)); + + time(time_passes, + "rvalue checking", + || rvalues::check_crate(tcx)); + + time(time_passes, + "MIR dump", + || mir::mir_map::build_mir_for_crate(tcx)); + + time(time_passes, "MIR cleanup and validation", || { + let mut passes = sess.mir_passes.borrow_mut(); + // Push all the built-in validation passes. + // NB: if you’re adding an *optimisation* it ought to go to another set of passes + // in stage 4 below. + passes.push_hook(box mir::transform::dump_mir::DumpMir); + passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("initial")); + passes.push_pass( + box mir::transform::qualify_consts::QualifyAndPromoteConstants::default()); + passes.push_pass(box mir::transform::type_check::TypeckMir); + passes.push_pass( + box mir::transform::simplify_branches::SimplifyBranches::new("initial")); + passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("qualify-consts")); + // And run everything. + passes.run_passes(tcx); + }); + + time(time_passes, + "borrow checking", + || borrowck::check_crate(tcx)); + + // Avoid overwhelming user with errors if type checking failed. + // I'm not sure how helpful this is, to be honest, but it avoids + // a + // lot of annoying errors in the compile-fail tests (basically, + // lint warnings and so on -- kindck used to do this abort, but + // kindck is gone now). 
-nmatsakis + if sess.err_count() > 0 { + return Ok(f(tcx, analysis, incremental_hashes_map, Err(sess.err_count()))); + } + + analysis.reachable = + time(time_passes, + "reachability checking", + || reachable::find_reachable(tcx, &analysis.access_levels)); + + time(time_passes, "death checking", || { + middle::dead::check_crate(tcx, &analysis.access_levels); + }); + + time(time_passes, "unused lib feature checking", || { + stability::check_unused_or_stable_features(tcx, &analysis.access_levels) + }); + + time(time_passes, + "lint checking", + || lint::check_crate(tcx, &analysis.access_levels)); + + // The above three passes generate errors w/o aborting + if sess.err_count() > 0 { + return Ok(f(tcx, analysis, incremental_hashes_map, Err(sess.err_count()))); + } + + Ok(f(tcx, analysis, incremental_hashes_map, Ok(()))) + }) } /// Run the translation phase to LLVM, after which the AST and analysis can -/// be discarded. -pub fn phase_4_translate_to_llvm<'tcx>(tcx: &ty::ctxt<'tcx>, - mut mir_map: MirMap<'tcx>, - analysis: ty::CrateAnalysis) - -> trans::CrateTranslation { +pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + analysis: ty::CrateAnalysis, + incremental_hashes_map: &IncrementalHashesMap) + -> trans::CrateTranslation { let time_passes = tcx.sess.time_passes(); time(time_passes, "resolving dependency formats", || dependency_format::calculate(&tcx.sess)); + // Run the passes that transform the MIR into a more suitable form for translation to LLVM + // code. + time(time_passes, "MIR optimisations", || { + let mut passes = ::rustc::mir::transform::Passes::new(); + passes.push_hook(box mir::transform::dump_mir::DumpMir); + passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); + passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("no-landing-pads")); + + // From here on out, regions are gone. 
+ passes.push_pass(box mir::transform::erase_regions::EraseRegions); + + passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); + passes.push_pass(box borrowck::ElaborateDrops); + passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads); + passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("elaborate-drops")); + + // No lifetime analysis based on borrowing can be done from here on out. + passes.push_pass(box mir::transform::instcombine::InstCombine::new()); + passes.push_pass(box mir::transform::deaggregator::Deaggregator); + passes.push_pass(box mir::transform::copy_prop::CopyPropagation); + + passes.push_pass(box mir::transform::simplify::SimplifyLocals); + passes.push_pass(box mir::transform::add_call_guards::AddCallGuards); + passes.push_pass(box mir::transform::dump_mir::Marker("PreTrans")); + + passes.run_passes(tcx); + }); + + let translation = + time(time_passes, + "translation", + move || trans::trans_crate(tcx, analysis, &incremental_hashes_map)); + time(time_passes, - "erasing regions from MIR", - || mir::transform::erase_regions::erase_regions(tcx, &mut mir_map)); + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); - // Option dance to work around the lack of stack once closures. time(time_passes, - "translation", - move || trans::trans_crate(tcx, &mir_map, analysis)) + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx, + &incremental_hashes_map, + translation.link.crate_hash)); + translation } /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. 
pub fn phase_5_run_llvm_passes(sess: &Session, trans: &trans::CrateTranslation, - outputs: &OutputFilenames) { - if sess.opts.cg.no_integrated_as { - let mut map = HashMap::new(); - map.insert(OutputType::Assembly, None); + outputs: &OutputFilenames) -> CompileResult { + if sess.opts.cg.no_integrated_as || + (sess.target.target.options.no_integrated_as && + (outputs.outputs.contains_key(&OutputType::Object) || + outputs.outputs.contains_key(&OutputType::Exe))) + { + let output_types = OutputTypes::new(&[(OutputType::Assembly, None)]); time(sess.time_passes(), "LLVM passes", - || write::run_passes(sess, trans, &map, outputs)); + || write::run_passes(sess, trans, &output_types, outputs)); write::run_assembler(sess, outputs); + // HACK the linker expects the object file to be named foo.0.o but + // `run_assembler` produces an object named just foo.o. Rename it if we + // are going to build an executable + if sess.opts.output_types.contains_key(&OutputType::Exe) { + let f = outputs.path(OutputType::Object); + fs::copy(&f, + f.with_file_name(format!("{}.0.o", + f.file_stem().unwrap().to_string_lossy()))).unwrap(); + fs::remove_file(f).unwrap(); + } + // Remove assembly source, unless --save-temps was specified if !sess.opts.cg.save_temps { - fs::remove_file(&outputs.temp_path(OutputType::Assembly)).unwrap(); + fs::remove_file(&outputs.temp_path(OutputType::Assembly, None)).unwrap(); } } else { time(sess.time_passes(), @@ -899,7 +1083,15 @@ pub fn phase_5_run_llvm_passes(sess: &Session, || write::run_passes(sess, trans, &sess.opts.output_types, outputs)); } - sess.abort_if_errors(); + time(sess.time_passes(), + "serialize work products", + move || rustc_incremental::save_work_products(sess)); + + if sess.err_count() > 0 { + Err(sess.err_count()) + } else { + Ok(()) + } } /// Run the linker on any artifacts that resulted from the LLVM run. 
@@ -909,7 +1101,7 @@ pub fn phase_6_link_output(sess: &Session, outputs: &OutputFilenames) { time(sess.time_passes(), "linking", - || link::link_binary(sess, trans, outputs, &trans.link.crate_name)); + || link::link_binary(sess, trans, outputs, &trans.link.crate_name.as_str())); } fn escape_dep_filename(filename: &str) -> String { @@ -918,14 +1110,14 @@ fn escape_dep_filename(filename: &str) -> String { filename.replace(" ", "\\ ") } -fn write_out_deps(sess: &Session, outputs: &OutputFilenames, id: &str) { +fn write_out_deps(sess: &Session, outputs: &OutputFilenames, crate_name: &str) { let mut out_filenames = Vec::new(); for output_type in sess.opts.output_types.keys() { let file = outputs.path(*output_type); match *output_type { OutputType::Exe => { for output in sess.crate_types.borrow().iter() { - let p = link::filename_for_input(sess, *output, id, outputs); + let p = link::filename_for_input(sess, *output, crate_name, outputs); out_filenames.push(p); } } @@ -953,16 +1145,16 @@ fn write_out_deps(sess: &Session, outputs: &OutputFilenames, id: &str) { .filter(|fmap| !fmap.is_imported()) .map(|fmap| escape_dep_filename(&fmap.name)) .collect(); - let mut file = try!(fs::File::create(&deps_filename)); + let mut file = fs::File::create(&deps_filename)?; for path in &out_filenames { - try!(write!(file, "{}: {}\n\n", path.display(), files.join(" "))); + write!(file, "{}: {}\n\n", path.display(), files.join(" "))?; } // Emit a fake target for each input file to the compilation. This // prevents `make` from spitting out an error if a file is later // deleted. 
For more info see #28735 for path in files { - try!(writeln!(file, "{}:", path)); + writeln!(file, "{}:", path)?; } Ok(()) })(); @@ -987,15 +1179,24 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec { Some(config::CrateTypeRlib) } + Some(ref n) if *n == "metadata" => { + Some(config::CrateTypeMetadata) + } Some(ref n) if *n == "dylib" => { Some(config::CrateTypeDylib) } + Some(ref n) if *n == "cdylib" => { + Some(config::CrateTypeCdylib) + } Some(ref n) if *n == "lib" => { Some(config::default_lib_output()) } Some(ref n) if *n == "staticlib" => { Some(config::CrateTypeStaticlib) } + Some(ref n) if *n == "proc-macro" => { + Some(config::CrateTypeProcMacro) + } Some(ref n) if *n == "bin" => Some(config::CrateTypeExecutable), Some(_) => { session.add_lint(lint::builtin::UNKNOWN_CRATE_TYPES, @@ -1051,8 +1252,42 @@ pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec Vec { - session.opts.cg.metadata.clone() +pub fn compute_crate_disambiguator(session: &Session) -> String { + use std::hash::Hasher; + + // The crate_disambiguator is a 128 bit hash. The disambiguator is fed + // into various other hashes quite a bit (symbol hashes, incr. comp. hashes, + // debuginfo type IDs, etc), so we don't want it to be too wide. 128 bits + // should still be safe enough to avoid collisions in practice. + // FIXME(mw): It seems that the crate_disambiguator is used everywhere as + // a hex-string instead of raw bytes. We should really use the + // smaller representation. 
+ let mut hasher = ArchIndependentHasher::new(Blake2bHasher::new(128 / 8, &[])); + + let mut metadata = session.opts.cg.metadata.clone(); + // We don't want the crate_disambiguator to dependent on the order + // -C metadata arguments, so sort them: + metadata.sort(); + // Every distinct -C metadata value is only incorporated once: + metadata.dedup(); + + hasher.write(b"metadata"); + for s in &metadata { + // Also incorporate the length of a metadata string, so that we generate + // different values for `-Cmetadata=ab -Cmetadata=c` and + // `-Cmetadata=a -Cmetadata=bc` + hasher.write_usize(s.len()); + hasher.write(s.as_bytes()); + } + + let mut hash_state = hasher.into_inner(); + let hash_bytes = hash_state.finalize(); + + // If this is an executable, add a special suffix, so that we don't get + // symbol conflicts when linking against a library of the same name. + let is_exe = session.crate_types.borrow().contains(&config::CrateTypeExecutable); + + format!("{:x}{}", FmtWrap(hash_bytes), if is_exe { "-exe" } else {""}) } pub fn build_output_filenames(input: &Input, diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 31151e10a5a55..f84622c2f0285 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -21,6 +21,7 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(box_syntax)] #![feature(libc)] @@ -38,14 +39,18 @@ extern crate libc; extern crate rustc; extern crate rustc_back; extern crate rustc_borrowck; +extern crate rustc_const_eval; +extern crate rustc_data_structures; +extern crate rustc_errors as errors; extern crate rustc_passes; -extern crate rustc_front; extern crate rustc_lint; extern crate rustc_plugin; extern crate rustc_privacy; +extern crate rustc_incremental; extern crate rustc_metadata; extern crate 
rustc_mir; extern crate rustc_resolve; +extern crate rustc_save_analysis; extern crate rustc_trans; extern crate rustc_typeck; extern crate serialize; @@ -55,22 +60,28 @@ extern crate log; #[macro_use] extern crate syntax; extern crate syntax_ext; +extern crate syntax_pos; use driver::CompileController; use pretty::{PpMode, UserIdentifiedItem}; use rustc_resolve as resolve; +use rustc_save_analysis as save; use rustc_trans::back::link; -use rustc_trans::save; -use rustc::session::{config, Session, build_session}; +use rustc_trans::back::write::{create_target_machine, RELOC_MODEL_ARGS, CODE_GEN_MODEL_ARGS}; +use rustc::dep_graph::DepGraph; +use rustc::session::{self, config, Session, build_session, CompileResult}; use rustc::session::config::{Input, PrintRequest, OutputType, ErrorOutputType}; -use rustc::middle::cstore::CrateStore; +use rustc::session::config::nightly_options; +use rustc::session::{early_error, early_warn}; use rustc::lint::Lint; use rustc::lint; -use rustc_metadata::loader; +use rustc_metadata::locator; use rustc_metadata::cstore::CStore; use rustc::util::common::time; +use serialize::json::ToJson; + use std::cmp::max; use std::cmp::Ordering::Equal; use std::default::Default; @@ -84,14 +95,11 @@ use std::str; use std::sync::{Arc, Mutex}; use std::thread; -use rustc::session::early_error; - use syntax::ast; -use syntax::parse; -use syntax::errors; -use syntax::errors::emitter::Emitter; -use syntax::diagnostics; -use syntax::parse::token; +use syntax::codemap::{CodeMap, FileLoader, RealFileLoader}; +use syntax::feature_gate::{GatedCfg, UnstableFeatures}; +use syntax::parse::{self, PResult}; +use syntax_pos::{DUMMY_SP, MultiSpan}; #[cfg(test)] pub mod test; @@ -99,72 +107,119 @@ pub mod test; pub mod driver; pub mod pretty; pub mod target_features; - +mod derive_registrar; const BUG_REPORT_URL: &'static str = "https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.\ md#bug-reports"; -pub fn run(args: Vec) -> isize { - 
monitor(move || run_compiler(&args, &mut RustcDefaultCalls)); +#[inline] +fn abort_msg(err_count: usize) -> String { + match err_count { + 0 => "aborting with no errors (maybe a bug?)".to_owned(), + 1 => "aborting due to previous error".to_owned(), + e => format!("aborting due to {} previous errors", e), + } +} + +pub fn abort_on_err(result: Result, sess: &Session) -> T { + match result { + Err(err_count) => { + sess.fatal(&abort_msg(err_count)); + } + Ok(x) => x, + } +} + +pub fn run(run_compiler: F) -> isize + where F: FnOnce() -> (CompileResult, Option) + Send + 'static +{ + monitor(move || { + let (result, session) = run_compiler(); + if let Err(err_count) = result { + if err_count > 0 { + match session { + Some(sess) => sess.fatal(&abort_msg(err_count)), + None => { + let emitter = + errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto, None); + let handler = errors::Handler::with_emitter(true, false, Box::new(emitter)); + handler.emit(&MultiSpan::new(), + &abort_msg(err_count), + errors::Level::Fatal); + exit_on_err(); + } + } + } + } + }); 0 } // Parse args and run the compiler. This is the primary entry point for rustc. // See comments on CompilerCalls below for details about the callbacks argument. -pub fn run_compiler<'a>(args: &[String], callbacks: &mut CompilerCalls<'a>) { - macro_rules! do_or_return {($expr: expr) => { +// The FileLoader provides a way to load files from sources other than the file system. +pub fn run_compiler<'a>(args: &[String], + callbacks: &mut CompilerCalls<'a>, + file_loader: Option>, + emitter_dest: Option>) + -> (CompileResult, Option) +{ + macro_rules! 
do_or_return {($expr: expr, $sess: expr) => { match $expr { - Compilation::Stop => return, + Compilation::Stop => return (Ok(()), $sess), Compilation::Continue => {} } }} - let matches = match handle_options(args.to_vec()) { + let matches = match handle_options(args) { Some(matches) => matches, - None => return, + None => return (Ok(()), None), }; - let sopts = config::build_session_options(&matches); + let (sopts, cfg) = config::build_session_options_and_crate_config(&matches); + + if sopts.debugging_opts.debug_llvm { + unsafe { llvm::LLVMRustSetDebug(1); } + } let descriptions = diagnostics_registry(); - do_or_return!(callbacks.early_callback(&matches, &descriptions, sopts.error_format)); + do_or_return!(callbacks.early_callback(&matches, + &sopts, + &cfg, + &descriptions, + sopts.error_format), + None); let (odir, ofile) = make_output(&matches); let (input, input_file_path) = match make_input(&matches.free) { Some((input, input_file_path)) => callbacks.some_input(input, input_file_path), - None => match callbacks.no_input(&matches, &sopts, &odir, &ofile, &descriptions) { + None => match callbacks.no_input(&matches, &sopts, &cfg, &odir, &ofile, &descriptions) { Some((input, input_file_path)) => (input, input_file_path), - None => return, + None => return (Ok(()), None), }, }; - let cstore = Rc::new(CStore::new(token::get_ident_interner())); - let sess = build_session(sopts, input_file_path, descriptions, - cstore.clone()); + let dep_graph = DepGraph::new(sopts.build_dep_graph()); + let cstore = Rc::new(CStore::new(&dep_graph)); + + let loader = file_loader.unwrap_or(box RealFileLoader); + let codemap = Rc::new(CodeMap::with_file_loader(loader)); + let mut sess = session::build_session_with_codemap( + sopts, &dep_graph, input_file_path, descriptions, cstore.clone(), codemap, emitter_dest, + ); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let mut cfg = config::build_configuration(&sess); - target_features::add_configuration(&mut 
cfg, &sess); - do_or_return!(callbacks.late_callback(&matches, &sess, &input, &odir, &ofile)); + let mut cfg = config::build_configuration(&sess, cfg); + target_features::add_configuration(&mut cfg, &sess); + sess.parse_sess.config = cfg; - // It is somewhat unfortunate that this is hardwired in - this is forced by - // the fact that pretty_print_input requires the session by value. - let pretty = callbacks.parse_pretty(&sess, &matches); - match pretty { - Some((ppm, opt_uii)) => { - pretty::pretty_print_input(sess, &cstore, cfg, &input, ppm, opt_uii, ofile); - return; - } - None => { - // continue - } - } + do_or_return!(callbacks.late_callback(&matches, &sess, &input, &odir, &ofile), Some(sess)); let plugins = sess.opts.debugging_opts.extra_plugins.clone(); - let control = callbacks.build_controller(&sess); - driver::compile_input(sess, &cstore, cfg, &input, &odir, &ofile, - Some(plugins), control); + let control = callbacks.build_controller(&sess, &matches); + (driver::compile_input(&sess, &cstore, &input, &odir, &ofile, Some(plugins), &control), + Some(sess)) } // Extract output directory and file from matches. 
@@ -181,7 +236,8 @@ fn make_input(free_matches: &[String]) -> Option<(Input, Option)> { if ifile == "-" { let mut src = String::new(); io::stdin().read_to_string(&mut src).unwrap(); - Some((Input::Str(src), None)) + Some((Input::Str { name: driver::anon_src(), input: src }, + None)) } else { Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile)))) @@ -191,6 +247,27 @@ fn make_input(free_matches: &[String]) -> Option<(Input, Option)> { } } +fn parse_pretty(sess: &Session, + matches: &getopts::Matches) + -> Option<(PpMode, Option)> { + let pretty = if sess.opts.debugging_opts.unstable_options { + matches.opt_default("pretty", "normal").map(|a| { + // stable pretty-print variants only + pretty::parse_pretty(sess, &a, false) + }) + } else { + None + }; + if pretty.is_none() && sess.unstable_options() { + matches.opt_str("unpretty").map(|a| { + // extended with unstable pretty-print variants + pretty::parse_pretty(sess, &a, true) + }) + } else { + pretty + } +} + // Whether to stop or continue compilation. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub enum Compilation { @@ -215,7 +292,9 @@ pub trait CompilerCalls<'a> { // else (e.g., selecting input and output). fn early_callback(&mut self, _: &getopts::Matches, - _: &diagnostics::registry::Registry, + _: &config::Options, + _: &ast::CrateConfig, + _: &errors::registry::Registry, _: ErrorOutputType) -> Compilation { Compilation::Continue @@ -252,78 +331,71 @@ pub trait CompilerCalls<'a> { fn no_input(&mut self, _: &getopts::Matches, _: &config::Options, + _: &ast::CrateConfig, _: &Option, _: &Option, - _: &diagnostics::registry::Registry) + _: &errors::registry::Registry) -> Option<(Input, Option)> { None } - // Parse pretty printing information from the arguments. The implementer can - // choose to ignore this (the default will return None) which will skip pretty - // printing. If you do want to pretty print, it is recommended to use the - // implementation of this method from RustcDefaultCalls. 
- // FIXME, this is a terrible bit of API. Parsing of pretty printing stuff - // should be done as part of the framework and the implementor should customise - // handling of it. However, that is not possible atm because pretty printing - // essentially goes off and takes another path through the compiler which - // means the session is either moved or not depending on what parse_pretty - // returns (we could fix this by cloning, but it's another hack). The proper - // solution is to handle pretty printing as if it were a compiler extension, - // extending CompileController to make this work (see for example the treatment - // of save-analysis in RustcDefaultCalls::build_controller). - fn parse_pretty(&mut self, - _sess: &Session, - _matches: &getopts::Matches) - -> Option<(PpMode, Option)> { - None - } - // Create a CompilController struct for controlling the behaviour of // compilation. - fn build_controller(&mut self, &Session) -> CompileController<'a>; + fn build_controller(&mut self, &Session, &getopts::Matches) -> CompileController<'a>; } // CompilerCalls instance for a regular rustc build. #[derive(Copy, Clone)] pub struct RustcDefaultCalls; +fn handle_explain(code: &str, + descriptions: &errors::registry::Registry, + output: ErrorOutputType) { + let normalised = if code.starts_with("E") { + code.to_string() + } else { + format!("E{0:0>4}", code) + }; + match descriptions.find_description(&normalised) { + Some(ref description) => { + // Slice off the leading newline and print. 
+ print!("{}", &(&description[1..]).split("\n").map(|x| { + format!("{}\n", if x.starts_with("```") { + "```" + } else { + x + }) + }).collect::()); + } + None => { + early_error(output, &format!("no extended information for {}", code)); + } + } +} + impl<'a> CompilerCalls<'a> for RustcDefaultCalls { fn early_callback(&mut self, matches: &getopts::Matches, - descriptions: &diagnostics::registry::Registry, + _: &config::Options, + _: &ast::CrateConfig, + descriptions: &errors::registry::Registry, output: ErrorOutputType) -> Compilation { - match matches.opt_str("explain") { - Some(ref code) => { - let normalised = if !code.starts_with("E") { - format!("E{0:0>4}", code) - } else { - code.to_string() - }; - match descriptions.find_description(&normalised) { - Some(ref description) => { - // Slice off the leading newline and print. - print!("{}", &description[1..]); - } - None => { - early_error(output, &format!("no extended information for {}", code)); - } - } - return Compilation::Stop; - } - None => (), + if let Some(ref code) = matches.opt_str("explain") { + handle_explain(code, descriptions, output); + return Compilation::Stop; } - return Compilation::Continue; + Compilation::Continue } fn no_input(&mut self, matches: &getopts::Matches, sopts: &config::Options, + cfg: &ast::CrateConfig, odir: &Option, ofile: &Option, - descriptions: &diagnostics::registry::Registry) + descriptions: &errors::registry::Registry) -> Option<(Input, Option)> { match matches.free.len() { 0 => { @@ -333,11 +405,20 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { describe_lints(&ls, false); return None; } - let cstore = Rc::new(CStore::new(token::get_ident_interner())); - let sess = build_session(sopts.clone(), None, descriptions.clone(), - cstore.clone()); + let dep_graph = DepGraph::new(sopts.build_dep_graph()); + let cstore = Rc::new(CStore::new(&dep_graph)); + let mut sess = build_session(sopts.clone(), + &dep_graph, + None, + descriptions.clone(), + cstore.clone()); 
rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let should_stop = RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile); + let mut cfg = config::build_configuration(&sess, cfg.clone()); + target_features::add_configuration(&mut cfg, &sess); + sess.parse_sess.config = cfg; + let should_stop = + RustcDefaultCalls::print_crate_info(&sess, None, odir, ofile); + if should_stop == Compilation::Stop { return None; } @@ -346,30 +427,6 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { 1 => panic!("make_input should have provided valid inputs"), _ => early_error(sopts.error_format, "multiple input filenames provided"), } - - None - } - - fn parse_pretty(&mut self, - sess: &Session, - matches: &getopts::Matches) - -> Option<(PpMode, Option)> { - let pretty = if sess.opts.debugging_opts.unstable_options { - matches.opt_default("pretty", "normal").map(|a| { - // stable pretty-print variants only - pretty::parse_pretty(sess, &a, false) - }) - } else { - None - }; - if pretty.is_none() && sess.unstable_options() { - matches.opt_str("unpretty").map(|a| { - // extended with unstable pretty-print variants - pretty::parse_pretty(sess, &a, true) - }) - } else { - pretty - } } fn late_callback(&mut self, @@ -383,37 +440,75 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { .and_then(|| RustcDefaultCalls::list_metadata(sess, matches, input)) } - fn build_controller(&mut self, sess: &Session) -> CompileController<'a> { + fn build_controller(&mut self, + sess: &Session, + matches: &getopts::Matches) + -> CompileController<'a> { let mut control = CompileController::basic(); - if sess.opts.parse_only || sess.opts.debugging_opts.show_span.is_some() || - sess.opts.debugging_opts.ast_json_noexpand { - control.after_parse.stop = Compilation::Stop; + if let Some((ppm, opt_uii)) = parse_pretty(sess, matches) { + if ppm.needs_ast_map(&opt_uii) { + control.after_hir_lowering.stop = Compilation::Stop; + + control.after_parse.callback = box move |state| 
{ + state.krate = Some(pretty::fold_crate(state.krate.take().unwrap(), ppm)); + }; + control.after_hir_lowering.callback = box move |state| { + pretty::print_after_hir_lowering(state.session, + state.ast_map.unwrap(), + state.analysis.unwrap(), + state.resolutions.unwrap(), + state.input, + &state.expanded_crate.take().unwrap(), + state.crate_name.unwrap(), + ppm, + state.arenas.unwrap(), + opt_uii.clone(), + state.out_file); + }; + } else { + control.after_parse.stop = Compilation::Stop; + + control.after_parse.callback = box move |state| { + let krate = pretty::fold_crate(state.krate.take().unwrap(), ppm); + pretty::print_after_parsing(state.session, + state.input, + &krate, + ppm, + state.out_file); + }; + } + + return control; } - if sess.opts.no_analysis || sess.opts.debugging_opts.ast_json { - control.after_write_deps.stop = Compilation::Stop; + if sess.opts.debugging_opts.parse_only || + sess.opts.debugging_opts.show_span.is_some() || + sess.opts.debugging_opts.ast_json_noexpand { + control.after_parse.stop = Compilation::Stop; } - if sess.opts.no_trans { - control.after_analysis.stop = Compilation::Stop; + if sess.opts.debugging_opts.no_analysis || + sess.opts.debugging_opts.ast_json { + control.after_hir_lowering.stop = Compilation::Stop; } if !sess.opts.output_types.keys().any(|&i| i == OutputType::Exe) { control.after_llvm.stop = Compilation::Stop; } - if sess.opts.debugging_opts.save_analysis { + if save_analysis(sess) { control.after_analysis.callback = box |state| { time(state.session.time_passes(), "save analysis", || { save::process_crate(state.tcx.unwrap(), - state.lcx.unwrap(), - state.krate.unwrap(), + state.expanded_crate.unwrap(), state.analysis.unwrap(), state.crate_name.unwrap(), - state.out_dir) + state.out_dir, + save_analysis_format(state.session)) }); }; + control.after_analysis.run_callback_on_error = true; control.make_glob_map = resolve::MakeGlobMap::Yes; } @@ -421,6 +516,24 @@ impl<'a> CompilerCalls<'a> for RustcDefaultCalls { } } +fn 
save_analysis(sess: &Session) -> bool { + sess.opts.debugging_opts.save_analysis || + sess.opts.debugging_opts.save_analysis_csv || + sess.opts.debugging_opts.save_analysis_api +} + +fn save_analysis_format(sess: &Session) -> save::Format { + if sess.opts.debugging_opts.save_analysis { + save::Format::Json + } else if sess.opts.debugging_opts.save_analysis_csv { + save::Format::Csv + } else if sess.opts.debugging_opts.save_analysis_api { + save::Format::JsonApi + } else { + unreachable!(); + } +} + impl RustcDefaultCalls { pub fn list_metadata(sess: &Session, matches: &getopts::Matches, input: &Input) -> Compilation { let r = matches.opt_strs("Z"); @@ -429,11 +542,10 @@ impl RustcDefaultCalls { &Input::File(ref ifile) => { let path = &(*ifile); let mut v = Vec::new(); - loader::list_file_metadata(&sess.target.target, path, &mut v) - .unwrap(); + locator::list_file_metadata(&sess.target.target, path, &mut v).unwrap(); println!("{}", String::from_utf8(v).unwrap()); } - &Input::Str(_) => { + &Input::Str { .. 
} => { early_error(ErrorOutputType::default(), "cannot list metadata for stdin"); } } @@ -453,10 +565,28 @@ impl RustcDefaultCalls { return Compilation::Continue; } - let attrs = input.map(|input| parse_crate_attrs(sess, input)); + let attrs = match input { + None => None, + Some(input) => { + let result = parse_crate_attrs(sess, input); + match result { + Ok(attrs) => Some(attrs), + Err(mut parse_error) => { + parse_error.emit(); + return Compilation::Stop; + } + } + } + }; for req in &sess.opts.prints { match *req { + PrintRequest::TargetList => { + let mut targets = rustc_back::target::get_targets().collect::>(); + targets.sort(); + println!("{}", targets.join("\n")); + }, PrintRequest::Sysroot => println!("{}", sess.sysroot().display()), + PrintRequest::TargetSpec => println!("{}", sess.target.target.to_json().pretty()), PrintRequest::FileNames | PrintRequest::CrateName => { let input = match input { @@ -471,8 +601,6 @@ impl RustcDefaultCalls { continue; } let crate_types = driver::collect_crate_types(sess, attrs); - let metadata = driver::collect_crate_metadata(sess, attrs); - *sess.crate_metadata.borrow_mut() = metadata; for &style in &crate_types { let fname = link::filename_for_input(sess, style, &id, &t_outputs); println!("{}", @@ -481,6 +609,55 @@ impl RustcDefaultCalls { .to_string_lossy()); } } + PrintRequest::Cfg => { + let allow_unstable_cfg = UnstableFeatures::from_environment() + .is_nightly_build(); + + let mut cfgs = Vec::new(); + for &(name, ref value) in sess.parse_sess.config.iter() { + let gated_cfg = GatedCfg::gate(&ast::MetaItem { + name: name, + node: ast::MetaItemKind::Word, + span: DUMMY_SP, + }); + if !allow_unstable_cfg && gated_cfg.is_some() { + continue; + } + + cfgs.push(if let &Some(ref value) = value { + format!("{}=\"{}\"", name, value) + } else { + format!("{}", name) + }); + } + + cfgs.sort(); + for cfg in cfgs { + println!("{}", cfg); + } + } + PrintRequest::TargetCPUs => { + let tm = create_target_machine(sess); + unsafe { 
llvm::LLVMRustPrintTargetCPUs(tm); } + } + PrintRequest::TargetFeatures => { + let tm = create_target_machine(sess); + unsafe { llvm::LLVMRustPrintTargetFeatures(tm); } + } + PrintRequest::RelocationModels => { + println!("Available relocation models:"); + for &(name, _) in RELOC_MODEL_ARGS.iter() { + println!(" {}", name); + } + println!(""); + } + PrintRequest::CodeModels => { + println!("Available code models:"); + for &(name, _) in CODE_GEN_MODEL_ARGS.iter(){ + println!(" {}", name); + } + println!(""); + } } } return Compilation::Stop; @@ -518,6 +695,10 @@ pub fn version(binary: &str, matches: &getopts::Matches) { println!("commit-date: {}", unw(commit_date_str())); println!("host: {}", config::host_triple()); println!("release: {}", unw(release_str())); + unsafe { + println!("LLVM version: {}.{}", + llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor()); + } } } @@ -591,7 +772,7 @@ Available lint options: let (plugin_groups, builtin_groups): (Vec<_>, _) = lint_store.get_lint_groups() .iter() .cloned() - .partition(|&(_, _, p)| p); + .partition(|&(.., p)| p); let plugin_groups = sort_lint_groups(plugin_groups); let builtin_groups = sort_lint_groups(builtin_groups); @@ -651,7 +832,7 @@ Available lint options: for (name, to) in lints { let name = name.to_lowercase().replace("_", "-"); let desc = to.into_iter() - .map(|x| x.as_str().replace("_", "-")) + .map(|x| x.to_string().replace("_", "-")) .collect::>() .join(", "); println!(" {} {}", padded(&name[..]), desc); @@ -666,7 +847,7 @@ Available lint options: println!("Compiler plugins can provide additional lints and lint groups. To see a \ listing of these, re-run `rustc -W help` with a crate filename."); } - (false, _, _) => panic!("didn't load lint plugins but got them anyway!"), + (false, ..) 
=> panic!("didn't load lint plugins but got them anyway!"), (true, 0, 0) => println!("This crate does not load any lint plugins or lint groups."), (true, l, g) => { if l > 0 { @@ -719,11 +900,34 @@ fn print_flag_list(cmdline_opt: &str, } /// Process command line options. Emits messages as appropriate. If compilation -/// should continue, returns a getopts::Matches object parsed from args, otherwise -/// returns None. -pub fn handle_options(mut args: Vec) -> Option { +/// should continue, returns a getopts::Matches object parsed from args, +/// otherwise returns None. +/// +/// The compiler's handling of options is a little complication as it ties into +/// our stability story, and it's even *more* complicated by historical +/// accidents. The current intention of each compiler option is to have one of +/// three modes: +/// +/// 1. An option is stable and can be used everywhere. +/// 2. An option is unstable, but was historically allowed on the stable +/// channel. +/// 3. An option is unstable, and can only be used on nightly. +/// +/// Like unstable library and language features, however, unstable options have +/// always required a form of "opt in" to indicate that you're using them. This +/// provides the easy ability to scan a code base to check to see if anything +/// unstable is being used. Currently, this "opt in" is the `-Z` "zed" flag. +/// +/// All options behind `-Z` are considered unstable by default. Other top-level +/// options can also be considered unstable, and they were unlocked through the +/// `-Z unstable-options` flag. Note that `-Z` remains to be the root of +/// instability in both cases, though. +/// +/// So with all that in mind, the comments below have some more detail about the +/// contortions done here to get things to work out correctly. 
+pub fn handle_options(args: &[String]) -> Option { // Throw away the first argument, the name of the binary - let _binary = args.remove(0); + let args = &args[1..]; if args.is_empty() { // user did not write `-v` nor `-Z unstable-options`, so do not @@ -732,67 +936,43 @@ pub fn handle_options(mut args: Vec) -> Option { return None; } - fn allows_unstable_options(matches: &getopts::Matches) -> bool { - let r = matches.opt_strs("Z"); - r.iter().any(|x| *x == "unstable-options") - } - - fn parse_all_options(args: &Vec) -> getopts::Matches { - let all_groups: Vec = config::rustc_optgroups() - .into_iter() - .map(|x| x.opt_group) - .collect(); - match getopts::getopts(&args[..], &all_groups) { - Ok(m) => { - if !allows_unstable_options(&m) { - // If -Z unstable-options was not specified, verify that - // no unstable options were present. - for opt in config::rustc_optgroups().into_iter().filter(|x| !x.is_stable()) { - let opt_name = if !opt.opt_group.long_name.is_empty() { - &opt.opt_group.long_name - } else { - &opt.opt_group.short_name - }; - if m.opt_present(opt_name) { - early_error(ErrorOutputType::default(), - &format!("use of unstable option '{}' requires -Z \ - unstable-options", - opt_name)); - } - } - } - m - } - Err(f) => early_error(ErrorOutputType::default(), &f.to_string()), - } - } - - // As a speed optimization, first try to parse the command-line using just - // the stable options. - let matches = match getopts::getopts(&args[..], &config::optgroups()) { - Ok(ref m) if allows_unstable_options(m) => { - // If -Z unstable-options was specified, redo parsing with the - // unstable options to ensure that unstable options are defined - // in the returned getopts::Matches. - parse_all_options(&args) - } + // Parse with *all* options defined in the compiler, we don't worry about + // option stability here we just want to parse as much as possible. 
+ let all_groups: Vec = config::rustc_optgroups() + .into_iter() + .map(|x| x.opt_group) + .collect(); + let matches = match getopts::getopts(&args[..], &all_groups) { Ok(m) => m, - Err(_) => { - // redo option parsing, including unstable options this time, - // in anticipation that the mishandled option was one of the - // unstable ones. - parse_all_options(&args) - } + Err(f) => early_error(ErrorOutputType::default(), &f.to_string()), }; + // For all options we just parsed, we check a few aspects: + // + // * If the option is stable, we're all good + // * If the option wasn't passed, we're all good + // * If `-Z unstable-options` wasn't passed (and we're not a -Z option + // ourselves), then we require the `-Z unstable-options` flag to unlock + // this option that was passed. + // * If we're a nightly compiler, then unstable options are now unlocked, so + // we're good to go. + // * Otherwise, if we're a truly unstable option then we generate an error + // (unstable option being used on stable) + // * If we're a historically stable-but-should-be-unstable option then we + // emit a warning that we're going to turn this into an error soon. + nightly_options::check_nightly_options(&matches, &config::rustc_optgroups()); + if matches.opt_present("h") || matches.opt_present("help") { + // Only show unstable options in --help if we *really* accept unstable + // options, which catches the case where we got `-Z unstable-options` on + // the stable channel of Rust which was accidentally allowed + // historically. usage(matches.opt_present("verbose"), - allows_unstable_options(&matches)); + nightly_options::is_unstable_enabled(&matches)); return None; } // Don't handle -W help here, because we might first load plugins. 
- let r = matches.opt_strs("Z"); if r.iter().any(|x| *x == "help") { describe_debug_flags(); @@ -805,6 +985,11 @@ pub fn handle_options(mut args: Vec) -> Option { return None; } + if cg_flags.iter().any(|x| *x == "no-stack-check") { + early_warn(ErrorOutputType::default(), + "the --no-stack-check flag is deprecated and does nothing"); + } + if cg_flags.contains(&"passes=list".to_string()) { unsafe { ::llvm::LLVMRustPrintPasses(); @@ -820,19 +1005,15 @@ pub fn handle_options(mut args: Vec) -> Option { Some(matches) } -fn parse_crate_attrs(sess: &Session, input: &Input) -> Vec { - let result = match *input { +fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec> { + match *input { Input::File(ref ifile) => { - parse::parse_crate_attrs_from_file(ifile, Vec::new(), &sess.parse_sess) + parse::parse_crate_attrs_from_file(ifile, &sess.parse_sess) } - Input::Str(ref src) => { - parse::parse_crate_attrs_from_source_str(driver::anon_src().to_string(), - src.to_string(), - Vec::new(), - &sess.parse_sess) + Input::Str { ref name, ref input } => { + parse::parse_crate_attrs_from_source_str(name.clone(), input.clone(), &sess.parse_sess) } - }; - result.into_iter().collect() + } } /// Run a procedure which will detect panics in the compiler and print nicer @@ -841,7 +1022,8 @@ fn parse_crate_attrs(sess: &Session, input: &Input) -> Vec { /// The diagnostic emitter yielded to the procedure should be used for reporting /// errors of the compiler. 
pub fn monitor(f: F) { - const STACK_SIZE: usize = 8 * 1024 * 1024; // 8MB + // Temporarily have stack size set to 16MB to deal with nom-using crates failing + const STACK_SIZE: usize = 16 * 1024 * 1024; // 16MB struct Sink(Arc>>); impl Write for Sink { @@ -864,52 +1046,59 @@ pub fn monitor(f: F) { cfg = cfg.stack_size(STACK_SIZE); } - match cfg.spawn(move || { - io::set_panic(box err); - f() - }) - .unwrap() - .join() { - Ok(()) => { - // fallthrough - } - Err(value) => { - // Thread panicked without emitting a fatal diagnostic - if !value.is::() { - let mut emitter = errors::emitter::BasicEmitter::stderr(errors::ColorConfig::Auto); - - // a .span_bug or .bug call has already printed what - // it wants to print. - if !value.is::() { - emitter.emit(None, "unexpected panic", None, errors::Level::Bug); - } - - let xs = ["the compiler unexpectedly panicked. this is a bug.".to_string(), - format!("we would appreciate a bug report: {}", BUG_REPORT_URL)]; - for note in &xs { - emitter.emit(None, ¬e[..], None, errors::Level::Note) - } - if let None = env::var_os("RUST_BACKTRACE") { - emitter.emit(None, - "run with `RUST_BACKTRACE=1` for a backtrace", - None, - errors::Level::Note); - } + let thread = cfg.spawn(move || { + io::set_panic(Some(box err)); + f() + }); + + if let Err(value) = thread.unwrap().join() { + // Thread panicked without emitting a fatal diagnostic + if !value.is::() { + let emitter = + Box::new(errors::emitter::EmitterWriter::stderr(errors::ColorConfig::Auto, None)); + let handler = errors::Handler::with_emitter(true, false, emitter); + + // a .span_bug or .bug call has already printed what + // it wants to print. + if !value.is::() { + handler.emit(&MultiSpan::new(), + "unexpected panic", + errors::Level::Bug); + } - println!("{}", str::from_utf8(&data.lock().unwrap()).unwrap()); + let xs = ["the compiler unexpectedly panicked. 
this is a bug.".to_string(), + format!("we would appreciate a bug report: {}", BUG_REPORT_URL)]; + for note in &xs { + handler.emit(&MultiSpan::new(), + ¬e[..], + errors::Level::Note); + } + if match env::var_os("RUST_BACKTRACE") { + Some(val) => &val != "0", + None => false, + } { + handler.emit(&MultiSpan::new(), + "run with `RUST_BACKTRACE=1` for a backtrace", + errors::Level::Note); } - // Panic so the process returns a failure code, but don't pollute the - // output with some unnecessary panic messages, we've already - // printed everything that we needed to. - io::set_panic(box io::sink()); - panic!(); + writeln!(io::stderr(), "{}", str::from_utf8(&data.lock().unwrap()).unwrap()).unwrap(); } + + exit_on_err(); } } -pub fn diagnostics_registry() -> diagnostics::registry::Registry { - use syntax::diagnostics::registry::Registry; +fn exit_on_err() -> ! { + // Panic so the process returns a failure code, but don't pollute the + // output with some unnecessary panic messages, we've already + // printed everything that we needed to. 
+ io::set_panic(Some(box io::sink())); + panic!(); +} + +pub fn diagnostics_registry() -> errors::registry::Registry { + use errors::registry::Registry; let mut all_errors = Vec::new(); all_errors.extend_from_slice(&rustc::DIAGNOSTICS); @@ -918,11 +1107,16 @@ pub fn diagnostics_registry() -> diagnostics::registry::Registry { all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); all_errors.extend_from_slice(&rustc_trans::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_const_eval::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); - Registry::new(&*all_errors) + Registry::new(&all_errors) } pub fn main() { - let result = run(env::args().collect()); + let result = run(|| run_compiler(&env::args().collect::>(), + &mut RustcDefaultCalls, + None, + None)); process::exit(result as i32); } diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index ba5ecc22e7474..b055b043723e4 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -15,41 +15,42 @@ pub use self::PpSourceMode::*; pub use self::PpMode::*; use self::NodesMatchingUII::*; -use rustc_trans::back::link; +use abort_on_err; +use driver::{self, Resolutions}; -use driver; - -use rustc::middle::ty; -use rustc::middle::cfg; -use rustc::middle::cfg::graphviz::LabelledCFG; +use rustc::ty::{self, TyCtxt}; +use rustc::cfg; +use rustc::cfg::graphviz::LabelledCFG; +use rustc::dep_graph::DepGraph; use rustc::session::Session; use rustc::session::config::Input; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; -use rustc_resolve as resolve; -use rustc_metadata::cstore::CStore; -use syntax::ast; -use syntax::codemap; +use rustc_mir::pretty::write_mir_pretty; +use rustc_mir::graphviz::write_mir_graphviz; + +use syntax::ast::{self, BlockCheckMode}; use syntax::fold::{self, Folder}; use syntax::print::{pp, pprust}; use syntax::print::pprust::PrintState; use 
syntax::ptr::P; use syntax::util::small_vector::SmallVector; +use syntax_pos; use graphviz as dot; use std::fs::File; use std::io::{self, Write}; +use std::iter; use std::option; -use std::path::PathBuf; +use std::path::Path; use std::str::FromStr; -use rustc::front::map as hir_map; -use rustc::front::map::{blocks, NodePrinter}; -use rustc_front::hir; -use rustc_front::lowering::{lower_crate, LoweringContext}; -use rustc_front::print::pprust as pprust_hir; +use rustc::hir::map as hir_map; +use rustc::hir::map::{blocks, NodePrinter}; +use rustc::hir; +use rustc::hir::print as pprust_hir; #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpSourceMode { @@ -76,6 +77,34 @@ pub enum PpMode { PpmSource(PpSourceMode), PpmHir(PpSourceMode), PpmFlowGraph(PpFlowGraphMode), + PpmMir, + PpmMirCFG, +} + +impl PpMode { + pub fn needs_ast_map(&self, opt_uii: &Option) -> bool { + match *self { + PpmSource(PpmNormal) | + PpmSource(PpmEveryBodyLoops) | + PpmSource(PpmIdentified) => opt_uii.is_some(), + + PpmSource(PpmExpanded) | + PpmSource(PpmExpandedIdentified) | + PpmSource(PpmExpandedHygiene) | + PpmHir(_) | + PpmMir | + PpmMirCFG | + PpmFlowGraph(_) => true, + PpmSource(PpmTyped) => panic!("invalid state"), + } + } + + pub fn needs_analysis(&self) -> bool { + match *self { + PpmMir | PpmMirCFG | PpmFlowGraph(_) => true, + _ => false, + } + } } pub fn parse_pretty(sess: &Session, @@ -95,6 +124,8 @@ pub fn parse_pretty(sess: &Session, ("hir", true) => PpmHir(PpmNormal), ("hir,identified", true) => PpmHir(PpmIdentified), ("hir,typed", true) => PpmHir(PpmTyped), + ("mir", true) => PpmMir, + ("mir-cfg", true) => PpmMirCFG, ("flowgraph", true) => PpmFlowGraph(PpFlowGraphMode::Default), ("flowgraph,unlabelled", true) => PpmFlowGraph(PpFlowGraphMode::UnlabelledEdges), _ => { @@ -102,7 +133,7 @@ pub fn parse_pretty(sess: &Session, sess.fatal(&format!("argument to `unpretty` must be one of `normal`, \ `expanded`, `flowgraph[,unlabelled]=`, \ `identified`, `expanded,identified`, 
`everybody_loops`, \ - `hir`, `hir,identified`, or `hir,typed`; got {}", + `hir`, `hir,identified`, `hir,typed`, or `mir`; got {}", name)); } else { sess.fatal(&format!("argument to `pretty` must be one of `normal`, `expanded`, \ @@ -134,7 +165,7 @@ impl PpSourceMode { /// Constructs a `PrinterSupport` object and passes it to `f`. fn call_with_pp_support<'tcx, A, B, F>(&self, sess: &'tcx Session, - ast_map: Option>, + ast_map: Option<&hir_map::Map<'tcx>>, payload: B, f: F) -> A @@ -144,7 +175,7 @@ impl PpSourceMode { PpmNormal | PpmEveryBodyLoops | PpmExpanded => { let annotation = NoAnn { sess: sess, - ast_map: ast_map, + ast_map: ast_map.map(|m| m.clone()), }; f(&annotation, payload) } @@ -152,14 +183,14 @@ impl PpSourceMode { PpmIdentified | PpmExpandedIdentified => { let annotation = IdentifiedAnnotation { sess: sess, - ast_map: ast_map, + ast_map: ast_map.map(|m| m.clone()), }; f(&annotation, payload) } PpmExpandedHygiene => { let annotation = HygieneAnnotation { sess: sess, - ast_map: ast_map, + ast_map: ast_map.map(|m| m.clone()), }; f(&annotation, payload) } @@ -168,8 +199,9 @@ impl PpSourceMode { } fn call_with_pp_support_hir<'tcx, A, B, F>(&self, sess: &'tcx Session, - cstore: &CStore, ast_map: &hir_map::Map<'tcx>, + analysis: &ty::CrateAnalysis<'tcx>, + resolutions: &Resolutions, arenas: &'tcx ty::CtxtArenas<'tcx>, id: &str, payload: B, @@ -183,7 +215,7 @@ impl PpSourceMode { sess: sess, ast_map: Some(ast_map.clone()), }; - f(&annotation, payload, &ast_map.forest.krate) + f(&annotation, payload, ast_map.forest.krate()) } PpmIdentified => { @@ -191,24 +223,21 @@ impl PpSourceMode { sess: sess, ast_map: Some(ast_map.clone()), }; - f(&annotation, payload, &ast_map.forest.krate) + f(&annotation, payload, ast_map.forest.krate()) } PpmTyped => { - driver::phase_3_run_analysis_passes(sess, - cstore, - ast_map.clone(), - arenas, - id, - resolve::MakeGlobMap::No, - |tcx, _, _| { - let annotation = TypedAnnotation { - tcx: tcx, - }; - let _ignore = 
tcx.dep_graph.in_ignore(); - f(&annotation, - payload, - &ast_map.forest.krate) - }) + abort_on_err(driver::phase_3_run_analysis_passes(sess, + ast_map.clone(), + analysis.clone(), + resolutions.clone(), + arenas, + id, + |tcx, _, _, _| { + let annotation = TypedAnnotation { tcx: tcx }; + let _ignore = tcx.dep_graph.in_ignore(); + f(&annotation, payload, ast_map.forest.krate()) + }), + sess) } _ => panic!("Should use call_with_pp_support"), } @@ -245,6 +274,17 @@ trait HirPrinterSupport<'ast>: pprust_hir::PpAnn { /// (Rust does not yet support upcasting from a trait object to /// an object for one of its super-traits.) fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn; + + /// Computes an user-readable representation of a path, if possible. + fn node_path(&self, id: ast::NodeId) -> Option { + self.ast_map().and_then(|map| map.def_path_from_id(id)).map(|path| { + path.data + .into_iter() + .map(|elem| elem.data.to_string()) + .collect::>() + .join("::") + }) + } } struct NoAnn<'ast> { @@ -311,27 +351,28 @@ impl<'ast> pprust::PpAnn for IdentifiedAnnotation<'ast> { } fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { - pprust::NodeIdent(_) | pprust::NodeName(_) => Ok(()), + pprust::NodeIdent(_) | + pprust::NodeName(_) => Ok(()), pprust::NodeItem(item) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(item.id.to_string()) } pprust::NodeSubItem(id) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(id.to_string()) } pprust::NodeBlock(blk) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(format!("block {}", blk.id)) } pprust::NodeExpr(expr) => { - try!(pp::space(&mut s.s)); - try!(s.synth_comment(expr.id.to_string())); + pp::space(&mut s.s)?; + s.synth_comment(expr.id.to_string())?; s.pclose() } pprust::NodePat(pat) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(format!("pat {}", pat.id)) } } @@ -363,24 +404,24 @@ impl<'ast> 
pprust_hir::PpAnn for IdentifiedAnnotation<'ast> { match node { pprust_hir::NodeName(_) => Ok(()), pprust_hir::NodeItem(item) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(item.id.to_string()) } pprust_hir::NodeSubItem(id) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(id.to_string()) } pprust_hir::NodeBlock(blk) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(format!("block {}", blk.id)) } pprust_hir::NodeExpr(expr) => { - try!(pp::space(&mut s.s)); - try!(s.synth_comment(expr.id.to_string())); + pp::space(&mut s.s)?; + s.synth_comment(expr.id.to_string())?; s.pclose() } pprust_hir::NodePat(pat) => { - try!(pp::space(&mut s.s)); + pp::space(&mut s.s)?; s.synth_comment(format!("pat {}", pat.id)) } } @@ -409,15 +450,15 @@ impl<'ast> PrinterSupport<'ast> for HygieneAnnotation<'ast> { impl<'ast> pprust::PpAnn for HygieneAnnotation<'ast> { fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { match node { - pprust::NodeIdent(&ast::Ident { name: ast::Name(nm), ctxt }) => { - try!(pp::space(&mut s.s)); + pprust::NodeIdent(&ast::Ident { name, ctxt }) => { + pp::space(&mut s.s)?; // FIXME #16420: this doesn't display the connections // between syntax contexts - s.synth_comment(format!("{}#{}", nm, ctxt.0)) + s.synth_comment(format!("{}{:?}", name.as_u32(), ctxt)) } - pprust::NodeName(&ast::Name(nm)) => { - try!(pp::space(&mut s.s)); - s.synth_comment(nm.to_string()) + pprust::NodeName(&name) => { + pp::space(&mut s.s)?; + s.synth_comment(name.as_u32().to_string()) } _ => Ok(()), } @@ -426,7 +467,7 @@ impl<'ast> pprust::PpAnn for HygieneAnnotation<'ast> { struct TypedAnnotation<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, } impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { @@ -441,6 +482,10 @@ impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { fn pp_ann<'a>(&'a self) -> &'a pprust_hir::PpAnn { self 
} + + fn node_path(&self, id: ast::NodeId) -> Option { + Some(self.tcx.node_path_str(id)) + } } impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { @@ -453,10 +498,10 @@ impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { match node { pprust_hir::NodeExpr(expr) => { - try!(pp::space(&mut s.s)); - try!(pp::word(&mut s.s, "as")); - try!(pp::space(&mut s.s)); - try!(pp::word(&mut s.s, &self.tcx.expr_ty(expr).to_string())); + pp::space(&mut s.s)?; + pp::word(&mut s.s, "as")?; + pp::space(&mut s.s)?; + pp::word(&mut s.s, &self.tcx.tables().expr_ty(expr).to_string())?; s.pclose() } _ => Ok(()), @@ -492,6 +537,7 @@ impl FromStr for UserIdentifiedItem { type Err = (); fn from_str(s: &str) -> Result { Ok(s.parse() + .map(ast::NodeId::new) .map(ItemViaNode) .unwrap_or_else(|_| ItemViaPath(s.split("::").map(|s| s.to_string()).collect()))) } @@ -558,36 +604,6 @@ impl UserIdentifiedItem { } } -fn needs_ast_map(ppm: &PpMode, opt_uii: &Option) -> bool { - match *ppm { - PpmSource(PpmNormal) | - PpmSource(PpmEveryBodyLoops) | - PpmSource(PpmIdentified) => opt_uii.is_some(), - - PpmSource(PpmExpanded) | - PpmSource(PpmExpandedIdentified) | - PpmSource(PpmExpandedHygiene) | - PpmHir(_) | - PpmFlowGraph(_) => true, - PpmSource(PpmTyped) => panic!("invalid state"), - } -} - -fn needs_expansion(ppm: &PpMode) -> bool { - match *ppm { - PpmSource(PpmNormal) | - PpmSource(PpmEveryBodyLoops) | - PpmSource(PpmIdentified) => false, - - PpmSource(PpmExpanded) | - PpmSource(PpmExpandedIdentified) | - PpmSource(PpmExpandedHygiene) | - PpmHir(_) | - PpmFlowGraph(_) => true, - PpmSource(PpmTyped) => panic!("invalid state"), - } -} - struct ReplaceBodyWithLoop { within_static_or_const: bool, } @@ -599,23 +615,22 @@ impl ReplaceBodyWithLoop { } impl fold::Folder for ReplaceBodyWithLoop { - fn fold_item_underscore(&mut self, i: ast::Item_) -> ast::Item_ { + fn fold_item_kind(&mut self, 
i: ast::ItemKind) -> ast::ItemKind { match i { - ast::ItemStatic(..) | ast::ItemConst(..) => { + ast::ItemKind::Static(..) | + ast::ItemKind::Const(..) => { self.within_static_or_const = true; - let ret = fold::noop_fold_item_underscore(i, self); + let ret = fold::noop_fold_item_kind(i, self); self.within_static_or_const = false; return ret; } - _ => { - fold::noop_fold_item_underscore(i, self) - } + _ => fold::noop_fold_item_kind(i, self), } } - fn fold_trait_item(&mut self, i: P) -> SmallVector> { + fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVector { match i.node { - ast::ConstTraitItem(..) => { + ast::TraitItemKind::Const(..) => { self.within_static_or_const = true; let ret = fold::noop_fold_trait_item(i, self); self.within_static_or_const = false; @@ -625,7 +640,7 @@ impl fold::Folder for ReplaceBodyWithLoop { } } - fn fold_impl_item(&mut self, i: P) -> SmallVector> { + fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVector { match i.node { ast::ImplItemKind::Const(..) 
=> { self.within_static_or_const = true; @@ -640,22 +655,29 @@ impl fold::Folder for ReplaceBodyWithLoop { fn fold_block(&mut self, b: P) -> P { fn expr_to_block(rules: ast::BlockCheckMode, e: Option>) -> P { P(ast::Block { - expr: e, - stmts: vec![], + stmts: e.map(|e| { + ast::Stmt { + id: ast::DUMMY_NODE_ID, + span: e.span, + node: ast::StmtKind::Expr(e), + } + }) + .into_iter() + .collect(), rules: rules, id: ast::DUMMY_NODE_ID, - span: codemap::DUMMY_SP, + span: syntax_pos::DUMMY_SP, }) } if !self.within_static_or_const { - let empty_block = expr_to_block(ast::DefaultBlock, None); + let empty_block = expr_to_block(BlockCheckMode::Default, None); let loop_expr = P(ast::Expr { - node: ast::ExprLoop(empty_block, None), + node: ast::ExprKind::Loop(empty_block, None), id: ast::DUMMY_NODE_ID, - span: codemap::DUMMY_SP, - attrs: None, + span: syntax_pos::DUMMY_SP, + attrs: ast::ThinVec::new(), }); expr_to_block(b.rules, Some(loop_expr)) @@ -672,203 +694,18 @@ impl fold::Folder for ReplaceBodyWithLoop { } } -pub fn pretty_print_input(sess: Session, - cstore: &CStore, - cfg: ast::CrateConfig, - input: &Input, - ppm: PpMode, - opt_uii: Option, - ofile: Option) { - let krate = driver::phase_1_parse_input(&sess, cfg, input); - - let krate = if let PpmSource(PpmEveryBodyLoops) = ppm { - let mut fold = ReplaceBodyWithLoop::new(); - fold.fold_crate(krate) - } else { - krate - }; - - let id = link::find_crate_name(Some(&sess), &krate.attrs, input); - - let is_expanded = needs_expansion(&ppm); - let compute_ast_map = needs_ast_map(&ppm, &opt_uii); - let krate = if compute_ast_map { - match driver::phase_2_configure_and_expand(&sess, &cstore, krate, &id[..], None) { - None => return, - Some(k) => driver::assign_node_ids(&sess, k), - } - } else { - krate - }; - - // There is some twisted, god-forsaken tangle of lifetimes here which makes - // the ordering of stuff super-finicky. 
- let mut hir_forest; - let lcx = LoweringContext::new(&sess, Some(&krate)); - let arenas = ty::CtxtArenas::new(); - let ast_map = if compute_ast_map { - hir_forest = hir_map::Forest::new(lower_crate(&lcx, &krate)); - let map = driver::make_map(&sess, &mut hir_forest); - Some(map) - } else { - None - }; - - let src_name = driver::source_name(input); - let src = sess.codemap() - .get_filemap(&src_name[..]) - .src - .as_ref() - .unwrap() - .as_bytes() - .to_vec(); - let mut rdr = &src[..]; - - let mut out = Vec::new(); - - match (ppm, opt_uii) { - (PpmSource(s), _) => { - // Silently ignores an identified node. - let out: &mut Write = &mut out; - s.call_with_pp_support(&sess, ast_map, box out, |annotation, out| { - debug!("pretty printing source code {:?}", s); - let sess = annotation.sess(); - pprust::print_crate(sess.codemap(), - sess.diagnostic(), - &krate, - src_name.to_string(), - &mut rdr, - out, - annotation.pp_ann(), - is_expanded) - }) - } - - (PpmHir(s), None) => { - let out: &mut Write = &mut out; - s.call_with_pp_support_hir(&sess, - cstore, - &ast_map.unwrap(), - &arenas, - &id, - box out, - |annotation, out, krate| { - debug!("pretty printing source code {:?}", s); - let sess = annotation.sess(); - pprust_hir::print_crate(sess.codemap(), - sess.diagnostic(), - krate, - src_name.to_string(), - &mut rdr, - out, - annotation.pp_ann(), - is_expanded) - }) - } - - (PpmHir(s), Some(uii)) => { - let out: &mut Write = &mut out; - s.call_with_pp_support_hir(&sess, - cstore, - &ast_map.unwrap(), - &arenas, - &id, - (out,uii), - |annotation, (out,uii), _| { - debug!("pretty printing source code {:?}", s); - let sess = annotation.sess(); - let ast_map = annotation.ast_map().expect("--pretty missing ast_map"); - let mut pp_state = - pprust_hir::State::new_from_input(sess.codemap(), - sess.diagnostic(), - src_name.to_string(), - &mut rdr, - box out, - annotation.pp_ann(), - true, - Some(ast_map.krate())); - for node_id in uii.all_matching_node_ids(ast_map) { - let 
node = ast_map.get(node_id); - try!(pp_state.print_node(&node)); - try!(pp::space(&mut pp_state.s)); - try!(pp_state.synth_comment(ast_map.path_to_string(node_id))); - try!(pp::hardbreak(&mut pp_state.s)); - } - pp::eof(&mut pp_state.s) - }) - } - - (PpmFlowGraph(mode), opt_uii) => { - debug!("pretty printing flow graph for {:?}", opt_uii); - let uii = opt_uii.unwrap_or_else(|| { - sess.fatal(&format!("`pretty flowgraph=..` needs NodeId (int) or - \ - unique path suffix (b::c::d)")) - - }); - let ast_map = ast_map.expect("--pretty flowgraph missing ast_map"); - let nodeid = uii.to_one_node_id("--pretty", &sess, &ast_map); - - let node = ast_map.find(nodeid).unwrap_or_else(|| { - sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid)) - }); - - let code = blocks::Code::from_node(node); - let out: &mut Write = &mut out; - match code { - Some(code) => { - let variants = gather_flowgraph_variants(&sess); - driver::phase_3_run_analysis_passes(&sess, - &cstore, - ast_map, - &arenas, - &id, - resolve::MakeGlobMap::No, - |tcx, _, _| { - print_flowgraph(variants, - tcx, - code, - mode, - out) - }) - } - None => { - let message = format!("--pretty=flowgraph needs block, fn, or method; got \ - {:?}", - node); - - // point to what was found, if there's an - // accessible span. 
- match ast_map.opt_span(nodeid) { - Some(sp) => sess.span_fatal(sp, &message[..]), - None => sess.fatal(&message[..]), - } - } - } - } - } - .unwrap(); - - match ofile { - None => print!("{}", String::from_utf8(out).unwrap()), - Some(p) => { - match File::create(&p) { - Ok(mut w) => w.write_all(&out).unwrap(), - Err(e) => panic!("print-print failed to open {} due to {}", p.display(), e), - } - } - } -} - -fn print_flowgraph(variants: Vec, - tcx: &ty::ctxt, - code: blocks::Code, - mode: PpFlowGraphMode, - mut out: W) - -> io::Result<()> { +fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + code: blocks::Code<'tcx>, + mode: PpFlowGraphMode, + mut out: W) + -> io::Result<()> { let cfg = match code { - blocks::BlockCode(block) => cfg::CFG::new(tcx, &*block), - blocks::FnLikeCode(fn_like) => cfg::CFG::new(tcx, &*fn_like.body()), + blocks::Code::Expr(expr) => cfg::CFG::new(tcx, expr), + blocks::Code::FnLike(fn_like) => { + let body = tcx.map.expr(fn_like.body()); + cfg::CFG::new(tcx, body) + }, }; let labelled_edges = mode != PpFlowGraphMode::UnlabelledEdges; let lcfg = LabelledCFG { @@ -883,16 +720,14 @@ fn print_flowgraph(variants: Vec, let r = dot::render(&lcfg, &mut out); return expand_err_details(r); } - blocks::BlockCode(_) => { + blocks::Code::Expr(_) => { tcx.sess.err("--pretty flowgraph with -Z flowgraph-print annotations requires \ fn-like node id."); return Ok(()); } - blocks::FnLikeCode(fn_like) => { + blocks::Code::FnLike(fn_like) => { let (bccx, analysis_data) = - borrowck::build_borrowck_dataflow_data_for_fn(tcx, - fn_like.to_fn_parts(), - &cfg); + borrowck::build_borrowck_dataflow_data_for_fn(tcx, fn_like.to_fn_parts(), &cfg); let lcfg = borrowck_dot::DataflowLabeller { inner: lcfg, @@ -912,3 +747,274 @@ fn print_flowgraph(variants: Vec, }) } } + +pub fn fold_crate(krate: ast::Crate, ppm: PpMode) -> ast::Crate { + if let PpmSource(PpmEveryBodyLoops) = ppm { + let mut fold = ReplaceBodyWithLoop::new(); + 
fold.fold_crate(krate) + } else { + krate + } +} + +fn get_source(input: &Input, sess: &Session) -> (Vec, String) { + let src_name = driver::source_name(input); + let src = sess.codemap() + .get_filemap(&src_name) + .unwrap() + .src + .as_ref() + .unwrap() + .as_bytes() + .to_vec(); + (src, src_name) +} + +fn write_output(out: Vec, ofile: Option<&Path>) { + match ofile { + None => print!("{}", String::from_utf8(out).unwrap()), + Some(p) => { + match File::create(p) { + Ok(mut w) => w.write_all(&out).unwrap(), + Err(e) => panic!("print-print failed to open {} due to {}", p.display(), e), + } + } + } +} + +pub fn print_after_parsing(sess: &Session, + input: &Input, + krate: &ast::Crate, + ppm: PpMode, + ofile: Option<&Path>) { + let dep_graph = DepGraph::new(false); + let _ignore = dep_graph.in_ignore(); + + let (src, src_name) = get_source(input, sess); + + let mut rdr = &*src; + let mut out = Vec::new(); + + if let PpmSource(s) = ppm { + // Silently ignores an identified node. + let out: &mut Write = &mut out; + s.call_with_pp_support(sess, None, box out, |annotation, out| { + debug!("pretty printing source code {:?}", s); + let sess = annotation.sess(); + pprust::print_crate(sess.codemap(), + sess.diagnostic(), + krate, + src_name.to_string(), + &mut rdr, + out, + annotation.pp_ann(), + false) + }) + .unwrap() + } else { + unreachable!(); + }; + + write_output(out, ofile); +} + +pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, + ast_map: &hir_map::Map<'tcx>, + analysis: &ty::CrateAnalysis<'tcx>, + resolutions: &Resolutions, + input: &Input, + krate: &ast::Crate, + crate_name: &str, + ppm: PpMode, + arenas: &'tcx ty::CtxtArenas<'tcx>, + opt_uii: Option, + ofile: Option<&Path>) { + let dep_graph = DepGraph::new(false); + let _ignore = dep_graph.in_ignore(); + + if ppm.needs_analysis() { + print_with_analysis(sess, + ast_map, + analysis, + resolutions, + crate_name, + arenas, + ppm, + opt_uii, + ofile); + return; + } + + let (src, src_name) = 
get_source(input, sess); + + let mut rdr = &src[..]; + let mut out = Vec::new(); + + match (ppm, opt_uii) { + (PpmSource(s), _) => { + // Silently ignores an identified node. + let out: &mut Write = &mut out; + s.call_with_pp_support(sess, Some(ast_map), box out, |annotation, out| { + debug!("pretty printing source code {:?}", s); + let sess = annotation.sess(); + pprust::print_crate(sess.codemap(), + sess.diagnostic(), + krate, + src_name.to_string(), + &mut rdr, + out, + annotation.pp_ann(), + true) + }) + } + + (PpmHir(s), None) => { + let out: &mut Write = &mut out; + s.call_with_pp_support_hir(sess, + ast_map, + analysis, + resolutions, + arenas, + crate_name, + box out, + |annotation, out, krate| { + debug!("pretty printing source code {:?}", s); + let sess = annotation.sess(); + pprust_hir::print_crate(sess.codemap(), + sess.diagnostic(), + krate, + src_name.to_string(), + &mut rdr, + out, + annotation.pp_ann(), + true) + }) + } + + (PpmHir(s), Some(uii)) => { + let out: &mut Write = &mut out; + s.call_with_pp_support_hir(sess, + ast_map, + analysis, + resolutions, + arenas, + crate_name, + (out, uii), + |annotation, (out, uii), _| { + debug!("pretty printing source code {:?}", s); + let sess = annotation.sess(); + let ast_map = annotation.ast_map().expect("--unpretty missing HIR map"); + let mut pp_state = pprust_hir::State::new_from_input(sess.codemap(), + sess.diagnostic(), + src_name.to_string(), + &mut rdr, + box out, + annotation.pp_ann(), + true, + Some(ast_map.krate())); + for node_id in uii.all_matching_node_ids(ast_map) { + let node = ast_map.get(node_id); + pp_state.print_node(&node)?; + pp::space(&mut pp_state.s)?; + let path = annotation.node_path(node_id) + .expect("--unpretty missing node paths"); + pp_state.synth_comment(path)?; + pp::hardbreak(&mut pp_state.s)?; + } + pp::eof(&mut pp_state.s) + }) + } + _ => unreachable!(), + } + .unwrap(); + + write_output(out, ofile); +} + +// In an ideal world, this would be a public function called by 
the driver after +// analsysis is performed. However, we want to call `phase_3_run_analysis_passes` +// with a different callback than the standard driver, so that isn't easy. +// Instead, we call that function ourselves. +fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, + ast_map: &hir_map::Map<'tcx>, + analysis: &ty::CrateAnalysis<'tcx>, + resolutions: &Resolutions, + crate_name: &str, + arenas: &'tcx ty::CtxtArenas<'tcx>, + ppm: PpMode, + uii: Option, + ofile: Option<&Path>) { + let nodeid = if let Some(uii) = uii { + debug!("pretty printing for {:?}", uii); + Some(uii.to_one_node_id("--unpretty", sess, &ast_map)) + } else { + debug!("pretty printing for whole crate"); + None + }; + + let mut out = Vec::new(); + + abort_on_err(driver::phase_3_run_analysis_passes(sess, + ast_map.clone(), + analysis.clone(), + resolutions.clone(), + arenas, + crate_name, + |tcx, _, _, _| { + match ppm { + PpmMir | PpmMirCFG => { + if let Some(nodeid) = nodeid { + let def_id = tcx.map.local_def_id(nodeid); + match ppm { + PpmMir => write_mir_pretty(tcx, iter::once(def_id), &mut out), + PpmMirCFG => write_mir_graphviz(tcx, iter::once(def_id), &mut out), + _ => unreachable!(), + }?; + } else { + match ppm { + PpmMir => { + write_mir_pretty(tcx, tcx.mir_map.borrow().keys().into_iter(), &mut out) + } + PpmMirCFG => { + write_mir_graphviz(tcx, + tcx.mir_map.borrow().keys().into_iter(), + &mut out) + } + _ => unreachable!(), + }?; + } + Ok(()) + } + PpmFlowGraph(mode) => { + let nodeid = + nodeid.expect("`pretty flowgraph=..` needs NodeId (int) or unique path \ + suffix (b::c::d)"); + let node = tcx.map.find(nodeid).unwrap_or_else(|| { + tcx.sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid)) + }); + + match blocks::Code::from_node(&tcx.map, nodeid) { + Some(code) => { + let variants = gather_flowgraph_variants(tcx.sess); + + let out: &mut Write = &mut out; + + print_flowgraph(variants, tcx, code, mode, out) + } + None => { + let message = 
format!("--pretty=flowgraph needs block, fn, or method; \ + got {:?}", + node); + + tcx.sess.span_fatal(tcx.map.span(nodeid), &message) + } + } + } + _ => unreachable!(), + } + }), + sess) + .unwrap(); + + write_output(out, ofile); +} diff --git a/src/librustc_driver/target_features.rs b/src/librustc_driver/target_features.rs index 27ffb595a4051..124e7aafcc515 100644 --- a/src/librustc_driver/target_features.rs +++ b/src/librustc_driver/target_features.rs @@ -8,80 +8,72 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use syntax::{ast, attr}; +use syntax::ast; +use llvm::LLVMRustHasFeature; use rustc::session::Session; -use syntax::parse::token::InternedString; -use syntax::parse::token::intern_and_get_ident as intern; +use rustc_trans::back::write::create_target_machine; +use syntax::feature_gate::UnstableFeatures; +use syntax::symbol::Symbol; +use libc::c_char; + +// WARNING: the features must be known to LLVM or the feature +// detection code will walk past the end of the feature array, +// leading to crashes. + +const ARM_WHITELIST: &'static [&'static str] = &["neon\0", "vfp2\0", "vfp3\0", "vfp4\0"]; + +const X86_WHITELIST: &'static [&'static str] = &["avx\0", "avx2\0", "bmi\0", "bmi2\0", "sse\0", + "sse2\0", "sse3\0", "sse4.1\0", "sse4.2\0", + "ssse3\0", "tbm\0", "lzcnt\0", "popcnt\0", + "sse4a\0"]; /// Add `target_feature = "..."` cfgs for a variety of platform /// specific features (SSE, NEON etc.). /// -/// This uses a scheme similar to that employed by clang: reimplement -/// the target feature knowledge. *Theoretically* we could query LLVM -/// since that has perfect knowledge about what things are enabled in -/// code-generation, however, it is extremely non-obvious how to do -/// this successfully. Each platform defines a subclass of a -/// SubtargetInfo, which knows all this information, but the ways to -/// query them do not seem to be public. 
+/// This is performed by checking whether a whitelisted set of +/// features is available on the target machine, by querying LLVM. pub fn add_configuration(cfg: &mut ast::CrateConfig, sess: &Session) { - let tf = InternedString::new("target_feature"); - macro_rules! fillout { - ($($func: ident, $name: expr;)*) => {{ - $(if $func(sess) { - cfg.push(attr::mk_name_value_item_str(tf.clone(), intern($name))) - })* - }} - } - fillout! { - has_sse, "sse"; - has_sse2, "sse2"; - has_sse3, "sse3"; - has_ssse3, "ssse3"; - has_sse41, "sse4.1"; - has_sse42, "sse4.2"; - has_avx, "avx"; - has_avx2, "avx2"; - has_neon, "neon"; - has_vfp, "vfp"; + let target_machine = create_target_machine(sess); + + let whitelist = match &*sess.target.target.arch { + "arm" => ARM_WHITELIST, + "x86" | "x86_64" => X86_WHITELIST, + _ => &[], + }; + + let tf = Symbol::intern("target_feature"); + for feat in whitelist { + assert_eq!(feat.chars().last(), Some('\0')); + if unsafe { LLVMRustHasFeature(target_machine, feat.as_ptr() as *const c_char) } { + cfg.insert((tf, Some(Symbol::intern(&feat[..feat.len() - 1])))); + } } -} + let requested_features = sess.opts.cg.target_feature.split(','); + let unstable_options = sess.opts.debugging_opts.unstable_options; + let is_nightly = UnstableFeatures::from_environment().is_nightly_build(); + let found_negative = requested_features.clone().any(|r| r == "-crt-static"); + let found_positive = requested_features.clone().any(|r| r == "+crt-static"); -fn features_contain(sess: &Session, s: &str) -> bool { - sess.target.target.options.features.contains(s) || sess.opts.cg.target_feature.contains(s) -} + // If the target we're compiling for requests a static crt by default, + // then see if the `-crt-static` feature was passed to disable that. + // Otherwise if we don't have a static crt by default then see if the + // `+crt-static` feature was passed. 
+ let crt_static = if sess.target.target.options.crt_static_default { + !found_negative + } else { + found_positive + }; -pub fn has_sse(sess: &Session) -> bool { - features_contain(sess, "+sse") || has_sse2(sess) -} -pub fn has_sse2(sess: &Session) -> bool { - // x86-64 requires at least SSE2 support - sess.target.target.arch == "x86_64" || features_contain(sess, "+sse2") || has_sse3(sess) -} -pub fn has_sse3(sess: &Session) -> bool { - features_contain(sess, "+sse3") || has_ssse3(sess) -} -pub fn has_ssse3(sess: &Session) -> bool { - features_contain(sess, "+ssse3") || has_sse41(sess) -} -pub fn has_sse41(sess: &Session) -> bool { - features_contain(sess, "+sse4.1") || has_sse42(sess) -} -pub fn has_sse42(sess: &Session) -> bool { - features_contain(sess, "+sse4.2") || has_avx(sess) -} -pub fn has_avx(sess: &Session) -> bool { - features_contain(sess, "+avx") || has_avx2(sess) -} -pub fn has_avx2(sess: &Session) -> bool { - features_contain(sess, "+avx2") -} + // If we switched from the default then that's only allowed on nightly, so + // gate that here. + if (found_positive || found_negative) && (!is_nightly || !unstable_options) { + sess.fatal("specifying the `crt-static` target feature is only allowed \ + on the nightly channel with `-Z unstable-options` passed \ + as well"); + } -pub fn has_neon(sess: &Session) -> bool { - // AArch64 requires NEON support - sess.target.target.arch == "aarch64" || features_contain(sess, "+neon") -} -pub fn has_vfp(sess: &Session) -> bool { - // AArch64 requires VFP support - sess.target.target.arch == "aarch64" || features_contain(sess, "+vfp") + if crt_static { + cfg.insert((tf, Some(Symbol::intern("crt-static")))); + } } diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs index b19628baa88be..464e15faeaf75 100644 --- a/src/librustc_driver/test.rs +++ b/src/librustc_driver/test.rs @@ -11,39 +11,36 @@ //! 
# Standalone Tests for the Inference Module use driver; +use rustc::dep_graph::DepGraph; use rustc_lint; -use rustc_resolve as resolve; -use rustc_typeck::middle::lang_items; -use rustc_typeck::middle::free_region::FreeRegionMap; -use rustc_typeck::middle::region::{self, CodeExtent}; -use rustc_typeck::middle::region::CodeExtentData; -use rustc_typeck::middle::resolve_lifetime; -use rustc_typeck::middle::stability; -use rustc_typeck::middle::subst; -use rustc_typeck::middle::subst::Subst; -use rustc_typeck::middle::ty::{self, Ty, TypeFoldable}; -use rustc_typeck::middle::ty::relate::TypeRelation; -use rustc_typeck::middle::infer::{self, TypeOrigin}; -use rustc_typeck::middle::infer::lub::Lub; -use rustc_typeck::middle::infer::glb::Glb; -use rustc_typeck::middle::infer::sub::Sub; +use rustc_resolve::MakeGlobMap; +use rustc::middle::lang_items; +use rustc::middle::free_region::FreeRegionMap; +use rustc::middle::region::{self, CodeExtent}; +use rustc::middle::region::CodeExtentData; +use rustc::middle::resolve_lifetime; +use rustc::middle::stability; +use rustc::ty::subst::{Kind, Subst}; +use rustc::traits::{ObligationCause, Reveal}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::infer::{self, InferOk, InferResult}; use rustc_metadata::cstore::CStore; -use rustc::front::map as hir_map; +use rustc::hir::map as hir_map; use rustc::session::{self, config}; use std::rc::Rc; -use syntax::{abi, ast}; -use syntax::codemap::{Span, CodeMap, DUMMY_SP}; -use syntax::errors; -use syntax::errors::emitter::Emitter; -use syntax::errors::{Level, RenderSpan}; -use syntax::parse::token; +use syntax::ast; +use syntax::abi::Abi; +use syntax::codemap::CodeMap; +use errors; +use errors::emitter::Emitter; +use errors::{Level, DiagnosticBuilder}; use syntax::feature_gate::UnstableFeatures; +use syntax::symbol::Symbol; -use rustc_front::lowering::{lower_crate, LoweringContext}; -use rustc_front::hir; +use rustc::hir; -struct Env<'a, 'tcx: 'a> { - infcx: &'a infer::InferCtxt<'a, 
'tcx>, +struct Env<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + infcx: &'a infer::InferCtxt<'a, 'gcx, 'tcx>, } struct RH<'a> { @@ -71,29 +68,24 @@ fn remove_message(e: &mut ExpectErrorEmitter, msg: &str, lvl: Level) { e.messages.remove(i); } None => { + debug!("Unexpected error: {} Expected: {:?}", msg, e.messages); panic!("Unexpected error: {} Expected: {:?}", msg, e.messages); } } } impl Emitter for ExpectErrorEmitter { - fn emit(&mut self, - _sp: Option, - msg: &str, - _: Option<&str>, - lvl: Level) { - remove_message(self, msg, lvl); - } - - fn custom_emit(&mut self, _sp: RenderSpan, msg: &str, lvl: Level) { - remove_message(self, msg, lvl); + fn emit(&mut self, db: &DiagnosticBuilder) { + remove_message(self, &db.message, db.level); + for child in &db.children { + remove_message(self, &child.message, child.level); + } } } fn errors(msgs: &[&str]) -> (Box, usize) { let v = msgs.iter().map(|m| m.to_string()).collect(); - (box ExpectErrorEmitter { messages: v } as Box, - msgs.len()) + (box ExpectErrorEmitter { messages: v } as Box, msgs.len()) } fn test_env(source_string: &str, @@ -106,50 +98,66 @@ fn test_env(source_string: &str, options.unstable_features = UnstableFeatures::Allow; let diagnostic_handler = errors::Handler::with_emitter(true, false, emitter); - let cstore = Rc::new(CStore::new(token::get_ident_interner())); - let sess = session::build_session_(options, None, diagnostic_handler, - Rc::new(CodeMap::new()), cstore.clone()); + let dep_graph = DepGraph::new(false); + let _ignore = dep_graph.in_ignore(); + let cstore = Rc::new(CStore::new(&dep_graph)); + let sess = session::build_session_(options, + &dep_graph, + None, + diagnostic_handler, + Rc::new(CodeMap::new()), + cstore.clone()); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let krate_config = Vec::new(); - let input = config::Input::Str(source_string.to_string()); - let krate = driver::phase_1_parse_input(&sess, krate_config, &input); - let krate = 
driver::phase_2_configure_and_expand(&sess, &cstore, krate, "test", None) - .expect("phase 2 aborted"); - - let krate = driver::assign_node_ids(&sess, krate); - let lcx = LoweringContext::new(&sess, Some(&krate)); - let mut hir_forest = hir_map::Forest::new(lower_crate(&lcx, &krate)); + let input = config::Input::Str { + name: driver::anon_src(), + input: source_string.to_string(), + }; + let krate = driver::phase_1_parse_input(&sess, &input).unwrap(); + let driver::ExpansionResult { defs, resolutions, mut hir_forest, .. } = { + driver::phase_2_configure_and_expand(&sess, + &cstore, + krate, + None, + "test", + None, + MakeGlobMap::No, + |_| Ok(())) + .expect("phase 2 aborted") + }; + let _ignore = dep_graph.in_ignore(); + let arenas = ty::CtxtArenas::new(); - let ast_map = driver::make_map(&sess, &mut hir_forest); - let krate = ast_map.krate(); + let ast_map = hir_map::map_crate(&mut hir_forest, defs); // run just enough stuff to build a tcx: let lang_items = lang_items::collect_language_items(&sess, &ast_map); - let resolve::CrateMap { def_map, freevars, .. 
} = - resolve::resolve_crate(&sess, &ast_map, resolve::MakeGlobMap::No); - let named_region_map = resolve_lifetime::krate(&sess, krate, &def_map.borrow()); - let region_map = region::resolve_crate(&sess, krate); - ty::ctxt::create_and_enter(&sess, - &arenas, - def_map, - named_region_map, - ast_map, - freevars, - region_map, - lang_items, - stability::Index::new(krate), - |tcx| { - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None); - body(Env { infcx: &infcx }); - let free_regions = FreeRegionMap::new(); - infcx.resolve_regions_and_report_errors(&free_regions, - ast::CRATE_NODE_ID); - assert_eq!(tcx.sess.err_count(), expected_err_count); - }); + let named_region_map = resolve_lifetime::krate(&sess, &ast_map); + let region_map = region::resolve_crate(&sess, &ast_map); + let index = stability::Index::new(&ast_map); + TyCtxt::create_and_enter(&sess, + &arenas, + resolutions.trait_map, + named_region_map.unwrap(), + ast_map, + resolutions.freevars, + resolutions.maybe_unused_trait_imports, + region_map, + lang_items, + index, + "test_crate", + |tcx| { + tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { + + body(Env { infcx: &infcx }); + let free_regions = FreeRegionMap::new(); + infcx.resolve_regions_and_report_errors(&free_regions, ast::CRATE_NODE_ID); + assert_eq!(tcx.sess.err_count(), expected_err_count); + }); + }); } -impl<'a, 'tcx> Env<'a, 'tcx> { - pub fn tcx(&self) -> &ty::ctxt<'tcx> { +impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.infcx.tcx } @@ -163,14 +171,23 @@ impl<'a, 'tcx> Env<'a, 'tcx> { pub fn create_simple_region_hierarchy(&self) { // creates a region hierarchy where 1 is root, 10 and 11 are // children of 1, etc + + let node = ast::NodeId::from_u32; let dscope = self.infcx - .tcx - .region_maps - .intern_code_extent(CodeExtentData::DestructionScope(1), - region::ROOT_CODE_EXTENT); + .tcx + .region_maps + .intern_code_extent(CodeExtentData::DestructionScope(node(1)), + 
region::ROOT_CODE_EXTENT); self.create_region_hierarchy(&RH { - id: 1, - sub: &[RH { id: 10, sub: &[] }, RH { id: 11, sub: &[] }], + id: node(1), + sub: &[RH { + id: node(10), + sub: &[], + }, + RH { + id: node(11), + sub: &[], + }], }, dscope); } @@ -211,37 +228,29 @@ impl<'a, 'tcx> Env<'a, 'tcx> { hir::ItemStatic(..) | hir::ItemFn(..) | hir::ItemForeignMod(..) | - hir::ItemTy(..) => { - None - } + hir::ItemTy(..) => None, hir::ItemEnum(..) | hir::ItemStruct(..) | + hir::ItemUnion(..) | hir::ItemTrait(..) | hir::ItemImpl(..) | - hir::ItemDefaultImpl(..) => { - None - } + hir::ItemDefaultImpl(..) => None, - hir::ItemMod(ref m) => { - search_mod(this, m, idx, names) - } + hir::ItemMod(ref m) => search_mod(this, m, idx, names), }; } } pub fn make_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match infer::mk_subty(self.infcx, true, TypeOrigin::Misc(DUMMY_SP), a, b) { + match self.infcx.sub_types(true, &ObligationCause::dummy(), a, b) { Ok(_) => true, Err(ref e) => panic!("Encountered error: {}", e), } } pub fn is_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match infer::can_mk_subty(self.infcx, a, b) { - Ok(_) => true, - Err(_) => false, - } + self.infcx.can_sub_types(a, b).is_ok() } pub fn assert_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) { @@ -257,16 +266,15 @@ impl<'a, 'tcx> Env<'a, 'tcx> { pub fn t_fn(&self, input_tys: &[Ty<'tcx>], output_ty: Ty<'tcx>) -> Ty<'tcx> { let input_args = input_tys.iter().cloned().collect(); - self.infcx.tcx.mk_fn(None, - self.infcx.tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Normal, - abi: abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: input_args, - output: ty::FnConverging(output_ty), - variadic: false, - }), - })) + self.infcx.tcx.mk_fn_ptr(self.infcx.tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + sig: ty::Binder(ty::FnSig { + inputs: input_args, + output: output_ty, + variadic: false, + }), + })) } pub fn t_nil(&self) -> Ty<'tcx> { @@ -274,38 +282,36 @@ impl<'a, 
'tcx> Env<'a, 'tcx> { } pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> { - self.infcx.tcx.mk_tup(vec![ty1, ty2]) + self.infcx.tcx.intern_tup(&[ty1, ty2]) } - pub fn t_param(&self, space: subst::ParamSpace, index: u32) -> Ty<'tcx> { + pub fn t_param(&self, index: u32) -> Ty<'tcx> { let name = format!("T{}", index); - self.infcx.tcx.mk_param(space, index, token::intern(&name[..])) + self.infcx.tcx.mk_param(index, Symbol::intern(&name[..])) } - pub fn re_early_bound(&self, - space: subst::ParamSpace, - index: u32, - name: &'static str) - -> ty::Region { - let name = token::intern(name); - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, + pub fn re_early_bound(&self, index: u32, name: &'static str) -> &'tcx ty::Region { + let name = Symbol::intern(name); + self.infcx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { index: index, name: name, - }) + })) } - pub fn re_late_bound_with_debruijn(&self, id: u32, debruijn: ty::DebruijnIndex) -> ty::Region { - ty::ReLateBound(debruijn, ty::BrAnon(id)) + pub fn re_late_bound_with_debruijn(&self, + id: u32, + debruijn: ty::DebruijnIndex) + -> &'tcx ty::Region { + self.infcx.tcx.mk_region(ty::ReLateBound(debruijn, ty::BrAnon(id))) } - pub fn t_rptr(&self, r: ty::Region) -> Ty<'tcx> { - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + pub fn t_rptr(&self, r: &'tcx ty::Region) -> Ty<'tcx> { + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> { let r = self.re_late_bound_with_debruijn(id, ty::DebruijnIndex::new(1)); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_late_bound_with_debruijn(&self, @@ -313,24 +319,24 @@ impl<'a, 'tcx> Env<'a, 'tcx> { debruijn: ty::DebruijnIndex) -> Ty<'tcx> { let r = self.re_late_bound_with_debruijn(id, debruijn); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), 
self.tcx().types.isize) + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } - pub fn t_rptr_scope(&self, id: ast::NodeId) -> Ty<'tcx> { - let r = ty::ReScope(self.tcx().region_maps.node_extent(id)); + pub fn t_rptr_scope(&self, id: u32) -> Ty<'tcx> { + let r = ty::ReScope(self.tcx().region_maps.node_extent(ast::NodeId::from_u32(id))); self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) } - pub fn re_free(&self, nid: ast::NodeId, id: u32) -> ty::Region { - ty::ReFree(ty::FreeRegion { + pub fn re_free(&self, nid: ast::NodeId, id: u32) -> &'tcx ty::Region { + self.infcx.tcx.mk_region(ty::ReFree(ty::FreeRegion { scope: self.tcx().region_maps.item_extent(nid), bound_region: ty::BrAnon(id), - }) + })) } - pub fn t_rptr_free(&self, nid: ast::NodeId, id: u32) -> Ty<'tcx> { - let r = self.re_free(nid, id); - self.infcx.tcx.mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) + pub fn t_rptr_free(&self, nid: u32, id: u32) -> Ty<'tcx> { + let r = self.re_free(ast::NodeId::from_u32(nid), id); + self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) } pub fn t_rptr_static(&self) -> Ty<'tcx> { @@ -347,26 +353,29 @@ impl<'a, 'tcx> Env<'a, 'tcx> { infer::TypeTrace::dummy(self.tcx()) } - pub fn sub(&self) -> Sub<'a, 'tcx> { + pub fn sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> { let trace = self.dummy_type_trace(); - self.infcx.sub(true, trace) + self.infcx.sub(true, trace, &t1, &t2) } - pub fn lub(&self) -> Lub<'a, 'tcx> { + pub fn lub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> { let trace = self.dummy_type_trace(); - self.infcx.lub(true, trace) + self.infcx.lub(true, trace, &t1, &t2) } - pub fn glb(&self) -> Glb<'a, 'tcx> { + pub fn glb(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> { let trace = self.dummy_type_trace(); - self.infcx.glb(true, trace) + self.infcx.glb(true, trace, &t1, &t2) } /// Checks that `t1 <: t2` is true (this may register additional /// region 
checks). pub fn check_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) { - match self.sub().relate(&t1, &t2) { - Ok(_) => {} + match self.sub(t1, t2) { + Ok(InferOk { obligations, .. }) => { + // FIXME(#32730) once obligations are being propagated, assert the right thing. + assert!(obligations.is_empty()); + } Err(ref e) => { panic!("unexpected error computing sub({:?},{:?}): {}", t1, t2, e); } @@ -376,7 +385,7 @@ impl<'a, 'tcx> Env<'a, 'tcx> { /// Checks that `t1 <: t2` is false (this may register additional /// region checks). pub fn check_not_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) { - match self.sub().relate(&t1, &t2) { + match self.sub(t1, t2) { Err(_) => {} Ok(_) => { panic!("unexpected success computing sub({:?},{:?})", t1, t2); @@ -386,24 +395,26 @@ impl<'a, 'tcx> Env<'a, 'tcx> { /// Checks that `LUB(t1,t2) == t_lub` pub fn check_lub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_lub: Ty<'tcx>) { - match self.lub().relate(&t1, &t2) { - Ok(t) => { + match self.lub(t1, t2) { + Ok(InferOk { obligations, value: t }) => { + // FIXME(#32730) once obligations are being propagated, assert the right thing. + assert!(obligations.is_empty()); + self.assert_eq(t, t_lub); } - Err(ref e) => { - panic!("unexpected error in LUB: {}", e) - } + Err(ref e) => panic!("unexpected error in LUB: {}", e), } } /// Checks that `GLB(t1,t2) == t_glb` pub fn check_glb(&self, t1: Ty<'tcx>, t2: Ty<'tcx>, t_glb: Ty<'tcx>) { debug!("check_glb(t1={}, t2={}, t_glb={})", t1, t2, t_glb); - match self.glb().relate(&t1, &t2) { - Err(e) => { - panic!("unexpected error computing LUB: {:?}", e) - } - Ok(t) => { + match self.glb(t1, t2) { + Err(e) => panic!("unexpected error computing LUB: {:?}", e), + Ok(InferOk { obligations, value: t }) => { + // FIXME(#32730) once obligations are being propagated, assert the right thing. 
+ assert!(obligations.is_empty()); + self.assert_eq(t, t_glb); // sanity check for good measure: @@ -428,7 +439,7 @@ fn contravariant_region_ptr_ok() { #[test] fn contravariant_region_ptr_err() { - test_env(EMPTY_SOURCE_STR, errors(&["lifetime mismatch"]), |env| { + test_env(EMPTY_SOURCE_STR, errors(&["mismatched types"]), |env| { env.create_simple_region_hierarchy(); let t_rptr1 = env.t_rptr_scope(1); let t_rptr10 = env.t_rptr_scope(10); @@ -666,12 +677,12 @@ fn subst_ty_renumber_bound() { // t_source = fn(A) let t_source = { - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); env.t_fn(&[t_param], env.t_nil()) }; - let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = env.infcx.tcx.intern_substs(&[Kind::from(t_rptr_bound1)]); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) let t_expected = { @@ -701,12 +712,12 @@ fn subst_ty_renumber_some_bounds() { // t_source = (A, fn(A)) let t_source = { - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); env.t_pair(t_param, env.t_fn(&[t_param], env.t_nil())) }; - let substs = subst::Substs::new_type(vec![t_rptr_bound1], vec![]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = env.infcx.tcx.intern_substs(&[Kind::from(t_rptr_bound1)]); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = (&'a isize, fn(&'a isize)) // @@ -747,7 +758,7 @@ fn escaping() { assert!(t_rptr_bound2.has_escaping_regions()); // t_fn = fn(A) - let t_param = env.t_param(subst::TypeSpace, 0); + let t_param = env.t_param(0); assert!(!t_param.has_escaping_regions()); let t_fn = env.t_fn(&[t_param], env.t_nil()); assert!(!t_fn.has_escaping_regions()); @@ -763,12 +774,12 @@ fn subst_region_renumber_region() { // type t_source<'a> = fn(&'a isize) let t_source = { - let re_early = env.re_early_bound(subst::TypeSpace, 0, 
"'a"); + let re_early = env.re_early_bound(0, "'a"); env.t_fn(&[env.t_rptr(re_early)], env.t_nil()) }; - let substs = subst::Substs::new_type(vec![], vec![re_bound1]); - let t_substituted = t_source.subst(env.infcx.tcx, &substs); + let substs = env.infcx.tcx.intern_substs(&[Kind::from(re_bound1)]); + let t_substituted = t_source.subst(env.infcx.tcx, substs); // t_expected = fn(&'a isize) // @@ -794,8 +805,8 @@ fn walk_ty() { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; let uint_ty = tcx.types.usize; - let tup1_ty = tcx.mk_tup(vec![int_ty, uint_ty, int_ty, uint_ty]); - let tup2_ty = tcx.mk_tup(vec![tup1_ty, tup1_ty, uint_ty]); + let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty]); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty]); let uniq_ty = tcx.mk_box(tup2_ty); let walked: Vec<_> = uniq_ty.walk().collect(); assert_eq!(walked, @@ -810,8 +821,8 @@ fn walk_ty_skip_subtree() { let tcx = env.infcx.tcx; let int_ty = tcx.types.isize; let uint_ty = tcx.types.usize; - let tup1_ty = tcx.mk_tup(vec![int_ty, uint_ty, int_ty, uint_ty]); - let tup2_ty = tcx.mk_tup(vec![tup1_ty, tup1_ty, uint_ty]); + let tup1_ty = tcx.intern_tup(&[int_ty, uint_ty, int_ty, uint_ty]); + let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, uint_ty]); let uniq_ty = tcx.mk_box(tup2_ty); // types we expect to see (in order), plus a boolean saying diff --git a/src/librustc_errors/Cargo.toml b/src/librustc_errors/Cargo.toml new file mode 100644 index 0000000000000..c92e4d8f5aba5 --- /dev/null +++ b/src/librustc_errors/Cargo.toml @@ -0,0 +1,14 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_errors" +version = "0.0.0" + +[lib] +name = "rustc_errors" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +serialize = { path = "../libserialize" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_errors/diagnostic.rs b/src/librustc_errors/diagnostic.rs new file mode 100644 index 
0000000000000..730ca8f9e2e44 --- /dev/null +++ b/src/librustc_errors/diagnostic.rs @@ -0,0 +1,202 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use CodeSuggestion; +use Level; +use RenderSpan; +use RenderSpan::Suggestion; +use std::fmt; +use syntax_pos::{MultiSpan, Span}; + +#[must_use] +#[derive(Clone, Debug, PartialEq)] +pub struct Diagnostic { + pub level: Level, + pub message: String, + pub code: Option, + pub span: MultiSpan, + pub children: Vec, +} + +/// For example a note attached to an error. +#[derive(Clone, Debug, PartialEq)] +pub struct SubDiagnostic { + pub level: Level, + pub message: String, + pub span: MultiSpan, + pub render_span: Option, +} + +impl Diagnostic { + pub fn new(level: Level, message: &str) -> Self { + Diagnostic::new_with_code(level, None, message) + } + + pub fn new_with_code(level: Level, code: Option, message: &str) -> Self { + Diagnostic { + level: level, + message: message.to_owned(), + code: code, + span: MultiSpan::new(), + children: vec![], + } + } + + /// Cancel the diagnostic (a structured diagnostic must either be emitted or + /// cancelled or it will panic when dropped). + /// BEWARE: if this DiagnosticBuilder is an error, then creating it will + /// bump the error count on the Handler and cancelling it won't undo that. + /// If you want to decrement the error count you should use `Handler::cancel`. + pub fn cancel(&mut self) { + self.level = Level::Cancelled; + } + + pub fn cancelled(&self) -> bool { + self.level == Level::Cancelled + } + + pub fn is_fatal(&self) -> bool { + self.level == Level::Fatal + } + + /// Add a span/label to be included in the resulting snippet. 
+ /// This is pushed onto the `MultiSpan` that was created when the + /// diagnostic was first built. If you don't call this function at + /// all, and you just supplied a `Span` to create the diagnostic, + /// then the snippet will just include that `Span`, which is + /// called the primary span. + pub fn span_label(&mut self, span: Span, label: &fmt::Display) + -> &mut Self { + self.span.push_span_label(span, format!("{}", label)); + self + } + + pub fn note_expected_found(&mut self, + label: &fmt::Display, + expected: &fmt::Display, + found: &fmt::Display) + -> &mut Self + { + self.note_expected_found_extra(label, expected, found, &"", &"") + } + + pub fn note_expected_found_extra(&mut self, + label: &fmt::Display, + expected: &fmt::Display, + found: &fmt::Display, + expected_extra: &fmt::Display, + found_extra: &fmt::Display) + -> &mut Self + { + // For now, just attach these as notes + self.note(&format!("expected {} `{}`{}", label, expected, expected_extra)); + self.note(&format!(" found {} `{}`{}", label, found, found_extra)); + self + } + + pub fn note(&mut self, msg: &str) -> &mut Self { + self.sub(Level::Note, msg, MultiSpan::new(), None); + self + } + + pub fn span_note>(&mut self, + sp: S, + msg: &str) + -> &mut Self { + self.sub(Level::Note, msg, sp.into(), None); + self + } + + pub fn warn(&mut self, msg: &str) -> &mut Self { + self.sub(Level::Warning, msg, MultiSpan::new(), None); + self + } + + pub fn span_warn>(&mut self, + sp: S, + msg: &str) + -> &mut Self { + self.sub(Level::Warning, msg, sp.into(), None); + self + } + + pub fn help(&mut self , msg: &str) -> &mut Self { + self.sub(Level::Help, msg, MultiSpan::new(), None); + self + } + + pub fn span_help>(&mut self, + sp: S, + msg: &str) + -> &mut Self { + self.sub(Level::Help, msg, sp.into(), None); + self + } + + /// Prints out a message with a suggested edit of the code. + /// + /// See `diagnostic::RenderSpan::Suggestion` for more information. 
+ pub fn span_suggestion>(&mut self, + sp: S, + msg: &str, + suggestion: String) + -> &mut Self { + self.sub(Level::Help, + msg, + MultiSpan::new(), + Some(Suggestion(CodeSuggestion { + msp: sp.into(), + substitutes: vec![suggestion], + }))); + self + } + + pub fn set_span>(&mut self, sp: S) -> &mut Self { + self.span = sp.into(); + self + } + + pub fn code(&mut self, s: String) -> &mut Self { + self.code = Some(s); + self + } + + pub fn message(&self) -> &str { + &self.message + } + + pub fn level(&self) -> Level { + self.level + } + + /// Used by a lint. Copies over all details *but* the "main + /// message". + pub fn copy_details_not_message(&mut self, from: &Diagnostic) { + self.span = from.span.clone(); + self.code = from.code.clone(); + self.children.extend(from.children.iter().cloned()) + } + + /// Convenience function for internal use, clients should use one of the + /// public methods above. + fn sub(&mut self, + level: Level, + message: &str, + span: MultiSpan, + render_span: Option) { + let sub = SubDiagnostic { + level: level, + message: message.to_owned(), + span: span, + render_span: render_span, + }; + self.children.push(sub); + } +} diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs new file mode 100644 index 0000000000000..7dfea6b8951b0 --- /dev/null +++ b/src/librustc_errors/diagnostic_builder.rs @@ -0,0 +1,196 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use Diagnostic; +use Level; +use Handler; +use std::fmt::{self, Debug}; +use std::ops::{Deref, DerefMut}; +use std::thread::panicking; +use syntax_pos::{MultiSpan, Span}; + +/// Used for emitting structured error messages and other diagnostic information. +#[must_use] +#[derive(Clone)] +pub struct DiagnosticBuilder<'a> { + handler: &'a Handler, + diagnostic: Diagnostic, +} + +/// In general, the `DiagnosticBuilder` uses deref to allow access to +/// the fields and methods of the embedded `diagnostic` in a +/// transparent way. *However,* many of the methods are intended to +/// be used in a chained way, and hence ought to return `self`. In +/// that case, we can't just naively forward to the method on the +/// `diagnostic`, because the return type would be a `&Diagnostic` +/// instead of a `&DiagnosticBuilder<'a>`. This `forward!` macro makes +/// it easy to declare such methods on the builder. +macro_rules! forward { + // Forward pattern for &self -> &Self + (pub fn $n:ident(&self, $($name:ident: $ty:ty),*) -> &Self) => { + pub fn $n(&self, $($name: $ty),*) -> &Self { + self.diagnostic.$n($($name),*); + self + } + }; + + // Forward pattern for &mut self -> &mut Self + (pub fn $n:ident(&mut self, $($name:ident: $ty:ty),*) -> &mut Self) => { + pub fn $n(&mut self, $($name: $ty),*) -> &mut Self { + self.diagnostic.$n($($name),*); + self + } + }; + + // Forward pattern for &mut self -> &mut Self, with S: Into + // type parameter. No obvious way to make this more generic. 
+ (pub fn $n:ident>(&mut self, $($name:ident: $ty:ty),*) -> &mut Self) => { + pub fn $n>(&mut self, $($name: $ty),*) -> &mut Self { + self.diagnostic.$n($($name),*); + self + } + }; +} + +impl<'a> Deref for DiagnosticBuilder<'a> { + type Target = Diagnostic; + + fn deref(&self) -> &Diagnostic { + &self.diagnostic + } +} + +impl<'a> DerefMut for DiagnosticBuilder<'a> { + fn deref_mut(&mut self) -> &mut Diagnostic { + &mut self.diagnostic + } +} + +impl<'a> DiagnosticBuilder<'a> { + /// Emit the diagnostic. + pub fn emit(&mut self) { + if self.cancelled() { + return; + } + + match self.level { + Level::Bug | + Level::Fatal | + Level::PhaseFatal | + Level::Error => { + self.handler.bump_err_count(); + } + + Level::Warning | + Level::Note | + Level::Help | + Level::Cancelled => { + } + } + + self.handler.emitter.borrow_mut().emit(&self); + self.cancel(); + self.handler.panic_if_treat_err_as_bug(); + + // if self.is_fatal() { + // panic!(FatalError); + // } + } + + /// Add a span/label to be included in the resulting snippet. + /// This is pushed onto the `MultiSpan` that was created when the + /// diagnostic was first built. If you don't call this function at + /// all, and you just supplied a `Span` to create the diagnostic, + /// then the snippet will just include that `Span`, which is + /// called the primary span. 
+ forward!(pub fn span_label(&mut self, span: Span, label: &fmt::Display) + -> &mut Self); + + forward!(pub fn note_expected_found(&mut self, + label: &fmt::Display, + expected: &fmt::Display, + found: &fmt::Display) + -> &mut Self); + + forward!(pub fn note_expected_found_extra(&mut self, + label: &fmt::Display, + expected: &fmt::Display, + found: &fmt::Display, + expected_extra: &fmt::Display, + found_extra: &fmt::Display) + -> &mut Self); + + forward!(pub fn note(&mut self, msg: &str) -> &mut Self); + forward!(pub fn span_note>(&mut self, + sp: S, + msg: &str) + -> &mut Self); + forward!(pub fn warn(&mut self, msg: &str) -> &mut Self); + forward!(pub fn span_warn>(&mut self, sp: S, msg: &str) -> &mut Self); + forward!(pub fn help(&mut self , msg: &str) -> &mut Self); + forward!(pub fn span_help>(&mut self, + sp: S, + msg: &str) + -> &mut Self); + forward!(pub fn span_suggestion>(&mut self, + sp: S, + msg: &str, + suggestion: String) + -> &mut Self); + forward!(pub fn set_span>(&mut self, sp: S) -> &mut Self); + forward!(pub fn code(&mut self, s: String) -> &mut Self); + + /// Convenience function for internal use, clients should use one of the + /// struct_* methods on Handler. + pub fn new(handler: &'a Handler, level: Level, message: &str) -> DiagnosticBuilder<'a> { + DiagnosticBuilder::new_with_code(handler, level, None, message) + } + + /// Convenience function for internal use, clients should use one of the + /// struct_* methods on Handler. 
+ pub fn new_with_code(handler: &'a Handler, + level: Level, + code: Option, + message: &str) + -> DiagnosticBuilder<'a> { + DiagnosticBuilder { + handler: handler, + diagnostic: Diagnostic::new_with_code(level, code, message) + } + } + + pub fn into_diagnostic(mut self) -> Diagnostic { + // annoyingly, the Drop impl means we can't actually move + let result = self.diagnostic.clone(); + self.cancel(); + result + } +} + +impl<'a> Debug for DiagnosticBuilder<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.diagnostic.fmt(f) + } +} + +/// Destructor bomb - a DiagnosticBuilder must be either emitted or cancelled or +/// we emit a bug. +impl<'a> Drop for DiagnosticBuilder<'a> { + fn drop(&mut self) { + if !panicking() && !self.cancelled() { + let mut db = DiagnosticBuilder::new(self.handler, + Level::Bug, + "Error constructed but not emitted"); + db.emit(); + panic!(); + } + } +} + diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs new file mode 100644 index 0000000000000..808fe504b95cd --- /dev/null +++ b/src/librustc_errors/emitter.rs @@ -0,0 +1,1208 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use self::Destination::*; + +use syntax_pos::{COMMAND_LINE_SP, DUMMY_SP, FileMap, Span, MultiSpan, CharPos}; + +use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, CodeMapper}; +use RenderSpan::*; +use snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, StyledString, Style}; +use styled_buffer::StyledBuffer; + +use std::io::prelude::*; +use std::io; +use std::rc::Rc; +use term; + +/// Emitter trait for emitting errors. +pub trait Emitter { + /// Emit a structured diagnostic. 
+ fn emit(&mut self, db: &DiagnosticBuilder); +} + +impl Emitter for EmitterWriter { + fn emit(&mut self, db: &DiagnosticBuilder) { + let mut primary_span = db.span.clone(); + let mut children = db.children.clone(); + self.fix_multispans_in_std_macros(&mut primary_span, &mut children); + self.emit_messages_default(&db.level, &db.message, &db.code, &primary_span, &children); + } +} + +/// maximum number of lines we will print for each error; arbitrary. +pub const MAX_HIGHLIGHT_LINES: usize = 6; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ColorConfig { + Auto, + Always, + Never, +} + +impl ColorConfig { + fn use_color(&self) -> bool { + match *self { + ColorConfig::Always => true, + ColorConfig::Never => false, + ColorConfig::Auto => stderr_isatty(), + } + } +} + +pub struct EmitterWriter { + dst: Destination, + cm: Option>, +} + +struct FileWithAnnotatedLines { + file: Rc, + lines: Vec, + multiline_depth: usize, +} + + +/// Do not use this for messages that end in `\n` – use `println_maybe_styled` instead. See +/// `EmitterWriter::print_maybe_styled` for details. +macro_rules! print_maybe_styled { + ($dst: expr, $style: expr, $($arg: tt)*) => { + $dst.print_maybe_styled(format_args!($($arg)*), $style, false) + } +} + +macro_rules! 
println_maybe_styled { + ($dst: expr, $style: expr, $($arg: tt)*) => { + $dst.print_maybe_styled(format_args!($($arg)*), $style, true) + } +} + +impl EmitterWriter { + pub fn stderr(color_config: ColorConfig, code_map: Option>) -> EmitterWriter { + if color_config.use_color() { + let dst = Destination::from_stderr(); + EmitterWriter { + dst: dst, + cm: code_map, + } + } else { + EmitterWriter { + dst: Raw(Box::new(io::stderr())), + cm: code_map, + } + } + } + + pub fn new(dst: Box, code_map: Option>) -> EmitterWriter { + EmitterWriter { + dst: Raw(dst), + cm: code_map, + } + } + + fn preprocess_annotations(&self, msp: &MultiSpan) -> Vec { + fn add_annotation_to_file(file_vec: &mut Vec, + file: Rc, + line_index: usize, + ann: Annotation) { + + for slot in file_vec.iter_mut() { + // Look through each of our files for the one we're adding to + if slot.file.name == file.name { + // See if we already have a line for it + for line_slot in &mut slot.lines { + if line_slot.line_index == line_index { + line_slot.annotations.push(ann); + return; + } + } + // We don't have a line yet, create one + slot.lines.push(Line { + line_index: line_index, + annotations: vec![ann], + }); + slot.lines.sort(); + return; + } + } + // This is the first time we're seeing the file + file_vec.push(FileWithAnnotatedLines { + file: file, + lines: vec![Line { + line_index: line_index, + annotations: vec![ann], + }], + multiline_depth: 0, + }); + } + + let mut output = vec![]; + let mut multiline_annotations = vec![]; + + if let Some(ref cm) = self.cm { + for span_label in msp.span_labels() { + if span_label.span == DUMMY_SP || span_label.span == COMMAND_LINE_SP { + continue; + } + let lo = cm.lookup_char_pos(span_label.span.lo); + let mut hi = cm.lookup_char_pos(span_label.span.hi); + let mut is_minimized = false; + + // If the span is long multi-line, simplify down to the span of one character + let max_multiline_span_length = 8; + if lo.line != hi.line && (hi.line - lo.line) > 
max_multiline_span_length { + hi.line = lo.line; + hi.col = CharPos(lo.col.0 + 1); + is_minimized = true; + } + + // Watch out for "empty spans". If we get a span like 6..6, we + // want to just display a `^` at 6, so convert that to + // 6..7. This is degenerate input, but it's best to degrade + // gracefully -- and the parser likes to supply a span like + // that for EOF, in particular. + if lo.col == hi.col && lo.line == hi.line { + hi.col = CharPos(lo.col.0 + 1); + } + + let mut ann = Annotation { + start_col: lo.col.0, + end_col: hi.col.0, + is_primary: span_label.is_primary, + label: span_label.label.clone(), + annotation_type: AnnotationType::Singleline, + }; + if is_minimized { + ann.annotation_type = AnnotationType::Minimized; + } else if lo.line != hi.line { + let ml = MultilineAnnotation { + depth: 1, + line_start: lo.line, + line_end: hi.line, + start_col: lo.col.0, + end_col: hi.col.0, + is_primary: span_label.is_primary, + label: span_label.label.clone(), + }; + ann.annotation_type = AnnotationType::Multiline(ml.clone()); + multiline_annotations.push((lo.file.clone(), ml)); + }; + + if !ann.is_multiline() { + add_annotation_to_file(&mut output, + lo.file, + lo.line, + ann); + } + } + } + + // Find overlapping multiline annotations, put them at different depths + multiline_annotations.sort_by(|a, b| { + (a.1.line_start, a.1.line_end).cmp(&(b.1.line_start, b.1.line_end)) + }); + for item in multiline_annotations.clone() { + let ann = item.1; + for item in multiline_annotations.iter_mut() { + let ref mut a = item.1; + // Move all other multiline annotations overlapping with this one + // one level to the right. 
+ if &ann != a && + num_overlap(ann.line_start, ann.line_end, a.line_start, a.line_end, true) + { + a.increase_depth(); + } else { + break; + } + } + } + + let mut max_depth = 0; // max overlapping multiline spans + for (file, ann) in multiline_annotations { + if ann.depth > max_depth { + max_depth = ann.depth; + } + add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start()); + for line in ann.line_start + 1..ann.line_end { + add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); + } + add_annotation_to_file(&mut output, file, ann.line_end, ann.as_end()); + } + for file_vec in output.iter_mut() { + file_vec.multiline_depth = max_depth; + } + output + } + + fn render_source_line(&self, + buffer: &mut StyledBuffer, + file: Rc, + line: &Line, + width_offset: usize, + multiline_depth: usize) { + let source_string = file.get_line(line.line_index - 1) + .unwrap_or(""); + + let line_offset = buffer.num_lines(); + let code_offset = if multiline_depth == 0 { + width_offset + } else { + width_offset + multiline_depth + 1 + }; + + // First create the source line we will highlight. + buffer.puts(line_offset, code_offset, &source_string, Style::Quotation); + buffer.puts(line_offset, + 0, + &(line.line_index.to_string()), + Style::LineNumber); + + draw_col_separator(buffer, line_offset, width_offset - 2); + + // We want to display like this: + // + // vec.push(vec.pop().unwrap()); + // --- ^^^ - previous borrow ends here + // | | + // | error occurs here + // previous borrow of `vec` occurs here + // + // But there are some weird edge cases to be aware of: + // + // vec.push(vec.pop().unwrap()); + // -------- - previous borrow ends here + // || + // |this makes no sense + // previous borrow of `vec` occurs here + // + // For this reason, we group the lines into "highlight lines" + // and "annotations lines", where the highlight lines have the `~`. 
+ + // Sort the annotations by (start, end col) + let mut annotations = line.annotations.clone(); + annotations.sort(); + annotations.reverse(); + + // First, figure out where each label will be positioned. + // + // In the case where you have the following annotations: + // + // vec.push(vec.pop().unwrap()); + // -------- - previous borrow ends here [C] + // || + // |this makes no sense [B] + // previous borrow of `vec` occurs here [A] + // + // `annotations_position` will hold [(2, A), (1, B), (0, C)]. + // + // We try, when possible, to stick the rightmost annotation at the end + // of the highlight line: + // + // vec.push(vec.pop().unwrap()); + // --- --- - previous borrow ends here + // + // But sometimes that's not possible because one of the other + // annotations overlaps it. For example, from the test + // `span_overlap_label`, we have the following annotations + // (written on distinct lines for clarity): + // + // fn foo(x: u32) { + // -------------- + // - + // + // In this case, we can't stick the rightmost-most label on + // the highlight line, or we would get: + // + // fn foo(x: u32) { + // -------- x_span + // | + // fn_span + // + // which is totally weird. Instead we want: + // + // fn foo(x: u32) { + // -------------- + // | | + // | x_span + // fn_span + // + // which is...less weird, at least. In fact, in general, if + // the rightmost span overlaps with any other span, we should + // use the "hang below" version, so we can at least make it + // clear where the span *starts*. 
+ let mut annotations_position = vec![]; + let mut line_len = 0; + let mut p = 0; + let mut ann_iter = annotations.iter().peekable(); + while let Some(annotation) = ann_iter.next() { + let is_line = if let AnnotationType::MultilineLine(_) = annotation.annotation_type { + true + } else { + false + }; + let peek = ann_iter.peek(); + if let Some(next) = peek { + let next_is_line = if let AnnotationType::MultilineLine(_) = next.annotation_type { + true + } else { + false + }; + + if overlaps(next, annotation) && !is_line && !next_is_line { + p += 1; + } + } + annotations_position.push((p, annotation)); + if let Some(next) = peek { + let next_is_line = if let AnnotationType::MultilineLine(_) = next.annotation_type { + true + } else { + false + }; + let l = if let Some(ref label) = next.label { + label.len() + 2 + } else { + 0 + }; + if (overlaps(next, annotation) || next.end_col + l > annotation.start_col) + && !is_line && !next_is_line + { + p += 1; + } + } + if line_len < p { + line_len = p; + } + } + if line_len != 0 { + line_len += 1; + } + + // If there are no annotations or the only annotations on this line are + // MultilineLine, then there's only code being shown, stop processing. + if line.annotations.is_empty() || line.annotations.iter() + .filter(|a| { + // Set the multiline annotation vertical lines to the left of + // the code in this line. + if let AnnotationType::MultilineLine(depth) = a.annotation_type { + buffer.putc(line_offset, + width_offset + depth - 1, + '|', + if a.is_primary { + Style::UnderlinePrimary + } else { + Style::UnderlineSecondary + }); + false + } else { + true + } + }).collect::>().len() == 0 + { + return; + } + + for pos in 0..line_len + 1 { + draw_col_separator(buffer, line_offset + pos + 1, width_offset - 2); + buffer.putc(line_offset + pos + 1, + width_offset - 2, + '|', + Style::LineNumber); + } + + // Write the horizontal lines for multiline annotations + // (only the first and last lines need this). 
+        //
+        // After this we will have:
+        //
+        // 2 |  fn foo() {
+        //   |  __________
+        //   |
+        //   |
+        // 3 |
+        // 4 |  }
+        //   |  _
+        for &(pos, annotation) in &annotations_position {
+            let style = if annotation.is_primary {
+                Style::UnderlinePrimary
+            } else {
+                Style::UnderlineSecondary
+            };
+            let pos = pos + 1;
+            match annotation.annotation_type {
+                AnnotationType::MultilineStart(depth) |
+                AnnotationType::MultilineEnd(depth) => {
+                    draw_range(buffer,
+                               '_',
+                               line_offset + pos,
+                               width_offset + depth,
+                               code_offset + annotation.start_col,
+                               style);
+                }
+                _ => (),
+            }
+        }
+
+        // Write the vertical lines for multiline spans and for labels that are
+        // on a different line than the underline.
+        //
+        // After this we will have:
+        //
+        // 2 |  fn foo() {
+        //   |  __________
+        //   | |    |
+        //   | |
+        // 3 | |
+        // 4 | |  }
+        //   | |_
+        for &(pos, annotation) in &annotations_position {
+            let style = if annotation.is_primary {
+                Style::UnderlinePrimary
+            } else {
+                Style::UnderlineSecondary
+            };
+            let pos = pos + 1;
+            if pos > 1 {
+                for p in line_offset + 1..line_offset + pos + 1 {
+                    buffer.putc(p,
+                                code_offset + annotation.start_col,
+                                '|',
+                                style);
+                }
+            }
+            match annotation.annotation_type {
+                AnnotationType::MultilineStart(depth) => {
+                    for p in line_offset + pos + 1..line_offset + line_len + 2 {
+                        buffer.putc(p,
+                                    width_offset + depth - 1,
+                                    '|',
+                                    style);
+                    }
+                }
+                AnnotationType::MultilineEnd(depth) => {
+                    for p in line_offset..line_offset + pos + 1 {
+                        buffer.putc(p,
+                                    width_offset + depth - 1,
+                                    '|',
+                                    style);
+                    }
+                }
+                AnnotationType::MultilineLine(depth) => {
+                    // the first line will have already been filled when we checked
+                    // whether there were any annotations for this line.
+                    for p in line_offset + 1..line_offset + line_len + 2 {
+                        buffer.putc(p,
+                                    width_offset + depth - 1,
+                                    '|',
+                                    style);
+                    }
+                }
+                _ => (),
+            }
+        }
+
+        // Write the labels on the annotations that actually have a label.
+ // + // After this we will have: + // + // 2 | fn foo() { + // | __________ starting here... + // | | | + // | | something about `foo` + // 3 | | + // 4 | | } + // | |_ ...ending here: test + for &(pos, annotation) in &annotations_position { + let style = if annotation.is_primary { + Style::LabelPrimary + } else { + Style::LabelSecondary + }; + let (pos, col) = if pos == 0 { + (pos + 1, annotation.end_col + 1) + } else { + (pos + 2, annotation.start_col) + }; + if let Some(ref label) = annotation.label { + buffer.puts(line_offset + pos, + code_offset + col, + &label, + style); + } + } + + // Sort from biggest span to smallest span so that smaller spans are + // represented in the output: + // + // x | fn foo() + // | ^^^---^^ + // | | | + // | | something about `foo` + // | something about `fn foo()` + annotations_position.sort_by(|a, b| { + fn len(a: &Annotation) -> usize { + // Account for usize underflows + if a.end_col > a.start_col { + a.end_col - a.start_col + } else { + a.start_col - a.end_col + } + } + // Decreasing order + len(a.1).cmp(&len(b.1)).reverse() + }); + + // Write the underlines. + // + // After this we will have: + // + // 2 | fn foo() { + // | ____-_____^ starting here... 
+ // | | | + // | | something about `foo` + // 3 | | + // 4 | | } + // | |_^ ...ending here: test + for &(_, annotation) in &annotations_position { + let (underline, style) = if annotation.is_primary { + ('^', Style::UnderlinePrimary) + } else { + ('-', Style::UnderlineSecondary) + }; + for p in annotation.start_col..annotation.end_col { + buffer.putc(line_offset + 1, + code_offset + p, + underline, + style); + } + } + } + + fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize { + let mut max = 0; + if let Some(ref cm) = self.cm { + for primary_span in msp.primary_spans() { + if primary_span != &DUMMY_SP && primary_span != &COMMAND_LINE_SP { + let hi = cm.lookup_char_pos(primary_span.hi); + if hi.line > max { + max = hi.line; + } + } + } + for span_label in msp.span_labels() { + if span_label.span != DUMMY_SP && span_label.span != COMMAND_LINE_SP { + let hi = cm.lookup_char_pos(span_label.span.hi); + if hi.line > max { + max = hi.line; + } + } + } + } + max + } + + fn get_max_line_num(&mut self, span: &MultiSpan, children: &Vec) -> usize { + let mut max = 0; + + let primary = self.get_multispan_max_line_num(span); + max = if primary > max { primary } else { max }; + + for sub in children { + let sub_result = self.get_multispan_max_line_num(&sub.span); + max = if sub_result > max { primary } else { max }; + } + max + } + + // This "fixes" MultiSpans that contain Spans that are pointing to locations inside of + // <*macros>. Since these locations are often difficult to read, we move these Spans from + // <*macros> to their corresponding use site. 
+ fn fix_multispan_in_std_macros(&mut self, span: &mut MultiSpan) -> bool { + let mut spans_updated = false; + + if let Some(ref cm) = self.cm { + let mut before_after: Vec<(Span, Span)> = vec![]; + let mut new_labels: Vec<(Span, String)> = vec![]; + + // First, find all the spans in <*macros> and point instead at their use site + for sp in span.primary_spans() { + if (*sp == COMMAND_LINE_SP) || (*sp == DUMMY_SP) { + continue; + } + if cm.span_to_filename(sp.clone()).contains("macros>") { + let v = cm.macro_backtrace(sp.clone()); + if let Some(use_site) = v.last() { + before_after.push((sp.clone(), use_site.call_site.clone())); + } + } + for trace in cm.macro_backtrace(sp.clone()).iter().rev() { + // Only show macro locations that are local + // and display them like a span_note + if let Some(def_site) = trace.def_site_span { + if (def_site == COMMAND_LINE_SP) || (def_site == DUMMY_SP) { + continue; + } + // Check to make sure we're not in any <*macros> + if !cm.span_to_filename(def_site).contains("macros>") && + !trace.macro_decl_name.starts_with("#[") { + new_labels.push((trace.call_site, + "in this macro invocation".to_string())); + break; + } + } + } + } + for (label_span, label_text) in new_labels { + span.push_span_label(label_span, label_text); + } + for sp_label in span.span_labels() { + if (sp_label.span == COMMAND_LINE_SP) || (sp_label.span == DUMMY_SP) { + continue; + } + if cm.span_to_filename(sp_label.span.clone()).contains("macros>") { + let v = cm.macro_backtrace(sp_label.span.clone()); + if let Some(use_site) = v.last() { + before_after.push((sp_label.span.clone(), use_site.call_site.clone())); + } + } + } + // After we have them, make sure we replace these 'bad' def sites with their use sites + for (before, after) in before_after { + span.replace(before, after); + spans_updated = true; + } + } + + spans_updated + } + + // This does a small "fix" for multispans by looking to see if it can find any that + // point directly at <*macros>. 
Since these are often difficult to read, this + // will change the span to point at the use site. + fn fix_multispans_in_std_macros(&mut self, + span: &mut MultiSpan, + children: &mut Vec) { + let mut spans_updated = self.fix_multispan_in_std_macros(span); + for child in children.iter_mut() { + spans_updated |= self.fix_multispan_in_std_macros(&mut child.span); + } + if spans_updated { + children.push(SubDiagnostic { + level: Level::Note, + message: "this error originates in a macro outside of the current crate" + .to_string(), + span: MultiSpan::new(), + render_span: None, + }); + } + } + + fn emit_message_default(&mut self, + msp: &MultiSpan, + msg: &str, + code: &Option, + level: &Level, + max_line_num_len: usize, + is_secondary: bool) + -> io::Result<()> { + let mut buffer = StyledBuffer::new(); + + if msp.primary_spans().is_empty() && msp.span_labels().is_empty() && is_secondary { + // This is a secondary message with no span info + for _ in 0..max_line_num_len { + buffer.prepend(0, " ", Style::NoStyle); + } + draw_note_separator(&mut buffer, 0, max_line_num_len + 1); + buffer.append(0, &level.to_string(), Style::HeaderMsg); + buffer.append(0, ": ", Style::NoStyle); + buffer.append(0, msg, Style::NoStyle); + } else { + buffer.append(0, &level.to_string(), Style::Level(level.clone())); + match code { + &Some(ref code) => { + buffer.append(0, "[", Style::Level(level.clone())); + buffer.append(0, &code, Style::Level(level.clone())); + buffer.append(0, "]", Style::Level(level.clone())); + } + _ => {} + } + buffer.append(0, ": ", Style::HeaderMsg); + buffer.append(0, msg, Style::HeaderMsg); + } + + // Preprocess all the annotations so that they are grouped by file and by line number + // This helps us quickly iterate over the whole message (including secondary file spans) + let mut annotated_files = self.preprocess_annotations(msp); + + // Make sure our primary file comes first + let primary_lo = if let (Some(ref cm), Some(ref primary_span)) = + (self.cm.as_ref(), 
msp.primary_span().as_ref()) { + if primary_span != &&DUMMY_SP && primary_span != &&COMMAND_LINE_SP { + cm.lookup_char_pos(primary_span.lo) + } else { + emit_to_destination(&buffer.render(), level, &mut self.dst)?; + return Ok(()); + } + } else { + // If we don't have span information, emit and exit + emit_to_destination(&buffer.render(), level, &mut self.dst)?; + return Ok(()); + }; + if let Ok(pos) = + annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name)) { + annotated_files.swap(0, pos); + } + + // Print out the annotate source lines that correspond with the error + for annotated_file in annotated_files { + // print out the span location and spacer before we print the annotated source + // to do this, we need to know if this span will be primary + let is_primary = primary_lo.file.name == annotated_file.file.name; + if is_primary { + // remember where we are in the output buffer for easy reference + let buffer_msg_line_offset = buffer.num_lines(); + + buffer.prepend(buffer_msg_line_offset, "--> ", Style::LineNumber); + let loc = primary_lo.clone(); + buffer.append(buffer_msg_line_offset, + &format!("{}:{}:{}", loc.file.name, loc.line, loc.col.0 + 1), + Style::LineAndColumn); + for _ in 0..max_line_num_len { + buffer.prepend(buffer_msg_line_offset, " ", Style::NoStyle); + } + } else { + // remember where we are in the output buffer for easy reference + let buffer_msg_line_offset = buffer.num_lines(); + + // Add spacing line + draw_col_separator(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1); + + // Then, the secondary file indicator + buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber); + buffer.append(buffer_msg_line_offset + 1, + &annotated_file.file.name, + Style::LineAndColumn); + for _ in 0..max_line_num_len { + buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle); + } + } + + // Put in the spacer between the location and annotated source + let buffer_msg_line_offset = buffer.num_lines(); + 
draw_col_separator_no_space(&mut buffer, buffer_msg_line_offset, max_line_num_len + 1); + + // Next, output the annotate source for this file + for line_idx in 0..annotated_file.lines.len() { + self.render_source_line(&mut buffer, + annotated_file.file.clone(), + &annotated_file.lines[line_idx], + 3 + max_line_num_len, + annotated_file.multiline_depth); + + // check to see if we need to print out or elide lines that come between + // this annotated line and the next one + if line_idx < (annotated_file.lines.len() - 1) { + let line_idx_delta = annotated_file.lines[line_idx + 1].line_index - + annotated_file.lines[line_idx].line_index; + if line_idx_delta > 2 { + let last_buffer_line_num = buffer.num_lines(); + buffer.puts(last_buffer_line_num, 0, "...", Style::LineNumber); + } else if line_idx_delta == 2 { + let unannotated_line = annotated_file.file + .get_line(annotated_file.lines[line_idx].line_index) + .unwrap_or(""); + + let last_buffer_line_num = buffer.num_lines(); + + buffer.puts(last_buffer_line_num, + 0, + &(annotated_file.lines[line_idx + 1].line_index - 1) + .to_string(), + Style::LineNumber); + draw_col_separator(&mut buffer, last_buffer_line_num, 1 + max_line_num_len); + buffer.puts(last_buffer_line_num, + 3 + max_line_num_len, + &unannotated_line, + Style::Quotation); + } + } + } + } + + // final step: take our styled buffer, render it, then output it + emit_to_destination(&buffer.render(), level, &mut self.dst)?; + + Ok(()) + } + fn emit_suggestion_default(&mut self, + suggestion: &CodeSuggestion, + level: &Level, + msg: &str, + max_line_num_len: usize) + -> io::Result<()> { + use std::borrow::Borrow; + + let primary_span = suggestion.msp.primary_span().unwrap(); + if let Some(ref cm) = self.cm { + let mut buffer = StyledBuffer::new(); + + buffer.append(0, &level.to_string(), Style::Level(level.clone())); + buffer.append(0, ": ", Style::HeaderMsg); + buffer.append(0, msg, Style::HeaderMsg); + + let lines = cm.span_to_lines(primary_span).unwrap(); + + 
assert!(!lines.lines.is_empty()); + + let complete = suggestion.splice_lines(cm.borrow()); + + // print the suggestion without any line numbers, but leave + // space for them. This helps with lining up with previous + // snippets from the actual error being reported. + let mut lines = complete.lines(); + let mut row_num = 1; + for line in lines.by_ref().take(MAX_HIGHLIGHT_LINES) { + draw_col_separator(&mut buffer, row_num, max_line_num_len + 1); + buffer.append(row_num, line, Style::NoStyle); + row_num += 1; + } + + // if we elided some lines, add an ellipsis + if let Some(_) = lines.next() { + buffer.append(row_num, "...", Style::NoStyle); + } + emit_to_destination(&buffer.render(), level, &mut self.dst)?; + } + Ok(()) + } + fn emit_messages_default(&mut self, + level: &Level, + message: &String, + code: &Option, + span: &MultiSpan, + children: &Vec) { + let max_line_num = self.get_max_line_num(span, children); + let max_line_num_len = max_line_num.to_string().len(); + + match self.emit_message_default(span, message, code, level, max_line_num_len, false) { + Ok(()) => { + if !children.is_empty() { + let mut buffer = StyledBuffer::new(); + draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1); + match emit_to_destination(&buffer.render(), level, &mut self.dst) { + Ok(()) => (), + Err(e) => panic!("failed to emit error: {}", e) + } + } + for child in children { + match child.render_span { + Some(FullSpan(ref msp)) => { + match self.emit_message_default(msp, + &child.message, + &None, + &child.level, + max_line_num_len, + true) { + Err(e) => panic!("failed to emit error: {}", e), + _ => () + } + }, + Some(Suggestion(ref cs)) => { + match self.emit_suggestion_default(cs, + &child.level, + &child.message, + max_line_num_len) { + Err(e) => panic!("failed to emit error: {}", e), + _ => () + } + }, + None => { + match self.emit_message_default(&child.span, + &child.message, + &None, + &child.level, + max_line_num_len, + true) { + Err(e) => panic!("failed to 
emit error: {}", e), + _ => () + } + } + } + } + } + Err(e) => panic!("failed to emit error: {}", e), + } + match write!(&mut self.dst, "\n") { + Err(e) => panic!("failed to emit error: {}", e), + _ => { + match self.dst.flush() { + Err(e) => panic!("failed to emit error: {}", e), + _ => (), + } + } + } + } +} + +fn draw_col_separator(buffer: &mut StyledBuffer, line: usize, col: usize) { + buffer.puts(line, col, "| ", Style::LineNumber); +} + +fn draw_col_separator_no_space(buffer: &mut StyledBuffer, line: usize, col: usize) { + draw_col_separator_no_space_with_style(buffer, line, col, Style::LineNumber); +} + +fn draw_col_separator_no_space_with_style(buffer: &mut StyledBuffer, + line: usize, + col: usize, + style: Style) { + buffer.putc(line, col, '|', style); +} + +fn draw_range(buffer: &mut StyledBuffer, symbol: char, line: usize, + col_from: usize, col_to: usize, style: Style) { + for col in col_from..col_to { + buffer.putc(line, col, symbol, style); + } +} + +fn draw_note_separator(buffer: &mut StyledBuffer, line: usize, col: usize) { + buffer.puts(line, col, "= ", Style::LineNumber); +} + +fn num_overlap(a_start: usize, a_end: usize, b_start: usize, b_end:usize, inclusive: bool) -> bool { + let extra = if inclusive { + 1 + } else { + 0 + }; + (b_start..b_end + extra).contains(a_start) || + (a_start..a_end + extra).contains(b_start) +} +fn overlaps(a1: &Annotation, a2: &Annotation) -> bool { + num_overlap(a1.start_col, a1.end_col, a2.start_col, a2.end_col, false) +} + +fn emit_to_destination(rendered_buffer: &Vec>, + lvl: &Level, + dst: &mut Destination) + -> io::Result<()> { + use lock; + + // In order to prevent error message interleaving, where multiple error lines get intermixed + // when multiple compiler processes error simultaneously, we emit errors with additional + // steps. + // + // On Unix systems, we write into a buffered terminal rather than directly to a terminal. 
When + // the .flush() is called we take the buffer created from the buffered writes and write it at + // one shot. Because the Unix systems use ANSI for the colors, which is a text-based styling + // scheme, this buffered approach works and maintains the styling. + // + // On Windows, styling happens through calls to a terminal API. This prevents us from using the + // same buffering approach. Instead, we use a global Windows mutex, which we acquire long + // enough to output the full error message, then we release. + let _buffer_lock = lock::acquire_global_lock("rustc_errors"); + for line in rendered_buffer { + for part in line { + dst.apply_style(lvl.clone(), part.style)?; + write!(dst, "{}", part.text)?; + dst.reset_attrs()?; + } + write!(dst, "\n")?; + } + dst.flush()?; + Ok(()) +} + +#[cfg(unix)] +fn stderr_isatty() -> bool { + use libc; + unsafe { libc::isatty(libc::STDERR_FILENO) != 0 } +} +#[cfg(windows)] +fn stderr_isatty() -> bool { + type DWORD = u32; + type BOOL = i32; + type HANDLE = *mut u8; + const STD_ERROR_HANDLE: DWORD = -12i32 as DWORD; + extern "system" { + fn GetStdHandle(which: DWORD) -> HANDLE; + fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: *mut DWORD) -> BOOL; + } + unsafe { + let handle = GetStdHandle(STD_ERROR_HANDLE); + let mut out = 0; + GetConsoleMode(handle, &mut out) != 0 + } +} + +pub type BufferedStderr = term::Terminal + Send; + +pub enum Destination { + Terminal(Box), + BufferedTerminal(Box), + Raw(Box), +} + +/// Buffered writer gives us a way on Unix to buffer up an entire error message before we output +/// it. 
This helps to prevent interleaving of multiple error messages when multiple compiler +/// processes error simultaneously +pub struct BufferedWriter { + buffer: Vec, +} + +impl BufferedWriter { + // note: we use _new because the conditional compilation at its use site may make this + // this function unused on some platforms + fn _new() -> BufferedWriter { + BufferedWriter { buffer: vec![] } + } +} + +impl Write for BufferedWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + for b in buf { + self.buffer.push(*b); + } + Ok(buf.len()) + } + fn flush(&mut self) -> io::Result<()> { + let mut stderr = io::stderr(); + let result = (|| { + stderr.write_all(&self.buffer)?; + stderr.flush() + })(); + self.buffer.clear(); + result + } +} + +impl Destination { + #[cfg(not(windows))] + /// When not on Windows, prefer the buffered terminal so that we can buffer an entire error + /// to be emitted at one time. + fn from_stderr() -> Destination { + let stderr: Option> = + term::TerminfoTerminal::new(BufferedWriter::_new()) + .map(|t| Box::new(t) as Box); + + match stderr { + Some(t) => BufferedTerminal(t), + None => Raw(Box::new(io::stderr())), + } + } + + #[cfg(windows)] + /// Return a normal, unbuffered terminal when on Windows. 
+ fn from_stderr() -> Destination { + let stderr: Option> = term::TerminfoTerminal::new(io::stderr()) + .map(|t| Box::new(t) as Box) + .or_else(|| { + term::WinConsole::new(io::stderr()) + .ok() + .map(|t| Box::new(t) as Box) + }); + + match stderr { + Some(t) => Terminal(t), + None => Raw(Box::new(io::stderr())), + } + } + + fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> { + match style { + Style::FileNameStyle | Style::LineAndColumn => {} + Style::LineNumber => { + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN))?; + } else { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))?; + } + } + Style::ErrorCode => { + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_MAGENTA))?; + } + Style::Quotation => {} + Style::OldSchoolNote => { + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_GREEN))?; + } + Style::OldSchoolNoteText | Style::HeaderMsg => { + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_WHITE))?; + } + } + Style::UnderlinePrimary | Style::LabelPrimary => { + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(lvl.color()))?; + } + Style::UnderlineSecondary | + Style::LabelSecondary => { + self.start_attr(term::Attr::Bold)?; + if cfg!(windows) { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_CYAN))?; + } else { + self.start_attr(term::Attr::ForegroundColor(term::color::BRIGHT_BLUE))?; + } + } + Style::NoStyle => {} + Style::Level(l) => { + self.start_attr(term::Attr::Bold)?; + self.start_attr(term::Attr::ForegroundColor(l.color()))?; + } + } + Ok(()) + } + + fn start_attr(&mut self, attr: term::Attr) -> io::Result<()> { + match *self { + Terminal(ref mut t) => { + t.attr(attr)?; + } + BufferedTerminal(ref mut t) 
=> { + t.attr(attr)?; + } + Raw(_) => {} + } + Ok(()) + } + + fn reset_attrs(&mut self) -> io::Result<()> { + match *self { + Terminal(ref mut t) => { + t.reset()?; + } + BufferedTerminal(ref mut t) => { + t.reset()?; + } + Raw(_) => {} + } + Ok(()) + } +} + +impl Write for Destination { + fn write(&mut self, bytes: &[u8]) -> io::Result { + match *self { + Terminal(ref mut t) => t.write(bytes), + BufferedTerminal(ref mut t) => t.write(bytes), + Raw(ref mut w) => w.write(bytes), + } + } + fn flush(&mut self) -> io::Result<()> { + match *self { + Terminal(ref mut t) => t.flush(), + BufferedTerminal(ref mut t) => t.flush(), + Raw(ref mut w) => w.flush(), + } + } +} diff --git a/src/librustc_errors/lib.rs b/src/librustc_errors/lib.rs new file mode 100644 index 0000000000000..d7c15f550e040 --- /dev/null +++ b/src/librustc_errors/lib.rs @@ -0,0 +1,543 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![crate_name = "rustc_errors"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(custom_attribute)] +#![allow(unused_attributes)] +#![feature(rustc_private)] +#![feature(staged_api)] +#![feature(range_contains)] +#![feature(libc)] +#![feature(unicode)] + +extern crate serialize; +extern crate term; +#[macro_use] +extern crate log; +#[macro_use] +extern crate libc; +extern crate rustc_unicode; +extern crate serialize as rustc_serialize; // used by deriving +extern crate syntax_pos; + +pub use emitter::ColorConfig; + +use self::Level::*; + +use emitter::{Emitter, EmitterWriter}; + +use std::cell::{RefCell, Cell}; +use std::{error, fmt}; +use std::rc::Rc; + +pub mod diagnostic; +pub mod diagnostic_builder; +pub mod emitter; +pub mod snippet; +pub mod registry; +pub mod styled_buffer; +mod lock; + +use syntax_pos::{BytePos, Loc, FileLinesResult, FileName, MultiSpan, Span, NO_EXPANSION}; +use syntax_pos::MacroBacktrace; + +#[derive(Clone, Debug, PartialEq)] +pub enum RenderSpan { + /// A FullSpan renders with both with an initial line for the + /// message, prefixed by file:linenum, followed by a summary of + /// the source code covered by the span. + FullSpan(MultiSpan), + + /// A suggestion renders with both with an initial line for the + /// message, prefixed by file:linenum, followed by a summary + /// of hypothetical source code, where each `String` is spliced + /// into the lines in place of the code covered by each span. 
+ Suggestion(CodeSuggestion), +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CodeSuggestion { + pub msp: MultiSpan, + pub substitutes: Vec, +} + +pub trait CodeMapper { + fn lookup_char_pos(&self, pos: BytePos) -> Loc; + fn span_to_lines(&self, sp: Span) -> FileLinesResult; + fn span_to_string(&self, sp: Span) -> String; + fn span_to_filename(&self, sp: Span) -> FileName; + fn macro_backtrace(&self, span: Span) -> Vec; + fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option; +} + +impl CodeSuggestion { + /// Returns the assembled code suggestion. + pub fn splice_lines(&self, cm: &CodeMapper) -> String { + use syntax_pos::{CharPos, Loc, Pos}; + + fn push_trailing(buf: &mut String, + line_opt: Option<&str>, + lo: &Loc, + hi_opt: Option<&Loc>) { + let (lo, hi_opt) = (lo.col.to_usize(), hi_opt.map(|hi| hi.col.to_usize())); + if let Some(line) = line_opt { + if line.len() > lo { + buf.push_str(match hi_opt { + Some(hi) => &line[lo..hi], + None => &line[lo..], + }); + } + if let None = hi_opt { + buf.push('\n'); + } + } + } + + let mut primary_spans = self.msp.primary_spans().to_owned(); + + assert_eq!(primary_spans.len(), self.substitutes.len()); + if primary_spans.is_empty() { + return format!(""); + } + + // Assumption: all spans are in the same file, and all spans + // are disjoint. Sort in ascending order. + primary_spans.sort_by_key(|sp| sp.lo); + + // Find the bounding span. 
+ let lo = primary_spans.iter().map(|sp| sp.lo).min().unwrap(); + let hi = primary_spans.iter().map(|sp| sp.hi).min().unwrap(); + let bounding_span = Span { + lo: lo, + hi: hi, + expn_id: NO_EXPANSION, + }; + let lines = cm.span_to_lines(bounding_span).unwrap(); + assert!(!lines.lines.is_empty()); + + // To build up the result, we do this for each span: + // - push the line segment trailing the previous span + // (at the beginning a "phantom" span pointing at the start of the line) + // - push lines between the previous and current span (if any) + // - if the previous and current span are not on the same line + // push the line segment leading up to the current span + // - splice in the span substitution + // + // Finally push the trailing line segment of the last span + let fm = &lines.file; + let mut prev_hi = cm.lookup_char_pos(bounding_span.lo); + prev_hi.col = CharPos::from_usize(0); + + let mut prev_line = fm.get_line(lines.lines[0].line_index); + let mut buf = String::new(); + + for (sp, substitute) in primary_spans.iter().zip(self.substitutes.iter()) { + let cur_lo = cm.lookup_char_pos(sp.lo); + if prev_hi.line == cur_lo.line { + push_trailing(&mut buf, prev_line, &prev_hi, Some(&cur_lo)); + } else { + push_trailing(&mut buf, prev_line, &prev_hi, None); + // push lines between the previous and current span (if any) + for idx in prev_hi.line..(cur_lo.line - 1) { + if let Some(line) = fm.get_line(idx) { + buf.push_str(line); + buf.push('\n'); + } + } + if let Some(cur_line) = fm.get_line(cur_lo.line - 1) { + buf.push_str(&cur_line[..cur_lo.col.to_usize()]); + } + } + buf.push_str(substitute); + prev_hi = cm.lookup_char_pos(sp.hi); + prev_line = fm.get_line(prev_hi.line - 1); + } + push_trailing(&mut buf, prev_line, &prev_hi, None); + // remove trailing newline + buf.pop(); + buf + } +} + +/// Used as a return value to signify a fatal error occurred. (It is also +/// used as the argument to panic at the moment, but that will eventually +/// not be true.) 
+#[derive(Copy, Clone, Debug)] +#[must_use] +pub struct FatalError; + +impl fmt::Display for FatalError { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "parser fatal error") + } +} + +impl error::Error for FatalError { + fn description(&self) -> &str { + "The parser has encountered a fatal error" + } +} + +/// Signifies that the compiler died with an explicit call to `.bug` +/// or `.span_bug` rather than a failed assertion, etc. +#[derive(Copy, Clone, Debug)] +pub struct ExplicitBug; + +impl fmt::Display for ExplicitBug { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + write!(f, "parser internal bug") + } +} + +impl error::Error for ExplicitBug { + fn description(&self) -> &str { + "The parser has encountered an internal bug" + } +} + +pub use diagnostic::{Diagnostic, SubDiagnostic}; +pub use diagnostic_builder::DiagnosticBuilder; + +/// A handler deals with errors; certain errors +/// (fatal, bug, unimpl) may cause immediate exit, +/// others log errors for later reporting. 
+pub struct Handler { + err_count: Cell, + emitter: RefCell>, + pub can_emit_warnings: bool, + treat_err_as_bug: bool, + continue_after_error: Cell, + delayed_span_bug: RefCell>, +} + +impl Handler { + pub fn with_tty_emitter(color_config: ColorConfig, + can_emit_warnings: bool, + treat_err_as_bug: bool, + cm: Option>) + -> Handler { + let emitter = Box::new(EmitterWriter::stderr(color_config, cm)); + Handler::with_emitter(can_emit_warnings, treat_err_as_bug, emitter) + } + + pub fn with_emitter(can_emit_warnings: bool, + treat_err_as_bug: bool, + e: Box) + -> Handler { + Handler { + err_count: Cell::new(0), + emitter: RefCell::new(e), + can_emit_warnings: can_emit_warnings, + treat_err_as_bug: treat_err_as_bug, + continue_after_error: Cell::new(true), + delayed_span_bug: RefCell::new(None), + } + } + + pub fn set_continue_after_error(&self, continue_after_error: bool) { + self.continue_after_error.set(continue_after_error); + } + + pub fn struct_dummy<'a>(&'a self) -> DiagnosticBuilder<'a> { + DiagnosticBuilder::new(self, Level::Cancelled, "") + } + + pub fn struct_span_warn<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Warning, msg); + result.set_span(sp); + if !self.can_emit_warnings { + result.cancel(); + } + result + } + pub fn struct_span_warn_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Warning, msg); + result.set_span(sp); + result.code(code.to_owned()); + if !self.can_emit_warnings { + result.cancel(); + } + result + } + pub fn struct_warn<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Warning, msg); + if !self.can_emit_warnings { + result.cancel(); + } + result + } + pub fn struct_span_err<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, 
Level::Error, msg); + result.set_span(sp); + result + } + pub fn struct_span_err_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Error, msg); + result.set_span(sp); + result.code(code.to_owned()); + result + } + pub fn struct_err<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { + DiagnosticBuilder::new(self, Level::Error, msg) + } + pub fn struct_span_fatal<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Fatal, msg); + result.set_span(sp); + result + } + pub fn struct_span_fatal_with_code<'a, S: Into>(&'a self, + sp: S, + msg: &str, + code: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Fatal, msg); + result.set_span(sp); + result.code(code.to_owned()); + result + } + pub fn struct_fatal<'a>(&'a self, msg: &str) -> DiagnosticBuilder<'a> { + DiagnosticBuilder::new(self, Level::Fatal, msg) + } + + pub fn cancel(&self, err: &mut DiagnosticBuilder) { + err.cancel(); + } + + fn panic_if_treat_err_as_bug(&self) { + if self.treat_err_as_bug { + panic!("encountered error with `-Z treat_err_as_bug"); + } + } + + pub fn span_fatal>(&self, sp: S, msg: &str) -> FatalError { + self.emit(&sp.into(), msg, Fatal); + self.panic_if_treat_err_as_bug(); + return FatalError; + } + pub fn span_fatal_with_code>(&self, + sp: S, + msg: &str, + code: &str) + -> FatalError { + self.emit_with_code(&sp.into(), msg, code, Fatal); + self.panic_if_treat_err_as_bug(); + return FatalError; + } + pub fn span_err>(&self, sp: S, msg: &str) { + self.emit(&sp.into(), msg, Error); + self.panic_if_treat_err_as_bug(); + } + pub fn mut_span_err<'a, S: Into>(&'a self, + sp: S, + msg: &str) + -> DiagnosticBuilder<'a> { + let mut result = DiagnosticBuilder::new(self, Level::Error, msg); + result.set_span(sp); + result + } + pub fn span_err_with_code>(&self, sp: S, msg: &str, code: 
&str) { + self.emit_with_code(&sp.into(), msg, code, Error); + self.panic_if_treat_err_as_bug(); + } + pub fn span_warn>(&self, sp: S, msg: &str) { + self.emit(&sp.into(), msg, Warning); + } + pub fn span_warn_with_code>(&self, sp: S, msg: &str, code: &str) { + self.emit_with_code(&sp.into(), msg, code, Warning); + } + pub fn span_bug>(&self, sp: S, msg: &str) -> ! { + self.emit(&sp.into(), msg, Bug); + panic!(ExplicitBug); + } + pub fn delay_span_bug>(&self, sp: S, msg: &str) { + let mut delayed = self.delayed_span_bug.borrow_mut(); + *delayed = Some((sp.into(), msg.to_string())); + } + pub fn span_bug_no_panic>(&self, sp: S, msg: &str) { + self.emit(&sp.into(), msg, Bug); + } + pub fn span_note_without_error>(&self, sp: S, msg: &str) { + self.emit(&sp.into(), msg, Note); + } + pub fn span_unimpl>(&self, sp: S, msg: &str) -> ! { + self.span_bug(sp, &format!("unimplemented {}", msg)); + } + pub fn fatal(&self, msg: &str) -> FatalError { + if self.treat_err_as_bug { + self.bug(msg); + } + let mut db = DiagnosticBuilder::new(self, Fatal, msg); + db.emit(); + FatalError + } + pub fn err(&self, msg: &str) { + if self.treat_err_as_bug { + self.bug(msg); + } + let mut db = DiagnosticBuilder::new(self, Error, msg); + db.emit(); + } + pub fn warn(&self, msg: &str) { + let mut db = DiagnosticBuilder::new(self, Warning, msg); + db.emit(); + } + pub fn note_without_error(&self, msg: &str) { + let mut db = DiagnosticBuilder::new(self, Note, msg); + db.emit(); + } + pub fn bug(&self, msg: &str) -> ! { + let mut db = DiagnosticBuilder::new(self, Bug, msg); + db.emit(); + panic!(ExplicitBug); + } + pub fn unimpl(&self, msg: &str) -> ! 
{ + self.bug(&format!("unimplemented {}", msg)); + } + + pub fn bump_err_count(&self) { + self.err_count.set(self.err_count.get() + 1); + } + + pub fn err_count(&self) -> usize { + self.err_count.get() + } + + pub fn has_errors(&self) -> bool { + self.err_count.get() > 0 + } + pub fn abort_if_errors(&self) { + let s; + match self.err_count.get() { + 0 => { + let delayed_bug = self.delayed_span_bug.borrow(); + match *delayed_bug { + Some((ref span, ref errmsg)) => { + self.span_bug(span.clone(), errmsg); + } + _ => {} + } + + return; + } + 1 => s = "aborting due to previous error".to_string(), + _ => { + s = format!("aborting due to {} previous errors", self.err_count.get()); + } + } + + panic!(self.fatal(&s)); + } + pub fn emit(&self, msp: &MultiSpan, msg: &str, lvl: Level) { + if lvl == Warning && !self.can_emit_warnings { + return; + } + let mut db = DiagnosticBuilder::new(self, lvl, msg); + db.set_span(msp.clone()); + db.emit(); + if !self.continue_after_error.get() { + self.abort_if_errors(); + } + } + pub fn emit_with_code(&self, msp: &MultiSpan, msg: &str, code: &str, lvl: Level) { + if lvl == Warning && !self.can_emit_warnings { + return; + } + let mut db = DiagnosticBuilder::new_with_code(self, lvl, Some(code.to_owned()), msg); + db.set_span(msp.clone()); + db.emit(); + if !self.continue_after_error.get() { + self.abort_if_errors(); + } + } +} + + +#[derive(Copy, PartialEq, Clone, Debug)] +pub enum Level { + Bug, + Fatal, + // An error which while not immediately fatal, should stop the compiler + // progressing beyond the current phase. 
+ PhaseFatal, + Error, + Warning, + Note, + Help, + Cancelled, +} + +impl fmt::Display for Level { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.to_str().fmt(f) + } +} + +impl Level { + pub fn color(self) -> term::color::Color { + match self { + Bug | Fatal | PhaseFatal | Error => term::color::BRIGHT_RED, + Warning => { + if cfg!(windows) { + term::color::BRIGHT_YELLOW + } else { + term::color::YELLOW + } + } + Note => term::color::BRIGHT_GREEN, + Help => term::color::BRIGHT_CYAN, + Cancelled => unreachable!(), + } + } + + pub fn to_str(self) -> &'static str { + match self { + Bug => "error: internal compiler error", + Fatal | PhaseFatal | Error => "error", + Warning => "warning", + Note => "note", + Help => "help", + Cancelled => panic!("Shouldn't call on cancelled error"), + } + } +} + +pub fn expect(diag: &Handler, opt: Option, msg: M) -> T + where M: FnOnce() -> String +{ + match opt { + Some(t) => t, + None => diag.bug(&msg()), + } +} diff --git a/src/librustc_errors/lock.rs b/src/librustc_errors/lock.rs new file mode 100644 index 0000000000000..4c298228c37c7 --- /dev/null +++ b/src/librustc_errors/lock.rs @@ -0,0 +1,115 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Bindings to acquire a global named lock. +//! +//! This is intended to be used to synchronize multiple compiler processes to +//! ensure that we can output complete errors without interleaving on Windows. +//! Note that this is currently only needed for allowing only one 32-bit MSVC +//! linker to execute at once on MSVC hosts, so this is only implemented for +//! `cfg(windows)`. Also note that this may not always be used on Windows, +//! 
only when targeting 32-bit MSVC. +//! +//! For more information about why this is necessary, see where this is called. + +use std::any::Any; + +#[cfg(windows)] +#[allow(bad_style)] +pub fn acquire_global_lock(name: &str) -> Box { + use std::ffi::CString; + use std::io; + + type LPSECURITY_ATTRIBUTES = *mut u8; + type BOOL = i32; + type LPCSTR = *const u8; + type HANDLE = *mut u8; + type DWORD = u32; + + const INFINITE: DWORD = !0; + const WAIT_OBJECT_0: DWORD = 0; + const WAIT_ABANDONED: DWORD = 0x00000080; + + extern "system" { + fn CreateMutexA(lpMutexAttributes: LPSECURITY_ATTRIBUTES, + bInitialOwner: BOOL, + lpName: LPCSTR) + -> HANDLE; + fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD; + fn ReleaseMutex(hMutex: HANDLE) -> BOOL; + fn CloseHandle(hObject: HANDLE) -> BOOL; + } + + struct Handle(HANDLE); + + impl Drop for Handle { + fn drop(&mut self) { + unsafe { + CloseHandle(self.0); + } + } + } + + struct Guard(Handle); + + impl Drop for Guard { + fn drop(&mut self) { + unsafe { + ReleaseMutex((self.0).0); + } + } + } + + let cname = CString::new(name).unwrap(); + unsafe { + // Create a named mutex, with no security attributes and also not + // acquired when we create it. + // + // This will silently create one if it doesn't already exist, or it'll + // open up a handle to one if it already exists. + let mutex = CreateMutexA(0 as *mut _, 0, cname.as_ptr() as *const u8); + if mutex.is_null() { + panic!("failed to create global mutex named `{}`: {}", + name, + io::Error::last_os_error()); + } + let mutex = Handle(mutex); + + // Acquire the lock through `WaitForSingleObject`. + // + // A return value of `WAIT_OBJECT_0` means we successfully acquired it. + // + // A return value of `WAIT_ABANDONED` means that the previous holder of + // the thread exited without calling `ReleaseMutex`. This can happen, + // for example, when the compiler crashes or is interrupted via ctrl-c + // or the like. 
In this case, however, we are still transferred + // ownership of the lock so we continue. + // + // If an error happens.. well... that's surprising! + match WaitForSingleObject(mutex.0, INFINITE) { + WAIT_OBJECT_0 | WAIT_ABANDONED => {} + code => { + panic!("WaitForSingleObject failed on global mutex named \ + `{}`: {} (ret={:x})", + name, + io::Error::last_os_error(), + code); + } + } + + // Return a guard which will call `ReleaseMutex` when dropped. + Box::new(Guard(mutex)) + } +} + +#[cfg(unix)] +pub fn acquire_global_lock(_name: &str) -> Box { + Box::new(()) +} diff --git a/src/librustc_errors/registry.rs b/src/librustc_errors/registry.rs new file mode 100644 index 0000000000000..83737681471e2 --- /dev/null +++ b/src/librustc_errors/registry.rs @@ -0,0 +1,26 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::collections::HashMap; + +#[derive(Clone)] +pub struct Registry { + descriptions: HashMap<&'static str, &'static str>, +} + +impl Registry { + pub fn new(descriptions: &[(&'static str, &'static str)]) -> Registry { + Registry { descriptions: descriptions.iter().cloned().collect() } + } + + pub fn find_description(&self, code: &str) -> Option<&'static str> { + self.descriptions.get(code).cloned() + } +} diff --git a/src/librustc_errors/snippet.rs b/src/librustc_errors/snippet.rs new file mode 100644 index 0000000000000..b8c1726443db3 --- /dev/null +++ b/src/librustc_errors/snippet.rs @@ -0,0 +1,188 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Code for annotating snippets. + +use syntax_pos::{Span, FileMap}; +use CodeMapper; +use std::rc::Rc; +use Level; + +#[derive(Clone)] +pub struct SnippetData { + codemap: Rc, + files: Vec, +} + +#[derive(Clone)] +pub struct FileInfo { + file: Rc, + + /// The "primary file", if any, gets a `-->` marker instead of + /// `>>>`, and has a line-number/column printed and not just a + /// filename. It appears first in the listing. It is known to + /// contain at least one primary span, though primary spans (which + /// are designated with `^^^`) may also occur in other files. + primary_span: Option, + + lines: Vec, +} + +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct Line { + pub line_index: usize, + pub annotations: Vec, +} + + +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct MultilineAnnotation { + pub depth: usize, + pub line_start: usize, + pub line_end: usize, + pub start_col: usize, + pub end_col: usize, + pub is_primary: bool, + pub label: Option, +} + +impl MultilineAnnotation { + pub fn increase_depth(&mut self) { + self.depth += 1; + } + + pub fn as_start(&self) -> Annotation { + Annotation { + start_col: self.start_col, + end_col: self.start_col + 1, + is_primary: self.is_primary, + label: Some("starting here...".to_owned()), + annotation_type: AnnotationType::MultilineStart(self.depth) + } + } + + pub fn as_end(&self) -> Annotation { + Annotation { + start_col: self.end_col - 1, + end_col: self.end_col, + is_primary: self.is_primary, + label: match self.label { + Some(ref label) => Some(format!("...ending here: {}", label)), + None => Some("...ending here".to_owned()), + }, + annotation_type: AnnotationType::MultilineEnd(self.depth) + } + } + + pub fn as_line(&self) -> Annotation { + Annotation { + start_col: 0, + end_col: 0, + 
is_primary: self.is_primary, + label: None, + annotation_type: AnnotationType::MultilineLine(self.depth) + } + } +} + +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub enum AnnotationType { + /// Annotation under a single line of code + Singleline, + + /// Annotation under the first character of a multiline span + Minimized, + + /// Annotation enclosing the first and last character of a multiline span + Multiline(MultilineAnnotation), + + // The Multiline type above is replaced with the following three in order + // to reuse the current label drawing code. + // + // Each of these corresponds to one part of the following diagram: + // + // x | foo(1 + bar(x, + // | _________^ starting here... < MultilineStart + // x | | y), < MultilineLine + // | |______________^ ...ending here: label < MultilineEnd + // x | z); + /// Annotation marking the first character of a fully shown multiline span + MultilineStart(usize), + /// Annotation marking the last character of a fully shown multiline span + MultilineEnd(usize), + /// Line at the left enclosing the lines of a fully shown multiline span + MultilineLine(usize), +} + +#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +pub struct Annotation { + /// Start column, 0-based indexing -- counting *characters*, not + /// utf-8 bytes. Note that it is important that this field goes + /// first, so that when we sort, we sort orderings by start + /// column. + pub start_col: usize, + + /// End column within the line (exclusive) + pub end_col: usize, + + /// Is this annotation derived from primary span + pub is_primary: bool, + + /// Optional label to display adjacent to the annotation. + pub label: Option, + + /// Is this a single line, multiline or multiline span minimized down to a + /// smaller span. 
+ pub annotation_type: AnnotationType, +} + +impl Annotation { + pub fn is_minimized(&self) -> bool { + match self.annotation_type { + AnnotationType::Minimized => true, + _ => false, + } + } + + pub fn is_multiline(&self) -> bool { + match self.annotation_type { + AnnotationType::Multiline(_) | + AnnotationType::MultilineStart(_) | + AnnotationType::MultilineLine(_) | + AnnotationType::MultilineEnd(_) => true, + _ => false, + } + } + +} + +#[derive(Debug)] +pub struct StyledString { + pub text: String, + pub style: Style, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum Style { + HeaderMsg, + FileNameStyle, + LineAndColumn, + LineNumber, + Quotation, + UnderlinePrimary, + UnderlineSecondary, + LabelPrimary, + LabelSecondary, + OldSchoolNoteText, + OldSchoolNote, + NoStyle, + ErrorCode, + Level(Level), +} diff --git a/src/librustc_errors/styled_buffer.rs b/src/librustc_errors/styled_buffer.rs new file mode 100644 index 0000000000000..dfc7c64de0197 --- /dev/null +++ b/src/librustc_errors/styled_buffer.rs @@ -0,0 +1,145 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// Code for creating styled buffers + +use snippet::{Style, StyledString}; + +#[derive(Debug)] +pub struct StyledBuffer { + text: Vec>, + styles: Vec>, +} + +impl StyledBuffer { + pub fn new() -> StyledBuffer { + StyledBuffer { + text: vec![], + styles: vec![], + } + } + + pub fn copy_tabs(&mut self, row: usize) { + if row < self.text.len() { + for i in row + 1..self.text.len() { + for j in 0..self.text[i].len() { + if self.text[row].len() > j && self.text[row][j] == '\t' && + self.text[i][j] == ' ' { + self.text[i][j] = '\t'; + } + } + } + } + } + + pub fn render(&mut self) -> Vec> { + let mut output: Vec> = vec![]; + let mut styled_vec: Vec = vec![]; + + // before we render, do a little patch-up work to support tabs + self.copy_tabs(3); + + for (row, row_style) in self.text.iter().zip(&self.styles) { + let mut current_style = Style::NoStyle; + let mut current_text = String::new(); + + for (&c, &s) in row.iter().zip(row_style) { + if s != current_style { + if !current_text.is_empty() { + styled_vec.push(StyledString { + text: current_text, + style: current_style, + }); + } + current_style = s; + current_text = String::new(); + } + current_text.push(c); + } + if !current_text.is_empty() { + styled_vec.push(StyledString { + text: current_text, + style: current_style, + }); + } + + // We're done with the row, push and keep going + output.push(styled_vec); + + styled_vec = vec![]; + } + + output + } + + fn ensure_lines(&mut self, line: usize) { + while line >= self.text.len() { + self.text.push(vec![]); + self.styles.push(vec![]); + } + } + + pub fn putc(&mut self, line: usize, col: usize, chr: char, style: Style) { + self.ensure_lines(line); + if col < self.text[line].len() { + self.text[line][col] = chr; + self.styles[line][col] = style; + } else { + let mut i = self.text[line].len(); + while i < col { + self.text[line].push(' '); + self.styles[line].push(Style::NoStyle); + i += 1; + } + self.text[line].push(chr); + self.styles[line].push(style); + } + } + + pub 
fn puts(&mut self, line: usize, col: usize, string: &str, style: Style) { + let mut n = col; + for c in string.chars() { + self.putc(line, n, c, style); + n += 1; + } + } + + pub fn set_style(&mut self, line: usize, col: usize, style: Style) { + if self.styles.len() > line && self.styles[line].len() > col { + self.styles[line][col] = style; + } + } + + pub fn prepend(&mut self, line: usize, string: &str, style: Style) { + self.ensure_lines(line); + let string_len = string.len(); + + // Push the old content over to make room for new content + for _ in 0..string_len { + self.styles[line].insert(0, Style::NoStyle); + self.text[line].insert(0, ' '); + } + + self.puts(line, 0, string, style); + } + + pub fn append(&mut self, line: usize, string: &str, style: Style) { + if line >= self.text.len() { + self.puts(line, 0, string, style); + } else { + let col = self.text[line].len(); + self.puts(line, col, string, style); + } + } + + pub fn num_lines(&self) -> usize { + self.text.len() + } +} diff --git a/src/librustc_front/fold.rs b/src/librustc_front/fold.rs deleted file mode 100644 index e456b1eadf5d7..0000000000000 --- a/src/librustc_front/fold.rs +++ /dev/null @@ -1,1176 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A Folder represents an HIR->HIR fold; it accepts a HIR piece, -//! and returns a piece of the same type. 
- -use hir::*; -use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, Attribute, Attribute_, MetaItem}; -use syntax::ast::{MetaWord, MetaList, MetaNameValue}; -use syntax::attr::ThinAttributesExt; -use hir; -use syntax::codemap::{respan, Span, Spanned}; -use syntax::ptr::P; -use syntax::parse::token; -use syntax::util::move_map::MoveMap; - -pub trait Folder : Sized { - // Any additions to this trait should happen in form - // of a call to a public `noop_*` function that only calls - // out to the folder again, not other `noop_*` functions. - // - // This is a necessary API workaround to the problem of not - // being able to call out to the super default method - // in an overridden default method. - - fn fold_crate(&mut self, c: Crate) -> Crate { - noop_fold_crate(c, self) - } - - fn fold_meta_items(&mut self, meta_items: HirVec>) -> HirVec> { - noop_fold_meta_items(meta_items, self) - } - - fn fold_meta_item(&mut self, meta_item: P) -> P { - noop_fold_meta_item(meta_item, self) - } - - fn fold_view_path(&mut self, view_path: P) -> P { - noop_fold_view_path(view_path, self) - } - - fn fold_foreign_item(&mut self, ni: ForeignItem) -> ForeignItem { - noop_fold_foreign_item(ni, self) - } - - fn fold_item(&mut self, i: Item) -> Item { - noop_fold_item(i, self) - } - - fn fold_item_id(&mut self, i: ItemId) -> ItemId { - noop_fold_item_id(i, self) - } - - fn fold_struct_field(&mut self, sf: StructField) -> StructField { - noop_fold_struct_field(sf, self) - } - - fn fold_item_underscore(&mut self, i: Item_) -> Item_ { - noop_fold_item_underscore(i, self) - } - - fn fold_trait_item(&mut self, i: TraitItem) -> TraitItem { - noop_fold_trait_item(i, self) - } - - fn fold_impl_item(&mut self, i: ImplItem) -> ImplItem { - noop_fold_impl_item(i, self) - } - - fn fold_fn_decl(&mut self, d: P) -> P { - noop_fold_fn_decl(d, self) - } - - fn fold_block(&mut self, b: P) -> P { - noop_fold_block(b, self) - } - - fn fold_stmt(&mut self, s: Stmt) -> Stmt { - noop_fold_stmt(s, self) - } - - fn 
fold_arm(&mut self, a: Arm) -> Arm { - noop_fold_arm(a, self) - } - - fn fold_pat(&mut self, p: P) -> P { - noop_fold_pat(p, self) - } - - fn fold_decl(&mut self, d: P) -> P { - noop_fold_decl(d, self) - } - - fn fold_expr(&mut self, e: P) -> P { - e.map(|e| noop_fold_expr(e, self)) - } - - fn fold_ty(&mut self, t: P) -> P { - noop_fold_ty(t, self) - } - - fn fold_ty_binding(&mut self, t: TypeBinding) -> TypeBinding { - noop_fold_ty_binding(t, self) - } - - fn fold_mod(&mut self, m: Mod) -> Mod { - noop_fold_mod(m, self) - } - - fn fold_foreign_mod(&mut self, nm: ForeignMod) -> ForeignMod { - noop_fold_foreign_mod(nm, self) - } - - fn fold_variant(&mut self, v: Variant) -> Variant { - noop_fold_variant(v, self) - } - - fn fold_name(&mut self, n: Name) -> Name { - noop_fold_name(n, self) - } - - fn fold_ident(&mut self, i: Ident) -> Ident { - noop_fold_ident(i, self) - } - - fn fold_usize(&mut self, i: usize) -> usize { - noop_fold_usize(i, self) - } - - fn fold_path(&mut self, p: Path) -> Path { - noop_fold_path(p, self) - } - - fn fold_path_parameters(&mut self, p: PathParameters) -> PathParameters { - noop_fold_path_parameters(p, self) - } - - fn fold_angle_bracketed_parameter_data(&mut self, - p: AngleBracketedParameterData) - -> AngleBracketedParameterData { - noop_fold_angle_bracketed_parameter_data(p, self) - } - - fn fold_parenthesized_parameter_data(&mut self, - p: ParenthesizedParameterData) - -> ParenthesizedParameterData { - noop_fold_parenthesized_parameter_data(p, self) - } - - fn fold_local(&mut self, l: P) -> P { - noop_fold_local(l, self) - } - - fn fold_explicit_self(&mut self, es: ExplicitSelf) -> ExplicitSelf { - noop_fold_explicit_self(es, self) - } - - fn fold_explicit_self_underscore(&mut self, es: ExplicitSelf_) -> ExplicitSelf_ { - noop_fold_explicit_self_underscore(es, self) - } - - fn fold_lifetime(&mut self, l: Lifetime) -> Lifetime { - noop_fold_lifetime(l, self) - } - - fn fold_lifetime_def(&mut self, l: LifetimeDef) -> LifetimeDef { - 
noop_fold_lifetime_def(l, self) - } - - fn fold_attribute(&mut self, at: Attribute) -> Option { - noop_fold_attribute(at, self) - } - - fn fold_arg(&mut self, a: Arg) -> Arg { - noop_fold_arg(a, self) - } - - fn fold_generics(&mut self, generics: Generics) -> Generics { - noop_fold_generics(generics, self) - } - - fn fold_trait_ref(&mut self, p: TraitRef) -> TraitRef { - noop_fold_trait_ref(p, self) - } - - fn fold_poly_trait_ref(&mut self, p: PolyTraitRef) -> PolyTraitRef { - noop_fold_poly_trait_ref(p, self) - } - - fn fold_variant_data(&mut self, vdata: VariantData) -> VariantData { - noop_fold_variant_data(vdata, self) - } - - fn fold_lifetimes(&mut self, lts: HirVec) -> HirVec { - noop_fold_lifetimes(lts, self) - } - - fn fold_lifetime_defs(&mut self, lts: HirVec) -> HirVec { - noop_fold_lifetime_defs(lts, self) - } - - fn fold_ty_param(&mut self, tp: TyParam) -> TyParam { - noop_fold_ty_param(tp, self) - } - - fn fold_ty_params(&mut self, tps: HirVec) -> HirVec { - noop_fold_ty_params(tps, self) - } - - fn fold_opt_lifetime(&mut self, o_lt: Option) -> Option { - noop_fold_opt_lifetime(o_lt, self) - } - - fn fold_opt_bounds(&mut self, - b: Option) - -> Option { - noop_fold_opt_bounds(b, self) - } - - fn fold_bounds(&mut self, b: TyParamBounds) -> TyParamBounds { - noop_fold_bounds(b, self) - } - - fn fold_ty_param_bound(&mut self, tpb: TyParamBound) -> TyParamBound { - noop_fold_ty_param_bound(tpb, self) - } - - fn fold_mt(&mut self, mt: MutTy) -> MutTy { - noop_fold_mt(mt, self) - } - - fn fold_field(&mut self, field: Field) -> Field { - noop_fold_field(field, self) - } - - fn fold_where_clause(&mut self, where_clause: WhereClause) -> WhereClause { - noop_fold_where_clause(where_clause, self) - } - - fn fold_where_predicate(&mut self, where_predicate: WherePredicate) -> WherePredicate { - noop_fold_where_predicate(where_predicate, self) - } - - /// called for the `id` on each declaration - fn new_id(&mut self, i: NodeId) -> NodeId { - i - } - - /// called for 
ids that are references (e.g., ItemDef) - fn map_id(&mut self, i: NodeId) -> NodeId { - i - } - - fn new_span(&mut self, sp: Span) -> Span { - sp - } -} - -pub fn noop_fold_meta_items(meta_items: HirVec>, - fld: &mut T) - -> HirVec> { - meta_items.move_map(|x| fld.fold_meta_item(x)) -} - -pub fn noop_fold_view_path(view_path: P, fld: &mut T) -> P { - view_path.map(|Spanned { node, span }| { - Spanned { - node: match node { - ViewPathSimple(name, path) => { - ViewPathSimple(name, fld.fold_path(path)) - } - ViewPathGlob(path) => { - ViewPathGlob(fld.fold_path(path)) - } - ViewPathList(path, path_list_idents) => { - ViewPathList(fld.fold_path(path), - path_list_idents.move_map(|path_list_ident| { - Spanned { - node: match path_list_ident.node { - PathListIdent { id, name, rename } => PathListIdent { - id: fld.new_id(id), - name: name, - rename: rename, - }, - PathListMod { id, rename } => PathListMod { - id: fld.new_id(id), - rename: rename, - }, - }, - span: fld.new_span(path_list_ident.span), - } - })) - } - }, - span: fld.new_span(span), - } - }) -} - -pub fn fold_attrs(attrs: HirVec, fld: &mut T) -> HirVec { - attrs.move_flat_map(|x| fld.fold_attribute(x)) -} - -pub fn noop_fold_arm(Arm { attrs, pats, guard, body }: Arm, fld: &mut T) -> Arm { - Arm { - attrs: fold_attrs(attrs, fld), - pats: pats.move_map(|x| fld.fold_pat(x)), - guard: guard.map(|x| fld.fold_expr(x)), - body: fld.fold_expr(body), - } -} - -pub fn noop_fold_decl(d: P, fld: &mut T) -> P { - d.map(|Spanned { node, span }| { - match node { - DeclLocal(l) => Spanned { - node: DeclLocal(fld.fold_local(l)), - span: fld.new_span(span), - }, - DeclItem(it) => Spanned { - node: DeclItem(fld.fold_item_id(it)), - span: fld.new_span(span), - }, - } - }) -} - -pub fn noop_fold_ty_binding(b: TypeBinding, fld: &mut T) -> TypeBinding { - TypeBinding { - id: fld.new_id(b.id), - name: b.name, - ty: fld.fold_ty(b.ty), - span: fld.new_span(b.span), - } -} - -pub fn noop_fold_ty(t: P, fld: &mut T) -> P { - t.map(|Ty { 
id, node, span }| { - Ty { - id: fld.new_id(id), - node: match node { - TyInfer => node, - TyVec(ty) => TyVec(fld.fold_ty(ty)), - TyPtr(mt) => TyPtr(fld.fold_mt(mt)), - TyRptr(region, mt) => { - TyRptr(fld.fold_opt_lifetime(region), fld.fold_mt(mt)) - } - TyBareFn(f) => { - TyBareFn(f.map(|BareFnTy { lifetimes, unsafety, abi, decl }| { - BareFnTy { - lifetimes: fld.fold_lifetime_defs(lifetimes), - unsafety: unsafety, - abi: abi, - decl: fld.fold_fn_decl(decl), - } - })) - } - TyTup(tys) => TyTup(tys.move_map(|ty| fld.fold_ty(ty))), - TyPath(qself, path) => { - let qself = qself.map(|QSelf { ty, position }| { - QSelf { - ty: fld.fold_ty(ty), - position: position, - } - }); - TyPath(qself, fld.fold_path(path)) - } - TyObjectSum(ty, bounds) => { - TyObjectSum(fld.fold_ty(ty), fld.fold_bounds(bounds)) - } - TyFixedLengthVec(ty, e) => { - TyFixedLengthVec(fld.fold_ty(ty), fld.fold_expr(e)) - } - TyTypeof(expr) => { - TyTypeof(fld.fold_expr(expr)) - } - TyPolyTraitRef(bounds) => { - TyPolyTraitRef(bounds.move_map(|b| fld.fold_ty_param_bound(b))) - } - }, - span: fld.new_span(span), - } - }) -} - -pub fn noop_fold_foreign_mod(ForeignMod { abi, items }: ForeignMod, - fld: &mut T) - -> ForeignMod { - ForeignMod { - abi: abi, - items: items.move_map(|x| fld.fold_foreign_item(x)), - } -} - -pub fn noop_fold_variant(v: Variant, fld: &mut T) -> Variant { - Spanned { - node: Variant_ { - name: v.node.name, - attrs: fold_attrs(v.node.attrs, fld), - data: fld.fold_variant_data(v.node.data), - disr_expr: v.node.disr_expr.map(|e| fld.fold_expr(e)), - }, - span: fld.new_span(v.span), - } -} - -pub fn noop_fold_name(n: Name, _: &mut T) -> Name { - n -} - -pub fn noop_fold_ident(i: Ident, _: &mut T) -> Ident { - i -} - -pub fn noop_fold_usize(i: usize, _: &mut T) -> usize { - i -} - -pub fn noop_fold_path(Path { global, segments, span }: Path, fld: &mut T) -> Path { - Path { - global: global, - segments: segments.move_map(|PathSegment { identifier, parameters }| { - PathSegment { - 
identifier: fld.fold_ident(identifier), - parameters: fld.fold_path_parameters(parameters), - } - }), - span: fld.new_span(span), - } -} - -pub fn noop_fold_path_parameters(path_parameters: PathParameters, - fld: &mut T) - -> PathParameters { - match path_parameters { - AngleBracketedParameters(data) => - AngleBracketedParameters(fld.fold_angle_bracketed_parameter_data(data)), - ParenthesizedParameters(data) => - ParenthesizedParameters(fld.fold_parenthesized_parameter_data(data)), - } -} - -pub fn noop_fold_angle_bracketed_parameter_data(data: AngleBracketedParameterData, - fld: &mut T) - -> AngleBracketedParameterData { - let AngleBracketedParameterData { lifetimes, types, bindings } = data; - AngleBracketedParameterData { - lifetimes: fld.fold_lifetimes(lifetimes), - types: types.move_map(|ty| fld.fold_ty(ty)), - bindings: bindings.move_map(|b| fld.fold_ty_binding(b)), - } -} - -pub fn noop_fold_parenthesized_parameter_data(data: ParenthesizedParameterData, - fld: &mut T) - -> ParenthesizedParameterData { - let ParenthesizedParameterData { inputs, output, span } = data; - ParenthesizedParameterData { - inputs: inputs.move_map(|ty| fld.fold_ty(ty)), - output: output.map(|ty| fld.fold_ty(ty)), - span: fld.new_span(span), - } -} - -pub fn noop_fold_local(l: P, fld: &mut T) -> P { - l.map(|Local { id, pat, ty, init, span, attrs }| { - Local { - id: fld.new_id(id), - ty: ty.map(|t| fld.fold_ty(t)), - pat: fld.fold_pat(pat), - init: init.map(|e| fld.fold_expr(e)), - span: fld.new_span(span), - attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), fld).into()), - } - }) -} - -pub fn noop_fold_attribute(at: Attribute, fld: &mut T) -> Option { - let Spanned {node: Attribute_ {id, style, value, is_sugared_doc}, span} = at; - Some(Spanned { - node: Attribute_ { - id: id, - style: style, - value: fld.fold_meta_item(value), - is_sugared_doc: is_sugared_doc, - }, - span: fld.new_span(span), - }) -} - -pub fn noop_fold_explicit_self_underscore(es: ExplicitSelf_, - fld: 
&mut T) - -> ExplicitSelf_ { - match es { - SelfStatic | SelfValue(_) => es, - SelfRegion(lifetime, m, name) => { - SelfRegion(fld.fold_opt_lifetime(lifetime), m, name) - } - SelfExplicit(typ, name) => { - SelfExplicit(fld.fold_ty(typ), name) - } - } -} - -pub fn noop_fold_explicit_self(Spanned { span, node }: ExplicitSelf, - fld: &mut T) - -> ExplicitSelf { - Spanned { - node: fld.fold_explicit_self_underscore(node), - span: fld.new_span(span), - } -} - -pub fn noop_fold_meta_item(mi: P, fld: &mut T) -> P { - mi.map(|Spanned { node, span }| { - Spanned { - node: match node { - MetaWord(id) => MetaWord(id), - MetaList(id, mis) => { - MetaList(id, mis.move_map(|e| fld.fold_meta_item(e))) - } - MetaNameValue(id, s) => MetaNameValue(id, s), - }, - span: fld.new_span(span), - } - }) -} - -pub fn noop_fold_arg(Arg { id, pat, ty }: Arg, fld: &mut T) -> Arg { - Arg { - id: fld.new_id(id), - pat: fld.fold_pat(pat), - ty: fld.fold_ty(ty), - } -} - -pub fn noop_fold_fn_decl(decl: P, fld: &mut T) -> P { - decl.map(|FnDecl { inputs, output, variadic }| { - FnDecl { - inputs: inputs.move_map(|x| fld.fold_arg(x)), - output: match output { - Return(ty) => Return(fld.fold_ty(ty)), - DefaultReturn(span) => DefaultReturn(span), - NoReturn(span) => NoReturn(span), - }, - variadic: variadic, - } - }) -} - -pub fn noop_fold_ty_param_bound(tpb: TyParamBound, fld: &mut T) -> TyParamBound - where T: Folder -{ - match tpb { - TraitTyParamBound(ty, modifier) => TraitTyParamBound(fld.fold_poly_trait_ref(ty), modifier), - RegionTyParamBound(lifetime) => RegionTyParamBound(fld.fold_lifetime(lifetime)), - } -} - -pub fn noop_fold_ty_param(tp: TyParam, fld: &mut T) -> TyParam { - let TyParam {id, name, bounds, default, span} = tp; - TyParam { - id: fld.new_id(id), - name: name, - bounds: fld.fold_bounds(bounds), - default: default.map(|x| fld.fold_ty(x)), - span: span, - } -} - -pub fn noop_fold_ty_params(tps: HirVec, - fld: &mut T) - -> HirVec { - tps.move_map(|tp| fld.fold_ty_param(tp)) -} - 
-pub fn noop_fold_lifetime(l: Lifetime, fld: &mut T) -> Lifetime { - Lifetime { - id: fld.new_id(l.id), - name: l.name, - span: fld.new_span(l.span), - } -} - -pub fn noop_fold_lifetime_def(l: LifetimeDef, fld: &mut T) -> LifetimeDef { - LifetimeDef { - lifetime: fld.fold_lifetime(l.lifetime), - bounds: fld.fold_lifetimes(l.bounds), - } -} - -pub fn noop_fold_lifetimes(lts: HirVec, fld: &mut T) -> HirVec { - lts.move_map(|l| fld.fold_lifetime(l)) -} - -pub fn noop_fold_lifetime_defs(lts: HirVec, - fld: &mut T) - -> HirVec { - lts.move_map(|l| fld.fold_lifetime_def(l)) -} - -pub fn noop_fold_opt_lifetime(o_lt: Option, fld: &mut T) -> Option { - o_lt.map(|lt| fld.fold_lifetime(lt)) -} - -pub fn noop_fold_generics(Generics { ty_params, lifetimes, where_clause }: Generics, - fld: &mut T) - -> Generics { - Generics { - ty_params: fld.fold_ty_params(ty_params), - lifetimes: fld.fold_lifetime_defs(lifetimes), - where_clause: fld.fold_where_clause(where_clause), - } -} - -pub fn noop_fold_where_clause(WhereClause { id, predicates }: WhereClause, - fld: &mut T) - -> WhereClause { - WhereClause { - id: fld.new_id(id), - predicates: predicates.move_map(|predicate| fld.fold_where_predicate(predicate)), - } -} - -pub fn noop_fold_where_predicate(pred: WherePredicate, fld: &mut T) -> WherePredicate { - match pred { - hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{bound_lifetimes, - bounded_ty, - bounds, - span}) => { - hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { - bound_lifetimes: fld.fold_lifetime_defs(bound_lifetimes), - bounded_ty: fld.fold_ty(bounded_ty), - bounds: bounds.move_map(|x| fld.fold_ty_param_bound(x)), - span: fld.new_span(span), - }) - } - hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{lifetime, - bounds, - span}) => { - hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { - span: fld.new_span(span), - lifetime: fld.fold_lifetime(lifetime), - bounds: bounds.move_map(|bound| fld.fold_lifetime(bound)), - 
}) - } - hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{id, - path, - ty, - span}) => { - hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { - id: fld.new_id(id), - path: fld.fold_path(path), - ty: fld.fold_ty(ty), - span: fld.new_span(span), - }) - } - } -} - -pub fn noop_fold_variant_data(vdata: VariantData, fld: &mut T) -> VariantData { - match vdata { - VariantData::Struct(fields, id) => { - VariantData::Struct(fields.move_map(|f| fld.fold_struct_field(f)), - fld.new_id(id)) - } - VariantData::Tuple(fields, id) => { - VariantData::Tuple(fields.move_map(|f| fld.fold_struct_field(f)), - fld.new_id(id)) - } - VariantData::Unit(id) => VariantData::Unit(fld.new_id(id)), - } -} - -pub fn noop_fold_trait_ref(p: TraitRef, fld: &mut T) -> TraitRef { - let id = fld.new_id(p.ref_id); - let TraitRef { - path, - ref_id: _, - } = p; - hir::TraitRef { - path: fld.fold_path(path), - ref_id: id, - } -} - -pub fn noop_fold_poly_trait_ref(p: PolyTraitRef, fld: &mut T) -> PolyTraitRef { - hir::PolyTraitRef { - bound_lifetimes: fld.fold_lifetime_defs(p.bound_lifetimes), - trait_ref: fld.fold_trait_ref(p.trait_ref), - span: fld.new_span(p.span), - } -} - -pub fn noop_fold_struct_field(f: StructField, fld: &mut T) -> StructField { - let StructField {node: StructField_ {id, kind, ty, attrs}, span} = f; - Spanned { - node: StructField_ { - id: fld.new_id(id), - kind: kind, - ty: fld.fold_ty(ty), - attrs: fold_attrs(attrs, fld), - }, - span: fld.new_span(span), - } -} - -pub fn noop_fold_field(Field { name, expr, span }: Field, folder: &mut T) -> Field { - Field { - name: respan(folder.new_span(name.span), folder.fold_name(name.node)), - expr: folder.fold_expr(expr), - span: folder.new_span(span), - } -} - -pub fn noop_fold_mt(MutTy { ty, mutbl }: MutTy, folder: &mut T) -> MutTy { - MutTy { - ty: folder.fold_ty(ty), - mutbl: mutbl, - } -} - -pub fn noop_fold_opt_bounds(b: Option, - folder: &mut T) - -> Option { - b.map(|bounds| folder.fold_bounds(bounds)) -} - -fn 
noop_fold_bounds(bounds: TyParamBounds, folder: &mut T) -> TyParamBounds { - bounds.move_map(|bound| folder.fold_ty_param_bound(bound)) -} - -pub fn noop_fold_block(b: P, folder: &mut T) -> P { - b.map(|Block { id, stmts, expr, rules, span }| { - Block { - id: folder.new_id(id), - stmts: stmts.move_map(|s| folder.fold_stmt(s)), - expr: expr.map(|x| folder.fold_expr(x)), - rules: rules, - span: folder.new_span(span), - } - }) -} - -pub fn noop_fold_item_underscore(i: Item_, folder: &mut T) -> Item_ { - match i { - ItemExternCrate(string) => ItemExternCrate(string), - ItemUse(view_path) => { - ItemUse(folder.fold_view_path(view_path)) - } - ItemStatic(t, m, e) => { - ItemStatic(folder.fold_ty(t), m, folder.fold_expr(e)) - } - ItemConst(t, e) => { - ItemConst(folder.fold_ty(t), folder.fold_expr(e)) - } - ItemFn(decl, unsafety, constness, abi, generics, body) => { - ItemFn(folder.fold_fn_decl(decl), - unsafety, - constness, - abi, - folder.fold_generics(generics), - folder.fold_block(body)) - } - ItemMod(m) => ItemMod(folder.fold_mod(m)), - ItemForeignMod(nm) => ItemForeignMod(folder.fold_foreign_mod(nm)), - ItemTy(t, generics) => { - ItemTy(folder.fold_ty(t), folder.fold_generics(generics)) - } - ItemEnum(enum_definition, generics) => { - ItemEnum(hir::EnumDef { - variants: enum_definition.variants.move_map(|x| folder.fold_variant(x)), - }, - folder.fold_generics(generics)) - } - ItemStruct(struct_def, generics) => { - let struct_def = folder.fold_variant_data(struct_def); - ItemStruct(struct_def, folder.fold_generics(generics)) - } - ItemDefaultImpl(unsafety, ref trait_ref) => { - ItemDefaultImpl(unsafety, folder.fold_trait_ref((*trait_ref).clone())) - } - ItemImpl(unsafety, polarity, generics, ifce, ty, impl_items) => { - let new_impl_items = impl_items - .move_map(|item| folder.fold_impl_item(item)); - let ifce = match ifce { - None => None, - Some(ref trait_ref) => { - Some(folder.fold_trait_ref((*trait_ref).clone())) - } - }; - ItemImpl(unsafety, - polarity, - 
folder.fold_generics(generics), - ifce, - folder.fold_ty(ty), - new_impl_items) - } - ItemTrait(unsafety, generics, bounds, items) => { - let bounds = folder.fold_bounds(bounds); - let items = items.move_map(|item| folder.fold_trait_item(item)); - ItemTrait(unsafety, folder.fold_generics(generics), bounds, items) - } - } -} - -pub fn noop_fold_trait_item(i: TraitItem, - folder: &mut T) - -> TraitItem { - TraitItem { - id: folder.new_id(i.id), - name: folder.fold_name(i.name), - attrs: fold_attrs(i.attrs, folder), - node: match i.node { - ConstTraitItem(ty, default) => { - ConstTraitItem(folder.fold_ty(ty), default.map(|x| folder.fold_expr(x))) - } - MethodTraitItem(sig, body) => { - MethodTraitItem(noop_fold_method_sig(sig, folder), - body.map(|x| folder.fold_block(x))) - } - TypeTraitItem(bounds, default) => { - TypeTraitItem(folder.fold_bounds(bounds), - default.map(|x| folder.fold_ty(x))) - } - }, - span: folder.new_span(i.span), - } -} - -pub fn noop_fold_impl_item(i: ImplItem, folder: &mut T) -> ImplItem { - ImplItem { - id: folder.new_id(i.id), - name: folder.fold_name(i.name), - attrs: fold_attrs(i.attrs, folder), - vis: i.vis, - node: match i.node { - ImplItemKind::Const(ty, expr) => { - ImplItemKind::Const(folder.fold_ty(ty), folder.fold_expr(expr)) - } - ImplItemKind::Method(sig, body) => { - ImplItemKind::Method(noop_fold_method_sig(sig, folder), folder.fold_block(body)) - } - ImplItemKind::Type(ty) => ImplItemKind::Type(folder.fold_ty(ty)), - }, - span: folder.new_span(i.span), - } -} - -pub fn noop_fold_mod(Mod { inner, item_ids }: Mod, folder: &mut T) -> Mod { - Mod { - inner: folder.new_span(inner), - item_ids: item_ids.move_map(|x| folder.fold_item_id(x)), - } -} - -pub fn noop_fold_crate(Crate { module, attrs, config, span, - exported_macros, items }: Crate, - folder: &mut T) - -> Crate { - let config = folder.fold_meta_items(config); - - let crate_mod = folder.fold_item(hir::Item { - name: token::special_idents::invalid.name, - attrs: attrs, - id: 
DUMMY_NODE_ID, - vis: hir::Public, - span: span, - node: hir::ItemMod(module), - }); - - let (module, attrs, span) = match crate_mod { - hir::Item { attrs, span, node, .. } => { - match node { - hir::ItemMod(m) => (m, attrs, span), - _ => panic!("fold converted a module to not a module"), - } - } - }; - - let items = items.into_iter() - .map(|(id, item)| (id, folder.fold_item(item))) - .collect(); - - Crate { - module: module, - attrs: attrs, - config: config, - span: span, - exported_macros: exported_macros, - items: items, - } -} - -pub fn noop_fold_item_id(i: ItemId, folder: &mut T) -> ItemId { - let id = folder.map_id(i.id); - ItemId { id: id } -} - -// fold one item into one item -pub fn noop_fold_item(item: Item, folder: &mut T) -> Item { - let Item { id, name, attrs, node, vis, span } = item; - let id = folder.new_id(id); - let node = folder.fold_item_underscore(node); - // FIXME: we should update the impl_pretty_name, but it uses pretty printing. - // let ident = match node { - // // The node may have changed, recompute the "pretty" impl name. 
- // ItemImpl(_, _, _, ref maybe_trait, ref ty, _) => { - // impl_pretty_name(maybe_trait, Some(&**ty)) - // } - // _ => ident - // }; - - Item { - id: id, - name: folder.fold_name(name), - attrs: fold_attrs(attrs, folder), - node: node, - vis: vis, - span: folder.new_span(span), - } -} - -pub fn noop_fold_foreign_item(ni: ForeignItem, folder: &mut T) -> ForeignItem { - ForeignItem { - id: folder.new_id(ni.id), - name: folder.fold_name(ni.name), - attrs: fold_attrs(ni.attrs, folder), - node: match ni.node { - ForeignItemFn(fdec, generics) => { - ForeignItemFn(folder.fold_fn_decl(fdec), folder.fold_generics(generics)) - } - ForeignItemStatic(t, m) => { - ForeignItemStatic(folder.fold_ty(t), m) - } - }, - vis: ni.vis, - span: folder.new_span(ni.span), - } -} - -pub fn noop_fold_method_sig(sig: MethodSig, folder: &mut T) -> MethodSig { - MethodSig { - generics: folder.fold_generics(sig.generics), - abi: sig.abi, - explicit_self: folder.fold_explicit_self(sig.explicit_self), - unsafety: sig.unsafety, - constness: sig.constness, - decl: folder.fold_fn_decl(sig.decl), - } -} - -pub fn noop_fold_pat(p: P, folder: &mut T) -> P { - p.map(|Pat { id, node, span }| { - Pat { - id: folder.new_id(id), - node: match node { - PatWild => PatWild, - PatIdent(binding_mode, pth1, sub) => { - PatIdent(binding_mode, - Spanned { - span: folder.new_span(pth1.span), - node: folder.fold_ident(pth1.node), - }, - sub.map(|x| folder.fold_pat(x))) - } - PatLit(e) => PatLit(folder.fold_expr(e)), - PatEnum(pth, pats) => { - PatEnum(folder.fold_path(pth), - pats.map(|pats| pats.move_map(|x| folder.fold_pat(x)))) - } - PatQPath(qself, pth) => { - let qself = QSelf { ty: folder.fold_ty(qself.ty), ..qself }; - PatQPath(qself, folder.fold_path(pth)) - } - PatStruct(pth, fields, etc) => { - let pth = folder.fold_path(pth); - let fs = fields.move_map(|f| { - Spanned { - span: folder.new_span(f.span), - node: hir::FieldPat { - name: f.node.name, - pat: folder.fold_pat(f.node.pat), - is_shorthand: 
f.node.is_shorthand, - }, - } - }); - PatStruct(pth, fs, etc) - } - PatTup(elts) => PatTup(elts.move_map(|x| folder.fold_pat(x))), - PatBox(inner) => PatBox(folder.fold_pat(inner)), - PatRegion(inner, mutbl) => PatRegion(folder.fold_pat(inner), mutbl), - PatRange(e1, e2) => { - PatRange(folder.fold_expr(e1), folder.fold_expr(e2)) - } - PatVec(before, slice, after) => { - PatVec(before.move_map(|x| folder.fold_pat(x)), - slice.map(|x| folder.fold_pat(x)), - after.move_map(|x| folder.fold_pat(x))) - } - }, - span: folder.new_span(span), - } - }) -} - -pub fn noop_fold_expr(Expr { id, node, span, attrs }: Expr, folder: &mut T) -> Expr { - Expr { - id: folder.new_id(id), - node: match node { - ExprBox(e) => { - ExprBox(folder.fold_expr(e)) - } - ExprVec(exprs) => { - ExprVec(exprs.move_map(|x| folder.fold_expr(x))) - } - ExprRepeat(expr, count) => { - ExprRepeat(folder.fold_expr(expr), folder.fold_expr(count)) - } - ExprTup(elts) => ExprTup(elts.move_map(|x| folder.fold_expr(x))), - ExprCall(f, args) => { - ExprCall(folder.fold_expr(f), args.move_map(|x| folder.fold_expr(x))) - } - ExprMethodCall(name, tps, args) => { - ExprMethodCall(respan(folder.new_span(name.span), folder.fold_name(name.node)), - tps.move_map(|x| folder.fold_ty(x)), - args.move_map(|x| folder.fold_expr(x))) - } - ExprBinary(binop, lhs, rhs) => { - ExprBinary(binop, folder.fold_expr(lhs), folder.fold_expr(rhs)) - } - ExprUnary(binop, ohs) => { - ExprUnary(binop, folder.fold_expr(ohs)) - } - ExprLit(l) => ExprLit(l), - ExprCast(expr, ty) => { - ExprCast(folder.fold_expr(expr), folder.fold_ty(ty)) - } - ExprType(expr, ty) => { - ExprType(folder.fold_expr(expr), folder.fold_ty(ty)) - } - ExprAddrOf(m, ohs) => ExprAddrOf(m, folder.fold_expr(ohs)), - ExprIf(cond, tr, fl) => { - ExprIf(folder.fold_expr(cond), - folder.fold_block(tr), - fl.map(|x| folder.fold_expr(x))) - } - ExprWhile(cond, body, opt_ident) => { - ExprWhile(folder.fold_expr(cond), - folder.fold_block(body), - opt_ident.map(|i| 
folder.fold_ident(i))) - } - ExprLoop(body, opt_ident) => { - ExprLoop(folder.fold_block(body), - opt_ident.map(|i| folder.fold_ident(i))) - } - ExprMatch(expr, arms, source) => { - ExprMatch(folder.fold_expr(expr), - arms.move_map(|x| folder.fold_arm(x)), - source) - } - ExprClosure(capture_clause, decl, body) => { - ExprClosure(capture_clause, - folder.fold_fn_decl(decl), - folder.fold_block(body)) - } - ExprBlock(blk) => ExprBlock(folder.fold_block(blk)), - ExprAssign(el, er) => { - ExprAssign(folder.fold_expr(el), folder.fold_expr(er)) - } - ExprAssignOp(op, el, er) => { - ExprAssignOp(op, folder.fold_expr(el), folder.fold_expr(er)) - } - ExprField(el, name) => { - ExprField(folder.fold_expr(el), - respan(folder.new_span(name.span), folder.fold_name(name.node))) - } - ExprTupField(el, index) => { - ExprTupField(folder.fold_expr(el), - respan(folder.new_span(index.span), folder.fold_usize(index.node))) - } - ExprIndex(el, er) => { - ExprIndex(folder.fold_expr(el), folder.fold_expr(er)) - } - ExprRange(e1, e2) => { - ExprRange(e1.map(|x| folder.fold_expr(x)), - e2.map(|x| folder.fold_expr(x))) - } - ExprPath(qself, path) => { - let qself = qself.map(|QSelf { ty, position }| { - QSelf { - ty: folder.fold_ty(ty), - position: position, - } - }); - ExprPath(qself, folder.fold_path(path)) - } - ExprBreak(opt_ident) => ExprBreak(opt_ident.map(|label| { - respan(folder.new_span(label.span), folder.fold_ident(label.node)) - })), - ExprAgain(opt_ident) => ExprAgain(opt_ident.map(|label| { - respan(folder.new_span(label.span), folder.fold_ident(label.node)) - })), - ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))), - ExprInlineAsm(InlineAsm { - inputs, - outputs, - asm, - asm_str_style, - clobbers, - volatile, - alignstack, - dialect, - expn_id, - }) => ExprInlineAsm(InlineAsm { - inputs: inputs.move_map(|(c, input)| (c, folder.fold_expr(input))), - outputs: outputs.move_map(|out| { - InlineAsmOutput { - constraint: out.constraint, - expr: folder.fold_expr(out.expr), 
- is_rw: out.is_rw, - is_indirect: out.is_indirect, - } - }), - asm: asm, - asm_str_style: asm_str_style, - clobbers: clobbers, - volatile: volatile, - alignstack: alignstack, - dialect: dialect, - expn_id: expn_id, - }), - ExprStruct(path, fields, maybe_expr) => { - ExprStruct(folder.fold_path(path), - fields.move_map(|x| folder.fold_field(x)), - maybe_expr.map(|x| folder.fold_expr(x))) - } - }, - span: folder.new_span(span), - attrs: attrs.map_thin_attrs(|attrs| fold_attrs(attrs.into(), folder).into()), - } -} - -pub fn noop_fold_stmt(stmt: Stmt, folder: &mut T) -> Stmt { - let span = folder.new_span(stmt.span); - match stmt.node { - StmtDecl(d, id) => { - let id = folder.new_id(id); - Spanned { - node: StmtDecl(folder.fold_decl(d), id), - span: span - } - } - StmtExpr(e, id) => { - let id = folder.new_id(id); - Spanned { - node: StmtExpr(folder.fold_expr(e), id), - span: span, - } - } - StmtSemi(e, id) => { - let id = folder.new_id(id); - Spanned { - node: StmtSemi(folder.fold_expr(e), id), - span: span, - } - } - } -} diff --git a/src/librustc_front/hir.rs b/src/librustc_front/hir.rs deleted file mode 100644 index 2625e34c820e4..0000000000000 --- a/src/librustc_front/hir.rs +++ /dev/null @@ -1,1444 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// The Rust HIR. 
- -pub use self::BindingMode::*; -pub use self::BinOp_::*; -pub use self::BlockCheckMode::*; -pub use self::CaptureClause::*; -pub use self::Decl_::*; -pub use self::ExplicitSelf_::*; -pub use self::Expr_::*; -pub use self::FunctionRetTy::*; -pub use self::ForeignItem_::*; -pub use self::Item_::*; -pub use self::Mutability::*; -pub use self::Pat_::*; -pub use self::PathListItem_::*; -pub use self::PrimTy::*; -pub use self::Stmt_::*; -pub use self::StructFieldKind::*; -pub use self::TraitItem_::*; -pub use self::Ty_::*; -pub use self::TyParamBound::*; -pub use self::UnOp::*; -pub use self::UnsafeSource::*; -pub use self::ViewPath_::*; -pub use self::Visibility::*; -pub use self::PathParameters::*; - -use intravisit::Visitor; -use std::collections::BTreeMap; -use syntax::codemap::{self, Span, Spanned, DUMMY_SP, ExpnId}; -use syntax::abi::Abi; -use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect}; -use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem}; -use syntax::attr::ThinAttributes; -use syntax::parse::token::InternedString; -use syntax::ptr::P; - -use print::pprust; -use util; - -use std::fmt; -use std::hash::{Hash, Hasher}; -use serialize::{Encodable, Decodable, Encoder, Decoder}; - -/// HIR doesn't commit to a concrete storage type and have its own alias for a vector. -/// It can be `Vec`, `P<[T]>` or potentially `Box<[T]>`, or some other container with similar -/// behavior. Unlike AST, HIR is mostly a static structure, so we can use an owned slice instead -/// of `Vec` to avoid keeping extra capacity. -pub type HirVec = P<[T]>; - -macro_rules! 
hir_vec { - ($elem:expr; $n:expr) => ( - $crate::hir::HirVec::from(vec![$elem; $n]) - ); - ($($x:expr),*) => ( - $crate::hir::HirVec::from(vec![$($x),*]) - ); - ($($x:expr,)*) => (vec![$($x),*]) -} - -/// Identifier in HIR -#[derive(Clone, Copy, Eq)] -pub struct Ident { - /// Hygienic name (renamed), should be used by default - pub name: Name, - /// Unhygienic name (original, not renamed), needed in few places in name resolution - pub unhygienic_name: Name, -} - -impl Ident { - /// Creates a HIR identifier with both `name` and `unhygienic_name` initialized with - /// the argument. Hygiene properties of the created identifier depend entirely on this - /// argument. If the argument is a plain interned string `intern("iter")`, then the result - /// is unhygienic and can interfere with other entities named "iter". If the argument is - /// a "fresh" name created with `gensym("iter")`, then the result is hygienic and can't - /// interfere with other entities having the same string as a name. - pub fn from_name(name: Name) -> Ident { - Ident { name: name, unhygienic_name: name } - } -} - -impl PartialEq for Ident { - fn eq(&self, other: &Ident) -> bool { - self.name == other.name - } -} - -impl Hash for Ident { - fn hash(&self, state: &mut H) { - self.name.hash(state) - } -} - -impl fmt::Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.name, f) - } -} - -impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.name, f) - } -} - -impl Encodable for Ident { - fn encode(&self, s: &mut S) -> Result<(), S::Error> { - self.name.encode(s) - } -} - -impl Decodable for Ident { - fn decode(d: &mut D) -> Result { - Ok(Ident::from_name(try!(Name::decode(d)))) - } -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] -pub struct Lifetime { - pub id: NodeId, - pub span: Span, - pub name: Name, -} - -impl fmt::Debug for Lifetime { - fn fmt(&self, f: &mut 
fmt::Formatter) -> fmt::Result { - write!(f, - "lifetime({}: {})", - self.id, - pprust::lifetime_to_string(self)) - } -} - -/// A lifetime definition, eg `'a: 'b+'c+'d` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct LifetimeDef { - pub lifetime: Lifetime, - pub bounds: HirVec, -} - -/// A "Path" is essentially Rust's notion of a name; for instance: -/// std::cmp::PartialEq . It's represented as a sequence of identifiers, -/// along with a bunch of supporting information. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub struct Path { - pub span: Span, - /// A `::foo` path, is relative to the crate root rather than current - /// module (like paths in an import). - pub global: bool, - /// The segments in the path: the things separated by `::`. - pub segments: HirVec, -} - -impl fmt::Debug for Path { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "path({})", pprust::path_to_string(self)) - } -} - -impl fmt::Display for Path { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", pprust::path_to_string(self)) - } -} - -/// A segment of a path: an identifier, an optional lifetime, and a set of -/// types. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct PathSegment { - /// The identifier portion of this path segment. - /// - /// Hygiene properties of this identifier are worth noting. - /// Most path segments are not hygienic and they are not renamed during - /// lowering from AST to HIR (see comments to `fn lower_path`). However segments from - /// unqualified paths with one segment originating from `ExprPath` (local-variable-like paths) - /// can be hygienic, so they are renamed. You should not normally care about this peculiarity - /// and just use `identifier.name` unless you modify identifier resolution code - /// (`fn resolve_identifier` and other functions called by it in `rustc_resolve`). 
- pub identifier: Ident, - - /// Type/lifetime parameters attached to this path. They come in - /// two flavors: `Path` and `Path(A,B) -> C`. Note that - /// this is more than just simple syntactic sugar; the use of - /// parens affects the region binding rules, so we preserve the - /// distinction. - pub parameters: PathParameters, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum PathParameters { - /// The `<'a, A,B,C>` in `foo::bar::baz::<'a, A,B,C>` - AngleBracketedParameters(AngleBracketedParameterData), - /// The `(A,B)` and `C` in `Foo(A,B) -> C` - ParenthesizedParameters(ParenthesizedParameterData), -} - -impl PathParameters { - pub fn none() -> PathParameters { - AngleBracketedParameters(AngleBracketedParameterData { - lifetimes: HirVec::new(), - types: HirVec::new(), - bindings: HirVec::new(), - }) - } - - pub fn is_empty(&self) -> bool { - match *self { - AngleBracketedParameters(ref data) => data.is_empty(), - - // Even if the user supplied no types, something like - // `X()` is equivalent to `X<(),()>`. - ParenthesizedParameters(..) => false, - } - } - - pub fn has_lifetimes(&self) -> bool { - match *self { - AngleBracketedParameters(ref data) => !data.lifetimes.is_empty(), - ParenthesizedParameters(_) => false, - } - } - - pub fn has_types(&self) -> bool { - match *self { - AngleBracketedParameters(ref data) => !data.types.is_empty(), - ParenthesizedParameters(..) => true, - } - } - - /// Returns the types that the user wrote. Note that these do not necessarily map to the type - /// parameters in the parenthesized case. 
- pub fn types(&self) -> HirVec<&P> { - match *self { - AngleBracketedParameters(ref data) => { - data.types.iter().collect() - } - ParenthesizedParameters(ref data) => { - data.inputs - .iter() - .chain(data.output.iter()) - .collect() - } - } - } - - pub fn lifetimes(&self) -> HirVec<&Lifetime> { - match *self { - AngleBracketedParameters(ref data) => { - data.lifetimes.iter().collect() - } - ParenthesizedParameters(_) => { - HirVec::new() - } - } - } - - pub fn bindings(&self) -> HirVec<&TypeBinding> { - match *self { - AngleBracketedParameters(ref data) => { - data.bindings.iter().collect() - } - ParenthesizedParameters(_) => { - HirVec::new() - } - } - } -} - -/// A path like `Foo<'a, T>` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct AngleBracketedParameterData { - /// The lifetime parameters for this path segment. - pub lifetimes: HirVec, - /// The type parameters for this path segment, if present. - pub types: HirVec>, - /// Bindings (equality constraints) on associated types, if present. - /// E.g., `Foo`. - pub bindings: HirVec, -} - -impl AngleBracketedParameterData { - fn is_empty(&self) -> bool { - self.lifetimes.is_empty() && self.types.is_empty() && self.bindings.is_empty() - } -} - -/// A path like `Foo(A,B) -> C` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct ParenthesizedParameterData { - /// Overall span - pub span: Span, - - /// `(A,B)` - pub inputs: HirVec>, - - /// `C` - pub output: Option>, -} - -/// The AST represents all type param bounds as types. -/// typeck::collect::compute_bounds matches these against -/// the "special" built-in traits (see middle::lang_items) and -/// detects Copy, Send and Sync. 
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum TyParamBound { - TraitTyParamBound(PolyTraitRef, TraitBoundModifier), - RegionTyParamBound(Lifetime), -} - -/// A modifier on a bound, currently this is only used for `?Sized`, where the -/// modifier is `Maybe`. Negative bounds should also be handled here. -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum TraitBoundModifier { - None, - Maybe, -} - -pub type TyParamBounds = HirVec; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct TyParam { - pub name: Name, - pub id: NodeId, - pub bounds: TyParamBounds, - pub default: Option>, - pub span: Span, -} - -/// Represents lifetimes and type parameters attached to a declaration -/// of a function, enum, trait, etc. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Generics { - pub lifetimes: HirVec, - pub ty_params: HirVec, - pub where_clause: WhereClause, -} - -impl Generics { - pub fn is_lt_parameterized(&self) -> bool { - !self.lifetimes.is_empty() - } - pub fn is_type_parameterized(&self) -> bool { - !self.ty_params.is_empty() - } - pub fn is_parameterized(&self) -> bool { - self.is_lt_parameterized() || self.is_type_parameterized() - } -} - -/// A `where` clause in a definition -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct WhereClause { - pub id: NodeId, - pub predicates: HirVec, -} - -/// A single predicate in a `where` clause -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum WherePredicate { - /// A type binding, eg `for<'c> Foo: Send+Clone+'c` - BoundPredicate(WhereBoundPredicate), - /// A lifetime predicate, e.g. 
`'a: 'b+'c` - RegionPredicate(WhereRegionPredicate), - /// An equality predicate (unsupported) - EqPredicate(WhereEqPredicate), -} - -/// A type bound, eg `for<'c> Foo: Send+Clone+'c` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct WhereBoundPredicate { - pub span: Span, - /// Any lifetimes from a `for` binding - pub bound_lifetimes: HirVec, - /// The type being bounded - pub bounded_ty: P, - /// Trait and lifetime bounds (`Clone+Send+'static`) - pub bounds: TyParamBounds, -} - -/// A lifetime predicate, e.g. `'a: 'b+'c` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct WhereRegionPredicate { - pub span: Span, - pub lifetime: Lifetime, - pub bounds: HirVec, -} - -/// An equality predicate (unsupported), e.g. `T=int` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct WhereEqPredicate { - pub id: NodeId, - pub span: Span, - pub path: Path, - pub ty: P, -} - -pub type CrateConfig = HirVec>; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)] -pub struct Crate { - pub module: Mod, - pub attrs: HirVec, - pub config: CrateConfig, - pub span: Span, - pub exported_macros: HirVec, - - // NB: We use a BTreeMap here so that `visit_all_items` iterates - // over the ids in increasing order. In principle it should not - // matter what order we visit things in, but in *practice* it - // does, because it can affect the order in which errors are - // detected, which in turn can make compile-fail tests yield - // slightly different results. - pub items: BTreeMap, -} - -impl Crate { - pub fn item(&self, id: NodeId) -> &Item { - &self.items[&id] - } - - /// Visits all items in the crate in some determinstic (but - /// unspecified) order. If you just need to process every item, - /// but don't care about nesting, this method is the best choice. 
- /// - /// If you do care about nesting -- usually because your algorithm - /// follows lexical scoping rules -- then you want a different - /// approach. You should override `visit_nested_item` in your - /// visitor and then call `intravisit::walk_crate` instead. - pub fn visit_all_items<'hir, V:Visitor<'hir>>(&'hir self, visitor: &mut V) { - for (_, item) in &self.items { - visitor.visit_item(item); - } - } -} - -/// A macro definition, in this crate or imported from another. -/// -/// Not parsed directly, but created on macro import or `macro_rules!` expansion. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct MacroDef { - pub name: Name, - pub attrs: HirVec, - pub id: NodeId, - pub span: Span, - pub imported_from: Option, - pub export: bool, - pub use_locally: bool, - pub allow_internal_unstable: bool, - pub body: HirVec, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Block { - /// Statements in a block - pub stmts: HirVec, - /// An expression at the end of the block - /// without a semicolon, if any - pub expr: Option>, - pub id: NodeId, - /// Distinguishes between `unsafe { ... }` and `{ ... 
}` - pub rules: BlockCheckMode, - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub struct Pat { - pub id: NodeId, - pub node: Pat_, - pub span: Span, -} - -impl fmt::Debug for Pat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "pat({}: {})", self.id, pprust::pat_to_string(self)) - } -} - -/// A single field in a struct pattern -/// -/// Patterns like the fields of Foo `{ x, ref y, ref mut z }` -/// are treated the same as` x: x, y: ref y, z: ref mut z`, -/// except is_shorthand is true -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct FieldPat { - /// The identifier for the field - pub name: Name, - /// The pattern the field is destructured to - pub pat: P, - pub is_shorthand: bool, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum BindingMode { - BindByRef(Mutability), - BindByValue(Mutability), -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Pat_ { - /// Represents a wildcard pattern (`_`) - PatWild, - - /// A PatIdent may either be a new bound variable, - /// or a nullary enum (in which case the third field - /// is None). - /// - /// In the nullary enum case, the parser can't determine - /// which it is. The resolver determines this, and - /// records this pattern's NodeId in an auxiliary - /// set (of "PatIdents that refer to nullary enums") - PatIdent(BindingMode, Spanned, Option>), - - /// "None" means a `Variant(..)` pattern where we don't bind the fields to names. - PatEnum(Path, Option>>), - - /// An associated const named using the qualified path `::CONST` or - /// `::CONST`. Associated consts from inherent impls can be - /// referred to as simply `T::CONST`, in which case they will end up as - /// PatEnum, and the resolver will have to sort that out. - PatQPath(QSelf, Path), - - /// Destructuring of a struct, e.g. 
`Foo {x, y, ..}` - /// The `bool` is `true` in the presence of a `..` - PatStruct(Path, HirVec>, bool), - /// A tuple pattern `(a, b)` - PatTup(HirVec>), - /// A `box` pattern - PatBox(P), - /// A reference pattern, e.g. `&mut (a, b)` - PatRegion(P, Mutability), - /// A literal - PatLit(P), - /// A range pattern, e.g. `1...2` - PatRange(P, P), - /// `[a, b, ..i, y, z]` is represented as: - /// `PatVec(box [a, b], Some(i), box [y, z])` - PatVec(HirVec>, Option>, HirVec>), -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum Mutability { - MutMutable, - MutImmutable, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum BinOp_ { - /// The `+` operator (addition) - BiAdd, - /// The `-` operator (subtraction) - BiSub, - /// The `*` operator (multiplication) - BiMul, - /// The `/` operator (division) - BiDiv, - /// The `%` operator (modulus) - BiRem, - /// The `&&` operator (logical and) - BiAnd, - /// The `||` operator (logical or) - BiOr, - /// The `^` operator (bitwise xor) - BiBitXor, - /// The `&` operator (bitwise and) - BiBitAnd, - /// The `|` operator (bitwise or) - BiBitOr, - /// The `<<` operator (shift left) - BiShl, - /// The `>>` operator (shift right) - BiShr, - /// The `==` operator (equality) - BiEq, - /// The `<` operator (less than) - BiLt, - /// The `<=` operator (less than or equal to) - BiLe, - /// The `!=` operator (not equal to) - BiNe, - /// The `>=` operator (greater than or equal to) - BiGe, - /// The `>` operator (greater than) - BiGt, -} - -pub type BinOp = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum UnOp { - /// The `*` operator for dereferencing - UnDeref, - /// The `!` operator for logical inversion - UnNot, - /// The `-` operator for negation - UnNeg, -} - -/// A statement -pub type Stmt = Spanned; - -impl fmt::Debug for Stmt_ { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // 
Sadness. - let spanned = codemap::dummy_spanned(self.clone()); - write!(f, - "stmt({}: {})", - util::stmt_id(&spanned), - pprust::stmt_to_string(&spanned)) - } -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub enum Stmt_ { - /// Could be an item or a local (let) binding: - StmtDecl(P, NodeId), - - /// Expr without trailing semi-colon (must have unit type): - StmtExpr(P, NodeId), - - /// Expr with trailing semi-colon (may have any type): - StmtSemi(P, NodeId), -} - -// FIXME (pending discussion of #1697, #2178...): local should really be -// a refinement on pat. -/// Local represents a `let` statement, e.g., `let : = ;` -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Local { - pub pat: P, - pub ty: Option>, - /// Initializer expression to set the value, if any - pub init: Option>, - pub id: NodeId, - pub span: Span, - pub attrs: ThinAttributes, -} - -pub type Decl = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Decl_ { - /// A local (let) binding: - DeclLocal(P), - /// An item binding: - DeclItem(ItemId), -} - -/// represents one arm of a 'match' -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Arm { - pub attrs: HirVec, - pub pats: HirVec>, - pub guard: Option>, - pub body: P, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Field { - pub name: Spanned, - pub expr: P, - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum BlockCheckMode { - DefaultBlock, - UnsafeBlock(UnsafeSource), - PushUnsafeBlock(UnsafeSource), - PopUnsafeBlock(UnsafeSource), - // Within this block (but outside a PopUnstableBlock), we suspend checking of stability. 
- PushUnstableBlock, - PopUnstableBlock, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum UnsafeSource { - CompilerGenerated, - UserProvided, -} - -/// An expression -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub struct Expr { - pub id: NodeId, - pub node: Expr_, - pub span: Span, - pub attrs: ThinAttributes, -} - -impl fmt::Debug for Expr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "expr({}: {})", self.id, pprust::expr_to_string(self)) - } -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Expr_ { - /// A `box x` expression. - ExprBox(P), - /// An array (`[a, b, c, d]`) - ExprVec(HirVec>), - /// A function call - /// - /// The first field resolves to the function itself, - /// and the second field is the list of arguments - ExprCall(P, HirVec>), - /// A method call (`x.foo::(a, b, c, d)`) - /// - /// The `Spanned` is the identifier for the method name. - /// The vector of `Ty`s are the ascripted type parameters for the method - /// (within the angle brackets). - /// - /// The first element of the vector of `Expr`s is the expression that evaluates - /// to the object on which the method is being called on (the receiver), - /// and the remaining elements are the rest of the arguments. - /// - /// Thus, `x.foo::(a, b, c, d)` is represented as - /// `ExprMethodCall(foo, [Bar, Baz], [x, a, b, c, d])`. 
- ExprMethodCall(Spanned, HirVec>, HirVec>), - /// A tuple (`(a, b, c ,d)`) - ExprTup(HirVec>), - /// A binary operation (For example: `a + b`, `a * b`) - ExprBinary(BinOp, P, P), - /// A unary operation (For example: `!x`, `*x`) - ExprUnary(UnOp, P), - /// A literal (For example: `1u8`, `"foo"`) - ExprLit(P), - /// A cast (`foo as f64`) - ExprCast(P, P), - ExprType(P, P), - /// An `if` block, with an optional else block - /// - /// `if expr { block } else { expr }` - ExprIf(P, P, Option>), - /// A while loop, with an optional label - /// - /// `'label: while expr { block }` - ExprWhile(P, P, Option), - /// Conditionless loop (can be exited with break, continue, or return) - /// - /// `'label: loop { block }` - ExprLoop(P, Option), - /// A `match` block, with a source that indicates whether or not it is - /// the result of a desugaring, and if so, which kind. - ExprMatch(P, HirVec, MatchSource), - /// A closure (for example, `move |a, b, c| {a + b + c}`) - ExprClosure(CaptureClause, P, P), - /// A block (`{ ... }`) - ExprBlock(P), - - /// An assignment (`a = foo()`) - ExprAssign(P, P), - /// An assignment with an operator - /// - /// For example, `a += 1`. - ExprAssignOp(BinOp, P, P), - /// Access of a named struct field (`obj.foo`) - ExprField(P, Spanned), - /// Access of an unnamed field of a struct or tuple-struct - /// - /// For example, `foo.0`. - ExprTupField(P, Spanned), - /// An indexing operation (`foo[2]`) - ExprIndex(P, P), - /// A range (`1..2`, `1..`, or `..2`) - ExprRange(Option>, Option>), - - /// Variable reference, possibly containing `::` and/or type - /// parameters, e.g. foo::bar::. - /// - /// Optionally "qualified", - /// e.g. ` as SomeTrait>::SomeType`. 
- ExprPath(Option, Path), - - /// A referencing operation (`&a` or `&mut a`) - ExprAddrOf(Mutability, P), - /// A `break`, with an optional label to break - ExprBreak(Option>), - /// A `continue`, with an optional label - ExprAgain(Option>), - /// A `return`, with an optional value to be returned - ExprRet(Option>), - - /// Output of the `asm!()` macro - ExprInlineAsm(InlineAsm), - - /// A struct literal expression. - /// - /// For example, `Foo {x: 1, y: 2}`, or - /// `Foo {x: 1, .. base}`, where `base` is the `Option`. - ExprStruct(Path, HirVec, Option>), - - /// A vector literal constructed from one repeated element. - /// - /// For example, `[1u8; 5]`. The first expression is the element - /// to be repeated; the second is the number of times to repeat it. - ExprRepeat(P, P), -} - -/// The explicit Self type in a "qualified path". The actual -/// path, including the trait and the associated item, is stored -/// separately. `position` represents the index of the associated -/// item qualified with this Self type. -/// -/// as a::b::Trait>::AssociatedItem -/// ^~~~~ ~~~~~~~~~~~~~~^ -/// ty position = 3 -/// -/// >::AssociatedItem -/// ^~~~~ ^ -/// ty position = 0 -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct QSelf { - pub ty: P, - pub position: usize, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum MatchSource { - Normal, - IfLetDesugar { - contains_else_clause: bool, - }, - WhileLetDesugar, - ForLoopDesugar, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum CaptureClause { - CaptureByValue, - CaptureByRef, -} - -// NB: If you change this, you'll probably want to change the corresponding -// type structure in middle/ty.rs as well. 
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct MutTy { - pub ty: P, - pub mutbl: Mutability, -} - -/// Represents a method's signature in a trait declaration, -/// or in an implementation. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct MethodSig { - pub unsafety: Unsafety, - pub constness: Constness, - pub abi: Abi, - pub decl: P, - pub generics: Generics, - pub explicit_self: ExplicitSelf, -} - -/// Represents a method declaration in a trait declaration, possibly including -/// a default implementation A trait method is either required (meaning it -/// doesn't have an implementation, just a signature) or provided (meaning it -/// has a default implementation). -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct TraitItem { - pub id: NodeId, - pub name: Name, - pub attrs: HirVec, - pub node: TraitItem_, - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum TraitItem_ { - ConstTraitItem(P, Option>), - MethodTraitItem(MethodSig, Option>), - TypeTraitItem(TyParamBounds, Option>), -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct ImplItem { - pub id: NodeId, - pub name: Name, - pub vis: Visibility, - pub attrs: HirVec, - pub node: ImplItemKind, - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum ImplItemKind { - Const(P, P), - Method(MethodSig, P), - Type(P), -} - -// Bind a type to an associated type: `A=Foo`. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct TypeBinding { - pub id: NodeId, - pub name: Name, - pub ty: P, - pub span: Span, -} - - -// NB PartialEq method appears below. 
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub struct Ty { - pub id: NodeId, - pub node: Ty_, - pub span: Span, -} - -impl fmt::Debug for Ty { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "type({})", pprust::ty_to_string(self)) - } -} - -/// Not represented directly in the AST, referred to by name through a ty_path. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum PrimTy { - TyInt(IntTy), - TyUint(UintTy), - TyFloat(FloatTy), - TyStr, - TyBool, - TyChar, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct BareFnTy { - pub unsafety: Unsafety, - pub abi: Abi, - pub lifetimes: HirVec, - pub decl: P, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -/// The different kinds of types recognized by the compiler -pub enum Ty_ { - TyVec(P), - /// A fixed length array (`[T; n]`) - TyFixedLengthVec(P, P), - /// A raw pointer (`*const T` or `*mut T`) - TyPtr(MutTy), - /// A reference (`&'a T` or `&'a mut T`) - TyRptr(Option, MutTy), - /// A bare function (e.g. `fn(usize) -> bool`) - TyBareFn(P), - /// A tuple (`(A, B, C, D,...)`) - TyTup(HirVec>), - /// A path (`module::module::...::Type`), optionally - /// "qualified", e.g. ` as SomeTrait>::SomeType`. - /// - /// Type parameters are stored in the Path itself - TyPath(Option, Path), - /// Something like `A+B`. Note that `B` must always be a path. - TyObjectSum(P, TyParamBounds), - /// A type like `for<'a> Foo<&'a Bar>` - TyPolyTraitRef(TyParamBounds), - /// Unused for now - TyTypeof(P), - /// TyInfer means the type should be inferred instead of it having been - /// specified. This can appear anywhere in a type. 
- TyInfer, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct InlineAsmOutput { - pub constraint: InternedString, - pub expr: P, - pub is_rw: bool, - pub is_indirect: bool, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct InlineAsm { - pub asm: InternedString, - pub asm_str_style: StrStyle, - pub outputs: HirVec, - pub inputs: HirVec<(InternedString, P)>, - pub clobbers: HirVec, - pub volatile: bool, - pub alignstack: bool, - pub dialect: AsmDialect, - pub expn_id: ExpnId, -} - -/// represents an argument in a function header -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Arg { - pub ty: P, - pub pat: P, - pub id: NodeId, -} - -impl Arg { - pub fn new_self(span: Span, mutability: Mutability, self_ident: Ident) -> Arg { - let path = Spanned { - span: span, - node: self_ident, - }; - Arg { - // HACK(eddyb) fake type for the self argument. - ty: P(Ty { - id: DUMMY_NODE_ID, - node: TyInfer, - span: DUMMY_SP, - }), - pat: P(Pat { - id: DUMMY_NODE_ID, - node: PatIdent(BindByValue(mutability), path, None), - span: span, - }), - id: DUMMY_NODE_ID, - } - } -} - -/// Represents the header (not the body) of a function declaration -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct FnDecl { - pub inputs: HirVec, - pub output: FunctionRetTy, - pub variadic: bool, -} - -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Unsafety { - Unsafe, - Normal, -} - -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Constness { - Const, - NotConst, -} - -impl fmt::Display for Unsafety { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(match *self { - Unsafety::Normal => "normal", - Unsafety::Unsafe => "unsafe", - }, - f) - } -} - -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash)] -pub 
enum ImplPolarity { - /// `impl Trait for Type` - Positive, - /// `impl !Trait for Type` - Negative, -} - -impl fmt::Debug for ImplPolarity { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ImplPolarity::Positive => "positive".fmt(f), - ImplPolarity::Negative => "negative".fmt(f), - } - } -} - - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum FunctionRetTy { - /// Functions with return type `!`that always - /// raise an error or exit (i.e. never return to the caller) - NoReturn(Span), - /// Return type is not specified. - /// - /// Functions default to `()` and - /// closures default to inference. Span points to where return - /// type would be inserted. - DefaultReturn(Span), - /// Everything else - Return(P), -} - -impl FunctionRetTy { - pub fn span(&self) -> Span { - match *self { - NoReturn(span) => span, - DefaultReturn(span) => span, - Return(ref ty) => ty.span, - } - } -} - -/// Represents the kind of 'self' associated with a method -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum ExplicitSelf_ { - /// No self - SelfStatic, - /// `self` - SelfValue(Name), - /// `&'lt self`, `&'lt mut self` - SelfRegion(Option, Mutability, Name), - /// `self: TYPE` - SelfExplicit(P, Name), -} - -pub type ExplicitSelf = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Mod { - /// A span from the first token past `{` to the last token until `}`. - /// For `mod foo;`, the inner span ranges from the first token - /// to the last token in the external file. 
- pub inner: Span, - pub item_ids: HirVec, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct ForeignMod { - pub abi: Abi, - pub items: HirVec, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct EnumDef { - pub variants: HirVec, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Variant_ { - pub name: Name, - pub attrs: HirVec, - pub data: VariantData, - /// Explicit discriminant, eg `Foo = 1` - pub disr_expr: Option>, -} - -pub type Variant = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum PathListItem_ { - PathListIdent { - name: Name, - /// renamed in list, eg `use foo::{bar as baz};` - rename: Option, - id: NodeId, - }, - PathListMod { - /// renamed in list, eg `use foo::{self as baz};` - rename: Option, - id: NodeId, - }, -} - -impl PathListItem_ { - pub fn id(&self) -> NodeId { - match *self { - PathListIdent { id, .. } | PathListMod { id, .. } => id, - } - } - - pub fn name(&self) -> Option { - match *self { - PathListIdent { name, .. } => Some(name), - PathListMod { .. } => None, - } - } - - pub fn rename(&self) -> Option { - match *self { - PathListIdent { rename, .. } | PathListMod { rename, .. } => rename, - } - } -} - -pub type PathListItem = Spanned; - -pub type ViewPath = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum ViewPath_ { - /// `foo::bar::baz as quux` - /// - /// or just - /// - /// `foo::bar::baz` (with `as baz` implicitly on the right) - ViewPathSimple(Name, Path), - - /// `foo::bar::*` - ViewPathGlob(Path), - - /// `foo::bar::{a,b,c}` - ViewPathList(Path, HirVec), -} - -/// TraitRef's appear in impls. -/// -/// resolve maps each TraitRef's ref_id to its defining trait; that's all -/// that the ref_id is for. The impl_id maps to the "self type" of this impl. 
-/// If this impl is an ItemImpl, the impl_id is redundant (it could be the -/// same as the impl's node id). -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct TraitRef { - pub path: Path, - pub ref_id: NodeId, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct PolyTraitRef { - /// The `'a` in `<'a> Foo<&'a T>` - pub bound_lifetimes: HirVec, - - /// The `Foo<&'a T>` in `<'a> Foo<&'a T>` - pub trait_ref: TraitRef, - - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum Visibility { - Public, - Inherited, -} - -impl Visibility { - pub fn inherit_from(&self, parent_visibility: Visibility) -> Visibility { - match self { - &Inherited => parent_visibility, - &Public => *self, - } - } -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct StructField_ { - pub kind: StructFieldKind, - pub id: NodeId, - pub ty: P, - pub attrs: HirVec, -} - -impl StructField_ { - pub fn name(&self) -> Option { - match self.kind { - NamedField(name, _) => Some(name), - UnnamedField(_) => None, - } - } -} - -pub type StructField = Spanned; - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] -pub enum StructFieldKind { - NamedField(Name, Visibility), - /// Element of a tuple-like struct - UnnamedField(Visibility), -} - -impl StructFieldKind { - pub fn is_unnamed(&self) -> bool { - match *self { - UnnamedField(..) => true, - NamedField(..) => false, - } - } - - pub fn visibility(&self) -> Visibility { - match *self { - NamedField(_, vis) | UnnamedField(vis) => vis, - } - } -} - -/// Fields and Ids of enum variants and structs -/// -/// For enum variants: `NodeId` represents both an Id of the variant itself (relevant for all -/// variant kinds) and an Id of the variant's constructor (not relevant for `Struct`-variants). 
-/// One shared Id can be successfully used for these two purposes. -/// Id of the whole enum lives in `Item`. -/// -/// For structs: `NodeId` represents an Id of the structure's constructor, so it is not actually -/// used for `Struct`-structs (but still presents). Structures don't have an analogue of "Id of -/// the variant itself" from enum variants. -/// Id of the whole struct lives in `Item`. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum VariantData { - Struct(HirVec, NodeId), - Tuple(HirVec, NodeId), - Unit(NodeId), -} - -impl VariantData { - pub fn fields(&self) -> &[StructField] { - match *self { - VariantData::Struct(ref fields, _) | VariantData::Tuple(ref fields, _) => fields, - _ => &[], - } - } - pub fn id(&self) -> NodeId { - match *self { - VariantData::Struct(_, id) | VariantData::Tuple(_, id) | VariantData::Unit(id) => id, - } - } - pub fn is_struct(&self) -> bool { - if let VariantData::Struct(..) = *self { - true - } else { - false - } - } - pub fn is_tuple(&self) -> bool { - if let VariantData::Tuple(..) = *self { - true - } else { - false - } - } - pub fn is_unit(&self) -> bool { - if let VariantData::Unit(..) = *self { - true - } else { - false - } - } -} - -// The bodies for items are stored "out of line", in a separate -// hashmap in the `Crate`. Here we just record the node-id of the item -// so it can fetched later. -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct ItemId { - pub id: NodeId, -} - -// FIXME (#3300): Should allow items to be anonymous. Right now -// we just use dummy names for anon items. 
-/// An item -/// -/// The name might be a dummy name in case of anonymous items -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct Item { - pub name: Name, - pub attrs: HirVec, - pub id: NodeId, - pub node: Item_, - pub vis: Visibility, - pub span: Span, -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum Item_ { - /// An`extern crate` item, with optional original crate name, - /// - /// e.g. `extern crate foo` or `extern crate foo_bar as foo` - ItemExternCrate(Option), - /// A `use` or `pub use` item - ItemUse(P), - - /// A `static` item - ItemStatic(P, Mutability, P), - /// A `const` item - ItemConst(P, P), - /// A function declaration - ItemFn(P, Unsafety, Constness, Abi, Generics, P), - /// A module - ItemMod(Mod), - /// An external module - ItemForeignMod(ForeignMod), - /// A type alias, e.g. `type Foo = Bar` - ItemTy(P, Generics), - /// An enum definition, e.g. `enum Foo {C, D}` - ItemEnum(EnumDef, Generics), - /// A struct definition, e.g. `struct Foo {x: A}` - ItemStruct(VariantData, Generics), - /// Represents a Trait Declaration - ItemTrait(Unsafety, Generics, TyParamBounds, HirVec), - - // Default trait implementations - /// - /// `impl Trait for .. {}` - ItemDefaultImpl(Unsafety, TraitRef), - /// An implementation, eg `impl Trait for Foo { .. }` - ItemImpl(Unsafety, - ImplPolarity, - Generics, - Option, // (optional) trait this impl implements - P, // self - HirVec), -} - -impl Item_ { - pub fn descriptive_variant(&self) -> &str { - match *self { - ItemExternCrate(..) => "extern crate", - ItemUse(..) => "use", - ItemStatic(..) => "static item", - ItemConst(..) => "constant item", - ItemFn(..) => "function", - ItemMod(..) => "module", - ItemForeignMod(..) => "foreign module", - ItemTy(..) => "type alias", - ItemEnum(..) => "enum", - ItemStruct(..) => "struct", - ItemTrait(..) => "trait", - ItemImpl(..) | - ItemDefaultImpl(..) 
=> "item", - } - } -} - -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub struct ForeignItem { - pub name: Name, - pub attrs: HirVec, - pub node: ForeignItem_, - pub id: NodeId, - pub span: Span, - pub vis: Visibility, -} - -/// An item within an `extern` block -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] -pub enum ForeignItem_ { - /// A foreign function - ForeignItemFn(P, Generics), - /// A foreign static item (`static ext: u8`), with optional mutability - /// (the boolean is true when mutable) - ForeignItemStatic(P, bool), -} - -impl ForeignItem_ { - pub fn descriptive_variant(&self) -> &str { - match *self { - ForeignItemFn(..) => "foreign function", - ForeignItemStatic(..) => "foreign static item", - } - } -} diff --git a/src/librustc_front/intravisit.rs b/src/librustc_front/intravisit.rs deleted file mode 100644 index 03b021cfa6395..0000000000000 --- a/src/librustc_front/intravisit.rs +++ /dev/null @@ -1,820 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! HIR walker. Each overridden visit method has full control over what -//! happens with its node, it can do its own traversal of the node's children, -//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent -//! deeper traversal by doing nothing. -//! -//! When visiting the HIR, the contents of nested items are NOT visited -//! by default. This is different from the AST visitor, which does a deep walk. -//! Hence this module is called `intravisit`; see the method `visit_nested_item` -//! for more details. -//! -//! Note: it is an important invariant that the default visitor walks -//! 
the body of a function in "execution order" (more concretely, -//! reverse post-order with respect to the CFG implied by the AST), -//! meaning that if AST node A may execute before AST node B, then A -//! is visited first. The borrow checker in particular relies on this -//! property. - -use syntax::abi::Abi; -use syntax::ast::{NodeId, CRATE_NODE_ID, Name, Attribute}; -use syntax::codemap::Span; -use hir::*; - -#[derive(Copy, Clone, PartialEq, Eq)] -pub enum FnKind<'a> { - /// fn foo() or extern "Abi" fn foo() - ItemFn(Name, &'a Generics, Unsafety, Constness, Abi, Visibility), - - /// fn foo(&self) - Method(Name, &'a MethodSig, Option), - - /// |x, y| {} - Closure, -} - -/// Each method of the Visitor trait is a hook to be potentially -/// overridden. Each method's default implementation recursively visits -/// the substructure of the input via the corresponding `walk` method; -/// e.g. the `visit_mod` method by default calls `intravisit::walk_mod`. -/// -/// Note that this visitor does NOT visit nested items by default -/// (this is why the module is called `intravisit`, to distinguish it -/// from the AST's `visit` module, which acts differently). If you -/// simply want to visit all items in the crate in some order, you -/// should call `Crate::visit_all_items`. Otherwise, see the comment -/// on `visit_nested_item` for details on how to visit nested items. -/// -/// If you want to ensure that your code handles every variant -/// explicitly, you need to override each method. (And you also need -/// to monitor future changes to `Visitor` in case a new method with a -/// new default implementation gets introduced.) -pub trait Visitor<'v> : Sized { - /////////////////////////////////////////////////////////////////////////// - // Nested items. - - /// Invoked when a nested item is encountered. By default, does - /// nothing. If you want a deep walk, you need to override to - /// fetch the item contents. 
But most of the time, it is easier - /// (and better) to invoke `Crate::visit_all_items`, which visits - /// all items in the crate in some order (but doesn't respect - /// nesting). - #[allow(unused_variables)] - fn visit_nested_item(&mut self, id: ItemId) { - } - - /// Visit the top-level item and (optionally) nested items. See - /// `visit_nested_item` for details. - fn visit_item(&mut self, i: &'v Item) { - walk_item(self, i) - } - - /////////////////////////////////////////////////////////////////////////// - - fn visit_name(&mut self, _span: Span, _name: Name) { - // Nothing to do. - } - fn visit_ident(&mut self, span: Span, ident: Ident) { - walk_ident(self, span, ident); - } - fn visit_mod(&mut self, m: &'v Mod, _s: Span, _n: NodeId) { - walk_mod(self, m) - } - fn visit_foreign_item(&mut self, i: &'v ForeignItem) { - walk_foreign_item(self, i) - } - fn visit_local(&mut self, l: &'v Local) { - walk_local(self, l) - } - fn visit_block(&mut self, b: &'v Block) { - walk_block(self, b) - } - fn visit_stmt(&mut self, s: &'v Stmt) { - walk_stmt(self, s) - } - fn visit_arm(&mut self, a: &'v Arm) { - walk_arm(self, a) - } - fn visit_pat(&mut self, p: &'v Pat) { - walk_pat(self, p) - } - fn visit_decl(&mut self, d: &'v Decl) { - walk_decl(self, d) - } - fn visit_expr(&mut self, ex: &'v Expr) { - walk_expr(self, ex) - } - fn visit_expr_post(&mut self, _ex: &'v Expr) { - } - fn visit_ty(&mut self, t: &'v Ty) { - walk_ty(self, t) - } - fn visit_generics(&mut self, g: &'v Generics) { - walk_generics(self, g) - } - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: &'v Block, s: Span, _: NodeId) { - walk_fn(self, fk, fd, b, s) - } - fn visit_trait_item(&mut self, ti: &'v TraitItem) { - walk_trait_item(self, ti) - } - fn visit_impl_item(&mut self, ii: &'v ImplItem) { - walk_impl_item(self, ii) - } - fn visit_trait_ref(&mut self, t: &'v TraitRef) { - walk_trait_ref(self, t) - } - fn visit_ty_param_bound(&mut self, bounds: &'v TyParamBound) { - 
walk_ty_param_bound(self, bounds) - } - fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: &'v TraitBoundModifier) { - walk_poly_trait_ref(self, t, m) - } - fn visit_variant_data(&mut self, - s: &'v VariantData, - _: Name, - _: &'v Generics, - _: NodeId, - _: Span) { - walk_struct_def(self, s) - } - fn visit_struct_field(&mut self, s: &'v StructField) { - walk_struct_field(self, s) - } - fn visit_enum_def(&mut self, - enum_definition: &'v EnumDef, - generics: &'v Generics, - item_id: NodeId, - _: Span) { - walk_enum_def(self, enum_definition, generics, item_id) - } - fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) { - walk_variant(self, v, g, item_id) - } - fn visit_lifetime(&mut self, lifetime: &'v Lifetime) { - walk_lifetime(self, lifetime) - } - fn visit_lifetime_def(&mut self, lifetime: &'v LifetimeDef) { - walk_lifetime_def(self, lifetime) - } - fn visit_explicit_self(&mut self, es: &'v ExplicitSelf) { - walk_explicit_self(self, es) - } - fn visit_path(&mut self, path: &'v Path, _id: NodeId) { - walk_path(self, path) - } - fn visit_path_list_item(&mut self, prefix: &'v Path, item: &'v PathListItem) { - walk_path_list_item(self, prefix, item) - } - fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) { - walk_path_segment(self, path_span, path_segment) - } - fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'v PathParameters) { - walk_path_parameters(self, path_span, path_parameters) - } - fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding) { - walk_assoc_type_binding(self, type_binding) - } - fn visit_attribute(&mut self, _attr: &'v Attribute) { - } - fn visit_macro_def(&mut self, macro_def: &'v MacroDef) { - walk_macro_def(self, macro_def) - } -} - -pub fn walk_opt_name<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, opt_name: Option) { - for name in opt_name { - visitor.visit_name(span, name); - } -} - -pub fn walk_opt_ident<'v, V: Visitor<'v>>(visitor: 
&mut V, span: Span, opt_ident: Option) { - for ident in opt_ident { - visitor.visit_ident(span, ident); - } -} - -pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, span: Span, ident: Ident) { - visitor.visit_name(span, ident.name); -} - -/// Walks the contents of a crate. See also `Crate::visit_all_items`. -pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) { - visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID); - walk_list!(visitor, visit_attribute, &krate.attrs); - walk_list!(visitor, visit_macro_def, &krate.exported_macros); -} - -pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) { - visitor.visit_name(macro_def.span, macro_def.name); - walk_opt_name(visitor, macro_def.span, macro_def.imported_from); - walk_list!(visitor, visit_attribute, ¯o_def.attrs); -} - -pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod) { - for &item_id in &module.item_ids { - visitor.visit_nested_item(item_id); - } -} - -pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) { - visitor.visit_pat(&local.pat); - walk_list!(visitor, visit_ty, &local.ty); - walk_list!(visitor, visit_expr, &local.init); -} - -pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) { - visitor.visit_name(lifetime.span, lifetime.name); -} - -pub fn walk_lifetime_def<'v, V: Visitor<'v>>(visitor: &mut V, lifetime_def: &'v LifetimeDef) { - visitor.visit_lifetime(&lifetime_def.lifetime); - walk_list!(visitor, visit_lifetime, &lifetime_def.bounds); -} - -pub fn walk_explicit_self<'v, V: Visitor<'v>>(visitor: &mut V, explicit_self: &'v ExplicitSelf) { - match explicit_self.node { - SelfStatic => {} - SelfValue(name) => { - visitor.visit_name(explicit_self.span, name) - } - SelfRegion(ref opt_lifetime, _, name) => { - visitor.visit_name(explicit_self.span, name); - walk_list!(visitor, visit_lifetime, opt_lifetime); - } - SelfExplicit(ref typ, name) => { - 
visitor.visit_name(explicit_self.span, name); - visitor.visit_ty(typ) - } - } -} - -pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V, - trait_ref: &'v PolyTraitRef, - _modifier: &'v TraitBoundModifier) - where V: Visitor<'v> -{ - walk_list!(visitor, visit_lifetime_def, &trait_ref.bound_lifetimes); - visitor.visit_trait_ref(&trait_ref.trait_ref); -} - -pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef) - where V: Visitor<'v> -{ - visitor.visit_path(&trait_ref.path, trait_ref.ref_id) -} - -pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { - visitor.visit_name(item.span, item.name); - match item.node { - ItemExternCrate(opt_name) => { - walk_opt_name(visitor, item.span, opt_name) - } - ItemUse(ref vp) => { - match vp.node { - ViewPathSimple(name, ref path) => { - visitor.visit_name(vp.span, name); - visitor.visit_path(path, item.id); - } - ViewPathGlob(ref path) => { - visitor.visit_path(path, item.id); - } - ViewPathList(ref prefix, ref list) => { - if !list.is_empty() { - for item in list { - visitor.visit_path_list_item(prefix, item) - } - } else { - visitor.visit_path(prefix, item.id); - } - } - } - } - ItemStatic(ref typ, _, ref expr) | - ItemConst(ref typ, ref expr) => { - visitor.visit_ty(typ); - visitor.visit_expr(expr); - } - ItemFn(ref declaration, unsafety, constness, abi, ref generics, ref body) => { - visitor.visit_fn(FnKind::ItemFn(item.name, - generics, - unsafety, - constness, - abi, - item.vis), - declaration, - body, - item.span, - item.id) - } - ItemMod(ref module) => { - visitor.visit_mod(module, item.span, item.id) - } - ItemForeignMod(ref foreign_module) => { - walk_list!(visitor, visit_foreign_item, &foreign_module.items); - } - ItemTy(ref typ, ref type_parameters) => { - visitor.visit_ty(typ); - visitor.visit_generics(type_parameters) - } - ItemEnum(ref enum_definition, ref type_parameters) => { - visitor.visit_generics(type_parameters); - visitor.visit_enum_def(enum_definition, type_parameters, 
item.id, item.span) - } - ItemDefaultImpl(_, ref trait_ref) => { - visitor.visit_trait_ref(trait_ref) - } - ItemImpl(_, _, ref type_parameters, ref opt_trait_reference, ref typ, ref impl_items) => { - visitor.visit_generics(type_parameters); - walk_list!(visitor, visit_trait_ref, opt_trait_reference); - visitor.visit_ty(typ); - walk_list!(visitor, visit_impl_item, impl_items); - } - ItemStruct(ref struct_definition, ref generics) => { - visitor.visit_generics(generics); - visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span); - } - ItemTrait(_, ref generics, ref bounds, ref methods) => { - visitor.visit_generics(generics); - walk_list!(visitor, visit_ty_param_bound, bounds); - walk_list!(visitor, visit_trait_item, methods); - } - } - walk_list!(visitor, visit_attribute, &item.attrs); -} - -pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V, - enum_definition: &'v EnumDef, - generics: &'v Generics, - item_id: NodeId) { - walk_list!(visitor, - visit_variant, - &enum_definition.variants, - generics, - item_id); -} - -pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, - variant: &'v Variant, - generics: &'v Generics, - item_id: NodeId) { - visitor.visit_name(variant.span, variant.node.name); - visitor.visit_variant_data(&variant.node.data, - variant.node.name, - generics, - item_id, - variant.span); - walk_list!(visitor, visit_expr, &variant.node.disr_expr); - walk_list!(visitor, visit_attribute, &variant.node.attrs); -} - -pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) { - match typ.node { - TyVec(ref ty) => { - visitor.visit_ty(ty) - } - TyPtr(ref mutable_type) => { - visitor.visit_ty(&mutable_type.ty) - } - TyRptr(ref opt_lifetime, ref mutable_type) => { - walk_list!(visitor, visit_lifetime, opt_lifetime); - visitor.visit_ty(&mutable_type.ty) - } - TyTup(ref tuple_element_types) => { - walk_list!(visitor, visit_ty, tuple_element_types); - } - TyBareFn(ref function_declaration) => { - 
walk_fn_decl(visitor, &function_declaration.decl); - walk_list!(visitor, visit_lifetime_def, &function_declaration.lifetimes); - } - TyPath(ref maybe_qself, ref path) => { - if let Some(ref qself) = *maybe_qself { - visitor.visit_ty(&qself.ty); - } - visitor.visit_path(path, typ.id); - } - TyObjectSum(ref ty, ref bounds) => { - visitor.visit_ty(ty); - walk_list!(visitor, visit_ty_param_bound, bounds); - } - TyFixedLengthVec(ref ty, ref expression) => { - visitor.visit_ty(ty); - visitor.visit_expr(expression) - } - TyPolyTraitRef(ref bounds) => { - walk_list!(visitor, visit_ty_param_bound, bounds); - } - TyTypeof(ref expression) => { - visitor.visit_expr(expression) - } - TyInfer => {} - } -} - -pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) { - for segment in &path.segments { - visitor.visit_path_segment(path.span, segment); - } -} - -pub fn walk_path_list_item<'v, V: Visitor<'v>>(visitor: &mut V, - prefix: &'v Path, - item: &'v PathListItem) { - for segment in &prefix.segments { - visitor.visit_path_segment(prefix.span, segment); - } - - walk_opt_name(visitor, item.span, item.node.name()); - walk_opt_name(visitor, item.span, item.node.rename()); -} - -pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, - path_span: Span, - segment: &'v PathSegment) { - visitor.visit_ident(path_span, segment.identifier); - visitor.visit_path_parameters(path_span, &segment.parameters); -} - -pub fn walk_path_parameters<'v, V: Visitor<'v>>(visitor: &mut V, - _path_span: Span, - path_parameters: &'v PathParameters) { - match *path_parameters { - AngleBracketedParameters(ref data) => { - walk_list!(visitor, visit_ty, &data.types); - walk_list!(visitor, visit_lifetime, &data.lifetimes); - walk_list!(visitor, visit_assoc_type_binding, &data.bindings); - } - ParenthesizedParameters(ref data) => { - walk_list!(visitor, visit_ty, &data.inputs); - walk_list!(visitor, visit_ty, &data.output); - } - } -} - -pub fn walk_assoc_type_binding<'v, V: 
Visitor<'v>>(visitor: &mut V, - type_binding: &'v TypeBinding) { - visitor.visit_name(type_binding.span, type_binding.name); - visitor.visit_ty(&type_binding.ty); -} - -pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { - match pattern.node { - PatEnum(ref path, ref opt_children) => { - visitor.visit_path(path, pattern.id); - if let Some(ref children) = *opt_children { - walk_list!(visitor, visit_pat, children); - } - } - PatQPath(ref qself, ref path) => { - visitor.visit_ty(&qself.ty); - visitor.visit_path(path, pattern.id) - } - PatStruct(ref path, ref fields, _) => { - visitor.visit_path(path, pattern.id); - for field in fields { - visitor.visit_name(field.span, field.node.name); - visitor.visit_pat(&field.node.pat) - } - } - PatTup(ref tuple_elements) => { - walk_list!(visitor, visit_pat, tuple_elements); - } - PatBox(ref subpattern) | - PatRegion(ref subpattern, _) => { - visitor.visit_pat(subpattern) - } - PatIdent(_, ref pth1, ref optional_subpattern) => { - visitor.visit_ident(pth1.span, pth1.node); - walk_list!(visitor, visit_pat, optional_subpattern); - } - PatLit(ref expression) => visitor.visit_expr(expression), - PatRange(ref lower_bound, ref upper_bound) => { - visitor.visit_expr(lower_bound); - visitor.visit_expr(upper_bound) - } - PatWild => (), - PatVec(ref prepatterns, ref slice_pattern, ref postpatterns) => { - walk_list!(visitor, visit_pat, prepatterns); - walk_list!(visitor, visit_pat, slice_pattern); - walk_list!(visitor, visit_pat, postpatterns); - } - } -} - -pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) { - visitor.visit_name(foreign_item.span, foreign_item.name); - - match foreign_item.node { - ForeignItemFn(ref function_declaration, ref generics) => { - walk_fn_decl(visitor, function_declaration); - visitor.visit_generics(generics) - } - ForeignItemStatic(ref typ, _) => visitor.visit_ty(typ), - } - - walk_list!(visitor, visit_attribute, &foreign_item.attrs); -} - -pub fn 
walk_ty_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v TyParamBound) { - match *bound { - TraitTyParamBound(ref typ, ref modifier) => { - visitor.visit_poly_trait_ref(typ, modifier); - } - RegionTyParamBound(ref lifetime) => { - visitor.visit_lifetime(lifetime); - } - } -} - -pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) { - for param in &generics.ty_params { - visitor.visit_name(param.span, param.name); - walk_list!(visitor, visit_ty_param_bound, ¶m.bounds); - walk_list!(visitor, visit_ty, ¶m.default); - } - walk_list!(visitor, visit_lifetime_def, &generics.lifetimes); - for predicate in &generics.where_clause.predicates { - match predicate { - &WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty, - ref bounds, - ref bound_lifetimes, - ..}) => { - visitor.visit_ty(bounded_ty); - walk_list!(visitor, visit_ty_param_bound, bounds); - walk_list!(visitor, visit_lifetime_def, bound_lifetimes); - } - &WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime, - ref bounds, - ..}) => { - visitor.visit_lifetime(lifetime); - walk_list!(visitor, visit_lifetime, bounds); - } - &WherePredicate::EqPredicate(WhereEqPredicate{id, - ref path, - ref ty, - ..}) => { - visitor.visit_path(path, id); - visitor.visit_ty(ty); - } - } - } -} - -pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FunctionRetTy) { - if let Return(ref output_ty) = *ret_ty { - visitor.visit_ty(output_ty) - } -} - -pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) { - for argument in &function_declaration.inputs { - visitor.visit_pat(&argument.pat); - visitor.visit_ty(&argument.ty) - } - walk_fn_ret_ty(visitor, &function_declaration.output) -} - -pub fn walk_fn_decl_nopat<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) { - for argument in &function_declaration.inputs { - visitor.visit_ty(&argument.ty) - } - walk_fn_ret_ty(visitor, 
&function_declaration.output) -} - -pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) { - match function_kind { - FnKind::ItemFn(_, generics, _, _, _, _) => { - visitor.visit_generics(generics); - } - FnKind::Method(_, sig, _) => { - visitor.visit_generics(&sig.generics); - visitor.visit_explicit_self(&sig.explicit_self); - } - FnKind::Closure => {} - } -} - -pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V, - function_kind: FnKind<'v>, - function_declaration: &'v FnDecl, - function_body: &'v Block, - _span: Span) { - walk_fn_decl(visitor, function_declaration); - walk_fn_kind(visitor, function_kind); - visitor.visit_block(function_body) -} - -pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) { - visitor.visit_name(trait_item.span, trait_item.name); - walk_list!(visitor, visit_attribute, &trait_item.attrs); - match trait_item.node { - ConstTraitItem(ref ty, ref default) => { - visitor.visit_ty(ty); - walk_list!(visitor, visit_expr, default); - } - MethodTraitItem(ref sig, None) => { - visitor.visit_explicit_self(&sig.explicit_self); - visitor.visit_generics(&sig.generics); - walk_fn_decl(visitor, &sig.decl); - } - MethodTraitItem(ref sig, Some(ref body)) => { - visitor.visit_fn(FnKind::Method(trait_item.name, sig, None), - &sig.decl, - body, - trait_item.span, - trait_item.id); - } - TypeTraitItem(ref bounds, ref default) => { - walk_list!(visitor, visit_ty_param_bound, bounds); - walk_list!(visitor, visit_ty, default); - } - } -} - -pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) { - visitor.visit_name(impl_item.span, impl_item.name); - walk_list!(visitor, visit_attribute, &impl_item.attrs); - match impl_item.node { - ImplItemKind::Const(ref ty, ref expr) => { - visitor.visit_ty(ty); - visitor.visit_expr(expr); - } - ImplItemKind::Method(ref sig, ref body) => { - visitor.visit_fn(FnKind::Method(impl_item.name, sig, Some(impl_item.vis)), - &sig.decl, - body, 
- impl_item.span, - impl_item.id); - } - ImplItemKind::Type(ref ty) => { - visitor.visit_ty(ty); - } - } -} - -pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) { - walk_list!(visitor, visit_struct_field, struct_definition.fields()); -} - -pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) { - walk_opt_name(visitor, struct_field.span, struct_field.node.name()); - visitor.visit_ty(&struct_field.node.ty); - walk_list!(visitor, visit_attribute, &struct_field.node.attrs); -} - -pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) { - walk_list!(visitor, visit_stmt, &block.stmts); - walk_list!(visitor, visit_expr, &block.expr); -} - -pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) { - match statement.node { - StmtDecl(ref declaration, _) => visitor.visit_decl(declaration), - StmtExpr(ref expression, _) | StmtSemi(ref expression, _) => { - visitor.visit_expr(expression) - } - } -} - -pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) { - match declaration.node { - DeclLocal(ref local) => visitor.visit_local(local), - DeclItem(item) => visitor.visit_nested_item(item), - } -} - -pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { - match expression.node { - ExprBox(ref subexpression) => { - visitor.visit_expr(subexpression) - } - ExprVec(ref subexpressions) => { - walk_list!(visitor, visit_expr, subexpressions); - } - ExprRepeat(ref element, ref count) => { - visitor.visit_expr(element); - visitor.visit_expr(count) - } - ExprStruct(ref path, ref fields, ref optional_base) => { - visitor.visit_path(path, expression.id); - for field in fields { - visitor.visit_name(field.name.span, field.name.node); - visitor.visit_expr(&field.expr) - } - walk_list!(visitor, visit_expr, optional_base); - } - ExprTup(ref subexpressions) => { - walk_list!(visitor, visit_expr, subexpressions); - } - 
ExprCall(ref callee_expression, ref arguments) => { - walk_list!(visitor, visit_expr, arguments); - visitor.visit_expr(callee_expression) - } - ExprMethodCall(ref name, ref types, ref arguments) => { - visitor.visit_name(name.span, name.node); - walk_list!(visitor, visit_expr, arguments); - walk_list!(visitor, visit_ty, types); - } - ExprBinary(_, ref left_expression, ref right_expression) => { - visitor.visit_expr(left_expression); - visitor.visit_expr(right_expression) - } - ExprAddrOf(_, ref subexpression) | ExprUnary(_, ref subexpression) => { - visitor.visit_expr(subexpression) - } - ExprLit(_) => {} - ExprCast(ref subexpression, ref typ) | ExprType(ref subexpression, ref typ) => { - visitor.visit_expr(subexpression); - visitor.visit_ty(typ) - } - ExprIf(ref head_expression, ref if_block, ref optional_else) => { - visitor.visit_expr(head_expression); - visitor.visit_block(if_block); - walk_list!(visitor, visit_expr, optional_else); - } - ExprWhile(ref subexpression, ref block, opt_ident) => { - visitor.visit_expr(subexpression); - visitor.visit_block(block); - walk_opt_ident(visitor, expression.span, opt_ident) - } - ExprLoop(ref block, opt_ident) => { - visitor.visit_block(block); - walk_opt_ident(visitor, expression.span, opt_ident) - } - ExprMatch(ref subexpression, ref arms, _) => { - visitor.visit_expr(subexpression); - walk_list!(visitor, visit_arm, arms); - } - ExprClosure(_, ref function_declaration, ref body) => { - visitor.visit_fn(FnKind::Closure, - function_declaration, - body, - expression.span, - expression.id) - } - ExprBlock(ref block) => visitor.visit_block(block), - ExprAssign(ref left_hand_expression, ref right_hand_expression) => { - visitor.visit_expr(right_hand_expression); - visitor.visit_expr(left_hand_expression) - } - ExprAssignOp(_, ref left_expression, ref right_expression) => { - visitor.visit_expr(right_expression); - visitor.visit_expr(left_expression) - } - ExprField(ref subexpression, ref name) => { - 
visitor.visit_expr(subexpression); - visitor.visit_name(name.span, name.node); - } - ExprTupField(ref subexpression, _) => { - visitor.visit_expr(subexpression); - } - ExprIndex(ref main_expression, ref index_expression) => { - visitor.visit_expr(main_expression); - visitor.visit_expr(index_expression) - } - ExprRange(ref start, ref end) => { - walk_list!(visitor, visit_expr, start); - walk_list!(visitor, visit_expr, end); - } - ExprPath(ref maybe_qself, ref path) => { - if let Some(ref qself) = *maybe_qself { - visitor.visit_ty(&qself.ty); - } - visitor.visit_path(path, expression.id) - } - ExprBreak(ref opt_sp_ident) | ExprAgain(ref opt_sp_ident) => { - for sp_ident in opt_sp_ident { - visitor.visit_ident(sp_ident.span, sp_ident.node); - } - } - ExprRet(ref optional_expression) => { - walk_list!(visitor, visit_expr, optional_expression); - } - ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - visitor.visit_expr(&input) - } - for output in &ia.outputs { - visitor.visit_expr(&output.expr) - } - } - } - - visitor.visit_expr_post(expression) -} - -pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) { - walk_list!(visitor, visit_pat, &arm.pats); - walk_list!(visitor, visit_expr, &arm.guard); - visitor.visit_expr(&arm.body); - walk_list!(visitor, visit_attribute, &arm.attrs); -} diff --git a/src/librustc_front/lib.rs b/src/librustc_front/lib.rs deleted file mode 100644 index b12c41d060a07..0000000000000 --- a/src/librustc_front/lib.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The Rust compiler. -//! -//! # Note -//! -//! This API is completely unstable and subject to change. 
- -#![crate_name = "rustc_front"] -#![unstable(feature = "rustc_private", issue = "27812")] -#![crate_type = "dylib"] -#![crate_type = "rlib"] -#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "http://doc.rust-lang.org/nightly/")] - -#![feature(associated_consts)] -#![feature(box_patterns)] -#![feature(box_syntax)] -#![feature(const_fn)] -#![feature(quote)] -#![feature(rustc_diagnostic_macros)] -#![feature(rustc_private)] -#![feature(slice_patterns)] -#![feature(staged_api)] -#![feature(str_char)] - -extern crate serialize; -#[macro_use] -extern crate log; -#[macro_use] -extern crate syntax; -#[macro_use] -#[no_link] -extern crate rustc_bitflags; - -extern crate serialize as rustc_serialize; // used by deriving - -#[macro_use] -pub mod hir; -pub mod lowering; -pub mod fold; -pub mod intravisit; -pub mod util; - -pub mod print { - pub mod pprust; -} diff --git a/src/librustc_front/lowering.rs b/src/librustc_front/lowering.rs deleted file mode 100644 index 81d3367ab00dc..0000000000000 --- a/src/librustc_front/lowering.rs +++ /dev/null @@ -1,1961 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Lowers the AST to the HIR. -// -// Since the AST and HIR are fairly similar, this is mostly a simple procedure, -// much like a fold. Where lowering involves a bit more work things get more -// interesting and there are some invariants you should know about. These mostly -// concern spans and ids. 
-// -// Spans are assigned to AST nodes during parsing and then are modified during -// expansion to indicate the origin of a node and the process it went through -// being expanded. Ids are assigned to AST nodes just before lowering. -// -// For the simpler lowering steps, ids and spans should be preserved. Unlike -// expansion we do not preserve the process of lowering in the spans, so spans -// should not be modified here. When creating a new node (as opposed to -// 'folding' an existing one), then you create a new id using `next_id()`. -// -// You must ensure that ids are unique. That means that you should only use the -// id from an AST node in a single HIR node (you can assume that AST node ids -// are unique). Every new node must have a unique id. Avoid cloning HIR nodes. -// If you do, you must then set the new node's id to a fresh one. -// -// Lowering must be reproducable (the compiler only lowers once, but tools and -// custom lints may lower an AST node to a HIR node to interact with the -// compiler). The most interesting bit of this is ids - if you lower an AST node -// and create new HIR nodes with fresh ids, when re-lowering the same node, you -// must ensure you get the same ids! To do this, we keep track of the next id -// when we translate a node which requires new ids. By checking this cache and -// using node ids starting with the cached id, we ensure ids are reproducible. -// To use this system, you just need to hold on to a CachedIdSetter object -// whilst lowering. This is an RAII object that takes care of setting and -// restoring the cached id, etc. -// -// This whole system relies on node ids being incremented one at a time and -// all increments being for lowering. This means that you should not call any -// non-lowering function which will use new node ids. -// -// We must also cache gensym'ed Idents to ensure that we get the same Ident -// every time we lower a node with gensym'ed names. 
One consequence of this is -// that you can only gensym a name once in a lowering (you don't need to worry -// about nested lowering though). That's because we cache based on the name and -// the currently cached node id, which is unique per lowered node. -// -// Spans are used for error messages and for tools to map semantics back to -// source code. It is therefore not as important with spans as ids to be strict -// about use (you can't break the compiler by screwing up a span). Obviously, a -// HIR node can only have a single span. But multiple nodes can have the same -// span and spans don't need to be kept in order, etc. Where code is preserved -// by lowering, it should have the same span as in the AST. Where HIR nodes are -// new it is probably best to give a span for the whole AST node being lowered. -// All nodes should have real spans, don't use dummy spans. Tools are likely to -// get confused if the spans from leaf AST nodes occur in multiple places -// in the HIR, especially for multiple identifiers. - -use hir; - -use std::collections::BTreeMap; -use std::collections::HashMap; -use syntax::ast::*; -use syntax::attr::{ThinAttributes, ThinAttributesExt}; -use syntax::ext::mtwt; -use syntax::ptr::P; -use syntax::codemap::{respan, Spanned, Span}; -use syntax::parse::token; -use syntax::std_inject; -use syntax::visit::{self, Visitor}; - -use std::cell::{Cell, RefCell}; - -pub struct LoweringContext<'a> { - crate_root: Option<&'static str>, - // Map AST ids to ids used for expanded nodes. - id_cache: RefCell>, - // Use if there are no cached ids for the current node. - id_assigner: &'a NodeIdAssigner, - // 0 == no cached id. Must be incremented to align with previous id - // incrementing. - cached_id: Cell, - // Keep track of gensym'ed idents. - gensym_cache: RefCell>, - // A copy of cached_id, but is also set to an id while it is being cached. 
- gensym_key: Cell, -} - -impl<'a, 'hir> LoweringContext<'a> { - pub fn new(id_assigner: &'a NodeIdAssigner, c: Option<&Crate>) -> LoweringContext<'a> { - let crate_root = c.and_then(|c| { - if std_inject::no_core(c) { - None - } else if std_inject::no_std(c) { - Some("core") - } else { - Some("std") - } - }); - - LoweringContext { - crate_root: crate_root, - id_cache: RefCell::new(HashMap::new()), - id_assigner: id_assigner, - cached_id: Cell::new(0), - gensym_cache: RefCell::new(HashMap::new()), - gensym_key: Cell::new(0), - } - } - - fn next_id(&self) -> NodeId { - let cached = self.cached_id.get(); - if cached == 0 { - return self.id_assigner.next_node_id(); - } - - self.cached_id.set(cached + 1); - cached - } - - fn str_to_ident(&self, s: &'static str) -> hir::Ident { - let cached_id = self.gensym_key.get(); - if cached_id == 0 { - return hir::Ident::from_name(token::gensym(s)); - } - - let cached = self.gensym_cache.borrow().contains_key(&(cached_id, s)); - if cached { - self.gensym_cache.borrow()[&(cached_id, s)] - } else { - let result = hir::Ident::from_name(token::gensym(s)); - self.gensym_cache.borrow_mut().insert((cached_id, s), result); - result - } - } -} - -pub fn lower_ident(_lctx: &LoweringContext, ident: Ident) -> hir::Ident { - hir::Ident { - name: mtwt::resolve(ident), - unhygienic_name: ident.name, - } -} - -pub fn lower_attrs(_lctx: &LoweringContext, attrs: &Vec) -> hir::HirVec { - attrs.clone().into() -} - -pub fn lower_view_path(lctx: &LoweringContext, view_path: &ViewPath) -> P { - P(Spanned { - node: match view_path.node { - ViewPathSimple(ident, ref path) => { - hir::ViewPathSimple(ident.name, lower_path(lctx, path)) - } - ViewPathGlob(ref path) => { - hir::ViewPathGlob(lower_path(lctx, path)) - } - ViewPathList(ref path, ref path_list_idents) => { - hir::ViewPathList(lower_path(lctx, path), - path_list_idents.iter() - .map(|path_list_ident| { - Spanned { - node: match path_list_ident.node { - PathListIdent { id, name, rename } => - 
hir::PathListIdent { - id: id, - name: name.name, - rename: rename.map(|x| x.name), - }, - PathListMod { id, rename } => - hir::PathListMod { - id: id, - rename: rename.map(|x| x.name), - }, - }, - span: path_list_ident.span, - } - }) - .collect()) - } - }, - span: view_path.span, - }) -} - -pub fn lower_arm(lctx: &LoweringContext, arm: &Arm) -> hir::Arm { - hir::Arm { - attrs: lower_attrs(lctx, &arm.attrs), - pats: arm.pats.iter().map(|x| lower_pat(lctx, x)).collect(), - guard: arm.guard.as_ref().map(|ref x| lower_expr(lctx, x)), - body: lower_expr(lctx, &arm.body), - } -} - -pub fn lower_decl(lctx: &LoweringContext, d: &Decl) -> P { - match d.node { - DeclLocal(ref l) => P(Spanned { - node: hir::DeclLocal(lower_local(lctx, l)), - span: d.span, - }), - DeclItem(ref it) => P(Spanned { - node: hir::DeclItem(lower_item_id(lctx, it)), - span: d.span, - }), - } -} - -pub fn lower_ty_binding(lctx: &LoweringContext, b: &TypeBinding) -> hir::TypeBinding { - hir::TypeBinding { - id: b.id, - name: b.ident.name, - ty: lower_ty(lctx, &b.ty), - span: b.span, - } -} - -pub fn lower_ty(lctx: &LoweringContext, t: &Ty) -> P { - P(hir::Ty { - id: t.id, - node: match t.node { - TyInfer => hir::TyInfer, - TyVec(ref ty) => hir::TyVec(lower_ty(lctx, ty)), - TyPtr(ref mt) => hir::TyPtr(lower_mt(lctx, mt)), - TyRptr(ref region, ref mt) => { - hir::TyRptr(lower_opt_lifetime(lctx, region), lower_mt(lctx, mt)) - } - TyBareFn(ref f) => { - hir::TyBareFn(P(hir::BareFnTy { - lifetimes: lower_lifetime_defs(lctx, &f.lifetimes), - unsafety: lower_unsafety(lctx, f.unsafety), - abi: f.abi, - decl: lower_fn_decl(lctx, &f.decl), - })) - } - TyTup(ref tys) => hir::TyTup(tys.iter().map(|ty| lower_ty(lctx, ty)).collect()), - TyParen(ref ty) => { - return lower_ty(lctx, ty); - } - TyPath(ref qself, ref path) => { - let qself = qself.as_ref().map(|&QSelf { ref ty, position }| { - hir::QSelf { - ty: lower_ty(lctx, ty), - position: position, - } - }); - hir::TyPath(qself, lower_path(lctx, path)) - } - 
TyObjectSum(ref ty, ref bounds) => { - hir::TyObjectSum(lower_ty(lctx, ty), lower_bounds(lctx, bounds)) - } - TyFixedLengthVec(ref ty, ref e) => { - hir::TyFixedLengthVec(lower_ty(lctx, ty), lower_expr(lctx, e)) - } - TyTypeof(ref expr) => { - hir::TyTypeof(lower_expr(lctx, expr)) - } - TyPolyTraitRef(ref bounds) => { - hir::TyPolyTraitRef(bounds.iter().map(|b| lower_ty_param_bound(lctx, b)).collect()) - } - TyMac(_) => panic!("TyMac should have been expanded by now."), - }, - span: t.span, - }) -} - -pub fn lower_foreign_mod(lctx: &LoweringContext, fm: &ForeignMod) -> hir::ForeignMod { - hir::ForeignMod { - abi: fm.abi, - items: fm.items.iter().map(|x| lower_foreign_item(lctx, x)).collect(), - } -} - -pub fn lower_variant(lctx: &LoweringContext, v: &Variant) -> hir::Variant { - Spanned { - node: hir::Variant_ { - name: v.node.name.name, - attrs: lower_attrs(lctx, &v.node.attrs), - data: lower_variant_data(lctx, &v.node.data), - disr_expr: v.node.disr_expr.as_ref().map(|e| lower_expr(lctx, e)), - }, - span: v.span, - } -} - -// Path segments are usually unhygienic, hygienic path segments can occur only in -// identifier-like paths originating from `ExprPath`. -// Make life simpler for rustc_resolve by renaming only such segments. 
-pub fn lower_path_full(lctx: &LoweringContext, p: &Path, maybe_hygienic: bool) -> hir::Path { - let maybe_hygienic = maybe_hygienic && !p.global && p.segments.len() == 1; - hir::Path { - global: p.global, - segments: p.segments - .iter() - .map(|&PathSegment { identifier, ref parameters }| { - hir::PathSegment { - identifier: if maybe_hygienic { - lower_ident(lctx, identifier) - } else { - hir::Ident::from_name(identifier.name) - }, - parameters: lower_path_parameters(lctx, parameters), - } - }) - .collect(), - span: p.span, - } -} - -pub fn lower_path(lctx: &LoweringContext, p: &Path) -> hir::Path { - lower_path_full(lctx, p, false) -} - -pub fn lower_path_parameters(lctx: &LoweringContext, - path_parameters: &PathParameters) - -> hir::PathParameters { - match *path_parameters { - PathParameters::AngleBracketed(ref data) => - hir::AngleBracketedParameters(lower_angle_bracketed_parameter_data(lctx, data)), - PathParameters::Parenthesized(ref data) => - hir::ParenthesizedParameters(lower_parenthesized_parameter_data(lctx, data)), - } -} - -pub fn lower_angle_bracketed_parameter_data(lctx: &LoweringContext, - data: &AngleBracketedParameterData) - -> hir::AngleBracketedParameterData { - let &AngleBracketedParameterData { ref lifetimes, ref types, ref bindings } = data; - hir::AngleBracketedParameterData { - lifetimes: lower_lifetimes(lctx, lifetimes), - types: types.iter().map(|ty| lower_ty(lctx, ty)).collect(), - bindings: bindings.iter().map(|b| lower_ty_binding(lctx, b)).collect(), - } -} - -pub fn lower_parenthesized_parameter_data(lctx: &LoweringContext, - data: &ParenthesizedParameterData) - -> hir::ParenthesizedParameterData { - let &ParenthesizedParameterData { ref inputs, ref output, span } = data; - hir::ParenthesizedParameterData { - inputs: inputs.iter().map(|ty| lower_ty(lctx, ty)).collect(), - output: output.as_ref().map(|ty| lower_ty(lctx, ty)), - span: span, - } -} - -pub fn lower_local(lctx: &LoweringContext, l: &Local) -> P { - P(hir::Local { - id: 
l.id, - ty: l.ty.as_ref().map(|t| lower_ty(lctx, t)), - pat: lower_pat(lctx, &l.pat), - init: l.init.as_ref().map(|e| lower_expr(lctx, e)), - span: l.span, - attrs: l.attrs.clone(), - }) -} - -pub fn lower_explicit_self_underscore(lctx: &LoweringContext, - es: &ExplicitSelf_) - -> hir::ExplicitSelf_ { - match *es { - SelfStatic => hir::SelfStatic, - SelfValue(v) => hir::SelfValue(v.name), - SelfRegion(ref lifetime, m, ident) => { - hir::SelfRegion(lower_opt_lifetime(lctx, lifetime), - lower_mutability(lctx, m), - ident.name) - } - SelfExplicit(ref typ, ident) => { - hir::SelfExplicit(lower_ty(lctx, typ), ident.name) - } - } -} - -pub fn lower_mutability(_lctx: &LoweringContext, m: Mutability) -> hir::Mutability { - match m { - MutMutable => hir::MutMutable, - MutImmutable => hir::MutImmutable, - } -} - -pub fn lower_explicit_self(lctx: &LoweringContext, s: &ExplicitSelf) -> hir::ExplicitSelf { - Spanned { - node: lower_explicit_self_underscore(lctx, &s.node), - span: s.span, - } -} - -pub fn lower_arg(lctx: &LoweringContext, arg: &Arg) -> hir::Arg { - hir::Arg { - id: arg.id, - pat: lower_pat(lctx, &arg.pat), - ty: lower_ty(lctx, &arg.ty), - } -} - -pub fn lower_fn_decl(lctx: &LoweringContext, decl: &FnDecl) -> P { - P(hir::FnDecl { - inputs: decl.inputs.iter().map(|x| lower_arg(lctx, x)).collect(), - output: match decl.output { - Return(ref ty) => hir::Return(lower_ty(lctx, ty)), - DefaultReturn(span) => hir::DefaultReturn(span), - NoReturn(span) => hir::NoReturn(span), - }, - variadic: decl.variadic, - }) -} - -pub fn lower_ty_param_bound(lctx: &LoweringContext, tpb: &TyParamBound) -> hir::TyParamBound { - match *tpb { - TraitTyParamBound(ref ty, modifier) => { - hir::TraitTyParamBound(lower_poly_trait_ref(lctx, ty), - lower_trait_bound_modifier(lctx, modifier)) - } - RegionTyParamBound(ref lifetime) => { - hir::RegionTyParamBound(lower_lifetime(lctx, lifetime)) - } - } -} - -pub fn lower_ty_param(lctx: &LoweringContext, tp: &TyParam) -> hir::TyParam { - 
hir::TyParam { - id: tp.id, - name: tp.ident.name, - bounds: lower_bounds(lctx, &tp.bounds), - default: tp.default.as_ref().map(|x| lower_ty(lctx, x)), - span: tp.span, - } -} - -pub fn lower_ty_params(lctx: &LoweringContext, - tps: &P<[TyParam]>) - -> hir::HirVec { - tps.iter().map(|tp| lower_ty_param(lctx, tp)).collect() -} - -pub fn lower_lifetime(_lctx: &LoweringContext, l: &Lifetime) -> hir::Lifetime { - hir::Lifetime { - id: l.id, - name: l.name, - span: l.span, - } -} - -pub fn lower_lifetime_def(lctx: &LoweringContext, l: &LifetimeDef) -> hir::LifetimeDef { - hir::LifetimeDef { - lifetime: lower_lifetime(lctx, &l.lifetime), - bounds: lower_lifetimes(lctx, &l.bounds), - } -} - -pub fn lower_lifetimes(lctx: &LoweringContext, lts: &Vec) -> hir::HirVec { - lts.iter().map(|l| lower_lifetime(lctx, l)).collect() -} - -pub fn lower_lifetime_defs(lctx: &LoweringContext, - lts: &Vec) - -> hir::HirVec { - lts.iter().map(|l| lower_lifetime_def(lctx, l)).collect() -} - -pub fn lower_opt_lifetime(lctx: &LoweringContext, - o_lt: &Option) - -> Option { - o_lt.as_ref().map(|lt| lower_lifetime(lctx, lt)) -} - -pub fn lower_generics(lctx: &LoweringContext, g: &Generics) -> hir::Generics { - hir::Generics { - ty_params: lower_ty_params(lctx, &g.ty_params), - lifetimes: lower_lifetime_defs(lctx, &g.lifetimes), - where_clause: lower_where_clause(lctx, &g.where_clause), - } -} - -pub fn lower_where_clause(lctx: &LoweringContext, wc: &WhereClause) -> hir::WhereClause { - hir::WhereClause { - id: wc.id, - predicates: wc.predicates - .iter() - .map(|predicate| lower_where_predicate(lctx, predicate)) - .collect(), - } -} - -pub fn lower_where_predicate(lctx: &LoweringContext, - pred: &WherePredicate) - -> hir::WherePredicate { - match *pred { - WherePredicate::BoundPredicate(WhereBoundPredicate{ ref bound_lifetimes, - ref bounded_ty, - ref bounds, - span}) => { - hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate { - bound_lifetimes: lower_lifetime_defs(lctx, 
bound_lifetimes), - bounded_ty: lower_ty(lctx, bounded_ty), - bounds: bounds.iter().map(|x| lower_ty_param_bound(lctx, x)).collect(), - span: span, - }) - } - WherePredicate::RegionPredicate(WhereRegionPredicate{ ref lifetime, - ref bounds, - span}) => { - hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate { - span: span, - lifetime: lower_lifetime(lctx, lifetime), - bounds: bounds.iter().map(|bound| lower_lifetime(lctx, bound)).collect(), - }) - } - WherePredicate::EqPredicate(WhereEqPredicate{ id, - ref path, - ref ty, - span}) => { - hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { - id: id, - path: lower_path(lctx, path), - ty: lower_ty(lctx, ty), - span: span, - }) - } - } -} - -pub fn lower_variant_data(lctx: &LoweringContext, vdata: &VariantData) -> hir::VariantData { - match *vdata { - VariantData::Struct(ref fields, id) => { - hir::VariantData::Struct(fields.iter() - .map(|f| lower_struct_field(lctx, f)) - .collect(), - id) - } - VariantData::Tuple(ref fields, id) => { - hir::VariantData::Tuple(fields.iter() - .map(|f| lower_struct_field(lctx, f)) - .collect(), - id) - } - VariantData::Unit(id) => hir::VariantData::Unit(id), - } -} - -pub fn lower_trait_ref(lctx: &LoweringContext, p: &TraitRef) -> hir::TraitRef { - hir::TraitRef { - path: lower_path(lctx, &p.path), - ref_id: p.ref_id, - } -} - -pub fn lower_poly_trait_ref(lctx: &LoweringContext, p: &PolyTraitRef) -> hir::PolyTraitRef { - hir::PolyTraitRef { - bound_lifetimes: lower_lifetime_defs(lctx, &p.bound_lifetimes), - trait_ref: lower_trait_ref(lctx, &p.trait_ref), - span: p.span, - } -} - -pub fn lower_struct_field(lctx: &LoweringContext, f: &StructField) -> hir::StructField { - Spanned { - node: hir::StructField_ { - id: f.node.id, - kind: lower_struct_field_kind(lctx, &f.node.kind), - ty: lower_ty(lctx, &f.node.ty), - attrs: lower_attrs(lctx, &f.node.attrs), - }, - span: f.span, - } -} - -pub fn lower_field(lctx: &LoweringContext, f: &Field) -> hir::Field { - hir::Field { - 
name: respan(f.ident.span, f.ident.node.name), - expr: lower_expr(lctx, &f.expr), - span: f.span, - } -} - -pub fn lower_mt(lctx: &LoweringContext, mt: &MutTy) -> hir::MutTy { - hir::MutTy { - ty: lower_ty(lctx, &mt.ty), - mutbl: lower_mutability(lctx, mt.mutbl), - } -} - -pub fn lower_opt_bounds(lctx: &LoweringContext, - b: &Option) - -> Option { - b.as_ref().map(|ref bounds| lower_bounds(lctx, bounds)) -} - -fn lower_bounds(lctx: &LoweringContext, bounds: &TyParamBounds) -> hir::TyParamBounds { - bounds.iter().map(|bound| lower_ty_param_bound(lctx, bound)).collect() -} - -pub fn lower_block(lctx: &LoweringContext, b: &Block) -> P { - P(hir::Block { - id: b.id, - stmts: b.stmts.iter().map(|s| lower_stmt(lctx, s)).collect(), - expr: b.expr.as_ref().map(|ref x| lower_expr(lctx, x)), - rules: lower_block_check_mode(lctx, &b.rules), - span: b.span, - }) -} - -pub fn lower_item_underscore(lctx: &LoweringContext, i: &Item_) -> hir::Item_ { - match *i { - ItemExternCrate(string) => hir::ItemExternCrate(string), - ItemUse(ref view_path) => { - hir::ItemUse(lower_view_path(lctx, view_path)) - } - ItemStatic(ref t, m, ref e) => { - hir::ItemStatic(lower_ty(lctx, t), - lower_mutability(lctx, m), - lower_expr(lctx, e)) - } - ItemConst(ref t, ref e) => { - hir::ItemConst(lower_ty(lctx, t), lower_expr(lctx, e)) - } - ItemFn(ref decl, unsafety, constness, abi, ref generics, ref body) => { - hir::ItemFn(lower_fn_decl(lctx, decl), - lower_unsafety(lctx, unsafety), - lower_constness(lctx, constness), - abi, - lower_generics(lctx, generics), - lower_block(lctx, body)) - } - ItemMod(ref m) => hir::ItemMod(lower_mod(lctx, m)), - ItemForeignMod(ref nm) => hir::ItemForeignMod(lower_foreign_mod(lctx, nm)), - ItemTy(ref t, ref generics) => { - hir::ItemTy(lower_ty(lctx, t), lower_generics(lctx, generics)) - } - ItemEnum(ref enum_definition, ref generics) => { - hir::ItemEnum(hir::EnumDef { - variants: enum_definition.variants - .iter() - .map(|x| lower_variant(lctx, x)) - .collect(), - }, 
- lower_generics(lctx, generics)) - } - ItemStruct(ref struct_def, ref generics) => { - let struct_def = lower_variant_data(lctx, struct_def); - hir::ItemStruct(struct_def, lower_generics(lctx, generics)) - } - ItemDefaultImpl(unsafety, ref trait_ref) => { - hir::ItemDefaultImpl(lower_unsafety(lctx, unsafety), - lower_trait_ref(lctx, trait_ref)) - } - ItemImpl(unsafety, polarity, ref generics, ref ifce, ref ty, ref impl_items) => { - let new_impl_items = impl_items.iter() - .map(|item| lower_impl_item(lctx, item)) - .collect(); - let ifce = ifce.as_ref().map(|trait_ref| lower_trait_ref(lctx, trait_ref)); - hir::ItemImpl(lower_unsafety(lctx, unsafety), - lower_impl_polarity(lctx, polarity), - lower_generics(lctx, generics), - ifce, - lower_ty(lctx, ty), - new_impl_items) - } - ItemTrait(unsafety, ref generics, ref bounds, ref items) => { - let bounds = lower_bounds(lctx, bounds); - let items = items.iter().map(|item| lower_trait_item(lctx, item)).collect(); - hir::ItemTrait(lower_unsafety(lctx, unsafety), - lower_generics(lctx, generics), - bounds, - items) - } - ItemMac(_) => panic!("Shouldn't still be around"), - } -} - -pub fn lower_trait_item(lctx: &LoweringContext, i: &TraitItem) -> hir::TraitItem { - hir::TraitItem { - id: i.id, - name: i.ident.name, - attrs: lower_attrs(lctx, &i.attrs), - node: match i.node { - ConstTraitItem(ref ty, ref default) => { - hir::ConstTraitItem(lower_ty(lctx, ty), - default.as_ref().map(|x| lower_expr(lctx, x))) - } - MethodTraitItem(ref sig, ref body) => { - hir::MethodTraitItem(lower_method_sig(lctx, sig), - body.as_ref().map(|x| lower_block(lctx, x))) - } - TypeTraitItem(ref bounds, ref default) => { - hir::TypeTraitItem(lower_bounds(lctx, bounds), - default.as_ref().map(|x| lower_ty(lctx, x))) - } - }, - span: i.span, - } -} - -pub fn lower_impl_item(lctx: &LoweringContext, i: &ImplItem) -> hir::ImplItem { - hir::ImplItem { - id: i.id, - name: i.ident.name, - attrs: lower_attrs(lctx, &i.attrs), - vis: lower_visibility(lctx, 
i.vis), - node: match i.node { - ImplItemKind::Const(ref ty, ref expr) => { - hir::ImplItemKind::Const(lower_ty(lctx, ty), lower_expr(lctx, expr)) - } - ImplItemKind::Method(ref sig, ref body) => { - hir::ImplItemKind::Method(lower_method_sig(lctx, sig), lower_block(lctx, body)) - } - ImplItemKind::Type(ref ty) => hir::ImplItemKind::Type(lower_ty(lctx, ty)), - ImplItemKind::Macro(..) => panic!("Shouldn't exist any more"), - }, - span: i.span, - } -} - -pub fn lower_mod(lctx: &LoweringContext, m: &Mod) -> hir::Mod { - hir::Mod { - inner: m.inner, - item_ids: m.items.iter().map(|x| lower_item_id(lctx, x)).collect(), - } -} - -struct ItemLowerer<'lcx, 'interner: 'lcx> { - items: BTreeMap, - lctx: &'lcx LoweringContext<'interner>, -} - -impl<'lcx, 'interner> Visitor<'lcx> for ItemLowerer<'lcx, 'interner> { - fn visit_item(&mut self, item: &'lcx Item) { - self.items.insert(item.id, lower_item(self.lctx, item)); - visit::walk_item(self, item); - } -} - -pub fn lower_crate(lctx: &LoweringContext, c: &Crate) -> hir::Crate { - let items = { - let mut item_lowerer = ItemLowerer { items: BTreeMap::new(), lctx: lctx }; - visit::walk_crate(&mut item_lowerer, c); - item_lowerer.items - }; - - hir::Crate { - module: lower_mod(lctx, &c.module), - attrs: lower_attrs(lctx, &c.attrs), - config: c.config.clone().into(), - span: c.span, - exported_macros: c.exported_macros.iter().map(|m| lower_macro_def(lctx, m)).collect(), - items: items, - } -} - -pub fn lower_macro_def(lctx: &LoweringContext, m: &MacroDef) -> hir::MacroDef { - hir::MacroDef { - name: m.ident.name, - attrs: lower_attrs(lctx, &m.attrs), - id: m.id, - span: m.span, - imported_from: m.imported_from.map(|x| x.name), - export: m.export, - use_locally: m.use_locally, - allow_internal_unstable: m.allow_internal_unstable, - body: m.body.clone().into(), - } -} - -pub fn lower_item_id(_lctx: &LoweringContext, i: &Item) -> hir::ItemId { - hir::ItemId { id: i.id } -} - -pub fn lower_item(lctx: &LoweringContext, i: &Item) -> 
hir::Item { - let node = lower_item_underscore(lctx, &i.node); - - hir::Item { - id: i.id, - name: i.ident.name, - attrs: lower_attrs(lctx, &i.attrs), - node: node, - vis: lower_visibility(lctx, i.vis), - span: i.span, - } -} - -pub fn lower_foreign_item(lctx: &LoweringContext, i: &ForeignItem) -> hir::ForeignItem { - hir::ForeignItem { - id: i.id, - name: i.ident.name, - attrs: lower_attrs(lctx, &i.attrs), - node: match i.node { - ForeignItemFn(ref fdec, ref generics) => { - hir::ForeignItemFn(lower_fn_decl(lctx, fdec), lower_generics(lctx, generics)) - } - ForeignItemStatic(ref t, m) => { - hir::ForeignItemStatic(lower_ty(lctx, t), m) - } - }, - vis: lower_visibility(lctx, i.vis), - span: i.span, - } -} - -pub fn lower_method_sig(lctx: &LoweringContext, sig: &MethodSig) -> hir::MethodSig { - hir::MethodSig { - generics: lower_generics(lctx, &sig.generics), - abi: sig.abi, - explicit_self: lower_explicit_self(lctx, &sig.explicit_self), - unsafety: lower_unsafety(lctx, sig.unsafety), - constness: lower_constness(lctx, sig.constness), - decl: lower_fn_decl(lctx, &sig.decl), - } -} - -pub fn lower_unsafety(_lctx: &LoweringContext, u: Unsafety) -> hir::Unsafety { - match u { - Unsafety::Unsafe => hir::Unsafety::Unsafe, - Unsafety::Normal => hir::Unsafety::Normal, - } -} - -pub fn lower_constness(_lctx: &LoweringContext, c: Constness) -> hir::Constness { - match c { - Constness::Const => hir::Constness::Const, - Constness::NotConst => hir::Constness::NotConst, - } -} - -pub fn lower_unop(_lctx: &LoweringContext, u: UnOp) -> hir::UnOp { - match u { - UnDeref => hir::UnDeref, - UnNot => hir::UnNot, - UnNeg => hir::UnNeg, - } -} - -pub fn lower_binop(_lctx: &LoweringContext, b: BinOp) -> hir::BinOp { - Spanned { - node: match b.node { - BiAdd => hir::BiAdd, - BiSub => hir::BiSub, - BiMul => hir::BiMul, - BiDiv => hir::BiDiv, - BiRem => hir::BiRem, - BiAnd => hir::BiAnd, - BiOr => hir::BiOr, - BiBitXor => hir::BiBitXor, - BiBitAnd => hir::BiBitAnd, - BiBitOr => 
hir::BiBitOr, - BiShl => hir::BiShl, - BiShr => hir::BiShr, - BiEq => hir::BiEq, - BiLt => hir::BiLt, - BiLe => hir::BiLe, - BiNe => hir::BiNe, - BiGe => hir::BiGe, - BiGt => hir::BiGt, - }, - span: b.span, - } -} - -pub fn lower_pat(lctx: &LoweringContext, p: &Pat) -> P { - P(hir::Pat { - id: p.id, - node: match p.node { - PatWild => hir::PatWild, - PatIdent(ref binding_mode, pth1, ref sub) => { - hir::PatIdent(lower_binding_mode(lctx, binding_mode), - respan(pth1.span, lower_ident(lctx, pth1.node)), - sub.as_ref().map(|x| lower_pat(lctx, x))) - } - PatLit(ref e) => hir::PatLit(lower_expr(lctx, e)), - PatEnum(ref pth, ref pats) => { - hir::PatEnum(lower_path(lctx, pth), - pats.as_ref() - .map(|pats| pats.iter().map(|x| lower_pat(lctx, x)).collect())) - } - PatQPath(ref qself, ref pth) => { - let qself = hir::QSelf { - ty: lower_ty(lctx, &qself.ty), - position: qself.position, - }; - hir::PatQPath(qself, lower_path(lctx, pth)) - } - PatStruct(ref pth, ref fields, etc) => { - let pth = lower_path(lctx, pth); - let fs = fields.iter() - .map(|f| { - Spanned { - span: f.span, - node: hir::FieldPat { - name: f.node.ident.name, - pat: lower_pat(lctx, &f.node.pat), - is_shorthand: f.node.is_shorthand, - }, - } - }) - .collect(); - hir::PatStruct(pth, fs, etc) - } - PatTup(ref elts) => hir::PatTup(elts.iter().map(|x| lower_pat(lctx, x)).collect()), - PatBox(ref inner) => hir::PatBox(lower_pat(lctx, inner)), - PatRegion(ref inner, mutbl) => { - hir::PatRegion(lower_pat(lctx, inner), lower_mutability(lctx, mutbl)) - } - PatRange(ref e1, ref e2) => { - hir::PatRange(lower_expr(lctx, e1), lower_expr(lctx, e2)) - } - PatVec(ref before, ref slice, ref after) => { - hir::PatVec(before.iter().map(|x| lower_pat(lctx, x)).collect(), - slice.as_ref().map(|x| lower_pat(lctx, x)), - after.iter().map(|x| lower_pat(lctx, x)).collect()) - } - PatMac(_) => panic!("Shouldn't exist here"), - }, - span: p.span, - }) -} - -// Utility fn for setting and unsetting the cached id. 
-fn cache_ids<'a, OP, R>(lctx: &LoweringContext, expr_id: NodeId, op: OP) -> R - where OP: FnOnce(&LoweringContext) -> R -{ - // Only reset the id if it was previously 0, i.e., was not cached. - // If it was cached, we are in a nested node, but our id count will - // still count towards the parent's count. - let reset_cached_id = lctx.cached_id.get() == 0; - - { - let id_cache: &mut HashMap<_, _> = &mut lctx.id_cache.borrow_mut(); - - if id_cache.contains_key(&expr_id) { - let cached_id = lctx.cached_id.get(); - if cached_id == 0 { - // We're entering a node where we need to track ids, but are not - // yet tracking. - lctx.cached_id.set(id_cache[&expr_id]); - lctx.gensym_key.set(id_cache[&expr_id]); - } else { - // We're already tracking - check that the tracked id is the same - // as the expected id. - assert!(cached_id == id_cache[&expr_id], "id mismatch"); - } - } else { - let next_id = lctx.id_assigner.peek_node_id(); - id_cache.insert(expr_id, next_id); - lctx.gensym_key.set(next_id); - } - } - - let result = op(lctx); - - if reset_cached_id { - lctx.cached_id.set(0); - lctx.gensym_key.set(0); - } - - result -} - -pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P { - P(hir::Expr { - id: e.id, - node: match e.node { - // Issue #22181: - // Eventually a desugaring for `box EXPR` - // (similar to the desugaring above for `in PLACE BLOCK`) - // should go here, desugaring - // - // to: - // - // let mut place = BoxPlace::make_place(); - // let raw_place = Place::pointer(&mut place); - // let value = $value; - // unsafe { - // ::std::ptr::write(raw_place, value); - // Boxed::finalize(place) - // } - // - // But for now there are type-inference issues doing that. 
- ExprBox(ref e) => { - hir::ExprBox(lower_expr(lctx, e)) - } - - // Desugar ExprBox: `in (PLACE) EXPR` - ExprInPlace(ref placer, ref value_expr) => { - // to: - // - // let p = PLACE; - // let mut place = Placer::make_place(p); - // let raw_place = Place::pointer(&mut place); - // push_unsafe!({ - // std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR )); - // InPlace::finalize(place) - // }) - return cache_ids(lctx, e.id, |lctx| { - let placer_expr = lower_expr(lctx, placer); - let value_expr = lower_expr(lctx, value_expr); - - let placer_ident = lctx.str_to_ident("placer"); - let place_ident = lctx.str_to_ident("place"); - let p_ptr_ident = lctx.str_to_ident("p_ptr"); - - let make_place = ["ops", "Placer", "make_place"]; - let place_pointer = ["ops", "Place", "pointer"]; - let move_val_init = ["intrinsics", "move_val_init"]; - let inplace_finalize = ["ops", "InPlace", "finalize"]; - - let make_call = |lctx: &LoweringContext, p, args| { - let path = core_path(lctx, e.span, p); - let path = expr_path(lctx, path, None); - expr_call(lctx, e.span, path, args, None) - }; - - let mk_stmt_let = |lctx: &LoweringContext, bind, expr| { - stmt_let(lctx, e.span, false, bind, expr, None) - }; - - let mk_stmt_let_mut = |lctx: &LoweringContext, bind, expr| { - stmt_let(lctx, e.span, true, bind, expr, None) - }; - - // let placer = ; - let s1 = { - let placer_expr = signal_block_expr(lctx, - hir_vec![], - placer_expr, - e.span, - hir::PopUnstableBlock, - None); - mk_stmt_let(lctx, placer_ident, placer_expr) - }; - - // let mut place = Placer::make_place(placer); - let s2 = { - let placer = expr_ident(lctx, e.span, placer_ident, None); - let call = make_call(lctx, &make_place, hir_vec![placer]); - mk_stmt_let_mut(lctx, place_ident, call) - }; - - // let p_ptr = Place::pointer(&mut place); - let s3 = { - let agent = expr_ident(lctx, e.span, place_ident, None); - let args = hir_vec![expr_mut_addr_of(lctx, e.span, agent, None)]; - let call = make_call(lctx, &place_pointer, 
args); - mk_stmt_let(lctx, p_ptr_ident, call) - }; - - // pop_unsafe!(EXPR)); - let pop_unsafe_expr = { - let value_expr = signal_block_expr(lctx, - hir_vec![], - value_expr, - e.span, - hir::PopUnstableBlock, - None); - signal_block_expr(lctx, - hir_vec![], - value_expr, - e.span, - hir::PopUnsafeBlock(hir::CompilerGenerated), None) - }; - - // push_unsafe!({ - // std::intrinsics::move_val_init(raw_place, pop_unsafe!( EXPR )); - // InPlace::finalize(place) - // }) - let expr = { - let ptr = expr_ident(lctx, e.span, p_ptr_ident, None); - let call_move_val_init = - hir::StmtSemi( - make_call(lctx, &move_val_init, hir_vec![ptr, pop_unsafe_expr]), - lctx.next_id()); - let call_move_val_init = respan(e.span, call_move_val_init); - - let place = expr_ident(lctx, e.span, place_ident, None); - let call = make_call(lctx, &inplace_finalize, hir_vec![place]); - signal_block_expr(lctx, - hir_vec![call_move_val_init], - call, - e.span, - hir::PushUnsafeBlock(hir::CompilerGenerated), None) - }; - - signal_block_expr(lctx, - hir_vec![s1, s2, s3], - expr, - e.span, - hir::PushUnstableBlock, - e.attrs.clone()) - }); - } - - ExprVec(ref exprs) => { - hir::ExprVec(exprs.iter().map(|x| lower_expr(lctx, x)).collect()) - } - ExprRepeat(ref expr, ref count) => { - let expr = lower_expr(lctx, expr); - let count = lower_expr(lctx, count); - hir::ExprRepeat(expr, count) - } - ExprTup(ref elts) => { - hir::ExprTup(elts.iter().map(|x| lower_expr(lctx, x)).collect()) - } - ExprCall(ref f, ref args) => { - let f = lower_expr(lctx, f); - hir::ExprCall(f, args.iter().map(|x| lower_expr(lctx, x)).collect()) - } - ExprMethodCall(i, ref tps, ref args) => { - let tps = tps.iter().map(|x| lower_ty(lctx, x)).collect(); - let args = args.iter().map(|x| lower_expr(lctx, x)).collect(); - hir::ExprMethodCall(respan(i.span, i.node.name), tps, args) - } - ExprBinary(binop, ref lhs, ref rhs) => { - let binop = lower_binop(lctx, binop); - let lhs = lower_expr(lctx, lhs); - let rhs = lower_expr(lctx, rhs); - 
hir::ExprBinary(binop, lhs, rhs) - } - ExprUnary(op, ref ohs) => { - let op = lower_unop(lctx, op); - let ohs = lower_expr(lctx, ohs); - hir::ExprUnary(op, ohs) - } - ExprLit(ref l) => hir::ExprLit(P((**l).clone())), - ExprCast(ref expr, ref ty) => { - let expr = lower_expr(lctx, expr); - hir::ExprCast(expr, lower_ty(lctx, ty)) - } - ExprType(ref expr, ref ty) => { - let expr = lower_expr(lctx, expr); - hir::ExprType(expr, lower_ty(lctx, ty)) - } - ExprAddrOf(m, ref ohs) => { - let m = lower_mutability(lctx, m); - let ohs = lower_expr(lctx, ohs); - hir::ExprAddrOf(m, ohs) - } - // More complicated than you might expect because the else branch - // might be `if let`. - ExprIf(ref cond, ref blk, ref else_opt) => { - let else_opt = else_opt.as_ref().map(|els| { - match els.node { - ExprIfLet(..) => { - cache_ids(lctx, e.id, |lctx| { - // wrap the if-let expr in a block - let span = els.span; - let els = lower_expr(lctx, els); - let id = lctx.next_id(); - let blk = P(hir::Block { - stmts: hir_vec![], - expr: Some(els), - id: id, - rules: hir::DefaultBlock, - span: span, - }); - expr_block(lctx, blk, None) - }) - } - _ => lower_expr(lctx, els), - } - }); - - hir::ExprIf(lower_expr(lctx, cond), lower_block(lctx, blk), else_opt) - } - ExprWhile(ref cond, ref body, opt_ident) => { - hir::ExprWhile(lower_expr(lctx, cond), lower_block(lctx, body), - opt_ident.map(|ident| lower_ident(lctx, ident))) - } - ExprLoop(ref body, opt_ident) => { - hir::ExprLoop(lower_block(lctx, body), - opt_ident.map(|ident| lower_ident(lctx, ident))) - } - ExprMatch(ref expr, ref arms) => { - hir::ExprMatch(lower_expr(lctx, expr), - arms.iter().map(|x| lower_arm(lctx, x)).collect(), - hir::MatchSource::Normal) - } - ExprClosure(capture_clause, ref decl, ref body) => { - hir::ExprClosure(lower_capture_clause(lctx, capture_clause), - lower_fn_decl(lctx, decl), - lower_block(lctx, body)) - } - ExprBlock(ref blk) => hir::ExprBlock(lower_block(lctx, blk)), - ExprAssign(ref el, ref er) => { - 
hir::ExprAssign(lower_expr(lctx, el), lower_expr(lctx, er)) - } - ExprAssignOp(op, ref el, ref er) => { - hir::ExprAssignOp(lower_binop(lctx, op), - lower_expr(lctx, el), - lower_expr(lctx, er)) - } - ExprField(ref el, ident) => { - hir::ExprField(lower_expr(lctx, el), respan(ident.span, ident.node.name)) - } - ExprTupField(ref el, ident) => { - hir::ExprTupField(lower_expr(lctx, el), ident) - } - ExprIndex(ref el, ref er) => { - hir::ExprIndex(lower_expr(lctx, el), lower_expr(lctx, er)) - } - ExprRange(ref e1, ref e2) => { - hir::ExprRange(e1.as_ref().map(|x| lower_expr(lctx, x)), - e2.as_ref().map(|x| lower_expr(lctx, x))) - } - ExprPath(ref qself, ref path) => { - let hir_qself = qself.as_ref().map(|&QSelf { ref ty, position }| { - hir::QSelf { - ty: lower_ty(lctx, ty), - position: position, - } - }); - hir::ExprPath(hir_qself, lower_path_full(lctx, path, qself.is_none())) - } - ExprBreak(opt_ident) => hir::ExprBreak(opt_ident.map(|sp_ident| { - respan(sp_ident.span, lower_ident(lctx, sp_ident.node)) - })), - ExprAgain(opt_ident) => hir::ExprAgain(opt_ident.map(|sp_ident| { - respan(sp_ident.span, lower_ident(lctx, sp_ident.node)) - })), - ExprRet(ref e) => hir::ExprRet(e.as_ref().map(|x| lower_expr(lctx, x))), - ExprInlineAsm(InlineAsm { - ref inputs, - ref outputs, - ref asm, - asm_str_style, - ref clobbers, - volatile, - alignstack, - dialect, - expn_id, - }) => hir::ExprInlineAsm(hir::InlineAsm { - inputs: inputs.iter() - .map(|&(ref c, ref input)| (c.clone(), lower_expr(lctx, input))) - .collect(), - outputs: outputs.iter() - .map(|out| { - hir::InlineAsmOutput { - constraint: out.constraint.clone(), - expr: lower_expr(lctx, &out.expr), - is_rw: out.is_rw, - is_indirect: out.is_indirect, - } - }) - .collect(), - asm: asm.clone(), - asm_str_style: asm_str_style, - clobbers: clobbers.clone().into(), - volatile: volatile, - alignstack: alignstack, - dialect: dialect, - expn_id: expn_id, - }), - ExprStruct(ref path, ref fields, ref maybe_expr) => { - 
hir::ExprStruct(lower_path(lctx, path), - fields.iter().map(|x| lower_field(lctx, x)).collect(), - maybe_expr.as_ref().map(|x| lower_expr(lctx, x))) - } - ExprParen(ref ex) => { - // merge attributes into the inner expression. - return lower_expr(lctx, ex).map(|mut ex| { - ex.attrs.update(|attrs| { - attrs.prepend(e.attrs.clone()) - }); - ex - }); - } - - // Desugar ExprIfLet - // From: `if let = []` - ExprIfLet(ref pat, ref sub_expr, ref body, ref else_opt) => { - // to: - // - // match { - // => , - // [_ if => ,] - // _ => [ | ()] - // } - - return cache_ids(lctx, e.id, |lctx| { - // ` => ` - let pat_arm = { - let body = lower_block(lctx, body); - let body_expr = expr_block(lctx, body, None); - arm(hir_vec![lower_pat(lctx, pat)], body_expr) - }; - - // `[_ if => ,]` - let mut else_opt = else_opt.as_ref().map(|e| lower_expr(lctx, e)); - let else_if_arms = { - let mut arms = vec![]; - loop { - let else_opt_continue = else_opt.and_then(|els| { - els.and_then(|els| { - match els.node { - // else if - hir::ExprIf(cond, then, else_opt) => { - let pat_under = pat_wild(lctx, e.span); - arms.push(hir::Arm { - attrs: hir_vec![], - pats: hir_vec![pat_under], - guard: Some(cond), - body: expr_block(lctx, then, None), - }); - else_opt.map(|else_opt| (else_opt, true)) - } - _ => Some((P(els), false)), - } - }) - }); - match else_opt_continue { - Some((e, true)) => { - else_opt = Some(e); - } - Some((e, false)) => { - else_opt = Some(e); - break; - } - None => { - else_opt = None; - break; - } - } - } - arms - }; - - let contains_else_clause = else_opt.is_some(); - - // `_ => [ | ()]` - let else_arm = { - let pat_under = pat_wild(lctx, e.span); - let else_expr = - else_opt.unwrap_or_else( - || expr_tuple(lctx, e.span, hir_vec![], None)); - arm(hir_vec![pat_under], else_expr) - }; - - let mut arms = Vec::with_capacity(else_if_arms.len() + 2); - arms.push(pat_arm); - arms.extend(else_if_arms); - arms.push(else_arm); - - let sub_expr = lower_expr(lctx, sub_expr); - // add 
attributes to the outer returned expr node - expr(lctx, - e.span, - hir::ExprMatch(sub_expr, - arms.into(), - hir::MatchSource::IfLetDesugar { - contains_else_clause: contains_else_clause, - }), - e.attrs.clone()) - }); - } - - // Desugar ExprWhileLet - // From: `[opt_ident]: while let = ` - ExprWhileLet(ref pat, ref sub_expr, ref body, opt_ident) => { - // to: - // - // [opt_ident]: loop { - // match { - // => , - // _ => break - // } - // } - - return cache_ids(lctx, e.id, |lctx| { - // ` => ` - let pat_arm = { - let body = lower_block(lctx, body); - let body_expr = expr_block(lctx, body, None); - arm(hir_vec![lower_pat(lctx, pat)], body_expr) - }; - - // `_ => break` - let break_arm = { - let pat_under = pat_wild(lctx, e.span); - let break_expr = expr_break(lctx, e.span, None); - arm(hir_vec![pat_under], break_expr) - }; - - // `match { ... }` - let arms = hir_vec![pat_arm, break_arm]; - let sub_expr = lower_expr(lctx, sub_expr); - let match_expr = expr(lctx, - e.span, - hir::ExprMatch(sub_expr, - arms, - hir::MatchSource::WhileLetDesugar), - None); - - // `[opt_ident]: loop { ... 
}` - let loop_block = block_expr(lctx, match_expr); - let loop_expr = hir::ExprLoop(loop_block, - opt_ident.map(|ident| lower_ident(lctx, ident))); - // add attributes to the outer returned expr node - expr(lctx, e.span, loop_expr, e.attrs.clone()) - }); - } - - // Desugar ExprForLoop - // From: `[opt_ident]: for in ` - ExprForLoop(ref pat, ref head, ref body, opt_ident) => { - // to: - // - // { - // let result = match ::std::iter::IntoIterator::into_iter() { - // mut iter => { - // [opt_ident]: loop { - // match ::std::iter::Iterator::next(&mut iter) { - // ::std::option::Option::Some() => , - // ::std::option::Option::None => break - // } - // } - // } - // }; - // result - // } - - return cache_ids(lctx, e.id, |lctx| { - // expand - let head = lower_expr(lctx, head); - - let iter = lctx.str_to_ident("iter"); - - // `::std::option::Option::Some() => ` - let pat_arm = { - let body_block = lower_block(lctx, body); - let body_span = body_block.span; - let body_expr = P(hir::Expr { - id: lctx.next_id(), - node: hir::ExprBlock(body_block), - span: body_span, - attrs: None, - }); - let pat = lower_pat(lctx, pat); - let some_pat = pat_some(lctx, e.span, pat); - - arm(hir_vec![some_pat], body_expr) - }; - - // `::std::option::Option::None => break` - let break_arm = { - let break_expr = expr_break(lctx, e.span, None); - - arm(hir_vec![pat_none(lctx, e.span)], break_expr) - }; - - // `match ::std::iter::Iterator::next(&mut iter) { ... 
}` - let match_expr = { - let next_path = { - let strs = std_path(lctx, &["iter", "Iterator", "next"]); - - path_global(e.span, strs) - }; - let iter = expr_ident(lctx, e.span, iter, None); - let ref_mut_iter = expr_mut_addr_of(lctx, e.span, iter, None); - let next_path = expr_path(lctx, next_path, None); - let next_expr = expr_call(lctx, - e.span, - next_path, - hir_vec![ref_mut_iter], - None); - let arms = hir_vec![pat_arm, break_arm]; - - expr(lctx, - e.span, - hir::ExprMatch(next_expr, arms, hir::MatchSource::ForLoopDesugar), - None) - }; - - // `[opt_ident]: loop { ... }` - let loop_block = block_expr(lctx, match_expr); - let loop_expr = hir::ExprLoop(loop_block, - opt_ident.map(|ident| lower_ident(lctx, ident))); - let loop_expr = expr(lctx, e.span, loop_expr, None); - - // `mut iter => { ... }` - let iter_arm = { - let iter_pat = pat_ident_binding_mode(lctx, - e.span, - iter, - hir::BindByValue(hir::MutMutable)); - arm(hir_vec![iter_pat], loop_expr) - }; - - // `match ::std::iter::IntoIterator::into_iter() { ... 
}` - let into_iter_expr = { - let into_iter_path = { - let strs = std_path(lctx, &["iter", "IntoIterator", "into_iter"]); - - path_global(e.span, strs) - }; - - let into_iter = expr_path(lctx, into_iter_path, None); - expr_call(lctx, e.span, into_iter, hir_vec![head], None) - }; - - let match_expr = expr_match(lctx, - e.span, - into_iter_expr, - hir_vec![iter_arm], - hir::MatchSource::ForLoopDesugar, - None); - - // `{ let _result = ...; _result }` - // underscore prevents an unused_variables lint if the head diverges - let result_ident = lctx.str_to_ident("_result"); - let let_stmt = stmt_let(lctx, e.span, false, result_ident, match_expr, None); - let result = expr_ident(lctx, e.span, result_ident, None); - let block = block_all(lctx, e.span, hir_vec![let_stmt], Some(result)); - // add the attributes to the outer returned expr node - expr_block(lctx, block, e.attrs.clone()) - }); - } - - ExprMac(_) => panic!("Shouldn't exist here"), - }, - span: e.span, - attrs: e.attrs.clone(), - }) -} - -pub fn lower_stmt(lctx: &LoweringContext, s: &Stmt) -> hir::Stmt { - match s.node { - StmtDecl(ref d, id) => { - Spanned { - node: hir::StmtDecl(lower_decl(lctx, d), id), - span: s.span, - } - } - StmtExpr(ref e, id) => { - Spanned { - node: hir::StmtExpr(lower_expr(lctx, e), id), - span: s.span, - } - } - StmtSemi(ref e, id) => { - Spanned { - node: hir::StmtSemi(lower_expr(lctx, e), id), - span: s.span, - } - } - StmtMac(..) 
=> panic!("Shouldn't exist here"), - } -} - -pub fn lower_capture_clause(_lctx: &LoweringContext, c: CaptureClause) -> hir::CaptureClause { - match c { - CaptureByValue => hir::CaptureByValue, - CaptureByRef => hir::CaptureByRef, - } -} - -pub fn lower_visibility(_lctx: &LoweringContext, v: Visibility) -> hir::Visibility { - match v { - Public => hir::Public, - Inherited => hir::Inherited, - } -} - -pub fn lower_block_check_mode(lctx: &LoweringContext, b: &BlockCheckMode) -> hir::BlockCheckMode { - match *b { - DefaultBlock => hir::DefaultBlock, - UnsafeBlock(u) => hir::UnsafeBlock(lower_unsafe_source(lctx, u)), - } -} - -pub fn lower_binding_mode(lctx: &LoweringContext, b: &BindingMode) -> hir::BindingMode { - match *b { - BindingMode::ByRef(m) => hir::BindByRef(lower_mutability(lctx, m)), - BindingMode::ByValue(m) => hir::BindByValue(lower_mutability(lctx, m)), - } -} - -pub fn lower_struct_field_kind(lctx: &LoweringContext, - s: &StructFieldKind) - -> hir::StructFieldKind { - match *s { - NamedField(ident, vis) => hir::NamedField(ident.name, lower_visibility(lctx, vis)), - UnnamedField(vis) => hir::UnnamedField(lower_visibility(lctx, vis)), - } -} - -pub fn lower_unsafe_source(_lctx: &LoweringContext, u: UnsafeSource) -> hir::UnsafeSource { - match u { - CompilerGenerated => hir::CompilerGenerated, - UserProvided => hir::UserProvided, - } -} - -pub fn lower_impl_polarity(_lctx: &LoweringContext, i: ImplPolarity) -> hir::ImplPolarity { - match i { - ImplPolarity::Positive => hir::ImplPolarity::Positive, - ImplPolarity::Negative => hir::ImplPolarity::Negative, - } -} - -pub fn lower_trait_bound_modifier(_lctx: &LoweringContext, - f: TraitBoundModifier) - -> hir::TraitBoundModifier { - match f { - TraitBoundModifier::None => hir::TraitBoundModifier::None, - TraitBoundModifier::Maybe => hir::TraitBoundModifier::Maybe, - } -} - -// Helper methods for building HIR. 
- -fn arm(pats: hir::HirVec>, expr: P) -> hir::Arm { - hir::Arm { - attrs: hir_vec![], - pats: pats, - guard: None, - body: expr, - } -} - -fn expr_break(lctx: &LoweringContext, span: Span, - attrs: ThinAttributes) -> P { - expr(lctx, span, hir::ExprBreak(None), attrs) -} - -fn expr_call(lctx: &LoweringContext, - span: Span, - e: P, - args: hir::HirVec>, - attrs: ThinAttributes) - -> P { - expr(lctx, span, hir::ExprCall(e, args), attrs) -} - -fn expr_ident(lctx: &LoweringContext, span: Span, id: hir::Ident, - attrs: ThinAttributes) -> P { - expr_path(lctx, path_ident(span, id), attrs) -} - -fn expr_mut_addr_of(lctx: &LoweringContext, span: Span, e: P, - attrs: ThinAttributes) -> P { - expr(lctx, span, hir::ExprAddrOf(hir::MutMutable, e), attrs) -} - -fn expr_path(lctx: &LoweringContext, path: hir::Path, - attrs: ThinAttributes) -> P { - expr(lctx, path.span, hir::ExprPath(None, path), attrs) -} - -fn expr_match(lctx: &LoweringContext, - span: Span, - arg: P, - arms: hir::HirVec, - source: hir::MatchSource, - attrs: ThinAttributes) - -> P { - expr(lctx, span, hir::ExprMatch(arg, arms, source), attrs) -} - -fn expr_block(lctx: &LoweringContext, b: P, - attrs: ThinAttributes) -> P { - expr(lctx, b.span, hir::ExprBlock(b), attrs) -} - -fn expr_tuple(lctx: &LoweringContext, sp: Span, exprs: hir::HirVec>, - attrs: ThinAttributes) -> P { - expr(lctx, sp, hir::ExprTup(exprs), attrs) -} - -fn expr(lctx: &LoweringContext, span: Span, node: hir::Expr_, - attrs: ThinAttributes) -> P { - P(hir::Expr { - id: lctx.next_id(), - node: node, - span: span, - attrs: attrs, - }) -} - -fn stmt_let(lctx: &LoweringContext, - sp: Span, - mutbl: bool, - ident: hir::Ident, - ex: P, - attrs: ThinAttributes) - -> hir::Stmt { - let pat = if mutbl { - pat_ident_binding_mode(lctx, sp, ident, hir::BindByValue(hir::MutMutable)) - } else { - pat_ident(lctx, sp, ident) - }; - let local = P(hir::Local { - pat: pat, - ty: None, - init: Some(ex), - id: lctx.next_id(), - span: sp, - attrs: attrs, - }); - 
let decl = respan(sp, hir::DeclLocal(local)); - respan(sp, hir::StmtDecl(P(decl), lctx.next_id())) -} - -fn block_expr(lctx: &LoweringContext, expr: P) -> P { - block_all(lctx, expr.span, hir::HirVec::new(), Some(expr)) -} - -fn block_all(lctx: &LoweringContext, - span: Span, - stmts: hir::HirVec, - expr: Option>) - -> P { - P(hir::Block { - stmts: stmts, - expr: expr, - id: lctx.next_id(), - rules: hir::DefaultBlock, - span: span, - }) -} - -fn pat_some(lctx: &LoweringContext, span: Span, pat: P) -> P { - let some = std_path(lctx, &["option", "Option", "Some"]); - let path = path_global(span, some); - pat_enum(lctx, span, path, hir_vec![pat]) -} - -fn pat_none(lctx: &LoweringContext, span: Span) -> P { - let none = std_path(lctx, &["option", "Option", "None"]); - let path = path_global(span, none); - pat_enum(lctx, span, path, hir_vec![]) -} - -fn pat_enum(lctx: &LoweringContext, - span: Span, - path: hir::Path, - subpats: hir::HirVec>) - -> P { - let pt = hir::PatEnum(path, Some(subpats)); - pat(lctx, span, pt) -} - -fn pat_ident(lctx: &LoweringContext, span: Span, ident: hir::Ident) -> P { - pat_ident_binding_mode(lctx, span, ident, hir::BindByValue(hir::MutImmutable)) -} - -fn pat_ident_binding_mode(lctx: &LoweringContext, - span: Span, - ident: hir::Ident, - bm: hir::BindingMode) - -> P { - let pat_ident = hir::PatIdent(bm, - Spanned { - span: span, - node: ident, - }, - None); - pat(lctx, span, pat_ident) -} - -fn pat_wild(lctx: &LoweringContext, span: Span) -> P { - pat(lctx, span, hir::PatWild) -} - -fn pat(lctx: &LoweringContext, span: Span, pat: hir::Pat_) -> P { - P(hir::Pat { - id: lctx.next_id(), - node: pat, - span: span, - }) -} - -fn path_ident(span: Span, id: hir::Ident) -> hir::Path { - path(span, vec![id]) -} - -fn path(span: Span, strs: Vec) -> hir::Path { - path_all(span, false, strs, hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new()) -} - -fn path_global(span: Span, strs: Vec) -> hir::Path { - path_all(span, true, strs, 
hir::HirVec::new(), hir::HirVec::new(), hir::HirVec::new()) -} - -fn path_all(sp: Span, - global: bool, - mut idents: Vec, - lifetimes: hir::HirVec, - types: hir::HirVec>, - bindings: hir::HirVec) - -> hir::Path { - let last_identifier = idents.pop().unwrap(); - let mut segments: Vec = idents.into_iter() - .map(|ident| { - hir::PathSegment { - identifier: ident, - parameters: hir::PathParameters::none(), - } - }) - .collect(); - segments.push(hir::PathSegment { - identifier: last_identifier, - parameters: hir::AngleBracketedParameters(hir::AngleBracketedParameterData { - lifetimes: lifetimes, - types: types, - bindings: bindings, - }), - }); - hir::Path { - span: sp, - global: global, - segments: segments.into(), - } -} - -fn std_path(lctx: &LoweringContext, components: &[&str]) -> Vec { - let mut v = Vec::new(); - if let Some(s) = lctx.crate_root { - v.push(hir::Ident::from_name(token::intern(s))); - } - v.extend(components.iter().map(|s| hir::Ident::from_name(token::intern(s)))); - return v; -} - -// Given suffix ["b","c","d"], returns path `::std::b::c::d` when -// `fld.cx.use_std`, and `::core::b::c::d` otherwise. 
-fn core_path(lctx: &LoweringContext, span: Span, components: &[&str]) -> hir::Path { - let idents = std_path(lctx, components); - path_global(span, idents) -} - -fn signal_block_expr(lctx: &LoweringContext, - stmts: hir::HirVec, - expr: P, - span: Span, - rule: hir::BlockCheckMode, - attrs: ThinAttributes) - -> P { - let id = lctx.next_id(); - expr_block(lctx, - P(hir::Block { - rules: rule, - span: span, - id: id, - stmts: stmts, - expr: Some(expr), - }), - attrs) -} - - - -#[cfg(test)] -mod test { - use super::*; - use syntax::ast::{self, NodeId, NodeIdAssigner}; - use syntax::{parse, codemap}; - use syntax::fold::Folder; - use std::cell::Cell; - - struct MockAssigner { - next_id: Cell, - } - - impl MockAssigner { - fn new() -> MockAssigner { - MockAssigner { next_id: Cell::new(0) } - } - } - - trait FakeExtCtxt { - fn call_site(&self) -> codemap::Span; - fn cfg(&self) -> ast::CrateConfig; - fn ident_of(&self, st: &str) -> ast::Ident; - fn name_of(&self, st: &str) -> ast::Name; - fn parse_sess(&self) -> &parse::ParseSess; - } - - impl FakeExtCtxt for parse::ParseSess { - fn call_site(&self) -> codemap::Span { - codemap::Span { - lo: codemap::BytePos(0), - hi: codemap::BytePos(0), - expn_id: codemap::NO_EXPANSION, - } - } - fn cfg(&self) -> ast::CrateConfig { - Vec::new() - } - fn ident_of(&self, st: &str) -> ast::Ident { - parse::token::str_to_ident(st) - } - fn name_of(&self, st: &str) -> ast::Name { - parse::token::intern(st) - } - fn parse_sess(&self) -> &parse::ParseSess { - self - } - } - - impl NodeIdAssigner for MockAssigner { - fn next_node_id(&self) -> NodeId { - let result = self.next_id.get(); - self.next_id.set(result + 1); - result - } - - fn peek_node_id(&self) -> NodeId { - self.next_id.get() - } - } - - impl Folder for MockAssigner { - fn new_id(&mut self, old_id: NodeId) -> NodeId { - assert_eq!(old_id, ast::DUMMY_NODE_ID); - self.next_node_id() - } - } - - #[test] - fn test_preserves_ids() { - let cx = parse::ParseSess::new(); - let mut 
assigner = MockAssigner::new(); - - let ast_if_let = quote_expr!(&cx, - if let Some(foo) = baz { - bar(foo); - }); - let ast_if_let = assigner.fold_expr(ast_if_let); - let ast_while_let = quote_expr!(&cx, - while let Some(foo) = baz { - bar(foo); - }); - let ast_while_let = assigner.fold_expr(ast_while_let); - let ast_for = quote_expr!(&cx, - for i in 0..10 { - foo(i); - }); - let ast_for = assigner.fold_expr(ast_for); - let ast_in = quote_expr!(&cx, in HEAP { foo() }); - let ast_in = assigner.fold_expr(ast_in); - - let lctx = LoweringContext::new(&assigner, None); - let hir1 = lower_expr(&lctx, &ast_if_let); - let hir2 = lower_expr(&lctx, &ast_if_let); - assert!(hir1 == hir2); - - let hir1 = lower_expr(&lctx, &ast_while_let); - let hir2 = lower_expr(&lctx, &ast_while_let); - assert!(hir1 == hir2); - - let hir1 = lower_expr(&lctx, &ast_for); - let hir2 = lower_expr(&lctx, &ast_for); - assert!(hir1 == hir2); - - let hir1 = lower_expr(&lctx, &ast_in); - let hir2 = lower_expr(&lctx, &ast_in); - assert!(hir1 == hir2); - } -} diff --git a/src/librustc_front/print/pprust.rs b/src/librustc_front/print/pprust.rs deleted file mode 100644 index c5ce76c1b6e69..0000000000000 --- a/src/librustc_front/print/pprust.rs +++ /dev/null @@ -1,2422 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::AnnNode::*; - -use syntax::abi; -use syntax::ast; -use syntax::codemap::{self, CodeMap, BytePos, Spanned}; -use syntax::errors; -use syntax::parse::token::{self, BinOpToken}; -use syntax::parse::lexer::comments; -use syntax::parse; -use syntax::print::pp::{self, break_offset, word, space, hardbreak}; -use syntax::print::pp::{Breaks, eof}; -use syntax::print::pp::Breaks::{Consistent, Inconsistent}; -use syntax::print::pprust::{self as ast_pp, PrintState}; -use syntax::ptr::P; - -use hir; -use hir::{Crate, RegionTyParamBound, TraitTyParamBound, TraitBoundModifier}; - -use std::io::{self, Write, Read}; - -pub enum AnnNode<'a> { - NodeName(&'a ast::Name), - NodeBlock(&'a hir::Block), - NodeItem(&'a hir::Item), - NodeSubItem(ast::NodeId), - NodeExpr(&'a hir::Expr), - NodePat(&'a hir::Pat), -} - -pub trait PpAnn { - fn pre(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> { - Ok(()) - } - fn post(&self, _state: &mut State, _node: AnnNode) -> io::Result<()> { - Ok(()) - } -} - -#[derive(Copy, Clone)] -pub struct NoAnn; - -impl PpAnn for NoAnn {} - - -pub struct State<'a> { - krate: Option<&'a Crate>, - pub s: pp::Printer<'a>, - cm: Option<&'a CodeMap>, - comments: Option>, - literals: Option>, - cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral, - boxes: Vec, - ann: &'a (PpAnn + 'a), -} - -impl<'a> PrintState<'a> for State<'a> { - fn writer(&mut self) -> &mut pp::Printer<'a> { - &mut self.s - } - - fn boxes(&mut self) -> &mut Vec { - &mut self.boxes - } - - fn comments(&mut self) -> &mut Option> { - &mut self.comments - } - - fn cur_cmnt_and_lit(&mut self) -> &mut ast_pp::CurrentCommentAndLiteral { - &mut self.cur_cmnt_and_lit - } - - fn literals(&self) -> &Option> { - &self.literals - } -} - -pub fn rust_printer<'a>(writer: Box, krate: Option<&'a Crate>) -> State<'a> { - static NO_ANN: NoAnn = NoAnn; - rust_printer_annotated(writer, &NO_ANN, krate) -} - -pub fn rust_printer_annotated<'a>(writer: Box, - ann: &'a PpAnn, - krate: Option<&'a 
Crate>) - -> State<'a> { - State { - krate: krate, - s: pp::mk_printer(writer, default_columns), - cm: None, - comments: None, - literals: None, - cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral { - cur_cmnt: 0, - cur_lit: 0, - }, - boxes: Vec::new(), - ann: ann, - } -} - -#[allow(non_upper_case_globals)] -pub const indent_unit: usize = 4; - -#[allow(non_upper_case_globals)] -pub const default_columns: usize = 78; - - -/// Requires you to pass an input filename and reader so that -/// it can scan the input text for comments and literals to -/// copy forward. -pub fn print_crate<'a>(cm: &'a CodeMap, - span_diagnostic: &errors::Handler, - krate: &hir::Crate, - filename: String, - input: &mut Read, - out: Box, - ann: &'a PpAnn, - is_expanded: bool) - -> io::Result<()> { - let mut s = State::new_from_input(cm, span_diagnostic, filename, input, - out, ann, is_expanded, Some(krate)); - - // When printing the AST, we sometimes need to inject `#[no_std]` here. - // Since you can't compile the HIR, it's not necessary. - - try!(s.print_mod(&krate.module, &krate.attrs)); - try!(s.print_remaining_comments()); - eof(&mut s.s) -} - -impl<'a> State<'a> { - pub fn new_from_input(cm: &'a CodeMap, - span_diagnostic: &errors::Handler, - filename: String, - input: &mut Read, - out: Box, - ann: &'a PpAnn, - is_expanded: bool, - krate: Option<&'a Crate>) - -> State<'a> { - let (cmnts, lits) = comments::gather_comments_and_literals(span_diagnostic, - filename, - input); - - State::new(cm, - out, - ann, - Some(cmnts), - // If the code is post expansion, don't use the table of - // literals, since it doesn't correspond with the literals - // in the AST anymore. 
- if is_expanded { - None - } else { - Some(lits) - }, - krate) - } - - pub fn new(cm: &'a CodeMap, - out: Box, - ann: &'a PpAnn, - comments: Option>, - literals: Option>, - krate: Option<&'a Crate>) - -> State<'a> { - State { - krate: krate, - s: pp::mk_printer(out, default_columns), - cm: Some(cm), - comments: comments.clone(), - literals: literals.clone(), - cur_cmnt_and_lit: ast_pp::CurrentCommentAndLiteral { - cur_cmnt: 0, - cur_lit: 0, - }, - boxes: Vec::new(), - ann: ann, - } - } -} - -pub fn to_string(f: F) -> String - where F: FnOnce(&mut State) -> io::Result<()> -{ - let mut wr = Vec::new(); - { - let mut printer = rust_printer(Box::new(&mut wr), None); - f(&mut printer).unwrap(); - eof(&mut printer.s).unwrap(); - } - String::from_utf8(wr).unwrap() -} - -pub fn binop_to_string(op: BinOpToken) -> &'static str { - match op { - token::Plus => "+", - token::Minus => "-", - token::Star => "*", - token::Slash => "/", - token::Percent => "%", - token::Caret => "^", - token::And => "&", - token::Or => "|", - token::Shl => "<<", - token::Shr => ">>", - } -} - -pub fn ty_to_string(ty: &hir::Ty) -> String { - to_string(|s| s.print_type(ty)) -} - -pub fn bounds_to_string(bounds: &[hir::TyParamBound]) -> String { - to_string(|s| s.print_bounds("", bounds)) -} - -pub fn pat_to_string(pat: &hir::Pat) -> String { - to_string(|s| s.print_pat(pat)) -} - -pub fn arm_to_string(arm: &hir::Arm) -> String { - to_string(|s| s.print_arm(arm)) -} - -pub fn expr_to_string(e: &hir::Expr) -> String { - to_string(|s| s.print_expr(e)) -} - -pub fn lifetime_to_string(e: &hir::Lifetime) -> String { - to_string(|s| s.print_lifetime(e)) -} - -pub fn stmt_to_string(stmt: &hir::Stmt) -> String { - to_string(|s| s.print_stmt(stmt)) -} - -pub fn item_to_string(i: &hir::Item) -> String { - to_string(|s| s.print_item(i)) -} - -pub fn impl_item_to_string(i: &hir::ImplItem) -> String { - to_string(|s| s.print_impl_item(i)) -} - -pub fn trait_item_to_string(i: &hir::TraitItem) -> String { - 
to_string(|s| s.print_trait_item(i)) -} - -pub fn generics_to_string(generics: &hir::Generics) -> String { - to_string(|s| s.print_generics(generics)) -} - -pub fn where_clause_to_string(i: &hir::WhereClause) -> String { - to_string(|s| s.print_where_clause(i)) -} - -pub fn fn_block_to_string(p: &hir::FnDecl) -> String { - to_string(|s| s.print_fn_block_args(p)) -} - -pub fn path_to_string(p: &hir::Path) -> String { - to_string(|s| s.print_path(p, false, 0)) -} - -pub fn name_to_string(name: ast::Name) -> String { - to_string(|s| s.print_name(name)) -} - -pub fn fun_to_string(decl: &hir::FnDecl, - unsafety: hir::Unsafety, - constness: hir::Constness, - name: ast::Name, - opt_explicit_self: Option<&hir::ExplicitSelf_>, - generics: &hir::Generics) - -> String { - to_string(|s| { - try!(s.head("")); - try!(s.print_fn(decl, - unsafety, - constness, - abi::Rust, - Some(name), - generics, - opt_explicit_self, - hir::Inherited)); - try!(s.end()); // Close the head box - s.end() // Close the outer box - }) -} - -pub fn block_to_string(blk: &hir::Block) -> String { - to_string(|s| { - // containing cbox, will be closed by print-block at } - try!(s.cbox(indent_unit)); - // head-ibox, will be closed by print-block after { - try!(s.ibox(0)); - s.print_block(blk) - }) -} - -pub fn explicit_self_to_string(explicit_self: &hir::ExplicitSelf_) -> String { - to_string(|s| s.print_explicit_self(explicit_self, hir::MutImmutable).map(|_| {})) -} - -pub fn variant_to_string(var: &hir::Variant) -> String { - to_string(|s| s.print_variant(var)) -} - -pub fn arg_to_string(arg: &hir::Arg) -> String { - to_string(|s| s.print_arg(arg)) -} - -pub fn visibility_qualified(vis: hir::Visibility, s: &str) -> String { - match vis { - hir::Public => format!("pub {}", s), - hir::Inherited => s.to_string(), - } -} - -fn needs_parentheses(expr: &hir::Expr) -> bool { - match expr.node { - hir::ExprAssign(..) | - hir::ExprBinary(..) | - hir::ExprClosure(..) | - hir::ExprAssignOp(..) | - hir::ExprCast(..) 
| - hir::ExprType(..) => true, - _ => false, - } -} - -impl<'a> State<'a> { - pub fn cbox(&mut self, u: usize) -> io::Result<()> { - self.boxes.push(pp::Breaks::Consistent); - pp::cbox(&mut self.s, u) - } - - pub fn nbsp(&mut self) -> io::Result<()> { - word(&mut self.s, " ") - } - - pub fn word_nbsp(&mut self, w: &str) -> io::Result<()> { - try!(word(&mut self.s, w)); - self.nbsp() - } - - pub fn head(&mut self, w: &str) -> io::Result<()> { - // outer-box is consistent - try!(self.cbox(indent_unit)); - // head-box is inconsistent - try!(self.ibox(w.len() + 1)); - // keyword that starts the head - if !w.is_empty() { - try!(self.word_nbsp(w)); - } - Ok(()) - } - - pub fn bopen(&mut self) -> io::Result<()> { - try!(word(&mut self.s, "{")); - self.end() // close the head-box - } - - pub fn bclose_(&mut self, span: codemap::Span, indented: usize) -> io::Result<()> { - self.bclose_maybe_open(span, indented, true) - } - pub fn bclose_maybe_open(&mut self, - span: codemap::Span, - indented: usize, - close_box: bool) - -> io::Result<()> { - try!(self.maybe_print_comment(span.hi)); - try!(self.break_offset_if_not_bol(1, -(indented as isize))); - try!(word(&mut self.s, "}")); - if close_box { - try!(self.end()); // close the outer-box - } - Ok(()) - } - pub fn bclose(&mut self, span: codemap::Span) -> io::Result<()> { - self.bclose_(span, indent_unit) - } - - pub fn in_cbox(&self) -> bool { - match self.boxes.last() { - Some(&last_box) => last_box == pp::Breaks::Consistent, - None => false, - } - } - pub fn space_if_not_bol(&mut self) -> io::Result<()> { - if !self.is_bol() { - try!(space(&mut self.s)); - } - Ok(()) - } - pub fn break_offset_if_not_bol(&mut self, n: usize, off: isize) -> io::Result<()> { - if !self.is_bol() { - break_offset(&mut self.s, n, off) - } else { - if off != 0 && self.s.last_token().is_hardbreak_tok() { - // We do something pretty sketchy here: tuck the nonzero - // offset-adjustment we were going to deposit along with the - // break into the 
previous hardbreak. - self.s.replace_last_token(pp::hardbreak_tok_offset(off)); - } - Ok(()) - } - } - - // Synthesizes a comment that was not textually present in the original source - // file. - pub fn synth_comment(&mut self, text: String) -> io::Result<()> { - try!(word(&mut self.s, "/*")); - try!(space(&mut self.s)); - try!(word(&mut self.s, &text[..])); - try!(space(&mut self.s)); - word(&mut self.s, "*/") - } - - - pub fn commasep_cmnt(&mut self, - b: Breaks, - elts: &[T], - mut op: F, - mut get_span: G) - -> io::Result<()> - where F: FnMut(&mut State, &T) -> io::Result<()>, - G: FnMut(&T) -> codemap::Span - { - try!(self.rbox(0, b)); - let len = elts.len(); - let mut i = 0; - for elt in elts { - try!(self.maybe_print_comment(get_span(elt).hi)); - try!(op(self, elt)); - i += 1; - if i < len { - try!(word(&mut self.s, ",")); - try!(self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi))); - try!(self.space_if_not_bol()); - } - } - self.end() - } - - pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[P]) -> io::Result<()> { - self.commasep_cmnt(b, exprs, |s, e| s.print_expr(&**e), |e| e.span) - } - - pub fn print_mod(&mut self, _mod: &hir::Mod, attrs: &[ast::Attribute]) -> io::Result<()> { - try!(self.print_inner_attributes(attrs)); - for item_id in &_mod.item_ids { - try!(self.print_item_id(item_id)); - } - Ok(()) - } - - pub fn print_foreign_mod(&mut self, - nmod: &hir::ForeignMod, - attrs: &[ast::Attribute]) - -> io::Result<()> { - try!(self.print_inner_attributes(attrs)); - for item in &nmod.items { - try!(self.print_foreign_item(item)); - } - Ok(()) - } - - pub fn print_opt_lifetime(&mut self, lifetime: &Option) -> io::Result<()> { - if let Some(l) = *lifetime { - try!(self.print_lifetime(&l)); - try!(self.nbsp()); - } - Ok(()) - } - - pub fn print_type(&mut self, ty: &hir::Ty) -> io::Result<()> { - try!(self.maybe_print_comment(ty.span.lo)); - try!(self.ibox(0)); - match ty.node { - hir::TyVec(ref ty) => { - try!(word(&mut self.s, 
"[")); - try!(self.print_type(&**ty)); - try!(word(&mut self.s, "]")); - } - hir::TyPtr(ref mt) => { - try!(word(&mut self.s, "*")); - match mt.mutbl { - hir::MutMutable => try!(self.word_nbsp("mut")), - hir::MutImmutable => try!(self.word_nbsp("const")), - } - try!(self.print_type(&*mt.ty)); - } - hir::TyRptr(ref lifetime, ref mt) => { - try!(word(&mut self.s, "&")); - try!(self.print_opt_lifetime(lifetime)); - try!(self.print_mt(mt)); - } - hir::TyTup(ref elts) => { - try!(self.popen()); - try!(self.commasep(Inconsistent, &elts[..], |s, ty| s.print_type(&**ty))); - if elts.len() == 1 { - try!(word(&mut self.s, ",")); - } - try!(self.pclose()); - } - hir::TyBareFn(ref f) => { - let generics = hir::Generics { - lifetimes: f.lifetimes.clone(), - ty_params: hir::HirVec::new(), - where_clause: hir::WhereClause { - id: ast::DUMMY_NODE_ID, - predicates: hir::HirVec::new(), - }, - }; - try!(self.print_ty_fn(f.abi, f.unsafety, &*f.decl, None, &generics, None)); - } - hir::TyPath(None, ref path) => { - try!(self.print_path(path, false, 0)); - } - hir::TyPath(Some(ref qself), ref path) => { - try!(self.print_qpath(path, qself, false)) - } - hir::TyObjectSum(ref ty, ref bounds) => { - try!(self.print_type(&**ty)); - try!(self.print_bounds("+", &bounds[..])); - } - hir::TyPolyTraitRef(ref bounds) => { - try!(self.print_bounds("", &bounds[..])); - } - hir::TyFixedLengthVec(ref ty, ref v) => { - try!(word(&mut self.s, "[")); - try!(self.print_type(&**ty)); - try!(word(&mut self.s, "; ")); - try!(self.print_expr(&**v)); - try!(word(&mut self.s, "]")); - } - hir::TyTypeof(ref e) => { - try!(word(&mut self.s, "typeof(")); - try!(self.print_expr(&**e)); - try!(word(&mut self.s, ")")); - } - hir::TyInfer => { - try!(word(&mut self.s, "_")); - } - } - self.end() - } - - pub fn print_foreign_item(&mut self, item: &hir::ForeignItem) -> io::Result<()> { - try!(self.hardbreak_if_not_bol()); - try!(self.maybe_print_comment(item.span.lo)); - try!(self.print_outer_attributes(&item.attrs)); 
- match item.node { - hir::ForeignItemFn(ref decl, ref generics) => { - try!(self.head("")); - try!(self.print_fn(decl, - hir::Unsafety::Normal, - hir::Constness::NotConst, - abi::Rust, - Some(item.name), - generics, - None, - item.vis)); - try!(self.end()); // end head-ibox - try!(word(&mut self.s, ";")); - self.end() // end the outer fn box - } - hir::ForeignItemStatic(ref t, m) => { - try!(self.head(&visibility_qualified(item.vis, "static"))); - if m { - try!(self.word_space("mut")); - } - try!(self.print_name(item.name)); - try!(self.word_space(":")); - try!(self.print_type(&**t)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end the head-ibox - self.end() // end the outer cbox - } - } - } - - fn print_associated_const(&mut self, - name: ast::Name, - ty: &hir::Ty, - default: Option<&hir::Expr>, - vis: hir::Visibility) - -> io::Result<()> { - try!(word(&mut self.s, &visibility_qualified(vis, ""))); - try!(self.word_space("const")); - try!(self.print_name(name)); - try!(self.word_space(":")); - try!(self.print_type(ty)); - if let Some(expr) = default { - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_expr(expr)); - } - word(&mut self.s, ";") - } - - fn print_associated_type(&mut self, - name: ast::Name, - bounds: Option<&hir::TyParamBounds>, - ty: Option<&hir::Ty>) - -> io::Result<()> { - try!(self.word_space("type")); - try!(self.print_name(name)); - if let Some(bounds) = bounds { - try!(self.print_bounds(":", bounds)); - } - if let Some(ty) = ty { - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_type(ty)); - } - word(&mut self.s, ";") - } - - pub fn print_item_id(&mut self, item_id: &hir::ItemId) -> io::Result<()> { - if let Some(krate) = self.krate { - // skip nested items if krate context was not provided - let item = &krate.items[&item_id.id]; - self.print_item(item) - } else { - Ok(()) - } - } - - /// Pretty-print an item - pub fn print_item(&mut self, item: &hir::Item) -> io::Result<()> { - 
try!(self.hardbreak_if_not_bol()); - try!(self.maybe_print_comment(item.span.lo)); - try!(self.print_outer_attributes(&item.attrs)); - try!(self.ann.pre(self, NodeItem(item))); - match item.node { - hir::ItemExternCrate(ref optional_path) => { - try!(self.head(&visibility_qualified(item.vis, "extern crate"))); - if let Some(p) = *optional_path { - let val = p.as_str(); - if val.contains("-") { - try!(self.print_string(&val, ast::CookedStr)); - } else { - try!(self.print_name(p)); - } - try!(space(&mut self.s)); - try!(word(&mut self.s, "as")); - try!(space(&mut self.s)); - } - try!(self.print_name(item.name)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end inner head-block - try!(self.end()); // end outer head-block - } - hir::ItemUse(ref vp) => { - try!(self.head(&visibility_qualified(item.vis, "use"))); - try!(self.print_view_path(&**vp)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end inner head-block - try!(self.end()); // end outer head-block - } - hir::ItemStatic(ref ty, m, ref expr) => { - try!(self.head(&visibility_qualified(item.vis, "static"))); - if m == hir::MutMutable { - try!(self.word_space("mut")); - } - try!(self.print_name(item.name)); - try!(self.word_space(":")); - try!(self.print_type(&**ty)); - try!(space(&mut self.s)); - try!(self.end()); // end the head-ibox - - try!(self.word_space("=")); - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end the outer cbox - } - hir::ItemConst(ref ty, ref expr) => { - try!(self.head(&visibility_qualified(item.vis, "const"))); - try!(self.print_name(item.name)); - try!(self.word_space(":")); - try!(self.print_type(&**ty)); - try!(space(&mut self.s)); - try!(self.end()); // end the head-ibox - - try!(self.word_space("=")); - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end the outer cbox - } - hir::ItemFn(ref decl, unsafety, constness, abi, ref typarams, ref body) => { - try!(self.head("")); - 
try!(self.print_fn(decl, - unsafety, - constness, - abi, - Some(item.name), - typarams, - None, - item.vis)); - try!(word(&mut self.s, " ")); - try!(self.print_block_with_attrs(&**body, &item.attrs)); - } - hir::ItemMod(ref _mod) => { - try!(self.head(&visibility_qualified(item.vis, "mod"))); - try!(self.print_name(item.name)); - try!(self.nbsp()); - try!(self.bopen()); - try!(self.print_mod(_mod, &item.attrs)); - try!(self.bclose(item.span)); - } - hir::ItemForeignMod(ref nmod) => { - try!(self.head("extern")); - try!(self.word_nbsp(&nmod.abi.to_string())); - try!(self.bopen()); - try!(self.print_foreign_mod(nmod, &item.attrs)); - try!(self.bclose(item.span)); - } - hir::ItemTy(ref ty, ref params) => { - try!(self.ibox(indent_unit)); - try!(self.ibox(0)); - try!(self.word_nbsp(&visibility_qualified(item.vis, "type"))); - try!(self.print_name(item.name)); - try!(self.print_generics(params)); - try!(self.end()); // end the inner ibox - - try!(self.print_where_clause(¶ms.where_clause)); - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_type(&**ty)); - try!(word(&mut self.s, ";")); - try!(self.end()); // end the outer ibox - } - hir::ItemEnum(ref enum_definition, ref params) => { - try!(self.print_enum_def(enum_definition, params, item.name, item.span, item.vis)); - } - hir::ItemStruct(ref struct_def, ref generics) => { - try!(self.head(&visibility_qualified(item.vis, "struct"))); - try!(self.print_struct(struct_def, generics, item.name, item.span, true)); - } - - hir::ItemDefaultImpl(unsafety, ref trait_ref) => { - try!(self.head("")); - try!(self.print_visibility(item.vis)); - try!(self.print_unsafety(unsafety)); - try!(self.word_nbsp("impl")); - try!(self.print_trait_ref(trait_ref)); - try!(space(&mut self.s)); - try!(self.word_space("for")); - try!(self.word_space("..")); - try!(self.bopen()); - try!(self.bclose(item.span)); - } - hir::ItemImpl(unsafety, - polarity, - ref generics, - ref opt_trait, - ref ty, - ref impl_items) => { - 
try!(self.head("")); - try!(self.print_visibility(item.vis)); - try!(self.print_unsafety(unsafety)); - try!(self.word_nbsp("impl")); - - if generics.is_parameterized() { - try!(self.print_generics(generics)); - try!(space(&mut self.s)); - } - - match polarity { - hir::ImplPolarity::Negative => { - try!(word(&mut self.s, "!")); - } - _ => {} - } - - match opt_trait { - &Some(ref t) => { - try!(self.print_trait_ref(t)); - try!(space(&mut self.s)); - try!(self.word_space("for")); - } - &None => {} - } - - try!(self.print_type(&**ty)); - try!(self.print_where_clause(&generics.where_clause)); - - try!(space(&mut self.s)); - try!(self.bopen()); - try!(self.print_inner_attributes(&item.attrs)); - for impl_item in impl_items { - try!(self.print_impl_item(impl_item)); - } - try!(self.bclose(item.span)); - } - hir::ItemTrait(unsafety, ref generics, ref bounds, ref trait_items) => { - try!(self.head("")); - try!(self.print_visibility(item.vis)); - try!(self.print_unsafety(unsafety)); - try!(self.word_nbsp("trait")); - try!(self.print_name(item.name)); - try!(self.print_generics(generics)); - let mut real_bounds = Vec::with_capacity(bounds.len()); - for b in bounds.iter() { - if let TraitTyParamBound(ref ptr, hir::TraitBoundModifier::Maybe) = *b { - try!(space(&mut self.s)); - try!(self.word_space("for ?")); - try!(self.print_trait_ref(&ptr.trait_ref)); - } else { - real_bounds.push(b.clone()); - } - } - try!(self.print_bounds(":", &real_bounds[..])); - try!(self.print_where_clause(&generics.where_clause)); - try!(word(&mut self.s, " ")); - try!(self.bopen()); - for trait_item in trait_items { - try!(self.print_trait_item(trait_item)); - } - try!(self.bclose(item.span)); - } - } - self.ann.post(self, NodeItem(item)) - } - - fn print_trait_ref(&mut self, t: &hir::TraitRef) -> io::Result<()> { - self.print_path(&t.path, false, 0) - } - - fn print_formal_lifetime_list(&mut self, lifetimes: &[hir::LifetimeDef]) -> io::Result<()> { - if !lifetimes.is_empty() { - try!(word(&mut 
self.s, "for<")); - let mut comma = false; - for lifetime_def in lifetimes { - if comma { - try!(self.word_space(",")) - } - try!(self.print_lifetime_def(lifetime_def)); - comma = true; - } - try!(word(&mut self.s, ">")); - } - Ok(()) - } - - fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef) -> io::Result<()> { - try!(self.print_formal_lifetime_list(&t.bound_lifetimes)); - self.print_trait_ref(&t.trait_ref) - } - - pub fn print_enum_def(&mut self, - enum_definition: &hir::EnumDef, - generics: &hir::Generics, - name: ast::Name, - span: codemap::Span, - visibility: hir::Visibility) - -> io::Result<()> { - try!(self.head(&visibility_qualified(visibility, "enum"))); - try!(self.print_name(name)); - try!(self.print_generics(generics)); - try!(self.print_where_clause(&generics.where_clause)); - try!(space(&mut self.s)); - self.print_variants(&enum_definition.variants, span) - } - - pub fn print_variants(&mut self, - variants: &[hir::Variant], - span: codemap::Span) - -> io::Result<()> { - try!(self.bopen()); - for v in variants { - try!(self.space_if_not_bol()); - try!(self.maybe_print_comment(v.span.lo)); - try!(self.print_outer_attributes(&v.node.attrs)); - try!(self.ibox(indent_unit)); - try!(self.print_variant(v)); - try!(word(&mut self.s, ",")); - try!(self.end()); - try!(self.maybe_print_trailing_comment(v.span, None)); - } - self.bclose(span) - } - - pub fn print_visibility(&mut self, vis: hir::Visibility) -> io::Result<()> { - match vis { - hir::Public => self.word_nbsp("pub"), - hir::Inherited => Ok(()), - } - } - - pub fn print_struct(&mut self, - struct_def: &hir::VariantData, - generics: &hir::Generics, - name: ast::Name, - span: codemap::Span, - print_finalizer: bool) - -> io::Result<()> { - try!(self.print_name(name)); - try!(self.print_generics(generics)); - if !struct_def.is_struct() { - if struct_def.is_tuple() { - try!(self.popen()); - try!(self.commasep(Inconsistent, struct_def.fields(), |s, field| { - match field.node.kind { - 
hir::NamedField(..) => panic!("unexpected named field"), - hir::UnnamedField(vis) => { - try!(s.print_visibility(vis)); - try!(s.maybe_print_comment(field.span.lo)); - s.print_type(&*field.node.ty) - } - } - })); - try!(self.pclose()); - } - try!(self.print_where_clause(&generics.where_clause)); - if print_finalizer { - try!(word(&mut self.s, ";")); - } - try!(self.end()); - self.end() // close the outer-box - } else { - try!(self.print_where_clause(&generics.where_clause)); - try!(self.nbsp()); - try!(self.bopen()); - try!(self.hardbreak_if_not_bol()); - - for field in struct_def.fields() { - match field.node.kind { - hir::UnnamedField(..) => panic!("unexpected unnamed field"), - hir::NamedField(name, visibility) => { - try!(self.hardbreak_if_not_bol()); - try!(self.maybe_print_comment(field.span.lo)); - try!(self.print_outer_attributes(&field.node.attrs)); - try!(self.print_visibility(visibility)); - try!(self.print_name(name)); - try!(self.word_nbsp(":")); - try!(self.print_type(&*field.node.ty)); - try!(word(&mut self.s, ",")); - } - } - } - - self.bclose(span) - } - } - - pub fn print_variant(&mut self, v: &hir::Variant) -> io::Result<()> { - try!(self.head("")); - let generics = ::util::empty_generics(); - try!(self.print_struct(&v.node.data, &generics, v.node.name, v.span, false)); - match v.node.disr_expr { - Some(ref d) => { - try!(space(&mut self.s)); - try!(self.word_space("=")); - self.print_expr(&**d) - } - _ => Ok(()), - } - } - - pub fn print_method_sig(&mut self, - name: ast::Name, - m: &hir::MethodSig, - vis: hir::Visibility) - -> io::Result<()> { - self.print_fn(&m.decl, - m.unsafety, - m.constness, - m.abi, - Some(name), - &m.generics, - Some(&m.explicit_self.node), - vis) - } - - pub fn print_trait_item(&mut self, ti: &hir::TraitItem) -> io::Result<()> { - try!(self.ann.pre(self, NodeSubItem(ti.id))); - try!(self.hardbreak_if_not_bol()); - try!(self.maybe_print_comment(ti.span.lo)); - try!(self.print_outer_attributes(&ti.attrs)); - match ti.node 
{ - hir::ConstTraitItem(ref ty, ref default) => { - try!(self.print_associated_const(ti.name, - &ty, - default.as_ref().map(|expr| &**expr), - hir::Inherited)); - } - hir::MethodTraitItem(ref sig, ref body) => { - if body.is_some() { - try!(self.head("")); - } - try!(self.print_method_sig(ti.name, sig, hir::Inherited)); - if let Some(ref body) = *body { - try!(self.nbsp()); - try!(self.print_block_with_attrs(body, &ti.attrs)); - } else { - try!(word(&mut self.s, ";")); - } - } - hir::TypeTraitItem(ref bounds, ref default) => { - try!(self.print_associated_type(ti.name, - Some(bounds), - default.as_ref().map(|ty| &**ty))); - } - } - self.ann.post(self, NodeSubItem(ti.id)) - } - - pub fn print_impl_item(&mut self, ii: &hir::ImplItem) -> io::Result<()> { - try!(self.ann.pre(self, NodeSubItem(ii.id))); - try!(self.hardbreak_if_not_bol()); - try!(self.maybe_print_comment(ii.span.lo)); - try!(self.print_outer_attributes(&ii.attrs)); - match ii.node { - hir::ImplItemKind::Const(ref ty, ref expr) => { - try!(self.print_associated_const(ii.name, &ty, Some(&expr), ii.vis)); - } - hir::ImplItemKind::Method(ref sig, ref body) => { - try!(self.head("")); - try!(self.print_method_sig(ii.name, sig, ii.vis)); - try!(self.nbsp()); - try!(self.print_block_with_attrs(body, &ii.attrs)); - } - hir::ImplItemKind::Type(ref ty) => { - try!(self.print_associated_type(ii.name, None, Some(ty))); - } - } - self.ann.post(self, NodeSubItem(ii.id)) - } - - pub fn print_stmt(&mut self, st: &hir::Stmt) -> io::Result<()> { - try!(self.maybe_print_comment(st.span.lo)); - match st.node { - hir::StmtDecl(ref decl, _) => { - try!(self.print_decl(&**decl)); - } - hir::StmtExpr(ref expr, _) => { - try!(self.space_if_not_bol()); - try!(self.print_expr(&**expr)); - } - hir::StmtSemi(ref expr, _) => { - try!(self.space_if_not_bol()); - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, ";")); - } - } - if stmt_ends_with_semi(&st.node) { - try!(word(&mut self.s, ";")); - } - 
self.maybe_print_trailing_comment(st.span, None) - } - - pub fn print_block(&mut self, blk: &hir::Block) -> io::Result<()> { - self.print_block_with_attrs(blk, &[]) - } - - pub fn print_block_unclosed(&mut self, blk: &hir::Block) -> io::Result<()> { - self.print_block_unclosed_indent(blk, indent_unit) - } - - pub fn print_block_unclosed_indent(&mut self, - blk: &hir::Block, - indented: usize) - -> io::Result<()> { - self.print_block_maybe_unclosed(blk, indented, &[], false) - } - - pub fn print_block_with_attrs(&mut self, - blk: &hir::Block, - attrs: &[ast::Attribute]) - -> io::Result<()> { - self.print_block_maybe_unclosed(blk, indent_unit, attrs, true) - } - - pub fn print_block_maybe_unclosed(&mut self, - blk: &hir::Block, - indented: usize, - attrs: &[ast::Attribute], - close_box: bool) - -> io::Result<()> { - match blk.rules { - hir::UnsafeBlock(..) => try!(self.word_space("unsafe")), - hir::PushUnsafeBlock(..) => try!(self.word_space("push_unsafe")), - hir::PopUnsafeBlock(..) => try!(self.word_space("pop_unsafe")), - hir::PushUnstableBlock => try!(self.word_space("push_unstable")), - hir::PopUnstableBlock => try!(self.word_space("pop_unstable")), - hir::DefaultBlock => (), - } - try!(self.maybe_print_comment(blk.span.lo)); - try!(self.ann.pre(self, NodeBlock(blk))); - try!(self.bopen()); - - try!(self.print_inner_attributes(attrs)); - - for st in &blk.stmts { - try!(self.print_stmt(st)); - } - match blk.expr { - Some(ref expr) => { - try!(self.space_if_not_bol()); - try!(self.print_expr(&**expr)); - try!(self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi))); - } - _ => (), - } - try!(self.bclose_maybe_open(blk.span, indented, close_box)); - self.ann.post(self, NodeBlock(blk)) - } - - fn print_else(&mut self, els: Option<&hir::Expr>) -> io::Result<()> { - match els { - Some(_else) => { - match _else.node { - // "another else-if" - hir::ExprIf(ref i, ref then, ref e) => { - try!(self.cbox(indent_unit - 1)); - try!(self.ibox(0)); - try!(word(&mut 
self.s, " else if ")); - try!(self.print_expr(&**i)); - try!(space(&mut self.s)); - try!(self.print_block(&**then)); - self.print_else(e.as_ref().map(|e| &**e)) - } - // "final else" - hir::ExprBlock(ref b) => { - try!(self.cbox(indent_unit - 1)); - try!(self.ibox(0)); - try!(word(&mut self.s, " else ")); - self.print_block(&**b) - } - // BLEAH, constraints would be great here - _ => { - panic!("print_if saw if with weird alternative"); - } - } - } - _ => Ok(()), - } - } - - pub fn print_if(&mut self, - test: &hir::Expr, - blk: &hir::Block, - elseopt: Option<&hir::Expr>) - -> io::Result<()> { - try!(self.head("if")); - try!(self.print_expr(test)); - try!(space(&mut self.s)); - try!(self.print_block(blk)); - self.print_else(elseopt) - } - - pub fn print_if_let(&mut self, - pat: &hir::Pat, - expr: &hir::Expr, - blk: &hir::Block, - elseopt: Option<&hir::Expr>) - -> io::Result<()> { - try!(self.head("if let")); - try!(self.print_pat(pat)); - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_expr(expr)); - try!(space(&mut self.s)); - try!(self.print_block(blk)); - self.print_else(elseopt) - } - - - fn print_call_post(&mut self, args: &[P]) -> io::Result<()> { - try!(self.popen()); - try!(self.commasep_exprs(Inconsistent, args)); - self.pclose() - } - - pub fn print_expr_maybe_paren(&mut self, expr: &hir::Expr) -> io::Result<()> { - let needs_par = needs_parentheses(expr); - if needs_par { - try!(self.popen()); - } - try!(self.print_expr(expr)); - if needs_par { - try!(self.pclose()); - } - Ok(()) - } - - fn print_expr_vec(&mut self, exprs: &[P]) -> io::Result<()> { - try!(self.ibox(indent_unit)); - try!(word(&mut self.s, "[")); - try!(self.commasep_exprs(Inconsistent, &exprs[..])); - try!(word(&mut self.s, "]")); - self.end() - } - - fn print_expr_repeat(&mut self, element: &hir::Expr, count: &hir::Expr) -> io::Result<()> { - try!(self.ibox(indent_unit)); - try!(word(&mut self.s, "[")); - try!(self.print_expr(element)); - 
try!(self.word_space(";")); - try!(self.print_expr(count)); - try!(word(&mut self.s, "]")); - self.end() - } - - fn print_expr_struct(&mut self, - path: &hir::Path, - fields: &[hir::Field], - wth: &Option>) - -> io::Result<()> { - try!(self.print_path(path, true, 0)); - try!(word(&mut self.s, "{")); - try!(self.commasep_cmnt(Consistent, - &fields[..], - |s, field| { - try!(s.ibox(indent_unit)); - try!(s.print_name(field.name.node)); - try!(s.word_space(":")); - try!(s.print_expr(&*field.expr)); - s.end() - }, - |f| f.span)); - match *wth { - Some(ref expr) => { - try!(self.ibox(indent_unit)); - if !fields.is_empty() { - try!(word(&mut self.s, ",")); - try!(space(&mut self.s)); - } - try!(word(&mut self.s, "..")); - try!(self.print_expr(&**expr)); - try!(self.end()); - } - _ => if !fields.is_empty() { - try!(word(&mut self.s, ",")) - }, - } - try!(word(&mut self.s, "}")); - Ok(()) - } - - fn print_expr_tup(&mut self, exprs: &[P]) -> io::Result<()> { - try!(self.popen()); - try!(self.commasep_exprs(Inconsistent, &exprs[..])); - if exprs.len() == 1 { - try!(word(&mut self.s, ",")); - } - self.pclose() - } - - fn print_expr_call(&mut self, func: &hir::Expr, args: &[P]) -> io::Result<()> { - try!(self.print_expr_maybe_paren(func)); - self.print_call_post(args) - } - - fn print_expr_method_call(&mut self, - name: Spanned, - tys: &[P], - args: &[P]) - -> io::Result<()> { - let base_args = &args[1..]; - try!(self.print_expr(&*args[0])); - try!(word(&mut self.s, ".")); - try!(self.print_name(name.node)); - if !tys.is_empty() { - try!(word(&mut self.s, "::<")); - try!(self.commasep(Inconsistent, tys, |s, ty| s.print_type(&**ty))); - try!(word(&mut self.s, ">")); - } - self.print_call_post(base_args) - } - - fn print_expr_binary(&mut self, - op: hir::BinOp, - lhs: &hir::Expr, - rhs: &hir::Expr) - -> io::Result<()> { - try!(self.print_expr(lhs)); - try!(space(&mut self.s)); - try!(self.word_space(::util::binop_to_string(op.node))); - self.print_expr(rhs) - } - - fn 
print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr) -> io::Result<()> { - try!(word(&mut self.s, ::util::unop_to_string(op))); - self.print_expr_maybe_paren(expr) - } - - fn print_expr_addr_of(&mut self, - mutability: hir::Mutability, - expr: &hir::Expr) - -> io::Result<()> { - try!(word(&mut self.s, "&")); - try!(self.print_mutability(mutability)); - self.print_expr_maybe_paren(expr) - } - - pub fn print_expr(&mut self, expr: &hir::Expr) -> io::Result<()> { - try!(self.maybe_print_comment(expr.span.lo)); - try!(self.ibox(indent_unit)); - try!(self.ann.pre(self, NodeExpr(expr))); - match expr.node { - hir::ExprBox(ref expr) => { - try!(self.word_space("box")); - try!(self.print_expr(expr)); - } - hir::ExprVec(ref exprs) => { - try!(self.print_expr_vec(&exprs[..])); - } - hir::ExprRepeat(ref element, ref count) => { - try!(self.print_expr_repeat(&**element, &**count)); - } - hir::ExprStruct(ref path, ref fields, ref wth) => { - try!(self.print_expr_struct(path, &fields[..], wth)); - } - hir::ExprTup(ref exprs) => { - try!(self.print_expr_tup(&exprs[..])); - } - hir::ExprCall(ref func, ref args) => { - try!(self.print_expr_call(&**func, &args[..])); - } - hir::ExprMethodCall(name, ref tys, ref args) => { - try!(self.print_expr_method_call(name, &tys[..], &args[..])); - } - hir::ExprBinary(op, ref lhs, ref rhs) => { - try!(self.print_expr_binary(op, &**lhs, &**rhs)); - } - hir::ExprUnary(op, ref expr) => { - try!(self.print_expr_unary(op, &**expr)); - } - hir::ExprAddrOf(m, ref expr) => { - try!(self.print_expr_addr_of(m, &**expr)); - } - hir::ExprLit(ref lit) => { - try!(self.print_literal(&**lit)); - } - hir::ExprCast(ref expr, ref ty) => { - try!(self.print_expr(&**expr)); - try!(space(&mut self.s)); - try!(self.word_space("as")); - try!(self.print_type(&**ty)); - } - hir::ExprType(ref expr, ref ty) => { - try!(self.print_expr(&**expr)); - try!(self.word_space(":")); - try!(self.print_type(&**ty)); - } - hir::ExprIf(ref test, ref blk, ref elseopt) => { - 
try!(self.print_if(&**test, &**blk, elseopt.as_ref().map(|e| &**e))); - } - hir::ExprWhile(ref test, ref blk, opt_ident) => { - if let Some(ident) = opt_ident { - try!(self.print_name(ident.name)); - try!(self.word_space(":")); - } - try!(self.head("while")); - try!(self.print_expr(&**test)); - try!(space(&mut self.s)); - try!(self.print_block(&**blk)); - } - hir::ExprLoop(ref blk, opt_ident) => { - if let Some(ident) = opt_ident { - try!(self.print_name(ident.name)); - try!(self.word_space(":")); - } - try!(self.head("loop")); - try!(space(&mut self.s)); - try!(self.print_block(&**blk)); - } - hir::ExprMatch(ref expr, ref arms, _) => { - try!(self.cbox(indent_unit)); - try!(self.ibox(4)); - try!(self.word_nbsp("match")); - try!(self.print_expr(&**expr)); - try!(space(&mut self.s)); - try!(self.bopen()); - for arm in arms { - try!(self.print_arm(arm)); - } - try!(self.bclose_(expr.span, indent_unit)); - } - hir::ExprClosure(capture_clause, ref decl, ref body) => { - try!(self.print_capture_clause(capture_clause)); - - try!(self.print_fn_block_args(&**decl)); - try!(space(&mut self.s)); - - let default_return = match decl.output { - hir::DefaultReturn(..) => true, - _ => false, - }; - - if !default_return || !body.stmts.is_empty() || body.expr.is_none() { - try!(self.print_block_unclosed(&**body)); - } else { - // we extract the block, so as not to create another set of boxes - match body.expr.as_ref().unwrap().node { - hir::ExprBlock(ref blk) => { - try!(self.print_block_unclosed(&**blk)); - } - _ => { - // this is a bare expression - try!(self.print_expr(body.expr.as_ref().map(|e| &**e).unwrap())); - try!(self.end()); // need to close a box - } - } - } - // a box will be closed by print_expr, but we didn't want an overall - // wrapper so we closed the corresponding opening. so create an - // empty box to satisfy the close. 
- try!(self.ibox(0)); - } - hir::ExprBlock(ref blk) => { - // containing cbox, will be closed by print-block at } - try!(self.cbox(indent_unit)); - // head-box, will be closed by print-block after { - try!(self.ibox(0)); - try!(self.print_block(&**blk)); - } - hir::ExprAssign(ref lhs, ref rhs) => { - try!(self.print_expr(&**lhs)); - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_expr(&**rhs)); - } - hir::ExprAssignOp(op, ref lhs, ref rhs) => { - try!(self.print_expr(&**lhs)); - try!(space(&mut self.s)); - try!(word(&mut self.s, ::util::binop_to_string(op.node))); - try!(self.word_space("=")); - try!(self.print_expr(&**rhs)); - } - hir::ExprField(ref expr, name) => { - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, ".")); - try!(self.print_name(name.node)); - } - hir::ExprTupField(ref expr, id) => { - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, ".")); - try!(self.print_usize(id.node)); - } - hir::ExprIndex(ref expr, ref index) => { - try!(self.print_expr(&**expr)); - try!(word(&mut self.s, "[")); - try!(self.print_expr(&**index)); - try!(word(&mut self.s, "]")); - } - hir::ExprRange(ref start, ref end) => { - if let &Some(ref e) = start { - try!(self.print_expr(&**e)); - } - try!(word(&mut self.s, "..")); - if let &Some(ref e) = end { - try!(self.print_expr(&**e)); - } - } - hir::ExprPath(None, ref path) => { - try!(self.print_path(path, true, 0)) - } - hir::ExprPath(Some(ref qself), ref path) => { - try!(self.print_qpath(path, qself, true)) - } - hir::ExprBreak(opt_ident) => { - try!(word(&mut self.s, "break")); - try!(space(&mut self.s)); - if let Some(ident) = opt_ident { - try!(self.print_name(ident.node.name)); - try!(space(&mut self.s)); - } - } - hir::ExprAgain(opt_ident) => { - try!(word(&mut self.s, "continue")); - try!(space(&mut self.s)); - if let Some(ident) = opt_ident { - try!(self.print_name(ident.node.name)); - try!(space(&mut self.s)) - } - } - hir::ExprRet(ref result) => { - try!(word(&mut self.s, 
"return")); - match *result { - Some(ref expr) => { - try!(word(&mut self.s, " ")); - try!(self.print_expr(&**expr)); - } - _ => (), - } - } - hir::ExprInlineAsm(ref a) => { - try!(word(&mut self.s, "asm!")); - try!(self.popen()); - try!(self.print_string(&a.asm, a.asm_str_style)); - try!(self.word_space(":")); - - try!(self.commasep(Inconsistent, &a.outputs, |s, out| { - match out.constraint.slice_shift_char() { - Some(('=', operand)) if out.is_rw => { - try!(s.print_string(&format!("+{}", operand), ast::CookedStr)) - } - _ => try!(s.print_string(&out.constraint, ast::CookedStr)), - } - try!(s.popen()); - try!(s.print_expr(&*out.expr)); - try!(s.pclose()); - Ok(()) - })); - try!(space(&mut self.s)); - try!(self.word_space(":")); - - try!(self.commasep(Inconsistent, &a.inputs, |s, &(ref co, ref o)| { - try!(s.print_string(&co, ast::CookedStr)); - try!(s.popen()); - try!(s.print_expr(&**o)); - try!(s.pclose()); - Ok(()) - })); - try!(space(&mut self.s)); - try!(self.word_space(":")); - - try!(self.commasep(Inconsistent, &a.clobbers, |s, co| { - try!(s.print_string(&co, ast::CookedStr)); - Ok(()) - })); - - let mut options = vec![]; - if a.volatile { - options.push("volatile"); - } - if a.alignstack { - options.push("alignstack"); - } - if a.dialect == ast::AsmDialect::Intel { - options.push("intel"); - } - - if !options.is_empty() { - try!(space(&mut self.s)); - try!(self.word_space(":")); - try!(self.commasep(Inconsistent, &*options, |s, &co| { - try!(s.print_string(co, ast::CookedStr)); - Ok(()) - })); - } - - try!(self.pclose()); - } - } - try!(self.ann.post(self, NodeExpr(expr))); - self.end() - } - - pub fn print_local_decl(&mut self, loc: &hir::Local) -> io::Result<()> { - try!(self.print_pat(&*loc.pat)); - if let Some(ref ty) = loc.ty { - try!(self.word_space(":")); - try!(self.print_type(&**ty)); - } - Ok(()) - } - - pub fn print_decl(&mut self, decl: &hir::Decl) -> io::Result<()> { - try!(self.maybe_print_comment(decl.span.lo)); - match decl.node { - 
hir::DeclLocal(ref loc) => { - try!(self.space_if_not_bol()); - try!(self.ibox(indent_unit)); - try!(self.word_nbsp("let")); - - try!(self.ibox(indent_unit)); - try!(self.print_local_decl(&**loc)); - try!(self.end()); - if let Some(ref init) = loc.init { - try!(self.nbsp()); - try!(self.word_space("=")); - try!(self.print_expr(&**init)); - } - self.end() - } - hir::DeclItem(ref item) => { - self.print_item_id(item) - } - } - } - - pub fn print_usize(&mut self, i: usize) -> io::Result<()> { - word(&mut self.s, &i.to_string()) - } - - pub fn print_name(&mut self, name: ast::Name) -> io::Result<()> { - try!(word(&mut self.s, &name.as_str())); - self.ann.post(self, NodeName(&name)) - } - - pub fn print_for_decl(&mut self, loc: &hir::Local, coll: &hir::Expr) -> io::Result<()> { - try!(self.print_local_decl(loc)); - try!(space(&mut self.s)); - try!(self.word_space("in")); - self.print_expr(coll) - } - - fn print_path(&mut self, - path: &hir::Path, - colons_before_params: bool, - depth: usize) - -> io::Result<()> { - try!(self.maybe_print_comment(path.span.lo)); - - let mut first = !path.global; - for segment in &path.segments[..path.segments.len() - depth] { - if first { - first = false - } else { - try!(word(&mut self.s, "::")) - } - - try!(self.print_name(segment.identifier.name)); - - try!(self.print_path_parameters(&segment.parameters, colons_before_params)); - } - - Ok(()) - } - - fn print_qpath(&mut self, - path: &hir::Path, - qself: &hir::QSelf, - colons_before_params: bool) - -> io::Result<()> { - try!(word(&mut self.s, "<")); - try!(self.print_type(&qself.ty)); - if qself.position > 0 { - try!(space(&mut self.s)); - try!(self.word_space("as")); - let depth = path.segments.len() - qself.position; - try!(self.print_path(&path, false, depth)); - } - try!(word(&mut self.s, ">")); - try!(word(&mut self.s, "::")); - let item_segment = path.segments.last().unwrap(); - try!(self.print_name(item_segment.identifier.name)); - 
self.print_path_parameters(&item_segment.parameters, colons_before_params) - } - - fn print_path_parameters(&mut self, - parameters: &hir::PathParameters, - colons_before_params: bool) - -> io::Result<()> { - if parameters.is_empty() { - return Ok(()); - } - - if colons_before_params { - try!(word(&mut self.s, "::")) - } - - match *parameters { - hir::AngleBracketedParameters(ref data) => { - try!(word(&mut self.s, "<")); - - let mut comma = false; - for lifetime in &data.lifetimes { - if comma { - try!(self.word_space(",")) - } - try!(self.print_lifetime(lifetime)); - comma = true; - } - - if !data.types.is_empty() { - if comma { - try!(self.word_space(",")) - } - try!(self.commasep(Inconsistent, &data.types, |s, ty| s.print_type(&**ty))); - comma = true; - } - - for binding in data.bindings.iter() { - if comma { - try!(self.word_space(",")) - } - try!(self.print_name(binding.name)); - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_type(&*binding.ty)); - comma = true; - } - - try!(word(&mut self.s, ">")) - } - - hir::ParenthesizedParameters(ref data) => { - try!(word(&mut self.s, "(")); - try!(self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(&**ty))); - try!(word(&mut self.s, ")")); - - match data.output { - None => {} - Some(ref ty) => { - try!(self.space_if_not_bol()); - try!(self.word_space("->")); - try!(self.print_type(&**ty)); - } - } - } - } - - Ok(()) - } - - pub fn print_pat(&mut self, pat: &hir::Pat) -> io::Result<()> { - try!(self.maybe_print_comment(pat.span.lo)); - try!(self.ann.pre(self, NodePat(pat))); - // Pat isn't normalized, but the beauty of it - // is that it doesn't matter - match pat.node { - hir::PatWild => try!(word(&mut self.s, "_")), - hir::PatIdent(binding_mode, ref path1, ref sub) => { - match binding_mode { - hir::BindByRef(mutbl) => { - try!(self.word_nbsp("ref")); - try!(self.print_mutability(mutbl)); - } - hir::BindByValue(hir::MutImmutable) => {} - hir::BindByValue(hir::MutMutable) => { - 
try!(self.word_nbsp("mut")); - } - } - try!(self.print_name(path1.node.name)); - match *sub { - Some(ref p) => { - try!(word(&mut self.s, "@")); - try!(self.print_pat(&**p)); - } - None => (), - } - } - hir::PatEnum(ref path, ref args_) => { - try!(self.print_path(path, true, 0)); - match *args_ { - None => try!(word(&mut self.s, "(..)")), - Some(ref args) => { - if !args.is_empty() { - try!(self.popen()); - try!(self.commasep(Inconsistent, &args[..], |s, p| s.print_pat(&**p))); - try!(self.pclose()); - } - } - } - } - hir::PatQPath(ref qself, ref path) => { - try!(self.print_qpath(path, qself, false)); - } - hir::PatStruct(ref path, ref fields, etc) => { - try!(self.print_path(path, true, 0)); - try!(self.nbsp()); - try!(self.word_space("{")); - try!(self.commasep_cmnt(Consistent, - &fields[..], - |s, f| { - try!(s.cbox(indent_unit)); - if !f.node.is_shorthand { - try!(s.print_name(f.node.name)); - try!(s.word_nbsp(":")); - } - try!(s.print_pat(&*f.node.pat)); - s.end() - }, - |f| f.node.pat.span)); - if etc { - if !fields.is_empty() { - try!(self.word_space(",")); - } - try!(word(&mut self.s, "..")); - } - try!(space(&mut self.s)); - try!(word(&mut self.s, "}")); - } - hir::PatTup(ref elts) => { - try!(self.popen()); - try!(self.commasep(Inconsistent, &elts[..], |s, p| s.print_pat(&**p))); - if elts.len() == 1 { - try!(word(&mut self.s, ",")); - } - try!(self.pclose()); - } - hir::PatBox(ref inner) => { - try!(word(&mut self.s, "box ")); - try!(self.print_pat(&**inner)); - } - hir::PatRegion(ref inner, mutbl) => { - try!(word(&mut self.s, "&")); - if mutbl == hir::MutMutable { - try!(word(&mut self.s, "mut ")); - } - try!(self.print_pat(&**inner)); - } - hir::PatLit(ref e) => try!(self.print_expr(&**e)), - hir::PatRange(ref begin, ref end) => { - try!(self.print_expr(&**begin)); - try!(space(&mut self.s)); - try!(word(&mut self.s, "...")); - try!(self.print_expr(&**end)); - } - hir::PatVec(ref before, ref slice, ref after) => { - try!(word(&mut self.s, "[")); - 
try!(self.commasep(Inconsistent, &before[..], |s, p| s.print_pat(&**p))); - if let Some(ref p) = *slice { - if !before.is_empty() { - try!(self.word_space(",")); - } - if p.node != hir::PatWild { - try!(self.print_pat(&**p)); - } - try!(word(&mut self.s, "..")); - if !after.is_empty() { - try!(self.word_space(",")); - } - } - try!(self.commasep(Inconsistent, &after[..], |s, p| s.print_pat(&**p))); - try!(word(&mut self.s, "]")); - } - } - self.ann.post(self, NodePat(pat)) - } - - fn print_arm(&mut self, arm: &hir::Arm) -> io::Result<()> { - // I have no idea why this check is necessary, but here it - // is :( - if arm.attrs.is_empty() { - try!(space(&mut self.s)); - } - try!(self.cbox(indent_unit)); - try!(self.ibox(0)); - try!(self.print_outer_attributes(&arm.attrs)); - let mut first = true; - for p in &arm.pats { - if first { - first = false; - } else { - try!(space(&mut self.s)); - try!(self.word_space("|")); - } - try!(self.print_pat(&**p)); - } - try!(space(&mut self.s)); - if let Some(ref e) = arm.guard { - try!(self.word_space("if")); - try!(self.print_expr(&**e)); - try!(space(&mut self.s)); - } - try!(self.word_space("=>")); - - match arm.body.node { - hir::ExprBlock(ref blk) => { - // the block will close the pattern's ibox - try!(self.print_block_unclosed_indent(&**blk, indent_unit)); - - // If it is a user-provided unsafe block, print a comma after it - if let hir::UnsafeBlock(hir::UserProvided) = blk.rules { - try!(word(&mut self.s, ",")); - } - } - _ => { - try!(self.end()); // close the ibox for the pattern - try!(self.print_expr(&*arm.body)); - try!(word(&mut self.s, ",")); - } - } - self.end() // close enclosing cbox - } - - // Returns whether it printed anything - fn print_explicit_self(&mut self, - explicit_self: &hir::ExplicitSelf_, - mutbl: hir::Mutability) - -> io::Result { - try!(self.print_mutability(mutbl)); - match *explicit_self { - hir::SelfStatic => { - return Ok(false); - } - hir::SelfValue(_) => { - try!(word(&mut self.s, "self")); - 
} - hir::SelfRegion(ref lt, m, _) => { - try!(word(&mut self.s, "&")); - try!(self.print_opt_lifetime(lt)); - try!(self.print_mutability(m)); - try!(word(&mut self.s, "self")); - } - hir::SelfExplicit(ref typ, _) => { - try!(word(&mut self.s, "self")); - try!(self.word_space(":")); - try!(self.print_type(&**typ)); - } - } - return Ok(true); - } - - pub fn print_fn(&mut self, - decl: &hir::FnDecl, - unsafety: hir::Unsafety, - constness: hir::Constness, - abi: abi::Abi, - name: Option, - generics: &hir::Generics, - opt_explicit_self: Option<&hir::ExplicitSelf_>, - vis: hir::Visibility) - -> io::Result<()> { - try!(self.print_fn_header_info(unsafety, constness, abi, vis)); - - if let Some(name) = name { - try!(self.nbsp()); - try!(self.print_name(name)); - } - try!(self.print_generics(generics)); - try!(self.print_fn_args_and_ret(decl, opt_explicit_self)); - self.print_where_clause(&generics.where_clause) - } - - pub fn print_fn_args(&mut self, - decl: &hir::FnDecl, - opt_explicit_self: Option<&hir::ExplicitSelf_>) - -> io::Result<()> { - // It is unfortunate to duplicate the commasep logic, but we want the - // self type and the args all in the same box. - try!(self.rbox(0, Inconsistent)); - let mut first = true; - if let Some(explicit_self) = opt_explicit_self { - let m = match explicit_self { - &hir::SelfStatic => hir::MutImmutable, - _ => match decl.inputs[0].pat.node { - hir::PatIdent(hir::BindByValue(m), _, _) => m, - _ => hir::MutImmutable, - }, - }; - first = !try!(self.print_explicit_self(explicit_self, m)); - } - - // HACK(eddyb) ignore the separately printed self argument. - let args = if first { - &decl.inputs[..] - } else { - &decl.inputs[1..] 
- }; - - for arg in args { - if first { - first = false; - } else { - try!(self.word_space(",")); - } - try!(self.print_arg(arg)); - } - - self.end() - } - - pub fn print_fn_args_and_ret(&mut self, - decl: &hir::FnDecl, - opt_explicit_self: Option<&hir::ExplicitSelf_>) - -> io::Result<()> { - try!(self.popen()); - try!(self.print_fn_args(decl, opt_explicit_self)); - if decl.variadic { - try!(word(&mut self.s, ", ...")); - } - try!(self.pclose()); - - self.print_fn_output(decl) - } - - pub fn print_fn_block_args(&mut self, decl: &hir::FnDecl) -> io::Result<()> { - try!(word(&mut self.s, "|")); - try!(self.print_fn_args(decl, None)); - try!(word(&mut self.s, "|")); - - if let hir::DefaultReturn(..) = decl.output { - return Ok(()); - } - - try!(self.space_if_not_bol()); - try!(self.word_space("->")); - match decl.output { - hir::Return(ref ty) => { - try!(self.print_type(&**ty)); - self.maybe_print_comment(ty.span.lo) - } - hir::DefaultReturn(..) => unreachable!(), - hir::NoReturn(span) => { - try!(self.word_nbsp("!")); - self.maybe_print_comment(span.lo) - } - } - } - - pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureClause) -> io::Result<()> { - match capture_clause { - hir::CaptureByValue => self.word_space("move"), - hir::CaptureByRef => Ok(()), - } - } - - pub fn print_bounds(&mut self, prefix: &str, bounds: &[hir::TyParamBound]) -> io::Result<()> { - if !bounds.is_empty() { - try!(word(&mut self.s, prefix)); - let mut first = true; - for bound in bounds { - try!(self.nbsp()); - if first { - first = false; - } else { - try!(self.word_space("+")); - } - - try!(match *bound { - TraitTyParamBound(ref tref, TraitBoundModifier::None) => { - self.print_poly_trait_ref(tref) - } - TraitTyParamBound(ref tref, TraitBoundModifier::Maybe) => { - try!(word(&mut self.s, "?")); - self.print_poly_trait_ref(tref) - } - RegionTyParamBound(ref lt) => { - self.print_lifetime(lt) - } - }) - } - Ok(()) - } else { - Ok(()) - } - } - - pub fn print_lifetime(&mut self, 
lifetime: &hir::Lifetime) -> io::Result<()> { - self.print_name(lifetime.name) - } - - pub fn print_lifetime_def(&mut self, lifetime: &hir::LifetimeDef) -> io::Result<()> { - try!(self.print_lifetime(&lifetime.lifetime)); - let mut sep = ":"; - for v in &lifetime.bounds { - try!(word(&mut self.s, sep)); - try!(self.print_lifetime(v)); - sep = "+"; - } - Ok(()) - } - - pub fn print_generics(&mut self, generics: &hir::Generics) -> io::Result<()> { - let total = generics.lifetimes.len() + generics.ty_params.len(); - if total == 0 { - return Ok(()); - } - - try!(word(&mut self.s, "<")); - - let mut ints = Vec::new(); - for i in 0..total { - ints.push(i); - } - - try!(self.commasep(Inconsistent, &ints[..], |s, &idx| { - if idx < generics.lifetimes.len() { - let lifetime = &generics.lifetimes[idx]; - s.print_lifetime_def(lifetime) - } else { - let idx = idx - generics.lifetimes.len(); - let param = &generics.ty_params[idx]; - s.print_ty_param(param) - } - })); - - try!(word(&mut self.s, ">")); - Ok(()) - } - - pub fn print_ty_param(&mut self, param: &hir::TyParam) -> io::Result<()> { - try!(self.print_name(param.name)); - try!(self.print_bounds(":", ¶m.bounds)); - match param.default { - Some(ref default) => { - try!(space(&mut self.s)); - try!(self.word_space("=")); - self.print_type(&**default) - } - _ => Ok(()), - } - } - - pub fn print_where_clause(&mut self, where_clause: &hir::WhereClause) -> io::Result<()> { - if where_clause.predicates.is_empty() { - return Ok(()); - } - - try!(space(&mut self.s)); - try!(self.word_space("where")); - - for (i, predicate) in where_clause.predicates.iter().enumerate() { - if i != 0 { - try!(self.word_space(",")); - } - - match predicate { - &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate{ref bound_lifetimes, - ref bounded_ty, - ref bounds, - ..}) => { - try!(self.print_formal_lifetime_list(bound_lifetimes)); - try!(self.print_type(&**bounded_ty)); - try!(self.print_bounds(":", bounds)); - } - 
&hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate{ref lifetime, - ref bounds, - ..}) => { - try!(self.print_lifetime(lifetime)); - try!(word(&mut self.s, ":")); - - for (i, bound) in bounds.iter().enumerate() { - try!(self.print_lifetime(bound)); - - if i != 0 { - try!(word(&mut self.s, ":")); - } - } - } - &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate{ref path, ref ty, ..}) => { - try!(self.print_path(path, false, 0)); - try!(space(&mut self.s)); - try!(self.word_space("=")); - try!(self.print_type(&**ty)); - } - } - } - - Ok(()) - } - - pub fn print_view_path(&mut self, vp: &hir::ViewPath) -> io::Result<()> { - match vp.node { - hir::ViewPathSimple(name, ref path) => { - try!(self.print_path(path, false, 0)); - - if path.segments.last().unwrap().identifier.name != name { - try!(space(&mut self.s)); - try!(self.word_space("as")); - try!(self.print_name(name)); - } - - Ok(()) - } - - hir::ViewPathGlob(ref path) => { - try!(self.print_path(path, false, 0)); - word(&mut self.s, "::*") - } - - hir::ViewPathList(ref path, ref segments) => { - if path.segments.is_empty() { - try!(word(&mut self.s, "{")); - } else { - try!(self.print_path(path, false, 0)); - try!(word(&mut self.s, "::{")); - } - try!(self.commasep(Inconsistent, &segments[..], |s, w| { - match w.node { - hir::PathListIdent { name, .. } => { - s.print_name(name) - } - hir::PathListMod { .. 
} => { - word(&mut s.s, "self") - } - } - })); - word(&mut self.s, "}") - } - } - } - - pub fn print_mutability(&mut self, mutbl: hir::Mutability) -> io::Result<()> { - match mutbl { - hir::MutMutable => self.word_nbsp("mut"), - hir::MutImmutable => Ok(()), - } - } - - pub fn print_mt(&mut self, mt: &hir::MutTy) -> io::Result<()> { - try!(self.print_mutability(mt.mutbl)); - self.print_type(&*mt.ty) - } - - pub fn print_arg(&mut self, input: &hir::Arg) -> io::Result<()> { - try!(self.ibox(indent_unit)); - match input.ty.node { - hir::TyInfer => try!(self.print_pat(&*input.pat)), - _ => { - match input.pat.node { - hir::PatIdent(_, ref path1, _) if - path1.node.name == - parse::token::special_idents::invalid.name => { - // Do nothing. - } - _ => { - try!(self.print_pat(&*input.pat)); - try!(word(&mut self.s, ":")); - try!(space(&mut self.s)); - } - } - try!(self.print_type(&*input.ty)); - } - } - self.end() - } - - pub fn print_fn_output(&mut self, decl: &hir::FnDecl) -> io::Result<()> { - if let hir::DefaultReturn(..) = decl.output { - return Ok(()); - } - - try!(self.space_if_not_bol()); - try!(self.ibox(indent_unit)); - try!(self.word_space("->")); - match decl.output { - hir::NoReturn(_) => try!(self.word_nbsp("!")), - hir::DefaultReturn(..) 
=> unreachable!(), - hir::Return(ref ty) => try!(self.print_type(&**ty)), - } - try!(self.end()); - - match decl.output { - hir::Return(ref output) => self.maybe_print_comment(output.span.lo), - _ => Ok(()), - } - } - - pub fn print_ty_fn(&mut self, - abi: abi::Abi, - unsafety: hir::Unsafety, - decl: &hir::FnDecl, - name: Option, - generics: &hir::Generics, - opt_explicit_self: Option<&hir::ExplicitSelf_>) - -> io::Result<()> { - try!(self.ibox(indent_unit)); - if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { - try!(word(&mut self.s, "for")); - try!(self.print_generics(generics)); - } - let generics = hir::Generics { - lifetimes: hir::HirVec::new(), - ty_params: hir::HirVec::new(), - where_clause: hir::WhereClause { - id: ast::DUMMY_NODE_ID, - predicates: hir::HirVec::new(), - }, - }; - try!(self.print_fn(decl, - unsafety, - hir::Constness::NotConst, - abi, - name, - &generics, - opt_explicit_self, - hir::Inherited)); - self.end() - } - - pub fn maybe_print_trailing_comment(&mut self, - span: codemap::Span, - next_pos: Option) - -> io::Result<()> { - let cm = match self.cm { - Some(cm) => cm, - _ => return Ok(()), - }; - match self.next_comment() { - Some(ref cmnt) => { - if (*cmnt).style != comments::Trailing { - return Ok(()); - } - let span_line = cm.lookup_char_pos(span.hi); - let comment_line = cm.lookup_char_pos((*cmnt).pos); - let mut next = (*cmnt).pos + BytePos(1); - match next_pos { - None => (), - Some(p) => next = p, - } - if span.hi < (*cmnt).pos && (*cmnt).pos < next && - span_line.line == comment_line.line { - try!(self.print_comment(cmnt)); - self.cur_cmnt_and_lit.cur_cmnt += 1; - } - } - _ => (), - } - Ok(()) - } - - pub fn print_remaining_comments(&mut self) -> io::Result<()> { - // If there aren't any remaining comments, then we need to manually - // make sure there is a line break at the end. 
- if self.next_comment().is_none() { - try!(hardbreak(&mut self.s)); - } - loop { - match self.next_comment() { - Some(ref cmnt) => { - try!(self.print_comment(cmnt)); - self.cur_cmnt_and_lit.cur_cmnt += 1; - } - _ => break, - } - } - Ok(()) - } - - pub fn print_opt_abi_and_extern_if_nondefault(&mut self, - opt_abi: Option) - -> io::Result<()> { - match opt_abi { - Some(abi::Rust) => Ok(()), - Some(abi) => { - try!(self.word_nbsp("extern")); - self.word_nbsp(&abi.to_string()) - } - None => Ok(()), - } - } - - pub fn print_extern_opt_abi(&mut self, opt_abi: Option) -> io::Result<()> { - match opt_abi { - Some(abi) => { - try!(self.word_nbsp("extern")); - self.word_nbsp(&abi.to_string()) - } - None => Ok(()), - } - } - - pub fn print_fn_header_info(&mut self, - unsafety: hir::Unsafety, - constness: hir::Constness, - abi: abi::Abi, - vis: hir::Visibility) - -> io::Result<()> { - try!(word(&mut self.s, &visibility_qualified(vis, ""))); - try!(self.print_unsafety(unsafety)); - - match constness { - hir::Constness::NotConst => {} - hir::Constness::Const => try!(self.word_nbsp("const")), - } - - if abi != abi::Rust { - try!(self.word_nbsp("extern")); - try!(self.word_nbsp(&abi.to_string())); - } - - word(&mut self.s, "fn") - } - - pub fn print_unsafety(&mut self, s: hir::Unsafety) -> io::Result<()> { - match s { - hir::Unsafety::Normal => Ok(()), - hir::Unsafety::Unsafe => self.word_nbsp("unsafe"), - } - } -} - -// Dup'ed from parse::classify, but adapted for the HIR. -/// Does this expression require a semicolon to be treated -/// as a statement? The negation of this: 'can this expression -/// be used as a statement without a semicolon' -- is used -/// as an early-bail-out in the parser so that, for instance, -/// if true {...} else {...} -/// |x| 5 -/// isn't parsed as (if true {...} else {...} | x) | 5 -fn expr_requires_semi_to_be_stmt(e: &hir::Expr) -> bool { - match e.node { - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprBlock(_) | - hir::ExprWhile(..) 
| - hir::ExprLoop(..) => false, - _ => true, - } -} - -/// this statement requires a semicolon after it. -/// note that in one case (stmt_semi), we've already -/// seen the semicolon, and thus don't need another. -fn stmt_ends_with_semi(stmt: &hir::Stmt_) -> bool { - match *stmt { - hir::StmtDecl(ref d, _) => { - match d.node { - hir::DeclLocal(_) => true, - hir::DeclItem(_) => false, - } - } - hir::StmtExpr(ref e, _) => { - expr_requires_semi_to_be_stmt(&**e) - } - hir::StmtSemi(..) => { - false - } - } -} diff --git a/src/librustc_front/util.rs b/src/librustc_front/util.rs deleted file mode 100644 index 57ffefd3be435..0000000000000 --- a/src/librustc_front/util.rs +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use hir; -use hir::*; -use intravisit::{self, Visitor, FnKind}; -use syntax::ast_util; -use syntax::ast::{Name, NodeId, DUMMY_NODE_ID}; -use syntax::codemap::Span; -use syntax::ptr::P; - -pub fn walk_pat(pat: &Pat, mut it: F) -> bool - where F: FnMut(&Pat) -> bool -{ - // FIXME(#19596) this is a workaround, but there should be a better way - fn walk_pat_(pat: &Pat, it: &mut G) -> bool - where G: FnMut(&Pat) -> bool - { - if !(*it)(pat) { - return false; - } - - match pat.node { - PatIdent(_, _, Some(ref p)) => walk_pat_(&**p, it), - PatStruct(_, ref fields, _) => { - fields.iter().all(|field| walk_pat_(&*field.node.pat, it)) - } - PatEnum(_, Some(ref s)) | PatTup(ref s) => { - s.iter().all(|p| walk_pat_(&**p, it)) - } - PatBox(ref s) | PatRegion(ref s, _) => { - walk_pat_(&**s, it) - } - PatVec(ref before, ref slice, ref after) => { - before.iter().all(|p| walk_pat_(&**p, it)) && - slice.iter().all(|p| walk_pat_(&**p, it)) && - after.iter().all(|p| walk_pat_(&**p, it)) - } - PatWild | - PatLit(_) | - PatRange(_, _) | - PatIdent(_, _, _) | - PatEnum(_, _) | - PatQPath(_, _) => { - true - } - } - } - - walk_pat_(pat, &mut it) -} - -pub fn binop_to_string(op: BinOp_) -> &'static str { - match op { - BiAdd => "+", - BiSub => "-", - BiMul => "*", - BiDiv => "/", - BiRem => "%", - BiAnd => "&&", - BiOr => "||", - BiBitXor => "^", - BiBitAnd => "&", - BiBitOr => "|", - BiShl => "<<", - BiShr => ">>", - BiEq => "==", - BiLt => "<", - BiLe => "<=", - BiNe => "!=", - BiGe => ">=", - BiGt => ">", - } -} - -pub fn stmt_id(s: &Stmt) -> NodeId { - match s.node { - StmtDecl(_, id) => id, - StmtExpr(_, id) => id, - StmtSemi(_, id) => id, - } -} - -pub fn lazy_binop(b: BinOp_) -> bool { - match b { - BiAnd => true, - BiOr => true, - _ => false, - } -} - -pub fn is_shift_binop(b: BinOp_) -> bool { - match b { - BiShl => true, - BiShr => true, - _ => false, - } -} - -pub fn is_comparison_binop(b: BinOp_) -> bool { - match b { - BiEq | BiLt | BiLe | BiNe | BiGt | BiGe => true, - 
BiAnd | - BiOr | - BiAdd | - BiSub | - BiMul | - BiDiv | - BiRem | - BiBitXor | - BiBitAnd | - BiBitOr | - BiShl | - BiShr => false, - } -} - -/// Returns `true` if the binary operator takes its arguments by value -pub fn is_by_value_binop(b: BinOp_) -> bool { - !is_comparison_binop(b) -} - -/// Returns `true` if the unary operator takes its argument by value -pub fn is_by_value_unop(u: UnOp) -> bool { - match u { - UnNeg | UnNot => true, - _ => false, - } -} - -pub fn unop_to_string(op: UnOp) -> &'static str { - match op { - UnDeref => "*", - UnNot => "!", - UnNeg => "-", - } -} - -pub struct IdVisitor<'a, O: 'a> { - operation: &'a mut O, - - // In general, the id visitor visits the contents of an item, but - // not including nested trait/impl items, nor other nested items. - // The base visitor itself always skips nested items, but not - // trait/impl items. This means in particular that if you start by - // visiting a trait or an impl, you should not visit the - // trait/impl items respectively. This is handled by setting - // `skip_members` to true when `visit_item` is on the stack. This - // way, if the user begins by calling `visit_trait_item`, we will - // visit the trait item, but if they begin with `visit_item`, we - // won't visit the (nested) trait items. 
- skip_members: bool, -} - -impl<'a, O: ast_util::IdVisitingOperation> IdVisitor<'a, O> { - pub fn new(operation: &'a mut O) -> IdVisitor<'a, O> { - IdVisitor { operation: operation, skip_members: false } - } - - fn visit_generics_helper(&mut self, generics: &Generics) { - for type_parameter in generics.ty_params.iter() { - self.operation.visit_id(type_parameter.id) - } - for lifetime in &generics.lifetimes { - self.operation.visit_id(lifetime.lifetime.id) - } - } -} - -impl<'a, 'v, O: ast_util::IdVisitingOperation> Visitor<'v> for IdVisitor<'a, O> { - fn visit_mod(&mut self, module: &Mod, _: Span, node_id: NodeId) { - self.operation.visit_id(node_id); - intravisit::walk_mod(self, module) - } - - fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { - self.operation.visit_id(foreign_item.id); - intravisit::walk_foreign_item(self, foreign_item) - } - - fn visit_item(&mut self, item: &Item) { - assert!(!self.skip_members); - self.skip_members = true; - - self.operation.visit_id(item.id); - match item.node { - ItemUse(ref view_path) => { - match view_path.node { - ViewPathSimple(_, _) | - ViewPathGlob(_) => {} - ViewPathList(_, ref paths) => { - for path in paths { - self.operation.visit_id(path.node.id()) - } - } - } - } - _ => {} - } - intravisit::walk_item(self, item); - - self.skip_members = false; - } - - fn visit_local(&mut self, local: &Local) { - self.operation.visit_id(local.id); - intravisit::walk_local(self, local) - } - - fn visit_block(&mut self, block: &Block) { - self.operation.visit_id(block.id); - intravisit::walk_block(self, block) - } - - fn visit_stmt(&mut self, statement: &Stmt) { - self.operation.visit_id(stmt_id(statement)); - intravisit::walk_stmt(self, statement) - } - - fn visit_pat(&mut self, pattern: &Pat) { - self.operation.visit_id(pattern.id); - intravisit::walk_pat(self, pattern) - } - - fn visit_expr(&mut self, expression: &Expr) { - self.operation.visit_id(expression.id); - intravisit::walk_expr(self, expression) - } - - fn 
visit_ty(&mut self, typ: &Ty) { - self.operation.visit_id(typ.id); - intravisit::walk_ty(self, typ) - } - - fn visit_generics(&mut self, generics: &Generics) { - self.visit_generics_helper(generics); - intravisit::walk_generics(self, generics) - } - - fn visit_fn(&mut self, - function_kind: FnKind<'v>, - function_declaration: &'v FnDecl, - block: &'v Block, - span: Span, - node_id: NodeId) { - self.operation.visit_id(node_id); - - match function_kind { - FnKind::ItemFn(_, generics, _, _, _, _) => { - self.visit_generics_helper(generics) - } - FnKind::Method(_, sig, _) => { - self.visit_generics_helper(&sig.generics) - } - FnKind::Closure => {} - } - - for argument in &function_declaration.inputs { - self.operation.visit_id(argument.id) - } - - intravisit::walk_fn(self, function_kind, function_declaration, block, span); - } - - fn visit_struct_field(&mut self, struct_field: &StructField) { - self.operation.visit_id(struct_field.node.id); - intravisit::walk_struct_field(self, struct_field) - } - - fn visit_variant_data(&mut self, - struct_def: &VariantData, - _: Name, - _: &hir::Generics, - _: NodeId, - _: Span) { - self.operation.visit_id(struct_def.id()); - intravisit::walk_struct_def(self, struct_def); - } - - fn visit_trait_item(&mut self, ti: &hir::TraitItem) { - if !self.skip_members { - self.operation.visit_id(ti.id); - intravisit::walk_trait_item(self, ti); - } - } - - fn visit_impl_item(&mut self, ii: &hir::ImplItem) { - if !self.skip_members { - self.operation.visit_id(ii.id); - intravisit::walk_impl_item(self, ii); - } - } - - fn visit_lifetime(&mut self, lifetime: &Lifetime) { - self.operation.visit_id(lifetime.id); - } - - fn visit_lifetime_def(&mut self, def: &LifetimeDef) { - self.visit_lifetime(&def.lifetime); - } - - fn visit_trait_ref(&mut self, trait_ref: &TraitRef) { - self.operation.visit_id(trait_ref.ref_id); - intravisit::walk_trait_ref(self, trait_ref); - } -} - -/// Computes the id range for a single fn body, ignoring nested items. 
-pub fn compute_id_range_for_fn_body(fk: FnKind, - decl: &FnDecl, - body: &Block, - sp: Span, - id: NodeId) - -> ast_util::IdRange { - let mut visitor = ast_util::IdRangeComputingVisitor { result: ast_util::IdRange::max() }; - let mut id_visitor = IdVisitor::new(&mut visitor); - id_visitor.visit_fn(fk, decl, body, sp, id); - id_visitor.operation.result -} - -pub fn is_path(e: P) -> bool { - match e.node { - ExprPath(..) => true, - _ => false, - } -} - -pub fn empty_generics() -> Generics { - Generics { - lifetimes: HirVec::new(), - ty_params: HirVec::new(), - where_clause: WhereClause { - id: DUMMY_NODE_ID, - predicates: HirVec::new(), - }, - } -} - -// convert a span and an identifier to the corresponding -// 1-segment path -pub fn ident_to_path(s: Span, ident: Ident) -> Path { - hir::Path { - span: s, - global: false, - segments: hir_vec![hir::PathSegment { - identifier: ident, - parameters: hir::AngleBracketedParameters(hir::AngleBracketedParameterData { - lifetimes: HirVec::new(), - types: HirVec::new(), - bindings: HirVec::new(), - }), - }], - } -} diff --git a/src/librustc_incremental/Cargo.toml b/src/librustc_incremental/Cargo.toml new file mode 100644 index 0000000000000..e3ee752754504 --- /dev/null +++ b/src/librustc_incremental/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_incremental" +version = "0.0.0" + +[lib] +name = "rustc_incremental" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +graphviz = { path = "../libgraphviz" } +rustc = { path = "../librustc" } +rustc_data_structures = { path = "../librustc_data_structures" } +serialize = { path = "../libserialize" } +log = { path = "../liblog" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_incremental/assert_dep_graph.rs b/src/librustc_incremental/assert_dep_graph.rs new file mode 100644 index 0000000000000..87e6b2befdc32 --- /dev/null +++ 
b/src/librustc_incremental/assert_dep_graph.rs @@ -0,0 +1,423 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass is only used for the UNIT TESTS and DEBUGGING NEEDS +//! around dependency graph construction. It serves two purposes; it +//! will dump graphs in graphviz form to disk, and it searches for +//! `#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]` +//! annotations. These annotations can be used to test whether paths +//! exist in the graph. These checks run after trans, so they view the +//! the final state of the dependency graph. Note that there are +//! similar assertions found in `persist::dirty_clean` which check the +//! **initial** state of the dependency graph, just after it has been +//! loaded from disk. +//! +//! In this code, we report errors on each `rustc_if_this_changed` +//! annotation. If a path exists in all cases, then we would report +//! "all path(s) exist". Otherwise, we report: "no path to `foo`" for +//! each case where no path exists. `compile-fail` tests can then be +//! used to check when paths exist or do not. +//! +//! The full form of the `rustc_if_this_changed` annotation is +//! `#[rustc_if_this_changed("foo")]`, which will report a +//! source node of `foo(def_id)`. The `"foo"` is optional and +//! defaults to `"Hir"` if omitted. +//! +//! Example: +//! +//! ``` +//! #[rustc_if_this_changed(Hir)] +//! fn foo() { } +//! +//! #[rustc_then_this_would_need(trans)] //~ ERROR no path from `foo` +//! fn bar() { } +//! +//! #[rustc_then_this_would_need(trans)] //~ ERROR OK +//! fn baz() { foo(); } +//! 
``` + +use graphviz as dot; +use rustc::dep_graph::{DepGraphQuery, DepNode}; +use rustc::dep_graph::debug::{DepNodeFilter, EdgeFilter}; +use rustc::hir::def_id::DefId; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::graph::{Direction, INCOMING, OUTGOING, NodeIndex}; +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use graphviz::IntoCow; +use std::env; +use std::fs::File; +use std::io::Write; +use syntax::ast; +use syntax_pos::Span; +use {ATTR_IF_THIS_CHANGED, ATTR_THEN_THIS_WOULD_NEED}; + +pub fn assert_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let _ignore = tcx.dep_graph.in_ignore(); + + if tcx.sess.opts.debugging_opts.dump_dep_graph { + dump_graph(tcx); + } + + // if the `rustc_attrs` feature is not enabled, then the + // attributes we are interested in cannot be present anyway, so + // skip the walk. + if !tcx.sess.features.borrow().rustc_attrs { + return; + } + + // Find annotations supplied by user (if any). + let (if_this_changed, then_this_would_need) = { + let mut visitor = IfThisChanged { tcx: tcx, + if_this_changed: vec![], + then_this_would_need: vec![] }; + visitor.process_attrs(ast::CRATE_NODE_ID, &tcx.map.krate().attrs); + tcx.map.krate().visit_all_item_likes(&mut visitor); + (visitor.if_this_changed, visitor.then_this_would_need) + }; + + if !if_this_changed.is_empty() || !then_this_would_need.is_empty() { + assert!(tcx.sess.opts.debugging_opts.query_dep_graph, + "cannot use the `#[{}]` or `#[{}]` annotations \ + without supplying `-Z query-dep-graph`", + ATTR_IF_THIS_CHANGED, ATTR_THEN_THIS_WOULD_NEED); + } + + // Check paths. 
+ check_paths(tcx, &if_this_changed, &then_this_would_need); +} + +type Sources = Vec<(Span, DefId, DepNode)>; +type Targets = Vec<(Span, ast::Name, ast::NodeId, DepNode)>; + +struct IfThisChanged<'a, 'tcx:'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + if_this_changed: Sources, + then_this_would_need: Targets, +} + +impl<'a, 'tcx> IfThisChanged<'a, 'tcx> { + fn argument(&self, attr: &ast::Attribute) -> Option { + let mut value = None; + for list_item in attr.meta_item_list().unwrap_or_default() { + match list_item.word() { + Some(word) if value.is_none() => + value = Some(word.name().clone()), + _ => + // FIXME better-encapsulate meta_item (don't directly access `node`) + span_bug!(list_item.span(), "unexpected meta-item {:?}", list_item.node), + } + } + value + } + + fn process_attrs(&mut self, node_id: ast::NodeId, attrs: &[ast::Attribute]) { + let def_id = self.tcx.map.local_def_id(node_id); + for attr in attrs { + if attr.check_name(ATTR_IF_THIS_CHANGED) { + let dep_node_interned = self.argument(attr); + let dep_node = match dep_node_interned { + None => DepNode::Hir(def_id), + Some(n) => { + match DepNode::from_label_string(&n.as_str(), def_id) { + Ok(n) => n, + Err(()) => { + self.tcx.sess.span_fatal( + attr.span, + &format!("unrecognized DepNode variant {:?}", n)); + } + } + } + }; + self.if_this_changed.push((attr.span, def_id, dep_node)); + } else if attr.check_name(ATTR_THEN_THIS_WOULD_NEED) { + let dep_node_interned = self.argument(attr); + let dep_node = match dep_node_interned { + Some(n) => { + match DepNode::from_label_string(&n.as_str(), def_id) { + Ok(n) => n, + Err(()) => { + self.tcx.sess.span_fatal( + attr.span, + &format!("unrecognized DepNode variant {:?}", n)); + } + } + } + None => { + self.tcx.sess.span_fatal( + attr.span, + &format!("missing DepNode variant")); + } + }; + self.then_this_would_need.push((attr.span, + dep_node_interned.unwrap(), + node_id, + dep_node)); + } + } + } +} + +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for IfThisChanged<'a, 
'tcx> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + self.process_attrs(item.id, &item.attrs); + } + + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { + self.process_attrs(impl_item.id, &impl_item.attrs); + } +} + +fn check_paths<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + if_this_changed: &Sources, + then_this_would_need: &Targets) +{ + // Return early here so as not to construct the query, which is not cheap. + if if_this_changed.is_empty() { + for &(target_span, _, _, _) in then_this_would_need { + tcx.sess.span_err( + target_span, + &format!("no #[rustc_if_this_changed] annotation detected")); + + } + return; + } + let query = tcx.dep_graph.query(); + for &(_, source_def_id, ref source_dep_node) in if_this_changed { + let dependents = query.transitive_successors(source_dep_node); + for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need { + if !dependents.contains(&target_dep_node) { + tcx.sess.span_err( + target_span, + &format!("no path from `{}` to `{}`", + tcx.item_path_str(source_def_id), + target_pass)); + } else { + tcx.sess.span_err( + target_span, + &format!("OK")); + } + } + } +} + +fn dump_graph(tcx: TyCtxt) { + let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| format!("dep_graph")); + let query = tcx.dep_graph.query(); + + let nodes = match env::var("RUST_DEP_GRAPH_FILTER") { + Ok(string) => { + // Expect one of: "-> target", "source -> target", or "source ->". 
+ let edge_filter = EdgeFilter::new(&string).unwrap_or_else(|e| { + bug!("invalid filter: {}", e) + }); + let sources = node_set(&query, &edge_filter.source); + let targets = node_set(&query, &edge_filter.target); + filter_nodes(&query, &sources, &targets) + } + Err(_) => { + query.nodes() + .into_iter() + .collect() + } + }; + let edges = filter_edges(&query, &nodes); + + { // dump a .txt file with just the edges: + let txt_path = format!("{}.txt", path); + let mut file = File::create(&txt_path).unwrap(); + for &(ref source, ref target) in &edges { + write!(file, "{:?} -> {:?}\n", source, target).unwrap(); + } + } + + { // dump a .dot file in graphviz format: + let dot_path = format!("{}.dot", path); + let mut v = Vec::new(); + dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap(); + File::create(&dot_path).and_then(|mut f| f.write_all(&v)).unwrap(); + } +} + +pub struct GraphvizDepGraph<'q>(FxHashSet<&'q DepNode>, + Vec<(&'q DepNode, &'q DepNode)>); + +impl<'a, 'tcx, 'q> dot::GraphWalk<'a> for GraphvizDepGraph<'q> { + type Node = &'q DepNode; + type Edge = (&'q DepNode, &'q DepNode); + fn nodes(&self) -> dot::Nodes<&'q DepNode> { + let nodes: Vec<_> = self.0.iter().cloned().collect(); + nodes.into_cow() + } + fn edges(&self) -> dot::Edges<(&'q DepNode, &'q DepNode)> { + self.1[..].into_cow() + } + fn source(&self, edge: &(&'q DepNode, &'q DepNode)) -> &'q DepNode { + edge.0 + } + fn target(&self, edge: &(&'q DepNode, &'q DepNode)) -> &'q DepNode { + edge.1 + } +} + +impl<'a, 'tcx, 'q> dot::Labeller<'a> for GraphvizDepGraph<'q> { + type Node = &'q DepNode; + type Edge = (&'q DepNode, &'q DepNode); + fn graph_id(&self) -> dot::Id { + dot::Id::new("DependencyGraph").unwrap() + } + fn node_id(&self, n: &&'q DepNode) -> dot::Id { + let s: String = + format!("{:?}", n).chars() + .map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' }) + .collect(); + debug!("n={:?} s={:?}", n, s); + dot::Id::new(s).unwrap() + } + fn node_label(&self, n: &&'q DepNode) -> 
dot::LabelText { + dot::LabelText::label(format!("{:?}", n)) + } +} + +// Given an optional filter like `"x,y,z"`, returns either `None` (no +// filter) or the set of nodes whose labels contain all of those +// substrings. +fn node_set<'q>(query: &'q DepGraphQuery, filter: &DepNodeFilter) + -> Option>> +{ + debug!("node_set(filter={:?})", filter); + + if filter.accepts_all() { + return None; + } + + Some(query.nodes().into_iter().filter(|n| filter.test(n)).collect()) +} + +fn filter_nodes<'q>(query: &'q DepGraphQuery, + sources: &Option>>, + targets: &Option>>) + -> FxHashSet<&'q DepNode> +{ + if let &Some(ref sources) = sources { + if let &Some(ref targets) = targets { + walk_between(query, sources, targets) + } else { + walk_nodes(query, sources, OUTGOING) + } + } else if let &Some(ref targets) = targets { + walk_nodes(query, targets, INCOMING) + } else { + query.nodes().into_iter().collect() + } +} + +fn walk_nodes<'q>(query: &'q DepGraphQuery, + starts: &FxHashSet<&'q DepNode>, + direction: Direction) + -> FxHashSet<&'q DepNode> +{ + let mut set = FxHashSet(); + for &start in starts { + debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING); + if set.insert(start) { + let mut stack = vec![query.indices[start]]; + while let Some(index) = stack.pop() { + for (_, edge) in query.graph.adjacent_edges(index, direction) { + let neighbor_index = edge.source_or_target(direction); + let neighbor = query.graph.node_data(neighbor_index); + if set.insert(neighbor) { + stack.push(neighbor_index); + } + } + } + } + } + set +} + +fn walk_between<'q>(query: &'q DepGraphQuery, + sources: &FxHashSet<&'q DepNode>, + targets: &FxHashSet<&'q DepNode>) + -> FxHashSet<&'q DepNode> +{ + // This is a bit tricky. We want to include a node only if it is: + // (a) reachable from a source and (b) will reach a target. And we + // have to be careful about cycles etc. Luckily efficiency is not + // a big concern! 
+ + #[derive(Copy, Clone, PartialEq)] + enum State { Undecided, Deciding, Included, Excluded } + + let mut node_states = vec![State::Undecided; query.graph.len_nodes()]; + + for &target in targets { + node_states[query.indices[target].0] = State::Included; + } + + for source in sources.iter().map(|&n| query.indices[n]) { + recurse(query, &mut node_states, source); + } + + return query.nodes() + .into_iter() + .filter(|&n| { + let index = query.indices[n]; + node_states[index.0] == State::Included + }) + .collect(); + + fn recurse(query: &DepGraphQuery, + node_states: &mut [State], + node: NodeIndex) + -> bool + { + match node_states[node.0] { + // known to reach a target + State::Included => return true, + + // known not to reach a target + State::Excluded => return false, + + // backedge, not yet known, say false + State::Deciding => return false, + + State::Undecided => { } + } + + node_states[node.0] = State::Deciding; + + for neighbor_index in query.graph.successor_nodes(node) { + if recurse(query, node_states, neighbor_index) { + node_states[node.0] = State::Included; + } + } + + // if we didn't find a path to target, then set to excluded + if node_states[node.0] == State::Deciding { + node_states[node.0] = State::Excluded; + false + } else { + assert!(node_states[node.0] == State::Included); + true + } + } +} + +fn filter_edges<'q>(query: &'q DepGraphQuery, + nodes: &FxHashSet<&'q DepNode>) + -> Vec<(&'q DepNode, &'q DepNode)> +{ + query.edges() + .into_iter() + .filter(|&(source, target)| nodes.contains(source) && nodes.contains(target)) + .collect() +} diff --git a/src/librustc_incremental/calculate_svh/caching_codemap_view.rs b/src/librustc_incremental/calculate_svh/caching_codemap_view.rs new file mode 100644 index 0000000000000..ad9c48420e217 --- /dev/null +++ b/src/librustc_incremental/calculate_svh/caching_codemap_view.rs @@ -0,0 +1,115 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::TyCtxt; +use std::rc::Rc; +use syntax::codemap::CodeMap; +use syntax_pos::{BytePos, FileMap}; + +#[derive(Clone)] +struct CacheEntry { + time_stamp: usize, + line_number: usize, + line_start: BytePos, + line_end: BytePos, + file: Rc, +} + +pub struct CachingCodemapView<'tcx> { + codemap: &'tcx CodeMap, + line_cache: [CacheEntry; 3], + time_stamp: usize, +} + +impl<'tcx> CachingCodemapView<'tcx> { + pub fn new<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> CachingCodemapView<'tcx> { + let codemap = tcx.sess.codemap(); + let first_file = codemap.files.borrow()[0].clone(); + let entry = CacheEntry { + time_stamp: 0, + line_number: 0, + line_start: BytePos(0), + line_end: BytePos(0), + file: first_file, + }; + + CachingCodemapView { + codemap: codemap, + line_cache: [entry.clone(), entry.clone(), entry.clone()], + time_stamp: 0, + } + } + + pub fn codemap(&self) -> &'tcx CodeMap { + self.codemap + } + + pub fn byte_pos_to_line_and_col(&mut self, + pos: BytePos) + -> Option<(Rc, usize, BytePos)> { + self.time_stamp += 1; + + // Check if the position is in one of the cached lines + for cache_entry in self.line_cache.iter_mut() { + if pos >= cache_entry.line_start && pos < cache_entry.line_end { + cache_entry.time_stamp = self.time_stamp; + return Some((cache_entry.file.clone(), + cache_entry.line_number, + pos - cache_entry.line_start)); + } + } + + // No cache hit ... + let mut oldest = 0; + for index in 1 .. 
self.line_cache.len() { + if self.line_cache[index].time_stamp < self.line_cache[oldest].time_stamp { + oldest = index; + } + } + + let cache_entry = &mut self.line_cache[oldest]; + + // If the entry doesn't point to the correct file, fix it up + if pos < cache_entry.file.start_pos || pos >= cache_entry.file.end_pos { + let file_valid; + let files = self.codemap.files.borrow(); + + if files.len() > 0 { + let file_index = self.codemap.lookup_filemap_idx(pos); + let file = files[file_index].clone(); + + if pos >= file.start_pos && pos < file.end_pos { + cache_entry.file = file; + file_valid = true; + } else { + file_valid = false; + } + } else { + file_valid = false; + } + + if !file_valid { + return None; + } + } + + let line_index = cache_entry.file.lookup_line(pos).unwrap(); + let line_bounds = cache_entry.file.line_bounds(line_index); + + cache_entry.line_number = line_index + 1; + cache_entry.line_start = line_bounds.0; + cache_entry.line_end = line_bounds.1; + cache_entry.time_stamp = self.time_stamp; + + return Some((cache_entry.file.clone(), + cache_entry.line_number, + pos - cache_entry.line_start)); + } +} diff --git a/src/librustc_incremental/calculate_svh/def_path_hash.rs b/src/librustc_incremental/calculate_svh/def_path_hash.rs new file mode 100644 index 0000000000000..8aa134ba3bfd0 --- /dev/null +++ b/src/librustc_incremental/calculate_svh/def_path_hash.rs @@ -0,0 +1,36 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::hir::def_id::DefId; +use rustc::ty::TyCtxt; +use rustc::util::nodemap::DefIdMap; + +pub struct DefPathHashes<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + data: DefIdMap, +} + +impl<'a, 'tcx> DefPathHashes<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + DefPathHashes { + tcx: tcx, + data: DefIdMap() + } + } + + pub fn hash(&mut self, def_id: DefId) -> u64 { + let tcx = self.tcx; + *self.data.entry(def_id) + .or_insert_with(|| { + let def_path = tcx.def_path(def_id); + def_path.deterministic_hash(tcx) + }) + } +} diff --git a/src/librustc_incremental/calculate_svh/hasher.rs b/src/librustc_incremental/calculate_svh/hasher.rs new file mode 100644 index 0000000000000..d7d9c231a91f4 --- /dev/null +++ b/src/librustc_incremental/calculate_svh/hasher.rs @@ -0,0 +1,88 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::mem; +use std::hash::Hasher; +use rustc_data_structures::blake2b::Blake2bHasher; +use rustc::ty::util::ArchIndependentHasher; +use ich::Fingerprint; +use rustc_serialize::leb128::write_unsigned_leb128; + +#[derive(Debug)] +pub struct IchHasher { + state: ArchIndependentHasher, + leb128_helper: Vec, + bytes_hashed: u64, +} + +impl IchHasher { + pub fn new() -> IchHasher { + let hash_size = mem::size_of::(); + IchHasher { + state: ArchIndependentHasher::new(Blake2bHasher::new(hash_size, &[])), + leb128_helper: vec![], + bytes_hashed: 0 + } + } + + pub fn bytes_hashed(&self) -> u64 { + self.bytes_hashed + } + + pub fn finish(self) -> Fingerprint { + let mut fingerprint = Fingerprint::zero(); + fingerprint.0.copy_from_slice(self.state.into_inner().finalize()); + fingerprint + } + + #[inline] + fn write_uleb128(&mut self, value: u64) { + let len = write_unsigned_leb128(&mut self.leb128_helper, 0, value); + self.state.write(&self.leb128_helper[0..len]); + self.bytes_hashed += len as u64; + } +} + +// For the non-u8 integer cases we leb128 encode them first. Because small +// integers dominate, this significantly and cheaply reduces the number of +// bytes hashed, which is good because blake2b is expensive. +impl Hasher for IchHasher { + fn finish(&self) -> u64 { + bug!("Use other finish() implementation to get the full 128-bit hash."); + } + + #[inline] + fn write(&mut self, bytes: &[u8]) { + self.state.write(bytes); + self.bytes_hashed += bytes.len() as u64; + } + + // There is no need to leb128-encode u8 values. 
+ + #[inline] + fn write_u16(&mut self, i: u16) { + self.write_uleb128(i as u64); + } + + #[inline] + fn write_u32(&mut self, i: u32) { + self.write_uleb128(i as u64); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.write_uleb128(i); + } + + #[inline] + fn write_usize(&mut self, i: usize) { + self.write_uleb128(i as u64); + } +} diff --git a/src/librustc_incremental/calculate_svh/mod.rs b/src/librustc_incremental/calculate_svh/mod.rs new file mode 100644 index 0000000000000..4595a940f100d --- /dev/null +++ b/src/librustc_incremental/calculate_svh/mod.rs @@ -0,0 +1,246 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Calculation of the (misnamed) "strict version hash" for crates and +//! items. This hash is used to tell when the HIR changed in such a +//! way that results from previous compilations may no longer be +//! applicable and hence must be recomputed. It should probably be +//! renamed to the ICH (incremental compilation hash). +//! +//! The hashes for all items are computed once at the beginning of +//! compilation and stored into a map. In addition, a hash is computed +//! of the **entire crate**. +//! +//! Storing the hashes in a map avoids the need to compute them twice +//! (once when loading prior incremental results and once when +//! saving), but it is also important for correctness: at least as of +//! the time of this writing, the typeck passes rewrites entries in +//! the dep-map in-place to accommodate UFCS resolutions. Since name +//! resolution is part of the hash, the result is that hashes computed +//! at the end of compilation would be different from those computed +//! at the beginning. 
+ +use syntax::ast; +use std::cell::RefCell; +use std::hash::Hash; +use rustc::dep_graph::DepNode; +use rustc::hir; +use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; +use rustc::hir::intravisit as visit; +use rustc::hir::intravisit::{Visitor, NestedVisitorMap}; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::FxHashMap; +use rustc::util::common::record_time; +use rustc::session::config::DebugInfoLevel::NoDebugInfo; + +use self::def_path_hash::DefPathHashes; +use self::svh_visitor::StrictVersionHashVisitor; +use self::caching_codemap_view::CachingCodemapView; +use self::hasher::IchHasher; +use ich::Fingerprint; + + +mod def_path_hash; +mod svh_visitor; +mod caching_codemap_view; +pub mod hasher; + +pub struct IncrementalHashesMap { + hashes: FxHashMap, Fingerprint>, + + // These are the metadata hashes for the current crate as they were stored + // during the last compilation session. They are only loaded if + // -Z query-dep-graph was specified and are needed for auto-tests using + // the #[rustc_metadata_dirty] and #[rustc_metadata_clean] attributes to + // check whether some metadata hash has changed in between two revisions. 
+ pub prev_metadata_hashes: RefCell>, +} + +impl IncrementalHashesMap { + pub fn new() -> IncrementalHashesMap { + IncrementalHashesMap { + hashes: FxHashMap(), + prev_metadata_hashes: RefCell::new(FxHashMap()), + } + } + + pub fn insert(&mut self, k: DepNode, v: Fingerprint) -> Option { + self.hashes.insert(k, v) + } + + pub fn iter<'a>(&'a self) + -> ::std::collections::hash_map::Iter<'a, DepNode, Fingerprint> { + self.hashes.iter() + } + + pub fn len(&self) -> usize { + self.hashes.len() + } +} + +impl<'a> ::std::ops::Index<&'a DepNode> for IncrementalHashesMap { + type Output = Fingerprint; + + fn index(&self, index: &'a DepNode) -> &Fingerprint { + match self.hashes.get(index) { + Some(fingerprint) => fingerprint, + None => { + bug!("Could not find ICH for {:?}", index); + } + } + } +} + + +pub fn compute_incremental_hashes_map<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> IncrementalHashesMap { + let _ignore = tcx.dep_graph.in_ignore(); + let krate = tcx.map.krate(); + let hash_spans = tcx.sess.opts.debuginfo != NoDebugInfo; + let mut visitor = HashItemsVisitor { + tcx: tcx, + hashes: IncrementalHashesMap::new(), + def_path_hashes: DefPathHashes::new(tcx), + codemap: CachingCodemapView::new(tcx), + hash_spans: hash_spans, + }; + record_time(&tcx.sess.perf_stats.incr_comp_hashes_time, || { + visitor.calculate_def_id(DefId::local(CRATE_DEF_INDEX), + |v| visit::walk_crate(v, krate)); + krate.visit_all_item_likes(&mut visitor.as_deep_visitor()); + + for macro_def in krate.exported_macros.iter() { + visitor.calculate_node_id(macro_def.id, + |v| v.visit_macro_def(macro_def)); + } + }); + + tcx.sess.perf_stats.incr_comp_hashes_count.set(visitor.hashes.len() as u64); + + record_time(&tcx.sess.perf_stats.svh_time, || visitor.compute_crate_hash()); + visitor.hashes +} + +struct HashItemsVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_path_hashes: DefPathHashes<'a, 'tcx>, + codemap: CachingCodemapView<'tcx>, + hashes: IncrementalHashesMap, + hash_spans: 
bool, +} + +impl<'a, 'tcx> HashItemsVisitor<'a, 'tcx> { + fn calculate_node_id(&mut self, id: ast::NodeId, walk_op: W) + where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>) + { + let def_id = self.tcx.map.local_def_id(id); + self.calculate_def_id(def_id, walk_op) + } + + fn calculate_def_id(&mut self, def_id: DefId, mut walk_op: W) + where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>) + { + assert!(def_id.is_local()); + debug!("HashItemsVisitor::calculate(def_id={:?})", def_id); + self.calculate_def_hash(DepNode::Hir(def_id), false, &mut walk_op); + self.calculate_def_hash(DepNode::HirBody(def_id), true, &mut walk_op); + } + + fn calculate_def_hash(&mut self, + dep_node: DepNode, + hash_bodies: bool, + walk_op: &mut W) + where W: for<'v> FnMut(&mut StrictVersionHashVisitor<'v, 'a, 'tcx>) + { + let mut state = IchHasher::new(); + walk_op(&mut StrictVersionHashVisitor::new(&mut state, + self.tcx, + &mut self.def_path_hashes, + &mut self.codemap, + self.hash_spans, + hash_bodies)); + let bytes_hashed = state.bytes_hashed(); + let item_hash = state.finish(); + debug!("calculate_def_hash: dep_node={:?} hash={:?}", dep_node, item_hash); + self.hashes.insert(dep_node, item_hash); + + let bytes_hashed = self.tcx.sess.perf_stats.incr_comp_bytes_hashed.get() + + bytes_hashed; + self.tcx.sess.perf_stats.incr_comp_bytes_hashed.set(bytes_hashed); + } + + fn compute_crate_hash(&mut self) { + let krate = self.tcx.map.krate(); + + let mut crate_state = IchHasher::new(); + + let crate_disambiguator = self.tcx.sess.local_crate_disambiguator(); + "crate_disambiguator".hash(&mut crate_state); + crate_disambiguator.as_str().len().hash(&mut crate_state); + crate_disambiguator.as_str().hash(&mut crate_state); + + // add each item (in some deterministic order) to the overall + // crate hash. 
+ { + let def_path_hashes = &mut self.def_path_hashes; + let mut item_hashes: Vec<_> = + self.hashes.iter() + .map(|(item_dep_node, &item_hash)| { + // convert from a DepNode tp a + // DepNode where the u64 is the + // hash of the def-id's def-path: + let item_dep_node = + item_dep_node.map_def(|&did| Some(def_path_hashes.hash(did))) + .unwrap(); + (item_dep_node, item_hash) + }) + .collect(); + item_hashes.sort(); // avoid artificial dependencies on item ordering + item_hashes.hash(&mut crate_state); + } + + { + let mut visitor = StrictVersionHashVisitor::new(&mut crate_state, + self.tcx, + &mut self.def_path_hashes, + &mut self.codemap, + self.hash_spans, + false); + visitor.hash_attributes(&krate.attrs); + } + + let crate_hash = crate_state.finish(); + self.hashes.insert(DepNode::Krate, crate_hash); + debug!("calculate_crate_hash: crate_hash={:?}", crate_hash); + } +} + + +impl<'a, 'tcx> Visitor<'tcx> for HashItemsVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::None + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + self.calculate_node_id(item.id, |v| v.visit_item(item)); + visit::walk_item(self, item); + } + + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { + self.calculate_node_id(impl_item.id, |v| v.visit_impl_item(impl_item)); + visit::walk_impl_item(self, impl_item); + } + + fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { + self.calculate_node_id(item.id, |v| v.visit_foreign_item(item)); + visit::walk_foreign_item(self, item); + } +} + diff --git a/src/librustc_incremental/calculate_svh/svh_visitor.rs b/src/librustc_incremental/calculate_svh/svh_visitor.rs new file mode 100644 index 0000000000000..681ad2efa0c14 --- /dev/null +++ b/src/librustc_incremental/calculate_svh/svh_visitor.rs @@ -0,0 +1,1088 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use self::SawExprComponent::*; +use self::SawAbiComponent::*; +use self::SawItemComponent::*; +use self::SawPatComponent::*; +use self::SawTyComponent::*; +use self::SawTraitOrImplItemComponent::*; +use syntax::abi::Abi; +use syntax::ast::{self, Name, NodeId}; +use syntax::attr; +use syntax::parse::token; +use syntax::symbol::{Symbol, InternedString}; +use syntax_pos::{Span, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; +use syntax::tokenstream; +use rustc::hir; +use rustc::hir::*; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::hir::intravisit as visit; +use rustc::ty::TyCtxt; +use rustc_data_structures::fnv; +use std::hash::Hash; + +use super::def_path_hash::DefPathHashes; +use super::caching_codemap_view::CachingCodemapView; +use super::hasher::IchHasher; + +const IGNORED_ATTRIBUTES: &'static [&'static str] = &[ + "cfg", + ::ATTR_IF_THIS_CHANGED, + ::ATTR_THEN_THIS_WOULD_NEED, + ::ATTR_DIRTY, + ::ATTR_CLEAN, + ::ATTR_DIRTY_METADATA, + ::ATTR_CLEAN_METADATA +]; + +pub struct StrictVersionHashVisitor<'a, 'hash: 'a, 'tcx: 'hash> { + pub tcx: TyCtxt<'hash, 'tcx, 'tcx>, + pub st: &'a mut IchHasher, + // collect a deterministic hash of def-ids that we have seen + def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, + hash_spans: bool, + codemap: &'a mut CachingCodemapView<'tcx>, + overflow_checks_enabled: bool, + hash_bodies: bool, +} + +impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { + pub fn new(st: &'a mut IchHasher, + tcx: TyCtxt<'hash, 'tcx, 'tcx>, + def_path_hashes: &'a mut DefPathHashes<'hash, 'tcx>, + codemap: &'a mut CachingCodemapView<'tcx>, + hash_spans: bool, + hash_bodies: bool) + -> Self { + let check_overflow 
= tcx.sess.opts.debugging_opts.force_overflow_checks + .unwrap_or(tcx.sess.opts.debug_assertions); + + StrictVersionHashVisitor { + st: st, + tcx: tcx, + def_path_hashes: def_path_hashes, + hash_spans: hash_spans, + codemap: codemap, + overflow_checks_enabled: check_overflow, + hash_bodies: hash_bodies, + } + } + + fn compute_def_id_hash(&mut self, def_id: DefId) -> u64 { + self.def_path_hashes.hash(def_id) + } + + // Hash a span in a stable way. We can't directly hash the span's BytePos + // fields (that would be similar to hashing pointers, since those are just + // offsets into the CodeMap). Instead, we hash the (file name, line, column) + // triple, which stays the same even if the containing FileMap has moved + // within the CodeMap. + // Also note that we are hashing byte offsets for the column, not unicode + // codepoint offsets. For the purpose of the hash that's sufficient. + // Also, hashing filenames is expensive so we avoid doing it twice when the + // span starts and ends in the same file, which is almost always the case. + fn hash_span(&mut self, span: Span) { + debug!("hash_span: st={:?}", self.st); + + // If this is not an empty or invalid span, we want to hash the last + // position that belongs to it, as opposed to hashing the first + // position past it. + let span_hi = if span.hi > span.lo { + // We might end up in the middle of a multibyte character here, + // but that's OK, since we are not trying to decode anything at + // this position. 
+ span.hi - BytePos(1) + } else { + span.hi + }; + + let expn_kind = match span.expn_id { + NO_EXPANSION => SawSpanExpnKind::NoExpansion, + COMMAND_LINE_EXPN => SawSpanExpnKind::CommandLine, + _ => SawSpanExpnKind::SomeExpansion, + }; + + let loc1 = self.codemap.byte_pos_to_line_and_col(span.lo); + let loc1 = loc1.as_ref() + .map(|&(ref fm, line, col)| (&fm.name[..], line, col)) + .unwrap_or(("???", 0, BytePos(0))); + + let loc2 = self.codemap.byte_pos_to_line_and_col(span_hi); + let loc2 = loc2.as_ref() + .map(|&(ref fm, line, col)| (&fm.name[..], line, col)) + .unwrap_or(("???", 0, BytePos(0))); + + let saw = if loc1.0 == loc2.0 { + SawSpan(loc1.0, + loc1.1, loc1.2, + loc2.1, loc2.2, + expn_kind) + } else { + SawSpanTwoFiles(loc1.0, loc1.1, loc1.2, + loc2.0, loc2.1, loc2.2, + expn_kind) + }; + saw.hash(self.st); + + if expn_kind == SawSpanExpnKind::SomeExpansion { + let call_site = self.codemap.codemap().source_callsite(span); + self.hash_span(call_site); + } + } + + fn hash_discriminant(&mut self, v: &T) { + unsafe { + let disr = ::std::intrinsics::discriminant_value(v); + debug!("hash_discriminant: disr={}, st={:?}", disr, self.st); + disr.hash(self.st); + } + } +} + +// To off-load the bulk of the hash-computation on #[derive(Hash)], +// we define a set of enums corresponding to the content that our +// crate visitor will encounter as it traverses the ast. +// +// The important invariant is that all of the Saw*Component enums +// do not carry any Spans, Names, or Idents. +// +// Not carrying any Names/Idents is the important fix for problem +// noted on PR #13948: using the ident.name as the basis for a +// hash leads to unstable SVH, because ident.name is just an index +// into intern table (i.e. essentially a random address), not +// computed from the name content. +// +// With the below enums, the SVH computation is not sensitive to +// artifacts of how rustc was invoked nor of how the source code +// was laid out. (Or at least it is *less* sensitive.) 
+ +// This enum represents the different potential bits of code the +// visitor could encounter that could affect the ABI for the crate, +// and assigns each a distinct tag to feed into the hash computation. +#[derive(Hash)] +enum SawAbiComponent<'a> { + + // FIXME (#14132): should we include (some function of) + // ident.ctxt as well? + SawIdent(InternedString), + SawStructDef(InternedString), + + SawLifetime, + SawLifetimeDef(usize), + + SawMod, + SawForeignItem, + SawItem(SawItemComponent), + SawTy(SawTyComponent), + SawGenerics, + SawTraitItem(SawTraitOrImplItemComponent), + SawImplItem(SawTraitOrImplItemComponent), + SawStructField, + SawVariant, + SawQPath, + SawPath(bool), + SawPathSegment, + SawPathParameters, + SawBlock, + SawPat(SawPatComponent), + SawLocal, + SawArm, + SawExpr(SawExprComponent<'a>), + SawStmt, + SawVis, + SawAssociatedItemKind(hir::AssociatedItemKind), + SawDefaultness(hir::Defaultness), + SawWherePredicate, + SawTyParamBound, + SawPolyTraitRef, + SawAssocTypeBinding, + SawAttribute(ast::AttrStyle), + SawMacroDef, + SawSpan(&'a str, + usize, BytePos, + usize, BytePos, + SawSpanExpnKind), + SawSpanTwoFiles(&'a str, usize, BytePos, + &'a str, usize, BytePos, + SawSpanExpnKind), +} + +/// SawExprComponent carries all of the information that we want +/// to include in the hash that *won't* be covered by the +/// subsequent recursive traversal of the expression's +/// substructure by the visitor. +/// +/// We know every Expr_ variant is covered by a variant because +/// `fn saw_expr` maps each to some case below. Ensuring that +/// each variant carries an appropriate payload has to be verified +/// by hand. +/// +/// (However, getting that *exactly* right is not so important +/// because the SVH is just a developer convenience; there is no +/// guarantee of collision-freedom, hash collisions are just +/// (hopefully) unlikely.) 
+/// +/// The xxxComponent enums and saw_xxx functions for Item, Pat, +/// Ty, TraitItem and ImplItem follow the same methodology. +#[derive(Hash)] +enum SawExprComponent<'a> { + + SawExprLoop(Option), + SawExprField(InternedString), + SawExprTupField(usize), + SawExprBreak(Option), + SawExprAgain(Option), + + SawExprBox, + SawExprArray, + SawExprCall, + SawExprMethodCall, + SawExprTup, + SawExprBinary(hir::BinOp_), + SawExprUnary(hir::UnOp), + SawExprLit(ast::LitKind), + SawExprLitStr(InternedString, ast::StrStyle), + SawExprLitFloat(InternedString, Option), + SawExprCast, + SawExprType, + SawExprIf, + SawExprWhile, + SawExprMatch, + SawExprClosure(CaptureClause), + SawExprBlock, + SawExprAssign, + SawExprAssignOp(hir::BinOp_), + SawExprIndex, + SawExprPath, + SawExprAddrOf(hir::Mutability), + SawExprRet, + SawExprInlineAsm(&'a hir::InlineAsm), + SawExprStruct, + SawExprRepeat, +} + +// The boolean returned indicates whether the span of this expression is always +// significant, regardless of debuginfo. +fn saw_expr<'a>(node: &'a Expr_, + overflow_checks_enabled: bool) + -> (SawExprComponent<'a>, bool) { + let binop_can_panic_at_runtime = |binop| { + match binop { + BiAdd | + BiSub | + BiMul => overflow_checks_enabled, + + BiDiv | + BiRem => true, + + BiAnd | + BiOr | + BiBitXor | + BiBitAnd | + BiBitOr | + BiShl | + BiShr | + BiEq | + BiLt | + BiLe | + BiNe | + BiGe | + BiGt => false + } + }; + + let unop_can_panic_at_runtime = |unop| { + match unop { + UnDeref | + UnNot => false, + UnNeg => overflow_checks_enabled, + } + }; + + match *node { + ExprBox(..) => (SawExprBox, false), + ExprArray(..) => (SawExprArray, false), + ExprCall(..) => (SawExprCall, false), + ExprMethodCall(..) => (SawExprMethodCall, false), + ExprTup(..) => (SawExprTup, false), + ExprBinary(op, ..) 
=> { + (SawExprBinary(op.node), binop_can_panic_at_runtime(op.node)) + } + ExprUnary(op, _) => { + (SawExprUnary(op), unop_can_panic_at_runtime(op)) + } + ExprLit(ref lit) => (saw_lit(lit), false), + ExprCast(..) => (SawExprCast, false), + ExprType(..) => (SawExprType, false), + ExprIf(..) => (SawExprIf, false), + ExprWhile(..) => (SawExprWhile, false), + ExprLoop(_, id, _) => (SawExprLoop(id.map(|id| id.node.as_str())), false), + ExprMatch(..) => (SawExprMatch, false), + ExprClosure(cc, _, _, _) => (SawExprClosure(cc), false), + ExprBlock(..) => (SawExprBlock, false), + ExprAssign(..) => (SawExprAssign, false), + ExprAssignOp(op, ..) => { + (SawExprAssignOp(op.node), binop_can_panic_at_runtime(op.node)) + } + ExprField(_, name) => (SawExprField(name.node.as_str()), false), + ExprTupField(_, id) => (SawExprTupField(id.node), false), + ExprIndex(..) => (SawExprIndex, true), + ExprPath(_) => (SawExprPath, false), + ExprAddrOf(m, _) => (SawExprAddrOf(m), false), + ExprBreak(label, _) => (SawExprBreak(label.map(|l| l.name.as_str())), false), + ExprAgain(label) => (SawExprAgain(label.map(|l| l.name.as_str())), false), + ExprRet(..) => (SawExprRet, false), + ExprInlineAsm(ref a,..) => (SawExprInlineAsm(a), false), + ExprStruct(..) => (SawExprStruct, false), + ExprRepeat(..) 
=> (SawExprRepeat, false), + } +} + +fn saw_lit(lit: &ast::Lit) -> SawExprComponent<'static> { + match lit.node { + ast::LitKind::Str(s, style) => SawExprLitStr(s.as_str(), style), + ast::LitKind::Float(s, ty) => SawExprLitFloat(s.as_str(), Some(ty)), + ast::LitKind::FloatUnsuffixed(s) => SawExprLitFloat(s.as_str(), None), + ref node @ _ => SawExprLit(node.clone()), + } +} + +#[derive(Hash)] +enum SawItemComponent { + SawItemExternCrate, + SawItemUse(UseKind), + SawItemStatic(Mutability), + SawItemConst, + SawItemFn(Unsafety, Constness, Abi), + SawItemMod, + SawItemForeignMod, + SawItemTy, + SawItemEnum, + SawItemStruct, + SawItemUnion, + SawItemTrait(Unsafety), + SawItemDefaultImpl(Unsafety), + SawItemImpl(Unsafety, ImplPolarity) +} + +fn saw_item(node: &Item_) -> SawItemComponent { + match *node { + ItemExternCrate(..) => SawItemExternCrate, + ItemUse(_, kind) => SawItemUse(kind), + ItemStatic(_, mutability, _) => SawItemStatic(mutability), + ItemConst(..) =>SawItemConst, + ItemFn(_, unsafety, constness, abi, _, _) => SawItemFn(unsafety, constness, abi), + ItemMod(..) => SawItemMod, + ItemForeignMod(..) => SawItemForeignMod, + ItemTy(..) => SawItemTy, + ItemEnum(..) => SawItemEnum, + ItemStruct(..) => SawItemStruct, + ItemUnion(..) => SawItemUnion, + ItemTrait(unsafety, ..) => SawItemTrait(unsafety), + ItemDefaultImpl(unsafety, _) => SawItemDefaultImpl(unsafety), + ItemImpl(unsafety, implpolarity, ..) => SawItemImpl(unsafety, implpolarity) + } +} + +#[derive(Hash)] +enum SawPatComponent { + SawPatWild, + SawPatBinding(BindingMode), + SawPatStruct, + SawPatTupleStruct, + SawPatPath, + SawPatTuple, + SawPatBox, + SawPatRef(Mutability), + SawPatLit, + SawPatRange, + SawPatSlice +} + +fn saw_pat(node: &PatKind) -> SawPatComponent { + match *node { + PatKind::Wild => SawPatWild, + PatKind::Binding(bindingmode, ..) => SawPatBinding(bindingmode), + PatKind::Struct(..) => SawPatStruct, + PatKind::TupleStruct(..) 
=> SawPatTupleStruct, + PatKind::Path(_) => SawPatPath, + PatKind::Tuple(..) => SawPatTuple, + PatKind::Box(..) => SawPatBox, + PatKind::Ref(_, mutability) => SawPatRef(mutability), + PatKind::Lit(..) => SawPatLit, + PatKind::Range(..) => SawPatRange, + PatKind::Slice(..) => SawPatSlice + } +} + +#[derive(Hash)] +enum SawTyComponent { + SawTySlice, + SawTyArray, + SawTyPtr(Mutability), + SawTyRptr(Mutability), + SawTyBareFn(Unsafety, Abi), + SawTyNever, + SawTyTup, + SawTyPath, + SawTyObjectSum, + SawTyPolyTraitRef, + SawTyImplTrait, + SawTyTypeof, + SawTyInfer +} + +fn saw_ty(node: &Ty_) -> SawTyComponent { + match *node { + TySlice(..) => SawTySlice, + TyArray(..) => SawTyArray, + TyPtr(ref mty) => SawTyPtr(mty.mutbl), + TyRptr(_, ref mty) => SawTyRptr(mty.mutbl), + TyBareFn(ref barefnty) => SawTyBareFn(barefnty.unsafety, barefnty.abi), + TyNever => SawTyNever, + TyTup(..) => SawTyTup, + TyPath(_) => SawTyPath, + TyObjectSum(..) => SawTyObjectSum, + TyPolyTraitRef(..) => SawTyPolyTraitRef, + TyImplTrait(..) => SawTyImplTrait, + TyTypeof(..) => SawTyTypeof, + TyInfer => SawTyInfer + } +} + +#[derive(Hash)] +enum SawTraitOrImplItemComponent { + SawTraitOrImplItemConst, + // The boolean signifies whether a body is present + SawTraitOrImplItemMethod(Unsafety, Constness, Abi, bool), + SawTraitOrImplItemType +} + +fn saw_trait_item(ti: &TraitItem_) -> SawTraitOrImplItemComponent { + match *ti { + ConstTraitItem(..) => SawTraitOrImplItemConst, + MethodTraitItem(ref sig, ref body) => + SawTraitOrImplItemMethod(sig.unsafety, sig.constness, sig.abi, body.is_some()), + TypeTraitItem(..) => SawTraitOrImplItemType + } +} + +fn saw_impl_item(ii: &ImplItemKind) -> SawTraitOrImplItemComponent { + match *ii { + ImplItemKind::Const(..) => SawTraitOrImplItemConst, + ImplItemKind::Method(ref sig, _) => + SawTraitOrImplItemMethod(sig.unsafety, sig.constness, sig.abi, true), + ImplItemKind::Type(..) 
=> SawTraitOrImplItemType + } +} + +#[derive(Clone, Copy, Hash, Eq, PartialEq)] +enum SawSpanExpnKind { + NoExpansion, + CommandLine, + SomeExpansion, +} + +macro_rules! hash_attrs { + ($visitor:expr, $attrs:expr) => ({ + let attrs = $attrs; + if attrs.len() > 0 { + $visitor.hash_attributes(attrs); + } + }) +} + +macro_rules! hash_span { + ($visitor:expr, $span:expr) => ({ + hash_span!($visitor, $span, false) + }); + ($visitor:expr, $span:expr, $force:expr) => ({ + if $force || $visitor.hash_spans { + $visitor.hash_span($span); + } + }); +} + +impl<'a, 'hash, 'tcx> visit::Visitor<'tcx> for StrictVersionHashVisitor<'a, 'hash, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> visit::NestedVisitorMap<'this, 'tcx> { + if self.hash_bodies { + visit::NestedVisitorMap::OnlyBodies(&self.tcx.map) + } else { + visit::NestedVisitorMap::None + } + } + + fn visit_variant_data(&mut self, + s: &'tcx VariantData, + name: Name, + _: &'tcx Generics, + _: NodeId, + span: Span) { + debug!("visit_variant_data: st={:?}", self.st); + SawStructDef(name.as_str()).hash(self.st); + hash_span!(self, span); + visit::walk_struct_def(self, s); + } + + fn visit_variant(&mut self, + v: &'tcx Variant, + g: &'tcx Generics, + item_id: NodeId) { + debug!("visit_variant: st={:?}", self.st); + SawVariant.hash(self.st); + hash_attrs!(self, &v.node.attrs); + visit::walk_variant(self, v, g, item_id) + } + + fn visit_name(&mut self, span: Span, name: Name) { + debug!("visit_name: st={:?}", self.st); + SawIdent(name.as_str()).hash(self.st); + hash_span!(self, span); + } + + fn visit_lifetime(&mut self, l: &'tcx Lifetime) { + debug!("visit_lifetime: st={:?}", self.st); + SawLifetime.hash(self.st); + visit::walk_lifetime(self, l); + } + + fn visit_lifetime_def(&mut self, l: &'tcx LifetimeDef) { + debug!("visit_lifetime_def: st={:?}", self.st); + SawLifetimeDef(l.bounds.len()).hash(self.st); + visit::walk_lifetime_def(self, l); + } + + fn visit_expr(&mut self, ex: &'tcx Expr) { + debug!("visit_expr: 
st={:?}", self.st); + let (saw_expr, force_span) = saw_expr(&ex.node, + self.overflow_checks_enabled); + SawExpr(saw_expr).hash(self.st); + // No need to explicitly hash the discriminant here, since we are + // implicitly hashing the discriminant of SawExprComponent. + hash_span!(self, ex.span, force_span); + hash_attrs!(self, &ex.attrs); + visit::walk_expr(self, ex) + } + + fn visit_stmt(&mut self, s: &'tcx Stmt) { + debug!("visit_stmt: st={:?}", self.st); + + // We don't want to modify the hash for decls, because + // they might be item decls (if they are local decls, + // we'll hash that fact in visit_local); but we do want to + // remember if this was a StmtExpr or StmtSemi (the later + // had an explicit semi-colon; this affects the typing + // rules). + match s.node { + StmtDecl(..) => (), + StmtExpr(..) => { + SawStmt.hash(self.st); + self.hash_discriminant(&s.node); + hash_span!(self, s.span); + } + StmtSemi(..) => { + SawStmt.hash(self.st); + self.hash_discriminant(&s.node); + hash_span!(self, s.span); + } + } + + visit::walk_stmt(self, s) + } + + fn visit_foreign_item(&mut self, i: &'tcx ForeignItem) { + debug!("visit_foreign_item: st={:?}", self.st); + + SawForeignItem.hash(self.st); + hash_span!(self, i.span); + hash_attrs!(self, &i.attrs); + visit::walk_foreign_item(self, i) + } + + fn visit_item(&mut self, i: &'tcx Item) { + debug!("visit_item: {:?} st={:?}", i, self.st); + + self.maybe_enable_overflow_checks(&i.attrs); + + SawItem(saw_item(&i.node)).hash(self.st); + hash_span!(self, i.span); + hash_attrs!(self, &i.attrs); + visit::walk_item(self, i) + } + + fn visit_mod(&mut self, m: &'tcx Mod, _s: Span, n: NodeId) { + debug!("visit_mod: st={:?}", self.st); + SawMod.hash(self.st); + visit::walk_mod(self, m, n) + } + + fn visit_ty(&mut self, t: &'tcx Ty) { + debug!("visit_ty: st={:?}", self.st); + SawTy(saw_ty(&t.node)).hash(self.st); + hash_span!(self, t.span); + visit::walk_ty(self, t) + } + + fn visit_generics(&mut self, g: &'tcx Generics) { + 
debug!("visit_generics: st={:?}", self.st); + SawGenerics.hash(self.st); + visit::walk_generics(self, g) + } + + fn visit_trait_item(&mut self, ti: &'tcx TraitItem) { + debug!("visit_trait_item: st={:?}", self.st); + + self.maybe_enable_overflow_checks(&ti.attrs); + + SawTraitItem(saw_trait_item(&ti.node)).hash(self.st); + hash_span!(self, ti.span); + hash_attrs!(self, &ti.attrs); + visit::walk_trait_item(self, ti) + } + + fn visit_impl_item(&mut self, ii: &'tcx ImplItem) { + debug!("visit_impl_item: st={:?}", self.st); + + self.maybe_enable_overflow_checks(&ii.attrs); + + SawImplItem(saw_impl_item(&ii.node)).hash(self.st); + hash_span!(self, ii.span); + hash_attrs!(self, &ii.attrs); + visit::walk_impl_item(self, ii) + } + + fn visit_struct_field(&mut self, s: &'tcx StructField) { + debug!("visit_struct_field: st={:?}", self.st); + SawStructField.hash(self.st); + hash_span!(self, s.span); + hash_attrs!(self, &s.attrs); + visit::walk_struct_field(self, s) + } + + fn visit_qpath(&mut self, qpath: &'tcx QPath, id: NodeId, span: Span) { + debug!("visit_qpath: st={:?}", self.st); + SawQPath.hash(self.st); + self.hash_discriminant(qpath); + visit::walk_qpath(self, qpath, id, span) + } + + fn visit_path(&mut self, path: &'tcx Path, _: ast::NodeId) { + debug!("visit_path: st={:?}", self.st); + SawPath(path.global).hash(self.st); + hash_span!(self, path.span); + visit::walk_path(self, path) + } + + fn visit_def_mention(&mut self, def: Def) { + self.hash_def(def); + } + + fn visit_block(&mut self, b: &'tcx Block) { + debug!("visit_block: st={:?}", self.st); + SawBlock.hash(self.st); + hash_span!(self, b.span); + visit::walk_block(self, b) + } + + fn visit_pat(&mut self, p: &'tcx Pat) { + debug!("visit_pat: st={:?}", self.st); + SawPat(saw_pat(&p.node)).hash(self.st); + hash_span!(self, p.span); + visit::walk_pat(self, p) + } + + fn visit_local(&mut self, l: &'tcx Local) { + debug!("visit_local: st={:?}", self.st); + SawLocal.hash(self.st); + hash_attrs!(self, &l.attrs); + 
visit::walk_local(self, l) + // No need to hash span, we are hashing all component spans + } + + fn visit_arm(&mut self, a: &'tcx Arm) { + debug!("visit_arm: st={:?}", self.st); + SawArm.hash(self.st); + hash_attrs!(self, &a.attrs); + visit::walk_arm(self, a) + } + + fn visit_id(&mut self, id: NodeId) { + debug!("visit_id: id={} st={:?}", id, self.st); + self.hash_resolve(id) + } + + fn visit_vis(&mut self, v: &'tcx Visibility) { + debug!("visit_vis: st={:?}", self.st); + SawVis.hash(self.st); + self.hash_discriminant(v); + visit::walk_vis(self, v) + } + + fn visit_associated_item_kind(&mut self, kind: &'tcx AssociatedItemKind) { + debug!("visit_associated_item_kind: st={:?}", self.st); + SawAssociatedItemKind(*kind).hash(self.st); + visit::walk_associated_item_kind(self, kind); + } + + fn visit_defaultness(&mut self, defaultness: &'tcx Defaultness) { + debug!("visit_associated_item_kind: st={:?}", self.st); + SawDefaultness(*defaultness).hash(self.st); + visit::walk_defaultness(self, defaultness); + } + + fn visit_where_predicate(&mut self, predicate: &'tcx WherePredicate) { + debug!("visit_where_predicate: st={:?}", self.st); + SawWherePredicate.hash(self.st); + self.hash_discriminant(predicate); + // Ignoring span. Any important nested components should be visited. 
+ visit::walk_where_predicate(self, predicate) + } + + fn visit_ty_param_bound(&mut self, bounds: &'tcx TyParamBound) { + debug!("visit_ty_param_bound: st={:?}", self.st); + SawTyParamBound.hash(self.st); + self.hash_discriminant(bounds); + // The TraitBoundModifier in TraitTyParamBound will be hash in + // visit_poly_trait_ref() + visit::walk_ty_param_bound(self, bounds) + } + + fn visit_poly_trait_ref(&mut self, t: &'tcx PolyTraitRef, m: &'tcx TraitBoundModifier) { + debug!("visit_poly_trait_ref: st={:?}", self.st); + SawPolyTraitRef.hash(self.st); + m.hash(self.st); + visit::walk_poly_trait_ref(self, t, m) + } + + fn visit_path_segment(&mut self, path_span: Span, path_segment: &'tcx PathSegment) { + debug!("visit_path_segment: st={:?}", self.st); + SawPathSegment.hash(self.st); + visit::walk_path_segment(self, path_span, path_segment) + } + + fn visit_path_parameters(&mut self, path_span: Span, path_parameters: &'tcx PathParameters) { + debug!("visit_path_parameters: st={:?}", self.st); + SawPathParameters.hash(self.st); + self.hash_discriminant(path_parameters); + visit::walk_path_parameters(self, path_span, path_parameters) + } + + fn visit_assoc_type_binding(&mut self, type_binding: &'tcx TypeBinding) { + debug!("visit_assoc_type_binding: st={:?}", self.st); + SawAssocTypeBinding.hash(self.st); + hash_span!(self, type_binding.span); + visit::walk_assoc_type_binding(self, type_binding) + } + + fn visit_attribute(&mut self, _: &ast::Attribute) { + // We explicitly do not use this method, since doing that would + // implicitly impose an order on the attributes being hashed, while we + // explicitly don't want their order to matter + } + + fn visit_macro_def(&mut self, macro_def: &'tcx MacroDef) { + debug!("visit_macro_def: st={:?}", self.st); + SawMacroDef.hash(self.st); + hash_attrs!(self, ¯o_def.attrs); + for tt in ¯o_def.body { + self.hash_token_tree(tt); + } + visit::walk_macro_def(self, macro_def) + } +} + +#[derive(Hash)] +pub enum DefHash { + SawDefId, + 
SawLabel, + SawPrimTy, + SawSelfTy, + SawErr, +} + +impl<'a, 'hash, 'tcx> StrictVersionHashVisitor<'a, 'hash, 'tcx> { + fn hash_resolve(&mut self, id: ast::NodeId) { + // Because whether or not a given id has an entry is dependent + // solely on expr variant etc, we don't need to hash whether + // or not an entry was present (we are already hashing what + // variant it is above when we visit the HIR). + + if let Some(traits) = self.tcx.trait_map.get(&id) { + debug!("hash_resolve: id={:?} traits={:?} st={:?}", id, traits, self.st); + traits.len().hash(self.st); + + // The ordering of the candidates is not fixed. So we hash + // the def-ids and then sort them and hash the collection. + let mut candidates: Vec<_> = + traits.iter() + .map(|&TraitCandidate { def_id, import_id: _ }| { + self.compute_def_id_hash(def_id) + }) + .collect(); + candidates.sort(); + candidates.hash(self.st); + } + } + + fn hash_def_id(&mut self, def_id: DefId) { + self.compute_def_id_hash(def_id).hash(self.st); + } + + fn hash_def(&mut self, def: Def) { + match def { + // Crucial point: for all of these variants, the variant + + // add'l data that is added is always the same if the + // def-id is the same, so it suffices to hash the def-id + Def::Fn(..) | + Def::Mod(..) | + Def::Static(..) | + Def::Variant(..) | + Def::VariantCtor(..) | + Def::Enum(..) | + Def::TyAlias(..) | + Def::AssociatedTy(..) | + Def::TyParam(..) | + Def::Struct(..) | + Def::StructCtor(..) | + Def::Union(..) | + Def::Trait(..) | + Def::Method(..) | + Def::Const(..) | + Def::AssociatedConst(..) | + Def::Local(..) | + Def::Upvar(..) | + Def::Macro(..) => { + DefHash::SawDefId.hash(self.st); + self.hash_def_id(def.def_id()); + } + + Def::Label(..) 
=> { + DefHash::SawLabel.hash(self.st); + // we don't encode the `id` because it always refers to something + // within this item, so if it changed, there would have to be other + // changes too + } + Def::PrimTy(ref prim_ty) => { + DefHash::SawPrimTy.hash(self.st); + prim_ty.hash(self.st); + } + Def::SelfTy(..) => { + DefHash::SawSelfTy.hash(self.st); + // the meaning of Self is always the same within a + // given context, so we don't need to hash the other + // fields + } + Def::Err => { + DefHash::SawErr.hash(self.st); + } + } + } + + fn hash_meta_item(&mut self, meta_item: &ast::MetaItem) { + debug!("hash_meta_item: st={:?}", self.st); + + // ignoring span information, it doesn't matter here + self.hash_discriminant(&meta_item.node); + meta_item.name.as_str().len().hash(self.st); + meta_item.name.as_str().hash(self.st); + + match meta_item.node { + ast::MetaItemKind::Word => {} + ast::MetaItemKind::NameValue(ref lit) => saw_lit(lit).hash(self.st), + ast::MetaItemKind::List(ref items) => { + // Sort subitems so the hash does not depend on their order + let indices = self.indices_sorted_by(&items, |p| { + (p.name().map(Symbol::as_str), fnv::hash(&p.literal().map(saw_lit))) + }); + items.len().hash(self.st); + for (index, &item_index) in indices.iter().enumerate() { + index.hash(self.st); + let nested_meta_item: &ast::NestedMetaItemKind = &items[item_index].node; + self.hash_discriminant(nested_meta_item); + match *nested_meta_item { + ast::NestedMetaItemKind::MetaItem(ref meta_item) => { + self.hash_meta_item(meta_item); + } + ast::NestedMetaItemKind::Literal(ref lit) => { + saw_lit(lit).hash(self.st); + } + } + } + } + } + } + + pub fn hash_attributes(&mut self, attributes: &[ast::Attribute]) { + debug!("hash_attributes: st={:?}", self.st); + let indices = self.indices_sorted_by(attributes, |attr| attr.name()); + + for i in indices { + let attr = &attributes[i]; + if !attr.is_sugared_doc && + !IGNORED_ATTRIBUTES.contains(&&*attr.value.name().as_str()) { + 
SawAttribute(attr.style).hash(self.st); + self.hash_meta_item(&attr.value); + } + } + } + + fn indices_sorted_by(&mut self, items: &[T], get_key: F) -> Vec + where K: Ord, + F: Fn(&T) -> K + { + let mut indices = Vec::with_capacity(items.len()); + indices.extend(0 .. items.len()); + indices.sort_by_key(|index| get_key(&items[*index])); + indices + } + + fn maybe_enable_overflow_checks(&mut self, item_attrs: &[ast::Attribute]) { + if attr::contains_name(item_attrs, "rustc_inherit_overflow_checks") { + self.overflow_checks_enabled = true; + } + } + + fn hash_token_tree(&mut self, tt: &tokenstream::TokenTree) { + self.hash_discriminant(tt); + match *tt { + tokenstream::TokenTree::Token(span, ref token) => { + hash_span!(self, span); + self.hash_token(token, span); + } + tokenstream::TokenTree::Delimited(span, ref delimited) => { + hash_span!(self, span); + let tokenstream::Delimited { + ref delim, + open_span, + ref tts, + close_span, + } = **delimited; + + delim.hash(self.st); + hash_span!(self, open_span); + tts.len().hash(self.st); + for sub_tt in tts { + self.hash_token_tree(sub_tt); + } + hash_span!(self, close_span); + } + tokenstream::TokenTree::Sequence(span, ref sequence_repetition) => { + hash_span!(self, span); + let tokenstream::SequenceRepetition { + ref tts, + ref separator, + op, + num_captures, + } = **sequence_repetition; + + tts.len().hash(self.st); + for sub_tt in tts { + self.hash_token_tree(sub_tt); + } + self.hash_discriminant(separator); + if let Some(ref separator) = *separator { + self.hash_token(separator, span); + } + op.hash(self.st); + num_captures.hash(self.st); + } + } + } + + fn hash_token(&mut self, + token: &token::Token, + error_reporting_span: Span) { + self.hash_discriminant(token); + match *token { + token::Token::Eq | + token::Token::Lt | + token::Token::Le | + token::Token::EqEq | + token::Token::Ne | + token::Token::Ge | + token::Token::Gt | + token::Token::AndAnd | + token::Token::OrOr | + token::Token::Not | + 
token::Token::Tilde | + token::Token::At | + token::Token::Dot | + token::Token::DotDot | + token::Token::DotDotDot | + token::Token::Comma | + token::Token::Semi | + token::Token::Colon | + token::Token::ModSep | + token::Token::RArrow | + token::Token::LArrow | + token::Token::FatArrow | + token::Token::Pound | + token::Token::Dollar | + token::Token::Question | + token::Token::Underscore | + token::Token::Whitespace | + token::Token::Comment | + token::Token::Eof => {} + + token::Token::BinOp(bin_op_token) | + token::Token::BinOpEq(bin_op_token) => bin_op_token.hash(self.st), + + token::Token::OpenDelim(delim_token) | + token::Token::CloseDelim(delim_token) => delim_token.hash(self.st), + + token::Token::Literal(ref lit, ref opt_name) => { + self.hash_discriminant(lit); + match *lit { + token::Lit::Byte(val) | + token::Lit::Char(val) | + token::Lit::Integer(val) | + token::Lit::Float(val) | + token::Lit::Str_(val) | + token::Lit::ByteStr(val) => val.as_str().hash(self.st), + token::Lit::StrRaw(val, n) | + token::Lit::ByteStrRaw(val, n) => { + val.as_str().hash(self.st); + n.hash(self.st); + } + }; + opt_name.map(ast::Name::as_str).hash(self.st); + } + + token::Token::Ident(ident) | + token::Token::Lifetime(ident) | + token::Token::SubstNt(ident) => ident.name.as_str().hash(self.st), + token::Token::MatchNt(ident1, ident2) => { + ident1.name.as_str().hash(self.st); + ident2.name.as_str().hash(self.st); + } + + token::Token::Interpolated(ref non_terminal) => { + // FIXME(mw): This could be implemented properly. It's just a + // lot of work, since we would need to hash the AST + // in a stable way, in addition to the HIR. + // Since this is hardly used anywhere, just emit a + // warning for now. 
+ if self.tcx.sess.opts.debugging_opts.incremental.is_some() { + let msg = format!("Quasi-quoting might make incremental \ + compilation very inefficient: {:?}", + non_terminal); + self.tcx.sess.span_warn(error_reporting_span, &msg[..]); + } + + non_terminal.hash(self.st); + } + + token::Token::DocComment(val) | + token::Token::Shebang(val) => val.as_str().hash(self.st), + } + } +} diff --git a/src/librustc_incremental/ich/fingerprint.rs b/src/librustc_incremental/ich/fingerprint.rs new file mode 100644 index 0000000000000..005ac3896ce4c --- /dev/null +++ b/src/librustc_incremental/ich/fingerprint.rs @@ -0,0 +1,81 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc_serialize::{Encodable, Decodable, Encoder, Decoder}; + +const FINGERPRINT_LENGTH: usize = 16; + +#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Clone, Copy)] +pub struct Fingerprint(pub [u8; FINGERPRINT_LENGTH]); + +impl Fingerprint { + #[inline] + pub fn zero() -> Fingerprint { + Fingerprint([0; FINGERPRINT_LENGTH]) + } + + pub fn from_smaller_hash(hash: u64) -> Fingerprint { + let mut result = Fingerprint::zero(); + result.0[0] = (hash >> 0) as u8; + result.0[1] = (hash >> 8) as u8; + result.0[2] = (hash >> 16) as u8; + result.0[3] = (hash >> 24) as u8; + result.0[4] = (hash >> 32) as u8; + result.0[5] = (hash >> 40) as u8; + result.0[6] = (hash >> 48) as u8; + result.0[7] = (hash >> 56) as u8; + result + } + + pub fn to_smaller_hash(&self) -> u64 { + ((self.0[0] as u64) << 0) | + ((self.0[1] as u64) << 8) | + ((self.0[2] as u64) << 16) | + ((self.0[3] as u64) << 24) | + ((self.0[4] as u64) << 32) | + ((self.0[5] as u64) << 40) | + ((self.0[6] as u64) << 48) | + ((self.0[7] as 
u64) << 56) + } +} + +impl Encodable for Fingerprint { + #[inline] + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + for &byte in &self.0[..] { + s.emit_u8(byte)?; + } + Ok(()) + } +} + +impl Decodable for Fingerprint { + #[inline] + fn decode(d: &mut D) -> Result { + let mut result = Fingerprint([0u8; FINGERPRINT_LENGTH]); + for byte in &mut result.0[..] { + *byte = d.read_u8()?; + } + Ok(result) + } +} + +impl ::std::fmt::Display for Fingerprint { + fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { + for i in 0 .. self.0.len() { + if i > 0 { + write!(formatter, "::")?; + } + + write!(formatter, "{}", self.0[i])?; + } + Ok(()) + } +} diff --git a/src/librustc_incremental/ich/mod.rs b/src/librustc_incremental/ich/mod.rs new file mode 100644 index 0000000000000..8edd04322d7f6 --- /dev/null +++ b/src/librustc_incremental/ich/mod.rs @@ -0,0 +1,13 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use self::fingerprint::Fingerprint; + +mod fingerprint; diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs new file mode 100644 index 0000000000000..b72766bccea21 --- /dev/null +++ b/src/librustc_incremental/lib.rs @@ -0,0 +1,56 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Support for serializing the dep-graph and reloading it. 
+ +#![crate_name = "rustc_incremental"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(rustc_private)] +#![feature(staged_api)] +#![feature(rand)] +#![feature(core_intrinsics)] + +extern crate graphviz; +#[macro_use] extern crate rustc; +extern crate rustc_data_structures; +extern crate serialize as rustc_serialize; + +#[macro_use] extern crate log; +#[macro_use] extern crate syntax; +extern crate syntax_pos; + +const ATTR_DIRTY: &'static str = "rustc_dirty"; +const ATTR_CLEAN: &'static str = "rustc_clean"; +const ATTR_DIRTY_METADATA: &'static str = "rustc_metadata_dirty"; +const ATTR_CLEAN_METADATA: &'static str = "rustc_metadata_clean"; +const ATTR_IF_THIS_CHANGED: &'static str = "rustc_if_this_changed"; +const ATTR_THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need"; + +mod assert_dep_graph; +mod calculate_svh; +mod persist; +pub mod ich; + +pub use assert_dep_graph::assert_dep_graph; +pub use calculate_svh::compute_incremental_hashes_map; +pub use calculate_svh::IncrementalHashesMap; +pub use persist::load_dep_graph; +pub use persist::save_dep_graph; +pub use persist::save_trans_partition; +pub use persist::save_work_products; +pub use persist::in_incr_comp_dir; +pub use persist::finalize_session_directory; diff --git a/src/librustc_incremental/persist/README.md b/src/librustc_incremental/persist/README.md new file mode 100644 index 0000000000000..95e0940001639 --- /dev/null +++ b/src/librustc_incremental/persist/README.md @@ -0,0 +1,13 @@ +This is the code to load/save the dependency graph. Loading is assumed +to run early in compilation, and saving at the very end. 
When loading, +the basic idea is that we will load up the dependency graph from the +previous compilation and compare the hashes of our HIR nodes to the +hashes of the HIR nodes that existed at the time. For each node whose +hash has changed, or which no longer exists in the new HIR, we can +remove that node from the old graph along with any nodes that depend +on it. Then we add what's left to the new graph (if any such nodes or +edges already exist, then there would be no effect, but since we do +this first thing, they do not). + + + diff --git a/src/librustc_incremental/persist/data.rs b/src/librustc_incremental/persist/data.rs new file mode 100644 index 0000000000000..f0e4f4f99ef08 --- /dev/null +++ b/src/librustc_incremental/persist/data.rs @@ -0,0 +1,121 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The data that we will serialize and deserialize. + +use rustc::dep_graph::{DepNode, WorkProduct, WorkProductId}; +use rustc::hir::def_id::DefIndex; +use std::sync::Arc; +use rustc_data_structures::fx::FxHashMap; +use ich::Fingerprint; + +use super::directory::DefPathIndex; + +/// Data for use when recompiling the **current crate**. +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedDepGraph { + pub edges: Vec, + + /// These are hashes of two things: + /// - the HIR nodes in this crate + /// - the metadata nodes from dependent crates we use + /// + /// In each case, we store a hash summarizing the contents of + /// those items as they were at the time we did this compilation. + /// In the case of HIR nodes, this hash is derived by walking the + /// HIR itself. 
In the case of metadata nodes, the hash is loaded + /// from saved state. + /// + /// When we do the next compile, we will load these back up and + /// compare them against the hashes we see at that time, which + /// will tell us what has changed, either in this crate or in some + /// crate that we depend on. + /// + /// Because they will be reloaded, we don't store the DefId (which + /// will be different when we next compile) related to each node, + /// but rather the `DefPathIndex`. This can then be retraced + /// to find the current def-id. + pub hashes: Vec, +} + +/// Represents a "reduced" dependency edge. Unlike the full dep-graph, +/// the dep-graph we serialize contains only edges `S -> T` where the +/// source `S` is something hashable (a HIR node or foreign metadata) +/// and the target `T` is something significant, like a work-product. +/// Normally, significant nodes are only those that have saved data on +/// disk, but in unit-testing the set of significant nodes can be +/// increased. +pub type SerializedEdge = (DepNode, DepNode); + +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedHash { + /// def-id of thing being hashed + pub dep_node: DepNode, + + /// the hash as of previous compilation, computed by code in + /// `hash` module + pub hash: Fingerprint, +} + +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedWorkProduct { + /// node that produced the work-product + pub id: Arc, + + /// work-product data itself + pub work_product: WorkProduct, +} + +/// Data for use when downstream crates get recompiled. +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedMetadataHashes { + /// For each def-id defined in this crate that appears in the + /// metadata, we hash all the inputs that were used when producing + /// the metadata. We save this after compilation is done. 
Then, + /// when some downstream crate is being recompiled, it can compare + /// the hashes we saved against the hashes that it saw from + /// before; this will tell it which of the items in this crate + /// changed, which in turn implies what items in the downstream + /// crate need to be recompiled. + /// + /// Note that we store the def-ids here. This is because we don't + /// reload this file when we recompile this crate, we will just + /// regenerate it completely with the current hashes and new def-ids. + /// + /// Then downstream creates will load up their + /// `SerializedDepGraph`, which may contain `MetaData(X)` nodes + /// where `X` refers to some item in this crate. That `X` will be + /// a `DefPathIndex` that gets retracted to the current `DefId` + /// (matching the one found in this structure). + pub hashes: Vec, + + /// For each DefIndex (as it occurs in SerializedMetadataHash), this + /// map stores the DefPathIndex (as it occurs in DefIdDirectory), so + /// that we can find the new DefId for a SerializedMetadataHash in a + /// subsequent compilation session. + /// + /// This map is only needed for running auto-tests using the + /// #[rustc_metadata_dirty] and #[rustc_metadata_clean] attributes, and + /// is only populated if -Z query-dep-graph is specified. It will be + /// empty otherwise. Importing crates are perfectly happy with just having + /// the DefIndex. + pub index_map: FxHashMap +} + +/// The hash for some metadata that (when saving) will be exported +/// from this crate, or which (when importing) was exported by an +/// upstream crate. 
+#[derive(Debug, RustcEncodable, RustcDecodable)]
+pub struct SerializedMetadataHash {
+    pub def_index: DefIndex,
+
+    /// the hash itself, computed by `calculate_item_hash`
+    pub hash: Fingerprint,
+}
diff --git a/src/librustc_incremental/persist/directory.rs b/src/librustc_incremental/persist/directory.rs
new file mode 100644
index 0000000000000..546feb212243a
--- /dev/null
+++ b/src/librustc_incremental/persist/directory.rs
@@ -0,0 +1,208 @@
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Code to convert a DefId into a DefPath (when serializing) and then
+//! back again (when deserializing). Note that the new DefId
+//! necessarily will not be the same as the old (and of course the
+//! item might even be removed in the meantime).
+
+use rustc::dep_graph::DepNode;
+use rustc::hir::map::DefPath;
+use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc::ty::TyCtxt;
+use rustc::util::nodemap::DefIdMap;
+use std::fmt::{self, Debug};
+use std::iter::once;
+use std::collections::HashMap;
+
+/// Index into the DefIdDirectory
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, Hash, PartialEq, Eq,
+         RustcEncodable, RustcDecodable)]
+pub struct DefPathIndex {
+    index: u32
+}
+
+#[derive(RustcEncodable, RustcDecodable)]
+pub struct DefIdDirectory {
+    // N.B. don't use Removable here because these def-ids are loaded
+    // directly without remapping, so loading them should not fail.
+    paths: Vec<DefPath>,
+
+    // For each crate, saves the crate-name/disambiguator so that
+    // later we can match crate-numbers up again.
+ krates: Vec, +} + +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct CrateInfo { + krate: CrateNum, + name: String, + disambiguator: String, +} + +impl DefIdDirectory { + pub fn new(krates: Vec) -> DefIdDirectory { + DefIdDirectory { paths: vec![], krates: krates } + } + + fn max_current_crate(&self, tcx: TyCtxt) -> CrateNum { + tcx.sess.cstore.crates() + .into_iter() + .max() + .unwrap_or(LOCAL_CRATE) + } + + /// Returns a string form for `index`; useful for debugging + pub fn def_path_string(&self, tcx: TyCtxt, index: DefPathIndex) -> String { + let path = &self.paths[index.index as usize]; + if self.krate_still_valid(tcx, self.max_current_crate(tcx), path.krate) { + path.to_string(tcx) + } else { + format!("", path.krate) + } + } + + pub fn krate_still_valid(&self, + tcx: TyCtxt, + max_current_crate: CrateNum, + krate: CrateNum) -> bool { + // Check that the crate-number still matches. For now, if it + // doesn't, just return None. We could do better, such as + // finding the new number. 
+ + if krate > max_current_crate { + false + } else { + let old_info = &self.krates[krate.as_usize()]; + assert_eq!(old_info.krate, krate); + let old_name: &str = &old_info.name; + let old_disambiguator: &str = &old_info.disambiguator; + let new_name: &str = &tcx.crate_name(krate).as_str(); + let new_disambiguator: &str = &tcx.crate_disambiguator(krate).as_str(); + old_name == new_name && old_disambiguator == new_disambiguator + } + } + + pub fn retrace(&self, tcx: TyCtxt) -> RetracedDefIdDirectory { + + fn make_key(name: &str, disambiguator: &str) -> String { + format!("{}/{}", name, disambiguator) + } + + let new_krates: HashMap<_, _> = + once(LOCAL_CRATE) + .chain(tcx.sess.cstore.crates()) + .map(|krate| (make_key(&tcx.crate_name(krate).as_str(), + &tcx.crate_disambiguator(krate).as_str()), krate)) + .collect(); + + let ids = self.paths.iter() + .map(|path| { + let old_krate_id = path.krate.as_usize(); + assert!(old_krate_id < self.krates.len()); + let old_crate_info = &self.krates[old_krate_id]; + let old_crate_key = make_key(&old_crate_info.name, + &old_crate_info.disambiguator); + if let Some(&new_crate_key) = new_krates.get(&old_crate_key) { + tcx.retrace_path(new_crate_key, &path.data) + } else { + debug!("crate {:?} no longer exists", old_crate_key); + None + } + }) + .collect(); + RetracedDefIdDirectory { ids: ids } + } +} + +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct RetracedDefIdDirectory { + ids: Vec> +} + +impl RetracedDefIdDirectory { + pub fn def_id(&self, index: DefPathIndex) -> Option { + self.ids[index.index as usize] + } + + pub fn map(&self, node: &DepNode) -> Option> { + node.map_def(|&index| self.def_id(index)) + } +} + +pub struct DefIdDirectoryBuilder<'a,'tcx:'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + hash: DefIdMap, + directory: DefIdDirectory, +} + +impl<'a,'tcx> DefIdDirectoryBuilder<'a,'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> DefIdDirectoryBuilder<'a, 'tcx> { + let mut krates: Vec<_> = + once(LOCAL_CRATE) + 
.chain(tcx.sess.cstore.crates()) + .map(|krate| { + CrateInfo { + krate: krate, + name: tcx.crate_name(krate).to_string(), + disambiguator: tcx.crate_disambiguator(krate).to_string() + } + }) + .collect(); + + // the result of crates() is not in order, so sort list of + // crates so that we can just index it later + krates.sort_by_key(|k| k.krate); + + DefIdDirectoryBuilder { + tcx: tcx, + hash: DefIdMap(), + directory: DefIdDirectory::new(krates), + } + } + + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx + } + + pub fn add(&mut self, def_id: DefId) -> DefPathIndex { + debug!("DefIdDirectoryBuilder: def_id={:?}", def_id); + let tcx = self.tcx; + let paths = &mut self.directory.paths; + self.hash.entry(def_id) + .or_insert_with(|| { + let def_path = tcx.def_path(def_id); + let index = paths.len() as u32; + paths.push(def_path); + DefPathIndex { index: index } + }) + .clone() + } + + pub fn lookup_def_path(&self, id: DefPathIndex) -> &DefPath { + &self.directory.paths[id.index as usize] + } + + pub fn map(&mut self, node: &DepNode) -> DepNode { + node.map_def(|&def_id| Some(self.add(def_id))).unwrap() + } + + pub fn directory(&self) -> &DefIdDirectory { + &self.directory + } +} + +impl Debug for DefIdDirectory { + fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fmt.debug_list() + .entries(self.paths.iter().enumerate()) + .finish() + } +} diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs new file mode 100644 index 0000000000000..40873011a7b8d --- /dev/null +++ b/src/librustc_incremental/persist/dirty_clean.rs @@ -0,0 +1,300 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Debugging code to test the state of the dependency graph just +//! after it is loaded from disk and just after it has been saved. +//! For each node marked with `#[rustc_clean]` or `#[rustc_dirty]`, +//! we will check that a suitable node for that item either appears +//! or does not appear in the dep-graph, as appropriate: +//! +//! - `#[rustc_dirty(label="TypeckItemBody", cfg="rev2")]` if we are +//! in `#[cfg(rev2)]`, then there MUST NOT be a node +//! `DepNode::TypeckItemBody(X)` where `X` is the def-id of the +//! current node. +//! - `#[rustc_clean(label="TypeckItemBody", cfg="rev2")]` same as above, +//! except that the node MUST exist. +//! +//! Errors are reported if we are in the suitable configuration but +//! the required condition is not met. +//! +//! The `#[rustc_metadata_dirty]` and `#[rustc_metadata_clean]` attributes +//! can be used to check the incremental compilation hash (ICH) values of +//! metadata exported in rlibs. +//! +//! - If a node is marked with `#[rustc_metadata_clean(cfg="rev2")]` we +//! check that the metadata hash for that node is the same for "rev2" +//! it was for "rev1". +//! - If a node is marked with `#[rustc_metadata_dirty(cfg="rev2")]` we +//! check that the metadata hash for that node is *different* for "rev2" +//! than it was for "rev1". +//! +//! Note that the metadata-testing attributes must never specify the +//! first revision. This would lead to a crash since there is no +//! previous revision to compare things to. +//! 
+ +use super::directory::RetracedDefIdDirectory; +use super::load::DirtyNodes; +use rustc::dep_graph::{DepGraphQuery, DepNode}; +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use syntax::ast::{self, Attribute, NestedMetaItem}; +use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +use syntax_pos::Span; +use rustc::ty::TyCtxt; +use ich::Fingerprint; + +use {ATTR_DIRTY, ATTR_CLEAN, ATTR_DIRTY_METADATA, ATTR_CLEAN_METADATA}; + +const LABEL: &'static str = "label"; +const CFG: &'static str = "cfg"; + +pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + dirty_inputs: &DirtyNodes, + retraced: &RetracedDefIdDirectory) { + // can't add `#[rustc_dirty]` etc without opting in to this feature + if !tcx.sess.features.borrow().rustc_attrs { + return; + } + + let _ignore = tcx.dep_graph.in_ignore(); + let dirty_inputs: FxHashSet> = + dirty_inputs.iter() + .filter_map(|d| retraced.map(d)) + .collect(); + let query = tcx.dep_graph.query(); + debug!("query-nodes: {:?}", query.nodes()); + let krate = tcx.map.krate(); + krate.visit_all_item_likes(&mut DirtyCleanVisitor { + tcx: tcx, + query: &query, + dirty_inputs: dirty_inputs, + }); +} + +pub struct DirtyCleanVisitor<'a, 'tcx:'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + query: &'a DepGraphQuery, + dirty_inputs: FxHashSet>, +} + +impl<'a, 'tcx> DirtyCleanVisitor<'a, 'tcx> { + fn dep_node(&self, attr: &Attribute, def_id: DefId) -> DepNode { + for item in attr.meta_item_list().unwrap_or(&[]) { + if item.check_name(LABEL) { + let value = expect_associated_value(self.tcx, item); + match DepNode::from_label_string(&value.as_str(), def_id) { + Ok(def_id) => return def_id, + Err(()) => { + self.tcx.sess.span_fatal( + item.span, + &format!("dep-node label `{}` not recognized", value)); + } + } + } + } + + self.tcx.sess.span_fatal(attr.span, "no `label` found"); + } + + fn dep_node_str(&self, dep_node: &DepNode) -> DepNode { + dep_node.map_def(|&def_id| 
Some(self.tcx.item_path_str(def_id))).unwrap() + } + + fn assert_dirty(&self, item: &hir::Item, dep_node: DepNode) { + debug!("assert_dirty({:?})", dep_node); + + match dep_node { + DepNode::Krate | + DepNode::Hir(_) | + DepNode::HirBody(_) => { + // HIR nodes are inputs, so if we are asserting that the HIR node is + // dirty, we check the dirty input set. + if !self.dirty_inputs.contains(&dep_node) { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item.span, + &format!("`{:?}` not found in dirty set, but should be dirty", + dep_node_str)); + } + } + _ => { + // Other kinds of nodes would be targets, so check if + // the dep-graph contains the node. + if self.query.contains_node(&dep_node) { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item.span, + &format!("`{:?}` found in dep graph, but should be dirty", dep_node_str)); + } + } + } + } + + fn assert_clean(&self, item: &hir::Item, dep_node: DepNode) { + debug!("assert_clean({:?})", dep_node); + + match dep_node { + DepNode::Krate | + DepNode::Hir(_) | + DepNode::HirBody(_) => { + // For HIR nodes, check the inputs. + if self.dirty_inputs.contains(&dep_node) { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item.span, + &format!("`{:?}` found in dirty-node set, but should be clean", + dep_node_str)); + } + } + _ => { + // Otherwise, check if the dep-node exists. 
+ if !self.query.contains_node(&dep_node) { + let dep_node_str = self.dep_node_str(&dep_node); + self.tcx.sess.span_err( + item.span, + &format!("`{:?}` not found in dep graph, but should be clean", + dep_node_str)); + } + } + } + } +} + +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for DirtyCleanVisitor<'a, 'tcx> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + let def_id = self.tcx.map.local_def_id(item.id); + for attr in self.tcx.get_attrs(def_id).iter() { + if attr.check_name(ATTR_DIRTY) { + if check_config(self.tcx, attr) { + self.assert_dirty(item, self.dep_node(attr, def_id)); + } + } else if attr.check_name(ATTR_CLEAN) { + if check_config(self.tcx, attr) { + self.assert_clean(item, self.dep_node(attr, def_id)); + } + } + } + } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} + +pub fn check_dirty_clean_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + prev_metadata_hashes: &FxHashMap, + current_metadata_hashes: &FxHashMap) { + if !tcx.sess.opts.debugging_opts.query_dep_graph { + return; + } + + tcx.dep_graph.with_ignore(||{ + let krate = tcx.map.krate(); + krate.visit_all_item_likes(&mut DirtyCleanMetadataVisitor { + tcx: tcx, + prev_metadata_hashes: prev_metadata_hashes, + current_metadata_hashes: current_metadata_hashes, + }); + }); +} + +pub struct DirtyCleanMetadataVisitor<'a, 'tcx:'a, 'm> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + prev_metadata_hashes: &'m FxHashMap, + current_metadata_hashes: &'m FxHashMap, +} + +impl<'a, 'tcx, 'm> ItemLikeVisitor<'tcx> for DirtyCleanMetadataVisitor<'a, 'tcx, 'm> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + let def_id = self.tcx.map.local_def_id(item.id); + + for attr in self.tcx.get_attrs(def_id).iter() { + if attr.check_name(ATTR_DIRTY_METADATA) { + if check_config(self.tcx, attr) { + self.assert_state(false, def_id, item.span); + } + } else if attr.check_name(ATTR_CLEAN_METADATA) { + if check_config(self.tcx, attr) { + self.assert_state(true, def_id, item.span); + } + } + } + } + + fn 
visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} + +impl<'a, 'tcx, 'm> DirtyCleanMetadataVisitor<'a, 'tcx, 'm> { + + fn assert_state(&self, should_be_clean: bool, def_id: DefId, span: Span) { + let item_path = self.tcx.item_path_str(def_id); + debug!("assert_state({})", item_path); + + if let Some(&prev_hash) = self.prev_metadata_hashes.get(&def_id) { + let hashes_are_equal = prev_hash == self.current_metadata_hashes[&def_id]; + + if should_be_clean && !hashes_are_equal { + self.tcx.sess.span_err( + span, + &format!("Metadata hash of `{}` is dirty, but should be clean", + item_path)); + } + + let should_be_dirty = !should_be_clean; + if should_be_dirty && hashes_are_equal { + self.tcx.sess.span_err( + span, + &format!("Metadata hash of `{}` is clean, but should be dirty", + item_path)); + } + } else { + self.tcx.sess.span_err( + span, + &format!("Could not find previous metadata hash of `{}`", + item_path)); + } + } +} + +/// Given a `#[rustc_dirty]` or `#[rustc_clean]` attribute, scan +/// for a `cfg="foo"` attribute and check whether we have a cfg +/// flag called `foo`. 
+fn check_config(tcx: TyCtxt, attr: &ast::Attribute) -> bool { + debug!("check_config(attr={:?})", attr); + let config = &tcx.sess.parse_sess.config; + debug!("check_config: config={:?}", config); + for item in attr.meta_item_list().unwrap_or(&[]) { + if item.check_name(CFG) { + let value = expect_associated_value(tcx, item); + debug!("check_config: searching for cfg {:?}", value); + return config.contains(&(value, None)); + } + } + + tcx.sess.span_fatal( + attr.span, + &format!("no cfg attribute")); +} + +fn expect_associated_value(tcx: TyCtxt, item: &NestedMetaItem) -> ast::Name { + if let Some(value) = item.value_str() { + value + } else { + let msg = if let Some(name) = item.name() { + format!("associated value expected for `{}`", name) + } else { + "expected an associated value".to_string() + }; + + tcx.sess.span_fatal(item.span, &msg); + } +} diff --git a/src/librustc_incremental/persist/file_format.rs b/src/librustc_incremental/persist/file_format.rs new file mode 100644 index 0000000000000..b67caa6750a81 --- /dev/null +++ b/src/librustc_incremental/persist/file_format.rs @@ -0,0 +1,136 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module defines a generic file format that allows to check if a given +//! file generated by incremental compilation was generated by a compatible +//! compiler version. This file format is used for the on-disk version of the +//! dependency graph and the exported metadata hashes. +//! +//! In practice "compatible compiler version" means "exactly the same compiler +//! version", since the header encodes the git commit hash of the compiler. +//! 
Since we can always just ignore the incremental compilation cache and +//! compiler versions don't change frequently for the typical user, being +//! conservative here practically has no downside. + +use std::io::{self, Read}; +use std::path::Path; +use std::fs::File; +use std::env; + +use rustc::session::Session; +use rustc::session::config::nightly_options; + +/// The first few bytes of files generated by incremental compilation +const FILE_MAGIC: &'static [u8] = b"RSIC"; + +/// Change this if the header format changes +const HEADER_FORMAT_VERSION: u16 = 0; + +/// A version string that hopefully is always different for compiler versions +/// with different encodings of incremental compilation artifacts. Contains +/// the git commit hash. +const RUSTC_VERSION: Option<&'static str> = option_env!("CFG_VERSION"); + +pub fn write_file_header(stream: &mut W) -> io::Result<()> { + stream.write_all(FILE_MAGIC)?; + stream.write_all(&[(HEADER_FORMAT_VERSION >> 0) as u8, + (HEADER_FORMAT_VERSION >> 8) as u8])?; + + let rustc_version = rustc_version(); + assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize); + stream.write_all(&[rustc_version.len() as u8])?; + stream.write_all(rustc_version.as_bytes())?; + + Ok(()) +} + +/// Reads the contents of a file with a file header as defined in this module. +/// +/// - Returns `Ok(Some(data))` if the file existed and was generated by a +/// compatible compiler version. `data` is the entire contents of the file +/// *after* the header. +/// - Returns `Ok(None)` if the file did not exist or was generated by an +/// incompatible version of the compiler. +/// - Returns `Err(..)` if some kind of IO error occurred while reading the +/// file. 
+pub fn read_file(sess: &Session, path: &Path) -> io::Result>> { + if !path.exists() { + return Ok(None); + } + + let mut file = File::open(path)?; + + // Check FILE_MAGIC + { + debug_assert!(FILE_MAGIC.len() == 4); + let mut file_magic = [0u8; 4]; + file.read_exact(&mut file_magic)?; + if file_magic != FILE_MAGIC { + report_format_mismatch(sess, path, "Wrong FILE_MAGIC"); + return Ok(None) + } + } + + // Check HEADER_FORMAT_VERSION + { + debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2); + let mut header_format_version = [0u8; 2]; + file.read_exact(&mut header_format_version)?; + let header_format_version = (header_format_version[0] as u16) | + ((header_format_version[1] as u16) << 8); + + if header_format_version != HEADER_FORMAT_VERSION { + report_format_mismatch(sess, path, "Wrong HEADER_FORMAT_VERSION"); + return Ok(None) + } + } + + // Check RUSTC_VERSION + { + let mut rustc_version_str_len = [0u8; 1]; + file.read_exact(&mut rustc_version_str_len)?; + let rustc_version_str_len = rustc_version_str_len[0] as usize; + let mut buffer = Vec::with_capacity(rustc_version_str_len); + buffer.resize(rustc_version_str_len, 0); + file.read_exact(&mut buffer[..])?; + + if &buffer[..] 
!= rustc_version().as_bytes() { + report_format_mismatch(sess, path, "Different compiler version"); + return Ok(None); + } + } + + let mut data = vec![]; + file.read_to_end(&mut data)?; + + Ok(Some(data)) +} + +fn report_format_mismatch(sess: &Session, file: &Path, message: &str) { + debug!("read_file: {}", message); + + if sess.opts.debugging_opts.incremental_info { + println!("incremental: ignoring cache artifact `{}`: {}", + file.file_name().unwrap().to_string_lossy(), + message); + } +} + +fn rustc_version() -> String { + if nightly_options::is_nightly_build() { + if let Some(val) = env::var_os("RUSTC_FORCE_INCR_COMP_ARTIFACT_HEADER") { + return val.to_string_lossy().into_owned() + } + } + + RUSTC_VERSION.expect("Cannot use rustc without explicit version for \ + incremental compilation") + .to_string() +} diff --git a/src/librustc_incremental/persist/fs.rs b/src/librustc_incremental/persist/fs.rs new file mode 100644 index 0000000000000..2ad37e98c708a --- /dev/null +++ b/src/librustc_incremental/persist/fs.rs @@ -0,0 +1,1057 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +//! This module manages how the incremental compilation cache is represented in +//! the file system. +//! +//! Incremental compilation caches are managed according to a copy-on-write +//! strategy: Once a complete, consistent cache version is finalized, it is +//! never modified. Instead, when a subsequent compilation session is started, +//! the compiler will allocate a new version of the cache that starts out as +//! a copy of the previous version. Then only this new copy is modified and it +//! will not be visible to other processes until it is finalized. This ensures +//! 
that multiple compiler processes can be executed concurrently for the same +//! crate without interfering with each other or blocking each other. +//! +//! More concretely this is implemented via the following protocol: +//! +//! 1. For a newly started compilation session, the compiler allocates a +//! new `session` directory within the incremental compilation directory. +//! This session directory will have a unique name that ends with the suffix +//! "-working" and that contains a creation timestamp. +//! 2. Next, the compiler looks for the newest finalized session directory, +//! that is, a session directory from a previous compilation session that +//! has been marked as valid and consistent. A session directory is +//! considered finalized if the "-working" suffix in the directory name has +//! been replaced by the SVH of the crate. +//! 3. Once the compiler has found a valid, finalized session directory, it will +//! hard-link/copy its contents into the new "-working" directory. If all +//! goes well, it will have its own, private copy of the source directory and +//! subsequently not have to worry about synchronizing with other compiler +//! processes. +//! 4. Now the compiler can do its normal compilation process, which involves +//! reading and updating its private session directory. +//! 5. When compilation finishes without errors, the private session directory +//! will be in a state where it can be used as input for other compilation +//! sessions. That is, it will contain a dependency graph and cache artifacts +//! that are consistent with the state of the source code it was compiled +//! from, with no need to change them ever again. At this point, the compiler +//! finalizes and "publishes" its private session directory by renaming it +//! from "s-{timestamp}-{random}-working" to "s-{timestamp}-{SVH}". +//! 6. At this point the "old" session directory that we copied our data from +//! 
at the beginning of the session has become obsolete because we have just +//! published a more current version. Thus the compiler will delete it. +//! +//! ## Garbage Collection +//! +//! Naively following the above protocol might lead to old session directories +//! piling up if a compiler instance crashes for some reason before its able to +//! remove its private session directory. In order to avoid wasting disk space, +//! the compiler also does some garbage collection each time it is started in +//! incremental compilation mode. Specifically, it will scan the incremental +//! compilation directory for private session directories that are not in use +//! any more and will delete those. It will also delete any finalized session +//! directories for a given crate except for the most recent one. +//! +//! ## Synchronization +//! +//! There is some synchronization needed in order for the compiler to be able to +//! determine whether a given private session directory is not in used any more. +//! This is done by creating a lock file for each session directory and +//! locking it while the directory is still being used. Since file locks have +//! operating system support, we can rely on the lock being released if the +//! compiler process dies for some unexpected reason. Thus, when garbage +//! collecting private session directories, the collecting process can determine +//! whether the directory is still in use by trying to acquire a lock on the +//! file. If locking the file fails, the original process must still be alive. +//! If locking the file succeeds, we know that the owning process is not alive +//! any more and we can safely delete the directory. +//! There is still a small time window between the original process creating the +//! lock file and actually locking it. In order to minimize the chance that +//! another process tries to acquire the lock in just that instance, only +//! session directories that are older than a few seconds are considered for +//! 
garbage collection. +//! +//! Another case that has to be considered is what happens if one process +//! deletes a finalized session directory that another process is currently +//! trying to copy from. This case is also handled via the lock file. Before +//! a process starts copying a finalized session directory, it will acquire a +//! shared lock on the directory's lock file. Any garbage collecting process, +//! on the other hand, will acquire an exclusive lock on the lock file. +//! Thus, if a directory is being collected, any reader process will fail +//! acquiring the shared lock and will leave the directory alone. Conversely, +//! if a collecting process can't acquire the exclusive lock because the +//! directory is currently being read from, it will leave collecting that +//! directory to another process at a later point in time. +//! The exact same scheme is also used when reading the metadata hashes file +//! from an extern crate. When a crate is compiled, the hash values of its +//! metadata are stored in a file in its session directory. When the +//! compilation session of another crate imports the first crate's metadata, +//! it also has to read in the accompanying metadata hashes. It thus will access +//! the finalized session directory of all crates it links to and while doing +//! so, it will also place a read lock on that the respective session directory +//! so that it won't be deleted while the metadata hashes are loaded. +//! +//! ## Preconditions +//! +//! This system relies on two features being available in the file system in +//! order to work really well: file locking and hard linking. +//! If hard linking is not available (like on FAT) the data in the cache +//! actually has to be copied at the beginning of each session. +//! If file locking does not work reliably (like on NFS), some of the +//! synchronization will go haywire. +//! In both cases we recommend to locate the incremental compilation directory +//! 
on a file system that supports these things. +//! It might be a good idea though to try and detect whether we are on an +//! unsupported file system and emit a warning in that case. This is not yet +//! implemented. + +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::hir::svh::Svh; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::util::fs as fs_util; +use rustc_data_structures::{flock, base_n}; +use rustc_data_structures::fx::{FxHashSet, FxHashMap}; + +use std::ffi::OsString; +use std::fs as std_fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::time::{UNIX_EPOCH, SystemTime, Duration}; +use std::__rand::{thread_rng, Rng}; + +const LOCK_FILE_EXT: &'static str = ".lock"; +const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin"; +const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin"; +const METADATA_HASHES_FILENAME: &'static str = "metadata.bin"; + +// We encode integers using the following base, so they are shorter than decimal +// or hexadecimal numbers (we want short file and directory names). Since these +// numbers will be used in file names, we choose an encoding that is not +// case-sensitive (as opposed to base64, for example). 
+const INT_ENCODE_BASE: u64 = 36; + +pub fn dep_graph_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME) +} + +pub fn work_products_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME) +} + +pub fn metadata_hash_export_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, METADATA_HASHES_FILENAME) +} + +pub fn metadata_hash_import_path(import_session_dir: &Path) -> PathBuf { + import_session_dir.join(METADATA_HASHES_FILENAME) +} + +pub fn lock_file_path(session_dir: &Path) -> PathBuf { + let crate_dir = session_dir.parent().unwrap(); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + let dash_indices: Vec<_> = directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with \ + malformed name: {}", + session_dir.display()) + } + + crate_dir.join(&directory_name[0 .. dash_indices[2]]) + .with_extension(&LOCK_FILE_EXT[1..]) +} + +pub fn in_incr_comp_dir_sess(sess: &Session, file_name: &str) -> PathBuf { + in_incr_comp_dir(&sess.incr_comp_session_dir(), file_name) +} + +pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBuf { + incr_comp_session_dir.join(file_name) +} + +/// Allocates the private session directory. The boolean in the Ok() result +/// indicates whether we should try loading a dep graph from the successfully +/// initialized directory, or not. +/// The post-condition of this fn is that we have a valid incremental +/// compilation session directory, if the result is `Ok`. A valid session +/// directory is one that contains a locked lock file. It may or may not contain +/// a dep-graph and work products from a previous session. +/// If the call fails, the fn may leave behind an invalid session directory. +/// The garbage collection will take care of it. 
+pub fn prepare_session_directory(tcx: TyCtxt) -> Result { + debug!("prepare_session_directory"); + + // {incr-comp-dir}/{crate-name-and-disambiguator} + let crate_dir = crate_path_tcx(tcx, LOCAL_CRATE); + debug!("crate-dir: {}", crate_dir.display()); + try!(create_dir(tcx.sess, &crate_dir, "crate")); + + // Hack: canonicalize the path *after creating the directory* + // because, on windows, long paths can cause problems; + // canonicalization inserts this weird prefix that makes windows + // tolerate long paths. + let crate_dir = match crate_dir.canonicalize() { + Ok(v) => v, + Err(err) => { + tcx.sess.err(&format!("incremental compilation: error canonicalizing path `{}`: {}", + crate_dir.display(), err)); + return Err(()); + } + }; + + let mut source_directories_already_tried = FxHashSet(); + + loop { + // Generate a session directory of the form: + // + // {incr-comp-dir}/{crate-name-and-disambiguator}/s-{timestamp}-{random}-working + let session_dir = generate_session_dir_path(&crate_dir); + debug!("session-dir: {}", session_dir.display()); + + // Lock the new session directory. If this fails, return an + // error without retrying + let (directory_lock, lock_file_path) = try!(lock_directory(tcx.sess, &session_dir)); + + // Now that we have the lock, we can actually create the session + // directory + try!(create_dir(tcx.sess, &session_dir, "session")); + + // Find a suitable source directory to copy from. Ignore those that we + // have already tried before. + let source_directory = find_source_directory(&crate_dir, + &source_directories_already_tried); + + let source_directory = if let Some(dir) = source_directory { + dir + } else { + // There's nowhere to copy from, we're done + debug!("no source directory found. 
Continuing with empty session \ + directory."); + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(false) + }; + + debug!("attempting to copy data from source: {}", + source_directory.display()); + + let print_file_copy_stats = tcx.sess.opts.debugging_opts.incremental_info; + + // Try copying over all files from the source directory + if let Ok(allows_links) = copy_files(&session_dir, &source_directory, + print_file_copy_stats) { + debug!("successfully copied data from: {}", + source_directory.display()); + + if !allows_links { + tcx.sess.warn(&format!("Hard linking files in the incremental \ + compilation cache failed. Copying files \ + instead. Consider moving the cache \ + directory to a file system which supports \ + hard linking in session dir `{}`", + session_dir.display()) + ); + } + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(true) + } else { + debug!("copying failed - trying next directory"); + + // Something went wrong while trying to copy/link files from the + // source directory. Try again with a different one. + source_directories_already_tried.insert(source_directory); + + // Try to remove the session directory we just allocated. We don't + // know if there's any garbage in it from the failed copy action. + if let Err(err) = safe_remove_dir_all(&session_dir) { + tcx.sess.warn(&format!("Failed to delete partly initialized \ + session dir `{}`: {}", + session_dir.display(), + err)); + } + + delete_session_dir_lock_file(tcx.sess, &lock_file_path); + mem::drop(directory_lock); + } + } +} + + +/// This function finalizes and thus 'publishes' the session directory by +/// renaming it to `s-{timestamp}-{svh}` and releasing the file lock. +/// If there have been compilation errors, however, this function will just +/// delete the presumably invalid session directory. 
+pub fn finalize_session_directory(sess: &Session, svh: Svh) { + if sess.opts.incremental.is_none() { + return; + } + + let incr_comp_session_dir: PathBuf = sess.incr_comp_session_dir().clone(); + + if sess.has_errors() { + // If there have been any errors during compilation, we don't want to + // publish this session directory. Rather, we'll just delete it. + + debug!("finalize_session_directory() - invalidating session directory: {}", + incr_comp_session_dir.display()); + + if let Err(err) = safe_remove_dir_all(&*incr_comp_session_dir) { + sess.warn(&format!("Error deleting incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + err)); + } + + let lock_file_path = lock_file_path(&*incr_comp_session_dir); + delete_session_dir_lock_file(sess, &lock_file_path); + sess.mark_incr_comp_session_as_invalid(); + } + + debug!("finalize_session_directory() - session directory: {}", + incr_comp_session_dir.display()); + + let old_sub_dir_name = incr_comp_session_dir.file_name() + .unwrap() + .to_string_lossy(); + assert_no_characters_lost(&old_sub_dir_name); + + // Keep the 's-{timestamp}-{random-number}' prefix, but replace the + // '-working' part with the SVH of the crate + let dash_indices: Vec<_> = old_sub_dir_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with \ + malformed name: {}", + incr_comp_session_dir.display()) + } + + // State: "s-{timestamp}-{random-number}-" + let mut new_sub_dir_name = String::from(&old_sub_dir_name[.. 
dash_indices[2] + 1]); + + // Append the svh + base_n::push_str(svh.as_u64(), INT_ENCODE_BASE, &mut new_sub_dir_name); + + // Create the full path + let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name); + debug!("finalize_session_directory() - new path: {}", new_path.display()); + + match std_fs::rename(&*incr_comp_session_dir, &new_path) { + Ok(_) => { + debug!("finalize_session_directory() - directory renamed successfully"); + + // This unlocks the directory + sess.finalize_incr_comp_session(new_path); + } + Err(e) => { + // Warn about the error. However, no need to abort compilation now. + sess.warn(&format!("Error finalizing incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + e)); + + debug!("finalize_session_directory() - error, marking as invalid"); + // Drop the file lock, so we can garage collect + sess.mark_incr_comp_session_as_invalid(); + } + } + + let _ = garbage_collect_session_directories(sess); +} + +pub fn delete_all_session_dir_contents(sess: &Session) -> io::Result<()> { + let sess_dir_iterator = sess.incr_comp_session_dir().read_dir()?; + for entry in sess_dir_iterator { + let entry = entry?; + safe_remove_file(&entry.path())? + } + Ok(()) +} + +fn copy_files(target_dir: &Path, + source_dir: &Path, + print_stats_on_success: bool) + -> Result { + // We acquire a shared lock on the lock file of the directory, so that + // nobody deletes it out from under us while we are reading from it. 
+ let lock_file_path = lock_file_path(source_dir); + let _lock = if let Ok(lock) = flock::Lock::new(&lock_file_path, + false, // don't wait, + false, // don't create + false) { // not exclusive + lock + } else { + // Could not acquire the lock, don't try to copy from here + return Err(()) + }; + + let source_dir_iterator = match source_dir.read_dir() { + Ok(it) => it, + Err(_) => return Err(()) + }; + + let mut files_linked = 0; + let mut files_copied = 0; + + for entry in source_dir_iterator { + match entry { + Ok(entry) => { + let file_name = entry.file_name(); + + let target_file_path = target_dir.join(file_name); + let source_path = entry.path(); + + debug!("copying into session dir: {}", source_path.display()); + match fs_util::link_or_copy(source_path, target_file_path) { + Ok(fs_util::LinkOrCopy::Link) => { + files_linked += 1 + } + Ok(fs_util::LinkOrCopy::Copy) => { + files_copied += 1 + } + Err(_) => return Err(()) + } + } + Err(_) => { + return Err(()) + } + } + } + + if print_stats_on_success { + println!("incremental: session directory: {} files hard-linked", files_linked); + println!("incremental: session directory: {} files copied", files_copied); + } + + Ok(files_linked > 0 || files_copied == 0) +} + +/// Generate unique directory path of the form: +/// {crate_dir}/s-{timestamp}-{random-number}-working +fn generate_session_dir_path(crate_dir: &Path) -> PathBuf { + let timestamp = timestamp_to_string(SystemTime::now()); + debug!("generate_session_dir_path: timestamp = {}", timestamp); + let random_number = thread_rng().next_u32(); + debug!("generate_session_dir_path: random_number = {}", random_number); + + let directory_name = format!("s-{}-{}-working", + timestamp, + base_n::encode(random_number as u64, + INT_ENCODE_BASE)); + debug!("generate_session_dir_path: directory_name = {}", directory_name); + let directory_path = crate_dir.join(directory_name); + debug!("generate_session_dir_path: directory_path = {}", directory_path.display()); + 
directory_path +} + +fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> { + match fs_util::create_dir_racy(path) { + Ok(()) => { + debug!("{} directory created successfully", dir_tag); + Ok(()) + } + Err(err) => { + sess.err(&format!("Could not create incremental compilation {} \ + directory `{}`: {}", + dir_tag, + path.display(), + err)); + Err(()) + } + } +} + +/// Allocate a the lock-file and lock it. +fn lock_directory(sess: &Session, + session_dir: &Path) + -> Result<(flock::Lock, PathBuf), ()> { + let lock_file_path = lock_file_path(session_dir); + debug!("lock_directory() - lock_file: {}", lock_file_path.display()); + + match flock::Lock::new(&lock_file_path, + false, // don't wait + true, // create the lock file + true) { // the lock should be exclusive + Ok(lock) => Ok((lock, lock_file_path)), + Err(err) => { + sess.err(&format!("incremental compilation: could not create \ + session directory lock file: {}", err)); + Err(()) + } + } +} + +fn delete_session_dir_lock_file(sess: &Session, + lock_file_path: &Path) { + if let Err(err) = safe_remove_file(&lock_file_path) { + sess.warn(&format!("Error deleting lock file for incremental \ + compilation session directory `{}`: {}", + lock_file_path.display(), + err)); + } +} + +/// Find the most recent published session directory that is not in the +/// ignore-list. 
+fn find_source_directory(crate_dir: &Path, + source_directories_already_tried: &FxHashSet) + -> Option { + let iter = crate_dir.read_dir() + .unwrap() // FIXME + .filter_map(|e| e.ok().map(|e| e.path())); + + find_source_directory_in_iter(iter, source_directories_already_tried) +} + +fn find_source_directory_in_iter(iter: I, + source_directories_already_tried: &FxHashSet) + -> Option + where I: Iterator +{ + let mut best_candidate = (UNIX_EPOCH, None); + + for session_dir in iter { + debug!("find_source_directory_in_iter - inspecting `{}`", + session_dir.display()); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + if source_directories_already_tried.contains(&session_dir) || + !is_session_directory(&directory_name) || + !is_finalized(&directory_name) { + debug!("find_source_directory_in_iter - ignoring."); + continue + } + + let timestamp = extract_timestamp_from_session_dir(&directory_name) + .unwrap_or_else(|_| { + bug!("unexpected incr-comp session dir: {}", session_dir.display()) + }); + + if timestamp > best_candidate.0 { + best_candidate = (timestamp, Some(session_dir.clone())); + } + } + + best_candidate.1 +} + +fn is_finalized(directory_name: &str) -> bool { + !directory_name.ends_with("-working") +} + +fn is_session_directory(directory_name: &str) -> bool { + directory_name.starts_with("s-") && + !directory_name.ends_with(LOCK_FILE_EXT) +} + +fn is_session_directory_lock_file(file_name: &str) -> bool { + file_name.starts_with("s-") && file_name.ends_with(LOCK_FILE_EXT) +} + +fn extract_timestamp_from_session_dir(directory_name: &str) + -> Result { + if !is_session_directory(directory_name) { + return Err(()) + } + + let dash_indices: Vec<_> = directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + return Err(()) + } + + string_to_timestamp(&directory_name[dash_indices[0]+1 .. 
dash_indices[1]]) +} + +fn timestamp_to_string(timestamp: SystemTime) -> String { + let duration = timestamp.duration_since(UNIX_EPOCH).unwrap(); + let micros = duration.as_secs() * 1_000_000 + + (duration.subsec_nanos() as u64) / 1000; + base_n::encode(micros, INT_ENCODE_BASE) +} + +fn string_to_timestamp(s: &str) -> Result { + let micros_since_unix_epoch = u64::from_str_radix(s, 36); + + if micros_since_unix_epoch.is_err() { + return Err(()) + } + + let micros_since_unix_epoch = micros_since_unix_epoch.unwrap(); + + let duration = Duration::new(micros_since_unix_epoch / 1_000_000, + 1000 * (micros_since_unix_epoch % 1_000_000) as u32); + Ok(UNIX_EPOCH + duration) +} + +fn crate_path_tcx(tcx: TyCtxt, cnum: CrateNum) -> PathBuf { + crate_path(tcx.sess, &tcx.crate_name(cnum).as_str(), &tcx.crate_disambiguator(cnum).as_str()) +} + +/// Finds the session directory containing the correct metadata hashes file for +/// the given crate. In order to do that it has to compute the crate directory +/// of the given crate, and in there, look for the session directory with the +/// correct SVH in it. +/// Note that we have to match on the exact SVH here, not just the +/// crate's (name, disambiguator) pair. The metadata hashes are only valid for +/// the exact version of the binary we are reading from now (i.e. the hashes +/// are part of the dependency graph of a specific compilation session). 
+pub fn find_metadata_hashes_for(tcx: TyCtxt, cnum: CrateNum) -> Option { + let crate_directory = crate_path_tcx(tcx, cnum); + + if !crate_directory.exists() { + return None + } + + let dir_entries = match crate_directory.read_dir() { + Ok(dir_entries) => dir_entries, + Err(e) => { + tcx.sess + .err(&format!("incremental compilation: Could not read crate directory `{}`: {}", + crate_directory.display(), e)); + return None + } + }; + + let target_svh = tcx.sess.cstore.crate_hash(cnum); + let target_svh = base_n::encode(target_svh.as_u64(), INT_ENCODE_BASE); + + let sub_dir = find_metadata_hashes_iter(&target_svh, dir_entries.filter_map(|e| { + e.ok().map(|e| e.file_name().to_string_lossy().into_owned()) + })); + + sub_dir.map(|sub_dir_name| crate_directory.join(&sub_dir_name)) +} + +fn find_metadata_hashes_iter<'a, I>(target_svh: &str, iter: I) -> Option + where I: Iterator +{ + for sub_dir_name in iter { + if !is_session_directory(&sub_dir_name) || !is_finalized(&sub_dir_name) { + // This is not a usable session directory + continue + } + + let is_match = if let Some(last_dash_pos) = sub_dir_name.rfind("-") { + let candidate_svh = &sub_dir_name[last_dash_pos + 1 .. ]; + target_svh == candidate_svh + } else { + // some kind of invalid directory name + continue + }; + + if is_match { + return Some(OsString::from(sub_dir_name)) + } + } + + None +} + +fn crate_path(sess: &Session, + crate_name: &str, + crate_disambiguator: &str) + -> PathBuf { + use std::hash::{Hasher, Hash}; + use std::collections::hash_map::DefaultHasher; + + let incr_dir = sess.opts.incremental.as_ref().unwrap().clone(); + + // The full crate disambiguator is really long. A hash of it should be + // sufficient. 
+ let mut hasher = DefaultHasher::new(); + crate_disambiguator.hash(&mut hasher); + + let crate_name = format!("{}-{}", + crate_name, + base_n::encode(hasher.finish(), INT_ENCODE_BASE)); + incr_dir.join(crate_name) +} + +fn assert_no_characters_lost(s: &str) { + if s.contains('\u{FFFD}') { + bug!("Could not losslessly convert '{}'.", s) + } +} + +fn is_old_enough_to_be_collected(timestamp: SystemTime) -> bool { + timestamp < SystemTime::now() - Duration::from_secs(10) +} + +pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> { + debug!("garbage_collect_session_directories() - begin"); + + let session_directory = sess.incr_comp_session_dir(); + debug!("garbage_collect_session_directories() - session directory: {}", + session_directory.display()); + + let crate_directory = session_directory.parent().unwrap(); + debug!("garbage_collect_session_directories() - crate directory: {}", + crate_directory.display()); + + // First do a pass over the crate directory, collecting lock files and + // session directories + let mut session_directories = FxHashSet(); + let mut lock_files = FxHashSet(); + + for dir_entry in try!(crate_directory.read_dir()) { + let dir_entry = match dir_entry { + Ok(dir_entry) => dir_entry, + _ => { + // Ignore any errors + continue + } + }; + + let entry_name = dir_entry.file_name(); + let entry_name = entry_name.to_string_lossy(); + + if is_session_directory_lock_file(&entry_name) { + assert_no_characters_lost(&entry_name); + lock_files.insert(entry_name.into_owned()); + } else if is_session_directory(&entry_name) { + assert_no_characters_lost(&entry_name); + session_directories.insert(entry_name.into_owned()); + } else { + // This is something we don't know, leave it alone + } + } + + // Now map from lock files to session directories + let lock_file_to_session_dir: FxHashMap> = + lock_files.into_iter() + .map(|lock_file_name| { + assert!(lock_file_name.ends_with(LOCK_FILE_EXT)); + let dir_prefix_end = lock_file_name.len() - 
LOCK_FILE_EXT.len(); + let session_dir = { + let dir_prefix = &lock_file_name[0 .. dir_prefix_end]; + session_directories.iter() + .find(|dir_name| dir_name.starts_with(dir_prefix)) + }; + (lock_file_name, session_dir.map(String::clone)) + }) + .collect(); + + // Delete all lock files, that don't have an associated directory. They must + // be some kind of leftover + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + if directory_name.is_none() { + let timestamp = match extract_timestamp_from_session_dir(lock_file_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found lock-file with malformed timestamp: {}", + crate_directory.join(&lock_file_name).display()); + // Ignore it + continue + } + }; + + let lock_file_path = crate_directory.join(&**lock_file_name); + + if is_old_enough_to_be_collected(timestamp) { + debug!("garbage_collect_session_directories() - deleting \ + garbage lock file: {}", lock_file_path.display()); + delete_session_dir_lock_file(sess, &lock_file_path); + } else { + debug!("garbage_collect_session_directories() - lock file with \ + no session dir not old enough to be collected: {}", + lock_file_path.display()); + } + } + } + + // Filter out `None` directories + let lock_file_to_session_dir: FxHashMap = + lock_file_to_session_dir.into_iter() + .filter_map(|(lock_file_name, directory_name)| { + directory_name.map(|n| (lock_file_name, n)) + }) + .collect(); + + let mut deletion_candidates = vec![]; + let mut definitely_delete = vec![]; + + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + debug!("garbage_collect_session_directories() - inspecting: {}", + directory_name); + + let timestamp = match extract_timestamp_from_session_dir(directory_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found session-dir with malformed timestamp: {}", + crate_directory.join(directory_name).display()); + // Ignore it + continue + } + }; + + if is_finalized(directory_name) { + let lock_file_path = 
crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + debug!("garbage_collect_session_directories() - adding \ + deletion candidate: {}", directory_name); + + // Note that we are holding on to the lock + deletion_candidates.push((timestamp, + crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else if is_old_enough_to_be_collected(timestamp) { + // When cleaning out "-working" session directories, i.e. + // session directories that might still be in use by another + // compiler instance, we only look a directories that are + // at least ten seconds old. This is supposed to reduce the + // chance of deleting a directory in the time window where + // the process has allocated the directory but has not yet + // acquired the file-lock on it. + + // Try to acquire the directory lock. If we can't, it + // means that the owning process is still alive and we + // leave this directory alone. 
+ let lock_file_path = crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + + // Note that we are holding on to the lock + definitely_delete.push((crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else { + debug!("garbage_collect_session_directories() - not finalized, not \ + old enough"); + } + } + + // Delete all but the most recent of the candidates + for (path, lock) in all_except_most_recent(deletion_candidates) { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = safe_remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect finalized incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + for (path, lock) in definitely_delete { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = safe_remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + Ok(()) +} + +fn all_except_most_recent(deletion_candidates: Vec<(SystemTime, PathBuf, Option)>) + -> FxHashMap> { + let most_recent = deletion_candidates.iter() + .map(|&(timestamp, 
..)| timestamp) + .max(); + + if let Some(most_recent) = most_recent { + deletion_candidates.into_iter() + .filter(|&(timestamp, ..)| timestamp != most_recent) + .map(|(_, path, lock)| (path, lock)) + .collect() + } else { + FxHashMap() + } +} + +/// Since paths of artifacts within session directories can get quite long, we +/// need to support deleting files with very long paths. The regular +/// WinApi functions only support paths up to 260 characters, however. In order +/// to circumvent this limitation, we canonicalize the path of the directory +/// before passing it to std::fs::remove_dir_all(). This will convert the path +/// into the '\\?\' format, which supports much longer paths. +fn safe_remove_dir_all(p: &Path) -> io::Result<()> { + if p.exists() { + let canonicalized = try!(p.canonicalize()); + std_fs::remove_dir_all(canonicalized) + } else { + Ok(()) + } +} + +fn safe_remove_file(p: &Path) -> io::Result<()> { + if p.exists() { + let canonicalized = try!(p.canonicalize()); + std_fs::remove_file(canonicalized) + } else { + Ok(()) + } +} + +#[test] +fn test_all_except_most_recent() { + assert_eq!(all_except_most_recent( + vec![ + (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None), + (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None), + (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None), + (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None), + (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None), + ]).keys().cloned().collect::>(), + vec![ + PathBuf::from("1"), + PathBuf::from("2"), + PathBuf::from("3"), + PathBuf::from("4"), + ].into_iter().collect::>() + ); + + assert_eq!(all_except_most_recent( + vec![ + ]).keys().cloned().collect::>(), + FxHashSet() + ); +} + +#[test] +fn test_timestamp_serialization() { + for i in 0 .. 
1_000u64 { + let time = UNIX_EPOCH + Duration::new(i * 1_434_578, (i as u32) * 239_000); + let s = timestamp_to_string(time); + assert_eq!(Ok(time), string_to_timestamp(&s)); + } +} + +#[test] +fn test_find_source_directory_in_iter() { + let already_visited = FxHashSet(); + + // Find newest + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-svh"), + PathBuf::from("crate-dir/s-2234-0000-svh"), + PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/s-3234-0000-svh"))); + + // Filter out "-working" + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-working"), + PathBuf::from("crate-dir/s-2234-0000-svh"), + PathBuf::from("crate-dir/s-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/s-2234-0000-svh"))); + + // Handle empty + assert_eq!(find_source_directory_in_iter(vec![].into_iter(), &already_visited), + None); + + // Handle only working + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/s-3234-0000-working"), + PathBuf::from("crate-dir/s-2234-0000-working"), + PathBuf::from("crate-dir/s-1234-0000-working")].into_iter(), &already_visited), + None); +} + +#[test] +fn test_find_metadata_hashes_iter() +{ + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2"), + String::from("s-timestamp3-testsvh3"), + ].into_iter()), + Some(OsString::from("s-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2"), + String::from("invalid-name"), + ].into_iter()), + Some(OsString::from("s-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("s-timestamp1-testsvh1"), + String::from("s-timestamp2-testsvh2-working"), + String::from("s-timestamp3-testsvh3"), + 
].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh1", + vec![ + String::from("s-timestamp1-random1-working"), + String::from("s-timestamp2-random2-working"), + String::from("s-timestamp3-random3-working"), + ].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("timestamp1-testsvh2"), + String::from("timestamp2-testsvh2"), + String::from("timestamp3-testsvh2"), + ].into_iter()), + None + ); +} diff --git a/src/librustc_incremental/persist/hash.rs b/src/librustc_incremental/persist/hash.rs new file mode 100644 index 0000000000000..e5203ea02b45a --- /dev/null +++ b/src/librustc_incremental/persist/hash.rs @@ -0,0 +1,216 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::dep_graph::DepNode; +use rustc::hir::def_id::{CrateNum, DefId}; +use rustc::hir::svh::Svh; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::flock; +use rustc_serialize::Decodable; +use rustc_serialize::opaque::Decoder; + +use IncrementalHashesMap; +use ich::Fingerprint; +use super::data::*; +use super::fs::*; +use super::file_format; + +pub struct HashContext<'a, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &'a IncrementalHashesMap, + item_metadata_hashes: FxHashMap, + crate_hashes: FxHashMap, +} + +impl<'a, 'tcx> HashContext<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &'a IncrementalHashesMap) + -> Self { + HashContext { + tcx: tcx, + incremental_hashes_map: incremental_hashes_map, + item_metadata_hashes: FxHashMap(), + crate_hashes: FxHashMap(), + } + } + + pub fn is_hashable(dep_node: &DepNode) -> bool { + match *dep_node { + DepNode::Krate | + DepNode::Hir(_) | + DepNode::HirBody(_) => + true, + DepNode::MetaData(def_id) => !def_id.is_local(), + _ => false, + } + } + + pub fn hash(&mut self, dep_node: &DepNode) -> Option { + match *dep_node { + DepNode::Krate => { + Some(self.incremental_hashes_map[dep_node]) + } + + // HIR nodes (which always come from our crate) are an input: + DepNode::Hir(def_id) | DepNode::HirBody(def_id) => { + assert!(def_id.is_local(), + "cannot hash HIR for non-local def-id {:?} => {:?}", + def_id, + self.tcx.item_path_str(def_id)); + + assert!(!self.tcx.map.is_inlined_def_id(def_id), + "cannot hash HIR for inlined def-id {:?} => {:?}", + def_id, + self.tcx.item_path_str(def_id)); + + Some(self.incremental_hashes_map[dep_node]) + } + + // MetaData from other crates is an *input* to us. + // MetaData nodes from *our* crates are an *output*; we + // don't hash them, but we do compute a hash for them and + // save it for others to use. 
+ DepNode::MetaData(def_id) if !def_id.is_local() => { + Some(self.metadata_hash(def_id)) + } + + _ => { + // Other kinds of nodes represent computed by-products + // that we don't hash directly; instead, they should + // have some transitive dependency on a Hir or + // MetaData node, so we'll just hash that + None + } + } + } + + fn metadata_hash(&mut self, def_id: DefId) -> Fingerprint { + debug!("metadata_hash(def_id={:?})", def_id); + + assert!(!def_id.is_local()); + loop { + // check whether we have a result cached for this def-id + if let Some(&hash) = self.item_metadata_hashes.get(&def_id) { + debug!("metadata_hash: def_id={:?} hash={:?}", def_id, hash); + return hash; + } + + // check whether we did not find detailed metadata for this + // krate; in that case, we just use the krate's overall hash + if let Some(&svh) = self.crate_hashes.get(&def_id.krate) { + debug!("metadata_hash: def_id={:?} crate_hash={:?}", def_id, svh); + + // micro-"optimization": avoid a cache miss if we ask + // for metadata from this particular def-id again. + let fingerprint = svh_to_fingerprint(svh); + self.item_metadata_hashes.insert(def_id, fingerprint); + + return fingerprint; + } + + // otherwise, load the data and repeat. + self.load_data(def_id.krate); + assert!(self.crate_hashes.contains_key(&def_id.krate)); + } + } + + fn load_data(&mut self, cnum: CrateNum) { + debug!("load_data(cnum={})", cnum); + + let svh = self.tcx.sess.cstore.crate_hash(cnum); + let old = self.crate_hashes.insert(cnum, svh); + debug!("load_data: svh={}", svh); + assert!(old.is_none(), "loaded data for crate {:?} twice", cnum); + + if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) { + debug!("load_data: session_dir={:?}", session_dir); + + // Lock the directory we'll be reading the hashes from. 
+ let lock_file_path = lock_file_path(&session_dir); + let _lock = match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + false) { // shared lock + Ok(lock) => lock, + Err(err) => { + debug!("Could not acquire lock on `{}` while trying to \ + load metadata hashes: {}", + lock_file_path.display(), + err); + + // Could not acquire the lock. The directory is probably in + // in the process of being deleted. It's OK to just exit + // here. It's the same scenario as if the file had not + // existed in the first place. + return + } + }; + + let hashes_file_path = metadata_hash_import_path(&session_dir); + + match file_format::read_file(self.tcx.sess, &hashes_file_path) + { + Ok(Some(data)) => { + match self.load_from_data(cnum, &data, svh) { + Ok(()) => { } + Err(err) => { + bug!("decoding error in dep-graph from `{}`: {}", + &hashes_file_path.display(), err); + } + } + } + Ok(None) => { + // If the file is not found, that's ok. + } + Err(err) => { + self.tcx.sess.err( + &format!("could not load dep information from `{}`: {}", + hashes_file_path.display(), err)); + } + } + } + } + + fn load_from_data(&mut self, + cnum: CrateNum, + data: &[u8], + expected_svh: Svh) -> Result<(), String> { + debug!("load_from_data(cnum={})", cnum); + + // Load up the hashes for the def-ids from this crate. + let mut decoder = Decoder::new(data, 0); + let svh_in_hashes_file = Svh::decode(&mut decoder)?; + + if svh_in_hashes_file != expected_svh { + // We should not be able to get here. If we do, then + // `fs::find_metadata_hashes_for()` has messed up. + bug!("mismatch between SVH in crate and SVH in incr. comp. 
hashes") + } + + let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder)?; + for serialized_hash in serialized_hashes.hashes { + // the hashes are stored with just a def-index, which is + // always relative to the old crate; convert that to use + // our internal crate number + let def_id = DefId { krate: cnum, index: serialized_hash.def_index }; + + // record the hash for this dep-node + let old = self.item_metadata_hashes.insert(def_id, serialized_hash.hash); + debug!("load_from_data: def_id={:?} hash={}", def_id, serialized_hash.hash); + assert!(old.is_none(), "already have hash for {:?}", def_id); + } + Ok(()) + } +} + +fn svh_to_fingerprint(svh: Svh) -> Fingerprint { + Fingerprint::from_smaller_hash(svh.as_u64()) +} diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs new file mode 100644 index 0000000000000..8ff04a565e96b --- /dev/null +++ b/src/librustc_incremental/persist/load.rs @@ -0,0 +1,390 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Code to save/load the dep-graph from files. 
+ +use rustc::dep_graph::DepNode; +use rustc::hir::def_id::DefId; +use rustc::hir::svh::Svh; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +use rustc_serialize::Decodable as RustcDecodable; +use rustc_serialize::opaque::Decoder; +use std::fs; +use std::path::{Path}; + +use IncrementalHashesMap; +use ich::Fingerprint; +use super::data::*; +use super::directory::*; +use super::dirty_clean; +use super::hash::*; +use super::fs::*; +use super::file_format; + +pub type DirtyNodes = FxHashSet>; + +/// If we are in incremental mode, and a previous dep-graph exists, +/// then load up those nodes/edges that are still valid into the +/// dep-graph for this session. (This is assumed to be running very +/// early in compilation, before we've really done any work, but +/// actually it doesn't matter all that much.) See `README.md` for +/// more general overview. +pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap) { + if tcx.sess.opts.incremental.is_none() { + return; + } + + match prepare_session_directory(tcx) { + Ok(true) => { + // We successfully allocated a session directory and there is + // something in it to load, so continue + } + Ok(false) => { + // We successfully allocated a session directory, but there is no + // dep-graph data in it to load (because this is the first + // compilation session with this incr. comp. dir.) + return + } + Err(()) => { + // Something went wrong while trying to allocate the session + // directory. Don't try to use it any further. 
+ return + } + } + + let _ignore = tcx.dep_graph.in_ignore(); + load_dep_graph_if_exists(tcx, incremental_hashes_map); +} + +fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap) { + let dep_graph_path = dep_graph_path(tcx.sess); + let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) { + Some(p) => p, + None => return // no file + }; + + let work_products_path = work_products_path(tcx.sess); + let work_products_data = match load_data(tcx.sess, &work_products_path) { + Some(p) => p, + None => return // no file + }; + + match decode_dep_graph(tcx, incremental_hashes_map, &dep_graph_data, &work_products_data) { + Ok(dirty_nodes) => dirty_nodes, + Err(err) => { + tcx.sess.warn( + &format!("decoding error in dep-graph from `{}` and `{}`: {}", + dep_graph_path.display(), + work_products_path.display(), + err)); + } + } +} + +fn load_data(sess: &Session, path: &Path) -> Option> { + match file_format::read_file(sess, path) { + Ok(Some(data)) => return Some(data), + Ok(None) => { + // The file either didn't exist or was produced by an incompatible + // compiler version. Neither is an error. + } + Err(err) => { + sess.err( + &format!("could not load dep-graph from `{}`: {}", + path.display(), err)); + } + } + + if let Err(err) = delete_all_session_dir_contents(sess) { + sess.err(&format!("could not clear incompatible incremental \ + compilation session directory `{}`: {}", + path.display(), err)); + } + + None +} + +/// Decode the dep graph and load the edges/nodes that are still clean +/// into `tcx.dep_graph`. 
+pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, + dep_graph_data: &[u8], + work_products_data: &[u8]) + -> Result<(), String> +{ + // Decode the list of work_products + let mut work_product_decoder = Decoder::new(work_products_data, 0); + let work_products = >::decode(&mut work_product_decoder)?; + + // Deserialize the directory and dep-graph. + let mut dep_graph_decoder = Decoder::new(dep_graph_data, 0); + let prev_commandline_args_hash = u64::decode(&mut dep_graph_decoder)?; + + if prev_commandline_args_hash != tcx.sess.opts.dep_tracking_hash() { + if tcx.sess.opts.debugging_opts.incremental_info { + println!("incremental: completely ignoring cache because of \ + differing commandline arguments"); + } + // We can't reuse the cache, purge it. + debug!("decode_dep_graph: differing commandline arg hashes"); + for swp in work_products { + delete_dirty_work_product(tcx, swp); + } + + // No need to do any further work + return Ok(()); + } + + let directory = DefIdDirectory::decode(&mut dep_graph_decoder)?; + let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?; + + // Retrace the paths in the directory to find their current location (if any). + let retraced = directory.retrace(tcx); + + // Compute the set of Hir nodes whose data has changed or which + // have been removed. These are "raw" source nodes, which means + // that they still use the original `DefPathIndex` values from the + // encoding, rather than having been retraced to a `DefId`. The + // reason for this is that this way we can include nodes that have + // been removed (which no longer have a `DefId` in the current + // compilation). + let dirty_raw_source_nodes = dirty_nodes(tcx, + incremental_hashes_map, + &serialized_dep_graph.hashes, + &retraced); + + // Create a list of (raw-source-node -> + // retracted-target-node) edges. 
In the process of retracing the + // target nodes, we may discover some of them def-paths no longer exist, + // in which case there is no need to mark the corresopnding nodes as dirty + // (they are just not present). So this list may be smaller than the original. + // + // Note though that in the common case the target nodes are + // `DepNode::WorkProduct` instances, and those don't have a + // def-id, so they will never be considered to not exist. Instead, + // we do a secondary hashing step (later, in trans) when we know + // the set of symbols that go into a work-product: if any symbols + // have been removed (or added) the hash will be different and + // we'll ignore the work-product then. + let retraced_edges: Vec<_> = + serialized_dep_graph.edges.iter() + .filter_map(|&(ref raw_source_node, ref raw_target_node)| { + retraced.map(raw_target_node) + .map(|target_node| (raw_source_node, target_node)) + }) + .collect(); + + // Compute which work-products have an input that has changed or + // been removed. Put the dirty ones into a set. + let mut dirty_target_nodes = FxHashSet(); + for &(raw_source_node, ref target_node) in &retraced_edges { + if dirty_raw_source_nodes.contains(raw_source_node) { + if !dirty_target_nodes.contains(target_node) { + dirty_target_nodes.insert(target_node.clone()); + + if tcx.sess.opts.debugging_opts.incremental_info { + // It'd be nice to pretty-print these paths better than just + // using the `Debug` impls, but wev. + println!("incremental: module {:?} is dirty because {:?} \ + changed or was removed", + target_node, + raw_source_node.map_def(|&index| { + Some(directory.def_path_string(tcx, index)) + }).unwrap()); + } + } + } + } + + // For work-products that are still clean, add their deps into the + // graph. This is needed because later we will have to save this + // back out again! 
+ let dep_graph = tcx.dep_graph.clone(); + for (raw_source_node, target_node) in retraced_edges { + if dirty_target_nodes.contains(&target_node) { + continue; + } + + let source_node = retraced.map(raw_source_node).unwrap(); + + debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source_node, target_node); + + let _task = dep_graph.in_task(target_node); + dep_graph.read(source_node); + } + + // Add in work-products that are still clean, and delete those that are + // dirty. + reconcile_work_products(tcx, work_products, &dirty_target_nodes); + + dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_source_nodes, &retraced); + + load_prev_metadata_hashes(tcx, + &retraced, + &mut *incremental_hashes_map.prev_metadata_hashes.borrow_mut()); + Ok(()) +} + +/// Computes which of the original set of def-ids are dirty. Stored in +/// a bit vector where the index is the DefPathIndex. +fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, + serialized_hashes: &[SerializedHash], + retraced: &RetracedDefIdDirectory) + -> DirtyNodes { + let mut hcx = HashContext::new(tcx, incremental_hashes_map); + let mut dirty_nodes = FxHashSet(); + + for hash in serialized_hashes { + if let Some(dep_node) = retraced.map(&hash.dep_node) { + let current_hash = hcx.hash(&dep_node).unwrap(); + if current_hash == hash.hash { + debug!("initial_dirty_nodes: {:?} is clean (hash={:?})", + dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(), + current_hash); + continue; + } + + if tcx.sess.opts.debugging_opts.incremental_dump_hash { + println!("node {:?} is dirty as hash is {:?} was {:?}", + dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(), + current_hash, + hash.hash); + } + + debug!("initial_dirty_nodes: {:?} is dirty as hash is {:?}, was {:?}", + dep_node.map_def(|&def_id| Some(tcx.def_path(def_id))).unwrap(), + current_hash, + hash.hash); + } else { + if tcx.sess.opts.debugging_opts.incremental_dump_hash { + 
println!("node {:?} is dirty as it was removed", + hash.dep_node); + } + + debug!("initial_dirty_nodes: {:?} is dirty as it was removed", + hash.dep_node); + } + + dirty_nodes.insert(hash.dep_node.clone()); + } + + dirty_nodes +} + +/// Go through the list of work-products produced in the previous run. +/// Delete any whose nodes have been found to be dirty or which are +/// otherwise no longer applicable. +fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + work_products: Vec, + dirty_target_nodes: &FxHashSet>) { + debug!("reconcile_work_products({:?})", work_products); + for swp in work_products { + if dirty_target_nodes.contains(&DepNode::WorkProduct(swp.id.clone())) { + debug!("reconcile_work_products: dep-node for {:?} is dirty", swp); + delete_dirty_work_product(tcx, swp); + } else { + let mut all_files_exist = true; + for &(_, ref file_name) in swp.work_product.saved_files.iter() { + let path = in_incr_comp_dir_sess(tcx.sess, file_name); + if !path.exists() { + all_files_exist = false; + + if tcx.sess.opts.debugging_opts.incremental_info { + println!("incremental: could not find file for up-to-date work product: {}", + path.display()); + } + } + } + + if all_files_exist { + debug!("reconcile_work_products: all files for {:?} exist", swp); + tcx.dep_graph.insert_previous_work_product(&swp.id, swp.work_product); + } else { + debug!("reconcile_work_products: some file for {:?} does not exist", swp); + delete_dirty_work_product(tcx, swp); + } + } + } +} + +fn delete_dirty_work_product(tcx: TyCtxt, + swp: SerializedWorkProduct) { + debug!("delete_dirty_work_product({:?})", swp); + for &(_, ref file_name) in &swp.work_product.saved_files { + let path = in_incr_comp_dir_sess(tcx.sess, file_name); + match fs::remove_file(&path) { + Ok(()) => { } + Err(err) => { + tcx.sess.warn( + &format!("file-system error deleting outdated file `{}`: {}", + path.display(), err)); + } + } + } +} + +fn load_prev_metadata_hashes(tcx: TyCtxt, + retraced: 
&RetracedDefIdDirectory, + output: &mut FxHashMap) { + if !tcx.sess.opts.debugging_opts.query_dep_graph { + return + } + + debug!("load_prev_metadata_hashes() - Loading previous metadata hashes"); + + let file_path = metadata_hash_export_path(tcx.sess); + + if !file_path.exists() { + debug!("load_prev_metadata_hashes() - Couldn't find file containing \ + hashes at `{}`", file_path.display()); + return + } + + debug!("load_prev_metadata_hashes() - File: {}", file_path.display()); + + let data = match file_format::read_file(tcx.sess, &file_path) { + Ok(Some(data)) => data, + Ok(None) => { + debug!("load_prev_metadata_hashes() - File produced by incompatible \ + compiler version: {}", file_path.display()); + return + } + Err(err) => { + debug!("load_prev_metadata_hashes() - Error reading file `{}`: {}", + file_path.display(), err); + return + } + }; + + debug!("load_prev_metadata_hashes() - Decoding hashes"); + let mut decoder = Decoder::new(&data, 0); + let _ = Svh::decode(&mut decoder).unwrap(); + let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder).unwrap(); + + debug!("load_prev_metadata_hashes() - Mapping DefIds"); + + assert_eq!(serialized_hashes.index_map.len(), serialized_hashes.hashes.len()); + for serialized_hash in serialized_hashes.hashes { + let def_path_index = serialized_hashes.index_map[&serialized_hash.def_index]; + if let Some(def_id) = retraced.def_id(def_path_index) { + let old = output.insert(def_id, serialized_hash.hash); + assert!(old.is_none(), "already have hash for {:?}", def_id); + } + } + + debug!("load_prev_metadata_hashes() - successfully loaded {} hashes", + serialized_hashes.index_map.len()); +} + diff --git a/src/librustc_incremental/persist/mod.rs b/src/librustc_incremental/persist/mod.rs new file mode 100644 index 0000000000000..26fcde05868b9 --- /dev/null +++ b/src/librustc_incremental/persist/mod.rs @@ -0,0 +1,31 @@ +// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! When in incremental mode, this pass dumps out the dependency graph +//! into the given directory. At the same time, it also hashes the +//! various HIR nodes. + +mod data; +mod directory; +mod dirty_clean; +mod fs; +mod hash; +mod load; +mod preds; +mod save; +mod work_product; +mod file_format; + +pub use self::fs::finalize_session_directory; +pub use self::fs::in_incr_comp_dir; +pub use self::load::load_dep_graph; +pub use self::save::save_dep_graph; +pub use self::save::save_work_products; +pub use self::work_product::save_trans_partition; diff --git a/src/librustc_incremental/persist/preds.rs b/src/librustc_incremental/persist/preds.rs new file mode 100644 index 0000000000000..e1968ce8d7b6a --- /dev/null +++ b/src/librustc_incremental/persist/preds.rs @@ -0,0 +1,74 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::dep_graph::{DepGraphQuery, DepNode}; +use rustc::hir::def_id::DefId; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::graph::{DepthFirstTraversal, INCOMING, NodeIndex}; + +use super::hash::*; +use ich::Fingerprint; + +/// A data-structure that makes it easy to enumerate the hashable +/// predecessors of any given dep-node. +pub struct Predecessors<'query> { + // - Keys: dep-nodes that may have work-products, output meta-data + // nodes. 
+ // - Values: transitive predecessors of the key that are hashable + // (e.g., HIR nodes, input meta-data nodes) + pub inputs: FxHashMap<&'query DepNode, Vec<&'query DepNode>>, + + // - Keys: some hashable node + // - Values: the hash thereof + pub hashes: FxHashMap<&'query DepNode, Fingerprint>, +} + +impl<'q> Predecessors<'q> { + pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self { + // Find nodes for which we want to know the full set of preds + let mut dfs = DepthFirstTraversal::new(&query.graph, INCOMING); + let all_nodes = query.graph.all_nodes(); + let tcx = hcx.tcx; + + let inputs: FxHashMap<_, _> = all_nodes.iter() + .enumerate() + .filter(|&(_, node)| match node.data { + DepNode::WorkProduct(_) => true, + DepNode::MetaData(ref def_id) => def_id.is_local(), + + // if -Z query-dep-graph is passed, save more extended data + // to enable better unit testing + DepNode::TypeckItemBody(_) | + DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph, + + _ => false, + }) + .map(|(node_index, node)| { + dfs.reset(NodeIndex(node_index)); + let inputs: Vec<_> = dfs.by_ref() + .map(|i| &all_nodes[i.node_id()].data) + .filter(|d| HashContext::is_hashable(d)) + .collect(); + (&node.data, inputs) + }) + .collect(); + + let mut hashes = FxHashMap(); + for input in inputs.values().flat_map(|v| v.iter().cloned()) { + hashes.entry(input) + .or_insert_with(|| hcx.hash(input).unwrap()); + } + + Predecessors { + inputs: inputs, + hashes: hashes, + } + } +} diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs new file mode 100644 index 0000000000000..1ce4bf7f03341 --- /dev/null +++ b/src/librustc_incremental/persist/save.rs @@ -0,0 +1,308 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::dep_graph::DepNode; +use rustc::hir::def_id::DefId; +use rustc::hir::svh::Svh; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::FxHashMap; +use rustc_serialize::Encodable as RustcEncodable; +use rustc_serialize::opaque::Encoder; +use std::hash::Hash; +use std::io::{self, Cursor, Write}; +use std::fs::{self, File}; +use std::path::PathBuf; + +use IncrementalHashesMap; +use ich::Fingerprint; +use super::data::*; +use super::directory::*; +use super::hash::*; +use super::preds::*; +use super::fs::*; +use super::dirty_clean; +use super::file_format; +use calculate_svh::hasher::IchHasher; + +pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, + svh: Svh) { + debug!("save_dep_graph()"); + let _ignore = tcx.dep_graph.in_ignore(); + let sess = tcx.sess; + if sess.opts.incremental.is_none() { + return; + } + + let mut builder = DefIdDirectoryBuilder::new(tcx); + let query = tcx.dep_graph.query(); + let mut hcx = HashContext::new(tcx, incremental_hashes_map); + let preds = Predecessors::new(&query, &mut hcx); + let mut current_metadata_hashes = FxHashMap(); + + // IMPORTANT: We are saving the metadata hashes *before* the dep-graph, + // since metadata-encoding might add new entries to the + // DefIdDirectory (which is saved in the dep-graph file). 
+ save_in(sess, + metadata_hash_export_path(sess), + |e| encode_metadata_hashes(tcx, + svh, + &preds, + &mut builder, + &mut current_metadata_hashes, + e)); + save_in(sess, + dep_graph_path(sess), + |e| encode_dep_graph(&preds, &mut builder, e)); + + let prev_metadata_hashes = incremental_hashes_map.prev_metadata_hashes.borrow(); + dirty_clean::check_dirty_clean_metadata(tcx, + &*prev_metadata_hashes, + ¤t_metadata_hashes); +} + +pub fn save_work_products(sess: &Session) { + if sess.opts.incremental.is_none() { + return; + } + + debug!("save_work_products()"); + let _ignore = sess.dep_graph.in_ignore(); + let path = work_products_path(sess); + save_in(sess, path, |e| encode_work_products(sess, e)); +} + +fn save_in(sess: &Session, path_buf: PathBuf, encode: F) + where F: FnOnce(&mut Encoder) -> io::Result<()> +{ + debug!("save: storing data in {}", path_buf.display()); + + // delete the old dep-graph, if any + // Note: It's important that we actually delete the old file and not just + // truncate and overwrite it, since it might be a shared hard-link, the + // underlying data of which we don't want to modify + if path_buf.exists() { + match fs::remove_file(&path_buf) { + Ok(()) => { + debug!("save: remove old file"); + } + Err(err) => { + sess.err(&format!("unable to delete old dep-graph at `{}`: {}", + path_buf.display(), + err)); + return; + } + } + } + + // generate the data in a memory buffer + let mut wr = Cursor::new(Vec::new()); + file_format::write_file_header(&mut wr).unwrap(); + match encode(&mut Encoder::new(&mut wr)) { + Ok(()) => {} + Err(err) => { + sess.err(&format!("could not encode dep-graph to `{}`: {}", + path_buf.display(), + err)); + return; + } + } + + // write the data out + let data = wr.into_inner(); + match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) { + Ok(_) => { + debug!("save: data written to disk successfully"); + } + Err(err) => { + sess.err(&format!("failed to write dep-graph to `{}`: {}", + 
path_buf.display(), + err)); + return; + } + } +} + +pub fn encode_dep_graph(preds: &Predecessors, + builder: &mut DefIdDirectoryBuilder, + encoder: &mut Encoder) + -> io::Result<()> { + // First encode the commandline arguments hash + let tcx = builder.tcx(); + tcx.sess.opts.dep_tracking_hash().encode(encoder)?; + + // Create a flat list of (Input, WorkProduct) edges for + // serialization. + let mut edges = vec![]; + for (&target, sources) in &preds.inputs { + match *target { + DepNode::MetaData(ref def_id) => { + // Metadata *targets* are always local metadata nodes. We have + // already handled those in `encode_metadata_hashes`. + assert!(def_id.is_local()); + continue; + } + _ => (), + } + let target = builder.map(target); + for &source in sources { + let source = builder.map(source); + edges.push((source, target.clone())); + } + } + + if tcx.sess.opts.debugging_opts.incremental_dump_hash { + for (dep_node, hash) in &preds.hashes { + println!("HIR hash for {:?} is {}", dep_node, hash); + } + } + + // Create the serialized dep-graph. + let graph = SerializedDepGraph { + edges: edges, + hashes: preds.hashes + .iter() + .map(|(&dep_node, &hash)| { + SerializedHash { + dep_node: builder.map(dep_node), + hash: hash, + } + }) + .collect(), + }; + + debug!("graph = {:#?}", graph); + + // Encode the directory and then the graph data. + builder.directory().encode(encoder)?; + graph.encode(encoder)?; + + Ok(()) +} + +pub fn encode_metadata_hashes(tcx: TyCtxt, + svh: Svh, + preds: &Predecessors, + builder: &mut DefIdDirectoryBuilder, + current_metadata_hashes: &mut FxHashMap, + encoder: &mut Encoder) + -> io::Result<()> { + // For each `MetaData(X)` node where `X` is local, accumulate a + // hash. These are the metadata items we export. Downstream + // crates will want to see a hash that tells them whether we might + // have changed the metadata for a given item since they last + // compiled. 
+ // + // (I initially wrote this with an iterator, but it seemed harder to read.) + let mut serialized_hashes = SerializedMetadataHashes { + hashes: vec![], + index_map: FxHashMap() + }; + + let mut def_id_hashes = FxHashMap(); + + for (&target, sources) in &preds.inputs { + let def_id = match *target { + DepNode::MetaData(def_id) => { + assert!(def_id.is_local()); + def_id + } + _ => continue, + }; + + let mut def_id_hash = |def_id: DefId| -> u64 { + *def_id_hashes.entry(def_id) + .or_insert_with(|| { + let index = builder.add(def_id); + let path = builder.lookup_def_path(index); + path.deterministic_hash(tcx) + }) + }; + + // To create the hash for each item `X`, we don't hash the raw + // bytes of the metadata (though in principle we + // could). Instead, we walk the predecessors of `MetaData(X)` + // from the dep-graph. This corresponds to all the inputs that + // were read to construct the metadata. To create the hash for + // the metadata, we hash (the hash of) all of those inputs. + debug!("save: computing metadata hash for {:?}", def_id); + + // Create a vector containing a pair of (source-id, hash). + // The source-id is stored as a `DepNode`, where the u64 + // is the det. hash of the def-path. This is convenient + // because we can sort this to get a stable ordering across + // compilations, even if the def-ids themselves have changed. 
+ let mut hashes: Vec<(DepNode, Fingerprint)> = sources.iter() + .map(|dep_node| { + let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))).unwrap(); + let hash = preds.hashes[dep_node]; + (hash_dep_node, hash) + }) + .collect(); + + hashes.sort(); + let mut state = IchHasher::new(); + hashes.hash(&mut state); + let hash = state.finish(); + + debug!("save: metadata hash for {:?} is {}", def_id, hash); + + if tcx.sess.opts.debugging_opts.incremental_dump_hash { + println!("metadata hash for {:?} is {}", def_id, hash); + for dep_node in sources { + println!("metadata hash for {:?} depends on {:?} with hash {}", + def_id, dep_node, preds.hashes[dep_node]); + } + } + + serialized_hashes.hashes.push(SerializedMetadataHash { + def_index: def_id.index, + hash: hash, + }); + } + + if tcx.sess.opts.debugging_opts.query_dep_graph { + for serialized_hash in &serialized_hashes.hashes { + let def_id = DefId::local(serialized_hash.def_index); + + // Store entry in the index_map + let def_path_index = builder.add(def_id); + serialized_hashes.index_map.insert(def_id.index, def_path_index); + + // Record hash in current_metadata_hashes + current_metadata_hashes.insert(def_id, serialized_hash.hash); + } + + debug!("save: stored index_map (len={}) for serialized hashes", + serialized_hashes.index_map.len()); + } + + // Encode everything. 
+ svh.encode(encoder)?; + serialized_hashes.encode(encoder)?; + + Ok(()) +} + +pub fn encode_work_products(sess: &Session, encoder: &mut Encoder) -> io::Result<()> { + let work_products: Vec<_> = sess.dep_graph + .work_products() + .iter() + .map(|(id, work_product)| { + SerializedWorkProduct { + id: id.clone(), + work_product: work_product.clone(), + } + }) + .collect(); + + work_products.encode(encoder) +} diff --git a/src/librustc_incremental/persist/work_product.rs b/src/librustc_incremental/persist/work_product.rs new file mode 100644 index 0000000000000..a9ebd27ce9928 --- /dev/null +++ b/src/librustc_incremental/persist/work_product.rs @@ -0,0 +1,63 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This module contains files for saving intermediate work-products. 
+ +use persist::fs::*; +use rustc::dep_graph::{WorkProduct, WorkProductId}; +use rustc::session::Session; +use rustc::session::config::OutputType; +use rustc::util::fs::link_or_copy; +use std::path::PathBuf; +use std::sync::Arc; + +pub fn save_trans_partition(sess: &Session, + cgu_name: &str, + partition_hash: u64, + files: &[(OutputType, PathBuf)]) { + debug!("save_trans_partition({:?},{},{:?})", + cgu_name, + partition_hash, + files); + if sess.opts.incremental.is_none() { + return; + } + let work_product_id = Arc::new(WorkProductId(cgu_name.to_string())); + + let saved_files: Option> = + files.iter() + .map(|&(kind, ref path)| { + let file_name = format!("cgu-{}.{}", cgu_name, kind.extension()); + let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name); + match link_or_copy(path, &path_in_incr_dir) { + Ok(_) => Some((kind, file_name)), + Err(err) => { + sess.warn(&format!("error copying object file `{}` \ + to incremental directory as `{}`: {}", + path.display(), + path_in_incr_dir.display(), + err)); + None + } + } + }) + .collect(); + let saved_files = match saved_files { + Some(v) => v, + None => return, + }; + + let work_product = WorkProduct { + input_hash: partition_hash, + saved_files: saved_files, + }; + + sess.dep_graph.insert_work_product(&work_product_id, work_product); +} diff --git a/src/librustc_lint/Cargo.toml b/src/librustc_lint/Cargo.toml new file mode 100644 index 0000000000000..4d5c0d7ba0ae1 --- /dev/null +++ b/src/librustc_lint/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_lint" +version = "0.0.0" + +[lib] +name = "rustc_lint" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +log = { path = "../liblog" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_eval = { path = "../librustc_const_eval" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_lint/bad_style.rs 
b/src/librustc_lint/bad_style.rs index b5f8be496fb02..7c3ea656124bc 100644 --- a/src/librustc_lint/bad_style.rs +++ b/src/librustc_lint/bad_style.rs @@ -8,35 +8,37 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def; -use middle::ty; +use rustc::hir::def::Def; +use rustc::ty; use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass}; use syntax::ast; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::Span; +use syntax::attr; +use syntax_pos::Span; -use rustc_front::hir; -use rustc_front::intravisit::FnKind; +use rustc::hir::{self, PatKind}; +use rustc::hir::intravisit::FnKind; #[derive(PartialEq)] pub enum MethodLateContext { TraitDefaultImpl, TraitImpl, - PlainImpl + PlainImpl, } pub fn method_context(cx: &LateContext, id: ast::NodeId, span: Span) -> MethodLateContext { let def_id = cx.tcx.map.local_def_id(id); - match cx.tcx.impl_or_trait_items.borrow().get(&def_id) { - None => cx.sess().span_bug(span, "missing method descriptor?!"), - Some(item) => match item.container() { - ty::TraitContainer(..) => MethodLateContext::TraitDefaultImpl, - ty::ImplContainer(cid) => { - match cx.tcx.impl_trait_ref(cid) { - Some(_) => MethodLateContext::TraitImpl, - None => MethodLateContext::PlainImpl + match cx.tcx.associated_items.borrow().get(&def_id) { + None => span_bug!(span, "missing method descriptor?!"), + Some(item) => { + match item.container { + ty::TraitContainer(..) 
=> MethodLateContext::TraitDefaultImpl, + ty::ImplContainer(cid) => { + match cx.tcx.impl_trait_ref(cid) { + Some(_) => MethodLateContext::TraitImpl, + None => MethodLateContext::PlainImpl, + } } } } @@ -63,27 +65,28 @@ impl NonCamelCaseTypes { // start with a non-lowercase letter rather than non-uppercase // ones (some scripts don't have a concept of upper/lowercase) - !name.is_empty() && !name.char_at(0).is_lowercase() && !name.contains('_') + !name.is_empty() && !name.chars().next().unwrap().is_lowercase() && !name.contains('_') } fn to_camel_case(s: &str) -> String { - s.split('_').flat_map(|word| word.chars().enumerate().map(|(i, c)| - if i == 0 { - c.to_uppercase().collect::() - } else { - c.to_lowercase().collect() - } - )).collect::>().concat() + s.split('_') + .flat_map(|word| { + word.chars().enumerate().map(|(i, c)| if i == 0 { + c.to_uppercase().collect::() + } else { + c.to_lowercase().collect() + }) + }) + .collect::>() + .concat() } - let s = name.as_str(); - if !is_camel_case(name) { - let c = to_camel_case(&s); + let c = to_camel_case(&name.as_str()); let m = if c.is_empty() { - format!("{} `{}` should have a camel case name such as `CamelCase`", sort, s) + format!("{} `{}` should have a camel case name such as `CamelCase`", sort, name) } else { - format!("{} `{}` should have a camel case name such as `{}`", sort, s, c) + format!("{} `{}` should have a camel case name such as `{}`", sort, name, c) }; cx.span_lint(NON_CAMEL_CASE_TYPES, span, &m[..]); } @@ -98,10 +101,14 @@ impl LintPass for NonCamelCaseTypes { impl LateLintPass for NonCamelCaseTypes { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { - let extern_repr_count = it.attrs.iter().filter(|attr| { - attr::find_repr_attrs(cx.tcx.sess.diagnostic(), attr).iter() - .any(|r| r == &attr::ReprExtern) - }).count(); + let extern_repr_count = it.attrs + .iter() + .filter(|attr| { + attr::find_repr_attrs(cx.tcx.sess.diagnostic(), attr) + .iter() + .any(|r| r == &attr::ReprExtern) + }) + 
.count(); let has_extern_repr = extern_repr_count > 0; if has_extern_repr { @@ -109,12 +116,10 @@ impl LateLintPass for NonCamelCaseTypes { } match it.node { - hir::ItemTy(..) | hir::ItemStruct(..) => { - self.check_case(cx, "type", it.name, it.span) - } - hir::ItemTrait(..) => { - self.check_case(cx, "trait", it.name, it.span) - } + hir::ItemTy(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) => self.check_case(cx, "type", it.name, it.span), + hir::ItemTrait(..) => self.check_case(cx, "trait", it.name, it.span), hir::ItemEnum(ref enum_definition, _) => { if has_extern_repr { return; @@ -124,7 +129,7 @@ impl LateLintPass for NonCamelCaseTypes { self.check_case(cx, "variant", variant.node.name, variant.span); } } - _ => () + _ => (), } } @@ -163,9 +168,7 @@ impl NonSnakeCase { continue; } for ch in s.chars() { - if !buf.is_empty() && buf != "'" - && ch.is_uppercase() - && !last_upper { + if !buf.is_empty() && buf != "'" && ch.is_uppercase() && !last_upper { words.push(buf); buf = String::new(); } @@ -203,10 +206,11 @@ impl NonSnakeCase { let sc = NonSnakeCase::to_snake_case(name); let msg = if sc != name { format!("{} `{}` should have a snake case name such as `{}`", - sort, name, sc) + sort, + name, + sc) } else { - format!("{} `{}` should have a snake case name", - sort, name) + format!("{} `{}` should have a snake case name", sort, name) }; match span { Some(span) => cx.span_lint(NON_SNAKE_CASE, span, &msg), @@ -224,32 +228,40 @@ impl LintPass for NonSnakeCase { impl LateLintPass for NonSnakeCase { fn check_crate(&mut self, cx: &LateContext, cr: &hir::Crate) { - let attr_crate_name = cr.attrs.iter().find(|at| at.check_name("crate_name")) - .and_then(|at| at.value_str().map(|s| (at, s))); + let attr_crate_name = cr.attrs + .iter() + .find(|at| at.check_name("crate_name")) + .and_then(|at| at.value_str().map(|s| (at, s))); if let Some(ref name) = cx.tcx.sess.opts.crate_name { self.check_snake_case(cx, "crate", name, None); - } else if let Some((attr, ref name)) = 
attr_crate_name { - self.check_snake_case(cx, "crate", name, Some(attr.span)); + } else if let Some((attr, name)) = attr_crate_name { + self.check_snake_case(cx, "crate", &name.as_str(), Some(attr.span)); } } - fn check_fn(&mut self, cx: &LateContext, - fk: FnKind, _: &hir::FnDecl, - _: &hir::Block, span: Span, id: ast::NodeId) { + fn check_fn(&mut self, + cx: &LateContext, + fk: FnKind, + _: &hir::FnDecl, + _: &hir::Expr, + span: Span, + id: ast::NodeId) { match fk { - FnKind::Method(name, _, _) => match method_context(cx, id, span) { - MethodLateContext::PlainImpl => { - self.check_snake_case(cx, "method", &name.as_str(), Some(span)) - }, - MethodLateContext::TraitDefaultImpl => { - self.check_snake_case(cx, "trait method", &name.as_str(), Some(span)) - }, - _ => (), - }, - FnKind::ItemFn(name, _, _, _, _, _) => { + FnKind::Method(name, ..) => { + match method_context(cx, id, span) { + MethodLateContext::PlainImpl => { + self.check_snake_case(cx, "method", &name.as_str(), Some(span)) + } + MethodLateContext::TraitDefaultImpl => { + self.check_snake_case(cx, "trait method", &name.as_str(), Some(span)) + } + _ => (), + } + } + FnKind::ItemFn(name, ..) 
=> { self.check_snake_case(cx, "function", &name.as_str(), Some(span)) - }, - _ => (), + } + FnKind::Closure(_) => (), } } @@ -261,32 +273,42 @@ impl LateLintPass for NonSnakeCase { fn check_trait_item(&mut self, cx: &LateContext, trait_item: &hir::TraitItem) { if let hir::MethodTraitItem(_, None) = trait_item.node { - self.check_snake_case(cx, "trait method", &trait_item.name.as_str(), + self.check_snake_case(cx, + "trait method", + &trait_item.name.as_str(), Some(trait_item.span)); } } fn check_lifetime_def(&mut self, cx: &LateContext, t: &hir::LifetimeDef) { - self.check_snake_case(cx, "lifetime", &t.lifetime.name.as_str(), + self.check_snake_case(cx, + "lifetime", + &t.lifetime.name.as_str(), Some(t.lifetime.span)); } fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) { - if let &hir::PatIdent(_, ref path1, _) = &p.node { - let def = cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def()); - if let Some(def::DefLocal(..)) = def { - self.check_snake_case(cx, "variable", &path1.node.name.as_str(), Some(p.span)); + // Exclude parameter names from foreign functions + let parent_node = cx.tcx.map.get_parent_node(p.id); + if let hir::map::NodeForeignItem(item) = cx.tcx.map.get(parent_node) { + if let hir::ForeignItemFn(..) = item.node { + return; } } + + if let &PatKind::Binding(_, _, ref path1, _) = &p.node { + self.check_snake_case(cx, "variable", &path1.node.as_str(), Some(p.span)); + } } - fn check_struct_def(&mut self, cx: &LateContext, s: &hir::VariantData, - _: ast::Name, _: &hir::Generics, _: ast::NodeId) { + fn check_struct_def(&mut self, + cx: &LateContext, + s: &hir::VariantData, + _: ast::Name, + _: &hir::Generics, + _: ast::NodeId) { for sf in s.fields() { - if let hir::StructField_ { kind: hir::NamedField(name, _), .. 
} = sf.node { - self.check_snake_case(cx, "structure field", &name.as_str(), - Some(sf.span)); - } + self.check_snake_case(cx, "structure field", &sf.name.as_str(), Some(sf.span)); } } } @@ -302,18 +324,19 @@ pub struct NonUpperCaseGlobals; impl NonUpperCaseGlobals { fn check_upper_case(cx: &LateContext, sort: &str, name: ast::Name, span: Span) { - let s = name.as_str(); - - if s.chars().any(|c| c.is_lowercase()) { - let uc = NonSnakeCase::to_snake_case(&s).to_uppercase(); - if uc != &s[..] { - cx.span_lint(NON_UPPER_CASE_GLOBALS, span, - &format!("{} `{}` should have an upper case name such as `{}`", - sort, s, uc)); + if name.as_str().chars().any(|c| c.is_lowercase()) { + let uc = NonSnakeCase::to_snake_case(&name.as_str()).to_uppercase(); + if name != &*uc { + cx.span_lint(NON_UPPER_CASE_GLOBALS, + span, + &format!("{} `{}` should have an upper case name such as `{}`", + sort, + name, + uc)); } else { - cx.span_lint(NON_UPPER_CASE_GLOBALS, span, - &format!("{} `{}` should have an upper case name", - sort, s)); + cx.span_lint(NON_UPPER_CASE_GLOBALS, + span, + &format!("{} `{}` should have an upper case name", sort, name)); } } } @@ -328,9 +351,8 @@ impl LintPass for NonUpperCaseGlobals { impl LateLintPass for NonUpperCaseGlobals { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { - // only check static constants - hir::ItemStatic(_, hir::MutImmutable, _) => { - NonUpperCaseGlobals::check_upper_case(cx, "static constant", it.name, it.span); + hir::ItemStatic(..) => { + NonUpperCaseGlobals::check_upper_case(cx, "static variable", it.name, it.span); } hir::ItemConst(..) => { NonUpperCaseGlobals::check_upper_case(cx, "constant", it.name, it.span); @@ -342,8 +364,7 @@ impl LateLintPass for NonUpperCaseGlobals { fn check_trait_item(&mut self, cx: &LateContext, ti: &hir::TraitItem) { match ti.node { hir::ConstTraitItem(..) 
=> { - NonUpperCaseGlobals::check_upper_case(cx, "associated constant", - ti.name, ti.span); + NonUpperCaseGlobals::check_upper_case(cx, "associated constant", ti.name, ti.span); } _ => {} } @@ -352,8 +373,7 @@ impl LateLintPass for NonUpperCaseGlobals { fn check_impl_item(&mut self, cx: &LateContext, ii: &hir::ImplItem) { match ii.node { hir::ImplItemKind::Const(..) => { - NonUpperCaseGlobals::check_upper_case(cx, "associated constant", - ii.name, ii.span); + NonUpperCaseGlobals::check_upper_case(cx, "associated constant", ii.name, ii.span); } _ => {} } @@ -361,12 +381,15 @@ impl LateLintPass for NonUpperCaseGlobals { fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) { // Lint for constants that look like binding identifiers (#7526) - match (&p.node, cx.tcx.def_map.borrow().get(&p.id).map(|d| d.full_def())) { - (&hir::PatIdent(_, ref path1, _), Some(def::DefConst(..))) => { - NonUpperCaseGlobals::check_upper_case(cx, "constant in pattern", - path1.node.name, p.span); + if let PatKind::Path(hir::QPath::Resolved(None, ref path)) = p.node { + if !path.global && path.segments.len() == 1 && path.segments[0].parameters.is_empty() { + if let Def::Const(..) = path.def { + NonUpperCaseGlobals::check_upper_case(cx, + "constant in pattern", + path.segments[0].name, + path.span); + } } - _ => {} } } } diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 8985b1e56bc00..f14fa7d4fdc23 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -28,25 +28,26 @@ //! Use the former for unit-like structs and the latter for structs with //! a `pub fn new()`. 
-use middle::{cfg, def, infer, stability, traits}; -use middle::cstore::CrateStore; -use middle::def_id::DefId; -use middle::subst::Substs; -use middle::ty::{self, Ty}; -use middle::ty::adjustment; -use rustc::front::map as hir_map; -use util::nodemap::{NodeSet}; -use lint::{Level, LateContext, LintContext, LintArray, Lint}; -use lint::{LintPass, LateLintPass}; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::cfg; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::traits::{self, Reveal}; +use rustc::hir::map as hir_map; +use util::nodemap::NodeSet; +use lint::{Level, LateContext, LintContext, LintArray}; +use lint::{LintPass, LateLintPass, EarlyLintPass, EarlyContext}; use std::collections::HashSet; -use syntax::{ast}; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::{self, Span}; +use syntax::ast; +use syntax::attr; +use syntax::feature_gate::{AttributeGate, AttributeType, Stability, deprecated_attributes}; +use syntax_pos::Span; -use rustc_front::hir; -use rustc_front::intravisit::FnKind; +use rustc::hir::{self, PatKind}; +use rustc::hir::intravisit::FnKind; use bad_style::{MethodLateContext, method_context}; @@ -70,10 +71,11 @@ impl LintPass for WhileTrue { impl LateLintPass for WhileTrue { fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - if let hir::ExprWhile(ref cond, _, _) = e.node { + if let hir::ExprWhile(ref cond, ..) = e.node { if let hir::ExprLit(ref lit) = cond.node { - if let ast::LitBool(true) = lit.node { - cx.span_lint(WHILE_TRUE, e.span, + if let ast::LitKind::Bool(true) = lit.node { + cx.span_lint(WHILE_TRUE, + e.span, "denote infinite loops with loop { ... }"); } } @@ -91,8 +93,7 @@ declare_lint! 
{ pub struct BoxPointers; impl BoxPointers { - fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext<'a, 'tcx>, - span: Span, ty: Ty<'tcx>) { + fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext<'a, 'tcx>, span: Span, ty: Ty<'tcx>) { for leaf_ty in ty.walk() { if let ty::TyBox(_) = leaf_ty.sty { let m = format!("type uses owned (Box type) pointers: {}", ty); @@ -114,26 +115,30 @@ impl LateLintPass for BoxPointers { hir::ItemFn(..) | hir::ItemTy(..) | hir::ItemEnum(..) | - hir::ItemStruct(..) => - self.check_heap_type(cx, it.span, - cx.tcx.node_id_to_type(it.id)), + hir::ItemStruct(..) | + hir::ItemUnion(..) => { + let def_id = cx.tcx.map.local_def_id(it.id); + self.check_heap_type(cx, it.span, cx.tcx.item_type(def_id)) + } _ => () } // If it's a struct, we also have to check the fields' types match it.node { - hir::ItemStruct(ref struct_def, _) => { + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { for struct_field in struct_def.fields() { + let def_id = cx.tcx.map.local_def_id(struct_field.id); self.check_heap_type(cx, struct_field.span, - cx.tcx.node_id_to_type(struct_field.node.id)); + cx.tcx.item_type(def_id)); } } - _ => () + _ => (), } } fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - let ty = cx.tcx.node_id_to_type(e.id); + let ty = cx.tcx.tables().node_id_to_type(e.id); self.check_heap_type(cx, e.span, ty); } } @@ -155,25 +160,18 @@ impl LintPass for NonShorthandFieldPatterns { impl LateLintPass for NonShorthandFieldPatterns { fn check_pat(&mut self, cx: &LateContext, pat: &hir::Pat) { - let def_map = cx.tcx.def_map.borrow(); - if let hir::PatStruct(_, ref v, _) = pat.node { - let field_pats = v.iter().filter(|fieldpat| { + if let PatKind::Struct(_, ref field_pats, _) = pat.node { + for fieldpat in field_pats { if fieldpat.node.is_shorthand { - return false; - } - let def = def_map.get(&fieldpat.node.pat.id).map(|d| d.full_def()); - if let Some(def_id) = cx.tcx.map.opt_local_def_id(fieldpat.node.pat.id) { - def 
== Some(def::DefLocal(def_id, fieldpat.node.pat.id)) - } else { - false + continue; } - }); - for fieldpat in field_pats { - if let hir::PatIdent(_, ident, None) = fieldpat.node.pat.node { - if ident.node.unhygienic_name == fieldpat.node.name { - cx.span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span, + if let PatKind::Binding(_, _, ident, None) = fieldpat.node.pat.node { + if ident.node == fieldpat.node.name { + cx.span_lint(NON_SHORTHAND_FIELD_PATTERNS, + fieldpat.span, &format!("the `{}:` in this pattern is redundant and can \ - be removed", ident.node)) + be removed", + ident.node)) } } } @@ -208,27 +206,35 @@ impl LateLintPass for UnsafeCode { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { - hir::ItemTrait(hir::Unsafety::Unsafe, _, _, _) => - cx.span_lint(UNSAFE_CODE, it.span, "declaration of an `unsafe` trait"), + hir::ItemTrait(hir::Unsafety::Unsafe, ..) => { + cx.span_lint(UNSAFE_CODE, it.span, "declaration of an `unsafe` trait") + } - hir::ItemImpl(hir::Unsafety::Unsafe, _, _, _, _, _) => - cx.span_lint(UNSAFE_CODE, it.span, "implementation of an `unsafe` trait"), + hir::ItemImpl(hir::Unsafety::Unsafe, ..) => { + cx.span_lint(UNSAFE_CODE, it.span, "implementation of an `unsafe` trait") + } _ => return, } } - fn check_fn(&mut self, cx: &LateContext, fk: FnKind, _: &hir::FnDecl, - _: &hir::Block, span: Span, _: ast::NodeId) { + fn check_fn(&mut self, + cx: &LateContext, + fk: FnKind, + _: &hir::FnDecl, + _: &hir::Expr, + span: Span, + _: ast::NodeId) { match fk { - FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, _, _, _) => - cx.span_lint(UNSAFE_CODE, span, "declaration of an `unsafe` function"), + FnKind::ItemFn(_, _, hir::Unsafety::Unsafe, ..) => { + cx.span_lint(UNSAFE_CODE, span, "declaration of an `unsafe` function") + } - FnKind::Method(_, sig, _) => { + FnKind::Method(_, sig, ..) 
=> { if sig.unsafety == hir::Unsafety::Unsafe { cx.span_lint(UNSAFE_CODE, span, "implementation of an `unsafe` method") } - }, + } _ => (), } @@ -237,7 +243,8 @@ impl LateLintPass for UnsafeCode { fn check_trait_item(&mut self, cx: &LateContext, trait_item: &hir::TraitItem) { if let hir::MethodTraitItem(ref sig, None) = trait_item.node { if sig.unsafety == hir::Unsafety::Unsafe { - cx.span_lint(UNSAFE_CODE, trait_item.span, + cx.span_lint(UNSAFE_CODE, + trait_item.span, "declaration of an `unsafe` method") } } @@ -268,9 +275,9 @@ pub struct MissingDoc { impl MissingDoc { pub fn new() -> MissingDoc { MissingDoc { - struct_def_stack: vec!(), + struct_def_stack: vec![], in_variant: false, - doc_hidden_stack: vec!(false), + doc_hidden_stack: vec![false], private_traits: HashSet::new(), } } @@ -280,11 +287,11 @@ impl MissingDoc { } fn check_missing_docs_attrs(&self, - cx: &LateContext, - id: Option, - attrs: &[ast::Attribute], - sp: Span, - desc: &'static str) { + cx: &LateContext, + id: Option, + attrs: &[ast::Attribute], + sp: Span, + desc: &'static str) { // If we're building a test harness, then warning about // documentation is probably not really relevant right now. 
if cx.sess().opts.test { @@ -305,14 +312,10 @@ impl MissingDoc { } } - let has_doc = attrs.iter().any(|a| { - match a.node.value.node { - ast::MetaNameValue(ref name, _) if *name == "doc" => true, - _ => false - } - }); + let has_doc = attrs.iter().any(|a| a.is_value_str() && a.name() == "doc"); if !has_doc { - cx.span_lint(MISSING_DOCS, sp, + cx.span_lint(MISSING_DOCS, + sp, &format!("missing documentation for {}", desc)); } } @@ -326,10 +329,12 @@ impl LintPass for MissingDoc { impl LateLintPass for MissingDoc { fn enter_lint_attrs(&mut self, _: &LateContext, attrs: &[ast::Attribute]) { - let doc_hidden = self.doc_hidden() || attrs.iter().any(|attr| { - attr.check_name("doc") && match attr.meta_item_list() { + let doc_hidden = self.doc_hidden() || + attrs.iter().any(|attr| { + attr.check_name("doc") && + match attr.meta_item_list() { None => false, - Some(l) => attr::contains_name(&l[..], "hidden"), + Some(l) => attr::list_contains_name(&l[..], "hidden"), } }); self.doc_hidden_stack.push(doc_hidden); @@ -339,13 +344,21 @@ impl LateLintPass for MissingDoc { self.doc_hidden_stack.pop().expect("empty doc_hidden_stack"); } - fn check_struct_def(&mut self, _: &LateContext, _: &hir::VariantData, - _: ast::Name, _: &hir::Generics, item_id: ast::NodeId) { + fn check_struct_def(&mut self, + _: &LateContext, + _: &hir::VariantData, + _: ast::Name, + _: &hir::Generics, + item_id: ast::NodeId) { self.struct_def_stack.push(item_id); } - fn check_struct_def_post(&mut self, _: &LateContext, _: &hir::VariantData, - _: ast::Name, _: &hir::Generics, item_id: ast::NodeId) { + fn check_struct_def_post(&mut self, + _: &LateContext, + _: &hir::VariantData, + _: ast::Name, + _: &hir::Generics, + item_id: ast::NodeId) { let popped = self.struct_def_stack.pop().expect("empty struct_def_stack"); assert!(popped == item_id); } @@ -360,44 +373,49 @@ impl LateLintPass for MissingDoc { hir::ItemMod(..) => "a module", hir::ItemEnum(..) => "an enum", hir::ItemStruct(..) 
=> "a struct", - hir::ItemTrait(_, _, _, ref items) => { + hir::ItemUnion(..) => "a union", + hir::ItemTrait(.., ref items) => { // Issue #11592, traits are always considered exported, even when private. if it.vis == hir::Visibility::Inherited { self.private_traits.insert(it.id); for itm in items { self.private_traits.insert(itm.id); } - return + return; } "a trait" - }, + } hir::ItemTy(..) => "a type alias", - hir::ItemImpl(_, _, _, Some(ref trait_ref), _, ref impl_items) => { + hir::ItemImpl(.., Some(ref trait_ref), _, ref impl_item_refs) => { // If the trait is private, add the impl items to private_traits so they don't get // reported for missing docs. - let real_trait = cx.tcx.trait_ref_to_def_id(trait_ref); + let real_trait = trait_ref.path.def.def_id(); if let Some(node_id) = cx.tcx.map.as_local_node_id(real_trait) { match cx.tcx.map.find(node_id) { - Some(hir_map::NodeItem(item)) => if item.vis == hir::Visibility::Inherited { - for itm in impl_items { - self.private_traits.insert(itm.id); + Some(hir_map::NodeItem(item)) => { + if item.vis == hir::Visibility::Inherited { + for impl_item_ref in impl_item_refs { + self.private_traits.insert(impl_item_ref.id.node_id); + } } - }, - _ => { } + } + _ => {} } } - return - }, + return; + } hir::ItemConst(..) => "a constant", hir::ItemStatic(..) => "a static", - _ => return + _ => return, }; self.check_missing_docs_attrs(cx, Some(it.id), &it.attrs, it.span, desc); } fn check_trait_item(&mut self, cx: &LateContext, trait_item: &hir::TraitItem) { - if self.private_traits.contains(&trait_item.id) { return } + if self.private_traits.contains(&trait_item.id) { + return; + } let desc = match trait_item.node { hir::ConstTraitItem(..) => "an associated constant", @@ -405,9 +423,11 @@ impl LateLintPass for MissingDoc { hir::TypeTraitItem(..) 
=> "an associated type", }; - self.check_missing_docs_attrs(cx, Some(trait_item.id), + self.check_missing_docs_attrs(cx, + Some(trait_item.id), &trait_item.attrs, - trait_item.span, desc); + trait_item.span, + desc); } fn check_impl_item(&mut self, cx: &LateContext, impl_item: &hir::ImplItem) { @@ -421,26 +441,34 @@ impl LateLintPass for MissingDoc { hir::ImplItemKind::Method(..) => "a method", hir::ImplItemKind::Type(_) => "an associated type", }; - self.check_missing_docs_attrs(cx, Some(impl_item.id), + self.check_missing_docs_attrs(cx, + Some(impl_item.id), &impl_item.attrs, - impl_item.span, desc); + impl_item.span, + desc); } fn check_struct_field(&mut self, cx: &LateContext, sf: &hir::StructField) { - if let hir::NamedField(_, vis) = sf.node.kind { - if vis == hir::Public || self.in_variant { - let cur_struct_def = *self.struct_def_stack.last() + if !sf.is_positional() { + if sf.vis == hir::Public || self.in_variant { + let cur_struct_def = *self.struct_def_stack + .last() .expect("empty struct_def_stack"); - self.check_missing_docs_attrs(cx, Some(cur_struct_def), - &sf.node.attrs, sf.span, + self.check_missing_docs_attrs(cx, + Some(cur_struct_def), + &sf.attrs, + sf.span, "a struct field") } } } fn check_variant(&mut self, cx: &LateContext, v: &hir::Variant, _: &hir::Generics) { - self.check_missing_docs_attrs(cx, Some(v.node.data.id()), - &v.node.attrs, v.span, "a variant"); + self.check_missing_docs_attrs(cx, + Some(v.node.data.id()), + &v.node.attrs, + v.span, + "a variant"); assert!(!self.in_variant); self.in_variant = true; } @@ -477,27 +505,34 @@ impl LateLintPass for MissingCopyImplementations { return; } let def = cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); - (def, cx.tcx.mk_struct(def, - cx.tcx.mk_substs(Substs::empty()))) + (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) + } + hir::ItemUnion(_, ref ast_generics) => { + if ast_generics.is_parameterized() { + return; + } + let def = 
cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); + (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) } hir::ItemEnum(_, ref ast_generics) => { if ast_generics.is_parameterized() { return; } let def = cx.tcx.lookup_adt_def(cx.tcx.map.local_def_id(item.id)); - (def, cx.tcx.mk_enum(def, - cx.tcx.mk_substs(Substs::empty()))) + (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) } _ => return, }; - if def.has_dtor() { return; } + if def.has_dtor() { + return; + } let parameter_environment = cx.tcx.empty_parameter_environment(); // FIXME (@jroesch) should probably inver this so that the parameter env still impls this // method - if !ty.moves_by_default(¶meter_environment, item.span) { + if !ty.moves_by_default(cx.tcx, ¶meter_environment, item.span) { return; } - if parameter_environment.can_type_implement_copy(ty, item.span).is_ok() { + if parameter_environment.can_type_implement_copy(cx.tcx, ty, item.span).is_ok() { cx.span_lint(MISSING_COPY_IMPLEMENTATIONS, item.span, "type could implement `Copy`; consider adding `impl \ @@ -518,9 +553,7 @@ pub struct MissingDebugImplementations { impl MissingDebugImplementations { pub fn new() -> MissingDebugImplementations { - MissingDebugImplementations { - impling_types: None, - } + MissingDebugImplementations { impling_types: None } } } @@ -537,7 +570,9 @@ impl LateLintPass for MissingDebugImplementations { } match item.node { - hir::ItemStruct(..) | hir::ItemEnum(..) => {}, + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemEnum(..) 
=> {} _ => return, } @@ -550,11 +585,9 @@ impl LateLintPass for MissingDebugImplementations { let debug_def = cx.tcx.lookup_trait_def(debug); let mut impls = NodeSet(); debug_def.for_each_impl(cx.tcx, |d| { - if let Some(n) = cx.tcx.map.as_local_node_id(d) { - if let Some(ty_def) = cx.tcx.node_id_to_type(n).ty_to_def_id() { - if let Some(node_id) = cx.tcx.map.as_local_node_id(ty_def) { - impls.insert(node_id); - } + if let Some(ty_def) = cx.tcx.item_type(d).ty_to_def_id() { + if let Some(node_id) = cx.tcx.map.as_local_node_id(ty_def) { + impls.insert(node_id); } } }); @@ -573,73 +606,50 @@ impl LateLintPass for MissingDebugImplementations { } declare_lint! { - DEPRECATED, + DEPRECATED_ATTR, Warn, - "detects use of deprecated items" + "detects use of deprecated attributes" } -/// Checks for use of items with `#[deprecated]` or `#[rustc_deprecated]` attributes -#[derive(Copy, Clone)] -pub struct Deprecated; - -impl Deprecated { - fn lint(&self, cx: &LateContext, _id: DefId, span: Span, - stability: &Option<&attr::Stability>, deprecation: &Option) { - // Deprecated attributes apply in-crate and cross-crate. - if let Some(&attr::Stability{rustc_depr: Some(attr::RustcDeprecation{ref reason, ..}), ..}) - = *stability { - output(cx, DEPRECATED, span, Some(&reason)) - } else if let Some(attr::Deprecation{ref note, ..}) = *deprecation { - output(cx, DEPRECATED, span, note.as_ref().map(|x| &**x)) - } - - fn output(cx: &LateContext, lint: &'static Lint, span: Span, note: Option<&str>) { - let msg = if let Some(note) = note { - format!("use of deprecated item: {}", note) - } else { - format!("use of deprecated item") - }; +/// Checks for use of attributes which have been deprecated. +#[derive(Clone)] +pub struct DeprecatedAttr { + // This is not free to compute, so we want to keep it around, rather than + // compute it for every attribute. 
+ depr_attrs: Vec<&'static (&'static str, AttributeType, AttributeGate)>, +} - cx.span_lint(lint, span, &msg); +impl DeprecatedAttr { + pub fn new() -> DeprecatedAttr { + DeprecatedAttr { + depr_attrs: deprecated_attributes(), } } } -impl LintPass for Deprecated { +impl LintPass for DeprecatedAttr { fn get_lints(&self) -> LintArray { - lint_array!(DEPRECATED) + lint_array!(DEPRECATED_ATTR) } } -impl LateLintPass for Deprecated { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { - stability::check_item(cx.tcx, item, false, - &mut |id, sp, stab, depr| - self.lint(cx, id, sp, &stab, &depr)); - } - - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - stability::check_expr(cx.tcx, e, - &mut |id, sp, stab, depr| - self.lint(cx, id, sp, &stab, &depr)); - } - - fn check_path(&mut self, cx: &LateContext, path: &hir::Path, id: ast::NodeId) { - stability::check_path(cx.tcx, path, id, - &mut |id, sp, stab, depr| - self.lint(cx, id, sp, &stab, &depr)); - } - - fn check_path_list_item(&mut self, cx: &LateContext, item: &hir::PathListItem) { - stability::check_path_list_item(cx.tcx, item, - &mut |id, sp, stab, depr| - self.lint(cx, id, sp, &stab, &depr)); - } - - fn check_pat(&mut self, cx: &LateContext, pat: &hir::Pat) { - stability::check_pat(cx.tcx, pat, - &mut |id, sp, stab, depr| - self.lint(cx, id, sp, &stab, &depr)); +impl EarlyLintPass for DeprecatedAttr { + fn check_attribute(&mut self, cx: &EarlyContext, attr: &ast::Attribute) { + let name = attr.name(); + for &&(n, _, ref g) in &self.depr_attrs { + if name == n { + if let &AttributeGate::Gated(Stability::Deprecated(link), + ref name, + ref reason, + _) = g { + cx.span_lint(DEPRECATED, + attr.span, + &format!("use of deprecated attribute `{}`: {}. 
See {}", + name, reason, link)); + } + return; + } + } } } @@ -660,15 +670,20 @@ impl LintPass for UnconditionalRecursion { } impl LateLintPass for UnconditionalRecursion { - fn check_fn(&mut self, cx: &LateContext, fn_kind: FnKind, _: &hir::FnDecl, - blk: &hir::Block, sp: Span, id: ast::NodeId) { + fn check_fn(&mut self, + cx: &LateContext, + fn_kind: FnKind, + _: &hir::FnDecl, + blk: &hir::Expr, + sp: Span, + id: ast::NodeId) { let method = match fn_kind { FnKind::ItemFn(..) => None, FnKind::Method(..) => { - cx.tcx.impl_or_trait_item(cx.tcx.map.local_def_id(id)).as_opt_method() + Some(cx.tcx.associated_item(cx.tcx.map.local_def_id(id))) } // closures can't recur, so they don't matter. - FnKind::Closure => return + FnKind::Closure(_) => return, }; // Walk through this function (say `f`) looking to see if @@ -723,10 +738,8 @@ impl LateLintPass for UnconditionalRecursion { // is this a recursive call? let self_recursive = if node_id != ast::DUMMY_NODE_ID { match method { - Some(ref method) => { - expr_refers_to_this_method(cx.tcx, method, node_id) - } - None => expr_refers_to_this_fn(cx.tcx, id, node_id) + Some(ref method) => expr_refers_to_this_method(cx.tcx, method, node_id), + None => expr_refers_to_this_fn(cx.tcx, id, node_id), } } else { false @@ -752,7 +765,8 @@ impl LateLintPass for UnconditionalRecursion { // no break */ }`) shouldn't be linted unless it actually // recurs. if !reached_exit_without_self_call && !self_call_spans.is_empty() { - let mut db = cx.struct_span_lint(UNCONDITIONAL_RECURSION, sp, + let mut db = cx.struct_span_lint(UNCONDITIONAL_RECURSION, + sp, "function cannot return without recurring"); // FIXME #19668: these could be span_lint_note's instead of this manual guard. 
@@ -761,8 +775,8 @@ impl LateLintPass for UnconditionalRecursion { for call in &self_call_spans { db.span_note(*call, "recursive call site"); } - db.fileline_help(sp, "a `loop` may express intention \ - better if this is on purpose"); + db.help("a `loop` may express intention \ + better if this is on purpose"); } db.emit(); } @@ -773,27 +787,29 @@ impl LateLintPass for UnconditionalRecursion { // Functions for identifying if the given Expr NodeId `id` // represents a call to the function `fn_id`/method `method`. - fn expr_refers_to_this_fn(tcx: &ty::ctxt, - fn_id: ast::NodeId, - id: ast::NodeId) -> bool { + fn expr_refers_to_this_fn(tcx: TyCtxt, fn_id: ast::NodeId, id: ast::NodeId) -> bool { match tcx.map.get(id) { hir_map::NodeExpr(&hir::Expr { node: hir::ExprCall(ref callee, _), .. }) => { - tcx.def_map - .borrow() - .get(&callee.id) - .map_or(false, - |def| def.def_id() == tcx.map.local_def_id(fn_id)) + let def = if let hir::ExprPath(ref qpath) = callee.node { + tcx.tables().qpath_def(qpath, callee.id) + } else { + return false; + }; + def.def_id() == tcx.map.local_def_id(fn_id) } - _ => false + _ => false, } } // Check if the expression `id` performs a call to `method`. - fn expr_refers_to_this_method(tcx: &ty::ctxt, - method: &ty::Method, - id: ast::NodeId) -> bool { + fn expr_refers_to_this_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + method: &ty::AssociatedItem, + id: ast::NodeId) + -> bool { + use rustc::ty::adjustment::*; + // Check for method calls and overloaded operators. - let opt_m = tcx.tables.borrow().method_map.get(&ty::MethodCall::expr(id)).cloned(); + let opt_m = tcx.tables().method_map.get(&ty::MethodCall::expr(id)).cloned(); if let Some(m) = opt_m { if method_call_refers_to_method(tcx, method, m.def_id, m.substs, id) { return true; @@ -801,13 +817,12 @@ impl LateLintPass for UnconditionalRecursion { } // Check for overloaded autoderef method calls. 
- let opt_adj = tcx.tables.borrow().adjustments.get(&id).cloned(); - if let Some(adjustment::AdjustDerefRef(adj)) = opt_adj { - for i in 0..adj.autoderefs { + let opt_adj = tcx.tables().adjustments.get(&id).cloned(); + if let Some(Adjustment { kind: Adjust::DerefRef { autoderefs, .. }, .. }) = opt_adj { + for i in 0..autoderefs { let method_call = ty::MethodCall::autoderef(id, i as u32); - if let Some(m) = tcx.tables.borrow().method_map - .get(&method_call) - .cloned() { + if let Some(m) = tcx.tables().method_map.get(&method_call) + .cloned() { if method_call_refers_to_method(tcx, method, m.def_id, m.substs, id) { return true; } @@ -818,45 +833,44 @@ impl LateLintPass for UnconditionalRecursion { // Check for calls to methods via explicit paths (e.g. `T::method()`). match tcx.map.get(id) { hir_map::NodeExpr(&hir::Expr { node: hir::ExprCall(ref callee, _), .. }) => { - match tcx.def_map.borrow().get(&callee.id).map(|d| d.full_def()) { - Some(def::DefMethod(def_id)) => { - let item_substs = - tcx.tables.borrow().item_substs - .get(&callee.id) - .cloned() - .unwrap_or_else(|| ty::ItemSubsts::empty()); + let def = if let hir::ExprPath(ref qpath) = callee.node { + tcx.tables().qpath_def(qpath, callee.id) + } else { + return false; + }; + match def { + Def::Method(def_id) => { + let substs = tcx.tables().node_id_item_substs(callee.id) + .unwrap_or_else(|| tcx.intern_substs(&[])); method_call_refers_to_method( - tcx, method, def_id, &item_substs.substs, id) + tcx, method, def_id, substs, id) } - _ => false + _ => false, } } - _ => false + _ => false, } } // Check if the method call to the method with the ID `callee_id` // and instantiated with `callee_substs` refers to method `method`. 
- fn method_call_refers_to_method<'tcx>(tcx: &ty::ctxt<'tcx>, - method: &ty::Method, - callee_id: DefId, - callee_substs: &Substs<'tcx>, - expr_id: ast::NodeId) -> bool { - let callee_item = tcx.impl_or_trait_item(callee_id); - - match callee_item.container() { + fn method_call_refers_to_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + method: &ty::AssociatedItem, + callee_id: DefId, + callee_substs: &Substs<'tcx>, + expr_id: ast::NodeId) + -> bool { + let callee_item = tcx.associated_item(callee_id); + + match callee_item.container { // This is an inherent method, so the `def_id` refers // directly to the method definition. - ty::ImplContainer(_) => { - callee_id == method.def_id - } + ty::ImplContainer(_) => callee_id == method.def_id, // A trait method, from any number of possible sources. // Attempt to select a concrete impl before checking. ty::TraitContainer(trait_def_id) => { - let trait_substs = callee_substs.clone().method_to_trait(); - let trait_substs = tcx.mk_substs(trait_substs); - let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); + let trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, callee_substs); let trait_ref = ty::Binder(trait_ref); let span = tcx.map.span(expr_id); let obligation = @@ -868,36 +882,35 @@ impl LateLintPass for UnconditionalRecursion { // checking, so it's always local let node_id = tcx.map.as_local_node_id(method.def_id).unwrap(); - let param_env = ty::ParameterEnvironment::for_item(tcx, node_id); - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(param_env)); - let mut selcx = traits::SelectionContext::new(&infcx); - match selcx.select(&obligation) { - // The method comes from a `T: Trait` bound. - // If `T` is `Self`, then this call is inside - // a default method definition. 
- Ok(Some(traits::VtableParam(_))) => { - let self_ty = callee_substs.self_ty(); - let on_self = self_ty.map_or(false, |t| t.is_self()); - // We can only be recurring in a default - // method if we're being called literally - // on the `Self` type. - on_self && callee_id == method.def_id - } + let param_env = Some(ty::ParameterEnvironment::for_item(tcx, node_id)); + tcx.infer_ctxt(None, param_env, Reveal::NotSpecializable).enter(|infcx| { + let mut selcx = traits::SelectionContext::new(&infcx); + match selcx.select(&obligation) { + // The method comes from a `T: Trait` bound. + // If `T` is `Self`, then this call is inside + // a default method definition. + Ok(Some(traits::VtableParam(_))) => { + let on_self = trait_ref.self_ty().is_self(); + // We can only be recurring in a default + // method if we're being called literally + // on the `Self` type. + on_self && callee_id == method.def_id + } - // The `impl` is known, so we check that with a - // special case: - Ok(Some(traits::VtableImpl(vtable_impl))) => { - let container = ty::ImplContainer(vtable_impl.impl_def_id); - // It matches if it comes from the same impl, - // and has the same method name. - container == method.container - && callee_item.name() == method.name - } + // The `impl` is known, so we check that with a + // special case: + Ok(Some(traits::VtableImpl(vtable_impl))) => { + let container = ty::ImplContainer(vtable_impl.impl_def_id); + // It matches if it comes from the same impl, + // and has the same method name. + container == method.container && callee_item.name == method.name + } - // There's no way to know if this call is - // recursive, so we assume it's not. - _ => return false - } + // There's no way to know if this call is + // recursive, so we assume it's not. 
+ _ => false, + } + }) } } } @@ -942,7 +955,8 @@ impl LateLintPass for PluginAsLibrary { }; if prfn.is_some() { - cx.span_lint(PLUGIN_AS_LIBRARY, it.span, + cx.span_lint(PLUGIN_AS_LIBRARY, + it.span, "compiler plugin used as an ordinary library"); } } @@ -987,7 +1001,7 @@ impl LintPass for InvalidNoMangleItems { impl LateLintPass for InvalidNoMangleItems { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { match it.node { - hir::ItemFn(_, _, _, _, ref generics, _) => { + hir::ItemFn(.., ref generics, _) => { if attr::contains_name(&it.attrs, "no_mangle") { if !cx.access_levels.is_reachable(it.id) { let msg = format!("function {} is marked #[no_mangle], but not exported", @@ -1000,15 +1014,15 @@ impl LateLintPass for InvalidNoMangleItems { "generic functions must be mangled"); } } - }, + } hir::ItemStatic(..) => { if attr::contains_name(&it.attrs, "no_mangle") && - !cx.access_levels.is_reachable(it.id) { + !cx.access_levels.is_reachable(it.id) { let msg = format!("static {} is marked #[no_mangle], but not exported", it.name); cx.span_lint(PRIVATE_NO_MANGLE_STATICS, it.span, &msg); } - }, + } hir::ItemConst(..) 
=> { if attr::contains_name(&it.attrs, "no_mangle") { // Const items do not refer to a particular location in memory, and therefore @@ -1018,7 +1032,7 @@ impl LateLintPass for InvalidNoMangleItems { cx.span_lint(NO_MANGLE_CONST_ITEMS, it.span, msg); } } - _ => {}, + _ => {} } } } @@ -1040,53 +1054,52 @@ impl LintPass for MutableTransmutes { impl LateLintPass for MutableTransmutes { fn check_expr(&mut self, cx: &LateContext, expr: &hir::Expr) { - use syntax::abi::RustIntrinsic; + use syntax::abi::Abi::RustIntrinsic; - let msg = "mutating transmuted &mut T from &T may cause undefined behavior,\ + let msg = "mutating transmuted &mut T from &T may cause undefined behavior, \ consider instead using an UnsafeCell"; match get_transmute_from_to(cx, expr) { Some((&ty::TyRef(_, from_mt), &ty::TyRef(_, to_mt))) => { - if to_mt.mutbl == hir::Mutability::MutMutable - && from_mt.mutbl == hir::Mutability::MutImmutable { + if to_mt.mutbl == hir::Mutability::MutMutable && + from_mt.mutbl == hir::Mutability::MutImmutable { cx.span_lint(MUTABLE_TRANSMUTES, expr.span, msg); } } - _ => () + _ => (), } - fn get_transmute_from_to<'a, 'tcx>(cx: &LateContext<'a, 'tcx>, expr: &hir::Expr) - -> Option<(&'tcx ty::TypeVariants<'tcx>, &'tcx ty::TypeVariants<'tcx>)> { - match expr.node { - hir::ExprPath(..) 
=> (), - _ => return None - } - if let def::DefFn(did, _) = cx.tcx.resolve_expr(expr) { + fn get_transmute_from_to<'a, 'tcx> + (cx: &LateContext<'a, 'tcx>, + expr: &hir::Expr) + -> Option<(&'tcx ty::TypeVariants<'tcx>, &'tcx ty::TypeVariants<'tcx>)> { + let def = if let hir::ExprPath(ref qpath) = expr.node { + cx.tcx.tables().qpath_def(qpath, expr.id) + } else { + return None; + }; + if let Def::Fn(did) = def { if !def_id_is_transmute(cx, did) { return None; } - let typ = cx.tcx.node_id_to_type(expr.id); + let typ = cx.tcx.tables().node_id_to_type(expr.id); match typ.sty { - ty::TyBareFn(_, ref bare_fn) if bare_fn.abi == RustIntrinsic => { - if let ty::FnConverging(to) = bare_fn.sig.0.output { - let from = bare_fn.sig.0.inputs[0]; - return Some((&from.sty, &to.sty)); - } - }, - _ => () + ty::TyFnDef(.., ref bare_fn) if bare_fn.abi == RustIntrinsic => { + let from = bare_fn.sig.0.inputs[0]; + let to = bare_fn.sig.0.output; + return Some((&from.sty, &to.sty)); + } + _ => (), } } None } fn def_id_is_transmute(cx: &LateContext, def_id: DefId) -> bool { - match cx.tcx.lookup_item_type(def_id).ty.sty { - ty::TyBareFn(_, ref bfty) if bfty.abi == RustIntrinsic => (), - _ => return false + match cx.tcx.item_type(def_id).sty { + ty::TyFnDef(.., ref bfty) if bfty.abi == RustIntrinsic => (), + _ => return false, } - cx.tcx.with_path(def_id, |path| match path.last() { - Some(ref last) => last.name().as_str() == "transmute", - _ => false - }) + cx.tcx.item_name(def_id) == "transmute" } } } @@ -1109,65 +1122,45 @@ impl LintPass for UnstableFeatures { impl LateLintPass for UnstableFeatures { fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) { - if attr::contains_name(&[attr.node.value.clone()], "feature") { - if let Some(items) = attr.node.value.meta_item_list() { + if attr.meta().check_name("feature") { + if let Some(items) = attr.meta().meta_item_list() { for item in items { - ctx.span_lint(UNSTABLE_FEATURES, item.span, "unstable feature"); + 
ctx.span_lint(UNSTABLE_FEATURES, item.span(), "unstable feature"); } } } } } -/// Lints for attempts to impl Drop on types that have `#[repr(C)]` -/// attribute (see issue #24585). -#[derive(Copy, Clone)] -pub struct DropWithReprExtern; +/// Lint for unions that contain fields with possibly non-trivial destructors. +pub struct UnionsWithDropFields; declare_lint! { - DROP_WITH_REPR_EXTERN, + UNIONS_WITH_DROP_FIELDS, Warn, - "use of #[repr(C)] on a type that implements Drop" + "use of unions that contain fields with possibly non-trivial drop code" } -impl LintPass for DropWithReprExtern { +impl LintPass for UnionsWithDropFields { fn get_lints(&self) -> LintArray { - lint_array!(DROP_WITH_REPR_EXTERN) + lint_array!(UNIONS_WITH_DROP_FIELDS) } } -impl LateLintPass for DropWithReprExtern { - fn check_crate(&mut self, ctx: &LateContext, _: &hir::Crate) { - let drop_trait = match ctx.tcx.lang_items.drop_trait() { - Some(id) => ctx.tcx.lookup_trait_def(id), None => { return } - }; - drop_trait.for_each_impl(ctx.tcx, |drop_impl_did| { - if !drop_impl_did.is_local() { - return; - } - let dtor_self_type = ctx.tcx.lookup_item_type(drop_impl_did).ty; - - match dtor_self_type.sty { - ty::TyEnum(self_type_def, _) | - ty::TyStruct(self_type_def, _) => { - let self_type_did = self_type_def.did; - let hints = ctx.tcx.lookup_repr_hints(self_type_did); - if hints.iter().any(|attr| *attr == attr::ReprExtern) && - self_type_def.dtor_kind().has_drop_flag() { - let drop_impl_span = ctx.tcx.map.def_id_span(drop_impl_did, - codemap::DUMMY_SP); - let self_defn_span = ctx.tcx.map.def_id_span(self_type_did, - codemap::DUMMY_SP); - ctx.span_lint_note(DROP_WITH_REPR_EXTERN, - drop_impl_span, - "implementing Drop adds hidden state to types, \ - possibly conflicting with `#[repr(C)]`", - self_defn_span, - "the `#[repr(C)]` attribute is attached here"); - } +impl LateLintPass for UnionsWithDropFields { + fn check_item(&mut self, ctx: &LateContext, item: &hir::Item) { + if let hir::ItemUnion(ref 
vdata, _) = item.node { + let param_env = &ty::ParameterEnvironment::for_item(ctx.tcx, item.id); + for field in vdata.fields() { + let field_ty = ctx.tcx.item_type(ctx.tcx.map.local_def_id(field.id)); + if ctx.tcx.type_needs_drop_given_env(field_ty, param_env) { + ctx.span_lint(UNIONS_WITH_DROP_FIELDS, + field.span, + "union contains a field with possibly non-trivial drop code, \ + drop code of union fields is ignored when dropping the union"); + return; } - _ => {} } - }) + } } } diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 78252c491ecfe..6c9a3e99a0458 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -26,6 +26,7 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![cfg_attr(test, feature(test))] #![feature(box_patterns)] @@ -35,7 +36,6 @@ #![feature(rustc_private)] #![feature(slice_patterns)] #![feature(staged_api)] -#![feature(str_char)] #[macro_use] extern crate syntax; @@ -43,13 +43,14 @@ extern crate syntax; extern crate rustc; #[macro_use] extern crate log; -extern crate rustc_front; extern crate rustc_back; +extern crate rustc_const_eval; +extern crate syntax_pos; -pub use rustc::lint as lint; -pub use rustc::middle as middle; -pub use rustc::session as session; -pub use rustc::util as util; +pub use rustc::lint; +pub use rustc::middle; +pub use rustc::session; +pub use rustc::util; use session::Session; use lint::LintId; @@ -93,6 +94,14 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { ) } + macro_rules! add_early_builtin_with_new { + ($sess:ident, $($name:ident),*,) => ( + {$( + store.register_early_pass($sess, false, box $name::new()); + )*} + ) + } + macro_rules! 
add_lint_group { ($sess:ident, $name:expr, $($lint:ident),*) => ( store.register_group($sess, false, $name, vec![$(LintId::of($lint)),*]); @@ -101,12 +110,18 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { add_early_builtin!(sess, UnusedParens, + UnusedImportBraces, ); + add_early_builtin_with_new!(sess, + DeprecatedAttr, + ); + add_builtin!(sess, HardwiredLints, WhileTrue, ImproperCTypes, + VariantSizeDifferences, BoxPointers, UnusedAttributes, PathStatements, @@ -114,7 +129,6 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { NonCamelCaseTypes, NonSnakeCase, NonUpperCaseGlobals, - UnusedImportBraces, NonShorthandFieldPatterns, UnusedUnsafe, UnsafeCode, @@ -122,12 +136,11 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { UnusedAllocation, MissingCopyImplementations, UnstableFeatures, - Deprecated, UnconditionalRecursion, InvalidNoMangleItems, PluginAsLibrary, - DropWithReprExtern, MutableTransmutes, + UnionsWithDropFields, ); add_builtin_with_new!(sess, @@ -136,13 +149,24 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { MissingDebugImplementations, ); - add_lint_group!(sess, "bad_style", - NON_CAMEL_CASE_TYPES, NON_SNAKE_CASE, NON_UPPER_CASE_GLOBALS); - - add_lint_group!(sess, "unused", - UNUSED_IMPORTS, UNUSED_VARIABLES, UNUSED_ASSIGNMENTS, DEAD_CODE, - UNUSED_MUT, UNREACHABLE_CODE, UNUSED_MUST_USE, - UNUSED_UNSAFE, PATH_STATEMENTS, UNUSED_ATTRIBUTES); + add_lint_group!(sess, + "bad_style", + NON_CAMEL_CASE_TYPES, + NON_SNAKE_CASE, + NON_UPPER_CASE_GLOBALS); + + add_lint_group!(sess, + "unused", + UNUSED_IMPORTS, + UNUSED_VARIABLES, + UNUSED_ASSIGNMENTS, + DEAD_CODE, + UNUSED_MUT, + UNREACHABLE_CODE, + UNUSED_MUST_USE, + UNUSED_UNSAFE, + PATH_STATEMENTS, + UNUSED_ATTRIBUTES); // Guidelines for creating a future incompatibility lint: // @@ -152,31 +176,75 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: 
Option<&Session>) { // and include the full URL. // - Later, change lint to error // - Eventually, remove lint - store.register_future_incompatible(sess, vec![ + store.register_future_incompatible(sess, + vec![ FutureIncompatibleInfo { id: LintId::of(PRIVATE_IN_PUBLIC), - reference: "the explanation for E0446 (`--explain E0446`)", + reference: "issue #34537 ", + }, + FutureIncompatibleInfo { + id: LintId::of(INACCESSIBLE_EXTERN_CRATE), + reference: "issue #36886 ", }, FutureIncompatibleInfo { id: LintId::of(INVALID_TYPE_PARAM_DEFAULT), - reference: "PR 30742 ", + reference: "issue #36887 ", + }, + FutureIncompatibleInfo { + id: LintId::of(SUPER_OR_SELF_IN_GLOBAL_PATH), + reference: "issue #36888 ", + }, + FutureIncompatibleInfo { + id: LintId::of(TRANSMUTE_FROM_FN_ITEM_TYPES), + reference: "issue #19925 ", + }, + FutureIncompatibleInfo { + id: LintId::of(OVERLAPPING_INHERENT_IMPLS), + reference: "issue #36889 ", }, FutureIncompatibleInfo { - id: LintId::of(MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT), - reference: "RFC 218 ", + id: LintId::of(ILLEGAL_FLOATING_POINT_CONSTANT_PATTERN), + reference: "issue #36890 ", + }, + FutureIncompatibleInfo { + id: LintId::of(ILLEGAL_STRUCT_OR_ENUM_CONSTANT_PATTERN), + reference: "issue #36891 ", + }, + FutureIncompatibleInfo { + id: LintId::of(HR_LIFETIME_IN_ASSOC_TYPE), + reference: "issue #33685 ", + }, + FutureIncompatibleInfo { + id: LintId::of(LIFETIME_UNDERSCORE), + reference: "issue #36892 ", + }, + FutureIncompatibleInfo { + id: LintId::of(SAFE_EXTERN_STATICS), + reference: "issue #36247 ", + }, + FutureIncompatibleInfo { + id: LintId::of(PATTERNS_IN_FNS_WITHOUT_BODY), + reference: "issue #35203 ", + }, + FutureIncompatibleInfo { + id: LintId::of(EXTRA_REQUIREMENT_IN_IMPL), + reference: "issue #37166 ", + }, + FutureIncompatibleInfo { + id: LintId::of(LEGACY_DIRECTORY_OWNERSHIP), + reference: "issue #37872 ", }, ]); - // We have one lint pass defined specially - store.register_late_pass(sess, false, box 
lint::GatherNodeLevels); - // Register renamed and removed lints store.register_renamed("unknown_features", "unused_features"); - store.register_removed("unsigned_negation", "replaced by negate_unsigned feature gate"); + store.register_removed("unsigned_negation", + "replaced by negate_unsigned feature gate"); store.register_removed("negate_unsigned", "cast a signed value instead"); store.register_removed("raw_pointer_derive", "using derive with raw pointers is ok"); // This was renamed to raw_pointer_derive, which was then removed, // so it is also considered removed - store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok"); + store.register_removed("raw_pointer_deriving", + "using derive with raw pointers is ok"); + store.register_removed("drop_with_repr_extern", "drop flags have been removed"); } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index c3dfca44349eb..bba31c8237d18 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -10,27 +10,28 @@ #![allow(non_snake_case)] -use middle::{infer}; -use middle::def_id::DefId; -use middle::subst::Substs; -use middle::ty::{self, Ty}; -use middle::const_eval::{eval_const_expr_partial, ConstVal}; -use middle::const_eval::EvalHint::ExprTypeChecked; -use util::nodemap::{FnvHashSet}; +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::ty::{self, AdtKind, Ty, TyCtxt}; +use rustc::ty::layout::{Layout, Primitive}; +use rustc::traits::Reveal; +use middle::const_val::ConstVal; +use rustc_const_eval::eval_const_expr_partial; +use rustc_const_eval::EvalHint::ExprTypeChecked; +use util::nodemap::FxHashSet; use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass}; use std::cmp; use std::{i8, i16, i32, i64, u8, u16, u32, u64, f32, f64}; -use syntax::{abi, ast}; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::{self, Span}; -use syntax::ast::{TyIs, TyUs, TyI8, TyU8, TyI16, TyU16, TyI32, TyU32, 
TyI64, TyU64}; +use syntax::ast; +use syntax::abi::Abi; +use syntax::attr; +use syntax_pos::Span; +use syntax::codemap; -use rustc_front::hir; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::util::is_shift_binop; +use rustc::hir; register_long_diagnostics! { E0519: r##" @@ -55,7 +56,6 @@ let Wrapping(x) = x; let y: usize = 1.wrapping_neg(); assert_eq!(x, y); ``` - "## } @@ -77,6 +77,12 @@ declare_lint! { "shift exceeds the type's number of bits" } +declare_lint! { + VARIANT_SIZE_DIFFERENCES, + Allow, + "detects enums with widely varying variant sizes" +} + #[derive(Copy, Clone)] pub struct TypeLimits { /// Id of the last visited negated expression @@ -85,15 +91,15 @@ pub struct TypeLimits { impl TypeLimits { pub fn new() -> TypeLimits { - TypeLimits { - negated_expr_id: !0, - } + TypeLimits { negated_expr_id: ast::DUMMY_NODE_ID } } } impl LintPass for TypeLimits { fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_COMPARISONS, OVERFLOWING_LITERALS, EXCEEDING_BITSHIFTS) + lint_array!(UNUSED_COMPARISONS, + OVERFLOWING_LITERALS, + EXCEEDING_BITSHIFTS) } } @@ -103,18 +109,18 @@ impl LateLintPass for TypeLimits { hir::ExprUnary(hir::UnNeg, ref expr) => { if let hir::ExprLit(ref lit) = expr.node { match lit.node { - ast::LitInt(_, ast::UnsignedIntLit(_)) => { + ast::LitKind::Int(_, ast::LitIntType::Unsigned(_)) => { forbid_unsigned_negation(cx, e.span); - }, - ast::LitInt(_, ast::UnsuffixedIntLit(_)) => { - if let ty::TyUint(_) = cx.tcx.node_id_to_type(e.id).sty { + } + ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => { + if let ty::TyUint(_) = cx.tcx.tables().node_id_to_type(e.id).sty { forbid_unsigned_negation(cx, e.span); } - }, - _ => () + } + _ => (), } } else { - let t = cx.tcx.node_id_to_type(expr.id); + let t = cx.tcx.tables().node_id_to_type(expr.id); if let ty::TyUint(_) = t.sty { forbid_unsigned_negation(cx, e.span); } @@ -123,45 +129,54 @@ impl LateLintPass for TypeLimits { if self.negated_expr_id != e.id { self.negated_expr_id = 
expr.id; } - }, + } hir::ExprBinary(binop, ref l, ref r) => { - if is_comparison(binop) && !check_limits(cx.tcx, binop, &**l, &**r) { - cx.span_lint(UNUSED_COMPARISONS, e.span, + if is_comparison(binop) && !check_limits(cx.tcx, binop, &l, &r) { + cx.span_lint(UNUSED_COMPARISONS, + e.span, "comparison is useless due to type limits"); } - if is_shift_binop(binop.node) { - let opt_ty_bits = match cx.tcx.node_id_to_type(l.id).sty { + if binop.node.is_shift() { + let opt_ty_bits = match cx.tcx.tables().node_id_to_type(l.id).sty { ty::TyInt(t) => Some(int_ty_bits(t, cx.sess().target.int_type)), ty::TyUint(t) => Some(uint_ty_bits(t, cx.sess().target.uint_type)), - _ => None + _ => None, }; if let Some(bits) = opt_ty_bits { let exceeding = if let hir::ExprLit(ref lit) = r.node { - if let ast::LitInt(shift, _) = lit.node { shift >= bits } - else { false } + if let ast::LitKind::Int(shift, _) = lit.node { + shift >= bits + } else { + false + } } else { match eval_const_expr_partial(cx.tcx, &r, ExprTypeChecked, None) { - Ok(ConstVal::Int(shift)) => { shift as u64 >= bits }, - Ok(ConstVal::Uint(shift)) => { shift >= bits }, - _ => { false } + Ok(ConstVal::Integral(i)) => { + i.is_negative() || + i.to_u64() + .map(|i| i >= bits) + .unwrap_or(true) + } + _ => false, } }; if exceeding { - cx.span_lint(EXCEEDING_BITSHIFTS, e.span, + cx.span_lint(EXCEEDING_BITSHIFTS, + e.span, "bitshift exceeds the type's number of bits"); } }; } - }, + } hir::ExprLit(ref lit) => { - match cx.tcx.node_id_to_type(e.id).sty { + match cx.tcx.tables().node_id_to_type(e.id).sty { ty::TyInt(t) => { match lit.node { - ast::LitInt(v, ast::SignedIntLit(_, ast::Plus)) | - ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => { - let int_type = if let ast::TyIs = t { + ast::LitKind::Int(v, ast::LitIntType::Signed(_)) | + ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => { + let int_type = if let ast::IntTy::Is = t { cx.sess().target.int_type } else { t @@ -173,182 +188,186 @@ impl LateLintPass for TypeLimits 
{ // avoiding use of -min to prevent overflow/panic if (negative && v > max as u64 + 1) || (!negative && v > max as u64) { - cx.span_lint(OVERFLOWING_LITERALS, e.span, - &*format!("literal out of range for {:?}", t)); + cx.span_lint(OVERFLOWING_LITERALS, + e.span, + &format!("literal out of range for {:?}", t)); return; } } - _ => panic!() + _ => bug!(), }; - }, + } ty::TyUint(t) => { - let uint_type = if let ast::TyUs = t { + let uint_type = if let ast::UintTy::Us = t { cx.sess().target.uint_type } else { t }; let (min, max) = uint_ty_range(uint_type); let lit_val: u64 = match lit.node { - ast::LitByte(_v) => return, // _v is u8, within range by definition - ast::LitInt(v, _) => v, - _ => panic!() + // _v is u8, within range by definition + ast::LitKind::Byte(_v) => return, + ast::LitKind::Int(v, _) => v, + _ => bug!(), }; if lit_val < min || lit_val > max { - cx.span_lint(OVERFLOWING_LITERALS, e.span, - &*format!("literal out of range for {:?}", t)); + cx.span_lint(OVERFLOWING_LITERALS, + e.span, + &format!("literal out of range for {:?}", t)); } - }, + } ty::TyFloat(t) => { let (min, max) = float_ty_range(t); let lit_val: f64 = match lit.node { - ast::LitFloat(ref v, _) | - ast::LitFloatUnsuffixed(ref v) => { - match v.parse() { + ast::LitKind::Float(v, _) | + ast::LitKind::FloatUnsuffixed(v) => { + match v.as_str().parse() { Ok(f) => f, - Err(_) => return + Err(_) => return, } } - _ => panic!() + _ => bug!(), }; if lit_val < min || lit_val > max { - cx.span_lint(OVERFLOWING_LITERALS, e.span, - &*format!("literal out of range for {:?}", t)); + cx.span_lint(OVERFLOWING_LITERALS, + e.span, + &format!("literal out of range for {:?}", t)); } - }, - _ => () + } + _ => (), }; - }, - _ => () + } + _ => (), }; - fn is_valid(binop: hir::BinOp, v: T, - min: T, max: T) -> bool { + fn is_valid(binop: hir::BinOp, v: T, min: T, max: T) -> bool { match binop.node { - hir::BiLt => v > min && v <= max, - hir::BiLe => v >= min && v < max, - hir::BiGt => v >= min && v < max, - 
hir::BiGe => v > min && v <= max, + hir::BiLt => v > min && v <= max, + hir::BiLe => v >= min && v < max, + hir::BiGt => v >= min && v < max, + hir::BiGe => v > min && v <= max, hir::BiEq | hir::BiNe => v >= min && v <= max, - _ => panic!() + _ => bug!(), } } fn rev_binop(binop: hir::BinOp) -> hir::BinOp { - codemap::respan(binop.span, match binop.node { - hir::BiLt => hir::BiGt, - hir::BiLe => hir::BiGe, - hir::BiGt => hir::BiLt, - hir::BiGe => hir::BiLe, - _ => return binop - }) + codemap::respan(binop.span, + match binop.node { + hir::BiLt => hir::BiGt, + hir::BiLe => hir::BiGe, + hir::BiGt => hir::BiLt, + hir::BiGe => hir::BiLe, + _ => return binop, + }) } // for isize & usize, be conservative with the warnings, so that the // warnings are consistent between 32- and 64-bit platforms fn int_ty_range(int_ty: ast::IntTy) -> (i64, i64) { match int_ty { - ast::TyIs => (i64::MIN, i64::MAX), - ast::TyI8 => (i8::MIN as i64, i8::MAX as i64), - ast::TyI16 => (i16::MIN as i64, i16::MAX as i64), - ast::TyI32 => (i32::MIN as i64, i32::MAX as i64), - ast::TyI64 => (i64::MIN, i64::MAX) + ast::IntTy::Is => (i64::MIN, i64::MAX), + ast::IntTy::I8 => (i8::MIN as i64, i8::MAX as i64), + ast::IntTy::I16 => (i16::MIN as i64, i16::MAX as i64), + ast::IntTy::I32 => (i32::MIN as i64, i32::MAX as i64), + ast::IntTy::I64 => (i64::MIN, i64::MAX), } } fn uint_ty_range(uint_ty: ast::UintTy) -> (u64, u64) { match uint_ty { - ast::TyUs => (u64::MIN, u64::MAX), - ast::TyU8 => (u8::MIN as u64, u8::MAX as u64), - ast::TyU16 => (u16::MIN as u64, u16::MAX as u64), - ast::TyU32 => (u32::MIN as u64, u32::MAX as u64), - ast::TyU64 => (u64::MIN, u64::MAX) + ast::UintTy::Us => (u64::MIN, u64::MAX), + ast::UintTy::U8 => (u8::MIN as u64, u8::MAX as u64), + ast::UintTy::U16 => (u16::MIN as u64, u16::MAX as u64), + ast::UintTy::U32 => (u32::MIN as u64, u32::MAX as u64), + ast::UintTy::U64 => (u64::MIN, u64::MAX), } } fn float_ty_range(float_ty: ast::FloatTy) -> (f64, f64) { match float_ty { - ast::TyF32 => 
(f32::MIN as f64, f32::MAX as f64), - ast::TyF64 => (f64::MIN, f64::MAX) + ast::FloatTy::F32 => (f32::MIN as f64, f32::MAX as f64), + ast::FloatTy::F64 => (f64::MIN, f64::MAX), } } fn int_ty_bits(int_ty: ast::IntTy, target_int_ty: ast::IntTy) -> u64 { match int_ty { - ast::TyIs => int_ty_bits(target_int_ty, target_int_ty), - ast::TyI8 => 8, - ast::TyI16 => 16 as u64, - ast::TyI32 => 32, - ast::TyI64 => 64, + ast::IntTy::Is => int_ty_bits(target_int_ty, target_int_ty), + ast::IntTy::I8 => 8, + ast::IntTy::I16 => 16 as u64, + ast::IntTy::I32 => 32, + ast::IntTy::I64 => 64, } } fn uint_ty_bits(uint_ty: ast::UintTy, target_uint_ty: ast::UintTy) -> u64 { match uint_ty { - ast::TyUs => uint_ty_bits(target_uint_ty, target_uint_ty), - ast::TyU8 => 8, - ast::TyU16 => 16, - ast::TyU32 => 32, - ast::TyU64 => 64, + ast::UintTy::Us => uint_ty_bits(target_uint_ty, target_uint_ty), + ast::UintTy::U8 => 8, + ast::UintTy::U16 => 16, + ast::UintTy::U32 => 32, + ast::UintTy::U64 => 64, } } - fn check_limits(tcx: &ty::ctxt, binop: hir::BinOp, - l: &hir::Expr, r: &hir::Expr) -> bool { + fn check_limits<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + binop: hir::BinOp, + l: &hir::Expr, + r: &hir::Expr) + -> bool { let (lit, expr, swap) = match (&l.node, &r.node) { (&hir::ExprLit(_), _) => (l, r, true), (_, &hir::ExprLit(_)) => (r, l, false), - _ => return true + _ => return true, }; // Normalize the binop so that the literal is always on the RHS in // the comparison - let norm_binop = if swap { - rev_binop(binop) - } else { - binop - }; - match tcx.node_id_to_type(expr.id).sty { + let norm_binop = if swap { rev_binop(binop) } else { binop }; + match tcx.tables().node_id_to_type(expr.id).sty { ty::TyInt(int_ty) => { let (min, max) = int_ty_range(int_ty); let lit_val: i64 = match lit.node { - hir::ExprLit(ref li) => match li.node { - ast::LitInt(v, ast::SignedIntLit(_, ast::Plus)) | - ast::LitInt(v, ast::UnsuffixedIntLit(ast::Plus)) => v as i64, - ast::LitInt(v, ast::SignedIntLit(_, ast::Minus)) 
| - ast::LitInt(v, ast::UnsuffixedIntLit(ast::Minus)) => -(v as i64), - _ => return true - }, - _ => panic!() + hir::ExprLit(ref li) => { + match li.node { + ast::LitKind::Int(v, ast::LitIntType::Signed(_)) | + ast::LitKind::Int(v, ast::LitIntType::Unsuffixed) => v as i64, + _ => return true, + } + } + _ => bug!(), }; is_valid(norm_binop, lit_val, min, max) } ty::TyUint(uint_ty) => { let (min, max): (u64, u64) = uint_ty_range(uint_ty); let lit_val: u64 = match lit.node { - hir::ExprLit(ref li) => match li.node { - ast::LitInt(v, _) => v, - _ => return true - }, - _ => panic!() + hir::ExprLit(ref li) => { + match li.node { + ast::LitKind::Int(v, _) => v, + _ => return true, + } + } + _ => bug!(), }; is_valid(norm_binop, lit_val, min, max) } - _ => true + _ => true, } } fn is_comparison(binop: hir::BinOp) -> bool { match binop.node { - hir::BiEq | hir::BiLt | hir::BiLe | - hir::BiNe | hir::BiGe | hir::BiGt => true, - _ => false + hir::BiEq | hir::BiLt | hir::BiLe | hir::BiNe | hir::BiGe | hir::BiGt => true, + _ => false, } } fn forbid_unsigned_negation(cx: &LateContext, span: Span) { cx.sess() - .struct_span_err_with_code(span, "unary negation of unsigned integer", "E0519") - .span_help(span, "use a cast or the `!` operator") - .emit(); + .struct_span_err_with_code(span, "unary negation of unsigned integer", "E0519") + .span_help(span, "use a cast or the `!` operator") + .emit(); } } } @@ -360,14 +379,15 @@ declare_lint! { } struct ImproperCTypesVisitor<'a, 'tcx: 'a> { - cx: &'a LateContext<'a, 'tcx> + cx: &'a LateContext<'a, 'tcx>, } enum FfiResult { FfiSafe, FfiUnsafe(&'static str), FfiBadStruct(DefId, &'static str), - FfiBadEnum(DefId, &'static str) + FfiBadUnion(DefId, &'static str), + FfiBadEnum(DefId, &'static str), } /// Check if this enum can be safely exported based on the @@ -375,10 +395,10 @@ enum FfiResult { /// to function pointers and references, but could be /// expanded to cover NonZero raw pointers and newtypes. 
/// FIXME: This duplicates code in trans. -fn is_repr_nullable_ptr<'tcx>(tcx: &ty::ctxt<'tcx>, - def: ty::AdtDef<'tcx>, - substs: &Substs<'tcx>) - -> bool { +fn is_repr_nullable_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def: &'tcx ty::AdtDef, + substs: &Substs<'tcx>) + -> bool { if def.variants.len() == 2 { let data_idx; @@ -392,34 +412,25 @@ fn is_repr_nullable_ptr<'tcx>(tcx: &ty::ctxt<'tcx>, if def.variants[data_idx].fields.len() == 1 { match def.variants[data_idx].fields[0].ty(tcx, substs).sty { - ty::TyBareFn(None, _) => { return true; } - ty::TyRef(..) => { return true; } - _ => { } + ty::TyFnPtr(_) => { + return true; + } + ty::TyRef(..) => { + return true; + } + _ => {} } } } false } -fn ast_ty_to_normalized<'tcx>(tcx: &ty::ctxt<'tcx>, - id: ast::NodeId) - -> Ty<'tcx> { - let tty = match tcx.ast_ty_to_ty_cache.borrow().get(&id) { - Some(&t) => t, - None => panic!("ast_ty_to_ty_cache was incomplete after typeck!") - }; - infer::normalize_associated_type(tcx, &tty) -} - impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { /// Check if the given type is "ffi-safe" (has a stable, well-defined /// representation which can be exported to C code). - fn check_type_for_ffi(&self, - cache: &mut FnvHashSet>, - ty: Ty<'tcx>) - -> FfiResult { + fn check_type_for_ffi(&self, cache: &mut FxHashSet>, ty: Ty<'tcx>) -> FfiResult { use self::FfiResult::*; - let cx = &self.cx.tcx; + let cx = self.cx.tcx; // Protect against infinite recursion, for example // `struct S(*mut S);`. 
@@ -430,89 +441,117 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } match ty.sty { - ty::TyStruct(def, substs) => { - if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { - return FfiUnsafe( - "found struct without foreign-function-safe \ - representation annotation in foreign module, \ - consider adding a #[repr(C)] attribute to \ - the type"); - } + ty::TyAdt(def, substs) => { + match def.adt_kind() { + AdtKind::Struct => { + if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { + return FfiUnsafe("found struct without foreign-function-safe \ + representation annotation in foreign module, \ + consider adding a #[repr(C)] attribute to the type"); + } - // We can't completely trust repr(C) markings; make sure the - // fields are actually safe. - if def.struct_variant().fields.is_empty() { - return FfiUnsafe( - "found zero-size struct in foreign module, consider \ - adding a member to this struct"); - } + // We can't completely trust repr(C) markings; make sure the + // fields are actually safe. + if def.struct_variant().fields.is_empty() { + return FfiUnsafe("found zero-size struct in foreign module, consider \ + adding a member to this struct"); + } - for field in &def.struct_variant().fields { - let field_ty = infer::normalize_associated_type(cx, &field.ty(cx, substs)); - let r = self.check_type_for_ffi(cache, field_ty); - match r { - FfiSafe => {} - FfiBadStruct(..) | FfiBadEnum(..) => { return r; } - FfiUnsafe(s) => { return FfiBadStruct(def.did, s); } + for field in &def.struct_variant().fields { + let field_ty = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, field_ty); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) => { + return r; + } + FfiUnsafe(s) => { + return FfiBadStruct(def.did, s); + } + } + } + FfiSafe } - } - FfiSafe - } - ty::TyEnum(def, substs) => { - if def.variants.is_empty() { - // Empty enums are okay... although sort of useless. 
- return FfiSafe - } + AdtKind::Union => { + if !cx.lookup_repr_hints(def.did).contains(&attr::ReprExtern) { + return FfiUnsafe("found union without foreign-function-safe \ + representation annotation in foreign module, \ + consider adding a #[repr(C)] attribute to the type"); + } - // Check for a repr() attribute to specify the size of the - // discriminant. - let repr_hints = cx.lookup_repr_hints(def.did); - match &**repr_hints { - [] => { - // Special-case types like `Option`. - if !is_repr_nullable_ptr(cx, def, substs) { - return FfiUnsafe( - "found enum without foreign-function-safe \ - representation annotation in foreign module, \ - consider adding a #[repr(...)] attribute to \ - the type") + for field in &def.struct_variant().fields { + let field_ty = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, field_ty); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) => { + return r; + } + FfiUnsafe(s) => { + return FfiBadUnion(def.did, s); + } + } } + FfiSafe } - [ref hint] => { - if !hint.is_ffi_safe() { - // FIXME: This shouldn't be reachable: we should check - // this earlier. - return FfiUnsafe( - "enum has unexpected #[repr(...)] attribute") + AdtKind::Enum => { + if def.variants.is_empty() { + // Empty enums are okay... although sort of useless. + return FfiSafe; } - // Enum with an explicitly sized discriminant; either - // a C-style enum or a discriminated union. + // Check for a repr() attribute to specify the size of the + // discriminant. + let repr_hints = cx.lookup_repr_hints(def.did); + match &repr_hints[..] { + &[] => { + // Special-case types like `Option`. 
+ if !is_repr_nullable_ptr(cx, def, substs) { + return FfiUnsafe("found enum without foreign-function-safe \ + representation annotation in foreign \ + module, consider adding a #[repr(...)] \ + attribute to the type"); + } + } + &[ref hint] => { + if !hint.is_ffi_safe() { + // FIXME: This shouldn't be reachable: we should check + // this earlier. + return FfiUnsafe("enum has unexpected #[repr(...)] attribute"); + } - // The layout of enum variants is implicitly repr(C). - // FIXME: Is that correct? - } - _ => { - // FIXME: This shouldn't be reachable: we should check - // this earlier. - return FfiUnsafe( - "enum has too many #[repr(...)] attributes"); - } - } + // Enum with an explicitly sized discriminant; either + // a C-style enum or a discriminated union. - // Check the contained variants. - for variant in &def.variants { - for field in &variant.fields { - let arg = infer::normalize_associated_type(cx, &field.ty(cx, substs)); - let r = self.check_type_for_ffi(cache, arg); - match r { - FfiSafe => {} - FfiBadStruct(..) | FfiBadEnum(..) => { return r; } - FfiUnsafe(s) => { return FfiBadEnum(def.did, s); } + // The layout of enum variants is implicitly repr(C). + // FIXME: Is that correct? + } + _ => { + // FIXME: This shouldn't be reachable: we should check + // this earlier. + return FfiUnsafe("enum has too many #[repr(...)] attributes"); + } } + + // Check the contained variants. + for variant in &def.variants { + for field in &variant.fields { + let arg = cx.normalize_associated_type(&field.ty(cx, substs)); + let r = self.check_type_for_ffi(cache, arg); + match r { + FfiSafe => {} + FfiBadStruct(..) | FfiBadUnion(..) | FfiBadEnum(..) => { + return r; + } + FfiUnsafe(s) => { + return FfiBadEnum(def.did, s); + } + } + } + } + FfiSafe } } - FfiSafe } ty::TyChar => { @@ -521,8 +560,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } // Primitive types with a stable representation. - ty::TyBool | ty::TyInt(..) | ty::TyUint(..) | - ty::TyFloat(..) 
=> FfiSafe, + ty::TyBool | ty::TyInt(..) | ty::TyUint(..) | ty::TyFloat(..) | ty::TyNever => FfiSafe, ty::TyBox(..) => { FfiUnsafe("found Rust type Box<_> in foreign module, \ @@ -534,7 +572,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { consider using a raw pointer instead") } - ty::TyTrait(..) => { + ty::TyDynamic(..) => { FfiUnsafe("found Rust trait type in foreign module, \ consider using a raw pointer instead") } @@ -546,41 +584,31 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { ty::TyTuple(_) => { FfiUnsafe("found Rust tuple type in foreign module; \ - consider using a struct instead`") + consider using a struct instead") } - ty::TyRawPtr(ref m) | ty::TyRef(_, ref m) => { - self.check_type_for_ffi(cache, m.ty) - } + ty::TyRawPtr(ref m) | + ty::TyRef(_, ref m) => self.check_type_for_ffi(cache, m.ty), - ty::TyArray(ty, _) => { - self.check_type_for_ffi(cache, ty) - } + ty::TyArray(ty, _) => self.check_type_for_ffi(cache, ty), - ty::TyBareFn(None, bare_fn) => { + ty::TyFnPtr(bare_fn) => { match bare_fn.abi { - abi::Rust | - abi::RustIntrinsic | - abi::PlatformIntrinsic | - abi::RustCall => { - return FfiUnsafe( - "found function pointer with Rust calling \ - convention in foreign module; consider using an \ - `extern` function pointer") + Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic | Abi::RustCall => { + return FfiUnsafe("found function pointer with Rust calling convention in \ + foreign module; consider using an `extern` function \ + pointer") } _ => {} } let sig = cx.erase_late_bound_regions(&bare_fn.sig); - match sig.output { - ty::FnDiverging => {} - ty::FnConverging(output) => { - if !output.is_nil() { - let r = self.check_type_for_ffi(cache, output); - match r { - FfiSafe => {} - _ => { return r; } - } + if !sig.output.is_nil() { + let r = self.check_type_for_ffi(cache, sig.output); + match r { + FfiSafe => {} + _ => { + return r; } } } @@ -588,24 +616,30 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { let r = 
self.check_type_for_ffi(cache, arg); match r { FfiSafe => {} - _ => { return r; } + _ => { + return r; + } } } FfiSafe } - ty::TyParam(..) | ty::TyInfer(..) | ty::TyError | - ty::TyClosure(..) | ty::TyProjection(..) | - ty::TyBareFn(Some(_), _) => { - panic!("Unexpected type in foreign function") - } + ty::TyParam(..) | + ty::TyInfer(..) | + ty::TyError | + ty::TyClosure(..) | + ty::TyProjection(..) | + ty::TyAnon(..) | + ty::TyFnDef(..) => bug!("Unexpected type in foreign function"), } } - fn check_def(&mut self, sp: Span, id: ast::NodeId) { - let tty = ast_ty_to_normalized(self.cx.tcx, id); + fn check_type_for_ffi_and_report_errors(&mut self, sp: Span, ty: Ty<'tcx>) { + // it is only OK to use this function because extern fns cannot have + // any generic types right now: + let ty = self.cx.tcx.normalize_associated_type(&ty); - match ImproperCTypesVisitor::check_type_for_ffi(self, &mut FnvHashSet(), tty) { + match self.check_type_for_ffi(&mut FxHashSet(), ty) { FfiResult::FfiSafe => {} FfiResult::FfiUnsafe(s) => { self.cx.span_lint(IMPROPER_CTYPES, sp, s); @@ -613,40 +647,54 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { FfiResult::FfiBadStruct(_, s) => { // FIXME: This diagnostic is difficult to read, and doesn't // point at the relevant field. - self.cx.span_lint(IMPROPER_CTYPES, sp, - &format!("found non-foreign-function-safe member in \ - struct marked #[repr(C)]: {}", s)); + self.cx.span_lint(IMPROPER_CTYPES, + sp, + &format!("found non-foreign-function-safe member in struct \ + marked #[repr(C)]: {}", + s)); + } + FfiResult::FfiBadUnion(_, s) => { + // FIXME: This diagnostic is difficult to read, and doesn't + // point at the relevant field. + self.cx.span_lint(IMPROPER_CTYPES, + sp, + &format!("found non-foreign-function-safe member in union \ + marked #[repr(C)]: {}", + s)); } FfiResult::FfiBadEnum(_, s) => { // FIXME: This diagnostic is difficult to read, and doesn't // point at the relevant variant. 
- self.cx.span_lint(IMPROPER_CTYPES, sp, - &format!("found non-foreign-function-safe member in \ - enum: {}", s)); + self.cx.span_lint(IMPROPER_CTYPES, + sp, + &format!("found non-foreign-function-safe member in enum: {}", + s)); } } } -} -impl<'a, 'tcx, 'v> Visitor<'v> for ImproperCTypesVisitor<'a, 'tcx> { - fn visit_ty(&mut self, ty: &hir::Ty) { - match ty.node { - hir::TyPath(..) | - hir::TyBareFn(..) => self.check_def(ty.span, ty.id), - hir::TyVec(..) => { - self.cx.span_lint(IMPROPER_CTYPES, ty.span, - "found Rust slice type in foreign module, consider \ - using a raw pointer instead"); - } - hir::TyFixedLengthVec(ref ty, _) => self.visit_ty(ty), - hir::TyTup(..) => { - self.cx.span_lint(IMPROPER_CTYPES, ty.span, - "found Rust tuple type in foreign module; \ - consider using a struct instead`") + fn check_foreign_fn(&mut self, id: ast::NodeId, decl: &hir::FnDecl) { + let def_id = self.cx.tcx.map.local_def_id(id); + let sig = self.cx.tcx.item_type(def_id).fn_sig(); + let sig = self.cx.tcx.erase_late_bound_regions(&sig); + + for (&input_ty, input_hir) in sig.inputs.iter().zip(&decl.inputs) { + self.check_type_for_ffi_and_report_errors(input_hir.ty.span, &input_ty); + } + + if let hir::Return(ref ret_hir) = decl.output { + let ret_ty = sig.output; + if !ret_ty.is_nil() { + self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty); } - _ => intravisit::walk_ty(self, ty) } } + + fn check_foreign_static(&mut self, id: ast::NodeId, span: Span) { + let def_id = self.cx.tcx.map.local_def_id(id); + let ty = self.cx.tcx.item_type(def_id); + self.check_type_for_ffi_and_report_errors(span, ty); + } } #[derive(Copy, Clone)] @@ -660,29 +708,79 @@ impl LintPass for ImproperCTypes { impl LateLintPass for ImproperCTypes { fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { - fn check_ty(cx: &LateContext, ty: &hir::Ty) { - let mut vis = ImproperCTypesVisitor { cx: cx }; - vis.visit_ty(ty); - } - - fn check_foreign_fn(cx: &LateContext, decl: &hir::FnDecl) { - for 
input in &decl.inputs { - check_ty(cx, &*input.ty); - } - if let hir::Return(ref ret_ty) = decl.output { - let tty = ast_ty_to_normalized(cx.tcx, ret_ty.id); - if !tty.is_nil() { - check_ty(cx, &ret_ty); + let mut vis = ImproperCTypesVisitor { cx: cx }; + if let hir::ItemForeignMod(ref nmod) = it.node { + if nmod.abi != Abi::RustIntrinsic && nmod.abi != Abi::PlatformIntrinsic { + for ni in &nmod.items { + match ni.node { + hir::ForeignItemFn(ref decl, _) => { + vis.check_foreign_fn(ni.id, decl); + } + hir::ForeignItemStatic(ref ty, _) => { + vis.check_foreign_static(ni.id, ty.span); + } + } } } } + } +} - if let hir::ItemForeignMod(ref nmod) = it.node { - if nmod.abi != abi::RustIntrinsic && nmod.abi != abi::PlatformIntrinsic { - for ni in &nmod.items { - match ni.node { - hir::ForeignItemFn(ref decl, _) => check_foreign_fn(cx, &**decl), - hir::ForeignItemStatic(ref t, _) => check_ty(cx, &**t) +pub struct VariantSizeDifferences; + +impl LintPass for VariantSizeDifferences { + fn get_lints(&self) -> LintArray { + lint_array!(VARIANT_SIZE_DIFFERENCES) + } +} + +impl LateLintPass for VariantSizeDifferences { + fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + if let hir::ItemEnum(ref enum_definition, ref gens) = it.node { + if gens.ty_params.is_empty() { + // sizes only make sense for non-generic types + let t = cx.tcx.item_type(cx.tcx.map.local_def_id(it.id)); + let layout = cx.tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let ty = cx.tcx.erase_regions(&t); + ty.layout(&infcx).unwrap_or_else(|e| { + bug!("failed to get layout for `{}`: {}", t, e) + }) + }); + + if let Layout::General { ref variants, ref size, discr, .. 
} = *layout { + let discr_size = Primitive::Int(discr).size(&cx.tcx.data_layout).bytes(); + + debug!("enum `{}` is {} bytes large", t, size.bytes()); + + let (largest, slargest, largest_index) = enum_definition.variants + .iter() + .zip(variants) + .map(|(variant, variant_layout)| { + // Subtract the size of the enum discriminant + let bytes = variant_layout.min_size + .bytes() + .saturating_sub(discr_size); + + debug!("- variant `{}` is {} bytes large", variant.node.name, bytes); + bytes + }) + .enumerate() + .fold((0, 0, 0), |(l, s, li), (idx, size)| if size > l { + (size, l, idx) + } else if size > s { + (l, size, li) + } else { + (l, s, li) + }); + + // we only warn if the largest variant is at least thrice as large as + // the second-largest. + if largest > slargest * 3 && slargest > 0 { + cx.span_lint(VARIANT_SIZE_DIFFERENCES, + enum_definition.variants[largest_index].span, + &format!("enum variant is more than three times larger \ + ({} bytes) than the next largest", + largest)); } } } diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index 18a3a96069e19..873c141065ec3 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -8,24 +8,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::pat_util; -use middle::ty; -use middle::ty::adjustment; -use util::nodemap::FnvHashMap; +use rustc::ty; +use rustc::ty::adjustment; +use util::nodemap::FxHashMap; use lint::{LateContext, EarlyContext, LintContext, LintArray}; use lint::{LintPass, EarlyLintPass, LateLintPass}; use std::collections::hash_map::Entry::{Occupied, Vacant}; use syntax::ast; -use syntax::attr::{self, AttrMetaMethods}; -use syntax::codemap::Span; -use syntax::feature_gate::{KNOWN_ATTRIBUTES, AttributeType}; +use syntax::attr; +use syntax::feature_gate::{BUILTIN_ATTRIBUTES, AttributeType}; +use syntax::symbol::keywords; use syntax::ptr::P; +use syntax_pos::Span; use rustc_back::slice; -use rustc_front::hir; -use rustc_front::intravisit::FnKind; +use rustc::hir; +use rustc::hir::intravisit::FnKind; declare_lint! { pub UNUSED_MUT, @@ -41,15 +41,19 @@ impl UnusedMut { // collect all mutable pattern and group their NodeIDs by their Identifier to // avoid false warnings in match arms with multiple patterns - let mut mutables = FnvHashMap(); + let mut mutables = FxHashMap(); for p in pats { - pat_util::pat_bindings(&cx.tcx.def_map, p, |mode, id, _, path1| { + p.each_binding(|mode, id, _, path1| { let name = path1.node; if let hir::BindByValue(hir::MutMutable) = mode { if !name.as_str().starts_with("_") { - match mutables.entry(name.0 as usize) { - Vacant(entry) => { entry.insert(vec![id]); }, - Occupied(mut entry) => { entry.get_mut().push(id); }, + match mutables.entry(name) { + Vacant(entry) => { + entry.insert(vec![id]); + } + Occupied(mut entry) => { + entry.get_mut().push(id); + } } } } @@ -59,7 +63,8 @@ impl UnusedMut { let used_mutables = cx.tcx.used_mut_nodes.borrow(); for (_, v) in &mutables { if !v.iter().any(|e| used_mutables.contains(e)) { - cx.span_lint(UNUSED_MUT, cx.tcx.map.span(v[0]), + cx.span_lint(UNUSED_MUT, + cx.tcx.map.span(v[0]), "variable does not need to be mutable"); } } @@ -89,9 +94,13 @@ impl LateLintPass for UnusedMut { } } - fn check_fn(&mut self, cx: 
&LateContext, - _: FnKind, decl: &hir::FnDecl, - _: &hir::Block, _: Span, _: ast::NodeId) { + fn check_fn(&mut self, + cx: &LateContext, + _: FnKind, + decl: &hir::FnDecl, + _: &hir::Expr, + _: Span, + _: ast::NodeId) { for a in &decl.inputs { self.check_unused_mut_pat(cx, slice::ref_slice(&a.pat)); } @@ -123,19 +132,19 @@ impl LateLintPass for UnusedResults { fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) { let expr = match s.node { hir::StmtSemi(ref expr, _) => &**expr, - _ => return + _ => return, }; if let hir::ExprRet(..) = expr.node { return; } - let t = cx.tcx.expr_ty(&expr); + let t = cx.tcx.tables().expr_ty(&expr); let warned = match t.sty { ty::TyTuple(ref tys) if tys.is_empty() => return, + ty::TyNever => return, ty::TyBool => return, - ty::TyStruct(def, _) | - ty::TyEnum(def, _) => { + ty::TyAdt(def, _) => { let attrs = cx.tcx.get_attrs(def.did); check_must_use(cx, &attrs[..], s.span) } @@ -150,12 +159,9 @@ impl LateLintPass for UnusedResults { if attr.check_name("must_use") { let mut msg = "unused result which must be used".to_string(); // check for #[must_use="..."] - match attr.value_str() { - None => {} - Some(s) => { - msg.push_str(": "); - msg.push_str(&s); - } + if let Some(s) = attr.value_str() { + msg.push_str(": "); + msg.push_str(&s.as_str()); } cx.span_lint(UNUSED_MUST_USE, sp, &msg); return true; @@ -186,8 +192,8 @@ impl LateLintPass for UnusedUnsafe { if let hir::ExprBlock(ref blk) = e.node { // Don't warn about generated blocks, that'll just pollute the output. 
if blk.rules == hir::UnsafeBlock(hir::UserProvided) && - !cx.tcx.used_unsafe.borrow().contains(&blk.id) { - cx.span_lint(UNUSED_UNSAFE, blk.span, "unnecessary `unsafe` block"); + !cx.tcx.used_unsafe.borrow().contains(&blk.id) { + cx.span_lint(UNUSED_UNSAFE, blk.span, "unnecessary `unsafe` block"); } } } @@ -211,9 +217,8 @@ impl LintPass for PathStatements { impl LateLintPass for PathStatements { fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) { if let hir::StmtSemi(ref expr, _) = s.node { - if let hir::ExprPath(..) = expr.node { - cx.span_lint(PATH_STATEMENTS, s.span, - "path statement with no effect"); + if let hir::ExprPath(_) = expr.node { + cx.span_lint(PATH_STATEMENTS, s.span, "path statement with no effect"); } } } @@ -236,47 +241,52 @@ impl LintPass for UnusedAttributes { impl LateLintPass for UnusedAttributes { fn check_attribute(&mut self, cx: &LateContext, attr: &ast::Attribute) { + debug!("checking attribute: {:?}", attr); + // Note that check_name() marks the attribute as used if it matches. - for &(ref name, ty, _) in KNOWN_ATTRIBUTES { + for &(ref name, ty, _) in BUILTIN_ATTRIBUTES { match ty { AttributeType::Whitelisted if attr.check_name(name) => { + debug!("{:?} is Whitelisted", name); break; - }, - _ => () + } + _ => (), } } let plugin_attributes = cx.sess().plugin_attributes.borrow_mut(); for &(ref name, ty) in plugin_attributes.iter() { - if ty == AttributeType::Whitelisted && attr.check_name(&*name) { + if ty == AttributeType::Whitelisted && attr.check_name(&name) { + debug!("{:?} (plugin attr) is whitelisted with ty {:?}", name, ty); break; } } if !attr::is_used(attr) { + debug!("Emitting warning for: {:?}", attr); cx.span_lint(UNUSED_ATTRIBUTES, attr.span, "unused attribute"); // Is it a builtin attribute that must be used at the crate level? 
- let known_crate = KNOWN_ATTRIBUTES.iter().find(|&&(name, ty, _)| { - attr.name() == name && - ty == AttributeType::CrateLevel - }).is_some(); + let known_crate = BUILTIN_ATTRIBUTES.iter() + .find(|&&(name, ty, _)| attr.name() == name && ty == AttributeType::CrateLevel) + .is_some(); // Has a plugin registered this attribute as one which must be used at // the crate level? let plugin_crate = plugin_attributes.iter() - .find(|&&(ref x, t)| { - &*attr.name() == &*x && - AttributeType::CrateLevel == t - }).is_some(); - if known_crate || plugin_crate { - let msg = match attr.node.style { - ast::AttrStyle::Outer => "crate-level attribute should be an inner \ - attribute: add an exclamation mark: #![foo]", - ast::AttrStyle::Inner => "crate-level attribute should be in the \ - root module", + .find(|&&(ref x, t)| attr.name() == &**x && AttributeType::CrateLevel == t) + .is_some(); + if known_crate || plugin_crate { + let msg = match attr.style { + ast::AttrStyle::Outer => { + "crate-level attribute should be an inner attribute: add an exclamation \ + mark: #![foo]" + } + ast::AttrStyle::Inner => "crate-level attribute should be in the root module", }; cx.span_lint(UNUSED_ATTRIBUTES, attr.span, msg); } + } else { + debug!("Attr was used: {:?}", attr); } } } @@ -291,12 +301,16 @@ declare_lint! 
{ pub struct UnusedParens; impl UnusedParens { - fn check_unused_parens_core(&self, cx: &EarlyContext, value: &ast::Expr, msg: &str, + fn check_unused_parens_core(&self, + cx: &EarlyContext, + value: &ast::Expr, + msg: &str, struct_lit_needs_parens: bool) { - if let ast::ExprParen(ref inner) = value.node { - let necessary = struct_lit_needs_parens && contains_exterior_struct_lit(&**inner); + if let ast::ExprKind::Paren(ref inner) = value.node { + let necessary = struct_lit_needs_parens && contains_exterior_struct_lit(&inner); if !necessary { - cx.span_lint(UNUSED_PARENS, value.span, + cx.span_lint(UNUSED_PARENS, + value.span, &format!("unnecessary parentheses around {}", msg)) } } @@ -308,31 +322,30 @@ impl UnusedParens { /// y: 1 }) == foo` does not. fn contains_exterior_struct_lit(value: &ast::Expr) -> bool { match value.node { - ast::ExprStruct(..) => true, + ast::ExprKind::Struct(..) => true, - ast::ExprAssign(ref lhs, ref rhs) | - ast::ExprAssignOp(_, ref lhs, ref rhs) | - ast::ExprBinary(_, ref lhs, ref rhs) => { + ast::ExprKind::Assign(ref lhs, ref rhs) | + ast::ExprKind::AssignOp(_, ref lhs, ref rhs) | + ast::ExprKind::Binary(_, ref lhs, ref rhs) => { // X { y: 1 } + X { y: 2 } - contains_exterior_struct_lit(&**lhs) || - contains_exterior_struct_lit(&**rhs) + contains_exterior_struct_lit(&lhs) || contains_exterior_struct_lit(&rhs) } - ast::ExprUnary(_, ref x) | - ast::ExprCast(ref x, _) | - ast::ExprType(ref x, _) | - ast::ExprField(ref x, _) | - ast::ExprTupField(ref x, _) | - ast::ExprIndex(ref x, _) => { + ast::ExprKind::Unary(_, ref x) | + ast::ExprKind::Cast(ref x, _) | + ast::ExprKind::Type(ref x, _) | + ast::ExprKind::Field(ref x, _) | + ast::ExprKind::TupField(ref x, _) | + ast::ExprKind::Index(ref x, _) => { // &X { y: 1 }, X { y: 1 }.y - contains_exterior_struct_lit(&**x) + contains_exterior_struct_lit(&x) } - ast::ExprMethodCall(_, _, ref exprs) => { + ast::ExprKind::MethodCall(.., ref exprs) => { // X { y: 1 }.bar(...) 
- contains_exterior_struct_lit(&*exprs[0]) + contains_exterior_struct_lit(&exprs[0]) } - _ => false + _ => false, } } } @@ -346,34 +359,34 @@ impl LintPass for UnusedParens { impl EarlyLintPass for UnusedParens { fn check_expr(&mut self, cx: &EarlyContext, e: &ast::Expr) { + use syntax::ast::ExprKind::*; let (value, msg, struct_lit_needs_parens) = match e.node { - ast::ExprIf(ref cond, _, _) => (cond, "`if` condition", true), - ast::ExprWhile(ref cond, _, _) => (cond, "`while` condition", true), - ast::ExprIfLet(_, ref cond, _, _) => (cond, "`if let` head expression", true), - ast::ExprWhileLet(_, ref cond, _, _) => (cond, "`while let` head expression", true), - ast::ExprForLoop(_, ref cond, _, _) => (cond, "`for` head expression", true), - ast::ExprMatch(ref head, _) => (head, "`match` head expression", true), - ast::ExprRet(Some(ref value)) => (value, "`return` value", false), - ast::ExprAssign(_, ref value) => (value, "assigned value", false), - ast::ExprAssignOp(_, _, ref value) => (value, "assigned value", false), - ast::ExprInPlace(_, ref value) => (value, "emplacement value", false), - _ => return + If(ref cond, ..) => (cond, "`if` condition", true), + While(ref cond, ..) => (cond, "`while` condition", true), + IfLet(_, ref cond, ..) => (cond, "`if let` head expression", true), + WhileLet(_, ref cond, ..) => (cond, "`while let` head expression", true), + ForLoop(_, ref cond, ..) 
=> (cond, "`for` head expression", true), + Match(ref head, _) => (head, "`match` head expression", true), + Ret(Some(ref value)) => (value, "`return` value", false), + Assign(_, ref value) => (value, "assigned value", false), + AssignOp(.., ref value) => (value, "assigned value", false), + InPlace(_, ref value) => (value, "emplacement value", false), + _ => return, }; - self.check_unused_parens_core(cx, &**value, msg, struct_lit_needs_parens); + self.check_unused_parens_core(cx, &value, msg, struct_lit_needs_parens); } fn check_stmt(&mut self, cx: &EarlyContext, s: &ast::Stmt) { let (value, msg) = match s.node { - ast::StmtDecl(ref decl, _) => match decl.node { - ast::DeclLocal(ref local) => match local.init { + ast::StmtKind::Local(ref local) => { + match local.init { Some(ref value) => (value, "assigned value"), - None => return - }, - _ => return - }, - _ => return + None => return, + } + } + _ => return, }; - self.check_unused_parens_core(cx, &**value, msg, false); + self.check_unused_parens_core(cx, &value, msg, false); } } @@ -392,17 +405,13 @@ impl LintPass for UnusedImportBraces { } } -impl LateLintPass for UnusedImportBraces { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { - if let hir::ItemUse(ref view_path) = item.node { - if let hir::ViewPathList(_, ref items) = view_path.node { - if items.len() == 1 { - if let hir::PathListIdent {ref name, ..} = items[0].node { - let m = format!("braces around {} is unnecessary", - name); - cx.span_lint(UNUSED_IMPORT_BRACES, item.span, - &m[..]); - } +impl EarlyLintPass for UnusedImportBraces { + fn check_item(&mut self, cx: &EarlyContext, item: &ast::Item) { + if let ast::ItemKind::Use(ref view_path) = item.node { + if let ast::ViewPathList(_, ref items) = view_path.node { + if items.len() == 1 && items[0].node.name.name != keywords::SelfValue.name() { + let msg = format!("braces around {} is unnecessary", items[0].node.name); + cx.span_lint(UNUSED_IMPORT_BRACES, item.span, &msg); } } } @@ -428,23 
+437,23 @@ impl LateLintPass for UnusedAllocation { fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { match e.node { hir::ExprBox(_) => {} - _ => return + _ => return, } - if let Some(adjustment) = cx.tcx.tables.borrow().adjustments.get(&e.id) { - if let adjustment::AdjustDerefRef(adjustment::AutoDerefRef { - ref autoref, .. - }) = *adjustment { + if let Some(adjustment) = cx.tcx.tables().adjustments.get(&e.id) { + if let adjustment::Adjust::DerefRef { autoref, .. } = adjustment.kind { match autoref { - &Some(adjustment::AutoPtr(_, hir::MutImmutable)) => { - cx.span_lint(UNUSED_ALLOCATION, e.span, + Some(adjustment::AutoBorrow::Ref(_, hir::MutImmutable)) => { + cx.span_lint(UNUSED_ALLOCATION, + e.span, "unnecessary allocation, use & instead"); } - &Some(adjustment::AutoPtr(_, hir::MutMutable)) => { - cx.span_lint(UNUSED_ALLOCATION, e.span, + Some(adjustment::AutoBorrow::Ref(_, hir::MutMutable)) => { + cx.span_lint(UNUSED_ALLOCATION, + e.span, "unnecessary allocation, use &mut instead"); } - _ => () + _ => (), } } } diff --git a/src/librustc_llvm/Cargo.lock b/src/librustc_llvm/Cargo.lock new file mode 100644 index 0000000000000..17678ef2bbd8f --- /dev/null +++ b/src/librustc_llvm/Cargo.lock @@ -0,0 +1,22 @@ +[root] +name = "rustc_llvm" +version = "0.0.0" +dependencies = [ + "build_helper 0.1.0", + "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_bitflags 0.0.0", +] + +[[package]] +name = "build_helper" +version = "0.1.0" + +[[package]] +name = "gcc" +version = "0.3.28" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_bitflags" +version = "0.0.0" + diff --git a/src/librustc_llvm/Cargo.toml b/src/librustc_llvm/Cargo.toml new file mode 100644 index 0000000000000..f97daa22ff662 --- /dev/null +++ b/src/librustc_llvm/Cargo.toml @@ -0,0 +1,20 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_llvm" +version = "0.0.0" +build = 
"build.rs" + +[lib] +name = "rustc_llvm" +path = "lib.rs" +crate-type = ["dylib"] + +[features] +static-libstdcpp = [] + +[dependencies] +rustc_bitflags = { path = "../librustc_bitflags" } + +[build-dependencies] +build_helper = { path = "../build_helper" } +gcc = "0.3.27" diff --git a/src/librustc_llvm/archive_ro.rs b/src/librustc_llvm/archive_ro.rs index 85c0c721114f5..b3f5f8e536052 100644 --- a/src/librustc_llvm/archive_ro.rs +++ b/src/librustc_llvm/archive_ro.rs @@ -18,7 +18,9 @@ use std::path::Path; use std::slice; use std::str; -pub struct ArchiveRO { ptr: ArchiveRef } +pub struct ArchiveRO { + ptr: ArchiveRef, +} pub struct Iter<'a> { archive: &'a ArchiveRO, @@ -61,11 +63,16 @@ impl ArchiveRO { } } - pub fn raw(&self) -> ArchiveRef { self.ptr } + pub fn raw(&self) -> ArchiveRef { + self.ptr + } pub fn iter(&self) -> Iter { unsafe { - Iter { ptr: ::LLVMRustArchiveIteratorNew(self.ptr), archive: self } + Iter { + ptr: ::LLVMRustArchiveIteratorNew(self.ptr), + archive: self, + } } } } @@ -79,14 +86,17 @@ impl Drop for ArchiveRO { } impl<'a> Iterator for Iter<'a> { - type Item = Child<'a>; + type Item = Result, String>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option, String>> { let ptr = unsafe { ::LLVMRustArchiveIteratorNext(self.ptr) }; if ptr.is_null() { - None + ::last_error().map(Err) } else { - Some(Child { ptr: ptr, _data: marker::PhantomData }) + Some(Ok(Child { + ptr: ptr, + _data: marker::PhantomData, + })) } } } @@ -107,8 +117,7 @@ impl<'a> Child<'a> { if name_ptr.is_null() { None } else { - let name = slice::from_raw_parts(name_ptr as *const u8, - name_len as usize); + let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize); str::from_utf8(name).ok().map(|s| s.trim()) } } @@ -125,11 +134,15 @@ impl<'a> Child<'a> { } } - pub fn raw(&self) -> ::ArchiveChildRef { self.ptr } + pub fn raw(&self) -> ::ArchiveChildRef { + self.ptr + } } impl<'a> Drop for Child<'a> { fn drop(&mut self) { - unsafe { 
::LLVMRustArchiveChildFree(self.ptr); } + unsafe { + ::LLVMRustArchiveChildFree(self.ptr); + } } } diff --git a/src/librustc_llvm/build.rs b/src/librustc_llvm/build.rs new file mode 100644 index 0000000000000..50bc3e7b6243f --- /dev/null +++ b/src/librustc_llvm/build.rs @@ -0,0 +1,247 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +extern crate gcc; +extern crate build_helper; + +use std::process::Command; +use std::env; +use std::path::{PathBuf, Path}; + +use build_helper::output; + +fn detect_llvm_link(llvm_config: &Path) -> (&'static str, Option<&'static str>) { + let mut version_cmd = Command::new(llvm_config); + version_cmd.arg("--version"); + let version_output = output(&mut version_cmd); + let mut parts = version_output.split('.').take(2) + .filter_map(|s| s.parse::().ok()); + if let (Some(major), Some(minor)) = (parts.next(), parts.next()) { + if major > 3 || (major == 3 && minor >= 9) { + // Force the link mode we want, preferring static by default, but + // possibly overridden by `configure --enable-llvm-link-shared`. + if env::var_os("LLVM_LINK_SHARED").is_some() { + return ("dylib", Some("--link-shared")); + } else { + return ("static", Some("--link-static")); + } + } else if major == 3 && minor == 8 { + // Find out LLVM's default linking mode. 
+ let mut mode_cmd = Command::new(llvm_config); + mode_cmd.arg("--shared-mode"); + if output(&mut mode_cmd).trim() == "shared" { + return ("dylib", None); + } else { + return ("static", None); + } + } + } + ("static", None) +} + +fn main() { + println!("cargo:rustc-cfg=cargobuild"); + + let target = env::var("TARGET").expect("TARGET was not set"); + let llvm_config = env::var_os("LLVM_CONFIG") + .map(PathBuf::from) + .unwrap_or_else(|| { + if let Some(dir) = env::var_os("CARGO_TARGET_DIR").map(PathBuf::from) { + let to_test = dir.parent() + .unwrap() + .parent() + .unwrap() + .join(&target) + .join("llvm/bin/llvm-config"); + if Command::new(&to_test).output().is_ok() { + return to_test; + } + } + PathBuf::from("llvm-config") + }); + + println!("cargo:rerun-if-changed={}", llvm_config.display()); + + // Test whether we're cross-compiling LLVM. This is a pretty rare case + // currently where we're producing an LLVM for a different platform than + // what this build script is currently running on. + // + // In that case, there's no guarantee that we can actually run the target, + // so the build system works around this by giving us the LLVM_CONFIG for + // the host platform. This only really works if the host LLVM and target + // LLVM are compiled the same way, but for us that's typically the case. + // + // We *want* detect this cross compiling situation by asking llvm-config + // what it's host-target is. If that's not the TARGET, then we're cross + // compiling. Unfortunately `llvm-config` seems either be buggy, or we're + // misconfiguring it, because the `i686-pc-windows-gnu` build of LLVM will + // report itself with a `--host-target` of `x86_64-pc-windows-gnu`. This + // tricks us into thinking we're doing a cross build when we aren't, so + // havoc ensues. + // + // In any case, if we're cross compiling, this generally just means that we + // can't trust all the output of llvm-config becaues it might be targeted + // for the host rather than the target. 
As a result a bunch of blocks below + // are gated on `if !is_crossed` + let target = env::var("TARGET").expect("TARGET was not set"); + let host = env::var("HOST").expect("HOST was not set"); + let is_crossed = target != host; + + let optional_components = + ["x86", "arm", "aarch64", "mips", "powerpc", "pnacl", "systemz", "jsbackend", "msp430"]; + + // FIXME: surely we don't need all these components, right? Stuff like mcjit + // or interpreter the compiler itself never uses. + let required_components = &["ipo", + "bitreader", + "bitwriter", + "linker", + "asmparser", + "mcjit", + "interpreter", + "instrumentation"]; + + let components = output(Command::new(&llvm_config).arg("--components")); + let mut components = components.split_whitespace().collect::>(); + components.retain(|c| optional_components.contains(c) || required_components.contains(c)); + + for component in required_components { + if !components.contains(component) { + panic!("require llvm component {} but wasn't found", component); + } + } + + for component in components.iter() { + println!("cargo:rustc-cfg=llvm_component=\"{}\"", component); + } + + // Link in our own LLVM shims, compiled with the same flags as LLVM + let mut cmd = Command::new(&llvm_config); + cmd.arg("--cxxflags"); + let cxxflags = output(&mut cmd); + let mut cfg = gcc::Config::new(); + for flag in cxxflags.split_whitespace() { + // Ignore flags like `-m64` when we're doing a cross build + if is_crossed && flag.starts_with("-m") { + continue; + } + cfg.flag(flag); + } + + for component in &components[..] 
{ + let mut flag = String::from("-DLLVM_COMPONENT_"); + flag.push_str(&component.to_uppercase()); + cfg.flag(&flag); + } + + if env::var_os("LLVM_RUSTLLVM").is_some() { + cfg.flag("-DLLVM_RUSTLLVM"); + } + + println!("cargo:rerun-if-changed=../rustllvm/PassWrapper.cpp"); + println!("cargo:rerun-if-changed=../rustllvm/RustWrapper.cpp"); + println!("cargo:rerun-if-changed=../rustllvm/ArchiveWrapper.cpp"); + cfg.file("../rustllvm/PassWrapper.cpp") + .file("../rustllvm/RustWrapper.cpp") + .file("../rustllvm/ArchiveWrapper.cpp") + .cpp(true) + .cpp_link_stdlib(None) // we handle this below + .compile("librustllvm.a"); + + let (llvm_kind, llvm_link_arg) = detect_llvm_link(&llvm_config); + + // Link in all LLVM libraries, if we're uwring the "wrong" llvm-config then + // we don't pick up system libs because unfortunately they're for the host + // of llvm-config, not the target that we're attempting to link. + let mut cmd = Command::new(&llvm_config); + cmd.arg("--libs"); + + if let Some(link_arg) = llvm_link_arg { + cmd.arg(link_arg); + } + + if !is_crossed { + cmd.arg("--system-libs"); + } + cmd.args(&components[..]); + + for lib in output(&mut cmd).split_whitespace() { + let name = if lib.starts_with("-l") { + &lib[2..] + } else if lib.starts_with("-") { + &lib[1..] + } else if Path::new(lib).exists() { + // On MSVC llvm-config will print the full name to libraries, but + // we're only interested in the name part + let name = Path::new(lib).file_name().unwrap().to_str().unwrap(); + name.trim_right_matches(".lib") + } else if lib.ends_with(".lib") { + // Some MSVC libraries just come up with `.lib` tacked on, so chop + // that off + lib.trim_right_matches(".lib") + } else { + continue; + }; + + // Don't need or want this library, but LLVM's CMake build system + // doesn't provide a way to disable it, so filter it here even though we + // may or may not have built it. 
We don't reference anything from this + // library and it otherwise may just pull in extra dependencies on + // libedit which we don't want + if name == "LLVMLineEditor" { + continue; + } + + let kind = if name.starts_with("LLVM") { + llvm_kind + } else { + "dylib" + }; + println!("cargo:rustc-link-lib={}={}", kind, name); + } + + // LLVM ldflags + // + // If we're a cross-compile of LLVM then unfortunately we can't trust these + // ldflags (largely where all the LLVM libs are located). Currently just + // hack around this by replacing the host triple with the target and pray + // that those -L directories are the same! + let mut cmd = Command::new(&llvm_config); + cmd.arg("--ldflags"); + for lib in output(&mut cmd).split_whitespace() { + if lib.starts_with("-LIBPATH:") { + println!("cargo:rustc-link-search=native={}", &lib[9..]); + } else if is_crossed { + if lib.starts_with("-L") { + println!("cargo:rustc-link-search=native={}", + lib[2..].replace(&host, &target)); + } + } else if lib.starts_with("-l") { + println!("cargo:rustc-link-lib={}", &lib[2..]); + } else if lib.starts_with("-L") { + println!("cargo:rustc-link-search=native={}", &lib[2..]); + } + } + + // C++ runtime library + if !target.contains("msvc") { + if let Some(s) = env::var_os("LLVM_STATIC_STDCPP") { + assert!(!cxxflags.contains("stdlib=libc++")); + let path = PathBuf::from(s); + println!("cargo:rustc-link-search=native={}", + path.parent().unwrap().display()); + println!("cargo:rustc-link-lib=static=stdc++"); + } else if cxxflags.contains("stdlib=libc++") { + println!("cargo:rustc-link-lib=c++"); + } else { + println!("cargo:rustc-link-lib=stdc++"); + } + } +} diff --git a/src/librustc_llvm/diagnostic.rs b/src/librustc_llvm/diagnostic.rs index acb47516150cf..cef6199a74af6 100644 --- a/src/librustc_llvm/diagnostic.rs +++ b/src/librustc_llvm/diagnostic.rs @@ -13,25 +13,31 @@ pub use self::OptimizationDiagnosticKind::*; pub use self::Diagnostic::*; -use libc::{c_char, c_uint}; +use libc::c_uint; 
use std::ptr; -use {ValueRef, TwineRef, DebugLocRef, DiagnosticInfoRef}; +use {DiagnosticInfoRef, TwineRef, ValueRef}; +use ffi::DebugLocRef; #[derive(Copy, Clone)] pub enum OptimizationDiagnosticKind { OptimizationRemark, OptimizationMissed, OptimizationAnalysis, + OptimizationAnalysisFPCommute, + OptimizationAnalysisAliasing, OptimizationFailure, + OptimizationRemarkOther, } impl OptimizationDiagnosticKind { pub fn describe(self) -> &'static str { match self { - OptimizationRemark => "remark", + OptimizationRemark | OptimizationRemarkOther => "remark", OptimizationMissed => "missed", OptimizationAnalysis => "analysis", + OptimizationAnalysisFPCommute => "floating-point", + OptimizationAnalysisAliasing => "aliasing", OptimizationFailure => "failure", } } @@ -39,31 +45,37 @@ impl OptimizationDiagnosticKind { pub struct OptimizationDiagnostic { pub kind: OptimizationDiagnosticKind, - pub pass_name: *const c_char, + pub pass_name: String, pub function: ValueRef, pub debug_loc: DebugLocRef, - pub message: TwineRef, + pub message: String, } impl OptimizationDiagnostic { - unsafe fn unpack(kind: OptimizationDiagnosticKind, di: DiagnosticInfoRef) - -> OptimizationDiagnostic { - - let mut opt = OptimizationDiagnostic { + unsafe fn unpack(kind: OptimizationDiagnosticKind, + di: DiagnosticInfoRef) + -> OptimizationDiagnostic { + let mut function = ptr::null_mut(); + let mut debug_loc = ptr::null_mut(); + + let mut message = None; + let pass_name = super::build_string(|pass_name| + message = super::build_string(|message| + super::LLVMRustUnpackOptimizationDiagnostic(di, + pass_name, + &mut function, + &mut debug_loc, + message) + ) + ); + + OptimizationDiagnostic { kind: kind, - pass_name: ptr::null(), - function: ptr::null_mut(), - debug_loc: ptr::null_mut(), - message: ptr::null_mut(), - }; - - super::LLVMUnpackOptimizationDiagnostic(di, - &mut opt.pass_name, - &mut opt.function, - &mut opt.debug_loc, - &mut opt.message); - - opt + pass_name: pass_name.expect("got a 
non-UTF8 pass name from LLVM"), + function: function, + debug_loc: debug_loc, + message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM") + } } } @@ -75,8 +87,7 @@ pub struct InlineAsmDiagnostic { } impl InlineAsmDiagnostic { - unsafe fn unpack(di: DiagnosticInfoRef) - -> InlineAsmDiagnostic { + unsafe fn unpack(di: DiagnosticInfoRef) -> InlineAsmDiagnostic { let mut opt = InlineAsmDiagnostic { cookie: 0, @@ -84,10 +95,10 @@ impl InlineAsmDiagnostic { instruction: ptr::null_mut(), }; - super::LLVMUnpackInlineAsmDiagnostic(di, - &mut opt.cookie, - &mut opt.message, - &mut opt.instruction); + super::LLVMRustUnpackInlineAsmDiagnostic(di, + &mut opt.cookie, + &mut opt.message, + &mut opt.instruction); opt } @@ -103,25 +114,39 @@ pub enum Diagnostic { impl Diagnostic { pub unsafe fn unpack(di: DiagnosticInfoRef) -> Diagnostic { - let kind = super::LLVMGetDiagInfoKind(di); + use super::DiagnosticKind as Dk; + let kind = super::LLVMRustGetDiagInfoKind(di); match kind { - super::DK_InlineAsm - => InlineAsm(InlineAsmDiagnostic::unpack(di)), - - super::DK_OptimizationRemark - => Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di)), - - super::DK_OptimizationRemarkMissed - => Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di)), - - super::DK_OptimizationRemarkAnalysis - => Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di)), - - super::DK_OptimizationFailure - => Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di)), - - _ => UnknownDiagnostic(di) + Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)), + + Dk::OptimizationRemark => { + Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di)) + } + Dk::OptimizationRemarkOther => { + Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di)) + } + Dk::OptimizationRemarkMissed => { + Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di)) + } + + Dk::OptimizationRemarkAnalysis => { + 
Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di)) + } + + Dk::OptimizationRemarkAnalysisFPCommute => { + Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di)) + } + + Dk::OptimizationRemarkAnalysisAliasing => { + Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di)) + } + + Dk::OptimizationFailure => { + Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di)) + } + + _ => UnknownDiagnostic(di), } } } diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs new file mode 100644 index 0000000000000..b8f1540ad84d6 --- /dev/null +++ b/src/librustc_llvm/ffi.rs @@ -0,0 +1,1653 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use debuginfo::{DIBuilderRef, DIDescriptor, DIFile, DILexicalBlock, DISubprogram, DIType, + DIBasicType, DIDerivedType, DICompositeType, DIScope, DIVariable, + DIGlobalVariable, DIArray, DISubrange, DITemplateTypeParameter, DIEnumerator, + DINameSpace, DIFlags}; + +use libc::{c_uint, c_int, size_t, c_char}; +use libc::{c_longlong, c_ulonglong, c_void}; + +use RustStringRef; + +pub type Opcode = u32; +pub type Bool = c_uint; + +pub const True: Bool = 1 as Bool; +pub const False: Bool = 0 as Bool; + +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum LLVMRustResult { + Success, + Failure, +} +// Consts for the LLVM CallConv type, pre-cast to usize. + +/// LLVM CallingConv::ID. Should we wrap this? 
+#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum CallConv { + CCallConv = 0, + FastCallConv = 8, + ColdCallConv = 9, + X86StdcallCallConv = 64, + X86FastcallCallConv = 65, + ArmAapcsCallConv = 67, + X86_64_SysV = 78, + X86_64_Win64 = 79, + X86_VectorCall = 80, +} + +/// LLVMRustLinkage +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[repr(C)] +pub enum Linkage { + ExternalLinkage = 0, + AvailableExternallyLinkage = 1, + LinkOnceAnyLinkage = 2, + LinkOnceODRLinkage = 3, + WeakAnyLinkage = 4, + WeakODRLinkage = 5, + AppendingLinkage = 6, + InternalLinkage = 7, + PrivateLinkage = 8, + ExternalWeakLinkage = 9, + CommonLinkage = 10, +} + +/// LLVMDiagnosticSeverity +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub enum DiagnosticSeverity { + Error = 0, + Warning = 1, + Remark = 2, + Note = 3, +} + +/// LLVMDLLStorageClass +#[derive(Copy, Clone)] +#[repr(C)] +pub enum DLLStorageClass { + Default = 0, + DllImport = 1, // Function to be imported from DLL. + DllExport = 2, // Function to be accessible from DLL. 
+} + +/// Matches LLVMRustAttribute in rustllvm.h +/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind, +/// though it is not ABI compatible (since it's a C++ enum) +#[repr(C)] +#[derive(Copy, Clone, Debug)] +pub enum Attribute { + AlwaysInline = 0, + ByVal = 1, + Cold = 2, + InlineHint = 3, + MinSize = 4, + Naked = 5, + NoAlias = 6, + NoCapture = 7, + NoInline = 8, + NonNull = 9, + NoRedZone = 10, + NoReturn = 11, + NoUnwind = 12, + OptimizeForSize = 13, + ReadOnly = 14, + SExt = 15, + StructRet = 16, + UWTable = 17, + ZExt = 18, +} + +/// LLVMIntPredicate +#[derive(Copy, Clone)] +#[repr(C)] +pub enum IntPredicate { + IntEQ = 32, + IntNE = 33, + IntUGT = 34, + IntUGE = 35, + IntULT = 36, + IntULE = 37, + IntSGT = 38, + IntSGE = 39, + IntSLT = 40, + IntSLE = 41, +} + +/// LLVMRealPredicate +#[derive(Copy, Clone)] +#[repr(C)] +pub enum RealPredicate { + RealPredicateFalse = 0, + RealOEQ = 1, + RealOGT = 2, + RealOGE = 3, + RealOLT = 4, + RealOLE = 5, + RealONE = 6, + RealORD = 7, + RealUNO = 8, + RealUEQ = 9, + RealUGT = 10, + RealUGE = 11, + RealULT = 12, + RealULE = 13, + RealUNE = 14, + RealPredicateTrue = 15, +} + +/// LLVMTypeKind +#[derive(Copy, Clone, PartialEq, Debug)] +#[repr(C)] +pub enum TypeKind { + Void = 0, + Half = 1, + Float = 2, + Double = 3, + X86_FP80 = 4, + FP128 = 5, + PPC_FP128 = 6, + Label = 7, + Integer = 8, + Function = 9, + Struct = 10, + Array = 11, + Pointer = 12, + Vector = 13, + Metadata = 14, + X86_MMX = 15, + Token = 16, +} + +/// LLVMAtomicRmwBinOp +#[derive(Copy, Clone)] +#[repr(C)] +pub enum AtomicRmwBinOp { + AtomicXchg = 0, + AtomicAdd = 1, + AtomicSub = 2, + AtomicAnd = 3, + AtomicNand = 4, + AtomicOr = 5, + AtomicXor = 6, + AtomicMax = 7, + AtomicMin = 8, + AtomicUMax = 9, + AtomicUMin = 10, +} + +/// LLVMAtomicOrdering +#[derive(Copy, Clone)] +#[repr(C)] +pub enum AtomicOrdering { + NotAtomic = 0, + Unordered = 1, + Monotonic = 2, + // Consume = 3, // Not specified yet. 
+ Acquire = 4, + Release = 5, + AcquireRelease = 6, + SequentiallyConsistent = 7, +} + +/// LLVMRustSynchronizationScope +#[derive(Copy, Clone)] +#[repr(C)] +pub enum SynchronizationScope { + Other, + SingleThread, + CrossThread, +} + +/// LLVMRustFileType +#[derive(Copy, Clone)] +#[repr(C)] +pub enum FileType { + Other, + AssemblyFile, + ObjectFile, +} + +/// LLVMMetadataType +#[derive(Copy, Clone)] +#[repr(C)] +pub enum MetadataType { + MD_dbg = 0, + MD_tbaa = 1, + MD_prof = 2, + MD_fpmath = 3, + MD_range = 4, + MD_tbaa_struct = 5, + MD_invariant_load = 6, + MD_alias_scope = 7, + MD_noalias = 8, + MD_nontemporal = 9, + MD_mem_parallel_loop_access = 10, + MD_nonnull = 11, +} + +/// LLVMRustAsmDialect +#[derive(Copy, Clone)] +#[repr(C)] +pub enum AsmDialect { + Other, + Att, + Intel, +} + +/// LLVMRustCodeGenOptLevel +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum CodeGenOptLevel { + Other, + None, + Less, + Default, + Aggressive, +} + +/// LLVMRelocMode +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum RelocMode { + Default = 0, + Static = 1, + PIC = 2, + DynamicNoPic = 3, +} + +/// LLVMRustCodeModel +#[derive(Copy, Clone)] +#[repr(C)] +pub enum CodeModel { + Other, + Default, + JITDefault, + Small, + Kernel, + Medium, + Large, +} + +/// LLVMRustDiagnosticKind +#[derive(Copy, Clone)] +#[repr(C)] +pub enum DiagnosticKind { + Other, + InlineAsm, + StackSize, + DebugMetadataVersion, + SampleProfile, + OptimizationRemark, + OptimizationRemarkMissed, + OptimizationRemarkAnalysis, + OptimizationRemarkAnalysisFPCommute, + OptimizationRemarkAnalysisAliasing, + OptimizationRemarkOther, + OptimizationFailure, +} + +/// LLVMRustArchiveKind +#[derive(Copy, Clone)] +#[repr(C)] +pub enum ArchiveKind { + Other, + K_GNU, + K_MIPS64, + K_BSD, + K_COFF, +} + +/// LLVMRustPassKind +#[derive(Copy, Clone, PartialEq, Debug)] +#[repr(C)] +pub enum PassKind { + Other, + Function, + Module, +} + +// Opaque pointer types +#[allow(missing_copy_implementations)] +pub enum 
Module_opaque {} +pub type ModuleRef = *mut Module_opaque; +#[allow(missing_copy_implementations)] +pub enum Context_opaque {} +pub type ContextRef = *mut Context_opaque; +#[allow(missing_copy_implementations)] +pub enum Type_opaque {} +pub type TypeRef = *mut Type_opaque; +#[allow(missing_copy_implementations)] +pub enum Value_opaque {} +pub type ValueRef = *mut Value_opaque; +#[allow(missing_copy_implementations)] +pub enum Metadata_opaque {} +pub type MetadataRef = *mut Metadata_opaque; +#[allow(missing_copy_implementations)] +pub enum BasicBlock_opaque {} +pub type BasicBlockRef = *mut BasicBlock_opaque; +#[allow(missing_copy_implementations)] +pub enum Builder_opaque {} +pub type BuilderRef = *mut Builder_opaque; +#[allow(missing_copy_implementations)] +pub enum ExecutionEngine_opaque {} +pub type ExecutionEngineRef = *mut ExecutionEngine_opaque; +#[allow(missing_copy_implementations)] +pub enum MemoryBuffer_opaque {} +pub type MemoryBufferRef = *mut MemoryBuffer_opaque; +#[allow(missing_copy_implementations)] +pub enum PassManager_opaque {} +pub type PassManagerRef = *mut PassManager_opaque; +#[allow(missing_copy_implementations)] +pub enum PassManagerBuilder_opaque {} +pub type PassManagerBuilderRef = *mut PassManagerBuilder_opaque; +#[allow(missing_copy_implementations)] +pub enum Use_opaque {} +pub type UseRef = *mut Use_opaque; +#[allow(missing_copy_implementations)] +pub enum TargetData_opaque {} +pub type TargetDataRef = *mut TargetData_opaque; +#[allow(missing_copy_implementations)] +pub enum ObjectFile_opaque {} +pub type ObjectFileRef = *mut ObjectFile_opaque; +#[allow(missing_copy_implementations)] +pub enum SectionIterator_opaque {} +pub type SectionIteratorRef = *mut SectionIterator_opaque; +#[allow(missing_copy_implementations)] +pub enum Pass_opaque {} +pub type PassRef = *mut Pass_opaque; +#[allow(missing_copy_implementations)] +pub enum TargetMachine_opaque {} +pub type TargetMachineRef = *mut TargetMachine_opaque; +pub enum Archive_opaque {} 
+pub type ArchiveRef = *mut Archive_opaque; +pub enum ArchiveIterator_opaque {} +pub type ArchiveIteratorRef = *mut ArchiveIterator_opaque; +pub enum ArchiveChild_opaque {} +pub type ArchiveChildRef = *mut ArchiveChild_opaque; +#[allow(missing_copy_implementations)] +pub enum Twine_opaque {} +pub type TwineRef = *mut Twine_opaque; +#[allow(missing_copy_implementations)] +pub enum DiagnosticInfo_opaque {} +pub type DiagnosticInfoRef = *mut DiagnosticInfo_opaque; +#[allow(missing_copy_implementations)] +pub enum DebugLoc_opaque {} +pub type DebugLocRef = *mut DebugLoc_opaque; +#[allow(missing_copy_implementations)] +pub enum SMDiagnostic_opaque {} +pub type SMDiagnosticRef = *mut SMDiagnostic_opaque; +#[allow(missing_copy_implementations)] +pub enum RustArchiveMember_opaque {} +pub type RustArchiveMemberRef = *mut RustArchiveMember_opaque; +#[allow(missing_copy_implementations)] +pub enum OperandBundleDef_opaque {} +pub type OperandBundleDefRef = *mut OperandBundleDef_opaque; + +pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void); +pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint); + +/// LLVMVisibility +#[repr(C)] +pub enum Visibility { + Default, + Hidden, + Protected, +} + +pub mod debuginfo { + use super::MetadataRef; + + #[allow(missing_copy_implementations)] + pub enum DIBuilder_opaque {} + pub type DIBuilderRef = *mut DIBuilder_opaque; + + pub type DIDescriptor = MetadataRef; + pub type DIScope = DIDescriptor; + pub type DILocation = DIDescriptor; + pub type DIFile = DIScope; + pub type DILexicalBlock = DIScope; + pub type DISubprogram = DIScope; + pub type DINameSpace = DIScope; + pub type DIType = DIDescriptor; + pub type DIBasicType = DIType; + pub type DIDerivedType = DIType; + pub type DICompositeType = DIDerivedType; + pub type DIVariable = DIDescriptor; + pub type DIGlobalVariable = DIDescriptor; + pub type DIArray = DIDescriptor; + pub type DISubrange = DIDescriptor; + pub type 
DIEnumerator = DIDescriptor; + pub type DITemplateTypeParameter = DIDescriptor; + + // These values **must** match with LLVMRustDIFlags!! + bitflags! { + #[repr(C)] + #[derive(Debug, Default)] + flags DIFlags: ::libc::uint32_t { + const FlagZero = 0, + const FlagPrivate = 1, + const FlagProtected = 2, + const FlagPublic = 3, + const FlagFwdDecl = (1 << 2), + const FlagAppleBlock = (1 << 3), + const FlagBlockByrefStruct = (1 << 4), + const FlagVirtual = (1 << 5), + const FlagArtificial = (1 << 6), + const FlagExplicit = (1 << 7), + const FlagPrototyped = (1 << 8), + const FlagObjcClassComplete = (1 << 9), + const FlagObjectPointer = (1 << 10), + const FlagVector = (1 << 11), + const FlagStaticMember = (1 << 12), + const FlagLValueReference = (1 << 13), + const FlagRValueReference = (1 << 14), + } + } +} + + +// Link to our native llvm bindings (things that we need to use the C++ api +// for) and because llvm is written in C++ we need to link against libstdc++ +// +// You'll probably notice that there is an omission of all LLVM libraries +// from this location. This is because the set of LLVM libraries that we +// link to is mostly defined by LLVM, and the `llvm-config` tool is used to +// figure out the exact set of libraries. To do this, the build system +// generates an llvmdeps.rs file next to this one which will be +// automatically updated whenever LLVM is updated to include an up-to-date +// set of the libraries we need to link to LLVM for. +#[link(name = "rustllvm", kind = "static")] +#[cfg(not(cargobuild))] +extern "C" {} + +#[linked_from = "rustllvm"] // not quite true but good enough +extern "C" { + // Create and destroy contexts. + pub fn LLVMContextCreate() -> ContextRef; + pub fn LLVMContextDispose(C: ContextRef); + pub fn LLVMGetMDKindIDInContext(C: ContextRef, Name: *const c_char, SLen: c_uint) -> c_uint; + + // Create and destroy modules. 
+ pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: ContextRef) -> ModuleRef; + pub fn LLVMGetModuleContext(M: ModuleRef) -> ContextRef; + pub fn LLVMCloneModule(M: ModuleRef) -> ModuleRef; + pub fn LLVMDisposeModule(M: ModuleRef); + + /// Data layout. See Module::getDataLayout. + pub fn LLVMGetDataLayout(M: ModuleRef) -> *const c_char; + pub fn LLVMSetDataLayout(M: ModuleRef, Triple: *const c_char); + + /// See Module::dump. + pub fn LLVMDumpModule(M: ModuleRef); + + /// See Module::setModuleInlineAsm. + pub fn LLVMSetModuleInlineAsm(M: ModuleRef, Asm: *const c_char); + + /// See llvm::LLVMTypeKind::getTypeID. + pub fn LLVMRustGetTypeKind(Ty: TypeRef) -> TypeKind; + + /// See llvm::Value::getContext + pub fn LLVMRustGetValueContext(V: ValueRef) -> ContextRef; + + // Operations on integer types + pub fn LLVMInt1TypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMInt8TypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMInt16TypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMInt32TypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMInt64TypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMIntTypeInContext(C: ContextRef, NumBits: c_uint) -> TypeRef; + + pub fn LLVMGetIntTypeWidth(IntegerTy: TypeRef) -> c_uint; + + // Operations on real types + pub fn LLVMFloatTypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMDoubleTypeInContext(C: ContextRef) -> TypeRef; + + // Operations on function types + pub fn LLVMFunctionType(ReturnType: TypeRef, + ParamTypes: *const TypeRef, + ParamCount: c_uint, + IsVarArg: Bool) + -> TypeRef; + pub fn LLVMGetReturnType(FunctionTy: TypeRef) -> TypeRef; + pub fn LLVMCountParamTypes(FunctionTy: TypeRef) -> c_uint; + pub fn LLVMGetParamTypes(FunctionTy: TypeRef, Dest: *mut TypeRef); + + // Operations on struct types + pub fn LLVMStructTypeInContext(C: ContextRef, + ElementTypes: *const TypeRef, + ElementCount: c_uint, + Packed: Bool) + -> TypeRef; + pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint; + 
pub fn LLVMGetStructElementTypes(StructTy: TypeRef, Dest: *mut TypeRef); + pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool; + + // Operations on array, pointer, and vector types (sequence types) + pub fn LLVMRustArrayType(ElementType: TypeRef, ElementCount: u64) -> TypeRef; + pub fn LLVMPointerType(ElementType: TypeRef, AddressSpace: c_uint) -> TypeRef; + pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef; + + pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef; + pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint; + pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint; + + // Operations on other types + pub fn LLVMVoidTypeInContext(C: ContextRef) -> TypeRef; + pub fn LLVMRustMetadataTypeInContext(C: ContextRef) -> TypeRef; + + // Operations on all values + pub fn LLVMTypeOf(Val: ValueRef) -> TypeRef; + pub fn LLVMGetValueName(Val: ValueRef) -> *const c_char; + pub fn LLVMSetValueName(Val: ValueRef, Name: *const c_char); + pub fn LLVMReplaceAllUsesWith(OldVal: ValueRef, NewVal: ValueRef); + pub fn LLVMSetMetadata(Val: ValueRef, KindID: c_uint, Node: ValueRef); + + // Operations on Uses + pub fn LLVMGetFirstUse(Val: ValueRef) -> UseRef; + pub fn LLVMGetNextUse(U: UseRef) -> UseRef; + pub fn LLVMGetUser(U: UseRef) -> ValueRef; + + // Operations on Users + pub fn LLVMGetOperand(Val: ValueRef, Index: c_uint) -> ValueRef; + + // Operations on constants of any type + pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef; + pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; + pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; + // only for isize/vector + pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef; + pub fn LLVMIsNull(Val: ValueRef) -> Bool; + pub fn LLVMIsUndef(Val: ValueRef) -> Bool; + + // Operations on metadata + pub fn LLVMMDNodeInContext(C: ContextRef, Vals: *const ValueRef, Count: c_uint) -> ValueRef; + + // Operations on scalar constants + pub fn LLVMConstInt(IntTy: TypeRef, 
N: c_ulonglong, SignExtend: Bool) -> ValueRef; + pub fn LLVMConstReal(RealTy: TypeRef, N: f64) -> ValueRef; + pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong; + pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong; + + + // Operations on composite constants + pub fn LLVMConstStringInContext(C: ContextRef, + Str: *const c_char, + Length: c_uint, + DontNullTerminate: Bool) + -> ValueRef; + pub fn LLVMConstStructInContext(C: ContextRef, + ConstantVals: *const ValueRef, + Count: c_uint, + Packed: Bool) + -> ValueRef; + + pub fn LLVMConstArray(ElementTy: TypeRef, + ConstantVals: *const ValueRef, + Length: c_uint) + -> ValueRef; + pub fn LLVMConstVector(ScalarConstantVals: *const ValueRef, Size: c_uint) -> ValueRef; + + // Constant expressions + pub fn LLVMSizeOf(Ty: TypeRef) -> ValueRef; + pub fn LLVMConstNeg(ConstantVal: ValueRef) -> ValueRef; + pub fn LLVMConstFNeg(ConstantVal: ValueRef) -> ValueRef; + pub fn LLVMConstNot(ConstantVal: ValueRef) -> ValueRef; + pub fn LLVMConstAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstFAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstSub(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstFSub(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstMul(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstFMul(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstUDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstSDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstFDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstURem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstSRem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstFRem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn 
LLVMConstAnd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstOr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstXor(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstShl(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstLShr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstAShr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef; + pub fn LLVMConstTrunc(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstZExt(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstUIToFP(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstSIToFP(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstFPToUI(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstFPToSI(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstPtrToInt(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstIntToPtr(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstBitCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstPointerCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstIntCast(ConstantVal: ValueRef, ToType: TypeRef, isSigned: Bool) -> ValueRef; + pub fn LLVMConstFPCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef; + pub fn LLVMConstExtractValue(AggConstant: ValueRef, + IdxList: *const c_uint, + NumIdx: c_uint) + -> ValueRef; + pub fn LLVMConstInlineAsm(Ty: TypeRef, + AsmString: *const c_char, + Constraints: *const c_char, + HasSideEffects: Bool, + IsAlignStack: Bool) + -> ValueRef; + + + // Operations on global variables, functions, and aliases (globals) + pub fn LLVMGetGlobalParent(Global: ValueRef) -> ModuleRef; + pub fn LLVMIsDeclaration(Global: ValueRef) -> Bool; + pub fn LLVMRustGetLinkage(Global: ValueRef) -> Linkage; + pub fn LLVMRustSetLinkage(Global: 
ValueRef, RustLinkage: Linkage); + pub fn LLVMGetSection(Global: ValueRef) -> *const c_char; + pub fn LLVMSetSection(Global: ValueRef, Section: *const c_char); + pub fn LLVMSetVisibility(Global: ValueRef, Viz: Visibility); + pub fn LLVMGetAlignment(Global: ValueRef) -> c_uint; + pub fn LLVMSetAlignment(Global: ValueRef, Bytes: c_uint); + pub fn LLVMSetDLLStorageClass(V: ValueRef, C: DLLStorageClass); + + + // Operations on global variables + pub fn LLVMIsAGlobalVariable(GlobalVar: ValueRef) -> ValueRef; + pub fn LLVMAddGlobal(M: ModuleRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; + pub fn LLVMGetNamedGlobal(M: ModuleRef, Name: *const c_char) -> ValueRef; + pub fn LLVMRustGetOrInsertGlobal(M: ModuleRef, Name: *const c_char, T: TypeRef) -> ValueRef; + pub fn LLVMGetFirstGlobal(M: ModuleRef) -> ValueRef; + pub fn LLVMGetNextGlobal(GlobalVar: ValueRef) -> ValueRef; + pub fn LLVMDeleteGlobal(GlobalVar: ValueRef); + pub fn LLVMGetInitializer(GlobalVar: ValueRef) -> ValueRef; + pub fn LLVMSetInitializer(GlobalVar: ValueRef, ConstantVal: ValueRef); + pub fn LLVMSetThreadLocal(GlobalVar: ValueRef, IsThreadLocal: Bool); + pub fn LLVMIsGlobalConstant(GlobalVar: ValueRef) -> Bool; + pub fn LLVMSetGlobalConstant(GlobalVar: ValueRef, IsConstant: Bool); + pub fn LLVMRustGetNamedValue(M: ModuleRef, Name: *const c_char) -> ValueRef; + + // Operations on functions + pub fn LLVMAddFunction(M: ModuleRef, Name: *const c_char, FunctionTy: TypeRef) -> ValueRef; + pub fn LLVMGetNamedFunction(M: ModuleRef, Name: *const c_char) -> ValueRef; + pub fn LLVMGetFirstFunction(M: ModuleRef) -> ValueRef; + pub fn LLVMGetNextFunction(Fn: ValueRef) -> ValueRef; + pub fn LLVMRustGetOrInsertFunction(M: ModuleRef, + Name: *const c_char, + FunctionTy: TypeRef) + -> ValueRef; + pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint); + pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute); 
+ pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef, + index: c_uint, + Name: *const c_char, + Value: *const c_char); + pub fn LLVMRustRemoveFunctionAttributes(Fn: ValueRef, index: c_uint, attr: Attribute); + + // Operations on parameters + pub fn LLVMCountParams(Fn: ValueRef) -> c_uint; + pub fn LLVMGetParam(Fn: ValueRef, Index: c_uint) -> ValueRef; + + // Operations on basic blocks + pub fn LLVMBasicBlockAsValue(BB: BasicBlockRef) -> ValueRef; + pub fn LLVMGetBasicBlockParent(BB: BasicBlockRef) -> ValueRef; + pub fn LLVMAppendBasicBlockInContext(C: ContextRef, + Fn: ValueRef, + Name: *const c_char) + -> BasicBlockRef; + pub fn LLVMDeleteBasicBlock(BB: BasicBlockRef); + + // Operations on instructions + pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef; + pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef; + pub fn LLVMInstructionEraseFromParent(Inst: ValueRef); + + // Operations on call sites + pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); + pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute); + pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64); + + // Operations on load/store instructions (only) + pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); + + // Operations on phi nodes + pub fn LLVMAddIncoming(PhiNode: ValueRef, + IncomingValues: *const ValueRef, + IncomingBlocks: *const BasicBlockRef, + Count: c_uint); + + // Instruction builders + pub fn LLVMCreateBuilderInContext(C: ContextRef) -> BuilderRef; + pub fn LLVMPositionBuilder(Builder: BuilderRef, Block: BasicBlockRef, Instr: ValueRef); + pub fn LLVMPositionBuilderBefore(Builder: BuilderRef, Instr: ValueRef); + pub fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef); + pub fn LLVMGetInsertBlock(Builder: BuilderRef) -> BasicBlockRef; + pub fn LLVMDisposeBuilder(Builder: BuilderRef); + + // Metadata + pub fn LLVMSetCurrentDebugLocation(Builder: 
BuilderRef, L: ValueRef); + pub fn LLVMGetCurrentDebugLocation(Builder: BuilderRef) -> ValueRef; + pub fn LLVMSetInstDebugLocation(Builder: BuilderRef, Inst: ValueRef); + + // Terminators + pub fn LLVMBuildRetVoid(B: BuilderRef) -> ValueRef; + pub fn LLVMBuildRet(B: BuilderRef, V: ValueRef) -> ValueRef; + pub fn LLVMBuildAggregateRet(B: BuilderRef, RetVals: *const ValueRef, N: c_uint) -> ValueRef; + pub fn LLVMBuildBr(B: BuilderRef, Dest: BasicBlockRef) -> ValueRef; + pub fn LLVMBuildCondBr(B: BuilderRef, + If: ValueRef, + Then: BasicBlockRef, + Else: BasicBlockRef) + -> ValueRef; + pub fn LLVMBuildSwitch(B: BuilderRef, + V: ValueRef, + Else: BasicBlockRef, + NumCases: c_uint) + -> ValueRef; + pub fn LLVMBuildIndirectBr(B: BuilderRef, Addr: ValueRef, NumDests: c_uint) -> ValueRef; + pub fn LLVMRustBuildInvoke(B: BuilderRef, + Fn: ValueRef, + Args: *const ValueRef, + NumArgs: c_uint, + Then: BasicBlockRef, + Catch: BasicBlockRef, + Bundle: OperandBundleDefRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMRustBuildLandingPad(B: BuilderRef, + Ty: TypeRef, + PersFn: ValueRef, + NumClauses: c_uint, + Name: *const c_char, + F: ValueRef) + -> ValueRef; + pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef; + pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef; + + pub fn LLVMRustBuildCleanupPad(B: BuilderRef, + ParentPad: ValueRef, + ArgCnt: c_uint, + Args: *const ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMRustBuildCleanupRet(B: BuilderRef, + CleanupPad: ValueRef, + UnwindBB: BasicBlockRef) + -> ValueRef; + pub fn LLVMRustBuildCatchPad(B: BuilderRef, + ParentPad: ValueRef, + ArgCnt: c_uint, + Args: *const ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMRustBuildCatchRet(B: BuilderRef, Pad: ValueRef, BB: BasicBlockRef) -> ValueRef; + pub fn LLVMRustBuildCatchSwitch(Builder: BuilderRef, + ParentPad: ValueRef, + BB: BasicBlockRef, + NumHandlers: c_uint, + Name: *const c_char) + -> ValueRef; + pub fn 
LLVMRustAddHandler(CatchSwitch: ValueRef, Handler: BasicBlockRef); + pub fn LLVMRustSetPersonalityFn(B: BuilderRef, Pers: ValueRef); + + // Add a case to the switch instruction + pub fn LLVMAddCase(Switch: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef); + + // Add a clause to the landing pad instruction + pub fn LLVMAddClause(LandingPad: ValueRef, ClauseVal: ValueRef); + + // Set the cleanup on a landing pad instruction + pub fn LLVMSetCleanup(LandingPad: ValueRef, Val: Bool); + + // Arithmetic + pub fn LLVMBuildAdd(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNSWAdd(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNUWAdd(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFAdd(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSub(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNSWSub(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNUWSub(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFSub(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildMul(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNSWMul(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNUWMul(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFMul(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildUDiv(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSDiv(B: BuilderRef, + LHS: ValueRef, + 
RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildExactSDiv(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFDiv(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildURem(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSRem(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFRem(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildShl(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildLShr(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildAShr(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildAnd(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildOr(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildXor(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildBinOp(B: BuilderRef, + Op: Opcode, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildNSWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildNUWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildFNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildNot(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMRustSetHasUnsafeAlgebra(Instr: ValueRef); + + // Memory + pub fn LLVMBuildAlloca(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildFree(B: 
BuilderRef, PointerVal: ValueRef) -> ValueRef; + pub fn LLVMBuildLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char) -> ValueRef; + + pub fn LLVMBuildStore(B: BuilderRef, Val: ValueRef, Ptr: ValueRef) -> ValueRef; + + pub fn LLVMBuildGEP(B: BuilderRef, + Pointer: ValueRef, + Indices: *const ValueRef, + NumIndices: c_uint, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildInBoundsGEP(B: BuilderRef, + Pointer: ValueRef, + Indices: *const ValueRef, + NumIndices: c_uint, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildStructGEP(B: BuilderRef, + Pointer: ValueRef, + Idx: c_uint, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildGlobalString(B: BuilderRef, + Str: *const c_char, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildGlobalStringPtr(B: BuilderRef, + Str: *const c_char, + Name: *const c_char) + -> ValueRef; + + // Casts + pub fn LLVMBuildTrunc(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildZExt(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSExt(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFPToUI(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFPToSI(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildUIToFP(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSIToFP(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFPTrunc(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFPExt(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildPtrToInt(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> 
ValueRef; + pub fn LLVMBuildIntToPtr(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildBitCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildZExtOrBitCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSExtOrBitCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildTruncOrBitCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildCast(B: BuilderRef, + Op: Opcode, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildPointerCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildIntCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFPCast(B: BuilderRef, + Val: ValueRef, + DestTy: TypeRef, + Name: *const c_char) + -> ValueRef; + + // Comparisons + pub fn LLVMBuildICmp(B: BuilderRef, + Op: c_uint, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildFCmp(B: BuilderRef, + Op: c_uint, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + + // Miscellaneous instructions + pub fn LLVMBuildPhi(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef; + pub fn LLVMRustBuildCall(B: BuilderRef, + Fn: ValueRef, + Args: *const ValueRef, + NumArgs: c_uint, + Bundle: OperandBundleDefRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildSelect(B: BuilderRef, + If: ValueRef, + Then: ValueRef, + Else: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildVAArg(B: BuilderRef, + list: ValueRef, + Ty: TypeRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildExtractElement(B: BuilderRef, + VecVal: ValueRef, + Index: ValueRef, + Name: *const c_char) + -> ValueRef; 
+ pub fn LLVMBuildInsertElement(B: BuilderRef, + VecVal: ValueRef, + EltVal: ValueRef, + Index: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildShuffleVector(B: BuilderRef, + V1: ValueRef, + V2: ValueRef, + Mask: ValueRef, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildExtractValue(B: BuilderRef, + AggVal: ValueRef, + Index: c_uint, + Name: *const c_char) + -> ValueRef; + pub fn LLVMBuildInsertValue(B: BuilderRef, + AggVal: ValueRef, + EltVal: ValueRef, + Index: c_uint, + Name: *const c_char) + -> ValueRef; + + pub fn LLVMBuildIsNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildIsNotNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) -> ValueRef; + pub fn LLVMBuildPtrDiff(B: BuilderRef, + LHS: ValueRef, + RHS: ValueRef, + Name: *const c_char) + -> ValueRef; + + // Atomic Operations + pub fn LLVMRustBuildAtomicLoad(B: BuilderRef, + PointerVal: ValueRef, + Name: *const c_char, + Order: AtomicOrdering, + Alignment: c_uint) + -> ValueRef; + + pub fn LLVMRustBuildAtomicStore(B: BuilderRef, + Val: ValueRef, + Ptr: ValueRef, + Order: AtomicOrdering, + Alignment: c_uint) + -> ValueRef; + + pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef, + LHS: ValueRef, + CMP: ValueRef, + RHS: ValueRef, + Order: AtomicOrdering, + FailureOrder: AtomicOrdering, + Weak: Bool) + -> ValueRef; + + pub fn LLVMBuildAtomicRMW(B: BuilderRef, + Op: AtomicRmwBinOp, + LHS: ValueRef, + RHS: ValueRef, + Order: AtomicOrdering, + SingleThreaded: Bool) + -> ValueRef; + + pub fn LLVMRustBuildAtomicFence(B: BuilderRef, + Order: AtomicOrdering, + Scope: SynchronizationScope); + + + // Selected entries from the downcasts. + pub fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef; + pub fn LLVMIsAStoreInst(Inst: ValueRef) -> ValueRef; + + /// Writes a module to the specified path. Returns 0 on success. + pub fn LLVMWriteBitcodeToFile(M: ModuleRef, Path: *const c_char) -> c_int; + + /// Creates target data from a target layout string. 
+ pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; + /// Number of bytes clobbered when doing a Store to *T. + pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; + + /// Distance between successive elements in an array of T. Includes ABI padding. + pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; + + /// Returns the preferred alignment of a type. + pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; + /// Returns the minimum alignment of a type. + pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; + + /// Computes the byte offset of the indexed struct element for a + /// target. + pub fn LLVMOffsetOfElement(TD: TargetDataRef, + StructTy: TypeRef, + Element: c_uint) + -> c_ulonglong; + + /// Disposes target data. + pub fn LLVMDisposeTargetData(TD: TargetDataRef); + + /// Creates a pass manager. + pub fn LLVMCreatePassManager() -> PassManagerRef; + + /// Creates a function-by-function pass manager + pub fn LLVMCreateFunctionPassManagerForModule(M: ModuleRef) -> PassManagerRef; + + /// Disposes a pass manager. + pub fn LLVMDisposePassManager(PM: PassManagerRef); + + /// Runs a pass manager on a module. 
+ pub fn LLVMRunPassManager(PM: PassManagerRef, M: ModuleRef) -> Bool; + + pub fn LLVMInitializePasses(); + + pub fn LLVMPassManagerBuilderCreate() -> PassManagerBuilderRef; + pub fn LLVMPassManagerBuilderDispose(PMB: PassManagerBuilderRef); + pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: PassManagerBuilderRef, Value: Bool); + pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: PassManagerBuilderRef, Value: Bool); + pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: PassManagerBuilderRef, + threshold: c_uint); + pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: PassManagerBuilderRef, + PM: PassManagerRef); + + pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: PassManagerBuilderRef, + PM: PassManagerRef); + pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: PassManagerBuilderRef, + PM: PassManagerRef, + Internalize: Bool, + RunInliner: Bool); + + // Stuff that's in rustllvm/ because it's not upstream yet. + + /// Opens an object file. + pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef; + /// Closes an object file. + pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef); + + /// Enumerates the sections in an object file. + pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef; + /// Destroys a section iterator. + pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef); + /// Returns true if the section iterator is at the end of the section + /// list: + pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef, SI: SectionIteratorRef) -> Bool; + /// Moves the section iterator to point to the next section. + pub fn LLVMMoveToNextSection(SI: SectionIteratorRef); + /// Returns the current section size. + pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong; + /// Returns the current section contents as a string buffer. + pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char; + + /// Reads the given file and returns it as a memory buffer. 
Use + /// LLVMDisposeMemoryBuffer() to get rid of it. + pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char) -> MemoryBufferRef; + + pub fn LLVMStartMultithreaded() -> Bool; + + /// Returns a string describing the last error caused by an LLVMRust* call. + pub fn LLVMRustGetLastError() -> *const c_char; + + /// Print the pass timings since static dtors aren't picking them up. + pub fn LLVMRustPrintPassTimings(); + + pub fn LLVMStructCreateNamed(C: ContextRef, Name: *const c_char) -> TypeRef; + + pub fn LLVMStructSetBody(StructTy: TypeRef, + ElementTypes: *const TypeRef, + ElementCount: c_uint, + Packed: Bool); + + pub fn LLVMConstNamedStruct(S: TypeRef, + ConstantVals: *const ValueRef, + Count: c_uint) + -> ValueRef; + + /// Enables LLVM debug output. + pub fn LLVMRustSetDebug(Enabled: c_int); + + /// Prepares inline assembly. + pub fn LLVMRustInlineAsm(Ty: TypeRef, + AsmString: *const c_char, + Constraints: *const c_char, + SideEffects: Bool, + AlignStack: Bool, + Dialect: AsmDialect) + -> ValueRef; + + pub fn LLVMRustDebugMetadataVersion() -> u32; + pub fn LLVMRustVersionMajor() -> u32; + pub fn LLVMRustVersionMinor() -> u32; + + pub fn LLVMRustAddModuleFlag(M: ModuleRef, name: *const c_char, value: u32); + + pub fn LLVMRustDIBuilderCreate(M: ModuleRef) -> DIBuilderRef; + + pub fn LLVMRustDIBuilderDispose(Builder: DIBuilderRef); + + pub fn LLVMRustDIBuilderFinalize(Builder: DIBuilderRef); + + pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: DIBuilderRef, + Lang: c_uint, + File: *const c_char, + Dir: *const c_char, + Producer: *const c_char, + isOptimized: bool, + Flags: *const c_char, + RuntimeVer: c_uint, + SplitName: *const c_char) + -> DIDescriptor; + + pub fn LLVMRustDIBuilderCreateFile(Builder: DIBuilderRef, + Filename: *const c_char, + Directory: *const c_char) + -> DIFile; + + pub fn LLVMRustDIBuilderCreateSubroutineType(Builder: DIBuilderRef, + File: DIFile, + ParameterTypes: DIArray) + -> DICompositeType; + + pub fn 
LLVMRustDIBuilderCreateFunction(Builder: DIBuilderRef, + Scope: DIDescriptor, + Name: *const c_char, + LinkageName: *const c_char, + File: DIFile, + LineNo: c_uint, + Ty: DIType, + isLocalToUnit: bool, + isDefinition: bool, + ScopeLine: c_uint, + Flags: DIFlags, + isOptimized: bool, + Fn: ValueRef, + TParam: DIArray, + Decl: DIDescriptor) + -> DISubprogram; + + pub fn LLVMRustDIBuilderCreateBasicType(Builder: DIBuilderRef, + Name: *const c_char, + SizeInBits: u64, + AlignInBits: u64, + Encoding: c_uint) + -> DIBasicType; + + pub fn LLVMRustDIBuilderCreatePointerType(Builder: DIBuilderRef, + PointeeTy: DIType, + SizeInBits: u64, + AlignInBits: u64, + Name: *const c_char) + -> DIDerivedType; + + pub fn LLVMRustDIBuilderCreateStructType(Builder: DIBuilderRef, + Scope: DIDescriptor, + Name: *const c_char, + File: DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u64, + Flags: DIFlags, + DerivedFrom: DIType, + Elements: DIArray, + RunTimeLang: c_uint, + VTableHolder: DIType, + UniqueId: *const c_char) + -> DICompositeType; + + pub fn LLVMRustDIBuilderCreateMemberType(Builder: DIBuilderRef, + Scope: DIDescriptor, + Name: *const c_char, + File: DIFile, + LineNo: c_uint, + SizeInBits: u64, + AlignInBits: u64, + OffsetInBits: u64, + Flags: DIFlags, + Ty: DIType) + -> DIDerivedType; + + pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: DIBuilderRef, + Scope: DIScope, + File: DIFile, + Line: c_uint, + Col: c_uint) + -> DILexicalBlock; + + pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: DIBuilderRef, + Scope: DIScope, + File: DIFile) + -> DILexicalBlock; + + pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: DIBuilderRef, + Context: DIScope, + Name: *const c_char, + LinkageName: *const c_char, + File: DIFile, + LineNo: c_uint, + Ty: DIType, + isLocalToUnit: bool, + Val: ValueRef, + Decl: DIDescriptor) + -> DIGlobalVariable; + + pub fn LLVMRustDIBuilderCreateVariable(Builder: DIBuilderRef, + Tag: c_uint, + Scope: DIDescriptor, + Name: *const c_char, + 
File: DIFile, + LineNo: c_uint, + Ty: DIType, + AlwaysPreserve: bool, + Flags: DIFlags, + ArgNo: c_uint) + -> DIVariable; + + pub fn LLVMRustDIBuilderCreateArrayType(Builder: DIBuilderRef, + Size: u64, + AlignInBits: u64, + Ty: DIType, + Subscripts: DIArray) + -> DIType; + + pub fn LLVMRustDIBuilderCreateVectorType(Builder: DIBuilderRef, + Size: u64, + AlignInBits: u64, + Ty: DIType, + Subscripts: DIArray) + -> DIType; + + pub fn LLVMRustDIBuilderGetOrCreateSubrange(Builder: DIBuilderRef, + Lo: i64, + Count: i64) + -> DISubrange; + + pub fn LLVMRustDIBuilderGetOrCreateArray(Builder: DIBuilderRef, + Ptr: *const DIDescriptor, + Count: c_uint) + -> DIArray; + + pub fn LLVMRustDIBuilderInsertDeclareAtEnd(Builder: DIBuilderRef, + Val: ValueRef, + VarInfo: DIVariable, + AddrOps: *const i64, + AddrOpsCount: c_uint, + DL: ValueRef, + InsertAtEnd: BasicBlockRef) + -> ValueRef; + + pub fn LLVMRustDIBuilderCreateEnumerator(Builder: DIBuilderRef, + Name: *const c_char, + Val: u64) + -> DIEnumerator; + + pub fn LLVMRustDIBuilderCreateEnumerationType(Builder: DIBuilderRef, + Scope: DIScope, + Name: *const c_char, + File: DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u64, + Elements: DIArray, + ClassType: DIType) + -> DIType; + + pub fn LLVMRustDIBuilderCreateUnionType(Builder: DIBuilderRef, + Scope: DIScope, + Name: *const c_char, + File: DIFile, + LineNumber: c_uint, + SizeInBits: u64, + AlignInBits: u64, + Flags: DIFlags, + Elements: DIArray, + RunTimeLang: c_uint, + UniqueId: *const c_char) + -> DIType; + + pub fn LLVMSetUnnamedAddr(GlobalVar: ValueRef, UnnamedAddr: Bool); + + pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: DIBuilderRef, + Scope: DIScope, + Name: *const c_char, + Ty: DIType, + File: DIFile, + LineNo: c_uint, + ColumnNo: c_uint) + -> DITemplateTypeParameter; + + + pub fn LLVMRustDIBuilderCreateNameSpace(Builder: DIBuilderRef, + Scope: DIScope, + Name: *const c_char, + File: DIFile, + LineNo: c_uint) + -> DINameSpace; + pub fn 
LLVMRustDICompositeTypeSetTypeArray(Builder: DIBuilderRef, + CompositeType: DIType, + TypeArray: DIArray); + + + pub fn LLVMRustDIBuilderCreateDebugLocation(Context: ContextRef, + Line: c_uint, + Column: c_uint, + Scope: DIScope, + InlinedAt: MetadataRef) + -> ValueRef; + pub fn LLVMRustDIBuilderCreateOpDeref() -> i64; + pub fn LLVMRustDIBuilderCreateOpPlus() -> i64; + + pub fn LLVMRustWriteTypeToString(Type: TypeRef, s: RustStringRef); + pub fn LLVMRustWriteValueToString(value_ref: ValueRef, s: RustStringRef); + + pub fn LLVMIsAConstantInt(value_ref: ValueRef) -> ValueRef; + + pub fn LLVMRustPassKind(Pass: PassRef) -> PassKind; + pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> PassRef; + pub fn LLVMRustAddPass(PM: PassManagerRef, Pass: PassRef); + + pub fn LLVMRustHasFeature(T: TargetMachineRef, s: *const c_char) -> bool; + + pub fn LLVMRustPrintTargetCPUs(T: TargetMachineRef); + pub fn LLVMRustPrintTargetFeatures(T: TargetMachineRef); + + pub fn LLVMRustCreateTargetMachine(Triple: *const c_char, + CPU: *const c_char, + Features: *const c_char, + Model: CodeModel, + Reloc: RelocMode, + Level: CodeGenOptLevel, + UseSoftFP: bool, + PositionIndependentExecutable: bool, + FunctionSections: bool, + DataSections: bool) + -> TargetMachineRef; + pub fn LLVMRustDisposeTargetMachine(T: TargetMachineRef); + pub fn LLVMRustAddAnalysisPasses(T: TargetMachineRef, PM: PassManagerRef, M: ModuleRef); + pub fn LLVMRustAddBuilderLibraryInfo(PMB: PassManagerBuilderRef, + M: ModuleRef, + DisableSimplifyLibCalls: bool); + pub fn LLVMRustConfigurePassManagerBuilder(PMB: PassManagerBuilderRef, + OptLevel: CodeGenOptLevel, + MergeFunctions: bool, + SLPVectorize: bool, + LoopVectorize: bool); + pub fn LLVMRustAddLibraryInfo(PM: PassManagerRef, + M: ModuleRef, + DisableSimplifyLibCalls: bool); + pub fn LLVMRustRunFunctionPassManager(PM: PassManagerRef, M: ModuleRef); + pub fn LLVMRustWriteOutputFile(T: TargetMachineRef, + PM: PassManagerRef, + M: ModuleRef, + Output: *const c_char, 
+ FileType: FileType) + -> LLVMRustResult; + pub fn LLVMRustPrintModule(PM: PassManagerRef, M: ModuleRef, Output: *const c_char); + pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); + pub fn LLVMRustPrintPasses(); + pub fn LLVMRustSetNormalizedTarget(M: ModuleRef, triple: *const c_char); + pub fn LLVMRustAddAlwaysInlinePass(P: PassManagerBuilderRef, AddLifetimes: bool); + pub fn LLVMRustLinkInExternalBitcode(M: ModuleRef, bc: *const c_char, len: size_t) -> bool; + pub fn LLVMRustRunRestrictionPass(M: ModuleRef, syms: *const *const c_char, len: size_t); + pub fn LLVMRustMarkAllFunctionsNounwind(M: ModuleRef); + + pub fn LLVMRustOpenArchive(path: *const c_char) -> ArchiveRef; + pub fn LLVMRustArchiveIteratorNew(AR: ArchiveRef) -> ArchiveIteratorRef; + pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef) -> ArchiveChildRef; + pub fn LLVMRustArchiveChildName(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildData(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildFree(ACR: ArchiveChildRef); + pub fn LLVMRustArchiveIteratorFree(AIR: ArchiveIteratorRef); + pub fn LLVMRustDestroyArchive(AR: ArchiveRef); + + pub fn LLVMRustGetSectionName(SI: SectionIteratorRef, data: *mut *const c_char) -> size_t; + + pub fn LLVMRustWriteTwineToString(T: TwineRef, s: RustStringRef); + + pub fn LLVMContextSetDiagnosticHandler(C: ContextRef, + Handler: DiagnosticHandler, + DiagnosticContext: *mut c_void); + + pub fn LLVMRustUnpackOptimizationDiagnostic(DI: DiagnosticInfoRef, + pass_name_out: RustStringRef, + function_out: *mut ValueRef, + debugloc_out: *mut DebugLocRef, + message_out: RustStringRef); + pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: DiagnosticInfoRef, + cookie_out: *mut c_uint, + message_out: *mut TwineRef, + instruction_out: *mut ValueRef); + + pub fn LLVMRustWriteDiagnosticInfoToString(DI: DiagnosticInfoRef, s: RustStringRef); + pub fn LLVMRustGetDiagInfoKind(DI: 
DiagnosticInfoRef) -> DiagnosticKind; + + pub fn LLVMRustWriteDebugLocToString(C: ContextRef, DL: DebugLocRef, s: RustStringRef); + + pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: ContextRef, + H: InlineAsmDiagHandler, + CX: *mut c_void); + + pub fn LLVMRustWriteSMDiagnosticToString(d: SMDiagnosticRef, s: RustStringRef); + + pub fn LLVMRustWriteArchive(Dst: *const c_char, + NumMembers: size_t, + Members: *const RustArchiveMemberRef, + WriteSymbtab: bool, + Kind: ArchiveKind) + -> LLVMRustResult; + pub fn LLVMRustArchiveMemberNew(Filename: *const c_char, + Name: *const c_char, + Child: ArchiveChildRef) + -> RustArchiveMemberRef; + pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef); + + pub fn LLVMRustSetDataLayoutFromTargetMachine(M: ModuleRef, TM: TargetMachineRef); + pub fn LLVMRustGetModuleDataLayout(M: ModuleRef) -> TargetDataRef; + + pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char, + Inputs: *const ValueRef, + NumInputs: c_uint) + -> OperandBundleDefRef; + pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef); + + pub fn LLVMRustPositionBuilderAtStart(B: BuilderRef, BB: BasicBlockRef); + + pub fn LLVMRustSetComdat(M: ModuleRef, V: ValueRef, Name: *const c_char); + pub fn LLVMRustUnsetComdat(V: ValueRef); + pub fn LLVMRustSetModulePIELevel(M: ModuleRef); +} + + +// LLVM requires symbols from this library, but apparently they're not printed +// during llvm-config? 
+#[cfg(windows)] +#[link(name = "ole32")] +extern "C" {} diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index fc7fa299fb8fa..2fe13d1120fc8 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -12,7 +12,6 @@ #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(dead_code)] -#![allow(trivial_casts)] #![crate_name = "rustc_llvm"] #![unstable(feature = "rustc_private", issue = "27812")] @@ -21,2153 +20,124 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(associated_consts)] #![feature(box_syntax)] +#![feature(concat_idents)] #![feature(libc)] #![feature(link_args)] -#![feature(staged_api)] #![feature(linked_from)] -#![feature(concat_idents)] +#![feature(staged_api)] +#![cfg_attr(not(stage0), feature(rustc_private))] extern crate libc; -#[macro_use] #[no_link] extern crate rustc_bitflags; +#[macro_use] +#[no_link] +extern crate rustc_bitflags; -pub use self::OtherAttribute::*; -pub use self::SpecialAttribute::*; -pub use self::AttributeSet::*; pub use self::IntPredicate::*; pub use self::RealPredicate::*; pub use self::TypeKind::*; -pub use self::AtomicBinOp::*; -pub use self::AtomicOrdering::*; -pub use self::SynchronizationScope::*; -pub use self::FileType::*; +pub use self::AtomicRmwBinOp::*; pub use self::MetadataType::*; -pub use self::AsmDialect::*; -pub use self::CodeGenOptLevel::*; -pub use self::RelocMode::*; -pub use self::CodeGenModel::*; +pub use self::CodeGenOptSize::*; pub use self::DiagnosticKind::*; pub use self::CallConv::*; -pub use self::Visibility::*; pub use self::DiagnosticSeverity::*; pub use self::Linkage::*; -pub use self::DLLStorageClassTypes::*; -use std::ffi::CString; -use std::cell::RefCell; +use std::str::FromStr; use std::slice; -use libc::{c_uint, c_ushort, uint64_t, c_int, size_t, 
c_char}; -use libc::{c_longlong, c_ulonglong, c_void}; -use debuginfo::{DIBuilderRef, DIDescriptor, - DIFile, DILexicalBlock, DISubprogram, DIType, - DIBasicType, DIDerivedType, DICompositeType, DIScope, - DIVariable, DIGlobalVariable, DIArray, DISubrange, - DITemplateTypeParameter, DIEnumerator, DINameSpace}; +use std::ffi::{CString, CStr}; +use std::cell::RefCell; +use libc::{c_uint, c_char, size_t}; pub mod archive_ro; pub mod diagnostic; +pub mod ffi; -pub type Opcode = u32; -pub type Bool = c_uint; +pub use ffi::*; -pub const True: Bool = 1 as Bool; -pub const False: Bool = 0 as Bool; - -// Consts for the LLVM CallConv type, pre-cast to usize. - -#[derive(Copy, Clone, PartialEq)] -pub enum CallConv { - CCallConv = 0, - FastCallConv = 8, - ColdCallConv = 9, - X86StdcallCallConv = 64, - X86FastcallCallConv = 65, - X86_64_Win64 = 79, - X86_VectorCall = 80 -} - -#[derive(Copy, Clone)] -pub enum Visibility { - LLVMDefaultVisibility = 0, - HiddenVisibility = 1, - ProtectedVisibility = 2, -} - -// This enum omits the obsolete (and no-op) linkage types DLLImportLinkage, -// DLLExportLinkage, GhostLinkage and LinkOnceODRAutoHideLinkage. -// LinkerPrivateLinkage and LinkerPrivateWeakLinkage are not included either; -// they've been removed in upstream LLVM commit r203866. -#[derive(Copy, Clone)] -pub enum Linkage { - ExternalLinkage = 0, - AvailableExternallyLinkage = 1, - LinkOnceAnyLinkage = 2, - LinkOnceODRLinkage = 3, - WeakAnyLinkage = 5, - WeakODRLinkage = 6, - AppendingLinkage = 7, - InternalLinkage = 8, - PrivateLinkage = 9, - ExternalWeakLinkage = 12, - CommonLinkage = 14, -} - -#[repr(C)] -#[derive(Copy, Clone, Debug)] -pub enum DiagnosticSeverity { - Error, - Warning, - Remark, - Note, -} - - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum DLLStorageClassTypes { - DefaultStorageClass = 0, - DLLImportStorageClass = 1, - DLLExportStorageClass = 2, -} - -bitflags! 
{ - flags Attribute : u64 { - const ZExt = 1 << 0, - const SExt = 1 << 1, - const NoReturn = 1 << 2, - const InReg = 1 << 3, - const StructRet = 1 << 4, - const NoUnwind = 1 << 5, - const NoAlias = 1 << 6, - const ByVal = 1 << 7, - const Nest = 1 << 8, - const ReadNone = 1 << 9, - const ReadOnly = 1 << 10, - const NoInline = 1 << 11, - const AlwaysInline = 1 << 12, - const OptimizeForSize = 1 << 13, - const StackProtect = 1 << 14, - const StackProtectReq = 1 << 15, - const Alignment = 1 << 16, - const NoCapture = 1 << 21, - const NoRedZone = 1 << 22, - const NoImplicitFloat = 1 << 23, - const Naked = 1 << 24, - const InlineHint = 1 << 25, - const Stack = 7 << 26, - const ReturnsTwice = 1 << 29, - const UWTable = 1 << 30, - const NonLazyBind = 1 << 31, - const OptimizeNone = 1 << 42, - } -} - - -#[repr(u64)] -#[derive(Copy, Clone)] -pub enum OtherAttribute { - // The following are not really exposed in - // the LLVM C api so instead to add these - // we call a wrapper function in RustWrapper - // that uses the C++ api. 
- SanitizeAddressAttribute = 1 << 32, - MinSizeAttribute = 1 << 33, - NoDuplicateAttribute = 1 << 34, - StackProtectStrongAttribute = 1 << 35, - SanitizeThreadAttribute = 1 << 36, - SanitizeMemoryAttribute = 1 << 37, - NoBuiltinAttribute = 1 << 38, - ReturnedAttribute = 1 << 39, - ColdAttribute = 1 << 40, - BuiltinAttribute = 1 << 41, - OptimizeNoneAttribute = 1 << 42, - InAllocaAttribute = 1 << 43, - NonNullAttribute = 1 << 44, -} - -#[derive(Copy, Clone)] -pub enum SpecialAttribute { - DereferenceableAttribute(u64) -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum AttributeSet { - ReturnIndex = 0, - FunctionIndex = !0 -} - -pub trait AttrHelper { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef); - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef); -} - -impl AttrHelper for Attribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - unsafe { - LLVMAddFunctionAttribute(llfn, idx, self.bits() as uint64_t); - } - } - - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - unsafe { - LLVMAddCallSiteAttribute(callsite, idx, self.bits() as uint64_t); +impl LLVMRustResult { + pub fn into_result(self) -> Result<(), ()> { + match self { + LLVMRustResult::Success => Ok(()), + LLVMRustResult::Failure => Err(()), } } } -impl AttrHelper for OtherAttribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - unsafe { - LLVMAddFunctionAttribute(llfn, idx, *self as uint64_t); - } - } - - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - unsafe { - LLVMAddCallSiteAttribute(callsite, idx, *self as uint64_t); - } - } -} - -impl AttrHelper for SpecialAttribute { - fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) { - match *self { - DereferenceableAttribute(bytes) => unsafe { - LLVMAddDereferenceableAttr(llfn, idx, bytes as uint64_t); - } - } - } - - fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) { - match *self { - DereferenceableAttribute(bytes) => unsafe { - LLVMAddDereferenceableCallSiteAttr(callsite, idx, bytes as 
uint64_t); - } - } +pub fn AddFunctionAttrStringValue(llfn: ValueRef, + idx: AttributePlace, + attr: &CStr, + value: &CStr) { + unsafe { + LLVMRustAddFunctionAttrStringValue(llfn, + idx.as_uint(), + attr.as_ptr(), + value.as_ptr()) } } -pub struct AttrBuilder { - attrs: Vec<(usize, Box)> +#[repr(C)] +#[derive(Copy, Clone)] +pub enum AttributePlace { + Argument(u32), + Function, } -impl AttrBuilder { - pub fn new() -> AttrBuilder { - AttrBuilder { - attrs: Vec::new() - } - } - - pub fn arg<'a, T: AttrHelper + 'static>(&'a mut self, idx: usize, a: T) -> &'a mut AttrBuilder { - self.attrs.push((idx, box a as Box)); - self +impl AttributePlace { + pub fn ReturnValue() -> Self { + AttributePlace::Argument(0) } - pub fn ret<'a, T: AttrHelper + 'static>(&'a mut self, a: T) -> &'a mut AttrBuilder { - self.attrs.push((ReturnIndex as usize, box a as Box)); - self - } - - pub fn apply_llfn(&self, llfn: ValueRef) { - for &(idx, ref attr) in &self.attrs { - attr.apply_llfn(idx as c_uint, llfn); - } - } - - pub fn apply_callsite(&self, callsite: ValueRef) { - for &(idx, ref attr) in &self.attrs { - attr.apply_callsite(idx as c_uint, callsite); + pub fn as_uint(self) -> c_uint { + match self { + AttributePlace::Function => !0, + AttributePlace::Argument(i) => i, } } } -// enum for the LLVM IntPredicate type -#[derive(Copy, Clone)] -pub enum IntPredicate { - IntEQ = 32, - IntNE = 33, - IntUGT = 34, - IntUGE = 35, - IntULT = 36, - IntULE = 37, - IntSGT = 38, - IntSGE = 39, - IntSLT = 40, - IntSLE = 41, -} - -// enum for the LLVM RealPredicate type -#[derive(Copy, Clone)] -pub enum RealPredicate { - RealPredicateFalse = 0, - RealOEQ = 1, - RealOGT = 2, - RealOGE = 3, - RealOLT = 4, - RealOLE = 5, - RealONE = 6, - RealORD = 7, - RealUNO = 8, - RealUEQ = 9, - RealUGT = 10, - RealUGE = 11, - RealULT = 12, - RealULE = 13, - RealUNE = 14, - RealPredicateTrue = 15, -} - -// The LLVM TypeKind type - must stay in sync with the def of -// LLVMTypeKind in llvm/include/llvm-c/Core.h 
-#[derive(Copy, Clone, PartialEq, Debug)] -#[repr(C)] -pub enum TypeKind { - Void = 0, - Half = 1, - Float = 2, - Double = 3, - X86_FP80 = 4, - FP128 = 5, - PPC_FP128 = 6, - Label = 7, - Integer = 8, - Function = 9, - Struct = 10, - Array = 11, - Pointer = 12, - Vector = 13, - Metadata = 14, - X86_MMX = 15, -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum AtomicBinOp { - AtomicXchg = 0, - AtomicAdd = 1, - AtomicSub = 2, - AtomicAnd = 3, - AtomicNand = 4, - AtomicOr = 5, - AtomicXor = 6, - AtomicMax = 7, - AtomicMin = 8, - AtomicUMax = 9, - AtomicUMin = 10, -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum AtomicOrdering { - NotAtomic = 0, - Unordered = 1, - Monotonic = 2, - // Consume = 3, // Not specified yet. - Acquire = 4, - Release = 5, - AcquireRelease = 6, - SequentiallyConsistent = 7 -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum SynchronizationScope { - SingleThread = 0, - CrossThread = 1 -} - -// Consts for the LLVMCodeGenFileType type (in include/llvm/c/TargetMachine.h) -#[repr(C)] -#[derive(Copy, Clone)] -pub enum FileType { - AssemblyFileType = 0, - ObjectFileType = 1 -} - -#[derive(Copy, Clone)] -pub enum MetadataType { - MD_dbg = 0, - MD_tbaa = 1, - MD_prof = 2, - MD_fpmath = 3, - MD_range = 4, - MD_tbaa_struct = 5, - MD_invariant_load = 6, - MD_alias_scope = 7, - MD_noalias = 8, - MD_nontemporal = 9, - MD_mem_parallel_loop_access = 10, - MD_nonnull = 11, -} - -// Inline Asm Dialect -#[derive(Copy, Clone)] -pub enum AsmDialect { - AD_ATT = 0, - AD_Intel = 1 -} - -#[derive(Copy, Clone, PartialEq)] -#[repr(C)] -pub enum CodeGenOptLevel { - CodeGenLevelNone = 0, - CodeGenLevelLess = 1, - CodeGenLevelDefault = 2, - CodeGenLevelAggressive = 3, -} - #[derive(Copy, Clone, PartialEq)] #[repr(C)] -pub enum RelocMode { - RelocDefault = 0, - RelocStatic = 1, - RelocPIC = 2, - RelocDynamicNoPic = 3, -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum CodeGenModel { - CodeModelDefault = 0, - CodeModelJITDefault = 1, - CodeModelSmall = 2, - CodeModelKernel = 
3, - CodeModelMedium = 4, - CodeModelLarge = 5, -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum DiagnosticKind { - DK_InlineAsm = 0, - DK_StackSize, - DK_DebugMetadataVersion, - DK_SampleProfile, - DK_OptimizationRemark, - DK_OptimizationRemarkMissed, - DK_OptimizationRemarkAnalysis, - DK_OptimizationFailure, -} - -#[repr(C)] -#[derive(Copy, Clone)] -pub enum ArchiveKind { - K_GNU, - K_MIPS64, - K_BSD, - K_COFF, -} - -// Opaque pointer types -#[allow(missing_copy_implementations)] -pub enum Module_opaque {} -pub type ModuleRef = *mut Module_opaque; -#[allow(missing_copy_implementations)] -pub enum Context_opaque {} -pub type ContextRef = *mut Context_opaque; -#[allow(missing_copy_implementations)] -pub enum Type_opaque {} -pub type TypeRef = *mut Type_opaque; -#[allow(missing_copy_implementations)] -pub enum Value_opaque {} -pub type ValueRef = *mut Value_opaque; -#[allow(missing_copy_implementations)] -pub enum Metadata_opaque {} -pub type MetadataRef = *mut Metadata_opaque; -#[allow(missing_copy_implementations)] -pub enum BasicBlock_opaque {} -pub type BasicBlockRef = *mut BasicBlock_opaque; -#[allow(missing_copy_implementations)] -pub enum Builder_opaque {} -pub type BuilderRef = *mut Builder_opaque; -#[allow(missing_copy_implementations)] -pub enum ExecutionEngine_opaque {} -pub type ExecutionEngineRef = *mut ExecutionEngine_opaque; -#[allow(missing_copy_implementations)] -pub enum MemoryBuffer_opaque {} -pub type MemoryBufferRef = *mut MemoryBuffer_opaque; -#[allow(missing_copy_implementations)] -pub enum PassManager_opaque {} -pub type PassManagerRef = *mut PassManager_opaque; -#[allow(missing_copy_implementations)] -pub enum PassManagerBuilder_opaque {} -pub type PassManagerBuilderRef = *mut PassManagerBuilder_opaque; -#[allow(missing_copy_implementations)] -pub enum Use_opaque {} -pub type UseRef = *mut Use_opaque; -#[allow(missing_copy_implementations)] -pub enum TargetData_opaque {} -pub type TargetDataRef = *mut TargetData_opaque; 
-#[allow(missing_copy_implementations)] -pub enum ObjectFile_opaque {} -pub type ObjectFileRef = *mut ObjectFile_opaque; -#[allow(missing_copy_implementations)] -pub enum SectionIterator_opaque {} -pub type SectionIteratorRef = *mut SectionIterator_opaque; -#[allow(missing_copy_implementations)] -pub enum Pass_opaque {} -pub type PassRef = *mut Pass_opaque; -#[allow(missing_copy_implementations)] -pub enum TargetMachine_opaque {} -pub type TargetMachineRef = *mut TargetMachine_opaque; -pub enum Archive_opaque {} -pub type ArchiveRef = *mut Archive_opaque; -pub enum ArchiveIterator_opaque {} -pub type ArchiveIteratorRef = *mut ArchiveIterator_opaque; -pub enum ArchiveChild_opaque {} -pub type ArchiveChildRef = *mut ArchiveChild_opaque; -#[allow(missing_copy_implementations)] -pub enum Twine_opaque {} -pub type TwineRef = *mut Twine_opaque; -#[allow(missing_copy_implementations)] -pub enum DiagnosticInfo_opaque {} -pub type DiagnosticInfoRef = *mut DiagnosticInfo_opaque; -#[allow(missing_copy_implementations)] -pub enum DebugLoc_opaque {} -pub type DebugLocRef = *mut DebugLoc_opaque; -#[allow(missing_copy_implementations)] -pub enum SMDiagnostic_opaque {} -pub type SMDiagnosticRef = *mut SMDiagnostic_opaque; -#[allow(missing_copy_implementations)] -pub enum RustArchiveMember_opaque {} -pub type RustArchiveMemberRef = *mut RustArchiveMember_opaque; - -pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void); -pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint); - -pub mod debuginfo { - pub use self::DIDescriptorFlags::*; - use super::{MetadataRef}; - - #[allow(missing_copy_implementations)] - pub enum DIBuilder_opaque {} - pub type DIBuilderRef = *mut DIBuilder_opaque; - - pub type DIDescriptor = MetadataRef; - pub type DIScope = DIDescriptor; - pub type DILocation = DIDescriptor; - pub type DIFile = DIScope; - pub type DILexicalBlock = DIScope; - pub type DISubprogram = DIScope; - pub type DINameSpace 
= DIScope; - pub type DIType = DIDescriptor; - pub type DIBasicType = DIType; - pub type DIDerivedType = DIType; - pub type DICompositeType = DIDerivedType; - pub type DIVariable = DIDescriptor; - pub type DIGlobalVariable = DIDescriptor; - pub type DIArray = DIDescriptor; - pub type DISubrange = DIDescriptor; - pub type DIEnumerator = DIDescriptor; - pub type DITemplateTypeParameter = DIDescriptor; - - #[derive(Copy, Clone)] - pub enum DIDescriptorFlags { - FlagPrivate = 1 << 0, - FlagProtected = 1 << 1, - FlagFwdDecl = 1 << 2, - FlagAppleBlock = 1 << 3, - FlagBlockByrefStruct = 1 << 4, - FlagVirtual = 1 << 5, - FlagArtificial = 1 << 6, - FlagExplicit = 1 << 7, - FlagPrototyped = 1 << 8, - FlagObjcClassComplete = 1 << 9, - FlagObjectPointer = 1 << 10, - FlagVector = 1 << 11, - FlagStaticMember = 1 << 12, - FlagIndirectVariable = 1 << 13, - FlagLValueReference = 1 << 14, - FlagRValueReference = 1 << 15 +pub enum CodeGenOptSize { + CodeGenOptSizeNone = 0, + CodeGenOptSizeDefault = 1, + CodeGenOptSizeAggressive = 2, +} + +impl FromStr for ArchiveKind { + type Err = (); + + fn from_str(s: &str) -> Result { + match s { + "gnu" => Ok(ArchiveKind::K_GNU), + "mips64" => Ok(ArchiveKind::K_MIPS64), + "bsd" => Ok(ArchiveKind::K_BSD), + "coff" => Ok(ArchiveKind::K_COFF), + _ => Err(()), + } } } +#[allow(missing_copy_implementations)] +pub enum RustString_opaque {} +pub type RustStringRef = *mut RustString_opaque; +type RustStringRepr = *mut RefCell>; -// Link to our native llvm bindings (things that we need to use the C++ api -// for) and because llvm is written in C++ we need to link against libstdc++ -// -// You'll probably notice that there is an omission of all LLVM libraries -// from this location. This is because the set of LLVM libraries that we -// link to is mostly defined by LLVM, and the `llvm-config` tool is used to -// figure out the exact set of libraries. 
To do this, the build system -// generates an llvmdeps.rs file next to this one which will be -// automatically updated whenever LLVM is updated to include an up-to-date -// set of the libraries we need to link to LLVM for. -#[link(name = "rustllvm", kind = "static")] -#[linked_from = "rustllvm"] // not quite true but good enough -extern { - /* Create and destroy contexts. */ - pub fn LLVMContextCreate() -> ContextRef; - pub fn LLVMContextDispose(C: ContextRef); - pub fn LLVMGetMDKindIDInContext(C: ContextRef, - Name: *const c_char, - SLen: c_uint) - -> c_uint; - - /* Create and destroy modules. */ - pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, - C: ContextRef) - -> ModuleRef; - pub fn LLVMGetModuleContext(M: ModuleRef) -> ContextRef; - pub fn LLVMCloneModule(M: ModuleRef) -> ModuleRef; - pub fn LLVMDisposeModule(M: ModuleRef); - - /// Data layout. See Module::getDataLayout. - pub fn LLVMGetDataLayout(M: ModuleRef) -> *const c_char; - pub fn LLVMSetDataLayout(M: ModuleRef, Triple: *const c_char); - - /// Target triple. See Module::getTargetTriple. - pub fn LLVMGetTarget(M: ModuleRef) -> *const c_char; - pub fn LLVMSetTarget(M: ModuleRef, Triple: *const c_char); - - /// See Module::dump. - pub fn LLVMDumpModule(M: ModuleRef); - - /// See Module::setModuleInlineAsm. - pub fn LLVMSetModuleInlineAsm(M: ModuleRef, Asm: *const c_char); - - /// See llvm::LLVMTypeKind::getTypeID. - pub fn LLVMGetTypeKind(Ty: TypeRef) -> TypeKind; - - /// See llvm::LLVMType::getContext. 
- pub fn LLVMGetTypeContext(Ty: TypeRef) -> ContextRef; - - /* Operations on integer types */ - pub fn LLVMInt1TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMInt8TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMInt16TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMInt32TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMInt64TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMIntTypeInContext(C: ContextRef, NumBits: c_uint) - -> TypeRef; - - pub fn LLVMGetIntTypeWidth(IntegerTy: TypeRef) -> c_uint; - - /* Operations on real types */ - pub fn LLVMFloatTypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMDoubleTypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMX86FP80TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMFP128TypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMPPCFP128TypeInContext(C: ContextRef) -> TypeRef; - - /* Operations on function types */ - pub fn LLVMFunctionType(ReturnType: TypeRef, - ParamTypes: *const TypeRef, - ParamCount: c_uint, - IsVarArg: Bool) - -> TypeRef; - pub fn LLVMIsFunctionVarArg(FunctionTy: TypeRef) -> Bool; - pub fn LLVMGetReturnType(FunctionTy: TypeRef) -> TypeRef; - pub fn LLVMCountParamTypes(FunctionTy: TypeRef) -> c_uint; - pub fn LLVMGetParamTypes(FunctionTy: TypeRef, Dest: *mut TypeRef); - - /* Operations on struct types */ - pub fn LLVMStructTypeInContext(C: ContextRef, - ElementTypes: *const TypeRef, - ElementCount: c_uint, - Packed: Bool) - -> TypeRef; - pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint; - pub fn LLVMGetStructElementTypes(StructTy: TypeRef, - Dest: *mut TypeRef); - pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool; - - /* Operations on array, pointer, and vector types (sequence types) */ - pub fn LLVMRustArrayType(ElementType: TypeRef, ElementCount: u64) -> TypeRef; - pub fn LLVMPointerType(ElementType: TypeRef, AddressSpace: c_uint) - -> TypeRef; - pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) - -> TypeRef; - - pub fn 
LLVMGetElementType(Ty: TypeRef) -> TypeRef; - pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint; - pub fn LLVMGetPointerAddressSpace(PointerTy: TypeRef) -> c_uint; - pub fn LLVMGetPointerToGlobal(EE: ExecutionEngineRef, V: ValueRef) - -> *const c_void; - pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint; - - /* Operations on other types */ - pub fn LLVMVoidTypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMLabelTypeInContext(C: ContextRef) -> TypeRef; - pub fn LLVMMetadataTypeInContext(C: ContextRef) -> TypeRef; - - /* Operations on all values */ - pub fn LLVMTypeOf(Val: ValueRef) -> TypeRef; - pub fn LLVMGetValueName(Val: ValueRef) -> *const c_char; - pub fn LLVMSetValueName(Val: ValueRef, Name: *const c_char); - pub fn LLVMDumpValue(Val: ValueRef); - pub fn LLVMReplaceAllUsesWith(OldVal: ValueRef, NewVal: ValueRef); - pub fn LLVMHasMetadata(Val: ValueRef) -> c_int; - pub fn LLVMGetMetadata(Val: ValueRef, KindID: c_uint) -> ValueRef; - pub fn LLVMSetMetadata(Val: ValueRef, KindID: c_uint, Node: ValueRef); - - /* Operations on Uses */ - pub fn LLVMGetFirstUse(Val: ValueRef) -> UseRef; - pub fn LLVMGetNextUse(U: UseRef) -> UseRef; - pub fn LLVMGetUser(U: UseRef) -> ValueRef; - pub fn LLVMGetUsedValue(U: UseRef) -> ValueRef; - - /* Operations on Users */ - pub fn LLVMGetNumOperands(Val: ValueRef) -> c_int; - pub fn LLVMGetOperand(Val: ValueRef, Index: c_uint) -> ValueRef; - pub fn LLVMSetOperand(Val: ValueRef, Index: c_uint, Op: ValueRef); - - /* Operations on constants of any type */ - pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef; - /* all zeroes */ - pub fn LLVMConstAllOnes(Ty: TypeRef) -> ValueRef; - pub fn LLVMConstICmp(Pred: c_ushort, V1: ValueRef, V2: ValueRef) - -> ValueRef; - pub fn LLVMConstFCmp(Pred: c_ushort, V1: ValueRef, V2: ValueRef) - -> ValueRef; - /* only for isize/vector */ - pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef; - pub fn LLVMIsConstant(Val: ValueRef) -> Bool; - pub fn LLVMIsNull(Val: ValueRef) -> Bool; - pub fn LLVMIsUndef(Val: 
ValueRef) -> Bool; - pub fn LLVMConstPointerNull(Ty: TypeRef) -> ValueRef; - - /* Operations on metadata */ - pub fn LLVMMDStringInContext(C: ContextRef, - Str: *const c_char, - SLen: c_uint) - -> ValueRef; - pub fn LLVMMDNodeInContext(C: ContextRef, - Vals: *const ValueRef, - Count: c_uint) - -> ValueRef; - pub fn LLVMAddNamedMetadataOperand(M: ModuleRef, - Str: *const c_char, - Val: ValueRef); - - /* Operations on scalar constants */ - pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) - -> ValueRef; - pub fn LLVMConstIntOfString(IntTy: TypeRef, Text: *const c_char, Radix: u8) - -> ValueRef; - pub fn LLVMConstIntOfStringAndSize(IntTy: TypeRef, - Text: *const c_char, - SLen: c_uint, - Radix: u8) - -> ValueRef; - pub fn LLVMConstReal(RealTy: TypeRef, N: f64) -> ValueRef; - pub fn LLVMConstRealOfString(RealTy: TypeRef, Text: *const c_char) - -> ValueRef; - pub fn LLVMConstRealOfStringAndSize(RealTy: TypeRef, - Text: *const c_char, - SLen: c_uint) - -> ValueRef; - pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong; - pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong; - - - /* Operations on composite constants */ - pub fn LLVMConstStringInContext(C: ContextRef, - Str: *const c_char, - Length: c_uint, - DontNullTerminate: Bool) - -> ValueRef; - pub fn LLVMConstStructInContext(C: ContextRef, - ConstantVals: *const ValueRef, - Count: c_uint, - Packed: Bool) - -> ValueRef; - - pub fn LLVMConstArray(ElementTy: TypeRef, - ConstantVals: *const ValueRef, - Length: c_uint) - -> ValueRef; - pub fn LLVMConstVector(ScalarConstantVals: *const ValueRef, Size: c_uint) - -> ValueRef; - - /* Constant expressions */ - pub fn LLVMAlignOf(Ty: TypeRef) -> ValueRef; - pub fn LLVMSizeOf(Ty: TypeRef) -> ValueRef; - pub fn LLVMConstNeg(ConstantVal: ValueRef) -> ValueRef; - pub fn LLVMConstNSWNeg(ConstantVal: ValueRef) -> ValueRef; - pub fn LLVMConstNUWNeg(ConstantVal: ValueRef) -> ValueRef; - pub fn LLVMConstFNeg(ConstantVal: ValueRef) 
-> ValueRef; - pub fn LLVMConstNot(ConstantVal: ValueRef) -> ValueRef; - pub fn LLVMConstAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNSWAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNUWAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstFAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstSub(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNSWSub(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNUWSub(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstFSub(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstMul(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNSWMul(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstNUWMul(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstFMul(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstUDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstSDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstExactSDiv(LHSConstant: ValueRef, - RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstFDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstURem(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstSRem(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstFRem(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstAnd(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstOr(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstXor(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstShl(LHSConstant: ValueRef, RHSConstant: ValueRef) 
- -> ValueRef; - pub fn LLVMConstLShr(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstAShr(LHSConstant: ValueRef, RHSConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstGEP(ConstantVal: ValueRef, - ConstantIndices: *const ValueRef, - NumIndices: c_uint) - -> ValueRef; - pub fn LLVMConstInBoundsGEP(ConstantVal: ValueRef, - ConstantIndices: *const ValueRef, - NumIndices: c_uint) - -> ValueRef; - pub fn LLVMConstTrunc(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstSExt(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstZExt(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstFPTrunc(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstFPExt(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstUIToFP(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstSIToFP(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstFPToUI(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstFPToSI(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstPtrToInt(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstIntToPtr(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstBitCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstZExtOrBitCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstSExtOrBitCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstTruncOrBitCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstPointerCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstIntCast(ConstantVal: ValueRef, - ToType: TypeRef, - isSigned: Bool) - -> ValueRef; - pub fn LLVMConstFPCast(ConstantVal: ValueRef, ToType: TypeRef) - -> ValueRef; - pub fn LLVMConstSelect(ConstantCondition: ValueRef, - ConstantIfTrue: ValueRef, - 
ConstantIfFalse: ValueRef) - -> ValueRef; - pub fn LLVMConstExtractElement(VectorConstant: ValueRef, - IndexConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstInsertElement(VectorConstant: ValueRef, - ElementValueConstant: ValueRef, - IndexConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstShuffleVector(VectorAConstant: ValueRef, - VectorBConstant: ValueRef, - MaskConstant: ValueRef) - -> ValueRef; - pub fn LLVMConstExtractValue(AggConstant: ValueRef, - IdxList: *const c_uint, - NumIdx: c_uint) - -> ValueRef; - pub fn LLVMConstInsertValue(AggConstant: ValueRef, - ElementValueConstant: ValueRef, - IdxList: *const c_uint, - NumIdx: c_uint) - -> ValueRef; - pub fn LLVMConstInlineAsm(Ty: TypeRef, - AsmString: *const c_char, - Constraints: *const c_char, - HasSideEffects: Bool, - IsAlignStack: Bool) - -> ValueRef; - pub fn LLVMBlockAddress(F: ValueRef, BB: BasicBlockRef) -> ValueRef; - - - - /* Operations on global variables, functions, and aliases (globals) */ - pub fn LLVMGetGlobalParent(Global: ValueRef) -> ModuleRef; - pub fn LLVMIsDeclaration(Global: ValueRef) -> Bool; - pub fn LLVMGetLinkage(Global: ValueRef) -> c_uint; - pub fn LLVMSetLinkage(Global: ValueRef, Link: c_uint); - pub fn LLVMGetSection(Global: ValueRef) -> *const c_char; - pub fn LLVMSetSection(Global: ValueRef, Section: *const c_char); - pub fn LLVMGetVisibility(Global: ValueRef) -> c_uint; - pub fn LLVMSetVisibility(Global: ValueRef, Viz: c_uint); - pub fn LLVMGetAlignment(Global: ValueRef) -> c_uint; - pub fn LLVMSetAlignment(Global: ValueRef, Bytes: c_uint); - - - /* Operations on global variables */ - pub fn LLVMIsAGlobalVariable(GlobalVar: ValueRef) -> ValueRef; - pub fn LLVMAddGlobal(M: ModuleRef, Ty: TypeRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMAddGlobalInAddressSpace(M: ModuleRef, - Ty: TypeRef, - Name: *const c_char, - AddressSpace: c_uint) - -> ValueRef; - pub fn LLVMGetNamedGlobal(M: ModuleRef, Name: *const c_char) -> ValueRef; - pub fn LLVMGetOrInsertGlobal(M: ModuleRef, 
Name: *const c_char, T: TypeRef) -> ValueRef; - pub fn LLVMGetFirstGlobal(M: ModuleRef) -> ValueRef; - pub fn LLVMGetLastGlobal(M: ModuleRef) -> ValueRef; - pub fn LLVMGetNextGlobal(GlobalVar: ValueRef) -> ValueRef; - pub fn LLVMGetPreviousGlobal(GlobalVar: ValueRef) -> ValueRef; - pub fn LLVMDeleteGlobal(GlobalVar: ValueRef); - pub fn LLVMGetInitializer(GlobalVar: ValueRef) -> ValueRef; - pub fn LLVMSetInitializer(GlobalVar: ValueRef, - ConstantVal: ValueRef); - pub fn LLVMIsThreadLocal(GlobalVar: ValueRef) -> Bool; - pub fn LLVMSetThreadLocal(GlobalVar: ValueRef, IsThreadLocal: Bool); - pub fn LLVMIsGlobalConstant(GlobalVar: ValueRef) -> Bool; - pub fn LLVMSetGlobalConstant(GlobalVar: ValueRef, IsConstant: Bool); - pub fn LLVMGetNamedValue(M: ModuleRef, Name: *const c_char) -> ValueRef; - - /* Operations on aliases */ - pub fn LLVMAddAlias(M: ModuleRef, - Ty: TypeRef, - Aliasee: ValueRef, - Name: *const c_char) - -> ValueRef; - - /* Operations on functions */ - pub fn LLVMAddFunction(M: ModuleRef, - Name: *const c_char, - FunctionTy: TypeRef) - -> ValueRef; - pub fn LLVMGetNamedFunction(M: ModuleRef, Name: *const c_char) -> ValueRef; - pub fn LLVMGetFirstFunction(M: ModuleRef) -> ValueRef; - pub fn LLVMGetLastFunction(M: ModuleRef) -> ValueRef; - pub fn LLVMGetNextFunction(Fn: ValueRef) -> ValueRef; - pub fn LLVMGetPreviousFunction(Fn: ValueRef) -> ValueRef; - pub fn LLVMDeleteFunction(Fn: ValueRef); - pub fn LLVMGetOrInsertFunction(M: ModuleRef, - Name: *const c_char, - FunctionTy: TypeRef) - -> ValueRef; - pub fn LLVMGetIntrinsicID(Fn: ValueRef) -> c_uint; - pub fn LLVMGetFunctionCallConv(Fn: ValueRef) -> c_uint; - pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint); - pub fn LLVMGetGC(Fn: ValueRef) -> *const c_char; - pub fn LLVMSetGC(Fn: ValueRef, Name: *const c_char); - pub fn LLVMAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: uint64_t); - pub fn LLVMAddFunctionAttribute(Fn: ValueRef, index: c_uint, PA: uint64_t); - pub fn 
LLVMAddFunctionAttrString(Fn: ValueRef, index: c_uint, Name: *const c_char); - pub fn LLVMAddFunctionAttrStringValue(Fn: ValueRef, index: c_uint, - Name: *const c_char, - Value: *const c_char); - pub fn LLVMRemoveFunctionAttrString(Fn: ValueRef, index: c_uint, Name: *const c_char); - pub fn LLVMGetFunctionAttr(Fn: ValueRef) -> c_ulonglong; - pub fn LLVMRemoveFunctionAttr(Fn: ValueRef, val: c_ulonglong); - - /* Operations on parameters */ - pub fn LLVMCountParams(Fn: ValueRef) -> c_uint; - pub fn LLVMGetParams(Fn: ValueRef, Params: *const ValueRef); - pub fn LLVMGetParam(Fn: ValueRef, Index: c_uint) -> ValueRef; - pub fn LLVMGetParamParent(Inst: ValueRef) -> ValueRef; - pub fn LLVMGetFirstParam(Fn: ValueRef) -> ValueRef; - pub fn LLVMGetLastParam(Fn: ValueRef) -> ValueRef; - pub fn LLVMGetNextParam(Arg: ValueRef) -> ValueRef; - pub fn LLVMGetPreviousParam(Arg: ValueRef) -> ValueRef; - pub fn LLVMAddAttribute(Arg: ValueRef, PA: c_uint); - pub fn LLVMRemoveAttribute(Arg: ValueRef, PA: c_uint); - pub fn LLVMGetAttribute(Arg: ValueRef) -> c_uint; - pub fn LLVMSetParamAlignment(Arg: ValueRef, align: c_uint); - - /* Operations on basic blocks */ - pub fn LLVMBasicBlockAsValue(BB: BasicBlockRef) -> ValueRef; - pub fn LLVMValueIsBasicBlock(Val: ValueRef) -> Bool; - pub fn LLVMValueAsBasicBlock(Val: ValueRef) -> BasicBlockRef; - pub fn LLVMGetBasicBlockParent(BB: BasicBlockRef) -> ValueRef; - pub fn LLVMCountBasicBlocks(Fn: ValueRef) -> c_uint; - pub fn LLVMGetBasicBlocks(Fn: ValueRef, BasicBlocks: *const ValueRef); - pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef; - pub fn LLVMGetLastBasicBlock(Fn: ValueRef) -> BasicBlockRef; - pub fn LLVMGetNextBasicBlock(BB: BasicBlockRef) -> BasicBlockRef; - pub fn LLVMGetPreviousBasicBlock(BB: BasicBlockRef) -> BasicBlockRef; - pub fn LLVMGetEntryBasicBlock(Fn: ValueRef) -> BasicBlockRef; - - pub fn LLVMAppendBasicBlockInContext(C: ContextRef, - Fn: ValueRef, - Name: *const c_char) - -> BasicBlockRef; - pub fn 
LLVMInsertBasicBlockInContext(C: ContextRef, - BB: BasicBlockRef, - Name: *const c_char) - -> BasicBlockRef; - pub fn LLVMDeleteBasicBlock(BB: BasicBlockRef); - - pub fn LLVMMoveBasicBlockAfter(BB: BasicBlockRef, - MoveAfter: BasicBlockRef); - - pub fn LLVMMoveBasicBlockBefore(BB: BasicBlockRef, - MoveBefore: BasicBlockRef); - - /* Operations on instructions */ - pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef; - pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef; - pub fn LLVMGetLastInstruction(BB: BasicBlockRef) -> ValueRef; - pub fn LLVMGetNextInstruction(Inst: ValueRef) -> ValueRef; - pub fn LLVMGetPreviousInstruction(Inst: ValueRef) -> ValueRef; - pub fn LLVMInstructionEraseFromParent(Inst: ValueRef); - - /* Operations on call sites */ - pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); - pub fn LLVMGetInstructionCallConv(Instr: ValueRef) -> c_uint; - pub fn LLVMAddInstrAttribute(Instr: ValueRef, - index: c_uint, - IA: c_uint); - pub fn LLVMRemoveInstrAttribute(Instr: ValueRef, - index: c_uint, - IA: c_uint); - pub fn LLVMSetInstrParamAlignment(Instr: ValueRef, - index: c_uint, - align: c_uint); - pub fn LLVMAddCallSiteAttribute(Instr: ValueRef, - index: c_uint, - Val: uint64_t); - pub fn LLVMAddDereferenceableCallSiteAttr(Instr: ValueRef, - index: c_uint, - bytes: uint64_t); - - /* Operations on call instructions (only) */ - pub fn LLVMIsTailCall(CallInst: ValueRef) -> Bool; - pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool); - - /* Operations on load/store instructions (only) */ - pub fn LLVMGetVolatile(MemoryAccessInst: ValueRef) -> Bool; - pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); - - /* Operations on phi nodes */ - pub fn LLVMAddIncoming(PhiNode: ValueRef, - IncomingValues: *const ValueRef, - IncomingBlocks: *const BasicBlockRef, - Count: c_uint); - pub fn LLVMCountIncoming(PhiNode: ValueRef) -> c_uint; - pub fn LLVMGetIncomingValue(PhiNode: ValueRef, Index: c_uint) - -> 
ValueRef; - pub fn LLVMGetIncomingBlock(PhiNode: ValueRef, Index: c_uint) - -> BasicBlockRef; - - /* Instruction builders */ - pub fn LLVMCreateBuilderInContext(C: ContextRef) -> BuilderRef; - pub fn LLVMPositionBuilder(Builder: BuilderRef, - Block: BasicBlockRef, - Instr: ValueRef); - pub fn LLVMPositionBuilderBefore(Builder: BuilderRef, - Instr: ValueRef); - pub fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, - Block: BasicBlockRef); - pub fn LLVMGetInsertBlock(Builder: BuilderRef) -> BasicBlockRef; - pub fn LLVMClearInsertionPosition(Builder: BuilderRef); - pub fn LLVMInsertIntoBuilder(Builder: BuilderRef, Instr: ValueRef); - pub fn LLVMInsertIntoBuilderWithName(Builder: BuilderRef, - Instr: ValueRef, - Name: *const c_char); - pub fn LLVMDisposeBuilder(Builder: BuilderRef); - - /* Execution engine */ - pub fn LLVMBuildExecutionEngine(Mod: ModuleRef) -> ExecutionEngineRef; - pub fn LLVMDisposeExecutionEngine(EE: ExecutionEngineRef); - pub fn LLVMExecutionEngineFinalizeObject(EE: ExecutionEngineRef); - pub fn LLVMRustLoadDynamicLibrary(path: *const c_char) -> Bool; - pub fn LLVMExecutionEngineAddModule(EE: ExecutionEngineRef, M: ModuleRef); - pub fn LLVMExecutionEngineRemoveModule(EE: ExecutionEngineRef, M: ModuleRef) - -> Bool; - - /* Metadata */ - pub fn LLVMSetCurrentDebugLocation(Builder: BuilderRef, L: ValueRef); - pub fn LLVMGetCurrentDebugLocation(Builder: BuilderRef) -> ValueRef; - pub fn LLVMSetInstDebugLocation(Builder: BuilderRef, Inst: ValueRef); - - /* Terminators */ - pub fn LLVMBuildRetVoid(B: BuilderRef) -> ValueRef; - pub fn LLVMBuildRet(B: BuilderRef, V: ValueRef) -> ValueRef; - pub fn LLVMBuildAggregateRet(B: BuilderRef, - RetVals: *const ValueRef, - N: c_uint) - -> ValueRef; - pub fn LLVMBuildBr(B: BuilderRef, Dest: BasicBlockRef) -> ValueRef; - pub fn LLVMBuildCondBr(B: BuilderRef, - If: ValueRef, - Then: BasicBlockRef, - Else: BasicBlockRef) - -> ValueRef; - pub fn LLVMBuildSwitch(B: BuilderRef, - V: ValueRef, - Else: BasicBlockRef, - 
NumCases: c_uint) - -> ValueRef; - pub fn LLVMBuildIndirectBr(B: BuilderRef, - Addr: ValueRef, - NumDests: c_uint) - -> ValueRef; - pub fn LLVMBuildInvoke(B: BuilderRef, - Fn: ValueRef, - Args: *const ValueRef, - NumArgs: c_uint, - Then: BasicBlockRef, - Catch: BasicBlockRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMRustBuildLandingPad(B: BuilderRef, - Ty: TypeRef, - PersFn: ValueRef, - NumClauses: c_uint, - Name: *const c_char, - F: ValueRef) - -> ValueRef; - pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef; - pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef; - - /* Add a case to the switch instruction */ - pub fn LLVMAddCase(Switch: ValueRef, - OnVal: ValueRef, - Dest: BasicBlockRef); - - /* Add a destination to the indirectbr instruction */ - pub fn LLVMAddDestination(IndirectBr: ValueRef, Dest: BasicBlockRef); - - /* Add a clause to the landing pad instruction */ - pub fn LLVMAddClause(LandingPad: ValueRef, ClauseVal: ValueRef); - - /* Set the cleanup on a landing pad instruction */ - pub fn LLVMSetCleanup(LandingPad: ValueRef, Val: Bool); - - /* Arithmetic */ - pub fn LLVMBuildAdd(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNSWAdd(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNUWAdd(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFAdd(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSub(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNSWSub(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNUWSub(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFSub(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; 
- pub fn LLVMBuildMul(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNSWMul(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNUWMul(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFMul(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildUDiv(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSDiv(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildExactSDiv(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFDiv(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildURem(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSRem(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFRem(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildShl(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildLShr(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildAShr(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildAnd(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildOr(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildXor(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildBinOp(B: BuilderRef, - Op: Opcode, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - 
pub fn LLVMBuildNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNSWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNUWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildNot(B: BuilderRef, V: ValueRef, Name: *const c_char) - -> ValueRef; - - /* Memory */ - pub fn LLVMBuildAlloca(B: BuilderRef, Ty: TypeRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFree(B: BuilderRef, PointerVal: ValueRef) -> ValueRef; - pub fn LLVMBuildLoad(B: BuilderRef, - PointerVal: ValueRef, - Name: *const c_char) - -> ValueRef; - - pub fn LLVMBuildStore(B: BuilderRef, Val: ValueRef, Ptr: ValueRef) - -> ValueRef; - - pub fn LLVMBuildGEP(B: BuilderRef, - Pointer: ValueRef, - Indices: *const ValueRef, - NumIndices: c_uint, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildInBoundsGEP(B: BuilderRef, - Pointer: ValueRef, - Indices: *const ValueRef, - NumIndices: c_uint, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildStructGEP(B: BuilderRef, - Pointer: ValueRef, - Idx: c_uint, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildGlobalString(B: BuilderRef, - Str: *const c_char, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildGlobalStringPtr(B: BuilderRef, - Str: *const c_char, - Name: *const c_char) - -> ValueRef; - - /* Casts */ - pub fn LLVMBuildTrunc(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildZExt(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSExt(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFPToUI(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFPToSI(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) 
- -> ValueRef; - pub fn LLVMBuildUIToFP(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSIToFP(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFPTrunc(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFPExt(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildPtrToInt(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildIntToPtr(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildBitCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildZExtOrBitCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSExtOrBitCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildTruncOrBitCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildCast(B: BuilderRef, - Op: Opcode, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) -> ValueRef; - pub fn LLVMBuildPointerCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildIntCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFPCast(B: BuilderRef, - Val: ValueRef, - DestTy: TypeRef, - Name: *const c_char) - -> ValueRef; - - /* Comparisons */ - pub fn LLVMBuildICmp(B: BuilderRef, - Op: c_uint, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildFCmp(B: BuilderRef, - Op: c_uint, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - - /* Miscellaneous instructions */ - pub fn LLVMBuildPhi(B: BuilderRef, 
Ty: TypeRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildCall(B: BuilderRef, - Fn: ValueRef, - Args: *const ValueRef, - NumArgs: c_uint, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildSelect(B: BuilderRef, - If: ValueRef, - Then: ValueRef, - Else: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildVAArg(B: BuilderRef, - list: ValueRef, - Ty: TypeRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildExtractElement(B: BuilderRef, - VecVal: ValueRef, - Index: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildInsertElement(B: BuilderRef, - VecVal: ValueRef, - EltVal: ValueRef, - Index: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildShuffleVector(B: BuilderRef, - V1: ValueRef, - V2: ValueRef, - Mask: ValueRef, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildExtractValue(B: BuilderRef, - AggVal: ValueRef, - Index: c_uint, - Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildInsertValue(B: BuilderRef, - AggVal: ValueRef, - EltVal: ValueRef, - Index: c_uint, - Name: *const c_char) - -> ValueRef; - - pub fn LLVMBuildIsNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildIsNotNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) - -> ValueRef; - pub fn LLVMBuildPtrDiff(B: BuilderRef, - LHS: ValueRef, - RHS: ValueRef, - Name: *const c_char) - -> ValueRef; - - /* Atomic Operations */ - pub fn LLVMBuildAtomicLoad(B: BuilderRef, - PointerVal: ValueRef, - Name: *const c_char, - Order: AtomicOrdering, - Alignment: c_uint) - -> ValueRef; - - pub fn LLVMBuildAtomicStore(B: BuilderRef, - Val: ValueRef, - Ptr: ValueRef, - Order: AtomicOrdering, - Alignment: c_uint) - -> ValueRef; - - pub fn LLVMBuildAtomicCmpXchg(B: BuilderRef, - LHS: ValueRef, - CMP: ValueRef, - RHS: ValueRef, - Order: AtomicOrdering, - FailureOrder: AtomicOrdering) - -> ValueRef; - pub fn LLVMBuildAtomicRMW(B: BuilderRef, - Op: AtomicBinOp, - LHS: ValueRef, - RHS: ValueRef, - Order: 
AtomicOrdering, - SingleThreaded: Bool) - -> ValueRef; - - pub fn LLVMBuildAtomicFence(B: BuilderRef, - Order: AtomicOrdering, - Scope: SynchronizationScope); - - - /* Selected entries from the downcasts. */ - pub fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef; - pub fn LLVMIsAStoreInst(Inst: ValueRef) -> ValueRef; - - /// Writes a module to the specified path. Returns 0 on success. - pub fn LLVMWriteBitcodeToFile(M: ModuleRef, Path: *const c_char) -> c_int; - - /// Creates target data from a target layout string. - pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; - /// Adds the target data to the given pass manager. The pass manager - /// references the target data only weakly. - pub fn LLVMAddTargetData(TD: TargetDataRef, PM: PassManagerRef); - /// Number of bytes clobbered when doing a Store to *T. - pub fn LLVMStoreSizeOfType(TD: TargetDataRef, Ty: TypeRef) - -> c_ulonglong; - - /// Number of bytes clobbered when doing a Store to *T. - pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) - -> c_ulonglong; - - /// Distance between successive elements in an array of T. Includes ABI padding. - pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Returns the preferred alignment of a type. - pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) - -> c_uint; - /// Returns the minimum alignment of a type. - pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) - -> c_uint; - - /// Computes the byte offset of the indexed struct element for a - /// target. - pub fn LLVMOffsetOfElement(TD: TargetDataRef, - StructTy: TypeRef, - Element: c_uint) - -> c_ulonglong; - - /// Returns the minimum alignment of a type when part of a call frame. - pub fn LLVMCallFrameAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) - -> c_uint; - - /// Disposes target data. - pub fn LLVMDisposeTargetData(TD: TargetDataRef); - - /// Creates a pass manager. 
- pub fn LLVMCreatePassManager() -> PassManagerRef; - - /// Creates a function-by-function pass manager - pub fn LLVMCreateFunctionPassManagerForModule(M: ModuleRef) - -> PassManagerRef; - - /// Disposes a pass manager. - pub fn LLVMDisposePassManager(PM: PassManagerRef); - - /// Runs a pass manager on a module. - pub fn LLVMRunPassManager(PM: PassManagerRef, M: ModuleRef) -> Bool; - - /// Runs the function passes on the provided function. - pub fn LLVMRunFunctionPassManager(FPM: PassManagerRef, F: ValueRef) - -> Bool; - - /// Initializes all the function passes scheduled in the manager - pub fn LLVMInitializeFunctionPassManager(FPM: PassManagerRef) -> Bool; - - /// Finalizes all the function passes scheduled in the manager - pub fn LLVMFinalizeFunctionPassManager(FPM: PassManagerRef) -> Bool; - - pub fn LLVMInitializePasses(); - - /// Adds a verification pass. - pub fn LLVMAddVerifierPass(PM: PassManagerRef); - - pub fn LLVMAddGlobalOptimizerPass(PM: PassManagerRef); - pub fn LLVMAddIPSCCPPass(PM: PassManagerRef); - pub fn LLVMAddDeadArgEliminationPass(PM: PassManagerRef); - pub fn LLVMAddInstructionCombiningPass(PM: PassManagerRef); - pub fn LLVMAddCFGSimplificationPass(PM: PassManagerRef); - pub fn LLVMAddFunctionInliningPass(PM: PassManagerRef); - pub fn LLVMAddFunctionAttrsPass(PM: PassManagerRef); - pub fn LLVMAddScalarReplAggregatesPass(PM: PassManagerRef); - pub fn LLVMAddScalarReplAggregatesPassSSA(PM: PassManagerRef); - pub fn LLVMAddJumpThreadingPass(PM: PassManagerRef); - pub fn LLVMAddConstantPropagationPass(PM: PassManagerRef); - pub fn LLVMAddReassociatePass(PM: PassManagerRef); - pub fn LLVMAddLoopRotatePass(PM: PassManagerRef); - pub fn LLVMAddLICMPass(PM: PassManagerRef); - pub fn LLVMAddLoopUnswitchPass(PM: PassManagerRef); - pub fn LLVMAddLoopDeletionPass(PM: PassManagerRef); - pub fn LLVMAddLoopUnrollPass(PM: PassManagerRef); - pub fn LLVMAddGVNPass(PM: PassManagerRef); - pub fn LLVMAddMemCpyOptPass(PM: PassManagerRef); - pub fn 
LLVMAddSCCPPass(PM: PassManagerRef); - pub fn LLVMAddDeadStoreEliminationPass(PM: PassManagerRef); - pub fn LLVMAddStripDeadPrototypesPass(PM: PassManagerRef); - pub fn LLVMAddConstantMergePass(PM: PassManagerRef); - pub fn LLVMAddArgumentPromotionPass(PM: PassManagerRef); - pub fn LLVMAddTailCallEliminationPass(PM: PassManagerRef); - pub fn LLVMAddIndVarSimplifyPass(PM: PassManagerRef); - pub fn LLVMAddAggressiveDCEPass(PM: PassManagerRef); - pub fn LLVMAddGlobalDCEPass(PM: PassManagerRef); - pub fn LLVMAddCorrelatedValuePropagationPass(PM: PassManagerRef); - pub fn LLVMAddPruneEHPass(PM: PassManagerRef); - pub fn LLVMAddSimplifyLibCallsPass(PM: PassManagerRef); - pub fn LLVMAddLoopIdiomPass(PM: PassManagerRef); - pub fn LLVMAddEarlyCSEPass(PM: PassManagerRef); - pub fn LLVMAddTypeBasedAliasAnalysisPass(PM: PassManagerRef); - pub fn LLVMAddBasicAliasAnalysisPass(PM: PassManagerRef); - - pub fn LLVMPassManagerBuilderCreate() -> PassManagerBuilderRef; - pub fn LLVMPassManagerBuilderDispose(PMB: PassManagerBuilderRef); - pub fn LLVMPassManagerBuilderSetOptLevel(PMB: PassManagerBuilderRef, - OptimizationLevel: c_uint); - pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: PassManagerBuilderRef, - Value: Bool); - pub fn LLVMPassManagerBuilderSetDisableUnitAtATime( - PMB: PassManagerBuilderRef, - Value: Bool); - pub fn LLVMPassManagerBuilderSetDisableUnrollLoops( - PMB: PassManagerBuilderRef, - Value: Bool); - pub fn LLVMPassManagerBuilderSetDisableSimplifyLibCalls( - PMB: PassManagerBuilderRef, - Value: Bool); - pub fn LLVMPassManagerBuilderUseInlinerWithThreshold( - PMB: PassManagerBuilderRef, - threshold: c_uint); - pub fn LLVMPassManagerBuilderPopulateModulePassManager( - PMB: PassManagerBuilderRef, - PM: PassManagerRef); - - pub fn LLVMPassManagerBuilderPopulateFunctionPassManager( - PMB: PassManagerBuilderRef, - PM: PassManagerRef); - pub fn LLVMPassManagerBuilderPopulateLTOPassManager( - PMB: PassManagerBuilderRef, - PM: PassManagerRef, - Internalize: Bool, - 
RunInliner: Bool); - - /// Destroys a memory buffer. - pub fn LLVMDisposeMemoryBuffer(MemBuf: MemoryBufferRef); - - - /* Stuff that's in rustllvm/ because it's not upstream yet. */ - - /// Opens an object file. - pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef; - /// Closes an object file. - pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef); - - /// Enumerates the sections in an object file. - pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef; - /// Destroys a section iterator. - pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef); - /// Returns true if the section iterator is at the end of the section - /// list: - pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef, - SI: SectionIteratorRef) - -> Bool; - /// Moves the section iterator to point to the next section. - pub fn LLVMMoveToNextSection(SI: SectionIteratorRef); - /// Returns the current section size. - pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong; - /// Returns the current section contents as a string buffer. - pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char; - - /// Reads the given file and returns it as a memory buffer. Use - /// LLVMDisposeMemoryBuffer() to get rid of it. - pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char) - -> MemoryBufferRef; - /// Borrows the contents of the memory buffer (doesn't copy it) - pub fn LLVMCreateMemoryBufferWithMemoryRange(InputData: *const c_char, - InputDataLength: size_t, - BufferName: *const c_char, - RequiresNull: Bool) - -> MemoryBufferRef; - pub fn LLVMCreateMemoryBufferWithMemoryRangeCopy(InputData: *const c_char, - InputDataLength: size_t, - BufferName: *const c_char) - -> MemoryBufferRef; - - pub fn LLVMIsMultithreaded() -> Bool; - pub fn LLVMStartMultithreaded() -> Bool; - - /// Returns a string describing the last error caused by an LLVMRust* call. 
- pub fn LLVMRustGetLastError() -> *const c_char; - - /// Print the pass timings since static dtors aren't picking them up. - pub fn LLVMRustPrintPassTimings(); - - pub fn LLVMStructCreateNamed(C: ContextRef, Name: *const c_char) -> TypeRef; - - pub fn LLVMStructSetBody(StructTy: TypeRef, - ElementTypes: *const TypeRef, - ElementCount: c_uint, - Packed: Bool); - - pub fn LLVMConstNamedStruct(S: TypeRef, - ConstantVals: *const ValueRef, - Count: c_uint) - -> ValueRef; - - /// Enables LLVM debug output. - pub fn LLVMSetDebug(Enabled: c_int); - - /// Prepares inline assembly. - pub fn LLVMInlineAsm(Ty: TypeRef, - AsmString: *const c_char, - Constraints: *const c_char, - SideEffects: Bool, - AlignStack: Bool, - Dialect: c_uint) - -> ValueRef; - - pub fn LLVMRustDebugMetadataVersion() -> u32; - pub fn LLVMVersionMajor() -> u32; - pub fn LLVMVersionMinor() -> u32; - - pub fn LLVMRustAddModuleFlag(M: ModuleRef, - name: *const c_char, - value: u32); - - pub fn LLVMDIBuilderCreate(M: ModuleRef) -> DIBuilderRef; - - pub fn LLVMDIBuilderDispose(Builder: DIBuilderRef); - - pub fn LLVMDIBuilderFinalize(Builder: DIBuilderRef); - - pub fn LLVMDIBuilderCreateCompileUnit(Builder: DIBuilderRef, - Lang: c_uint, - File: *const c_char, - Dir: *const c_char, - Producer: *const c_char, - isOptimized: bool, - Flags: *const c_char, - RuntimeVer: c_uint, - SplitName: *const c_char) - -> DIDescriptor; - - pub fn LLVMDIBuilderCreateFile(Builder: DIBuilderRef, - Filename: *const c_char, - Directory: *const c_char) - -> DIFile; - - pub fn LLVMDIBuilderCreateSubroutineType(Builder: DIBuilderRef, - File: DIFile, - ParameterTypes: DIArray) - -> DICompositeType; - - pub fn LLVMDIBuilderCreateFunction(Builder: DIBuilderRef, - Scope: DIDescriptor, - Name: *const c_char, - LinkageName: *const c_char, - File: DIFile, - LineNo: c_uint, - Ty: DIType, - isLocalToUnit: bool, - isDefinition: bool, - ScopeLine: c_uint, - Flags: c_uint, - isOptimized: bool, - Fn: ValueRef, - TParam: DIArray, - Decl: 
DIDescriptor) - -> DISubprogram; - - pub fn LLVMDIBuilderCreateBasicType(Builder: DIBuilderRef, - Name: *const c_char, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - Encoding: c_uint) - -> DIBasicType; - - pub fn LLVMDIBuilderCreatePointerType(Builder: DIBuilderRef, - PointeeTy: DIType, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - Name: *const c_char) - -> DIDerivedType; - - pub fn LLVMDIBuilderCreateStructType(Builder: DIBuilderRef, - Scope: DIDescriptor, - Name: *const c_char, - File: DIFile, - LineNumber: c_uint, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - Flags: c_uint, - DerivedFrom: DIType, - Elements: DIArray, - RunTimeLang: c_uint, - VTableHolder: DIType, - UniqueId: *const c_char) - -> DICompositeType; - - pub fn LLVMDIBuilderCreateMemberType(Builder: DIBuilderRef, - Scope: DIDescriptor, - Name: *const c_char, - File: DIFile, - LineNo: c_uint, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - OffsetInBits: c_ulonglong, - Flags: c_uint, - Ty: DIType) - -> DIDerivedType; - - pub fn LLVMDIBuilderCreateLexicalBlock(Builder: DIBuilderRef, - Scope: DIScope, - File: DIFile, - Line: c_uint, - Col: c_uint) - -> DILexicalBlock; - - pub fn LLVMDIBuilderCreateStaticVariable(Builder: DIBuilderRef, - Context: DIScope, - Name: *const c_char, - LinkageName: *const c_char, - File: DIFile, - LineNo: c_uint, - Ty: DIType, - isLocalToUnit: bool, - Val: ValueRef, - Decl: DIDescriptor) - -> DIGlobalVariable; - - pub fn LLVMDIBuilderCreateVariable(Builder: DIBuilderRef, - Tag: c_uint, - Scope: DIDescriptor, - Name: *const c_char, - File: DIFile, - LineNo: c_uint, - Ty: DIType, - AlwaysPreserve: bool, - Flags: c_uint, - AddrOps: *const i64, - AddrOpsCount: c_uint, - ArgNo: c_uint) - -> DIVariable; - - pub fn LLVMDIBuilderCreateArrayType(Builder: DIBuilderRef, - Size: c_ulonglong, - AlignInBits: c_ulonglong, - Ty: DIType, - Subscripts: DIArray) - -> DIType; - - pub fn LLVMDIBuilderCreateVectorType(Builder: DIBuilderRef, - Size: 
c_ulonglong, - AlignInBits: c_ulonglong, - Ty: DIType, - Subscripts: DIArray) - -> DIType; - - pub fn LLVMDIBuilderGetOrCreateSubrange(Builder: DIBuilderRef, - Lo: c_longlong, - Count: c_longlong) - -> DISubrange; - - pub fn LLVMDIBuilderGetOrCreateArray(Builder: DIBuilderRef, - Ptr: *const DIDescriptor, - Count: c_uint) - -> DIArray; - - pub fn LLVMDIBuilderInsertDeclareAtEnd(Builder: DIBuilderRef, - Val: ValueRef, - VarInfo: DIVariable, - AddrOps: *const i64, - AddrOpsCount: c_uint, - DL: ValueRef, - InsertAtEnd: BasicBlockRef) - -> ValueRef; - - pub fn LLVMDIBuilderInsertDeclareBefore(Builder: DIBuilderRef, - Val: ValueRef, - VarInfo: DIVariable, - AddrOps: *const i64, - AddrOpsCount: c_uint, - DL: ValueRef, - InsertBefore: ValueRef) - -> ValueRef; - - pub fn LLVMDIBuilderCreateEnumerator(Builder: DIBuilderRef, - Name: *const c_char, - Val: c_ulonglong) - -> DIEnumerator; - - pub fn LLVMDIBuilderCreateEnumerationType(Builder: DIBuilderRef, - Scope: DIScope, - Name: *const c_char, - File: DIFile, - LineNumber: c_uint, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - Elements: DIArray, - ClassType: DIType) - -> DIType; - - pub fn LLVMDIBuilderCreateUnionType(Builder: DIBuilderRef, - Scope: DIScope, - Name: *const c_char, - File: DIFile, - LineNumber: c_uint, - SizeInBits: c_ulonglong, - AlignInBits: c_ulonglong, - Flags: c_uint, - Elements: DIArray, - RunTimeLang: c_uint, - UniqueId: *const c_char) - -> DIType; - - pub fn LLVMSetUnnamedAddr(GlobalVar: ValueRef, UnnamedAddr: Bool); - - pub fn LLVMDIBuilderCreateTemplateTypeParameter(Builder: DIBuilderRef, - Scope: DIScope, - Name: *const c_char, - Ty: DIType, - File: DIFile, - LineNo: c_uint, - ColumnNo: c_uint) - -> DITemplateTypeParameter; - - pub fn LLVMDIBuilderCreateOpDeref() -> i64; - - pub fn LLVMDIBuilderCreateOpPlus() -> i64; - - pub fn LLVMDIBuilderCreateNameSpace(Builder: DIBuilderRef, - Scope: DIScope, - Name: *const c_char, - File: DIFile, - LineNo: c_uint) - -> DINameSpace; - - pub fn 
LLVMDIBuilderCreateDebugLocation(Context: ContextRef, - Line: c_uint, - Column: c_uint, - Scope: DIScope, - InlinedAt: MetadataRef) - -> ValueRef; - - pub fn LLVMDICompositeTypeSetTypeArray(Builder: DIBuilderRef, - CompositeType: DIType, - TypeArray: DIArray); - pub fn LLVMWriteTypeToString(Type: TypeRef, s: RustStringRef); - pub fn LLVMWriteValueToString(value_ref: ValueRef, s: RustStringRef); - - pub fn LLVMIsAArgument(value_ref: ValueRef) -> ValueRef; - - pub fn LLVMIsAAllocaInst(value_ref: ValueRef) -> ValueRef; - pub fn LLVMIsAConstantInt(value_ref: ValueRef) -> ValueRef; - - pub fn LLVMRustAddPass(PM: PassManagerRef, Pass: *const c_char) -> bool; - pub fn LLVMRustCreateTargetMachine(Triple: *const c_char, - CPU: *const c_char, - Features: *const c_char, - Model: CodeGenModel, - Reloc: RelocMode, - Level: CodeGenOptLevel, - UseSoftFP: bool, - PositionIndependentExecutable: bool, - FunctionSections: bool, - DataSections: bool) -> TargetMachineRef; - pub fn LLVMRustDisposeTargetMachine(T: TargetMachineRef); - pub fn LLVMRustAddAnalysisPasses(T: TargetMachineRef, - PM: PassManagerRef, - M: ModuleRef); - pub fn LLVMRustAddBuilderLibraryInfo(PMB: PassManagerBuilderRef, - M: ModuleRef, - DisableSimplifyLibCalls: bool); - pub fn LLVMRustConfigurePassManagerBuilder(PMB: PassManagerBuilderRef, - OptLevel: CodeGenOptLevel, - MergeFunctions: bool, - SLPVectorize: bool, - LoopVectorize: bool); - pub fn LLVMRustAddLibraryInfo(PM: PassManagerRef, M: ModuleRef, - DisableSimplifyLibCalls: bool); - pub fn LLVMRustRunFunctionPassManager(PM: PassManagerRef, M: ModuleRef); - pub fn LLVMRustWriteOutputFile(T: TargetMachineRef, - PM: PassManagerRef, - M: ModuleRef, - Output: *const c_char, - FileType: FileType) -> bool; - pub fn LLVMRustPrintModule(PM: PassManagerRef, - M: ModuleRef, - Output: *const c_char); - pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); - pub fn LLVMRustPrintPasses(); - pub fn LLVMRustSetNormalizedTarget(M: ModuleRef, triple: *const 
c_char); - pub fn LLVMRustAddAlwaysInlinePass(P: PassManagerBuilderRef, - AddLifetimes: bool); - pub fn LLVMRustLinkInExternalBitcode(M: ModuleRef, - bc: *const c_char, - len: size_t) -> bool; - pub fn LLVMRustRunRestrictionPass(M: ModuleRef, - syms: *const *const c_char, - len: size_t); - pub fn LLVMRustMarkAllFunctionsNounwind(M: ModuleRef); - - pub fn LLVMRustOpenArchive(path: *const c_char) -> ArchiveRef; - pub fn LLVMRustArchiveIteratorNew(AR: ArchiveRef) -> ArchiveIteratorRef; - pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef) -> ArchiveChildRef; - pub fn LLVMRustArchiveChildName(ACR: ArchiveChildRef, - size: *mut size_t) -> *const c_char; - pub fn LLVMRustArchiveChildData(ACR: ArchiveChildRef, - size: *mut size_t) -> *const c_char; - pub fn LLVMRustArchiveChildFree(ACR: ArchiveChildRef); - pub fn LLVMRustArchiveIteratorFree(AIR: ArchiveIteratorRef); - pub fn LLVMRustDestroyArchive(AR: ArchiveRef); - - pub fn LLVMRustSetDLLStorageClass(V: ValueRef, - C: DLLStorageClassTypes); - - pub fn LLVMRustGetSectionName(SI: SectionIteratorRef, - data: *mut *const c_char) -> c_int; - - pub fn LLVMWriteTwineToString(T: TwineRef, s: RustStringRef); - - pub fn LLVMContextSetDiagnosticHandler(C: ContextRef, - Handler: DiagnosticHandler, - DiagnosticContext: *mut c_void); - - pub fn LLVMUnpackOptimizationDiagnostic(DI: DiagnosticInfoRef, - pass_name_out: *mut *const c_char, - function_out: *mut ValueRef, - debugloc_out: *mut DebugLocRef, - message_out: *mut TwineRef); - pub fn LLVMUnpackInlineAsmDiagnostic(DI: DiagnosticInfoRef, - cookie_out: *mut c_uint, - message_out: *mut TwineRef, - instruction_out: *mut ValueRef); - - pub fn LLVMWriteDiagnosticInfoToString(DI: DiagnosticInfoRef, s: RustStringRef); - pub fn LLVMGetDiagInfoSeverity(DI: DiagnosticInfoRef) -> DiagnosticSeverity; - pub fn LLVMGetDiagInfoKind(DI: DiagnosticInfoRef) -> DiagnosticKind; - - pub fn LLVMWriteDebugLocToString(C: ContextRef, DL: DebugLocRef, s: RustStringRef); - - pub fn 
LLVMSetInlineAsmDiagnosticHandler(C: ContextRef, - H: InlineAsmDiagHandler, - CX: *mut c_void); - - pub fn LLVMWriteSMDiagnosticToString(d: SMDiagnosticRef, s: RustStringRef); - - pub fn LLVMRustWriteArchive(Dst: *const c_char, - NumMembers: size_t, - Members: *const RustArchiveMemberRef, - WriteSymbtab: bool, - Kind: ArchiveKind) -> c_int; - pub fn LLVMRustArchiveMemberNew(Filename: *const c_char, - Name: *const c_char, - Child: ArchiveChildRef) -> RustArchiveMemberRef; - pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef); - - pub fn LLVMRustSetDataLayoutFromTargetMachine(M: ModuleRef, - TM: TargetMachineRef); - pub fn LLVMRustGetModuleDataLayout(M: ModuleRef) -> TargetDataRef; -} +/// Appending to a Rust string -- used by raw_rust_string_ostream. +#[no_mangle] +pub unsafe extern "C" fn rust_llvm_string_write_impl(sr: RustStringRef, + ptr: *const c_char, + size: size_t) { + let slice = slice::from_raw_parts(ptr as *const u8, size as usize); -#[cfg(have_component_x86)] -extern { - pub fn LLVMInitializeX86TargetInfo(); - pub fn LLVMInitializeX86Target(); - pub fn LLVMInitializeX86TargetMC(); - pub fn LLVMInitializeX86AsmPrinter(); - pub fn LLVMInitializeX86AsmParser(); -} -#[cfg(have_component_arm)] -extern { - pub fn LLVMInitializeARMTargetInfo(); - pub fn LLVMInitializeARMTarget(); - pub fn LLVMInitializeARMTargetMC(); - pub fn LLVMInitializeARMAsmPrinter(); - pub fn LLVMInitializeARMAsmParser(); -} -#[cfg(have_component_aarch64)] -extern { - pub fn LLVMInitializeAArch64TargetInfo(); - pub fn LLVMInitializeAArch64Target(); - pub fn LLVMInitializeAArch64TargetMC(); - pub fn LLVMInitializeAArch64AsmPrinter(); - pub fn LLVMInitializeAArch64AsmParser(); -} -#[cfg(have_component_mips)] -extern { - pub fn LLVMInitializeMipsTargetInfo(); - pub fn LLVMInitializeMipsTarget(); - pub fn LLVMInitializeMipsTargetMC(); - pub fn LLVMInitializeMipsAsmPrinter(); - pub fn LLVMInitializeMipsAsmParser(); -} -#[cfg(have_component_powerpc)] -extern { - pub fn 
LLVMInitializePowerPCTargetInfo(); - pub fn LLVMInitializePowerPCTarget(); - pub fn LLVMInitializePowerPCTargetMC(); - pub fn LLVMInitializePowerPCAsmPrinter(); - pub fn LLVMInitializePowerPCAsmParser(); -} -#[cfg(have_component_pnacl)] -extern { - pub fn LLVMInitializePNaClTargetInfo(); - pub fn LLVMInitializePNaClTarget(); - pub fn LLVMInitializePNaClTargetMC(); + let sr = sr as RustStringRepr; + (*sr).borrow_mut().extend_from_slice(slice); } -// LLVM requires symbols from this library, but apparently they're not printed -// during llvm-config? -#[cfg(windows)] -#[link(name = "ole32")] -extern {} - pub fn SetInstructionCallConv(instr: ValueRef, cc: CallConv) { unsafe { LLVMSetInstructionCallConv(instr, cc as c_uint); @@ -2178,15 +148,22 @@ pub fn SetFunctionCallConv(fn_: ValueRef, cc: CallConv) { LLVMSetFunctionCallConv(fn_, cc as c_uint); } } -pub fn SetLinkage(global: ValueRef, link: Linkage) { + +// Externally visible symbols that might appear in multiple translation units need to appear in +// their own comdat section so that the duplicates can be discarded at link time. This can for +// example happen for generics when using multiple codegen units. This function simply uses the +// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the +// function. +// For more details on COMDAT sections see e.g. 
http://www.airs.com/blog/archives/52 +pub fn SetUniqueComdat(llmod: ModuleRef, val: ValueRef) { unsafe { - LLVMSetLinkage(global, link as c_uint); + LLVMRustSetComdat(llmod, val, LLVMGetValueName(val)); } } -pub fn SetDLLStorageClass(global: ValueRef, class: DLLStorageClassTypes) { +pub fn UnsetComdat(val: ValueRef) { unsafe { - LLVMRustSetDLLStorageClass(global, class); + LLVMRustUnsetComdat(val); } } @@ -2202,28 +179,32 @@ pub fn set_thread_local(global: ValueRef, is_thread_local: bool) { } } -pub fn ConstICmp(pred: IntPredicate, v1: ValueRef, v2: ValueRef) -> ValueRef { - unsafe { - LLVMConstICmp(pred as c_ushort, v1, v2) +impl Attribute { + pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) } } -} -pub fn ConstFCmp(pred: RealPredicate, v1: ValueRef, v2: ValueRef) -> ValueRef { - unsafe { - LLVMConstFCmp(pred as c_ushort, v1, v2) + + pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) { + unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) } } -} -pub fn SetFunctionAttribute(fn_: ValueRef, attr: Attribute) { - unsafe { - LLVMAddFunctionAttribute(fn_, FunctionIndex as c_uint, - attr.bits() as uint64_t) + pub fn unapply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) } + } + + pub fn toggle_llfn(&self, idx: AttributePlace, llfn: ValueRef, set: bool) { + if set { + self.apply_llfn(idx, llfn); + } else { + self.unapply_llfn(idx, llfn); + } } } -/* Memory-managed interface to target data. */ +// Memory-managed interface to target data. 
pub struct TargetData { - pub lltd: TargetDataRef + pub lltd: TargetDataRef, } impl Drop for TargetData { @@ -2236,12 +217,10 @@ impl Drop for TargetData { pub fn mk_target_data(string_rep: &str) -> TargetData { let string_rep = CString::new(string_rep).unwrap(); - TargetData { - lltd: unsafe { LLVMCreateTargetData(string_rep.as_ptr()) } - } + TargetData { lltd: unsafe { LLVMCreateTargetData(string_rep.as_ptr()) } } } -/* Memory-managed interface to object files. */ +// Memory-managed interface to object files. pub struct ObjectFile { pub llof: ObjectFileRef, @@ -2254,12 +233,10 @@ impl ObjectFile { let llof = LLVMCreateObjectFile(llmb); if llof as isize == 0 { // LLVMCreateObjectFile took ownership of llmb - return None + return None; } - Some(ObjectFile { - llof: llof, - }) + Some(ObjectFile { llof: llof }) } } } @@ -2272,10 +249,10 @@ impl Drop for ObjectFile { } } -/* Memory-managed interface to section iterators. */ +// Memory-managed interface to section iterators. pub struct SectionIter { - pub llsi: SectionIteratorRef + pub llsi: SectionIteratorRef, } impl Drop for SectionIter { @@ -2287,11 +264,7 @@ impl Drop for SectionIter { } pub fn mk_section_iter(llof: ObjectFileRef) -> SectionIter { - unsafe { - SectionIter { - llsi: LLVMGetSections(llof) - } - } + unsafe { SectionIter { llsi: LLVMGetSections(llof) } } } /// Safe wrapper around `LLVMGetParam`, because segfaults are no fun. @@ -2314,54 +287,33 @@ pub fn get_params(llfn: ValueRef) -> Vec { } } -#[allow(missing_copy_implementations)] -pub enum RustString_opaque {} -pub type RustStringRef = *mut RustString_opaque; -type RustStringRepr = *mut RefCell>; - -/// Appending to a Rust string -- used by raw_rust_string_ostream. 
-#[no_mangle] -pub unsafe extern "C" fn rust_llvm_string_write_impl(sr: RustStringRef, - ptr: *const c_char, - size: size_t) { - let slice = slice::from_raw_parts(ptr as *const u8, size as usize); - - let sr = sr as RustStringRepr; - (*sr).borrow_mut().extend_from_slice(slice); -} - -pub fn build_string(f: F) -> Option where F: FnOnce(RustStringRef){ +pub fn build_string(f: F) -> Option + where F: FnOnce(RustStringRef) +{ let mut buf = RefCell::new(Vec::new()); f(&mut buf as RustStringRepr as RustStringRef); String::from_utf8(buf.into_inner()).ok() } pub unsafe fn twine_to_string(tr: TwineRef) -> String { - build_string(|s| LLVMWriteTwineToString(tr, s)) - .expect("got a non-UTF8 Twine from LLVM") + build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM") } pub unsafe fn debug_loc_to_string(c: ContextRef, tr: DebugLocRef) -> String { - build_string(|s| LLVMWriteDebugLocToString(c, tr, s)) + build_string(|s| LLVMRustWriteDebugLocToString(c, tr, s)) .expect("got a non-UTF8 DebugLoc from LLVM") } pub fn initialize_available_targets() { macro_rules! 
init_target( - ($cfg:ident $arch:ident) => { { + ($cfg:meta, $($method:ident),*) => { { #[cfg($cfg)] fn init() { + extern { + $(fn $method();)* + } unsafe { - let f = concat_idents!(LLVMInitialize, $arch, TargetInfo); - f(); - let f = concat_idents!(LLVMInitialize, $arch, Target); - f(); - let f = concat_idents!(LLVMInitialize, $arch, TargetMC); - f(); - let f = concat_idents!(LLVMInitialize, $arch, AsmPrinter); - f(); - let f = concat_idents!(LLVMInitialize, $arch, AsmParser); - f(); + $($method();)* } } #[cfg(not($cfg))] @@ -2369,26 +321,95 @@ pub fn initialize_available_targets() { init(); } } ); + init_target!(llvm_component = "x86", + LLVMInitializeX86TargetInfo, + LLVMInitializeX86Target, + LLVMInitializeX86TargetMC, + LLVMInitializeX86AsmPrinter, + LLVMInitializeX86AsmParser); + init_target!(llvm_component = "arm", + LLVMInitializeARMTargetInfo, + LLVMInitializeARMTarget, + LLVMInitializeARMTargetMC, + LLVMInitializeARMAsmPrinter, + LLVMInitializeARMAsmParser); + init_target!(llvm_component = "aarch64", + LLVMInitializeAArch64TargetInfo, + LLVMInitializeAArch64Target, + LLVMInitializeAArch64TargetMC, + LLVMInitializeAArch64AsmPrinter, + LLVMInitializeAArch64AsmParser); + init_target!(llvm_component = "mips", + LLVMInitializeMipsTargetInfo, + LLVMInitializeMipsTarget, + LLVMInitializeMipsTargetMC, + LLVMInitializeMipsAsmPrinter, + LLVMInitializeMipsAsmParser); + init_target!(llvm_component = "powerpc", + LLVMInitializePowerPCTargetInfo, + LLVMInitializePowerPCTarget, + LLVMInitializePowerPCTargetMC, + LLVMInitializePowerPCAsmPrinter, + LLVMInitializePowerPCAsmParser); + init_target!(llvm_component = "pnacl", + LLVMInitializePNaClTargetInfo, + LLVMInitializePNaClTarget, + LLVMInitializePNaClTargetMC); + init_target!(llvm_component = "systemz", + LLVMInitializeSystemZTargetInfo, + LLVMInitializeSystemZTarget, + LLVMInitializeSystemZTargetMC, + LLVMInitializeSystemZAsmPrinter, + LLVMInitializeSystemZAsmParser); + init_target!(llvm_component = "jsbackend", + 
LLVMInitializeJSBackendTargetInfo, + LLVMInitializeJSBackendTarget, + LLVMInitializeJSBackendTargetMC); + init_target!(llvm_component = "msp430", + LLVMInitializeMSP430TargetInfo, + LLVMInitializeMSP430Target, + LLVMInitializeMSP430TargetMC, + LLVMInitializeMSP430AsmPrinter); +} + +pub fn last_error() -> Option { + unsafe { + let cstr = LLVMRustGetLastError(); + if cstr.is_null() { + None + } else { + let err = CStr::from_ptr(cstr).to_bytes(); + let err = String::from_utf8_lossy(err).to_string(); + libc::free(cstr as *mut _); + Some(err) + } + } +} - init_target!(have_component_powerpc PowerPC); - init_target!(have_component_mips Mips); - init_target!(have_component_aarch64 AArch64); - init_target!(have_component_arm ARM); - init_target!(have_component_x86 X86); +pub struct OperandBundleDef { + inner: OperandBundleDefRef, +} - // PNaCl doesn't provide some of the optional target components, so we - // manually initialize it here. - #[cfg(have_component_pnacl)] - fn init_pnacl() { +impl OperandBundleDef { + pub fn new(name: &str, vals: &[ValueRef]) -> OperandBundleDef { + let name = CString::new(name).unwrap(); + let def = unsafe { + LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint) + }; + OperandBundleDef { inner: def } + } + + pub fn raw(&self) -> OperandBundleDefRef { + self.inner + } +} + +impl Drop for OperandBundleDef { + fn drop(&mut self) { unsafe { - LLVMInitializePNaClTargetInfo(); - LLVMInitializePNaClTarget(); - LLVMInitializePNaClTargetMC(); + LLVMRustFreeOperandBundleDef(self.inner); } } - #[cfg(not(have_component_pnacl))] - fn init_pnacl() { } - init_pnacl(); } // The module containing the native LLVM dependencies, generated by the build system @@ -2396,6 +417,7 @@ pub fn initialize_available_targets() { // parts of LLVM that rustllvm depends on aren't thrown away by the linker. // Works to the above fix for #15460 to ensure LLVM dependencies that // are only used by rustllvm don't get stripped by the linker. 
+#[cfg(not(cargobuild))] mod llvmdeps { include! { env!("CFG_LLVM_LINKAGE_FILE") } } diff --git a/src/librustc_metadata/Cargo.toml b/src/librustc_metadata/Cargo.toml new file mode 100644 index 0000000000000..6f7f03ca216b9 --- /dev/null +++ b/src/librustc_metadata/Cargo.toml @@ -0,0 +1,24 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_metadata" +version = "0.0.0" + +[lib] +name = "rustc_metadata" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +flate = { path = "../libflate" } +log = { path = "../liblog" } +proc_macro = { path = "../libproc_macro" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +rustc_llvm = { path = "../librustc_llvm" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_ext = { path = "../libsyntax_ext" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_metadata/astencode.rs b/src/librustc_metadata/astencode.rs index 8c3bd3c4f8a53..6598b7dcc527f 100644 --- a/src/librustc_metadata/astencode.rs +++ b/src/librustc_metadata/astencode.rs @@ -8,1405 +8,148 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_camel_case_types)] -// FIXME: remove this after snapshot, and Results are handled -#![allow(unused_must_use)] +use rustc::hir::map as ast_map; -use rustc::front::map as ast_map; -use rustc::session::Session; +use rustc::hir::intravisit::{Visitor, IdRangeComputingVisitor, IdRange, NestedVisitorMap}; -use rustc_front::hir; -use rustc_front::fold; -use rustc_front::fold::Folder; +use cstore::CrateMetadata; +use encoder::EncodeContext; +use schema::*; -use common as c; -use cstore; -use decoder; -use encoder as e; -use tydecode; -use tyencode; +use rustc::middle::cstore::{InlinedItem, InlinedItemRef}; +use rustc::middle::const_qualif::ConstQualif; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::ty::{self, TyCtxt, Ty}; -use middle::cstore::{InlinedItem, InlinedItemRef}; -use middle::ty::adjustment; -use middle::ty::cast; -use middle::check_const::ConstQualif; -use middle::def; -use middle::def_id::DefId; -use middle::privacy::{AllPublic, LastMod}; -use middle::region; -use middle::subst; -use middle::ty::{self, Ty}; +use syntax::ast; -use syntax::{ast, ast_util, codemap}; -use syntax::ast::NodeIdAssigner; -use syntax::ptr::P; +use rustc_serialize::Encodable; -use std::cell::Cell; -use std::io::SeekFrom; -use std::io::prelude::*; -use std::fmt::Debug; - -use rbml::reader; -use rbml::writer::Encoder; -use rbml; -use serialize; -use serialize::{Decodable, Decoder, DecoderHelpers, Encodable}; -use serialize::EncoderHelpers; - -#[cfg(test)] use std::io::Cursor; -#[cfg(test)] use syntax::parse; -#[cfg(test)] use syntax::ast::NodeId; -#[cfg(test)] use rustc_front::print::pprust; -#[cfg(test)] use rustc_front::lowering::{lower_item, LoweringContext}; - -struct DecodeContext<'a, 'b, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - cdata: &'b cstore::crate_metadata, - from_id_range: ast_util::IdRange, - to_id_range: ast_util::IdRange, - // Cache the last used filemap for translating spans as an optimization. 
- last_filemap_index: Cell, +#[derive(RustcEncodable, RustcDecodable)] +pub struct Ast<'tcx> { + id_range: IdRange, + item: Lazy, + side_tables: LazySeq<(ast::NodeId, TableEntry<'tcx>)>, } -trait tr { - fn tr(&self, dcx: &DecodeContext) -> Self; +#[derive(RustcEncodable, RustcDecodable)] +enum TableEntry<'tcx> { + TypeRelativeDef(Def), + NodeType(Ty<'tcx>), + ItemSubsts(ty::ItemSubsts<'tcx>), + Adjustment(ty::adjustment::Adjustment<'tcx>), + ConstQualif(ConstQualif), } -// ______________________________________________________________________ -// Top-level methods. - -pub fn encode_inlined_item(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - ii: InlinedItemRef) { - let id = match ii { - InlinedItemRef::Item(i) => i.id, - InlinedItemRef::Foreign(i) => i.id, - InlinedItemRef::TraitItem(_, ti) => ti.id, - InlinedItemRef::ImplItem(_, ii) => ii.id, - }; - debug!("> Encoding inlined item: {} ({:?})", - ecx.tcx.map.path_to_string(id), - rbml_w.writer.seek(SeekFrom::Current(0))); - - // Folding could be avoided with a smarter encoder. - let ii = simplify_ast(ii); - let id_range = inlined_item_id_range(&ii); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + pub fn encode_inlined_item(&mut self, ii: InlinedItemRef<'tcx>) -> Lazy> { + let mut id_visitor = IdRangeComputingVisitor::new(&self.tcx.map); + ii.visit(&mut id_visitor); - rbml_w.start_tag(c::tag_ast as usize); - id_range.encode(rbml_w); - encode_ast(rbml_w, &ii); - encode_side_tables_for_ii(ecx, rbml_w, &ii); - rbml_w.end_tag(); - - debug!("< Encoded inlined fn: {} ({:?})", - ecx.tcx.map.path_to_string(id), - rbml_w.writer.seek(SeekFrom::Current(0))); -} - -impl<'a, 'b, 'c, 'tcx> ast_map::FoldOps for &'a DecodeContext<'b, 'c, 'tcx> { - fn new_id(&self, id: ast::NodeId) -> ast::NodeId { - if id == ast::DUMMY_NODE_ID { - // Used by ast_map to map the NodeInlinedParent. 
- self.tcx.sess.next_node_id() - } else { - self.tr_id(id) - } - } - fn new_def_id(&self, def_id: DefId) -> DefId { - self.tr_def_id(def_id) - } - fn new_span(&self, span: codemap::Span) -> codemap::Span { - self.tr_span(span) - } -} + let ii_pos = self.position(); + ii.encode(self).unwrap(); -/// Decodes an item from its AST in the cdata's metadata and adds it to the -/// ast-map. -pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata, - tcx: &ty::ctxt<'tcx>, - parent_path: Vec, - parent_def_path: ast_map::DefPath, - par_doc: rbml::Doc, - orig_did: DefId) - -> Result<&'tcx InlinedItem, (Vec, - ast_map::DefPath)> { - match par_doc.opt_child(c::tag_ast) { - None => Err((parent_path, parent_def_path)), - Some(ast_doc) => { - let mut path_as_str = None; - debug!("> Decoding inlined fn: {:?}::?", - { - // Do an Option dance to use the path after it is moved below. - let s = ast_map::path_to_string(parent_path.iter().cloned()); - path_as_str = Some(s); - path_as_str.as_ref().map(|x| &x[..]) - }); - let mut ast_dsr = reader::Decoder::new(ast_doc); - let from_id_range = Decodable::decode(&mut ast_dsr).unwrap(); - let to_id_range = reserve_id_range(&tcx.sess, from_id_range); - let dcx = &DecodeContext { - cdata: cdata, - tcx: tcx, - from_id_range: from_id_range, - to_id_range: to_id_range, - last_filemap_index: Cell::new(0) - }; - let raw_ii = decode_ast(ast_doc); - let ii = ast_map::map_decoded_item(&dcx.tcx.map, - parent_path, - parent_def_path, - raw_ii, - dcx); - let name = match *ii { - InlinedItem::Item(ref i) => i.name, - InlinedItem::Foreign(ref i) => i.name, - InlinedItem::TraitItem(_, ref ti) => ti.name, - InlinedItem::ImplItem(_, ref ii) => ii.name + let tables_pos = self.position(); + let tables_count = { + let mut visitor = SideTableEncodingIdVisitor { + ecx: self, + count: 0, + }; + ii.visit(&mut visitor); + visitor.count }; - debug!("Fn named: {}", name); - debug!("< Decoded inlined fn: {}::{}", - path_as_str.unwrap(), - name); - 
region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii); - decode_side_tables(dcx, ast_doc); - copy_item_types(dcx, ii, orig_did); - match *ii { - InlinedItem::Item(ref i) => { - debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<", - ::rustc_front::print::pprust::item_to_string(&**i)); - } - _ => { } - } - Ok(ii) - } - } -} - -// ______________________________________________________________________ -// Enumerating the IDs which appear in an AST - -fn reserve_id_range(sess: &Session, - from_id_range: ast_util::IdRange) -> ast_util::IdRange { - // Handle the case of an empty range: - if from_id_range.empty() { return from_id_range; } - let cnt = from_id_range.max - from_id_range.min; - let to_id_min = sess.reserve_node_ids(cnt); - let to_id_max = to_id_min + cnt; - ast_util::IdRange { min: to_id_min, max: to_id_max } -} - -impl<'a, 'b, 'tcx> DecodeContext<'a, 'b, 'tcx> { - /// Translates an internal id, meaning a node id that is known to refer to some part of the - /// item currently being inlined, such as a local variable or argument. All naked node-ids - /// that appear in types have this property, since if something might refer to an external item - /// we would use a def-id to allow for the possibility that the item resides in another crate. - pub fn tr_id(&self, id: ast::NodeId) -> ast::NodeId { - // from_id_range should be non-empty - assert!(!self.from_id_range.empty()); - // Use wrapping arithmetic because otherwise it introduces control flow. - // Maybe we should just have the control flow? -- aatch - (id.wrapping_sub(self.from_id_range.min).wrapping_add(self.to_id_range.min)) - } - - /// Translates an EXTERNAL def-id, converting the crate number from the one used in the encoded - /// data to the current crate numbers.. By external, I mean that it be translated to a - /// reference to the item in its original crate, as opposed to being translated to a reference - /// to the inlined version of the item. 
This is typically, but not always, what you want, - /// because most def-ids refer to external things like types or other fns that may or may not - /// be inlined. Note that even when the inlined function is referencing itself recursively, we - /// would want `tr_def_id` for that reference--- conceptually the function calls the original, - /// non-inlined version, and trans deals with linking that recursive call to the inlined copy. - pub fn tr_def_id(&self, did: DefId) -> DefId { - decoder::translate_def_id(self.cdata, did) - } - - /// Translates a `Span` from an extern crate to the corresponding `Span` - /// within the local crate's codemap. - pub fn tr_span(&self, span: codemap::Span) -> codemap::Span { - decoder::translate_span(self.cdata, - self.tcx.sess.codemap(), - &self.last_filemap_index, - span) - } -} - -impl tr for DefId { - fn tr(&self, dcx: &DecodeContext) -> DefId { - dcx.tr_def_id(*self) - } -} - -impl tr for Option { - fn tr(&self, dcx: &DecodeContext) -> Option { - self.map(|d| dcx.tr_def_id(d)) - } -} -impl tr for codemap::Span { - fn tr(&self, dcx: &DecodeContext) -> codemap::Span { - dcx.tr_span(*self) - } -} - -trait def_id_encoder_helpers { - fn emit_def_id(&mut self, did: DefId); -} - -impl def_id_encoder_helpers for S - where ::Error: Debug -{ - fn emit_def_id(&mut self, did: DefId) { - did.encode(self).unwrap() - } -} - -trait def_id_decoder_helpers { - fn read_def_id(&mut self, dcx: &DecodeContext) -> DefId; - fn read_def_id_nodcx(&mut self, - cdata: &cstore::crate_metadata) -> DefId; -} - -impl def_id_decoder_helpers for D - where ::Error: Debug -{ - fn read_def_id(&mut self, dcx: &DecodeContext) -> DefId { - let did: DefId = Decodable::decode(self).unwrap(); - did.tr(dcx) - } - - fn read_def_id_nodcx(&mut self, - cdata: &cstore::crate_metadata) - -> DefId { - let did: DefId = Decodable::decode(self).unwrap(); - decoder::translate_def_id(cdata, did) - } -} - -// ______________________________________________________________________ -// 
Encoding and decoding the AST itself -// -// When decoding, we have to renumber the AST so that the node ids that -// appear within are disjoint from the node ids in our existing ASTs. -// We also have to adjust the spans: for now we just insert a dummy span, -// but eventually we should add entries to the local codemap as required. - -fn encode_ast(rbml_w: &mut Encoder, item: &InlinedItem) { - rbml_w.start_tag(c::tag_tree as usize); - rbml_w.emit_opaque(|this| item.encode(this)); - rbml_w.end_tag(); -} - -struct NestedItemsDropper; - -impl Folder for NestedItemsDropper { - fn fold_block(&mut self, blk: P) -> P { - blk.and_then(|hir::Block {id, stmts, expr, rules, span, ..}| { - let stmts_sans_items = stmts.into_iter().filter_map(|stmt| { - let use_stmt = match stmt.node { - hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true, - hir::StmtDecl(ref decl, _) => { - match decl.node { - hir::DeclLocal(_) => true, - hir::DeclItem(_) => false, - } - } - }; - if use_stmt { - Some(stmt) - } else { - None - } - }).collect(); - let blk_sans_items = P(hir::Block { - stmts: stmts_sans_items, - expr: expr, - id: id, - rules: rules, - span: span, - }); - fold::noop_fold_block(blk_sans_items, self) + self.lazy(&Ast { + id_range: id_visitor.result(), + item: Lazy::with_position(ii_pos), + side_tables: LazySeq::with_position_and_length(tables_pos, tables_count), }) } } -// Produces a simplified copy of the AST which does not include things -// that we do not need to or do not want to export. For example, we -// do not include any nested items: if these nested items are to be -// inlined, their AST will be exported separately (this only makes -// sense because, in Rust, nested items are independent except for -// their visibility). -// -// As it happens, trans relies on the fact that we do not export -// nested items, as otherwise it would get confused when translating -// inlined items. 
-fn simplify_ast(ii: InlinedItemRef) -> InlinedItem { - let mut fld = NestedItemsDropper; - - match ii { - // HACK we're not dropping items. - InlinedItemRef::Item(i) => { - InlinedItem::Item(P(fold::noop_fold_item(i.clone(), &mut fld))) - } - InlinedItemRef::TraitItem(d, ti) => { - InlinedItem::TraitItem(d, P(fold::noop_fold_trait_item(ti.clone(), &mut fld))) - } - InlinedItemRef::ImplItem(d, ii) => { - InlinedItem::ImplItem(d, P(fold::noop_fold_impl_item(ii.clone(), &mut fld))) - } - InlinedItemRef::Foreign(i) => { - InlinedItem::Foreign(P(fold::noop_fold_foreign_item(i.clone(), &mut fld))) - } - } -} - -fn decode_ast(par_doc: rbml::Doc) -> InlinedItem { - let chi_doc = par_doc.get(c::tag_tree as usize); - let mut rbml_r = reader::Decoder::new(chi_doc); - rbml_r.read_opaque(|decoder, _| Decodable::decode(decoder)).unwrap() -} - -// ______________________________________________________________________ -// Encoding and decoding of ast::def - -fn decode_def(dcx: &DecodeContext, dsr: &mut reader::Decoder) -> def::Def { - let def: def::Def = Decodable::decode(dsr).unwrap(); - def.tr(dcx) -} - -impl tr for def::Def { - fn tr(&self, dcx: &DecodeContext) -> def::Def { - match *self { - def::DefFn(did, is_ctor) => def::DefFn(did.tr(dcx), is_ctor), - def::DefMethod(did) => def::DefMethod(did.tr(dcx)), - def::DefSelfTy(opt_did, impl_ids) => { def::DefSelfTy(opt_did.map(|did| did.tr(dcx)), - impl_ids.map(|(nid1, nid2)| { - (dcx.tr_id(nid1), - dcx.tr_id(nid2)) - })) } - def::DefMod(did) => { def::DefMod(did.tr(dcx)) } - def::DefForeignMod(did) => { def::DefForeignMod(did.tr(dcx)) } - def::DefStatic(did, m) => { def::DefStatic(did.tr(dcx), m) } - def::DefConst(did) => { def::DefConst(did.tr(dcx)) } - def::DefAssociatedConst(did) => def::DefAssociatedConst(did.tr(dcx)), - def::DefLocal(_, nid) => { - let nid = dcx.tr_id(nid); - let did = dcx.tcx.map.local_def_id(nid); - def::DefLocal(did, nid) - } - def::DefVariant(e_did, v_did, is_s) => { - def::DefVariant(e_did.tr(dcx), 
v_did.tr(dcx), is_s) - }, - def::DefTrait(did) => def::DefTrait(did.tr(dcx)), - def::DefTy(did, is_enum) => def::DefTy(did.tr(dcx), is_enum), - def::DefAssociatedTy(trait_did, did) => - def::DefAssociatedTy(trait_did.tr(dcx), did.tr(dcx)), - def::DefPrimTy(p) => def::DefPrimTy(p), - def::DefTyParam(s, index, def_id, n) => def::DefTyParam(s, index, def_id.tr(dcx), n), - def::DefUpvar(_, nid1, index, nid2) => { - let nid1 = dcx.tr_id(nid1); - let nid2 = dcx.tr_id(nid2); - let did1 = dcx.tcx.map.local_def_id(nid1); - def::DefUpvar(did1, nid1, index, nid2) - } - def::DefStruct(did) => def::DefStruct(did.tr(dcx)), - def::DefLabel(nid) => def::DefLabel(dcx.tr_id(nid)), - def::DefErr => def::DefErr, - } - } -} - -// ______________________________________________________________________ -// Encoding and decoding of freevar information - -fn encode_freevar_entry(rbml_w: &mut Encoder, fv: &ty::Freevar) { - (*fv).encode(rbml_w).unwrap(); -} - -trait rbml_decoder_helper { - fn read_freevar_entry(&mut self, dcx: &DecodeContext) - -> ty::Freevar; - fn read_capture_mode(&mut self) -> hir::CaptureClause; -} - -impl<'a> rbml_decoder_helper for reader::Decoder<'a> { - fn read_freevar_entry(&mut self, dcx: &DecodeContext) - -> ty::Freevar { - let fv: ty::Freevar = Decodable::decode(self).unwrap(); - fv.tr(dcx) - } - - fn read_capture_mode(&mut self) -> hir::CaptureClause { - let cm: hir::CaptureClause = Decodable::decode(self).unwrap(); - cm - } -} - -impl tr for ty::Freevar { - fn tr(&self, dcx: &DecodeContext) -> ty::Freevar { - ty::Freevar { - def: self.def.tr(dcx), - span: self.span.tr(dcx), - } - } -} - -// ______________________________________________________________________ -// Encoding and decoding of MethodCallee - -trait read_method_callee_helper<'tcx> { - fn read_method_callee<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> (u32, ty::MethodCallee<'tcx>); -} - -fn encode_method_callee<'a, 'tcx>(ecx: &e::EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - 
autoderef: u32, - method: &ty::MethodCallee<'tcx>) { - use serialize::Encoder; - - rbml_w.emit_struct("MethodCallee", 4, |rbml_w| { - rbml_w.emit_struct_field("autoderef", 0, |rbml_w| { - autoderef.encode(rbml_w) - }); - rbml_w.emit_struct_field("def_id", 1, |rbml_w| { - Ok(rbml_w.emit_def_id(method.def_id)) - }); - rbml_w.emit_struct_field("ty", 2, |rbml_w| { - Ok(rbml_w.emit_ty(ecx, method.ty)) - }); - rbml_w.emit_struct_field("substs", 3, |rbml_w| { - Ok(rbml_w.emit_substs(ecx, &method.substs)) - }) - }).unwrap(); -} - -impl<'a, 'tcx> read_method_callee_helper<'tcx> for reader::Decoder<'a> { - fn read_method_callee<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> (u32, ty::MethodCallee<'tcx>) { - - self.read_struct("MethodCallee", 4, |this| { - let autoderef = this.read_struct_field("autoderef", 0, - Decodable::decode).unwrap(); - Ok((autoderef, ty::MethodCallee { - def_id: this.read_struct_field("def_id", 1, |this| { - Ok(this.read_def_id(dcx)) - }).unwrap(), - ty: this.read_struct_field("ty", 2, |this| { - Ok(this.read_ty(dcx)) - }).unwrap(), - substs: this.read_struct_field("substs", 3, |this| { - Ok(dcx.tcx.mk_substs(this.read_substs(dcx))) - }).unwrap() - })) - }).unwrap() - } -} - -pub fn encode_cast_kind(ebml_w: &mut Encoder, kind: cast::CastKind) { - kind.encode(ebml_w).unwrap(); -} - -// ______________________________________________________________________ -// Encoding and decoding the side tables - -trait rbml_writer_helpers<'tcx> { - fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region); - fn emit_ty<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, ty: Ty<'tcx>); - fn emit_tys<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, tys: &[Ty<'tcx>]); - fn emit_predicate<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - predicate: &ty::Predicate<'tcx>); - fn emit_trait_ref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - ty: &ty::TraitRef<'tcx>); - fn emit_substs<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - substs: 
&subst::Substs<'tcx>); - fn emit_existential_bounds<'b>(&mut self, ecx: &e::EncodeContext<'b,'tcx>, - bounds: &ty::ExistentialBounds<'tcx>); - fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: &ty::BuiltinBounds); - fn emit_upvar_capture(&mut self, ecx: &e::EncodeContext, capture: &ty::UpvarCapture); - fn emit_auto_adjustment<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - adj: &adjustment::AutoAdjustment<'tcx>); - fn emit_autoref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - autoref: &adjustment::AutoRef<'tcx>); - fn emit_auto_deref_ref<'a>(&mut self, ecx: &e::EncodeContext<'a, 'tcx>, - auto_deref_ref: &adjustment::AutoDerefRef<'tcx>); -} - -impl<'a, 'tcx> rbml_writer_helpers<'tcx> for Encoder<'a> { - fn emit_region(&mut self, ecx: &e::EncodeContext, r: ty::Region) { - self.emit_opaque(|this| Ok(tyencode::enc_region(&mut this.cursor, - &ecx.ty_str_ctxt(), - r))); - } - - fn emit_ty<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, ty: Ty<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_ty(&mut this.cursor, - &ecx.ty_str_ctxt(), - ty))); - } - - fn emit_tys<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, tys: &[Ty<'tcx>]) { - self.emit_from_vec(tys, |this, ty| Ok(this.emit_ty(ecx, *ty))); - } - - fn emit_trait_ref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - trait_ref: &ty::TraitRef<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_trait_ref(&mut this.cursor, - &ecx.ty_str_ctxt(), - *trait_ref))); - } - - fn emit_predicate<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - predicate: &ty::Predicate<'tcx>) { - self.emit_opaque(|this| { - Ok(tyencode::enc_predicate(&mut this.cursor, - &ecx.ty_str_ctxt(), - predicate)) - }); - } - - fn emit_existential_bounds<'b>(&mut self, ecx: &e::EncodeContext<'b,'tcx>, - bounds: &ty::ExistentialBounds<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_existential_bounds(&mut this.cursor, - &ecx.ty_str_ctxt(), - bounds))); - } - - fn emit_builtin_bounds(&mut self, ecx: &e::EncodeContext, bounds: 
&ty::BuiltinBounds) { - self.emit_opaque(|this| Ok(tyencode::enc_builtin_bounds(&mut this.cursor, - &ecx.ty_str_ctxt(), - bounds))); - } - - fn emit_upvar_capture(&mut self, ecx: &e::EncodeContext, capture: &ty::UpvarCapture) { - use serialize::Encoder; - - self.emit_enum("UpvarCapture", |this| { - match *capture { - ty::UpvarCapture::ByValue => { - this.emit_enum_variant("ByValue", 1, 0, |_| Ok(())) - } - ty::UpvarCapture::ByRef(ty::UpvarBorrow { kind, region }) => { - this.emit_enum_variant("ByRef", 2, 0, |this| { - this.emit_enum_variant_arg(0, - |this| kind.encode(this)); - this.emit_enum_variant_arg(1, - |this| Ok(this.emit_region(ecx, region))) - }) - } - } - }).unwrap() - } - - fn emit_substs<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - substs: &subst::Substs<'tcx>) { - self.emit_opaque(|this| Ok(tyencode::enc_substs(&mut this.cursor, - &ecx.ty_str_ctxt(), - substs))); - } - - fn emit_auto_adjustment<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - adj: &adjustment::AutoAdjustment<'tcx>) { - use serialize::Encoder; - - self.emit_enum("AutoAdjustment", |this| { - match *adj { - adjustment::AdjustReifyFnPointer=> { - this.emit_enum_variant("AdjustReifyFnPointer", 1, 0, |_| Ok(())) - } - - adjustment::AdjustUnsafeFnPointer => { - this.emit_enum_variant("AdjustUnsafeFnPointer", 2, 0, |_| { - Ok(()) - }) - } - - adjustment::AdjustDerefRef(ref auto_deref_ref) => { - this.emit_enum_variant("AdjustDerefRef", 3, 2, |this| { - this.emit_enum_variant_arg(0, - |this| Ok(this.emit_auto_deref_ref(ecx, auto_deref_ref))) - }) - } - } - }); - } - - fn emit_autoref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - autoref: &adjustment::AutoRef<'tcx>) { - use serialize::Encoder; - - self.emit_enum("AutoRef", |this| { - match autoref { - &adjustment::AutoPtr(r, m) => { - this.emit_enum_variant("AutoPtr", 0, 2, |this| { - this.emit_enum_variant_arg(0, - |this| Ok(this.emit_region(ecx, *r))); - this.emit_enum_variant_arg(1, |this| m.encode(this)) - }) - } - 
&adjustment::AutoUnsafe(m) => { - this.emit_enum_variant("AutoUnsafe", 1, 1, |this| { - this.emit_enum_variant_arg(0, |this| m.encode(this)) - }) - } - } - }); - } - - fn emit_auto_deref_ref<'b>(&mut self, ecx: &e::EncodeContext<'b, 'tcx>, - auto_deref_ref: &adjustment::AutoDerefRef<'tcx>) { - use serialize::Encoder; - - self.emit_struct("AutoDerefRef", 2, |this| { - this.emit_struct_field("autoderefs", 0, |this| auto_deref_ref.autoderefs.encode(this)); - - this.emit_struct_field("autoref", 1, |this| { - this.emit_option(|this| { - match auto_deref_ref.autoref { - None => this.emit_option_none(), - Some(ref a) => this.emit_option_some(|this| Ok(this.emit_autoref(ecx, a))), - } - }) - }); - - this.emit_struct_field("unsize", 2, |this| { - this.emit_option(|this| { - match auto_deref_ref.unsize { - None => this.emit_option_none(), - Some(target) => this.emit_option_some(|this| { - Ok(this.emit_ty(ecx, target)) - }) - } - }) - }) - }); - } -} - -trait write_tag_and_id { - fn tag(&mut self, tag_id: c::astencode_tag, f: F) where F: FnOnce(&mut Self); - fn id(&mut self, id: ast::NodeId); +struct SideTableEncodingIdVisitor<'a, 'b: 'a, 'tcx: 'b> { + ecx: &'a mut EncodeContext<'b, 'tcx>, + count: usize, } -impl<'a> write_tag_and_id for Encoder<'a> { - fn tag(&mut self, - tag_id: c::astencode_tag, - f: F) where - F: FnOnce(&mut Encoder<'a>), - { - self.start_tag(tag_id as usize); - f(self); - self.end_tag(); +impl<'a, 'b, 'tcx> Visitor<'tcx> for SideTableEncodingIdVisitor<'a, 'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.ecx.tcx.map) } - fn id(&mut self, id: ast::NodeId) { - id.encode(self).unwrap(); - } -} - -struct SideTableEncodingIdVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> { - ecx: &'a e::EncodeContext<'c, 'tcx>, - rbml_w: &'a mut Encoder<'b>, -} - -impl<'a, 'b, 'c, 'tcx> ast_util::IdVisitingOperation for - SideTableEncodingIdVisitor<'a, 'b, 'c, 'tcx> { fn visit_id(&mut self, id: ast::NodeId) { 
- encode_side_tables_for_id(self.ecx, self.rbml_w, id) - } -} - -fn encode_side_tables_for_ii(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - ii: &InlinedItem) { - rbml_w.start_tag(c::tag_table as usize); - ii.visit_ids(&mut SideTableEncodingIdVisitor { - ecx: ecx, - rbml_w: rbml_w - }); - rbml_w.end_tag(); -} + debug!("Encoding side tables for id {}", id); -fn encode_side_tables_for_id(ecx: &e::EncodeContext, - rbml_w: &mut Encoder, - id: ast::NodeId) { - let tcx = ecx.tcx; - - debug!("Encoding side tables for id {}", id); - - if let Some(def) = tcx.def_map.borrow().get(&id).map(|d| d.full_def()) { - rbml_w.tag(c::tag_table_def, |rbml_w| { - rbml_w.id(id); - def.encode(rbml_w).unwrap(); - }) - } - - if let Some(ty) = tcx.node_types().get(&id) { - rbml_w.tag(c::tag_table_node_type, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_ty(ecx, *ty); - }) - } - - if let Some(item_substs) = tcx.tables.borrow().item_substs.get(&id) { - rbml_w.tag(c::tag_table_item_subst, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_substs(ecx, &item_substs.substs); - }) - } - - if let Some(fv) = tcx.freevars.borrow().get(&id) { - rbml_w.tag(c::tag_table_freevars, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_from_vec(fv, |rbml_w, fv_entry| { - Ok(encode_freevar_entry(rbml_w, fv_entry)) - }); - }); - - for freevar in fv { - rbml_w.tag(c::tag_table_upvar_capture_map, |rbml_w| { - rbml_w.id(id); - - let var_id = freevar.def.var_id(); - let upvar_id = ty::UpvarId { - var_id: var_id, - closure_expr_id: id - }; - let upvar_capture = tcx.tables - .borrow() - .upvar_capture_map - .get(&upvar_id) - .unwrap() - .clone(); - var_id.encode(rbml_w); - rbml_w.emit_upvar_capture(ecx, &upvar_capture); - }) - } - } - - let method_call = ty::MethodCall::expr(id); - if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) { - rbml_w.tag(c::tag_table_method_map, |rbml_w| { - rbml_w.id(id); - encode_method_callee(ecx, rbml_w, method_call.autoderef, method) - }) - } - - if let Some(adjustment) = 
tcx.tables.borrow().adjustments.get(&id) { - match *adjustment { - adjustment::AdjustDerefRef(ref adj) => { - for autoderef in 0..adj.autoderefs { - let method_call = ty::MethodCall::autoderef(id, autoderef as u32); - if let Some(method) = tcx.tables.borrow().method_map.get(&method_call) { - rbml_w.tag(c::tag_table_method_map, |rbml_w| { - rbml_w.id(id); - encode_method_callee(ecx, rbml_w, - method_call.autoderef, method) - }) - } - } + let tcx = self.ecx.tcx; + let mut encode = |entry: Option| { + if let Some(entry) = entry { + (id, entry).encode(self.ecx).unwrap(); + self.count += 1; } - _ => {} - } - - rbml_w.tag(c::tag_table_adjustments, |rbml_w| { - rbml_w.id(id); - rbml_w.emit_auto_adjustment(ecx, adjustment); - }) - } - - if let Some(cast_kind) = tcx.cast_kinds.borrow().get(&id) { - rbml_w.tag(c::tag_table_cast_kinds, |rbml_w| { - rbml_w.id(id); - encode_cast_kind(rbml_w, *cast_kind) - }) - } - - if let Some(qualif) = tcx.const_qualif_map.borrow().get(&id) { - rbml_w.tag(c::tag_table_const_qualif, |rbml_w| { - rbml_w.id(id); - qualif.encode(rbml_w).unwrap() - }) - } -} - -trait doc_decoder_helpers: Sized { - fn as_int(&self) -> isize; - fn opt_child(&self, tag: c::astencode_tag) -> Option; -} + }; -impl<'a> doc_decoder_helpers for rbml::Doc<'a> { - fn as_int(&self) -> isize { reader::doc_as_u64(*self) as isize } - fn opt_child(&self, tag: c::astencode_tag) -> Option> { - reader::maybe_get_doc(*self, tag as usize) + encode(tcx.tables().type_relative_path_defs.get(&id).cloned() + .map(TableEntry::TypeRelativeDef)); + encode(tcx.tables().node_types.get(&id).cloned().map(TableEntry::NodeType)); + encode(tcx.tables().item_substs.get(&id).cloned().map(TableEntry::ItemSubsts)); + encode(tcx.tables().adjustments.get(&id).cloned().map(TableEntry::Adjustment)); + encode(tcx.const_qualif_map.borrow().get(&id).cloned().map(TableEntry::ConstQualif)); } } -trait rbml_decoder_decoder_helpers<'tcx> { - fn read_ty_encoded<'a, 'b, F, R>(&mut self, dcx: &DecodeContext<'a, 'b, 
'tcx>, - f: F) -> R - where F: for<'x> FnOnce(&mut tydecode::TyDecoder<'x, 'tcx>) -> R; - - fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region; - fn read_ty<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Ty<'tcx>; - fn read_tys<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) -> Vec>; - fn read_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::TraitRef<'tcx>; - fn read_poly_trait_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::PolyTraitRef<'tcx>; - fn read_predicate<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::Predicate<'tcx>; - fn read_existential_bounds<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> ty::ExistentialBounds<'tcx>; - fn read_substs<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> subst::Substs<'tcx>; - fn read_upvar_capture(&mut self, dcx: &DecodeContext) - -> ty::UpvarCapture; - fn read_auto_adjustment<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoAdjustment<'tcx>; - fn read_cast_kind<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> cast::CastKind; - fn read_auto_deref_ref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoDerefRef<'tcx>; - fn read_autoref<'a, 'b>(&mut self, dcx: &DecodeContext<'a, 'b, 'tcx>) - -> adjustment::AutoRef<'tcx>; - - // Versions of the type reading functions that don't need the full - // DecodeContext. 
- fn read_ty_nodcx(&mut self, - tcx: &ty::ctxt<'tcx>, cdata: &cstore::crate_metadata) -> Ty<'tcx>; - fn read_tys_nodcx(&mut self, - tcx: &ty::ctxt<'tcx>, - cdata: &cstore::crate_metadata) -> Vec>; - fn read_substs_nodcx(&mut self, tcx: &ty::ctxt<'tcx>, - cdata: &cstore::crate_metadata) - -> subst::Substs<'tcx>; -} - -impl<'a, 'tcx> rbml_decoder_decoder_helpers<'tcx> for reader::Decoder<'a> { - fn read_ty_nodcx(&mut self, - tcx: &ty::ctxt<'tcx>, - cdata: &cstore::crate_metadata) - -> Ty<'tcx> { - self.read_opaque(|_, doc| { - Ok( - tydecode::TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |id| decoder::translate_def_id(cdata, id)) - .parse_ty()) - }).unwrap() - } - - fn read_tys_nodcx(&mut self, - tcx: &ty::ctxt<'tcx>, - cdata: &cstore::crate_metadata) -> Vec> { - self.read_to_vec(|this| Ok(this.read_ty_nodcx(tcx, cdata)) ) - .unwrap() - .into_iter() - .collect() - } - - fn read_substs_nodcx(&mut self, - tcx: &ty::ctxt<'tcx>, - cdata: &cstore::crate_metadata) - -> subst::Substs<'tcx> - { - self.read_opaque(|_, doc| { - Ok( - tydecode::TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |id| decoder::translate_def_id(cdata, id)) - .parse_substs()) - }).unwrap() - } - - fn read_ty_encoded<'b, 'c, F, R>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>, op: F) -> R - where F: for<'x> FnOnce(&mut tydecode::TyDecoder<'x,'tcx>) -> R - { - return self.read_opaque(|_, doc| { - debug!("read_ty_encoded({})", type_string(doc)); - Ok(op( - &mut tydecode::TyDecoder::with_doc( - dcx.tcx, dcx.cdata.cnum, doc, - &mut |d| convert_def_id(dcx, d)))) - }).unwrap(); - - fn type_string(doc: rbml::Doc) -> String { - let mut str = String::new(); - for i in doc.start..doc.end { - str.push(doc.data[i] as char); +/// Decodes an item from its AST in the cdata's metadata and adds it to the +/// ast-map. 
+pub fn decode_inlined_item<'a, 'tcx>(cdata: &CrateMetadata, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + parent_def_path: ast_map::DefPath, + parent_did: DefId, + ast: Ast<'tcx>, + orig_did: DefId) + -> &'tcx InlinedItem { + debug!("> Decoding inlined fn: {:?}", tcx.item_path_str(orig_did)); + + let cnt = ast.id_range.max.as_usize() - ast.id_range.min.as_usize(); + let start = tcx.sess.reserve_node_ids(cnt); + let id_ranges = [ast.id_range, + IdRange { + min: start, + max: ast::NodeId::new(start.as_usize() + cnt), + }]; + + let ii = ast.item.decode((cdata, tcx, id_ranges)); + let item_node_id = tcx.sess.next_node_id(); + let ii = ast_map::map_decoded_item(&tcx.map, + parent_def_path, + parent_did, + ii, + item_node_id); + + let inlined_did = tcx.map.local_def_id(item_node_id); + let ty = tcx.item_type(orig_did); + let generics = tcx.item_generics(orig_did); + tcx.item_types.borrow_mut().insert(inlined_did, ty); + tcx.generics.borrow_mut().insert(inlined_did, generics); + + for (id, entry) in ast.side_tables.decode((cdata, tcx, id_ranges)) { + match entry { + TableEntry::TypeRelativeDef(def) => { + tcx.tables.borrow_mut().type_relative_path_defs.insert(id, def); } - str - } - } - fn read_region(&mut self, dcx: &DecodeContext) -> ty::Region { - // Note: regions types embed local node ids. In principle, we - // should translate these node ids into the new decode - // context. However, we do not bother, because region types - // are not used during trans. This also applies to read_ty. 
- return self.read_ty_encoded(dcx, |decoder| decoder.parse_region()); - } - fn read_ty<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) -> Ty<'tcx> { - return self.read_ty_encoded(dcx, |decoder| decoder.parse_ty()); - } - - fn read_tys<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> Vec> { - self.read_to_vec(|this| Ok(this.read_ty(dcx))).unwrap().into_iter().collect() - } - - fn read_trait_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::TraitRef<'tcx> { - self.read_ty_encoded(dcx, |decoder| decoder.parse_trait_ref()) - } - - fn read_poly_trait_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::PolyTraitRef<'tcx> { - ty::Binder(self.read_ty_encoded(dcx, |decoder| decoder.parse_trait_ref())) - } - - fn read_predicate<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::Predicate<'tcx> - { - self.read_ty_encoded(dcx, |decoder| decoder.parse_predicate()) - } - - fn read_existential_bounds<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> ty::ExistentialBounds<'tcx> - { - self.read_ty_encoded(dcx, |decoder| decoder.parse_existential_bounds()) - } - - fn read_substs<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> subst::Substs<'tcx> { - self.read_opaque(|_, doc| { - Ok(tydecode::TyDecoder::with_doc(dcx.tcx, dcx.cdata.cnum, doc, - &mut |d| convert_def_id(dcx, d)) - .parse_substs()) - }).unwrap() - } - fn read_upvar_capture(&mut self, dcx: &DecodeContext) -> ty::UpvarCapture { - self.read_enum("UpvarCapture", |this| { - let variants = ["ByValue", "ByRef"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 1 => ty::UpvarCapture::ByValue, - 2 => ty::UpvarCapture::ByRef(ty::UpvarBorrow { - kind: this.read_enum_variant_arg(0, - |this| Decodable::decode(this)).unwrap(), - region: this.read_enum_variant_arg(1, - |this| Ok(this.read_region(dcx))).unwrap() - }), - _ => panic!("bad enum variant for ty::UpvarCapture") - }) - }) - }).unwrap() - } - fn read_auto_adjustment<'b, 'c>(&mut 
self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoAdjustment<'tcx> { - self.read_enum("AutoAdjustment", |this| { - let variants = ["AdjustReifyFnPointer", "AdjustUnsafeFnPointer", "AdjustDerefRef"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 1 => adjustment::AdjustReifyFnPointer, - 2 => adjustment::AdjustUnsafeFnPointer, - 3 => { - let auto_deref_ref: adjustment::AutoDerefRef = - this.read_enum_variant_arg(0, - |this| Ok(this.read_auto_deref_ref(dcx))).unwrap(); - - adjustment::AdjustDerefRef(auto_deref_ref) - } - _ => panic!("bad enum variant for adjustment::AutoAdjustment") - }) - }) - }).unwrap() - } - - fn read_auto_deref_ref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoDerefRef<'tcx> { - self.read_struct("AutoDerefRef", 2, |this| { - Ok(adjustment::AutoDerefRef { - autoderefs: this.read_struct_field("autoderefs", 0, |this| { - Decodable::decode(this) - }).unwrap(), - autoref: this.read_struct_field("autoref", 1, |this| { - this.read_option(|this, b| { - if b { - Ok(Some(this.read_autoref(dcx))) - } else { - Ok(None) - } - }) - }).unwrap(), - unsize: this.read_struct_field("unsize", 2, |this| { - this.read_option(|this, b| { - if b { - Ok(Some(this.read_ty(dcx))) - } else { - Ok(None) - } - }) - }).unwrap(), - }) - }).unwrap() - } - - fn read_autoref<'b, 'c>(&mut self, dcx: &DecodeContext<'b, 'c, 'tcx>) - -> adjustment::AutoRef<'tcx> { - self.read_enum("AutoRef", |this| { - let variants = ["AutoPtr", "AutoUnsafe"]; - this.read_enum_variant(&variants, |this, i| { - Ok(match i { - 0 => { - let r: ty::Region = - this.read_enum_variant_arg(0, |this| { - Ok(this.read_region(dcx)) - }).unwrap(); - let m: hir::Mutability = - this.read_enum_variant_arg(1, |this| { - Decodable::decode(this) - }).unwrap(); - - adjustment::AutoPtr(dcx.tcx.mk_region(r), m) - } - 1 => { - let m: hir::Mutability = - this.read_enum_variant_arg(0, |this| Decodable::decode(this)).unwrap(); - - adjustment::AutoUnsafe(m) - } - _ => 
panic!("bad enum variant for adjustment::AutoRef") - }) - }) - }).unwrap() - } - - fn read_cast_kind<'b, 'c>(&mut self, _dcx: &DecodeContext<'b, 'c, 'tcx>) - -> cast::CastKind - { - Decodable::decode(self).unwrap() - } -} - -// Converts a def-id that appears in a type. The correct -// translation will depend on what kind of def-id this is. -// This is a subtle point: type definitions are not -// inlined into the current crate, so if the def-id names -// a nominal type or type alias, then it should be -// translated to refer to the source crate. -// -// However, *type parameters* are cloned along with the function -// they are attached to. So we should translate those def-ids -// to refer to the new, cloned copy of the type parameter. -// We only see references to free type parameters in the body of -// an inlined function. In such cases, we need the def-id to -// be a local id so that the TypeContents code is able to lookup -// the relevant info in the ty_param_defs table. -// -// *Region parameters*, unfortunately, are another kettle of fish. -// In such cases, def_id's can appear in types to distinguish -// shadowed bound regions and so forth. It doesn't actually -// matter so much what we do to these, since regions are erased -// at trans time, but it's good to keep them consistent just in -// case. We translate them with `tr_def_id()` which will map -// the crate numbers back to the original source crate. -// -// Scopes will end up as being totally bogus. This can actually -// be fixed though. -// -// Unboxed closures are cloned along with the function being -// inlined, and all side tables use interned node IDs, so we -// translate their def IDs accordingly. -// -// It'd be really nice to refactor the type repr to not include -// def-ids so that all these distinctions were unnecessary. 
-fn convert_def_id(dcx: &DecodeContext, - did: DefId) - -> DefId { - let r = dcx.tr_def_id(did); - debug!("convert_def_id(did={:?})={:?}", did, r); - return r; -} - -fn decode_side_tables(dcx: &DecodeContext, - ast_doc: rbml::Doc) { - let tbl_doc = ast_doc.get(c::tag_table as usize); - for (tag, entry_doc) in reader::docs(tbl_doc) { - let mut entry_dsr = reader::Decoder::new(entry_doc); - let id0: ast::NodeId = Decodable::decode(&mut entry_dsr).unwrap(); - let id = dcx.tr_id(id0); - - debug!(">> Side table document with tag 0x{:x} \ - found for id {} (orig {})", - tag, id, id0); - let tag = tag as u32; - let decoded_tag: Option = c::astencode_tag::from_u32(tag); - match decoded_tag { - None => { - dcx.tcx.sess.bug( - &format!("unknown tag found in side tables: {:x}", - tag)); + TableEntry::NodeType(ty) => { + tcx.tables.borrow_mut().node_types.insert(id, ty); } - Some(value) => { - let val_dsr = &mut entry_dsr; - - match value { - c::tag_table_def => { - let def = decode_def(dcx, val_dsr); - dcx.tcx.def_map.borrow_mut().insert(id, def::PathResolution { - base_def: def, - // This doesn't matter cross-crate. 
- last_private: LastMod(AllPublic), - depth: 0 - }); - } - c::tag_table_node_type => { - let ty = val_dsr.read_ty(dcx); - debug!("inserting ty for node {}: {:?}", - id, ty); - dcx.tcx.node_type_insert(id, ty); - } - c::tag_table_item_subst => { - let item_substs = ty::ItemSubsts { - substs: val_dsr.read_substs(dcx) - }; - dcx.tcx.tables.borrow_mut().item_substs.insert( - id, item_substs); - } - c::tag_table_freevars => { - let fv_info = val_dsr.read_to_vec(|val_dsr| { - Ok(val_dsr.read_freevar_entry(dcx)) - }).unwrap().into_iter().collect(); - dcx.tcx.freevars.borrow_mut().insert(id, fv_info); - } - c::tag_table_upvar_capture_map => { - let var_id: ast::NodeId = Decodable::decode(val_dsr).unwrap(); - let upvar_id = ty::UpvarId { - var_id: dcx.tr_id(var_id), - closure_expr_id: id - }; - let ub = val_dsr.read_upvar_capture(dcx); - dcx.tcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, ub); - } - c::tag_table_method_map => { - let (autoderef, method) = val_dsr.read_method_callee(dcx); - let method_call = ty::MethodCall { - expr_id: id, - autoderef: autoderef - }; - dcx.tcx.tables.borrow_mut().method_map.insert(method_call, method); - } - c::tag_table_adjustments => { - let adj = - val_dsr.read_auto_adjustment(dcx); - dcx.tcx.tables.borrow_mut().adjustments.insert(id, adj); - } - c::tag_table_cast_kinds => { - let cast_kind = - val_dsr.read_cast_kind(dcx); - dcx.tcx.cast_kinds.borrow_mut().insert(id, cast_kind); - } - c::tag_table_const_qualif => { - let qualif: ConstQualif = Decodable::decode(val_dsr).unwrap(); - dcx.tcx.const_qualif_map.borrow_mut().insert(id, qualif); - } - _ => { - dcx.tcx.sess.bug( - &format!("unknown tag found in side tables: {:x}", - tag)); - } - } + TableEntry::ItemSubsts(item_substs) => { + tcx.tables.borrow_mut().item_substs.insert(id, item_substs); } - } - - debug!(">< Side table doc loaded"); - } -} - -// copy the tcache entries from the original item to the new -// inlined item -fn copy_item_types(dcx: &DecodeContext, ii: 
&InlinedItem, orig_did: DefId) { - fn copy_item_type(dcx: &DecodeContext, - inlined_id: ast::NodeId, - remote_did: DefId) { - let inlined_did = dcx.tcx.map.local_def_id(inlined_id); - dcx.tcx.register_item_type(inlined_did, - dcx.tcx.lookup_item_type(remote_did)); - - } - // copy the entry for the item itself - let item_node_id = match ii { - &InlinedItem::Item(ref i) => i.id, - &InlinedItem::TraitItem(_, ref ti) => ti.id, - &InlinedItem::ImplItem(_, ref ii) => ii.id, - &InlinedItem::Foreign(ref fi) => fi.id - }; - copy_item_type(dcx, item_node_id, orig_did); - - // copy the entries of inner items - if let &InlinedItem::Item(ref item) = ii { - match item.node { - hir::ItemEnum(ref def, _) => { - let orig_def = dcx.tcx.lookup_adt_def(orig_did); - for (i_variant, orig_variant) in - def.variants.iter().zip(orig_def.variants.iter()) - { - debug!("astencode: copying variant {:?} => {:?}", - orig_variant.did, i_variant.node.data.id()); - copy_item_type(dcx, i_variant.node.data.id(), orig_variant.did); - } + TableEntry::Adjustment(adj) => { + tcx.tables.borrow_mut().adjustments.insert(id, adj); } - hir::ItemStruct(ref def, _) => { - if !def.is_struct() { - let ctor_did = dcx.tcx.lookup_adt_def(orig_did) - .struct_variant().did; - debug!("astencode: copying ctor {:?} => {:?}", ctor_did, - def.id()); - copy_item_type(dcx, def.id(), ctor_did); - } + TableEntry::ConstQualif(qualif) => { + tcx.const_qualif_map.borrow_mut().insert(id, qualif); } - _ => {} } } -} - -fn inlined_item_id_range(v: &InlinedItem) -> ast_util::IdRange { - let mut visitor = ast_util::IdRangeComputingVisitor::new(); - v.visit_ids(&mut visitor); - visitor.result() -} -// ______________________________________________________________________ -// Testing of astencode_gen - -#[cfg(test)] -fn encode_item_ast(rbml_w: &mut Encoder, item: &hir::Item) { - rbml_w.start_tag(c::tag_tree as usize); - (*item).encode(rbml_w); - rbml_w.end_tag(); -} - -#[cfg(test)] -fn decode_item_ast(par_doc: rbml::Doc) -> hir::Item { 
- let chi_doc = par_doc.get(c::tag_tree as usize); - let mut d = reader::Decoder::new(chi_doc); - Decodable::decode(&mut d).unwrap() -} - -#[cfg(test)] -trait FakeExtCtxt { - fn call_site(&self) -> codemap::Span; - fn cfg(&self) -> ast::CrateConfig; - fn ident_of(&self, st: &str) -> ast::Ident; - fn name_of(&self, st: &str) -> ast::Name; - fn parse_sess(&self) -> &parse::ParseSess; -} - -#[cfg(test)] -impl FakeExtCtxt for parse::ParseSess { - fn call_site(&self) -> codemap::Span { - codemap::Span { - lo: codemap::BytePos(0), - hi: codemap::BytePos(0), - expn_id: codemap::NO_EXPANSION, - } - } - fn cfg(&self) -> ast::CrateConfig { Vec::new() } - fn ident_of(&self, st: &str) -> ast::Ident { - parse::token::str_to_ident(st) - } - fn name_of(&self, st: &str) -> ast::Name { - parse::token::intern(st) - } - fn parse_sess(&self) -> &parse::ParseSess { self } -} - -#[cfg(test)] -struct FakeNodeIdAssigner; - -#[cfg(test)] -// It should go without saying that this may give unexpected results. Avoid -// lowering anything which needs new nodes. 
-impl NodeIdAssigner for FakeNodeIdAssigner { - fn next_node_id(&self) -> NodeId { - 0 - } - - fn peek_node_id(&self) -> NodeId { - 0 - } -} - -#[cfg(test)] -fn mk_ctxt() -> parse::ParseSess { - parse::ParseSess::new() -} - -#[cfg(test)] -fn roundtrip(in_item: hir::Item) { - let mut wr = Cursor::new(Vec::new()); - encode_item_ast(&mut Encoder::new(&mut wr), &in_item); - let rbml_doc = rbml::Doc::new(wr.get_ref()); - let out_item = decode_item_ast(rbml_doc); - - assert!(in_item == out_item); -} - -#[test] -fn test_basic() { - let cx = mk_ctxt(); - let fnia = FakeNodeIdAssigner; - let lcx = LoweringContext::new(&fnia, None); - roundtrip(lower_item(&lcx, "e_item!(&cx, - fn foo() {} - ).unwrap())); -} - -#[test] -fn test_smalltalk() { - let cx = mk_ctxt(); - let fnia = FakeNodeIdAssigner; - let lcx = LoweringContext::new(&fnia, None); - roundtrip(lower_item(&lcx, "e_item!(&cx, - fn foo() -> isize { 3 + 4 } // first smalltalk program ever executed. - ).unwrap())); -} - -#[test] -fn test_more() { - let cx = mk_ctxt(); - let fnia = FakeNodeIdAssigner; - let lcx = LoweringContext::new(&fnia, None); - roundtrip(lower_item(&lcx, "e_item!(&cx, - fn foo(x: usize, y: usize) -> usize { - let z = x + y; - return z; - } - ).unwrap())); -} - -#[test] -fn test_simplification() { - let cx = mk_ctxt(); - let item = quote_item!(&cx, - fn new_int_alist() -> alist { - fn eq_int(a: isize, b: isize) -> bool { a == b } - return alist {eq_fn: eq_int, data: Vec::new()}; - } - ).unwrap(); - let fnia = FakeNodeIdAssigner; - let lcx = LoweringContext::new(&fnia, None); - let hir_item = lower_item(&lcx, &item); - let item_in = InlinedItemRef::Item(&hir_item); - let item_out = simplify_ast(item_in); - let item_exp = InlinedItem::Item(P(lower_item(&lcx, "e_item!(&cx, - fn new_int_alist() -> alist { - return alist {eq_fn: eq_int, data: Vec::new()}; - } - ).unwrap()))); - match (item_out, item_exp) { - (InlinedItem::Item(item_out), InlinedItem::Item(item_exp)) => { - 
assert!(pprust::item_to_string(&*item_out) == - pprust::item_to_string(&*item_exp)); - } - _ => panic!() - } + ii } diff --git a/src/librustc_metadata/common.rs b/src/librustc_metadata/common.rs deleted file mode 100644 index 479ab75927847..0000000000000 --- a/src/librustc_metadata/common.rs +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(non_camel_case_types, non_upper_case_globals)] - -pub use self::astencode_tag::*; - -// RBML enum definitions and utils shared by the encoder and decoder -// -// 0x00..0x1f: reserved for RBML generic type tags -// 0x20..0xef: free for use, preferred for frequent tags -// 0xf0..0xff: internally used by RBML to encode 0x100..0xfff in two bytes -// 0x100..0xfff: free for use, preferred for infrequent tags - -pub const tag_items: usize = 0x100; // top-level only - -pub const tag_paths_data_name: usize = 0x20; - -pub const tag_def_id: usize = 0x21; - -pub const tag_items_data: usize = 0x22; - -pub const tag_items_data_item: usize = 0x23; - -pub const tag_items_data_item_family: usize = 0x24; - -pub const tag_items_data_item_type: usize = 0x25; - -pub const tag_items_data_item_symbol: usize = 0x26; - -pub const tag_items_data_item_variant: usize = 0x27; - -pub const tag_items_data_parent_item: usize = 0x28; - -pub const tag_items_data_item_is_tuple_struct_ctor: usize = 0x29; - -pub const tag_items_closure_kind: usize = 0x2a; -pub const tag_items_closure_ty: usize = 0x2b; -pub const tag_def_key: usize = 0x2c; - -// GAP 0x2d 0x2e - -pub const tag_index: usize = 0x110; // top-level only -pub const tag_xref_index: usize = 0x111; // top-level only -pub const tag_xref_data: usize = 0x112; // 
top-level only - -pub const tag_meta_item_name_value: usize = 0x2f; - -pub const tag_meta_item_name: usize = 0x30; - -pub const tag_meta_item_value: usize = 0x31; - -pub const tag_attributes: usize = 0x101; // top-level only - -pub const tag_attribute: usize = 0x32; - -pub const tag_meta_item_word: usize = 0x33; - -pub const tag_meta_item_list: usize = 0x34; - -// The list of crates that this crate depends on -pub const tag_crate_deps: usize = 0x102; // top-level only - -// A single crate dependency -pub const tag_crate_dep: usize = 0x35; - -pub const tag_crate_hash: usize = 0x103; // top-level only -pub const tag_crate_crate_name: usize = 0x104; // top-level only - -pub const tag_crate_dep_crate_name: usize = 0x36; -pub const tag_crate_dep_hash: usize = 0x37; -pub const tag_crate_dep_explicitly_linked: usize = 0x38; // top-level only - -pub const tag_item_trait_item: usize = 0x3a; - -pub const tag_item_trait_ref: usize = 0x3b; - -// discriminator value for variants -pub const tag_disr_val: usize = 0x3c; - -// used to encode ast_map::PathElem -pub const tag_path: usize = 0x3d; -pub const tag_path_len: usize = 0x3e; -pub const tag_path_elem_mod: usize = 0x3f; -pub const tag_path_elem_name: usize = 0x40; -pub const tag_item_field: usize = 0x41; - -pub const tag_item_variances: usize = 0x43; -/* - trait items contain tag_item_trait_item elements, - impl items contain tag_item_impl_item elements, and classes - have both. That's because some code treats classes like traits, - and other code treats them like impls. Because classes can contain - both, tag_item_trait_item and tag_item_impl_item have to be two - different tags. - */ -pub const tag_item_impl_item: usize = 0x44; -pub const tag_item_trait_method_explicit_self: usize = 0x45; - - -// Reexports are found within module tags. Each reexport contains def_ids -// and names. 
-pub const tag_items_data_item_reexport: usize = 0x46; -pub const tag_items_data_item_reexport_def_id: usize = 0x47; -pub const tag_items_data_item_reexport_name: usize = 0x48; - -// used to encode crate_ctxt side tables -enum_from_u32! { - #[derive(Copy, Clone, PartialEq)] - #[repr(usize)] - pub enum astencode_tag { // Reserves 0x50 -- 0x6f - tag_ast = 0x50, - - tag_tree = 0x51, - - tag_mir = 0x52, - - tag_table = 0x53, - // GAP 0x54, 0x55 - tag_table_def = 0x56, - tag_table_node_type = 0x57, - tag_table_item_subst = 0x58, - tag_table_freevars = 0x59, - // GAP 0x5a, 0x5b, 0x5c, 0x5d, 0x5e - tag_table_method_map = 0x5f, - // GAP 0x60 - tag_table_adjustments = 0x61, - // GAP 0x62, 0x63, 0x64, 0x65 - tag_table_upvar_capture_map = 0x66, - // GAP 0x67, 0x68 - tag_table_const_qualif = 0x69, - tag_table_cast_kinds = 0x6a, - } -} - -pub const tag_item_trait_item_sort: usize = 0x70; - -pub const tag_crate_triple: usize = 0x105; // top-level only - -pub const tag_dylib_dependency_formats: usize = 0x106; // top-level only - -// Language items are a top-level directory (for speed). 
Hierarchy: -// -// tag_lang_items -// - tag_lang_items_item -// - tag_lang_items_item_id: u32 -// - tag_lang_items_item_index: u32 - -pub const tag_lang_items: usize = 0x107; // top-level only -pub const tag_lang_items_item: usize = 0x73; -pub const tag_lang_items_item_id: usize = 0x74; -pub const tag_lang_items_item_index: usize = 0x75; -pub const tag_lang_items_missing: usize = 0x76; - -pub const tag_item_unnamed_field: usize = 0x77; -pub const tag_items_data_item_visibility: usize = 0x78; -pub const tag_items_data_item_inherent_impl: usize = 0x79; -// GAP 0x7a -pub const tag_mod_child: usize = 0x7b; -pub const tag_misc_info: usize = 0x108; // top-level only -pub const tag_misc_info_crate_items: usize = 0x7c; - -pub const tag_impls: usize = 0x109; // top-level only -pub const tag_impls_trait: usize = 0x7d; -pub const tag_impls_trait_impl: usize = 0x7e; - -// GAP 0x7f, 0x80, 0x81 - -pub const tag_native_libraries: usize = 0x10a; // top-level only -pub const tag_native_libraries_lib: usize = 0x82; -pub const tag_native_libraries_name: usize = 0x83; -pub const tag_native_libraries_kind: usize = 0x84; - -pub const tag_plugin_registrar_fn: usize = 0x10b; // top-level only - -pub const tag_method_argument_names: usize = 0x85; -pub const tag_method_argument_name: usize = 0x86; - -pub const tag_reachable_ids: usize = 0x10c; // top-level only -pub const tag_reachable_id: usize = 0x87; - -pub const tag_items_data_item_stability: usize = 0x88; - -pub const tag_items_data_item_repr: usize = 0x89; - -pub const tag_struct_fields: usize = 0x10d; // top-level only -pub const tag_struct_field: usize = 0x8a; - -pub const tag_items_data_item_struct_ctor: usize = 0x8b; -pub const tag_attribute_is_sugared_doc: usize = 0x8c; -// GAP 0x8d -pub const tag_items_data_region: usize = 0x8e; - -pub const tag_region_param_def: usize = 0x8f; -pub const tag_region_param_def_ident: usize = 0x90; -pub const tag_region_param_def_def_id: usize = 0x91; -pub const tag_region_param_def_space: usize = 
0x92; -pub const tag_region_param_def_index: usize = 0x93; - -pub const tag_type_param_def: usize = 0x94; - -pub const tag_item_generics: usize = 0x95; -pub const tag_method_ty_generics: usize = 0x96; - -pub const tag_type_predicate: usize = 0x97; -pub const tag_self_predicate: usize = 0x98; -pub const tag_fn_predicate: usize = 0x99; - -pub const tag_unsafety: usize = 0x9a; - -pub const tag_associated_type_names: usize = 0x9b; -pub const tag_associated_type_name: usize = 0x9c; - -pub const tag_polarity: usize = 0x9d; - -pub const tag_macro_defs: usize = 0x10e; // top-level only -pub const tag_macro_def: usize = 0x9e; -pub const tag_macro_def_body: usize = 0x9f; - -pub const tag_paren_sugar: usize = 0xa0; - -pub const tag_codemap: usize = 0xa1; -pub const tag_codemap_filemap: usize = 0xa2; - -pub const tag_item_super_predicates: usize = 0xa3; - -pub const tag_defaulted_trait: usize = 0xa4; - -pub const tag_impl_coerce_unsized_kind: usize = 0xa5; - -pub const tag_items_data_item_constness: usize = 0xa6; - -pub const tag_items_data_item_deprecation: usize = 0xa7; - -pub const tag_rustc_version: usize = 0x10f; -pub fn rustc_version() -> String { - format!( - "rustc {}", - option_env!("CFG_VERSION").unwrap_or("unknown version") - ) -} diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 9122148a8cc05..2c266068fe814 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -8,100 +8,87 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_camel_case_types)] - //! 
Validates all used crates and extern libraries and loads their metadata -use common::rustc_version; use cstore::{self, CStore, CrateSource, MetadataBlob}; -use decoder; -use loader::{self, CratePaths}; +use locator::{self, CratePaths}; +use schema::CrateRoot; -use rustc::back::svh::Svh; +use rustc::hir::def_id::{CrateNum, DefIndex}; +use rustc::hir::svh::Svh; +use rustc::middle::cstore::DepKind; use rustc::session::{config, Session}; +use rustc_back::PanicStrategy; use rustc::session::search_paths::PathKind; -use rustc::middle::cstore::{CrateStore, validate_crate_name}; -use rustc::util::nodemap::FnvHashMap; -use rustc::front::map as hir_map; +use rustc::middle; +use rustc::middle::cstore::{CrateStore, validate_crate_name, ExternCrate}; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use rustc::middle::cstore::NativeLibrary; +use rustc::hir::map::Definitions; use std::cell::{RefCell, Cell}; +use std::ops::Deref; use std::path::PathBuf; use std::rc::Rc; -use std::fs; +use std::{cmp, fs}; use syntax::ast; -use syntax::abi; -use syntax::codemap::{self, Span, mk_sp, Pos}; -use syntax::parse; +use syntax::abi::Abi; use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::errors::FatalError; -use syntax::parse::token::InternedString; -use syntax::util::small_vector::SmallVector; -use rustc_front::intravisit::Visitor; -use rustc_front::hir; +use syntax::ext::base::SyntaxExtension; +use syntax::feature_gate::{self, GateIssue}; +use syntax::symbol::Symbol; +use syntax_pos::{Span, DUMMY_SP}; use log; -pub struct LocalCrateReader<'a, 'b:'a> { - sess: &'a Session, - cstore: &'a CStore, - creader: CrateReader<'a>, - ast_map: &'a hir_map::Map<'b>, +pub struct Library { + pub dylib: Option<(PathBuf, PathKind)>, + pub rlib: Option<(PathBuf, PathKind)>, + pub rmeta: Option<(PathBuf, PathKind)>, + pub metadata: MetadataBlob, } -pub struct CrateReader<'a> { - sess: &'a Session, +pub struct CrateLoader<'a> { + pub sess: &'a Session, cstore: &'a CStore, - next_crate_num: 
ast::CrateNum, - foreign_item_map: FnvHashMap>, -} - -impl<'a, 'b, 'hir> Visitor<'hir> for LocalCrateReader<'a, 'b> { - fn visit_item(&mut self, a: &'hir hir::Item) { - self.process_item(a); - } + next_crate_num: CrateNum, + foreign_item_map: FxHashMap>, + local_crate_name: Symbol, } fn dump_crates(cstore: &CStore) { info!("resolved crates:"); - cstore.iter_crate_data_origins(|_, data, opt_source| { + cstore.iter_crate_data(|_, data| { info!(" name: {}", data.name()); info!(" cnum: {}", data.cnum); info!(" hash: {}", data.hash()); - info!(" reqd: {}", data.explicitly_linked.get()); - opt_source.map(|cs| { - let CrateSource { dylib, rlib, cnum: _ } = cs; - dylib.map(|dl| info!(" dylib: {}", dl.0.display())); - rlib.map(|rl| info!(" rlib: {}", rl.0.display())); - }); - }) + info!(" reqd: {:?}", data.dep_kind.get()); + let CrateSource { dylib, rlib, rmeta } = data.source.clone(); + dylib.map(|dl| info!(" dylib: {}", dl.0.display())); + rlib.map(|rl| info!(" rlib: {}", rl.0.display())); + rmeta.map(|rl| info!(" rmeta: {}", rl.0.display())); + }); } -fn should_link(i: &ast::Item) -> bool { - !attr::contains_name(&i.attrs, "no_link") -} -// Dup for the hir -fn should_link_hir(i: &hir::Item) -> bool { - !attr::contains_name(&i.attrs, "no_link") -} - -struct CrateInfo { - ident: String, - name: String, +#[derive(Debug)] +struct ExternCrateInfo { + ident: Symbol, + name: Symbol, id: ast::NodeId, - should_link: bool, + dep_kind: DepKind, } fn register_native_lib(sess: &Session, cstore: &CStore, span: Option, - name: String, - kind: cstore::NativeLibraryKind) { - if name.is_empty() { + lib: NativeLibrary) { + if lib.name.as_str().is_empty() { match span { Some(span) => { - span_err!(sess, span, E0454, - "#[link(name = \"\")] given with empty name"); + struct_span_err!(sess, span, E0454, + "#[link(name = \"\")] given with empty name") + .span_label(span, &format!("empty name given")) + .emit(); } None => { sess.err("empty library name given via `-l`"); @@ -110,17 +97,21 @@ fn 
register_native_lib(sess: &Session, return } let is_osx = sess.target.target.options.is_like_osx; - if kind == cstore::NativeFramework && !is_osx { + if lib.kind == cstore::NativeFramework && !is_osx { let msg = "native frameworks are only available on OSX targets"; match span { - Some(span) => { - span_err!(sess, span, E0455, - "{}", msg) - } + Some(span) => span_err!(sess, span, E0455, "{}", msg), None => sess.err(msg), } } - cstore.add_used_library(name, kind); + if lib.cfg.is_some() && !sess.features.borrow().link_cfg { + feature_gate::emit_feature_err(&sess.parse_sess, + "link_cfg", + span.unwrap(), + GateIssue::Language, + "is feature gated"); + } + cstore.add_used_library(lib); } // Extra info about a crate loaded for plugins or exported macros. @@ -131,80 +122,67 @@ struct ExtensionCrate { } enum PMDSource { - Registered(Rc), - Owned(MetadataBlob), + Registered(Rc), + Owned(Library), } -impl PMDSource { - pub fn as_slice<'a>(&'a self) -> &'a [u8] { +impl Deref for PMDSource { + type Target = MetadataBlob; + + fn deref(&self) -> &MetadataBlob { match *self { - PMDSource::Registered(ref cmd) => cmd.data(), - PMDSource::Owned(ref mdb) => mdb.as_slice(), + PMDSource::Registered(ref cmd) => &cmd.blob, + PMDSource::Owned(ref lib) => &lib.metadata } } } -impl<'a> CrateReader<'a> { - pub fn new(sess: &'a Session, cstore: &'a CStore) -> CrateReader<'a> { - CrateReader { +enum LoadResult { + Previous(CrateNum), + Loaded(Library), +} + +impl<'a> CrateLoader<'a> { + pub fn new(sess: &'a Session, cstore: &'a CStore, local_crate_name: &str) -> Self { + CrateLoader { sess: sess, cstore: cstore, next_crate_num: cstore.next_crate_num(), - foreign_item_map: FnvHashMap(), + foreign_item_map: FxHashMap(), + local_crate_name: Symbol::intern(local_crate_name), } } - fn extract_crate_info(&self, i: &ast::Item) -> Option { + fn extract_crate_info(&self, i: &ast::Item) -> Option { match i.node { - ast::ItemExternCrate(ref path_opt) => { + ast::ItemKind::ExternCrate(ref path_opt) => 
{ debug!("resolving extern crate stmt. ident: {} path_opt: {:?}", i.ident, path_opt); let name = match *path_opt { Some(name) => { validate_crate_name(Some(self.sess), &name.as_str(), Some(i.span)); - name.to_string() - } - None => i.ident.to_string(), - }; - Some(CrateInfo { - ident: i.ident.to_string(), - name: name, - id: i.id, - should_link: should_link(i), - }) - } - _ => None - } - } - - // Dup of the above, but for the hir - fn extract_crate_info_hir(&self, i: &hir::Item) -> Option { - match i.node { - hir::ItemExternCrate(ref path_opt) => { - debug!("resolving extern crate stmt. ident: {} path_opt: {:?}", - i.name, path_opt); - let name = match *path_opt { - Some(name) => { - validate_crate_name(Some(self.sess), &name.as_str(), - Some(i.span)); - name.to_string() + name } - None => i.name.to_string(), + None => i.ident.name, }; - Some(CrateInfo { - ident: i.name.to_string(), + Some(ExternCrateInfo { + ident: i.ident.name, name: name, id: i.id, - should_link: should_link_hir(i), + dep_kind: if attr::contains_name(&i.attrs, "no_link") { + DepKind::UnexportedMacrosOnly + } else { + DepKind::Explicit + }, }) } _ => None } } - fn existing_match(&self, name: &str, hash: Option<&Svh>, kind: PathKind) - -> Option { + fn existing_match(&self, name: Symbol, hash: Option<&Svh>, kind: PathKind) + -> Option { let mut ret = None; self.cstore.iter_crate_data(|cnum, data| { if data.name != name { return } @@ -225,7 +203,7 @@ impl<'a> CrateReader<'a> { // `source` stores paths which are normalized which may be different // from the strings on the command line. 
let source = self.cstore.used_crate_source(cnum); - if let Some(locs) = self.sess.opts.externs.get(name) { + if let Some(locs) = self.sess.opts.externs.get(&*name.as_str()) { let found = locs.iter().any(|l| { let l = fs::canonicalize(l).ok(); source.dylib.as_ref().map(|p| &p.0) == l.as_ref() || @@ -252,38 +230,49 @@ impl<'a> CrateReader<'a> { return ret; } - fn verify_rustc_version(&self, - name: &str, - span: Span, - metadata: &MetadataBlob) { - let crate_rustc_version = decoder::crate_rustc_version(metadata.as_slice()); - if crate_rustc_version != Some(rustc_version()) { - span_err!(self.sess, span, E0514, - "the crate `{}` has been compiled with {}, which is \ - incompatible with this version of rustc", - name, - crate_rustc_version - .as_ref().map(|s|&**s) - .unwrap_or("an old version of rustc") - ); - self.sess.abort_if_errors(); - } + fn verify_no_symbol_conflicts(&self, + span: Span, + root: &CrateRoot) { + // Check for (potential) conflicts with the local crate + if self.local_crate_name == root.name && + self.sess.local_crate_disambiguator() == root.disambiguator { + span_fatal!(self.sess, span, E0519, + "the current crate is indistinguishable from one of its \ + dependencies: it has the same crate-name `{}` and was \ + compiled with the same `-C metadata` arguments. This \ + will result in symbol conflicts between the two.", + root.name) + } + + // Check for conflicts with any crate loaded so far + self.cstore.iter_crate_data(|_, other| { + if other.name() == root.name && // same crate-name + other.disambiguator() == root.disambiguator && // same crate-disambiguator + other.hash() != root.hash { // but different SVH + span_fatal!(self.sess, span, E0523, + "found two different crates with name `{}` that are \ + not distinguished by differing `-C metadata`. 
This \ + will result in symbol conflicts between the two.", + root.name) + } + }); } fn register_crate(&mut self, root: &Option, - ident: &str, - name: &str, + ident: Symbol, + name: Symbol, span: Span, - lib: loader::Library, - explicitly_linked: bool) - -> (ast::CrateNum, Rc, - cstore::CrateSource) { - self.verify_rustc_version(name, span, &lib.metadata); + lib: Library, + dep_kind: DepKind) + -> (CrateNum, Rc) { + info!("register crate `extern crate {} as {}`", name, ident); + let crate_root = lib.metadata.get_root(); + self.verify_no_symbol_conflicts(span, &crate_root); // Claim this crate number and cache it let cnum = self.next_crate_num; - self.next_crate_num += 1; + self.next_crate_num = CrateNum::from_u32(cnum.as_u32() + 1); // Stash paths for top-most crate locally if necessary. let crate_paths = if root.is_none() { @@ -291,6 +280,7 @@ impl<'a> CrateReader<'a> { ident: ident.to_string(), dylib: lib.dylib.clone().map(|p| p.0), rlib: lib.rlib.clone().map(|p| p.0), + rmeta: lib.rmeta.clone().map(|p| p.0), }) } else { None @@ -298,185 +288,250 @@ impl<'a> CrateReader<'a> { // Maintain a reference to the top most crate. 
let root = if root.is_some() { root } else { &crate_paths }; - let loader::Library { dylib, rlib, metadata } = lib; + let Library { dylib, rlib, rmeta, metadata } = lib; - let cnum_map = self.resolve_crate_deps(root, metadata.as_slice(), span); - let staged_api = self.is_staged_api(metadata.as_slice()); + let cnum_map = self.resolve_crate_deps(root, &crate_root, &metadata, cnum, span, dep_kind); - let cmeta = Rc::new(cstore::crate_metadata { - name: name.to_string(), - local_path: RefCell::new(SmallVector::zero()), - local_def_path: RefCell::new(vec![]), - index: decoder::load_index(metadata.as_slice()), - xref_index: decoder::load_xrefs(metadata.as_slice()), - data: metadata, + let cmeta = Rc::new(cstore::CrateMetadata { + name: name, + extern_crate: Cell::new(None), + key_map: metadata.load_key_map(crate_root.index), + proc_macros: crate_root.macro_derive_registrar.map(|_| { + self.load_derive_macros(&crate_root, dylib.clone().map(|p| p.0), span) + }), + root: crate_root, + blob: metadata, cnum_map: RefCell::new(cnum_map), cnum: cnum, codemap_import_info: RefCell::new(vec![]), - span: span, - staged_api: staged_api, - explicitly_linked: Cell::new(explicitly_linked), + dep_kind: Cell::new(dep_kind), + source: cstore::CrateSource { + dylib: dylib, + rlib: rlib, + rmeta: rmeta, + }, }); - let source = cstore::CrateSource { - dylib: dylib, - rlib: rlib, - cnum: cnum, - }; - self.cstore.set_crate_data(cnum, cmeta.clone()); - self.cstore.add_used_crate_source(source.clone()); - (cnum, cmeta, source) - } - - fn is_staged_api(&self, data: &[u8]) -> bool { - let attrs = decoder::get_crate_attributes(data); - for attr in &attrs { - if attr.name() == "stable" || attr.name() == "unstable" { - return true - } - } - false + (cnum, cmeta) } fn resolve_crate(&mut self, root: &Option, - ident: &str, - name: &str, + ident: Symbol, + name: Symbol, hash: Option<&Svh>, span: Span, - kind: PathKind, - explicitly_linked: bool) - -> (ast::CrateNum, Rc, - cstore::CrateSource) { - enum 
LookupResult { - Previous(ast::CrateNum), - Loaded(loader::Library), - } - let result = match self.existing_match(name, hash, kind) { - Some(cnum) => LookupResult::Previous(cnum), - None => { - let mut load_ctxt = loader::Context { - sess: self.sess, - span: span, - ident: ident, - crate_name: name, - hash: hash.map(|a| &*a), - filesearch: self.sess.target_filesearch(kind), - target: &self.sess.target.target, - triple: &self.sess.opts.target_triple, - root: root, - rejected_via_hash: vec!(), - rejected_via_triple: vec!(), - rejected_via_kind: vec!(), - should_match_name: true, + path_kind: PathKind, + mut dep_kind: DepKind) + -> (CrateNum, Rc) { + info!("resolving crate `extern crate {} as {}`", name, ident); + let result = if let Some(cnum) = self.existing_match(name, hash, path_kind) { + LoadResult::Previous(cnum) + } else { + info!("falling back to a load"); + let mut locate_ctxt = locator::Context { + sess: self.sess, + span: span, + ident: ident, + crate_name: name, + hash: hash.map(|a| &*a), + filesearch: self.sess.target_filesearch(path_kind), + target: &self.sess.target.target, + triple: &self.sess.opts.target_triple, + root: root, + rejected_via_hash: vec![], + rejected_via_triple: vec![], + rejected_via_kind: vec![], + rejected_via_version: vec![], + rejected_via_filename: vec![], + should_match_name: true, + is_proc_macro: Some(false), + }; + + self.load(&mut locate_ctxt).or_else(|| { + dep_kind = DepKind::UnexportedMacrosOnly; + + let mut proc_macro_locator = locator::Context { + target: &self.sess.host, + triple: config::host_triple(), + filesearch: self.sess.host_filesearch(path_kind), + rejected_via_hash: vec![], + rejected_via_triple: vec![], + rejected_via_kind: vec![], + rejected_via_version: vec![], + rejected_via_filename: vec![], + is_proc_macro: Some(true), + ..locate_ctxt }; - let library = load_ctxt.load_library_crate(); - - // In the case that we're loading a crate, but not matching - // against a hash, we could load a crate which has the 
same hash - // as an already loaded crate. If this is the case prevent - // duplicates by just using the first crate. - let meta_hash = decoder::get_crate_hash(library.metadata - .as_slice()); - let mut result = LookupResult::Loaded(library); - self.cstore.iter_crate_data(|cnum, data| { - if data.name() == name && meta_hash == data.hash() { - assert!(hash.is_none()); - result = LookupResult::Previous(cnum); - } - }); - result - } + + self.load(&mut proc_macro_locator) + }).unwrap_or_else(|| locate_ctxt.report_errs()) }; match result { - LookupResult::Previous(cnum) => { + LoadResult::Previous(cnum) => { let data = self.cstore.get_crate_data(cnum); - if explicitly_linked && !data.explicitly_linked.get() { - data.explicitly_linked.set(explicitly_linked); + if data.root.macro_derive_registrar.is_some() { + dep_kind = DepKind::UnexportedMacrosOnly; } - (cnum, data, self.cstore.used_crate_source(cnum)) + data.dep_kind.set(cmp::max(data.dep_kind.get(), dep_kind)); + (cnum, data) } - LookupResult::Loaded(library) => { - self.register_crate(root, ident, name, span, library, - explicitly_linked) + LoadResult::Loaded(library) => { + self.register_crate(root, ident, name, span, library, dep_kind) } } } + fn load(&mut self, locate_ctxt: &mut locator::Context) -> Option { + let library = match locate_ctxt.maybe_load_library_crate() { + Some(lib) => lib, + None => return None, + }; + + // In the case that we're loading a crate, but not matching + // against a hash, we could load a crate which has the same hash + // as an already loaded crate. If this is the case prevent + // duplicates by just using the first crate. + // + // Note that we only do this for target triple crates, though, as we + // don't want to match a host crate against an equivalent target one + // already loaded. 
+ let root = library.metadata.get_root(); + if locate_ctxt.triple == self.sess.opts.target_triple { + let mut result = LoadResult::Loaded(library); + self.cstore.iter_crate_data(|cnum, data| { + if data.name() == root.name && root.hash == data.hash() { + assert!(locate_ctxt.hash.is_none()); + info!("load success, going to previous cnum: {}", cnum); + result = LoadResult::Previous(cnum); + } + }); + Some(result) + } else { + Some(LoadResult::Loaded(library)) + } + } + + fn update_extern_crate(&mut self, + cnum: CrateNum, + mut extern_crate: ExternCrate, + visited: &mut FxHashSet<(CrateNum, bool)>) + { + if !visited.insert((cnum, extern_crate.direct)) { return } + + let cmeta = self.cstore.get_crate_data(cnum); + let old_extern_crate = cmeta.extern_crate.get(); + + // Prefer: + // - something over nothing (tuple.0); + // - direct extern crate to indirect (tuple.1); + // - shorter paths to longer (tuple.2). + let new_rank = (true, extern_crate.direct, !extern_crate.path_len); + let old_rank = match old_extern_crate { + None => (false, false, !0), + Some(ref c) => (true, c.direct, !c.path_len), + }; + + if old_rank >= new_rank { + return; // no change needed + } + + cmeta.extern_crate.set(Some(extern_crate)); + // Propagate the extern crate info to dependencies. 
+ extern_crate.direct = false; + for &dep_cnum in cmeta.cnum_map.borrow().iter() { + self.update_extern_crate(dep_cnum, extern_crate, visited); + } + } + // Go through the crate metadata and load any crates that it references fn resolve_crate_deps(&mut self, root: &Option, - cdata: &[u8], span : Span) - -> cstore::cnum_map { + crate_root: &CrateRoot, + metadata: &MetadataBlob, + krate: CrateNum, + span: Span, + dep_kind: DepKind) + -> cstore::CrateNumMap { debug!("resolving deps of external crate"); - // The map from crate numbers in the crate we're resolving to local crate - // numbers - decoder::get_crate_deps(cdata).iter().map(|dep| { + if crate_root.macro_derive_registrar.is_some() { + return cstore::CrateNumMap::new(); + } + + // The map from crate numbers in the crate we're resolving to local crate numbers. + // We map 0 and all other holes in the map to our parent crate. The "additional" + // self-dependencies should be harmless. + ::std::iter::once(krate).chain(crate_root.crate_deps.decode(metadata).map(|dep| { debug!("resolving dep crate {} hash: `{}`", dep.name, dep.hash); - let (local_cnum, _, _) = self.resolve_crate(root, - &dep.name, - &dep.name, - Some(&dep.hash), - span, - PathKind::Dependency, - dep.explicitly_linked); - (dep.cnum, local_cnum) - }).collect() + if dep.kind == DepKind::UnexportedMacrosOnly { + return krate; + } + let dep_kind = match dep_kind { + DepKind::MacrosOnly => DepKind::MacrosOnly, + _ => dep.kind, + }; + let (local_cnum, ..) 
= self.resolve_crate( + root, dep.name, dep.name, Some(&dep.hash), span, PathKind::Dependency, dep_kind, + ); + local_cnum + })).collect() } - fn read_extension_crate(&mut self, span: Span, info: &CrateInfo) -> ExtensionCrate { + fn read_extension_crate(&mut self, span: Span, info: &ExternCrateInfo) -> ExtensionCrate { + info!("read extension crate {} `extern crate {} as {}` dep_kind={:?}", + info.id, info.name, info.ident, info.dep_kind); let target_triple = &self.sess.opts.target_triple[..]; let is_cross = target_triple != config::host_triple(); - let mut should_link = info.should_link && !is_cross; let mut target_only = false; - let ident = info.ident.clone(); - let name = info.name.clone(); - let mut load_ctxt = loader::Context { + let mut locate_ctxt = locator::Context { sess: self.sess, span: span, - ident: &ident[..], - crate_name: &name[..], + ident: info.ident, + crate_name: info.name, hash: None, filesearch: self.sess.host_filesearch(PathKind::Crate), target: &self.sess.host, triple: config::host_triple(), root: &None, - rejected_via_hash: vec!(), - rejected_via_triple: vec!(), - rejected_via_kind: vec!(), + rejected_via_hash: vec![], + rejected_via_triple: vec![], + rejected_via_kind: vec![], + rejected_via_version: vec![], + rejected_via_filename: vec![], should_match_name: true, + is_proc_macro: None, }; - let library = match load_ctxt.maybe_load_library_crate() { - Some(l) => l, - None if is_cross => { - // Try loading from target crates. This will abort later if we - // try to load a plugin registrar function, - target_only = true; - should_link = info.should_link; - - load_ctxt.target = &self.sess.target.target; - load_ctxt.triple = target_triple; - load_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate); - load_ctxt.load_library_crate() + let library = self.load(&mut locate_ctxt).or_else(|| { + if !is_cross { + return None } - None => { load_ctxt.report_load_errs(); unreachable!() }, + // Try loading from target crates. 
This will abort later if we + // try to load a plugin registrar function, + target_only = true; + + locate_ctxt.target = &self.sess.target.target; + locate_ctxt.triple = target_triple; + locate_ctxt.filesearch = self.sess.target_filesearch(PathKind::Crate); + + self.load(&mut locate_ctxt) + }); + let library = match library { + Some(l) => l, + None => locate_ctxt.report_errs(), }; - let dylib = library.dylib.clone(); - let register = should_link && self.existing_match(&info.name, - None, - PathKind::Crate).is_none(); - let metadata = if register { - // Register crate now to avoid double-reading metadata - let (_, cmd, _) = self.register_crate(&None, &info.ident, - &info.name, span, library, - true); - PMDSource::Registered(cmd) - } else { - // Not registering the crate; just hold on to the metadata - PMDSource::Owned(library.metadata) + let (dylib, metadata) = match library { + LoadResult::Previous(cnum) => { + let data = self.cstore.get_crate_data(cnum); + (data.source.dylib.clone(), PMDSource::Registered(data)) + } + LoadResult::Loaded(library) => { + let dylib = library.dylib.clone(); + let metadata = PMDSource::Owned(library); + (dylib, metadata) + } }; ExtensionCrate { @@ -486,65 +541,75 @@ impl<'a> CrateReader<'a> { } } - /// Read exported macros. - pub fn read_exported_macros(&mut self, item: &ast::Item) -> Vec { - let ci = self.extract_crate_info(item).unwrap(); - let ekrate = self.read_extension_crate(item.span, &ci); - - let source_name = format!("<{} macros>", item.ident); - let mut macros = vec![]; - decoder::each_exported_macro(ekrate.metadata.as_slice(), - &*self.cstore.intr, - |name, attrs, body| { - // NB: Don't use parse::parse_tts_from_source_str because it parses with - // quote_depth > 0. 
- let mut p = parse::new_parser_from_source_str(&self.sess.parse_sess, - self.sess.opts.cfg.clone(), - source_name.clone(), - body); - let lo = p.span.lo; - let body = match p.parse_all_token_trees() { - Ok(body) => body, - Err(mut err) => { - err.emit(); - panic!(FatalError); - } - }; - let span = mk_sp(lo, p.last_span.hi); - p.abort_if_errors(); + /// Load custom derive macros. + /// + /// Note that this is intentionally similar to how we load plugins today, + /// but also intentionally separate. Plugins are likely always going to be + /// implemented as dynamic libraries, but we have a possible future where + /// custom derive (and other macro-1.1 style features) are implemented via + /// executables and custom IPC. + fn load_derive_macros(&mut self, root: &CrateRoot, dylib: Option, span: Span) + -> Vec<(ast::Name, Rc)> { + use std::{env, mem}; + use proc_macro::TokenStream; + use proc_macro::__internal::Registry; + use rustc_back::dynamic_lib::DynamicLibrary; + use syntax_ext::deriving::custom::CustomDerive; + + let path = match dylib { + Some(dylib) => dylib, + None => span_bug!(span, "proc-macro crate not dylib"), + }; + // Make sure the path contains a / or the linker will search for it. 
+ let path = env::current_dir().unwrap().join(path); + let lib = match DynamicLibrary::open(Some(&path)) { + Ok(lib) => lib, + Err(err) => self.sess.span_fatal(span, &err), + }; - // Mark the attrs as used - for attr in &attrs { - attr::mark_used(attr); - } + let sym = self.sess.generate_derive_registrar_symbol(&root.hash, + root.macro_derive_registrar.unwrap()); + let registrar = unsafe { + let sym = match lib.symbol(&sym) { + Ok(f) => f, + Err(err) => self.sess.span_fatal(span, &err), + }; + mem::transmute::<*mut u8, fn(&mut Registry)>(sym) + }; - macros.push(ast::MacroDef { - ident: ast::Ident::with_empty_ctxt(name), - attrs: attrs, - id: ast::DUMMY_NODE_ID, - span: span, - imported_from: Some(item.ident), - // overridden in plugin/load.rs - export: false, - use_locally: false, - allow_internal_unstable: false, - - body: body, - }); - true + struct MyRegistrar(Vec<(ast::Name, Rc)>); + + impl Registry for MyRegistrar { + fn register_custom_derive(&mut self, + trait_name: &str, + expand: fn(TokenStream) -> TokenStream, + attributes: &[&'static str]) { + let attrs = attributes.iter().cloned().map(Symbol::intern).collect(); + let derive = SyntaxExtension::CustomDerive( + Box::new(CustomDerive::new(expand, attrs)) + ); + self.0.push((Symbol::intern(trait_name), Rc::new(derive))); } - ); - macros + } + + let mut my_registrar = MyRegistrar(Vec::new()); + registrar(&mut my_registrar); + + // Intentionally leak the dynamic library. We can't ever unload it + // since the library can make things that will live arbitrarily long. + mem::forget(lib); + my_registrar.0 } - /// Look for a plugin registrar. Returns library path and symbol name. + /// Look for a plugin registrar. Returns library path, crate + /// SVH and DefIndex of the registrar function. 
pub fn find_plugin_registrar(&mut self, span: Span, name: &str) - -> Option<(PathBuf, String)> { - let ekrate = self.read_extension_crate(span, &CrateInfo { - name: name.to_string(), - ident: name.to_string(), + -> Option<(PathBuf, Svh, DefIndex)> { + let ekrate = self.read_extension_crate(span, &ExternCrateInfo { + name: Symbol::intern(name), + ident: Symbol::intern(name), id: ast::DUMMY_NODE_ID, - should_link: false, + dep_kind: DepKind::UnexportedMacrosOnly, }); if ekrate.target_only { @@ -554,16 +619,14 @@ impl<'a> CrateReader<'a> { name, config::host_triple(), self.sess.opts.target_triple); - span_err!(self.sess, span, E0456, "{}", &message[..]); - self.sess.abort_if_errors(); + span_fatal!(self.sess, span, E0456, "{}", &message[..]); } - let registrar = - decoder::get_plugin_registrar_fn(ekrate.metadata.as_slice()) - .map(|id| decoder::get_symbol_from_buf(ekrate.metadata.as_slice(), id)); - - match (ekrate.dylib.as_ref(), registrar) { - (Some(dylib), Some(reg)) => Some((dylib.to_path_buf(), reg)), + let root = ekrate.metadata.get_root(); + match (ekrate.dylib.as_ref(), root.plugin_registrar_fn) { + (Some(dylib), Some(reg)) => { + Some((dylib.to_path_buf(), root.hash, reg)) + } (None, Some(_)) => { span_err!(self.sess, span, E0457, "plugin `{}` only found in rlib format, but must be available \ @@ -579,9 +642,9 @@ impl<'a> CrateReader<'a> { fn register_statically_included_foreign_items(&mut self) { let libs = self.cstore.get_used_libraries(); - for (lib, list) in self.foreign_item_map.iter() { - let is_static = libs.borrow().iter().any(|&(ref name, kind)| { - lib == name && kind == cstore::NativeStatic + for (foreign_lib, list) in self.foreign_item_map.iter() { + let is_static = libs.borrow().iter().any(|lib| { + lib.name == &**foreign_lib && lib.kind == cstore::NativeStatic }); if is_static { for id in list { @@ -591,6 +654,85 @@ impl<'a> CrateReader<'a> { } } + fn inject_panic_runtime(&mut self, krate: &ast::Crate) { + // If we're only compiling an rlib, 
then there's no need to select a + // panic runtime, so we just skip this section entirely. + let any_non_rlib = self.sess.crate_types.borrow().iter().any(|ct| { + *ct != config::CrateTypeRlib + }); + if !any_non_rlib { + info!("panic runtime injection skipped, only generating rlib"); + return + } + + // If we need a panic runtime, we try to find an existing one here. At + // the same time we perform some general validation of the DAG we've got + // going such as ensuring everything has a compatible panic strategy. + // + // The logic for finding the panic runtime here is pretty much the same + // as the allocator case with the only addition that the panic strategy + // compilation mode also comes into play. + let desired_strategy = self.sess.panic_strategy(); + let mut runtime_found = false; + let mut needs_panic_runtime = attr::contains_name(&krate.attrs, + "needs_panic_runtime"); + self.cstore.iter_crate_data(|cnum, data| { + needs_panic_runtime = needs_panic_runtime || data.needs_panic_runtime(); + if data.is_panic_runtime() { + // Inject a dependency from all #![needs_panic_runtime] to this + // #![panic_runtime] crate. + self.inject_dependency_if(cnum, "a panic runtime", + &|data| data.needs_panic_runtime()); + runtime_found = runtime_found || data.dep_kind.get() == DepKind::Explicit; + } + }); + + // If an explicitly linked and matching panic runtime was found, or if + // we just don't need one at all, then we're done here and there's + // nothing else to do. + if !needs_panic_runtime || runtime_found { + return + } + + // By this point we know that we (a) need a panic runtime and (b) no + // panic runtime was explicitly linked. Here we just load an appropriate + // default runtime for our panic strategy and then inject the + // dependencies. + // + // We may resolve to an already loaded crate (as the crate may not have + // been explicitly linked prior to this) and we may re-inject + // dependencies again, but both of those situations are fine. 
+ // + // Also note that we have yet to perform validation of the crate graph + // in terms of everyone has a compatible panic runtime format, that's + // performed later as part of the `dependency_format` module. + let name = match desired_strategy { + PanicStrategy::Unwind => Symbol::intern("panic_unwind"), + PanicStrategy::Abort => Symbol::intern("panic_abort"), + }; + info!("panic runtime not found -- loading {}", name); + + let dep_kind = DepKind::Implicit; + let (cnum, data) = + self.resolve_crate(&None, name, name, None, DUMMY_SP, PathKind::Crate, dep_kind); + + // Sanity check the loaded crate to ensure it is indeed a panic runtime + // and the panic strategy is indeed what we thought it was. + if !data.is_panic_runtime() { + self.sess.err(&format!("the crate `{}` is not a panic runtime", + name)); + } + if data.panic_strategy() != desired_strategy { + self.sess.err(&format!("the crate `{}` does not have the panic \ + strategy `{}`", + name, desired_strategy.desc())); + } + + self.sess.injected_panic_runtime.set(Some(cnum)); + self.inject_dependency_if(cnum, "a panic runtime", + &|data| data.needs_panic_runtime()); + } + fn inject_allocator_crate(&mut self) { // Make sure that we actually need an allocator, if none of our // dependencies need one then we definitely don't! 
@@ -602,10 +744,11 @@ impl<'a> CrateReader<'a> { self.cstore.iter_crate_data(|cnum, data| { needs_allocator = needs_allocator || data.needs_allocator(); if data.is_allocator() { - debug!("{} required by rlib and is an allocator", data.name()); - self.inject_allocator_dependency(cnum); + info!("{} required by rlib and is an allocator", data.name()); + self.inject_dependency_if(cnum, "an allocator", + &|data| data.needs_allocator()); found_required_allocator = found_required_allocator || - data.explicitly_linked.get(); + data.dep_kind.get() == DepKind::Explicit; } }); if !needs_allocator || found_required_allocator { return } @@ -623,8 +766,11 @@ impl<'a> CrateReader<'a> { match *ct { config::CrateTypeExecutable => need_exe_alloc = true, config::CrateTypeDylib | + config::CrateTypeProcMacro | + config::CrateTypeCdylib | config::CrateTypeStaticlib => need_lib_alloc = true, - config::CrateTypeRlib => {} + config::CrateTypeRlib | + config::CrateTypeMetadata => {} } } if !need_lib_alloc && !need_exe_alloc { return } @@ -645,150 +791,85 @@ impl<'a> CrateReader<'a> { // * Staticlibs and Rust dylibs use system malloc // * Rust dylibs used as dependencies to rust use jemalloc let name = if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic { - &self.sess.target.target.options.lib_allocation_crate + Symbol::intern(&self.sess.target.target.options.lib_allocation_crate) } else { - &self.sess.target.target.options.exe_allocation_crate + Symbol::intern(&self.sess.target.target.options.exe_allocation_crate) }; - let (cnum, data, _) = self.resolve_crate(&None, name, name, None, - codemap::DUMMY_SP, - PathKind::Crate, false); + let dep_kind = DepKind::Implicit; + let (cnum, data) = + self.resolve_crate(&None, name, name, None, DUMMY_SP, PathKind::Crate, dep_kind); - // To ensure that the `-Z allocation-crate=foo` option isn't abused, and - // to ensure that the allocator is indeed an allocator, we verify that - // the crate loaded here is indeed tagged #![allocator]. 
+ // Sanity check the crate we loaded to ensure that it is indeed an + // allocator. if !data.is_allocator() { self.sess.err(&format!("the allocator crate `{}` is not tagged \ with #![allocator]", data.name())); } self.sess.injected_allocator.set(Some(cnum)); - self.inject_allocator_dependency(cnum); + self.inject_dependency_if(cnum, "an allocator", + &|data| data.needs_allocator()); } - fn inject_allocator_dependency(&self, allocator: ast::CrateNum) { + fn inject_dependency_if(&self, + krate: CrateNum, + what: &str, + needs_dep: &Fn(&cstore::CrateMetadata) -> bool) { + // don't perform this validation if the session has errors, as one of + // those errors may indicate a circular dependency which could cause + // this to stack overflow. + if self.sess.has_errors() { + return + } + // Before we inject any dependencies, make sure we don't inject a - // circular dependency by validating that this allocator crate doesn't - // transitively depend on any `#![needs_allocator]` crates. - validate(self, allocator, allocator); - - // All crates tagged with `needs_allocator` do not explicitly depend on - // the allocator selected for this compile, but in order for this - // compilation to be successfully linked we need to inject a dependency - // (to order the crates on the command line correctly). - // - // Here we inject a dependency from all crates with #![needs_allocator] - // to the crate tagged with #![allocator] for this compilation unit. + // circular dependency by validating that this crate doesn't + // transitively depend on any crates satisfying `needs_dep`. 
+ for dep in self.cstore.crate_dependencies_in_rpo(krate) { + let data = self.cstore.get_crate_data(dep); + if needs_dep(&data) { + self.sess.err(&format!("the crate `{}` cannot depend \ + on a crate that needs {}, but \ + it depends on `{}`", + self.cstore.get_crate_data(krate).name(), + what, + data.name())); + } + } + + // All crates satisfying `needs_dep` do not explicitly depend on the + // crate provided for this compile, but in order for this compilation to + // be successfully linked we need to inject a dependency (to order the + // crates on the command line correctly). self.cstore.iter_crate_data(|cnum, data| { - if !data.needs_allocator() { + if !needs_dep(data) { return } - info!("injecting a dep from {} to {}", cnum, allocator); - let mut cnum_map = data.cnum_map.borrow_mut(); - let remote_cnum = cnum_map.len() + 1; - let prev = cnum_map.insert(remote_cnum as ast::CrateNum, allocator); - assert!(prev.is_none()); + info!("injecting a dep from {} to {}", cnum, krate); + data.cnum_map.borrow_mut().push(krate); }); - - fn validate(me: &CrateReader, krate: ast::CrateNum, - allocator: ast::CrateNum) { - let data = me.cstore.get_crate_data(krate); - if data.needs_allocator() { - let krate_name = data.name(); - let data = me.cstore.get_crate_data(allocator); - let alloc_name = data.name(); - me.sess.err(&format!("the allocator crate `{}` cannot depend \ - on a crate that needs an allocator, but \ - it depends on `{}`", alloc_name, - krate_name)); - } - - for (_, &dep) in data.cnum_map.borrow().iter() { - validate(me, dep, allocator); - } - } } } -impl<'a, 'b> LocalCrateReader<'a, 'b> { - pub fn new(sess: &'a Session, cstore: &'a CStore, - map: &'a hir_map::Map<'b>) -> LocalCrateReader<'a, 'b> { - LocalCrateReader { - sess: sess, - cstore: cstore, - creader: CrateReader::new(sess, cstore), - ast_map: map, - } - } - - // Traverses an AST, reading all the information about use'd crates and - // extern libraries necessary for later resolving, typechecking, 
linking, - // etc. - pub fn read_crates(&mut self, krate: &hir::Crate) { - self.process_crate(krate); - krate.visit_all_items(self); - self.creader.inject_allocator_crate(); - - if log_enabled!(log::INFO) { - dump_crates(&self.cstore); - } - - for &(ref name, kind) in &self.sess.opts.libs { - register_native_lib(self.sess, self.cstore, None, name.clone(), kind); - } - self.creader.register_statically_included_foreign_items(); - } - - fn process_crate(&self, c: &hir::Crate) { - for a in c.attrs.iter().filter(|m| m.name() == "link_args") { - match a.value_str() { - Some(ref linkarg) => self.cstore.add_used_link_args(&linkarg), - None => { /* fallthrough */ } - } - } - } - - fn process_item(&mut self, i: &hir::Item) { - match i.node { - hir::ItemExternCrate(_) => { - if !should_link_hir(i) { - return; - } - - match self.creader.extract_crate_info_hir(i) { - Some(info) => { - let (cnum, cmeta, _) = self.creader.resolve_crate(&None, - &info.ident, - &info.name, - None, - i.span, - PathKind::Crate, - true); - let def_id = self.ast_map.local_def_id(i.id); - let def_path = self.ast_map.def_path(def_id); - cmeta.update_local_def_path(def_path); - self.ast_map.with_path(i.id, |path| { - cmeta.update_local_path(path) - }); - self.cstore.add_extern_mod_stmt_cnum(info.id, cnum); - } - None => () - } +impl<'a> CrateLoader<'a> { + pub fn preprocess(&mut self, krate: &ast::Crate) { + for attr in krate.attrs.iter().filter(|m| m.name() == "link_args") { + if let Some(linkarg) = attr.value_str() { + self.cstore.add_used_link_args(&linkarg.as_str()); } - hir::ItemForeignMod(ref fm) => self.process_foreign_mod(i, fm), - _ => { } } } - fn process_foreign_mod(&mut self, i: &hir::Item, fm: &hir::ForeignMod) { - if fm.abi == abi::Rust || fm.abi == abi::RustIntrinsic || fm.abi == abi::PlatformIntrinsic { + fn process_foreign_mod(&mut self, i: &ast::Item, fm: &ast::ForeignMod) { + if fm.abi == Abi::Rust || fm.abi == Abi::RustIntrinsic || fm.abi == Abi::PlatformIntrinsic { return; } // First, 
add all of the custom #[link_args] attributes for m in i.attrs.iter().filter(|a| a.check_name("link_args")) { if let Some(linkarg) = m.value_str() { - self.cstore.add_used_link_args(&linkarg); + self.cstore.add_used_link_args(&linkarg.as_str()); } } @@ -800,14 +881,15 @@ impl<'a, 'b> LocalCrateReader<'a, 'b> { }; let kind = items.iter().find(|k| { k.check_name("kind") - }).and_then(|a| a.value_str()); + }).and_then(|a| a.value_str()).map(Symbol::as_str); let kind = match kind.as_ref().map(|s| &s[..]) { Some("static") => cstore::NativeStatic, Some("dylib") => cstore::NativeUnknown, Some("framework") => cstore::NativeFramework, Some(k) => { - span_err!(self.sess, m.span, E0458, - "unknown kind: `{}`", k); + struct_span_err!(self.sess, m.span, E0458, + "unknown kind: `{}`", k) + .span_label(m.span, &format!("unknown kind")).emit(); cstore::NativeUnknown } None => cstore::NativeUnknown @@ -818,12 +900,24 @@ impl<'a, 'b> LocalCrateReader<'a, 'b> { let n = match n { Some(n) => n, None => { - span_err!(self.sess, m.span, E0459, - "#[link(...)] specified without `name = \"foo\"`"); - InternedString::new("foo") + struct_span_err!(self.sess, m.span, E0459, + "#[link(...)] specified without `name = \"foo\"`") + .span_label(m.span, &format!("missing `name` argument")).emit(); + Symbol::intern("foo") } }; - register_native_lib(self.sess, self.cstore, Some(m.span), n.to_string(), kind); + let cfg = items.iter().find(|k| { + k.check_name("cfg") + }).and_then(|a| a.meta_item_list()); + let cfg = cfg.map(|list| { + list[0].meta_item().unwrap().clone() + }); + let lib = NativeLibrary { + name: n, + kind: kind, + cfg: cfg, + }; + register_native_lib(self.sess, self.cstore, Some(m.span), lib); } // Finally, process the #[linked_from = "..."] attribute @@ -832,137 +926,51 @@ impl<'a, 'b> LocalCrateReader<'a, 'b> { Some(name) => name, None => continue, }; - let list = self.creader.foreign_item_map.entry(lib_name.to_string()) + let list = self.foreign_item_map.entry(lib_name.to_string()) 
.or_insert(Vec::new()); list.extend(fm.items.iter().map(|it| it.id)); } } } -/// Imports the codemap from an external crate into the codemap of the crate -/// currently being compiled (the "local crate"). -/// -/// The import algorithm works analogous to how AST items are inlined from an -/// external crate's metadata: -/// For every FileMap in the external codemap an 'inline' copy is created in the -/// local codemap. The correspondence relation between external and local -/// FileMaps is recorded in the `ImportedFileMap` objects returned from this -/// function. When an item from an external crate is later inlined into this -/// crate, this correspondence information is used to translate the span -/// information of the inlined item so that it refers the correct positions in -/// the local codemap (see `astencode::DecodeContext::tr_span()`). -/// -/// The import algorithm in the function below will reuse FileMaps already -/// existing in the local codemap. For example, even if the FileMap of some -/// source file of libstd gets imported many times, there will only ever be -/// one FileMap object for the corresponding file in the local codemap. -/// -/// Note that imported FileMaps do not actually contain the source code of the -/// file they represent, just information about length, line breaks, and -/// multibyte characters. This information is enough to generate valid debuginfo -/// for items inlined from other crates. -pub fn import_codemap(local_codemap: &codemap::CodeMap, - metadata: &MetadataBlob) - -> Vec { - let external_codemap = decoder::get_imported_filemaps(metadata.as_slice()); - - let imported_filemaps = external_codemap.into_iter().map(|filemap_to_import| { - // Try to find an existing FileMap that can be reused for the filemap to - // be imported. A FileMap is reusable if it is exactly the same, just - // positioned at a different offset within the codemap. 
- let reusable_filemap = { - local_codemap.files - .borrow() - .iter() - .find(|fm| are_equal_modulo_startpos(&fm, &filemap_to_import)) - .map(|rc| rc.clone()) - }; - - match reusable_filemap { - Some(fm) => { - cstore::ImportedFileMap { - original_start_pos: filemap_to_import.start_pos, - original_end_pos: filemap_to_import.end_pos, - translated_filemap: fm - } - } - None => { - // We can't reuse an existing FileMap, so allocate a new one - // containing the information we need. - let codemap::FileMap { - name, - start_pos, - end_pos, - lines, - multibyte_chars, - .. - } = filemap_to_import; - - let source_length = (end_pos - start_pos).to_usize(); - - // Translate line-start positions and multibyte character - // position into frame of reference local to file. - // `CodeMap::new_imported_filemap()` will then translate those - // coordinates to their new global frame of reference when the - // offset of the FileMap is known. - let mut lines = lines.into_inner(); - for pos in &mut lines { - *pos = *pos - start_pos; - } - let mut multibyte_chars = multibyte_chars.into_inner(); - for mbc in &mut multibyte_chars { - mbc.pos = mbc.pos - start_pos; - } +impl<'a> middle::cstore::CrateLoader for CrateLoader<'a> { + fn postprocess(&mut self, krate: &ast::Crate) { + self.inject_allocator_crate(); + self.inject_panic_runtime(krate); - let local_version = local_codemap.new_imported_filemap(name, - source_length, - lines, - multibyte_chars); - cstore::ImportedFileMap { - original_start_pos: start_pos, - original_end_pos: end_pos, - translated_filemap: local_version - } - } - } - }).collect(); - - return imported_filemaps; - - fn are_equal_modulo_startpos(fm1: &codemap::FileMap, - fm2: &codemap::FileMap) - -> bool { - if fm1.name != fm2.name { - return false; + if log_enabled!(log::INFO) { + dump_crates(&self.cstore); } - let lines1 = fm1.lines.borrow(); - let lines2 = fm2.lines.borrow(); - - if lines1.len() != lines2.len() { - return false; + for &(ref name, kind) in 
&self.sess.opts.libs { + let lib = NativeLibrary { + name: Symbol::intern(name), + kind: kind, + cfg: None, + }; + register_native_lib(self.sess, self.cstore, None, lib); } + self.register_statically_included_foreign_items(); + } - for (&line1, &line2) in lines1.iter().zip(lines2.iter()) { - if (line1 - fm1.start_pos) != (line2 - fm2.start_pos) { - return false; - } + fn process_item(&mut self, item: &ast::Item, definitions: &Definitions) { + match item.node { + ast::ItemKind::ExternCrate(_) => {} + ast::ItemKind::ForeignMod(ref fm) => return self.process_foreign_mod(item, fm), + _ => return, } - let multibytes1 = fm1.multibyte_chars.borrow(); - let multibytes2 = fm2.multibyte_chars.borrow(); - - if multibytes1.len() != multibytes2.len() { - return false; - } + let info = self.extract_crate_info(item).unwrap(); + let (cnum, ..) = self.resolve_crate( + &None, info.ident, info.name, None, item.span, PathKind::Crate, info.dep_kind, + ); - for (mb1, mb2) in multibytes1.iter().zip(multibytes2.iter()) { - if (mb1.bytes != mb2.bytes) || - ((mb1.pos - fm1.start_pos) != (mb2.pos - fm2.start_pos)) { - return false; - } - } + let def_id = definitions.opt_local_def_id(item.id).unwrap(); + let len = definitions.def_path(def_id.index).data.len(); - true + let extern_crate = + ExternCrate { def_id: def_id, span: item.span, direct: true, path_len: len }; + self.update_extern_crate(cnum, extern_crate, &mut FxHashSet()); + self.cstore.add_extern_mod_stmt_cnum(info.id, cnum); } } diff --git a/src/librustc_metadata/csearch.rs b/src/librustc_metadata/csearch.rs deleted file mode 100644 index ecbc840233091..0000000000000 --- a/src/librustc_metadata/csearch.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use astencode; -use cstore; -use decoder; -use encoder; -use loader; - -use middle::cstore::{CrateStore, CrateSource, ChildItem, FoundAst}; -use middle::cstore::{NativeLibraryKind, LinkMeta, LinkagePreference}; -use middle::def; -use middle::lang_items; -use middle::ty::{self, Ty}; -use middle::def_id::{DefId, DefIndex}; - -use rustc::front::map as hir_map; -use rustc::mir::repr::Mir; -use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet}; - -use std::cell::RefCell; -use std::rc::Rc; -use std::path::PathBuf; -use syntax::ast; -use syntax::attr; -use syntax::parse::token; -use rustc_back::svh::Svh; -use rustc_back::target::Target; -use rustc_front::hir; - -impl<'tcx> CrateStore<'tcx> for cstore::CStore { - fn stability(&self, def: DefId) -> Option - { - let cdata = self.get_crate_data(def.krate); - decoder::get_stability(&*cdata, def.index) - } - - fn deprecation(&self, def: DefId) -> Option - { - let cdata = self.get_crate_data(def.krate); - decoder::get_deprecation(&*cdata, def.index) - } - - fn closure_kind(&self, _tcx: &ty::ctxt<'tcx>, def_id: DefId) -> ty::ClosureKind - { - assert!(!def_id.is_local()); - let cdata = self.get_crate_data(def_id.krate); - decoder::closure_kind(&*cdata, def_id.index) - } - - fn closure_ty(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx> - { - assert!(!def_id.is_local()); - let cdata = self.get_crate_data(def_id.krate); - decoder::closure_ty(&*cdata, def_id.index, tcx) - } - - fn item_variances(&self, def: DefId) -> ty::ItemVariances { - let cdata = self.get_crate_data(def.krate); - decoder::get_item_variances(&*cdata, def.index) - } - - fn repr_attrs(&self, def: DefId) -> Vec { - let cdata = self.get_crate_data(def.krate); - decoder::get_repr_attrs(&*cdata, def.index) - } - - fn item_type(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::TypeScheme<'tcx> - { - let cdata = self.get_crate_data(def.krate); - 
decoder::get_type(&*cdata, def.index, tcx) - } - - fn item_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_predicates(&*cdata, def.index, tcx) - } - - fn item_super_predicates(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::GenericPredicates<'tcx> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_super_predicates(&*cdata, def.index, tcx) - } - - fn item_attrs(&self, def_id: DefId) -> Vec - { - let cdata = self.get_crate_data(def_id.krate); - decoder::get_item_attrs(&*cdata, def_id.index) - } - - fn item_symbol(&self, def: DefId) -> String - { - let cdata = self.get_crate_data(def.krate); - decoder::get_symbol(&cdata, def.index) - } - - fn trait_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::TraitDef<'tcx> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_trait_def(&*cdata, def.index, tcx) - } - - fn adt_def(&self, tcx: &ty::ctxt<'tcx>, def: DefId) -> ty::AdtDefMaster<'tcx> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_adt_def(&self.intr, &*cdata, def.index, tcx) - } - - fn method_arg_names(&self, did: DefId) -> Vec - { - let cdata = self.get_crate_data(did.krate); - decoder::get_method_arg_names(&cdata, did.index) - } - - fn item_path(&self, def: DefId) -> Vec { - let cdata = self.get_crate_data(def.krate); - let path = decoder::get_item_path(&*cdata, def.index); - - cdata.with_local_path(|cpath| { - let mut r = Vec::with_capacity(cpath.len() + path.len()); - r.extend_from_slice(cpath); - r.extend_from_slice(&path); - r - }) - } - - fn extern_item_path(&self, def: DefId) -> Vec { - let cdata = self.get_crate_data(def.krate); - let path = decoder::get_item_path(&*cdata, def.index); - - let mut r = Vec::with_capacity(path.len() + 1); - let crate_name = hir_map::PathMod(token::intern(&cdata.name)); - r.push(crate_name); - r.extend_from_slice(&path); - r - } - - fn item_name(&self, def: DefId) -> ast::Name { - let 
cdata = self.get_crate_data(def.krate); - decoder::get_item_name(&self.intr, &cdata, def.index) - } - - - fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec - { - let mut result = vec![]; - let cdata = self.get_crate_data(def_id.krate); - decoder::each_inherent_implementation_for_type(&*cdata, def_id.index, - |iid| result.push(iid)); - result - } - - fn implementations_of_trait(&self, def_id: DefId) -> Vec - { - let mut result = vec![]; - self.iter_crate_data(|_, cdata| { - decoder::each_implementation_for_trait(cdata, def_id, &mut |iid| { - result.push(iid) - }) - }); - result - } - - fn provided_trait_methods(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_provided_trait_methods(self.intr.clone(), &*cdata, def.index, tcx) - } - - fn trait_item_def_ids(&self, def: DefId) - -> Vec - { - let cdata = self.get_crate_data(def.krate); - decoder::get_trait_item_def_ids(&*cdata, def.index) - } - - fn impl_items(&self, impl_def_id: DefId) -> Vec - { - let cdata = self.get_crate_data(impl_def_id.krate); - decoder::get_impl_items(&*cdata, impl_def_id.index) - } - - fn impl_polarity(&self, def: DefId) -> Option - { - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_polarity(&*cdata, def.index) - } - - fn impl_trait_ref(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_trait(&*cdata, def.index, tcx) - } - - fn custom_coerce_unsized_kind(&self, def: DefId) - -> Option - { - let cdata = self.get_crate_data(def.krate); - decoder::get_custom_coerce_unsized_kind(&*cdata, def.index) - } - - // FIXME: killme - fn associated_consts(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Vec>> { - let cdata = self.get_crate_data(def.krate); - decoder::get_associated_consts(self.intr.clone(), &*cdata, def.index, tcx) - } - - fn trait_of_item(&self, tcx: &ty::ctxt<'tcx>, def_id: DefId) -> Option - { - let cdata = 
self.get_crate_data(def_id.krate); - decoder::get_trait_of_item(&*cdata, def_id.index, tcx) - } - - fn impl_or_trait_item(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> ty::ImplOrTraitItem<'tcx> - { - let cdata = self.get_crate_data(def.krate); - decoder::get_impl_or_trait_item( - self.intr.clone(), - &*cdata, - def.index, - tcx) - } - - fn is_const_fn(&self, did: DefId) -> bool - { - let cdata = self.get_crate_data(did.krate); - decoder::is_const_fn(&cdata, did.index) - } - - fn is_defaulted_trait(&self, trait_def_id: DefId) -> bool - { - let cdata = self.get_crate_data(trait_def_id.krate); - decoder::is_defaulted_trait(&*cdata, trait_def_id.index) - } - - fn is_impl(&self, did: DefId) -> bool - { - let cdata = self.get_crate_data(did.krate); - decoder::is_impl(&*cdata, did.index) - } - - fn is_default_impl(&self, impl_did: DefId) -> bool { - let cdata = self.get_crate_data(impl_did.krate); - decoder::is_default_impl(&*cdata, impl_did.index) - } - - fn is_extern_fn(&self, tcx: &ty::ctxt<'tcx>, did: DefId) -> bool - { - let cdata = self.get_crate_data(did.krate); - decoder::is_extern_fn(&*cdata, did.index, tcx) - } - - fn is_static(&self, did: DefId) -> bool - { - let cdata = self.get_crate_data(did.krate); - decoder::is_static(&*cdata, did.index) - } - - fn is_static_method(&self, def: DefId) -> bool - { - let cdata = self.get_crate_data(def.krate); - decoder::is_static_method(&*cdata, def.index) - } - - fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool - { - self.do_is_statically_included_foreign_item(id) - } - - fn is_typedef(&self, did: DefId) -> bool { - let cdata = self.get_crate_data(did.krate); - decoder::is_typedef(&*cdata, did.index) - } - - fn dylib_dependency_formats(&self, cnum: ast::CrateNum) - -> Vec<(ast::CrateNum, LinkagePreference)> - { - let cdata = self.get_crate_data(cnum); - decoder::get_dylib_dependency_formats(&cdata) - } - - fn lang_items(&self, cnum: ast::CrateNum) -> Vec<(DefIndex, usize)> - { - let mut result = 
vec![]; - let crate_data = self.get_crate_data(cnum); - decoder::each_lang_item(&*crate_data, |did, lid| { - result.push((did, lid)); true - }); - result - } - - fn missing_lang_items(&self, cnum: ast::CrateNum) - -> Vec - { - let cdata = self.get_crate_data(cnum); - decoder::get_missing_lang_items(&*cdata) - } - - fn is_staged_api(&self, cnum: ast::CrateNum) -> bool - { - self.get_crate_data(cnum).staged_api - } - - fn is_explicitly_linked(&self, cnum: ast::CrateNum) -> bool - { - self.get_crate_data(cnum).explicitly_linked.get() - } - - fn is_allocator(&self, cnum: ast::CrateNum) -> bool - { - self.get_crate_data(cnum).is_allocator() - } - - fn crate_attrs(&self, cnum: ast::CrateNum) -> Vec - { - decoder::get_crate_attributes(self.get_crate_data(cnum).data()) - } - - fn crate_name(&self, cnum: ast::CrateNum) -> String - { - self.get_crate_data(cnum).name.clone() - } - - fn crate_hash(&self, cnum: ast::CrateNum) -> Svh - { - let cdata = self.get_crate_data(cnum); - decoder::get_crate_hash(cdata.data()) - } - - fn crate_struct_field_attrs(&self, cnum: ast::CrateNum) - -> FnvHashMap> - { - decoder::get_struct_field_attrs(&*self.get_crate_data(cnum)) - } - - fn plugin_registrar_fn(&self, cnum: ast::CrateNum) -> Option - { - let cdata = self.get_crate_data(cnum); - decoder::get_plugin_registrar_fn(cdata.data()).map(|index| DefId { - krate: cnum, - index: index - }) - } - - fn native_libraries(&self, cnum: ast::CrateNum) -> Vec<(NativeLibraryKind, String)> - { - let cdata = self.get_crate_data(cnum); - decoder::get_native_libraries(&*cdata) - } - - fn reachable_ids(&self, cnum: ast::CrateNum) -> Vec - { - let cdata = self.get_crate_data(cnum); - decoder::get_reachable_ids(&*cdata) - } - - fn def_path(&self, def: DefId) -> hir_map::DefPath - { - let cdata = self.get_crate_data(def.krate); - let path = decoder::def_path(&*cdata, def.index); - let local_path = cdata.local_def_path(); - local_path.into_iter().chain(path).collect() - } - - fn 
tuple_struct_definition_if_ctor(&self, did: DefId) -> Option - { - let cdata = self.get_crate_data(did.krate); - decoder::get_tuple_struct_definition_if_ctor(&*cdata, did.index) - } - - fn struct_field_names(&self, def: DefId) -> Vec - { - let cdata = self.get_crate_data(def.krate); - decoder::get_struct_field_names(&self.intr, &*cdata, def.index) - } - - fn item_children(&self, def_id: DefId) -> Vec - { - let mut result = vec![]; - let crate_data = self.get_crate_data(def_id.krate); - let get_crate_data = |cnum| self.get_crate_data(cnum); - decoder::each_child_of_item( - self.intr.clone(), &*crate_data, - def_id.index, get_crate_data, - |def, name, vis| result.push(ChildItem { - def: def, - name: name, - vis: vis - })); - result - } - - fn crate_top_level_items(&self, cnum: ast::CrateNum) -> Vec - { - let mut result = vec![]; - let crate_data = self.get_crate_data(cnum); - let get_crate_data = |cnum| self.get_crate_data(cnum); - decoder::each_top_level_item_of_crate( - self.intr.clone(), &*crate_data, get_crate_data, - |def, name, vis| result.push(ChildItem { - def: def, - name: name, - vis: vis - })); - result - } - - fn maybe_get_item_ast(&'tcx self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> FoundAst<'tcx> - { - let cdata = self.get_crate_data(def.krate); - let decode_inlined_item = Box::new(astencode::decode_inlined_item); - decoder::maybe_get_item_ast(&*cdata, tcx, def.index, decode_inlined_item) - } - - fn maybe_get_item_mir(&self, tcx: &ty::ctxt<'tcx>, def: DefId) - -> Option> { - let cdata = self.get_crate_data(def.krate); - decoder::maybe_get_item_mir(&*cdata, tcx, def.index) - } - - fn crates(&self) -> Vec - { - let mut result = vec![]; - self.iter_crate_data(|cnum, _| result.push(cnum)); - result - } - - fn used_libraries(&self) -> Vec<(String, NativeLibraryKind)> - { - self.get_used_libraries().borrow().clone() - } - - fn used_link_args(&self) -> Vec - { - self.get_used_link_args().borrow().clone() - } - - fn metadata_filename(&self) -> &str - { - 
loader::METADATA_FILENAME - } - - fn metadata_section_name(&self, target: &Target) -> &str - { - loader::meta_section_name(target) - } - fn encode_type(&self, tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Vec - { - encoder::encoded_ty(tcx, ty) - } - - fn used_crates(&self, prefer: LinkagePreference) -> Vec<(ast::CrateNum, Option)> - { - self.do_get_used_crates(prefer) - } - - fn used_crate_source(&self, cnum: ast::CrateNum) -> CrateSource - { - self.opt_used_crate_source(cnum).unwrap() - } - - fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option - { - self.do_extern_mod_stmt_cnum(emod_id) - } - - fn encode_metadata(&self, - tcx: &ty::ctxt<'tcx>, - reexports: &def::ExportMap, - item_symbols: &RefCell>, - link_meta: &LinkMeta, - reachable: &NodeSet, - mir_map: &NodeMap>, - krate: &hir::Crate) -> Vec - { - let encode_inlined_item: encoder::EncodeInlinedItem = - Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii)); - - let encode_params = encoder::EncodeParams { - diag: tcx.sess.diagnostic(), - tcx: tcx, - reexports: reexports, - item_symbols: item_symbols, - link_meta: link_meta, - cstore: self, - encode_inlined_item: encode_inlined_item, - reachable: reachable, - mir_map: mir_map, - }; - encoder::encode_metadata(encode_params, krate) - - } - - fn metadata_encoding_version(&self) -> &[u8] - { - encoder::metadata_encoding_version - } -} diff --git a/src/librustc_metadata/cstore.rs b/src/librustc_metadata/cstore.rs index b0eef29467ba2..73e03a4519664 100644 --- a/src/librustc_metadata/cstore.rs +++ b/src/librustc_metadata/cstore.rs @@ -8,162 +8,173 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-#![allow(non_camel_case_types)] - // The crate store - a central repo for information collected about external // crates and libraries -pub use self::MetadataBlob::*; - -use creader; -use decoder; -use index; -use loader; +use locator; +use schema; -use rustc::back::svh::Svh; -use rustc::front::map as ast_map; -use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet}; +use rustc::dep_graph::DepGraph; +use rustc::hir::def_id::{CRATE_DEF_INDEX, CrateNum, DefIndex, DefId}; +use rustc::hir::map::DefKey; +use rustc::hir::svh::Svh; +use rustc::middle::cstore::{DepKind, ExternCrate}; +use rustc_back::PanicStrategy; +use rustc_data_structures::indexed_vec::IndexVec; +use rustc::util::nodemap::{FxHashMap, NodeMap, NodeSet, DefIdMap}; -use std::cell::{RefCell, Ref, Cell}; +use std::cell::{RefCell, Cell}; use std::rc::Rc; -use std::path::PathBuf; use flate::Bytes; -use syntax::ast; -use syntax::attr; -use syntax::codemap; -use syntax::parse::token; -use syntax::parse::token::IdentInterner; -use syntax::util::small_vector::SmallVector; +use syntax::{ast, attr}; +use syntax::ext::base::SyntaxExtension; +use syntax::symbol::Symbol; +use syntax_pos; -pub use middle::cstore::{NativeLibraryKind, LinkagePreference}; -pub use middle::cstore::{NativeStatic, NativeFramework, NativeUnknown}; -pub use middle::cstore::{CrateSource, LinkMeta}; +pub use rustc::middle::cstore::{NativeLibrary, LinkagePreference}; +pub use rustc::middle::cstore::{NativeStatic, NativeFramework, NativeUnknown}; +pub use rustc::middle::cstore::{CrateSource, LinkMeta, LibSource}; // A map from external crate numbers (as decoded from some crate file) to // local crate numbers (as generated during this session). Each external // crate may refer to types in other external crates, and each has their // own crate numbers. 
-pub type cnum_map = FnvHashMap; +pub type CrateNumMap = IndexVec; pub enum MetadataBlob { - MetadataVec(Bytes), - MetadataArchive(loader::ArchiveMetadata), + Inflated(Bytes), + Archive(locator::ArchiveMetadata), + Raw(Vec), } -/// Holds information about a codemap::FileMap imported from another crate. -/// See creader::import_codemap() for more information. +/// Holds information about a syntax_pos::FileMap imported from another crate. +/// See `imported_filemaps()` for more information. pub struct ImportedFileMap { /// This FileMap's byte-offset within the codemap of its original crate - pub original_start_pos: codemap::BytePos, + pub original_start_pos: syntax_pos::BytePos, /// The end of this FileMap within the codemap of its original crate - pub original_end_pos: codemap::BytePos, + pub original_end_pos: syntax_pos::BytePos, /// The imported FileMap's representation within the local codemap - pub translated_filemap: Rc + pub translated_filemap: Rc, } -pub struct crate_metadata { - pub name: String, - pub local_path: RefCell>, - pub local_def_path: RefCell, - pub data: MetadataBlob, - pub cnum_map: RefCell, - pub cnum: ast::CrateNum, +pub struct CrateMetadata { + pub name: Symbol, + + /// Information about the extern crate that caused this crate to + /// be loaded. If this is `None`, then the crate was injected + /// (e.g., by the allocator) + pub extern_crate: Cell>, + + pub blob: MetadataBlob, + pub cnum_map: RefCell, + pub cnum: CrateNum, pub codemap_import_info: RefCell>, - pub span: codemap::Span, - pub staged_api: bool, - pub index: index::Index, - pub xref_index: index::DenseIndex, + pub root: schema::CrateRoot, + + /// For each public item in this crate, we encode a key. When the + /// crate is loaded, we read all the keys and put them in this + /// hashmap, which gives the reverse mapping. This allows us to + /// quickly retrace a `DefPath`, which is needed for incremental + /// compilation support. 
+ pub key_map: FxHashMap, + + pub dep_kind: Cell, + pub source: CrateSource, + + pub proc_macros: Option)>>, +} - /// Flag if this crate is required by an rlib version of this crate, or in - /// other words whether it was explicitly linked to. An example of a crate - /// where this is false is when an allocator crate is injected into the - /// dependency list, and therefore isn't actually needed to link an rlib. - pub explicitly_linked: Cell, +pub struct CachedInlinedItem { + /// The NodeId of the RootInlinedParent HIR map entry + pub inlined_root: ast::NodeId, + /// The local NodeId of the inlined entity + pub item_id: ast::NodeId, } pub struct CStore { - metas: RefCell>>, + pub dep_graph: DepGraph, + metas: RefCell>>, /// Map from NodeId's of local extern crate statements to crate numbers - extern_mod_crate_map: RefCell>, - used_crate_sources: RefCell>, - used_libraries: RefCell>, + extern_mod_crate_map: RefCell>, + used_libraries: RefCell>, used_link_args: RefCell>, statically_included_foreign_items: RefCell, - pub intr: Rc, + pub inlined_item_cache: RefCell>>, + pub defid_for_inlined_node: RefCell>, + pub visible_parent_map: RefCell>, } impl CStore { - pub fn new(intr: Rc) -> CStore { + pub fn new(dep_graph: &DepGraph) -> CStore { CStore { - metas: RefCell::new(FnvHashMap()), - extern_mod_crate_map: RefCell::new(FnvHashMap()), - used_crate_sources: RefCell::new(Vec::new()), + dep_graph: dep_graph.clone(), + metas: RefCell::new(FxHashMap()), + extern_mod_crate_map: RefCell::new(FxHashMap()), used_libraries: RefCell::new(Vec::new()), used_link_args: RefCell::new(Vec::new()), - intr: intr, statically_included_foreign_items: RefCell::new(NodeSet()), + visible_parent_map: RefCell::new(FxHashMap()), + inlined_item_cache: RefCell::new(FxHashMap()), + defid_for_inlined_node: RefCell::new(FxHashMap()), } } - pub fn next_crate_num(&self) -> ast::CrateNum { - self.metas.borrow().len() as ast::CrateNum + 1 + pub fn next_crate_num(&self) -> CrateNum { + 
CrateNum::new(self.metas.borrow().len() + 1) } - pub fn get_crate_data(&self, cnum: ast::CrateNum) -> Rc { + pub fn get_crate_data(&self, cnum: CrateNum) -> Rc { self.metas.borrow().get(&cnum).unwrap().clone() } - pub fn get_crate_hash(&self, cnum: ast::CrateNum) -> Svh { - let cdata = self.get_crate_data(cnum); - decoder::get_crate_hash(cdata.data()) + pub fn get_crate_hash(&self, cnum: CrateNum) -> Svh { + self.get_crate_data(cnum).hash() } - pub fn set_crate_data(&self, cnum: ast::CrateNum, data: Rc) { + pub fn set_crate_data(&self, cnum: CrateNum, data: Rc) { self.metas.borrow_mut().insert(cnum, data); } - pub fn iter_crate_data(&self, mut i: I) where - I: FnMut(ast::CrateNum, &Rc), + pub fn iter_crate_data(&self, mut i: I) + where I: FnMut(CrateNum, &Rc) { for (&k, v) in self.metas.borrow().iter() { i(k, v); } } - /// Like `iter_crate_data`, but passes source paths (if available) as well. - pub fn iter_crate_data_origins(&self, mut i: I) where - I: FnMut(ast::CrateNum, &crate_metadata, Option), - { - for (&k, v) in self.metas.borrow().iter() { - let origin = self.opt_used_crate_source(k); - origin.as_ref().map(|cs| { assert!(k == cs.cnum); }); - i(k, &**v, origin); - } - } - - pub fn add_used_crate_source(&self, src: CrateSource) { - let mut used_crate_sources = self.used_crate_sources.borrow_mut(); - if !used_crate_sources.contains(&src) { - used_crate_sources.push(src); - } - } - - pub fn opt_used_crate_source(&self, cnum: ast::CrateNum) - -> Option { - self.used_crate_sources.borrow_mut() - .iter().find(|source| source.cnum == cnum).cloned() - } - pub fn reset(&self) { self.metas.borrow_mut().clear(); self.extern_mod_crate_map.borrow_mut().clear(); - self.used_crate_sources.borrow_mut().clear(); self.used_libraries.borrow_mut().clear(); self.used_link_args.borrow_mut().clear(); self.statically_included_foreign_items.borrow_mut().clear(); } + pub fn crate_dependencies_in_rpo(&self, krate: CrateNum) -> Vec { + let mut ordering = Vec::new(); + 
self.push_dependencies_in_postorder(&mut ordering, krate); + ordering.reverse(); + ordering + } + + pub fn push_dependencies_in_postorder(&self, ordering: &mut Vec, krate: CrateNum) { + if ordering.contains(&krate) { + return; + } + + let data = self.get_crate_data(krate); + for &dep in data.cnum_map.borrow().iter() { + if dep != krate { + self.push_dependencies_in_postorder(ordering, dep); + } + } + + ordering.push(krate); + } + // This method is used when generating the command line to pass through to // system linker. The linker expects undefined symbols on the left of the // command line to be defined in libraries on the right, not the other way @@ -173,29 +184,36 @@ impl CStore { // In order to get this left-to-right dependency ordering, we perform a // topological sort of all crates putting the leaves at the right-most // positions. - pub fn do_get_used_crates(&self, prefer: LinkagePreference) - -> Vec<(ast::CrateNum, Option)> { + pub fn do_get_used_crates(&self, + prefer: LinkagePreference) + -> Vec<(CrateNum, LibSource)> { let mut ordering = Vec::new(); - fn visit(cstore: &CStore, cnum: ast::CrateNum, - ordering: &mut Vec) { - if ordering.contains(&cnum) { return } - let meta = cstore.get_crate_data(cnum); - for (_, &dep) in meta.cnum_map.borrow().iter() { - visit(cstore, dep, ordering); - } - ordering.push(cnum); - } for (&num, _) in self.metas.borrow().iter() { - visit(self, num, &mut ordering); + self.push_dependencies_in_postorder(&mut ordering, num); } info!("topological ordering: {:?}", ordering); ordering.reverse(); - let mut libs = self.used_crate_sources.borrow() + let mut libs = self.metas + .borrow() .iter() - .map(|src| (src.cnum, match prefer { - LinkagePreference::RequireDynamic => src.dylib.clone().map(|p| p.0), - LinkagePreference::RequireStatic => src.rlib.clone().map(|p| p.0), - })) + .filter_map(|(&cnum, data)| { + if data.dep_kind.get().macros_only() { return None; } + let path = match prefer { + LinkagePreference::RequireDynamic => 
data.source.dylib.clone().map(|p| p.0), + LinkagePreference::RequireStatic => data.source.rlib.clone().map(|p| p.0), + }; + let path = match path { + Some(p) => LibSource::Some(p), + None => { + if data.source.rmeta.is_some() { + LibSource::MetadataOnly + } else { + LibSource::None + } + } + }; + Some((cnum, path)) + }) .collect::>(); libs.sort_by(|&(a, _), &(b, _)| { let a = ordering.iter().position(|x| *x == a); @@ -205,14 +223,12 @@ impl CStore { libs } - pub fn add_used_library(&self, lib: String, kind: NativeLibraryKind) { - assert!(!lib.is_empty()); - self.used_libraries.borrow_mut().push((lib, kind)); + pub fn add_used_library(&self, lib: NativeLibrary) { + assert!(!lib.name.as_str().is_empty()); + self.used_libraries.borrow_mut().push(lib); } - pub fn get_used_libraries<'a>(&'a self) - -> &'a RefCell> { + pub fn get_used_libraries(&self) -> &RefCell> { &self.used_libraries } @@ -222,13 +238,11 @@ impl CStore { } } - pub fn get_used_link_args<'a>(&'a self) -> &'a RefCell > { + pub fn get_used_link_args<'a>(&'a self) -> &'a RefCell> { &self.used_link_args } - pub fn add_extern_mod_stmt_cnum(&self, - emod_id: ast::NodeId, - cnum: ast::CrateNum) { + pub fn add_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId, cnum: CrateNum) { self.extern_mod_crate_map.borrow_mut().insert(emod_id, cnum); } @@ -240,104 +254,59 @@ impl CStore { self.statically_included_foreign_items.borrow().contains(&id) } - pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option - { + pub fn do_extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option { self.extern_mod_crate_map.borrow().get(&emod_id).cloned() } } -impl crate_metadata { - pub fn data<'a>(&'a self) -> &'a [u8] { self.data.as_slice() } - pub fn name(&self) -> String { decoder::get_crate_name(self.data()) } - pub fn hash(&self) -> Svh { decoder::get_crate_hash(self.data()) } - pub fn imported_filemaps<'a>(&'a self, codemap: &codemap::CodeMap) - -> Ref<'a, Vec> { - let filemaps = self.codemap_import_info.borrow(); - 
if filemaps.is_empty() { - drop(filemaps); - let filemaps = creader::import_codemap(codemap, &self.data); - - // This shouldn't borrow twice, but there is no way to downgrade RefMut to Ref. - *self.codemap_import_info.borrow_mut() = filemaps; - self.codemap_import_info.borrow() - } else { - filemaps - } - } - - pub fn with_local_path(&self, f: F) -> T - where F: Fn(&[ast_map::PathElem]) -> T - { - let cpath = self.local_path.borrow(); - if cpath.is_empty() { - let name = ast_map::PathMod(token::intern(&self.name)); - f(&[name]) - } else { - f(cpath.as_slice()) - } +impl CrateMetadata { + pub fn name(&self) -> Symbol { + self.root.name } - - pub fn update_local_path<'a, 'b>(&self, candidate: ast_map::PathElems<'a, 'b>) { - let mut cpath = self.local_path.borrow_mut(); - let cap = cpath.len(); - match cap { - 0 => *cpath = candidate.collect(), - 1 => (), - _ => { - let candidate: SmallVector<_> = candidate.collect(); - if candidate.len() < cap { - *cpath = candidate; - } - }, - } + pub fn hash(&self) -> Svh { + self.root.hash } - - pub fn local_def_path(&self) -> ast_map::DefPath { - let local_def_path = self.local_def_path.borrow(); - if local_def_path.is_empty() { - let name = ast_map::DefPathData::DetachedCrate(token::intern(&self.name)); - vec![ast_map::DisambiguatedDefPathData { data: name, disambiguator: 0 }] - } else { - local_def_path.clone() - } + pub fn disambiguator(&self) -> Symbol { + self.root.disambiguator } - pub fn update_local_def_path(&self, candidate: ast_map::DefPath) { - let mut local_def_path = self.local_def_path.borrow_mut(); - if local_def_path.is_empty() || candidate.len() < local_def_path.len() { - *local_def_path = candidate; - } + pub fn is_staged_api(&self) -> bool { + self.get_item_attrs(CRATE_DEF_INDEX) + .iter() + .any(|attr| attr.name() == "stable" || attr.name() == "unstable") } pub fn is_allocator(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); 
attr::contains_name(&attrs, "allocator") } pub fn needs_allocator(&self) -> bool { - let attrs = decoder::get_crate_attributes(self.data()); + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); attr::contains_name(&attrs, "needs_allocator") } -} -impl MetadataBlob { - pub fn as_slice<'a>(&'a self) -> &'a [u8] { - let slice = match *self { - MetadataVec(ref vec) => &vec[..], - MetadataArchive(ref ar) => ar.as_slice(), - }; - if slice.len() < 4 { - &[] // corrupt metadata - } else { - let len = (((slice[0] as u32) << 24) | - ((slice[1] as u32) << 16) | - ((slice[2] as u32) << 8) | - ((slice[3] as u32) << 0)) as usize; - if len + 4 <= slice.len() { - &slice[4.. len + 4] - } else { - &[] // corrupt or old metadata - } - } + pub fn is_panic_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "panic_runtime") + } + + pub fn needs_panic_runtime(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "needs_panic_runtime") + } + + pub fn is_compiler_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "compiler_builtins") + } + + pub fn is_no_builtins(&self) -> bool { + let attrs = self.get_item_attrs(CRATE_DEF_INDEX); + attr::contains_name(&attrs, "no_builtins") + } + + pub fn panic_strategy(&self) -> PanicStrategy { + self.root.panic_strategy.clone() } } diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs new file mode 100644 index 0000000000000..3150f74e61e7c --- /dev/null +++ b/src/librustc_metadata/cstore_impl.rs @@ -0,0 +1,623 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use cstore; +use encoder; +use locator; +use schema; + +use rustc::middle::cstore::{InlinedItem, CrateStore, CrateSource, LibSource, DepKind, ExternCrate}; +use rustc::middle::cstore::{NativeLibrary, LinkMeta, LinkagePreference, LoadedMacro}; +use rustc::hir::def::{self, Def}; +use rustc::middle::lang_items; +use rustc::session::Session; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; + +use rustc::dep_graph::DepNode; +use rustc::hir::map as hir_map; +use rustc::hir::map::DefKey; +use rustc::mir::Mir; +use rustc::util::nodemap::{NodeSet, DefIdMap}; +use rustc_back::PanicStrategy; + +use syntax::ast; +use syntax::attr; +use syntax::parse::new_parser_from_source_str; +use syntax::symbol::Symbol; +use syntax_pos::{mk_sp, Span}; +use rustc::hir::svh::Svh; +use rustc_back::target::Target; +use rustc::hir; + +impl<'tcx> CrateStore<'tcx> for cstore::CStore { + fn describe_def(&self, def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_def(def.index) + } + + fn def_span(&self, sess: &Session, def: DefId) -> Span { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_span(def.index, sess) + } + + fn stability(&self, def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_stability(def.index) + } + + fn deprecation(&self, def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_deprecation(def.index) + } + + fn visibility(&self, def: DefId) -> ty::Visibility { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_visibility(def.index) + } + + fn closure_kind(&self, def_id: DefId) -> ty::ClosureKind + { + assert!(!def_id.is_local()); + self.dep_graph.read(DepNode::MetaData(def_id)); + self.get_crate_data(def_id.krate).closure_kind(def_id.index) + } + + fn closure_ty<'a>(&self, tcx: TyCtxt<'a, 'tcx, 
'tcx>, def_id: DefId) -> ty::ClosureTy<'tcx> { + assert!(!def_id.is_local()); + self.dep_graph.read(DepNode::MetaData(def_id)); + self.get_crate_data(def_id.krate).closure_ty(def_id.index, tcx) + } + + fn item_variances(&self, def: DefId) -> Vec { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_item_variances(def.index) + } + + fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Ty<'tcx> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_type(def.index, tcx) + } + + fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::GenericPredicates<'tcx> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_predicates(def.index, tcx) + } + + fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::GenericPredicates<'tcx> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_super_predicates(def.index, tcx) + } + + fn item_generics<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> ty::Generics<'tcx> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_generics(def.index, tcx) + } + + fn item_attrs(&self, def_id: DefId) -> Vec + { + self.dep_graph.read(DepNode::MetaData(def_id)); + self.get_crate_data(def_id.krate).get_item_attrs(def_id.index) + } + + fn trait_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> ty::TraitDef + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_trait_def(def.index, tcx) + } + + fn adt_def<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> &'tcx ty::AdtDef + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_adt_def(def.index, tcx) + } + + fn fn_arg_names(&self, did: DefId) -> Vec + { + self.dep_graph.read(DepNode::MetaData(did)); + self.get_crate_data(did.krate).get_fn_arg_names(did.index) + } + + fn 
inherent_implementations_for_type(&self, def_id: DefId) -> Vec + { + self.dep_graph.read(DepNode::MetaData(def_id)); + self.get_crate_data(def_id.krate).get_inherent_implementations_for_type(def_id.index) + } + + fn implementations_of_trait(&self, filter: Option) -> Vec + { + if let Some(def_id) = filter { + self.dep_graph.read(DepNode::MetaData(def_id)); + } + let mut result = vec![]; + self.iter_crate_data(|_, cdata| { + cdata.get_implementations_for_trait(filter, &mut result) + }); + result + } + + fn associated_item_def_ids(&self, def_id: DefId) -> Vec { + self.dep_graph.read(DepNode::MetaData(def_id)); + let mut result = vec![]; + self.get_crate_data(def_id.krate) + .each_child_of_item(def_id.index, |child| result.push(child.def.def_id())); + result + } + + fn impl_polarity(&self, def: DefId) -> hir::ImplPolarity + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_impl_polarity(def.index) + } + + fn impl_trait_ref<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option> + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_impl_trait(def.index, tcx) + } + + fn custom_coerce_unsized_kind(&self, def: DefId) + -> Option + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_custom_coerce_unsized_kind(def.index) + } + + fn impl_parent(&self, impl_def: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(impl_def)); + self.get_crate_data(impl_def.krate).get_parent_impl(impl_def.index) + } + + fn trait_of_item(&self, def_id: DefId) -> Option { + self.dep_graph.read(DepNode::MetaData(def_id)); + self.get_crate_data(def_id.krate).get_trait_of_item(def_id.index) + } + + fn associated_item<'a>(&self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) + -> Option + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_associated_item(def.index) + } + + fn is_const_fn(&self, did: DefId) -> bool + { + 
self.dep_graph.read(DepNode::MetaData(did)); + self.get_crate_data(did.krate).is_const_fn(did.index) + } + + fn is_defaulted_trait(&self, trait_def_id: DefId) -> bool + { + self.dep_graph.read(DepNode::MetaData(trait_def_id)); + self.get_crate_data(trait_def_id.krate).is_defaulted_trait(trait_def_id.index) + } + + fn is_default_impl(&self, impl_did: DefId) -> bool { + self.dep_graph.read(DepNode::MetaData(impl_did)); + self.get_crate_data(impl_did.krate).is_default_impl(impl_did.index) + } + + fn is_foreign_item(&self, did: DefId) -> bool { + self.get_crate_data(did.krate).is_foreign_item(did.index) + } + + fn is_statically_included_foreign_item(&self, id: ast::NodeId) -> bool + { + self.do_is_statically_included_foreign_item(id) + } + + fn dylib_dependency_formats(&self, cnum: CrateNum) + -> Vec<(CrateNum, LinkagePreference)> + { + self.get_crate_data(cnum).get_dylib_dependency_formats() + } + + fn dep_kind(&self, cnum: CrateNum) -> DepKind + { + self.get_crate_data(cnum).dep_kind.get() + } + + fn export_macros(&self, cnum: CrateNum) { + if self.get_crate_data(cnum).dep_kind.get() == DepKind::UnexportedMacrosOnly { + self.get_crate_data(cnum).dep_kind.set(DepKind::MacrosOnly) + } + } + + fn lang_items(&self, cnum: CrateNum) -> Vec<(DefIndex, usize)> + { + self.get_crate_data(cnum).get_lang_items() + } + + fn missing_lang_items(&self, cnum: CrateNum) + -> Vec + { + self.get_crate_data(cnum).get_missing_lang_items() + } + + fn is_staged_api(&self, cnum: CrateNum) -> bool + { + self.get_crate_data(cnum).is_staged_api() + } + + fn is_allocator(&self, cnum: CrateNum) -> bool + { + self.get_crate_data(cnum).is_allocator() + } + + fn is_panic_runtime(&self, cnum: CrateNum) -> bool + { + self.get_crate_data(cnum).is_panic_runtime() + } + + fn is_compiler_builtins(&self, cnum: CrateNum) -> bool { + self.get_crate_data(cnum).is_compiler_builtins() + } + + fn panic_strategy(&self, cnum: CrateNum) -> PanicStrategy { + self.get_crate_data(cnum).panic_strategy() + } + + fn 
crate_name(&self, cnum: CrateNum) -> Symbol + { + self.get_crate_data(cnum).name + } + + fn original_crate_name(&self, cnum: CrateNum) -> Symbol + { + self.get_crate_data(cnum).name() + } + + fn extern_crate(&self, cnum: CrateNum) -> Option + { + self.get_crate_data(cnum).extern_crate.get() + } + + fn crate_hash(&self, cnum: CrateNum) -> Svh + { + self.get_crate_hash(cnum) + } + + fn crate_disambiguator(&self, cnum: CrateNum) -> Symbol + { + self.get_crate_data(cnum).disambiguator() + } + + fn plugin_registrar_fn(&self, cnum: CrateNum) -> Option + { + self.get_crate_data(cnum).root.plugin_registrar_fn.map(|index| DefId { + krate: cnum, + index: index + }) + } + + fn native_libraries(&self, cnum: CrateNum) -> Vec + { + self.get_crate_data(cnum).get_native_libraries() + } + + fn reachable_ids(&self, cnum: CrateNum) -> Vec + { + self.get_crate_data(cnum).get_reachable_ids() + } + + fn is_no_builtins(&self, cnum: CrateNum) -> bool { + self.get_crate_data(cnum).is_no_builtins() + } + + fn def_index_for_def_key(&self, + cnum: CrateNum, + def: DefKey) + -> Option { + let cdata = self.get_crate_data(cnum); + cdata.key_map.get(&def).cloned() + } + + /// Returns the `DefKey` for a given `DefId`. This indicates the + /// parent `DefId` as well as some idea of what kind of data the + /// `DefId` refers to. + fn def_key(&self, def: DefId) -> hir_map::DefKey { + // Note: loading the def-key (or def-path) for a def-id is not + // a *read* of its metadata. This is because the def-id is + // really just an interned shorthand for a def-path, which is the + // canonical name for an item. 
+ // + // self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).def_key(def.index) + } + + fn relative_def_path(&self, def: DefId) -> Option { + // See `Note` above in `def_key()` for why this read is + // commented out: + // + // self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).def_path(def.index) + } + + fn struct_field_names(&self, def: DefId) -> Vec + { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).get_struct_field_names(def.index) + } + + fn item_children(&self, def_id: DefId) -> Vec + { + self.dep_graph.read(DepNode::MetaData(def_id)); + let mut result = vec![]; + self.get_crate_data(def_id.krate) + .each_child_of_item(def_id.index, |child| result.push(child)); + result + } + + fn load_macro(&self, id: DefId, sess: &Session) -> LoadedMacro { + let data = self.get_crate_data(id.krate); + if let Some(ref proc_macros) = data.proc_macros { + return LoadedMacro::ProcMacro(proc_macros[id.index.as_usize() - 1].1.clone()); + } + + let (name, def) = data.get_macro(id.index); + let source_name = format!("<{} macros>", name); + + // NB: Don't use parse_tts_from_source_str because it parses with quote_depth > 0. 
+ let mut parser = new_parser_from_source_str(&sess.parse_sess, source_name, def.body); + + let lo = parser.span.lo; + let body = match parser.parse_all_token_trees() { + Ok(body) => body, + Err(mut err) => { + err.emit(); + sess.abort_if_errors(); + unreachable!(); + } + }; + let local_span = mk_sp(lo, parser.prev_span.hi); + + // Mark the attrs as used + let attrs = data.get_item_attrs(id.index); + for attr in &attrs { + attr::mark_used(attr); + } + + let name = data.def_key(id.index).disambiguated_data.data + .get_opt_name().expect("no name in load_macro"); + sess.imported_macro_spans.borrow_mut() + .insert(local_span, (name.to_string(), data.get_span(id.index, sess))); + + LoadedMacro::MacroRules(ast::MacroDef { + ident: ast::Ident::with_empty_ctxt(name), + id: ast::DUMMY_NODE_ID, + span: local_span, + imported_from: None, // FIXME + allow_internal_unstable: attr::contains_name(&attrs, "allow_internal_unstable"), + attrs: attrs, + body: body, + }) + } + + fn maybe_get_item_ast<'a>(&'tcx self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> Option<(&'tcx InlinedItem, ast::NodeId)> + { + self.dep_graph.read(DepNode::MetaData(def_id)); + + match self.inlined_item_cache.borrow().get(&def_id) { + Some(&None) => { + return None; // Not inlinable + } + Some(&Some(ref cached_inlined_item)) => { + // Already inline + debug!("maybe_get_item_ast({}): already inline as node id {}", + tcx.item_path_str(def_id), cached_inlined_item.item_id); + return Some((tcx.map.expect_inlined_item(cached_inlined_item.inlined_root), + cached_inlined_item.item_id)); + } + None => { + // Not seen yet + } + } + + debug!("maybe_get_item_ast({}): inlining item", tcx.item_path_str(def_id)); + + let inlined = self.get_crate_data(def_id.krate).maybe_get_item_ast(tcx, def_id.index); + + let cache_inlined_item = |original_def_id, inlined_item_id, inlined_root_node_id| { + let cache_entry = cstore::CachedInlinedItem { + inlined_root: inlined_root_node_id, + item_id: inlined_item_id, + }; + 
self.inlined_item_cache + .borrow_mut() + .insert(original_def_id, Some(cache_entry)); + self.defid_for_inlined_node + .borrow_mut() + .insert(inlined_item_id, original_def_id); + }; + + let find_inlined_item_root = |inlined_item_id| { + let mut node = inlined_item_id; + + // If we can't find the inline root after a thousand hops, we can + // be pretty sure there's something wrong with the HIR map. + for _ in 0 .. 1000 { + let parent_node = tcx.map.get_parent_node(node); + if parent_node == node { + return node; + } + node = parent_node; + } + bug!("cycle in HIR map parent chain") + }; + + match inlined { + None => { + self.inlined_item_cache + .borrow_mut() + .insert(def_id, None); + } + Some(&InlinedItem { ref body, .. }) => { + let inlined_root_node_id = find_inlined_item_root(body.id); + cache_inlined_item(def_id, inlined_root_node_id, inlined_root_node_id); + } + } + + // We can be sure to hit the cache now + return self.maybe_get_item_ast(tcx, def_id); + } + + fn local_node_for_inlined_defid(&'tcx self, def_id: DefId) -> Option { + assert!(!def_id.is_local()); + match self.inlined_item_cache.borrow().get(&def_id) { + Some(&Some(ref cached_inlined_item)) => { + Some(cached_inlined_item.item_id) + } + Some(&None) => { + None + } + _ => { + bug!("Trying to lookup inlined NodeId for unexpected item"); + } + } + } + + fn defid_for_inlined_node(&'tcx self, node_id: ast::NodeId) -> Option { + self.defid_for_inlined_node.borrow().get(&node_id).map(|x| *x) + } + + fn get_item_mir<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId) -> Mir<'tcx> { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).maybe_get_item_mir(tcx, def.index).unwrap_or_else(|| { + bug!("get_item_mir: missing MIR for {}", tcx.item_path_str(def)) + }) + } + + fn is_item_mir_available(&self, def: DefId) -> bool { + self.dep_graph.read(DepNode::MetaData(def)); + self.get_crate_data(def.krate).is_item_mir_available(def.index) + } + + fn crates(&self) -> Vec + { + let mut 
result = vec![]; + self.iter_crate_data(|cnum, _| result.push(cnum)); + result + } + + fn used_libraries(&self) -> Vec + { + self.get_used_libraries().borrow().clone() + } + + fn used_link_args(&self) -> Vec + { + self.get_used_link_args().borrow().clone() + } + + fn metadata_filename(&self) -> &str + { + locator::METADATA_FILENAME + } + + fn metadata_section_name(&self, target: &Target) -> &str + { + locator::meta_section_name(target) + } + + fn used_crates(&self, prefer: LinkagePreference) -> Vec<(CrateNum, LibSource)> + { + self.do_get_used_crates(prefer) + } + + fn used_crate_source(&self, cnum: CrateNum) -> CrateSource + { + self.get_crate_data(cnum).source.clone() + } + + fn extern_mod_stmt_cnum(&self, emod_id: ast::NodeId) -> Option + { + self.do_extern_mod_stmt_cnum(emod_id) + } + + fn encode_metadata<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + reexports: &def::ExportMap, + link_meta: &LinkMeta, + reachable: &NodeSet) -> Vec + { + encoder::encode_metadata(tcx, self, reexports, link_meta, reachable) + } + + fn metadata_encoding_version(&self) -> &[u8] + { + schema::METADATA_HEADER + } + + /// Returns a map from a sufficiently visible external item (i.e. an external item that is + /// visible from at least one local module) to a sufficiently visible parent (considering + /// modules that re-export the external item to be parents). + fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap> { + let mut visible_parent_map = self.visible_parent_map.borrow_mut(); + if !visible_parent_map.is_empty() { return visible_parent_map; } + + use std::collections::vec_deque::VecDeque; + use std::collections::hash_map::Entry; + for cnum in (1 .. self.next_crate_num().as_usize()).map(CrateNum::new) { + let cdata = self.get_crate_data(cnum); + + match cdata.extern_crate.get() { + // Ignore crates without a corresponding local `extern crate` item. 
+ Some(extern_crate) if !extern_crate.direct => continue, + _ => {}, + } + + let mut bfs_queue = &mut VecDeque::new(); + let mut add_child = |bfs_queue: &mut VecDeque<_>, child: def::Export, parent: DefId| { + let child = child.def.def_id(); + + if self.visibility(child) != ty::Visibility::Public { + return; + } + + match visible_parent_map.entry(child) { + Entry::Occupied(mut entry) => { + // If `child` is defined in crate `cnum`, ensure + // that it is mapped to a parent in `cnum`. + if child.krate == cnum && entry.get().krate != cnum { + entry.insert(parent); + } + } + Entry::Vacant(entry) => { + entry.insert(parent); + bfs_queue.push_back(child); + } + } + }; + + bfs_queue.push_back(DefId { + krate: cnum, + index: CRATE_DEF_INDEX + }); + while let Some(def) = bfs_queue.pop_front() { + for child in self.item_children(def) { + add_child(bfs_queue, child, def); + } + } + } + + visible_parent_map + } +} diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index def5897e92d9d..fe536b69c61d5 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -10,1708 +10,1262 @@ // Decoding metadata from a single crate's metadata -#![allow(non_camel_case_types)] - -use self::Family::*; - -use cstore::{self, crate_metadata}; -use common::*; -use encoder::def_to_u64; -use index; -use tls_context; -use tydecode::TyDecoder; - -use rustc::back::svh::Svh; -use rustc::front::map as hir_map; -use rustc::util::nodemap::FnvHashMap; -use rustc_front::hir; - -use middle::cstore::{LOCAL_CRATE, FoundAst, InlinedItem, LinkagePreference}; -use middle::cstore::{DefLike, DlDef, DlField, DlImpl, tls}; -use middle::def; -use middle::def_id::{DefId, DefIndex}; -use middle::lang_items; -use middle::subst; -use middle::ty::{ImplContainer, TraitContainer}; -use middle::ty::{self, Ty, TypeFoldable}; - -use rustc::mir; -use rustc::mir::visit::MutVisitor; - -use std::cell::Cell; -use std::io::prelude::*; +use astencode::decode_inlined_item; +use 
cstore::{self, CrateMetadata, MetadataBlob, NativeLibrary}; +use index::Index; +use schema::*; + +use rustc::hir::map as hir_map; +use rustc::hir::map::{DefKey, DefPathData}; +use rustc::util::nodemap::FxHashMap; +use rustc::hir; +use rustc::hir::intravisit::IdRange; + +use rustc::middle::cstore::{InlinedItem, LinkagePreference}; +use rustc::hir::def::{self, Def, CtorKind}; +use rustc::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE}; +use rustc::middle::lang_items; +use rustc::session::Session; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::Substs; + +use rustc_const_math::ConstInt; + +use rustc::mir::Mir; + +use std::borrow::Cow; +use std::cell::Ref; use std::io; -use std::rc::Rc; +use std::mem; use std::str; +use std::u32; -use rbml::reader; -use rbml; -use serialize::Decodable; +use rustc_serialize::{Decodable, Decoder, SpecializedDecoder, opaque}; use syntax::attr; -use syntax::parse::token::{IdentInterner, special_idents}; -use syntax::parse::token; -use syntax::ast; -use syntax::abi; -use syntax::codemap::{self, Span}; -use syntax::print::pprust; -use syntax::ptr::P; - - -pub type Cmd<'a> = &'a crate_metadata; - -impl crate_metadata { - fn get_item(&self, item_id: DefIndex) -> Option { - self.index.lookup_item(self.data(), item_id).map(|pos| { - reader::doc_at(self.data(), pos as usize).unwrap().doc - }) +use syntax::ast::{self, NodeId}; +use syntax::codemap; +use syntax_pos::{self, Span, BytePos, Pos}; + +pub struct DecodeContext<'a, 'tcx: 'a> { + opaque: opaque::Decoder<'a>, + cdata: Option<&'a CrateMetadata>, + sess: Option<&'a Session>, + tcx: Option>, + from_id_range: IdRange, + to_id_range: IdRange, + + // Cache the last used filemap for translating spans as an optimization. + last_filemap_index: usize, + + lazy_state: LazyState, +} + +/// Abstract over the various ways one can create metadata decoders. 
+pub trait Metadata<'a, 'tcx>: Copy { + fn raw_bytes(self) -> &'a [u8]; + fn cdata(self) -> Option<&'a CrateMetadata> { None } + fn sess(self) -> Option<&'a Session> { None } + fn tcx(self) -> Option> { None } + + fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> { + let id_range = IdRange { + min: NodeId::from_u32(u32::MIN), + max: NodeId::from_u32(u32::MAX), + }; + let tcx = self.tcx(); + DecodeContext { + opaque: opaque::Decoder::new(self.raw_bytes(), pos), + cdata: self.cdata(), + sess: self.sess().or(tcx.map(|tcx| tcx.sess)), + tcx: tcx, + from_id_range: id_range, + to_id_range: id_range, + last_filemap_index: 0, + lazy_state: LazyState::NoNode, + } } +} - fn lookup_item(&self, item_id: DefIndex) -> rbml::Doc { - match self.get_item(item_id) { - None => panic!("lookup_item: id not found: {:?}", item_id), - Some(d) => d +impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a MetadataBlob { + fn raw_bytes(self) -> &'a [u8] { + match *self { + MetadataBlob::Inflated(ref vec) => vec, + MetadataBlob::Archive(ref ar) => ar.as_slice(), + MetadataBlob::Raw(ref vec) => vec, } } } -pub fn load_index(data: &[u8]) -> index::Index { - let index = reader::get_doc(rbml::Doc::new(data), tag_index); - index::Index::from_rbml(index) +impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a CrateMetadata { + fn raw_bytes(self) -> &'a [u8] { + self.blob.raw_bytes() + } + fn cdata(self) -> Option<&'a CrateMetadata> { + Some(self) + } } -pub fn crate_rustc_version(data: &[u8]) -> Option { - let doc = rbml::Doc::new(data); - reader::maybe_get_doc(doc, tag_rustc_version).map(|s| s.as_str()) +impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadata, &'a Session) { + fn raw_bytes(self) -> &'a [u8] { + self.0.raw_bytes() + } + fn cdata(self) -> Option<&'a CrateMetadata> { + Some(self.0) + } + fn sess(self) -> Option<&'a Session> { + Some(&self.1) + } } -pub fn load_xrefs(data: &[u8]) -> index::DenseIndex { - let index = reader::get_doc(rbml::Doc::new(data), tag_xref_index); - 
index::DenseIndex::from_buf(index.data, index.start, index.end) +impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadata, TyCtxt<'a, 'tcx, 'tcx>) { + fn raw_bytes(self) -> &'a [u8] { + self.0.raw_bytes() + } + fn cdata(self) -> Option<&'a CrateMetadata> { + Some(self.0) + } + fn tcx(self) -> Option> { + Some(self.1) + } } -#[derive(Debug, PartialEq)] -enum Family { - ImmStatic, // c - MutStatic, // b - Fn, // f - CtorFn, // o - StaticMethod, // F - Method, // h - Type, // y - Mod, // m - ForeignMod, // n - Enum, // t - StructVariant, // V - TupleVariant, // v - UnitVariant, // w - Impl, // i - DefaultImpl, // d - Trait, // I - Struct, // S - TupleStruct, // s - UnitStruct, // u - PublicField, // g - InheritedField, // N - Constant, // C -} +// HACK(eddyb) Only used by astencode to customize the from/to IdRange's. +impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a CrateMetadata, TyCtxt<'a, 'tcx, 'tcx>, [IdRange; 2]) { + fn raw_bytes(self) -> &'a [u8] { + self.0.raw_bytes() + } + fn cdata(self) -> Option<&'a CrateMetadata> { + Some(self.0) + } + fn tcx(self) -> Option> { + Some(self.1) + } -fn item_family(item: rbml::Doc) -> Family { - let fam = reader::get_doc(item, tag_items_data_item_family); - match reader::doc_as_u8(fam) as char { - 'C' => Constant, - 'c' => ImmStatic, - 'b' => MutStatic, - 'f' => Fn, - 'o' => CtorFn, - 'F' => StaticMethod, - 'h' => Method, - 'y' => Type, - 'm' => Mod, - 'n' => ForeignMod, - 't' => Enum, - 'V' => StructVariant, - 'v' => TupleVariant, - 'w' => UnitVariant, - 'i' => Impl, - 'd' => DefaultImpl, - 'I' => Trait, - 'S' => Struct, - 's' => TupleStruct, - 'u' => UnitStruct, - 'g' => PublicField, - 'N' => InheritedField, - c => panic!("unexpected family char: {}", c) + fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> { + let mut dcx = (self.0, self.1).decoder(pos); + dcx.from_id_range = self.2[0]; + dcx.to_id_range = self.2[1]; + dcx } } -fn item_visibility(item: rbml::Doc) -> hir::Visibility { - match reader::maybe_get_doc(item, 
tag_items_data_item_visibility) { - None => hir::Public, - Some(visibility_doc) => { - match reader::doc_as_u8(visibility_doc) as char { - 'y' => hir::Public, - 'i' => hir::Inherited, - _ => panic!("unknown visibility character") - } - } +impl<'a, 'tcx: 'a, T: Decodable> Lazy { + pub fn decode>(self, meta: M) -> T { + let mut dcx = meta.decoder(self.position); + dcx.lazy_state = LazyState::NodeStart(self.position); + T::decode(&mut dcx).unwrap() } } -fn fn_constness(item: rbml::Doc) -> hir::Constness { - match reader::maybe_get_doc(item, tag_items_data_item_constness) { - None => hir::Constness::NotConst, - Some(constness_doc) => { - match reader::doc_as_u8(constness_doc) as char { - 'c' => hir::Constness::Const, - 'n' => hir::Constness::NotConst, - _ => panic!("unknown constness character") - } - } +impl<'a, 'tcx: 'a, T: Decodable> LazySeq { + pub fn decode>(self, meta: M) -> impl Iterator + 'a { + let mut dcx = meta.decoder(self.position); + dcx.lazy_state = LazyState::NodeStart(self.position); + (0..self.len).map(move |_| T::decode(&mut dcx).unwrap()) } } -fn item_sort(item: rbml::Doc) -> Option { - reader::tagged_docs(item, tag_item_trait_item_sort).nth(0).map(|doc| { - doc.as_str_slice().as_bytes()[0] as char - }) -} +impl<'a, 'tcx> DecodeContext<'a, 'tcx> { + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx.expect("missing TyCtxt in DecodeContext") + } -fn item_symbol(item: rbml::Doc) -> String { - reader::get_doc(item, tag_items_data_item_symbol).as_str().to_string() -} + pub fn cdata(&self) -> &'a CrateMetadata { + self.cdata.expect("missing CrateMetadata in DecodeContext") + } -fn translated_def_id(cdata: Cmd, d: rbml::Doc) -> DefId { - let id = reader::doc_as_u64(d); - let index = DefIndex::new((id & 0xFFFF_FFFF) as usize); - let def_id = DefId { krate: (id >> 32) as u32, index: index }; - translate_def_id(cdata, def_id) -} + fn with_position R, R>(&mut self, pos: usize, f: F) -> R { + let new_opaque = opaque::Decoder::new(self.opaque.data, pos); 
+ let old_opaque = mem::replace(&mut self.opaque, new_opaque); + let old_state = mem::replace(&mut self.lazy_state, LazyState::NoNode); + let r = f(self); + self.opaque = old_opaque; + self.lazy_state = old_state; + r + } -fn item_parent_item(cdata: Cmd, d: rbml::Doc) -> Option { - reader::tagged_docs(d, tag_items_data_parent_item).nth(0).map(|did| { - translated_def_id(cdata, did) - }) + fn read_lazy_distance(&mut self, min_size: usize) -> Result::Error> { + let distance = self.read_usize()?; + let position = match self.lazy_state { + LazyState::NoNode => bug!("read_lazy_distance: outside of a metadata node"), + LazyState::NodeStart(start) => { + assert!(distance + min_size <= start); + start - distance - min_size + } + LazyState::Previous(last_min_end) => last_min_end + distance, + }; + self.lazy_state = LazyState::Previous(position + min_size); + Ok(position) + } } -fn item_require_parent_item(cdata: Cmd, d: rbml::Doc) -> DefId { - translated_def_id(cdata, reader::get_doc(d, tag_items_data_parent_item)) +macro_rules! decoder_methods { + ($($name:ident -> $ty:ty;)*) => { + $(fn $name(&mut self) -> Result<$ty, Self::Error> { + self.opaque.$name() + })* + } } -fn item_def_id(d: rbml::Doc, cdata: Cmd) -> DefId { - translated_def_id(cdata, reader::get_doc(d, tag_def_id)) -} +impl<'doc, 'tcx> Decoder for DecodeContext<'doc, 'tcx> { + type Error = as Decoder>::Error; -fn reexports<'a>(d: rbml::Doc<'a>) -> reader::TaggedDocsIterator<'a> { - reader::tagged_docs(d, tag_items_data_item_reexport) -} + decoder_methods! 
{ + read_nil -> (); -fn variant_disr_val(d: rbml::Doc) -> Option { - reader::maybe_get_doc(d, tag_disr_val).and_then(|val_doc| { - reader::with_doc_data(val_doc, |data| { - str::from_utf8(data).ok().and_then(|s| s.parse().ok()) - }) - }) -} + read_u64 -> u64; + read_u32 -> u32; + read_u16 -> u16; + read_u8 -> u8; + read_usize -> usize; -fn doc_type<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Ty<'tcx> { - let tp = reader::get_doc(doc, tag_items_data_item_type); - TyDecoder::with_doc(tcx, cdata.cnum, tp, - &mut |did| translate_def_id(cdata, did)) - .parse_ty() -} + read_i64 -> i64; + read_i32 -> i32; + read_i16 -> i16; + read_i8 -> i8; + read_isize -> isize; -fn maybe_doc_type<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Option> { - reader::maybe_get_doc(doc, tag_items_data_item_type).map(|tp| { - TyDecoder::with_doc(tcx, cdata.cnum, tp, - &mut |did| translate_def_id(cdata, did)) - .parse_ty() - }) -} + read_bool -> bool; + read_f64 -> f64; + read_f32 -> f32; + read_char -> char; + read_str -> Cow; + } -pub fn item_type<'tcx>(_item_id: DefId, item: rbml::Doc, - tcx: &ty::ctxt<'tcx>, cdata: Cmd) -> Ty<'tcx> { - doc_type(item, tcx, cdata) + fn error(&mut self, err: &str) -> Self::Error { + self.opaque.error(err) + } } -fn doc_trait_ref<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) - -> ty::TraitRef<'tcx> { - TyDecoder::with_doc(tcx, cdata.cnum, doc, - &mut |did| translate_def_id(cdata, did)) - .parse_trait_ref() +impl<'a, 'tcx, T> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + Ok(Lazy::with_position(self.read_lazy_distance(Lazy::::min_size())?)) + } } -fn item_trait_ref<'tcx>(doc: rbml::Doc, tcx: &ty::ctxt<'tcx>, cdata: Cmd) - -> ty::TraitRef<'tcx> { - let tp = reader::get_doc(doc, tag_item_trait_ref); - doc_trait_ref(tp, tcx, cdata) +impl<'a, 'tcx, T> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + 
let len = self.read_usize()?; + let position = if len == 0 { + 0 + } else { + self.read_lazy_distance(LazySeq::::min_size(len))? + }; + Ok(LazySeq::with_position_and_length(position, len)) + } } -fn item_path(item_doc: rbml::Doc) -> Vec { - let path_doc = reader::get_doc(item_doc, tag_path); - reader::docs(path_doc).filter_map(|(tag, elt_doc)| { - if tag == tag_path_elem_mod { - let s = elt_doc.as_str_slice(); - Some(hir_map::PathMod(token::intern(s))) - } else if tag == tag_path_elem_name { - let s = elt_doc.as_str_slice(); - Some(hir_map::PathName(token::intern(s))) - } else { - // ignore tag_path_len element - None +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let id = u32::decode(self)?; + + // from_id_range should be non-empty + assert!(!self.from_id_range.empty()); + // Make sure that translating the NodeId will actually yield a + // meaningful result + if !self.from_id_range.contains(NodeId::from_u32(id)) { + bug!("NodeId::decode: {} out of DecodeContext range ({:?} -> {:?})", + id, + self.from_id_range, + self.to_id_range); } - }).collect() -} -fn item_name(intr: &IdentInterner, item: rbml::Doc) -> ast::Name { - let name = reader::get_doc(item, tag_paths_data_name); - let string = name.as_str_slice(); - match intr.find(string) { - None => token::intern(string), - Some(val) => val, + // Use wrapping arithmetic because otherwise it introduces control flow. + // Maybe we should just have the control flow? -- aatch + Ok(NodeId::from_u32(id.wrapping_sub(self.from_id_range.min.as_u32()) + .wrapping_add(self.to_id_range.min.as_u32()))) } } -fn item_to_def_like(cdata: Cmd, item: rbml::Doc, did: DefId) -> DefLike { - let fam = item_family(item); - match fam { - Constant => { - // Check whether we have an associated const item. - match item_sort(item) { - Some('C') | Some('c') => { - DlDef(def::DefAssociatedConst(did)) - } - _ => { - // Regular const item. 
- DlDef(def::DefConst(did)) - } - } - } - ImmStatic => DlDef(def::DefStatic(did, false)), - MutStatic => DlDef(def::DefStatic(did, true)), - Struct | TupleStruct | UnitStruct => DlDef(def::DefStruct(did)), - Fn => DlDef(def::DefFn(did, false)), - CtorFn => DlDef(def::DefFn(did, true)), - Method | StaticMethod => { - DlDef(def::DefMethod(did)) - } - Type => { - if item_sort(item) == Some('t') { - let trait_did = item_require_parent_item(cdata, item); - DlDef(def::DefAssociatedTy(trait_did, did)) - } else { - DlDef(def::DefTy(did, false)) - } - } - Mod => DlDef(def::DefMod(did)), - ForeignMod => DlDef(def::DefForeignMod(did)), - StructVariant => { - let enum_did = item_require_parent_item(cdata, item); - DlDef(def::DefVariant(enum_did, did, true)) - } - TupleVariant | UnitVariant => { - let enum_did = item_require_parent_item(cdata, item); - DlDef(def::DefVariant(enum_did, did, false)) +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let cnum = CrateNum::from_u32(u32::decode(self)?); + if cnum == LOCAL_CRATE { + Ok(self.cdata().cnum) + } else { + Ok(self.cdata().cnum_map.borrow()[cnum]) } - Trait => DlDef(def::DefTrait(did)), - Enum => DlDef(def::DefTy(did, true)), - Impl | DefaultImpl => DlImpl(did), - PublicField | InheritedField => DlField, } } -fn parse_unsafety(item_doc: rbml::Doc) -> hir::Unsafety { - let unsafety_doc = reader::get_doc(item_doc, tag_unsafety); - if reader::doc_as_u8(unsafety_doc) != 0 { - hir::Unsafety::Unsafe - } else { - hir::Unsafety::Normal - } -} +impl<'a, 'tcx> SpecializedDecoder for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result { + let lo = BytePos::decode(self)?; + let hi = BytePos::decode(self)?; -fn parse_paren_sugar(item_doc: rbml::Doc) -> bool { - let paren_sugar_doc = reader::get_doc(item_doc, tag_paren_sugar); - reader::doc_as_u8(paren_sugar_doc) != 0 -} + let sess = if let Some(sess) = self.sess { + sess + } else { + return 
Ok(syntax_pos::mk_sp(lo, hi)); + }; -fn parse_polarity(item_doc: rbml::Doc) -> hir::ImplPolarity { - let polarity_doc = reader::get_doc(item_doc, tag_polarity); - if reader::doc_as_u8(polarity_doc) != 0 { - hir::ImplPolarity::Negative - } else { - hir::ImplPolarity::Positive - } -} + let (lo, hi) = if lo > hi { + // Currently macro expansion sometimes produces invalid Span values + // where lo > hi. In order not to crash the compiler when trying to + // translate these values, let's transform them into something we + // can handle (and which will produce useful debug locations at + // least some of the time). + // This workaround is only necessary as long as macro expansion is + // not fixed. FIXME(#23480) + (lo, lo) + } else { + (lo, hi) + }; -fn parse_associated_type_names(item_doc: rbml::Doc) -> Vec { - let names_doc = reader::get_doc(item_doc, tag_associated_type_names); - reader::tagged_docs(names_doc, tag_associated_type_name) - .map(|name_doc| token::intern(name_doc.as_str_slice())) - .collect() -} + let imported_filemaps = self.cdata().imported_filemaps(&sess.codemap()); + let filemap = { + // Optimize for the case that most spans within a translated item + // originate from the same filemap. 
+ let last_filemap = &imported_filemaps[self.last_filemap_index]; -pub fn get_trait_def<'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: &ty::ctxt<'tcx>) -> ty::TraitDef<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics); - let unsafety = parse_unsafety(item_doc); - let associated_type_names = parse_associated_type_names(item_doc); - let paren_sugar = parse_paren_sugar(item_doc); - - ty::TraitDef::new(unsafety, - paren_sugar, - generics, - item_trait_ref(item_doc, tcx, cdata), - associated_type_names) -} + if lo >= last_filemap.original_start_pos && lo <= last_filemap.original_end_pos && + hi >= last_filemap.original_start_pos && + hi <= last_filemap.original_end_pos { + last_filemap + } else { + let mut a = 0; + let mut b = imported_filemaps.len(); -pub fn get_adt_def<'tcx>(intr: &IdentInterner, - cdata: Cmd, - item_id: DefIndex, - tcx: &ty::ctxt<'tcx>) -> ty::AdtDefMaster<'tcx> -{ - fn family_to_variant_kind<'tcx>(family: Family, tcx: &ty::ctxt<'tcx>) -> ty::VariantKind { - match family { - Struct | StructVariant => ty::VariantKind::Struct, - TupleStruct | TupleVariant => ty::VariantKind::Tuple, - UnitStruct | UnitVariant => ty::VariantKind::Unit, - _ => tcx.sess.bug(&format!("unexpected family: {:?}", family)), - } - } - fn get_enum_variants<'tcx>(intr: &IdentInterner, - cdata: Cmd, - doc: rbml::Doc, - tcx: &ty::ctxt<'tcx>) -> Vec> { - let mut disr_val = 0; - reader::tagged_docs(doc, tag_items_data_item_variant).map(|p| { - let did = translated_def_id(cdata, p); - let item = cdata.lookup_item(did.index); - - if let Some(disr) = variant_disr_val(item) { - disr_val = disr; - } - let disr = disr_val; - disr_val = disr_val.wrapping_add(1); - - ty::VariantDefData { - did: did, - name: item_name(intr, item), - fields: get_variant_fields(intr, cdata, item, tcx), - disr_val: disr, - kind: family_to_variant_kind(item_family(item), tcx), + while b - a > 1 { + let m = (a + b) / 2; + if 
imported_filemaps[m].original_start_pos > lo { + b = m; + } else { + a = m; + } + } + + self.last_filemap_index = a; + &imported_filemaps[a] } - }).collect() - } - fn get_variant_fields<'tcx>(intr: &IdentInterner, - cdata: Cmd, - doc: rbml::Doc, - tcx: &ty::ctxt<'tcx>) -> Vec> { - reader::tagged_docs(doc, tag_item_field).map(|f| { - let ff = item_family(f); - match ff { - PublicField | InheritedField => {}, - _ => tcx.sess.bug(&format!("expected field, found {:?}", ff)) - }; - ty::FieldDefData::new(item_def_id(f, cdata), - item_name(intr, f), - struct_field_family_to_visibility(ff)) - }).chain(reader::tagged_docs(doc, tag_item_unnamed_field).map(|f| { - let ff = item_family(f); - ty::FieldDefData::new(item_def_id(f, cdata), - special_idents::unnamed_field.name, - struct_field_family_to_visibility(ff)) - })).collect() - } - fn get_struct_variant<'tcx>(intr: &IdentInterner, - cdata: Cmd, - doc: rbml::Doc, - did: DefId, - tcx: &ty::ctxt<'tcx>) -> ty::VariantDefData<'tcx, 'tcx> { - ty::VariantDefData { - did: did, - name: item_name(intr, doc), - fields: get_variant_fields(intr, cdata, doc, tcx), - disr_val: 0, - kind: family_to_variant_kind(item_family(doc), tcx), - } + }; + + let lo = (lo - filemap.original_start_pos) + filemap.translated_filemap.start_pos; + let hi = (hi - filemap.original_start_pos) + filemap.translated_filemap.start_pos; + + Ok(syntax_pos::mk_sp(lo, hi)) } +} - let doc = cdata.lookup_item(item_id); - let did = DefId { krate: cdata.cnum, index: item_id }; - let (kind, variants) = match item_family(doc) { - Enum => { - (ty::AdtKind::Enum, - get_enum_variants(intr, cdata, doc, tcx)) - } - Struct | TupleStruct | UnitStruct => { - let ctor_did = - reader::maybe_get_doc(doc, tag_items_data_item_struct_ctor). 
- map_or(did, |ctor_doc| translated_def_id(cdata, ctor_doc)); - (ty::AdtKind::Struct, - vec![get_struct_variant(intr, cdata, doc, ctor_did, tcx)]) - } - _ => tcx.sess.bug( - &format!("get_adt_def called on a non-ADT {:?} - {:?}", - item_family(doc), did)) - }; - - let adt = tcx.intern_adt_def(did, kind, variants); - - // this needs to be done *after* the variant is interned, - // to support recursive structures - for variant in &adt.variants { - if variant.kind() == ty::VariantKind::Tuple && - adt.adt_kind() == ty::AdtKind::Enum { - // tuple-like enum variant fields aren't real items - get the types - // from the ctor. - debug!("evaluating the ctor-type of {:?}", - variant.name); - let ctor_ty = get_type(cdata, variant.did.index, tcx).ty; - debug!("evaluating the ctor-type of {:?}.. {:?}", - variant.name, - ctor_ty); - let field_tys = match ctor_ty.sty { - ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { - ref inputs, .. - }), ..}) => { - // tuple-struct constructors don't have escaping regions - assert!(!inputs.has_escaping_regions()); - inputs - }, - _ => tcx.sess.bug("tuple-variant ctor is not an ADT") +// FIXME(#36588) These impls are horribly unsound as they allow +// the caller to pick any lifetime for 'tcx, including 'static, +// by using the unspecialized proxies to them. + +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + let tcx = self.tcx(); + + // Handle shorthands first, if we have an usize > 0x80. 
+ if self.opaque.data[self.opaque.position()] & 0x80 != 0 { + let pos = self.read_usize()?; + assert!(pos >= SHORTHAND_OFFSET); + let key = ty::CReaderCacheKey { + cnum: self.cdata().cnum, + pos: pos - SHORTHAND_OFFSET, }; - for (field, &ty) in variant.fields.iter().zip(field_tys.iter()) { - field.fulfill_ty(ty); + if let Some(ty) = tcx.rcache.borrow().get(&key).cloned() { + return Ok(ty); } + + let ty = self.with_position(key.pos, Ty::decode)?; + tcx.rcache.borrow_mut().insert(key, ty); + Ok(ty) } else { - for field in &variant.fields { - debug!("evaluating the type of {:?}::{:?}", variant.name, field.name); - let ty = get_type(cdata, field.did.index, tcx).ty; - field.fulfill_ty(ty); - debug!("evaluating the type of {:?}::{:?}: {:?}", - variant.name, field.name, ty); - } + Ok(tcx.mk_ty(ty::TypeVariants::decode(self)?)) } } - - adt } -pub fn get_predicates<'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> ty::GenericPredicates<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - doc_predicates(item_doc, tcx, cdata, tag_item_generics) -} -pub fn get_super_predicates<'tcx>(cdata: Cmd, - item_id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> ty::GenericPredicates<'tcx> -{ - let item_doc = cdata.lookup_item(item_id); - doc_predicates(item_doc, tcx, cdata, tag_item_super_predicates) -} +impl<'a, 'tcx> SpecializedDecoder> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result, Self::Error> { + Ok(ty::GenericPredicates { + parent: Decodable::decode(self)?, + predicates: (0..self.read_usize()?).map(|_| { + // Handle shorthands first, if we have an usize > 0x80. 
+ if self.opaque.data[self.opaque.position()] & 0x80 != 0 { + let pos = self.read_usize()?; + assert!(pos >= SHORTHAND_OFFSET); + let pos = pos - SHORTHAND_OFFSET; -pub fn get_type<'tcx>(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt<'tcx>) - -> ty::TypeScheme<'tcx> -{ - let item_doc = cdata.lookup_item(id); - let t = item_type(DefId { krate: cdata.cnum, index: id }, item_doc, tcx, - cdata); - let generics = doc_generics(item_doc, tcx, cdata, tag_item_generics); - ty::TypeScheme { - generics: generics, - ty: t + self.with_position(pos, ty::Predicate::decode) + } else { + ty::Predicate::decode(self) + } + }) + .collect::, _>>()?, + }) } } -pub fn get_stability(cdata: Cmd, id: DefIndex) -> Option { - let item = cdata.lookup_item(id); - reader::maybe_get_doc(item, tag_items_data_item_stability).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx Substs<'tcx>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx Substs<'tcx>, Self::Error> { + Ok(self.tcx().mk_substs((0..self.read_usize()?).map(|_| Decodable::decode(self)))?) 
+ } } -pub fn get_deprecation(cdata: Cmd, id: DefIndex) -> Option { - let item = cdata.lookup_item(id); - reader::maybe_get_doc(item, tag_items_data_item_deprecation).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Region> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::Region, Self::Error> { + Ok(self.tcx().mk_region(Decodable::decode(self)?)) + } } -pub fn get_repr_attrs(cdata: Cmd, id: DefIndex) -> Vec { - let item = cdata.lookup_item(id); - match reader::maybe_get_doc(item, tag_items_data_item_repr).map(|doc| { - let mut decoder = reader::Decoder::new(doc); - Decodable::decode(&mut decoder).unwrap() - }) { - Some(attrs) => attrs, - None => Vec::new(), +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Slice>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::Slice>, Self::Error> { + Ok(self.tcx().mk_type_list((0..self.read_usize()?).map(|_| Decodable::decode(self)))?) 
} } -pub fn get_impl_polarity<'tcx>(cdata: Cmd, - id: DefIndex) - -> Option -{ - let item_doc = cdata.lookup_item(id); - let fam = item_family(item_doc); - match fam { - Family::Impl => { - Some(parse_polarity(item_doc)) - } - _ => None +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::BareFnTy<'tcx>> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::BareFnTy<'tcx>, Self::Error> { + Ok(self.tcx().mk_bare_fn(Decodable::decode(self)?)) } } -pub fn get_custom_coerce_unsized_kind<'tcx>( - cdata: Cmd, - id: DefIndex) - -> Option -{ - let item_doc = cdata.lookup_item(id); - reader::maybe_get_doc(item_doc, tag_impl_coerce_unsized_kind).map(|kind_doc| { - let mut decoder = reader::Decoder::new(kind_doc); - Decodable::decode(&mut decoder).unwrap() - }) +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::AdtDef> for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) -> Result<&'tcx ty::AdtDef, Self::Error> { + let def_id = DefId::decode(self)?; + Ok(self.tcx().lookup_adt_def(def_id)) + } } -pub fn get_impl_trait<'tcx>(cdata: Cmd, - id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> Option> -{ - let item_doc = cdata.lookup_item(id); - let fam = item_family(item_doc); - match fam { - Family::Impl | Family::DefaultImpl => { - reader::maybe_get_doc(item_doc, tag_item_trait_ref).map(|tp| { - doc_trait_ref(tp, tcx, cdata) - }) - } - _ => None +impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Slice>> + for DecodeContext<'a, 'tcx> { + fn specialized_decode(&mut self) + -> Result<&'tcx ty::Slice>, Self::Error> { + Ok(self.tcx().mk_existential_predicates((0..self.read_usize()?) + .map(|_| Decodable::decode(self)))?) 
} } -pub fn get_symbol(cdata: Cmd, id: DefIndex) -> String { - return item_symbol(cdata.lookup_item(id)); -} +impl<'a, 'tcx> MetadataBlob { + pub fn is_compatible(&self) -> bool { + self.raw_bytes().starts_with(METADATA_HEADER) + } + + pub fn get_rustc_version(&self) -> String { + Lazy::with_position(METADATA_HEADER.len() + 4).decode(self) + } -/// If you have a crate_metadata, call get_symbol instead -pub fn get_symbol_from_buf(data: &[u8], id: DefIndex) -> String { - let index = load_index(data); - let pos = index.lookup_item(data, id).unwrap(); - let doc = reader::doc_at(data, pos as usize).unwrap().doc; - item_symbol(doc) + pub fn get_root(&self) -> CrateRoot { + let slice = self.raw_bytes(); + let offset = METADATA_HEADER.len(); + let pos = (((slice[offset + 0] as u32) << 24) | ((slice[offset + 1] as u32) << 16) | + ((slice[offset + 2] as u32) << 8) | + ((slice[offset + 3] as u32) << 0)) as usize; + Lazy::with_position(pos).decode(self) + } + + /// Go through each item in the metadata and create a map from that + /// item's def-key to the item's DefIndex. + pub fn load_key_map(&self, index: LazySeq) -> FxHashMap { + index.iter_enumerated(self.raw_bytes()) + .map(|(index, item)| (item.decode(self).def_key.decode(self), index)) + .collect() + } + + pub fn list_crate_metadata(&self, out: &mut io::Write) -> io::Result<()> { + write!(out, "=External Dependencies=\n")?; + let root = self.get_root(); + for (i, dep) in root.crate_deps.decode(self).enumerate() { + write!(out, "{} {}-{}\n", i + 1, dep.name, dep.hash)?; + } + write!(out, "\n")?; + Ok(()) + } } -/// Iterates over the language items in the given crate. 
-pub fn each_lang_item(cdata: Cmd, mut f: F) -> bool where - F: FnMut(DefIndex, usize) -> bool, -{ - let root = rbml::Doc::new(cdata.data()); - let lang_items = reader::get_doc(root, tag_lang_items); - reader::tagged_docs(lang_items, tag_lang_items_item).all(|item_doc| { - let id_doc = reader::get_doc(item_doc, tag_lang_items_item_id); - let id = reader::doc_as_u32(id_doc) as usize; - let index_doc = reader::get_doc(item_doc, tag_lang_items_item_index); - let index = DefIndex::from_u32(reader::doc_as_u32(index_doc)); - - f(index, id) - }) +impl<'tcx> EntryKind<'tcx> { + fn to_def(&self, did: DefId) -> Option { + Some(match *self { + EntryKind::Const => Def::Const(did), + EntryKind::AssociatedConst(_) => Def::AssociatedConst(did), + EntryKind::ImmStatic | + EntryKind::ForeignImmStatic => Def::Static(did, false), + EntryKind::MutStatic | + EntryKind::ForeignMutStatic => Def::Static(did, true), + EntryKind::Struct(_) => Def::Struct(did), + EntryKind::Union(_) => Def::Union(did), + EntryKind::Fn(_) | + EntryKind::ForeignFn(_) => Def::Fn(did), + EntryKind::Method(_) => Def::Method(did), + EntryKind::Type => Def::TyAlias(did), + EntryKind::AssociatedType(_) => Def::AssociatedTy(did), + EntryKind::Mod(_) => Def::Mod(did), + EntryKind::Variant(_) => Def::Variant(did), + EntryKind::Trait(_) => Def::Trait(did), + EntryKind::Enum => Def::Enum(did), + EntryKind::MacroDef(_) => Def::Macro(did), + + EntryKind::ForeignMod | + EntryKind::Impl(_) | + EntryKind::DefaultImpl(_) | + EntryKind::Field | + EntryKind::Closure(_) => return None, + }) + } } -fn each_child_of_item_or_crate(intr: Rc, - cdata: Cmd, - item_doc: rbml::Doc, - mut get_crate_data: G, - mut callback: F) where - F: FnMut(DefLike, ast::Name, hir::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - // Iterate over all children. 
- for child_info_doc in reader::tagged_docs(item_doc, tag_mod_child) { - let child_def_id = translated_def_id(cdata, child_info_doc); - - // This item may be in yet another crate if it was the child of a - // reexport. - let crate_data = if child_def_id.krate == cdata.cnum { - None - } else { - Some(get_crate_data(child_def_id.krate)) - }; - let crate_data = match crate_data { - Some(ref cdata) => &**cdata, - None => cdata - }; +impl<'a, 'tcx> CrateMetadata { + fn maybe_entry(&self, item_id: DefIndex) -> Option>> { + self.root.index.lookup(self.blob.raw_bytes(), item_id) + } - // Get the item. - match crate_data.get_item(child_def_id.index) { - None => {} - Some(child_item_doc) => { - // Hand off the item to the callback. - let child_name = item_name(&*intr, child_item_doc); - let def_like = item_to_def_like(crate_data, child_item_doc, child_def_id); - let visibility = item_visibility(child_item_doc); - callback(def_like, child_name, visibility); + fn entry(&self, item_id: DefIndex) -> Entry<'tcx> { + match self.maybe_entry(item_id) { + None => { + bug!("entry: id not found: {:?} in crate {:?} with number {}", + item_id, + self.name, + self.cnum) } + Some(d) => d.decode(self), } } - // As a special case, iterate over all static methods of - // associated implementations too. This is a bit of a botch. - // --pcwalton - for inherent_impl_def_id_doc in reader::tagged_docs(item_doc, - tag_items_data_item_inherent_impl) { - let inherent_impl_def_id = item_def_id(inherent_impl_def_id_doc, cdata); - if let Some(inherent_impl_doc) = cdata.get_item(inherent_impl_def_id.index) { - for impl_item_def_id_doc in reader::tagged_docs(inherent_impl_doc, - tag_item_impl_item) { - let impl_item_def_id = item_def_id(impl_item_def_id_doc, - cdata); - if let Some(impl_method_doc) = cdata.get_item(impl_item_def_id.index) { - if let StaticMethod = item_family(impl_method_doc) { - // Hand off the static method to the callback. 
- let static_method_name = item_name(&*intr, impl_method_doc); - let static_method_def_like = item_to_def_like(cdata, impl_method_doc, - impl_item_def_id); - callback(static_method_def_like, - static_method_name, - item_visibility(impl_method_doc)); - } - } - } + fn local_def_id(&self, index: DefIndex) -> DefId { + DefId { + krate: self.cnum, + index: index, } } - for reexport_doc in reexports(item_doc) { - let def_id_doc = reader::get_doc(reexport_doc, - tag_items_data_item_reexport_def_id); - let child_def_id = translated_def_id(cdata, def_id_doc); - - let name_doc = reader::get_doc(reexport_doc, - tag_items_data_item_reexport_name); - let name = name_doc.as_str_slice(); + fn item_name(&self, item: &Entry<'tcx>) -> ast::Name { + item.def_key + .decode(self) + .disambiguated_data + .data + .get_opt_name() + .expect("no name in item_name") + } - // This reexport may be in yet another crate. - let crate_data = if child_def_id.krate == cdata.cnum { - None + pub fn get_def(&self, index: DefIndex) -> Option { + if self.proc_macros.is_some() { + Some(match index { + CRATE_DEF_INDEX => Def::Mod(self.local_def_id(index)), + _ => Def::Macro(self.local_def_id(index)), + }) } else { - Some(get_crate_data(child_def_id.krate)) - }; - let crate_data = match crate_data { - Some(ref cdata) => &**cdata, - None => cdata - }; - - // Get the item. - if let Some(child_item_doc) = crate_data.get_item(child_def_id.index) { - // Hand off the item to the callback. - let def_like = item_to_def_like(crate_data, child_item_doc, child_def_id); - // These items have a public visibility because they're part of - // a public re-export. - callback(def_like, token::intern(name), hir::Public); + self.entry(index).kind.to_def(self.local_def_id(index)) } } -} -/// Iterates over each child of the given item. 
-pub fn each_child_of_item(intr: Rc, - cdata: Cmd, - id: DefIndex, - get_crate_data: G, - callback: F) where - F: FnMut(DefLike, ast::Name, hir::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - // Find the item. - let item_doc = match cdata.get_item(id) { - None => return, - Some(item_doc) => item_doc, - }; - - each_child_of_item_or_crate(intr, - cdata, - item_doc, - get_crate_data, - callback) -} + pub fn get_span(&self, index: DefIndex, sess: &Session) -> Span { + self.entry(index).span.decode((self, sess)) + } -/// Iterates over all the top-level crate items. -pub fn each_top_level_item_of_crate(intr: Rc, - cdata: Cmd, - get_crate_data: G, - callback: F) where - F: FnMut(DefLike, ast::Name, hir::Visibility), - G: FnMut(ast::CrateNum) -> Rc, -{ - let root_doc = rbml::Doc::new(cdata.data()); - let misc_info_doc = reader::get_doc(root_doc, tag_misc_info); - let crate_items_doc = reader::get_doc(misc_info_doc, - tag_misc_info_crate_items); - - each_child_of_item_or_crate(intr, - cdata, - crate_items_doc, - get_crate_data, - callback) -} + pub fn get_trait_def(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::TraitDef { + let data = match self.entry(item_id).kind { + EntryKind::Trait(data) => data.decode(self), + _ => bug!(), + }; -pub fn get_item_path(cdata: Cmd, id: DefIndex) -> Vec { - item_path(cdata.lookup_item(id)) -} + ty::TraitDef::new(self.local_def_id(item_id), + data.unsafety, + data.paren_sugar, + self.def_path(item_id).unwrap().deterministic_hash(tcx)) + } -pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Name { - item_name(intr, cdata.lookup_item(id)) -} + fn get_variant(&self, + item: &Entry<'tcx>, + index: DefIndex) + -> (ty::VariantDef, Option) { + let data = match item.kind { + EntryKind::Variant(data) | + EntryKind::Struct(data) | + EntryKind::Union(data) => data.decode(self), + _ => bug!(), + }; -pub type DecodeInlinedItem<'a> = - Box FnMut(Cmd, - &ty::ctxt<'tcx>, - Vec, // parent_path - 
hir_map::DefPath, // parent_def_path - rbml::Doc, - DefId) - -> Result<&'tcx InlinedItem, (Vec, - hir_map::DefPath)> + 'a>; - -pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, - tcx: &ty::ctxt<'tcx>, - id: DefIndex, - mut decode_inlined_item: DecodeInlinedItem) - -> FoundAst<'tcx> { - debug!("Looking up item: {:?}", id); - let item_doc = cdata.lookup_item(id); - let item_did = item_def_id(item_doc, cdata); - let parent_path = { - let mut path = item_path(item_doc); - path.pop(); - path - }; - let parent_def_path = { - let mut def_path = def_path(cdata, id); - def_path.pop(); - def_path - }; - match decode_inlined_item(cdata, - tcx, - parent_path, - parent_def_path, - item_doc, - item_did) { - Ok(ii) => FoundAst::Found(ii), - Err((mut parent_path, mut parent_def_path)) => { - match item_parent_item(cdata, item_doc) { - Some(parent_did) => { - // Remove the last element from the paths, since we are now - // trying to inline the parent. - parent_path.pop(); - parent_def_path.pop(); - - let parent_item = cdata.lookup_item(parent_did.index); - match decode_inlined_item(cdata, - tcx, - parent_path, - parent_def_path, - parent_item, - parent_did) { - Ok(ii) => FoundAst::FoundParent(parent_did, ii), - Err(_) => FoundAst::NotFound - } + (ty::VariantDef { + did: self.local_def_id(data.struct_ctor.unwrap_or(index)), + name: self.item_name(item), + fields: item.children.decode(self).map(|index| { + let f = self.entry(index); + ty::FieldDef { + did: self.local_def_id(index), + name: self.item_name(&f), + vis: f.visibility } - None => FoundAst::NotFound - } - } + }).collect(), + disr_val: ConstInt::Infer(data.disr), + ctor_kind: data.ctor_kind, + }, data.struct_ctor) } -} -pub fn maybe_get_item_mir<'tcx>(cdata: Cmd, - tcx: &ty::ctxt<'tcx>, - id: DefIndex) - -> Option> { - let item_doc = cdata.lookup_item(id); - - return reader::maybe_get_doc(item_doc, tag_mir as usize).map(|mir_doc| { - let dcx = tls_context::DecodingContext { - crate_metadata: cdata, - tcx: tcx, + pub fn 
get_adt_def(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> &'tcx ty::AdtDef { + let item = self.entry(item_id); + let did = self.local_def_id(item_id); + let mut ctor_index = None; + let variants = if let EntryKind::Enum = item.kind { + item.children + .decode(self) + .map(|index| { + let (variant, struct_ctor) = self.get_variant(&self.entry(index), index); + assert_eq!(struct_ctor, None); + variant + }) + .collect() + } else { + let (variant, struct_ctor) = self.get_variant(&item, item_id); + ctor_index = struct_ctor; + vec![variant] }; - let mut decoder = reader::Decoder::new(mir_doc); - - let mut mir = decoder.read_opaque(|opaque_decoder, _| { - tls::enter_decoding_context(&dcx, opaque_decoder, |_, opaque_decoder| { - Decodable::decode(opaque_decoder) - }) - }).unwrap(); - - let mut def_id_and_span_translator = MirDefIdAndSpanTranslator { - crate_metadata: cdata, - codemap: tcx.sess.codemap(), - last_filemap_index_hint: Cell::new(0), + let kind = match item.kind { + EntryKind::Enum => ty::AdtKind::Enum, + EntryKind::Struct(_) => ty::AdtKind::Struct, + EntryKind::Union(_) => ty::AdtKind::Union, + _ => bug!("get_adt_def called on a non-ADT {:?}", did), }; - def_id_and_span_translator.visit_mir(&mut mir); - - mir - }); + let adt = tcx.alloc_adt_def(did, kind, variants); + if let Some(ctor_index) = ctor_index { + // Make adt definition available through constructor id as well. 
+ tcx.adt_defs.borrow_mut().insert(self.local_def_id(ctor_index), adt); + } - struct MirDefIdAndSpanTranslator<'cdata, 'codemap> { - crate_metadata: Cmd<'cdata>, - codemap: &'codemap codemap::CodeMap, - last_filemap_index_hint: Cell + adt } - impl<'v, 'cdata, 'codemap> mir::visit::MutVisitor<'v> - for MirDefIdAndSpanTranslator<'cdata, 'codemap> - { - fn visit_def_id(&mut self, def_id: &mut DefId) { - *def_id = translate_def_id(self.crate_metadata, *def_id); - } + pub fn get_predicates(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::GenericPredicates<'tcx> { + self.entry(item_id).predicates.unwrap().decode((self, tcx)) + } - fn visit_span(&mut self, span: &mut Span) { - *span = translate_span(self.crate_metadata, - self.codemap, - &self.last_filemap_index_hint, - *span); + pub fn get_super_predicates(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::GenericPredicates<'tcx> { + match self.entry(item_id).kind { + EntryKind::Trait(data) => data.decode(self).super_predicates.decode((self, tcx)), + _ => bug!(), } } -} -fn get_explicit_self(item: rbml::Doc) -> ty::ExplicitSelfCategory { - fn get_mutability(ch: u8) -> hir::Mutability { - match ch as char { - 'i' => hir::MutImmutable, - 'm' => hir::MutMutable, - _ => panic!("unknown mutability character: `{}`", ch as char), - } + pub fn get_generics(&self, + item_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::Generics<'tcx> { + self.entry(item_id).generics.unwrap().decode((self, tcx)) } - let explicit_self_doc = reader::get_doc(item, tag_item_trait_method_explicit_self); - let string = explicit_self_doc.as_str_slice(); + pub fn get_type(&self, id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + self.entry(id).ty.unwrap().decode((self, tcx)) + } - let explicit_self_kind = string.as_bytes()[0]; - match explicit_self_kind as char { - 's' => ty::ExplicitSelfCategory::Static, - 'v' => ty::ExplicitSelfCategory::ByValue, - '~' => ty::ExplicitSelfCategory::ByBox, - // FIXME(#4846) 
expl. region - '&' => { - ty::ExplicitSelfCategory::ByReference( - ty::ReEmpty, - get_mutability(string.as_bytes()[1])) + pub fn get_stability(&self, id: DefIndex) -> Option { + match self.proc_macros { + Some(_) if id != CRATE_DEF_INDEX => None, + _ => self.entry(id).stability.map(|stab| stab.decode(self)), } - _ => panic!("unknown self type code: `{}`", explicit_self_kind as char) } -} -/// Returns the def IDs of all the items in the given implementation. -pub fn get_impl_items(cdata: Cmd, impl_id: DefIndex) - -> Vec { - reader::tagged_docs(cdata.lookup_item(impl_id), tag_item_impl_item).map(|doc| { - let def_id = item_def_id(doc, cdata); - match item_sort(doc) { - Some('C') | Some('c') => ty::ConstTraitItemId(def_id), - Some('r') | Some('p') => ty::MethodTraitItemId(def_id), - Some('t') => ty::TypeTraitItemId(def_id), - _ => panic!("unknown impl item sort"), + pub fn get_deprecation(&self, id: DefIndex) -> Option { + match self.proc_macros { + Some(_) if id != CRATE_DEF_INDEX => None, + _ => self.entry(id).deprecation.map(|depr| depr.decode(self)), } - }).collect() -} - -pub fn get_trait_name(intr: Rc, - cdata: Cmd, - id: DefIndex) - -> ast::Name { - let doc = cdata.lookup_item(id); - item_name(&*intr, doc) -} + } -pub fn is_static_method(cdata: Cmd, id: DefIndex) -> bool { - let doc = cdata.lookup_item(id); - match item_sort(doc) { - Some('r') | Some('p') => { - get_explicit_self(doc) == ty::ExplicitSelfCategory::Static + pub fn get_visibility(&self, id: DefIndex) -> ty::Visibility { + match self.proc_macros { + Some(_) => ty::Visibility::Public, + _ => self.entry(id).visibility, } - _ => false } -} -pub fn get_impl_or_trait_item<'tcx>(intr: Rc, - cdata: Cmd, - id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> ty::ImplOrTraitItem<'tcx> { - let item_doc = cdata.lookup_item(id); - - let def_id = item_def_id(item_doc, cdata); - - let container_id = item_require_parent_item(cdata, item_doc); - let container_doc = cdata.lookup_item(container_id.index); - let container = 
match item_family(container_doc) { - Trait => TraitContainer(container_id), - _ => ImplContainer(container_id), - }; - - let name = item_name(&*intr, item_doc); - let vis = item_visibility(item_doc); - - match item_sort(item_doc) { - sort @ Some('C') | sort @ Some('c') => { - let ty = doc_type(item_doc, tcx, cdata); - ty::ConstTraitItem(Rc::new(ty::AssociatedConst { - name: name, - ty: ty, - vis: vis, - def_id: def_id, - container: container, - has_value: sort == Some('C') - })) - } - Some('r') | Some('p') => { - let generics = doc_generics(item_doc, tcx, cdata, tag_method_ty_generics); - let predicates = doc_predicates(item_doc, tcx, cdata, tag_method_ty_generics); - let ity = tcx.lookup_item_type(def_id).ty; - let fty = match ity.sty { - ty::TyBareFn(_, fty) => fty.clone(), - _ => tcx.sess.bug(&format!( - "the type {:?} of the method {:?} is not a function?", - ity, name)) - }; - let explicit_self = get_explicit_self(item_doc); - - ty::MethodTraitItem(Rc::new(ty::Method::new(name, - generics, - predicates, - fty, - explicit_self, - vis, - def_id, - container))) - } - Some('t') => { - let ty = maybe_doc_type(item_doc, tcx, cdata); - ty::TypeTraitItem(Rc::new(ty::AssociatedType { - name: name, - ty: ty, - vis: vis, - def_id: def_id, - container: container, - })) + fn get_impl_data(&self, id: DefIndex) -> ImplData<'tcx> { + match self.entry(id).kind { + EntryKind::Impl(data) => data.decode(self), + _ => bug!(), } - _ => panic!("unknown impl/trait item sort"), } -} -pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex) - -> Vec { - let item = cdata.lookup_item(id); - reader::tagged_docs(item, tag_item_trait_item).map(|mth| { - let def_id = item_def_id(mth, cdata); - match item_sort(mth) { - Some('C') | Some('c') => ty::ConstTraitItemId(def_id), - Some('r') | Some('p') => ty::MethodTraitItemId(def_id), - Some('t') => ty::TypeTraitItemId(def_id), - _ => panic!("unknown trait item sort"), - } - }).collect() -} + pub fn get_parent_impl(&self, id: DefIndex) -> Option { 
+ self.get_impl_data(id).parent_impl + } -pub fn get_item_variances(cdata: Cmd, id: DefIndex) -> ty::ItemVariances { - let item_doc = cdata.lookup_item(id); - let variance_doc = reader::get_doc(item_doc, tag_item_variances); - let mut decoder = reader::Decoder::new(variance_doc); - Decodable::decode(&mut decoder).unwrap() -} + pub fn get_impl_polarity(&self, id: DefIndex) -> hir::ImplPolarity { + self.get_impl_data(id).polarity + } -pub fn get_provided_trait_methods<'tcx>(intr: Rc, - cdata: Cmd, - id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> Vec>> { - let item = cdata.lookup_item(id); - - reader::tagged_docs(item, tag_item_trait_item).filter_map(|mth_id| { - let did = item_def_id(mth_id, cdata); - let mth = cdata.lookup_item(did.index); - - if item_sort(mth) == Some('p') { - let trait_item = get_impl_or_trait_item(intr.clone(), - cdata, - did.index, - tcx); - if let ty::MethodTraitItem(ref method) = trait_item { - Some((*method).clone()) - } else { - None - } - } else { - None - } - }).collect() -} + pub fn get_custom_coerce_unsized_kind(&self, + id: DefIndex) + -> Option { + self.get_impl_data(id).coerce_unsized_kind + } -pub fn get_associated_consts<'tcx>(intr: Rc, - cdata: Cmd, - id: DefIndex, - tcx: &ty::ctxt<'tcx>) - -> Vec>> { - let item = cdata.lookup_item(id); - - [tag_item_trait_item, tag_item_impl_item].iter().flat_map(|&tag| { - reader::tagged_docs(item, tag).filter_map(|ac_id| { - let did = item_def_id(ac_id, cdata); - let ac_doc = cdata.lookup_item(did.index); - - match item_sort(ac_doc) { - Some('C') | Some('c') => { - let trait_item = get_impl_or_trait_item(intr.clone(), - cdata, - did.index, - tcx); - if let ty::ConstTraitItem(ref ac) = trait_item { - Some((*ac).clone()) - } else { - None - } + pub fn get_impl_trait(&self, + id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Option> { + self.get_impl_data(id).trait_ref.map(|tr| tr.decode((self, tcx))) + } + + /// Iterates over the language items in the given crate. 
+ pub fn get_lang_items(&self) -> Vec<(DefIndex, usize)> { + self.root.lang_items.decode(self).collect() + } + + /// Iterates over each child of the given item. + pub fn each_child_of_item(&self, id: DefIndex, mut callback: F) + where F: FnMut(def::Export) + { + if let Some(ref proc_macros) = self.proc_macros { + if id == CRATE_DEF_INDEX { + for (id, &(name, _)) in proc_macros.iter().enumerate() { + let def = Def::Macro(DefId { krate: self.cnum, index: DefIndex::new(id + 1) }); + callback(def::Export { name: name, def: def }); } - _ => None } - }) - }).collect() -} + return + } -/// If node_id is the constructor of a tuple struct, retrieve the NodeId of -/// the actual type definition, otherwise, return None -pub fn get_tuple_struct_definition_if_ctor(cdata: Cmd, - node_id: DefIndex) - -> Option -{ - let item = cdata.lookup_item(node_id); - reader::tagged_docs(item, tag_items_data_item_is_tuple_struct_ctor).next().map(|_| { - item_require_parent_item(cdata, item) - }) -} + // Find the item. + let item = match self.maybe_entry(id) { + None => return, + Some(item) => item.decode(self), + }; -pub fn get_item_attrs(cdata: Cmd, - orig_node_id: DefIndex) - -> Vec { - // The attributes for a tuple struct are attached to the definition, not the ctor; - // we assume that someone passing in a tuple struct ctor is actually wanting to - // look at the definition - let node_id = get_tuple_struct_definition_if_ctor(cdata, orig_node_id); - let node_id = node_id.map(|x| x.index).unwrap_or(orig_node_id); - let item = cdata.lookup_item(node_id); - get_attributes(item) -} + // Iterate over all children. 
+ let macros_only = self.dep_kind.get().macros_only(); + for child_index in item.children.decode(self) { + if macros_only { + continue + } -pub fn get_struct_field_attrs(cdata: Cmd) -> FnvHashMap> { - let data = rbml::Doc::new(cdata.data()); - let fields = reader::get_doc(data, tag_struct_fields); - reader::tagged_docs(fields, tag_struct_field).map(|field| { - let def_id = translated_def_id(cdata, reader::get_doc(field, tag_def_id)); - let attrs = get_attributes(field); - (def_id, attrs) - }).collect() -} + // Get the item. + if let Some(child) = self.maybe_entry(child_index) { + let child = child.decode(self); + match child.kind { + EntryKind::MacroDef(..) => {} + _ if macros_only => continue, + _ => {} + } -fn struct_field_family_to_visibility(family: Family) -> hir::Visibility { - match family { - PublicField => hir::Public, - InheritedField => hir::Inherited, - _ => panic!() - } -} + // Hand off the item to the callback. + match child.kind { + // FIXME(eddyb) Don't encode these in children. 
+ EntryKind::ForeignMod => { + for child_index in child.children.decode(self) { + if let Some(def) = self.get_def(child_index) { + callback(def::Export { + def: def, + name: self.item_name(&self.entry(child_index)), + }); + } + } + continue; + } + EntryKind::Impl(_) | + EntryKind::DefaultImpl(_) => continue, -pub fn get_struct_field_names(intr: &IdentInterner, cdata: Cmd, id: DefIndex) - -> Vec { - let item = cdata.lookup_item(id); - reader::tagged_docs(item, tag_item_field).map(|an_item| { - item_name(intr, an_item) - }).chain(reader::tagged_docs(item, tag_item_unnamed_field).map(|_| { - special_idents::unnamed_field.name - })).collect() -} + _ => {} + } -fn get_meta_items(md: rbml::Doc) -> Vec> { - reader::tagged_docs(md, tag_meta_item_word).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, tag_meta_item_name); - let n = token::intern_and_get_ident(nd.as_str_slice()); - attr::mk_word_item(n) - }).chain(reader::tagged_docs(md, tag_meta_item_name_value).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, tag_meta_item_name); - let vd = reader::get_doc(meta_item_doc, tag_meta_item_value); - let n = token::intern_and_get_ident(nd.as_str_slice()); - let v = token::intern_and_get_ident(vd.as_str_slice()); - // FIXME (#623): Should be able to decode MetaNameValue variants, - // but currently the encoder just drops them - attr::mk_name_value_item_str(n, v) - })).chain(reader::tagged_docs(md, tag_meta_item_list).map(|meta_item_doc| { - let nd = reader::get_doc(meta_item_doc, tag_meta_item_name); - let n = token::intern_and_get_ident(nd.as_str_slice()); - let subitems = get_meta_items(meta_item_doc); - attr::mk_list_item(n, subitems) - })).collect() -} + let def_key = child.def_key.decode(self); + if let (Some(def), Some(name)) = + (self.get_def(child_index), def_key.disambiguated_data.data.get_opt_name()) { + callback(def::Export { + def: def, + name: name, + }); + // For non-reexport structs and variants add their constructors to children. 
+ // Reexport lists automatically contain constructors when necessary. + match def { + Def::Struct(..) => { + if let Some(ctor_def_id) = self.get_struct_ctor_def_id(child_index) { + let ctor_kind = self.get_ctor_kind(child_index); + let ctor_def = Def::StructCtor(ctor_def_id, ctor_kind); + callback(def::Export { + def: ctor_def, + name: name, + }); + } + } + Def::Variant(def_id) => { + // Braced variants, unlike structs, generate unusable names in + // value namespace, they are reserved for possible future use. + let ctor_kind = self.get_ctor_kind(child_index); + let ctor_def = Def::VariantCtor(def_id, ctor_kind); + callback(def::Export { + def: ctor_def, + name: name, + }); + } + _ => {} + } + } + } + } -fn get_attributes(md: rbml::Doc) -> Vec { - match reader::maybe_get_doc(md, tag_attributes) { - Some(attrs_d) => { - reader::tagged_docs(attrs_d, tag_attribute).map(|attr_doc| { - let is_sugared_doc = reader::doc_as_u8( - reader::get_doc(attr_doc, tag_attribute_is_sugared_doc) - ) == 1; - let meta_items = get_meta_items(attr_doc); - // Currently it's only possible to have a single meta item on - // an attribute - assert_eq!(meta_items.len(), 1); - let meta_item = meta_items.into_iter().nth(0).unwrap(); - codemap::Spanned { - node: ast::Attribute_ { - id: attr::mk_attr_id(), - style: ast::AttrStyle::Outer, - value: meta_item, - is_sugared_doc: is_sugared_doc, - }, - span: codemap::DUMMY_SP + if let EntryKind::Mod(data) = item.kind { + for exp in data.decode(self).reexports.decode(self) { + match exp.def { + Def::Macro(..) 
=> {} + _ if macros_only => continue, + _ => {} } - }).collect() - }, - None => vec![], + callback(exp); + } + } } -} - -fn list_crate_attributes(md: rbml::Doc, hash: &Svh, - out: &mut io::Write) -> io::Result<()> { - try!(write!(out, "=Crate Attributes ({})=\n", *hash)); - let r = get_attributes(md); - for attr in &r { - try!(write!(out, "{}\n", pprust::attribute_to_string(attr))); + pub fn maybe_get_item_ast(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: DefIndex) + -> Option<&'tcx InlinedItem> { + debug!("Looking up item: {:?}", id); + let item_doc = self.entry(id); + let item_did = self.local_def_id(id); + let parent_def_id = self.local_def_id(self.def_key(id).parent.unwrap()); + let mut parent_def_path = self.def_path(id).unwrap(); + parent_def_path.data.pop(); + item_doc.ast.map(|ast| { + let ast = ast.decode(self); + decode_inlined_item(self, tcx, parent_def_path, parent_def_id, ast, item_did) + }) } - write!(out, "\n\n") -} - -pub fn get_crate_attributes(data: &[u8]) -> Vec { - get_attributes(rbml::Doc::new(data)) -} - -#[derive(Clone)] -pub struct CrateDep { - pub cnum: ast::CrateNum, - pub name: String, - pub hash: Svh, - pub explicitly_linked: bool, -} - -pub fn get_crate_deps(data: &[u8]) -> Vec { - let cratedoc = rbml::Doc::new(data); - let depsdoc = reader::get_doc(cratedoc, tag_crate_deps); - - fn docstr(doc: rbml::Doc, tag_: usize) -> String { - let d = reader::get_doc(doc, tag_); - d.as_str_slice().to_string() - } - - reader::tagged_docs(depsdoc, tag_crate_dep).enumerate().map(|(crate_num, depdoc)| { - let name = docstr(depdoc, tag_crate_dep_crate_name); - let hash = Svh::new(&docstr(depdoc, tag_crate_dep_hash)); - let doc = reader::get_doc(depdoc, tag_crate_dep_explicitly_linked); - let explicitly_linked = reader::doc_as_u8(doc) != 0; - CrateDep { - cnum: crate_num as u32 + 1, - name: name, - hash: hash, - explicitly_linked: explicitly_linked, - } - }).collect() -} - -fn list_crate_deps(data: &[u8], out: &mut io::Write) -> io::Result<()> { - 
try!(write!(out, "=External Dependencies=\n")); - for dep in &get_crate_deps(data) { - try!(write!(out, "{} {}-{}\n", dep.cnum, dep.name, dep.hash)); + pub fn is_item_mir_available(&self, id: DefIndex) -> bool { + self.maybe_entry(id).and_then(|item| item.decode(self).mir).is_some() } - try!(write!(out, "\n")); - Ok(()) -} -pub fn maybe_get_crate_hash(data: &[u8]) -> Option { - let cratedoc = rbml::Doc::new(data); - reader::maybe_get_doc(cratedoc, tag_crate_hash).map(|doc| { - Svh::new(doc.as_str_slice()) - }) -} - -pub fn get_crate_hash(data: &[u8]) -> Svh { - let cratedoc = rbml::Doc::new(data); - let hashdoc = reader::get_doc(cratedoc, tag_crate_hash); - Svh::new(hashdoc.as_str_slice()) -} + pub fn maybe_get_item_mir(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + id: DefIndex) + -> Option> { + self.entry(id).mir.map(|mir| mir.decode((self, tcx))) + } -pub fn maybe_get_crate_name(data: &[u8]) -> Option { - let cratedoc = rbml::Doc::new(data); - reader::maybe_get_doc(cratedoc, tag_crate_crate_name).map(|doc| { - doc.as_str_slice().to_string() - }) -} + pub fn get_associated_item(&self, id: DefIndex) -> Option { + let item = self.entry(id); + let parent_and_name = || { + let def_key = item.def_key.decode(self); + (self.local_def_id(def_key.parent.unwrap()), + def_key.disambiguated_data.data.get_opt_name().unwrap()) + }; -pub fn get_crate_triple(data: &[u8]) -> Option { - let cratedoc = rbml::Doc::new(data); - let triple_doc = reader::maybe_get_doc(cratedoc, tag_crate_triple); - triple_doc.map(|s| s.as_str().to_string()) -} + Some(match item.kind { + EntryKind::AssociatedConst(container) => { + let (parent, name) = parent_and_name(); + ty::AssociatedItem { + name: name, + kind: ty::AssociatedKind::Const, + vis: item.visibility, + defaultness: container.defaultness(), + def_id: self.local_def_id(id), + container: container.with_def_id(parent), + method_has_self_argument: false + } + } + EntryKind::Method(data) => { + let (parent, name) = parent_and_name(); + let data = 
data.decode(self); + ty::AssociatedItem { + name: name, + kind: ty::AssociatedKind::Method, + vis: item.visibility, + defaultness: data.container.defaultness(), + def_id: self.local_def_id(id), + container: data.container.with_def_id(parent), + method_has_self_argument: data.has_self + } + } + EntryKind::AssociatedType(container) => { + let (parent, name) = parent_and_name(); + ty::AssociatedItem { + name: name, + kind: ty::AssociatedKind::Type, + vis: item.visibility, + defaultness: container.defaultness(), + def_id: self.local_def_id(id), + container: container.with_def_id(parent), + method_has_self_argument: false + } + } + _ => return None, + }) + } -pub fn get_crate_name(data: &[u8]) -> String { - maybe_get_crate_name(data).expect("no crate name in crate") -} + pub fn get_item_variances(&self, id: DefIndex) -> Vec { + self.entry(id).variances.decode(self).collect() + } -pub fn list_crate_metadata(bytes: &[u8], out: &mut io::Write) -> io::Result<()> { - let hash = get_crate_hash(bytes); - let md = rbml::Doc::new(bytes); - try!(list_crate_attributes(md, &hash, out)); - list_crate_deps(bytes, out) -} + pub fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind { + match self.entry(node_id).kind { + EntryKind::Struct(data) | + EntryKind::Union(data) | + EntryKind::Variant(data) => data.decode(self).ctor_kind, + _ => CtorKind::Fictive, + } + } -// Translates a def_id from an external crate to a def_id for the current -// compilation environment. We use this when trying to load types from -// external crates - if those types further refer to types in other crates -// then we must translate the crate number from that encoded in the external -// crate to the correct local crate number. 
-pub fn translate_def_id(cdata: Cmd, did: DefId) -> DefId { - if did.is_local() { - return DefId { krate: cdata.cnum, index: did.index }; - } - - match cdata.cnum_map.borrow().get(&did.krate) { - Some(&n) => { - DefId { - krate: n, - index: did.index, + pub fn get_struct_ctor_def_id(&self, node_id: DefIndex) -> Option { + match self.entry(node_id).kind { + EntryKind::Struct(data) => { + data.decode(self).struct_ctor.map(|index| self.local_def_id(index)) } + _ => None, } - None => panic!("didn't find a crate in the cnum_map") } -} -// Translate a DefId from the current compilation environment to a DefId -// for an external crate. -fn reverse_translate_def_id(cdata: Cmd, did: DefId) -> Option { - if did.krate == cdata.cnum { - return Some(DefId { krate: LOCAL_CRATE, index: did.index }); + pub fn get_item_attrs(&self, node_id: DefIndex) -> Vec { + if self.proc_macros.is_some() && node_id != CRATE_DEF_INDEX { + return Vec::new(); + } + // The attributes for a tuple struct are attached to the definition, not the ctor; + // we assume that someone passing in a tuple struct ctor is actually wanting to + // look at the definition + let mut item = self.entry(node_id); + let def_key = item.def_key.decode(self); + if def_key.disambiguated_data.data == DefPathData::StructCtor { + item = self.entry(def_key.parent.unwrap()); + } + self.get_attributes(&item) } - for (&local, &global) in cdata.cnum_map.borrow().iter() { - if global == did.krate { - return Some(DefId { krate: local, index: did.index }); - } + pub fn get_struct_field_names(&self, id: DefIndex) -> Vec { + self.entry(id) + .children + .decode(self) + .map(|index| self.item_name(&self.entry(index))) + .collect() } - None -} + fn get_attributes(&self, item: &Entry<'tcx>) -> Vec { + item.attributes + .decode(self) + .map(|mut attr| { + // Need new unique IDs: old thread-local IDs won't map to new threads. 
+ attr.id = attr::mk_attr_id(); + attr + }) + .collect() + } -/// Translates a `Span` from an extern crate to the corresponding `Span` -/// within the local crate's codemap. -pub fn translate_span(cdata: Cmd, - codemap: &codemap::CodeMap, - last_filemap_index_hint: &Cell, - span: codemap::Span) - -> codemap::Span { - let span = if span.lo > span.hi { - // Currently macro expansion sometimes produces invalid Span values - // where lo > hi. In order not to crash the compiler when trying to - // translate these values, let's transform them into something we - // can handle (and which will produce useful debug locations at - // least some of the time). - // This workaround is only necessary as long as macro expansion is - // not fixed. FIXME(#23480) - codemap::mk_sp(span.lo, span.lo) - } else { - span - }; - - let imported_filemaps = cdata.imported_filemaps(&codemap); - let filemap = { - // Optimize for the case that most spans within a translated item - // originate from the same filemap. - let last_filemap_index = last_filemap_index_hint.get(); - let last_filemap = &imported_filemaps[last_filemap_index]; - - if span.lo >= last_filemap.original_start_pos && - span.lo <= last_filemap.original_end_pos && - span.hi >= last_filemap.original_start_pos && - span.hi <= last_filemap.original_end_pos { - last_filemap - } else { - let mut a = 0; - let mut b = imported_filemaps.len(); - - while b - a > 1 { - let m = (a + b) / 2; - if imported_filemaps[m].original_start_pos > span.lo { - b = m; - } else { - a = m; - } + // Translate a DefId from the current compilation environment to a DefId + // for an external crate. 
+ fn reverse_translate_def_id(&self, did: DefId) -> Option { + for (local, &global) in self.cnum_map.borrow().iter_enumerated() { + if global == did.krate { + return Some(DefId { + krate: local, + index: did.index, + }); } - - last_filemap_index_hint.set(a); - &imported_filemaps[a] } - }; - let lo = (span.lo - filemap.original_start_pos) + - filemap.translated_filemap.start_pos; - let hi = (span.hi - filemap.original_start_pos) + - filemap.translated_filemap.start_pos; - - codemap::mk_sp(lo, hi) -} + None + } -pub fn each_inherent_implementation_for_type(cdata: Cmd, - id: DefIndex, - mut callback: F) - where F: FnMut(DefId), -{ - let item_doc = cdata.lookup_item(id); - for impl_doc in reader::tagged_docs(item_doc, tag_items_data_item_inherent_impl) { - if reader::maybe_get_doc(impl_doc, tag_item_trait_ref).is_none() { - callback(item_def_id(impl_doc, cdata)); - } + pub fn get_inherent_implementations_for_type(&self, id: DefIndex) -> Vec { + self.entry(id) + .inherent_impls + .decode(self) + .map(|index| self.local_def_id(index)) + .collect() } -} -pub fn each_implementation_for_trait(cdata: Cmd, - def_id: DefId, - mut callback: F) where - F: FnMut(DefId), -{ - // Do a reverse lookup beforehand to avoid touching the crate_num - // hash map in the loop below. - if let Some(crate_local_did) = reverse_translate_def_id(cdata, def_id) { - let def_id_u64 = def_to_u64(crate_local_did); - - let impls_doc = reader::get_doc(rbml::Doc::new(cdata.data()), tag_impls); - for trait_doc in reader::tagged_docs(impls_doc, tag_impls_trait) { - let trait_def_id = reader::get_doc(trait_doc, tag_def_id); - if reader::doc_as_u64(trait_def_id) != def_id_u64 { + pub fn get_implementations_for_trait(&self, filter: Option, result: &mut Vec) { + // Do a reverse lookup beforehand to avoid touching the crate_num + // hash map in the loop below. 
+ let filter = match filter.map(|def_id| self.reverse_translate_def_id(def_id)) { + Some(Some(def_id)) => Some((def_id.krate.as_u32(), def_id.index)), + Some(None) => return, + None => None, + }; + + // FIXME(eddyb) Make this O(1) instead of O(n). + for trait_impls in self.root.impls.decode(self) { + if filter.is_some() && filter != Some(trait_impls.trait_id) { continue; } - for impl_doc in reader::tagged_docs(trait_doc, tag_impls_trait_impl) { - callback(translated_def_id(cdata, impl_doc)); + + result.extend(trait_impls.impls.decode(self).map(|index| self.local_def_id(index))); + + if filter.is_some() { + break; } } } -} -pub fn get_trait_of_item(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt) - -> Option { - let item_doc = cdata.lookup_item(id); - let parent_item_id = match item_parent_item(cdata, item_doc) { - None => return None, - Some(item_id) => item_id, - }; - let parent_item_doc = cdata.lookup_item(parent_item_id.index); - match item_family(parent_item_doc) { - Trait => Some(item_def_id(parent_item_doc, cdata)), - Impl | DefaultImpl => { - reader::maybe_get_doc(parent_item_doc, tag_item_trait_ref) - .map(|_| item_trait_ref(parent_item_doc, tcx, cdata).def_id) - } - _ => None + pub fn get_trait_of_item(&self, id: DefIndex) -> Option { + self.entry(id).def_key.decode(self).parent.and_then(|parent_index| { + match self.entry(parent_index).kind { + EntryKind::Trait(_) => Some(self.local_def_id(parent_index)), + _ => None, + } + }) } -} -pub fn get_native_libraries(cdata: Cmd) - -> Vec<(cstore::NativeLibraryKind, String)> { - let libraries = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_native_libraries); - reader::tagged_docs(libraries, tag_native_libraries_lib).map(|lib_doc| { - let kind_doc = reader::get_doc(lib_doc, tag_native_libraries_kind); - let name_doc = reader::get_doc(lib_doc, tag_native_libraries_name); - let kind: cstore::NativeLibraryKind = - cstore::NativeLibraryKind::from_u32(reader::doc_as_u32(kind_doc)).unwrap(); - let name = 
name_doc.as_str().to_string(); - (kind, name) - }).collect() -} + pub fn get_native_libraries(&self) -> Vec { + self.root.native_libraries.decode(self).collect() + } -pub fn get_plugin_registrar_fn(data: &[u8]) -> Option { - reader::maybe_get_doc(rbml::Doc::new(data), tag_plugin_registrar_fn) - .map(|doc| DefIndex::from_u32(reader::doc_as_u32(doc))) -} + pub fn get_dylib_dependency_formats(&self) -> Vec<(CrateNum, LinkagePreference)> { + self.root + .dylib_dependency_formats + .decode(self) + .enumerate() + .flat_map(|(i, link)| { + let cnum = CrateNum::new(i + 1); + link.map(|link| (self.cnum_map.borrow()[cnum], link)) + }) + .collect() + } -pub fn each_exported_macro(data: &[u8], intr: &IdentInterner, mut f: F) where - F: FnMut(ast::Name, Vec, String) -> bool, -{ - let macros = reader::get_doc(rbml::Doc::new(data), tag_macro_defs); - for macro_doc in reader::tagged_docs(macros, tag_macro_def) { - let name = item_name(intr, macro_doc); - let attrs = get_attributes(macro_doc); - let body = reader::get_doc(macro_doc, tag_macro_def_body); - if !f(name, attrs, body.as_str().to_string()) { - break; - } + pub fn get_missing_lang_items(&self) -> Vec { + self.root.lang_items_missing.decode(self).collect() } -} -pub fn get_dylib_dependency_formats(cdata: Cmd) - -> Vec<(ast::CrateNum, LinkagePreference)> -{ - let formats = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_dylib_dependency_formats); - let mut result = Vec::new(); - - debug!("found dylib deps: {}", formats.as_str_slice()); - for spec in formats.as_str_slice().split(',') { - if spec.is_empty() { continue } - let cnum = spec.split(':').nth(0).unwrap(); - let link = spec.split(':').nth(1).unwrap(); - let cnum: ast::CrateNum = cnum.parse().unwrap(); - let cnum = match cdata.cnum_map.borrow().get(&cnum) { - Some(&n) => n, - None => panic!("didn't find a crate in the cnum_map") + pub fn get_fn_arg_names(&self, id: DefIndex) -> Vec { + let arg_names = match self.entry(id).kind { + EntryKind::Fn(data) | + 
EntryKind::ForeignFn(data) => data.decode(self).arg_names, + EntryKind::Method(data) => data.decode(self).fn_data.arg_names, + _ => LazySeq::empty(), }; - result.push((cnum, if link == "d" { - LinkagePreference::RequireDynamic - } else { - LinkagePreference::RequireStatic - })); + arg_names.decode(self).collect() } - return result; -} - -pub fn get_missing_lang_items(cdata: Cmd) - -> Vec -{ - let items = reader::get_doc(rbml::Doc::new(cdata.data()), tag_lang_items); - reader::tagged_docs(items, tag_lang_items_missing).map(|missing_docs| { - lang_items::LangItem::from_u32(reader::doc_as_u32(missing_docs)).unwrap() - }).collect() -} -pub fn get_method_arg_names(cdata: Cmd, id: DefIndex) -> Vec { - let method_doc = cdata.lookup_item(id); - match reader::maybe_get_doc(method_doc, tag_method_argument_names) { - Some(args_doc) => { - reader::tagged_docs(args_doc, tag_method_argument_name).map(|name_doc| { - name_doc.as_str_slice().to_string() - }).collect() - }, - None => vec![], + pub fn get_reachable_ids(&self) -> Vec { + self.root.reachable_ids.decode(self).map(|index| self.local_def_id(index)).collect() } -} -pub fn get_reachable_ids(cdata: Cmd) -> Vec { - let items = reader::get_doc(rbml::Doc::new(cdata.data()), - tag_reachable_ids); - reader::tagged_docs(items, tag_reachable_id).map(|doc| { - DefId { - krate: cdata.cnum, - index: DefIndex::from_u32(reader::doc_as_u32(doc)), + pub fn get_macro(&self, id: DefIndex) -> (ast::Name, MacroDef) { + let entry = self.entry(id); + match entry.kind { + EntryKind::MacroDef(macro_def) => (self.item_name(&entry), macro_def.decode(self)), + _ => bug!(), } - }).collect() -} - -pub fn is_typedef(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match item_family(item_doc) { - Type => true, - _ => false, } -} -pub fn is_const_fn(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match fn_constness(item_doc) { - hir::Constness::Const => true, - hir::Constness::NotConst => false, 
+ pub fn is_const_fn(&self, id: DefIndex) -> bool { + let constness = match self.entry(id).kind { + EntryKind::Method(data) => data.decode(self).fn_data.constness, + EntryKind::Fn(data) => data.decode(self).constness, + _ => hir::Constness::NotConst, + }; + constness == hir::Constness::Const } -} -pub fn is_static(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match item_family(item_doc) { - ImmStatic | MutStatic => true, - _ => false, + pub fn is_foreign_item(&self, id: DefIndex) -> bool { + match self.entry(id).kind { + EntryKind::ForeignImmStatic | + EntryKind::ForeignMutStatic | + EntryKind::ForeignFn(_) => true, + _ => false, + } } -} -pub fn is_impl(cdata: Cmd, id: DefIndex) -> bool { - let item_doc = cdata.lookup_item(id); - match item_family(item_doc) { - Impl => true, - _ => false, + pub fn is_defaulted_trait(&self, trait_id: DefIndex) -> bool { + match self.entry(trait_id).kind { + EntryKind::Trait(data) => data.decode(self).has_default_impl, + _ => bug!(), + } } -} -fn doc_generics<'tcx>(base_doc: rbml::Doc, - tcx: &ty::ctxt<'tcx>, - cdata: Cmd, - tag: usize) - -> ty::Generics<'tcx> -{ - let doc = reader::get_doc(base_doc, tag); - - let mut types = subst::VecPerParamSpace::empty(); - for p in reader::tagged_docs(doc, tag_type_param_def) { - let bd = - TyDecoder::with_doc(tcx, cdata.cnum, p, - &mut |did| translate_def_id(cdata, did)) - .parse_type_param_def(); - types.push(bd.space, bd); - } - - let mut regions = subst::VecPerParamSpace::empty(); - for rp_doc in reader::tagged_docs(doc, tag_region_param_def) { - let ident_str_doc = reader::get_doc(rp_doc, - tag_region_param_def_ident); - let name = item_name(&*token::get_ident_interner(), ident_str_doc); - let def_id_doc = reader::get_doc(rp_doc, - tag_region_param_def_def_id); - let def_id = translated_def_id(cdata, def_id_doc); - - let doc = reader::get_doc(rp_doc, tag_region_param_def_space); - let space = subst::ParamSpace::from_uint(reader::doc_as_u64(doc) as usize); - 
- let doc = reader::get_doc(rp_doc, tag_region_param_def_index); - let index = reader::doc_as_u64(doc) as u32; - - let bounds = reader::tagged_docs(rp_doc, tag_items_data_region).map(|p| { - TyDecoder::with_doc(tcx, cdata.cnum, p, - &mut |did| translate_def_id(cdata, did)) - .parse_region() - }).collect(); - - regions.push(space, ty::RegionParameterDef { name: name, - def_id: def_id, - space: space, - index: index, - bounds: bounds }); - } - - ty::Generics { types: types, regions: regions } -} - -fn doc_predicate<'tcx>(cdata: Cmd, - doc: rbml::Doc, - tcx: &ty::ctxt<'tcx>) - -> ty::Predicate<'tcx> -{ - let predicate_pos = cdata.xref_index.lookup( - cdata.data(), reader::doc_as_u32(doc)).unwrap() as usize; - TyDecoder::new( - cdata.data(), cdata.cnum, predicate_pos, tcx, - &mut |did| translate_def_id(cdata, did) - ).parse_predicate() -} + pub fn is_default_impl(&self, impl_id: DefIndex) -> bool { + match self.entry(impl_id).kind { + EntryKind::DefaultImpl(_) => true, + _ => false, + } + } -fn doc_predicates<'tcx>(base_doc: rbml::Doc, - tcx: &ty::ctxt<'tcx>, - cdata: Cmd, - tag: usize) - -> ty::GenericPredicates<'tcx> -{ - let doc = reader::get_doc(base_doc, tag); + pub fn closure_kind(&self, closure_id: DefIndex) -> ty::ClosureKind { + match self.entry(closure_id).kind { + EntryKind::Closure(data) => data.decode(self).kind, + _ => bug!(), + } + } - let mut predicates = subst::VecPerParamSpace::empty(); - for predicate_doc in reader::tagged_docs(doc, tag_type_predicate) { - predicates.push(subst::TypeSpace, - doc_predicate(cdata, predicate_doc, tcx)); + pub fn closure_ty(&self, + closure_id: DefIndex, + tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::ClosureTy<'tcx> { + match self.entry(closure_id).kind { + EntryKind::Closure(data) => data.decode(self).ty.decode((self, tcx)), + _ => bug!(), + } } - for predicate_doc in reader::tagged_docs(doc, tag_self_predicate) { - predicates.push(subst::SelfSpace, - doc_predicate(cdata, predicate_doc, tcx)); + + pub fn def_key(&self, id: 
DefIndex) -> hir_map::DefKey { + debug!("def_key: id={:?}", id); + self.entry(id).def_key.decode(self) } - for predicate_doc in reader::tagged_docs(doc, tag_fn_predicate) { - predicates.push(subst::FnSpace, - doc_predicate(cdata, predicate_doc, tcx)); + + // Returns the path leading to the thing with this `id`. Note that + // some def-ids don't wind up in the metadata, so `def_path` sometimes + // returns `None` + pub fn def_path(&self, id: DefIndex) -> Option { + debug!("def_path(id={:?})", id); + if self.maybe_entry(id).is_some() { + Some(hir_map::DefPath::make(self.cnum, id, |parent| self.def_key(parent))) + } else { + None + } } - ty::GenericPredicates { predicates: predicates } -} + /// Imports the codemap from an external crate into the codemap of the crate + /// currently being compiled (the "local crate"). + /// + /// The import algorithm works analogous to how AST items are inlined from an + /// external crate's metadata: + /// For every FileMap in the external codemap an 'inline' copy is created in the + /// local codemap. The correspondence relation between external and local + /// FileMaps is recorded in the `ImportedFileMap` objects returned from this + /// function. When an item from an external crate is later inlined into this + /// crate, this correspondence information is used to translate the span + /// information of the inlined item so that it refers the correct positions in + /// the local codemap (see `>`). + /// + /// The import algorithm in the function below will reuse FileMaps already + /// existing in the local codemap. For example, even if the FileMap of some + /// source file of libstd gets imported many times, there will only ever be + /// one FileMap object for the corresponding file in the local codemap. + /// + /// Note that imported FileMaps do not actually contain the source code of the + /// file they represent, just information about length, line breaks, and + /// multibyte characters. 
This information is enough to generate valid debuginfo + /// for items inlined from other crates. + pub fn imported_filemaps(&'a self, + local_codemap: &codemap::CodeMap) + -> Ref<'a, Vec> { + { + let filemaps = self.codemap_import_info.borrow(); + if !filemaps.is_empty() { + return filemaps; + } + } -pub fn is_defaulted_trait(cdata: Cmd, trait_id: DefIndex) -> bool { - let trait_doc = cdata.lookup_item(trait_id); - assert!(item_family(trait_doc) == Family::Trait); - let defaulted_doc = reader::get_doc(trait_doc, tag_defaulted_trait); - reader::doc_as_u8(defaulted_doc) != 0 -} + let external_codemap = self.root.codemap.decode(self); + + let imported_filemaps = external_codemap.map(|filemap_to_import| { + // Try to find an existing FileMap that can be reused for the filemap to + // be imported. A FileMap is reusable if it is exactly the same, just + // positioned at a different offset within the codemap. + let reusable_filemap = { + local_codemap.files + .borrow() + .iter() + .find(|fm| are_equal_modulo_startpos(&fm, &filemap_to_import)) + .map(|rc| rc.clone()) + }; + + match reusable_filemap { + Some(fm) => { + + debug!("CrateMetaData::imported_filemaps reuse \ + filemap {:?} original (start_pos {:?} end_pos {:?}) \ + translated (start_pos {:?} end_pos {:?})", + filemap_to_import.name, + filemap_to_import.start_pos, filemap_to_import.end_pos, + fm.start_pos, fm.end_pos); + + cstore::ImportedFileMap { + original_start_pos: filemap_to_import.start_pos, + original_end_pos: filemap_to_import.end_pos, + translated_filemap: fm, + } + } + None => { + // We can't reuse an existing FileMap, so allocate a new one + // containing the information we need. + let syntax_pos::FileMap { name, + abs_path, + start_pos, + end_pos, + lines, + multibyte_chars, + .. } = filemap_to_import; + + let source_length = (end_pos - start_pos).to_usize(); + + // Translate line-start positions and multibyte character + // position into frame of reference local to file. 
+ // `CodeMap::new_imported_filemap()` will then translate those + // coordinates to their new global frame of reference when the + // offset of the FileMap is known. + let mut lines = lines.into_inner(); + for pos in &mut lines { + *pos = *pos - start_pos; + } + let mut multibyte_chars = multibyte_chars.into_inner(); + for mbc in &mut multibyte_chars { + mbc.pos = mbc.pos - start_pos; + } + + let local_version = local_codemap.new_imported_filemap(name, + abs_path, + source_length, + lines, + multibyte_chars); + debug!("CrateMetaData::imported_filemaps alloc \ + filemap {:?} original (start_pos {:?} end_pos {:?}) \ + translated (start_pos {:?} end_pos {:?})", + local_version.name, start_pos, end_pos, + local_version.start_pos, local_version.end_pos); + + cstore::ImportedFileMap { + original_start_pos: start_pos, + original_end_pos: end_pos, + translated_filemap: local_version, + } + } + } + }) + .collect(); -pub fn is_default_impl(cdata: Cmd, impl_id: DefIndex) -> bool { - let impl_doc = cdata.lookup_item(impl_id); - item_family(impl_doc) == Family::DefaultImpl + // This shouldn't borrow twice, but there is no way to downgrade RefMut to Ref. 
+ *self.codemap_import_info.borrow_mut() = imported_filemaps; + self.codemap_import_info.borrow() + } } -pub fn get_imported_filemaps(metadata: &[u8]) -> Vec { - let crate_doc = rbml::Doc::new(metadata); - let cm_doc = reader::get_doc(crate_doc, tag_codemap); +fn are_equal_modulo_startpos(fm1: &syntax_pos::FileMap, fm2: &syntax_pos::FileMap) -> bool { + if fm1.byte_length() != fm2.byte_length() { + return false; + } - reader::tagged_docs(cm_doc, tag_codemap_filemap).map(|filemap_doc| { - let mut decoder = reader::Decoder::new(filemap_doc); - decoder.read_opaque(|opaque_decoder, _| { - Decodable::decode(opaque_decoder) - }).unwrap() - }).collect() -} + if fm1.name != fm2.name { + return false; + } -pub fn is_extern_fn(cdata: Cmd, id: DefIndex, tcx: &ty::ctxt) -> bool { - let item_doc = match cdata.get_item(id) { - Some(doc) => doc, - None => return false, - }; - if let Fn = item_family(item_doc) { - let ty::TypeScheme { generics, ty } = get_type(cdata, id, tcx); - generics.types.is_empty() && match ty.sty { - ty::TyBareFn(_, fn_ty) => fn_ty.abi != abi::Rust, - _ => false, + let lines1 = fm1.lines.borrow(); + let lines2 = fm2.lines.borrow(); + + if lines1.len() != lines2.len() { + return false; + } + + for (&line1, &line2) in lines1.iter().zip(lines2.iter()) { + if (line1 - fm1.start_pos) != (line2 - fm2.start_pos) { + return false; } - } else { - false } -} -pub fn closure_kind(cdata: Cmd, closure_id: DefIndex) -> ty::ClosureKind { - let closure_doc = cdata.lookup_item(closure_id); - let closure_kind_doc = reader::get_doc(closure_doc, tag_items_closure_kind); - let mut decoder = reader::Decoder::new(closure_kind_doc); - ty::ClosureKind::decode(&mut decoder).unwrap() -} + let multibytes1 = fm1.multibyte_chars.borrow(); + let multibytes2 = fm2.multibyte_chars.borrow(); -pub fn closure_ty<'tcx>(cdata: Cmd, closure_id: DefIndex, tcx: &ty::ctxt<'tcx>) - -> ty::ClosureTy<'tcx> { - let closure_doc = cdata.lookup_item(closure_id); - let closure_ty_doc = 
reader::get_doc(closure_doc, tag_items_closure_ty); - TyDecoder::with_doc(tcx, cdata.cnum, closure_ty_doc, &mut |did| translate_def_id(cdata, did)) - .parse_closure_ty() -} + if multibytes1.len() != multibytes2.len() { + return false; + } -fn def_key(item_doc: rbml::Doc) -> hir_map::DefKey { - match reader::maybe_get_doc(item_doc, tag_def_key) { - Some(def_key_doc) => { - let mut decoder = reader::Decoder::new(def_key_doc); - hir_map::DefKey::decode(&mut decoder).unwrap() - } - None => { - panic!("failed to find block with tag {:?} for item with family {:?}", - tag_def_key, - item_family(item_doc)) + for (mb1, mb2) in multibytes1.iter().zip(multibytes2.iter()) { + if (mb1.bytes != mb2.bytes) || ((mb1.pos - fm1.start_pos) != (mb2.pos - fm2.start_pos)) { + return false; } } -} -pub fn def_path(cdata: Cmd, id: DefIndex) -> hir_map::DefPath { - debug!("def_path(id={:?})", id); - hir_map::definitions::make_def_path(id, |parent| { - debug!("def_path: parent={:?}", parent); - let parent_doc = cdata.lookup_item(parent); - def_key(parent_doc) - }) + true } diff --git a/src/librustc_metadata/diagnostics.rs b/src/librustc_metadata/diagnostics.rs index 50b9ea5755086..6cf1a9e8a390d 100644 --- a/src/librustc_metadata/diagnostics.rs +++ b/src/librustc_metadata/diagnostics.rs @@ -14,36 +14,59 @@ register_long_diagnostics! { E0454: r##" A link name was given with an empty name. Erroneous code example: -``` +```compile_fail,E0454 #[link(name = "")] extern {} // error: #[link(name = "")] given with empty name ``` The rust compiler cannot link to an external library if you don't give it its name. Example: -``` +```ignore #[link(name = "some_lib")] extern {} // ok! ``` "##, +E0455: r##" +Linking with `kind=framework` is only supported when targeting OS X, +as frameworks are specific to that operating system. 
+ +Erroneous code example: + +```ignore +#[link(name = "FooCoreServices", kind = "framework")] extern {} +// OS used to compile is Linux for example +``` + +To solve this error you can use conditional compilation: + +``` +#[cfg_attr(target="macos", link(name = "FooCoreServices", kind = "framework"))] +extern {} +``` + +See more: https://doc.rust-lang.org/book/conditional-compilation.html +"##, + E0458: r##" An unknown "kind" was specified for a link attribute. Erroneous code example: -``` +```compile_fail,E0458 #[link(kind = "wonderful_unicorn")] extern {} // error: unknown kind: `wonderful_unicorn` ``` Please specify a valid "kind" value, from one of the following: + * static * dylib * framework + "##, E0459: r##" A link was used without a name parameter. Erroneous code example: -``` +```compile_fail,E0459 #[link(kind = "dylib")] extern {} // error: #[link(...)] specified without `name = "foo"` ``` @@ -51,7 +74,7 @@ A link was used without a name parameter. Erroneous code example: Please add the name parameter to allow the rust compiler to find the library you want. Example: -``` +```ignore #[link(kind = "dylib", name = "some_lib")] extern {} // ok! ``` "##, @@ -59,7 +82,7 @@ you want. Example: E0463: r##" A plugin/crate was declared but cannot be found. Erroneous code example: -``` +```compile_fail,E0463 #![feature(plugin)] #![plugin(cookie_monster)] // error: can't find crate for `cookie_monster` extern crate cake_is_a_lie; // error: can't find crate for `cake_is_a_lie` @@ -73,7 +96,6 @@ well, and you link to them the same way. } register_diagnostics! { - E0455, // native frameworks are only available on OSX targets E0456, // plugin `..` is not available for triple `..` E0457, // plugin `..` only found in rlib format, but must be available... E0514, // metadata version mismatch @@ -82,9 +104,6 @@ register_diagnostics! { E0462, // found staticlib `..` instead of rlib or dylib E0464, // multiple matching crates for `..` E0465, // multiple .. 
candidates for `..` found - E0466, // bad macro import - E0467, // bad macro reexport - E0468, // an `extern crate` loading macros must be at the crate root - E0469, // imported macro not found - E0470, // reexported macro not found + E0519, // local crate and dependency have same (crate-name, disambiguator) + E0523, // two dependencies have same (crate-name, disambiguator) but different SVH } diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index ec70a610e0b3a..83904b24de328 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -8,2089 +8,1412 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Metadata encoding - -#![allow(unused_must_use)] // everything is just a MemWriter, can't fail -#![allow(non_camel_case_types)] - -use common::*; use cstore; -use decoder; -use tyencode; -use index::{self, IndexData}; - -use middle::cstore::{LOCAL_CRATE, CrateStore, InlinedItemRef, LinkMeta, tls}; -use middle::def; -use middle::def_id::{CRATE_DEF_INDEX, DefId}; -use middle::dependency_format::Linkage; -use middle::stability; -use middle::subst; -use middle::ty::{self, Ty}; - -use rustc::back::svh::Svh; -use rustc::front::map::{LinkedPath, PathElem, PathElems}; -use rustc::front::map as ast_map; -use rustc::mir::repr::Mir; -use rustc::session::config; -use rustc::util::nodemap::{FnvHashMap, NodeMap, NodeSet}; - -use serialize::Encodable; -use std::cell::RefCell; +use index::Index; +use schema::*; + +use rustc::middle::cstore::{InlinedItemRef, LinkMeta}; +use rustc::middle::cstore::{LinkagePreference, NativeLibrary}; +use rustc::hir::def; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId}; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::lang_items; +use rustc::mir; +use rustc::traits::specialization_graph; +use rustc::ty::{self, Ty, TyCtxt}; + +use rustc::session::config::{self, CrateTypeProcMacro}; +use 
rustc::util::nodemap::{FxHashMap, NodeSet}; + +use rustc_serialize::{Encodable, Encoder, SpecializedEncoder, opaque}; +use std::hash::Hash; +use std::intrinsics; use std::io::prelude::*; -use std::io::{Cursor, SeekFrom}; +use std::io::Cursor; use std::rc::Rc; use std::u32; -use syntax::abi; -use syntax::ast::{self, NodeId, Name, CRATE_NODE_ID, CrateNum}; +use syntax::ast::{self, CRATE_NODE_ID}; use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::errors::Handler; -use syntax::parse::token::special_idents; -use syntax; -use rbml::writer::Encoder; - -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use rustc_front::intravisit; - -pub type EncodeInlinedItem<'a> = - Box; - -pub struct EncodeParams<'a, 'tcx: 'a> { - pub diag: &'a Handler, - pub tcx: &'a ty::ctxt<'tcx>, - pub reexports: &'a def::ExportMap, - pub item_symbols: &'a RefCell>, - pub link_meta: &'a LinkMeta, - pub cstore: &'a cstore::CStore, - pub encode_inlined_item: EncodeInlinedItem<'a>, - pub reachable: &'a NodeSet, - pub mir_map: &'a NodeMap>, -} +use syntax::symbol::Symbol; +use syntax_pos; -pub struct EncodeContext<'a, 'tcx: 'a> { - pub diag: &'a Handler, - pub tcx: &'a ty::ctxt<'tcx>, - pub reexports: &'a def::ExportMap, - pub item_symbols: &'a RefCell>, - pub link_meta: &'a LinkMeta, - pub cstore: &'a cstore::CStore, - pub encode_inlined_item: RefCell>, - pub type_abbrevs: tyencode::abbrev_map<'tcx>, - pub reachable: &'a NodeSet, - pub mir_map: &'a NodeMap>, -} +use rustc::hir::{self, PatKind}; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::intravisit::{Visitor, NestedVisitorMap}; +use rustc::hir::intravisit; + +use super::index_builder::{FromId, IndexBuilder, Untracked}; -impl<'a, 'tcx> EncodeContext<'a,'tcx> { - fn local_id(&self, def_id: DefId) -> NodeId { - self.tcx.map.as_local_node_id(def_id).unwrap() +pub struct EncodeContext<'a, 'tcx: 'a> { + opaque: opaque::Encoder<'a>, + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + reexports: &'a def::ExportMap, + 
link_meta: &'a LinkMeta, + cstore: &'a cstore::CStore, + reachable: &'a NodeSet, + + lazy_state: LazyState, + type_shorthands: FxHashMap, usize>, + predicate_shorthands: FxHashMap, usize>, +} + +macro_rules! encoder_methods { + ($($name:ident($ty:ty);)*) => { + $(fn $name(&mut self, value: $ty) -> Result<(), Self::Error> { + self.opaque.$name(value) + })* } } -/// "interned" entries referenced by id -#[derive(PartialEq, Eq, Hash)] -pub enum XRef<'tcx> { Predicate(ty::Predicate<'tcx>) } - -struct CrateIndex<'tcx> { - items: IndexData, - xrefs: FnvHashMap, u32>, // sequentially-assigned -} +impl<'a, 'tcx> Encoder for EncodeContext<'a, 'tcx> { + type Error = as Encoder>::Error; -impl<'tcx> CrateIndex<'tcx> { - fn record(&mut self, id: DefId, rbml_w: &mut Encoder) { - let position = rbml_w.mark_stable_position(); - self.items.record(id, position); + fn emit_nil(&mut self) -> Result<(), Self::Error> { + Ok(()) } - fn add_xref(&mut self, xref: XRef<'tcx>) -> u32 { - let old_len = self.xrefs.len() as u32; - *self.xrefs.entry(xref).or_insert(old_len) + encoder_methods! { + emit_usize(usize); + emit_u64(u64); + emit_u32(u32); + emit_u16(u16); + emit_u8(u8); + + emit_isize(isize); + emit_i64(i64); + emit_i32(i32); + emit_i16(i16); + emit_i8(i8); + + emit_bool(bool); + emit_f64(f64); + emit_f32(f32); + emit_char(char); + emit_str(&str); } } -fn encode_name(rbml_w: &mut Encoder, name: Name) { - rbml_w.wr_tagged_str(tag_paths_data_name, &name.as_str()); -} - -fn encode_def_id(rbml_w: &mut Encoder, id: DefId) { - rbml_w.wr_tagged_u64(tag_def_id, def_to_u64(id)); -} - -/// For every DefId that we create a metadata item for, we include a -/// serialized copy of its DefKey, which allows us to recreate a path. 
-fn encode_def_id_and_key(ecx: &EncodeContext, - rbml_w: &mut Encoder, - def_id: DefId) -{ - encode_def_id(rbml_w, def_id); - encode_def_key(ecx, rbml_w, def_id); -} - -fn encode_def_key(ecx: &EncodeContext, - rbml_w: &mut Encoder, - def_id: DefId) -{ - rbml_w.start_tag(tag_def_key); - let def_key = ecx.tcx.map.def_key(def_id); - def_key.encode(rbml_w); - rbml_w.end_tag(); -} - -fn encode_trait_ref<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - trait_ref: ty::TraitRef<'tcx>, - tag: usize) { - rbml_w.start_tag(tag); - tyencode::enc_trait_ref(rbml_w.writer, &ecx.ty_str_ctxt(), trait_ref); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); -} - -// Item info table encoding -fn encode_family(rbml_w: &mut Encoder, c: char) { - rbml_w.wr_tagged_u8(tag_items_data_item_family, c as u8); -} - -pub fn def_to_u64(did: DefId) -> u64 { - assert!(did.index.as_u32() < u32::MAX); - (did.krate as u64) << 32 | (did.index.as_usize() as u64) -} - -pub fn def_to_string(did: DefId) -> String { - format!("{}:{}", did.krate, did.index.as_usize()) -} - -fn encode_item_variances(rbml_w: &mut Encoder, - ecx: &EncodeContext, - id: NodeId) { - let v = ecx.tcx.item_variances(ecx.tcx.map.local_def_id(id)); - rbml_w.start_tag(tag_item_variances); - v.encode(rbml_w); - rbml_w.end_tag(); -} - -fn encode_bounds_and_type_for_item<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'tcx>, - id: NodeId) { - encode_bounds_and_type(rbml_w, - ecx, - index, - &ecx.tcx.lookup_item_type(ecx.tcx.map.local_def_id(id)), - &ecx.tcx.lookup_predicates(ecx.tcx.map.local_def_id(id))); -} - -fn encode_bounds_and_type<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'tcx>, - scheme: &ty::TypeScheme<'tcx>, - predicates: &ty::GenericPredicates<'tcx>) { - encode_generics(rbml_w, ecx, index, - &scheme.generics, &predicates, tag_item_generics); - encode_type(ecx, rbml_w, scheme.ty); -} - -fn encode_variant_id(rbml_w: &mut 
Encoder, vid: DefId) { - let id = def_to_u64(vid); - rbml_w.wr_tagged_u64(tag_items_data_item_variant, id); - rbml_w.wr_tagged_u64(tag_mod_child, id); -} - -fn write_closure_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - closure_type: &ty::ClosureTy<'tcx>) { - tyencode::enc_closure_ty(rbml_w.writer, &ecx.ty_str_ctxt(), closure_type); - rbml_w.mark_stable_position(); -} - -fn encode_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - typ: Ty<'tcx>) { - rbml_w.start_tag(tag_items_data_item_type); - tyencode::enc_ty(rbml_w.writer, &ecx.ty_str_ctxt(), typ); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); -} - -fn encode_region(ecx: &EncodeContext, - rbml_w: &mut Encoder, - r: ty::Region) { - rbml_w.start_tag(tag_items_data_region); - tyencode::enc_region(rbml_w.writer, &ecx.ty_str_ctxt(), r); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); +impl<'a, 'tcx, T> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, lazy: &Lazy) -> Result<(), Self::Error> { + self.emit_lazy_distance(lazy.position, Lazy::::min_size()) + } } -fn encode_symbol(ecx: &EncodeContext, - rbml_w: &mut Encoder, - id: NodeId) { - match ecx.item_symbols.borrow().get(&id) { - Some(x) => { - debug!("encode_symbol(id={}, str={})", id, *x); - rbml_w.wr_tagged_str(tag_items_data_item_symbol, x); - } - None => { - ecx.diag.bug(&format!("encode_symbol: id not found {}", id)); +impl<'a, 'tcx, T> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, seq: &LazySeq) -> Result<(), Self::Error> { + self.emit_usize(seq.len)?; + if seq.len == 0 { + return Ok(()); } + self.emit_lazy_distance(seq.position, LazySeq::::min_size(seq.len)) } } -fn encode_disr_val(_: &EncodeContext, - rbml_w: &mut Encoder, - disr_val: ty::Disr) { - rbml_w.wr_tagged_str(tag_disr_val, &disr_val.to_string()); -} - -fn encode_parent_item(rbml_w: &mut Encoder, id: DefId) { - rbml_w.wr_tagged_u64(tag_items_data_parent_item, 
def_to_u64(id)); +impl<'a, 'tcx> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, ty: &Ty<'tcx>) -> Result<(), Self::Error> { + self.encode_with_shorthand(ty, &ty.sty, |ecx| &mut ecx.type_shorthands) + } } -fn encode_struct_fields(rbml_w: &mut Encoder, - variant: ty::VariantDef) { - for f in &variant.fields { - if f.name == special_idents::unnamed_field.name { - rbml_w.start_tag(tag_item_unnamed_field); - } else { - rbml_w.start_tag(tag_item_field); - encode_name(rbml_w, f.name); +impl<'a, 'tcx> SpecializedEncoder> for EncodeContext<'a, 'tcx> { + fn specialized_encode(&mut self, + predicates: &ty::GenericPredicates<'tcx>) + -> Result<(), Self::Error> { + predicates.parent.encode(self)?; + predicates.predicates.len().encode(self)?; + for predicate in &predicates.predicates { + self.encode_with_shorthand(predicate, predicate, |ecx| &mut ecx.predicate_shorthands)? } - encode_struct_field_family(rbml_w, f.vis); - encode_def_id(rbml_w, f.did); - rbml_w.end_tag(); + Ok(()) } } -fn encode_enum_variant_info<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - id: NodeId, - vis: hir::Visibility, - index: &mut CrateIndex<'tcx>) { - debug!("encode_enum_variant_info(id={})", id); - - let mut disr_val = 0; - let def = ecx.tcx.lookup_adt_def(ecx.tcx.map.local_def_id(id)); - for variant in &def.variants { - let vid = variant.did; - let variant_node_id = ecx.local_id(vid); - - if let ty::VariantKind::Struct = variant.kind() { - // tuple-like enum variant fields aren't really items so - // don't try to encode them. 
- for field in &variant.fields { - encode_field(ecx, rbml_w, field, index); - } - } - - index.record(vid, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, vid); - encode_family(rbml_w, match variant.kind() { - ty::VariantKind::Struct => 'V', - ty::VariantKind::Tuple => 'v', - ty::VariantKind::Unit => 'w', - }); - encode_name(rbml_w, variant.name); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(id)); - encode_visibility(rbml_w, vis); - - let attrs = ecx.tcx.get_attrs(vid); - encode_attributes(rbml_w, &attrs); - encode_repr_attrs(rbml_w, ecx, &attrs); - - let stab = stability::lookup_stability(ecx.tcx, vid); - let depr = stability::lookup_deprecation(ecx.tcx, vid); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - encode_struct_fields(rbml_w, variant); - - let specified_disr_val = variant.disr_val; - if specified_disr_val != disr_val { - encode_disr_val(ecx, rbml_w, specified_disr_val); - disr_val = specified_disr_val; - } - encode_bounds_and_type_for_item(rbml_w, ecx, index, variant_node_id); - - ecx.tcx.map.with_path(variant_node_id, |path| encode_path(rbml_w, path)); - rbml_w.end_tag(); - disr_val = disr_val.wrapping_add(1); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + pub fn position(&self) -> usize { + self.opaque.position() } -} -fn encode_path>(rbml_w: &mut Encoder, path: PI) { - let path = path.collect::>(); - rbml_w.start_tag(tag_path); - rbml_w.wr_tagged_u32(tag_path_len, path.len() as u32); - for pe in &path { - let tag = match *pe { - ast_map::PathMod(_) => tag_path_elem_mod, - ast_map::PathName(_) => tag_path_elem_name - }; - rbml_w.wr_tagged_str(tag, &pe.name().as_str()); + fn emit_node R, R>(&mut self, f: F) -> R { + assert_eq!(self.lazy_state, LazyState::NoNode); + let pos = self.position(); + self.lazy_state = LazyState::NodeStart(pos); + let r = f(self, pos); + self.lazy_state = LazyState::NoNode; + r } - rbml_w.end_tag(); -} -/// Iterates through "auxiliary node IDs", which are 
node IDs that describe -/// top-level items that are sub-items of the given item. Specifically: -/// -/// * For newtype structs, iterates through the node ID of the constructor. -fn each_auxiliary_node_id(item: &hir::Item, callback: F) -> bool where - F: FnOnce(NodeId) -> bool, -{ - let mut continue_ = true; - match item.node { - hir::ItemStruct(ref struct_def, _) => { - // If this is a newtype struct, return the constructor. - if struct_def.is_tuple() { - continue_ = callback(struct_def.id()); + fn emit_lazy_distance(&mut self, + position: usize, + min_size: usize) + -> Result<(), ::Error> { + let min_end = position + min_size; + let distance = match self.lazy_state { + LazyState::NoNode => bug!("emit_lazy_distance: outside of a metadata node"), + LazyState::NodeStart(start) => { + assert!(min_end <= start); + start - min_end } - } - _ => {} + LazyState::Previous(last_min_end) => { + assert!(last_min_end <= position); + position - last_min_end + } + }; + self.lazy_state = LazyState::Previous(min_end); + self.emit_usize(distance) } - continue_ -} + pub fn lazy(&mut self, value: &T) -> Lazy { + self.emit_node(|ecx, pos| { + value.encode(ecx).unwrap(); -fn encode_reexports(ecx: &EncodeContext, - rbml_w: &mut Encoder, - id: NodeId) { - debug!("(encoding info for module) encoding reexports for {}", id); - match ecx.reexports.get(&id) { - Some(exports) => { - debug!("(encoding info for module) found reexports for {}", id); - for exp in exports { - debug!("(encoding info for module) reexport '{}' ({:?}) for \ - {}", - exp.name, - exp.def_id, - id); - rbml_w.start_tag(tag_items_data_item_reexport); - rbml_w.wr_tagged_u64(tag_items_data_item_reexport_def_id, - def_to_u64(exp.def_id)); - rbml_w.wr_tagged_str(tag_items_data_item_reexport_name, - &exp.name.as_str()); - rbml_w.end_tag(); - } - }, - None => debug!("(encoding info for module) found no reexports for {}", id), + assert!(pos + Lazy::::min_size() <= ecx.position()); + Lazy::with_position(pos) + }) } -} -fn 
encode_info_for_mod(ecx: &EncodeContext, - rbml_w: &mut Encoder, - md: &hir::Mod, - attrs: &[ast::Attribute], - id: NodeId, - path: PathElems, - name: Name, - vis: hir::Visibility) { - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, ecx.tcx.map.local_def_id(id)); - encode_family(rbml_w, 'm'); - encode_name(rbml_w, name); - debug!("(encoding info for module) encoding info for module ID {}", id); - - // Encode info about all the module children. - for item_id in &md.item_ids { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(item_id.id))); - - let item = ecx.tcx.map.expect_item(item_id.id); - each_auxiliary_node_id(item, |auxiliary_node_id| { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(auxiliary_node_id))); - true - }); - } + fn lazy_seq(&mut self, iter: I) -> LazySeq + where I: IntoIterator, + T: Encodable + { + self.emit_node(|ecx, pos| { + let len = iter.into_iter().map(|value| value.encode(ecx).unwrap()).count(); - encode_path(rbml_w, path.clone()); - encode_visibility(rbml_w, vis); + assert!(pos + LazySeq::::min_size(len) <= ecx.position()); + LazySeq::with_position_and_length(pos, len) + }) + } - let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(id)); - let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); + fn lazy_seq_ref<'b, I, T>(&mut self, iter: I) -> LazySeq + where I: IntoIterator, + T: 'b + Encodable + { + self.emit_node(|ecx, pos| { + let len = iter.into_iter().map(|value| value.encode(ecx).unwrap()).count(); - // Encode the reexports of this module, if this module is public. 
- if vis == hir::Public { - debug!("(encoding info for module) encoding reexports for {}", id); - encode_reexports(ecx, rbml_w, id); + assert!(pos + LazySeq::::min_size(len) <= ecx.position()); + LazySeq::with_position_and_length(pos, len) + }) } - encode_attributes(rbml_w, attrs); - rbml_w.end_tag(); -} + /// Encode the given value or a previously cached shorthand. + fn encode_with_shorthand(&mut self, + value: &T, + variant: &U, + map: M) + -> Result<(), ::Error> + where M: for<'b> Fn(&'b mut Self) -> &'b mut FxHashMap, + T: Clone + Eq + Hash, + U: Encodable + { + let existing_shorthand = map(self).get(value).cloned(); + if let Some(shorthand) = existing_shorthand { + return self.emit_usize(shorthand); + } -fn encode_struct_field_family(rbml_w: &mut Encoder, - visibility: hir::Visibility) { - encode_family(rbml_w, match visibility { - hir::Public => 'g', - hir::Inherited => 'N' - }); -} + let start = self.position(); + variant.encode(self)?; + let len = self.position() - start; -fn encode_visibility(rbml_w: &mut Encoder, visibility: hir::Visibility) { - let ch = match visibility { - hir::Public => 'y', - hir::Inherited => 'i', - }; - rbml_w.wr_tagged_u8(tag_items_data_item_visibility, ch as u8); -} + // The shorthand encoding uses the same usize as the + // discriminant, with an offset so they can't conflict. + let discriminant = unsafe { intrinsics::discriminant_value(variant) }; + assert!(discriminant < SHORTHAND_OFFSET as u64); + let shorthand = start + SHORTHAND_OFFSET; -fn encode_constness(rbml_w: &mut Encoder, constness: hir::Constness) { - rbml_w.start_tag(tag_items_data_item_constness); - let ch = match constness { - hir::Constness::Const => 'c', - hir::Constness::NotConst => 'n', - }; - rbml_w.wr_str(&ch.to_string()); - rbml_w.end_tag(); -} + // Get the number of bits that leb128 could fit + // in the same space as the fully encoded type. 
+ let leb128_bits = len * 7; -fn encode_explicit_self(rbml_w: &mut Encoder, - explicit_self: &ty::ExplicitSelfCategory) { - let tag = tag_item_trait_method_explicit_self; - - // Encode the base self type. - match *explicit_self { - ty::ExplicitSelfCategory::Static => { - rbml_w.wr_tagged_bytes(tag, &['s' as u8]); - } - ty::ExplicitSelfCategory::ByValue => { - rbml_w.wr_tagged_bytes(tag, &['v' as u8]); + // Check that the shorthand is a not longer than the + // full encoding itself, i.e. it's an obvious win. + if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) { + map(self).insert(value.clone(), shorthand); } - ty::ExplicitSelfCategory::ByBox => { - rbml_w.wr_tagged_bytes(tag, &['~' as u8]); - } - ty::ExplicitSelfCategory::ByReference(_, m) => { - // FIXME(#4846) encode custom lifetime - let ch = encode_mutability(m); - rbml_w.wr_tagged_bytes(tag, &['&' as u8, ch]); - } - } - fn encode_mutability(m: hir::Mutability) -> u8 { - match m { - hir::MutImmutable => 'i' as u8, - hir::MutMutable => 'm' as u8, - } + Ok(()) } -} -fn encode_item_sort(rbml_w: &mut Encoder, sort: char) { - rbml_w.wr_tagged_u8(tag_item_trait_item_sort, sort as u8); -} - -fn encode_field<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - field: ty::FieldDef<'tcx>, - index: &mut CrateIndex<'tcx>) { - let nm = field.name; - let id = ecx.local_id(field.did); - - index.record(field.did, rbml_w); - rbml_w.start_tag(tag_items_data_item); - debug!("encode_field: encoding {} {}", nm, id); - encode_struct_field_family(rbml_w, field.vis); - encode_name(rbml_w, nm); - encode_bounds_and_type_for_item(rbml_w, ecx, index, id); - encode_def_id_and_key(ecx, rbml_w, field.did); - - let stab = stability::lookup_stability(ecx.tcx, field.did); - let depr = stability::lookup_deprecation(ecx.tcx, field.did); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - rbml_w.end_tag(); -} - -fn encode_info_for_struct_ctor<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut 
Encoder, - name: Name, - ctor_id: NodeId, - index: &mut CrateIndex<'tcx>, - struct_id: NodeId) { - let ctor_def_id = ecx.tcx.map.local_def_id(ctor_id); - - index.record(ctor_def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, ctor_def_id); - encode_family(rbml_w, 'o'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, ctor_id); - encode_name(rbml_w, name); - ecx.tcx.map.with_path(ctor_id, |path| encode_path(rbml_w, path)); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(struct_id)); - - if ecx.item_symbols.borrow().contains_key(&ctor_id) { - encode_symbol(ecx, rbml_w, ctor_id); + /// For every DefId that we create a metadata item for, we include a + /// serialized copy of its DefKey, which allows us to recreate a path. + fn encode_def_key(&mut self, def_id: DefId) -> Lazy { + let tcx = self.tcx; + self.lazy(&tcx.map.def_key(def_id)) } - let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(ctor_id)); - let depr= stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(ctor_id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - // indicate that this is a tuple struct ctor, because downstream users will normally want - // the tuple struct definition, but without this there is no way for them to tell that - // they actually have a ctor rather than a normal function - rbml_w.wr_tagged_bytes(tag_items_data_item_is_tuple_struct_ctor, &[]); - - rbml_w.end_tag(); -} - -fn encode_generics<'a, 'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'tcx>, - generics: &ty::Generics<'tcx>, - predicates: &ty::GenericPredicates<'tcx>, - tag: usize) -{ - rbml_w.start_tag(tag); - - for param in &generics.types { - rbml_w.start_tag(tag_type_param_def); - tyencode::enc_type_param_def(rbml_w.writer, &ecx.ty_str_ctxt(), param); - rbml_w.mark_stable_position(); - rbml_w.end_tag(); + fn encode_item_variances(&mut self, def_id: DefId) -> LazySeq { + let 
tcx = self.tcx; + self.lazy_seq(tcx.item_variances(def_id).iter().cloned()) } - // Region parameters - for param in &generics.regions { - rbml_w.start_tag(tag_region_param_def); - - rbml_w.start_tag(tag_region_param_def_ident); - encode_name(rbml_w, param.name); - rbml_w.end_tag(); - - rbml_w.wr_tagged_u64(tag_region_param_def_def_id, - def_to_u64(param.def_id)); - - rbml_w.wr_tagged_u64(tag_region_param_def_space, - param.space.to_uint() as u64); + fn encode_item_type(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(&tcx.item_type(def_id)) + } - rbml_w.wr_tagged_u64(tag_region_param_def_index, - param.index as u64); + /// Encode data for the given variant of the given ADT. The + /// index of the variant is untracked: this is ok because we + /// will have to lookup the adt-def by its id, and that gives us + /// the right to access any information in the adt-def (including, + /// e.g., the length of the various vectors). + fn encode_enum_variant_info(&mut self, + (enum_did, Untracked(index)): (DefId, Untracked)) + -> Entry<'tcx> { + let tcx = self.tcx; + let def = tcx.lookup_adt_def(enum_did); + let variant = &def.variants[index]; + let def_id = variant.did; + + let data = VariantData { + ctor_kind: variant.ctor_kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: None, + }; - for &bound_region in ¶m.bounds { - encode_region(ecx, rbml_w, bound_region); + let enum_id = tcx.map.as_local_node_id(enum_did).unwrap(); + let enum_vis = &tcx.map.expect_item(enum_id).vis; + + Entry { + kind: EntryKind::Variant(self.lazy(&data)), + visibility: enum_vis.simplify(), + span: self.lazy(&tcx.def_span(def_id)), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&tcx.get_attrs(def_id)), + children: self.lazy_seq(variant.fields.iter().map(|f| { + assert!(f.did.is_local()); + f.did.index + })), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: 
Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None, } - - rbml_w.end_tag(); } - encode_predicates_in_current_doc(rbml_w, ecx, index, predicates); - - rbml_w.end_tag(); -} - -fn encode_predicates_in_current_doc<'a,'tcx>(rbml_w: &mut Encoder, - _ecx: &EncodeContext<'a,'tcx>, - index: &mut CrateIndex<'tcx>, - predicates: &ty::GenericPredicates<'tcx>) -{ - for (space, _, predicate) in predicates.predicates.iter_enumerated() { - let tag = match space { - subst::TypeSpace => tag_type_predicate, - subst::SelfSpace => tag_self_predicate, - subst::FnSpace => tag_fn_predicate + fn encode_info_for_mod(&mut self, + FromId(id, (md, attrs, vis)): FromId<(&hir::Mod, + &[ast::Attribute], + &hir::Visibility)>) + -> Entry<'tcx> { + let tcx = self.tcx; + let def_id = tcx.map.local_def_id(id); + + let data = ModData { + reexports: match self.reexports.get(&id) { + Some(exports) if *vis == hir::Public => self.lazy_seq_ref(exports), + _ => LazySeq::empty(), + }, }; - rbml_w.wr_tagged_u32(tag, - index.add_xref(XRef::Predicate(predicate.clone()))); + Entry { + kind: EntryKind::Mod(self.lazy(&data)), + visibility: vis.simplify(), + span: self.lazy(&md.inner), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(attrs), + children: self.lazy_seq(md.item_ids.iter().map(|item_id| { + tcx.map.local_def_id(item_id.id).index + })), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: None, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: None, + predicates: None, + + ast: None, + mir: None + } } } -fn encode_predicates<'a,'tcx>(rbml_w: &mut Encoder, - ecx: &EncodeContext<'a,'tcx>, - index: &mut CrateIndex<'tcx>, - predicates: &ty::GenericPredicates<'tcx>, - tag: usize) -{ - rbml_w.start_tag(tag); - 
encode_predicates_in_current_doc(rbml_w, ecx, index, predicates); - rbml_w.end_tag(); +trait Visibility { + fn simplify(&self) -> ty::Visibility; } -fn encode_method_ty_fields<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'tcx>, - method_ty: &ty::Method<'tcx>) { - encode_def_id_and_key(ecx, rbml_w, method_ty.def_id); - encode_name(rbml_w, method_ty.name); - encode_generics(rbml_w, ecx, index, - &method_ty.generics, &method_ty.predicates, - tag_method_ty_generics); - encode_visibility(rbml_w, method_ty.vis); - encode_explicit_self(rbml_w, &method_ty.explicit_self); - match method_ty.explicit_self { - ty::ExplicitSelfCategory::Static => { - encode_family(rbml_w, STATIC_METHOD_FAMILY); +impl Visibility for hir::Visibility { + fn simplify(&self) -> ty::Visibility { + if *self == hir::Public { + ty::Visibility::Public + } else { + ty::Visibility::PrivateExternal } - _ => encode_family(rbml_w, METHOD_FAMILY) } } -fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'tcx>, - associated_const: &ty::AssociatedConst, - impl_path: PathElems, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - debug!("encode_info_for_associated_const({:?},{:?})", - associated_const.def_id, - associated_const.name); - - index.record(associated_const.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_def_id_and_key(ecx, rbml_w, associated_const.def_id); - encode_name(rbml_w, associated_const.name); - encode_visibility(rbml_w, associated_const.vis); - encode_family(rbml_w, 'C'); - - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_item_sort(rbml_w, 'C'); - - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(associated_const.def_id)); - - let stab = stability::lookup_stability(ecx.tcx, associated_const.def_id); - let depr = stability::lookup_deprecation(ecx.tcx, associated_const.def_id); - 
encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - let elem = ast_map::PathName(associated_const.name); - encode_path(rbml_w, impl_path.chain(Some(elem))); - - if let Some(ii) = impl_item_opt { - encode_attributes(rbml_w, &ii.attrs); - encode_inlined_item(ecx, - rbml_w, - InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), - ii)); +impl Visibility for ty::Visibility { + fn simplify(&self) -> ty::Visibility { + if *self == ty::Visibility::Public { + ty::Visibility::Public + } else { + ty::Visibility::PrivateExternal + } } - - rbml_w.end_tag(); } -fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'tcx>, - m: &ty::Method<'tcx>, - impl_path: PathElems, - is_default_impl: bool, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - - debug!("encode_info_for_method: {:?} {:?}", m.def_id, - m.name); - index.record(m.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_method_ty_fields(ecx, rbml_w, index, m); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_item_sort(rbml_w, 'r'); - - let stab = stability::lookup_stability(ecx.tcx, m.def_id); - let depr = stability::lookup_deprecation(ecx.tcx, m.def_id); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - let m_node_id = ecx.local_id(m.def_id); - encode_bounds_and_type_for_item(rbml_w, ecx, index, m_node_id); - - let elem = ast_map::PathName(m.name); - encode_path(rbml_w, impl_path.chain(Some(elem))); - if let Some(impl_item) = impl_item_opt { - if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { - encode_attributes(rbml_w, &impl_item.attrs); - let scheme = ecx.tcx.lookup_item_type(m.def_id); - let any_types = !scheme.generics.types.is_empty(); - let needs_inline = any_types || is_default_impl || - attr::requests_inline(&impl_item.attrs); - if needs_inline || sig.constness == hir::Constness::Const { - encode_inlined_item(ecx, - rbml_w, - 
InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), - impl_item)); +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + fn encode_fields(&mut self, adt_def_id: DefId) { + let def = self.tcx.lookup_adt_def(adt_def_id); + for (variant_index, variant) in def.variants.iter().enumerate() { + for (field_index, field) in variant.fields.iter().enumerate() { + self.record(field.did, + EncodeContext::encode_field, + (adt_def_id, Untracked((variant_index, field_index)))); } - encode_constness(rbml_w, sig.constness); - if !any_types { - let m_id = ecx.local_id(m.def_id); - encode_symbol(ecx, rbml_w, m_id); - } - encode_method_argument_names(rbml_w, &sig.decl); } } - - rbml_w.end_tag(); } -fn encode_info_for_associated_type<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - index: &mut CrateIndex<'tcx>, - associated_type: &ty::AssociatedType<'tcx>, - impl_path: PathElems, - parent_id: NodeId, - impl_item_opt: Option<&hir::ImplItem>) { - debug!("encode_info_for_associated_type({:?},{:?})", - associated_type.def_id, - associated_type.name); - - index.record(associated_type.def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - - encode_def_id_and_key(ecx, rbml_w, associated_type.def_id); - encode_name(rbml_w, associated_type.name); - encode_visibility(rbml_w, associated_type.vis); - encode_family(rbml_w, 'y'); - encode_parent_item(rbml_w, ecx.tcx.map.local_def_id(parent_id)); - encode_item_sort(rbml_w, 't'); - - let stab = stability::lookup_stability(ecx.tcx, associated_type.def_id); - let depr = stability::lookup_deprecation(ecx.tcx, associated_type.def_id); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - - let elem = ast_map::PathName(associated_type.name); - encode_path(rbml_w, impl_path.chain(Some(elem))); - - if let Some(ii) = impl_item_opt { - encode_attributes(rbml_w, &ii.attrs); - } else { - encode_predicates(rbml_w, ecx, index, - &ecx.tcx.lookup_predicates(associated_type.def_id), - tag_item_generics); +impl<'a, 'tcx> 
EncodeContext<'a, 'tcx> { + /// Encode data for the given field of the given variant of the + /// given ADT. The indices of the variant/field are untracked: + /// this is ok because we will have to lookup the adt-def by its + /// id, and that gives us the right to access any information in + /// the adt-def (including, e.g., the length of the various + /// vectors). + fn encode_field(&mut self, + (adt_def_id, Untracked((variant_index, field_index))): (DefId, + Untracked<(usize, + usize)>)) + -> Entry<'tcx> { + let tcx = self.tcx; + let variant = &tcx.lookup_adt_def(adt_def_id).variants[variant_index]; + let field = &variant.fields[field_index]; + + let def_id = field.did; + let variant_id = tcx.map.as_local_node_id(variant.did).unwrap(); + let variant_data = tcx.map.expect_variant_data(variant_id); + + Entry { + kind: EntryKind::Field, + visibility: field.vis.simplify(), + span: self.lazy(&tcx.def_span(def_id)), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&variant_data.fields()[field_index].attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None, + } } - if let Some(ty) = associated_type.ty { - encode_type(ecx, rbml_w, ty); - } + fn encode_struct_ctor(&mut self, (adt_def_id, def_id): (DefId, DefId)) -> Entry<'tcx> { + let tcx = self.tcx; + let variant = tcx.lookup_adt_def(adt_def_id).struct_variant(); - rbml_w.end_tag(); -} + let data = VariantData { + ctor_kind: variant.ctor_kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: Some(def_id.index), + }; -fn encode_method_argument_names(rbml_w: &mut Encoder, - decl: &hir::FnDecl) { - rbml_w.start_tag(tag_method_argument_names); - for arg in &decl.inputs { - let tag 
= tag_method_argument_name; - if let hir::PatIdent(_, ref path1, _) = arg.pat.node { - let name = path1.node.name.as_str(); - rbml_w.wr_tagged_bytes(tag, name.as_bytes()); - } else { - rbml_w.wr_tagged_bytes(tag, &[]); + let struct_id = tcx.map.as_local_node_id(adt_def_id).unwrap(); + let struct_vis = &tcx.map.expect_item(struct_id).vis; + + Entry { + kind: EntryKind::Struct(self.lazy(&data)), + visibility: struct_vis.simplify(), + span: self.lazy(&tcx.def_span(def_id)), + def_key: self.encode_def_key(def_id), + attributes: LazySeq::empty(), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None, } } - rbml_w.end_tag(); -} -fn encode_repr_attrs(rbml_w: &mut Encoder, - ecx: &EncodeContext, - attrs: &[ast::Attribute]) { - let mut repr_attrs = Vec::new(); - for attr in attrs { - repr_attrs.extend(attr::find_repr_attrs(ecx.tcx.sess.diagnostic(), - attr)); + fn encode_generics(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(tcx.item_generics(def_id)) } - rbml_w.start_tag(tag_items_data_item_repr); - repr_attrs.encode(rbml_w); - rbml_w.end_tag(); -} - -fn encode_inlined_item(ecx: &EncodeContext, - rbml_w: &mut Encoder, - ii: InlinedItemRef) { - let mut eii = ecx.encode_inlined_item.borrow_mut(); - let eii: &mut EncodeInlinedItem = &mut *eii; - eii(ecx, rbml_w, ii); - - let node_id = match ii { - InlinedItemRef::Item(item) => item.id, - InlinedItemRef::TraitItem(_, trait_item) => trait_item.id, - InlinedItemRef::ImplItem(_, impl_item) => impl_item.id, - InlinedItemRef::Foreign(foreign_item) => foreign_item.id - }; - encode_mir(ecx, rbml_w, node_id); -} - -fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) { - if let Some(mir) 
= ecx.mir_map.get(&node_id) { - rbml_w.start_tag(tag_mir as usize); - rbml_w.emit_opaque(|opaque_encoder| { - tls::enter_encoding_context(ecx, opaque_encoder, |_, opaque_encoder| { - Encodable::encode(mir, opaque_encoder) - }) - }).unwrap(); - rbml_w.end_tag(); + fn encode_predicates(&mut self, def_id: DefId) -> Lazy> { + let tcx = self.tcx; + self.lazy(&tcx.item_predicates(def_id)) } -} -const FN_FAMILY: char = 'f'; -const STATIC_METHOD_FAMILY: char = 'F'; -const METHOD_FAMILY: char = 'h'; - -// Encodes the inherent implementations of a structure, enumeration, or trait. -fn encode_inherent_implementations(ecx: &EncodeContext, - rbml_w: &mut Encoder, - def_id: DefId) { - match ecx.tcx.inherent_impls.borrow().get(&def_id) { - None => {} - Some(implementations) => { - for &impl_def_id in implementations.iter() { - rbml_w.start_tag(tag_items_data_item_inherent_impl); - encode_def_id(rbml_w, impl_def_id); - rbml_w.end_tag(); - } - } - } -} + fn encode_info_for_trait_item(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; -fn encode_stability(rbml_w: &mut Encoder, stab_opt: Option<&attr::Stability>) { - stab_opt.map(|stab| { - rbml_w.start_tag(tag_items_data_item_stability); - stab.encode(rbml_w).unwrap(); - rbml_w.end_tag(); - }); -} + let node_id = tcx.map.as_local_node_id(def_id).unwrap(); + let ast_item = tcx.map.expect_trait_item(node_id); + let trait_item = tcx.associated_item(def_id); -fn encode_deprecation(rbml_w: &mut Encoder, depr_opt: Option) { - depr_opt.map(|depr| { - rbml_w.start_tag(tag_items_data_item_deprecation); - depr.encode(rbml_w).unwrap(); - rbml_w.end_tag(); - }); -} + let container = match trait_item.defaultness { + hir::Defaultness::Default { has_value: true } => + AssociatedContainer::TraitWithDefault, + hir::Defaultness::Default { has_value: false } => + AssociatedContainer::TraitRequired, + hir::Defaultness::Final => + span_bug!(ast_item.span, "traits cannot have final items"), + }; -fn encode_xrefs<'a, 'tcx>(ecx: 
&EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - xrefs: FnvHashMap, u32>) -{ - let mut xref_positions = vec![0; xrefs.len()]; - rbml_w.start_tag(tag_xref_data); - for (xref, id) in xrefs.into_iter() { - xref_positions[id as usize] = rbml_w.mark_stable_position() as u32; - match xref { - XRef::Predicate(p) => { - tyencode::enc_predicate(rbml_w.writer, &ecx.ty_str_ctxt(), &p) + let kind = match trait_item.kind { + ty::AssociatedKind::Const => EntryKind::AssociatedConst(container), + ty::AssociatedKind::Method => { + let fn_data = if let hir::MethodTraitItem(ref sig, _) = ast_item.node { + FnData { + constness: hir::Constness::NotConst, + arg_names: self.encode_fn_arg_names(&sig.decl), + } + } else { + bug!() + }; + EntryKind::Method(self.lazy(&MethodData { + fn_data: fn_data, + container: container, + has_self: trait_item.method_has_self_argument, + })) } + ty::AssociatedKind::Type => EntryKind::AssociatedType(container), + }; + + Entry { + kind: kind, + visibility: trait_item.vis.simplify(), + span: self.lazy(&ast_item.span), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&ast_item.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: match trait_item.kind { + ty::AssociatedKind::Const | + ty::AssociatedKind::Method => { + Some(self.encode_item_type(def_id)) + } + ty::AssociatedKind::Type => { + if trait_item.defaultness.has_value() { + Some(self.encode_item_type(def_id)) + } else { + None + } + } + }, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: if let hir::ConstTraitItem(_, Some(_)) = ast_item.node { + // We only save the HIR for associated consts with bodies + // (InlinedItemRef::from_trait_item panics otherwise) + let trait_def_id = trait_item.container.id(); + Some(self.encode_inlined_item( + 
InlinedItemRef::from_trait_item(trait_def_id, ast_item, tcx) + )) + } else { + None + }, + mir: self.encode_mir(def_id), } } - rbml_w.mark_stable_position(); - rbml_w.end_tag(); - rbml_w.start_tag(tag_xref_index); - index::write_dense_index(xref_positions, rbml_w.writer); - rbml_w.end_tag(); -} - -fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - item: &hir::Item, - index: &mut CrateIndex<'tcx>, - path: PathElems, - vis: hir::Visibility) { - let tcx = ecx.tcx; - - debug!("encoding info for item at {}", - tcx.sess.codemap().span_to_string(item.span)); - - let def_id = ecx.tcx.map.local_def_id(item.id); - let stab = stability::lookup_stability(tcx, ecx.tcx.map.local_def_id(item.id)); - let depr = stability::lookup_deprecation(tcx, ecx.tcx.map.local_def_id(item.id)); - - match item.node { - hir::ItemStatic(_, m, _) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - if m == hir::MutMutable { - encode_family(rbml_w, 'b'); - } else { - encode_family(rbml_w, 'c'); - } - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_symbol(ecx, rbml_w, item.id); - encode_name(rbml_w, item.name); - encode_path(rbml_w, path); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_attributes(rbml_w, &item.attrs); - rbml_w.end_tag(); - } - hir::ItemConst(_, _) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'C'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_path(rbml_w, path); - encode_attributes(rbml_w, &item.attrs); - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemFn(ref decl, _, 
constness, _, ref generics, _) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, FN_FAMILY); - let tps_len = generics.ty_params.len(); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_path(rbml_w, path); - encode_attributes(rbml_w, &item.attrs); - let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); - if needs_inline || constness == hir::Constness::Const { - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); - } - if tps_len == 0 { - encode_symbol(ecx, rbml_w, item.id); - } - encode_constness(rbml_w, constness); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_method_argument_names(rbml_w, &**decl); - rbml_w.end_tag(); - } - hir::ItemMod(ref m) => { - index.record(def_id, rbml_w); - encode_info_for_mod(ecx, - rbml_w, - m, - &item.attrs, - item.id, - path, - item.name, - item.vis); - } - hir::ItemForeignMod(ref fm) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'n'); - encode_name(rbml_w, item.name); - encode_path(rbml_w, path); - - // Encode all the items in this module. - for foreign_item in &fm.items { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(foreign_item.id))); - } - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemTy(..) 
=> { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'y'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_path(rbml_w, path); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - } - hir::ItemEnum(ref enum_definition, _) => { - index.record(def_id, rbml_w); - - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 't'); - encode_item_variances(rbml_w, ecx, item.id); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_repr_attrs(rbml_w, ecx, &item.attrs); - for v in &enum_definition.variants { - encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id())); - } - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); - encode_path(rbml_w, path); - - // Encode inherent implementations for this enumeration. - encode_inherent_implementations(ecx, rbml_w, def_id); - - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - - encode_enum_variant_info(ecx, - rbml_w, - item.id, - vis, - index); - } - hir::ItemStruct(ref struct_def, _) => { - let def = ecx.tcx.lookup_adt_def(def_id); - let variant = def.struct_variant(); - - /* Index the class*/ - index.record(def_id, rbml_w); - - /* Now, make an item for the class itself */ - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, match *struct_def { - hir::VariantData::Struct(..) => 'S', - hir::VariantData::Tuple(..) => 's', - hir::VariantData::Unit(..) 
=> 'u', - }); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - - encode_item_variances(rbml_w, ecx, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_path(rbml_w, path.clone()); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_visibility(rbml_w, vis); - encode_repr_attrs(rbml_w, ecx, &item.attrs); - - /* Encode def_ids for each field and method - for methods, write all the stuff get_trait_method - needs to know*/ - encode_struct_fields(rbml_w, variant); - - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); - - // Encode inherent implementations for this structure. - encode_inherent_implementations(ecx, rbml_w, def_id); - - if !struct_def.is_struct() { - let ctor_did = ecx.tcx.map.local_def_id(struct_def.id()); - rbml_w.wr_tagged_u64(tag_items_data_item_struct_ctor, - def_to_u64(ctor_did)); - } + fn encode_info_for_impl_item(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; - rbml_w.end_tag(); + let node_id = self.tcx.map.as_local_node_id(def_id).unwrap(); + let ast_item = self.tcx.map.expect_impl_item(node_id); + let impl_item = self.tcx.associated_item(def_id); + let impl_def_id = impl_item.container.id(); - for field in &variant.fields { - encode_field(ecx, rbml_w, field, index); - } + let container = match impl_item.defaultness { + hir::Defaultness::Default { has_value: true } => AssociatedContainer::ImplDefault, + hir::Defaultness::Final => AssociatedContainer::ImplFinal, + hir::Defaultness::Default { has_value: false } => + span_bug!(ast_item.span, "impl items always have values (currently)"), + }; - // If this is a tuple-like struct, encode the type of the constructor. 
- if !struct_def.is_struct() { - encode_info_for_struct_ctor(ecx, rbml_w, item.name, struct_def.id(), index, item.id); - } - } - hir::ItemDefaultImpl(unsafety, _) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'd'); - encode_name(rbml_w, item.name); - encode_unsafety(rbml_w, unsafety); - - let trait_ref = tcx.impl_trait_ref(ecx.tcx.map.local_def_id(item.id)).unwrap(); - encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref); - rbml_w.end_tag(); - } - hir::ItemImpl(unsafety, polarity, _, _, _, ref ast_items) => { - // We need to encode information about the default methods we - // have inherited, so we drive this based on the impl structure. - let impl_items = tcx.impl_items.borrow(); - let items = impl_items.get(&def_id).unwrap(); - - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'i'); - encode_bounds_and_type_for_item(rbml_w, ecx, index, item.id); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_unsafety(rbml_w, unsafety); - encode_polarity(rbml_w, polarity); - - match tcx.custom_coerce_unsized_kinds.borrow().get(&ecx.tcx.map.local_def_id(item.id)) { - Some(&kind) => { - rbml_w.start_tag(tag_impl_coerce_unsized_kind); - kind.encode(rbml_w); - rbml_w.end_tag(); + let kind = match impl_item.kind { + ty::AssociatedKind::Const => EntryKind::AssociatedConst(container), + ty::AssociatedKind::Method => { + let fn_data = if let hir::ImplItemKind::Method(ref sig, _) = ast_item.node { + FnData { + constness: sig.constness, + arg_names: self.encode_fn_arg_names(&sig.decl), + } + } else { + bug!() + }; + EntryKind::Method(self.lazy(&MethodData { + fn_data: fn_data, + container: container, + has_self: impl_item.method_has_self_argument, + })) } - None => {} - } + ty::AssociatedKind::Type => EntryKind::AssociatedType(container) + }; - for 
&item_def_id in items { - rbml_w.start_tag(tag_item_impl_item); - match item_def_id { - ty::ConstTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 'C'); - } - ty::MethodTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 'r'); - } - ty::TypeTraitItemId(item_def_id) => { - encode_def_id(rbml_w, item_def_id); - encode_item_sort(rbml_w, 't'); - } - } - rbml_w.end_tag(); - } - if let Some(trait_ref) = tcx.impl_trait_ref(ecx.tcx.map.local_def_id(item.id)) { - encode_trait_ref(rbml_w, ecx, trait_ref, tag_item_trait_ref); - } - encode_path(rbml_w, path.clone()); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - rbml_w.end_tag(); - - // Iterate down the trait items, emitting them. We rely on the - // assumption that all of the actually implemented trait items - // appear first in the impl structure, in the same order they do - // in the ast. This is a little sketchy. - let num_implemented_methods = ast_items.len(); - for (i, &trait_item_def_id) in items.iter().enumerate() { - let ast_item = if i < num_implemented_methods { - Some(&ast_items[i]) + let (ast, mir) = if impl_item.kind == ty::AssociatedKind::Const { + (true, true) + } else if let hir::ImplItemKind::Method(ref sig, _) = ast_item.node { + let generics = self.tcx.item_generics(def_id); + let types = generics.parent_types as usize + generics.types.len(); + let needs_inline = types > 0 || attr::requests_inline(&ast_item.attrs); + let is_const_fn = sig.constness == hir::Constness::Const; + (is_const_fn, needs_inline || is_const_fn) + } else { + (false, false) + }; + + Entry { + kind: kind, + visibility: impl_item.vis.simplify(), + span: self.lazy(&ast_item.span), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&ast_item.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: 
Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: if ast { + Some(self.encode_inlined_item( + InlinedItemRef::from_impl_item(impl_def_id, ast_item, tcx) + )) } else { None - }; - - match tcx.impl_or_trait_item(trait_item_def_id.def_id()) { - ty::ConstTraitItem(ref associated_const) => { - encode_info_for_associated_const(ecx, - rbml_w, - index, - &*associated_const, - path.clone(), - item.id, - ast_item) - } - ty::MethodTraitItem(ref method_type) => { - encode_info_for_method(ecx, - rbml_w, - index, - &**method_type, - path.clone(), - false, - item.id, - ast_item) - } - ty::TypeTraitItem(ref associated_type) => { - encode_info_for_associated_type(ecx, - rbml_w, - index, - &**associated_type, - path.clone(), - item.id, - ast_item) - } - } + }, + mir: if mir { self.encode_mir(def_id) } else { None }, } - } - hir::ItemTrait(_, _, _, ref ms) => { - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_family(rbml_w, 'I'); - encode_item_variances(rbml_w, ecx, item.id); - let trait_def = tcx.lookup_trait_def(def_id); - let trait_predicates = tcx.lookup_predicates(def_id); - encode_unsafety(rbml_w, trait_def.unsafety); - encode_paren_sugar(rbml_w, trait_def.paren_sugar); - encode_defaulted(rbml_w, tcx.trait_has_default_impl(def_id)); - encode_associated_type_names(rbml_w, &trait_def.associated_type_names); - encode_generics(rbml_w, ecx, index, - &trait_def.generics, &trait_predicates, - tag_item_generics); - encode_predicates(rbml_w, ecx, index, - &tcx.lookup_super_predicates(def_id), - tag_item_super_predicates); - encode_trait_ref(rbml_w, ecx, trait_def.trait_ref, tag_item_trait_ref); - encode_name(rbml_w, item.name); - encode_attributes(rbml_w, &item.attrs); - encode_visibility(rbml_w, vis); - encode_stability(rbml_w, stab); - 
encode_deprecation(rbml_w, depr); - for &method_def_id in tcx.trait_item_def_ids(def_id).iter() { - rbml_w.start_tag(tag_item_trait_item); - match method_def_id { - ty::ConstTraitItemId(const_def_id) => { - encode_def_id(rbml_w, const_def_id); - encode_item_sort(rbml_w, 'C'); - } - ty::MethodTraitItemId(method_def_id) => { - encode_def_id(rbml_w, method_def_id); - encode_item_sort(rbml_w, 'r'); - } - ty::TypeTraitItemId(type_def_id) => { - encode_def_id(rbml_w, type_def_id); - encode_item_sort(rbml_w, 't'); - } + } + + fn encode_fn_arg_names(&mut self, decl: &hir::FnDecl) -> LazySeq { + self.lazy_seq(decl.inputs.iter().map(|arg| { + if let PatKind::Binding(_, _, ref path1, _) = arg.pat.node { + path1.node + } else { + Symbol::intern("") } - rbml_w.end_tag(); + })) + } + + fn encode_mir(&mut self, def_id: DefId) -> Option>> { + self.tcx.mir_map.borrow().get(&def_id).map(|mir| self.lazy(&*mir.borrow())) + } - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(method_def_id.def_id())); + // Encodes the inherent implementations of a structure, enumeration, or trait. + fn encode_inherent_implementations(&mut self, def_id: DefId) -> LazySeq { + match self.tcx.inherent_impls.borrow().get(&def_id) { + None => LazySeq::empty(), + Some(implementations) => { + self.lazy_seq(implementations.iter().map(|&def_id| { + assert!(def_id.is_local()); + def_id.index + })) + } } - encode_path(rbml_w, path.clone()); + } - // Encode inherent implementations for this trait. - encode_inherent_implementations(ecx, rbml_w, def_id); + fn encode_stability(&mut self, def_id: DefId) -> Option> { + self.tcx.lookup_stability(def_id).map(|stab| self.lazy(stab)) + } - rbml_w.end_tag(); + fn encode_deprecation(&mut self, def_id: DefId) -> Option> { + self.tcx.lookup_deprecation(def_id).map(|depr| self.lazy(&depr)) + } - // Now output the trait item info for each trait item. 
- let r = tcx.trait_item_def_ids(def_id); - for (i, &item_def_id) in r.iter().enumerate() { - assert_eq!(item_def_id.def_id().krate, LOCAL_CRATE); + fn encode_info_for_item(&mut self, (def_id, item): (DefId, &'tcx hir::Item)) -> Entry<'tcx> { + let tcx = self.tcx; - index.record(item_def_id.def_id(), rbml_w); - rbml_w.start_tag(tag_items_data_item); + debug!("encoding info for item at {}", + tcx.sess.codemap().span_to_string(item.span)); - encode_parent_item(rbml_w, def_id); + let kind = match item.node { + hir::ItemStatic(_, hir::MutMutable, _) => EntryKind::MutStatic, + hir::ItemStatic(_, hir::MutImmutable, _) => EntryKind::ImmStatic, + hir::ItemConst(..) => EntryKind::Const, + hir::ItemFn(ref decl, _, constness, ..) => { + let data = FnData { + constness: constness, + arg_names: self.encode_fn_arg_names(&decl), + }; - let stab = stability::lookup_stability(tcx, item_def_id.def_id()); - let depr = stability::lookup_deprecation(tcx, item_def_id.def_id()); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); + EntryKind::Fn(self.lazy(&data)) + } + hir::ItemMod(ref m) => { + return self.encode_info_for_mod(FromId(item.id, (m, &item.attrs, &item.vis))); + } + hir::ItemForeignMod(_) => EntryKind::ForeignMod, + hir::ItemTy(..) => EntryKind::Type, + hir::ItemEnum(..) => EntryKind::Enum, + hir::ItemStruct(ref struct_def, _) => { + let variant = tcx.lookup_adt_def(def_id).struct_variant(); + + // Encode def_ids for each field and method + // for methods, write all the stuff get_trait_method + // needs to know + let struct_ctor = if !struct_def.is_struct() { + Some(tcx.map.local_def_id(struct_def.id()).index) + } else { + None + }; + EntryKind::Struct(self.lazy(&VariantData { + ctor_kind: variant.ctor_kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: struct_ctor, + })) + } + hir::ItemUnion(..) 
=> { + let variant = tcx.lookup_adt_def(def_id).struct_variant(); + + EntryKind::Union(self.lazy(&VariantData { + ctor_kind: variant.ctor_kind, + disr: variant.disr_val.to_u64_unchecked(), + struct_ctor: None, + })) + } + hir::ItemDefaultImpl(..) => { + let data = ImplData { + polarity: hir::ImplPolarity::Positive, + parent_impl: None, + coerce_unsized_kind: None, + trait_ref: tcx.impl_trait_ref(def_id).map(|trait_ref| self.lazy(&trait_ref)), + }; - let trait_item_type = - tcx.impl_or_trait_item(item_def_id.def_id()); - let is_nonstatic_method; - match trait_item_type { - ty::ConstTraitItem(associated_const) => { - encode_name(rbml_w, associated_const.name); - encode_def_id_and_key(ecx, rbml_w, associated_const.def_id); - encode_visibility(rbml_w, associated_const.vis); + EntryKind::DefaultImpl(self.lazy(&data)) + } + hir::ItemImpl(_, polarity, ..) => { + let trait_ref = tcx.impl_trait_ref(def_id); + let parent = if let Some(trait_ref) = trait_ref { + let trait_def = tcx.lookup_trait_def(trait_ref.def_id); + trait_def.ancestors(def_id).skip(1).next().and_then(|node| { + match node { + specialization_graph::Node::Impl(parent) => Some(parent), + _ => None, + } + }) + } else { + None + }; - let elem = ast_map::PathName(associated_const.name); - encode_path(rbml_w, - path.clone().chain(Some(elem))); + let data = ImplData { + polarity: polarity, + parent_impl: parent, + coerce_unsized_kind: tcx.custom_coerce_unsized_kinds + .borrow() + .get(&def_id) + .cloned(), + trait_ref: trait_ref.map(|trait_ref| self.lazy(&trait_ref)), + }; - encode_family(rbml_w, 'C'); + EntryKind::Impl(self.lazy(&data)) + } + hir::ItemTrait(..) 
=> { + let trait_def = tcx.lookup_trait_def(def_id); + let data = TraitData { + unsafety: trait_def.unsafety, + paren_sugar: trait_def.paren_sugar, + has_default_impl: tcx.trait_has_default_impl(def_id), + super_predicates: self.lazy(&tcx.item_super_predicates(def_id)), + }; - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(associated_const.def_id)); + EntryKind::Trait(self.lazy(&data)) + } + hir::ItemExternCrate(_) | + hir::ItemUse(..) => bug!("cannot encode info for item {:?}", item), + }; - is_nonstatic_method = false; + Entry { + kind: kind, + visibility: item.vis.simplify(), + span: self.lazy(&item.span), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&item.attrs), + children: match item.node { + hir::ItemForeignMod(ref fm) => { + self.lazy_seq(fm.items + .iter() + .map(|foreign_item| tcx.map.local_def_id(foreign_item.id).index)) } - ty::MethodTraitItem(method_ty) => { - let method_def_id = item_def_id.def_id(); - - encode_method_ty_fields(ecx, rbml_w, index, &*method_ty); - - let elem = ast_map::PathName(method_ty.name); - encode_path(rbml_w, - path.clone().chain(Some(elem))); - - match method_ty.explicit_self { - ty::ExplicitSelfCategory::Static => { - encode_family(rbml_w, - STATIC_METHOD_FAMILY); - } - _ => { - encode_family(rbml_w, - METHOD_FAMILY); - } - } - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(method_def_id)); - - is_nonstatic_method = method_ty.explicit_self != - ty::ExplicitSelfCategory::Static; + hir::ItemEnum(..) 
=> { + let def = self.tcx.lookup_adt_def(def_id); + self.lazy_seq(def.variants.iter().map(|v| { + assert!(v.did.is_local()); + v.did.index + })) } - ty::TypeTraitItem(associated_type) => { - encode_name(rbml_w, associated_type.name); - encode_def_id_and_key(ecx, rbml_w, associated_type.def_id); - - let elem = ast_map::PathName(associated_type.name); - encode_path(rbml_w, - path.clone().chain(Some(elem))); - - encode_item_sort(rbml_w, 't'); - encode_family(rbml_w, 'y'); - - if let Some(ty) = associated_type.ty { - encode_type(ecx, rbml_w, ty); - } - - is_nonstatic_method = false; + hir::ItemStruct(..) | + hir::ItemUnion(..) => { + let def = self.tcx.lookup_adt_def(def_id); + self.lazy_seq(def.struct_variant().fields.iter().map(|f| { + assert!(f.did.is_local()); + f.did.index + })) } - } - - let trait_item = &ms[i]; - encode_attributes(rbml_w, &trait_item.attrs); - match trait_item.node { - hir::ConstTraitItem(_, ref default) => { - if default.is_some() { - encode_item_sort(rbml_w, 'C'); - } else { - encode_item_sort(rbml_w, 'c'); - } - - encode_inlined_item(ecx, rbml_w, - InlinedItemRef::TraitItem(def_id, trait_item)); + hir::ItemImpl(..) | + hir::ItemTrait(..) => { + self.lazy_seq(tcx.associated_item_def_ids(def_id).iter().map(|&def_id| { + assert!(def_id.is_local()); + def_id.index + })) } - hir::MethodTraitItem(ref sig, ref body) => { - // If this is a static method, we've already - // encoded this. - if is_nonstatic_method { - // FIXME: I feel like there is something funny - // going on. - encode_bounds_and_type_for_item(rbml_w, ecx, index, - ecx.local_id(item_def_id.def_id())); - } - - if body.is_some() { - encode_item_sort(rbml_w, 'p'); - encode_inlined_item(ecx, rbml_w, - InlinedItemRef::TraitItem(def_id, trait_item)); + _ => LazySeq::empty(), + }, + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) 
| + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) => Some(self.encode_item_type(def_id)), + _ => None, + }, + inherent_impls: self.encode_inherent_implementations(def_id), + variances: match item.node { + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemTrait(..) => self.encode_item_variances(def_id), + _ => LazySeq::empty(), + }, + generics: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) | + hir::ItemTrait(..) => Some(self.encode_generics(def_id)), + _ => None, + }, + predicates: match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemImpl(..) | + hir::ItemTrait(..) => Some(self.encode_predicates(def_id)), + _ => None, + }, + + ast: match item.node { + hir::ItemConst(..) | + hir::ItemFn(_, _, hir::Constness::Const, ..) => { + Some(self.encode_inlined_item( + InlinedItemRef::from_item(def_id, item, tcx) + )) + } + _ => None, + }, + mir: match item.node { + hir::ItemConst(..) => self.encode_mir(def_id), + hir::ItemFn(_, _, constness, _, ref generics, _) => { + let tps_len = generics.ty_params.len(); + let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); + if needs_inline || constness == hir::Constness::Const { + self.encode_mir(def_id) } else { - encode_item_sort(rbml_w, 'r'); + None } - encode_method_argument_names(rbml_w, &sig.decl); } + _ => None, + }, + } + } - hir::TypeTraitItem(..) 
=> {} - } - - rbml_w.end_tag(); + /// Serialize the text of exported macros + fn encode_info_for_macro_def(&mut self, macro_def: &hir::MacroDef) -> Entry<'tcx> { + let def_id = self.tcx.map.local_def_id(macro_def.id); + Entry { + kind: EntryKind::MacroDef(self.lazy(&MacroDef { + body: ::syntax::print::pprust::tts_to_string(¯o_def.body) + })), + visibility: ty::Visibility::Public, + span: self.lazy(¯o_def.span), + def_key: self.encode_def_key(def_id), + + attributes: self.encode_attributes(¯o_def.attrs), + children: LazySeq::empty(), + stability: None, + deprecation: None, + ty: None, + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: None, + predicates: None, + ast: None, + mir: None, } - } - hir::ItemExternCrate(_) | hir::ItemUse(_) => { - // these are encoded separately - } } } -fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder, - nitem: &hir::ForeignItem, - index: &mut CrateIndex<'tcx>, - path: PathElems, - abi: abi::Abi) { - let def_id = ecx.tcx.map.local_def_id(nitem.id); - - index.record(def_id, rbml_w); - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); - encode_visibility(rbml_w, nitem.vis); - match nitem.node { - hir::ForeignItemFn(ref fndecl, _) => { - encode_family(rbml_w, FN_FAMILY); - encode_bounds_and_type_for_item(rbml_w, ecx, index, nitem.id); - encode_name(rbml_w, nitem.name); - if abi == abi::RustIntrinsic || abi == abi::PlatformIntrinsic { - encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(nitem)); - } - encode_attributes(rbml_w, &*nitem.attrs); - let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); - let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_symbol(ecx, rbml_w, nitem.id); - encode_method_argument_names(rbml_w, &*fndecl); - } - hir::ForeignItemStatic(_, mutbl) => { - if mutbl { 
- encode_family(rbml_w, 'b'); - } else { - encode_family(rbml_w, 'c'); +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + /// In some cases, along with the item itself, we also + /// encode some sub-items. Usually we want some info from the item + /// so it's easier to do that here then to wait until we would encounter + /// normally in the visitor walk. + fn encode_addl_info_for_item(&mut self, item: &hir::Item) { + let def_id = self.tcx.map.local_def_id(item.id); + match item.node { + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemMod(..) | + hir::ItemForeignMod(..) | + hir::ItemExternCrate(..) | + hir::ItemUse(..) | + hir::ItemDefaultImpl(..) | + hir::ItemTy(..) => { + // no sub-item recording needed in these cases + } + hir::ItemEnum(..) => { + self.encode_fields(def_id); + + let def = self.tcx.lookup_adt_def(def_id); + for (i, variant) in def.variants.iter().enumerate() { + self.record(variant.did, + EncodeContext::encode_enum_variant_info, + (def_id, Untracked(i))); + } + } + hir::ItemStruct(ref struct_def, _) => { + self.encode_fields(def_id); + + // If the struct has a constructor, encode it. + if !struct_def.is_struct() { + let ctor_def_id = self.tcx.map.local_def_id(struct_def.id()); + self.record(ctor_def_id, + EncodeContext::encode_struct_ctor, + (def_id, ctor_def_id)); + } + } + hir::ItemUnion(..) => { + self.encode_fields(def_id); + } + hir::ItemImpl(..) => { + for &trait_item_def_id in &self.tcx.associated_item_def_ids(def_id)[..] { + self.record(trait_item_def_id, + EncodeContext::encode_info_for_impl_item, + trait_item_def_id); + } + } + hir::ItemTrait(..) => { + for &item_def_id in &self.tcx.associated_item_def_ids(def_id)[..] 
{ + self.record(item_def_id, + EncodeContext::encode_info_for_trait_item, + item_def_id); + } + } } - encode_bounds_and_type_for_item(rbml_w, ecx, index, nitem.id); - encode_attributes(rbml_w, &*nitem.attrs); - let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); - let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); - encode_stability(rbml_w, stab); - encode_deprecation(rbml_w, depr); - encode_symbol(ecx, rbml_w, nitem.id); - encode_name(rbml_w, nitem.name); - } } - encode_path(rbml_w, path); - rbml_w.end_tag(); } -fn my_visit_expr(expr: &hir::Expr, - rbml_w: &mut Encoder, - ecx: &EncodeContext, - index: &mut CrateIndex) { - match expr.node { - hir::ExprClosure(..) => { - let def_id = ecx.tcx.map.local_def_id(expr.id); - - index.record(def_id, rbml_w); - - rbml_w.start_tag(tag_items_data_item); - encode_def_id_and_key(ecx, rbml_w, def_id); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + fn encode_info_for_foreign_item(&mut self, + (def_id, nitem): (DefId, &hir::ForeignItem)) + -> Entry<'tcx> { + let tcx = self.tcx; - rbml_w.start_tag(tag_items_closure_ty); - write_closure_type(ecx, rbml_w, &ecx.tcx.tables.borrow().closure_tys[&def_id]); - rbml_w.end_tag(); + debug!("writing foreign item {}", tcx.node_path_str(nitem.id)); - rbml_w.start_tag(tag_items_closure_kind); - ecx.tcx.closure_kind(def_id).encode(rbml_w).unwrap(); - rbml_w.end_tag(); - - ecx.tcx.map.with_path(expr.id, |path| encode_path(rbml_w, path)); - - assert!(ecx.mir_map.contains_key(&expr.id)); - encode_mir(ecx, rbml_w, expr.id); + let kind = match nitem.node { + hir::ForeignItemFn(ref fndecl, _) => { + let data = FnData { + constness: hir::Constness::NotConst, + arg_names: self.encode_fn_arg_names(&fndecl), + }; + EntryKind::ForeignFn(self.lazy(&data)) + } + hir::ForeignItemStatic(_, true) => EntryKind::ForeignMutStatic, + hir::ForeignItemStatic(_, false) => EntryKind::ForeignImmStatic, + }; - rbml_w.end_tag(); + Entry { + kind: kind, + 
visibility: nitem.vis.simplify(), + span: self.lazy(&nitem.span), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&nitem.attrs), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None, } - _ => { } } } -fn my_visit_item<'a, 'tcx>(i: &hir::Item, - rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'tcx>) { - ecx.tcx.map.with_path(i.id, |path| { - encode_info_for_item(ecx, rbml_w, i, index, path, i.vis); - }); +struct EncodeVisitor<'a, 'b: 'a, 'tcx: 'b> { + index: IndexBuilder<'a, 'b, 'tcx>, } -fn my_visit_foreign_item<'a, 'tcx>(ni: &hir::ForeignItem, - rbml_w: &mut Encoder, - ecx: &EncodeContext<'a, 'tcx>, - index: &mut CrateIndex<'tcx>) { - debug!("writing foreign item {}::{}", - ecx.tcx.map.path_to_string(ni.id), - ni.name); - - let abi = ecx.tcx.map.get_foreign_abi(ni.id); - ecx.tcx.map.with_path(ni.id, |path| { - encode_info_for_foreign_item(ecx, rbml_w, - ni, index, - path, abi); - }); -} - -struct EncodeVisitor<'a, 'b:'a, 'c:'a, 'tcx:'c> { - rbml_w_for_visit_item: &'a mut Encoder<'b>, - ecx: &'a EncodeContext<'c,'tcx>, - index: &'a mut CrateIndex<'tcx>, -} - -impl<'a, 'b, 'c, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'c, 'tcx> { +impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.index.tcx.map) + } fn visit_expr(&mut self, ex: &'tcx hir::Expr) { intravisit::walk_expr(self, ex); - my_visit_expr(ex, self.rbml_w_for_visit_item, self.ecx, self.index); + self.index.encode_info_for_expr(ex); } - fn visit_item(&mut self, i: &'tcx hir::Item) { - intravisit::walk_item(self, 
i); - my_visit_item(i, self.rbml_w_for_visit_item, self.ecx, self.index); + fn visit_item(&mut self, item: &'tcx hir::Item) { + intravisit::walk_item(self, item); + let def_id = self.index.tcx.map.local_def_id(item.id); + match item.node { + hir::ItemExternCrate(_) | + hir::ItemUse(..) => (), // ignore these + _ => self.index.record(def_id, EncodeContext::encode_info_for_item, (def_id, item)), + } + self.index.encode_addl_info_for_item(item); } fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem) { intravisit::walk_foreign_item(self, ni); - my_visit_foreign_item(ni, self.rbml_w_for_visit_item, self.ecx, self.index); + let def_id = self.index.tcx.map.local_def_id(ni.id); + self.index.record(def_id, + EncodeContext::encode_info_for_foreign_item, + (def_id, ni)); } -} - -fn encode_info_for_items<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>, - rbml_w: &mut Encoder) - -> CrateIndex<'tcx> { - let krate = ecx.tcx.map.krate(); - - let mut index = CrateIndex { - items: IndexData::new(ecx.tcx.map.num_local_def_ids()), - xrefs: FnvHashMap() - }; - rbml_w.start_tag(tag_items_data); - - index.record(DefId::local(CRATE_DEF_INDEX), rbml_w); - encode_info_for_mod(ecx, - rbml_w, - &krate.module, - &[], - CRATE_NODE_ID, - [].iter().cloned().chain(LinkedPath::empty()), - syntax::parse::token::intern(&ecx.link_meta.crate_name), - hir::Public); - - krate.visit_all_items(&mut EncodeVisitor { - index: &mut index, - ecx: ecx, - rbml_w_for_visit_item: &mut *rbml_w, - }); - - rbml_w.end_tag(); - index -} - -fn encode_item_index(rbml_w: &mut Encoder, index: IndexData) { - rbml_w.start_tag(tag_index); - index.write_index(rbml_w.writer); - rbml_w.end_tag(); -} - -fn encode_meta_item(rbml_w: &mut Encoder, mi: &ast::MetaItem) { - match mi.node { - ast::MetaWord(ref name) => { - rbml_w.start_tag(tag_meta_item_word); - rbml_w.wr_tagged_str(tag_meta_item_name, name); - rbml_w.end_tag(); - } - ast::MetaNameValue(ref name, ref value) => { - match value.node { - ast::LitStr(ref value, _) => { - 
rbml_w.start_tag(tag_meta_item_name_value); - rbml_w.wr_tagged_str(tag_meta_item_name, name); - rbml_w.wr_tagged_str(tag_meta_item_value, value); - rbml_w.end_tag(); - } - _ => {/* FIXME (#623): encode other variants */ } - } - } - ast::MetaList(ref name, ref items) => { - rbml_w.start_tag(tag_meta_item_list); - rbml_w.wr_tagged_str(tag_meta_item_name, name); - for inner_item in items { - encode_meta_item(rbml_w, &**inner_item); - } - rbml_w.end_tag(); - } - } -} - -fn encode_attributes(rbml_w: &mut Encoder, attrs: &[ast::Attribute]) { - rbml_w.start_tag(tag_attributes); - for attr in attrs { - rbml_w.start_tag(tag_attribute); - rbml_w.wr_tagged_u8(tag_attribute_is_sugared_doc, attr.node.is_sugared_doc as u8); - encode_meta_item(rbml_w, &*attr.node.value); - rbml_w.end_tag(); + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { + intravisit::walk_ty(self, ty); + self.index.encode_info_for_ty(ty); } - rbml_w.end_tag(); -} - -fn encode_unsafety(rbml_w: &mut Encoder, unsafety: hir::Unsafety) { - let byte: u8 = match unsafety { - hir::Unsafety::Normal => 0, - hir::Unsafety::Unsafe => 1, - }; - rbml_w.wr_tagged_u8(tag_unsafety, byte); -} - -fn encode_paren_sugar(rbml_w: &mut Encoder, paren_sugar: bool) { - let byte: u8 = if paren_sugar {1} else {0}; - rbml_w.wr_tagged_u8(tag_paren_sugar, byte); -} - -fn encode_defaulted(rbml_w: &mut Encoder, is_defaulted: bool) { - let byte: u8 = if is_defaulted {1} else {0}; - rbml_w.wr_tagged_u8(tag_defaulted_trait, byte); -} - -fn encode_associated_type_names(rbml_w: &mut Encoder, names: &[Name]) { - rbml_w.start_tag(tag_associated_type_names); - for &name in names { - rbml_w.wr_tagged_str(tag_associated_type_name, &name.as_str()); + fn visit_macro_def(&mut self, macro_def: &'tcx hir::MacroDef) { + let def_id = self.index.tcx.map.local_def_id(macro_def.id); + self.index.record(def_id, EncodeContext::encode_info_for_macro_def, macro_def); } - rbml_w.end_tag(); } -fn encode_polarity(rbml_w: &mut Encoder, polarity: hir::ImplPolarity) { - let 
byte: u8 = match polarity { - hir::ImplPolarity::Positive => 0, - hir::ImplPolarity::Negative => 1, - }; - rbml_w.wr_tagged_u8(tag_polarity, byte); -} - -fn encode_crate_deps(rbml_w: &mut Encoder, cstore: &cstore::CStore) { - fn get_ordered_deps(cstore: &cstore::CStore) - -> Vec<(CrateNum, Rc)> { - // Pull the cnums and name,vers,hash out of cstore - let mut deps = Vec::new(); - cstore.iter_crate_data(|cnum, val| { - deps.push((cnum, val.clone())); - }); - - // Sort by cnum - deps.sort_by(|kv1, kv2| kv1.0.cmp(&kv2.0)); - - // Sanity-check the crate numbers - let mut expected_cnum = 1; - for &(n, _) in &deps { - assert_eq!(n, expected_cnum); - expected_cnum += 1; +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + fn encode_info_for_ty(&mut self, ty: &hir::Ty) { + if let hir::TyImplTrait(_) = ty.node { + let def_id = self.tcx.map.local_def_id(ty.id); + self.record(def_id, EncodeContext::encode_info_for_anon_ty, def_id); } - - deps } - // We're just going to write a list of crate 'name-hash-version's, with - // the assumption that they are numbered 1 to n. - // FIXME (#2166): This is not nearly enough to support correct versioning - // but is enough to get transitive crate dependencies working. - rbml_w.start_tag(tag_crate_deps); - for (_cnum, dep) in get_ordered_deps(cstore) { - encode_crate_dep(rbml_w, &dep); + fn encode_info_for_expr(&mut self, expr: &hir::Expr) { + match expr.node { + hir::ExprClosure(..) 
=> { + let def_id = self.tcx.map.local_def_id(expr.id); + self.record(def_id, EncodeContext::encode_info_for_closure, def_id); + } + _ => {} + } } - rbml_w.end_tag(); } -fn encode_lang_items(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_lang_items); - - for (i, &opt_def_id) in ecx.tcx.lang_items.items() { - if let Some(def_id) = opt_def_id { - if def_id.is_local() { - rbml_w.start_tag(tag_lang_items_item); - rbml_w.wr_tagged_u32(tag_lang_items_item_id, i as u32); - rbml_w.wr_tagged_u32(tag_lang_items_item_index, def_id.index.as_u32()); - rbml_w.end_tag(); - } +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + fn encode_info_for_anon_ty(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; + Entry { + kind: EntryKind::Type, + visibility: ty::Visibility::Public, + span: self.lazy(&tcx.def_span(def_id)), + def_key: self.encode_def_key(def_id), + attributes: LazySeq::empty(), + children: LazySeq::empty(), + stability: None, + deprecation: None, + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + + ast: None, + mir: None, } } - for i in &ecx.tcx.lang_items.missing { - rbml_w.wr_tagged_u32(tag_lang_items_missing, *i as u32); - } + fn encode_info_for_closure(&mut self, def_id: DefId) -> Entry<'tcx> { + let tcx = self.tcx; - rbml_w.end_tag(); // tag_lang_items -} + let data = ClosureData { + kind: tcx.closure_kind(def_id), + ty: self.lazy(&tcx.tables().closure_tys[&def_id]), + }; -fn encode_native_libraries(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_native_libraries); - - for &(ref lib, kind) in ecx.tcx.sess.cstore.used_libraries().iter() { - match kind { - cstore::NativeStatic => {} // these libraries are not propagated - cstore::NativeFramework | cstore::NativeUnknown => { - rbml_w.start_tag(tag_native_libraries_lib); - 
rbml_w.wr_tagged_u32(tag_native_libraries_kind, kind as u32); - rbml_w.wr_tagged_str(tag_native_libraries_name, lib); - rbml_w.end_tag(); - } + Entry { + kind: EntryKind::Closure(self.lazy(&data)), + visibility: ty::Visibility::Public, + span: self.lazy(&tcx.def_span(def_id)), + def_key: self.encode_def_key(def_id), + attributes: self.encode_attributes(&tcx.get_attrs(def_id)), + children: LazySeq::empty(), + stability: None, + deprecation: None, + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: LazySeq::empty(), + generics: Some(self.encode_generics(def_id)), + predicates: None, + + ast: None, + mir: self.encode_mir(def_id), } } - rbml_w.end_tag(); -} - -fn encode_plugin_registrar_fn(ecx: &EncodeContext, rbml_w: &mut Encoder) { - match ecx.tcx.sess.plugin_registrar_fn.get() { - Some(id) => { - let def_id = ecx.tcx.map.local_def_id(id); - rbml_w.wr_tagged_u32(tag_plugin_registrar_fn, def_id.index.as_u32()); + fn encode_info_for_items(&mut self) -> Index { + let krate = self.tcx.map.krate(); + let mut index = IndexBuilder::new(self); + index.record(DefId::local(CRATE_DEF_INDEX), + EncodeContext::encode_info_for_mod, + FromId(CRATE_NODE_ID, (&krate.module, &krate.attrs, &hir::Public))); + let mut visitor = EncodeVisitor { index: index }; + krate.visit_all_item_likes(&mut visitor.as_deep_visitor()); + for macro_def in &krate.exported_macros { + visitor.visit_macro_def(macro_def); } - None => {} + visitor.index.into_items() } -} -fn encode_codemap(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_codemap); - let codemap = ecx.tcx.sess.codemap(); + fn encode_attributes(&mut self, attrs: &[ast::Attribute]) -> LazySeq { + self.lazy_seq_ref(attrs) + } - for filemap in &codemap.files.borrow()[..] 
{ + fn encode_crate_deps(&mut self) -> LazySeq { + fn get_ordered_deps(cstore: &cstore::CStore) -> Vec<(CrateNum, Rc)> { + // Pull the cnums and name,vers,hash out of cstore + let mut deps = Vec::new(); + cstore.iter_crate_data(|cnum, val| { + deps.push((cnum, val.clone())); + }); + + // Sort by cnum + deps.sort_by(|kv1, kv2| kv1.0.cmp(&kv2.0)); + + // Sanity-check the crate numbers + let mut expected_cnum = 1; + for &(n, _) in &deps { + assert_eq!(n, CrateNum::new(expected_cnum)); + expected_cnum += 1; + } - if filemap.lines.borrow().is_empty() || filemap.is_imported() { - // No need to export empty filemaps, as they can't contain spans - // that need translation. - // Also no need to re-export imported filemaps, as any downstream - // crate will import them from their original source. - continue; + deps } - rbml_w.start_tag(tag_codemap_filemap); - rbml_w.emit_opaque(|opaque_encoder| { - filemap.encode(opaque_encoder) - }).unwrap(); - rbml_w.end_tag(); + // We're just going to write a list of crate 'name-hash-version's, with + // the assumption that they are numbered 1 to n. + // FIXME (#2166): This is not nearly enough to support correct versioning + // but is enough to get transitive crate dependencies working. 
+ let deps = get_ordered_deps(self.cstore); + self.lazy_seq(deps.iter().map(|&(_, ref dep)| { + CrateDep { + name: dep.name(), + hash: dep.hash(), + kind: dep.dep_kind.get(), + } + })) } - rbml_w.end_tag(); -} - -/// Serialize the text of the exported macros -fn encode_macro_defs(rbml_w: &mut Encoder, - krate: &hir::Crate) { - rbml_w.start_tag(tag_macro_defs); - for def in &krate.exported_macros { - rbml_w.start_tag(tag_macro_def); - - encode_name(rbml_w, def.name); - encode_attributes(rbml_w, &def.attrs); - - rbml_w.wr_tagged_str(tag_macro_def_body, - &::syntax::print::pprust::tts_to_string(&def.body)); - - rbml_w.end_tag(); + fn encode_lang_items(&mut self) -> (LazySeq<(DefIndex, usize)>, LazySeq) { + let tcx = self.tcx; + let lang_items = tcx.lang_items.items().iter(); + (self.lazy_seq(lang_items.enumerate().filter_map(|(i, &opt_def_id)| { + if let Some(def_id) = opt_def_id { + if def_id.is_local() { + return Some((def_id.index, i)); + } + } + None + })), + self.lazy_seq_ref(&tcx.lang_items.missing)) } - rbml_w.end_tag(); -} -fn encode_struct_field_attrs(ecx: &EncodeContext, - rbml_w: &mut Encoder, - krate: &hir::Crate) { - struct StructFieldVisitor<'a, 'b:'a, 'c:'a, 'tcx:'b> { - ecx: &'a EncodeContext<'b, 'tcx>, - rbml_w: &'a mut Encoder<'c>, + fn encode_native_libraries(&mut self) -> LazySeq { + let used_libraries = self.tcx.sess.cstore.used_libraries(); + self.lazy_seq(used_libraries) } - impl<'a, 'b, 'c, 'tcx, 'v> Visitor<'v> for StructFieldVisitor<'a, 'b, 'c, 'tcx> { - fn visit_struct_field(&mut self, field: &hir::StructField) { - self.rbml_w.start_tag(tag_struct_field); - let def_id = self.ecx.tcx.map.local_def_id(field.node.id); - encode_def_id(self.rbml_w, def_id); - encode_attributes(self.rbml_w, &field.node.attrs); - self.rbml_w.end_tag(); - } + fn encode_codemap(&mut self) -> LazySeq { + let codemap = self.tcx.sess.codemap(); + let all_filemaps = codemap.files.borrow(); + self.lazy_seq_ref(all_filemaps.iter() + .filter(|filemap| { + // No need to 
re-export imported filemaps, as any downstream + // crate will import them from their original source. + !filemap.is_imported() + }) + .map(|filemap| &**filemap)) } - - rbml_w.start_tag(tag_struct_fields); - krate.visit_all_items(&mut StructFieldVisitor { ecx: ecx, rbml_w: rbml_w }); - rbml_w.end_tag(); } - - -struct ImplVisitor<'a, 'tcx:'a> { - tcx: &'a ty::ctxt<'tcx>, - impls: FnvHashMap> +struct ImplVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + impls: FxHashMap>, } -impl<'a, 'tcx, 'v> Visitor<'v> for ImplVisitor<'a, 'tcx> { +impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ImplVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { if let hir::ItemImpl(..) = item.node { let impl_id = self.tcx.map.local_def_id(item.id); if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_id) { - self.impls.entry(trait_ref.def_id) + self.impls + .entry(trait_ref.def_id) .or_insert(vec![]) - .push(impl_id); + .push(impl_id.index); } } } -} -/// Encodes an index, mapping each trait to its (local) implementations. 
-fn encode_impls<'a>(ecx: &'a EncodeContext, - krate: &hir::Crate, - rbml_w: &'a mut Encoder) { - let mut visitor = ImplVisitor { - tcx: ecx.tcx, - impls: FnvHashMap() - }; - krate.visit_all_items(&mut visitor); - - rbml_w.start_tag(tag_impls); - for (trait_, trait_impls) in visitor.impls { - rbml_w.start_tag(tag_impls_trait); - encode_def_id(rbml_w, trait_); - for impl_ in trait_impls { - rbml_w.wr_tagged_u64(tag_impls_trait_impl, def_to_u64(impl_)); - } - rbml_w.end_tag(); + fn visit_impl_item(&mut self, _impl_item: &'v hir::ImplItem) { + // handled in `visit_item` above } - rbml_w.end_tag(); } -fn encode_misc_info(ecx: &EncodeContext, - krate: &hir::Crate, - rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_misc_info); - rbml_w.start_tag(tag_misc_info_crate_items); - for item_id in &krate.module.item_ids { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(item_id.id))); - - let item = ecx.tcx.map.expect_item(item_id.id); - each_auxiliary_node_id(item, |auxiliary_node_id| { - rbml_w.wr_tagged_u64(tag_mod_child, - def_to_u64(ecx.tcx.map.local_def_id(auxiliary_node_id))); - true - }); - } - - // Encode reexports for the root module. - encode_reexports(ecx, rbml_w, 0); +impl<'a, 'tcx> EncodeContext<'a, 'tcx> { + /// Encodes an index, mapping each trait to its (local) implementations. + fn encode_impls(&mut self) -> LazySeq { + let mut visitor = ImplVisitor { + tcx: self.tcx, + impls: FxHashMap(), + }; + self.tcx.map.krate().visit_all_item_likes(&mut visitor); + + let all_impls: Vec<_> = visitor.impls + .into_iter() + .map(|(trait_def_id, impls)| { + TraitImpls { + trait_id: (trait_def_id.krate.as_u32(), trait_def_id.index), + impls: self.lazy_seq(impls), + } + }) + .collect(); - rbml_w.end_tag(); - rbml_w.end_tag(); -} + self.lazy_seq(all_impls) + } -// Encodes all reachable symbols in this crate into the metadata. 
-// -// This pass is seeded off the reachability list calculated in the -// middle::reachable module but filters out items that either don't have a -// symbol associated with them (they weren't translated) or if they're an FFI -// definition (as that's not defined in this crate). -fn encode_reachable(ecx: &EncodeContext, rbml_w: &mut Encoder) { - rbml_w.start_tag(tag_reachable_ids); - for &id in ecx.reachable { - let def_id = ecx.tcx.map.local_def_id(id); - rbml_w.wr_tagged_u32(tag_reachable_id, def_id.index.as_u32()); + // Encodes all reachable symbols in this crate into the metadata. + // + // This pass is seeded off the reachability list calculated in the + // middle::reachable module but filters out items that either don't have a + // symbol associated with them (they weren't translated) or if they're an FFI + // definition (as that's not defined in this crate). + fn encode_reachable(&mut self) -> LazySeq { + let reachable = self.reachable; + let tcx = self.tcx; + self.lazy_seq(reachable.iter().map(|&id| tcx.map.local_def_id(id).index)) } - rbml_w.end_tag(); -} -fn encode_crate_dep(rbml_w: &mut Encoder, - dep: &cstore::crate_metadata) { - rbml_w.start_tag(tag_crate_dep); - rbml_w.wr_tagged_str(tag_crate_dep_crate_name, &dep.name()); - let hash = decoder::get_crate_hash(dep.data()); - rbml_w.wr_tagged_str(tag_crate_dep_hash, hash.as_str()); - rbml_w.wr_tagged_u8(tag_crate_dep_explicitly_linked, - dep.explicitly_linked.get() as u8); - rbml_w.end_tag(); -} + fn encode_dylib_dependency_formats(&mut self) -> LazySeq> { + match self.tcx.sess.dependency_formats.borrow().get(&config::CrateTypeDylib) { + Some(arr) => { + self.lazy_seq(arr.iter().map(|slot| { + match *slot { + Linkage::NotLinked | + Linkage::IncludedFromDylib => None, -fn encode_hash(rbml_w: &mut Encoder, hash: &Svh) { - rbml_w.wr_tagged_str(tag_crate_hash, hash.as_str()); -} + Linkage::Dynamic => Some(LinkagePreference::RequireDynamic), + Linkage::Static => Some(LinkagePreference::RequireStatic), + } + 
})) + } + None => LazySeq::empty(), + } + } -fn encode_rustc_version(rbml_w: &mut Encoder) { - rbml_w.wr_tagged_str(tag_rustc_version, &rustc_version()); -} + fn encode_crate_root(&mut self) -> Lazy { + let mut i = self.position(); + let crate_deps = self.encode_crate_deps(); + let dylib_dependency_formats = self.encode_dylib_dependency_formats(); + let dep_bytes = self.position() - i; + + // Encode the language items. + i = self.position(); + let (lang_items, lang_items_missing) = self.encode_lang_items(); + let lang_item_bytes = self.position() - i; + + // Encode the native libraries used + i = self.position(); + let native_libraries = self.encode_native_libraries(); + let native_lib_bytes = self.position() - i; + + // Encode codemap + i = self.position(); + let codemap = self.encode_codemap(); + let codemap_bytes = self.position() - i; + + // Encode the def IDs of impls, for coherence checking. + i = self.position(); + let impls = self.encode_impls(); + let impl_bytes = self.position() - i; + + // Encode reachability info. + i = self.position(); + let reachable_ids = self.encode_reachable(); + let reachable_bytes = self.position() - i; + + // Encode and index the items. 
+ i = self.position(); + let items = self.encode_info_for_items(); + let item_bytes = self.position() - i; + + i = self.position(); + let index = items.write_index(&mut self.opaque.cursor); + let index_bytes = self.position() - i; + + let tcx = self.tcx; + let link_meta = self.link_meta; + let is_proc_macro = tcx.sess.crate_types.borrow().contains(&CrateTypeProcMacro); + let root = self.lazy(&CrateRoot { + name: link_meta.crate_name, + triple: tcx.sess.opts.target_triple.clone(), + hash: link_meta.crate_hash, + disambiguator: tcx.sess.local_crate_disambiguator(), + panic_strategy: tcx.sess.panic_strategy(), + plugin_registrar_fn: tcx.sess + .plugin_registrar_fn + .get() + .map(|id| tcx.map.local_def_id(id).index), + macro_derive_registrar: if is_proc_macro { + let id = tcx.sess.derive_registrar_fn.get().unwrap(); + Some(tcx.map.local_def_id(id).index) + } else { + None + }, + + crate_deps: crate_deps, + dylib_dependency_formats: dylib_dependency_formats, + lang_items: lang_items, + lang_items_missing: lang_items_missing, + native_libraries: native_libraries, + codemap: codemap, + impls: impls, + reachable_ids: reachable_ids, + index: index, + }); -fn encode_crate_name(rbml_w: &mut Encoder, crate_name: &str) { - rbml_w.wr_tagged_str(tag_crate_crate_name, crate_name); -} + let total_bytes = self.position(); -fn encode_crate_triple(rbml_w: &mut Encoder, triple: &str) { - rbml_w.wr_tagged_str(tag_crate_triple, triple); -} + if self.tcx.sess.meta_stats() { + let mut zero_bytes = 0; + for e in self.opaque.cursor.get_ref() { + if *e == 0 { + zero_bytes += 1; + } + } -fn encode_dylib_dependency_formats(rbml_w: &mut Encoder, ecx: &EncodeContext) { - let tag = tag_dylib_dependency_formats; - match ecx.tcx.sess.dependency_formats.borrow().get(&config::CrateTypeDylib) { - Some(arr) => { - let s = arr.iter().enumerate().filter_map(|(i, slot)| { - let kind = match *slot { - Linkage::NotLinked | - Linkage::IncludedFromDylib => return None, - Linkage::Dynamic => "d", - 
Linkage::Static => "s", - }; - Some(format!("{}:{}", i + 1, kind)) - }).collect::>(); - rbml_w.wr_tagged_str(tag, &s.join(",")); - } - None => { - rbml_w.wr_tagged_str(tag, ""); + println!("metadata stats:"); + println!(" dep bytes: {}", dep_bytes); + println!(" lang item bytes: {}", lang_item_bytes); + println!(" native bytes: {}", native_lib_bytes); + println!(" codemap bytes: {}", codemap_bytes); + println!(" impl bytes: {}", impl_bytes); + println!(" reachable bytes: {}", reachable_bytes); + println!(" item bytes: {}", item_bytes); + println!(" index bytes: {}", index_bytes); + println!(" zero bytes: {}", zero_bytes); + println!(" total bytes: {}", total_bytes); } + + root } } -// NB: Increment this as you change the metadata encoding version. -#[allow(non_upper_case_globals)] -pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ]; - -pub fn encode_metadata(parms: EncodeParams, krate: &hir::Crate) -> Vec { - let EncodeParams { - item_symbols, - diag, - tcx, - reexports, - cstore, - encode_inlined_item, - link_meta, - reachable, - mir_map, - .. - } = parms; - let ecx = EncodeContext { - diag: diag, - tcx: tcx, - reexports: reexports, - item_symbols: item_symbols, - link_meta: link_meta, - cstore: cstore, - encode_inlined_item: RefCell::new(encode_inlined_item), - type_abbrevs: RefCell::new(FnvHashMap()), - reachable: reachable, - mir_map: mir_map, - }; - - let mut wr = Cursor::new(Vec::new()); - - { - let mut rbml_w = Encoder::new(&mut wr); - encode_metadata_inner(&mut rbml_w, &ecx, krate) - } +// NOTE(eddyb) The following comment was preserved for posterity, even +// though it's no longer relevant as EBML (which uses nested & tagged +// "documents") was replaced with a scheme that can't go out of bounds. +// +// And here we run into yet another obscure archive bug: in which metadata +// loaded from archives may have trailing garbage bytes. 
Awhile back one of +// our tests was failing sporadically on the OSX 64-bit builders (both nopt +// and opt) by having ebml generate an out-of-bounds panic when looking at +// metadata. +// +// Upon investigation it turned out that the metadata file inside of an rlib +// (and ar archive) was being corrupted. Some compilations would generate a +// metadata file which would end in a few extra bytes, while other +// compilations would not have these extra bytes appended to the end. These +// extra bytes were interpreted by ebml as an extra tag, so they ended up +// being interpreted causing the out-of-bounds. +// +// The root cause of why these extra bytes were appearing was never +// discovered, and in the meantime the solution we're employing is to insert +// the length of the metadata to the start of the metadata. Later on this +// will allow us to slice the metadata to the precise length that we just +// generated regardless of trailing bytes that end up in it. + +pub fn encode_metadata<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cstore: &cstore::CStore, + reexports: &def::ExportMap, + link_meta: &LinkMeta, + reachable: &NodeSet) + -> Vec { + let mut cursor = Cursor::new(vec![]); + cursor.write_all(METADATA_HEADER).unwrap(); + + // Will be filed with the root position after encoding everything. + cursor.write_all(&[0, 0, 0, 0]).unwrap(); + + let root = { + let mut ecx = EncodeContext { + opaque: opaque::Encoder::new(&mut cursor), + tcx: tcx, + reexports: reexports, + link_meta: link_meta, + cstore: cstore, + reachable: reachable, + lazy_state: LazyState::NoNode, + type_shorthands: Default::default(), + predicate_shorthands: Default::default(), + }; - // RBML compacts the encoded bytes whenever appropriate, - // so there are some garbages left after the end of the data. 
- let metalen = wr.seek(SeekFrom::Current(0)).unwrap() as usize; - let mut v = wr.into_inner(); - v.truncate(metalen); - assert_eq!(v.len(), metalen); - - // And here we run into yet another obscure archive bug: in which metadata - // loaded from archives may have trailing garbage bytes. Awhile back one of - // our tests was failing sporadically on the OSX 64-bit builders (both nopt - // and opt) by having rbml generate an out-of-bounds panic when looking at - // metadata. - // - // Upon investigation it turned out that the metadata file inside of an rlib - // (and ar archive) was being corrupted. Some compilations would generate a - // metadata file which would end in a few extra bytes, while other - // compilations would not have these extra bytes appended to the end. These - // extra bytes were interpreted by rbml as an extra tag, so they ended up - // being interpreted causing the out-of-bounds. - // - // The root cause of why these extra bytes were appearing was never - // discovered, and in the meantime the solution we're employing is to insert - // the length of the metadata to the start of the metadata. Later on this - // will allow us to slice the metadata to the precise length that we just - // generated regardless of trailing bytes that end up in it. - let len = v.len() as u32; - v.insert(0, (len >> 0) as u8); - v.insert(0, (len >> 8) as u8); - v.insert(0, (len >> 16) as u8); - v.insert(0, (len >> 24) as u8); - return v; -} + // Encode the rustc version string in a predictable location. 
+ rustc_version().encode(&mut ecx).unwrap(); -fn encode_metadata_inner(rbml_w: &mut Encoder, - ecx: &EncodeContext, - krate: &hir::Crate) { - struct Stats { - attr_bytes: u64, - dep_bytes: u64, - lang_item_bytes: u64, - native_lib_bytes: u64, - plugin_registrar_fn_bytes: u64, - codemap_bytes: u64, - macro_defs_bytes: u64, - impl_bytes: u64, - misc_bytes: u64, - item_bytes: u64, - index_bytes: u64, - xref_bytes: u64, - zero_bytes: u64, - total_bytes: u64, - } - let mut stats = Stats { - attr_bytes: 0, - dep_bytes: 0, - lang_item_bytes: 0, - native_lib_bytes: 0, - plugin_registrar_fn_bytes: 0, - codemap_bytes: 0, - macro_defs_bytes: 0, - impl_bytes: 0, - misc_bytes: 0, - item_bytes: 0, - index_bytes: 0, - xref_bytes: 0, - zero_bytes: 0, - total_bytes: 0, + // Encode all the entries and extra information in the crate, + // culminating in the `CrateRoot` which points to all of it. + ecx.encode_crate_root() }; + let mut result = cursor.into_inner(); - encode_rustc_version(rbml_w); - encode_crate_name(rbml_w, &ecx.link_meta.crate_name); - encode_crate_triple(rbml_w, &ecx.tcx.sess.opts.target_triple); - encode_hash(rbml_w, &ecx.link_meta.crate_hash); - encode_dylib_dependency_formats(rbml_w, &ecx); - - let mut i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_attributes(rbml_w, &krate.attrs); - stats.attr_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_crate_deps(rbml_w, ecx.cstore); - stats.dep_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the language items. 
- i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_lang_items(&ecx, rbml_w); - stats.lang_item_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the native libraries used - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_native_libraries(&ecx, rbml_w); - stats.native_lib_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the plugin registrar function - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_plugin_registrar_fn(&ecx, rbml_w); - stats.plugin_registrar_fn_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode codemap - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_codemap(&ecx, rbml_w); - stats.codemap_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode macro definitions - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_macro_defs(rbml_w, krate); - stats.macro_defs_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode the def IDs of impls, for coherence checking. - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_impls(&ecx, krate, rbml_w); - stats.impl_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode miscellaneous info. - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_misc_info(&ecx, krate, rbml_w); - encode_reachable(&ecx, rbml_w); - stats.misc_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - // Encode and index the items. 
- rbml_w.start_tag(tag_items); - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - let index = encode_info_for_items(&ecx, rbml_w); - stats.item_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - rbml_w.end_tag(); - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_item_index(rbml_w, index.items); - stats.index_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - i = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - encode_xrefs(&ecx, rbml_w, index.xrefs); - stats.xref_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap() - i; - - encode_struct_field_attrs(&ecx, rbml_w, krate); - - stats.total_bytes = rbml_w.writer.seek(SeekFrom::Current(0)).unwrap(); - - if ecx.tcx.sess.meta_stats() { - for e in rbml_w.writer.get_ref() { - if *e == 0 { - stats.zero_bytes += 1; - } - } - - println!("metadata stats:"); - println!(" attribute bytes: {}", stats.attr_bytes); - println!(" dep bytes: {}", stats.dep_bytes); - println!(" lang item bytes: {}", stats.lang_item_bytes); - println!(" native bytes: {}", stats.native_lib_bytes); - println!("plugin registrar bytes: {}", stats.plugin_registrar_fn_bytes); - println!(" codemap bytes: {}", stats.codemap_bytes); - println!(" macro def bytes: {}", stats.macro_defs_bytes); - println!(" impl bytes: {}", stats.impl_bytes); - println!(" misc bytes: {}", stats.misc_bytes); - println!(" item bytes: {}", stats.item_bytes); - println!(" index bytes: {}", stats.index_bytes); - println!(" xref bytes: {}", stats.xref_bytes); - println!(" zero bytes: {}", stats.zero_bytes); - println!(" total bytes: {}", stats.total_bytes); - } -} + // Encode the root position. 
+ let header = METADATA_HEADER.len(); + let pos = root.position; + result[header + 0] = (pos >> 24) as u8; + result[header + 1] = (pos >> 16) as u8; + result[header + 2] = (pos >> 8) as u8; + result[header + 3] = (pos >> 0) as u8; -// Get the encoded string for a type -pub fn encoded_ty<'tcx>(tcx: &ty::ctxt<'tcx>, t: Ty<'tcx>) -> Vec { - let mut wr = Cursor::new(Vec::new()); - tyencode::enc_ty(&mut wr, &tyencode::ctxt { - diag: tcx.sess.diagnostic(), - ds: def_to_string, - tcx: tcx, - abbrevs: &RefCell::new(FnvHashMap()) - }, t); - wr.into_inner() + result } diff --git a/src/librustc_metadata/index.rs b/src/librustc_metadata/index.rs index 60bbdaddd7516..53e6988c756c9 100644 --- a/src/librustc_metadata/index.rs +++ b/src/librustc_metadata/index.rs @@ -8,53 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def_id::{DefId, DefIndex}; -use rbml; +use schema::*; + +use rustc::hir::def_id::{DefId, DefIndex}; use std::io::{Cursor, Write}; use std::slice; use std::u32; -/// As part of the metadata, we generate an index that stores, for -/// each DefIndex, the position of the corresponding RBML document (if -/// any). This is just a big `[u32]` slice, where an entry of -/// `u32::MAX` indicates that there is no RBML document. This little -/// struct just stores the offsets within the metadata of the start -/// and end of this slice. These are actually part of an RBML -/// document, but for looking things up in the metadata, we just -/// discard the RBML positioning and jump directly to the data. -pub struct Index { - data_start: usize, - data_end: usize, -} - -impl Index { - /// Given the RBML doc representing the index, save the offests - /// for later. - pub fn from_rbml(index: rbml::Doc) -> Index { - Index { data_start: index.start, data_end: index.end } - } - - /// Given the metadata, extract out the offset of a particular - /// DefIndex (if any). 
- #[inline(never)] - pub fn lookup_item(&self, bytes: &[u8], def_index: DefIndex) -> Option { - let words = bytes_to_words(&bytes[self.data_start..self.data_end]); - let index = def_index.as_usize(); - - debug!("lookup_item: index={:?} words.len={:?}", - index, words.len()); - - let position = u32::from_be(words[index]); - if position == u32::MAX { - debug!("lookup_item: position=u32::MAX"); - None - } else { - debug!("lookup_item: position={:?}", position); - Some(position) - } - } -} - /// While we are generating the metadata, we also track the position /// of each DefIndex. It is not required that all definitions appear /// in the metadata, nor that they are serialized in order, and @@ -62,84 +22,83 @@ impl Index { /// `u32::MAX`. Whenever an index is visited, we fill in the /// appropriate spot by calling `record_position`. We should never /// visit the same index twice. -pub struct IndexData { +pub struct Index { positions: Vec, } -impl IndexData { - pub fn new(max_index: usize) -> IndexData { - IndexData { - positions: vec![u32::MAX; max_index] - } +impl Index { + pub fn new(max_index: usize) -> Index { + Index { positions: vec![u32::MAX; max_index] } } - pub fn record(&mut self, def_id: DefId, position: u64) { + pub fn record(&mut self, def_id: DefId, entry: Lazy) { assert!(def_id.is_local()); - self.record_index(def_id.index, position) + self.record_index(def_id.index, entry); } - pub fn record_index(&mut self, item: DefIndex, position: u64) { + pub fn record_index(&mut self, item: DefIndex, entry: Lazy) { let item = item.as_usize(); - assert!(position < (u32::MAX as u64)); - let position = position as u32; + assert!(entry.position < (u32::MAX as usize)); + let position = entry.position as u32; assert!(self.positions[item] == u32::MAX, "recorded position for item {:?} twice, first at {:?} and now at {:?}", - item, self.positions[item], position); + item, + self.positions[item], + position); - self.positions[item] = position; + self.positions[item] = 
position.to_le(); } - pub fn write_index(&self, buf: &mut Cursor>) { - for &position in &self.positions { - write_be_u32(buf, position); - } + pub fn write_index(&self, buf: &mut Cursor>) -> LazySeq { + let pos = buf.position(); + buf.write_all(words_to_bytes(&self.positions)).unwrap(); + LazySeq::with_position_and_length(pos as usize, self.positions.len()) } } -/// A dense index with integer keys. Different API from IndexData (should -/// these be merged?) -pub struct DenseIndex { - start: usize, - end: usize -} +impl<'tcx> LazySeq { + /// Given the metadata, extract out the offset of a particular + /// DefIndex (if any). + #[inline(never)] + pub fn lookup(&self, bytes: &[u8], def_index: DefIndex) -> Option>> { + let words = &bytes_to_words(&bytes[self.position..])[..self.len]; + let index = def_index.as_usize(); -impl DenseIndex { - pub fn lookup(&self, buf: &[u8], ix: u32) -> Option { - let data = bytes_to_words(&buf[self.start..self.end]); - data.get(ix as usize).map(|d| u32::from_be(*d)) - } - pub fn from_buf(buf: &[u8], start: usize, end: usize) -> Self { - assert!((end-start)%4 == 0 && start <= end && end <= buf.len()); - DenseIndex { - start: start, - end: end + debug!("Index::lookup: index={:?} words.len={:?}", + index, + words.len()); + + let position = u32::from_le(words[index]); + if position == u32::MAX { + debug!("Index::lookup: position=u32::MAX"); + None + } else { + debug!("Index::lookup: position={:?}", position); + Some(Lazy::with_position(position as usize)) } } -} -pub fn write_dense_index(entries: Vec, buf: &mut Cursor>) { - let elen = entries.len(); - assert!(elen < u32::MAX as usize); - - for entry in entries { - write_be_u32(buf, entry); + pub fn iter_enumerated<'a>(&self, + bytes: &'a [u8]) + -> impl Iterator>)> + 'a { + let words = &bytes_to_words(&bytes[self.position..])[..self.len]; + words.iter().enumerate().filter_map(|(index, &position)| { + if position == u32::MAX { + None + } else { + let position = u32::from_le(position) as usize; 
+ Some((DefIndex::new(index), Lazy::with_position(position))) + } + }) } - - info!("write_dense_index: {} entries", elen); } -fn write_be_u32(w: &mut W, u: u32) { - let _ = w.write_all(&[ - (u >> 24) as u8, - (u >> 16) as u8, - (u >> 8) as u8, - (u >> 0) as u8, - ]); +fn bytes_to_words(b: &[u8]) -> &[u32] { + unsafe { slice::from_raw_parts(b.as_ptr() as *const u32, b.len() / 4) } } -fn bytes_to_words(b: &[u8]) -> &[u32] { - assert!(b.len() % 4 == 0); - unsafe { slice::from_raw_parts(b.as_ptr() as *const u32, b.len()/4) } +fn words_to_bytes(w: &[u32]) -> &[u8] { + unsafe { slice::from_raw_parts(w.as_ptr() as *const u8, w.len() * 4) } } diff --git a/src/librustc_metadata/index_builder.rs b/src/librustc_metadata/index_builder.rs new file mode 100644 index 0000000000000..1a74a92545477 --- /dev/null +++ b/src/librustc_metadata/index_builder.rs @@ -0,0 +1,225 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Builder types for generating the "item data" section of the +//! metadata. This section winds up looking like this: +//! +//! ``` +//! // big list of item-like things... +//! // ...for most def-ids, there is an entry. +//! +//! +//! ``` +//! +//! As we generate this listing, we collect the offset of each +//! `data_item` entry and store it in an index. Then, when we load the +//! metadata, we can skip right to the metadata for a particular item. +//! +//! In addition to the offset, we need to track the data that was used +//! to generate the contents of each `data_item`. This is so that we +//! can figure out which HIR nodes contributed to that data for +//! incremental compilation purposes. +//! +//! The `IndexBuilder` facilitates both of these. 
It is created +//! with an `EncodingContext` (`ecx`), which it encapsulates. +//! It has one main method, `record()`. You invoke `record` +//! like so to create a new `data_item` element in the list: +//! +//! ``` +//! index.record(some_def_id, callback_fn, data) +//! ``` +//! +//! What record will do is to (a) record the current offset, (b) emit +//! the `common::data_item` tag, and then call `callback_fn` with the +//! given data as well as the `EncodingContext`. Once `callback_fn` +//! returns, the `common::data_item` tag will be closed. +//! +//! `EncodingContext` does not offer the `record` method, so that we +//! can ensure that `common::data_item` elements are never nested. +//! +//! In addition, while the `callback_fn` is executing, we will push a +//! task `MetaData(some_def_id)`, which can then observe the +//! reads/writes that occur in the task. For this reason, the `data` +//! argument that is given to the `callback_fn` must implement the +//! trait `DepGraphRead`, which indicates how to register reads on the +//! data in this new task (note that many types of data, such as +//! `DefId`, do not currently require any reads to be registered, +//! since they are not derived from a HIR node). This is also why we +//! give a callback fn, rather than taking a closure: it allows us to +//! easily control precisely what data is given to that fn. + +use encoder::EncodeContext; +use index::Index; +use schema::*; + +use rustc::dep_graph::DepNode; +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::ty::TyCtxt; +use syntax::ast; + +use std::ops::{Deref, DerefMut}; + +/// Builder that can encode new items, adding them into the index. +/// Item encoding cannot be nested. 
+pub struct IndexBuilder<'a, 'b: 'a, 'tcx: 'b> { + items: Index, + pub ecx: &'a mut EncodeContext<'b, 'tcx>, +} + +impl<'a, 'b, 'tcx> Deref for IndexBuilder<'a, 'b, 'tcx> { + type Target = EncodeContext<'b, 'tcx>; + fn deref(&self) -> &Self::Target { + self.ecx + } +} + +impl<'a, 'b, 'tcx> DerefMut for IndexBuilder<'a, 'b, 'tcx> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.ecx + } +} + +impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { + pub fn new(ecx: &'a mut EncodeContext<'b, 'tcx>) -> Self { + IndexBuilder { + items: Index::new(ecx.tcx.map.num_local_def_ids()), + ecx: ecx, + } + } + + /// Emit the data for a def-id to the metadata. The function to + /// emit the data is `op`, and it will be given `data` as + /// arguments. This `record` function will call `op` to generate + /// the `Entry` (which may point to other encoded information) + /// and will then record the `Lazy` for use in the index. + /// + /// In addition, it will setup a dep-graph task to track what data + /// `op` accesses to generate the metadata, which is later used by + /// incremental compilation to compute a hash for the metadata and + /// track changes. + /// + /// The reason that `op` is a function pointer, and not a closure, + /// is that we want to be able to completely track all data it has + /// access to, so that we can be sure that `DATA: DepGraphRead` + /// holds, and that it is therefore not gaining "secret" access to + /// bits of HIR or other state that would not be trackd by the + /// content system. + pub fn record(&mut self, + id: DefId, + op: fn(&mut EncodeContext<'b, 'tcx>, DATA) -> Entry<'tcx>, + data: DATA) + where DATA: DepGraphRead + { + let _task = self.tcx.dep_graph.in_task(DepNode::MetaData(id)); + data.read(self.tcx); + let entry = op(&mut self.ecx, data); + self.items.record(id, self.ecx.lazy(&entry)); + } + + pub fn into_items(self) -> Index { + self.items + } +} + +/// Trait used for data that can be passed from outside a dep-graph +/// task. 
The data must either be of some safe type, such as a +/// `DefId` index, or implement the `read` method so that it can add +/// a read of whatever dep-graph nodes are appropriate. +pub trait DepGraphRead { + fn read(&self, tcx: TyCtxt); +} + +impl DepGraphRead for DefId { + fn read(&self, _tcx: TyCtxt) {} +} + +impl DepGraphRead for ast::NodeId { + fn read(&self, _tcx: TyCtxt) {} +} + +impl DepGraphRead for Option + where T: DepGraphRead +{ + fn read(&self, tcx: TyCtxt) { + match *self { + Some(ref v) => v.read(tcx), + None => (), + } + } +} + +impl DepGraphRead for [T] + where T: DepGraphRead +{ + fn read(&self, tcx: TyCtxt) { + for i in self { + i.read(tcx); + } + } +} + +macro_rules! read_tuple { + ($($name:ident),*) => { + impl<$($name),*> DepGraphRead for ($($name),*) + where $($name: DepGraphRead),* + { + #[allow(non_snake_case)] + fn read(&self, tcx: TyCtxt) { + let &($(ref $name),*) = self; + $($name.read(tcx);)* + } + } + } +} +read_tuple!(A, B); +read_tuple!(A, B, C); + +macro_rules! read_hir { + ($t:ty) => { + impl<'tcx> DepGraphRead for &'tcx $t { + fn read(&self, tcx: TyCtxt) { + tcx.map.read(self.id); + } + } + } +} +read_hir!(hir::Item); +read_hir!(hir::ImplItem); +read_hir!(hir::TraitItem); +read_hir!(hir::ForeignItem); +read_hir!(hir::MacroDef); + +/// Leaks access to a value of type T without any tracking. This is +/// suitable for ambiguous types like `usize`, which *could* represent +/// tracked data (e.g., if you read it out of a HIR node) or might not +/// (e.g., if it's an index). Adding in an `Untracked` is an +/// assertion, essentially, that the data does not need to be tracked +/// (or that read edges will be added by some other way). +/// +/// A good idea is to add to each use of `Untracked` an explanation of +/// why this value is ok. 
+pub struct Untracked(pub T); + +impl DepGraphRead for Untracked { + fn read(&self, _tcx: TyCtxt) {} +} + +/// Newtype that can be used to package up misc data extracted from a +/// HIR node that doesn't carry its own id. This will allow an +/// arbitrary `T` to be passed in, but register a read on the given +/// node-id. +pub struct FromId(pub ast::NodeId, pub T); + +impl DepGraphRead for FromId { + fn read(&self, tcx: TyCtxt) { + tcx.map.read(self.0); + } +} diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs index 42332c4696979..56f3cfc12c97f 100644 --- a/src/librustc_metadata/lib.rs +++ b/src/librustc_metadata/lib.rs @@ -13,47 +13,51 @@ #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(box_patterns)] -#![feature(enumset)] +#![feature(conservative_impl_trait)] +#![feature(core_intrinsics)] +#![feature(proc_macro_internals)] +#![feature(proc_macro_lib)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] +#![feature(specialization)] #![feature(staged_api)] -#![feature(time2)] - -#[macro_use] extern crate log; -#[macro_use] extern crate syntax; -#[macro_use] #[no_link] extern crate rustc_bitflags; +#[macro_use] +extern crate log; +#[macro_use] +extern crate syntax; +extern crate syntax_pos; extern crate flate; -extern crate rbml; -extern crate serialize; +extern crate serialize as rustc_serialize; // used by deriving +extern crate rustc_errors as errors; +extern crate syntax_ext; +extern crate proc_macro; +#[macro_use] extern crate rustc; extern crate rustc_back; -extern crate rustc_front; +extern crate rustc_const_math; +extern crate 
rustc_data_structures; extern crate rustc_llvm; -pub use rustc::middle; +mod diagnostics; -#[macro_use] -mod macros; - -pub mod diagnostics; +mod astencode; +mod index_builder; +mod index; +mod encoder; +mod decoder; +mod cstore_impl; +mod schema; -pub mod astencode; -pub mod common; -pub mod tyencode; -pub mod tydecode; -pub mod encoder; -pub mod decoder; pub mod creader; -pub mod csearch; pub mod cstore; -pub mod index; -pub mod loader; -pub mod macro_import; -pub mod tls_context; +pub mod locator; + +__build_diagnostic_array! { librustc_metadata, DIAGNOSTICS } diff --git a/src/librustc_metadata/loader.rs b/src/librustc_metadata/loader.rs deleted file mode 100644 index 40665beaa5ac2..0000000000000 --- a/src/librustc_metadata/loader.rs +++ /dev/null @@ -1,870 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Finds crate binaries and loads their metadata -//! -//! Might I be the first to welcome you to a world of platform differences, -//! version requirements, dependency graphs, conflicting desires, and fun! This -//! is the major guts (along with metadata::creader) of the compiler for loading -//! crates and resolving dependencies. Let's take a tour! -//! -//! # The problem -//! -//! Each invocation of the compiler is immediately concerned with one primary -//! problem, to connect a set of crates to resolved crates on the filesystem. -//! Concretely speaking, the compiler follows roughly these steps to get here: -//! -//! 1. Discover a set of `extern crate` statements. -//! 2. Transform these directives into crate names. If the directive does not -//! have an explicit name, then the identifier is the name. -//! 3. 
For each of these crate names, find a corresponding crate on the -//! filesystem. -//! -//! Sounds easy, right? Let's walk into some of the nuances. -//! -//! ## Transitive Dependencies -//! -//! Let's say we've got three crates: A, B, and C. A depends on B, and B depends -//! on C. When we're compiling A, we primarily need to find and locate B, but we -//! also end up needing to find and locate C as well. -//! -//! The reason for this is that any of B's types could be composed of C's types, -//! any function in B could return a type from C, etc. To be able to guarantee -//! that we can always typecheck/translate any function, we have to have -//! complete knowledge of the whole ecosystem, not just our immediate -//! dependencies. -//! -//! So now as part of the "find a corresponding crate on the filesystem" step -//! above, this involves also finding all crates for *all upstream -//! dependencies*. This includes all dependencies transitively. -//! -//! ## Rlibs and Dylibs -//! -//! The compiler has two forms of intermediate dependencies. These are dubbed -//! rlibs and dylibs for the static and dynamic variants, respectively. An rlib -//! is a rustc-defined file format (currently just an ar archive) while a dylib -//! is a platform-defined dynamic library. Each library has a metadata somewhere -//! inside of it. -//! -//! When translating a crate name to a crate on the filesystem, we all of a -//! sudden need to take into account both rlibs and dylibs! Linkage later on may -//! use either one of these files, as each has their pros/cons. The job of crate -//! loading is to discover what's possible by finding all candidates. -//! -//! Most parts of this loading systems keep the dylib/rlib as just separate -//! variables. -//! -//! ## Where to look? -//! -//! We can't exactly scan your whole hard drive when looking for dependencies, -//! so we need to places to look. Currently the compiler will implicitly add the -//! 
target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation, -//! and otherwise all -L flags are added to the search paths. -//! -//! ## What criterion to select on? -//! -//! This a pretty tricky area of loading crates. Given a file, how do we know -//! whether it's the right crate? Currently, the rules look along these lines: -//! -//! 1. Does the filename match an rlib/dylib pattern? That is to say, does the -//! filename have the right prefix/suffix? -//! 2. Does the filename have the right prefix for the crate name being queried? -//! This is filtering for files like `libfoo*.rlib` and such. -//! 3. Is the file an actual rust library? This is done by loading the metadata -//! from the library and making sure it's actually there. -//! 4. Does the name in the metadata agree with the name of the library? -//! 5. Does the target in the metadata agree with the current target? -//! 6. Does the SVH match? (more on this later) -//! -//! If the file answers `yes` to all these questions, then the file is -//! considered as being *candidate* for being accepted. It is illegal to have -//! more than two candidates as the compiler has no method by which to resolve -//! this conflict. Additionally, rlib/dylib candidates are considered -//! separately. -//! -//! After all this has happened, we have 1 or two files as candidates. These -//! represent the rlib/dylib file found for a library, and they're returned as -//! being found. -//! -//! ### What about versions? -//! -//! A lot of effort has been put forth to remove versioning from the compiler. -//! There have been forays in the past to have versioning baked in, but it was -//! largely always deemed insufficient to the point that it was recognized that -//! it's probably something the compiler shouldn't do anyway due to its -//! complicated nature and the state of the half-baked solutions. -//! -//! With a departure from versioning, the primary criterion for loading crates -//! is just the name of a crate. 
If we stopped here, it would imply that you -//! could never link two crates of the same name from different sources -//! together, which is clearly a bad state to be in. -//! -//! To resolve this problem, we come to the next section! -//! -//! # Expert Mode -//! -//! A number of flags have been added to the compiler to solve the "version -//! problem" in the previous section, as well as generally enabling more -//! powerful usage of the crate loading system of the compiler. The goal of -//! these flags and options are to enable third-party tools to drive the -//! compiler with prior knowledge about how the world should look. -//! -//! ## The `--extern` flag -//! -//! The compiler accepts a flag of this form a number of times: -//! -//! ```text -//! --extern crate-name=path/to/the/crate.rlib -//! ``` -//! -//! This flag is basically the following letter to the compiler: -//! -//! > Dear rustc, -//! > -//! > When you are attempting to load the immediate dependency `crate-name`, I -//! > would like you to assume that the library is located at -//! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not -//! > assume that the path I specified has the name `crate-name`. -//! -//! This flag basically overrides most matching logic except for validating that -//! the file is indeed a rust library. The same `crate-name` can be specified -//! twice to specify the rlib/dylib pair. -//! -//! ## Enabling "multiple versions" -//! -//! This basically boils down to the ability to specify arbitrary packages to -//! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it -//! would look something like: -//! -//! ```ignore -//! extern crate b1; -//! extern crate b2; -//! -//! fn main() {} -//! ``` -//! -//! and the compiler would be invoked as: -//! -//! ```text -//! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib -//! ``` -//! -//! In this scenario there are two crates named `b` and the compiler must be -//! 
manually driven to be informed where each crate is. -//! -//! ## Frobbing symbols -//! -//! One of the immediate problems with linking the same library together twice -//! in the same problem is dealing with duplicate symbols. The primary way to -//! deal with this in rustc is to add hashes to the end of each symbol. -//! -//! In order to force hashes to change between versions of a library, if -//! desired, the compiler exposes an option `-C metadata=foo`, which is used to -//! initially seed each symbol hash. The string `foo` is prepended to each -//! string-to-hash to ensure that symbols change over time. -//! -//! ## Loading transitive dependencies -//! -//! Dealing with same-named-but-distinct crates is not just a local problem, but -//! one that also needs to be dealt with for transitive dependencies. Note that -//! in the letter above `--extern` flags only apply to the *local* set of -//! dependencies, not the upstream transitive dependencies. Consider this -//! dependency graph: -//! -//! ```text -//! A.1 A.2 -//! | | -//! | | -//! B C -//! \ / -//! \ / -//! D -//! ``` -//! -//! In this scenario, when we compile `D`, we need to be able to distinctly -//! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these -//! transitive dependencies. -//! -//! Note that the key idea here is that `B` and `C` are both *already compiled*. -//! That is, they have already resolved their dependencies. Due to unrelated -//! technical reasons, when a library is compiled, it is only compatible with -//! the *exact same* version of the upstream libraries it was compiled against. -//! We use the "Strict Version Hash" to identify the exact copy of an upstream -//! library. -//! -//! With this knowledge, we know that `B` and `C` will depend on `A` with -//! different SVH values, so we crawl the normal `-L` paths looking for -//! `liba*.rlib` and filter based on the contained SVH. -//! -//! In the end, this ends up not needing `--extern` to specify upstream -//! 
transitive dependencies. -//! -//! # Wrapping up -//! -//! That's the general overview of loading crates in the compiler, but it's by -//! no means all of the necessary details. Take a look at the rest of -//! metadata::loader or metadata::creader for all the juicy details! - -use cstore::{MetadataBlob, MetadataVec, MetadataArchive}; -use decoder; -use encoder; - -use rustc::back::svh::Svh; -use rustc::session::Session; -use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; -use rustc::session::search_paths::PathKind; -use rustc::util::common; - -use rustc_llvm as llvm; -use rustc_llvm::{False, ObjectFile, mk_section_iter}; -use rustc_llvm::archive_ro::ArchiveRO; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; -use rustc_back::target::Target; - -use std::cmp; -use std::collections::HashMap; -use std::fs; -use std::io::prelude::*; -use std::io; -use std::path::{Path, PathBuf}; -use std::ptr; -use std::slice; -use std::time::Instant; - -use flate; - -pub struct CrateMismatch { - path: PathBuf, - got: String, -} - -pub struct Context<'a> { - pub sess: &'a Session, - pub span: Span, - pub ident: &'a str, - pub crate_name: &'a str, - pub hash: Option<&'a Svh>, - // points to either self.sess.target.target or self.sess.host, must match triple - pub target: &'a Target, - pub triple: &'a str, - pub filesearch: FileSearch<'a>, - pub root: &'a Option, - pub rejected_via_hash: Vec, - pub rejected_via_triple: Vec, - pub rejected_via_kind: Vec, - pub should_match_name: bool, -} - -pub struct Library { - pub dylib: Option<(PathBuf, PathKind)>, - pub rlib: Option<(PathBuf, PathKind)>, - pub metadata: MetadataBlob, -} - -pub struct ArchiveMetadata { - _archive: ArchiveRO, - // points into self._archive - data: *const [u8], -} - -pub struct CratePaths { - pub ident: String, - pub dylib: Option, - pub rlib: Option -} - -pub const METADATA_FILENAME: &'static str = "rust.metadata.bin"; - -impl CratePaths { - fn paths(&self) -> Vec { - match 
(&self.dylib, &self.rlib) { - (&None, &None) => vec!(), - (&Some(ref p), &None) | - (&None, &Some(ref p)) => vec!(p.clone()), - (&Some(ref p1), &Some(ref p2)) => vec!(p1.clone(), p2.clone()), - } - } -} - -impl<'a> Context<'a> { - pub fn maybe_load_library_crate(&mut self) -> Option { - self.find_library_crate() - } - - pub fn load_library_crate(&mut self) -> Library { - match self.find_library_crate() { - Some(t) => t, - None => { - self.report_load_errs(); - unreachable!() - } - } - } - - pub fn report_load_errs(&mut self) { - let add = match self.root { - &None => String::new(), - &Some(ref r) => format!(" which `{}` depends on", - r.ident) - }; - let mut err = if !self.rejected_via_hash.is_empty() { - struct_span_err!(self.sess, self.span, E0460, - "found possibly newer version of crate `{}`{}", - self.ident, add) - } else if !self.rejected_via_triple.is_empty() { - struct_span_err!(self.sess, self.span, E0461, - "couldn't find crate `{}` with expected target triple {}{}", - self.ident, self.triple, add) - } else if !self.rejected_via_kind.is_empty() { - struct_span_err!(self.sess, self.span, E0462, - "found staticlib `{}` instead of rlib or dylib{}", - self.ident, add) - } else { - struct_span_err!(self.sess, self.span, E0463, - "can't find crate for `{}`{}", - self.ident, add) - }; - - if !self.rejected_via_triple.is_empty() { - let mismatches = self.rejected_via_triple.iter(); - for (i, &CrateMismatch{ ref path, ref got }) in mismatches.enumerate() { - err.fileline_note(self.span, - &format!("crate `{}`, path #{}, triple {}: {}", - self.ident, i+1, got, path.display())); - } - } - if !self.rejected_via_hash.is_empty() { - err.span_note(self.span, "perhaps this crate needs \ - to be recompiled?"); - let mismatches = self.rejected_via_hash.iter(); - for (i, &CrateMismatch{ ref path, .. 
}) in mismatches.enumerate() { - err.fileline_note(self.span, - &format!("crate `{}` path #{}: {}", - self.ident, i+1, path.display())); - } - match self.root { - &None => {} - &Some(ref r) => { - for (i, path) in r.paths().iter().enumerate() { - err.fileline_note(self.span, - &format!("crate `{}` path #{}: {}", - r.ident, i+1, path.display())); - } - } - } - } - if !self.rejected_via_kind.is_empty() { - err.fileline_help(self.span, "please recompile this crate using \ - --crate-type lib"); - let mismatches = self.rejected_via_kind.iter(); - for (i, &CrateMismatch { ref path, .. }) in mismatches.enumerate() { - err.fileline_note(self.span, - &format!("crate `{}` path #{}: {}", - self.ident, i+1, path.display())); - } - } - - err.emit(); - self.sess.abort_if_errors(); - } - - fn find_library_crate(&mut self) -> Option { - // If an SVH is specified, then this is a transitive dependency that - // must be loaded via -L plus some filtering. - if self.hash.is_none() { - self.should_match_name = false; - if let Some(s) = self.sess.opts.externs.get(self.crate_name) { - return self.find_commandline_library(s); - } - self.should_match_name = true; - } - - let dypair = self.dylibname(); - - // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" - let dylib_prefix = format!("{}{}", dypair.0, self.crate_name); - let rlib_prefix = format!("lib{}", self.crate_name); - let staticlib_prefix = format!("lib{}", self.crate_name); - - let mut candidates = HashMap::new(); - let mut staticlibs = vec!(); - - // First, find all possible candidate rlibs and dylibs purely based on - // the name of the files themselves. We're trying to match against an - // exact crate name and a possibly an exact hash. - // - // During this step, we can filter all found libraries based on the - // name and id found in the crate id (we ignore the path portion for - // filename matching), as well as the exact hash (if specified). 
If we - // end up having many candidates, we must look at the metadata to - // perform exact matches against hashes/crate ids. Note that opening up - // the metadata is where we do an exact match against the full contents - // of the crate id (path/name/id). - // - // The goal of this step is to look at as little metadata as possible. - self.filesearch.search(|path, kind| { - let file = match path.file_name().and_then(|s| s.to_str()) { - None => return FileDoesntMatch, - Some(file) => file, - }; - let (hash, rlib) = if file.starts_with(&rlib_prefix[..]) && - file.ends_with(".rlib") { - (&file[(rlib_prefix.len()) .. (file.len() - ".rlib".len())], - true) - } else if file.starts_with(&dylib_prefix) && - file.ends_with(&dypair.1) { - (&file[(dylib_prefix.len()) .. (file.len() - dypair.1.len())], - false) - } else { - if file.starts_with(&staticlib_prefix[..]) && - file.ends_with(".a") { - staticlibs.push(CrateMismatch { - path: path.to_path_buf(), - got: "static".to_string() - }); - } - return FileDoesntMatch - }; - info!("lib candidate: {}", path.display()); - - let hash_str = hash.to_string(); - let slot = candidates.entry(hash_str) - .or_insert_with(|| (HashMap::new(), HashMap::new())); - let (ref mut rlibs, ref mut dylibs) = *slot; - fs::canonicalize(path).map(|p| { - if rlib { - rlibs.insert(p, kind); - } else { - dylibs.insert(p, kind); - } - FileMatches - }).unwrap_or(FileDoesntMatch) - }); - self.rejected_via_kind.extend(staticlibs); - - // We have now collected all known libraries into a set of candidates - // keyed of the filename hash listed. For each filename, we also have a - // list of rlibs/dylibs that apply. Here, we map each of these lists - // (per hash), to a Library candidate for returning. - // - // A Library candidate is created if the metadata for the set of - // libraries corresponds to the crate id and hash criteria that this - // search is being performed for. 
- let mut libraries = Vec::new(); - for (_hash, (rlibs, dylibs)) in candidates { - let mut metadata = None; - let rlib = self.extract_one(rlibs, "rlib", &mut metadata); - let dylib = self.extract_one(dylibs, "dylib", &mut metadata); - match metadata { - Some(metadata) => { - libraries.push(Library { - dylib: dylib, - rlib: rlib, - metadata: metadata, - }) - } - None => {} - } - } - - // Having now translated all relevant found hashes into libraries, see - // what we've got and figure out if we found multiple candidates for - // libraries or not. - match libraries.len() { - 0 => None, - 1 => Some(libraries.into_iter().next().unwrap()), - _ => { - let mut err = struct_span_err!(self.sess, self.span, E0464, - "multiple matching crates for `{}`", - self.crate_name); - err.note("candidates:"); - for lib in &libraries { - match lib.dylib { - Some((ref p, _)) => { - err.note(&format!("path: {}", - p.display())); - } - None => {} - } - match lib.rlib { - Some((ref p, _)) => { - err.note(&format!("path: {}", - p.display())); - } - None => {} - } - let data = lib.metadata.as_slice(); - let name = decoder::get_crate_name(data); - note_crate_name(&mut err, &name); - } - err.emit(); - None - } - } - } - - // Attempts to extract *one* library from the set `m`. If the set has no - // elements, `None` is returned. If the set has more than one element, then - // the errors and notes are emitted about the set of libraries. - // - // With only one library in the set, this function will extract it, and then - // read the metadata from it if `*slot` is `None`. If the metadata couldn't - // be read, it is assumed that the file isn't a valid rust library (no - // errors are emitted). - fn extract_one(&mut self, m: HashMap, flavor: &str, - slot: &mut Option) -> Option<(PathBuf, PathKind)> { - let mut ret = None::<(PathBuf, PathKind)>; - let mut error = 0; - - if slot.is_some() { - // FIXME(#10786): for an optimization, we only read one of the - // library's metadata sections. 
In theory we should - // read both, but reading dylib metadata is quite - // slow. - if m.is_empty() { - return None - } else if m.len() == 1 { - return Some(m.into_iter().next().unwrap()) - } - } - - let mut err: Option = None; - for (lib, kind) in m { - info!("{} reading metadata from: {}", flavor, lib.display()); - let metadata = match get_metadata_section(self.target, &lib) { - Ok(blob) => { - if self.crate_matches(blob.as_slice(), &lib) { - blob - } else { - info!("metadata mismatch"); - continue - } - } - Err(err) => { - info!("no metadata found: {}", err); - continue - } - }; - // If we've already found a candidate and we're not matching hashes, - // emit an error about duplicate candidates found. If we're matching - // based on a hash, however, then if we've gotten this far both - // candidates have the same hash, so they're not actually - // duplicates that we should warn about. - if ret.is_some() && self.hash.is_none() { - let mut e = struct_span_err!(self.sess, self.span, E0465, - "multiple {} candidates for `{}` found", - flavor, self.crate_name); - e.span_note(self.span, - &format!(r"candidate #1: {}", - ret.as_ref().unwrap().0 - .display())); - if let Some(ref mut e) = err { - e.emit(); - } - err = Some(e); - error = 1; - ret = None; - } - if error > 0 { - error += 1; - err.as_mut().unwrap().span_note(self.span, - &format!(r"candidate #{}: {}", error, - lib.display())); - continue - } - *slot = Some(metadata); - ret = Some((lib, kind)); - } - - if error > 0 { - err.unwrap().emit(); - None - } else { - ret - } - } - - fn crate_matches(&mut self, crate_data: &[u8], libpath: &Path) -> bool { - if self.should_match_name { - match decoder::maybe_get_crate_name(crate_data) { - Some(ref name) if self.crate_name == *name => {} - _ => { info!("Rejecting via crate name"); return false } - } - } - let hash = match decoder::maybe_get_crate_hash(crate_data) { - Some(hash) => hash, None => { - info!("Rejecting via lack of crate hash"); - return false; - } - }; - - 
let triple = match decoder::get_crate_triple(crate_data) { - None => { debug!("triple not present"); return false } - Some(t) => t, - }; - if triple != self.triple { - info!("Rejecting via crate triple: expected {} got {}", self.triple, triple); - self.rejected_via_triple.push(CrateMismatch { - path: libpath.to_path_buf(), - got: triple.to_string() - }); - return false; - } - - match self.hash { - None => true, - Some(myhash) => { - if *myhash != hash { - info!("Rejecting via hash: expected {} got {}", *myhash, hash); - self.rejected_via_hash.push(CrateMismatch { - path: libpath.to_path_buf(), - got: myhash.as_str().to_string() - }); - false - } else { - true - } - } - } - } - - - // Returns the corresponding (prefix, suffix) that files need to have for - // dynamic libraries - fn dylibname(&self) -> (String, String) { - let t = &self.target; - (t.options.dll_prefix.clone(), t.options.dll_suffix.clone()) - } - - fn find_commandline_library(&mut self, locs: &[String]) -> Option { - // First, filter out all libraries that look suspicious. We only accept - // files which actually exist that have the correct naming scheme for - // rlibs/dylibs. 
- let sess = self.sess; - let dylibname = self.dylibname(); - let mut rlibs = HashMap::new(); - let mut dylibs = HashMap::new(); - { - let locs = locs.iter().map(|l| PathBuf::from(l)).filter(|loc| { - if !loc.exists() { - sess.err(&format!("extern location for {} does not exist: {}", - self.crate_name, loc.display())); - return false; - } - let file = match loc.file_name().and_then(|s| s.to_str()) { - Some(file) => file, - None => { - sess.err(&format!("extern location for {} is not a file: {}", - self.crate_name, loc.display())); - return false; - } - }; - if file.starts_with("lib") && file.ends_with(".rlib") { - return true - } else { - let (ref prefix, ref suffix) = dylibname; - if file.starts_with(&prefix[..]) && - file.ends_with(&suffix[..]) { - return true - } - } - sess.struct_err(&format!("extern location for {} is of an unknown type: {}", - self.crate_name, loc.display())) - .help(&format!("file name should be lib*.rlib or {}*.{}", - dylibname.0, dylibname.1)) - .emit(); - false - }); - - // Now that we have an iterator of good candidates, make sure - // there's at most one rlib and at most one dylib. - for loc in locs { - if loc.file_name().unwrap().to_str().unwrap().ends_with(".rlib") { - rlibs.insert(fs::canonicalize(&loc).unwrap(), - PathKind::ExternFlag); - } else { - dylibs.insert(fs::canonicalize(&loc).unwrap(), - PathKind::ExternFlag); - } - } - }; - - // Extract the rlib/dylib pair. 
- let mut metadata = None; - let rlib = self.extract_one(rlibs, "rlib", &mut metadata); - let dylib = self.extract_one(dylibs, "dylib", &mut metadata); - - if rlib.is_none() && dylib.is_none() { return None } - match metadata { - Some(metadata) => Some(Library { - dylib: dylib, - rlib: rlib, - metadata: metadata, - }), - None => None, - } - } -} - -pub fn note_crate_name(err: &mut DiagnosticBuilder, name: &str) { - err.note(&format!("crate name: {}", name)); -} - -impl ArchiveMetadata { - fn new(ar: ArchiveRO) -> Option { - let data = { - let section = ar.iter().find(|sect| { - sect.name() == Some(METADATA_FILENAME) - }); - match section { - Some(s) => s.data() as *const [u8], - None => { - debug!("didn't find '{}' in the archive", METADATA_FILENAME); - return None; - } - } - }; - - Some(ArchiveMetadata { - _archive: ar, - data: data, - }) - } - - pub fn as_slice<'a>(&'a self) -> &'a [u8] { unsafe { &*self.data } } -} - -// Just a small wrapper to time how long reading metadata takes. -fn get_metadata_section(target: &Target, filename: &Path) - -> Result { - let start = Instant::now(); - let ret = get_metadata_section_imp(target, filename); - info!("reading {:?} => {:?}", filename.file_name().unwrap(), - start.elapsed()); - return ret -} - -fn get_metadata_section_imp(target: &Target, filename: &Path) - -> Result { - if !filename.exists() { - return Err(format!("no such file: '{}'", filename.display())); - } - if filename.file_name().unwrap().to_str().unwrap().ends_with(".rlib") { - // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap - // internally to read the file. We also avoid even using a memcpy by - // just keeping the archive along while the metadata is in use. 
- let archive = match ArchiveRO::open(filename) { - Some(ar) => ar, - None => { - debug!("llvm didn't like `{}`", filename.display()); - return Err(format!("failed to read rlib metadata: '{}'", - filename.display())); - } - }; - return match ArchiveMetadata::new(archive).map(|ar| MetadataArchive(ar)) { - None => Err(format!("failed to read rlib metadata: '{}'", - filename.display())), - Some(blob) => Ok(blob) - }; - } - unsafe { - let buf = common::path2cstr(filename); - let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr()); - if mb as isize == 0 { - return Err(format!("error reading library: '{}'", - filename.display())) - } - let of = match ObjectFile::new(mb) { - Some(of) => of, - _ => { - return Err((format!("provided path not an object file: '{}'", - filename.display()))) - } - }; - let si = mk_section_iter(of.llof); - while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { - let mut name_buf = ptr::null(); - let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); - let name = slice::from_raw_parts(name_buf as *const u8, - name_len as usize).to_vec(); - let name = String::from_utf8(name).unwrap(); - debug!("get_metadata_section: name {}", name); - if read_meta_section_name(target) == name { - let cbuf = llvm::LLVMGetSectionContents(si.llsi); - let csz = llvm::LLVMGetSectionSize(si.llsi) as usize; - let cvbuf: *const u8 = cbuf as *const u8; - let vlen = encoder::metadata_encoding_version.len(); - debug!("checking {} bytes of metadata-version stamp", - vlen); - let minsz = cmp::min(vlen, csz); - let buf0 = slice::from_raw_parts(cvbuf, minsz); - let version_ok = buf0 == encoder::metadata_encoding_version; - if !version_ok { - return Err((format!("incompatible metadata version found: '{}'", - filename.display()))); - } - - let cvbuf1 = cvbuf.offset(vlen as isize); - debug!("inflating {} bytes of compressed metadata", - csz - vlen); - let bytes = slice::from_raw_parts(cvbuf1, csz - vlen); - match 
flate::inflate_bytes(bytes) { - Ok(inflated) => return Ok(MetadataVec(inflated)), - Err(_) => {} - } - } - llvm::LLVMMoveToNextSection(si.llsi); - } - Err(format!("metadata not found: '{}'", filename.display())) - } -} - -pub fn meta_section_name(target: &Target) -> &'static str { - if target.options.is_like_osx { - "__DATA,__note.rustc" - } else if target.options.is_like_msvc { - // When using link.exe it was seen that the section name `.note.rustc` - // was getting shortened to `.note.ru`, and according to the PE and COFF - // specification: - // - // > Executable images do not use a string table and do not support - // > section names longer than 8 characters - // - // https://msdn.microsoft.com/en-us/library/windows/hardware/gg463119.aspx - // - // As a result, we choose a slightly shorter name! As to why - // `.note.rustc` works on MinGW, that's another good question... - ".rustc" - } else { - ".note.rustc" - } -} - -pub fn read_meta_section_name(target: &Target) -> &'static str { - if target.options.is_like_osx { - "__note.rustc" - } else if target.options.is_like_msvc { - ".rustc" - } else { - ".note.rustc" - } -} - -// A diagnostic function for dumping crate metadata to an output stream -pub fn list_file_metadata(target: &Target, path: &Path, - out: &mut io::Write) -> io::Result<()> { - match get_metadata_section(target, path) { - Ok(bytes) => decoder::list_crate_metadata(bytes.as_slice(), out), - Err(msg) => { - write!(out, "{}\n", msg) - } - } -} diff --git a/src/librustc_metadata/locator.rs b/src/librustc_metadata/locator.rs new file mode 100644 index 0000000000000..de465ea92f6b8 --- /dev/null +++ b/src/librustc_metadata/locator.rs @@ -0,0 +1,984 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Finds crate binaries and loads their metadata +//! +//! Might I be the first to welcome you to a world of platform differences, +//! version requirements, dependency graphs, conflicting desires, and fun! This +//! is the major guts (along with metadata::creader) of the compiler for loading +//! crates and resolving dependencies. Let's take a tour! +//! +//! # The problem +//! +//! Each invocation of the compiler is immediately concerned with one primary +//! problem, to connect a set of crates to resolved crates on the filesystem. +//! Concretely speaking, the compiler follows roughly these steps to get here: +//! +//! 1. Discover a set of `extern crate` statements. +//! 2. Transform these directives into crate names. If the directive does not +//! have an explicit name, then the identifier is the name. +//! 3. For each of these crate names, find a corresponding crate on the +//! filesystem. +//! +//! Sounds easy, right? Let's walk into some of the nuances. +//! +//! ## Transitive Dependencies +//! +//! Let's say we've got three crates: A, B, and C. A depends on B, and B depends +//! on C. When we're compiling A, we primarily need to find and locate B, but we +//! also end up needing to find and locate C as well. +//! +//! The reason for this is that any of B's types could be composed of C's types, +//! any function in B could return a type from C, etc. To be able to guarantee +//! that we can always typecheck/translate any function, we have to have +//! complete knowledge of the whole ecosystem, not just our immediate +//! dependencies. +//! +//! So now as part of the "find a corresponding crate on the filesystem" step +//! above, this involves also finding all crates for *all upstream +//! dependencies*. This includes all dependencies transitively. +//! +//! ## Rlibs and Dylibs +//! +//! The compiler has two forms of intermediate dependencies. These are dubbed +//! 
rlibs and dylibs for the static and dynamic variants, respectively. An rlib +//! is a rustc-defined file format (currently just an ar archive) while a dylib +//! is a platform-defined dynamic library. Each library has a metadata somewhere +//! inside of it. +//! +//! A third kind of dependency is an rmeta file. These are metadata files and do +//! not contain any code, etc. To a first approximation, these are treated in the +//! same way as rlibs. Where there is both an rlib and an rmeta file, the rlib +//! gets priority (even if the rmeta file is newer). An rmeta file is only +//! useful for checking a downstream crate, attempting to link one will cause an +//! error. +//! +//! When translating a crate name to a crate on the filesystem, we all of a +//! sudden need to take into account both rlibs and dylibs! Linkage later on may +//! use either one of these files, as each has their pros/cons. The job of crate +//! loading is to discover what's possible by finding all candidates. +//! +//! Most parts of this loading systems keep the dylib/rlib as just separate +//! variables. +//! +//! ## Where to look? +//! +//! We can't exactly scan your whole hard drive when looking for dependencies, +//! so we need to places to look. Currently the compiler will implicitly add the +//! target lib search path ($prefix/lib/rustlib/$target/lib) to any compilation, +//! and otherwise all -L flags are added to the search paths. +//! +//! ## What criterion to select on? +//! +//! This a pretty tricky area of loading crates. Given a file, how do we know +//! whether it's the right crate? Currently, the rules look along these lines: +//! +//! 1. Does the filename match an rlib/dylib pattern? That is to say, does the +//! filename have the right prefix/suffix? +//! 2. Does the filename have the right prefix for the crate name being queried? +//! This is filtering for files like `libfoo*.rlib` and such. +//! 3. Is the file an actual rust library? This is done by loading the metadata +//! 
from the library and making sure it's actually there. +//! 4. Does the name in the metadata agree with the name of the library? +//! 5. Does the target in the metadata agree with the current target? +//! 6. Does the SVH match? (more on this later) +//! +//! If the file answers `yes` to all these questions, then the file is +//! considered as being *candidate* for being accepted. It is illegal to have +//! more than two candidates as the compiler has no method by which to resolve +//! this conflict. Additionally, rlib/dylib candidates are considered +//! separately. +//! +//! After all this has happened, we have 1 or two files as candidates. These +//! represent the rlib/dylib file found for a library, and they're returned as +//! being found. +//! +//! ### What about versions? +//! +//! A lot of effort has been put forth to remove versioning from the compiler. +//! There have been forays in the past to have versioning baked in, but it was +//! largely always deemed insufficient to the point that it was recognized that +//! it's probably something the compiler shouldn't do anyway due to its +//! complicated nature and the state of the half-baked solutions. +//! +//! With a departure from versioning, the primary criterion for loading crates +//! is just the name of a crate. If we stopped here, it would imply that you +//! could never link two crates of the same name from different sources +//! together, which is clearly a bad state to be in. +//! +//! To resolve this problem, we come to the next section! +//! +//! # Expert Mode +//! +//! A number of flags have been added to the compiler to solve the "version +//! problem" in the previous section, as well as generally enabling more +//! powerful usage of the crate loading system of the compiler. The goal of +//! these flags and options are to enable third-party tools to drive the +//! compiler with prior knowledge about how the world should look. +//! +//! ## The `--extern` flag +//! +//! 
The compiler accepts a flag of this form a number of times: +//! +//! ```text +//! --extern crate-name=path/to/the/crate.rlib +//! ``` +//! +//! This flag is basically the following letter to the compiler: +//! +//! > Dear rustc, +//! > +//! > When you are attempting to load the immediate dependency `crate-name`, I +//! > would like you to assume that the library is located at +//! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not +//! > assume that the path I specified has the name `crate-name`. +//! +//! This flag basically overrides most matching logic except for validating that +//! the file is indeed a rust library. The same `crate-name` can be specified +//! twice to specify the rlib/dylib pair. +//! +//! ## Enabling "multiple versions" +//! +//! This basically boils down to the ability to specify arbitrary packages to +//! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it +//! would look something like: +//! +//! ```ignore +//! extern crate b1; +//! extern crate b2; +//! +//! fn main() {} +//! ``` +//! +//! and the compiler would be invoked as: +//! +//! ```text +//! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib +//! ``` +//! +//! In this scenario there are two crates named `b` and the compiler must be +//! manually driven to be informed where each crate is. +//! +//! ## Frobbing symbols +//! +//! One of the immediate problems with linking the same library together twice +//! in the same problem is dealing with duplicate symbols. The primary way to +//! deal with this in rustc is to add hashes to the end of each symbol. +//! +//! In order to force hashes to change between versions of a library, if +//! desired, the compiler exposes an option `-C metadata=foo`, which is used to +//! initially seed each symbol hash. The string `foo` is prepended to each +//! string-to-hash to ensure that symbols change over time. +//! +//! ## Loading transitive dependencies +//! +//! 
Dealing with same-named-but-distinct crates is not just a local problem, but +//! one that also needs to be dealt with for transitive dependencies. Note that +//! in the letter above `--extern` flags only apply to the *local* set of +//! dependencies, not the upstream transitive dependencies. Consider this +//! dependency graph: +//! +//! ```text +//! A.1 A.2 +//! | | +//! | | +//! B C +//! \ / +//! \ / +//! D +//! ``` +//! +//! In this scenario, when we compile `D`, we need to be able to distinctly +//! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these +//! transitive dependencies. +//! +//! Note that the key idea here is that `B` and `C` are both *already compiled*. +//! That is, they have already resolved their dependencies. Due to unrelated +//! technical reasons, when a library is compiled, it is only compatible with +//! the *exact same* version of the upstream libraries it was compiled against. +//! We use the "Strict Version Hash" to identify the exact copy of an upstream +//! library. +//! +//! With this knowledge, we know that `B` and `C` will depend on `A` with +//! different SVH values, so we crawl the normal `-L` paths looking for +//! `liba*.rlib` and filter based on the contained SVH. +//! +//! In the end, this ends up not needing `--extern` to specify upstream +//! transitive dependencies. +//! +//! # Wrapping up +//! +//! That's the general overview of loading crates in the compiler, but it's by +//! no means all of the necessary details. Take a look at the rest of +//! metadata::locator or metadata::creader for all the juicy details! 
+ +use cstore::MetadataBlob; +use creader::Library; +use schema::{METADATA_HEADER, rustc_version}; + +use rustc::hir::svh::Svh; +use rustc::session::{config, Session}; +use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; +use rustc::session::search_paths::PathKind; +use rustc::util::common; +use rustc::util::nodemap::FxHashMap; + +use rustc_llvm as llvm; +use rustc_llvm::{False, ObjectFile, mk_section_iter}; +use rustc_llvm::archive_ro::ArchiveRO; +use errors::DiagnosticBuilder; +use syntax::symbol::Symbol; +use syntax_pos::Span; +use rustc_back::target::Target; + +use std::cmp; +use std::fmt; +use std::fs::{self, File}; +use std::io::{self, Read}; +use std::path::{Path, PathBuf}; +use std::ptr; +use std::slice; +use std::time::Instant; + +use flate; + +pub struct CrateMismatch { + path: PathBuf, + got: String, +} + +pub struct Context<'a> { + pub sess: &'a Session, + pub span: Span, + pub ident: Symbol, + pub crate_name: Symbol, + pub hash: Option<&'a Svh>, + // points to either self.sess.target.target or self.sess.host, must match triple + pub target: &'a Target, + pub triple: &'a str, + pub filesearch: FileSearch<'a>, + pub root: &'a Option, + pub rejected_via_hash: Vec, + pub rejected_via_triple: Vec, + pub rejected_via_kind: Vec, + pub rejected_via_version: Vec, + pub rejected_via_filename: Vec, + pub should_match_name: bool, + pub is_proc_macro: Option, +} + +pub struct ArchiveMetadata { + _archive: ArchiveRO, + // points into self._archive + data: *const [u8], +} + +pub struct CratePaths { + pub ident: String, + pub dylib: Option, + pub rlib: Option, + pub rmeta: Option, +} + +pub const METADATA_FILENAME: &'static str = "rust.metadata.bin"; + +#[derive(Copy, Clone, PartialEq)] +enum CrateFlavor { + Rlib, + Rmeta, + Dylib, +} + +impl fmt::Display for CrateFlavor { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(match *self { + CrateFlavor::Rlib => "rlib", + CrateFlavor::Rmeta => "rmeta", + CrateFlavor::Dylib => 
"dylib", + }) + } +} + +impl CratePaths { + fn paths(&self) -> Vec { + self.dylib.iter().chain(self.rlib.iter()).chain(self.rmeta.iter()).cloned().collect() + } +} + +impl<'a> Context<'a> { + pub fn maybe_load_library_crate(&mut self) -> Option { + self.find_library_crate() + } + + pub fn load_library_crate(&mut self) -> Library { + self.find_library_crate().unwrap_or_else(|| self.report_errs()) + } + + pub fn report_errs(&mut self) -> ! { + let add = match self.root { + &None => String::new(), + &Some(ref r) => format!(" which `{}` depends on", r.ident), + }; + let mut err = if !self.rejected_via_hash.is_empty() { + struct_span_err!(self.sess, + self.span, + E0460, + "found possibly newer version of crate `{}`{}", + self.ident, + add) + } else if !self.rejected_via_triple.is_empty() { + struct_span_err!(self.sess, + self.span, + E0461, + "couldn't find crate `{}` with expected target triple {}{}", + self.ident, + self.triple, + add) + } else if !self.rejected_via_kind.is_empty() { + struct_span_err!(self.sess, + self.span, + E0462, + "found staticlib `{}` instead of rlib or dylib{}", + self.ident, + add) + } else if !self.rejected_via_version.is_empty() { + struct_span_err!(self.sess, + self.span, + E0514, + "found crate `{}` compiled by an incompatible version of rustc{}", + self.ident, + add) + } else { + let mut err = struct_span_err!(self.sess, + self.span, + E0463, + "can't find crate for `{}`{}", + self.ident, + add); + + if (self.ident == "std" || self.ident == "core") + && self.triple != config::host_triple() { + err.note(&format!("the `{}` target may not be installed", self.triple)); + } + err.span_label(self.span, &format!("can't find crate")); + err + }; + + if !self.rejected_via_triple.is_empty() { + let mismatches = self.rejected_via_triple.iter(); + for (i, &CrateMismatch { ref path, ref got }) in mismatches.enumerate() { + err.note(&format!("crate `{}`, path #{}, triple {}: {}", + self.ident, + i + 1, + got, + path.display())); + } + } + if 
!self.rejected_via_hash.is_empty() { + err.note("perhaps that crate needs to be recompiled?"); + let mismatches = self.rejected_via_hash.iter(); + for (i, &CrateMismatch { ref path, .. }) in mismatches.enumerate() { + err.note(&format!("crate `{}` path #{}: {}", self.ident, i + 1, path.display())); + } + match self.root { + &None => {} + &Some(ref r) => { + for (i, path) in r.paths().iter().enumerate() { + err.note(&format!("crate `{}` path #{}: {}", + r.ident, + i + 1, + path.display())); + } + } + } + } + if !self.rejected_via_kind.is_empty() { + err.help("please recompile that crate using --crate-type lib"); + let mismatches = self.rejected_via_kind.iter(); + for (i, &CrateMismatch { ref path, .. }) in mismatches.enumerate() { + err.note(&format!("crate `{}` path #{}: {}", self.ident, i + 1, path.display())); + } + } + if !self.rejected_via_version.is_empty() { + err.help(&format!("please recompile that crate using this compiler ({})", + rustc_version())); + let mismatches = self.rejected_via_version.iter(); + for (i, &CrateMismatch { ref path, ref got }) in mismatches.enumerate() { + err.note(&format!("crate `{}` path #{}: {} compiled by {:?}", + self.ident, + i + 1, + path.display(), + got)); + } + } + if !self.rejected_via_filename.is_empty() { + let dylibname = self.dylibname(); + let mismatches = self.rejected_via_filename.iter(); + for &CrateMismatch { ref path, .. } in mismatches { + err.note(&format!("extern location for {} is of an unknown type: {}", + self.crate_name, + path.display())) + .help(&format!("file name should be lib*.rlib or {}*.{}", + dylibname.0, + dylibname.1)); + } + } + + err.emit(); + self.sess.abort_if_errors(); + unreachable!(); + } + + fn find_library_crate(&mut self) -> Option { + // If an SVH is specified, then this is a transitive dependency that + // must be loaded via -L plus some filtering. 
+ if self.hash.is_none() { + self.should_match_name = false; + if let Some(s) = self.sess.opts.externs.get(&self.crate_name.as_str()) { + return self.find_commandline_library(s.iter()); + } + self.should_match_name = true; + } + + let dypair = self.dylibname(); + let staticpair = self.staticlibname(); + + // want: crate_name.dir_part() + prefix + crate_name.file_part + "-" + let dylib_prefix = format!("{}{}", dypair.0, self.crate_name); + let rlib_prefix = format!("lib{}", self.crate_name); + let staticlib_prefix = format!("{}{}", staticpair.0, self.crate_name); + + let mut candidates = FxHashMap(); + let mut staticlibs = vec![]; + + // First, find all possible candidate rlibs and dylibs purely based on + // the name of the files themselves. We're trying to match against an + // exact crate name and a possibly an exact hash. + // + // During this step, we can filter all found libraries based on the + // name and id found in the crate id (we ignore the path portion for + // filename matching), as well as the exact hash (if specified). If we + // end up having many candidates, we must look at the metadata to + // perform exact matches against hashes/crate ids. Note that opening up + // the metadata is where we do an exact match against the full contents + // of the crate id (path/name/id). + // + // The goal of this step is to look at as little metadata as possible. 
+ self.filesearch.search(|path, kind| { + let file = match path.file_name().and_then(|s| s.to_str()) { + None => return FileDoesntMatch, + Some(file) => file, + }; + let (hash, found_kind) = + if file.starts_with(&rlib_prefix[..]) && file.ends_with(".rlib") { + (&file[(rlib_prefix.len())..(file.len() - ".rlib".len())], CrateFlavor::Rlib) + } else if file.starts_with(&rlib_prefix[..]) && file.ends_with(".rmeta") { + (&file[(rlib_prefix.len())..(file.len() - ".rmeta".len())], CrateFlavor::Rmeta) + } else if file.starts_with(&dylib_prefix) && + file.ends_with(&dypair.1) { + (&file[(dylib_prefix.len())..(file.len() - dypair.1.len())], CrateFlavor::Dylib) + } else { + if file.starts_with(&staticlib_prefix[..]) && file.ends_with(&staticpair.1) { + staticlibs.push(CrateMismatch { + path: path.to_path_buf(), + got: "static".to_string(), + }); + } + return FileDoesntMatch; + }; + info!("lib candidate: {}", path.display()); + + let hash_str = hash.to_string(); + let slot = candidates.entry(hash_str) + .or_insert_with(|| (FxHashMap(), FxHashMap(), FxHashMap())); + let (ref mut rlibs, ref mut rmetas, ref mut dylibs) = *slot; + fs::canonicalize(path) + .map(|p| { + match found_kind { + CrateFlavor::Rlib => { rlibs.insert(p, kind); } + CrateFlavor::Rmeta => { rmetas.insert(p, kind); } + CrateFlavor::Dylib => { dylibs.insert(p, kind); } + } + FileMatches + }) + .unwrap_or(FileDoesntMatch) + }); + self.rejected_via_kind.extend(staticlibs); + + // We have now collected all known libraries into a set of candidates + // keyed of the filename hash listed. For each filename, we also have a + // list of rlibs/dylibs that apply. Here, we map each of these lists + // (per hash), to a Library candidate for returning. + // + // A Library candidate is created if the metadata for the set of + // libraries corresponds to the crate id and hash criteria that this + // search is being performed for. 
+ let mut libraries = FxHashMap(); + for (_hash, (rlibs, rmetas, dylibs)) in candidates { + let mut slot = None; + let rlib = self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot); + let rmeta = self.extract_one(rmetas, CrateFlavor::Rmeta, &mut slot); + let dylib = self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot); + if let Some((h, m)) = slot { + libraries.insert(h, + Library { + dylib: dylib, + rlib: rlib, + rmeta: rmeta, + metadata: m, + }); + } + } + + // Having now translated all relevant found hashes into libraries, see + // what we've got and figure out if we found multiple candidates for + // libraries or not. + match libraries.len() { + 0 => None, + 1 => Some(libraries.into_iter().next().unwrap().1), + _ => { + let mut err = struct_span_err!(self.sess, + self.span, + E0464, + "multiple matching crates for `{}`", + self.crate_name); + err.note("candidates:"); + for (_, lib) in libraries { + if let Some((ref p, _)) = lib.dylib { + err.note(&format!("path: {}", p.display())); + } + if let Some((ref p, _)) = lib.rlib { + err.note(&format!("path: {}", p.display())); + } + note_crate_name(&mut err, &lib.metadata.get_root().name.as_str()); + } + err.emit(); + None + } + } + } + + // Attempts to extract *one* library from the set `m`. If the set has no + // elements, `None` is returned. If the set has more than one element, then + // the errors and notes are emitted about the set of libraries. + // + // With only one library in the set, this function will extract it, and then + // read the metadata from it if `*slot` is `None`. If the metadata couldn't + // be read, it is assumed that the file isn't a valid rust library (no + // errors are emitted). 
+ fn extract_one(&mut self, + m: FxHashMap, + flavor: CrateFlavor, + slot: &mut Option<(Svh, MetadataBlob)>) + -> Option<(PathBuf, PathKind)> { + let mut ret: Option<(PathBuf, PathKind)> = None; + let mut error = 0; + + if slot.is_some() { + // FIXME(#10786): for an optimization, we only read one of the + // libraries' metadata sections. In theory we should + // read both, but reading dylib metadata is quite + // slow. + if m.is_empty() { + return None; + } else if m.len() == 1 { + return Some(m.into_iter().next().unwrap()); + } + } + + let mut err: Option = None; + for (lib, kind) in m { + info!("{} reading metadata from: {}", flavor, lib.display()); + let (hash, metadata) = match get_metadata_section(self.target, flavor, &lib) { + Ok(blob) => { + if let Some(h) = self.crate_matches(&blob, &lib) { + (h, blob) + } else { + info!("metadata mismatch"); + continue; + } + } + Err(err) => { + info!("no metadata found: {}", err); + continue; + } + }; + // If we see multiple hashes, emit an error about duplicate candidates. 
+ if slot.as_ref().map_or(false, |s| s.0 != hash) { + let mut e = struct_span_err!(self.sess, + self.span, + E0465, + "multiple {} candidates for `{}` found", + flavor, + self.crate_name); + e.span_note(self.span, + &format!(r"candidate #1: {}", + ret.as_ref() + .unwrap() + .0 + .display())); + if let Some(ref mut e) = err { + e.emit(); + } + err = Some(e); + error = 1; + *slot = None; + } + if error > 0 { + error += 1; + err.as_mut().unwrap().span_note(self.span, + &format!(r"candidate #{}: {}", + error, + lib.display())); + continue; + } + *slot = Some((hash, metadata)); + ret = Some((lib, kind)); + } + + if error > 0 { + err.unwrap().emit(); + None + } else { + ret + } + } + + fn crate_matches(&mut self, metadata: &MetadataBlob, libpath: &Path) -> Option { + let rustc_version = rustc_version(); + let found_version = metadata.get_rustc_version(); + if found_version != rustc_version { + info!("Rejecting via version: expected {} got {}", + rustc_version, + found_version); + self.rejected_via_version.push(CrateMismatch { + path: libpath.to_path_buf(), + got: found_version, + }); + return None; + } + + let root = metadata.get_root(); + if let Some(is_proc_macro) = self.is_proc_macro { + if root.macro_derive_registrar.is_some() != is_proc_macro { + return None; + } + } + + if self.should_match_name { + if self.crate_name != root.name { + info!("Rejecting via crate name"); + return None; + } + } + + if root.triple != self.triple { + info!("Rejecting via crate triple: expected {} got {}", + self.triple, + root.triple); + self.rejected_via_triple.push(CrateMismatch { + path: libpath.to_path_buf(), + got: root.triple, + }); + return None; + } + + if let Some(myhash) = self.hash { + if *myhash != root.hash { + info!("Rejecting via hash: expected {} got {}", *myhash, root.hash); + self.rejected_via_hash.push(CrateMismatch { + path: libpath.to_path_buf(), + got: myhash.to_string(), + }); + return None; + } + } + + Some(root.hash) + } + + + // Returns the corresponding 
(prefix, suffix) that files need to have for + // dynamic libraries + fn dylibname(&self) -> (String, String) { + let t = &self.target; + (t.options.dll_prefix.clone(), t.options.dll_suffix.clone()) + } + + // Returns the corresponding (prefix, suffix) that files need to have for + // static libraries + fn staticlibname(&self) -> (String, String) { + let t = &self.target; + (t.options.staticlib_prefix.clone(), t.options.staticlib_suffix.clone()) + } + + fn find_commandline_library<'b, LOCS>(&mut self, locs: LOCS) -> Option + where LOCS: Iterator + { + // First, filter out all libraries that look suspicious. We only accept + // files which actually exist that have the correct naming scheme for + // rlibs/dylibs. + let sess = self.sess; + let dylibname = self.dylibname(); + let mut rlibs = FxHashMap(); + let mut rmetas = FxHashMap(); + let mut dylibs = FxHashMap(); + { + let locs = locs.map(|l| PathBuf::from(l)).filter(|loc| { + if !loc.exists() { + sess.err(&format!("extern location for {} does not exist: {}", + self.crate_name, + loc.display())); + return false; + } + let file = match loc.file_name().and_then(|s| s.to_str()) { + Some(file) => file, + None => { + sess.err(&format!("extern location for {} is not a file: {}", + self.crate_name, + loc.display())); + return false; + } + }; + if file.starts_with("lib") && + (file.ends_with(".rlib") || file.ends_with(".rmeta")) { + return true; + } else { + let (ref prefix, ref suffix) = dylibname; + if file.starts_with(&prefix[..]) && file.ends_with(&suffix[..]) { + return true; + } + } + + self.rejected_via_filename.push(CrateMismatch { + path: loc.clone(), + got: String::new(), + }); + + false + }); + + // Now that we have an iterator of good candidates, make sure + // there's at most one rlib and at most one dylib. 
+ for loc in locs { + if loc.file_name().unwrap().to_str().unwrap().ends_with(".rlib") { + rlibs.insert(fs::canonicalize(&loc).unwrap(), PathKind::ExternFlag); + } else if loc.file_name().unwrap().to_str().unwrap().ends_with(".rmeta") { + rmetas.insert(fs::canonicalize(&loc).unwrap(), PathKind::ExternFlag); + } else { + dylibs.insert(fs::canonicalize(&loc).unwrap(), PathKind::ExternFlag); + } + } + }; + + // Extract the rlib/dylib pair. + let mut slot = None; + let rlib = self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot); + let rmeta = self.extract_one(rmetas, CrateFlavor::Rmeta, &mut slot); + let dylib = self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot); + + if rlib.is_none() && rmeta.is_none() && dylib.is_none() { + return None; + } + match slot { + Some((_, metadata)) => { + Some(Library { + dylib: dylib, + rlib: rlib, + rmeta: rmeta, + metadata: metadata, + }) + } + None => None, + } + } +} + +pub fn note_crate_name(err: &mut DiagnosticBuilder, name: &str) { + err.note(&format!("crate name: {}", name)); +} + +impl ArchiveMetadata { + fn new(ar: ArchiveRO) -> Option { + let data = { + let section = ar.iter() + .filter_map(|s| s.ok()) + .find(|sect| sect.name() == Some(METADATA_FILENAME)); + match section { + Some(s) => s.data() as *const [u8], + None => { + debug!("didn't find '{}' in the archive", METADATA_FILENAME); + return None; + } + } + }; + + Some(ArchiveMetadata { + _archive: ar, + data: data, + }) + } + + pub fn as_slice<'a>(&'a self) -> &'a [u8] { + unsafe { &*self.data } + } +} + +fn verify_decompressed_encoding_version(blob: &MetadataBlob, + filename: &Path) + -> Result<(), String> { + if !blob.is_compatible() { + Err((format!("incompatible metadata version found: '{}'", + filename.display()))) + } else { + Ok(()) + } +} + +// Just a small wrapper to time how long reading metadata takes. 
+fn get_metadata_section(target: &Target, + flavor: CrateFlavor, + filename: &Path) + -> Result { + let start = Instant::now(); + let ret = get_metadata_section_imp(target, flavor, filename); + info!("reading {:?} => {:?}", + filename.file_name().unwrap(), + start.elapsed()); + return ret; +} + +fn get_metadata_section_imp(target: &Target, + flavor: CrateFlavor, + filename: &Path) + -> Result { + if !filename.exists() { + return Err(format!("no such file: '{}'", filename.display())); + } + if flavor == CrateFlavor::Rlib { + // Use ArchiveRO for speed here, it's backed by LLVM and uses mmap + // internally to read the file. We also avoid even using a memcpy by + // just keeping the archive along while the metadata is in use. + let archive = match ArchiveRO::open(filename) { + Some(ar) => ar, + None => { + debug!("llvm didn't like `{}`", filename.display()); + return Err(format!("failed to read rlib metadata: '{}'", filename.display())); + } + }; + return match ArchiveMetadata::new(archive).map(|ar| MetadataBlob::Archive(ar)) { + None => Err(format!("failed to read rlib metadata: '{}'", filename.display())), + Some(blob) => { + verify_decompressed_encoding_version(&blob, filename)?; + Ok(blob) + } + }; + } else if flavor == CrateFlavor::Rmeta { + let mut file = File::open(filename).map_err(|_| + format!("could not open file: '{}'", filename.display()))?; + let mut buf = vec![]; + file.read_to_end(&mut buf).map_err(|_| + format!("failed to read rlib metadata: '{}'", filename.display()))?; + let blob = MetadataBlob::Raw(buf); + verify_decompressed_encoding_version(&blob, filename)?; + return Ok(blob); + } + unsafe { + let buf = common::path2cstr(filename); + let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr()); + if mb as isize == 0 { + return Err(format!("error reading library: '{}'", filename.display())); + } + let of = match ObjectFile::new(mb) { + Some(of) => of, + _ => { + return Err((format!("provided path not an object file: '{}'", 
filename.display()))) + } + }; + let si = mk_section_iter(of.llof); + while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False { + let mut name_buf = ptr::null(); + let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf); + let name = slice::from_raw_parts(name_buf as *const u8, name_len as usize).to_vec(); + let name = String::from_utf8(name).unwrap(); + debug!("get_metadata_section: name {}", name); + if read_meta_section_name(target) == name { + let cbuf = llvm::LLVMGetSectionContents(si.llsi); + let csz = llvm::LLVMGetSectionSize(si.llsi) as usize; + let cvbuf: *const u8 = cbuf as *const u8; + let vlen = METADATA_HEADER.len(); + debug!("checking {} bytes of metadata-version stamp", vlen); + let minsz = cmp::min(vlen, csz); + let buf0 = slice::from_raw_parts(cvbuf, minsz); + let version_ok = buf0 == METADATA_HEADER; + if !version_ok { + return Err((format!("incompatible metadata version found: '{}'", + filename.display()))); + } + + let cvbuf1 = cvbuf.offset(vlen as isize); + debug!("inflating {} bytes of compressed metadata", csz - vlen); + let bytes = slice::from_raw_parts(cvbuf1, csz - vlen); + match flate::inflate_bytes(bytes) { + Ok(inflated) => { + let blob = MetadataBlob::Inflated(inflated); + verify_decompressed_encoding_version(&blob, filename)?; + return Ok(blob); + } + Err(_) => {} + } + } + llvm::LLVMMoveToNextSection(si.llsi); + } + Err(format!("metadata not found: '{}'", filename.display())) + } +} + +pub fn meta_section_name(target: &Target) -> &'static str { + // Historical note: + // + // When using link.exe it was seen that the section name `.note.rustc` + // was getting shortened to `.note.ru`, and according to the PE and COFF + // specification: + // + // > Executable images do not use a string table and do not support + // > section names longer than 8 characters + // + // https://msdn.microsoft.com/en-us/library/windows/hardware/gg463119.aspx + // + // As a result, we choose a slightly shorter name! 
As to why + // `.note.rustc` works on MinGW, that's another good question... + + if target.options.is_like_osx { + "__DATA,.rustc" + } else { + ".rustc" + } +} + +pub fn read_meta_section_name(_target: &Target) -> &'static str { + ".rustc" +} + +// A diagnostic function for dumping crate metadata to an output stream +pub fn list_file_metadata(target: &Target, path: &Path, out: &mut io::Write) -> io::Result<()> { + let filename = path.file_name().unwrap().to_str().unwrap(); + let flavor = if filename.ends_with(".rlib") { + CrateFlavor::Rlib + } else if filename.ends_with(".rmeta") { + CrateFlavor::Rmeta + } else { + CrateFlavor::Dylib + }; + match get_metadata_section(target, flavor, path) { + Ok(metadata) => metadata.list_crate_metadata(out), + Err(msg) => write!(out, "{}\n", msg), + } +} diff --git a/src/librustc_metadata/macro_import.rs b/src/librustc_metadata/macro_import.rs deleted file mode 100644 index d67fc3a0eaba5..0000000000000 --- a/src/librustc_metadata/macro_import.rs +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Used by `rustc` when loading a crate with exported macros. 
- -use creader::CrateReader; -use cstore::CStore; - -use rustc::session::Session; - -use std::collections::{HashSet, HashMap}; -use syntax::codemap::Span; -use syntax::parse::token; -use syntax::ast; -use syntax::attr; -use syntax::visit; -use syntax::visit::Visitor; -use syntax::attr::AttrMetaMethods; - -struct MacroLoader<'a> { - sess: &'a Session, - span_whitelist: HashSet, - reader: CrateReader<'a>, - macros: Vec, -} - -impl<'a> MacroLoader<'a> { - fn new(sess: &'a Session, cstore: &'a CStore) -> MacroLoader<'a> { - MacroLoader { - sess: sess, - span_whitelist: HashSet::new(), - reader: CrateReader::new(sess, cstore), - macros: vec![], - } - } -} - -pub fn call_bad_macro_reexport(a: &Session, b: Span) { - span_err!(a, b, E0467, "bad macro reexport"); -} - -/// Read exported macros. -pub fn read_macro_defs(sess: &Session, cstore: &CStore, krate: &ast::Crate) - -> Vec -{ - let mut loader = MacroLoader::new(sess, cstore); - - // We need to error on `#[macro_use] extern crate` when it isn't at the - // crate root, because `$crate` won't work properly. Identify these by - // spans, because the crate map isn't set up yet. - for item in &krate.module.items { - if let ast::ItemExternCrate(_) = item.node { - loader.span_whitelist.insert(item.span); - } - } - - visit::walk_crate(&mut loader, krate); - - loader.macros -} - -pub type MacroSelection = HashMap; - -// note that macros aren't expanded yet, and therefore macros can't add macro imports. -impl<'a, 'v> Visitor<'v> for MacroLoader<'a> { - fn visit_item(&mut self, item: &ast::Item) { - // We're only interested in `extern crate`. - match item.node { - ast::ItemExternCrate(_) => {} - _ => { - visit::walk_item(self, item); - return; - } - } - - // Parse the attributes relating to macros. - let mut import = Some(HashMap::new()); // None => load all - let mut reexport = HashMap::new(); - - for attr in &item.attrs { - let mut used = true; - match &attr.name()[..] 
{ - "macro_use" => { - let names = attr.meta_item_list(); - if names.is_none() { - // no names => load all - import = None; - } - if let (Some(sel), Some(names)) = (import.as_mut(), names) { - for attr in names { - if let ast::MetaWord(ref name) = attr.node { - sel.insert(name.clone(), attr.span); - } else { - span_err!(self.sess, attr.span, E0466, "bad macro import"); - } - } - } - } - "macro_reexport" => { - let names = match attr.meta_item_list() { - Some(names) => names, - None => { - call_bad_macro_reexport(self.sess, attr.span); - continue; - } - }; - - for attr in names { - if let ast::MetaWord(ref name) = attr.node { - reexport.insert(name.clone(), attr.span); - } else { - call_bad_macro_reexport(self.sess, attr.span); - } - } - } - _ => used = false, - } - if used { - attr::mark_used(attr); - } - } - - self.load_macros(item, import, reexport) - } - - fn visit_mac(&mut self, _: &ast::Mac) { - // bummer... can't see macro imports inside macros. - // do nothing. - } -} - -impl<'a> MacroLoader<'a> { - fn load_macros<'b>(&mut self, - vi: &ast::Item, - import: Option, - reexport: MacroSelection) { - if let Some(sel) = import.as_ref() { - if sel.is_empty() && reexport.is_empty() { - return; - } - } - - if !self.span_whitelist.contains(&vi.span) { - span_err!(self.sess, vi.span, E0468, - "an `extern crate` loading macros must be at the crate root"); - return; - } - - let macros = self.reader.read_exported_macros(vi); - let mut seen = HashSet::new(); - - for mut def in macros { - let name = def.ident.name.as_str(); - - def.use_locally = match import.as_ref() { - None => true, - Some(sel) => sel.contains_key(&name), - }; - def.export = reexport.contains_key(&name); - def.allow_internal_unstable = attr::contains_name(&def.attrs, - "allow_internal_unstable"); - debug!("load_macros: loaded: {:?}", def); - self.macros.push(def); - seen.insert(name); - } - - if let Some(sel) = import.as_ref() { - for (name, span) in sel { - if !seen.contains(&name) { - 
span_err!(self.sess, *span, E0469, - "imported macro not found"); - } - } - } - - for (name, span) in &reexport { - if !seen.contains(&name) { - span_err!(self.sess, *span, E0470, - "reexported macro not found"); - } - } - } -} diff --git a/src/librustc_metadata/macros.rs b/src/librustc_metadata/macros.rs deleted file mode 100644 index ed764ebd9f95d..0000000000000 --- a/src/librustc_metadata/macros.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -macro_rules! enum_from_u32 { - ($(#[$attr:meta])* pub enum $name:ident { - $($variant:ident = $e:expr,)* - }) => { - $(#[$attr])* - pub enum $name { - $($variant = $e),* - } - - impl $name { - pub fn from_u32(u: u32) -> Option<$name> { - $(if u == $name::$variant as u32 { - return Some($name::$variant) - })* - None - } - } - }; - ($(#[$attr:meta])* pub enum $name:ident { - $($variant:ident,)* - }) => { - $(#[$attr])* - pub enum $name { - $($variant,)* - } - - impl $name { - pub fn from_u32(u: u32) -> Option<$name> { - $(if u == $name::$variant as u32 { - return Some($name::$variant) - })* - None - } - } - } -} diff --git a/src/librustc_metadata/schema.rs b/src/librustc_metadata/schema.rs new file mode 100644 index 0000000000000..00c3709435de5 --- /dev/null +++ b/src/librustc_metadata/schema.rs @@ -0,0 +1,339 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use astencode; +use index; + +use rustc::hir; +use rustc::hir::def::{self, CtorKind}; +use rustc::hir::def_id::{DefIndex, DefId}; +use rustc::middle::cstore::{DepKind, LinkagePreference, NativeLibrary}; +use rustc::middle::lang_items; +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc_back::PanicStrategy; + +use rustc_serialize as serialize; +use syntax::{ast, attr}; +use syntax::symbol::Symbol; +use syntax_pos::{self, Span}; + +use std::marker::PhantomData; + +pub fn rustc_version() -> String { + format!("rustc {}", + option_env!("CFG_VERSION").unwrap_or("unknown version")) +} + +/// Metadata encoding version. +/// NB: increment this if you change the format of metadata such that +/// the rustc version can't be found to compare with `rustc_version()`. +pub const METADATA_VERSION: u8 = 4; + +/// Metadata header which includes `METADATA_VERSION`. +/// To get older versions of rustc to ignore this metadata, +/// there are 4 zero bytes at the start, which are treated +/// as a length of 0 by old compilers. +/// +/// This header is followed by the position of the `CrateRoot`, +/// which is encoded as a 32-bit big-endian unsigned integer, +/// and further followed by the rustc version string. +pub const METADATA_HEADER: &'static [u8; 12] = + &[0, 0, 0, 0, b'r', b'u', b's', b't', 0, 0, 0, METADATA_VERSION]; + +/// The shorthand encoding uses an enum's variant index `usize` +/// and is offset by this value so it never matches a real variant. +/// This offset is also chosen so that the first byte is never < 0x80. +pub const SHORTHAND_OFFSET: usize = 0x80; + +/// A value of type T referred to by its absolute position +/// in the metadata, and which can be decoded lazily. +/// +/// Metadata is effective a tree, encoded in post-order, +/// and with the root's position written next to the header. +/// That means every single `Lazy` points to some previous +/// location in the metadata and is part of a larger node. 
+/// +/// The first `Lazy` in a node is encoded as the backwards +/// distance from the position where the containing node +/// starts and where the `Lazy` points to, while the rest +/// use the forward distance from the previous `Lazy`. +/// Distances start at 1, as 0-byte nodes are invalid. +/// Also invalid are nodes being referred in a different +/// order than they were encoded in. +#[must_use] +pub struct Lazy { + pub position: usize, + _marker: PhantomData, +} + +impl Lazy { + pub fn with_position(position: usize) -> Lazy { + Lazy { + position: position, + _marker: PhantomData, + } + } + + /// Returns the minimum encoded size of a value of type `T`. + // FIXME(eddyb) Give better estimates for certain types. + pub fn min_size() -> usize { + 1 + } +} + +impl Copy for Lazy {} +impl Clone for Lazy { + fn clone(&self) -> Self { + *self + } +} + +impl serialize::UseSpecializedEncodable for Lazy {} +impl serialize::UseSpecializedDecodable for Lazy {} + +/// A sequence of type T referred to by its absolute position +/// in the metadata and length, and which can be decoded lazily. +/// The sequence is a single node for the purposes of `Lazy`. +/// +/// Unlike `Lazy>`, the length is encoded next to the +/// position, not at the position, which means that the length +/// doesn't need to be known before encoding all the elements. +/// +/// If the length is 0, no position is encoded, but otherwise, +/// the encoding is that of `Lazy`, with the distinction that +/// the minimal distance the length of the sequence, i.e. +/// it's assumed there's no 0-byte element in the sequence. 
+#[must_use] +pub struct LazySeq { + pub len: usize, + pub position: usize, + _marker: PhantomData, +} + +impl LazySeq { + pub fn empty() -> LazySeq { + LazySeq::with_position_and_length(0, 0) + } + + pub fn with_position_and_length(position: usize, len: usize) -> LazySeq { + LazySeq { + len: len, + position: position, + _marker: PhantomData, + } + } + + /// Returns the minimum encoded size of `length` values of type `T`. + pub fn min_size(length: usize) -> usize { + length + } +} + +impl Copy for LazySeq {} +impl Clone for LazySeq { + fn clone(&self) -> Self { + *self + } +} + +impl serialize::UseSpecializedEncodable for LazySeq {} +impl serialize::UseSpecializedDecodable for LazySeq {} + +/// Encoding / decoding state for `Lazy` and `LazySeq`. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum LazyState { + /// Outside of a metadata node. + NoNode, + + /// Inside a metadata node, and before any `Lazy` or `LazySeq`. + /// The position is that of the node itself. + NodeStart(usize), + + /// Inside a metadata node, with a previous `Lazy` or `LazySeq`. + /// The position is a conservative estimate of where that + /// previous `Lazy` / `LazySeq` would end (see their comments). 
+ Previous(usize), +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct CrateRoot { + pub name: Symbol, + pub triple: String, + pub hash: hir::svh::Svh, + pub disambiguator: Symbol, + pub panic_strategy: PanicStrategy, + pub plugin_registrar_fn: Option, + pub macro_derive_registrar: Option, + + pub crate_deps: LazySeq, + pub dylib_dependency_formats: LazySeq>, + pub lang_items: LazySeq<(DefIndex, usize)>, + pub lang_items_missing: LazySeq, + pub native_libraries: LazySeq, + pub codemap: LazySeq, + pub impls: LazySeq, + pub reachable_ids: LazySeq, + pub index: LazySeq, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct CrateDep { + pub name: ast::Name, + pub hash: hir::svh::Svh, + pub kind: DepKind, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct TraitImpls { + pub trait_id: (u32, DefIndex), + pub impls: LazySeq, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct Entry<'tcx> { + pub kind: EntryKind<'tcx>, + pub visibility: ty::Visibility, + pub span: Lazy, + pub def_key: Lazy, + pub attributes: LazySeq, + pub children: LazySeq, + pub stability: Option>, + pub deprecation: Option>, + + pub ty: Option>>, + pub inherent_impls: LazySeq, + pub variances: LazySeq, + pub generics: Option>>, + pub predicates: Option>>, + + pub ast: Option>>, + pub mir: Option>>, +} + +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum EntryKind<'tcx> { + Const, + ImmStatic, + MutStatic, + ForeignImmStatic, + ForeignMutStatic, + ForeignMod, + Type, + Enum, + Field, + Variant(Lazy), + Struct(Lazy), + Union(Lazy), + Fn(Lazy), + ForeignFn(Lazy), + Mod(Lazy), + MacroDef(Lazy), + Closure(Lazy>), + Trait(Lazy>), + Impl(Lazy>), + DefaultImpl(Lazy>), + Method(Lazy), + AssociatedType(AssociatedContainer), + AssociatedConst(AssociatedContainer), +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct ModData { + pub reexports: LazySeq, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct MacroDef { + pub body: String, +} + 
+#[derive(RustcEncodable, RustcDecodable)] +pub struct FnData { + pub constness: hir::Constness, + pub arg_names: LazySeq, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct VariantData { + pub ctor_kind: CtorKind, + pub disr: u64, + + /// If this is a struct's only variant, this + /// is the index of the "struct ctor" item. + pub struct_ctor: Option, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct TraitData<'tcx> { + pub unsafety: hir::Unsafety, + pub paren_sugar: bool, + pub has_default_impl: bool, + pub super_predicates: Lazy>, +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct ImplData<'tcx> { + pub polarity: hir::ImplPolarity, + pub parent_impl: Option, + pub coerce_unsized_kind: Option, + pub trait_ref: Option>>, +} + +/// Describes whether the container of an associated item +/// is a trait or an impl and whether, in a trait, it has +/// a default, or an in impl, whether it's marked "default". +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum AssociatedContainer { + TraitRequired, + TraitWithDefault, + ImplDefault, + ImplFinal, +} + +impl AssociatedContainer { + pub fn with_def_id(&self, def_id: DefId) -> ty::AssociatedItemContainer { + match *self { + AssociatedContainer::TraitRequired | + AssociatedContainer::TraitWithDefault => ty::TraitContainer(def_id), + + AssociatedContainer::ImplDefault | + AssociatedContainer::ImplFinal => ty::ImplContainer(def_id), + } + } + + pub fn defaultness(&self) -> hir::Defaultness { + match *self { + AssociatedContainer::TraitRequired => hir::Defaultness::Default { + has_value: false, + }, + + AssociatedContainer::TraitWithDefault | + AssociatedContainer::ImplDefault => hir::Defaultness::Default { + has_value: true, + }, + + AssociatedContainer::ImplFinal => hir::Defaultness::Final, + } + } +} + +#[derive(RustcEncodable, RustcDecodable)] +pub struct MethodData { + pub fn_data: FnData, + pub container: AssociatedContainer, + pub has_self: bool, +} + +#[derive(RustcEncodable, 
RustcDecodable)] +pub struct ClosureData<'tcx> { + pub kind: ty::ClosureKind, + pub ty: Lazy>, +} diff --git a/src/librustc_metadata/tls_context.rs b/src/librustc_metadata/tls_context.rs deleted file mode 100644 index 37e661c21e15a..0000000000000 --- a/src/librustc_metadata/tls_context.rs +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// This module provides implementations for the thread-local encoding and -// decoding context traits in rustc::middle::cstore::tls. - -use rbml::opaque::Encoder as OpaqueEncoder; -use rbml::opaque::Decoder as OpaqueDecoder; -use rustc::middle::cstore::tls; -use rustc::middle::def_id::DefId; -use rustc::middle::subst::Substs; -use rustc::middle::ty; - -use decoder::{self, Cmd}; -use encoder; -use tydecode::TyDecoder; -use tyencode; - -impl<'a, 'tcx: 'a> tls::EncodingContext<'tcx> for encoder::EncodeContext<'a, 'tcx> { - - fn tcx<'s>(&'s self) -> &'s ty::ctxt<'tcx> { - &self.tcx - } - - fn encode_ty(&self, encoder: &mut OpaqueEncoder, t: ty::Ty<'tcx>) { - tyencode::enc_ty(encoder.cursor, &self.ty_str_ctxt(), t); - } - - fn encode_substs(&self, encoder: &mut OpaqueEncoder, substs: &Substs<'tcx>) { - tyencode::enc_substs(encoder.cursor, &self.ty_str_ctxt(), substs); - } -} - -pub struct DecodingContext<'a, 'tcx: 'a> { - pub crate_metadata: Cmd<'a>, - pub tcx: &'a ty::ctxt<'tcx>, -} - -impl<'a, 'tcx: 'a> tls::DecodingContext<'tcx> for DecodingContext<'a, 'tcx> { - - fn tcx<'s>(&'s self) -> &'s ty::ctxt<'tcx> { - &self.tcx - } - - fn decode_ty(&self, decoder: &mut OpaqueDecoder) -> ty::Ty<'tcx> { - let def_id_convert = &mut |did| { - decoder::translate_def_id(self.crate_metadata, did) - }; - - 
let starting_position = decoder.position(); - - let mut ty_decoder = TyDecoder::new( - self.crate_metadata.data.as_slice(), - self.crate_metadata.cnum, - starting_position, - self.tcx, - def_id_convert); - - let ty = ty_decoder.parse_ty(); - - let end_position = ty_decoder.position(); - - // We can just reuse the tydecode implementation for parsing types, but - // we have to make sure to leave the rbml reader at the position just - // after the type. - decoder.advance(end_position - starting_position); - ty - } - - fn decode_substs(&self, decoder: &mut OpaqueDecoder) -> Substs<'tcx> { - let def_id_convert = &mut |did| { - decoder::translate_def_id(self.crate_metadata, did) - }; - - let starting_position = decoder.position(); - - let mut ty_decoder = TyDecoder::new( - self.crate_metadata.data.as_slice(), - self.crate_metadata.cnum, - starting_position, - self.tcx, - def_id_convert); - - let substs = ty_decoder.parse_substs(); - - let end_position = ty_decoder.position(); - - decoder.advance(end_position - starting_position); - substs - } - - fn translate_def_id(&self, def_id: DefId) -> DefId { - decoder::translate_def_id(self.crate_metadata, def_id) - } -} diff --git a/src/librustc_metadata/tydecode.rs b/src/librustc_metadata/tydecode.rs deleted file mode 100644 index 5a48d6019d699..0000000000000 --- a/src/librustc_metadata/tydecode.rs +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- - -// Type decoding - -// tjc note: Would be great to have a `match check` macro equivalent -// for some of these - -#![allow(non_camel_case_types)] - -use rustc_front::hir; - -use middle::def_id::{DefId, DefIndex}; -use middle::region; -use middle::subst; -use middle::subst::VecPerParamSpace; -use middle::ty::{self, ToPredicate, Ty, TypeFoldable}; - -use rbml; -use rbml::leb128; -use std::str; -use syntax::abi; -use syntax::ast; -use syntax::parse::token; - -// Compact string representation for Ty values. API TyStr & -// parse_from_str. Extra parameters are for converting to/from def_ids in the -// data buffer. Whatever format you choose should not contain pipe characters. - -pub type DefIdConvert<'a> = &'a mut FnMut(DefId) -> DefId; - -pub struct TyDecoder<'a, 'tcx: 'a> { - data: &'a [u8], - krate: ast::CrateNum, - pos: usize, - tcx: &'a ty::ctxt<'tcx>, - conv_def_id: DefIdConvert<'a>, -} - -impl<'a,'tcx> TyDecoder<'a,'tcx> { - pub fn with_doc(tcx: &'a ty::ctxt<'tcx>, - crate_num: ast::CrateNum, - doc: rbml::Doc<'a>, - conv: DefIdConvert<'a>) - -> TyDecoder<'a,'tcx> { - TyDecoder::new(doc.data, crate_num, doc.start, tcx, conv) - } - - pub fn new(data: &'a [u8], - crate_num: ast::CrateNum, - pos: usize, - tcx: &'a ty::ctxt<'tcx>, - conv: DefIdConvert<'a>) - -> TyDecoder<'a, 'tcx> { - TyDecoder { - data: data, - krate: crate_num, - pos: pos, - tcx: tcx, - conv_def_id: conv, - } - } - - pub fn position(&self) -> usize { - self.pos - } - - fn peek(&self) -> char { - self.data[self.pos] as char - } - - fn next(&mut self) -> char { - let ch = self.data[self.pos] as char; - self.pos = self.pos + 1; - return ch; - } - - fn next_byte(&mut self) -> u8 { - let b = self.data[self.pos]; - self.pos = self.pos + 1; - return b; - } - - fn scan(&mut self, mut is_last: F) -> &'a [u8] - where F: FnMut(char) -> bool, - { - let start_pos = self.pos; - debug!("scan: '{}' (start)", self.data[self.pos] as char); - while !is_last(self.data[self.pos] as char) { - self.pos += 1; - 
debug!("scan: '{}'", self.data[self.pos] as char); - } - let end_pos = self.pos; - self.pos += 1; - return &self.data[start_pos..end_pos]; - } - - fn parse_vuint(&mut self) -> usize { - let (value, bytes_read) = leb128::read_unsigned_leb128(self.data, - self.pos); - self.pos += bytes_read; - value as usize - } - - fn parse_name(&mut self, last: char) -> ast::Name { - fn is_last(b: char, c: char) -> bool { return c == b; } - let bytes = self.scan(|a| is_last(last, a)); - token::intern(str::from_utf8(bytes).unwrap()) - } - - fn parse_size(&mut self) -> Option { - assert_eq!(self.next(), '/'); - - if self.peek() == '|' { - assert_eq!(self.next(), '|'); - None - } else { - let n = self.parse_uint(); - assert_eq!(self.next(), '|'); - Some(n) - } - } - - fn parse_vec_per_param_space(&mut self, mut f: F) -> VecPerParamSpace where - F: FnMut(&mut TyDecoder<'a, 'tcx>) -> T, - { - let mut r = VecPerParamSpace::empty(); - for &space in &subst::ParamSpace::all() { - assert_eq!(self.next(), '['); - while self.peek() != ']' { - r.push(space, f(self)); - } - assert_eq!(self.next(), ']'); - } - r - } - - pub fn parse_substs(&mut self) -> subst::Substs<'tcx> { - let regions = self.parse_region_substs(); - let types = self.parse_vec_per_param_space(|this| this.parse_ty()); - subst::Substs { types: types, regions: regions } - } - - fn parse_region_substs(&mut self) -> subst::RegionSubsts { - match self.next() { - 'e' => subst::ErasedRegions, - 'n' => { - subst::NonerasedRegions( - self.parse_vec_per_param_space(|this| this.parse_region())) - } - _ => panic!("parse_bound_region: bad input") - } - } - - fn parse_bound_region(&mut self) -> ty::BoundRegion { - match self.next() { - 'a' => { - let id = self.parse_u32(); - assert_eq!(self.next(), '|'); - ty::BrAnon(id) - } - '[' => { - let def = self.parse_def(); - let name = token::intern(&self.parse_str(']')); - ty::BrNamed(def, name) - } - 'f' => { - let id = self.parse_u32(); - assert_eq!(self.next(), '|'); - ty::BrFresh(id) - } - 'e' 
=> ty::BrEnv, - _ => panic!("parse_bound_region: bad input") - } - } - - pub fn parse_region(&mut self) -> ty::Region { - match self.next() { - 'b' => { - assert_eq!(self.next(), '['); - let id = ty::DebruijnIndex::new(self.parse_u32()); - assert_eq!(self.next(), '|'); - let br = self.parse_bound_region(); - assert_eq!(self.next(), ']'); - ty::ReLateBound(id, br) - } - 'B' => { - assert_eq!(self.next(), '['); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let name = token::intern(&self.parse_str(']')); - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, - index: index, - name: name - }) - } - 'f' => { - assert_eq!(self.next(), '['); - let scope = self.parse_scope(); - assert_eq!(self.next(), '|'); - let br = self.parse_bound_region(); - assert_eq!(self.next(), ']'); - ty::ReFree(ty::FreeRegion { scope: scope, - bound_region: br}) - } - 's' => { - let scope = self.parse_scope(); - assert_eq!(self.next(), '|'); - ty::ReScope(scope) - } - 't' => { - ty::ReStatic - } - 'e' => { - ty::ReStatic - } - _ => panic!("parse_region: bad input") - } - } - - fn parse_scope(&mut self) -> region::CodeExtent { - self.tcx.region_maps.bogus_code_extent(match self.next() { - // This creates scopes with the wrong NodeId. This isn't - // actually a problem because scopes only exist *within* - // functions, and functions aren't loaded until trans which - // doesn't care about regions. - // - // May still be worth fixing though. - 'C' => { - assert_eq!(self.next(), '['); - let fn_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let body_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), ']'); - region::CodeExtentData::CallSiteScope { - fn_id: fn_id, body_id: body_id - } - } - // This creates scopes with the wrong NodeId. (See note above.) 
- 'P' => { - assert_eq!(self.next(), '['); - let fn_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let body_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), ']'); - region::CodeExtentData::ParameterScope { - fn_id: fn_id, body_id: body_id - } - } - 'M' => { - let node_id = self.parse_uint() as ast::NodeId; - region::CodeExtentData::Misc(node_id) - } - 'D' => { - let node_id = self.parse_uint() as ast::NodeId; - region::CodeExtentData::DestructionScope(node_id) - } - 'B' => { - assert_eq!(self.next(), '['); - let node_id = self.parse_uint() as ast::NodeId; - assert_eq!(self.next(), '|'); - let first_stmt_index = self.parse_u32(); - assert_eq!(self.next(), ']'); - let block_remainder = region::BlockRemainder { - block: node_id, first_statement_index: first_stmt_index, - }; - region::CodeExtentData::Remainder(block_remainder) - } - _ => panic!("parse_scope: bad input") - }) - } - - fn parse_opt(&mut self, f: F) -> Option - where F: FnOnce(&mut TyDecoder<'a, 'tcx>) -> T, - { - match self.next() { - 'n' => None, - 's' => Some(f(self)), - _ => panic!("parse_opt: bad input") - } - } - - fn parse_str(&mut self, term: char) -> String { - let mut result = String::new(); - while self.peek() != term { - unsafe { - result.as_mut_vec().extend_from_slice(&[self.next_byte()]) - } - } - self.next(); - result - } - - pub fn parse_trait_ref(&mut self) -> ty::TraitRef<'tcx> { - let def = self.parse_def(); - let substs = self.tcx.mk_substs(self.parse_substs()); - ty::TraitRef {def_id: def, substs: substs} - } - - pub fn parse_ty(&mut self) -> Ty<'tcx> { - let tcx = self.tcx; - match self.next() { - 'b' => return tcx.types.bool, - 'i' => { /* eat the s of is */ self.next(); return tcx.types.isize }, - 'u' => { /* eat the s of us */ self.next(); return tcx.types.usize }, - 'M' => { - match self.next() { - 'b' => return tcx.types.u8, - 'w' => return tcx.types.u16, - 'l' => return tcx.types.u32, - 'd' => return tcx.types.u64, - 'B' => return 
tcx.types.i8, - 'W' => return tcx.types.i16, - 'L' => return tcx.types.i32, - 'D' => return tcx.types.i64, - 'f' => return tcx.types.f32, - 'F' => return tcx.types.f64, - _ => panic!("parse_ty: bad numeric type") - } - } - 'c' => return tcx.types.char, - 't' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - assert_eq!(self.next(), ']'); - let def = self.tcx.lookup_adt_def(did); - return tcx.mk_enum(def, self.tcx.mk_substs(substs)); - } - 'x' => { - assert_eq!(self.next(), '['); - let trait_ref = ty::Binder(self.parse_trait_ref()); - let bounds = self.parse_existential_bounds(); - assert_eq!(self.next(), ']'); - return tcx.mk_trait(trait_ref, bounds); - } - 'p' => { - assert_eq!(self.next(), '['); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let name = token::intern(&self.parse_str(']')); - return tcx.mk_param(space, index, name); - } - '~' => return tcx.mk_box(self.parse_ty()), - '*' => return tcx.mk_ptr(self.parse_mt()), - '&' => { - let r = self.parse_region(); - let mt = self.parse_mt(); - return tcx.mk_ref(tcx.mk_region(r), mt); - } - 'V' => { - let t = self.parse_ty(); - return match self.parse_size() { - Some(n) => tcx.mk_array(t, n), - None => tcx.mk_slice(t) - }; - } - 'v' => { - return tcx.mk_str(); - } - 'T' => { - assert_eq!(self.next(), '['); - let mut params = Vec::new(); - while self.peek() != ']' { params.push(self.parse_ty()); } - self.pos = self.pos + 1; - return tcx.mk_tup(params); - } - 'F' => { - let def_id = self.parse_def(); - return tcx.mk_fn(Some(def_id), tcx.mk_bare_fn(self.parse_bare_fn_ty())); - } - 'G' => { - return tcx.mk_fn(None, tcx.mk_bare_fn(self.parse_bare_fn_ty())); - } - '#' => { - // This is a hacky little caching scheme. The idea is that if we encode - // the same type twice, the second (and third, and fourth...) 
time we will - // just write `#123`, where `123` is the offset in the metadata of the - // first appearance. Now when we are *decoding*, if we see a `#123`, we - // can first check a cache (`tcx.rcache`) for that offset. If we find something, - // we return it (modulo closure types, see below). But if not, then we - // jump to offset 123 and read the type from there. - - let pos = self.parse_vuint(); - let key = ty::CReaderCacheKey { cnum: self.krate, pos: pos }; - match tcx.rcache.borrow().get(&key).cloned() { - Some(tt) => { - // If there is a closure buried in the type some where, then we - // need to re-convert any def ids (see case 'k', below). That means - // we can't reuse the cached version. - if !tt.has_closure_types() { - return tt; - } - } - None => {} - } - - let mut substate = TyDecoder::new(self.data, - self.krate, - pos, - self.tcx, - self.conv_def_id); - let tt = substate.parse_ty(); - tcx.rcache.borrow_mut().insert(key, tt); - return tt; - } - '\"' => { - let _ = self.parse_def(); - let inner = self.parse_ty(); - inner - } - 'a' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - assert_eq!(self.next(), ']'); - let def = self.tcx.lookup_adt_def(did); - return self.tcx.mk_struct(def, self.tcx.mk_substs(substs)); - } - 'k' => { - assert_eq!(self.next(), '['); - let did = self.parse_def(); - let substs = self.parse_substs(); - let mut tys = vec![]; - while self.peek() != '.' 
{ - tys.push(self.parse_ty()); - } - assert_eq!(self.next(), '.'); - assert_eq!(self.next(), ']'); - return self.tcx.mk_closure(did, self.tcx.mk_substs(substs), tys); - } - 'P' => { - assert_eq!(self.next(), '['); - let trait_ref = self.parse_trait_ref(); - let name = token::intern(&self.parse_str(']')); - return tcx.mk_projection(trait_ref, name); - } - 'e' => { - return tcx.types.err; - } - c => { panic!("unexpected char in type string: {}", c);} - } - } - - fn parse_mutability(&mut self) -> hir::Mutability { - match self.peek() { - 'm' => { self.next(); hir::MutMutable } - _ => { hir::MutImmutable } - } - } - - fn parse_mt(&mut self) -> ty::TypeAndMut<'tcx> { - let m = self.parse_mutability(); - ty::TypeAndMut { ty: self.parse_ty(), mutbl: m } - } - - fn parse_def(&mut self) -> DefId { - let def_id = parse_defid(self.scan(|c| c == '|')); - return (self.conv_def_id)(def_id); - } - - fn parse_uint(&mut self) -> usize { - let mut n = 0; - loop { - let cur = self.peek(); - if cur < '0' || cur > '9' { return n; } - self.pos = self.pos + 1; - n *= 10; - n += (cur as usize) - ('0' as usize); - }; - } - - fn parse_u32(&mut self) -> u32 { - let n = self.parse_uint(); - let m = n as u32; - assert_eq!(m as usize, n); - m - } - - fn parse_param_space(&mut self) -> subst::ParamSpace { - subst::ParamSpace::from_uint(self.parse_uint()) - } - - fn parse_abi_set(&mut self) -> abi::Abi { - assert_eq!(self.next(), '['); - let bytes = self.scan(|c| c == ']'); - let abi_str = str::from_utf8(bytes).unwrap(); - abi::lookup(&abi_str[..]).expect(abi_str) - } - - pub fn parse_closure_ty(&mut self) -> ty::ClosureTy<'tcx> { - let unsafety = parse_unsafety(self.next()); - let sig = self.parse_sig(); - let abi = self.parse_abi_set(); - ty::ClosureTy { - unsafety: unsafety, - sig: sig, - abi: abi, - } - } - - pub fn parse_bare_fn_ty(&mut self) -> ty::BareFnTy<'tcx> { - let unsafety = parse_unsafety(self.next()); - let abi = self.parse_abi_set(); - let sig = self.parse_sig(); - ty::BareFnTy { 
- unsafety: unsafety, - abi: abi, - sig: sig - } - } - - fn parse_sig(&mut self) -> ty::PolyFnSig<'tcx> { - assert_eq!(self.next(), '['); - let mut inputs = Vec::new(); - while self.peek() != ']' { - inputs.push(self.parse_ty()); - } - self.pos += 1; // eat the ']' - let variadic = match self.next() { - 'V' => true, - 'N' => false, - r => panic!(format!("bad variadic: {}", r)), - }; - let output = match self.peek() { - 'z' => { - self.pos += 1; - ty::FnDiverging - } - _ => ty::FnConverging(self.parse_ty()) - }; - ty::Binder(ty::FnSig {inputs: inputs, - output: output, - variadic: variadic}) - } - - pub fn parse_predicate(&mut self) -> ty::Predicate<'tcx> { - match self.next() { - 't' => ty::Binder(self.parse_trait_ref()).to_predicate(), - 'e' => ty::Binder(ty::EquatePredicate(self.parse_ty(), - self.parse_ty())).to_predicate(), - 'r' => ty::Binder(ty::OutlivesPredicate(self.parse_region(), - self.parse_region())).to_predicate(), - 'o' => ty::Binder(ty::OutlivesPredicate(self.parse_ty(), - self.parse_region())).to_predicate(), - 'p' => ty::Binder(self.parse_projection_predicate()).to_predicate(), - 'w' => ty::Predicate::WellFormed(self.parse_ty()), - 'O' => { - let def_id = self.parse_def(); - assert_eq!(self.next(), '|'); - ty::Predicate::ObjectSafe(def_id) - } - c => panic!("Encountered invalid character in metadata: {}", c) - } - } - - fn parse_projection_predicate(&mut self) -> ty::ProjectionPredicate<'tcx> { - ty::ProjectionPredicate { - projection_ty: ty::ProjectionTy { - trait_ref: self.parse_trait_ref(), - item_name: token::intern(&self.parse_str('|')), - }, - ty: self.parse_ty(), - } - } - - pub fn parse_type_param_def(&mut self) -> ty::TypeParameterDef<'tcx> { - let name = self.parse_name(':'); - let def_id = self.parse_def(); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let default_def_id = self.parse_def(); - let default = self.parse_opt(|this| this.parse_ty()); 
- let object_lifetime_default = self.parse_object_lifetime_default(); - - ty::TypeParameterDef { - name: name, - def_id: def_id, - space: space, - index: index, - default_def_id: default_def_id, - default: default, - object_lifetime_default: object_lifetime_default, - } - } - - pub fn parse_region_param_def(&mut self) -> ty::RegionParameterDef { - let name = self.parse_name(':'); - let def_id = self.parse_def(); - let space = self.parse_param_space(); - assert_eq!(self.next(), '|'); - let index = self.parse_u32(); - assert_eq!(self.next(), '|'); - let mut bounds = vec![]; - loop { - match self.next() { - 'R' => bounds.push(self.parse_region()), - '.' => { break; } - c => { - panic!("parse_region_param_def: bad bounds ('{}')", c) - } - } - } - ty::RegionParameterDef { - name: name, - def_id: def_id, - space: space, - index: index, - bounds: bounds - } - } - - - fn parse_object_lifetime_default(&mut self) -> ty::ObjectLifetimeDefault { - match self.next() { - 'a' => ty::ObjectLifetimeDefault::Ambiguous, - 'b' => ty::ObjectLifetimeDefault::BaseDefault, - 's' => { - let region = self.parse_region(); - ty::ObjectLifetimeDefault::Specific(region) - } - _ => panic!("parse_object_lifetime_default: bad input") - } - } - - pub fn parse_existential_bounds(&mut self) -> ty::ExistentialBounds<'tcx> { - let builtin_bounds = self.parse_builtin_bounds(); - let region_bound = self.parse_region(); - let mut projection_bounds = Vec::new(); - - loop { - match self.next() { - 'P' => { - projection_bounds.push(ty::Binder(self.parse_projection_predicate())); - } - '.' 
=> { break; } - c => { - panic!("parse_bounds: bad bounds ('{}')", c) - } - } - } - - ty::ExistentialBounds::new( - region_bound, builtin_bounds, projection_bounds) - } - - fn parse_builtin_bounds(&mut self) -> ty::BuiltinBounds { - let mut builtin_bounds = ty::BuiltinBounds::empty(); - loop { - match self.next() { - 'S' => { - builtin_bounds.insert(ty::BoundSend); - } - 'Z' => { - builtin_bounds.insert(ty::BoundSized); - } - 'P' => { - builtin_bounds.insert(ty::BoundCopy); - } - 'T' => { - builtin_bounds.insert(ty::BoundSync); - } - '.' => { - return builtin_bounds; - } - c => { - panic!("parse_bounds: bad builtin bounds ('{}')", c) - } - } - } - } -} - -// Rust metadata parsing -fn parse_defid(buf: &[u8]) -> DefId { - let mut colon_idx = 0; - let len = buf.len(); - while colon_idx < len && buf[colon_idx] != ':' as u8 { colon_idx += 1; } - if colon_idx == len { - error!("didn't find ':' when parsing def id"); - panic!(); - } - - let crate_part = &buf[0..colon_idx]; - let def_part = &buf[colon_idx + 1..len]; - - let crate_num = match str::from_utf8(crate_part).ok().and_then(|s| { - s.parse::().ok() - }) { - Some(cn) => cn as ast::CrateNum, - None => panic!("internal error: parse_defid: crate number expected, found {:?}", - crate_part) - }; - let def_num = match str::from_utf8(def_part).ok().and_then(|s| { - s.parse::().ok() - }) { - Some(dn) => dn, - None => panic!("internal error: parse_defid: id expected, found {:?}", - def_part) - }; - let index = DefIndex::new(def_num); - DefId { krate: crate_num, index: index } -} - -fn parse_unsafety(c: char) -> hir::Unsafety { - match c { - 'u' => hir::Unsafety::Unsafe, - 'n' => hir::Unsafety::Normal, - _ => panic!("parse_unsafety: bad unsafety {}", c) - } -} diff --git a/src/librustc_metadata/tyencode.rs b/src/librustc_metadata/tyencode.rs deleted file mode 100644 index f03c25d698feb..0000000000000 --- a/src/librustc_metadata/tyencode.rs +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Type encoding - -#![allow(unused_must_use)] // as with encoding, everything is a no-fail MemWriter -#![allow(non_camel_case_types)] - -use std::cell::RefCell; -use std::io::Cursor; -use std::io::prelude::*; - -use middle::def_id::DefId; -use middle::region; -use middle::subst; -use middle::subst::VecPerParamSpace; -use middle::ty::ParamTy; -use middle::ty::{self, Ty}; -use rustc::util::nodemap::FnvHashMap; - -use rustc_front::hir; - -use syntax::abi::Abi; -use syntax::ast; -use syntax::errors::Handler; - -use rbml::leb128; -use encoder; - -pub struct ctxt<'a, 'tcx: 'a> { - pub diag: &'a Handler, - // Def -> str Callback: - pub ds: fn(DefId) -> String, - // The type context. - pub tcx: &'a ty::ctxt<'tcx>, - pub abbrevs: &'a abbrev_map<'tcx> -} - -impl<'a, 'tcx> encoder::EncodeContext<'a, 'tcx> { - pub fn ty_str_ctxt<'b>(&'b self) -> ctxt<'b, 'tcx> { - ctxt { - diag: self.tcx.sess.diagnostic(), - ds: encoder::def_to_string, - tcx: self.tcx, - abbrevs: &self.type_abbrevs - } - } -} - -// Compact string representation for Ty values. API TyStr & parse_from_str. -// Extra parameters are for converting to/from def_ids in the string rep. -// Whatever format you choose should not contain pipe characters. 
-pub struct ty_abbrev { - s: Vec -} - -pub type abbrev_map<'tcx> = RefCell, ty_abbrev>>; - -pub fn enc_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, t: Ty<'tcx>) { - match cx.abbrevs.borrow_mut().get(&t) { - Some(a) => { w.write_all(&a.s); return; } - None => {} - } - - let pos = w.position(); - - match t.sty { - ty::TyBool => { write!(w, "b"); } - ty::TyChar => { write!(w, "c"); } - ty::TyInt(t) => { - match t { - ast::TyIs => write!(w, "is"), - ast::TyI8 => write!(w, "MB"), - ast::TyI16 => write!(w, "MW"), - ast::TyI32 => write!(w, "ML"), - ast::TyI64 => write!(w, "MD") - }; - } - ty::TyUint(t) => { - match t { - ast::TyUs => write!(w, "us"), - ast::TyU8 => write!(w, "Mb"), - ast::TyU16 => write!(w, "Mw"), - ast::TyU32 => write!(w, "Ml"), - ast::TyU64 => write!(w, "Md") - }; - } - ty::TyFloat(t) => { - match t { - ast::TyF32 => write!(w, "Mf"), - ast::TyF64 => write!(w, "MF"), - }; - } - ty::TyEnum(def, substs) => { - write!(w, "t[{}|", (cx.ds)(def.did)); - enc_substs(w, cx, substs); - write!(w, "]"); - } - ty::TyTrait(box ty::TraitTy { ref principal, - ref bounds }) => { - write!(w, "x["); - enc_trait_ref(w, cx, principal.0); - enc_existential_bounds(w, cx, bounds); - write!(w, "]"); - } - ty::TyTuple(ref ts) => { - write!(w, "T["); - for t in ts { enc_ty(w, cx, *t); } - write!(w, "]"); - } - ty::TyBox(typ) => { write!(w, "~"); enc_ty(w, cx, typ); } - ty::TyRawPtr(mt) => { write!(w, "*"); enc_mt(w, cx, mt); } - ty::TyRef(r, mt) => { - write!(w, "&"); - enc_region(w, cx, *r); - enc_mt(w, cx, mt); - } - ty::TyArray(t, sz) => { - write!(w, "V"); - enc_ty(w, cx, t); - write!(w, "/{}|", sz); - } - ty::TySlice(t) => { - write!(w, "V"); - enc_ty(w, cx, t); - write!(w, "/|"); - } - ty::TyStr => { - write!(w, "v"); - } - ty::TyBareFn(Some(def_id), f) => { - write!(w, "F"); - write!(w, "{}|", (cx.ds)(def_id)); - enc_bare_fn_ty(w, cx, f); - } - ty::TyBareFn(None, f) => { - write!(w, "G"); - enc_bare_fn_ty(w, cx, f); - } - ty::TyInfer(_) => { - cx.diag.bug("cannot 
encode inference variable types"); - } - ty::TyParam(ParamTy {space, idx, name}) => { - write!(w, "p[{}|{}|{}]", idx, space.to_uint(), name); - } - ty::TyStruct(def, substs) => { - write!(w, "a[{}|", (cx.ds)(def.did)); - enc_substs(w, cx, substs); - write!(w, "]"); - } - ty::TyClosure(def, ref substs) => { - write!(w, "k[{}|", (cx.ds)(def)); - enc_substs(w, cx, &substs.func_substs); - for ty in &substs.upvar_tys { - enc_ty(w, cx, ty); - } - write!(w, "."); - write!(w, "]"); - } - ty::TyProjection(ref data) => { - write!(w, "P["); - enc_trait_ref(w, cx, data.trait_ref); - write!(w, "{}]", data.item_name); - } - ty::TyError => { - write!(w, "e"); - } - } - - let end = w.position(); - let len = end - pos; - - let mut abbrev = Cursor::new(Vec::with_capacity(16)); - abbrev.write_all(b"#"); - { - let start_position = abbrev.position() as usize; - let bytes_written = leb128::write_unsigned_leb128(abbrev.get_mut(), - start_position, - pos); - abbrev.set_position((start_position + bytes_written) as u64); - } - - cx.abbrevs.borrow_mut().insert(t, ty_abbrev { - s: if abbrev.position() < len { - abbrev.get_ref()[..abbrev.position() as usize].to_owned() - } else { - // if the abbreviation is longer than the real type, - // don't use #-notation. However, insert it here so - // other won't have to `mark_stable_position` - w.get_ref()[pos as usize .. 
end as usize].to_owned() - } - }); -} - -fn enc_mutability(w: &mut Cursor>, mt: hir::Mutability) { - match mt { - hir::MutImmutable => (), - hir::MutMutable => { - write!(w, "m"); - } - }; -} - -fn enc_mt<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - mt: ty::TypeAndMut<'tcx>) { - enc_mutability(w, mt.mutbl); - enc_ty(w, cx, mt.ty); -} - -fn enc_opt(w: &mut Cursor>, t: Option, enc_f: F) where - F: FnOnce(&mut Cursor>, T), -{ - match t { - None => { - write!(w, "n"); - } - Some(v) => { - write!(w, "s"); - enc_f(w, v); - } - } -} - -fn enc_vec_per_param_space<'a, 'tcx, T, F>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - v: &VecPerParamSpace, - mut op: F) where - F: FnMut(&mut Cursor>, &ctxt<'a, 'tcx>, &T), -{ - for &space in &subst::ParamSpace::all() { - write!(w, "["); - for t in v.get_slice(space) { - op(w, cx, t); - } - write!(w, "]"); - } -} - -pub fn enc_substs<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - substs: &subst::Substs<'tcx>) { - enc_region_substs(w, cx, &substs.regions); - enc_vec_per_param_space(w, cx, &substs.types, - |w, cx, &ty| enc_ty(w, cx, ty)); -} - -fn enc_region_substs(w: &mut Cursor>, cx: &ctxt, substs: &subst::RegionSubsts) { - match *substs { - subst::ErasedRegions => { - write!(w, "e"); - } - subst::NonerasedRegions(ref regions) => { - write!(w, "n"); - enc_vec_per_param_space(w, cx, regions, - |w, cx, &r| enc_region(w, cx, r)); - } - } -} - -pub fn enc_region(w: &mut Cursor>, cx: &ctxt, r: ty::Region) { - match r { - ty::ReLateBound(id, br) => { - write!(w, "b[{}|", id.depth); - enc_bound_region(w, cx, br); - write!(w, "]"); - } - ty::ReEarlyBound(ref data) => { - write!(w, "B[{}|{}|{}]", - data.space.to_uint(), - data.index, - data.name); - } - ty::ReFree(ref fr) => { - write!(w, "f["); - enc_scope(w, cx, fr.scope); - write!(w, "|"); - enc_bound_region(w, cx, fr.bound_region); - write!(w, "]"); - } - ty::ReScope(scope) => { - write!(w, "s"); - enc_scope(w, cx, scope); - write!(w, "|"); - } - ty::ReStatic => { - write!(w, "t"); - } - 
ty::ReEmpty => { - write!(w, "e"); - } - ty::ReVar(_) | ty::ReSkolemized(..) => { - // these should not crop up after typeck - cx.diag.bug("cannot encode region variables"); - } - } -} - -fn enc_scope(w: &mut Cursor>, cx: &ctxt, scope: region::CodeExtent) { - match cx.tcx.region_maps.code_extent_data(scope) { - region::CodeExtentData::CallSiteScope { - fn_id, body_id } => write!(w, "C[{}|{}]", fn_id, body_id), - region::CodeExtentData::ParameterScope { - fn_id, body_id } => write!(w, "P[{}|{}]", fn_id, body_id), - region::CodeExtentData::Misc(node_id) => write!(w, "M{}", node_id), - region::CodeExtentData::Remainder(region::BlockRemainder { - block: b, first_statement_index: i }) => write!(w, "B[{}|{}]", b, i), - region::CodeExtentData::DestructionScope(node_id) => write!(w, "D{}", node_id), - }; -} - -fn enc_bound_region(w: &mut Cursor>, cx: &ctxt, br: ty::BoundRegion) { - match br { - ty::BrAnon(idx) => { - write!(w, "a{}|", idx); - } - ty::BrNamed(d, name) => { - write!(w, "[{}|{}]", - (cx.ds)(d), - name); - } - ty::BrFresh(id) => { - write!(w, "f{}|", id); - } - ty::BrEnv => { - write!(w, "e|"); - } - } -} - -pub fn enc_trait_ref<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - s: ty::TraitRef<'tcx>) { - write!(w, "{}|", (cx.ds)(s.def_id)); - enc_substs(w, cx, s.substs); -} - -fn enc_unsafety(w: &mut Cursor>, p: hir::Unsafety) { - match p { - hir::Unsafety::Normal => write!(w, "n"), - hir::Unsafety::Unsafe => write!(w, "u"), - }; -} - -fn enc_abi(w: &mut Cursor>, abi: Abi) { - write!(w, "["); - write!(w, "{}", abi.name()); - write!(w, "]"); -} - -pub fn enc_bare_fn_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - ft: &ty::BareFnTy<'tcx>) { - enc_unsafety(w, ft.unsafety); - enc_abi(w, ft.abi); - enc_fn_sig(w, cx, &ft.sig); -} - -pub fn enc_closure_ty<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - ft: &ty::ClosureTy<'tcx>) { - enc_unsafety(w, ft.unsafety); - enc_fn_sig(w, cx, &ft.sig); - enc_abi(w, ft.abi); -} - -fn enc_fn_sig<'a, 'tcx>(w: &mut Cursor>, 
cx: &ctxt<'a, 'tcx>, - fsig: &ty::PolyFnSig<'tcx>) { - write!(w, "["); - for ty in &fsig.0.inputs { - enc_ty(w, cx, *ty); - } - write!(w, "]"); - if fsig.0.variadic { - write!(w, "V"); - } else { - write!(w, "N"); - } - match fsig.0.output { - ty::FnConverging(result_type) => { - enc_ty(w, cx, result_type); - } - ty::FnDiverging => { - write!(w, "z"); - } - } -} - -pub fn enc_builtin_bounds(w: &mut Cursor>, _cx: &ctxt, bs: &ty::BuiltinBounds) { - for bound in bs { - match bound { - ty::BoundSend => write!(w, "S"), - ty::BoundSized => write!(w, "Z"), - ty::BoundCopy => write!(w, "P"), - ty::BoundSync => write!(w, "T"), - }; - } - - write!(w, "."); -} - -pub fn enc_existential_bounds<'a,'tcx>(w: &mut Cursor>, - cx: &ctxt<'a,'tcx>, - bs: &ty::ExistentialBounds<'tcx>) { - enc_builtin_bounds(w, cx, &bs.builtin_bounds); - - enc_region(w, cx, bs.region_bound); - - for tp in &bs.projection_bounds { - write!(w, "P"); - enc_projection_predicate(w, cx, &tp.0); - } - - write!(w, "."); -} - -pub fn enc_type_param_def<'a, 'tcx>(w: &mut Cursor>, cx: &ctxt<'a, 'tcx>, - v: &ty::TypeParameterDef<'tcx>) { - write!(w, "{}:{}|{}|{}|{}|", - v.name, (cx.ds)(v.def_id), - v.space.to_uint(), v.index, (cx.ds)(v.default_def_id)); - enc_opt(w, v.default, |w, t| enc_ty(w, cx, t)); - enc_object_lifetime_default(w, cx, v.object_lifetime_default); -} - -pub fn enc_region_param_def(w: &mut Cursor>, cx: &ctxt, - v: &ty::RegionParameterDef) { - write!(w, "{}:{}|{}|{}|", - v.name, (cx.ds)(v.def_id), - v.space.to_uint(), v.index); - for &r in &v.bounds { - write!(w, "R"); - enc_region(w, cx, r); - } - write!(w, "."); -} - -fn enc_object_lifetime_default<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - default: ty::ObjectLifetimeDefault) -{ - match default { - ty::ObjectLifetimeDefault::Ambiguous => { - write!(w, "a"); - } - ty::ObjectLifetimeDefault::BaseDefault => { - write!(w, "b"); - } - ty::ObjectLifetimeDefault::Specific(r) => { - write!(w, "s"); - enc_region(w, cx, r); - } - } -} - -pub fn 
enc_predicate<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - p: &ty::Predicate<'tcx>) -{ - match *p { - ty::Predicate::Trait(ref trait_ref) => { - write!(w, "t"); - enc_trait_ref(w, cx, trait_ref.0.trait_ref); - } - ty::Predicate::Equate(ty::Binder(ty::EquatePredicate(a, b))) => { - write!(w, "e"); - enc_ty(w, cx, a); - enc_ty(w, cx, b); - } - ty::Predicate::RegionOutlives(ty::Binder(ty::OutlivesPredicate(a, b))) => { - write!(w, "r"); - enc_region(w, cx, a); - enc_region(w, cx, b); - } - ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(a, b))) => { - write!(w, "o"); - enc_ty(w, cx, a); - enc_region(w, cx, b); - } - ty::Predicate::Projection(ty::Binder(ref data)) => { - write!(w, "p"); - enc_projection_predicate(w, cx, data); - } - ty::Predicate::WellFormed(data) => { - write!(w, "w"); - enc_ty(w, cx, data); - } - ty::Predicate::ObjectSafe(trait_def_id) => { - write!(w, "O{}|", (cx.ds)(trait_def_id)); - } - } -} - -fn enc_projection_predicate<'a, 'tcx>(w: &mut Cursor>, - cx: &ctxt<'a, 'tcx>, - data: &ty::ProjectionPredicate<'tcx>) { - enc_trait_ref(w, cx, data.projection_ty.trait_ref); - write!(w, "{}|", data.projection_ty.item_name); - enc_ty(w, cx, data.ty); -} diff --git a/src/librustc_mir/Cargo.toml b/src/librustc_mir/Cargo.toml new file mode 100644 index 0000000000000..2a1a815330675 --- /dev/null +++ b/src/librustc_mir/Cargo.toml @@ -0,0 +1,21 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_mir" +version = "0.0.0" + +[lib] +name = "rustc_mir" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +graphviz = { path = "../libgraphviz" } +log = { path = "../liblog" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_bitflags = { path = "../librustc_bitflags" } +syntax = { path = "../libsyntax" } +syntax_pos = 
{ path = "../libsyntax_pos" } diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 12b9130b48c61..2c7b47c766999 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -10,25 +10,92 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use hair::*; -use rustc::mir::repr::*; -use rustc_front::hir; +use rustc::mir::*; +use rustc::hir; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn ast_block(&mut self, destination: &Lvalue<'tcx>, + // FIXME(#32959): temporary measure for the issue + dest_is_unit: bool, mut block: BasicBlock, ast_block: &'tcx hir::Block) -> BlockAnd<()> { let Block { extent, span, stmts, expr } = self.hir.mirror(ast_block); self.in_scope(extent, block, move |this| { - unpack!(block = this.stmts(block, stmts)); - match expr { - Some(expr) => this.into(destination, block, expr), - None => { - this.cfg.push_assign_unit(block, span, destination); - block.unit() + // This convoluted structure is to avoid using recursion as we walk down a list + // of statements. Basically, the structure we get back is something like: + // + // let x = in { + // expr1; + // let y = in { + // expr2; + // expr3; + // ... + // } + // } + // + // The let bindings are valid till the end of block so all we have to do is to pop all + // the let-scopes at the end. + // + // First we build all the statements in the block. + let mut let_extent_stack = Vec::with_capacity(8); + let outer_visibility_scope = this.visibility_scope; + for stmt in stmts { + let Stmt { span: _, kind } = this.hir.mirror(stmt); + match kind { + StmtKind::Expr { scope, expr } => { + unpack!(block = this.in_scope(scope, block, |this| { + let expr = this.hir.mirror(expr); + this.stmt_expr(block, expr) + })); + } + StmtKind::Let { remainder_scope, init_scope, pattern, initializer } => { + let tcx = this.hir.tcx(); + + // Enter the remainder scope, i.e. the bindings' destruction scope. 
+ this.push_scope(remainder_scope); + let_extent_stack.push(remainder_scope); + + // Declare the bindings, which may create a visibility scope. + let remainder_span = remainder_scope.span(&tcx.region_maps, &tcx.map); + let remainder_span = remainder_span.unwrap_or(span); + let scope = this.declare_bindings(None, remainder_span, &pattern); + + // Evaluate the initializer, if present. + if let Some(init) = initializer { + unpack!(block = this.in_scope(init_scope, block, move |this| { + // FIXME #30046 ^~~~ + this.expr_into_pattern(block, pattern, init) + })); + } else { + this.storage_live_for_bindings(block, &pattern); + } + + // Enter the visibility scope, after evaluating the initializer. + if let Some(visibility_scope) = scope { + this.visibility_scope = visibility_scope; + } + } } } + // Then, the block may have an optional trailing expression which is a “return” value + // of the block. + if let Some(expr) = expr { + unpack!(block = this.into(destination, block, expr)); + } else if dest_is_unit { + // FIXME(#31472) + let source_info = this.source_info(span); + this.cfg.push_assign_unit(block, source_info, destination); + } + // Finally, we pop all the let scopes before exiting out from the scope of block + // itself. + for extent in let_extent_stack.into_iter().rev() { + unpack!(block = this.pop_scope(extent, block)); + } + // Restore the original visibility scope. + this.visibility_scope = outer_visibility_scope; + block.unit() }) } } diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 523ac85cdc509..71e97e4bfe0d3 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -14,22 +14,25 @@ //! Routines for manipulating the control-flow graph. 
use build::CFG; -use rustc::mir::repr::*; -use syntax::codemap::Span; +use rustc::mir::*; impl<'tcx> CFG<'tcx> { pub fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> { - &self.basic_blocks[blk.index()] + &self.basic_blocks[blk] } pub fn block_data_mut(&mut self, blk: BasicBlock) -> &mut BasicBlockData<'tcx> { - &mut self.basic_blocks[blk.index()] + &mut self.basic_blocks[blk] } pub fn start_new_block(&mut self) -> BasicBlock { - let node_index = self.basic_blocks.len(); - self.basic_blocks.push(BasicBlockData::new(None)); - BasicBlock::new(node_index) + self.basic_blocks.push(BasicBlockData::new(None)) + } + + pub fn start_new_cleanup_block(&mut self) -> BasicBlock { + let bb = self.start_new_block(); + self.block_data_mut(bb).is_cleanup = true; + bb } pub fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) { @@ -37,48 +40,47 @@ impl<'tcx> CFG<'tcx> { self.block_data_mut(block).statements.push(statement); } - pub fn push_drop(&mut self, block: BasicBlock, span: Span, - kind: DropKind, lvalue: &Lvalue<'tcx>) { - self.push(block, Statement { - span: span, - kind: StatementKind::Drop(kind, lvalue.clone()) - }); - } - pub fn push_assign(&mut self, block: BasicBlock, - span: Span, + source_info: SourceInfo, lvalue: &Lvalue<'tcx>, rvalue: Rvalue<'tcx>) { self.push(block, Statement { - span: span, + source_info: source_info, kind: StatementKind::Assign(lvalue.clone(), rvalue) }); } pub fn push_assign_constant(&mut self, block: BasicBlock, - span: Span, + source_info: SourceInfo, temp: &Lvalue<'tcx>, constant: Constant<'tcx>) { - self.push_assign(block, span, temp, Rvalue::Use(Operand::Constant(constant))); + self.push_assign(block, source_info, temp, + Rvalue::Use(Operand::Constant(constant))); } pub fn push_assign_unit(&mut self, block: BasicBlock, - span: Span, + source_info: SourceInfo, lvalue: &Lvalue<'tcx>) { - self.push_assign(block, span, lvalue, Rvalue::Aggregate( + self.push_assign(block, source_info, lvalue, Rvalue::Aggregate( 
AggregateKind::Tuple, vec![] )); } pub fn terminate(&mut self, block: BasicBlock, - terminator: Terminator<'tcx>) { + source_info: SourceInfo, + kind: TerminatorKind<'tcx>) { + debug!("terminating block {:?} <- {:?}", block, kind); debug_assert!(self.block_data(block).terminator.is_none(), - "terminate: block {:?} already has a terminator set", block); - self.block_data_mut(block).terminator = Some(terminator); + "terminate: block {:?}={:?} already has a terminator set", + block, + self.block_data(block)); + self.block_data_mut(block).terminator = Some(Terminator { + source_info: source_info, + kind: kind, + }); } } - diff --git a/src/librustc_mir/build/expr/as_constant.rs b/src/librustc_mir/build/expr/as_constant.rs index 6f186b8ada426..6230123a9ca17 100644 --- a/src/librustc_mir/build/expr/as_constant.rs +++ b/src/librustc_mir/build/expr/as_constant.rs @@ -12,9 +12,9 @@ use build::Builder; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, yielding a compile-time constant. Assumes that /// `expr` is a valid compile-time constant! 
pub fn as_constant(&mut self, expr: M) -> Constant<'tcx> @@ -33,9 +33,10 @@ impl<'a,'tcx> Builder<'a,'tcx> { ExprKind::Literal { literal } => Constant { span: span, ty: ty, literal: literal }, _ => - this.hir.span_bug( + span_bug!( span, - &format!("expression is not a valid constant {:?}", kind)), + "expression is not a valid constant {:?}", + kind), } } } diff --git a/src/librustc_mir/build/expr/as_lvalue.rs b/src/librustc_mir/build/expr/as_lvalue.rs index 4e03ed489eb9f..58abaa0c484f2 100644 --- a/src/librustc_mir/build/expr/as_lvalue.rs +++ b/src/librustc_mir/build/expr/as_lvalue.rs @@ -13,9 +13,11 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; -impl<'a,'tcx> Builder<'a,'tcx> { +use rustc_data_structures::indexed_vec::Idx; + +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, yielding an lvalue that we can move from etc. pub fn as_lvalue(&mut self, block: BasicBlock, @@ -35,13 +37,14 @@ impl<'a,'tcx> Builder<'a,'tcx> { let this = self; let expr_span = expr.span; + let source_info = this.source_info(expr_span); match expr.kind { ExprKind::Scope { extent, value } => { this.in_scope(extent, block, |this| this.as_lvalue(block, value)) } ExprKind::Field { lhs, name } => { let lvalue = unpack!(block = this.as_lvalue(block, lhs)); - let lvalue = lvalue.field(name); + let lvalue = lvalue.field(name, expr.ty); block.and(lvalue) } ExprKind::Deref { arg } => { @@ -58,28 +61,27 @@ impl<'a,'tcx> Builder<'a,'tcx> { // bounds check: let (len, lt) = (this.temp(usize_ty.clone()), this.temp(bool_ty)); - this.cfg.push_assign(block, expr_span, // len = len(slice) + this.cfg.push_assign(block, source_info, // len = len(slice) &len, Rvalue::Len(slice.clone())); - this.cfg.push_assign(block, expr_span, // lt = idx < len + this.cfg.push_assign(block, source_info, // lt = idx < len <, Rvalue::BinaryOp(BinOp::Lt, idx.clone(), Operand::Consume(len.clone()))); - let 
(success, failure) = (this.cfg.start_new_block(), this.cfg.start_new_block()); - this.cfg.terminate(block, - Terminator::If { - cond: Operand::Consume(lt), - targets: (success, failure), - }); - this.panic_bounds_check(failure, idx.clone(), Operand::Consume(len), expr_span); + let msg = AssertMessage::BoundsCheck { + len: Operand::Consume(len), + index: idx.clone() + }; + let success = this.assert(block, Operand::Consume(lt), true, + msg, expr_span); success.and(slice.index(idx)) } ExprKind::SelfRef => { - block.and(Lvalue::Arg(0)) + block.and(Lvalue::Local(Local::new(1))) } ExprKind::VarRef { id } => { let index = this.var_indices[&id]; - block.and(Lvalue::Var(index)) + block.and(Lvalue::Local(index)) } ExprKind::StaticRef { id } => { block.and(Lvalue::Static(id)) @@ -94,6 +96,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { ExprKind::LogicalOp { .. } | ExprKind::Box { .. } | ExprKind::Cast { .. } | + ExprKind::Use { .. } | + ExprKind::NeverToAny { .. } | ExprKind::ReifyFnPointer { .. } | ExprKind::UnsafeFnPointer { .. } | ExprKind::Unsize { .. } | diff --git a/src/librustc_mir/build/expr/as_operand.rs b/src/librustc_mir/build/expr/as_operand.rs index 7738ebca26b47..09cdcc74ef63e 100644 --- a/src/librustc_mir/build/expr/as_operand.rs +++ b/src/librustc_mir/build/expr/as_operand.rs @@ -13,9 +13,9 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr` into a value that can be used as an operand. /// If `expr` is an lvalue like `x`, this will introduce a /// temporary `tmp = x`, so that we capture the value of `x` at diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 2f57dd22454cb..b75e52fd4b10d 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -10,14 +10,23 @@ //! 
See docs in build/expr/mod.rs -use rustc_data_structures::fnv::FnvHashMap; +use std; + +use rustc_const_math::{ConstMathErr, Op}; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::indexed_vec::Idx; use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::{Category, RvalueFunc}; use hair::*; -use rustc::mir::repr::*; +use rustc_const_math::{ConstInt, ConstIsize}; +use rustc::middle::const_val::ConstVal; +use rustc::ty; +use rustc::mir::*; +use syntax::ast; +use syntax_pos::Span; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, yielding an rvalue. pub fn as_rvalue(&mut self, block: BasicBlock, expr: M) -> BlockAnd> where M: Mirror<'tcx, Output = Expr<'tcx>> @@ -34,17 +43,29 @@ impl<'a,'tcx> Builder<'a,'tcx> { let this = self; let expr_span = expr.span; + let source_info = this.source_info(expr_span); match expr.kind { ExprKind::Scope { extent, value } => { this.in_scope(extent, block, |this| this.as_rvalue(block, value)) } - ExprKind::InlineAsm { asm } => { - block.and(Rvalue::InlineAsm(asm.clone())) + ExprKind::InlineAsm { asm, outputs, inputs } => { + let outputs = outputs.into_iter().map(|output| { + unpack!(block = this.as_lvalue(block, output)) + }).collect(); + + let inputs = inputs.into_iter().map(|input| { + unpack!(block = this.as_operand(block, input)) + }).collect(); + + block.and(Rvalue::InlineAsm { + asm: asm.clone(), + outputs: outputs, + inputs: inputs + }) } ExprKind::Repeat { value, count } => { let value_operand = unpack!(block = this.as_operand(block, value)); - let count = this.as_constant(count); block.and(Rvalue::Repeat(value_operand, count)) } ExprKind::Borrow { region, borrow_kind, arg } => { @@ -54,37 +75,50 @@ impl<'a,'tcx> Builder<'a,'tcx> { ExprKind::Binary { op, lhs, rhs } => { let lhs = unpack!(block = this.as_operand(block, lhs)); let rhs = unpack!(block = this.as_operand(block, rhs)); - block.and(Rvalue::BinaryOp(op, lhs, rhs)) + 
this.build_binary_op(block, op, expr_span, expr.ty, + lhs, rhs) } ExprKind::Unary { op, arg } => { let arg = unpack!(block = this.as_operand(block, arg)); + // Check for -MIN on signed integers + if this.hir.check_overflow() && op == UnOp::Neg && expr.ty.is_signed() { + let bool_ty = this.hir.bool_ty(); + + let minval = this.minval_literal(expr_span, expr.ty); + let is_min = this.temp(bool_ty); + + this.cfg.push_assign(block, source_info, &is_min, + Rvalue::BinaryOp(BinOp::Eq, arg.clone(), minval)); + + let err = ConstMathErr::Overflow(Op::Neg); + block = this.assert(block, Operand::Consume(is_min), false, + AssertMessage::Math(err), expr_span); + } block.and(Rvalue::UnaryOp(op, arg)) } - ExprKind::Box { value } => { + ExprKind::Box { value, value_extents } => { let value = this.hir.mirror(value); - let value_ty = value.ty.clone(); - let result = this.temp(value_ty.clone()); - + let result = this.temp(expr.ty); // to start, malloc some memory of suitable type (thus far, uninitialized): - let rvalue = Rvalue::Box(value.ty.clone()); - this.cfg.push_assign(block, expr_span, &result, rvalue); - - // schedule a shallow free of that memory, lest we unwind: - let extent = this.extent_of_innermost_scope(); - this.schedule_drop(expr_span, extent, DropKind::Free, &result, value_ty); - - // initialize the box contents: - let contents = result.clone().deref(); - unpack!(block = this.into(&contents, block, value)); - - // now that the result is fully initialized, cancel the drop - // by "using" the result (which is linear): - block.and(Rvalue::Use(Operand::Consume(result))) + this.cfg.push_assign(block, source_info, &result, Rvalue::Box(value.ty)); + this.in_scope(value_extents, block, |this| { + // schedule a shallow free of that memory, lest we unwind: + this.schedule_box_free(expr_span, value_extents, &result, value.ty); + // initialize the box contents: + unpack!(block = this.into(&result.clone().deref(), block, value)); + block.and(Rvalue::Use(Operand::Consume(result))) + 
}) } ExprKind::Cast { source } => { + let source = this.hir.mirror(source); + let source = unpack!(block = this.as_operand(block, source)); block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty)) } + ExprKind::Use { source } => { + let source = unpack!(block = this.as_operand(block, source)); + block.and(Rvalue::Use(source)) + } ExprKind::ReifyFnPointer { source } => { let source = unpack!(block = this.as_operand(block, source)); block.and(Rvalue::Cast(CastKind::ReifyFnPointer, source, expr.ty)) @@ -130,7 +164,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { .map(|f| unpack!(block = this.as_operand(block, f))) .collect(); - block.and(Rvalue::Aggregate(AggregateKind::Vec, fields)) + block.and(Rvalue::Aggregate(AggregateKind::Array, fields)) } ExprKind::Tuple { fields } => { // see (*) above // first process the set of fields @@ -148,41 +182,51 @@ impl<'a,'tcx> Builder<'a,'tcx> { .collect(); block.and(Rvalue::Aggregate(AggregateKind::Closure(closure_id, substs), upvars)) } - ExprKind::Adt { adt_def, variant_index, substs, fields, base } => { // see (*) above + ExprKind::Adt { + adt_def, variant_index, substs, fields, base + } => { // see (*) above + let is_union = adt_def.is_union(); + let active_field_index = if is_union { Some(fields[0].name.index()) } else { None }; + // first process the set of fields that were provided // (evaluating them in order given by user) - let fields_map: FnvHashMap<_, _> = + let fields_map: FxHashMap<_, _> = fields.into_iter() .map(|f| (f.name, unpack!(block = this.as_operand(block, f.expr)))) .collect(); - // if base expression is given, evaluate it now - let base = base.map(|base| unpack!(block = this.as_lvalue(block, base))); - - // get list of all fields that we will need let field_names = this.hir.all_fields(adt_def, variant_index); - // for the actual values we use, take either the - // expr the user specified or, if they didn't - // specify something for this field name, create a - // path relative to the base (which must have been - // 
supplied, or the IR is internally - // inconsistent). - let fields: Vec<_> = + let fields = if let Some(FruInfo { base, field_types }) = base { + let base = unpack!(block = this.as_lvalue(block, base)); + + // MIR does not natively support FRU, so for each + // base-supplied field, generate an operand that + // reads it from the base. field_names.into_iter() - .map(|n| match fields_map.get(&n) { - Some(v) => v.clone(), - None => Operand::Consume(base.clone().unwrap().field(n)), - }) - .collect(); + .zip(field_types.into_iter()) + .map(|(n, ty)| match fields_map.get(&n) { + Some(v) => v.clone(), + None => Operand::Consume(base.clone().field(n, ty)) + }) + .collect() + } else { + field_names.iter().filter_map(|n| fields_map.get(n).cloned()).collect() + }; - block.and(Rvalue::Aggregate(AggregateKind::Adt(adt_def, variant_index, substs), - fields)) + let adt = AggregateKind::Adt(adt_def, variant_index, substs, active_field_index); + block.and(Rvalue::Aggregate(adt, fields)) + } + ExprKind::Assign { .. } | + ExprKind::AssignOp { .. } => { + block = unpack!(this.stmt_expr(block, expr)); + block.and(this.unit_rvalue()) } ExprKind::Literal { .. } | ExprKind::Block { .. } | ExprKind::Match { .. } | ExprKind::If { .. } | + ExprKind::NeverToAny { .. } | ExprKind::Loop { .. } | ExprKind::LogicalOp { .. } | ExprKind::Call { .. } | @@ -191,8 +235,6 @@ impl<'a,'tcx> Builder<'a,'tcx> { ExprKind::Index { .. } | ExprKind::VarRef { .. } | ExprKind::SelfRef | - ExprKind::Assign { .. } | - ExprKind::AssignOp { .. } | ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. 
} | @@ -208,4 +250,149 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } } + + pub fn build_binary_op(&mut self, mut block: BasicBlock, + op: BinOp, span: Span, ty: ty::Ty<'tcx>, + lhs: Operand<'tcx>, rhs: Operand<'tcx>) -> BlockAnd> { + let source_info = self.source_info(span); + let bool_ty = self.hir.bool_ty(); + if self.hir.check_overflow() && op.is_checkable() && ty.is_integral() { + let result_tup = self.hir.tcx().intern_tup(&[ty, bool_ty]); + let result_value = self.temp(result_tup); + + self.cfg.push_assign(block, source_info, + &result_value, Rvalue::CheckedBinaryOp(op, + lhs, + rhs)); + let val_fld = Field::new(0); + let of_fld = Field::new(1); + + let val = result_value.clone().field(val_fld, ty); + let of = result_value.field(of_fld, bool_ty); + + let err = ConstMathErr::Overflow(match op { + BinOp::Add => Op::Add, + BinOp::Sub => Op::Sub, + BinOp::Mul => Op::Mul, + BinOp::Shl => Op::Shl, + BinOp::Shr => Op::Shr, + _ => { + bug!("MIR build_binary_op: {:?} is not checkable", op) + } + }); + + block = self.assert(block, Operand::Consume(of), false, + AssertMessage::Math(err), span); + + block.and(Rvalue::Use(Operand::Consume(val))) + } else { + if ty.is_integral() && (op == BinOp::Div || op == BinOp::Rem) { + // Checking division and remainder is more complex, since we 1. always check + // and 2. there are two possible failure cases, divide-by-zero and overflow. 
+ + let (zero_err, overflow_err) = if op == BinOp::Div { + (ConstMathErr::DivisionByZero, + ConstMathErr::Overflow(Op::Div)) + } else { + (ConstMathErr::RemainderByZero, + ConstMathErr::Overflow(Op::Rem)) + }; + + // Check for / 0 + let is_zero = self.temp(bool_ty); + let zero = self.zero_literal(span, ty); + self.cfg.push_assign(block, source_info, &is_zero, + Rvalue::BinaryOp(BinOp::Eq, rhs.clone(), zero)); + + block = self.assert(block, Operand::Consume(is_zero), false, + AssertMessage::Math(zero_err), span); + + // We only need to check for the overflow in one case: + // MIN / -1, and only for signed values. + if ty.is_signed() { + let neg_1 = self.neg_1_literal(span, ty); + let min = self.minval_literal(span, ty); + + let is_neg_1 = self.temp(bool_ty); + let is_min = self.temp(bool_ty); + let of = self.temp(bool_ty); + + // this does (rhs == -1) & (lhs == MIN). It could short-circuit instead + + self.cfg.push_assign(block, source_info, &is_neg_1, + Rvalue::BinaryOp(BinOp::Eq, rhs.clone(), neg_1)); + self.cfg.push_assign(block, source_info, &is_min, + Rvalue::BinaryOp(BinOp::Eq, lhs.clone(), min)); + + let is_neg_1 = Operand::Consume(is_neg_1); + let is_min = Operand::Consume(is_min); + self.cfg.push_assign(block, source_info, &of, + Rvalue::BinaryOp(BinOp::BitAnd, is_neg_1, is_min)); + + block = self.assert(block, Operand::Consume(of), false, + AssertMessage::Math(overflow_err), span); + } + } + + block.and(Rvalue::BinaryOp(op, lhs, rhs)) + } + } + + // Helper to get a `-1` value of the appropriate type + fn neg_1_literal(&mut self, span: Span, ty: ty::Ty<'tcx>) -> Operand<'tcx> { + let literal = match ty.sty { + ty::TyInt(ity) => { + let val = match ity { + ast::IntTy::I8 => ConstInt::I8(-1), + ast::IntTy::I16 => ConstInt::I16(-1), + ast::IntTy::I32 => ConstInt::I32(-1), + ast::IntTy::I64 => ConstInt::I64(-1), + ast::IntTy::Is => { + let int_ty = self.hir.tcx().sess.target.int_type; + let val = ConstIsize::new(-1, int_ty).unwrap(); + ConstInt::Isize(val) + } 
+ }; + + Literal::Value { value: ConstVal::Integral(val) } + } + _ => { + span_bug!(span, "Invalid type for neg_1_literal: `{:?}`", ty) + } + }; + + self.literal_operand(span, ty, literal) + } + + // Helper to get the minimum value of the appropriate type + fn minval_literal(&mut self, span: Span, ty: ty::Ty<'tcx>) -> Operand<'tcx> { + let literal = match ty.sty { + ty::TyInt(ity) => { + let val = match ity { + ast::IntTy::I8 => ConstInt::I8(std::i8::MIN), + ast::IntTy::I16 => ConstInt::I16(std::i16::MIN), + ast::IntTy::I32 => ConstInt::I32(std::i32::MIN), + ast::IntTy::I64 => ConstInt::I64(std::i64::MIN), + ast::IntTy::Is => { + let int_ty = self.hir.tcx().sess.target.int_type; + let min = match int_ty { + ast::IntTy::I16 => std::i16::MIN as i64, + ast::IntTy::I32 => std::i32::MIN as i64, + ast::IntTy::I64 => std::i64::MIN, + _ => unreachable!() + }; + let val = ConstIsize::new(min, int_ty).unwrap(); + ConstInt::Isize(val) + } + }; + + Literal::Value { value: ConstVal::Integral(val) } + } + _ => { + span_bug!(span, "Invalid type for minval_literal: `{:?}`", ty) + } + }; + + self.literal_operand(span, ty, literal) + } } diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index 53f8090ad0f2f..fb12e08affd2d 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -13,9 +13,9 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::Category; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr` into a fresh temporary. This is used when building /// up rvalues so as to freeze the value that will be consumed. 
pub fn as_temp(&mut self, block: BasicBlock, expr: M) -> BlockAnd> @@ -35,13 +35,16 @@ impl<'a,'tcx> Builder<'a,'tcx> { let expr_ty = expr.ty.clone(); let temp = this.temp(expr_ty.clone()); - let temp_lifetime = match expr.temp_lifetime { - Some(t) => t, - None => { - this.hir.span_bug(expr.span, "no temp_lifetime for expr"); - } - }; - this.schedule_drop(expr.span, temp_lifetime, DropKind::Deep, &temp, expr_ty); + let temp_lifetime = expr.temp_lifetime; + let expr_span = expr.span; + let source_info = this.source_info(expr_span); + + if temp_lifetime.is_some() { + this.cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::StorageLive(temp.clone()) + }); + } // Careful here not to cause an infinite cycle. If we always // called `into`, then for lvalues like `x.f`, it would @@ -52,16 +55,22 @@ impl<'a,'tcx> Builder<'a,'tcx> { // course) `as_temp`. match Category::of(&expr.kind).unwrap() { Category::Lvalue => { - let expr_span = expr.span; let lvalue = unpack!(block = this.as_lvalue(block, expr)); let rvalue = Rvalue::Use(Operand::Consume(lvalue)); - this.cfg.push_assign(block, expr_span, &temp, rvalue); + this.cfg.push_assign(block, source_info, &temp, rvalue); } _ => { unpack!(block = this.into(&temp, block, expr)); } } + // In constants, temp_lifetime is None. We should not need to drop + // anything because no values with a destructor can be created in + // a constant at this time, even if the type may need dropping. + if let Some(temp_lifetime) = temp_lifetime { + this.schedule_drop(expr_span, temp_lifetime, &temp, expr_ty); + } + block.and(temp) } } diff --git a/src/librustc_mir/build/expr/category.rs b/src/librustc_mir/build/expr/category.rs index 658b7779b44a9..9671f80f48ba7 100644 --- a/src/librustc_mir/build/expr/category.rs +++ b/src/librustc_mir/build/expr/category.rs @@ -56,6 +56,7 @@ impl Category { ExprKind::LogicalOp { .. } | ExprKind::If { .. } | ExprKind::Match { .. } | + ExprKind::NeverToAny { .. } | ExprKind::Call { .. 
} => Some(Category::Rvalue(RvalueFunc::Into)), @@ -67,6 +68,7 @@ impl Category { ExprKind::Binary { .. } | ExprKind::Box { .. } | ExprKind::Cast { .. } | + ExprKind::Use { .. } | ExprKind::ReifyFnPointer { .. } | ExprKind::UnsafeFnPointer { .. } | ExprKind::Unsize { .. } | diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 63eb760720479..5a77de0807028 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -12,14 +12,11 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::expr::category::{Category, RvalueFunc}; -use build::scope::LoopScope; use hair::*; -use rustc::middle::region::CodeExtent; -use rustc::middle::ty; -use rustc::mir::repr::*; -use syntax::codemap::Span; +use rustc::ty; +use rustc::mir::*; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, storing the result into `destination`, which /// is assumed to be uninitialized. pub fn into_expr(&mut self, @@ -36,23 +33,43 @@ impl<'a,'tcx> Builder<'a,'tcx> { // just use the name `this` uniformly let this = self; let expr_span = expr.span; + let source_info = this.source_info(expr_span); match expr.kind { ExprKind::Scope { extent, value } => { this.in_scope(extent, block, |this| this.into(destination, block, value)) } ExprKind::Block { body: ast_block } => { - this.ast_block(destination, block, ast_block) + this.ast_block(destination, expr.ty.is_nil(), block, ast_block) } ExprKind::Match { discriminant, arms } => { this.match_expr(destination, expr_span, block, discriminant, arms) } + ExprKind::NeverToAny { source } => { + let source = this.hir.mirror(source); + let is_call = match source.kind { + ExprKind::Call { .. } => true, + _ => false, + }; + + unpack!(block = this.as_rvalue(block, source)); + + // This is an optimization. If the expression was a call then we already have an + // unreachable block. Don't bother to terminate it and create a new one. 
+ if is_call { + block.unit() + } else { + this.cfg.terminate(block, source_info, TerminatorKind::Unreachable); + let end_block = this.cfg.start_new_block(); + end_block.unit() + } + } ExprKind::If { condition: cond_expr, then: then_expr, otherwise: else_expr } => { let operand = unpack!(block = this.as_operand(block, cond_expr)); let mut then_block = this.cfg.start_new_block(); let mut else_block = this.cfg.start_new_block(); - this.cfg.terminate(block, Terminator::If { + this.cfg.terminate(block, source_info, TerminatorKind::If { cond: operand, targets: (then_block, else_block) }); @@ -63,13 +80,15 @@ impl<'a,'tcx> Builder<'a,'tcx> { } else { // Body of the `if` expression without an `else` clause must return `()`, thus // we implicitly generate a `else {}` if it is not specified. - this.cfg.push_assign_unit(else_block, expr_span, &Lvalue::ReturnPointer); + this.cfg.push_assign_unit(else_block, source_info, destination); else_block }; let join_block = this.cfg.start_new_block(); - this.cfg.terminate(then_block, Terminator::Goto { target: join_block }); - this.cfg.terminate(else_block, Terminator::Goto { target: join_block }); + this.cfg.terminate(then_block, source_info, + TerminatorKind::Goto { target: join_block }); + this.cfg.terminate(else_block, source_info, + TerminatorKind::Goto { target: join_block }); join_block.unit() } @@ -95,16 +114,17 @@ impl<'a,'tcx> Builder<'a,'tcx> { LogicalOp::And => (else_block, false_block), LogicalOp::Or => (true_block, else_block), }; - this.cfg.terminate(block, Terminator::If { cond: lhs, targets: blocks }); + this.cfg.terminate(block, source_info, + TerminatorKind::If { cond: lhs, targets: blocks }); let rhs = unpack!(else_block = this.as_operand(else_block, rhs)); - this.cfg.terminate(else_block, Terminator::If { + this.cfg.terminate(else_block, source_info, TerminatorKind::If { cond: rhs, targets: (true_block, false_block) }); this.cfg.push_assign_constant( - true_block, expr_span, destination, + true_block, source_info, 
destination, Constant { span: expr_span, ty: this.hir.bool_ty(), @@ -112,15 +132,17 @@ impl<'a,'tcx> Builder<'a,'tcx> { }); this.cfg.push_assign_constant( - false_block, expr_span, destination, + false_block, source_info, destination, Constant { span: expr_span, ty: this.hir.bool_ty(), literal: this.hir.false_literal(), }); - this.cfg.terminate(true_block, Terminator::Goto { target: join_block }); - this.cfg.terminate(false_block, Terminator::Goto { target: join_block }); + this.cfg.terminate(true_block, source_info, + TerminatorKind::Goto { target: join_block }); + this.cfg.terminate(false_block, source_info, + TerminatorKind::Goto { target: join_block }); join_block.unit() } @@ -144,96 +166,50 @@ impl<'a,'tcx> Builder<'a,'tcx> { let exit_block = this.cfg.start_new_block(); // start the loop - this.cfg.terminate(block, Terminator::Goto { target: loop_block }); + this.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: loop_block }); - this.in_loop_scope(loop_block, exit_block, |this| { - // conduct the test, if necessary - let body_block; - let opt_cond_expr = opt_cond_expr; // FIXME rustc bug - if let Some(cond_expr) = opt_cond_expr { - let loop_block_end; - let cond = unpack!(loop_block_end = this.as_operand(loop_block, cond_expr)); - body_block = this.cfg.start_new_block(); - this.cfg.terminate(loop_block_end, - Terminator::If { - cond: cond, - targets: (body_block, exit_block) - }); - } else { - body_block = loop_block; - } + this.in_loop_scope( + loop_block, exit_block, destination.clone(), + move |this| { + // conduct the test, if necessary + let body_block; + if let Some(cond_expr) = opt_cond_expr { + let loop_block_end; + let cond = unpack!( + loop_block_end = this.as_operand(loop_block, cond_expr)); + body_block = this.cfg.start_new_block(); + this.cfg.terminate(loop_block_end, source_info, + TerminatorKind::If { + cond: cond, + targets: (body_block, exit_block) + }); - // execute the body, branching back to the test - // We write body’s 
“return value” into the destination of loop. This is fine, - // because: - // - // * In Rust both loop expression and its body are required to have `()` - // as the “return value”; - // * The destination will be considered uninitialised (given it was - // uninitialised before the loop) during the first iteration, thus - // disallowing its use inside the body. Alternatively, if it was already - // initialised, the `destination` can only possibly have a value of `()`, - // therefore, “mutating” the destination during iteration is fine. - let body_block_end = unpack!(this.into(destination, body_block, body)); - this.cfg.terminate(body_block_end, Terminator::Goto { target: loop_block }); - exit_block.unit() - }) - } - ExprKind::Assign { lhs, rhs } => { - // Note: we evaluate assignments right-to-left. This - // is better for borrowck interaction with overloaded - // operators like x[j] = x[i]. - let rhs = unpack!(block = this.as_operand(block, rhs)); - let lhs = unpack!(block = this.as_lvalue(block, lhs)); - this.cfg.push_drop(block, expr_span, DropKind::Deep, &lhs); - this.cfg.push_assign(block, expr_span, &lhs, Rvalue::Use(rhs)); - block.unit() - } - ExprKind::AssignOp { op, lhs, rhs } => { - // FIXME(#28160) there is an interesting semantics - // question raised here -- should we "freeze" the - // value of the lhs here? I'm inclined to think not, - // since it seems closer to the semantics of the - // overloaded version, which takes `&mut self`. This - // only affects weird things like `x += {x += 1; x}` - // -- is that equal to `x + (x + 1)` or `2*(x+1)`? - - // As above, RTL. - let rhs = unpack!(block = this.as_operand(block, rhs)); - let lhs = unpack!(block = this.as_lvalue(block, lhs)); - - // we don't have to drop prior contents or anything - // because AssignOp is only legal for Copy types - // (overloaded ops should be desugared into a call). 
- this.cfg.push_assign(block, expr_span, &lhs, - Rvalue::BinaryOp(op, - Operand::Consume(lhs.clone()), - rhs)); + // if the test is false, there's no `break` to assign `destination`, so + // we have to do it; this overwrites any `break`-assigned value but it's + // always `()` anyway + this.cfg.push_assign_unit(exit_block, source_info, destination); + } else { + body_block = loop_block; + } - block.unit() - } - ExprKind::Continue { label } => { - this.break_or_continue(expr_span, label, block, - |loop_scope| loop_scope.continue_block) - } - ExprKind::Break { label } => { - this.break_or_continue(expr_span, label, block, |loop_scope| loop_scope.break_block) - } - ExprKind::Return { value } => { - block = match value { - Some(value) => unpack!(this.into(&Lvalue::ReturnPointer, block, value)), - None => { - this.cfg.push_assign_unit(block, expr_span, &Lvalue::ReturnPointer); - block + // The “return” value of the loop body must always be an unit. We therefore + // introduce a unit temporary as the destination for the loop body. + let tmp = this.get_unit_temp(); + // Execute the body, branching back to the test. 
+ let body_block_end = unpack!(this.into(&tmp, body_block, body)); + this.cfg.terminate(body_block_end, source_info, + TerminatorKind::Goto { target: loop_block }); } - }; - let extent = this.extent_of_outermost_scope(); - this.exit_scope(expr_span, extent, block, END_BLOCK); - this.cfg.start_new_block().unit() + ); + exit_block.unit() } ExprKind::Call { ty, fun, args } => { let diverges = match ty.sty { - ty::TyBareFn(_, ref f) => f.sig.0.output.diverges(), + ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => { + // FIXME(canndrew): This is_never should probably be an is_uninhabited + f.sig.0.output.is_never() + } _ => false }; let fun = unpack!(block = this.as_operand(block, fun)); @@ -244,30 +220,34 @@ impl<'a,'tcx> Builder<'a,'tcx> { let success = this.cfg.start_new_block(); let cleanup = this.diverge_cleanup(); - this.cfg.terminate(block, Terminator::Call { + this.cfg.terminate(block, source_info, TerminatorKind::Call { func: fun, args: args, - kind: match (cleanup, diverges) { - (None, true) => CallKind::Diverging, - (Some(c), true) => CallKind::DivergingCleanup(c), - (None, false) => CallKind::Converging { - destination: destination.clone(), - target: success - }, - (Some(c), false) => CallKind::ConvergingCleanup { - destination: destination.clone(), - targets: (success, c) - } + cleanup: cleanup, + destination: if diverges { + None + } else { + Some ((destination.clone(), success)) } }); success.unit() } + // These cases don't actually need a destination + ExprKind::Assign { .. } | + ExprKind::AssignOp { .. } | + ExprKind::Continue { .. } | + ExprKind::Break { .. } | + ExprKind::Return {.. } => { + this.stmt_expr(block, expr) + } + // these are the cases that are more naturally handled by some other mode ExprKind::Unary { .. } | ExprKind::Binary { .. } | ExprKind::Box { .. } | ExprKind::Cast { .. } | + ExprKind::Use { .. } | ExprKind::ReifyFnPointer { .. } | ExprKind::UnsafeFnPointer { .. } | ExprKind::Unsize { .. 
} | @@ -291,23 +271,9 @@ impl<'a,'tcx> Builder<'a,'tcx> { }); let rvalue = unpack!(block = this.as_rvalue(block, expr)); - this.cfg.push_assign(block, expr_span, destination, rvalue); + this.cfg.push_assign(block, source_info, destination, rvalue); block.unit() } } } - - fn break_or_continue(&mut self, - span: Span, - label: Option, - block: BasicBlock, - exit_selector: F) - -> BlockAnd<()> - where F: FnOnce(&LoopScope) -> BasicBlock - { - let loop_scope = self.find_loop_scope(span, label); - let exit_block = exit_selector(&loop_scope); - self.exit_scope(span, loop_scope.extent, block, exit_block); - self.cfg.start_new_block().unit() - } } diff --git a/src/librustc_mir/build/expr/mod.rs b/src/librustc_mir/build/expr/mod.rs index 0f168f307aa01..17b34f4586e8b 100644 --- a/src/librustc_mir/build/expr/mod.rs +++ b/src/librustc_mir/build/expr/mod.rs @@ -77,3 +77,4 @@ mod as_operand; mod as_temp; mod category; mod into; +mod stmt; diff --git a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs new file mode 100644 index 0000000000000..f04d630379a35 --- /dev/null +++ b/src/librustc_mir/build/expr/stmt.rs @@ -0,0 +1,130 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use build::{BlockAnd, BlockAndExtension, Builder}; +use build::scope::LoopScope; +use hair::*; +use rustc::mir::*; + +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { + + pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd<()> { + let this = self; + let expr_span = expr.span; + let source_info = this.source_info(expr.span); + // Handle a number of expressions that don't need a destination at all. 
This + // avoids needing a mountain of temporary `()` variables. + match expr.kind { + ExprKind::Scope { extent, value } => { + let value = this.hir.mirror(value); + this.in_scope(extent, block, |this| this.stmt_expr(block, value)) + } + ExprKind::Assign { lhs, rhs } => { + let lhs = this.hir.mirror(lhs); + let rhs = this.hir.mirror(rhs); + let lhs_span = lhs.span; + + // Note: we evaluate assignments right-to-left. This + // is better for borrowck interaction with overloaded + // operators like x[j] = x[i]. + + // Generate better code for things that don't need to be + // dropped. + if this.hir.needs_drop(lhs.ty) { + let rhs = unpack!(block = this.as_operand(block, rhs)); + let lhs = unpack!(block = this.as_lvalue(block, lhs)); + unpack!(block = this.build_drop_and_replace( + block, lhs_span, lhs, rhs + )); + block.unit() + } else { + let rhs = unpack!(block = this.as_rvalue(block, rhs)); + let lhs = unpack!(block = this.as_lvalue(block, lhs)); + this.cfg.push_assign(block, source_info, &lhs, rhs); + block.unit() + } + } + ExprKind::AssignOp { op, lhs, rhs } => { + // FIXME(#28160) there is an interesting semantics + // question raised here -- should we "freeze" the + // value of the lhs here? I'm inclined to think not, + // since it seems closer to the semantics of the + // overloaded version, which takes `&mut self`. This + // only affects weird things like `x += {x += 1; x}` + // -- is that equal to `x + (x + 1)` or `2*(x+1)`? + + let lhs = this.hir.mirror(lhs); + let lhs_ty = lhs.ty; + + // As above, RTL. + let rhs = unpack!(block = this.as_operand(block, rhs)); + let lhs = unpack!(block = this.as_lvalue(block, lhs)); + + // we don't have to drop prior contents or anything + // because AssignOp is only legal for Copy types + // (overloaded ops should be desugared into a call). 
+ let result = unpack!(block = this.build_binary_op(block, op, expr_span, lhs_ty, + Operand::Consume(lhs.clone()), rhs)); + this.cfg.push_assign(block, source_info, &lhs, result); + + block.unit() + } + ExprKind::Continue { label } => { + let LoopScope { continue_block, extent, .. } = + *this.find_loop_scope(expr_span, label); + this.exit_scope(expr_span, extent, block, continue_block); + this.cfg.start_new_block().unit() + } + ExprKind::Break { label, value } => { + let (break_block, extent, destination) = { + let LoopScope { + break_block, + extent, + ref break_destination, + .. + } = *this.find_loop_scope(expr_span, label); + (break_block, extent, break_destination.clone()) + }; + if let Some(value) = value { + unpack!(block = this.into(&destination, block, value)) + } else { + this.cfg.push_assign_unit(block, source_info, &destination) + } + this.exit_scope(expr_span, extent, block, break_block); + this.cfg.start_new_block().unit() + } + ExprKind::Return { value } => { + block = match value { + Some(value) => { + unpack!(this.into(&Lvalue::Local(RETURN_POINTER), block, value)) + } + None => { + this.cfg.push_assign_unit(block, + source_info, + &Lvalue::Local(RETURN_POINTER)); + block + } + }; + let extent = this.extent_of_return_scope(); + let return_block = this.return_block(); + this.exit_scope(expr_span, extent, block, return_block); + this.cfg.start_new_block().unit() + } + _ => { + let expr_ty = expr.ty; + let temp = this.temp(expr.ty.clone()); + unpack!(block = this.into(&temp, block, expr)); + unpack!(block = this.build_drop(block, expr_span, temp, expr_ty)); + block.unit() + } + } + } + +} diff --git a/src/librustc_mir/build/into.rs b/src/librustc_mir/build/into.rs index 77d9d926328fc..5c133780e433b 100644 --- a/src/librustc_mir/build/into.rs +++ b/src/librustc_mir/build/into.rs @@ -16,17 +16,17 @@ use build::{BlockAnd, Builder}; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; pub trait EvalInto<'tcx> { - fn eval_into<'a>(self, - builder: 
&mut Builder<'a, 'tcx>, - destination: &Lvalue<'tcx>, - block: BasicBlock) - -> BlockAnd<()>; + fn eval_into<'a, 'gcx>(self, + builder: &mut Builder<'a, 'gcx, 'tcx>, + destination: &Lvalue<'tcx>, + block: BasicBlock) + -> BlockAnd<()>; } -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn into(&mut self, destination: &Lvalue<'tcx>, block: BasicBlock, @@ -39,22 +39,22 @@ impl<'a,'tcx> Builder<'a,'tcx> { } impl<'tcx> EvalInto<'tcx> for ExprRef<'tcx> { - fn eval_into<'a>(self, - builder: &mut Builder<'a, 'tcx>, - destination: &Lvalue<'tcx>, - block: BasicBlock) - -> BlockAnd<()> { + fn eval_into<'a, 'gcx>(self, + builder: &mut Builder<'a, 'gcx, 'tcx>, + destination: &Lvalue<'tcx>, + block: BasicBlock) + -> BlockAnd<()> { let expr = builder.hir.mirror(self); builder.into_expr(destination, block, expr) } } impl<'tcx> EvalInto<'tcx> for Expr<'tcx> { - fn eval_into<'a>(self, - builder: &mut Builder<'a, 'tcx>, - destination: &Lvalue<'tcx>, - block: BasicBlock) - -> BlockAnd<()> { + fn eval_into<'a, 'gcx>(self, + builder: &mut Builder<'a, 'gcx, 'tcx>, + destination: &Lvalue<'tcx>, + block: BasicBlock) + -> BlockAnd<()> { builder.into_expr(destination, block, self) } } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index c2c87fcbd20da..e06d940de7e58 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -14,21 +14,21 @@ //! details. 
use build::{BlockAnd, BlockAndExtension, Builder}; -use rustc_data_structures::fnv::FnvHashMap; -use rustc::middle::const_eval::ConstVal; -use rustc::middle::region::CodeExtent; -use rustc::middle::ty::{AdtDef, Ty}; -use rustc::mir::repr::*; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::bitvec::BitVector; +use rustc::middle::const_val::ConstVal; +use rustc::ty::{AdtDef, Ty}; +use rustc::mir::*; use hair::*; use syntax::ast::{Name, NodeId}; -use syntax::codemap::Span; +use syntax_pos::Span; // helper functions, broken out by category: mod simplify; mod test; mod util; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn match_expr(&mut self, destination: &Lvalue<'tcx>, span: Span, @@ -38,25 +38,18 @@ impl<'a,'tcx> Builder<'a,'tcx> { -> BlockAnd<()> { let discriminant_lvalue = unpack!(block = self.as_lvalue(block, discriminant)); - // Before we do anything, create uninitialized variables with - // suitable extent for all of the bindings in this match. It's - // easiest to do this up front because some of these arms may - // be unreachable or reachable multiple times. - let var_extent = self.extent_of_innermost_scope(); - for arm in &arms { - self.declare_bindings(var_extent, &arm.patterns[0]); - } - let mut arm_blocks = ArmBlocks { blocks: arms.iter() .map(|_| self.cfg.start_new_block()) .collect(), }; - let arm_bodies: Vec> = - arms.iter() - .map(|arm| arm.body.clone()) - .collect(); + // Get the arm bodies and their scopes, while declaring bindings. 
+ let arm_bodies: Vec<_> = arms.iter().map(|arm| { + let body = self.hir.mirror(arm.body.clone()); + let scope = self.declare_bindings(None, body.span, &arm.patterns[0]); + (body, scope.unwrap_or(self.visibility_scope)) + }).collect(); // assemble a list of candidates: there is one candidate per // pattern, which means there may be more than one candidate @@ -72,6 +65,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { }) .map(|(arm_index, pattern, guard)| { Candidate { + span: pattern.span, match_pairs: vec![MatchPair::new(discriminant_lvalue.clone(), pattern)], bindings: vec![], guard: guard, @@ -84,69 +78,70 @@ impl<'a,'tcx> Builder<'a,'tcx> { // branch to the appropriate arm block let otherwise = self.match_candidates(span, &mut arm_blocks, candidates, block); - // because all matches are exhaustive, in principle we expect - // an empty vector to be returned here, but the algorithm is - // not entirely precise if !otherwise.is_empty() { - let join_block = self.join_otherwise_blocks(otherwise); - self.panic(join_block, "something about matches algorithm not being precise", span); + // All matches are exhaustive. However, because some matches + // only have exponentially-large exhaustive decision trees, we + // sometimes generate an inexhaustive decision tree. + // + // In that case, the inexhaustive tips of the decision tree + // can't be reached - terminate them with an `unreachable`. 
+ let source_info = self.source_info(span); + + let mut otherwise = otherwise; + otherwise.sort(); + otherwise.dedup(); // variant switches can introduce duplicate target blocks + for block in otherwise { + self.cfg.terminate(block, source_info, TerminatorKind::Unreachable); + } } // all the arm blocks will rejoin here let end_block = self.cfg.start_new_block(); - for (arm_index, arm_body) in arm_bodies.into_iter().enumerate() { + let outer_source_info = self.source_info(span); + for (arm_index, (body, visibility_scope)) in arm_bodies.into_iter().enumerate() { let mut arm_block = arm_blocks.blocks[arm_index]; - unpack!(arm_block = self.into(destination, arm_block, arm_body)); - self.cfg.terminate(arm_block, Terminator::Goto { target: end_block }); + // Re-enter the visibility scope we created the bindings in. + self.visibility_scope = visibility_scope; + unpack!(arm_block = self.into(destination, arm_block, body)); + self.cfg.terminate(arm_block, outer_source_info, + TerminatorKind::Goto { target: end_block }); } + self.visibility_scope = outer_source_info.scope; end_block.unit() } pub fn expr_into_pattern(&mut self, mut block: BasicBlock, - var_extent: CodeExtent, // lifetime of vars irrefutable_pat: Pattern<'tcx>, initializer: ExprRef<'tcx>) -> BlockAnd<()> { // optimize the case of `let x = ...` match *irrefutable_pat.kind { - PatternKind::Binding { mutability, - name, - mode: BindingMode::ByValue, + PatternKind::Binding { mode: BindingMode::ByValue, var, - ty, - subpattern: None } => { - let index = self.declare_binding(var_extent, - mutability, - name, - var, - ty, - irrefutable_pat.span); - let lvalue = Lvalue::Var(index); + subpattern: None, .. 
} => { + self.storage_live_for_bindings(block, &irrefutable_pat); + let lvalue = Lvalue::Local(self.var_indices[&var]); return self.into(&lvalue, block, initializer); } _ => {} } let lvalue = unpack!(block = self.as_lvalue(block, initializer)); self.lvalue_into_pattern(block, - var_extent, irrefutable_pat, &lvalue) } pub fn lvalue_into_pattern(&mut self, mut block: BasicBlock, - var_extent: CodeExtent, irrefutable_pat: Pattern<'tcx>, initializer: &Lvalue<'tcx>) -> BlockAnd<()> { - // first, creating the bindings - self.declare_bindings(var_extent, &irrefutable_pat); - // create a dummy candidate let mut candidate = Candidate { + span: irrefutable_pat.span, match_pairs: vec![MatchPair::new(initializer.clone(), &irrefutable_pat)], bindings: vec![], guard: None, @@ -158,10 +153,10 @@ impl<'a,'tcx> Builder<'a,'tcx> { unpack!(block = self.simplify_candidate(block, &mut candidate)); if !candidate.match_pairs.is_empty() { - self.hir.span_bug(candidate.match_pairs[0].pattern.span, - &format!("match pairs {:?} remaining after simplifying \ - irrefutable pattern", - candidate.match_pairs)); + span_bug!(candidate.match_pairs[0].pattern.span, + "match pairs {:?} remaining after simplifying \ + irrefutable pattern", + candidate.match_pairs); } // now apply the bindings, which will also declare the variables @@ -170,29 +165,81 @@ impl<'a,'tcx> Builder<'a,'tcx> { block.unit() } - pub fn declare_bindings(&mut self, var_extent: CodeExtent, pattern: &Pattern<'tcx>) { + /// Declares the bindings of the given pattern and returns the visibility scope + /// for the bindings in this patterns, if such a scope had to be created. + /// NOTE: Declaring the bindings should always be done in their drop scope. 
+ pub fn declare_bindings(&mut self, + mut var_scope: Option, + scope_span: Span, + pattern: &Pattern<'tcx>) + -> Option { match *pattern.kind { PatternKind::Binding { mutability, name, mode: _, var, ty, ref subpattern } => { - self.declare_binding(var_extent, mutability, name, var, ty, pattern.span); + if var_scope.is_none() { + var_scope = Some(self.new_visibility_scope(scope_span)); + } + let source_info = SourceInfo { + span: pattern.span, + scope: var_scope.unwrap() + }; + self.declare_binding(source_info, mutability, name, var, ty); if let Some(subpattern) = subpattern.as_ref() { - self.declare_bindings(var_extent, subpattern); + var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } } PatternKind::Array { ref prefix, ref slice, ref suffix } | PatternKind::Slice { ref prefix, ref slice, ref suffix } => { for subpattern in prefix.iter().chain(slice).chain(suffix) { - self.declare_bindings(var_extent, subpattern); + var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } } PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => { } PatternKind::Deref { ref subpattern } => { - self.declare_bindings(var_extent, subpattern); + var_scope = self.declare_bindings(var_scope, scope_span, subpattern); } PatternKind::Leaf { ref subpatterns } | PatternKind::Variant { ref subpatterns, .. } => { for subpattern in subpatterns { - self.declare_bindings(var_extent, &subpattern.pattern); + var_scope = self.declare_bindings(var_scope, scope_span, &subpattern.pattern); + } + } + } + var_scope + } + + /// Emit `StorageLive` for every binding in the pattern. + pub fn storage_live_for_bindings(&mut self, + block: BasicBlock, + pattern: &Pattern<'tcx>) { + match *pattern.kind { + PatternKind::Binding { var, ref subpattern, .. 
} => { + let lvalue = Lvalue::Local(self.var_indices[&var]); + let source_info = self.source_info(pattern.span); + self.cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::StorageLive(lvalue) + }); + + if let Some(subpattern) = subpattern.as_ref() { + self.storage_live_for_bindings(block, subpattern); + } + } + PatternKind::Array { ref prefix, ref slice, ref suffix } | + PatternKind::Slice { ref prefix, ref slice, ref suffix } => { + for subpattern in prefix.iter().chain(slice).chain(suffix) { + self.storage_live_for_bindings(block, subpattern); + } + } + PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => { + } + PatternKind::Deref { ref subpattern } => { + self.storage_live_for_bindings(block, subpattern); + } + PatternKind::Leaf { ref subpatterns } | + PatternKind::Variant { ref subpatterns, .. } => { + for subpattern in subpatterns { + self.storage_live_for_bindings(block, &subpattern.pattern); } } } @@ -207,6 +254,9 @@ struct ArmBlocks { #[derive(Clone, Debug)] pub struct Candidate<'pat, 'tcx:'pat> { + // span of the original pattern that gave rise to this candidate + span: Span, + // all of these must be satisfied... match_pairs: Vec>, @@ -228,7 +278,7 @@ struct Binding<'tcx> { var_id: NodeId, var_ty: Ty<'tcx>, mutability: Mutability, - binding_mode: BindingMode, + binding_mode: BindingMode<'tcx>, } #[derive(Clone, Debug)] @@ -238,20 +288,28 @@ pub struct MatchPair<'pat, 'tcx:'pat> { // ... must match this pattern. pattern: &'pat Pattern<'tcx>, + + // HACK(eddyb) This is used to toggle whether a Slice pattern + // has had its length checked. This is only necessary because + // the "rest" part of the pattern right now has type &[T] and + // as such, it requires an Rvalue::Slice to be generated. + // See RFC 495 / issue #23121 for the eventual (proper) solution. 
+ slice_len_checked: bool } #[derive(Clone, Debug, PartialEq)] enum TestKind<'tcx> { // test the branches of enum Switch { - adt_def: AdtDef<'tcx>, + adt_def: &'tcx AdtDef, + variants: BitVector, }, // test the branches of enum SwitchInt { switch_ty: Ty<'tcx>, options: Vec, - indices: FnvHashMap, + indices: FxHashMap, }, // test for equality @@ -269,7 +327,7 @@ enum TestKind<'tcx> { // test length of the slice is equal to len Len { - len: usize, + len: u64, op: BinOp, }, } @@ -283,7 +341,7 @@ pub struct Test<'tcx> { /////////////////////////////////////////////////////////////////////////// // Main matching algorithm -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// The main match algorithm. It begins with a set of candidates /// `candidates` and has the job of generating code to determine /// which of these candidates, if any, is the correct one. The @@ -364,20 +422,25 @@ impl<'a,'tcx> Builder<'a,'tcx> { } // Otherwise, let's process those remaining candidates. - let join_block = self.join_otherwise_blocks(otherwise); + let join_block = self.join_otherwise_blocks(span, otherwise); self.match_candidates(span, arm_blocks, untested_candidates, join_block) } fn join_otherwise_blocks(&mut self, - otherwise: Vec) + span: Span, + mut otherwise: Vec) -> BasicBlock { + let source_info = self.source_info(span); + otherwise.sort(); + otherwise.dedup(); // variant switches can introduce duplicate target blocks if otherwise.len() == 1 { otherwise[0] } else { let join_block = self.cfg.start_new_block(); for block in otherwise { - self.cfg.terminate(block, Terminator::Goto { target: join_block }); + self.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: join_block }); } join_block } @@ -413,42 +476,87 @@ impl<'a,'tcx> Builder<'a,'tcx> { /// simpler (and, in fact, irrefutable). /// /// But there may also be candidates that the test just doesn't - /// apply to. For example, consider the case of #29740: + /// apply to. 
The classical example involves wildcards: + /// + /// ```rust,ignore + /// match (x, y, z) { + /// (true, _, true) => true, // (0) + /// (_, true, _) => true, // (1) + /// (false, false, _) => false, // (2) + /// (true, _, false) => false, // (3) + /// } + /// ``` + /// + /// In that case, after we test on `x`, there are 2 overlapping candidate + /// sets: + /// + /// - If the outcome is that `x` is true, candidates 0, 1, and 3 + /// - If the outcome is that `x` is false, candidates 1 and 2 + /// + /// Here, the traditional "decision tree" method would generate 2 + /// separate code-paths for the 2 separate cases. + /// + /// In some cases, this duplication can create an exponential amount of + /// code. This is most easily seen by noticing that this method terminates + /// with precisely the reachable arms being reachable - but that problem + /// is trivially NP-complete: + /// + /// ```rust + /// match (var0, var1, var2, var3, ..) { + /// (true, _, _, false, true, ...) => false, + /// (_, true, true, false, _, ...) => false, + /// (false, _, false, false, _, ...) => false, + /// ... + /// _ => true + /// } + /// ``` + /// + /// Here the last arm is reachable only if there is an assignment to + /// the variables that does not match any of the literals. Therefore, + /// compilation would take an exponential amount of time in some cases. + /// + /// That kind of exponential worst-case might not occur in practice, but + /// our simplistic treatment of constants and guards would make it occur + /// in very common situations - for example #29740: /// /// ```rust /// match x { - /// "foo" => ..., - /// "bar" => ..., - /// "baz" => ..., - /// _ => ..., + /// "foo" if foo_guard => ..., + /// "bar" if bar_guard => ..., + /// "baz" if baz_guard => ..., + /// ... /// } /// ``` /// - /// Here the match-pair we are testing will be `x @ "foo"`, and we - /// will generate an `Eq` test. 
Because `"bar"` and `"baz"` are different - /// constants, we will decide that these later candidates are just not - /// informed by the eq test. So we'll wind up with three candidate sets: + /// Here we first test the match-pair `x @ "foo"`, which is an `Eq` test. + /// + /// It might seem that we would end up with 2 disjoint candidate + /// sets, consisting of the first candidate or the other 3, but our + /// algorithm doesn't reason about "foo" being distinct from the other + /// constants; it considers the latter arms to potentially match after + /// both outcomes, which obviously leads to an exponential amount + /// of tests. /// - /// - If outcome is that `x == "foo"` (one candidate, derived from `x @ "foo"`) - /// - If outcome is that `x != "foo"` (empty list of candidates) - /// - Otherwise (three candidates, `x @ "bar"`, `x @ "baz"`, `x @ - /// _`). Here we have the invariant that everything in the - /// otherwise list is of **lower priority** than the stuff in the - /// other lists. + /// To avoid these kinds of problems, our algorithm tries to ensure + /// the amount of generated tests is linear. When we do a k-way test, + /// we return an additional "unmatched" set alongside the obvious `k` + /// sets. When we encounter a candidate that would be present in more + /// than one of the sets, we put it and all candidates below it into the + /// "unmatched" set. This ensures these `k+1` sets are disjoint. /// - /// So we'll compile the test. For each outcome of the test, we - /// recursively call `match_candidates` with the corresponding set - /// of candidates. But note that this set is now inexhaustive: for - /// example, in the case where the test returns false, there are - /// NO candidates, even though there is stll a value to be - /// matched. So we'll collect the return values from - /// `match_candidates`, which are the blocks where control-flow - /// goes if none of the candidates matched. 
At this point, we can - /// continue with the "otherwise" list. + /// After we perform our test, we branch into the appropriate candidate + /// set and recurse with `match_candidates`. These sub-matches are + /// obviously inexhaustive - as we discarded our otherwise set - so + /// we set their continuation to do `match_candidates` on the + /// "unmatched" set (which is again inexhaustive). /// /// If you apply this to the above test, you basically wind up /// with an if-else-if chain, testing each candidate in turn, /// which is precisely what we want. + /// + /// In addition to avoiding exponential-time blowups, this algorithm + /// also has nice property that each guard and arm is only generated + /// once. fn test_candidates<'pat>(&mut self, span: Span, arm_blocks: &mut ArmBlocks, @@ -476,6 +584,15 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } } + TestKind::Switch { adt_def: _, ref mut variants} => { + for candidate in candidates.iter() { + if !self.add_variants_to_switch(&match_pair.lvalue, + candidate, + variants) { + break; + } + } + } _ => { } } @@ -499,6 +616,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { &mut target_candidates)) .count(); assert!(tested_candidates > 0); // at least the last candidate ought to be tested + debug!("tested_candidates: {}", tested_candidates); + debug!("untested_candidates: {}", candidates.len() - tested_candidates); // For each outcome of test, process the candidates that still // apply. 
Collect a list of blocks where control flow will @@ -547,13 +666,18 @@ impl<'a,'tcx> Builder<'a,'tcx> { if let Some(guard) = candidate.guard { // the block to branch to if the guard fails; if there is no // guard, this block is simply unreachable + let guard = self.hir.mirror(guard); + let source_info = self.source_info(guard.span); let cond = unpack!(block = self.as_operand(block, guard)); let otherwise = self.cfg.start_new_block(); - self.cfg.terminate(block, Terminator::If { cond: cond, - targets: (arm_block, otherwise)}); + self.cfg.terminate(block, source_info, + TerminatorKind::If { cond: cond, + targets: (arm_block, otherwise)}); Some(otherwise) } else { - self.cfg.terminate(block, Terminator::Goto { target: arm_block }); + let source_info = self.source_info(candidate.span); + self.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: arm_block }); None } } @@ -578,34 +702,39 @@ impl<'a,'tcx> Builder<'a,'tcx> { Rvalue::Ref(region, borrow_kind, binding.source), }; - self.cfg.push_assign(block, binding.span, &Lvalue::Var(var_index), rvalue); + let source_info = self.source_info(binding.span); + self.cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::StorageLive(Lvalue::Local(var_index)) + }); + self.cfg.push_assign(block, source_info, + &Lvalue::Local(var_index), rvalue); } } fn declare_binding(&mut self, - var_extent: CodeExtent, + source_info: SourceInfo, mutability: Mutability, name: Name, var_id: NodeId, - var_ty: Ty<'tcx>, - span: Span) - -> u32 + var_ty: Ty<'tcx>) + -> Local { - debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, var_extent={:?}, span={:?})", - var_id, name, var_ty, var_extent, span); + debug!("declare_binding(var_id={:?}, name={:?}, var_ty={:?}, source_info={:?})", + var_id, name, var_ty, source_info); - let index = self.var_decls.len(); - self.var_decls.push(VarDecl::<'tcx> { + let var = self.local_decls.push(LocalDecl::<'tcx> { mutability: mutability, - name: name, ty: var_ty.clone(), 
+ name: Some(name), + source_info: Some(source_info), }); - let index = index as u32; - self.schedule_drop(span, var_extent, DropKind::Deep, &Lvalue::Var(index), var_ty); - self.var_indices.insert(var_id, index); + let extent = self.extent_of_innermost_scope(); + self.schedule_drop(source_info.span, extent, &Lvalue::Local(var), var_ty); + self.var_indices.insert(var_id, var); - debug!("declare_binding: index={:?}", index); + debug!("declare_binding: var={:?}", var); - index + var } } diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 2c8e1c1ccf673..71282dcf0ba07 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -25,13 +25,13 @@ use build::{BlockAnd, BlockAndExtension, Builder}; use build::matches::{Binding, MatchPair, Candidate}; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; use std::mem; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn simplify_candidate<'pat>(&mut self, - mut block: BasicBlock, + block: BasicBlock, candidate: &mut Candidate<'pat, 'tcx>) -> BlockAnd<()> { // repeatedly simplify match pairs until fixed point is reached @@ -39,10 +39,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { let match_pairs = mem::replace(&mut candidate.match_pairs, vec![]); let mut progress = match_pairs.len(); // count how many were simplified for match_pair in match_pairs { - match self.simplify_match_pair(block, match_pair, candidate) { - Ok(b) => { - block = b; - } + match self.simplify_match_pair(match_pair, candidate) { + Ok(()) => {} Err(match_pair) => { candidate.match_pairs.push(match_pair); progress -= 1; // this one was not simplified @@ -61,14 +59,13 @@ impl<'a,'tcx> Builder<'a,'tcx> { /// possible, Err is returned and no changes are made to /// candidate. 
fn simplify_match_pair<'pat>(&mut self, - mut block: BasicBlock, match_pair: MatchPair<'pat, 'tcx>, candidate: &mut Candidate<'pat, 'tcx>) - -> Result> { + -> Result<(), MatchPair<'pat, 'tcx>> { match *match_pair.pattern.kind { PatternKind::Wild => { // nothing left to do - Ok(block) + Ok(()) } PatternKind::Binding { name, mutability, mode, var, ty, ref subpattern } => { @@ -87,7 +84,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { candidate.match_pairs.push(MatchPair::new(match_pair.lvalue, subpattern)); } - Ok(block) + Ok(()) } PatternKind::Constant { .. } => { @@ -95,34 +92,32 @@ impl<'a,'tcx> Builder<'a,'tcx> { Err(match_pair) } - PatternKind::Array { ref prefix, ref slice, ref suffix } => { - unpack!(block = self.prefix_suffix_slice(&mut candidate.match_pairs, - block, - match_pair.lvalue.clone(), - prefix, - slice.as_ref(), - suffix)); - Ok(block) - } - - PatternKind::Slice { .. } | PatternKind::Range { .. } | - PatternKind::Variant { .. } => { - // cannot simplify, test is required + PatternKind::Variant { .. } | + PatternKind::Slice { .. 
} => { Err(match_pair) } + PatternKind::Array { ref prefix, ref slice, ref suffix } => { + self.prefix_slice_suffix(&mut candidate.match_pairs, + &match_pair.lvalue, + prefix, + slice.as_ref(), + suffix); + Ok(()) + } + PatternKind::Leaf { ref subpatterns } => { // tuple struct, match subpats (if any) candidate.match_pairs .extend(self.field_match_pairs(match_pair.lvalue, subpatterns)); - Ok(block) + Ok(()) } PatternKind::Deref { ref subpattern } => { let lvalue = match_pair.lvalue.deref(); candidate.match_pairs.push(MatchPair::new(lvalue, subpattern)); - Ok(block) + Ok(()) } } } diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index ec67429379f95..cb449037aeba3 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -18,13 +18,15 @@ use build::Builder; use build::matches::{Candidate, MatchPair, Test, TestKind}; use hair::*; -use rustc_data_structures::fnv::FnvHashMap; -use rustc::middle::const_eval::ConstVal; -use rustc::middle::ty::{self, Ty}; -use rustc::mir::repr::*; -use syntax::codemap::Span; - -impl<'a,'tcx> Builder<'a,'tcx> { +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::bitvec::BitVector; +use rustc::middle::const_val::ConstVal; +use rustc::ty::{self, Ty}; +use rustc::mir::*; +use syntax_pos::Span; +use std::cmp::Ordering; + +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Identifies what test is needed to decide if `match_pair` is applicable. /// /// It is a bug to call this with a simplifyable pattern. 
@@ -33,7 +35,10 @@ impl<'a,'tcx> Builder<'a,'tcx> { PatternKind::Variant { ref adt_def, variant_index: _, subpatterns: _ } => { Test { span: match_pair.pattern.span, - kind: TestKind::Switch { adt_def: adt_def.clone() }, + kind: TestKind::Switch { + adt_def: adt_def.clone(), + variants: BitVector::new(self.hir.num_variants(adt_def)), + }, } } @@ -49,7 +54,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { // these maps are empty to start; cases are // added below in add_cases_to_switch options: vec![], - indices: FnvHashMap(), + indices: FxHashMap(), } } } @@ -68,14 +73,15 @@ impl<'a,'tcx> Builder<'a,'tcx> { Test { span: match_pair.pattern.span, kind: TestKind::Range { - lo: lo.clone(), - hi: hi.clone(), + lo: Literal::Value { value: lo.clone() }, + hi: Literal::Value { value: hi.clone() }, ty: match_pair.pattern.ty.clone(), }, } } - PatternKind::Slice { ref prefix, ref slice, ref suffix } => { + PatternKind::Slice { ref prefix, ref slice, ref suffix } + if !match_pair.slice_len_checked => { let len = prefix.len() + suffix.len(); let op = if slice.is_some() { BinOp::Ge @@ -84,11 +90,12 @@ impl<'a,'tcx> Builder<'a,'tcx> { }; Test { span: match_pair.pattern.span, - kind: TestKind::Len { len: len, op: op }, + kind: TestKind::Len { len: len as u64, op: op }, } } PatternKind::Array { .. } | + PatternKind::Slice { .. } | PatternKind::Wild | PatternKind::Binding { .. } | PatternKind::Leaf { .. } | @@ -103,7 +110,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { candidate: &Candidate<'pat, 'tcx>, switch_ty: Ty<'tcx>, options: &mut Vec, - indices: &mut FnvHashMap) + indices: &mut FxHashMap) -> bool { let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) { @@ -123,9 +130,10 @@ impl<'a,'tcx> Builder<'a,'tcx> { }); true } - + PatternKind::Variant { .. } => { + panic!("you should have called add_variants_to_switch instead!"); + } PatternKind::Range { .. } | - PatternKind::Variant { .. } | PatternKind::Slice { .. } | PatternKind::Array { .. 
} | PatternKind::Wild | @@ -138,19 +146,55 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } + pub fn add_variants_to_switch<'pat>(&mut self, + test_lvalue: &Lvalue<'tcx>, + candidate: &Candidate<'pat, 'tcx>, + variants: &mut BitVector) + -> bool + { + let match_pair = match candidate.match_pairs.iter().find(|mp| mp.lvalue == *test_lvalue) { + Some(match_pair) => match_pair, + _ => { return false; } + }; + + match *match_pair.pattern.kind { + PatternKind::Variant { adt_def: _ , variant_index, .. } => { + // We have a pattern testing for variant `variant_index` + // set the corresponding index to true + variants.insert(variant_index); + true + } + _ => { + // don't know how to add these patterns to a switch + false + } + } + } + /// Generates the code to perform a test. pub fn perform_test(&mut self, block: BasicBlock, lvalue: &Lvalue<'tcx>, test: &Test<'tcx>) -> Vec { + let source_info = self.source_info(test.span); match test.kind { - TestKind::Switch { adt_def } => { + TestKind::Switch { adt_def, ref variants } => { let num_enum_variants = self.hir.num_variants(adt_def); - let target_blocks: Vec<_> = - (0..num_enum_variants).map(|_| self.cfg.start_new_block()) - .collect(); - self.cfg.terminate(block, Terminator::Switch { + let mut otherwise_block = None; + let target_blocks: Vec<_> = (0..num_enum_variants).map(|i| { + if variants.contains(i) { + self.cfg.start_new_block() + } else { + if otherwise_block.is_none() { + otherwise_block = Some(self.cfg.start_new_block()); + } + otherwise_block.unwrap() + } + }).collect(); + debug!("num_enum_variants: {}, num tested variants: {}, variants: {:?}", + num_enum_variants, variants.iter().count(), variants); + self.cfg.terminate(block, source_info, TerminatorKind::Switch { discr: lvalue.clone(), adt_def: adt_def, targets: target_blocks.clone() @@ -159,29 +203,125 @@ impl<'a,'tcx> Builder<'a,'tcx> { } TestKind::SwitchInt { switch_ty, ref options, indices: _ } => { - let otherwise = self.cfg.start_new_block(); - let targets: Vec<_> = 
- options.iter() - .map(|_| self.cfg.start_new_block()) - .chain(Some(otherwise)) - .collect(); - self.cfg.terminate(block, Terminator::SwitchInt { - discr: lvalue.clone(), - switch_ty: switch_ty, - values: options.clone(), - targets: targets.clone(), - }); + let (targets, term) = match switch_ty.sty { + // If we're matching on boolean we can + // use the If TerminatorKind instead + ty::TyBool => { + assert!(options.len() > 0 && options.len() <= 2); + + let (true_bb, else_bb) = + (self.cfg.start_new_block(), + self.cfg.start_new_block()); + + let targets = match &options[0] { + &ConstVal::Bool(true) => vec![true_bb, else_bb], + &ConstVal::Bool(false) => vec![else_bb, true_bb], + v => span_bug!(test.span, "expected boolean value but got {:?}", v) + }; + + (targets, + TerminatorKind::If { + cond: Operand::Consume(lvalue.clone()), + targets: (true_bb, else_bb) + }) + + } + _ => { + // The switch may be inexhaustive so we + // add a catch all block + let otherwise = self.cfg.start_new_block(); + let targets: Vec<_> = + options.iter() + .map(|_| self.cfg.start_new_block()) + .chain(Some(otherwise)) + .collect(); + + (targets.clone(), + TerminatorKind::SwitchInt { + discr: lvalue.clone(), + switch_ty: switch_ty, + values: options.clone(), + targets: targets + }) + } + }; + + self.cfg.terminate(block, source_info, term); targets } - TestKind::Eq { ref value, ty } => { - let expect = self.literal_operand(test.span, ty.clone(), Literal::Value { - value: value.clone() - }); - let val = Operand::Consume(lvalue.clone()); + TestKind::Eq { ref value, mut ty } => { + let mut val = Operand::Consume(lvalue.clone()); + + // If we're using b"..." as a pattern, we need to insert an + // unsizing coercion, as the byte string has the type &[u8; N]. + let expect = if let ConstVal::ByteStr(ref bytes) = *value { + let tcx = self.hir.tcx(); + + // Unsize the lvalue to &[u8], too, if necessary. 
+ if let ty::TyRef(region, mt) = ty.sty { + if let ty::TyArray(_, _) = mt.ty.sty { + ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8)); + let val_slice = self.temp(ty); + self.cfg.push_assign(block, source_info, &val_slice, + Rvalue::Cast(CastKind::Unsize, val, ty)); + val = Operand::Consume(val_slice); + } + } + + assert!(ty.is_slice()); + + let array_ty = tcx.mk_array(tcx.types.u8, bytes.len()); + let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty); + let array = self.literal_operand(test.span, array_ref, Literal::Value { + value: value.clone() + }); + + let slice = self.temp(ty); + self.cfg.push_assign(block, source_info, &slice, + Rvalue::Cast(CastKind::Unsize, array, ty)); + Operand::Consume(slice) + } else { + self.literal_operand(test.span, ty, Literal::Value { + value: value.clone() + }) + }; + + // Use PartialEq::eq for &str and &[u8] slices, instead of BinOp::Eq. let fail = self.cfg.start_new_block(); - let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val.clone()); - vec![block, fail] + if let ty::TyRef(_, mt) = ty.sty { + assert!(ty.is_slice()); + let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap(); + let ty = mt.ty; + let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty]); + + let bool_ty = self.hir.bool_ty(); + let eq_result = self.temp(bool_ty); + let eq_block = self.cfg.start_new_block(); + let cleanup = self.diverge_cleanup(); + self.cfg.terminate(block, source_info, TerminatorKind::Call { + func: Operand::Constant(Constant { + span: test.span, + ty: mty, + literal: method + }), + args: vec![val, expect], + destination: Some((eq_result.clone(), eq_block)), + cleanup: cleanup, + }); + + // check the result + let block = self.cfg.start_new_block(); + self.cfg.terminate(eq_block, source_info, TerminatorKind::If { + cond: Operand::Consume(eq_result), + targets: (block, fail), + }); + + vec![block, fail] + } else { + let block = self.compare(block, fail, test.span, BinOp::Eq, expect, 
val); + vec![block, fail] + } } TestKind::Range { ref lo, ref hi, ty } => { @@ -202,15 +342,14 @@ impl<'a,'tcx> Builder<'a,'tcx> { let (actual, result) = (self.temp(usize_ty), self.temp(bool_ty)); // actual = len(lvalue) - self.cfg.push_assign(block, test.span, &actual, Rvalue::Len(lvalue.clone())); + self.cfg.push_assign(block, source_info, + &actual, Rvalue::Len(lvalue.clone())); // expected = - let expected = self.push_usize(block, test.span, len); + let expected = self.push_usize(block, source_info, len); // result = actual == expected OR result = actual < expected - self.cfg.push_assign(block, - test.span, - &result, + self.cfg.push_assign(block, source_info, &result, Rvalue::BinaryOp(op, Operand::Consume(actual), Operand::Consume(expected))); @@ -218,7 +357,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { // branch based on result let target_blocks: Vec<_> = vec![self.cfg.start_new_block(), self.cfg.start_new_block()]; - self.cfg.terminate(block, Terminator::If { + self.cfg.terminate(block, source_info, TerminatorKind::If { cond: Operand::Consume(result), targets: (target_blocks[0], target_blocks[1]) }); @@ -239,11 +378,13 @@ impl<'a,'tcx> Builder<'a,'tcx> { let result = self.temp(bool_ty); // result = op(left, right) - self.cfg.push_assign(block, span, &result, Rvalue::BinaryOp(op, left, right)); + let source_info = self.source_info(span); + self.cfg.push_assign(block, source_info, &result, + Rvalue::BinaryOp(op, left, right)); // branch based on result let target_block = self.cfg.start_new_block(); - self.cfg.terminate(block, Terminator::If { + self.cfg.terminate(block, source_info, TerminatorKind::If { cond: Operand::Consume(result), targets: (target_block, fail_block) }); @@ -306,52 +447,118 @@ impl<'a,'tcx> Builder<'a,'tcx> { } }; - match test.kind { + match (&test.kind, &*match_pair.pattern.kind) { // If we are performing a variant switch, then this // informs variant patterns, but nothing else. 
- TestKind::Switch { adt_def: tested_adt_def } => { - match *match_pair.pattern.kind { - PatternKind::Variant { adt_def, variant_index, ref subpatterns } => { - assert_eq!(adt_def, tested_adt_def); - let new_candidate = - self.candidate_after_variant_switch(match_pair_index, - adt_def, - variant_index, - subpatterns, - candidate); - resulting_candidates[variant_index].push(new_candidate); + (&TestKind::Switch { adt_def: tested_adt_def, .. }, + &PatternKind::Variant { adt_def, variant_index, ref subpatterns }) => { + assert_eq!(adt_def, tested_adt_def); + let new_candidate = + self.candidate_after_variant_switch(match_pair_index, + adt_def, + variant_index, + subpatterns, + candidate); + resulting_candidates[variant_index].push(new_candidate); + true + } + (&TestKind::Switch { .. }, _) => false, + + // If we are performing a switch over integers, then this informs integer + // equality, but nothing else. + // + // FIXME(#29623) we could use PatternKind::Range to rule + // things out here, in some cases. + (&TestKind::SwitchInt { switch_ty: _, options: _, ref indices }, + &PatternKind::Constant { ref value }) + if is_switch_ty(match_pair.pattern.ty) => { + let index = indices[value]; + let new_candidate = self.candidate_without_match_pair(match_pair_index, + candidate); + resulting_candidates[index].push(new_candidate); + true + } + (&TestKind::SwitchInt { .. }, _) => false, + + + (&TestKind::Len { len: test_len, op: BinOp::Eq }, + &PatternKind::Slice { ref prefix, ref slice, ref suffix }) => { + let pat_len = (prefix.len() + suffix.len()) as u64; + match (test_len.cmp(&pat_len), slice) { + (Ordering::Equal, &None) => { + // on true, min_len = len = $actual_length, + // on false, len != $actual_length + resulting_candidates[0].push( + self.candidate_after_slice_test(match_pair_index, + candidate, + prefix, + slice.as_ref(), + suffix) + ); true } - _ => { + (Ordering::Less, _) => { + // test_len < pat_len. 
If $actual_len = test_len, + // then $actual_len < pat_len and we don't have + // enough elements. + resulting_candidates[1].push(candidate.clone()); + true + } + (Ordering::Equal, &Some(_)) | (Ordering::Greater, &Some(_)) => { + // This can match both if $actual_len = test_len >= pat_len, + // and if $actual_len > test_len. We can't advance. false } + (Ordering::Greater, &None) => { + // test_len != pat_len, so if $actual_len = test_len, then + // $actual_len != pat_len. + resulting_candidates[1].push(candidate.clone()); + true + } } } - // If we are performing a switch over integers, then this informs integer - // equality, but nothing else. - // - // FIXME(#29623) we could use TestKind::Range to rule - // things out here, in some cases. - TestKind::SwitchInt { switch_ty: _, options: _, ref indices } => { - match *match_pair.pattern.kind { - PatternKind::Constant { ref value } - if is_switch_ty(match_pair.pattern.ty) => { - let index = indices[value]; - let new_candidate = self.candidate_without_match_pair(match_pair_index, - candidate); - resulting_candidates[index].push(new_candidate); + (&TestKind::Len { len: test_len, op: BinOp::Ge }, + &PatternKind::Slice { ref prefix, ref slice, ref suffix }) => { + // the test is `$actual_len >= test_len` + let pat_len = (prefix.len() + suffix.len()) as u64; + match (test_len.cmp(&pat_len), slice) { + (Ordering::Equal, &Some(_)) => { + // $actual_len >= test_len = pat_len, + // so we can match. + resulting_candidates[0].push( + self.candidate_after_slice_test(match_pair_index, + candidate, + prefix, + slice.as_ref(), + suffix) + ); true } - _ => { + (Ordering::Less, _) | (Ordering::Equal, &None) => { + // test_len <= pat_len. If $actual_len < test_len, + // then it is also < pat_len, so the test passing is + // necessary (but insufficient). + resulting_candidates[0].push(candidate.clone()); + true + } + (Ordering::Greater, &None) => { + // test_len > pat_len. 
If $actual_len >= test_len > pat_len, + // then we know we won't have a match. + resulting_candidates[1].push(candidate.clone()); + true + } + (Ordering::Greater, &Some(_)) => { + // test_len < pat_len, and is therefore less + // strict. This can still go both ways. false } } } - TestKind::Eq { .. } | - TestKind::Range { .. } | - TestKind::Len { .. } => { + (&TestKind::Eq { .. }, _) | + (&TestKind::Range { .. }, _) | + (&TestKind::Len { .. }, _) => { // These are all binary tests. // // FIXME(#29623) we can be more clever here @@ -379,6 +586,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { .map(|(_, mp)| mp.clone()) .collect(); Candidate { + span: candidate.span, match_pairs: other_match_pairs, bindings: candidate.bindings.clone(), guard: candidate.guard.clone(), @@ -386,9 +594,28 @@ impl<'a,'tcx> Builder<'a,'tcx> { } } + fn candidate_after_slice_test<'pat>(&mut self, + match_pair_index: usize, + candidate: &Candidate<'pat, 'tcx>, + prefix: &'pat [Pattern<'tcx>], + opt_slice: Option<&'pat Pattern<'tcx>>, + suffix: &'pat [Pattern<'tcx>]) + -> Candidate<'pat, 'tcx> { + let mut new_candidate = + self.candidate_without_match_pair(match_pair_index, candidate); + self.prefix_slice_suffix( + &mut new_candidate.match_pairs, + &candidate.match_pairs[match_pair_index].lvalue, + prefix, + opt_slice, + suffix); + + new_candidate + } + fn candidate_after_variant_switch<'pat>(&mut self, match_pair_index: usize, - adt_def: ty::AdtDef<'tcx>, + adt_def: &'tcx ty::AdtDef, variant_index: usize, subpatterns: &'pat [FieldPattern<'tcx>], candidate: &Candidate<'pat, 'tcx>) @@ -404,7 +631,8 @@ impl<'a,'tcx> Builder<'a,'tcx> { subpatterns.iter() .map(|subpattern| { // e.g., `(x as Variant).0` - let lvalue = downcast_lvalue.clone().field(subpattern.field); + let lvalue = downcast_lvalue.clone().field(subpattern.field, + subpattern.pattern.ty); // e.g., `(x as Variant).0 @ P1` MatchPair::new(lvalue, &subpattern.pattern) }); @@ -419,6 +647,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { let all_match_pairs = 
consequent_match_pairs.chain(other_match_pairs).collect(); Candidate { + span: candidate.span, match_pairs: all_match_pairs, bindings: candidate.bindings.clone(), guard: candidate.guard.clone(), @@ -427,8 +656,9 @@ impl<'a,'tcx> Builder<'a,'tcx> { } fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! { - self.hir.span_bug(match_pair.pattern.span, - &format!("simplifyable pattern found: {:?}", match_pair.pattern)) + span_bug!(match_pair.pattern.span, + "simplifyable pattern found: {:?}", + match_pair.pattern) } } diff --git a/src/librustc_mir/build/matches/util.rs b/src/librustc_mir/build/matches/util.rs index dbb00a13cd3b4..a013875b3110b 100644 --- a/src/librustc_mir/build/matches/util.rs +++ b/src/librustc_mir/build/matches/util.rs @@ -8,78 +8,37 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use build::{BlockAnd, BlockAndExtension, Builder}; +use build::Builder; use build::matches::MatchPair; use hair::*; -use rustc::mir::repr::*; +use rustc::mir::*; use std::u32; -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn field_match_pairs<'pat>(&mut self, lvalue: Lvalue<'tcx>, subpatterns: &'pat [FieldPattern<'tcx>]) -> Vec> { subpatterns.iter() .map(|fieldpat| { - let lvalue = lvalue.clone().field(fieldpat.field); + let lvalue = lvalue.clone().field(fieldpat.field, + fieldpat.pattern.ty); MatchPair::new(lvalue, &fieldpat.pattern) }) .collect() } - /// When processing an array/slice pattern like `lv @ [x, y, ..s, z]`, - /// this function converts the prefix (`x`, `y`) and suffix (`z`) into - /// distinct match pairs: - /// - /// lv[0 of 3] @ x // see ProjectionElem::ConstantIndex (and its Debug impl) - /// lv[1 of 3] @ y // to explain the `[x of y]` notation - /// lv[-1 of 3] @ z - /// - /// If a slice like `s` is present, then the function also creates - /// a temporary like: - /// - /// tmp0 = lv[2..-1] // using the special Rvalue::Slice - /// - 
/// and creates a match pair `tmp0 @ s` - pub fn prefix_suffix_slice<'pat>(&mut self, + pub fn prefix_slice_suffix<'pat>(&mut self, match_pairs: &mut Vec>, - block: BasicBlock, - lvalue: Lvalue<'tcx>, + lvalue: &Lvalue<'tcx>, prefix: &'pat [Pattern<'tcx>], opt_slice: Option<&'pat Pattern<'tcx>>, - suffix: &'pat [Pattern<'tcx>]) - -> BlockAnd<()> { - // If there is a `..P` pattern, create a temporary `t0` for - // the slice and then a match pair `t0 @ P`: - if let Some(slice) = opt_slice { - let prefix_len = prefix.len(); - let suffix_len = suffix.len(); - let rvalue = Rvalue::Slice { - input: lvalue.clone(), - from_start: prefix_len, - from_end: suffix_len, - }; - let temp = self.temp(slice.ty.clone()); // no need to schedule drop, temp is always copy - self.cfg.push_assign(block, slice.span, &temp, rvalue); - match_pairs.push(MatchPair::new(temp, slice)); - } - - self.prefix_suffix(match_pairs, lvalue, prefix, suffix); - - block.unit() - } - - /// Helper for `prefix_suffix_slice` which just processes the prefix and suffix. 
- fn prefix_suffix<'pat>(&mut self, - match_pairs: &mut Vec>, - lvalue: Lvalue<'tcx>, - prefix: &'pat [Pattern<'tcx>], - suffix: &'pat [Pattern<'tcx>]) { + suffix: &'pat [Pattern<'tcx>]) { let min_length = prefix.len() + suffix.len(); assert!(min_length < u32::MAX as usize); let min_length = min_length as u32; - let prefix_pairs: Vec<_> = + match_pairs.extend( prefix.iter() .enumerate() .map(|(idx, subpattern)| { @@ -91,9 +50,17 @@ impl<'a,'tcx> Builder<'a,'tcx> { let lvalue = lvalue.clone().elem(elem); MatchPair::new(lvalue, subpattern) }) - .collect(); + ); + + if let Some(subslice_pat) = opt_slice { + let subslice = lvalue.clone().elem(ProjectionElem::Subslice { + from: prefix.len() as u32, + to: suffix.len() as u32 + }); + match_pairs.push(MatchPair::new(subslice, subslice_pat)); + } - let suffix_pairs: Vec<_> = + match_pairs.extend( suffix.iter() .rev() .enumerate() @@ -106,9 +73,7 @@ impl<'a,'tcx> Builder<'a,'tcx> { let lvalue = lvalue.clone().elem(elem); MatchPair::new(lvalue, subpattern) }) - .collect(); - - match_pairs.extend(prefix_pairs.into_iter().chain(suffix_pairs)); + ); } } @@ -117,6 +82,7 @@ impl<'pat, 'tcx> MatchPair<'pat, 'tcx> { MatchPair { lvalue: lvalue, pattern: pattern, + slice_len_checked: false, } } } diff --git a/src/librustc_mir/build/misc.rs b/src/librustc_mir/build/misc.rs index 5d040bcb40ad8..a5f51ef35b741 100644 --- a/src/librustc_mir/build/misc.rs +++ b/src/librustc_mir/build/misc.rs @@ -12,25 +12,26 @@ //! kind of thing. use build::Builder; -use hair::*; -use rustc::middle::ty::Ty; -use rustc::mir::repr::*; -use std::u32; -use syntax::codemap::Span; -impl<'a,'tcx> Builder<'a,'tcx> { +use rustc_const_math::{ConstInt, ConstUsize, ConstIsize}; +use rustc::middle::const_val::ConstVal; +use rustc::ty::{self, Ty}; + +use rustc::mir::*; +use syntax::ast; +use syntax_pos::Span; + +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Add a new temporary value of type `ty` storing the result of /// evaluating `expr`. 
/// /// NB: **No cleanup is scheduled for this temporary.** You should /// call `schedule_drop` once the temporary is initialized. pub fn temp(&mut self, ty: Ty<'tcx>) -> Lvalue<'tcx> { - let index = self.temp_decls.len(); - self.temp_decls.push(TempDecl { ty: ty }); - assert!(index < (u32::MAX) as usize); - let lvalue = Lvalue::Temp(index as u32); + let temp = self.local_decls.push(LocalDecl::new_temp(ty)); + let lvalue = Lvalue::Local(temp); debug!("temp: created temp {:?} with type {:?}", - lvalue, self.temp_decls.last().unwrap().ty); + lvalue, self.local_decls[temp].ty); lvalue } @@ -47,28 +48,70 @@ impl<'a,'tcx> Builder<'a,'tcx> { Operand::Constant(constant) } - pub fn push_usize(&mut self, block: BasicBlock, span: Span, value: usize) -> Lvalue<'tcx> { + pub fn unit_rvalue(&mut self) -> Rvalue<'tcx> { + Rvalue::Aggregate(AggregateKind::Tuple, vec![]) + } + + // Returns a zero literal operand for the appropriate type, works for + // bool, char and integers. + pub fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> { + let literal = match ty.sty { + ty::TyBool => { + self.hir.false_literal() + } + ty::TyChar => Literal::Value { value: ConstVal::Char('\0') }, + ty::TyUint(ity) => { + let val = match ity { + ast::UintTy::U8 => ConstInt::U8(0), + ast::UintTy::U16 => ConstInt::U16(0), + ast::UintTy::U32 => ConstInt::U32(0), + ast::UintTy::U64 => ConstInt::U64(0), + ast::UintTy::Us => { + let uint_ty = self.hir.tcx().sess.target.uint_type; + let val = ConstUsize::new(0, uint_ty).unwrap(); + ConstInt::Usize(val) + } + }; + + Literal::Value { value: ConstVal::Integral(val) } + } + ty::TyInt(ity) => { + let val = match ity { + ast::IntTy::I8 => ConstInt::I8(0), + ast::IntTy::I16 => ConstInt::I16(0), + ast::IntTy::I32 => ConstInt::I32(0), + ast::IntTy::I64 => ConstInt::I64(0), + ast::IntTy::Is => { + let int_ty = self.hir.tcx().sess.target.int_type; + let val = ConstIsize::new(0, int_ty).unwrap(); + ConstInt::Isize(val) + } + }; + + Literal::Value { 
value: ConstVal::Integral(val) } + } + _ => { + span_bug!(span, "Invalid type for zero_literal: `{:?}`", ty) + } + }; + + self.literal_operand(span, ty, literal) + } + + pub fn push_usize(&mut self, + block: BasicBlock, + source_info: SourceInfo, + value: u64) + -> Lvalue<'tcx> { let usize_ty = self.hir.usize_ty(); let temp = self.temp(usize_ty); self.cfg.push_assign_constant( - block, span, &temp, + block, source_info, &temp, Constant { - span: span, + span: source_info.span, ty: self.hir.usize_ty(), literal: self.hir.usize_literal(value), }); temp } - - pub fn item_ref_operand(&mut self, - span: Span, - item_ref: ItemRef<'tcx>) - -> Operand<'tcx> { - let literal = Literal::Item { - def_id: item_ref.def_id, - kind: item_ref.kind, - substs: item_ref.substs, - }; - self.literal_operand(span, item_ref.ty, literal) - } } diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index d217eb0664793..0e4dbb0477716 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -9,34 +9,77 @@ // except according to those terms. 
use hair::cx::Cx; -use rustc::middle::region::CodeExtent; -use rustc::middle::ty::{FnOutput, Ty}; -use rustc::mir::repr::*; -use rustc_data_structures::fnv::FnvHashMap; -use rustc_front::hir; +use hair::Pattern; +use rustc::middle::region::{CodeExtent, CodeExtentData, ROOT_CODE_EXTENT}; +use rustc::ty::{self, Ty}; +use rustc::mir::*; +use rustc::util::nodemap::NodeMap; +use rustc::hir; +use syntax::abi::Abi; use syntax::ast; -use syntax::codemap::Span; +use syntax::symbol::keywords; +use syntax_pos::Span; -pub struct Builder<'a, 'tcx: 'a> { - hir: Cx<'a, 'tcx>, +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +use std::u32; + +pub struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + hir: Cx<'a, 'gcx, 'tcx>, cfg: CFG<'tcx>, + + fn_span: Span, + arg_count: usize, + + /// the current set of scopes, updated as we traverse; + /// see the `scope` module for more details scopes: Vec>, - loop_scopes: Vec, - var_decls: Vec>, - var_indices: FnvHashMap, - temp_decls: Vec>, + + /// the current set of loops; see the `scope` module for more + /// details + loop_scopes: Vec>, + + /// the vector of all scopes that we have created thus far; + /// we track this for debuginfo later + visibility_scopes: IndexVec, + visibility_scope: VisibilityScope, + + /// Maps node ids of variable bindings to the `Local`s created for them. + var_indices: NodeMap, + local_decls: IndexVec>, + unit_temp: Option>, + + /// cached block with the RESUME terminator; this is created + /// when first set of cleanups are built. 
+ cached_resume_block: Option, + /// cached block with the RETURN terminator + cached_return_block: Option, } struct CFG<'tcx> { - basic_blocks: Vec>, + basic_blocks: IndexVec>, +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ScopeId(u32); + +impl Idx for ScopeId { + fn new(index: usize) -> ScopeId { + assert!(index < (u32::MAX as usize)); + ScopeId(index as u32) + } + + fn index(self) -> usize { + self.0 as usize + } } /////////////////////////////////////////////////////////////////////////// -// The `BlockAnd` "monad" packages up the new basic block along with a -// produced value (sometimes just unit, of course). The `unpack!` -// macro (and methods below) makes working with `BlockAnd` much more -// convenient. +/// The `BlockAnd` "monad" packages up the new basic block along with a +/// produced value (sometimes just unit, of course). The `unpack!` +/// macro (and methods below) makes working with `BlockAnd` much more +/// convenient. #[must_use] // if you don't use one of these results, you're leaving a dangling edge pub struct BlockAnd(BasicBlock, T); @@ -76,85 +119,234 @@ macro_rules! 
unpack { } /////////////////////////////////////////////////////////////////////////// -// construct() -- the main entry point for building MIR for a function - -pub fn construct<'a,'tcx>(hir: Cx<'a,'tcx>, - _span: Span, - implicit_arguments: Vec>, - explicit_arguments: Vec<(Ty<'tcx>, &'tcx hir::Pat)>, - argument_extent: CodeExtent, - return_ty: FnOutput<'tcx>, - ast_block: &'tcx hir::Block) - -> Mir<'tcx> { - let cfg = CFG { basic_blocks: vec![] }; - - let mut builder = Builder { - hir: hir, - cfg: cfg, - scopes: vec![], - loop_scopes: vec![], - temp_decls: vec![], - var_decls: vec![], - var_indices: FnvHashMap(), - }; +/// the main entry point for building MIR for a function + +pub fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, + fn_id: ast::NodeId, + arguments: A, + abi: Abi, + return_ty: Ty<'gcx>, + ast_body: &'gcx hir::Expr) + -> Mir<'tcx> + where A: Iterator, Option<&'gcx hir::Pat>)> +{ + let arguments: Vec<_> = arguments.collect(); - assert_eq!(builder.cfg.start_new_block(), START_BLOCK); - assert_eq!(builder.cfg.start_new_block(), END_BLOCK); + let tcx = hir.tcx(); + let span = tcx.map.span(fn_id); + let mut builder = Builder::new(hir, span, arguments.len(), return_ty); + let body_id = ast_body.id; + let call_site_extent = + tcx.region_maps.lookup_code_extent( + CodeExtentData::CallSiteScope { fn_id: fn_id, body_id: body_id }); + let arg_extent = + tcx.region_maps.lookup_code_extent( + CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body_id }); let mut block = START_BLOCK; - let arg_decls = unpack!(block = builder.args_and_body(block, - implicit_arguments, - explicit_arguments, - argument_extent, - ast_block)); - - builder.cfg.terminate(block, Terminator::Goto { target: END_BLOCK }); - builder.cfg.terminate(END_BLOCK, Terminator::Return); - - Mir { - basic_blocks: builder.cfg.basic_blocks, - var_decls: builder.var_decls, - arg_decls: arg_decls, - temp_decls: builder.temp_decls, - return_ty: return_ty, + unpack!(block = 
builder.in_scope(call_site_extent, block, |builder| { + unpack!(block = builder.in_scope(arg_extent, block, |builder| { + builder.args_and_body(block, &arguments, arg_extent, ast_body) + })); + // Attribute epilogue to function's closing brace + let fn_end = Span { lo: span.hi, ..span }; + let source_info = builder.source_info(fn_end); + let return_block = builder.return_block(); + builder.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: return_block }); + builder.cfg.terminate(return_block, source_info, + TerminatorKind::Return); + return_block.unit() + })); + assert_eq!(block, builder.return_block()); + + let mut spread_arg = None; + if abi == Abi::RustCall { + // RustCall pseudo-ABI untuples the last argument. + spread_arg = Some(Local::new(arguments.len())); } + + // Gather the upvars of a closure, if any. + let upvar_decls: Vec<_> = tcx.with_freevars(fn_id, |freevars| { + freevars.iter().map(|fv| { + let var_id = tcx.map.as_local_node_id(fv.def.def_id()).unwrap(); + let by_ref = tcx.tables().upvar_capture(ty::UpvarId { + var_id: var_id, + closure_expr_id: fn_id + }).map_or(false, |capture| match capture { + ty::UpvarCapture::ByValue => false, + ty::UpvarCapture::ByRef(..) 
=> true + }); + let mut decl = UpvarDecl { + debug_name: keywords::Invalid.name(), + by_ref: by_ref + }; + if let Some(hir::map::NodeLocal(pat)) = tcx.map.find(var_id) { + if let hir::PatKind::Binding(_, _, ref ident, _) = pat.node { + decl.debug_name = ident.node; + } + } + decl + }).collect() + }); + + let mut mir = builder.finish(upvar_decls, return_ty); + mir.spread_arg = spread_arg; + mir +} + +pub fn construct_const<'a, 'gcx, 'tcx>(hir: Cx<'a, 'gcx, 'tcx>, + item_id: ast::NodeId, + ast_expr: &'tcx hir::Expr) + -> Mir<'tcx> { + let tcx = hir.tcx(); + let ty = tcx.tables().expr_ty_adjusted(ast_expr); + let span = tcx.map.span(item_id); + let mut builder = Builder::new(hir, span, 0, ty); + + let extent = tcx.region_maps.temporary_scope(ast_expr.id) + .unwrap_or(ROOT_CODE_EXTENT); + let mut block = START_BLOCK; + let _ = builder.in_scope(extent, block, |builder| { + let expr = builder.hir.mirror(ast_expr); + unpack!(block = builder.into(&Lvalue::Local(RETURN_POINTER), block, expr)); + + let source_info = builder.source_info(span); + let return_block = builder.return_block(); + builder.cfg.terminate(block, source_info, + TerminatorKind::Goto { target: return_block }); + builder.cfg.terminate(return_block, source_info, + TerminatorKind::Return); + + return_block.unit() + }); + + builder.finish(vec![], ty) } -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { + fn new(hir: Cx<'a, 'gcx, 'tcx>, + span: Span, + arg_count: usize, + return_ty: Ty<'tcx>) + -> Builder<'a, 'gcx, 'tcx> { + let mut builder = Builder { + hir: hir, + cfg: CFG { basic_blocks: IndexVec::new() }, + fn_span: span, + arg_count: arg_count, + scopes: vec![], + visibility_scopes: IndexVec::new(), + visibility_scope: ARGUMENT_VISIBILITY_SCOPE, + loop_scopes: vec![], + local_decls: IndexVec::from_elem_n(LocalDecl::new_return_pointer(return_ty), 1), + var_indices: NodeMap(), + unit_temp: None, + cached_resume_block: None, + cached_return_block: None + }; + + 
assert_eq!(builder.cfg.start_new_block(), START_BLOCK); + assert_eq!(builder.new_visibility_scope(span), ARGUMENT_VISIBILITY_SCOPE); + builder.visibility_scopes[ARGUMENT_VISIBILITY_SCOPE].parent_scope = None; + + builder + } + + fn finish(self, + upvar_decls: Vec, + return_ty: Ty<'tcx>) + -> Mir<'tcx> { + for (index, block) in self.cfg.basic_blocks.iter().enumerate() { + if block.terminator.is_none() { + span_bug!(self.fn_span, "no terminator on block {:?}", index); + } + } + + Mir::new(self.cfg.basic_blocks, + self.visibility_scopes, + IndexVec::new(), + return_ty, + self.local_decls, + self.arg_count, + upvar_decls, + self.fn_span + ) + } + fn args_and_body(&mut self, mut block: BasicBlock, - implicit_arguments: Vec>, - explicit_arguments: Vec<(Ty<'tcx>, &'tcx hir::Pat)>, + arguments: &[(Ty<'gcx>, Option<&'gcx hir::Pat>)], argument_extent: CodeExtent, - ast_block: &'tcx hir::Block) - -> BlockAnd>> + ast_body: &'gcx hir::Expr) + -> BlockAnd<()> { - self.in_scope(argument_extent, block, |this| { - // to start, translate the argument patterns and collect the argument types. - let implicits = implicit_arguments.into_iter().map(|ty| (ty, None)); - let explicits = explicit_arguments.into_iter().map(|(ty, pat)| (ty, Some(pat))); - let arg_decls = - implicits - .chain(explicits) - .enumerate() - .map(|(index, (ty, pattern))| { - if let Some(pattern) = pattern { - let lvalue = Lvalue::Arg(index as u32); - let pattern = this.hir.irrefutable_pat(pattern); - unpack!(block = this.lvalue_into_pattern(block, - argument_extent, - pattern, - &lvalue)); - } - ArgDecl { ty: ty } - }) - .collect(); - - // start the first basic block and translate the body - unpack!(block = this.ast_block(&Lvalue::ReturnPointer, block, ast_block)); - - block.and(arg_decls) - }) + // Allocate locals for the function arguments + for &(ty, pattern) in arguments.iter() { + // If this is a simple binding pattern, give the local a nice name for debuginfo. 
+ let mut name = None; + if let Some(pat) = pattern { + if let hir::PatKind::Binding(_, _, ref ident, _) = pat.node { + name = Some(ident.node); + } + } + + self.local_decls.push(LocalDecl { + mutability: Mutability::Not, + ty: ty, + source_info: None, + name: name, + }); + } + + let mut scope = None; + // Bind the argument patterns + for (index, &(ty, pattern)) in arguments.iter().enumerate() { + // Function arguments always get the first Local indices after the return pointer + let lvalue = Lvalue::Local(Local::new(index + 1)); + + if let Some(pattern) = pattern { + let pattern = Pattern::from_hir(self.hir.tcx(), pattern); + scope = self.declare_bindings(scope, ast_body.span, &pattern); + unpack!(block = self.lvalue_into_pattern(block, pattern, &lvalue)); + } + + // Make sure we drop (parts of) the argument even when not matched on. + self.schedule_drop(pattern.as_ref().map_or(ast_body.span, |pat| pat.span), + argument_extent, &lvalue, ty); + + } + + // Enter the argument pattern bindings visibility scope, if it exists. 
+ if let Some(visibility_scope) = scope { + self.visibility_scope = visibility_scope; + } + + let body = self.hir.mirror(ast_body); + self.into(&Lvalue::Local(RETURN_POINTER), block, body) + } + + fn get_unit_temp(&mut self) -> Lvalue<'tcx> { + match self.unit_temp { + Some(ref tmp) => tmp.clone(), + None => { + let ty = self.hir.unit_ty(); + let tmp = self.temp(ty); + self.unit_temp = Some(tmp.clone()); + tmp + } + } + } + + fn return_block(&mut self) -> BasicBlock { + match self.cached_return_block { + Some(rb) => rb, + None => { + let rb = self.cfg.start_new_block(); + self.cached_return_block = Some(rb); + rb + } + } } } @@ -170,4 +362,3 @@ mod into; mod matches; mod misc; mod scope; -mod stmt; diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 90d6a90682f6e..c02a1822d7369 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -26,7 +26,7 @@ multiple-exit (SEME) region in the control-flow graph. For now, we keep a mapping from each `CodeExtent` to its corresponding SEME region for later reference (see caveat in next paragraph). This is because region scopes are tied to -them. Eventually, when we shift to non-lexical lifetimes, three should +them. Eventually, when we shift to non-lexical lifetimes, there should be no need to remember this mapping. There is one additional wrinkle, actually, that I wanted to hide from @@ -47,7 +47,7 @@ set of scheduled drops up front, and so whenever we exit from the scope we only drop the values scheduled thus far. For example, consider the scope S corresponding to this loop: -``` +```rust,ignore loop { let x = ...; if cond { break; } @@ -67,7 +67,7 @@ There are numerous "normal" ways to early exit a scope: `break`, early exit occurs, the method `exit_scope` is called. It is given the current point in execution where the early exit occurs, as well as the scope you want to branch to (note that all early exits from to some -other enclosing scope). 
`exit_scope` will record thid exit point and +other enclosing scope). `exit_scope` will record this exit point and also add all drops. Panics are handled in a similar fashion, except that a panic always @@ -86,59 +86,195 @@ should go to. */ -use build::{BlockAnd, BlockAndExtension, Builder}; -use rustc::middle::region::CodeExtent; +use build::{BlockAnd, BlockAndExtension, Builder, CFG}; +use rustc::middle::region::{CodeExtent, CodeExtentData}; use rustc::middle::lang_items; -use rustc::middle::subst::Substs; -use rustc::middle::ty::{self, Ty}; -use rustc::mir::repr::*; -use syntax::codemap::{Span, DUMMY_SP}; -use syntax::parse::token::intern_and_get_ident; +use rustc::ty::subst::{Kind, Subst}; +use rustc::ty::{Ty, TyCtxt}; +use rustc::mir::*; +use syntax_pos::Span; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::fx::FxHashMap; pub struct Scope<'tcx> { + /// The visibility scope this scope was created in. + visibility_scope: VisibilityScope, + + /// the extent of this scope within source code. extent: CodeExtent, - drops: Vec<(DropKind, Span, Lvalue<'tcx>)>, - cached_block: Option, + + /// Whether there's anything to do for the cleanup path, that is, + /// when unwinding through this scope. This includes destructors, + /// but not StorageDead statements, which don't get emitted at all + /// for unwinding, for several reasons: + /// * clang doesn't emit llvm.lifetime.end for C++ unwinding + /// * LLVM's memory dependency analysis can't handle it atm + /// * pollutting the cleanup MIR with StorageDead creates + /// landing pads even though there's no actual destructors + /// * freeing up stack space has no effect during unwinding + needs_cleanup: bool, + + /// set of lvalues to drop when exiting this scope. This starts + /// out empty but grows as variables are declared during the + /// building process. This is a stack, so we always drop from the + /// end of the vector (top of the stack) first. 
+ drops: Vec>, + + /// A scope may only have one associated free, because: + /// + /// 1. We require a `free` to only be scheduled in the scope of + /// `EXPR` in `box EXPR`; + /// 2. It only makes sense to have it translated into the diverge-path. + /// + /// This kind of drop will be run *after* all the regular drops + /// scheduled onto this scope, because drops may have dependencies + /// on the allocated memory. + /// + /// This is expected to go away once `box EXPR` becomes a sugar + /// for placement protocol and gets desugared in some earlier + /// stage. + free: Option>, + + /// The cache for drop chain on “normal” exit into a particular BasicBlock. + cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>, +} + +struct DropData<'tcx> { + /// span where drop obligation was incurred (typically where lvalue was declared) + span: Span, + + /// lvalue to drop + location: Lvalue<'tcx>, + + /// Whether this is a full value Drop, or just a StorageDead. + kind: DropKind +} + +enum DropKind { + Value { + /// The cached block for the cleanups-on-diverge path. This block + /// contains code to run the current drop and all the preceding + /// drops (i.e. those having lower index in Drop’s Scope drop + /// array) + cached_block: Option + }, + Storage +} + +struct FreeData<'tcx> { + /// span where free obligation was incurred + span: Span, + + /// Lvalue containing the allocated box. + value: Lvalue<'tcx>, + + /// type of item for which the box was allocated for (i.e. the T in Box). + item_ty: Ty<'tcx>, + + /// The cached block containing code to run the free. The block will also execute all the drops + /// in the scope. 
+ cached_block: Option } #[derive(Clone, Debug)] -pub struct LoopScope { - pub extent: CodeExtent, // extent of the loop - pub continue_block: BasicBlock, // where to go on a `loop` - pub break_block: BasicBlock, // where to go on a `break +pub struct LoopScope<'tcx> { + /// Extent of the loop + pub extent: CodeExtent, + /// Where the body of the loop begins + pub continue_block: BasicBlock, + /// Block to branch into when the loop terminates (either by being `break`-en out from, or by + /// having its condition to become false) + pub break_block: BasicBlock, + /// The destination of the loop expression itself (i.e. where to put the result of a `break` + /// expression) + pub break_destination: Lvalue<'tcx>, +} + +impl<'tcx> Scope<'tcx> { + /// Invalidate all the cached blocks in the scope. + /// + /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a + /// larger extent of code. + /// + /// `unwind` controls whether caches for the unwind branch are also invalidated. + fn invalidate_cache(&mut self, unwind: bool) { + self.cached_exits.clear(); + if !unwind { return; } + for dropdata in &mut self.drops { + if let DropKind::Value { ref mut cached_block } = dropdata.kind { + *cached_block = None; + } + } + if let Some(ref mut freedata) = self.free { + freedata.cached_block = None; + } + } + + /// Returns the cached entrypoint for diverging exit from this scope. + /// + /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for + /// this method to work correctly. 
+ fn cached_block(&self) -> Option { + let mut drops = self.drops.iter().rev().filter_map(|data| { + match data.kind { + DropKind::Value { cached_block } => Some(cached_block), + DropKind::Storage => None + } + }); + if let Some(cached_block) = drops.next() { + Some(cached_block.expect("drop cache is not filled")) + } else if let Some(ref data) = self.free { + Some(data.cached_block.expect("free cache is not filled")) + } else { + None + } + } + + /// Given a span and this scope's visibility scope, make a SourceInfo. + fn source_info(&self, span: Span) -> SourceInfo { + SourceInfo { + span: span, + scope: self.visibility_scope + } + } } -impl<'a,'tcx> Builder<'a,'tcx> { +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { + // Adding and removing scopes + // ========================== /// Start a loop scope, which tracks where `continue` and `break` /// should branch to. See module comment for more details. - pub fn in_loop_scope(&mut self, - loop_block: BasicBlock, - break_block: BasicBlock, - f: F) - -> BlockAnd - where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd + /// + /// Returns the might_break attribute of the LoopScope used. + pub fn in_loop_scope(&mut self, + loop_block: BasicBlock, + break_block: BasicBlock, + break_destination: Lvalue<'tcx>, + f: F) + where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) { let extent = self.extent_of_innermost_scope(); let loop_scope = LoopScope { extent: extent.clone(), continue_block: loop_block, break_block: break_block, + break_destination: break_destination, }; self.loop_scopes.push(loop_scope); - let r = f(self); - assert!(self.loop_scopes.pop().unwrap().extent == extent); - r + f(self); + let loop_scope = self.loop_scopes.pop().unwrap(); + assert!(loop_scope.extent == extent); } /// Convenience wrapper that pushes a scope and then executes `f` /// to build its contents, popping the scope afterwards. 
pub fn in_scope(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd - where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd + where F: FnOnce(&mut Builder<'a, 'gcx, 'tcx>) -> BlockAnd { debug!("in_scope(extent={:?}, block={:?})", extent, block); - self.push_scope(extent, block); + self.push_scope(extent); let rv = unpack!(block = f(self)); - self.pop_scope(extent, block); + unpack!(block = self.pop_scope(extent, block)); debug!("in_scope: exiting extent={:?} block={:?}", extent, block); block.and(rv) } @@ -147,260 +283,519 @@ impl<'a,'tcx> Builder<'a,'tcx> { /// scope and call `pop_scope` afterwards. Note that these two /// calls must be paired; using `in_scope` as a convenience /// wrapper maybe preferable. - pub fn push_scope(&mut self, extent: CodeExtent, block: BasicBlock) { - debug!("push_scope({:?}, {:?})", extent, block); - - // push scope, execute `f`, then pop scope again + pub fn push_scope(&mut self, extent: CodeExtent) { + debug!("push_scope({:?})", extent); + let vis_scope = self.visibility_scope; self.scopes.push(Scope { - extent: extent.clone(), + visibility_scope: vis_scope, + extent: extent, + needs_cleanup: false, drops: vec![], - cached_block: None, + free: None, + cached_exits: FxHashMap() }); } /// Pops a scope, which should have extent `extent`, adding any /// drops onto the end of `block` that are needed. This must /// match 1-to-1 with `push_scope`. - pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) { + pub fn pop_scope(&mut self, + extent: CodeExtent, + mut block: BasicBlock) + -> BlockAnd<()> { debug!("pop_scope({:?}, {:?})", extent, block); + // We need to have `cached_block`s available for all the drops, so we call diverge_cleanup + // to make sure all the `cached_block`s are filled in. 
+ self.diverge_cleanup(); let scope = self.scopes.pop().unwrap(); - assert_eq!(scope.extent, extent); + unpack!(block = build_scope_drops(&mut self.cfg, + &scope, + &self.scopes, + block, + self.arg_count)); + block.unit() + } - // add in any drops needed on the fallthrough path (any other - // exiting paths, such as those that arise from `break`, will - // have drops already) - for (kind, span, lvalue) in scope.drops { - self.cfg.push_drop(block, span, kind, &lvalue); + + /// Branch out of `block` to `target`, exiting all scopes up to + /// and including `extent`. This will insert whatever drops are + /// needed, as well as tracking this exit for the SEME region. See + /// module comment for details. + pub fn exit_scope(&mut self, + span: Span, + extent: CodeExtent, + mut block: BasicBlock, + target: BasicBlock) { + debug!("exit_scope(extent={:?}, block={:?}, target={:?})", extent, block, target); + let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent) + .unwrap_or_else(||{ + span_bug!(span, "extent {:?} does not enclose", extent) + }); + let len = self.scopes.len(); + assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes"); + let tmp = self.get_unit_temp(); + { + let mut rest = &mut self.scopes[(len - scope_count)..]; + while let Some((scope, rest_)) = {rest}.split_last_mut() { + rest = rest_; + block = if let Some(&e) = scope.cached_exits.get(&(target, extent)) { + self.cfg.terminate(block, scope.source_info(span), + TerminatorKind::Goto { target: e }); + return; + } else { + let b = self.cfg.start_new_block(); + self.cfg.terminate(block, scope.source_info(span), + TerminatorKind::Goto { target: b }); + scope.cached_exits.insert((target, extent), b); + b + }; + unpack!(block = build_scope_drops(&mut self.cfg, + scope, + rest, + block, + self.arg_count)); + if let Some(ref free_data) = scope.free { + let next = self.cfg.start_new_block(); + let free = build_free(self.hir.tcx(), &tmp, free_data, next); + 
self.cfg.terminate(block, scope.source_info(span), free); + block = next; + } } + } + let scope = &self.scopes[len - scope_count]; + self.cfg.terminate(block, scope.source_info(span), + TerminatorKind::Goto { target: target }); } + /// Creates a new visibility scope, nested in the current one. + pub fn new_visibility_scope(&mut self, span: Span) -> VisibilityScope { + let parent = self.visibility_scope; + let scope = VisibilityScope::new(self.visibility_scopes.len()); + self.visibility_scopes.push(VisibilityScopeData { + span: span, + parent_scope: Some(parent), + }); + scope + } + // Finding scopes + // ============== /// Finds the loop scope for a given label. This is used for /// resolving `break` and `continue`. pub fn find_loop_scope(&mut self, span: Span, label: Option) - -> LoopScope { - let loop_scope = - match label { - None => { - // no label? return the innermost loop scope - self.loop_scopes.iter() - .rev() - .next() - } - Some(label) => { - // otherwise, find the loop-scope with the correct id - self.loop_scopes.iter() - .rev() - .filter(|loop_scope| loop_scope.extent == label) - .next() - } - }; + -> &mut LoopScope<'tcx> { + let loop_scopes = &mut self.loop_scopes; + match label { + None => { + // no label? return the innermost loop scope + loop_scopes.iter_mut().rev().next() + } + Some(label) => { + // otherwise, find the loop-scope with the correct id + loop_scopes.iter_mut() + .rev() + .filter(|loop_scope| loop_scope.extent == label) + .next() + } + }.unwrap_or_else(|| span_bug!(span, "no enclosing loop scope found?")) + } - match loop_scope { - Some(loop_scope) => loop_scope.clone(), - None => self.hir.span_bug(span, "no enclosing loop scope found?"), + /// Given a span and the current visibility scope, make a SourceInfo. + pub fn source_info(&self, span: Span) -> SourceInfo { + SourceInfo { + span: span, + scope: self.visibility_scope } } - /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `extent`. 
This will insert whatever drops are - /// needed, as well as tracking this exit for the SEME region. See - /// module comment for details. - pub fn exit_scope(&mut self, - span: Span, - extent: CodeExtent, - block: BasicBlock, - target: BasicBlock) { - let popped_scopes = - match self.scopes.iter().rev().position(|scope| scope.extent == extent) { - Some(p) => p + 1, - None => self.hir.span_bug(span, &format!("extent {:?} does not enclose", - extent)), - }; + pub fn extent_of_innermost_scope(&self) -> CodeExtent { + self.scopes.last().map(|scope| scope.extent).unwrap() + } - for scope in self.scopes.iter_mut().rev().take(popped_scopes) { - for &(kind, drop_span, ref lvalue) in &scope.drops { - self.cfg.push_drop(block, drop_span, kind, lvalue); + /// Returns the extent of the scope which should be exited by a + /// return. + pub fn extent_of_return_scope(&self) -> CodeExtent { + // The outermost scope (`scopes[0]`) will be the `CallSiteScope`. + // We want `scopes[1]`, which is the `ParameterScope`. + assert!(self.scopes.len() >= 2); + assert!(match self.hir.tcx().region_maps.code_extent_data(self.scopes[1].extent) { + CodeExtentData::ParameterScope { .. } => true, + _ => false, + }); + self.scopes[1].extent + } + + // Scheduling drops + // ================ + /// Indicates that `lvalue` should be dropped on exit from + /// `extent`. + pub fn schedule_drop(&mut self, + span: Span, + extent: CodeExtent, + lvalue: &Lvalue<'tcx>, + lvalue_ty: Ty<'tcx>) { + let needs_drop = self.hir.needs_drop(lvalue_ty); + let drop_kind = if needs_drop { + DropKind::Value { cached_block: None } + } else { + // Only temps and vars need their storage dead. 
+ match *lvalue { + Lvalue::Local(index) if index.index() > self.arg_count => DropKind::Storage, + _ => return + } + }; + + for scope in self.scopes.iter_mut().rev() { + let this_scope = scope.extent == extent; + // When building drops, we try to cache chains of drops in such a way so these drops + // could be reused by the drops which would branch into the cached (already built) + // blocks. This, however, means that whenever we add a drop into a scope which already + // had some blocks built (and thus, cached) for it, we must invalidate all caches which + // might branch into the scope which had a drop just added to it. This is necessary, + // because otherwise some other code might use the cache to branch into already built + // chain of drops, essentially ignoring the newly added drop. + // + // For example consider there’s two scopes with a drop in each. These are built and + // thus the caches are filled: + // + // +--------------------------------------------------------+ + // | +---------------------------------+ | + // | | +--------+ +-------------+ | +---------------+ | + // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | | + // | | +--------+ +-------------+ | +---------------+ | + // | +------------|outer_scope cache|--+ | + // +------------------------------|middle_scope cache|------+ + // + // Now, a new, inner-most scope is added along with a new drop into both inner-most and + // outer-most scopes: + // + // +------------------------------------------------------------+ + // | +----------------------------------+ | + // | | +--------+ +-------------+ | +---------------+ | +-------------+ + // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) | + // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+ + // | | +-+ +-------------+ | | + // | +---|invalid outer_scope cache|----+ | + // +----=----------------|invalid middle_scope cache|-----------+ + // + // If, when adding `drop(new)` we do not invalidate 
the cached blocks for both + // outer_scope and middle_scope, then, when building drops for the inner (right-most) + // scope, the old, cached blocks, without `drop(new)` will get used, producing the + // wrong results. + // + // The cache and its invalidation for unwind branch is somewhat special. The cache is + // per-drop, rather than per scope, which has a several different implications. Adding + // a new drop into a scope will not invalidate cached blocks of the prior drops in the + // scope. That is true, because none of the already existing drops will have an edge + // into a block with the newly added drop. + // + // Note that this code iterates scopes from the inner-most to the outer-most, + // invalidating caches of each scope visited. This way bare minimum of the + // caches gets invalidated. i.e. if a new drop is added into the middle scope, the + // cache of outer scpoe stays intact. + let invalidate_unwind = needs_drop && !this_scope; + scope.invalidate_cache(invalidate_unwind); + if this_scope { + if let DropKind::Value { .. } = drop_kind { + scope.needs_cleanup = true; + } + let tcx = self.hir.tcx(); + let extent_span = extent.span(&tcx.region_maps, &tcx.map).unwrap(); + // Attribute scope exit drops to scope's closing brace + let scope_end = Span { lo: extent_span.hi, .. extent_span}; + scope.drops.push(DropData { + span: scope_end, + location: lvalue.clone(), + kind: drop_kind + }); + return; } } + span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue); + } - self.cfg.terminate(block, Terminator::Goto { target: target }); + /// Schedule dropping of a not-yet-fully-initialised box. + /// + /// This cleanup will only be translated into unwind branch. + /// The extent should be for the `EXPR` inside `box EXPR`. + /// There may only be one “free” scheduled in any given scope. 
+ pub fn schedule_box_free(&mut self, + span: Span, + extent: CodeExtent, + value: &Lvalue<'tcx>, + item_ty: Ty<'tcx>) { + for scope in self.scopes.iter_mut().rev() { + // See the comment in schedule_drop above. The primary difference is that we invalidate + // the unwind blocks unconditionally. That’s because the box free may be considered + // outer-most cleanup within the scope. + scope.invalidate_cache(true); + if scope.extent == extent { + assert!(scope.free.is_none(), "scope already has a scheduled free!"); + scope.needs_cleanup = true; + scope.free = Some(FreeData { + span: span, + value: value.clone(), + item_ty: item_ty, + cached_block: None + }); + return; + } + } + span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value); } + // Other + // ===== /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. /// See module comment for more details. None indicates there’s no /// cleanup to do at this point. pub fn diverge_cleanup(&mut self) -> Option { - if self.scopes.is_empty() { + if !self.scopes.iter().any(|scope| scope.needs_cleanup) { return None; } + assert!(!self.scopes.is_empty()); // or `any` above would be false + + let unit_temp = self.get_unit_temp(); + let Builder { ref mut hir, ref mut cfg, ref mut scopes, + ref mut cached_resume_block, .. } = *self; + + // Build up the drops in **reverse** order. The end result will + // look like: + // + // scopes[n] -> scopes[n-1] -> ... -> scopes[0] + // + // However, we build this in **reverse order**. That is, we + // process scopes[0], then scopes[1], etc, pointing each one at + // the result generates from the one before. Along the way, we + // store caches. If everything is cached, we'll just walk right + // to left reading the cached results but never created anything. + + // To start, create the resume terminator. 
+ let mut target = if let Some(target) = *cached_resume_block { + target + } else { + let resumeblk = cfg.start_new_cleanup_block(); + cfg.terminate(resumeblk, + scopes[0].source_info(self.fn_span), + TerminatorKind::Resume); + *cached_resume_block = Some(resumeblk); + resumeblk + }; - let mut terminator = Terminator::Resume; - // Given an array of scopes, we generate these from the outermost scope to the innermost - // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will - // generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always - // terminate with a Resume terminator. - for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty()) { - if let Some(b) = scope.cached_block { - terminator = Terminator::Goto { target: b }; - continue; - } else { - let new_block = self.cfg.start_new_block(); - self.cfg.block_data_mut(new_block).is_cleanup = true; - self.cfg.terminate(new_block, terminator); - terminator = Terminator::Goto { target: new_block }; - for &(kind, span, ref lvalue) in scope.drops.iter().rev() { - self.cfg.push_drop(new_block, span, kind, lvalue); - } - scope.cached_block = Some(new_block); - } + for scope in scopes.iter_mut().filter(|s| s.needs_cleanup) { + target = build_diverge_scope(hir.tcx(), cfg, &unit_temp, scope, target); } - // Return the innermost cached block, most likely the one we just generated. - // Note that if there are no cleanups in scope we return None. - self.scopes.iter().rev().flat_map(|b| b.cached_block).next() + Some(target) } - /// Indicates that `lvalue` should be dropped on exit from - /// `extent`. 
- pub fn schedule_drop(&mut self, - span: Span, - extent: CodeExtent, - kind: DropKind, - lvalue: &Lvalue<'tcx>, - lvalue_ty: Ty<'tcx>) { - if self.hir.needs_drop(lvalue_ty) { - for scope in self.scopes.iter_mut().rev() { - // We must invalidate all the cached_blocks leading up to the scope we’re looking - // for, because otherwise some/most of the blocks in the chain might become - // incorrect (i.e. they still are pointing at old cached_block). - scope.cached_block = None; - if scope.extent == extent { - scope.drops.push((kind, span, lvalue.clone())); - return; - } - } - self.hir.span_bug(span, - &format!("extent {:?} not in scope to drop {:?}", extent, lvalue)); + /// Utility function for *non*-scope code to build their own drops + pub fn build_drop(&mut self, + block: BasicBlock, + span: Span, + location: Lvalue<'tcx>, + ty: Ty<'tcx>) -> BlockAnd<()> { + if !self.hir.needs_drop(ty) { + return block.unit(); } + let source_info = self.source_info(span); + let next_target = self.cfg.start_new_block(); + let diverge_target = self.diverge_cleanup(); + self.cfg.terminate(block, source_info, + TerminatorKind::Drop { + location: location, + target: next_target, + unwind: diverge_target, + }); + next_target.unit() } - pub fn extent_of_innermost_scope(&self) -> CodeExtent { - self.scopes.last().map(|scope| scope.extent).unwrap() + /// Utility function for *non*-scope code to build their own drops + pub fn build_drop_and_replace(&mut self, + block: BasicBlock, + span: Span, + location: Lvalue<'tcx>, + value: Operand<'tcx>) -> BlockAnd<()> { + let source_info = self.source_info(span); + let next_target = self.cfg.start_new_block(); + let diverge_target = self.diverge_cleanup(); + self.cfg.terminate(block, source_info, + TerminatorKind::DropAndReplace { + location: location, + value: value, + target: next_target, + unwind: diverge_target, + }); + next_target.unit() } - pub fn extent_of_outermost_scope(&self) -> CodeExtent { - self.scopes.first().map(|scope| 
scope.extent).unwrap() + /// Create an Assert terminator and return the success block. + /// If the boolean condition operand is not the expected value, + /// a runtime panic will be caused with the given message. + pub fn assert(&mut self, block: BasicBlock, + cond: Operand<'tcx>, + expected: bool, + msg: AssertMessage<'tcx>, + span: Span) + -> BasicBlock { + let source_info = self.source_info(span); + + let success_block = self.cfg.start_new_block(); + let cleanup = self.diverge_cleanup(); + + self.cfg.terminate(block, source_info, + TerminatorKind::Assert { + cond: cond, + expected: expected, + msg: msg, + target: success_block, + cleanup: cleanup + }); + + success_block } +} - pub fn panic_bounds_check(&mut self, - block: BasicBlock, - index: Operand<'tcx>, - len: Operand<'tcx>, - span: Span) { - // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> ! - let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem); - let args = func.ty.fn_args(); - let ref_ty = args.skip_binder()[0]; - let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { - (region, tyandmut.ty) - } else { - self.hir.span_bug(span, &format!("unexpected panic_bound_check type: {:?}", func.ty)); - }; - let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); - let (file, line) = self.span_to_fileline_args(span); - let elems = vec![Operand::Constant(file), Operand::Constant(line)]; - // FIXME: We should have this as a constant, rather than a stack variable (to not pollute - // icache with cold branch code), however to achieve that we either have to rely on rvalue - // promotion or have some way, in MIR, to create constants. - self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (file_arg, line_arg); - Rvalue::Aggregate(AggregateKind::Tuple, elems)); - // FIXME: is this region really correct here? 
- self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; - Rvalue::Ref(*region, BorrowKind::Unique, tuple)); - let cleanup = self.diverge_cleanup(); - self.cfg.terminate(block, Terminator::Call { - func: Operand::Constant(func), - args: vec![Operand::Consume(tuple_ref), index, len], - kind: match cleanup { - None => CallKind::Diverging, - Some(c) => CallKind::DivergingCleanup(c) +/// Builds drops for pop_scope and exit_scope. +fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>, + scope: &Scope<'tcx>, + earlier_scopes: &[Scope<'tcx>], + mut block: BasicBlock, + arg_count: usize) + -> BlockAnd<()> { + let mut iter = scope.drops.iter().rev().peekable(); + while let Some(drop_data) = iter.next() { + let source_info = scope.source_info(drop_data.span); + if let DropKind::Value { .. } = drop_data.kind { + // Try to find the next block with its cached block + // for us to diverge into in case the drop panics. + let on_diverge = iter.peek().iter().filter_map(|dd| { + match dd.kind { + DropKind::Value { cached_block } => cached_block, + DropKind::Storage => None + } + }).next(); + // If there’s no `cached_block`s within current scope, + // we must look for one in the enclosing scope. + let on_diverge = on_diverge.or_else(||{ + earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next() + }); + let next = cfg.start_new_block(); + cfg.terminate(block, source_info, TerminatorKind::Drop { + location: drop_data.location.clone(), + target: next, + unwind: on_diverge + }); + block = next; + } + match drop_data.kind { + DropKind::Value { .. } | + DropKind::Storage => { + // Only temps and vars need their storage dead. + match drop_data.location { + Lvalue::Local(index) if index.index() > arg_count => {} + _ => continue + } + + cfg.push(block, Statement { + source_info: source_info, + kind: StatementKind::StorageDead(drop_data.location.clone()) + }); } - }); + } } + block.unit() +} - /// Create diverge cleanup and branch to it from `block`. 
- pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) { - // fn(&(msg: &'static str filename: &'static str, line: u32)) -> ! - let func = self.lang_function(lang_items::PanicFnLangItem); - let args = func.ty.fn_args(); - let ref_ty = args.skip_binder()[0]; - let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty { - (region, tyandmut.ty) +fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + cfg: &mut CFG<'tcx>, + unit_temp: &Lvalue<'tcx>, + scope: &mut Scope<'tcx>, + mut target: BasicBlock) + -> BasicBlock +{ + // Build up the drops in **reverse** order. The end result will + // look like: + // + // [drops[n]] -...-> [drops[0]] -> [Free] -> [target] + // | | + // +------------------------------------+ + // code for scope + // + // The code in this function reads from right to left. At each + // point, we check for cached blocks representing the + // remainder. If everything is cached, we'll just walk right to + // left reading the cached results but never created anything. + + let visibility_scope = scope.visibility_scope; + let source_info = |span| SourceInfo { + span: span, + scope: visibility_scope + }; + + // Next, build up any free. 
+ if let Some(ref mut free_data) = scope.free { + target = if let Some(cached_block) = free_data.cached_block { + cached_block } else { - self.hir.span_bug(span, &format!("unexpected panic type: {:?}", func.ty)); + let into = cfg.start_new_cleanup_block(); + cfg.terminate(into, source_info(free_data.span), + build_free(tcx, unit_temp, free_data, target)); + free_data.cached_block = Some(into); + into }; - let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty)); - let (file, line) = self.span_to_fileline_args(span); - let message = Constant { - span: DUMMY_SP, - ty: self.hir.tcx().mk_static_str(), - literal: self.hir.str_literal(intern_and_get_ident(msg)) - }; - let elems = vec![Operand::Constant(message), - Operand::Constant(file), - Operand::Constant(line)]; - // FIXME: We should have this as a constant, rather than a stack variable (to not pollute - // icache with cold branch code), however to achieve that we either have to rely on rvalue - // promotion or have some way, in MIR, to create constants. - self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (message_arg, file_arg, line_arg); - Rvalue::Aggregate(AggregateKind::Tuple, elems)); - // FIXME: is this region really correct here? - self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple; - Rvalue::Ref(*region, BorrowKind::Unique, tuple)); - let cleanup = self.diverge_cleanup(); - self.cfg.terminate(block, Terminator::Call { - func: Operand::Constant(func), - args: vec![Operand::Consume(tuple_ref)], - kind: match cleanup { - None => CallKind::Diverging, - Some(c) => CallKind::DivergingCleanup(c) - } - }); } - fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> { - let funcdid = match self.hir.tcx().lang_items.require(lang_item) { - Ok(d) => d, - Err(m) => { - self.hir.tcx().sess.fatal(&*m) - } + // Next, build up the drops. Here we iterate the vector in + // *forward* order, so that we generate drops[0] first (right to + // left in diagram above). 
+ for drop_data in &mut scope.drops { + // Only full value drops are emitted in the diverging path, + // not StorageDead. + let cached_block = match drop_data.kind { + DropKind::Value { ref mut cached_block } => cached_block, + DropKind::Storage => continue + }; + target = if let Some(cached_block) = *cached_block { + cached_block + } else { + let block = cfg.start_new_cleanup_block(); + cfg.terminate(block, source_info(drop_data.span), + TerminatorKind::Drop { + location: drop_data.location.clone(), + target: target, + unwind: None + }); + *cached_block = Some(block); + block }; - Constant { - span: DUMMY_SP, - ty: self.hir.tcx().lookup_item_type(funcdid).ty, - literal: Literal::Item { - def_id: funcdid, - kind: ItemKind::Function, - substs: self.hir.tcx().mk_substs(Substs::empty()) - } - } } - fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) { - let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo); - (Constant { - span: DUMMY_SP, - ty: self.hir.tcx().mk_static_str(), - literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name)) - }, Constant { - span: DUMMY_SP, - ty: self.hir.tcx().types.u32, - literal: self.hir.usize_literal(span_lines.line) - }) + target +} + +fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + unit_temp: &Lvalue<'tcx>, + data: &FreeData<'tcx>, + target: BasicBlock) + -> TerminatorKind<'tcx> { + let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); + let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]); + TerminatorKind::Call { + func: Operand::Constant(Constant { + span: data.span, + ty: tcx.item_type(free_func).subst(tcx, substs), + literal: Literal::Item { + def_id: free_func, + substs: substs + } + }), + args: vec![Operand::Consume(data.value.clone())], + destination: Some((unit_temp.clone(), target)), + cleanup: None } } diff --git a/src/librustc_mir/build/stmt.rs b/src/librustc_mir/build/stmt.rs deleted file mode 100644 index 
c70b22893ae2e..0000000000000 --- a/src/librustc_mir/build/stmt.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use build::{BlockAnd, BlockAndExtension, Builder}; -use hair::*; -use rustc::mir::repr::*; - -impl<'a,'tcx> Builder<'a,'tcx> { - pub fn stmts(&mut self, mut block: BasicBlock, stmts: Vec>) -> BlockAnd<()> { - // This convoluted structure is to avoid using recursion as we walk down a list - // of statements. Basically, the structure we get back is something like: - // - // let x = in { - // let y = in { - // expr1; - // expr2; - // } - // } - // - // To process this, we keep a stack of (Option, - // vec::IntoIter) pairs. At each point we pull off the - // top most pair and extract one statement from the - // iterator. Once it's complete, we pop the scope from the - // first half the pair. 
- let this = self; - let mut stmt_lists = vec![(None, stmts.into_iter())]; - while !stmt_lists.is_empty() { - let stmt = { - let &mut (_, ref mut stmts) = stmt_lists.last_mut().unwrap(); - stmts.next() - }; - - let stmt = match stmt { - Some(stmt) => stmt, - None => { - let (extent, _) = stmt_lists.pop().unwrap(); - if let Some(extent) = extent { - this.pop_scope(extent, block); - } - continue - } - }; - - let Stmt { span, kind } = this.hir.mirror(stmt); - match kind { - StmtKind::Let { remainder_scope, init_scope, pattern, initializer, stmts } => { - this.push_scope(remainder_scope, block); - stmt_lists.push((Some(remainder_scope), stmts.into_iter())); - unpack!(block = this.in_scope(init_scope, block, move |this| { - // FIXME #30046 ^~~~ - match initializer { - Some(initializer) => { - this.expr_into_pattern(block, remainder_scope, pattern, initializer) - } - None => { - this.declare_bindings(remainder_scope, &pattern); - block.unit() - } - } - })); - } - - StmtKind::Expr { scope, expr } => { - unpack!(block = this.in_scope(scope, block, |this| { - let expr = this.hir.mirror(expr); - let temp = this.temp(expr.ty.clone()); - unpack!(block = this.into(&temp, block, expr)); - this.cfg.push_drop(block, span, DropKind::Deep, &temp); - block.unit() - })); - } - } - } - block.unit() - } -} diff --git a/src/librustc_mir/def_use.rs b/src/librustc_mir/def_use.rs new file mode 100644 index 0000000000000..d20d50c561140 --- /dev/null +++ b/src/librustc_mir/def_use.rs @@ -0,0 +1,163 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Def-use analysis. 
+ +use rustc::mir::{Local, Location, Lvalue, Mir}; +use rustc::mir::visit::{LvalueContext, MutVisitor, Visitor}; +use rustc_data_structures::indexed_vec::IndexVec; +use std::marker::PhantomData; +use std::mem; + +pub struct DefUseAnalysis<'tcx> { + info: IndexVec>, +} + +#[derive(Clone)] +pub struct Info<'tcx> { + pub defs_and_uses: Vec>, +} + +#[derive(Clone)] +pub struct Use<'tcx> { + pub context: LvalueContext<'tcx>, + pub location: Location, +} + +impl<'tcx> DefUseAnalysis<'tcx> { + pub fn new(mir: &Mir<'tcx>) -> DefUseAnalysis<'tcx> { + DefUseAnalysis { + info: IndexVec::from_elem_n(Info::new(), mir.local_decls.len()), + } + } + + pub fn analyze(&mut self, mir: &Mir<'tcx>) { + let mut finder = DefUseFinder { + info: mem::replace(&mut self.info, IndexVec::new()), + }; + finder.visit_mir(mir); + self.info = finder.info + } + + pub fn local_info(&self, local: Local) -> &Info<'tcx> { + &self.info[local] + } + + pub fn local_info_mut(&mut self, local: Local) -> &mut Info<'tcx> { + &mut self.info[local] + } + + fn mutate_defs_and_uses(&self, local: Local, mir: &mut Mir<'tcx>, mut callback: F) + where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, + LvalueContext<'tcx>, + Location) { + for lvalue_use in &self.info[local].defs_and_uses { + MutateUseVisitor::new(local, + &mut callback, + mir).visit_location(mir, lvalue_use.location) + } + } + + /// FIXME(pcwalton): This should update the def-use chains. 
+ pub fn replace_all_defs_and_uses_with(&self, + local: Local, + mir: &mut Mir<'tcx>, + new_lvalue: Lvalue<'tcx>) { + self.mutate_defs_and_uses(local, mir, |lvalue, _, _| *lvalue = new_lvalue.clone()) + } +} + +struct DefUseFinder<'tcx> { + info: IndexVec>, +} + +impl<'tcx> DefUseFinder<'tcx> { + fn lvalue_mut_info(&mut self, lvalue: &Lvalue<'tcx>) -> Option<&mut Info<'tcx>> { + let info = &mut self.info; + + if let Lvalue::Local(local) = *lvalue { + Some(&mut info[local]) + } else { + None + } + } +} + +impl<'tcx> Visitor<'tcx> for DefUseFinder<'tcx> { + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if let Some(ref mut info) = self.lvalue_mut_info(lvalue) { + info.defs_and_uses.push(Use { + context: context, + location: location, + }) + } + self.super_lvalue(lvalue, context, location) + } +} + +impl<'tcx> Info<'tcx> { + fn new() -> Info<'tcx> { + Info { + defs_and_uses: vec![], + } + } + + pub fn def_count(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| lvalue_use.context.is_mutating_use()).count() + } + + pub fn def_count_not_including_drop(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| { + lvalue_use.context.is_mutating_use() && !lvalue_use.context.is_drop() + }).count() + } + + pub fn use_count(&self) -> usize { + self.defs_and_uses.iter().filter(|lvalue_use| { + lvalue_use.context.is_nonmutating_use() + }).count() + } +} + +struct MutateUseVisitor<'tcx, F> { + query: Local, + callback: F, + phantom: PhantomData<&'tcx ()>, +} + +impl<'tcx, F> MutateUseVisitor<'tcx, F> { + fn new(query: Local, callback: F, _: &Mir<'tcx>) + -> MutateUseVisitor<'tcx, F> + where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { + MutateUseVisitor { + query: query, + callback: callback, + phantom: PhantomData, + } + } +} + +impl<'tcx, F> MutVisitor<'tcx> for MutateUseVisitor<'tcx, F> + where F: for<'a> FnMut(&'a mut Lvalue<'tcx>, LvalueContext<'tcx>, Location) { 
+ fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if let Lvalue::Local(local) = *lvalue { + if local == self.query { + (self.callback)(lvalue, context, location) + } + } + self.super_lvalue(lvalue, context, location) + } +} diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs new file mode 100644 index 0000000000000..eb16812af9b02 --- /dev/null +++ b/src/librustc_mir/diagnostics.rs @@ -0,0 +1,404 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +register_long_diagnostics! { + +E0010: r##" +The value of statics and constants must be known at compile time, and they live +for the entire lifetime of a program. Creating a boxed value allocates memory on +the heap at runtime, and therefore cannot be done at compile time. Erroneous +code example: + +```compile_fail,E0010 +#![feature(box_syntax)] + +const CON : Box = box 0; +``` +"##, + +E0013: r##" +Static and const variables can refer to other const variables. But a const +variable cannot refer to a static variable. For example, `Y` cannot refer to +`X` here: + +```compile_fail,E0013 +static X: i32 = 42; +const Y: i32 = X; +``` + +To fix this, the value can be extracted as a const and then used: + +``` +const A: i32 = 42; +static X: i32 = A; +const Y: i32 = A; +``` +"##, + +// FIXME(#24111) Change the language here when const fn stabilizes +E0015: r##" +The only functions that can be called in static or constant expressions are +`const` functions, and struct/enum constructors. `const` functions are only +available on a nightly compiler. 
Rust currently does not support more general +compile-time function execution. + +``` +const FOO: Option = Some(1); // enum constructor +struct Bar {x: u8} +const BAR: Bar = Bar {x: 1}; // struct constructor +``` + +See [RFC 911] for more details on the design of `const fn`s. + +[RFC 911]: https://github.com/rust-lang/rfcs/blob/master/text/0911-const-fn.md +"##, + +E0016: r##" +Blocks in constants may only contain items (such as constant, function +definition, etc...) and a tail expression. Erroneous code example: + +```compile_fail,E0016 +const FOO: i32 = { let x = 0; x }; // 'x' isn't an item! +``` + +To avoid it, you have to replace the non-item object: + +``` +const FOO: i32 = { const X : i32 = 0; X }; +``` +"##, + +E0017: r##" +References in statics and constants may only refer to immutable values. +Erroneous code example: + +```compile_fail,E0017 +static X: i32 = 1; +const C: i32 = 2; + +// these three are not allowed: +const CR: &'static mut i32 = &mut C; +static STATIC_REF: &'static mut i32 = &mut X; +static CONST_REF: &'static mut i32 = &mut C; +``` + +Statics are shared everywhere, and if they refer to mutable data one might +violate memory safety since holding multiple mutable references to shared data +is not allowed. + +If you really want global mutable state, try using `static mut` or a global +`UnsafeCell`. +"##, + +E0018: r##" + +The value of static and constant integers must be known at compile time. You +can't cast a pointer to an integer because the address of a pointer can +vary. + +For example, if you write: + +```compile_fail,E0018 +static MY_STATIC: u32 = 42; +static MY_STATIC_ADDR: usize = &MY_STATIC as *const _ as usize; +static WHAT: usize = (MY_STATIC_ADDR^17) + MY_STATIC_ADDR; +``` + +Then `MY_STATIC_ADDR` would contain the address of `MY_STATIC`. 
However, +the address can change when the program is linked, as well as change +between different executions due to ASLR, and many linkers would +not be able to calculate the value of `WHAT`. + +On the other hand, static and constant pointers can point either to +a known numeric address or to the address of a symbol. + +``` +static MY_STATIC_ADDR: &'static u32 = &MY_STATIC; +// ... and also +static MY_STATIC_ADDR2: *const u32 = &MY_STATIC; + +const CONST_ADDR: *const u8 = 0x5f3759df as *const u8; +``` + +This does not pose a problem by itself because they can't be +accessed directly. +"##, + +E0019: r##" +A function call isn't allowed in the const's initialization expression +because the expression's value must be known at compile-time. Erroneous code +example: + +```compile_fail +enum Test { + V1 +} + +impl Test { + fn test(&self) -> i32 { + 12 + } +} + +fn main() { + const FOO: Test = Test::V1; + + const A: i32 = FOO.test(); // You can't call Test::func() here! +} +``` + +Remember: you can't use a function call inside a const's initialization +expression! However, you can totally use it anywhere else: + +``` +fn main() { + const FOO: Test = Test::V1; + + FOO.func(); // here is good + let x = FOO.func(); // or even here! +} +``` +"##, + +E0022: r##" +Constant functions are not allowed to mutate anything. Thus, binding to an +argument with a mutable pattern is not allowed. For example, + +```compile_fail +const fn foo(mut x: u8) { + // do stuff +} +``` + +Is incorrect because the function body may not mutate `x`. + +Remove any mutable bindings from the argument list to fix this error. In case +you need to mutate the argument, try lazily initializing a global variable +instead of using a `const fn`, or refactoring the code to a functional style to +avoid mutation if possible. +"##, + +E0394: r##" +A static was referred to by value by another static. 
+ +Erroneous code examples: + +```compile_fail,E0394 +static A: u32 = 0; +static B: u32 = A; // error: cannot refer to other statics by value, use the + // address-of operator or a constant instead +``` + +A static cannot be referred by value. To fix this issue, either use a +constant: + +``` +const A: u32 = 0; // `A` is now a constant +static B: u32 = A; // ok! +``` + +Or refer to `A` by reference: + +``` +static A: u32 = 0; +static B: &'static u32 = &A; // ok! +``` +"##, + +E0395: r##" +The value assigned to a constant scalar must be known at compile time, +which is not the case when comparing raw pointers. + +Erroneous code example: + +```compile_fail,E0395 +static FOO: i32 = 42; +static BAR: i32 = 42; + +static BAZ: bool = { (&FOO as *const i32) == (&BAR as *const i32) }; +// error: raw pointers cannot be compared in statics! +``` + +The address assigned by the linker to `FOO` and `BAR` may or may not +be identical, so the value of `BAZ` can't be determined. + +If you want to do the comparison, please do it at run-time. + +For example: + +``` +static FOO: i32 = 42; +static BAR: i32 = 42; + +let baz: bool = { (&FOO as *const i32) == (&BAR as *const i32) }; +// baz isn't a constant expression so it's ok +``` +"##, + +E0396: r##" +The value behind a raw pointer can't be determined at compile-time +(or even link-time), which means it can't be used in a constant +expression. Erroneous code example: + +```compile_fail,E0396 +const REG_ADDR: *const u8 = 0x5f3759df as *const u8; + +const VALUE: u8 = unsafe { *REG_ADDR }; +// error: raw pointers cannot be dereferenced in constants +``` + +A possible fix is to dereference your pointer at some point in run-time. + +For example: + +``` +const REG_ADDR: *const u8 = 0x5f3759df as *const u8; + +let reg_value = unsafe { *REG_ADDR }; +``` +"##, + +E0492: r##" +A borrow of a constant containing interior mutability was attempted. 
Erroneous +code example: + +```compile_fail,E0492 +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + +const A: AtomicUsize = ATOMIC_USIZE_INIT; +static B: &'static AtomicUsize = &A; +// error: cannot borrow a constant which contains interior mutability, create a +// static instead +``` + +A `const` represents a constant value that should never change. If one takes +a `&` reference to the constant, then one is taking a pointer to some memory +location containing the value. Normally this is perfectly fine: most values +can't be changed via a shared `&` pointer, but interior mutability would allow +it. That is, a constant value could be mutated. On the other hand, a `static` is +explicitly a single memory location, which can be mutated at will. + +So, in order to solve this error, either use statics which are `Sync`: + +``` +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + +static A: AtomicUsize = ATOMIC_USIZE_INIT; +static B: &'static AtomicUsize = &A; // ok! +``` + +You can also have this error while using a cell type: + +```compile_fail,E0492 +#![feature(const_fn)] + +use std::cell::Cell; + +const A: Cell = Cell::new(1); +const B: &'static Cell = &A; +// error: cannot borrow a constant which contains interior mutability, create +// a static instead + +// or: +struct C { a: Cell } + +const D: C = C { a: Cell::new(1) }; +const E: &'static Cell = &D.a; // error + +// or: +const F: &'static C = &D; // error +``` + +This is because cell types do operations that are not thread-safe. Due to this, +they don't implement Sync and thus can't be placed in statics. 
In this +case, `StaticMutex` would work just fine, but it isn't stable yet: +https://doc.rust-lang.org/nightly/std/sync/struct.StaticMutex.html + +However, if you still wish to use these types, you can achieve this by an unsafe +wrapper: + +``` +#![feature(const_fn)] + +use std::cell::Cell; +use std::marker::Sync; + +struct NotThreadSafe { + value: Cell, +} + +unsafe impl Sync for NotThreadSafe {} + +static A: NotThreadSafe = NotThreadSafe { value : Cell::new(1) }; +static B: &'static NotThreadSafe = &A; // ok! +``` + +Remember this solution is unsafe! You will have to ensure that accesses to the +cell are synchronized. +"##, + +E0493: r##" +A type with a destructor was assigned to an invalid type of variable. Erroneous +code example: + +```compile_fail,E0493 +struct Foo { + a: u32 +} + +impl Drop for Foo { + fn drop(&mut self) {} +} + +const F : Foo = Foo { a : 0 }; +// error: constants are not allowed to have destructors +static S : Foo = Foo { a : 0 }; +// error: destructors in statics are an unstable feature +``` + +To solve this issue, please use a type which does allow the usage of type with +destructors. +"##, + +E0494: r##" +A reference of an interior static was assigned to another const/static. +Erroneous code example: + +```compile_fail,E0494 +struct Foo { + a: u32 +} + +static S : Foo = Foo { a : 0 }; +static A : &'static u32 = &S.a; +// error: cannot refer to the interior of another static, use a +// constant instead +``` + +The "base" variable has to be a const if you want another static/const variable +to refer to one of its fields. Example: + +``` +struct Foo { + a: u32 +} + +const S : Foo = Foo { a : 0 }; +static A : &'static u32 = &S.a; // ok! +``` +"##, + +} + +register_diagnostics! 
{ + E0526, // shuffle indices are not constant +} diff --git a/src/librustc_mir/graphviz.rs b/src/librustc_mir/graphviz.rs index 1b8fe6505583c..dd4dd4699d858 100644 --- a/src/librustc_mir/graphviz.rs +++ b/src/librustc_mir/graphviz.rs @@ -9,73 +9,115 @@ // except according to those terms. use dot; -use rustc::mir::repr::*; -use rustc::middle::ty; +use rustc::hir::def_id::DefId; +use rustc::mir::*; +use rustc::ty::TyCtxt; use std::fmt::Debug; use std::io::{self, Write}; +use syntax::ast::NodeId; + +use rustc_data_structures::indexed_vec::Idx; + +/// Write a graphviz DOT graph of a list of MIRs. +pub fn write_mir_graphviz<'a, 'b, 'tcx, W, I>(tcx: TyCtxt<'b, 'tcx, 'tcx>, + iter: I, + w: &mut W) + -> io::Result<()> + where W: Write, I: Iterator +{ + for def_id in iter { + let nodeid = tcx.map.as_local_node_id(def_id).unwrap(); + let mir = &tcx.item_mir(def_id); + + writeln!(w, "digraph Mir_{} {{", nodeid)?; + + // Global graph properties + writeln!(w, r#" graph [fontname="monospace"];"#)?; + writeln!(w, r#" node [fontname="monospace"];"#)?; + writeln!(w, r#" edge [fontname="monospace"];"#)?; + + // Graph label + write_graph_label(tcx, nodeid, mir, w)?; + + // Nodes + for (block, _) in mir.basic_blocks().iter_enumerated() { + write_node(block, mir, w)?; + } -/// Write a graphviz DOT graph for the given MIR. -pub fn write_mir_graphviz(mir: &Mir, w: &mut W) -> io::Result<()> { - try!(writeln!(w, "digraph Mir {{")); - - // Global graph properties - try!(writeln!(w, r#" graph [fontname="monospace"];"#)); - try!(writeln!(w, r#" node [fontname="monospace"];"#)); - try!(writeln!(w, r#" edge [fontname="monospace"];"#)); - - // Graph label - try!(write_graph_label(mir, w)); - - // Nodes - for block in mir.all_basic_blocks() { - try!(write_node(block, mir, w)); - } - - // Edges - for source in mir.all_basic_blocks() { - try!(write_edges(source, mir, w)); + // Edges + for (source, _) in mir.basic_blocks().iter_enumerated() { + write_edges(source, mir, w)?; + } + writeln!(w, "}}")? 
} - - writeln!(w, "}}") + Ok(()) } -/// Write a graphviz DOT node for the given basic block. -fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { - let data = mir.basic_block_data(block); - - // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables. - try!(write!(w, r#" {} [shape="none", label=<"#, node(block))); - try!(write!(w, r#""#)); +/// Write a graphviz HTML-styled label for the given basic block, with +/// all necessary escaping already performed. (This is suitable for +/// emitting directly, as is done in this module, or for use with the +/// LabelText::HtmlStr from libgraphviz.) +/// +/// `init` and `fini` are callbacks for emitting additional rows of +/// data (using HTML enclosed with `` in the emitted text). +pub fn write_node_label(block: BasicBlock, + mir: &Mir, + w: &mut W, + num_cols: u32, + init: INIT, + fini: FINI) -> io::Result<()> + where INIT: Fn(&mut W) -> io::Result<()>, + FINI: Fn(&mut W) -> io::Result<()> +{ + let data = &mir[block]; + + write!(w, r#"
"#)?; // Basic block number at the top. - try!(write!(w, r#""#, block.index())); + write!(w, r#""#, + attrs=r#"bgcolor="gray" align="center""#, + colspan=num_cols, + blk=block.index())?; + + init(w)?; // List of statements in the middle. if !data.statements.is_empty() { - try!(write!(w, r#"")); + write!(w, "")?; } // Terminator head at the bottom, not including the list of successor blocks. Those will be // displayed as labels on the edges between blocks. let mut terminator_head = String::new(); - data.terminator().fmt_head(&mut terminator_head).unwrap(); - try!(write!(w, r#""#, dot::escape_html(&terminator_head))); + data.terminator().kind.fmt_head(&mut terminator_head).unwrap(); + write!(w, r#""#, dot::escape_html(&terminator_head))?; + + fini(w)?; - // Close the table, node label, and the node itself. - writeln!(w, "
{}
{blk}
"#)); + write!(w, r#"
"#)?; for statement in &data.statements { - try!(write!(w, "{}
", escape(statement))); + write!(w, "{}
", escape(statement))?; } - try!(write!(w, "
{}
{}
>];") + // Close the table + writeln!(w, "") +} + +/// Write a graphviz DOT node for the given basic block. +fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { + // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables. + write!(w, r#" {} [shape="none", label=<"#, node(block))?; + write_node_label(block, mir, w, 1, |_| Ok(()), |_| Ok(()))?; + // Close the node label and the node itself. + writeln!(w, ">];") } /// Write graphviz DOT edges with labels between the given basic block and all of its successors. fn write_edges(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { - let terminator = &mir.basic_block_data(source).terminator(); - let labels = terminator.fmt_successor_labels(); + let terminator = mir[source].terminator(); + let labels = terminator.kind.fmt_successor_labels(); for (&target, label) in terminator.successors().iter().zip(labels) { - try!(writeln!(w, r#" {} -> {} [label="{}"];"#, node(source), node(target), label)); + writeln!(w, r#" {} -> {} [label="{}"];"#, node(source), node(target), label)?; } Ok(()) @@ -84,41 +126,39 @@ fn write_edges(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result /// Write the graphviz DOT label for the overall graph. This is essentially a block of text that /// will appear below the graph, showing the type of the `fn` this MIR represents and the types of /// all the variables and temporaries. -fn write_graph_label(mir: &Mir, w: &mut W) -> io::Result<()> { - try!(write!(w, " label=(tcx: TyCtxt<'a, 'tcx, 'tcx>, + nid: NodeId, + mir: &Mir, + w: &mut W) + -> io::Result<()> { + write!(w, " label= 0 { - try!(write!(w, ", ")); + write!(w, ", ")?; } - try!(write!(w, "{:?}: {}", Lvalue::Arg(i as u32), escape(&arg.ty))); + write!(w, "{:?}: {}", Lvalue::Local(arg), escape(&mir.local_decls[arg].ty))?; } - try!(write!(w, ") -> ")); - - // fn return type. 
- match mir.return_ty { - ty::FnOutput::FnConverging(ty) => try!(write!(w, "{}", escape(ty))), - ty::FnOutput::FnDiverging => try!(write!(w, "!")), - } + write!(w, ") -> {}", escape(mir.return_ty))?; + write!(w, r#"
"#)?; - try!(write!(w, r#"
"#)); + for local in mir.vars_and_temps_iter() { + let decl = &mir.local_decls[local]; - // User variable types (including the user's name in a comment). - for (i, var) in mir.var_decls.iter().enumerate() { - try!(write!(w, "let ")); - if var.mutability == Mutability::Mut { - try!(write!(w, "mut ")); + write!(w, "let ")?; + if decl.mutability == Mutability::Mut { + write!(w, "mut ")?; } - try!(write!(w, r#"{:?}: {}; // {}
"#, - Lvalue::Var(i as u32), escape(&var.ty), var.name)); - } - // Compiler-introduced temporary types. - for (i, temp) in mir.temp_decls.iter().enumerate() { - try!(write!(w, r#"let mut {:?}: {};
"#, - Lvalue::Temp(i as u32), escape(&temp.ty))); + if let Some(name) = decl.name { + write!(w, r#"{:?}: {}; // {}
"#, + Lvalue::Local(local), escape(&decl.ty), name)?; + } else { + write!(w, r#"let mut {:?}: {};
"#, + Lvalue::Local(local), escape(&decl.ty))?; + } } writeln!(w, ">;") diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index 49617ed5171bd..cb69de2cb3cac 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -12,71 +12,61 @@ use hair::*; use hair::cx::Cx; use hair::cx::to_ref::ToRef; use rustc::middle::region::{BlockRemainder, CodeExtentData}; -use rustc_front::hir; +use rustc::hir; use syntax::ast; impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { type Output = Block<'tcx>; - fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Block<'tcx> { + fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Block<'tcx> { // We have to eagerly translate the "spine" of the statements // in order to get the lexical scoping correctly. - let stmts = mirror_stmts(cx, self.id, self.stmts.iter().enumerate()); + let stmts = mirror_stmts(cx, self.id, &*self.stmts); Block { extent: cx.tcx.region_maps.node_extent(self.id), span: self.span, stmts: stmts, - expr: self.expr.to_ref(), + expr: self.expr.to_ref() } } } -fn mirror_stmts<'a,'tcx:'a,STMTS>(cx: &mut Cx<'a,'tcx>, - block_id: ast::NodeId, - mut stmts: STMTS) - -> Vec> - where STMTS: Iterator +fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + block_id: ast::NodeId, + stmts: &'tcx [hir::Stmt]) + -> Vec> { let mut result = vec![]; - while let Some((index, stmt)) = stmts.next() { + for (index, stmt) in stmts.iter().enumerate() { match stmt.node { hir::StmtExpr(ref expr, id) | hir::StmtSemi(ref expr, id) => - result.push( - StmtRef::Mirror( - Box::new(Stmt { span: stmt.span, - kind: StmtKind::Expr { - scope: cx.tcx.region_maps.node_extent(id), - expr: expr.to_ref() } }))), - - hir::StmtDecl(ref decl, id) => { - match decl.node { - hir::DeclItem(..) 
=> { /* ignore for purposes of the MIR */ } - hir::DeclLocal(ref local) => { - let remainder_extent = CodeExtentData::Remainder(BlockRemainder { - block: block_id, - first_statement_index: index as u32, - }); - let remainder_extent = - cx.tcx.region_maps.lookup_code_extent(remainder_extent); - - // pull in all following statements, since - // they are within the scope of this let: - let following_stmts = mirror_stmts(cx, block_id, stmts); - - let pattern = cx.irrefutable_pat(&local.pat); - result.push(StmtRef::Mirror(Box::new(Stmt { - span: stmt.span, - kind: StmtKind::Let { - remainder_scope: remainder_extent, - init_scope: cx.tcx.region_maps.node_extent(id), - pattern: pattern, - initializer: local.init.to_ref(), - stmts: following_stmts, - }, - }))); - - return result; + result.push(StmtRef::Mirror(Box::new(Stmt { + span: stmt.span, + kind: StmtKind::Expr { + scope: cx.tcx.region_maps.node_extent(id), + expr: expr.to_ref() } + }))), + hir::StmtDecl(ref decl, id) => match decl.node { + hir::DeclItem(..) 
=> { /* ignore for purposes of the MIR */ } + hir::DeclLocal(ref local) => { + let remainder_extent = CodeExtentData::Remainder(BlockRemainder { + block: block_id, + first_statement_index: index as u32, + }); + let remainder_extent = + cx.tcx.region_maps.lookup_code_extent(remainder_extent); + + let pattern = Pattern::from_hir(cx.tcx, &local.pat); + result.push(StmtRef::Mirror(Box::new(Stmt { + span: stmt.span, + kind: StmtKind::Let { + remainder_scope: remainder_extent, + init_scope: cx.tcx.region_maps.node_extent(id), + pattern: pattern, + initializer: local.init.to_ref(), + }, + }))); } } } @@ -84,8 +74,10 @@ fn mirror_stmts<'a,'tcx:'a,STMTS>(cx: &mut Cx<'a,'tcx>, return result; } -pub fn to_expr_ref<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, block: &'tcx hir::Block) -> ExprRef<'tcx> { - let block_ty = cx.tcx.node_id_to_type(block.id); +pub fn to_expr_ref<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + block: &'tcx hir::Block) + -> ExprRef<'tcx> { + let block_ty = cx.tcx.tables().node_id_to_type(block.id); let temp_lifetime = cx.tcx.region_maps.temporary_scope(block.id); let expr = Expr { ty: block_ty, diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index 8090fca66bb62..e850f6c4b045c 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -9,396 +9,39 @@ // except according to those terms. 
use hair::*; -use rustc_data_structures::fnv::FnvHashMap; +use rustc_data_structures::indexed_vec::Idx; +use rustc_const_math::ConstInt; use hair::cx::Cx; use hair::cx::block; use hair::cx::to_ref::ToRef; -use rustc::front::map; -use rustc::middle::def; -use rustc::middle::region::CodeExtent; -use rustc::middle::pat_util; -use rustc::middle::ty::{self, VariantDef, Ty}; -use rustc::mir::repr::*; -use rustc_front::hir; -use rustc_front::util as hir_util; -use syntax::parse::token; +use rustc::hir::map; +use rustc::hir::def::{Def, CtorKind}; +use rustc::middle::const_val::ConstVal; +use rustc_const_eval as const_eval; +use rustc::ty::{self, AdtKind, VariantDef, Ty}; +use rustc::ty::cast::CastKind as TyCastKind; +use rustc::hir; use syntax::ptr::P; impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { type Output = Expr<'tcx>; - fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Expr<'tcx> { - debug!("Expr::make_mirror(): id={}, span={:?}", self.id, self.span); - - let expr_ty = cx.tcx.expr_ty(self); // note: no adjustments (yet)! - - let kind = match self.node { - // Here comes the interesting stuff: - hir::ExprMethodCall(_, _, ref args) => { - // Rewrite a.b(c) into UFCS form like Trait::b(a, c) - let expr = method_callee(cx, self, ty::MethodCall::expr(self.id)); - let args = args.iter() - .map(|e| e.to_ref()) - .collect(); - ExprKind::Call { - ty: expr.ty, - fun: expr.to_ref(), - args: args, - } - } - - hir::ExprCall(ref fun, ref args) => { - if cx.tcx.is_method_call(self.id) { - // The callee is something implementing Fn, FnMut, or FnOnce. - // Find the actual method implementation being called and - // build the appropriate UFCS call expression with the - // callee-object as self parameter. 
- let method = method_callee(cx, self, ty::MethodCall::expr(self.id)); - let mut argrefs = vec![fun.to_ref()]; - argrefs.extend(args.iter().map(|a| a.to_ref())); - - ExprKind::Call { - ty: method.ty, - fun: method.to_ref(), - args: argrefs, - } - } else { - let adt_data = if let hir::ExprPath(..) = fun.node { - // Tuple-like ADTs are represented as ExprCall. We convert them here. - expr_ty.ty_adt_def().and_then(|adt_def|{ - match cx.tcx.def_map.borrow()[&fun.id].full_def() { - def::DefVariant(_, variant_id, false) => { - Some((adt_def, adt_def.variant_index_with_id(variant_id))) - }, - def::DefStruct(_) => { - Some((adt_def, 0)) - }, - _ => None - } - }) - } else { None }; - if let Some((adt_def, index)) = adt_data { - let substs = cx.tcx.mk_substs(cx.tcx.node_id_item_substs(fun.id).substs); - let field_refs = args.iter().enumerate().map(|(idx, e)| FieldExprRef { - name: Field::new(idx), - expr: e.to_ref() - }).collect(); - ExprKind::Adt { - adt_def: adt_def, - substs: substs, - variant_index: index, - fields: field_refs, - base: None - } - } else { - ExprKind::Call { - ty: cx.tcx.node_id_to_type(fun.id), - fun: fun.to_ref(), - args: args.to_ref(), - } - } - } - } - - hir::ExprAddrOf(mutbl, ref expr) => { - let region = match expr_ty.sty { - ty::TyRef(r, _) => r, - _ => cx.tcx.sess.span_bug(expr.span, "type of & not region"), - }; - ExprKind::Borrow { - region: *region, - borrow_kind: to_borrow_kind(mutbl), - arg: expr.to_ref(), - } - } - - hir::ExprBlock(ref blk) => { - ExprKind::Block { body: &**blk } - } - - hir::ExprAssign(ref lhs, ref rhs) => { - ExprKind::Assign { - lhs: lhs.to_ref(), - rhs: rhs.to_ref(), - } - } - - hir::ExprAssignOp(op, ref lhs, ref rhs) => { - let op = bin_op(op.node); - ExprKind::AssignOp { - op: op, - lhs: lhs.to_ref(), - rhs: rhs.to_ref(), - } - } - - hir::ExprLit(..) 
=> ExprKind::Literal { - literal: cx.const_eval_literal(self) - }, - - hir::ExprBinary(op, ref lhs, ref rhs) => { - if cx.tcx.is_method_call(self.id) { - let pass_args = if hir_util::is_by_value_binop(op.node) { - PassArgs::ByValue - } else { - PassArgs::ByRef - }; - overloaded_operator(cx, self, ty::MethodCall::expr(self.id), - pass_args, lhs.to_ref(), vec![rhs]) - } else { - // FIXME overflow - match op.node { - hir::BinOp_::BiAnd => { - ExprKind::LogicalOp { - op: LogicalOp::And, - lhs: lhs.to_ref(), - rhs: rhs.to_ref(), - } - } - hir::BinOp_::BiOr => { - ExprKind::LogicalOp { - op: LogicalOp::Or, - lhs: lhs.to_ref(), - rhs: rhs.to_ref(), - } - } - _ => { - let op = bin_op(op.node); - ExprKind::Binary { - op: op, - lhs: lhs.to_ref(), - rhs: rhs.to_ref(), - } - } - } - } - } - - hir::ExprIndex(ref lhs, ref index) => { - if cx.tcx.is_method_call(self.id) { - overloaded_lvalue(cx, self, ty::MethodCall::expr(self.id), - PassArgs::ByValue, lhs.to_ref(), vec![index]) - } else { - ExprKind::Index { - lhs: lhs.to_ref(), - index: index.to_ref(), - } - } - } - - hir::ExprUnary(hir::UnOp::UnDeref, ref arg) => { - if cx.tcx.is_method_call(self.id) { - overloaded_lvalue(cx, self, ty::MethodCall::expr(self.id), - PassArgs::ByValue, arg.to_ref(), vec![]) - } else { - ExprKind::Deref { arg: arg.to_ref() } - } - } - - hir::ExprUnary(op, ref arg) => { - if cx.tcx.is_method_call(self.id) { - overloaded_operator(cx, self, ty::MethodCall::expr(self.id), - PassArgs::ByValue, arg.to_ref(), vec![]) - } else { - // FIXME overflow - let op = match op { - hir::UnOp::UnNot => UnOp::Not, - hir::UnOp::UnNeg => UnOp::Neg, - hir::UnOp::UnDeref => { - cx.tcx.sess.span_bug( - self.span, - "UnDeref should have been handled elsewhere"); - } - }; - ExprKind::Unary { - op: op, - arg: arg.to_ref(), - } - } - } - - hir::ExprStruct(_, ref fields, ref base) => { - match expr_ty.sty { - ty::TyStruct(adt, substs) => { - let field_refs = field_refs(&adt.variants[0], fields); - ExprKind::Adt { - adt_def: 
adt, - variant_index: 0, - substs: substs, - fields: field_refs, - base: base.to_ref(), - } - } - ty::TyEnum(adt, substs) => { - match cx.tcx.def_map.borrow()[&self.id].full_def() { - def::DefVariant(enum_id, variant_id, _) => { - debug_assert!(adt.did == enum_id); - let index = adt.variant_index_with_id(variant_id); - let field_refs = field_refs(&adt.variants[index], fields); - ExprKind::Adt { - adt_def: adt, - variant_index: index, - substs: substs, - fields: field_refs, - base: base.to_ref(), - } - } - ref def => { - cx.tcx.sess.span_bug( - self.span, - &format!("unexpected def: {:?}", def)); - } - } - } - _ => { - cx.tcx.sess.span_bug( - self.span, - &format!("unexpected type for struct literal: {:?}", expr_ty)); - } - } - } - - hir::ExprClosure(..) => { - let closure_ty = cx.tcx.expr_ty(self); - let (def_id, substs) = match closure_ty.sty { - ty::TyClosure(def_id, ref substs) => (def_id, substs), - _ => { - cx.tcx.sess.span_bug(self.span, - &format!("closure expr w/o closure type: {:?}", - closure_ty)); - } - }; - let upvars = cx.tcx.with_freevars(self.id, |freevars| { - freevars.iter() - .enumerate() - .map(|(i, fv)| capture_freevar(cx, self, fv, substs.upvar_tys[i])) - .collect() - }); - ExprKind::Closure { - closure_id: def_id, - substs: &**substs, - upvars: upvars, - } - } - - hir::ExprRange(ref start, ref end) => { - let range_ty = cx.tcx.expr_ty(self); - let (adt_def, substs) = match range_ty.sty { - ty::TyStruct(adt_def, substs) => (adt_def, substs), - _ => { - cx.tcx.sess.span_bug(self.span, "unexpanded ast"); - } - }; - - let field_expr_ref = |s: &'tcx P, name: &str| { - let name = token::intern(name); - let index = adt_def.variants[0].index_of_field_named(name).unwrap(); - FieldExprRef { name: Field::new(index), expr: s.to_ref() } - }; - - let start_field = start.as_ref() - .into_iter() - .map(|s| field_expr_ref(s, "start")); - - let end_field = end.as_ref() - .into_iter() - .map(|e| field_expr_ref(e, "end")); - - ExprKind::Adt { - adt_def: adt_def, 
- variant_index: 0, - substs: substs, - fields: start_field.chain(end_field).collect(), - base: None, - } - } - - hir::ExprPath(..) => { - convert_path_expr(cx, self) - } - - hir::ExprInlineAsm(ref asm) => { - ExprKind::InlineAsm { asm: asm } - } - - // Now comes the rote stuff: - - hir::ExprRepeat(ref v, ref c) => ExprKind::Repeat { - value: v.to_ref(), - count: Expr { - ty: cx.tcx.expr_ty(c), - temp_lifetime: None, - span: c.span, - kind: ExprKind::Literal { - literal: cx.const_eval_literal(c) - } - }.to_ref() - }, - hir::ExprRet(ref v) => - ExprKind::Return { value: v.to_ref() }, - hir::ExprBreak(label) => - ExprKind::Break { label: label.map(|_| loop_label(cx, self)) }, - hir::ExprAgain(label) => - ExprKind::Continue { label: label.map(|_| loop_label(cx, self)) }, - hir::ExprMatch(ref discr, ref arms, _) => - ExprKind::Match { discriminant: discr.to_ref(), - arms: arms.iter().map(|a| convert_arm(cx, a)).collect() }, - hir::ExprIf(ref cond, ref then, ref otherwise) => - ExprKind::If { condition: cond.to_ref(), - then: block::to_expr_ref(cx, then), - otherwise: otherwise.to_ref() }, - hir::ExprWhile(ref cond, ref body, _) => - ExprKind::Loop { condition: Some(cond.to_ref()), - body: block::to_expr_ref(cx, body) }, - hir::ExprLoop(ref body, _) => - ExprKind::Loop { condition: None, - body: block::to_expr_ref(cx, body) }, - hir::ExprField(ref source, name) => { - let index = match cx.tcx.expr_ty_adjusted(source).sty { - ty::TyStruct(adt_def, _) => - adt_def.variants[0].index_of_field_named(name.node), - ref ty => - cx.tcx.sess.span_bug( - self.span, - &format!("field of non-struct: {:?}", ty)), - }; - let index = index.unwrap_or_else(|| { - cx.tcx.sess.span_bug( - self.span, - &format!("no index found for field `{}`", name.node)); - }); - ExprKind::Field { lhs: source.to_ref(), name: Field::new(index) } - } - hir::ExprTupField(ref source, index) => - ExprKind::Field { lhs: source.to_ref(), - name: Field::new(index.node as usize) }, - hir::ExprCast(ref source, _) => 
- ExprKind::Cast { source: source.to_ref() }, - hir::ExprType(ref source, _) => - return source.make_mirror(cx), - hir::ExprBox(ref value) => - ExprKind::Box { value: value.to_ref() }, - hir::ExprVec(ref fields) => - ExprKind::Vec { fields: fields.to_ref() }, - hir::ExprTup(ref fields) => - ExprKind::Tuple { fields: fields.to_ref() }, - }; - + fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> { let temp_lifetime = cx.tcx.region_maps.temporary_scope(self.id); let expr_extent = cx.tcx.region_maps.node_extent(self.id); - let mut expr = Expr { - temp_lifetime: temp_lifetime, - ty: expr_ty, - span: self.span, - kind: kind, - }; + debug!("Expr::make_mirror(): id={}, span={:?}", self.id, self.span); + + let mut expr = make_mirror_unadjusted(cx, self); + let adj = cx.tcx.tables().adjustments.get(&self.id).cloned(); + + debug!("make_mirror: unadjusted-expr={:?} applying adjustments={:?}", + expr, adj); // Now apply adjustments, if any. - match cx.tcx.tables.borrow().adjustments.get(&self.id) { + match adj.map(|adj| (adj.kind, adj.target)) { None => {} - Some(&ty::adjustment::AdjustReifyFnPointer) => { - let adjusted_ty = cx.tcx.expr_ty_adjusted(self); + Some((ty::adjustment::Adjust::ReifyFnPointer, adjusted_ty)) => { expr = Expr { temp_lifetime: temp_lifetime, ty: adjusted_ty, @@ -406,8 +49,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { kind: ExprKind::ReifyFnPointer { source: expr.to_ref() }, }; } - Some(&ty::adjustment::AdjustUnsafeFnPointer) => { - let adjusted_ty = cx.tcx.expr_ty_adjusted(self); + Some((ty::adjustment::Adjust::UnsafeFnPointer, adjusted_ty)) => { expr = Expr { temp_lifetime: temp_lifetime, ty: adjusted_ty, @@ -415,8 +57,25 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { kind: ExprKind::UnsafeFnPointer { source: expr.to_ref() }, }; } - Some(&ty::adjustment::AdjustDerefRef(ref adj)) => { - for i in 0..adj.autoderefs { + Some((ty::adjustment::Adjust::NeverToAny, adjusted_ty)) => { + expr = Expr { + temp_lifetime: temp_lifetime, 
+ ty: adjusted_ty, + span: self.span, + kind: ExprKind::NeverToAny { source: expr.to_ref() }, + }; + } + Some((ty::adjustment::Adjust::MutToConstPointer, adjusted_ty)) => { + expr = Expr { + temp_lifetime: temp_lifetime, + ty: adjusted_ty, + span: self.span, + kind: ExprKind::Cast { source: expr.to_ref() }, + }; + } + Some((ty::adjustment::Adjust::DerefRef { autoderefs, autoref, unsize }, + adjusted_ty)) => { + for i in 0..autoderefs { let i = i as u32; let adjusted_ty = expr.ty.adjust_for_autoderef( @@ -424,11 +83,38 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { self.id, self.span, i, - |mc| cx.tcx.tables.borrow().method_map.get(&mc).map(|m| m.ty)); - let kind = if cx.tcx.is_overloaded_autoderef(self.id, i) { - overloaded_lvalue(cx, self, ty::MethodCall::autoderef(self.id, i), - PassArgs::ByValue, expr.to_ref(), vec![]) + |mc| cx.tcx.tables().method_map.get(&mc).map(|m| m.ty)); + debug!("make_mirror: autoderef #{}, adjusted_ty={:?}", i, adjusted_ty); + let method_key = ty::MethodCall::autoderef(self.id, i); + let meth_ty = + cx.tcx.tables().method_map.get(&method_key).map(|m| m.ty); + let kind = if let Some(meth_ty) = meth_ty { + debug!("make_mirror: overloaded autoderef (meth_ty={:?})", meth_ty); + + let ref_ty = cx.tcx.no_late_bound_regions(&meth_ty.fn_ret()); + let (region, mutbl) = match ref_ty { + Some(&ty::TyS { + sty: ty::TyRef(region, mt), .. 
+ }) => (region, mt.mutbl), + _ => span_bug!(expr.span, "autoderef returned bad type") + }; + + expr = Expr { + temp_lifetime: temp_lifetime, + ty: cx.tcx.mk_ref( + region, ty::TypeAndMut { ty: expr.ty, mutbl: mutbl }), + span: expr.span, + kind: ExprKind::Borrow { + region: region, + borrow_kind: to_borrow_kind(mutbl), + arg: expr.to_ref() + } + }; + + overloaded_lvalue(cx, self, method_key, + PassArgs::ByRef, expr.to_ref(), vec![]) } else { + debug!("make_mirror: built-in autoderef"); ExprKind::Deref { arg: expr.to_ref() } }; expr = Expr { @@ -439,22 +125,22 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { }; } - if let Some(autoref) = adj.autoref { + if let Some(autoref) = autoref { let adjusted_ty = expr.ty.adjust_for_autoref(cx.tcx, Some(autoref)); match autoref { - ty::adjustment::AutoPtr(r, m) => { + ty::adjustment::AutoBorrow::Ref(r, m) => { expr = Expr { temp_lifetime: temp_lifetime, ty: adjusted_ty, span: self.span, kind: ExprKind::Borrow { - region: *r, + region: r, borrow_kind: to_borrow_kind(m), arg: expr.to_ref(), }, }; } - ty::adjustment::AutoUnsafe(m) => { + ty::adjustment::AutoBorrow::RawPtr(m) => { // Convert this to a suitable `&foo` and // then an unsafe coercion. Limit the region to be just this // expression. 
@@ -465,7 +151,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { ty: cx.tcx.mk_ref(region, ty::TypeAndMut { ty: expr.ty, mutbl: m }), span: self.span, kind: ExprKind::Borrow { - region: *region, + region: region, borrow_kind: to_borrow_kind(m), arg: expr.to_ref(), }, @@ -480,10 +166,10 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { } } - if let Some(target) = adj.unsize { + if unsize { expr = Expr { temp_lifetime: temp_lifetime, - ty: target, + ty: adjusted_ty, span: self.span, kind: ExprKind::Unsize { source: expr.to_ref() }, }; @@ -520,12 +206,442 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { } } -fn method_callee<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, - expr: &hir::Expr, - method_call: ty::MethodCall) - -> Expr<'tcx> { - let tables = cx.tcx.tables.borrow(); - let callee = &tables.method_map[&method_call]; +fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr) + -> Expr<'tcx> { + let expr_ty = cx.tcx.tables().expr_ty(expr); + let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); + + let kind = match expr.node { + // Here comes the interesting stuff: + hir::ExprMethodCall(.., ref args) => { + // Rewrite a.b(c) into UFCS form like Trait::b(a, c) + let expr = method_callee(cx, expr, ty::MethodCall::expr(expr.id)); + let args = args.iter() + .map(|e| e.to_ref()) + .collect(); + ExprKind::Call { + ty: expr.ty, + fun: expr.to_ref(), + args: args, + } + } + + hir::ExprCall(ref fun, ref args) => { + if cx.tcx.tables().is_method_call(expr.id) { + // The callee is something implementing Fn, FnMut, or FnOnce. + // Find the actual method implementation being called and + // build the appropriate UFCS call expression with the + // callee-object as expr parameter. 
+ + // rewrite f(u, v) into FnOnce::call_once(f, (u, v)) + + let method = method_callee(cx, expr, ty::MethodCall::expr(expr.id)); + + let sig = match method.ty.sty { + ty::TyFnDef(.., fn_ty) => &fn_ty.sig, + _ => span_bug!(expr.span, "type of method is not an fn") + }; + + let sig = cx.tcx.no_late_bound_regions(sig).unwrap_or_else(|| { + span_bug!(expr.span, "method call has late-bound regions") + }); + + assert_eq!(sig.inputs.len(), 2); + + let tupled_args = Expr { + ty: sig.inputs[1], + temp_lifetime: temp_lifetime, + span: expr.span, + kind: ExprKind::Tuple { + fields: args.iter().map(ToRef::to_ref).collect() + } + }; + + ExprKind::Call { + ty: method.ty, + fun: method.to_ref(), + args: vec![fun.to_ref(), tupled_args.to_ref()] + } + } else { + let adt_data = if let hir::ExprPath(hir::QPath::Resolved(_, ref path)) = fun.node { + // Tuple-like ADTs are represented as ExprCall. We convert them here. + expr_ty.ty_adt_def().and_then(|adt_def|{ + match path.def { + Def::VariantCtor(variant_id, CtorKind::Fn) => { + Some((adt_def, adt_def.variant_index_with_id(variant_id))) + }, + Def::StructCtor(_, CtorKind::Fn) => { + Some((adt_def, 0)) + }, + _ => None + } + }) + } else { None }; + if let Some((adt_def, index)) = adt_data { + let substs = cx.tcx.tables().node_id_item_substs(fun.id) + .unwrap_or_else(|| cx.tcx.intern_substs(&[])); + let field_refs = args.iter().enumerate().map(|(idx, e)| FieldExprRef { + name: Field::new(idx), + expr: e.to_ref() + }).collect(); + ExprKind::Adt { + adt_def: adt_def, + substs: substs, + variant_index: index, + fields: field_refs, + base: None + } + } else { + ExprKind::Call { + ty: cx.tcx.tables().node_id_to_type(fun.id), + fun: fun.to_ref(), + args: args.to_ref(), + } + } + } + } + + hir::ExprAddrOf(mutbl, ref expr) => { + let region = match expr_ty.sty { + ty::TyRef(r, _) => r, + _ => span_bug!(expr.span, "type of & not region"), + }; + ExprKind::Borrow { + region: region, + borrow_kind: to_borrow_kind(mutbl), + arg: expr.to_ref(), + 
} + } + + hir::ExprBlock(ref blk) => { + ExprKind::Block { body: &blk } + } + + hir::ExprAssign(ref lhs, ref rhs) => { + ExprKind::Assign { + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + + hir::ExprAssignOp(op, ref lhs, ref rhs) => { + if cx.tcx.tables().is_method_call(expr.id) { + let pass_args = if op.node.is_by_value() { + PassArgs::ByValue + } else { + PassArgs::ByRef + }; + overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), + pass_args, lhs.to_ref(), vec![rhs]) + } else { + ExprKind::AssignOp { + op: bin_op(op.node), + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + } + + hir::ExprLit(..) => ExprKind::Literal { + literal: cx.const_eval_literal(expr) + }, + + hir::ExprBinary(op, ref lhs, ref rhs) => { + if cx.tcx.tables().is_method_call(expr.id) { + let pass_args = if op.node.is_by_value() { + PassArgs::ByValue + } else { + PassArgs::ByRef + }; + overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), + pass_args, lhs.to_ref(), vec![rhs]) + } else { + // FIXME overflow + match (op.node, cx.constness) { + // FIXME(eddyb) use logical ops in constants when + // they can handle that kind of control-flow. 
+ (hir::BinOp_::BiAnd, hir::Constness::Const) => { + ExprKind::Binary { + op: BinOp::BitAnd, + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + (hir::BinOp_::BiOr, hir::Constness::Const) => { + ExprKind::Binary { + op: BinOp::BitOr, + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + + (hir::BinOp_::BiAnd, hir::Constness::NotConst) => { + ExprKind::LogicalOp { + op: LogicalOp::And, + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + (hir::BinOp_::BiOr, hir::Constness::NotConst) => { + ExprKind::LogicalOp { + op: LogicalOp::Or, + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + + _ => { + let op = bin_op(op.node); + ExprKind::Binary { + op: op, + lhs: lhs.to_ref(), + rhs: rhs.to_ref(), + } + } + } + } + } + + hir::ExprIndex(ref lhs, ref index) => { + if cx.tcx.tables().is_method_call(expr.id) { + overloaded_lvalue(cx, expr, ty::MethodCall::expr(expr.id), + PassArgs::ByValue, lhs.to_ref(), vec![index]) + } else { + ExprKind::Index { + lhs: lhs.to_ref(), + index: index.to_ref(), + } + } + } + + hir::ExprUnary(hir::UnOp::UnDeref, ref arg) => { + if cx.tcx.tables().is_method_call(expr.id) { + overloaded_lvalue(cx, expr, ty::MethodCall::expr(expr.id), + PassArgs::ByValue, arg.to_ref(), vec![]) + } else { + ExprKind::Deref { arg: arg.to_ref() } + } + } + + hir::ExprUnary(hir::UnOp::UnNot, ref arg) => { + if cx.tcx.tables().is_method_call(expr.id) { + overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), + PassArgs::ByValue, arg.to_ref(), vec![]) + } else { + ExprKind::Unary { + op: UnOp::Not, + arg: arg.to_ref(), + } + } + } + + hir::ExprUnary(hir::UnOp::UnNeg, ref arg) => { + if cx.tcx.tables().is_method_call(expr.id) { + overloaded_operator(cx, expr, ty::MethodCall::expr(expr.id), + PassArgs::ByValue, arg.to_ref(), vec![]) + } else { + // FIXME runtime-overflow + if let hir::ExprLit(_) = arg.node { + ExprKind::Literal { + literal: cx.const_eval_literal(expr), + } + } else { + ExprKind::Unary { + op: UnOp::Neg, + arg: arg.to_ref(), + } + } + } + } + + 
hir::ExprStruct(ref qpath, ref fields, ref base) => { + match expr_ty.sty { + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + let field_refs = field_refs(&adt.variants[0], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: 0, + substs: substs, + fields: field_refs, + base: base.as_ref().map(|base| { + FruInfo { + base: base.to_ref(), + field_types: + cx.tcx.tables().fru_field_types[&expr.id].clone() + } + }) + } + } + AdtKind::Enum => { + let def = match *qpath { + hir::QPath::Resolved(_, ref path) => path.def, + hir::QPath::TypeRelative(..) => Def::Err + }; + match def { + Def::Variant(variant_id) => { + assert!(base.is_none()); + + let index = adt.variant_index_with_id(variant_id); + let field_refs = field_refs(&adt.variants[index], fields); + ExprKind::Adt { + adt_def: adt, + variant_index: index, + substs: substs, + fields: field_refs, + base: None + } + } + _ => { + span_bug!( + expr.span, + "unexpected def: {:?}", + def); + } + } + } + }, + _ => { + span_bug!( + expr.span, + "unexpected type for struct literal: {:?}", + expr_ty); + } + } + } + + hir::ExprClosure(..) 
=> { + let closure_ty = cx.tcx.tables().expr_ty(expr); + let (def_id, substs) = match closure_ty.sty { + ty::TyClosure(def_id, substs) => (def_id, substs), + _ => { + span_bug!(expr.span, + "closure expr w/o closure type: {:?}", + closure_ty); + } + }; + let upvars = cx.tcx.with_freevars(expr.id, |freevars| { + freevars.iter() + .zip(substs.upvar_tys(def_id, cx.tcx)) + .map(|(fv, ty)| capture_freevar(cx, expr, fv, ty)) + .collect() + }); + ExprKind::Closure { + closure_id: def_id, + substs: substs, + upvars: upvars, + } + } + + hir::ExprPath(ref qpath) => { + let def = cx.tcx.tables().qpath_def(qpath, expr.id); + convert_path_expr(cx, expr, def) + } + + hir::ExprInlineAsm(ref asm, ref outputs, ref inputs) => { + ExprKind::InlineAsm { + asm: asm, + outputs: outputs.to_ref(), + inputs: inputs.to_ref() + } + } + + // Now comes the rote stuff: + + hir::ExprRepeat(ref v, ref c) => ExprKind::Repeat { + value: v.to_ref(), + count: TypedConstVal { + ty: cx.tcx.tables().expr_ty(c), + span: c.span, + value: match const_eval::eval_const_expr(cx.tcx.global_tcx(), c) { + ConstVal::Integral(ConstInt::Usize(u)) => u, + other => bug!("constant evaluation of repeat count yielded {:?}", other), + }, + } + }, + hir::ExprRet(ref v) => + ExprKind::Return { value: v.to_ref() }, + hir::ExprBreak(label, ref value) => + ExprKind::Break { + label: label.map(|label| { + cx.tcx.region_maps.node_extent(label.loop_id) + }), + value: value.to_ref() + }, + hir::ExprAgain(label) => + ExprKind::Continue { + label: label.map(|label| { + cx.tcx.region_maps.node_extent(label.loop_id) + }) + }, + hir::ExprMatch(ref discr, ref arms, _) => + ExprKind::Match { discriminant: discr.to_ref(), + arms: arms.iter().map(|a| convert_arm(cx, a)).collect() }, + hir::ExprIf(ref cond, ref then, ref otherwise) => + ExprKind::If { condition: cond.to_ref(), + then: block::to_expr_ref(cx, then), + otherwise: otherwise.to_ref() }, + hir::ExprWhile(ref cond, ref body, _) => + ExprKind::Loop { condition: 
Some(cond.to_ref()), + body: block::to_expr_ref(cx, body) }, + hir::ExprLoop(ref body, _, _) => + ExprKind::Loop { condition: None, + body: block::to_expr_ref(cx, body) }, + hir::ExprField(ref source, name) => { + let index = match cx.tcx.tables().expr_ty_adjusted(source).sty { + ty::TyAdt(adt_def, _) => + adt_def.variants[0].index_of_field_named(name.node), + ref ty => + span_bug!(expr.span, "field of non-ADT: {:?}", ty), + }; + let index = index.unwrap_or_else(|| { + span_bug!( + expr.span, + "no index found for field `{}`", + name.node) + }); + ExprKind::Field { lhs: source.to_ref(), name: Field::new(index) } + } + hir::ExprTupField(ref source, index) => + ExprKind::Field { lhs: source.to_ref(), + name: Field::new(index.node as usize) }, + hir::ExprCast(ref source, _) => { + // Check to see if this cast is a "coercion cast", where the cast is actually done + // using a coercion (or is a no-op). + if let Some(&TyCastKind::CoercionCast) = cx.tcx.cast_kinds.borrow().get(&source.id) { + // Convert the lexpr to a vexpr. 
+ ExprKind::Use { source: source.to_ref() } + } else { + ExprKind::Cast { source: source.to_ref() } + } + } + hir::ExprType(ref source, _) => + return source.make_mirror(cx), + hir::ExprBox(ref value) => + ExprKind::Box { + value: value.to_ref(), + value_extents: cx.tcx.region_maps.node_extent(value.id) + }, + hir::ExprArray(ref fields) => + ExprKind::Vec { fields: fields.to_ref() }, + hir::ExprTup(ref fields) => + ExprKind::Tuple { fields: fields.to_ref() }, + }; + + Expr { + temp_lifetime: temp_lifetime, + ty: expr_ty, + span: expr.span, + kind: kind, + } +} + +fn method_callee<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &hir::Expr, + method_call: ty::MethodCall) + -> Expr<'tcx> { + let callee = cx.tcx.tables().method_map[&method_call]; let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); Expr { temp_lifetime: temp_lifetime, @@ -534,7 +650,6 @@ fn method_callee<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, kind: ExprKind::Literal { literal: Literal::Item { def_id: callee.def_id, - kind: ItemKind::Method, substs: callee.substs, }, }, @@ -548,125 +663,92 @@ fn to_borrow_kind(m: hir::Mutability) -> BorrowKind { } } -fn convert_arm<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, arm: &'tcx hir::Arm) -> Arm<'tcx> { - let mut map; - let opt_map = if arm.pats.len() == 1 { - None - } else { - map = FnvHashMap(); - pat_util::pat_bindings(&cx.tcx.def_map, &arm.pats[0], |_, p_id, _, path| { - map.insert(path.node, p_id); - }); - Some(&map) - }; - +fn convert_arm<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + arm: &'tcx hir::Arm) -> Arm<'tcx> { Arm { - patterns: arm.pats.iter().map(|p| cx.refutable_pat(opt_map, p)).collect(), + patterns: arm.pats.iter().map(|p| Pattern::from_hir(cx.tcx, p)).collect(), guard: arm.guard.to_ref(), body: arm.body.to_ref(), } } -fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> ExprKind<'tcx> { - let substs = cx.tcx.mk_substs(cx.tcx.node_id_item_substs(expr.id).substs); - // Otherwise there may be def_map borrow 
conflicts - let def = cx.tcx.def_map.borrow()[&expr.id].full_def(); - let (def_id, kind) = match def { - // A regular function. - def::DefFn(def_id, _) => (def_id, ItemKind::Function), - def::DefMethod(def_id) => (def_id, ItemKind::Method), - def::DefStruct(def_id) => match cx.tcx.node_id_to_type(expr.id).sty { - // A tuple-struct constructor. Should only be reached if not called in the same - // expression. - ty::TyBareFn(..) => (def_id, ItemKind::Function), - // A unit struct which is used as a value. We return a completely different ExprKind - // here to account for this special case. - ty::TyStruct(adt_def, substs) => return ExprKind::Adt { - adt_def: adt_def, - variant_index: 0, - substs: substs, - fields: vec![], - base: None - }, - ref sty => panic!("unexpected sty: {:?}", sty) - }, - def::DefVariant(enum_id, variant_id, false) => match cx.tcx.node_id_to_type(expr.id).sty { - // A variant constructor. Should only be reached if not called in the same - // expression. - ty::TyBareFn(..) => (variant_id, ItemKind::Function), - // A unit variant, similar special case to the struct case above. - ty::TyEnum(adt_def, substs) => { - debug_assert!(adt_def.did == enum_id); - let index = adt_def.variant_index_with_id(variant_id); - return ExprKind::Adt { +fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr, + def: Def) + -> ExprKind<'tcx> { + let substs = cx.tcx.tables().node_id_item_substs(expr.id) + .unwrap_or_else(|| cx.tcx.intern_substs(&[])); + let def_id = match def { + // A regular function, constructor function or a constant. + Def::Fn(def_id) | Def::Method(def_id) | + Def::StructCtor(def_id, CtorKind::Fn) | + Def::VariantCtor(def_id, CtorKind::Fn) | + Def::Const(def_id) | Def::AssociatedConst(def_id) => def_id, + + Def::StructCtor(def_id, CtorKind::Const) | + Def::VariantCtor(def_id, CtorKind::Const) => { + match cx.tcx.tables().node_id_to_type(expr.id).sty { + // A unit struct/variant which is used as a value. 
+ // We return a completely different ExprKind here to account for this special case. + ty::TyAdt(adt_def, substs) => return ExprKind::Adt { adt_def: adt_def, + variant_index: adt_def.variant_index_with_id(def_id), substs: substs, - variant_index: index, fields: vec![], - base: None - }; - }, - ref sty => panic!("unexpected sty: {:?}", sty) - }, - def::DefConst(def_id) | - def::DefAssociatedConst(def_id) => { - if let Some(v) = cx.try_const_eval_literal(expr) { - return ExprKind::Literal { literal: v }; - } else { - (def_id, ItemKind::Constant) + base: None, + }, + ref sty => bug!("unexpected sty: {:?}", sty) } } - def::DefStatic(node_id, _) => return ExprKind::StaticRef { + Def::Static(node_id, _) => return ExprKind::StaticRef { id: node_id, }, - def @ def::DefLocal(..) | - def @ def::DefUpvar(..) => return convert_var(cx, expr, def), + Def::Local(..) | Def::Upvar(..) => return convert_var(cx, expr, def), - def => - cx.tcx.sess.span_bug( - expr.span, - &format!("def `{:?}` not yet implemented", def)), + _ => span_bug!(expr.span, "def `{:?}` not yet implemented", def), }; ExprKind::Literal { - literal: Literal::Item { def_id: def_id, kind: kind, substs: substs } + literal: Literal::Item { def_id: def_id, substs: substs } } } -fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, - expr: &'tcx hir::Expr, - def: def::Def) - -> ExprKind<'tcx> { +fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr, + def: Def) + -> ExprKind<'tcx> { let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); match def { - def::DefLocal(_, node_id) => { + Def::Local(def_id) => { + let node_id = cx.tcx.map.as_local_node_id(def_id).unwrap(); ExprKind::VarRef { id: node_id, } } - def::DefUpvar(_, id_var, index, closure_expr_id) => { + Def::Upvar(def_id, index, closure_expr_id) => { + let id_var = cx.tcx.map.as_local_node_id(def_id).unwrap(); debug!("convert_var(upvar({:?}, {:?}, {:?}))", id_var, index, closure_expr_id); - let var_ty = 
cx.tcx.node_id_to_type(id_var); + let var_ty = cx.tcx.tables().node_id_to_type(id_var); let body_id = match cx.tcx.map.find(closure_expr_id) { Some(map::NodeExpr(expr)) => { match expr.node { - hir::ExprClosure(_, _, ref body) => body.id, + hir::ExprClosure(.., body_id, _) => body_id.node_id(), _ => { - cx.tcx.sess.span_bug(expr.span, "closure expr is not a closure expr"); + span_bug!(expr.span, "closure expr is not a closure expr"); } } } _ => { - cx.tcx.sess.span_bug(expr.span, "ast-map has garbage for closure expr"); + span_bug!(expr.span, "ast-map has garbage for closure expr"); } }; // FIXME free regions in closures are not right - let closure_ty = cx.tcx.node_id_to_type(closure_expr_id); + let closure_ty = cx.tcx.tables().node_id_to_type(closure_expr_id); // FIXME we're just hard-coding the idea that the // signature will be &self or &mut self and hence will @@ -678,7 +760,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, let region = cx.tcx.mk_region(region); let self_expr = match cx.tcx.closure_kind(cx.tcx.map.local_def_id(closure_expr_id)) { - ty::ClosureKind::FnClosureKind => { + ty::ClosureKind::Fn => { let ref_closure_ty = cx.tcx.mk_ref(region, ty::TypeAndMut { ty: closure_ty, @@ -697,7 +779,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } } } - ty::ClosureKind::FnMutClosureKind => { + ty::ClosureKind::FnMut => { let ref_closure_ty = cx.tcx.mk_ref(region, ty::TypeAndMut { ty: closure_ty, @@ -716,7 +798,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } } } - ty::ClosureKind::FnOnceClosureKind => { + ty::ClosureKind::FnOnce => { Expr { ty: closure_ty, temp_lifetime: temp_lifetime, @@ -738,21 +820,26 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, var_id: id_var, closure_expr_id: closure_expr_id, }; - let upvar_capture = match cx.tcx.upvar_capture(upvar_id) { + let upvar_capture = match cx.tcx.tables().upvar_capture(upvar_id) { Some(c) => c, None => { - cx.tcx.sess.span_bug( + span_bug!( expr.span, - &format!("no 
upvar_capture for {:?}", upvar_id)); + "no upvar_capture for {:?}", + upvar_id); } }; match upvar_capture { ty::UpvarCapture::ByValue => field_kind, - ty::UpvarCapture::ByRef(_) => { + ty::UpvarCapture::ByRef(borrow) => { ExprKind::Deref { arg: Expr { temp_lifetime: temp_lifetime, - ty: var_ty, + ty: cx.tcx.mk_ref(borrow.region, + ty::TypeAndMut { + ty: var_ty, + mutbl: borrow.kind.to_mutbl_lossy() + }), span: expr.span, kind: field_kind, }.to_ref() @@ -761,7 +848,7 @@ fn convert_var<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } } - _ => cx.tcx.sess.span_bug(expr.span, "type of & not region"), + _ => span_bug!(expr.span, "type of & not region"), } } @@ -784,7 +871,7 @@ fn bin_op(op: hir::BinOp_) -> BinOp { hir::BinOp_::BiNe => BinOp::Ne, hir::BinOp_::BiGe => BinOp::Ge, hir::BinOp_::BiGt => BinOp::Gt, - _ => panic!("no equivalent for ast binop {:?}", op), + _ => bug!("no equivalent for ast binop {:?}", op), } } @@ -793,13 +880,13 @@ enum PassArgs { ByRef, } -fn overloaded_operator<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, - expr: &'tcx hir::Expr, - method_call: ty::MethodCall, - pass_args: PassArgs, - receiver: ExprRef<'tcx>, - args: Vec<&'tcx P>) - -> ExprKind<'tcx> { +fn overloaded_operator<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr, + method_call: ty::MethodCall, + pass_args: PassArgs, + receiver: ExprRef<'tcx>, + args: Vec<&'tcx P>) + -> ExprKind<'tcx> { // the receiver has all the adjustments that are needed, so we can // just push a reference to it let mut argrefs = vec![receiver]; @@ -812,13 +899,12 @@ fn overloaded_operator<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } PassArgs::ByRef => { - let scope = cx.tcx.region_maps.node_extent(expr.id); - let region = cx.tcx.mk_region(ty::ReScope(scope)); + let region = cx.tcx.node_scope_region(expr.id); let temp_lifetime = cx.tcx.region_maps.temporary_scope(expr.id); argrefs.extend( args.iter() .map(|arg| { - let arg_ty = cx.tcx.expr_ty_adjusted(arg); + let arg_ty = cx.tcx.tables().expr_ty_adjusted(arg); let 
adjusted_ty = cx.tcx.mk_ref(region, ty::TypeAndMut { ty: arg_ty, @@ -827,7 +913,7 @@ fn overloaded_operator<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, temp_lifetime: temp_lifetime, ty: adjusted_ty, span: expr.span, - kind: ExprKind::Borrow { region: *region, + kind: ExprKind::Borrow { region: region, borrow_kind: BorrowKind::Shared, arg: arg.to_ref() } }.to_ref() @@ -844,25 +930,21 @@ fn overloaded_operator<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } } -fn overloaded_lvalue<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, - expr: &'tcx hir::Expr, - method_call: ty::MethodCall, - pass_args: PassArgs, - receiver: ExprRef<'tcx>, - args: Vec<&'tcx P>) - -> ExprKind<'tcx> { +fn overloaded_lvalue<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr, + method_call: ty::MethodCall, + pass_args: PassArgs, + receiver: ExprRef<'tcx>, + args: Vec<&'tcx P>) + -> ExprKind<'tcx> { // For an overloaded *x or x[y] expression of type T, the method // call returns an &T and we must add the deref so that the types // line up (this is because `*x` and `x[y]` represent lvalues): // to find the type &T of the content returned by the method; - let tables = cx.tcx.tables.borrow(); - let callee = &tables.method_map[&method_call]; - let ref_ty = callee.ty.fn_ret(); - let ref_ty = cx.tcx.no_late_bound_regions(&ref_ty).unwrap().unwrap(); - // 1~~~~~ 2~~~~~ - // (1) callees always have all late-bound regions fully instantiated, - // (2) overloaded methods don't return `!` + let ref_ty = cx.tcx.tables().method_map[&method_call].ty.fn_ret(); + let ref_ty = cx.tcx.no_late_bound_regions(&ref_ty).unwrap(); + // callees always have all late-bound regions fully instantiated, // construct the complete expression `foo()` for the overloaded call, // which will yield the &T type @@ -879,19 +961,19 @@ fn overloaded_lvalue<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, ExprKind::Deref { arg: ref_expr.to_ref() } } -fn capture_freevar<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, - closure_expr: &'tcx hir::Expr, - freevar: 
&ty::Freevar, - freevar_ty: Ty<'tcx>) - -> ExprRef<'tcx> { - let id_var = freevar.def.var_id(); +fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, + closure_expr: &'tcx hir::Expr, + freevar: &hir::Freevar, + freevar_ty: Ty<'tcx>) + -> ExprRef<'tcx> { + let id_var = cx.tcx.map.as_local_node_id(freevar.def.def_id()).unwrap(); let upvar_id = ty::UpvarId { var_id: id_var, closure_expr_id: closure_expr.id, }; - let upvar_capture = cx.tcx.upvar_capture(upvar_id).unwrap(); + let upvar_capture = cx.tcx.tables().upvar_capture(upvar_id).unwrap(); let temp_lifetime = cx.tcx.region_maps.temporary_scope(closure_expr.id); - let var_ty = cx.tcx.node_id_to_type(id_var); + let var_ty = cx.tcx.tables().node_id_to_type(id_var); let captured_var = Expr { temp_lifetime: temp_lifetime, ty: var_ty, @@ -920,17 +1002,8 @@ fn capture_freevar<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, } } -fn loop_label<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr) -> CodeExtent { - match cx.tcx.def_map.borrow().get(&expr.id).map(|d| d.full_def()) { - Some(def::DefLabel(loop_id)) => cx.tcx.region_maps.node_extent(loop_id), - d => { - cx.tcx.sess.span_bug(expr.span, &format!("loop scope resolved to {:?}", d)); - } - } -} - /// Converts a list of named fields (i.e. for struct-like struct/enum ADTs) into FieldExprRef. 
-fn field_refs<'tcx>(variant: VariantDef<'tcx>, +fn field_refs<'tcx>(variant: &'tcx VariantDef, fields: &'tcx [hir::Field]) -> Vec> { diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index b49dc6d896242..e7a6b40c830bd 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -16,31 +16,81 @@ */ use hair::*; -use rustc::mir::repr::*; - -use rustc::middle::const_eval::{self, ConstVal}; -use rustc::middle::infer::InferCtxt; -use rustc::middle::ty::{self, Ty}; -use syntax::codemap::Span; -use syntax::parse::token; -use rustc_front::hir; +use rustc::mir::transform::MirSource; + +use rustc::middle::const_val::ConstVal; +use rustc_const_eval as const_eval; +use rustc_data_structures::indexed_vec::Idx; +use rustc::dep_graph::DepNode; +use rustc::hir::def_id::DefId; +use rustc::hir::map::blocks::FnLikeNode; +use rustc::infer::InferCtxt; +use rustc::ty::subst::Subst; +use rustc::ty::{self, Ty, TyCtxt}; +use syntax::symbol::{Symbol, InternedString}; +use rustc::hir; +use rustc_const_math::{ConstInt, ConstUsize}; #[derive(Copy, Clone)] -pub struct Cx<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - infcx: &'a InferCtxt<'a, 'tcx>, +pub struct Cx<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + constness: hir::Constness, + + /// True if this constant/function needs overflow checks. + check_overflow: bool } -impl<'a,'tcx> Cx<'a,'tcx> { - pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Cx<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + src: MirSource) + -> Cx<'a, 'gcx, 'tcx> { + let constness = match src { + MirSource::Const(_) | + MirSource::Static(..) => hir::Constness::Const, + MirSource::Fn(id) => { + let fn_like = FnLikeNode::from_node(infcx.tcx.map.get(id)); + fn_like.map_or(hir::Constness::NotConst, |f| f.constness()) + } + MirSource::Promoted(..) 
=> bug!() + }; + + let src_node_id = src.item_id(); + + // We are going to be accessing various tables + // generated by TypeckItemBody; we also assume + // that the body passes type check. These tables + // are not individually tracked, so just register + // a read here. + let src_def_id = infcx.tcx.map.local_def_id(src_node_id); + infcx.tcx.dep_graph.read(DepNode::TypeckItemBody(src_def_id)); + + let attrs = infcx.tcx.map.attrs(src_node_id); + + // Some functions always have overflow checks enabled, + // however, they may not get codegen'd, depending on + // the settings for the crate they are translated in. + let mut check_overflow = attrs.iter().any(|item| { + item.check_name("rustc_inherit_overflow_checks") + }); + + // Respect -Z force-overflow-checks=on and -C debug-assertions. + check_overflow |= infcx.tcx.sess.opts.debugging_opts.force_overflow_checks + .unwrap_or(infcx.tcx.sess.opts.debug_assertions); + + // Constants and const fn's always need overflow checks. + check_overflow |= constness == hir::Constness::Const; + Cx { tcx: infcx.tcx, infcx: infcx, + constness: constness, + check_overflow: check_overflow } } } -impl<'a,'tcx:'a> Cx<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { /// Normalizes `ast` into the appropriate `mirror` type. 
pub fn mirror>(&mut self, ast: M) -> M::Output { ast.make_mirror(self) @@ -50,15 +100,22 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> { self.tcx.types.usize } - pub fn usize_literal(&mut self, value: usize) -> Literal<'tcx> { - Literal::Value { value: ConstVal::Uint(value as u64) } + pub fn usize_literal(&mut self, value: u64) -> Literal<'tcx> { + match ConstUsize::new(value, self.tcx.sess.target.uint_type) { + Ok(val) => Literal::Value { value: ConstVal::Integral(ConstInt::Usize(val))}, + Err(_) => bug!("usize literal out of range for target"), + } } pub fn bool_ty(&mut self) -> Ty<'tcx> { self.tcx.types.bool } - pub fn str_literal(&mut self, value: token::InternedString) -> Literal<'tcx> { + pub fn unit_ty(&mut self) -> Ty<'tcx> { + self.tcx.mk_nil() + } + + pub fn str_literal(&mut self, value: InternedString) -> Literal<'tcx> { Literal::Value { value: ConstVal::Str(value) } } @@ -71,40 +128,60 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> { } pub fn const_eval_literal(&mut self, e: &hir::Expr) -> Literal<'tcx> { - Literal::Value { value: const_eval::eval_const_expr(self.tcx, e) } + Literal::Value { + value: const_eval::eval_const_expr(self.tcx.global_tcx(), e) + } } - pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option> { - let hint = const_eval::EvalHint::ExprTypeChecked; - const_eval::eval_const_expr_partial(self.tcx, e, hint, None) - .ok() - .map(|v| Literal::Value { value: v }) + pub fn trait_method(&mut self, + trait_def_id: DefId, + method_name: &str, + self_ty: Ty<'tcx>, + params: &[Ty<'tcx>]) + -> (Ty<'tcx>, Literal<'tcx>) { + let method_name = Symbol::intern(method_name); + let substs = self.tcx.mk_substs_trait(self_ty, params); + for item in self.tcx.associated_items(trait_def_id) { + if item.kind == ty::AssociatedKind::Method && item.name == method_name { + let method_ty = self.tcx.item_type(item.def_id); + let method_ty = method_ty.subst(self.tcx, substs); + return (method_ty, Literal::Item { + def_id: item.def_id, + substs: substs, + }); + } + } + + 
bug!("found no method `{}` in `{:?}`", method_name, trait_def_id); } - pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize { + pub fn num_variants(&mut self, adt_def: &ty::AdtDef) -> usize { adt_def.variants.len() } - pub fn all_fields(&mut self, adt_def: ty::AdtDef<'tcx>, variant_index: usize) -> Vec { + pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec { (0..adt_def.variants[variant_index].fields.len()) .map(Field::new) .collect() } pub fn needs_drop(&mut self, ty: Ty<'tcx>) -> bool { + let ty = self.tcx.lift_to_global(&ty).unwrap_or_else(|| { + bug!("MIR: Cx::needs_drop({}) got \ + type with inference types/regions", ty); + }); self.tcx.type_needs_drop_given_env(ty, &self.infcx.parameter_environment) } - pub fn span_bug(&mut self, span: Span, message: &str) -> ! { - self.tcx.sess.span_bug(span, message) + pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.tcx } - pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.tcx + pub fn check_overflow(&self) -> bool { + self.check_overflow } } mod block; mod expr; -mod pattern; mod to_ref; diff --git a/src/librustc_mir/hair/cx/pattern.rs b/src/librustc_mir/hair/cx/pattern.rs deleted file mode 100644 index dc377ac731a65..0000000000000 --- a/src/librustc_mir/hair/cx/pattern.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use hair::*; -use hair::cx::Cx; -use rustc_data_structures::fnv::FnvHashMap; -use rustc::middle::const_eval; -use rustc::middle::def; -use rustc::middle::pat_util::{pat_is_resolved_const, pat_is_binding}; -use rustc::middle::ty::{self, Ty}; -use rustc::mir::repr::*; -use rustc_front::hir; -use syntax::ast; -use syntax::codemap::Span; -use syntax::ptr::P; - -/// When there are multiple patterns in a single arm, each one has its -/// own node-ids for the bindings. References to the variables always -/// use the node-ids from the first pattern in the arm, so we just -/// remap the ids for all subsequent bindings to the first one. -/// -/// Example: -/// ``` -/// match foo { -/// Test1(flavor /* def 1 */) | -/// Test2(flavor /* def 2 */) if flavor /* ref 1 */.is_tasty() => { ... } -/// _ => { ... } -/// } -/// ``` -struct PatCx<'patcx, 'cx: 'patcx, 'tcx: 'cx> { - cx: &'patcx mut Cx<'cx, 'tcx>, - binding_map: Option<&'patcx FnvHashMap>, -} - -impl<'cx, 'tcx> Cx<'cx, 'tcx> { - pub fn irrefutable_pat(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { - PatCx::new(self, None).to_pattern(pat) - } - - pub fn refutable_pat(&mut self, - binding_map: Option<&FnvHashMap>, - pat: &hir::Pat) - -> Pattern<'tcx> { - PatCx::new(self, binding_map).to_pattern(pat) - } -} - -impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> { - fn new(cx: &'patcx mut Cx<'cx, 'tcx>, - binding_map: Option<&'patcx FnvHashMap>) - -> PatCx<'patcx, 'cx, 'tcx> { - PatCx { - cx: cx, - binding_map: binding_map, - } - } - - fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { - let kind = match pat.node { - hir::PatWild => PatternKind::Wild, - - hir::PatLit(ref value) => { - let value = const_eval::eval_const_expr(self.cx.tcx, value); - PatternKind::Constant { value: value } - } - - hir::PatRange(ref lo, ref hi) => { - let lo = const_eval::eval_const_expr(self.cx.tcx, lo); - let lo = Literal::Value { value: lo }; - let hi = const_eval::eval_const_expr(self.cx.tcx, hi); - let hi = Literal::Value { value: hi }; 
- PatternKind::Range { lo: lo, hi: hi } - }, - - hir::PatEnum(..) | hir::PatIdent(..) | hir::PatQPath(..) - if pat_is_resolved_const(&self.cx.tcx.def_map.borrow(), pat) => - { - let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); - match def { - def::DefConst(def_id) | def::DefAssociatedConst(def_id) => - match const_eval::lookup_const_by_id(self.cx.tcx, def_id, - Some(pat.id), None) { - Some(const_expr) => { - let pat = const_eval::const_expr_to_pat(self.cx.tcx, const_expr, - pat.span); - return self.to_pattern(&*pat); - } - None => { - self.cx.tcx.sess.span_bug( - pat.span, - &format!("cannot eval constant: {:?}", def_id)) - } - }, - _ => - self.cx.tcx.sess.span_bug( - pat.span, - &format!("def not a constant: {:?}", def)), - } - } - - hir::PatRegion(ref subpattern, _) | - hir::PatBox(ref subpattern) => { - PatternKind::Deref { subpattern: self.to_pattern(subpattern) } - } - - hir::PatVec(ref prefix, ref slice, ref suffix) => { - let ty = self.cx.tcx.node_id_to_type(pat.id); - match ty.sty { - ty::TyRef(_, mt) => - PatternKind::Deref { - subpattern: Pattern { - ty: mt.ty, - span: pat.span, - kind: Box::new(self.slice_or_array_pattern(pat.span, mt.ty, prefix, - slice, suffix)), - }, - }, - - ty::TySlice(..) | - ty::TyArray(..) 
=> - self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix), - - ref sty => - self.cx.tcx.sess.span_bug( - pat.span, - &format!("unexpanded type for vector pattern: {:?}", sty)), - } - } - - hir::PatTup(ref subpatterns) => { - let subpatterns = - subpatterns.iter() - .enumerate() - .map(|(i, subpattern)| FieldPattern { - field: Field::new(i), - pattern: self.to_pattern(subpattern), - }) - .collect(); - - PatternKind::Leaf { subpatterns: subpatterns } - } - - hir::PatIdent(bm, ref ident, ref sub) - if pat_is_binding(&self.cx.tcx.def_map.borrow(), pat) => - { - let id = match self.binding_map { - None => pat.id, - Some(ref map) => map[&ident.node.name], - }; - let var_ty = self.cx.tcx.node_id_to_type(pat.id); - let region = match var_ty.sty { - ty::TyRef(&r, _) => Some(r), - _ => None, - }; - let (mutability, mode) = match bm { - hir::BindByValue(hir::MutMutable) => - (Mutability::Mut, BindingMode::ByValue), - hir::BindByValue(hir::MutImmutable) => - (Mutability::Not, BindingMode::ByValue), - hir::BindByRef(hir::MutMutable) => - (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Mut)), - hir::BindByRef(hir::MutImmutable) => - (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), - }; - PatternKind::Binding { - mutability: mutability, - mode: mode, - name: ident.node.name, - var: id, - ty: var_ty, - subpattern: self.to_opt_pattern(sub), - } - } - - hir::PatIdent(..) 
=> { - self.variant_or_leaf(pat, vec![]) - } - - hir::PatEnum(_, ref opt_subpatterns) => { - let subpatterns = - opt_subpatterns.iter() - .flat_map(|v| v.iter()) - .enumerate() - .map(|(i, field)| FieldPattern { - field: Field::new(i), - pattern: self.to_pattern(field), - }) - .collect(); - self.variant_or_leaf(pat, subpatterns) - } - - hir::PatStruct(_, ref fields, _) => { - let pat_ty = self.cx.tcx.node_id_to_type(pat.id); - let adt_def = match pat_ty.sty { - ty::TyStruct(adt_def, _) | ty::TyEnum(adt_def, _) => adt_def, - _ => { - self.cx.tcx.sess.span_bug( - pat.span, - "struct pattern not applied to struct or enum"); - } - }; - - let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); - let variant_def = adt_def.variant_of_def(def); - - let subpatterns = - fields.iter() - .map(|field| { - let index = variant_def.index_of_field_named(field.node.name); - let index = index.unwrap_or_else(|| { - self.cx.tcx.sess.span_bug( - pat.span, - &format!("no field with name {:?}", field.node.name)); - }); - FieldPattern { - field: Field::new(index), - pattern: self.to_pattern(&field.node.pat), - } - }) - .collect(); - - self.variant_or_leaf(pat, subpatterns) - } - - hir::PatQPath(..) => { - self.cx.tcx.sess.span_bug(pat.span, "unexpanded macro or bad constant etc"); - } - }; - - let ty = self.cx.tcx.node_id_to_type(pat.id); - - Pattern { - span: pat.span, - ty: ty, - kind: Box::new(kind), - } - } - - fn to_patterns(&mut self, pats: &[P]) -> Vec> { - pats.iter().map(|p| self.to_pattern(p)).collect() - } - - fn to_opt_pattern(&mut self, pat: &Option>) -> Option> { - pat.as_ref().map(|p| self.to_pattern(p)) - } - - fn slice_or_array_pattern(&mut self, - span: Span, - ty: Ty<'tcx>, - prefix: &[P], - slice: &Option>, - suffix: &[P]) - -> PatternKind<'tcx> { - match ty.sty { - ty::TySlice(..) 
=> { - // matching a slice or fixed-length array - PatternKind::Slice { - prefix: self.to_patterns(prefix), - slice: self.to_opt_pattern(slice), - suffix: self.to_patterns(suffix), - } - } - - ty::TyArray(_, len) => { - // fixed-length array - assert!(len >= prefix.len() + suffix.len()); - PatternKind::Array { - prefix: self.to_patterns(prefix), - slice: self.to_opt_pattern(slice), - suffix: self.to_patterns(suffix), - } - } - - _ => { - self.cx.tcx.sess.span_bug(span, "unexpanded macro or bad constant etc"); - } - } - } - - fn variant_or_leaf(&mut self, - pat: &hir::Pat, - subpatterns: Vec>) - -> PatternKind<'tcx> { - let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); - match def { - def::DefVariant(enum_id, variant_id, _) => { - let adt_def = self.cx.tcx.lookup_adt_def(enum_id); - if adt_def.variants.len() > 1 { - PatternKind::Variant { - adt_def: adt_def, - variant_index: adt_def.variant_index_with_id(variant_id), - subpatterns: subpatterns, - } - } else { - PatternKind::Leaf { subpatterns: subpatterns } - } - } - - // NB: resolving to DefStruct means the struct *constructor*, - // not the struct as a type. - def::DefStruct(..) | def::DefTy(..) => { - PatternKind::Leaf { subpatterns: subpatterns } - } - - _ => { - self.cx.tcx.sess.span_bug(pat.span, - &format!("inappropriate def for pattern: {:?}", def)); - } - } - } -} diff --git a/src/librustc_mir/hair/cx/to_ref.rs b/src/librustc_mir/hair/cx/to_ref.rs index 24fcc2f4fcd56..63dbde474380a 100644 --- a/src/librustc_mir/hair/cx/to_ref.rs +++ b/src/librustc_mir/hair/cx/to_ref.rs @@ -10,7 +10,7 @@ use hair::*; -use rustc_front::hir; +use rustc::hir; use syntax::ptr::P; pub trait ToRef { diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index fb81cc7e6d97a..22c07f1903bac 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -14,26 +14,19 @@ //! unit-tested and separated from the Rust source and compiler data //! structures. 
-use rustc::mir::repr::{BinOp, BorrowKind, Field, Literal, Mutability, UnOp, ItemKind}; -use rustc::middle::const_eval::ConstVal; -use rustc::middle::def_id::DefId; +use rustc::mir::{BinOp, BorrowKind, Field, Literal, UnOp, TypedConstVal}; +use rustc::hir::def_id::DefId; use rustc::middle::region::CodeExtent; -use rustc::middle::subst::Substs; -use rustc::middle::ty::{self, AdtDef, ClosureSubsts, Region, Ty}; -use rustc_front::hir; +use rustc::ty::subst::Substs; +use rustc::ty::{self, AdtDef, ClosureSubsts, Region, Ty}; +use rustc::hir; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; use self::cx::Cx; pub mod cx; -#[derive(Clone, Debug)] -pub struct ItemRef<'tcx> { - pub ty: Ty<'tcx>, - pub kind: ItemKind, - pub def_id: DefId, - pub substs: &'tcx Substs<'tcx>, -} +pub use rustc_const_eval::pattern::{BindingMode, Pattern, PatternKind, FieldPattern}; #[derive(Clone, Debug)] pub struct Block<'tcx> { @@ -77,40 +70,37 @@ pub enum StmtKind<'tcx> { pattern: Pattern<'tcx>, /// let pat = ... - initializer: Option>, - - /// let pat = init; - stmts: Vec>, + initializer: Option> }, } -// The Hair trait implementor translates their expressions (`&'tcx H::Expr`) -// into instances of this `Expr` enum. This translation can be done -// basically as lazilly or as eagerly as desired: every recursive -// reference to an expression in this enum is an `ExprRef<'tcx>`, which -// may in turn be another instance of this enum (boxed), or else an -// untranslated `&'tcx H::Expr`. Note that instances of `Expr` are very -// shortlived. They are created by `Hair::to_expr`, analyzed and -// converted into MIR, and then discarded. -// -// If you compare `Expr` to the full compiler AST, you will see it is -// a good bit simpler. In fact, a number of the more straight-forward -// MIR simplifications are already done in the impl of `Hair`. For -// example, method calls and overloaded operators are absent: they are -// expected to be converted into `Expr::Call` instances. 
+/// The Hair trait implementor translates their expressions (`&'tcx H::Expr`) +/// into instances of this `Expr` enum. This translation can be done +/// basically as lazilly or as eagerly as desired: every recursive +/// reference to an expression in this enum is an `ExprRef<'tcx>`, which +/// may in turn be another instance of this enum (boxed), or else an +/// untranslated `&'tcx H::Expr`. Note that instances of `Expr` are very +/// shortlived. They are created by `Hair::to_expr`, analyzed and +/// converted into MIR, and then discarded. +/// +/// If you compare `Expr` to the full compiler AST, you will see it is +/// a good bit simpler. In fact, a number of the more straight-forward +/// MIR simplifications are already done in the impl of `Hair`. For +/// example, method calls and overloaded operators are absent: they are +/// expected to be converted into `Expr::Call` instances. #[derive(Clone, Debug)] pub struct Expr<'tcx> { - // type of this expression + /// type of this expression pub ty: Ty<'tcx>, - // lifetime of this expression if it should be spilled into a - // temporary; should be None only if in a constant context + /// lifetime of this expression if it should be spilled into a + /// temporary; should be None only if in a constant context pub temp_lifetime: Option, - // span of the expression in the source + /// span of the expression in the source pub span: Span, - // kind of expression + /// kind of expression pub kind: ExprKind<'tcx>, } @@ -122,6 +112,7 @@ pub enum ExprKind<'tcx> { }, Box { value: ExprRef<'tcx>, + value_extents: CodeExtent, }, Call { ty: ty::Ty<'tcx>, @@ -148,6 +139,12 @@ pub enum ExprKind<'tcx> { Cast { source: ExprRef<'tcx>, }, + Use { + source: ExprRef<'tcx>, + }, // Use a lexpr to get a vexpr. 
+ NeverToAny { + source: ExprRef<'tcx>, + }, ReifyFnPointer { source: ExprRef<'tcx>, }, @@ -193,17 +190,19 @@ pub enum ExprKind<'tcx> { VarRef { id: ast::NodeId, }, - SelfRef, // first argument, used for self in a closure + /// first argument, used for self in a closure + SelfRef, StaticRef { id: DefId, }, Borrow { - region: Region, + region: &'tcx Region, borrow_kind: BorrowKind, arg: ExprRef<'tcx>, }, Break { label: Option, + value: Option>, }, Continue { label: Option, @@ -213,10 +212,7 @@ pub enum ExprKind<'tcx> { }, Repeat { value: ExprRef<'tcx>, - // FIXME(#29789): Add a separate hair::Constant<'tcx> so this could be more explicit about - // its contained data. Currently this should only contain expression of ExprKind::Literal - // kind. - count: ExprRef<'tcx>, + count: TypedConstVal<'tcx>, }, Vec { fields: Vec>, @@ -225,15 +221,15 @@ pub enum ExprKind<'tcx> { fields: Vec>, }, Adt { - adt_def: AdtDef<'tcx>, + adt_def: &'tcx AdtDef, variant_index: usize, substs: &'tcx Substs<'tcx>, fields: Vec>, - base: Option>, + base: Option> }, Closure { closure_id: DefId, - substs: &'tcx ClosureSubsts<'tcx>, + substs: ClosureSubsts<'tcx>, upvars: Vec>, }, Literal { @@ -241,6 +237,8 @@ pub enum ExprKind<'tcx> { }, InlineAsm { asm: &'tcx hir::InlineAsm, + outputs: Vec>, + inputs: Vec> }, } @@ -256,6 +254,12 @@ pub struct FieldExprRef<'tcx> { pub expr: ExprRef<'tcx>, } +#[derive(Clone, Debug)] +pub struct FruInfo<'tcx> { + pub base: ExprRef<'tcx>, + pub field_types: Vec> +} + #[derive(Clone, Debug)] pub struct Arm<'tcx> { pub patterns: Vec>, @@ -263,85 +267,12 @@ pub struct Arm<'tcx> { pub body: ExprRef<'tcx>, } -#[derive(Clone, Debug)] -pub struct Pattern<'tcx> { - pub ty: Ty<'tcx>, - pub span: Span, - pub kind: Box>, -} - #[derive(Copy, Clone, Debug)] pub enum LogicalOp { And, Or, } -#[derive(Clone, Debug)] -pub enum PatternKind<'tcx> { - Wild, - - // x, ref x, x @ P, etc - Binding { - mutability: Mutability, - name: ast::Name, - mode: BindingMode, - var: ast::NodeId, - ty: 
Ty<'tcx>, - subpattern: Option>, - }, - - // Foo(...) or Foo{...} or Foo, where `Foo` is a variant name from an adt with >1 variants - Variant { - adt_def: AdtDef<'tcx>, - variant_index: usize, - subpatterns: Vec>, - }, - - // (...), Foo(...), Foo{...}, or Foo, where `Foo` is a variant name from an adt with 1 variant - Leaf { - subpatterns: Vec>, - }, - - Deref { - subpattern: Pattern<'tcx>, - }, // box P, &P, &mut P, etc - - Constant { - value: ConstVal, - }, - - Range { - lo: Literal<'tcx>, - hi: Literal<'tcx>, - }, - - // matches against a slice, checking the length and extracting elements - Slice { - prefix: Vec>, - slice: Option>, - suffix: Vec>, - }, - - // fixed match against an array, irrefutable - Array { - prefix: Vec>, - slice: Option>, - suffix: Vec>, - }, -} - -#[derive(Copy, Clone, Debug)] -pub enum BindingMode { - ByValue, - ByRef(Region, BorrowKind), -} - -#[derive(Clone, Debug)] -pub struct FieldPattern<'tcx> { - pub field: Field, - pub pattern: Pattern<'tcx>, -} - /////////////////////////////////////////////////////////////////////////// // The Mirror trait @@ -360,13 +291,13 @@ pub struct FieldPattern<'tcx> { pub trait Mirror<'tcx> { type Output; - fn make_mirror<'a>(self, cx: &mut Cx<'a, 'tcx>) -> Self::Output; + fn make_mirror<'a, 'gcx>(self, cx: &mut Cx<'a, 'gcx, 'tcx>) -> Self::Output; } impl<'tcx> Mirror<'tcx> for Expr<'tcx> { type Output = Expr<'tcx>; - fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Expr<'tcx> { + fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> { self } } @@ -374,7 +305,7 @@ impl<'tcx> Mirror<'tcx> for Expr<'tcx> { impl<'tcx> Mirror<'tcx> for ExprRef<'tcx> { type Output = Expr<'tcx>; - fn make_mirror<'a>(self, hir: &mut Cx<'a, 'tcx>) -> Expr<'tcx> { + fn make_mirror<'a, 'gcx>(self, hir: &mut Cx<'a, 'gcx, 'tcx>) -> Expr<'tcx> { match self { ExprRef::Hair(h) => h.make_mirror(hir), ExprRef::Mirror(m) => *m, @@ -385,7 +316,7 @@ impl<'tcx> Mirror<'tcx> for ExprRef<'tcx> { impl<'tcx> Mirror<'tcx> for 
Stmt<'tcx> { type Output = Stmt<'tcx>; - fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Stmt<'tcx> { + fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Stmt<'tcx> { self } } @@ -393,7 +324,7 @@ impl<'tcx> Mirror<'tcx> for Stmt<'tcx> { impl<'tcx> Mirror<'tcx> for StmtRef<'tcx> { type Output = Stmt<'tcx>; - fn make_mirror<'a>(self, _: &mut Cx<'a,'tcx>) -> Stmt<'tcx> { + fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Stmt<'tcx> { match self { StmtRef::Mirror(m) => *m, } @@ -403,7 +334,7 @@ impl<'tcx> Mirror<'tcx> for StmtRef<'tcx> { impl<'tcx> Mirror<'tcx> for Block<'tcx> { type Output = Block<'tcx>; - fn make_mirror<'a>(self, _: &mut Cx<'a, 'tcx>) -> Block<'tcx> { + fn make_mirror<'a, 'gcx>(self, _: &mut Cx<'a, 'gcx, 'tcx>) -> Block<'tcx> { self } } diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 9cc40bbc3838a..617bd81d96a2c 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -17,20 +17,38 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! 
#![crate_name = "rustc_mir"] #![crate_type = "rlib"] #![crate_type = "dylib"] +#![cfg_attr(not(stage0), deny(warnings))] +#![unstable(feature = "rustc_private", issue = "27812")] +#![feature(associated_consts)] +#![feature(box_patterns)] +#![cfg_attr(stage0, feature(item_like_imports))] +#![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] +#![feature(staged_api)] #[macro_use] extern crate log; extern crate graphviz as dot; +#[macro_use] extern crate rustc; extern crate rustc_data_structures; -extern crate rustc_front; extern crate rustc_back; +#[macro_use] +#[no_link] +extern crate rustc_bitflags; +#[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_const_math; +extern crate rustc_const_eval; + +pub mod diagnostics; pub mod build; +pub mod def_use; pub mod graphviz; mod hair; pub mod mir_map; pub mod pretty; pub mod transform; + diff --git a/src/librustc_mir/mir_map.rs b/src/librustc_mir/mir_map.rs index 3886a6b83ac9a..88d02d7d004c9 100644 --- a/src/librustc_mir/mir_map.rs +++ b/src/librustc_mir/mir_map.rs @@ -16,225 +16,247 @@ //! - `#[rustc_mir(graphviz="file.gv")]` //! 
- `#[rustc_mir(pretty="file.mir")]` -extern crate syntax; -extern crate rustc; -extern crate rustc_front; - use build; -use graphviz; -use pretty; -use transform::*; use rustc::dep_graph::DepNode; -use rustc::mir::repr::Mir; +use rustc::hir::def_id::DefId; +use rustc::mir::Mir; +use rustc::mir::transform::MirSource; +use rustc::mir::visit::MutVisitor; +use pretty; use hair::cx::Cx; -use std::fs::File; - -use self::rustc::middle::infer; -use self::rustc::middle::region::CodeExtentData; -use self::rustc::middle::ty::{self, Ty}; -use self::rustc::util::common::ErrorReported; -use self::rustc::util::nodemap::NodeMap; -use self::rustc_front::hir; -use self::rustc_front::intravisit::{self, Visitor}; -use self::syntax::ast; -use self::syntax::attr::AttrMetaMethods; -use self::syntax::codemap::Span; - -pub type MirMap<'tcx> = NodeMap>; - -pub fn build_mir_for_crate<'tcx>(tcx: &ty::ctxt<'tcx>) -> MirMap<'tcx> { - let mut map = NodeMap(); - { - let mut dump = OuterDump { - tcx: tcx, - map: &mut map, - }; - tcx.visit_all_items_in_krate(DepNode::MirMapConstruction, &mut dump); + +use rustc::infer::InferCtxtBuilder; +use rustc::traits::Reveal; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::subst::Substs; +use rustc::hir; +use rustc::hir::intravisit::{self, FnKind, Visitor, NestedVisitorMap}; +use syntax::abi::Abi; +use syntax::ast; +use syntax_pos::Span; + +use std::mem; + +pub fn build_mir_for_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + tcx.visit_all_item_likes_in_krate(DepNode::Mir, &mut BuildMir { + tcx: tcx + }.as_deep_visitor()); +} + +/// A pass to lift all the types and substitutions in a Mir +/// to the global tcx. Sadly, we don't have a "folder" that +/// can change 'tcx so we have to transmute afterwards. 
+struct GlobalizeMir<'a, 'gcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'gcx>, + span: Span +} + +impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> { + fn visit_ty(&mut self, ty: &mut Ty<'tcx>) { + if let Some(lifted) = self.tcx.lift(ty) { + *ty = lifted; + } else { + span_bug!(self.span, + "found type `{:?}` with inference types/regions in MIR", + ty); + } + } + + fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>) { + if let Some(lifted) = self.tcx.lift(substs) { + *substs = lifted; + } else { + span_bug!(self.span, + "found substs `{:?}` with inference types/regions in MIR", + substs); + } } - map } /////////////////////////////////////////////////////////////////////////// -// OuterDump -- walks a crate, looking for fn items and methods to build MIR from +// BuildMir -- walks a crate, looking for fn items and methods to build MIR from + +struct BuildMir<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx> +} -struct OuterDump<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - map: &'a mut MirMap<'tcx>, +/// Helper type of a temporary returned by BuildMir::cx(...). +/// Necessary because we can't write the following bound: +/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Cx<'b, 'gcx, 'tcx>). 
+struct CxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + src: MirSource, + def_id: DefId, + infcx: InferCtxtBuilder<'a, 'gcx, 'tcx> } -impl<'a, 'tcx> OuterDump<'a, 'tcx> { - fn visit_mir(&mut self, attributes: &'a [ast::Attribute], mut walk_op: OP) - where OP: for<'m> FnMut(&mut InnerDump<'a, 'm, 'tcx>) +impl<'a, 'gcx, 'tcx> BuildMir<'a, 'gcx> { + fn cx<'b>(&'b mut self, src: MirSource) -> CxBuilder<'b, 'gcx, 'tcx> { + let param_env = ty::ParameterEnvironment::for_item(self.tcx, src.item_id()); + let def_id = self.tcx.map.local_def_id(src.item_id()); + CxBuilder { + src: src, + infcx: self.tcx.infer_ctxt(None, Some(param_env), Reveal::NotSpecializable), + def_id: def_id + } + } +} + +impl<'a, 'gcx, 'tcx> CxBuilder<'a, 'gcx, 'tcx> { + fn build(&'tcx mut self, f: F) + where F: for<'b> FnOnce(Cx<'b, 'gcx, 'tcx>) -> Mir<'tcx> { - let mut closure_dump = InnerDump { - tcx: self.tcx, - attr: None, - map: &mut *self.map, - }; - for attr in attributes { - if attr.check_name("rustc_mir") { - closure_dump.attr = Some(attr); - } + let (src, def_id) = (self.src, self.def_id); + self.infcx.enter(|infcx| { + let mut mir = f(Cx::new(&infcx, src)); + + // Convert the Mir to global types. + let tcx = infcx.tcx.global_tcx(); + let mut globalizer = GlobalizeMir { + tcx: tcx, + span: mir.span + }; + globalizer.visit_mir(&mut mir); + let mir = unsafe { + mem::transmute::>(mir) + }; + + pretty::dump_mir(tcx, "mir_map", &0, src, &mir); + + let mir = tcx.alloc_mir(mir); + assert!(tcx.mir_map.borrow_mut().insert(def_id, mir).is_none()); + }); + } +} + +impl<'a, 'gcx> BuildMir<'a, 'gcx> { + fn build_const_integer(&mut self, expr: &'gcx hir::Expr) { + // FIXME(eddyb) Closures should have separate + // function definition IDs and expression IDs. + // Type-checking should not let closures get + // this far in an integer constant position. + if let hir::ExprClosure(..) 
= expr.node { + return; } - walk_op(&mut closure_dump); + self.cx(MirSource::Const(expr.id)).build(|cx| { + build::construct_const(cx, expr.id, expr) + }); } } +impl<'a, 'tcx> Visitor<'tcx> for BuildMir<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } -impl<'a, 'tcx> Visitor<'tcx> for OuterDump<'a, 'tcx> { + // Const and static items. fn visit_item(&mut self, item: &'tcx hir::Item) { - self.visit_mir(&item.attrs, |c| intravisit::walk_item(c, item)); + match item.node { + hir::ItemConst(_, ref expr) => { + self.cx(MirSource::Const(item.id)).build(|cx| { + build::construct_const(cx, item.id, expr) + }); + } + hir::ItemStatic(_, m, ref expr) => { + self.cx(MirSource::Static(item.id, m)).build(|cx| { + build::construct_const(cx, item.id, expr) + }); + } + _ => {} + } intravisit::walk_item(self, item); } - fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { - match trait_item.node { - hir::MethodTraitItem(_, Some(_)) => { - self.visit_mir(&trait_item.attrs, |c| intravisit::walk_trait_item(c, trait_item)); - } - hir::MethodTraitItem(_, None) | - hir::ConstTraitItem(..) | - hir::TypeTraitItem(..) => {} + // Trait associated const defaults. + fn visit_trait_item(&mut self, item: &'tcx hir::TraitItem) { + if let hir::ConstTraitItem(_, Some(ref expr)) = item.node { + self.cx(MirSource::Const(item.id)).build(|cx| { + build::construct_const(cx, item.id, expr) + }); } - intravisit::walk_trait_item(self, trait_item); + intravisit::walk_trait_item(self, item); } - fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { - match impl_item.node { - hir::ImplItemKind::Method(..) => { - self.visit_mir(&impl_item.attrs, |c| intravisit::walk_impl_item(c, impl_item)); - } - hir::ImplItemKind::Const(..) | hir::ImplItemKind::Type(..) => {} + // Impl associated const. 
+ fn visit_impl_item(&mut self, item: &'tcx hir::ImplItem) { + if let hir::ImplItemKind::Const(_, ref expr) = item.node { + self.cx(MirSource::Const(item.id)).build(|cx| { + build::construct_const(cx, item.id, expr) + }); } - intravisit::walk_impl_item(self, impl_item); + intravisit::walk_impl_item(self, item); } -} -/////////////////////////////////////////////////////////////////////////// -// InnerDump -- dumps MIR for a single fn and its contained closures - -struct InnerDump<'a, 'm, 'tcx: 'a + 'm> { - tcx: &'a ty::ctxt<'tcx>, - map: &'m mut MirMap<'tcx>, - attr: Option<&'a ast::Attribute>, -} + // Repeat counts, i.e. [expr; constant]. + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { + if let hir::ExprRepeat(_, ref count) = expr.node { + self.build_const_integer(count); + } + intravisit::walk_expr(self, expr); + } -impl<'a, 'm, 'tcx> Visitor<'tcx> for InnerDump<'a,'m,'tcx> { - fn visit_trait_item(&mut self, _: &'tcx hir::TraitItem) { - // ignore methods; the outer dump will call us for them independently + // Array lengths, i.e. [T; constant]. + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { + if let hir::TyArray(_, ref length) = ty.node { + self.build_const_integer(length); + } + intravisit::walk_ty(self, ty); } - fn visit_impl_item(&mut self, _: &'tcx hir::ImplItem) { - // ignore methods; the outer dump will call us for them independently + // Enum variant discriminant values. 
+ fn visit_variant(&mut self, v: &'tcx hir::Variant, + g: &'tcx hir::Generics, item_id: ast::NodeId) { + if let Some(ref expr) = v.node.disr_expr { + self.build_const_integer(expr); + } + intravisit::walk_variant(self, v, g, item_id); } fn visit_fn(&mut self, - fk: intravisit::FnKind<'tcx>, + fk: FnKind<'tcx>, decl: &'tcx hir::FnDecl, - body: &'tcx hir::Block, + body_id: hir::ExprId, span: Span, id: ast::NodeId) { - let (prefix, implicit_arg_tys) = match fk { - intravisit::FnKind::Closure => - (format!("{}-", id), vec![closure_self_ty(&self.tcx, id, body.id)]), - _ => - (format!(""), vec![]), + // fetch the fully liberated fn signature (that is, all bound + // types/lifetimes replaced) + let fn_sig = match self.tcx.tables().liberated_fn_sigs.get(&id) { + Some(f) => f.clone(), + None => { + span_bug!(span, "no liberated fn sig for {:?}", id); + } }; - let param_env = ty::ParameterEnvironment::for_item(self.tcx, id); - - let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, Some(param_env)); - - match build_mir(Cx::new(&infcx), implicit_arg_tys, id, span, decl, body) { - Ok(mut mir) => { - simplify_cfg::SimplifyCfg::new().run_on_mir(&mut mir); - - let meta_item_list = self.attr - .iter() - .flat_map(|a| a.meta_item_list()) - .flat_map(|l| l.iter()); - for item in meta_item_list { - if item.check_name("graphviz") || item.check_name("pretty") { - match item.value_str() { - Some(s) => { - let filename = format!("{}{}", prefix, s); - let result = File::create(&filename).and_then(|ref mut output| { - if item.check_name("graphviz") { - graphviz::write_mir_graphviz(&mir, output) - } else { - pretty::write_mir_pretty(&mir, output) - } - }); - - if let Err(e) = result { - self.tcx.sess.span_fatal( - item.span, - &format!("Error writing MIR {} results to `{}`: {}", - item.name(), filename, e)); - } - } - None => { - self.tcx.sess.span_err( - item.span, - &format!("{} attribute requires a path", item.name())); - } - } - } - } - - let previous = self.map.insert(id, mir); 
- assert!(previous.is_none()); - } - Err(ErrorReported) => {} - } + let (abi, implicit_argument) = if let FnKind::Closure(..) = fk { + (Abi::Rust, Some((closure_self_ty(self.tcx, id, body_id.node_id()), None))) + } else { + let def_id = self.tcx.map.local_def_id(id); + (self.tcx.item_type(def_id).fn_abi(), None) + }; - intravisit::walk_fn(self, fk, decl, body, span); - } -} + let explicit_arguments = + decl.inputs + .iter() + .enumerate() + .map(|(index, arg)| { + (fn_sig.inputs[index], Some(&*arg.pat)) + }); -fn build_mir<'a,'tcx:'a>(cx: Cx<'a,'tcx>, - implicit_arg_tys: Vec>, - fn_id: ast::NodeId, - span: Span, - decl: &'tcx hir::FnDecl, - body: &'tcx hir::Block) - -> Result, ErrorReported> { - // fetch the fully liberated fn signature (that is, all bound - // types/lifetimes replaced) - let fn_sig = match cx.tcx().tables.borrow().liberated_fn_sigs.get(&fn_id) { - Some(f) => f.clone(), - None => { - cx.tcx().sess.span_bug(span, - &format!("no liberated fn sig for {:?}", fn_id)); - } - }; - - let arguments = - decl.inputs - .iter() - .enumerate() - .map(|(index, arg)| { - (fn_sig.inputs[index], &*arg.pat) - }) - .collect(); - - let parameter_scope = - cx.tcx().region_maps.lookup_code_extent( - CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body.id }); - Ok(build::construct(cx, - span, - implicit_arg_tys, - arguments, - parameter_scope, - fn_sig.output, - body)) + let body = self.tcx.map.expr(body_id); + + let arguments = implicit_argument.into_iter().chain(explicit_arguments); + self.cx(MirSource::Fn(id)).build(|cx| { + build::construct_fn(cx, id, arguments, abi, fn_sig.output, body) + }); + + intravisit::walk_fn(self, fk, decl, body_id, span, id); + } } -fn closure_self_ty<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, +fn closure_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, closure_expr_id: ast::NodeId, body_id: ast::NodeId) -> Ty<'tcx> { - let closure_ty = tcx.node_id_to_type(closure_expr_id); + let closure_ty = tcx.tables().node_id_to_type(closure_expr_id); // 
We're just hard-coding the idea that the signature will be // &self or &mut self and hence will have a bound region with @@ -246,15 +268,15 @@ fn closure_self_ty<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, let region = tcx.mk_region(region); match tcx.closure_kind(tcx.map.local_def_id(closure_expr_id)) { - ty::ClosureKind::FnClosureKind => + ty::ClosureKind::Fn => tcx.mk_ref(region, ty::TypeAndMut { ty: closure_ty, mutbl: hir::MutImmutable }), - ty::ClosureKind::FnMutClosureKind => + ty::ClosureKind::FnMut => tcx.mk_ref(region, ty::TypeAndMut { ty: closure_ty, mutbl: hir::MutMutable }), - ty::ClosureKind::FnOnceClosureKind => + ty::ClosureKind::FnOnce => closure_ty } } diff --git a/src/librustc_mir/pretty.rs b/src/librustc_mir/pretty.rs index ea4036a4d375f..e7188d536980f 100644 --- a/src/librustc_mir/pretty.rs +++ b/src/librustc_mir/pretty.rs @@ -8,77 +8,303 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use rustc::mir::repr::*; -use rustc::middle::ty; +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::mir::*; +use rustc::mir::transform::MirSource; +use rustc::ty::TyCtxt; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::indexed_vec::{Idx}; +use std::fmt::Display; +use std::fs; use std::io::{self, Write}; +use std::path::{PathBuf, Path}; const INDENT: &'static str = " "; +/// Alignment for lining up comments following MIR statements +const ALIGN: usize = 40; + +/// If the session is properly configured, dumps a human-readable +/// representation of the mir into: +/// +/// ```text +/// rustc.node.. +/// ``` +/// +/// Output from this function is controlled by passing `-Z dump-mir=`, +/// where `` takes the following forms: +/// +/// - `all` -- dump MIR for all fns, all passes, all everything +/// - `substring1&substring2,...` -- `&`-separated list of substrings +/// that can appear in the pass-name or the `item_path_str` for the given +/// node-id. 
If any one of the substrings match, the data is dumped out. +pub fn dump_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + pass_name: &str, + disambiguator: &Display, + src: MirSource, + mir: &Mir<'tcx>) { + let filters = match tcx.sess.opts.debugging_opts.dump_mir { + None => return, + Some(ref filters) => filters, + }; + let node_id = src.item_id(); + let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id)); + let is_matched = + filters.split("&") + .any(|filter| { + filter == "all" || + pass_name.contains(filter) || + node_path.contains(filter) + }); + if !is_matched { + return; + } + + let promotion_id = match src { + MirSource::Promoted(_, id) => format!("-{:?}", id), + _ => String::new() + }; + + let mut file_path = PathBuf::new(); + if let Some(ref file_dir) = tcx.sess.opts.debugging_opts.dump_mir_dir { + let p = Path::new(file_dir); + file_path.push(p); + }; + let file_name = format!("rustc.node{}{}.{}.{}.mir", + node_id, promotion_id, pass_name, disambiguator); + file_path.push(&file_name); + let _ = fs::File::create(&file_path).and_then(|mut file| { + writeln!(file, "// MIR for `{}`", node_path)?; + writeln!(file, "// node_id = {}", node_id)?; + writeln!(file, "// pass_name = {}", pass_name)?; + writeln!(file, "// disambiguator = {}", disambiguator)?; + writeln!(file, "")?; + write_mir_fn(tcx, src, mir, &mut file)?; + Ok(()) + }); +} /// Write out a human-readable textual representation for the given MIR. 
-pub fn write_mir_pretty(mir: &Mir, w: &mut W) -> io::Result<()> { - try!(write_mir_intro(mir, w)); +pub fn write_mir_pretty<'a, 'b, 'tcx, I>(tcx: TyCtxt<'b, 'tcx, 'tcx>, + iter: I, + w: &mut Write) + -> io::Result<()> + where I: Iterator, 'tcx: 'a +{ + let mut first = true; + for def_id in iter { + let mir = &tcx.item_mir(def_id); + + if first { + first = false; + } else { + // Put empty lines between all items + writeln!(w, "")?; + } + + let id = tcx.map.as_local_node_id(def_id).unwrap(); + let src = MirSource::from_node(tcx, id); + write_mir_fn(tcx, src, mir, w)?; + + for (i, mir) in mir.promoted.iter_enumerated() { + writeln!(w, "")?; + write_mir_fn(tcx, MirSource::Promoted(id, i), mir, w)?; + } + } + Ok(()) +} - // Nodes - for block in mir.all_basic_blocks() { - try!(write_basic_block(block, mir, w)); +pub fn write_mir_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &Mir<'tcx>, + w: &mut Write) + -> io::Result<()> { + write_mir_intro(tcx, src, mir, w)?; + for block in mir.basic_blocks().indices() { + write_basic_block(tcx, block, mir, w)?; + if block.index() + 1 != mir.basic_blocks().len() { + writeln!(w, "")?; + } } - writeln!(w, "}}") + writeln!(w, "}}")?; + Ok(()) } /// Write out a human-readable textual representation for the given basic block. -fn write_basic_block(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { - let data = mir.basic_block_data(block); +fn write_basic_block(tcx: TyCtxt, + block: BasicBlock, + mir: &Mir, + w: &mut Write) + -> io::Result<()> { + let data = &mir[block]; // Basic block label at the top. - try!(writeln!(w, "\n{}{:?}: {{", INDENT, block)); + writeln!(w, "{}{:?}: {{", INDENT, block)?; // List of statements in the middle. 
+ let mut current_location = Location { block: block, statement_index: 0 }; for statement in &data.statements { - try!(writeln!(w, "{0}{0}{1:?};", INDENT, statement)); + let indented_mir = format!("{0}{0}{1:?};", INDENT, statement); + writeln!(w, "{0:1$} // {2}", + indented_mir, + ALIGN, + comment(tcx, statement.source_info))?; + + current_location.statement_index += 1; } // Terminator at the bottom. - try!(writeln!(w, "{0}{0}{1:?};", INDENT, data.terminator())); + let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind); + writeln!(w, "{0:1$} // {2}", + indented_terminator, + ALIGN, + comment(tcx, data.terminator().source_info))?; writeln!(w, "{}}}", INDENT) } +fn comment(tcx: TyCtxt, SourceInfo { span, scope }: SourceInfo) -> String { + format!("scope {} at {}", scope.index(), tcx.sess.codemap().span_to_string(span)) +} + +/// Prints user-defined variables in a scope tree. +/// +/// Returns the total number of variables printed. +fn write_scope_tree(tcx: TyCtxt, + mir: &Mir, + scope_tree: &FxHashMap>, + w: &mut Write, + parent: VisibilityScope, + depth: usize) + -> io::Result<()> { + let indent = depth * INDENT.len(); + + let children = match scope_tree.get(&parent) { + Some(childs) => childs, + None => return Ok(()), + }; + + for &child in children { + let data = &mir.visibility_scopes[child]; + assert_eq!(data.parent_scope, Some(parent)); + writeln!(w, "{0:1$}scope {2} {{", "", indent, child.index())?; + + // User variable types (including the user's name in a comment). + for local in mir.vars_iter() { + let var = &mir.local_decls[local]; + let (name, source_info) = if var.source_info.unwrap().scope == child { + (var.name.unwrap(), var.source_info.unwrap()) + } else { + // Not a variable or not declared in this scope. 
+ continue; + }; + + let mut_str = if var.mutability == Mutability::Mut { + "mut " + } else { + "" + }; + + let indent = indent + INDENT.len(); + let indented_var = format!("{0:1$}let {2}{3:?}: {4};", + INDENT, + indent, + mut_str, + local, + var.ty); + writeln!(w, "{0:1$} // \"{2}\" in {3}", + indented_var, + ALIGN, + name, + comment(tcx, source_info))?; + } + + write_scope_tree(tcx, mir, scope_tree, w, child, depth + 1)?; + + writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?; + } + + Ok(()) +} + /// Write out a human-readable textual representation of the MIR's `fn` type and the types of its /// local variables (both user-defined bindings and compiler temporaries). -fn write_mir_intro(mir: &Mir, w: &mut W) -> io::Result<()> { - try!(write!(w, "fn(")); +fn write_mir_intro<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &Mir, + w: &mut Write) + -> io::Result<()> { + write_mir_sig(tcx, src, mir, w)?; + writeln!(w, " {{")?; - // fn argument types. - for (i, arg) in mir.arg_decls.iter().enumerate() { - if i > 0 { - try!(write!(w, ", ")); + // construct a scope tree and write it out + let mut scope_tree: FxHashMap> = FxHashMap(); + for (index, scope_data) in mir.visibility_scopes.iter().enumerate() { + if let Some(parent) = scope_data.parent_scope { + scope_tree.entry(parent) + .or_insert(vec![]) + .push(VisibilityScope::new(index)); + } else { + // Only the argument scope has no parent, because it's the root. + assert_eq!(index, ARGUMENT_VISIBILITY_SCOPE.index()); } - try!(write!(w, "{:?}: {}", Lvalue::Arg(i as u32), arg.ty)); } - try!(write!(w, ") -> ")); + // Print return pointer + let indented_retptr = format!("{}let mut {:?}: {};", + INDENT, + RETURN_POINTER, + mir.return_ty); + writeln!(w, "{0:1$} // return pointer", + indented_retptr, + ALIGN)?; + + write_scope_tree(tcx, mir, &scope_tree, w, ARGUMENT_VISIBILITY_SCOPE, 1)?; + + write_temp_decls(mir, w)?; + + // Add an empty line before the first block is printed. 
+ writeln!(w, "")?; + + Ok(()) +} - // fn return type. - match mir.return_ty { - ty::FnOutput::FnConverging(ty) => try!(write!(w, "{}", ty)), - ty::FnOutput::FnDiverging => try!(write!(w, "!")), +fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut Write) + -> io::Result<()> +{ + match src { + MirSource::Fn(_) => write!(w, "fn")?, + MirSource::Const(_) => write!(w, "const")?, + MirSource::Static(_, hir::MutImmutable) => write!(w, "static")?, + MirSource::Static(_, hir::MutMutable) => write!(w, "static mut")?, + MirSource::Promoted(_, i) => write!(w, "{:?} in", i)? } - try!(writeln!(w, " {{")); + write!(w, " {}", tcx.node_path_str(src.item_id()))?; + + if let MirSource::Fn(_) = src { + write!(w, "(")?; - // User variable types (including the user's name in a comment). - for (i, var) in mir.var_decls.iter().enumerate() { - try!(write!(w, "{}let ", INDENT)); - if var.mutability == Mutability::Mut { - try!(write!(w, "mut ")); + // fn argument types. + for (i, arg) in mir.args_iter().enumerate() { + if i != 0 { + write!(w, ", ")?; + } + write!(w, "{:?}: {}", Lvalue::Local(arg), mir.local_decls[arg].ty)?; } - try!(writeln!(w, "{:?}: {}; // {}", Lvalue::Var(i as u32), var.ty, var.name)); + + write!(w, ") -> {}", mir.return_ty) + } else { + assert_eq!(mir.arg_count, 0); + write!(w, ": {} =", mir.return_ty) } +} +fn write_temp_decls(mir: &Mir, w: &mut Write) -> io::Result<()> { // Compiler-introduced temporary types. - for (i, temp) in mir.temp_decls.iter().enumerate() { - try!(writeln!(w, "{}let mut {:?}: {};", INDENT, Lvalue::Temp(i as u32), temp.ty)); + for temp in mir.temps_iter() { + writeln!(w, "{}let mut {:?}: {};", INDENT, temp, mir.local_decls[temp].ty)?; } Ok(()) diff --git a/src/librustc_mir/transform/add_call_guards.rs b/src/librustc_mir/transform/add_call_guards.rs new file mode 100644 index 0000000000000..89e644e4fb077 --- /dev/null +++ b/src/librustc_mir/transform/add_call_guards.rs @@ -0,0 +1,82 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; + +pub struct AddCallGuards; + +/** + * Breaks outgoing critical edges for call terminators in the MIR. + * + * Critical edges are edges that are neither the only edge leaving a + * block, nor the only edge entering one. + * + * When you want something to happen "along" an edge, you can either + * do at the end of the predecessor block, or at the start of the + * successor block. Critical edges have to be broken in order to prevent + * "edge actions" from affecting other edges. We need this for calls that are + * translated to LLVM invoke instructions, because invoke is a block terminator + * in LLVM so we can't insert any code to handle the call's result into the + * block that performs the call. + * + * This function will break those edges by inserting new blocks along them. + * + * NOTE: Simplify CFG will happily undo most of the work this pass does. + * + */ + +impl<'tcx> MirPass<'tcx> for AddCallGuards { + fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) { + let pred_count: IndexVec<_, _> = + mir.predecessors().iter().map(|ps| ps.len()).collect(); + + // We need a place to store the new blocks generated + let mut new_blocks = Vec::new(); + + let cur_len = mir.basic_blocks().len(); + + for block in mir.basic_blocks_mut() { + match block.terminator { + Some(Terminator { + kind: TerminatorKind::Call { + destination: Some((_, ref mut destination)), + cleanup: Some(_), + .. 
+ }, source_info + }) if pred_count[*destination] > 1 => { + // It's a critical edge, break it + let call_guard = BasicBlockData { + statements: vec![], + is_cleanup: block.is_cleanup, + terminator: Some(Terminator { + source_info: source_info, + kind: TerminatorKind::Goto { target: *destination } + }) + }; + + // Get the index it will be when inserted into the MIR + let idx = cur_len + new_blocks.len(); + new_blocks.push(call_guard); + *destination = BasicBlock::new(idx); + } + _ => {} + } + } + + debug!("Broke {} N edges", new_blocks.len()); + + mir.basic_blocks_mut().extend(new_blocks); + } +} + +impl Pass for AddCallGuards {} diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs new file mode 100644 index 0000000000000..8c8c42a1c7687 --- /dev/null +++ b/src/librustc_mir/transform/copy_prop.rs @@ -0,0 +1,323 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Trivial copy propagation pass. +//! +//! This uses def-use analysis to remove values that have exactly one def and one use, which must +//! be an assignment. +//! +//! To give an example, we look for patterns that look like: +//! +//! DEST = SRC +//! ... +//! USE(DEST) +//! +//! where `DEST` and `SRC` are both locals of some form. We replace that with: +//! +//! NOP +//! ... +//! USE(SRC) +//! +//! The assignment `DEST = SRC` must be (a) the only mutation of `DEST` and (b) the only +//! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the +//! future. 
+ +use def_use::DefUseAnalysis; +use rustc::mir::{Constant, Local, Location, Lvalue, Mir, Operand, Rvalue, StatementKind}; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::MutVisitor; +use rustc::ty::TyCtxt; +use transform::qualify_consts; + +pub struct CopyPropagation; + +impl Pass for CopyPropagation {} + +impl<'tcx> MirPass<'tcx> for CopyPropagation { + fn run_pass<'a>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + source: MirSource, + mir: &mut Mir<'tcx>) { + match source { + MirSource::Const(_) => { + // Don't run on constants, because constant qualification might reject the + // optimized IR. + return + } + MirSource::Static(..) | MirSource::Promoted(..) => { + // Don't run on statics and promoted statics, because trans might not be able to + // evaluate the optimized IR. + return + } + MirSource::Fn(function_node_id) => { + if qualify_consts::is_const_fn(tcx, tcx.map.local_def_id(function_node_id)) { + // Don't run on const functions, as, again, trans might not be able to evaluate + // the optimized IR. + return + } + } + } + + // We only run when the MIR optimization level is at least 1. This avoids messing up debug + // info. + match tcx.sess.opts.debugging_opts.mir_opt_level { + Some(0) | None => return, + _ => {} + } + + loop { + let mut def_use_analysis = DefUseAnalysis::new(mir); + def_use_analysis.analyze(mir); + + let mut changed = false; + for dest_local in mir.local_decls.indices() { + debug!("Considering destination local: {:?}", dest_local); + + let action; + let location; + { + // The destination must have exactly one def. 
+ let dest_use_info = def_use_analysis.local_info(dest_local); + let dest_def_count = dest_use_info.def_count_not_including_drop(); + if dest_def_count == 0 { + debug!(" Can't copy-propagate local: dest {:?} undefined", + dest_local); + continue + } + if dest_def_count > 1 { + debug!(" Can't copy-propagate local: dest {:?} defined {} times", + dest_local, + dest_use_info.def_count()); + continue + } + if dest_use_info.use_count() == 0 { + debug!(" Can't copy-propagate local: dest {:?} unused", + dest_local); + continue + } + let dest_lvalue_def = dest_use_info.defs_and_uses.iter().filter(|lvalue_def| { + lvalue_def.context.is_mutating_use() && !lvalue_def.context.is_drop() + }).next().unwrap(); + location = dest_lvalue_def.location; + + let basic_block = &mir[location.block]; + let statement_index = location.statement_index; + let statement = match basic_block.statements.get(statement_index) { + Some(statement) => statement, + None => { + debug!(" Can't copy-propagate local: used in terminator"); + continue + } + }; + + // That use of the source must be an assignment. + match statement.kind { + StatementKind::Assign(Lvalue::Local(local), Rvalue::Use(ref operand)) if + local == dest_local => { + let maybe_action = match *operand { + Operand::Consume(ref src_lvalue) => { + Action::local_copy(&def_use_analysis, src_lvalue) + } + Operand::Constant(ref src_constant) => { + Action::constant(src_constant) + } + }; + match maybe_action { + Some(this_action) => action = this_action, + None => continue, + } + } + _ => { + debug!(" Can't copy-propagate local: source use is not an \ + assignment"); + continue + } + } + } + + changed = action.perform(mir, &def_use_analysis, dest_local, location) || changed; + // FIXME(pcwalton): Update the use-def chains to delete the instructions instead of + // regenerating the chains. 
+ break + } + if !changed { + break + } + } + } +} + +enum Action<'tcx> { + PropagateLocalCopy(Local), + PropagateConstant(Constant<'tcx>), +} + +impl<'tcx> Action<'tcx> { + fn local_copy(def_use_analysis: &DefUseAnalysis, src_lvalue: &Lvalue<'tcx>) + -> Option> { + // The source must be a local. + let src_local = if let Lvalue::Local(local) = *src_lvalue { + local + } else { + debug!(" Can't copy-propagate local: source is not a local"); + return None; + }; + + // We're trying to copy propagate a local. + // There must be exactly one use of the source used in a statement (not in a terminator). + let src_use_info = def_use_analysis.local_info(src_local); + let src_use_count = src_use_info.use_count(); + if src_use_count == 0 { + debug!(" Can't copy-propagate local: no uses"); + return None + } + if src_use_count != 1 { + debug!(" Can't copy-propagate local: {} uses", src_use_info.use_count()); + return None + } + + // Verify that the source doesn't change in between. This is done conservatively for now, + // by ensuring that the source has exactly one mutation. The goal is to prevent things + // like: + // + // DEST = SRC; + // SRC = X; + // USE(DEST); + // + // From being misoptimized into: + // + // SRC = X; + // USE(SRC); + let src_def_count = src_use_info.def_count_not_including_drop(); + if src_def_count != 1 { + debug!(" Can't copy-propagate local: {} defs of src", + src_use_info.def_count_not_including_drop()); + return None + } + + Some(Action::PropagateLocalCopy(src_local)) + } + + fn constant(src_constant: &Constant<'tcx>) -> Option> { + Some(Action::PropagateConstant((*src_constant).clone())) + } + + fn perform(self, + mir: &mut Mir<'tcx>, + def_use_analysis: &DefUseAnalysis<'tcx>, + dest_local: Local, + location: Location) + -> bool { + match self { + Action::PropagateLocalCopy(src_local) => { + // Eliminate the destination and the assignment. + // + // First, remove all markers. + // + // FIXME(pcwalton): Don't do this. Merge live ranges instead. 
+ debug!(" Replacing all uses of {:?} with {:?} (local)", + dest_local, + src_local); + for lvalue_use in &def_use_analysis.local_info(dest_local).defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + for lvalue_use in &def_use_analysis.local_info(src_local).defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + + // Replace all uses of the destination local with the source local. + let src_lvalue = Lvalue::Local(src_local); + def_use_analysis.replace_all_defs_and_uses_with(dest_local, mir, src_lvalue); + + // Finally, zap the now-useless assignment instruction. + debug!(" Deleting assignment"); + mir.make_statement_nop(location); + + true + } + Action::PropagateConstant(src_constant) => { + // First, remove all markers. + // + // FIXME(pcwalton): Don't do this. Merge live ranges instead. + debug!(" Replacing all uses of {:?} with {:?} (constant)", + dest_local, + src_constant); + let dest_local_info = def_use_analysis.local_info(dest_local); + for lvalue_use in &dest_local_info.defs_and_uses { + if lvalue_use.context.is_storage_marker() { + mir.make_statement_nop(lvalue_use.location) + } + } + + // Replace all uses of the destination local with the constant. + let mut visitor = ConstantPropagationVisitor::new(dest_local, + src_constant); + for dest_lvalue_use in &dest_local_info.defs_and_uses { + visitor.visit_location(mir, dest_lvalue_use.location) + } + + // Zap the assignment instruction if we eliminated all the uses. We won't have been + // able to do that if the destination was used in a projection, because projections + // must have lvalues on their LHS. 
+ let use_count = dest_local_info.use_count(); + if visitor.uses_replaced == use_count { + debug!(" {} of {} use(s) replaced; deleting assignment", + visitor.uses_replaced, + use_count); + mir.make_statement_nop(location); + true + } else if visitor.uses_replaced == 0 { + debug!(" No uses replaced; not deleting assignment"); + false + } else { + debug!(" {} of {} use(s) replaced; not deleting assignment", + visitor.uses_replaced, + use_count); + true + } + } + } + } +} + +struct ConstantPropagationVisitor<'tcx> { + dest_local: Local, + constant: Constant<'tcx>, + uses_replaced: usize, +} + +impl<'tcx> ConstantPropagationVisitor<'tcx> { + fn new(dest_local: Local, constant: Constant<'tcx>) + -> ConstantPropagationVisitor<'tcx> { + ConstantPropagationVisitor { + dest_local: dest_local, + constant: constant, + uses_replaced: 0, + } + } +} + +impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> { + fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) { + self.super_operand(operand, location); + + match *operand { + Operand::Consume(Lvalue::Local(local)) if local == self.dest_local => {} + _ => return, + } + + *operand = Operand::Constant(self.constant.clone()); + self.uses_replaced += 1 + } +} diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs new file mode 100644 index 0000000000000..fcdeae6d6c080 --- /dev/null +++ b/src/librustc_mir/transform/deaggregator.rs @@ -0,0 +1,134 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc_data_structures::indexed_vec::Idx; + +pub struct Deaggregator; + +impl Pass for Deaggregator {} + +impl<'tcx> MirPass<'tcx> for Deaggregator { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + source: MirSource, mir: &mut Mir<'tcx>) { + let node_id = source.item_id(); + let node_path = tcx.item_path_str(tcx.map.local_def_id(node_id)); + debug!("running on: {:?}", node_path); + // we only run when mir_opt_level > 1 + match tcx.sess.opts.debugging_opts.mir_opt_level { + Some(0) | + Some(1) | + None => { return; }, + _ => {} + }; + + // Do not trigger on constants. Could be revised in future + if let MirSource::Fn(_) = source {} else { return; } + // In fact, we might not want to trigger in other cases. + // Ex: when we could use SROA. See issue #35259 + + let mut curr: usize = 0; + for bb in mir.basic_blocks_mut() { + let idx = match get_aggregate_statement_index(curr, &bb.statements) { + Some(idx) => idx, + None => continue, + }; + // do the replacement + debug!("removing statement {:?}", idx); + let src_info = bb.statements[idx].source_info; + let suffix_stmts = bb.statements.split_off(idx+1); + let orig_stmt = bb.statements.pop().unwrap(); + let (lhs, rhs) = match orig_stmt.kind { + StatementKind::Assign(ref lhs, ref rhs) => (lhs, rhs), + _ => span_bug!(src_info.span, "expected assign, not {:?}", orig_stmt), + }; + let (agg_kind, operands) = match rhs { + &Rvalue::Aggregate(ref agg_kind, ref operands) => (agg_kind, operands), + _ => span_bug!(src_info.span, "expected aggregate, not {:?}", rhs), + }; + let (adt_def, variant, substs) = match agg_kind { + &AggregateKind::Adt(adt_def, variant, substs, None) => (adt_def, variant, substs), + _ => span_bug!(src_info.span, "expected struct, not {:?}", rhs), + }; + let n = bb.statements.len(); + bb.statements.reserve(n + operands.len() + suffix_stmts.len()); + for (i, op) in operands.iter().enumerate() 
{ + let ref variant_def = adt_def.variants[variant]; + let ty = variant_def.fields[i].ty(tcx, substs); + let rhs = Rvalue::Use(op.clone()); + + let lhs_cast = if adt_def.variants.len() > 1 { + Lvalue::Projection(Box::new(LvalueProjection { + base: lhs.clone(), + elem: ProjectionElem::Downcast(adt_def, variant), + })) + } else { + lhs.clone() + }; + + let lhs_proj = Lvalue::Projection(Box::new(LvalueProjection { + base: lhs_cast, + elem: ProjectionElem::Field(Field::new(i), ty), + })); + let new_statement = Statement { + source_info: src_info, + kind: StatementKind::Assign(lhs_proj, rhs), + }; + debug!("inserting: {:?} @ {:?}", new_statement, idx + i); + bb.statements.push(new_statement); + } + + // if the aggregate was an enum, we need to set the discriminant + if adt_def.variants.len() > 1 { + let set_discriminant = Statement { + kind: StatementKind::SetDiscriminant { + lvalue: lhs.clone(), + variant_index: variant, + }, + source_info: src_info, + }; + bb.statements.push(set_discriminant); + }; + + curr = bb.statements.len(); + bb.statements.extend(suffix_stmts); + } + } +} + +fn get_aggregate_statement_index<'a, 'tcx, 'b>(start: usize, + statements: &Vec>) + -> Option { + for i in start..statements.len() { + let ref statement = statements[i]; + let rhs = match statement.kind { + StatementKind::Assign(_, ref rhs) => rhs, + _ => continue, + }; + let (kind, operands) = match rhs { + &Rvalue::Aggregate(ref kind, ref operands) => (kind, operands), + _ => continue, + }; + let (adt_def, variant) = match kind { + &AggregateKind::Adt(adt_def, variant, _, None) => (adt_def, variant), + _ => continue, + }; + if operands.len() == 0 { + // don't deaggregate () + continue; + } + debug!("getting variant {:?}", variant); + debug!("for adt_def {:?}", adt_def); + return Some(i); + }; + None +} diff --git a/src/librustc_mir/transform/dump_mir.rs b/src/librustc_mir/transform/dump_mir.rs new file mode 100644 index 0000000000000..035f33de91aa5 --- /dev/null +++ 
b/src/librustc_mir/transform/dump_mir.rs @@ -0,0 +1,72 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass just dumps MIR at a specified point. + +use std::fmt; + +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::transform::{Pass, MirPass, MirPassHook, MirSource}; +use pretty; + +pub struct Marker<'a>(pub &'a str); + +impl<'b, 'tcx> MirPass<'tcx> for Marker<'b> { + fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, + _src: MirSource, _mir: &mut Mir<'tcx>) + {} +} + +impl<'b> Pass for Marker<'b> { + fn name(&self) -> ::std::borrow::Cow<'static, str> { String::from(self.0).into() } +} + +pub struct Disambiguator<'a> { + pass: &'a Pass, + is_after: bool +} + +impl<'a> fmt::Display for Disambiguator<'a> { + fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + let title = if self.is_after { "after" } else { "before" }; + if let Some(fmt) = self.pass.disambiguator() { + write!(formatter, "{}-{}", fmt, title) + } else { + write!(formatter, "{}", title) + } + } +} + +pub struct DumpMir; + +impl<'tcx> MirPassHook<'tcx> for DumpMir { + fn on_mir_pass<'a>( + &mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, + mir: &Mir<'tcx>, + pass: &Pass, + is_after: bool) + { + pretty::dump_mir( + tcx, + &*pass.name(), + &Disambiguator { + pass: pass, + is_after: is_after + }, + src, + mir + ); + } +} + +impl<'b> Pass for DumpMir {} diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index 9679654d958e9..cebd9dd9668e3 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -12,213 +12,42 @@ //! 
We want to do this once just before trans, so trans does not have to take //! care erasing regions all over the place. -use rustc::middle::ty; -use rustc::mir::repr::*; -use transform::MirPass; -use mir_map::MirMap; - -pub fn erase_regions<'tcx>(tcx: &ty::ctxt<'tcx>, mir_map: &mut MirMap<'tcx>) { - let mut eraser = EraseRegions::new(tcx); - - for mir in mir_map.iter_mut().map(|(_, v)| v) { - eraser.run_on_mir(mir); - } -} - -pub struct EraseRegions<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, -} - -impl<'a, 'tcx> MirPass<'tcx> for EraseRegions<'a, 'tcx> { - - fn run_on_mir(&mut self, mir: &mut Mir<'tcx>) { - - for basic_block in &mut mir.basic_blocks { - self.erase_regions_basic_block(basic_block); - } - - self.erase_regions_return_ty(&mut mir.return_ty); - - self.erase_regions_tys(mir.var_decls.iter_mut().map(|d| &mut d.ty)); - self.erase_regions_tys(mir.arg_decls.iter_mut().map(|d| &mut d.ty)); - self.erase_regions_tys(mir.temp_decls.iter_mut().map(|d| &mut d.ty)); - } +use rustc::ty::subst::Substs; +use rustc::ty::{Ty, TyCtxt}; +use rustc::mir::*; +use rustc::mir::visit::MutVisitor; +use rustc::mir::transform::{MirPass, MirSource, Pass}; + +struct EraseRegionsVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, } -impl<'a, 'tcx> EraseRegions<'a, 'tcx> { - - pub fn new(tcx: &'a ty::ctxt<'tcx>) -> EraseRegions<'a, 'tcx> { - EraseRegions { +impl<'a, 'tcx> EraseRegionsVisitor<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self { + EraseRegionsVisitor { tcx: tcx } } +} - fn erase_regions_basic_block(&mut self, - basic_block: &mut BasicBlockData<'tcx>) { - for statement in &mut basic_block.statements { - self.erase_regions_statement(statement); - } - - self.erase_regions_terminator(basic_block.terminator_mut()); - } - - fn erase_regions_statement(&mut self, - statement: &mut Statement<'tcx>) { - match statement.kind { - StatementKind::Assign(ref mut lvalue, ref mut rvalue) => { - self.erase_regions_lvalue(lvalue); - self.erase_regions_rvalue(rvalue); - } - 
StatementKind::Drop(_, ref mut lvalue) => { - self.erase_regions_lvalue(lvalue); - } - } - } - - fn erase_regions_terminator(&mut self, - terminator: &mut Terminator<'tcx>) { - match *terminator { - Terminator::Goto { .. } | - Terminator::Resume | - Terminator::Return => { - /* nothing to do */ - } - Terminator::If { ref mut cond, .. } => { - self.erase_regions_operand(cond); - } - Terminator::Switch { ref mut discr, .. } => { - self.erase_regions_lvalue(discr); - } - Terminator::SwitchInt { ref mut discr, ref mut switch_ty, .. } => { - self.erase_regions_lvalue(discr); - *switch_ty = self.tcx.erase_regions(switch_ty); - }, - Terminator::Call { ref mut func, ref mut args, ref mut kind } => { - if let Some(destination) = kind.destination_mut() { - self.erase_regions_lvalue(destination); - } - self.erase_regions_operand(func); - for arg in &mut *args { - self.erase_regions_operand(arg); - } - } - } - } - - fn erase_regions_operand(&mut self, operand: &mut Operand<'tcx>) { - match *operand { - Operand::Consume(ref mut lvalue) => { - self.erase_regions_lvalue(lvalue); - } - Operand::Constant(ref mut constant) => { - self.erase_regions_constant(constant); - } - } - } - - fn erase_regions_lvalue(&mut self, lvalue: &mut Lvalue<'tcx>) { - match *lvalue { - Lvalue::Var(_) | - Lvalue::Temp(_) | - Lvalue::Arg(_) | - Lvalue::Static(_) | - Lvalue::ReturnPointer => {} - Lvalue::Projection(ref mut lvalue_projection) => { - self.erase_regions_lvalue(&mut lvalue_projection.base); - match lvalue_projection.elem { - ProjectionElem::Deref | - ProjectionElem::Field(_) | - ProjectionElem::Downcast(..) 
| - ProjectionElem::ConstantIndex {..} => { /* nothing to do */ } - ProjectionElem::Index(ref mut index) => { - self.erase_regions_operand(index); - } - } - } - } +impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { + fn visit_ty(&mut self, ty: &mut Ty<'tcx>) { + let old_ty = *ty; + *ty = self.tcx.erase_regions(&old_ty); } - fn erase_regions_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>) { - match *rvalue { - Rvalue::Use(ref mut operand) => { - self.erase_regions_operand(operand) - } - Rvalue::Repeat(ref mut operand, ref mut constant) => { - self.erase_regions_operand(operand); - self.erase_regions_constant(constant); - } - Rvalue::Ref(ref mut region, _, ref mut lvalue) => { - *region = ty::ReStatic; - self.erase_regions_lvalue(lvalue); - } - Rvalue::Len(ref mut lvalue) => self.erase_regions_lvalue(lvalue), - Rvalue::Cast(_, ref mut operand, ref mut ty) => { - self.erase_regions_operand(operand); - *ty = self.tcx.erase_regions(ty); - } - Rvalue::BinaryOp(_, ref mut operand1, ref mut operand2) => { - self.erase_regions_operand(operand1); - self.erase_regions_operand(operand2); - } - Rvalue::UnaryOp(_, ref mut operand) => { - self.erase_regions_operand(operand); - } - Rvalue::Box(ref mut ty) => *ty = self.tcx.erase_regions(ty), - Rvalue::Aggregate(ref mut aggregate_kind, ref mut operands) => { - match *aggregate_kind { - AggregateKind::Vec | - AggregateKind::Tuple => {}, - AggregateKind::Adt(_, _, ref mut substs) => { - let erased = self.tcx.erase_regions(*substs); - *substs = self.tcx.mk_substs(erased); - } - AggregateKind::Closure(def_id, ref mut closure_substs) => { - let cloned = Box::new(closure_substs.clone()); - let ty = self.tcx.mk_closure_from_closure_substs(def_id, - cloned); - let erased = self.tcx.erase_regions(&ty); - *closure_substs = match erased.sty { - ty::TyClosure(_, ref closure_substs) => &*closure_substs, - _ => unreachable!() - }; - } - } - for operand in &mut *operands { - self.erase_regions_operand(operand); - } - } - 
Rvalue::Slice { ref mut input, .. } => { - self.erase_regions_lvalue(input); - } - Rvalue::InlineAsm(_) => {}, - } + fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>) { + *substs = self.tcx.erase_regions(&{*substs}); } +} - fn erase_regions_constant(&mut self, constant: &mut Constant<'tcx>) { - constant.ty = self.tcx.erase_regions(&constant.ty); - match constant.literal { - Literal::Item { ref mut substs, .. } => { - *substs = self.tcx.mk_substs(self.tcx.erase_regions(substs)); - } - Literal::Value { .. } => { /* nothing to do */ } - } - } +pub struct EraseRegions; - fn erase_regions_return_ty(&mut self, fn_output: &mut ty::FnOutput<'tcx>) { - match *fn_output { - ty::FnConverging(ref mut ty) => { - *ty = self.tcx.erase_regions(ty); - }, - ty::FnDiverging => {} - } - } +impl Pass for EraseRegions {} - fn erase_regions_tys<'b, T>(&mut self, tys: T) - where T: Iterator>, - 'tcx: 'b - { - for ty in tys { - *ty = self.tcx.erase_regions(ty); - } +impl<'tcx> MirPass<'tcx> for EraseRegions { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + _: MirSource, mir: &mut Mir<'tcx>) { + EraseRegionsVisitor::new(tcx).visit_mir(mir); } } diff --git a/src/librustc_mir/transform/instcombine.rs b/src/librustc_mir/transform/instcombine.rs new file mode 100644 index 0000000000000..c4a8d34bda008 --- /dev/null +++ b/src/librustc_mir/transform/instcombine.rs @@ -0,0 +1,111 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Performs various peephole optimizations. 
+ +use rustc::mir::{Location, Lvalue, Mir, Operand, ProjectionElem, Rvalue, Local}; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::{MutVisitor, Visitor}; +use rustc::ty::TyCtxt; +use rustc::util::nodemap::FxHashSet; +use rustc_data_structures::indexed_vec::Idx; +use std::mem; + +pub struct InstCombine { + optimizations: OptimizationList, +} + +impl InstCombine { + pub fn new() -> InstCombine { + InstCombine { + optimizations: OptimizationList::default(), + } + } +} + +impl Pass for InstCombine {} + +impl<'tcx> MirPass<'tcx> for InstCombine { + fn run_pass<'a>(&mut self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + _: MirSource, + mir: &mut Mir<'tcx>) { + // We only run when optimizing MIR (at any level). + if tcx.sess.opts.debugging_opts.mir_opt_level == Some(0) { + return + } + + // First, find optimization opportunities. This is done in a pre-pass to keep the MIR + // read-only so that we can do global analyses on the MIR in the process (e.g. + // `Lvalue::ty()`). + { + let mut optimization_finder = OptimizationFinder::new(mir, tcx); + optimization_finder.visit_mir(mir); + self.optimizations = optimization_finder.optimizations + } + + // Then carry out those optimizations. + MutVisitor::visit_mir(&mut *self, mir); + } +} + +impl<'tcx> MutVisitor<'tcx> for InstCombine { + fn visit_rvalue(&mut self, rvalue: &mut Rvalue<'tcx>, location: Location) { + if self.optimizations.and_stars.remove(&location) { + debug!("Replacing `&*`: {:?}", rvalue); + let new_lvalue = match *rvalue { + Rvalue::Ref(_, _, Lvalue::Projection(ref mut projection)) => { + // Replace with dummy + mem::replace(&mut projection.base, Lvalue::Local(Local::new(0))) + } + _ => bug!("Detected `&*` but didn't find `&*`!"), + }; + *rvalue = Rvalue::Use(Operand::Consume(new_lvalue)) + } + + self.super_rvalue(rvalue, location) + } +} + +/// Finds optimization opportunities on the MIR. 
+struct OptimizationFinder<'b, 'a, 'tcx:'a+'b> { + mir: &'b Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + optimizations: OptimizationList, +} + +impl<'b, 'a, 'tcx:'b> OptimizationFinder<'b, 'a, 'tcx> { + fn new(mir: &'b Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> OptimizationFinder<'b, 'a, 'tcx> { + OptimizationFinder { + mir: mir, + tcx: tcx, + optimizations: OptimizationList::default(), + } + } +} + +impl<'b, 'a, 'tcx> Visitor<'tcx> for OptimizationFinder<'b, 'a, 'tcx> { + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + if let Rvalue::Ref(_, _, Lvalue::Projection(ref projection)) = *rvalue { + if let ProjectionElem::Deref = projection.elem { + if projection.base.ty(self.mir, self.tcx).to_ty(self.tcx).is_region_ptr() { + self.optimizations.and_stars.insert(location); + } + } + } + + self.super_rvalue(rvalue, location) + } +} + +#[derive(Default)] +struct OptimizationList { + and_stars: FxHashSet, +} diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index 174718f7b1672..ae255f70fb788 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -8,12 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -pub mod simplify_cfg; +pub mod simplify_branches; +pub mod simplify; pub mod erase_regions; -mod util; - -use rustc::mir::repr::Mir; - -pub trait MirPass<'tcx> { - fn run_on_mir(&mut self, mir: &mut Mir<'tcx>); -} +pub mod no_landing_pads; +pub mod type_check; +pub mod add_call_guards; +pub mod promote_consts; +pub mod qualify_consts; +pub mod dump_mir; +pub mod deaggregator; +pub mod instcombine; +pub mod copy_prop; diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs new file mode 100644 index 0000000000000..6ef5720b330c9 --- /dev/null +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -0,0 +1,56 @@ +// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass removes the unwind branch of all the terminators when the no-landing-pads option is +//! specified. + +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::visit::MutVisitor; +use rustc::mir::transform::{Pass, MirPass, MirSource}; + +pub struct NoLandingPads; + +impl<'tcx> MutVisitor<'tcx> for NoLandingPads { + fn visit_terminator(&mut self, + bb: BasicBlock, + terminator: &mut Terminator<'tcx>, + location: Location) { + match terminator.kind { + TerminatorKind::Goto { .. } | + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::Unreachable | + TerminatorKind::If { .. } | + TerminatorKind::Switch { .. } | + TerminatorKind::SwitchInt { .. } => { + /* nothing to do */ + }, + TerminatorKind::Call { cleanup: ref mut unwind, .. } | + TerminatorKind::Assert { cleanup: ref mut unwind, .. } | + TerminatorKind::DropAndReplace { ref mut unwind, .. } | + TerminatorKind::Drop { ref mut unwind, .. } => { + unwind.take(); + }, + } + self.super_terminator(bb, terminator, location); + } +} + +impl<'tcx> MirPass<'tcx> for NoLandingPads { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + _: MirSource, mir: &mut Mir<'tcx>) { + if tcx.sess.no_landing_pads() { + self.visit_mir(mir); + } + } +} + +impl Pass for NoLandingPads {} diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs new file mode 100644 index 0000000000000..41698574e0f1f --- /dev/null +++ b/src/librustc_mir/transform/promote_consts.rs @@ -0,0 +1,442 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A pass that promotes borrows of constant rvalues. +//! +//! The rvalues considered constant are trees of temps, +//! each with exactly one initialization, and holding +//! a constant value with no interior mutability. +//! They are placed into a new MIR constant body in +//! `promoted` and the borrow rvalue is replaced with +//! a `Literal::Promoted` using the index into `promoted` +//! of that constant MIR. +//! +//! This pass assumes that every use is dominated by an +//! initialization and can otherwise silence errors, if +//! move analysis runs after promotion on broken MIR. + +use rustc::mir::*; +use rustc::mir::visit::{LvalueContext, MutVisitor, Visitor}; +use rustc::mir::traversal::ReversePostorder; +use rustc::ty::TyCtxt; +use syntax_pos::Span; + +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +use std::iter; +use std::mem; +use std::usize; + +/// State of a temporary during collection and promotion. +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum TempState { + /// No references to this temp. + Undefined, + /// One direct assignment and any number of direct uses. + /// A borrow of this temp is promotable if the assigned + /// value is qualified as constant. + Defined { + location: Location, + uses: usize + }, + /// Any other combination of assignments/uses. + Unpromotable, + /// This temp was part of an rvalue which got extracted + /// during promotion and needs cleanup. + PromotedOut +} + +impl TempState { + pub fn is_promotable(&self) -> bool { + if let TempState::Defined { uses, .. } = *self { + uses > 0 + } else { + false + } + } +} + +/// A "root candidate" for promotion, which will become the +/// returned value in a promoted MIR, unless it's a subset +/// of a larger candidate. +pub enum Candidate { + /// Borrow of a constant temporary. 
+ Ref(Location), + + /// Array of indices found in the third argument of + /// a call to one of the simd_shuffleN intrinsics. + ShuffleIndices(BasicBlock) +} + +struct TempCollector<'tcx> { + temps: IndexVec, + span: Span, + mir: &'tcx Mir<'tcx>, +} + +impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> { + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + self.super_lvalue(lvalue, context, location); + if let Lvalue::Local(index) = *lvalue { + // We're only interested in temporaries + if self.mir.local_kind(index) != LocalKind::Temp { + return; + } + + // Ignore drops, if the temp gets promoted, + // then it's constant and thus drop is noop. + // Storage live ranges are also irrelevant. + match context { + LvalueContext::Drop | + LvalueContext::StorageLive | + LvalueContext::StorageDead => return, + _ => {} + } + + let temp = &mut self.temps[index]; + if *temp == TempState::Undefined { + match context { + LvalueContext::Store | + LvalueContext::Call => { + *temp = TempState::Defined { + location: location, + uses: 0 + }; + return; + } + _ => { /* mark as unpromotable below */ } + } + } else if let TempState::Defined { ref mut uses, .. 
} = *temp { + match context { + LvalueContext::Borrow {..} | + LvalueContext::Consume | + LvalueContext::Inspect => { + *uses += 1; + return; + } + _ => { /* mark as unpromotable below */ } + } + } + *temp = TempState::Unpromotable; + } + } + + fn visit_source_info(&mut self, source_info: &SourceInfo) { + self.span = source_info.span; + } +} + +pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec { + let mut collector = TempCollector { + temps: IndexVec::from_elem(TempState::Undefined, &mir.local_decls), + span: mir.span, + mir: mir, + }; + for (bb, data) in rpo { + collector.visit_basic_block_data(bb, data); + } + collector.temps +} + +struct Promoter<'a, 'tcx: 'a> { + source: &'a mut Mir<'tcx>, + promoted: Mir<'tcx>, + temps: &'a mut IndexVec, + + /// If true, all nested temps are also kept in the + /// source MIR, not moved to the promoted MIR. + keep_original: bool +} + +impl<'a, 'tcx> Promoter<'a, 'tcx> { + fn new_block(&mut self) -> BasicBlock { + let span = self.promoted.span; + self.promoted.basic_blocks_mut().push(BasicBlockData { + statements: vec![], + terminator: Some(Terminator { + source_info: SourceInfo { + span: span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + kind: TerminatorKind::Return + }), + is_cleanup: false + }) + } + + fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) { + let last = self.promoted.basic_blocks().last().unwrap(); + let data = &mut self.promoted[last]; + data.statements.push(Statement { + source_info: SourceInfo { + span: span, + scope: ARGUMENT_VISIBILITY_SCOPE + }, + kind: StatementKind::Assign(Lvalue::Local(dest), rvalue) + }); + } + + /// Copy the initialization of this temp to the + /// promoted MIR, recursing through temps. 
+ fn promote_temp(&mut self, temp: Local) -> Local { + let old_keep_original = self.keep_original; + let (bb, stmt_idx) = match self.temps[temp] { + TempState::Defined { + location: Location { block, statement_index }, + uses + } if uses > 0 => { + if uses > 1 { + self.keep_original = true; + } + (block, statement_index) + } + state => { + span_bug!(self.promoted.span, "{:?} not promotable: {:?}", + temp, state); + } + }; + if !self.keep_original { + self.temps[temp] = TempState::PromotedOut; + } + + let no_stmts = self.source[bb].statements.len(); + + // First, take the Rvalue or Call out of the source MIR, + // or duplicate it, depending on keep_original. + let (mut rvalue, mut call) = (None, None); + let source_info = if stmt_idx < no_stmts { + let statement = &mut self.source[bb].statements[stmt_idx]; + let rhs = match statement.kind { + StatementKind::Assign(_, ref mut rhs) => rhs, + _ => { + span_bug!(statement.source_info.span, "{:?} is not an assignment", + statement); + } + }; + if self.keep_original { + rvalue = Some(rhs.clone()); + } else { + let unit = Rvalue::Aggregate(AggregateKind::Tuple, vec![]); + rvalue = Some(mem::replace(rhs, unit)); + } + statement.source_info + } else if self.keep_original { + let terminator = self.source[bb].terminator().clone(); + call = Some(terminator.kind); + terminator.source_info + } else { + let terminator = self.source[bb].terminator_mut(); + let target = match terminator.kind { + TerminatorKind::Call { + destination: ref mut dest @ Some(_), + ref mut cleanup, .. + } => { + // No cleanup necessary. + cleanup.take(); + + // We'll put a new destination in later. + dest.take().unwrap().1 + } + ref kind => { + span_bug!(terminator.source_info.span, "{:?} not promotable", kind); + } + }; + call = Some(mem::replace(&mut terminator.kind, TerminatorKind::Goto { + target: target + })); + terminator.source_info + }; + + // Then, recurse for components in the Rvalue or Call. 
+ if stmt_idx < no_stmts { + self.visit_rvalue(rvalue.as_mut().unwrap(), Location { + block: bb, + statement_index: stmt_idx + }); + } else { + self.visit_terminator_kind(bb, call.as_mut().unwrap(), Location { + block: bb, + statement_index: no_stmts + }); + } + + let new_temp = self.promoted.local_decls.push( + LocalDecl::new_temp(self.source.local_decls[temp].ty)); + + // Inject the Rvalue or Call into the promoted MIR. + if stmt_idx < no_stmts { + self.assign(new_temp, rvalue.unwrap(), source_info.span); + } else { + let last = self.promoted.basic_blocks().last().unwrap(); + let new_target = self.new_block(); + let mut call = call.unwrap(); + match call { + TerminatorKind::Call { ref mut destination, ..} => { + *destination = Some((Lvalue::Local(new_temp), new_target)); + } + _ => bug!() + } + let terminator = self.promoted[last].terminator_mut(); + terminator.source_info.span = source_info.span; + terminator.kind = call; + } + + // Restore the old duplication state. + self.keep_original = old_keep_original; + + new_temp + } + + fn promote_candidate(mut self, candidate: Candidate) { + let span = self.promoted.span; + let new_operand = Operand::Constant(Constant { + span: span, + ty: self.promoted.return_ty, + literal: Literal::Promoted { + index: Promoted::new(self.source.promoted.len()) + } + }); + let mut rvalue = match candidate { + Candidate::Ref(Location { block: bb, statement_index: stmt_idx }) => { + let ref mut statement = self.source[bb].statements[stmt_idx]; + match statement.kind { + StatementKind::Assign(_, ref mut rvalue) => { + mem::replace(rvalue, Rvalue::Use(new_operand)) + } + _ => bug!() + } + } + Candidate::ShuffleIndices(bb) => { + match self.source[bb].terminator_mut().kind { + TerminatorKind::Call { ref mut args, .. 
} => { + Rvalue::Use(mem::replace(&mut args[2], new_operand)) + } + _ => bug!() + } + } + }; + self.visit_rvalue(&mut rvalue, Location { + block: BasicBlock::new(0), + statement_index: usize::MAX + }); + + self.assign(RETURN_POINTER, rvalue, span); + self.source.promoted.push(self.promoted); + } +} + +/// Replaces all temporaries with their promoted counterparts. +impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> { + fn visit_lvalue(&mut self, + lvalue: &mut Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + if let Lvalue::Local(ref mut temp) = *lvalue { + if self.source.local_kind(*temp) == LocalKind::Temp { + *temp = self.promote_temp(*temp); + } + } + self.super_lvalue(lvalue, context, location); + } +} + +pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mut temps: IndexVec, + candidates: Vec) { + // Visit candidates in reverse, in case they're nested. + for candidate in candidates.into_iter().rev() { + let (span, ty) = match candidate { + Candidate::Ref(Location { block: bb, statement_index: stmt_idx }) => { + let statement = &mir[bb].statements[stmt_idx]; + let dest = match statement.kind { + StatementKind::Assign(ref dest, _) => dest, + _ => { + span_bug!(statement.source_info.span, + "expected assignment to promote"); + } + }; + if let Lvalue::Local(index) = *dest { + if temps[index] == TempState::PromotedOut { + // Already promoted. + continue; + } + } + (statement.source_info.span, dest.ty(mir, tcx).to_ty(tcx)) + } + Candidate::ShuffleIndices(bb) => { + let terminator = mir[bb].terminator(); + let ty = match terminator.kind { + TerminatorKind::Call { ref args, .. 
} => { + args[2].ty(mir, tcx) + } + _ => { + span_bug!(terminator.source_info.span, + "expected simd_shuffleN call to promote"); + } + }; + (terminator.source_info.span, ty) + } + }; + + // Declare return pointer local + let initial_locals = iter::once(LocalDecl::new_return_pointer(ty)).collect(); + + let mut promoter = Promoter { + promoted: Mir::new( + IndexVec::new(), + Some(VisibilityScopeData { + span: span, + parent_scope: None + }).into_iter().collect(), + IndexVec::new(), + ty, + initial_locals, + 0, + vec![], + span + ), + source: mir, + temps: &mut temps, + keep_original: false + }; + assert_eq!(promoter.new_block(), START_BLOCK); + promoter.promote_candidate(candidate); + } + + // Eliminate assignments to, and drops of promoted temps. + let promoted = |index: Local| temps[index] == TempState::PromotedOut; + for block in mir.basic_blocks_mut() { + block.statements.retain(|statement| { + match statement.kind { + StatementKind::Assign(Lvalue::Local(index), _) | + StatementKind::StorageLive(Lvalue::Local(index)) | + StatementKind::StorageDead(Lvalue::Local(index)) => { + !promoted(index) + } + _ => true + } + }); + let terminator = block.terminator_mut(); + match terminator.kind { + TerminatorKind::Drop { location: Lvalue::Local(index), target, .. } => { + if promoted(index) { + terminator.kind = TerminatorKind::Goto { + target: target + }; + } + } + _ => {} + } + } +} diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs new file mode 100644 index 0000000000000..893478a933182 --- /dev/null +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -0,0 +1,1053 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! A pass that qualifies constness of temporaries in constants, +//! static initializers and functions and also drives promotion. +//! +//! The Qualif flags below can be used to also provide better +//! diagnostics as to why a constant rvalue wasn't promoted. + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; +use rustc::hir; +use rustc::hir::map as hir_map; +use rustc::hir::def_id::DefId; +use rustc::hir::map::blocks::FnLikeNode; +use rustc::traits::{self, Reveal}; +use rustc::ty::{self, TyCtxt, Ty}; +use rustc::ty::cast::CastTy; +use rustc::mir::*; +use rustc::mir::traversal::ReversePostorder; +use rustc::mir::transform::{Pass, MirPass, MirSource}; +use rustc::mir::visit::{LvalueContext, Visitor}; +use rustc::util::nodemap::DefIdMap; +use rustc::middle::lang_items; +use syntax::abi::Abi; +use syntax::feature_gate::UnstableFeatures; +use syntax_pos::Span; + +use std::collections::hash_map::Entry; +use std::fmt; +use std::usize; + +use super::promote_consts::{self, Candidate, TempState}; + +bitflags! { + flags Qualif: u8 { + // Const item's qualification while recursing. + // Recursive consts are an error. + const RECURSIVE = 1 << 0, + + // Constant containing interior mutability (UnsafeCell). + const MUTABLE_INTERIOR = 1 << 1, + + // Constant containing an ADT that implements Drop. + const NEEDS_DROP = 1 << 2, + + // Function argument. + const FN_ARGUMENT = 1 << 3, + + // Static lvalue or move from a static. + const STATIC = 1 << 4, + + // Reference to a static. + const STATIC_REF = 1 << 5, + + // Not constant at all - non-`const fn` calls, asm!, + // pointer comparisons, ptr-to-int casts, etc. + const NOT_CONST = 1 << 6, + + // Refers to temporaries which cannot be promoted as + // promote_consts decided they weren't simple enough. 
+ const NOT_PROMOTABLE = 1 << 7, + + // Borrows of temporaries can be promoted only + // if they have none of the above qualifications. + const NEVER_PROMOTE = !0, + + // Const items can only have MUTABLE_INTERIOR + // and NOT_PROMOTABLE without producing an error. + const CONST_ERROR = !Qualif::MUTABLE_INTERIOR.bits & + !Qualif::NOT_PROMOTABLE.bits + } +} + +impl<'a, 'tcx> Qualif { + /// Remove flags which are impossible for the given type. + fn restrict(&mut self, ty: Ty<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ty::ParameterEnvironment<'tcx>) { + if !ty.type_contents(tcx).interior_unsafe() { + *self = *self - Qualif::MUTABLE_INTERIOR; + } + if !tcx.type_needs_drop_given_env(ty, param_env) { + *self = *self - Qualif::NEEDS_DROP; + } + } +} + +/// What kind of item we are in. +#[derive(Copy, Clone, PartialEq, Eq)] +enum Mode { + Const, + Static, + StaticMut, + ConstFn, + Fn +} + +impl fmt::Display for Mode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Mode::Const => write!(f, "constant"), + Mode::Static | Mode::StaticMut => write!(f, "static"), + Mode::ConstFn => write!(f, "constant function"), + Mode::Fn => write!(f, "function") + } + } +} + +pub fn is_const_fn(tcx: TyCtxt, def_id: DefId) -> bool { + if let Some(node_id) = tcx.map.as_local_node_id(def_id) { + if let Some(fn_like) = FnLikeNode::from_node(tcx.map.get(node_id)) { + fn_like.constness() == hir::Constness::Const + } else { + false + } + } else { + tcx.sess.cstore.is_const_fn(def_id) + } +} + +struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + mode: Mode, + span: Span, + def_id: DefId, + mir: &'a Mir<'tcx>, + rpo: ReversePostorder<'a, 'tcx>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParameterEnvironment<'tcx>, + qualif_map: &'a mut DefIdMap, + temp_qualif: IndexVec>, + return_qualif: Option, + qualif: Qualif, + const_fn_arg_vars: BitVector, + temp_promotion_state: IndexVec, + promotion_candidates: Vec +} + +impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { + fn 
new(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParameterEnvironment<'tcx>, + qualif_map: &'a mut DefIdMap, + def_id: DefId, + mir: &'a Mir<'tcx>, + mode: Mode) + -> Qualifier<'a, 'tcx, 'tcx> { + let mut rpo = traversal::reverse_postorder(mir); + let temps = promote_consts::collect_temps(mir, &mut rpo); + rpo.reset(); + Qualifier { + mode: mode, + span: mir.span, + def_id: def_id, + mir: mir, + rpo: rpo, + tcx: tcx, + param_env: param_env, + qualif_map: qualif_map, + temp_qualif: IndexVec::from_elem(None, &mir.local_decls), + return_qualif: None, + qualif: Qualif::empty(), + const_fn_arg_vars: BitVector::new(mir.local_decls.len()), + temp_promotion_state: temps, + promotion_candidates: vec![] + } + } + + // FIXME(eddyb) we could split the errors into meaningful + // categories, but enabling full miri would make that + // slightly pointless (even with feature-gating). + fn not_const(&mut self) { + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + span_err!(self.tcx.sess, self.span, E0019, + "{} contains unimplemented expression type", self.mode); + } + } + + /// Error about extra statements in a constant. + fn statement_like(&mut self) { + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + span_err!(self.tcx.sess, self.span, E0016, + "blocks in {}s are limited to items and tail expressions", + self.mode); + } + } + + /// Add the given qualification to self.qualif. + fn add(&mut self, qualif: Qualif) { + self.qualif = self.qualif | qualif; + } + + /// Add the given type's qualification to self.qualif. + fn add_type(&mut self, ty: Ty<'tcx>) { + self.add(Qualif::MUTABLE_INTERIOR | Qualif::NEEDS_DROP); + self.qualif.restrict(ty, self.tcx, &self.param_env); + } + + /// Within the provided closure, self.qualif will start + /// out empty, and its value after the closure returns will + /// be combined with the value before the call to nest. 
+ fn nest(&mut self, f: F) { + let original = self.qualif; + self.qualif = Qualif::empty(); + f(self); + self.add(original); + } + + /// Check for NEEDS_DROP (from an ADT or const fn call) and + /// error, unless we're in a function, or the feature-gate + /// for globals with destructors is enabled. + fn deny_drop(&self) { + if self.mode == Mode::Fn || !self.qualif.intersects(Qualif::NEEDS_DROP) { + return; + } + + // Static and const fn's allow destructors, but they're feature-gated. + let msg = if self.mode != Mode::Const { + // Feature-gate for globals with destructors is enabled. + if self.tcx.sess.features.borrow().drop_types_in_const { + return; + } + + // This comes from a macro that has #[allow_internal_unstable]. + if self.tcx.sess.codemap().span_allows_unstable(self.span) { + return; + } + + format!("destructors in {}s are an unstable feature", + self.mode) + } else { + format!("{}s are not allowed to have destructors", + self.mode) + }; + + let mut err = + struct_span_err!(self.tcx.sess, self.span, E0493, "{}", msg); + + if self.mode != Mode::Const { + help!(&mut err, + "in Nightly builds, add `#![feature(drop_types_in_const)]` \ + to the crate attributes to enable"); + } else { + self.find_drop_implementation_method_span() + .map(|span| err.span_label(span, &format!("destructor defined here"))); + + err.span_label(self.span, &format!("constants cannot have destructors")); + } + + err.emit(); + } + + fn find_drop_implementation_method_span(&self) -> Option { + self.tcx.lang_items + .drop_trait() + .and_then(|drop_trait_id| { + let mut span = None; + + self.tcx + .lookup_trait_def(drop_trait_id) + .for_each_relevant_impl(self.tcx, self.mir.return_ty, |impl_did| { + self.tcx.map + .as_local_node_id(impl_did) + .and_then(|impl_node_id| self.tcx.map.find(impl_node_id)) + .map(|node| { + if let hir_map::NodeItem(item) = node { + if let hir::ItemImpl(.., ref impl_item_refs) = item.node { + span = impl_item_refs.first() + .map(|iiref| { + 
self.tcx.map.impl_item(iiref.id) + .span + }); + } + } + }); + }); + + span + }) + } + + /// Check if an Lvalue with the current qualifications could + /// be consumed, by either an operand or a Deref projection. + fn try_consume(&mut self) -> bool { + if self.qualif.intersects(Qualif::STATIC) && self.mode != Mode::Fn { + let msg = if self.mode == Mode::Static || + self.mode == Mode::StaticMut { + "cannot refer to other statics by value, use the \ + address-of operator or a constant instead" + } else { + "cannot refer to statics by value, use a constant instead" + }; + struct_span_err!(self.tcx.sess, self.span, E0394, "{}", msg) + .span_label(self.span, &format!("referring to another static by value")) + .note(&format!("use the address-of operator or a constant instead")) + .emit(); + + // Replace STATIC with NOT_CONST to avoid further errors. + self.qualif = self.qualif - Qualif::STATIC; + self.add(Qualif::NOT_CONST); + + false + } else { + true + } + } + + /// Assign the current qualification to the given destination. + fn assign(&mut self, dest: &Lvalue<'tcx>, location: Location) { + let qualif = self.qualif; + let span = self.span; + let store = |slot: &mut Option| { + if slot.is_some() { + span_bug!(span, "multiple assignments to {:?}", dest); + } + *slot = Some(qualif); + }; + + // Only handle promotable temps in non-const functions. 
+ if self.mode == Mode::Fn { + if let Lvalue::Local(index) = *dest { + if self.mir.local_kind(index) == LocalKind::Temp + && self.temp_promotion_state[index].is_promotable() { + debug!("store to promotable temp {:?}", index); + store(&mut self.temp_qualif[index]); + } + } + return; + } + + match *dest { + Lvalue::Local(index) if self.mir.local_kind(index) == LocalKind::Temp => { + debug!("store to temp {:?}", index); + store(&mut self.temp_qualif[index]) + } + Lvalue::Local(index) if self.mir.local_kind(index) == LocalKind::ReturnPointer => { + debug!("store to return pointer {:?}", index); + store(&mut self.return_qualif) + } + + Lvalue::Projection(box Projection { + base: Lvalue::Local(index), + elem: ProjectionElem::Deref + }) if self.mir.local_kind(index) == LocalKind::Temp + && self.mir.local_decls[index].ty.is_unique() + && self.temp_qualif[index].map_or(false, |qualif| { + qualif.intersects(Qualif::NOT_CONST) + }) => { + // Part of `box expr`, we should've errored + // already for the Box allocation Rvalue. + } + + // This must be an explicit assignment. + _ => { + // Catch more errors in the destination. + self.visit_lvalue(dest, LvalueContext::Store, location); + self.statement_like(); + } + } + } + + /// Qualify a whole const, static initializer or const fn. + fn qualify_const(&mut self) -> Qualif { + debug!("qualifying {} {}", self.mode, self.tcx.item_path_str(self.def_id)); + + let mir = self.mir; + + let mut seen_blocks = BitVector::new(mir.basic_blocks().len()); + let mut bb = START_BLOCK; + loop { + seen_blocks.insert(bb.index()); + + self.visit_basic_block_data(bb, &mir[bb]); + + let target = match mir[bb].terminator().kind { + TerminatorKind::Goto { target } | + // Drops are considered noops. + TerminatorKind::Drop { target, .. } | + TerminatorKind::Assert { target, .. } | + TerminatorKind::Call { destination: Some((_, target)), .. } => { + Some(target) + } + + // Non-terminating calls cannot produce any value. 
+ TerminatorKind::Call { destination: None, .. } => { + return Qualif::empty(); + } + + TerminatorKind::If {..} | + TerminatorKind::Switch {..} | + TerminatorKind::SwitchInt {..} | + TerminatorKind::DropAndReplace { .. } | + TerminatorKind::Resume | + TerminatorKind::Unreachable => None, + + TerminatorKind::Return => { + // Check for unused values. This usually means + // there are extra statements in the AST. + for temp in mir.temps_iter() { + if self.temp_qualif[temp].is_none() { + continue; + } + + let state = self.temp_promotion_state[temp]; + if let TempState::Defined { location, uses: 0 } = state { + let data = &mir[location.block]; + let stmt_idx = location.statement_index; + + // Get the span for the initialization. + let source_info = if stmt_idx < data.statements.len() { + data.statements[stmt_idx].source_info + } else { + data.terminator().source_info + }; + self.span = source_info.span; + + // Treat this as a statement in the AST. + self.statement_like(); + } + } + + // Make sure there are no extra unassigned variables. + self.qualif = Qualif::NOT_CONST; + for index in mir.vars_iter() { + if !self.const_fn_arg_vars.contains(index.index()) { + debug!("unassigned variable {:?}", index); + self.assign(&Lvalue::Local(index), Location { + block: bb, + statement_index: usize::MAX, + }); + } + } + + break; + } + }; + + match target { + // No loops allowed. + Some(target) if !seen_blocks.contains(target.index()) => { + bb = target; + } + _ => { + self.not_const(); + break; + } + } + } + + let return_ty = mir.return_ty; + self.qualif = self.return_qualif.unwrap_or(Qualif::NOT_CONST); + + match self.mode { + Mode::StaticMut => { + // Check for destructors in static mut. + self.add_type(return_ty); + self.deny_drop(); + } + _ => { + // Account for errors in consts by using the + // conservative type qualification instead. 
+ if self.qualif.intersects(Qualif::CONST_ERROR) { + self.qualif = Qualif::empty(); + self.add_type(return_ty); + } + } + } + self.qualif + } +} + +/// Accumulates an Rvalue or Call's effects in self.qualif. +/// For functions (constant or not), it also records +/// candidates for promotion in promotion_candidates. +impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + match *lvalue { + Lvalue::Local(local) => match self.mir.local_kind(local) { + LocalKind::ReturnPointer => { + self.not_const(); + } + LocalKind::Arg => { + self.add(Qualif::FN_ARGUMENT); + } + LocalKind::Var => { + self.add(Qualif::NOT_CONST); + } + LocalKind::Temp => { + if !self.temp_promotion_state[local].is_promotable() { + self.add(Qualif::NOT_PROMOTABLE); + } + + if let Some(qualif) = self.temp_qualif[local] { + self.add(qualif); + } else { + self.not_const(); + } + } + }, + Lvalue::Static(_) => { + self.add(Qualif::STATIC); + if self.mode == Mode::Const || self.mode == Mode::ConstFn { + span_err!(self.tcx.sess, self.span, E0013, + "{}s cannot refer to statics, use \ + a constant instead", self.mode); + } + } + Lvalue::Projection(ref proj) => { + self.nest(|this| { + this.super_lvalue(lvalue, context, location); + match proj.elem { + ProjectionElem::Deref => { + if !this.try_consume() { + return; + } + + if this.qualif.intersects(Qualif::STATIC_REF) { + this.qualif = this.qualif - Qualif::STATIC_REF; + this.add(Qualif::STATIC); + } + + let base_ty = proj.base.ty(this.mir, this.tcx).to_ty(this.tcx); + if let ty::TyRawPtr(_) = base_ty.sty { + this.add(Qualif::NOT_CONST); + if this.mode != Mode::Fn { + struct_span_err!(this.tcx.sess, + this.span, E0396, + "raw pointers cannot be dereferenced in {}s", + this.mode) + .span_label(this.span, + &format!("dereference of raw pointer in constant")) + .emit(); + } + } + } + + ProjectionElem::Field(..) 
| + ProjectionElem::Index(_) => { + if this.mode != Mode::Fn && + this.qualif.intersects(Qualif::STATIC) { + span_err!(this.tcx.sess, this.span, E0494, + "cannot refer to the interior of another \ + static, use a constant instead"); + } + let ty = lvalue.ty(this.mir, this.tcx).to_ty(this.tcx); + this.qualif.restrict(ty, this.tcx, &this.param_env); + } + + ProjectionElem::ConstantIndex {..} | + ProjectionElem::Subslice {..} | + ProjectionElem::Downcast(..) => { + this.not_const() + } + } + }); + } + } + } + + fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { + match *operand { + Operand::Consume(_) => { + self.nest(|this| { + this.super_operand(operand, location); + this.try_consume(); + }); + } + Operand::Constant(ref constant) => { + // Only functions and methods can have these types. + if let ty::TyFnDef(..) = constant.ty.sty { + return; + } + + if let Literal::Item { def_id, substs } = constant.literal { + // Don't peek inside generic (associated) constants. + if substs.types().next().is_some() { + self.add_type(constant.ty); + } else { + let qualif = qualify_const_item_cached(self.tcx, + self.qualif_map, + def_id); + self.add(qualif); + } + + // FIXME(eddyb) check recursive constants here, + // instead of rustc_passes::static_recursion. + if self.qualif.intersects(Qualif::RECURSIVE) { + span_bug!(constant.span, + "recursive constant wasn't caught earlier"); + } + + // Let `const fn` transitively have destructors, + // but they do get stopped in `const` or `static`. + if self.mode != Mode::ConstFn { + self.deny_drop(); + } + } + } + } + } + + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + // Recurse through operands and lvalues. + self.super_rvalue(rvalue, location); + + match *rvalue { + Rvalue::Use(_) | + Rvalue::Repeat(..) | + Rvalue::UnaryOp(..) | + Rvalue::CheckedBinaryOp(..) | + Rvalue::Cast(CastKind::ReifyFnPointer, ..) | + Rvalue::Cast(CastKind::UnsafeFnPointer, ..) 
| + Rvalue::Cast(CastKind::Unsize, ..) => {} + + Rvalue::Len(_) => { + // Static lvalues in consts would have errored already, + // don't treat length checks as reads from statics. + self.qualif = self.qualif - Qualif::STATIC; + } + + Rvalue::Ref(_, kind, ref lvalue) => { + // Static lvalues in consts would have errored already, + // only keep track of references to them here. + if self.qualif.intersects(Qualif::STATIC) { + self.qualif = self.qualif - Qualif::STATIC; + self.add(Qualif::STATIC_REF); + } + + let ty = lvalue.ty(self.mir, self.tcx).to_ty(self.tcx); + if kind == BorrowKind::Mut { + // In theory, any zero-sized value could be borrowed + // mutably without consequences. However, only &mut [] + // is allowed right now, and only in functions. + let allow = if self.mode == Mode::StaticMut { + // Inside a `static mut`, &mut [...] is also allowed. + match ty.sty { + ty::TyArray(..) | ty::TySlice(_) => { + // Mutating can expose drops, be conservative. + self.add_type(ty); + self.deny_drop(); + true + } + _ => false + } + } else if let ty::TyArray(_, 0) = ty.sty { + self.mode == Mode::Fn + } else { + false + }; + + if !allow { + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + struct_span_err!(self.tcx.sess, self.span, E0017, + "references in {}s may only refer \ + to immutable values", self.mode) + .span_label(self.span, &format!("{}s require immutable values", + self.mode)) + .emit(); + } + } + } else { + // Constants cannot be borrowed if they contain interior mutability as + // it means that our "silent insertion of statics" could change + // initializer values (very bad). + if self.qualif.intersects(Qualif::MUTABLE_INTERIOR) { + // Replace MUTABLE_INTERIOR with NOT_CONST to avoid + // duplicate errors (from reborrowing, for example). 
+ self.qualif = self.qualif - Qualif::MUTABLE_INTERIOR; + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + span_err!(self.tcx.sess, self.span, E0492, + "cannot borrow a constant which contains \ + interior mutability, create a static instead"); + } + } + } + + // We might have a candidate for promotion. + let candidate = Candidate::Ref(location); + if self.mode == Mode::Fn || self.mode == Mode::ConstFn { + if !self.qualif.intersects(Qualif::NEVER_PROMOTE) { + // We can only promote direct borrows of temps. + if let Lvalue::Local(local) = *lvalue { + if self.mir.local_kind(local) == LocalKind::Temp { + self.promotion_candidates.push(candidate); + } + } + } + } + } + + Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => { + let operand_ty = operand.ty(self.mir, self.tcx); + let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast"); + let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + match (cast_in, cast_out) { + (CastTy::Ptr(_), CastTy::Int(_)) | + (CastTy::FnPtr, CastTy::Int(_)) => { + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + span_err!(self.tcx.sess, self.span, E0018, + "raw pointers cannot be cast to integers in {}s", + self.mode); + } + } + _ => {} + } + } + + Rvalue::BinaryOp(op, ref lhs, _) => { + if let ty::TyRawPtr(_) = lhs.ty(self.mir, self.tcx).sty { + assert!(op == BinOp::Eq || op == BinOp::Ne || + op == BinOp::Le || op == BinOp::Lt || + op == BinOp::Ge || op == BinOp::Gt); + + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + struct_span_err!( + self.tcx.sess, self.span, E0395, + "raw pointers cannot be compared in {}s", + self.mode) + .span_label( + self.span, + &format!("comparing raw pointers in static")) + .emit(); + } + } + } + + Rvalue::Box(_) => { + self.add(Qualif::NOT_CONST); + if self.mode != Mode::Fn { + struct_span_err!(self.tcx.sess, self.span, E0010, + "allocations are not allowed in {}s", self.mode) + .span_label(self.span, &format!("allocation not 
allowed in {}s", self.mode)) + .emit(); + } + } + + Rvalue::Aggregate(ref kind, _) => { + if let AggregateKind::Adt(def, ..) = *kind { + if def.has_dtor() { + self.add(Qualif::NEEDS_DROP); + self.deny_drop(); + } + + if Some(def.did) == self.tcx.lang_items.unsafe_cell_type() { + let ty = rvalue.ty(self.mir, self.tcx).unwrap(); + self.add_type(ty); + assert!(self.qualif.intersects(Qualif::MUTABLE_INTERIOR)); + // Even if the value inside may not need dropping, + // mutating it would change that. + if !self.qualif.intersects(Qualif::NOT_CONST) { + self.deny_drop(); + } + } + } + } + + Rvalue::InlineAsm {..} => { + self.not_const(); + } + } + } + + fn visit_terminator_kind(&mut self, + bb: BasicBlock, + kind: &TerminatorKind<'tcx>, + location: Location) { + if let TerminatorKind::Call { ref func, ref args, ref destination, .. } = *kind { + self.visit_operand(func, location); + + let fn_ty = func.ty(self.mir, self.tcx); + let (is_shuffle, is_const_fn) = match fn_ty.sty { + ty::TyFnDef(def_id, _, f) => { + (f.abi == Abi::PlatformIntrinsic && + self.tcx.item_name(def_id).as_str().starts_with("simd_shuffle"), + is_const_fn(self.tcx, def_id)) + } + _ => (false, false) + }; + + for (i, arg) in args.iter().enumerate() { + self.nest(|this| { + this.visit_operand(arg, location); + if is_shuffle && i == 2 && this.mode == Mode::Fn { + let candidate = Candidate::ShuffleIndices(bb); + if !this.qualif.intersects(Qualif::NEVER_PROMOTE) { + this.promotion_candidates.push(candidate); + } else { + span_err!(this.tcx.sess, this.span, E0526, + "shuffle indices are not constant"); + } + } + }); + } + + // Const fn calls. 
+ if is_const_fn { + // We are in a const or static initializer, + if self.mode != Mode::Fn && + + // feature-gate is not enabled, + !self.tcx.sess.features.borrow().const_fn && + + // this doesn't come from a crate with the feature-gate enabled, + self.def_id.is_local() && + + // this doesn't come from a macro that has #[allow_internal_unstable] + !self.tcx.sess.codemap().span_allows_unstable(self.span) + { + let mut err = self.tcx.sess.struct_span_err(self.span, + "const fns are an unstable feature"); + help!(&mut err, + "in Nightly builds, add `#![feature(const_fn)]` \ + to the crate attributes to enable"); + err.emit(); + } + } else { + self.qualif = Qualif::NOT_CONST; + if self.mode != Mode::Fn { + // FIXME(#24111) Remove this check when const fn stabilizes + let (msg, note) = if let UnstableFeatures::Disallow = + self.tcx.sess.opts.unstable_features { + (format!("calls in {}s are limited to \ + struct and enum constructors", + self.mode), + Some("a limited form of compile-time function \ + evaluation is available on a nightly \ + compiler via `const fn`")) + } else { + (format!("calls in {}s are limited \ + to constant functions, \ + struct and enum constructors", + self.mode), + None) + }; + let mut err = struct_span_err!(self.tcx.sess, self.span, E0015, "{}", msg); + if let Some(note) = note { + err.span_note(self.span, note); + } + err.emit(); + } + } + + if let Some((ref dest, _)) = *destination { + // Avoid propagating irrelevant callee/argument qualifications. + if self.qualif.intersects(Qualif::CONST_ERROR) { + self.qualif = Qualif::NOT_CONST; + } else { + // Be conservative about the returned value of a const fn. + let tcx = self.tcx; + let ty = dest.ty(self.mir, tcx).to_ty(tcx); + self.qualif = Qualif::empty(); + self.add_type(ty); + + // Let `const fn` transitively have destructors, + // but they do get stopped in `const` or `static`. 
+ if self.mode != Mode::ConstFn { + self.deny_drop(); + } + } + self.assign(dest, location); + } + } else { + // Qualify any operands inside other terminators. + self.super_terminator_kind(bb, kind, location); + } + } + + fn visit_assign(&mut self, + _: BasicBlock, + dest: &Lvalue<'tcx>, + rvalue: &Rvalue<'tcx>, + location: Location) { + self.visit_rvalue(rvalue, location); + + // Check the allowed const fn argument forms. + if let (Mode::ConstFn, &Lvalue::Local(index)) = (self.mode, dest) { + if self.mir.local_kind(index) == LocalKind::Var && + self.const_fn_arg_vars.insert(index.index()) { + + // Direct use of an argument is permitted. + if let Rvalue::Use(Operand::Consume(Lvalue::Local(local))) = *rvalue { + if self.mir.local_kind(local) == LocalKind::Arg { + return; + } + } + + // Avoid a generic error for other uses of arguments. + if self.qualif.intersects(Qualif::FN_ARGUMENT) { + let decl = &self.mir.local_decls[index]; + span_err!(self.tcx.sess, decl.source_info.unwrap().span, E0022, + "arguments of constant functions can only \ + be immutable by-value bindings"); + return; + } + } + } + + self.assign(dest, location); + } + + fn visit_source_info(&mut self, source_info: &SourceInfo) { + self.span = source_info.span; + } + + fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>, location: Location) { + self.nest(|this| { + this.visit_source_info(&statement.source_info); + match statement.kind { + StatementKind::Assign(ref lvalue, ref rvalue) => { + this.visit_assign(bb, lvalue, rvalue, location); + } + StatementKind::SetDiscriminant { .. 
} | + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) | + StatementKind::Nop => {} + } + }); + } + + fn visit_terminator(&mut self, + bb: BasicBlock, + terminator: &Terminator<'tcx>, + location: Location) { + self.nest(|this| this.super_terminator(bb, terminator, location)); + } +} + +fn qualify_const_item_cached<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + qualif_map: &mut DefIdMap, + def_id: DefId) + -> Qualif { + match qualif_map.entry(def_id) { + Entry::Occupied(entry) => return *entry.get(), + Entry::Vacant(entry) => { + // Guard against `const` recursion. + entry.insert(Qualif::RECURSIVE); + } + } + + let param_env = if def_id.is_local() { + let node_id = tcx.map.as_local_node_id(def_id).unwrap(); + ty::ParameterEnvironment::for_item(tcx, node_id) + } else { + // These should only be monomorphic constants. + tcx.empty_parameter_environment() + }; + + let mir = &tcx.item_mir(def_id); + let mut qualifier = Qualifier::new(tcx, param_env, qualif_map, def_id, mir, Mode::Const); + let qualif = qualifier.qualify_const(); + qualifier.qualif_map.insert(def_id, qualif); + qualif +} + +#[derive(Default)] +pub struct QualifyAndPromoteConstants { + qualif_map: DefIdMap +} + +impl Pass for QualifyAndPromoteConstants {} + +impl<'tcx> MirPass<'tcx> for QualifyAndPromoteConstants { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>) { + let id = src.item_id(); + let def_id = tcx.map.local_def_id(id); + let mode = match src { + MirSource::Fn(_) => { + if is_const_fn(tcx, def_id) { + Mode::ConstFn + } else { + Mode::Fn + } + } + MirSource::Const(_) => { + match self.qualif_map.entry(def_id) { + Entry::Occupied(_) => return, + Entry::Vacant(entry) => { + // Guard against `const` recursion. + entry.insert(Qualif::RECURSIVE); + } + } + Mode::Const + } + MirSource::Static(_, hir::MutImmutable) => Mode::Static, + MirSource::Static(_, hir::MutMutable) => Mode::StaticMut, + MirSource::Promoted(..) 
=> return + }; + let param_env = ty::ParameterEnvironment::for_item(tcx, id); + + if mode == Mode::Fn || mode == Mode::ConstFn { + // This is ugly because Qualifier holds onto mir, + // which can't be mutated until its scope ends. + let (temps, candidates) = { + let mut qualifier = Qualifier::new(tcx, param_env, + &mut self.qualif_map, + def_id, mir, mode); + if mode == Mode::ConstFn { + // Enforce a constant-like CFG for `const fn`. + qualifier.qualify_const(); + } else { + while let Some((bb, data)) = qualifier.rpo.next() { + qualifier.visit_basic_block_data(bb, data); + } + } + + (qualifier.temp_promotion_state, qualifier.promotion_candidates) + }; + + // Do the actual promotion, now that we know what's viable. + promote_consts::promote_candidates(mir, tcx, temps, candidates); + } else { + let mut qualifier = Qualifier::new(tcx, param_env, + &mut self.qualif_map, + def_id, mir, mode); + let qualif = qualifier.qualify_const(); + + if mode == Mode::Const { + qualifier.qualif_map.insert(def_id, qualif); + } + } + + // Statics must be Sync. + if mode == Mode::Static { + let ty = mir.return_ty; + tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { + let cause = traits::ObligationCause::new(mir.span, id, traits::SharedStatic); + let mut fulfillment_cx = traits::FulfillmentContext::new(); + fulfillment_cx.register_bound(&infcx, ty, + tcx.require_lang_item(lang_items::SyncTraitLangItem), + cause); + if let Err(err) = fulfillment_cx.select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(&err); + } + }); + } + } +} diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs new file mode 100644 index 0000000000000..d5fc90289e2cc --- /dev/null +++ b/src/librustc_mir/transform/simplify.rs @@ -0,0 +1,349 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A number of passes which remove various redundancies in the CFG. +//! +//! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the `SimplifyLocals` +//! gets rid of all the unnecessary local variable declarations. +//! +//! The `SimplifyLocals` pass is kinda expensive and therefore not very suitable to be run often. +//! Most of the passes should not care or be impacted in meaningful ways due to extra locals +//! either, so running the pass once, right before translation, should suffice. +//! +//! On the other side of the spectrum, the `SimplifyCfg` pass is considerably cheap to run, thus +//! one should run it after every pass which may modify CFG in significant ways. This pass must +//! also be run before any analysis passes because it removes dead blocks, and some of these can be +//! ill-typed. +//! +//! The cause of this typing issue is typeck allowing most blocks whose end is not reachable have +//! an arbitrary return type, rather than having the usual () return type (as a note, typeck's +//! notion of reachability is in fact slightly weaker than MIR CFG reachability - see #31617). A +//! standard example of the situation is: +//! +//! ```rust +//! fn example() { +//! let _a: char = { return; }; +//! } +//! ``` +//! +//! Here the block (`{ return; }`) has the return type `char`, rather than `()`, but the MIR we +//! naively generate still contains the `_a = ()` write in the unreachable block "after" the +//! return. 
+ +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc::ty::TyCtxt; +use rustc::mir::*; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::{MutVisitor, Visitor, LvalueContext}; +use std::fmt; + +pub struct SimplifyCfg<'a> { label: &'a str } + +impl<'a> SimplifyCfg<'a> { + pub fn new(label: &'a str) -> Self { + SimplifyCfg { label: label } + } +} + +impl<'l, 'tcx> MirPass<'tcx> for SimplifyCfg<'l> { + fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) { + debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, mir); + CfgSimplifier::new(mir).simplify(); + remove_dead_blocks(mir); + + // FIXME: Should probably be moved into some kind of pass manager + mir.basic_blocks_mut().raw.shrink_to_fit(); + } +} + +impl<'l> Pass for SimplifyCfg<'l> { + fn disambiguator<'a>(&'a self) -> Option> { + Some(Box::new(self.label)) + } + + // avoid calling `type_name` - it contains `<'static>` + fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyCfg".into() } +} + +pub struct CfgSimplifier<'a, 'tcx: 'a> { + basic_blocks: &'a mut IndexVec>, + pred_count: IndexVec +} + +impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { + fn new(mir: &'a mut Mir<'tcx>) -> Self { + let mut pred_count = IndexVec::from_elem(0u32, mir.basic_blocks()); + + // we can't use mir.predecessors() here because that counts + // dead blocks, which we don't want to. 
+ pred_count[START_BLOCK] = 1; + + for (_, data) in traversal::preorder(mir) { + if let Some(ref term) = data.terminator { + for &tgt in term.successors().iter() { + pred_count[tgt] += 1; + } + } + } + + let basic_blocks = mir.basic_blocks_mut(); + + CfgSimplifier { + basic_blocks: basic_blocks, + pred_count: pred_count + } + } + + fn simplify(mut self) { + loop { + let mut changed = false; + + for bb in (0..self.basic_blocks.len()).map(BasicBlock::new) { + if self.pred_count[bb] == 0 { + continue + } + + debug!("simplifying {:?}", bb); + + let mut terminator = self.basic_blocks[bb].terminator.take() + .expect("invalid terminator state"); + + for successor in terminator.successors_mut() { + self.collapse_goto_chain(successor, &mut changed); + } + + let mut new_stmts = vec![]; + let mut inner_changed = true; + while inner_changed { + inner_changed = false; + inner_changed |= self.simplify_branch(&mut terminator); + inner_changed |= self.merge_successor(&mut new_stmts, &mut terminator); + changed |= inner_changed; + } + + self.basic_blocks[bb].statements.extend(new_stmts); + self.basic_blocks[bb].terminator = Some(terminator); + + changed |= inner_changed; + } + + if !changed { break } + } + } + + // Collapse a goto chain starting from `start` + fn collapse_goto_chain(&mut self, start: &mut BasicBlock, changed: &mut bool) { + let mut terminator = match self.basic_blocks[*start] { + BasicBlockData { + ref statements, + terminator: ref mut terminator @ Some(Terminator { + kind: TerminatorKind::Goto { .. }, .. + }), .. + } if statements.is_empty() => terminator.take(), + // if `terminator` is None, this means we are in a loop. In that + // case, let all the loop collapse to its entry. + _ => return + }; + + let target = match terminator { + Some(Terminator { kind: TerminatorKind::Goto { ref mut target }, .. 
}) => { + self.collapse_goto_chain(target, changed); + *target + } + _ => unreachable!() + }; + self.basic_blocks[*start].terminator = terminator; + + debug!("collapsing goto chain from {:?} to {:?}", *start, target); + + *changed |= *start != target; + + if self.pred_count[*start] == 1 { + // This is the last reference to *start, so the pred-count to + // to target is moved into the current block. + self.pred_count[*start] = 0; + } else { + self.pred_count[target] += 1; + self.pred_count[*start] -= 1; + } + + *start = target; + } + + // merge a block with 1 `goto` predecessor to its parent + fn merge_successor(&mut self, + new_stmts: &mut Vec>, + terminator: &mut Terminator<'tcx>) + -> bool + { + let target = match terminator.kind { + TerminatorKind::Goto { target } + if self.pred_count[target] == 1 + => target, + _ => return false + }; + + debug!("merging block {:?} into {:?}", target, terminator); + *terminator = match self.basic_blocks[target].terminator.take() { + Some(terminator) => terminator, + None => { + // unreachable loop - this should not be possible, as we + // don't strand blocks, but handle it correctly. + return false + } + }; + new_stmts.extend(self.basic_blocks[target].statements.drain(..)); + self.pred_count[target] = 0; + + true + } + + // turn a branch with all successors identical to a goto + fn simplify_branch(&mut self, terminator: &mut Terminator<'tcx>) -> bool { + match terminator.kind { + TerminatorKind::If { .. } | + TerminatorKind::Switch { .. } | + TerminatorKind::SwitchInt { .. 
} => {}, + _ => return false + }; + + let first_succ = { + let successors = terminator.successors(); + if let Some(&first_succ) = terminator.successors().get(0) { + if successors.iter().all(|s| *s == first_succ) { + self.pred_count[first_succ] -= (successors.len()-1) as u32; + first_succ + } else { + return false + } + } else { + return false + } + }; + + debug!("simplifying branch {:?}", terminator); + terminator.kind = TerminatorKind::Goto { target: first_succ }; + true + } +} + +fn remove_dead_blocks(mir: &mut Mir) { + let mut seen = BitVector::new(mir.basic_blocks().len()); + for (bb, _) in traversal::preorder(mir) { + seen.insert(bb.index()); + } + + let basic_blocks = mir.basic_blocks_mut(); + + let num_blocks = basic_blocks.len(); + let mut replacements : Vec<_> = (0..num_blocks).map(BasicBlock::new).collect(); + let mut used_blocks = 0; + for alive_index in seen.iter() { + replacements[alive_index] = BasicBlock::new(used_blocks); + if alive_index != used_blocks { + // Swap the next alive block data with the current available slot. Since alive_index is + // non-decreasing this is a valid operation. 
+ basic_blocks.raw.swap(alive_index, used_blocks); + } + used_blocks += 1; + } + basic_blocks.raw.truncate(used_blocks); + + for block in basic_blocks { + for target in block.terminator_mut().successors_mut() { + *target = replacements[target.index()]; + } + } +} + + +pub struct SimplifyLocals; + +impl Pass for SimplifyLocals { + fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyLocals".into() } +} + +impl<'tcx> MirPass<'tcx> for SimplifyLocals { + fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) { + let mut marker = DeclMarker { locals: BitVector::new(mir.local_decls.len()) }; + marker.visit_mir(mir); + // Return pointer and arguments are always live + marker.locals.insert(0); + for idx in mir.args_iter() { + marker.locals.insert(idx.index()); + } + let map = make_local_map(&mut mir.local_decls, marker.locals); + // Update references to all vars and tmps now + LocalUpdater { map: map }.visit_mir(mir); + mir.local_decls.shrink_to_fit(); + } +} + +/// Construct the mapping while swapping out unused stuff out from the `vec`. +fn make_local_map<'tcx, I: Idx, V>(vec: &mut IndexVec, mask: BitVector) -> Vec { + let mut map: Vec = ::std::iter::repeat(!0).take(vec.len()).collect(); + let mut used = 0; + for alive_index in mask.iter() { + map[alive_index] = used; + if alive_index != used { + vec.swap(alive_index, used); + } + used += 1; + } + vec.truncate(used); + map +} + +struct DeclMarker { + pub locals: BitVector, +} + +impl<'tcx> Visitor<'tcx> for DeclMarker { + fn visit_lvalue(&mut self, lval: &Lvalue<'tcx>, ctx: LvalueContext<'tcx>, loc: Location) { + if ctx == LvalueContext::StorageLive || ctx == LvalueContext::StorageDead { + // ignore these altogether, they get removed along with their otherwise unused decls. 
+ return; + } + if let Lvalue::Local(ref v) = *lval { + self.locals.insert(v.index()); + } + self.super_lvalue(lval, ctx, loc); + } +} + +struct LocalUpdater { + map: Vec, +} + +impl<'tcx> MutVisitor<'tcx> for LocalUpdater { + fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) { + // Remove unnecessary StorageLive and StorageDead annotations. + data.statements.retain(|stmt| { + match stmt.kind { + StatementKind::StorageLive(ref lval) | StatementKind::StorageDead(ref lval) => { + match *lval { + Lvalue::Local(l) => self.map[l.index()] != !0, + _ => true + } + } + _ => true + } + }); + self.super_basic_block_data(block, data); + } + fn visit_lvalue(&mut self, lval: &mut Lvalue<'tcx>, ctx: LvalueContext<'tcx>, loc: Location) { + match *lval { + Lvalue::Local(ref mut l) => *l = Local::new(self.map[l.index()]), + _ => (), + }; + self.super_lvalue(lval, ctx, loc); + } +} diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs new file mode 100644 index 0000000000000..8759a340d7e3c --- /dev/null +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -0,0 +1,66 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A pass that simplifies branches when their condition is known. 
+ +use rustc::ty::TyCtxt; +use rustc::middle::const_val::ConstVal; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::*; + +use std::fmt; + +pub struct SimplifyBranches<'a> { label: &'a str } + +impl<'a> SimplifyBranches<'a> { + pub fn new(label: &'a str) -> Self { + SimplifyBranches { label: label } + } +} + +impl<'l, 'tcx> MirPass<'tcx> for SimplifyBranches<'l> { + fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, mir: &mut Mir<'tcx>) { + for block in mir.basic_blocks_mut() { + let terminator = block.terminator_mut(); + terminator.kind = match terminator.kind { + TerminatorKind::If { ref targets, cond: Operand::Constant(Constant { + literal: Literal::Value { + value: ConstVal::Bool(cond) + }, .. + }) } => { + if cond { + TerminatorKind::Goto { target: targets.0 } + } else { + TerminatorKind::Goto { target: targets.1 } + } + } + + TerminatorKind::Assert { target, cond: Operand::Constant(Constant { + literal: Literal::Value { + value: ConstVal::Bool(cond) + }, .. + }), expected, .. } if cond == expected => { + TerminatorKind::Goto { target: target } + } + + _ => continue + }; + } + } +} + +impl<'l> Pass for SimplifyBranches<'l> { + fn disambiguator<'a>(&'a self) -> Option> { + Some(Box::new(self.label)) + } + + // avoid calling `type_name` - it contains `<'static>` + fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyBranches".into() } +} diff --git a/src/librustc_mir/transform/simplify_cfg.rs b/src/librustc_mir/transform/simplify_cfg.rs deleted file mode 100644 index 7a5a00a8d560b..0000000000000 --- a/src/librustc_mir/transform/simplify_cfg.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::middle::const_eval::ConstVal; -use rustc::mir::repr::*; -use transform::util; -use transform::MirPass; - -pub struct SimplifyCfg; - -impl SimplifyCfg { - pub fn new() -> SimplifyCfg { - SimplifyCfg - } - - fn remove_dead_blocks(&self, mir: &mut Mir) { - let mut seen = vec![false; mir.basic_blocks.len()]; - - // These blocks are always required. - seen[START_BLOCK.index()] = true; - seen[END_BLOCK.index()] = true; - - let mut worklist = vec![START_BLOCK]; - while let Some(bb) = worklist.pop() { - for succ in mir.basic_block_data(bb).terminator().successors() { - if !seen[succ.index()] { - seen[succ.index()] = true; - worklist.push(*succ); - } - } - } - - util::retain_basic_blocks(mir, &seen); - } - - fn remove_goto_chains(&self, mir: &mut Mir) -> bool { - - // Find the target at the end of the jump chain, return None if there is a loop - fn final_target(mir: &Mir, mut target: BasicBlock) -> Option { - // Keep track of already seen blocks to detect loops - let mut seen: Vec = Vec::with_capacity(8); - - while mir.basic_block_data(target).statements.is_empty() { - match mir.basic_block_data(target).terminator { - Some(Terminator::Goto { target: next }) => { - if seen.contains(&next) { - return None; - } - seen.push(next); - target = next; - } - _ => break - } - } - - Some(target) - } - - let mut changed = false; - for bb in mir.all_basic_blocks() { - // Temporarily take ownership of the terminator we're modifying to keep borrowck happy - let mut terminator = mir.basic_block_data_mut(bb).terminator.take() - .expect("invalid terminator state"); - - for target in terminator.successors_mut() { - let new_target = match final_target(mir, *target) { - Some(new_target) => new_target, - None if mir.basic_block_data(bb).statements.is_empty() => bb, - None => continue - }; - changed |= *target != new_target; - *target = new_target; - } - mir.basic_block_data_mut(bb).terminator 
= Some(terminator); - } - changed - } - - fn simplify_branches(&self, mir: &mut Mir) -> bool { - let mut changed = false; - - for bb in mir.all_basic_blocks() { - let basic_block = mir.basic_block_data_mut(bb); - let mut terminator = basic_block.terminator_mut(); - - *terminator = match *terminator { - Terminator::If { ref targets, .. } if targets.0 == targets.1 => { - changed = true; - Terminator::Goto { target: targets.0 } - } - Terminator::If { ref targets, cond: Operand::Constant(Constant { - literal: Literal::Value { - value: ConstVal::Bool(cond) - }, .. - }) } => { - changed = true; - if cond { - Terminator::Goto { target: targets.0 } - } else { - Terminator::Goto { target: targets.1 } - } - } - Terminator::SwitchInt { ref targets, .. } if targets.len() == 1 => { - Terminator::Goto { target: targets[0] } - } - _ => continue - } - } - - changed - } -} - -impl<'tcx> MirPass<'tcx> for SimplifyCfg { - fn run_on_mir(&mut self, mir: &mut Mir<'tcx>) { - let mut changed = true; - while changed { - changed = self.simplify_branches(mir); - changed |= self.remove_goto_chains(mir); - self.remove_dead_blocks(mir); - } - // FIXME: Should probably be moved into some kind of pass manager - mir.basic_blocks.shrink_to_fit(); - } -} diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs new file mode 100644 index 0000000000000..0ceed274b6da6 --- /dev/null +++ b/src/librustc_mir/transform/type_check.rs @@ -0,0 +1,745 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass type-checks the MIR to ensure it is not broken. 
+#![allow(unreachable_code)] + +use rustc::infer::{self, InferCtxt, InferOk}; +use rustc::traits::{self, Reveal}; +use rustc::ty::fold::TypeFoldable; +use rustc::ty::{self, Ty, TyCtxt, TypeVariants}; +use rustc::mir::*; +use rustc::mir::tcx::LvalueTy; +use rustc::mir::transform::{MirPass, MirSource, Pass}; +use rustc::mir::visit::Visitor; +use std::fmt; +use syntax::ast; +use syntax_pos::{Span, DUMMY_SP}; + +use rustc_data_structures::indexed_vec::Idx; + +macro_rules! span_mirbug { + ($context:expr, $elem:expr, $($message:tt)*) => ({ + $context.tcx().sess.span_warn( + $context.last_span, + &format!("broken MIR ({:?}): {}", $elem, format!($($message)*)) + ) + }) +} + +macro_rules! span_mirbug_and_err { + ($context:expr, $elem:expr, $($message:tt)*) => ({ + { + $context.tcx().sess.span_warn( + $context.last_span, + &format!("broken MIR ({:?}): {:?}", $elem, format!($($message)*)) + ); + $context.error() + } + }) +} + +enum FieldAccessError { + OutOfRange { field_count: usize } +} + +/// Verifies that MIR types are sane to not crash further checks. +/// +/// The sanitize_XYZ methods here take an MIR object and compute its +/// type, calling `span_mirbug` and returning an error type if there +/// is a problem. 
+struct TypeVerifier<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { + cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, + mir: &'a Mir<'tcx>, + last_span: Span, + errors_reported: bool +} + +impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { + fn visit_span(&mut self, span: &Span) { + if *span != DUMMY_SP { + self.last_span = *span; + } + } + + fn visit_lvalue(&mut self, + lvalue: &Lvalue<'tcx>, + _context: visit::LvalueContext, + location: Location) { + self.sanitize_lvalue(lvalue, location); + } + + fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) { + self.super_constant(constant, location); + self.sanitize_type(constant, constant.ty); + } + + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + self.super_rvalue(rvalue, location); + if let Some(ty) = rvalue.ty(self.mir, self.tcx()) { + self.sanitize_type(rvalue, ty); + } + } + + fn visit_mir(&mut self, mir: &Mir<'tcx>) { + self.sanitize_type(&"return type", mir.return_ty); + for local_decl in &mir.local_decls { + self.sanitize_type(local_decl, local_decl.ty); + } + if self.errors_reported { + return; + } + self.super_mir(mir); + } +} + +impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { + fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { + TypeVerifier { + cx: cx, + mir: mir, + last_span: mir.span, + errors_reported: false + } + } + + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.cx.infcx.tcx + } + + fn sanitize_type(&mut self, parent: &fmt::Debug, ty: Ty<'tcx>) -> Ty<'tcx> { + if ty.needs_infer() || ty.has_escaping_regions() || ty.references_error() { + span_mirbug_and_err!(self, parent, "bad type {:?}", ty) + } else { + ty + } + } + + fn sanitize_lvalue(&mut self, lvalue: &Lvalue<'tcx>, location: Location) -> LvalueTy<'tcx> { + debug!("sanitize_lvalue: {:?}", lvalue); + match *lvalue { + Lvalue::Local(index) => LvalueTy::Ty { ty: self.mir.local_decls[index].ty }, + Lvalue::Static(def_id) => + LvalueTy::Ty { ty: 
self.tcx().item_type(def_id) }, + Lvalue::Projection(ref proj) => { + let base_ty = self.sanitize_lvalue(&proj.base, location); + if let LvalueTy::Ty { ty } = base_ty { + if ty.references_error() { + assert!(self.errors_reported); + return LvalueTy::Ty { ty: self.tcx().types.err }; + } + } + self.sanitize_projection(base_ty, &proj.elem, lvalue, location) + } + } + } + + fn sanitize_projection(&mut self, + base: LvalueTy<'tcx>, + pi: &LvalueElem<'tcx>, + lvalue: &Lvalue<'tcx>, + location: Location) + -> LvalueTy<'tcx> { + debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, lvalue); + let tcx = self.tcx(); + let base_ty = base.to_ty(tcx); + let span = self.last_span; + match *pi { + ProjectionElem::Deref => { + let deref_ty = base_ty.builtin_deref(true, ty::LvaluePreference::NoPreference); + LvalueTy::Ty { + ty: deref_ty.map(|t| t.ty).unwrap_or_else(|| { + span_mirbug_and_err!( + self, lvalue, "deref of non-pointer {:?}", base_ty) + }) + } + } + ProjectionElem::Index(ref i) => { + self.visit_operand(i, location); + let index_ty = i.ty(self.mir, tcx); + if index_ty != tcx.types.usize { + LvalueTy::Ty { + ty: span_mirbug_and_err!(self, i, "index by non-usize {:?}", i) + } + } else { + LvalueTy::Ty { + ty: base_ty.builtin_index().unwrap_or_else(|| { + span_mirbug_and_err!( + self, lvalue, "index of non-array {:?}", base_ty) + }) + } + } + } + ProjectionElem::ConstantIndex { .. } => { + // consider verifying in-bounds + LvalueTy::Ty { + ty: base_ty.builtin_index().unwrap_or_else(|| { + span_mirbug_and_err!( + self, lvalue, "index of non-array {:?}", base_ty) + }) + } + } + ProjectionElem::Subslice { from, to } => { + LvalueTy::Ty { + ty: match base_ty.sty { + ty::TyArray(inner, size) => { + let min_size = (from as usize) + (to as usize); + if let Some(rest_size) = size.checked_sub(min_size) { + tcx.mk_array(inner, rest_size) + } else { + span_mirbug_and_err!( + self, lvalue, "taking too-small slice of {:?}", base_ty) + } + } + ty::TySlice(..) 
=> base_ty, + _ => { + span_mirbug_and_err!( + self, lvalue, "slice of non-array {:?}", base_ty) + } + } + } + } + ProjectionElem::Downcast(adt_def1, index) => + match base_ty.sty { + ty::TyAdt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => { + if index >= adt_def.variants.len() { + LvalueTy::Ty { + ty: span_mirbug_and_err!( + self, + lvalue, + "cast to variant #{:?} but enum only has {:?}", + index, + adt_def.variants.len()) + } + } else { + LvalueTy::Downcast { + adt_def: adt_def, + substs: substs, + variant_index: index + } + } + } + _ => LvalueTy::Ty { + ty: span_mirbug_and_err!( + self, lvalue, "can't downcast {:?} as {:?}", + base_ty, adt_def1) + } + }, + ProjectionElem::Field(field, fty) => { + let fty = self.sanitize_type(lvalue, fty); + match self.field_ty(lvalue, base, field) { + Ok(ty) => { + if let Err(terr) = self.cx.eq_types(span, ty, fty) { + span_mirbug!( + self, lvalue, "bad field access ({:?}: {:?}): {:?}", + ty, fty, terr); + } + } + Err(FieldAccessError::OutOfRange { field_count }) => { + span_mirbug!( + self, lvalue, "accessed field #{} but variant only has {}", + field.index(), field_count) + } + } + LvalueTy::Ty { ty: fty } + } + } + } + + fn error(&mut self) -> Ty<'tcx> { + self.errors_reported = true; + self.tcx().types.err + } + + fn field_ty(&mut self, + parent: &fmt::Debug, + base_ty: LvalueTy<'tcx>, + field: Field) + -> Result, FieldAccessError> + { + let tcx = self.tcx(); + + let (variant, substs) = match base_ty { + LvalueTy::Downcast { adt_def, substs, variant_index } => { + (&adt_def.variants[variant_index], substs) + } + LvalueTy::Ty { ty } => match ty.sty { + ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => { + (&adt_def.variants[0], substs) + } + ty::TyClosure(def_id, substs) => { + return match substs.upvar_tys(def_id, tcx).nth(field.index()) { + Some(ty) => Ok(ty), + None => Err(FieldAccessError::OutOfRange { + field_count: substs.upvar_tys(def_id, tcx).count() + }) + } + } + ty::TyTuple(tys) => { + 
return match tys.get(field.index()) { + Some(&ty) => Ok(ty), + None => Err(FieldAccessError::OutOfRange { + field_count: tys.len() + }) + } + } + _ => return Ok(span_mirbug_and_err!( + self, parent, "can't project out of {:?}", base_ty)) + } + }; + + if let Some(field) = variant.fields.get(field.index()) { + Ok(self.cx.normalize(&field.ty(tcx, substs))) + } else { + Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() }) + } + } +} + +pub struct TypeChecker<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + fulfillment_cx: traits::FulfillmentContext<'tcx>, + last_span: Span, + body_id: ast::NodeId, +} + +impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { + fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, body_id: ast::NodeId) -> Self { + TypeChecker { + infcx: infcx, + fulfillment_cx: traits::FulfillmentContext::new(), + last_span: DUMMY_SP, + body_id: body_id, + } + } + + fn misc(&self, span: Span) -> traits::ObligationCause<'tcx> { + traits::ObligationCause::misc(span, self.body_id) + } + + pub fn register_infer_ok_obligations(&mut self, infer_ok: InferOk<'tcx, T>) -> T { + for obligation in infer_ok.obligations { + self.fulfillment_cx.register_predicate_obligation(self.infcx, obligation); + } + infer_ok.value + } + + fn sub_types(&mut self, sup: Ty<'tcx>, sub: Ty<'tcx>) + -> infer::UnitResult<'tcx> + { + self.infcx.sub_types(false, &self.misc(self.last_span), sup, sub) + .map(|ok| self.register_infer_ok_obligations(ok)) + } + + fn eq_types(&mut self, span: Span, a: Ty<'tcx>, b: Ty<'tcx>) + -> infer::UnitResult<'tcx> + { + self.infcx.eq_types(false, &self.misc(span), a, b) + .map(|ok| self.register_infer_ok_obligations(ok)) + } + + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.infcx.tcx + } + + fn check_stmt(&mut self, mir: &Mir<'tcx>, stmt: &Statement<'tcx>) { + debug!("check_stmt: {:?}", stmt); + let tcx = self.tcx(); + match stmt.kind { + StatementKind::Assign(ref lv, ref rv) => { + let lv_ty = lv.ty(mir, tcx).to_ty(tcx); + 
let rv_ty = rv.ty(mir, tcx); + if let Some(rv_ty) = rv_ty { + if let Err(terr) = self.sub_types(rv_ty, lv_ty) { + span_mirbug!(self, stmt, "bad assignment ({:?} = {:?}): {:?}", + lv_ty, rv_ty, terr); + } + // FIXME: rvalue with undeterminable type - e.g. inline + // asm. + } + } + StatementKind::SetDiscriminant{ ref lvalue, variant_index } => { + let lvalue_type = lvalue.ty(mir, tcx).to_ty(tcx); + let adt = match lvalue_type.sty { + TypeVariants::TyAdt(adt, _) if adt.is_enum() => adt, + _ => { + span_bug!(stmt.source_info.span, + "bad set discriminant ({:?} = {:?}): lhs is not an enum", + lvalue, + variant_index); + } + }; + if variant_index >= adt.variants.len() { + span_bug!(stmt.source_info.span, + "bad set discriminant ({:?} = {:?}): value of of range", + lvalue, + variant_index); + }; + } + StatementKind::StorageLive(ref lv) | + StatementKind::StorageDead(ref lv) => { + match *lv { + Lvalue::Local(_) => {} + _ => { + span_mirbug!(self, stmt, "bad lvalue: expected local"); + } + } + } + StatementKind::Nop => {} + } + } + + fn check_terminator(&mut self, + mir: &Mir<'tcx>, + term: &Terminator<'tcx>) { + debug!("check_terminator: {:?}", term); + let tcx = self.tcx(); + match term.kind { + TerminatorKind::Goto { .. } | + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::Unreachable | + TerminatorKind::Drop { .. } => { + // no checks needed for these + } + + + TerminatorKind::DropAndReplace { + ref location, + ref value, + .. + } => { + let lv_ty = location.ty(mir, tcx).to_ty(tcx); + let rv_ty = value.ty(mir, tcx); + if let Err(terr) = self.sub_types(rv_ty, lv_ty) { + span_mirbug!(self, term, "bad DropAndReplace ({:?} = {:?}): {:?}", + lv_ty, rv_ty, terr); + } + } + + TerminatorKind::If { ref cond, .. } => { + let cond_ty = cond.ty(mir, tcx); + match cond_ty.sty { + ty::TyBool => {} + _ => { + span_mirbug!(self, term, "bad If ({:?}, not bool", cond_ty); + } + } + } + TerminatorKind::SwitchInt { ref discr, switch_ty, .. 
} => { + let discr_ty = discr.ty(mir, tcx).to_ty(tcx); + if let Err(terr) = self.sub_types(discr_ty, switch_ty) { + span_mirbug!(self, term, "bad SwitchInt ({:?} on {:?}): {:?}", + switch_ty, discr_ty, terr); + } + if !switch_ty.is_integral() && !switch_ty.is_char() && + !switch_ty.is_bool() + { + span_mirbug!(self, term, "bad SwitchInt discr ty {:?}",switch_ty); + } + // FIXME: check the values + } + TerminatorKind::Switch { ref discr, adt_def, ref targets } => { + let discr_ty = discr.ty(mir, tcx).to_ty(tcx); + match discr_ty.sty { + ty::TyAdt(def, _) if def.is_enum() && + def == adt_def && + adt_def.variants.len() == targets.len() + => {}, + _ => { + span_mirbug!(self, term, "bad Switch ({:?} on {:?})", + adt_def, discr_ty); + } + } + } + TerminatorKind::Call { ref func, ref args, ref destination, .. } => { + let func_ty = func.ty(mir, tcx); + debug!("check_terminator: call, func_ty={:?}", func_ty); + let func_ty = match func_ty.sty { + ty::TyFnDef(.., func_ty) | ty::TyFnPtr(func_ty) => func_ty, + _ => { + span_mirbug!(self, term, "call to non-function {:?}", func_ty); + return; + } + }; + let sig = tcx.erase_late_bound_regions(&func_ty.sig); + let sig = self.normalize(&sig); + self.check_call_dest(mir, term, &sig, destination); + + if self.is_box_free(func) { + self.check_box_free_inputs(mir, term, &sig, args); + } else { + self.check_call_inputs(mir, term, &sig, args); + } + } + TerminatorKind::Assert { ref cond, ref msg, .. 
} => { + let cond_ty = cond.ty(mir, tcx); + if cond_ty != tcx.types.bool { + span_mirbug!(self, term, "bad Assert ({:?}, not bool", cond_ty); + } + + if let AssertMessage::BoundsCheck { ref len, ref index } = *msg { + if len.ty(mir, tcx) != tcx.types.usize { + span_mirbug!(self, len, "bounds-check length non-usize {:?}", len) + } + if index.ty(mir, tcx) != tcx.types.usize { + span_mirbug!(self, index, "bounds-check index non-usize {:?}", index) + } + } + } + } + } + + fn check_call_dest(&mut self, + mir: &Mir<'tcx>, + term: &Terminator<'tcx>, + sig: &ty::FnSig<'tcx>, + destination: &Option<(Lvalue<'tcx>, BasicBlock)>) { + let tcx = self.tcx(); + match *destination { + Some((ref dest, _)) => { + let dest_ty = dest.ty(mir, tcx).to_ty(tcx); + if let Err(terr) = self.sub_types(sig.output, dest_ty) { + span_mirbug!(self, term, + "call dest mismatch ({:?} <- {:?}): {:?}", + dest_ty, sig.output, terr); + } + }, + None => { + // FIXME(canndrew): This is_never should probably be an is_uninhabited + if !sig.output.is_never() { + span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig); + } + }, + } + } + + fn check_call_inputs(&mut self, + mir: &Mir<'tcx>, + term: &Terminator<'tcx>, + sig: &ty::FnSig<'tcx>, + args: &[Operand<'tcx>]) + { + debug!("check_call_inputs({:?}, {:?})", sig, args); + if args.len() < sig.inputs.len() || + (args.len() > sig.inputs.len() && !sig.variadic) { + span_mirbug!(self, term, "call to {:?} with wrong # of args", sig); + } + for (n, (fn_arg, op_arg)) in sig.inputs.iter().zip(args).enumerate() { + let op_arg_ty = op_arg.ty(mir, self.tcx()); + if let Err(terr) = self.sub_types(op_arg_ty, fn_arg) { + span_mirbug!(self, term, "bad arg #{:?} ({:?} <- {:?}): {:?}", + n, fn_arg, op_arg_ty, terr); + } + } + } + + fn is_box_free(&self, operand: &Operand<'tcx>) -> bool { + match operand { + &Operand::Constant(Constant { + literal: Literal::Item { def_id, .. }, .. 
+ }) => { + Some(def_id) == self.tcx().lang_items.box_free_fn() + } + _ => false, + } + } + + fn check_box_free_inputs(&mut self, + mir: &Mir<'tcx>, + term: &Terminator<'tcx>, + sig: &ty::FnSig<'tcx>, + args: &[Operand<'tcx>]) + { + debug!("check_box_free_inputs"); + + // box_free takes a Box as a pointer. Allow for that. + + if sig.inputs.len() != 1 { + span_mirbug!(self, term, "box_free should take 1 argument"); + return; + } + + let pointee_ty = match sig.inputs[0].sty { + ty::TyRawPtr(mt) => mt.ty, + _ => { + span_mirbug!(self, term, "box_free should take a raw ptr"); + return; + } + }; + + if args.len() != 1 { + span_mirbug!(self, term, "box_free called with wrong # of args"); + return; + } + + let arg_ty = match args[0].ty(mir, self.tcx()).sty { + ty::TyRawPtr(mt) => mt.ty, + ty::TyBox(ty) => ty, + _ => { + span_mirbug!(self, term, "box_free called with bad arg ty"); + return; + } + }; + + if let Err(terr) = self.sub_types(arg_ty, pointee_ty) { + span_mirbug!(self, term, "bad box_free arg ({:?} <- {:?}): {:?}", + pointee_ty, arg_ty, terr); + } + } + + fn check_iscleanup(&mut self, mir: &Mir<'tcx>, block: &BasicBlockData<'tcx>) + { + let is_cleanup = block.is_cleanup; + self.last_span = block.terminator().source_info.span; + match block.terminator().kind { + TerminatorKind::Goto { target } => + self.assert_iscleanup(mir, block, target, is_cleanup), + TerminatorKind::If { targets: (on_true, on_false), .. } => { + self.assert_iscleanup(mir, block, on_true, is_cleanup); + self.assert_iscleanup(mir, block, on_false, is_cleanup); + } + TerminatorKind::Switch { ref targets, .. } | + TerminatorKind::SwitchInt { ref targets, .. 
} => { + for target in targets { + self.assert_iscleanup(mir, block, *target, is_cleanup); + } + } + TerminatorKind::Resume => { + if !is_cleanup { + span_mirbug!(self, block, "resume on non-cleanup block!") + } + } + TerminatorKind::Return => { + if is_cleanup { + span_mirbug!(self, block, "return on cleanup block") + } + } + TerminatorKind::Unreachable => {} + TerminatorKind::Drop { target, unwind, .. } | + TerminatorKind::DropAndReplace { target, unwind, .. } | + TerminatorKind::Assert { target, cleanup: unwind, .. } => { + self.assert_iscleanup(mir, block, target, is_cleanup); + if let Some(unwind) = unwind { + if is_cleanup { + span_mirbug!(self, block, "unwind on cleanup block") + } + self.assert_iscleanup(mir, block, unwind, true); + } + } + TerminatorKind::Call { ref destination, cleanup, .. } => { + if let &Some((_, target)) = destination { + self.assert_iscleanup(mir, block, target, is_cleanup); + } + if let Some(cleanup) = cleanup { + if is_cleanup { + span_mirbug!(self, block, "cleanup on cleanup block") + } + self.assert_iscleanup(mir, block, cleanup, true); + } + } + } + } + + fn assert_iscleanup(&mut self, + mir: &Mir<'tcx>, + ctxt: &fmt::Debug, + bb: BasicBlock, + iscleanuppad: bool) + { + if mir[bb].is_cleanup != iscleanuppad { + span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", + bb, iscleanuppad); + } + } + + fn typeck_mir(&mut self, mir: &Mir<'tcx>) { + self.last_span = mir.span; + debug!("run_on_mir: {:?}", mir.span); + for block in mir.basic_blocks() { + for stmt in &block.statements { + if stmt.source_info.span != DUMMY_SP { + self.last_span = stmt.source_info.span; + } + self.check_stmt(mir, stmt); + } + + self.check_terminator(mir, block.terminator()); + self.check_iscleanup(mir, block); + } + } + + + fn normalize(&mut self, value: &T) -> T + where T: fmt::Debug + TypeFoldable<'tcx> + { + let mut selcx = traits::SelectionContext::new(self.infcx); + let cause = traits::ObligationCause::misc(self.last_span, 
ast::CRATE_NODE_ID); + let traits::Normalized { value, obligations } = + traits::normalize(&mut selcx, cause, value); + + debug!("normalize: value={:?} obligations={:?}", + value, + obligations); + + let mut fulfill_cx = &mut self.fulfillment_cx; + for obligation in obligations { + fulfill_cx.register_predicate_obligation(self.infcx, obligation); + } + + value + } + + fn verify_obligations(&mut self, mir: &Mir<'tcx>) { + self.last_span = mir.span; + if let Err(e) = self.fulfillment_cx.select_all_or_error(self.infcx) { + span_mirbug!(self, "", "errors selecting obligation: {:?}", + e); + } + } +} + +pub struct TypeckMir; + +impl TypeckMir { + pub fn new() -> Self { + TypeckMir + } +} + +impl<'tcx> MirPass<'tcx> for TypeckMir { + fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource, mir: &mut Mir<'tcx>) { + debug!("run_pass: {}", tcx.node_path_str(src.item_id())); + + if tcx.sess.err_count() > 0 { + // compiling a broken program can obviously result in a + // broken MIR, so try not to report duplicate errors. + return; + } + let param_env = ty::ParameterEnvironment::for_item(tcx, src.item_id()); + tcx.infer_ctxt(None, Some(param_env), Reveal::NotSpecializable).enter(|infcx| { + let mut checker = TypeChecker::new(&infcx, src.item_id()); + { + let mut verifier = TypeVerifier::new(&mut checker, mir); + verifier.visit_mir(mir); + if verifier.errors_reported { + // don't do further checks to avoid ICEs + return; + } + } + checker.typeck_mir(mir); + checker.verify_obligations(mir); + }); + } +} + +impl Pass for TypeckMir { +} diff --git a/src/librustc_mir/transform/util.rs b/src/librustc_mir/transform/util.rs deleted file mode 100644 index 7e44beb18a2e9..0000000000000 --- a/src/librustc_mir/transform/util.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::mir::repr::*; - -/// Update basic block ids in all terminators using the given replacements, -/// useful e.g. after removal of several basic blocks to update all terminators -/// in a single pass -pub fn update_basic_block_ids(mir: &mut Mir, replacements: &[BasicBlock]) { - for bb in mir.all_basic_blocks() { - for target in mir.basic_block_data_mut(bb).terminator_mut().successors_mut() { - *target = replacements[target.index()]; - } - } -} - -/// Mass removal of basic blocks to keep the ID-remapping cheap. -pub fn retain_basic_blocks(mir: &mut Mir, keep: &[bool]) { - let num_blocks = mir.basic_blocks.len(); - - // Check that we have a usage flag for every block - assert_eq!(num_blocks, keep.len()); - - let first_dead = match keep.iter().position(|&k| !k) { - None => return, - Some(first_dead) => first_dead, - }; - - // `replacements` maps the old block ids to the new ones - let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect(); - - let mut dead = 0; - for i in first_dead..num_blocks { - if keep[i] { - replacements[i] = BasicBlock::new(i - dead); - mir.basic_blocks.swap(i, i - dead); - } else { - dead += 1; - } - } - mir.basic_blocks.truncate(num_blocks - dead); - - update_basic_block_ids(mir, &replacements); -} diff --git a/src/librustc_passes/Cargo.toml b/src/librustc_passes/Cargo.toml new file mode 100644 index 0000000000000..cc710e0ac3563 --- /dev/null +++ b/src/librustc_passes/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_passes" +version = "0.0.0" + +[lib] +name = "rustc_passes" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +rustc = { path = "../librustc" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_const_math = { path = 
"../librustc_const_math" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc_errors = { path = "../librustc_errors" } \ No newline at end of file diff --git a/src/librustc_passes/ast_validation.rs b/src/librustc_passes/ast_validation.rs new file mode 100644 index 0000000000000..fa07006aa63e1 --- /dev/null +++ b/src/librustc_passes/ast_validation.rs @@ -0,0 +1,292 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Validate AST before lowering it to HIR +// +// This pass is supposed to catch things that fit into AST data structures, +// but not permitted by the language. It runs after expansion when AST is frozen, +// so it can check for erroneous constructions produced by syntax extensions. +// This pass is supposed to perform only simple checks not requiring name resolution +// or type checking or some other kind of complex analysis. 
+ +use rustc::lint; +use rustc::session::Session; +use syntax::ast::*; +use syntax::attr; +use syntax::codemap::Spanned; +use syntax::parse::token; +use syntax::symbol::keywords; +use syntax::visit::{self, Visitor}; +use syntax_pos::Span; +use errors; + +struct AstValidator<'a> { + session: &'a Session, +} + +impl<'a> AstValidator<'a> { + fn err_handler(&self) -> &errors::Handler { + &self.session.parse_sess.span_diagnostic + } + + fn check_label(&self, label: Ident, span: Span, id: NodeId) { + if label.name == keywords::StaticLifetime.name() { + self.err_handler().span_err(span, &format!("invalid label name `{}`", label.name)); + } + if label.name == "'_" { + self.session.add_lint(lint::builtin::LIFETIME_UNDERSCORE, + id, + span, + format!("invalid label name `{}`", label.name)); + } + } + + fn invalid_visibility(&self, vis: &Visibility, span: Span, note: Option<&str>) { + if vis != &Visibility::Inherited { + let mut err = struct_span_err!(self.session, + span, + E0449, + "unnecessary visibility qualifier"); + if vis == &Visibility::Public { + err.span_label(span, &format!("`pub` not needed here")); + } + if let Some(note) = note { + err.note(note); + } + err.emit(); + } + } + + fn check_decl_no_pat(&self, decl: &FnDecl, report_err: ReportFn) { + for arg in &decl.inputs { + match arg.pat.node { + PatKind::Ident(BindingMode::ByValue(Mutability::Immutable), _, None) | + PatKind::Wild => {} + PatKind::Ident(..) 
=> report_err(arg.pat.span, true), + _ => report_err(arg.pat.span, false), + } + } + } + + fn check_trait_fn_not_const(&self, constness: Spanned) { + match constness.node { + Constness::Const => { + struct_span_err!(self.session, constness.span, E0379, + "trait fns cannot be declared const") + .span_label(constness.span, &format!("trait fns cannot be const")) + .emit(); + } + _ => {} + } + } + + fn no_questions_in_bounds(&self, bounds: &TyParamBounds, where_: &str, is_trait: bool) { + for bound in bounds { + if let TraitTyParamBound(ref poly, TraitBoundModifier::Maybe) = *bound { + let mut err = self.err_handler().struct_span_err(poly.span, + &format!("`?Trait` is not permitted in {}", where_)); + if is_trait { + err.note(&format!("traits are `?{}` by default", poly.trait_ref.path)); + } + err.emit(); + } + } + } +} + +impl<'a> Visitor for AstValidator<'a> { + fn visit_lifetime(&mut self, lt: &Lifetime) { + if lt.name == "'_" { + self.session.add_lint(lint::builtin::LIFETIME_UNDERSCORE, + lt.id, + lt.span, + format!("invalid lifetime name `{}`", lt.name)); + } + + visit::walk_lifetime(self, lt) + } + + fn visit_expr(&mut self, expr: &Expr) { + match expr.node { + ExprKind::While(.., Some(ident)) | + ExprKind::Loop(_, Some(ident)) | + ExprKind::WhileLet(.., Some(ident)) | + ExprKind::ForLoop(.., Some(ident)) | + ExprKind::Break(Some(ident), _) | + ExprKind::Continue(Some(ident)) => { + self.check_label(ident.node, ident.span, expr.id); + } + _ => {} + } + + visit::walk_expr(self, expr) + } + + fn visit_ty(&mut self, ty: &Ty) { + match ty.node { + TyKind::BareFn(ref bfty) => { + self.check_decl_no_pat(&bfty.decl, |span, _| { + let mut err = struct_span_err!(self.session, + span, + E0561, + "patterns aren't allowed in function pointer \ + types"); + err.span_note(span, + "this is a recent error, see issue #35203 for more details"); + err.emit(); + }); + } + TyKind::ObjectSum(_, ref bounds) | + TyKind::PolyTraitRef(ref bounds) => { + self.no_questions_in_bounds(bounds, 
"trait object types", false); + } + _ => {} + } + + visit::walk_ty(self, ty) + } + + fn visit_path(&mut self, path: &Path, id: NodeId) { + if path.global && path.segments.len() > 0 { + let ident = path.segments[0].identifier; + if token::Ident(ident).is_path_segment_keyword() { + self.session.add_lint(lint::builtin::SUPER_OR_SELF_IN_GLOBAL_PATH, + id, + path.span, + format!("global paths cannot start with `{}`", ident)); + } + } + + visit::walk_path(self, path) + } + + fn visit_item(&mut self, item: &Item) { + match item.node { + ItemKind::Use(ref view_path) => { + let path = view_path.node.path(); + if !path.segments.iter().all(|segment| segment.parameters.is_empty()) { + self.err_handler() + .span_err(path.span, "type or lifetime parameters in import path"); + } + } + ItemKind::Impl(.., Some(..), _, ref impl_items) => { + self.invalid_visibility(&item.vis, item.span, None); + for impl_item in impl_items { + self.invalid_visibility(&impl_item.vis, impl_item.span, None); + if let ImplItemKind::Method(ref sig, _) = impl_item.node { + self.check_trait_fn_not_const(sig.constness); + } + } + } + ItemKind::Impl(.., None, _, _) => { + self.invalid_visibility(&item.vis, + item.span, + Some("place qualifiers on individual impl items instead")); + } + ItemKind::DefaultImpl(..) => { + self.invalid_visibility(&item.vis, item.span, None); + } + ItemKind::ForeignMod(..) 
=> { + self.invalid_visibility(&item.vis, + item.span, + Some("place qualifiers on individual foreign items \ + instead")); + } + ItemKind::Enum(ref def, _) => { + for variant in &def.variants { + for field in variant.node.data.fields() { + self.invalid_visibility(&field.vis, field.span, None); + } + } + } + ItemKind::Trait(.., ref bounds, ref trait_items) => { + self.no_questions_in_bounds(bounds, "supertraits", true); + for trait_item in trait_items { + if let TraitItemKind::Method(ref sig, ref block) = trait_item.node { + self.check_trait_fn_not_const(sig.constness); + if block.is_none() { + self.check_decl_no_pat(&sig.decl, |span, _| { + self.session.add_lint(lint::builtin::PATTERNS_IN_FNS_WITHOUT_BODY, + trait_item.id, span, + "patterns aren't allowed in methods \ + without bodies".to_string()); + }); + } + } + } + } + ItemKind::Mod(_) => { + // Ensure that `path` attributes on modules are recorded as used (c.f. #35584). + attr::first_attr_value_str_by_name(&item.attrs, "path"); + if let Some(attr) = + item.attrs.iter().find(|attr| attr.name() == "warn_directory_ownership") { + let lint = lint::builtin::LEGACY_DIRECTORY_OWNERSHIP; + let msg = "cannot declare a new module at this location"; + self.session.add_lint(lint, item.id, item.span, msg.to_string()); + attr::mark_used(attr); + } + } + ItemKind::Union(ref vdata, _) => { + if !vdata.is_struct() { + self.err_handler().span_err(item.span, + "tuple and unit unions are not permitted"); + } + if vdata.fields().len() == 0 { + self.err_handler().span_err(item.span, + "unions cannot have zero fields"); + } + } + _ => {} + } + + visit::walk_item(self, item) + } + + fn visit_foreign_item(&mut self, fi: &ForeignItem) { + match fi.node { + ForeignItemKind::Fn(ref decl, _) => { + self.check_decl_no_pat(decl, |span, is_recent| { + let mut err = struct_span_err!(self.session, + span, + E0130, + "patterns aren't allowed in foreign function \ + declarations"); + err.span_label(span, &format!("pattern not allowed in foreign 
function")); + if is_recent { + err.span_note(span, + "this is a recent error, see issue #35203 for more details"); + } + err.emit(); + }); + } + ForeignItemKind::Static(..) => {} + } + + visit::walk_foreign_item(self, fi) + } + + fn visit_vis(&mut self, vis: &Visibility) { + match *vis { + Visibility::Restricted { ref path, .. } => { + if !path.segments.iter().all(|segment| segment.parameters.is_empty()) { + self.err_handler() + .span_err(path.span, "type or lifetime parameters in visibility path"); + } + } + _ => {} + } + + visit::walk_vis(self, vis) + } +} + +pub fn check_crate(session: &Session, krate: &Crate) { + visit::walk_crate(&mut AstValidator { session: session }, krate) +} diff --git a/src/librustc_passes/const_fn.rs b/src/librustc_passes/const_fn.rs deleted file mode 100644 index cda5267f7271b..0000000000000 --- a/src/librustc_passes/const_fn.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Verifies that const fn arguments are immutable by value bindings -//! 
and the const fn body doesn't contain any statements - -use rustc::session::Session; - -use syntax::ast; -use syntax::visit::{self, Visitor, FnKind}; -use syntax::codemap::Span; - -pub fn check_crate(sess: &Session, krate: &ast::Crate) { - visit::walk_crate(&mut CheckConstFn{ sess: sess }, krate); - sess.abort_if_errors(); -} - -struct CheckConstFn<'a> { - sess: &'a Session, -} - -struct CheckBlock<'a> { - sess: &'a Session, - kind: &'static str, -} - -impl<'a, 'v> Visitor<'v> for CheckBlock<'a> { - fn visit_block(&mut self, block: &'v ast::Block) { - check_block(&self.sess, block, self.kind); - CheckConstFn{ sess: self.sess}.visit_block(block); - } - fn visit_expr(&mut self, e: &'v ast::Expr) { - if let ast::ExprClosure(..) = e.node { - CheckConstFn{ sess: self.sess}.visit_expr(e); - } else { - visit::walk_expr(self, e); - } - } - fn visit_item(&mut self, _i: &'v ast::Item) { panic!("should be handled in CheckConstFn") } - fn visit_fn(&mut self, - _fk: FnKind<'v>, - _fd: &'v ast::FnDecl, - _b: &'v ast::Block, - _s: Span, - _fn_id: ast::NodeId) { panic!("should be handled in CheckConstFn") } -} - -fn check_block(sess: &Session, b: &ast::Block, kind: &'static str) { - // Check all statements in the block - for stmt in &b.stmts { - let span = match stmt.node { - ast::StmtDecl(ref decl, _) => { - match decl.node { - ast::DeclLocal(_) => decl.span, - - // Item statements are allowed - ast::DeclItem(_) => continue, - } - } - ast::StmtExpr(ref expr, _) => expr.span, - ast::StmtSemi(ref semi, _) => semi.span, - ast::StmtMac(..) 
=> unreachable!(), - }; - span_err!(sess, span, E0016, - "blocks in {}s are limited to items and tail expressions", kind); - } -} - -impl<'a, 'v> Visitor<'v> for CheckConstFn<'a> { - fn visit_item(&mut self, i: &'v ast::Item) { - visit::walk_item(self, i); - match i.node { - ast::ItemConst(_, ref e) => { - CheckBlock{ sess: self.sess, kind: "constant"}.visit_expr(e) - }, - ast::ItemStatic(_, _, ref e) => { - CheckBlock{ sess: self.sess, kind: "static"}.visit_expr(e) - }, - _ => {}, - } - } - - fn visit_fn(&mut self, - fk: FnKind<'v>, - fd: &'v ast::FnDecl, - b: &'v ast::Block, - s: Span, - _fn_id: ast::NodeId) { - visit::walk_fn(self, fk, fd, b, s); - match fk { - FnKind::ItemFn(_, _, _, ast::Constness::Const, _, _) => {}, - FnKind::Method(_, m, _) if m.constness == ast::Constness::Const => {}, - _ => return, - } - - // Ensure the arguments are simple, not mutable/by-ref or patterns. - for arg in &fd.inputs { - match arg.pat.node { - ast::PatWild => {} - ast::PatIdent(ast::BindingMode::ByValue(ast::MutImmutable), _, None) => {} - _ => { - span_err!(self.sess, arg.pat.span, E0022, - "arguments of constant functions can only \ - be immutable by-value bindings"); - } - } - } - check_block(&self.sess, b, "const function"); - } -} diff --git a/src/librustc_passes/consts.rs b/src/librustc_passes/consts.rs new file mode 100644 index 0000000000000..86f56d0035841 --- /dev/null +++ b/src/librustc_passes/consts.rs @@ -0,0 +1,777 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Verifies that the types and values of const and static items +// are safe. 
The rules enforced by this module are: +// +// - For each *mutable* static item, it checks that its **type**: +// - doesn't have a destructor +// - doesn't own a box +// +// - For each *immutable* static item, it checks that its **value**: +// - doesn't own a box +// - doesn't contain a struct literal or a call to an enum variant / struct constructor where +// - the type of the struct/enum has a dtor +// +// Rules Enforced Elsewhere: +// - It's not possible to take the address of a static item with unsafe interior. This is enforced +// by borrowck::gather_loans + +use rustc::dep_graph::DepNode; +use rustc::ty::cast::CastKind; +use rustc_const_eval::{ConstEvalErr, lookup_const_fn_by_id, compare_lit_exprs}; +use rustc_const_eval::{ConstFnNode, eval_const_expr_partial, lookup_const_by_id}; +use rustc_const_eval::ErrKind::{IndexOpFeatureGated, UnimplementedConstVal, MiscCatchAll, Math}; +use rustc_const_eval::ErrKind::{ErroneousReferencedConstant, MiscBinaryOp, NonConstPath}; +use rustc_const_eval::ErrKind::UnresolvedPath; +use rustc_const_eval::EvalHint::ExprTypeChecked; +use rustc_const_math::{ConstMathErr, Op}; +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def_id::DefId; +use rustc::middle::expr_use_visitor as euv; +use rustc::middle::mem_categorization as mc; +use rustc::middle::mem_categorization::Categorization; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::traits::Reveal; +use rustc::util::common::ErrorReported; +use rustc::util::nodemap::NodeMap; +use rustc::middle::const_qualif::ConstQualif; +use rustc::lint::builtin::CONST_ERR; + +use rustc::hir::{self, PatKind}; +use syntax::ast; +use syntax_pos::Span; +use rustc::hir::intravisit::{self, FnKind, Visitor, NestedVisitorMap}; + +use std::collections::hash_map::Entry; +use std::cmp::Ordering; + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +enum Mode { + Const, + ConstFn, + Static, + StaticMut, + + // An expression that occurs outside of any constant context + // (i.e. 
`const`, `static`, array lengths, etc.). The value + // can be variable at runtime, but will be promotable to + // static memory if we can prove it is actually constant. + Var, +} + +struct CheckCrateVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mode: Mode, + qualif: ConstQualif, + rvalue_borrows: NodeMap, +} + +impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> { + fn with_mode(&mut self, mode: Mode, f: F) -> R + where F: FnOnce(&mut CheckCrateVisitor<'a, 'gcx>) -> R + { + let (old_mode, old_qualif) = (self.mode, self.qualif); + self.mode = mode; + self.qualif = ConstQualif::empty(); + let r = f(self); + self.mode = old_mode; + self.qualif = old_qualif; + r + } + + fn with_euv(&mut self, item_id: Option, f: F) -> R + where F: for<'b, 'tcx> FnOnce(&mut euv::ExprUseVisitor<'b, 'gcx, 'tcx>) -> R + { + let param_env = match item_id { + Some(item_id) => ty::ParameterEnvironment::for_item(self.tcx, item_id), + None => self.tcx.empty_parameter_environment(), + }; + + self.tcx + .infer_ctxt(None, Some(param_env), Reveal::NotSpecializable) + .enter(|infcx| f(&mut euv::ExprUseVisitor::new(self, &infcx))) + } + + fn global_expr(&mut self, mode: Mode, expr: &'gcx hir::Expr) -> ConstQualif { + assert!(mode != Mode::Var); + match self.tcx.const_qualif_map.borrow_mut().entry(expr.id) { + Entry::Occupied(entry) => return *entry.get(), + Entry::Vacant(entry) => { + // Prevent infinite recursion on re-entry. + entry.insert(ConstQualif::empty()); + } + } + if let Err(err) = eval_const_expr_partial(self.tcx, expr, ExprTypeChecked, None) { + match err.kind { + UnimplementedConstVal(_) => {} + IndexOpFeatureGated => {} + ErroneousReferencedConstant(_) => {} + _ => { + self.tcx.sess.add_lint(CONST_ERR, + expr.id, + expr.span, + format!("constant evaluation error: {}. 
This will \ + become a HARD ERROR in the future", + err.description().into_oneline())) + } + } + } + self.with_mode(mode, |this| { + this.with_euv(None, |euv| euv.consume_expr(expr)); + this.visit_expr(expr); + this.qualif + }) + } + + fn fn_like(&mut self, + fk: FnKind<'gcx>, + fd: &'gcx hir::FnDecl, + b: hir::ExprId, + s: Span, + fn_id: ast::NodeId) + -> ConstQualif { + match self.tcx.const_qualif_map.borrow_mut().entry(fn_id) { + Entry::Occupied(entry) => return *entry.get(), + Entry::Vacant(entry) => { + // Prevent infinite recursion on re-entry. + entry.insert(ConstQualif::empty()); + } + } + + let mode = match fk { + FnKind::ItemFn(_, _, _, hir::Constness::Const, ..) + => Mode::ConstFn, + FnKind::Method(_, m, ..) => { + if m.constness == hir::Constness::Const { + Mode::ConstFn + } else { + Mode::Var + } + } + _ => Mode::Var, + }; + + let qualif = self.with_mode(mode, |this| { + let body = this.tcx.map.expr(b); + this.with_euv(Some(fn_id), |euv| euv.walk_fn(fd, body)); + intravisit::walk_fn(this, fk, fd, b, s, fn_id); + this.qualif + }); + + // Keep only bits that aren't affected by function body (NON_ZERO_SIZED), + // and bits that don't change semantics, just optimizations (PREFER_IN_PLACE). + let qualif = qualif & (ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE); + + self.tcx.const_qualif_map.borrow_mut().insert(fn_id, qualif); + qualif + } + + fn add_qualif(&mut self, qualif: ConstQualif) { + self.qualif = self.qualif | qualif; + } + + /// Returns true if the call is to a const fn or method. 
+ fn handle_const_fn_call(&mut self, _expr: &hir::Expr, def_id: DefId, ret_ty: Ty<'gcx>) -> bool { + match lookup_const_fn_by_id(self.tcx, def_id) { + Some(ConstFnNode::Local(fn_like)) => { + let qualif = self.fn_like(fn_like.kind(), + fn_like.decl(), + fn_like.body(), + fn_like.span(), + fn_like.id()); + + self.add_qualif(qualif); + + if ret_ty.type_contents(self.tcx).interior_unsafe() { + self.add_qualif(ConstQualif::MUTABLE_MEM); + } + + true + }, + Some(ConstFnNode::Inlined(ii)) => { + let node_id = ii.body.id; + + let qualif = match self.tcx.const_qualif_map.borrow_mut().entry(node_id) { + Entry::Occupied(entry) => *entry.get(), + _ => bug!("const qualif entry missing for inlined item") + }; + + self.add_qualif(qualif); + + if ret_ty.type_contents(self.tcx).interior_unsafe() { + self.add_qualif(ConstQualif::MUTABLE_MEM); + } + + true + }, + None => false + } + } + + fn record_borrow(&mut self, id: ast::NodeId, mutbl: hir::Mutability) { + match self.rvalue_borrows.entry(id) { + Entry::Occupied(mut entry) => { + // Merge the two borrows, taking the most demanding + // one, mutability-wise. 
+ if mutbl == hir::MutMutable { + entry.insert(mutbl); + } + } + Entry::Vacant(entry) => { + entry.insert(mutbl); + } + } + } +} + +impl<'a, 'tcx> Visitor<'tcx> for CheckCrateVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_item(&mut self, i: &'tcx hir::Item) { + debug!("visit_item(item={})", self.tcx.map.node_to_string(i.id)); + assert_eq!(self.mode, Mode::Var); + match i.node { + hir::ItemStatic(_, hir::MutImmutable, ref expr) => { + self.global_expr(Mode::Static, &expr); + } + hir::ItemStatic(_, hir::MutMutable, ref expr) => { + self.global_expr(Mode::StaticMut, &expr); + } + hir::ItemConst(_, ref expr) => { + self.global_expr(Mode::Const, &expr); + } + hir::ItemEnum(ref enum_definition, _) => { + for var in &enum_definition.variants { + if let Some(ref ex) = var.node.disr_expr { + self.global_expr(Mode::Const, &ex); + } + } + } + _ => { + intravisit::walk_item(self, i); + } + } + } + + fn visit_trait_item(&mut self, t: &'tcx hir::TraitItem) { + match t.node { + hir::ConstTraitItem(_, ref default) => { + if let Some(ref expr) = *default { + self.global_expr(Mode::Const, &expr); + } else { + intravisit::walk_trait_item(self, t); + } + } + _ => self.with_mode(Mode::Var, |v| intravisit::walk_trait_item(v, t)), + } + } + + fn visit_impl_item(&mut self, i: &'tcx hir::ImplItem) { + match i.node { + hir::ImplItemKind::Const(_, ref expr) => { + self.global_expr(Mode::Const, &expr); + } + _ => self.with_mode(Mode::Var, |v| intravisit::walk_impl_item(v, i)), + } + } + + fn visit_fn(&mut self, + fk: FnKind<'tcx>, + fd: &'tcx hir::FnDecl, + b: hir::ExprId, + s: Span, + fn_id: ast::NodeId) { + self.fn_like(fk, fd, b, s, fn_id); + } + + fn visit_pat(&mut self, p: &'tcx hir::Pat) { + match p.node { + PatKind::Lit(ref lit) => { + self.global_expr(Mode::Const, &lit); + } + PatKind::Range(ref start, ref end) => { + self.global_expr(Mode::Const, &start); + 
self.global_expr(Mode::Const, &end); + + match compare_lit_exprs(self.tcx, p.span, start, end) { + Ok(Ordering::Less) | + Ok(Ordering::Equal) => {} + Ok(Ordering::Greater) => { + struct_span_err!(self.tcx.sess, start.span, E0030, + "lower range bound must be less than or equal to upper") + .span_label(start.span, &format!("lower bound larger than upper bound")) + .emit(); + } + Err(ErrorReported) => {} + } + } + _ => intravisit::walk_pat(self, p), + } + } + + fn visit_block(&mut self, block: &'tcx hir::Block) { + // Check all statements in the block + for stmt in &block.stmts { + match stmt.node { + hir::StmtDecl(ref decl, _) => { + match decl.node { + hir::DeclLocal(_) => {} + // Item statements are allowed + hir::DeclItem(_) => continue, + } + } + hir::StmtExpr(..) => {} + hir::StmtSemi(..) => {} + } + self.add_qualif(ConstQualif::NOT_CONST); + } + intravisit::walk_block(self, block); + } + + fn visit_expr(&mut self, ex: &'tcx hir::Expr) { + let mut outer = self.qualif; + self.qualif = ConstQualif::empty(); + + let node_ty = self.tcx.tables().node_id_to_type(ex.id); + check_expr(self, ex, node_ty); + check_adjustments(self, ex); + + // Special-case some expressions to avoid certain flags bubbling up. + match ex.node { + hir::ExprCall(ref callee, ref args) => { + for arg in args { + self.visit_expr(&arg) + } + + let inner = self.qualif; + self.visit_expr(&callee); + // The callee's size doesn't count in the call. + let added = self.qualif - inner; + self.qualif = inner | (added - ConstQualif::NON_ZERO_SIZED); + } + hir::ExprRepeat(ref element, _) => { + self.visit_expr(&element); + // The count is checked elsewhere (typeck). + let count = match node_ty.sty { + ty::TyArray(_, n) => n, + _ => bug!(), + }; + // [element; 0] is always zero-sized. 
+ if count == 0 { + self.qualif.remove(ConstQualif::NON_ZERO_SIZED | ConstQualif::PREFER_IN_PLACE); + } + } + hir::ExprMatch(ref discr, ref arms, _) => { + // Compute the most demanding borrow from all the arms' + // patterns and set that on the discriminator. + let mut borrow = None; + for pat in arms.iter().flat_map(|arm| &arm.pats) { + let pat_borrow = self.rvalue_borrows.remove(&pat.id); + match (borrow, pat_borrow) { + (None, _) | + (_, Some(hir::MutMutable)) => { + borrow = pat_borrow; + } + _ => {} + } + } + if let Some(mutbl) = borrow { + self.record_borrow(discr.id, mutbl); + } + intravisit::walk_expr(self, ex); + } + _ => intravisit::walk_expr(self, ex), + } + + // Handle borrows on (or inside the autorefs of) this expression. + match self.rvalue_borrows.remove(&ex.id) { + Some(hir::MutImmutable) => { + // Constants cannot be borrowed if they contain interior mutability as + // it means that our "silent insertion of statics" could change + // initializer values (very bad). + // If the type doesn't have interior mutability, then `ConstQualif::MUTABLE_MEM` has + // propagated from another error, so erroring again would be just noise. + let tc = node_ty.type_contents(self.tcx); + if self.qualif.intersects(ConstQualif::MUTABLE_MEM) && tc.interior_unsafe() { + outer = outer | ConstQualif::NOT_CONST; + } + // If the reference has to be 'static, avoid in-place initialization + // as that will end up pointing to the stack instead. + if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { + self.qualif = self.qualif - ConstQualif::PREFER_IN_PLACE; + self.add_qualif(ConstQualif::HAS_STATIC_BORROWS); + } + } + Some(hir::MutMutable) => { + // `&mut expr` means expr could be mutated, unless it's zero-sized. 
+ if self.qualif.intersects(ConstQualif::NON_ZERO_SIZED) { + if self.mode == Mode::Var { + outer = outer | ConstQualif::NOT_CONST; + self.add_qualif(ConstQualif::MUTABLE_MEM); + } + } + if !self.qualif.intersects(ConstQualif::NON_STATIC_BORROWS) { + self.add_qualif(ConstQualif::HAS_STATIC_BORROWS); + } + } + None => {} + } + + if self.mode == Mode::Var && !self.qualif.intersects(ConstQualif::NOT_CONST) { + match eval_const_expr_partial(self.tcx, ex, ExprTypeChecked, None) { + Ok(_) => {} + Err(ConstEvalErr { kind: UnimplementedConstVal(_), .. }) | + Err(ConstEvalErr { kind: MiscCatchAll, .. }) | + Err(ConstEvalErr { kind: MiscBinaryOp, .. }) | + Err(ConstEvalErr { kind: NonConstPath, .. }) | + Err(ConstEvalErr { kind: UnresolvedPath, .. }) | + Err(ConstEvalErr { kind: ErroneousReferencedConstant(_), .. }) | + Err(ConstEvalErr { kind: Math(ConstMathErr::Overflow(Op::Shr)), .. }) | + Err(ConstEvalErr { kind: Math(ConstMathErr::Overflow(Op::Shl)), .. }) | + Err(ConstEvalErr { kind: IndexOpFeatureGated, .. }) => {} + Err(msg) => { + self.tcx.sess.add_lint(CONST_ERR, + ex.id, + msg.span, + msg.description().into_oneline().into_owned()) + } + } + } + + self.tcx.const_qualif_map.borrow_mut().insert(ex.id, self.qualif); + // Don't propagate certain flags. + self.qualif = outer | (self.qualif - ConstQualif::HAS_STATIC_BORROWS); + } +} + +/// This function is used to enforce the constraints on +/// const/static items. It walks through the *value* +/// of the item walking down the expression and evaluating +/// every nested expression. If the expression is not part +/// of a const/static item, it is qualified for promotion +/// instead of producing errors. +fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr, node_ty: Ty<'tcx>) { + match node_ty.sty { + ty::TyAdt(def, _) if def.has_dtor() => { + v.add_qualif(ConstQualif::NEEDS_DROP); + } + _ => {} + } + + let method_call = ty::MethodCall::expr(e.id); + match e.node { + hir::ExprUnary(..) 
| + hir::ExprBinary(..) | + hir::ExprIndex(..) if v.tcx.tables().method_map.contains_key(&method_call) => { + v.add_qualif(ConstQualif::NOT_CONST); + } + hir::ExprBox(_) => { + v.add_qualif(ConstQualif::NOT_CONST); + } + hir::ExprUnary(op, ref inner) => { + match v.tcx.tables().node_id_to_type(inner.id).sty { + ty::TyRawPtr(_) => { + assert!(op == hir::UnDeref); + + v.add_qualif(ConstQualif::NOT_CONST); + } + _ => {} + } + } + hir::ExprBinary(op, ref lhs, _) => { + match v.tcx.tables().node_id_to_type(lhs.id).sty { + ty::TyRawPtr(_) => { + assert!(op.node == hir::BiEq || op.node == hir::BiNe || + op.node == hir::BiLe || op.node == hir::BiLt || + op.node == hir::BiGe || op.node == hir::BiGt); + + v.add_qualif(ConstQualif::NOT_CONST); + } + _ => {} + } + } + hir::ExprCast(ref from, _) => { + debug!("Checking const cast(id={})", from.id); + match v.tcx.cast_kinds.borrow().get(&from.id) { + None => span_bug!(e.span, "no kind for cast"), + Some(&CastKind::PtrAddrCast) | Some(&CastKind::FnPtrAddrCast) => { + v.add_qualif(ConstQualif::NOT_CONST); + } + _ => {} + } + } + hir::ExprPath(ref qpath) => { + let def = v.tcx.tables().qpath_def(qpath, e.id); + match def { + Def::VariantCtor(_, CtorKind::Const) => { + // Size is determined by the whole enum, may be non-zero. + v.add_qualif(ConstQualif::NON_ZERO_SIZED); + } + Def::VariantCtor(..) | Def::StructCtor(..) | + Def::Fn(..) | Def::Method(..) => {} + Def::Static(..) => { + match v.mode { + Mode::Static | Mode::StaticMut => {} + Mode::Const | Mode::ConstFn => {} + Mode::Var => v.add_qualif(ConstQualif::NOT_CONST) + } + } + Def::Const(did) | Def::AssociatedConst(did) => { + let substs = Some(v.tcx.tables().node_id_item_substs(e.id) + .unwrap_or_else(|| v.tcx.intern_substs(&[]))); + if let Some((expr, _)) = lookup_const_by_id(v.tcx, did, substs) { + let inner = v.global_expr(Mode::Const, expr); + v.add_qualif(inner); + } + } + Def::Local(..) 
if v.mode == Mode::ConstFn => { + // Sadly, we can't determine whether the types are zero-sized. + v.add_qualif(ConstQualif::NOT_CONST | ConstQualif::NON_ZERO_SIZED); + } + _ => { + v.add_qualif(ConstQualif::NOT_CONST); + } + } + } + hir::ExprCall(ref callee, _) => { + let mut callee = &**callee; + loop { + callee = match callee.node { + hir::ExprBlock(ref block) => match block.expr { + Some(ref tail) => &tail, + None => break + }, + _ => break + }; + } + // The callee is an arbitrary expression, it doesn't necessarily have a definition. + let def = if let hir::ExprPath(ref qpath) = callee.node { + v.tcx.tables().qpath_def(qpath, callee.id) + } else { + Def::Err + }; + let is_const = match def { + Def::StructCtor(_, CtorKind::Fn) | + Def::VariantCtor(_, CtorKind::Fn) => { + // `NON_ZERO_SIZED` is about the call result, not about the ctor itself. + v.add_qualif(ConstQualif::NON_ZERO_SIZED); + true + } + Def::Fn(did) => { + v.handle_const_fn_call(e, did, node_ty) + } + Def::Method(did) => { + match v.tcx.associated_item(did).container { + ty::ImplContainer(_) => { + v.handle_const_fn_call(e, did, node_ty) + } + ty::TraitContainer(_) => false + } + } + _ => false + }; + if !is_const { + v.add_qualif(ConstQualif::NOT_CONST); + } + } + hir::ExprMethodCall(..) => { + let method = v.tcx.tables().method_map[&method_call]; + let is_const = match v.tcx.associated_item(method.def_id).container { + ty::ImplContainer(_) => v.handle_const_fn_call(e, method.def_id, node_ty), + ty::TraitContainer(_) => false + }; + if !is_const { + v.add_qualif(ConstQualif::NOT_CONST); + } + } + hir::ExprStruct(..) => { + if let ty::TyAdt(adt, ..) = v.tcx.tables().expr_ty(e).sty { + // unsafe_cell_type doesn't necessarily exist with no_core + if Some(adt.did) == v.tcx.lang_items.unsafe_cell_type() { + v.add_qualif(ConstQualif::MUTABLE_MEM); + } + } + } + + hir::ExprLit(_) | + hir::ExprAddrOf(..) => { + v.add_qualif(ConstQualif::NON_ZERO_SIZED); + } + + hir::ExprRepeat(..) 
=> { + v.add_qualif(ConstQualif::PREFER_IN_PLACE); + } + + hir::ExprClosure(..) => { + // Paths in constant contexts cannot refer to local variables, + // as there are none, and thus closures can't have upvars there. + if v.tcx.with_freevars(e.id, |fv| !fv.is_empty()) { + assert!(v.mode == Mode::Var, + "global closures can't capture anything"); + v.add_qualif(ConstQualif::NOT_CONST); + } + } + + hir::ExprBlock(_) | + hir::ExprIndex(..) | + hir::ExprField(..) | + hir::ExprTupField(..) | + hir::ExprArray(_) | + hir::ExprType(..) | + hir::ExprTup(..) => {} + + // Conditional control flow (possible to implement). + hir::ExprMatch(..) | + hir::ExprIf(..) | + + // Loops (not very meaningful in constants). + hir::ExprWhile(..) | + hir::ExprLoop(..) | + + // More control flow (also not very meaningful). + hir::ExprBreak(..) | + hir::ExprAgain(_) | + hir::ExprRet(_) | + + // Expressions with side-effects. + hir::ExprAssign(..) | + hir::ExprAssignOp(..) | + hir::ExprInlineAsm(..) => { + v.add_qualif(ConstQualif::NOT_CONST); + } + } +} + +/// Check the adjustments of an expression +fn check_adjustments<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr) { + use rustc::ty::adjustment::*; + + match v.tcx.tables().adjustments.get(&e.id).map(|adj| adj.kind) { + None | + Some(Adjust::NeverToAny) | + Some(Adjust::ReifyFnPointer) | + Some(Adjust::UnsafeFnPointer) | + Some(Adjust::MutToConstPointer) => {} + + Some(Adjust::DerefRef { autoderefs, .. 
}) => { + if (0..autoderefs as u32) + .any(|autoderef| v.tcx.tables().is_overloaded_autoderef(e.id, autoderef)) { + v.add_qualif(ConstQualif::NOT_CONST); + } + } + } +} + +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + tcx.visit_all_item_likes_in_krate(DepNode::CheckConst, + &mut CheckCrateVisitor { + tcx: tcx, + mode: Mode::Var, + qualif: ConstQualif::NOT_CONST, + rvalue_borrows: NodeMap(), + }.as_deep_visitor()); + tcx.sess.abort_if_errors(); +} + +impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { + fn consume(&mut self, + _consume_id: ast::NodeId, + _consume_span: Span, + cmt: mc::cmt, + _mode: euv::ConsumeMode) { + let mut cur = &cmt; + loop { + match cur.cat { + Categorization::StaticItem => { + break; + } + Categorization::Deref(ref cmt, ..) | + Categorization::Downcast(ref cmt, _) | + Categorization::Interior(ref cmt, _) => cur = cmt, + + Categorization::Rvalue(..) | + Categorization::Upvar(..) | + Categorization::Local(..) => break, + } + } + } + fn borrow(&mut self, + borrow_id: ast::NodeId, + _borrow_span: Span, + cmt: mc::cmt<'tcx>, + _loan_region: &'tcx ty::Region, + bk: ty::BorrowKind, + loan_cause: euv::LoanCause) { + // Kind of hacky, but we allow Unsafe coercions in constants. + // These occur when we convert a &T or *T to a *U, as well as + // when making a thin pointer (e.g., `*T`) into a fat pointer + // (e.g., `*Trait`). + match loan_cause { + euv::LoanCause::AutoUnsafe => { + return; + } + _ => {} + } + + let mut cur = &cmt; + loop { + match cur.cat { + Categorization::Rvalue(..) => { + if loan_cause == euv::MatchDiscriminant { + // Ignore the dummy immutable borrow created by EUV. + break; + } + let mutbl = bk.to_mutbl_lossy(); + if mutbl == hir::MutMutable && self.mode == Mode::StaticMut { + // Mutable slices are the only `&mut` allowed in + // globals, but only in `static mut`, nowhere else. + // FIXME: This exception is really weird... 
there isn't + // any fundamental reason to restrict this based on + // type of the expression. `&mut [1]` has exactly the + // same representation as &mut 1. + match cmt.ty.sty { + ty::TyArray(..) | + ty::TySlice(_) => break, + _ => {} + } + } + self.record_borrow(borrow_id, mutbl); + break; + } + Categorization::StaticItem => { + break; + } + Categorization::Deref(ref cmt, ..) | + Categorization::Downcast(ref cmt, _) | + Categorization::Interior(ref cmt, _) => { + cur = cmt; + } + + Categorization::Upvar(..) | + Categorization::Local(..) => break, + } + } + } + + fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) {} + fn mutate(&mut self, + _assignment_id: ast::NodeId, + _assignment_span: Span, + _assignee_cmt: mc::cmt, + _mode: euv::MutateMode) { + } + + fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {} + + fn consume_pat(&mut self, _consume_pat: &hir::Pat, _cmt: mc::cmt, _mode: euv::ConsumeMode) {} +} diff --git a/src/librustc_passes/diagnostics.rs b/src/librustc_passes/diagnostics.rs index 380eada18a1e1..b2ef1abd2a4e7 100644 --- a/src/librustc_passes/diagnostics.rs +++ b/src/librustc_passes/diagnostics.rs @@ -11,40 +11,222 @@ #![allow(non_snake_case)] register_long_diagnostics! { -E0016: r##" -Blocks in constants may only contain items (such as constant, function -definition, etc...) and a tail expression. Example: +/* +E0014: r##" +Constants can only be initialized by a constant value or, in a future +version of Rust, a call to a const function. This error indicates the use +of a path (like a::b, or x) denoting something other than one of these +allowed items. Erroneous code xample: -``` -const FOO: i32 = { let x = 0; x }; // 'x' isn't an item! +```compile_fail +const FOO: i32 = { let x = 0; x }; // 'x' isn't a constant nor a function! 
``` -To avoid it, you have to replace the non-item object: +To avoid it, you have to replace the non-constant value: ``` const FOO: i32 = { const X : i32 = 0; X }; +// or even: +const FOO2: i32 = { 0 }; // but brackets are useless here +``` +"##, +*/ +E0030: r##" +When matching against a range, the compiler verifies that the range is +non-empty. Range patterns include both end-points, so this is equivalent to +requiring the start of the range to be less than or equal to the end of the +range. + +For example: + +```compile_fail +match 5u32 { + // This range is ok, albeit pointless. + 1 ... 1 => {} + // This range is empty, and the compiler can tell. + 1000 ... 5 => {} +} +``` +"##, + +E0130: r##" +You declared a pattern as an argument in a foreign function declaration. +Erroneous code example: + +```compile_fail +extern { + fn foo((a, b): (u32, u32)); // error: patterns aren't allowed in foreign + // function declarations +} +``` + +Please replace the pattern argument with a regular one. Example: + +``` +struct SomeStruct { + a: u32, + b: u32, +} + +extern { + fn foo(s: SomeStruct); // ok! +} +``` + +Or: + +``` +extern { + fn foo(a: (u32, u32)); // ok! +} +``` +"##, + +E0161: r##" +A value was moved. However, its size was not known at compile time, and only +values of a known size can be moved. + +Erroneous code example: + +```compile_fail +#![feature(box_syntax)] + +fn main() { + let array: &[isize] = &[1, 2, 3]; + let _x: Box<[isize]> = box *array; + // error: cannot move a value of type [isize]: the size of [isize] cannot + // be statically determined +} +``` + +In Rust, you can only move a value when its size is known at compile time. + +To work around this restriction, consider "hiding" the value behind a reference: +either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move +it around as usual. Example: + +``` +#![feature(box_syntax)] + +fn main() { + let array: &[isize] = &[1, 2, 3]; + let _x: Box<&[isize]> = box array; // ok! 
+} +``` +"##, + +E0265: r##" +This error indicates that a static or constant references itself. +All statics and constants need to resolve to a value in an acyclic manner. + +For example, neither of the following can be sensibly compiled: + +```compile_fail,E0265 +const X: u32 = X; +``` + +```compile_fail,E0265 +const X: u32 = Y; +const Y: u32 = X; ``` "##, -E0022: r##" -Constant functions are not allowed to mutate anything. Thus, binding to an -argument with a mutable pattern is not allowed. For example, +E0267: r##" +This error indicates the use of a loop keyword (`break` or `continue`) inside a +closure but outside of any loop. Erroneous code example: + +```compile_fail,E0267 +let w = || { break; }; // error: `break` inside of a closure +``` + +`break` and `continue` keywords can be used as normal inside closures as long as +they are also contained within a loop. To halt the execution of a closure you +should instead use a return statement. Example: + +``` +let w = || { + for _ in 0..10 { + break; + } +}; + +w(); +``` +"##, + +E0268: r##" +This error indicates the use of a loop keyword (`break` or `continue`) outside +of a loop. Without a loop to break out of or continue in, no sensible action can +be taken. Erroneous code example: + +```compile_fail,E0268 +fn some_func() { + break; // error: `break` outside of loop +} +``` + +Please verify that you are using `break` and `continue` only in loops. Example: ``` -const fn foo(mut x: u8) { - // do stuff +fn some_func() { + for _ in 0..10 { + break; // ok! + } } ``` +"##, -is bad because the function body may not mutate `x`. +E0379: r##" +Trait methods cannot be declared `const` by design. For more information, see +[RFC 911]. -Remove any mutable bindings from the argument list to fix this error. In case -you need to mutate the argument, try lazily initializing a global variable -instead of using a `const fn`, or refactoring the code to a functional style to -avoid mutation if possible. 
+[RFC 911]: https://github.com/rust-lang/rfcs/pull/911 "##, + +E0449: r##" +A visibility qualifier was used when it was unnecessary. Erroneous code +examples: + +```compile_fail +struct Bar; + +trait Foo { + fn foo(); +} + +pub impl Bar {} // error: unnecessary visibility qualifier + +pub impl Foo for Bar { // error: unnecessary visibility qualifier + pub fn foo() {} // error: unnecessary visibility qualifier +} +``` + +To fix this error, please remove the visibility qualifier when it is not +required. Example: + +```ignore +struct Bar; + +trait Foo { + fn foo(); +} + +// Directly implemented methods share the visibility of the type itself, +// so `pub` is unnecessary here +impl Bar {} + +// Trait methods share the visibility of the trait, so `pub` is +// unnecessary in either case +pub impl Foo for Bar { + pub fn foo() {} +} +``` +"##, + } register_diagnostics! { E0472, // asm! is unsupported on this target + E0561, // patterns aren't allowed in function pointer types + E0571, // `break` with a value in a non-`loop`-loop } diff --git a/src/librustc_passes/hir_stats.rs b/src/librustc_passes/hir_stats.rs new file mode 100644 index 0000000000000..b785801398895 --- /dev/null +++ b/src/librustc_passes/hir_stats.rs @@ -0,0 +1,378 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// The visitors in this module collect sizes and counts of the most important +// pieces of AST and HIR. The resulting numbers are good approximations but not +// completely accurate (some things might be counted twice, others missed). 
+ +use rustc::hir; +use rustc::hir::intravisit as hir_visit; +use rustc::util::common::to_readable_str; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use syntax::ast::{self, NodeId, AttrId}; +use syntax::visit as ast_visit; +use syntax_pos::Span; + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +enum Id { + Node(NodeId), + Attr(AttrId), + None, +} + +struct NodeData { + count: usize, + size: usize, +} + +struct StatCollector<'k> { + krate: Option<&'k hir::Crate>, + data: FxHashMap<&'static str, NodeData>, + seen: FxHashSet, +} + +pub fn print_hir_stats(krate: &hir::Crate) { + let mut collector = StatCollector { + krate: Some(krate), + data: FxHashMap(), + seen: FxHashSet(), + }; + hir_visit::walk_crate(&mut collector, krate); + collector.print("HIR STATS"); +} + +pub fn print_ast_stats(krate: &ast::Crate, title: &str) { + let mut collector = StatCollector { + krate: None, + data: FxHashMap(), + seen: FxHashSet(), + }; + ast_visit::walk_crate(&mut collector, krate); + collector.print(title); +} + +impl<'k> StatCollector<'k> { + + fn record(&mut self, label: &'static str, id: Id, node: &T) { + if id != Id::None { + if !self.seen.insert(id) { + return + } + } + + let entry = self.data.entry(label).or_insert(NodeData { + count: 0, + size: 0, + }); + + entry.count += 1; + entry.size = ::std::mem::size_of_val(node); + } + + fn print(&self, title: &str) { + let mut stats: Vec<_> = self.data.iter().collect(); + + stats.sort_by_key(|&(_, ref d)| d.count * d.size); + + let mut total_size = 0; + + println!("\n{}\n", title); + + println!("{:<18}{:>18}{:>14}{:>14}", + "Name", "Accumulated Size", "Count", "Item Size"); + println!("----------------------------------------------------------------"); + + for (label, data) in stats { + println!("{:<18}{:>18}{:>14}{:>14}", + label, + to_readable_str(data.count * data.size), + to_readable_str(data.count), + to_readable_str(data.size)); + + total_size += data.count * data.size; + } + 
println!("----------------------------------------------------------------"); + println!("{:<18}{:>18}\n", + "Total", + to_readable_str(total_size)); + } +} + +impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { + fn nested_visit_map<'this>(&'this mut self) -> hir_visit::NestedVisitorMap<'this, 'v> { + panic!("visit_nested_xxx must be manually implemented in this visitor") + } + + fn visit_nested_item(&mut self, id: hir::ItemId) { + let nested_item = self.krate.unwrap().item(id.id); + self.visit_item(nested_item) + } + + fn visit_nested_impl_item(&mut self, impl_item_id: hir::ImplItemId) { + let nested_impl_item = self.krate.unwrap().impl_item(impl_item_id); + self.visit_impl_item(nested_impl_item) + } + + fn visit_item(&mut self, i: &'v hir::Item) { + self.record("Item", Id::Node(i.id), i); + hir_visit::walk_item(self, i) + } + + /////////////////////////////////////////////////////////////////////////// + + fn visit_mod(&mut self, m: &'v hir::Mod, _s: Span, n: NodeId) { + self.record("Mod", Id::None, m); + hir_visit::walk_mod(self, m, n) + } + fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem) { + self.record("ForeignItem", Id::Node(i.id), i); + hir_visit::walk_foreign_item(self, i) + } + fn visit_local(&mut self, l: &'v hir::Local) { + self.record("Local", Id::Node(l.id), l); + hir_visit::walk_local(self, l) + } + fn visit_block(&mut self, b: &'v hir::Block) { + self.record("Block", Id::Node(b.id), b); + hir_visit::walk_block(self, b) + } + fn visit_stmt(&mut self, s: &'v hir::Stmt) { + self.record("Stmt", Id::Node(s.node.id()), s); + hir_visit::walk_stmt(self, s) + } + fn visit_arm(&mut self, a: &'v hir::Arm) { + self.record("Arm", Id::None, a); + hir_visit::walk_arm(self, a) + } + fn visit_pat(&mut self, p: &'v hir::Pat) { + self.record("Pat", Id::Node(p.id), p); + hir_visit::walk_pat(self, p) + } + fn visit_decl(&mut self, d: &'v hir::Decl) { + self.record("Decl", Id::None, d); + hir_visit::walk_decl(self, d) + } + fn visit_expr(&mut self, ex: &'v 
hir::Expr) { + self.record("Expr", Id::Node(ex.id), ex); + hir_visit::walk_expr(self, ex) + } + + fn visit_ty(&mut self, t: &'v hir::Ty) { + self.record("Ty", Id::Node(t.id), t); + hir_visit::walk_ty(self, t) + } + + fn visit_fn(&mut self, + fk: hir_visit::FnKind<'v>, + fd: &'v hir::FnDecl, + b: hir::ExprId, + s: Span, + id: NodeId) { + self.record("FnDecl", Id::None, fd); + hir_visit::walk_fn(self, fk, fd, b, s, id) + } + + fn visit_where_predicate(&mut self, predicate: &'v hir::WherePredicate) { + self.record("WherePredicate", Id::None, predicate); + hir_visit::walk_where_predicate(self, predicate) + } + + fn visit_trait_item(&mut self, ti: &'v hir::TraitItem) { + self.record("TraitItem", Id::Node(ti.id), ti); + hir_visit::walk_trait_item(self, ti) + } + fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { + self.record("ImplItem", Id::Node(ii.id), ii); + hir_visit::walk_impl_item(self, ii) + } + + fn visit_ty_param_bound(&mut self, bounds: &'v hir::TyParamBound) { + self.record("TyParamBound", Id::None, bounds); + hir_visit::walk_ty_param_bound(self, bounds) + } + + fn visit_struct_field(&mut self, s: &'v hir::StructField) { + self.record("StructField", Id::Node(s.id), s); + hir_visit::walk_struct_field(self, s) + } + + fn visit_variant(&mut self, + v: &'v hir::Variant, + g: &'v hir::Generics, + item_id: NodeId) { + self.record("Variant", Id::None, v); + hir_visit::walk_variant(self, v, g, item_id) + } + fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) { + self.record("Lifetime", Id::Node(lifetime.id), lifetime); + hir_visit::walk_lifetime(self, lifetime) + } + fn visit_lifetime_def(&mut self, lifetime: &'v hir::LifetimeDef) { + self.record("LifetimeDef", Id::None, lifetime); + hir_visit::walk_lifetime_def(self, lifetime) + } + fn visit_qpath(&mut self, qpath: &'v hir::QPath, id: NodeId, span: Span) { + self.record("QPath", Id::None, qpath); + hir_visit::walk_qpath(self, qpath, id, span) + } + fn visit_path(&mut self, path: &'v hir::Path, _id: NodeId) 
{ + self.record("Path", Id::None, path); + hir_visit::walk_path(self, path) + } + fn visit_path_segment(&mut self, + path_span: Span, + path_segment: &'v hir::PathSegment) { + self.record("PathSegment", Id::None, path_segment); + hir_visit::walk_path_segment(self, path_span, path_segment) + } + fn visit_assoc_type_binding(&mut self, type_binding: &'v hir::TypeBinding) { + self.record("TypeBinding", Id::Node(type_binding.id), type_binding); + hir_visit::walk_assoc_type_binding(self, type_binding) + } + fn visit_attribute(&mut self, attr: &'v ast::Attribute) { + self.record("Attribute", Id::Attr(attr.id), attr); + } + fn visit_macro_def(&mut self, macro_def: &'v hir::MacroDef) { + self.record("MacroDef", Id::Node(macro_def.id), macro_def); + hir_visit::walk_macro_def(self, macro_def) + } +} + +impl<'v> ast_visit::Visitor for StatCollector<'v> { + + fn visit_mod(&mut self, m: &ast::Mod, _s: Span, _n: NodeId) { + self.record("Mod", Id::None, m); + ast_visit::walk_mod(self, m) + } + + fn visit_foreign_item(&mut self, i: &ast::ForeignItem) { + self.record("ForeignItem", Id::None, i); + ast_visit::walk_foreign_item(self, i) + } + + fn visit_item(&mut self, i: &ast::Item) { + self.record("Item", Id::None, i); + ast_visit::walk_item(self, i) + } + + fn visit_local(&mut self, l: &ast::Local) { + self.record("Local", Id::None, l); + ast_visit::walk_local(self, l) + } + + fn visit_block(&mut self, b: &ast::Block) { + self.record("Block", Id::None, b); + ast_visit::walk_block(self, b) + } + + fn visit_stmt(&mut self, s: &ast::Stmt) { + self.record("Stmt", Id::None, s); + ast_visit::walk_stmt(self, s) + } + + fn visit_arm(&mut self, a: &ast::Arm) { + self.record("Arm", Id::None, a); + ast_visit::walk_arm(self, a) + } + + fn visit_pat(&mut self, p: &ast::Pat) { + self.record("Pat", Id::None, p); + ast_visit::walk_pat(self, p) + } + + fn visit_expr(&mut self, ex: &ast::Expr) { + self.record("Expr", Id::None, ex); + ast_visit::walk_expr(self, ex) + } + + fn visit_ty(&mut self, t: 
&ast::Ty) { + self.record("Ty", Id::None, t); + ast_visit::walk_ty(self, t) + } + + fn visit_fn(&mut self, + fk: ast_visit::FnKind, + fd: &ast::FnDecl, + s: Span, + _: NodeId) { + self.record("FnDecl", Id::None, fd); + ast_visit::walk_fn(self, fk, fd, s) + } + + fn visit_trait_item(&mut self, ti: &ast::TraitItem) { + self.record("TraitItem", Id::None, ti); + ast_visit::walk_trait_item(self, ti) + } + + fn visit_impl_item(&mut self, ii: &ast::ImplItem) { + self.record("ImplItem", Id::None, ii); + ast_visit::walk_impl_item(self, ii) + } + + fn visit_ty_param_bound(&mut self, bounds: &ast::TyParamBound) { + self.record("TyParamBound", Id::None, bounds); + ast_visit::walk_ty_param_bound(self, bounds) + } + + fn visit_struct_field(&mut self, s: &ast::StructField) { + self.record("StructField", Id::None, s); + ast_visit::walk_struct_field(self, s) + } + + fn visit_variant(&mut self, + v: &ast::Variant, + g: &ast::Generics, + item_id: NodeId) { + self.record("Variant", Id::None, v); + ast_visit::walk_variant(self, v, g, item_id) + } + + fn visit_lifetime(&mut self, lifetime: &ast::Lifetime) { + self.record("Lifetime", Id::None, lifetime); + ast_visit::walk_lifetime(self, lifetime) + } + + fn visit_lifetime_def(&mut self, lifetime: &ast::LifetimeDef) { + self.record("LifetimeDef", Id::None, lifetime); + ast_visit::walk_lifetime_def(self, lifetime) + } + + fn visit_mac(&mut self, mac: &ast::Mac) { + self.record("Mac", Id::None, mac); + } + + fn visit_path_list_item(&mut self, + prefix: &ast::Path, + item: &ast::PathListItem) { + self.record("PathListItem", Id::None, item); + ast_visit::walk_path_list_item(self, prefix, item) + } + + fn visit_path_segment(&mut self, + path_span: Span, + path_segment: &ast::PathSegment) { + self.record("PathSegment", Id::None, path_segment); + ast_visit::walk_path_segment(self, path_span, path_segment) + } + + fn visit_assoc_type_binding(&mut self, type_binding: &ast::TypeBinding) { + self.record("TypeBinding", Id::None, type_binding); + 
ast_visit::walk_assoc_type_binding(self, type_binding) + } + + fn visit_attribute(&mut self, attr: &ast::Attribute) { + self.record("Attribute", Id::None, attr); + } + + fn visit_macro_def(&mut self, macro_def: &ast::MacroDef) { + self.record("MacroDef", Id::None, macro_def); + ast_visit::walk_macro_def(self, macro_def) + } +} diff --git a/src/librustc_passes/lib.rs b/src/librustc_passes/lib.rs index 4adaa0cab7a04..525d49ddd820c 100644 --- a/src/librustc_passes/lib.rs +++ b/src/librustc_passes/lib.rs @@ -19,18 +19,33 @@ #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(rustc_diagnostic_macros)] #![feature(staged_api)] #![feature(rustc_private)] extern crate core; +#[macro_use] extern crate rustc; +extern crate rustc_const_eval; +extern crate rustc_const_math; -#[macro_use] extern crate syntax; +#[macro_use] +extern crate log; +#[macro_use] +extern crate syntax; +extern crate syntax_pos; +extern crate rustc_errors as errors; pub mod diagnostics; -pub mod const_fn; + +pub mod ast_validation; +pub mod consts; +pub mod hir_stats; +pub mod loops; pub mod no_asm; +pub mod rvalues; +pub mod static_recursion; diff --git a/src/librustc_passes/loops.rs b/src/librustc_passes/loops.rs new file mode 100644 index 0000000000000..10f464a9901d0 --- /dev/null +++ b/src/librustc_passes/loops.rs @@ -0,0 +1,153 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. +use self::Context::*; + +use rustc::session::Session; + +use rustc::dep_graph::DepNode; +use rustc::hir::map::Map; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir; +use syntax::ast; +use syntax_pos::Span; + +#[derive(Clone, Copy, PartialEq)] +enum LoopKind { + Loop(hir::LoopSource), + WhileLoop, +} + +impl LoopKind { + fn name(self) -> &'static str { + match self { + LoopKind::Loop(hir::LoopSource::Loop) => "loop", + LoopKind::Loop(hir::LoopSource::WhileLet) => "while let", + LoopKind::Loop(hir::LoopSource::ForLoop) => "for", + LoopKind::WhileLoop => "while", + } + } +} + +#[derive(Clone, Copy, PartialEq)] +enum Context { + Normal, + Loop(LoopKind), + Closure, +} + +#[derive(Copy, Clone)] +struct CheckLoopVisitor<'a, 'ast: 'a> { + sess: &'a Session, + hir_map: &'a Map<'ast>, + cx: Context, +} + +pub fn check_crate(sess: &Session, map: &Map) { + let _task = map.dep_graph.in_task(DepNode::CheckLoops); + let krate = map.krate(); + krate.visit_all_item_likes(&mut CheckLoopVisitor { + sess: sess, + hir_map: map, + cx: Normal, + }.as_deep_visitor()); +} + +impl<'a, 'ast> Visitor<'ast> for CheckLoopVisitor<'a, 'ast> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + NestedVisitorMap::OnlyBodies(&self.hir_map) + } + + fn visit_item(&mut self, i: &'ast hir::Item) { + self.with_context(Normal, |v| intravisit::walk_item(v, i)); + } + + fn visit_impl_item(&mut self, i: &'ast hir::ImplItem) { + self.with_context(Normal, |v| intravisit::walk_impl_item(v, i)); + } + + fn visit_expr(&mut self, e: &'ast hir::Expr) { + match e.node { + hir::ExprWhile(ref e, ref b, _) => { + self.with_context(Loop(LoopKind::WhileLoop), |v| { + v.visit_expr(&e); + v.visit_block(&b); + }); + } + hir::ExprLoop(ref b, _, source) => { + self.with_context(Loop(LoopKind::Loop(source)), |v| v.visit_block(&b)); + } + hir::ExprClosure(.., b, _) => { + 
self.with_context(Closure, |v| v.visit_body(b)); + } + hir::ExprBreak(label, ref opt_expr) => { + if opt_expr.is_some() { + let loop_kind = if let Some(label) = label { + if label.loop_id == ast::DUMMY_NODE_ID { + None + } else { + Some(match self.hir_map.expect_expr(label.loop_id).node { + hir::ExprWhile(..) => LoopKind::WhileLoop, + hir::ExprLoop(_, _, source) => LoopKind::Loop(source), + ref r => span_bug!(e.span, + "break label resolved to a non-loop: {:?}", r), + }) + } + } else if let Loop(kind) = self.cx { + Some(kind) + } else { + // `break` outside a loop - caught below + None + }; + match loop_kind { + None | Some(LoopKind::Loop(hir::LoopSource::Loop)) => (), + Some(kind) => { + struct_span_err!(self.sess, e.span, E0571, + "`break` with value from a `{}` loop", + kind.name()) + .span_label(e.span, + &format!("can only break with a value inside `loop`")) + .emit(); + } + } + } + self.require_loop("break", e.span); + } + hir::ExprAgain(_) => self.require_loop("continue", e.span), + _ => intravisit::walk_expr(self, e), + } + } +} + +impl<'a, 'ast> CheckLoopVisitor<'a, 'ast> { + fn with_context(&mut self, cx: Context, f: F) + where F: FnOnce(&mut CheckLoopVisitor<'a, 'ast>) + { + let old_cx = self.cx; + self.cx = cx; + f(self); + self.cx = old_cx; + } + + fn require_loop(&self, name: &str, span: Span) { + match self.cx { + Loop(_) => {} + Closure => { + struct_span_err!(self.sess, span, E0267, "`{}` inside of a closure", name) + .span_label(span, &format!("cannot break inside of a closure")) + .emit(); + } + Normal => { + struct_span_err!(self.sess, span, E0268, "`{}` outside of loop", name) + .span_label(span, &format!("cannot break outside of a loop")) + .emit(); + } + } + } +} diff --git a/src/librustc_passes/no_asm.rs b/src/librustc_passes/no_asm.rs index 3022d9fb9e3c3..af3065d64e8db 100644 --- a/src/librustc_passes/no_asm.rs +++ b/src/librustc_passes/no_asm.rs @@ -19,9 +19,11 @@ use syntax::visit::Visitor; use syntax::visit; pub fn check_crate(sess: 
&Session, krate: &ast::Crate) { - if sess.target.target.options.allow_asm { return; } + if sess.target.target.options.allow_asm { + return; + } - visit::walk_crate(&mut CheckNoAsm { sess: sess, }, krate); + visit::walk_crate(&mut CheckNoAsm { sess: sess }, krate); } #[derive(Copy, Clone)] @@ -29,12 +31,16 @@ struct CheckNoAsm<'a> { sess: &'a Session, } -impl<'a, 'v> Visitor<'v> for CheckNoAsm<'a> { +impl<'a> Visitor for CheckNoAsm<'a> { fn visit_expr(&mut self, e: &ast::Expr) { match e.node { - ast::ExprInlineAsm(_) => span_err!(self.sess, e.span, E0472, - "asm! is unsupported on this target"), - _ => {}, + ast::ExprKind::InlineAsm(_) => { + span_err!(self.sess, + e.span, + E0472, + "asm! is unsupported on this target") + } + _ => {} } visit::walk_expr(self, e) } diff --git a/src/librustc_passes/rvalues.rs b/src/librustc_passes/rvalues.rs new file mode 100644 index 0000000000000..ddb5af1e80c34 --- /dev/null +++ b/src/librustc_passes/rvalues.rs @@ -0,0 +1,112 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Checks that all rvalues in a crate have statically known size. check_crate +// is the public starting point. 
+ +use rustc::dep_graph::DepNode; +use rustc::middle::expr_use_visitor as euv; +use rustc::middle::mem_categorization as mc; +use rustc::ty::{self, TyCtxt, ParameterEnvironment}; +use rustc::traits::Reveal; + +use rustc::hir; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use syntax::ast; +use syntax_pos::Span; + +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut rvcx = RvalueContext { tcx: tcx }; + tcx.visit_all_item_likes_in_krate(DepNode::RvalueCheck, &mut rvcx.as_deep_visitor()); +} + +struct RvalueContext<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx> Visitor<'tcx> for RvalueContext<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_fn(&mut self, + fk: intravisit::FnKind<'tcx>, + fd: &'tcx hir::FnDecl, + b: hir::ExprId, + s: Span, + fn_id: ast::NodeId) { + // FIXME (@jroesch) change this to be an inference context + let param_env = ParameterEnvironment::for_item(self.tcx, fn_id); + self.tcx.infer_ctxt(None, Some(param_env.clone()), + Reveal::NotSpecializable).enter(|infcx| { + let mut delegate = RvalueContextDelegate { + tcx: infcx.tcx, + param_env: ¶m_env + }; + let body = infcx.tcx.map.expr(b); + let mut euv = euv::ExprUseVisitor::new(&mut delegate, &infcx); + euv.walk_fn(fd, body); + }); + intravisit::walk_fn(self, fk, fd, b, s, fn_id) + } +} + +struct RvalueContextDelegate<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + param_env: &'a ty::ParameterEnvironment<'gcx>, +} + +impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for RvalueContextDelegate<'a, 'gcx, 'tcx> { + fn consume(&mut self, + _: ast::NodeId, + span: Span, + cmt: mc::cmt<'tcx>, + _: euv::ConsumeMode) { + debug!("consume; cmt: {:?}; type: {:?}", *cmt, cmt.ty); + let ty = self.tcx.lift_to_global(&cmt.ty).unwrap(); + if !ty.is_sized(self.tcx.global_tcx(), self.param_env, span) { + span_err!(self.tcx.sess, span, E0161, + "cannot 
move a value of type {0}: the size of {0} cannot be statically determined", + ty); + } + } + + fn matched_pat(&mut self, + _matched_pat: &hir::Pat, + _cmt: mc::cmt, + _mode: euv::MatchMode) {} + + fn consume_pat(&mut self, + _consume_pat: &hir::Pat, + _cmt: mc::cmt, + _mode: euv::ConsumeMode) { + } + + fn borrow(&mut self, + _borrow_id: ast::NodeId, + _borrow_span: Span, + _cmt: mc::cmt, + _loan_region: &'tcx ty::Region, + _bk: ty::BorrowKind, + _loan_cause: euv::LoanCause) { + } + + fn decl_without_init(&mut self, + _id: ast::NodeId, + _span: Span) { + } + + fn mutate(&mut self, + _assignment_id: ast::NodeId, + _assignment_span: Span, + _assignee_cmt: mc::cmt, + _mode: euv::MutateMode) { + } +} diff --git a/src/librustc_passes/static_recursion.rs b/src/librustc_passes/static_recursion.rs new file mode 100644 index 0000000000000..ffb5045fe3b07 --- /dev/null +++ b/src/librustc_passes/static_recursion.rs @@ -0,0 +1,301 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// This compiler pass detects constants that refer to themselves +// recursively. 
+ +use rustc::dep_graph::DepNode; +use rustc::hir::map as ast_map; +use rustc::session::{CompileResult, Session}; +use rustc::hir::def::{Def, CtorKind}; +use rustc::util::nodemap::NodeMap; + +use syntax::ast; +use syntax::feature_gate::{GateIssue, emit_feature_err}; +use syntax_pos::Span; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir; + +use std::cell::RefCell; + +struct CheckCrateVisitor<'a, 'ast: 'a> { + sess: &'a Session, + ast_map: &'a ast_map::Map<'ast>, + // `discriminant_map` is a cache that associates the `NodeId`s of local + // variant definitions with the discriminant expression that applies to + // each one. If the variant uses the default values (starting from `0`), + // then `None` is stored. + discriminant_map: RefCell>>, +} + +impl<'a, 'ast: 'a> Visitor<'ast> for CheckCrateVisitor<'a, 'ast> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + NestedVisitorMap::None + } + + fn visit_item(&mut self, it: &'ast hir::Item) { + match it.node { + hir::ItemStatic(..) | + hir::ItemConst(..) => { + let mut recursion_visitor = CheckItemRecursionVisitor::new(self, &it.span); + recursion_visitor.visit_item(it); + } + hir::ItemEnum(ref enum_def, ref generics) => { + // We could process the whole enum, but handling the variants + // with discriminant expressions one by one gives more specific, + // less redundant output. 
+ for variant in &enum_def.variants { + if let Some(_) = variant.node.disr_expr { + let mut recursion_visitor = CheckItemRecursionVisitor::new(self, + &variant.span); + recursion_visitor.populate_enum_discriminants(enum_def); + recursion_visitor.visit_variant(variant, generics, it.id); + } + } + } + _ => {} + } + intravisit::walk_item(self, it) + } + + fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { + match ti.node { + hir::ConstTraitItem(_, ref default) => { + if let Some(_) = *default { + let mut recursion_visitor = CheckItemRecursionVisitor::new(self, &ti.span); + recursion_visitor.visit_trait_item(ti); + } + } + _ => {} + } + intravisit::walk_trait_item(self, ti) + } + + fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { + match ii.node { + hir::ImplItemKind::Const(..) => { + let mut recursion_visitor = CheckItemRecursionVisitor::new(self, &ii.span); + recursion_visitor.visit_impl_item(ii); + } + _ => {} + } + intravisit::walk_impl_item(self, ii) + } +} + +pub fn check_crate<'ast>(sess: &Session, + ast_map: &ast_map::Map<'ast>) + -> CompileResult { + let _task = ast_map.dep_graph.in_task(DepNode::CheckStaticRecursion); + + let mut visitor = CheckCrateVisitor { + sess: sess, + ast_map: ast_map, + discriminant_map: RefCell::new(NodeMap()), + }; + sess.track_errors(|| { + // FIXME(#37712) could use ItemLikeVisitor if trait items were item-like + ast_map.krate().visit_all_item_likes(&mut visitor.as_deep_visitor()); + }) +} + +struct CheckItemRecursionVisitor<'a, 'ast: 'a> { + root_span: &'a Span, + sess: &'a Session, + ast_map: &'a ast_map::Map<'ast>, + discriminant_map: &'a RefCell>>, + idstack: Vec, +} + +impl<'a, 'ast: 'a> CheckItemRecursionVisitor<'a, 'ast> { + fn new(v: &'a CheckCrateVisitor<'a, 'ast>, + span: &'a Span) + -> CheckItemRecursionVisitor<'a, 'ast> { + CheckItemRecursionVisitor { + root_span: span, + sess: v.sess, + ast_map: v.ast_map, + discriminant_map: &v.discriminant_map, + idstack: Vec::new(), + } + } + fn 
with_item_id_pushed(&mut self, id: ast::NodeId, f: F, span: Span) + where F: Fn(&mut Self) + { + if self.idstack.iter().any(|&x| x == id) { + let any_static = self.idstack.iter().any(|&x| { + if let ast_map::NodeItem(item) = self.ast_map.get(x) { + if let hir::ItemStatic(..) = item.node { + true + } else { + false + } + } else { + false + } + }); + if any_static { + if !self.sess.features.borrow().static_recursion { + emit_feature_err(&self.sess.parse_sess, + "static_recursion", + *self.root_span, + GateIssue::Language, + "recursive static"); + } + } else { + struct_span_err!(self.sess, span, E0265, "recursive constant") + .span_label(span, &format!("recursion not allowed in constant")) + .emit(); + } + return; + } + self.idstack.push(id); + f(self); + self.idstack.pop(); + } + // If a variant has an expression specifying its discriminant, then it needs + // to be checked just like a static or constant. However, if there are more + // variants with no explicitly specified discriminant, those variants will + // increment the same expression to get their values. + // + // So for every variant, we need to track whether there is an expression + // somewhere in the enum definition that controls its discriminant. We do + // this by starting from the end and searching backward. + fn populate_enum_discriminants(&self, enum_definition: &'ast hir::EnumDef) { + // Get the map, and return if we already processed this enum or if it + // has no variants. + let mut discriminant_map = self.discriminant_map.borrow_mut(); + match enum_definition.variants.first() { + None => { + return; + } + Some(variant) if discriminant_map.contains_key(&variant.node.data.id()) => { + return; + } + _ => {} + } + + // Go through all the variants. + let mut variant_stack: Vec = Vec::new(); + for variant in enum_definition.variants.iter().rev() { + variant_stack.push(variant.node.data.id()); + // When we find an expression, every variant currently on the stack + // is affected by that expression. 
+ if let Some(ref expr) = variant.node.disr_expr { + for id in &variant_stack { + discriminant_map.insert(*id, Some(expr)); + } + variant_stack.clear() + } + } + // If we are at the top, that always starts at 0, so any variant on the + // stack has a default value and does not need to be checked. + for id in &variant_stack { + discriminant_map.insert(*id, None); + } + } +} + +impl<'a, 'ast: 'a> Visitor<'ast> for CheckItemRecursionVisitor<'a, 'ast> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'ast> { + NestedVisitorMap::OnlyBodies(&self.ast_map) + } + + fn visit_item(&mut self, it: &'ast hir::Item) { + self.with_item_id_pushed(it.id, |v| intravisit::walk_item(v, it), it.span); + } + + fn visit_enum_def(&mut self, + enum_definition: &'ast hir::EnumDef, + generics: &'ast hir::Generics, + item_id: ast::NodeId, + _: Span) { + self.populate_enum_discriminants(enum_definition); + intravisit::walk_enum_def(self, enum_definition, generics, item_id); + } + + fn visit_variant(&mut self, + variant: &'ast hir::Variant, + _: &'ast hir::Generics, + _: ast::NodeId) { + let variant_id = variant.node.data.id(); + let maybe_expr; + if let Some(get_expr) = self.discriminant_map.borrow().get(&variant_id) { + // This is necessary because we need to let the `discriminant_map` + // borrow fall out of scope, so that we can reborrow farther down. + maybe_expr = (*get_expr).clone(); + } else { + span_bug!(variant.span, + "`check_static_recursion` attempted to visit \ + variant with unknown discriminant") + } + // If `maybe_expr` is `None`, that's because no discriminant is + // specified that affects this variant. Thus, no risk of recursion. 
+ if let Some(expr) = maybe_expr { + self.with_item_id_pushed(expr.id, |v| intravisit::walk_expr(v, expr), expr.span); + } + } + + fn visit_trait_item(&mut self, ti: &'ast hir::TraitItem) { + self.with_item_id_pushed(ti.id, |v| intravisit::walk_trait_item(v, ti), ti.span); + } + + fn visit_impl_item(&mut self, ii: &'ast hir::ImplItem) { + self.with_item_id_pushed(ii.id, |v| intravisit::walk_impl_item(v, ii), ii.span); + } + + fn visit_expr(&mut self, e: &'ast hir::Expr) { + match e.node { + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + match path.def { + Def::Static(def_id, _) | + Def::AssociatedConst(def_id) | + Def::Const(def_id) => { + if let Some(node_id) = self.ast_map.as_local_node_id(def_id) { + match self.ast_map.get(node_id) { + ast_map::NodeItem(item) => self.visit_item(item), + ast_map::NodeTraitItem(item) => self.visit_trait_item(item), + ast_map::NodeImplItem(item) => self.visit_impl_item(item), + ast_map::NodeForeignItem(_) => {} + _ => { + span_bug!(e.span, + "expected item, found {}", + self.ast_map.node_to_string(node_id)); + } + } + } + } + // For variants, we only want to check expressions that + // affect the specific variant used, but we need to check + // the whole enum definition to see what expression that + // might be (if any). 
+ Def::VariantCtor(variant_id, CtorKind::Const) => { + if let Some(variant_id) = self.ast_map.as_local_node_id(variant_id) { + let variant = self.ast_map.expect_variant(variant_id); + let enum_id = self.ast_map.get_parent(variant_id); + let enum_item = self.ast_map.expect_item(enum_id); + if let hir::ItemEnum(ref enum_def, ref generics) = enum_item.node { + self.populate_enum_discriminants(enum_def); + self.visit_variant(variant, generics, enum_id); + } else { + span_bug!(e.span, + "`check_static_recursion` found \ + non-enum in Def::VariantCtor"); + } + } + } + _ => (), + } + } + _ => (), + } + intravisit::walk_expr(self, e); + } +} diff --git a/src/librustc_platform_intrinsics/Cargo.toml b/src/librustc_platform_intrinsics/Cargo.toml new file mode 100644 index 0000000000000..92f37f974efdc --- /dev/null +++ b/src/librustc_platform_intrinsics/Cargo.toml @@ -0,0 +1,9 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_platform_intrinsics" +version = "0.0.0" + +[lib] +name = "rustc_platform_intrinsics" +path = "lib.rs" +crate-type = ["dylib"] diff --git a/src/librustc_platform_intrinsics/aarch64.rs b/src/librustc_platform_intrinsics/aarch64.rs index fda65554cd2f2..0fb8513e138f3 100644 --- a/src/librustc_platform_intrinsics/aarch64.rs +++ b/src/librustc_platform_intrinsics/aarch64.rs @@ -13,3404 +13,3403 @@ #![allow(unused_imports)] -use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void}; +use {Intrinsic, Type}; use IntrinsicDef::Named; -use rustc::middle::ty; // The default inlining settings trigger a pathological behaviour in // LLVM, which causes makes compilation very slow. See #28273. #[inline(never)] -pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option { +pub fn find(name: &str) -> Option { if !name.starts_with("aarch64_v") { return None } Some(match &name["aarch64_v".len()..] 
{ "hadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.shadd.v8i8") }, "hadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uhadd.v8i8") }, "hadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.shadd.v4i16") }, "hadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uhadd.v4i16") }, "hadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.shadd.v2i32") }, "hadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uhadd.v2i32") }, "haddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.shadd.v16i8") }, "haddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uhadd.v16i8") }, "haddq_s16" => Intrinsic { - inputs: 
vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.shadd.v8i16") }, "haddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uhadd.v8i16") }, "haddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.shadd.v4i32") }, "haddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uhadd.v4i32") }, "rhadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.srhadd.v8i8") }, "rhadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.urhadd.v8i8") }, "rhadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.srhadd.v4i16") }, "rhadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.urhadd.v4i16") }, "rhadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - 
output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.srhadd.v2i32") }, "rhadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.urhadd.v2i32") }, "rhaddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.srhadd.v16i8") }, "rhaddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.urhadd.v16i8") }, "rhaddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.srhadd.v8i16") }, "rhaddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.urhadd.v8i16") }, "rhaddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.srhadd.v4i32") }, "rhaddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.urhadd.v4i32") }, "qadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 
8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqadd.v8i8") }, "qadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqadd.v8i8") }, "qadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqadd.v4i16") }, "qadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqadd.v4i16") }, "qadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqadd.v2i32") }, "qadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqadd.v2i32") }, "qadd_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqadd.v1i64") }, "qadd_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.uqadd.v1i64") }, "qaddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static 
Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqadd.v16i8") }, "qaddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uqadd.v16i8") }, "qaddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqadd.v8i16") }, "qaddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uqadd.v8i16") }, "qaddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqadd.v4i32") }, "qaddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uqadd.v4i32") }, "qaddq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqadd.v2i64") }, "qaddq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.uqadd.v2i64") }, "uqadd_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, 
&::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.suqadd.v16i8") }, "uqadd_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(u(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.suqadd.v8i16") }, "uqadd_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(u(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.suqadd.v4i32") }, "uqadd_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(u(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.suqadd.v2i64") }, "sqadd_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.usqadd.v16i8") }, "sqadd_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.usqadd.v8i16") }, "sqadd_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.usqadd.v4i32") }, "sqadd_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.usqadd.v2i64") }, "raddhn_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; 
&INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.raddhn.v8i8") }, "raddhn_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.raddhn.v8i8") }, "raddhn_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.raddhn.v4i16") }, "raddhn_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.raddhn.v4i16") }, "raddhn_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.raddhn.v2i32") }, "raddhn_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.raddhn.v2i32") }, "fmulx_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmulx.v2f32") }, "fmulx_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fmulx.v1f64") }, "fmulxq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + 
output: &::F32x4, definition: Named("llvm.aarch64.neon.fmulx.v4f32") }, "fmulxq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmulx.v2f64") }, "fma_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.fma.v2f32") }, "fma_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.fma.v1f64") }, "fmaq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.fma.v4f32") }, "fmaq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.fma.v2f64") }, "qdmulh_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqdmulh.v4i16") }, "qdmulh_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqdmulh.v2i32") }, "qdmulhq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqdmulh.v8i16") }, 
"qdmulhq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqdmulh.v4i32") }, "qrdmulh_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqrdmulh.v4i16") }, "qrdmulh_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqrdmulh.v2i32") }, "qrdmulhq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqrdmulh.v8i16") }, "qrdmulhq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqrdmulh.v4i32") }, "mull_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.smull.v8i16") }, "mull_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.umull.v8i16") }, "mull_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.smull.v4i32") }, "mull_u16" 
=> Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.umull.v4i32") }, "mull_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.smull.v2i64") }, "mull_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.umull.v2i64") }, "qdmullq_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqdmull.v8i16") }, "qdmullq_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqdmull.v4i32") }, "hsub_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.shsub.v8i8") }, "hsub_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uhsub.v8i8") }, "hsub_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.shsub.v4i16") }, "hsub_u16" => Intrinsic { - inputs: vec![v(u(16), 4), 
v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uhsub.v4i16") }, "hsub_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.shsub.v2i32") }, "hsub_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uhsub.v2i32") }, "hsubq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.shsub.v16i8") }, "hsubq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uhsub.v16i8") }, "hsubq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.shsub.v8i16") }, "hsubq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uhsub.v8i16") }, "hsubq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.shsub.v4i32") }, "hsubq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: 
v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uhsub.v4i32") }, "qsub_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqsub.v8i8") }, "qsub_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqsub.v8i8") }, "qsub_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqsub.v4i16") }, "qsub_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqsub.v4i16") }, "qsub_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqsub.v2i32") }, "qsub_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqsub.v2i32") }, "qsub_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqsub.v1i64") }, "qsub_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: 
[&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.uqsub.v1i64") }, "qsubq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqsub.v16i8") }, "qsubq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uqsub.v16i8") }, "qsubq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqsub.v8i16") }, "qsubq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uqsub.v8i16") }, "qsubq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqsub.v4i32") }, "qsubq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uqsub.v4i32") }, "qsubq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqsub.v2i64") }, "qsubq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = 
[&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.uqsub.v2i64") }, "rsubhn_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.rsubhn.v8i8") }, "rsubhn_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.rsubhn.v8i8") }, "rsubhn_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.rsubhn.v4i16") }, "rsubhn_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.rsubhn.v4i16") }, "rsubhn_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.rsubhn.v2i32") }, "rsubhn_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.rsubhn.v2i32") }, "abd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sabd.v8i8") }, "abd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + 
output: &::U8x8, definition: Named("llvm.aarch64.neon.uabd.v8i8") }, "abd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sabd.v4i16") }, "abd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uabd.v4i16") }, "abd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sabd.v2i32") }, "abd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uabd.v2i32") }, "abd_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fabd.v2f32") }, "abd_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fabd.v1f64") }, "abdq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sabd.v16i8") }, "abdq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: 
Named("llvm.aarch64.neon.uabd.v16i8") }, "abdq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sabd.v8i16") }, "abdq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uabd.v8i16") }, "abdq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sabd.v4i32") }, "abdq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uabd.v4i32") }, "abdq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fabd.v4f32") }, "abdq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fabd.v2f64") }, "max_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.smax.v8i8") }, "max_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.umax.v8i8") }, "max_s16" => 
Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.smax.v4i16") }, "max_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.umax.v4i16") }, "max_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.smax.v2i32") }, "max_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.umax.v2i32") }, "max_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmax.v2f32") }, "max_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fmax.v1f64") }, "maxq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.smax.v16i8") }, "maxq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.umax.v16i8") }, "maxq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), 
v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.smax.v8i16") }, "maxq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.umax.v8i16") }, "maxq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.smax.v4i32") }, "maxq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.umax.v4i32") }, "maxq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fmax.v4f32") }, "maxq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmax.v2f64") }, "min_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.smin.v8i8") }, "min_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.umin.v8i8") }, "min_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static 
INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.smin.v4i16") }, "min_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.umin.v4i16") }, "min_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.smin.v2i32") }, "min_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.umin.v2i32") }, "min_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmin.v2f32") }, "min_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fmin.v1f64") }, "minq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.smin.v16i8") }, "minq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.umin.v16i8") }, "minq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, 
&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.smin.v8i16") }, "minq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.umin.v8i16") }, "minq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.smin.v4i32") }, "minq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.umin.v4i32") }, "minq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fmin.v4f32") }, "minq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmin.v2f64") }, "maxnm_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmaxnm.v2f32") }, "maxnm_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fmaxnm.v1f64") }, "maxnmq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: 
&::F32x4, definition: Named("llvm.aarch64.neon.fmaxnm.v4f32") }, "maxnmq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmaxnm.v2f64") }, "minnm_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fminnm.v2f32") }, "minnm_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.fminnm.v1f64") }, "minnmq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fminnm.v4f32") }, "minnmq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fminnm.v2f64") }, "shl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sshl.v8i8") }, "shl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.ushl.v8i8") }, "shl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: 
Named("llvm.aarch64.neon.sshl.v4i16") }, "shl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.ushl.v4i16") }, "shl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sshl.v2i32") }, "shl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.ushl.v2i32") }, "shl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sshl.v1i64") }, "shl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.ushl.v1i64") }, "shlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sshl.v16i8") }, "shlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.ushl.v16i8") }, "shlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sshl.v8i16") }, 
"shlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.ushl.v8i16") }, "shlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sshl.v4i32") }, "shlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.ushl.v4i32") }, "shlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sshl.v2i64") }, "shlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.ushl.v2i64") }, "qshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqshl.v8i8") }, "qshl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqshl.v8i8") }, "qshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqshl.v4i16") }, "qshl_u16" => Intrinsic { - inputs: vec![v(u(16), 
4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqshl.v4i16") }, "qshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqshl.v2i32") }, "qshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqshl.v2i32") }, "qshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqshl.v1i64") }, "qshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.uqshl.v1i64") }, "qshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqshl.v16i8") }, "qshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uqshl.v16i8") }, "qshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqshl.v8i16") }, "qshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: 
v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uqshl.v8i16") }, "qshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqshl.v4i32") }, "qshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uqshl.v4i32") }, "qshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqshl.v2i64") }, "qshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.uqshl.v2i64") }, "rshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.srshl.v8i8") }, "rshl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.urshl.v8i8") }, "rshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.srshl.v4i16") }, "rshl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: 
[&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.urshl.v4i16") }, "rshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.srshl.v2i32") }, "rshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.urshl.v2i32") }, "rshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.srshl.v1i64") }, "rshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.urshl.v1i64") }, "rshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.srshl.v16i8") }, "rshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.urshl.v16i8") }, "rshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.srshl.v8i16") }, "rshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = 
[&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.urshl.v8i16") }, "rshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.srshl.v4i32") }, "rshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.urshl.v4i32") }, "rshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.srshl.v2i64") }, "rshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.urshl.v2i64") }, "qrshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqrshl.v8i8") }, "qrshl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqrshl.v8i8") }, "qrshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqrshl.v4i16") }, "qrshl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + 
output: &::U16x4, definition: Named("llvm.aarch64.neon.uqrshl.v4i16") }, "qrshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqrshl.v2i32") }, "qrshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqrshl.v2i32") }, "qrshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqrshl.v1i64") }, "qrshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.uqrshl.v1i64") }, "qrshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqrshl.v16i8") }, "qrshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uqrshl.v16i8") }, "qrshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqrshl.v8i16") }, "qrshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: 
&::U16x8, definition: Named("llvm.aarch64.neon.uqrshl.v8i16") }, "qrshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqrshl.v4i32") }, "qrshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uqrshl.v4i32") }, "qrshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqrshl.v2i64") }, "qrshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.uqrshl.v2i64") }, "qshrun_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqshrun.v8i8") }, "qshrun_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqshrun.v4i16") }, "qshrun_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqshrun.v2i32") }, "qrshrun_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: 
Named("llvm.aarch64.neon.sqrshrun.v8i8") }, "qrshrun_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqrshrun.v4i16") }, "qrshrun_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqrshrun.v2i32") }, "qshrn_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqshrn.v8i8") }, "qshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqshrn.v8i8") }, "qshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqshrn.v4i16") }, "qshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqshrn.v4i16") }, "qshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqshrn.v2i32") }, "qshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqshrn.v2i32") }, "rshrn_n_s16" => 
Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.rshrn.v8i8") }, "rshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.rshrn.v8i8") }, "rshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.rshrn.v4i16") }, "rshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.rshrn.v4i16") }, "rshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.rshrn.v2i32") }, "rshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.rshrn.v2i32") }, "qrshrn_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqrshrn.v8i8") }, "qrshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqrshrn.v8i8") }, "qrshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + 
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqrshrn.v4i16") }, "qrshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqrshrn.v4i16") }, "qrshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqrshrn.v2i32") }, "qrshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uqrshrn.v2i32") }, "sri_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.vsri.v8i8") }, "sri_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.vsri.v8i8") }, "sri_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.vsri.v4i16") }, "sri_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.vsri.v4i16") }, "sri_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, 
&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.vsri.v2i32") }, "sri_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.vsri.v2i32") }, "sri_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.vsri.v1i64") }, "sri_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.vsri.v1i64") }, "sriq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.vsri.v16i8") }, "sriq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.vsri.v16i8") }, "sriq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.vsri.v8i16") }, "sriq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.vsri.v8i16") }, "sriq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, 
definition: Named("llvm.aarch64.neon.vsri.v4i32") }, "sriq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.vsri.v4i32") }, "sriq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.vsri.v2i64") }, "sriq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.vsri.v2i64") }, "sli_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.vsli.v8i8") }, "sli_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.vsli.v8i8") }, "sli_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.vsli.v4i16") }, "sli_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.vsli.v4i16") }, "sli_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.vsli.v2i32") }, 
"sli_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.vsli.v2i32") }, "sli_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.vsli.v1i64") }, "sli_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.vsli.v1i64") }, "sliq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.vsli.v16i8") }, "sliq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.vsli.v16i8") }, "sliq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.vsli.v8i16") }, "sliq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.vsli.v8i16") }, "sliq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.vsli.v4i32") }, "sliq_u32" => Intrinsic { - inputs: 
vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.vsli.v4i32") }, "sliq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.vsli.v2i64") }, "sliq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.vsli.v2i64") }, "vqmovn_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqxtn.v8i8") }, "vqmovn_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uqxtn.v8i8") }, "vqmovn_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqxtn.v4i16") }, "vqmovn_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uqxtn.v4i16") }, "vqmovn_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqxtn.v2i32") }, "vqmovn_u64" => Intrinsic { - inputs: vec![v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, + output: &::U32x2, 
definition: Named("llvm.aarch64.neon.uqxtn.v2i32") }, "abs_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.abs.v8i8") }, "abs_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.abs.v4i16") }, "abs_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.abs.v2i32") }, "abs_s64" => Intrinsic { - inputs: vec![v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.abs.v1i64") }, "absq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.abs.v16i8") }, "absq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.abs.v8i16") }, "absq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.abs.v4i32") }, "absq_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.abs.v2i64") }, "abs_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: 
Named("llvm.fabs.v2f32") }, "abs_f64" => Intrinsic { - inputs: vec![v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.fabs.v1f64") }, "absq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.fabs.v4f32") }, "absq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.fabs.v2f64") }, "qabs_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqabs.v8i8") }, "qabs_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqabs.v4i16") }, "qabs_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqabs.v2i32") }, "qabs_s64" => Intrinsic { - inputs: vec![v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqabs.v1i64") }, "qabsq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqabs.v16i8") }, "qabsq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqabs.v8i16") }, "qabsq_s32" 
=> Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqabs.v4i32") }, "qabsq_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqabs.v2i64") }, "qneg_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sqneg.v8i8") }, "qneg_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sqneg.v4i16") }, "qneg_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sqneg.v2i32") }, "qneg_s64" => Intrinsic { - inputs: vec![v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.sqneg.v1i64") }, "qnegq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sqneg.v16i8") }, "qnegq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sqneg.v8i16") }, "qnegq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sqneg.v4i32") }, 
"qnegq_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sqneg.v2i64") }, "clz_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.ctlz.v8i8") }, "clz_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.ctlz.v8i8") }, "clz_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.ctlz.v4i16") }, "clz_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.ctlz.v4i16") }, "clz_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.ctlz.v2i32") }, "clz_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.ctlz.v2i32") }, "clzq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.ctlz.v16i8") }, "clzq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.ctlz.v16i8") }, "clzq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] 
= [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.ctlz.v8i16") }, "clzq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.ctlz.v8i16") }, "clzq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.ctlz.v4i32") }, "clzq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.ctlz.v4i32") }, "cls_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.cls.v8i8") }, "cls_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.cls.v8i8") }, "cls_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.cls.v4i16") }, "cls_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.cls.v4i16") }, "cls_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.cls.v2i32") }, "cls_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: 
Named("llvm.aarch64.neon.cls.v2i32") }, "clsq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.cls.v16i8") }, "clsq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.cls.v16i8") }, "clsq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.cls.v8i16") }, "clsq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.cls.v8i16") }, "clsq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.cls.v4i32") }, "clsq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.cls.v4i32") }, "cnt_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.ctpop.v8i8") }, "cnt_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.ctpop.v8i8") }, "cntq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.ctpop.v16i8") }, "cntq_u8" => 
Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.ctpop.v16i8") }, "recpe_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.urecpe.v2i32") }, "recpe_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.frecpe.v2f32") }, "recpe_f64" => Intrinsic { - inputs: vec![v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.frecpe.v1f64") }, "recpeq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.urecpe.v4i32") }, "recpeq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.frecpe.v4f32") }, "recpeq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.frecpe.v2f64") }, "recps_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.frecps.v2f32") }, "recps_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, 
definition: Named("llvm.aarch64.neon.frecps.v1f64") }, "recpsq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.frecps.v4f32") }, "recpsq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.frecps.v2f64") }, "sqrt_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.sqrt.v2f32") }, "sqrt_f64" => Intrinsic { - inputs: vec![v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.sqrt.v1f64") }, "sqrtq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.sqrt.v4f32") }, "sqrtq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.sqrt.v2f64") }, "rsqrte_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.ursqrte.v2i32") }, "rsqrte_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.frsqrte.v2f32") }, "rsqrte_f64" => Intrinsic { - inputs: vec![v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, + 
output: &::F64x1, definition: Named("llvm.aarch64.neon.frsqrte.v1f64") }, "rsqrteq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.ursqrte.v4i32") }, "rsqrteq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.frsqrte.v4f32") }, "rsqrteq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.frsqrte.v2f64") }, "rsqrts_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.frsqrts.v2f32") }, "rsqrts_f64" => Intrinsic { - inputs: vec![v(f(64), 1), v(f(64), 1)], - output: v(f(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, + output: &::F64x1, definition: Named("llvm.aarch64.neon.frsqrts.v1f64") }, "rsqrtsq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.frsqrts.v4f32") }, "rsqrtsq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.frsqrts.v2f64") }, "rbit_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.rbit.v8i8") }, "rbit_u8" => Intrinsic 
{ - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.rbit.v8i8") }, "rbitq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.rbit.v16i8") }, "rbitq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.rbit.v16i8") }, "ld2_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 8)))], - output: agg(false, vec![v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8") }, "ld2_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 8)))], - output: agg(false, vec![v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8") }, "ld2_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 4)))], - output: agg(false, vec![v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16") }, "ld2_u16" => 
Intrinsic { - inputs: vec![p(true, u(16), Some(v(u(16), 4)))], - output: agg(false, vec![v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16") }, "ld2_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 2)))], - output: agg(false, vec![v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32") }, "ld2_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 2)))], - output: agg(false, vec![v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32") }, "ld2_s64" => Intrinsic { - inputs: vec![p(true, i(64), Some(v(i(64), 1)))], - output: agg(false, vec![v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64") }, "ld2_u64" => Intrinsic { - inputs: vec![p(true, u(64), Some(v(u(64), 1)))], - output: agg(false, vec![v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ 
static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64") }, "ld2_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 2)))], - output: agg(false, vec![v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2f32.p0v2f32") }, "ld2_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 1)))], - output: agg(false, vec![v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1f64.p0v1f64") }, "ld2q_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 16)))], - output: agg(false, vec![v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8") }, "ld2q_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 16)))], - output: agg(false, vec![v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = 
[&::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8") }, "ld2q_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 8)))], - output: agg(false, vec![v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16") }, "ld2q_u16" => Intrinsic { - inputs: vec![p(true, u(16), Some(v(u(16), 8)))], - output: agg(false, vec![v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16") }, "ld2q_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 4)))], - output: agg(false, vec![v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32") }, "ld2q_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 4)))], - output: agg(false, vec![v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32") }, "ld2q_s64" => Intrinsic { - inputs: vec![p(true, i(64), 
Some(v(i(64), 2)))], - output: agg(false, vec![v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64") }, "ld2q_u64" => Intrinsic { - inputs: vec![p(true, u(64), Some(v(u(64), 2)))], - output: agg(false, vec![v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64") }, "ld2q_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 4)))], - output: agg(false, vec![v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4f32.p0v4f32") }, "ld2q_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 2)))], - output: agg(false, vec![v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2f64.p0v2f64") }, "ld3_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 8)))], - output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = 
Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8") }, "ld3_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 8)))], - output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8") }, "ld3_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 4)))], - output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16") }, "ld3_u16" => Intrinsic { - inputs: vec![p(true, u(16), Some(v(u(16), 4)))], - output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16") }, "ld3_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 2)))], - output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = 
Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32") }, "ld3_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 2)))], - output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32") }, "ld3_s64" => Intrinsic { - inputs: vec![p(true, i(64), Some(v(i(64), 1)))], - output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64") }, "ld3_u64" => Intrinsic { - inputs: vec![p(true, u(64), Some(v(u(64), 1)))], - output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64") }, "ld3_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 2)))], - output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, 
&::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2f32.p0v2f32") }, "ld3_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 1)))], - output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1f64.p0v1f64") }, "ld3q_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 16)))], - output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8") }, "ld3q_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 16)))], - output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8") }, "ld3q_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 8)))], - output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: 
Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16") }, "ld3q_u16" => Intrinsic { - inputs: vec![p(true, u(16), Some(v(u(16), 8)))], - output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16") }, "ld3q_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 4)))], - output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32") }, "ld3q_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 4)))], - output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32") }, "ld3q_s64" => Intrinsic { - inputs: vec![p(true, i(64), Some(v(i(64), 2)))], - output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64") }, "ld3q_u64" => Intrinsic { - 
inputs: vec![p(true, u(64), Some(v(u(64), 2)))], - output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64") }, "ld3q_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 4)))], - output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4f32.p0v4f32") }, "ld3q_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 2)))], - output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2f64.p0v2f64") }, "ld4_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 8)))], - output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8") }, "ld4_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 8)))], - output: agg(false, 
vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8") }, "ld4_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 4)))], - output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16") }, "ld4_u16" => Intrinsic { - inputs: vec![p(true, u(16), Some(v(u(16), 4)))], - output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16") }, "ld4_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 2)))], - output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32") }, "ld4_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 2)))], - output: agg(false, 
vec![v(u(32), 2), v(u(32), 2), v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32") }, "ld4_s64" => Intrinsic { - inputs: vec![p(true, i(64), Some(v(i(64), 1)))], - output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64") }, "ld4_u64" => Intrinsic { - inputs: vec![p(true, u(64), Some(v(u(64), 1)))], - output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64") }, "ld4_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 2)))], - output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2f32.p0v2f32") }, "ld4_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 1)))], - 
output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1f64.p0v1f64") }, "ld4q_s8" => Intrinsic { - inputs: vec![p(true, i(8), Some(v(i(8), 16)))], - output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8") }, "ld4q_u8" => Intrinsic { - inputs: vec![p(true, u(8), Some(v(u(8), 16)))], - output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8") }, "ld4q_s16" => Intrinsic { - inputs: vec![p(true, i(16), Some(v(i(16), 8)))], - output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, &::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16") }, "ld4q_u16" => Intrinsic { - inputs: vec![p(true, u(16), 
Some(v(u(16), 8)))], - output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16") }, "ld4q_s32" => Intrinsic { - inputs: vec![p(true, i(32), Some(v(i(32), 4)))], - output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32") }, "ld4q_u32" => Intrinsic { - inputs: vec![p(true, u(32), Some(v(u(32), 4)))], - output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32") }, "ld4q_s64" => Intrinsic { - inputs: vec![p(true, i(64), Some(v(i(64), 2)))], - output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64") }, "ld4q_u64" => Intrinsic { - inputs: 
vec![p(true, u(64), Some(v(u(64), 2)))], - output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64") }, "ld4q_f32" => Intrinsic { - inputs: vec![p(true, f(32), Some(v(f(32), 4)))], - output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4f32.p0v4f32") }, "ld4q_f64" => Intrinsic { - inputs: vec![p(true, f(64), Some(v(f(64), 2)))], - output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2f64.p0v2f64") }, "ld2_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8") }, "ld2_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, 
vec![v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8") }, "ld2_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16") }, "ld2_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16") }, "ld2_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32") }, "ld2_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, 
&::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32") }, "ld2_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64") }, "ld2_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64") }, "ld2_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2f32.p0f32") }, "ld2_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v1f64.p0f64") }, "ld2q_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = 
[{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8") }, "ld2q_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, vec![v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8") }, "ld2q_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16") }, "ld2q_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16") }, "ld2q_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: 
Named("llvm.aarch64.neon.ld2.v4i32.p0i32") }, "ld2q_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4i32.p0i32") }, "ld2q_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64") }, "ld2q_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64") }, "ld2q_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v4f32.p0f32") }, "ld2q_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = 
Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld2.v2f64.p0f64") }, "ld3_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8") }, "ld3_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8") }, "ld3_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16") }, "ld3_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG 
}, definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16") }, "ld3_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32") }, "ld3_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32") }, "ld3_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64") }, "ld3_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64") }, "ld3_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 
2), v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2f32.p0f32") }, "ld3_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v1f64.p0f64") }, "ld3q_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8") }, "ld3q_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8") }, "ld3q_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { 
static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16") }, "ld3q_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16") }, "ld3q_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32") }, "ld3q_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32") }, "ld3q_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: 
Named("llvm.aarch64.neon.ld3.v2i64.p0i64") }, "ld3q_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2i64.p0i64") }, "ld3q_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v4f32.p0f32") }, "ld3q_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld3.v2f64.p0f64") }, "ld4_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8") }, "ld4_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, vec![v(u(8), 8), 
v(u(8), 8), v(u(8), 8), v(u(8), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8") }, "ld4_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 4), v(i(16), 4), v(i(16), 4), v(i(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16") }, "ld4_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 4), v(u(16), 4), v(u(16), 4), v(u(16), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16") }, "ld4_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 2), v(i(32), 2), v(i(32), 2), v(i(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32") }, "ld4_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 2), v(u(32), 2), v(u(32), 2), v(u(32), 2)]), + inputs: { static INPUTS: [&'static 
Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32") }, "ld4_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 1), v(i(64), 1), v(i(64), 1), v(i(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64") }, "ld4_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 1), v(u(64), 1), v(u(64), 1), v(u(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64") }, "ld4_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 2), v(f(32), 2), v(f(32), 2), v(f(32), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2f32.p0f32") }, "ld4_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 1), v(f(64), 1), v(f(64), 1), v(f(64), 1)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); 
&PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v1f64.p0f64") }, "ld4q_dup_s8" => Intrinsic { - inputs: vec![p(true, i(8), None)], - output: agg(false, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8") }, "ld4q_dup_u8" => Intrinsic { - inputs: vec![p(true, u(8), None)], - output: agg(false, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8") }, "ld4q_dup_s16" => Intrinsic { - inputs: vec![p(true, i(16), None)], - output: agg(false, vec![v(i(16), 8), v(i(16), 8), v(i(16), 8), v(i(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, &::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16") }, "ld4q_dup_u16" => Intrinsic { - inputs: vec![p(true, u(16), None)], - output: agg(false, vec![v(u(16), 8), v(u(16), 8), v(u(16), 8), v(u(16), 8)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { 
static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16") }, "ld4q_dup_s32" => Intrinsic { - inputs: vec![p(true, i(32), None)], - output: agg(false, vec![v(i(32), 4), v(i(32), 4), v(i(32), 4), v(i(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32") }, "ld4q_dup_u32" => Intrinsic { - inputs: vec![p(true, u(32), None)], - output: agg(false, vec![v(u(32), 4), v(u(32), 4), v(u(32), 4), v(u(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32") }, "ld4q_dup_s64" => Intrinsic { - inputs: vec![p(true, i(64), None)], - output: agg(false, vec![v(i(64), 2), v(i(64), 2), v(i(64), 2), v(i(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64") }, "ld4q_dup_u64" => Intrinsic { - inputs: vec![p(true, u(64), None)], - output: agg(false, vec![v(u(64), 2), v(u(64), 2), v(u(64), 2), v(u(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, 
&::U64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64") }, "ld4q_dup_f32" => Intrinsic { - inputs: vec![p(true, f(32), None)], - output: agg(false, vec![v(f(32), 4), v(f(32), 4), v(f(32), 4), v(f(32), 4)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v4f32.p0f32") }, "ld4q_dup_f64" => Intrinsic { - inputs: vec![p(true, f(64), None)], - output: agg(false, vec![v(f(64), 2), v(f(64), 2), v(f(64), 2), v(f(64), 2)]), + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, + output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, definition: Named("llvm.aarch64.neon.ld4.v2f64.p0f64") }, "padd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.addp.v8i8") }, "padd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.addp.v8i8") }, "padd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.addp.v4i16") }, "padd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: 
Named("llvm.aarch64.neon.addp.v4i16") }, "padd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.addp.v2i32") }, "padd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.addp.v2i32") }, "padd_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.addp.v2f32") }, "paddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.addp.v16i8") }, "paddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.addp.v16i8") }, "paddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.addp.v8i16") }, "paddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.addp.v8i16") }, "paddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: 
Named("llvm.aarch64.neon.addp.v4i32") }, "paddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.addp.v4i32") }, "paddq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.addp.v4f32") }, "paddq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.addp.v2i64") }, "paddq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.addp.v2i64") }, "paddq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.addp.v2f64") }, "paddl_s16" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.saddlp.v4i16.v8i8") }, "paddl_u16" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uaddlp.v4i16.v8i8") }, "paddl_s32" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.saddlp.v2i32.v4i16") }, "paddl_u32" => Intrinsic { - inputs: 
vec![v(u(16), 4)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uaddlp.v2i32.v4i16") }, "paddl_s64" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.aarch64.neon.saddlp.v1i64.v2i32") }, "paddl_u64" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.aarch64.neon.uaddlp.v1i64.v2i32") }, "paddlq_s16" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.saddlp.v8i16.v16i8") }, "paddlq_u16" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uaddlp.v8i16.v16i8") }, "paddlq_s32" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.saddlp.v4i32.v8i16") }, "paddlq_u32" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uaddlp.v4i32.v8i16") }, "paddlq_s64" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.saddlp.v2i64.v4i32") }, "paddlq_u64" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U64x2, definition: 
Named("llvm.aarch64.neon.uaddlp.v2i64.v4i32") }, "pmax_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.smaxp.v8i8") }, "pmax_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.umaxp.v8i8") }, "pmax_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.smaxp.v4i16") }, "pmax_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.umaxp.v4i16") }, "pmax_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.smaxp.v2i32") }, "pmax_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.umaxp.v2i32") }, "pmax_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmaxp.v2f32") }, "pmaxq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.smaxp.v16i8") 
}, "pmaxq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.umaxp.v16i8") }, "pmaxq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.smaxp.v8i16") }, "pmaxq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.umaxp.v8i16") }, "pmaxq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.smaxp.v4i32") }, "pmaxq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.umaxp.v4i32") }, "pmaxq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fmaxp.v4f32") }, "pmaxq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.smaxp.v2i64") }, "pmaxq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.umaxp.v2i64") }, "pmaxq_f64" => 
Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmaxp.v2f64") }, "pmin_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.sminp.v8i8") }, "pmin_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.uminp.v8i8") }, "pmin_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.sminp.v4i16") }, "pmin_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.uminp.v4i16") }, "pmin_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.sminp.v2i32") }, "pmin_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.uminp.v2i32") }, "pmin_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fminp.v2f32") }, "pminq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 
16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.sminp.v16i8") }, "pminq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.uminp.v16i8") }, "pminq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.sminp.v8i16") }, "pminq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.uminp.v8i16") }, "pminq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.sminp.v4i32") }, "pminq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.uminp.v4i32") }, "pminq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fminp.v4f32") }, "pminq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.sminp.v2i64") }, "pminq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 
2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.uminp.v2i64") }, "pminq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fminp.v2f64") }, "pmaxnm_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.smaxnmp.v8i8") }, "pmaxnm_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.umaxnmp.v8i8") }, "pmaxnm_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.aarch64.neon.smaxnmp.v4i16") }, "pmaxnm_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.aarch64.neon.umaxnmp.v4i16") }, "pmaxnm_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.aarch64.neon.smaxnmp.v2i32") }, "pmaxnm_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.aarch64.neon.umaxnmp.v2i32") }, "pmaxnm_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { 
static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fmaxnmp.v2f32") }, "pmaxnmq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.smaxnmp.v16i8") }, "pmaxnmq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.umaxnmp.v16i8") }, "pmaxnmq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.aarch64.neon.smaxnmp.v8i16") }, "pmaxnmq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.aarch64.neon.umaxnmp.v8i16") }, "pmaxnmq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.aarch64.neon.smaxnmp.v4i32") }, "pmaxnmq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.aarch64.neon.umaxnmp.v4i32") }, "pmaxnmq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fmaxnmp.v4f32") }, "pmaxnmq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + 
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.aarch64.neon.smaxnmp.v2i64") }, "pmaxnmq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.aarch64.neon.umaxnmp.v2i64") }, "pmaxnmq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fmaxnmp.v2f64") }, "pminnm_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.aarch64.neon.fminnmp.v2f32") }, "pminnmq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.aarch64.neon.fminnmp.v4f32") }, "pminnmq_f64" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.aarch64.neon.fminnmp.v2f64") }, "addv_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8, definition: Named("llvm.aarch64.neon.saddv.i8.v8i8") }, "addv_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.uaddv.i8.v8i8") }, "addv_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16, 
definition: Named("llvm.aarch64.neon.saddv.i16.v4i16") }, "addv_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uaddv.i16.v4i16") }, "addv_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.saddv.i32.v2i32") }, "addv_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.uaddv.i32.v2i32") }, "addv_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.faddv.f32.v2f32") }, "addvq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8, definition: Named("llvm.aarch64.neon.saddv.i8.v16i8") }, "addvq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.uaddv.i8.v16i8") }, "addvq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.saddv.i16.v8i16") }, "addvq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uaddv.i16.v8i16") }, "addvq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32, definition: 
Named("llvm.aarch64.neon.saddv.i32.v4i32") }, "addvq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.uaddv.i32.v4i32") }, "addvq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.faddv.f32.v4f32") }, "addvq_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: i(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I64, definition: Named("llvm.aarch64.neon.saddv.i64.v2i64") }, "addvq_u64" => Intrinsic { - inputs: vec![v(u(64), 2)], - output: u(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, + output: &::U64, definition: Named("llvm.aarch64.neon.uaddv.i64.v2i64") }, "addvq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: f(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64, definition: Named("llvm.aarch64.neon.faddv.f64.v2f64") }, "addlv_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.saddlv.i16.v8i8") }, "addlv_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uaddlv.i16.v8i8") }, "addlv_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.saddlv.i32.v4i16") }, "addlv_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U32, definition: 
Named("llvm.aarch64.neon.uaddlv.i32.v4i16") }, "addlv_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: i(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I64, definition: Named("llvm.aarch64.neon.saddlv.i64.v2i32") }, "addlv_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: u(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U64, definition: Named("llvm.aarch64.neon.uaddlv.i64.v2i32") }, "addlvq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.saddlv.i16.v16i8") }, "addlvq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uaddlv.i16.v16i8") }, "addlvq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.saddlv.i32.v8i16") }, "addlvq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.uaddlv.i32.v8i16") }, "addlvq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: i(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I64, definition: Named("llvm.aarch64.neon.saddlv.i64.v4i32") }, "addlvq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: u(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U64, definition: Named("llvm.aarch64.neon.uaddlv.i64.v4i32") }, "maxv_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8, definition: 
Named("llvm.aarch64.neon.smaxv.i8.v8i8") }, "maxv_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.umaxv.i8.v8i8") }, "maxv_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.smaxv.i16.v4i16") }, "maxv_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.umaxv.i16.v4i16") }, "maxv_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.smaxv.i32.v2i32") }, "maxv_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.umaxv.i32.v2i32") }, "maxv_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fmaxv.f32.v2f32") }, "maxvq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8, definition: Named("llvm.aarch64.neon.smaxv.i8.v16i8") }, "maxvq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.umaxv.i8.v16i8") }, "maxvq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.smaxv.i16.v8i16") 
}, "maxvq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.umaxv.i16.v8i16") }, "maxvq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.smaxv.i32.v4i32") }, "maxvq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.umaxv.i32.v4i32") }, "maxvq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fmaxv.f32.v4f32") }, "maxvq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: f(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64, definition: Named("llvm.aarch64.neon.fmaxv.f64.v2f64") }, "minv_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8, definition: Named("llvm.aarch64.neon.sminv.i8.v8i8") }, "minv_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.uminv.i8.v8i8") }, "minv_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.sminv.i16.v4i16") }, "minv_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uminv.i16.v4i16") }, "minv_s32" => Intrinsic { - 
inputs: vec![v(i(32), 2)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.sminv.i32.v2i32") }, "minv_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.uminv.i32.v2i32") }, "minv_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fminv.f32.v2f32") }, "minvq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: i(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8, definition: Named("llvm.aarch64.neon.sminv.i8.v16i8") }, "minvq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: u(8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8, definition: Named("llvm.aarch64.neon.uminv.i8.v16i8") }, "minvq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: i(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16, definition: Named("llvm.aarch64.neon.sminv.i16.v8i16") }, "minvq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: u(16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16, definition: Named("llvm.aarch64.neon.uminv.i16.v8i16") }, "minvq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: i(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.aarch64.neon.sminv.i32.v4i32") }, "minvq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: u(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32, definition: Named("llvm.aarch64.neon.uminv.i32.v4i32") }, "minvq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - 
output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fminv.f32.v4f32") }, "minvq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: f(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64, definition: Named("llvm.aarch64.neon.fminv.f64.v2f64") }, "maxnmv_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v2f32") }, "maxnmvq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v4f32") }, "maxnmvq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: f(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64, definition: Named("llvm.aarch64.neon.fmaxnmv.f64.v2f64") }, "minnmv_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fminnmv.f32.v2f32") }, "minnmvq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: f(32), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32, definition: Named("llvm.aarch64.neon.fminnmv.f32.v4f32") }, "minnmvq_f64" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: f(64), + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64, definition: Named("llvm.aarch64.neon.fminnmv.f64.v2f64") }, "qtbl1_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbl1.v8i8") }, "qtbl1_u8" => Intrinsic { - inputs: 
vec![v(u(8), 16), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbl1.v8i8") }, "qtbl1q_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbl1.v16i8") }, "qtbl1q_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbl1.v16i8") }, "qtbx1_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 16), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x16, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbx1.v8i8") }, "qtbx1_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 16), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x16, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbx1.v8i8") }, "qtbx1q_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbx1.v16i8") }, "qtbx1q_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbx1.v16i8") }, "qtbl2_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 
2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbl2.v8i8") }, "qtbl2_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbl2.v8i8") }, "qtbl2q_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbl2.v16i8") }, "qtbl2q_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbl2.v16i8") }, "qtbx2_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbx2.v8i8") }, "qtbx2_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: 
&::U8x8, definition: Named("llvm.aarch64.neon.tbx2.v8i8") }, "qtbx2q_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbx2.v16i8") }, "qtbx2q_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbx2.v16i8") }, "qtbl3_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbl3.v8i8") }, "qtbl3_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbl3.v8i8") }, "qtbl3q_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, 
definition: Named("llvm.aarch64.neon.tbl3.v16i8") }, "qtbl3q_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbl3.v16i8") }, "qtbx3_s8" => Intrinsic { - inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbx3.v8i8") }, "qtbx3_u8" => Intrinsic { - inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbx3.v8i8") }, "qtbx3q_s8" => Intrinsic { - inputs: vec![v(i(8), 16), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbx3.v16i8") }, "qtbx3q_u8" => Intrinsic { - inputs: vec![v(u(8), 16), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { 
static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbx3.v16i8") }, "qtbl4_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbl4.v8i8") }, "qtbl4_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbl4.v8i8") }, "qtbl4q_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbl4.v16i8") }, "qtbl4q_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbl4.v16i8") }, "qtbx4_s8" => Intrinsic { - inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 16), v(i(8), 16), 
v(i(8), 16), v(i(8), 16)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.aarch64.neon.tbx4.v8i8") }, "qtbx4_u8" => Intrinsic { - inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.aarch64.neon.tbx4.v8i8") }, "qtbx4q_s8" => Intrinsic { - inputs: vec![v(i(8), 16), agg(true, vec![v(i(8), 16), v(i(8), 16), v(i(8), 16), v(i(8), 16)]), v(u(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.aarch64.neon.tbx4.v16i8") }, "qtbx4q_u8" => Intrinsic { - inputs: vec![v(u(8), 16), agg(true, vec![v(u(8), 16), v(u(8), 16), v(u(8), 16), v(u(8), 16)]), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.aarch64.neon.tbx4.v16i8") }, _ => return None, diff --git a/src/librustc_platform_intrinsics/arm.rs b/src/librustc_platform_intrinsics/arm.rs index 166bf66d819c7..834528aaaa314 100644 --- a/src/librustc_platform_intrinsics/arm.rs +++ b/src/librustc_platform_intrinsics/arm.rs @@ -13,2074 +13,2073 @@ 
#![allow(unused_imports)] -use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void}; +use {Intrinsic, Type}; use IntrinsicDef::Named; -use rustc::middle::ty; // The default inlining settings trigger a pathological behaviour in // LLVM, which causes makes compilation very slow. See #28273. #[inline(never)] -pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option { +pub fn find(name: &str) -> Option { if !name.starts_with("arm_v") { return None } Some(match &name["arm_v".len()..] { "hadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vhadds.v8i8") }, "hadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vhaddu.v8i8") }, "hadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vhadds.v4i16") }, "hadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vhaddu.v4i16") }, "hadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vhadds.v2i32") }, "hadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vhaddu.v2i32") }, "haddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + 
inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vhadds.v16i8") }, "haddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vhaddu.v16i8") }, "haddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vhadds.v8i16") }, "haddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vhaddu.v8i16") }, "haddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vhadds.v4i32") }, "haddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vhaddu.v4i32") }, "rhadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vrhadds.v8i8") }, "rhadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vrhaddu.v8i8") }, "rhadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: 
&::I16x4, definition: Named("llvm.neon.vrhadds.v4i16") }, "rhadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vrhaddu.v4i16") }, "rhadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vrhadds.v2i32") }, "rhadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrhaddu.v2i32") }, "rhaddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vrhadds.v16i8") }, "rhaddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vrhaddu.v16i8") }, "rhaddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vrhadds.v8i16") }, "rhaddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vrhaddu.v8i16") }, "rhaddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vrhadds.v4i32") }, 
"rhaddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vrhaddu.v4i32") }, "qadd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqadds.v8i8") }, "qadd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqaddu.v8i8") }, "qadd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqadds.v4i16") }, "qadd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqaddu.v4i16") }, "qadd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqadds.v2i32") }, "qadd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqaddu.v2i32") }, "qadd_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vqadds.v1i64") }, "qadd_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: 
{ static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vqaddu.v1i64") }, "qaddq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vqadds.v16i8") }, "qaddq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vqaddu.v16i8") }, "qaddq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vqadds.v8i16") }, "qaddq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vqaddu.v8i16") }, "qaddq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vqadds.v4i32") }, "qaddq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vqaddu.v4i32") }, "qaddq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vqadds.v2i64") }, "qaddq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: 
&::U64x2, definition: Named("llvm.neon.vqaddu.v2i64") }, "raddhn_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vraddhn.v8i8") }, "raddhn_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vraddhn.v8i8") }, "raddhn_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vraddhn.v4i16") }, "raddhn_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vraddhn.v4i16") }, "raddhn_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vraddhn.v2i32") }, "raddhn_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vraddhn.v2i32") }, "fma_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.fma.v2f32") }, "fmaq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.fma.v4f32") }, "qdmulh_s16" => Intrinsic { - 
inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqdmulh.v4i16") }, "qdmulh_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqdmulh.v2i32") }, "qdmulhq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vsqdmulh.v8i16") }, "qdmulhq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vsqdmulh.v4i32") }, "qrdmulh_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqrdmulh.v4i16") }, "qrdmulh_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqrdmulh.v2i32") }, "qrdmulhq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vsqrdmulh.v8i16") }, "qrdmulhq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vsqrdmulh.v4i32") }, "mull_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - 
output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vmulls.v8i16") }, "mull_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vmullu.v8i16") }, "mull_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vmulls.v4i32") }, "mull_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vmullu.v4i32") }, "mull_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vmulls.v2i64") }, "mull_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vmullu.v2i64") }, "qdmullq_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vsqdmull.v8i16") }, "qdmullq_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vsqdmull.v4i32") }, "hsub_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; 
&INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vhsubs.v8i8") }, "hsub_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vhsubu.v8i8") }, "hsub_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vhsubs.v4i16") }, "hsub_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vhsubu.v4i16") }, "hsub_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vhsubs.v2i32") }, "hsub_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vhsubu.v2i32") }, "hsubq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vhsubs.v16i8") }, "hsubq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vhsubu.v16i8") }, "hsubq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vhsubs.v8i16") }, "hsubq_u16" => 
Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vhsubu.v8i16") }, "hsubq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vhsubs.v4i32") }, "hsubq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vhsubu.v4i32") }, "qsub_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqsubs.v8i8") }, "qsub_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqsubu.v8i8") }, "qsub_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqsubs.v4i16") }, "qsub_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqsubu.v4i16") }, "qsub_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqsubs.v2i32") }, "qsub_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static 
INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqsubu.v2i32") }, "qsub_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vqsubs.v1i64") }, "qsub_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vqsubu.v1i64") }, "qsubq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vqsubs.v16i8") }, "qsubq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vqsubu.v16i8") }, "qsubq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vqsubs.v8i16") }, "qsubq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vqsubu.v8i16") }, "qsubq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vqsubs.v4i32") }, "qsubq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, 
definition: Named("llvm.neon.vqsubu.v4i32") }, "qsubq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vqsubs.v2i64") }, "qsubq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vqsubu.v2i64") }, "rsubhn_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vrsubhn.v8i8") }, "rsubhn_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vrsubhn.v8i8") }, "rsubhn_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vrsubhn.v4i16") }, "rsubhn_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vrsubhn.v4i16") }, "rsubhn_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vrsubhn.v2i32") }, "rsubhn_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrsubhn.v2i32") }, "abd_s8" => Intrinsic { - 
inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vabds.v8i8") }, "abd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vabdu.v8i8") }, "abd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vabds.v4i16") }, "abd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vabdu.v4i16") }, "abd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vabds.v2i32") }, "abd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vabdu.v2i32") }, "abd_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vabdf.v2f32") }, "abdq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vabds.v16i8") }, "abdq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = 
[&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vabdu.v16i8") }, "abdq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vabds.v8i16") }, "abdq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vabdu.v8i16") }, "abdq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vabds.v4i32") }, "abdq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vabdu.v4i32") }, "abdq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vabdf.v4f32") }, "max_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vmaxs.v8i8") }, "max_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vmaxu.v8i8") }, "max_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vmaxs.v4i16") }, "max_u16" => 
Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vmaxu.v4i16") }, "max_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vmaxs.v2i32") }, "max_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vmaxu.v2i32") }, "max_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vmaxf.v2f32") }, "maxq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vmaxs.v16i8") }, "maxq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vmaxu.v16i8") }, "maxq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vmaxs.v8i16") }, "maxq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vmaxu.v8i16") }, "maxq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static 
INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vmaxs.v4i32") }, "maxq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vmaxu.v4i32") }, "maxq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vmaxf.v4f32") }, "min_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vmins.v8i8") }, "min_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vminu.v8i8") }, "min_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vmins.v4i16") }, "min_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vminu.v4i16") }, "min_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vmins.v2i32") }, "min_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: 
Named("llvm.neon.vminu.v2i32") }, "min_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vminf.v2f32") }, "minq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vmins.v16i8") }, "minq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vminu.v16i8") }, "minq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vmins.v8i16") }, "minq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vminu.v8i16") }, "minq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vmins.v4i32") }, "minq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vminu.v4i32") }, "minq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vminf.v4f32") }, "shl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], 
- output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vshls.v8i8") }, "shl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vshlu.v8i8") }, "shl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vshls.v4i16") }, "shl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vshlu.v4i16") }, "shl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vshls.v2i32") }, "shl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vshlu.v2i32") }, "shl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vshls.v1i64") }, "shl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vshlu.v1i64") }, "shlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: 
&::I8x16, definition: Named("llvm.neon.vshls.v16i8") }, "shlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vshlu.v16i8") }, "shlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vshls.v8i16") }, "shlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vshlu.v8i16") }, "shlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vshls.v4i32") }, "shlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vshlu.v4i32") }, "shlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vshls.v2i64") }, "shlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vshlu.v2i64") }, "qshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqshls.v8i8") }, "qshl_u8" => Intrinsic { - inputs: vec![v(u(8), 
8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqshlu.v8i8") }, "qshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqshls.v4i16") }, "qshl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqshlu.v4i16") }, "qshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqshls.v2i32") }, "qshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqshlu.v2i32") }, "qshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vqshls.v1i64") }, "qshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vqshlu.v1i64") }, "qshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vqshls.v16i8") }, "qshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = 
[&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vqshlu.v16i8") }, "qshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vqshls.v8i16") }, "qshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vqshlu.v8i16") }, "qshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vqshls.v4i32") }, "qshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vqshlu.v4i32") }, "qshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vqshls.v2i64") }, "qshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vqshlu.v2i64") }, "rshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vrshls.v8i8") }, "rshl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vrshlu.v8i8") 
}, "rshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vrshls.v4i16") }, "rshl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vrshlu.v4i16") }, "rshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vrshls.v2i32") }, "rshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrshlu.v2i32") }, "rshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vrshls.v1i64") }, "rshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vrshlu.v1i64") }, "rshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vrshls.v16i8") }, "rshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vrshlu.v16i8") }, "rshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: 
v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vrshls.v8i16") }, "rshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vrshlu.v8i16") }, "rshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vrshls.v4i32") }, "rshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vrshlu.v4i32") }, "rshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vrshls.v2i64") }, "rshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vrshlu.v2i64") }, "qrshl_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqrshls.v8i8") }, "qrshl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqrshlu.v8i8") }, "qrshl_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, 
+ output: &::I16x4, definition: Named("llvm.neon.vqrshls.v4i16") }, "qrshl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqrshlu.v4i16") }, "qrshl_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqrshls.v2i32") }, "qrshl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqrshlu.v2i32") }, "qrshl_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vqrshls.v1i64") }, "qrshl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vqrshlu.v1i64") }, "qrshlq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vqrshls.v16i8") }, "qrshlq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vqrshlu.v16i8") }, "qrshlq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vqrshls.v8i16") }, 
"qrshlq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vqrshlu.v8i16") }, "qrshlq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vqrshls.v4i32") }, "qrshlq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vqrshlu.v4i32") }, "qrshlq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vqrshls.v2i64") }, "qrshlq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vqrshlu.v2i64") }, "qshrun_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vsqshrun.v8i8") }, "qshrun_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqshrun.v4i16") }, "qshrun_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqshrun.v2i32") }, "qrshrun_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: 
v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vsqrshrun.v8i8") }, "qrshrun_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqrshrun.v4i16") }, "qrshrun_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqrshrun.v2i32") }, "qshrn_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqshrns.v8i8") }, "qshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqshrnu.v8i8") }, "qshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqshrns.v4i16") }, "qshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqshrnu.v4i16") }, "qshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqshrns.v2i32") }, "qshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, 
definition: Named("llvm.neon.vqshrnu.v2i32") }, "rshrn_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vrshrn.v8i8") }, "rshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vrshrn.v8i8") }, "rshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vrshrn.v4i16") }, "rshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vrshrn.v4i16") }, "rshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vrshrn.v2i32") }, "rshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrshrn.v2i32") }, "qrshrn_n_s16" => Intrinsic { - inputs: vec![v(i(16), 8), u(32)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqrshrns.v8i8") }, "qrshrn_n_u16" => Intrinsic { - inputs: vec![v(u(16), 8), u(32)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqrshrnu.v8i8") }, "qrshrn_n_s32" => Intrinsic { - inputs: vec![v(i(32), 4), u(32)], - output: v(i(16), 
4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqrshrns.v4i16") }, "qrshrn_n_u32" => Intrinsic { - inputs: vec![v(u(32), 4), u(32)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqrshrnu.v4i16") }, "qrshrn_n_s64" => Intrinsic { - inputs: vec![v(i(64), 2), u(32)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqrshrns.v2i32") }, "qrshrn_n_u64" => Intrinsic { - inputs: vec![v(u(64), 2), u(32)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqrshrnu.v2i32") }, "sri_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vvsri.v8i8") }, "sri_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vvsri.v8i8") }, "sri_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vvsri.v4i16") }, "sri_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vvsri.v4i16") }, "sri_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: 
Named("llvm.neon.vvsri.v2i32") }, "sri_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vvsri.v2i32") }, "sri_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vvsri.v1i64") }, "sri_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vvsri.v1i64") }, "sriq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vvsri.v16i8") }, "sriq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vvsri.v16i8") }, "sriq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vvsri.v8i16") }, "sriq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vvsri.v8i16") }, "sriq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vvsri.v4i32") }, "sriq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 
4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vvsri.v4i32") }, "sriq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vvsri.v2i64") }, "sriq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vvsri.v2i64") }, "sli_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vvsli.v8i8") }, "sli_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vvsli.v8i8") }, "sli_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vvsli.v4i16") }, "sli_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vvsli.v4i16") }, "sli_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vvsli.v2i32") }, "sli_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + 
output: &::U32x2, definition: Named("llvm.neon.vvsli.v2i32") }, "sli_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vvsli.v1i64") }, "sli_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vvsli.v1i64") }, "sliq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vvsli.v16i8") }, "sliq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vvsli.v16i8") }, "sliq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vvsli.v8i16") }, "sliq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vvsli.v8i16") }, "sliq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vvsli.v4i32") }, "sliq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vvsli.v4i32") }, "sliq_s64" => Intrinsic { - inputs: 
vec![v(i(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vvsli.v2i64") }, "sliq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vvsli.v2i64") }, "vqmovn_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vqxtns.v8i8") }, "vqmovn_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vqxtnu.v8i8") }, "vqmovn_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vqxtns.v4i16") }, "vqmovn_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vqxtnu.v4i16") }, "vqmovn_s64" => Intrinsic { - inputs: vec![v(i(64), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vqxtns.v2i32") }, "vqmovn_u64" => Intrinsic { - inputs: vec![v(u(64), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vqxtnu.v2i32") }, "abs_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vabs.v8i8") }, "abs_s16" => Intrinsic { - inputs: vec![v(i(16), 
4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vabs.v4i16") }, "abs_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vabs.v2i32") }, "absq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vabs.v16i8") }, "absq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vabs.v8i16") }, "absq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vabs.v4i32") }, "abs_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.fabs.v2f32") }, "absq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.fabs.v4f32") }, "qabs_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vsqabs.v8i8") }, "qabs_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqabs.v4i16") }, "qabs_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; 
&INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqabs.v2i32") }, "qabsq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vsqabs.v16i8") }, "qabsq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vsqabs.v8i16") }, "qabsq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vsqabs.v4i32") }, "qneg_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vsqneg.v8i8") }, "qneg_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vsqneg.v4i16") }, "qneg_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vsqneg.v2i32") }, "qnegq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vsqneg.v16i8") }, "qnegq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vsqneg.v8i16") }, "qnegq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: 
Named("llvm.neon.vsqneg.v4i32") }, "clz_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.ctlz.v8i8") }, "clz_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.ctlz.v8i8") }, "clz_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.ctlz.v4i16") }, "clz_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.ctlz.v4i16") }, "clz_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.ctlz.v2i32") }, "clz_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.ctlz.v2i32") }, "clzq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.ctlz.v16i8") }, "clzq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.ctlz.v16i8") }, "clzq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.ctlz.v8i16") }, "clzq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(16), 8), + inputs: { static 
INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.ctlz.v8i16") }, "clzq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.ctlz.v4i32") }, "clzq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.ctlz.v4i32") }, "cls_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vcls.v8i8") }, "cls_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vcls.v8i8") }, "cls_s16" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vcls.v4i16") }, "cls_u16" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vcls.v4i16") }, "cls_s32" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vcls.v2i32") }, "cls_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vcls.v2i32") }, "clsq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: 
Named("llvm.neon.vcls.v16i8") }, "clsq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vcls.v16i8") }, "clsq_s16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vcls.v8i16") }, "clsq_u16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vcls.v8i16") }, "clsq_s32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vcls.v4i32") }, "clsq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vcls.v4i32") }, "cnt_s8" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.ctpop.v8i8") }, "cnt_u8" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.ctpop.v8i8") }, "cntq_s8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.ctpop.v16i8") }, "cntq_u8" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.ctpop.v16i8") }, "recpe_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: 
v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrecpe.v2i32") }, "recpe_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vrecpe.v2f32") }, "recpeq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vrecpe.v4i32") }, "recpeq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vrecpe.v4f32") }, "recps_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vfrecps.v2f32") }, "recpsq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vfrecps.v4f32") }, "sqrt_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.sqrt.v2f32") }, "sqrtq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.sqrt.v4f32") }, "rsqrte_u32" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vrsqrte.v2i32") }, "rsqrte_f32" => Intrinsic { - inputs: vec![v(f(32), 2)], - output: 
v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vrsqrte.v2f32") }, "rsqrteq_u32" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vrsqrte.v4i32") }, "rsqrteq_f32" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vrsqrte.v4f32") }, "rsqrts_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vrsqrts.v2f32") }, "rsqrtsq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vrsqrts.v4f32") }, "bsl_s8" => Intrinsic { - inputs: vec![v(u(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vbsl.v8i8") }, "bsl_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vbsl.v8i8") }, "bsl_s16" => Intrinsic { - inputs: vec![v(u(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vbsl.v4i16") }, "bsl_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: 
Named("llvm.neon.vbsl.v4i16") }, "bsl_s32" => Intrinsic { - inputs: vec![v(u(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vbsl.v2i32") }, "bsl_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vbsl.v2i32") }, "bsl_s64" => Intrinsic { - inputs: vec![v(u(64), 1), v(i(64), 1)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vbsl.v1i64") }, "bsl_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(64), 1)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vbsl.v1i64") }, "bslq_s8" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vbsl.v16i8") }, "bslq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vbsl.v16i8") }, "bslq_s16" => Intrinsic { - inputs: vec![v(u(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vbsl.v8i16") }, "bslq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vbsl.v8i16") }, "bslq_s32" => Intrinsic { - inputs: vec![v(u(32), 4), v(i(32), 4)], - 
output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vbsl.v4i32") }, "bslq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vbsl.v4i32") }, "bslq_s64" => Intrinsic { - inputs: vec![v(u(64), 2), v(i(64), 2)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vbsl.v2i64") }, "bslq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vbsl.v2i64") }, "padd_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vpadd.v8i8") }, "padd_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vpadd.v8i8") }, "padd_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vpadd.v4i16") }, "padd_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vpadd.v4i16") }, "padd_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: 
&::I32x2, definition: Named("llvm.neon.vpadd.v2i32") }, "padd_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vpadd.v2i32") }, "padd_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vpadd.v2f32") }, "paddl_s16" => Intrinsic { - inputs: vec![v(i(8), 8)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vpaddls.v4i16.v8i8") }, "paddl_u16" => Intrinsic { - inputs: vec![v(u(8), 8)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vpaddlu.v4i16.v8i8") }, "paddl_s32" => Intrinsic { - inputs: vec![v(i(16), 4)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vpaddls.v2i32.v4i16") }, "paddl_u32" => Intrinsic { - inputs: vec![v(u(16), 4)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vpaddlu.v2i32.v4i16") }, "paddl_s64" => Intrinsic { - inputs: vec![v(i(32), 2)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vpaddls.v1i64.v2i32") }, "paddl_u64" => Intrinsic { - inputs: vec![v(u(32), 2)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vpaddlu.v1i64.v2i32") }, "paddlq_s16" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = 
[&::I8x16]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vpaddls.v8i16.v16i8") }, "paddlq_u16" => Intrinsic { - inputs: vec![v(u(8), 16)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vpaddlu.v8i16.v16i8") }, "paddlq_s32" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vpaddls.v4i32.v8i16") }, "paddlq_u32" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vpaddlu.v4i32.v8i16") }, "paddlq_s64" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vpaddls.v2i64.v4i32") }, "paddlq_u64" => Intrinsic { - inputs: vec![v(u(32), 4)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vpaddlu.v2i64.v4i32") }, "padal_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(8), 8)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I8x8]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vpadals.v4i16.v4i16") }, "padal_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(8), 8)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U8x8]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vpadalu.v4i16.v4i16") }, "padal_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(16), 4)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I16x4]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vpadals.v2i32.v2i32") }, "padal_u32" => Intrinsic { - inputs: 
vec![v(u(32), 2), v(u(16), 4)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U16x4]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vpadalu.v2i32.v2i32") }, "padal_s64" => Intrinsic { - inputs: vec![v(i(64), 1), v(i(32), 2)], - output: v(i(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I32x2]; &INPUTS }, + output: &::I64x1, definition: Named("llvm.neon.vpadals.v1i64.v1i64") }, "padal_u64" => Intrinsic { - inputs: vec![v(u(64), 1), v(u(32), 2)], - output: v(u(64), 1), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U32x2]; &INPUTS }, + output: &::U64x1, definition: Named("llvm.neon.vpadalu.v1i64.v1i64") }, "padalq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(8), 16)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I8x16]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vpadals.v8i16.v8i16") }, "padalq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(8), 16)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U8x16]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vpadalu.v8i16.v8i16") }, "padalq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(16), 8)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I16x8]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vpadals.v4i32.v4i32") }, "padalq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(16), 8)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U16x8]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vpadalu.v4i32.v4i32") }, "padalq_s64" => Intrinsic { - inputs: vec![v(i(64), 2), v(i(32), 4)], - output: v(i(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I32x4]; &INPUTS }, + output: &::I64x2, definition: Named("llvm.neon.vpadals.v2i64.v2i64") }, "padalq_u64" => Intrinsic { - inputs: vec![v(u(64), 2), 
v(u(32), 4)], - output: v(u(64), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32x4]; &INPUTS }, + output: &::U64x2, definition: Named("llvm.neon.vpadalu.v2i64.v2i64") }, "pmax_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vpmaxs.v8i8") }, "pmax_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vpmaxu.v8i8") }, "pmax_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vpmaxs.v4i16") }, "pmax_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vpmaxu.v4i16") }, "pmax_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vpmaxs.v2i32") }, "pmax_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vpmaxu.v2i32") }, "pmax_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vpmaxf.v2f32") }, "pmin_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, 
&::I8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vpmins.v8i8") }, "pmin_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vpminu.v8i8") }, "pmin_s16" => Intrinsic { - inputs: vec![v(i(16), 4), v(i(16), 4)], - output: v(i(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, + output: &::I16x4, definition: Named("llvm.neon.vpmins.v4i16") }, "pmin_u16" => Intrinsic { - inputs: vec![v(u(16), 4), v(u(16), 4)], - output: v(u(16), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, + output: &::U16x4, definition: Named("llvm.neon.vpminu.v4i16") }, "pmin_s32" => Intrinsic { - inputs: vec![v(i(32), 2), v(i(32), 2)], - output: v(i(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, + output: &::I32x2, definition: Named("llvm.neon.vpmins.v2i32") }, "pmin_u32" => Intrinsic { - inputs: vec![v(u(32), 2), v(u(32), 2)], - output: v(u(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, + output: &::U32x2, definition: Named("llvm.neon.vpminu.v2i32") }, "pmin_f32" => Intrinsic { - inputs: vec![v(f(32), 2), v(f(32), 2)], - output: v(f(32), 2), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, + output: &::F32x2, definition: Named("llvm.neon.vpminf.v2f32") }, "pminq_s8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, definition: Named("llvm.neon.vpmins.v16i8") }, "pminq_u8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, definition: Named("llvm.neon.vpminu.v16i8") }, 
"pminq_s16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, definition: Named("llvm.neon.vpmins.v8i16") }, "pminq_u16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, definition: Named("llvm.neon.vpminu.v8i16") }, "pminq_s32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, definition: Named("llvm.neon.vpmins.v4i32") }, "pminq_u32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, definition: Named("llvm.neon.vpminu.v4i32") }, "pminq_f32" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.neon.vpminf.v4f32") }, "tbl1_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbl1") }, "tbl1_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbl1") }, "tbx1_s8" => Intrinsic { - inputs: vec![v(i(8), 8), v(i(8), 8), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbx1") }, "tbx1_u8" => Intrinsic { - inputs: vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)], - output: v(u(8), 8), + 
inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbx1") }, "tbl2_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbl2") }, "tbl2_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbl2") }, "tbx2_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbx2") }, "tbx2_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbx2") }, "tbl3_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbl3") }, "tbl3_u8" => 
Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbl3") }, "tbx3_s8" => Intrinsic { - inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbx3") }, "tbx3_u8" => Intrinsic { - inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbx3") }, "tbl4_s8" => Intrinsic { - inputs: vec![agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbl4") }, "tbl4_u8" => Intrinsic { - inputs: vec![agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbl4") }, 
"tbx4_s8" => Intrinsic { - inputs: vec![v(i(8), 8), agg(true, vec![v(i(8), 8), v(i(8), 8), v(i(8), 8), v(i(8), 8)]), v(u(8), 8)], - output: v(i(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::I8x8, definition: Named("llvm.neon.vtbx4") }, "tbx4_u8" => Intrinsic { - inputs: vec![v(u(8), 8), agg(true, vec![v(u(8), 8), v(u(8), 8), v(u(8), 8), v(u(8), 8)]), v(u(8), 8)], - output: v(u(8), 8), + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, + output: &::U8x8, definition: Named("llvm.neon.vtbx4") }, _ => return None, diff --git a/src/librustc_platform_intrinsics/lib.rs b/src/librustc_platform_intrinsics/lib.rs index e857434682d15..6fe1f0c2b9cc6 100644 --- a/src/librustc_platform_intrinsics/lib.rs +++ b/src/librustc_platform_intrinsics/lib.rs @@ -12,16 +12,13 @@ #![unstable(feature = "rustc_private", issue = "27812")] #![crate_type = "dylib"] #![crate_type = "rlib"] -#![feature(staged_api, rustc_private)] - -extern crate rustc_llvm as llvm; -extern crate rustc; - -use rustc::middle::ty; +#![feature(staged_api)] +#![cfg_attr(not(stage0), deny(warnings))] +#![allow(bad_style)] pub struct Intrinsic { - pub inputs: Vec, - pub output: Type, + pub inputs: &'static [&'static Type], + pub output: &'static Type, pub definition: IntrinsicDef, } @@ -31,47 +28,82 @@ pub enum Type { Void, Integer(/* signed */ bool, u8, /* llvm width */ u8), Float(u8), - Pointer(Box, Option>, /* const */ bool), - Vector(Box, Option>, u8), - Aggregate(bool, Vec), + Pointer(&'static Type, Option<&'static Type>, /* const */ bool), + Vector(&'static Type, Option<&'static Type>, u8), + Aggregate(bool, &'static [&'static Type]), } pub enum IntrinsicDef { 
Named(&'static str), } -fn i(width: u8) -> Type { Type::Integer(true, width, width) } -fn i_(width: u8, llvm_width: u8) -> Type { Type::Integer(true, width, llvm_width) } -fn u(width: u8) -> Type { Type::Integer(false, width, width) } -#[allow(dead_code)] -fn u_(width: u8, llvm_width: u8) -> Type { Type::Integer(false, width, llvm_width) } -fn f(width: u8) -> Type { Type::Float(width) } -fn v(x: Type, length: u8) -> Type { Type::Vector(Box::new(x), None, length) } -fn v_(x: Type, bitcast: Type, length: u8) -> Type { - Type::Vector(Box::new(x), Some(Box::new(bitcast)), length) -} -fn agg(flatten: bool, types: Vec) -> Type { - Type::Aggregate(flatten, types) -} -fn p(const_: bool, elem: Type, llvm_elem: Option) -> Type { - Type::Pointer(Box::new(elem), llvm_elem.map(Box::new), const_) -} -fn void() -> Type { - Type::Void -} +static I8: Type = Type::Integer(true, 8, 8); +static I16: Type = Type::Integer(true, 16, 16); +static I32: Type = Type::Integer(true, 32, 32); +static I64: Type = Type::Integer(true, 64, 64); +static U8: Type = Type::Integer(false, 8, 8); +static U16: Type = Type::Integer(false, 16, 16); +static U32: Type = Type::Integer(false, 32, 32); +static U64: Type = Type::Integer(false, 64, 64); +static F32: Type = Type::Float(32); +static F64: Type = Type::Float(64); + +static I32_8: Type = Type::Integer(true, 32, 8); + +static I8x8: Type = Type::Vector(&I8, None, 8); +static U8x8: Type = Type::Vector(&U8, None, 8); +static I8x16: Type = Type::Vector(&I8, None, 16); +static U8x16: Type = Type::Vector(&U8, None, 16); +static I8x32: Type = Type::Vector(&I8, None, 32); +static U8x32: Type = Type::Vector(&U8, None, 32); + +static I16x4: Type = Type::Vector(&I16, None, 4); +static U16x4: Type = Type::Vector(&U16, None, 4); +static I16x8: Type = Type::Vector(&I16, None, 8); +static U16x8: Type = Type::Vector(&U16, None, 8); +static I16x16: Type = Type::Vector(&I16, None, 16); +static U16x16: Type = Type::Vector(&U16, None, 16); + +static I32x2: Type = 
Type::Vector(&I32, None, 2); +static U32x2: Type = Type::Vector(&U32, None, 2); +static I32x4: Type = Type::Vector(&I32, None, 4); +static U32x4: Type = Type::Vector(&U32, None, 4); +static I32x8: Type = Type::Vector(&I32, None, 8); +static U32x8: Type = Type::Vector(&U32, None, 8); + +static I64x1: Type = Type::Vector(&I64, None, 1); +static U64x1: Type = Type::Vector(&U64, None, 1); +static I64x2: Type = Type::Vector(&I64, None, 2); +static U64x2: Type = Type::Vector(&U64, None, 2); +static I64x4: Type = Type::Vector(&I64, None, 4); +static U64x4: Type = Type::Vector(&U64, None, 4); + +static F32x2: Type = Type::Vector(&F32, None, 2); +static F32x4: Type = Type::Vector(&F32, None, 4); +static F32x8: Type = Type::Vector(&F32, None, 8); +static F64x1: Type = Type::Vector(&F64, None, 1); +static F64x2: Type = Type::Vector(&F64, None, 2); +static F64x4: Type = Type::Vector(&F64, None, 4); + +static I32x4_F32: Type = Type::Vector(&I32, Some(&F32), 4); +static I32x8_F32: Type = Type::Vector(&I32, Some(&F32), 8); +static I64x2_F64: Type = Type::Vector(&I64, Some(&F64), 2); +static I64x4_F64: Type = Type::Vector(&I64, Some(&F64), 4); + +static VOID: Type = Type::Void; mod x86; mod arm; mod aarch64; impl Intrinsic { - pub fn find<'tcx>(tcx: &ty::ctxt<'tcx>, name: &str) -> Option { + pub fn find(name: &str) -> Option { if name.starts_with("x86_") { - x86::find(tcx, name) + x86::find(name) } else if name.starts_with("arm_") { - arm::find(tcx, name) + arm::find(name) } else if name.starts_with("aarch64_") { - aarch64::find(tcx, name) + aarch64::find(name) } else { None } diff --git a/src/librustc_platform_intrinsics/x86.rs b/src/librustc_platform_intrinsics/x86.rs index 144fd5f4e761b..91ecad8402fd4 100644 --- a/src/librustc_platform_intrinsics/x86.rs +++ b/src/librustc_platform_intrinsics/x86.rs @@ -13,1100 +13,1339 @@ #![allow(unused_imports)] -use {Intrinsic, i, i_, u, u_, f, v, v_, agg, p, void}; +use {Intrinsic, Type}; use IntrinsicDef::Named; -use rustc::middle::ty; // 
The default inlining settings trigger a pathological behaviour in // LLVM, which causes makes compilation very slow. See #28273. #[inline(never)] -pub fn find<'tcx>(_tcx: &ty::ctxt<'tcx>, name: &str) -> Option { - if !name.starts_with("x86_mm") { return None } - Some(match &name["x86_mm".len()..] { - "_movemask_ps" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: i(32), - definition: Named("llvm.x86.sse.movmsk.ps") - }, - "_max_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse.max.ps") - }, - "_min_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse.min.ps") - }, - "_rsqrt_ps" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse.rsqrt.ps") - }, - "_rcp_ps" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse.rcp.ps") - }, - "_sqrt_ps" => Intrinsic { - inputs: vec![v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.sqrt.v4f32") - }, - "_storeu_ps" => Intrinsic { - inputs: vec![p(false, f(32), Some(i(8))), v(f(32), 4)], - output: void(), - definition: Named("llvm.x86.sse.storeu.ps") - }, - "_adds_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse2.padds.b") - }, - "_adds_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.paddus.b") - }, - "_adds_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.padds.w") - }, - "_adds_epu16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse2.paddus.w") - }, - "_avg_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.pavg.b") - }, - "_avg_epu16" => 
Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse2.pavg.w") - }, - "_lfence" => Intrinsic { - inputs: vec![], - output: void(), - definition: Named("llvm.x86.sse2.lfence") - }, - "_madd_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(32), 4), - definition: Named("llvm.x86.sse2.pmadd.wd") - }, - "_maskmoveu_si128" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16), p(false, u(8), None)], - output: void(), - definition: Named("llvm.x86.sse2.maskmov.dqu") - }, - "_max_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.pmaxs.w") - }, - "_max_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.pmaxu.b") - }, - "_max_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse2.max.pd") +pub fn find(name: &str) -> Option { + if !name.starts_with("x86") { return None } + Some(match &name["x86".len()..] 
{ + "_mm256_abs_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.pabs.b") }, - "_mfence" => Intrinsic { - inputs: vec![], - output: void(), - definition: Named("llvm.x86.sse2.fence") + "_mm256_abs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pabs.w") }, - "_min_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.pmins.w") + "_mm256_abs_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.pabs.d") }, - "_min_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.pminu.b") + "_mm256_adds_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.padds.b") }, - "_min_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse2.min.pd") + "_mm256_adds_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.paddus.b") }, - "_movemask_pd" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: i(32), - definition: Named("llvm.x86.sse2.movmsk.pd") + "_mm256_adds_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.padds.w") }, - "_movemask_epi8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: i(32), - definition: Named("llvm.x86.sse2.pmovmskb.128") + "_mm256_adds_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: 
&::U16x16, + definition: Named("llvm.x86.avx2.paddus.w") }, - "_mul_epu32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(64), 2), - definition: Named("llvm.x86.sse2.pmulu.dq") + "_mm256_avg_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.pavg.b") }, - "_mulhi_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.pmulh.w") + "_mm256_avg_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.pavg.w") }, - "_mulhi_epu16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse2.pmulhu.w") + "_mm256_hadd_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.phadd.w") }, - "_packs_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse2.packsswb.128") + "_mm256_hadd_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.phadd.d") }, - "_packs_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.packssdw.128") + "_mm256_hadds_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.phadd.sw") }, - "_packus_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.packuswb.128") + "_mm256_hsub_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: 
&::I16x16, + definition: Named("llvm.x86.avx2.phsub.w") }, - "_sad_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(64), 2), - definition: Named("llvm.x86.sse2.psad.bw") + "_mm256_hsub_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.phsub.d") }, - "_sfence" => Intrinsic { - inputs: vec![], - output: void(), - definition: Named("llvm.x86.sse2.sfence") + "_mm256_hsubs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.phsub.sw") }, - "_sqrt_pd" => Intrinsic { - inputs: vec![v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.sqrt.v2f64") + "_mm256_madd_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.pmadd.wd") }, - "_storeu_pd" => Intrinsic { - inputs: vec![p(false, f(64), Some(u(8))), v(f(64), 2)], - output: void(), - definition: Named("llvm.x86.sse2.storeu.pd") + "_mm256_maddubs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pmadd.ub.sw") }, - "_storeu_si128" => Intrinsic { - inputs: vec![p(false, v(u(8), 16), Some(u(8))), v(u(8), 16)], - output: void(), - definition: Named("llvm.x86.sse2.storeu.dq") + "_mm_mask_i32gather_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4, &::I32_8]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx2.gather.d.d") }, - "_subs_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse2.psubs.b") + "_mm_mask_i32gather_ps" => Intrinsic { + inputs: { static INPUTS: 
[&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4_F32, &::I32_8]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.avx2.gather.d.ps") }, - "_subs_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16)], - output: v(u(8), 16), - definition: Named("llvm.x86.sse2.psubus.b") + "_mm256_mask_i32gather_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I32x8, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8, &::I32_8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.gather.d.d.256") }, - "_subs_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.sse2.psubs.w") + "_mm256_mask_i32gather_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F32x8, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8_F32, &::I32_8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx2.gather.d.ps.256") }, - "_subs_epu16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse2.psubus.w") + "_mm_mask_i32gather_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2, &::I32_8]; &INPUTS }, + output: &::I64x2, + definition: Named("llvm.x86.avx2.gather.d.q") }, - "_addsub_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse3.addsub.ps") + "_mm_mask_i32gather_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2_F64, &::I32_8]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.avx2.gather.d.pd") }, - "_addsub_pd" => Intrinsic { - inputs: 
vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse3.addsub.pd") + "_mm256_mask_i32gather_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4, &::I32_8]; &INPUTS }, + output: &::I64x4, + definition: Named("llvm.x86.avx2.gather.d.q.256") }, - "_hadd_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse3.hadd.ps") + "_mm256_mask_i32gather_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4_F64, &::I32_8]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx2.gather.d.pd.256") }, - "_hadd_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse3.hadd.pd") + "_mm_mask_i64gather_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4, &::I32_8]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx2.gather.q.d") }, - "_hsub_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse3.hsub.ps") + "_mm_mask_i64gather_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4_F32, &::I32_8]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.avx2.gather.q.ps") }, - "_hsub_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse3.hsub.pd") + "_mm256_mask_i64gather_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, 
&::I64x4, &::I32x4, &::I32_8]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx2.gather.q.d") }, - "_lddqu_si128" => Intrinsic { - inputs: vec![p(true, v(u(8), 16), Some(i(8)))], - output: v(u(8), 16), - definition: Named("llvm.x86.sse3.ldu.dq") + "_mm256_mask_i64gather_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x4, &::I32x4_F32, &::I32_8]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.avx2.gather.q.ps") }, - "_abs_epi8" => Intrinsic { - inputs: vec![v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.ssse3.pabs.b.128") + "_mm_mask_i64gather_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2, &::I32_8]; &INPUTS }, + output: &::I64x2, + definition: Named("llvm.x86.avx2.gather.q.q") }, - "_abs_epi16" => Intrinsic { - inputs: vec![v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.pabs.w.128") + "_mm_mask_i64gather_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2_F64, &::I32_8]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.avx2.gather.q.pd") }, - "_abs_epi32" => Intrinsic { - inputs: vec![v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.ssse3.pabs.d.128") + "_mm256_mask_i64gather_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4, &::I32_8]; &INPUTS }, + output: &::I64x4, + definition: Named("llvm.x86.avx2.gather.q.q.256") }, - "_hadd_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.phadd.w.128") + "_mm256_mask_i64gather_pd" => 
Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4_F64, &::I32_8]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx2.gather.q.pd.256") }, - "_hadd_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.ssse3.phadd.d.128") + "_mm_maskload_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x4, Some(&::I8), true); &PTR }, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx2.maskload.d") }, - "_hadds_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.phadd.sw.128") + "_mm_maskload_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x2, Some(&::I8), true); &PTR }, &::I64x2]; &INPUTS }, + output: &::I64x2, + definition: Named("llvm.x86.avx2.maskload.q") }, - "_hsub_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.phsub.w.128") + "_mm256_maskload_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x8, Some(&::I8), true); &PTR }, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.maskload.d.256") }, - "_hsub_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.ssse3.phsub.d.128") + "_mm256_maskload_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x4, Some(&::I8), true); &PTR }, &::I64x4]; &INPUTS }, + output: &::I64x4, + definition: Named("llvm.x86.avx2.maskload.q.256") }, - "_hsubs_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: 
Named("llvm.x86.ssse3.phsub.sw.128") + "_mm_maskstore_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x4, &::I32x4]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.avx2.maskstore.d") }, - "_maddubs_epi16" => Intrinsic { - inputs: vec![v(u(8), 16), v(i(8), 16)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.pmadd.ub.sw.128") + "_mm_maskstore_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x2, &::I64x2]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.avx2.maskstore.q") }, - "_mulhrs_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: Named("llvm.x86.ssse3.pmul.hr.sw.128") + "_mm256_maskstore_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x8, &::I32x8]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.avx2.maskstore.d.256") }, - "_shuffle_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.ssse3.pshuf.b.128") + "_mm256_maskstore_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x4, &::I64x4]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.avx2.maskstore.q.256") }, - "_sign_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.ssse3.psign.b.128") + "_mm256_max_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.pmaxs.b") }, - "_sign_epi16" => Intrinsic { - inputs: vec![v(i(16), 8), v(i(16), 8)], - output: v(i(16), 8), - definition: 
Named("llvm.x86.ssse3.psign.w.128") + "_mm256_max_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.pmaxu.b") }, - "_sign_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.ssse3.psign.d.128") + "_mm256_max_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pmaxs.w") }, - "_dp_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4), i_(32, 8)], - output: v(f(32), 4), - definition: Named("llvm.x86.sse41.dpps") + "_mm256_max_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.pmaxu.w") }, - "_dp_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2), i_(32, 8)], - output: v(f(64), 2), - definition: Named("llvm.x86.sse41.dppd") + "_mm256_max_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.pmaxs.d") }, - "_max_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse41.pmaxsb") + "_mm256_max_epu32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, + output: &::U32x8, + definition: Named("llvm.x86.avx2.pmaxu.d") }, - "_max_epu16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse41.pmaxuw") + "_mm256_min_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.pmins.b") }, - "_max_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), - definition: 
Named("llvm.x86.sse41.pmaxsd") + "_mm256_min_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.pminu.b") }, - "_max_epu32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), - definition: Named("llvm.x86.sse41.pmaxud") + "_mm256_min_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pmins.w") }, - "_min_epi8" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse41.pminsb") + "_mm256_min_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.pminu.w") }, - "_min_epu16" => Intrinsic { - inputs: vec![v(u(16), 8), v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse41.pminuw") + "_mm256_min_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.pmins.d") }, - "_min_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.sse41.pminsd") + "_mm256_min_epu32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, + output: &::U32x8, + definition: Named("llvm.x86.avx2.pminu.d") }, - "_min_epu32" => Intrinsic { - inputs: vec![v(u(32), 4), v(u(32), 4)], - output: v(u(32), 4), - definition: Named("llvm.x86.sse41.pminud") + "_mm256_movemask_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.avx2.pmovmskb") }, - "_minpos_epu16" => Intrinsic { - inputs: vec![v(u(16), 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse41.phminposuw") + "_mm256_mpsadbw_epu8" => 
Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x32, &::U8x32, &::I32_8]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.mpsadbw") }, - "_mpsadbw_epu8" => Intrinsic { - inputs: vec![v(u(8), 16), v(u(8), 16), i_(32, 8)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse41.mpsadbw") + "_mm256_mul_epi64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I64x4, + definition: Named("llvm.x86.avx2.pmulq.dq") }, - "_mul_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(i(64), 2), - definition: Named("llvm.x86.sse41.pmuldq") + "_mm256_mul_epu64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, + output: &::U64x4, + definition: Named("llvm.x86.avx2.pmulq.dq") }, - "_packus_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), v(i(32), 4)], - output: v(u(16), 8), - definition: Named("llvm.x86.sse41.packusdw") + "_mm256_mulhi_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pmulhw.w") }, - "_testc_si128" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: i(32), - definition: Named("llvm.x86.sse41.ptestc") + "_mm256_mulhi_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.pmulhw.w") }, - "_testnzc_si128" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: i(32), - definition: Named("llvm.x86.sse41.ptestnzc") + "_mm256_mulhrs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.pmul.hr.sw") }, - "_testz_si128" => Intrinsic { - inputs: vec![v(u(64), 2), v(u(64), 2)], - output: i(32), - definition: Named("llvm.x86.sse41.ptestz") + "_mm256_packs_epi16" => 
Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.packsswb") }, - "_cmpestra" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestria128") + "_mm256_packus_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.packuswb") }, - "_cmpestrc" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestric128") + "_mm256_packs_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.packssdw") }, - "_cmpestri" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestri128") + "_mm256_packus_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.packusdw") }, - "_cmpestrm" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse42.pcmpestrm128") + "_mm256_permutevar8x32_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.permd") }, - "_cmpestro" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestrio128") + "_mm256_permutevar8x32_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx2.permps") }, - "_cmpestrs" => Intrinsic { - inputs: 
vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestris128") + "_mm256_sad_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.psad.bw") }, - "_cmpestrz" => Intrinsic { - inputs: vec![v(i(8), 16), i(32), v(i(8), 16), i(32), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpestriz128") + "_mm256_shuffle_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.pshuf.b") }, - "_cmpistra" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistria128") + "_mm256_sign_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: Named("llvm.x86.avx2.psign.b") }, - "_cmpistrc" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistric128") + "_mm256_sign_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.psign.w") }, - "_cmpistri" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistri128") + "_mm256_sign_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx2.psign.d") }, - "_cmpistrm" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: v(i(8), 16), - definition: Named("llvm.x86.sse42.pcmpistrm128") + "_mm256_subs_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, + output: &::I8x32, + definition: 
Named("llvm.x86.avx2.psubs.b") }, - "_cmpistro" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistrio128") + "_mm256_subs_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, + output: &::U8x32, + definition: Named("llvm.x86.avx2.psubus.b") }, - "_cmpistrs" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistris128") + "_mm256_subs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, + output: &::I16x16, + definition: Named("llvm.x86.avx2.psubs.w") }, - "_cmpistrz" => Intrinsic { - inputs: vec![v(i(8), 16), v(i(8), 16), i_(32, 8)], - output: i(32), - definition: Named("llvm.x86.sse42.pcmpistriz128") + "_mm256_subs_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, + output: &::U16x16, + definition: Named("llvm.x86.avx2.psubus.w") }, - "256_addsub_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: v(f(32), 8), + "_mm256_addsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.addsub.ps.256") }, - "256_addsub_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: v(f(64), 4), + "_mm256_addsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.addsub.pd.256") }, - "256_dp_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8), i_(32, 8)], - output: v(f(32), 8), + "_mm256_blendv_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx.blendv.ps.256") + }, + "_mm256_blendv_pd" => Intrinsic { + inputs: { static INPUTS: 
[&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx.blendv.pd.256") + }, + "_mm256_broadcast_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx.vbroadcastf128.ps.256") + }, + "_mm256_broadcast_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx.vbroadcastf128.pd.256") + }, + "_mm256_cmp_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx.cmp.ps.256") + }, + "_mm256_cmp_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::I8]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx.cmp.pd.256") + }, + "_mm256_cvtepi32_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx.cvtdq2.pd.256") + }, + "_mm256_cvtepi32_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.avx.cvtdq2.ps.256") + }, + "_mm256_cvtpd_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx.cvt.pd2dq.256") + }, + "_mm256_cvtpd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.avx.cvt.pd2.ps.256") + }, + "_mm256_cvtps_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx.cvt.ps2dq.256") + }, + "_mm256_cvtps_pd" => Intrinsic { + inputs: { static INPUTS: 
[&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.avx.cvt.ps2.pd.256") + }, + "_mm256_cvttpd_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.avx.cvtt.pd2dq.256") + }, + "_mm256_cvttps_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::I32x8, + definition: Named("llvm.x86.avx.cvtt.ps2dq.256") + }, + "_mm256_dp_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I32_8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.dp.ps.256") }, - "256_hadd_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: v(f(32), 8), + "_mm256_hadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.hadd.ps.256") }, - "256_hadd_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: v(f(64), 4), + "_mm256_hadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.hadd.pd.256") }, - "256_hsub_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: v(f(32), 8), + "_mm256_hsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.hsub.ps.256") }, - "256_hsub_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: v(f(64), 4), + "_mm256_hsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.hsub.pd.256") }, - "256_max_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: v(f(32), 8), + "_mm256_max_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; 
&INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.max.ps.256") }, - "256_max_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: v(f(64), 4), + "_mm256_max_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.max.pd.256") }, - "_maskload_ps" => Intrinsic { - inputs: vec![p(true, f(32), Some(i(8))), v_(i(32), f(32), 4)], - output: v(f(32), 4), + "_mm_maskload_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4_F32]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.x86.avx.maskload.ps") }, - "_maskload_pd" => Intrinsic { - inputs: vec![p(true, f(64), Some(i(8))), v_(i(64), f(64), 2)], - output: v(f(64), 2), + "_mm_maskload_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2_F64]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.x86.avx.maskload.pd") }, - "256_maskload_ps" => Intrinsic { - inputs: vec![p(true, f(32), Some(i(8))), v_(i(32), f(32), 8)], - output: v(f(32), 8), + "_mm256_maskload_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8_F32]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.maskload.ps.256") }, - "256_maskload_pd" => Intrinsic { - inputs: vec![p(true, f(64), Some(i(8))), v_(i(64), f(64), 4)], - output: v(f(64), 4), + "_mm256_maskload_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4_F64]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.maskload.pd.256") }, - "_maskstore_ps" => Intrinsic { - inputs: vec![p(false, f(32), Some(i(8))), v_(i(32), f(32), 4), v(f(32), 4)], - output: void(), + 
"_mm_maskstore_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x4_F32, &::F32x4]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.maskstore.ps") }, - "_maskstore_pd" => Intrinsic { - inputs: vec![p(false, f(64), Some(i(8))), v_(i(64), f(64), 2), v(f(64), 2)], - output: void(), + "_mm_maskstore_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x2_F64, &::F64x2]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.maskstore.pd") }, - "256_maskstore_ps" => Intrinsic { - inputs: vec![p(false, f(32), Some(i(8))), v_(i(32), f(32), 8), v(f(32), 8)], - output: void(), + "_mm256_maskstore_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x8_F32, &::F32x8]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.maskstore.ps.256") }, - "256_maskstore_pd" => Intrinsic { - inputs: vec![p(false, f(64), Some(i(8))), v_(i(64), f(64), 4), v(f(64), 4)], - output: void(), + "_mm256_maskstore_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x4_F64, &::F64x4]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.maskstore.pd.256") }, - "256_min_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: v(f(32), 8), + "_mm256_min_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.min.ps.256") }, - "256_min_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: v(f(64), 4), + "_mm256_min_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, definition: 
Named("llvm.x86.avx.min.pd.256") }, - "256_movemask_ps" => Intrinsic { - inputs: vec![v(f(32), 8)], - output: i(32), + "_mm256_movemask_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.movmsk.ps.256") }, - "256_movemask_pd" => Intrinsic { - inputs: vec![v(f(64), 4)], - output: i(32), + "_mm256_movemask_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.movmsk.pd.256") }, - "_permutevar_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(i(32), 4)], - output: v(f(32), 4), + "_mm_permutevar_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::I32x4]; &INPUTS }, + output: &::F32x4, definition: Named("llvm.x86.avx.vpermilvar.ps") }, - "_permutevar_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(i(64), 2)], - output: v(f(64), 2), + "_mm_permutevar_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::I64x2]; &INPUTS }, + output: &::F64x2, definition: Named("llvm.x86.avx.vpermilvar.pd") }, - "256_permutevar_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(i(32), 8)], - output: v(f(32), 8), + "_mm256_permutevar_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.vpermilvar.ps.256") }, - "256_permutevar_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(i(64), 4)], - output: v(f(64), 4), + "_mm256_permutevar_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::I64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.x86.avx.vpermilvar.pd.256") }, - "256_rcp_ps" => Intrinsic { - inputs: vec![v(f(32), 8)], - output: v(f(32), 8), + "_mm256_rcp_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.rcp.ps.256") }, - "256_rsqrt_ps" => 
Intrinsic { - inputs: vec![v(f(32), 8)], - output: v(f(32), 8), + "_mm256_rsqrt_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.x86.avx.rsqrt.ps.256") }, - "256_storeu_ps" => Intrinsic { - inputs: vec![p(false, v(f(32), 8), Some(u(8))), v(f(32), 8)], - output: void(), + "_mm256_storeu_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32x8, Some(&::U8), false); &PTR }, &::F32x8]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.storeu.ps.256") }, - "256_storeu_pd" => Intrinsic { - inputs: vec![p(false, v(f(64), 4), Some(u(8))), v(f(64), 4)], - output: void(), + "_mm256_storeu_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64x4, Some(&::U8), false); &PTR }, &::F64x4]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.storeu.pd.256") }, - "256_storeu_si256" => Intrinsic { - inputs: vec![p(false, v(u(8), 32), Some(u(8))), v(u(8), 32)], - output: void(), + "_mm256_storeu_si256" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::U8x32, Some(&::U8), false); &PTR }, &::U8x32]; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.storeu.dq.256") }, - "256_sqrt_ps" => Intrinsic { - inputs: vec![v(f(32), 8)], - output: v(f(32), 8), + "_mm256_sqrt_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, + output: &::F32x8, definition: Named("llvm.sqrt.v8f32") }, - "256_sqrt_pd" => Intrinsic { - inputs: vec![v(f(64), 4)], - output: v(f(64), 4), + "_mm256_sqrt_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, + output: &::F64x4, definition: Named("llvm.sqrt.v4f64") }, - "_testc_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: i(32), + "_mm_testc_ps" => Intrinsic { + inputs: { static INPUTS: [&'static
Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestc.ps") }, - "256_testc_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: i(32), + "_mm256_testc_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestc.ps.256") }, - "_testc_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: i(32), + "_mm_testc_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestc.pd") }, - "256_testc_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: i(32), + "_mm256_testc_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestc.pd.256") }, - "256_testc_si256" => Intrinsic { - inputs: vec![v(u(64), 4), v(u(64), 4)], - output: i(32), + "_mm256_testc_si256" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.ptestc.256") }, - "_testnzc_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: i(32), + "_mm_testnzc_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestnzc.ps") }, - "256_testnzc_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: i(32), + "_mm256_testnzc_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestnzc.ps.256") }, - "_testnzc_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: i(32), + "_mm_testnzc_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::I32, definition: 
Named("llvm.x86.avx.vtestnzc.pd") }, - "256_testnzc_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: i(32), + "_mm256_testnzc_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestnzc.pd.256") }, - "256_testnzc_si256" => Intrinsic { - inputs: vec![v(u(64), 4), v(u(64), 4)], - output: i(32), + "_mm256_testnzc_si256" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.ptestnzc.256") }, - "_testz_ps" => Intrinsic { - inputs: vec![v(f(32), 4), v(f(32), 4)], - output: i(32), + "_mm_testz_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestz.ps") }, - "256_testz_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(f(32), 8)], - output: i(32), + "_mm256_testz_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestz.ps.256") }, - "_testz_pd" => Intrinsic { - inputs: vec![v(f(64), 2), v(f(64), 2)], - output: i(32), + "_mm_testz_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestz.pd") }, - "256_testz_pd" => Intrinsic { - inputs: vec![v(f(64), 4), v(f(64), 4)], - output: i(32), + "_mm256_testz_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.vtestz.pd.256") }, - "256_testz_si256" => Intrinsic { - inputs: vec![v(u(64), 4), v(u(64), 4)], - output: i(32), + "_mm256_testz_si256" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, + output: &::I32, definition: Named("llvm.x86.avx.ptestz.256") }, - "256_zeroall" => Intrinsic { - 
inputs: vec![], - output: void(), + "_mm256_zeroall" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.vzeroall") }, - "256_zeroupper" => Intrinsic { - inputs: vec![], - output: void(), + "_mm256_zeroupper" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, + output: &::VOID, definition: Named("llvm.x86.avx.vzeroupper") }, - "256_abs_epi8" => Intrinsic { - inputs: vec![v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.pabs.b") + "_bmi2_bzhi_32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, + output: &::U32, + definition: Named("llvm.x86.bmi.bzhi.32") + }, + "_bmi2_bzhi_64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, + output: &::U64, + definition: Named("llvm.x86.bmi.bzhi.64") + }, + "_bmi2_pdep_32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, + output: &::U32, + definition: Named("llvm.x86.bmi.pdep.32") + }, + "_bmi2_pdep_64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, + output: &::U64, + definition: Named("llvm.x86.bmi.pdep.64") + }, + "_bmi2_pext_32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, + output: &::U32, + definition: Named("llvm.x86.bmi.pext.32") + }, + "_bmi2_pext_64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, + output: &::U64, + definition: Named("llvm.x86.bmi.pext.64") + }, + "_bmi_bextr_32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, + output: &::U32, + definition: Named("llvm.x86.bmi.bextr.32") + }, + "_bmi_bextr_64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, + output: &::U64, + definition: Named("llvm.x86.bmi.bextr.64") + }, + "_mm_fmadd_ps" => 
Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfmadd.ps") + }, + "_mm_fmadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfmadd.pd") + }, + "_mm256_fmadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfmadd.ps.256") + }, + "_mm256_fmadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfmadd.pd.256") + }, + "_mm_fmaddsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfmaddsub.ps") + }, + "_mm_fmaddsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfmaddsub.pd") + }, + "_mm256_fmaddsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfmaddsub.ps.256") + }, + "_mm256_fmaddsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfmaddsub.pd.256") + }, + "_mm_fmsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfmsub.ps") + }, + "_mm_fmsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfmsub.pd") + }, + "_mm256_fmsub_ps" => 
Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfmsub.ps.256") + }, + "_mm256_fmsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfmsub.pd.256") + }, + "_mm_fmsubadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfmsubadd.ps") + }, + "_mm_fmsubadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfmsubadd.pd") + }, + "_mm256_fmsubadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfmsubadd.ps.256") + }, + "_mm256_fmsubadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfmsubadd.pd.256") + }, + "_mm_fnmadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfnmadd.ps") + }, + "_mm_fnmadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfnmadd.pd") + }, + "_mm256_fnmadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfnmadd.ps.256") + }, + "_mm256_fnmadd_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfnmadd.pd.256") + }, + 
"_mm_fnmsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.fma.vfnmsub.ps") + }, + "_mm_fnmsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.fma.vfnmsub.pd") + }, + "_mm256_fnmsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, + output: &::F32x8, + definition: Named("llvm.x86.fma.vfnmsub.ps.256") + }, + "_mm256_fnmsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, + output: &::F64x4, + definition: Named("llvm.x86.fma.vfnmsub.pd.256") + }, + "_mm_adds_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse2.padds.b") }, - "256_abs_epi16" => Intrinsic { - inputs: vec![v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.pabs.w") + "_mm_adds_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.paddus.b") }, - "256_abs_epi32" => Intrinsic { - inputs: vec![v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.pabs.d") + "_mm_adds_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.padds.w") }, - "256_adds_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.padds.b") + "_mm_adds_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse2.paddus.w") }, - "256_adds_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 
32)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.paddus.b") + "_mm_avg_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.pavg.b") }, - "256_adds_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.padds.w") + "_mm_avg_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse2.pavg.w") }, - "256_adds_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.paddus.w") + "_mm_lfence" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.lfence") }, - "256_avg_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 32)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.pavg.b") + "_mm_madd_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.sse2.pmadd.wd") }, - "256_avg_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.pavg.w") + "_mm_maskmoveu_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, { static PTR: Type = Type::Pointer(&::U8, None, false); &PTR }]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.maskmov.dqu") }, - "256_hadd_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.phadd.w") + "_mm_max_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.pmaxs.w") }, - "256_hadd_epi32" => Intrinsic { - inputs: 
vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.phadd.d") + "_mm_max_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.pmaxu.b") }, - "256_hadds_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.phadd.sw") + "_mm_max_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse2.max.pd") }, - "256_hsub_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.phsub.w") + "_mm_mfence" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.fence") }, - "256_hsub_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.phsub.d") + "_mm_min_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.pmins.w") }, - "256_hsubs_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.phsub.sw") + "_mm_min_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.pminu.b") }, - "256_madd_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.pmadd.wd") + "_mm_min_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse2.min.pd") }, - "256_maddubs_epi16" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(16), 16), - 
definition: Named("llvm.x86.avx2.pmadd.ub.sw") + "_mm_movemask_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse2.movmsk.pd") }, - "_mask_i32gather_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(32), 4), v(i(32), 4), i_(32, 8)], - output: v(i(32), 4), - definition: Named("llvm.x86.avx2.gather.d.d") + "_mm_movemask_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse2.pmovmskb.128") }, - "_mask_i32gather_ps" => Intrinsic { - inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(32), 4), v_(i(32), f(32), 4), i_(32, 8)], - output: v(f(32), 4), - definition: Named("llvm.x86.avx2.gather.d.ps") + "_mm_mul_epu32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U64x2, + definition: Named("llvm.x86.sse2.pmulu.dq") }, - "256_mask_i32gather_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), p(true, i(32), Some(i(8))), v(i(32), 8), v(i(32), 8), i_(32, 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.gather.d.d.256") + "_mm_mulhi_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.pmulh.w") }, - "256_mask_i32gather_ps" => Intrinsic { - inputs: vec![v(f(32), 8), p(true, f(32), Some(i(8))), v(i(32), 8), v_(i(32), f(32), 8), i_(32, 8)], - output: v(f(32), 8), - definition: Named("llvm.x86.avx2.gather.d.ps.256") + "_mm_mulhi_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse2.pmulhu.w") }, - "_mask_i32gather_epi64" => Intrinsic { - inputs: vec![v(i(64), 2), p(true, i(64), Some(i(8))), v(i(32), 4), v(i(64), 2), i_(32, 8)], - output: v(i(64), 2), - definition: 
Named("llvm.x86.avx2.gather.d.q") + "_mm_packs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse2.packsswb.128") }, - "_mask_i32gather_pd" => Intrinsic { - inputs: vec![v(f(64), 2), p(true, f(64), Some(i(8))), v(i(32), 4), v_(i(64), f(64), 2), i_(32, 8)], - output: v(f(64), 2), - definition: Named("llvm.x86.avx2.gather.d.pd") + "_mm_packs_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.packssdw.128") }, - "256_mask_i32gather_epi64" => Intrinsic { - inputs: vec![v(i(64), 4), p(true, i(64), Some(i(8))), v(i(32), 4), v(i(64), 4), i_(32, 8)], - output: v(i(64), 4), - definition: Named("llvm.x86.avx2.gather.d.q.256") + "_mm_packus_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.packuswb.128") }, - "256_mask_i32gather_pd" => Intrinsic { - inputs: vec![v(f(64), 4), p(true, f(64), Some(i(8))), v(i(32), 4), v_(i(64), f(64), 4), i_(32, 8)], - output: v(f(64), 4), - definition: Named("llvm.x86.avx2.gather.d.pd.256") + "_mm_sad_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U64x2, + definition: Named("llvm.x86.sse2.psad.bw") }, - "_mask_i64gather_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(64), 2), v(i(32), 4), i_(32, 8)], - output: v(i(32), 4), - definition: Named("llvm.x86.avx2.gather.q.d") + "_mm_sfence" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.sfence") }, - "_mask_i64gather_ps" => Intrinsic { - inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(64), 2), v_(i(32), f(32), 4), i_(32, 8)], - output: v(f(32), 4), - definition: 
Named("llvm.x86.avx2.gather.q.ps") + "_mm_sqrt_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.sqrt.v2f64") }, - "256_mask_i64gather_epi32" => Intrinsic { - inputs: vec![v(i(32), 4), p(true, i(32), Some(i(8))), v(i(64), 4), v(i(32), 4), i_(32, 8)], - output: v(i(32), 4), - definition: Named("llvm.x86.avx2.gather.q.d") + "_mm_storeu_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::U8), false); &PTR }, &::F64x2]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.storeu.pd") }, - "256_mask_i64gather_ps" => Intrinsic { - inputs: vec![v(f(32), 4), p(true, f(32), Some(i(8))), v(i(64), 4), v_(i(32), f(32), 4), i_(32, 8)], - output: v(f(32), 4), - definition: Named("llvm.x86.avx2.gather.q.ps") + "_mm_storeu_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::U8x16, Some(&::U8), false); &PTR }, &::U8x16]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse2.storeu.dq") }, - "_mask_i64gather_epi64" => Intrinsic { - inputs: vec![v(i(64), 2), p(true, i(64), Some(i(8))), v(i(64), 2), v(i(64), 2), i_(32, 8)], - output: v(i(64), 2), - definition: Named("llvm.x86.avx2.gather.q.q") + "_mm_subs_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse2.psubs.b") }, - "_mask_i64gather_pd" => Intrinsic { - inputs: vec![v(f(64), 2), p(true, f(64), Some(i(8))), v(i(64), 2), v_(i(64), f(64), 2), i_(32, 8)], - output: v(f(64), 2), - definition: Named("llvm.x86.avx2.gather.q.pd") + "_mm_subs_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse2.psubus.b") }, - "256_mask_i64gather_epi64" => Intrinsic { - inputs: vec![v(i(64), 4), p(true, i(64), 
Some(i(8))), v(i(64), 4), v(i(64), 4), i_(32, 8)], - output: v(i(64), 4), - definition: Named("llvm.x86.avx2.gather.q.q.256") + "_mm_subs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.sse2.psubs.w") }, - "256_mask_i64gather_pd" => Intrinsic { - inputs: vec![v(f(64), 4), p(true, f(64), Some(i(8))), v(i(64), 4), v_(i(64), f(64), 4), i_(32, 8)], - output: v(f(64), 4), - definition: Named("llvm.x86.avx2.gather.q.pd.256") + "_mm_subs_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse2.psubus.w") }, - "_maskload_epi32" => Intrinsic { - inputs: vec![p(true, v(i(32), 4), Some(i(8))), v(i(32), 4)], - output: v(i(32), 4), - definition: Named("llvm.x86.avx2.maskload.d") + "_mm_addsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse3.addsub.ps") }, - "_maskload_epi64" => Intrinsic { - inputs: vec![p(true, v(i(64), 2), Some(i(8))), v(i(64), 2)], - output: v(i(64), 2), - definition: Named("llvm.x86.avx2.maskload.q") + "_mm_addsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse3.addsub.pd") }, - "256_maskload_epi32" => Intrinsic { - inputs: vec![p(true, v(i(32), 8), Some(i(8))), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.maskload.d.256") + "_mm_hadd_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse3.hadd.ps") }, - "256_maskload_epi64" => Intrinsic { - inputs: vec![p(true, v(i(64), 4), Some(i(8))), v(i(64), 4)], - output: v(i(64), 4), - definition: Named("llvm.x86.avx2.maskload.q.256") + "_mm_hadd_pd" => Intrinsic { + inputs: { static 
INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse3.hadd.pd") }, - "_maskstore_epi32" => Intrinsic { - inputs: vec![p(false, i(32), Some(i(8))), v(i(32), 4), v(i(32), 4)], - output: void(), - definition: Named("llvm.x86.avx2.maskstore.d") + "_mm_hsub_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse3.hsub.ps") }, - "_maskstore_epi64" => Intrinsic { - inputs: vec![p(false, i(64), Some(i(8))), v(i(64), 2), v(i(64), 2)], - output: void(), - definition: Named("llvm.x86.avx2.maskstore.q") + "_mm_hsub_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse3.hsub.pd") }, - "256_maskstore_epi32" => Intrinsic { - inputs: vec![p(false, i(32), Some(i(8))), v(i(32), 8), v(i(32), 8)], - output: void(), - definition: Named("llvm.x86.avx2.maskstore.d.256") + "_mm_lddqu_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8x16, Some(&::I8), true); &PTR }]; &INPUTS }, + output: &::U8x16, + definition: Named("llvm.x86.sse3.ldu.dq") }, - "256_maskstore_epi64" => Intrinsic { - inputs: vec![p(false, i(64), Some(i(8))), v(i(64), 4), v(i(64), 4)], - output: void(), - definition: Named("llvm.x86.avx2.maskstore.q.256") + "_mm_dp_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::I32_8]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse41.dpps") }, - "256_max_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.pmaxs.b") + "_mm_dp_pd" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::I32_8]; &INPUTS }, + output: &::F64x2, + definition: Named("llvm.x86.sse41.dppd") }, - "256_max_epu8" => Intrinsic { - inputs: 
vec![v(u(8), 32), v(u(8), 32)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.pmaxu.b") + "_mm_max_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse41.pmaxsb") }, - "256_max_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.pmaxs.w") + "_mm_max_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse41.pmaxuw") }, - "256_max_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.pmaxu.w") + "_mm_max_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.sse41.pmaxsd") }, - "256_max_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.pmaxs.d") + "_mm_max_epu32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.x86.sse41.pmaxud") }, - "256_max_epu32" => Intrinsic { - inputs: vec![v(u(32), 8), v(u(32), 8)], - output: v(u(32), 8), - definition: Named("llvm.x86.avx2.pmaxu.d") + "_mm_min_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse41.pminsb") }, - "256_min_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.pmins.b") + "_mm_min_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse41.pminuw") }, - "256_min_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 32)], - output: 
v(u(8), 32), - definition: Named("llvm.x86.avx2.pminu.b") + "_mm_min_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.sse41.pminsd") }, - "256_min_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.pmins.w") + "_mm_min_epu32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, + output: &::U32x4, + definition: Named("llvm.x86.sse41.pminud") }, - "256_min_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.pminu.w") + "_mm_minpos_epu16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse41.phminposuw") }, - "256_min_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.pmins.d") + "_mm_mpsadbw_epu8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::I32_8]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse41.mpsadbw") }, - "256_min_epu32" => Intrinsic { - inputs: vec![v(u(32), 8), v(u(32), 8)], - output: v(u(32), 8), - definition: Named("llvm.x86.avx2.pminu.d") + "_mm_mul_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I64x2, + definition: Named("llvm.x86.sse41.pmuldq") }, - "256_movemask_epi8" => Intrinsic { - inputs: vec![v(i(8), 32)], - output: i(32), - definition: Named("llvm.x86.avx2.pmovmskb") + "_mm_packus_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::U16x8, + definition: Named("llvm.x86.sse41.packusdw") }, - "256_mpsadbw_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 32), i_(32, 8)], - output: v(u(16), 16), - definition: 
Named("llvm.x86.avx2.mpsadbw") + "_mm_testc_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse41.ptestc") }, - "256_mul_epi64" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(64), 4), - definition: Named("llvm.x86.avx2.pmulq.dq") + "_mm_testnzc_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse41.ptestnzc") }, - "256_mul_epu64" => Intrinsic { - inputs: vec![v(u(32), 8), v(u(32), 8)], - output: v(u(64), 4), - definition: Named("llvm.x86.avx2.pmulq.dq") + "_mm_testz_si128" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse41.ptestz") }, - "256_mulhi_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.pmulhw.w") + "_mm_cmpestra" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestria128") }, - "256_mulhi_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.pmulhw.w") + "_mm_cmpestrc" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestric128") }, - "256_mulhrs_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.pmul.hr.sw") + "_mm_cmpestri" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestri128") }, - "256_packs_epi16" => Intrinsic { - inputs: 
vec![v(i(16), 16), v(i(16), 16)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.packsswb") + "_mm_cmpestrm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse42.pcmpestrm128") }, - "256_packus_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.packuswb") + "_mm_cmpestro" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestrio128") }, - "256_packs_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.packssdw") + "_mm_cmpestrs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestris128") }, - "256_packus_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.packusdw") + "_mm_cmpestrz" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpestriz128") }, - "256_permutevar8x32_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.permd") + "_mm_cmpistra" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistria128") }, - "256_permutevar8x32_ps" => Intrinsic { - inputs: vec![v(f(32), 8), v(i(32), 8)], - output: v(f(32), 8), - definition: Named("llvm.x86.avx2.permps") + "_mm_cmpistrc" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, 
&::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistric128") }, - "256_sad_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 32)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.psad.bw") + "_mm_cmpistri" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistri128") }, - "256_shuffle_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.pshuf.b") + "_mm_cmpistrm" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.sse42.pcmpistrm128") }, - "256_sign_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.psign.b") + "_mm_cmpistro" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistrio128") }, - "256_sign_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.psign.w") + "_mm_cmpistrs" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistris128") }, - "256_sign_epi32" => Intrinsic { - inputs: vec![v(i(32), 8), v(i(32), 8)], - output: v(i(32), 8), - definition: Named("llvm.x86.avx2.psign.d") + "_mm_cmpistrz" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse42.pcmpistriz128") }, - "256_subs_epi8" => Intrinsic { - inputs: vec![v(i(8), 32), v(i(8), 32)], - output: v(i(8), 32), - definition: Named("llvm.x86.avx2.psubs.b") + "_mm_movemask_ps" => Intrinsic { + inputs: { static 
INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::I32, + definition: Named("llvm.x86.sse.movmsk.ps") }, - "256_subs_epu8" => Intrinsic { - inputs: vec![v(u(8), 32), v(u(8), 32)], - output: v(u(8), 32), - definition: Named("llvm.x86.avx2.psubus.b") + "_mm_max_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse.max.ps") }, - "256_subs_epi16" => Intrinsic { - inputs: vec![v(i(16), 16), v(i(16), 16)], - output: v(i(16), 16), - definition: Named("llvm.x86.avx2.psubs.w") + "_mm_min_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse.min.ps") }, - "256_subs_epu16" => Intrinsic { - inputs: vec![v(u(16), 16), v(u(16), 16)], - output: v(u(16), 16), - definition: Named("llvm.x86.avx2.psubus.w") + "_mm_rsqrt_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse.rsqrt.ps") + }, + "_mm_rcp_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.x86.sse.rcp.ps") + }, + "_mm_sqrt_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, + output: &::F32x4, + definition: Named("llvm.sqrt.v4f32") + }, + "_mm_storeu_ps" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::F32x4]; &INPUTS }, + output: &::VOID, + definition: Named("llvm.x86.sse.storeu.ps") + }, + "_mm_abs_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.ssse3.pabs.b.128") + }, + "_mm_abs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, + output: &::I16x8, + definition: 
Named("llvm.x86.ssse3.pabs.w.128") + }, + "_mm_abs_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.ssse3.pabs.d.128") + }, + "_mm_hadd_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.phadd.w.128") + }, + "_mm_hadd_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.ssse3.phadd.d.128") + }, + "_mm_hadds_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.phadd.sw.128") + }, + "_mm_hsub_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.phsub.w.128") + }, + "_mm_hsub_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.ssse3.phsub.d.128") + }, + "_mm_hsubs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.phsub.sw.128") + }, + "_mm_maddubs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.pmadd.ub.sw.128") + }, + "_mm_mulhrs_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.pmul.hr.sw.128") + }, + "_mm_shuffle_epi8" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.ssse3.pshuf.b.128") + }, + "_mm_sign_epi8" => Intrinsic { + inputs: { static 
INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, + output: &::I8x16, + definition: Named("llvm.x86.ssse3.psign.b.128") + }, + "_mm_sign_epi16" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, + output: &::I16x8, + definition: Named("llvm.x86.ssse3.psign.w.128") + }, + "_mm_sign_epi32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, + output: &::I32x4, + definition: Named("llvm.x86.ssse3.psign.d.128") + }, + "_tbm_bextri_u32" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, + output: &::U32, + definition: Named("llvm.x86.tbm.bextri.u32") + }, + "_tbm_bextri_u64" => Intrinsic { + inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, + output: &::U64, + definition: Named("llvm.x86.tbm.bextri.u64") }, _ => return None, }) diff --git a/src/librustc_plugin/Cargo.toml b/src/librustc_plugin/Cargo.toml new file mode 100644 index 0000000000000..514d81ecc94f2 --- /dev/null +++ b/src/librustc_plugin/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_plugin" +version = "0.0.0" + +[lib] +name = "rustc_plugin" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_bitflags = { path = "../librustc_bitflags" } +rustc_metadata = { path = "../librustc_metadata" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc_errors = { path = "../librustc_errors" } diff --git a/src/librustc_plugin/build.rs b/src/librustc_plugin/build.rs index 5adde4304f57c..75046f6aeb874 100644 --- a/src/librustc_plugin/build.rs +++ b/src/librustc_plugin/build.rs @@ -12,16 +12,18 @@ use syntax::ast; use syntax::attr; -use syntax::codemap::Span; -use syntax::errors; -use rustc_front::intravisit::Visitor; -use rustc_front::hir; +use errors; +use 
syntax_pos::Span; +use rustc::dep_graph::DepNode; +use rustc::hir::map::Map; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir; struct RegistrarFinder { registrars: Vec<(ast::NodeId, Span)> , } -impl<'v> Visitor<'v> for RegistrarFinder { +impl<'v> ItemLikeVisitor<'v> for RegistrarFinder { fn visit_item(&mut self, item: &hir::Item) { if let hir::ItemFn(..) = item.node { if attr::contains_name(&item.attrs, @@ -30,14 +32,20 @@ impl<'v> Visitor<'v> for RegistrarFinder { } } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } } /// Find the function marked with `#[plugin_registrar]`, if any. pub fn find_plugin_registrar(diagnostic: &errors::Handler, - krate: &hir::Crate) + hir_map: &Map) -> Option { + let _task = hir_map.dep_graph.in_task(DepNode::PluginRegistrar); + let krate = hir_map.krate(); + let mut finder = RegistrarFinder { registrars: Vec::new() }; - krate.visit_all_items(&mut finder); + krate.visit_all_item_likes(&mut finder); match finder.registrars.len() { 0 => None, diff --git a/src/librustc_plugin/lib.rs b/src/librustc_plugin/lib.rs index 333c226c2a373..91e0fd636c9c1 100644 --- a/src/librustc_plugin/lib.rs +++ b/src/librustc_plugin/lib.rs @@ -27,7 +27,7 @@ //! //! extern crate rustc; //! -//! use rustc::plugin::Registry; +//! use rustc_plugin::Registry; //! //! #[plugin_registrar] //! 
pub fn plugin_registrar(reg: &mut Registry) { @@ -55,10 +55,10 @@ #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] -#![feature(dynamic_lib)] #![feature(staged_api)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] @@ -68,8 +68,10 @@ #[macro_use] #[no_link] extern crate rustc_bitflags; extern crate rustc; -extern crate rustc_front; +extern crate rustc_back; extern crate rustc_metadata; +extern crate syntax_pos; +extern crate rustc_errors as errors; pub use self::registry::Registry; diff --git a/src/librustc_plugin/load.rs b/src/librustc_plugin/load.rs index 51eec07505a42..1bfc445fca98d 100644 --- a/src/librustc_plugin/load.rs +++ b/src/librustc_plugin/load.rs @@ -11,7 +11,7 @@ //! Used by `rustc` when loading a plugin. use rustc::session::Session; -use rustc_metadata::creader::CrateReader; +use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; use registry::Registry; @@ -20,9 +20,7 @@ use std::env; use std::mem; use std::path::PathBuf; use syntax::ast; -use syntax::codemap::{Span, COMMAND_LINE_SP}; -use syntax::ptr::P; -use syntax::attr::AttrMetaMethods; +use syntax_pos::{Span, COMMAND_LINE_SP}; /// Pointer to a registrar function. pub type PluginRegistrarFun = @@ -30,12 +28,12 @@ pub type PluginRegistrarFun = pub struct PluginRegistrar { pub fun: PluginRegistrarFun, - pub args: Vec>, + pub args: Vec, } struct PluginLoader<'a> { sess: &'a Session, - reader: CrateReader<'a>, + reader: CrateLoader<'a>, plugins: Vec, } @@ -44,31 +42,40 @@ fn call_malformed_plugin_attribute(a: &Session, b: Span) { } /// Read plugin metadata and dynamically load registrar functions. 
-pub fn load_plugins(sess: &Session, cstore: &CStore, krate: &ast::Crate, +pub fn load_plugins(sess: &Session, + cstore: &CStore, + krate: &ast::Crate, + crate_name: &str, addl_plugins: Option>) -> Vec { - let mut loader = PluginLoader::new(sess, cstore); - - for attr in &krate.attrs { - if !attr.check_name("plugin") { - continue; - } - - let plugins = match attr.meta_item_list() { - Some(xs) => xs, - None => { - call_malformed_plugin_attribute(sess, attr.span); + let mut loader = PluginLoader::new(sess, cstore, crate_name); + + // do not report any error now. since crate attributes are + // not touched by expansion, every use of plugin without + // the feature enabled will result in an error later... + if sess.features.borrow().plugin { + for attr in &krate.attrs { + if !attr.check_name("plugin") { continue; } - }; - for plugin in plugins { - if plugin.value_str().is_some() { - call_malformed_plugin_attribute(sess, attr.span); - continue; + let plugins = match attr.meta_item_list() { + Some(xs) => xs, + None => { + call_malformed_plugin_attribute(sess, attr.span); + continue; + } + }; + + for plugin in plugins { + // plugins must have a name and can't be key = value + match plugin.name() { + Some(name) if !plugin.is_value_str() => { + let args = plugin.meta_item_list().map(ToOwned::to_owned); + loader.load_plugin(plugin.span, &name.as_str(), args.unwrap_or_default()); + }, + _ => call_malformed_plugin_attribute(sess, attr.span), + } } - - let args = plugin.meta_item_list().map(ToOwned::to_owned).unwrap_or_default(); - loader.load_plugin(plugin.span, &*plugin.name(), args); } } @@ -82,18 +89,19 @@ pub fn load_plugins(sess: &Session, cstore: &CStore, krate: &ast::Crate, } impl<'a> PluginLoader<'a> { - fn new(sess: &'a Session, cstore: &'a CStore) -> PluginLoader<'a> { + fn new(sess: &'a Session, cstore: &'a CStore, crate_name: &str) -> Self { PluginLoader { sess: sess, - reader: CrateReader::new(sess, cstore), + reader: CrateLoader::new(sess, cstore, crate_name), 
plugins: vec![], } } - fn load_plugin(&mut self, span: Span, name: &str, args: Vec>) { + fn load_plugin(&mut self, span: Span, name: &str, args: Vec) { let registrar = self.reader.find_plugin_registrar(span, name); - if let Some((lib, symbol)) = registrar { + if let Some((lib, svh, index)) = registrar { + let symbol = self.sess.generate_plugin_registrar_symbol(&svh, index); let fun = self.dylink_registrar(span, lib, symbol); self.plugins.push(PluginRegistrar { fun: fun, @@ -103,12 +111,11 @@ impl<'a> PluginLoader<'a> { } // Dynamically link a registrar function into the compiler process. - #[allow(deprecated)] fn dylink_registrar(&mut self, span: Span, path: PathBuf, symbol: String) -> PluginRegistrarFun { - use std::dynamic_lib::DynamicLibrary; + use rustc_back::dynamic_lib::DynamicLibrary; // Make sure the path contains a / or the linker will search for it. let path = env::current_dir().unwrap().join(&path); diff --git a/src/librustc_plugin/registry.rs b/src/librustc_plugin/registry.rs index 3138d7fa1db56..fe2f9713d1beb 100644 --- a/src/librustc_plugin/registry.rs +++ b/src/librustc_plugin/registry.rs @@ -13,14 +13,14 @@ use rustc::lint::{EarlyLintPassObject, LateLintPassObject, LintId, Lint}; use rustc::session::Session; -use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT}; -use syntax::ext::base::{IdentTT, MultiModifier, MultiDecorator}; -use syntax::ext::base::{MacroExpanderFn, MacroRulesTT}; -use syntax::codemap::Span; -use syntax::parse::token; -use syntax::ptr::P; +use rustc::mir::transform::MirMapPass; + +use syntax::ext::base::{SyntaxExtension, NamedSyntaxExtension, NormalTT, IdentTT}; +use syntax::ext::base::MacroExpanderFn; +use syntax::symbol::Symbol; use syntax::ast; use syntax::feature_gate::AttributeType; +use syntax_pos::Span; use std::collections::HashMap; use std::borrow::ToOwned; @@ -39,7 +39,7 @@ pub struct Registry<'a> { pub sess: &'a Session, #[doc(hidden)] - pub args_hidden: Option>>, + pub args_hidden: Option>, 
#[doc(hidden)] pub krate_span: Span, @@ -53,6 +53,9 @@ pub struct Registry<'a> { #[doc(hidden)] pub late_lint_passes: Vec, + #[doc(hidden)] + pub mir_passes: Vec MirMapPass<'pcx>>>, + #[doc(hidden)] pub lint_groups: HashMap<&'static str, Vec>, @@ -65,17 +68,18 @@ pub struct Registry<'a> { impl<'a> Registry<'a> { #[doc(hidden)] - pub fn new(sess: &'a Session, krate: &ast::Crate) -> Registry<'a> { + pub fn new(sess: &'a Session, krate_span: Span) -> Registry<'a> { Registry { sess: sess, args_hidden: None, - krate_span: krate.span, - syntax_exts: vec!(), - early_lint_passes: vec!(), - late_lint_passes: vec!(), + krate_span: krate_span, + syntax_exts: vec![], + early_lint_passes: vec![], + late_lint_passes: vec![], lint_groups: HashMap::new(), - llvm_passes: vec!(), - attributes: vec!(), + llvm_passes: vec![], + attributes: vec![], + mir_passes: Vec::new(), } } @@ -86,14 +90,20 @@ impl<'a> Registry<'a> { /// ```no_run /// #![plugin(my_plugin_name(... args ...))] /// ``` - pub fn args<'b>(&'b self) -> &'b Vec> { - self.args_hidden.as_ref().expect("args not set") + /// + /// Returns empty slice in case the plugin was loaded + /// with `--extra-plugins` + pub fn args<'b>(&'b self) -> &'b [ast::NestedMetaItem] { + self.args_hidden.as_ref().map(|v| &v[..]).unwrap_or(&[]) } /// Register a syntax extension of any kind. /// /// This is the most general hook into `libsyntax`'s expansion behavior. 
pub fn register_syntax_extension(&mut self, name: ast::Name, extension: SyntaxExtension) { + if name == "macro_rules" { + panic!("user-defined macros may not be named `macro_rules`"); + } self.syntax_exts.push((name, match extension { NormalTT(ext, _, allow_internal_unstable) => { NormalTT(ext, Some(self.krate_span), allow_internal_unstable) @@ -101,12 +111,7 @@ impl<'a> Registry<'a> { IdentTT(ext, _, allow_internal_unstable) => { IdentTT(ext, Some(self.krate_span), allow_internal_unstable) } - MultiDecorator(ext) => MultiDecorator(ext), - MultiModifier(ext) => MultiModifier(ext), - MacroRulesTT => { - self.sess.err("plugin tried to register a new MacroRulesTT"); - return; - } + _ => extension, })); } @@ -116,7 +121,7 @@ impl<'a> Registry<'a> { /// It builds for you a `NormalTT` that calls `expander`, /// and also takes care of interning the macro's name. pub fn register_macro(&mut self, name: &str, expander: MacroExpanderFn) { - self.register_syntax_extension(token::intern(name), + self.register_syntax_extension(Symbol::intern(name), NormalTT(Box::new(expander), None, false)); } @@ -134,6 +139,11 @@ impl<'a> Registry<'a> { self.lint_groups.insert(name, to.into_iter().map(|x| LintId::of(x)).collect()); } + /// Register a MIR pass + pub fn register_mir_pass(&mut self, pass: Box MirMapPass<'pcx>>) { + self.mir_passes.push(pass); + } + /// Register an LLVM pass. /// /// Registration with LLVM itself is handled through static C++ objects with @@ -143,7 +153,6 @@ impl<'a> Registry<'a> { self.llvm_passes.push(name.to_owned()); } - /// Register an attribute with an attribute type. /// /// Registered attributes will bypass the `custom_attribute` feature gate. 
diff --git a/src/librustc_privacy/Cargo.toml b/src/librustc_privacy/Cargo.toml new file mode 100644 index 0000000000000..439fa661e0ab5 --- /dev/null +++ b/src/librustc_privacy/Cargo.toml @@ -0,0 +1,14 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_privacy" +version = "0.0.0" + +[lib] +name = "rustc_privacy" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +rustc = { path = "../librustc" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_privacy/diagnostics.rs b/src/librustc_privacy/diagnostics.rs index 3fbe3bc200534..66afe5835bf6f 100644 --- a/src/librustc_privacy/diagnostics.rs +++ b/src/librustc_privacy/diagnostics.rs @@ -16,28 +16,29 @@ E0445: r##" A private trait was used on a public type parameter bound. Erroneous code examples: -``` +```compile_fail,E0445 +#![deny(private_in_public)] + trait Foo { fn dummy(&self) { } } pub trait Bar : Foo {} // error: private trait in public interface -pub struct Bar(pub T); // same error +pub struct Bar2(pub T); // same error pub fn foo (t: T) {} // same error ``` To solve this error, please ensure that the trait is also public. The trait -can be made inaccessible if necessary by placing it into a private inner module, -but it still has to be marked with `pub`. -Example: +can be made inaccessible if necessary by placing it into a private inner +module, but it still has to be marked with `pub`. Example: -``` +```ignore pub trait Foo { // we set the Foo trait public fn dummy(&self) { } } pub trait Bar : Foo {} // ok! -pub struct Bar(pub T); // ok! +pub struct Bar2(pub T); // ok! pub fn foo (t: T) {} // ok! ``` "##, @@ -45,7 +46,9 @@ pub fn foo (t: T) {} // ok! E0446: r##" A private type was used in a public type signature. Erroneous code example: -``` +```compile_fail,E0446 +#![deny(private_in_public)] + mod Foo { struct Bar(u32); @@ -56,8 +59,8 @@ mod Foo { ``` To solve this error, please ensure that the type is also public. 
The type -can be made inaccessible if necessary by placing it into a private inner module, -but it still has to be marked with `pub`. +can be made inaccessible if necessary by placing it into a private inner +module, but it still has to be marked with `pub`. Example: ``` @@ -74,7 +77,7 @@ mod Foo { E0447: r##" The `pub` keyword was used inside a function. Erroneous code example: -``` +```ignore fn foo() { pub struct Bar; // error: visibility has no effect inside functions } @@ -88,7 +91,7 @@ is invalid. E0448: r##" The `pub` keyword was used inside a public enum. Erroneous code example: -``` +```compile_fail pub enum Foo { pub Bar, // error: unnecessary `pub` visibility } @@ -97,64 +100,26 @@ pub enum Foo { Since the enum is already public, adding `pub` on one its elements is unnecessary. Example: -``` +```compile_fail, enum Foo { - pub Bar, // ok! + pub Bar, // not ok! } +``` -// or: +This is the correct syntax: +```ignore pub enum Foo { Bar, // ok! } ``` "##, -E0449: r##" -A visibility qualifier was used when it was unnecessary. Erroneous code -examples: - -``` -struct Bar; - -trait Foo { - fn foo(); -} - -pub impl Bar {} // error: unnecessary visibility qualifier - -pub impl Foo for Bar { // error: unnecessary visibility qualifier - pub fn foo() {} // error: unnecessary visibility qualifier -} -``` - -To fix this error, please remove the visibility qualifier when it is not -required. Example: - -``` -struct Bar; - -trait Foo { - fn foo(); -} - -// Directly implemented methods share the visibility of the type itself, -// so `pub` is unnecessary here -impl Bar {} - -// Trait methods share the visibility of the trait, so `pub` is -// unnecessary in either case -pub impl Foo for Bar { - pub fn foo() {} -} -``` -"##, - E0450: r##" A tuple constructor was invoked while some of its fields are private. 
Erroneous code example: -``` +```compile_fail,E0450 mod Bar { pub struct Foo(isize); } @@ -164,7 +129,7 @@ let f = Bar::Foo(0); // error: cannot invoke tuple struct constructor with ``` To solve this issue, please ensure that all of the fields of the tuple struct -are public. Alternatively, provide a new() method to the tuple struct to +are public. Alternatively, provide a `new()` method to the tuple struct to construct it from a given inner value. Example: ``` @@ -179,7 +144,7 @@ mod bar { pub struct Foo(isize); impl Foo { - pub fn new(x: isize) { + pub fn new(x: isize) -> Foo { Foo(x) } } @@ -192,7 +157,7 @@ let f = bar::Foo::new(1); E0451: r##" A struct constructor with private fields was invoked. Erroneous code example: -``` +```compile_fail,E0451 mod Bar { pub struct Foo { pub a: isize, @@ -204,8 +169,8 @@ let f = Bar::Foo{ a: 0, b: 0 }; // error: field `b` of struct `Bar::Foo` // is private ``` -To fix this error, please ensure that all the fields of the struct, or -implement a function for easy instantiation. Examples: +To fix this error, please ensure that all the fields of the struct are public, +or implement a function for easy instantiation. Examples: ``` mod Bar { @@ -216,8 +181,11 @@ mod Bar { } let f = Bar::Foo{ a: 0, b: 0 }; // ok! 
+``` -// or: +Or: + +``` mod Bar { pub struct Foo { pub a: isize, diff --git a/src/librustc_privacy/lib.rs b/src/librustc_privacy/lib.rs index d3da93a3e080d..145b9176f6b13 100644 --- a/src/librustc_privacy/lib.rs +++ b/src/librustc_privacy/lib.rs @@ -15,149 +15,42 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] -#[macro_use] extern crate log; -#[macro_use] extern crate syntax; - extern crate rustc; -extern crate rustc_front; - -use self::PrivacyResult::*; -use self::FieldName::*; - -use std::cmp; -use std::mem::replace; - -use rustc_front::hir; -use rustc_front::intravisit::{self, Visitor}; +#[macro_use] extern crate syntax; +extern crate syntax_pos; use rustc::dep_graph::DepNode; +use rustc::hir::{self, PatKind}; +use rustc::hir::def::{self, Def, CtorKind}; +use rustc::hir::def_id::DefId; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::itemlikevisit::DeepVisitor; +use rustc::hir::pat_util::EnumerateAndAdjustIterator; use rustc::lint; -use rustc::middle::def; -use rustc::middle::def_id::DefId; use rustc::middle::privacy::{AccessLevel, AccessLevels}; -use rustc::middle::privacy::ImportUse::*; -use rustc::middle::privacy::LastPrivate::*; -use rustc::middle::privacy::PrivateDep::*; -use rustc::middle::privacy::ExternalExports; -use rustc::middle::ty; -use rustc::util::nodemap::{NodeMap, NodeSet}; -use rustc::front::map as ast_map; - +use rustc::ty::{self, TyCtxt, Ty, TypeFoldable}; +use rustc::ty::fold::TypeVisitor; +use rustc::util::nodemap::NodeSet; use syntax::ast; -use syntax::codemap::Span; - -pub mod diagnostics; - -type Context<'a, 'tcx> = (&'a ty::MethodMap<'tcx>, &'a def::ExportMap); - -/// Result of a checking operation - None => no errors 
were found. Some => an -/// error and contains the span and message for reporting that error and -/// optionally the same for a note about the error. -type CheckResult = Option<(Span, String, Option<(Span, String)>)>; - -//////////////////////////////////////////////////////////////////////////////// -/// The parent visitor, used to determine what's the parent of what (node-wise) -//////////////////////////////////////////////////////////////////////////////// - -struct ParentVisitor<'a, 'tcx:'a> { - tcx: &'a ty::ctxt<'tcx>, - parents: NodeMap, - curparent: ast::NodeId, -} - -impl<'a, 'tcx, 'v> Visitor<'v> for ParentVisitor<'a, 'tcx> { - /// We want to visit items in the context of their containing - /// module and so forth, so supply a crate for doing a deep walk. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) - } - fn visit_item(&mut self, item: &hir::Item) { - self.parents.insert(item.id, self.curparent); - - let prev = self.curparent; - match item.node { - hir::ItemMod(..) => { self.curparent = item.id; } - // Enum variants are parented to the enum definition itself because - // they inherit privacy - hir::ItemEnum(ref def, _) => { - for variant in &def.variants { - // The parent is considered the enclosing enum because the - // enum will dictate the privacy visibility of this variant - // instead. - self.parents.insert(variant.node.data.id(), item.id); - } - } - - // Trait methods are always considered "public", but if the trait is - // private then we need some private item in the chain from the - // method to the root. In this case, if the trait is private, then - // parent all the methods to the trait to indicate that they're - // private. 
- hir::ItemTrait(_, _, _, ref trait_items) if item.vis != hir::Public => { - for trait_item in trait_items { - self.parents.insert(trait_item.id, item.id); - } - } +use syntax_pos::Span; - _ => {} - } - intravisit::walk_item(self, item); - self.curparent = prev; - } - - fn visit_foreign_item(&mut self, a: &hir::ForeignItem) { - self.parents.insert(a.id, self.curparent); - intravisit::walk_foreign_item(self, a); - } - - fn visit_fn(&mut self, a: intravisit::FnKind<'v>, b: &'v hir::FnDecl, - c: &'v hir::Block, d: Span, id: ast::NodeId) { - // We already took care of some trait methods above, otherwise things - // like impl methods and pub trait methods are parented to the - // containing module, not the containing trait. - if !self.parents.contains_key(&id) { - self.parents.insert(id, self.curparent); - } - intravisit::walk_fn(self, a, b, c, d); - } - - fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { - // visit_fn handles methods, but associated consts have to be handled - // here. - if !self.parents.contains_key(&ii.id) { - self.parents.insert(ii.id, self.curparent); - } - intravisit::walk_impl_item(self, ii); - } - - fn visit_variant_data(&mut self, s: &hir::VariantData, _: ast::Name, - _: &'v hir::Generics, item_id: ast::NodeId, _: Span) { - // Struct constructors are parented to their struct definitions because - // they essentially are the struct definitions. - if !s.is_struct() { - self.parents.insert(s.id(), item_id); - } +use std::cmp; +use std::mem::replace; - // While we have the id of the struct definition, go ahead and parent - // all the fields. 
- for field in s.fields() { - self.parents.insert(field.node.id, self.curparent); - } - intravisit::walk_struct_def(self, s) - } -} +pub mod diagnostics; //////////////////////////////////////////////////////////////////////////////// /// The embargo visitor, used to determine the exports of the ast //////////////////////////////////////////////////////////////////////////////// struct EmbargoVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, export_map: &'a def::ExportMap, // Accessibility levels for reachable nodes @@ -168,33 +61,34 @@ struct EmbargoVisitor<'a, 'tcx: 'a> { changed: bool, } +struct ReachEverythingInTheInterfaceVisitor<'b, 'a: 'b, 'tcx: 'a> { + item_def_id: DefId, + ev: &'b mut EmbargoVisitor<'a, 'tcx>, +} + impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> { - fn ty_level(&self, ty: &hir::Ty) -> Option { - if let hir::TyPath(..) = ty.node { - match self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def() { - def::DefPrimTy(..) | def::DefSelfTy(..) | def::DefTyParam(..) => { - Some(AccessLevel::Public) - } - def => { - if let Some(node_id) = self.tcx.map.as_local_node_id(def.def_id()) { - self.get(node_id) - } else { - Some(AccessLevel::Public) - } - } - } + fn item_ty_level(&self, item_def_id: DefId) -> Option { + let ty_def_id = match self.tcx.item_type(item_def_id).sty { + ty::TyAdt(adt, _) => adt.did, + ty::TyDynamic(ref obj, ..) 
if obj.principal().is_some() => + obj.principal().unwrap().def_id(), + ty::TyProjection(ref proj) => proj.trait_ref.def_id, + _ => return Some(AccessLevel::Public) + }; + if let Some(node_id) = self.tcx.map.as_local_node_id(ty_def_id) { + self.get(node_id) } else { Some(AccessLevel::Public) } } - fn trait_level(&self, trait_ref: &hir::TraitRef) -> Option { - let did = self.tcx.trait_ref_to_def_id(trait_ref); - if let Some(node_id) = self.tcx.map.as_local_node_id(did) { - self.get(node_id) - } else { - Some(AccessLevel::Public) + fn impl_trait_level(&self, impl_def_id: DefId) -> Option { + if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_def_id) { + if let Some(node_id) = self.tcx.map.as_local_node_id(trait_ref.def_id) { + return self.get(node_id); + } } + Some(AccessLevel::Public) } fn get(&self, id: ast::NodeId) -> Option { @@ -213,26 +107,33 @@ impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> { old_level } } + + fn reach<'b>(&'b mut self, item_id: ast::NodeId) + -> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { + ReachEverythingInTheInterfaceVisitor { + item_def_id: self.tcx.map.local_def_id(item_id), + ev: self, + } + } } -impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { /// We want to visit items in the context of their containing /// module and so forth, so supply a crate for doing a deep walk. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, item: &hir::Item) { + fn visit_item(&mut self, item: &'tcx hir::Item) { let inherited_item_level = match item.node { // Impls inherit level from their types and traits - hir::ItemImpl(_, _, _, None, ref ty, _) => { - self.ty_level(&ty) + hir::ItemImpl(..) 
=> { + let def_id = self.tcx.map.local_def_id(item.id); + cmp::min(self.item_ty_level(def_id), self.impl_trait_level(def_id)) } - hir::ItemImpl(_, _, _, Some(ref trait_ref), ref ty, _) => { - cmp::min(self.ty_level(&ty), self.trait_level(trait_ref)) - } - hir::ItemDefaultImpl(_, ref trait_ref) => { - self.trait_level(trait_ref) + hir::ItemDefaultImpl(..) => { + let def_id = self.tcx.map.local_def_id(item.id); + self.impl_trait_level(def_id) } // Foreign mods inherit level from parents hir::ItemForeignMod(..) => { @@ -244,43 +145,45 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { } }; - // Update id of the item itself + // Update level of the item itself let item_level = self.update(item.id, inherited_item_level); - // Update ids of nested things + // Update levels of nested things match item.node { hir::ItemEnum(ref def, _) => { for variant in &def.variants { let variant_level = self.update(variant.node.data.id(), item_level); for field in variant.node.data.fields() { - self.update(field.node.id, variant_level); + self.update(field.id, variant_level); } } } - hir::ItemImpl(_, _, _, None, _, ref impl_items) => { - for impl_item in impl_items { + hir::ItemImpl(.., None, _, ref impl_item_refs) => { + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); if impl_item.vis == hir::Public { self.update(impl_item.id, item_level); } } } - hir::ItemImpl(_, _, _, Some(_), _, ref impl_items) => { - for impl_item in impl_items { + hir::ItemImpl(.., Some(_), _, ref impl_item_refs) => { + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); self.update(impl_item.id, item_level); } } - hir::ItemTrait(_, _, _, ref trait_items) => { + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { self.update(trait_item.id, item_level); } } - hir::ItemStruct(ref def, _) => { + hir::ItemStruct(ref def, _) | hir::ItemUnion(ref def, _) => { if !def.is_struct() { 
self.update(def.id(), item_level); } for field in def.fields() { - if field.node.kind.visibility() == hir::Public { - self.update(field.node.id, item_level); + if field.vis == hir::Public { + self.update(field.id, item_level); } } } @@ -291,19 +194,92 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { } } } - hir::ItemTy(ref ty, _) if item_level.is_some() => { - if let hir::TyPath(..) = ty.node { - match self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def() { - def::DefPrimTy(..) | def::DefSelfTy(..) | def::DefTyParam(..) => {}, - def => { - if let Some(node_id) = self.tcx.map.as_local_node_id(def.def_id()) { - self.update(node_id, Some(AccessLevel::Reachable)); - } + _ => {} + } + + // Mark all items in interfaces of reachable items as reachable + match item.node { + // The interface is empty + hir::ItemExternCrate(..) => {} + // All nested items are checked by visit_item + hir::ItemMod(..) => {} + // Reexports are handled in visit_mod + hir::ItemUse(..) => {} + // The interface is empty + hir::ItemDefaultImpl(..) => {} + // Visit everything + hir::ItemConst(..) | hir::ItemStatic(..) | + hir::ItemFn(..) | hir::ItemTy(..) => { + if item_level.is_some() { + self.reach(item.id).generics().predicates().item_type(); + } + } + hir::ItemTrait(.., ref trait_items) => { + if item_level.is_some() { + self.reach(item.id).generics().predicates(); + + for trait_item in trait_items { + let mut reach = self.reach(trait_item.id); + reach.generics().predicates(); + + if let hir::TypeTraitItem(_, None) = trait_item.node { + // No type to visit. 
+ } else { + reach.item_type(); + } + } + } + } + // Visit everything except for private impl items + hir::ItemImpl(.., ref trait_ref, _, ref impl_items) => { + if item_level.is_some() { + self.reach(item.id).generics().predicates().impl_trait_ref(); + + for impl_item in impl_items { + let id = impl_item.id.node_id; + if trait_ref.is_some() || self.get(id).is_some() { + self.reach(id).generics().predicates().item_type(); + } + } + } + } + + // Visit everything, but enum variants have their own levels + hir::ItemEnum(ref def, _) => { + if item_level.is_some() { + self.reach(item.id).generics().predicates(); + } + for variant in &def.variants { + if self.get(variant.node.data.id()).is_some() { + for field in variant.node.data.fields() { + self.reach(field.id).item_type(); + } + // Corner case: if the variant is reachable, but its + // enum is not, make the enum reachable as well. + self.update(item.id, Some(AccessLevel::Reachable)); + } + } + } + // Visit everything, but foreign items have their own levels + hir::ItemForeignMod(ref foreign_mod) => { + for foreign_item in &foreign_mod.items { + if self.get(foreign_item.id).is_some() { + self.reach(foreign_item.id).generics().predicates().item_type(); + } + } + } + // Visit everything except for private fields + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { + if item_level.is_some() { + self.reach(item.id).generics().predicates(); + for field in struct_def.fields() { + if self.get(field.id).is_some() { + self.reach(field.id).item_type(); } } } } - _ => {} } let orig_level = self.prev_level; @@ -314,7 +290,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for EmbargoVisitor<'a, 'tcx> { self.prev_level = orig_level; } - fn visit_block(&mut self, b: &'v hir::Block) { + fn visit_block(&mut self, b: &'tcx hir::Block) { let orig_level = replace(&mut self.prev_level, None); // Blocks can have public items, for example impls, but they always @@ -325,583 +301,199 @@ impl<'a, 'tcx, 'v> Visitor<'v> for 
EmbargoVisitor<'a, 'tcx> { self.prev_level = orig_level; } - fn visit_mod(&mut self, m: &hir::Mod, _sp: Span, id: ast::NodeId) { + fn visit_mod(&mut self, m: &'tcx hir::Mod, _sp: Span, id: ast::NodeId) { // This code is here instead of in visit_item so that the // crate module gets processed as well. if self.prev_level.is_some() { if let Some(exports) = self.export_map.get(&id) { for export in exports { - if let Some(node_id) = self.tcx.map.as_local_node_id(export.def_id) { + if let Some(node_id) = self.tcx.map.as_local_node_id(export.def.def_id()) { self.update(node_id, Some(AccessLevel::Exported)); } } } } - intravisit::walk_mod(self, m); + intravisit::walk_mod(self, m, id); } - fn visit_macro_def(&mut self, md: &'v hir::MacroDef) { + fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) { self.update(md.id, Some(AccessLevel::Public)); } -} - -//////////////////////////////////////////////////////////////////////////////// -/// The privacy visitor, where privacy checks take place (violations reported) -//////////////////////////////////////////////////////////////////////////////// - -struct PrivacyVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - curitem: ast::NodeId, - in_foreign: bool, - parents: NodeMap, - external_exports: ExternalExports, -} - -#[derive(Debug)] -enum PrivacyResult { - Allowable, - ExternallyDenied, - DisallowedBy(ast::NodeId), -} - -enum FieldName { - UnnamedField(usize), // index - NamedField(ast::Name), -} - -impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> { - // used when debugging - fn nodestr(&self, id: ast::NodeId) -> String { - self.tcx.map.node_to_string(id).to_string() - } - // Determines whether the given definition is public from the point of view - // of the current item. 
- fn def_privacy(&self, did: DefId) -> PrivacyResult { - let node_id = if let Some(node_id) = self.tcx.map.as_local_node_id(did) { - node_id - } else { - if self.external_exports.contains(&did) { - debug!("privacy - {:?} was externally exported", did); - return Allowable; + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { + if let hir::TyImplTrait(..) = ty.node { + if self.get(ty.id).is_some() { + // Reach the (potentially private) type and the API being exposed. + self.reach(ty.id).item_type().predicates(); } - debug!("privacy - is {:?} a public method", did); - - return match self.tcx.impl_or_trait_items.borrow().get(&did) { - Some(&ty::ConstTraitItem(ref ac)) => { - debug!("privacy - it's a const: {:?}", *ac); - match ac.container { - ty::TraitContainer(id) => { - debug!("privacy - recursing on trait {:?}", id); - self.def_privacy(id) - } - ty::ImplContainer(id) => { - match self.tcx.impl_trait_ref(id) { - Some(t) => { - debug!("privacy - impl of trait {:?}", id); - self.def_privacy(t.def_id) - } - None => { - debug!("privacy - found inherent \ - associated constant {:?}", - ac.vis); - if ac.vis == hir::Public { - Allowable - } else { - ExternallyDenied - } - } - } - } - } - } - Some(&ty::MethodTraitItem(ref meth)) => { - debug!("privacy - well at least it's a method: {:?}", - *meth); - match meth.container { - ty::TraitContainer(id) => { - debug!("privacy - recursing on trait {:?}", id); - self.def_privacy(id) - } - ty::ImplContainer(id) => { - match self.tcx.impl_trait_ref(id) { - Some(t) => { - debug!("privacy - impl of trait {:?}", id); - self.def_privacy(t.def_id) - } - None => { - debug!("privacy - found a method {:?}", - meth.vis); - if meth.vis == hir::Public { - Allowable - } else { - ExternallyDenied - } - } - } - } - } - } - Some(&ty::TypeTraitItem(ref typedef)) => { - match typedef.container { - ty::TraitContainer(id) => { - debug!("privacy - recursing on trait {:?}", id); - self.def_privacy(id) - } - ty::ImplContainer(id) => { - match 
self.tcx.impl_trait_ref(id) { - Some(t) => { - debug!("privacy - impl of trait {:?}", id); - self.def_privacy(t.def_id) - } - None => { - debug!("privacy - found a typedef {:?}", - typedef.vis); - if typedef.vis == hir::Public { - Allowable - } else { - ExternallyDenied - } - } - } - } - } - } - None => { - debug!("privacy - nope, not even a method"); - ExternallyDenied - } - }; - }; - - debug!("privacy - local {} not public all the way down", - self.tcx.map.node_to_string(node_id)); - // return quickly for things in the same module - if self.parents.get(&node_id) == self.parents.get(&self.curitem) { - debug!("privacy - same parent, we're done here"); - return Allowable; } - // We now know that there is at least one private member between the - // destination and the root. - let mut closest_private_id = node_id; - loop { - debug!("privacy - examining {}", self.nodestr(closest_private_id)); - let vis = match self.tcx.map.find(closest_private_id) { - // If this item is a method, then we know for sure that it's an - // actual method and not a static method. The reason for this is - // that these cases are only hit in the ExprMethodCall - // expression, and ExprCall will have its path checked later - // (the path of the trait/impl) if it's a static method. - // - // With this information, then we can completely ignore all - // trait methods. The privacy violation would be if the trait - // couldn't get imported, not if the method couldn't be used - // (all trait methods are public). - // - // However, if this is an impl method, then we dictate this - // decision solely based on the privacy of the method - // invocation. - // FIXME(#10573) is this the right behavior? Why not consider - // where the method was defined? - Some(ast_map::NodeImplItem(ii)) => { - match ii.node { - hir::ImplItemKind::Const(..) | - hir::ImplItemKind::Method(..) => { - let imp = self.tcx.map - .get_parent_did(closest_private_id); - match self.tcx.impl_trait_ref(imp) { - Some(..) 
=> return Allowable, - _ if ii.vis == hir::Public => { - return Allowable - } - _ => ii.vis - } - } - hir::ImplItemKind::Type(_) => return Allowable, - } - } - Some(ast_map::NodeTraitItem(_)) => { - return Allowable; - } - - // This is not a method call, extract the visibility as one - // would normally look at it - Some(ast_map::NodeItem(it)) => it.vis, - Some(ast_map::NodeForeignItem(_)) => { - self.tcx.map.get_foreign_vis(closest_private_id) - } - Some(ast_map::NodeVariant(..)) => { - hir::Public // need to move up a level (to the enum) - } - _ => hir::Public, - }; - if vis != hir::Public { break } - // if we've reached the root, then everything was allowable and this - // access is public. - if closest_private_id == ast::CRATE_NODE_ID { return Allowable } - closest_private_id = *self.parents.get(&closest_private_id).unwrap(); - - // If we reached the top, then we were public all the way down and - // we can allow this access. - if closest_private_id == ast::DUMMY_NODE_ID { return Allowable } - } - debug!("privacy - closest priv {}", self.nodestr(closest_private_id)); - if self.private_accessible(closest_private_id) { - Allowable - } else { - DisallowedBy(closest_private_id) - } + intravisit::walk_ty(self, ty); } +} - /// True if `id` is both local and private-accessible - fn local_private_accessible(&self, did: DefId) -> bool { - if let Some(node_id) = self.tcx.map.as_local_node_id(did) { - self.private_accessible(node_id) - } else { - false - } +impl<'b, 'a, 'tcx> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { + fn generics(&mut self) -> &mut Self { + self.ev.tcx.item_generics(self.item_def_id).visit_with(self); + self } - /// For a local private node in the AST, this function will determine - /// whether the node is accessible by the current module that iteration is - /// inside. 
- fn private_accessible(&self, id: ast::NodeId) -> bool { - let parent = *self.parents.get(&id).unwrap(); - debug!("privacy - accessible parent {}", self.nodestr(parent)); - - // After finding `did`'s closest private member, we roll ourselves back - // to see if this private member's parent is anywhere in our ancestry. - // By the privacy rules, we can access all of our ancestor's private - // members, so that's why we test the parent, and not the did itself. - let mut cur = self.curitem; - loop { - debug!("privacy - questioning {}, {}", self.nodestr(cur), cur); - match cur { - // If the relevant parent is in our history, then we're allowed - // to look inside any of our ancestor's immediate private items, - // so this access is valid. - x if x == parent => return true, - - // If we've reached the root, then we couldn't access this item - // in the first place - ast::DUMMY_NODE_ID => return false, - - // Keep going up - _ => {} - } + fn predicates(&mut self) -> &mut Self { + self.ev.tcx.item_predicates(self.item_def_id).visit_with(self); + self + } - cur = *self.parents.get(&cur).unwrap(); - } + fn item_type(&mut self) -> &mut Self { + self.ev.tcx.item_type(self.item_def_id).visit_with(self); + self } - fn report_error(&self, result: CheckResult) -> bool { - match result { - None => true, - Some((span, msg, note)) => { - let mut err = self.tcx.sess.struct_span_err(span, &msg[..]); - if let Some((span, msg)) = note { - err.span_note(span, &msg[..]); - } - err.emit(); - false - }, - } + fn impl_trait_ref(&mut self) -> &mut Self { + self.ev.tcx.impl_trait_ref(self.item_def_id).visit_with(self); + self } +} - /// Guarantee that a particular definition is public. Returns a CheckResult - /// which contains any errors found. These can be reported using `report_error`. - /// If the result is `None`, no errors were found. 
- fn ensure_public(&self, - span: Span, - to_check: DefId, - source_did: Option, - msg: &str) - -> CheckResult { - debug!("ensure_public(span={:?}, to_check={:?}, source_did={:?}, msg={:?})", - span, to_check, source_did, msg); - let def_privacy = self.def_privacy(to_check); - debug!("ensure_public: def_privacy={:?}", def_privacy); - let id = match def_privacy { - ExternallyDenied => { - return Some((span, format!("{} is private", msg), None)) - } - Allowable => return None, - DisallowedBy(id) => id, +impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + let ty_def_id = match ty.sty { + ty::TyAdt(adt, _) => Some(adt.did), + ty::TyDynamic(ref obj, ..) => obj.principal().map(|p| p.def_id()), + ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id), + ty::TyFnDef(def_id, ..) | + ty::TyAnon(def_id, _) => Some(def_id), + _ => None }; - // If we're disallowed by a particular id, then we attempt to - // give a nice error message to say why it was disallowed. It - // was either because the item itself is private or because - // its parent is private and its parent isn't in our - // ancestry. (Both the item being checked and its parent must - // be local.) - let def_id = source_did.unwrap_or(to_check); - let node_id = self.tcx.map.as_local_node_id(def_id); - let (err_span, err_msg) = if Some(id) == node_id { - return Some((span, format!("{} is private", msg), None)); - } else { - (span, format!("{} is inaccessible", msg)) - }; - let item = match self.tcx.map.find(id) { - Some(ast_map::NodeItem(item)) => { - match item.node { - // If an impl disallowed this item, then this is resolve's - // way of saying that a struct/enum's static method was - // invoked, and the struct/enum itself is private. Crawl - // back up the chains to find the relevant struct/enum that - // was private. - hir::ItemImpl(_, _, _, _, ref ty, _) => { - match ty.node { - hir::TyPath(..) 
=> {} - _ => return Some((err_span, err_msg, None)), - }; - let def = self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def(); - let did = def.def_id(); - let node_id = self.tcx.map.as_local_node_id(did).unwrap(); - match self.tcx.map.get(node_id) { - ast_map::NodeItem(item) => item, - _ => self.tcx.sess.span_bug(item.span, - "path is not an item") - } - } - _ => item - } + if let Some(def_id) = ty_def_id { + if let Some(node_id) = self.ev.tcx.map.as_local_node_id(def_id) { + self.ev.update(node_id, Some(AccessLevel::Reachable)); } - Some(..) | None => return Some((err_span, err_msg, None)), - }; - let desc = match item.node { - hir::ItemMod(..) => "module", - hir::ItemTrait(..) => "trait", - hir::ItemStruct(..) => "struct", - hir::ItemEnum(..) => "enum", - _ => return Some((err_span, err_msg, None)) - }; - let msg = format!("{} `{}` is private", desc, item.name); - Some((err_span, err_msg, Some((span, msg)))) + } + + ty.super_visit_with(self) } - // Checks that a field is in scope. - fn check_field(&mut self, - span: Span, - def: ty::AdtDef<'tcx>, - v: ty::VariantDef<'tcx>, - name: FieldName) { - let field = match name { - NamedField(f_name) => { - debug!("privacy - check named field {} in struct {:?}", f_name, def); - v.field_named(f_name) - } - UnnamedField(idx) => &v.fields[idx] - }; - if field.vis == hir::Public || self.local_private_accessible(field.did) { - return; + fn visit_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) -> bool { + if let Some(node_id) = self.ev.tcx.map.as_local_node_id(trait_ref.def_id) { + let item = self.ev.tcx.map.expect_item(node_id); + self.ev.update(item.id, Some(AccessLevel::Reachable)); } - let struct_desc = match def.adt_kind() { - ty::AdtKind::Struct => - format!("struct `{}`", self.tcx.item_path_str(def.did)), - // struct variant fields have inherited visibility - ty::AdtKind::Enum => return - }; - let msg = match name { - NamedField(name) => format!("field `{}` of {} is private", - name, struct_desc), - 
UnnamedField(idx) => format!("field #{} of {} is private", - idx + 1, struct_desc), - }; - span_err!(self.tcx.sess, span, E0451, - "{}", &msg[..]); + trait_ref.super_visit_with(self) } +} - // Given the ID of a method, checks to ensure it's in scope. - fn check_static_method(&mut self, - span: Span, - method_id: DefId, - name: ast::Name) { - self.report_error(self.ensure_public(span, - method_id, - None, - &format!("method `{}`", - name))); +//////////////////////////////////////////////////////////////////////////////// +/// The privacy visitor, where privacy checks take place (violations reported) +//////////////////////////////////////////////////////////////////////////////// + +struct PrivacyVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + curitem: ast::NodeId, + in_foreign: bool, +} + +impl<'a, 'tcx> PrivacyVisitor<'a, 'tcx> { + fn item_is_accessible(&self, did: DefId) -> bool { + match self.tcx.map.as_local_node_id(did) { + Some(node_id) => + ty::Visibility::from_hir(&self.tcx.map.expect_item(node_id).vis, node_id, self.tcx), + None => self.tcx.sess.cstore.visibility(did), + }.is_accessible_from(self.curitem, &self.tcx.map) } - // Checks that a path is in scope. - fn check_path(&mut self, span: Span, path_id: ast::NodeId, last: ast::Name) { - debug!("privacy - path {}", self.nodestr(path_id)); - let path_res = *self.tcx.def_map.borrow().get(&path_id).unwrap(); - let ck = |tyname: &str| { - let ck_public = |def: DefId| { - debug!("privacy - ck_public {:?}", def); - let origdid = path_res.def_id(); - self.ensure_public(span, - def, - Some(origdid), - &format!("{} `{}`", tyname, last)) - }; - - match path_res.last_private { - LastMod(AllPublic) => {}, - LastMod(DependsOn(def)) => { - self.report_error(ck_public(def)); - }, - LastImport { value_priv, - value_used: check_value, - type_priv, - type_used: check_type } => { - // This dance with found_error is because we don't want to - // report a privacy error twice for the same directive. 
- let found_error = match (type_priv, check_type) { - (Some(DependsOn(def)), Used) => { - !self.report_error(ck_public(def)) - }, - _ => false, - }; - if !found_error { - match (value_priv, check_value) { - (Some(DependsOn(def)), Used) => { - self.report_error(ck_public(def)); - }, - _ => {}, - } - } - // If an import is not used in either namespace, we still - // want to check that it could be legal. Therefore we check - // in both namespaces and only report an error if both would - // be illegal. We only report one error, even if it is - // illegal to import from both namespaces. - match (value_priv, check_value, type_priv, check_type) { - (Some(p), Unused, None, _) | - (None, _, Some(p), Unused) => { - let p = match p { - AllPublic => None, - DependsOn(def) => ck_public(def), - }; - if p.is_some() { - self.report_error(p); - } - }, - (Some(v), Unused, Some(t), Unused) => { - let v = match v { - AllPublic => None, - DependsOn(def) => ck_public(def), - }; - let t = match t { - AllPublic => None, - DependsOn(def) => ck_public(def), - }; - if let (Some(_), Some(t)) = (v, t) { - self.report_error(Some(t)); - } - }, - _ => {}, - } - }, - } - }; - // FIXME(#12334) Imports can refer to definitions in both the type and - // value namespaces. The privacy information is aware of this, but the - // def map is not. Therefore the names we work out below will not always - // be accurate and we can get slightly wonky error messages (but type - // checking is always correct). - match path_res.full_def() { - def::DefFn(..) => ck("function"), - def::DefStatic(..) => ck("static"), - def::DefConst(..) => ck("const"), - def::DefAssociatedConst(..) => ck("associated const"), - def::DefVariant(..) => ck("variant"), - def::DefTy(_, false) => ck("type"), - def::DefTy(_, true) => ck("enum"), - def::DefTrait(..) => ck("trait"), - def::DefStruct(..) => ck("struct"), - def::DefMethod(..) => ck("method"), - def::DefMod(..) => ck("module"), - _ => {} + // Checks that a field is in scope. 
+ fn check_field(&mut self, span: Span, def: &'tcx ty::AdtDef, field: &'tcx ty::FieldDef) { + if !def.is_enum() && !field.vis.is_accessible_from(self.curitem, &self.tcx.map) { + struct_span_err!(self.tcx.sess, span, E0451, "field `{}` of {} `{}` is private", + field.name, def.variant_descr(), self.tcx.item_path_str(def.did)) + .span_label(span, &format!("field `{}` is private", field.name)) + .emit(); } } // Checks that a method is in scope. - fn check_method(&mut self, span: Span, method_def_id: DefId, - name: ast::Name) { - match self.tcx.impl_or_trait_item(method_def_id).container() { - ty::ImplContainer(_) => { - self.check_static_method(span, method_def_id, name) - } + fn check_method(&mut self, span: Span, method_def_id: DefId) { + match self.tcx.associated_item(method_def_id).container { // Trait methods are always all public. The only controlling factor // is whether the trait itself is accessible or not. - ty::TraitContainer(trait_def_id) => { - self.report_error(self.ensure_public(span, trait_def_id, - None, "source trait")); + ty::TraitContainer(trait_def_id) if !self.item_is_accessible(trait_def_id) => { + let msg = format!("source trait `{}` is private", + self.tcx.item_path_str(trait_def_id)); + self.tcx.sess.span_err(span, &msg); } + _ => {} } } } -impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for PrivacyVisitor<'a, 'tcx> { /// We want to visit items in the context of their containing /// module and so forth, so supply a crate for doing a deep walk. 
- fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, item: &hir::Item) { + fn visit_item(&mut self, item: &'tcx hir::Item) { let orig_curitem = replace(&mut self.curitem, item.id); intravisit::walk_item(self, item); self.curitem = orig_curitem; } - fn visit_expr(&mut self, expr: &hir::Expr) { + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { match expr.node { - hir::ExprField(ref base, name) => { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(&**base).sty { - self.check_field(expr.span, - def, - def.struct_variant(), - NamedField(name.node)); - } - } - hir::ExprTupField(ref base, idx) => { - if let ty::TyStruct(def, _) = self.tcx.expr_ty_adjusted(&**base).sty { - self.check_field(expr.span, - def, - def.struct_variant(), - UnnamedField(idx.node)); - } - } - hir::ExprMethodCall(name, _, _) => { + hir::ExprMethodCall(..) => { let method_call = ty::MethodCall::expr(expr.id); - let method = self.tcx.tables.borrow().method_map[&method_call]; - debug!("(privacy checking) checking impl method"); - self.check_method(expr.span, method.def_id, name.node); + let method = self.tcx.tables().method_map[&method_call]; + self.check_method(expr.span, method.def_id); } - hir::ExprStruct(..) => { - let adt = self.tcx.expr_ty(expr).ty_adt_def().unwrap(); - let variant = adt.variant_of_def(self.tcx.resolve_expr(expr)); + hir::ExprStruct(ref qpath, ref expr_fields, _) => { + let def = self.tcx.tables().qpath_def(qpath, expr.id); + let adt = self.tcx.tables().expr_ty(expr).ty_adt_def().unwrap(); + let variant = adt.variant_of_def(def); // RFC 736: ensure all unmentioned fields are visible. // Rather than computing the set of unmentioned fields - // (i.e. `all_fields - fields`), just check them all. 
- for field in &variant.fields { - self.check_field(expr.span, adt, variant, NamedField(field.name)); + // (i.e. `all_fields - fields`), just check them all, + // unless the ADT is a union, then unmentioned fields + // are not checked. + if adt.is_union() { + for expr_field in expr_fields { + self.check_field(expr.span, adt, variant.field_named(expr_field.name.node)); + } + } else { + for field in &variant.fields { + let expr_field = expr_fields.iter().find(|f| f.name.node == field.name); + let span = if let Some(f) = expr_field { f.span } else { expr.span }; + self.check_field(span, adt, field); + } } } - hir::ExprPath(..) => { - - if let def::DefStruct(_) = self.tcx.resolve_expr(expr) { - let expr_ty = self.tcx.expr_ty(expr); - let def = match expr_ty.sty { - ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { - output: ty::FnConverging(ty), .. - }), ..}) => ty, - _ => expr_ty - }.ty_adt_def().unwrap(); - let any_priv = def.struct_variant().fields.iter().any(|f| { - f.vis != hir::Public && !self.local_private_accessible(f.did) - }); - if any_priv { - span_err!(self.tcx.sess, expr.span, E0450, - "cannot invoke tuple struct constructor with private \ - fields"); + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + if let Def::StructCtor(_, CtorKind::Fn) = path.def { + let adt_def = self.tcx.expect_variant_def(path.def); + let private_indexes = adt_def.fields.iter().enumerate().filter(|&(_, field)| { + !field.vis.is_accessible_from(self.curitem, &self.tcx.map) + }).map(|(i, _)| i).collect::>(); + + if !private_indexes.is_empty() { + let mut error = struct_span_err!(self.tcx.sess, expr.span, E0450, + "cannot invoke tuple struct constructor \ + with private fields"); + error.span_label(expr.span, + &format!("cannot construct with a private field")); + + if let Some(node_id) = self.tcx.map.as_local_node_id(adt_def.did) { + let node = self.tcx.map.find(node_id); + if let Some(hir::map::NodeStructCtor(vdata)) = node { + for i in private_indexes { + 
error.span_label(vdata.fields()[i].span, + &format!("private field declared here")); + } + } + } + error.emit(); } } } @@ -911,7 +503,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { intravisit::walk_expr(self, expr); } - fn visit_pat(&mut self, pattern: &hir::Pat) { + fn visit_pat(&mut self, pattern: &'tcx hir::Pat) { // Foreign functions do not have their patterns mapped in the def_map, // and there's nothing really relevant there anyway, so don't bother // checking privacy. If you can name the type then you can pass it to an @@ -919,37 +511,28 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { if self.in_foreign { return } match pattern.node { - hir::PatStruct(_, ref fields, _) => { - let adt = self.tcx.pat_ty(pattern).ty_adt_def().unwrap(); - let def = self.tcx.def_map.borrow().get(&pattern.id).unwrap().full_def(); + PatKind::Struct(ref qpath, ref fields, _) => { + let def = self.tcx.tables().qpath_def(qpath, pattern.id); + let adt = self.tcx.tables().pat_ty(pattern).ty_adt_def().unwrap(); let variant = adt.variant_of_def(def); for field in fields { - self.check_field(pattern.span, adt, variant, - NamedField(field.node.name)); + self.check_field(field.span, adt, variant.field_named(field.node.name)); } } - - // Patterns which bind no fields are allowable (the path is check - // elsewhere). 
- hir::PatEnum(_, Some(ref fields)) => { - match self.tcx.pat_ty(pattern).sty { - ty::TyStruct(def, _) => { - for (i, field) in fields.iter().enumerate() { - if let hir::PatWild = field.node { + PatKind::TupleStruct(_, ref fields, ddpos) => { + match self.tcx.tables().pat_ty(pattern).sty { + // enum fields have no privacy at this time + ty::TyAdt(def, _) if !def.is_enum() => { + let expected_len = def.struct_variant().fields.len(); + for (i, field) in fields.iter().enumerate_and_adjust(expected_len, ddpos) { + if let PatKind::Wild = field.node { continue } - self.check_field(field.span, - def, - def.struct_variant(), - UnnamedField(i)); + self.check_field(field.span, def, &def.struct_variant().fields[i]); } } - ty::TyEnum(..) => { - // enum fields have no privacy at this time - } _ => {} } - } _ => {} } @@ -957,149 +540,11 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivacyVisitor<'a, 'tcx> { intravisit::walk_pat(self, pattern); } - fn visit_foreign_item(&mut self, fi: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, fi: &'tcx hir::ForeignItem) { self.in_foreign = true; intravisit::walk_foreign_item(self, fi); self.in_foreign = false; } - - fn visit_path(&mut self, path: &hir::Path, id: ast::NodeId) { - if !path.segments.is_empty() { - self.check_path(path.span, id, path.segments.last().unwrap().identifier.name); - intravisit::walk_path(self, path); - } - } - - fn visit_path_list_item(&mut self, prefix: &hir::Path, item: &hir::PathListItem) { - let name = if let hir::PathListIdent { name, .. 
} = item.node { - name - } else if !prefix.segments.is_empty() { - prefix.segments.last().unwrap().identifier.name - } else { - self.tcx.sess.bug("`self` import in an import list with empty prefix"); - }; - self.check_path(item.span, item.node.id(), name); - intravisit::walk_path_list_item(self, prefix, item); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/// The privacy sanity check visitor, ensures unnecessary visibility isn't here -//////////////////////////////////////////////////////////////////////////////// - -struct SanePrivacyVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - in_block: bool, -} - -impl<'a, 'tcx, 'v> Visitor<'v> for SanePrivacyVisitor<'a, 'tcx> { - /// We want to visit items in the context of their containing - /// module and so forth, so supply a crate for doing a deep walk. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) - } - - fn visit_item(&mut self, item: &hir::Item) { - self.check_sane_privacy(item); - if self.in_block { - self.check_all_inherited(item); - } - - let orig_in_block = self.in_block; - - // Modules turn privacy back on, otherwise we inherit - self.in_block = if let hir::ItemMod(..) = item.node { false } else { orig_in_block }; - - intravisit::walk_item(self, item); - self.in_block = orig_in_block; - } - - fn visit_block(&mut self, b: &'v hir::Block) { - let orig_in_block = replace(&mut self.in_block, true); - intravisit::walk_block(self, b); - self.in_block = orig_in_block; - } -} - -impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> { - /// Validates all of the visibility qualifiers placed on the item given. This - /// ensures that there are no extraneous qualifiers that don't actually do - /// anything. In theory these qualifiers wouldn't parse, but that may happen - /// later on down the road... 
- fn check_sane_privacy(&self, item: &hir::Item) { - let check_inherited = |sp, vis, note: &str| { - if vis != hir::Inherited { - let mut err = struct_span_err!(self.tcx.sess, sp, E0449, - "unnecessary visibility qualifier"); - if !note.is_empty() { - err.span_note(sp, note); - } - err.emit(); - } - }; - - match item.node { - // implementations of traits don't need visibility qualifiers because - // that's controlled by having the trait in scope. - hir::ItemImpl(_, _, _, Some(..), _, ref impl_items) => { - check_inherited(item.span, item.vis, - "visibility qualifiers have no effect on trait impls"); - for impl_item in impl_items { - check_inherited(impl_item.span, impl_item.vis, ""); - } - } - hir::ItemImpl(_, _, _, None, _, _) => { - check_inherited(item.span, item.vis, - "place qualifiers on individual methods instead"); - } - hir::ItemDefaultImpl(..) => { - check_inherited(item.span, item.vis, - "visibility qualifiers have no effect on trait impls"); - } - hir::ItemForeignMod(..) => { - check_inherited(item.span, item.vis, - "place qualifiers on individual functions instead"); - } - hir::ItemStruct(..) | hir::ItemEnum(..) | hir::ItemTrait(..) | - hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) | - hir::ItemMod(..) | hir::ItemExternCrate(..) | - hir::ItemUse(..) | hir::ItemTy(..) 
=> {} - } - } - - /// When inside of something like a function or a method, visibility has no - /// control over anything so this forbids any mention of any visibility - fn check_all_inherited(&self, item: &hir::Item) { - let check_inherited = |sp, vis| { - if vis != hir::Inherited { - span_err!(self.tcx.sess, sp, E0447, - "visibility has no effect inside functions or block expressions"); - } - }; - - check_inherited(item.span, item.vis); - match item.node { - hir::ItemImpl(_, _, _, _, _, ref impl_items) => { - for impl_item in impl_items { - check_inherited(impl_item.span, impl_item.vis); - } - } - hir::ItemForeignMod(ref fm) => { - for fi in &fm.items { - check_inherited(fi.span, fi.vis); - } - } - hir::ItemStruct(ref vdata, _) => { - for f in vdata.fields() { - check_inherited(f.span, f.node.kind.visibility()); - } - } - hir::ItemDefaultImpl(..) | hir::ItemEnum(..) | hir::ItemTrait(..) | - hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) | - hir::ItemMod(..) | hir::ItemExternCrate(..) | - hir::ItemUse(..) | hir::ItemTy(..) => {} - } - } } /////////////////////////////////////////////////////////////////////////////// @@ -1110,7 +555,7 @@ impl<'a, 'tcx> SanePrivacyVisitor<'a, 'tcx> { /////////////////////////////////////////////////////////////////////////////// struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &'a AccessLevels, in_variant: bool, // set of errors produced by this obsolete visitor @@ -1129,11 +574,10 @@ struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b: 'a, 'tcx: 'b> { } impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { - fn path_is_private_type(&self, path_id: ast::NodeId) -> bool { - let did = match self.tcx.def_map.borrow().get(&path_id).map(|d| d.full_def()) { - // `int` etc. (None doesn't seem to occur.) 
- None | Some(def::DefPrimTy(..)) | Some(def::DefSelfTy(..)) => return false, - Some(def) => def.def_id(), + fn path_is_private_type(&self, path: &hir::Path) -> bool { + let did = match path.def { + Def::PrimTy(..) | Def::SelfTy(..) => return false, + def => def.def_id(), }; // A path can only be private if: @@ -1142,7 +586,7 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { // .. and it corresponds to a private type in the AST (this returns // None for type parameters) match self.tcx.map.find(node_id) { - Some(ast_map::NodeItem(ref item)) => item.vis != hir::Public, + Some(hir::map::NodeItem(ref item)) => item.vis != hir::Public, Some(_) | None => false, } } else { @@ -1159,26 +603,33 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { fn check_ty_param_bound(&mut self, ty_param_bound: &hir::TyParamBound) { if let hir::TraitTyParamBound(ref trait_ref, _) = *ty_param_bound { - if self.path_is_private_type(trait_ref.trait_ref.ref_id) { + if self.path_is_private_type(&trait_ref.trait_ref.path) { self.old_error_set.insert(trait_ref.trait_ref.ref_id); } } } - fn item_is_public(&self, id: &ast::NodeId, vis: hir::Visibility) -> bool { - self.access_levels.is_reachable(*id) || vis == hir::Public + fn item_is_public(&self, id: &ast::NodeId, vis: &hir::Visibility) -> bool { + self.access_levels.is_reachable(*id) || *vis == hir::Public } } impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None + } + fn visit_ty(&mut self, ty: &hir::Ty) { - if let hir::TyPath(..) = ty.node { - if self.inner.path_is_private_type(ty.id) { + if let hir::TyPath(hir::QPath::Resolved(_, ref path)) = ty.node { + if self.inner.path_is_private_type(path) { self.contains_private = true; // found what we're looking for so let's stop // working. 
return - } else if self.at_outer_type { + } + } + if let hir::TyPath(_) = ty.node { + if self.at_outer_type { self.outer_type_is_public_path = true; } } @@ -1190,14 +641,14 @@ impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a fn visit_expr(&mut self, _: &hir::Expr) {} } -impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { +impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { /// We want to visit items in the context of their containing /// module and so forth, so supply a crate for doing a deep walk. - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.tcx.map.expect_item(item.id)) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::All(&self.tcx.map) } - fn visit_item(&mut self, item: &hir::Item) { + fn visit_item(&mut self, item: &'tcx hir::Item) { match item.node { // contents of a private mod can be reexported, so we need // to check internals. @@ -1207,7 +658,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // namespace (the contents have their own privacies). hir::ItemForeignMod(_) => {} - hir::ItemTrait(_, _, ref bounds, _) => { + hir::ItemTrait(.., ref bounds, _) => { if !self.trait_is_public(item.id) { return } @@ -1222,7 +673,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // (i.e. we could just return here to not check them at // all, or some worse estimation of whether an impl is // publicly visible). - hir::ItemImpl(_, _, ref g, ref trait_ref, ref self_, ref impl_items) => { + hir::ItemImpl(.., ref g, ref trait_ref, ref self_, ref impl_item_refs) => { // `impl [... for] Private` is never visible. let self_contains_private; // impl [... for] Public<...>, but not `impl [... 
for] @@ -1237,7 +688,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> at_outer_type: true, outer_type_is_public_path: false, }; - visitor.visit_ty(&**self_); + visitor.visit_ty(&self_); self_contains_private = visitor.contains_private; self_is_public_path = visitor.outer_type_is_public_path; } @@ -1248,7 +699,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> let not_private_trait = trait_ref.as_ref().map_or(true, // no trait counts as public trait |tr| { - let did = self.tcx.trait_ref_to_def_id(tr); + let did = tr.path.def.def_id(); if let Some(node_id) = self.tcx.map.as_local_node_id(did) { self.trait_is_public(node_id) @@ -1267,16 +718,17 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // are private (because `T` won't be visible externally). let trait_or_some_public_method = trait_ref.is_some() || - impl_items.iter() - .any(|impl_item| { - match impl_item.node { - hir::ImplItemKind::Const(..) | - hir::ImplItemKind::Method(..) => { - self.access_levels.is_reachable(impl_item.id) - } - hir::ImplItemKind::Type(_) => false, - } - }); + impl_item_refs.iter() + .any(|impl_item_ref| { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); + match impl_item.node { + hir::ImplItemKind::Const(..) | + hir::ImplItemKind::Method(..) => { + self.access_levels.is_reachable(impl_item.id) + } + hir::ImplItemKind::Type(_) => false, + } + }); if !self_contains_private && not_private_trait && @@ -1286,16 +738,17 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> match *trait_ref { None => { - for impl_item in impl_items { + for impl_item_ref in impl_item_refs { // This is where we choose whether to walk down // further into the impl to check its items. We // should only walk into public items so that we // don't erroneously report errors for private // types in private items. 
+ let impl_item = self.tcx.map.impl_item(impl_item_ref.id); match impl_item.node { hir::ImplItemKind::Const(..) | hir::ImplItemKind::Method(..) - if self.item_is_public(&impl_item.id, impl_item.vis) => + if self.item_is_public(&impl_item.id, &impl_item.vis) => { intravisit::walk_impl_item(self, impl_item) } @@ -1323,7 +776,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> intravisit::walk_path(self, &tr.path); // Those in 3. are warned with this call. - for impl_item in impl_items { + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); if let hir::ImplItemKind::Type(ref ty) = impl_item.node { self.visit_ty(ty); } @@ -1334,17 +788,18 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // impl Public { ... }. Any public static // methods will be visible as `Public::foo`. let mut found_pub_static = false; - for impl_item in impl_items { + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); match impl_item.node { hir::ImplItemKind::Const(..) => { - if self.item_is_public(&impl_item.id, impl_item.vis) { + if self.item_is_public(&impl_item.id, &impl_item.vis) { found_pub_static = true; intravisit::walk_impl_item(self, impl_item); } } hir::ImplItemKind::Method(ref sig, _) => { - if sig.explicit_self.node == hir::SelfStatic && - self.item_is_public(&impl_item.id, impl_item.vis) { + if !sig.decl.has_self() && + self.item_is_public(&impl_item.id, &impl_item.vis) { found_pub_static = true; intravisit::walk_impl_item(self, impl_item); } @@ -1364,7 +819,7 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> hir::ItemTy(..) 
=> return, // not at all public, so we don't care - _ if !self.item_is_public(&item.id, item.vis) => { + _ if !self.item_is_public(&item.id, &item.vis) => { return; } @@ -1375,11 +830,10 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // any `visit_ty`'s will be called on things that are in // public signatures, i.e. things that we're interested in for // this visitor. - debug!("VisiblePrivateTypesVisitor entering item {:?}", item); intravisit::walk_item(self, item); } - fn visit_generics(&mut self, generics: &hir::Generics) { + fn visit_generics(&mut self, generics: &'tcx hir::Generics) { for ty_param in generics.ty_params.iter() { for bound in ty_param.bounds.iter() { self.check_ty_param_bound(bound) @@ -1394,29 +848,31 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> } &hir::WherePredicate::RegionPredicate(_) => {} &hir::WherePredicate::EqPredicate(ref eq_pred) => { - self.visit_ty(&*eq_pred.ty); + self.visit_ty(&eq_pred.ty); } } } } - fn visit_foreign_item(&mut self, item: &hir::ForeignItem) { + fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { if self.access_levels.is_reachable(item.id) { intravisit::walk_foreign_item(self, item) } } - fn visit_ty(&mut self, t: &hir::Ty) { - debug!("VisiblePrivateTypesVisitor checking ty {:?}", t); - if let hir::TyPath(..) 
= t.node { - if self.path_is_private_type(t.id) { + fn visit_ty(&mut self, t: &'tcx hir::Ty) { + if let hir::TyPath(hir::QPath::Resolved(_, ref path)) = t.node { + if self.path_is_private_type(path) { self.old_error_set.insert(t.id); } } intravisit::walk_ty(self, t) } - fn visit_variant(&mut self, v: &hir::Variant, g: &hir::Generics, item_id: ast::NodeId) { + fn visit_variant(&mut self, + v: &'tcx hir::Variant, + g: &'tcx hir::Generics, + item_id: ast::NodeId) { if self.access_levels.is_reachable(v.node.data.id()) { self.in_variant = true; intravisit::walk_variant(self, v, g, item_id); @@ -1424,11 +880,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> } } - fn visit_struct_field(&mut self, s: &hir::StructField) { - let vis = match s.node.kind { - hir::NamedField(_, vis) | hir::UnnamedField(vis) => vis - }; - if vis == hir::Public || self.in_variant { + fn visit_struct_field(&mut self, s: &'tcx hir::StructField) { + if s.vis == hir::Public || self.in_variant { intravisit::walk_struct_field(self, s); } } @@ -1437,11 +890,8 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> // expression/block context can't possibly contain exported things. // (Making them no-ops stops us from traversing the whole AST without // having to be super careful about our `walk_...` calls above.) - // FIXME(#29524): Unfortunately this ^^^ is not true, blocks can contain - // exported items (e.g. 
impls) and actual code in rustc itself breaks - // if we don't traverse blocks in `EmbargoVisitor` - fn visit_block(&mut self, _: &hir::Block) {} - fn visit_expr(&mut self, _: &hir::Expr) {} + fn visit_block(&mut self, _: &'tcx hir::Block) {} + fn visit_expr(&mut self, _: &'tcx hir::Expr) {} } /////////////////////////////////////////////////////////////////////////////// @@ -1452,166 +902,180 @@ impl<'a, 'tcx, 'v> Visitor<'v> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> /////////////////////////////////////////////////////////////////////////////// struct SearchInterfaceForPrivateItemsVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - // Do not report an error when a private type is found - is_quiet: bool, - // Is private component found? - is_public: bool, - old_error_set: &'a NodeSet, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + item_def_id: DefId, + span: Span, + /// The visitor checks that each component type is at least this visible + required_visibility: ty::Visibility, + /// The visibility of the least visible component that has been visited + min_visibility: ty::Visibility, + has_old_errors: bool, } impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { - // Check if the type alias contain private types when substituted - fn is_public_type_alias(&self, item: &hir::Item, path: &hir::Path) -> bool { - // We substitute type aliases only when determining impl publicity - // FIXME: This will probably change and all type aliases will be substituted, - // requires an amendment to RFC 136. - if !self.is_quiet { - return false - } - // Type alias is considered public if the aliased type is - // public, even if the type alias itself is private. So, something - // like `type A = u8; pub fn f() -> A {...}` doesn't cause an error. 
- if let hir::ItemTy(ref ty, ref generics) = item.node { - let mut check = SearchInterfaceForPrivateItemsVisitor { is_public: true, ..*self }; - check.visit_ty(ty); - // If a private type alias with default type parameters is used in public - // interface we must ensure, that the defaults are public if they are actually used. - // ``` - // type Alias = T; - // pub fn f() -> Alias {...} // `Private` is implicitly used here, so it must be public - // ``` - let provided_params = path.segments.last().unwrap().parameters.types().len(); - for ty_param in &generics.ty_params[provided_params..] { - if let Some(ref default_ty) = ty_param.default { - check.visit_ty(default_ty); - } - } - check.is_public - } else { - false - } + fn generics(&mut self) -> &mut Self { + self.tcx.item_generics(self.item_def_id).visit_with(self); + self + } + + fn predicates(&mut self) -> &mut Self { + self.tcx.item_predicates(self.item_def_id).visit_with(self); + self + } + + fn item_type(&mut self) -> &mut Self { + self.tcx.item_type(self.item_def_id).visit_with(self); + self + } + + fn impl_trait_ref(&mut self) -> &mut Self { + self.tcx.impl_trait_ref(self.item_def_id).visit_with(self); + self } } -impl<'a, 'tcx: 'a, 'v> Visitor<'v> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { - fn visit_ty(&mut self, ty: &hir::Ty) { - if self.is_quiet && !self.is_public { - // We are in quiet mode and a private type is already found, no need to proceed - return - } - if let hir::TyPath(_, ref path) = ty.node { - let def = self.tcx.def_map.borrow().get(&ty.id).unwrap().full_def(); - match def { - def::DefPrimTy(..) | def::DefSelfTy(..) | def::DefTyParam(..) => { - // Public - } - def::DefAssociatedTy(..) if self.is_quiet => { +impl<'a, 'tcx: 'a> TypeVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + let ty_def_id = match ty.sty { + ty::TyAdt(adt, _) => Some(adt.did), + ty::TyDynamic(ref obj, ..) 
=> obj.principal().map(|p| p.def_id()), + ty::TyProjection(ref proj) => { + if self.required_visibility == ty::Visibility::PrivateExternal { // Conservatively approximate the whole type alias as public without // recursing into its components when determining impl publicity. // For example, `impl ::Alias {...}` may be a public impl // even if both `Type` and `Trait` are private. // Ideally, associated types should be substituted in the same way as // free type aliases, but this isn't done yet. - return + return false; } - def::DefStruct(def_id) | def::DefTy(def_id, _) | - def::DefTrait(def_id) | def::DefAssociatedTy(def_id, _) => { - // Non-local means public (private items can't leave their crate, modulo bugs) - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { - let item = self.tcx.map.expect_item(node_id); - if item.vis != hir::Public && !self.is_public_type_alias(item, path) { - if !self.is_quiet { - if self.old_error_set.contains(&ty.id) { - span_err!(self.tcx.sess, ty.span, E0446, - "private type in public interface"); - } else { - self.tcx.sess.add_lint ( - lint::builtin::PRIVATE_IN_PUBLIC, - node_id, - ty.span, - format!("private type in public interface"), - ); - } - } - self.is_public = false; - } + + Some(proj.trait_ref.def_id) + } + _ => None + }; + + if let Some(def_id) = ty_def_id { + // Non-local means public (private items can't leave their crate, modulo bugs) + if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { + let item = self.tcx.map.expect_item(node_id); + let vis = ty::Visibility::from_hir(&item.vis, node_id, self.tcx); + + if !vis.is_at_least(self.min_visibility, &self.tcx.map) { + self.min_visibility = vis; + } + if !vis.is_at_least(self.required_visibility, &self.tcx.map) { + if self.tcx.sess.features.borrow().pub_restricted || self.has_old_errors { + let mut err = struct_span_err!(self.tcx.sess, self.span, E0446, + "private type `{}` in public interface", ty); + err.span_label(self.span, &format!("can't leak 
private type")); + err.emit(); + } else { + self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, + node_id, + self.span, + format!("private type `{}` in public \ + interface (error E0446)", ty)); } } - _ => {} } } - intravisit::walk_ty(self, ty); + if let ty::TyProjection(ref proj) = ty.sty { + // Avoid calling `visit_trait_ref` below on the trait, + // as we have already checked the trait itself above. + proj.trait_ref.super_visit_with(self) + } else { + ty.super_visit_with(self) + } } - fn visit_trait_ref(&mut self, trait_ref: &hir::TraitRef) { - if self.is_quiet && !self.is_public { - // We are in quiet mode and a private type is already found, no need to proceed - return - } + fn visit_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) -> bool { // Non-local means public (private items can't leave their crate, modulo bugs) - let def_id = self.tcx.trait_ref_to_def_id(trait_ref); - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { + if let Some(node_id) = self.tcx.map.as_local_node_id(trait_ref.def_id) { let item = self.tcx.map.expect_item(node_id); - if item.vis != hir::Public { - if !self.is_quiet { - if self.old_error_set.contains(&trait_ref.ref_id) { - span_err!(self.tcx.sess, trait_ref.path.span, E0445, - "private trait in public interface"); - } else { - self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, - node_id, - trait_ref.path.span, - "private trait in public interface (error E0445)" - .to_string()); - } + let vis = ty::Visibility::from_hir(&item.vis, node_id, self.tcx); + + if !vis.is_at_least(self.min_visibility, &self.tcx.map) { + self.min_visibility = vis; + } + if !vis.is_at_least(self.required_visibility, &self.tcx.map) { + if self.tcx.sess.features.borrow().pub_restricted || self.has_old_errors { + struct_span_err!(self.tcx.sess, self.span, E0445, + "private trait `{}` in public interface", trait_ref) + .span_label(self.span, &format!( + "private trait can't be public")) + .emit(); + } else { + 
self.tcx.sess.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, + node_id, + self.span, + format!("private trait `{}` in public \ + interface (error E0445)", trait_ref)); } - self.is_public = false; } } - intravisit::walk_trait_ref(self, trait_ref); + trait_ref.super_visit_with(self) } - - // Don't recurse into function bodies - fn visit_block(&mut self, _: &hir::Block) {} - // Don't recurse into expressions in array sizes or const initializers - fn visit_expr(&mut self, _: &hir::Expr) {} - // Don't recurse into patterns in function arguments - fn visit_pat(&mut self, _: &hir::Pat) {} } struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, + tcx: TyCtxt<'a, 'tcx, 'tcx>, old_error_set: &'a NodeSet, + inner_visibility: ty::Visibility, } impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { - // A type is considered public if it doesn't contain any private components - fn is_public_ty(&self, ty: &hir::Ty) -> bool { - let mut check = SearchInterfaceForPrivateItemsVisitor { - tcx: self.tcx, is_quiet: true, is_public: true, old_error_set: self.old_error_set - }; - check.visit_ty(ty); - check.is_public - } + fn check(&self, item_id: ast::NodeId, required_visibility: ty::Visibility) + -> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { + let mut has_old_errors = false; + + // Slow path taken only if there any errors in the crate. + for &id in self.old_error_set { + // Walk up the nodes until we find `item_id` (or we hit a root). 
+ let mut id = id; + loop { + if id == item_id { + has_old_errors = true; + break; + } + let parent = self.tcx.map.get_parent_node(id); + if parent == id { + break; + } + id = parent; + } - // A trait reference is considered public if it doesn't contain any private components - fn is_public_trait_ref(&self, trait_ref: &hir::TraitRef) -> bool { - let mut check = SearchInterfaceForPrivateItemsVisitor { - tcx: self.tcx, is_quiet: true, is_public: true, old_error_set: self.old_error_set - }; - check.visit_trait_ref(trait_ref); - check.is_public + if has_old_errors { + break; + } + } + + SearchInterfaceForPrivateItemsVisitor { + tcx: self.tcx, + item_def_id: self.tcx.map.local_def_id(item_id), + span: self.tcx.map.span(item_id), + min_visibility: ty::Visibility::Public, + required_visibility: required_visibility, + has_old_errors: has_old_errors, + } } } -impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { - fn visit_item(&mut self, item: &hir::Item) { - let mut check = SearchInterfaceForPrivateItemsVisitor { - tcx: self.tcx, is_quiet: false, is_public: true, old_error_set: self.old_error_set +impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + let tcx = self.tcx; + let min = |vis1: ty::Visibility, vis2| { + if vis1.is_at_least(vis2, &tcx.map) { vis2 } else { vis1 } }; + + let item_visibility = ty::Visibility::from_hir(&item.vis, item.id, tcx); + match item.node { // Crates are always public hir::ItemExternCrate(..) => {} @@ -1621,89 +1085,127 @@ impl<'a, 'tcx, 'v> Visitor<'v> for PrivateItemsInPublicInterfacesVisitor<'a, 'tc hir::ItemUse(..) => {} // Subitems of these items have inherited publicity hir::ItemConst(..) | hir::ItemStatic(..) | hir::ItemFn(..) | - hir::ItemEnum(..) | hir::ItemTrait(..) | hir::ItemTy(..) 
=> { - if item.vis == hir::Public { - check.visit_item(item); + hir::ItemTy(..) => { + self.check(item.id, item_visibility).generics().predicates().item_type(); + + // Recurse for e.g. `impl Trait` (see `visit_ty`). + self.inner_visibility = item_visibility; + intravisit::walk_item(self, item); + } + hir::ItemTrait(.., ref trait_items) => { + self.check(item.id, item_visibility).generics().predicates(); + + for trait_item in trait_items { + let mut check = self.check(trait_item.id, item_visibility); + check.generics().predicates(); + + if let hir::TypeTraitItem(_, None) = trait_item.node { + // No type to visit. + } else { + check.item_type(); + } + } + } + hir::ItemEnum(ref def, _) => { + self.check(item.id, item_visibility).generics().predicates(); + + for variant in &def.variants { + for field in variant.node.data.fields() { + self.check(field.id, item_visibility).item_type(); + } } } // Subitems of foreign modules have their own publicity hir::ItemForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { - if foreign_item.vis == hir::Public { - check.visit_foreign_item(foreign_item); - } + let vis = ty::Visibility::from_hir(&foreign_item.vis, item.id, tcx); + self.check(foreign_item.id, vis).generics().predicates().item_type(); } } - // Subitems of structs have their own publicity - hir::ItemStruct(ref struct_def, ref generics) => { - if item.vis == hir::Public { - check.visit_generics(generics); - for field in struct_def.fields() { - if field.node.kind.visibility() == hir::Public { - check.visit_struct_field(field); - } - } + // Subitems of structs and unions have their own publicity + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { + self.check(item.id, item_visibility).generics().predicates(); + + for field in struct_def.fields() { + let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, tcx); + self.check(field.id, min(item_visibility, field_visibility)).item_type(); } } // The interface is empty 
hir::ItemDefaultImpl(..) => {} // An inherent impl is public when its type is public // Subitems of inherent impls have their own publicity - hir::ItemImpl(_, _, ref generics, None, ref ty, ref impl_items) => { - if self.is_public_ty(ty) { - check.visit_generics(generics); - for impl_item in impl_items { - if impl_item.vis == hir::Public { - check.visit_impl_item(impl_item); - } - } + hir::ItemImpl(.., None, _, ref impl_item_refs) => { + let ty_vis = self.check(item.id, ty::Visibility::PrivateExternal) + .item_type().min_visibility; + self.check(item.id, ty_vis).generics().predicates(); + + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); + let impl_item_vis = + ty::Visibility::from_hir(&impl_item.vis, item.id, tcx); + self.check(impl_item.id, min(impl_item_vis, ty_vis)) + .generics().predicates().item_type(); + + // Recurse for e.g. `impl Trait` (see `visit_ty`). + self.inner_visibility = impl_item_vis; + intravisit::walk_impl_item(self, impl_item); } } // A trait impl is public when both its type and its trait are public // Subitems of trait impls have inherited publicity - hir::ItemImpl(_, _, ref generics, Some(ref trait_ref), ref ty, ref impl_items) => { - if self.is_public_ty(ty) && self.is_public_trait_ref(trait_ref) { - check.visit_generics(generics); - for impl_item in impl_items { - check.visit_impl_item(impl_item); - } + hir::ItemImpl(.., Some(_), _, ref impl_item_refs) => { + let vis = self.check(item.id, ty::Visibility::PrivateExternal) + .item_type().impl_trait_ref().min_visibility; + self.check(item.id, vis).generics().predicates(); + for impl_item_ref in impl_item_refs { + let impl_item = self.tcx.map.impl_item(impl_item_ref.id); + self.check(impl_item.id, vis).generics().predicates().item_type(); + + // Recurse for e.g. `impl Trait` (see `visit_ty`). 
+ self.inner_visibility = vis; + intravisit::walk_impl_item(self, impl_item); } } } } + + fn visit_impl_item(&mut self, _impl_item: &'tcx hir::ImplItem) { + // handled in `visit_item` above + } + + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { + if let hir::TyImplTrait(..) = ty.node { + // Check the traits being exposed, as they're separate, + // e.g. `impl Iterator` has two predicates, + // `X: Iterator` and `::Item == T`, + // where `X` is the `impl Iterator` itself, + // stored in `item_predicates`, not in the `Ty` itself. + self.check(ty.id, self.inner_visibility).predicates(); + } + + intravisit::walk_ty(self, ty); + } + + // Don't recurse into expressions in array sizes or const initializers + fn visit_expr(&mut self, _: &'tcx hir::Expr) {} + // Don't recurse into patterns in function arguments + fn visit_pat(&mut self, _: &'tcx hir::Pat) {} } -pub fn check_crate(tcx: &ty::ctxt, - export_map: &def::ExportMap, - external_exports: ExternalExports) - -> AccessLevels { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + export_map: &def::ExportMap) + -> AccessLevels { let _task = tcx.dep_graph.in_task(DepNode::Privacy); let krate = tcx.map.krate(); - // Sanity check to make sure that all privacy usage and controls are - // reasonable. 
- let mut visitor = SanePrivacyVisitor { - tcx: tcx, - in_block: false, - }; - intravisit::walk_crate(&mut visitor, krate); - - // Figure out who everyone's parent is - let mut visitor = ParentVisitor { - tcx: tcx, - parents: NodeMap(), - curparent: ast::DUMMY_NODE_ID, - }; - intravisit::walk_crate(&mut visitor, krate); - // Use the parent map to check the privacy of everything let mut visitor = PrivacyVisitor { curitem: ast::DUMMY_NODE_ID, in_foreign: false, tcx: tcx, - parents: visitor.parents, - external_exports: external_exports, }; intravisit::walk_crate(&mut visitor, krate); @@ -1741,8 +1243,9 @@ pub fn check_crate(tcx: &ty::ctxt, let mut visitor = PrivateItemsInPublicInterfacesVisitor { tcx: tcx, old_error_set: &visitor.old_error_set, + inner_visibility: ty::Visibility::Public, }; - krate.visit_all_items(&mut visitor); + krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor)); } visitor.access_levels diff --git a/src/librustc_resolve/Cargo.toml b/src/librustc_resolve/Cargo.toml new file mode 100644 index 0000000000000..5ce4c74e735fd --- /dev/null +++ b/src/librustc_resolve/Cargo.toml @@ -0,0 +1,18 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_resolve" +version = "0.0.0" + +[lib] +name = "rustc_resolve" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +log = { path = "../liblog" } +syntax = { path = "../libsyntax" } +rustc = { path = "../librustc" } +arena = { path = "../libarena" } +rustc_errors = { path = "../librustc_errors" } +syntax_pos = { path = "../libsyntax_pos" } diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index 2e713a2f50e0f..d90a49213d141 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -13,194 +13,112 @@ //! Here we build the "reduced graph": the graph of the module tree without //! any imports resolved. 
-use DefModifiers; +use macros::{InvocationData, LegacyScope}; use resolve_imports::ImportDirective; -use resolve_imports::ImportDirectiveSubclass::{self, SingleImport, GlobImport}; -use resolve_imports::{ImportResolution, ImportResolutionPerNamespace}; -use Module; -use Namespace::{TypeNS, ValueNS}; -use NameBindings; -use {names_to_string, module_to_string}; -use ParentLink::{ModuleParentLink, BlockParentLink}; -use Resolver; -use resolve_imports::Shadowable; +use resolve_imports::ImportDirectiveSubclass::{self, GlobImport, SingleImport}; +use {Resolver, Module, ModuleS, ModuleKind, NameBinding, NameBindingKind, ToNameBinding}; +use Namespace::{self, TypeNS, ValueNS, MacroNS}; use {resolve_error, resolve_struct_error, ResolutionError}; -use self::DuplicateCheckingMode::*; - -use rustc::middle::cstore::{CrateStore, ChildItem, DlDef, DlField, DlImpl}; -use rustc::middle::def::*; -use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId}; - -use syntax::ast::{Name, NodeId}; -use syntax::attr::AttrMetaMethods; -use syntax::parse::token::special_idents; -use syntax::codemap::{Span, DUMMY_SP}; - -use rustc_front::hir; -use rustc_front::hir::{Block, DeclItem}; -use rustc_front::hir::{ForeignItem, ForeignItemFn, ForeignItemStatic}; -use rustc_front::hir::{Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn}; -use rustc_front::hir::{ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl}; -use rustc_front::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse}; -use rustc_front::hir::{NamedField, PathListIdent, PathListMod}; -use rustc_front::hir::StmtDecl; -use rustc_front::hir::UnnamedField; -use rustc_front::hir::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple}; -use rustc_front::hir::Visibility; -use rustc_front::intravisit::{self, Visitor}; - -use std::mem::replace; -use std::ops::{Deref, DerefMut}; - -// Specifies how duplicates should be handled when adding a child item if -// another item exists with the same name in some namespace. 
-#[derive(Copy, Clone, PartialEq)] -enum DuplicateCheckingMode { - ForbidDuplicateTypes, - ForbidDuplicateValues, - ForbidDuplicateTypesAndValues, - OverwriteDuplicates, -} - -struct GraphBuilder<'a, 'b: 'a, 'tcx: 'b> { - resolver: &'a mut Resolver<'b, 'tcx>, -} - -impl<'a, 'b:'a, 'tcx:'b> Deref for GraphBuilder<'a, 'b, 'tcx> { - type Target = Resolver<'b, 'tcx>; - - fn deref(&self) -> &Resolver<'b, 'tcx> { - &*self.resolver +use rustc::middle::cstore::LoadedMacro; +use rustc::hir::def::*; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; +use rustc::ty; + +use std::cell::Cell; +use std::rc::Rc; + +use syntax::ast::Name; +use syntax::attr; + +use syntax::ast::{self, Block, ForeignItem, ForeignItemKind, Item, ItemKind}; +use syntax::ast::{Mutability, StmtKind, TraitItem, TraitItemKind}; +use syntax::ast::{Variant, ViewPathGlob, ViewPathList, ViewPathSimple}; +use syntax::ext::base::SyntaxExtension; +use syntax::ext::base::Determinacy::Undetermined; +use syntax::ext::expand::mark_tts; +use syntax::ext::hygiene::Mark; +use syntax::ext::tt::macro_rules; +use syntax::symbol::keywords; +use syntax::visit::{self, Visitor}; + +use syntax_pos::{Span, DUMMY_SP}; + +impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, Mark) { + fn to_name_binding(self) -> NameBinding<'a> { + NameBinding { + kind: NameBindingKind::Module(self.0), + vis: self.1, + span: self.2, + expansion: self.3, + } } } -impl<'a, 'b:'a, 'tcx:'b> DerefMut for GraphBuilder<'a, 'b, 'tcx> { - fn deref_mut(&mut self) -> &mut Resolver<'b, 'tcx> { - &mut *self.resolver +impl<'a> ToNameBinding<'a> for (Def, ty::Visibility, Span, Mark) { + fn to_name_binding(self) -> NameBinding<'a> { + NameBinding { + kind: NameBindingKind::Def(self.0), + vis: self.1, + span: self.2, + expansion: self.3, + } } } -impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { - /// Constructs the reduced graph for the entire crate. 
- fn build_reduced_graph(self, krate: &hir::Crate) { - let mut visitor = BuildReducedGraphVisitor { - parent: self.graph_root, - builder: self, - }; - intravisit::walk_crate(&mut visitor, krate); - } - - /// Adds a new child item to the module definition of the parent node, - /// or if there is already a child, does duplicate checking on the child. - /// Returns the child's corresponding name bindings. - fn add_child(&self, - name: Name, - parent: Module<'b>, - duplicate_checking_mode: DuplicateCheckingMode, - // For printing errors - sp: Span) - -> NameBindings<'b> { - self.check_for_conflicts_between_external_crates_and_items(parent, name, sp); - - // Add or reuse the child. - let child = parent.children.borrow().get(&name).cloned(); - match child { - None => { - let child = NameBindings::new(); - parent.children.borrow_mut().insert(name, child.clone()); - child - } - Some(child) => { - // Enforce the duplicate checking mode: - // - // * If we're requesting duplicate type checking, check that - // the name isn't defined in the type namespace. - // - // * If we're requesting duplicate value checking, check that - // the name isn't defined in the value namespace. - // - // * If we're requesting duplicate type and value checking, - // check that the name isn't defined in either namespace. - // - // * If no duplicate checking was requested at all, do - // nothing. 
- - let ns = match duplicate_checking_mode { - ForbidDuplicateTypes if child.type_ns.defined() => TypeNS, - ForbidDuplicateValues if child.value_ns.defined() => ValueNS, - ForbidDuplicateTypesAndValues if child.type_ns.defined() => TypeNS, - ForbidDuplicateTypesAndValues if child.value_ns.defined() => ValueNS, - _ => return child, - }; - - // Record an error here by looking up the namespace that had the duplicate - let ns_str = match ns { TypeNS => "type or module", ValueNS => "value" }; - let mut err = resolve_struct_error(self, - sp, - ResolutionError::DuplicateDefinition(ns_str, - name)); +#[derive(Default, PartialEq, Eq)] +struct LegacyMacroImports { + import_all: Option, + imports: Vec<(Name, Span)>, + reexports: Vec<(Name, Span)>, +} - if let Some(sp) = child[ns].span() { - let note = format!("first definition of {} `{}` here", ns_str, name); - err.span_note(sp, ¬e); - } - err.emit(); - child - } +impl<'b> Resolver<'b> { + /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined; + /// otherwise, reports an error. + fn define(&mut self, parent: Module<'b>, name: Name, ns: Namespace, def: T) + where T: ToNameBinding<'b>, + { + let binding = def.to_name_binding(); + if let Err(old_binding) = self.try_define(parent, name, ns, binding.clone()) { + self.report_conflict(parent, name, ns, old_binding, &binding); } } fn block_needs_anonymous_module(&mut self, block: &Block) -> bool { - // Check each statement. - for statement in &block.stmts { - match statement.node { - StmtDecl(ref declaration, _) => { - match declaration.node { - DeclItem(_) => { - return true; - } - _ => { - // Keep searching. - } - } - } - _ => { - // Keep searching. - } - } - } - - // If we found no items, we don't need to create - // an anonymous module. 
+ // If any statements are items, we need to create an anonymous module + block.stmts.iter().any(|statement| match statement.node { + StmtKind::Item(_) | StmtKind::Mac(_) => true, + _ => false, + }) + } - return false; + fn insert_field_names(&mut self, def_id: DefId, field_names: Vec) { + if !field_names.is_empty() { + self.field_names.insert(def_id, field_names); + } } /// Constructs the reduced graph for one item. - fn build_reduced_graph_for_item(&mut self, item: &Item, parent: Module<'b>) -> Module<'b> { - let name = item.name; + fn build_reduced_graph_for_item(&mut self, item: &Item, expansion: Mark) { + let parent = self.current_module; + let name = item.ident.name; let sp = item.span; - let is_public = item.vis == hir::Public; - let modifiers = if is_public { - DefModifiers::PUBLIC - } else { - DefModifiers::empty() - } | DefModifiers::IMPORTABLE; + let vis = self.resolve_visibility(&item.vis); match item.node { - ItemUse(ref view_path) => { + ItemKind::Use(ref view_path) => { // Extract and intern the module part of the path. For // globs and lists, the path is found directly in the AST; // for simple paths we have to munge the path a little. - let module_path = match view_path.node { + let module_path: Vec<_> = match view_path.node { ViewPathSimple(_, ref full_path) => { full_path.segments .split_last() .unwrap() .1 .iter() - .map(|seg| seg.identifier.name) + .map(|seg| seg.identifier) .collect() } @@ -208,49 +126,55 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { ViewPathList(ref module_ident_path, _) => { module_ident_path.segments .iter() - .map(|seg| seg.identifier.name) + .map(|seg| seg.identifier) .collect() } }; // Build up the import directives. 
- let shadowable = item.attrs.iter().any(|attr| { - attr.name() == special_idents::prelude_import.name.as_str() - }); - let shadowable = if shadowable { - Shadowable::Always - } else { - Shadowable::Never - }; + let is_prelude = attr::contains_name(&item.attrs, "prelude_import"); match view_path.node { ViewPathSimple(binding, ref full_path) => { - let source_name = full_path.segments.last().unwrap().identifier.name; - if source_name.as_str() == "mod" || source_name.as_str() == "self" { + let mut source = full_path.segments.last().unwrap().identifier; + let source_name = source.name; + if source_name == "mod" || source_name == "self" { resolve_error(self, view_path.span, ResolutionError::SelfImportsOnlyAllowedWithin); + } else if source_name == "$crate" && full_path.segments.len() == 1 { + let crate_root = self.resolve_crate_var(source.ctxt); + let crate_name = match crate_root.kind { + ModuleKind::Def(_, name) => name, + ModuleKind::Block(..) => unreachable!(), + }; + source.name = crate_name; + + self.session.struct_span_warn(item.span, "`$crate` may not be imported") + .note("`use $crate;` was erroneously allowed and \ + will become a hard error in a future release") + .emit(); } - let subclass = SingleImport(binding, source_name); - self.build_import_directive(parent, - module_path, - subclass, - view_path.span, - item.id, - is_public, - shadowable); + let subclass = SingleImport { + target: binding.name, + source: source.name, + result: self.per_ns(|_, _| Cell::new(Err(Undetermined))), + }; + self.add_import_directive( + module_path, subclass, view_path.span, item.id, vis, expansion, + ); } ViewPathList(_, ref source_items) => { // Make sure there's at most one `mod` import in the list. - let mod_spans = source_items.iter() - .filter_map(|item| { - match item.node { - PathListMod { .. 
} => Some(item.span), - _ => None, - } - }) - .collect::>(); + let mod_spans = source_items.iter().filter_map(|item| { + if item.node.name.name == keywords::SelfValue.name() { + Some(item.span) + } else { + None + } + }).collect::>(); + if mod_spans.len() > 1 { let mut e = resolve_struct_error(self, mod_spans[0], @@ -262,12 +186,14 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { } for source_item in source_items { - let (module_path, name, rename) = match source_item.node { - PathListIdent { name, rename, .. } => - (module_path.clone(), name, rename.unwrap_or(name)), - PathListMod { rename, .. } => { + let node = source_item.node; + let (module_path, name, rename) = { + if node.name.name != keywords::SelfValue.name() { + let rename = node.rename.unwrap_or(node.name).name; + (module_path.clone(), node.name.name, rename) + } else { let name = match module_path.last() { - Some(name) => *name, + Some(ident) => ident.name, None => { resolve_error( self, @@ -279,218 +205,156 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { } }; let module_path = module_path.split_last().unwrap().1; - let rename = rename.unwrap_or(name); + let rename = node.rename.map(|i| i.name).unwrap_or(name); (module_path.to_vec(), name, rename) } }; - self.build_import_directive(parent, - module_path, - SingleImport(rename, name), - source_item.span, - source_item.node.id(), - is_public, - shadowable); + let subclass = SingleImport { + target: rename, + source: name, + result: self.per_ns(|_, _| Cell::new(Err(Undetermined))), + }; + let id = source_item.node.id; + self.add_import_directive( + module_path, subclass, source_item.span, id, vis, expansion, + ); } } ViewPathGlob(_) => { - self.build_import_directive(parent, - module_path, - GlobImport, - view_path.span, - item.id, - is_public, - shadowable); + let subclass = GlobImport { + is_prelude: is_prelude, + max_vis: Cell::new(ty::Visibility::PrivateExternal), + }; + self.add_import_directive( + module_path, subclass, 
view_path.span, item.id, vis, expansion, + ); } } - parent - } - - ItemExternCrate(_) => { - // n.b. we don't need to look at the path option here, because cstore already - // did - if let Some(crate_id) = self.session.cstore.extern_mod_stmt_cnum(item.id) { - let def_id = DefId { - krate: crate_id, - index: CRATE_DEF_INDEX, - }; - self.external_exports.insert(def_id); - let parent_link = ModuleParentLink(parent, name); - let def = DefMod(def_id); - let external_module = self.new_module(parent_link, Some(def), false, true); - - debug!("(build reduced graph for item) found extern `{}`", - module_to_string(&*external_module)); - self.check_for_conflicts_for_external_crate(parent, name, sp); - parent.external_module_children - .borrow_mut() - .insert(name, external_module); - self.build_reduced_graph_for_external_crate(&external_module); - } - parent } - ItemMod(..) => { - let name_bindings = self.add_child(name, parent, ForbidDuplicateTypes, sp); + ItemKind::ExternCrate(_) => { + self.crate_loader.process_item(item, &self.definitions); + + // n.b. we don't need to look at the path option here, because cstore already did + let crate_id = self.session.cstore.extern_mod_stmt_cnum(item.id).unwrap(); + let module = self.get_extern_crate_root(crate_id); + let binding = (module, ty::Visibility::Public, sp, expansion).to_name_binding(); + let binding = self.arenas.alloc_name_binding(binding); + let directive = self.arenas.alloc_import_directive(ImportDirective { + id: item.id, + parent: parent, + imported_module: Cell::new(Some(module)), + subclass: ImportDirectiveSubclass::ExternCrate, + span: item.span, + module_path: Vec::new(), + vis: Cell::new(vis), + expansion: expansion, + }); + let imported_binding = self.import(binding, directive); + self.define(parent, name, TypeNS, imported_binding); + self.populate_module_if_necessary(module); + self.process_legacy_macro_imports(item, module, expansion); + } + + ItemKind::Mod(..) 
if item.ident == keywords::Invalid.ident() => {} // Crate root + + ItemKind::Mod(..) => { + let def = Def::Mod(self.definitions.local_def_id(item.id)); + let module = self.arenas.alloc_module(ModuleS { + no_implicit_prelude: parent.no_implicit_prelude || { + attr::contains_name(&item.attrs, "no_implicit_prelude") + }, + normal_ancestor_id: Some(item.id), + ..ModuleS::new(Some(parent), ModuleKind::Def(def, name)) + }); + self.define(parent, name, TypeNS, (module, vis, sp, expansion)); + self.module_map.insert(item.id, module); - let parent_link = ModuleParentLink(parent, name); - let def = DefMod(self.ast_map.local_def_id(item.id)); - let module = self.new_module(parent_link, Some(def), false, is_public); - name_bindings.define_module(module.clone(), sp); - module + // Descend into the module. + self.current_module = module; } - ItemForeignMod(..) => parent, + ItemKind::ForeignMod(..) => self.crate_loader.process_item(item, &self.definitions), // These items live in the value namespace. - ItemStatic(_, m, _) => { - let name_bindings = self.add_child(name, parent, ForbidDuplicateValues, sp); - let mutbl = m == hir::MutMutable; - - name_bindings.define_value(DefStatic(self.ast_map.local_def_id(item.id), mutbl), - sp, - modifiers); - parent + ItemKind::Static(_, m, _) => { + let mutbl = m == Mutability::Mutable; + let def = Def::Static(self.definitions.local_def_id(item.id), mutbl); + self.define(parent, name, ValueNS, (def, vis, sp, expansion)); } - ItemConst(_, _) => { - self.add_child(name, parent, ForbidDuplicateValues, sp) - .define_value(DefConst(self.ast_map.local_def_id(item.id)), sp, modifiers); - parent + ItemKind::Const(..) 
=> { + let def = Def::Const(self.definitions.local_def_id(item.id)); + self.define(parent, name, ValueNS, (def, vis, sp, expansion)); } - ItemFn(_, _, _, _, _, _) => { - let name_bindings = self.add_child(name, parent, ForbidDuplicateValues, sp); - - let def = DefFn(self.ast_map.local_def_id(item.id), false); - name_bindings.define_value(def, sp, modifiers); - parent + ItemKind::Fn(..) => { + let def = Def::Fn(self.definitions.local_def_id(item.id)); + self.define(parent, name, ValueNS, (def, vis, sp, expansion)); } // These items live in the type namespace. - ItemTy(..) => { - let name_bindings = self.add_child(name, - parent, - ForbidDuplicateTypes, - sp); - - let parent_link = ModuleParentLink(parent, name); - let def = DefTy(self.ast_map.local_def_id(item.id), false); - let module = self.new_module(parent_link, Some(def), false, is_public); - name_bindings.define_module(module, sp); - parent - } - - ItemEnum(ref enum_definition, _) => { - let name_bindings = self.add_child(name, - parent, - ForbidDuplicateTypes, - sp); - - let parent_link = ModuleParentLink(parent, name); - let def = DefTy(self.ast_map.local_def_id(item.id), true); - let module = self.new_module(parent_link, Some(def), false, is_public); - name_bindings.define_module(module.clone(), sp); - - let variant_modifiers = if is_public { - DefModifiers::empty() - } else { - DefModifiers::PRIVATE_VARIANT - }; + ItemKind::Ty(..) 
=> { + let def = Def::TyAlias(self.definitions.local_def_id(item.id)); + self.define(parent, name, TypeNS, (def, vis, sp, expansion)); + } + + ItemKind::Enum(ref enum_definition, _) => { + let def = Def::Enum(self.definitions.local_def_id(item.id)); + let module = self.new_module(parent, ModuleKind::Def(def, name), true); + self.define(parent, name, TypeNS, (module, vis, sp, expansion)); + for variant in &(*enum_definition).variants { - let item_def_id = self.ast_map.local_def_id(item.id); - self.build_reduced_graph_for_variant(variant, item_def_id, - &module, variant_modifiers); + self.build_reduced_graph_for_variant(variant, module, vis, expansion); } - parent } // These items live in both the type and value namespaces. - ItemStruct(ref struct_def, _) => { - // Adding to both Type and Value namespaces or just Type? - let (forbid, ctor_id) = if struct_def.is_struct() { - (ForbidDuplicateTypes, None) - } else { - (ForbidDuplicateTypesAndValues, Some(struct_def.id())) - }; - - let name_bindings = self.add_child(name, parent, forbid, sp); - + ItemKind::Struct(ref struct_def, _) => { // Define a name in the type namespace. - name_bindings.define_type(DefTy(self.ast_map.local_def_id(item.id), false), - sp, - modifiers); - - // If this is a newtype or unit-like struct, define a name - // in the value namespace as well - if let Some(cid) = ctor_id { - name_bindings.define_value(DefStruct(self.ast_map.local_def_id(cid)), - sp, - modifiers); + let def = Def::Struct(self.definitions.local_def_id(item.id)); + self.define(parent, name, TypeNS, (def, vis, sp, expansion)); + + // If this is a tuple or unit struct, define a name + // in the value namespace as well. + if !struct_def.is_struct() { + let ctor_def = Def::StructCtor(self.definitions.local_def_id(struct_def.id()), + CtorKind::from_ast(struct_def)); + self.define(parent, name, ValueNS, (ctor_def, vis, sp, expansion)); } - // Record the def ID and fields of this struct. 
- let named_fields = struct_def.fields() - .iter() - .filter_map(|f| { - match f.node.kind { - NamedField(name, _) => Some(name), - UnnamedField(_) => None, - } - }) - .collect(); - let item_def_id = self.ast_map.local_def_id(item.id); - self.structs.insert(item_def_id, named_fields); - - parent + // Record field names for error reporting. + let field_names = struct_def.fields().iter().filter_map(|field| { + self.resolve_visibility(&field.vis); + field.ident.map(|ident| ident.name) + }).collect(); + let item_def_id = self.definitions.local_def_id(item.id); + self.insert_field_names(item_def_id, field_names); } - ItemDefaultImpl(_, _) | - ItemImpl(..) => parent, + ItemKind::Union(ref vdata, _) => { + let def = Def::Union(self.definitions.local_def_id(item.id)); + self.define(parent, name, TypeNS, (def, vis, sp, expansion)); + + // Record field names for error reporting. + let field_names = vdata.fields().iter().filter_map(|field| { + self.resolve_visibility(&field.vis); + field.ident.map(|ident| ident.name) + }).collect(); + let item_def_id = self.definitions.local_def_id(item.id); + self.insert_field_names(item_def_id, field_names); + } - ItemTrait(_, _, _, ref items) => { - let name_bindings = self.add_child(name, - parent, - ForbidDuplicateTypes, - sp); + ItemKind::DefaultImpl(..) | ItemKind::Impl(..) => {} - let def_id = self.ast_map.local_def_id(item.id); + ItemKind::Trait(..) => { + let def_id = self.definitions.local_def_id(item.id); // Add all the items within to a new module. - let parent_link = ModuleParentLink(parent, name); - let def = DefTrait(def_id); - let module_parent = self.new_module(parent_link, Some(def), false, is_public); - name_bindings.define_module(module_parent.clone(), sp); - - // Add the names of all the items to the trait info. - for trait_item in items { - let name_bindings = self.add_child(trait_item.name, - &module_parent, - ForbidDuplicateTypesAndValues, - trait_item.span); - - match trait_item.node { - hir::ConstTraitItem(..) 
=> { - let def = DefAssociatedConst(self.ast_map.local_def_id(trait_item.id)); - // NB: not DefModifiers::IMPORTABLE - name_bindings.define_value(def, trait_item.span, DefModifiers::PUBLIC); - } - hir::MethodTraitItem(..) => { - let def = DefMethod(self.ast_map.local_def_id(trait_item.id)); - // NB: not DefModifiers::IMPORTABLE - name_bindings.define_value(def, trait_item.span, DefModifiers::PUBLIC); - } - hir::TypeTraitItem(..) => { - let def = DefAssociatedTy(self.ast_map.local_def_id(item.id), - self.ast_map.local_def_id(trait_item.id)); - // NB: not DefModifiers::IMPORTABLE - name_bindings.define_type(def, trait_item.span, DefModifiers::PUBLIC); - } - } - - let trait_item_def_id = self.ast_map.local_def_id(trait_item.id); - self.trait_item_map.insert((trait_item.name, def_id), trait_item_def_id); - } - - parent + let module = + self.new_module(parent, ModuleKind::Def(Def::Trait(def_id), name), true); + self.define(parent, name, TypeNS, (module, vis, sp, expansion)); + self.current_module = module; } + ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"), } } @@ -498,59 +362,43 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { // type and value namespaces. 
fn build_reduced_graph_for_variant(&mut self, variant: &Variant, - item_id: DefId, parent: Module<'b>, - variant_modifiers: DefModifiers) { - let name = variant.node.name; - let is_exported = if variant.node.data.is_struct() { - // Not adding fields for variants as they are not accessed with a self receiver - let variant_def_id = self.ast_map.local_def_id(variant.node.data.id()); - self.structs.insert(variant_def_id, Vec::new()); - true - } else { - false - }; - - let child = self.add_child(name, parent, ForbidDuplicateTypesAndValues, variant.span); - // variants are always treated as importable to allow them to be glob - // used - child.define_value(DefVariant(item_id, - self.ast_map.local_def_id(variant.node.data.id()), - is_exported), - variant.span, - DefModifiers::PUBLIC | DefModifiers::IMPORTABLE | variant_modifiers); - child.define_type(DefVariant(item_id, - self.ast_map.local_def_id(variant.node.data.id()), - is_exported), - variant.span, - DefModifiers::PUBLIC | DefModifiers::IMPORTABLE | variant_modifiers); + vis: ty::Visibility, + expansion: Mark) { + let name = variant.node.name.name; + let def_id = self.definitions.local_def_id(variant.node.data.id()); + + // Define a name in the type namespace. + let def = Def::Variant(def_id); + self.define(parent, name, TypeNS, (def, vis, variant.span, expansion)); + + // Define a constructor name in the value namespace. + // Braced variants, unlike structs, generate unusable names in + // value namespace, they are reserved for possible future use. + let ctor_kind = CtorKind::from_ast(&variant.node.data); + let ctor_def = Def::VariantCtor(def_id, ctor_kind); + self.define(parent, name, ValueNS, (ctor_def, vis, variant.span, expansion)); } /// Constructs the reduced graph for one foreign item. 
- fn build_reduced_graph_for_foreign_item(&mut self, - foreign_item: &ForeignItem, - parent: Module<'b>) { - let name = foreign_item.name; - let is_public = foreign_item.vis == hir::Public; - let modifiers = if is_public { - DefModifiers::PUBLIC - } else { - DefModifiers::empty() - } | DefModifiers::IMPORTABLE; - let name_bindings = self.add_child(name, parent, ForbidDuplicateValues, foreign_item.span); + fn build_reduced_graph_for_foreign_item(&mut self, item: &ForeignItem, expansion: Mark) { + let parent = self.current_module; + let name = item.ident.name; - let def = match foreign_item.node { - ForeignItemFn(..) => { - DefFn(self.ast_map.local_def_id(foreign_item.id), false) + let def = match item.node { + ForeignItemKind::Fn(..) => { + Def::Fn(self.definitions.local_def_id(item.id)) } - ForeignItemStatic(_, m) => { - DefStatic(self.ast_map.local_def_id(foreign_item.id), m) + ForeignItemKind::Static(_, m) => { + Def::Static(self.definitions.local_def_id(item.id), m) } }; - name_bindings.define_value(def, foreign_item.span, modifiers); + let vis = self.resolve_visibility(&item.vis); + self.define(parent, name, ValueNS, (def, vis, item.span, expansion)); } - fn build_reduced_graph_for_block(&mut self, block: &Block, parent: Module<'b>) -> Module<'b> { + fn build_reduced_graph_for_block(&mut self, block: &Block) { + let parent = self.current_module; if self.block_needs_anonymous_module(block) { let block_id = block.id; @@ -558,341 +406,365 @@ impl<'a, 'b:'a, 'tcx:'b> GraphBuilder<'a, 'b, 'tcx> { {}", block_id); - let parent_link = BlockParentLink(parent, block_id); - let new_module = self.new_module(parent_link, None, false, false); - parent.anonymous_children.borrow_mut().insert(block_id, new_module); - new_module - } else { - parent + let new_module = self.new_module(parent, ModuleKind::Block(block_id), true); + self.module_map.insert(block_id, new_module); + self.current_module = new_module; // Descend into the block. 
} } - fn handle_external_def(&mut self, - def: Def, - vis: Visibility, - child_name_bindings: &NameBindings<'b>, - final_ident: &str, - name: Name, - new_parent: Module<'b>) { - debug!("(building reduced graph for external crate) building external def {}, priv {:?}", - final_ident, - vis); - let is_public = vis == hir::Public; - let modifiers = if is_public { - DefModifiers::PUBLIC - } else { - DefModifiers::empty() - } | DefModifiers::IMPORTABLE; - let is_exported = is_public && - match new_parent.def_id() { - None => true, - Some(did) => self.external_exports.contains(&did), + /// Builds the reduced graph for a single item in an external crate. + fn build_reduced_graph_for_external_crate_def(&mut self, parent: Module<'b>, child: Export) { + let name = child.name; + let def = child.def; + let def_id = def.def_id(); + let vis = match def { + Def::Macro(..) => ty::Visibility::Public, + _ if parent.is_trait() => ty::Visibility::Public, + _ => self.session.cstore.visibility(def_id), }; - if is_exported { - self.external_exports.insert(def.def_id()); - } match def { - DefMod(_) | - DefForeignMod(_) | - DefStruct(_) | - DefTy(..) => { - if let Some(module_def) = child_name_bindings.type_ns.module() { - debug!("(building reduced graph for external crate) already created module"); - module_def.def.set(Some(def)); - } else { - debug!("(building reduced graph for external crate) building module {} {}", - final_ident, - is_public); - let parent_link = ModuleParentLink(new_parent, name); - let module = self.new_module(parent_link, Some(def), true, is_public); - child_name_bindings.define_module(module, DUMMY_SP); - } + Def::Mod(..) | Def::Enum(..) 
=> { + let module = self.new_module(parent, ModuleKind::Def(def, name), false); + self.define(parent, name, TypeNS, (module, vis, DUMMY_SP, Mark::root())); } - _ => {} - } - - match def { - DefMod(_) | DefForeignMod(_) => {} - DefVariant(_, variant_id, is_struct) => { - debug!("(building reduced graph for external crate) building variant {}", - final_ident); - // variants are always treated as importable to allow them to be - // glob used - let modifiers = DefModifiers::PUBLIC | DefModifiers::IMPORTABLE; - if is_struct { - child_name_bindings.define_type(def, DUMMY_SP, modifiers); - // Not adding fields for variants as they are not accessed with a self receiver - self.structs.insert(variant_id, Vec::new()); - } else { - child_name_bindings.define_value(def, DUMMY_SP, modifiers); - } + Def::Variant(..) => { + self.define(parent, name, TypeNS, (def, vis, DUMMY_SP, Mark::root())); } - DefFn(ctor_id, true) => { - child_name_bindings.define_value( - self.session.cstore.tuple_struct_definition_if_ctor(ctor_id) - .map_or(def, |_| DefStruct(ctor_id)), DUMMY_SP, modifiers); - } - DefFn(..) | - DefStatic(..) | - DefConst(..) | - DefAssociatedConst(..) | - DefMethod(..) => { - debug!("(building reduced graph for external crate) building value (fn/static) {}", - final_ident); - // impl methods have already been defined with the correct importability - // modifier - let mut modifiers = match *child_name_bindings.value_ns.borrow() { - Some(ref def) => (modifiers & !DefModifiers::IMPORTABLE) | - (def.modifiers & DefModifiers::IMPORTABLE), - None => modifiers, - }; - if !new_parent.is_normal() { - modifiers = modifiers & !DefModifiers::IMPORTABLE; - } - child_name_bindings.define_value(def, DUMMY_SP, modifiers); + Def::VariantCtor(..) 
=> { + self.define(parent, name, ValueNS, (def, vis, DUMMY_SP, Mark::root())); } - DefTrait(def_id) => { - debug!("(building reduced graph for external crate) building type {}", - final_ident); - - // If this is a trait, add all the trait item names to the trait - // info. - - let trait_item_def_ids = self.session.cstore.trait_item_def_ids(def_id); - for trait_item_def in &trait_item_def_ids { - let trait_item_name = - self.session.cstore.item_name(trait_item_def.def_id()); - - debug!("(building reduced graph for external crate) ... adding trait item \ - '{}'", - trait_item_name); - - self.trait_item_map.insert((trait_item_name, def_id), trait_item_def.def_id()); - - if is_exported { - self.external_exports.insert(trait_item_def.def_id()); - } - } - - // Define a module if necessary. - let parent_link = ModuleParentLink(new_parent, name); - let module = self.new_module(parent_link, Some(def), true, is_public); - child_name_bindings.define_module(module, DUMMY_SP); + Def::Fn(..) | + Def::Static(..) | + Def::Const(..) | + Def::AssociatedConst(..) | + Def::Method(..) => { + self.define(parent, name, ValueNS, (def, vis, DUMMY_SP, Mark::root())); } - DefTy(..) | DefAssociatedTy(..) => { - debug!("(building reduced graph for external crate) building type {}", - final_ident); - - let modifiers = match new_parent.is_normal() { - true => modifiers, - _ => modifiers & !DefModifiers::IMPORTABLE, - }; - - if let DefTy(..) = def { - child_name_bindings.type_ns.set_modifiers(modifiers); - } else { - child_name_bindings.define_type(def, DUMMY_SP, modifiers); + Def::Trait(..) => { + let module = self.new_module(parent, ModuleKind::Def(def, name), false); + self.define(parent, name, TypeNS, (module, vis, DUMMY_SP, Mark::root())); + + // If this is a trait, add all the trait item names to the trait info. 
+ let trait_item_def_ids = self.session.cstore.associated_item_def_ids(def_id); + for trait_item_def_id in trait_item_def_ids { + let trait_item_name = self.session.cstore.def_key(trait_item_def_id) + .disambiguated_data.data.get_opt_name() + .expect("opt_item_name returned None for trait"); + self.trait_item_map.insert((trait_item_name, def_id), false); } } - DefStruct(def_id) => { - debug!("(building reduced graph for external crate) building type and value for \ - {}", - final_ident); - child_name_bindings.define_type(def, DUMMY_SP, modifiers); - let fields = self.session.cstore.struct_field_names(def_id); - - if fields.is_empty() { - child_name_bindings.define_value(def, DUMMY_SP, modifiers); - } + Def::TyAlias(..) | Def::AssociatedTy(..) => { + self.define(parent, name, TypeNS, (def, vis, DUMMY_SP, Mark::root())); + } + Def::Struct(..) => { + self.define(parent, name, TypeNS, (def, vis, DUMMY_SP, Mark::root())); - // Record the def ID and fields of this struct. - self.structs.insert(def_id, fields); + // Record field names for error reporting. + let field_names = self.session.cstore.struct_field_names(def_id); + self.insert_field_names(def_id, field_names); } - DefLocal(..) | - DefPrimTy(..) | - DefTyParam(..) | - DefUpvar(..) | - DefLabel(..) | - DefSelfTy(..) | - DefErr => { - panic!("didn't expect `{:?}`", def); + Def::StructCtor(..) => { + self.define(parent, name, ValueNS, (def, vis, DUMMY_SP, Mark::root())); } - } - } + Def::Union(..) => { + self.define(parent, name, TypeNS, (def, vis, DUMMY_SP, Mark::root())); - /// Builds the reduced graph for a single item in an external crate. - fn build_reduced_graph_for_external_crate_def(&mut self, - root: Module<'b>, - xcdef: ChildItem) { - match xcdef.def { - DlDef(def) => { - // Add the new child item, if necessary. - match def { - DefForeignMod(def_id) => { - // Foreign modules have no names. Recur and populate - // eagerly. 
- for child in self.session.cstore.item_children(def_id) { - self.build_reduced_graph_for_external_crate_def(root, child) - } - } - _ => { - let child_name_bindings = self.add_child(xcdef.name, - root, - OverwriteDuplicates, - DUMMY_SP); - - self.handle_external_def(def, - xcdef.vis, - &child_name_bindings, - &xcdef.name.as_str(), - xcdef.name, - root); - } - } + // Record field names for error reporting. + let field_names = self.session.cstore.struct_field_names(def_id); + self.insert_field_names(def_id, field_names); } - DlImpl(_) => { - debug!("(building reduced graph for external crate) ignoring impl"); + Def::Macro(..) => { + self.define(parent, name, MacroNS, (def, vis, DUMMY_SP, Mark::root())); } - DlField => { - debug!("(building reduced graph for external crate) ignoring field"); + Def::Local(..) | + Def::PrimTy(..) | + Def::TyParam(..) | + Def::Upvar(..) | + Def::Label(..) | + Def::SelfTy(..) | + Def::Err => { + bug!("unexpected definition: {:?}", def); } } } - /// Builds the reduced graph rooted at the given external module. - fn populate_external_module(&mut self, module: Module<'b>) { - debug!("(populating external module) attempting to populate {}", - module_to_string(module)); + fn get_extern_crate_root(&mut self, cnum: CrateNum) -> Module<'b> { + let def_id = DefId { krate: cnum, index: CRATE_DEF_INDEX }; + let macros_only = self.session.cstore.dep_kind(cnum).macros_only(); + let arenas = self.arenas; + *self.extern_crate_roots.entry((cnum, macros_only)).or_insert_with(|| { + arenas.alloc_module(ModuleS { + populated: Cell::new(false), + ..ModuleS::new(None, ModuleKind::Def(Def::Mod(def_id), keywords::Invalid.name())) + }) + }) + } - let def_id = match module.def_id() { - None => { - debug!("(populating external module) ... 
no def ID!"); - return; - } - Some(def_id) => def_id, + pub fn get_macro(&mut self, binding: &'b NameBinding<'b>) -> Rc { + let def_id = match binding.kind { + NameBindingKind::Def(Def::Macro(def_id)) => def_id, + NameBindingKind::Import { binding, .. } => return self.get_macro(binding), + NameBindingKind::Ambiguity { b1, .. } => return self.get_macro(b1), + _ => panic!("Expected Def::Macro(..)"), + }; + if let Some(ext) = self.macro_map.get(&def_id) { + return ext.clone(); + } + + let mut macro_rules = match self.session.cstore.load_macro(def_id, &self.session) { + LoadedMacro::MacroRules(macro_rules) => macro_rules, + LoadedMacro::ProcMacro(ext) => return ext, }; - for child in self.session.cstore.item_children(def_id) { - debug!("(populating external module) ... found ident: {}", - child.name); + let mark = Mark::fresh(); + let invocation = self.arenas.alloc_invocation_data(InvocationData { + module: Cell::new(self.get_extern_crate_root(def_id.krate)), + def_index: CRATE_DEF_INDEX, + const_integer: false, + legacy_scope: Cell::new(LegacyScope::Empty), + expansion: Cell::new(LegacyScope::Empty), + }); + self.invocations.insert(mark, invocation); + macro_rules.body = mark_tts(¯o_rules.body, mark); + let ext = Rc::new(macro_rules::compile(&self.session.parse_sess, ¯o_rules)); + self.macro_map.insert(def_id, ext.clone()); + ext + } + + /// Ensures that the reduced graph rooted at the given external module + /// is built, building it if it is not. + pub fn populate_module_if_necessary(&mut self, module: Module<'b>) { + if module.populated.get() { return } + for child in self.session.cstore.item_children(module.def_id().unwrap()) { self.build_reduced_graph_for_external_crate_def(module, child); } module.populated.set(true) } - /// Ensures that the reduced graph rooted at the given external module - /// is built, building it if it is not. 
- fn populate_module_if_necessary(&mut self, module: Module<'b>) { - if !module.populated.get() { - self.populate_external_module(module) + fn legacy_import_macro(&mut self, + name: Name, + binding: &'b NameBinding<'b>, + span: Span, + allow_shadowing: bool) { + self.used_crates.insert(binding.def().def_id().krate); + self.macro_names.insert(name); + if self.builtin_macros.insert(name, binding).is_some() && !allow_shadowing { + let msg = format!("`{}` is already in scope", name); + let note = + "macro-expanded `#[macro_use]`s may not shadow existing macros (see RFC 1560)"; + self.session.struct_span_err(span, &msg).note(note).emit(); } - assert!(module.populated.get()) } - /// Builds the reduced graph rooted at the 'use' directive for an external - /// crate. - fn build_reduced_graph_for_external_crate(&mut self, root: Module<'b>) { - let root_cnum = root.def_id().unwrap().krate; - for child in self.session.cstore.crate_top_level_items(root_cnum) { - self.build_reduced_graph_for_external_crate_def(root, child); + fn process_legacy_macro_imports(&mut self, item: &Item, module: Module<'b>, expansion: Mark) { + let allow_shadowing = expansion == Mark::root(); + let legacy_imports = self.legacy_macro_imports(&item.attrs); + let cnum = module.def_id().unwrap().krate; + + // `#[macro_use]` and `#[macro_reexport]` are only allowed at the crate root. 
+ if self.current_module.parent.is_some() && legacy_imports != LegacyMacroImports::default() { + span_err!(self.session, item.span, E0468, + "an `extern crate` loading macros must be at the crate root"); + } else if !self.use_extern_macros && + self.session.cstore.dep_kind(cnum).macros_only() && + legacy_imports == LegacyMacroImports::default() { + let msg = "custom derive crates and `#[no_link]` crates have no effect without \ + `#[macro_use]`"; + self.session.span_warn(item.span, msg); + self.used_crates.insert(cnum); // Avoid the normal unused extern crate warning + } + + if let Some(span) = legacy_imports.import_all { + module.for_each_child(|name, ns, binding| if ns == MacroNS { + self.legacy_import_macro(name, binding, span, allow_shadowing); + }); + } else { + for (name, span) in legacy_imports.imports { + let result = self.resolve_name_in_module(module, name, MacroNS, false, None); + if let Ok(binding) = result { + self.legacy_import_macro(name, binding, span, allow_shadowing); + } else { + span_err!(self.session, span, E0469, "imported macro not found"); + } + } + } + for (name, span) in legacy_imports.reexports { + let krate = module.def_id().unwrap().krate; + self.used_crates.insert(krate); + self.session.cstore.export_macros(krate); + let result = self.resolve_name_in_module(module, name, MacroNS, false, None); + if let Ok(binding) = result { + self.macro_exports.push(Export { name: name, def: binding.def() }); + } else { + span_err!(self.session, span, E0470, "reexported macro not found"); + } } } - /// Creates and adds an import directive to the given module. 
- fn build_import_directive(&mut self, - module_: Module<'b>, - module_path: Vec, - subclass: ImportDirectiveSubclass, - span: Span, - id: NodeId, - is_public: bool, - shadowable: Shadowable) { - module_.imports - .borrow_mut() - .push(ImportDirective::new(module_path, subclass, span, id, is_public, shadowable)); - self.unresolved_imports += 1; - - if is_public { - module_.inc_pub_count(); + // does this attribute list contain "macro_use"? + fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool { + for attr in attrs { + if attr.check_name("macro_escape") { + let msg = "macro_escape is a deprecated synonym for macro_use"; + let mut err = self.session.struct_span_warn(attr.span, msg); + if let ast::AttrStyle::Inner = attr.style { + err.help("consider an outer attribute, #[macro_use] mod ...").emit(); + } else { + err.emit(); + } + } else if !attr.check_name("macro_use") { + continue; + } + + if !attr.is_word() { + self.session.span_err(attr.span, "arguments to macro_use are not allowed here"); + } + return true; } - // Bump the reference count on the name. Or, if this is a glob, set - // the appropriate flag. 
- - match subclass { - SingleImport(target, _) => { - debug!("(building import directive) building import directive: {}::{}", - names_to_string(&module_.imports.borrow().last().unwrap().module_path), - target); - - let mut import_resolutions = module_.import_resolutions.borrow_mut(); - match import_resolutions.get_mut(&target) { - Some(resolution_per_ns) => { - debug!("(building import directive) bumping reference"); - resolution_per_ns.outstanding_references += 1; - - // the source of this name is different now - let resolution = - ImportResolution { id: id, is_public: is_public, target: None }; - resolution_per_ns[TypeNS] = resolution.clone(); - resolution_per_ns[ValueNS] = resolution; - return; - } - None => {} + false + } + + fn legacy_macro_imports(&mut self, attrs: &[ast::Attribute]) -> LegacyMacroImports { + let mut imports = LegacyMacroImports::default(); + for attr in attrs { + if attr.check_name("macro_use") { + match attr.meta_item_list() { + Some(names) => for attr in names { + if let Some(word) = attr.word() { + imports.imports.push((word.name(), attr.span())); + } else { + span_err!(self.session, attr.span(), E0466, "bad macro import"); + } + }, + None => imports.import_all = Some(attr.span), } - debug!("(building import directive) creating new"); - let mut import_resolution_per_ns = ImportResolutionPerNamespace::new(id, is_public); - import_resolution_per_ns.outstanding_references = 1; - import_resolutions.insert(target, import_resolution_per_ns); - } - GlobImport => { - // Set the glob flag. This tells us that we don't know the - // module's exports ahead of time. 
- - module_.inc_glob_count(); - if is_public { - module_.inc_pub_glob_count(); + } else if attr.check_name("macro_reexport") { + let bad_macro_reexport = |this: &mut Self, span| { + span_err!(this.session, span, E0467, "bad macro reexport"); + }; + if let Some(names) = attr.meta_item_list() { + for attr in names { + if let Some(word) = attr.word() { + imports.reexports.push((word.name(), attr.span())); + } else { + bad_macro_reexport(self, attr.span()); + } + } + } else { + bad_macro_reexport(self, attr.span()); } } } + imports } } -struct BuildReducedGraphVisitor<'a, 'b: 'a, 'tcx: 'b> { - builder: GraphBuilder<'a, 'b, 'tcx>, - parent: Module<'b>, +pub struct BuildReducedGraphVisitor<'a, 'b: 'a> { + pub resolver: &'a mut Resolver<'b>, + pub legacy_scope: LegacyScope<'b>, + pub expansion: Mark, +} + +impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { + fn visit_invoc(&mut self, id: ast::NodeId) -> &'b InvocationData<'b> { + let mark = Mark::from_placeholder_id(id); + self.resolver.current_module.unresolved_invocations.borrow_mut().insert(mark); + let invocation = self.resolver.invocations[&mark]; + invocation.module.set(self.resolver.current_module); + invocation.legacy_scope.set(self.legacy_scope); + invocation + } } -impl<'a, 'b, 'v, 'tcx> Visitor<'v> for BuildReducedGraphVisitor<'a, 'b, 'tcx> { - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.builder.resolver.ast_map.expect_item(item.id)) +macro_rules! method { + ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => { + fn $visit(&mut self, node: &$ty) { + if let $invoc(..) 
= node.node { + self.visit_invoc(node.id); + } else { + visit::$walk(self, node); + } + } } +} + +impl<'a, 'b> Visitor for BuildReducedGraphVisitor<'a, 'b> { + method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item); + method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr); + method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat); + method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty); fn visit_item(&mut self, item: &Item) { - let p = self.builder.build_reduced_graph_for_item(item, &self.parent); - let old_parent = replace(&mut self.parent, p); - intravisit::walk_item(self, item); - self.parent = old_parent; + let macro_use = match item.node { + ItemKind::Mac(..) if item.id == ast::DUMMY_NODE_ID => return, // Scope placeholder + ItemKind::Mac(..) => { + return self.legacy_scope = LegacyScope::Expansion(self.visit_invoc(item.id)); + } + ItemKind::Mod(..) => self.resolver.contains_macro_use(&item.attrs), + _ => false, + }; + + let (parent, legacy_scope) = (self.resolver.current_module, self.legacy_scope); + self.resolver.build_reduced_graph_for_item(item, self.expansion); + visit::walk_item(self, item); + self.resolver.current_module = parent; + if !macro_use { + self.legacy_scope = legacy_scope; + } + } + + fn visit_stmt(&mut self, stmt: &ast::Stmt) { + if let ast::StmtKind::Mac(..) 
= stmt.node { + self.legacy_scope = LegacyScope::Expansion(self.visit_invoc(stmt.id)); + } else { + visit::walk_stmt(self, stmt); + } } fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { - self.builder.build_reduced_graph_for_foreign_item(foreign_item, &self.parent); + self.resolver.build_reduced_graph_for_foreign_item(foreign_item, self.expansion); + visit::walk_foreign_item(self, foreign_item); } fn visit_block(&mut self, block: &Block) { - let np = self.builder.build_reduced_graph_for_block(block, &self.parent); - let old_parent = replace(&mut self.parent, np); - intravisit::walk_block(self, block); - self.parent = old_parent; + let (parent, legacy_scope) = (self.resolver.current_module, self.legacy_scope); + self.resolver.build_reduced_graph_for_block(block); + visit::walk_block(self, block); + self.resolver.current_module = parent; + self.legacy_scope = legacy_scope; } -} -pub fn build_reduced_graph(resolver: &mut Resolver, krate: &hir::Crate) { - GraphBuilder { resolver: resolver }.build_reduced_graph(krate); -} + fn visit_trait_item(&mut self, item: &TraitItem) { + let parent = self.resolver.current_module; + let def_id = parent.def_id().unwrap(); + + if let TraitItemKind::Macro(_) = item.node { + self.visit_invoc(item.id); + return + } -pub fn populate_module_if_necessary<'a, 'tcx>(resolver: &mut Resolver<'a, 'tcx>, - module: Module<'a>) { - GraphBuilder { resolver: resolver }.populate_module_if_necessary(module); + // Add the item to the trait info. + let item_def_id = self.resolver.definitions.local_def_id(item.id); + let mut is_static_method = false; + let (def, ns) = match item.node { + TraitItemKind::Const(..) => (Def::AssociatedConst(item_def_id), ValueNS), + TraitItemKind::Method(ref sig, _) => { + is_static_method = !sig.decl.has_self(); + (Def::Method(item_def_id), ValueNS) + } + TraitItemKind::Type(..) 
=> (Def::AssociatedTy(item_def_id), TypeNS), + TraitItemKind::Macro(_) => bug!(), // handled above + }; + + self.resolver.trait_item_map.insert((item.ident.name, def_id), is_static_method); + + let vis = ty::Visibility::Public; + self.resolver.define(parent, item.ident.name, ns, (def, vis, item.span, self.expansion)); + + self.resolver.current_module = parent.parent.unwrap(); // nearest normal ancestor + visit::walk_trait_item(self, item); + self.resolver.current_module = parent; + } } diff --git a/src/librustc_resolve/check_unused.rs b/src/librustc_resolve/check_unused.rs index 7f740f9c03335..492c5e695bbbb 100644 --- a/src/librustc_resolve/check_unused.rs +++ b/src/librustc_resolve/check_unused.rs @@ -16,113 +16,77 @@ // resolve data structures and because it finalises the privacy information for // `use` directives. // +// Unused trait imports can't be checked until the method resolution. We save +// candidates here, and do the acutal check in librustc_typeck/check_unused.rs. use std::ops::{Deref, DerefMut}; use Resolver; -use Namespace::{TypeNS, ValueNS}; use rustc::lint; -use rustc::middle::privacy::{DependsOn, LastImport, Used, Unused}; -use syntax::ast; -use syntax::codemap::{Span, DUMMY_SP}; +use rustc::util::nodemap::NodeMap; +use syntax::ast::{self, ViewPathGlob, ViewPathList, ViewPathSimple}; +use syntax::visit::{self, Visitor}; +use syntax_pos::{Span, MultiSpan, DUMMY_SP}; -use rustc_front::hir; -use rustc_front::hir::{ViewPathGlob, ViewPathList, ViewPathSimple}; -use rustc_front::intravisit::Visitor; -struct UnusedImportCheckVisitor<'a, 'b: 'a, 'tcx: 'b> { - resolver: &'a mut Resolver<'b, 'tcx>, +struct UnusedImportCheckVisitor<'a, 'b: 'a> { + resolver: &'a mut Resolver<'b>, + /// All the (so far) unused imports, grouped path list + unused_imports: NodeMap>, } // Deref and DerefMut impls allow treating UnusedImportCheckVisitor as Resolver. 
-impl<'a, 'b, 'tcx:'b> Deref for UnusedImportCheckVisitor<'a, 'b, 'tcx> { - type Target = Resolver<'b, 'tcx>; +impl<'a, 'b> Deref for UnusedImportCheckVisitor<'a, 'b> { + type Target = Resolver<'b>; - fn deref<'c>(&'c self) -> &'c Resolver<'b, 'tcx> { + fn deref<'c>(&'c self) -> &'c Resolver<'b> { &*self.resolver } } -impl<'a, 'b, 'tcx:'b> DerefMut for UnusedImportCheckVisitor<'a, 'b, 'tcx> { - fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b, 'tcx> { +impl<'a, 'b> DerefMut for UnusedImportCheckVisitor<'a, 'b> { + fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b> { &mut *self.resolver } } -impl<'a, 'b, 'tcx> UnusedImportCheckVisitor<'a, 'b, 'tcx> { +impl<'a, 'b> UnusedImportCheckVisitor<'a, 'b> { // We have information about whether `use` (import) directives are actually - // used now. If an import is not used at all, we signal a lint error. If an - // import is only used for a single namespace, we remove the other namespace - // from the recorded privacy information. That means in privacy.rs, we will - // only check imports and namespaces which are used. In particular, this - // means that if an import could name either a public or private item, we - // will check the correct thing, dependent on how the import is used. - fn finalize_import(&mut self, id: ast::NodeId, span: Span) { - debug!("finalizing import uses for {:?}", - self.session.codemap().span_to_snippet(span)); - - if !self.used_imports.contains(&(id, TypeNS)) && - !self.used_imports.contains(&(id, ValueNS)) { - self.session.add_lint(lint::builtin::UNUSED_IMPORTS, - id, - span, - "unused import".to_string()); - } - - let mut def_map = self.def_map.borrow_mut(); - let path_res = if let Some(r) = def_map.get_mut(&id) { - r - } else { - return; - }; - let (v_priv, t_priv) = match path_res.last_private { - LastImport { value_priv, type_priv, .. } => (value_priv, type_priv), - _ => { - panic!("we should only have LastImport for `use` directives") + // used now. 
If an import is not used at all, we signal a lint error. + fn check_import(&mut self, item_id: ast::NodeId, id: ast::NodeId, span: Span) { + let mut used = false; + self.per_ns(|this, ns| used |= this.used_imports.contains(&(id, ns))); + if !used { + if self.maybe_unused_trait_imports.contains(&id) { + // Check later. + return; } - }; - - let mut v_used = if self.used_imports.contains(&(id, ValueNS)) { - Used - } else { - Unused - }; - let t_used = if self.used_imports.contains(&(id, TypeNS)) { - Used + self.unused_imports.entry(item_id).or_insert_with(NodeMap).insert(id, span); } else { - Unused - }; - - match (v_priv, t_priv) { - // Since some items may be both in the value _and_ type namespaces (e.g., structs) - // we might have two LastPrivates pointing at the same thing. There is no point - // checking both, so lets not check the value one. - (Some(DependsOn(def_v)), Some(DependsOn(def_t))) if def_v == def_t => v_used = Unused, - _ => {} + // This trait import is definitely used, in a way other than + // method resolution. + self.maybe_unused_trait_imports.remove(&id); + if let Some(i) = self.unused_imports.get_mut(&item_id) { + i.remove(&id); + } } - - path_res.last_private = LastImport { - value_priv: v_priv, - value_used: v_used, - type_priv: t_priv, - type_used: t_used, - }; } } -impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> { - fn visit_item(&mut self, item: &hir::Item) { +impl<'a, 'b> Visitor for UnusedImportCheckVisitor<'a, 'b> { + fn visit_item(&mut self, item: &ast::Item) { + visit::walk_item(self, item); // Ignore is_public import statements because there's no way to be sure // whether they're used or not. Also ignore imports with a dummy span // because this means that they were generated in some fashion by the // compiler and we don't need to consider them. 
- if item.vis == hir::Public || item.span == DUMMY_SP { + if item.vis == ast::Visibility::Public || item.span.source_equal(&DUMMY_SP) { return; } match item.node { - hir::ItemExternCrate(_) => { + ast::ItemKind::ExternCrate(_) => { if let Some(crate_num) = self.session.cstore.extern_mod_stmt_cnum(item.id) { if !self.used_crates.contains(&crate_num) { self.session.add_lint(lint::builtin::UNUSED_EXTERN_CRATES, @@ -132,26 +96,19 @@ impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> { } } } - hir::ItemUse(ref p) => { + ast::ItemKind::Use(ref p) => { match p.node { - ViewPathSimple(_, _) => { - self.finalize_import(item.id, p.span) + ViewPathSimple(..) => { + self.check_import(item.id, item.id, p.span) } ViewPathList(_, ref list) => { for i in list { - self.finalize_import(i.node.id(), i.span); + self.check_import(item.id, i.node.id, i.span); } } ViewPathGlob(_) => { - if !self.used_imports.contains(&(item.id, TypeNS)) && - !self.used_imports.contains(&(item.id, ValueNS)) { - self.session - .add_lint(lint::builtin::UNUSED_IMPORTS, - item.id, - p.span, - "unused import".to_string()); - } + self.check_import(item.id, item.id, p.span); } } } @@ -160,7 +117,36 @@ impl<'a, 'b, 'v, 'tcx> Visitor<'v> for UnusedImportCheckVisitor<'a, 'b, 'tcx> { } } -pub fn check_crate(resolver: &mut Resolver, krate: &hir::Crate) { - let mut visitor = UnusedImportCheckVisitor { resolver: resolver }; - krate.visit_all_items(&mut visitor); +pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) { + let mut visitor = UnusedImportCheckVisitor { + resolver: resolver, + unused_imports: NodeMap(), + }; + visit::walk_crate(&mut visitor, krate); + + for (id, spans) in &visitor.unused_imports { + let len = spans.len(); + let mut spans = spans.values().map(|s| *s).collect::>(); + spans.sort(); + let ms = MultiSpan::from_spans(spans.clone()); + let mut span_snippets = spans.iter() + .filter_map(|s| { + match visitor.session.codemap().span_to_snippet(*s) { + Ok(s) => 
Some(format!("`{}`", s)), + _ => None, + } + }).collect::>(); + span_snippets.sort(); + let msg = format!("unused import{}{}", + if len > 1 { "s" } else { "" }, + if span_snippets.len() > 0 { + format!(": {}", span_snippets.join(", ")) + } else { + String::new() + }); + visitor.session.add_lint(lint::builtin::UNUSED_IMPORTS, + *id, + ms, + msg); + } } diff --git a/src/librustc_resolve/diagnostics.rs b/src/librustc_resolve/diagnostics.rs index 04ab3fe70e9fa..d54f4e7b20c7a 100644 --- a/src/librustc_resolve/diagnostics.rs +++ b/src/librustc_resolve/diagnostics.rs @@ -16,17 +16,19 @@ register_long_diagnostics! { E0154: r##" +## Note: this error code is no longer emitted by the compiler. + Imports (`use` statements) are not allowed after non-item statements, such as variable declarations and expression statements. Here is an example that demonstrates the error: -``` +```ignore fn f() { // Variable declaration before import let x = 0; use std::io::Read; - ... + // ... } ``` @@ -39,7 +41,7 @@ Here is the previous example again, with the correct order: fn f() { use std::io::Read; let x = 0; - ... + // ... } ``` @@ -50,12 +52,14 @@ https://doc.rust-lang.org/reference.html#statements "##, E0251: r##" +## Note: this error code is no longer emitted by the compiler. + Two items of the same name cannot be imported without rebinding one of the items under a new local name. An example of this error: -``` +```ignore use foo::baz; use bar::*; // error, do `use foo::baz as quux` instead on the previous line @@ -75,9 +79,9 @@ E0252: r##" Two items of the same name cannot be imported without rebinding one of the items under a new local name. -An example of this error: +Erroneous code example: -``` +```compile_fail,E0252 use foo::baz; use bar::baz; // error, do `use bar::baz as quux` instead @@ -87,6 +91,41 @@ mod foo { pub struct baz; } +mod bar { + pub mod baz {} +} +``` + +You can use aliases in order to fix this error. Example: + +``` +use foo::baz as foo_baz; +use bar::baz; // ok! 
+ +fn main() {} + +mod foo { + pub struct baz; +} + +mod bar { + pub mod baz {} +} +``` + +Or you can reference the item with its parent: + +``` +use bar::baz; + +fn main() { + let x = foo::baz; // ok! +} + +mod foo { + pub struct baz; +} + mod bar { pub mod baz {} } @@ -94,29 +133,88 @@ mod bar { "##, E0253: r##" -Attempt was made to import an unimportable value. This can happen when -trying to import a method from a trait. An example of this error: +Attempt was made to import an unimportable value. This can happen when trying +to import a method from a trait. -``` +Erroneous code example: + +```compile_fail,E0253 mod foo { pub trait MyTrait { fn do_something(); } } + use foo::MyTrait::do_something; +// error: `do_something` is not directly importable + +fn main() {} ``` It's invalid to directly import methods belonging to a trait or concrete type. "##, +E0254: r##" +Attempt was made to import an item whereas an extern crate with this name has +already been imported. + +Erroneous code example: + +```compile_fail,E0254 +extern crate collections; + +mod foo { + pub trait collections { + fn do_something(); + } +} + +use foo::collections; // error: an extern crate named `collections` has already + // been imported in this module + +fn main() {} +``` + +To fix issue issue, you have to rename at least one of the two imports. +Example: + +```ignore +extern crate collections as libcollections; // ok! + +mod foo { + pub trait collections { + fn do_something(); + } +} + +use foo::collections; + +fn main() {} +``` +"##, + E0255: r##" You can't import a value whose name is the same as another value defined in the module. -An example of this error: +Erroneous code example: +```compile_fail,E0255 +use bar::foo; // error: an item named `foo` is already in scope + +fn foo() {} + +mod bar { + pub fn foo() {} +} + +fn main() {} ``` -use bar::foo; // error, do `use bar::foo as baz` instead + +You can use aliases in order to fix this error. 
Example: + +``` +use bar::foo as bar_foo; // ok! fn foo() {} @@ -126,15 +224,31 @@ mod bar { fn main() {} ``` + +Or you can reference the item with its parent: + +``` +fn foo() {} + +mod bar { + pub fn foo() {} +} + +fn main() { + bar::foo(); // we get the item by referring to its parent +} +``` "##, E0256: r##" +## Note: this error code is no longer emitted by the compiler. + You can't import a type or module when the name of the item being imported is the same as another type or submodule defined in the module. An example of this error: -``` +```compile_fail use foo::Bar; // error type Bar = u32; @@ -148,14 +262,16 @@ fn main() {} "##, E0259: r##" -The name chosen for an external crate conflicts with another external crate that -has been imported into the current module. +The name chosen for an external crate conflicts with another external crate +that has been imported into the current module. -Wrong example: +Erroneous code example: -``` -extern crate a; -extern crate crate_a as a; +```compile_fail,E0259 +extern crate std; +extern crate libc as std; + +fn main() {} ``` The solution is to choose a different name that doesn't conflict with any @@ -163,18 +279,18 @@ external crate imported into the current module. Correct example: -``` -extern crate a; -extern crate crate_a as other_name; +```ignore +extern crate std; +extern crate libc as other_name; ``` "##, E0260: r##" The name for an item declaration conflicts with an external crate's name. -For instance, +Erroneous code example: -``` +```ignore,E0260 extern crate abc; struct abc; @@ -184,7 +300,7 @@ There are two possible solutions: Solution #1: Rename the item. -``` +```ignore extern crate abc; struct xyz; @@ -192,7 +308,7 @@ struct xyz; Solution #2: Import the crate with a different name. 
-``` +```ignore extern crate abc as xyz; struct abc; @@ -204,28 +320,20 @@ about what constitutes an Item declaration and what does not: https://doc.rust-lang.org/reference.html#statements "##, -E0317: r##" -User-defined types or type parameters cannot shadow the primitive types. -This error indicates you tried to define a type, struct or enum with the same -name as an existing primitive type. - -See the Types section of the reference for more information about the primitive -types: - -https://doc.rust-lang.org/reference.html#types -"##, - E0364: r##" -Private items cannot be publicly re-exported. This error indicates that -you attempted to `pub use` a type or value that was not itself public. +Private items cannot be publicly re-exported. This error indicates that you +attempted to `pub use` a type or value that was not itself public. -Here is an example that demonstrates the error: +Erroneous code example: -``` +```compile_fail mod foo { const X: u32 = 1; } + pub use foo::X; + +fn main() {} ``` The solution to this problem is to ensure that the items that you are @@ -235,28 +343,34 @@ re-exporting are themselves marked with `pub`: mod foo { pub const X: u32 = 1; } + pub use foo::X; + +fn main() {} ``` -See the 'Use Declarations' section of the reference for more information -on this topic: +See the 'Use Declarations' section of the reference for more information on +this topic: https://doc.rust-lang.org/reference.html#use-declarations "##, E0365: r##" -Private modules cannot be publicly re-exported. This error indicates -that you attempted to `pub use` a module that was not itself public. +Private modules cannot be publicly re-exported. This error indicates that you +attempted to `pub use` a module that was not itself public. 
-Here is an example that demonstrates the error: +Erroneous code example: -``` +```compile_fail,E0365 mod foo { pub const X: u32 = 1; } + pub use foo as foo2; +fn main() {} ``` + The solution to this problem is to ensure that the module that you are re-exporting is itself marked with `pub`: @@ -264,7 +378,10 @@ re-exporting is itself marked with `pub`: pub mod foo { pub const X: u32 = 1; } + pub use foo as foo2; + +fn main() {} ``` See the 'Use Declarations' section of the reference for more information @@ -274,10 +391,12 @@ https://doc.rust-lang.org/reference.html#use-declarations "##, E0401: r##" -Inner items do not inherit type parameters from the functions they are -embedded in. For example, this will not compile: +Inner items do not inherit type parameters from the functions they are embedded +in. -``` +Erroneous code example: + +```compile_fail,E0401 fn foo(x: T) { fn bar(y: T) { // T is defined in the "outer" function // .. @@ -286,18 +405,18 @@ fn foo(x: T) { } ``` -nor will this: +Nor will this: -``` +```compile_fail,E0401 fn foo(x: T) { type MaybeT = Option; // ... } ``` -or this: +Or this: -``` +```compile_fail,E0401 fn foo(x: T) { struct Foo { x: T, @@ -317,7 +436,7 @@ If the item is a function, you may use a closure: fn foo(x: T) { let bar = |y: T| { // explicit type annotation may not be necessary // .. - } + }; bar(x); } ``` @@ -363,11 +482,12 @@ This may require additional type hints in the function body. In case the item is a function inside an `impl`, defining a private helper function might be easier: -``` +```ignore impl Foo { pub fn foo(&self, x: T) { self.bar(x); } + fn bar(&self, y: T) { // .. } @@ -379,9 +499,11 @@ closures or copying the parameters should still work. "##, E0403: r##" -Some type parameters have the same name. Example of erroneous code: +Some type parameters have the same name. 
-``` +Erroneous code example: + +```compile_fail,E0403 fn foo(s: T, u: T) {} // error: the name `T` is already used for a type // parameter in this type parameter list ``` @@ -395,10 +517,11 @@ fn foo(s: T, u: Y) {} // ok! "##, E0404: r##" -You tried to implement something which was not a trait on an object. Example of -erroneous code: +You tried to implement something which was not a trait on an object. -``` +Erroneous code example: + +```compile_fail,E0404 struct Foo; struct Bar; @@ -421,18 +544,20 @@ impl Foo for Bar { // ok! "##, E0405: r##" -An unknown trait was implemented. Example of erroneous code: +The code refers to a trait that is not in scope. -``` +Erroneous code example: + +```compile_fail,E0405 struct Foo; -impl SomeTrait for Foo {} // error: use of undeclared trait name `SomeTrait` +impl SomeTrait for Foo {} // error: trait `SomeTrait` is not in scope ``` Please verify that the name of the trait wasn't misspelled and ensure that it was imported. Example: -``` +```ignore // solution 1: use some_file::SomeTrait; @@ -451,9 +576,11 @@ impl SomeTrait for Foo { // ok! E0407: r##" A definition of a method not in the implemented trait was given in a trait -implementation. Example of erroneous code: +implementation. -``` +Erroneous code example: + +```compile_fail,E0407 trait Foo { fn a(); } @@ -502,17 +629,103 @@ impl Bar { ``` "##, -E0411: r##" -The `Self` keyword was used outside an impl or a trait. Erroneous -code example: +E0408: r##" +An "or" pattern was used where the variable bindings are not consistently bound +across patterns. + +Erroneous code example: + +```compile_fail,E0408 +match x { + Some(y) | None => { /* use y */ } // error: variable `y` from pattern #1 is + // not bound in pattern #2 + _ => () +} +``` + +Here, `y` is bound to the contents of the `Some` and can be used within the +block corresponding to the match arm. However, in case `x` is `None`, we have +not specified what `y` is, and the block will use a nonexistent variable. 
+ +To fix this error, either split into multiple match arms: ``` +let x = Some(1); +match x { + Some(y) => { /* use y */ } + None => { /* ... */ } +} +``` + +or, bind the variable to a field of the same type in all sub-patterns of the +or pattern: + +``` +let x = (0, 2); +match x { + (0, y) | (y, 0) => { /* use y */} + _ => {} +} +``` + +In this example, if `x` matches the pattern `(0, _)`, the second field is set +to `y`. If it matches `(_, 0)`, the first field is set to `y`; so in all +cases `y` is set to some value. +"##, + +E0409: r##" +An "or" pattern was used where the variable bindings are not consistently bound +across patterns. + +Erroneous code example: + +```compile_fail,E0409 +let x = (0, 2); +match x { + (0, ref y) | (y, 0) => { /* use y */} // error: variable `y` is bound with + // different mode in pattern #2 + // than in pattern #1 + _ => () +} +``` + +Here, `y` is bound by-value in one case and by-reference in the other. + +To fix this error, just use the same mode in both cases. +Generally using `ref` or `ref mut` where not already used will fix this: + +```ignore +let x = (0, 2); +match x { + (0, ref y) | (ref y, 0) => { /* use y */} + _ => () +} +``` + +Alternatively, split the pattern: + +``` +let x = (0, 2); +match x { + (y, 0) => { /* use y */ } + (0, ref y) => { /* use y */} + _ => () +} +``` +"##, + +E0411: r##" +The `Self` keyword was used outside an impl or a trait. + +Erroneous code example: + +```compile_fail,E0411 ::foo; // error: use of `Self` outside of an impl or trait ``` -The `Self` keyword represents the current type, which explains why it -can only be used inside an impl or a trait. It gives access to the -associated items of a type: +The `Self` keyword represents the current type, which explains why it can only +be used inside an impl or a trait. 
It gives access to the associated items of a +type: ``` trait Foo { @@ -524,9 +737,9 @@ trait Baz : Foo { } ``` -However, be careful when two types has a common associated type: +However, be careful when two types have a common associated type: -``` +```compile_fail trait Foo { type Bar; } @@ -541,10 +754,18 @@ trait Baz : Foo + Foo2 { } ``` -This problem can be solved by specifying from which trait we want -to use the `Bar` type: +This problem can be solved by specifying from which trait we want to use the +`Bar` type: ``` +trait Foo { + type Bar; +} + +trait Foo2 { + type Bar; +} + trait Baz : Foo + Foo2 { fn bar() -> ::Bar; // ok! } @@ -552,73 +773,52 @@ trait Baz : Foo + Foo2 { "##, E0412: r##" -An undeclared type name was used. Example of erroneous codes: +The type name used is not in scope. + +Erroneous code examples: + +```compile_fail,E0412 +impl Something {} // error: type name `Something` is not in scope -``` -impl Something {} // error: use of undeclared type name `Something` // or: + trait Foo { - fn bar(N); // error: use of undeclared type name `N` + fn bar(N); // error: type name `N` is not in scope } + // or: -fn foo(x: T) {} // error: use of undeclared type name `T` + +fn foo(x: T) {} // type name `T` is not in scope ``` -To fix this error, please verify you didn't misspell the type name, -you did declare it or imported it into the scope. Examples: +To fix this error, please verify you didn't misspell the type name, you did +declare it or imported it into the scope. Examples: ``` struct Something; impl Something {} // ok! + // or: + trait Foo { type N; fn bar(Self::N); // ok! } -//or: -fn foo(x: T) {} // ok! -``` -"##, - -E0413: r##" -A declaration shadows an enum variant or unit-like struct in scope. 
-Example of erroneous code: - -``` -struct Foo; - -let Foo = 12i32; // error: declaration of `Foo` shadows an enum variant or - // unit-like struct in scope -``` - -To fix this error, rename the variable such that it doesn't shadow any enum -variable or structure in scope. Example: - -``` -struct Foo; - -let foo = 12i32; // ok! -``` - -Or: - -``` -struct FooStruct; +// or: -let Foo = 12i32; // ok! +fn foo(x: T) {} // ok! ``` - -The goal here is to avoid a conflict of names. "##, E0415: r##" -More than one function parameter have the same name. Example of erroneous -code: +More than one function parameter have the same name. -``` +Erroneous code example: + +```compile_fail,E0415 fn foo(f: i32, f: i32) {} // error: identifier `f` is bound more than // once in this parameter list ``` @@ -631,10 +831,11 @@ fn foo(f: i32, g: i32) {} // ok! "##, E0416: r##" -An identifier is bound more than once in a pattern. Example of erroneous -code: +An identifier is bound more than once in a pattern. -``` +Erroneous code example: + +```compile_fail,E0416 match (1, 2) { (x, x) => {} // error: identifier `x` is bound more than once in the // same pattern @@ -651,7 +852,7 @@ match (1, 2) { Or maybe did you mean to unify? Consider using a guard: -``` +```ignore match (A, B, C) { (x, x2, see) if x == x2 => { /* A and B are equal, do one thing */ } (y, z, see) => { /* A and B unequal; do another thing */ } @@ -659,98 +860,21 @@ match (A, B, C) { ``` "##, -E0417: r##" -A static variable was referenced in a pattern. Example of erroneous code: - -``` -static FOO : i32 = 0; - -match 0 { - FOO => {} // error: static variables cannot be referenced in a - // pattern, use a `const` instead - _ => {} -} -``` - -The compiler needs to know the value of the pattern at compile time; -compile-time patterns can defined via const or enum items. Please verify -that the identifier is spelled correctly, and if so, use a const instead -of static to define it. 
Example: - -``` -const FOO : i32 = 0; - -match 0 { - FOO => {} // ok! - _ => {} -} -``` -"##, - -E0419: r##" -An unknown enum variant, struct or const was used. Example of -erroneous code: - -``` -match 0 { - Something::Foo => {} // error: unresolved enum variant, struct - // or const `Foo` -} -``` - -Please verify you didn't misspell it and the enum variant, struct or const has -been declared and imported into scope. Example: - -``` -enum Something { - Foo, - NotFoo, -} - -match Something::NotFoo { - Something::Foo => {} // ok! - _ => {} -} -``` -"##, - -E0422: r##" -You are trying to use an identifier that is either undefined or not a -struct. For instance: -``` -fn main () { - let x = Foo { x: 1, y: 2 }; -} -``` - -In this case, `Foo` is undefined, so it inherently isn't anything, and -definitely not a struct. - -``` -fn main () { - let foo = 1; - let x = foo { x: 1, y: 2 }; -} -``` - -In this case, `foo` is defined, but is not a struct, so Rust can't use -it as one. -"##, - E0423: r##" -A `struct` variant name was used like a function name. Example of -erroneous code: +A `struct` variant name was used like a function name. -``` -struct Foo { a: bool}; +Erroneous code example: + +```compile_fail,E0423 +struct Foo { a: bool }; let f = Foo(); // error: `Foo` is a struct variant name, but this expression uses // it like a function name ``` -Please verify you didn't misspell the name of what you actually wanted -to use here. Example: +Please verify you didn't misspell the name of what you actually wanted to use +here. Example: ``` fn Foo() -> u32 { 0 } @@ -760,9 +884,11 @@ let f = Foo(); // ok! "##, E0424: r##" -The `self` keyword was used in a static method. Example of erroneous code: +The `self` keyword was used in a static method. -``` +Erroneous code example: + +```compile_fail,E0424 struct Foo; impl Foo { @@ -792,13 +918,16 @@ impl Foo { "##, E0425: r##" -An unresolved name was used. Example of erroneous codes: +An unresolved name was used. 
-``` +Erroneous code examples: + +```compile_fail,E0425 something_that_doesnt_exist::foo; // error: unresolved name `something_that_doesnt_exist::foo` // or: + trait Foo { fn bar() { Self; // error: unresolved name `Self` @@ -806,6 +935,7 @@ trait Foo { } // or: + let x = unknown_variable; // error: unresolved name `unknown_variable` ``` @@ -814,26 +944,45 @@ identifier being referred to is valid for the given situation. Example: ``` enum something_that_does_exist { - foo + Foo, } +``` -// or: +Or: + +``` mod something_that_does_exist { pub static foo : i32 = 0i32; } something_that_does_exist::foo; // ok! +``` -// or: +Or: + +``` let unknown_variable = 12u32; let x = unknown_variable; // ok! ``` + +If the item is not defined in the current module, it must be imported using a +`use` statement, like so: + +```ignore +use foo::bar; +bar(); +``` + +If the item you are importing is not defined in some super-module of the +current module, then it must also be declared as public (e.g., `pub fn`). "##, E0426: r##" -An undeclared label was used. Example of erroneous code: +An undeclared label was used. -``` +Erroneous code example: + +```compile_fail,E0426 loop { break 'a; // error: use of undeclared label `'a` } @@ -849,10 +998,11 @@ Please verify you spelt or declare the label correctly. Example: "##, E0428: r##" -A type or module has been defined more than once. Example of erroneous -code: +A type or module has been defined more than once. -``` +Erroneous code example: + +```compile_fail,E0428 struct Bar; struct Bar; // error: duplicate definition of value `Bar` ``` @@ -866,10 +1016,36 @@ struct Bar2; // ok! ``` "##, -E0430: r##" -The `self` import appears more than once in the list. Erroneous code example: +E0429: r##" +The `self` keyword cannot appear alone as the last segment in a `use` +declaration. 
+ +Erroneous code example: + +```compile_fail,E0429 +use std::fmt::self; // error: `self` imports are only allowed within a { } list +``` + +To use a namespace itself in addition to some of its members, `self` may appear +as part of a brace-enclosed list of imports: + +``` +use std::fmt::{self, Debug}; +``` + +If you only want to import the namespace, do so directly: ``` +use std::fmt; +``` +"##, + +E0430: r##" +The `self` import appears more than once in the list. + +Erroneous code example: + +```compile_fail,E0430 use something::{self, self}; // error: `self` import can only appear once in // the list ``` @@ -877,15 +1053,17 @@ use something::{self, self}; // error: `self` import can only appear once in Please verify you didn't misspell the import name or remove the duplicated `self` import. Example: -``` +```ignore use something::self; // ok! ``` "##, E0431: r##" -`self` import was made. Erroneous code example: +An invalid `self` import was made. -``` +Erroneous code example: + +```compile_fail,E0431 use {self}; // error: `self` import can only appear in an import list with a // non-empty prefix ``` @@ -895,17 +1073,22 @@ or verify you didn't misspell it. "##, E0432: r##" -An import was unresolved. Erroneous code example: +An import was unresolved. -``` +Erroneous code example: + +```compile_fail,E0432 use something::Foo; // error: unresolved import `something::Foo`. ``` -Please verify you didn't misspell the import name or the import does exist -in the module from where you tried to import it. Example: +Paths in `use` statements are relative to the crate root. To import items +relative to the current and parent modules, use the `self::` and `super::` +prefixes, respectively. Also verify that you didn't misspell the import +name and that the import exists in the module from where you tried to +import it. Example: -``` -use something::Foo; // ok! +```ignore +use self::something::Foo; // ok! 
mod something { pub struct Foo; @@ -913,9 +1096,9 @@ mod something { ``` Or, if you tried to use a module from an external crate, you may have missed -the `extern crate` declaration: +the `extern crate` declaration (which is usually placed in the crate root): -``` +```ignore extern crate homura; // Required to use the `homura` crate use homura::Madoka; @@ -923,21 +1106,76 @@ use homura::Madoka; "##, E0433: r##" -Invalid import. Example of erroneous code: +An undeclared type or module was used. + +Erroneous code example: +```compile_fail,E0433 +let map = HashMap::new(); +// error: failed to resolve. Use of undeclared type or module `HashMap` ``` -use something_which_doesnt_exist; -// error: unresolved import `something_which_doesnt_exist` + +Please verify you didn't misspell the type/module's name or that you didn't +forgot to import it: + + +``` +use std::collections::HashMap; // HashMap has been imported. +let map: HashMap = HashMap::new(); // So it can be used! ``` +"##, + +E0434: r##" +This error indicates that a variable usage inside an inner function is invalid +because the variable comes from a dynamic environment. Inner functions do not +have access to their containing environment. + +Erroneous code example: -Please verify you didn't misspell the import's name. +```compile_fail,E0434 +fn foo() { + let y = 5; + fn bar() -> u32 { + y // error: can't capture dynamic environment in a fn item; use the + // || { ... } closure form instead. + } +} +``` + +Functions do not capture local variables. To fix this error, you can replace the +function with a closure: + +``` +fn foo() { + let y = 5; + let bar = || { + y + }; +} +``` + +or replace the captured variable with a constant or a static item: + +``` +fn foo() { + static mut X: u32 = 4; + const Y: u32 = 5; + fn bar() -> u32 { + unsafe { + X = 3; + } + Y + } +} +``` "##, E0435: r##" -A non-constant value was used to initialise a constant. 
Example of erroneous -code: +A non-constant value was used to initialise a constant. -``` +Erroneous code example: + +```compile_fail,E0435 let foo = 42u32; const FOO : u32 = foo; // error: attempt to use a non-constant value in a // constant @@ -947,8 +1185,11 @@ To fix this error, please replace the value with a constant. Example: ``` const FOO : u32 = 42u32; // ok! +``` -// or: +Or: + +``` const OTHER_FOO : u32 = 42u32; const FOO : u32 = OTHER_FOO; // ok! ``` @@ -960,9 +1201,9 @@ the trait in question. This error indicates that you attempted to implement an associated type whose name does not match the name of any associated type in the trait. -Here is an example that demonstrates the error: +Erroneous code example: -``` +```compile_fail,E0437 trait Foo {} impl Foo for i32 { @@ -985,9 +1226,9 @@ members of the trait in question. This error indicates that you attempted to implement an associated constant whose name does not match the name of any associated constant in the trait. -Here is an example that demonstrates the error: +Erroneous code example: -``` +```compile_fail,E0438 #![feature(associated_consts)] trait Foo {} @@ -1004,27 +1245,281 @@ trait Foo {} impl Foo for i32 {} ``` -"## +"##, + +E0466: r##" +Macro import declarations were malformed. + +Erroneous code examples: + +```compile_fail,E0466 +#[macro_use(a_macro(another_macro))] // error: invalid import declaration +extern crate core as some_crate; + +#[macro_use(i_want = "some_macros")] // error: invalid import declaration +extern crate core as another_crate; +``` + +This is a syntax error at the level of attribute declarations. The proper +syntax for macro imports is the following: + +```ignore +// In some_crate: +#[macro_export] +macro_rules! get_tacos { + ... +} + +#[macro_export] +macro_rules! get_pimientos { + ... 
+} + +// In your crate: +#[macro_use(get_tacos, get_pimientos)] // It imports `get_tacos` and +extern crate some_crate; // `get_pimientos` macros from some_crate +``` + +If you would like to import all exported macros, write `macro_use` with no +arguments. +"##, + +E0467: r##" +Macro reexport declarations were empty or malformed. + +Erroneous code examples: + +```compile_fail,E0467 +#[macro_reexport] // error: no macros listed for export +extern crate core as macros_for_good; + +#[macro_reexport(fun_macro = "foo")] // error: not a macro identifier +extern crate core as other_macros_for_good; +``` + +This is a syntax error at the level of attribute declarations. + +Currently, `macro_reexport` requires at least one macro name to be listed. +Unlike `macro_use`, listing no names does not reexport all macros from the +given crate. + +Decide which macros you would like to export and list them properly. + +These are proper reexport declarations: + +```ignore +#[macro_reexport(some_macro, another_macro)] +extern crate macros_for_good; +``` +"##, + +E0468: r##" +A non-root module attempts to import macros from another crate. + +Example of erroneous code: + +```compile_fail,E0468 +mod foo { + #[macro_use(helpful_macro)] // error: must be at crate root to import + extern crate core; // macros from another crate + helpful_macro!(...); +} +``` + +Only `extern crate` imports at the crate root level are allowed to import +macros. + +Either move the macro import to crate root or do without the foreign macros. +This will work: + +```ignore +#[macro_use(helpful_macro)] +extern crate some_crate; + +mod foo { + helpful_macro!(...) +} +``` +"##, + +E0469: r##" +A macro listed for import was not found. + +Erroneous code example: + +```compile_fail,E0469 +#[macro_use(drink, be_merry)] // error: imported macro not found +extern crate collections; + +fn main() { + // ... +} +``` + +Either the listed macro is not contained in the imported crate, or it is not +exported from the given crate. 
+ +This could be caused by a typo. Did you misspell the macro's name? + +Double-check the names of the macros listed for import, and that the crate +in question exports them. + +A working version would be: + +```ignore +// In some_crate crate: +#[macro_export] +macro_rules! eat { + ... +} + +#[macro_export] +macro_rules! drink { + ... +} + +// In your crate: +#[macro_use(eat, drink)] +extern crate some_crate; //ok! +``` +"##, + +E0470: r##" +A macro listed for reexport was not found. + +Erroneous code example: + +```compile_fail,E0470 +#[macro_reexport(drink, be_merry)] +extern crate collections; + +fn main() { + // ... +} +``` + +Either the listed macro is not contained in the imported crate, or it is not +exported from the given crate. + +This could be caused by a typo. Did you misspell the macro's name? + +Double-check the names of the macros listed for reexport, and that the crate +in question exports them. + +A working version: + +```ignore +// In some_crate crate: +#[macro_export] +macro_rules! eat { + ... +} + +#[macro_export] +macro_rules! drink { + ... +} + +// In your_crate: +#[macro_reexport(eat, drink)] +extern crate some_crate; +``` +"##, + +E0530: r##" +A binding shadowed something it shouldn't. + +Erroneous code example: + +```compile_fail,E0530 +static TEST: i32 = 0; + +let r: (i32, i32) = (0, 0); +match r { + TEST => {} // error: match bindings cannot shadow statics +} +``` + +To fix this error, just change the binding's name in order to avoid shadowing +one of the following: + +* struct name +* struct/enum variant +* static +* const +* associated const + +Fixed example: + +``` +static TEST: i32 = 0; + +let r: (i32, i32) = (0, 0); +match r { + something => {} // ok! +} +``` +"##, + +E0532: r##" +Pattern arm did not match expected kind. 
+ +Erroneous code example: + +```compile_fail,E0532 +enum State { + Succeeded, + Failed(String), +} + +fn print_on_failure(state: &State) { + match *state { + // error: expected unit struct/variant or constant, found tuple + // variant `State::Failed` + State::Failed => println!("Failed"), + _ => () + } +} +``` + +To fix this error, ensure the match arm kind is the same as the expression +matched. + +Fixed example: + +``` +enum State { + Succeeded, + Failed(String), +} + +fn print_on_failure(state: &State) { + match *state { + State::Failed(ref msg) => println!("Failed with {}", msg), + _ => () + } +} +``` +"##, } register_diagnostics! { // E0153, unused error code // E0157, unused error code - E0254, // import conflicts with imported crate in this module - E0257, - E0258, +// E0257, +// E0258, E0402, // cannot use an outer type parameter in this context - E0406, // undeclared associated type - E0408, // variable from pattern #1 is not bound in pattern # - E0409, // variable is bound with different mode in pattern # than in - // pattern #1 - E0410, // variable from pattern is not bound in pattern 1 - E0414, // only irrefutable patterns allowed here - E0418, // is not an enum variant, struct or const - E0420, // is not an associated const - E0421, // unresolved associated const - E0427, // cannot use `ref` binding mode with ... 
- E0429, // `self` imports are only allowed within a { } list - E0434, // can't capture dynamic environment in a fn item +// E0406, merged into 420 +// E0410, merged into 408 +// E0413, merged into 530 +// E0414, merged into 530 +// E0417, merged into 532 +// E0418, merged into 532 +// E0419, merged into 531 +// E0420, merged into 532 +// E0421, merged into 531 +// E0422, merged into 531/532 + E0531, // unresolved pattern path kind `name` +// E0427, merged into 530 } diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 8464d3ef29870..4738e73d2a8df 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -15,6 +15,7 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![feature(associated_consts)] #![feature(borrow_state)] @@ -26,164 +27,120 @@ extern crate log; #[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_errors as errors; extern crate arena; #[macro_use] -#[no_link] -extern crate rustc_bitflags; -extern crate rustc_front; extern crate rustc; -use self::PatternBindingMode::*; use self::Namespace::*; -use self::NamespaceResult::*; -use self::ResolveResult::*; use self::FallbackSuggestion::*; use self::TypeParameters::*; use self::RibKind::*; -use self::UseLexicalScopeFlag::*; -use self::ModulePrefixResult::*; -use self::AssocItemResolveResult::*; -use self::NameSearchType::*; -use self::BareIdentifierPatternResolution::*; -use self::ParentLink::*; -use self::FallbackChecks::*; - -use rustc::front::map as hir_map; + +use rustc::hir::map::{Definitions, DefCollector}; +use rustc::hir::{self, PrimTy, TyBool, TyChar, TyFloat, TyInt, TyUint, TyStr}; +use rustc::middle::cstore::CrateLoader; use rustc::session::Session; use rustc::lint; -use rustc::middle::cstore::{CrateStore, DefLike, DlDef}; -use 
rustc::middle::def::*; -use rustc::middle::def_id::DefId; -use rustc::middle::pat_util::pat_bindings; -use rustc::middle::privacy::*; -use rustc::middle::subst::{ParamSpace, FnSpace, TypeSpace}; -use rustc::middle::ty::{Freevar, FreevarMap, TraitMap, GlobMap}; -use rustc::util::nodemap::{NodeMap, DefIdSet, FnvHashMap}; - -use syntax::ast; -use syntax::ast::{CRATE_NODE_ID, Name, NodeId, CrateNum, TyIs, TyI8, TyI16, TyI32, TyI64}; -use syntax::ast::{TyUs, TyU8, TyU16, TyU32, TyU64, TyF64, TyF32}; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::{self, Span, Pos}; -use syntax::errors::DiagnosticBuilder; -use syntax::parse::token::{self, special_names, special_idents}; +use rustc::hir::def::*; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; +use rustc::ty; +use rustc::hir::{Freevar, FreevarMap, TraitCandidate, TraitMap, GlobMap}; +use rustc::util::nodemap::{NodeMap, NodeSet, FxHashMap, FxHashSet}; + +use syntax::ext::hygiene::{Mark, SyntaxContext}; +use syntax::ast::{self, FloatTy}; +use syntax::ast::{CRATE_NODE_ID, Name, NodeId, Ident, SpannedIdent, IntTy, UintTy}; +use syntax::ext::base::SyntaxExtension; +use syntax::ext::base::Determinacy::{Determined, Undetermined}; +use syntax::symbol::{Symbol, keywords}; use syntax::util::lev_distance::find_best_match_for_name; -use rustc_front::intravisit::{self, FnKind, Visitor}; -use rustc_front::hir; -use rustc_front::hir::{Arm, BindByRef, BindByValue, BindingMode, Block}; -use rustc_front::hir::Crate; -use rustc_front::hir::{Expr, ExprAgain, ExprBreak, ExprCall, ExprField}; -use rustc_front::hir::{ExprLoop, ExprWhile, ExprMethodCall}; -use rustc_front::hir::{ExprPath, ExprStruct, FnDecl}; -use rustc_front::hir::{ForeignItemFn, ForeignItemStatic, Generics}; -use rustc_front::hir::{ImplItem, Item, ItemConst, ItemEnum, ItemExternCrate}; -use rustc_front::hir::{ItemFn, ItemForeignMod, ItemImpl, ItemMod, ItemStatic, ItemDefaultImpl}; -use rustc_front::hir::{ItemStruct, ItemTrait, ItemTy, ItemUse}; -use 
rustc_front::hir::Local; -use rustc_front::hir::{Pat, PatEnum, PatIdent, PatLit, PatQPath}; -use rustc_front::hir::{PatRange, PatStruct, Path, PrimTy}; -use rustc_front::hir::{TraitRef, Ty, TyBool, TyChar, TyFloat, TyInt}; -use rustc_front::hir::{TyRptr, TyStr, TyUint, TyPath, TyPtr}; -use rustc_front::util::walk_pat; - -use std::collections::{HashMap, HashSet}; +use syntax::visit::{self, FnKind, Visitor}; +use syntax::attr; +use syntax::ast::{Arm, BindingMode, Block, Crate, Expr, ExprKind}; +use syntax::ast::{FnDecl, ForeignItem, ForeignItemKind, Generics}; +use syntax::ast::{Item, ItemKind, ImplItem, ImplItemKind}; +use syntax::ast::{Local, Mutability, Pat, PatKind, Path}; +use syntax::ast::{PathSegment, PathParameters, QSelf, TraitItemKind, TraitRef, Ty, TyKind}; + +use syntax_pos::{Span, DUMMY_SP}; +use errors::DiagnosticBuilder; + use std::cell::{Cell, RefCell}; use std::fmt; use std::mem::replace; use std::rc::Rc; -use resolve_imports::{Target, ImportDirective, ImportResolutionPerNamespace}; -use resolve_imports::Shadowable; +use resolve_imports::{ImportDirective, ImportDirectiveSubclass, NameResolution, ImportResolver}; +use macros::{InvocationData, LegacyBinding, LegacyScope}; // NB: This module needs to be declared first so diagnostics are // registered before they are used. -pub mod diagnostics; +mod diagnostics; +mod macros; mod check_unused; mod build_reduced_graph; mod resolve_imports; -// Perform the callback, not walking deeper if the return is true -macro_rules! 
execute_callback { - ($node: expr, $walker: expr) => ( - if let Some(ref callback) = $walker.callback { - if callback($node, &mut $walker.resolved) { - return; - } - } - ) -} - enum SuggestionType { Macro(String), - Function(token::InternedString), + Function(Symbol), NotFound, } -pub enum ResolutionError<'a> { - /// error E0260: name conflicts with an extern crate - NameConflictsWithExternCrate(Name), +/// Candidates for a name resolution failure +struct SuggestedCandidates { + name: String, + candidates: Vec, +} + +enum ResolutionError<'a> { /// error E0401: can't use type parameters from outer function TypeParametersFromOuterFunction, /// error E0402: cannot use an outer type parameter in this context OuterTypeParameterContext, /// error E0403: the name is already used for a type parameter in this type parameter list - NameAlreadyUsedInTypeParameterList(Name), + NameAlreadyUsedInTypeParameterList(Name, &'a Span), /// error E0404: is not a trait - IsNotATrait(&'a str), + IsNotATrait(&'a str, &'a str), /// error E0405: use of undeclared trait name - UndeclaredTraitName(&'a str), - /// error E0406: undeclared associated type - UndeclaredAssociatedType, + UndeclaredTraitName(&'a str, SuggestedCandidates), /// error E0407: method is not a member of trait MethodNotMemberOfTrait(Name, &'a str), /// error E0437: type is not a member of trait TypeNotMemberOfTrait(Name, &'a str), /// error E0438: const is not a member of trait ConstNotMemberOfTrait(Name, &'a str), - /// error E0408: variable `{}` from pattern #1 is not bound in pattern - VariableNotBoundInPattern(Name, usize), + /// error E0408: variable `{}` from pattern #{} is not bound in pattern #{} + VariableNotBoundInPattern(Name, usize, usize), /// error E0409: variable is bound with different mode in pattern #{} than in pattern #1 - VariableBoundWithDifferentMode(Name, usize), - /// error E0410: variable from pattern is not bound in pattern #1 - VariableNotBoundInParentPattern(Name, usize), + 
VariableBoundWithDifferentMode(Name, usize, Span), /// error E0411: use of `Self` outside of an impl or trait SelfUsedOutsideImplOrTrait, /// error E0412: use of undeclared - UseOfUndeclared(&'a str, &'a str), - /// error E0413: declaration shadows an enum variant or unit-like struct in scope - DeclarationShadowsEnumVariantOrUnitLikeStruct(Name), - /// error E0414: only irrefutable patterns allowed here - OnlyIrrefutablePatternsAllowedHere(DefId, Name), + UseOfUndeclared(&'a str, &'a str, SuggestedCandidates), /// error E0415: identifier is bound more than once in this parameter list IdentifierBoundMoreThanOnceInParameterList(&'a str), /// error E0416: identifier is bound more than once in the same pattern IdentifierBoundMoreThanOnceInSamePattern(&'a str), - /// error E0417: static variables cannot be referenced in a pattern - StaticVariableReference, - /// error E0418: is not an enum variant, struct or const - NotAnEnumVariantStructOrConst(&'a str), - /// error E0419: unresolved enum variant, struct or const - UnresolvedEnumVariantStructOrConst(&'a str), - /// error E0420: is not an associated const - NotAnAssociatedConst(&'a str), - /// error E0421: unresolved associated const - UnresolvedAssociatedConst(&'a str), - /// error E0422: does not name a struct - DoesNotNameAStruct(&'a str), /// error E0423: is a struct variant name, but this expression uses it like a function name StructVariantUsedAsFunction(&'a str), /// error E0424: `self` is not available in a static method SelfNotAvailableInStaticMethod, /// error E0425: unresolved name - UnresolvedName(&'a str, &'a str, UnresolvedNameContext), + UnresolvedName { + path: &'a str, + message: &'a str, + context: UnresolvedNameContext<'a>, + is_static_method: bool, + is_field: bool, + def: Def, + }, /// error E0426: use of undeclared label UndeclaredLabel(&'a str), - /// error E0427: cannot use `ref` binding mode with ... 
- CannotUseRefBindingModeWith(&'a str), - /// error E0428: duplicate definition - DuplicateDefinition(&'a str, Name), /// error E0429: `self` imports are only allowed within a { } list SelfImportsOnlyAllowedWithin, /// error E0430: `self` import can only appear once in the list @@ -198,16 +155,22 @@ pub enum ResolutionError<'a> { CannotCaptureDynamicEnvironmentInFnItem, /// error E0435: attempt to use a non-constant value in a constant AttemptToUseNonConstantValueInConstant, + /// error E0530: X bindings cannot shadow Ys + BindingShadowsSomethingUnacceptable(&'a str, Name, &'a NameBinding<'a>), + /// error E0531: unresolved pattern path kind `name` + PatPathUnresolved(&'a str, &'a Path), + /// error E0532: expected pattern path kind, found another pattern path kind + PatPathUnexpected(&'a str, &'a str, &'a Path), } /// Context of where `ResolutionError::UnresolvedName` arose. #[derive(Clone, PartialEq, Eq, Debug)] -pub enum UnresolvedNameContext { - /// `PathIsMod(id)` indicates that a given path, used in +enum UnresolvedNameContext<'a> { + /// `PathIsMod(parent)` indicates that a given path, used in /// expression context, actually resolved to a module rather than - /// a value. The `id` attached to the variant is the node id of - /// the erroneous path expression. - PathIsMod(ast::NodeId), + /// a value. The optional expression attached to the variant is the + /// the parent of the erroneous path expression. + PathIsMod(Option<&'a Expr>), /// `Other` means we have no extra information about the context /// of the unresolved name error. 
(Maybe we could eliminate all @@ -215,35 +178,25 @@ pub enum UnresolvedNameContext { Other, } -fn resolve_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>, - span: syntax::codemap::Span, - resolution_error: ResolutionError<'b>) { +fn resolve_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, + span: syntax_pos::Span, + resolution_error: ResolutionError<'c>) { resolve_struct_error(resolver, span, resolution_error).emit(); } -fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>, - span: syntax::codemap::Span, - resolution_error: ResolutionError<'b>) - -> DiagnosticBuilder<'a> { - if !resolver.emit_errors { - return resolver.session.diagnostic().struct_dummy(); - } - +fn resolve_struct_error<'b, 'a: 'b, 'c>(resolver: &'b Resolver<'a>, + span: syntax_pos::Span, + resolution_error: ResolutionError<'c>) + -> DiagnosticBuilder<'a> { match resolution_error { - ResolutionError::NameConflictsWithExternCrate(name) => { - struct_span_err!(resolver.session, - span, - E0260, - "the name `{}` conflicts with an external crate \ - that has been imported into this module", - name) - } ResolutionError::TypeParametersFromOuterFunction => { - struct_span_err!(resolver.session, - span, - E0401, - "can't use type parameters from outer function; try using a local \ - type parameter instead") + let mut err = struct_span_err!(resolver.session, + span, + E0401, + "can't use type parameters from outer function; \ + try using a local type parameter instead"); + err.span_label(span, &format!("use of type variable from outer function")); + err } ResolutionError::OuterTypeParameterContext => { struct_span_err!(resolver.session, @@ -251,257 +204,200 @@ fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>, E0402, "cannot use an outer type parameter in this context") } - ResolutionError::NameAlreadyUsedInTypeParameterList(name) => { - struct_span_err!(resolver.session, - span, - E0403, - "the name `{}` is already used for a type 
parameter in this type \ - parameter list", - name) - } - ResolutionError::IsNotATrait(name) => { - struct_span_err!(resolver.session, span, E0404, "`{}` is not a trait", name) + ResolutionError::NameAlreadyUsedInTypeParameterList(name, first_use_span) => { + let mut err = struct_span_err!(resolver.session, + span, + E0403, + "the name `{}` is already used for a type parameter \ + in this type parameter list", + name); + err.span_label(span, &format!("already used")); + err.span_label(first_use_span.clone(), &format!("first use of `{}`", name)); + err + } - ResolutionError::UndeclaredTraitName(name) => { - struct_span_err!(resolver.session, - span, - E0405, - "use of undeclared trait name `{}`", - name) + ResolutionError::IsNotATrait(name, kind_name) => { + let mut err = struct_span_err!(resolver.session, + span, + E0404, + "`{}` is not a trait", + name); + err.span_label(span, &format!("expected trait, found {}", kind_name)); + err } - ResolutionError::UndeclaredAssociatedType => { - struct_span_err!(resolver.session, span, E0406, "undeclared associated type") + ResolutionError::UndeclaredTraitName(name, candidates) => { + let mut err = struct_span_err!(resolver.session, + span, + E0405, + "trait `{}` is not in scope", + name); + show_candidates(&mut err, &candidates); + err.span_label(span, &format!("`{}` is not in scope", name)); + err } ResolutionError::MethodNotMemberOfTrait(method, trait_) => { - struct_span_err!(resolver.session, - span, - E0407, - "method `{}` is not a member of trait `{}`", - method, - trait_) + let mut err = struct_span_err!(resolver.session, + span, + E0407, + "method `{}` is not a member of trait `{}`", + method, + trait_); + err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } ResolutionError::TypeNotMemberOfTrait(type_, trait_) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0437, "type `{}` is not a member of trait `{}`", type_, - trait_) + trait_); + 
err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } ResolutionError::ConstNotMemberOfTrait(const_, trait_) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0438, "const `{}` is not a member of trait `{}`", const_, - trait_) + trait_); + err.span_label(span, &format!("not a member of trait `{}`", trait_)); + err } - ResolutionError::VariableNotBoundInPattern(variable_name, pattern_number) => { - struct_span_err!(resolver.session, + ResolutionError::VariableNotBoundInPattern(variable_name, from, to) => { + let mut err = struct_span_err!(resolver.session, span, E0408, - "variable `{}` from pattern #1 is not bound in pattern #{}", + "variable `{}` from pattern #{} is not bound in pattern #{}", variable_name, - pattern_number) + from, + to); + err.span_label(span, &format!("pattern doesn't bind `{}`", variable_name)); + err } - ResolutionError::VariableBoundWithDifferentMode(variable_name, pattern_number) => { - struct_span_err!(resolver.session, + ResolutionError::VariableBoundWithDifferentMode(variable_name, + pattern_number, + first_binding_span) => { + let mut err = struct_span_err!(resolver.session, span, E0409, "variable `{}` is bound with different mode in pattern #{} than in \ pattern #1", variable_name, - pattern_number) - } - ResolutionError::VariableNotBoundInParentPattern(variable_name, pattern_number) => { - struct_span_err!(resolver.session, - span, - E0410, - "variable `{}` from pattern #{} is not bound in pattern #1", - variable_name, - pattern_number) + pattern_number); + err.span_label(span, &format!("bound in different ways")); + err.span_label(first_binding_span, &format!("first binding")); + err } ResolutionError::SelfUsedOutsideImplOrTrait => { - struct_span_err!(resolver.session, - span, - E0411, - "use of `Self` outside of an impl or trait") - } - ResolutionError::UseOfUndeclared(kind, name) => { - struct_span_err!(resolver.session, - span, - E0412, - "use of undeclared 
{} `{}`", - kind, - name) - } - ResolutionError::DeclarationShadowsEnumVariantOrUnitLikeStruct(name) => { - struct_span_err!(resolver.session, - span, - E0413, - "declaration of `{}` shadows an enum variant \ - or unit-like struct in scope", - name) + let mut err = struct_span_err!(resolver.session, + span, + E0411, + "use of `Self` outside of an impl or trait"); + err.span_label(span, &format!("used outside of impl or trait")); + err } - ResolutionError::OnlyIrrefutablePatternsAllowedHere(did, name) => { + ResolutionError::UseOfUndeclared(kind, name, candidates) => { let mut err = struct_span_err!(resolver.session, span, - E0414, - "only irrefutable patterns allowed here"); - err.span_note(span, - "there already is a constant in scope sharing the same \ - name as this pattern"); - if let Some(sp) = resolver.ast_map.span_if_local(did) { - err.span_note(sp, "constant defined here"); - } - if let Some(directive) = resolver.current_module - .import_resolutions - .borrow() - .get(&name) { - let item = resolver.ast_map.expect_item(directive.value_ns.id); - err.span_note(item.span, "constant imported here"); - } + E0412, + "{} `{}` is undefined or not in scope", + kind, + name); + show_candidates(&mut err, &candidates); + err.span_label(span, &format!("undefined or not in scope")); err } ResolutionError::IdentifierBoundMoreThanOnceInParameterList(identifier) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0415, "identifier `{}` is bound more than once in this parameter list", - identifier) + identifier); + err.span_label(span, &format!("used as parameter more than once")); + err } ResolutionError::IdentifierBoundMoreThanOnceInSamePattern(identifier) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0416, "identifier `{}` is bound more than once in the same pattern", - identifier) - } - ResolutionError::StaticVariableReference => { - struct_span_err!(resolver.session, - 
span, - E0417, - "static variables cannot be referenced in a pattern, use a \ - `const` instead") - } - ResolutionError::NotAnEnumVariantStructOrConst(name) => { - struct_span_err!(resolver.session, - span, - E0418, - "`{}` is not an enum variant, struct or const", - name) - } - ResolutionError::UnresolvedEnumVariantStructOrConst(name) => { - struct_span_err!(resolver.session, - span, - E0419, - "unresolved enum variant, struct or const `{}`", - name) - } - ResolutionError::NotAnAssociatedConst(name) => { - struct_span_err!(resolver.session, - span, - E0420, - "`{}` is not an associated const", - name) - } - ResolutionError::UnresolvedAssociatedConst(name) => { - struct_span_err!(resolver.session, - span, - E0421, - "unresolved associated const `{}`", - name) - } - ResolutionError::DoesNotNameAStruct(name) => { - struct_span_err!(resolver.session, - span, - E0422, - "`{}` does not name a structure", - name) + identifier); + err.span_label(span, &format!("used in a pattern more than once")); + err } ResolutionError::StructVariantUsedAsFunction(path_name) => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0423, "`{}` is the name of a struct or struct variant, but this expression \ uses it like a function name", - path_name) + path_name); + err.span_label(span, &format!("struct called like a function")); + err } ResolutionError::SelfNotAvailableInStaticMethod => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0424, - "`self` is not available in a static method. 
Maybe a `self` \ - argument is missing?") + "`self` is not available in a static method"); + err.span_label(span, &format!("not available in static method")); + err.note(&format!("maybe a `self` argument is missing?")); + err } - ResolutionError::UnresolvedName(path, msg, context) => { + ResolutionError::UnresolvedName { path, message: msg, context, is_static_method, + is_field, def } => { let mut err = struct_span_err!(resolver.session, span, E0425, - "unresolved name `{}`{}", - path, - msg); + "unresolved name `{}`", + path); + if msg != "" { + err.span_label(span, &msg); + } else { + err.span_label(span, &format!("unresolved name")); + } match context { - UnresolvedNameContext::Other => { } // no help available - UnresolvedNameContext::PathIsMod(id) => { - let mut help_msg = String::new(); - let parent_id = resolver.ast_map.get_parent_node(id); - if let Some(hir_map::Node::NodeExpr(e)) = resolver.ast_map.find(parent_id) { - match e.node { - ExprField(_, ident) => { - help_msg = format!("To reference an item from the \ - `{module}` module, use \ - `{module}::{ident}`", - module = &*path, - ident = ident.node); - } - ExprMethodCall(ident, _, _) => { - help_msg = format!("To call a function from the \ - `{module}` module, use \ - `{module}::{ident}(..)`", - module = &*path, - ident = ident.node); - } - ExprCall(_, _) => { - help_msg = format!("No function corresponds to `{module}(..)`", - module = &*path); - } - _ => { } // no help available - } - } else { - help_msg = format!("Module `{module}` cannot be the value of an expression", - module = &*path); - } - - if !help_msg.is_empty() { - err.fileline_help(span, &help_msg); + UnresolvedNameContext::Other => { + if msg.is_empty() && is_static_method && is_field { + err.help("this is an associated function, you don't have access to \ + this type's fields or methods"); } } + UnresolvedNameContext::PathIsMod(parent) => { + err.help(&match parent.map(|parent| &parent.node) { + Some(&ExprKind::Field(_, ident)) => { + 
format!("to reference an item from the `{module}` module, \ + use `{module}::{ident}`", + module = path, + ident = ident.node) + } + Some(&ExprKind::MethodCall(ident, ..)) => { + format!("to call a function from the `{module}` module, \ + use `{module}::{ident}(..)`", + module = path, + ident = ident.node) + } + _ => { + format!("{def} `{module}` cannot be used as an expression", + def = def.kind_name(), + module = path) + } + }); + } } err } ResolutionError::UndeclaredLabel(name) => { - struct_span_err!(resolver.session, - span, - E0426, - "use of undeclared label `{}`", - name) - } - ResolutionError::CannotUseRefBindingModeWith(descr) => { - struct_span_err!(resolver.session, - span, - E0427, - "cannot use `ref` binding mode with {}", - descr) - } - ResolutionError::DuplicateDefinition(namespace, name) => { - struct_span_err!(resolver.session, - span, - E0428, - "duplicate definition of {} `{}`", - namespace, - name) + let mut err = struct_span_err!(resolver.session, + span, + E0426, + "use of undeclared label `{}`", + name); + err.span_label(span, &format!("undeclared label `{}`",&name)); + err } ResolutionError::SelfImportsOnlyAllowedWithin => { struct_span_err!(resolver.session, @@ -525,13 +421,20 @@ fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>, } ResolutionError::UnresolvedImport(name) => { let msg = match name { - Some((n, p)) => format!("unresolved import `{}`{}", n, p), + Some((n, _)) => format!("unresolved import `{}`", n), None => "unresolved import".to_owned(), }; - struct_span_err!(resolver.session, span, E0432, "{}", msg) + let mut err = struct_span_err!(resolver.session, span, E0432, "{}", msg); + if let Some((_, p)) = name { + err.span_label(span, &p); + } + err } ResolutionError::FailedToResolve(msg) => { - struct_span_err!(resolver.session, span, E0433, "failed to resolve. {}", msg) + let mut err = struct_span_err!(resolver.session, span, E0433, + "failed to resolve. 
{}", msg); + err.span_label(span, &msg); + err } ResolutionError::CannotCaptureDynamicEnvironmentInFnItem => { struct_span_err!(resolver.session, @@ -542,10 +445,41 @@ fn resolve_struct_error<'b, 'a: 'b, 'tcx: 'a>(resolver: &'b Resolver<'a, 'tcx>, closure form instead") } ResolutionError::AttemptToUseNonConstantValueInConstant => { - struct_span_err!(resolver.session, + let mut err = struct_span_err!(resolver.session, span, E0435, - "attempt to use a non-constant value in a constant") + "attempt to use a non-constant value in a constant"); + err.span_label(span, &format!("non-constant used with constant")); + err + } + ResolutionError::BindingShadowsSomethingUnacceptable(what_binding, name, binding) => { + let shadows_what = PathResolution::new(binding.def()).kind_name(); + let mut err = struct_span_err!(resolver.session, + span, + E0530, + "{}s cannot shadow {}s", what_binding, shadows_what); + err.span_label(span, &format!("cannot be named the same as a {}", shadows_what)); + let participle = if binding.is_import() { "imported" } else { "defined" }; + let msg = &format!("a {} `{}` is {} here", shadows_what, name, participle); + err.span_label(binding.span, msg); + err + } + ResolutionError::PatPathUnresolved(expected_what, path) => { + struct_span_err!(resolver.session, + span, + E0531, + "unresolved {} `{}`", + expected_what, + path) + } + ResolutionError::PatPathUnexpected(expected_what, found_what, path) => { + struct_span_err!(resolver.session, + span, + E0532, + "expected {}, found {} `{}`", + expected_what, + found_what, + path) } } } @@ -557,97 +491,102 @@ struct BindingInfo { } // Map from the name in a pattern to its binding mode. 
-type BindingMap = HashMap; +type BindingMap = FxHashMap; + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum PatternSource { + Match, + IfLet, + WhileLet, + Let, + For, + FnParam, +} -#[derive(Copy, Clone, PartialEq)] -enum PatternBindingMode { - RefutableMode, - LocalIrrefutableMode, - ArgumentIrrefutableMode, +impl PatternSource { + fn is_refutable(self) -> bool { + match self { + PatternSource::Match | PatternSource::IfLet | PatternSource::WhileLet => true, + PatternSource::Let | PatternSource::For | PatternSource::FnParam => false, + } + } + fn descr(self) -> &'static str { + match self { + PatternSource::Match => "match binding", + PatternSource::IfLet => "if let binding", + PatternSource::WhileLet => "while let binding", + PatternSource::Let => "let binding", + PatternSource::For => "for binding", + PatternSource::FnParam => "function parameter", + } + } } #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Namespace { TypeNS, ValueNS, + MacroNS, } -/// A NamespaceResult represents the result of resolving an import in -/// a particular namespace. The result is either definitely-resolved, -/// definitely- unresolved, or unknown. -#[derive(Clone)] -enum NamespaceResult<'a> { - /// Means that resolve hasn't gathered enough information yet to determine - /// whether the name is bound in this namespace. (That is, it hasn't - /// resolved all `use` directives yet.) - UnknownResult, - /// Means that resolve has determined that the name is definitely - /// not bound in the namespace. - UnboundResult, - /// Means that resolve has determined that the name is bound in the Module - /// argument, and specified by the NameBinding argument. 
- BoundResult(Module<'a>, NameBinding<'a>), +#[derive(Clone, Default, Debug)] +pub struct PerNS { + value_ns: T, + type_ns: T, + macro_ns: Option, } -impl<'a> NamespaceResult<'a> { - fn is_unknown(&self) -> bool { - match *self { - UnknownResult => true, - _ => false, +impl ::std::ops::Index for PerNS { + type Output = T; + fn index(&self, ns: Namespace) -> &T { + match ns { + ValueNS => &self.value_ns, + TypeNS => &self.type_ns, + MacroNS => self.macro_ns.as_ref().unwrap(), } } - fn is_unbound(&self) -> bool { - match *self { - UnboundResult => true, - _ => false, +} + +impl ::std::ops::IndexMut for PerNS { + fn index_mut(&mut self, ns: Namespace) -> &mut T { + match ns { + ValueNS => &mut self.value_ns, + TypeNS => &mut self.type_ns, + MacroNS => self.macro_ns.as_mut().unwrap(), } } } -impl<'a, 'v, 'tcx> Visitor<'v> for Resolver<'a, 'tcx> { - fn visit_nested_item(&mut self, item: hir::ItemId) { - self.visit_item(self.ast_map.expect_item(item.id)) - } +impl<'a> Visitor for Resolver<'a> { fn visit_item(&mut self, item: &Item) { - execute_callback!(hir_map::Node::NodeItem(item), self); self.resolve_item(item); } fn visit_arm(&mut self, arm: &Arm) { self.resolve_arm(arm); } fn visit_block(&mut self, block: &Block) { - execute_callback!(hir_map::Node::NodeBlock(block), self); self.resolve_block(block); } fn visit_expr(&mut self, expr: &Expr) { - execute_callback!(hir_map::Node::NodeExpr(expr), self); - self.resolve_expr(expr); + self.resolve_expr(expr, None); } fn visit_local(&mut self, local: &Local) { - execute_callback!(hir_map::Node::NodeLocal(&*local.pat), self); self.resolve_local(local); } fn visit_ty(&mut self, ty: &Ty) { self.resolve_type(ty); } - fn visit_generics(&mut self, generics: &Generics) { - self.resolve_generics(generics); - } - fn visit_poly_trait_ref(&mut self, tref: &hir::PolyTraitRef, m: &hir::TraitBoundModifier) { - match self.resolve_trait_reference(tref.trait_ref.ref_id, &tref.trait_ref.path, 0) { - Ok(def) => 
self.record_def(tref.trait_ref.ref_id, def), - Err(_) => { - // error already reported - self.record_def(tref.trait_ref.ref_id, err_path_resolution()) - } - } - intravisit::walk_poly_trait_ref(self, tref, m); + fn visit_poly_trait_ref(&mut self, tref: &ast::PolyTraitRef, m: &ast::TraitBoundModifier) { + let ast::Path { ref segments, span, global } = tref.trait_ref.path; + let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect(); + let def = self.resolve_trait_reference(&path, global, None, span); + self.record_def(tref.trait_ref.ref_id, def); + visit::walk_poly_trait_ref(self, tref, m); } fn visit_variant(&mut self, - variant: &hir::Variant, + variant: &ast::Variant, generics: &Generics, item_id: ast::NodeId) { - execute_callback!(hir_map::Node::NodeVariant(variant), self); if let Some(ref dis_expr) = variant.node.disr_expr { // resolve the discriminator expr as a constant self.with_constant_rib(|this| { @@ -655,91 +594,99 @@ impl<'a, 'v, 'tcx> Visitor<'v> for Resolver<'a, 'tcx> { }); } - // `intravisit::walk_variant` without the discriminant expression. + // `visit::walk_variant` without the discriminant expression. self.visit_variant_data(&variant.node.data, variant.node.name, generics, item_id, variant.span); } - fn visit_foreign_item(&mut self, foreign_item: &hir::ForeignItem) { - execute_callback!(hir_map::Node::NodeForeignItem(foreign_item), self); + fn visit_foreign_item(&mut self, foreign_item: &ForeignItem) { let type_parameters = match foreign_item.node { - ForeignItemFn(_, ref generics) => { - HasTypeParameters(generics, FnSpace, ItemRibKind) + ForeignItemKind::Fn(_, ref generics) => { + HasTypeParameters(generics, ItemRibKind) } - ForeignItemStatic(..) => NoTypeParameters, + ForeignItemKind::Static(..) 
=> NoTypeParameters, }; self.with_type_parameter_rib(type_parameters, |this| { - intravisit::walk_foreign_item(this, foreign_item); + visit::walk_foreign_item(this, foreign_item); }); } fn visit_fn(&mut self, - function_kind: FnKind<'v>, - declaration: &'v FnDecl, - block: &'v Block, + function_kind: FnKind, + declaration: &FnDecl, _: Span, node_id: NodeId) { let rib_kind = match function_kind { - FnKind::ItemFn(_, generics, _, _, _, _) => { + FnKind::ItemFn(_, generics, ..) => { self.visit_generics(generics); ItemRibKind } - FnKind::Method(_, sig, _) => { + FnKind::Method(_, sig, _, _) => { self.visit_generics(&sig.generics); - self.visit_explicit_self(&sig.explicit_self); - MethodRibKind + MethodRibKind(!sig.decl.has_self()) } - FnKind::Closure => ClosureRibKind(node_id), + FnKind::Closure(_) => ClosureRibKind(node_id), }; - self.resolve_function(rib_kind, declaration, block); - } -} -type ErrorMessage = Option<(Span, String)>; + // Create a value rib for the function. + self.ribs[ValueNS].push(Rib::new(rib_kind)); -enum ResolveResult { - Failed(ErrorMessage), // Failed to resolve the name, optional helpful error message. - Indeterminate, // Couldn't determine due to unresolved globs. - Success(T), // Successfully resolved the import. -} + // Create a label rib for the function. + self.label_ribs.push(Rib::new(rib_kind)); -impl ResolveResult { - fn success(&self) -> bool { - match *self { - Success(_) => true, - _ => false, + // Add each argument to the rib. + let mut bindings_list = FxHashMap(); + for argument in &declaration.inputs { + self.resolve_pattern(&argument.pat, PatternSource::FnParam, &mut bindings_list); + + self.visit_ty(&argument.ty); + + debug!("(resolving function) recorded argument"); } + visit::walk_fn_ret_ty(self, &declaration.output); + + // Resolve the function body. 
+ match function_kind { + FnKind::ItemFn(.., body) | + FnKind::Method(.., body) => { + self.visit_block(body); + } + FnKind::Closure(body) => { + self.visit_expr(body); + } + }; + + debug!("(resolving function) leaving function"); + + self.label_ribs.pop(); + self.ribs[ValueNS].pop(); } } +pub type ErrorMessage = Option<(Span, String)>; + enum FallbackSuggestion { NoSuggestion, Field, - Method, TraitItem, - StaticMethod(String), TraitMethod(String), } #[derive(Copy, Clone)] -enum TypeParameters<'a> { +enum TypeParameters<'a, 'b> { NoTypeParameters, HasTypeParameters(// Type parameters. - &'a Generics, - - // Identifies the things that these parameters - // were declared on (type, fn, etc) - ParamSpace, + &'b Generics, // The kind of the rib used for type parameters. - RibKind), + RibKind<'a>), } // The rib kind controls the translation of local -// definitions (`DefLocal`) to upvars (`DefUpvar`). +// definitions (`Def::Local`) to upvars (`Def::Upvar`). #[derive(Copy, Clone, Debug)] -enum RibKind { +enum RibKind<'a> { // No translation needs to be applied. NormalRibKind, @@ -751,134 +698,101 @@ enum RibKind { // methods. Allow references to ty params that impl or trait // binds. Disallow any other upvars (including other ty params that are // upvars). - MethodRibKind, + // + // The boolean value represents the fact that this method is static or not. + MethodRibKind(bool), // We passed through an item scope. Disallow upvars. ItemRibKind, // We're in a constant item. Can't refer to dynamic stuff. ConstantItemRibKind, -} - -#[derive(Copy, Clone)] -enum UseLexicalScopeFlag { - DontUseLexicalScope, - UseLexicalScope, -} - -enum ModulePrefixResult<'a> { - NoPrefixFound, - PrefixFound(Module<'a>, usize), -} - -#[derive(Copy, Clone)] -enum AssocItemResolveResult { - /// Syntax such as `::item`, which can't be resolved until type - /// checking. - TypecheckRequired, - /// We should have been able to resolve the associated item. 
- ResolveAttempt(Option), -} - -#[derive(Copy, Clone, PartialEq)] -enum NameSearchType { - /// We're doing a name search in order to resolve a `use` directive. - ImportSearch, - /// We're doing a name search in order to resolve a path type, a path - /// expression, or a path pattern. - PathSearch, -} + // We passed through a module. + ModuleRibKind(Module<'a>), -#[derive(Copy, Clone)] -enum BareIdentifierPatternResolution { - FoundStructOrEnumVariant(Def, LastPrivate), - FoundConst(Def, LastPrivate, Name), - BareIdentifierPatternUnresolved, + // We passed through a `macro_rules!` statement with the given expansion + MacroDefinition(Mark), } /// One local scope. #[derive(Debug)] -struct Rib { - bindings: HashMap, - kind: RibKind, +struct Rib<'a> { + bindings: FxHashMap, + kind: RibKind<'a>, } -impl Rib { - fn new(kind: RibKind) -> Rib { +impl<'a> Rib<'a> { + fn new(kind: RibKind<'a>) -> Rib<'a> { Rib { - bindings: HashMap::new(), + bindings: FxHashMap(), kind: kind, } } } /// A definition along with the index of the rib it was found on +#[derive(Copy, Clone)] struct LocalDef { ribs: Option<(Namespace, usize)>, def: Def, } -impl LocalDef { - fn from_def(def: Def) -> Self { - LocalDef { - ribs: None, - def: def, +enum LexicalScopeBinding<'a> { + Item(&'a NameBinding<'a>), + Def(Def), +} + +impl<'a> LexicalScopeBinding<'a> { + fn item(self) -> Option<&'a NameBinding<'a>> { + match self { + LexicalScopeBinding::Item(binding) => Some(binding), + _ => None, } } } -/// The link from a module up to its nearest parent node. -#[derive(Clone,Debug)] -enum ParentLink<'a> { - NoParentLink, - ModuleParentLink(Module<'a>, Name), - BlockParentLink(Module<'a>, NodeId), +#[derive(Copy, Clone)] +enum PathScope { + Global, + Lexical, + Import, +} + +#[derive(Clone)] +enum PathResult<'a> { + Module(Module<'a>), + NonModule(PathResolution), + Indeterminate, + Failed(String, bool /* is the error from the last segment? 
*/), +} + +enum ModuleKind { + Block(NodeId), + Def(Def, Name), } /// One node in the tree of modules. pub struct ModuleS<'a> { - parent_link: ParentLink<'a>, - def: Cell>, - is_public: bool, - - children: RefCell>>, - imports: RefCell>, - - // The external module children of this node that were declared with - // `extern crate`. - external_module_children: RefCell>>, + parent: Option>, + kind: ModuleKind, - // The anonymous children of this node. Anonymous children are pseudo- - // modules that are implicitly created around items contained within - // blocks. - // - // For example, if we have this: - // - // fn f() { - // fn g() { - // ... - // } - // } - // - // There will be an anonymous module created around `g` with the ID of the - // entry block for `f`. - anonymous_children: RefCell>>, + // The node id of the closest normal module (`mod`) ancestor (including this module). + normal_ancestor_id: Option, - // The status of resolving each import in this module. - import_resolutions: RefCell>>, + resolutions: RefCell>>>, + legacy_macro_resolutions: RefCell>, - // The number of unresolved globs that this module exports. - glob_count: Cell, + // Macro invocations that can expand into items in this module. + unresolved_invocations: RefCell>, - // The number of unresolved pub imports (both regular and globs) in this module - pub_count: Cell, + no_implicit_prelude: bool, - // The number of unresolved pub glob imports in this module - pub_glob_count: Cell, + glob_importers: RefCell>>, + globs: RefCell>>, - // The index of the import we're resolving. - resolved_import_count: Cell, + // Used to memoize the traits in this module for faster searches through all traits in scope. + traits: RefCell)]>>>, // Whether this module is populated. 
If not populated, any attempt to // access the children must be preceded with a @@ -889,304 +803,242 @@ pub struct ModuleS<'a> { pub type Module<'a> = &'a ModuleS<'a>; impl<'a> ModuleS<'a> { - fn new(parent_link: ParentLink<'a>, def: Option, external: bool, is_public: bool) -> Self { + fn new(parent: Option>, kind: ModuleKind) -> Self { ModuleS { - parent_link: parent_link, - def: Cell::new(def), - is_public: is_public, - children: RefCell::new(HashMap::new()), - imports: RefCell::new(Vec::new()), - external_module_children: RefCell::new(HashMap::new()), - anonymous_children: RefCell::new(NodeMap()), - import_resolutions: RefCell::new(HashMap::new()), - glob_count: Cell::new(0), - pub_count: Cell::new(0), - pub_glob_count: Cell::new(0), - resolved_import_count: Cell::new(0), - populated: Cell::new(!external), + parent: parent, + kind: kind, + normal_ancestor_id: None, + resolutions: RefCell::new(FxHashMap()), + legacy_macro_resolutions: RefCell::new(Vec::new()), + unresolved_invocations: RefCell::new(FxHashSet()), + no_implicit_prelude: false, + glob_importers: RefCell::new(Vec::new()), + globs: RefCell::new((Vec::new())), + traits: RefCell::new(None), + populated: Cell::new(true), + } + } + + fn for_each_child)>(&self, mut f: F) { + for (&(name, ns), name_resolution) in self.resolutions.borrow().iter() { + name_resolution.borrow().binding.map(|binding| f(name, ns, binding)); + } + } + + fn def(&self) -> Option { + match self.kind { + ModuleKind::Def(def, _) => Some(def), + _ => None, } } fn def_id(&self) -> Option { - self.def.get().as_ref().map(Def::def_id) + self.def().as_ref().map(Def::def_id) } + // `self` resolves to the first module ancestor that `is_normal`. 
fn is_normal(&self) -> bool { - match self.def.get() { - Some(DefMod(_)) | Some(DefForeignMod(_)) => true, + match self.kind { + ModuleKind::Def(Def::Mod(_), _) => true, _ => false, } } fn is_trait(&self) -> bool { - match self.def.get() { - Some(DefTrait(_)) => true, + match self.kind { + ModuleKind::Def(Def::Trait(_), _) => true, _ => false, } } - fn all_imports_resolved(&self) -> bool { - if self.imports.borrow_state() == ::std::cell::BorrowState::Writing { - // it is currently being resolved ! so nope - false - } else { - self.imports.borrow().len() == self.resolved_import_count.get() - } - } - - pub fn inc_glob_count(&self) { - self.glob_count.set(self.glob_count.get() + 1); - } - pub fn dec_glob_count(&self) { - assert!(self.glob_count.get() > 0); - self.glob_count.set(self.glob_count.get() - 1); - } - pub fn inc_pub_count(&self) { - self.pub_count.set(self.pub_count.get() + 1); - } - pub fn dec_pub_count(&self) { - assert!(self.pub_count.get() > 0); - self.pub_count.set(self.pub_count.get() - 1); - } - pub fn inc_pub_glob_count(&self) { - self.pub_glob_count.set(self.pub_glob_count.get() + 1); - } - pub fn dec_pub_glob_count(&self) { - assert!(self.pub_glob_count.get() > 0); - self.pub_glob_count.set(self.pub_glob_count.get() - 1); + fn is_local(&self) -> bool { + self.normal_ancestor_id.is_some() } } impl<'a> fmt::Debug for ModuleS<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, - "{:?}, {}", - self.def, - if self.is_public { - "public" - } else { - "private" - }) + write!(f, "{:?}", self.def()) } } -bitflags! { - #[derive(Debug)] - flags DefModifiers: u8 { - // Enum variants are always considered `PUBLIC`, this is needed for `use Enum::Variant` - // or `use Enum::*` to work on private enums. - const PUBLIC = 1 << 0, - const IMPORTABLE = 1 << 1, - // Variants are considered `PUBLIC`, but some of them live in private enums. - // We need to track them to prohibit reexports like `pub use PrivEnum::Variant`. 
- const PRIVATE_VARIANT = 1 << 2, - } +// Records a possibly-private value, type, or module definition. +#[derive(Clone, Debug)] +pub struct NameBinding<'a> { + kind: NameBindingKind<'a>, + expansion: Mark, + span: Span, + vis: ty::Visibility, } -// Records a possibly-private value, type, or module definition. -#[derive(Debug)] -struct NsDef<'a> { - modifiers: DefModifiers, // see note in ImportResolutionPerNamespace about how to use this - def_or_module: DefOrModule<'a>, - span: Option, +pub trait ToNameBinding<'a> { + fn to_name_binding(self) -> NameBinding<'a>; } -#[derive(Debug)] -enum DefOrModule<'a> { +impl<'a> ToNameBinding<'a> for NameBinding<'a> { + fn to_name_binding(self) -> NameBinding<'a> { + self + } +} + +#[derive(Clone, Debug)] +enum NameBindingKind<'a> { Def(Def), Module(Module<'a>), + Import { + binding: &'a NameBinding<'a>, + directive: &'a ImportDirective<'a>, + used: Cell, + }, + Ambiguity { + b1: &'a NameBinding<'a>, + b2: &'a NameBinding<'a>, + } } -impl<'a> NsDef<'a> { - fn create_from_module(module: Module<'a>, span: Option) -> Self { - let modifiers = if module.is_public { - DefModifiers::PUBLIC - } else { - DefModifiers::empty() - } | DefModifiers::IMPORTABLE; - - NsDef { modifiers: modifiers, def_or_module: DefOrModule::Module(module), span: span } - } +struct PrivacyError<'a>(Span, Name, &'a NameBinding<'a>); - fn create_from_def(def: Def, modifiers: DefModifiers, span: Option) -> Self { - NsDef { modifiers: modifiers, def_or_module: DefOrModule::Def(def), span: span } - } +struct AmbiguityError<'a> { + span: Span, + name: Name, + lexical: bool, + b1: &'a NameBinding<'a>, + b2: &'a NameBinding<'a>, +} +impl<'a> NameBinding<'a> { fn module(&self) -> Option> { - match self.def_or_module { - DefOrModule::Module(ref module) => Some(module), - DefOrModule::Def(_) => None, + match self.kind { + NameBindingKind::Module(module) => Some(module), + NameBindingKind::Import { binding, .. 
} => binding.module(), + _ => None, } } - fn def(&self) -> Option { - match self.def_or_module { - DefOrModule::Def(def) => Some(def), - DefOrModule::Module(ref module) => module.def.get(), + fn def(&self) -> Def { + match self.kind { + NameBindingKind::Def(def) => def, + NameBindingKind::Module(module) => module.def().unwrap(), + NameBindingKind::Import { binding, .. } => binding.def(), + NameBindingKind::Ambiguity { .. } => Def::Err, } } -} - -// Records at most one definition that a name in a namespace is bound to -#[derive(Clone,Debug)] -pub struct NameBinding<'a>(Rc>>>); -impl<'a> NameBinding<'a> { - fn new() -> Self { - NameBinding(Rc::new(RefCell::new(None))) + // We sometimes need to treat variants as `pub` for backwards compatibility + fn pseudo_vis(&self) -> ty::Visibility { + if self.is_variant() { ty::Visibility::Public } else { self.vis } } - fn create_from_module(module: Module<'a>) -> Self { - NameBinding(Rc::new(RefCell::new(Some(NsDef::create_from_module(module, None))))) + fn is_variant(&self) -> bool { + match self.kind { + NameBindingKind::Def(Def::Variant(..)) | + NameBindingKind::Def(Def::VariantCtor(..)) => true, + _ => false, + } } - fn set(&self, ns_def: NsDef<'a>) { - *self.0.borrow_mut() = Some(ns_def); + fn is_extern_crate(&self) -> bool { + match self.kind { + NameBindingKind::Import { + directive: &ImportDirective { + subclass: ImportDirectiveSubclass::ExternCrate, .. + }, .. + } => true, + _ => false, + } } - fn set_modifiers(&self, modifiers: DefModifiers) { - if let Some(ref mut ns_def) = *self.0.borrow_mut() { - ns_def.modifiers = modifiers + fn is_import(&self) -> bool { + match self.kind { + NameBindingKind::Import { .. 
} => true, + _ => false, } } - fn borrow(&self) -> ::std::cell::Ref>> { - self.0.borrow() - } - - // Lifted versions of the NsDef methods and fields - fn def(&self) -> Option { - self.borrow().as_ref().and_then(NsDef::def) - } - fn module(&self) -> Option> { - self.borrow().as_ref().and_then(NsDef::module) - } - fn span(&self) -> Option { - self.borrow().as_ref().and_then(|def| def.span) - } - fn modifiers(&self) -> Option { - self.borrow().as_ref().and_then(|def| Some(def.modifiers)) - } - - fn defined(&self) -> bool { - self.borrow().is_some() - } - - fn defined_with(&self, modifiers: DefModifiers) -> bool { - self.modifiers().map(|m| m.contains(modifiers)).unwrap_or(false) - } - - fn is_public(&self) -> bool { - self.defined_with(DefModifiers::PUBLIC) - } - - fn def_and_lp(&self) -> (Def, LastPrivate) { - let def = self.def().unwrap(); - (def, LastMod(if self.is_public() { AllPublic } else { DependsOn(def.def_id()) })) - } -} - -// Records the definitions (at most one for each namespace) that a name is -// bound to. -#[derive(Clone,Debug)] -pub struct NameBindings<'a> { - type_ns: NameBinding<'a>, // < Meaning in type namespace. - value_ns: NameBinding<'a>, // < Meaning in value namespace. -} - -impl<'a> ::std::ops::Index for NameBindings<'a> { - type Output = NameBinding<'a>; - fn index(&self, namespace: Namespace) -> &NameBinding<'a> { - match namespace { TypeNS => &self.type_ns, ValueNS => &self.value_ns } - } -} - -impl<'a> NameBindings<'a> { - fn new() -> Self { - NameBindings { - type_ns: NameBinding::new(), - value_ns: NameBinding::new(), + fn is_glob_import(&self) -> bool { + match self.kind { + NameBindingKind::Import { directive, .. } => directive.is_glob(), + NameBindingKind::Ambiguity { b1, .. } => b1.is_glob_import(), + _ => false, } } - /// Creates a new module in this set of name bindings. 
- fn define_module(&self, module: Module<'a>, sp: Span) { - self.type_ns.set(NsDef::create_from_module(module, Some(sp))); - } - - /// Records a type definition. - fn define_type(&self, def: Def, sp: Span, modifiers: DefModifiers) { - debug!("defining type for def {:?} with modifiers {:?}", def, modifiers); - self.type_ns.set(NsDef::create_from_def(def, modifiers, Some(sp))); - } - - /// Records a value definition. - fn define_value(&self, def: Def, sp: Span, modifiers: DefModifiers) { - debug!("defining value for def {:?} with modifiers {:?}", def, modifiers); - self.value_ns.set(NsDef::create_from_def(def, modifiers, Some(sp))); + fn is_importable(&self) -> bool { + match self.def() { + Def::AssociatedConst(..) | Def::Method(..) | Def::AssociatedTy(..) => false, + _ => true, + } } } /// Interns the names of the primitive types. struct PrimitiveTypeTable { - primitive_types: HashMap, + primitive_types: FxHashMap, } impl PrimitiveTypeTable { fn new() -> PrimitiveTypeTable { - let mut table = PrimitiveTypeTable { primitive_types: HashMap::new() }; + let mut table = PrimitiveTypeTable { primitive_types: FxHashMap() }; table.intern("bool", TyBool); table.intern("char", TyChar); - table.intern("f32", TyFloat(TyF32)); - table.intern("f64", TyFloat(TyF64)); - table.intern("isize", TyInt(TyIs)); - table.intern("i8", TyInt(TyI8)); - table.intern("i16", TyInt(TyI16)); - table.intern("i32", TyInt(TyI32)); - table.intern("i64", TyInt(TyI64)); + table.intern("f32", TyFloat(FloatTy::F32)); + table.intern("f64", TyFloat(FloatTy::F64)); + table.intern("isize", TyInt(IntTy::Is)); + table.intern("i8", TyInt(IntTy::I8)); + table.intern("i16", TyInt(IntTy::I16)); + table.intern("i32", TyInt(IntTy::I32)); + table.intern("i64", TyInt(IntTy::I64)); table.intern("str", TyStr); - table.intern("usize", TyUint(TyUs)); - table.intern("u8", TyUint(TyU8)); - table.intern("u16", TyUint(TyU16)); - table.intern("u32", TyUint(TyU32)); - table.intern("u64", TyUint(TyU64)); + table.intern("usize", 
TyUint(UintTy::Us)); + table.intern("u8", TyUint(UintTy::U8)); + table.intern("u16", TyUint(UintTy::U16)); + table.intern("u32", TyUint(UintTy::U32)); + table.intern("u64", TyUint(UintTy::U64)); table } fn intern(&mut self, string: &str, primitive_type: PrimTy) { - self.primitive_types.insert(token::intern(string), primitive_type); + self.primitive_types.insert(Symbol::intern(string), primitive_type); } } /// The main resolver class. -pub struct Resolver<'a, 'tcx: 'a> { +pub struct Resolver<'a> { session: &'a Session, - ast_map: &'a hir_map::Map<'tcx>, + pub definitions: Definitions, + + // Maps the node id of a statement to the expansions of the `macro_rules!`s + // immediately above the statement (if appropriate). + macros_at_scope: FxHashMap>, graph_root: Module<'a>, - trait_item_map: FnvHashMap<(Name, DefId), DefId>, + prelude: Option>, + + trait_item_map: FxHashMap<(Name, DefId), bool /* is static method? */>, + + // Names of fields of an item `DefId` accessible with dot syntax. + // Used for hints during error reporting. + field_names: FxHashMap>, - structs: FnvHashMap>, + // All imports known to succeed or fail. + determined_imports: Vec<&'a ImportDirective<'a>>, - // The number of imports that are currently unresolved. - unresolved_imports: usize, + // All non-determined imports. + indeterminate_imports: Vec<&'a ImportDirective<'a>>, // The module that represents the current item scope. current_module: Module<'a>, - // The current set of local scopes, for values. + // The current set of local scopes for types and values. // FIXME #4948: Reuse ribs to avoid allocation. - value_ribs: Vec, - - // The current set of local scopes, for types. - type_ribs: Vec, + ribs: PerNS>>, // The current set of local scopes, for labels. - label_ribs: Vec, + label_ribs: Vec>, // The trait that the current context can refer to. current_trait_ref: Option<(DefId, TraitRef)>, @@ -1197,72 +1049,190 @@ pub struct Resolver<'a, 'tcx: 'a> { // The idents for the primitive types. 
primitive_type_table: PrimitiveTypeTable, - def_map: RefCell, - freevars: FreevarMap, + def_map: DefMap, + pub freevars: FreevarMap, freevars_seen: NodeMap>, - export_map: ExportMap, - trait_map: TraitMap, - external_exports: ExternalExports, + pub export_map: ExportMap, + pub trait_map: TraitMap, - // Whether or not to print error messages. Can be set to true - // when getting additional info for error message suggestions, - // so as to avoid printing duplicate errors - emit_errors: bool, + // A map from nodes to modules, both normal (`mod`) modules and anonymous modules. + // Anonymous modules are pseudo-modules that are implicitly created around items + // contained within blocks. + // + // For example, if we have this: + // + // fn f() { + // fn g() { + // ... + // } + // } + // + // There will be an anonymous module created around `g` with the ID of the + // entry block for `f`. + module_map: NodeMap>, + extern_crate_roots: FxHashMap<(CrateNum, bool /* MacrosOnly? */), Module<'a>>, - make_glob_map: bool, + pub make_glob_map: bool, // Maps imports to the names of items actually imported (this actually maps // all imports, but only glob imports are actually interesting). - glob_map: GlobMap, + pub glob_map: GlobMap, - used_imports: HashSet<(NodeId, Namespace)>, - used_crates: HashSet, + used_imports: FxHashSet<(NodeId, Namespace)>, + used_crates: FxHashSet, + pub maybe_unused_trait_imports: NodeSet, - // Callback function for intercepting walks - callback: Option bool>>, - // The intention is that the callback modifies this flag. - // Once set, the resolver falls out of the walk, preserving the ribs. 
- resolved: bool, + privacy_errors: Vec>, + ambiguity_errors: Vec>, + disallowed_shadowing: Vec<&'a LegacyBinding<'a>>, arenas: &'a ResolverArenas<'a>, + dummy_binding: &'a NameBinding<'a>, + use_extern_macros: bool, // true if `#![feature(use_extern_macros)]` + + pub exported_macros: Vec, + crate_loader: &'a mut CrateLoader, + macro_names: FxHashSet, + builtin_macros: FxHashMap>, + lexical_macro_resolutions: Vec<(Name, &'a Cell>)>, + macro_map: FxHashMap>, + macro_exports: Vec, + + // Maps the `Mark` of an expansion to its containing module or block. + invocations: FxHashMap>, + + // Avoid duplicated errors for "name already defined". + name_already_seen: FxHashMap, } pub struct ResolverArenas<'a> { modules: arena::TypedArena>, + local_modules: RefCell>>, + name_bindings: arena::TypedArena>, + import_directives: arena::TypedArena>, + name_resolutions: arena::TypedArena>>, + invocation_data: arena::TypedArena>, + legacy_bindings: arena::TypedArena>, +} + +impl<'a> ResolverArenas<'a> { + fn alloc_module(&'a self, module: ModuleS<'a>) -> Module<'a> { + let module = self.modules.alloc(module); + if module.def_id().map(|def_id| def_id.is_local()).unwrap_or(true) { + self.local_modules.borrow_mut().push(module); + } + module + } + fn local_modules(&'a self) -> ::std::cell::Ref<'a, Vec>> { + self.local_modules.borrow() + } + fn alloc_name_binding(&'a self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> { + self.name_bindings.alloc(name_binding) + } + fn alloc_import_directive(&'a self, import_directive: ImportDirective<'a>) + -> &'a ImportDirective { + self.import_directives.alloc(import_directive) + } + fn alloc_name_resolution(&'a self) -> &'a RefCell> { + self.name_resolutions.alloc(Default::default()) + } + fn alloc_invocation_data(&'a self, expansion_data: InvocationData<'a>) + -> &'a InvocationData<'a> { + self.invocation_data.alloc(expansion_data) + } + fn alloc_legacy_binding(&'a self, binding: LegacyBinding<'a>) -> &'a LegacyBinding<'a> { + 
self.legacy_bindings.alloc(binding) + } +} + +impl<'a> ty::NodeIdTree for Resolver<'a> { + fn is_descendant_of(&self, mut node: NodeId, ancestor: NodeId) -> bool { + while node != ancestor { + node = match self.module_map[&node].parent { + Some(parent) => parent.normal_ancestor_id.unwrap(), + None => return false, + } + } + true + } } -#[derive(PartialEq)] -enum FallbackChecks { - Everything, - OnlyTraitAndStatics, +impl<'a> hir::lowering::Resolver for Resolver<'a> { + fn resolve_hir_path(&mut self, path: &mut hir::Path, is_value: bool) { + let namespace = if is_value { ValueNS } else { TypeNS }; + let hir::Path { ref segments, span, global, ref mut def } = *path; + let path: Vec<_> = segments.iter().map(|seg| Ident::with_empty_ctxt(seg.name)).collect(); + let scope = if global { PathScope::Global } else { PathScope::Lexical }; + match self.resolve_path(&path, scope, Some(namespace), Some(span)) { + PathResult::Module(module) => *def = module.def().unwrap(), + PathResult::NonModule(path_res) if path_res.depth == 0 => *def = path_res.base_def, + PathResult::NonModule(..) 
=> match self.resolve_path(&path, scope, None, Some(span)) { + PathResult::Failed(msg, _) => { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + } + _ => {} + }, + PathResult::Indeterminate => unreachable!(), + PathResult::Failed(msg, _) => { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + } + } + } + + fn get_resolution(&mut self, id: NodeId) -> Option { + self.def_map.get(&id).cloned() + } + + fn definitions(&mut self) -> &mut Definitions { + &mut self.definitions + } } -impl<'a, 'tcx> Resolver<'a, 'tcx> { - fn new(session: &'a Session, - ast_map: &'a hir_map::Map<'tcx>, - make_glob_map: MakeGlobMap, - arenas: &'a ResolverArenas<'a>) - -> Resolver<'a, 'tcx> { - let root_def_id = ast_map.local_def_id(CRATE_NODE_ID); - let graph_root = ModuleS::new(NoParentLink, Some(DefMod(root_def_id)), false, true); - let graph_root = arenas.modules.alloc(graph_root); +impl<'a> Resolver<'a> { + pub fn new(session: &'a Session, + krate: &Crate, + make_glob_map: MakeGlobMap, + crate_loader: &'a mut CrateLoader, + arenas: &'a ResolverArenas<'a>) + -> Resolver<'a> { + let root_def = Def::Mod(DefId::local(CRATE_DEF_INDEX)); + let graph_root = arenas.alloc_module(ModuleS { + normal_ancestor_id: Some(CRATE_NODE_ID), + no_implicit_prelude: attr::contains_name(&krate.attrs, "no_implicit_prelude"), + ..ModuleS::new(None, ModuleKind::Def(root_def, keywords::Invalid.name())) + }); + let mut module_map = NodeMap(); + module_map.insert(CRATE_NODE_ID, graph_root); + + let mut definitions = Definitions::new(); + DefCollector::new(&mut definitions).collect_root(); + + let mut invocations = FxHashMap(); + invocations.insert(Mark::root(), + arenas.alloc_invocation_data(InvocationData::root(graph_root))); Resolver { session: session, - ast_map: ast_map, + definitions: definitions, + macros_at_scope: FxHashMap(), // The outermost module has def ID 0; this is not reflected in the // AST. 
graph_root: graph_root, + prelude: None, - trait_item_map: FnvHashMap(), - structs: FnvHashMap(), + trait_item_map: FxHashMap(), + field_names: FxHashMap(), - unresolved_imports: 0, + determined_imports: Vec::new(), + indeterminate_imports: Vec::new(), current_module: graph_root, - value_ribs: Vec::new(), - type_ribs: Vec::new(), + ribs: PerNS { + value_ns: vec![Rib::new(ModuleRibKind(graph_root))], + type_ns: vec![Rib::new(ModuleRibKind(graph_root))], + macro_ns: None, + }, label_ribs: Vec::new(), current_trait_ref: None, @@ -1270,621 +1240,193 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { primitive_type_table: PrimitiveTypeTable::new(), - def_map: RefCell::new(NodeMap()), + def_map: NodeMap(), freevars: NodeMap(), freevars_seen: NodeMap(), export_map: NodeMap(), trait_map: NodeMap(), - used_imports: HashSet::new(), - used_crates: HashSet::new(), - external_exports: DefIdSet(), + module_map: module_map, + extern_crate_roots: FxHashMap(), - emit_errors: true, make_glob_map: make_glob_map == MakeGlobMap::Yes, - glob_map: HashMap::new(), + glob_map: NodeMap(), - callback: None, - resolved: false, + used_imports: FxHashSet(), + used_crates: FxHashSet(), + maybe_unused_trait_imports: NodeSet(), - arenas: arenas, - } - } + privacy_errors: Vec::new(), + ambiguity_errors: Vec::new(), + disallowed_shadowing: Vec::new(), - fn arenas() -> ResolverArenas<'a> { + arenas: arenas, + dummy_binding: arenas.alloc_name_binding(NameBinding { + kind: NameBindingKind::Def(Def::Err), + expansion: Mark::root(), + span: DUMMY_SP, + vis: ty::Visibility::Public, + }), + use_extern_macros: session.features.borrow().use_extern_macros, + + exported_macros: Vec::new(), + crate_loader: crate_loader, + macro_names: FxHashSet(), + builtin_macros: FxHashMap(), + lexical_macro_resolutions: Vec::new(), + macro_map: FxHashMap(), + macro_exports: Vec::new(), + invocations: invocations, + name_already_seen: FxHashMap(), + } + } + + pub fn arenas() -> ResolverArenas<'a> { ResolverArenas { modules: 
arena::TypedArena::new(), + local_modules: RefCell::new(Vec::new()), + name_bindings: arena::TypedArena::new(), + import_directives: arena::TypedArena::new(), + name_resolutions: arena::TypedArena::new(), + invocation_data: arena::TypedArena::new(), + legacy_bindings: arena::TypedArena::new(), + } + } + + fn per_ns T>(&mut self, mut f: F) -> PerNS { + PerNS { + type_ns: f(self, TypeNS), + value_ns: f(self, ValueNS), + macro_ns: match self.use_extern_macros { + true => Some(f(self, MacroNS)), + false => None, + }, } } - fn new_module(&self, - parent_link: ParentLink<'a>, - def: Option, - external: bool, - is_public: bool) -> Module<'a> { - self.arenas.modules.alloc(ModuleS::new(parent_link, def, external, is_public)) - } + /// Entry point to crate resolution. + pub fn resolve_crate(&mut self, krate: &Crate) { + ImportResolver { resolver: self }.finalize_imports(); + self.current_module = self.graph_root; + visit::walk_crate(self, krate); - #[inline] - fn record_import_use(&mut self, import_id: NodeId, name: Name) { - if !self.make_glob_map { - return; - } - if self.glob_map.contains_key(&import_id) { - self.glob_map.get_mut(&import_id).unwrap().insert(name); - return; - } + check_unused::check_crate(self, krate); + self.report_errors(); + self.crate_loader.postprocess(krate); + } - let mut new_set = HashSet::new(); - new_set.insert(name); - self.glob_map.insert(import_id, new_set); + fn new_module(&self, parent: Module<'a>, kind: ModuleKind, local: bool) -> Module<'a> { + self.arenas.alloc_module(ModuleS { + normal_ancestor_id: if local { self.current_module.normal_ancestor_id } else { None }, + populated: Cell::new(local), + ..ModuleS::new(Some(parent), kind) + }) } - fn get_trait_name(&self, did: DefId) -> Name { - if let Some(node_id) = self.ast_map.as_local_node_id(did) { - self.ast_map.expect_item(node_id).name - } else { - self.session.cstore.item_name(did) - } - } - - /// Check that an external crate doesn't collide with items or other external crates. 
- fn check_for_conflicts_for_external_crate(&self, module: Module<'a>, name: Name, span: Span) { - if module.external_module_children.borrow().contains_key(&name) { - span_err!(self.session, - span, - E0259, - "an external crate named `{}` has already been imported into this module", - name); - } - match module.children.borrow().get(&name) { - Some(name_bindings) if name_bindings.type_ns.defined() => { - resolve_error(self, - name_bindings.type_ns.span().unwrap_or(codemap::DUMMY_SP), - ResolutionError::NameConflictsWithExternCrate(name)); - } - _ => {}, - } - } - - /// Checks that the names of items don't collide with external crates. - fn check_for_conflicts_between_external_crates_and_items(&self, - module: Module<'a>, - name: Name, - span: Span) { - if module.external_module_children.borrow().contains_key(&name) { - resolve_error(self, span, ResolutionError::NameConflictsWithExternCrate(name)); - } - } - - /// Resolves the given module path from the given root `module_`. - fn resolve_module_path_from_root(&mut self, - module_: Module<'a>, - module_path: &[Name], - index: usize, - span: Span, - name_search_type: NameSearchType, - lp: LastPrivate) - -> ResolveResult<(Module<'a>, LastPrivate)> { - fn search_parent_externals<'a>(needle: Name, module: Module<'a>) - -> Option> { - match module.external_module_children.borrow().get(&needle) { - Some(_) => Some(module), - None => match module.parent_link { - ModuleParentLink(ref parent, _) => { - search_parent_externals(needle, parent) - } - _ => None, - }, - } + fn record_use(&mut self, name: Name, ns: Namespace, binding: &'a NameBinding<'a>, span: Span) + -> bool /* true if an error was reported */ { + // track extern crates for unused_extern_crate lint + if let Some(DefId { krate, .. 
}) = binding.module().and_then(ModuleS::def_id) { + self.used_crates.insert(krate); } - let mut search_module = module_; - let mut index = index; - let module_path_len = module_path.len(); - let mut closest_private = lp; - - // Resolve the module part of the path. This does not involve looking - // upward though scope chains; we simply resolve names directly in - // modules as we go. - while index < module_path_len { - let name = module_path[index]; - match self.resolve_name_in_module(search_module, - name, - TypeNS, - name_search_type, - false) { - Failed(None) => { - let segment_name = name.as_str(); - let module_name = module_to_string(search_module); - let mut span = span; - let msg = if "???" == &module_name[..] { - span.hi = span.lo + Pos::from_usize(segment_name.len()); - - match search_parent_externals(name, &self.current_module) { - Some(module) => { - let path_str = names_to_string(module_path); - let target_mod_str = module_to_string(&*module); - let current_mod_str = module_to_string(&*self.current_module); - - let prefix = if target_mod_str == current_mod_str { - "self::".to_string() - } else { - format!("{}::", target_mod_str) - }; - - format!("Did you mean `{}{}`?", prefix, path_str) - } - None => format!("Maybe a missing `extern crate {}`?", segment_name), - } - } else { - format!("Could not find `{}` in `{}`", segment_name, module_name) - }; - - return Failed(Some((span, msg))); - } - Failed(err) => return Failed(err), - Indeterminate => { - debug!("(resolving module path for import) module resolution is \ - indeterminate: {}", - name); - return Indeterminate; - } - Success((target, used_proxy)) => { - // Check to see whether there are type bindings, and, if - // so, whether there is a module within. 
- if let Some(module_def) = target.binding.module() { - // track extern crates for unused_extern_crate lint - if let Some(did) = module_def.def_id() { - self.used_crates.insert(did.krate); - } - - search_module = module_def; - - // Keep track of the closest private module used - // when resolving this import chain. - if !used_proxy && !search_module.is_public { - if let Some(did) = search_module.def_id() { - closest_private = LastMod(DependsOn(did)); - } - } - } else { - let msg = format!("Not a module `{}`", name); - return Failed(Some((span, msg))); - } - } + match binding.kind { + NameBindingKind::Import { directive, binding, ref used } if !used.get() => { + used.set(true); + self.used_imports.insert((directive.id, ns)); + self.add_to_glob_map(directive.id, name); + self.record_use(name, ns, binding, span) } - - index += 1; + NameBindingKind::Import { .. } => false, + NameBindingKind::Ambiguity { b1, b2 } => { + self.ambiguity_errors.push(AmbiguityError { + span: span, name: name, lexical: false, b1: b1, b2: b2, + }); + true + } + _ => false } - - return Success((search_module, closest_private)); } - /// Attempts to resolve the module part of an import directive or path - /// rooted at the given module. - /// - /// On success, returns the resolved module, and the closest *private* - /// module found to the destination when resolving this path. - fn resolve_module_path(&mut self, - module_: Module<'a>, - module_path: &[Name], - use_lexical_scope: UseLexicalScopeFlag, - span: Span, - name_search_type: NameSearchType) - -> ResolveResult<(Module<'a>, LastPrivate)> { - let module_path_len = module_path.len(); - assert!(module_path_len > 0); - - debug!("(resolving module path for import) processing `{}` rooted at `{}`", - names_to_string(module_path), - module_to_string(&*module_)); - - // Resolve the module prefix, if any. 
- let module_prefix_result = self.resolve_module_prefix(module_, module_path); - - let search_module; - let start_index; - let last_private; - match module_prefix_result { - Failed(None) => { - let mpath = names_to_string(module_path); - let mpath = &mpath[..]; - match mpath.rfind(':') { - Some(idx) => { - let msg = format!("Could not find `{}` in `{}`", - // idx +- 1 to account for the - // colons on either side - &mpath[idx + 1..], - &mpath[..idx - 1]); - return Failed(Some((span, msg))); - } - None => { - return Failed(None); - } - } - } - Failed(err) => return Failed(err), - Indeterminate => { - debug!("(resolving module path for import) indeterminate; bailing"); - return Indeterminate; - } - Success(NoPrefixFound) => { - // There was no prefix, so we're considering the first element - // of the path. How we handle this depends on whether we were - // instructed to use lexical scope or not. - match use_lexical_scope { - DontUseLexicalScope => { - // This is a crate-relative path. We will start the - // resolution process at index zero. - search_module = self.graph_root; - start_index = 0; - last_private = LastMod(AllPublic); - } - UseLexicalScope => { - // This is not a crate-relative path. We resolve the - // first component of the path in the current lexical - // scope and then proceed to resolve below that. 
- match self.resolve_module_in_lexical_scope(module_, module_path[0]) { - Failed(err) => return Failed(err), - Indeterminate => { - debug!("(resolving module path for import) indeterminate; bailing"); - return Indeterminate; - } - Success(containing_module) => { - search_module = containing_module; - start_index = 1; - last_private = LastMod(AllPublic); - } - } - } - } - } - Success(PrefixFound(ref containing_module, index)) => { - search_module = containing_module; - start_index = index; - last_private = LastMod(DependsOn(containing_module.def_id() - .unwrap())); - } + fn add_to_glob_map(&mut self, id: NodeId, name: Name) { + if self.make_glob_map { + self.glob_map.entry(id).or_insert_with(FxHashSet).insert(name); } - - self.resolve_module_path_from_root(search_module, - module_path, - start_index, - span, - name_search_type, - last_private) } + /// This resolves the identifier `ident` in the namespace `ns` in the current lexical scope. + /// More specifically, we proceed up the hierarchy of scopes and return the binding for + /// `ident` in the first scope that defines it (or None if no scopes define it). + /// + /// A block's items are above its local variables in the scope hierarchy, regardless of where + /// the items are defined in the block. For example, + /// ```rust + /// fn f() { + /// g(); // Since there are no local variables in scope yet, this resolves to the item. + /// let g = || {}; + /// fn g() {} + /// g(); // This resolves to the local variable `g` since it shadows the item. + /// } + /// ``` + /// /// Invariant: This must only be called during main resolution, not during /// import resolution. 
- fn resolve_item_in_lexical_scope(&mut self, - module_: Module<'a>, - name: Name, - namespace: Namespace, - record_used: bool) - -> ResolveResult<(Target<'a>, bool)> { - debug!("(resolving item in lexical scope) resolving `{}` in namespace {:?} in `{}`", - name, - namespace, - module_to_string(&*module_)); - - // The current module node is handled specially. First, check for - // its immediate children. - build_reduced_graph::populate_module_if_necessary(self, &module_); - - match module_.children.borrow().get(&name) { - Some(name_bindings) if name_bindings[namespace].defined() => { - debug!("top name bindings succeeded"); - return Success((Target::new(module_, - name_bindings[namespace].clone(), - Shadowable::Never), - false)); - } - Some(_) | None => { - // Not found; continue. - } - } - - // Now check for its import directives. We don't have to have resolved - // all its imports in the usual way; this is because chains of - // adjacent import statements are processed as though they mutated the - // current scope. - if let Some(import_resolution) = module_.import_resolutions.borrow().get(&name) { - match import_resolution[namespace].target.clone() { - None => { - // Not found; continue. - debug!("(resolving item in lexical scope) found import resolution, but not \ - in namespace {:?}", - namespace); - } - Some(target) => { - debug!("(resolving item in lexical scope) using import resolution"); - // track used imports and extern crates as well - let id = import_resolution[namespace].id; - if record_used { - self.used_imports.insert((id, namespace)); - self.record_import_use(id, name); - if let Some(DefId{krate: kid, ..}) = target.target_module.def_id() { - self.used_crates.insert(kid); - } - } - return Success((target, false)); - } - } - } - - // Search for external modules. 
- if namespace == TypeNS { - let children = module_.external_module_children.borrow(); - if let Some(module) = children.get(&name) { - let name_binding = NameBinding::create_from_module(module); - debug!("lower name bindings succeeded"); - return Success((Target::new(module_, name_binding, Shadowable::Never), - false)); - } - } - - // Finally, proceed up the scope chain looking for parent modules. - let mut search_module = module_; - loop { - // Go to the next parent. - match search_module.parent_link { - NoParentLink => { - // No more parents. This module was unresolved. - debug!("(resolving item in lexical scope) unresolved module"); - return Failed(None); - } - ModuleParentLink(parent_module_node, _) => { - if search_module.is_normal() { - // We stop the search here. - debug!("(resolving item in lexical scope) unresolved module: not \ - searching through module parents"); - return Failed(None); - } else { - search_module = parent_module_node; - } - } - BlockParentLink(parent_module_node, _) => { - search_module = parent_module_node; - } - } - - // Resolve the name in the parent module. - match self.resolve_name_in_module(search_module, - name, - namespace, - PathSearch, - true) { - Failed(Some((span, msg))) => { - resolve_error(self, span, ResolutionError::FailedToResolve(&*msg)); - } - Failed(None) => (), // Continue up the search chain. - Indeterminate => { - // We couldn't see through the higher scope because of an - // unresolved import higher up. Bail. - - debug!("(resolving item in lexical scope) indeterminate higher scope; bailing"); - return Indeterminate; - } - Success((target, used_reexport)) => { - // We found the module. - debug!("(resolving item in lexical scope) found name in module, done"); - return Success((target, used_reexport)); - } - } - } - } - - /// Resolves a module name in the current lexical scope. 
- fn resolve_module_in_lexical_scope(&mut self, - module_: Module<'a>, - name: Name) - -> ResolveResult> { - // If this module is an anonymous module, resolve the item in the - // lexical scope. Otherwise, resolve the item from the crate root. - let resolve_result = self.resolve_item_in_lexical_scope(module_, name, TypeNS, true); - match resolve_result { - Success((target, _)) => { - if let Some(module_def) = target.binding.module() { - return Success(module_def) + fn resolve_ident_in_lexical_scope(&mut self, + mut ident: Ident, + ns: Namespace, + record_used: Option) + -> Option> { + if ns == TypeNS { + ident = Ident::with_empty_ctxt(ident.name); + } + + // Walk backwards up the ribs in scope. + for i in (0 .. self.ribs[ns].len()).rev() { + if let Some(def) = self.ribs[ns][i].bindings.get(&ident).cloned() { + // The ident resolves to a type parameter or local variable. + return Some(LexicalScopeBinding::Def(if let Some(span) = record_used { + self.adjust_local_def(LocalDef { ribs: Some((ns, i)), def: def }, span) } else { - debug!("!!! (resolving module in lexical scope) module \ - wasn't actually a module!"); - return Failed(None); - } - } - Indeterminate => { - debug!("(resolving module in lexical scope) indeterminate; bailing"); - return Indeterminate; - } - Failed(err) => { - debug!("(resolving module in lexical scope) failed to resolve"); - return Failed(err); + def + })); } - } - } - /// Returns the nearest normal module parent of the given module. 
- fn get_nearest_normal_module_parent(&mut self, module_: Module<'a>) -> Option> { - let mut module_ = module_; - loop { - match module_.parent_link { - NoParentLink => return None, - ModuleParentLink(new_module, _) | - BlockParentLink(new_module, _) => { - let new_module = new_module; - if new_module.is_normal() { - return Some(new_module); - } - module_ = new_module; + if let ModuleRibKind(module) = self.ribs[ns][i].kind { + let name = ident.name; + let item = self.resolve_name_in_module(module, name, ns, false, record_used); + if let Ok(binding) = item { + // The ident resolves to an item. + return Some(LexicalScopeBinding::Item(binding)); } - } - } - } - - /// Returns the nearest normal module parent of the given module, or the - /// module itself if it is a normal module. - fn get_nearest_normal_module_parent_or_self(&mut self, module_: Module<'a>) -> Module<'a> { - if module_.is_normal() { - return module_; - } - match self.get_nearest_normal_module_parent(module_) { - None => module_, - Some(new_module) => new_module, - } - } - /// Resolves a "module prefix". A module prefix is one or both of (a) `self::`; - /// (b) some chain of `super::`. - /// grammar: (SELF MOD_SEP ) ? (SUPER MOD_SEP) * - fn resolve_module_prefix(&mut self, - module_: Module<'a>, - module_path: &[Name]) - -> ResolveResult> { - // Start at the current module if we see `self` or `super`, or at the - // top of the crate otherwise. - let mut i = match &*module_path[0].as_str() { - "self" => 1, - "super" => 0, - _ => return Success(NoPrefixFound), - }; - let mut containing_module = self.get_nearest_normal_module_parent_or_self(module_); - - // Now loop through all the `super`s we find. 
- while i < module_path.len() && "super" == module_path[i].as_str() { - debug!("(resolving module prefix) resolving `super` at {}", - module_to_string(&*containing_module)); - match self.get_nearest_normal_module_parent(containing_module) { - None => return Failed(None), - Some(new_module) => { - containing_module = new_module; - i += 1; + if let ModuleKind::Block(..) = module.kind { // We can see through blocks + } else if !module.no_implicit_prelude { + return self.prelude.and_then(|prelude| { + self.resolve_name_in_module(prelude, name, ns, false, None).ok() + }).map(LexicalScopeBinding::Item) + } else { + return None; } } - } - - debug!("(resolving module prefix) finished resolving prefix at {}", - module_to_string(&*containing_module)); - return Success(PrefixFound(containing_module, i)); - } - - /// Attempts to resolve the supplied name in the given module for the - /// given namespace. If successful, returns the target corresponding to - /// the name. - /// - /// The boolean returned on success is an indicator of whether this lookup - /// passed through a public re-export proxy. - fn resolve_name_in_module(&mut self, - module_: Module<'a>, - name: Name, - namespace: Namespace, - name_search_type: NameSearchType, - allow_private_imports: bool) - -> ResolveResult<(Target<'a>, bool)> { - debug!("(resolving name in module) resolving `{}` in `{}`", - name, - module_to_string(&*module_)); - - // First, check the direct children of the module. - build_reduced_graph::populate_module_if_necessary(self, &module_); - - let children = module_.children.borrow(); - match children.get(&name) { - Some(name_bindings) if name_bindings[namespace].defined() => { - debug!("(resolving name in module) found node as child"); - return Success((Target::new(module_, - name_bindings[namespace].clone(), - Shadowable::Never), - false)); - } - Some(_) | None => { - // Continue. - } - } - - // Next, check the module's imports if necessary. 
- - // If this is a search of all imports, we should be done with glob - // resolution at this point. - if name_search_type == PathSearch { - assert_eq!(module_.glob_count.get(), 0); - } - - // Check the list of resolved imports. - let children = module_.import_resolutions.borrow(); - match children.get(&name) { - Some(import_resolution) if allow_private_imports || - import_resolution[namespace].is_public => { - - if import_resolution[namespace].is_public && - import_resolution.outstanding_references != 0 { - debug!("(resolving name in module) import unresolved; bailing out"); - return Indeterminate; - } - match import_resolution[namespace].target.clone() { - None => { - debug!("(resolving name in module) name found, but not in namespace {:?}", - namespace); - } - Some(target) => { - debug!("(resolving name in module) resolved to import"); - // track used imports and extern crates as well - let id = import_resolution[namespace].id; - self.used_imports.insert((id, namespace)); - self.record_import_use(id, name); - if let Some(DefId{krate: kid, ..}) = target.target_module.def_id() { - self.used_crates.insert(kid); - } - return Success((target, true)); - } + if let MacroDefinition(mac) = self.ribs[ns][i].kind { + // If an invocation of this macro created `ident`, give up on `ident` + // and switch to `ident`'s source from the macro definition. + let (source_ctxt, source_macro) = ident.ctxt.source(); + if source_macro == mac { + ident.ctxt = source_ctxt; } } - Some(..) | None => {} // Continue. } - // Finally, search through external children. - if namespace == TypeNS { - let children = module_.external_module_children.borrow(); - if let Some(module) = children.get(&name) { - let name_binding = NameBinding::create_from_module(module); - return Success((Target::new(module_, name_binding, Shadowable::Never), - false)); - } - } - - // We're out of luck. 
- debug!("(resolving name in module) failed to resolve `{}`", name); - return Failed(None); + None } - fn report_unresolved_imports(&mut self, module_: Module<'a>) { - let index = module_.resolved_import_count.get(); - let imports = module_.imports.borrow(); - let import_count = imports.len(); - if index != import_count { - resolve_error(self, - (*imports)[index].span, - ResolutionError::UnresolvedImport(None)); - } - - // Descend into children and anonymous children. - build_reduced_graph::populate_module_if_necessary(self, &module_); - - for (_, child_node) in module_.children.borrow().iter() { - match child_node.type_ns.module() { - None => { - // Continue. - } - Some(child_module) => { - self.report_unresolved_imports(child_module); - } - } - } - - for (_, module_) in module_.anonymous_children.borrow().iter() { - self.report_unresolved_imports(module_); + fn resolve_crate_var(&mut self, mut crate_var_ctxt: SyntaxContext) -> Module<'a> { + while crate_var_ctxt.source().0 != SyntaxContext::empty() { + crate_var_ctxt = crate_var_ctxt.source().0; } + let module = self.invocations[&crate_var_ctxt.source().1].module.get(); + if module.is_local() { self.graph_root } else { module } } // AST resolution @@ -1905,60 +1447,49 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { // generate a fake "implementation scope" containing all the // implementations thus found, for compatibility with old resolve pass. - fn with_scope(&mut self, name: Option, f: F) + fn with_scope(&mut self, id: NodeId, f: F) where F: FnOnce(&mut Resolver) { - let orig_module = self.current_module; + let module = self.module_map.get(&id).cloned(); // clones a reference + if let Some(module) = module { + // Move down in the graph. + let orig_module = replace(&mut self.current_module, module); + self.ribs[ValueNS].push(Rib::new(ModuleRibKind(module))); + self.ribs[TypeNS].push(Rib::new(ModuleRibKind(module))); - // Move down in the graph. - match name { - None => { - // Nothing to do. 
- } - Some(name) => { - build_reduced_graph::populate_module_if_necessary(self, &orig_module); + self.finalize_current_module_macro_resolutions(); + f(self); - match orig_module.children.borrow().get(&name) { - None => { - debug!("!!! (with scope) didn't find `{}` in `{}`", - name, - module_to_string(&*orig_module)); - } - Some(name_bindings) => { - match name_bindings.type_ns.module() { - None => { - debug!("!!! (with scope) didn't find module for `{}` in `{}`", - name, - module_to_string(&*orig_module)); - } - Some(module_) => { - self.current_module = module_; - } - } - } - } - } + self.current_module = orig_module; + self.ribs[ValueNS].pop(); + self.ribs[TypeNS].pop(); + } else { + f(self); } - - f(self); - - self.current_module = orig_module; } /// Searches the current set of local scopes for labels. /// Stops after meeting a closure. - fn search_label(&self, name: Name) -> Option { + fn search_label(&self, mut ident: Ident) -> Option { for rib in self.label_ribs.iter().rev() { match rib.kind { NormalRibKind => { // Continue } + MacroDefinition(mac) => { + // If an invocation of this macro created `ident`, give up on `ident` + // and switch to `ident`'s source from the macro definition. 
+ let (source_ctxt, source_macro) = ident.ctxt.source(); + if source_macro == mac { + ident.ctxt = source_ctxt; + } + } _ => { // Do not resolve labels across function boundary return None; } } - let result = rib.bindings.get(&name).cloned(); + let result = rib.bindings.get(&ident).cloned(); if result.is_some() { return result; } @@ -1966,189 +1497,155 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { None } - fn resolve_crate(&mut self, krate: &hir::Crate) { - debug!("(resolving crate) starting"); - - intravisit::walk_crate(self, krate); - } - - fn check_if_primitive_type_name(&self, name: Name, span: Span) { - if let Some(_) = self.primitive_type_table.primitive_types.get(&name) { - span_err!(self.session, - span, - E0317, - "user-defined types or type parameters cannot shadow the primitive types"); - } - } - fn resolve_item(&mut self, item: &Item) { - let name = item.name; + let name = item.ident.name; debug!("(resolving item) resolving {}", name); match item.node { - ItemEnum(_, ref generics) | - ItemTy(_, ref generics) | - ItemStruct(_, ref generics) => { - self.check_if_primitive_type_name(name, item.span); - - self.with_type_parameter_rib(HasTypeParameters(generics, TypeSpace, ItemRibKind), - |this| intravisit::walk_item(this, item)); - } - ItemFn(_, _, _, _, ref generics, _) => { - self.with_type_parameter_rib(HasTypeParameters(generics, FnSpace, ItemRibKind), - |this| intravisit::walk_item(this, item)); + ItemKind::Enum(_, ref generics) | + ItemKind::Ty(_, ref generics) | + ItemKind::Struct(_, ref generics) | + ItemKind::Union(_, ref generics) | + ItemKind::Fn(.., ref generics, _) => { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), + |this| visit::walk_item(this, item)); } - ItemDefaultImpl(_, ref trait_ref) => { - self.with_optional_trait_ref(Some(trait_ref), |_, _| {}); + ItemKind::DefaultImpl(_, ref trait_ref) => { + self.with_optional_trait_ref(Some(trait_ref), |_, _| {}, None); } - ItemImpl(_, _, ref generics, ref opt_trait_ref, ref 
self_type, ref impl_items) => { + ItemKind::Impl(.., ref generics, ref opt_trait_ref, ref self_type, ref impl_items) => self.resolve_implementation(generics, opt_trait_ref, - &**self_type, + &self_type, item.id, - impl_items); - } - - ItemTrait(_, ref generics, ref bounds, ref trait_items) => { - self.check_if_primitive_type_name(name, item.span); + impl_items), + ItemKind::Trait(_, ref generics, ref bounds, ref trait_items) => { // Create a new rib for the trait-wide type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, - TypeSpace, - ItemRibKind), - |this| { - let local_def_id = this.ast_map.local_def_id(item.id); - this.with_self_rib(DefSelfTy(Some(local_def_id), None), |this| { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { + let local_def_id = this.definitions.local_def_id(item.id); + this.with_self_rib(Def::SelfTy(Some(local_def_id), None), |this| { this.visit_generics(generics); walk_list!(this, visit_ty_param_bound, bounds); for trait_item in trait_items { match trait_item.node { - hir::ConstTraitItem(_, ref default) => { + TraitItemKind::Const(_, ref default) => { // Only impose the restrictions of // ConstRibKind if there's an actual constant // expression in a provided default. if default.is_some() { this.with_constant_rib(|this| { - intravisit::walk_trait_item(this, trait_item) + visit::walk_trait_item(this, trait_item) }); } else { - intravisit::walk_trait_item(this, trait_item) + visit::walk_trait_item(this, trait_item) } } - hir::MethodTraitItem(ref sig, _) => { + TraitItemKind::Method(ref sig, _) => { let type_parameters = HasTypeParameters(&sig.generics, - FnSpace, - MethodRibKind); + MethodRibKind(!sig.decl.has_self())); this.with_type_parameter_rib(type_parameters, |this| { - intravisit::walk_trait_item(this, trait_item) + visit::walk_trait_item(this, trait_item) }); } - hir::TypeTraitItem(..) 
=> { - this.check_if_primitive_type_name(trait_item.name, - trait_item.span); + TraitItemKind::Type(..) => { this.with_type_parameter_rib(NoTypeParameters, |this| { - intravisit::walk_trait_item(this, trait_item) + visit::walk_trait_item(this, trait_item) }); } + TraitItemKind::Macro(_) => panic!("unexpanded macro in resolve!"), }; } }); }); } - ItemMod(_) | ItemForeignMod(_) => { - self.with_scope(Some(name), |this| { - intravisit::walk_item(this, item); + ItemKind::Mod(_) | ItemKind::ForeignMod(_) => { + self.with_scope(item.id, |this| { + visit::walk_item(this, item); }); } - ItemConst(..) | ItemStatic(..) => { + ItemKind::Const(..) | ItemKind::Static(..) => { self.with_constant_rib(|this| { - intravisit::walk_item(this, item); + visit::walk_item(this, item); }); } - ItemUse(ref view_path) => { - // check for imports shadowing primitive types - let check_rename = |this: &Self, id, name| { - match this.def_map.borrow().get(&id).map(|d| d.full_def()) { - Some(DefTy(..)) | Some(DefStruct(..)) | Some(DefTrait(..)) | None => { - this.check_if_primitive_type_name(name, item.span); - } - _ => {} - } - }; - + ItemKind::Use(ref view_path) => { match view_path.node { - hir::ViewPathSimple(name, _) => { - check_rename(self, item.id, name); - } - hir::ViewPathList(ref prefix, ref items) => { - for item in items { - if let Some(name) = item.node.rename() { - check_rename(self, item.node.id(), name); - } - } - + ast::ViewPathList(ref prefix, ref items) => { + let path: Vec<_> = + prefix.segments.iter().map(|seg| seg.identifier).collect(); // Resolve prefix of an import with empty braces (issue #28388) if items.is_empty() && !prefix.segments.is_empty() { - match self.resolve_crate_relative_path(prefix.span, - &prefix.segments, - TypeNS) { - Some((def, lp)) => - self.record_def(item.id, PathResolution::new(def, lp, 0)), - None => { - resolve_error(self, - prefix.span, - ResolutionError::FailedToResolve( - &path_names_to_string(prefix, 0))); - self.record_def(item.id, 
err_path_resolution()); + let (scope, span) = (PathScope::Import, prefix.span); + // FIXME(#38012) This should be a module path, not anything in TypeNS. + let result = + self.resolve_path(&path, scope, Some(TypeNS), Some(span)); + let (def, msg) = match result { + PathResult::Module(module) => (module.def().unwrap(), None), + PathResult::NonModule(res) if res.depth == 0 => + (res.base_def, None), + PathResult::NonModule(_) => { + // Resolve a module path for better errors + match self.resolve_path(&path, scope, None, Some(span)) { + PathResult::Failed(msg, _) => (Def::Err, Some(msg)), + _ => unreachable!(), + } } + PathResult::Indeterminate => unreachable!(), + PathResult::Failed(msg, _) => (Def::Err, Some(msg)), + }; + if let Some(msg) = msg { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); } + self.record_def(item.id, PathResolution::new(def)); } } _ => {} } } - ItemExternCrate(_) => { + ItemKind::ExternCrate(_) => { // do nothing, these are just around to be encoded } + + ItemKind::Mac(_) => panic!("unexpanded macro in resolve!"), } } - fn with_type_parameter_rib(&mut self, type_parameters: TypeParameters, f: F) + fn with_type_parameter_rib<'b, F>(&'b mut self, type_parameters: TypeParameters<'a, 'b>, f: F) where F: FnOnce(&mut Resolver) { match type_parameters { - HasTypeParameters(generics, space, rib_kind) => { + HasTypeParameters(generics, rib_kind) => { let mut function_type_rib = Rib::new(rib_kind); - let mut seen_bindings = HashSet::new(); - for (index, type_parameter) in generics.ty_params.iter().enumerate() { - let name = type_parameter.name; + let mut seen_bindings = FxHashMap(); + for type_parameter in &generics.ty_params { + let name = type_parameter.ident.name; debug!("with_type_parameter_rib: {}", type_parameter.id); - if seen_bindings.contains(&name) { + if seen_bindings.contains_key(&name) { + let span = seen_bindings.get(&name).unwrap(); resolve_error(self, type_parameter.span, - 
ResolutionError::NameAlreadyUsedInTypeParameterList(name)); + ResolutionError::NameAlreadyUsedInTypeParameterList(name, + span)); } - seen_bindings.insert(name); + seen_bindings.entry(name).or_insert(type_parameter.span); // plain insert (no renaming) - function_type_rib.bindings - .insert(name, - DlDef(DefTyParam(space, - index as u32, - self.ast_map - .local_def_id(type_parameter.id), - name))); + let def_id = self.definitions.local_def_id(type_parameter.id); + let def = Def::TyParam(def_id); + function_type_rib.bindings.insert(Ident::with_empty_ctxt(name), def); + self.record_def(type_parameter.id, PathResolution::new(def)); } - self.type_ribs.push(function_type_rib); + self.ribs[TypeNS].push(function_type_rib); } NoTypeParameters => { @@ -2158,13 +1655,8 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { f(self); - match type_parameters { - HasTypeParameters(..) => { - if !self.resolved { - self.type_ribs.pop(); - } - } - NoTypeParameters => {} + if let HasTypeParameters(..) = type_parameters { + self.ribs[TypeNS].pop(); } } @@ -2173,108 +1665,68 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { { self.label_ribs.push(Rib::new(NormalRibKind)); f(self); - if !self.resolved { - self.label_ribs.pop(); - } + self.label_ribs.pop(); } fn with_constant_rib(&mut self, f: F) where F: FnOnce(&mut Resolver) { - self.value_ribs.push(Rib::new(ConstantItemRibKind)); - self.type_ribs.push(Rib::new(ConstantItemRibKind)); + self.ribs[ValueNS].push(Rib::new(ConstantItemRibKind)); + self.ribs[TypeNS].push(Rib::new(ConstantItemRibKind)); f(self); - if !self.resolved { - self.type_ribs.pop(); - self.value_ribs.pop(); - } + self.ribs[TypeNS].pop(); + self.ribs[ValueNS].pop(); } - fn resolve_function(&mut self, rib_kind: RibKind, declaration: &FnDecl, block: &Block) { - // Create a value rib for the function. 
- self.value_ribs.push(Rib::new(rib_kind)); + fn resolve_trait_reference(&mut self, + path: &[Ident], + global: bool, + generics: Option<&Generics>, + span: Span) + -> PathResolution { + let scope = if global { PathScope::Global } else { PathScope::Lexical }; + let def = match self.resolve_path(path, scope, None, Some(span)) { + PathResult::Module(module) => Some(module.def().unwrap()), + PathResult::NonModule(..) => return err_path_resolution(), + PathResult::Failed(msg, false) => { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + return err_path_resolution(); + } + _ => match self.resolve_path(path, scope, Some(TypeNS), None) { + PathResult::NonModule(path_resolution) => Some(path_resolution.base_def), + _ => None, + }, + }; - // Create a label rib for the function. - self.label_ribs.push(Rib::new(rib_kind)); + if let Some(def) = def { + if let Def::Trait(_) = def { + return PathResolution::new(def); + } - // Add each argument to the rib. - let mut bindings_list = HashMap::new(); - for argument in &declaration.inputs { - self.resolve_pattern(&*argument.pat, ArgumentIrrefutableMode, &mut bindings_list); + let mut err = resolve_struct_error(self, span, { + ResolutionError::IsNotATrait(&names_to_string(path), def.kind_name()) + }); + if let Some(generics) = generics { + if let Some(span) = generics.span_for_name(&names_to_string(path)) { + err.span_label(span, &"type parameter defined here"); + } + } - self.visit_ty(&*argument.ty); + // If it's a typedef, give a note + if let Def::TyAlias(..) 
= def { + err.note(&format!("type aliases cannot be used for traits")); + } + err.emit(); + } else { + // find possible candidates + let is_trait = |def| match def { Def::Trait(_) => true, _ => false }; + let candidates = self.lookup_candidates(path.last().unwrap().name, TypeNS, is_trait); - debug!("(resolving function) recorded argument"); + let path = names_to_string(path); + resolve_error(self, span, ResolutionError::UndeclaredTraitName(&path, candidates)); } - intravisit::walk_fn_ret_ty(self, &declaration.output); - - // Resolve the function body. - self.visit_block(block); - - debug!("(resolving function) leaving function"); - - if !self.resolved { - self.label_ribs.pop(); - self.value_ribs.pop(); - } - } - - fn resolve_trait_reference(&mut self, - id: NodeId, - trait_path: &Path, - path_depth: usize) - -> Result { - if let Some(path_res) = self.resolve_path(id, trait_path, path_depth, TypeNS, true) { - if let DefTrait(_) = path_res.base_def { - debug!("(resolving trait) found trait def: {:?}", path_res); - Ok(path_res) - } else { - let mut err = - resolve_struct_error(self, - trait_path.span, - ResolutionError::IsNotATrait(&*path_names_to_string(trait_path, - path_depth))); - - // If it's a typedef, give a note - if let DefTy(..) 
= path_res.base_def { - err.span_note(trait_path.span, - "`type` aliases cannot be used for traits"); - } - err.emit(); - Err(()) - } - } else { - resolve_error(self, - trait_path.span, - ResolutionError::UndeclaredTraitName(&*path_names_to_string(trait_path, - path_depth))); - Err(()) - } - } - - fn resolve_generics(&mut self, generics: &Generics) { - for type_parameter in generics.ty_params.iter() { - self.check_if_primitive_type_name(type_parameter.name, type_parameter.span); - } - for predicate in &generics.where_clause.predicates { - match predicate { - &hir::WherePredicate::BoundPredicate(_) | - &hir::WherePredicate::RegionPredicate(_) => {} - &hir::WherePredicate::EqPredicate(ref eq_pred) => { - let path_res = self.resolve_path(eq_pred.id, &eq_pred.path, 0, TypeNS, true); - if let Some(PathResolution { base_def: DefTyParam(..), .. }) = path_res { - self.record_def(eq_pred.id, path_res.unwrap()); - } else { - resolve_error(self, - eq_pred.span, - ResolutionError::UndeclaredAssociatedType); - self.record_def(eq_pred.id, err_path_resolution()); - } - } - } - } - intravisit::walk_generics(self, generics); - } + err_path_resolution() + } fn with_current_self_type(&mut self, self_type: &Ty, f: F) -> T where F: FnOnce(&mut Resolver) -> T @@ -2286,23 +1738,26 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { result } - fn with_optional_trait_ref(&mut self, opt_trait_ref: Option<&TraitRef>, f: F) -> T + fn with_optional_trait_ref(&mut self, + opt_trait_ref: Option<&TraitRef>, + f: F, + generics: Option<&Generics>) + -> T where F: FnOnce(&mut Resolver, Option) -> T { let mut new_val = None; let mut new_id = None; if let Some(trait_ref) = opt_trait_ref { - if let Ok(path_res) = self.resolve_trait_reference(trait_ref.ref_id, - &trait_ref.path, - 0) { - assert!(path_res.depth == 0); - self.record_def(trait_ref.ref_id, path_res); + let ast::Path { ref segments, span, global } = trait_ref.path; + let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect(); + let path_res = 
self.resolve_trait_reference(&path, global, generics, span); + assert!(path_res.depth == 0); + self.record_def(trait_ref.ref_id, path_res); + if path_res.base_def != Def::Err { new_val = Some((path_res.base_def.def_id(), trait_ref.clone())); new_id = Some(path_res.base_def.def_id()); - } else { - self.record_def(trait_ref.ref_id, err_path_resolution()); } - intravisit::walk_trait_ref(self, trait_ref); + visit::walk_trait_ref(self, trait_ref); } let original_trait_ref = replace(&mut self.current_trait_ref, new_val); let result = f(self, new_id); @@ -2316,13 +1771,10 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { let mut self_type_rib = Rib::new(NormalRibKind); // plain insert (no renaming, types are not currently hygienic....) - let name = special_names::type_self; - self_type_rib.bindings.insert(name, DlDef(self_def)); - self.type_ribs.push(self_type_rib); + self_type_rib.bindings.insert(keywords::SelfType.ident(), self_def); + self.ribs[TypeNS].push(self_type_rib); f(self); - if !self.resolved { - self.type_ribs.pop(); - } + self.ribs[TypeNS].pop(); } fn resolve_implementation(&mut self, @@ -2332,10 +1784,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { item_id: NodeId, impl_items: &[ImplItem]) { // If applicable, create a rib for the type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, - TypeSpace, - ItemRibKind), - |this| { + self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { // Resolve the type parameters. this.visit_generics(generics); @@ -2344,24 +1793,24 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { // Resolve the self type. 
this.visit_ty(self_type); - this.with_self_rib(DefSelfTy(trait_id, Some((item_id, self_type.id))), |this| { + let item_def_id = this.definitions.local_def_id(item_id); + this.with_self_rib(Def::SelfTy(trait_id, Some(item_def_id)), |this| { this.with_current_self_type(self_type, |this| { for impl_item in impl_items { + this.resolve_visibility(&impl_item.vis); match impl_item.node { - hir::ImplItemKind::Const(..) => { + ImplItemKind::Const(..) => { // If this is a trait impl, ensure the const // exists in trait - this.check_trait_item(impl_item.name, + this.check_trait_item(impl_item.ident.name, impl_item.span, |n, s| ResolutionError::ConstNotMemberOfTrait(n, s)); - this.with_constant_rib(|this| { - intravisit::walk_impl_item(this, impl_item); - }); + visit::walk_impl_item(this, impl_item); } - hir::ImplItemKind::Method(ref sig, _) => { + ImplItemKind::Method(ref sig, _) => { // If this is a trait impl, ensure the method // exists in trait - this.check_trait_item(impl_item.name, + this.check_trait_item(impl_item.ident.name, impl_item.span, |n, s| ResolutionError::MethodNotMemberOfTrait(n, s)); @@ -2369,26 +1818,26 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { // specific type parameters. 
let type_parameters = HasTypeParameters(&sig.generics, - FnSpace, - MethodRibKind); + MethodRibKind(!sig.decl.has_self())); this.with_type_parameter_rib(type_parameters, |this| { - intravisit::walk_impl_item(this, impl_item); + visit::walk_impl_item(this, impl_item); }); } - hir::ImplItemKind::Type(ref ty) => { + ImplItemKind::Type(ref ty) => { // If this is a trait impl, ensure the type // exists in trait - this.check_trait_item(impl_item.name, + this.check_trait_item(impl_item.ident.name, impl_item.span, |n, s| ResolutionError::TypeNotMemberOfTrait(n, s)); this.visit_ty(ty); } + ImplItemKind::Macro(_) => panic!("unexpanded macro in resolve!"), } } }); }); - }); + }, Some(&generics)); }); } @@ -2400,7 +1849,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { if let Some((did, ref trait_ref)) = self.current_trait_ref { if !self.trait_item_map.contains_key(&(name, did)) { let path_str = path_names_to_string(&trait_ref.path, 0); - resolve_error(self, span, err(name, &*path_str)); + resolve_error(self, span, err(name, &path_str)); } } } @@ -2413,7 +1862,7 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { walk_list!(self, visit_expr, &local.init); // Resolve the pattern. - self.resolve_pattern(&*local.pat, LocalIrrefutableMode, &mut HashMap::new()); + self.resolve_pattern(&local.pat, PatternSource::Let, &mut FxHashMap()); } // build a map from pattern identifiers to binding-info's. @@ -2421,16 +1870,22 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { // that expands into an or-pattern where one 'x' was from the // user and one 'x' came from the macro. 
fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap { - let mut result = HashMap::new(); - pat_bindings(&self.def_map, pat, |binding_mode, _id, sp, path1| { - let name = path1.node; - result.insert(name, - BindingInfo { - span: sp, - binding_mode: binding_mode, - }); + let mut binding_map = FxHashMap(); + + pat.walk(&mut |pat| { + if let PatKind::Ident(binding_mode, ident, ref sub_pat) = pat.node { + if sub_pat.is_some() || match self.def_map.get(&pat.id) { + Some(&PathResolution { base_def: Def::Local(..), .. }) => true, + _ => false, + } { + let binding_info = BindingInfo { span: ident.span, binding_mode: binding_mode }; + binding_map.insert(ident.node, binding_info); + } + } + true }); - return result; + + binding_map } // check that all of the arms in an or-pattern have exactly the @@ -2439,23 +1894,24 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { if arm.pats.is_empty() { return; } - let map_0 = self.binding_mode_map(&*arm.pats[0]); + let map_0 = self.binding_mode_map(&arm.pats[0]); for (i, p) in arm.pats.iter().enumerate() { - let map_i = self.binding_mode_map(&**p); + let map_i = self.binding_mode_map(&p); for (&key, &binding_0) in &map_0 { match map_i.get(&key) { None => { - resolve_error(self, - p.span, - ResolutionError::VariableNotBoundInPattern(key, i + 1)); + let error = ResolutionError::VariableNotBoundInPattern(key.name, 1, i + 1); + resolve_error(self, p.span, error); } Some(binding_i) => { if binding_0.binding_mode != binding_i.binding_mode { resolve_error(self, binding_i.span, - ResolutionError::VariableBoundWithDifferentMode(key, - i + 1)); + ResolutionError::VariableBoundWithDifferentMode( + key.name, + i + 1, + binding_0.span)); } } } @@ -2465,18 +1921,18 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { if !map_0.contains_key(&key) { resolve_error(self, binding.span, - ResolutionError::VariableNotBoundInParentPattern(key, i + 1)); + ResolutionError::VariableNotBoundInPattern(key.name, i + 1, 1)); } } } } fn resolve_arm(&mut self, arm: &Arm) { - 
self.value_ribs.push(Rib::new(NormalRibKind)); + self.ribs[ValueNS].push(Rib::new(NormalRibKind)); - let mut bindings_list = HashMap::new(); + let mut bindings_list = FxHashMap(); for pattern in &arm.pats { - self.resolve_pattern(&**pattern, RefutableMode, &mut bindings_list); + self.resolve_pattern(&pattern, PatternSource::Match, &mut bindings_list); } // This has to happen *after* we determine which @@ -2484,602 +1940,503 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { self.check_consistent_bindings(arm); walk_list!(self, visit_expr, &arm.guard); - self.visit_expr(&*arm.body); + self.visit_expr(&arm.body); - if !self.resolved { - self.value_ribs.pop(); - } + self.ribs[ValueNS].pop(); } fn resolve_block(&mut self, block: &Block) { debug!("(resolving block) entering block"); - self.value_ribs.push(Rib::new(NormalRibKind)); - // Move down in the graph, if there's an anonymous module rooted here. let orig_module = self.current_module; - match orig_module.anonymous_children.borrow().get(&block.id) { - None => { - // Nothing to do. - } - Some(anonymous_module) => { - debug!("(resolving block) found anonymous module, moving down"); - self.current_module = anonymous_module; - } + let anonymous_module = self.module_map.get(&block.id).cloned(); // clones a reference + + let mut num_macro_definition_ribs = 0; + if let Some(anonymous_module) = anonymous_module { + debug!("(resolving block) found anonymous module, moving down"); + self.ribs[ValueNS].push(Rib::new(ModuleRibKind(anonymous_module))); + self.ribs[TypeNS].push(Rib::new(ModuleRibKind(anonymous_module))); + self.current_module = anonymous_module; + self.finalize_current_module_macro_resolutions(); + } else { + self.ribs[ValueNS].push(Rib::new(NormalRibKind)); } - // Check for imports appearing after non-item statements. 
- let mut found_non_item = false; - for statement in &block.stmts { - if let hir::StmtDecl(ref declaration, _) = statement.node { - if let hir::DeclItem(i) = declaration.node { - let i = self.ast_map.expect_item(i.id); - match i.node { - ItemExternCrate(_) | ItemUse(_) if found_non_item => { - span_err!(self.session, - i.span, - E0154, - "imports are not allowed after non-item statements"); - } - _ => {} - } - } else { - found_non_item = true + // Descend into the block. + for stmt in &block.stmts { + if let Some(marks) = self.macros_at_scope.remove(&stmt.id) { + num_macro_definition_ribs += marks.len() as u32; + for mark in marks { + self.ribs[ValueNS].push(Rib::new(MacroDefinition(mark))); + self.label_ribs.push(Rib::new(MacroDefinition(mark))); } - } else { - found_non_item = true; } - } - // Descend into the block. - intravisit::walk_block(self, block); + self.visit_stmt(stmt); + } // Move back up. - if !self.resolved { - self.current_module = orig_module; - self.value_ribs.pop(); + self.current_module = orig_module; + for _ in 0 .. num_macro_definition_ribs { + self.ribs[ValueNS].pop(); + self.label_ribs.pop(); + } + self.ribs[ValueNS].pop(); + if let Some(_) = anonymous_module { + self.ribs[TypeNS].pop(); } debug!("(resolving block) leaving block"); } fn resolve_type(&mut self, ty: &Ty) { - match ty.node { - TyPath(ref maybe_qself, ref path) => { - let resolution = match self.resolve_possibly_assoc_item(ty.id, - maybe_qself.as_ref(), - path, - TypeNS, - true) { - // `::a::b::c` is resolved by typeck alone. - TypecheckRequired => { - // Resolve embedded types. - intravisit::walk_ty(self, ty); - return; + if let TyKind::Path(ref maybe_qself, ref path) = ty.node { + // This is a path in the type namespace. Walk through scopes looking for it. + if let Some(def) = + self.resolve_possibly_assoc_item(ty.id, maybe_qself.as_ref(), path, TypeNS) { + match def.base_def { + Def::Mod(..) 
if def.depth == 0 => { + self.session.span_err(path.span, "expected type, found module"); + self.record_def(ty.id, err_path_resolution()); } - ResolveAttempt(resolution) => resolution, - }; - - // This is a path in the type namespace. Walk through scopes - // looking for it. - match resolution { - Some(def) => { + _ => { // Write the result into the def map. debug!("(resolving type) writing resolution for `{}` (id {}) = {:?}", - path_names_to_string(path, 0), - ty.id, - def); + path_names_to_string(path, 0), ty.id, def); self.record_def(ty.id, def); - } - None => { - self.record_def(ty.id, err_path_resolution()); - - // Keep reporting some errors even if they're ignored above. - self.resolve_path(ty.id, path, 0, TypeNS, true); - - let kind = if maybe_qself.is_some() { - "associated type" - } else { - "type name" - }; + } + } + } else { + self.record_def(ty.id, err_path_resolution()); + // Keep reporting some errors even if they're ignored above. + let kind = if maybe_qself.is_some() { "associated type" } else { "type name" }; + let is_invalid_self_type_name = { + path.segments.len() > 0 && + maybe_qself.is_none() && + path.segments[0].identifier.name == keywords::SelfType.name() + }; - let self_type_name = special_idents::type_self.name; - let is_invalid_self_type_name = path.segments.len() > 0 && - maybe_qself.is_none() && - path.segments[0].identifier.name == - self_type_name; - if is_invalid_self_type_name { - resolve_error(self, - ty.span, - ResolutionError::SelfUsedOutsideImplOrTrait); - } else { - resolve_error(self, - ty.span, - ResolutionError::UseOfUndeclared( - kind, - &*path_names_to_string(path, - 0)) - ); + if is_invalid_self_type_name { + resolve_error(self, ty.span, ResolutionError::SelfUsedOutsideImplOrTrait); + } else { + let type_name = path.segments.last().unwrap().identifier.name; + let candidates = self.lookup_candidates(type_name, TypeNS, |def| { + match def { + Def::Trait(_) | + Def::Enum(_) | + Def::Struct(_) | + Def::Union(_) | + 
Def::TyAlias(_) => true, + _ => false, } - } + }); + + let name = &path_names_to_string(path, 0); + let error = ResolutionError::UseOfUndeclared(kind, name, candidates); + resolve_error(self, ty.span, error); } } - _ => {} } // Resolve embedded types. - intravisit::walk_ty(self, ty); + visit::walk_ty(self, ty); + } + + fn fresh_binding(&mut self, + ident: &SpannedIdent, + pat_id: NodeId, + outer_pat_id: NodeId, + pat_src: PatternSource, + bindings: &mut FxHashMap) + -> PathResolution { + // Add the binding to the local ribs, if it + // doesn't already exist in the bindings map. (We + // must not add it if it's in the bindings map + // because that breaks the assumptions later + // passes make about or-patterns.) + let mut def = Def::Local(self.definitions.local_def_id(pat_id)); + match bindings.get(&ident.node).cloned() { + Some(id) if id == outer_pat_id => { + // `Variant(a, a)`, error + resolve_error( + self, + ident.span, + ResolutionError::IdentifierBoundMoreThanOnceInSamePattern( + &ident.node.name.as_str()) + ); + } + Some(..) if pat_src == PatternSource::FnParam => { + // `fn f(a: u8, a: u8)`, error + resolve_error( + self, + ident.span, + ResolutionError::IdentifierBoundMoreThanOnceInParameterList( + &ident.node.name.as_str()) + ); + } + Some(..) if pat_src == PatternSource::Match => { + // `Variant1(a) | Variant2(a)`, ok + // Reuse definition from the first `a`. + def = self.ribs[ValueNS].last_mut().unwrap().bindings[&ident.node]; + } + Some(..) => { + span_bug!(ident.span, "two bindings with the same name from \ + unexpected pattern source {:?}", pat_src); + } + None => { + // A completely fresh binding, add to the lists if it's valid. 
+ if ident.node.name != keywords::Invalid.name() { + bindings.insert(ident.node, outer_pat_id); + self.ribs[ValueNS].last_mut().unwrap().bindings.insert(ident.node, def); + } + } + } + + PathResolution::new(def) + } + + fn resolve_pattern_path(&mut self, + pat_id: NodeId, + qself: Option<&QSelf>, + path: &Path, + namespace: Namespace, + expected_fn: ExpectedFn, + expected_what: &str) + where ExpectedFn: FnOnce(Def) -> bool + { + let resolution = if let Some(resolution) = self.resolve_possibly_assoc_item(pat_id, + qself, path, namespace) { + if resolution.depth == 0 { + if expected_fn(resolution.base_def) || resolution.base_def == Def::Err { + resolution + } else { + resolve_error( + self, + path.span, + ResolutionError::PatPathUnexpected(expected_what, + resolution.kind_name(), path) + ); + err_path_resolution() + } + } else { + // Not fully resolved associated item `T::A::B` or `::A::B` + // or `::A::B`. If `B` should be resolved in value namespace then + // it needs to be added to the trait map. + if namespace == ValueNS { + let item_name = path.segments.last().unwrap().identifier.name; + let traits = self.get_traits_containing_item(item_name); + self.trait_map.insert(pat_id, traits); + } + resolution + } + } else { + let error = ResolutionError::PatPathUnresolved(expected_what, path); + resolve_error(self, path.span, error); + err_path_resolution() + }; + + self.record_def(pat_id, resolution); + } + + fn resolve_struct_path(&mut self, node_id: NodeId, path: &Path) { + // Resolution logic is equivalent for expressions and patterns, + // reuse `resolve_pattern_path` for both. + self.resolve_pattern_path(node_id, None, path, TypeNS, |def| { + match def { + Def::Struct(..) | Def::Union(..) | Def::Variant(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) 
=> true, + _ => false, + } + }, "struct, variant or union type"); } fn resolve_pattern(&mut self, - pattern: &Pat, - mode: PatternBindingMode, - // Maps idents to the node ID for the (outermost) - // pattern that binds them - bindings_list: &mut HashMap) { - let pat_id = pattern.id; - walk_pat(pattern, |pattern| { - match pattern.node { - PatIdent(binding_mode, ref path1, ref at_rhs) => { - // The meaning of PatIdent with no type parameters - // depends on whether an enum variant or unit-like struct - // with that name is in scope. The probing lookup has to - // be careful not to emit spurious errors. Only matching - // patterns (match) can match nullary variants or - // unit-like structs. For binding patterns (let - // and the LHS of @-patterns), matching such a value is - // simply disallowed (since it's rarely what you want). - let const_ok = mode == RefutableMode && at_rhs.is_none(); - - let ident = path1.node; - let renamed = ident.name; - - match self.resolve_bare_identifier_pattern(ident.unhygienic_name, - pattern.span) { - FoundStructOrEnumVariant(def, lp) if const_ok => { - debug!("(resolving pattern) resolving `{}` to struct or enum variant", - renamed); - - self.enforce_default_binding_mode(pattern, - binding_mode, - "an enum variant"); - self.record_def(pattern.id, - PathResolution { - base_def: def, - last_private: lp, - depth: 0, - }); - } - FoundStructOrEnumVariant(..) 
=> { - resolve_error( - self, - pattern.span, - ResolutionError::DeclarationShadowsEnumVariantOrUnitLikeStruct( - renamed) - ); - self.record_def(pattern.id, err_path_resolution()); - } - FoundConst(def, lp, _) if const_ok => { - debug!("(resolving pattern) resolving `{}` to constant", renamed); - - self.enforce_default_binding_mode(pattern, binding_mode, "a constant"); - self.record_def(pattern.id, - PathResolution { - base_def: def, - last_private: lp, - depth: 0, - }); - } - FoundConst(def, _, name) => { - resolve_error( - self, - pattern.span, - ResolutionError::OnlyIrrefutablePatternsAllowedHere(def.def_id(), - name) - ); - self.record_def(pattern.id, err_path_resolution()); - } - BareIdentifierPatternUnresolved => { - debug!("(resolving pattern) binding `{}`", renamed); - - let def_id = self.ast_map.local_def_id(pattern.id); - let def = DefLocal(def_id, pattern.id); - - // Record the definition so that later passes - // will be able to distinguish variants from - // locals in patterns. - - self.record_def(pattern.id, - PathResolution { - base_def: def, - last_private: LastMod(AllPublic), - depth: 0, - }); - - // Add the binding to the local ribs, if it - // doesn't already exist in the bindings list. (We - // must not add it if it's in the bindings list - // because that breaks the assumptions later - // passes make about or-patterns.) - if !bindings_list.contains_key(&renamed) { - let this = &mut *self; - let last_rib = this.value_ribs.last_mut().unwrap(); - last_rib.bindings.insert(renamed, DlDef(def)); - bindings_list.insert(renamed, pat_id); - } else if mode == ArgumentIrrefutableMode && - bindings_list.contains_key(&renamed) { - // Forbid duplicate bindings in the same - // parameter list. 
- resolve_error( - self, - pattern.span, - ResolutionError::IdentifierBoundMoreThanOnceInParameterList( - &ident.name.as_str()) - ); - } else if bindings_list.get(&renamed) == Some(&pat_id) { - // Then this is a duplicate variable in the - // same disjunction, which is an error. + pat: &Pat, + pat_src: PatternSource, + // Maps idents to the node ID for the + // outermost pattern that binds them. + bindings: &mut FxHashMap) { + // Visit all direct subpatterns of this pattern. + let outer_pat_id = pat.id; + pat.walk(&mut |pat| { + match pat.node { + PatKind::Ident(bmode, ref ident, ref opt_pat) => { + // First try to resolve the identifier as some existing + // entity, then fall back to a fresh binding. + let binding = self.resolve_ident_in_lexical_scope(ident.node, ValueNS, None) + .and_then(LexicalScopeBinding::item); + let resolution = binding.map(NameBinding::def).and_then(|def| { + let always_binding = !pat_src.is_refutable() || opt_pat.is_some() || + bmode != BindingMode::ByValue(Mutability::Immutable); + match def { + Def::StructCtor(_, CtorKind::Const) | + Def::VariantCtor(_, CtorKind::Const) | + Def::Const(..) if !always_binding => { + // A unit struct/variant or constant pattern. + let name = ident.node.name; + self.record_use(name, ValueNS, binding.unwrap(), ident.span); + Some(PathResolution::new(def)) + } + Def::StructCtor(..) | Def::VariantCtor(..) | + Def::Const(..) | Def::Static(..) => { + // A fresh binding that shadows something unacceptable. resolve_error( self, - pattern.span, - ResolutionError::IdentifierBoundMoreThanOnceInSamePattern( - &ident.name.as_str()) + ident.span, + ResolutionError::BindingShadowsSomethingUnacceptable( + pat_src.descr(), ident.node.name, binding.unwrap()) ); + None } - // Else, not bound in the same pattern: do - // nothing. - } - } - } - - PatEnum(ref path, _) => { - // This must be an enum variant, struct or const. 
- let resolution = match self.resolve_possibly_assoc_item(pat_id, - None, - path, - ValueNS, - false) { - // The below shouldn't happen because all - // qualified paths should be in PatQPath. - TypecheckRequired => - self.session.span_bug(path.span, - "resolve_possibly_assoc_item claimed - \ - that a path in PatEnum requires typecheck - \ - to resolve, but qualified paths should be - \ - PatQPath"), - ResolveAttempt(resolution) => resolution, - }; - if let Some(path_res) = resolution { - match path_res.base_def { - DefVariant(..) | DefStruct(..) | DefConst(..) => { - self.record_def(pattern.id, path_res); - } - DefStatic(..) => { - resolve_error(&self, - path.span, - ResolutionError::StaticVariableReference); - self.record_def(pattern.id, err_path_resolution()); + Def::Local(..) | Def::Upvar(..) | Def::Fn(..) | Def::Err => { + // These entities are explicitly allowed + // to be shadowed by fresh bindings. + None } - _ => { - // If anything ends up here entirely resolved, - // it's an error. If anything ends up here - // partially resolved, that's OK, because it may - // be a `T::CONST` that typeck will resolve. 
- if path_res.depth == 0 { - resolve_error( - self, - path.span, - ResolutionError::NotAnEnumVariantStructOrConst( - &path.segments - .last() - .unwrap() - .identifier - .name - .as_str()) - ); - self.record_def(pattern.id, err_path_resolution()); - } else { - let const_name = path.segments - .last() - .unwrap() - .identifier - .name; - let traits = self.get_traits_containing_item(const_name); - self.trait_map.insert(pattern.id, traits); - self.record_def(pattern.id, path_res); - } + def => { + span_bug!(ident.span, "unexpected definition for an \ + identifier in pattern: {:?}", def); } } - } else { - resolve_error( - self, - path.span, - ResolutionError::UnresolvedEnumVariantStructOrConst( - &path.segments.last().unwrap().identifier.name.as_str()) - ); - self.record_def(pattern.id, err_path_resolution()); - } - intravisit::walk_path(self, path); + }).unwrap_or_else(|| { + self.fresh_binding(ident, pat.id, outer_pat_id, pat_src, bindings) + }); + + self.record_def(pat.id, resolution); } - PatQPath(ref qself, ref path) => { - // Associated constants only. - let resolution = match self.resolve_possibly_assoc_item(pat_id, - Some(qself), - path, - ValueNS, - false) { - TypecheckRequired => { - // All `::CONST` should end up here, and will - // require use of the trait map to resolve - // during typechecking. - let const_name = path.segments - .last() - .unwrap() - .identifier - .name; - let traits = self.get_traits_containing_item(const_name); - self.trait_map.insert(pattern.id, traits); - intravisit::walk_pat(self, pattern); - return true; - } - ResolveAttempt(resolution) => resolution, - }; - if let Some(path_res) = resolution { - match path_res.base_def { - // All `::CONST` should end up here, and - // have the trait already selected. - DefAssociatedConst(..) 
=> { - self.record_def(pattern.id, path_res); - } - _ => { - resolve_error( - self, - path.span, - ResolutionError::NotAnAssociatedConst( - &path.segments.last().unwrap().identifier.name.as_str() - ) - ); - self.record_def(pattern.id, err_path_resolution()); - } + PatKind::TupleStruct(ref path, ..) => { + self.resolve_pattern_path(pat.id, None, path, ValueNS, |def| { + match def { + Def::StructCtor(_, CtorKind::Fn) | + Def::VariantCtor(_, CtorKind::Fn) => true, + _ => false, } - } else { - resolve_error(self, - path.span, - ResolutionError::UnresolvedAssociatedConst(&path.segments - .last() - .unwrap() - .identifier - .name - .as_str())); - self.record_def(pattern.id, err_path_resolution()); - } - intravisit::walk_pat(self, pattern); + }, "tuple struct/variant"); } - PatStruct(ref path, _, _) => { - match self.resolve_path(pat_id, path, 0, TypeNS, false) { - Some(definition) => { - self.record_def(pattern.id, definition); + PatKind::Path(ref qself, ref path) => { + self.resolve_pattern_path(pat.id, qself.as_ref(), path, ValueNS, |def| { + match def { + Def::StructCtor(_, CtorKind::Const) | + Def::VariantCtor(_, CtorKind::Const) | + Def::Const(..) | Def::AssociatedConst(..) => true, + _ => false, } - result => { - debug!("(resolving pattern) didn't find struct def: {:?}", result); - resolve_error( - self, - path.span, - ResolutionError::DoesNotNameAStruct( - &*path_names_to_string(path, 0)) - ); - self.record_def(pattern.id, err_path_resolution()); - } - } - intravisit::walk_path(self, path); + }, "unit struct/variant or constant"); } - PatLit(_) | PatRange(..) => { - intravisit::walk_pat(self, pattern); + PatKind::Struct(ref path, ..) => { + self.resolve_struct_path(pat.id, path); } - _ => { - // Nothing to do. 
- } + _ => {} } true }); - } - fn resolve_bare_identifier_pattern(&mut self, - name: Name, - span: Span) - -> BareIdentifierPatternResolution { - let module = self.current_module; - match self.resolve_item_in_lexical_scope(module, name, ValueNS, true) { - Success((target, _)) => { - debug!("(resolve bare identifier pattern) succeeded in finding {} at {:?}", - name, - target.binding.borrow()); - match target.binding.def() { - None => { - panic!("resolved name in the value namespace to a set of name bindings \ - with no def?!"); - } - // For the two success cases, this lookup can be - // considered as not having a private component because - // the lookup happened only within the current module. - Some(def @ DefVariant(..)) | Some(def @ DefStruct(..)) => { - return FoundStructOrEnumVariant(def, LastMod(AllPublic)); - } - Some(def @ DefConst(..)) | Some(def @ DefAssociatedConst(..)) => { - return FoundConst(def, LastMod(AllPublic), name); - } - Some(DefStatic(..)) => { - resolve_error(self, span, ResolutionError::StaticVariableReference); - return BareIdentifierPatternUnresolved; - } - _ => return BareIdentifierPatternUnresolved - } - } - - Indeterminate => { - panic!("unexpected indeterminate result"); - } - Failed(err) => { - match err { - Some((span, msg)) => { - resolve_error(self, span, ResolutionError::FailedToResolve(&*msg)); - } - None => (), - } - - debug!("(resolve bare identifier pattern) failed to find {}", name); - return BareIdentifierPatternUnresolved; - } - } + visit::walk_pat(self, pat); } /// Handles paths that may refer to associated items fn resolve_possibly_assoc_item(&mut self, id: NodeId, - maybe_qself: Option<&hir::QSelf>, + maybe_qself: Option<&QSelf>, path: &Path, - namespace: Namespace, - check_ribs: bool) - -> AssocItemResolveResult { - let max_assoc_types; - - match maybe_qself { - Some(qself) => { - if qself.position == 0 { - return TypecheckRequired; - } - max_assoc_types = path.segments.len() - qself.position; - // Make sure the trait 
is valid. - let _ = self.resolve_trait_reference(id, path, max_assoc_types); - } - None => { - max_assoc_types = path.segments.len(); + ns: Namespace) + -> Option { + let ast::Path { ref segments, global, span } = *path; + let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect(); + let scope = if global { PathScope::Global } else { PathScope::Lexical }; + + if let Some(qself) = maybe_qself { + if qself.position == 0 { + // FIXME: Create some fake resolution that can't possibly be a type. + return Some(PathResolution { + base_def: Def::Mod(self.definitions.local_def_id(ast::CRATE_NODE_ID)), + depth: path.len(), + }); } + // Make sure the trait is valid. + self.resolve_trait_reference(&path[..qself.position], global, None, span); } - let mut resolution = self.with_no_errors(|this| { - this.resolve_path(id, path, 0, namespace, check_ribs) - }); - for depth in 1..max_assoc_types { - if resolution.is_some() { - break; + let result = match self.resolve_path(&path, scope, Some(ns), Some(span)) { + PathResult::NonModule(path_res) => match path_res.base_def { + Def::Trait(..) if maybe_qself.is_some() => return None, + _ => path_res, + }, + PathResult::Module(module) if !module.is_normal() => { + PathResolution::new(module.def().unwrap()) + } + // In `a(::assoc_item)*` `a` cannot be a module. If `a` does resolve to a module we + // don't report an error right away, but try to fallback to a primitive type. + // So, we are still able to successfully resolve something like + // + // use std::u8; // bring module u8 in scope + // fn f() -> u8 { // OK, resolves to primitive u8, not to std::u8 + // u8::max_value() // OK, resolves to associated function ::max_value, + // // not to non-existent std::u8::max_value + // } + // + // Such behavior is required for backward compatibility. + // The same fallback is used when `a` resolves to nothing. 
+ _ if self.primitive_type_table.primitive_types.contains_key(&path[0].name) => { + PathResolution { + base_def: Def::PrimTy(self.primitive_type_table.primitive_types[&path[0].name]), + depth: segments.len() - 1, + } } - self.with_no_errors(|this| { - resolution = this.resolve_path(id, path, depth, TypeNS, true); - }); + PathResult::Module(module) => PathResolution::new(module.def().unwrap()), + PathResult::Failed(msg, false) => { + resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + err_path_resolution() + } + _ => return None, + }; + + if path.len() == 1 || result.base_def == Def::Err { + return Some(result); } - if let Some(DefMod(_)) = resolution.map(|r| r.base_def) { - // A module is not a valid type or value. - resolution = None; - } - ResolveAttempt(resolution) - } - - /// If `check_ribs` is true, checks the local definitions first; i.e. - /// doesn't skip straight to the containing module. - /// Skips `path_depth` trailing segments, which is also reflected in the - /// returned value. See `middle::def::PathResolution` for more info. - pub fn resolve_path(&mut self, - id: NodeId, - path: &Path, - path_depth: usize, - namespace: Namespace, - check_ribs: bool) - -> Option { - let span = path.span; - let segments = &path.segments[..path.segments.len() - path_depth]; - - let mk_res = |(def, lp)| PathResolution::new(def, lp, path_depth); - - if path.global { - let def = self.resolve_crate_relative_path(span, segments, namespace); - return def.map(mk_res); - } - - // Try to find a path to an item in a module. 
- let last_ident = segments.last().unwrap().identifier; - if segments.len() <= 1 { - let unqualified_def = self.resolve_identifier(last_ident, namespace, check_ribs, true); - return unqualified_def.and_then(|def| self.adjust_local_def(def, span)) - .map(|def| { - PathResolution::new(def, LastMod(AllPublic), path_depth) - }); - } - - let unqualified_def = self.resolve_identifier(last_ident, namespace, check_ribs, false); - let def = self.resolve_module_relative_path(span, segments, namespace); - match (def, unqualified_def) { - (Some((ref d, _)), Some(ref ud)) if *d == ud.def => { - self.session - .add_lint(lint::builtin::UNUSED_QUALIFICATIONS, - id, - span, - "unnecessary qualification".to_string()); + + let unqualified_result = { + match self.resolve_path(&[*path.last().unwrap()], PathScope::Lexical, Some(ns), None) { + PathResult::NonModule(path_res) => path_res.base_def, + PathResult::Module(module) => module.def().unwrap(), + _ => return Some(result), } - _ => {} + }; + if result.base_def == unqualified_result && path[0].name != "$crate" { + let lint = lint::builtin::UNUSED_QUALIFICATIONS; + self.session.add_lint(lint, id, span, "unnecessary qualification".to_string()); } - def.map(mk_res) + Some(result) } - // Resolve a single identifier - fn resolve_identifier(&mut self, - identifier: hir::Ident, - namespace: Namespace, - check_ribs: bool, - record_used: bool) - -> Option { - // First, check to see whether the name is a primitive type. 
- if namespace == TypeNS { - if let Some(&prim_ty) = self.primitive_type_table - .primitive_types - .get(&identifier.unhygienic_name) { - return Some(LocalDef::from_def(DefPrimTy(prim_ty))); + fn resolve_path(&mut self, + path: &[Ident], + scope: PathScope, + opt_ns: Option, // `None` indicates a module path + record_used: Option) + -> PathResult<'a> { + let (mut module, allow_self) = match scope { + PathScope::Lexical => (None, true), + PathScope::Import => (Some(self.graph_root), true), + PathScope::Global => (Some(self.graph_root), false), + }; + let mut allow_super = allow_self; + + for (i, &ident) in path.iter().enumerate() { + let is_last = i == path.len() - 1; + let ns = if is_last { opt_ns.unwrap_or(TypeNS) } else { TypeNS }; + + if i == 0 && allow_self && ns == TypeNS && ident.name == keywords::SelfValue.name() { + module = Some(self.module_map[&self.current_module.normal_ancestor_id.unwrap()]); + continue + } else if i == 0 && allow_self && ns == TypeNS && ident.name == "$crate" { + module = Some(self.resolve_crate_var(ident.ctxt)); + continue + } else if allow_super && ns == TypeNS && ident.name == keywords::Super.name() { + let current_module = if i == 0 { self.current_module } else { module.unwrap() }; + let self_module = self.module_map[¤t_module.normal_ancestor_id.unwrap()]; + if let Some(parent) = self_module.parent { + module = Some(self.module_map[&parent.normal_ancestor_id.unwrap()]); + continue + } else { + let msg = "There are too many initial `super`s.".to_string(); + return PathResult::Failed(msg, false); + } } - } + allow_super = false; - if check_ribs { - if let Some(def) = self.resolve_identifier_in_local_ribs(identifier, namespace) { - return Some(def); + let binding = if let Some(module) = module { + self.resolve_name_in_module(module, ident.name, ns, false, record_used) + } else { + match self.resolve_ident_in_lexical_scope(ident, ns, record_used) { + Some(LexicalScopeBinding::Item(binding)) => Ok(binding), + 
Some(LexicalScopeBinding::Def(def)) if opt_ns.is_some() => { + return PathResult::NonModule(PathResolution { + base_def: def, + depth: path.len() - 1, + }); + } + _ => Err(if record_used.is_some() { Determined } else { Undetermined }), + } + }; + + match binding { + Ok(binding) => { + if let Some(next_module) = binding.module() { + module = Some(next_module); + } else if binding.def() == Def::Err { + return PathResult::NonModule(err_path_resolution()); + } else if opt_ns.is_some() { + return PathResult::NonModule(PathResolution { + base_def: binding.def(), + depth: path.len() - i - 1, + }); + } else { + return PathResult::Failed(format!("Not a module `{}`", ident), is_last); + } + } + Err(Undetermined) => return PathResult::Indeterminate, + Err(Determined) => { + if let Some(module) = module { + if opt_ns.is_some() && !module.is_normal() { + return PathResult::NonModule(PathResolution { + base_def: module.def().unwrap(), + depth: path.len() - i, + }); + } + } + let msg = if module.and_then(ModuleS::def) == self.graph_root.def() { + let is_mod = |def| match def { Def::Mod(..) => true, _ => false }; + let mut candidates = + self.lookup_candidates(ident.name, TypeNS, is_mod).candidates; + candidates.sort_by_key(|path| (path.segments.len(), path.to_string())); + if let Some(candidate) = candidates.get(0) { + format!("Did you mean `{}`?", candidate) + } else { + format!("Maybe a missing `extern crate {};`?", ident) + } + } else if i == 0 { + format!("Use of undeclared type or module `{}`", ident) + } else { + format!("Could not find `{}` in `{}`", ident, path[i - 1]) + }; + return PathResult::Failed(msg, is_last); + } } } - let name = identifier.unhygienic_name; - self.resolve_item_by_name_in_lexical_scope(name, namespace, record_used) - .map(LocalDef::from_def) + PathResult::Module(module.unwrap()) } // Resolve a local definition, potentially adjusting for closures. 
- fn adjust_local_def(&mut self, local_def: LocalDef, span: Span) -> Option { + fn adjust_local_def(&mut self, local_def: LocalDef, span: Span) -> Def { let ribs = match local_def.ribs { - Some((TypeNS, i)) => &self.type_ribs[i + 1..], - Some((ValueNS, i)) => &self.value_ribs[i + 1..], - _ => &[] as &[_], + Some((ns, i)) => &self.ribs[ns][i + 1..], + None => &[] as &[_], }; let mut def = local_def.def; match def { - DefUpvar(..) => { - self.session.span_bug(span, &format!("unexpected {:?} in bindings", def)) + Def::Upvar(..) => { + span_bug!(span, "unexpected {:?} in bindings", def) } - DefLocal(_, node_id) => { + Def::Local(def_id) => { for rib in ribs { match rib.kind { - NormalRibKind => { + NormalRibKind | ModuleRibKind(..) | MacroDefinition(..) => { // Nothing to do. Continue. } ClosureRibKind(function_id) => { let prev_def = def; - let node_def_id = self.ast_map.local_def_id(node_id); + let node_id = self.definitions.as_local_node_id(def_id).unwrap(); let seen = self.freevars_seen .entry(function_id) .or_insert_with(|| NodeMap()); if let Some(&index) = seen.get(&node_id) { - def = DefUpvar(node_def_id, node_id, index, function_id); + def = Def::Upvar(def_id, index, function_id); continue; } let vec = self.freevars @@ -3091,32 +2448,33 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { span: span, }); - def = DefUpvar(node_def_id, node_id, depth, function_id); + def = Def::Upvar(def_id, depth, function_id); seen.insert(node_id, depth); } - ItemRibKind | MethodRibKind => { + ItemRibKind | MethodRibKind(_) => { // This was an attempt to access an upvar inside a // named function item. This is not allowed, so we // report an error. resolve_error(self, span, ResolutionError::CannotCaptureDynamicEnvironmentInFnItem); - return None; + return Def::Err; } ConstantItemRibKind => { // Still doesn't deal with upvars resolve_error(self, span, ResolutionError::AttemptToUseNonConstantValueInConstant); - return None; + return Def::Err; } } } } - DefTyParam(..) | DefSelfTy(..) 
=> { + Def::TyParam(..) | Def::SelfTy(..) => { for rib in ribs { match rib.kind { - NormalRibKind | MethodRibKind | ClosureRibKind(..) => { + NormalRibKind | MethodRibKind(_) | ClosureRibKind(..) | + ModuleRibKind(..) | MacroDefinition(..) => { // Nothing to do. Continue. } ItemRibKind => { @@ -3126,239 +2484,51 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { resolve_error(self, span, ResolutionError::TypeParametersFromOuterFunction); - return None; + return Def::Err; } ConstantItemRibKind => { // see #9186 resolve_error(self, span, ResolutionError::OuterTypeParameterContext); - return None; + return Def::Err; } } } } _ => {} } - return Some(def); - } - - // resolve a "module-relative" path, e.g. a::b::c - fn resolve_module_relative_path(&mut self, - span: Span, - segments: &[hir::PathSegment], - namespace: Namespace) - -> Option<(Def, LastPrivate)> { - let module_path = segments.split_last() - .unwrap() - .1 - .iter() - .map(|ps| ps.identifier.name) - .collect::>(); - - let containing_module; - let last_private; - let current_module = self.current_module; - match self.resolve_module_path(current_module, - &module_path[..], - UseLexicalScope, - span, - PathSearch) { - Failed(err) => { - let (span, msg) = match err { - Some((span, msg)) => (span, msg), - None => { - let msg = format!("Use of undeclared type or module `{}`", - names_to_string(&module_path)); - (span, msg) - } - }; - - resolve_error(self, span, ResolutionError::FailedToResolve(&*msg)); - return None; - } - Indeterminate => panic!("indeterminate unexpected"), - Success((resulting_module, resulting_last_private)) => { - containing_module = resulting_module; - last_private = resulting_last_private; - } - } - - let name = segments.last().unwrap().identifier.name; - let def = match self.resolve_name_in_module(containing_module, - name, - namespace, - NameSearchType::PathSearch, - false) { - Success((Target { binding, .. 
}, _)) => { - let (def, lp) = binding.def_and_lp(); - (def, last_private.or(lp)) - } - _ => return None, - }; - if let Some(DefId{krate: kid, ..}) = containing_module.def_id() { - self.used_crates.insert(kid); - } - return Some(def); - } - - /// Invariant: This must be called only during main resolution, not during - /// import resolution. - fn resolve_crate_relative_path(&mut self, - span: Span, - segments: &[hir::PathSegment], - namespace: Namespace) - -> Option<(Def, LastPrivate)> { - let module_path = segments.split_last() - .unwrap() - .1 - .iter() - .map(|ps| ps.identifier.name) - .collect::>(); - - let root_module = self.graph_root; - - let containing_module; - let last_private; - match self.resolve_module_path_from_root(root_module, - &module_path[..], - 0, - span, - PathSearch, - LastMod(AllPublic)) { - Failed(err) => { - let (span, msg) = match err { - Some((span, msg)) => (span, msg), - None => { - let msg = format!("Use of undeclared module `::{}`", - names_to_string(&module_path[..])); - (span, msg) - } - }; - - resolve_error(self, span, ResolutionError::FailedToResolve(&*msg)); - return None; - } - - Indeterminate => { - panic!("indeterminate unexpected"); - } - - Success((resulting_module, resulting_last_private)) => { - containing_module = resulting_module; - last_private = resulting_last_private; - } - } - - let name = segments.last().unwrap().identifier.name; - match self.resolve_name_in_module(containing_module, - name, - namespace, - NameSearchType::PathSearch, - false) { - Success((Target { binding, .. }, _)) => { - let (def, lp) = binding.def_and_lp(); - Some((def, last_private.or(lp))) - } - _ => None, - } - } - - fn resolve_identifier_in_local_ribs(&mut self, - ident: hir::Ident, - namespace: Namespace) - -> Option { - // Check the local set of ribs. 
- let (name, ribs) = match namespace { - ValueNS => (ident.name, &self.value_ribs), - TypeNS => (ident.unhygienic_name, &self.type_ribs), - }; - - for (i, rib) in ribs.iter().enumerate().rev() { - if let Some(def_like) = rib.bindings.get(&name).cloned() { - match def_like { - DlDef(def) => { - debug!("(resolving path in local ribs) resolved `{}` to {:?} at {}", - name, - def, - i); - return Some(LocalDef { - ribs: Some((namespace, i)), - def: def, - }); - } - def_like => { - debug!("(resolving path in local ribs) resolved `{}` to pseudo-def {:?}", - name, - def_like); - return None; - } - } - } - } - - None + return def; } - fn resolve_item_by_name_in_lexical_scope(&mut self, - name: Name, - namespace: Namespace, - record_used: bool) - -> Option { - // Check the items. - let module = self.current_module; - match self.resolve_item_in_lexical_scope(module, name, namespace, record_used) { - Success((target, _)) => { - match target.binding.def() { - None => { - // This can happen if we were looking for a type and - // found a module instead. Modules don't have defs. - debug!("(resolving item path by identifier in lexical scope) failed to \ - resolve {} after success...", - name); - None - } - Some(def) => { - debug!("(resolving item path in lexical scope) resolved `{}` to item", - name); - // This lookup is "all public" because it only searched - // for one identifier in the current module (couldn't - // have passed through reexports or anything like that. - Some(def) - } - } - } - Indeterminate => { - panic!("unexpected indeterminate result"); - } - Failed(err) => { - debug!("(resolving item path by identifier in lexical scope) failed to resolve {}", - name); - - if let Some((span, msg)) = err { - resolve_error(self, span, ResolutionError::FailedToResolve(&*msg)) - } - - None - } - } + // Calls `f` with a `Resolver` whose current lexical scope is `module`'s lexical scope, + // i.e. the module's items and the prelude (unless the module is `#[no_implicit_prelude]`). 
+ // FIXME #34673: This needs testing. + pub fn with_module_lexical_scope(&mut self, module: Module<'a>, f: F) -> T + where F: FnOnce(&mut Resolver<'a>) -> T, + { + self.with_empty_ribs(|this| { + this.ribs[ValueNS].push(Rib::new(ModuleRibKind(module))); + this.ribs[TypeNS].push(Rib::new(ModuleRibKind(module))); + f(this) + }) } - fn with_no_errors(&mut self, f: F) -> T - where F: FnOnce(&mut Resolver) -> T + fn with_empty_ribs(&mut self, f: F) -> T + where F: FnOnce(&mut Resolver<'a>) -> T, { - self.emit_errors = false; - let rs = f(self); - self.emit_errors = true; - rs + let ribs = replace(&mut self.ribs, PerNS::>::default()); + let label_ribs = replace(&mut self.label_ribs, Vec::new()); + + let result = f(self); + self.ribs = ribs; + self.label_ribs = label_ribs; + result } fn find_fallback_in_self_type(&mut self, name: Name) -> FallbackSuggestion { - fn extract_path_and_node_id(t: &Ty, - allow: FallbackChecks) - -> Option<(Path, NodeId, FallbackChecks)> { + fn extract_node_id(t: &Ty) -> Option { match t.node { - TyPath(None, ref path) => Some((path.clone(), t.id, allow)), - TyPtr(ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, OnlyTraitAndStatics), - TyRptr(_, ref mut_ty) => extract_path_and_node_id(&*mut_ty.ty, allow), + TyKind::Path(None, _) => Some(t.id), + TyKind::Rptr(_, ref mut_ty) => extract_node_id(&mut_ty.ty), // This doesn't handle the remaining `Ty` variants as they are not // that commonly the self_type, it might be interesting to provide // support for those in future. 
@@ -3366,101 +2536,26 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { } } - fn get_module<'a, 'tcx>(this: &mut Resolver<'a, 'tcx>, - span: Span, - name_path: &[ast::Name]) - -> Option> { - let root = this.current_module; - let last_name = name_path.last().unwrap(); - - if name_path.len() == 1 { - match this.primitive_type_table.primitive_types.get(last_name) { - Some(_) => None, - None => { - match this.current_module.children.borrow().get(last_name) { - Some(child) => child.type_ns.module(), - None => None, - } - } - } - } else { - match this.resolve_module_path(root, - &name_path[..], - UseLexicalScope, - span, - PathSearch) { - Success((module, _)) => Some(module), - _ => None, - } - } - } - - fn is_static_method(this: &Resolver, did: DefId) -> bool { - if let Some(node_id) = this.ast_map.as_local_node_id(did) { - let sig = match this.ast_map.get(node_id) { - hir_map::NodeTraitItem(trait_item) => match trait_item.node { - hir::MethodTraitItem(ref sig, _) => sig, - _ => return false, - }, - hir_map::NodeImplItem(impl_item) => match impl_item.node { - hir::ImplItemKind::Method(ref sig, _) => sig, - _ => return false, - }, - _ => return false, - }; - sig.explicit_self.node == hir::SelfStatic - } else { - this.session.cstore.is_static_method(did) - } - } - - let (path, node_id, allowed) = match self.current_self_type { - Some(ref ty) => match extract_path_and_node_id(ty, Everything) { - Some(x) => x, - None => return NoSuggestion, - }, - None => return NoSuggestion, - }; - - if allowed == Everything { + if let Some(node_id) = self.current_self_type.as_ref().and_then(extract_node_id) { // Look for a field with the same name in the current self_type. 
- match self.def_map.borrow().get(&node_id).map(|d| d.full_def()) { - Some(DefTy(did, _)) | - Some(DefStruct(did)) | - Some(DefVariant(_, did, _)) => match self.structs.get(&did) { - None => {} - Some(fields) => { - if fields.iter().any(|&field_name| name == field_name) { - return Field; + if let Some(resolution) = self.def_map.get(&node_id) { + match resolution.base_def { + Def::Struct(did) | Def::Union(did) if resolution.depth == 0 => { + if let Some(field_names) = self.field_names.get(&did) { + if field_names.iter().any(|&field_name| name == field_name) { + return Field; + } } } - }, - _ => {} // Self type didn't resolve properly - } - } - - let name_path = path.segments.iter().map(|seg| seg.identifier.name).collect::>(); - - // Look for a method in the current self type's impl module. - if let Some(module) = get_module(self, path.span, &name_path) { - if let Some(binding) = module.children.borrow().get(&name) { - if let Some(DefMethod(did)) = binding.value_ns.def() { - if is_static_method(self, did) { - return StaticMethod(path_names_to_string(&path, 0)); - } - if self.current_trait_ref.is_some() { - return TraitItem; - } else if allowed == Everything { - return Method; - } + _ => {} } } } // Look for a method in the current trait. 
if let Some((trait_did, ref trait_ref)) = self.current_trait_ref { - if let Some(&did) = self.trait_item_map.get(&(name, trait_did)) { - if is_static_method(self, did) { + if let Some(&is_static_method) = self.trait_item_map.get(&(name, trait_did)) { + if is_static_method { return TraitMethod(path_names_to_string(&trait_ref.path, 0)); } else { return TraitItem; @@ -3472,24 +2567,35 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { } fn find_best_match(&mut self, name: &str) -> SuggestionType { - if let Some(macro_name) = self.session.available_macros - .borrow().iter().find(|n| n.as_str() == name) { + if let Some(macro_name) = self.macro_names.iter().find(|&n| n == &name) { return SuggestionType::Macro(format!("{}!", macro_name)); } - let names = self.value_ribs + let names = self.ribs[ValueNS] .iter() .rev() - .flat_map(|rib| rib.bindings.keys()); + .flat_map(|rib| rib.bindings.keys().map(|ident| &ident.name)); if let Some(found) = find_best_match_for_name(names, name, None) { - if name != &*found { + if found != name { return SuggestionType::Function(found); } } SuggestionType::NotFound } - fn resolve_expr(&mut self, expr: &Expr) { + fn resolve_labeled_block(&mut self, label: Option, id: NodeId, block: &Block) { + if let Some(label) = label { + let def = Def::Label(id); + self.with_label_rib(|this| { + this.label_ribs.last_mut().unwrap().bindings.insert(label.node, def); + this.visit_block(block); + }); + } else { + self.visit_block(block); + } + } + + fn resolve_expr(&mut self, expr: &Expr, parent: Option<&Expr>) { // First, record candidate traits for this expression if it could // result in the invocation of a method call. @@ -3497,41 +2603,26 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { // Next, resolve the node. match expr.node { - ExprPath(ref maybe_qself, ref path) => { - let resolution = match self.resolve_possibly_assoc_item(expr.id, - maybe_qself.as_ref(), - path, - ValueNS, - true) { - // `::a::b::c` is resolved by typeck alone. 
- TypecheckRequired => { - let method_name = path.segments.last().unwrap().identifier.name; - let traits = self.get_traits_containing_item(method_name); - self.trait_map.insert(expr.id, traits); - intravisit::walk_expr(self, expr); - return; - } - ResolveAttempt(resolution) => resolution, - }; - + ExprKind::Path(ref maybe_qself, ref path) => { // This is a local path in the value namespace. Walk through // scopes looking for it. - if let Some(path_res) = resolution { + if let Some(path_res) = self.resolve_possibly_assoc_item(expr.id, + maybe_qself.as_ref(), path, ValueNS) { // Check if struct variant - if let DefVariant(_, _, true) = path_res.base_def { + let is_struct_variant = match path_res.base_def { + Def::VariantCtor(_, CtorKind::Fictive) => true, + _ => false, + }; + if is_struct_variant { let path_name = path_names_to_string(path, 0); let mut err = resolve_struct_error(self, expr.span, - ResolutionError::StructVariantUsedAsFunction(&*path_name)); + ResolutionError::StructVariantUsedAsFunction(&path_name)); let msg = format!("did you mean to write: `{} {{ /* fields */ }}`?", path_name); - if self.emit_errors { - err.fileline_help(expr.span, &msg); - } else { - err.span_help(expr.span, &msg); - } + err.help(&msg); err.emit(); self.record_def(expr.id, err_path_resolution()); } else { @@ -3551,182 +2642,201 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { } } else { // Be helpful if the name refers to a struct - // (The pattern matching def_tys where the id is in self.structs - // matches on regular structs while excluding tuple- and enum-like - // structs, which wouldn't result in this error.) let path_name = path_names_to_string(path, 0); - let type_res = self.with_no_errors(|this| { - this.resolve_path(expr.id, path, 0, TypeNS, false) - }); + let ast::Path { ref segments, global, .. 
} = *path; + let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect(); + let scope = if global { PathScope::Global } else { PathScope::Lexical }; + let type_res = match self.resolve_path(&path, scope, Some(TypeNS), None) { + PathResult::NonModule(type_res) => Some(type_res), + _ => None, + }; self.record_def(expr.id, err_path_resolution()); - match type_res.map(|r| r.base_def) { - Some(DefTy(struct_id, _)) if self.structs.contains_key(&struct_id) => { - let mut err = resolve_struct_error(self, - expr.span, - ResolutionError::StructVariantUsedAsFunction(&*path_name)); - - let msg = format!("did you mean to write: `{} {{ /* fields */ }}`?", - path_name); - if self.emit_errors { - err.fileline_help(expr.span, &msg); - } else { - err.span_help(expr.span, &msg); - } - err.emit(); - } - _ => { - // Keep reporting some errors even if they're ignored above. - self.resolve_path(expr.id, path, 0, ValueNS, true); - - let mut method_scope = false; - self.value_ribs.iter().rev().all(|rib| { - method_scope = match rib.kind { - MethodRibKind => true, - ItemRibKind | ConstantItemRibKind => false, - _ => return true, // Keep advancing - }; - false // Stop advancing - }); - if method_scope && special_names::self_.as_str() == &path_name[..] 
{ - resolve_error(self, - expr.span, - ResolutionError::SelfNotAvailableInStaticMethod); - } else { - let last_name = path.segments.last().unwrap().identifier.name; - let mut msg = match self.find_fallback_in_self_type(last_name) { - NoSuggestion => { - // limit search to 5 to reduce the number - // of stupid suggestions - match self.find_best_match(&path_name) { - SuggestionType::Macro(s) => { - format!("the macro `{}`", s) - } - SuggestionType::Function(s) => format!("`{}`", s), - SuggestionType::NotFound => "".to_string(), + if let Some(Def::Struct(..)) = type_res.map(|r| r.base_def) { + let error_variant = + ResolutionError::StructVariantUsedAsFunction(&path_name); + let mut err = resolve_struct_error(self, expr.span, error_variant); + + let msg = format!("did you mean to write: `{} {{ /* fields */ }}`?", + path_name); + + err.help(&msg); + err.emit(); + } else { + // Keep reporting some errors even if they're ignored above. + let mut method_scope = false; + let mut is_static = false; + self.ribs[ValueNS].iter().rev().all(|rib| { + method_scope = match rib.kind { + MethodRibKind(is_static_) => { + is_static = is_static_; + true + } + ItemRibKind | ConstantItemRibKind => false, + _ => return true, // Keep advancing + }; + false // Stop advancing + }); + + if method_scope && keywords::SelfValue.name() == &*path_name { + let error = ResolutionError::SelfNotAvailableInStaticMethod; + resolve_error(self, expr.span, error); + } else { + let fallback = + self.find_fallback_in_self_type(path.last().unwrap().name); + let (mut msg, is_field) = match fallback { + NoSuggestion => { + // limit search to 5 to reduce the number + // of stupid suggestions + (match self.find_best_match(&path_name) { + SuggestionType::Macro(s) => { + format!("the macro `{}`", s) } - } - Field => format!("`self.{}`", path_name), - Method | - TraitItem => format!("to call `self.{}`", path_name), - TraitMethod(path_str) | - StaticMethod(path_str) => - format!("to call `{}::{}`", path_str, 
path_name), - }; - - let mut context = UnresolvedNameContext::Other; - if !msg.is_empty() { - msg = format!(". Did you mean {}?", msg); - } else { - // we check if this a module and if so, we display a help - // message - let name_path = path.segments.iter() - .map(|seg| seg.identifier.name) - .collect::>(); - let current_module = self.current_module; - - match self.resolve_module_path(current_module, - &name_path[..], - UseLexicalScope, - expr.span, - PathSearch) { - Success(_) => { - context = UnresolvedNameContext::PathIsMod(expr.id); - }, - _ => {}, - }; + SuggestionType::Function(s) => format!("`{}`", s), + SuggestionType::NotFound => "".to_string(), + }, false) + } + Field => { + (if is_static && method_scope { + "".to_string() + } else { + format!("`self.{}`", path_name) + }, true) } + TraitItem => (format!("to call `self.{}`", path_name), false), + TraitMethod(path_str) => + (format!("to call `{}::{}`", path_str, path_name), false), + }; - resolve_error(self, - expr.span, - ResolutionError::UnresolvedName( - &*path_name, &*msg, context)); + let mut context = UnresolvedNameContext::Other; + let mut def = Def::Err; + if !msg.is_empty() { + msg = format!("did you mean {}?", msg); + } else { + // we display a help message if this is a module + if let PathResult::Module(module) = + self.resolve_path(&path, scope, None, None) { + def = module.def().unwrap(); + context = UnresolvedNameContext::PathIsMod(parent); + } } - } - } - } - - intravisit::walk_expr(self, expr); - } - ExprStruct(ref path, _, _) => { - // Resolve the path to the structure it goes to. We don't - // check to ensure that the path is actually a structure; that - // is checked later during typeck. 
- match self.resolve_path(expr.id, path, 0, TypeNS, false) { - Some(definition) => self.record_def(expr.id, definition), - None => { - debug!("(resolving expression) didn't find struct def",); - - resolve_error(self, - path.span, - ResolutionError::DoesNotNameAStruct( - &*path_names_to_string(path, 0)) - ); - self.record_def(expr.id, err_path_resolution()); + let error = ResolutionError::UnresolvedName { + path: &path_name, + message: &msg, + context: context, + is_static_method: method_scope && is_static, + is_field: is_field, + def: def, + }; + resolve_error(self, expr.span, error); + } } } - intravisit::walk_expr(self, expr); + visit::walk_expr(self, expr); } - ExprLoop(_, Some(label)) | ExprWhile(_, _, Some(label)) => { - self.with_label_rib(|this| { - let def_like = DlDef(DefLabel(expr.id)); + ExprKind::Struct(ref path, ..) => { + self.resolve_struct_path(expr.id, path); - { - let rib = this.label_ribs.last_mut().unwrap(); - rib.bindings.insert(label.name, def_like); - } - - intravisit::walk_expr(this, expr); - }) + visit::walk_expr(self, expr); } - ExprBreak(Some(label)) | ExprAgain(Some(label)) => { - match self.search_label(label.node.name) { + ExprKind::Break(Some(label), _) | ExprKind::Continue(Some(label)) => { + match self.search_label(label.node) { None => { self.record_def(expr.id, err_path_resolution()); resolve_error(self, label.span, - ResolutionError::UndeclaredLabel(&label.node.name.as_str())) + ResolutionError::UndeclaredLabel(&label.node.name.as_str())); } - Some(DlDef(def @ DefLabel(_))) => { + Some(def @ Def::Label(_)) => { // Since this def is a label, it is never read. 
- self.record_def(expr.id, - PathResolution { - base_def: def, - last_private: LastMod(AllPublic), - depth: 0, - }) + self.record_def(expr.id, PathResolution::new(def)); } Some(_) => { - self.session.span_bug(expr.span, "label wasn't mapped to a label def!") + span_bug!(expr.span, "label wasn't mapped to a label def!"); } } + + // visit `break` argument if any + visit::walk_expr(self, expr); + } + + ExprKind::IfLet(ref pattern, ref subexpression, ref if_block, ref optional_else) => { + self.visit_expr(subexpression); + + self.ribs[ValueNS].push(Rib::new(NormalRibKind)); + self.resolve_pattern(pattern, PatternSource::IfLet, &mut FxHashMap()); + self.visit_block(if_block); + self.ribs[ValueNS].pop(); + + optional_else.as_ref().map(|expr| self.visit_expr(expr)); + } + + ExprKind::Loop(ref block, label) => self.resolve_labeled_block(label, expr.id, &block), + + ExprKind::While(ref subexpression, ref block, label) => { + self.visit_expr(subexpression); + self.resolve_labeled_block(label, expr.id, &block); + } + + ExprKind::WhileLet(ref pattern, ref subexpression, ref block, label) => { + self.visit_expr(subexpression); + self.ribs[ValueNS].push(Rib::new(NormalRibKind)); + self.resolve_pattern(pattern, PatternSource::WhileLet, &mut FxHashMap()); + + self.resolve_labeled_block(label, expr.id, block); + + self.ribs[ValueNS].pop(); + } + + ExprKind::ForLoop(ref pattern, ref subexpression, ref block, label) => { + self.visit_expr(subexpression); + self.ribs[ValueNS].push(Rib::new(NormalRibKind)); + self.resolve_pattern(pattern, PatternSource::For, &mut FxHashMap()); + + self.resolve_labeled_block(label, expr.id, block); + + self.ribs[ValueNS].pop(); + } + + ExprKind::Field(ref subexpression, _) => { + self.resolve_expr(subexpression, Some(expr)); + } + ExprKind::MethodCall(_, ref types, ref arguments) => { + let mut arguments = arguments.iter(); + self.resolve_expr(arguments.next().unwrap(), Some(expr)); + for argument in arguments { + self.resolve_expr(argument, None); + } 
+ for ty in types.iter() { + self.visit_ty(ty); + } } _ => { - intravisit::walk_expr(self, expr); + visit::walk_expr(self, expr); } } } fn record_candidate_traits_for_expr_if_necessary(&mut self, expr: &Expr) { match expr.node { - ExprField(_, name) => { + ExprKind::Field(_, name) => { // FIXME(#6890): Even though you can't treat a method like a // field, we need to add any trait methods we find that match // the field name so that we can do some nice error reporting // later on in typeck. - let traits = self.get_traits_containing_item(name.node); + let traits = self.get_traits_containing_item(name.node.name); self.trait_map.insert(expr.id, traits); } - ExprMethodCall(name, _, _) => { + ExprKind::MethodCall(name, ..) => { debug!("(recording candidate traits for expr) recording traits for {}", expr.id); - let traits = self.get_traits_containing_item(name.node); + let traits = self.get_traits_containing_item(name.node.name); self.trait_map.insert(expr.id, traits); } _ => { @@ -3735,198 +2845,436 @@ impl<'a, 'tcx> Resolver<'a, 'tcx> { } } - fn get_traits_containing_item(&mut self, name: Name) -> Vec { + fn get_traits_containing_item(&mut self, name: Name) -> Vec { debug!("(getting traits containing item) looking for '{}'", name); - fn add_trait_info(found_traits: &mut Vec, trait_def_id: DefId, name: Name) { + fn add_trait_info(found_traits: &mut Vec, + trait_def_id: DefId, + import_id: Option, + name: Name) { debug!("(adding trait info) found trait {:?} for method '{}'", trait_def_id, name); - found_traits.push(trait_def_id); + found_traits.push(TraitCandidate { + def_id: trait_def_id, + import_id: import_id, + }); } let mut found_traits = Vec::new(); - let mut search_module = self.current_module; - loop { - // Look for the current trait. - match self.current_trait_ref { - Some((trait_def_id, _)) => { - if self.trait_item_map.contains_key(&(name, trait_def_id)) { - add_trait_info(&mut found_traits, trait_def_id, name); - } - } - None => {} // Nothing to do. 
+ // Look for the current trait. + if let Some((trait_def_id, _)) = self.current_trait_ref { + if self.trait_item_map.contains_key(&(name, trait_def_id)) { + add_trait_info(&mut found_traits, trait_def_id, None, name); } + } + let mut search_module = self.current_module; + loop { // Look for trait children. - build_reduced_graph::populate_module_if_necessary(self, &search_module); + let mut search_in_module = |this: &mut Self, module: Module<'a>| { + let mut traits = module.traits.borrow_mut(); + if traits.is_none() { + let mut collected_traits = Vec::new(); + module.for_each_child(|name, ns, binding| { + if ns != TypeNS { return } + if let Def::Trait(_) = binding.def() { + collected_traits.push((name, binding)); + } + }); + *traits = Some(collected_traits.into_boxed_slice()); + } - { - for (_, child_names) in search_module.children.borrow().iter() { - let def = match child_names.type_ns.def() { - Some(def) => def, - None => continue, - }; - let trait_def_id = match def { - DefTrait(trait_def_id) => trait_def_id, - _ => continue, - }; - if self.trait_item_map.contains_key(&(name, trait_def_id)) { - add_trait_info(&mut found_traits, trait_def_id, name); + for &(trait_name, binding) in traits.as_ref().unwrap().iter() { + let trait_def_id = binding.def().def_id(); + if this.trait_item_map.contains_key(&(name, trait_def_id)) { + let mut import_id = None; + if let NameBindingKind::Import { directive, .. } = binding.kind { + let id = directive.id; + this.maybe_unused_trait_imports.insert(id); + this.add_to_glob_map(id, trait_name); + import_id = Some(id); + } + add_trait_info(&mut found_traits, trait_def_id, import_id, name); } } + }; + search_in_module(self, search_module); + + if let ModuleKind::Block(..) = search_module.kind { + search_module = search_module.parent.unwrap(); + } else { + if !search_module.no_implicit_prelude { + self.prelude.map(|prelude| search_in_module(self, prelude)); + } + break; } + } - // Look for imports. 
- for (_, import) in search_module.import_resolutions.borrow().iter() { - let target = match import.type_ns.target { - None => continue, - Some(ref target) => target, - }; - let did = match target.binding.def() { - Some(DefTrait(trait_def_id)) => trait_def_id, - Some(..) | None => continue, - }; - if self.trait_item_map.contains_key(&(name, did)) { - add_trait_info(&mut found_traits, did, name); - let id = import.type_ns.id; - self.used_imports.insert((id, TypeNS)); - let trait_name = self.get_trait_name(did); - self.record_import_use(id, trait_name); - if let Some(DefId{krate: kid, ..}) = target.target_module.def_id() { - self.used_crates.insert(kid); + found_traits + } + + /// When name resolution fails, this method can be used to look up candidate + /// entities with the expected name. It allows filtering them using the + /// supplied predicate (which should be used to only accept the types of + /// definitions expected e.g. traits). The lookup spans across all crates. + /// + /// NOTE: The method does not look into imports, but this is not a problem, + /// since we report the definitions (thus, the de-aliased imports). 
+ fn lookup_candidates(&mut self, + lookup_name: Name, + namespace: Namespace, + filter_fn: FilterFn) -> SuggestedCandidates + where FilterFn: Fn(Def) -> bool { + + let mut lookup_results = Vec::new(); + let mut worklist = Vec::new(); + worklist.push((self.graph_root, Vec::new(), false)); + + while let Some((in_module, + path_segments, + in_module_is_extern)) = worklist.pop() { + self.populate_module_if_necessary(in_module); + + in_module.for_each_child(|name, ns, name_binding| { + + // avoid imports entirely + if name_binding.is_import() && !name_binding.is_extern_crate() { return; } + + // collect results based on the filter function + if name == lookup_name && ns == namespace { + if filter_fn(name_binding.def()) { + // create the path + let ident = Ident::with_empty_ctxt(name); + let params = PathParameters::none(); + let segment = PathSegment { + identifier: ident, + parameters: params, + }; + let span = name_binding.span; + let mut segms = path_segments.clone(); + segms.push(segment); + let path = Path { + span: span, + global: false, + segments: segms, + }; + // the entity is accessible in the following cases: + // 1. if it's defined in the same crate, it's always + // accessible (since private entities can be made public) + // 2. if it's defined in another crate, it's accessible + // only if both the module is public and the entity is + // declared as public (due to pruning, we don't explore + // outside crate private modules => no need to check this) + if !in_module_is_extern || name_binding.vis == ty::Visibility::Public { + lookup_results.push(path); + } } } - } - match search_module.parent_link { - NoParentLink | ModuleParentLink(..) 
=> break, - BlockParentLink(parent_module, _) => { - search_module = parent_module; + // collect submodules to explore + if let Some(module) = name_binding.module() { + // form the path + let mut path_segments = path_segments.clone(); + path_segments.push(PathSegment { + identifier: Ident::with_empty_ctxt(name), + parameters: PathParameters::none(), + }); + + if !in_module_is_extern || name_binding.vis == ty::Visibility::Public { + // add the module to the lookup + let is_extern = in_module_is_extern || name_binding.is_extern_crate(); + if !worklist.iter().any(|&(m, ..)| m.def() == module.def()) { + worklist.push((module, path_segments, is_extern)); + } + } } - } + }) } - found_traits + SuggestedCandidates { + name: lookup_name.as_str().to_string(), + candidates: lookup_results, + } } fn record_def(&mut self, node_id: NodeId, resolution: PathResolution) { debug!("(recording def) recording {:?} for {}", resolution, node_id); - assert!(match resolution.last_private { - LastImport{..} => false, - _ => true, - }, - "Import should only be used for `use` directives"); + if let Some(prev_res) = self.def_map.insert(node_id, resolution) { + panic!("path resolved multiple times ({:?} before, {:?} now)", prev_res, resolution); + } + } + + fn resolve_visibility(&mut self, vis: &ast::Visibility) -> ty::Visibility { + let (segments, span, id) = match *vis { + ast::Visibility::Public => return ty::Visibility::Public, + ast::Visibility::Crate(_) => return ty::Visibility::Restricted(ast::CRATE_NODE_ID), + ast::Visibility::Restricted { ref path, id } => (&path.segments, path.span, id), + ast::Visibility::Inherited => { + return ty::Visibility::Restricted(self.current_module.normal_ancestor_id.unwrap()); + } + }; - if let Some(prev_res) = self.def_map.borrow_mut().insert(node_id, resolution) { - let span = self.ast_map.opt_span(node_id).unwrap_or(codemap::DUMMY_SP); - self.session.span_bug(span, - &format!("path resolved multiple times ({:?} before, {:?} now)", - prev_res, - 
resolution)); + let path: Vec<_> = segments.iter().map(|seg| seg.identifier).collect(); + let mut path_resolution = err_path_resolution(); + let vis = match self.resolve_path(&path, PathScope::Import, None, Some(span)) { + PathResult::Module(module) => { + path_resolution = PathResolution::new(module.def().unwrap()); + ty::Visibility::Restricted(module.normal_ancestor_id.unwrap()) + } + PathResult::Failed(msg, _) => { + self.session.span_err(span, &format!("failed to resolve module path. {}", msg)); + ty::Visibility::Public + } + _ => ty::Visibility::Public, + }; + self.def_map.insert(id, path_resolution); + if !self.is_accessible(vis) { + let msg = format!("visibilities can only be restricted to ancestor modules"); + self.session.span_err(span, &msg); } + vis + } + + fn is_accessible(&self, vis: ty::Visibility) -> bool { + vis.is_accessible_from(self.current_module.normal_ancestor_id.unwrap(), self) + } + + fn is_accessible_from(&self, vis: ty::Visibility, module: Module<'a>) -> bool { + vis.is_accessible_from(module.normal_ancestor_id.unwrap(), self) } - fn enforce_default_binding_mode(&mut self, - pat: &Pat, - pat_binding_mode: BindingMode, - descr: &str) { - match pat_binding_mode { - BindByValue(_) => {} - BindByRef(..) 
=> { - resolve_error(self, - pat.span, - ResolutionError::CannotUseRefBindingModeWith(descr)); + fn report_errors(&mut self) { + self.report_shadowing_errors(); + let mut reported_spans = FxHashSet(); + + for &AmbiguityError { span, name, b1, b2, lexical } in &self.ambiguity_errors { + if !reported_spans.insert(span) { continue } + let msg1 = format!("`{}` could resolve to the name imported here", name); + let msg2 = format!("`{}` could also resolve to the name imported here", name); + self.session.struct_span_err(span, &format!("`{}` is ambiguous", name)) + .span_note(b1.span, &msg1) + .span_note(b2.span, &msg2) + .note(&if lexical || !b1.is_glob_import() { + "macro-expanded macro imports do not shadow".to_owned() + } else { + format!("consider adding an explicit import of `{}` to disambiguate", name) + }) + .emit(); + } + + for &PrivacyError(span, name, binding) in &self.privacy_errors { + if !reported_spans.insert(span) { continue } + if binding.is_extern_crate() { + // Warn when using an inaccessible extern crate. + let node_id = match binding.kind { + NameBindingKind::Import { directive, .. } => directive.id, + _ => unreachable!(), + }; + let msg = format!("extern crate `{}` is private", name); + self.session.add_lint(lint::builtin::INACCESSIBLE_EXTERN_CRATE, node_id, span, msg); + } else { + let def = binding.def(); + self.session.span_err(span, &format!("{} `{}` is private", def.kind_name(), name)); } } } - // - // Diagnostics - // - // Diagnostics are not particularly efficient, because they're rarely - // hit. 
- // + fn report_shadowing_errors(&mut self) { + for (name, scope) in replace(&mut self.lexical_macro_resolutions, Vec::new()) { + self.resolve_legacy_scope(scope, name, true); + } - #[allow(dead_code)] // useful for debugging - fn dump_module(&mut self, module_: Module<'a>) { - debug!("Dump of module `{}`:", module_to_string(&*module_)); + let mut reported_errors = FxHashSet(); + for binding in replace(&mut self.disallowed_shadowing, Vec::new()) { + if self.resolve_legacy_scope(&binding.parent, binding.name, false).is_some() && + reported_errors.insert((binding.name, binding.span)) { + let msg = format!("`{}` is already in scope", binding.name); + self.session.struct_span_err(binding.span, &msg) + .note("macro-expanded `macro_rules!`s may not shadow \ + existing macros (see RFC 1560)") + .emit(); + } + } + } - debug!("Children:"); - build_reduced_graph::populate_module_if_necessary(self, &module_); - for (&name, _) in module_.children.borrow().iter() { - debug!("* {}", name); + fn report_conflict(&mut self, + parent: Module, + name: Name, + ns: Namespace, + binding: &NameBinding, + old_binding: &NameBinding) { + // Error on the second of two conflicting names + if old_binding.span.lo > binding.span.lo { + return self.report_conflict(parent, name, ns, old_binding, binding); } - debug!("Import resolutions:"); - let import_resolutions = module_.import_resolutions.borrow(); - for (&name, import_resolution) in import_resolutions.iter() { - let value_repr; - match import_resolution.value_ns.target { - None => { - value_repr = "".to_string(); - } - Some(_) => { - value_repr = " value:?".to_string(); - // FIXME #4954 - } + let container = match parent.kind { + ModuleKind::Def(Def::Mod(_), _) => "module", + ModuleKind::Def(Def::Trait(_), _) => "trait", + ModuleKind::Block(..) 
=> "block", + _ => "enum", + }; + + let (participle, noun) = match old_binding.is_import() { + true => ("imported", "import"), + false => ("defined", "definition"), + }; + + let span = binding.span; + + if let Some(s) = self.name_already_seen.get(&name) { + if s == &span { + return; } + } - let type_repr; - match import_resolution.type_ns.target { - None => { - type_repr = "".to_string(); - } - Some(_) => { - type_repr = " type:?".to_string(); - // FIXME #4954 + let msg = { + let kind = match (ns, old_binding.module()) { + (ValueNS, _) => "a value", + (MacroNS, _) => "a macro", + (TypeNS, _) if old_binding.is_extern_crate() => "an extern crate", + (TypeNS, Some(module)) if module.is_normal() => "a module", + (TypeNS, Some(module)) if module.is_trait() => "a trait", + (TypeNS, _) => "a type", + }; + format!("{} named `{}` has already been {} in this {}", + kind, name, participle, container) + }; + + let mut err = match (old_binding.is_extern_crate(), binding.is_extern_crate()) { + (true, true) => { + let mut e = struct_span_err!(self.session, span, E0259, "{}", msg); + e.span_label(span, &format!("`{}` was already imported", name)); + e + }, + (true, _) | (_, true) if binding.is_import() && old_binding.is_import() => { + let mut e = struct_span_err!(self.session, span, E0254, "{}", msg); + e.span_label(span, &"already imported"); + e + }, + (true, _) | (_, true) => { + let mut e = struct_span_err!(self.session, span, E0260, "{}", msg); + e.span_label(span, &format!("`{}` already imported", name)); + e + }, + _ => match (old_binding.is_import(), binding.is_import()) { + (false, false) => { + let mut e = struct_span_err!(self.session, span, E0428, "{}", msg); + e.span_label(span, &format!("already defined")); + e + }, + (true, true) => { + let mut e = struct_span_err!(self.session, span, E0252, "{}", msg); + e.span_label(span, &format!("already imported")); + e + }, + _ => { + let mut e = struct_span_err!(self.session, span, E0255, "{}", msg); + e.span_label(span, 
&format!("`{}` was already imported", name)); + e } - } + }, + }; - debug!("* {}:{}{}", name, value_repr, type_repr); + if old_binding.span != syntax_pos::DUMMY_SP { + err.span_label(old_binding.span, &format!("previous {} of `{}` here", noun, name)); } + err.emit(); + self.name_already_seen.insert(name, span); } } - -fn names_to_string(names: &[Name]) -> String { +fn names_to_string(names: &[Ident]) -> String { let mut first = true; let mut result = String::new(); - for name in names { + for ident in names { if first { first = false } else { result.push_str("::") } - result.push_str(&name.as_str()); + result.push_str(&ident.name.as_str()); } result } fn path_names_to_string(path: &Path, depth: usize) -> String { - let names: Vec = path.segments[..path.segments.len() - depth] - .iter() - .map(|seg| seg.identifier.name) - .collect(); - names_to_string(&names[..]) + let names: Vec<_> = + path.segments[..path.segments.len() - depth].iter().map(|seg| seg.identifier).collect(); + names_to_string(&names) +} + +/// When an entity with a given name is not available in scope, we search for +/// entities with that name in all crates. 
This method allows outputting the +/// results of this search in a programmer-friendly way +fn show_candidates(session: &mut DiagnosticBuilder, + candidates: &SuggestedCandidates) { + + let paths = &candidates.candidates; + + if paths.len() > 0 { + // don't show more than MAX_CANDIDATES results, so + // we're consistent with the trait suggestions + const MAX_CANDIDATES: usize = 5; + + // we want consistent results across executions, but candidates are produced + // by iterating through a hash map, so make sure they are ordered: + let mut path_strings: Vec<_> = paths.into_iter() + .map(|p| path_names_to_string(&p, 0)) + .collect(); + path_strings.sort(); + + // behave differently based on how many candidates we have: + if !paths.is_empty() { + if paths.len() == 1 { + session.help( + &format!("you can import it into scope: `use {};`.", + &path_strings[0]), + ); + } else { + session.help("you can import several candidates \ + into scope (`use ...;`):"); + let count = path_strings.len() as isize - MAX_CANDIDATES as isize + 1; + + for (idx, path_string) in path_strings.iter().enumerate() { + if idx == MAX_CANDIDATES - 1 && count > 1 { + session.help( + &format!(" and {} other candidates", count).to_string(), + ); + break; + } else { + session.help( + &format!(" `{}`", path_string).to_string(), + ); + } + } + } + } + } else { + // nothing found: + session.help( + &format!("no candidates by the name of `{}` found in your \ + project; maybe you misspelled the name or forgot to import \ + an external crate?", candidates.name.to_string()), + ); + }; } /// A somewhat inefficient routine to obtain the name of a module. 
-fn module_to_string<'a>(module: Module<'a>) -> String { +fn module_to_string(module: Module) -> String { let mut names = Vec::new(); - fn collect_mod<'a>(names: &mut Vec, module: Module<'a>) { - match module.parent_link { - NoParentLink => {} - ModuleParentLink(ref module, name) => { - names.push(name); - collect_mod(names, module); - } - BlockParentLink(ref module, _) => { - // danger, shouldn't be ident? - names.push(special_idents::opaque.name); - collect_mod(names, module); + fn collect_mod(names: &mut Vec, module: Module) { + if let ModuleKind::Def(_, name) = module.kind { + if let Some(parent) = module.parent { + names.push(Ident::with_empty_ctxt(name)); + collect_mod(names, parent); } + } else { + // danger, shouldn't be ident? + names.push(Ident::from_str("")); + collect_mod(names, module.parent.unwrap()); } } collect_mod(&mut names, module); @@ -3934,25 +3282,11 @@ fn module_to_string<'a>(module: Module<'a>) -> String { if names.is_empty() { return "???".to_string(); } - names_to_string(&names.into_iter().rev().collect::>()) + names_to_string(&names.into_iter().rev().collect::>()) } fn err_path_resolution() -> PathResolution { - PathResolution { - base_def: DefErr, - last_private: LastMod(AllPublic), - depth: 0, - } -} - - -pub struct CrateMap { - pub def_map: RefCell, - pub freevars: FreevarMap, - pub export_map: ExportMap, - pub trait_map: TraitMap, - pub external_exports: ExternalExports, - pub glob_map: Option, + PathResolution::new(Def::Err) } #[derive(PartialEq,Copy, Clone)] @@ -3961,59 +3295,4 @@ pub enum MakeGlobMap { No, } -/// Entry point to crate resolution. 
-pub fn resolve_crate<'a, 'tcx>(session: &'a Session, - ast_map: &'a hir_map::Map<'tcx>, - make_glob_map: MakeGlobMap) - -> CrateMap { - let krate = ast_map.krate(); - let arenas = Resolver::arenas(); - let mut resolver = create_resolver(session, ast_map, krate, make_glob_map, &arenas, None); - - resolver.resolve_crate(krate); - - check_unused::check_crate(&mut resolver, krate); - - CrateMap { - def_map: resolver.def_map, - freevars: resolver.freevars, - export_map: resolver.export_map, - trait_map: resolver.trait_map, - external_exports: resolver.external_exports, - glob_map: if resolver.make_glob_map { - Some(resolver.glob_map) - } else { - None - }, - } -} - -/// Builds a name resolution walker to be used within this module, -/// or used externally, with an optional callback function. -/// -/// The callback takes a &mut bool which allows callbacks to end a -/// walk when set to true, passing through the rest of the walk, while -/// preserving the ribs + current module. This allows resolve_path -/// calls to be made with the correct scope info. The node in the -/// callback corresponds to the current node in the walk. -pub fn create_resolver<'a, 'tcx>(session: &'a Session, - ast_map: &'a hir_map::Map<'tcx>, - krate: &'a Crate, - make_glob_map: MakeGlobMap, - arenas: &'a ResolverArenas<'a>, - callback: Option bool>>) - -> Resolver<'a, 'tcx> { - let mut resolver = Resolver::new(session, ast_map, make_glob_map, arenas); - - resolver.callback = callback; - - build_reduced_graph::build_reduced_graph(&mut resolver, krate); - session.abort_if_errors(); - - resolve_imports::resolve_imports(&mut resolver); - session.abort_if_errors(); - - resolver -} - __build_diagnostic_array! { librustc_resolve, DIAGNOSTICS } diff --git a/src/librustc_resolve/macros.rs b/src/librustc_resolve/macros.rs new file mode 100644 index 0000000000000..3b34a60c58525 --- /dev/null +++ b/src/librustc_resolve/macros.rs @@ -0,0 +1,405 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use {Module, ModuleKind, NameBinding, NameBindingKind, Resolver, AmbiguityError}; +use Namespace::{self, MacroNS}; +use build_reduced_graph::BuildReducedGraphVisitor; +use resolve_imports::ImportResolver; +use rustc::hir::def_id::{DefId, BUILTIN_MACROS_CRATE, CRATE_DEF_INDEX, DefIndex}; +use rustc::hir::def::{Def, Export}; +use rustc::hir::map::{self, DefCollector}; +use rustc::ty; +use std::cell::Cell; +use std::rc::Rc; +use syntax::ast::{self, Name}; +use syntax::errors::DiagnosticBuilder; +use syntax::ext::base::{self, Determinacy, MultiModifier, MultiDecorator}; +use syntax::ext::base::{NormalTT, SyntaxExtension}; +use syntax::ext::expand::Expansion; +use syntax::ext::hygiene::Mark; +use syntax::ext::tt::macro_rules; +use syntax::fold::Folder; +use syntax::ptr::P; +use syntax::util::lev_distance::find_best_match_for_name; +use syntax::visit::Visitor; +use syntax_pos::{Span, DUMMY_SP}; + +#[derive(Clone)] +pub struct InvocationData<'a> { + pub module: Cell>, + pub def_index: DefIndex, + // True if this expansion is in a `const_integer` position, for example `[u32; m!()]`. + // c.f. `DefCollector::visit_ast_const_integer`. + pub const_integer: bool, + // The scope in which the invocation path is resolved. + pub legacy_scope: Cell>, + // The smallest scope that includes this invocation's expansion, + // or `Empty` if this invocation has not been expanded yet. 
+ pub expansion: Cell>, +} + +impl<'a> InvocationData<'a> { + pub fn root(graph_root: Module<'a>) -> Self { + InvocationData { + module: Cell::new(graph_root), + def_index: CRATE_DEF_INDEX, + const_integer: false, + legacy_scope: Cell::new(LegacyScope::Empty), + expansion: Cell::new(LegacyScope::Empty), + } + } +} + +#[derive(Copy, Clone)] +pub enum LegacyScope<'a> { + Empty, + Invocation(&'a InvocationData<'a>), // The scope of the invocation, not including its expansion + Expansion(&'a InvocationData<'a>), // The scope of the invocation, including its expansion + Binding(&'a LegacyBinding<'a>), +} + +pub struct LegacyBinding<'a> { + pub parent: Cell>, + pub name: ast::Name, + ext: Rc, + pub span: Span, +} + +pub enum MacroBinding<'a> { + Legacy(&'a LegacyBinding<'a>), + Modern(&'a NameBinding<'a>), +} + +impl<'a> base::Resolver for Resolver<'a> { + fn next_node_id(&mut self) -> ast::NodeId { + self.session.next_node_id() + } + + fn get_module_scope(&mut self, id: ast::NodeId) -> Mark { + let mark = Mark::fresh(); + let module = self.module_map[&id]; + self.invocations.insert(mark, self.arenas.alloc_invocation_data(InvocationData { + module: Cell::new(module), + def_index: module.def_id().unwrap().index, + const_integer: false, + legacy_scope: Cell::new(LegacyScope::Empty), + expansion: Cell::new(LegacyScope::Empty), + })); + mark + } + + fn eliminate_crate_var(&mut self, item: P) -> P { + struct EliminateCrateVar<'b, 'a: 'b>(&'b mut Resolver<'a>); + + impl<'a, 'b> Folder for EliminateCrateVar<'a, 'b> { + fn fold_path(&mut self, mut path: ast::Path) -> ast::Path { + let ident = path.segments[0].identifier; + if ident.name == "$crate" { + path.global = true; + let module = self.0.resolve_crate_var(ident.ctxt); + if module.is_local() { + path.segments.remove(0); + } else { + path.segments[0].identifier = match module.kind { + ModuleKind::Def(_, name) => ast::Ident::with_empty_ctxt(name), + _ => unreachable!(), + }; + } + } + path + } + } + + 
EliminateCrateVar(self).fold_item(item).expect_one("") + } + + fn visit_expansion(&mut self, mark: Mark, expansion: &Expansion) { + let invocation = self.invocations[&mark]; + self.collect_def_ids(invocation, expansion); + + self.current_module = invocation.module.get(); + self.current_module.unresolved_invocations.borrow_mut().remove(&mark); + let mut visitor = BuildReducedGraphVisitor { + resolver: self, + legacy_scope: LegacyScope::Invocation(invocation), + expansion: mark, + }; + expansion.visit_with(&mut visitor); + self.current_module.unresolved_invocations.borrow_mut().remove(&mark); + invocation.expansion.set(visitor.legacy_scope); + } + + fn add_macro(&mut self, scope: Mark, mut def: ast::MacroDef, export: bool) { + if def.ident.name == "macro_rules" { + self.session.span_err(def.span, "user-defined macros may not be named `macro_rules`"); + } + + let invocation = self.invocations[&scope]; + let binding = self.arenas.alloc_legacy_binding(LegacyBinding { + parent: Cell::new(invocation.legacy_scope.get()), + name: def.ident.name, + ext: Rc::new(macro_rules::compile(&self.session.parse_sess, &def)), + span: def.span, + }); + invocation.legacy_scope.set(LegacyScope::Binding(binding)); + self.macro_names.insert(def.ident.name); + + if export { + def.id = self.next_node_id(); + DefCollector::new(&mut self.definitions).with_parent(CRATE_DEF_INDEX, |collector| { + collector.visit_macro_def(&def) + }); + self.macro_exports.push(Export { + name: def.ident.name, + def: Def::Macro(self.definitions.local_def_id(def.id)), + }); + self.exported_macros.push(def); + } + } + + fn add_ext(&mut self, ident: ast::Ident, ext: Rc) { + if let NormalTT(..) 
= *ext { + self.macro_names.insert(ident.name); + } + let def_id = DefId { + krate: BUILTIN_MACROS_CRATE, + index: DefIndex::new(self.macro_map.len()), + }; + self.macro_map.insert(def_id, ext); + let binding = self.arenas.alloc_name_binding(NameBinding { + kind: NameBindingKind::Def(Def::Macro(def_id)), + span: DUMMY_SP, + vis: ty::Visibility::PrivateExternal, + expansion: Mark::root(), + }); + self.builtin_macros.insert(ident.name, binding); + } + + fn add_expansions_at_stmt(&mut self, id: ast::NodeId, macros: Vec) { + self.macros_at_scope.insert(id, macros); + } + + fn resolve_imports(&mut self) { + ImportResolver { resolver: self }.resolve_imports() + } + + fn find_attr_invoc(&mut self, attrs: &mut Vec) -> Option { + for i in 0..attrs.len() { + match self.builtin_macros.get(&attrs[i].name()).cloned() { + Some(binding) => match *self.get_macro(binding) { + MultiModifier(..) | MultiDecorator(..) | SyntaxExtension::AttrProcMacro(..) => { + return Some(attrs.remove(i)) + } + _ => {} + }, + None => {} + } + } + None + } + + fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, force: bool) + -> Result, Determinacy> { + if path.segments.len() > 1 || path.global || !path.segments[0].parameters.is_empty() { + self.session.span_err(path.span, "expected macro name without module separators"); + return Err(Determinacy::Determined); + } + let name = path.segments[0].identifier.name; + + let invocation = self.invocations[&scope]; + self.current_module = invocation.module.get(); + let result = match self.resolve_legacy_scope(&invocation.legacy_scope, name, false) { + Some(MacroBinding::Legacy(binding)) => Ok(binding.ext.clone()), + Some(MacroBinding::Modern(binding)) => Ok(self.get_macro(binding)), + None => match self.resolve_in_item_lexical_scope(name, MacroNS, None) { + Some(binding) => Ok(self.get_macro(binding)), + None => return Err(if force { + let msg = format!("macro undefined: '{}!'", name); + let mut err = self.session.struct_span_err(path.span, &msg); + 
self.suggest_macro_name(&name.as_str(), &mut err); + err.emit(); + Determinacy::Determined + } else { + Determinacy::Undetermined + }), + }, + }; + + if self.use_extern_macros { + self.current_module.legacy_macro_resolutions.borrow_mut() + .push((scope, name, path.span)); + } + result + } +} + +impl<'a> Resolver<'a> { + // Resolve the name in the module's lexical scope, excluding non-items. + fn resolve_in_item_lexical_scope(&mut self, + name: Name, + ns: Namespace, + record_used: Option) + -> Option<&'a NameBinding<'a>> { + let mut module = self.current_module; + let mut potential_expanded_shadower = None; + loop { + // Since expanded macros may not shadow the lexical scope (enforced below), + // we can ignore unresolved invocations (indicated by the penultimate argument). + match self.resolve_name_in_module(module, name, ns, true, record_used) { + Ok(binding) => { + let span = match record_used { + Some(span) => span, + None => return Some(binding), + }; + if let Some(shadower) = potential_expanded_shadower { + self.ambiguity_errors.push(AmbiguityError { + span: span, name: name, b1: shadower, b2: binding, lexical: true, + }); + return Some(shadower); + } else if binding.expansion == Mark::root() { + return Some(binding); + } else { + potential_expanded_shadower = Some(binding); + } + }, + Err(Determinacy::Undetermined) => return None, + Err(Determinacy::Determined) => {} + } + + match module.kind { + ModuleKind::Block(..) => module = module.parent.unwrap(), + ModuleKind::Def(..) 
=> return potential_expanded_shadower, + } + } + } + + pub fn resolve_legacy_scope(&mut self, + mut scope: &'a Cell>, + name: Name, + record_used: bool) + -> Option> { + let mut possible_time_travel = None; + let mut relative_depth: u32 = 0; + let mut binding = None; + loop { + match scope.get() { + LegacyScope::Empty => break, + LegacyScope::Expansion(invocation) => { + match invocation.expansion.get() { + LegacyScope::Invocation(_) => scope.set(invocation.legacy_scope.get()), + LegacyScope::Empty => { + if possible_time_travel.is_none() { + possible_time_travel = Some(scope); + } + scope = &invocation.legacy_scope; + } + _ => { + relative_depth += 1; + scope = &invocation.expansion; + } + } + } + LegacyScope::Invocation(invocation) => { + relative_depth = relative_depth.saturating_sub(1); + scope = &invocation.legacy_scope; + } + LegacyScope::Binding(potential_binding) => { + if potential_binding.name == name { + if (!self.use_extern_macros || record_used) && relative_depth > 0 { + self.disallowed_shadowing.push(potential_binding); + } + binding = Some(potential_binding); + break + } + scope = &potential_binding.parent; + } + }; + } + + let binding = match binding { + Some(binding) => MacroBinding::Legacy(binding), + None => match self.builtin_macros.get(&name).cloned() { + Some(binding) => MacroBinding::Modern(binding), + None => return None, + }, + }; + + if !self.use_extern_macros { + if let Some(scope) = possible_time_travel { + // Check for disallowed shadowing later + self.lexical_macro_resolutions.push((name, scope)); + } + } + + Some(binding) + } + + pub fn finalize_current_module_macro_resolutions(&mut self) { + let module = self.current_module; + for &(mark, name, span) in module.legacy_macro_resolutions.borrow().iter() { + let legacy_scope = &self.invocations[&mark].legacy_scope; + let legacy_resolution = self.resolve_legacy_scope(legacy_scope, name, true); + let resolution = self.resolve_in_item_lexical_scope(name, MacroNS, Some(span)); + let 
(legacy_resolution, resolution) = match (legacy_resolution, resolution) { + (Some(legacy_resolution), Some(resolution)) => (legacy_resolution, resolution), + _ => continue, + }; + let (legacy_span, participle) = match legacy_resolution { + MacroBinding::Modern(binding) if binding.def() == resolution.def() => continue, + MacroBinding::Modern(binding) => (binding.span, "imported"), + MacroBinding::Legacy(binding) => (binding.span, "defined"), + }; + let msg1 = format!("`{}` could resolve to the macro {} here", name, participle); + let msg2 = format!("`{}` could also resolve to the macro imported here", name); + self.session.struct_span_err(span, &format!("`{}` is ambiguous", name)) + .span_note(legacy_span, &msg1) + .span_note(resolution.span, &msg2) + .emit(); + } + } + + fn suggest_macro_name(&mut self, name: &str, err: &mut DiagnosticBuilder<'a>) { + if let Some(suggestion) = find_best_match_for_name(self.macro_names.iter(), name, None) { + if suggestion != name { + err.help(&format!("did you mean `{}!`?", suggestion)); + } else { + err.help(&format!("have you added the `#[macro_use]` on the module/import?")); + } + } + } + + fn collect_def_ids(&mut self, invocation: &'a InvocationData<'a>, expansion: &Expansion) { + let Resolver { ref mut invocations, arenas, graph_root, .. } = *self; + let InvocationData { def_index, const_integer, .. 
} = *invocation; + + let visit_macro_invoc = &mut |invoc: map::MacroInvocationData| { + invocations.entry(invoc.mark).or_insert_with(|| { + arenas.alloc_invocation_data(InvocationData { + def_index: invoc.def_index, + const_integer: invoc.const_integer, + module: Cell::new(graph_root), + expansion: Cell::new(LegacyScope::Empty), + legacy_scope: Cell::new(LegacyScope::Empty), + }) + }); + }; + + let mut def_collector = DefCollector::new(&mut self.definitions); + def_collector.visit_macro_invoc = Some(visit_macro_invoc); + def_collector.with_parent(def_index, |def_collector| { + if const_integer { + if let Expansion::Expr(ref expr) = *expansion { + def_collector.visit_ast_const_integer(expr); + } + } + expansion.visit_with(def_collector) + }); + } +} diff --git a/src/librustc_resolve/resolve_imports.rs b/src/librustc_resolve/resolve_imports.rs index abaf45cb1704d..2a803d72fd1bd 100644 --- a/src/librustc_resolve/resolve_imports.rs +++ b/src/librustc_resolve/resolve_imports.rs @@ -10,1077 +10,747 @@ use self::ImportDirectiveSubclass::*; -use DefModifiers; -use Module; -use Namespace::{self, TypeNS, ValueNS}; -use {NameBindings, NameBinding}; -use NamespaceResult::{BoundResult, UnboundResult, UnknownResult}; -use NamespaceResult; -use NameSearchType; -use ResolveResult; +use {Module, PerNS}; +use Namespace::{self, TypeNS, MacroNS}; +use {NameBinding, NameBindingKind, PathResult, PathScope, PrivacyError, ToNameBinding}; use Resolver; -use UseLexicalScopeFlag; use {names_to_string, module_to_string}; use {resolve_error, ResolutionError}; -use build_reduced_graph; +use rustc::ty; +use rustc::lint::builtin::PRIVATE_IN_PUBLIC; +use rustc::hir::def::*; -use rustc::lint; -use rustc::middle::def::*; -use rustc::middle::def_id::DefId; -use rustc::middle::privacy::*; - -use syntax::ast::{NodeId, Name}; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::Span; +use syntax::ast::{Ident, NodeId, Name}; +use syntax::ext::base::Determinacy::{self, Determined, Undetermined}; +use 
syntax::ext::hygiene::Mark; +use syntax::symbol::keywords; use syntax::util::lev_distance::find_best_match_for_name; +use syntax_pos::Span; -use std::mem::replace; +use std::cell::{Cell, RefCell}; +use std::mem; /// Contains data for specific types of import directives. -#[derive(Copy, Clone,Debug)] -pub enum ImportDirectiveSubclass { - SingleImport(Name /* target */, Name /* source */), - GlobImport, -} - -/// Whether an import can be shadowed by another import. -#[derive(Debug,PartialEq,Clone,Copy)] -pub enum Shadowable { - Always, - Never, +#[derive(Clone, Debug)] +pub enum ImportDirectiveSubclass<'a> { + SingleImport { + target: Name, + source: Name, + result: PerNS, Determinacy>>>, + }, + GlobImport { + is_prelude: bool, + max_vis: Cell, // The visibility of the greatest reexport. + // n.b. `max_vis` is only used in `finalize_import` to check for reexport errors. + }, + ExternCrate, } /// One import directive. -#[derive(Debug)] -pub struct ImportDirective { - pub module_path: Vec, - pub subclass: ImportDirectiveSubclass, - pub span: Span, +#[derive(Debug,Clone)] +pub struct ImportDirective<'a> { pub id: NodeId, - pub is_public: bool, // see note in ImportResolutionPerNamespace about how to use this - pub shadowable: Shadowable, + pub parent: Module<'a>, + pub module_path: Vec, + pub imported_module: Cell>>, // the resolution of `module_path` + pub subclass: ImportDirectiveSubclass<'a>, + pub span: Span, + pub vis: Cell, + pub expansion: Mark, } -impl ImportDirective { - pub fn new(module_path: Vec, - subclass: ImportDirectiveSubclass, - span: Span, - id: NodeId, - is_public: bool, - shadowable: Shadowable) - -> ImportDirective { - ImportDirective { - module_path: module_path, - subclass: subclass, - span: span, - id: id, - is_public: is_public, - shadowable: shadowable, - } +impl<'a> ImportDirective<'a> { + pub fn is_glob(&self) -> bool { + match self.subclass { ImportDirectiveSubclass::GlobImport { .. 
} => true, _ => false } } } -/// The item that an import resolves to. -#[derive(Clone,Debug)] -pub struct Target<'a> { - pub target_module: Module<'a>, - pub binding: NameBinding<'a>, - pub shadowable: Shadowable, +#[derive(Clone, Default)] +/// Records information about the resolution of a name in a namespace of a module. +pub struct NameResolution<'a> { + /// The single imports that define the name in the namespace. + single_imports: SingleImports<'a>, + /// The least shadowable known binding for this name, or None if there are no known bindings. + pub binding: Option<&'a NameBinding<'a>>, } -impl<'a> Target<'a> { - pub fn new(target_module: Module<'a>, binding: NameBinding<'a>, shadowable: Shadowable) - -> Self { - Target { - target_module: target_module, - binding: binding, - shadowable: shadowable, - } - } +#[derive(Clone, Debug)] +enum SingleImports<'a> { + /// No single imports can define the name in the namespace. + None, + /// Only the given single import can define the name in the namespace. + MaybeOne(&'a ImportDirective<'a>), + /// At least one single import will define the name in the namespace. + AtLeastOne, } -#[derive(Debug)] -/// An ImportResolutionPerNamespace records what we know about an imported name. -/// More specifically, it records the number of unresolved `use` directives that import the name, -/// and for each namespace, it records the `use` directive importing the name in the namespace -/// and the `Target` to which the name in the namespace resolves (if applicable). -/// Different `use` directives may import the same name in different namespaces. -pub struct ImportResolutionPerNamespace<'a> { - // When outstanding_references reaches zero, outside modules can count on the targets being - // correct. Before then, all bets are off; future `use` directives could override the name. 
- // Since shadowing is forbidden, the only way outstanding_references > 1 in a legal program - // is if the name is imported by exactly two `use` directives, one of which resolves to a - // value and the other of which resolves to a type. - pub outstanding_references: usize, - pub type_ns: ImportResolution<'a>, - pub value_ns: ImportResolution<'a>, +impl<'a> Default for SingleImports<'a> { + /// Creates a `SingleImports<'a>` of None type. + fn default() -> Self { + SingleImports::None + } } -/// Records what we know about an imported name in a namespace (see `ImportResolutionPerNamespace`). -#[derive(Clone,Debug)] -pub struct ImportResolution<'a> { - /// Whether the name in the namespace was imported with a `use` or a `pub use`. - pub is_public: bool, - - /// Resolution of the name in the namespace - pub target: Option>, - - /// The source node of the `use` directive - pub id: NodeId, -} +impl<'a> SingleImports<'a> { + fn add_directive(&mut self, directive: &'a ImportDirective<'a>) { + match *self { + SingleImports::None => *self = SingleImports::MaybeOne(directive), + // If two single imports can define the name in the namespace, we can assume that at + // least one of them will define it since otherwise both would have to define only one + // namespace, leading to a duplicate error. 
+ SingleImports::MaybeOne(_) => *self = SingleImports::AtLeastOne, + SingleImports::AtLeastOne => {} + }; + } -impl<'a> ::std::ops::Index for ImportResolutionPerNamespace<'a> { - type Output = ImportResolution<'a>; - fn index(&self, ns: Namespace) -> &ImportResolution<'a> { - match ns { TypeNS => &self.type_ns, ValueNS => &self.value_ns } + fn directive_failed(&mut self) { + match *self { + SingleImports::None => unreachable!(), + SingleImports::MaybeOne(_) => *self = SingleImports::None, + SingleImports::AtLeastOne => {} + } } } -impl<'a> ::std::ops::IndexMut for ImportResolutionPerNamespace<'a> { - fn index_mut(&mut self, ns: Namespace) -> &mut ImportResolution<'a> { - match ns { TypeNS => &mut self.type_ns, ValueNS => &mut self.value_ns } +impl<'a> NameResolution<'a> { + // Returns the binding for the name if it is known or None if it not known. + fn binding(&self) -> Option<&'a NameBinding<'a>> { + self.binding.and_then(|binding| match self.single_imports { + SingleImports::None => Some(binding), + _ if !binding.is_glob_import() => Some(binding), + _ => None, // The binding could be shadowed by a single import, so it is not known. + }) } } -impl<'a> ImportResolutionPerNamespace<'a> { - pub fn new(id: NodeId, is_public: bool) -> Self { - let resolution = ImportResolution { id: id, is_public: is_public, target: None }; - ImportResolutionPerNamespace { - outstanding_references: 0, type_ns: resolution.clone(), value_ns: resolution, - } +impl<'a> Resolver<'a> { + fn resolution(&self, module: Module<'a>, name: Name, ns: Namespace) + -> &'a RefCell> { + *module.resolutions.borrow_mut().entry((name, ns)) + .or_insert_with(|| self.arenas.alloc_name_resolution()) } - pub fn shadowable(&self, namespace: Namespace) -> Shadowable { - match self[namespace].target { - Some(ref target) => target.shadowable, - None => Shadowable::Always, - } - } -} + /// Attempts to resolve the supplied name in the given module for the given namespace. 
+ /// If successful, returns the binding corresponding to the name. + /// Invariant: if `record_used` is `Some`, import resolution must be complete. + pub fn resolve_name_in_module(&mut self, + module: Module<'a>, + name: Name, + ns: Namespace, + ignore_unresolved_invocations: bool, + record_used: Option) + -> Result<&'a NameBinding<'a>, Determinacy> { + self.populate_module_if_necessary(module); + + let resolution = self.resolution(module, name, ns); + let resolution = match resolution.borrow_state() { + ::std::cell::BorrowState::Unused => resolution.borrow_mut(), + _ => return Err(Determined), // This happens when there is a cycle of imports + }; -struct ImportResolvingError { - span: Span, - path: String, - help: String, -} + if let Some(span) = record_used { + if let Some(binding) = resolution.binding { + if self.record_use(name, ns, binding, span) { + return Ok(self.dummy_binding); + } + if !self.is_accessible(binding.vis) { + self.privacy_errors.push(PrivacyError(span, name, binding)); + } + } -struct ImportResolver<'a, 'b: 'a, 'tcx: 'b> { - resolver: &'a mut Resolver<'b, 'tcx>, -} + return resolution.binding.ok_or(Determined); + } -impl<'a, 'b:'a, 'tcx:'b> ImportResolver<'a, 'b, 'tcx> { - // Import resolution - // - // This is a fixed-point algorithm. We resolve imports until our efforts - // are stymied by an unresolved import; then we bail out of the current - // module and continue. We terminate successfully once no more imports - // remain or unsuccessfully when no forward progress in resolving imports - // is made. + let check_usable = |this: &mut Self, binding: &'a NameBinding<'a>| { + // `extern crate` are always usable for backwards compatability, see issue #37020. + let usable = this.is_accessible(binding.vis) || binding.is_extern_crate(); + if usable { Ok(binding) } else { Err(Determined) } + }; - /// Resolves all imports for the crate. This method performs the fixed- - /// point iteration. 
- fn resolve_imports(&mut self) { - let mut i = 0; - let mut prev_unresolved_imports = 0; - loop { - debug!("(resolving imports) iteration {}, {} imports left", - i, - self.resolver.unresolved_imports); - - let module_root = self.resolver.graph_root; - let errors = self.resolve_imports_for_module_subtree(module_root); - - if self.resolver.unresolved_imports == 0 { - debug!("(resolving imports) success"); - break; + // Items and single imports are not shadowable. + if let Some(binding) = resolution.binding { + if !binding.is_glob_import() { + return check_usable(self, binding); } + } - if self.resolver.unresolved_imports == prev_unresolved_imports { - // resolving failed - if errors.len() > 0 { - for e in errors { - resolve_error(self.resolver, - e.span, - ResolutionError::UnresolvedImport(Some((&e.path, &e.help)))); - } - } else { - // Report unresolved imports only if no hard error was already reported - // to avoid generating multiple errors on the same import. - // Imports that are still indeterminate at this point are actually blocked - // by errored imports, so there is no point reporting them. - self.resolver.report_unresolved_imports(module_root); + // Check if a single import can still define the name. + match resolution.single_imports { + SingleImports::AtLeastOne => return Err(Undetermined), + SingleImports::MaybeOne(directive) if self.is_accessible(directive.vis.get()) => { + let module = match directive.imported_module.get() { + Some(module) => module, + None => return Err(Undetermined), + }; + let name = match directive.subclass { + SingleImport { source, .. 
} => source, + _ => unreachable!(), + }; + match self.resolve_name_in_module(module, name, ns, false, None) { + Err(Determined) => {} + _ => return Err(Undetermined), } - break; } + SingleImports::MaybeOne(_) | SingleImports::None => {}, + } - i += 1; - prev_unresolved_imports = self.resolver.unresolved_imports; + let no_unresolved_invocations = + ignore_unresolved_invocations || module.unresolved_invocations.borrow().is_empty(); + match resolution.binding { + // In `MacroNS`, expanded bindings do not shadow (enforced in `try_define`). + Some(binding) if no_unresolved_invocations || ns == MacroNS => + return check_usable(self, binding), + None if no_unresolved_invocations => {} + _ => return Err(Undetermined), } - } - /// Attempts to resolve imports for the given module and all of its - /// submodules. - fn resolve_imports_for_module_subtree(&mut self, - module_: Module<'b>) - -> Vec { - let mut errors = Vec::new(); - debug!("(resolving imports for module subtree) resolving {}", - module_to_string(&*module_)); - let orig_module = replace(&mut self.resolver.current_module, module_); - errors.extend(self.resolve_imports_for_module(module_)); - self.resolver.current_module = orig_module; - - build_reduced_graph::populate_module_if_necessary(self.resolver, &module_); - for (_, child_node) in module_.children.borrow().iter() { - match child_node.type_ns.module() { - None => { - // Nothing to do. 
- } - Some(child_module) => { - errors.extend(self.resolve_imports_for_module_subtree(child_module)); + // Check if the globs are determined + for directive in module.globs.borrow().iter() { + if self.is_accessible(directive.vis.get()) { + if let Some(module) = directive.imported_module.get() { + let result = self.resolve_name_in_module(module, name, ns, false, None); + if let Err(Undetermined) = result { + return Err(Undetermined); + } + } else { + return Err(Undetermined); } } } - for (_, child_module) in module_.anonymous_children.borrow().iter() { - errors.extend(self.resolve_imports_for_module_subtree(child_module)); - } - - errors + Err(Determined) } - /// Attempts to resolve imports for the given module only. - fn resolve_imports_for_module(&mut self, module: Module<'b>) -> Vec { - let mut errors = Vec::new(); + // Add an import directive to the current module. + pub fn add_import_directive(&mut self, + module_path: Vec, + subclass: ImportDirectiveSubclass<'a>, + span: Span, + id: NodeId, + vis: ty::Visibility, + expansion: Mark) { + let current_module = self.current_module; + let directive = self.arenas.alloc_import_directive(ImportDirective { + parent: current_module, + module_path: module_path, + imported_module: Cell::new(None), + subclass: subclass, + span: span, + id: id, + vis: Cell::new(vis), + expansion: expansion, + }); - if module.all_imports_resolved() { - debug!("(resolving imports for module) all imports resolved for {}", - module_to_string(&*module)); - return errors; + self.indeterminate_imports.push(directive); + match directive.subclass { + SingleImport { target, .. } => { + self.per_ns(|this, ns| { + let mut resolution = this.resolution(current_module, target, ns).borrow_mut(); + resolution.single_imports.add_directive(directive); + }); + } + // We don't add prelude imports to the globs since they only affect lexical scopes, + // which are not relevant to import resolution. + GlobImport { is_prelude: true, .. } => {} + GlobImport { .. 
} => self.current_module.globs.borrow_mut().push(directive), + _ => unreachable!(), } + } - let mut imports = module.imports.borrow_mut(); - let import_count = imports.len(); - let mut indeterminate_imports = Vec::new(); - while module.resolved_import_count.get() + indeterminate_imports.len() < import_count { - let import_index = module.resolved_import_count.get(); - match self.resolve_import_for_module(module, &imports[import_index]) { - ResolveResult::Failed(err) => { - let import_directive = &imports[import_index]; - let (span, help) = match err { - Some((span, msg)) => (span, format!(". {}", msg)), - None => (import_directive.span, String::new()), - }; - errors.push(ImportResolvingError { - span: span, - path: import_path_to_string(&import_directive.module_path, - import_directive.subclass), - help: help, - }); - } - ResolveResult::Indeterminate => {} - ResolveResult::Success(()) => { - // count success - module.resolved_import_count - .set(module.resolved_import_count.get() + 1); - continue; - } + // Given a binding and an import directive that resolves to it, + // return the corresponding binding defined by the import directive. + pub fn import(&mut self, binding: &'a NameBinding<'a>, directive: &'a ImportDirective<'a>) + -> NameBinding<'a> { + let vis = if binding.pseudo_vis().is_at_least(directive.vis.get(), self) || + !directive.is_glob() && binding.is_extern_crate() { // c.f. `PRIVATE_IN_PUBLIC` + directive.vis.get() + } else { + binding.pseudo_vis() + }; + + if let GlobImport { ref max_vis, .. 
} = directive.subclass { + if vis == directive.vis.get() || vis.is_at_least(max_vis.get(), self) { + max_vis.set(vis) } - // This resolution was not successful, keep it for later - indeterminate_imports.push(imports.swap_remove(import_index)); + } + NameBinding { + kind: NameBindingKind::Import { + binding: binding, + directive: directive, + used: Cell::new(false), + }, + span: directive.span, + vis: vis, + expansion: directive.expansion, } + } - imports.extend(indeterminate_imports); + // Define the name or return the existing binding if there is a collision. + pub fn try_define(&mut self, module: Module<'a>, name: Name, ns: Namespace, binding: T) + -> Result<(), &'a NameBinding<'a>> + where T: ToNameBinding<'a> + { + let binding = self.arenas.alloc_name_binding(binding.to_name_binding()); + self.update_resolution(module, name, ns, |this, resolution| { + if let Some(old_binding) = resolution.binding { + if binding.is_glob_import() { + if !old_binding.is_glob_import() && + !(ns == MacroNS && old_binding.expansion != Mark::root()) { + } else if binding.def() != old_binding.def() { + resolution.binding = Some(this.ambiguity(old_binding, binding)); + } else if !old_binding.vis.is_at_least(binding.vis, this) { + // We are glob-importing the same item but with greater visibility. + resolution.binding = Some(binding); + } + } else if old_binding.is_glob_import() { + if ns == MacroNS && binding.expansion != Mark::root() && + binding.def() != old_binding.def() { + resolution.binding = Some(this.ambiguity(binding, old_binding)); + } else { + resolution.binding = Some(binding); + } + } else { + return Err(old_binding); + } + } else { + resolution.binding = Some(binding); + } - errors + Ok(()) + }) } - /// Attempts to resolve the given import. 
The return value indicates - /// failure if we're certain the name does not exist, indeterminate if we - /// don't know whether the name exists at the moment due to other - /// currently-unresolved imports, or success if we know the name exists. - /// If successful, the resolved bindings are written into the module. - fn resolve_import_for_module(&mut self, - module_: Module<'b>, - import_directive: &ImportDirective) - -> ResolveResult<()> { - let mut resolution_result = ResolveResult::Failed(None); - let module_path = &import_directive.module_path; - - debug!("(resolving import for module) resolving import `{}::...` in `{}`", - names_to_string(&module_path[..]), - module_to_string(&*module_)); + pub fn ambiguity(&mut self, b1: &'a NameBinding<'a>, b2: &'a NameBinding<'a>) + -> &'a NameBinding<'a> { + self.arenas.alloc_name_binding(NameBinding { + kind: NameBindingKind::Ambiguity { b1: b1, b2: b2 }, + vis: if b1.vis.is_at_least(b2.vis, self) { b1.vis } else { b2.vis }, + span: b1.span, + expansion: Mark::root(), + }) + } - // First, resolve the module path for the directive, if necessary. - let container = if module_path.is_empty() { - // Use the crate root. - Some((self.resolver.graph_root, LastMod(AllPublic))) - } else { - match self.resolver.resolve_module_path(module_, - &module_path[..], - UseLexicalScopeFlag::DontUseLexicalScope, - import_directive.span, - NameSearchType::ImportSearch) { - ResolveResult::Failed(err) => { - resolution_result = ResolveResult::Failed(err); - None - } - ResolveResult::Indeterminate => { - resolution_result = ResolveResult::Indeterminate; - None + // Use `f` to mutate the resolution of the name in the module. + // If the resolution becomes a success, define it in the module's glob importers. 
+ fn update_resolution(&mut self, module: Module<'a>, name: Name, ns: Namespace, f: F) -> T + where F: FnOnce(&mut Resolver<'a>, &mut NameResolution<'a>) -> T + { + // Ensure that `resolution` isn't borrowed when defining in the module's glob importers, + // during which the resolution might end up getting re-defined via a glob cycle. + let (binding, t) = { + let mut resolution = &mut *self.resolution(module, name, ns).borrow_mut(); + let old_binding = resolution.binding(); + + let t = f(self, resolution); + + match resolution.binding() { + _ if old_binding.is_some() => return t, + None => return t, + Some(binding) => match old_binding { + Some(old_binding) if old_binding as *const _ == binding as *const _ => return t, + _ => (binding, t), } - ResolveResult::Success(container) => Some(container), } }; - match container { - None => {} - Some((containing_module, lp)) => { - // We found the module that the target is contained - // within. Attempt to resolve the import within it. - - match import_directive.subclass { - SingleImport(target, source) => { - resolution_result = self.resolve_single_import(&module_, - containing_module, - target, - source, - import_directive, - lp); - } - GlobImport => { - resolution_result = self.resolve_glob_import(&module_, - containing_module, - import_directive, - lp); - } - } + // Define `binding` in `module`s glob importers. + for directive in module.glob_importers.borrow_mut().iter() { + if self.is_accessible_from(binding.vis, directive.parent) { + let imported_binding = self.import(binding, directive); + let _ = self.try_define(directive.parent, name, ns, imported_binding); } } - // Decrement the count of unresolved imports. - match resolution_result { - ResolveResult::Success(()) => { - assert!(self.resolver.unresolved_imports >= 1); - self.resolver.unresolved_imports -= 1; - } - _ => { - // Nothing to do here; just return the error. 
- } + t + } + + // Define a "dummy" resolution containing a Def::Err as a placeholder for a + // failed resolution + fn import_dummy_binding(&mut self, directive: &'a ImportDirective<'a>) { + if let SingleImport { target, .. } = directive.subclass { + let dummy_binding = self.dummy_binding; + let dummy_binding = self.import(dummy_binding, directive); + self.per_ns(|this, ns| { + let _ = this.try_define(directive.parent, target, ns, dummy_binding.clone()); + }); } + } +} - // Decrement the count of unresolved globs if necessary. But only if - // the resolution result is a success -- other cases will - // be handled by the main loop. +pub struct ImportResolver<'a, 'b: 'a> { + pub resolver: &'a mut Resolver<'b>, +} - if resolution_result.success() { - match import_directive.subclass { - GlobImport => { - module_.dec_glob_count(); - if import_directive.is_public { - module_.dec_pub_glob_count(); - } - } - SingleImport(..) => { - // Ignore. +impl<'a, 'b: 'a> ::std::ops::Deref for ImportResolver<'a, 'b> { + type Target = Resolver<'b>; + fn deref(&self) -> &Resolver<'b> { + self.resolver + } +} + +impl<'a, 'b: 'a> ::std::ops::DerefMut for ImportResolver<'a, 'b> { + fn deref_mut(&mut self) -> &mut Resolver<'b> { + self.resolver + } +} + +impl<'a, 'b: 'a> ty::NodeIdTree for ImportResolver<'a, 'b> { + fn is_descendant_of(&self, node: NodeId, ancestor: NodeId) -> bool { + self.resolver.is_descendant_of(node, ancestor) + } +} + +impl<'a, 'b:'a> ImportResolver<'a, 'b> { + // Import resolution + // + // This is a fixed-point algorithm. We resolve imports until our efforts + // are stymied by an unresolved import; then we bail out of the current + // module and continue. We terminate successfully once no more imports + // remain or unsuccessfully when no forward progress in resolving imports + // is made. + + /// Resolves all imports for the crate. This method performs the fixed- + /// point iteration. 
+ pub fn resolve_imports(&mut self) { + let mut prev_num_indeterminates = self.indeterminate_imports.len() + 1; + while self.indeterminate_imports.len() < prev_num_indeterminates { + prev_num_indeterminates = self.indeterminate_imports.len(); + for import in mem::replace(&mut self.indeterminate_imports, Vec::new()) { + match self.resolve_import(&import) { + true => self.determined_imports.push(import), + false => self.indeterminate_imports.push(import), } } - if import_directive.is_public { - module_.dec_pub_count(); + } + } + + pub fn finalize_imports(&mut self) { + for module in self.arenas.local_modules().iter() { + self.finalize_resolutions_in(module); + } + + let mut errors = false; + for i in 0 .. self.determined_imports.len() { + let import = self.determined_imports[i]; + if let Some(err) = self.finalize_import(import) { + errors = true; + + // If the error is a single failed import then create a "fake" import + // resolution for it so that later resolve stages won't complain. + self.import_dummy_binding(import); + let path = import_path_to_string(&import.module_path, &import.subclass); + let error = ResolutionError::UnresolvedImport(Some((&path, &err))); + resolve_error(self.resolver, import.span, error); } } - return resolution_result; + // Report unresolved imports only if no hard error was already reported + // to avoid generating multiple errors on the same import. 
+ if !errors { + if let Some(import) = self.indeterminate_imports.iter().next() { + let error = ResolutionError::UnresolvedImport(None); + resolve_error(self.resolver, import.span, error); + } + } } - fn resolve_single_import(&mut self, - module_: Module<'b>, - target_module: Module<'b>, - target: Name, - source: Name, - directive: &ImportDirective, - lp: LastPrivate) - -> ResolveResult<()> { - debug!("(resolving single import) resolving `{}` = `{}::{}` from `{}` id {}, last \ - private {:?}", - target, - module_to_string(&*target_module), - source, - module_to_string(module_), - directive.id, - lp); - - let lp = match lp { - LastMod(lp) => lp, - LastImport {..} => { - self.resolver - .session - .span_bug(directive.span, "not expecting Import here, must be LastMod") + /// Attempts to resolve the given import, returning true if its resolution is determined. + /// If successful, the resolved bindings are written into the module. + fn resolve_import(&mut self, directive: &'b ImportDirective<'b>) -> bool { + debug!("(resolving import for module) resolving import `{}::...` in `{}`", + names_to_string(&directive.module_path), + module_to_string(self.current_module)); + + self.current_module = directive.parent; + + let module = if let Some(module) = directive.imported_module.get() { + module + } else { + let vis = directive.vis.get(); + // For better failure detection, pretend that the import will not define any names + // while resolving its module path. + directive.vis.set(ty::Visibility::PrivateExternal); + let result = self.resolve_path(&directive.module_path, PathScope::Import, None, None); + directive.vis.set(vis); + + match result { + PathResult::Module(module) => module, + PathResult::Indeterminate => return false, + _ => return true, } }; - // We need to resolve both namespaces for this to succeed. 
- - let mut value_result = UnknownResult; - let mut type_result = UnknownResult; - let mut lev_suggestion = "".to_owned(); + directive.imported_module.set(Some(module)); + let (source, target, result) = match directive.subclass { + SingleImport { source, target, ref result } => (source, target, result), + GlobImport { .. } => { + self.resolve_glob_import(directive); + return true; + } + _ => unreachable!(), + }; - // Search for direct children of the containing module. - build_reduced_graph::populate_module_if_necessary(self.resolver, &target_module); + let mut indeterminate = false; + self.per_ns(|this, ns| { + if let Err(Undetermined) = result[ns].get() { + result[ns].set(this.resolve_name_in_module(module, source, ns, false, None)); + } else { + return + }; - match target_module.children.borrow().get(&source) { - None => { - let names = target_module.children.borrow(); - if let Some(name) = find_best_match_for_name(names.keys(), - &source.as_str(), - None) { - lev_suggestion = format!(". Did you mean to use `{}`?", name); + match result[ns].get() { + Err(Undetermined) => indeterminate = true, + Err(Determined) => { + this.update_resolution(directive.parent, target, ns, |_, resolution| { + resolution.single_imports.directive_failed() + }); } - } - Some(ref child_name_bindings) => { - // pub_err makes sure we don't give the same error twice. 
- let mut pub_err = false; - if child_name_bindings.value_ns.defined() { - debug!("(resolving single import) found value binding"); - value_result = BoundResult(target_module, - child_name_bindings.value_ns.clone()); - if directive.is_public && !child_name_bindings.value_ns.is_public() { - let msg = format!("`{}` is private, and cannot be reexported", source); - let note_msg = format!("Consider marking `{}` as `pub` in the imported \ - module", - source); - struct_span_err!(self.resolver.session, directive.span, E0364, "{}", &msg) - .span_note(directive.span, ¬e_msg) - .emit(); - pub_err = true; - } - if directive.is_public && child_name_bindings.value_ns. - defined_with(DefModifiers::PRIVATE_VARIANT) { - let msg = format!("variant `{}` is private, and cannot be reexported ( \ - error E0364), consider declaring its enum as `pub`", - source); - self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, - directive.id, - directive.span, - msg); - pub_err = true; - } + Ok(binding) if !binding.is_importable() => { + let msg = format!("`{}` is not directly importable", target); + struct_span_err!(this.session, directive.span, E0253, "{}", &msg) + .span_label(directive.span, &format!("cannot be imported directly")) + .emit(); + // Do not import this illegal binding. 
Import a dummy binding and pretend + // everything is fine + this.import_dummy_binding(directive); } - if child_name_bindings.type_ns.defined() { - debug!("(resolving single import) found type binding"); - type_result = BoundResult(target_module, - child_name_bindings.type_ns.clone()); - if !pub_err && directive.is_public && - !child_name_bindings.type_ns.is_public() { - let msg = format!("`{}` is private, and cannot be reexported", source); - let note_msg = format!("Consider declaring module `{}` as a `pub mod`", - source); - struct_span_err!(self.resolver.session, directive.span, E0365, "{}", &msg) - .span_note(directive.span, ¬e_msg) - .emit(); - } - if !pub_err && directive.is_public && child_name_bindings.type_ns. - defined_with(DefModifiers::PRIVATE_VARIANT) { - let msg = format!("variant `{}` is private, and cannot be reexported ( \ - error E0365), consider declaring its enum as `pub`", - source); - self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, - directive.id, - directive.span, - msg); + Ok(binding) => { + let imported_binding = this.import(binding, directive); + let conflict = this.try_define(directive.parent, target, ns, imported_binding); + if let Err(old_binding) = conflict { + let binding = &this.import(binding, directive); + this.report_conflict(directive.parent, target, ns, binding, old_binding); } } } - } + }); - // Unless we managed to find a result in both namespaces (unlikely), - // search imports as well. - let mut value_used_reexport = false; - let mut type_used_reexport = false; - match (value_result.clone(), type_result.clone()) { - (BoundResult(..), BoundResult(..)) => {} // Continue. - _ => { - // If there is an unresolved glob at this point in the - // containing module, bail out. We don't know enough to be - // able to resolve this import. 
- - if target_module.pub_glob_count.get() > 0 { - debug!("(resolving single import) unresolved pub glob; bailing out"); - return ResolveResult::Indeterminate; - } + !indeterminate + } - // Now search the exported imports within the containing module. - match target_module.import_resolutions.borrow().get(&source) { - None => { - debug!("(resolving single import) no import"); - // The containing module definitely doesn't have an - // exported import with the name in question. We can - // therefore accurately report that the names are - // unbound. - - if lev_suggestion.is_empty() { // skip if we already have a suggestion - let names = target_module.import_resolutions.borrow(); - if let Some(name) = find_best_match_for_name(names.keys(), - &source.as_str(), - None) { - lev_suggestion = - format!(". Did you mean to use the re-exported import `{}`?", - name); - } - } - - if value_result.is_unknown() { - value_result = UnboundResult; - } - if type_result.is_unknown() { - type_result = UnboundResult; - } - } - Some(import_resolution) if import_resolution.outstanding_references == 0 => { - - fn get_binding<'a>(this: &mut Resolver, - import_resolution: &ImportResolutionPerNamespace<'a>, - namespace: Namespace, - source: Name) - -> NamespaceResult<'a> { - - // Import resolutions must be declared with "pub" - // in order to be exported. 
- if !import_resolution[namespace].is_public { - return UnboundResult; - } - - match import_resolution[namespace].target.clone() { - None => { - return UnboundResult; - } - Some(Target { - target_module, - binding, - shadowable: _ - }) => { - debug!("(resolving single import) found import in ns {:?}", - namespace); - let id = import_resolution[namespace].id; - // track used imports and extern crates as well - this.used_imports.insert((id, namespace)); - this.record_import_use(id, source); - match target_module.def_id() { - Some(DefId{krate: kid, ..}) => { - this.used_crates.insert(kid); - } - _ => {} - } - return BoundResult(target_module, binding); - } - } - } - - // The name is an import which has been fully - // resolved. We can, therefore, just follow it. - if value_result.is_unknown() { - value_result = get_binding(self.resolver, - import_resolution, - ValueNS, - source); - value_used_reexport = import_resolution.value_ns.is_public; - } - if type_result.is_unknown() { - type_result = get_binding(self.resolver, - import_resolution, - TypeNS, - source); - type_used_reexport = import_resolution.type_ns.is_public; - } + // If appropriate, returns an error to report. + fn finalize_import(&mut self, directive: &'b ImportDirective<'b>) -> Option { + self.current_module = directive.parent; + + let ImportDirective { ref module_path, span, .. } = *directive; + let module_result = self.resolve_path(&module_path, PathScope::Import, None, Some(span)); + let module = match module_result { + PathResult::Module(module) => module, + PathResult::Failed(msg, _) => { + let mut path = vec![keywords::SelfValue.ident()]; + path.extend(module_path); + let result = self.resolve_path(&path, PathScope::Import, None, None); + return if let PathResult::Module(..) 
= result { + Some(format!("Did you mean `self::{}`?", &names_to_string(module_path))) + } else { + Some(msg) + }; + }, + _ => return None, + }; - } - Some(_) => { - // If target_module is the same module whose import we are resolving - // and there it has an unresolved import with the same name as `source`, - // then the user is actually trying to import an item that is declared - // in the same scope - // - // e.g - // use self::submodule; - // pub mod submodule; - // - // In this case we continue as if we resolved the import and let the - // check_for_conflicts_between_imports_and_items call below handle - // the conflict - match (module_.def_id(), target_module.def_id()) { - (Some(id1), Some(id2)) if id1 == id2 => { - if value_result.is_unknown() { - value_result = UnboundResult; - } - if type_result.is_unknown() { - type_result = UnboundResult; - } - } - _ => { - // The import is unresolved. Bail out. - debug!("(resolving single import) unresolved import; bailing out"); - return ResolveResult::Indeterminate; - } - } - } + let (name, result) = match directive.subclass { + SingleImport { source, ref result, .. } => (source, result), + GlobImport { .. } if module.def_id() == directive.parent.def_id() => { + // Importing a module into itself is not allowed. + return Some("Cannot glob-import a module into itself.".to_string()); + } + GlobImport { is_prelude, ref max_vis } => { + if !is_prelude && + max_vis.get() != ty::Visibility::PrivateExternal && // Allow empty globs. + !max_vis.get().is_at_least(directive.vis.get(), self) { + let msg = "A non-empty glob must import something with the glob's visibility"; + self.session.span_err(directive.span, msg); } + return None; } - } + _ => unreachable!(), + }; - let mut value_used_public = false; - let mut type_used_public = false; - - // If we didn't find a result in the type namespace, search the - // external modules. - match type_result { - BoundResult(..) 
=> {} - _ => { - match target_module.external_module_children.borrow_mut().get(&source) { - None => {} // Continue. - Some(module) => { - debug!("(resolving single import) found external module"); - // track the module as used. - match module.def_id() { - Some(DefId{krate: kid, ..}) => { - self.resolver.used_crates.insert(kid); - } - _ => {} - } - let name_binding = NameBinding::create_from_module(module); - type_result = BoundResult(target_module, name_binding); - type_used_public = true; - } + let mut all_ns_err = true; + self.per_ns(|this, ns| { + if let Ok(binding) = result[ns].get() { + all_ns_err = false; + if this.record_use(name, ns, binding, directive.span) { + this.resolution(module, name, ns).borrow_mut().binding = + Some(this.dummy_binding); } } - } - - // We've successfully resolved the import. Write the results in. - let mut import_resolutions = module_.import_resolutions.borrow_mut(); - let import_resolution = import_resolutions.get_mut(&target).unwrap(); + }); - { - let mut check_and_write_import = |namespace, result: &_, used_public: &mut bool| { - let namespace_name = match namespace { - TypeNS => "type", - ValueNS => "value", - }; + if all_ns_err { + let mut all_ns_failed = true; + self.per_ns(|this, ns| { + match this.resolve_name_in_module(module, name, ns, false, Some(span)) { + Ok(_) => all_ns_failed = false, + _ => {} + } + }); - match *result { - BoundResult(ref target_module, ref name_binding) => { - debug!("(resolving single import) found {:?} target: {:?}", - namespace_name, - name_binding.def()); - self.check_for_conflicting_import(&import_resolution, - directive.span, - target, - namespace); - - self.check_that_import_is_importable(&name_binding, - directive.span, - target); - - import_resolution[namespace] = ImportResolution { - target: Some(Target::new(target_module, - name_binding.clone(), - directive.shadowable)), - id: directive.id, - is_public: directive.is_public - }; - - self.add_export(module_, target, 
&import_resolution[namespace]); - *used_public = name_binding.is_public(); - } - UnboundResult => { - // Continue. + return if all_ns_failed { + let resolutions = module.resolutions.borrow(); + let names = resolutions.iter().filter_map(|(&(ref n, _), resolution)| { + if *n == name { return None; } // Never suggest the same name + match *resolution.borrow() { + NameResolution { binding: Some(_), .. } => Some(n), + NameResolution { single_imports: SingleImports::None, .. } => None, + _ => Some(n), } - UnknownResult => { - panic!("{:?} result should be known at this point", namespace_name); - } - } - }; - check_and_write_import(ValueNS, &value_result, &mut value_used_public); - check_and_write_import(TypeNS, &type_result, &mut type_used_public); + }); + let lev_suggestion = match find_best_match_for_name(names, &name.as_str(), None) { + Some(name) => format!(". Did you mean to use `{}`?", name), + None => "".to_owned(), + }; + let module_str = module_to_string(module); + let msg = if &module_str == "???" { + format!("no `{}` in the root{}", name, lev_suggestion) + } else { + format!("no `{}` in `{}`{}", name, module_str, lev_suggestion) + }; + Some(msg) + } else { + // `resolve_name_in_module` reported a privacy error. 
+ self.import_dummy_binding(directive); + None + } } - self.check_for_conflicts_between_imports_and_items(module_, - import_resolution, - directive.span, - target); + let mut reexport_error = None; + let mut any_successful_reexport = false; + self.per_ns(|this, ns| { + if let Ok(binding) = result[ns].get() { + let vis = directive.vis.get(); + if !binding.pseudo_vis().is_at_least(vis, this) { + reexport_error = Some((ns, binding)); + } else { + any_successful_reexport = true; + } + } + }); - if value_result.is_unbound() && type_result.is_unbound() { - let msg = format!("There is no `{}` in `{}`{}", - source, - module_to_string(&target_module), lev_suggestion); - return ResolveResult::Failed(Some((directive.span, msg))); + // All namespaces must be re-exported with extra visibility for an error to occur. + if !any_successful_reexport { + let (ns, binding) = reexport_error.unwrap(); + if ns == TypeNS && binding.is_extern_crate() { + let msg = format!("extern crate `{}` is private, and cannot be reexported \ + (error E0364), consider declaring with `pub`", + name); + self.session.add_lint(PRIVATE_IN_PUBLIC, directive.id, directive.span, msg); + } else if ns == TypeNS { + struct_span_err!(self.session, directive.span, E0365, + "`{}` is private, and cannot be reexported", name) + .span_label(directive.span, &format!("reexport of private `{}`", name)) + .note(&format!("consider declaring type or module `{}` with `pub`", name)) + .emit(); + } else { + let msg = format!("`{}` is private, and cannot be reexported", name); + let note_msg = + format!("consider marking `{}` as `pub` in the imported module", name); + struct_span_err!(self.session, directive.span, E0364, "{}", &msg) + .span_note(directive.span, ¬e_msg) + .emit(); + } } - let value_used_public = value_used_reexport || value_used_public; - let type_used_public = type_used_reexport || type_used_public; - - assert!(import_resolution.outstanding_references >= 1); - import_resolution.outstanding_references -= 1; // 
Record what this import resolves to for later uses in documentation, // this may resolve to either a value or a type, but for documentation // purposes it's good enough to just favor one over the other. - let value_def_and_priv = import_resolution.value_ns.target.as_ref().map(|target| { - let def = target.binding.def().unwrap(); - (def, - if value_used_public { - lp - } else { - DependsOn(def.def_id()) - }) - }); - let type_def_and_priv = import_resolution.type_ns.target.as_ref().map(|target| { - let def = target.binding.def().unwrap(); - (def, - if type_used_public { - lp - } else { - DependsOn(def.def_id()) - }) + self.per_ns(|this, ns| if let Some(binding) = result[ns].get().ok() { + this.def_map.entry(directive.id).or_insert(PathResolution::new(binding.def())); }); - let import_lp = LastImport { - value_priv: value_def_and_priv.map(|(_, p)| p), - value_used: Used, - type_priv: type_def_and_priv.map(|(_, p)| p), - type_used: Used, - }; - - if let Some((def, _)) = value_def_and_priv { - self.resolver.def_map.borrow_mut().insert(directive.id, - PathResolution { - base_def: def, - last_private: import_lp, - depth: 0, - }); - } - if let Some((def, _)) = type_def_and_priv { - self.resolver.def_map.borrow_mut().insert(directive.id, - PathResolution { - base_def: def, - last_private: import_lp, - depth: 0, - }); - } - debug!("(resolving single import) successfully resolved import"); - return ResolveResult::Success(()); + None } - // Resolves a glob import. Note that this function cannot fail; it either - // succeeds or bails out (as importing * from an empty module or a module - // that exports nothing is valid). target_module is the module we are - // actually importing, i.e., `foo` in `use foo::*`. 
- fn resolve_glob_import(&mut self, - module_: Module<'b>, - target_module: Module<'b>, - import_directive: &ImportDirective, - lp: LastPrivate) - -> ResolveResult<()> { - let id = import_directive.id; - let is_public = import_directive.is_public; - - // This function works in a highly imperative manner; it eagerly adds - // everything it can to the list of import resolutions of the module - // node. - debug!("(resolving glob import) resolving glob import {}", id); - - // We must bail out if the node has unresolved imports of any kind - // (including globs). - if (*target_module).pub_count.get() > 0 { - debug!("(resolving glob import) target module has unresolved pub imports; bailing out"); - return ResolveResult::Indeterminate; + fn resolve_glob_import(&mut self, directive: &'b ImportDirective<'b>) { + let module = directive.imported_module.get().unwrap(); + self.populate_module_if_necessary(module); + + if let Some(Def::Trait(_)) = module.def() { + self.session.span_err(directive.span, "items in traits are not importable."); + return; + } else if module.def_id() == directive.parent.def_id() { + return; + } else if let GlobImport { is_prelude: true, .. } = directive.subclass { + self.prelude = Some(module); + return; } - // Add all resolved imports from the containing module. 
- let import_resolutions = target_module.import_resolutions.borrow(); - - if module_.import_resolutions.borrow_state() != ::std::cell::BorrowState::Unused { - // In this case, target_module == module_ - // This means we are trying to glob import a module into itself, - // and it is a no-go - debug!("(resolving glob imports) target module is current module; giving up"); - return ResolveResult::Failed(Some((import_directive.span, - "Cannot glob-import a module into itself.".into()))); - } - - for (name, target_import_resolution) in import_resolutions.iter() { - debug!("(resolving glob import) writing module resolution {} into `{}`", - *name, - module_to_string(module_)); - - // Here we merge two import resolutions. - let mut import_resolutions = module_.import_resolutions.borrow_mut(); - let mut dest_import_resolution = import_resolutions.entry(*name).or_insert_with(|| { - ImportResolutionPerNamespace::new(id, is_public) - }); - - for &ns in [TypeNS, ValueNS].iter() { - match target_import_resolution[ns].target { - Some(ref target) if target_import_resolution[ns].is_public => { - self.check_for_conflicting_import(&dest_import_resolution, - import_directive.span, - *name, - ns); - dest_import_resolution[ns] = ImportResolution { - id: id, is_public: is_public, target: Some(target.clone()) - }; - self.add_export(module_, *name, &dest_import_resolution[ns]); - } - _ => {} - } + // Add to module's glob_importers + module.glob_importers.borrow_mut().push(directive); + + // Ensure that `resolutions` isn't borrowed during `try_define`, + // since it might get updated via a glob cycle. 
+ let bindings = module.resolutions.borrow().iter().filter_map(|(name, resolution)| { + resolution.borrow().binding().map(|binding| (*name, binding)) + }).collect::>(); + for ((name, ns), binding) in bindings { + if binding.pseudo_vis() == ty::Visibility::Public || self.is_accessible(binding.vis) { + let imported_binding = self.import(binding, directive); + let _ = self.try_define(directive.parent, name, ns, imported_binding); } } - // Add all children from the containing module. - build_reduced_graph::populate_module_if_necessary(self.resolver, &target_module); - - for (&name, name_bindings) in target_module.children.borrow().iter() { - self.merge_import_resolution(module_, - target_module, - import_directive, - name, - name_bindings.clone()); - - } - // Record the destination of this import - if let Some(did) = target_module.def_id() { - self.resolver.def_map.borrow_mut().insert(id, - PathResolution { - base_def: DefMod(did), - last_private: lp, - depth: 0, - }); + if let Some(did) = module.def_id() { + let resolution = PathResolution::new(Def::Mod(did)); + self.def_map.insert(directive.id, resolution); } - - debug!("(resolving glob import) successfully resolved import"); - return ResolveResult::Success(()); } - fn merge_import_resolution(&mut self, - module_: Module<'b>, - containing_module: Module<'b>, - import_directive: &ImportDirective, - name: Name, - name_bindings: NameBindings<'b>) { - let id = import_directive.id; - let is_public = import_directive.is_public; - - let mut import_resolutions = module_.import_resolutions.borrow_mut(); - let dest_import_resolution = import_resolutions.entry(name).or_insert_with(|| { - ImportResolutionPerNamespace::new(id, is_public) - }); + // Miscellaneous post-processing, including recording reexports, reporting conflicts, + // reporting the PRIVATE_IN_PUBLIC lint, and reporting unresolved imports. 
+ fn finalize_resolutions_in(&mut self, module: Module<'b>) { + // Since import resolution is finished, globs will not define any more names. + *module.globs.borrow_mut() = Vec::new(); - debug!("(resolving glob import) writing resolution `{}` in `{}` to `{}`", - name, - module_to_string(&*containing_module), - module_to_string(module_)); - - // Merge the child item into the import resolution. - // pub_err makes sure we don't give the same error twice. - let mut pub_err = false; - { - let mut merge_child_item = |namespace| { - if !pub_err && is_public && - name_bindings[namespace].defined_with(DefModifiers::PRIVATE_VARIANT) { - let msg = format!("variant `{}` is private, and cannot be reexported (error \ - E0364), consider declaring its enum as `pub`", name); - self.resolver.session.add_lint(lint::builtin::PRIVATE_IN_PUBLIC, - import_directive.id, - import_directive.span, - msg); - pub_err = true; - } - - let modifier = DefModifiers::IMPORTABLE | DefModifiers::PUBLIC; - if name_bindings[namespace].defined_with(modifier) { - let namespace_name = match namespace { - TypeNS => "type", - ValueNS => "value", - }; - debug!("(resolving glob import) ... for {} target", namespace_name); - if dest_import_resolution.shadowable(namespace) == Shadowable::Never { - let msg = format!("a {} named `{}` has already been imported in this \ - module", - namespace_name, - name); - span_err!(self.resolver.session, - import_directive.span, - E0251, - "{}", - msg); - } else { - dest_import_resolution[namespace] = ImportResolution { - target: Some(Target::new(containing_module, - name_bindings[namespace].clone(), - import_directive.shadowable)), - id: id, - is_public: is_public - }; - self.add_export(module_, name, &dest_import_resolution[namespace]); - } - } else { - // FIXME #30159: This is required for backwards compatability. 
- dest_import_resolution[namespace].is_public |= is_public; - } - }; - merge_child_item(ValueNS); - merge_child_item(TypeNS); + let mut reexports = Vec::new(); + if module as *const _ == self.graph_root as *const _ { + reexports = mem::replace(&mut self.macro_exports, Vec::new()); } - self.check_for_conflicts_between_imports_and_items(module_, - dest_import_resolution, - import_directive.span, - name); - } - - fn add_export(&mut self, module: Module<'b>, name: Name, resolution: &ImportResolution<'b>) { - if !resolution.is_public { return } - let node_id = match module.def_id() { - Some(def_id) => self.resolver.ast_map.as_local_node_id(def_id).unwrap(), - None => return, - }; - let export = match resolution.target.as_ref().unwrap().binding.def() { - Some(def) => Export { name: name, def_id: def.def_id() }, - None => return, - }; - self.resolver.export_map.entry(node_id).or_insert(Vec::new()).push(export); - } + for (&(name, ns), resolution) in module.resolutions.borrow().iter() { + let resolution = resolution.borrow(); + let binding = match resolution.binding { + Some(binding) => binding, + None => continue, + }; - /// Checks that imported names and items don't have the same name. 
- fn check_for_conflicting_import(&mut self, - import_resolution: &ImportResolutionPerNamespace, - import_span: Span, - name: Name, - namespace: Namespace) { - let target = &import_resolution[namespace].target; - debug!("check_for_conflicting_import: {}; target exists: {}", - name, - target.is_some()); - - match *target { - Some(ref target) if target.shadowable != Shadowable::Always => { - let ns_word = match namespace { - TypeNS => { - match target.binding.module() { - Some(ref module) if module.is_normal() => "module", - Some(ref module) if module.is_trait() => "trait", - _ => "type", - } + if binding.vis == ty::Visibility::Public && + (binding.is_import() || binding.is_extern_crate()) { + let def = binding.def(); + if def != Def::Err { + if !def.def_id().is_local() { + self.session.cstore.export_macros(def.def_id().krate); } - ValueNS => "value", - }; - let use_id = import_resolution[namespace].id; - let item = self.resolver.ast_map.expect_item(use_id); - let mut err = struct_span_err!(self.resolver.session, - import_span, - E0252, - "a {} named `{}` has already been imported \ - in this module", - ns_word, - name); - span_note!(&mut err, - item.span, - "previous import of `{}` here", - name); - err.emit(); - } - Some(_) | None => {} - } - } - - /// Checks that an import is actually importable - fn check_that_import_is_importable(&mut self, - name_binding: &NameBinding, - import_span: Span, - name: Name) { - if !name_binding.defined_with(DefModifiers::IMPORTABLE) { - let msg = format!("`{}` is not directly importable", name); - span_err!(self.resolver.session, import_span, E0253, "{}", &msg[..]); - } - } - - /// Checks that imported names and items don't have the same name. - fn check_for_conflicts_between_imports_and_items(&mut self, - module: Module<'b>, - import: &ImportResolutionPerNamespace<'b>, - import_span: Span, - name: Name) { - // First, check for conflicts between imports and `extern crate`s. 
- if module.external_module_children - .borrow() - .contains_key(&name) { - match import.type_ns.target { - Some(ref target) if target.shadowable != Shadowable::Always => { - let msg = format!("import `{0}` conflicts with imported crate in this module \ - (maybe you meant `use {0}::*`?)", - name); - span_err!(self.resolver.session, import_span, E0254, "{}", &msg[..]); + reexports.push(Export { name: name, def: def }); } - Some(_) | None => {} - } - } - - // Check for item conflicts. - let name_bindings = match module.children.borrow().get(&name) { - None => { - // There can't be any conflicts. - return; } - Some(ref name_bindings) => (*name_bindings).clone(), - }; - match import.value_ns.target { - Some(ref target) if target.shadowable != Shadowable::Always => { - if let Some(ref value) = *name_bindings.value_ns.borrow() { - let mut err = struct_span_err!(self.resolver.session, - import_span, - E0255, - "import `{}` conflicts with \ - value in this module", - name); - if let Some(span) = value.span { - err.span_note(span, "conflicting value here"); - } - err.emit(); + if let NameBindingKind::Import { binding: orig_binding, directive, .. 
} = binding.kind { + if ns == TypeNS && orig_binding.is_variant() && + !orig_binding.vis.is_at_least(binding.vis, self) { + let msg = format!("variant `{}` is private, and cannot be reexported \ + (error E0364), consider declaring its enum as `pub`", + name); + self.session.add_lint(PRIVATE_IN_PUBLIC, directive.id, binding.span, msg); } } - Some(_) | None => {} } - match import.type_ns.target { - Some(ref target) if target.shadowable != Shadowable::Always => { - if let Some(ref ty) = *name_bindings.type_ns.borrow() { - let (what, note) = match ty.module() { - Some(ref module) if module.is_normal() => - ("existing submodule", "note conflicting module here"), - Some(ref module) if module.is_trait() => - ("trait in this module", "note conflicting trait here"), - _ => ("type in this module", "note conflicting type here"), - }; - let mut err = struct_span_err!(self.resolver.session, - import_span, - E0256, - "import `{}` conflicts with {}", - name, - what); - if let Some(span) = ty.span { - err.span_note(span, note); - } - err.emit(); - } + if reexports.len() > 0 { + if let Some(def_id) = module.def_id() { + let node_id = self.definitions.as_local_node_id(def_id).unwrap(); + self.export_map.insert(node_id, reexports); } - Some(_) | None => {} } } } -fn import_path_to_string(names: &[Name], subclass: ImportDirectiveSubclass) -> String { +fn import_path_to_string(names: &[Ident], subclass: &ImportDirectiveSubclass) -> String { if names.is_empty() { import_directive_subclass_to_string(subclass) } else { @@ -1091,14 +761,10 @@ fn import_path_to_string(names: &[Name], subclass: ImportDirectiveSubclass) -> S } } -fn import_directive_subclass_to_string(subclass: ImportDirectiveSubclass) -> String { - match subclass { - SingleImport(_, source) => source.to_string(), - GlobImport => "*".to_string(), +fn import_directive_subclass_to_string(subclass: &ImportDirectiveSubclass) -> String { + match *subclass { + SingleImport { source, .. } => source.to_string(), + GlobImport { .. 
} => "*".to_string(), + ExternCrate => "".to_string(), } } - -pub fn resolve_imports(resolver: &mut Resolver) { - let mut import_resolver = ImportResolver { resolver: resolver }; - import_resolver.resolve_imports(); -} diff --git a/src/librustc_save_analysis/Cargo.toml b/src/librustc_save_analysis/Cargo.toml new file mode 100644 index 0000000000000..3d66e5a300787 --- /dev/null +++ b/src/librustc_save_analysis/Cargo.toml @@ -0,0 +1,16 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_save_analysis" +version = "0.0.0" + +[lib] +name = "rustc_save_analysis" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +log = { path = "../liblog" } +rustc = { path = "../librustc" } +syntax = { path = "../libsyntax" } +serialize = { path = "../libserialize" } +syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file diff --git a/src/librustc_save_analysis/csv_dumper.rs b/src/librustc_save_analysis/csv_dumper.rs new file mode 100644 index 0000000000000..0fd95500422ff --- /dev/null +++ b/src/librustc_save_analysis/csv_dumper.rs @@ -0,0 +1,434 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::io::Write; + +use super::external_data::*; +use super::dump::Dump; + +pub struct CsvDumper<'b, W: 'b> { + output: &'b mut W +} + +impl<'b, W: Write> CsvDumper<'b, W> { + pub fn new(writer: &'b mut W) -> CsvDumper<'b, W> { + CsvDumper { output: writer } + } + + fn record(&mut self, kind: &str, span: SpanData, values: String) { + let span_str = span_extent_str(span); + if let Err(_) = write!(self.output, "{},{}{}\n", kind, span_str, values) { + error!("Error writing output"); + } + } + + fn record_raw(&mut self, info: &str) { + if let Err(_) = write!(self.output, "{}", info) { + error!("Error writing output '{}'", info); + } + } +} + +impl<'b, W: Write + 'b> Dump for CsvDumper<'b, W> { + fn crate_prelude(&mut self, data: CratePreludeData) { + let values = make_values_str(&[ + ("name", &data.crate_name), + ("crate_root", &data.crate_root) + ]); + + self.record("crate", data.span, values); + + for c in data.external_crates { + let num = c.num.to_string(); + let values = make_values_str(&[ + ("name", &c.name), + ("crate", &num), + ("file_name", &c.file_name) + ]); + + self.record_raw(&format!("external_crate{}\n", values)); + } + + self.record_raw("end_external_crates\n"); + } + + fn enum_data(&mut self, data: EnumData) { + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("scopeid", &scope), + ("value", &data.value) + ]); + + self.record("enum", data.span, values); + } + + fn extern_crate(&mut self, data: ExternCrateData) { + let id = data.id.index.as_u32().to_string(); + let crate_num = data.crate_num.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("name", &data.name), + ("location", &data.location), + ("crate", &crate_num), + ("scopeid", &scope) + ]); + + self.record("extern_crate", data.span, values); + } + + fn impl_data(&mut self, data: ImplData) { + let 
self_ref = data.self_ref.unwrap_or(null_def_id()); + let trait_ref = data.trait_ref.unwrap_or(null_def_id()); + + let id = data.id.index.as_u32().to_string(); + let ref_id = self_ref.index.as_usize().to_string(); + let ref_id_crate = self_ref.krate.to_string(); + let trait_id = trait_ref.index.as_usize().to_string(); + let trait_id_crate = trait_ref.krate.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("refid", &ref_id), + ("refidcrate", &ref_id_crate), + ("traitid", &trait_id), + ("traitidcrate", &trait_id_crate), + ("scopeid", &scope) + ]); + + self.record("impl", data.span, values); + } + + fn inheritance(&mut self, data: InheritanceData) { + let base_id = data.base_id.index.as_usize().to_string(); + let base_crate = data.base_id.krate.to_string(); + let deriv_id = data.deriv_id.index.as_u32().to_string(); + let deriv_crate = data.deriv_id.krate.to_string(); + let values = make_values_str(&[ + ("base", &base_id), + ("basecrate", &base_crate), + ("derived", &deriv_id), + ("derivedcrate", &deriv_crate) + ]); + + self.record("inheritance", data.span, values); + } + + fn function(&mut self, data: FunctionData) { + let (decl_id, decl_crate) = match data.declaration { + Some(id) => (id.index.as_usize().to_string(), id.krate.to_string()), + None => (String::new(), String::new()) + }; + + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("declid", &decl_id), + ("declidcrate", &decl_crate), + ("scopeid", &scope) + ]); + + self.record("function", data.span, values); + } + + fn function_ref(&mut self, data: FunctionRefData) { + let ref_id = data.ref_id.index.as_usize().to_string(); + let ref_crate = data.ref_id.krate.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &ref_id), + ("refidcrate", &ref_crate), + 
("qualname", ""), + ("scopeid", &scope) + ]); + + self.record("fn_ref", data.span, values); + } + + fn function_call(&mut self, data: FunctionCallData) { + let ref_id = data.ref_id.index.as_usize().to_string(); + let ref_crate = data.ref_id.krate.to_string(); + let qualname = String::new(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &ref_id), + ("refidcrate", &ref_crate), + ("qualname", &qualname), + ("scopeid", &scope) + ]); + + self.record("fn_call", data.span, values); + } + + fn method(&mut self, data: MethodData) { + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("scopeid", &scope) + ]); + + self.record("method_decl", data.span, values); + } + + fn method_call(&mut self, data: MethodCallData) { + let (dcn, dck) = match data.decl_id { + Some(declid) => (declid.index.as_usize().to_string(), declid.krate.to_string()), + None => (String::new(), String::new()), + }; + + let ref_id = data.ref_id.unwrap_or(null_def_id()); + + let def_id = ref_id.index.as_usize().to_string(); + let def_crate = ref_id.krate.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &def_id), + ("refidcrate", &def_crate), + ("declid", &dcn), + ("declidcrate", &dck), + ("scopeid", &scope) + ]); + + self.record("method_call", data.span, values); + } + + fn macro_data(&mut self, data: MacroData) { + let values = make_values_str(&[ + ("name", &data.name), + ("qualname", &data.qualname) + ]); + + self.record("macro", data.span, values); + } + + fn macro_use(&mut self, data: MacroUseData) { + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("callee_name", &data.name), + ("qualname", &data.qualname), + ("scopeid", &scope) + ]); + + self.record("macro_use", data.span, values); + } + + fn mod_data(&mut self, data: 
ModData) { + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("scopeid", &scope), + ("def_file", &data.filename) + ]); + + self.record("module", data.span, values); + } + + fn mod_ref(&mut self, data: ModRefData) { + let (ref_id, ref_crate) = match data.ref_id { + Some(rid) => (rid.index.as_usize().to_string(), rid.krate.to_string()), + None => (0.to_string(), 0.to_string()) + }; + + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &ref_id), + ("refidcrate", &ref_crate), + ("qualname", &data.qualname), + ("scopeid", &scope) + ]); + + self.record("mod_ref", data.span, values); + } + + fn struct_data(&mut self, data: StructData) { + let id = data.id.index.as_u32().to_string(); + let ctor_id = data.ctor_id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("ctor_id", &ctor_id), + ("qualname", &data.qualname), + ("scopeid", &scope), + ("value", &data.value) + ]); + + self.record("struct", data.span, values); + } + + fn struct_variant(&mut self, data: StructVariantData) { + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("ctor_id", &id), + ("qualname", &data.qualname), + ("type", &data.type_value), + ("value", &data.value), + ("scopeid", &scope) + ]); + + self.record("variant_struct", data.span, values); + } + + fn trait_data(&mut self, data: TraitData) { + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("scopeid", &scope), + ("value", &data.value) + ]); + + self.record("trait", data.span, values); + } + + fn tuple_variant(&mut self, data: TupleVariantData) { + let id = 
data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("name", &data.name), + ("qualname", &data.qualname), + ("type", &data.type_value), + ("value", &data.value), + ("scopeid", &scope) + ]); + + self.record("variant", data.span, values); + } + + fn type_ref(&mut self, data: TypeRefData) { + let (ref_id, ref_crate) = match data.ref_id { + Some(id) => (id.index.as_usize().to_string(), id.krate.to_string()), + None => (0.to_string(), 0.to_string()) + }; + + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &ref_id), + ("refidcrate", &ref_crate), + ("qualname", &data.qualname), + ("scopeid", &scope) + ]); + + self.record("type_ref", data.span, values); + } + + fn typedef(&mut self, data: TypeDefData) { + let id = data.id.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("qualname", &data.qualname), + ("value", &data.value) + ]); + + self.record("typedef", data.span, values); + } + + fn use_data(&mut self, data: UseData) { + let mod_id = data.mod_id.unwrap_or(null_def_id()); + + let id = data.id.index.as_u32().to_string(); + let ref_id = mod_id.index.as_usize().to_string(); + let ref_crate = mod_id.krate.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("refid", &ref_id), + ("refidcrate", &ref_crate), + ("name", &data.name), + ("scopeid", &scope) + ]); + + self.record("use_alias", data.span, values); + } + + fn use_glob(&mut self, data: UseGlobData) { + let names = data.names.join(", "); + + let id = data.id.index.as_u32().to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("value", &names), + ("scopeid", &scope) + ]); + + self.record("use_glob", data.span, values); + } + + fn variable(&mut self, data: VariableData) { + let id = data.id.index.as_u32().to_string(); + let scope = 
data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("id", &id), + ("name", &data.name), + ("qualname", &data.qualname), + ("value", &data.value), + ("type", &data.type_value), + ("scopeid", &scope) + ]); + + self.record("variable", data.span, values); + } + + fn variable_ref(&mut self, data: VariableRefData) { + let ref_id = data.ref_id.index.as_usize().to_string(); + let ref_crate = data.ref_id.krate.to_string(); + let scope = data.scope.index.as_u32().to_string(); + let values = make_values_str(&[ + ("refid", &ref_id), + ("refidcrate", &ref_crate), + ("qualname", ""), + ("scopeid", &scope) + ]); + + self.record("var_ref", data.span, values) + } +} + +// Helper function to escape quotes in a string +fn escape(s: String) -> String { + s.replace("\"", "\"\"") +} + +fn make_values_str(pairs: &[(&'static str, &str)]) -> String { + let pairs = pairs.into_iter().map(|&(f, v)| { + // Never take more than 1020 chars + if v.len() > 1020 { + (f, &v[..1020]) + } else { + (f, v) + } + }); + + let strs = pairs.map(|(f, v)| format!(",{},\"{}\"", f, escape(String::from(v)))); + strs.fold(String::new(), |mut s, ss| { + s.push_str(&ss[..]); + s + }) +} + +fn span_extent_str(span: SpanData) -> String { + format!("file_name,\"{}\",file_line,{},file_col,{},byte_start,{},\ + file_line_end,{},file_col_end,{},byte_end,{}", + span.file_name, span.line_start, span.column_start, span.byte_start, + span.line_end, span.column_end, span.byte_end) +} diff --git a/src/librustc_save_analysis/data.rs b/src/librustc_save_analysis/data.rs new file mode 100644 index 0000000000000..fc235aaf9276b --- /dev/null +++ b/src/librustc_save_analysis/data.rs @@ -0,0 +1,407 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +//! Structs representing the analysis data from a crate. +//! +//! The `Dump` trait can be used together with `DumpVisitor` in order to +//! retrieve the data from a crate. + +use rustc::hir; +use rustc::hir::def_id::{CrateNum, DefId}; +use syntax::ast::{self, NodeId}; +use syntax_pos::Span; + +pub struct CrateData { + pub name: String, + pub number: u32, + pub span: Span, +} + +/// Data for any entity in the Rust language. The actual data contained varies +/// with the kind of entity being queried. See the nested structs for details. +#[derive(Debug, RustcEncodable)] +pub enum Data { + /// Data for Enums. + EnumData(EnumData), + /// Data for extern crates. + ExternCrateData(ExternCrateData), + /// Data about a function call. + FunctionCallData(FunctionCallData), + /// Data for all kinds of functions and methods. + FunctionData(FunctionData), + /// Data about a function ref. + FunctionRefData(FunctionRefData), + /// Data for impls. + ImplData(ImplData2), + /// Data for trait inheritance. + InheritanceData(InheritanceData), + /// Data about a macro declaration. + MacroData(MacroData), + /// Data about a macro use. + MacroUseData(MacroUseData), + /// Data about a method call. + MethodCallData(MethodCallData), + /// Data for method declarations (methods with a body are treated as functions). + MethodData(MethodData), + /// Data for modules. + ModData(ModData), + /// Data for a reference to a module. + ModRefData(ModRefData), + /// Data for a struct declaration. + StructData(StructData), + /// Data for a struct variant. + StructVariantDat(StructVariantData), + /// Data for a trait declaration. + TraitData(TraitData), + /// Data for a tuple variant. + TupleVariantData(TupleVariantData), + /// Data for a typedef. + TypeDefData(TypeDefData), + /// Data for a reference to a type or trait. + TypeRefData(TypeRefData), + /// Data for a use statement. 
+ UseData(UseData), + /// Data for a global use statement. + UseGlobData(UseGlobData), + /// Data for local and global variables (consts and statics), and fields. + VariableData(VariableData), + /// Data for the use of some variable (e.g., the use of a local variable, which + /// will refere to that variables declaration). + VariableRefData(VariableRefData), +} + +#[derive(Eq, PartialEq, Clone, Copy, Debug, RustcEncodable)] +pub enum Visibility { + Public, + Restricted, + Inherited, +} + +impl<'a> From<&'a ast::Visibility> for Visibility { + fn from(v: &'a ast::Visibility) -> Visibility { + match *v { + ast::Visibility::Public => Visibility::Public, + ast::Visibility::Crate(_) => Visibility::Restricted, + ast::Visibility::Restricted { .. } => Visibility::Restricted, + ast::Visibility::Inherited => Visibility::Inherited, + } + } +} + +impl<'a> From<&'a hir::Visibility> for Visibility { + fn from(v: &'a hir::Visibility) -> Visibility { + match *v { + hir::Visibility::Public => Visibility::Public, + hir::Visibility::Crate => Visibility::Restricted, + hir::Visibility::Restricted { .. } => Visibility::Restricted, + hir::Visibility::Inherited => Visibility::Inherited, + } + } +} + +/// Data for the prelude of a crate. +#[derive(Debug, RustcEncodable)] +pub struct CratePreludeData { + pub crate_name: String, + pub crate_root: String, + pub external_crates: Vec, + pub span: Span, +} + +/// Data for external crates in the prelude of a crate. +#[derive(Debug, RustcEncodable)] +pub struct ExternalCrateData { + pub name: String, + pub num: CrateNum, + pub file_name: String, +} + +/// Data for enum declarations. +#[derive(Clone, Debug, RustcEncodable)] +pub struct EnumData { + pub id: NodeId, + pub name: String, + pub value: String, + pub qualname: String, + pub span: Span, + pub scope: NodeId, + pub variants: Vec, + pub visibility: Visibility, + pub docs: String, +} + +/// Data for extern crates. 
+#[derive(Debug, RustcEncodable)] +pub struct ExternCrateData { + pub id: NodeId, + pub name: String, + pub crate_num: CrateNum, + pub location: String, + pub span: Span, + pub scope: NodeId, +} + +/// Data about a function call. +#[derive(Debug, RustcEncodable)] +pub struct FunctionCallData { + pub span: Span, + pub scope: NodeId, + pub ref_id: DefId, +} + +/// Data for all kinds of functions and methods. +#[derive(Clone, Debug, RustcEncodable)] +pub struct FunctionData { + pub id: NodeId, + pub name: String, + pub qualname: String, + pub declaration: Option, + pub span: Span, + pub scope: NodeId, + pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, +} + +/// Data about a function call. +#[derive(Debug, RustcEncodable)] +pub struct FunctionRefData { + pub span: Span, + pub scope: NodeId, + pub ref_id: DefId, +} + +#[derive(Debug, RustcEncodable)] +pub struct ImplData { + pub id: NodeId, + pub span: Span, + pub scope: NodeId, + pub trait_ref: Option, + pub self_ref: Option, +} + +#[derive(Debug, RustcEncodable)] +// FIXME: this struct should not exist. However, removing it requires heavy +// refactoring of dump_visitor.rs. See PR 31838 for more info. +pub struct ImplData2 { + pub id: NodeId, + pub span: Span, + pub scope: NodeId, + // FIXME: I'm not really sure inline data is the best way to do this. Seems + // OK in this case, but generalising leads to returning chunks of AST, which + // feels wrong. + pub trait_ref: Option, + pub self_ref: Option, +} + +#[derive(Debug, RustcEncodable)] +pub struct InheritanceData { + pub span: Span, + pub base_id: DefId, + pub deriv_id: NodeId +} + +/// Data about a macro declaration. +#[derive(Debug, RustcEncodable)] +pub struct MacroData { + pub span: Span, + pub name: String, + pub qualname: String, + pub docs: String, +} + +/// Data about a macro use. 
+#[derive(Debug, RustcEncodable)] +pub struct MacroUseData { + pub span: Span, + pub name: String, + pub qualname: String, + // Because macro expansion happens before ref-ids are determined, + // we use the callee span to reference the associated macro definition. + pub callee_span: Span, + pub scope: NodeId, + pub imported: bool, +} + +/// Data about a method call. +#[derive(Debug, RustcEncodable)] +pub struct MethodCallData { + pub span: Span, + pub scope: NodeId, + pub ref_id: Option, + pub decl_id: Option, +} + +/// Data for method declarations (methods with a body are treated as functions). +#[derive(Clone, Debug, RustcEncodable)] +pub struct MethodData { + pub id: NodeId, + pub name: String, + pub qualname: String, + pub span: Span, + pub scope: NodeId, + pub value: String, + pub decl_id: Option, + pub parent: Option, + pub visibility: Visibility, + pub docs: String, +} + +/// Data for modules. +#[derive(Debug, RustcEncodable)] +pub struct ModData { + pub id: NodeId, + pub name: String, + pub qualname: String, + pub span: Span, + pub scope: NodeId, + pub filename: String, + pub items: Vec, + pub visibility: Visibility, + pub docs: String, +} + +/// Data for a reference to a module. 
+#[derive(Debug, RustcEncodable)] +pub struct ModRefData { + pub span: Span, + pub scope: NodeId, + pub ref_id: Option, + pub qualname: String +} + +#[derive(Debug, RustcEncodable)] +pub struct StructData { + pub span: Span, + pub name: String, + pub id: NodeId, + pub ctor_id: NodeId, + pub qualname: String, + pub scope: NodeId, + pub value: String, + pub fields: Vec, + pub visibility: Visibility, + pub docs: String, +} + +#[derive(Debug, RustcEncodable)] +pub struct StructVariantData { + pub span: Span, + pub name: String, + pub id: NodeId, + pub qualname: String, + pub type_value: String, + pub value: String, + pub scope: NodeId, + pub parent: Option, + pub docs: String, +} + +#[derive(Debug, RustcEncodable)] +pub struct TraitData { + pub span: Span, + pub id: NodeId, + pub name: String, + pub qualname: String, + pub scope: NodeId, + pub value: String, + pub items: Vec, + pub visibility: Visibility, + pub docs: String, +} + +#[derive(Debug, RustcEncodable)] +pub struct TupleVariantData { + pub span: Span, + pub id: NodeId, + pub name: String, + pub qualname: String, + pub type_value: String, + pub value: String, + pub scope: NodeId, + pub parent: Option, + pub docs: String, +} + +/// Data for a typedef. +#[derive(Debug, RustcEncodable)] +pub struct TypeDefData { + pub id: NodeId, + pub name: String, + pub span: Span, + pub qualname: String, + pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, +} + +/// Data for a reference to a type or trait. 
+#[derive(Clone, Debug, RustcEncodable)] +pub struct TypeRefData { + pub span: Span, + pub scope: NodeId, + pub ref_id: Option, + pub qualname: String, +} + +#[derive(Debug, RustcEncodable)] +pub struct UseData { + pub id: NodeId, + pub span: Span, + pub name: String, + pub mod_id: Option, + pub scope: NodeId, + pub visibility: Visibility, +} + +#[derive(Debug, RustcEncodable)] +pub struct UseGlobData { + pub id: NodeId, + pub span: Span, + pub names: Vec, + pub scope: NodeId, + pub visibility: Visibility, +} + +/// Data for local and global variables (consts and statics). +#[derive(Debug, RustcEncodable)] +pub struct VariableData { + pub id: NodeId, + pub kind: VariableKind, + pub name: String, + pub qualname: String, + pub span: Span, + pub scope: NodeId, + pub parent: Option, + pub value: String, + pub type_value: String, + pub visibility: Visibility, + pub docs: String, +} + +#[derive(Debug, RustcEncodable)] +pub enum VariableKind { + Static, + Const, + Local, + Field, +} + +/// Data for the use of some item (e.g., the use of a local variable, which +/// will refer to that variables declaration (by ref_id)). +#[derive(Debug, RustcEncodable)] +pub struct VariableRefData { + pub name: String, + pub span: Span, + pub scope: NodeId, + pub ref_id: DefId, +} diff --git a/src/librustc_save_analysis/dump.rs b/src/librustc_save_analysis/dump.rs new file mode 100644 index 0000000000000..18241b394cc17 --- /dev/null +++ b/src/librustc_save_analysis/dump.rs @@ -0,0 +1,38 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::external_data::*; + +pub trait Dump { + fn crate_prelude(&mut self, CratePreludeData) {} + fn enum_data(&mut self, EnumData) {} + fn extern_crate(&mut self, ExternCrateData) {} + fn impl_data(&mut self, ImplData) {} + fn inheritance(&mut self, InheritanceData) {} + fn function(&mut self, FunctionData) {} + fn function_ref(&mut self, FunctionRefData) {} + fn function_call(&mut self, FunctionCallData) {} + fn method(&mut self, MethodData) {} + fn method_call(&mut self, MethodCallData) {} + fn macro_data(&mut self, MacroData) {} + fn macro_use(&mut self, MacroUseData) {} + fn mod_data(&mut self, ModData) {} + fn mod_ref(&mut self, ModRefData) {} + fn struct_data(&mut self, StructData) {} + fn struct_variant(&mut self, StructVariantData) {} + fn trait_data(&mut self, TraitData) {} + fn tuple_variant(&mut self, TupleVariantData) {} + fn type_ref(&mut self, TypeRefData) {} + fn typedef(&mut self, TypeDefData) {} + fn use_data(&mut self, UseData) {} + fn use_glob(&mut self, UseGlobData) {} + fn variable(&mut self, VariableData) {} + fn variable_ref(&mut self, VariableRefData) {} +} diff --git a/src/librustc_save_analysis/dump_visitor.rs b/src/librustc_save_analysis/dump_visitor.rs new file mode 100644 index 0000000000000..4cd28e0a46daf --- /dev/null +++ b/src/librustc_save_analysis/dump_visitor.rs @@ -0,0 +1,1541 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Write the output of rustc's analysis to an implementor of Dump. The data is +//! primarily designed to be used as input to the DXR tool, specifically its +//! Rust plugin. It could also be used by IDEs or other code browsing, search, or +//! cross-referencing tools. +//! +//! 
Dumping the analysis is implemented by walking the AST and getting a bunch of +//! info out from all over the place. We use Def IDs to identify objects. The +//! tricky part is getting syntactic (span, source text) and semantic (reference +//! Def IDs) information for parts of expressions which the compiler has discarded. +//! E.g., in a path `foo::bar::baz`, the compiler only keeps a span for the whole +//! path and a reference to `baz`, but we want spans and references for all three +//! idents. +//! +//! SpanUtils is used to manipulate spans. In particular, to extract sub-spans +//! from spans (e.g., the span for `bar` from the above example path). +//! DumpVisitor walks the AST and processes it, and an implementor of Dump +//! is used for recording the output in a format-agnostic way (see CsvDumper +//! for an example). + +use rustc::hir; +use rustc::hir::def::Def; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::hir::map::{Node, NodeItem}; +use rustc::session::Session; +use rustc::ty::{self, TyCtxt, AssociatedItemContainer}; + +use std::collections::HashSet; +use std::collections::hash_map::DefaultHasher; +use std::hash::*; + +use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; +use syntax::parse::token; +use syntax::symbol::keywords; +use syntax::visit::{self, Visitor}; +use syntax::print::pprust::{path_to_string, ty_to_string, bounds_to_string, generics_to_string}; +use syntax::ptr::P; +use syntax::codemap::Spanned; +use syntax_pos::*; + +use super::{escape, generated_code, SaveContext, PathCollector, docs_for_attrs}; +use super::data::*; +use super::dump::Dump; +use super::external_data::{Lower, make_def_id}; +use super::span_utils::SpanUtils; +use super::recorder; + +macro_rules! 
down_cast_data { + ($id:ident, $kind:ident, $sp:expr) => { + let $id = if let super::Data::$kind(data) = $id { + data + } else { + span_bug!($sp, "unexpected data kind: {:?}", $id); + }; + }; +} + +pub struct DumpVisitor<'l, 'tcx: 'l, 'll, D: 'll> { + save_ctxt: SaveContext<'l, 'tcx>, + sess: &'l Session, + tcx: TyCtxt<'l, 'tcx, 'tcx>, + dumper: &'ll mut D, + + span: SpanUtils<'l>, + + cur_scope: NodeId, + + // Set of macro definition (callee) spans, and the set + // of macro use (callsite) spans. We store these to ensure + // we only write one macro def per unique macro definition, and + // one macro use per unique callsite span. + mac_defs: HashSet, + mac_uses: HashSet, +} + +impl<'l, 'tcx: 'l, 'll, D: Dump + 'll> DumpVisitor<'l, 'tcx, 'll, D> { + pub fn new(save_ctxt: SaveContext<'l, 'tcx>, + dumper: &'ll mut D) + -> DumpVisitor<'l, 'tcx, 'll, D> { + let span_utils = SpanUtils::new(&save_ctxt.tcx.sess); + DumpVisitor { + sess: &save_ctxt.tcx.sess, + tcx: save_ctxt.tcx, + save_ctxt: save_ctxt, + dumper: dumper, + span: span_utils.clone(), + cur_scope: CRATE_NODE_ID, + mac_defs: HashSet::new(), + mac_uses: HashSet::new(), + } + } + + fn nest(&mut self, scope_id: NodeId, f: F) + where F: FnOnce(&mut DumpVisitor<'l, 'tcx, 'll, D>) + { + let parent_scope = self.cur_scope; + self.cur_scope = scope_id; + f(self); + self.cur_scope = parent_scope; + } + + pub fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) { + let source_file = self.tcx.sess.local_crate_source_file.as_ref(); + let crate_root = source_file.map(|source_file| { + match source_file.file_name() { + Some(_) => source_file.parent().unwrap().display().to_string(), + None => source_file.display().to_string(), + } + }); + + // Info about all the external crates referenced from this crate. 
+ let external_crates = self.save_ctxt.get_external_crates().into_iter().map(|c| { + let lo_loc = self.span.sess.codemap().lookup_char_pos(c.span.lo); + ExternalCrateData { + name: c.name, + num: CrateNum::from_u32(c.number), + file_name: SpanUtils::make_path_string(&lo_loc.file.name), + } + }).collect(); + + // The current crate. + let data = CratePreludeData { + crate_name: name.into(), + crate_root: crate_root.unwrap_or("".to_owned()), + external_crates: external_crates, + span: krate.span, + }; + + self.dumper.crate_prelude(data.lower(self.tcx)); + } + + // Return all non-empty prefixes of a path. + // For each prefix, we return the span for the last segment in the prefix and + // a str representation of the entire prefix. + fn process_path_prefixes(&self, path: &ast::Path) -> Vec<(Span, String)> { + let spans = self.span.spans_for_path_segments(path); + + // Paths to enums seem to not match their spans - the span includes all the + // variants too. But they seem to always be at the end, so I hope we can cope with + // always using the first ones. So, only error out if we don't have enough spans. + // What could go wrong...? + if spans.len() < path.segments.len() { + if generated_code(path.span) { + return vec![]; + } + error!("Mis-calculated spans for path '{}'. Found {} spans, expected {}. 
Found spans:", + path_to_string(path), + spans.len(), + path.segments.len()); + for s in &spans { + let loc = self.sess.codemap().lookup_char_pos(s.lo); + error!(" '{}' in {}, line {}", + self.span.snippet(*s), + loc.file.name, + loc.line); + } + error!(" master span: {:?}: `{}`", path.span, self.span.snippet(path.span)); + return vec![]; + } + + let mut result: Vec<(Span, String)> = vec![]; + + let mut segs = vec![]; + for (i, (seg, span)) in path.segments.iter().zip(&spans).enumerate() { + segs.push(seg.clone()); + let sub_path = ast::Path { + span: *span, // span for the last segment + global: path.global, + segments: segs, + }; + let qualname = if i == 0 && path.global { + format!("::{}", path_to_string(&sub_path)) + } else { + path_to_string(&sub_path) + }; + result.push((*span, qualname)); + segs = sub_path.segments; + } + + result + } + + // The global arg allows us to override the global-ness of the path (which + // actually means 'does the path start with `::`', rather than 'is the path + // semantically global). We use the override for `use` imports (etc.) where + // the syntax is non-global, but the semantics are global. + fn write_sub_paths(&mut self, path: &ast::Path, global: bool) { + let sub_paths = self.process_path_prefixes(path); + for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() { + let qualname = if i == 0 && global && !path.global { + format!("::{}", qualname) + } else { + qualname.clone() + }; + self.dumper.mod_ref(ModRefData { + span: *span, + qualname: qualname, + scope: self.cur_scope, + ref_id: None + }.lower(self.tcx)); + } + } + + // As write_sub_paths, but does not process the last ident in the path (assuming it + // will be processed elsewhere). See note on write_sub_paths about global. 
+ fn write_sub_paths_truncated(&mut self, path: &ast::Path, global: bool) { + let sub_paths = self.process_path_prefixes(path); + let len = sub_paths.len(); + if len <= 1 { + return; + } + + let sub_paths = &sub_paths[..len-1]; + for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() { + let qualname = if i == 0 && global && !path.global { + format!("::{}", qualname) + } else { + qualname.clone() + }; + self.dumper.mod_ref(ModRefData { + span: *span, + qualname: qualname, + scope: self.cur_scope, + ref_id: None + }.lower(self.tcx)); + } + } + + // As write_sub_paths, but expects a path of the form module_path::trait::method + // Where trait could actually be a struct too. + fn write_sub_path_trait_truncated(&mut self, path: &ast::Path) { + let sub_paths = self.process_path_prefixes(path); + let len = sub_paths.len(); + if len <= 1 { + return; + } + let sub_paths = &sub_paths[.. (len-1)]; + + // write the trait part of the sub-path + let (ref span, ref qualname) = sub_paths[len-2]; + self.dumper.type_ref(TypeRefData { + ref_id: None, + span: *span, + qualname: qualname.to_owned(), + scope: CRATE_NODE_ID + }.lower(self.tcx)); + + // write the other sub-paths + if len <= 2 { + return; + } + let sub_paths = &sub_paths[..len-2]; + for &(ref span, ref qualname) in sub_paths { + self.dumper.mod_ref(ModRefData { + span: *span, + qualname: qualname.to_owned(), + scope: self.cur_scope, + ref_id: None + }.lower(self.tcx)); + } + } + + fn lookup_def_id(&self, ref_id: NodeId) -> Option { + match self.save_ctxt.get_path_def(ref_id) { + Def::PrimTy(..) | Def::SelfTy(..) 
| Def::Err => None, + def => Some(def.def_id()), + } + } + + fn process_def_kind(&mut self, + ref_id: NodeId, + span: Span, + sub_span: Option, + def_id: DefId, + scope: NodeId) { + if self.span.filter_generated(sub_span, span) { + return; + } + + let def = self.save_ctxt.get_path_def(ref_id); + match def { + Def::Mod(_) => { + self.dumper.mod_ref(ModRefData { + span: sub_span.expect("No span found for mod ref"), + ref_id: Some(def_id), + scope: scope, + qualname: String::new() + }.lower(self.tcx)); + } + Def::Struct(..) | + Def::Variant(..) | + Def::Union(..) | + Def::Enum(..) | + Def::TyAlias(..) | + Def::Trait(_) => { + self.dumper.type_ref(TypeRefData { + span: sub_span.expect("No span found for type ref"), + ref_id: Some(def_id), + scope: scope, + qualname: String::new() + }.lower(self.tcx)); + } + Def::Static(..) | + Def::Const(..) | + Def::StructCtor(..) | + Def::VariantCtor(..) => { + self.dumper.variable_ref(VariableRefData { + span: sub_span.expect("No span found for var ref"), + ref_id: def_id, + scope: scope, + name: String::new() + }.lower(self.tcx)); + } + Def::Fn(..) => { + self.dumper.function_ref(FunctionRefData { + span: sub_span.expect("No span found for fn ref"), + ref_id: def_id, + scope: scope + }.lower(self.tcx)); + } + Def::Local(..) | + Def::Upvar(..) | + Def::SelfTy(..) | + Def::Label(_) | + Def::TyParam(..) | + Def::Method(..) | + Def::AssociatedTy(..) | + Def::AssociatedConst(..) | + Def::PrimTy(_) | + Def::Macro(_) | + Def::Err => { + span_bug!(span, + "process_def_kind for unexpected item: {:?}", + def); + } + } + } + + fn process_formals(&mut self, formals: &Vec, qualname: &str) { + for arg in formals { + self.visit_pat(&arg.pat); + let mut collector = PathCollector::new(); + collector.visit_pat(&arg.pat); + let span_utils = self.span.clone(); + for &(id, ref p, ..) 
in &collector.collected_paths { + let typ = match self.tcx.tables().node_types.get(&id) { + Some(s) => s.to_string(), + None => continue, + }; + // get the span only for the name of the variable (I hope the path is only ever a + // variable name, but who knows?) + let sub_span = span_utils.span_for_last_ident(p.span); + if !self.span.filter_generated(sub_span, p.span) { + self.dumper.variable(VariableData { + id: id, + kind: VariableKind::Local, + span: sub_span.expect("No span found for variable"), + name: path_to_string(p), + qualname: format!("{}::{}", qualname, path_to_string(p)), + type_value: typ, + value: String::new(), + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), + }.lower(self.tcx)); + } + } + } + } + + fn process_method(&mut self, + sig: &ast::MethodSig, + body: Option<&ast::Block>, + id: ast::NodeId, + name: ast::Name, + vis: Visibility, + attrs: &[Attribute], + span: Span) { + debug!("process_method: {}:{}", id, name); + + if let Some(method_data) = self.save_ctxt.get_method_data(id, name, span) { + + let sig_str = ::make_signature(&sig.decl, &sig.generics); + if body.is_some() { + self.process_formals(&sig.decl.inputs, &method_data.qualname); + } + + // If the method is defined in an impl, then try and find the corresponding + // method decl in a trait, and if there is one, make a decl_id for it. This + // requires looking up the impl, then the trait, then searching for a method + // with the right name. 
+ if !self.span.filter_generated(Some(method_data.span), span) { + let container = + self.tcx.associated_item(self.tcx.map.local_def_id(id)).container; + let mut trait_id; + let mut decl_id = None; + match container { + AssociatedItemContainer::ImplContainer(id) => { + trait_id = self.tcx.trait_id_of_impl(id); + + match trait_id { + Some(id) => { + for item in self.tcx.associated_items(id) { + if item.kind == ty::AssociatedKind::Method { + if item.name == name { + decl_id = Some(item.def_id); + break; + } + } + } + } + None => { + if let Some(NodeItem(item)) = self.tcx.map.get_if_local(id) { + if let hir::ItemImpl(_, _, _, _, ref ty, _) = item.node { + trait_id = self.lookup_def_id(ty.id); + } + } + } + } + } + AssociatedItemContainer::TraitContainer(id) => { + trait_id = Some(id); + } + } + + self.dumper.method(MethodData { + id: method_data.id, + name: method_data.name, + span: method_data.span, + scope: method_data.scope, + qualname: method_data.qualname.clone(), + value: sig_str, + decl_id: decl_id, + parent: trait_id, + visibility: vis, + docs: docs_for_attrs(attrs), + }.lower(self.tcx)); + } + + self.process_generic_params(&sig.generics, span, &method_data.qualname, id); + } + + // walk arg and return types + for arg in &sig.decl.inputs { + self.visit_ty(&arg.ty); + } + + if let ast::FunctionRetTy::Ty(ref ret_ty) = sig.decl.output { + self.visit_ty(ret_ty); + } + + // walk the fn body + if let Some(body) = body { + self.nest(id, |v| v.visit_block(body)); + } + } + + fn process_trait_ref(&mut self, trait_ref: &ast::TraitRef) { + let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope); + if let Some(trait_ref_data) = trait_ref_data { + if !self.span.filter_generated(Some(trait_ref_data.span), trait_ref.path.span) { + self.dumper.type_ref(trait_ref_data.lower(self.tcx)); + } + + visit::walk_path(self, &trait_ref.path); + } + } + + fn process_struct_field_def(&mut self, field: &ast::StructField, parent_id: NodeId) { + let field_data = 
self.save_ctxt.get_field_data(field, parent_id); + if let Some(mut field_data) = field_data { + if !self.span.filter_generated(Some(field_data.span), field.span) { + field_data.value = String::new(); + self.dumper.variable(field_data.lower(self.tcx)); + } + } + } + + // Dump generic params bindings, then visit_generics + fn process_generic_params(&mut self, + generics: &ast::Generics, + full_span: Span, + prefix: &str, + id: NodeId) { + // We can't only use visit_generics since we don't have spans for param + // bindings, so we reparse the full_span to get those sub spans. + // However full span is the entire enum/fn/struct block, so we only want + // the first few to match the number of generics we're looking for. + let param_sub_spans = self.span.spans_for_ty_params(full_span, + (generics.ty_params.len() as isize)); + for (param, param_ss) in generics.ty_params.iter().zip(param_sub_spans) { + let name = escape(self.span.snippet(param_ss)); + // Append $id to name to make sure each one is unique + let qualname = format!("{}::{}${}", + prefix, + name, + id); + if !self.span.filter_generated(Some(param_ss), full_span) { + self.dumper.typedef(TypeDefData { + span: param_ss, + name: name, + id: param.id, + qualname: qualname, + value: String::new(), + visibility: Visibility::Inherited, + parent: None, + docs: String::new(), + }.lower(self.tcx)); + } + } + self.visit_generics(generics); + } + + fn process_fn(&mut self, + item: &ast::Item, + decl: &ast::FnDecl, + ty_params: &ast::Generics, + body: &ast::Block) { + if let Some(fn_data) = self.save_ctxt.get_item_data(item) { + down_cast_data!(fn_data, FunctionData, item.span); + if !self.span.filter_generated(Some(fn_data.span), item.span) { + self.dumper.function(fn_data.clone().lower(self.tcx)); + } + + self.process_formals(&decl.inputs, &fn_data.qualname); + self.process_generic_params(ty_params, item.span, &fn_data.qualname, item.id); + } + + for arg in &decl.inputs { + self.visit_ty(&arg.ty); + } + + if let 
ast::FunctionRetTy::Ty(ref ret_ty) = decl.output { + self.visit_ty(&ret_ty); + } + + self.nest(item.id, |v| v.visit_block(&body)); + } + + fn process_static_or_const_item(&mut self, item: &ast::Item, typ: &ast::Ty, expr: &ast::Expr) { + if let Some(var_data) = self.save_ctxt.get_item_data(item) { + down_cast_data!(var_data, VariableData, item.span); + if !self.span.filter_generated(Some(var_data.span), item.span) { + self.dumper.variable(var_data.lower(self.tcx)); + } + } + self.visit_ty(&typ); + self.visit_expr(expr); + } + + fn process_assoc_const(&mut self, + id: ast::NodeId, + name: ast::Name, + span: Span, + typ: &ast::Ty, + expr: &ast::Expr, + parent_id: DefId, + vis: Visibility, + attrs: &[Attribute]) { + let qualname = format!("::{}", self.tcx.node_path_str(id)); + + let sub_span = self.span.sub_span_after_keyword(span, keywords::Const); + + if !self.span.filter_generated(sub_span, span) { + self.dumper.variable(VariableData { + span: sub_span.expect("No span found for variable"), + kind: VariableKind::Const, + id: id, + name: name.to_string(), + qualname: qualname, + value: self.span.snippet(expr.span), + type_value: ty_to_string(&typ), + scope: self.cur_scope, + parent: Some(parent_id), + visibility: vis, + docs: docs_for_attrs(attrs), + }.lower(self.tcx)); + } + + // walk type and init value + self.visit_ty(typ); + self.visit_expr(expr); + } + + // FIXME tuple structs should generate tuple-specific data. 
+ fn process_struct(&mut self, + item: &ast::Item, + def: &ast::VariantData, + ty_params: &ast::Generics) { + let name = item.ident.to_string(); + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + + let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Struct); + let (val, fields) = + if let ast::ItemKind::Struct(ast::VariantData::Struct(ref fields, _), _) = item.node + { + let fields_str = fields.iter() + .enumerate() + .map(|(i, f)| f.ident.map(|i| i.to_string()) + .unwrap_or(i.to_string())) + .collect::>() + .join(", "); + (format!("{} {{ {} }}", name, fields_str), fields.iter().map(|f| f.id).collect()) + } else { + (String::new(), vec![]) + }; + + if !self.span.filter_generated(sub_span, item.span) { + self.dumper.struct_data(StructData { + span: sub_span.expect("No span found for struct"), + id: item.id, + name: name, + ctor_id: def.id(), + qualname: qualname.clone(), + scope: self.cur_scope, + value: val, + fields: fields, + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + }.lower(self.tcx)); + } + + + // fields + for field in def.fields() { + self.process_struct_field_def(field, item.id); + self.visit_ty(&field.ty); + } + + self.process_generic_params(ty_params, item.span, &qualname, item.id); + } + + fn process_enum(&mut self, + item: &ast::Item, + enum_definition: &ast::EnumDef, + ty_params: &ast::Generics) { + let enum_data = self.save_ctxt.get_item_data(item); + let enum_data = match enum_data { + None => return, + Some(data) => data, + }; + down_cast_data!(enum_data, EnumData, item.span); + if !self.span.filter_generated(Some(enum_data.span), item.span) { + self.dumper.enum_data(enum_data.clone().lower(self.tcx)); + } + + for variant in &enum_definition.variants { + let name = variant.node.name.name.to_string(); + let mut qualname = enum_data.qualname.clone(); + qualname.push_str("::"); + qualname.push_str(&name); + + match variant.node.data { + ast::VariantData::Struct(ref fields, _) => { + let 
sub_span = self.span.span_for_first_ident(variant.span); + let fields_str = fields.iter() + .enumerate() + .map(|(i, f)| f.ident.map(|i| i.to_string()) + .unwrap_or(i.to_string())) + .collect::>() + .join(", "); + let val = format!("{}::{} {{ {} }}", enum_data.name, name, fields_str); + if !self.span.filter_generated(sub_span, variant.span) { + self.dumper.struct_variant(StructVariantData { + span: sub_span.expect("No span found for struct variant"), + id: variant.node.data.id(), + name: name, + qualname: qualname, + type_value: enum_data.qualname.clone(), + value: val, + scope: enum_data.scope, + parent: Some(make_def_id(item.id, &self.tcx.map)), + docs: docs_for_attrs(&variant.node.attrs), + }.lower(self.tcx)); + } + } + ref v => { + let sub_span = self.span.span_for_first_ident(variant.span); + let mut val = format!("{}::{}", enum_data.name, name); + if let &ast::VariantData::Tuple(ref fields, _) = v { + val.push('('); + val.push_str(&fields.iter() + .map(|f| ty_to_string(&f.ty)) + .collect::>() + .join(", ")); + val.push(')'); + } + if !self.span.filter_generated(sub_span, variant.span) { + self.dumper.tuple_variant(TupleVariantData { + span: sub_span.expect("No span found for tuple variant"), + id: variant.node.data.id(), + name: name, + qualname: qualname, + type_value: enum_data.qualname.clone(), + value: val, + scope: enum_data.scope, + parent: Some(make_def_id(item.id, &self.tcx.map)), + docs: docs_for_attrs(&variant.node.attrs), + }.lower(self.tcx)); + } + } + } + + + for field in variant.node.data.fields() { + self.process_struct_field_def(field, variant.node.data.id()); + self.visit_ty(&field.ty); + } + } + self.process_generic_params(ty_params, item.span, &enum_data.qualname, enum_data.id); + } + + fn process_impl(&mut self, + item: &ast::Item, + type_parameters: &ast::Generics, + trait_ref: &Option, + typ: &ast::Ty, + impl_items: &[ast::ImplItem]) { + let mut has_self_ref = false; + if let Some(impl_data) = self.save_ctxt.get_item_data(item) { + 
down_cast_data!(impl_data, ImplData, item.span); + if let Some(ref self_ref) = impl_data.self_ref { + has_self_ref = true; + if !self.span.filter_generated(Some(self_ref.span), item.span) { + self.dumper.type_ref(self_ref.clone().lower(self.tcx)); + } + } + if let Some(ref trait_ref_data) = impl_data.trait_ref { + if !self.span.filter_generated(Some(trait_ref_data.span), item.span) { + self.dumper.type_ref(trait_ref_data.clone().lower(self.tcx)); + } + + visit::walk_path(self, &trait_ref.as_ref().unwrap().path); + } + + if !self.span.filter_generated(Some(impl_data.span), item.span) { + self.dumper.impl_data(ImplData { + id: impl_data.id, + span: impl_data.span, + scope: impl_data.scope, + trait_ref: impl_data.trait_ref.map(|d| d.ref_id.unwrap()), + self_ref: impl_data.self_ref.map(|d| d.ref_id.unwrap()) + }.lower(self.tcx)); + } + } + if !has_self_ref { + self.visit_ty(&typ); + } + self.process_generic_params(type_parameters, item.span, "", item.id); + for impl_item in impl_items { + let map = &self.tcx.map; + self.process_impl_item(impl_item, make_def_id(item.id, map)); + } + } + + fn process_trait(&mut self, + item: &ast::Item, + generics: &ast::Generics, + trait_refs: &ast::TyParamBounds, + methods: &[ast::TraitItem]) { + let name = item.ident.to_string(); + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + let mut val = name.clone(); + if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { + val.push_str(&generics_to_string(generics)); + } + if !trait_refs.is_empty() { + val.push_str(": "); + val.push_str(&bounds_to_string(trait_refs)); + } + let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Trait); + if !self.span.filter_generated(sub_span, item.span) { + self.dumper.trait_data(TraitData { + span: sub_span.expect("No span found for trait"), + id: item.id, + name: name, + qualname: qualname.clone(), + scope: self.cur_scope, + value: val, + items: methods.iter().map(|i| i.id).collect(), + visibility: 
From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + }.lower(self.tcx)); + } + + // super-traits + for super_bound in trait_refs.iter() { + let trait_ref = match *super_bound { + ast::TraitTyParamBound(ref trait_ref, _) => { + trait_ref + } + ast::RegionTyParamBound(..) => { + continue; + } + }; + + let trait_ref = &trait_ref.trait_ref; + if let Some(id) = self.lookup_def_id(trait_ref.ref_id) { + let sub_span = self.span.sub_span_for_type_name(trait_ref.path.span); + if !self.span.filter_generated(sub_span, trait_ref.path.span) { + self.dumper.type_ref(TypeRefData { + span: sub_span.expect("No span found for trait ref"), + ref_id: Some(id), + scope: self.cur_scope, + qualname: String::new() + }.lower(self.tcx)); + } + + if !self.span.filter_generated(sub_span, trait_ref.path.span) { + let sub_span = sub_span.expect("No span for inheritance"); + self.dumper.inheritance(InheritanceData { + span: sub_span, + base_id: id, + deriv_id: item.id + }.lower(self.tcx)); + } + } + } + + // walk generics and methods + self.process_generic_params(generics, item.span, &qualname, item.id); + for method in methods { + let map = &self.tcx.map; + self.process_trait_item(method, make_def_id(item.id, map)) + } + } + + // `item` is the module in question, represented as an item. 
+ fn process_mod(&mut self, item: &ast::Item) { + if let Some(mod_data) = self.save_ctxt.get_item_data(item) { + down_cast_data!(mod_data, ModData, item.span); + if !self.span.filter_generated(Some(mod_data.span), item.span) { + self.dumper.mod_data(mod_data.lower(self.tcx)); + } + } + } + + fn process_path(&mut self, id: NodeId, path: &ast::Path, ref_kind: Option) { + let path_data = self.save_ctxt.get_path_data(id, path); + if generated_code(path.span) && path_data.is_none() { + return; + } + + let path_data = match path_data { + Some(pd) => pd, + None => { + return; + } + }; + + match path_data { + Data::VariableRefData(vrd) => { + // FIXME: this whole block duplicates the code in process_def_kind + if !self.span.filter_generated(Some(vrd.span), path.span) { + match ref_kind { + Some(recorder::TypeRef) => { + self.dumper.type_ref(TypeRefData { + span: vrd.span, + ref_id: Some(vrd.ref_id), + scope: vrd.scope, + qualname: String::new() + }.lower(self.tcx)); + } + Some(recorder::FnRef) => { + self.dumper.function_ref(FunctionRefData { + span: vrd.span, + ref_id: vrd.ref_id, + scope: vrd.scope + }.lower(self.tcx)); + } + Some(recorder::ModRef) => { + self.dumper.mod_ref( ModRefData { + span: vrd.span, + ref_id: Some(vrd.ref_id), + scope: vrd.scope, + qualname: String::new() + }.lower(self.tcx)); + } + Some(recorder::VarRef) | None + => self.dumper.variable_ref(vrd.lower(self.tcx)) + } + } + + } + Data::TypeRefData(trd) => { + if !self.span.filter_generated(Some(trd.span), path.span) { + self.dumper.type_ref(trd.lower(self.tcx)); + } + } + Data::MethodCallData(mcd) => { + if !self.span.filter_generated(Some(mcd.span), path.span) { + self.dumper.method_call(mcd.lower(self.tcx)); + } + } + Data::FunctionCallData(fcd) => { + if !self.span.filter_generated(Some(fcd.span), path.span) { + self.dumper.function_call(fcd.lower(self.tcx)); + } + } + _ => { + span_bug!(path.span, "Unexpected data: {:?}", path_data); + } + } + + // Modules or types in the path prefix. 
+ match self.save_ctxt.get_path_def(id) { + Def::Method(did) => { + let ti = self.tcx.associated_item(did); + if ti.kind == ty::AssociatedKind::Method && ti.method_has_self_argument { + self.write_sub_path_trait_truncated(path); + } + } + Def::Fn(..) | + Def::Const(..) | + Def::Static(..) | + Def::StructCtor(..) | + Def::VariantCtor(..) | + Def::AssociatedConst(..) | + Def::Local(..) | + Def::Upvar(..) | + Def::Struct(..) | + Def::Union(..) | + Def::Variant(..) | + Def::TyAlias(..) | + Def::AssociatedTy(..) => self.write_sub_paths_truncated(path, false), + _ => {} + } + } + + fn process_struct_lit(&mut self, + ex: &ast::Expr, + path: &ast::Path, + fields: &Vec, + variant: &ty::VariantDef, + base: &Option>) { + self.write_sub_paths_truncated(path, false); + + if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) { + down_cast_data!(struct_lit_data, TypeRefData, ex.span); + if !self.span.filter_generated(Some(struct_lit_data.span), ex.span) { + self.dumper.type_ref(struct_lit_data.lower(self.tcx)); + } + + let scope = self.save_ctxt.enclosing_scope(ex.id); + + for field in fields { + if let Some(field_data) = self.save_ctxt + .get_field_ref_data(field, variant, scope) { + + if !self.span.filter_generated(Some(field_data.span), field.ident.span) { + self.dumper.variable_ref(field_data.lower(self.tcx)); + } + } + + self.visit_expr(&field.expr) + } + } + + walk_list!(self, visit_expr, base); + } + + fn process_method_call(&mut self, ex: &ast::Expr, args: &Vec>) { + if let Some(mcd) = self.save_ctxt.get_expr_data(ex) { + down_cast_data!(mcd, MethodCallData, ex.span); + if !self.span.filter_generated(Some(mcd.span), ex.span) { + self.dumper.method_call(mcd.lower(self.tcx)); + } + } + + // walk receiver and args + walk_list!(self, visit_expr, args); + } + + fn process_pat(&mut self, p: &ast::Pat) { + match p.node { + PatKind::Struct(ref path, ref fields, _) => { + visit::walk_path(self, path); + let adt = match self.tcx.tables().node_id_to_type_opt(p.id) { + 
Some(ty) => ty.ty_adt_def().unwrap(), + None => { + visit::walk_pat(self, p); + return; + } + }; + let variant = adt.variant_of_def(self.save_ctxt.get_path_def(p.id)); + + for &Spanned { node: ref field, span } in fields { + let sub_span = self.span.span_for_first_ident(span); + if let Some(f) = variant.find_field_named(field.ident.name) { + if !self.span.filter_generated(sub_span, span) { + self.dumper.variable_ref(VariableRefData { + span: sub_span.expect("No span fund for var ref"), + ref_id: f.did, + scope: self.cur_scope, + name: String::new() + }.lower(self.tcx)); + } + } + self.visit_pat(&field.pat); + } + } + _ => visit::walk_pat(self, p), + } + } + + + fn process_var_decl(&mut self, p: &ast::Pat, value: String) { + // The local could declare multiple new vars, we must walk the + // pattern and collect them all. + let mut collector = PathCollector::new(); + collector.visit_pat(&p); + self.visit_pat(&p); + + for &(id, ref p, immut, _) in &collector.collected_paths { + let mut value = match immut { + ast::Mutability::Immutable => value.to_string(), + _ => String::new(), + }; + let typ = match self.tcx.tables().node_types.get(&id) { + Some(typ) => { + let typ = typ.to_string(); + if !value.is_empty() { + value.push_str(": "); + } + value.push_str(&typ); + typ + } + None => String::new(), + }; + + // Get the span only for the name of the variable (I hope the path + // is only ever a variable name, but who knows?). + let sub_span = self.span.span_for_last_ident(p.span); + // Rust uses the id of the pattern for var lookups, so we'll use it too. 
+ if !self.span.filter_generated(sub_span, p.span) { + self.dumper.variable(VariableData { + span: sub_span.expect("No span found for variable"), + kind: VariableKind::Local, + id: id, + name: path_to_string(p), + qualname: format!("{}${}", path_to_string(p), id), + value: value, + type_value: typ, + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), + }.lower(self.tcx)); + } + } + } + + /// Extract macro use and definition information from the AST node defined + /// by the given NodeId, using the expansion information from the node's + /// span. + /// + /// If the span is not macro-generated, do nothing, else use callee and + /// callsite spans to record macro definition and use data, using the + /// mac_uses and mac_defs sets to prevent multiples. + fn process_macro_use(&mut self, span: Span, id: NodeId) { + let data = match self.save_ctxt.get_macro_use_data(span, id) { + None => return, + Some(data) => data, + }; + let mut hasher = DefaultHasher::new(); + data.callee_span.hash(&mut hasher); + let hash = hasher.finish(); + let qualname = format!("{}::{}", data.name, hash); + // Don't write macro definition for imported macros + if !self.mac_defs.contains(&data.callee_span) + && !data.imported { + self.mac_defs.insert(data.callee_span); + if let Some(sub_span) = self.span.span_for_macro_def_name(data.callee_span) { + self.dumper.macro_data(MacroData { + span: sub_span, + name: data.name.clone(), + qualname: qualname.clone(), + // FIXME where do macro docs come from? 
+ docs: String::new(), + }.lower(self.tcx)); + } + } + if !self.mac_uses.contains(&data.span) { + self.mac_uses.insert(data.span); + if let Some(sub_span) = self.span.span_for_macro_use_name(data.span) { + self.dumper.macro_use(MacroUseData { + span: sub_span, + name: data.name, + qualname: qualname, + scope: data.scope, + callee_span: data.callee_span, + imported: data.imported, + }.lower(self.tcx)); + } + } + } + + fn process_trait_item(&mut self, trait_item: &ast::TraitItem, trait_id: DefId) { + self.process_macro_use(trait_item.span, trait_item.id); + match trait_item.node { + ast::TraitItemKind::Const(ref ty, Some(ref expr)) => { + self.process_assoc_const(trait_item.id, + trait_item.ident.name, + trait_item.span, + &ty, + &expr, + trait_id, + Visibility::Public, + &trait_item.attrs); + } + ast::TraitItemKind::Method(ref sig, ref body) => { + self.process_method(sig, + body.as_ref().map(|x| &**x), + trait_item.id, + trait_item.ident.name, + Visibility::Public, + &trait_item.attrs, + trait_item.span); + } + ast::TraitItemKind::Const(_, None) | + ast::TraitItemKind::Type(..) 
| + ast::TraitItemKind::Macro(_) => {} + } + } + + fn process_impl_item(&mut self, impl_item: &ast::ImplItem, impl_id: DefId) { + self.process_macro_use(impl_item.span, impl_item.id); + match impl_item.node { + ast::ImplItemKind::Const(ref ty, ref expr) => { + self.process_assoc_const(impl_item.id, + impl_item.ident.name, + impl_item.span, + &ty, + &expr, + impl_id, + From::from(&impl_item.vis), + &impl_item.attrs); + } + ast::ImplItemKind::Method(ref sig, ref body) => { + self.process_method(sig, + Some(body), + impl_item.id, + impl_item.ident.name, + From::from(&impl_item.vis), + &impl_item.attrs, + impl_item.span); + } + ast::ImplItemKind::Type(_) | + ast::ImplItemKind::Macro(_) => {} + } + } +} + +impl<'l, 'tcx: 'l, 'll, D: Dump +'ll> Visitor for DumpVisitor<'l, 'tcx, 'll, D> { + fn visit_item(&mut self, item: &ast::Item) { + use syntax::ast::ItemKind::*; + self.process_macro_use(item.span, item.id); + match item.node { + Use(ref use_item) => { + match use_item.node { + ast::ViewPathSimple(ident, ref path) => { + let sub_span = self.span.span_for_last_ident(path.span); + let mod_id = match self.lookup_def_id(item.id) { + Some(def_id) => { + let scope = self.cur_scope; + self.process_def_kind(item.id, path.span, sub_span, def_id, scope); + + Some(def_id) + } + None => None, + }; + + // 'use' always introduces an alias, if there is not an explicit + // one, there is an implicit one. + let sub_span = match self.span.sub_span_after_keyword(use_item.span, + keywords::As) { + Some(sub_span) => Some(sub_span), + None => sub_span, + }; + + if !self.span.filter_generated(sub_span, path.span) { + self.dumper.use_data(UseData { + span: sub_span.expect("No span found for use"), + id: item.id, + mod_id: mod_id, + name: ident.to_string(), + scope: self.cur_scope, + visibility: From::from(&item.vis), + }.lower(self.tcx)); + } + self.write_sub_paths_truncated(path, true); + } + ast::ViewPathGlob(ref path) => { + // Make a comma-separated list of names of imported modules. 
+ let mut names = vec![]; + let glob_map = &self.save_ctxt.analysis.glob_map; + let glob_map = glob_map.as_ref().unwrap(); + if glob_map.contains_key(&item.id) { + for n in glob_map.get(&item.id).unwrap() { + names.push(n.to_string()); + } + } + + let sub_span = self.span + .sub_span_of_token(item.span, token::BinOp(token::Star)); + if !self.span.filter_generated(sub_span, item.span) { + self.dumper.use_glob(UseGlobData { + span: sub_span.expect("No span found for use glob"), + id: item.id, + names: names, + scope: self.cur_scope, + visibility: From::from(&item.vis), + }.lower(self.tcx)); + } + self.write_sub_paths(path, true); + } + ast::ViewPathList(ref path, ref list) => { + for plid in list { + let scope = self.cur_scope; + let id = plid.node.id; + if let Some(def_id) = self.lookup_def_id(id) { + let span = plid.span; + self.process_def_kind(id, span, Some(span), def_id, scope); + } + } + + self.write_sub_paths(path, true); + } + } + } + ExternCrate(ref s) => { + let location = match *s { + Some(s) => s.to_string(), + None => item.ident.to_string(), + }; + let alias_span = self.span.span_for_last_ident(item.span); + let cnum = match self.sess.cstore.extern_mod_stmt_cnum(item.id) { + Some(cnum) => cnum, + None => LOCAL_CRATE, + }; + + if !self.span.filter_generated(alias_span, item.span) { + self.dumper.extern_crate(ExternCrateData { + id: item.id, + name: item.ident.to_string(), + crate_num: cnum, + location: location, + span: alias_span.expect("No span found for extern crate"), + scope: self.cur_scope, + }.lower(self.tcx)); + } + } + Fn(ref decl, .., ref ty_params, ref body) => + self.process_fn(item, &decl, ty_params, &body), + Static(ref typ, _, ref expr) => + self.process_static_or_const_item(item, typ, expr), + Const(ref typ, ref expr) => + self.process_static_or_const_item(item, &typ, &expr), + Struct(ref def, ref ty_params) => self.process_struct(item, def, ty_params), + Enum(ref def, ref ty_params) => self.process_enum(item, def, ty_params), + Impl(.., 
+ ref ty_params, + ref trait_ref, + ref typ, + ref impl_items) => { + self.process_impl(item, ty_params, trait_ref, &typ, impl_items) + } + Trait(_, ref generics, ref trait_refs, ref methods) => + self.process_trait(item, generics, trait_refs, methods), + Mod(ref m) => { + self.process_mod(item); + self.nest(item.id, |v| visit::walk_mod(v, m)); + } + Ty(ref ty, ref ty_params) => { + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + let value = ty_to_string(&ty); + let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Type); + if !self.span.filter_generated(sub_span, item.span) { + self.dumper.typedef(TypeDefData { + span: sub_span.expect("No span found for typedef"), + name: item.ident.to_string(), + id: item.id, + qualname: qualname.clone(), + value: value, + visibility: From::from(&item.vis), + parent: None, + docs: docs_for_attrs(&item.attrs), + }.lower(self.tcx)); + } + + self.visit_ty(&ty); + self.process_generic_params(ty_params, item.span, &qualname, item.id); + } + Mac(_) => (), + _ => visit::walk_item(self, item), + } + } + + fn visit_generics(&mut self, generics: &ast::Generics) { + for param in generics.ty_params.iter() { + for bound in param.bounds.iter() { + if let ast::TraitTyParamBound(ref trait_ref, _) = *bound { + self.process_trait_ref(&trait_ref.trait_ref); + } + } + if let Some(ref ty) = param.default { + self.visit_ty(&ty); + } + } + } + + fn visit_ty(&mut self, t: &ast::Ty) { + self.process_macro_use(t.span, t.id); + match t.node { + ast::TyKind::Path(_, ref path) => { + if let Some(id) = self.lookup_def_id(t.id) { + let sub_span = self.span.sub_span_for_type_name(t.span); + if !self.span.filter_generated(sub_span, t.span) { + self.dumper.type_ref(TypeRefData { + span: sub_span.expect("No span found for type ref"), + ref_id: Some(id), + scope: self.cur_scope, + qualname: String::new() + }.lower(self.tcx)); + } + } + + self.write_sub_paths_truncated(path, false); + + visit::walk_path(self, path); + } + _ => 
visit::walk_ty(self, t), + } + } + + fn visit_expr(&mut self, ex: &ast::Expr) { + self.process_macro_use(ex.span, ex.id); + match ex.node { + ast::ExprKind::Call(ref _f, ref _args) => { + // Don't need to do anything for function calls, + // because just walking the callee path does what we want. + visit::walk_expr(self, ex); + } + ast::ExprKind::Path(_, ref path) => { + self.process_path(ex.id, path, None); + visit::walk_expr(self, ex); + } + ast::ExprKind::Struct(ref path, ref fields, ref base) => { + let hir_expr = self.save_ctxt.tcx.map.expect_expr(ex.id); + let adt = match self.tcx.tables().expr_ty_opt(&hir_expr) { + Some(ty) => ty.ty_adt_def().unwrap(), + None => { + visit::walk_expr(self, ex); + return; + } + }; + let def = self.save_ctxt.get_path_def(hir_expr.id); + self.process_struct_lit(ex, path, fields, adt.variant_of_def(def), base) + } + ast::ExprKind::MethodCall(.., ref args) => self.process_method_call(ex, args), + ast::ExprKind::Field(ref sub_ex, _) => { + self.visit_expr(&sub_ex); + + if let Some(field_data) = self.save_ctxt.get_expr_data(ex) { + down_cast_data!(field_data, VariableRefData, ex.span); + if !self.span.filter_generated(Some(field_data.span), ex.span) { + self.dumper.variable_ref(field_data.lower(self.tcx)); + } + } + } + ast::ExprKind::TupField(ref sub_ex, idx) => { + self.visit_expr(&sub_ex); + + let hir_node = match self.save_ctxt.tcx.map.find(sub_ex.id) { + Some(Node::NodeExpr(expr)) => expr, + _ => { + debug!("Missing or weird node for sub-expression {} in {:?}", + sub_ex.id, ex); + return; + } + }; + let ty = match self.tcx.tables().expr_ty_adjusted_opt(&hir_node) { + Some(ty) => &ty.sty, + None => { + visit::walk_expr(self, ex); + return; + } + }; + match *ty { + ty::TyAdt(def, _) => { + let sub_span = self.span.sub_span_after_token(ex.span, token::Dot); + if !self.span.filter_generated(sub_span, ex.span) { + self.dumper.variable_ref(VariableRefData { + span: sub_span.expect("No span found for var ref"), + ref_id: 
def.struct_variant().fields[idx.node].did, + scope: self.cur_scope, + name: String::new() + }.lower(self.tcx)); + } + } + ty::TyTuple(_) => {} + _ => span_bug!(ex.span, + "Expected struct or tuple type, found {:?}", + ty), + } + } + ast::ExprKind::Closure(_, ref decl, ref body, _fn_decl_span) => { + let mut id = String::from("$"); + id.push_str(&ex.id.to_string()); + self.process_formals(&decl.inputs, &id); + + // walk arg and return types + for arg in &decl.inputs { + self.visit_ty(&arg.ty); + } + + if let ast::FunctionRetTy::Ty(ref ret_ty) = decl.output { + self.visit_ty(&ret_ty); + } + + // walk the body + self.nest(ex.id, |v| v.visit_expr(body)); + } + ast::ExprKind::ForLoop(ref pattern, ref subexpression, ref block, _) | + ast::ExprKind::WhileLet(ref pattern, ref subexpression, ref block, _) => { + let value = self.span.snippet(subexpression.span); + self.process_var_decl(pattern, value); + visit::walk_expr(self, subexpression); + visit::walk_block(self, block); + } + ast::ExprKind::IfLet(ref pattern, ref subexpression, ref block, ref opt_else) => { + let value = self.span.snippet(subexpression.span); + self.process_var_decl(pattern, value); + visit::walk_expr(self, subexpression); + visit::walk_block(self, block); + opt_else.as_ref().map(|el| visit::walk_expr(self, el)); + } + _ => { + visit::walk_expr(self, ex) + } + } + } + + fn visit_mac(&mut self, mac: &ast::Mac) { + // These shouldn't exist in the AST at this point, log a span bug. + span_bug!(mac.span, "macro invocation should have been expanded out of AST"); + } + + fn visit_pat(&mut self, p: &ast::Pat) { + self.process_macro_use(p.span, p.id); + self.process_pat(p); + } + + fn visit_arm(&mut self, arm: &ast::Arm) { + let mut collector = PathCollector::new(); + for pattern in &arm.pats { + // collect paths from the arm's patterns + collector.visit_pat(&pattern); + self.visit_pat(&pattern); + } + + // This is to get around borrow checking, because we need mut self to call process_path. 
+ let mut paths_to_process = vec![]; + + // process collected paths + for &(id, ref p, immut, ref_kind) in &collector.collected_paths { + match self.save_ctxt.get_path_def(id) { + Def::Local(def_id) => { + let id = self.tcx.map.as_local_node_id(def_id).unwrap(); + let mut value = if immut == ast::Mutability::Immutable { + self.span.snippet(p.span).to_string() + } else { + "".to_string() + }; + let typ = self.tcx.tables().node_types + .get(&id).map(|t| t.to_string()).unwrap_or(String::new()); + value.push_str(": "); + value.push_str(&typ); + + assert!(p.segments.len() == 1, + "qualified path for local variable def in arm"); + if !self.span.filter_generated(Some(p.span), p.span) { + self.dumper.variable(VariableData { + span: p.span, + kind: VariableKind::Local, + id: id, + name: path_to_string(p), + qualname: format!("{}${}", path_to_string(p), id), + value: value, + type_value: typ, + scope: CRATE_NODE_ID, + parent: None, + visibility: Visibility::Inherited, + docs: String::new(), + }.lower(self.tcx)); + } + } + Def::StructCtor(..) | Def::VariantCtor(..) | + Def::Const(..) | Def::AssociatedConst(..) | + Def::Struct(..) | Def::Variant(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) | + Def::SelfTy(..) => { + paths_to_process.push((id, p.clone(), Some(ref_kind))) + } + def => error!("unexpected definition kind when processing collected paths: {:?}", + def), + } + } + + for &(id, ref path, ref_kind) in &paths_to_process { + self.process_path(id, path, ref_kind); + } + walk_list!(self, visit_expr, &arm.guard); + self.visit_expr(&arm.body); + } + + fn visit_stmt(&mut self, s: &ast::Stmt) { + self.process_macro_use(s.span, s.id); + visit::walk_stmt(self, s) + } + + fn visit_local(&mut self, l: &ast::Local) { + self.process_macro_use(l.span, l.id); + let value = l.init.as_ref().map(|i| self.span.snippet(i.span)).unwrap_or(String::new()); + self.process_var_decl(&l.pat, value); + + // Just walk the initialiser and type (don't want to walk the pattern again). 
+ walk_list!(self, visit_ty, &l.ty); + walk_list!(self, visit_expr, &l.init); + } +} diff --git a/src/librustc_save_analysis/external_data.rs b/src/librustc_save_analysis/external_data.rs new file mode 100644 index 0000000000000..5847575742342 --- /dev/null +++ b/src/librustc_save_analysis/external_data.rs @@ -0,0 +1,702 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::hir::def_id::{CrateNum, DefId, DefIndex}; +use rustc::hir::map::Map; +use rustc::ty::TyCtxt; +use syntax::ast::NodeId; +use syntax::codemap::CodeMap; +use syntax_pos::Span; + +use data::{self, Visibility}; + +// FIXME: this should be pub(crate), but the current snapshot doesn't allow it yet +pub trait Lower { + type Target; + fn lower(self, tcx: TyCtxt) -> Self::Target; +} + +pub fn make_def_id(id: NodeId, map: &Map) -> DefId { + map.opt_local_def_id(id).unwrap_or(null_def_id()) +} + +pub fn null_def_id() -> DefId { + DefId { + krate: CrateNum::from_u32(u32::max_value()), + index: DefIndex::from_u32(u32::max_value()) + } +} + +#[derive(Clone, Debug, RustcEncodable)] +pub struct SpanData { + pub file_name: String, + pub byte_start: u32, + pub byte_end: u32, + /// 1-based. + pub line_start: usize, + pub line_end: usize, + /// 1-based, character offset. 
+ pub column_start: usize, + pub column_end: usize, +} + +impl SpanData { + pub fn from_span(span: Span, cm: &CodeMap) -> SpanData { + let start = cm.lookup_char_pos(span.lo); + let end = cm.lookup_char_pos(span.hi); + + SpanData { + file_name: start.file.name.clone(), + byte_start: span.lo.0, + byte_end: span.hi.0, + line_start: start.line, + line_end: end.line, + column_start: start.col.0 + 1, + column_end: end.col.0 + 1, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct CratePreludeData { + pub crate_name: String, + pub crate_root: String, + pub external_crates: Vec, + pub span: SpanData, +} + +impl Lower for data::CratePreludeData { + type Target = CratePreludeData; + + fn lower(self, tcx: TyCtxt) -> CratePreludeData { + CratePreludeData { + crate_name: self.crate_name, + crate_root: self.crate_root, + external_crates: self.external_crates, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + } + } +} + +/// Data for enum declarations. +#[derive(Clone, Debug, RustcEncodable)] +pub struct EnumData { + pub id: DefId, + pub value: String, + pub name: String, + pub qualname: String, + pub span: SpanData, + pub scope: DefId, + pub variants: Vec, + pub visibility: Visibility, + pub docs: String, +} + +impl Lower for data::EnumData { + type Target = EnumData; + + fn lower(self, tcx: TyCtxt) -> EnumData { + EnumData { + id: make_def_id(self.id, &tcx.map), + name: self.name, + value: self.value, + qualname: self.qualname, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + variants: self.variants.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, + } + } +} + +/// Data for extern crates. 
+#[derive(Debug, RustcEncodable)] +pub struct ExternCrateData { + pub id: DefId, + pub name: String, + pub crate_num: CrateNum, + pub location: String, + pub span: SpanData, + pub scope: DefId, +} + +impl Lower for data::ExternCrateData { + type Target = ExternCrateData; + + fn lower(self, tcx: TyCtxt) -> ExternCrateData { + ExternCrateData { + id: make_def_id(self.id, &tcx.map), + name: self.name, + crate_num: self.crate_num, + location: self.location, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + } + } +} + +/// Data about a function call. +#[derive(Debug, RustcEncodable)] +pub struct FunctionCallData { + pub span: SpanData, + pub scope: DefId, + pub ref_id: DefId, +} + +impl Lower for data::FunctionCallData { + type Target = FunctionCallData; + + fn lower(self, tcx: TyCtxt) -> FunctionCallData { + FunctionCallData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + } + } +} + +/// Data for all kinds of functions and methods. +#[derive(Clone, Debug, RustcEncodable)] +pub struct FunctionData { + pub id: DefId, + pub name: String, + pub qualname: String, + pub declaration: Option, + pub span: SpanData, + pub scope: DefId, + pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, +} + +impl Lower for data::FunctionData { + type Target = FunctionData; + + fn lower(self, tcx: TyCtxt) -> FunctionData { + FunctionData { + id: make_def_id(self.id, &tcx.map), + name: self.name, + qualname: self.qualname, + declaration: self.declaration, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + value: self.value, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, + } + } +} + +/// Data about a function call. 
+#[derive(Debug, RustcEncodable)] +pub struct FunctionRefData { + pub span: SpanData, + pub scope: DefId, + pub ref_id: DefId, +} + +impl Lower for data::FunctionRefData { + type Target = FunctionRefData; + + fn lower(self, tcx: TyCtxt) -> FunctionRefData { + FunctionRefData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + } + } +} +#[derive(Debug, RustcEncodable)] +pub struct ImplData { + pub id: DefId, + pub span: SpanData, + pub scope: DefId, + pub trait_ref: Option, + pub self_ref: Option, +} + +impl Lower for data::ImplData { + type Target = ImplData; + + fn lower(self, tcx: TyCtxt) -> ImplData { + ImplData { + id: make_def_id(self.id, &tcx.map), + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + trait_ref: self.trait_ref, + self_ref: self.self_ref, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct InheritanceData { + pub span: SpanData, + pub base_id: DefId, + pub deriv_id: DefId +} + +impl Lower for data::InheritanceData { + type Target = InheritanceData; + + fn lower(self, tcx: TyCtxt) -> InheritanceData { + InheritanceData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + base_id: self.base_id, + deriv_id: make_def_id(self.deriv_id, &tcx.map) + } + } +} + +/// Data about a macro declaration. +#[derive(Debug, RustcEncodable)] +pub struct MacroData { + pub span: SpanData, + pub name: String, + pub qualname: String, + pub docs: String, +} + +impl Lower for data::MacroData { + type Target = MacroData; + + fn lower(self, tcx: TyCtxt) -> MacroData { + MacroData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + qualname: self.qualname, + docs: self.docs, + } + } +} + +/// Data about a macro use. 
+#[derive(Debug, RustcEncodable)] +pub struct MacroUseData { + pub span: SpanData, + pub name: String, + pub qualname: String, + // Because macro expansion happens before ref-ids are determined, + // we use the callee span to reference the associated macro definition. + pub callee_span: SpanData, + pub scope: DefId, +} + +impl Lower for data::MacroUseData { + type Target = MacroUseData; + + fn lower(self, tcx: TyCtxt) -> MacroUseData { + MacroUseData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + qualname: self.qualname, + callee_span: SpanData::from_span(self.callee_span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + } + } +} + +/// Data about a method call. +#[derive(Debug, RustcEncodable)] +pub struct MethodCallData { + pub span: SpanData, + pub scope: DefId, + pub ref_id: Option, + pub decl_id: Option, +} + +impl Lower for data::MethodCallData { + type Target = MethodCallData; + + fn lower(self, tcx: TyCtxt) -> MethodCallData { + MethodCallData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + decl_id: self.decl_id, + } + } +} + +/// Data for method declarations (methods with a body are treated as functions). 
+#[derive(Clone, Debug, RustcEncodable)] +pub struct MethodData { + pub id: DefId, + pub name: String, + pub qualname: String, + pub span: SpanData, + pub scope: DefId, + pub value: String, + pub decl_id: Option, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, +} + +impl Lower for data::MethodData { + type Target = MethodData; + + fn lower(self, tcx: TyCtxt) -> MethodData { + MethodData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + scope: make_def_id(self.scope, &tcx.map), + id: make_def_id(self.id, &tcx.map), + qualname: self.qualname, + value: self.value, + decl_id: self.decl_id, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, + } + } +} + +/// Data for modules. +#[derive(Debug, RustcEncodable)] +pub struct ModData { + pub id: DefId, + pub name: String, + pub qualname: String, + pub span: SpanData, + pub scope: DefId, + pub filename: String, + pub items: Vec, + pub visibility: Visibility, + pub docs: String, +} + +impl Lower for data::ModData { + type Target = ModData; + + fn lower(self, tcx: TyCtxt) -> ModData { + ModData { + id: make_def_id(self.id, &tcx.map), + name: self.name, + qualname: self.qualname, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + filename: self.filename, + items: self.items.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, + } + } +} + +/// Data for a reference to a module. 
+#[derive(Debug, RustcEncodable)] +pub struct ModRefData { + pub span: SpanData, + pub scope: DefId, + pub ref_id: Option, + pub qualname: String +} + +impl Lower for data::ModRefData { + type Target = ModRefData; + + fn lower(self, tcx: TyCtxt) -> ModRefData { + ModRefData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + qualname: self.qualname, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct StructData { + pub span: SpanData, + pub name: String, + pub id: DefId, + pub ctor_id: DefId, + pub qualname: String, + pub scope: DefId, + pub value: String, + pub fields: Vec, + pub visibility: Visibility, + pub docs: String, +} + +impl Lower for data::StructData { + type Target = StructData; + + fn lower(self, tcx: TyCtxt) -> StructData { + StructData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + id: make_def_id(self.id, &tcx.map), + ctor_id: make_def_id(self.ctor_id, &tcx.map), + qualname: self.qualname, + scope: make_def_id(self.scope, &tcx.map), + value: self.value, + fields: self.fields.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct StructVariantData { + pub span: SpanData, + pub name: String, + pub id: DefId, + pub qualname: String, + pub type_value: String, + pub value: String, + pub scope: DefId, + pub parent: Option, + pub docs: String, +} + +impl Lower for data::StructVariantData { + type Target = StructVariantData; + + fn lower(self, tcx: TyCtxt) -> StructVariantData { + StructVariantData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + id: make_def_id(self.id, &tcx.map), + qualname: self.qualname, + type_value: self.type_value, + value: self.value, + scope: make_def_id(self.scope, &tcx.map), + parent: self.parent, + docs: self.docs, + } + } +} + +#[derive(Debug, RustcEncodable)] 
+pub struct TraitData { + pub span: SpanData, + pub name: String, + pub id: DefId, + pub qualname: String, + pub scope: DefId, + pub value: String, + pub items: Vec, + pub visibility: Visibility, + pub docs: String, +} + +impl Lower for data::TraitData { + type Target = TraitData; + + fn lower(self, tcx: TyCtxt) -> TraitData { + TraitData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + id: make_def_id(self.id, &tcx.map), + qualname: self.qualname, + scope: make_def_id(self.scope, &tcx.map), + value: self.value, + items: self.items.into_iter().map(|id| make_def_id(id, &tcx.map)).collect(), + visibility: self.visibility, + docs: self.docs, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct TupleVariantData { + pub span: SpanData, + pub id: DefId, + pub name: String, + pub qualname: String, + pub type_value: String, + pub value: String, + pub scope: DefId, + pub parent: Option, + pub docs: String, +} + +impl Lower for data::TupleVariantData { + type Target = TupleVariantData; + + fn lower(self, tcx: TyCtxt) -> TupleVariantData { + TupleVariantData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + id: make_def_id(self.id, &tcx.map), + name: self.name, + qualname: self.qualname, + type_value: self.type_value, + value: self.value, + scope: make_def_id(self.scope, &tcx.map), + parent: self.parent, + docs: self.docs, + } + } +} + +/// Data for a typedef. 
+#[derive(Debug, RustcEncodable)] +pub struct TypeDefData { + pub id: DefId, + pub name: String, + pub span: SpanData, + pub qualname: String, + pub value: String, + pub visibility: Visibility, + pub parent: Option, + pub docs: String, +} + +impl Lower for data::TypeDefData { + type Target = TypeDefData; + + fn lower(self, tcx: TyCtxt) -> TypeDefData { + TypeDefData { + id: make_def_id(self.id, &tcx.map), + name: self.name, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + qualname: self.qualname, + value: self.value, + visibility: self.visibility, + parent: self.parent, + docs: self.docs, + } + } +} + +/// Data for a reference to a type or trait. +#[derive(Clone, Debug, RustcEncodable)] +pub struct TypeRefData { + pub span: SpanData, + pub scope: DefId, + pub ref_id: Option, + pub qualname: String, +} + +impl Lower for data::TypeRefData { + type Target = TypeRefData; + + fn lower(self, tcx: TyCtxt) -> TypeRefData { + TypeRefData { + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + qualname: self.qualname, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct UseData { + pub id: DefId, + pub span: SpanData, + pub name: String, + pub mod_id: Option, + pub scope: DefId, + pub visibility: Visibility, +} + +impl Lower for data::UseData { + type Target = UseData; + + fn lower(self, tcx: TyCtxt) -> UseData { + UseData { + id: make_def_id(self.id, &tcx.map), + span: SpanData::from_span(self.span, tcx.sess.codemap()), + name: self.name, + mod_id: self.mod_id, + scope: make_def_id(self.scope, &tcx.map), + visibility: self.visibility, + } + } +} + +#[derive(Debug, RustcEncodable)] +pub struct UseGlobData { + pub id: DefId, + pub span: SpanData, + pub names: Vec, + pub scope: DefId, + pub visibility: Visibility, +} + +impl Lower for data::UseGlobData { + type Target = UseGlobData; + + fn lower(self, tcx: TyCtxt) -> UseGlobData { + UseGlobData { + id: make_def_id(self.id, &tcx.map), 
+ span: SpanData::from_span(self.span, tcx.sess.codemap()), + names: self.names, + scope: make_def_id(self.scope, &tcx.map), + visibility: self.visibility, + } + } +} + +/// Data for local and global variables (consts and statics). +#[derive(Debug, RustcEncodable)] +pub struct VariableData { + pub id: DefId, + pub name: String, + pub kind: data::VariableKind, + pub qualname: String, + pub span: SpanData, + pub scope: DefId, + pub value: String, + pub type_value: String, + pub parent: Option, + pub visibility: Visibility, + pub docs: String, +} + +impl Lower for data::VariableData { + type Target = VariableData; + + fn lower(self, tcx: TyCtxt) -> VariableData { + VariableData { + id: make_def_id(self.id, &tcx.map), + kind: self.kind, + name: self.name, + qualname: self.qualname, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + value: self.value, + type_value: self.type_value, + parent: self.parent, + visibility: self.visibility, + docs: self.docs, + } + } +} + +/// Data for the use of some item (e.g., the use of a local variable, which +/// will refer to that variables declaration (by ref_id)). +#[derive(Debug, RustcEncodable)] +pub struct VariableRefData { + pub name: String, + pub span: SpanData, + pub scope: DefId, + pub ref_id: DefId, +} + +impl Lower for data::VariableRefData { + type Target = VariableRefData; + + fn lower(self, tcx: TyCtxt) -> VariableRefData { + VariableRefData { + name: self.name, + span: SpanData::from_span(self.span, tcx.sess.codemap()), + scope: make_def_id(self.scope, &tcx.map), + ref_id: self.ref_id, + } + } +} diff --git a/src/librustc_save_analysis/json_api_dumper.rs b/src/librustc_save_analysis/json_api_dumper.rs new file mode 100644 index 0000000000000..d56aae18a7cd1 --- /dev/null +++ b/src/librustc_save_analysis/json_api_dumper.rs @@ -0,0 +1,415 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::io::Write; + +use rustc::hir::def_id::DefId; +use rustc_serialize::json::as_json; + +use external_data::*; +use data::{VariableKind, Visibility}; +use dump::Dump; +use super::Format; + + +// A dumper to dump a restricted set of JSON information, designed for use with +// libraries distributed without their source. Clients are likely to use type +// information here, and (for example) generate Rustdoc URLs, but don't need +// information for navigating the source of the crate. +// Relative to the regular JSON save-analysis info, this form is filtered to +// remove non-visible items, but includes some extra info for items (e.g., the +// parent field for finding the struct to which a field belongs). +pub struct JsonApiDumper<'b, W: Write + 'b> { + output: &'b mut W, + result: Analysis, +} + +impl<'b, W: Write> JsonApiDumper<'b, W> { + pub fn new(writer: &'b mut W) -> JsonApiDumper<'b, W> { + JsonApiDumper { output: writer, result: Analysis::new() } + } +} + +impl<'b, W: Write> Drop for JsonApiDumper<'b, W> { + fn drop(&mut self) { + if let Err(_) = write!(self.output, "{}", as_json(&self.result)) { + error!("Error writing output"); + } + } +} + +macro_rules! 
impl_fn { + ($fn_name: ident, $data_type: ident, $bucket: ident) => { + fn $fn_name(&mut self, data: $data_type) { + if let Some(datum) = From::from(data) { + self.result.$bucket.push(datum); + } + } + } +} + +impl<'b, W: Write + 'b> Dump for JsonApiDumper<'b, W> { + fn crate_prelude(&mut self, data: CratePreludeData) { + self.result.prelude = Some(data) + } + + impl_fn!(use_data, UseData, imports); + impl_fn!(use_glob, UseGlobData, imports); + + impl_fn!(enum_data, EnumData, defs); + impl_fn!(tuple_variant, TupleVariantData, defs); + impl_fn!(struct_variant, StructVariantData, defs); + impl_fn!(struct_data, StructData, defs); + impl_fn!(trait_data, TraitData, defs); + impl_fn!(function, FunctionData, defs); + impl_fn!(method, MethodData, defs); + impl_fn!(macro_data, MacroData, defs); + impl_fn!(mod_data, ModData, defs); + impl_fn!(typedef, TypeDefData, defs); + impl_fn!(variable, VariableData, defs); +} + +// FIXME methods. The defs have information about possible overriding and the +// refs have decl information (e.g., a trait method where we know the required +// method, but not the supplied method). In both cases, we are currently +// ignoring it. + +#[derive(Debug, RustcEncodable)] +struct Analysis { + kind: Format, + prelude: Option, + imports: Vec, + defs: Vec, + // These two fields are dummies so that clients can parse the two kinds of + // JSON data in the same way. + refs: Vec<()>, + macro_refs: Vec<()>, +} + +impl Analysis { + fn new() -> Analysis { + Analysis { + kind: Format::JsonApi, + prelude: None, + imports: vec![], + defs: vec![], + refs: vec![], + macro_refs: vec![], + } + } +} + +// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore +// we use our own Id which is the same, but without the newtype. 
+#[derive(Debug, RustcEncodable)] +struct Id { + krate: u32, + index: u32, +} + +impl From for Id { + fn from(id: DefId) -> Id { + Id { + krate: id.krate.as_u32(), + index: id.index.as_u32(), + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Import { + kind: ImportKind, + id: Id, + span: SpanData, + name: String, + value: String, +} + +#[derive(Debug, RustcEncodable)] +enum ImportKind { + Use, + GlobUse, +} + +impl From for Option { + fn from(data: UseData) -> Option { + match data.visibility { + Visibility::Public => Some(Import { + kind: ImportKind::Use, + id: From::from(data.id), + span: data.span, + name: data.name, + value: String::new(), + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: UseGlobData) -> Option { + match data.visibility { + Visibility::Public => Some(Import { + kind: ImportKind::GlobUse, + id: From::from(data.id), + span: data.span, + name: "*".to_owned(), + value: data.names.join(", "), + }), + _ => None, + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Def { + kind: DefKind, + id: Id, + span: SpanData, + name: String, + qualname: String, + value: String, + parent: Option, + children: Vec, + decl_id: Option, + docs: String, +} + +#[derive(Debug, RustcEncodable)] +enum DefKind { + // value = variant names + Enum, + // value = enum name + variant name + types + Tuple, + // value = [enum name +] name + fields + Struct, + // value = signature + Trait, + // value = type + generics + Function, + // value = type + generics + Method, + // No id, no value. + Macro, + // value = file_name + Mod, + // value = aliased type + Type, + // value = type and init expression (for all variable kinds). 
+ Static, + Const, + Field, +} + +impl From for Option { + fn from(data: EnumData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Enum, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: None, + children: data.variants.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} + +impl From for Option { + fn from(data: TupleVariantData) -> Option { + Some(Def { + kind: DefKind::Tuple, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: data.parent.map(|id| From::from(id)), + children: vec![], + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data: StructVariantData) -> Option { + Some(Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: data.parent.map(|id| From::from(id)), + children: vec![], + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data: StructData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + parent: None, + children: data.fields.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: TraitData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Trait, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + parent: None, + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: 
FunctionData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Function, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: MethodData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Method, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: data.decl_id.map(|id| From::from(id)), + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: MacroData) -> Option { + Some(Def { + kind: DefKind::Macro, + id: From::from(null_def_id()), + span: data.span, + name: data.name, + qualname: data.qualname, + value: String::new(), + children: vec![], + parent: None, + decl_id: None, + docs: data.docs, + }) + } +} +impl From for Option { + fn from(data:ModData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Mod, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.filename, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + parent: None, + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: TypeDefData) -> Option { + match data.visibility { + Visibility::Public => Some(Def { + kind: DefKind::Type, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: String::new(), + }), + _ => None, + } + } +} +impl From for Option { + fn from(data: VariableData) -> Option { + match data.visibility 
{ + Visibility::Public => Some(Def { + kind: match data.kind { + VariableKind::Static => DefKind::Static, + VariableKind::Const => DefKind::Const, + VariableKind::Local => { return None } + VariableKind::Field => DefKind::Field, + }, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + parent: data.parent.map(|id| From::from(id)), + decl_id: None, + docs: data.docs, + }), + _ => None, + } + } +} diff --git a/src/librustc_save_analysis/json_dumper.rs b/src/librustc_save_analysis/json_dumper.rs new file mode 100644 index 0000000000000..f97272ad54409 --- /dev/null +++ b/src/librustc_save_analysis/json_dumper.rs @@ -0,0 +1,498 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::io::Write; + +use rustc::hir::def_id::DefId; +use rustc_serialize::json::as_json; + +use external_data::*; +use data::VariableKind; +use dump::Dump; +use super::Format; + +pub struct JsonDumper<'b, W: Write + 'b> { + output: &'b mut W, + result: Analysis, +} + +impl<'b, W: Write> JsonDumper<'b, W> { + pub fn new(writer: &'b mut W) -> JsonDumper<'b, W> { + JsonDumper { output: writer, result: Analysis::new() } + } +} + +impl<'b, W: Write> Drop for JsonDumper<'b, W> { + fn drop(&mut self) { + if let Err(_) = write!(self.output, "{}", as_json(&self.result)) { + error!("Error writing output"); + } + } +} + +macro_rules! 
impl_fn { + ($fn_name: ident, $data_type: ident, $bucket: ident) => { + fn $fn_name(&mut self, data: $data_type) { + self.result.$bucket.push(From::from(data)); + } + } +} + +impl<'b, W: Write + 'b> Dump for JsonDumper<'b, W> { + fn crate_prelude(&mut self, data: CratePreludeData) { + self.result.prelude = Some(data) + } + + impl_fn!(extern_crate, ExternCrateData, imports); + impl_fn!(use_data, UseData, imports); + impl_fn!(use_glob, UseGlobData, imports); + + impl_fn!(enum_data, EnumData, defs); + impl_fn!(tuple_variant, TupleVariantData, defs); + impl_fn!(struct_variant, StructVariantData, defs); + impl_fn!(struct_data, StructData, defs); + impl_fn!(trait_data, TraitData, defs); + impl_fn!(function, FunctionData, defs); + impl_fn!(method, MethodData, defs); + impl_fn!(macro_data, MacroData, defs); + impl_fn!(typedef, TypeDefData, defs); + impl_fn!(variable, VariableData, defs); + + impl_fn!(function_ref, FunctionRefData, refs); + impl_fn!(function_call, FunctionCallData, refs); + impl_fn!(method_call, MethodCallData, refs); + impl_fn!(mod_ref, ModRefData, refs); + impl_fn!(type_ref, TypeRefData, refs); + impl_fn!(variable_ref, VariableRefData, refs); + + impl_fn!(macro_use, MacroUseData, macro_refs); + + fn mod_data(&mut self, data: ModData) { + let id: Id = From::from(data.id); + let mut def = Def { + kind: DefKind::Mod, + id: id, + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.filename, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + }; + if def.span.file_name != def.value { + // If the module is an out-of-line defintion, then we'll make the + // defintion the first character in the module's file and turn the + // the declaration into a reference to it. 
+ let rf = Ref { + kind: RefKind::Mod, + span: def.span, + ref_id: id, + }; + self.result.refs.push(rf); + def.span = SpanData { + file_name: def.value.clone(), + byte_start: 0, + byte_end: 0, + line_start: 1, + line_end: 1, + column_start: 1, + column_end: 1, + } + } + + self.result.defs.push(def); + } + + // FIXME store this instead of throwing it away. + fn impl_data(&mut self, _data: ImplData) {} + fn inheritance(&mut self, _data: InheritanceData) {} +} + +// FIXME do we want to change ExternalData to this mode? It will break DXR. +// FIXME methods. The defs have information about possible overriding and the +// refs have decl information (e.g., a trait method where we know the required +// method, but not the supplied method). In both cases, we are currently +// ignoring it. + +#[derive(Debug, RustcEncodable)] +struct Analysis { + kind: Format, + prelude: Option, + imports: Vec, + defs: Vec, + refs: Vec, + macro_refs: Vec, +} + +impl Analysis { + fn new() -> Analysis { + Analysis { + kind: Format::Json, + prelude: None, + imports: vec![], + defs: vec![], + refs: vec![], + macro_refs: vec![], + } + } +} + +// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore +// we use our own Id which is the same, but without the newtype. 
+#[derive(Clone, Copy, Debug, RustcEncodable)] +struct Id { + krate: u32, + index: u32, +} + +impl From for Id { + fn from(id: DefId) -> Id { + Id { + krate: id.krate.as_u32(), + index: id.index.as_u32(), + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Import { + kind: ImportKind, + ref_id: Option, + span: SpanData, + name: String, + value: String, +} + +#[derive(Debug, RustcEncodable)] +enum ImportKind { + ExternCrate, + Use, + GlobUse, +} + +impl From for Import { + fn from(data: ExternCrateData) -> Import { + Import { + kind: ImportKind::ExternCrate, + ref_id: None, + span: data.span, + name: data.name, + value: String::new(), + } + } +} +impl From for Import { + fn from(data: UseData) -> Import { + Import { + kind: ImportKind::Use, + ref_id: data.mod_id.map(|id| From::from(id)), + span: data.span, + name: data.name, + value: String::new(), + } + } +} +impl From for Import { + fn from(data: UseGlobData) -> Import { + Import { + kind: ImportKind::GlobUse, + ref_id: None, + span: data.span, + name: "*".to_owned(), + value: data.names.join(", "), + } + } +} + +#[derive(Debug, RustcEncodable)] +struct Def { + kind: DefKind, + id: Id, + span: SpanData, + name: String, + qualname: String, + value: String, + children: Vec, + decl_id: Option, + docs: String, +} + +#[derive(Debug, RustcEncodable)] +enum DefKind { + // value = variant names + Enum, + // value = enum name + variant name + types + Tuple, + // value = [enum name +] name + fields + Struct, + // value = signature + Trait, + // value = type + generics + Function, + // value = type + generics + Method, + // No id, no value. + Macro, + // value = file_name + Mod, + // value = aliased type + Type, + // value = type and init expression (for all variable kinds). 
+ Local, + Static, + Const, + Field, +} + +impl From for Def { + fn from(data: EnumData) -> Def { + Def { + kind: DefKind::Enum, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: data.variants.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + } + } +} + +impl From for Def { + fn from(data: TupleVariantData) -> Def { + Def { + kind: DefKind::Tuple, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + decl_id: None, + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: StructVariantData) -> Def { + Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + decl_id: None, + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: StructData) -> Def { + Def { + kind: DefKind::Struct, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: data.fields.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: TraitData) -> Def { + Def { + kind: DefKind::Trait, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: data.items.into_iter().map(|id| From::from(id)).collect(), + decl_id: None, + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: FunctionData) -> Def { + Def { + kind: DefKind::Function, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + decl_id: None, + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: MethodData) -> Def { + Def { + kind: DefKind::Method, + id: From::from(data.id), + span: data.span, + name: data.name, + 
qualname: data.qualname, + value: data.value, + children: vec![], + decl_id: data.decl_id.map(|id| From::from(id)), + docs: data.docs, + } + } +} +impl From for Def { + fn from(data: MacroData) -> Def { + Def { + kind: DefKind::Macro, + id: From::from(null_def_id()), + span: data.span, + name: data.name, + qualname: data.qualname, + value: String::new(), + children: vec![], + decl_id: None, + docs: data.docs, + } + } +} + +impl From for Def { + fn from(data: TypeDefData) -> Def { + Def { + kind: DefKind::Type, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.value, + children: vec![], + decl_id: None, + docs: String::new(), + } + } +} +impl From for Def { + fn from(data: VariableData) -> Def { + Def { + kind: match data.kind { + VariableKind::Static => DefKind::Static, + VariableKind::Const => DefKind::Const, + VariableKind::Local => DefKind::Local, + VariableKind::Field => DefKind::Field, + }, + id: From::from(data.id), + span: data.span, + name: data.name, + qualname: data.qualname, + value: data.type_value, + children: vec![], + decl_id: None, + docs: data.docs, + } + } +} + +#[derive(Debug, RustcEncodable)] +enum RefKind { + Function, + Mod, + Type, + Variable, +} + +#[derive(Debug, RustcEncodable)] +struct Ref { + kind: RefKind, + span: SpanData, + ref_id: Id, +} + +impl From for Ref { + fn from(data: FunctionRefData) -> Ref { + Ref { + kind: RefKind::Function, + span: data.span, + ref_id: From::from(data.ref_id), + } + } +} +impl From for Ref { + fn from(data: FunctionCallData) -> Ref { + Ref { + kind: RefKind::Function, + span: data.span, + ref_id: From::from(data.ref_id), + } + } +} +impl From for Ref { + fn from(data: MethodCallData) -> Ref { + Ref { + kind: RefKind::Function, + span: data.span, + ref_id: From::from(data.ref_id.or(data.decl_id).unwrap_or(null_def_id())), + } + } +} +impl From for Ref { + fn from(data: ModRefData) -> Ref { + Ref { + kind: RefKind::Mod, + span: data.span, + ref_id: 
From::from(data.ref_id.unwrap_or(null_def_id())), + } + } +} +impl From for Ref { + fn from(data: TypeRefData) -> Ref { + Ref { + kind: RefKind::Type, + span: data.span, + ref_id: From::from(data.ref_id.unwrap_or(null_def_id())), + } + } +} +impl From for Ref { + fn from(data: VariableRefData) -> Ref { + Ref { + kind: RefKind::Variable, + span: data.span, + ref_id: From::from(data.ref_id), + } + } +} + +#[derive(Debug, RustcEncodable)] +struct MacroRef { + span: SpanData, + qualname: String, + callee_span: SpanData, +} + +impl From for MacroRef { + fn from(data: MacroUseData) -> MacroRef { + MacroRef { + span: data.span, + qualname: data.qualname, + callee_span: data.callee_span, + } + } +} diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs new file mode 100644 index 0000000000000..33b9f8c9034e8 --- /dev/null +++ b/src/librustc_save_analysis/lib.rs @@ -0,0 +1,894 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![crate_name = "rustc_save_analysis"] +#![unstable(feature = "rustc_private", issue = "27812")] +#![crate_type = "dylib"] +#![crate_type = "rlib"] +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] + +#![feature(custom_attribute)] +#![allow(unused_attributes)] +#![feature(rustc_private)] +#![feature(staged_api)] + +#[macro_use] extern crate rustc; + +#[macro_use] extern crate log; +#[macro_use] extern crate syntax; +extern crate serialize as rustc_serialize; +extern crate syntax_pos; + + +mod csv_dumper; +mod json_api_dumper; +mod json_dumper; +mod data; +mod dump; +mod dump_visitor; +pub mod external_data; +#[macro_use] +pub mod span_utils; + +use rustc::hir; +use rustc::hir::def::Def; +use rustc::hir::map::Node; +use rustc::hir::def_id::DefId; +use rustc::session::config::CrateType::CrateTypeExecutable; +use rustc::ty::{self, TyCtxt}; + +use std::env; +use std::fs::{self, File}; +use std::path::{Path, PathBuf}; + +use syntax::ast::{self, NodeId, PatKind, Attribute, CRATE_NODE_ID}; +use syntax::parse::lexer::comments::strip_doc_comment_decoration; +use syntax::parse::token; +use syntax::symbol::{Symbol, keywords}; +use syntax::visit::{self, Visitor}; +use syntax::print::pprust::{ty_to_string, arg_to_string}; +use syntax::codemap::MacroAttribute; +use syntax_pos::*; + +pub use self::csv_dumper::CsvDumper; +pub use self::json_api_dumper::JsonApiDumper; +pub use self::json_dumper::JsonDumper; +pub use self::data::*; +pub use self::external_data::make_def_id; +pub use self::dump::Dump; +pub use self::dump_visitor::DumpVisitor; +use self::span_utils::SpanUtils; + +// FIXME this is legacy code and should be removed +pub mod recorder { + pub use self::Row::*; + + #[derive(Copy, Clone, Debug, Eq, PartialEq)] + pub enum Row { + TypeRef, + ModRef, + VarRef, + FnRef, + } +} + 
+pub struct SaveContext<'l, 'tcx: 'l> { + tcx: TyCtxt<'l, 'tcx, 'tcx>, + analysis: &'l ty::CrateAnalysis<'tcx>, + span_utils: SpanUtils<'tcx>, +} + +macro_rules! option_try( + ($e:expr) => (match $e { Some(e) => e, None => return None }) +); + +impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { + pub fn new(tcx: TyCtxt<'l, 'tcx, 'tcx>, + analysis: &'l ty::CrateAnalysis<'tcx>) + -> SaveContext<'l, 'tcx> { + let span_utils = SpanUtils::new(&tcx.sess); + SaveContext::from_span_utils(tcx, analysis, span_utils) + } + + pub fn from_span_utils(tcx: TyCtxt<'l, 'tcx, 'tcx>, + analysis: &'l ty::CrateAnalysis<'tcx>, + span_utils: SpanUtils<'tcx>) + -> SaveContext<'l, 'tcx> { + SaveContext { + tcx: tcx, + analysis: analysis, + span_utils: span_utils, + } + } + + // List external crates used by the current crate. + pub fn get_external_crates(&self) -> Vec { + let mut result = Vec::new(); + + for n in self.tcx.sess.cstore.crates() { + let span = match self.tcx.sess.cstore.extern_crate(n) { + Some(ref c) => c.span, + None => { + debug!("Skipping crate {}, no data", n); + continue; + } + }; + result.push(CrateData { + name: self.tcx.sess.cstore.crate_name(n).to_string(), + number: n.as_u32(), + span: span, + }); + } + + result + } + + pub fn get_item_data(&self, item: &ast::Item) -> Option { + match item.node { + ast::ItemKind::Fn(ref decl, .., ref generics, _) => { + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Fn); + filter!(self.span_utils, sub_span, item.span, None); + + + Some(Data::FunctionData(FunctionData { + id: item.id, + name: item.ident.to_string(), + qualname: qualname, + declaration: None, + span: sub_span.unwrap(), + scope: self.enclosing_scope(item.id), + value: make_signature(decl, generics), + visibility: From::from(&item.vis), + parent: None, + docs: docs_for_attrs(&item.attrs), + })) + } + ast::ItemKind::Static(ref typ, mt, ref expr) => { + let qualname = format!("::{}", 
self.tcx.node_path_str(item.id)); + + // If the variable is immutable, save the initialising expression. + let (value, keyword) = match mt { + ast::Mutability::Mutable => (String::from(""), keywords::Mut), + ast::Mutability::Immutable => { + (self.span_utils.snippet(expr.span), keywords::Static) + }, + }; + + let sub_span = self.span_utils.sub_span_after_keyword(item.span, keyword); + filter!(self.span_utils, sub_span, item.span, None); + Some(Data::VariableData(VariableData { + id: item.id, + kind: VariableKind::Static, + name: item.ident.to_string(), + qualname: qualname, + span: sub_span.unwrap(), + scope: self.enclosing_scope(item.id), + parent: None, + value: value, + type_value: ty_to_string(&typ), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + })) + } + ast::ItemKind::Const(ref typ, ref expr) => { + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Const); + filter!(self.span_utils, sub_span, item.span, None); + Some(Data::VariableData(VariableData { + id: item.id, + kind: VariableKind::Const, + name: item.ident.to_string(), + qualname: qualname, + span: sub_span.unwrap(), + scope: self.enclosing_scope(item.id), + parent: None, + value: self.span_utils.snippet(expr.span), + type_value: ty_to_string(&typ), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + })) + } + ast::ItemKind::Mod(ref m) => { + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + + let cm = self.tcx.sess.codemap(); + let filename = cm.span_to_filename(m.inner); + + let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Mod); + filter!(self.span_utils, sub_span, item.span, None); + Some(Data::ModData(ModData { + id: item.id, + name: item.ident.to_string(), + qualname: qualname, + span: sub_span.unwrap(), + scope: self.enclosing_scope(item.id), + filename: filename, + items: m.items.iter().map(|i| i.id).collect(), 
+ visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + })) + } + ast::ItemKind::Enum(ref def, _) => { + let name = item.ident.to_string(); + let qualname = format!("::{}", self.tcx.node_path_str(item.id)); + let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Enum); + filter!(self.span_utils, sub_span, item.span, None); + let variants_str = def.variants.iter() + .map(|v| v.node.name.to_string()) + .collect::>() + .join(", "); + let val = format!("{}::{{{}}}", name, variants_str); + Some(Data::EnumData(EnumData { + id: item.id, + name: name, + value: val, + span: sub_span.unwrap(), + qualname: qualname, + scope: self.enclosing_scope(item.id), + variants: def.variants.iter().map(|v| v.node.data.id()).collect(), + visibility: From::from(&item.vis), + docs: docs_for_attrs(&item.attrs), + })) + } + ast::ItemKind::Impl(.., ref trait_ref, ref typ, _) => { + let mut type_data = None; + let sub_span; + + let parent = self.enclosing_scope(item.id); + + match typ.node { + // Common case impl for a struct or something basic. + ast::TyKind::Path(None, ref path) => { + sub_span = self.span_utils.sub_span_for_type_name(path.span); + filter!(self.span_utils, sub_span, path.span, None); + type_data = self.lookup_ref_id(typ.id).map(|id| { + TypeRefData { + span: sub_span.unwrap(), + scope: parent, + ref_id: Some(id), + qualname: String::new() // FIXME: generate the real qualname + } + }); + } + _ => { + // Less useful case, impl for a compound type. 
+ let span = typ.span; + sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span)); + } + } + + let trait_data = trait_ref.as_ref() + .and_then(|tr| self.get_trait_ref_data(tr, parent)); + + filter!(self.span_utils, sub_span, typ.span, None); + Some(Data::ImplData(ImplData2 { + id: item.id, + span: sub_span.unwrap(), + scope: parent, + trait_ref: trait_data, + self_ref: type_data, + })) + } + _ => { + // FIXME + bug!(); + } + } + } + + pub fn get_field_data(&self, field: &ast::StructField, + scope: NodeId) -> Option { + if let Some(ident) = field.ident { + let qualname = format!("::{}::{}", self.tcx.node_path_str(scope), ident); + let def_id = self.tcx.map.local_def_id(field.id); + let typ = self.tcx.item_type(def_id).to_string(); + let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon); + filter!(self.span_utils, sub_span, field.span, None); + Some(VariableData { + id: field.id, + kind: VariableKind::Field, + name: ident.to_string(), + qualname: qualname, + span: sub_span.unwrap(), + scope: scope, + parent: Some(make_def_id(scope, &self.tcx.map)), + value: "".to_owned(), + type_value: typ, + visibility: From::from(&field.vis), + docs: docs_for_attrs(&field.attrs), + }) + } else { + None + } + } + + // FIXME would be nice to take a MethodItem here, but the ast provides both + // trait and impl flavours, so the caller must do the disassembly. + pub fn get_method_data(&self, id: ast::NodeId, + name: ast::Name, span: Span) -> Option { + // The qualname for a method is the trait name or name of the struct in an impl in + // which the method is declared in, followed by the method's name. 
+ let (qualname, parent_scope, decl_id, vis, docs) = + match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) { + Some(impl_id) => match self.tcx.map.get_if_local(impl_id) { + Some(Node::NodeItem(item)) => { + match item.node { + hir::ItemImpl(.., ref ty, _) => { + let mut result = String::from("<"); + result.push_str(&rustc::hir::print::ty_to_string(&ty)); + + let trait_id = self.tcx.trait_id_of_impl(impl_id); + let mut decl_id = None; + if let Some(def_id) = trait_id { + result.push_str(" as "); + result.push_str(&self.tcx.item_path_str(def_id)); + self.tcx.associated_items(def_id) + .find(|item| item.name == name) + .map(|item| decl_id = Some(item.def_id)); + } + result.push_str(">"); + + (result, trait_id, decl_id, + From::from(&item.vis), + docs_for_attrs(&item.attrs)) + } + _ => { + span_bug!(span, + "Container {:?} for method {} not an impl?", + impl_id, + id); + } + } + } + r => { + span_bug!(span, + "Container {:?} for method {} is not a node item {:?}", + impl_id, + id, + r); + } + }, + None => match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) { + Some(def_id) => { + match self.tcx.map.get_if_local(def_id) { + Some(Node::NodeItem(item)) => { + (format!("::{}", self.tcx.item_path_str(def_id)), + Some(def_id), None, + From::from(&item.vis), + docs_for_attrs(&item.attrs)) + } + r => { + span_bug!(span, + "Could not find container {:?} for \ + method {}, got {:?}", + def_id, + id, + r); + } + } + } + None => { + span_bug!(span, "Could not find container for method {}", id); + } + }, + }; + + let qualname = format!("{}::{}", qualname, name); + + let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn); + filter!(self.span_utils, sub_span, span, None); + Some(FunctionData { + id: id, + name: name.to_string(), + qualname: qualname, + declaration: decl_id, + span: sub_span.unwrap(), + scope: self.enclosing_scope(id), + // FIXME you get better data here by using the visitor. 
+ value: String::new(), + visibility: vis, + parent: parent_scope, + docs: docs, + }) + } + + pub fn get_trait_ref_data(&self, + trait_ref: &ast::TraitRef, + parent: NodeId) + -> Option { + self.lookup_ref_id(trait_ref.ref_id).and_then(|def_id| { + let span = trait_ref.path.span; + let sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span)); + filter!(self.span_utils, sub_span, span, None); + Some(TypeRefData { + span: sub_span.unwrap(), + scope: parent, + ref_id: Some(def_id), + qualname: String::new() // FIXME: generate the real qualname + }) + }) + } + + pub fn get_expr_data(&self, expr: &ast::Expr) -> Option { + let hir_node = self.tcx.map.expect_expr(expr.id); + let ty = self.tcx.tables().expr_ty_adjusted_opt(&hir_node); + if ty.is_none() || ty.unwrap().sty == ty::TyError { + return None; + } + match expr.node { + ast::ExprKind::Field(ref sub_ex, ident) => { + let hir_node = match self.tcx.map.find(sub_ex.id) { + Some(Node::NodeExpr(expr)) => expr, + _ => { + debug!("Missing or weird node for sub-expression {} in {:?}", + sub_ex.id, expr); + return None; + } + }; + match self.tcx.tables().expr_ty_adjusted(&hir_node).sty { + ty::TyAdt(def, _) if !def.is_enum() => { + let f = def.struct_variant().field_named(ident.node.name); + let sub_span = self.span_utils.span_for_last_ident(expr.span); + filter!(self.span_utils, sub_span, expr.span, None); + return Some(Data::VariableRefData(VariableRefData { + name: ident.node.to_string(), + span: sub_span.unwrap(), + scope: self.enclosing_scope(expr.id), + ref_id: f.did, + })); + } + _ => { + debug!("Expected struct or union type, found {:?}", ty); + None + } + } + } + ast::ExprKind::Struct(ref path, ..) 
=> { + match self.tcx.tables().expr_ty_adjusted(&hir_node).sty { + ty::TyAdt(def, _) if !def.is_enum() => { + let sub_span = self.span_utils.span_for_last_ident(path.span); + filter!(self.span_utils, sub_span, path.span, None); + Some(Data::TypeRefData(TypeRefData { + span: sub_span.unwrap(), + scope: self.enclosing_scope(expr.id), + ref_id: Some(def.did), + qualname: String::new() // FIXME: generate the real qualname + })) + } + _ => { + // FIXME ty could legitimately be an enum, but then we will fail + // later if we try to look up the fields. + debug!("expected struct or union, found {:?}", ty); + None + } + } + } + ast::ExprKind::MethodCall(..) => { + let method_call = ty::MethodCall::expr(expr.id); + let method_id = self.tcx.tables().method_map[&method_call].def_id; + let (def_id, decl_id) = match self.tcx.associated_item(method_id).container { + ty::ImplContainer(_) => (Some(method_id), None), + ty::TraitContainer(_) => (None, Some(method_id)), + }; + let sub_span = self.span_utils.sub_span_for_meth_name(expr.span); + filter!(self.span_utils, sub_span, expr.span, None); + let parent = self.enclosing_scope(expr.id); + Some(Data::MethodCallData(MethodCallData { + span: sub_span.unwrap(), + scope: parent, + ref_id: def_id, + decl_id: decl_id, + })) + } + ast::ExprKind::Path(_, ref path) => { + self.get_path_data(expr.id, path) + } + _ => { + // FIXME + bug!(); + } + } + } + + pub fn get_path_def(&self, id: NodeId) -> Def { + match self.tcx.map.get(id) { + Node::NodeTraitRef(tr) => tr.path.def, + + Node::NodeItem(&hir::Item { node: hir::ItemUse(ref path, _), .. }) | + Node::NodeVisibility(&hir::Visibility::Restricted { ref path, .. }) => path.def, + + Node::NodeExpr(&hir::Expr { node: hir::ExprPath(ref qpath), .. }) | + Node::NodeExpr(&hir::Expr { node: hir::ExprStruct(ref qpath, ..), .. }) | + Node::NodePat(&hir::Pat { node: hir::PatKind::Path(ref qpath), .. }) | + Node::NodePat(&hir::Pat { node: hir::PatKind::Struct(ref qpath, ..), .. 
}) | + Node::NodePat(&hir::Pat { node: hir::PatKind::TupleStruct(ref qpath, ..), .. }) => { + self.tcx.tables().qpath_def(qpath, id) + } + + Node::NodeLocal(&hir::Pat { node: hir::PatKind::Binding(_, def_id, ..), .. }) => { + Def::Local(def_id) + } + + Node::NodeTy(&hir::Ty { node: hir::TyPath(ref qpath), .. }) => { + match *qpath { + hir::QPath::Resolved(_, ref path) => path.def, + hir::QPath::TypeRelative(..) => { + if let Some(ty) = self.analysis.hir_ty_to_ty.get(&id) { + if let ty::TyProjection(proj) = ty.sty { + for item in self.tcx.associated_items(proj.trait_ref.def_id) { + if item.kind == ty::AssociatedKind::Type { + if item.name == proj.item_name { + return Def::AssociatedTy(item.def_id); + } + } + } + } + } + Def::Err + } + } + } + + _ => Def::Err + } + } + + pub fn get_path_data(&self, id: NodeId, path: &ast::Path) -> Option { + let def = self.get_path_def(id); + let sub_span = self.span_utils.span_for_last_ident(path.span); + filter!(self.span_utils, sub_span, path.span, None); + match def { + Def::Upvar(..) | + Def::Local(..) | + Def::Static(..) | + Def::Const(..) | + Def::AssociatedConst(..) | + Def::StructCtor(..) | + Def::VariantCtor(..) => { + Some(Data::VariableRefData(VariableRefData { + name: self.span_utils.snippet(sub_span.unwrap()), + span: sub_span.unwrap(), + scope: self.enclosing_scope(id), + ref_id: def.def_id(), + })) + } + Def::Struct(def_id) | + Def::Variant(def_id, ..) 
| + Def::Union(def_id) | + Def::Enum(def_id) | + Def::TyAlias(def_id) | + Def::AssociatedTy(def_id) | + Def::Trait(def_id) | + Def::TyParam(def_id) => { + Some(Data::TypeRefData(TypeRefData { + span: sub_span.unwrap(), + ref_id: Some(def_id), + scope: self.enclosing_scope(id), + qualname: String::new() // FIXME: generate the real qualname + })) + } + Def::Method(decl_id) => { + let sub_span = self.span_utils.sub_span_for_meth_name(path.span); + filter!(self.span_utils, sub_span, path.span, None); + let def_id = if decl_id.is_local() { + let ti = self.tcx.associated_item(decl_id); + self.tcx.associated_items(ti.container.id()) + .find(|item| item.name == ti.name && item.defaultness.has_value()) + .map(|item| item.def_id) + } else { + None + }; + Some(Data::MethodCallData(MethodCallData { + span: sub_span.unwrap(), + scope: self.enclosing_scope(id), + ref_id: def_id, + decl_id: Some(decl_id), + })) + } + Def::Fn(def_id) => { + Some(Data::FunctionCallData(FunctionCallData { + ref_id: def_id, + span: sub_span.unwrap(), + scope: self.enclosing_scope(id), + })) + } + Def::Mod(def_id) => { + Some(Data::ModRefData(ModRefData { + ref_id: Some(def_id), + span: sub_span.unwrap(), + scope: self.enclosing_scope(id), + qualname: String::new() // FIXME: generate the real qualname + })) + } + Def::PrimTy(..) | + Def::SelfTy(..) | + Def::Label(..) | + Def::Macro(..) | + Def::Err => None, + } + } + + pub fn get_field_ref_data(&self, + field_ref: &ast::Field, + variant: &ty::VariantDef, + parent: NodeId) + -> Option { + let f = variant.field_named(field_ref.ident.node.name); + // We don't really need a sub-span here, but no harm done + let sub_span = self.span_utils.span_for_last_ident(field_ref.ident.span); + filter!(self.span_utils, sub_span, field_ref.ident.span, None); + Some(VariableRefData { + name: field_ref.ident.node.to_string(), + span: sub_span.unwrap(), + scope: parent, + ref_id: f.did, + }) + } + + /// Attempt to return MacroUseData for any AST node. 
+ /// + /// For a given piece of AST defined by the supplied Span and NodeId, + /// returns None if the node is not macro-generated or the span is malformed, + /// else uses the expansion callsite and callee to return some MacroUseData. + pub fn get_macro_use_data(&self, span: Span, id: NodeId) -> Option { + if !generated_code(span) { + return None; + } + // Note we take care to use the source callsite/callee, to handle + // nested expansions and ensure we only generate data for source-visible + // macro uses. + let callsite = self.tcx.sess.codemap().source_callsite(span); + let callee = self.tcx.sess.codemap().source_callee(span); + let callee = option_try!(callee); + let callee_span = option_try!(callee.span); + + // Ignore attribute macros, their spans are usually mangled + if let MacroAttribute(_) = callee.format { + return None; + } + + // If the callee is an imported macro from an external crate, need to get + // the source span and name from the session, as their spans are localized + // when read in, and no longer correspond to the source. + if let Some(mac) = self.tcx.sess.imported_macro_spans.borrow().get(&callee_span) { + let &(ref mac_name, mac_span) = mac; + return Some(MacroUseData { + span: callsite, + name: mac_name.clone(), + callee_span: mac_span, + scope: self.enclosing_scope(id), + imported: true, + qualname: String::new()// FIXME: generate the real qualname + }); + } + + Some(MacroUseData { + span: callsite, + name: callee.name().to_string(), + callee_span: callee_span, + scope: self.enclosing_scope(id), + imported: false, + qualname: String::new() // FIXME: generate the real qualname + }) + } + + pub fn get_data_for_id(&self, _id: &NodeId) -> Data { + // FIXME + bug!(); + } + + fn lookup_ref_id(&self, ref_id: NodeId) -> Option { + match self.get_path_def(ref_id) { + Def::PrimTy(_) | Def::SelfTy(..) 
| Def::Err => None, + def => Some(def.def_id()), + } + } + + #[inline] + pub fn enclosing_scope(&self, id: NodeId) -> NodeId { + self.tcx.map.get_enclosing_scope(id).unwrap_or(CRATE_NODE_ID) + } +} + +fn make_signature(decl: &ast::FnDecl, generics: &ast::Generics) -> String { + let mut sig = "fn ".to_owned(); + if !generics.lifetimes.is_empty() || !generics.ty_params.is_empty() { + sig.push('<'); + sig.push_str(&generics.lifetimes.iter() + .map(|l| l.lifetime.name.to_string()) + .collect::>() + .join(", ")); + if !generics.lifetimes.is_empty() { + sig.push_str(", "); + } + sig.push_str(&generics.ty_params.iter() + .map(|l| l.ident.to_string()) + .collect::>() + .join(", ")); + sig.push_str("> "); + } + sig.push('('); + sig.push_str(&decl.inputs.iter().map(arg_to_string).collect::>().join(", ")); + sig.push(')'); + match decl.output { + ast::FunctionRetTy::Default(_) => sig.push_str(" -> ()"), + ast::FunctionRetTy::Ty(ref t) => sig.push_str(&format!(" -> {}", ty_to_string(t))), + } + + sig +} + +// An AST visitor for collecting paths from patterns. +struct PathCollector { + // The Row field identifies the kind of pattern. + collected_paths: Vec<(NodeId, ast::Path, ast::Mutability, recorder::Row)>, +} + +impl PathCollector { + fn new() -> PathCollector { + PathCollector { collected_paths: vec![] } + } +} + +impl Visitor for PathCollector { + fn visit_pat(&mut self, p: &ast::Pat) { + match p.node { + PatKind::Struct(ref path, ..) => { + self.collected_paths.push((p.id, path.clone(), + ast::Mutability::Mutable, recorder::TypeRef)); + } + PatKind::TupleStruct(ref path, ..) 
| + PatKind::Path(_, ref path) => { + self.collected_paths.push((p.id, path.clone(), + ast::Mutability::Mutable, recorder::VarRef)); + } + PatKind::Ident(bm, ref path1, _) => { + debug!("PathCollector, visit ident in pat {}: {:?} {:?}", + path1.node, + p.span, + path1.span); + let immut = match bm { + // Even if the ref is mut, you can't change the ref, only + // the data pointed at, so showing the initialising expression + // is still worthwhile. + ast::BindingMode::ByRef(_) => ast::Mutability::Immutable, + ast::BindingMode::ByValue(mt) => mt, + }; + // collect path for either visit_local or visit_arm + let path = ast::Path::from_ident(path1.span, path1.node); + self.collected_paths.push((p.id, path, immut, recorder::VarRef)); + } + _ => {} + } + visit::walk_pat(self, p); + } +} + +fn docs_for_attrs(attrs: &[Attribute]) -> String { + let doc = Symbol::intern("doc"); + let mut result = String::new(); + + for attr in attrs { + if attr.name() == doc { + if let Some(val) = attr.value_str() { + if attr.is_sugared_doc { + result.push_str(&strip_doc_comment_decoration(&val.as_str())); + } else { + result.push_str(&val.as_str()); + } + result.push('\n'); + } + } + } + + result +} + +#[derive(Clone, Copy, Debug, RustcEncodable)] +pub enum Format { + Csv, + Json, + JsonApi, +} + +impl Format { + fn extension(&self) -> &'static str { + match *self { + Format::Csv => ".csv", + Format::Json | Format::JsonApi => ".json", + } + } +} + +pub fn process_crate<'l, 'tcx>(tcx: TyCtxt<'l, 'tcx, 'tcx>, + krate: &ast::Crate, + analysis: &'l ty::CrateAnalysis<'tcx>, + cratename: &str, + odir: Option<&Path>, + format: Format) { + let _ignore = tcx.dep_graph.in_ignore(); + + assert!(analysis.glob_map.is_some()); + + info!("Dumping crate {}", cratename); + + // find a path to dump our data to + let mut root_path = match env::var_os("RUST_SAVE_ANALYSIS_FOLDER") { + Some(val) => PathBuf::from(val), + None => match odir { + Some(val) => val.join("save-analysis"), + None => 
PathBuf::from("save-analysis-temp"), + }, + }; + + if let Err(e) = fs::create_dir_all(&root_path) { + tcx.sess.err(&format!("Could not create directory {}: {}", + root_path.display(), + e)); + } + + { + let disp = root_path.display(); + info!("Writing output to {}", disp); + } + + // Create output file. + let executable = tcx.sess.crate_types.borrow().iter().any(|ct| *ct == CrateTypeExecutable); + let mut out_name = if executable { + "".to_owned() + } else { + "lib".to_owned() + }; + out_name.push_str(&cratename); + out_name.push_str(&tcx.sess.opts.cg.extra_filename); + out_name.push_str(format.extension()); + root_path.push(&out_name); + let mut output_file = File::create(&root_path).unwrap_or_else(|e| { + let disp = root_path.display(); + tcx.sess.fatal(&format!("Could not open {}: {}", disp, e)); + }); + root_path.pop(); + let output = &mut output_file; + + let save_ctxt = SaveContext::new(tcx, analysis); + + macro_rules! dump { + ($new_dumper: expr) => {{ + let mut dumper = $new_dumper; + let mut visitor = DumpVisitor::new(save_ctxt, &mut dumper); + + visitor.dump_crate_info(cratename, krate); + visit::walk_crate(&mut visitor, krate); + }} + } + + match format { + Format::Csv => dump!(CsvDumper::new(output)), + Format::Json => dump!(JsonDumper::new(output)), + Format::JsonApi => dump!(JsonApiDumper::new(output)), + } +} + +// Utility functions for the module. + +// Helper function to escape quotes in a string +fn escape(s: String) -> String { + s.replace("\"", "\"\"") +} + +// Helper function to determine if a span came from a +// macro expansion or syntax extension. +pub fn generated_code(span: Span) -> bool { + span.expn_id != NO_EXPANSION || span == DUMMY_SP +} diff --git a/src/librustc_save_analysis/span_utils.rs b/src/librustc_save_analysis/span_utils.rs new file mode 100644 index 0000000000000..e06aefd865f1b --- /dev/null +++ b/src/librustc_save_analysis/span_utils.rs @@ -0,0 +1,473 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::session::Session; + +use generated_code; + +use std::cell::Cell; +use std::env; +use std::path::Path; + +use syntax::ast; +use syntax::parse::lexer::{self, Reader, StringReader}; +use syntax::parse::token::{self, Token}; +use syntax::symbol::keywords; +use syntax_pos::*; + +#[derive(Clone)] +pub struct SpanUtils<'a> { + pub sess: &'a Session, + // FIXME given that we clone SpanUtils all over the place, this err_count is + // probably useless and any logic relying on it is bogus. + pub err_count: Cell, +} + +impl<'a> SpanUtils<'a> { + pub fn new(sess: &'a Session) -> SpanUtils<'a> { + SpanUtils { + sess: sess, + err_count: Cell::new(0), + } + } + + pub fn make_path_string(file_name: &str) -> String { + let path = Path::new(file_name); + if path.is_absolute() { + path.clone().display().to_string() + } else { + env::current_dir().unwrap().join(&path).display().to_string() + } + } + + // sub_span starts at span.lo, so we need to adjust the positions etc. + // If sub_span is None, we don't need to adjust. 
+ pub fn make_sub_span(&self, span: Span, sub_span: Option) -> Option { + match sub_span { + None => None, + Some(sub) => { + let FileMapAndBytePos {fm, pos} = self.sess.codemap().lookup_byte_offset(span.lo); + let base = pos + fm.start_pos; + Some(Span { + lo: base + self.sess.codemap().lookup_byte_offset(sub.lo).pos, + hi: base + self.sess.codemap().lookup_byte_offset(sub.hi).pos, + expn_id: span.expn_id, + }) + } + } + } + + pub fn snippet(&self, span: Span) -> String { + match self.sess.codemap().span_to_snippet(span) { + Ok(s) => s, + Err(_) => String::new(), + } + } + + pub fn retokenise_span(&self, span: Span) -> StringReader<'a> { + // sadness - we don't have spans for sub-expressions nor access to the tokens + // so in order to get extents for the function name itself (which dxr expects) + // we need to re-tokenise the fn definition + + // Note: this is a bit awful - it adds the contents of span to the end of + // the codemap as a new filemap. This is mostly OK, but means we should + // not iterate over the codemap. Also, any spans over the new filemap + // are incompatible with spans over other filemaps. + let filemap = self.sess + .codemap() + .new_filemap(String::from(""), None, self.snippet(span)); + let s = self.sess; + lexer::StringReader::new(s.diagnostic(), filemap) + } + + // Re-parses a path and returns the span for the last identifier in the path + pub fn span_for_last_ident(&self, span: Span) -> Option { + let mut result = None; + + let mut toks = self.retokenise_span(span); + let mut bracket_count = 0; + loop { + let ts = toks.real_token(); + if ts.tok == token::Eof { + return self.make_sub_span(span, result) + } + if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) { + result = Some(ts.sp); + } + + bracket_count += match ts.tok { + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shr) => -2, + _ => 0, + } + } + } + + // Return the span for the first identifier in the path. 
+ pub fn span_for_first_ident(&self, span: Span) -> Option { + let mut toks = self.retokenise_span(span); + let mut bracket_count = 0; + loop { + let ts = toks.real_token(); + if ts.tok == token::Eof { + return None; + } + if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) { + return self.make_sub_span(span, Some(ts.sp)); + } + + bracket_count += match ts.tok { + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shr) => -2, + _ => 0, + } + } + } + + // Return the span for the last ident before a `(` or `<` or '::<' and outside any + // any brackets, or the last span. + pub fn sub_span_for_meth_name(&self, span: Span) -> Option { + let mut toks = self.retokenise_span(span); + let mut prev = toks.real_token(); + let mut result = None; + let mut bracket_count = 0; + let mut prev_span = None; + while prev.tok != token::Eof { + prev_span = None; + let mut next = toks.real_token(); + + if (next.tok == token::OpenDelim(token::Paren) || next.tok == token::Lt) && + bracket_count == 0 && prev.tok.is_ident() { + result = Some(prev.sp); + } + + if bracket_count == 0 && next.tok == token::ModSep { + let old = prev; + prev = next; + next = toks.real_token(); + if next.tok == token::Lt && old.tok.is_ident() { + result = Some(old.sp); + } + } + + bracket_count += match prev.tok { + token::OpenDelim(token::Paren) | token::Lt => 1, + token::CloseDelim(token::Paren) | token::Gt => -1, + token::BinOp(token::Shr) => -2, + _ => 0, + }; + + if prev.tok.is_ident() && bracket_count == 0 { + prev_span = Some(prev.sp); + } + prev = next; + } + if result.is_none() && prev_span.is_some() { + return self.make_sub_span(span, prev_span); + } + return self.make_sub_span(span, result); + } + + // Return the span for the last ident before a `<` and outside any + // angle brackets, or the last span. 
+ pub fn sub_span_for_type_name(&self, span: Span) -> Option { + let mut toks = self.retokenise_span(span); + let mut prev = toks.real_token(); + let mut result = None; + + // We keep track of the following two counts - the depth of nesting of + // angle brackets, and the depth of nesting of square brackets. For the + // angle bracket count, we only count tokens which occur outside of any + // square brackets (i.e. bracket_count == 0). The intutition here is + // that we want to count angle brackets in the type, but not any which + // could be in expression context (because these could mean 'less than', + // etc.). + let mut angle_count = 0; + let mut bracket_count = 0; + loop { + let next = toks.real_token(); + + if (next.tok == token::Lt || next.tok == token::Colon) && + angle_count == 0 && + bracket_count == 0 && + prev.tok.is_ident() { + result = Some(prev.sp); + } + + if bracket_count == 0 { + angle_count += match prev.tok { + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shl) => 2, + token::BinOp(token::Shr) => -2, + _ => 0, + }; + } + + bracket_count += match prev.tok { + token::OpenDelim(token::Bracket) => 1, + token::CloseDelim(token::Bracket) => -1, + _ => 0, + }; + + if next.tok == token::Eof { + break; + } + prev = next; + } + if angle_count != 0 || bracket_count != 0 { + let loc = self.sess.codemap().lookup_char_pos(span.lo); + span_bug!(span, + "Mis-counted brackets when breaking path? Parsing '{}' \ + in {}, line {}", + self.snippet(span), + loc.file.name, + loc.line); + } + if result.is_none() && prev.tok.is_ident() && angle_count == 0 { + return self.make_sub_span(span, Some(prev.sp)); + } + self.make_sub_span(span, result) + } + + // Reparse span and return an owned vector of sub spans of the first limit + // identifier tokens in the given nesting level. 
+ // example with Foo, Bar> + // Nesting = 0: all idents outside of angle brackets: [Foo] + // Nesting = 1: idents within one level of angle brackets: [Bar, Bar] + pub fn spans_with_brackets(&self, span: Span, nesting: isize, limit: isize) -> Vec { + let mut result: Vec = vec![]; + + let mut toks = self.retokenise_span(span); + // We keep track of how many brackets we're nested in + let mut angle_count: isize = 0; + let mut bracket_count: isize = 0; + let mut found_ufcs_sep = false; + loop { + let ts = toks.real_token(); + if ts.tok == token::Eof { + if angle_count != 0 || bracket_count != 0 { + if generated_code(span) { + return vec![]; + } + let loc = self.sess.codemap().lookup_char_pos(span.lo); + span_bug!(span, + "Mis-counted brackets when breaking path? \ + Parsing '{}' in {}, line {}", + self.snippet(span), + loc.file.name, + loc.line); + } + return result + } + if (result.len() as isize) == limit { + return result; + } + bracket_count += match ts.tok { + token::OpenDelim(token::Bracket) => 1, + token::CloseDelim(token::Bracket) => -1, + _ => 0, + }; + if bracket_count > 0 { + continue; + } + angle_count += match ts.tok { + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shl) => 2, + token::BinOp(token::Shr) => -2, + _ => 0, + }; + + // Ignore the `>::` in `::AssocTy`. + + // The root cause of this hack is that the AST representation of + // qpaths is horrible. It treats
::C as a path with two + // segments, B and C and notes that there is also a self type A at + // position 0. Because we don't have spans for individual idents, + // only the whole path, we have to iterate over the tokens in the + // path, trying to pull out the non-nested idents (e.g., avoiding 'a + // in `>::C`). So we end up with a span for `B>::C` from + // the start of the first ident to the end of the path. + if !found_ufcs_sep && angle_count == -1 { + found_ufcs_sep = true; + angle_count += 1; + } + if ts.tok.is_ident() && angle_count == nesting { + result.push(self.make_sub_span(span, Some(ts.sp)).unwrap()); + } + } + } + + pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option { + let mut toks = self.retokenise_span(span); + let mut prev = toks.real_token(); + loop { + if prev.tok == token::Eof { + return None; + } + let next = toks.real_token(); + if next.tok == tok { + return self.make_sub_span(span, Some(prev.sp)); + } + prev = next; + } + } + + pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option { + let mut toks = self.retokenise_span(span); + loop { + let next = toks.real_token(); + if next.tok == token::Eof { + return None; + } + if next.tok == tok { + return self.make_sub_span(span, Some(next.sp)); + } + } + } + + pub fn sub_span_after_keyword(&self, span: Span, keyword: keywords::Keyword) -> Option { + self.sub_span_after(span, |t| t.is_keyword(keyword)) + } + + pub fn sub_span_after_token(&self, span: Span, tok: Token) -> Option { + self.sub_span_after(span, |t| t == tok) + } + + fn sub_span_after bool>(&self, span: Span, f: F) -> Option { + let mut toks = self.retokenise_span(span); + loop { + let ts = toks.real_token(); + if ts.tok == token::Eof { + return None; + } + if f(ts.tok) { + let ts = toks.real_token(); + if ts.tok == token::Eof { + return None + } else { + return self.make_sub_span(span, Some(ts.sp)); + } + } + } + } + + + // Returns a list of the spans of idents in a path. 
+ // E.g., For foo::bar::baz, we return [foo, bar, baz] (well, their spans) + pub fn spans_for_path_segments(&self, path: &ast::Path) -> Vec { + self.spans_with_brackets(path.span, 0, -1) + } + + // Return an owned vector of the subspans of the param identifier + // tokens found in span. + pub fn spans_for_ty_params(&self, span: Span, number: isize) -> Vec { + // Type params are nested within one level of brackets: + // i.e. we want Vec from Foo> + self.spans_with_brackets(span, 1, number) + } + + pub fn report_span_err(&self, kind: &str, span: Span) { + let loc = self.sess.codemap().lookup_char_pos(span.lo); + info!("({}) Could not find sub_span in `{}` in {}, line {}", + kind, + self.snippet(span), + loc.file.name, + loc.line); + self.err_count.set(self.err_count.get() + 1); + if self.err_count.get() > 1000 { + bug!("span errors reached 1000, giving up"); + } + } + + // Return the name for a macro definition (identifier after first `!`) + pub fn span_for_macro_def_name(&self, span: Span) -> Option { + let mut toks = self.retokenise_span(span); + loop { + let ts = toks.real_token(); + if ts.tok == token::Eof { + return None; + } + if ts.tok == token::Not { + let ts = toks.real_token(); + if ts.tok.is_ident() { + return self.make_sub_span(span, Some(ts.sp)); + } else { + return None; + } + } + } + } + + // Return the name for a macro use (identifier before first `!`). + pub fn span_for_macro_use_name(&self, span:Span) -> Option { + let mut toks = self.retokenise_span(span); + let mut prev = toks.real_token(); + loop { + if prev.tok == token::Eof { + return None; + } + let ts = toks.real_token(); + if ts.tok == token::Not { + if prev.tok.is_ident() { + return self.make_sub_span(span, Some(prev.sp)); + } else { + return None; + } + } + prev = ts; + } + } + + /// Return true if the span is generated code, and + /// it is not a subspan of the root callsite. + /// + /// Used to filter out spans of minimal value, + /// such as references to macro internal variables. 
+ pub fn filter_generated(&self, sub_span: Option, parent: Span) -> bool { + if !generated_code(parent) { + if sub_span.is_none() { + // Edge case - this occurs on generated code with incorrect expansion info. + return true; + } + return false; + } + // If sub_span is none, filter out generated code. + if sub_span.is_none() { + return true; + } + + //If the span comes from a fake filemap, filter it. + if !self.sess.codemap().lookup_char_pos(parent.lo).file.is_real_file() { + return true; + } + + // Otherwise, a generated span is deemed invalid if it is not a sub-span of the root + // callsite. This filters out macro internal variables and most malformed spans. + let span = self.sess.codemap().source_callsite(parent); + !(span.contains(parent)) + } +} + +macro_rules! filter { + ($util: expr, $span: ident, $parent: expr, None) => { + if $util.filter_generated($span, $parent) { + return None; + } + }; + ($util: expr, $span: ident, $parent: expr) => { + if $util.filter_generated($span, $parent) { + return; + } + }; +} diff --git a/src/librustc_trans/Cargo.toml b/src/librustc_trans/Cargo.toml new file mode 100644 index 0000000000000..796a80d080944 --- /dev/null +++ b/src/librustc_trans/Cargo.toml @@ -0,0 +1,29 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_trans" +version = "0.0.0" + +[lib] +name = "rustc_trans" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +arena = { path = "../libarena" } +flate = { path = "../libflate" } +graphviz = { path = "../libgraphviz" } +log = { path = "../liblog" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_bitflags = { path = "../librustc_bitflags" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +rustc_incremental = { path = "../librustc_incremental" } +rustc_llvm = { path 
= "../librustc_llvm" } +rustc_platform_intrinsics = { path = "../librustc_platform_intrinsics" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } \ No newline at end of file diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs new file mode 100644 index 0000000000000..07f53466b4975 --- /dev/null +++ b/src/librustc_trans/abi.rs @@ -0,0 +1,760 @@ +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace}; +use base; +use build::AllocaFcx; +use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; +use context::CrateContext; +use cabi_x86; +use cabi_x86_64; +use cabi_x86_win64; +use cabi_arm; +use cabi_aarch64; +use cabi_powerpc; +use cabi_powerpc64; +use cabi_s390x; +use cabi_mips; +use cabi_mips64; +use cabi_asmjs; +use cabi_msp430; +use machine::{llalign_of_min, llsize_of, llsize_of_alloc}; +use type_::Type; +use type_of; + +use rustc::hir; +use rustc::ty::{self, Ty}; + +use libc::c_uint; +use std::cmp; + +pub use syntax::abi::Abi; +pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; +use rustc::ty::layout::Layout; + +#[derive(Clone, Copy, PartialEq, Debug)] +enum ArgKind { + /// Pass the argument directly using the normal converted + /// LLVM type or by coercing to another specified type + Direct, + /// Pass the argument indirectly via a hidden pointer + Indirect, + /// Ignore the argument (useful for empty struct) + Ignore, +} + +// Hack to disable non_upper_case_globals only for the bitflags! 
and not for the rest +// of this module +pub use self::attr_impl::ArgAttribute; + +#[allow(non_upper_case_globals)] +mod attr_impl { + // The subset of llvm::Attribute needed for arguments, packed into a bitfield. + bitflags! { + #[derive(Default, Debug)] + flags ArgAttribute : u8 { + const ByVal = 1 << 0, + const NoAlias = 1 << 1, + const NoCapture = 1 << 2, + const NonNull = 1 << 3, + const ReadOnly = 1 << 4, + const SExt = 1 << 5, + const StructRet = 1 << 6, + const ZExt = 1 << 7, + } + } +} + +macro_rules! for_each_kind { + ($flags: ident, $f: ident, $($kind: ident),+) => ({ + $(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+ + }) +} + +impl ArgAttribute { + fn for_each_kind(&self, mut f: F) where F: FnMut(llvm::Attribute) { + for_each_kind!(self, f, + ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt) + } +} + +/// A compact representation of LLVM attributes (at least those relevant for this module) +/// that can be manipulated without interacting with LLVM's Attribute machinery. 
+#[derive(Copy, Clone, Debug, Default)] +pub struct ArgAttributes { + regular: ArgAttribute, + dereferenceable_bytes: u64, +} + +impl ArgAttributes { + pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { + self.regular = self.regular | attr; + self + } + + pub fn unset(&mut self, attr: ArgAttribute) -> &mut Self { + self.regular = self.regular - attr; + self + } + + pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { + self.dereferenceable_bytes = bytes; + self + } + + pub fn unset_dereferenceable(&mut self) -> &mut Self { + self.dereferenceable_bytes = 0; + self + } + + pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + unsafe { + self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); + if self.dereferenceable_bytes != 0 { + llvm::LLVMRustAddDereferenceableAttr(llfn, + idx.as_uint(), + self.dereferenceable_bytes); + } + } + } + + pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) { + unsafe { + self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); + if self.dereferenceable_bytes != 0 { + llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, + idx.as_uint(), + self.dereferenceable_bytes); + } + } + } +} + +/// Information about how a specific C type +/// should be passed to or returned from a function +/// +/// This is borrowed from clang's ABIInfo.h +#[derive(Clone, Copy, Debug)] +pub struct ArgType { + kind: ArgKind, + /// Original LLVM type + pub original_ty: Type, + /// Sizing LLVM type (pointers are opaque). + /// Unlike original_ty, this is guaranteed to be complete. + /// + /// For example, while we're computing the function pointer type in + /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`. + /// The field type will likely end up being `void(%Foo)*`, but we cannot + /// use `%Foo` to compute properties (e.g. 
size and alignment) of `Foo`, + /// until `%Foo` is completed by having all of its field types inserted, + /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers + /// with opaque ones, resulting in `{i8*}` for `Foo`. + /// ABI-specific logic can then look at the size, alignment and fields of + /// `{i8*}` in order to determine how the argument will be passed. + /// Only later will `original_ty` aka `%Foo` be used in the LLVM function + /// pointer type, without ever having introspected it. + pub ty: Type, + /// Signedness for integer types, None for other types + pub signedness: Option, + /// Coerced LLVM Type + pub cast: Option, + /// Dummy argument, which is emitted before the real argument + pub pad: Option, + /// LLVM attributes of argument + pub attrs: ArgAttributes +} + +impl ArgType { + fn new(original_ty: Type, ty: Type) -> ArgType { + ArgType { + kind: ArgKind::Direct, + original_ty: original_ty, + ty: ty, + signedness: None, + cast: None, + pad: None, + attrs: ArgAttributes::default() + } + } + + pub fn make_indirect(&mut self, ccx: &CrateContext) { + assert_eq!(self.kind, ArgKind::Direct); + + // Wipe old attributes, likely not valid through indirection. + self.attrs = ArgAttributes::default(); + + let llarg_sz = llsize_of_alloc(ccx, self.ty); + + // For non-immediate arguments the callee gets its own copy of + // the value on the stack, so there are no aliases. 
It's also + // program-invisible so can't possibly capture + self.attrs.set(ArgAttribute::NoAlias) + .set(ArgAttribute::NoCapture) + .set_dereferenceable(llarg_sz); + + self.kind = ArgKind::Indirect; + } + + pub fn ignore(&mut self) { + assert_eq!(self.kind, ArgKind::Direct); + self.kind = ArgKind::Ignore; + } + + pub fn extend_integer_width_to(&mut self, bits: u64) { + // Only integers have signedness + if let Some(signed) = self.signedness { + if self.ty.int_width() < bits { + self.attrs.set(if signed { + ArgAttribute::SExt + } else { + ArgAttribute::ZExt + }); + } + } + } + + pub fn is_indirect(&self) -> bool { + self.kind == ArgKind::Indirect + } + + pub fn is_ignore(&self) -> bool { + self.kind == ArgKind::Ignore + } + + /// Get the LLVM type for an lvalue of the original Rust type of + /// this argument/return, i.e. the result of `type_of::type_of`. + pub fn memory_ty(&self, ccx: &CrateContext) -> Type { + if self.original_ty == Type::i1(ccx) { + Type::i8(ccx) + } else { + self.original_ty + } + } + + /// Store a direct/indirect value described by this ArgType into a + /// lvalue for the original Rust type of this argument/return. + /// Can be used for both storing formal arguments into Rust variables + /// or results of call/invoke instructions into their destinations. + pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { + if self.is_ignore() { + return; + } + let ccx = bcx.ccx(); + if self.is_indirect() { + let llsz = llsize_of(ccx, self.ty); + let llalign = llalign_of_min(ccx, self.ty); + base::call_memcpy(bcx, dst, val, llsz, llalign as u32); + } else if let Some(ty) = self.cast { + // FIXME(eddyb): Figure out when the simpler Store is safe, clang + // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. 
+ let can_store_through_cast_ptr = false; + if can_store_through_cast_ptr { + let cast_dst = bcx.pointercast(dst, ty.ptr_to()); + let store = bcx.store(val, cast_dst); + let llalign = llalign_of_min(ccx, self.ty); + unsafe { + llvm::LLVMSetAlignment(store, llalign); + } + } else { + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). + // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. + + // We instead thus allocate some scratch space... + let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast"); + base::Lifetime::Start.call(bcx, llscratch); + + // ...where we first store the value... + bcx.store(val, llscratch); + + // ...and then memcpy it to the intended destination. 
+ base::call_memcpy(bcx, + bcx.pointercast(dst, Type::i8p(ccx)), + bcx.pointercast(llscratch, Type::i8p(ccx)), + C_uint(ccx, llsize_of_alloc(ccx, self.ty)), + cmp::min(llalign_of_min(ccx, self.ty), + llalign_of_min(ccx, ty)) as u32); + + base::Lifetime::End.call(bcx, llscratch); + } + } else { + if self.original_ty == Type::i1(ccx) { + val = bcx.zext(val, Type::i8(ccx)); + } + bcx.store(val, dst); + } + } + + pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) { + if self.pad.is_some() { + *idx += 1; + } + if self.is_ignore() { + return; + } + let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint); + *idx += 1; + self.store(bcx, val, dst); + } +} + +/// Metadata describing how the arguments to a native function +/// should be passed in order to respect the native ABI. +/// +/// I will do my best to describe this structure, but these +/// comments are reverse-engineered and may be inaccurate. -NDM +#[derive(Clone)] +pub struct FnType { + /// The LLVM types of each argument. + pub args: Vec, + + /// LLVM return type. + pub ret: ArgType, + + pub variadic: bool, + + pub cconv: llvm::CallConv +} + +impl FnType { + pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args); + fn_ty.adjust_for_abi(ccx, abi, sig); + fn_ty + } + + pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + use self::Abi::*; + let cconv = match ccx.sess().target.target.adjust_abi(abi) { + RustIntrinsic | PlatformIntrinsic | + Rust | RustCall => llvm::CCallConv, + + // It's the ABI's job to select this, not us. 
+ System => bug!("system abi should be selected elsewhere"), + + Stdcall => llvm::X86StdcallCallConv, + Fastcall => llvm::X86FastcallCallConv, + Vectorcall => llvm::X86_VectorCall, + C => llvm::CCallConv, + Win64 => llvm::X86_64_Win64, + SysV64 => llvm::X86_64_SysV, + Aapcs => llvm::ArmAapcsCallConv, + + // These API constants ought to be more specific... + Cdecl => llvm::CCallConv, + }; + + let mut inputs = &sig.inputs[..]; + let extra_args = if abi == RustCall { + assert!(!sig.variadic && extra_args.is_empty()); + + match inputs[inputs.len() - 1].sty { + ty::TyTuple(ref tupled_arguments) => { + inputs = &inputs[..inputs.len() - 1]; + &tupled_arguments[..] + } + _ => { + bug!("argument to function with \"rust-call\" ABI \ + is not a tuple"); + } + } + } else { + assert!(sig.variadic || extra_args.is_empty()); + extra_args + }; + + let target = &ccx.sess().target.target; + let win_x64_gnu = target.target_os == "windows" + && target.arch == "x86_64" + && target.target_env == "gnu"; + let linux_s390x = target.target_os == "linux" + && target.arch == "s390x" + && target.target_env == "gnu"; + let rust_abi = match abi { + RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, + _ => false + }; + + let arg_of = |ty: Ty<'tcx>, is_return: bool| { + if ty.is_bool() { + let llty = Type::i1(ccx); + let mut arg = ArgType::new(llty, llty); + arg.attrs.set(ArgAttribute::ZExt); + arg + } else { + let mut arg = ArgType::new(type_of::type_of(ccx, ty), + type_of::sizing_type_of(ccx, ty)); + if ty.is_integral() { + arg.signedness = Some(ty.is_signed()); + } + // Rust enum types that map onto C enums also need to follow + // the target ABI zero-/sign-extension rules. + if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) { + arg.signedness = Some(signed); + } + if llsize_of_alloc(ccx, arg.ty) == 0 { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. 
+ if is_return || rust_abi || + (!win_x64_gnu && !linux_s390x) { + arg.ignore(); + } + } + arg + } + }; + + let ret_ty = sig.output; + let mut ret = arg_of(ret_ty, true); + + if !type_is_fat_ptr(ccx.tcx(), ret_ty) { + // The `noalias` attribute on the return value is useful to a + // function ptr caller. + if let ty::TyBox(_) = ret_ty.sty { + // `Box` pointer return values never alias because ownership + // is transferred + ret.attrs.set(ArgAttribute::NoAlias); + } + + // We can also mark the return value as `dereferenceable` in certain cases + match ret_ty.sty { + // These are not really pointers but pairs, (pointer, len) + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyBox(ty) => { + let llty = type_of::sizing_type_of(ccx, ty); + let llsz = llsize_of_alloc(ccx, llty); + ret.attrs.set_dereferenceable(llsz); + } + _ => {} + } + } + + let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); + + // Handle safe Rust thin and fat pointers. + let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { + // `Box` pointer parameters never alias because ownership is transferred + ty::TyBox(inner) => { + arg.attrs.set(ArgAttribute::NoAlias); + Some(inner) + } + + ty::TyRef(b, mt) => { + use rustc::ty::{BrAnon, ReLateBound}; + + // `&mut` pointer parameters never alias other parameters, or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as + // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely + // on memory dependencies rather than pointer equality + let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe(); + + if mt.mutbl != hir::MutMutable && !interior_unsafe { + arg.attrs.set(ArgAttribute::NoAlias); + } + + if mt.mutbl == hir::MutImmutable && !interior_unsafe { + arg.attrs.set(ArgAttribute::ReadOnly); + } + + // When a reference in an argument has no named lifetime, it's + // impossible for that reference to escape this function + // (returned or 
stored beyond the call by a closure). + if let ReLateBound(_, BrAnon(_)) = *b { + arg.attrs.set(ArgAttribute::NoCapture); + } + + Some(mt.ty) + } + _ => None + }; + + for ty in inputs.iter().chain(extra_args.iter()) { + let mut arg = arg_of(ty, false); + + if type_is_fat_ptr(ccx.tcx(), ty) { + let original_tys = arg.original_ty.field_types(); + let sizing_tys = arg.ty.field_types(); + assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); + + let mut data = ArgType::new(original_tys[0], sizing_tys[0]); + let mut info = ArgType::new(original_tys[1], sizing_tys[1]); + + if let Some(inner) = rust_ptr_attrs(ty, &mut data) { + data.attrs.set(ArgAttribute::NonNull); + if ccx.tcx().struct_tail(inner).is_trait() { + info.attrs.set(ArgAttribute::NonNull); + } + } + args.push(data); + args.push(info); + } else { + if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { + let llty = type_of::sizing_type_of(ccx, inner); + let llsz = llsize_of_alloc(ccx, llty); + arg.attrs.set_dereferenceable(llsz); + } + args.push(arg); + } + } + + FnType { + args: args, + ret: ret, + variadic: sig.variadic, + cconv: cconv + } + } + + pub fn adjust_for_abi<'a, 'tcx>(&mut self, + ccx: &CrateContext<'a, 'tcx>, + abi: Abi, + sig: &ty::FnSig<'tcx>) { + if abi == Abi::Rust || abi == Abi::RustCall || + abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { + let fixup = |arg: &mut ArgType| { + let mut llty = arg.ty; + + // Replace newtypes with their inner-most type. + while llty.kind() == llvm::TypeKind::Struct { + let inner = llty.field_types(); + if inner.len() != 1 { + break; + } + llty = inner[0]; + } + + if !llty.is_aggregate() { + // Scalars and vectors, always immediate. + if llty != arg.ty { + // Needs a cast as we've unpacked a newtype. 
+ arg.cast = Some(llty); + } + return; + } + + let size = llsize_of_alloc(ccx, llty); + if size > llsize_of_alloc(ccx, ccx.int_type()) { + arg.make_indirect(ccx); + } else if size > 0 { + // We want to pass small aggregates as immediates, but using + // a LLVM aggregate type for this leads to bad optimizations, + // so we pick an appropriately sized integer type instead. + arg.cast = Some(Type::ix(ccx, size * 8)); + } + }; + // Fat pointers are returned by-value. + if !self.ret.is_ignore() { + if !type_is_fat_ptr(ccx.tcx(), sig.output) { + fixup(&mut self.ret); + } + } + for arg in &mut self.args { + if arg.is_ignore() { continue; } + fixup(arg); + } + if self.ret.is_indirect() { + self.ret.attrs.set(ArgAttribute::StructRet); + } + return; + } + + match &ccx.sess().target.target.arch[..] { + "x86" => cabi_x86::compute_abi_info(ccx, self), + "x86_64" => if abi == Abi::SysV64 { + cabi_x86_64::compute_abi_info(ccx, self); + } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows { + cabi_x86_win64::compute_abi_info(ccx, self); + } else { + cabi_x86_64::compute_abi_info(ccx, self); + }, + "aarch64" => cabi_aarch64::compute_abi_info(ccx, self), + "arm" => { + let flavor = if ccx.sess().target.target.target_os == "ios" { + cabi_arm::Flavor::Ios + } else { + cabi_arm::Flavor::General + }; + cabi_arm::compute_abi_info(ccx, self, flavor); + }, + "mips" => cabi_mips::compute_abi_info(ccx, self), + "mips64" => cabi_mips64::compute_abi_info(ccx, self), + "powerpc" => cabi_powerpc::compute_abi_info(ccx, self), + "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self), + "s390x" => cabi_s390x::compute_abi_info(ccx, self), + "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), + "wasm32" => cabi_asmjs::compute_abi_info(ccx, self), + "msp430" => cabi_msp430::compute_abi_info(ccx, self), + a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) + } + + if self.ret.is_indirect() { + self.ret.attrs.set(ArgAttribute::StructRet); 
+ } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { + let mut llargument_tys = Vec::new(); + + let llreturn_ty = if self.ret.is_ignore() { + Type::void(ccx) + } else if self.ret.is_indirect() { + llargument_tys.push(self.ret.original_ty.ptr_to()); + Type::void(ccx) + } else { + self.ret.cast.unwrap_or(self.ret.original_ty) + }; + + for arg in &self.args { + if arg.is_ignore() { + continue; + } + // add padding + if let Some(ty) = arg.pad { + llargument_tys.push(ty); + } + + let llarg_ty = if arg.is_indirect() { + arg.original_ty.ptr_to() + } else { + arg.cast.unwrap_or(arg.original_ty) + }; + + llargument_tys.push(llarg_ty); + } + + if self.variadic { + Type::variadic_func(&llargument_tys, &llreturn_ty) + } else { + Type::func(&llargument_tys, &llreturn_ty) + } + } + + pub fn apply_attrs_llfn(&self, llfn: ValueRef) { + let mut i = if self.ret.is_indirect() { 1 } else { 0 }; + if !self.ret.is_ignore() { + self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + } + i += 1; + for arg in &self.args { + if !arg.is_ignore() { + if arg.pad.is_some() { i += 1; } + arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + i += 1; + } + } + } + + pub fn apply_attrs_callsite(&self, callsite: ValueRef) { + let mut i = if self.ret.is_indirect() { 1 } else { 0 }; + if !self.ret.is_ignore() { + self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + } + i += 1; + for arg in &self.args { + if !arg.is_ignore() { + if arg.pad.is_some() { i += 1; } + arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + i += 1; + } + } + + if self.cconv != llvm::CCallConv { + llvm::SetInstructionCallConv(callsite, self.cconv); + } + } +} + +pub fn align_up_to(off: usize, a: usize) -> usize { + return (off + a - 1) / a * a; +} + +fn align(off: usize, ty: Type, pointer: usize) -> usize { + let a = ty_align(ty, pointer); + return align_up_to(off, a); +} + +pub fn ty_align(ty: Type, pointer: usize) -> usize { + match ty.kind() { 
+ Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => pointer, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + 1 + } else { + let str_tys = ty.field_types(); + str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t, pointer))) + } + } + Array => { + let elt = ty.element_type(); + ty_align(elt, pointer) + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + ty_align(elt, pointer) * len + } + _ => bug!("ty_align: unhandled type") + } +} + +pub fn ty_size(ty: Type, pointer: usize) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => pointer, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + let str_tys = ty.field_types(); + str_tys.iter().fold(0, |s, t| s + ty_size(*t, pointer)) + } else { + let str_tys = ty.field_types(); + let size = str_tys.iter().fold(0, |s, t| { + align(s, *t, pointer) + ty_size(*t, pointer) + }); + align(size, ty, pointer) + } + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt, pointer); + len * eltsz + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt, pointer); + len * eltsz + }, + _ => bug!("ty_size: unhandled type") + } +} diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs new file mode 100644 index 0000000000000..8ee362bae3551 --- /dev/null +++ b/src/librustc_trans/adt.rs @@ -0,0 +1,842 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Representation of Algebraic Data Types +//! +//! This module determines how to represent enums, structs, and tuples +//! 
based on their monomorphized types; it is responsible both for +//! choosing a representation and translating basic operations on +//! values of those types. (Note: exporting the representations for +//! debuggers is handled in debuginfo.rs, not here.) +//! +//! Note that the interface treats everything as a general case of an +//! enum, so structs/tuples/etc. have one pseudo-variant with +//! discriminant 0; i.e., as if they were a univariant enum. +//! +//! Having everything in one place will enable improvements to data +//! structure representation; possibilities include: +//! +//! - User-specified alignment (e.g., cacheline-aligning parts of +//! concurrently accessed data structures); LLVM can't represent this +//! directly, so we'd have to insert padding fields in any structure +//! that might contain one and adjust GEP indices accordingly. See +//! issue #4578. +//! +//! - Store nested enums' discriminants in the same word. Rather, if +//! some variants start with enums, and those enums representations +//! have unused alignment padding between discriminant and body, the +//! outer enum's discriminant can be stored there and those variants +//! can start at offset 0. Kind of fancy, and might need work to +//! make copies of the inner enum type cooperate, but it could help +//! with `Option` or `Result` wrapped around another enum. +//! +//! - Tagged pointers would be neat, but given that any type can be +//! used unboxed and any field can have pointers (including mutable) +//! taken to it, implementing them for Rust seems difficult. 
+ +use super::Disr; + +use std; + +use llvm::{ValueRef, True, IntEQ, IntNE}; +use rustc::ty::layout; +use rustc::ty::{self, Ty, AdtKind}; +use build::*; +use common::*; +use debuginfo::DebugLoc; +use glue; +use base; +use machine; +use monomorphize; +use type_::Type; +use type_of; +use value::Value; + +#[derive(Copy, Clone, PartialEq)] +pub enum BranchKind { + Switch, + Single +} + +#[derive(Copy, Clone)] +pub struct MaybeSizedValue { + pub value: ValueRef, + pub meta: ValueRef, +} + +impl MaybeSizedValue { + pub fn sized(value: ValueRef) -> MaybeSizedValue { + MaybeSizedValue { + value: value, + meta: std::ptr::null_mut() + } + } + + pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue { + MaybeSizedValue { + value: value, + meta: meta + } + } + + pub fn has_meta(&self) -> bool { + !self.meta.is_null() + } +} + +/// Given an enum, struct, closure, or tuple, extracts fields. +/// Treats closures as a struct with one variant. +/// `empty_if_no_variants` is a switch to deal with empty enums. +/// If true, `variant_index` is disregarded and an empty Vec returned in this case. +fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + variant_index: usize, + empty_if_no_variants: bool) -> Vec> { + match t.sty { + ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { + Vec::default() + }, + ty::TyAdt(ref def, ref substs) => { + def.variants[variant_index].fields.iter().map(|f| { + monomorphize::field_ty(cx.tcx(), substs, f) + }).collect::>() + }, + ty::TyTuple(fields) => fields.to_vec(), + ty::TyClosure(def_id, substs) => { + if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} + substs.upvar_tys(def_id, cx.tcx()).collect() + }, + _ => bug!("{} is not a type that can have fields.", t) + } +} + +/// LLVM-level types are a little complicated. +/// +/// C-like enums need to be actual ints, not wrapped in a struct, +/// because that changes the ABI on some platforms (see issue #10308). 
+/// +/// For nominal types, in some cases, we need to use LLVM named structs +/// and fill in the actual contents in a second pass to prevent +/// unbounded recursion; see also the comments in `trans::type_of`. +pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + generic_type_of(cx, t, None, false, false) +} + + +// Pass dst=true if the type you are passing is a DST. Yes, we could figure +// this out, but if you call this on an unsized type without realising it, you +// are going to get the wrong type (it will not include the unsized parts of it). +pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, dst: bool) -> Type { + generic_type_of(cx, t, None, true, dst) +} + +pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, name: &str) -> Type { + generic_type_of(cx, t, Some(name), false, false) +} + +pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, llty: &mut Type) { + let l = cx.layout_of(t); + debug!("finish_type_of: {} with layout {:#?}", t, l); + match *l { + layout::CEnum { .. } | layout::General { .. } + | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } + layout::Univariant { ..} + | layout::StructWrappedNullablePointer { .. } => { + let (nonnull_variant, packed) = match *l { + layout::Univariant { ref variant, .. } => (0, variant.packed), + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. 
} => + (nndiscr, nonnull.packed), + _ => unreachable!() + }; + let fields = compute_fields(cx, t, nonnull_variant as usize, true); + llty.set_struct_body(&struct_llfields(cx, &fields, false, false), + packed) + }, + _ => bug!("This function cannot handle {} with layout {:#?}", t, l) + } +} + +fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + name: Option<&str>, + sizing: bool, + dst: bool) -> Type { + let l = cx.layout_of(t); + debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}", + t, name, sizing, dst); + match *l { + layout::CEnum { discr, .. } => Type::from_integer(cx, discr), + layout::RawNullablePointer { nndiscr, .. } => { + let (def, substs) = match t.sty { + ty::TyAdt(d, s) => (d, s), + _ => bug!("{} is not an ADT", t) + }; + let nnty = monomorphize::field_ty(cx.tcx(), substs, + &def.variants[nndiscr as usize].fields[0]); + type_of::sizing_type_of(cx, nnty) + } + layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { + let fields = compute_fields(cx, t, nndiscr as usize, false); + match name { + None => { + Type::struct_(cx, &struct_llfields(cx, &fields, sizing, dst), + nonnull.packed) + } + Some(name) => { + assert_eq!(sizing, false); + Type::named_struct(cx, name) + } + } + } + layout::Univariant { ref variant, .. } => { + // Note that this case also handles empty enums. + // Thus the true as the final parameter here. + let fields = compute_fields(cx, t, 0, true); + match name { + None => { + let fields = struct_llfields(cx, &fields, sizing, dst); + Type::struct_(cx, &fields, variant.packed) + } + Some(name) => { + // Hypothesis: named_struct's can never need a + // drop flag. (... needs validation.) + assert_eq!(sizing, false); + Type::named_struct(cx, name) + } + } + } + layout::Vector { element, count } => { + let elem_ty = Type::from_primitive(cx, element); + Type::vector(&elem_ty, count) + } + layout::UntaggedUnion { ref variants, .. 
}=> { + // Use alignment-sized ints to fill all the union storage. + let size = variants.stride().bytes(); + let align = variants.align.abi(); + let fill = union_fill(cx, size, align); + match name { + None => { + Type::struct_(cx, &[fill], variants.packed) + } + Some(name) => { + let mut llty = Type::named_struct(cx, name); + llty.set_struct_body(&[fill], variants.packed); + llty + } + } + } + layout::General { discr, size, align, .. } => { + // We need a representation that has: + // * The alignment of the most-aligned field + // * The size of the largest variant (rounded up to that alignment) + // * No alignment padding anywhere any variant has actual data + // (currently matters only for enums small enough to be immediate) + // * The discriminant in an obvious place. + // + // So we start with the discriminant, pad it up to the alignment with + // more of its own type, then use alignment-sized ints to get the rest + // of the size. + let size = size.bytes(); + let align = align.abi(); + assert!(align <= std::u32::MAX as u64); + let discr_ty = Type::from_integer(cx, discr); + let discr_size = discr.size().bytes(); + let padded_discr_size = roundup(discr_size, align as u32); + let variant_part_size = size-padded_discr_size; + let variant_fill = union_fill(cx, variant_part_size, align); + + assert_eq!(machine::llalign_of_min(cx, variant_fill), align as u32); + assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly + let fields: Vec = + [discr_ty, + Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), + variant_fill].iter().cloned().collect(); + match name { + None => { + Type::struct_(cx, &fields[..], false) + } + Some(name) => { + let mut llty = Type::named_struct(cx, name); + llty.set_struct_body(&fields[..], false); + llty + } + } + } + _ => bug!("Unsupported type {} represented as {:#?}", t, l) + } +} + +fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { + assert_eq!(size%align, 0); + 
assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. Got {}", align); + let align_units = size/align; + let dl = &cx.tcx().data_layout; + let layout_align = layout::Align::from_bytes(align, align).unwrap(); + if let Some(ity) = layout::Integer::for_abi_align(dl, layout_align) { + Type::array(&Type::from_integer(cx, ity), align_units) + } else { + Type::array(&Type::vector(&Type::i32(cx), align/4), + align_units) + } +} + + +fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec>, + sizing: bool, dst: bool) -> Vec { + if sizing { + fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) + .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() + } else { + fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() + } +} + +/// Obtain a representation of the discriminant sufficient to translate +/// destructuring; this may or may not involve the actual discriminant. +pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + t: Ty<'tcx>, + scrutinee: ValueRef, + range_assert: bool) + -> (BranchKind, Option) { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { .. } | layout::General { .. } | + layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { + (BranchKind::Switch, Some(trans_get_discr(bcx, t, scrutinee, None, range_assert))) + } + layout::Univariant { .. } | layout::UntaggedUnion { .. } => { + // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). + (BranchKind::Single, None) + }, + _ => bug!("{} is not an enum.", t) + } +} + +pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { + match *l { + layout::CEnum { signed, .. }=> signed, + _ => false, + } +} + +/// Obtain the actual discriminant of a value. 
+pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, + scrutinee: ValueRef, cast_to: Option, + range_assert: bool) + -> ValueRef { + let (def, substs) = match t.sty { + ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs), + _ => bug!("{} is not an enum", t) + }; + + debug!("trans_get_discr t: {:?}", t); + let l = bcx.ccx().layout_of(t); + + let val = match *l { + layout::CEnum { discr, min, max, .. } => { + load_discr(bcx, discr, scrutinee, min, max, range_assert) + } + layout::General { discr, .. } => { + let ptr = StructGEP(bcx, scrutinee, 0); + load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, + range_assert) + } + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0), + layout::RawNullablePointer { nndiscr, .. } => { + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; + let llptrty = type_of::sizing_type_of(bcx.ccx(), + monomorphize::field_ty(bcx.ccx().tcx(), substs, + &def.variants[nndiscr as usize].fields[0])); + ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) + } + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { + struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) + }, + _ => bug!("{} is not an enum", t) + }; + match cast_to { + None => val, + Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } + } +} + +fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath, + scrutinee: ValueRef) -> ValueRef { + let llptrptr = GEPi(bcx, scrutinee, + &discrfield.iter().map(|f| *f as usize).collect::>()[..]); + let llptr = Load(bcx, llptrptr); + let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; + ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) +} + +/// Helper for cases where the discriminant is simply loaded. 
+fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, + range_assert: bool) + -> ValueRef { + let llty = Type::from_integer(bcx.ccx(), ity); + assert_eq!(val_ty(ptr), llty.ptr_to()); + let bits = ity.size().bits(); + assert!(bits <= 64); + let bits = bits as usize; + let mask = !0u64 >> (64 - bits); + // For a (max) discr of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + if max.wrapping_add(1) & mask == min & mask || !range_assert { + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + Load(bcx, ptr) + } else { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. + LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True) + } +} + +/// Yield information about how to dispatch a case of the +/// discriminant-like value returned by `trans_switch`. +/// +/// This should ideally be less tightly tied to `_match`. +pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) + -> ValueRef { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum { discr, .. } + | layout::General { discr, .. }=> { + C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true) + } + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } => { + assert!(value == Disr(0) || value == Disr(1)); + C_bool(bcx.ccx(), value != Disr(0)) + } + _ => { + bug!("{} does not have a discriminant. Represented as {:#?}", t, l); + } + } +} + +/// Set the discriminant for a new value of the given case of the given +/// representation. +pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, + val: ValueRef, to: Disr) { + let l = bcx.ccx().layout_of(t); + match *l { + layout::CEnum{ discr, min, max, .. 
} => { + assert_discr_in_range(Disr(min), Disr(max), to); + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + val); + } + layout::General{ discr, .. } => { + Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + StructGEP(bcx, val, 0)); + } + layout::Univariant { .. } + | layout::UntaggedUnion { .. } + | layout::Vector { .. } => { + assert_eq!(to, Disr(0)); + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + if to.0 != nndiscr { + let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); + Store(bcx, C_null(llptrty), val); + } + } + layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { + if to.0 != nndiscr { + if target_sets_discr_via_memset(bcx) { + // Issue #34427: As workaround for LLVM bug on + // ARM, use memset of 0 on whole struct rather + // than storing null to single target field. + let b = B(bcx); + let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to()); + let fill_byte = C_u8(b.ccx, 0); + let size = C_uint(b.ccx, nonnull.stride().bytes()); + let align = C_i32(b.ccx, nonnull.align.abi() as i32); + base::call_memset(&b, llptr, fill_byte, size, align, false); + } else { + let path = discrfield.iter().map(|&i| i as usize).collect::>(); + let llptrptr = GEPi(bcx, val, &path[..]); + let llptrty = val_ty(llptrptr).element_type(); + Store(bcx, C_null(llptrty), llptrptr); + } + } + } + _ => bug!("Cannot handle {} represented as {:#?}", t, l) + } +} + +fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool { + bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" +} + +fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { + if min <= max { + assert!(min <= discr && discr <= max) + } else { + assert!(min <= discr || discr <= max) + } +} + +/// Access a field, at a point when the value's case is known. 
+pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, + val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { + trans_field_ptr_builder(&bcx.build(), t, val, discr, ix) +} + +/// Access a field, at a point when the value's case is known. +pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + t: Ty<'tcx>, + val: MaybeSizedValue, + discr: Disr, ix: usize) + -> ValueRef { + let l = bcx.ccx().layout_of(t); + debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l); + // Note: if this ever needs to generate conditionals (e.g., if we + // decide to do some kind of cdr-coding-like non-unique repr + // someday), it will need to return a possibly-new bcx as well. + match *l { + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, Disr(0)); + struct_field_ptr(bcx, &variant, + &compute_fields(bcx.ccx(), t, 0, false), + val, ix, false) + } + layout::Vector { count, .. } => { + assert_eq!(discr.0, 0); + assert!((ix as u64) < count); + bcx.struct_gep(val.value, ix) + } + layout::General { discr: d, ref variants, .. } => { + let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false); + fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false)); + struct_field_ptr(bcx, &variants[discr.0 as usize], + &fields, + val, ix + 1, true) + } + layout::UntaggedUnion { .. } => { + let fields = compute_fields(bcx.ccx(), t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } | + layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { + let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false); + // The unit-like case might have a nonzero number of unit-like fields. + // (e.d., Result of Either with (), as one side.) 
+ let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); + // The contents of memory at this pointer can't matter, but use + // the value that's "reasonable" in case of pointer comparison. + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + assert_eq!(ix, 0); + assert_eq!(discr.0, nndiscr); + let ty = type_of::type_of(bcx.ccx(), nnty); + if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + bcx.pointercast(val.value, ty.ptr_to()) + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + assert_eq!(discr.0, nndiscr); + struct_field_ptr(bcx, &nonnull, + &compute_fields(bcx.ccx(), t, discr.0 as usize, false), + val, ix, false) + } + _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) + } +} + +fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + st: &layout::Struct, fields: &Vec>, val: MaybeSizedValue, + ix: usize, needs_cast: bool) -> ValueRef { + let ccx = bcx.ccx(); + let fty = fields[ix]; + let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + if bcx.is_unreachable() { + return C_undef(ll_fty.ptr_to()); + } + + let ptr_val = if needs_cast { + let fields = fields.iter().map(|&ty| { + type_of::in_memory_type_of(ccx, ty) + }).collect::>(); + let real_ty = Type::struct_(ccx, &fields[..], st.packed); + bcx.pointercast(val.value, real_ty.ptr_to()) + } else { + val.value + }; + + // Simple case - we can just GEP the field + // * First field - Always aligned properly + // * Packed struct - There is no alignment padding + // * Field is sized - pointer is properly aligned already + if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) { + return bcx.struct_gep(ptr_val, ix); + } + + // If the type of the last field is [T] or str, then we don't need to do + // any 
adjusments + match fty.sty { + ty::TySlice(..) | ty::TyStr => { + return bcx.struct_gep(ptr_val, ix); + } + _ => () + } + + // There's no metadata available, log the case and just do the GEP. + if !val.has_meta() { + debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", + ix, Value(ptr_val)); + return bcx.struct_gep(ptr_val, ix); + } + + let dbloc = DebugLoc::None; + + // We need to get the pointer manually now. + // We do this by casting to a *i8, then offsetting it by the appropriate amount. + // We do this instead of, say, simply adjusting the pointer from the result of a GEP + // because the field may have an arbitrary alignment in the LLVM representation + // anyway. + // + // To demonstrate: + // struct Foo { + // x: u16, + // y: T + // } + // + // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that + // the `y` field has 16-bit alignment. + + let meta = val.meta; + + + let offset = st.offsets[ix].bytes(); + let unaligned_offset = C_uint(bcx.ccx(), offset); + + // Get the alignment of the field + let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + + // Bump the unaligned offset up to the appropriate alignment using the + // following expression: + // + // (unaligned offset + (align - 1)) & -align + + // Calculate offset + dbloc.apply(bcx.fcx()); + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64)); + let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), + bcx.neg(align)); + + debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); + + // Cast and adjust pointer + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx())); + let byte_ptr = bcx.gep(byte_ptr, &[offset]); + + // Finally, cast back to the type expected + let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + debug!("struct_field_ptr: Field type is {:?}", ll_fty); + bcx.pointercast(byte_ptr, ll_fty.ptr_to()) +} + +/// Construct a constant value, suitable for initializing a +/// GlobalVariable, given a case and 
constant values for its fields. +/// Note that this may have a different LLVM type (and different +/// alignment!) from the representation's `type_of`, so it needs a +/// pointer cast before use. +/// +/// The LLVM type system does not directly support unions, and only +/// pointers can be bitcast, so a constant (and, by extension, the +/// GlobalVariable initialized by it) will have a type that can vary +/// depending on which case of an enum it is. +/// +/// To understand the alignment situation, consider `enum E { V64(u64), +/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to +/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, +/// i32, i32}`, which is 4-byte aligned. +/// +/// Currently the returned value has the same size as the type, but +/// this could be changed in the future to avoid allocating unnecessary +/// space after values of shorter-than-maximum cases. +pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, discr: Disr, + vals: &[ValueRef]) -> ValueRef { + let l = ccx.layout_of(t); + let dl = &ccx.tcx().data_layout; + match *l { + layout::CEnum { discr: d, min, max, .. } => { + assert_eq!(vals.len(), 0); + assert_discr_in_range(Disr(min), Disr(max), discr); + C_integral(Type::from_integer(ccx, d), discr.0, true) + } + layout::General { discr: d, ref variants, .. } => { + let variant = &variants[discr.0 as usize]; + let lldiscr = C_integral(Type::from_integer(ccx, d), discr.0 as u64, true); + let mut vals_with_discr = vec![lldiscr]; + vals_with_discr.extend_from_slice(vals); + let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); + let needed_padding = l.size(dl).bytes() - variant.stride().bytes(); + if needed_padding > 0 { + contents.push(padding(ccx, needed_padding)); + } + C_struct(ccx, &contents[..], false) + } + layout::UntaggedUnion { ref variants, .. 
}=> { + assert_eq!(discr, Disr(0)); + let contents = build_const_union(ccx, variants, vals[0]); + C_struct(ccx, &contents, variants.packed) + } + layout::Univariant { ref variant, .. } => { + assert_eq!(discr, Disr(0)); + let contents = build_const_struct(ccx, &variant, vals); + C_struct(ccx, &contents[..], variant.packed) + } + layout::Vector { .. } => { + C_vector(vals) + } + layout::RawNullablePointer { nndiscr, .. } => { + let nnty = compute_fields(ccx, t, nndiscr as usize, false)[0]; + if discr.0 == nndiscr { + assert_eq!(vals.len(), 1); + vals[0] + } else { + C_null(type_of::sizing_type_of(ccx, nnty)) + } + } + layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { + if discr.0 == nndiscr { + C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + } else { + let fields = compute_fields(ccx, t, nndiscr as usize, false); + let vals = fields.iter().map(|&ty| { + // Always use null even if it's not the `discrfield`th + // field; see #8506. + C_null(type_of::sizing_type_of(ccx, ty)) + }).collect::>(); + C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false) + } + } + _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) + } +} + +/// Building structs is a little complicated, because we might need to +/// insert padding if a field's value is less aligned than its type. +/// +/// Continuing the example from `trans_const`, a value of type `(u32, +/// E)` should have the `E` at offset 8, but if that field's +/// initializer is 4-byte aligned then simply translating the tuple as +/// a two-element struct will locate it at offset 4, and accesses to it +/// will read the wrong memory. 
+fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + st: &layout::Struct, + vals: &[ValueRef]) + -> Vec { + assert_eq!(vals.len(), st.offsets.len()); + + if vals.len() == 0 { + return Vec::new(); + } + + // offset of current value + let mut offset = 0; + let mut cfields = Vec::new(); + let offsets = st.offsets.iter().map(|i| i.bytes()); + for (&val, target_offset) in vals.iter().zip(offsets) { + if offset < target_offset { + cfields.push(padding(ccx, target_offset - offset)); + offset = target_offset; + } + assert!(!is_undef(val)); + cfields.push(val); + offset += machine::llsize_of_alloc(ccx, val_ty(val)); + } + + if offset < st.stride().bytes() { + cfields.push(padding(ccx, st.stride().bytes() - offset)); + } + + cfields +} + +fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + un: &layout::Union, + field_val: ValueRef) + -> Vec { + let mut cfields = vec![field_val]; + + let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); + let size = un.stride().bytes(); + if offset != size { + cfields.push(padding(ccx, size - offset)); + } + + cfields +} + +fn padding(ccx: &CrateContext, size: u64) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size)) +} + +// FIXME this utility routine should be somewhere more general +#[inline] +fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } + +/// Extract a field of a constant value, as appropriate for its +/// representation. +/// +/// (Not to be confused with `common::const_get_elt`, which operates on +/// raw LLVM-level structs and arrays.) +pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, + val: ValueRef, _discr: Disr, + ix: usize) -> ValueRef { + let l = ccx.layout_of(t); + match *l { + layout::CEnum { .. } => bug!("element access in C-like enum const"), + layout::Univariant { .. } | layout::Vector { .. } => const_struct_field(val, ix), + layout::UntaggedUnion { .. } => const_struct_field(val, 0), + layout::General { .. 
} => const_struct_field(val, ix + 1), + layout::RawNullablePointer { .. } => { + assert_eq!(ix, 0); + val + }, + layout::StructWrappedNullablePointer{ .. } => const_struct_field(val, ix), + _ => bug!("{} does not have fields.", t) + } +} + +/// Extract field of struct-like const, skipping our alignment padding. +fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef { + // Get the ix-th non-undef element of the struct. + let mut real_ix = 0; // actual position in the struct + let mut ix = ix; // logical index relative to real_ix + let mut field; + loop { + loop { + field = const_get_elt(val, &[real_ix]); + if !is_undef(field) { + break; + } + real_ix = real_ix + 1; + } + if ix == 0 { + return field; + } + ix = ix - 1; + real_ix = real_ix + 1; + } +} diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs new file mode 100644 index 0000000000000..665e12cbe8795 --- /dev/null +++ b/src/librustc_trans/asm.rs @@ -0,0 +1,121 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! # Translation of inline assembly. 
+ +use llvm::{self, ValueRef}; +use base; +use build::*; +use common::*; +use type_of; +use type_::Type; + +use rustc::hir; +use rustc::ty::Ty; + +use std::ffi::CString; +use syntax::ast::AsmDialect; +use libc::{c_uint, c_char}; + +// Take an inline assembly expression and splat it out via LLVM +pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + ia: &hir::InlineAsm, + outputs: Vec<(ValueRef, Ty<'tcx>)>, + mut inputs: Vec) { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; + + // Prepare the output operands + let mut indirect_outputs = vec![]; + for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { + let val = if out.is_rw || out.is_indirect { + Some(base::load_ty(bcx, val, ty)) + } else { + None + }; + if out.is_rw { + inputs.push(val.unwrap()); + ext_constraints.push(i.to_string()); + } + if out.is_indirect { + indirect_outputs.push(val.unwrap()); + } else { + output_types.push(type_of::type_of(bcx.ccx(), ty)); + } + } + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; + } + + let clobbers = ia.clobbers.iter() + .map(|s| format!("~{{{}}}", &s)); + + // Default per-arch clobbers + // Basically what clang does + let arch_clobbers = match &bcx.sess().target.target.arch[..] 
{ + "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], + _ => Vec::new() + }; + + let all_constraints = + ia.outputs.iter().map(|out| out.constraint.to_string()) + .chain(ia.inputs.iter().map(|s| s.to_string())) + .chain(ext_constraints) + .chain(clobbers) + .chain(arch_clobbers.iter().map(|s| s.to_string())) + .collect::>().join(","); + + debug!("Asm Constraints: {}", &all_constraints[..]); + + // Depending on how many outputs we have, the return type is different + let num_outputs = output_types.len(); + let output_type = match num_outputs { + 0 => Type::void(bcx.ccx()), + 1 => output_types[0], + _ => Type::struct_(bcx.ccx(), &output_types[..], false) + }; + + let dialect = match ia.dialect { + AsmDialect::Att => llvm::AsmDialect::Att, + AsmDialect::Intel => llvm::AsmDialect::Intel, + }; + + let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); + let constraint_cstr = CString::new(all_constraints).unwrap(); + let r = InlineAsmCall(bcx, + asm.as_ptr(), + constraint_cstr.as_ptr(), + &inputs, + output_type, + ia.volatile, + ia.alignstack, + dialect); + + // Again, based on how many outputs we have + let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); + for (i, (_, &(val, _))) in outputs.enumerate() { + let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) }; + Store(bcx, v, val); + } + + // Store expn_id in a metadata node so we can map LLVM errors + // back to source locations. See #17552. 
+ unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(), + key.as_ptr() as *const c_char, key.len() as c_uint); + + let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32); + + llvm::LLVMSetMetadata(r, kind, + llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); + } +} diff --git a/src/librustc_trans/assert_module_sources.rs b/src/librustc_trans/assert_module_sources.rs new file mode 100644 index 0000000000000..898e65ce391e6 --- /dev/null +++ b/src/librustc_trans/assert_module_sources.rs @@ -0,0 +1,147 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass is only used for UNIT TESTS related to incremental +//! compilation. It tests whether a particular `.o` file will be re-used +//! from a previous compilation or whether it must be regenerated. +//! +//! The user adds annotations to the crate of the following form: +//! +//! ``` +//! #![rustc_partition_reused(module="spike", cfg="rpass2")] +//! #![rustc_partition_translated(module="spike-x", cfg="rpass2")] +//! ``` +//! +//! The first indicates (in the cfg `rpass2`) that `spike.o` will be +//! reused, the second that `spike-x.o` will be recreated. If these +//! annotations are inaccurate, errors are reported. +//! +//! The reason that we use `cfg=...` and not `#[cfg_attr]` is so that +//! the HIR doesn't change as a result of the annotations, which might +//! perturb the reuse results. 
+ +use rustc::ty::TyCtxt; +use syntax::ast; + +use {ModuleSource, ModuleTranslation}; + +const PARTITION_REUSED: &'static str = "rustc_partition_reused"; +const PARTITION_TRANSLATED: &'static str = "rustc_partition_translated"; + +const MODULE: &'static str = "module"; +const CFG: &'static str = "cfg"; + +#[derive(Debug, PartialEq)] +enum Disposition { Reused, Translated } + +pub fn assert_module_sources<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + modules: &[ModuleTranslation]) { + let _ignore = tcx.dep_graph.in_ignore(); + + if tcx.sess.opts.incremental.is_none() { + return; + } + + let ams = AssertModuleSource { tcx: tcx, modules: modules }; + for attr in &tcx.map.krate().attrs { + ams.check_attr(attr); + } +} + +struct AssertModuleSource<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + modules: &'a [ModuleTranslation], +} + +impl<'a, 'tcx> AssertModuleSource<'a, 'tcx> { + fn check_attr(&self, attr: &ast::Attribute) { + let disposition = if attr.check_name(PARTITION_REUSED) { + Disposition::Reused + } else if attr.check_name(PARTITION_TRANSLATED) { + Disposition::Translated + } else { + return; + }; + + if !self.check_config(attr) { + debug!("check_attr: config does not match, ignoring attr"); + return; + } + + let mname = self.field(attr, MODULE); + let mtrans = self.modules.iter().find(|mtrans| *mtrans.name == *mname.as_str()); + let mtrans = match mtrans { + Some(m) => m, + None => { + debug!("module name `{}` not found amongst:", mname); + for mtrans in self.modules { + debug!("module named `{}` with disposition {:?}", + mtrans.name, + self.disposition(mtrans)); + } + + self.tcx.sess.span_err( + attr.span, + &format!("no module named `{}`", mname)); + return; + } + }; + + let mtrans_disposition = self.disposition(mtrans); + if disposition != mtrans_disposition { + self.tcx.sess.span_err( + attr.span, + &format!("expected module named `{}` to be {:?} but is {:?}", + mname, + disposition, + mtrans_disposition)); + } + } + + fn disposition(&self, mtrans: 
&ModuleTranslation) -> Disposition { + match mtrans.source { + ModuleSource::Preexisting(_) => Disposition::Reused, + ModuleSource::Translated(_) => Disposition::Translated, + } + } + + fn field(&self, attr: &ast::Attribute, name: &str) -> ast::Name { + for item in attr.meta_item_list().unwrap_or(&[]) { + if item.check_name(name) { + if let Some(value) = item.value_str() { + return value; + } else { + self.tcx.sess.span_fatal( + item.span, + &format!("associated value expected for `{}`", name)); + } + } + } + + self.tcx.sess.span_fatal( + attr.span, + &format!("no field `{}`", name)); + } + + /// Scan for a `cfg="foo"` attribute and check whether we have a + /// cfg flag called `foo`. + fn check_config(&self, attr: &ast::Attribute) -> bool { + let config = &self.tcx.sess.parse_sess.config; + let value = self.field(attr, CFG); + debug!("check_config(config={:?}, value={:?})", config, value); + if config.iter().any(|&(name, _)| name == value) { + debug!("check_config: matched"); + return true; + } + debug!("check_config: no match found"); + return false; + } + +} diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs new file mode 100644 index 0000000000000..efdd1b736f0e7 --- /dev/null +++ b/src/librustc_trans/attributes.rs @@ -0,0 +1,109 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Set and unset common attributes on LLVM values. + +use std::ffi::{CStr, CString}; + +use llvm::{self, Attribute, ValueRef}; +use llvm::AttributePlace::Function; +pub use syntax::attr::InlineAttr; +use syntax::ast; +use context::CrateContext; + +/// Mark LLVM function to use provided inline heuristic. 
+#[inline] +pub fn inline(val: ValueRef, inline: InlineAttr) { + use self::InlineAttr::*; + match inline { + Hint => Attribute::InlineHint.apply_llfn(Function, val), + Always => Attribute::AlwaysInline.apply_llfn(Function, val), + Never => Attribute::NoInline.apply_llfn(Function, val), + None => { + Attribute::InlineHint.unapply_llfn(Function, val); + Attribute::AlwaysInline.unapply_llfn(Function, val); + Attribute::NoInline.unapply_llfn(Function, val); + }, + }; +} + +/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function. +#[inline] +pub fn emit_uwtable(val: ValueRef, emit: bool) { + Attribute::UWTable.toggle_llfn(Function, val, emit); +} + +/// Tell LLVM whether the function can or cannot unwind. +#[inline] +pub fn unwind(val: ValueRef, can_unwind: bool) { + Attribute::NoUnwind.toggle_llfn(Function, val, !can_unwind); +} + +/// Tell LLVM whether it should optimise function for size. +#[inline] +#[allow(dead_code)] // possibly useful function +pub fn set_optimize_for_size(val: ValueRef, optimize: bool) { + Attribute::OptimizeForSize.toggle_llfn(Function, val, optimize); +} + +/// Tell LLVM if this function should be 'naked', i.e. skip the epilogue and prologue. +#[inline] +pub fn naked(val: ValueRef, is_naked: bool) { + Attribute::Naked.toggle_llfn(Function, val, is_naked); +} + +pub fn set_frame_pointer_elimination(ccx: &CrateContext, llfn: ValueRef) { + // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a + // parameter. + if ccx.sess().must_not_eliminate_frame_pointers() { + llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + cstr("no-frame-pointer-elim\0"), cstr("true\0")); + } +} + +/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute]) +/// attributes. 
+pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) { + use syntax::attr::*; + inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs)); + + set_frame_pointer_elimination(ccx, llfn); + let mut target_features = vec![]; + for attr in attrs { + if attr.check_name("target_feature") { + if let Some(val) = attr.value_str() { + for feat in val.as_str().split(",").map(|f| f.trim()) { + if !feat.is_empty() && !feat.contains('\0') { + target_features.push(feat.to_string()); + } + } + } + } else if attr.check_name("cold") { + Attribute::Cold.apply_llfn(Function, llfn); + } else if attr.check_name("naked") { + naked(llfn, true); + } else if attr.check_name("allocator") { + Attribute::NoAlias.apply_llfn( + llvm::AttributePlace::ReturnValue(), llfn); + } else if attr.check_name("unwind") { + unwind(llfn, true); + } + } + if !target_features.is_empty() { + let val = CString::new(target_features.join(",")).unwrap(); + llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + cstr("target-features\0"), &val); + } +} + +fn cstr(s: &'static str) -> &CStr { + CStr::from_bytes_with_nul(s.as_bytes()).expect("null-terminated string") +} diff --git a/src/librustc_trans/back/archive.rs b/src/librustc_trans/back/archive.rs index 850608588234c..11ab6dcaa87f9 100644 --- a/src/librustc_trans/back/archive.rs +++ b/src/librustc_trans/back/archive.rs @@ -10,24 +10,17 @@ //! 
A helper class for dealing with static archives -use std::env; use std::ffi::{CString, CStr, OsString}; -use std::fs::{self, File}; -use std::io::prelude::*; use std::io; use std::mem; use std::path::{Path, PathBuf}; -use std::process::{Command, Output, Stdio}; use std::ptr; use std::str; -use middle::cstore::CrateStore; - use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; use rustc::session::Session; -use rustc_back::tempdir::TempDir; pub struct ArchiveConfig<'a> { pub sess: &'a Session, @@ -43,7 +36,6 @@ pub struct ArchiveConfig<'a> { #[must_use = "must call build() to finish building the archive"] pub struct ArchiveBuilder<'a> { config: ArchiveConfig<'a>, - work_dir: TempDir, removals: Vec, additions: Vec, should_update_symbols: bool, @@ -57,17 +49,10 @@ enum Addition { }, Archive { archive: ArchiveRO, - archive_name: String, skip: Box bool>, }, } -enum Action<'a> { - Remove(&'a [String]), - AddObjects(&'a [&'a PathBuf], bool), - UpdateSymbols, -} - pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) -> PathBuf { // On Windows, static libraries sometimes show up as libfoo.a and other @@ -104,7 +89,6 @@ impl<'a> ArchiveBuilder<'a> { pub fn new(config: ArchiveConfig<'a>) -> ArchiveBuilder<'a> { ArchiveBuilder { config: config, - work_dir: TempDir::new("rsar").unwrap(), removals: Vec::new(), additions: Vec::new(), should_update_symbols: false, @@ -124,6 +108,7 @@ impl<'a> ArchiveBuilder<'a> { } let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap(); let ret = archive.iter() + .filter_map(|child| child.ok()) .filter(is_relevant_child) .filter_map(|child| child.name()) .filter(|name| !self.removals.iter().any(|x| x == name)) @@ -149,7 +134,7 @@ impl<'a> ArchiveBuilder<'a> { pub fn add_native_library(&mut self, name: &str) { let location = find_library(name, &self.config.lib_search_paths, self.config.sess); - self.add_archive(&location, name, |_| false).unwrap_or_else(|e| { + self.add_archive(&location, 
|_| false).unwrap_or_else(|e| { self.config.sess.fatal(&format!("failed to add native library {}: {}", location.to_string_lossy(), e)); }); @@ -160,8 +145,11 @@ impl<'a> ArchiveBuilder<'a> { /// /// This ignores adding the bytecode from the rlib, and if LTO is enabled /// then the object file also isn't added. - pub fn add_rlib(&mut self, rlib: &Path, name: &str, lto: bool) - -> io::Result<()> { + pub fn add_rlib(&mut self, + rlib: &Path, + name: &str, + lto: bool, + skip_objects: bool) -> io::Result<()> { // Ignoring obj file starting with the crate name // as simple comparison is not enough - there // might be also an extra name suffix @@ -173,14 +161,28 @@ impl<'a> ArchiveBuilder<'a> { let metadata_filename = self.config.sess.cstore.metadata_filename().to_owned(); - self.add_archive(rlib, &name[..], move |fname: &str| { - let skip_obj = lto && fname.starts_with(&obj_start) - && fname.ends_with(".o"); - skip_obj || fname.ends_with(bc_ext) || fname == metadata_filename + self.add_archive(rlib, move |fname: &str| { + if fname.ends_with(bc_ext) || fname == metadata_filename { + return true + } + + // Don't include Rust objects if LTO is enabled + if lto && fname.starts_with(&obj_start) && fname.ends_with(".o") { + return true + } + + // Otherwise if this is *not* a rust object and we're skipping + // objects then skip this file + if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) { + return true + } + + // ok, don't skip this + return false }) } - fn add_archive(&mut self, archive: &Path, name: &str, skip: F) + fn add_archive(&mut self, archive: &Path, skip: F) -> io::Result<()> where F: FnMut(&str) -> bool + 'static { @@ -191,7 +193,6 @@ impl<'a> ArchiveBuilder<'a> { }; self.additions.push(Addition::Archive { archive: archive, - archive_name: name.to_string(), skip: Box::new(skip), }); Ok(()) @@ -215,228 +216,23 @@ impl<'a> ArchiveBuilder<'a> { /// Combine the provided files, rlibs, and native libraries into a single /// `Archive`. 
pub fn build(&mut self) { - let res = match self.llvm_archive_kind() { - Some(kind) => self.build_with_llvm(kind), - None => self.build_with_ar_cmd(), - }; - if let Err(e) = res { - self.config.sess.fatal(&format!("failed to build archive: {}", e)); - } - } - - pub fn llvm_archive_kind(&self) -> Option { - if unsafe { llvm::LLVMVersionMinor() < 7 } { - return None - } - - // Currently LLVM only supports writing archives in the 'gnu' format. - match &self.config.sess.target.target.options.archive_format[..] { - "gnu" => Some(ArchiveKind::K_GNU), - "mips64" => Some(ArchiveKind::K_MIPS64), - "bsd" => Some(ArchiveKind::K_BSD), - "coff" => Some(ArchiveKind::K_COFF), - _ => None, - } - } - - pub fn using_llvm(&self) -> bool { - self.llvm_archive_kind().is_some() - } - - fn build_with_ar_cmd(&mut self) -> io::Result<()> { - let removals = mem::replace(&mut self.removals, Vec::new()); - let additions = mem::replace(&mut self.additions, Vec::new()); - let should_update_symbols = mem::replace(&mut self.should_update_symbols, - false); - - // Don't use fs::copy because libs may be installed as read-only and we - // want to modify this archive, so we use `io::copy` to not preserve - // permission bits. - if let Some(ref s) = self.config.src { - try!(io::copy(&mut try!(File::open(s)), - &mut try!(File::create(&self.config.dst)))); - } - - if removals.len() > 0 { - self.run(None, Action::Remove(&removals)); - } - - let mut members = Vec::new(); - for addition in additions { - match addition { - Addition::File { path, name_in_archive } => { - let dst = self.work_dir.path().join(&name_in_archive); - try!(fs::copy(&path, &dst)); - members.push(PathBuf::from(name_in_archive)); - } - Addition::Archive { archive, archive_name, mut skip } => { - try!(self.add_archive_members(&mut members, archive, - &archive_name, &mut *skip)); - } - } - } - - // Get an absolute path to the destination, so `ar` will work even - // though we run it from `self.work_dir`. 
- let mut objects = Vec::new(); - let mut total_len = self.config.dst.to_string_lossy().len(); - - if members.is_empty() { - if should_update_symbols { - self.run(Some(self.work_dir.path()), Action::UpdateSymbols); - } - return Ok(()) - } - - // Don't allow the total size of `args` to grow beyond 32,000 bytes. - // Windows will raise an error if the argument string is longer than - // 32,768, and we leave a bit of extra space for the program name. - const ARG_LENGTH_LIMIT: usize = 32_000; - - for member_name in &members { - let len = member_name.to_string_lossy().len(); - - // `len + 1` to account for the space that's inserted before each - // argument. (Windows passes command-line arguments as a single - // string, not an array of strings.) - if total_len + len + 1 > ARG_LENGTH_LIMIT { - // Add the archive members seen so far, without updating the - // symbol table. - self.run(Some(self.work_dir.path()), - Action::AddObjects(&objects, false)); - - objects.clear(); - total_len = self.config.dst.to_string_lossy().len(); - } - - objects.push(member_name); - total_len += len + 1; - } - - // Add the remaining archive members, and update the symbol table if - // necessary. - self.run(Some(self.work_dir.path()), - Action::AddObjects(&objects, should_update_symbols)); - Ok(()) - } - - fn add_archive_members(&mut self, members: &mut Vec, - archive: ArchiveRO, name: &str, - skip: &mut FnMut(&str) -> bool) -> io::Result<()> { - // Next, we must rename all of the inputs to "guaranteed unique names". - // We write each file into `self.work_dir` under its new unique name. - // The reason for this renaming is that archives are keyed off the name - // of the files, so if two files have the same name they will override - // one another in the archive (bad). - // - // We skip any files explicitly desired for skipping, and we also skip - // all SYMDEF files as these are just magical placeholders which get - // re-created when we make a new archive anyway. 
- for file in archive.iter().filter(is_relevant_child) { - let filename = file.name().unwrap(); - if skip(filename) { continue } - let filename = Path::new(filename).file_name().unwrap() - .to_str().unwrap(); - - // Archives on unix systems typically do not have slashes in - // filenames as the `ar` utility generally only uses the last - // component of a path for the filename list in the archive. On - // Windows, however, archives assembled with `lib.exe` will preserve - // the full path to the file that was placed in the archive, - // including path separators. - // - // The code below is munging paths so it'll go wrong pretty quickly - // if there's some unexpected slashes in the filename, so here we - // just chop off everything but the filename component. Note that - // this can cause duplicate filenames, but that's also handled below - // as well. - let filename = Path::new(filename).file_name().unwrap() - .to_str().unwrap(); - - // An archive can contain files of the same name multiple times, so - // we need to be sure to not have them overwrite one another when we - // extract them. Consequently we need to find a truly unique file - // name for us! - let mut new_filename = String::new(); - for n in 0.. { - let n = if n == 0 {String::new()} else {format!("-{}", n)}; - new_filename = format!("r{}-{}-{}", n, name, filename); - - // LLDB (as mentioned in back::link) crashes on filenames of - // exactly - // 16 bytes in length. If we're including an object file with - // exactly 16-bytes of characters, give it some prefix so - // that it's not 16 bytes. 
- new_filename = if new_filename.len() == 16 { - format!("lldb-fix-{}", new_filename) - } else { - new_filename - }; - - let present = members.iter().filter_map(|p| { - p.file_name().and_then(|f| f.to_str()) - }).any(|s| s == new_filename); - if !present { - break - } + let kind = match self.llvm_archive_kind() { + Ok(kind) => kind, + Err(kind) => { + self.config.sess.fatal(&format!("Don't know how to build archive of type: {}", + kind)); } - let dst = self.work_dir.path().join(&new_filename); - try!(try!(File::create(&dst)).write_all(file.data())); - members.push(PathBuf::from(new_filename)); - } - Ok(()) - } + }; - fn run(&self, cwd: Option<&Path>, action: Action) -> Output { - let abs_dst = env::current_dir().unwrap().join(&self.config.dst); - let ar = &self.config.ar_prog; - let mut cmd = Command::new(ar); - cmd.env("PATH", &self.config.command_path); - cmd.stdout(Stdio::piped()).stderr(Stdio::piped()); - self.prepare_ar_action(&mut cmd, &abs_dst, action); - info!("{:?}", cmd); - - if let Some(p) = cwd { - cmd.current_dir(p); - info!("inside {:?}", p.display()); + if let Err(e) = self.build_with_llvm(kind) { + self.config.sess.fatal(&format!("failed to build archive: {}", e)); } - let sess = &self.config.sess; - match cmd.spawn() { - Ok(prog) => { - let o = prog.wait_with_output().unwrap(); - if !o.status.success() { - sess.struct_err(&format!("{:?} failed with: {}", cmd, o.status)) - .note(&format!("stdout ---\n{}", - str::from_utf8(&o.stdout).unwrap())) - .note(&format!("stderr ---\n{}", - str::from_utf8(&o.stderr).unwrap())) - .emit(); - sess.abort_if_errors(); - } - o - }, - Err(e) => { - sess.fatal(&format!("could not exec `{}`: {}", - self.config.ar_prog, e)); - } - } } - fn prepare_ar_action(&self, cmd: &mut Command, dst: &Path, action: Action) { - match action { - Action::Remove(files) => { - cmd.arg("d").arg(dst).args(files); - } - Action::AddObjects(objs, update_symbols) => { - cmd.arg(if update_symbols {"crs"} else {"crS"}) - .arg(dst) - .args(objs); 
- } - Action::UpdateSymbols => { - cmd.arg("s").arg(dst); - } - } + fn llvm_archive_kind(&self) -> Result { + let kind = &*self.config.sess.target.target.options.archive_format; + kind.parse().map_err(|_| kind) } fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> { @@ -448,6 +244,7 @@ impl<'a> ArchiveBuilder<'a> { unsafe { if let Some(archive) = self.src_archive() { for child in archive.iter() { + let child = child.map_err(string_to_io_error)?; let child_name = match child.name() { Some(s) => s, None => continue, @@ -456,7 +253,7 @@ impl<'a> ArchiveBuilder<'a> { continue } - let name = try!(CString::new(child_name)); + let name = CString::new(child_name)?; members.push(llvm::LLVMRustArchiveMemberNew(ptr::null(), name.as_ptr(), child.raw())); @@ -466,20 +263,35 @@ impl<'a> ArchiveBuilder<'a> { for addition in mem::replace(&mut self.additions, Vec::new()) { match addition { Addition::File { path, name_in_archive } => { - let path = try!(CString::new(path.to_str().unwrap())); - let name = try!(CString::new(name_in_archive)); + let path = CString::new(path.to_str().unwrap())?; + let name = CString::new(name_in_archive)?; members.push(llvm::LLVMRustArchiveMemberNew(path.as_ptr(), name.as_ptr(), ptr::null_mut())); strings.push(path); strings.push(name); } - Addition::Archive { archive, archive_name: _, mut skip } => { - for child in archive.iter().filter(is_relevant_child) { + Addition::Archive { archive, mut skip } => { + for child in archive.iter() { + let child = child.map_err(string_to_io_error)?; + if !is_relevant_child(&child) { + continue + } let child_name = child.name().unwrap(); - if skip(child_name) { continue } - - let name = try!(CString::new(child_name)); + if skip(child_name) { + continue + } + + // It appears that LLVM's archive writer is a little + // buggy if the name we pass down isn't just the + // filename component, so chop that off here and + // pass it in. + // + // See LLVM bug 25877 for more info. 
+ let child_name = Path::new(child_name) + .file_name().unwrap() + .to_str().unwrap(); + let name = CString::new(child_name)?; let m = llvm::LLVMRustArchiveMemberNew(ptr::null(), name.as_ptr(), child.raw()); @@ -492,13 +304,13 @@ impl<'a> ArchiveBuilder<'a> { } let dst = self.config.dst.to_str().unwrap().as_bytes(); - let dst = try!(CString::new(dst)); + let dst = CString::new(dst)?; let r = llvm::LLVMRustWriteArchive(dst.as_ptr(), members.len() as libc::size_t, members.as_ptr(), self.should_update_symbols, kind); - let ret = if r != 0 { + let ret = if r.into_result().is_err() { let err = llvm::LLVMRustGetLastError(); let msg = if err.is_null() { "failed to write archive".to_string() @@ -517,3 +329,7 @@ impl<'a> ArchiveBuilder<'a> { } } } + +fn string_to_io_error(s: String) -> io::Error { + io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s)) +} diff --git a/src/librustc_trans/back/link.rs b/src/librustc_trans/back/link.rs index ec1383f1f7b2b..648dc4c24c9a6 100644 --- a/src/librustc_trans/back/link.rs +++ b/src/librustc_trans/back/link.rs @@ -9,27 +9,27 @@ // except according to those terms. 
use super::archive::{ArchiveBuilder, ArchiveConfig}; -use super::linker::{Linker, GnuLinker, MsvcLinker}; +use super::linker::Linker; use super::rpath::RPathConfig; use super::rpath; use super::msvc; -use super::svh::Svh; use session::config; use session::config::NoDebugInfo; use session::config::{OutputFilenames, Input, OutputType}; use session::filesearch; use session::search_paths::PathKind; use session::Session; -use middle::cstore::{self, CrateStore, LinkMeta}; +use middle::cstore::{self, LinkMeta, NativeLibrary, LibSource}; use middle::cstore::{LinkagePreference, NativeLibraryKind}; use middle::dependency_format::Linkage; -use middle::ty::{self, Ty}; -use rustc::front::map::DefPath; -use trans::{CrateContext, CrateTranslation, gensym_name}; +use CrateTranslation; use util::common::time; -use util::sha2::{Digest, Sha256}; use util::fs::fix_windows_verbatim_for_gcc; +use rustc::dep_graph::DepNode; +use rustc::hir::def_id::CrateNum; +use rustc::hir::svh::Svh; use rustc_back::tempdir::TempDir; +use rustc_incremental::IncrementalHashesMap; use std::ascii; use std::char; @@ -37,19 +37,15 @@ use std::env; use std::ffi::OsString; use std::fs; use std::io::{self, Read, Write}; -use std::iter::once; use std::mem; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use flate; -use serialize::hex::ToHex; use syntax::ast; -use syntax::codemap::Span; -use syntax::parse::token::{self, InternedString}; -use syntax::attr::AttrMetaMethods; - -use rustc_front::hir; +use syntax::attr; +use syntax::symbol::Symbol; +use syntax_pos::Span; // RLIB LLVM-BYTECODE OBJECT LAYOUT // Version 1 @@ -81,58 +77,6 @@ pub const RLIB_BYTECODE_OBJECT_V1_DATA_OFFSET: usize = RLIB_BYTECODE_OBJECT_V1_DATASIZE_OFFSET + 8; -/* - * Name mangling and its relationship to metadata. This is complex. Read - * carefully. - * - * The semantic model of Rust linkage is, broadly, that "there's no global - * namespace" between crates. 
Our aim is to preserve the illusion of this - * model despite the fact that it's not *quite* possible to implement on - * modern linkers. We initially didn't use system linkers at all, but have - * been convinced of their utility. - * - * There are a few issues to handle: - * - * - Linkers operate on a flat namespace, so we have to flatten names. - * We do this using the C++ namespace-mangling technique. Foo::bar - * symbols and such. - * - * - Symbols with the same name but different types need to get different - * linkage-names. We do this by hashing a string-encoding of the type into - * a fixed-size (currently 16-byte hex) cryptographic hash function (CHF: - * we use SHA256) to "prevent collisions". This is not airtight but 16 hex - * digits on uniform probability means you're going to need 2**32 same-name - * symbols in the same process before you're even hitting birthday-paradox - * collision probability. - * - * - Symbols in different crates but with same names "within" the crate need - * to get different linkage-names. - * - * - The hash shown in the filename needs to be predictable and stable for - * build tooling integration. It also needs to be using a hash function - * which is easy to use from Python, make, etc. - * - * So here is what we do: - * - * - Consider the package id; every crate has one (specified with crate_id - * attribute). If a package id isn't provided explicitly, we infer a - * versionless one from the output name. The version will end up being 0.0 - * in this case. CNAME and CVERS are taken from this package id. For - * example, github.com/mozilla/CNAME#CVERS. - * - * - Define CMH as SHA256(crateid). - * - * - Define CMH8 as the first 8 characters of CMH. 
- * - * - Compile our crate to lib CNAME-CMH8-CVERS.so - * - * - Define STH(sym) as SHA256(CMH, type_str(sym)) - * - * - Suffix a mangled sym with ::STH@CVERS, so that it is unique in the - * name, non-name metadata, and type sense, and versioned in the way - * system linkers understand. - */ - pub fn find_crate_name(sess: Option<&Session>, attrs: &[ast::Attribute], input: &Input) -> String { @@ -150,8 +94,8 @@ pub fn find_crate_name(sess: Option<&Session>, if let Some(sess) = sess { if let Some(ref s) = sess.opts.crate_name { - if let Some((attr, ref name)) = attr_crate_name { - if *s != &name[..] { + if let Some((attr, name)) = attr_crate_name { + if name != &**s { let msg = format!("--crate-name and #[crate_name] are \ required to match, but `{}` != `{}`", s, name); @@ -182,203 +126,28 @@ pub fn find_crate_name(sess: Option<&Session>, "rust_out".to_string() } -pub fn build_link_meta(sess: &Session, - krate: &hir::Crate, +pub fn build_link_meta(incremental_hashes_map: &IncrementalHashesMap, name: &str) -> LinkMeta { let r = LinkMeta { - crate_name: name.to_owned(), - crate_hash: Svh::calculate(&sess.opts.cg.metadata, krate), + crate_name: Symbol::intern(name), + crate_hash: Svh::new(incremental_hashes_map[&DepNode::Krate].to_smaller_hash()), }; info!("{:?}", r); return r; } -fn truncated_hash_result(symbol_hasher: &mut Sha256) -> String { - let output = symbol_hasher.result_bytes(); - // 64 bits should be enough to avoid collisions. - output[.. 8].to_hex().to_string() -} - - -// This calculates STH for a symbol, as defined above -fn symbol_hash<'tcx>(tcx: &ty::ctxt<'tcx>, - symbol_hasher: &mut Sha256, - t: Ty<'tcx>, - link_meta: &LinkMeta) - -> String { - // NB: do *not* use abbrevs here as we want the symbol names - // to be independent of one another in the crate. 
- - symbol_hasher.reset(); - symbol_hasher.input_str(&link_meta.crate_name); - symbol_hasher.input_str("-"); - symbol_hasher.input_str(link_meta.crate_hash.as_str()); - for meta in tcx.sess.crate_metadata.borrow().iter() { - symbol_hasher.input_str(&meta[..]); - } - symbol_hasher.input_str("-"); - symbol_hasher.input(&tcx.sess.cstore.encode_type(tcx, t)); - // Prefix with 'h' so that it never blends into adjacent digits - let mut hash = String::from("h"); - hash.push_str(&truncated_hash_result(symbol_hasher)); - hash -} - -fn get_symbol_hash<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> String { - match ccx.type_hashcodes().borrow().get(&t) { - Some(h) => return h.to_string(), - None => {} - } - - let mut symbol_hasher = ccx.symbol_hasher().borrow_mut(); - let hash = symbol_hash(ccx.tcx(), &mut *symbol_hasher, t, ccx.link_meta()); - ccx.type_hashcodes().borrow_mut().insert(t, hash.clone()); - hash -} - - -// Name sanitation. LLVM will happily accept identifiers with weird names, but -// gas doesn't! -// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $ -pub fn sanitize(s: &str) -> String { - let mut result = String::new(); - for c in s.chars() { - match c { - // Escape these with $ sequences - '@' => result.push_str("$SP$"), - '*' => result.push_str("$BP$"), - '&' => result.push_str("$RF$"), - '<' => result.push_str("$LT$"), - '>' => result.push_str("$GT$"), - '(' => result.push_str("$LP$"), - ')' => result.push_str("$RP$"), - ',' => result.push_str("$C$"), - - // '.' doesn't occur in types and functions, so reuse it - // for ':' and '-' - '-' | ':' => result.push('.'), - - // These are legal symbols - 'a' ... 'z' - | 'A' ... 'Z' - | '0' ... '9' - | '_' | '.' | '$' => result.push(c), - - _ => { - result.push('$'); - for c in c.escape_unicode().skip(1) { - match c { - '{' => {}, - '}' => result.push('$'), - c => result.push(c), - } - } - } - } - } - - // Underscore-qualify anything that didn't start as an ident. 
- if !result.is_empty() && - result.as_bytes()[0] != '_' as u8 && - ! (result.as_bytes()[0] as char).is_xid_start() { - return format!("_{}", &result[..]); - } - - return result; -} - -pub fn mangle>(path: PI, hash: Option<&str>) -> String { - // Follow C++ namespace-mangling style, see - // http://en.wikipedia.org/wiki/Name_mangling for more info. - // - // It turns out that on OSX you can actually have arbitrary symbols in - // function names (at least when given to LLVM), but this is not possible - // when using unix's linker. Perhaps one day when we just use a linker from LLVM - // we won't need to do this name mangling. The problem with name mangling is - // that it seriously limits the available characters. For example we can't - // have things like &T in symbol names when one would theoretically - // want them for things like impls of traits on that type. - // - // To be able to work on all platforms and get *some* reasonable output, we - // use C++ name-mangling. - - let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested - - fn push(n: &mut String, s: &str) { - let sani = sanitize(s); - n.push_str(&format!("{}{}", sani.len(), sani)); - } - - // First, connect each component with pairs. - for data in path { - push(&mut n, &data); - } - - match hash { - Some(s) => push(&mut n, s), - None => {} - } - - n.push('E'); // End name-sequence. - n -} - -pub fn exported_name(path: DefPath, hash: &str) -> String { - let path = path.into_iter() - .map(|e| e.data.as_interned_str()); - mangle(path, Some(hash)) -} - -pub fn mangle_exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, path: DefPath, - t: Ty<'tcx>, id: ast::NodeId) -> String { - let mut hash = get_symbol_hash(ccx, t); - - // Paths can be completely identical for different nodes, - // e.g. `fn foo() { { fn a() {} } { fn a() {} } }`, so we - // generate unique characters from the node id. For now - // hopefully 3 characters is enough to avoid collisions. 
- const EXTRA_CHARS: &'static str = - "abcdefghijklmnopqrstuvwxyz\ - ABCDEFGHIJKLMNOPQRSTUVWXYZ\ - 0123456789"; - let id = id as usize; - let extra1 = id % EXTRA_CHARS.len(); - let id = id / EXTRA_CHARS.len(); - let extra2 = id % EXTRA_CHARS.len(); - let id = id / EXTRA_CHARS.len(); - let extra3 = id % EXTRA_CHARS.len(); - hash.push(EXTRA_CHARS.as_bytes()[extra1] as char); - hash.push(EXTRA_CHARS.as_bytes()[extra2] as char); - hash.push(EXTRA_CHARS.as_bytes()[extra3] as char); - - exported_name(path, &hash[..]) -} - -pub fn mangle_internal_name_by_type_and_seq<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - name: &str) -> String { - let path = [token::intern(&t.to_string()).as_str(), gensym_name(name).as_str()]; - let hash = get_symbol_hash(ccx, t); - mangle(path.iter().cloned(), Some(&hash[..])) -} - -pub fn mangle_internal_name_by_path_and_seq(path: DefPath, flav: &str) -> String { - let names = - path.into_iter() - .map(|e| e.data.as_interned_str()) - .chain(once(gensym_name(flav).as_str())); // append unique version of "flav" - mangle(names, None) -} - -pub fn get_linker(sess: &Session) -> (String, Command) { +// The third parameter is for an extra path to add to PATH for MSVC +// cross linkers for host toolchain DLL dependencies +pub fn get_linker(sess: &Session) -> (String, Command, Option) { if let Some(ref linker) = sess.opts.cg.linker { - (linker.clone(), Command::new(linker)) + (linker.clone(), Command::new(linker), None) } else if sess.target.target.options.is_like_msvc { - ("link.exe".to_string(), msvc::link_exe_cmd(sess)) + let (cmd, host) = msvc::link_exe_cmd(sess); + ("link.exe".to_string(), cmd, host) } else { (sess.target.target.options.linker.clone(), - Command::new(&sess.target.target.options.linker)) + Command::new(&sess.target.target.options.linker), None) } } @@ -388,7 +157,7 @@ pub fn get_ar_prog(sess: &Session) -> String { }) } -fn command_path(sess: &Session) -> OsString { +fn command_path(sess: &Session, extra: Option) -> OsString 
{ // The compiler's sysroot often has some bundled tools, so add it to the // PATH for the child. let mut new_path = sess.host_filesearch(PathKind::All) @@ -396,6 +165,7 @@ fn command_path(sess: &Session) -> OsString { if let Some(path) = env::var_os("PATH") { new_path.extend(env::split_paths(&path)); } + new_path.extend(extra); env::join_paths(new_path).unwrap() } @@ -416,11 +186,19 @@ pub fn link_binary(sess: &Session, trans: &CrateTranslation, outputs: &OutputFilenames, crate_name: &str) -> Vec { + let _task = sess.dep_graph.in_task(DepNode::LinkBinary); + let mut out_filenames = Vec::new(); for &crate_type in sess.crate_types.borrow().iter() { + // Ignore executable crates if we have -Z no-trans, as they will error. + if sess.opts.debugging_opts.no_trans && + crate_type == config::CrateTypeExecutable { + continue; + } + if invalid_output_for_target(sess, crate_type) { - sess.bug(&format!("invalid output type `{:?}` for target os `{}`", - crate_type, sess.opts.target_triple)); + bug!("invalid output type `{:?}` for target os `{}`", + crate_type, sess.opts.target_triple); } let out_file = link_binary_output(sess, trans, crate_type, outputs, crate_name); @@ -429,7 +207,7 @@ pub fn link_binary(sess: &Session, // Remove the temporary object file and metadata if we aren't saving temps if !sess.opts.cg.save_temps { - for obj in object_filenames(sess, outputs) { + for obj in object_filenames(trans, outputs) { remove(sess, &obj); } remove(sess, &outputs.with_extension("metadata.o")); @@ -461,6 +239,8 @@ pub fn invalid_output_for_target(sess: &Session, crate_type: config::CrateType) -> bool { match (sess.target.target.options.dynamic_linking, sess.target.target.options.executables, crate_type) { + (false, _, config::CrateTypeCdylib) | + (false, _, config::CrateTypeProcMacro) | (false, _, config::CrateTypeDylib) => true, (_, false, config::CrateTypeExecutable) => true, _ => false @@ -483,6 +263,11 @@ pub fn filename_for_input(sess: &Session, config::CrateTypeRlib => { 
outputs.out_directory.join(&format!("lib{}.rlib", libname)) } + config::CrateTypeMetadata => { + outputs.out_directory.join(&format!("lib{}.rmeta", libname)) + } + config::CrateTypeCdylib | + config::CrateTypeProcMacro | config::CrateTypeDylib => { let (prefix, suffix) = (&sess.target.target.options.dll_prefix, &sess.target.target.options.dll_suffix); @@ -490,7 +275,10 @@ pub fn filename_for_input(sess: &Session, suffix)) } config::CrateTypeStaticlib => { - outputs.out_directory.join(&format!("lib{}.a", libname)) + let (prefix, suffix) = (&sess.target.target.options.staticlib_prefix, + &sess.target.target.options.staticlib_suffix); + outputs.out_directory.join(&format!("{}{}{}", prefix, libname, + suffix)) } config::CrateTypeExecutable => { let suffix = &sess.target.target.options.exe_suffix; @@ -505,23 +293,29 @@ pub fn filename_for_input(sess: &Session, } pub fn each_linked_rlib(sess: &Session, - f: &mut FnMut(ast::CrateNum, &Path)) { + f: &mut FnMut(CrateNum, &Path)) { let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic).into_iter(); let fmts = sess.dependency_formats.borrow(); - let fmts = fmts.get(&config::CrateTypeExecutable).or_else(|| { - fmts.get(&config::CrateTypeStaticlib) - }).unwrap_or_else(|| { - sess.bug("could not find formats for rlibs") + let fmts = fmts.get(&config::CrateTypeExecutable) + .or_else(|| fmts.get(&config::CrateTypeStaticlib)) + .or_else(|| fmts.get(&config::CrateTypeCdylib)) + .or_else(|| fmts.get(&config::CrateTypeProcMacro)); + let fmts = fmts.unwrap_or_else(|| { + bug!("could not find formats for rlibs"); }); for (cnum, path) in crates { - match fmts[cnum as usize - 1] { + match fmts[cnum.as_usize() - 1] { Linkage::NotLinked | Linkage::IncludedFromDylib => continue, _ => {} } let name = sess.cstore.crate_name(cnum).clone(); let path = match path { - Some(p) => p, - None => { + LibSource::Some(p) => p, + LibSource::MetadataOnly => { + sess.fatal(&format!("could not find rlib for: `{}`, found rmeta (metadata) 
file", + name)); + } + LibSource::None => { sess.fatal(&format!("could not find rlib for: `{}`", name)); } }; @@ -534,7 +328,7 @@ fn link_binary_output(sess: &Session, crate_type: config::CrateType, outputs: &OutputFilenames, crate_name: &str) -> PathBuf { - let objects = object_filenames(sess, outputs); + let objects = object_filenames(trans, outputs); let default_filename = filename_for_input(sess, crate_type, crate_name, outputs); let out_filename = outputs.outputs.get(&OutputType::Exe) @@ -565,23 +359,23 @@ fn link_binary_output(sess: &Session, config::CrateTypeStaticlib => { link_staticlib(sess, &objects, &out_filename, tmpdir.path()); } - config::CrateTypeExecutable => { - link_natively(sess, false, &objects, &out_filename, trans, outputs, - tmpdir.path()); + config::CrateTypeMetadata => { + emit_metadata(sess, trans, &out_filename); } - config::CrateTypeDylib => { - link_natively(sess, true, &objects, &out_filename, trans, outputs, - tmpdir.path()); + _ => { + link_natively(sess, crate_type, &objects, &out_filename, trans, + outputs, tmpdir.path()); } } out_filename } -fn object_filenames(sess: &Session, outputs: &OutputFilenames) -> Vec { - (0..sess.opts.cg.codegen_units).map(|i| { - let ext = format!("{}.o", i); - outputs.temp_path(OutputType::Object).with_extension(&ext) +fn object_filenames(trans: &CrateTranslation, + outputs: &OutputFilenames) + -> Vec { + trans.modules.iter().map(|module| { + outputs.temp_path(OutputType::Object, Some(&module.name[..])) }).collect() } @@ -602,7 +396,14 @@ fn archive_config<'a>(sess: &'a Session, src: input.map(|p| p.to_path_buf()), lib_search_paths: archive_search_paths(sess), ar_prog: get_ar_prog(sess), - command_path: command_path(sess), + command_path: command_path(sess, None), + } +} + +fn emit_metadata<'a>(sess: &'a Session, trans: &CrateTranslation, out_filename: &Path) { + let result = fs::File::create(out_filename).and_then(|mut f| f.write_all(&trans.metadata)); + if let Err(e) = result { + 
sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e)); } } @@ -619,29 +420,40 @@ fn link_rlib<'a>(sess: &'a Session, tmpdir: &Path) -> ArchiveBuilder<'a> { info!("preparing rlib from {:?} to {:?}", objects, out_filename); let mut ab = ArchiveBuilder::new(archive_config(sess, out_filename, None)); + for obj in objects { ab.add_file(obj); } - for (l, kind) in sess.cstore.used_libraries() { - match kind { - NativeLibraryKind::NativeStatic => ab.add_native_library(&l), + // Note that in this loop we are ignoring the value of `lib.cfg`. That is, + // we may not be configured to actually include a static library if we're + // adding it here. That's because later when we consume this rlib we'll + // decide whether we actually needed the static library or not. + // + // To do this "correctly" we'd need to keep track of which libraries added + // which object files to the archive. We don't do that here, however. The + // #[link(cfg(..))] feature is unstable, though, and only intended to get + // liblibc working. In that sense the check below just indicates that if + // there are any libraries we want to omit object files for at link time we + // just exclude all custom object files. + // + // Eventually if we want to stabilize or flesh out the #[link(cfg(..))] + // feature then we'll need to figure out how to record what objects were + // loaded from the libraries found here and then encode that into the + // metadata of the rlib we're generating somehow. + for lib in sess.cstore.used_libraries() { + match lib.kind { + NativeLibraryKind::NativeStatic => {} NativeLibraryKind::NativeFramework | - NativeLibraryKind::NativeUnknown => {} + NativeLibraryKind::NativeUnknown => continue, } + ab.add_native_library(&lib.name.as_str()); } // After adding all files to the archive, we need to update the // symbol table of the archive. ab.update_symbols(); - // For OSX/iOS, we must be careful to update symbols only when adding - // object files. 
We're about to start adding non-object files, so run - // `ar` now to process the object files. - if sess.target.target.options.is_like_osx && !ab.using_llvm() { - ab.build(); - } - // Note that it is important that we add all of our non-object "magical // files" *after* all of the object files in the archive. The reason for // this is as follows: @@ -670,15 +482,7 @@ fn link_rlib<'a>(sess: &'a Session, // here so concurrent builds in the same directory don't try to use // the same filename for metadata (stomping over one another) let metadata = tmpdir.join(sess.cstore.metadata_filename()); - match fs::File::create(&metadata).and_then(|mut f| { - f.write_all(&trans.metadata) - }) { - Ok(..) => {} - Err(e) => { - sess.fatal(&format!("failed to write {}: {}", - metadata.display(), e)); - } - } + emit_metadata(sess, trans, &metadata); ab.add_file(&metadata); // For LTO purposes, the bytecode of this library is also inserted @@ -726,7 +530,7 @@ fn link_rlib<'a>(sess: &'a Session, ab.add_file(&bc_deflated_filename); // See the bottom of back::write::run_passes for an explanation - // of when we do and don't keep .0.bc files around. + // of when we do and don't keep .#module-name#.bc files around. let user_wants_numbered_bitcode = sess.opts.output_types.contains_key(&OutputType::Bitcode) && sess.opts.cg.codegen_units > 1; @@ -738,7 +542,7 @@ fn link_rlib<'a>(sess: &'a Session, // After adding all files to the archive, we need to update the // symbol table of the archive. 
This currently dies on OSX (see // #11162), and isn't necessary there anyway - if !sess.target.target.options.is_like_osx || ab.using_llvm() { + if !sess.target.target.options.is_like_osx { ab.update_symbols(); } } @@ -753,9 +557,9 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, bc_data_deflated: &[u8]) -> io::Result<()> { let bc_data_deflated_size: u64 = bc_data_deflated.len() as u64; - try!(writer.write_all(RLIB_BYTECODE_OBJECT_MAGIC)); - try!(writer.write_all(&[1, 0, 0, 0])); - try!(writer.write_all(&[ + writer.write_all(RLIB_BYTECODE_OBJECT_MAGIC)?; + writer.write_all(&[1, 0, 0, 0])?; + writer.write_all(&[ (bc_data_deflated_size >> 0) as u8, (bc_data_deflated_size >> 8) as u8, (bc_data_deflated_size >> 16) as u8, @@ -764,8 +568,8 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, (bc_data_deflated_size >> 40) as u8, (bc_data_deflated_size >> 48) as u8, (bc_data_deflated_size >> 56) as u8, - ])); - try!(writer.write_all(&bc_data_deflated)); + ])?; + writer.write_all(&bc_data_deflated)?; let number_of_bytes_written_so_far = RLIB_BYTECODE_OBJECT_MAGIC.len() + // magic id @@ -777,7 +581,7 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, // padding byte to make it even. 
This works around a crash bug in LLDB // (see issue #15950) if number_of_bytes_written_so_far % 2 == 1 { - try!(writer.write_all(&[0])); + writer.write_all(&[0])?; } return Ok(()); @@ -798,21 +602,32 @@ fn write_rlib_bytecode_object_v1(writer: &mut Write, fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, tempdir: &Path) { let mut ab = link_rlib(sess, None, objects, out_filename, tempdir); - if sess.target.target.options.is_like_osx && !ab.using_llvm() { - ab.build(); - } - if !sess.target.target.options.no_compiler_rt { - ab.add_native_library("compiler-rt"); - } - let mut all_native_libs = vec![]; each_linked_rlib(sess, &mut |cnum, path| { let name = sess.cstore.crate_name(cnum); - ab.add_rlib(path, &name, sess.lto()).unwrap(); - let native_libs = sess.cstore.native_libraries(cnum); - all_native_libs.extend(native_libs); + + // Here when we include the rlib into our staticlib we need to make a + // decision whether to include the extra object files along the way. + // These extra object files come from statically included native + // libraries, but they may be cfg'd away with #[link(cfg(..))]. + // + // This unstable feature, though, only needs liblibc to work. The only + // use case there is where musl is statically included in liblibc.rlib, + // so if we don't want the included version we just need to skip it. As + // a result the logic here is that if *any* linked library is cfg'd away + // we just skip all object files. + // + // Clearly this is not sufficient for a general purpose feature, and + // we'd want to read from the library's metadata to determine which + // object files come from where and selectively skip them. 
+ let skip_object_files = native_libs.iter().any(|lib| { + lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib) + }); + ab.add_rlib(path, &name.as_str(), sess.lto(), skip_object_files).unwrap(); + + all_native_libs.extend(sess.cstore.native_libraries(cnum)); }); ab.update_symbols(); @@ -825,13 +640,14 @@ fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, platforms, and so may need to be preserved"); } - for &(kind, ref lib) in &all_native_libs { - let name = match kind { - NativeLibraryKind::NativeStatic => "static library", + for lib in all_native_libs.iter().filter(|l| relevant_lib(sess, l)) { + let name = match lib.kind { NativeLibraryKind::NativeUnknown => "library", NativeLibraryKind::NativeFramework => "framework", + // These are included, no need to print them + NativeLibraryKind::NativeStatic => continue, }; - sess.note_without_error(&format!("{}: {}", name, *lib)); + sess.note_without_error(&format!("{}: {}", name, lib.name)); } } @@ -839,41 +655,35 @@ fn link_staticlib(sess: &Session, objects: &[PathBuf], out_filename: &Path, // // This will invoke the system linker/cc to create the resulting file. This // links to all upstream files as well. -fn link_natively(sess: &Session, dylib: bool, - objects: &[PathBuf], out_filename: &Path, +fn link_natively(sess: &Session, + crate_type: config::CrateType, + objects: &[PathBuf], + out_filename: &Path, trans: &CrateTranslation, outputs: &OutputFilenames, tmpdir: &Path) { - info!("preparing dylib? 
({}) from {:?} to {:?}", dylib, objects, - out_filename); + info!("preparing {:?} from {:?} to {:?}", crate_type, objects, out_filename); // The invocations of cc share some flags across platforms - let (pname, mut cmd) = get_linker(sess); - cmd.env("PATH", command_path(sess)); + let (pname, mut cmd, extra) = get_linker(sess); + cmd.env("PATH", command_path(sess, extra)); let root = sess.target_filesearch(PathKind::Native).get_lib_path(); cmd.args(&sess.target.target.options.pre_link_args); - let pre_link_objects = if dylib { - &sess.target.target.options.pre_link_objects_dll - } else { + let pre_link_objects = if crate_type == config::CrateTypeExecutable { &sess.target.target.options.pre_link_objects_exe + } else { + &sess.target.target.options.pre_link_objects_dll }; for obj in pre_link_objects { cmd.arg(root.join(obj)); } { - let mut linker = if sess.target.target.options.is_like_msvc { - Box::new(MsvcLinker { cmd: &mut cmd, sess: &sess }) as Box - } else { - Box::new(GnuLinker { cmd: &mut cmd, sess: &sess }) as Box - }; - link_args(&mut *linker, sess, dylib, tmpdir, - objects, out_filename, trans, outputs); - if !sess.target.target.options.no_compiler_rt { - linker.link_staticlib("compiler-rt"); - } + let mut linker = trans.linker_info.to_linker(&mut cmd, &sess); + link_args(&mut *linker, sess, crate_type, tmpdir, + objects, out_filename, outputs, trans); } cmd.args(&sess.target.target.options.late_link_args); for obj in &sess.target.target.options.post_link_objects { @@ -910,7 +720,7 @@ fn link_natively(sess: &Session, dylib: bool, pname, prog.status)) .note(&format!("{:?}", &cmd)) - .note(&*escape_string(&output[..])) + .note(&escape_string(&output[..])) .emit(); sess.abort_if_errors(); } @@ -918,7 +728,16 @@ fn link_natively(sess: &Session, dylib: bool, info!("linker stdout:\n{}", escape_string(&prog.stdout[..])); }, Err(e) => { - sess.fatal(&format!("could not exec the linker `{}`: {}", pname, e)); + sess.struct_err(&format!("could not exec the linker `{}`: 
{}", pname, e)) + .note(&format!("{:?}", &cmd)) + .emit(); + if sess.target.target.options.is_like_msvc && e.kind() == io::ErrorKind::NotFound { + sess.note_without_error("the msvc targets depend on the msvc linker \ + but `link.exe` was not found"); + sess.note_without_error("please ensure that VS 2013 or VS 2015 was installed \ + with the Visual C++ option"); + } + sess.abort_if_errors(); } } @@ -935,12 +754,12 @@ fn link_natively(sess: &Session, dylib: bool, fn link_args(cmd: &mut Linker, sess: &Session, - dylib: bool, + crate_type: config::CrateType, tmpdir: &Path, objects: &[PathBuf], out_filename: &Path, - trans: &CrateTranslation, - outputs: &OutputFilenames) { + outputs: &OutputFilenames, + trans: &CrateTranslation) { // The default library location, we need this to find the runtime. // The location of crates will be determined as needed. @@ -955,30 +774,43 @@ fn link_args(cmd: &mut Linker, } cmd.output_filename(out_filename); + if crate_type == config::CrateTypeExecutable && + sess.target.target.options.is_like_windows { + if let Some(ref s) = trans.windows_subsystem { + cmd.subsystem(s); + } + } + // If we're building a dynamic library then some platforms need to make sure // that all symbols are exported correctly from the dynamic library. - if dylib { - cmd.export_symbols(sess, trans, tmpdir); + if crate_type != config::CrateTypeExecutable { + cmd.export_symbols(tmpdir, crate_type); } // When linking a dynamic library, we put the metadata into a section of the // executable. This metadata is in a separate object file from the main // object file, so we link that in here. - if dylib { + if crate_type == config::CrateTypeDylib || + crate_type == config::CrateTypeProcMacro { cmd.add_object(&outputs.with_extension("metadata.o")); } // Try to strip as much out of the generated object by removing unused // sections if possible. 
See more comments in linker.rs - cmd.gc_sections(dylib); + if !sess.opts.cg.link_dead_code { + let keep_metadata = crate_type == config::CrateTypeDylib; + cmd.gc_sections(keep_metadata); + } let used_link_args = sess.cstore.used_link_args(); - if !dylib && t.options.position_independent_executables { + if crate_type == config::CrateTypeExecutable && + t.options.position_independent_executables { let empty_vec = Vec::new(); let empty_str = String::new(); let args = sess.opts.cg.link_args.as_ref().unwrap_or(&empty_vec); - let mut args = args.iter().chain(used_link_args.iter()); + let more_args = &sess.opts.cg.link_arg; + let mut args = args.iter().chain(more_args.iter()).chain(used_link_args.iter()); let relocation_model = sess.opts.cg.relocation_model.as_ref() .unwrap_or(&empty_str); if (t.options.relocation_model == "pic" || *relocation_model == "pic") @@ -1029,12 +861,12 @@ fn link_args(cmd: &mut Linker, // in this DAG so far because they're only dylibs and dylibs can only depend // on other dylibs (e.g. other native deps). 
add_local_native_libraries(cmd, sess); - add_upstream_rust_crates(cmd, sess, dylib, tmpdir); + add_upstream_rust_crates(cmd, sess, crate_type, tmpdir); add_upstream_native_libraries(cmd, sess); // # Telling the linker what we're doing - if dylib { + if crate_type != config::CrateTypeExecutable { cmd.build_dylib(out_filename); } @@ -1068,6 +900,7 @@ fn link_args(cmd: &mut Linker, if let Some(ref args) = sess.opts.cg.link_args { cmd.args(args); } + cmd.args(&sess.opts.cg.link_arg); cmd.args(&used_link_args); } @@ -1090,14 +923,12 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { } }); - let libs = sess.cstore.used_libraries(); - - let staticlibs = libs.iter().filter_map(|&(ref l, kind)| { - if kind == NativeLibraryKind::NativeStatic {Some(l)} else {None} - }); - let others = libs.iter().filter(|&&(_, kind)| { - kind != NativeLibraryKind::NativeStatic + let pair = sess.cstore.used_libraries().into_iter().filter(|l| { + relevant_lib(sess, l) + }).partition(|lib| { + lib.kind == NativeLibraryKind::NativeStatic }); + let (staticlibs, others): (Vec<_>, Vec<_>) = pair; // Some platforms take hints about whether a library is static or dynamic. // For those that support this, we ensure we pass the option if the library @@ -1113,16 +944,16 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { // don't otherwise explicitly reference them. This can occur for // libraries which are just providing bindings, libraries with generic // functions, etc. 
- cmd.link_whole_staticlib(l, &search_path); + cmd.link_whole_staticlib(&l.name.as_str(), &search_path); } cmd.hint_dynamic(); - for &(ref l, kind) in others { - match kind { - NativeLibraryKind::NativeUnknown => cmd.link_dylib(l), - NativeLibraryKind::NativeFramework => cmd.link_framework(l), - NativeLibraryKind::NativeStatic => unreachable!(), + for lib in others { + match lib.kind { + NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()), + NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()), + NativeLibraryKind::NativeStatic => bug!(), } } } @@ -1132,8 +963,10 @@ fn add_local_native_libraries(cmd: &mut Linker, sess: &Session) { // Rust crates are not considered at all when creating an rlib output. All // dependencies will be linked when producing the final output (instead of // the intermediate rlib version) -fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, - dylib: bool, tmpdir: &Path) { +fn add_upstream_rust_crates(cmd: &mut Linker, + sess: &Session, + crate_type: config::CrateType, + tmpdir: &Path) { // All of the heavy lifting has previously been accomplished by the // dependency_format module of the compiler. This is just crawling the // output of that module, adding crates as necessary. @@ -1143,26 +976,30 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, // involves just passing the right -l flag. let formats = sess.dependency_formats.borrow(); - let data = if dylib { - formats.get(&config::CrateTypeDylib).unwrap() - } else { - formats.get(&config::CrateTypeExecutable).unwrap() - }; + let data = formats.get(&crate_type).unwrap(); // Invoke get_used_crates to ensure that we get a topological sorting of // crates. let deps = sess.cstore.used_crates(LinkagePreference::RequireDynamic); + let mut compiler_builtins = None; + for &(cnum, _) in &deps { // We may not pass all crates through to the linker. 
Some crates may // appear statically in an existing dylib, meaning we'll pick up all the // symbols from the dylib. let src = sess.cstore.used_crate_source(cnum); - match data[cnum as usize - 1] { + match data[cnum.as_usize() - 1] { + // compiler-builtins are always placed last to ensure that they're + // linked correctly. + _ if sess.cstore.is_compiler_builtins(cnum) => { + assert!(compiler_builtins.is_none()); + compiler_builtins = Some(cnum); + } Linkage::NotLinked | Linkage::IncludedFromDylib => {} Linkage::Static => { - add_static_crate(cmd, sess, tmpdir, dylib, &src.rlib.unwrap().0) + add_static_crate(cmd, sess, tmpdir, crate_type, cnum); } Linkage::Dynamic => { add_dynamic_crate(cmd, sess, &src.dylib.unwrap().0) @@ -1170,6 +1007,13 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, } } + // We must always link the `compiler_builtins` crate statically. Even if it + // was already "included" in a dylib (e.g. `libstd` when `-C prefer-dynamic` + // is used) + if let Some(cnum) = compiler_builtins { + add_static_crate(cmd, sess, tmpdir, crate_type, cnum); + } + // Converts a library file-stem into a cc -l argument fn unlib<'a>(config: &config::Config, stem: &'a str) -> &'a str { if stem.starts_with("lib") && !config.target.options.is_like_windows { @@ -1186,12 +1030,16 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, // * For LTO, we remove upstream object files. // * For dylibs we remove metadata and bytecode from upstream rlibs // - // When performing LTO, all of the bytecode from the upstream libraries has - // already been included in our object file output. As a result we need to - // remove the object files in the upstream libraries so the linker doesn't - // try to include them twice (or whine about duplicate symbols). We must - // continue to include the rest of the rlib, however, as it may contain - // static native libraries which must be linked in. 
+ // When performing LTO, almost(*) all of the bytecode from the upstream + // libraries has already been included in our object file output. As a + // result we need to remove the object files in the upstream libraries so + // the linker doesn't try to include them twice (or whine about duplicate + // symbols). We must continue to include the rest of the rlib, however, as + // it may contain static native libraries which must be linked in. + // + // (*) Crates marked with `#![no_builtins]` don't participate in LTO and + // their bytecode wasn't included. The object files in those libraries must + // still be passed to the linker. // // When making a dynamic library, linkers by default don't include any // object files in an archive if they're not necessary to resolve the link. @@ -1207,9 +1055,23 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, // (aka we're making an executable), we can just pass the rlib blindly to // the linker (fast) because it's fine if it's not actually included as // we're at the end of the dependency chain. - fn add_static_crate(cmd: &mut Linker, sess: &Session, tmpdir: &Path, - dylib: bool, cratepath: &Path) { - if !sess.lto() && !dylib { + fn add_static_crate(cmd: &mut Linker, + sess: &Session, + tmpdir: &Path, + crate_type: config::CrateType, + cnum: CrateNum) { + let src = sess.cstore.used_crate_source(cnum); + let cratepath = &src.rlib.unwrap().0; + + // See the comment above in `link_staticlib` and `link_rlib` for why if + // there's a static library that's not relevant we skip all object + // files. 
+ let native_libs = sess.cstore.native_libraries(cnum); + let skip_native = native_libs.iter().any(|lib| { + lib.kind == NativeLibraryKind::NativeStatic && !relevant_lib(sess, lib) + }); + + if !sess.lto() && crate_type != config::CrateTypeDylib && !skip_native { cmd.link_rlib(&fix_windows_verbatim_for_gcc(cratepath)); return } @@ -1221,31 +1083,61 @@ fn add_upstream_rust_crates(cmd: &mut Linker, sess: &Session, time(sess.time_passes(), &format!("altering {}.rlib", name), || { let cfg = archive_config(sess, &dst, Some(cratepath)); let mut archive = ArchiveBuilder::new(cfg); - archive.remove_file(sess.cstore.metadata_filename()); archive.update_symbols(); let mut any_objects = false; for f in archive.src_files() { - if f.ends_with("bytecode.deflate") { + if f.ends_with("bytecode.deflate") || + f == sess.cstore.metadata_filename() { archive.remove_file(&f); continue } + let canonical = f.replace("-", "_"); let canonical_name = name.replace("-", "_"); - if sess.lto() && canonical.starts_with(&canonical_name) && - canonical.ends_with(".o") { - let num = &f[name.len()..f.len() - 2]; - if num.len() > 0 && num[1..].parse::().is_ok() { - archive.remove_file(&f); - continue - } + + let is_rust_object = + canonical.starts_with(&canonical_name) && { + let num = &f[name.len()..f.len() - 2]; + num.len() > 0 && num[1..].parse::().is_ok() + }; + + // If we've been requested to skip all native object files + // (those not generated by the rust compiler) then we can skip + // this file. See above for why we may want to do this. + let skip_because_cfg_say_so = skip_native && !is_rust_object; + + // If we're performing LTO and this is a rust-generated object + // file, then we don't need the object file as it's part of the + // LTO module. Note that `#![no_builtins]` is excluded from LTO, + // though, so we let that object file slide. 
+ let skip_because_lto = sess.lto() && is_rust_object && + !sess.cstore.is_no_builtins(cnum); + + if skip_because_cfg_say_so || skip_because_lto { + archive.remove_file(&f); + } else { + any_objects = true; } - any_objects = true; } - if any_objects { - archive.build(); + if !any_objects { + return + } + archive.build(); + + // If we're creating a dylib, then we need to include the + // whole of each object in our archive into that artifact. This is + // because a `dylib` can be reused as an intermediate artifact. + // + // Note, though, that we don't want to include the whole of a + // compiler-builtins crate (e.g. compiler-rt) because it'll get + // repeatedly linked anyway. + if crate_type == config::CrateTypeDylib && + !sess.cstore.is_compiler_builtins(cnum) { cmd.link_whole_rlib(&fix_windows_verbatim_for_gcc(&dst)); + } else { + cmd.link_rlib(&fix_windows_verbatim_for_gcc(&dst)); } }); } @@ -1298,15 +1190,26 @@ fn add_upstream_native_libraries(cmd: &mut Linker, sess: &Session) { // the paths. 
let crates = sess.cstore.used_crates(LinkagePreference::RequireStatic); for (cnum, _) in crates { - let libs = sess.cstore.native_libraries(cnum); - for &(kind, ref lib) in &libs { - match kind { - NativeLibraryKind::NativeUnknown => cmd.link_dylib(lib), - NativeLibraryKind::NativeFramework => cmd.link_framework(lib), - NativeLibraryKind::NativeStatic => { - sess.bug("statics shouldn't be propagated"); - } + for lib in sess.cstore.native_libraries(cnum) { + if !relevant_lib(sess, &lib) { + continue + } + match lib.kind { + NativeLibraryKind::NativeUnknown => cmd.link_dylib(&lib.name.as_str()), + NativeLibraryKind::NativeFramework => cmd.link_framework(&lib.name.as_str()), + + // ignore statically included native libraries here as we've + // already included them when we included the rust library + // previously + NativeLibraryKind::NativeStatic => {} } } } } + +fn relevant_lib(sess: &Session, lib: &NativeLibrary) -> bool { + match lib.cfg { + Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, None), + None => true, + } +} diff --git a/src/librustc_trans/back/linker.rs b/src/librustc_trans/back/linker.rs index f585c65228a82..860903d259fe5 100644 --- a/src/librustc_trans/back/linker.rs +++ b/src/librustc_trans/back/linker.rs @@ -8,21 +8,58 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::collections::HashMap; use std::ffi::OsString; use std::fs::{self, File}; -use std::io::{self, BufWriter}; use std::io::prelude::*; +use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; use std::process::Command; +use context::SharedCrateContext; +use monomorphize::Instance; + use back::archive; -use middle::cstore::CrateStore; use middle::dependency_format::Linkage; +use rustc::hir::def_id::CrateNum; use session::Session; -use session::config::CrateTypeDylib; +use session::config::CrateType; use session::config; -use syntax::ast; -use trans::CrateTranslation; + +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. +pub struct LinkerInfo { + exports: HashMap>, +} + +impl<'a, 'tcx> LinkerInfo { + pub fn new(scx: &SharedCrateContext<'a, 'tcx>, + reachable: &[String]) -> LinkerInfo { + LinkerInfo { + exports: scx.sess().crate_types.borrow().iter().map(|&c| { + (c, exported_symbols(scx, reachable, c)) + }).collect(), + } + } + + pub fn to_linker(&'a self, + cmd: &'a mut Command, + sess: &'a Session) -> Box { + if sess.target.target.options.is_like_msvc { + Box::new(MsvcLinker { + cmd: cmd, + sess: sess, + info: self + }) as Box + } else { + Box::new(GnuLinker { + cmd: cmd, + sess: sess, + info: self + }) as Box + } + } +} /// Linker abstraction used by back::link to build up the command to invoke a /// linker. 
@@ -43,7 +80,7 @@ pub trait Linker { fn framework_path(&mut self, path: &Path); fn output_filename(&mut self, path: &Path); fn add_object(&mut self, path: &Path); - fn gc_sections(&mut self, is_dylib: bool); + fn gc_sections(&mut self, keep_metadata: bool); fn position_independent_executable(&mut self); fn optimize(&mut self); fn debuginfo(&mut self); @@ -54,13 +91,14 @@ pub trait Linker { fn hint_dynamic(&mut self); fn whole_archives(&mut self); fn no_whole_archives(&mut self); - fn export_symbols(&mut self, sess: &Session, trans: &CrateTranslation, - tmpdir: &Path); + fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType); + fn subsystem(&mut self, subsystem: &str); } pub struct GnuLinker<'a> { - pub cmd: &'a mut Command, - pub sess: &'a Session, + cmd: &'a mut Command, + sess: &'a Session, + info: &'a LinkerInfo } impl<'a> GnuLinker<'a> { @@ -114,7 +152,7 @@ impl<'a> Linker for GnuLinker<'a> { } } - fn gc_sections(&mut self, is_dylib: bool) { + fn gc_sections(&mut self, keep_metadata: bool) { // The dead_strip option to the linker specifies that functions and data // unreachable by the entry point will be removed. This is quite useful // with Rust's compilation model of compiling libraries at a time into @@ -131,13 +169,16 @@ impl<'a> Linker for GnuLinker<'a> { // insert it here. if self.sess.target.target.options.is_like_osx { self.cmd.arg("-Wl,-dead_strip"); + } else if self.sess.target.target.options.is_like_solaris { + self.cmd.arg("-Wl,-z"); + self.cmd.arg("-Wl,ignore"); // If we're building a dylib, we don't use --gc-sections because LLVM // has already done the best it can do, and we also don't want to // eliminate the metadata. If we're building an executable, however, // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67% // reduction. 
- } else if !is_dylib { + } else if !keep_metadata { self.cmd.arg("-Wl,--gc-sections"); } } @@ -196,22 +237,91 @@ impl<'a> Linker for GnuLinker<'a> { self.cmd.arg("-Wl,-Bdynamic"); } - fn export_symbols(&mut self, _: &Session, _: &CrateTranslation, _: &Path) { - // noop, visibility in object files takes care of this + fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType) { + // If we're compiling a dylib, then we let symbol visibility in object + // files to take care of whether they're exported or not. + // + // If we're compiling a cdylib, however, we manually create a list of + // exported symbols to ensure we don't expose any more. The object files + // have far more public symbols than we actually want to export, so we + // hide them all here. + if crate_type == CrateType::CrateTypeDylib || + crate_type == CrateType::CrateTypeProcMacro { + return + } + + let mut arg = OsString::new(); + let path = tmpdir.join("list"); + + if self.sess.target.target.options.is_like_solaris { + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + writeln!(f, "{{\n global:")?; + for sym in self.info.exports[&crate_type].iter() { + writeln!(f, " {};", sym)?; + } + writeln!(f, "\n local:\n *;\n}};")?; + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write version script: {}", e)); + } + + arg.push("-Wl,-M,"); + arg.push(&path); + } else { + let prefix = if self.sess.target.target.options.is_like_osx { + "_" + } else { + "" + }; + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + for sym in self.info.exports[&crate_type].iter() { + writeln!(f, "{}{}", prefix, sym)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + if self.sess.target.target.options.is_like_osx { + arg.push("-Wl,-exported_symbols_list,"); + } else { + arg.push("-Wl,--retain-symbols-file="); + } + arg.push(&path); + } + + 
self.cmd.arg(arg); + } + + fn subsystem(&mut self, subsystem: &str) { + self.cmd.arg(&format!("-Wl,--subsystem,{}", subsystem)); } } pub struct MsvcLinker<'a> { - pub cmd: &'a mut Command, - pub sess: &'a Session, + cmd: &'a mut Command, + sess: &'a Session, + info: &'a LinkerInfo } impl<'a> Linker for MsvcLinker<'a> { fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } fn args(&mut self, args: &[String]) { self.cmd.args(args); } - fn build_dylib(&mut self, _out_filename: &Path) { self.cmd.arg("/DLL"); } - fn gc_sections(&mut self, _is_dylib: bool) { self.cmd.arg("/OPT:REF,ICF"); } + + fn build_dylib(&mut self, out_filename: &Path) { + self.cmd.arg("/DLL"); + let mut arg: OsString = "/IMPLIB:".into(); + arg.push(out_filename.with_extension("dll.lib")); + self.cmd.arg(arg); + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + self.cmd.arg("/OPT:REF,ICF"); + } fn link_dylib(&mut self, lib: &str) { self.cmd.arg(&format!("{}.lib", lib)); @@ -222,7 +332,7 @@ impl<'a> Linker for MsvcLinker<'a> { // `foo.lib` file if the dll doesn't actually export any symbols, so we // check to see if the file is there and just omit linking to it if it's // not present. - let name = format!("{}.lib", lib); + let name = format!("{}.dll.lib", lib); if fs::metadata(&path.join(&name)).is_ok() { self.cmd.arg(name); } @@ -261,10 +371,10 @@ impl<'a> Linker for MsvcLinker<'a> { } fn framework_path(&mut self, _path: &Path) { - panic!("frameworks are not supported on windows") + bug!("frameworks are not supported on windows") } fn link_framework(&mut self, _framework: &str) { - panic!("frameworks are not supported on windows") + bug!("frameworks are not supported on windows") } fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { @@ -313,49 +423,93 @@ impl<'a> Linker for MsvcLinker<'a> { // crates. 
Upstream rlibs may be linked statically to this dynamic library, // in which case they may continue to transitively be used and hence need // their symbols exported. - fn export_symbols(&mut self, sess: &Session, trans: &CrateTranslation, - tmpdir: &Path) { + fn export_symbols(&mut self, + tmpdir: &Path, + crate_type: CrateType) { let path = tmpdir.join("lib.def"); let res = (|| -> io::Result<()> { - let mut f = BufWriter::new(try!(File::create(&path))); + let mut f = BufWriter::new(File::create(&path)?); // Start off with the standard module name header and then go // straight to exports. - try!(writeln!(f, "LIBRARY")); - try!(writeln!(f, "EXPORTS")); - - // Write out all our local symbols - for sym in trans.reachable.iter() { - try!(writeln!(f, " {}", sym)); - } - - // Take a look at how all upstream crates are linked into this - // dynamic library. For all statically linked libraries we take all - // their reachable symbols and emit them as well. - let cstore = &sess.cstore; - let formats = sess.dependency_formats.borrow(); - let symbols = formats[&CrateTypeDylib].iter(); - let symbols = symbols.enumerate().filter_map(|(i, f)| { - if *f == Linkage::Static { - Some((i + 1) as ast::CrateNum) - } else { - None - } - }).flat_map(|cnum| { - cstore.reachable_ids(cnum) - }).map(|did| { - cstore.item_symbol(did) - }); - for symbol in symbols { - try!(writeln!(f, " {}", symbol)); + writeln!(f, "LIBRARY")?; + writeln!(f, "EXPORTS")?; + for symbol in self.info.exports[&crate_type].iter() { + writeln!(f, " {}", symbol)?; } Ok(()) })(); if let Err(e) = res { - sess.fatal(&format!("failed to write lib.def file: {}", e)); + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); } let mut arg = OsString::from("/DEF:"); arg.push(path); self.cmd.arg(&arg); } + + fn subsystem(&mut self, subsystem: &str) { + // Note that previous passes of the compiler validated this subsystem, + // so we just blindly pass it to the linker. 
+ self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); + + // Windows has two subsystems we're interested in right now, the console + // and windows subsystems. These both implicitly have different entry + // points (starting symbols). The console entry point starts with + // `mainCRTStartup` and the windows entry point starts with + // `WinMainCRTStartup`. These entry points, defined in system libraries, + // will then later probe for either `main` or `WinMain`, respectively to + // start the application. + // + // In Rust we just always generate a `main` function so we want control + // to always start there, so we force the entry point on the windows + // subsystem to be `mainCRTStartup` to get everything booted up + // correctly. + // + // For more information see RFC #1665 + if subsystem == "windows" { + self.cmd.arg("/ENTRY:mainCRTStartup"); + } + } +} + +fn exported_symbols(scx: &SharedCrateContext, + reachable: &[String], + crate_type: CrateType) + -> Vec { + // See explanation in GnuLinker::export_symbols, for + // why we don't ever need dylib symbols on non-MSVC. + if crate_type == CrateType::CrateTypeDylib || + crate_type == CrateType::CrateTypeProcMacro { + if !scx.sess().target.target.options.is_like_msvc { + return vec![]; + } + } + + let mut symbols = reachable.to_vec(); + + // If we're producing anything other than a dylib then the `reachable` array + // above is the exhaustive set of symbols we should be exporting. + // + // For dylibs, however, we need to take a look at how all upstream crates + // are linked into this dynamic library. For all statically linked + // libraries we take all their reachable symbols and emit them as well. 
+ if crate_type != CrateType::CrateTypeDylib { + return symbols + } + + let cstore = &scx.sess().cstore; + let formats = scx.sess().dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + symbols.extend(deps.enumerate().filter_map(|(i, f)| { + if *f == Linkage::Static { + Some(CrateNum::new(i + 1)) + } else { + None + } + }).flat_map(|cnum| { + cstore.reachable_ids(cnum) + }).map(|did| -> String { + Instance::mono(scx, did).symbol_name(scx) + })); + symbols } diff --git a/src/librustc_trans/back/lto.rs b/src/librustc_trans/back/lto.rs index 85419a072503a..522864c6ec3a4 100644 --- a/src/librustc_trans/back/lto.rs +++ b/src/librustc_trans/back/lto.rs @@ -22,15 +22,16 @@ use libc; use flate; use std::ffi::CString; +use std::path::Path; pub fn run(sess: &session::Session, llmod: ModuleRef, tm: TargetMachineRef, reachable: &[String], config: &ModuleConfig, - name_extra: &str, - output_names: &config::OutputFilenames) { + temp_no_opt_bc_filename: &Path) { if sess.opts.cg.prefer_dynamic { sess.struct_err("cannot prefer dynamic linking when performing LTO") - .note("only 'staticlib' and 'bin' outputs are supported with LTO") + .note("only 'staticlib', 'bin', and 'cdylib' outputs are \ + supported with LTO") .emit(); sess.abort_if_errors(); } @@ -38,7 +39,9 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, // Make sure we actually can run LTO for crate_type in sess.crate_types.borrow().iter() { match *crate_type { - config::CrateTypeExecutable | config::CrateTypeStaticlib => {} + config::CrateTypeExecutable | + config::CrateTypeCdylib | + config::CrateTypeStaticlib => {} _ => { sess.fatal("lto can only be run for executables and \ static library outputs"); @@ -49,10 +52,15 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, // For each of our upstream dependencies, find the corresponding rlib and // load the bitcode from the archive. Then merge it into the current LLVM // module that we've got. 
- link::each_linked_rlib(sess, &mut |_, path| { + link::each_linked_rlib(sess, &mut |cnum, path| { + // `#![no_builtins]` crates don't participate in LTO. + if sess.cstore.is_no_builtins(cnum) { + return; + } + let archive = ArchiveRO::open(&path).expect("wanted an rlib"); let bytecodes = archive.iter().filter_map(|child| { - child.name().map(|name| (name, child)) + child.ok().and_then(|c| c.name().map(|name| (name, c))) }).filter(|&(name, _)| name.ends_with("bytecode.deflate")); for (name, data) in bytecodes { let bc_encoded = data.data(); @@ -129,8 +137,7 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, } if sess.opts.cg.save_temps { - let path = output_names.with_extension(&format!("{}.no-opt.lto.bc", name_extra)); - let cstr = path2cstr(&path); + let cstr = path2cstr(temp_no_opt_bc_filename); unsafe { llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); } @@ -145,7 +152,9 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, unsafe { let pm = llvm::LLVMCreatePassManager(); llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); - llvm::LLVMRustAddPass(pm, "verify\0".as_ptr() as *const _); + let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); + assert!(!pass.is_null()); + llvm::LLVMRustAddPass(pm, pass); with_llvm_pmb(llmod, config, &mut |b| { llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm, @@ -153,7 +162,9 @@ pub fn run(sess: &session::Session, llmod: ModuleRef, /* RunInliner = */ True); }); - llvm::LLVMRustAddPass(pm, "verify\0".as_ptr() as *const _); + let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); + assert!(!pass.is_null()); + llvm::LLVMRustAddPass(pm, pass); time(sess.time_passes(), "LTO passes", || llvm::LLVMRunPassManager(pm, llmod)); diff --git a/src/librustc_trans/back/msvc/arch.rs b/src/librustc_trans/back/msvc/arch.rs new file mode 100644 index 0000000000000..c10312a8e1710 --- /dev/null +++ b/src/librustc_trans/back/msvc/arch.rs @@ -0,0 +1,56 @@ +// Copyright 2016 The Rust Project 
Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_camel_case_types, non_snake_case)] + +use libc::c_void; +use std::mem; + +type DWORD = u32; +type WORD = u16; +type LPVOID = *mut c_void; +type DWORD_PTR = usize; + +const PROCESSOR_ARCHITECTURE_INTEL: WORD = 0; +const PROCESSOR_ARCHITECTURE_AMD64: WORD = 9; + +#[repr(C)] +struct SYSTEM_INFO { + wProcessorArchitecture: WORD, + _wReserved: WORD, + _dwPageSize: DWORD, + _lpMinimumApplicationAddress: LPVOID, + _lpMaximumApplicationAddress: LPVOID, + _dwActiveProcessorMask: DWORD_PTR, + _dwNumberOfProcessors: DWORD, + _dwProcessorType: DWORD, + _dwAllocationGranularity: DWORD, + _wProcessorLevel: WORD, + _wProcessorRevision: WORD, +} + +extern "system" { + fn GetNativeSystemInfo(lpSystemInfo: *mut SYSTEM_INFO); +} + +pub enum Arch { + X86, + Amd64, +} + +pub fn host_arch() -> Option { + let mut info = unsafe { mem::zeroed() }; + unsafe { GetNativeSystemInfo(&mut info) }; + match info.wProcessorArchitecture { + PROCESSOR_ARCHITECTURE_INTEL => Some(Arch::X86), + PROCESSOR_ARCHITECTURE_AMD64 => Some(Arch::Amd64), + _ => None, + } +} diff --git a/src/librustc_trans/back/msvc/mod.rs b/src/librustc_trans/back/msvc/mod.rs index 6f0baa86579e0..16aef6ee8ca35 100644 --- a/src/librustc_trans/back/msvc/mod.rs +++ b/src/librustc_trans/back/msvc/mod.rs @@ -31,114 +31,155 @@ //! paths/files is based on Microsoft's logic in their vcvars bat files, but //! comments can also be found below leading through the various code paths. -use std::process::Command; -use session::Session; +// A simple macro to make this option mess easier to read +macro_rules! 
otry { + ($expr:expr) => (match $expr { + Some(val) => val, + None => return None, + }) +} #[cfg(windows)] mod registry; +#[cfg(windows)] +mod arch; #[cfg(windows)] -pub fn link_exe_cmd(sess: &Session) -> Command { +mod platform { use std::env; use std::ffi::OsString; use std::fs; use std::path::{Path, PathBuf}; - use self::registry::{LOCAL_MACHINE}; - - let arch = &sess.target.target.arch; - let (binsub, libsub, vclibsub) = - match (bin_subdir(arch), lib_subdir(arch), vc_lib_subdir(arch)) { - (Some(x), Some(y), Some(z)) => (x, y, z), - _ => return Command::new("link.exe"), - }; + use std::process::Command; + use session::Session; + use super::arch::{host_arch, Arch}; + use super::registry::LOCAL_MACHINE; // First we need to figure out whether the environment is already correctly // configured by vcvars. We do this by looking at the environment variable // `VCINSTALLDIR` which is always set by vcvars, and unlikely to be set - // otherwise. If it is defined, then we derive the path to `link.exe` from - // that and trust that everything else is configured correctly. - // - // If `VCINSTALLDIR` wasn't defined (or we couldn't find the linker where it - // claimed it should be), then we resort to finding everything ourselves. - // First we find where the latest version of MSVC is installed and what - // version it is. Then based on the version we find the appropriate SDKs. + // otherwise. If it is defined, then we find `link.exe` in `PATH and trust + // that everything else is configured correctly. // - // For MSVC 14 (VS 2015) we look for the Win10 SDK and failing that we look - // for the Win8.1 SDK. We also look for the Universal CRT. + // If `VCINSTALLDIR` wasn't defined (or we couldn't find the linker where + // it claimed it should be), then we resort to finding everything + // ourselves. First we find where the latest version of MSVC is installed + // and what version it is. Then based on the version we find the + // appropriate SDKs. 
// - // For MSVC 12 (VS 2013) we look for the Win8.1 SDK. + // If despite our best efforts we are still unable to find MSVC then we + // just blindly call `link.exe` and hope for the best. // - // For MSVC 11 (VS 2012) we look for the Win8 SDK. + // This code only supports VC 11 through 15. For versions older than that + // the user will need to manually execute the appropriate vcvars bat file + // and it should hopefully work. // - // For all other versions the user has to execute the appropriate vcvars bat - // file themselves to configure the environment. - // - // If despite our best efforts we are still unable to find MSVC then we just - // blindly call `link.exe` and hope for the best. - return env::var_os("VCINSTALLDIR").and_then(|dir| { - debug!("Environment already configured by user. Assuming it works."); - let mut p = PathBuf::from(dir); - p.push("bin"); - p.push(binsub); - p.push("link.exe"); - if !p.is_file() { return None } - Some(Command::new(p)) - }).or_else(|| { - get_vc_dir().and_then(|(ver, vcdir)| { - debug!("Found VC installation directory {:?}", vcdir); - let mut linker = vcdir.clone(); - linker.push("bin"); - linker.push(binsub); - linker.push("link.exe"); - if !linker.is_file() { return None } - let mut cmd = Command::new(linker); - add_lib(&mut cmd, &vcdir.join("lib").join(vclibsub)); - if ver == "14.0" { - if let Some(dir) = get_ucrt_dir() { - debug!("Found Universal CRT {:?}", dir); - add_lib(&mut cmd, &dir.join("ucrt").join(libsub)); - } - if let Some(dir) = get_sdk10_dir() { - debug!("Found Win10 SDK {:?}", dir); - add_lib(&mut cmd, &dir.join("um").join(libsub)); - } else if let Some(dir) = get_sdk81_dir() { - debug!("Found Win8.1 SDK {:?}", dir); - add_lib(&mut cmd, &dir.join("um").join(libsub)); - } - } else if ver == "12.0" { - if let Some(dir) = get_sdk81_dir() { - debug!("Found Win8.1 SDK {:?}", dir); - add_lib(&mut cmd, &dir.join("um").join(libsub)); - } - } else { // ver == "11.0" - if let Some(dir) = get_sdk8_dir() { - 
debug!("Found Win8 SDK {:?}", dir); - add_lib(&mut cmd, &dir.join("um").join(libsub)); - } - } - Some(cmd) + // The second member of the tuple we return is the directory for the host + // linker toolchain, which is necessary when using the cross linkers. + pub fn link_exe_cmd(sess: &Session) -> (Command, Option) { + let arch = &sess.target.target.arch; + env::var_os("VCINSTALLDIR").and_then(|_| { + debug!("Detected that vcvars was already run."); + let path = otry!(env::var_os("PATH")); + // Mingw has its own link which is not the link we want so we + // look for `cl.exe` too as a precaution. + env::split_paths(&path).find(|path| { + path.join("cl.exe").is_file() + && path.join("link.exe").is_file() + }).map(|path| { + (Command::new(path.join("link.exe")), None) + }) + }).or_else(|| { + None.or_else(|| { + find_msvc_latest(arch, "15.0") + }).or_else(|| { + find_msvc_latest(arch, "14.0") + }).or_else(|| { + find_msvc_12(arch) + }).or_else(|| { + find_msvc_11(arch) + }).map(|(cmd, path)| (cmd, Some(path))) + }).unwrap_or_else(|| { + debug!("Failed to locate linker."); + (Command::new("link.exe"), None) }) - }).unwrap_or_else(|| { - debug!("Failed to locate linker."); - Command::new("link.exe") - }); + } + + // For MSVC 14 or newer we need to find the Universal CRT as well as either + // the Windows 10 SDK or Windows 8.1 SDK. 
+ fn find_msvc_latest(arch: &str, ver: &str) -> Option<(Command, PathBuf)> { + let vcdir = otry!(get_vc_dir(ver)); + let (mut cmd, host) = otry!(get_linker(&vcdir, arch)); + let sub = otry!(lib_subdir(arch)); + let ucrt = otry!(get_ucrt_dir()); + debug!("Found Universal CRT {:?}", ucrt); + add_lib(&mut cmd, &ucrt.join("ucrt").join(sub)); + if let Some(dir) = get_sdk10_dir() { + debug!("Found Win10 SDK {:?}", dir); + add_lib(&mut cmd, &dir.join("um").join(sub)); + } else if let Some(dir) = get_sdk81_dir() { + debug!("Found Win8.1 SDK {:?}", dir); + add_lib(&mut cmd, &dir.join("um").join(sub)); + } else { + return None + } + Some((cmd, host)) + } + + // For MSVC 12 we need to find the Windows 8.1 SDK. + fn find_msvc_12(arch: &str) -> Option<(Command, PathBuf)> { + let vcdir = otry!(get_vc_dir("12.0")); + let (mut cmd, host) = otry!(get_linker(&vcdir, arch)); + let sub = otry!(lib_subdir(arch)); + let sdk81 = otry!(get_sdk81_dir()); + debug!("Found Win8.1 SDK {:?}", sdk81); + add_lib(&mut cmd, &sdk81.join("um").join(sub)); + Some((cmd, host)) + } + + // For MSVC 11 we need to find the Windows 8 SDK. + fn find_msvc_11(arch: &str) -> Option<(Command, PathBuf)> { + let vcdir = otry!(get_vc_dir("11.0")); + let (mut cmd, host) = otry!(get_linker(&vcdir, arch)); + let sub = otry!(lib_subdir(arch)); + let sdk8 = otry!(get_sdk8_dir()); + debug!("Found Win8 SDK {:?}", sdk8); + add_lib(&mut cmd, &sdk8.join("um").join(sub)); + Some((cmd, host)) + } - // A convenience function to make the above code simpler + // A convenience function to append library paths. fn add_lib(cmd: &mut Command, lib: &Path) { let mut arg: OsString = "/LIBPATH:".into(); arg.push(lib); cmd.arg(arg); } - // To find MSVC we look in a specific registry key for the newest of the - // three versions that we support. 
- fn get_vc_dir() -> Option<(&'static str, PathBuf)> { - LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7".as_ref()) - .ok().and_then(|key| { - ["14.0", "12.0", "11.0"].iter().filter_map(|ver| { - key.query_str(ver).ok().map(|p| (*ver, p.into())) - }).next() - }) + // Given a possible MSVC installation directory, we look for the linker and + // then add the MSVC library path. + fn get_linker(path: &Path, arch: &str) -> Option<(Command, PathBuf)> { + debug!("Looking for linker in {:?}", path); + bin_subdir(arch).into_iter().map(|(sub, host)| { + (path.join("bin").join(sub).join("link.exe"), + path.join("bin").join(host)) + }).filter(|&(ref path, _)| { + path.is_file() + }).map(|(path, host)| { + (Command::new(path), host) + }).filter_map(|(mut cmd, host)| { + let sub = otry!(vc_lib_subdir(arch)); + add_lib(&mut cmd, &path.join("lib").join(sub)); + Some((cmd, host)) + }).next() + } + + // To find MSVC we look in a specific registry key for the version we are + // trying to find. + fn get_vc_dir(ver: &str) -> Option { + let key = otry!(LOCAL_MACHINE + .open(r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7".as_ref()).ok()); + let path = otry!(key.query_str(ver).ok()); + Some(path.into()) } // To find the Universal CRT we look in a specific registry key for where @@ -146,46 +187,42 @@ pub fn link_exe_cmd(sess: &Session) -> Command { // find the newest version. While this sort of sorting isn't ideal, it is // what vcvars does so that's good enough for us. 
fn get_ucrt_dir() -> Option { - LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\Windows Kits\Installed Roots".as_ref()) - .ok().and_then(|key| { - key.query_str("KitsRoot10").ok() - }).and_then(|root| { - fs::read_dir(Path::new(&root).join("Lib")).ok() - }).and_then(|readdir| { - let mut dirs: Vec<_> = readdir.filter_map(|dir| { - dir.ok() - }).map(|dir| { - dir.path() - }).filter(|dir| { - dir.components().last().and_then(|c| { - c.as_os_str().to_str() - }).map(|c| c.starts_with("10.")).unwrap_or(false) - }).collect(); - dirs.sort(); - dirs.pop() - }) + let key = otry!(LOCAL_MACHINE + .open(r"SOFTWARE\Microsoft\Windows Kits\Installed Roots".as_ref()).ok()); + let root = otry!(key.query_str("KitsRoot10").ok()); + let readdir = otry!(fs::read_dir(Path::new(&root).join("lib")).ok()); + readdir.filter_map(|dir| { + dir.ok() + }).map(|dir| { + dir.path() + }).filter(|dir| { + dir.components().last().and_then(|c| { + c.as_os_str().to_str() + }).map(|c| { + c.starts_with("10.") && dir.join("ucrt").is_dir() + }).unwrap_or(false) + }).max() } // Vcvars finds the correct version of the Windows 10 SDK by looking - // for the include um/Windows.h because sometimes a given version will + // for the include `um\Windows.h` because sometimes a given version will // only have UCRT bits without the rest of the SDK. Since we only care about - // libraries and not includes, we just look for the folder `um` in the lib - // section. Like we do for the Universal CRT, we sort the possibilities + // libraries and not includes, we instead look for `um\x64\kernel32.lib`. + // Since the 32-bit and 64-bit libraries are always installed together we + // only need to bother checking x64, making this code a tiny bit simpler. + // Like we do for the Universal CRT, we sort the possibilities // asciibetically to find the newest one as that is what vcvars does. 
fn get_sdk10_dir() -> Option { - LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0".as_ref()) - .ok().and_then(|key| { - key.query_str("InstallationFolder").ok() - }).and_then(|root| { - fs::read_dir(Path::new(&root).join("lib")).ok() - }).and_then(|readdir| { - let mut dirs: Vec<_> = readdir.filter_map(|dir| dir.ok()) - .map(|dir| dir.path()).collect(); - dirs.sort(); - dirs.into_iter().rev().filter(|dir| { - dir.join("um").is_dir() - }).next() - }) + let key = otry!(LOCAL_MACHINE + .open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0".as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + let readdir = otry!(fs::read_dir(Path::new(&root).join("lib")).ok()); + let mut dirs: Vec<_> = readdir.filter_map(|dir| dir.ok()) + .map(|dir| dir.path()).collect(); + dirs.sort(); + dirs.into_iter().rev().filter(|dir| { + dir.join("um").join("x64").join("kernel32.lib").is_file() + }).next() } // Interestingly there are several subdirectories, `win7` `win8` and @@ -193,21 +230,17 @@ pub fn link_exe_cmd(sess: &Session) -> Command { // applies to us. Note that if we were targetting kernel mode drivers // instead of user mode applications, we would care. 
fn get_sdk81_dir() -> Option { - LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1".as_ref()) - .ok().and_then(|key| { - key.query_str("InstallationFolder").ok() - }).map(|root| { - Path::new(&root).join("lib").join("winv6.3") - }) + let key = otry!(LOCAL_MACHINE + .open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1".as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(Path::new(&root).join("lib").join("winv6.3")) } fn get_sdk8_dir() -> Option { - LOCAL_MACHINE.open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0".as_ref()) - .ok().and_then(|key| { - key.query_str("InstallationFolder").ok() - }).map(|root| { - Path::new(&root).join("lib").join("win8") - }) + let key = otry!(LOCAL_MACHINE + .open(r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0".as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(Path::new(&root).join("lib").join("win8")) } // When choosing the linker toolchain to use, we have to choose the one @@ -215,31 +248,27 @@ pub fn link_exe_cmd(sess: &Session) -> Command { // where someone on 32-bit Windows is trying to cross compile to 64-bit and // it tries to invoke the native 64-bit linker which won't work. // - // FIXME - This currently functions based on the host architecture of rustc - // itself but it should instead detect the bitness of the OS itself. + // For the return value of this function, the first member of the tuple is + // the folder of the linker we will be invoking, while the second member + // is the folder of the host toolchain for that linker which is essential + // when using a cross linker. We return a Vec since on x64 there are often + // two linkers that can target the architecture we desire. The 64-bit host + // linker is preferred, and hence first, due to 64-bit allowing it more + // address space to work with and potentially being faster. // // FIXME - Figure out what happens when the host architecture is arm. 
- // - // FIXME - Some versions of MSVC may not come with all these toolchains. - // Consider returning an array of toolchains and trying them one at a time - // until the linker is found. - fn bin_subdir(arch: &str) -> Option<&'static str> { - if cfg!(target_arch = "x86_64") { - match arch { - "x86" => Some("amd64_x86"), - "x86_64" => Some("amd64"), - "arm" => Some("amd64_arm"), - _ => None, - } - } else if cfg!(target_arch = "x86") { - match arch { - "x86" => Some(""), - "x86_64" => Some("x86_amd64"), - "arm" => Some("x86_arm"), - _ => None, - } - } else { None } + fn bin_subdir(arch: &str) -> Vec<(&'static str, &'static str)> { + match (arch, host_arch()) { + ("x86", Some(Arch::X86)) => vec![("", "")], + ("x86", Some(Arch::Amd64)) => vec![("amd64_x86", "amd64"), ("", "")], + ("x86_64", Some(Arch::X86)) => vec![("x86_amd64", "")], + ("x86_64", Some(Arch::Amd64)) => vec![("amd64", "amd64"), ("x86_amd64", "")], + ("arm", Some(Arch::X86)) => vec![("x86_arm", "")], + ("arm", Some(Arch::Amd64)) => vec![("amd64_arm", "amd64"), ("x86_arm", "")], + _ => vec![], + } } + fn lib_subdir(arch: &str) -> Option<&'static str> { match arch { "x86" => Some("x86"), @@ -248,6 +277,7 @@ pub fn link_exe_cmd(sess: &Session) -> Command { _ => None, } } + // MSVC's x86 libraries are not in a subfolder fn vc_lib_subdir(arch: &str) -> Option<&'static str> { match arch { @@ -262,6 +292,13 @@ pub fn link_exe_cmd(sess: &Session) -> Command { // If we're not on Windows, then there's no registry to search through and MSVC // wouldn't be able to run, so we just call `link.exe` and hope for the best. 
#[cfg(not(windows))] -pub fn link_exe_cmd(_sess: &Session) -> Command { - Command::new("link.exe") +mod platform { + use std::path::PathBuf; + use std::process::Command; + use session::Session; + pub fn link_exe_cmd(_sess: &Session) -> (Command, Option) { + (Command::new("link.exe"), None) + } } + +pub use self::platform::*; diff --git a/src/librustc_back/rpath.rs b/src/librustc_trans/back/rpath.rs similarity index 96% rename from src/librustc_back/rpath.rs rename to src/librustc_trans/back/rpath.rs index 6cba27fcf3406..ccaa0d4e1b1b0 100644 --- a/src/librustc_back/rpath.rs +++ b/src/librustc_trans/back/rpath.rs @@ -12,10 +12,12 @@ use std::collections::HashSet; use std::env; use std::path::{Path, PathBuf}; use std::fs; -use syntax::ast; + +use rustc::hir::def_id::CrateNum; +use rustc::middle::cstore::LibSource; pub struct RPathConfig<'a> { - pub used_crates: Vec<(ast::CrateNum, Option)>, + pub used_crates: Vec<(CrateNum, LibSource)>, pub out_filename: PathBuf, pub is_like_osx: bool, pub has_rpath: bool, @@ -34,7 +36,7 @@ pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec { debug!("preparing the RPATH!"); let libs = config.used_crates.clone(); - let libs = libs.into_iter().filter_map(|(_, l)| l).collect::>(); + let libs = libs.into_iter().filter_map(|(_, l)| l.option()).collect::>(); let rpaths = get_rpaths(config, &libs[..]); flags.extend_from_slice(&rpaths_to_flags(&rpaths[..])); @@ -67,7 +69,7 @@ fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec { let rel_rpaths = get_rpaths_relative_to_output(config, libs); // And a final backup rpath to the global library location. 
- let fallback_rpaths = vec!(get_install_prefix_rpath(config)); + let fallback_rpaths = vec![get_install_prefix_rpath(config)]; fn log_rpaths(desc: &str, rpaths: &[String]) { debug!("{} rpaths:", desc); diff --git a/src/librustc_trans/back/symbol_names.rs b/src/librustc_trans/back/symbol_names.rs new file mode 100644 index 0000000000000..ff40cfda5ff7c --- /dev/null +++ b/src/librustc_trans/back/symbol_names.rs @@ -0,0 +1,376 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Rust Linkage Model and Symbol Names +//! ======================================= +//! +//! The semantic model of Rust linkage is, broadly, that "there's no global +//! namespace" between crates. Our aim is to preserve the illusion of this +//! model despite the fact that it's not *quite* possible to implement on +//! modern linkers. We initially didn't use system linkers at all, but have +//! been convinced of their utility. +//! +//! There are a few issues to handle: +//! +//! - Linkers operate on a flat namespace, so we have to flatten names. +//! We do this using the C++ namespace-mangling technique. Foo::bar +//! symbols and such. +//! +//! - Symbols for distinct items with the same *name* need to get different +//! linkage-names. Examples of this are monomorphizations of functions or +//! items within anonymous scopes that end up having the same path. +//! +//! - Symbols in different crates but with same names "within" the crate need +//! to get different linkage-names. +//! +//! - Symbol names should be deterministic: Two consecutive runs of the +//! compiler over the same code base should produce the same symbol names for +//! the same items. +//! +//! 
- Symbol names should not depend on any global properties of the code base, +//! so that small modifications to the code base do not result in all symbols +//! changing. In previous versions of the compiler, symbol names incorporated +//! the SVH (Stable Version Hash) of the crate. This scheme turned out to be +//! infeasible when used in conjunction with incremental compilation because +//! small code changes would invalidate all symbols generated previously. +//! +//! - Even symbols from different versions of the same crate should be able to +//! live next to each other without conflict. +//! +//! In order to fulfill the above requirements the following scheme is used by +//! the compiler: +//! +//! The main tool for avoiding naming conflicts is the incorporation of a 64-bit +//! hash value into every exported symbol name. Anything that makes a difference +//! to the symbol being named, but does not show up in the regular path needs to +//! be fed into this hash: +//! +//! - Different monomorphizations of the same item have the same path but differ +//! in their concrete type parameters, so these parameters are part of the +//! data being digested for the symbol hash. +//! +//! - Rust allows items to be defined in anonymous scopes, such as in +//! `fn foo() { { fn bar() {} } { fn bar() {} } }`. Both `bar` functions have +//! the path `foo::bar`, since the anonymous scopes do not contribute to the +//! path of an item. The compiler already handles this case via so-called +//! disambiguating `DefPaths` which use indices to distinguish items with the +//! same name. The DefPaths of the functions above are thus `foo[0]::bar[0]` +//! and `foo[0]::bar[1]`. In order to incorporate this disambiguation +//! information into the symbol name too, these indices are fed into the +//! symbol hash, so that the above two symbols would end up with different +//! hash values. +//! +//! The two measures described above suffice to avoid intra-crate conflicts. In +//! 
order to also avoid inter-crate conflicts two more measures are taken: +//! +//! - The name of the crate containing the symbol is prepended to the symbol +//! name, i.e. symbols are "crate qualified". For example, a function `foo` in +//! module `bar` in crate `baz` would get a symbol name like +//! `baz::bar::foo::{hash}` instead of just `bar::foo::{hash}`. This avoids +//! simple conflicts between functions from different crates. +//! +//! - In order to be able to also use symbols from two versions of the same +//! crate (which naturally also have the same name), a stronger measure is +//! required: The compiler accepts an arbitrary "disambiguator" value via the +//! `-C metadata` commandline argument. This disambiguator is then fed into +//! the symbol hash of every exported item. Consequently, the symbols in two +//! identical crates but with different disambiguators are not in conflict +//! with each other. This facility is mainly intended to be used by build +//! tools like Cargo. +//! +//! A note on symbol name stability +//! ------------------------------- +//! Previous versions of the compiler resorted to feeding NodeIds into the +//! symbol hash in order to disambiguate between items with the same path. The +//! current version of the name generation algorithm takes great care not to do +//! that, since NodeIds are notoriously unstable: A small change to the +//! code base will offset all NodeIds after the change and thus, much as using +//! the SVH in the hash, invalidate an unbounded number of symbol names. This +//! makes re-using previously compiled code for incremental compilation +//! virtually impossible. Thus, symbol hash generation exclusively relies on +//! DefPaths which are much more robust in the face of changes to the code base. 
+ +use common::SharedCrateContext; +use monomorphize::Instance; +use rustc_data_structures::fmt_wrap::FmtWrap; +use rustc_data_structures::blake2b::Blake2bHasher; + +use rustc::middle::weak_lang_items; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::hir::map as hir_map; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::fold::TypeVisitor; +use rustc::ty::item_path::{self, ItemPathBuffer, RootMode}; +use rustc::ty::subst::Substs; +use rustc::hir::map::definitions::{DefPath, DefPathData}; +use rustc::util::common::record_time; + +use syntax::attr; +use syntax::symbol::{Symbol, InternedString}; + +fn get_symbol_hash<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + + // path to the item this name is for + def_path: &DefPath, + + // type of the item, without any generic + // parameters substituted; this is + // included in the hash as a kind of + // safeguard. + item_type: Ty<'tcx>, + + // values for generic type parameters, + // if any. + substs: Option<&'tcx Substs<'tcx>>) + -> String { + debug!("get_symbol_hash(def_path={:?}, parameters={:?})", + def_path, substs); + + let tcx = scx.tcx(); + + let mut hasher = ty::util::TypeIdHasher::new(tcx, Blake2bHasher::new(8, &[])); + + record_time(&tcx.sess.perf_stats.symbol_hash_time, || { + // the main symbol name is not necessarily unique; hash in the + // compiler's internal def-path, guaranteeing each symbol has a + // truly unique path + hasher.def_path(def_path); + + // Include the main item-type. Note that, in this case, the + // assertions about `needs_subst` may not hold, but this item-type + // ought to be the same for every reference anyway. + assert!(!item_type.has_erasable_regions()); + hasher.visit_ty(item_type); + + // also include any type parameters (for generic items) + if let Some(substs) = substs { + assert!(!substs.has_erasable_regions()); + assert!(!substs.needs_subst()); + substs.visit_with(&mut hasher); + } + }); + + // 64 bits should be enough to avoid collisions. 
+ let mut hasher = hasher.into_inner(); + let hash_bytes = hasher.finalize(); + format!("h{:x}", FmtWrap(hash_bytes)) +} + +impl<'a, 'tcx> Instance<'tcx> { + pub fn symbol_name(self, scx: &SharedCrateContext<'a, 'tcx>) -> String { + let Instance { def: def_id, substs } = self; + + debug!("symbol_name(def_id={:?}, substs={:?})", + def_id, substs); + + let node_id = scx.tcx().map.as_local_node_id(def_id); + + if let Some(id) = node_id { + if scx.sess().plugin_registrar_fn.get() == Some(id) { + let svh = &scx.link_meta().crate_hash; + let idx = def_id.index; + return scx.sess().generate_plugin_registrar_symbol(svh, idx); + } + if scx.sess().derive_registrar_fn.get() == Some(id) { + let svh = &scx.link_meta().crate_hash; + let idx = def_id.index; + return scx.sess().generate_derive_registrar_symbol(svh, idx); + } + } + + // FIXME(eddyb) Precompute a custom symbol name based on attributes. + let attrs = scx.tcx().get_attrs(def_id); + let is_foreign = if let Some(id) = node_id { + match scx.tcx().map.get(id) { + hir_map::NodeForeignItem(_) => true, + _ => false + } + } else { + scx.sess().cstore.is_foreign_item(def_id) + }; + + if let Some(name) = weak_lang_items::link_name(&attrs) { + return name.to_string(); + } + + if is_foreign { + if let Some(name) = attr::first_attr_value_str_by_name(&attrs, "link_name") { + return name.to_string(); + } + // Don't mangle foreign items. + return scx.tcx().item_name(def_id).as_str().to_string(); + } + + if let Some(name) = attr::find_export_name_attr(scx.sess().diagnostic(), &attrs) { + // Use provided name + return name.to_string(); + } + + if attr::contains_name(&attrs, "no_mangle") { + // Don't mangle + return scx.tcx().item_name(def_id).as_str().to_string(); + } + + let def_path = scx.tcx().def_path(def_id); + + // We want to compute the "type" of this item. Unfortunately, some + // kinds of items (e.g., closures) don't have an entry in the + // item-type array. 
So walk back up the find the closest parent + // that DOES have an entry. + let mut ty_def_id = def_id; + let instance_ty; + loop { + let key = scx.tcx().def_key(ty_def_id); + match key.disambiguated_data.data { + DefPathData::TypeNs(_) | + DefPathData::ValueNs(_) => { + instance_ty = scx.tcx().item_type(ty_def_id); + break; + } + _ => { + // if we're making a symbol for something, there ought + // to be a value or type-def or something in there + // *somewhere* + ty_def_id.index = key.parent.unwrap_or_else(|| { + bug!("finding type for {:?}, encountered def-id {:?} with no \ + parent", def_id, ty_def_id); + }); + } + } + } + + // Erase regions because they may not be deterministic when hashed + // and should not matter anyhow. + let instance_ty = scx.tcx().erase_regions(&instance_ty); + + let hash = get_symbol_hash(scx, &def_path, instance_ty, Some(substs)); + + let mut buffer = SymbolPathBuffer { + names: Vec::with_capacity(def_path.data.len()) + }; + + item_path::with_forced_absolute_paths(|| { + scx.tcx().push_item_path(&mut buffer, def_id); + }); + + mangle(buffer.names.into_iter(), &hash) + } +} + +struct SymbolPathBuffer { + names: Vec, +} + +impl ItemPathBuffer for SymbolPathBuffer { + fn root_mode(&self) -> &RootMode { + const ABSOLUTE: &'static RootMode = &RootMode::Absolute; + ABSOLUTE + } + + fn push(&mut self, text: &str) { + self.names.push(Symbol::intern(text).as_str()); + } +} + +pub fn exported_name_from_type_and_prefix<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + t: Ty<'tcx>, + prefix: &str) + -> String { + let empty_def_path = DefPath { + data: vec![], + krate: LOCAL_CRATE, + }; + let hash = get_symbol_hash(scx, &empty_def_path, t, None); + let path = [Symbol::intern(prefix).as_str()]; + mangle(path.iter().cloned(), &hash) +} + +// Name sanitation. LLVM will happily accept identifiers with weird names, but +// gas doesn't! 
+// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $ +pub fn sanitize(s: &str) -> String { + let mut result = String::new(); + for c in s.chars() { + match c { + // Escape these with $ sequences + '@' => result.push_str("$SP$"), + '*' => result.push_str("$BP$"), + '&' => result.push_str("$RF$"), + '<' => result.push_str("$LT$"), + '>' => result.push_str("$GT$"), + '(' => result.push_str("$LP$"), + ')' => result.push_str("$RP$"), + ',' => result.push_str("$C$"), + + // '.' doesn't occur in types and functions, so reuse it + // for ':' and '-' + '-' | ':' => result.push('.'), + + // These are legal symbols + 'a' ... 'z' + | 'A' ... 'Z' + | '0' ... '9' + | '_' | '.' | '$' => result.push(c), + + _ => { + result.push('$'); + for c in c.escape_unicode().skip(1) { + match c { + '{' => {}, + '}' => result.push('$'), + c => result.push(c), + } + } + } + } + } + + // Underscore-qualify anything that didn't start as an ident. + if !result.is_empty() && + result.as_bytes()[0] != '_' as u8 && + ! (result.as_bytes()[0] as char).is_xid_start() { + return format!("_{}", &result[..]); + } + + return result; +} + +fn mangle>(path: PI, hash: &str) -> String { + // Follow C++ namespace-mangling style, see + // http://en.wikipedia.org/wiki/Name_mangling for more info. + // + // It turns out that on OSX you can actually have arbitrary symbols in + // function names (at least when given to LLVM), but this is not possible + // when using unix's linker. Perhaps one day when we just use a linker from LLVM + // we won't need to do this name mangling. The problem with name mangling is + // that it seriously limits the available characters. For example we can't + // have things like &T in symbol names when one would theoretically + // want them for things like impls of traits on that type. + // + // To be able to work on all platforms and get *some* reasonable output, we + // use C++ name-mangling. 
+ + let mut n = String::from("_ZN"); // _Z == Begin name-sequence, N == nested + + fn push(n: &mut String, s: &str) { + let sani = sanitize(s); + n.push_str(&format!("{}{}", sani.len(), sani)); + } + + // First, connect each component with pairs. + for data in path { + push(&mut n, &data); + } + + push(&mut n, hash); + + n.push('E'); // End name-sequence. + n +} diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 544df1798eaf9..ae5d02c7e048a 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -10,41 +10,50 @@ use back::lto; use back::link::{get_linker, remove}; -use session::config::{OutputFilenames, Passes, SomePasses, AllPasses}; +use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; +use session::config::{OutputFilenames, OutputTypes, Passes, SomePasses, AllPasses}; use session::Session; use session::config::{self, OutputType}; use llvm; use llvm::{ModuleRef, TargetMachineRef, PassManagerRef, DiagnosticInfoRef, ContextRef}; use llvm::SMDiagnosticRef; -use trans::{CrateTranslation, ModuleTranslation}; +use {CrateTranslation, ModuleLlvm, ModuleSource, ModuleTranslation}; use util::common::time; use util::common::path2cstr; -use syntax::codemap; -use syntax::errors::{self, Handler, Level}; -use syntax::errors::emitter::Emitter; +use util::fs::link_or_copy; +use errors::{self, Handler, Level, DiagnosticBuilder}; +use errors::emitter::Emitter; +use syntax_pos::MultiSpan; +use context::{is_pie_binary, get_reloc_model}; -use std::collections::HashMap; -use std::ffi::{CStr, CString}; +use std::ffi::CString; use std::fs; use std::path::{Path, PathBuf}; -use std::ptr; use std::str; use std::sync::{Arc, Mutex}; use std::sync::mpsc::channel; use std::thread; -use libc::{self, c_uint, c_int, c_void}; +use libc::{c_uint, c_void}; + +pub const RELOC_MODEL_ARGS : [(&'static str, llvm::RelocMode); 4] = [ + ("pic", llvm::RelocMode::PIC), + ("static", llvm::RelocMode::Static), + ("default", 
llvm::RelocMode::Default), + ("dynamic-no-pic", llvm::RelocMode::DynamicNoPic), +]; + +pub const CODE_GEN_MODEL_ARGS : [(&'static str, llvm::CodeModel); 5] = [ + ("default", llvm::CodeModel::Default), + ("small", llvm::CodeModel::Small), + ("kernel", llvm::CodeModel::Kernel), + ("medium", llvm::CodeModel::Medium), + ("large", llvm::CodeModel::Large), +]; pub fn llvm_err(handler: &errors::Handler, msg: String) -> ! { - unsafe { - let cstr = llvm::LLVMRustGetLastError(); - if cstr == ptr::null() { - panic!(handler.fatal(&msg[..])); - } else { - let err = CStr::from_ptr(cstr).to_bytes(); - let err = String::from_utf8_lossy(err).to_string(); - libc::free(cstr as *mut _); - panic!(handler.fatal(&format!("{}: {}", &msg[..], &err[..]))); - } + match llvm::last_error() { + Some(err) => panic!(handler.fatal(&format!("{}: {}", msg, err))), + None => panic!(handler.fatal(&msg)), } } @@ -59,7 +68,7 @@ pub fn write_output_file( let output_c = path2cstr(output); let result = llvm::LLVMRustWriteOutputFile( target, pm, m, output_c.as_ptr(), file_type); - if !result { + if result.into_result().is_err() { llvm_err(handler, format!("could not write output to {}", output.display())); } } @@ -92,13 +101,13 @@ impl SharedEmitter { for diag in &*buffer { match diag.code { Some(ref code) => { - handler.emit_with_code(None, + handler.emit_with_code(&MultiSpan::new(), &diag.msg, &code[..], diag.lvl); }, None => { - handler.emit(None, + handler.emit(&MultiSpan::new(), &diag.msg, diag.lvl); }, @@ -109,23 +118,22 @@ impl SharedEmitter { } impl Emitter for SharedEmitter { - fn emit(&mut self, sp: Option, - msg: &str, code: Option<&str>, lvl: Level) { - assert!(sp.is_none(), "SharedEmitter doesn't support spans"); - + fn emit(&mut self, db: &DiagnosticBuilder) { self.buffer.lock().unwrap().push(Diagnostic { - msg: msg.to_string(), - code: code.map(|s| s.to_string()), - lvl: lvl, + msg: db.message.to_string(), + code: db.code.clone(), + lvl: db.level, }); - } - - fn custom_emit(&mut self, _sp: 
errors::RenderSpan, _msg: &str, _lvl: Level) { - panic!("SharedEmitter doesn't support custom_emit"); + for child in &db.children { + self.buffer.lock().unwrap().push(Diagnostic { + msg: child.message.to_string(), + code: None, + lvl: child.level, + }); + } } } - // On android, we by default compile for armv7 processors. This enables // things like double word CAS instructions (rather than emulating them) // which are *far* more efficient. This is obviously undesirable in some @@ -139,45 +147,42 @@ impl Emitter for SharedEmitter { // arise as some of intrinsics are converted into function calls // and nobody provides implementations those functions fn target_feature(sess: &Session) -> String { - format!("{},{}", sess.target.target.options.features, sess.opts.cg.target_feature) + let rustc_features = [ + "crt-static", + ]; + let requested_features = sess.opts.cg.target_feature.split(','); + let llvm_features = requested_features.filter(|f| { + !rustc_features.iter().any(|s| f.contains(s)) + }); + format!("{},{}", + sess.target.target.options.features, + llvm_features.collect::>().join(",")) } fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { match optimize { - config::OptLevel::No => llvm::CodeGenLevelNone, - config::OptLevel::Less => llvm::CodeGenLevelLess, - config::OptLevel::Default => llvm::CodeGenLevelDefault, - config::OptLevel::Aggressive => llvm::CodeGenLevelAggressive, + config::OptLevel::No => llvm::CodeGenOptLevel::None, + config::OptLevel::Less => llvm::CodeGenOptLevel::Less, + config::OptLevel::Default => llvm::CodeGenOptLevel::Default, + config::OptLevel::Aggressive => llvm::CodeGenOptLevel::Aggressive, + _ => llvm::CodeGenOptLevel::Default, + } +} + +fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { + match optimize { + config::OptLevel::Size => llvm::CodeGenOptSizeDefault, + config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, + _ => llvm::CodeGenOptSizeNone, } } pub fn 
create_target_machine(sess: &Session) -> TargetMachineRef { - let reloc_model_arg = match sess.opts.cg.relocation_model { - Some(ref s) => &s[..], - None => &sess.target.target.options.relocation_model[..], - }; - let reloc_model = match reloc_model_arg { - "pic" => llvm::RelocPIC, - "static" => llvm::RelocStatic, - "default" => llvm::RelocDefault, - "dynamic-no-pic" => llvm::RelocDynamicNoPic, - _ => { - sess.err(&format!("{:?} is not a valid relocation mode", - sess.opts - .cg - .relocation_model)); - sess.abort_if_errors(); - unreachable!(); - } - }; + let reloc_model = get_reloc_model(sess); let opt_level = get_llvm_opt_level(sess.opts.optimize); let use_softfp = sess.opts.cg.soft_float; - let any_library = sess.crate_types.borrow().iter().any(|ty| { - *ty != config::CrateTypeExecutable - }); - let ffunction_sections = sess.target.target.options.function_sections; let fdata_sections = ffunction_sections; @@ -186,19 +191,16 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { None => &sess.target.target.options.code_model[..], }; - let code_model = match code_model_arg { - "default" => llvm::CodeModelDefault, - "small" => llvm::CodeModelSmall, - "kernel" => llvm::CodeModelKernel, - "medium" => llvm::CodeModelMedium, - "large" => llvm::CodeModelLarge, + let code_model = match CODE_GEN_MODEL_ARGS.iter().find( + |&&arg| arg.0 == code_model_arg) { + Some(x) => x.1, _ => { sess.err(&format!("{:?} is not a valid code model", sess.opts .cg .code_model)); sess.abort_if_errors(); - unreachable!(); + bug!(); } }; @@ -218,7 +220,7 @@ pub fn create_target_machine(sess: &Session) -> TargetMachineRef { reloc_model, opt_level, use_softfp, - !any_library && reloc_model == llvm::RelocPIC, + is_pie_binary(sess), ffunction_sections, fdata_sections, ) @@ -245,6 +247,9 @@ pub struct ModuleConfig { /// absolutely no optimizations (used for the metadata module). opt_level: Option, + /// Some(level) to optimize binary size, or None to not affect program size. 
+ opt_size: Option, + // Flags indicating which outputs to produce. emit_no_opt_bc: bool, emit_bc: bool, @@ -252,7 +257,6 @@ pub struct ModuleConfig { emit_ir: bool, emit_asm: bool, emit_obj: bool, - // Miscellaneous flags. These are mostly copied from command-line // options. no_verify: bool, @@ -262,7 +266,11 @@ pub struct ModuleConfig { vectorize_loop: bool, vectorize_slp: bool, merge_functions: bool, - inline_threshold: Option + inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. + obj_is_bitcode: bool, } unsafe impl Send for ModuleConfig { } @@ -273,6 +281,7 @@ impl ModuleConfig { tm: tm, passes: passes, opt_level: None, + opt_size: None, emit_no_opt_bc: false, emit_bc: false, @@ -280,6 +289,7 @@ impl ModuleConfig { emit_ir: false, emit_asm: false, emit_obj: false, + obj_is_bitcode: false, no_verify: false, no_prepopulate_passes: false, @@ -298,6 +308,7 @@ impl ModuleConfig { self.no_builtins = trans.no_builtins; self.time_passes = sess.time_passes(); self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode; // Copy what clang does by turning on loop vectorization at O2 and // slp vectorization at O3. 
Otherwise configure other optimization aspects @@ -326,6 +337,9 @@ struct CodegenContext<'a> { remark: Passes, // Worker thread number worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + incr_comp_session_dir: Option } impl<'a> CodegenContext<'a> { @@ -336,6 +350,7 @@ impl<'a> CodegenContext<'a> { plugin_passes: sess.plugin_llvm_passes.borrow().clone(), remark: sess.opts.cg.remark.clone(), worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()) } } } @@ -348,7 +363,7 @@ struct HandlerFreeVars<'a> { unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext<'a>, msg: &'b str, cookie: c_uint) { - use syntax::codemap::ExpnId; + use syntax_pos::ExpnId; match cgcx.lto_ctxt { Some((sess, _)) => { @@ -371,7 +386,7 @@ unsafe extern "C" fn inline_asm_handler(diag: SMDiagnosticRef, cookie: c_uint) { let HandlerFreeVars { cgcx, .. } = *(user as *const HandlerFreeVars); - let msg = llvm::build_string(|s| llvm::LLVMWriteSMDiagnosticToString(diag, s)) + let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); report_inline_asm(cgcx, &msg[..], cookie); @@ -383,26 +398,23 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { report_inline_asm(cgcx, - &*llvm::twine_to_string(inline.message), + &llvm::twine_to_string(inline.message), inline.cookie); } llvm::diagnostic::Optimization(opt) => { - let pass_name = str::from_utf8(CStr::from_ptr(opt.pass_name).to_bytes()) - .ok() - .expect("got a non-UTF8 pass name from LLVM"); let enabled = match cgcx.remark { AllPasses => true, - SomePasses(ref v) => v.iter().any(|s| *s == pass_name), + SomePasses(ref v) => v.iter().any(|s| *s == opt.pass_name), }; if enabled { let loc = llvm::debug_loc_to_string(llcx, opt.debug_loc); 
cgcx.handler.note_without_error(&format!("optimization {} for {} at {}: {}", opt.kind.describe(), - pass_name, + opt.pass_name, if loc.is_empty() { "[unknown]" } else { &*loc }, - llvm::twine_to_string(opt.message))); + opt.message)); } } @@ -413,10 +425,11 @@ unsafe extern "C" fn diagnostic_handler(info: DiagnosticInfoRef, user: *mut c_vo // Unsafe due to LLVM calls. unsafe fn optimize_and_codegen(cgcx: &CodegenContext, mtrans: ModuleTranslation, + mllvm: ModuleLlvm, config: ModuleConfig, - name_extra: String, output_names: OutputFilenames) { - let ModuleTranslation { llmod, llcx } = mtrans; + let llmod = mllvm.llmod; + let llcx = mllvm.llcx; let tm = config.tm; // llcx doesn't outlive this function, so we can put this on the stack. @@ -426,12 +439,13 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, }; let fv = &fv as *const HandlerFreeVars as *mut c_void; - llvm::LLVMSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, fv); + llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, fv); llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, fv); + let module_name = Some(&mtrans.name[..]); + if config.emit_no_opt_bc { - let ext = format!("{}.no-opt.bc", name_extra); - let out = output_names.with_extension(&ext); + let out = output_names.temp_path_ext("no-opt.bc", module_name); let out = path2cstr(&out); llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); } @@ -446,9 +460,22 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, // If we're verifying or linting, add them to the function pass // manager. 
- let addpass = |pass: &str| { - let pass = CString::new(pass).unwrap(); - llvm::LLVMRustAddPass(fpm, pass.as_ptr()) + let addpass = |pass_name: &str| { + let pass_name = CString::new(pass_name).unwrap(); + let pass = llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr()); + if pass.is_null() { + return false; + } + let pass_manager = match llvm::LLVMRustPassKind(pass) { + llvm::PassKind::Function => fpm, + llvm::PassKind::Module => mpm, + llvm::PassKind::Other => { + cgcx.handler.err("Encountered LLVM pass kind we can't handle"); + return true + }, + }; + llvm::LLVMRustAddPass(pass_manager, pass); + true }; if !config.no_verify { assert!(addpass("verify")); } @@ -490,13 +517,18 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, match cgcx.lto_ctxt { Some((sess, reachable)) if sess.lto() => { - time(sess.time_passes(), "all lto passes", || - lto::run(sess, llmod, tm, reachable, &config, - &name_extra, &output_names)); - + time(sess.time_passes(), "all lto passes", || { + let temp_no_opt_bc_filename = + output_names.temp_path_ext("no-opt.lto.bc", module_name); + lto::run(sess, + llmod, + tm, + reachable, + &config, + &temp_no_opt_bc_filename); + }); if config.emit_lto_bc { - let name = format!("{}.lto.bc", name_extra); - let out = output_names.with_extension(&name); + let out = output_names.temp_path_ext("lto.bc", module_name); let out = path2cstr(&out); llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); } @@ -525,17 +557,26 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, f(cpm); } - if config.emit_bc { - let ext = format!("{}.bc", name_extra); - let out = output_names.with_extension(&ext); - let out = path2cstr(&out); - llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr()); + // Change what we write and cleanup based on whether obj files are + // just llvm bitcode. In that case write bitcode, and possibly + // delete the bitcode if it wasn't requested. 
Don't generate the + // machine code, instead copy the .o file from the .bc + let write_bc = config.emit_bc || config.obj_is_bitcode; + let rm_bc = !config.emit_bc && config.obj_is_bitcode; + let write_obj = config.emit_obj && !config.obj_is_bitcode; + let copy_bc_to_obj = config.emit_obj && config.obj_is_bitcode; + + let bc_out = output_names.temp_path(OutputType::Bitcode, module_name); + let obj_out = output_names.temp_path(OutputType::Object, module_name); + + if write_bc { + let bc_out_c = path2cstr(&bc_out); + llvm::LLVMWriteBitcodeToFile(llmod, bc_out_c.as_ptr()); } time(config.time_passes, &format!("codegen passes [{}]", cgcx.worker), || { if config.emit_ir { - let ext = format!("{}.ll", name_extra); - let out = output_names.with_extension(&ext); + let out = output_names.temp_path(OutputType::LlvmAssembly, module_name); let out = path2cstr(&out); with_codegen(tm, llmod, config.no_builtins, |cpm| { llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr()); @@ -544,7 +585,7 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, } if config.emit_asm { - let path = output_names.with_extension(&format!("{}.s", name_extra)); + let path = output_names.temp_path(OutputType::Assembly, module_name); // We can't use the same module for asm and binary output, because that triggers // various errors like invalid IR or broken binaries, so we might have to clone the @@ -556,29 +597,57 @@ unsafe fn optimize_and_codegen(cgcx: &CodegenContext, }; with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(cgcx.handler, tm, cpm, llmod, &path, - llvm::AssemblyFileType); + llvm::FileType::AssemblyFile); }); if config.emit_obj { llvm::LLVMDisposeModule(llmod); } } - if config.emit_obj { - let path = output_names.with_extension(&format!("{}.o", name_extra)); + if write_obj { with_codegen(tm, llmod, config.no_builtins, |cpm| { - write_output_file(cgcx.handler, tm, cpm, llmod, &path, llvm::ObjectFileType); + write_output_file(cgcx.handler, tm, cpm, llmod, &obj_out, + 
llvm::FileType::ObjectFile); }); } }); - llvm::LLVMDisposeModule(llmod); - llvm::LLVMContextDispose(llcx); + if copy_bc_to_obj { + debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out); + if let Err(e) = link_or_copy(&bc_out, &obj_out) { + cgcx.handler.err(&format!("failed to copy bitcode to object file: {}", e)); + } + } + + if rm_bc { + debug!("removing_bitcode {:?}", bc_out); + if let Err(e) = fs::remove_file(&bc_out) { + cgcx.handler.err(&format!("failed to remove bitcode: {}", e)); + } + } + llvm::LLVMRustDisposeTargetMachine(tm); } + +pub fn cleanup_llvm(trans: &CrateTranslation) { + for module in trans.modules.iter() { + unsafe { + match module.source { + ModuleSource::Translated(llvm) => { + llvm::LLVMDisposeModule(llvm.llmod); + llvm::LLVMContextDispose(llvm.llcx); + } + ModuleSource::Preexisting(_) => { + } + } + } + } +} + pub fn run_passes(sess: &Session, trans: &CrateTranslation, - output_types: &HashMap>, + output_types: &OutputTypes, crate_output: &OutputFilenames) { // It's possible that we have `codegen_units > 1` but only one item in // `trans.modules`. We could theoretically proceed and do LTO in that @@ -594,16 +663,18 @@ pub fn run_passes(sess: &Session, } // Sanity check - assert!(trans.modules.len() == sess.opts.cg.codegen_units); + assert!(trans.modules.len() == sess.opts.cg.codegen_units || + sess.opts.debugging_opts.incremental.is_some()); let tm = create_target_machine(sess); // Figure out what we actually need to build. let mut modules_config = ModuleConfig::new(tm, sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(tm, vec!()); + let mut metadata_config = ModuleConfig::new(tm, vec![]); modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); + modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); // Save all versions of the bytecode if we're saving our temporaries. 
if sess.opts.cg.save_temps { @@ -658,27 +729,50 @@ pub fn run_passes(sess: &Session, { let work = build_work_item(sess, - trans.metadata_module, + trans.metadata_module.clone(), metadata_config.clone(), - crate_output.clone(), - "metadata".to_string()); + crate_output.clone()); work_items.push(work); } - for (index, mtrans) in trans.modules.iter().enumerate() { + for mtrans in trans.modules.iter() { let work = build_work_item(sess, - *mtrans, + mtrans.clone(), modules_config.clone(), - crate_output.clone(), - format!("{}", index)); + crate_output.clone()); work_items.push(work); } + if sess.opts.debugging_opts.incremental_info { + dump_incremental_data(&trans); + } + // Process the work items, optionally using worker threads. - if sess.opts.cg.codegen_units == 1 { + // NOTE: This code is not really adapted to incremental compilation where + // the compiler decides the number of codegen units (and will + // potentially create hundreds of them). + let num_workers = work_items.len() - 1; + if num_workers == 1 { run_work_singlethreaded(sess, &trans.reachable, work_items); } else { - run_work_multithreaded(sess, work_items, sess.opts.cg.codegen_units); + run_work_multithreaded(sess, work_items, num_workers); + } + + // If in incr. comp. mode, preserve the `.o` files for potential re-use + for mtrans in trans.modules.iter() { + let mut files = vec![]; + + if modules_config.emit_obj { + let path = crate_output.temp_path(OutputType::Object, Some(&mtrans.name)); + files.push((OutputType::Object, path)); + } + + if modules_config.emit_bc { + let path = crate_output.temp_path(OutputType::Bitcode, Some(&mtrans.name)); + files.push((OutputType::Bitcode, path)); + } + + save_trans_partition(sess, &mtrans.name, mtrans.symbol_name_hash, &files); } // All codegen is finished. 
@@ -693,32 +787,42 @@ pub fn run_passes(sess: &Session, } }; - let copy_if_one_unit = |ext: &str, - output_type: OutputType, + let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| { - if sess.opts.cg.codegen_units == 1 { + if trans.modules.len() == 1 { // 1) Only one codegen unit. In this case it's no difficulty // to copy `foo.0.x` to `foo.x`. - copy_gracefully(&crate_output.with_extension(ext), + let module_name = Some(&(trans.modules[0].name)[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, &crate_output.path(output_type)); if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.0.x`. - remove(sess, &crate_output.with_extension(ext)); + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); } - } else if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. 
+ sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } } }; @@ -734,17 +838,17 @@ pub fn run_passes(sess: &Session, // Copy to .bc, but always keep the .0.bc. There is a later // check to figure out if we should delete .0.bc files, or keep // them for making an rlib. - copy_if_one_unit("0.bc", OutputType::Bitcode, true); + copy_if_one_unit(OutputType::Bitcode, true); } OutputType::LlvmAssembly => { - copy_if_one_unit("0.ll", OutputType::LlvmAssembly, false); + copy_if_one_unit(OutputType::LlvmAssembly, false); } OutputType::Assembly => { - copy_if_one_unit("0.s", OutputType::Assembly, false); + copy_if_one_unit(OutputType::Assembly, false); } OutputType::Object => { user_wants_objects = true; - copy_if_one_unit("0.o", OutputType::Object, true); + copy_if_one_unit(OutputType::Object, true); } OutputType::Exe | OutputType::DepInfo => {} @@ -755,51 +859,55 @@ pub fn run_passes(sess: &Session, // Clean up unwanted temporary files. // We create the following files by default: - // - crate.0.bc - // - crate.0.o + // - crate.#module-name#.bc + // - crate.#module-name#.o // - crate.metadata.bc // - crate.metadata.o // - crate.o (linked from crate.##.o) - // - crate.bc (copied from crate.0.bc) + // - crate.bc (copied from crate.##.bc) // We may create additional files if requested by the user (through // `-C save-temps` or `--emit=` flags). if !sess.opts.cg.save_temps { - // Remove the temporary .0.o objects. If the user didn't + // Remove the temporary .#module-name#.o objects. 
If the user didn't // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .0.bc as well. + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. - // Specific rules for keeping .0.bc: + // Specific rules for keeping .#module-name#.bc: // - If we're building an rlib (`needs_crate_bitcode`), then keep // it. // - If the user requested bitcode (`user_wants_bitcode`), and // codegen_units > 1, then keep it. // - If the user requested bitcode but codegen_units == 1, then we - // can toss .0.bc because we copied it to .bc earlier. + // can toss .#module-name#.bc because we copied it to .bc earlier. // - If we're not building an rlib and the user didn't request - // bitcode, then delete .0.bc. + // bitcode, then delete .#module-name#.bc. // If you change how this works, also update back::link::link_rlib, - // where .0.bc files are (maybe) deleted after making an rlib. + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. 
let keep_numbered_bitcode = needs_crate_bitcode || (user_wants_bitcode && sess.opts.cg.codegen_units > 1); let keep_numbered_objects = needs_crate_object || (user_wants_objects && sess.opts.cg.codegen_units > 1); - for i in 0..trans.modules.len() { + for module_name in trans.modules.iter().map(|m| Some(&m.name[..])) { if modules_config.emit_obj && !keep_numbered_objects { - let ext = format!("{}.o", i); - remove(sess, &crate_output.with_extension(&ext)); + let path = crate_output.temp_path(OutputType::Object, module_name); + remove(sess, &path); } if modules_config.emit_bc && !keep_numbered_bitcode { - let ext = format!("{}.bc", i); - remove(sess, &crate_output.with_extension(&ext)); + let path = crate_output.temp_path(OutputType::Bitcode, module_name); + remove(sess, &path); } } if metadata_config.emit_bc && !user_wants_bitcode { - remove(sess, &crate_output.with_extension("metadata.bc")); + let path = crate_output.temp_path(OutputType::Bitcode, + Some(&trans.metadata_module.name[..])); + remove(sess, &path); } } @@ -816,31 +924,75 @@ pub fn run_passes(sess: &Session, } } +fn dump_incremental_data(trans: &CrateTranslation) { + let mut reuse = 0; + for mtrans in trans.modules.iter() { + match mtrans.source { + ModuleSource::Preexisting(..) => reuse += 1, + ModuleSource::Translated(..) 
=> (), + } + } + println!("incremental: re-using {} out of {} modules", reuse, trans.modules.len()); +} + struct WorkItem { mtrans: ModuleTranslation, config: ModuleConfig, - output_names: OutputFilenames, - name_extra: String + output_names: OutputFilenames } fn build_work_item(sess: &Session, mtrans: ModuleTranslation, config: ModuleConfig, - output_names: OutputFilenames, - name_extra: String) + output_names: OutputFilenames) -> WorkItem { let mut config = config; config.tm = create_target_machine(sess); - WorkItem { mtrans: mtrans, config: config, output_names: output_names, - name_extra: name_extra } + WorkItem { + mtrans: mtrans, + config: config, + output_names: output_names + } } fn execute_work_item(cgcx: &CodegenContext, work_item: WorkItem) { unsafe { - optimize_and_codegen(cgcx, work_item.mtrans, work_item.config, - work_item.name_extra, work_item.output_names); + match work_item.mtrans.source { + ModuleSource::Translated(mllvm) => { + debug!("llvm-optimizing {:?}", work_item.mtrans.name); + optimize_and_codegen(cgcx, + work_item.mtrans, + mllvm, + work_item.config, + work_item.output_names); + } + ModuleSource::Preexisting(wp) => { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let name = &work_item.mtrans.name; + for (kind, saved_file) in wp.saved_files { + let obj_out = work_item.output_names.temp_path(kind, Some(name)); + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + work_item.mtrans.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + cgcx.handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + } + } + } } } @@ -859,6 +1011,8 @@ fn run_work_singlethreaded(sess: &Session, fn run_work_multithreaded(sess: &Session, work_items: Vec, num_workers: usize) { + assert!(num_workers > 0); + // Run some 
workers to process the work items. let work_items_arc = Arc::new(Mutex::new(work_items)); let mut diag_emitter = SharedEmitter::new(); @@ -874,6 +1028,8 @@ fn run_work_multithreaded(sess: &Session, let mut tx = Some(tx); futures.push(rx); + let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); + thread::Builder::new().name(format!("codegen-{}", i)).spawn(move || { let diag_handler = Handler::with_emitter(true, false, box diag_emitter); @@ -885,6 +1041,7 @@ fn run_work_multithreaded(sess: &Session, plugin_passes: plugin_passes, remark: remark, worker: i, + incr_comp_session_dir: incr_comp_session_dir }; loop { @@ -923,10 +1080,10 @@ fn run_work_multithreaded(sess: &Session, } pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) { - let (pname, mut cmd) = get_linker(sess); + let (pname, mut cmd, _) = get_linker(sess); cmd.arg("-c").arg("-o").arg(&outputs.path(OutputType::Object)) - .arg(&outputs.temp_path(OutputType::Assembly)); + .arg(&outputs.temp_path(OutputType::Assembly, None)); debug!("{:?}", cmd); match cmd.output() { @@ -951,36 +1108,6 @@ pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) { } } -pub unsafe fn configure_llvm(sess: &Session) { - let mut llvm_c_strs = Vec::new(); - let mut llvm_args = Vec::new(); - - { - let mut add = |arg: &str| { - let s = CString::new(arg).unwrap(); - llvm_args.push(s.as_ptr()); - llvm_c_strs.push(s); - }; - add("rustc"); // fake program name - if sess.time_llvm_passes() { add("-time-passes"); } - if sess.print_llvm_passes() { add("-debug-pass=Structure"); } - - // FIXME #21627 disable faulty FastISel on AArch64 (even for -O0) - if sess.target.target.arch == "aarch64" { add("-fast-isel=0"); } - - for arg in &sess.opts.cg.llvm_args { - add(&(*arg)); - } - } - - llvm::LLVMInitializePasses(); - - llvm::initialize_available_targets(); - - llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, - llvm_args.as_ptr()); -} - pub unsafe fn with_llvm_pmb(llmod: ModuleRef, config: 
&ModuleConfig, f: &mut FnMut(llvm::PassManagerBuilderRef)) { @@ -988,13 +1115,19 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, // reasonable defaults and prepare it to actually populate the pass // manager. let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt = config.opt_level.unwrap_or(llvm::CodeGenLevelNone); + let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; - llvm::LLVMRustConfigurePassManagerBuilder(builder, opt, + llvm::LLVMRustConfigurePassManagerBuilder(builder, opt_level, config.merge_functions, config.vectorize_slp, config.vectorize_loop); + llvm::LLVMPassManagerBuilderSetSizeLevel(builder, opt_size as u32); + + if opt_size != llvm::CodeGenOptSizeNone { + llvm::LLVMPassManagerBuilderSetDisableUnrollLoops(builder, 1); + } llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins); @@ -1002,21 +1135,30 @@ pub unsafe fn with_llvm_pmb(llmod: ModuleRef, // always-inline functions (but don't add lifetime intrinsics), at O1 we // inline with lifetime intrinsics, and O2+ we add an inliner with a // thresholds copied from clang. - match (opt, inline_threshold) { - (_, Some(t)) => { + match (opt_level, opt_size, inline_threshold) { + (.., Some(t)) => { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, t as u32); } - (llvm::CodeGenLevelNone, _) => { + (llvm::CodeGenOptLevel::Aggressive, ..) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275); + } + (_, llvm::CodeGenOptSizeDefault, _) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 75); + } + (_, llvm::CodeGenOptSizeAggressive, _) => { + llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 25); + } + (llvm::CodeGenOptLevel::None, ..) => { llvm::LLVMRustAddAlwaysInlinePass(builder, false); } - (llvm::CodeGenLevelLess, _) => { + (llvm::CodeGenOptLevel::Less, ..) 
=> { llvm::LLVMRustAddAlwaysInlinePass(builder, true); } - (llvm::CodeGenLevelDefault, _) => { + (llvm::CodeGenOptLevel::Default, ..) => { llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 225); } - (llvm::CodeGenLevelAggressive, _) => { - llvm::LLVMPassManagerBuilderUseInlinerWithThreshold(builder, 275); + (llvm::CodeGenOptLevel::Other, ..) => { + bug!("CodeGenOptLevel::Other selected") } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs new file mode 100644 index 0000000000000..259ef2a780cc2 --- /dev/null +++ b/src/librustc_trans/base.rs @@ -0,0 +1,2128 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Translate the completed AST to the LLVM IR. +//! +//! Some functions here, such as trans_block and trans_expr, return a value -- +//! the result of the translation to LLVM -- while others, such as trans_fn +//! and trans_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about trans: +//! +//! * There's no way to find out the Ty type of a ValueRef. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its TypeRef by calling val_ty, +//! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, +//! int) and rec(x=int, y=int, z=int) will have the same TypeRef. 
+ +#![allow(non_camel_case_types)] + +use super::CrateTranslation; +use super::ModuleLlvm; +use super::ModuleSource; +use super::ModuleTranslation; + +use assert_module_sources; +use back::link; +use back::linker::LinkerInfo; +use llvm::{Linkage, ValueRef, Vector, get_param}; +use llvm; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; +use rustc::ty::subst::Substs; +use rustc::traits; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::adjustment::CustomCoerceUnsized; +use rustc::dep_graph::{DepNode, WorkProduct}; +use rustc::hir::map as hir_map; +use rustc::util::common::time; +use session::config::{self, NoDebugInfo}; +use rustc_incremental::IncrementalHashesMap; +use session::{self, DataTypeKind, Session}; +use abi::{self, Abi, FnType}; +use adt; +use attributes; +use build::*; +use builder::{Builder, noname}; +use callee::{Callee}; +use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint}; +use collector::{self, TransItemCollectionMode}; +use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; +use common::{CrateContext, FunctionContext}; +use common::{Result}; +use common::{fulfill_obligation}; +use common::{type_is_zero_size, val_ty}; +use common; +use consts; +use context::{SharedCrateContext, CrateContextList}; +use debuginfo::{self, DebugLoc}; +use declare; +use machine; +use machine::{llalign_of_min, llsize_of}; +use meth; +use mir; +use monomorphize::{self, Instance}; +use partitioning::{self, PartitioningStrategy, CodegenUnit}; +use symbol_map::SymbolMap; +use symbol_names_test; +use trans_item::{TransItem, DefPathBasedNames}; +use type_::Type; +use type_of; +use value::Value; +use Disr; +use util::nodemap::{NodeSet, FxHashMap, FxHashSet}; + +use arena::TypedArena; +use libc::c_uint; +use std::ffi::{CStr, CString}; +use std::borrow::Cow; +use std::cell::{Cell, RefCell}; +use std::ptr; +use std::rc::Rc; +use std::str; +use std::i32; +use 
syntax_pos::{Span, DUMMY_SP}; +use syntax::attr; +use rustc::hir; +use rustc::ty::layout::{self, Layout}; +use syntax::ast; + +thread_local! { + static TASK_LOCAL_INSN_KEY: RefCell>> = { + RefCell::new(None) + } +} + +pub fn with_insn_ctxt(blk: F) + where F: FnOnce(&[&'static str]) +{ + TASK_LOCAL_INSN_KEY.with(move |slot| { + slot.borrow().as_ref().map(move |s| blk(s)); + }) +} + +pub fn init_insn_ctxt() { + TASK_LOCAL_INSN_KEY.with(|slot| { + *slot.borrow_mut() = Some(Vec::new()); + }); +} + +pub struct _InsnCtxt { + _cannot_construct_outside_of_this_module: (), +} + +impl Drop for _InsnCtxt { + fn drop(&mut self) { + TASK_LOCAL_INSN_KEY.with(|slot| { + if let Some(ctx) = slot.borrow_mut().as_mut() { + ctx.pop(); + } + }) + } +} + +pub fn push_ctxt(s: &'static str) -> _InsnCtxt { + debug!("new InsnCtxt: {}", s); + TASK_LOCAL_INSN_KEY.with(|slot| { + if let Some(ctx) = slot.borrow_mut().as_mut() { + ctx.push(s) + } + }); + _InsnCtxt { + _cannot_construct_outside_of_this_module: (), + } +} + +pub struct StatRecorder<'a, 'tcx: 'a> { + ccx: &'a CrateContext<'a, 'tcx>, + name: Option, + istart: usize, +} + +impl<'a, 'tcx> StatRecorder<'a, 'tcx> { + pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> { + let istart = ccx.stats().n_llvm_insns.get(); + StatRecorder { + ccx: ccx, + name: Some(name), + istart: istart, + } + } +} + +impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { + fn drop(&mut self) { + if self.ccx.sess().trans_stats() { + let iend = self.ccx.stats().n_llvm_insns.get(); + self.ccx + .stats() + .fn_stats + .borrow_mut() + .push((self.name.take().unwrap(), iend - self.istart)); + self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1); + // Reset LLVM insn count to avoid compound costs. 
+ self.ccx.stats().n_llvm_insns.set(self.istart); + } + } +} + +pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) +} + +pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { + StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) +} + +pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { + b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) +} + +pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { + b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) +} + +fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { + match bcx.tcx().lang_items.require(it) { + Ok(id) => id, + Err(s) => { + bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s)); + } + } +} + +// The following malloc_raw_dyn* functions allocate a box to contain +// a given type, but with a potentially dynamic size. + +pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + llty_ptr: Type, + info_ty: Ty<'tcx>, + size: ValueRef, + align: ValueRef, + debug_loc: DebugLoc) + -> Result<'blk, 'tcx> { + let _icx = push_ctxt("malloc_raw_exchange"); + + // Allocate space: + let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); + let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) + .call(bcx, debug_loc, &[size, align], None); + + Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) +} + + +pub fn bin_op_to_icmp_predicate(op: hir::BinOp_, + signed: bool) + -> llvm::IntPredicate { + match op { + hir::BiEq => llvm::IntEQ, + hir::BiNe => llvm::IntNE, + hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT }, + hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE }, + hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT }, + hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn 
bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate { + match op { + hir::BiEq => llvm::RealOEQ, + hir::BiNe => llvm::RealUNE, + hir::BiLt => llvm::RealOLT, + hir::BiLe => llvm::RealOLE, + hir::BiGt => llvm::RealOGT, + hir::BiGe => llvm::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + lhs: ValueRef, + rhs: ValueRef, + t: Ty<'tcx>, + ret_ty: Type, + op: hir::BinOp_, + debug_loc: DebugLoc) + -> ValueRef { + let signed = match t.sty { + ty::TyFloat(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty); + }, + ty::TyUint(_) => false, + ty::TyInt(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. +/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be drived +/// from the old one. +pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option) + -> ValueRef { + let (source, target) = ccx.tcx().struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len), + (&ty::TyDynamic(..), &ty::TyDynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. 
+ old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::TyDynamic(ref data, ..)) => { + consts::ptrcast(meth::get_vtable(ccx, source, data.principal()), + Type::vtable_ptr(ccx)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. +pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx>) + -> (ValueRef, ValueRef) { + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::TyBox(a), &ty::TyBox(b)) | + (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), + &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | + (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { + assert!(common::type_is_sized(bcx.tcx(), a)); + let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); + (PointerCast(bcx, src, ptr_ty), + unsized_info(bcx.ccx(), a, b, None)) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst: ValueRef, + dst_ty: Ty<'tcx>) { + match (&src_ty.sty, &dst_ty.sty) { + (&ty::TyBox(..), &ty::TyBox(..)) | + (&ty::TyRef(..), &ty::TyRef(..)) | + (&ty::TyRef(..), &ty::TyRawPtr(..)) | + (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { + let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. 
+ let (base, info) = load_fat_ptr(bcx, src, src_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty); + let base = PointerCast(bcx, base, llcast_ty); + (base, info) + } else { + let base = load_ty(bcx, src, src_ty); + unsize_thin_ptr(bcx, base, src_ty, dst_ty) + }; + store_fat_ptr(bcx, base, info, dst, dst_ty); + } + + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { + assert_eq!(def_a, def_b); + + let src_fields = def_a.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_a, f) + }); + let dst_fields = def_b.variants[0].fields.iter().map(|f| { + monomorphize::field_ty(bcx.tcx(), substs_b, f) + }); + + let src = adt::MaybeSizedValue::sized(src); + let dst = adt::MaybeSizedValue::sized(dst); + + let iter = src_fields.zip(dst_fields).enumerate(); + for (i, (src_fty, dst_fty)) in iter { + if type_is_zero_size(bcx.ccx(), dst_fty) { + continue; + } + + let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i); + let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i); + if src_fty == dst_fty { + memcpy_ty(bcx, dst_f, src_f, src_fty); + } else { + coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>, + source_ty: Ty<'tcx>, + target_ty: Ty<'tcx>) + -> CustomCoerceUnsized { + let trait_ref = ty::Binder(ty::TraitRef { + def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(), + substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty]) + }); + + match fulfill_obligation(scx, DUMMY_SP, trait_ref) { + traits::VtableImpl(traits::VtableImplData { impl_def_id, .. 
}) => { + scx.tcx().custom_coerce_unsized_kind(impl_def_id) + } + vtable => { + bug!("invalid CoerceUnsized vtable: {:?}", vtable); + } + } +} + +pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b)) +} + +pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + cast_shift_rhs(op, + lhs, + rhs, + |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) }, + |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) }) +} + +fn cast_shift_rhs(op: hir::BinOp_, + lhs: ValueRef, + rhs: ValueRef, + trunc: F, + zext: G) + -> ValueRef + where F: FnOnce(ValueRef, Type) -> ValueRef, + G: FnOnce(ValueRef, Type) -> ValueRef +{ + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = val_ty(rhs); + let mut lhs_llty = val_ty(lhs); + if rhs_llty.kind() == Vector { + rhs_llty = rhs_llty.element_type() + } + if lhs_llty.kind() == Vector { + lhs_llty = lhs_llty.element_type() + } + let rhs_sz = rhs_llty.int_width(); + let lhs_sz = lhs_llty.int_width(); + if lhs_sz < rhs_sz { + trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME (#1877: If shifting by negative + // values becomes not undefined then this is wrong. 
+ zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + llfn: ValueRef, + llargs: &[ValueRef], + debug_loc: DebugLoc) + -> (ValueRef, Block<'blk, 'tcx>) { + let _icx = push_ctxt("invoke_"); + if bcx.unreachable.get() { + return (C_null(Type::i8(bcx.ccx())), bcx); + } + + if need_invoke(bcx) { + debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); + for &llarg in llargs { + debug!("arg: {:?}", Value(llarg)); + } + let normal_bcx = bcx.fcx.new_block("normal-return"); + let landing_pad = bcx.fcx.get_landing_pad(); + + let llresult = Invoke(bcx, + llfn, + &llargs[..], + normal_bcx.llbb, + landing_pad, + debug_loc); + return (llresult, normal_bcx); + } else { + debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb); + for &llarg in llargs { + debug!("arg: {:?}", Value(llarg)); + } + + let llresult = Call(bcx, llfn, &llargs[..], debug_loc); + return (llresult, bcx); + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". +pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn avoid_invoke(bcx: Block) -> bool { + bcx.sess().no_landing_pads() || bcx.lpad().is_some() +} + +pub fn need_invoke(bcx: Block) -> bool { + if avoid_invoke(bcx) { + false + } else { + bcx.fcx.needs_invoke() + } +} + +pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { + let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume"); + b.call(assume_intrinsic, &[val], None); +} + +/// Helper for loading values from memory. Does the necessary conversion if the in-memory type +/// differs from the type used for SSA values. Also handles various special cases where the type +/// gives us better information about what we are loading. 
+pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { + if cx.unreachable.get() { + return C_undef(type_of::type_of(cx.ccx(), t)); + } + load_ty_builder(&B(cx), ptr, t) +} + +pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { + let ccx = b.ccx; + if type_is_zero_size(ccx, t) { + return C_undef(type_of::type_of(ccx, t)); + } + + unsafe { + let global = llvm::LLVMIsAGlobalVariable(ptr); + if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { + let val = llvm::LLVMGetInitializer(global); + if !val.is_null() { + if t.is_bool() { + return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref()); + } + return val; + } + } + } + + if t.is_bool() { + b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx)) + } else if t.is_char() { + // a char is a Unicode codepoint, and so takes values from 0 + // to 0x10FFFF inclusive only. + b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False) + } else if (t.is_region_ptr() || t.is_unique()) && + !common::type_is_fat_ptr(ccx.tcx(), t) { + b.load_nonnull(ptr) + } else { + b.load(ptr) + } +} + +/// Helper for storing values in memory. Does the necessary conversion if the in-memory type +/// differs from the type used for SSA values. 
+pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { + if cx.unreachable.get() { + return; + } + + debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); + + if common::type_is_fat_ptr(cx.tcx(), t) { + let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR); + let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA); + store_fat_ptr(cx, lladdr, llextra, dst, t); + } else { + Store(cx, from_immediate(cx, v), dst); + } +} + +pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + data: ValueRef, + extra: ValueRef, + dst: ValueRef, + _ty: Ty<'tcx>) { + // FIXME: emit metadata + Store(cx, data, get_dataptr(cx, dst)); + Store(cx, extra, get_meta(cx, dst)); +} + +pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + src: ValueRef, + ty: Ty<'tcx>) + -> (ValueRef, ValueRef) +{ + if cx.unreachable.get() { + // FIXME: remove me + return (Load(cx, get_dataptr(cx, src)), + Load(cx, get_meta(cx, src))); + } + + load_fat_ptr_builder(&B(cx), src, ty) +} + +pub fn load_fat_ptr_builder<'a, 'tcx>( + b: &Builder<'a, 'tcx>, + src: ValueRef, + t: Ty<'tcx>) + -> (ValueRef, ValueRef) +{ + + let ptr = get_dataptr_builder(b, src); + let ptr = if t.is_region_ptr() || t.is_unique() { + b.load_nonnull(ptr) + } else { + b.load(ptr) + }; + + // FIXME: emit metadata on `meta`. 
+ let meta = b.load(get_meta_builder(b, src)); + + (ptr, meta) +} + +pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { + if val_ty(val) == Type::i1(bcx.ccx()) { + ZExt(bcx, val, Type::i8(bcx.ccx())) + } else { + val + } +} + +pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { + if ty.is_bool() { + Trunc(bcx, val, Type::i1(bcx.ccx())) + } else { + val + } +} + +pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> + where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> +{ + let _icx = push_ctxt("with_cond"); + + if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { + return bcx; + } + + let fcx = bcx.fcx; + let next_cx = fcx.new_block("next"); + let cond_cx = fcx.new_block("cond"); + CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); + let after_cx = f(cond_cx); + if !after_cx.terminated.get() { + Br(after_cx, next_cx.llbb, DebugLoc::None); + } + next_cx +} + +pub enum Lifetime { Start, End } + +// If LLVM lifetime intrinsic support is enabled (i.e. optimizations +// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` +// and the intrinsic for `lt` and passes them to `emit`, which is in +// charge of generating code to call the passed intrinsic on whatever +// block of generated code is targetted for the intrinsic. +// +// If LLVM lifetime intrinsic support is disabled (i.e. optimizations +// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
+fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>, + ptr: ValueRef, + lt: Lifetime, + emit: F) + where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef) +{ + if ccx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let _icx = push_ctxt(match lt { + Lifetime::Start => "lifetime_start", + Lifetime::End => "lifetime_end" + }); + + let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()); + if size == 0 { + return; + } + + let lifetime_intrinsic = ccx.get_intrinsic(match lt { + Lifetime::Start => "llvm.lifetime.start", + Lifetime::End => "llvm.lifetime.end" + }); + emit(ccx, size, lifetime_intrinsic) +} + +impl Lifetime { + pub fn call(self, b: &Builder, ptr: ValueRef) { + core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| { + let ptr = b.pointercast(ptr, Type::i8p(ccx)); + b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None); + }); + } +} + +pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) { + if !bcx.unreachable.get() { + Lifetime::Start.call(&bcx.build(), ptr); + } +} + +pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) { + if !bcx.unreachable.get() { + Lifetime::End.call(&bcx.build(), ptr); + } +} + +// Generates code for resumption of unwind at the end of a landing pad. 
+pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { + if !bcx.sess().target.target.options.custom_unwind_resume { + Resume(bcx, lpval); + } else { + let exc_ptr = ExtractValue(bcx, lpval, 0); + bcx.fcx.eh_unwind_resume() + .call(bcx, DebugLoc::None, &[exc_ptr], None); + } +} + +pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: u32) { + let _icx = push_ctxt("call_memcpy"); + let ccx = b.ccx; + let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; + let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); + let memcpy = ccx.get_intrinsic(&key); + let src_ptr = b.pointercast(src, Type::i8p(ccx)); + let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); + let size = b.intcast(n_bytes, ccx.int_type()); + let align = C_i32(ccx, align as i32); + let volatile = C_bool(ccx, false); + b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); +} + +pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { + let _icx = push_ctxt("memcpy_ty"); + let ccx = bcx.ccx(); + + if type_is_zero_size(ccx, t) || bcx.unreachable.get() { + return; + } + + if t.is_structural() { + let llty = type_of::type_of(ccx, t); + let llsz = llsize_of(ccx, llty); + let llalign = type_of::align_of(ccx, t); + call_memcpy(&B(bcx), dst, src, llsz, llalign as u32); + } else if common::type_is_fat_ptr(bcx.tcx(), t) { + let (data, extra) = load_fat_ptr(bcx, src, t); + store_fat_ptr(bcx, data, extra, dst, t); + } else { + store_ty(bcx, load_ty(bcx, src, t), dst, t); + } +} + +pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { + if cx.unreachable.get() { + return; + } + let _icx = push_ctxt("init_zero_mem"); + let bcx = cx; + memfill(&B(bcx), llptr, t, 0); +} + +// Always use this function instead of storing a constant byte to the memory +// in question. e.g. 
if you store a zero constant, LLVM will drown in vreg +// allocation for large data structures, and the generated code will be +// awful. (A telltale sign of this is large quantities of +// `mov [byte ptr foo],0` in the generated code.) +fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { + let _icx = push_ctxt("memfill"); + let ccx = b.ccx; + let llty = type_of::type_of(ccx, ty); + let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); + let llzeroval = C_u8(ccx, byte); + let size = machine::llsize_of(ccx, llty); + let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); + call_memset(b, llptr, llzeroval, size, align, false); +} + +pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, + ptr: ValueRef, + fill_byte: ValueRef, + size: ValueRef, + align: ValueRef, + volatile: bool) { + let ccx = b.ccx; + let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); + let volatile = C_bool(ccx, volatile); + b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); +} + +pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + ty: Ty<'tcx>, + name: &str) -> ValueRef { + assert!(!ty.has_param_types()); + alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) +} + +pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { + let _icx = push_ctxt("alloca"); + if cx.unreachable.get() { + unsafe { + return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); + } + } + DebugLoc::None.apply(cx.fcx); + Alloca(cx, ty, name) +} + +impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { + /// Create a function context for the given function. + /// Beware that you must call `fcx.init` or `fcx.bind_args` + /// before doing anything with the returned function context. 
+ pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, + llfndecl: ValueRef, + fn_ty: FnType, + definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, + block_arena: &'blk TypedArena>) + -> FunctionContext<'blk, 'tcx> { + let (param_substs, def_id) = match definition { + Some((instance, ..)) => { + common::validate_substs(instance.substs); + (instance.substs, Some(instance.def)) + } + None => (ccx.tcx().intern_substs(&[]), None) + }; + + let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); + + debug!("FunctionContext::new({})", + definition.map_or(String::new(), |d| d.0.to_string())); + + let no_debug = if let Some(id) = local_id { + ccx.tcx().map.attrs(id) + .iter().any(|item| item.check_name("no_debug")) + } else if let Some(def_id) = def_id { + ccx.sess().cstore.item_attrs(def_id) + .iter().any(|item| item.check_name("no_debug")) + } else { + false + }; + + let mir = def_id.map(|id| ccx.tcx().item_mir(id)); + + let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = + (no_debug, definition, &mir) { + debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) + } else { + debuginfo::empty_function_debug_context(ccx) + }; + + FunctionContext { + mir: mir, + llfn: llfndecl, + llretslotptr: Cell::new(None), + param_env: ccx.tcx().empty_parameter_environment(), + alloca_insert_pt: Cell::new(None), + landingpad_alloca: Cell::new(None), + fn_ty: fn_ty, + param_substs: param_substs, + span: None, + block_arena: block_arena, + lpad_arena: TypedArena::new(), + ccx: ccx, + debug_context: debug_context, + scopes: RefCell::new(Vec::new()), + } + } + + /// Performs setup on a newly created function, creating the entry + /// scope block and allocating space for the return pointer. + pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> { + let entry_bcx = self.new_block("entry-block"); + + // Use a dummy instruction as the insertion point for all allocas. 
+ // This is later removed in FunctionContext::cleanup. + self.alloca_insert_pt.set(Some(unsafe { + Load(entry_bcx, C_null(Type::i8p(self.ccx))); + llvm::LLVMGetFirstInstruction(entry_bcx.llbb) + })); + + if !self.fn_ty.ret.is_ignore() && !skip_retptr { + // We normally allocate the llretslotptr, unless we + // have been instructed to skip it for immediate return + // values, or there is nothing to return at all. + + // We create an alloca to hold a pointer of type `ret.original_ty` + // which will hold the pointer to the right alloca which has the + // final ret value + let llty = self.fn_ty.ret.memory_ty(self.ccx); + // But if there are no nested returns, we skip the indirection + // and have a single retslot + let slot = if self.fn_ty.ret.is_indirect() { + get_param(self.llfn, 0) + } else { + AllocaFcx(self, llty, "sret_slot") + }; + + self.llretslotptr.set(Some(slot)); + } + + entry_bcx + } + + /// Ties up the llstaticallocas -> llloadenv -> lltop edges, + /// and builds the return block. + pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>, + ret_debug_loc: DebugLoc) { + let _icx = push_ctxt("FunctionContext::finish"); + + self.build_return_block(ret_cx, ret_debug_loc); + + DebugLoc::None.apply(self); + self.cleanup(); + } + + // Builds the return block for a function. + pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>, + ret_debug_location: DebugLoc) { + if self.llretslotptr.get().is_none() || + ret_cx.unreachable.get() || + self.fn_ty.ret.is_indirect() { + return RetVoid(ret_cx, ret_debug_location); + } + + let retslot = self.llretslotptr.get().unwrap(); + let retptr = Value(retslot); + let llty = self.fn_ty.ret.original_ty; + match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { + // If there's only a single store to the ret slot, we can directly return + // the value that was stored and omit the store and the alloca. + // However, we only want to do this when there is no cast needed. 
+ (Some(s), None) => { + let mut retval = s.get_operand(0).unwrap().get(); + s.erase_from_parent(); + + if retptr.has_no_uses() { + retptr.erase_from_parent(); + } + + if self.fn_ty.ret.is_indirect() { + Store(ret_cx, retval, get_param(self.llfn, 0)); + RetVoid(ret_cx, ret_debug_location) + } else { + if llty == Type::i1(self.ccx) { + retval = Trunc(ret_cx, retval, llty); + } + Ret(ret_cx, retval, ret_debug_location) + } + } + (_, cast_ty) if self.fn_ty.ret.is_indirect() => { + // Otherwise, copy the return value to the ret slot. + assert_eq!(cast_ty, None); + let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty); + let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); + call_memcpy(&B(ret_cx), get_param(self.llfn, 0), + retslot, llsz, llalign as u32); + RetVoid(ret_cx, ret_debug_location) + } + (_, Some(cast_ty)) => { + let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to())); + let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); + unsafe { + llvm::LLVMSetAlignment(load, llalign); + } + Ret(ret_cx, load, ret_debug_location) + } + (_, None) => { + let retval = if llty == Type::i1(self.ccx) { + let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False); + Trunc(ret_cx, val, llty) + } else { + Load(ret_cx, retslot) + }; + Ret(ret_cx, retval, ret_debug_location) + } + } + } +} + +pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { + let _s = if ccx.sess().trans_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(ccx.tcx(), true, true) + .push_def_path(instance.def, &mut instance_name); + Some(StatRecorder::new(ccx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. 
+ info!("trans_instance({})", instance); + + let _icx = push_ctxt("trans_instance"); + + let fn_ty = ccx.tcx().item_type(instance.def); + let fn_ty = ccx.tcx().erase_regions(&fn_ty); + let fn_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &fn_ty); + + let ty::BareFnTy { abi, ref sig, .. } = *common::ty_fn_ty(ccx, fn_ty); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(sig); + + let lldecl = match ccx.instances().borrow().get(&instance) { + Some(&val) => val, + None => bug!("Instance `{:?}` not already declared", instance) + }; + + ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); + + if !ccx.sess().no_landing_pads() { + attributes::emit_uwtable(lldecl, true); + } + + let fn_ty = FnType::new(ccx, abi, &sig, &[]); + + let (arena, fcx): (TypedArena<_>, FunctionContext); + arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, + lldecl, + fn_ty, + Some((instance, &sig, abi)), + &arena); + + if fcx.mir.is_none() { + bug!("attempted translation of `{}` w/o MIR", instance); + } + + mir::trans_mir(&fcx); +} + +pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + disr: Disr, + llfndecl: ValueRef) { + attributes::inline(llfndecl, attributes::InlineAttr::Hint); + attributes::set_frame_pointer_elimination(ccx, llfndecl); + + let ctor_ty = ccx.tcx().item_type(def_id); + let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty); + + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); + let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); + + let (arena, fcx): (TypedArena<_>, FunctionContext); + arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena); + let bcx = fcx.init(false); + + if !fcx.fn_ty.ret.is_ignore() { + let dest = fcx.llretslotptr.get().unwrap(); + let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value + let mut llarg_idx = fcx.fn_ty.ret.is_indirect() 
as usize; + let mut arg_idx = 0; + for (i, arg_ty) in sig.inputs.into_iter().enumerate() { + let lldestptr = adt::trans_field_ptr(bcx, sig.output, dest_val, Disr::from(disr), i); + let arg = &fcx.fn_ty.args[arg_idx]; + arg_idx += 1; + let b = &bcx.build(); + if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { + let meta = &fcx.fn_ty.args[arg_idx]; + arg_idx += 1; + arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr)); + meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr)); + } else { + arg.store_fn_arg(b, &mut llarg_idx, lldestptr); + } + } + adt::trans_set_discr(bcx, sig.output, dest, disr); + } + + fcx.finish(bcx, DebugLoc::None); +} + +pub fn llvm_linkage_by_name(name: &str) -> Option { + // Use the names from src/llvm/docs/LangRef.rst here. Most types are only + // applicable to variable declarations and may not really make sense for + // Rust code in the first place but whitelist them anyway and trust that + // the user knows what s/he's doing. Who knows, unanticipated use cases + // may pop up in the future. + // + // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported + // and don't have to be, LLVM treats them as no-ops. 
+ match name { + "appending" => Some(llvm::Linkage::AppendingLinkage), + "available_externally" => Some(llvm::Linkage::AvailableExternallyLinkage), + "common" => Some(llvm::Linkage::CommonLinkage), + "extern_weak" => Some(llvm::Linkage::ExternalWeakLinkage), + "external" => Some(llvm::Linkage::ExternalLinkage), + "internal" => Some(llvm::Linkage::InternalLinkage), + "linkonce" => Some(llvm::Linkage::LinkOnceAnyLinkage), + "linkonce_odr" => Some(llvm::Linkage::LinkOnceODRLinkage), + "private" => Some(llvm::Linkage::PrivateLinkage), + "weak" => Some(llvm::Linkage::WeakAnyLinkage), + "weak_odr" => Some(llvm::Linkage::WeakODRLinkage), + _ => None, + } +} + +pub fn set_link_section(ccx: &CrateContext, + llval: ValueRef, + attrs: &[ast::Attribute]) { + if let Some(sect) = attr::first_attr_value_str_by_name(attrs, "link_section") { + if contains_null(§.as_str()) { + ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", §)); + } + unsafe { + let buf = CString::new(sect.as_str().as_bytes()).unwrap(); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } + } +} + +/// Create the `main` function which will initialise the rust runtime and call +/// users’ main function. +pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { + let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() { + Some((id, span)) => { + (ccx.tcx().map.local_def_id(id), span) + } + None => return, + }; + + // check for the #[rustc_error] annotation, which forces an + // error in trans. This is used to write compile-fail tests + // that actually test that compilation succeeds without + // reporting an error. + if ccx.tcx().has_attr(main_def_id, "rustc_error") { + ccx.tcx().sess.span_fatal(span, "compilation successful"); + } + + let instance = Instance::mono(ccx.shared(), main_def_id); + + if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. 
+ return; + } + + let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx); + + let et = ccx.sess().entry_type.get().unwrap(); + match et { + config::EntryMain => { + create_entry_fn(ccx, span, main_llfn, true); + } + config::EntryStart => create_entry_fn(ccx, span, main_llfn, false), + config::EntryNone => {} // Do nothing. + } + + fn create_entry_fn(ccx: &CrateContext, + sp: Span, + rust_main: ValueRef, + use_start_lang_item: bool) { + let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type()); + + if declare::get_defined_value(ccx, "main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") + .emit(); + ccx.sess().abort_if_errors(); + bug!(); + } + let llfn = declare::declare_cfn(ccx, "main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + attributes::set_frame_pointer_elimination(ccx, llfn); + + let llbb = unsafe { + llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _) + }; + let bld = ccx.raw_builder(); + unsafe { + llvm::LLVMPositionBuilderAtEnd(bld, llbb); + + debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx); + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) { + Ok(id) => id, + Err(s) => ccx.sess().fatal(&s) + }; + let empty_substs = ccx.tcx().intern_substs(&[]); + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); + let args = { + let opaque_rust_main = + llvm::LLVMBuildPointerCast(bld, + rust_main, + Type::i8p(ccx).to_ref(), + "rust_main\0".as_ptr() as *const _); + + vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)] + }; + (start_fn, args) + } else { + debug!("using user-defined start fn"); + let args = vec![get_param(llfn, 0 as 
c_uint), get_param(llfn, 1 as c_uint)]; + + (rust_main, args) + }; + + let result = llvm::LLVMRustBuildCall(bld, + start_fn, + args.as_ptr(), + args.len() as c_uint, + ptr::null_mut(), + noname()); + + llvm::LLVMBuildRet(bld, result); + } + } +} + +fn contains_null(s: &str) -> bool { + s.bytes().any(|b| b == 0) +} + +fn write_metadata(cx: &SharedCrateContext, + reachable_ids: &NodeSet) -> Vec { + use flate; + + #[derive(PartialEq, Eq, PartialOrd, Ord)] + enum MetadataKind { + None, + Uncompressed, + Compressed + } + + let kind = cx.sess().crate_types.borrow().iter().map(|ty| { + match *ty { + config::CrateTypeExecutable | + config::CrateTypeStaticlib | + config::CrateTypeCdylib => MetadataKind::None, + + config::CrateTypeRlib | + config::CrateTypeMetadata => MetadataKind::Uncompressed, + + config::CrateTypeDylib | + config::CrateTypeProcMacro => MetadataKind::Compressed, + } + }).max().unwrap(); + + if kind == MetadataKind::None { + return Vec::new(); + } + + let cstore = &cx.tcx().sess.cstore; + let metadata = cstore.encode_metadata(cx.tcx(), + cx.export_map(), + cx.link_meta(), + reachable_ids); + if kind == MetadataKind::Uncompressed { + return metadata; + } + + assert!(kind == MetadataKind::Compressed); + let mut compressed = cstore.metadata_encoding_version().to_vec(); + compressed.extend_from_slice(&flate::deflate_bytes(&metadata)); + + let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]); + let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false); + let name = cx.metadata_symbol_name(); + let buf = CString::new(name).unwrap(); + let llglobal = unsafe { + llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr()) + }; + unsafe { + llvm::LLVMSetInitializer(llglobal, llconst); + let section_name = + cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target); + let name = CString::new(section_name).unwrap(); + llvm::LLVMSetSection(llglobal, name.as_ptr()); + + // Also generate a .section directive to 
force no + // flags, at least for ELF outputs, so that the + // metadata doesn't get loaded into memory. + let directive = format!(".section {}", section_name); + let directive = CString::new(directive).unwrap(); + llvm::LLVMSetModuleInlineAsm(cx.metadata_llmod(), directive.as_ptr()) + } + return metadata; +} + +/// Find any symbols that are defined in one compilation unit, but not declared +/// in any other compilation unit. Give these symbols internal linkage. +fn internalize_symbols<'a, 'tcx>(sess: &Session, + ccxs: &CrateContextList<'a, 'tcx>, + symbol_map: &SymbolMap<'tcx>, + reachable: &FxHashSet<&str>) { + let scx = ccxs.shared(); + let tcx = scx.tcx(); + + // In incr. comp. mode, we can't necessarily see all refs since we + // don't generate LLVM IR for reused modules, so skip this + // step. Later we should get smarter. + if sess.opts.debugging_opts.incremental.is_some() { + return; + } + + // 'unsafe' because we are holding on to CStr's from the LLVM module within + // this block. + unsafe { + let mut referenced_somewhere = FxHashSet(); + + // Collect all symbols that need to stay externally visible because they + // are referenced via a declaration in some other codegen unit. + for ccx in ccxs.iter_need_trans() { + for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { + let linkage = llvm::LLVMRustGetLinkage(val); + // We only care about external declarations (not definitions) + // and available_externally definitions. + let is_available_externally = linkage == llvm::Linkage::AvailableExternallyLinkage; + let is_decl = llvm::LLVMIsDeclaration(val) != 0; + + if is_decl || is_available_externally { + let symbol_name = CStr::from_ptr(llvm::LLVMGetValueName(val)); + referenced_somewhere.insert(symbol_name); + } + } + } + + // Also collect all symbols for which we cannot adjust linkage, because + // it is fixed by some directive in the source code (e.g. #[no_mangle]). 
+ let linkage_fixed_explicitly: FxHashSet<_> = scx + .translation_items() + .borrow() + .iter() + .cloned() + .filter(|trans_item|{ + trans_item.explicit_linkage(tcx).is_some() + }) + .map(|trans_item| symbol_map.get_or_compute(scx, trans_item)) + .collect(); + + // Examine each external definition. If the definition is not used in + // any other compilation unit, and is not reachable from other crates, + // then give it internal linkage. + for ccx in ccxs.iter_need_trans() { + for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { + let linkage = llvm::LLVMRustGetLinkage(val); + + let is_externally_visible = (linkage == llvm::Linkage::ExternalLinkage) || + (linkage == llvm::Linkage::LinkOnceODRLinkage) || + (linkage == llvm::Linkage::WeakODRLinkage); + let is_definition = llvm::LLVMIsDeclaration(val) == 0; + + // If this is a definition (as opposed to just a declaration) + // and externally visible, check if we can internalize it + if is_definition && is_externally_visible { + let name_cstr = CStr::from_ptr(llvm::LLVMGetValueName(val)); + let name_str = name_cstr.to_str().unwrap(); + let name_cow = Cow::Borrowed(name_str); + + let is_referenced_somewhere = referenced_somewhere.contains(&name_cstr); + let is_reachable = reachable.contains(&name_str); + let has_fixed_linkage = linkage_fixed_explicitly.contains(&name_cow); + + if !is_referenced_somewhere && !is_reachable && !has_fixed_linkage { + llvm::LLVMRustSetLinkage(val, llvm::Linkage::InternalLinkage); + llvm::LLVMSetDLLStorageClass(val, + llvm::DLLStorageClass::Default); + llvm::UnsetComdat(val); + } + } + } + } + } +} + +// Create a `__imp_ = &symbol` global for every public static `symbol`. +// This is required to satisfy `dllimport` references to static data in .rlibs +// when using MSVC linker. We do this only for data, as linker can fix up +// code references on its own. 
+// See #26591, #27438 +fn create_imps(cx: &CrateContextList) { + // The x86 ABI seems to require that leading underscores are added to symbol + // names, so we need an extra underscore on 32-bit. There's also a leading + // '\x01' here which disables LLVM's symbol mangling (e.g. no extra + // underscores added in front). + let prefix = if cx.shared().sess().target.target.target_pointer_width == "32" { + "\x01__imp__" + } else { + "\x01__imp_" + }; + unsafe { + for ccx in cx.iter_need_trans() { + let exported: Vec<_> = iter_globals(ccx.llmod()) + .filter(|&val| { + llvm::LLVMRustGetLinkage(val) == + llvm::Linkage::ExternalLinkage && + llvm::LLVMIsDeclaration(val) == 0 + }) + .collect(); + + let i8p_ty = Type::i8p(&ccx); + for val in exported { + let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); + let mut imp_name = prefix.as_bytes().to_vec(); + imp_name.extend(name.to_bytes()); + let imp_name = CString::new(imp_name).unwrap(); + let imp = llvm::LLVMAddGlobal(ccx.llmod(), + i8p_ty.to_ref(), + imp_name.as_ptr() as *const _); + let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); + llvm::LLVMSetInitializer(imp, init); + llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); + } + } + } +} + +struct ValueIter { + cur: ValueRef, + step: unsafe extern "C" fn(ValueRef) -> ValueRef, +} + +impl Iterator for ValueIter { + type Item = ValueRef; + + fn next(&mut self) -> Option { + let old = self.cur; + if !old.is_null() { + self.cur = unsafe { (self.step)(old) }; + Some(old) + } else { + None + } + } +} + +fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter { + unsafe { + ValueIter { + cur: llvm::LLVMGetFirstGlobal(llmod), + step: llvm::LLVMGetNextGlobal, + } + } +} + +fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter { + unsafe { + ValueIter { + cur: llvm::LLVMGetFirstFunction(llmod), + step: llvm::LLVMGetNextFunction, + } + } +} + +/// The context provided lists a set of reachable ids as calculated by +/// middle::reachable, but this contains far 
more ids and symbols than we're +/// actually exposing from the object file. This function will filter the set in +/// the context to the set of ids which correspond to symbols that are exposed +/// from the object file being generated. +/// +/// This list is later used by linkers to determine the set of symbols needed to +/// be exposed from a dynamic library and it's also encoded into the metadata. +pub fn filter_reachable_ids(tcx: TyCtxt, reachable: NodeSet) -> NodeSet { + reachable.into_iter().filter(|&id| { + // Next, we want to ignore some FFI functions that are not exposed from + // this crate. Reachable FFI functions can be lumped into two + // categories: + // + // 1. Those that are included statically via a static library + // 2. Those included otherwise (e.g. dynamically or via a framework) + // + // Although our LLVM module is not literally emitting code for the + // statically included symbols, it's an export of our library which + // needs to be passed on to the linker and encoded in the metadata. + // + // As a result, if this id is an FFI item (foreign item) then we only + // let it through if it's included statically. + match tcx.map.get(id) { + hir_map::NodeForeignItem(..) => { + tcx.sess.cstore.is_statically_included_foreign_item(id) + } + + // Only consider nodes that actually have exported symbols. + hir_map::NodeItem(&hir::Item { + node: hir::ItemStatic(..), .. }) | + hir_map::NodeItem(&hir::Item { + node: hir::ItemFn(..), .. }) | + hir_map::NodeImplItem(&hir::ImplItem { + node: hir::ImplItemKind::Method(..), .. }) => { + let def_id = tcx.map.local_def_id(id); + let generics = tcx.item_generics(def_id); + let attributes = tcx.get_attrs(def_id); + (generics.parent_types == 0 && generics.types.is_empty()) && + // Functions marked with #[inline] are only ever translated + // with "internal" linkage and are never exported. 
+ !attr::requests_inline(&attributes[..]) + } + + _ => false + } + }).collect() +} + +pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + analysis: ty::CrateAnalysis, + incremental_hashes_map: &IncrementalHashesMap) + -> CrateTranslation { + let _task = tcx.dep_graph.in_task(DepNode::TransCrate); + + // Be careful with this krate: obviously it gives access to the + // entire contents of the krate. So if you push any subtasks of + // `TransCrate`, you need to be careful to register "reads" of the + // particular items that will be processed. + let krate = tcx.map.krate(); + + let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis; + let reachable = filter_reachable_ids(tcx, reachable); + + let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks { + v + } else { + tcx.sess.opts.debug_assertions + }; + + let link_meta = link::build_link_meta(incremental_hashes_map, &name); + + let shared_ccx = SharedCrateContext::new(tcx, + export_map, + link_meta.clone(), + reachable, + check_overflow); + // Translate the metadata. + let metadata = time(tcx.sess.time_passes(), "write metadata", || { + write_metadata(&shared_ccx, shared_ccx.reachable()) + }); + + let metadata_module = ModuleTranslation { + name: "metadata".to_string(), + symbol_name_hash: 0, // we always rebuild metadata, at least for now + source: ModuleSource::Translated(ModuleLlvm { + llcx: shared_ccx.metadata_llcx(), + llmod: shared_ccx.metadata_llmod(), + }), + }; + let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); + + // Run the translation item collector and partition the collected items into + // codegen units. 
+ let (codegen_units, symbol_map) = collect_and_partition_translation_items(&shared_ccx); + + let symbol_map = Rc::new(symbol_map); + + let previous_work_products = trans_reuse_previous_work_products(tcx, + &codegen_units, + &symbol_map); + + let crate_context_list = CrateContextList::new(&shared_ccx, + codegen_units, + previous_work_products, + symbol_map.clone()); + let modules: Vec<_> = crate_context_list.iter_all() + .map(|ccx| { + let source = match ccx.previous_work_product() { + Some(buf) => ModuleSource::Preexisting(buf.clone()), + None => ModuleSource::Translated(ModuleLlvm { + llcx: ccx.llcx(), + llmod: ccx.llmod(), + }), + }; + + ModuleTranslation { + name: String::from(ccx.codegen_unit().name()), + symbol_name_hash: ccx.codegen_unit().compute_symbol_name_hash(tcx, &symbol_map), + source: source, + } + }) + .collect(); + + assert_module_sources::assert_module_sources(tcx, &modules); + + // Skip crate items and just output metadata in -Z no-trans mode. + if tcx.sess.opts.debugging_opts.no_trans || + tcx.sess.crate_types.borrow().iter().all(|ct| ct == &config::CrateTypeMetadata) { + let linker_info = LinkerInfo::new(&shared_ccx, &[]); + return CrateTranslation { + modules: modules, + metadata_module: metadata_module, + link: link_meta, + metadata: metadata, + reachable: vec![], + no_builtins: no_builtins, + linker_info: linker_info, + windows_subsystem: None, + }; + } + + // Instantiate translation items without filling out definitions yet... + for ccx in crate_context_list.iter_need_trans() { + let cgu = ccx.codegen_unit(); + let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map); + + tcx.dep_graph.with_task(cgu.work_product_dep_node(), || { + for (trans_item, linkage) in trans_items { + trans_item.predefine(&ccx, linkage); + } + }); + } + + // ... and now that we have everything pre-defined, fill out those definitions. 
+ for ccx in crate_context_list.iter_need_trans() { + let cgu = ccx.codegen_unit(); + let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map); + tcx.dep_graph.with_task(cgu.work_product_dep_node(), || { + for (trans_item, _) in trans_items { + trans_item.define(&ccx); + } + + // If this codegen unit contains the main function, also create the + // wrapper here + maybe_create_entry_wrapper(&ccx); + + // Run replace-all-uses-with for statics that need it + for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() { + unsafe { + let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); + } + } + + // Finalize debuginfo + if ccx.sess().opts.debuginfo != NoDebugInfo { + debuginfo::finalize(&ccx); + } + }); + } + + symbol_names_test::report_symbol_names(&shared_ccx); + + if shared_ccx.sess().trans_stats() { + let stats = shared_ccx.stats(); + println!("--- trans stats ---"); + println!("n_glues_created: {}", stats.n_glues_created.get()); + println!("n_null_glues: {}", stats.n_null_glues.get()); + println!("n_real_glues: {}", stats.n_real_glues.get()); + + println!("n_fns: {}", stats.n_fns.get()); + println!("n_inlines: {}", stats.n_inlines.get()); + println!("n_closures: {}", stats.n_closures.get()); + println!("fn stats:"); + stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| { + insns_b.cmp(&insns_a) + }); + for tuple in stats.fn_stats.borrow().iter() { + match *tuple { + (ref name, insns) => { + println!("{} insns, {}", insns, *name); + } + } + } + } + + if shared_ccx.sess().count_llvm_insns() { + for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() { + println!("{:7} {}", *v, *k); + } + } + + let sess = shared_ccx.sess(); + let mut reachable_symbols = shared_ccx.reachable().iter().map(|&id| { + let def_id = shared_ccx.tcx().map.local_def_id(id); + symbol_for_def_id(def_id, &shared_ccx, &symbol_map) + }).collect::>(); + + if 
sess.entry_fn.borrow().is_some() { + reachable_symbols.push("main".to_string()); + } + + if sess.crate_types.borrow().contains(&config::CrateTypeDylib) { + reachable_symbols.push(shared_ccx.metadata_symbol_name()); + } + + // For the purposes of LTO or when creating a cdylib, we add to the + // reachable set all of the upstream reachable extern fns. These functions + // are all part of the public ABI of the final product, so we need to + // preserve them. + // + // Note that this happens even if LTO isn't requested or we're not creating + // a cdylib. In those cases, though, we're not even reading the + // `reachable_symbols` list later on so it should be ok. + for cnum in sess.cstore.crates() { + let syms = sess.cstore.reachable_ids(cnum); + reachable_symbols.extend(syms.into_iter().filter(|&def_id| { + let applicable = match sess.cstore.describe_def(def_id) { + Some(Def::Static(..)) => true, + Some(Def::Fn(_)) => { + shared_ccx.tcx().item_generics(def_id).types.is_empty() + } + _ => false + }; + + if applicable { + let attrs = shared_ccx.tcx().get_attrs(def_id); + attr::contains_extern_indicator(sess.diagnostic(), &attrs) + } else { + false + } + }).map(|did| { + symbol_for_def_id(did, &shared_ccx, &symbol_map) + })); + } + + time(shared_ccx.sess().time_passes(), "internalize symbols", || { + internalize_symbols(sess, + &crate_context_list, + &symbol_map, + &reachable_symbols.iter() + .map(|s| &s[..]) + .collect()) + }); + + if tcx.sess.opts.debugging_opts.print_type_sizes { + gather_type_sizes(tcx); + } + + if sess.target.target.options.is_like_msvc && + sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { + create_imps(&crate_context_list); + } + + let linker_info = LinkerInfo::new(&shared_ccx, &reachable_symbols); + + let subsystem = attr::first_attr_value_str_by_name(&krate.attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + 
tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + CrateTranslation { + modules: modules, + metadata_module: metadata_module, + link: link_meta, + metadata: metadata, + reachable: reachable_symbols, + no_builtins: no_builtins, + linker_info: linker_info, + windows_subsystem: windows_subsystem, + } +} + +fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let layout_cache = tcx.layout_cache.borrow(); + for (ty, layout) in layout_cache.iter() { + + // (delay format until we actually need it) + let record = |kind, opt_discr_size, variants| { + let type_desc = format!("{:?}", ty); + let overall_size = layout.size(&tcx.data_layout); + let align = layout.align(&tcx.data_layout); + tcx.sess.code_stats.borrow_mut().record_type_size(kind, + type_desc, + align, + overall_size, + opt_discr_size, + variants); + }; + + let (adt_def, substs) = match ty.sty { + ty::TyAdt(ref adt_def, substs) => { + debug!("print-type-size t: `{:?}` process adt", ty); + (adt_def, substs) + } + + ty::TyClosure(..) 
=> { + debug!("print-type-size t: `{:?}` record closure", ty); + record(DataTypeKind::Closure, None, vec![]); + continue; + } + + _ => { + debug!("print-type-size t: `{:?}` skip non-nominal", ty); + continue; + } + }; + + let adt_kind = adt_def.adt_kind(); + + let build_field_info = |(field_name, field_ty): (ast::Name, Ty), offset: &layout::Size| { + match layout_cache.get(&field_ty) { + None => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty), + Some(field_layout) => { + session::FieldInfo { + name: field_name.to_string(), + offset: offset.bytes(), + size: field_layout.size(&tcx.data_layout).bytes(), + align: field_layout.align(&tcx.data_layout).abi(), + } + } + } + }; + + let build_primitive_info = |name: ast::Name, value: &layout::Primitive| { + session::VariantInfo { + name: Some(name.to_string()), + kind: session::SizeKind::Exact, + align: value.align(&tcx.data_layout).abi(), + size: value.size(&tcx.data_layout).bytes(), + fields: vec![], + } + }; + + enum Fields<'a> { + WithDiscrim(&'a layout::Struct), + NoDiscrim(&'a layout::Struct), + } + + let build_variant_info = |n: Option, flds: &[(ast::Name, Ty)], layout: Fields| { + let (s, field_offsets) = match layout { + Fields::WithDiscrim(s) => (s, &s.offsets[1..]), + Fields::NoDiscrim(s) => (s, &s.offsets[0..]), + }; + let field_info: Vec<_> = flds.iter() + .zip(field_offsets.iter()) + .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset)) + .collect(); + + session::VariantInfo { + name: n.map(|n|n.to_string()), + kind: if s.sized { + session::SizeKind::Exact + } else { + session::SizeKind::Min + }, + align: s.align.abi(), + size: s.min_size.bytes(), + fields: field_info, + } + }; + + match **layout { + Layout::StructWrappedNullablePointer { nonnull: ref variant_layout, + nndiscr, + discrfield: _ } => { + debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}", + ty, nndiscr, variant_layout); + let variant_def = &adt_def.variants[nndiscr as 
usize]; + let fields: Vec<_> = variant_def.fields.iter() + .map(|field_def| (field_def.name, field_def.ty(tcx, substs))) + .collect(); + record(adt_kind.into(), + None, + vec![build_variant_info(Some(variant_def.name), + &fields, + Fields::NoDiscrim(variant_layout))]); + } + Layout::RawNullablePointer { nndiscr, value } => { + debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}", + ty, nndiscr, value); + let variant_def = &adt_def.variants[nndiscr as usize]; + record(adt_kind.into(), None, + vec![build_primitive_info(variant_def.name, &value)]); + } + Layout::Univariant { variant: ref variant_layout, non_zero: _ } => { + let variant_names = || { + adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() + }; + debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}", + ty, variant_layout, variant_names()); + assert!(adt_def.variants.len() <= 1, + "univariant with variants {:?}", variant_names()); + if adt_def.variants.len() == 1 { + let variant_def = &adt_def.variants[0]; + let fields: Vec<_> = variant_def.fields.iter() + .map(|field_def| (field_def.name, field_def.ty(tcx, substs))) + .collect(); + record(adt_kind.into(), + None, + vec![build_variant_info(Some(variant_def.name), + &fields, + Fields::NoDiscrim(variant_layout))]); + } else { + // (This case arises for *empty* enums; so give it + // zero variants.) + record(adt_kind.into(), None, vec![]); + } + } + + Layout::General { ref variants, discr, .. 
} => { + debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}", + ty, adt_def.variants.len(), variants.len(), variants); + let variant_infos: Vec<_> = adt_def.variants.iter() + .zip(variants.iter()) + .map(|(variant_def, variant_layout)| { + let fields: Vec<_> = variant_def.fields.iter() + .map(|field_def| (field_def.name, field_def.ty(tcx, substs))) + .collect(); + build_variant_info(Some(variant_def.name), + &fields, + Fields::WithDiscrim(variant_layout)) + }) + .collect(); + record(adt_kind.into(), Some(discr.size()), variant_infos); + } + + Layout::UntaggedUnion { ref variants } => { + debug!("print-type-size t: `{:?}` adt union variants {:?}", + ty, variants); + // layout does not currently store info about each + // variant... + record(adt_kind.into(), None, Vec::new()); + } + + Layout::CEnum { discr, .. } => { + debug!("print-type-size t: `{:?}` adt c-like enum", ty); + let variant_infos: Vec<_> = adt_def.variants.iter() + .map(|variant_def| { + build_primitive_info(variant_def.name, + &layout::Primitive::Int(discr)) + }) + .collect(); + record(adt_kind.into(), Some(discr.size()), variant_infos); + } + + // other cases provide little interesting (i.e. adjustable + // via representation tweaks) size info beyond total size. + Layout::Scalar { .. } | + Layout::Vector { .. } | + Layout::Array { .. } | + Layout::FatPointer { .. } => { + debug!("print-type-size t: `{:?}` adt other", ty); + record(adt_kind.into(), None, Vec::new()) + } + } + } +} + +/// For each CGU, identify if we can reuse an existing object file (or +/// maybe other context). 
+fn trans_reuse_previous_work_products(tcx: TyCtxt, + codegen_units: &[CodegenUnit], + symbol_map: &SymbolMap) + -> Vec> { + debug!("trans_reuse_previous_work_products()"); + codegen_units + .iter() + .map(|cgu| { + let id = cgu.work_product_id(); + + let hash = cgu.compute_symbol_name_hash(tcx, symbol_map); + + debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash); + + if let Some(work_product) = tcx.dep_graph.previous_work_product(&id) { + if work_product.input_hash == hash { + debug!("trans_reuse_previous_work_products: reusing {:?}", work_product); + return Some(work_product); + } else { + if tcx.sess.opts.debugging_opts.incremental_info { + println!("incremental: CGU `{}` invalidated because of \ + changed partitioning hash.", + cgu.name()); + } + debug!("trans_reuse_previous_work_products: \ + not reusing {:?} because hash changed to {:?}", + work_product, hash); + } + } + + None + }) + .collect() +} + +fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>) + -> (Vec>, SymbolMap<'tcx>) { + let time_passes = scx.sess().time_passes(); + + let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + TransItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!("Unknown codegen-item collection mode '{}'. 
\ + Falling back to 'lazy' mode.", + mode_string); + scx.sess().warn(&message); + } + + TransItemCollectionMode::Lazy + } + } + None => TransItemCollectionMode::Lazy + }; + + let (items, inlining_map) = + time(time_passes, "translation item collection", || { + collector::collect_crate_translation_items(&scx, collection_mode) + }); + + let symbol_map = SymbolMap::build(scx, items.iter().cloned()); + + let strategy = if scx.sess().opts.debugging_opts.incremental.is_some() { + PartitioningStrategy::PerModule + } else { + PartitioningStrategy::FixedUnitCount(scx.sess().opts.cg.codegen_units) + }; + + let codegen_units = time(time_passes, "codegen unit partitioning", || { + partitioning::partition(scx, + items.iter().cloned(), + strategy, + &inlining_map) + }); + + assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() || + scx.tcx().sess.opts.debugging_opts.incremental.is_some()); + + { + let mut ccx_map = scx.translation_items().borrow_mut(); + + for trans_item in items.iter().cloned() { + ccx_map.insert(trans_item); + } + } + + if scx.sess().opts.debugging_opts.print_trans_items.is_some() { + let mut item_to_cgus = FxHashMap(); + + for cgu in &codegen_units { + for (&trans_item, &linkage) in cgu.items() { + item_to_cgus.entry(trans_item) + .or_insert(Vec::new()) + .push((cgu.name().clone(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = i.to_string(scx.tcx()); + output.push_str(" @@"); + let mut empty = Vec::new(); + let mut cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); + cgus.dedup(); + for &(ref cgu_name, linkage) in cgus.iter() { + output.push_str(" "); + output.push_str(&cgu_name[..]); + + let linkage_abbrev = match linkage { + llvm::Linkage::ExternalLinkage => "External", + llvm::Linkage::AvailableExternallyLinkage => "Available", + llvm::Linkage::LinkOnceAnyLinkage => "OnceAny", + llvm::Linkage::LinkOnceODRLinkage => 
"OnceODR", + llvm::Linkage::WeakAnyLinkage => "WeakAny", + llvm::Linkage::WeakODRLinkage => "WeakODR", + llvm::Linkage::AppendingLinkage => "Appending", + llvm::Linkage::InternalLinkage => "Internal", + llvm::Linkage::PrivateLinkage => "Private", + llvm::Linkage::ExternalWeakLinkage => "ExternalWeak", + llvm::Linkage::CommonLinkage => "Common", + }; + + output.push_str("["); + output.push_str(linkage_abbrev); + output.push_str("]"); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("TRANS_ITEM {}", item); + } + } + + (codegen_units, symbol_map) +} + +fn symbol_for_def_id<'a, 'tcx>(def_id: DefId, + scx: &SharedCrateContext<'a, 'tcx>, + symbol_map: &SymbolMap<'tcx>) + -> String { + // Just try to look things up in the symbol map. If nothing's there, we + // recompute. + if let Some(node_id) = scx.tcx().map.as_local_node_id(def_id) { + if let Some(sym) = symbol_map.get(TransItem::Static(node_id)) { + return sym.to_owned(); + } + } + + let instance = Instance::mono(scx, def_id); + + symbol_map.get(TransItem::Fn(instance)) + .map(str::to_owned) + .unwrap_or_else(|| instance.symbol_name(scx)) +} diff --git a/src/librustc_trans/trans/basic_block.rs b/src/librustc_trans/basic_block.rs similarity index 92% rename from src/librustc_trans/trans/basic_block.rs rename to src/librustc_trans/basic_block.rs index d3d055cda1202..60bd3fb8ef1b8 100644 --- a/src/librustc_trans/trans/basic_block.rs +++ b/src/librustc_trans/basic_block.rs @@ -10,7 +10,7 @@ use llvm; use llvm::BasicBlockRef; -use trans::value::{Users, Value}; +use value::{Users, Value}; use std::iter::{Filter, Map}; #[derive(Copy, Clone)] @@ -49,4 +49,10 @@ impl BasicBlock { _ => None } } + + pub fn delete(self) { + unsafe { + llvm::LLVMDeleteBasicBlock(self.0); + } + } } diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs new file mode 100644 index 0000000000000..8cd47bd148d0c --- /dev/null +++ b/src/librustc_trans/build.rs @@ -0,0 +1,1167 @@ +// Copyright 
2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(dead_code)] // FFI wrappers +#![allow(non_snake_case)] + +use llvm; +use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{Opcode, IntPredicate, RealPredicate}; +use llvm::{ValueRef, BasicBlockRef}; +use common::*; +use syntax_pos::Span; + +use builder::Builder; +use type_::Type; +use value::Value; +use debuginfo::DebugLoc; + +use libc::{c_uint, c_char}; + +pub fn terminate(cx: Block, _: &str) { + debug!("terminate({})", cx.to_str()); + cx.terminated.set(true); +} + +pub fn check_not_terminated(cx: Block) { + if cx.terminated.get() { + bug!("already terminated!"); + } +} + +pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { + let b = cx.fcx.ccx.builder(); + b.position_at_end(cx.llbb); + b +} + +// The difference between a block being unreachable and being terminated is +// somewhat obscure, and has to do with error checking. When a block is +// terminated, we're saying that trying to add any further statements in the +// block is an error. On the other hand, if something is unreachable, that +// means that the block was terminated in some way that we don't want to check +// for (panic/break/return statements, call to diverging functions, etc), and +// further instructions to the block should simply be ignored. 
+ +pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "RetVoid"); + debug_loc.apply(cx.fcx); + B(cx).ret_void(); +} + +pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "Ret"); + debug_loc.apply(cx.fcx); + B(cx).ret(v); +} + +pub fn AggregateRet(cx: Block, + ret_vals: &[ValueRef], + debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "AggregateRet"); + debug_loc.apply(cx.fcx); + B(cx).aggregate_ret(ret_vals); +} + +pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "Br"); + debug_loc.apply(cx.fcx); + B(cx).br(dest); +} + +pub fn CondBr(cx: Block, + if_: ValueRef, + then: BasicBlockRef, + else_: BasicBlockRef, + debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "CondBr"); + debug_loc.apply(cx.fcx); + B(cx).cond_br(if_, then, else_); +} + +pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize) + -> ValueRef { + if cx.unreachable.get() { return _Undef(v); } + check_not_terminated(cx); + terminate(cx, "Switch"); + B(cx).switch(v, else_, num_cases) +} + +pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { + unsafe { + if llvm::LLVMIsUndef(s) == llvm::True { return; } + llvm::LLVMAddCase(s, on_val, dest); + } +} + +pub fn IndirectBr(cx: Block, + addr: ValueRef, + num_dests: usize, + debug_loc: DebugLoc) { + if cx.unreachable.get() { + return; + } + check_not_terminated(cx); + terminate(cx, "IndirectBr"); + debug_loc.apply(cx.fcx); + B(cx).indirect_br(addr, num_dests); +} + +pub fn Invoke(cx: Block, + fn_: ValueRef, + args: &[ValueRef], + then: BasicBlockRef, + catch: BasicBlockRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() 
{ + return C_null(Type::i8(cx.ccx())); + } + check_not_terminated(cx); + terminate(cx, "Invoke"); + debug!("Invoke({:?} with arguments ({}))", + Value(fn_), + args.iter().map(|a| { + format!("{:?}", Value(*a)) + }).collect::>().join(", ")); + debug_loc.apply(cx.fcx); + let bundle = cx.lpad().and_then(|b| b.bundle()); + B(cx).invoke(fn_, args, then, catch, bundle) +} + +pub fn Unreachable(cx: Block) { + if cx.unreachable.get() { + return + } + cx.unreachable.set(true); + if !cx.terminated.get() { + B(cx).unreachable(); + } +} + +pub fn _Undef(val: ValueRef) -> ValueRef { + unsafe { + return llvm::LLVMGetUndef(val_ty(val).to_ref()); + } +} + +/* Arithmetic */ +pub fn Add(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).add(lhs, rhs) +} + +pub fn NSWAdd(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nswadd(lhs, rhs) +} + +pub fn NUWAdd(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nuwadd(lhs, rhs) +} + +pub fn FAdd(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fadd(lhs, rhs) +} + +pub fn FAddFast(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fadd_fast(lhs, rhs) +} + +pub fn Sub(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).sub(lhs, rhs) +} + +pub fn NSWSub(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + 
-> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nswsub(lhs, rhs) +} + +pub fn NUWSub(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nuwsub(lhs, rhs) +} + +pub fn FSub(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fsub(lhs, rhs) +} + +pub fn FSubFast(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fsub_fast(lhs, rhs) +} + +pub fn Mul(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).mul(lhs, rhs) +} + +pub fn NSWMul(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nswmul(lhs, rhs) +} + +pub fn NUWMul(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).nuwmul(lhs, rhs) +} + +pub fn FMul(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fmul(lhs, rhs) +} + +pub fn FMulFast(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fmul_fast(lhs, rhs) +} + +pub fn UDiv(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).udiv(lhs, 
rhs) +} + +pub fn SDiv(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).sdiv(lhs, rhs) +} + +pub fn ExactSDiv(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).exactsdiv(lhs, rhs) +} + +pub fn FDiv(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fdiv(lhs, rhs) +} + +pub fn FDivFast(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).fdiv_fast(lhs, rhs) +} + +pub fn URem(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).urem(lhs, rhs) +} + +pub fn SRem(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).srem(lhs, rhs) +} + +pub fn FRem(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).frem(lhs, rhs) +} + +pub fn FRemFast(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).frem_fast(lhs, rhs) +} + +pub fn Shl(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).shl(lhs, rhs) +} + +pub fn LShr(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if 
cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).lshr(lhs, rhs) +} + +pub fn AShr(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).ashr(lhs, rhs) +} + +pub fn And(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).and(lhs, rhs) +} + +pub fn Or(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).or(lhs, rhs) +} + +pub fn Xor(cx: Block, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).xor(lhs, rhs) +} + +pub fn BinOp(cx: Block, + op: Opcode, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _Undef(lhs); + } + debug_loc.apply(cx.fcx); + B(cx).binop(op, lhs, rhs) +} + +pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.unreachable.get() { + return _Undef(v); + } + debug_loc.apply(cx.fcx); + B(cx).neg(v) +} + +pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.unreachable.get() { + return _Undef(v); + } + debug_loc.apply(cx.fcx); + B(cx).nswneg(v) +} + +pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.unreachable.get() { + return _Undef(v); + } + debug_loc.apply(cx.fcx); + B(cx).nuwneg(v) +} +pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.unreachable.get() { + return _Undef(v); + } + debug_loc.apply(cx.fcx); + B(cx).fneg(v) +} + +pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { + if cx.unreachable.get() { + return _Undef(v); + } + debug_loc.apply(cx.fcx); + 
B(cx).not(v) +} + +pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } + AllocaFcx(cx.fcx, ty, name) + } +} + +pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { + let b = fcx.ccx.builder(); + b.position_before(fcx.alloca_insert_pt.get().unwrap()); + DebugLoc::None.apply(fcx); + b.alloca(ty, name) +} + +pub fn Free(cx: Block, pointer_val: ValueRef) { + if cx.unreachable.get() { return; } + B(cx).free(pointer_val) +} + +pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef { + unsafe { + let ccx = cx.fcx.ccx; + if cx.unreachable.get() { + let ty = val_ty(pointer_val); + let eltty = if ty.kind() == llvm::Array { + ty.element_type() + } else { + ccx.int_type() + }; + return llvm::LLVMGetUndef(eltty.to_ref()); + } + B(cx).load(pointer_val) + } +} + +pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).volatile_load(pointer_val) + } +} + +pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { + unsafe { + let ccx = cx.fcx.ccx; + if cx.unreachable.get() { + return llvm::LLVMGetUndef(ccx.int_type().to_ref()); + } + B(cx).atomic_load(pointer_val, order) + } +} + + +pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64, + hi: u64, signed: llvm::Bool) -> ValueRef { + if cx.unreachable.get() { + let ccx = cx.fcx.ccx; + let ty = val_ty(pointer_val); + let eltty = if ty.kind() == llvm::Array { + ty.element_type() + } else { + ccx.int_type() + }; + unsafe { + llvm::LLVMGetUndef(eltty.to_ref()) + } + } else { + B(cx).load_range_assert(pointer_val, lo, hi, signed) + } +} + +pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { + if cx.unreachable.get() { + let ccx = cx.fcx.ccx; + let ty = val_ty(ptr); + let eltty = if ty.kind() == llvm::Array { + ty.element_type() + } else { + 
ccx.int_type() + }; + unsafe { + llvm::LLVMGetUndef(eltty.to_ref()) + } + } else { + B(cx).load_nonnull(ptr) + } +} + +pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { + if cx.unreachable.get() { return C_nil(cx.ccx()); } + B(cx).store(val, ptr) +} + +pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { + if cx.unreachable.get() { return C_nil(cx.ccx()); } + B(cx).volatile_store(val, ptr) +} + +pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + if cx.unreachable.get() { return; } + B(cx).atomic_store(val, ptr, order) +} + +pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); + } + B(cx).gep(pointer, indices) + } +} + +// Simple wrapper around GEP that takes an array of ints and wraps them +// in C_i32() +#[inline] +pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); + } + B(cx).gepi(base, ixs) + } +} + +pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); + } + B(cx).inbounds_gep(pointer, indices) + } +} + +pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); + } + B(cx).struct_gep(pointer, idx) + } +} + +pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); + } + B(cx).global_string(_str) + } +} + +pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); + } + 
B(cx).global_string_ptr(_str) + } +} + +/* Casts */ +pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).trunc(val, dest_ty) + } +} + +pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).zext(val, dest_ty) + } +} + +pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).sext(val, dest_ty) + } +} + +pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).fptoui(val, dest_ty) + } +} + +pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).fptosi(val, dest_ty) + } +} + +pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).uitofp(val, dest_ty) + } +} + +pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).sitofp(val, dest_ty) + } +} + +pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).fptrunc(val, dest_ty) + } +} + +pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).fpext(val, dest_ty) + } +} + +pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).ptrtoint(val, dest_ty) + } +} + +pub fn 
IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).inttoptr(val, dest_ty) + } +} + +pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).bitcast(val, dest_ty) + } +} + +pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).zext_or_bitcast(val, dest_ty) + } +} + +pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).sext_or_bitcast(val, dest_ty) + } +} + +pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).trunc_or_bitcast(val, dest_ty) + } +} + +pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type, + _: *const u8) + -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).cast(op, val, dest_ty) + } +} + +pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).pointercast(val, dest_ty) + } +} + +pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).intcast(val, dest_ty) + } +} + +pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } + B(cx).fpcast(val, dest_ty) + } +} + + +/* Comparisons */ +pub fn ICmp(cx: Block, + op: IntPredicate, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + unsafe { + if 
cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); + } + debug_loc.apply(cx.fcx); + B(cx).icmp(op, lhs, rhs) + } +} + +pub fn FCmp(cx: Block, + op: RealPredicate, + lhs: ValueRef, + rhs: ValueRef, + debug_loc: DebugLoc) + -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); + } + debug_loc.apply(cx.fcx); + B(cx).fcmp(op, lhs, rhs) + } +} + +/* Miscellaneous instructions */ +pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + B(cx).empty_phi(ty) + } +} + +pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef], + bbs: &[BasicBlockRef]) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + B(cx).phi(ty, vals, bbs) + } +} + +pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { + unsafe { + if llvm::LLVMIsUndef(phi) == llvm::True { return; } + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } +} + +pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { + unsafe { + let ccx = cx.fcx.ccx; + let ty = val_ty(fn_); + let retty = if ty.kind() == llvm::Function { + ty.return_type() + } else { + ccx.int_type() + }; + B(cx).count_insn("ret_undef"); + llvm::LLVMGetUndef(retty.to_ref()) + } +} + +pub fn add_span_comment(cx: Block, sp: Span, text: &str) { + B(cx).add_span_comment(sp, text) +} + +pub fn add_comment(cx: Block, text: &str) { + B(cx).add_comment(text) +} + +pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, + inputs: &[ValueRef], output: Type, + volatile: bool, alignstack: bool, + dia: AsmDialect) -> ValueRef { + B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) +} + +pub fn Call(cx: Block, + fn_: ValueRef, + args: &[ValueRef], + debug_loc: DebugLoc) + -> ValueRef { + if cx.unreachable.get() { + return _UndefReturn(cx, fn_); + } + debug_loc.apply(cx.fcx); + let bundle = 
cx.lpad.get().and_then(|b| b.bundle()); + B(cx).call(fn_, args, bundle) +} + +pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { + if cx.unreachable.get() { return; } + B(cx).atomic_fence(order, scope) +} + +pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { + if cx.unreachable.get() { return _Undef(then); } + B(cx).select(if_, then, else_) +} + +pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef { + unsafe { + if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } + B(cx).va_arg(list, ty) + } +} + +pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).extract_element(vec_val, index) + } +} + +pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef, + index: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).insert_element(vec_val, elt_val, index) + } +} + +pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef, + mask: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).shuffle_vector(v1, v2, mask) + } +} + +pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).vector_splat(num_elts, elt_val) + } +} + +pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + B(cx).extract_value(agg_val, index) + } +} + +pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); + } + 
B(cx).insert_value(agg_val, elt_val, index) + } +} + +pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); + } + B(cx).is_null(val) + } +} + +pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef { + unsafe { + if cx.unreachable.get() { + return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); + } + B(cx).is_not_null(val) + } +} + +pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + unsafe { + let ccx = cx.fcx.ccx; + if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } + B(cx).ptrdiff(lhs, rhs) + } +} + +pub fn Trap(cx: Block) { + if cx.unreachable.get() { return; } + B(cx).trap(); +} + +pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef, + num_clauses: usize) -> ValueRef { + check_not_terminated(cx); + assert!(!cx.unreachable.get()); + B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn) +} + +pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) { + B(cx).add_clause(landing_pad, clause) +} + +pub fn SetCleanup(cx: Block, landing_pad: ValueRef) { + B(cx).set_cleanup(landing_pad) +} + +pub fn SetPersonalityFn(cx: Block, f: ValueRef) { + B(cx).set_personality_fn(f) +} + +pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef { + check_not_terminated(cx); + terminate(cx, "Resume"); + B(cx).resume(exn) +} + +// Atomic Operations +pub fn AtomicCmpXchg(cx: Block, dst: ValueRef, + cmp: ValueRef, src: ValueRef, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: llvm::Bool) -> ValueRef { + B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak) +} +pub fn AtomicRMW(cx: Block, op: AtomicRmwBinOp, + dst: ValueRef, src: ValueRef, + order: AtomicOrdering) -> ValueRef { + B(cx).atomic_rmw(op, dst, src, order) +} + +pub fn CleanupPad(cx: Block, + parent: Option, + args: &[ValueRef]) -> ValueRef { + check_not_terminated(cx); + assert!(!cx.unreachable.get()); + B(cx).cleanup_pad(parent, args) 
+} + +pub fn CleanupRet(cx: Block, + cleanup: ValueRef, + unwind: Option) -> ValueRef { + check_not_terminated(cx); + terminate(cx, "CleanupRet"); + B(cx).cleanup_ret(cleanup, unwind) +} + +pub fn CatchPad(cx: Block, + parent: ValueRef, + args: &[ValueRef]) -> ValueRef { + check_not_terminated(cx); + assert!(!cx.unreachable.get()); + B(cx).catch_pad(parent, args) +} + +pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { + check_not_terminated(cx); + terminate(cx, "CatchRet"); + B(cx).catch_ret(pad, unwind) +} + +pub fn CatchSwitch(cx: Block, + parent: Option, + unwind: Option, + num_handlers: usize) -> ValueRef { + check_not_terminated(cx); + terminate(cx, "CatchSwitch"); + B(cx).catch_switch(parent, unwind, num_handlers) +} + +pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) { + B(cx).add_handler(catch_switch, handler) +} diff --git a/src/librustc_trans/trans/builder.rs b/src/librustc_trans/builder.rs similarity index 75% rename from src/librustc_trans/trans/builder.rs rename to src/librustc_trans/builder.rs index be4028e37d718..0480bb82a998e 100644 --- a/src/librustc_trans/trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -11,19 +11,21 @@ #![allow(dead_code)] // FFI wrappers use llvm; -use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; -use llvm::{Opcode, IntPredicate, RealPredicate, False}; +use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; +use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; -use trans::base; -use trans::common::*; -use trans::machine::llalign_of_pref; -use trans::type_::Type; -use util::nodemap::FnvHashMap; +use base; +use common::*; +use machine::llalign_of_pref; +use type_::Type; +use value::Value; +use util::nodemap::FxHashMap; use libc::{c_uint, c_char}; +use std::borrow::Cow; use std::ffi::CString; use std::ptr; -use 
syntax::codemap::Span; +use syntax_pos::Span; pub struct Builder<'a, 'tcx: 'a> { pub llbuilder: BuilderRef, @@ -60,7 +62,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Build version of path with cycles removed. // Pass 1: scan table mapping str -> rightmost pos. - let mut mm = FnvHashMap(); + let mut mm = FxHashMap(); let len = v.len(); let mut i = 0; while i < len { @@ -104,6 +106,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn position_at_start(&self, llbb: BasicBlockRef) { + unsafe { + llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); + } + } + pub fn ret_void(&self) { self.count_insn("retvoid"); unsafe { @@ -158,30 +166,28 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { args: &[ValueRef], then: BasicBlockRef, catch: BasicBlockRef, - attributes: Option) - -> ValueRef { + bundle: Option<&OperandBundleDef>) -> ValueRef { self.count_insn("invoke"); - debug!("Invoke {} with args ({})", - self.ccx.tn().val_to_string(llfn), + debug!("Invoke {:?} with args ({})", + Value(llfn), args.iter() - .map(|&v| self.ccx.tn().val_to_string(v)) + .map(|&v| format!("{:?}", Value(v))) .collect::>() .join(", ")); + let args = self.check_call("invoke", llfn, args); + let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(ptr::null_mut()); + unsafe { - let v = llvm::LLVMBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - noname()); - match attributes { - Some(a) => a.apply_callsite(v), - None => {} - } - v + llvm::LLVMRustBuildInvoke(self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + bundle, + noname()) } } @@ -221,6 +227,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn fadd_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fadd"); + unsafe { + let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn sub(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("sub"); unsafe { @@ -249,6 
+264,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn fsub_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("sub"); + unsafe { + let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn mul(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("mul"); unsafe { @@ -277,6 +301,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn fmul_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fmul"); + unsafe { + let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + + pub fn udiv(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("udiv"); unsafe { @@ -305,6 +339,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn fdiv_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("fdiv"); + unsafe { + let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn urem(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("urem"); unsafe { @@ -326,6 +369,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn frem_fast(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { + self.count_insn("frem"); + unsafe { + let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn shl(&self, lhs: ValueRef, rhs: ValueRef) -> ValueRef { self.count_insn("shl"); unsafe { @@ -451,8 +503,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unsafe { let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order, - align as c_uint) + llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order, + align as c_uint) } } @@ -488,22 +540,20 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } pub fn 
store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store"); + let ptr = self.check_store(val, ptr); unsafe { llvm::LLVMBuildStore(self.llbuilder, val, ptr) } } pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store.volatile"); + let ptr = self.check_store(val, ptr); unsafe { let insn = llvm::LLVMBuildStore(self.llbuilder, val, ptr); llvm::LLVMSetVolatile(insn, llvm::True); @@ -512,14 +562,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - debug!("Store {} -> {}", - self.ccx.tn().val_to_string(val), - self.ccx.tn().val_to_string(ptr)); + debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); unsafe { let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); + llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); } } @@ -787,75 +836,39 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { else { llvm::False }; let argtys = inputs.iter().map(|v| { - debug!("Asm Input Type: {}", self.ccx.tn().val_to_string(*v)); + debug!("Asm Input Type: {:?}", Value(*v)); val_ty(*v) }).collect::>(); - debug!("Asm Output Type: {}", self.ccx.tn().type_to_string(output)); + debug!("Asm Output Type: {:?}", output); let fty = Type::func(&argtys[..], &output); unsafe { - let v = llvm::LLVMInlineAsm( - fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint); + let 
v = llvm::LLVMRustInlineAsm( + fty.to_ref(), asm, cons, volatile, alignstack, dia); self.call(v, inputs, None) } } pub fn call(&self, llfn: ValueRef, args: &[ValueRef], - attributes: Option) -> ValueRef { + bundle: Option<&OperandBundleDef>) -> ValueRef { self.count_insn("call"); - debug!("Call {} with args ({})", - self.ccx.tn().val_to_string(llfn), + debug!("Call {:?} with args ({})", + Value(llfn), args.iter() - .map(|&v| self.ccx.tn().val_to_string(v)) + .map(|&v| format!("{:?}", Value(v))) .collect::>() .join(", ")); - let mut fn_ty = val_ty(llfn); - // Strip off pointers - while fn_ty.kind() == llvm::TypeKind::Pointer { - fn_ty = fn_ty.element_type(); - } - - assert!(fn_ty.kind() == llvm::TypeKind::Function, - "builder::call not passed a function"); - - let param_tys = fn_ty.func_params(); - - let iter = param_tys.into_iter() - .zip(args.iter().map(|&v| val_ty(v))); - for (i, (expected_ty, actual_ty)) in iter.enumerate() { - if expected_ty != actual_ty { - self.ccx.sess().bug( - &format!( - "Type mismatch in function call of {}. 
Expected {} for param {}, got {}", - self.ccx.tn().val_to_string(llfn), - self.ccx.tn().type_to_string(expected_ty), - i, - self.ccx.tn().type_to_string(actual_ty))); - - } - } + let args = self.check_call("call", llfn, args); + let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(ptr::null_mut()); unsafe { - let v = llvm::LLVMBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, noname()); - match attributes { - Some(a) => a.apply_callsite(v), - None => {} - } - v + llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), + args.len() as c_uint, bundle, noname()) } } - pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef], - conv: CallConv, attributes: Option) -> ValueRef { - self.count_insn("callwithconv"); - let v = self.call(llfn, args, attributes); - llvm::SetInstructionCallConv(v, conv); - v - } - pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef { self.count_insn("select"); unsafe { @@ -948,8 +961,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { assert!((t as isize != 0)); let args: &[ValueRef] = &[]; self.count_insn("trap"); - llvm::LLVMBuildCall( - self.llbuilder, t, args.as_ptr(), args.len() as c_uint, noname()); + llvm::LLVMRustBuildCall(self.llbuilder, t, + args.as_ptr(), args.len() as c_uint, + ptr::null_mut(), + noname()); } } @@ -983,17 +998,98 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } + pub fn cleanup_pad(&self, + parent: Option, + args: &[ValueRef]) -> ValueRef { + self.count_insn("cleanuppad"); + let parent = parent.unwrap_or(ptr::null_mut()); + let name = CString::new("cleanuppad").unwrap(); + let ret = unsafe { + llvm::LLVMRustBuildCleanupPad(self.llbuilder, + parent, + args.len() as c_uint, + args.as_ptr(), + name.as_ptr()) + }; + assert!(!ret.is_null(), "LLVM does not have support for cleanuppad"); + return ret + } + + pub fn cleanup_ret(&self, cleanup: ValueRef, + unwind: Option) -> ValueRef { + self.count_insn("cleanupret"); + let unwind = unwind.unwrap_or(ptr::null_mut()); + 
let ret = unsafe { + llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) + }; + assert!(!ret.is_null(), "LLVM does not have support for cleanupret"); + return ret + } + + pub fn catch_pad(&self, + parent: ValueRef, + args: &[ValueRef]) -> ValueRef { + self.count_insn("catchpad"); + let name = CString::new("catchpad").unwrap(); + let ret = unsafe { + llvm::LLVMRustBuildCatchPad(self.llbuilder, parent, + args.len() as c_uint, args.as_ptr(), + name.as_ptr()) + }; + assert!(!ret.is_null(), "LLVM does not have support for catchpad"); + return ret + } + + pub fn catch_ret(&self, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { + self.count_insn("catchret"); + let ret = unsafe { + llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) + }; + assert!(!ret.is_null(), "LLVM does not have support for catchret"); + return ret + } + + pub fn catch_switch(&self, + parent: Option, + unwind: Option, + num_handlers: usize) -> ValueRef { + self.count_insn("catchswitch"); + let parent = parent.unwrap_or(ptr::null_mut()); + let unwind = unwind.unwrap_or(ptr::null_mut()); + let name = CString::new("catchswitch").unwrap(); + let ret = unsafe { + llvm::LLVMRustBuildCatchSwitch(self.llbuilder, parent, unwind, + num_handlers as c_uint, + name.as_ptr()) + }; + assert!(!ret.is_null(), "LLVM does not have support for catchswitch"); + return ret + } + + pub fn add_handler(&self, catch_switch: ValueRef, handler: BasicBlockRef) { + unsafe { + llvm::LLVMRustAddHandler(catch_switch, handler); + } + } + + pub fn set_personality_fn(&self, personality: ValueRef) { + unsafe { + llvm::LLVMRustSetPersonalityFn(self.llbuilder, personality); + } + } + // Atomic Operations pub fn atomic_cmpxchg(&self, dst: ValueRef, cmp: ValueRef, src: ValueRef, order: AtomicOrdering, - failure_order: AtomicOrdering) -> ValueRef { + failure_order: AtomicOrdering, + weak: llvm::Bool) -> ValueRef { unsafe { - llvm::LLVMBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, - order, failure_order) + 
llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, + order, failure_order, weak) } } - pub fn atomic_rmw(&self, op: AtomicBinOp, + pub fn atomic_rmw(&self, op: AtomicRmwBinOp, dst: ValueRef, src: ValueRef, order: AtomicOrdering) -> ValueRef { unsafe { @@ -1003,7 +1099,71 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { unsafe { - llvm::LLVMBuildAtomicFence(self.llbuilder, order, scope); + llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + } + } + + /// Returns the ptr value that should be used for storing `val`. + fn check_store<'b>(&self, + val: ValueRef, + ptr: ValueRef) -> ValueRef { + let dest_ptr_ty = val_ty(ptr); + let stored_ty = val_ty(val); + let stored_ptr_ty = stored_ty.ptr_to(); + + assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + + if dest_ptr_ty == stored_ptr_ty { + ptr + } else { + debug!("Type mismatch in store. \ + Expected {:?}, got {:?}; inserting bitcast", + dest_ptr_ty, stored_ptr_ty); + self.bitcast(ptr, stored_ptr_ty) + } + } + + /// Returns the args that should be used for a call to `llfn`. + fn check_call<'b>(&self, + typ: &str, + llfn: ValueRef, + args: &'b [ValueRef]) -> Cow<'b, [ValueRef]> { + let mut fn_ty = val_ty(llfn); + // Strip off pointers + while fn_ty.kind() == llvm::TypeKind::Pointer { + fn_ty = fn_ty.element_type(); } + + assert!(fn_ty.kind() == llvm::TypeKind::Function, + "builder::{} not passed a function", typ); + + let param_tys = fn_ty.func_params(); + + let all_args_match = param_tys.iter() + .zip(args.iter().map(|&v| val_ty(v))) + .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); + + if all_args_match { + return Cow::Borrowed(args); + } + + let casted_args: Vec<_> = param_tys.into_iter() + .zip(args.iter()) + .enumerate() + .map(|(i, (expected_ty, &actual_val))| { + let actual_ty = val_ty(actual_val); + if expected_ty != actual_ty { + debug!("Type mismatch in function call of {:?}. 
\ + Expected {:?} for param {}, got {:?}; injecting bitcast", + Value(llfn), + expected_ty, i, actual_ty); + self.bitcast(actual_val, expected_ty) + } else { + actual_val + } + }) + .collect(); + + return Cow::Owned(casted_args); } } diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs new file mode 100644 index 0000000000000..59a84439950ba --- /dev/null +++ b/src/librustc_trans/cabi_aarch64.rs @@ -0,0 +1,176 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_upper_case_globals)] + +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use abi::{self, FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +fn ty_size(ty: Type) -> usize { + abi::ty_size(ty, 8) +} + +fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { + fn check_array(ty: Type) -> Option<(Type, u64)> { + let len = ty.array_length() as u64; + if len == 0 { + return None + } + let elt = ty.element_type(); + + // if our element is an HFA/HVA, so are we; multiply members by our len + is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members)) + } + + fn check_struct(ty: Type) -> Option<(Type, u64)> { + let str_tys = ty.field_types(); + if str_tys.len() == 0 { + return None + } + + let mut prev_base_ty = None; + let mut members = 0; + for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { + match (prev_base_ty, opt_homog_agg) { + // field isn't itself an HFA, so we aren't either + (_, None) => return None, + + // first field - store its type and number of members + (None, Some((field_ty, field_members))) => { + prev_base_ty = Some(field_ty); + members = field_members; + 
}, + + // 2nd or later field - give up if it's a different type; otherwise incr. members + (Some(prev_ty), Some((field_ty, field_members))) => { + if prev_ty != field_ty { + return None; + } + members += field_members; + } + } + } + + // Because of previous checks, we know prev_base_ty is Some(...) because + // 1. str_tys has at least one element; and + // 2. prev_base_ty was filled in (or we would've returned early) + let (base_ty, members) = (prev_base_ty.unwrap(), members); + + // Ensure there is no padding. + if ty_size(ty) == ty_size(base_ty) * (members as usize) { + Some((base_ty, members)) + } else { + None + } + } + + let homog_agg = match ty.kind() { + Float => Some((ty, 1)), + Double => Some((ty, 1)), + Array => check_array(ty), + Struct => check_struct(ty), + Vector => match ty_size(ty) { + 4|8 => Some((ty, 1)), + _ => None + }, + _ => None + }; + + // Ensure we have at most four uniquely addressable members + homog_agg.and_then(|(base_ty, members)| { + if members > 0 && members <= 4 { + Some((base_ty, members)) + } else { + None + } + }) +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(32); + return; + } + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { + ret.cast = Some(Type::array(&base_ty, members)); + return; + } + let size = ty_size(ret.ty); + if size <= 16 { + let llty = if size <= 1 { + Type::i8(ccx) + } else if size <= 2 { + Type::i16(ccx) + } else if size <= 4 { + Type::i32(ccx) + } else if size <= 8 { + Type::i64(ccx) + } else { + Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + }; + ret.cast = Some(llty); + return; + } + ret.make_indirect(ccx); +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if is_reg_ty(arg.ty) { + arg.extend_integer_width_to(32); + return; + } + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { + arg.cast = Some(Type::array(&base_ty, members)); + return; + } + let size = 
ty_size(arg.ty); + if size <= 16 { + let llty = if size == 0 { + Type::array(&Type::i64(ccx), 0) + } else if size == 1 { + Type::i8(ccx) + } else if size == 2 { + Type::i16(ccx) + } else if size <= 4 { + Type::i32(ccx) + } else if size <= 8 { + Type::i64(ccx) + } else { + Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + }; + arg.cast = Some(llty); + return; + } + arg.make_indirect(ccx); +} + +fn is_reg_ty(ty: Type) -> bool { + match ty.kind() { + Integer + | Pointer + | Float + | Double + | Vector => true, + _ => false + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs new file mode 100644 index 0000000000000..93d43f7d96116 --- /dev/null +++ b/src/librustc_trans/cabi_arm.rs @@ -0,0 +1,165 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(non_upper_case_globals)] + +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use abi::{self, align_up_to, FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +use std::cmp; + +pub enum Flavor { + General, + Ios +} + +type TyAlignFn = fn(ty: Type) -> usize; + +fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize { + let a = align_fn(ty); + return align_up_to(off, a); +} + +fn general_ty_align(ty: Type) -> usize { + abi::ty_align(ty, 4) +} + +// For more information see: +// ARMv7 +// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual +// /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html +// ARMv6 +// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual +// /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html +fn ios_ty_align(ty: Type) -> usize { + match ty.kind() { + Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8), + Pointer => 4, + Float => 4, + Double => 4, + Struct => { + if ty.is_packed() { + 1 + } else { + let str_tys = ty.field_types(); + str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t))) + } + } + Array => { + let elt = ty.element_type(); + ios_ty_align(elt) + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + ios_ty_align(elt) * len + } + _ => bug!("ty_align: unhandled type") + } +} + +fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 4, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + let str_tys = ty.field_types(); + str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn)) + } else { + let str_tys = ty.field_types(); + let size = str_tys.iter() + .fold(0, |s, t| { + align(s, *t, align_fn) + ty_size(*t, align_fn) + }); + align(size, ty, align_fn) + } + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt, align_fn); + len * eltsz + 
} + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt, align_fn); + len * eltsz + } + _ => bug!("ty_size: unhandled type") + } +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(32); + return; + } + let size = ty_size(ret.ty, align_fn); + if size <= 4 { + let llty = if size <= 1 { + Type::i8(ccx) + } else if size <= 2 { + Type::i16(ccx) + } else { + Type::i32(ccx) + }; + ret.cast = Some(llty); + return; + } + ret.make_indirect(ccx); +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) { + if is_reg_ty(arg.ty) { + arg.extend_integer_width_to(32); + return; + } + let align = align_fn(arg.ty); + let size = ty_size(arg.ty, align_fn); + let llty = if align <= 4 { + Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64) + } else { + Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64) + }; + arg.cast = Some(llty); +} + +fn is_reg_ty(ty: Type) -> bool { + match ty.kind() { + Integer + | Pointer + | Float + | Double + | Vector => true, + _ => false + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) { + let align_fn = match flavor { + Flavor::General => general_ty_align as TyAlignFn, + Flavor::Ios => ios_ty_align as TyAlignFn, + }; + + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret, align_fn); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, align_fn); + } +} diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs new file mode 100644 index 0000000000000..f410627400c34 --- /dev/null +++ b/src/librustc_trans/cabi_asmjs.rs @@ -0,0 +1,55 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_upper_case_globals)] + +use llvm::{Struct, Array}; +use abi::{FnType, ArgType, ArgAttribute}; +use context::CrateContext; + +// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 + +// See the https://github.com/kripken/emscripten-fastcomp-clang repository. +// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions. + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + match ret.ty.kind() { + Struct => { + let field_types = ret.ty.field_types(); + if field_types.len() == 1 { + ret.cast = Some(field_types[0]); + } else { + ret.make_indirect(ccx); + } + } + Array => { + ret.make_indirect(ccx); + } + _ => {} + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if arg.ty.is_aggregate() { + arg.make_indirect(ccx); + arg.attrs.set(ArgAttribute::ByVal); + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs new file mode 100644 index 0000000000000..25fe53e7ef40f --- /dev/null +++ b/src/librustc_trans/cabi_mips.rs @@ -0,0 +1,108 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(non_upper_case_globals)] + +use libc::c_uint; +use std::cmp; +use llvm; +use llvm::{Integer, Pointer, Float, Double, Vector}; +use abi::{self, align_up_to, ArgType, FnType}; +use context::CrateContext; +use type_::Type; + +fn ty_align(ty: Type) -> usize { + abi::ty_align(ty, 4) +} + +fn ty_size(ty: Type) -> usize { + abi::ty_size(ty, 4) +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(32); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { + let orig_offset = *offset; + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); + + align = cmp::min(cmp::max(align, 4), 8); + *offset = align_up_to(*offset, align); + *offset += align_up_to(size, align * 8) / 8; + + if !is_reg_ty(arg.ty) { + arg.cast = Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); + } else { + arg.extend_integer_width_to(32); + } +} + +fn is_reg_ty(ty: Type) -> bool { + return match ty.kind() { + Integer + | Pointer + | Float + | Double + | Vector => true, + _ => false + }; +} + +fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { + if ((align - 1 ) & offset) > 0 { + Some(Type::i32(ccx)) + } else { + None + } +} + +fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { + let int_ty = Type::i32(ccx); + let mut args = Vec::new(); + + let mut n = size / 32; + while n > 0 { + args.push(int_ty); + n -= 1; + } + + let r = size % 32; + if r > 0 { + unsafe { + args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); + } + } + + args +} + +fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { + let size = ty_size(ty) * 8; + Type::struct_(ccx, &coerce_to_int(ccx, size), false) +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + let mut offset = if fty.ret.is_indirect() { 4 } 
else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } +} diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs new file mode 100644 index 0000000000000..e6b500c88dc7a --- /dev/null +++ b/src/librustc_trans/cabi_mips64.rs @@ -0,0 +1,108 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_upper_case_globals)] + +use libc::c_uint; +use std::cmp; +use llvm; +use llvm::{Integer, Pointer, Float, Double, Vector}; +use abi::{self, align_up_to, ArgType, FnType}; +use context::CrateContext; +use type_::Type; + +fn ty_align(ty: Type) -> usize { + abi::ty_align(ty, 8) +} + +fn ty_size(ty: Type) -> usize { + abi::ty_size(ty, 8) +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(64); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { + let orig_offset = *offset; + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); + + align = cmp::min(cmp::max(align, 4), 8); + *offset = align_up_to(*offset, align); + *offset += align_up_to(size, align * 8) / 8; + + if !is_reg_ty(arg.ty) { + arg.cast = Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); + } else { + arg.extend_integer_width_to(64); + } +} + +fn is_reg_ty(ty: Type) -> bool { + return match ty.kind() { + Integer + | Pointer + | Float + | Double + | Vector => true, + _ => false + }; +} + +fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { + if ((align - 1 ) & offset) > 0 { + Some(Type::i64(ccx)) + } else { + 
None + } +} + +fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { + let int_ty = Type::i64(ccx); + let mut args = Vec::new(); + + let mut n = size / 64; + while n > 0 { + args.push(int_ty); + n -= 1; + } + + let r = size % 64; + if r > 0 { + unsafe { + args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); + } + } + + args +} + +fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { + let size = ty_size(ty) * 8; + Type::struct_(ccx, &coerce_to_int(ccx, size), false) +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + let mut offset = if fty.ret.is_indirect() { 8 } else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } +} diff --git a/src/librustc_trans/cabi_msp430.rs b/src/librustc_trans/cabi_msp430.rs new file mode 100644 index 0000000000000..aa90bb7ab753a --- /dev/null +++ b/src/librustc_trans/cabi_msp430.rs @@ -0,0 +1,59 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Reference: MSP430 Embedded Application Binary Interface +// http://www.ti.com/lit/an/slaa534/slaa534.pdf + +#![allow(non_upper_case_globals)] + +use llvm::Struct; + +use abi::{self, ArgType, FnType}; +use context::CrateContext; +use type_::Type; + +fn ty_size(ty: Type) -> usize { + abi::ty_size(ty, 2) +} + +// 3.5 Structures or Unions Passed and Returned by Reference +// +// "Structures (including classes) and unions larger than 32 bits are passed and +// returned by reference. 
To pass a structure or union by reference, the caller +// places its address in the appropriate location: either in a register or on +// the stack, according to its position in the argument list. (..)" +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if ret.ty.kind() == Struct && ty_size(ret.ty) > 32 { + ret.make_indirect(ccx); + } else { + ret.extend_integer_width_to(16); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if arg.ty.kind() == Struct && ty_size(arg.ty) > 32 { + arg.make_indirect(ccx); + } else { + arg.extend_integer_width_to(16); + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { + continue; + } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs new file mode 100644 index 0000000000000..4e1d7a9337827 --- /dev/null +++ b/src/librustc_trans/cabi_powerpc.rs @@ -0,0 +1,114 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use libc::c_uint; +use llvm; +use llvm::{Integer, Pointer, Float, Double, Vector}; +use abi::{self, align_up_to, FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +use std::cmp; + +fn ty_align(ty: Type) -> usize { + if ty.kind() == Vector { + bug!("ty_size: unhandled type") + } else { + abi::ty_align(ty, 4) + } +} + +fn ty_size(ty: Type) -> usize { + if ty.kind() == Vector { + bug!("ty_size: unhandled type") + } else { + abi::ty_size(ty, 4) + } +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(32); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) { + let orig_offset = *offset; + let size = ty_size(arg.ty) * 8; + let mut align = ty_align(arg.ty); + + align = cmp::min(cmp::max(align, 4), 8); + *offset = align_up_to(*offset, align); + *offset += align_up_to(size, align * 8) / 8; + + if !is_reg_ty(arg.ty) { + arg.cast = Some(struct_ty(ccx, arg.ty)); + arg.pad = padding_ty(ccx, align, orig_offset); + } else { + arg.extend_integer_width_to(32); + } +} + +fn is_reg_ty(ty: Type) -> bool { + return match ty.kind() { + Integer + | Pointer + | Float + | Double => true, + _ => false + }; +} + +fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { + if ((align - 1 ) & offset) > 0 { + Some(Type::i32(ccx)) + } else { + None + } +} + +fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { + let int_ty = Type::i32(ccx); + let mut args = Vec::new(); + + let mut n = size / 32; + while n > 0 { + args.push(int_ty); + n -= 1; + } + + let r = size % 32; + if r > 0 { + unsafe { + args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); + } + } + + args +} + +fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { + let size = ty_size(ty) * 8; + Type::struct_(ccx, &coerce_to_int(ccx, size), false) +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if 
!fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg, &mut offset); + } +} diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs new file mode 100644 index 0000000000000..cdc7c1fd1afb3 --- /dev/null +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -0,0 +1,194 @@ +// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// FIXME: The PowerPC64 ABI needs to zero or sign extend function +// call parameters, but compute_abi_info() is passed LLVM types +// which have no sign information. +// +// Alignment of 128 bit types is not currently handled, this will +// need to be fixed when PowerPC vector support is added. 
+ +use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array}; +use abi::{self, FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +fn ty_size(ty: Type) -> usize { + if ty.kind() == Vector { + bug!("ty_size: unhandled type") + } else { + abi::ty_size(ty, 8) + } +} + +fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { + fn check_array(ty: Type) -> Option<(Type, u64)> { + let len = ty.array_length() as u64; + if len == 0 { + return None + } + let elt = ty.element_type(); + + // if our element is an HFA/HVA, so are we; multiply members by our len + is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members)) + } + + fn check_struct(ty: Type) -> Option<(Type, u64)> { + let str_tys = ty.field_types(); + if str_tys.len() == 0 { + return None + } + + let mut prev_base_ty = None; + let mut members = 0; + for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { + match (prev_base_ty, opt_homog_agg) { + // field isn't itself an HFA, so we aren't either + (_, None) => return None, + + // first field - store its type and number of members + (None, Some((field_ty, field_members))) => { + prev_base_ty = Some(field_ty); + members = field_members; + }, + + // 2nd or later field - give up if it's a different type; otherwise incr. members + (Some(prev_ty), Some((field_ty, field_members))) => { + if prev_ty != field_ty { + return None; + } + members += field_members; + } + } + } + + // Because of previous checks, we know prev_base_ty is Some(...) because + // 1. str_tys has at least one element; and + // 2. prev_base_ty was filled in (or we would've returned early) + let (base_ty, members) = (prev_base_ty.unwrap(), members); + + // Ensure there is no padding. 
+ if ty_size(ty) == ty_size(base_ty) * (members as usize) { + Some((base_ty, members)) + } else { + None + } + } + + let homog_agg = match ty.kind() { + Float => Some((ty, 1)), + Double => Some((ty, 1)), + Array => check_array(ty), + Struct => check_struct(ty), + _ => None + }; + + // Ensure we have at most eight uniquely addressable members + homog_agg.and_then(|(base_ty, members)| { + if members > 0 && members <= 8 { + Some((base_ty, members)) + } else { + None + } + }) +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(64); + return; + } + + // The PowerPC64 big endian ABI doesn't return aggregates in registers + if ccx.sess().target.target.target_endian == "big" { + ret.make_indirect(ccx); + } + + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) { + ret.cast = Some(Type::array(&base_ty, members)); + return; + } + let size = ty_size(ret.ty); + if size <= 16 { + let llty = if size <= 1 { + Type::i8(ccx) + } else if size <= 2 { + Type::i16(ccx) + } else if size <= 4 { + Type::i32(ccx) + } else if size <= 8 { + Type::i64(ccx) + } else { + Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) + }; + ret.cast = Some(llty); + return; + } + + ret.make_indirect(ccx); +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if is_reg_ty(arg.ty) { + arg.extend_integer_width_to(64); + return; + } + + if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) { + arg.cast = Some(Type::array(&base_ty, members)); + return; + } + + arg.cast = Some(struct_ty(ccx, arg.ty)); +} + +fn is_reg_ty(ty: Type) -> bool { + match ty.kind() { + Integer + | Pointer + | Float + | Double => true, + _ => false + } +} + +fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec { + let long_ty = Type::i64(ccx); + let mut args = Vec::new(); + + let mut n = size / 64; + while n > 0 { + args.push(long_ty); + n -= 1; + } + + let r = size % 64; + if r > 0 { + args.push(Type::ix(ccx, r as 
u64)); + } + + args +} + +fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { + let size = ty_size(ty) * 8; + Type::struct_(ccx, &coerce_to_long(ccx, size), false) +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs new file mode 100644 index 0000000000000..5a666c6083d16 --- /dev/null +++ b/src/librustc_trans/cabi_s390x.rs @@ -0,0 +1,146 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// FIXME: The assumes we're using the non-vector ABI, i.e. compiling +// for a pre-z13 machine or using -mno-vx. 
+ +use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; +use abi::{align_up_to, FnType, ArgType}; +use context::CrateContext; +use type_::Type; + +use std::cmp; + +fn align(off: usize, ty: Type) -> usize { + let a = ty_align(ty); + return align_up_to(off, a); +} + +fn ty_align(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + 1 + } else { + let str_tys = ty.field_types(); + str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) + } + } + Array => { + let elt = ty.element_type(); + ty_align(elt) + } + Vector => ty_size(ty), + _ => bug!("ty_align: unhandled type") + } +} + +fn ty_size(ty: Type) -> usize { + match ty.kind() { + Integer => ((ty.int_width() as usize) + 7) / 8, + Pointer => 8, + Float => 4, + Double => 8, + Struct => { + if ty.is_packed() { + let str_tys = ty.field_types(); + str_tys.iter().fold(0, |s, t| s + ty_size(*t)) + } else { + let str_tys = ty.field_types(); + let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); + align(size, ty) + } + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + len * eltsz + } + _ => bug!("ty_size: unhandled type") + } +} + +fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) { + if is_reg_ty(ret.ty) { + ret.extend_integer_width_to(64); + } else { + ret.make_indirect(ccx); + } +} + +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) { + if arg.ty.kind() == Struct { + fn is_single_fp_element(tys: &[Type]) -> bool { + if tys.len() != 1 { + return false; + } + match tys[0].kind() { + Float | Double => true, + Struct => is_single_fp_element(&tys[0].field_types()), + _ => false + } + } + + if is_single_fp_element(&arg.ty.field_types()) { + match ty_size(arg.ty) { + 4 => arg.cast = 
Some(Type::f32(ccx)), + 8 => arg.cast = Some(Type::f64(ccx)), + _ => arg.make_indirect(ccx) + } + } else { + match ty_size(arg.ty) { + 1 => arg.cast = Some(Type::i8(ccx)), + 2 => arg.cast = Some(Type::i16(ccx)), + 4 => arg.cast = Some(Type::i32(ccx)), + 8 => arg.cast = Some(Type::i64(ccx)), + _ => arg.make_indirect(ccx) + } + } + return; + } + + if is_reg_ty(arg.ty) { + arg.extend_integer_width_to(64); + } else { + arg.make_indirect(ccx); + } +} + +fn is_reg_ty(ty: Type) -> bool { + match ty.kind() { + Integer + | Pointer + | Float + | Double => ty_size(ty) <= 8, + _ => false + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + classify_ret_ty(ccx, &mut fty.ret); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + classify_arg_ty(ccx, arg); + } +} diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs new file mode 100644 index 0000000000000..5377b49a2b441 --- /dev/null +++ b/src/librustc_trans/cabi_x86.rs @@ -0,0 +1,53 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::*; +use abi::{ArgAttribute, FnType}; +use type_::Type; +use super::common::*; +use super::machine::*; + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + if !fty.ret.is_ignore() { + if fty.ret.ty.kind() == Struct { + // Returning a structure. Most often, this will use + // a hidden first argument. On some platforms, though, + // small structs are returned as integers. 
+ // + // Some links: + // http://www.angelcode.com/dev/callconv/callconv.html + // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp + let t = &ccx.sess().target.target; + if t.options.is_like_osx || t.options.is_like_windows { + match llsize_of_alloc(ccx, fty.ret.ty) { + 1 => fty.ret.cast = Some(Type::i8(ccx)), + 2 => fty.ret.cast = Some(Type::i16(ccx)), + 4 => fty.ret.cast = Some(Type::i32(ccx)), + 8 => fty.ret.cast = Some(Type::i64(ccx)), + _ => fty.ret.make_indirect(ccx) + } + } else { + fty.ret.make_indirect(ccx); + } + } else { + fty.ret.extend_integer_width_to(32); + } + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + if arg.ty.kind() == Struct { + arg.make_indirect(ccx); + arg.attrs.set(ArgAttribute::ByVal); + } else { + arg.extend_integer_width_to(32); + } + } +} diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs new file mode 100644 index 0000000000000..7f2fdbf000b65 --- /dev/null +++ b/src/librustc_trans/cabi_x86_64.rs @@ -0,0 +1,398 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +// The classification code for the x86_64 ABI is taken from the clay language +// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp + +#![allow(non_upper_case_globals)] +use self::RegClass::*; + +use llvm::{Integer, Pointer, Float, Double}; +use llvm::{Struct, Array, Vector}; +use abi::{self, ArgType, ArgAttribute, FnType}; +use context::CrateContext; +use type_::Type; + +#[derive(Clone, Copy, PartialEq)] +enum RegClass { + NoClass, + Int, + SSEFs, + SSEFv, + SSEDs, + SSEDv, + SSEInt(/* bitwidth */ u64), + /// Data that can appear in the upper half of an SSE register. + SSEUp, + X87, + X87Up, + ComplexX87, + Memory +} + +trait TypeMethods { + fn is_reg_ty(&self) -> bool; +} + +impl TypeMethods for Type { + fn is_reg_ty(&self) -> bool { + match self.kind() { + Integer | Pointer | Float | Double => true, + _ => false + } + } +} + +impl RegClass { + fn is_sse(&self) -> bool { + match *self { + SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true, + _ => false + } + } +} + +trait ClassList { + fn is_pass_byval(&self) -> bool; + fn is_ret_bysret(&self) -> bool; +} + +impl ClassList for [RegClass] { + fn is_pass_byval(&self) -> bool { + if self.is_empty() { return false; } + + let class = self[0]; + class == Memory + || class == X87 + || class == ComplexX87 + } + + fn is_ret_bysret(&self) -> bool { + if self.is_empty() { return false; } + + self[0] == Memory + } +} + +fn classify_ty(ty: Type) -> Vec { + fn align(off: usize, ty: Type) -> usize { + let a = ty_align(ty); + return (off + a - 1) / a * a; + } + + fn ty_align(ty: Type) -> usize { + abi::ty_align(ty, 8) + } + + fn ty_size(ty: Type) -> usize { + abi::ty_size(ty, 8) + } + + fn all_mem(cls: &mut [RegClass]) { + for elt in cls { + *elt = Memory; + } + } + + fn unify(cls: &mut [RegClass], + i: usize, + newv: RegClass) { + if cls[i] == newv { return } + + let to_write = match (cls[i], newv) { + (NoClass, _) => newv, + (_, NoClass) => return, + + (Memory, _) | + (_, Memory) => 
Memory, + + (Int, _) | + (_, Int) => Int, + + (X87, _) | + (X87Up, _) | + (ComplexX87, _) | + (_, X87) | + (_, X87Up) | + (_, ComplexX87) => Memory, + + (SSEFv, SSEUp) | + (SSEFs, SSEUp) | + (SSEDv, SSEUp) | + (SSEDs, SSEUp) | + (SSEInt(_), SSEUp) => return, + + (..) => newv + }; + cls[i] = to_write; + } + + fn classify_struct(tys: &[Type], + cls: &mut [RegClass], + i: usize, + off: usize, + packed: bool) { + let mut field_off = off; + for ty in tys { + if !packed { + field_off = align(field_off, *ty); + } + classify(*ty, cls, i, field_off); + field_off += ty_size(*ty); + } + } + + fn classify(ty: Type, + cls: &mut [RegClass], ix: usize, + off: usize) { + let t_align = ty_align(ty); + let t_size = ty_size(ty); + + let misalign = off % t_align; + if misalign != 0 { + let mut i = off / 8; + let e = (off + t_size + 7) / 8; + while i < e { + unify(cls, ix + i, Memory); + i += 1; + } + return; + } + + match ty.kind() { + Integer | + Pointer => { + unify(cls, ix + off / 8, Int); + } + Float => { + if off % 8 == 4 { + unify(cls, ix + off / 8, SSEFv); + } else { + unify(cls, ix + off / 8, SSEFs); + } + } + Double => { + unify(cls, ix + off / 8, SSEDs); + } + Struct => { + classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed()); + } + Array => { + let len = ty.array_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + let mut i = 0; + while i < len { + classify(elt, cls, ix, off + i * eltsz); + i += 1; + } + } + Vector => { + let len = ty.vector_length(); + let elt = ty.element_type(); + let eltsz = ty_size(elt); + let mut reg = match elt.kind() { + Integer => SSEInt(elt.int_width()), + Float => SSEFv, + Double => SSEDv, + _ => bug!("classify: unhandled vector element type") + }; + + let mut i = 0; + while i < len { + unify(cls, ix + (off + i * eltsz) / 8, reg); + + // everything after the first one is the upper + // half of a register. 
+ reg = SSEUp; + i += 1; + } + } + _ => bug!("classify: unhandled type") + } + } + + fn fixup(ty: Type, cls: &mut [RegClass]) { + let mut i = 0; + let ty_kind = ty.kind(); + let e = cls.len(); + if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) { + if cls[i].is_sse() { + i += 1; + while i < e { + if cls[i] != SSEUp { + all_mem(cls); + return; + } + i += 1; + } + } else { + all_mem(cls); + return + } + } else { + while i < e { + if cls[i] == Memory { + all_mem(cls); + return; + } + if cls[i] == X87Up { + // for darwin + // cls[i] = SSEDs; + all_mem(cls); + return; + } + if cls[i] == SSEUp { + cls[i] = SSEDv; + } else if cls[i].is_sse() { + i += 1; + while i != e && cls[i] == SSEUp { i += 1; } + } else if cls[i] == X87 { + i += 1; + while i != e && cls[i] == X87Up { i += 1; } + } else { + i += 1; + } + } + } + } + + let words = (ty_size(ty) + 7) / 8; + let mut cls = vec![NoClass; words]; + if words > 4 { + all_mem(&mut cls); + return cls; + } + classify(ty, &mut cls, 0, 0); + fixup(ty, &mut cls); + return cls; +} + +fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type { + fn llvec_len(cls: &[RegClass]) -> usize { + let mut len = 1; + for c in cls { + if *c != SSEUp { + break; + } + len += 1; + } + return len; + } + + let mut tys = Vec::new(); + let mut i = 0; + let e = cls.len(); + while i < e { + match cls[i] { + Int => { + tys.push(Type::i64(ccx)); + } + SSEFv | SSEDv | SSEInt(_) => { + let (elts_per_word, elt_ty) = match cls[i] { + SSEFv => (2, Type::f32(ccx)), + SSEDv => (1, Type::f64(ccx)), + SSEInt(bits) => { + assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64, + "llreg_ty: unsupported SSEInt width {}", bits); + (64 / bits, Type::ix(ccx, bits)) + } + _ => bug!(), + }; + let vec_len = llvec_len(&cls[i + 1..]); + let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word); + tys.push(vec_ty); + i += vec_len; + continue; + } + SSEFs => { + tys.push(Type::f32(ccx)); + } + SSEDs => { + tys.push(Type::f64(ccx)); + 
} + _ => bug!("llregtype: unhandled class") + } + i += 1; + } + if tys.len() == 1 && tys[0].kind() == Vector { + // if the type contains only a vector, pass it as that vector. + tys[0] + } else { + Type::struct_(ccx, &tys, false) + } +} + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + fn x86_64_ty(ccx: &CrateContext, + arg: &mut ArgType, + is_mem_cls: F, + ind_attr: Option) + where F: FnOnce(&[RegClass]) -> bool + { + if !arg.ty.is_reg_ty() { + let cls = classify_ty(arg.ty); + if is_mem_cls(&cls) { + arg.make_indirect(ccx); + if let Some(attr) = ind_attr { + arg.attrs.set(attr); + } + } else { + arg.cast = Some(llreg_ty(ccx, &cls)); + } + } else { + arg.extend_integer_width_to(32); + } + } + + let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 + let mut sse_regs = 8; // XMM0-7 + + if !fty.ret.is_ignore() { + x86_64_ty(ccx, &mut fty.ret, |cls| { + if cls.is_ret_bysret() { + // `sret` parameter thus one less register available + int_regs -= 1; + true + } else { + false + } + }, None); + } + + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + x86_64_ty(ccx, arg, |cls| { + let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize; + let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize; + let in_mem = cls.is_pass_byval() || + int_regs < needed_int || + sse_regs < needed_sse; + if in_mem { + // `byval` parameter thus one less integer register available + int_regs -= 1; + } else { + // split into sized chunks passed individually + int_regs -= needed_int; + sse_regs -= needed_sse; + } + in_mem + }, Some(ArgAttribute::ByVal)); + + // An integer, pointer, double or float parameter + // thus the above closure passed to `x86_64_ty` won't + // get called. 
+ match arg.ty.kind() { + Integer | Pointer => int_regs -= 1, + Double | Float => sse_regs -= 1, + _ => {} + } + } +} diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs new file mode 100644 index 0000000000000..71ecb6e9ca104 --- /dev/null +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -0,0 +1,41 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::*; +use super::common::*; +use super::machine::*; +use abi::{ArgType, FnType}; +use type_::Type; + +// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + +pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) { + let fixup = |a: &mut ArgType| { + if a.ty.kind() == Struct { + match llsize_of_alloc(ccx, a.ty) { + 1 => a.cast = Some(Type::i8(ccx)), + 2 => a.cast = Some(Type::i16(ccx)), + 4 => a.cast = Some(Type::i32(ccx)), + 8 => a.cast = Some(Type::i64(ccx)), + _ => a.make_indirect(ccx) + } + } else { + a.extend_integer_width_to(32); + } + }; + + if !fty.ret.is_ignore() { + fixup(&mut fty.ret); + } + for arg in &mut fty.args { + if arg.is_ignore() { continue; } + fixup(arg); + } +} diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs new file mode 100644 index 0000000000000..df56e27128c7e --- /dev/null +++ b/src/librustc_trans/callee.rs @@ -0,0 +1,723 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Handles translation of callees as well as other call-related +//! things. Callees are a superset of normal rust values and sometimes +//! have different representations. In particular, top-level fn items +//! and methods are represented as just a fn ptr and not a full +//! closure. + +pub use self::CalleeData::*; + +use arena::TypedArena; +use llvm::{self, ValueRef, get_params}; +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::traits; +use abi::{Abi, FnType}; +use attributes; +use base; +use base::*; +use build::*; +use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext}; +use consts; +use debuginfo::DebugLoc; +use declare; +use value::Value; +use meth; +use monomorphize::{self, Instance}; +use trans_item::TransItem; +use type_of; +use Disr; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::hir; + +use syntax_pos::DUMMY_SP; + +#[derive(Debug)] +pub enum CalleeData { + /// Constructor for enum variant/tuple-like-struct. + NamedTupleConstructor(Disr), + + /// Function pointer. + Fn(ValueRef), + + Intrinsic, + + /// Trait object found in the vtable at that index. + Virtual(usize) +} + +#[derive(Debug)] +pub struct Callee<'tcx> { + pub data: CalleeData, + pub ty: Ty<'tcx> +} + +impl<'tcx> Callee<'tcx> { + /// Function pointer. + pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> { + Callee { + data: Fn(llfn), + ty: ty + } + } + + /// Trait or impl method call. + pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>, + method_call: ty::MethodCall) + -> Callee<'tcx> { + let method = bcx.tcx().tables().method_map[&method_call]; + Callee::method(bcx, method) + } + + /// Trait or impl method. + pub fn method<'blk>(bcx: Block<'blk, 'tcx>, + method: ty::MethodCallee<'tcx>) -> Callee<'tcx> { + let substs = bcx.fcx.monomorphize(&method.substs); + Callee::def(bcx.ccx(), method.def_id, substs) + } + + /// Function or method definition. 
+ pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) + -> Callee<'tcx> { + let tcx = ccx.tcx(); + + if let Some(trait_id) = tcx.trait_of_item(def_id) { + return Callee::trait_method(ccx, trait_id, def_id, substs); + } + + let fn_ty = def_ty(ccx.shared(), def_id, substs); + if let ty::TyFnDef(.., f) = fn_ty.sty { + if f.abi == Abi::RustIntrinsic || f.abi == Abi::PlatformIntrinsic { + return Callee { + data: Intrinsic, + ty: fn_ty + }; + } + } + + // FIXME(eddyb) Detect ADT constructors more efficiently. + if let Some(adt_def) = fn_ty.fn_ret().skip_binder().ty_adt_def() { + if let Some(v) = adt_def.variants.iter().find(|v| def_id == v.did) { + return Callee { + data: NamedTupleConstructor(Disr::from(v.disr_val)), + ty: fn_ty + }; + } + } + + let (llfn, ty) = get_fn(ccx, def_id, substs); + Callee::ptr(llfn, ty) + } + + /// Trait method, which has to be resolved to an impl method. + pub fn trait_method<'a>(ccx: &CrateContext<'a, 'tcx>, + trait_id: DefId, + def_id: DefId, + substs: &'tcx Substs<'tcx>) + -> Callee<'tcx> { + let tcx = ccx.tcx(); + + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, substs); + let trait_ref = tcx.normalize_associated_type(&ty::Binder(trait_ref)); + match common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref) { + traits::VtableImpl(vtable_impl) => { + let name = tcx.item_name(def_id); + let (def_id, substs) = traits::find_method(tcx, name, substs, &vtable_impl); + + // Translate the function, bypassing Callee::def. + // That is because default methods have the same ID as the + // trait method used to look up the impl method that ended + // up here, so calling Callee::def would infinitely recurse. 
+ let (llfn, ty) = get_fn(ccx, def_id, substs); + Callee::ptr(llfn, ty) + } + traits::VtableClosure(vtable_closure) => { + // The substitutions should have no type parameters remaining + // after passing through fulfill_obligation + let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + let instance = Instance::new(def_id, substs); + let llfn = trans_closure_method( + ccx, + vtable_closure.closure_def_id, + vtable_closure.substs, + instance, + trait_closure_kind); + + let method_ty = def_ty(ccx.shared(), def_id, substs); + Callee::ptr(llfn, method_ty) + } + traits::VtableFnPointer(vtable_fn_pointer) => { + let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap(); + let instance = Instance::new(def_id, substs); + let llfn = trans_fn_pointer_shim(ccx, instance, + trait_closure_kind, + vtable_fn_pointer.fn_ty); + + let method_ty = def_ty(ccx.shared(), def_id, substs); + Callee::ptr(llfn, method_ty) + } + traits::VtableObject(ref data) => { + Callee { + data: Virtual(tcx.get_vtable_index_of_object_method(data, def_id)), + ty: def_ty(ccx.shared(), def_id, substs) + } + } + vtable => { + bug!("resolved vtable bad vtable {:?} in trans", vtable); + } + } + } + + /// Get the abi::FnType for a direct call. Mainly deals with the fact + /// that a Virtual call doesn't take the vtable, like its shim does. + /// The extra argument types are for variadic (extern "C") functions. + pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + extra_args: &[Ty<'tcx>]) -> FnType { + let abi = self.ty.fn_abi(); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(self.ty.fn_sig()); + let mut fn_ty = FnType::unadjusted(ccx, abi, &sig, extra_args); + if let Virtual(_) = self.data { + // Don't pass the vtable, it's not an argument of the virtual fn. + fn_ty.args[1].ignore(); + } + fn_ty.adjust_for_abi(ccx, abi, &sig); + fn_ty + } + + /// This behemoth of a function translates function calls. 
Unfortunately, in + /// order to generate more efficient LLVM output at -O0, it has quite a complex + /// signature (refactoring this into two functions seems like a good idea). + /// + /// In particular, for lang items, it is invoked with a dest of None, and in + /// that case the return value contains the result of the fn. The lang item must + /// not return a structural type or else all heck breaks loose. + /// + /// For non-lang items, `dest` is always Some, and hence the result is written + /// into memory somewhere. Nonetheless we return the actual return value of the + /// function. + pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>, + debug_loc: DebugLoc, + args: &[ValueRef], + dest: Option) + -> Result<'blk, 'tcx> { + trans_call_inner(bcx, debug_loc, self, args, dest) + } + + /// Turn the callee into a function pointer. + pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + match self.data { + Fn(llfn) => llfn, + Virtual(_) => meth::trans_object_shim(ccx, self), + NamedTupleConstructor(disr) => match self.ty.sty { + ty::TyFnDef(def_id, substs, _) => { + let instance = Instance::new(def_id, substs); + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { + return llfn; + } + + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + assert!(!ccx.codegen_unit().contains_item(&TransItem::Fn(instance))); + let lldecl = declare::define_internal_fn(ccx, &sym, self.ty); + base::trans_ctor_shim(ccx, def_id, substs, disr, lldecl); + ccx.instances().borrow_mut().insert(instance, lldecl); + + lldecl + } + _ => bug!("expected fn item type, found {}", self.ty) + }, + Intrinsic => bug!("intrinsic {} getting reified", self.ty) + } + } +} + +/// Given a DefId and some Substs, produces the monomorphic item type. 
+fn def_ty<'a, 'tcx>(shared: &SharedCrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) + -> Ty<'tcx> { + let ty = shared.tcx().item_type(def_id); + monomorphize::apply_param_substs(shared, substs, &ty) +} + + +fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + method_instance: Instance<'tcx>, + trait_closure_kind: ty::ClosureKind) + -> ValueRef +{ + // If this is a closure, redirect to it. + let (llfn, _) = get_fn(ccx, def_id, substs.substs); + + // If the closure is a Fn closure, but a FnOnce is needed (etc), + // then adapt the self type + let llfn_closure_kind = ccx.tcx().closure_kind(def_id); + + let _icx = push_ctxt("trans_closure_adapter_shim"); + + debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ + trait_closure_kind={:?}, llfn={:?})", + llfn_closure_kind, trait_closure_kind, Value(llfn)); + + match (llfn_closure_kind, trait_closure_kind) { + (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | + (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => { + // No adapter needed. + llfn + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => { + // The closure fn `llfn` is a `fn(&self, ...)`. We want a + // `fn(&mut self, ...)`. In fact, at trans time, these are + // basically the same thing, so we can just return llfn. + llfn + } + (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { + // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut + // self, ...)`. We want a `fn(self, ...)`. We can produce + // this by doing something like: + // + // fn call_once(self, ...) { call_mut(&self, ...) } + // fn call_once(mut self, ...) { call_mut(&mut self, ...) } + // + // These are both the same at trans time. 
+ trans_fn_once_adapter_shim(ccx, def_id, substs, method_instance, llfn) + } + _ => { + bug!("trans_closure_adapter_shim: cannot convert {:?} to {:?}", + llfn_closure_kind, + trait_closure_kind); + } + } +} + +fn trans_fn_once_adapter_shim<'a, 'tcx>( + ccx: &'a CrateContext<'a, 'tcx>, + def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + method_instance: Instance<'tcx>, + llreffn: ValueRef) + -> ValueRef +{ + if let Some(&llfn) = ccx.instances().borrow().get(&method_instance) { + return llfn; + } + + debug!("trans_fn_once_adapter_shim(def_id={:?}, substs={:?}, llreffn={:?})", + def_id, substs, Value(llreffn)); + + let tcx = ccx.tcx(); + + // Find a version of the closure type. Substitute static for the + // region since it doesn't really matter. + let closure_ty = tcx.mk_closure_from_closure_substs(def_id, substs); + let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReErased), closure_ty); + + // Make a version with the type of by-ref closure. + let ty::ClosureTy { unsafety, abi, mut sig } = tcx.closure_type(def_id, substs); + sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet + let llref_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: unsafety, + abi: abi, + sig: sig.clone() + })); + debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}", + llref_fn_ty); + + + // Make a version of the closure type with the same arguments, but + // with argument #0 being by value. + assert_eq!(abi, Abi::RustCall); + sig.0.inputs[0] = closure_ty; + + let sig = tcx.erase_late_bound_regions_and_normalize(&sig); + let fn_ty = FnType::new(ccx, abi, &sig, &[]); + + let llonce_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: unsafety, + abi: abi, + sig: ty::Binder(sig) + })); + + // Create the by-value helper. 
+ let function_name = method_instance.symbol_name(ccx.shared()); + let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); + attributes::set_frame_pointer_elimination(ccx, lloncefn); + + let (block_arena, fcx): (TypedArena<_>, FunctionContext); + block_arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); + let mut bcx = fcx.init(false); + + + // the first argument (`self`) will be the (by value) closure env. + + let mut llargs = get_params(fcx.llfn); + let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let env_arg = &fcx.fn_ty.args[0]; + let llenv = if env_arg.is_indirect() { + llargs[self_idx] + } else { + let scratch = alloc_ty(bcx, closure_ty, "self"); + let mut llarg_idx = self_idx; + env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch); + scratch + }; + + debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv)); + // Adjust llargs such that llargs[self_idx..] has the call arguments. + // For zero-sized closures that means sneaking in a new argument. + if env_arg.is_ignore() { + if self_idx > 0 { + self_idx -= 1; + llargs[self_idx] = llenv; + } else { + llargs.insert(0, llenv); + } + } else { + llargs[self_idx] = llenv; + } + + let dest = fcx.llretslotptr.get(); + + let callee = Callee { + data: Fn(llreffn), + ty: llref_fn_ty + }; + + // Call the by-ref closure body with `self` in a cleanup scope, + // to drop `self` when the body returns, or in case it unwinds. + let self_scope = fcx.push_custom_cleanup_scope(); + fcx.schedule_drop_mem(self_scope, llenv, closure_ty); + + bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx; + + fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); + + fcx.finish(bcx, DebugLoc::None); + + ccx.instances().borrow_mut().insert(method_instance, lloncefn); + + lloncefn +} + +/// Translates an adapter that implements the `Fn` trait for a fn +/// pointer. 
This is basically the equivalent of something like: +/// +/// ``` +/// impl<'a> Fn(&'a int) -> &'a int for fn(&int) -> &int { +/// extern "rust-abi" fn call(&self, args: (&'a int,)) -> &'a int { +/// (*self)(args.0) +/// } +/// } +/// ``` +/// +/// but for the bare function type given. +fn trans_fn_pointer_shim<'a, 'tcx>( + ccx: &'a CrateContext<'a, 'tcx>, + method_instance: Instance<'tcx>, + closure_kind: ty::ClosureKind, + bare_fn_ty: Ty<'tcx>) + -> ValueRef +{ + let _icx = push_ctxt("trans_fn_pointer_shim"); + let tcx = ccx.tcx(); + + // Normalize the type for better caching. + let bare_fn_ty = tcx.normalize_associated_type(&bare_fn_ty); + + // If this is an impl of `Fn` or `FnMut` trait, the receiver is `&self`. + let is_by_ref = match closure_kind { + ty::ClosureKind::Fn | ty::ClosureKind::FnMut => true, + ty::ClosureKind::FnOnce => false, + }; + + let llfnpointer = match bare_fn_ty.sty { + ty::TyFnDef(def_id, substs, _) => { + // Function definitions have to be turned into a pointer. + let llfn = Callee::def(ccx, def_id, substs).reify(ccx); + if !is_by_ref { + // A by-value fn item is ignored, so the shim has + // the same signature as the original function. + return llfn; + } + Some(llfn) + } + _ => None + }; + + let bare_fn_ty_maybe_ref = if is_by_ref { + tcx.mk_imm_ref(tcx.mk_region(ty::ReErased), bare_fn_ty) + } else { + bare_fn_ty + }; + + // Check if we already trans'd this shim. + if let Some(&llval) = ccx.fn_pointer_shims().borrow().get(&bare_fn_ty_maybe_ref) { + return llval; + } + + debug!("trans_fn_pointer_shim(bare_fn_ty={:?})", + bare_fn_ty); + + // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`, + // which is the fn pointer, and `args`, which is the arguments tuple. 
+ let sig = match bare_fn_ty.sty { + ty::TyFnDef(.., + &ty::BareFnTy { unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + ref sig }) | + ty::TyFnPtr(&ty::BareFnTy { unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + ref sig }) => sig, + + _ => { + bug!("trans_fn_pointer_shim invoked on invalid type: {}", + bare_fn_ty); + } + }; + let sig = tcx.erase_late_bound_regions_and_normalize(sig); + let tuple_input_ty = tcx.intern_tup(&sig.inputs[..]); + let sig = ty::FnSig { + inputs: vec![bare_fn_ty_maybe_ref, + tuple_input_ty], + output: sig.output, + variadic: false + }; + let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]); + let tuple_fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: Abi::RustCall, + sig: ty::Binder(sig) + })); + debug!("tuple_fn_ty: {:?}", tuple_fn_ty); + + // + let function_name = method_instance.symbol_name(ccx.shared()); + let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); + attributes::set_frame_pointer_elimination(ccx, llfn); + // + let (block_arena, fcx): (TypedArena<_>, FunctionContext); + block_arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); + let mut bcx = fcx.init(false); + + let llargs = get_params(fcx.llfn); + + let self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let llfnpointer = llfnpointer.unwrap_or_else(|| { + // the first argument (`self`) will be ptr to the fn pointer + if is_by_ref { + Load(bcx, llargs[self_idx]) + } else { + llargs[self_idx] + } + }); + + let dest = fcx.llretslotptr.get(); + + let callee = Callee { + data: Fn(llfnpointer), + ty: bare_fn_ty + }; + bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx; + + fcx.finish(bcx, DebugLoc::None); + + ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); + + llfn +} + +/// Translates a reference to a fn/method item, monomorphizing and +/// inlining as it goes. 
+/// +/// # Parameters +/// +/// - `ccx`: the crate context +/// - `def_id`: def id of the fn or method item being referenced +/// - `substs`: values for each of the fn/method's parameters +fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>) + -> (ValueRef, Ty<'tcx>) { + let tcx = ccx.tcx(); + + debug!("get_fn(def_id={:?}, substs={:?})", def_id, substs); + + assert!(!substs.needs_infer()); + assert!(!substs.has_escaping_regions()); + assert!(!substs.has_param_types()); + + let substs = tcx.normalize_associated_type(&substs); + let instance = Instance::new(def_id, substs); + let item_ty = ccx.tcx().item_type(def_id); + let fn_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &item_ty); + + if let Some(&llfn) = ccx.instances().borrow().get(&instance) { + return (llfn, fn_ty); + } + + let sym = ccx.symbol_map().get_or_compute(ccx.shared(), + TransItem::Fn(instance)); + debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym); + + // This is subtle and surprising, but sometimes we have to bitcast + // the resulting fn pointer. The reason has to do with external + // functions. If you have two crates that both bind the same C + // library, they may not use precisely the same types: for + // example, they will probably each declare their own structs, + // which are distinct types from LLVM's point of view (nominal + // types). + // + // Now, if those two crates are linked into an application, and + // they contain inlined code, you can wind up with a situation + // where both of those functions wind up being loaded into this + // application simultaneously. In that case, the same function + // (from LLVM's point of view) requires two types. But of course + // LLVM won't allow one function to have two types. 
+ // + // What we currently do, therefore, is declare the function with + // one of the two types (whichever happens to come first) and then + // bitcast as needed when the function is referenced to make sure + // it has the type we expect. + // + // This can occur on either a crate-local or crate-external + // reference. It also occurs when testing libcore and in some + // other weird situations. Annoying. + + // Create a fn pointer with the substituted signature. + let fn_ptr_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(common::ty_fn_ty(ccx, fn_ty).into_owned())); + let llptrty = type_of::type_of(ccx, fn_ptr_ty); + + let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { + if common::val_ty(llfn) != llptrty { + debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); + consts::ptrcast(llfn, llptrty) + } else { + debug!("get_fn: not casting pointer!"); + llfn + } + } else { + let llfn = declare::declare_fn(ccx, &sym, fn_ty); + assert_eq!(common::val_ty(llfn), llptrty); + debug!("get_fn: not casting pointer!"); + + let attrs = ccx.tcx().get_attrs(def_id); + attributes::from_fn_attrs(ccx, &attrs, llfn); + + let is_local_def = ccx.shared().translation_items().borrow() + .contains(&TransItem::Fn(instance)); + if is_local_def { + // FIXME(eddyb) Doubt all extern fn should allow unwinding. + attributes::unwind(llfn, true); + unsafe { + llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage); + } + } + + llfn + }; + + ccx.instances().borrow_mut().insert(instance, llfn); + + (llfn, fn_ty) +} + +// ______________________________________________________________________ +// Translating calls + +fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + debug_loc: DebugLoc, + callee: Callee<'tcx>, + args: &[ValueRef], + opt_llretslot: Option) + -> Result<'blk, 'tcx> { + // Introduce a temporary cleanup scope that will contain cleanups + // for the arguments while they are being evaluated. 
The purpose + // this cleanup is to ensure that, should a panic occur while + // evaluating argument N, the values for arguments 0...N-1 are all + // cleaned up. If no panic occurs, the values are handed off to + // the callee, and hence none of the cleanups in this temporary + // scope will ever execute. + let fcx = bcx.fcx; + let ccx = fcx.ccx; + + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); + + let mut callee = match callee.data { + NamedTupleConstructor(_) | Intrinsic => { + bug!("{:?} calls should not go through Callee::call", callee); + } + f => f + }; + + // If there no destination, return must be direct, with no cast. + if opt_llretslot.is_none() { + assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); + } + + let mut llargs = Vec::new(); + + if fn_ty.ret.is_indirect() { + let mut llretslot = opt_llretslot.unwrap(); + if let Some(ty) = fn_ty.ret.cast { + llretslot = PointerCast(bcx, llretslot, ty.ptr_to()); + } + llargs.push(llretslot); + } + + match callee { + Virtual(idx) => { + llargs.push(args[0]); + + let fn_ptr = meth::get_virtual_method(bcx, args[1], idx); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + callee = Fn(PointerCast(bcx, fn_ptr, llty)); + llargs.extend_from_slice(&args[2..]); + } + _ => llargs.extend_from_slice(args) + } + + let llfn = match callee { + Fn(f) => f, + _ => bug!("expected fn pointer callee, found {:?}", callee) + }; + + let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); + if !bcx.unreachable.get() { + fn_ty.apply_attrs_callsite(llret); + + // If the function we just called does not use an outpointer, + // store the result into the rust outpointer. Cast the outpointer + // type to match because some ABIs will use a different type than + // the Rust type. e.g., a {u32,u32} struct could be returned as + // u64. 
+ if !fn_ty.ret.is_indirect() { + if let Some(llretslot) = opt_llretslot { + fn_ty.ret.store(&bcx.build(), llret, llretslot); + } + } + } + + if fn_ret.0.is_never() { + Unreachable(bcx); + } + + Result::new(bcx, llret) +} diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs new file mode 100644 index 0000000000000..b9f24eba9dc1e --- /dev/null +++ b/src/librustc_trans/cleanup.rs @@ -0,0 +1,704 @@ +// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! ## The Cleanup module +//! +//! The cleanup module tracks what values need to be cleaned up as scopes +//! are exited, either via panic or just normal control flow. The basic +//! idea is that the function context maintains a stack of cleanup scopes +//! that are pushed/popped as we traverse the AST tree. There is typically +//! at least one cleanup scope per AST node; some AST nodes may introduce +//! additional temporary scopes. +//! +//! Cleanup items can be scheduled into any of the scopes on the stack. +//! Typically, when a scope is popped, we will also generate the code for +//! each of its cleanups at that time. This corresponds to a normal exit +//! from a block (for example, an expression completing evaluation +//! successfully without panic). However, it is also possible to pop a +//! block *without* executing its cleanups; this is typically used to +//! guard intermediate values that must be cleaned up on panic, but not +//! if everything goes right. See the section on custom scopes below for +//! more details. +//! +//! Cleanup scopes come in three kinds: +//! +//! - **AST scopes:** each AST node in a function body has a corresponding +//! AST scope. 
We push the AST scope when we start generate code for an AST +//! node and pop it once the AST node has been fully generated. +//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are +//! never scheduled into loop scopes; instead, they are used to record the +//! basic blocks that we should branch to when a `continue` or `break` statement +//! is encountered. +//! - **Custom scopes:** custom scopes are typically used to ensure cleanup +//! of intermediate values. +//! +//! ### When to schedule cleanup +//! +//! Although the cleanup system is intended to *feel* fairly declarative, +//! it's still important to time calls to `schedule_clean()` correctly. +//! Basically, you should not schedule cleanup for memory until it has +//! been initialized, because if an unwind should occur before the memory +//! is fully initialized, then the cleanup will run and try to free or +//! drop uninitialized memory. If the initialization itself produces +//! byproducts that need to be freed, then you should use temporary custom +//! scopes to ensure that those byproducts will get freed on unwind. For +//! example, an expression like `box foo()` will first allocate a box in the +//! heap and then call `foo()` -- if `foo()` should panic, this box needs +//! to be *shallowly* freed. +//! +//! ### Long-distance jumps +//! +//! In addition to popping a scope, which corresponds to normal control +//! flow exiting the scope, we may also *jump out* of a scope into some +//! earlier scope on the stack. This can occur in response to a `return`, +//! `break`, or `continue` statement, but also in response to panic. In +//! any of these cases, we will generate a series of cleanup blocks for +//! each of the scopes that is exited. So, if the stack contains scopes A +//! ... Z, and we break out of a loop whose corresponding cleanup scope is +//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z. +//! 
After cleanup is done we would branch to the exit point for scope X. +//! But if panic should occur, we would generate cleanups for all the +//! scopes from A to Z and then resume the unwind process afterwards. +//! +//! To avoid generating tons of code, we cache the cleanup blocks that we +//! create for breaks, returns, unwinds, and other jumps. Whenever a new +//! cleanup is scheduled, though, we must clear these cached blocks. A +//! possible improvement would be to keep the cached blocks but simply +//! generate a new block which performs the additional cleanup and then +//! branches to the existing cached blocks. +//! +//! ### AST and loop cleanup scopes +//! +//! AST cleanup scopes are pushed when we begin and end processing an AST +//! node. They are used to house cleanups related to rvalue temporary that +//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an +//! AST scope is popped, we always trans all the cleanups, adding the cleanup +//! code after the postdominator of the AST node. +//! +//! AST nodes that represent breakable loops also push a loop scope; the +//! loop scope never has any actual cleanups, it's just used to point to +//! the basic blocks where control should flow after a "continue" or +//! "break" statement. Popping a loop scope never generates code. +//! +//! ### Custom cleanup scopes +//! +//! Custom cleanup scopes are used for a variety of purposes. The most +//! common though is to handle temporary byproducts, where cleanup only +//! needs to occur on panic. The general strategy is to push a custom +//! cleanup scope, schedule *shallow* cleanups into the custom scope, and +//! then pop the custom scope (without transing the cleanups) when +//! execution succeeds normally. This way the cleanups are only trans'd on +//! unwind, and only up until the point where execution succeeded, at +//! which time the complete value should be stored in an lvalue or some +//! other place where normal cleanup applies. +//! +//! 
To spell it out, here is an example. Imagine an expression `box expr`. +//! We would basically: +//! +//! 1. Push a custom cleanup scope C. +//! 2. Allocate the box. +//! 3. Schedule a shallow free in the scope C. +//! 4. Trans `expr` into the box. +//! 5. Pop the scope C. +//! 6. Return the box as an rvalue. +//! +//! This way, if a panic occurs while transing `expr`, the custom +//! cleanup scope C is pushed and hence the box will be freed. The trans +//! code for `expr` itself is responsible for freeing any other byproducts +//! that may be in play. + +pub use self::EarlyExitLabel::*; + +use llvm::{BasicBlockRef, ValueRef}; +use base; +use build; +use common; +use common::{Block, FunctionContext, LandingPad}; +use debuginfo::{DebugLoc}; +use glue; +use type_::Type; +use value::Value; +use rustc::ty::Ty; + +pub struct CleanupScope<'tcx> { + // Cleanups to run upon scope exit. + cleanups: Vec>, + + // The debug location any drop calls generated for this scope will be + // associated with. 
+ debug_loc: DebugLoc, + + cached_early_exits: Vec, + cached_landing_pad: Option, +} + +#[derive(Copy, Clone, Debug)] +pub struct CustomScopeIndex { + index: usize +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum EarlyExitLabel { + UnwindExit(UnwindKind), +} + +#[derive(Copy, Clone, Debug)] +pub enum UnwindKind { + LandingPad, + CleanupPad(ValueRef), +} + +#[derive(Copy, Clone)] +pub struct CachedEarlyExit { + label: EarlyExitLabel, + cleanup_block: BasicBlockRef, + last_cleanup: usize, +} + +impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { + pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { + let index = self.scopes_len(); + debug!("push_custom_cleanup_scope(): {}", index); + + // Just copy the debuginfo source location from the enclosing scope + let debug_loc = self.scopes + .borrow() + .last() + .map(|opt_scope| opt_scope.debug_loc) + .unwrap_or(DebugLoc::None); + + self.push_scope(CleanupScope::new(debug_loc)); + CustomScopeIndex { index: index } + } + + /// Removes the top cleanup scope from the stack without executing its cleanups. The top + /// cleanup scope must be the temporary scope `custom_scope`. + pub fn pop_custom_cleanup_scope(&self, + custom_scope: CustomScopeIndex) { + debug!("pop_custom_cleanup_scope({})", custom_scope.index); + assert!(self.is_valid_to_pop_custom_scope(custom_scope)); + let _ = self.pop_scope(); + } + + /// Removes the top cleanup scope from the stack, which must be a temporary scope, and + /// generates the code to do its cleanups for normal exit. 
+ pub fn pop_and_trans_custom_cleanup_scope(&self, + bcx: Block<'blk, 'tcx>, + custom_scope: CustomScopeIndex) + -> Block<'blk, 'tcx> { + debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); + assert!(self.is_valid_to_pop_custom_scope(custom_scope)); + + let scope = self.pop_scope(); + self.trans_scope_cleanups(bcx, &scope) + } + + /// Schedules a (deep) drop of `val`, which is a pointer to an instance of + /// `ty` + pub fn schedule_drop_mem(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { + if !self.type_needs_drop(ty) { return; } + let drop = DropValue { + is_immediate: false, + val: val, + ty: ty, + skip_dtor: false, + }; + + debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}", + cleanup_scope, + Value(val), + ty, + drop.skip_dtor); + + self.schedule_clean(cleanup_scope, drop); + } + + /// Issue #23611: Schedules a (deep) drop of the contents of + /// `val`, which is a pointer to an instance of struct/enum type + /// `ty`. The scheduled code handles extracting the discriminant + /// and dropping the contents associated with that variant + /// *without* executing any associated drop implementation. + pub fn schedule_drop_adt_contents(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { + // `if` below could be "!contents_needs_drop"; skipping drop + // is just an optimization, so sound to be conservative. 
+ if !self.type_needs_drop(ty) { return; } + + let drop = DropValue { + is_immediate: false, + val: val, + ty: ty, + skip_dtor: true, + }; + + debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}", + cleanup_scope, + Value(val), + ty, + drop.skip_dtor); + + self.schedule_clean(cleanup_scope, drop); + } + + /// Schedules a (deep) drop of `val`, which is an instance of `ty` + pub fn schedule_drop_immediate(&self, + cleanup_scope: CustomScopeIndex, + val: ValueRef, + ty: Ty<'tcx>) { + + if !self.type_needs_drop(ty) { return; } + let drop = DropValue { + is_immediate: true, + val: val, + ty: ty, + skip_dtor: false, + }; + + debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}", + cleanup_scope, + Value(val), + ty, + drop.skip_dtor); + + self.schedule_clean(cleanup_scope, drop); + } + + /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. + fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) { + debug!("schedule_clean_in_custom_scope(custom_scope={})", + custom_scope.index); + + assert!(self.is_valid_custom_scope(custom_scope)); + + let mut scopes = self.scopes.borrow_mut(); + let scope = &mut (*scopes)[custom_scope.index]; + scope.cleanups.push(cleanup); + scope.cached_landing_pad = None; + } + + /// Returns true if there are pending cleanups that should execute on panic. + pub fn needs_invoke(&self) -> bool { + self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) + } + + /// Returns a basic block to branch to in the event of a panic. This block + /// will run the panic cleanups and eventually resume the exception that + /// caused the landing pad to be run. 
+ pub fn get_landing_pad(&'blk self) -> BasicBlockRef { + let _icx = base::push_ctxt("get_landing_pad"); + + debug!("get_landing_pad"); + + let orig_scopes_len = self.scopes_len(); + assert!(orig_scopes_len > 0); + + // Remove any scopes that do not have cleanups on panic: + let mut popped_scopes = vec![]; + while !self.top_scope(|s| s.needs_invoke()) { + debug!("top scope does not need invoke"); + popped_scopes.push(self.pop_scope()); + } + + // Check for an existing landing pad in the new topmost scope: + let llbb = self.get_or_create_landing_pad(); + + // Push the scopes we removed back on: + loop { + match popped_scopes.pop() { + Some(scope) => self.push_scope(scope), + None => break + } + } + + assert_eq!(self.scopes_len(), orig_scopes_len); + + return llbb; + } + + fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { + self.is_valid_custom_scope(custom_scope) && + custom_scope.index == self.scopes.borrow().len() - 1 + } + + fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { + let scopes = self.scopes.borrow(); + custom_scope.index < scopes.len() + } + + /// Generates the cleanups for `scope` into `bcx` + fn trans_scope_cleanups(&self, // cannot borrow self, will recurse + bcx: Block<'blk, 'tcx>, + scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> { + + let mut bcx = bcx; + if !bcx.unreachable.get() { + for cleanup in scope.cleanups.iter().rev() { + bcx = cleanup.trans(bcx, scope.debug_loc); + } + } + bcx + } + + fn scopes_len(&self) -> usize { + self.scopes.borrow().len() + } + + fn push_scope(&self, scope: CleanupScope<'tcx>) { + self.scopes.borrow_mut().push(scope) + } + + fn pop_scope(&self) -> CleanupScope<'tcx> { + debug!("popping cleanup scope {}, {} scopes remaining", + self.top_scope(|s| s.block_name("")), + self.scopes_len() - 1); + + self.scopes.borrow_mut().pop().unwrap() + } + + fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { + f(self.scopes.borrow().last().unwrap()) + } 
+ + /// Used when the caller wishes to jump to an early exit, such as a return, + /// break, continue, or unwind. This function will generate all cleanups + /// between the top of the stack and the exit `label` and return a basic + /// block that the caller can branch to. + /// + /// For example, if the current stack of cleanups were as follows: + /// + /// AST 22 + /// Custom 1 + /// AST 23 + /// Loop 23 + /// Custom 2 + /// AST 24 + /// + /// and the `label` specifies a break from `Loop 23`, then this function + /// would generate a series of basic blocks as follows: + /// + /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk + /// + /// where `break_blk` is the block specified in `Loop 23` as the target for + /// breaks. The return value would be the first basic block in that sequence + /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)` + /// and it will perform all cleanups and finally branch to the `break_blk`. + fn trans_cleanups_to_exit_scope(&'blk self, + label: EarlyExitLabel) + -> BasicBlockRef { + debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", + label, self.scopes_len()); + + let orig_scopes_len = self.scopes_len(); + let mut prev_llbb; + let mut popped_scopes = vec![]; + let mut skip = 0; + + // First we pop off all the cleanup stacks that are + // traversed until the exit is reached, pushing them + // onto the side vector `popped_scopes`. No code is + // generated at this time. + // + // So, continuing the example from above, we would wind up + // with a `popped_scopes` vector of `[AST 24, Custom 2]`. 
+ // (Presuming that there are no cached exits) + loop { + if self.scopes_len() == 0 { + match label { + UnwindExit(val) => { + // Generate a block that will resume unwinding to the + // calling function + let bcx = self.new_block("resume"); + match val { + UnwindKind::LandingPad => { + let addr = self.landingpad_alloca.get() + .unwrap(); + let lp = build::Load(bcx, addr); + base::call_lifetime_end(bcx, addr); + base::trans_unwind_resume(bcx, lp); + } + UnwindKind::CleanupPad(_) => { + let pad = build::CleanupPad(bcx, None, &[]); + build::CleanupRet(bcx, pad, None); + } + } + prev_llbb = bcx.llbb; + break; + } + } + } + + // Pop off the scope, since we may be generating + // unwinding code for it. + let top_scope = self.pop_scope(); + let cached_exit = top_scope.cached_early_exit(label); + popped_scopes.push(top_scope); + + // Check if we have already cached the unwinding of this + // scope for this label. If so, we can stop popping scopes + // and branch to the cached label, since it contains the + // cleanups for any subsequent scopes. + if let Some((exit, last_cleanup)) = cached_exit { + prev_llbb = exit; + skip = last_cleanup; + break; + } + } + + debug!("trans_cleanups_to_exit_scope: popped {} scopes", + popped_scopes.len()); + + // Now push the popped scopes back on. As we go, + // we track in `prev_llbb` the exit to which this scope + // should branch when it's done. + // + // So, continuing with our example, we will start out with + // `prev_llbb` being set to `break_blk` (or possibly a cached + // early exit). We will then pop the scopes from `popped_scopes` + // and generate a basic block for each one, prepending it in the + // series and updating `prev_llbb`. So we begin by popping `Custom 2` + // and generating `Cleanup(Custom 2)`. 
We make `Cleanup(Custom 2)` + // branch to `prev_llbb == break_blk`, giving us a sequence like: + // + // Cleanup(Custom 2) -> prev_llbb + // + // We then pop `AST 24` and repeat the process, giving us the sequence: + // + // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb + // + // At this point, `popped_scopes` is empty, and so the final block + // that we return to the user is `Cleanup(AST 24)`. + while let Some(mut scope) = popped_scopes.pop() { + if !scope.cleanups.is_empty() { + let name = scope.block_name("clean"); + debug!("generating cleanups for {}", name); + + let bcx_in = self.new_block(&name[..]); + let exit_label = label.start(bcx_in); + let mut bcx_out = bcx_in; + let len = scope.cleanups.len(); + for cleanup in scope.cleanups.iter().rev().take(len - skip) { + bcx_out = cleanup.trans(bcx_out, scope.debug_loc); + } + skip = 0; + exit_label.branch(bcx_out, prev_llbb); + prev_llbb = bcx_in.llbb; + + scope.add_cached_early_exit(exit_label, prev_llbb, len); + } + self.push_scope(scope); + } + + debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb); + + assert_eq!(self.scopes_len(), orig_scopes_len); + prev_llbb + } + + /// Creates a landing pad for the top scope, if one does not exist. The + /// landing pad will perform all cleanups necessary for an unwind and then + /// `resume` to continue error propagation: + /// + /// landing_pad -> ... cleanups ... -> [resume] + /// + /// (The cleanups and resume instruction are created by + /// `trans_cleanups_to_exit_scope()`, not in this function itself.) + fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { + let pad_bcx; + + debug!("get_or_create_landing_pad"); + + // Check if a landing pad block exists; if not, create one. 
+ { + let mut scopes = self.scopes.borrow_mut(); + let last_scope = scopes.last_mut().unwrap(); + match last_scope.cached_landing_pad { + Some(llbb) => return llbb, + None => { + let name = last_scope.block_name("unwind"); + pad_bcx = self.new_block(&name[..]); + last_scope.cached_landing_pad = Some(pad_bcx.llbb); + } + } + }; + + let llpersonality = pad_bcx.fcx.eh_personality(); + + let val = if base::wants_msvc_seh(self.ccx.sess()) { + // A cleanup pad requires a personality function to be specified, so + // we do that here explicitly (happens implicitly below through + // creation of the landingpad instruction). We then create a + // cleanuppad instruction which has no filters to run cleanup on all + // exceptions. + build::SetPersonalityFn(pad_bcx, llpersonality); + let llretval = build::CleanupPad(pad_bcx, None, &[]); + UnwindKind::CleanupPad(llretval) + } else { + // The landing pad return type (the type being propagated). Not sure + // what this represents but it's determined by the personality + // function and this is what the EH proposal example uses. + let llretty = Type::struct_(self.ccx, + &[Type::i8p(self.ccx), Type::i32(self.ccx)], + false); + + // The only landing pad clause will be 'cleanup' + let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1); + + // The landing pad block is a cleanup + build::SetCleanup(pad_bcx, llretval); + + let addr = match self.landingpad_alloca.get() { + Some(addr) => addr, + None => { + let addr = base::alloca(pad_bcx, common::val_ty(llretval), + ""); + base::call_lifetime_start(pad_bcx, addr); + self.landingpad_alloca.set(Some(addr)); + addr + } + }; + build::Store(pad_bcx, llretval, addr); + UnwindKind::LandingPad + }; + + // Generate the cleanup block and branch to it. 
+ let label = UnwindExit(val); + let cleanup_llbb = self.trans_cleanups_to_exit_scope(label); + label.branch(pad_bcx, cleanup_llbb); + + return pad_bcx.llbb; + } +} + +impl<'tcx> CleanupScope<'tcx> { + fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> { + CleanupScope { + debug_loc: debug_loc, + cleanups: vec![], + cached_early_exits: vec![], + cached_landing_pad: None, + } + } + + fn cached_early_exit(&self, + label: EarlyExitLabel) + -> Option<(BasicBlockRef, usize)> { + self.cached_early_exits.iter().rev(). + find(|e| e.label == label). + map(|e| (e.cleanup_block, e.last_cleanup)) + } + + fn add_cached_early_exit(&mut self, + label: EarlyExitLabel, + blk: BasicBlockRef, + last_cleanup: usize) { + self.cached_early_exits.push( + CachedEarlyExit { label: label, + cleanup_block: blk, + last_cleanup: last_cleanup}); + } + + /// True if this scope has cleanups that need unwinding + fn needs_invoke(&self) -> bool { + self.cached_landing_pad.is_some() || + !self.cleanups.is_empty() + } + + /// Returns a suitable name to use for the basic block that handles this cleanup scope + fn block_name(&self, prefix: &str) -> String { + format!("{}_custom_", prefix) + } +} + +impl EarlyExitLabel { + /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is + /// the exit label attached to the start of `from_bcx`. + /// + /// Transitions from an exit label to other exit labels depend on the type + /// of label. For example with MSVC exceptions unwind exit labels will use + /// the `cleanupret` instruction instead of the `br` instruction. + fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) { + if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self { + build::CleanupRet(from_bcx, pad, Some(to_llbb)); + } else { + build::Br(from_bcx, to_llbb, DebugLoc::None); + } + } + + /// Generates the necessary instructions at the start of `bcx` to prepare + /// for the same kind of early exit label that `self` is. 
+ /// + /// This function will appropriately configure `bcx` based on the kind of + /// label this is. For UnwindExit labels, the `lpad` field of the block will + /// be set to `Some`, and for MSVC exceptions this function will generate a + /// `cleanuppad` instruction at the start of the block so it may be jumped + /// to in the future (e.g. so this block can be cached as an early exit). + /// + /// Returns a new label which will can be used to cache `bcx` in the list of + /// early exits. + fn start(&self, bcx: Block) -> EarlyExitLabel { + match *self { + UnwindExit(UnwindKind::CleanupPad(..)) => { + let pad = build::CleanupPad(bcx, None, &[]); + bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad)))); + UnwindExit(UnwindKind::CleanupPad(pad)) + } + UnwindExit(UnwindKind::LandingPad) => { + bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu()))); + *self + } + } + } +} + +impl PartialEq for UnwindKind { + fn eq(&self, val: &UnwindKind) -> bool { + match (*self, *val) { + (UnwindKind::LandingPad, UnwindKind::LandingPad) | + (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true, + _ => false, + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Cleanup types + +#[derive(Copy, Clone)] +pub struct DropValue<'tcx> { + is_immediate: bool, + val: ValueRef, + ty: Ty<'tcx>, + skip_dtor: bool, +} + +impl<'tcx> DropValue<'tcx> { + fn trans<'blk>(&self, + bcx: Block<'blk, 'tcx>, + debug_loc: DebugLoc) + -> Block<'blk, 'tcx> { + let skip_dtor = self.skip_dtor; + let _icx = if skip_dtor { + base::push_ctxt("::trans skip_dtor=true") + } else { + base::push_ctxt("::trans skip_dtor=false") + }; + let bcx = if self.is_immediate { + glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) + } else { + glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) + }; + bcx + } +} diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs new file mode 100644 index 
0000000000000..087fe4decbf1d --- /dev/null +++ b/src/librustc_trans/collector.rs @@ -0,0 +1,1283 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Translation Item Collection +//! =========================== +//! +//! This module is responsible for discovering all items that will contribute +//! to code generation of the crate. The important part here is that it not only +//! needs to find syntax-level items (functions, structs, etc) but also all +//! their monomorphized instantiations. Every non-generic, non-const function +//! maps to one LLVM artifact. Every generic function can produce +//! from zero to N artifacts, depending on the sets of type arguments it +//! is instantiated with. +//! This also applies to generic items from other crates: A generic definition +//! in crate X might produce monomorphizations that are compiled into crate Y. +//! We also have to collect these here. +//! +//! The following kinds of "translation items" are handled here: +//! +//! - Functions +//! - Methods +//! - Closures +//! - Statics +//! - Drop glue +//! +//! The following things also result in LLVM artifacts, but are not collected +//! here, since we instantiate them locally on demand when needed in a given +//! codegen unit: +//! +//! - Constants +//! - Vtables +//! - Object Shims +//! +//! +//! General Algorithm +//! ----------------- +//! Let's define some terms first: +//! +//! - A "translation item" is something that results in a function or global in +//! the LLVM IR of a codegen unit. Translation items do not stand on their +//! own, they can reference other translation items. For example, if function
`foo()` calls function `bar()` then the translation item for `foo()` +//! references the translation item for function `bar()`. In general, the +//! definition for translation item A referencing a translation item B is that +//! the LLVM artifact produced for A references the LLVM artifact produced +//! for B. +//! +//! - Translation items and the references between them form a directed graph, +//! where the translation items are the nodes and references form the edges. +//! Let's call this graph the "translation item graph". +//! +//! - The translation item graph for a program contains all translation items +//! that are needed in order to produce the complete LLVM IR of the program. +//! +//! The purpose of the algorithm implemented in this module is to build the +//! translation item graph for the current crate. It runs in two phases: +//! +//! 1. Discover the roots of the graph by traversing the HIR of the crate. +//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR +//! representation of the item corresponding to a given node, until no more +//! new nodes are found. +//! +//! ### Discovering roots +//! +//! The roots of the translation item graph correspond to the non-generic +//! syntactic items in the source code. We find them by walking the HIR of the +//! crate, and whenever we hit upon a function, method, or static item, we +//! create a translation item consisting of the item's DefId and, since we only +//! consider non-generic items, an empty type-substitution set. +//! +//! ### Finding neighbor nodes +//! Given a translation item node, we can discover neighbors by inspecting its +//! MIR. We walk the MIR and any time we hit upon something that signifies a +//! reference to another translation item, we have found a neighbor. Since the +//! translation item we are currently at is always monomorphic, we also know the +//! concrete type arguments of its neighbors, and so all neighbors again will be +//! monomorphic.
The specific forms a reference to a neighboring node can take +//! in MIR are quite diverse. Here is an overview: +//! +//! #### Calling Functions/Methods +//! The most obvious form of one translation item referencing another is a +//! function or method call (represented by a CALL terminator in MIR). But +//! calls are not the only thing that might introduce a reference between two +//! function translation items, and as we will see below, they are just a +//! specialized case of the form described next, and consequently don't get any +//! special treatment in the algorithm. +//! +//! #### Taking a reference to a function or method +//! A function does not need to actually be called in order to be a neighbor of +//! another function. It suffices to just take a reference in order to introduce +//! an edge. Consider the following example: +//! +//! ```rust +//! fn print_val<T: Display>(x: T) { +//! println!("{}", x); +//! } +//! +//! fn call_fn(f: &Fn(i32), x: i32) { +//! f(x); +//! } +//! +//! fn main() { +//! let print_i32 = print_val::<i32>; +//! call_fn(&print_i32, 0); +//! } +//! ``` +//! The MIR of none of these functions will contain an explicit call to +//! `print_val::<i32>`. Nonetheless, in order to translate this program, we need +//! an instance of this function. Thus, whenever we encounter a function or +//! method in operand position, we treat it as a neighbor of the current +//! translation item. Calls are just a special case of that. +//! +//! #### Closures +//! In a way, closures are a simple case. Since every closure object needs to be +//! constructed somewhere, we can reliably discover them by observing +//! `RValue::Aggregate` expressions with `AggregateKind::Closure`. This is also +//! true for closures inlined from other crates. +//! +//! #### Drop glue +//! Drop glue translation items are introduced by MIR drop-statements. The +//! generated translation item will again have drop-glue item neighbors if the +//!
type to be dropped contains nested values that also need to be dropped. It +//! might also have a function item neighbor for the explicit `Drop::drop` +//! implementation of its type. +//! +//! #### Unsizing Casts +//! A subtle way of introducing neighbor edges is by casting to a trait object. +//! Since the resulting fat-pointer contains a reference to a vtable, we need to +//! instantiate all object-safe methods of the trait, as we need to store +//! pointers to these functions even if they never get called anywhere. This can +//! be seen as a special case of taking a function reference. +//! +//! #### Boxes +//! Since `Box` expressions have special compiler support, no explicit calls to +//! `exchange_malloc()` and `exchange_free()` may show up in MIR, even if the +//! compiler will generate them. We have to observe `Rvalue::Box` expressions +//! and Box-typed drop-statements for that purpose. +//! +//! +//! Interaction with Cross-Crate Inlining +//! ------------------------------------- +//! The binary of a crate will not only contain machine code for the items +//! defined in the source code of that crate. It will also contain monomorphic +//! instantiations of any extern generic functions and of functions marked with +//! #[inline]. +//! The collection algorithm handles this more or less transparently. If it is +//! about to create a translation item for something with an external `DefId`, +//! it will take a look if the MIR for that item is available, and if so just +//! proceed normally. If the MIR is not available, it assumes that the item is +//! just linked to and no node is created; which is exactly what we want, since +//! no machine code should be generated in the current crate for such an item. +//! +//! Eager and Lazy Collection Mode +//! ------------------------------ +//! Translation item collection can be performed in one of two modes: +//! +//! - Lazy mode means that items will only be instantiated when actually +//! referenced.
The goal is to produce the least amount of machine code +//! possible. +//! +//! - Eager mode is meant to be used in conjunction with incremental compilation +//! where a stable set of translation items is more important than a minimal +//! one. Thus, eager mode will instantiate drop-glue for every drop-able type +//! in the crate, even if no drop call for that type exists (yet). It will +//! also instantiate default implementations of trait methods, something that +//! otherwise is only done on demand. +//! +//! +//! Open Issues +//! ----------- +//! Some things are not yet fully implemented in the current version of this +//! module. +//! +//! ### Initializers of Constants and Statics +//! Since no MIR is constructed yet for initializer expressions of constants and +//! statics we cannot inspect these properly. +//! +//! ### Const Fns +//! Ideally, no translation item should be generated for const fns unless there +//! is a call to them that cannot be evaluated at compile time. At the moment +//! this is not implemented however: a translation item will be produced +//! regardless of whether it is actually needed or not.
+ +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; + +use rustc::hir::map as hir_map; +use rustc::hir::def_id::DefId; +use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem}; +use rustc::traits; +use rustc::ty::subst::{Substs, Subst}; +use rustc::ty::{self, TypeFoldable, TyCtxt}; +use rustc::ty::adjustment::CustomCoerceUnsized; +use rustc::mir::{self, Location}; +use rustc::mir::visit as mir_visit; +use rustc::mir::visit::Visitor as MirVisitor; + +use rustc_const_eval as const_eval; + +use syntax::abi::Abi; +use syntax_pos::DUMMY_SP; +use base::custom_coerce_unsize_info; +use context::SharedCrateContext; +use common::{fulfill_obligation, type_is_sized}; +use glue::{self, DropGlueKind}; +use monomorphize::{self, Instance}; +use util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; + +use trans_item::{TransItem, DefPathBasedNames}; + +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub enum TransItemCollectionMode { + Eager, + Lazy +} + +/// Maps every translation item to all translation items it references in its +/// body. +pub struct InliningMap<'tcx> { + // Maps a source translation item to a range of target translation items + // that are potentially inlined by LLVM into the source. + // The two numbers in the tuple are the start (inclusive) and + // end index (exclusive) within the `targets` vecs. 
+ index: FxHashMap, (usize, usize)>, + targets: Vec>, +} + +impl<'tcx> InliningMap<'tcx> { + + fn new() -> InliningMap<'tcx> { + InliningMap { + index: FxHashMap(), + targets: Vec::new(), + } + } + + fn record_inlining_canditates(&mut self, + source: TransItem<'tcx>, + targets: I) + where I: Iterator> + { + assert!(!self.index.contains_key(&source)); + + let start_index = self.targets.len(); + self.targets.extend(targets); + let end_index = self.targets.len(); + self.index.insert(source, (start_index, end_index)); + } + + // Internally iterate over all items referenced by `source` which will be + // made available for inlining. + pub fn with_inlining_candidates(&self, source: TransItem<'tcx>, mut f: F) + where F: FnMut(TransItem<'tcx>) { + if let Some(&(start_index, end_index)) = self.index.get(&source) + { + for candidate in &self.targets[start_index .. end_index] { + f(*candidate) + } + } + } +} + +pub fn collect_crate_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + mode: TransItemCollectionMode) + -> (FxHashSet>, + InliningMap<'tcx>) { + // We are not tracking dependencies of this pass as it has to be re-executed + // every time no matter what. + scx.tcx().dep_graph.with_ignore(|| { + let roots = collect_roots(scx, mode); + + debug!("Building translation item graph, beginning at roots"); + let mut visited = FxHashSet(); + let mut recursion_depths = DefIdMap(); + let mut inlining_map = InliningMap::new(); + + for root in roots { + collect_items_rec(scx, + root, + &mut visited, + &mut recursion_depths, + &mut inlining_map); + } + + (visited, inlining_map) + }) +} + +// Find all non-generic items by walking the HIR. These items serve as roots to +// start monomorphizing from. 
+fn collect_roots<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + mode: TransItemCollectionMode) + -> Vec> { + debug!("Collecting roots"); + let mut roots = Vec::new(); + + { + let mut visitor = RootCollector { + scx: scx, + mode: mode, + output: &mut roots, + }; + + scx.tcx().map.krate().visit_all_item_likes(&mut visitor); + } + + roots +} + +// Collect all monomorphized translation items reachable from `starting_point` +fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>, + starting_point: TransItem<'tcx>, + visited: &mut FxHashSet>, + recursion_depths: &mut DefIdMap, + inlining_map: &mut InliningMap<'tcx>) { + if !visited.insert(starting_point.clone()) { + // We've been here already, no need to search again. + return; + } + debug!("BEGIN collect_items_rec({})", starting_point.to_string(scx.tcx())); + + let mut neighbors = Vec::new(); + let recursion_depth_reset; + + match starting_point { + TransItem::DropGlue(t) => { + find_drop_glue_neighbors(scx, t, &mut neighbors); + recursion_depth_reset = None; + } + TransItem::Static(node_id) => { + let def_id = scx.tcx().map.local_def_id(node_id); + let ty = scx.tcx().item_type(def_id); + let ty = glue::get_drop_glue_type(scx.tcx(), ty); + neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); + + recursion_depth_reset = None; + + // Scan the MIR in order to find function calls, closures, and + // drop-glue + let mir = scx.tcx().item_mir(def_id); + + let empty_substs = scx.empty_substs_for_def_id(def_id); + let visitor = MirNeighborCollector { + scx: scx, + mir: &mir, + output: &mut neighbors, + param_substs: empty_substs + }; + + visit_mir_and_promoted(visitor, &mir); + } + TransItem::Fn(instance) => { + // Keep track of the monomorphization recursion depth + recursion_depth_reset = Some(check_recursion_limit(scx.tcx(), + instance, + recursion_depths)); + check_type_length_limit(scx.tcx(), instance); + + // Scan the MIR in order to find function calls, closures, and + // drop-glue + let mir = 
scx.tcx().item_mir(instance.def); + + let visitor = MirNeighborCollector { + scx: scx, + mir: &mir, + output: &mut neighbors, + param_substs: instance.substs + }; + + visit_mir_and_promoted(visitor, &mir); + } + } + + record_inlining_canditates(scx.tcx(), starting_point, &neighbors[..], inlining_map); + + for neighbour in neighbors { + collect_items_rec(scx, neighbour, visited, recursion_depths, inlining_map); + } + + if let Some((def_id, depth)) = recursion_depth_reset { + recursion_depths.insert(def_id, depth); + } + + debug!("END collect_items_rec({})", starting_point.to_string(scx.tcx())); +} + +fn record_inlining_canditates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + caller: TransItem<'tcx>, + callees: &[TransItem<'tcx>], + inlining_map: &mut InliningMap<'tcx>) { + let is_inlining_candidate = |trans_item: &TransItem<'tcx>| { + trans_item.needs_local_copy(tcx) + }; + + let inlining_candidates = callees.into_iter() + .map(|x| *x) + .filter(is_inlining_candidate); + + inlining_map.record_inlining_canditates(caller, inlining_candidates); +} + +fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>, + recursion_depths: &mut DefIdMap) + -> (DefId, usize) { + let recursion_depth = recursion_depths.get(&instance.def) + .map(|x| *x) + .unwrap_or(0); + debug!(" => recursion depth={}", recursion_depth); + + // Code that needs to instantiate the same function recursively + // more than the recursion limit is assumed to be causing an + // infinite expansion. 
+ if recursion_depth > tcx.sess.recursion_limit.get() { + let error = format!("reached the recursion limit while instantiating `{}`", + instance); + if let Some(node_id) = tcx.map.as_local_node_id(instance.def) { + tcx.sess.span_fatal(tcx.map.span(node_id), &error); + } else { + tcx.sess.fatal(&error); + } + } + + recursion_depths.insert(instance.def, recursion_depth + 1); + + (instance.def, recursion_depth) +} + +fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + instance: Instance<'tcx>) +{ + let type_length = instance.substs.types().flat_map(|ty| ty.walk()).count(); + debug!(" => type length={}", type_length); + + // Rust code can easily create exponentially-long types using only a + // polynomial recursion depth. Even with the default recursion + // depth, you can easily get cases that take >2^60 steps to run, + // which means that rustc basically hangs. + // + // Bail out in these cases to avoid that bad user experience. + let type_length_limit = tcx.sess.type_length_limit.get(); + if type_length > type_length_limit { + // The instance name is already known to be too long for rustc. Use + // `{:.64}` to avoid blasting the user's terminal with thousands of + // lines of type-name. 
+ let instance_name = instance.to_string(); + let msg = format!("reached the type-length limit while instantiating `{:.64}...`", + instance_name); + let mut diag = if let Some(node_id) = tcx.map.as_local_node_id(instance.def) { + tcx.sess.struct_span_fatal(tcx.map.span(node_id), &msg) + } else { + tcx.sess.struct_fatal(&msg) + }; + + diag.note(&format!( + "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate", + type_length_limit*2)); + diag.emit(); + tcx.sess.abort_if_errors(); + } +} + +struct MirNeighborCollector<'a, 'tcx: 'a> { + scx: &'a SharedCrateContext<'a, 'tcx>, + mir: &'a mir::Mir<'tcx>, + output: &'a mut Vec>, + param_substs: &'tcx Substs<'tcx> +} + +impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { + + fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) { + debug!("visiting rvalue {:?}", *rvalue); + + match *rvalue { + // When doing an cast from a regular pointer to a fat pointer, we + // have to instantiate all methods of the trait being cast to, so we + // can build the appropriate vtable. + mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, target_ty) => { + let target_ty = monomorphize::apply_param_substs(self.scx, + self.param_substs, + &target_ty); + let source_ty = operand.ty(self.mir, self.scx.tcx()); + let source_ty = monomorphize::apply_param_substs(self.scx, + self.param_substs, + &source_ty); + let (source_ty, target_ty) = find_vtable_types_for_unsizing(self.scx, + source_ty, + target_ty); + // This could also be a different Unsize instruction, like + // from a fixed sized array to a slice. But we are only + // interested in things that produce a vtable. + if target_ty.is_trait() && !source_ty.is_trait() { + create_trans_items_for_vtable_methods(self.scx, + target_ty, + source_ty, + self.output); + } + } + mir::Rvalue::Box(..) 
=> { + let exchange_malloc_fn_def_id = + self.scx + .tcx() + .lang_items + .require(ExchangeMallocFnLangItem) + .unwrap_or_else(|e| self.scx.sess().fatal(&e)); + + assert!(can_have_local_instance(self.scx.tcx(), exchange_malloc_fn_def_id)); + let empty_substs = self.scx.empty_substs_for_def_id(exchange_malloc_fn_def_id); + let exchange_malloc_fn_trans_item = + create_fn_trans_item(self.scx, + exchange_malloc_fn_def_id, + empty_substs, + self.param_substs); + + self.output.push(exchange_malloc_fn_trans_item); + } + _ => { /* not interesting */ } + } + + self.super_rvalue(rvalue, location); + } + + fn visit_lvalue(&mut self, + lvalue: &mir::Lvalue<'tcx>, + context: mir_visit::LvalueContext<'tcx>, + location: Location) { + debug!("visiting lvalue {:?}", *lvalue); + + if let mir_visit::LvalueContext::Drop = context { + let ty = lvalue.ty(self.mir, self.scx.tcx()) + .to_ty(self.scx.tcx()); + + let ty = monomorphize::apply_param_substs(self.scx, + self.param_substs, + &ty); + assert!(ty.is_normalized_for_trans()); + let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); + } + + self.super_lvalue(lvalue, context, location); + } + + fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) { + debug!("visiting operand {:?}", *operand); + + let callee = match *operand { + mir::Operand::Constant(ref constant) => { + if let ty::TyFnDef(def_id, substs, _) = constant.ty.sty { + // This is something that can act as a callee, proceed + Some((def_id, substs)) + } else { + // This is not a callee, but we still have to look for + // references to `const` items + if let mir::Literal::Item { def_id, substs } = constant.literal { + let tcx = self.scx.tcx(); + let substs = monomorphize::apply_param_substs(self.scx, + self.param_substs, + &substs); + + // If the constant referred to here is an associated + // item of a trait, we need to resolve it to the actual + // constant in the corresponding impl. 
Luckily + // const_eval::lookup_const_by_id() does that for us. + if let Some((expr, _)) = const_eval::lookup_const_by_id(tcx, + def_id, + Some(substs)) { + // The hir::Expr we get here is the initializer of + // the constant, what we really want is the item + // DefId. + let const_node_id = tcx.map.get_parent(expr.id); + let def_id = if tcx.map.is_inlined_node_id(const_node_id) { + tcx.sess.cstore.defid_for_inlined_node(const_node_id).unwrap() + } else { + tcx.map.local_def_id(const_node_id) + }; + + collect_const_item_neighbours(self.scx, + def_id, + substs, + self.output); + } + } + + None + } + } + _ => None + }; + + if let Some((callee_def_id, callee_substs)) = callee { + debug!(" => operand is callable"); + + // `callee_def_id` might refer to a trait method instead of a + // concrete implementation, so we have to find the actual + // implementation. For example, the call might look like + // + // std::cmp::partial_cmp(0i32, 1i32) + // + // Calling do_static_dispatch() here will map the def_id of + // `std::cmp::partial_cmp` to the def_id of `i32::partial_cmp` + let dispatched = do_static_dispatch(self.scx, + callee_def_id, + callee_substs, + self.param_substs); + + if let Some((callee_def_id, callee_substs)) = dispatched { + // if we have a concrete impl (which we might not have + // in the case of something compiler generated like an + // object shim or a closure that is handled differently), + // we check if the callee is something that will actually + // result in a translation item ... + if can_result_in_trans_item(self.scx.tcx(), callee_def_id) { + // ... and create one if it does. 
+ let trans_item = create_fn_trans_item(self.scx, + callee_def_id, + callee_substs, + self.param_substs); + self.output.push(trans_item); + } + } + } + + self.super_operand(operand, location); + + fn can_result_in_trans_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> bool { + match tcx.item_type(def_id).sty { + ty::TyFnDef(def_id, _, f) => { + // Some constructors also have type TyFnDef but they are + // always instantiated inline and don't result in + // translation item. Same for FFI functions. + if let Some(hir_map::NodeForeignItem(_)) = tcx.map.get_if_local(def_id) { + return false; + } + + if let Some(adt_def) = f.sig.output().skip_binder().ty_adt_def() { + if adt_def.variants.iter().any(|v| def_id == v.did) { + return false; + } + } + } + ty::TyClosure(..) => {} + _ => return false + } + + can_have_local_instance(tcx, def_id) + } + } + + // This takes care of the "drop_in_place" intrinsic for which we otherwise + // we would not register drop-glues. + fn visit_terminator_kind(&mut self, + block: mir::BasicBlock, + kind: &mir::TerminatorKind<'tcx>, + location: Location) { + let tcx = self.scx.tcx(); + match *kind { + mir::TerminatorKind::Call { + func: mir::Operand::Constant(ref constant), + ref args, + .. + } => { + match constant.ty.sty { + ty::TyFnDef(def_id, _, bare_fn_ty) + if is_drop_in_place_intrinsic(tcx, def_id, bare_fn_ty) => { + let operand_ty = args[0].ty(self.mir, tcx); + if let ty::TyRawPtr(mt) = operand_ty.sty { + let operand_ty = monomorphize::apply_param_substs(self.scx, + self.param_substs, + &mt.ty); + let ty = glue::get_drop_glue_type(tcx, operand_ty); + self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); + } else { + bug!("Has the drop_in_place() intrinsic's signature changed?") + } + } + _ => { /* Nothing to do. */ } + } + } + _ => { /* Nothing to do. 
*/ } + } + + self.super_terminator_kind(block, kind, location); + + fn is_drop_in_place_intrinsic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + bare_fn_ty: &ty::BareFnTy<'tcx>) + -> bool { + (bare_fn_ty.abi == Abi::RustIntrinsic || + bare_fn_ty.abi == Abi::PlatformIntrinsic) && + tcx.item_name(def_id) == "drop_in_place" + } + } +} + +fn can_have_local_instance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> bool { + // Take a look if we have the definition available. If not, we + // will not emit code for this item in the local crate, and thus + // don't create a translation item for it. + def_id.is_local() || tcx.sess.cstore.is_item_mir_available(def_id) +} + +fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + dg: DropGlueKind<'tcx>, + output: &mut Vec>) { + let ty = match dg { + DropGlueKind::Ty(ty) => ty, + DropGlueKind::TyContents(_) => { + // We already collected the neighbors of this item via the + // DropGlueKind::Ty variant. + return + } + }; + + debug!("find_drop_glue_neighbors: {}", type_to_string(scx.tcx(), ty)); + + // Make sure the exchange_free_fn() lang-item gets translated if + // there is a boxed value. + if let ty::TyBox(_) = ty.sty { + let exchange_free_fn_def_id = scx.tcx() + .lang_items + .require(ExchangeFreeFnLangItem) + .unwrap_or_else(|e| scx.sess().fatal(&e)); + + assert!(can_have_local_instance(scx.tcx(), exchange_free_fn_def_id)); + let fn_substs = scx.empty_substs_for_def_id(exchange_free_fn_def_id); + let exchange_free_fn_trans_item = + create_fn_trans_item(scx, + exchange_free_fn_def_id, + fn_substs, + scx.tcx().intern_substs(&[])); + + output.push(exchange_free_fn_trans_item); + } + + // If the type implements Drop, also add a translation item for the + // monomorphized Drop::drop() implementation. 
+ let destructor_did = match ty.sty { + ty::TyAdt(def, _) => def.destructor(), + _ => None + }; + + if let Some(destructor_did) = destructor_did { + use rustc::ty::ToPolyTraitRef; + + let drop_trait_def_id = scx.tcx() + .lang_items + .drop_trait() + .unwrap(); + + let self_type_substs = scx.tcx().mk_substs_trait(ty, &[]); + + let trait_ref = ty::TraitRef { + def_id: drop_trait_def_id, + substs: self_type_substs, + }.to_poly_trait_ref(); + + let substs = match fulfill_obligation(scx, DUMMY_SP, trait_ref) { + traits::VtableImpl(data) => data.substs, + _ => bug!() + }; + + if can_have_local_instance(scx.tcx(), destructor_did) { + let trans_item = create_fn_trans_item(scx, + destructor_did, + substs, + scx.tcx().intern_substs(&[])); + output.push(trans_item); + } + + // This type has a Drop implementation, we'll need the contents-only + // version of the glue too. + output.push(TransItem::DropGlue(DropGlueKind::TyContents(ty))); + } + + // Finally add the types of nested values + match ty.sty { + ty::TyBool | + ty::TyChar | + ty::TyInt(_) | + ty::TyUint(_) | + ty::TyStr | + ty::TyFloat(_) | + ty::TyRawPtr(_) | + ty::TyRef(..) | + ty::TyFnDef(..) | + ty::TyFnPtr(_) | + ty::TyNever | + ty::TyDynamic(..) 
=> { + /* nothing to do */ + } + ty::TyAdt(adt_def, substs) => { + for field in adt_def.all_fields() { + let field_type = scx.tcx().item_type(field.did); + let field_type = monomorphize::apply_param_substs(scx, + substs, + &field_type); + let field_type = glue::get_drop_glue_type(scx.tcx(), field_type); + + if glue::type_needs_drop(scx.tcx(), field_type) { + output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type))); + } + } + } + ty::TyClosure(def_id, substs) => { + for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) { + let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty); + if glue::type_needs_drop(scx.tcx(), upvar_ty) { + output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty))); + } + } + } + ty::TyBox(inner_type) | + ty::TySlice(inner_type) | + ty::TyArray(inner_type, _) => { + let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type); + if glue::type_needs_drop(scx.tcx(), inner_type) { + output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type))); + } + } + ty::TyTuple(args) => { + for arg in args { + let arg = glue::get_drop_glue_type(scx.tcx(), arg); + if glue::type_needs_drop(scx.tcx(), arg) { + output.push(TransItem::DropGlue(DropGlueKind::Ty(arg))); + } + } + } + ty::TyProjection(_) | + ty::TyParam(_) | + ty::TyInfer(_) | + ty::TyAnon(..) 
| + ty::TyError => { + bug!("encountered unexpected type"); + } + } + + +} + +fn do_static_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + fn_def_id: DefId, + fn_substs: &'tcx Substs<'tcx>, + param_substs: &'tcx Substs<'tcx>) + -> Option<(DefId, &'tcx Substs<'tcx>)> { + debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})", + def_id_to_string(scx.tcx(), fn_def_id), + fn_substs, + param_substs); + + if let Some(trait_def_id) = scx.tcx().trait_of_item(fn_def_id) { + debug!(" => trait method, attempting to find impl"); + do_static_trait_method_dispatch(scx, + &scx.tcx().associated_item(fn_def_id), + trait_def_id, + fn_substs, + param_substs) + } else { + debug!(" => regular function"); + // The function is not part of an impl or trait, no dispatching + // to be done + Some((fn_def_id, fn_substs)) + } +} + +// Given a trait-method and substitution information, find out the actual +// implementation of the trait method. +fn do_static_trait_method_dispatch<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + trait_method: &ty::AssociatedItem, + trait_id: DefId, + callee_substs: &'tcx Substs<'tcx>, + param_substs: &'tcx Substs<'tcx>) + -> Option<(DefId, &'tcx Substs<'tcx>)> { + let tcx = scx.tcx(); + debug!("do_static_trait_method_dispatch(trait_method={}, \ + trait_id={}, \ + callee_substs={:?}, \ + param_substs={:?}", + def_id_to_string(scx.tcx(), trait_method.def_id), + def_id_to_string(scx.tcx(), trait_id), + callee_substs, + param_substs); + + let rcvr_substs = monomorphize::apply_param_substs(scx, + param_substs, + &callee_substs); + let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs); + let vtbl = fulfill_obligation(scx, DUMMY_SP, ty::Binder(trait_ref)); + + // Now that we know which impl is being used, we can dispatch to + // the actual function: + match vtbl { + traits::VtableImpl(impl_data) => { + Some(traits::find_method(tcx, trait_method.name, rcvr_substs, &impl_data)) + } + traits::VtableClosure(closure_data) => { 
+ Some((closure_data.closure_def_id, closure_data.substs.substs)) + } + // Trait object and function pointer shims are always + // instantiated in-place, and as they are just an ABI-adjusting + // indirect call they do not have any dependencies. + traits::VtableFnPointer(..) | + traits::VtableObject(..) => { + None + } + _ => { + bug!("static call to invalid vtable: {:?}", vtbl) + } + } +} + +/// For given pair of source and target type that occur in an unsizing coercion, +/// this function finds the pair of types that determines the vtable linking +/// them. +/// +/// For example, the source type might be `&SomeStruct` and the target type\ +/// might be `&SomeTrait` in a cast like: +/// +/// let src: &SomeStruct = ...; +/// let target = src as &SomeTrait; +/// +/// Then the output of this function would be (SomeStruct, SomeTrait) since for +/// constructing the `target` fat-pointer we need the vtable for that pair. +/// +/// Things can get more complicated though because there's also the case where +/// the unsized type occurs as a field: +/// +/// ```rust +/// struct ComplexStruct { +/// a: u32, +/// b: f64, +/// c: T +/// } +/// ``` +/// +/// In this case, if `T` is sized, `&ComplexStruct` is a thin pointer. If `T` +/// is unsized, `&SomeStruct` is a fat pointer, and the vtable it points to is +/// for the pair of `T` (which is a trait) and the concrete type that `T` was +/// originally coerced from: +/// +/// let src: &ComplexStruct = ...; +/// let target = src as &ComplexStruct; +/// +/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair +/// `(SomeStruct, SomeTrait)`. +/// +/// Finally, there is also the case of custom unsizing coercions, e.g. for +/// smart pointers such as `Rc` and `Arc`. 
+fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + source_ty: ty::Ty<'tcx>, + target_ty: ty::Ty<'tcx>) + -> (ty::Ty<'tcx>, ty::Ty<'tcx>) { + match (&source_ty.sty, &target_ty.sty) { + (&ty::TyBox(a), &ty::TyBox(b)) | + (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), + &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | + (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { + let (inner_source, inner_target) = (a, b); + + if !type_is_sized(scx.tcx(), inner_source) { + (inner_source, inner_target) + } else { + scx.tcx().struct_lockstep_tails(inner_source, inner_target) + } + } + + (&ty::TyAdt(source_adt_def, source_substs), + &ty::TyAdt(target_adt_def, target_substs)) => { + assert_eq!(source_adt_def, target_adt_def); + + let kind = custom_coerce_unsize_info(scx, source_ty, target_ty); + + let coerce_index = match kind { + CustomCoerceUnsized::Struct(i) => i + }; + + let source_fields = &source_adt_def.struct_variant().fields; + let target_fields = &target_adt_def.struct_variant().fields; + + assert!(coerce_index < source_fields.len() && + source_fields.len() == target_fields.len()); + + find_vtable_types_for_unsizing(scx, + source_fields[coerce_index].ty(scx.tcx(), + source_substs), + target_fields[coerce_index].ty(scx.tcx(), + target_substs)) + } + _ => bug!("find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}", + source_ty, + target_ty) + } +} + +fn create_fn_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + def_id: DefId, + fn_substs: &'tcx Substs<'tcx>, + param_substs: &'tcx Substs<'tcx>) + -> TransItem<'tcx> { + let tcx = scx.tcx(); + + debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})", + def_id_to_string(tcx, def_id), + fn_substs, + param_substs); + + // We only get here, if fn_def_id either designates a local item or + // an inlineable external item. 
Non-inlineable external items are + // ignored because we don't want to generate any code for them. + let concrete_substs = monomorphize::apply_param_substs(scx, + param_substs, + &fn_substs); + assert!(concrete_substs.is_normalized_for_trans(), + "concrete_substs not normalized for trans: {:?}", + concrete_substs); + TransItem::Fn(Instance::new(def_id, concrete_substs)) +} + +/// Creates a `TransItem` for each method that is referenced by the vtable for +/// the given trait/impl pair. +fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + trait_ty: ty::Ty<'tcx>, + impl_ty: ty::Ty<'tcx>, + output: &mut Vec>) { + assert!(!trait_ty.needs_subst() && !impl_ty.needs_subst()); + + if let ty::TyDynamic(ref trait_ty, ..) = trait_ty.sty { + if let Some(principal) = trait_ty.principal() { + let poly_trait_ref = principal.with_self_ty(scx.tcx(), impl_ty); + let param_substs = scx.tcx().intern_substs(&[]); + + // Walk all methods of the trait, including those of its supertraits + let methods = traits::get_vtable_methods(scx.tcx(), poly_trait_ref); + let methods = methods.filter_map(|method| method) + .filter_map(|(def_id, substs)| do_static_dispatch(scx, def_id, substs, + param_substs)) + .filter(|&(def_id, _)| can_have_local_instance(scx.tcx(), def_id)) + .map(|(def_id, substs)| create_fn_trans_item(scx, def_id, substs, param_substs)); + output.extend(methods); + } + // Also add the destructor + let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty); + output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type))); + } +} + +//=----------------------------------------------------------------------------- +// Root Collection +//=----------------------------------------------------------------------------- + +struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> { + scx: &'b SharedCrateContext<'a, 'tcx>, + mode: TransItemCollectionMode, + output: &'b mut Vec>, +} + +impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { + fn 
visit_item(&mut self, item: &'v hir::Item) { + match item.node { + hir::ItemExternCrate(..) | + hir::ItemUse(..) | + hir::ItemForeignMod(..) | + hir::ItemTy(..) | + hir::ItemDefaultImpl(..) | + hir::ItemTrait(..) | + hir::ItemMod(..) => { + // Nothing to do, just keep recursing... + } + + hir::ItemImpl(..) => { + if self.mode == TransItemCollectionMode::Eager { + create_trans_items_for_default_impls(self.scx, + item, + self.output); + } + } + + hir::ItemEnum(_, ref generics) | + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => { + if !generics.is_parameterized() { + if self.mode == TransItemCollectionMode::Eager { + let def_id = self.scx.tcx().map.local_def_id(item.id); + debug!("RootCollector: ADT drop-glue for {}", + def_id_to_string(self.scx.tcx(), def_id)); + + let ty = self.scx.tcx().item_type(def_id); + let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); + } + } + } + hir::ItemStatic(..) => { + debug!("RootCollector: ItemStatic({})", + def_id_to_string(self.scx.tcx(), + self.scx.tcx().map.local_def_id(item.id))); + self.output.push(TransItem::Static(item.id)); + } + hir::ItemConst(..) => { + // const items only generate translation items if they are + // actually used somewhere. Just declaring them is insufficient. + } + hir::ItemFn(.., ref generics, _) => { + if !generics.is_type_parameterized() { + let def_id = self.scx.tcx().map.local_def_id(item.id); + + debug!("RootCollector: ItemFn({})", + def_id_to_string(self.scx.tcx(), def_id)); + + let instance = Instance::mono(self.scx, def_id); + self.output.push(TransItem::Fn(instance)); + } + } + } + } + + fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { + match ii.node { + hir::ImplItemKind::Method(hir::MethodSig { + ref generics, + .. 
+ }, _) => { + let hir_map = &self.scx.tcx().map; + let parent_node_id = hir_map.get_parent_node(ii.id); + let is_impl_generic = match hir_map.expect_item(parent_node_id) { + &hir::Item { + node: hir::ItemImpl(_, _, ref generics, ..), + .. + } => { + generics.is_type_parameterized() + } + _ => { + bug!() + } + }; + + if !generics.is_type_parameterized() && !is_impl_generic { + let def_id = self.scx.tcx().map.local_def_id(ii.id); + + debug!("RootCollector: MethodImplItem({})", + def_id_to_string(self.scx.tcx(), def_id)); + + let instance = Instance::mono(self.scx, def_id); + self.output.push(TransItem::Fn(instance)); + } + } + _ => { /* Nothing to do here */ } + } + } +} + +fn create_trans_items_for_default_impls<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + item: &'tcx hir::Item, + output: &mut Vec>) { + let tcx = scx.tcx(); + match item.node { + hir::ItemImpl(_, + _, + ref generics, + .., + ref impl_item_refs) => { + if generics.is_type_parameterized() { + return + } + + let impl_def_id = tcx.map.local_def_id(item.id); + + debug!("create_trans_items_for_default_impls(item={})", + def_id_to_string(tcx, impl_def_id)); + + if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) { + let callee_substs = tcx.erase_regions(&trait_ref.substs); + let overridden_methods: FxHashSet<_> = + impl_item_refs.iter() + .map(|iiref| iiref.name) + .collect(); + for method in tcx.provided_trait_methods(trait_ref.def_id) { + if overridden_methods.contains(&method.name) { + continue; + } + + if !tcx.item_generics(method.def_id).types.is_empty() { + continue; + } + + // The substitutions we have are on the impl, so we grab + // the method type from the impl to substitute into. 
+ let impl_substs = Substs::for_item(tcx, impl_def_id, + |_, _| tcx.mk_region(ty::ReErased), + |_, _| tcx.types.err); + let impl_data = traits::VtableImplData { + impl_def_id: impl_def_id, + substs: impl_substs, + nested: vec![] + }; + let (def_id, substs) = traits::find_method(tcx, + method.name, + callee_substs, + &impl_data); + + let predicates = tcx.item_predicates(def_id).predicates + .subst(tcx, substs); + if !traits::normalize_and_test_predicates(tcx, predicates) { + continue; + } + + if can_have_local_instance(tcx, method.def_id) { + let item = create_fn_trans_item(scx, + method.def_id, + callee_substs, + tcx.erase_regions(&substs)); + output.push(item); + } + } + } + } + _ => { + bug!() + } + } +} + +// There are no translation items for constants themselves but their +// initializers might still contain something that produces translation items, +// such as cast that introduce a new vtable. +fn collect_const_item_neighbours<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + def_id: DefId, + substs: &'tcx Substs<'tcx>, + output: &mut Vec>) +{ + // Scan the MIR in order to find function calls, closures, and + // drop-glue + let mir = scx.tcx().item_mir(def_id); + + let visitor = MirNeighborCollector { + scx: scx, + mir: &mir, + output: output, + param_substs: substs + }; + + visit_mir_and_promoted(visitor, &mir); +} + +fn visit_mir_and_promoted<'tcx, V: MirVisitor<'tcx>>(mut visitor: V, mir: &mir::Mir<'tcx>) { + visitor.visit_mir(&mir); + for promoted in &mir.promoted { + visitor.visit_mir(promoted); + } +} + +fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> String { + let mut output = String::new(); + let printer = DefPathBasedNames::new(tcx, false, false); + printer.push_def_path(def_id, &mut output); + output +} + +fn type_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: ty::Ty<'tcx>) + -> String { + let mut output = String::new(); + let printer = DefPathBasedNames::new(tcx, false, false); + printer.push_type_name(ty, 
&mut output); + output +} diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs new file mode 100644 index 0000000000000..29925d964da25 --- /dev/null +++ b/src/librustc_trans/common.rs @@ -0,0 +1,1106 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_camel_case_types, non_snake_case)] + +//! Code that is useful in various trans modules. + +use session::Session; +use llvm; +use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; +use llvm::{True, False, Bool, OperandBundleDef}; +use rustc::hir::def::Def; +use rustc::hir::def_id::DefId; +use rustc::hir::map::DefPathData; +use rustc::infer::TransNormalize; +use rustc::mir::Mir; +use rustc::util::common::MemoizationMap; +use middle::lang_items::LangItem; +use rustc::ty::subst::Substs; +use abi::{Abi, FnType}; +use base; +use build; +use builder::Builder; +use callee::Callee; +use cleanup; +use consts; +use debuginfo::{self, DebugLoc}; +use declare; +use machine; +use monomorphize; +use type_::Type; +use value::Value; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::Layout; +use rustc::traits::{self, SelectionContext, Reveal}; +use rustc::ty::fold::TypeFoldable; +use rustc::hir; + +use arena::TypedArena; +use libc::{c_uint, c_char}; +use std::borrow::Cow; +use std::iter; +use std::ops::Deref; +use std::ffi::CString; +use std::cell::{Cell, RefCell, Ref}; + +use syntax::ast; +use syntax::symbol::{Symbol, InternedString}; +use syntax_pos::{DUMMY_SP, Span}; + +pub use context::{CrateContext, SharedCrateContext}; + +/// Is the type's representation size known at compile time? 
+pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP) +} + +pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | + ty::TyRef(_, ty::TypeAndMut{ty, ..}) | + ty::TyBox(ty) => { + !type_is_sized(tcx, ty) + } + _ => { + false + } + } +} + +pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { + use machine::llsize_of_alloc; + use type_of::sizing_type_of; + + let tcx = ccx.tcx(); + let simple = ty.is_scalar() || + ty.is_unique() || ty.is_region_ptr() || + ty.is_simd(); + if simple && !type_is_fat_ptr(tcx, ty) { + return true; + } + if !type_is_sized(tcx, ty) { + return false; + } + match ty.sty { + ty::TyAdt(..) | ty::TyTuple(..) | ty::TyArray(..) | ty::TyClosure(..) => { + let llty = sizing_type_of(ccx, ty); + llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type()) + } + _ => type_is_zero_size(ccx, ty) + } +} + +/// Returns Some([a, b]) if the type has a pair of fields with types a and b. +pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) + -> Option<[Ty<'tcx>; 2]> { + match ty.sty { + ty::TyAdt(adt, substs) => { + assert_eq!(adt.variants.len(), 1); + let fields = &adt.variants[0].fields; + if fields.len() != 2 { + return None; + } + Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]), + monomorphize::field_ty(ccx.tcx(), substs, &fields[1])]) + } + ty::TyClosure(def_id, substs) => { + let mut tys = substs.upvar_tys(def_id, ccx.tcx()); + tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { + if tys.next().is_some() { + None + } else { + Some([first_ty, second_ty]) + } + })) + } + ty::TyTuple(tys) => { + if tys.len() != 2 { + return None; + } + Some([tys[0], tys[1]]) + } + _ => None + } +} + +/// Returns true if the type is represented as a pair of immediates. 
+pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) + -> bool { + match *ccx.layout_of(ty) { + Layout::FatPointer { .. } => true, + Layout::Univariant { ref variant, .. } => { + // There must be only 2 fields. + if variant.offsets.len() != 2 { + return false; + } + + match type_pair_fields(ccx, ty) { + Some([a, b]) => { + type_is_immediate(ccx, a) && type_is_immediate(ccx, b) + } + None => false + } + } + _ => false + } +} + +/// Identify types which have size zero at runtime. +pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { + use machine::llsize_of_alloc; + use type_of::sizing_type_of; + let llty = sizing_type_of(ccx, ty); + llsize_of_alloc(ccx, llty) == 0 +} + +/* +* A note on nomenclature of linking: "extern", "foreign", and "upcall". +* +* An "extern" is an LLVM symbol we wind up emitting an undefined external +* reference to. This means "we don't have the thing in this compilation unit, +* please make sure you link it in at runtime". This could be a reference to +* C code found in a C library, or rust code found in a rust crate. +* +* Most "externs" are implicitly declared (automatically) as a result of a +* user declaring an extern _module_ dependency; this causes the rust driver +* to locate an extern crate, scan its compilation metadata, and emit extern +* declarations for any symbols used by the declaring crate. +* +* A "foreign" is an extern that references C (or other non-rust ABI) code. +* There is no metadata to scan for extern references so in these cases either +* a header-digester like bindgen, or manual function prototypes, have to +* serve as declarators. So these are usually given explicitly as prototype +* declarations, in rust code, with ABI attributes on them noting which ABI to +* link via. 
+* +* An "upcall" is a foreign call generated by the compiler (not corresponding +* to any user-written call in the code) into the runtime library, to perform +* some helper task such as bringing a task to life, allocating memory, etc. +* +*/ + +use Disr; + +/// The concrete version of ty::FieldDef. The name is the field index if +/// the field is numeric. +pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); + +/// The concrete version of ty::VariantDef +pub struct VariantInfo<'tcx> { + pub discr: Disr, + pub fields: Vec> +} + +impl<'a, 'tcx> VariantInfo<'tcx> { + pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + opt_def: Option) + -> Self + { + match ty.sty { + ty::TyAdt(adt, substs) => { + let variant = match opt_def { + None => adt.struct_variant(), + Some(def) => adt.variant_of_def(def) + }; + + VariantInfo { + discr: Disr::from(variant.disr_val), + fields: variant.fields.iter().map(|f| { + Field(f.name, monomorphize::field_ty(tcx, substs, f)) + }).collect() + } + } + + ty::TyTuple(ref v) => { + VariantInfo { + discr: Disr(0), + fields: v.iter().enumerate().map(|(i, &t)| { + Field(Symbol::intern(&i.to_string()), t) + }).collect() + } + } + + _ => { + bug!("cannot get field types from the type {:?}", ty); + } + } + } +} + +pub struct BuilderRef_res { + pub b: BuilderRef, +} + +impl Drop for BuilderRef_res { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeBuilder(self.b); + } + } +} + +pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { + BuilderRef_res { + b: b + } +} + +pub fn validate_substs(substs: &Substs) { + assert!(!substs.needs_infer()); +} + +// Function context. Every LLVM function we create will have one of +// these. +pub struct FunctionContext<'a, 'tcx: 'a> { + // The MIR for this function. 
+ pub mir: Option>>, + + // The ValueRef returned from a call to llvm::LLVMAddFunction; the + // address of the first instruction in the sequence of + // instructions for this function that will go in the .text + // section of the executable we're generating. + pub llfn: ValueRef, + + // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv + pub param_env: ty::ParameterEnvironment<'tcx>, + + // A pointer to where to store the return value. If the return type is + // immediate, this points to an alloca in the function. Otherwise, it's a + // pointer to the hidden first parameter of the function. After function + // construction, this should always be Some. + pub llretslotptr: Cell>, + + // These pub elements: "hoisted basic blocks" containing + // administrative activities that have to happen in only one place in + // the function, due to LLVM's quirks. + // A marker for the place where we want to insert the function's static + // allocas, so that LLVM will coalesce them into a single alloca call. + pub alloca_insert_pt: Cell>, + + // When working with landingpad-based exceptions this value is alloca'd and + // later loaded when using the resume instruction. This ends up being + // critical to chaining landing pads and resuing already-translated + // cleanups. + // + // Note that for cleanuppad-based exceptions this is not used. + pub landingpad_alloca: Cell>, + + // Describes the return/argument LLVM types and their ABI handling. + pub fn_ty: FnType, + + // If this function is being monomorphized, this contains the type + // substitutions used. + pub param_substs: &'tcx Substs<'tcx>, + + // The source span and nesting context where this function comes from, for + // error reporting and symbol generation. + pub span: Option, + + // The arena that blocks are allocated from. + pub block_arena: &'a TypedArena>, + + // The arena that landing pads are allocated from. + pub lpad_arena: TypedArena, + + // This function's enclosing crate context. 
+ pub ccx: &'a CrateContext<'a, 'tcx>, + + // Used and maintained by the debuginfo module. + pub debug_context: debuginfo::FunctionDebugContext, + + // Cleanup scopes. + pub scopes: RefCell>>, +} + +impl<'a, 'tcx> FunctionContext<'a, 'tcx> { + pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { + self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty") + } + + pub fn cleanup(&self) { + unsafe { + llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt + .get() + .unwrap()); + } + } + + pub fn new_block(&'a self, + name: &str) + -> Block<'a, 'tcx> { + unsafe { + let name = CString::new(name).unwrap(); + let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), + self.llfn, + name.as_ptr()); + BlockS::new(llbb, self) + } + } + + pub fn monomorphize(&self, value: &T) -> T + where T: TransNormalize<'tcx> + { + monomorphize::apply_param_substs(self.ccx.shared(), + self.param_substs, + value) + } + + /// This is the same as `common::type_needs_drop`, except that it + /// may use or update caches within this `FunctionContext`. + pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env) + } + + pub fn eh_personality(&self) -> ValueRef { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to translate that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. 
It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". + let ccx = self.ccx; + let tcx = ccx.tcx(); + match tcx.lang_items.eh_personality() { + Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => { + Callee::def(ccx, def_id, tcx.intern_substs(&[])).reify(ccx) + } + _ => { + if let Some(llpersonality) = ccx.eh_personality().get() { + return llpersonality + } + let name = if base::wants_msvc_seh(ccx.sess()) { + "__CxxFrameHandler3" + } else { + "rust_eh_personality" + }; + let fty = Type::variadic_func(&[], &Type::i32(ccx)); + let f = declare::declare_cfn(ccx, name, fty); + ccx.eh_personality().set(Some(f)); + f + } + } + } + + // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, + // otherwise declares it as an external function. + pub fn eh_unwind_resume(&self) -> Callee<'tcx> { + use attributes; + let ccx = self.ccx; + let tcx = ccx.tcx(); + assert!(ccx.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { + return Callee::def(ccx, def_id, tcx.intern_substs(&[])); + } + + let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::C, + sig: ty::Binder(ty::FnSig { + inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)], + output: tcx.types.never, + variadic: false + }), + })); + + let unwresume = ccx.eh_unwind_resume(); + if let Some(llfn) = unwresume.get() { + return Callee::ptr(llfn, ty); + } + let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + unwresume.set(Some(llfn)); + Callee::ptr(llfn, ty) + } +} + +// Basic block context. 
We create a block context for each basic block +// (single-entry, single-exit sequence of instructions) we generate from Rust +// code. Each basic block we generate is attached to a function, typically +// with many basic blocks per function. All the basic blocks attached to a +// function are organized as a directed graph. +pub struct BlockS<'blk, 'tcx: 'blk> { + // The BasicBlockRef returned from a call to + // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic + // block to the function pointed to by llfn. We insert + // instructions into that block by way of this block context. + // The block pointing to this one in the function's digraph. + pub llbb: BasicBlockRef, + pub terminated: Cell, + pub unreachable: Cell, + + // If this block part of a landing pad, then this is `Some` indicating what + // kind of landing pad its in, otherwise this is none. + pub lpad: Cell>, + + // The function context for the function to which this block is + // attached. + pub fcx: &'blk FunctionContext<'blk, 'tcx>, +} + +pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>; + +impl<'blk, 'tcx> BlockS<'blk, 'tcx> { + pub fn new(llbb: BasicBlockRef, + fcx: &'blk FunctionContext<'blk, 'tcx>) + -> Block<'blk, 'tcx> { + fcx.block_arena.alloc(BlockS { + llbb: llbb, + terminated: Cell::new(false), + unreachable: Cell::new(false), + lpad: Cell::new(None), + fcx: fcx + }) + } + + pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { + self.fcx.ccx + } + pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> { + self.fcx + } + pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> { + self.fcx.ccx.tcx() + } + pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() } + + pub fn lpad(&self) -> Option<&'blk LandingPad> { + self.lpad.get() + } + + pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { + // FIXME: use an IVar? 
+ self.lpad.set(lpad); + } + + pub fn set_lpad(&self, lpad: Option) { + self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p))) + } + + pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { + self.fcx.mir() + } + + pub fn name(&self, name: ast::Name) -> String { + name.to_string() + } + + pub fn node_id_to_string(&self, id: ast::NodeId) -> String { + self.tcx().map.node_to_string(id).to_string() + } + + pub fn to_str(&self) -> String { + format!("[block {:p}]", self) + } + + pub fn monomorphize(&self, value: &T) -> T + where T: TransNormalize<'tcx> + { + monomorphize::apply_param_substs(self.fcx.ccx.shared(), + self.fcx.param_substs, + value) + } + + pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> { + BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx())) + } +} + +pub struct OwnedBuilder<'blk, 'tcx: 'blk> { + builder: Builder<'blk, 'tcx> +} + +impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> { + pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self { + // Create a fresh builder from the crate context. + let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(ccx.llcx()) + }; + OwnedBuilder { + builder: Builder { + llbuilder: llbuilder, + ccx: ccx, + } + } + } +} + +impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeBuilder(self.builder.llbuilder); + } + } +} + +pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { + bcx: Block<'blk, 'tcx>, + owned_builder: OwnedBuilder<'blk, 'tcx>, +} + +impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { + pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self { + // Set the builder's position to this block's end. 
+ owned_builder.builder.position_at_end(bcx.llbb); + BlockAndBuilder { + bcx: bcx, + owned_builder: owned_builder, + } + } + + pub fn with_block(&self, f: F) -> R + where F: FnOnce(Block<'blk, 'tcx>) -> R + { + let result = f(self.bcx); + self.position_at_end(self.bcx.llbb); + result + } + + pub fn map_block(self, f: F) -> Self + where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> + { + let BlockAndBuilder { bcx, owned_builder } = self; + let bcx = f(bcx); + BlockAndBuilder::new(bcx, owned_builder) + } + + pub fn at_start(&self, f: F) -> R + where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R + { + self.position_at_start(self.bcx.llbb); + let r = f(self); + self.position_at_end(self.bcx.llbb); + r + } + + // Methods delegated to bcx + + pub fn is_unreachable(&self) -> bool { + self.bcx.unreachable.get() + } + + pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { + self.bcx.ccx() + } + pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> { + self.bcx.fcx() + } + pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> { + self.bcx.tcx() + } + pub fn sess(&self) -> &'blk Session { + self.bcx.sess() + } + + pub fn llbb(&self) -> BasicBlockRef { + self.bcx.llbb + } + + pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { + self.bcx.mir() + } + + pub fn monomorphize(&self, value: &T) -> T + where T: TransNormalize<'tcx> + { + self.bcx.monomorphize(value) + } + + pub fn set_lpad(&self, lpad: Option) { + self.bcx.set_lpad(lpad) + } + + pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { + // FIXME: use an IVar? + self.bcx.set_lpad_ref(lpad); + } + + pub fn lpad(&self) -> Option<&'blk LandingPad> { + self.bcx.lpad() + } +} + +impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> { + type Target = Builder<'blk, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.owned_builder.builder + } +} + +/// A structure representing an active landing pad for the duration of a basic +/// block. 
+/// +/// Each `Block` may contain an instance of this, indicating whether the block +/// is part of a landing pad or not. This is used to make decision about whether +/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to +/// use `invoke`) and also about various function call metadata. +/// +/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is +/// just a bunch of `None` instances (not too interesting), but for MSVC +/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. +/// When inside of a landing pad, each function call in LLVM IR needs to be +/// annotated with which landing pad it's a part of. This is accomplished via +/// the `OperandBundleDef` value created for MSVC landing pads. +pub struct LandingPad { + cleanuppad: Option, + operand: Option, +} + +impl LandingPad { + pub fn gnu() -> LandingPad { + LandingPad { cleanuppad: None, operand: None } + } + + pub fn msvc(cleanuppad: ValueRef) -> LandingPad { + LandingPad { + cleanuppad: Some(cleanuppad), + operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])), + } + } + + pub fn bundle(&self) -> Option<&OperandBundleDef> { + self.operand.as_ref() + } + + pub fn cleanuppad(&self) -> Option { + self.cleanuppad + } +} + +impl Clone for LandingPad { + fn clone(&self) -> LandingPad { + LandingPad { + cleanuppad: self.cleanuppad, + operand: self.cleanuppad.map(|p| { + OperandBundleDef::new("funclet", &[p]) + }), + } + } +} + +pub struct Result<'blk, 'tcx: 'blk> { + pub bcx: Block<'blk, 'tcx>, + pub val: ValueRef +} + +impl<'b, 'tcx> Result<'b, 'tcx> { + pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> { + Result { + bcx: bcx, + val: val, + } + } +} + +pub fn val_ty(v: ValueRef) -> Type { + unsafe { + Type::from_ref(llvm::LLVMTypeOf(v)) + } +} + +// LLVM constant constructors. 
+pub fn C_null(t: Type) -> ValueRef { + unsafe { + llvm::LLVMConstNull(t.to_ref()) + } +} + +pub fn C_undef(t: Type) -> ValueRef { + unsafe { + llvm::LLVMGetUndef(t.to_ref()) + } +} + +pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef { + unsafe { + llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool) + } +} + +pub fn C_floating_f64(f: f64, t: Type) -> ValueRef { + unsafe { + llvm::LLVMConstReal(t.to_ref(), f) + } +} + +pub fn C_nil(ccx: &CrateContext) -> ValueRef { + C_struct(ccx, &[], false) +} + +pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { + C_integral(Type::i1(ccx), val as u64, false) +} + +pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef { + C_integral(Type::i32(ccx), i as u64, true) +} + +pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef { + C_integral(Type::i32(ccx), i as u64, false) +} + +pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { + C_integral(Type::i64(ccx), i, false) +} + +pub fn C_uint(ccx: &CrateContext, i: I) -> ValueRef { + let v = i.as_u64(); + + let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type()); + + if bit_size < 64 { + // make sure it doesn't overflow + assert!(v < (1< i64; } +pub trait AsU64 { fn as_u64(self) -> u64; } + +// FIXME: remove the intptr conversions, because they +// are host-architecture-dependent +impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }} +impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }} +impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }} + +impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }} +impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }} +impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }} + +pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef { + C_integral(Type::i8(ccx), i as u64, false) +} + + +// This is a 'c-like' raw string, which differs from +// our boxed-and-length-annotated strings. 
+pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef { + unsafe { + if let Some(&llval) = cx.const_cstr_cache().borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(cx.llcx(), + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = cx.generate_local_symbol_name("str"); + let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + cx.const_cstr_cache().borrow_mut().insert(s, g); + g + } +} + +// NB: Do not use `do_spill_noroot` to make this into a constant string, or +// you will be kicked off fast isel. See issue #4352 for an example of this. +pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { + let len = s.len(); + let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx)); + C_named_struct(cx.str_slice_type(), &[cs, C_uint(cx, len)]) +} + +pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { + C_struct_in_context(cx.llcx(), elts, packed) +} + +pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) + } +} + +pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef { + unsafe { + llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint) + } +} + +pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef { + unsafe { + return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint); + } +} + +pub fn C_vector(elts: &[ValueRef]) -> ValueRef { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } +} + +pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef { + C_bytes_in_context(cx.llcx(), bytes) +} + 
+pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } +} + +pub fn const_get_elt(v: ValueRef, us: &[c_uint]) + -> ValueRef { + unsafe { + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, us={:?}, r={:?})", + Value(v), us, Value(r)); + + r + } +} + +pub fn const_to_uint(v: ValueRef) -> u64 { + unsafe { + llvm::LLVMConstIntGetZExtValue(v) + } +} + +fn is_const_integral(v: ValueRef) -> bool { + unsafe { + !llvm::LLVMIsAConstantInt(v).is_null() + } +} + +pub fn const_to_opt_int(v: ValueRef) -> Option { + unsafe { + if is_const_integral(v) { + Some(llvm::LLVMConstIntGetSExtValue(v)) + } else { + None + } + } +} + +pub fn const_to_opt_uint(v: ValueRef) -> Option { + unsafe { + if is_const_integral(v) { + Some(llvm::LLVMConstIntGetZExtValue(v)) + } else { + None + } + } +} + +pub fn is_undef(val: ValueRef) -> bool { + unsafe { + llvm::LLVMIsUndef(val) != False + } +} + +#[allow(dead_code)] // potentially useful +pub fn is_null(val: ValueRef) -> bool { + unsafe { + llvm::LLVMIsNull(val) != False + } +} + +/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we +/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should +/// guarantee to us that all nested obligations *could be* resolved if we wanted to. +pub fn fulfill_obligation<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + span: Span, + trait_ref: ty::PolyTraitRef<'tcx>) + -> traits::Vtable<'tcx, ()> +{ + let tcx = scx.tcx(); + + // Remove any references to regions; this helps improve caching. 
+ let trait_ref = tcx.erase_regions(&trait_ref); + + scx.trait_cache().memoize(trait_ref, || { + debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})", + trait_ref, trait_ref.def_id()); + + // Do the initial selection for the obligation. This yields the + // shallow result we are looking for -- that is, what specific impl. + tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let mut selcx = SelectionContext::new(&infcx); + + let obligation_cause = traits::ObligationCause::misc(span, + ast::DUMMY_NODE_ID); + let obligation = traits::Obligation::new(obligation_cause, + trait_ref.to_poly_trait_predicate()); + + let selection = match selcx.select(&obligation) { + Ok(Some(selection)) => selection, + Ok(None) => { + // Ambiguity can happen when monomorphizing during trans + // expands to some humongo type that never occurred + // statically -- this humongo type can then overflow, + // leading to an ambiguous result. So report this as an + // overflow bug, since I believe this is the only case + // where ambiguity can result. + debug!("Encountered ambiguity selecting `{:?}` during trans, \ + presuming due to overflow", + trait_ref); + tcx.sess.span_fatal(span, + "reached the recursion limit during monomorphization \ + (selection ambiguity)"); + } + Err(e) => { + span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans", + e, trait_ref) + } + }; + + debug!("fulfill_obligation: selection={:?}", selection); + + // Currently, we use a fulfillment context to completely resolve + // all nested obligations. This is because they can inform the + // inference of the impl's type parameters. 
+ let mut fulfill_cx = traits::FulfillmentContext::new(); + let vtable = selection.map(|predicate| { + debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate); + fulfill_cx.register_predicate_obligation(&infcx, predicate); + }); + let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable); + + info!("Cache miss: {:?} => {:?}", trait_ref, vtable); + vtable + }) + }) +} + +pub fn langcall(tcx: TyCtxt, + span: Option, + msg: &str, + li: LangItem) + -> DefId { + match tcx.lang_items.require(li) { + Ok(id) => id, + Err(s) => { + let msg = format!("{} {}", msg, s); + match span { + Some(span) => tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + } + } +} + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) + +pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) -> ValueRef { + let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); + build::Shl(bcx, lhs, rhs, binop_debug_loc) +} + +pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + lhs_t: Ty<'tcx>, + lhs: ValueRef, + rhs: ValueRef, + binop_debug_loc: DebugLoc) -> ValueRef { + let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); + let is_signed = lhs_t.is_signed(); + if is_signed { + build::AShr(bcx, lhs, rhs, binop_debug_loc) + } else { + build::LShr(bcx, lhs, rhs, binop_debug_loc) + } +} + +fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + rhs: ValueRef, + debug_loc: DebugLoc) -> ValueRef { + let 
rhs_llty = val_ty(rhs); + build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc) +} + +pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + llty: Type, + mask_llty: Type, + invert: bool) -> ValueRef { + let kind = llty.kind(); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. + let val = llty.int_width() - 1; + if invert { + C_integral(mask_llty, !val, true) + } else { + C_integral(mask_llty, val, false) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert); + build::VectorSplat(bcx, mask_llty.vector_length(), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} + +pub fn ty_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ty: Ty<'tcx>) + -> Cow<'tcx, ty::BareFnTy<'tcx>> +{ + match ty.sty { + ty::TyFnDef(_, _, fty) => Cow::Borrowed(fty), + // Shims currently have type TyFnPtr. Not sure this should remain. 
+ ty::TyFnPtr(fty) => Cow::Borrowed(fty), + ty::TyClosure(def_id, substs) => { + let tcx = ccx.tcx(); + let ty::ClosureTy { unsafety, abi, sig } = tcx.closure_type(def_id, substs); + + let env_region = ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrEnv); + let env_ty = match tcx.closure_kind(def_id) { + ty::ClosureKind::Fn => tcx.mk_imm_ref(tcx.mk_region(env_region), ty), + ty::ClosureKind::FnMut => tcx.mk_mut_ref(tcx.mk_region(env_region), ty), + ty::ClosureKind::FnOnce => ty, + }; + + let sig = sig.map_bound(|sig| ty::FnSig { + inputs: iter::once(env_ty).chain(sig.inputs).collect(), + ..sig + }); + Cow::Owned(ty::BareFnTy { unsafety: unsafety, abi: abi, sig: sig }) + } + _ => bug!("unexpected type {:?} to ty_fn_sig", ty) + } +} + +pub fn is_closure(tcx: TyCtxt, def_id: DefId) -> bool { + tcx.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr +} diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs new file mode 100644 index 0000000000000..4186721c122ac --- /dev/null +++ b/src/librustc_trans/consts.rs @@ -0,0 +1,271 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ + +use llvm; +use llvm::{SetUnnamedAddr}; +use llvm::{ValueRef, True}; +use rustc_const_eval::ConstEvalErr; +use rustc::hir::def_id::DefId; +use rustc::hir::map as hir_map; +use {debuginfo, machine}; +use base::{self, push_ctxt}; +use trans_item::TransItem; +use common::{CrateContext, val_ty}; +use declare; +use monomorphize::{Instance}; +use type_::Type; +use type_of; +use rustc::ty; + +use rustc::hir; + +use std::ffi::{CStr, CString}; +use syntax::ast; +use syntax::attr; + +pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { + unsafe { + llvm::LLVMConstPointerCast(val, ty.to_ref()) + } +} + +pub fn addr_of_mut(ccx: &CrateContext, + cv: ValueRef, + align: machine::llalign, + kind: &str) + -> ValueRef { + unsafe { + let name = ccx.generate_local_symbol_name(kind); + let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMSetInitializer(gv, cv); + llvm::LLVMSetAlignment(gv, align); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::InternalLinkage); + SetUnnamedAddr(gv, true); + gv + } +} + +pub fn addr_of(ccx: &CrateContext, + cv: ValueRef, + align: machine::llalign, + kind: &str) + -> ValueRef { + if let Some(&gv) = ccx.const_globals().borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + if align > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, align); + } + } + return gv; + } + let gv = addr_of_mut(ccx, cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + ccx.const_globals().borrow_mut().insert(cv, gv); + gv +} + +pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { + let instance = Instance::mono(ccx.shared(), def_id); + if let Some(&g) = ccx.instances().borrow().get(&instance) { + return g; + } + + let ty = ccx.tcx().item_type(def_id); + let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) { + + let llty = 
type_of::type_of(ccx, ty); + let (g, attrs) = match ccx.tcx().map.get(id) { + hir_map::NodeItem(&hir::Item { + ref attrs, span, node: hir::ItemStatic(..), .. + }) => { + let sym = ccx.symbol_map() + .get(TransItem::Static(id)) + .expect("Local statics should always be in the SymbolMap"); + // Make sure that this is never executed for something inlined. + assert!(!ccx.tcx().map.is_inlined_node_id(id)); + + let defined_in_current_codegen_unit = ccx.codegen_unit() + .items() + .contains_key(&TransItem::Static(id)); + assert!(!defined_in_current_codegen_unit); + + if declare::get_declared_value(ccx, sym).is_some() { + span_bug!(span, "trans: Conflicting symbol names for static?"); + } + + let g = declare::define_global(ccx, sym, llty).unwrap(); + + (g, attrs) + } + + hir_map::NodeForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemStatic(..), .. + }) => { + let sym = instance.symbol_name(ccx.shared()); + let g = if let Some(name) = + attr::first_attr_value_str_by_name(&attrs, "linkage") { + // If this is a static with a linkage specified, then we need to handle + // it a little specially. The typesystem prevents things like &T and + // extern "C" fn() from being non-null, so we can't just declare a + // static and call it a day. Some linkages (like weak) will make it such + // that the static actually has a null value. + let linkage = match base::llvm_linkage_by_name(&name.as_str()) { + Some(linkage) => linkage, + None => { + ccx.sess().span_fatal(span, "invalid linkage specified"); + } + }; + let llty2 = match ty.sty { + ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), + _ => { + ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`"); + } + }; + unsafe { + // Declare a symbol `foo` with the desired linkage. + let g1 = declare::declare_global(ccx, &sym, llty2); + llvm::LLVMRustSetLinkage(g1, linkage); + + // Declare an internal global `extern_with_linkage_foo` which + // is initialized with the address of `foo`. 
If `foo` is + // discarded during linking (for example, if `foo` has weak + // linkage and there are no definitions), then + // `extern_with_linkage_foo` will instead be initialized to + // zero. + let mut real_name = "_rust_extern_with_linkage_".to_string(); + real_name.push_str(&sym); + let g2 = declare::define_global(ccx, &real_name, llty).unwrap_or_else(||{ + ccx.sess().span_fatal(span, + &format!("symbol `{}` is already defined", &sym)) + }); + llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); + llvm::LLVMSetInitializer(g2, g1); + g2 + } + } else { + // Generate an external declaration. + declare::declare_global(ccx, &sym, llty) + }; + + (g, attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local(g, true); + } + } + + g + } else { + let sym = instance.symbol_name(ccx.shared()); + + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + // FIXME(nagisa): investigate whether it can be changed into define_global + let g = declare::declare_global(ccx, &sym, type_of::type_of(ccx, ty)); + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. 
+ for attr in ccx.tcx().get_attrs(def_id).iter() { + if attr.check_name("thread_local") { + llvm::set_thread_local(g, true); + } + } + if ccx.use_dll_storage_attrs() { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + g + }; + + ccx.instances().borrow_mut().insert(instance, g); + ccx.statics().borrow_mut().insert(g, def_id); + g +} + +pub fn trans_static(ccx: &CrateContext, + m: hir::Mutability, + id: ast::NodeId, + attrs: &[ast::Attribute]) + -> Result { + unsafe { + let _icx = push_ctxt("trans_static"); + let def_id = ccx.tcx().map.local_def_id(id); + let g = get_static(ccx, def_id); + + let v = ::mir::trans_static_initializer(ccx, def_id)?; + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = val_ty(v); + let v = if val_llty == Type::i1(ccx) { + val_llty = Type::i8(ccx); + llvm::LLVMConstZExt(v, val_llty.to_ref()) + } else { + v + }; + + let ty = ccx.tcx().item_type(def_id); + let llty = type_of::type_of(ccx, ty); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. + let empty_string = CString::new("").unwrap(); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + let new_g = llvm::LLVMRustGetOrInsertGlobal( + ccx.llmod(), name_string.as_ptr(), val_llty.to_ref()); + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::trans_crate.) 
+ ccx.statics_to_rauw().borrow_mut().push((g, new_g)); + new_g + }; + llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + if m != hir::MutMutable { + let tcontents = ty.type_contents(ccx.tcx()); + if !tcontents.interior_unsafe() { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } + } + + debuginfo::create_global_var_metadata(ccx, id, g); + + if attr::contains_name(attrs, + "thread_local") { + llvm::set_thread_local(g, true); + } + + base::set_link_section(ccx, g, attrs); + + Ok(g) + } +} diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs new file mode 100644 index 0000000000000..c0d7c64bd192c --- /dev/null +++ b/src/librustc_trans/context.rs @@ -0,0 +1,1148 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use llvm; +use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef}; +use rustc::dep_graph::{DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct}; +use middle::cstore::LinkMeta; +use rustc::hir::def::ExportMap; +use rustc::hir::def_id::DefId; +use rustc::traits; +use base; +use builder::Builder; +use common::BuilderRef_res; +use debuginfo; +use declare; +use glue::DropGlueKind; +use monomorphize::Instance; + +use partitioning::CodegenUnit; +use trans_item::TransItem; +use type_::Type; +use rustc_data_structures::base_n; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt}; +use session::config::NoDebugInfo; +use session::Session; +use session::config; +use symbol_map::SymbolMap; +use util::nodemap::{NodeSet, DefIdMap, FxHashMap, FxHashSet}; + +use std::ffi::{CStr, CString}; +use std::cell::{Cell, RefCell}; +use std::marker::PhantomData; +use std::ptr; +use std::rc::Rc; +use std::str; +use syntax::ast; +use syntax::symbol::InternedString; +use abi::FnType; + +pub struct Stats { + pub n_glues_created: Cell, + pub n_null_glues: Cell, + pub n_real_glues: Cell, + pub n_fns: Cell, + pub n_inlines: Cell, + pub n_closures: Cell, + pub n_llvm_insns: Cell, + pub llvm_insns: RefCell>, + // (ident, llvm-instructions) + pub fn_stats: RefCell >, +} + +/// The shared portion of a `CrateContext`. There is one `SharedCrateContext` +/// per crate. The data here is shared between all compilation units of the +/// crate, so it must not contain references to any LLVM data structures +/// (aside from metadata-related ones). +pub struct SharedCrateContext<'a, 'tcx: 'a> { + metadata_llmod: ModuleRef, + metadata_llcx: ContextRef, + + export_map: ExportMap, + reachable: NodeSet, + link_meta: LinkMeta, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + stats: Stats, + check_overflow: bool, + + use_dll_storage_attrs: bool, + + translation_items: RefCell>>, + trait_cache: RefCell>>, + project_cache: RefCell>>, +} + +/// The local portion of a `CrateContext`. 
There is one `LocalCrateContext` +/// per compilation unit. Each one has its own LLVM `ContextRef` so that +/// several compilation units may be optimized in parallel. All other LLVM +/// data structures in the `LocalCrateContext` are tied to that `ContextRef`. +pub struct LocalCrateContext<'tcx> { + llmod: ModuleRef, + llcx: ContextRef, + previous_work_product: Option, + codegen_unit: CodegenUnit<'tcx>, + needs_unwind_cleanup_cache: RefCell, bool>>, + fn_pointer_shims: RefCell, ValueRef>>, + drop_glues: RefCell, (ValueRef, FnType)>>, + /// Cache instances of monomorphic and polymorphic items + instances: RefCell, ValueRef>>, + /// Cache generated vtables + vtables: RefCell, + Option>), ValueRef>>, + /// Cache of constant strings, + const_cstr_cache: RefCell>, + + /// Reverse-direction for const ptrs cast from globals. + /// Key is a ValueRef holding a *T, + /// Val is a ValueRef holding a *[T]. + /// + /// Needed because LLVM loses pointer->pointee association + /// when we ptrcast, and we have to ptrcast during translation + /// of a [T] const because we form a slice, a (*T,usize) pair, not + /// a pointer to an LLVM array type. Similar for trait objects. + const_unsized: RefCell>, + + /// Cache of emitted const globals (value -> global) + const_globals: RefCell>, + + /// Cache of emitted const values + const_values: RefCell), ValueRef>>, + + /// Cache of external const values + extern_const_values: RefCell>, + + /// Mapping from static definitions to their DefId's. + statics: RefCell>, + + impl_method_cache: RefCell>, + + /// Cache of closure wrappers for bare fn's. + closure_bare_wrapper_cache: RefCell>, + + /// List of globals for static variables which need to be passed to the + /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete. + /// (We have to make sure we don't invalidate any ValueRefs referring + /// to constants.) 
+ statics_to_rauw: RefCell>, + + lltypes: RefCell, Type>>, + llsizingtypes: RefCell, Type>>, + type_hashcodes: RefCell, String>>, + int_type: Type, + opaque_vec_type: Type, + str_slice_type: Type, + builder: BuilderRef_res, + + /// Holds the LLVM values for closure IDs. + closure_vals: RefCell, ValueRef>>, + + dbg_cx: Option>, + + eh_personality: Cell>, + eh_unwind_resume: Cell>, + rust_try_fn: Cell>, + + intrinsics: RefCell>, + + /// Number of LLVM instructions translated into this `LocalCrateContext`. + /// This is used to perform some basic load-balancing to keep all LLVM + /// contexts around the same size. + n_llvm_insns: Cell, + + /// Depth of the current type-of computation - used to bail out + type_of_depth: Cell, + + symbol_map: Rc>, + + /// A counter that is used for generating local symbol names + local_gen_sym_counter: Cell, +} + +// Implement DepTrackingMapConfig for `trait_cache` +pub struct TraitSelectionCache<'tcx> { + data: PhantomData<&'tcx ()> +} + +impl<'tcx> DepTrackingMapConfig for TraitSelectionCache<'tcx> { + type Key = ty::PolyTraitRef<'tcx>; + type Value = traits::Vtable<'tcx, ()>; + fn to_dep_node(key: &ty::PolyTraitRef<'tcx>) -> DepNode { + key.to_poly_trait_predicate().dep_node() + } +} + +// # Global Cache + +pub struct ProjectionCache<'gcx> { + data: PhantomData<&'gcx ()> +} + +impl<'gcx> DepTrackingMapConfig for ProjectionCache<'gcx> { + type Key = Ty<'gcx>; + type Value = Ty<'gcx>; + fn to_dep_node(key: &Self::Key) -> DepNode { + // Ideally, we'd just put `key` into the dep-node, but we + // can't put full types in there. So just collect up all the + // def-ids of structs/enums as well as any traits that we + // project out of. It doesn't matter so much what we do here, + // except that if we are too coarse, we'll create overly + // coarse edges between impls and the trans. 
For example, if + // we just used the def-id of things we are projecting out of, + // then the key for `::T` and `::T` would both share a dep-node + // (`TraitSelect(SomeTrait)`), and hence the impls for both + // `Foo` and `Bar` would be considered inputs. So a change to + // `Bar` would affect things that just normalized `Foo`. + // Anyway, this heuristic is not ideal, but better than + // nothing. + let def_ids: Vec = + key.walk() + .filter_map(|t| match t.sty { + ty::TyAdt(adt_def, _) => Some(adt_def.did), + ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id), + _ => None, + }) + .collect(); + DepNode::TraitSelect(def_ids) + } +} + +/// This list owns a number of LocalCrateContexts and binds them to their common +/// SharedCrateContext. This type just exists as a convenience, something to +/// pass around all LocalCrateContexts with and get an iterator over them. +pub struct CrateContextList<'a, 'tcx: 'a> { + shared: &'a SharedCrateContext<'a, 'tcx>, + local_ccxs: Vec>, +} + +impl<'a, 'tcx: 'a> CrateContextList<'a, 'tcx> { + pub fn new(shared_ccx: &'a SharedCrateContext<'a, 'tcx>, + codegen_units: Vec>, + previous_work_products: Vec>, + symbol_map: Rc>) + -> CrateContextList<'a, 'tcx> { + CrateContextList { + shared: shared_ccx, + local_ccxs: codegen_units.into_iter().zip(previous_work_products).map(|(cgu, wp)| { + LocalCrateContext::new(shared_ccx, cgu, wp, symbol_map.clone()) + }).collect() + } + } + + /// Iterate over all crate contexts, whether or not they need + /// translation. That is, whether or not a `.o` file is available + /// for re-use from a previous incr. comp.). + pub fn iter_all<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> { + CrateContextIterator { + shared: self.shared, + index: 0, + local_ccxs: &self.local_ccxs[..], + filter_to_previous_work_product_unavail: false, + } + } + + /// Iterator over all CCX that need translation (cannot reuse results from + /// previous incr. comp.). 
+ pub fn iter_need_trans<'b>(&'b self) -> CrateContextIterator<'b, 'tcx> { + CrateContextIterator { + shared: self.shared, + index: 0, + local_ccxs: &self.local_ccxs[..], + filter_to_previous_work_product_unavail: true, + } + } + + pub fn shared(&self) -> &'a SharedCrateContext<'a, 'tcx> { + self.shared + } +} + +/// A CrateContext value binds together one LocalCrateContext with the +/// SharedCrateContext. It exists as a convenience wrapper, so we don't have to +/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans. +pub struct CrateContext<'a, 'tcx: 'a> { + shared: &'a SharedCrateContext<'a, 'tcx>, + local_ccxs: &'a [LocalCrateContext<'tcx>], + /// The index of `local` in `local_ccxs`. This is used in + /// `maybe_iter(true)` to identify the original `LocalCrateContext`. + index: usize, +} + +pub struct CrateContextIterator<'a, 'tcx: 'a> { + shared: &'a SharedCrateContext<'a, 'tcx>, + local_ccxs: &'a [LocalCrateContext<'tcx>], + index: usize, + + /// if true, only return results where `previous_work_product` is none + filter_to_previous_work_product_unavail: bool, +} + +impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> { + type Item = CrateContext<'a, 'tcx>; + + fn next(&mut self) -> Option> { + loop { + if self.index >= self.local_ccxs.len() { + return None; + } + + let index = self.index; + self.index += 1; + + let ccx = CrateContext { + shared: self.shared, + index: index, + local_ccxs: self.local_ccxs, + }; + + if + self.filter_to_previous_work_product_unavail && + ccx.previous_work_product().is_some() + { + continue; + } + + return Some(ccx); + } + } +} + +/// The iterator produced by `CrateContext::maybe_iter`. 
+pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> { + shared: &'a SharedCrateContext<'a, 'tcx>, + local_ccxs: &'a [LocalCrateContext<'tcx>], + index: usize, + single: bool, + origin: usize, +} + +impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> { + type Item = (CrateContext<'a, 'tcx>, bool); + + fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> { + if self.index >= self.local_ccxs.len() { + return None; + } + + let index = self.index; + self.index += 1; + if self.single { + self.index = self.local_ccxs.len(); + } + + let ccx = CrateContext { + shared: self.shared, + index: index, + local_ccxs: self.local_ccxs + }; + Some((ccx, index == self.origin)) + } +} + +pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { + let reloc_model_arg = match sess.opts.cg.relocation_model { + Some(ref s) => &s[..], + None => &sess.target.target.options.relocation_model[..], + }; + + match ::back::write::RELOC_MODEL_ARGS.iter().find( + |&&arg| arg.0 == reloc_model_arg) { + Some(x) => x.1, + _ => { + sess.err(&format!("{:?} is not a valid relocation mode", + sess.opts + .cg + .code_model)); + sess.abort_if_errors(); + bug!(); + } + } +} + +fn is_any_library(sess: &Session) -> bool { + sess.crate_types.borrow().iter().any(|ty| { + *ty != config::CrateTypeExecutable + }) +} + +pub fn is_pie_binary(sess: &Session) -> bool { + !is_any_library(sess) && get_reloc_model(sess) == llvm::RelocMode::PIC +} + +unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) { + let llcx = llvm::LLVMContextCreate(); + let mod_name = CString::new(mod_name).unwrap(); + let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx); + + // Ensure the data-layout values hardcoded remain the defaults. 
+ if sess.target.target.options.is_builtin { + let tm = ::back::write::create_target_machine(sess); + llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm); + llvm::LLVMRustDisposeTargetMachine(tm); + + let data_layout = llvm::LLVMGetDataLayout(llmod); + let data_layout = str::from_utf8(CStr::from_ptr(data_layout).to_bytes()) + .ok().expect("got a non-UTF8 data-layout from LLVM"); + + // Unfortunately LLVM target specs change over time, and right now we + // don't have proper support to work with any more than one + // `data_layout` than the one that is in the rust-lang/rust repo. If + // this compiler is configured against a custom LLVM, we may have a + // differing data layout, even though we should update our own to use + // that one. + // + // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we + // disable this check entirely as we may be configured with something + // that has a different target layout. + // + // Unsure if this will actually cause breakage when rustc is configured + // as such. 
+ // + // FIXME(#34960) + let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or(""); + let custom_llvm_used = cfg_llvm_root.trim() != ""; + + if !custom_llvm_used && sess.target.target.data_layout != data_layout { + bug!("data-layout for builtin `{}` target, `{}`, \ + differs from LLVM default, `{}`", + sess.target.target.llvm_target, + sess.target.target.data_layout, + data_layout); + } + } + + let data_layout = CString::new(&sess.target.target.data_layout[..]).unwrap(); + llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr()); + + let llvm_target = sess.target.target.llvm_target.as_bytes(); + let llvm_target = CString::new(llvm_target).unwrap(); + llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr()); + + if is_pie_binary(sess) { + llvm::LLVMRustSetModulePIELevel(llmod); + } + + (llcx, llmod) +} + +impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { + pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>, + export_map: ExportMap, + link_meta: LinkMeta, + reachable: NodeSet, + check_overflow: bool) + -> SharedCrateContext<'b, 'tcx> { + let (metadata_llcx, metadata_llmod) = unsafe { + create_context_and_module(&tcx.sess, "metadata") + }; + + // An interesting part of Windows which MSVC forces our hand on (and + // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` + // attributes in LLVM IR as well as native dependencies (in C these + // correspond to `__declspec(dllimport)`). + // + // Whenever a dynamic library is built by MSVC it must have its public + // interface specified by functions tagged with `dllexport` or otherwise + // they're not available to be linked against. This poses a few problems + // for the compiler, some of which are somewhat fundamental, but we use + // the `use_dll_storage_attrs` variable below to attach the `dllexport` + // attribute to all LLVM functions that are reachable (e.g. they're + // already tagged with external linkage). 
This is suboptimal for a few + // reasons: + // + // * If an object file will never be included in a dynamic library, + // there's no need to attach the dllexport attribute. Most object + // files in Rust are not destined to become part of a dll as binaries + // are statically linked by default. + // * If the compiler is emitting both an rlib and a dylib, the same + // source object file is currently used but with MSVC this may be less + // feasible. The compiler may be able to get around this, but it may + // involve some invasive changes to deal with this. + // + // The flipside of this situation is that whenever you link to a dll and + // you import a function from it, the import should be tagged with + // `dllimport`. At this time, however, the compiler does not emit + // `dllimport` for any declarations other than constants (where it is + // required), which is again suboptimal for even more reasons! + // + // * Calling a function imported from another dll without using + // `dllimport` causes the linker/compiler to have extra overhead (one + // `jmp` instruction on x86) when calling the function. + // * The same object file may be used in different circumstances, so a + // function may be imported from a dll if the object is linked into a + // dll, but it may be just linked against if linked into an rlib. + // * The compiler has no knowledge about whether native functions should + // be tagged dllimport or not. + // + // For now the compiler takes the perf hit (I do not have any numbers to + // this effect) by marking very little as `dllimport` and praying the + // linker will take care of everything. Fixing this problem will likely + // require adding a few attributes to Rust itself (feature gated at the + // start) and then strongly recommending static linkage on MSVC! 
+ let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc; + + SharedCrateContext { + metadata_llmod: metadata_llmod, + metadata_llcx: metadata_llcx, + export_map: export_map, + reachable: reachable, + link_meta: link_meta, + tcx: tcx, + stats: Stats { + n_glues_created: Cell::new(0), + n_null_glues: Cell::new(0), + n_real_glues: Cell::new(0), + n_fns: Cell::new(0), + n_inlines: Cell::new(0), + n_closures: Cell::new(0), + n_llvm_insns: Cell::new(0), + llvm_insns: RefCell::new(FxHashMap()), + fn_stats: RefCell::new(Vec::new()), + }, + check_overflow: check_overflow, + use_dll_storage_attrs: use_dll_storage_attrs, + translation_items: RefCell::new(FxHashSet()), + trait_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())), + project_cache: RefCell::new(DepTrackingMap::new(tcx.dep_graph.clone())), + } + } + + pub fn metadata_llmod(&self) -> ModuleRef { + self.metadata_llmod + } + + pub fn metadata_llcx(&self) -> ContextRef { + self.metadata_llcx + } + + pub fn export_map<'a>(&'a self) -> &'a ExportMap { + &self.export_map + } + + pub fn reachable<'a>(&'a self) -> &'a NodeSet { + &self.reachable + } + + pub fn trait_cache(&self) -> &RefCell>> { + &self.trait_cache + } + + pub fn project_cache(&self) -> &RefCell>> { + &self.project_cache + } + + pub fn link_meta<'a>(&'a self) -> &'a LinkMeta { + &self.link_meta + } + + pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx + } + + pub fn sess<'a>(&'a self) -> &'a Session { + &self.tcx.sess + } + + pub fn stats<'a>(&'a self) -> &'a Stats { + &self.stats + } + + pub fn use_dll_storage_attrs(&self) -> bool { + self.use_dll_storage_attrs + } + + pub fn translation_items(&self) -> &RefCell>> { + &self.translation_items + } + + /// Given the def-id of some item that has no type parameters, make + /// a suitable "empty substs" for it. 
+ pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> { + Substs::for_item(self.tcx(), item_def_id, + |_, _| self.tcx().mk_region(ty::ReErased), + |_, _| { + bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) + }) + } + + pub fn metadata_symbol_name(&self) -> String { + format!("rust_metadata_{}_{}", + self.link_meta().crate_name, + self.link_meta().crate_hash) + } +} + +impl<'tcx> LocalCrateContext<'tcx> { + fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>, + codegen_unit: CodegenUnit<'tcx>, + previous_work_product: Option, + symbol_map: Rc>) + -> LocalCrateContext<'tcx> { + unsafe { + // Append ".rs" to LLVM module identifier. + // + // LLVM code generator emits a ".file filename" directive + // for ELF backends. Value of the "filename" is set as the + // LLVM module identifier. Due to a LLVM MC bug[1], LLVM + // crashes if the module identifier is same as other symbols + // such as a function name in the module. + // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 + let llmod_id = format!("{}.rs", codegen_unit.name()); + + let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, + &llmod_id[..]); + + let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo { + let dctx = debuginfo::CrateDebugContext::new(llmod); + debuginfo::metadata::compile_unit_metadata(shared, &dctx, shared.tcx.sess); + Some(dctx) + } else { + None + }; + + let local_ccx = LocalCrateContext { + llmod: llmod, + llcx: llcx, + previous_work_product: previous_work_product, + codegen_unit: codegen_unit, + needs_unwind_cleanup_cache: RefCell::new(FxHashMap()), + fn_pointer_shims: RefCell::new(FxHashMap()), + drop_glues: RefCell::new(FxHashMap()), + instances: RefCell::new(FxHashMap()), + vtables: RefCell::new(FxHashMap()), + const_cstr_cache: RefCell::new(FxHashMap()), + const_unsized: RefCell::new(FxHashMap()), + const_globals: RefCell::new(FxHashMap()), + const_values: RefCell::new(FxHashMap()), + extern_const_values: 
RefCell::new(DefIdMap()), + statics: RefCell::new(FxHashMap()), + impl_method_cache: RefCell::new(FxHashMap()), + closure_bare_wrapper_cache: RefCell::new(FxHashMap()), + statics_to_rauw: RefCell::new(Vec::new()), + lltypes: RefCell::new(FxHashMap()), + llsizingtypes: RefCell::new(FxHashMap()), + type_hashcodes: RefCell::new(FxHashMap()), + int_type: Type::from_ref(ptr::null_mut()), + opaque_vec_type: Type::from_ref(ptr::null_mut()), + str_slice_type: Type::from_ref(ptr::null_mut()), + builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), + closure_vals: RefCell::new(FxHashMap()), + dbg_cx: dbg_cx, + eh_personality: Cell::new(None), + eh_unwind_resume: Cell::new(None), + rust_try_fn: Cell::new(None), + intrinsics: RefCell::new(FxHashMap()), + n_llvm_insns: Cell::new(0), + type_of_depth: Cell::new(0), + symbol_map: symbol_map, + local_gen_sym_counter: Cell::new(0), + }; + + let (int_type, opaque_vec_type, str_slice_ty, mut local_ccx) = { + // Do a little dance to create a dummy CrateContext, so we can + // create some things in the LLVM module of this codegen unit + let mut local_ccxs = vec![local_ccx]; + let (int_type, opaque_vec_type, str_slice_ty) = { + let dummy_ccx = LocalCrateContext::dummy_ccx(shared, + local_ccxs.as_mut_slice()); + let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); + str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx), + Type::int(&dummy_ccx)], + false); + (Type::int(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty) + }; + (int_type, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap()) + }; + + local_ccx.int_type = int_type; + local_ccx.opaque_vec_type = opaque_vec_type; + local_ccx.str_slice_type = str_slice_ty; + + if shared.tcx.sess.count_llvm_insns() { + base::init_insn_ctxt() + } + + local_ccx + } + } + + /// Create a dummy `CrateContext` from `self` and the provided + /// `SharedCrateContext`. This is somewhat dangerous because `self` may + /// not be fully initialized. 
+ /// + /// This is used in the `LocalCrateContext` constructor to allow calling + /// functions that expect a complete `CrateContext`, even before the local + /// portion is fully initialized and attached to the `SharedCrateContext`. + fn dummy_ccx<'a>(shared: &'a SharedCrateContext<'a, 'tcx>, + local_ccxs: &'a [LocalCrateContext<'tcx>]) + -> CrateContext<'a, 'tcx> { + assert!(local_ccxs.len() == 1); + CrateContext { + shared: shared, + index: 0, + local_ccxs: local_ccxs + } + } +} + +impl<'b, 'tcx> CrateContext<'b, 'tcx> { + pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> { + self.shared + } + + pub fn local(&self) -> &'b LocalCrateContext<'tcx> { + &self.local_ccxs[self.index] + } + + /// Either iterate over only `self`, or iterate over all `CrateContext`s in + /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)` + /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false` + /// otherwise. This method is useful for avoiding code duplication in + /// cases where it may or may not be necessary to translate code into every + /// context. 
+ pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> { + CrateContextMaybeIterator { + shared: self.shared, + index: if iter_all { 0 } else { self.index }, + single: !iter_all, + origin: self.index, + local_ccxs: self.local_ccxs, + } + } + + pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.shared.tcx + } + + pub fn sess<'a>(&'a self) -> &'a Session { + &self.shared.tcx.sess + } + + pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> { + Builder::new(self) + } + + pub fn raw_builder<'a>(&'a self) -> BuilderRef { + self.local().builder.b + } + + pub fn get_intrinsic(&self, key: &str) -> ValueRef { + if let Some(v) = self.intrinsics().borrow().get(key).cloned() { + return v; + } + match declare_intrinsic(self, key) { + Some(v) => return v, + None => bug!("unknown intrinsic '{}'", key) + } + } + + pub fn llmod(&self) -> ModuleRef { + self.local().llmod + } + + pub fn llcx(&self) -> ContextRef { + self.local().llcx + } + + pub fn previous_work_product(&self) -> Option<&WorkProduct> { + self.local().previous_work_product.as_ref() + } + + pub fn codegen_unit(&self) -> &CodegenUnit<'tcx> { + &self.local().codegen_unit + } + + pub fn td(&self) -> llvm::TargetDataRef { + unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) } + } + + pub fn export_map<'a>(&'a self) -> &'a ExportMap { + &self.shared.export_map + } + + pub fn reachable<'a>(&'a self) -> &'a NodeSet { + &self.shared.reachable + } + + pub fn link_meta<'a>(&'a self) -> &'a LinkMeta { + &self.shared.link_meta + } + + pub fn needs_unwind_cleanup_cache(&self) -> &RefCell, bool>> { + &self.local().needs_unwind_cleanup_cache + } + + pub fn fn_pointer_shims(&self) -> &RefCell, ValueRef>> { + &self.local().fn_pointer_shims + } + + pub fn drop_glues<'a>(&'a self) + -> &'a RefCell, (ValueRef, FnType)>> { + &self.local().drop_glues + } + + pub fn local_node_for_inlined_defid<'a>(&'a self, def_id: DefId) -> Option { + self.sess().cstore.local_node_for_inlined_defid(def_id) + } + + pub 
fn defid_for_inlined_node<'a>(&'a self, node_id: ast::NodeId) -> Option { + self.sess().cstore.defid_for_inlined_node(node_id) + } + + pub fn instances<'a>(&'a self) -> &'a RefCell, ValueRef>> { + &self.local().instances + } + + pub fn vtables<'a>(&'a self) + -> &'a RefCell, + Option>), ValueRef>> { + &self.local().vtables + } + + pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell> { + &self.local().const_cstr_cache + } + + pub fn const_unsized<'a>(&'a self) -> &'a RefCell> { + &self.local().const_unsized + } + + pub fn const_globals<'a>(&'a self) -> &'a RefCell> { + &self.local().const_globals + } + + pub fn const_values<'a>(&'a self) -> &'a RefCell), + ValueRef>> { + &self.local().const_values + } + + pub fn extern_const_values<'a>(&'a self) -> &'a RefCell> { + &self.local().extern_const_values + } + + pub fn statics<'a>(&'a self) -> &'a RefCell> { + &self.local().statics + } + + pub fn impl_method_cache<'a>(&'a self) + -> &'a RefCell> { + &self.local().impl_method_cache + } + + pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell> { + &self.local().closure_bare_wrapper_cache + } + + pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell> { + &self.local().statics_to_rauw + } + + pub fn lltypes<'a>(&'a self) -> &'a RefCell, Type>> { + &self.local().lltypes + } + + pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell, Type>> { + &self.local().llsizingtypes + } + + pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell, String>> { + &self.local().type_hashcodes + } + + pub fn stats<'a>(&'a self) -> &'a Stats { + &self.shared.stats + } + + pub fn int_type(&self) -> Type { + self.local().int_type + } + + pub fn opaque_vec_type(&self) -> Type { + self.local().opaque_vec_type + } + + pub fn str_slice_type(&self) -> Type { + self.local().str_slice_type + } + + pub fn closure_vals<'a>(&'a self) -> &'a RefCell, ValueRef>> { + &self.local().closure_vals + } + + pub fn dbg_cx<'a>(&'a self) -> &'a Option> { + &self.local().dbg_cx + } + + pub fn eh_personality<'a>(&'a self) 
-> &'a Cell> { + &self.local().eh_personality + } + + pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell> { + &self.local().eh_unwind_resume + } + + pub fn rust_try_fn<'a>(&'a self) -> &'a Cell> { + &self.local().rust_try_fn + } + + fn intrinsics<'a>(&'a self) -> &'a RefCell> { + &self.local().intrinsics + } + + pub fn count_llvm_insn(&self) { + self.local().n_llvm_insns.set(self.local().n_llvm_insns.get() + 1); + } + + pub fn obj_size_bound(&self) -> u64 { + self.tcx().data_layout.obj_size_bound() + } + + pub fn report_overbig_object(&self, obj: Ty<'tcx>) -> ! { + self.sess().fatal( + &format!("the type `{:?}` is too big for the current architecture", + obj)) + } + + pub fn enter_type_of(&self, ty: Ty<'tcx>) -> TypeOfDepthLock<'b, 'tcx> { + let current_depth = self.local().type_of_depth.get(); + debug!("enter_type_of({:?}) at depth {:?}", ty, current_depth); + if current_depth > self.sess().recursion_limit.get() { + self.sess().fatal( + &format!("overflow representing the type `{}`", ty)) + } + self.local().type_of_depth.set(current_depth + 1); + TypeOfDepthLock(self.local()) + } + + pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout { + self.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { + ty.layout(&infcx).unwrap_or_else(|e| { + match e { + ty::layout::LayoutError::SizeOverflow(_) => + self.sess().fatal(&e.to_string()), + _ => bug!("failed to get layout for `{}`: {}", ty, e) + } + }) + }) + } + + pub fn check_overflow(&self) -> bool { + self.shared.check_overflow + } + + pub fn use_dll_storage_attrs(&self) -> bool { + self.shared.use_dll_storage_attrs() + } + + pub fn symbol_map(&self) -> &SymbolMap<'tcx> { + &*self.local().symbol_map + } + + pub fn translation_items(&self) -> &RefCell>> { + &self.shared.translation_items + } + + /// Given the def-id of some item that has no type parameters, make + /// a suitable "empty substs" for it. 
+ pub fn empty_substs_for_def_id(&self, item_def_id: DefId) -> &'tcx Substs<'tcx> { + self.shared().empty_substs_for_def_id(item_def_id) + } + + /// Generate a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. + pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local().local_gen_sym_counter.get(); + self.local().local_gen_sym_counter.set(idx + 1); + // Include a '.' character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u64, base_n::MAX_BASE, &mut name); + name + } +} + +pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); + +impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> { + fn drop(&mut self) { + self.0.type_of_depth.set(self.0.type_of_depth.get() - 1); + } +} + +/// Declare any llvm intrinsics that you might need +fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { + macro_rules! ifn { + ($name:expr, fn() -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret)); + llvm::SetUnnamedAddr(f, false); + ccx.intrinsics().borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn(...) -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(ccx, $name, Type::variadic_func(&[], &$ret)); + llvm::SetUnnamedAddr(f, false); + ccx.intrinsics().borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret)); + llvm::SetUnnamedAddr(f, false); + ccx.intrinsics().borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! 
mk_struct { + ($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false)) + } + + let i8p = Type::i8p(ccx); + let void = Type::void(ccx); + let i1 = Type::i1(ccx); + let t_i8 = Type::i8(ccx); + let t_i16 = Type::i16(ccx); + let t_i32 = Type::i32(ccx); + let t_i64 = Type::i64(ccx); + let t_f32 = Type::f32(ccx); + let t_f64 = Type::f64(ccx); + + ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + 
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> 
mk_struct!{t_i64, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) 
-> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + + if ccx.sess().opts.debuginfo != NoDebugInfo { + ifn!("llvm.dbg.declare", fn(Type::metadata(ccx), Type::metadata(ccx)) -> void); + ifn!("llvm.dbg.value", fn(Type::metadata(ccx), t_i64, Type::metadata(ccx)) -> void); + } + return None; +} diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs new file mode 100644 index 0000000000000..e0c1a80be394d --- /dev/null +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -0,0 +1,133 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::FunctionDebugContext; +use super::metadata::file_metadata; +use super::utils::{DIB, span_start}; + +use llvm; +use llvm::debuginfo::{DIScope, DISubprogram}; +use common::{CrateContext, FunctionContext}; +use rustc::mir::{Mir, VisibilityScope}; + +use libc::c_uint; +use std::ptr; + +use syntax_pos::Pos; + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; + +use syntax_pos::BytePos; + +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: DIScope, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_null() + } +} + +/// Produce DIScope DIEs for each MIR Scope which has variables defined in it. 
+/// If debuginfo is disabled, the returned vector is empty. +pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { + let mir = fcx.mir(); + let null_scope = MirDebugScope { + scope_metadata: ptr::null_mut(), + file_start_pos: BytePos(0), + file_end_pos: BytePos(0) + }; + let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes); + + let fn_metadata = match fcx.debug_context { + FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata, + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + return scopes; + } + }; + + // Find all the scopes with variables defined in them. + let mut has_variables = BitVector::new(mir.visibility_scopes.len()); + for var in mir.vars_iter() { + let decl = &mir.local_decls[var]; + has_variables.insert(decl.source_info.unwrap().scope.index()); + } + + // Instantiate all scopes. + for idx in 0..mir.visibility_scopes.len() { + let scope = VisibilityScope::new(idx); + make_mir_scope(fcx.ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes); + } + + scopes +} + +fn make_mir_scope(ccx: &CrateContext, + mir: &Mir, + has_variables: &BitVector, + fn_metadata: DISubprogram, + scope: VisibilityScope, + scopes: &mut IndexVec) { + if scopes[scope].is_valid() { + return; + } + + let scope_data = &mir.visibility_scopes[scope]; + let parent_scope = if let Some(parent) = scope_data.parent_scope { + make_mir_scope(ccx, mir, has_variables, fn_metadata, parent, scopes); + scopes[parent] + } else { + // The root is the function itself. + let loc = span_start(ccx, mir.span); + scopes[scope] = MirDebugScope { + scope_metadata: fn_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; + return; + }; + + if !has_variables.contains(scope.index()) { + // Do not create a DIScope if there are no variables + // defined in this MIR Scope, to avoid debuginfo bloat. 
+ + // However, we don't skip creating a nested scope if + // our parent is the root, because we might want to + // put arguments in the root and not have shadowing. + if parent_scope.scope_metadata != fn_metadata { + scopes[scope] = parent_scope; + return; + } + } + + let loc = span_start(ccx, scope_data.span); + let file_metadata = file_metadata(ccx, &loc.file.name, &loc.file.abs_path); + let scope_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlock( + DIB(ccx), + parent_scope.scope_metadata, + file_metadata, + loc.line as c_uint, + loc.col.to_usize() as c_uint) + }; + scopes[scope] = MirDebugScope { + scope_metadata: scope_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; +} diff --git a/src/librustc_trans/trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs similarity index 100% rename from src/librustc_trans/trans/debuginfo/doc.rs rename to src/librustc_trans/debuginfo/doc.rs diff --git a/src/librustc_trans/trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs similarity index 90% rename from src/librustc_trans/trans/debuginfo/gdb.rs rename to src/librustc_trans/debuginfo/gdb.rs index 4e3fadd0fa911..8f937d3fe25cb 100644 --- a/src/librustc_trans/trans/debuginfo/gdb.rs +++ b/src/librustc_trans/debuginfo/gdb.rs @@ -12,9 +12,9 @@ use llvm; -use trans::common::{C_bytes, CrateContext, C_i32}; -use trans::declare; -use trans::type_::Type; +use common::{C_bytes, CrateContext, C_i32}; +use declare; +use type_::Type; use session::config::NoDebugInfo; use std::ffi::CString; @@ -71,13 +71,13 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext) let section_var = declare::define_global(ccx, section_var_name, llvm_type).unwrap_or_else(||{ - ccx.sess().bug(&format!("symbol `{}` is already defined", section_var_name)) + bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); llvm::LLVMSetInitializer(section_var, 
C_bytes(ccx, section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); - llvm::SetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); + llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); // This should make sure that the whole section is not larger than // the string it contains. Otherwise we get a warning from GDB. llvm::LLVMSetAlignment(section_var, 1); @@ -90,10 +90,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext) pub fn needs_gdb_debug_scripts_section(ccx: &CrateContext) -> bool { let omit_gdb_pretty_printer_section = - attr::contains_name(&ccx.tcx() - .map - .krate() - .attrs, + attr::contains_name(&ccx.tcx().map.krate_attrs(), "omit_gdb_pretty_printer_section"); !omit_gdb_pretty_printer_section && diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs new file mode 100644 index 0000000000000..ca76211dc4c95 --- /dev/null +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -0,0 +1,1792 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use self::RecursiveTypeDescription::*; +use self::MemberOffset::*; +use self::MemberDescriptionFactory::*; +use self::EnumDiscriminantInfo::*; + +use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, + get_namespace_and_span_for_item, create_DIArray, is_node_local_to_unit}; +use super::namespace::mangled_name_of_item; +use super::type_names::compute_debuginfo_type_name; +use super::{CrateDebugContext}; +use context::SharedCrateContext; +use session::Session; + +use llvm::{self, ValueRef}; +use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, + DICompositeType, DILexicalBlock, DIFlags}; + +use rustc::hir::def::CtorKind; +use rustc::hir::def_id::DefId; +use rustc::ty::fold::TypeVisitor; +use rustc::ty::subst::Substs; +use rustc::ty::util::TypeIdHasher; +use rustc::hir; +use rustc_data_structures::blake2b::Blake2bHasher; +use {type_of, machine, monomorphize}; +use common::CrateContext; +use type_::Type; +use rustc::ty::{self, AdtKind, Ty, layout}; +use session::config; +use util::nodemap::FxHashMap; +use util::common::path2cstr; + +use libc::{c_uint, c_longlong}; +use std::ffi::CString; +use std::fmt::Write; +use std::path::Path; +use std::ptr; +use syntax::ast; +use syntax::symbol::{Interner, InternedString}; +use syntax_pos::{self, Span}; + + +// From DWARF 5. 
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1 +const DW_LANG_RUST: c_uint = 0x1c; +#[allow(non_upper_case_globals)] +const DW_ATE_boolean: c_uint = 0x02; +#[allow(non_upper_case_globals)] +const DW_ATE_float: c_uint = 0x04; +#[allow(non_upper_case_globals)] +const DW_ATE_signed: c_uint = 0x05; +#[allow(non_upper_case_globals)] +const DW_ATE_unsigned: c_uint = 0x07; +#[allow(non_upper_case_globals)] +const DW_ATE_unsigned_char: c_uint = 0x08; + +pub const UNKNOWN_LINE_NUMBER: c_uint = 0; +pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0; + +// ptr::null() doesn't work :( +pub const NO_SCOPE_METADATA: DIScope = (0 as DIScope); + +#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] +pub struct UniqueTypeId(ast::Name); + +// The TypeMap is where the CrateDebugContext holds the type metadata nodes +// created so far. The metadata nodes are indexed by UniqueTypeId, and, for +// faster lookup, also by Ty. The TypeMap is responsible for creating +// UniqueTypeIds. +pub struct TypeMap<'tcx> { + // The UniqueTypeIds created so far + unique_id_interner: Interner, + // A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping. + unique_id_to_metadata: FxHashMap, + // A map from types to debuginfo metadata. This is a N:1 mapping. + type_to_metadata: FxHashMap, DIType>, + // A map from types to UniqueTypeId. This is a N:1 mapping. + type_to_unique_id: FxHashMap, UniqueTypeId> +} + +impl<'tcx> TypeMap<'tcx> { + pub fn new() -> TypeMap<'tcx> { + TypeMap { + unique_id_interner: Interner::new(), + type_to_metadata: FxHashMap(), + unique_id_to_metadata: FxHashMap(), + type_to_unique_id: FxHashMap(), + } + } + + // Adds a Ty to metadata mapping to the TypeMap. The method will fail if + // the mapping already exists. 
+ fn register_type_with_metadata<'a>(&mut self, + type_: Ty<'tcx>, + metadata: DIType) { + if self.type_to_metadata.insert(type_, metadata).is_some() { + bug!("Type metadata for Ty '{}' is already in the TypeMap!", type_); + } + } + + // Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will + // fail if the mapping already exists. + fn register_unique_id_with_metadata(&mut self, + unique_type_id: UniqueTypeId, + metadata: DIType) { + if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() { + bug!("Type metadata for unique id '{}' is already in the TypeMap!", + self.get_unique_type_id_as_string(unique_type_id)); + } + } + + fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option { + self.type_to_metadata.get(&type_).cloned() + } + + fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option { + self.unique_id_to_metadata.get(&unique_type_id).cloned() + } + + // Get the string representation of a UniqueTypeId. This method will fail if + // the id is unknown. + fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str { + let UniqueTypeId(interner_key) = unique_type_id; + self.unique_id_interner.get(interner_key) + } + + // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given + // type has been requested before, this is just a table lookup. Otherwise an + // ID will be generated and stored for later lookup. + fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>, + type_: Ty<'tcx>) -> UniqueTypeId { + // Let's see if we already have something in the cache + match self.type_to_unique_id.get(&type_).cloned() { + Some(unique_type_id) => return unique_type_id, + None => { /* generate one */} + }; + + // The hasher we are using to generate the UniqueTypeId. We want + // something that provides more than the 64 bits of the DefaultHasher. 
+ const TYPE_ID_HASH_LENGTH: usize = 20; + + let mut type_id_hasher = TypeIdHasher::new(cx.tcx(), + Blake2bHasher::new(TYPE_ID_HASH_LENGTH, &[])); + type_id_hasher.visit_ty(type_); + let mut hash_state = type_id_hasher.into_inner(); + let hash: &[u8] = hash_state.finalize(); + debug_assert!(hash.len() == TYPE_ID_HASH_LENGTH); + + let mut unique_type_id = String::with_capacity(TYPE_ID_HASH_LENGTH * 2); + + for byte in hash.into_iter() { + write!(&mut unique_type_id, "{:x}", byte).unwrap(); + } + + let key = self.unique_id_interner.intern(&unique_type_id); + self.type_to_unique_id.insert(type_, UniqueTypeId(key)); + + return UniqueTypeId(key); + } + + // Get the UniqueTypeId for an enum variant. Enum variants are not really + // types of their own, so they need special handling. We still need a + // UniqueTypeId for them, since to debuginfo they *are* real types. + fn get_unique_type_id_of_enum_variant<'a>(&mut self, + cx: &CrateContext<'a, 'tcx>, + enum_type: Ty<'tcx>, + variant_name: &str) + -> UniqueTypeId { + let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type); + let enum_variant_type_id = format!("{}::{}", + self.get_unique_type_id_as_string(enum_type_id), + variant_name); + let interner_key = self.unique_id_interner.intern(&enum_variant_type_id); + UniqueTypeId(interner_key) + } +} + +// A description of some recursive type. It can either be already finished (as +// with FinalMetadata) or it is not yet finished, but contains all information +// needed to generate the missing parts of the description. See the +// documentation section on Recursive Types at the top of this file for more +// information. 
+enum RecursiveTypeDescription<'tcx> { + UnfinishedMetadata { + unfinished_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + metadata_stub: DICompositeType, + llvm_type: Type, + member_description_factory: MemberDescriptionFactory<'tcx>, + }, + FinalMetadata(DICompositeType) +} + +fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( + cx: &CrateContext<'a, 'tcx>, + unfinished_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + metadata_stub: DICompositeType, + llvm_type: Type, + member_description_factory: MemberDescriptionFactory<'tcx>) + -> RecursiveTypeDescription<'tcx> { + + // Insert the stub into the TypeMap in order to allow for recursive references + let mut type_map = debug_context(cx).type_map.borrow_mut(); + type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub); + type_map.register_type_with_metadata(unfinished_type, metadata_stub); + + UnfinishedMetadata { + unfinished_type: unfinished_type, + unique_type_id: unique_type_id, + metadata_stub: metadata_stub, + llvm_type: llvm_type, + member_description_factory: member_description_factory, + } +} + +impl<'tcx> RecursiveTypeDescription<'tcx> { + // Finishes up the description of the type in question (mostly by providing + // descriptions of the fields of the given type) and returns the final type + // metadata. + fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult { + match *self { + FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false), + UnfinishedMetadata { + unfinished_type, + unique_type_id, + metadata_stub, + llvm_type, + ref member_description_factory, + .. + } => { + // Make sure that we have a forward declaration of the type in + // the TypeMap so that recursive references are possible. This + // will always be the case if the RecursiveTypeDescription has + // been properly created through the + // create_and_register_recursive_type_forward_declaration() + // function. 
+ { + let type_map = debug_context(cx).type_map.borrow(); + if type_map.find_metadata_for_unique_id(unique_type_id).is_none() || + type_map.find_metadata_for_type(unfinished_type).is_none() { + bug!("Forward declaration of potentially recursive type \ + '{:?}' was not found in TypeMap!", + unfinished_type); + } + } + + // ... then create the member descriptions ... + let member_descriptions = + member_description_factory.create_member_descriptions(cx); + + // ... and attach them to the stub to complete it. + set_members_of_composite_type(cx, + metadata_stub, + llvm_type, + &member_descriptions[..]); + return MetadataCreationResult::new(metadata_stub, true); + } + } + } +} + +// Returns from the enclosing function if the type metadata with the given +// unique id can be found in the type map +macro_rules! return_if_metadata_created_in_meantime { + ($cx: expr, $unique_type_id: expr) => ( + match debug_context($cx).type_map + .borrow() + .find_metadata_for_unique_id($unique_type_id) { + Some(metadata) => return MetadataCreationResult::new(metadata, true), + None => { /* proceed normally */ } + } + ) +} + +fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + unique_type_id: UniqueTypeId, + element_type: Ty<'tcx>, + len: Option, + span: Span) + -> MetadataCreationResult { + let element_type_metadata = type_metadata(cx, element_type, span); + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + let element_llvm_type = type_of::type_of(cx, element_type); + let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type); + + let (array_size_in_bytes, upper_bound) = match len { + Some(len) => (element_type_size * len, len as c_longlong), + None => (0, -1) + }; + + let subrange = unsafe { + llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) + }; + + let subscripts = create_DIArray(DIB(cx), &[subrange]); + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateArrayType( + DIB(cx), + 
bytes_to_bits(array_size_in_bytes), + bytes_to_bits(element_type_align), + element_type_metadata, + subscripts) + }; + + return MetadataCreationResult::new(metadata, false); +} + +fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + vec_type: Ty<'tcx>, + element_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span) + -> MetadataCreationResult { + let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { + ty: element_type, + mutbl: hir::MutImmutable + }); + + let element_type_metadata = type_metadata(cx, data_ptr_type, span); + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + let slice_llvm_type = type_of::type_of(cx, vec_type); + let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true); + + let member_llvm_types = slice_llvm_type.field_types(); + assert!(slice_layout_is_correct(cx, + &member_llvm_types[..], + element_type)); + let member_descriptions = [ + MemberDescription { + name: "data_ptr".to_string(), + llvm_type: member_llvm_types[0], + type_metadata: element_type_metadata, + offset: ComputedMemberOffset, + flags: DIFlags::FlagZero, + }, + MemberDescription { + name: "length".to_string(), + llvm_type: member_llvm_types[1], + type_metadata: type_metadata(cx, cx.tcx().types.usize, span), + offset: ComputedMemberOffset, + flags: DIFlags::FlagZero, + }, + ]; + + assert!(member_descriptions.len() == member_llvm_types.len()); + + let loc = span_start(cx, span); + let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); + + let metadata = composite_type_metadata(cx, + slice_llvm_type, + &slice_type_name[..], + unique_type_id, + &member_descriptions, + NO_SCOPE_METADATA, + file_metadata, + span); + return MetadataCreationResult::new(metadata, false); + + fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + member_llvm_types: &[Type], + element_type: Ty<'tcx>) + -> bool { + member_llvm_types.len() == 2 && + member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() && + 
member_llvm_types[1] == cx.int_type() + } +} + +fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + unique_type_id: UniqueTypeId, + signature: &ty::PolyFnSig<'tcx>, + span: Span) + -> MetadataCreationResult +{ + let signature = cx.tcx().erase_late_bound_regions(signature); + + let mut signature_metadata: Vec = Vec::with_capacity(signature.inputs.len() + 1); + + // return type + signature_metadata.push(match signature.output.sty { + ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(), + _ => type_metadata(cx, signature.output, span) + }); + + // regular arguments + for &argument_type in &signature.inputs { + signature_metadata.push(type_metadata(cx, argument_type, span)); + } + + return_if_metadata_created_in_meantime!(cx, unique_type_id); + + return MetadataCreationResult::new( + unsafe { + llvm::LLVMRustDIBuilderCreateSubroutineType( + DIB(cx), + unknown_file_metadata(cx), + create_DIArray(DIB(cx), &signature_metadata[..])) + }, + false); +} + +// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill- +// defined concept. For the case of an actual trait pointer (i.e., Box, +// &Trait), trait_object_type should be the whole thing (e.g, Box) and +// trait_type should be the actual trait (e.g., Trait). Where the trait is part +// of a DST struct, there is no trait_object_type and the results of this +// function will be a little bit weird. +fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + trait_type: Ty<'tcx>, + trait_object_type: Option>, + unique_type_id: UniqueTypeId) + -> DIType { + // The implementation provided here is a stub. It makes sure that the trait + // type is assigned the correct name, size, namespace, and source location. + // But it does not describe the trait's methods. + + let containing_scope = match trait_type.sty { + ty::TyDynamic(ref data, ..) 
=> if let Some(principal) = data.principal() { + let def_id = principal.def_id(); + get_namespace_and_span_for_item(cx, def_id).0 + } else { + NO_SCOPE_METADATA + }, + _ => { + bug!("debuginfo: Unexpected trait-object type in \ + trait_pointer_metadata(): {:?}", + trait_type); + } + }; + + let trait_object_type = trait_object_type.unwrap_or(trait_type); + let trait_type_name = + compute_debuginfo_type_name(cx, trait_object_type, false); + + let trait_llvm_type = type_of::type_of(cx, trait_object_type); + let file_metadata = unknown_file_metadata(cx); + + composite_type_metadata(cx, + trait_llvm_type, + &trait_type_name[..], + unique_type_id, + &[], + containing_scope, + file_metadata, + syntax_pos::DUMMY_SP) +} + +pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + usage_site_span: Span) + -> DIType { + // Get the unique type id of this type. + let unique_type_id = { + let mut type_map = debug_context(cx).type_map.borrow_mut(); + // First, try to find the type in TypeMap. If we have seen it before, we + // can exit early here. + match type_map.find_metadata_for_type(t) { + Some(metadata) => { + return metadata; + }, + None => { + // The Ty is not in the TypeMap but maybe we have already seen + // an equivalent type (e.g. only differing in region arguments). + // In order to find out, generate the unique type id and look + // that up. + let unique_type_id = type_map.get_unique_type_id_of_type(cx, t); + match type_map.find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => { + // There is already an equivalent type in the TypeMap. + // Register this Ty as an alias in the cache and + // return the cached metadata. + type_map.register_type_with_metadata(t, metadata); + return metadata; + }, + None => { + // There really is no type metadata for this type, so + // proceed by creating it. 
+ unique_type_id + } + } + } + } + }; + + debug!("type_metadata: {:?}", t); + + let sty = &t.sty; + let MetadataCreationResult { metadata, already_stored_in_typemap } = match *sty { + ty::TyNever | + ty::TyBool | + ty::TyChar | + ty::TyInt(_) | + ty::TyUint(_) | + ty::TyFloat(_) => { + MetadataCreationResult::new(basic_type_metadata(cx, t), false) + } + ty::TyTuple(ref elements) if elements.is_empty() => { + MetadataCreationResult::new(basic_type_metadata(cx, t), false) + } + ty::TyArray(typ, len) => { + fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span) + } + ty::TySlice(typ) => { + fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) + } + ty::TyStr => { + fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span) + } + ty::TyDynamic(..) => { + MetadataCreationResult::new( + trait_pointer_metadata(cx, t, None, unique_type_id), + false) + } + ty::TyBox(ty) | + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | + ty::TyRef(_, ty::TypeAndMut{ty, ..}) => { + match ty.sty { + ty::TySlice(typ) => { + vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span) + } + ty::TyStr => { + vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span) + } + ty::TyDynamic(..) 
=> { + MetadataCreationResult::new( + trait_pointer_metadata(cx, ty, Some(t), unique_type_id), + false) + } + _ => { + let pointee_metadata = type_metadata(cx, ty, usage_site_span); + + match debug_context(cx).type_map + .borrow() + .find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => return metadata, + None => { /* proceed normally */ } + }; + + MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata), + false) + } + } + } + ty::TyFnDef(.., ref barefnty) | ty::TyFnPtr(ref barefnty) => { + let fn_metadata = subroutine_type_metadata(cx, + unique_type_id, + &barefnty.sig, + usage_site_span).metadata; + match debug_context(cx).type_map + .borrow() + .find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => return metadata, + None => { /* proceed normally */ } + }; + + // This is actually a function pointer, so wrap it in pointer DI + MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false) + + } + ty::TyClosure(def_id, substs) => { + let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx()).collect(); + prepare_tuple_metadata(cx, + t, + &upvar_tys, + unique_type_id, + usage_site_span).finalize(cx) + } + ty::TyAdt(def, ..) => match def.adt_kind() { + AdtKind::Struct => { + prepare_struct_metadata(cx, + t, + unique_type_id, + usage_site_span).finalize(cx) + } + AdtKind::Union => { + prepare_union_metadata(cx, + t, + unique_type_id, + usage_site_span).finalize(cx) + } + AdtKind::Enum => { + prepare_enum_metadata(cx, + t, + def.did, + unique_type_id, + usage_site_span).finalize(cx) + } + }, + ty::TyTuple(ref elements) => { + prepare_tuple_metadata(cx, + t, + &elements[..], + unique_type_id, + usage_site_span).finalize(cx) + } + _ => { + bug!("debuginfo: unexpected type in type_metadata: {:?}", sty) + } + }; + + { + let mut type_map = debug_context(cx).type_map.borrow_mut(); + + if already_stored_in_typemap { + // Also make sure that we already have a TypeMap entry for the unique type id. 
+ let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) { + Some(metadata) => metadata, + None => { + span_bug!(usage_site_span, + "Expected type metadata for unique \ + type id '{}' to already be in \ + the debuginfo::TypeMap but it \ + was not. (Ty = {})", + type_map.get_unique_type_id_as_string(unique_type_id), + t); + } + }; + + match type_map.find_metadata_for_type(t) { + Some(metadata) => { + if metadata != metadata_for_uid { + span_bug!(usage_site_span, + "Mismatch between Ty and \ + UniqueTypeId maps in \ + debuginfo::TypeMap. \ + UniqueTypeId={}, Ty={}", + type_map.get_unique_type_id_as_string(unique_type_id), + t); + } + } + None => { + type_map.register_type_with_metadata(t, metadata); + } + } + } else { + type_map.register_type_with_metadata(t, metadata); + type_map.register_unique_id_with_metadata(unique_type_id, metadata); + } + } + + metadata +} + +pub fn file_metadata(cx: &CrateContext, path: &str, full_path: &Option) -> DIFile { + // FIXME (#9639): This needs to handle non-utf8 paths + let work_dir = cx.sess().working_dir.to_str().unwrap(); + let file_name = + full_path.as_ref().map(|p| p.as_str()).unwrap_or_else(|| { + if path.starts_with(work_dir) { + &path[work_dir.len() + 1..path.len()] + } else { + path + } + }); + + file_metadata_(cx, path, file_name, &work_dir) +} + +pub fn unknown_file_metadata(cx: &CrateContext) -> DIFile { + // Regular filenames should not be empty, so we abuse an empty name as the + // key for the special unknown file metadata + file_metadata_(cx, "", "", "") + +} + +fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) -> DIFile { + if let Some(file_metadata) = debug_context(cx).created_files.borrow().get(key) { + return *file_metadata; + } + + debug!("file_metadata: file_name: {}, work_dir: {}", file_name, work_dir); + + let file_name = CString::new(file_name).unwrap(); + let work_dir = CString::new(work_dir).unwrap(); + let file_metadata = unsafe { + 
llvm::LLVMRustDIBuilderCreateFile(DIB(cx), file_name.as_ptr(), + work_dir.as_ptr()) + }; + + let mut created_files = debug_context(cx).created_files.borrow_mut(); + created_files.insert(key.to_string(), file_metadata); + file_metadata +} + +fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>) -> DIType { + + debug!("basic_type_metadata: {:?}", t); + + let (name, encoding) = match t.sty { + ty::TyNever => ("!", DW_ATE_unsigned), + ty::TyTuple(ref elements) if elements.is_empty() => + ("()", DW_ATE_unsigned), + ty::TyBool => ("bool", DW_ATE_boolean), + ty::TyChar => ("char", DW_ATE_unsigned_char), + ty::TyInt(int_ty) => { + (int_ty.ty_to_string(), DW_ATE_signed) + }, + ty::TyUint(uint_ty) => { + (uint_ty.ty_to_string(), DW_ATE_unsigned) + }, + ty::TyFloat(float_ty) => { + (float_ty.ty_to_string(), DW_ATE_float) + }, + _ => bug!("debuginfo::basic_type_metadata - t is invalid type") + }; + + let llvm_type = type_of::type_of(cx, t); + let (size, align) = size_and_align_of(cx, llvm_type); + let name = CString::new(name).unwrap(); + let ty_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateBasicType( + DIB(cx), + name.as_ptr(), + bytes_to_bits(size), + bytes_to_bits(align), + encoding) + }; + + return ty_metadata; +} + +fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + pointer_type: Ty<'tcx>, + pointee_type_metadata: DIType) + -> DIType { + let pointer_llvm_type = type_of::type_of(cx, pointer_type); + let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type); + let name = compute_debuginfo_type_name(cx, pointer_type, false); + let name = CString::new(name).unwrap(); + let ptr_metadata = unsafe { + llvm::LLVMRustDIBuilderCreatePointerType( + DIB(cx), + pointee_type_metadata, + bytes_to_bits(pointer_size), + bytes_to_bits(pointer_align), + name.as_ptr()) + }; + return ptr_metadata; +} + +pub fn compile_unit_metadata(scc: &SharedCrateContext, + debug_context: &CrateDebugContext, + sess: &Session) + -> DIDescriptor { 
+ let work_dir = &sess.working_dir; + let compile_unit_name = match sess.local_crate_source_file { + None => fallback_path(scc), + Some(ref abs_path) => { + if abs_path.is_relative() { + sess.warn("debuginfo: Invalid path to crate's local root source file!"); + fallback_path(scc) + } else { + match abs_path.strip_prefix(work_dir) { + Ok(ref p) if p.is_relative() => { + if p.starts_with(Path::new("./")) { + path2cstr(p) + } else { + path2cstr(&Path::new(".").join(p)) + } + } + _ => fallback_path(scc) + } + } + } + }; + + debug!("compile_unit_metadata: {:?}", compile_unit_name); + let producer = format!("rustc version {}", + (option_env!("CFG_VERSION")).expect("CFG_VERSION")); + + let compile_unit_name = compile_unit_name.as_ptr(); + let work_dir = path2cstr(&work_dir); + let producer = CString::new(producer).unwrap(); + let flags = "\0"; + let split_name = "\0"; + return unsafe { + llvm::LLVMRustDIBuilderCreateCompileUnit( + debug_context.builder, + DW_LANG_RUST, + compile_unit_name, + work_dir.as_ptr(), + producer.as_ptr(), + sess.opts.optimize != config::OptLevel::No, + flags.as_ptr() as *const _, + 0, + split_name.as_ptr() as *const _) + }; + + fn fallback_path(scc: &SharedCrateContext) -> CString { + CString::new(scc.link_meta().crate_name.to_string()).unwrap() + } +} + +struct MetadataCreationResult { + metadata: DIType, + already_stored_in_typemap: bool +} + +impl MetadataCreationResult { + fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationResult { + MetadataCreationResult { + metadata: metadata, + already_stored_in_typemap: already_stored_in_typemap + } + } +} + +#[derive(Debug)] +enum MemberOffset { + FixedMemberOffset { bytes: usize }, + // For ComputedMemberOffset, the offset is read from the llvm type definition. + ComputedMemberOffset +} + +// Description of a type member, which can either be a regular field (as in +// structs or tuples) or an enum variant. 
+#[derive(Debug)] +struct MemberDescription { + name: String, + llvm_type: Type, + type_metadata: DIType, + offset: MemberOffset, + flags: DIFlags, +} + +// A factory for MemberDescriptions. It produces a list of member descriptions +// for some record-like type. MemberDescriptionFactories are used to defer the +// creation of type member descriptions in order to break cycles arising from +// recursive type definitions. +enum MemberDescriptionFactory<'tcx> { + StructMDF(StructMemberDescriptionFactory<'tcx>), + TupleMDF(TupleMemberDescriptionFactory<'tcx>), + EnumMDF(EnumMemberDescriptionFactory<'tcx>), + UnionMDF(UnionMemberDescriptionFactory<'tcx>), + VariantMDF(VariantMemberDescriptionFactory<'tcx>) +} + +impl<'tcx> MemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + match *self { + StructMDF(ref this) => { + this.create_member_descriptions(cx) + } + TupleMDF(ref this) => { + this.create_member_descriptions(cx) + } + EnumMDF(ref this) => { + this.create_member_descriptions(cx) + } + UnionMDF(ref this) => { + this.create_member_descriptions(cx) + } + VariantMDF(ref this) => { + this.create_member_descriptions(cx) + } + } + } +} + +//=----------------------------------------------------------------------------- +// Structs +//=----------------------------------------------------------------------------- + +// Creates MemberDescriptions for the fields of a struct +struct StructMemberDescriptionFactory<'tcx> { + variant: &'tcx ty::VariantDef, + substs: &'tcx Substs<'tcx>, + is_simd: bool, + span: Span, +} + +impl<'tcx> StructMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + let field_size = if self.is_simd { + let fty = monomorphize::field_ty(cx.tcx(), + self.substs, + &self.variant.fields[0]); + Some(machine::llsize_of_alloc( + cx, + type_of::type_of(cx, fty) + ) as usize) + } else { + None + }; + + 
self.variant.fields.iter().enumerate().map(|(i, f)| { + let name = if self.variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + f.name.to_string() + }; + let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); + + let offset = if self.is_simd { + FixedMemberOffset { bytes: i * field_size.unwrap() } + } else { + ComputedMemberOffset + }; + + MemberDescription { + name: name, + llvm_type: type_of::type_of(cx, fty), + type_metadata: type_metadata(cx, fty, self.span), + offset: offset, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + + +fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + struct_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span) + -> RecursiveTypeDescription<'tcx> { + let struct_name = compute_debuginfo_type_name(cx, struct_type, false); + let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); + + let (struct_def_id, variant, substs) = match struct_type.sty { + ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + _ => bug!("prepare_struct_metadata on a non-ADT") + }; + + let (containing_scope, _) = get_namespace_and_span_for_item(cx, struct_def_id); + + let struct_metadata_stub = create_struct_stub(cx, + struct_llvm_type, + &struct_name, + unique_type_id, + containing_scope); + + create_and_register_recursive_type_forward_declaration( + cx, + struct_type, + unique_type_id, + struct_metadata_stub, + struct_llvm_type, + StructMDF(StructMemberDescriptionFactory { + variant: variant, + substs: substs, + is_simd: struct_type.is_simd(), + span: span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Tuples +//=----------------------------------------------------------------------------- + +// Creates MemberDescriptions for the fields of a tuple +struct TupleMemberDescriptionFactory<'tcx> { + component_types: Vec>, + span: Span, +} + +impl<'tcx> TupleMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: 
&CrateContext<'a, 'tcx>) + -> Vec { + self.component_types + .iter() + .enumerate() + .map(|(i, &component_type)| { + MemberDescription { + name: format!("__{}", i), + llvm_type: type_of::type_of(cx, component_type), + type_metadata: type_metadata(cx, component_type, self.span), + offset: ComputedMemberOffset, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + +fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + tuple_type: Ty<'tcx>, + component_types: &[Ty<'tcx>], + unique_type_id: UniqueTypeId, + span: Span) + -> RecursiveTypeDescription<'tcx> { + let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); + let tuple_llvm_type = type_of::type_of(cx, tuple_type); + + create_and_register_recursive_type_forward_declaration( + cx, + tuple_type, + unique_type_id, + create_struct_stub(cx, + tuple_llvm_type, + &tuple_name[..], + unique_type_id, + NO_SCOPE_METADATA), + tuple_llvm_type, + TupleMDF(TupleMemberDescriptionFactory { + component_types: component_types.to_vec(), + span: span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Unions +//=----------------------------------------------------------------------------- + +struct UnionMemberDescriptionFactory<'tcx> { + variant: &'tcx ty::VariantDef, + substs: &'tcx Substs<'tcx>, + span: Span, +} + +impl<'tcx> UnionMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + self.variant.fields.iter().map(|field| { + let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); + MemberDescription { + name: field.name.to_string(), + llvm_type: type_of::type_of(cx, fty), + type_metadata: type_metadata(cx, fty, self.span), + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero, + } + }).collect() + } +} + +fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + union_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, + span: Span) + -> 
RecursiveTypeDescription<'tcx> { + let union_name = compute_debuginfo_type_name(cx, union_type, false); + let union_llvm_type = type_of::in_memory_type_of(cx, union_type); + + let (union_def_id, variant, substs) = match union_type.sty { + ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + _ => bug!("prepare_union_metadata on a non-ADT") + }; + + let (containing_scope, _) = get_namespace_and_span_for_item(cx, union_def_id); + + let union_metadata_stub = create_union_stub(cx, + union_llvm_type, + &union_name, + unique_type_id, + containing_scope); + + create_and_register_recursive_type_forward_declaration( + cx, + union_type, + unique_type_id, + union_metadata_stub, + union_llvm_type, + UnionMDF(UnionMemberDescriptionFactory { + variant: variant, + substs: substs, + span: span, + }) + ) +} + +//=----------------------------------------------------------------------------- +// Enums +//=----------------------------------------------------------------------------- + +// Describes the members of an enum value: An enum is described as a union of +// structs in DWARF. This MemberDescriptionFactory provides the description for +// the members of this union; so for every variant of the given enum, this +// factory will produce one MemberDescription (all with no name and a fixed +// offset of zero bytes). +struct EnumMemberDescriptionFactory<'tcx> { + enum_type: Ty<'tcx>, + type_rep: &'tcx layout::Layout, + discriminant_type_metadata: Option, + containing_scope: DIScope, + file_metadata: DIFile, + span: Span, +} + +impl<'tcx> EnumMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + let adt = &self.enum_type.ty_adt_def().unwrap(); + let substs = match self.enum_type.sty { + ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s, + _ => bug!("{} is not an enum", self.enum_type) + }; + match *self.type_rep { + layout::General { ref variants, .. 
} => { + let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata + .expect("")); + variants + .iter() + .enumerate() + .map(|(i, struct_def)| { + let (variant_type_metadata, + variant_llvm_type, + member_desc_factory) = + describe_enum_variant(cx, + self.enum_type, + struct_def, + &adt.variants[i], + discriminant_info, + self.containing_scope, + self.span); + + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + variant_llvm_type, + &member_descriptions); + MemberDescription { + name: "".to_string(), + llvm_type: variant_llvm_type, + type_metadata: variant_type_metadata, + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero + } + }).collect() + }, + layout::Univariant{ ref variant, .. } => { + assert!(adt.variants.len() <= 1); + + if adt.variants.is_empty() { + vec![] + } else { + let (variant_type_metadata, + variant_llvm_type, + member_description_factory) = + describe_enum_variant(cx, + self.enum_type, + variant, + &adt.variants[0], + NoDiscriminant, + self.containing_scope, + self.span); + + let member_descriptions = + member_description_factory.create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + variant_llvm_type, + &member_descriptions[..]); + vec![ + MemberDescription { + name: "".to_string(), + llvm_type: variant_llvm_type, + type_metadata: variant_type_metadata, + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero + } + ] + } + } + layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => { + // As far as debuginfo is concerned, the pointer this enum + // represents is still wrapped in a struct. This is to make the + // DWARF representation of enums uniform. 
+ + // First create a description of the artificial wrapper struct: + let non_null_variant = &adt.variants[non_null_variant_index as usize]; + let non_null_variant_name = non_null_variant.name.as_str(); + + // The llvm type and metadata of the pointer + let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); + let non_null_llvm_type = type_of::type_of(cx, nnty); + let non_null_type_metadata = type_metadata(cx, nnty, self.span); + + // The type of the artificial struct wrapping the pointer + let artificial_struct_llvm_type = Type::struct_(cx, + &[non_null_llvm_type], + false); + + // For the metadata of the wrapper struct, we need to create a + // MemberDescription of the struct's single field. + let sole_struct_member_description = MemberDescription { + name: match non_null_variant.ctor_kind { + CtorKind::Fn => "__0".to_string(), + CtorKind::Fictive => { + non_null_variant.fields[0].name.to_string() + } + CtorKind::Const => bug!() + }, + llvm_type: non_null_llvm_type, + type_metadata: non_null_type_metadata, + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero + }; + + let unique_type_id = debug_context(cx).type_map + .borrow_mut() + .get_unique_type_id_of_enum_variant( + cx, + self.enum_type, + &non_null_variant_name); + + // Now we can create the metadata of the artificial struct + let artificial_struct_metadata = + composite_type_metadata(cx, + artificial_struct_llvm_type, + &non_null_variant_name, + unique_type_id, + &[sole_struct_member_description], + self.containing_scope, + self.file_metadata, + syntax_pos::DUMMY_SP); + + // Encode the information about the null variant in the union + // member's name. + let null_variant_index = (1 - non_null_variant_index) as usize; + let null_variant_name = adt.variants[null_variant_index].name; + let union_member_name = format!("RUST$ENCODED$ENUM${}${}", + 0, + null_variant_name); + + // Finally create the (singleton) list of descriptions of union + // members. 
+ vec![ + MemberDescription { + name: union_member_name, + llvm_type: artificial_struct_llvm_type, + type_metadata: artificial_struct_metadata, + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero + } + ] + }, + layout::StructWrappedNullablePointer { nonnull: ref struct_def, + nndiscr, + ref discrfield, ..} => { + // Create a description of the non-null variant + let (variant_type_metadata, variant_llvm_type, member_description_factory) = + describe_enum_variant(cx, + self.enum_type, + struct_def, + &adt.variants[nndiscr as usize], + OptimizedDiscriminant, + self.containing_scope, + self.span); + + let variant_member_descriptions = + member_description_factory.create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + variant_llvm_type, + &variant_member_descriptions[..]); + + // Encode the information about the null variant in the union + // member's name. + let null_variant_index = (1 - nndiscr) as usize; + let null_variant_name = adt.variants[null_variant_index].name; + let discrfield = discrfield.iter() + .skip(1) + .map(|x| x.to_string()) + .collect::>().join("$"); + let union_member_name = format!("RUST$ENCODED$ENUM${}${}", + discrfield, + null_variant_name); + + // Create the (singleton) list of descriptions of union members. + vec![ + MemberDescription { + name: union_member_name, + llvm_type: variant_llvm_type, + type_metadata: variant_type_metadata, + offset: FixedMemberOffset { bytes: 0 }, + flags: DIFlags::FlagZero + } + ] + }, + layout::CEnum { .. } => span_bug!(self.span, "This should be unreachable."), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) + } + } +} + +// Creates MemberDescriptions for the fields of a single enum variant. 
+struct VariantMemberDescriptionFactory<'tcx> { + args: Vec<(String, Ty<'tcx>)>, + discriminant_type_metadata: Option, + span: Span, +} + +impl<'tcx> VariantMemberDescriptionFactory<'tcx> { + fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) + -> Vec { + self.args.iter().enumerate().map(|(i, &(ref name, ty))| { + MemberDescription { + name: name.to_string(), + llvm_type: type_of::type_of(cx, ty), + type_metadata: match self.discriminant_type_metadata { + Some(metadata) if i == 0 => metadata, + _ => type_metadata(cx, ty, self.span) + }, + offset: ComputedMemberOffset, + flags: DIFlags::FlagZero + } + }).collect() + } +} + +#[derive(Copy, Clone)] +enum EnumDiscriminantInfo { + RegularDiscriminant(DIType), + OptimizedDiscriminant, + NoDiscriminant +} + +// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type +// of the variant, and (3) a MemberDescriptionFactory for producing the +// descriptions of the fields of the variant. This is a rudimentary version of a +// full RecursiveTypeDescription. +fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + enum_type: Ty<'tcx>, + struct_def: &layout::Struct, + variant: &'tcx ty::VariantDef, + discriminant_info: EnumDiscriminantInfo, + containing_scope: DIScope, + span: Span) + -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { + let substs = match enum_type.sty { + ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, + ref t @ _ => bug!("{:#?} is not an enum", t) + }; + + let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) { + layout::CEnum {discr, ..} => Some((discr, true)), + layout::General{discr, ..} => Some((discr, false)), + layout::Univariant { .. } + | layout::RawNullablePointer { .. } + | layout::StructWrappedNullablePointer { .. } => None, + ref l @ _ => bug!("This should be unreachable. 
Type is {:#?} layout is {:#?}", enum_type, l) + }; + + let mut field_tys = variant.fields.iter().map(|f| { + monomorphize::field_ty(cx.tcx(), &substs, f) + }).collect::>(); + + if let Some((discr, signed)) = maybe_discr_and_signed { + field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); + } + + + let variant_llvm_type = + Type::struct_(cx, &field_tys + .iter() + .map(|t| type_of::type_of(cx, t)) + .collect::>() + , + struct_def.packed); + // Could do some consistency checks here: size, align, field count, discr type + + let variant_name = variant.name.as_str(); + let unique_type_id = debug_context(cx).type_map + .borrow_mut() + .get_unique_type_id_of_enum_variant( + cx, + enum_type, + &variant_name); + + let metadata_stub = create_struct_stub(cx, + variant_llvm_type, + &variant_name, + unique_type_id, + containing_scope); + + // Get the argument names from the enum variant info + let mut arg_names: Vec<_> = match variant.ctor_kind { + CtorKind::Const => vec![], + CtorKind::Fn => { + variant.fields + .iter() + .enumerate() + .map(|(i, _)| format!("__{}", i)) + .collect() + } + CtorKind::Fictive => { + variant.fields + .iter() + .map(|f| f.name.to_string()) + .collect() + } + }; + + // If this is not a univariant enum, there is also the discriminant field. + match discriminant_info { + RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()), + _ => { /* do nothing */ } + }; + + // Build an array of (field name, field type) pairs to be captured in the factory closure. 
+ let args: Vec<(String, Ty)> = arg_names.iter() + .zip(field_tys.iter()) + .map(|(s, &t)| (s.to_string(), t)) + .collect(); + + let member_description_factory = + VariantMDF(VariantMemberDescriptionFactory { + args: args, + discriminant_type_metadata: match discriminant_info { + RegularDiscriminant(discriminant_type_metadata) => { + Some(discriminant_type_metadata) + } + _ => None + }, + span: span, + }); + + (metadata_stub, variant_llvm_type, member_description_factory) +} + +fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + enum_type: Ty<'tcx>, + enum_def_id: DefId, + unique_type_id: UniqueTypeId, + span: Span) + -> RecursiveTypeDescription<'tcx> { + let enum_name = compute_debuginfo_type_name(cx, enum_type, false); + + let (containing_scope, _) = get_namespace_and_span_for_item(cx, enum_def_id); + // FIXME: This should emit actual file metadata for the enum, but we + // currently can't get the necessary information when it comes to types + // imported from other crates. 
Formerly we violated the ODR when performing + // LTO because we emitted debuginfo for the same type with varying file + // metadata, so as a workaround we pretend that the type comes from + // + let file_metadata = unknown_file_metadata(cx); + + let variants = &enum_type.ty_adt_def().unwrap().variants; + let enumerators_metadata: Vec = variants + .iter() + .map(|v| { + let token = v.name.as_str(); + let name = CString::new(token.as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustDIBuilderCreateEnumerator( + DIB(cx), + name.as_ptr(), + v.disr_val.to_u64_unchecked()) + } + }) + .collect(); + + let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| { + let disr_type_key = (enum_def_id, inttype); + let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types + .borrow() + .get(&disr_type_key).cloned(); + match cached_discriminant_type_metadata { + Some(discriminant_type_metadata) => discriminant_type_metadata, + None => { + let discriminant_llvm_type = Type::from_integer(cx, inttype); + let (discriminant_size, discriminant_align) = + size_and_align_of(cx, discriminant_llvm_type); + let discriminant_base_type_metadata = + type_metadata(cx, + inttype.to_ty(&cx.tcx(), signed), + syntax_pos::DUMMY_SP); + let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); + + let name = CString::new(discriminant_name.as_bytes()).unwrap(); + let discriminant_type_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateEnumerationType( + DIB(cx), + containing_scope, + name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + bytes_to_bits(discriminant_size), + bytes_to_bits(discriminant_align), + create_DIArray(DIB(cx), &enumerators_metadata), + discriminant_base_type_metadata) + }; + + debug_context(cx).created_enum_disr_types + .borrow_mut() + .insert(disr_type_key, discriminant_type_metadata); + + discriminant_type_metadata + } + } + }; + + let type_rep = cx.layout_of(enum_type); + + let discriminant_type_metadata = match *type_rep { + 
layout::CEnum { discr, signed, .. } => { + return FinalMetadata(discriminant_type_metadata(discr, signed)) + }, + layout::RawNullablePointer { .. } | + layout::StructWrappedNullablePointer { .. } | + layout::Univariant { .. } => None, + layout::General { discr, .. } => Some(discriminant_type_metadata(discr, false)), + ref l @ _ => bug!("Not an enum layout: {:#?}", l) + }; + + let enum_llvm_type = type_of::type_of(cx, enum_type); + let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type); + + let enum_name = CString::new(enum_name).unwrap(); + let unique_type_id_str = CString::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id).as_bytes() + ).unwrap(); + let enum_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + enum_name.as_ptr(), + file_metadata, + UNKNOWN_LINE_NUMBER, + bytes_to_bits(enum_type_size), + bytes_to_bits(enum_type_align), + DIFlags::FlagZero, + ptr::null_mut(), + 0, // RuntimeLang + unique_type_id_str.as_ptr()) + }; + + return create_and_register_recursive_type_forward_declaration( + cx, + enum_type, + unique_type_id, + enum_metadata, + enum_llvm_type, + EnumMDF(EnumMemberDescriptionFactory { + enum_type: enum_type, + type_rep: type_rep, + discriminant_type_metadata: discriminant_type_metadata, + containing_scope: containing_scope, + file_metadata: file_metadata, + span: span, + }), + ); + + fn get_enum_discriminant_name(cx: &CrateContext, + def_id: DefId) + -> InternedString { + cx.tcx().item_name(def_id).as_str() + } +} + +/// Creates debug information for a composite type, that is, anything that +/// results in a LLVM struct. +/// +/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. 
+fn composite_type_metadata(cx: &CrateContext, + composite_llvm_type: Type, + composite_type_name: &str, + composite_type_unique_id: UniqueTypeId, + member_descriptions: &[MemberDescription], + containing_scope: DIScope, + + // Ignore source location information as long as it + // can't be reconstructed for non-local crates. + _file_metadata: DIFile, + _definition_span: Span) + -> DICompositeType { + // Create the (empty) struct metadata node ... + let composite_type_metadata = create_struct_stub(cx, + composite_llvm_type, + composite_type_name, + composite_type_unique_id, + containing_scope); + // ... and immediately create and add the member descriptions. + set_members_of_composite_type(cx, + composite_type_metadata, + composite_llvm_type, + member_descriptions); + + return composite_type_metadata; +} + +fn set_members_of_composite_type(cx: &CrateContext, + composite_type_metadata: DICompositeType, + composite_llvm_type: Type, + member_descriptions: &[MemberDescription]) { + // In some rare cases LLVM metadata uniquing would lead to an existing type + // description being used instead of a new one created in + // create_struct_stub. This would cause a hard to trace assertion in + // DICompositeType::SetTypeArray(). The following check makes sure that we + // get a better error message if this should happen again due to some + // regression. 
+ { + let mut composite_types_completed = + debug_context(cx).composite_types_completed.borrow_mut(); + if composite_types_completed.contains(&composite_type_metadata) { + bug!("debuginfo::set_members_of_composite_type() - \ + Already completed forward declaration re-encountered."); + } else { + composite_types_completed.insert(composite_type_metadata); + } + } + + let member_metadata: Vec = member_descriptions + .iter() + .enumerate() + .map(|(i, member_description)| { + let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type); + let member_offset = match member_description.offset { + FixedMemberOffset { bytes } => bytes as u64, + ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i) + }; + + let member_name = member_description.name.as_bytes(); + let member_name = CString::new(member_name).unwrap(); + unsafe { + llvm::LLVMRustDIBuilderCreateMemberType( + DIB(cx), + composite_type_metadata, + member_name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + bytes_to_bits(member_size), + bytes_to_bits(member_align), + bytes_to_bits(member_offset), + member_description.flags, + member_description.type_metadata) + } + }) + .collect(); + + unsafe { + let type_array = create_DIArray(DIB(cx), &member_metadata[..]); + llvm::LLVMRustDICompositeTypeSetTypeArray( + DIB(cx), composite_type_metadata, type_array); + } +} + +// A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do +// any caching, does not add any fields to the struct. This can be done later +// with set_members_of_composite_type(). 
+fn create_struct_stub(cx: &CrateContext, + struct_llvm_type: Type, + struct_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type); + + let name = CString::new(struct_type_name).unwrap(); + let unique_type_id = CString::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id).as_bytes() + ).unwrap(); + let metadata_stub = unsafe { + // LLVMRustDIBuilderCreateStructType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. + let empty_array = create_DIArray(DIB(cx), &[]); + + llvm::LLVMRustDIBuilderCreateStructType( + DIB(cx), + containing_scope, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + bytes_to_bits(struct_size), + bytes_to_bits(struct_align), + DIFlags::FlagZero, + ptr::null_mut(), + empty_array, + 0, + ptr::null_mut(), + unique_type_id.as_ptr()) + }; + + return metadata_stub; +} + +fn create_union_stub(cx: &CrateContext, + union_llvm_type: Type, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (union_size, union_align) = size_and_align_of(cx, union_llvm_type); + + let name = CString::new(union_type_name).unwrap(); + let unique_type_id = CString::new( + debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id).as_bytes() + ).unwrap(); + let metadata_stub = unsafe { + // LLVMRustDIBuilderCreateUnionType() wants an empty array. A null + // pointer will lead to hard to trace and debug LLVM assertions + // later on in llvm/lib/IR/Value.cpp. 
+ let empty_array = create_DIArray(DIB(cx), &[]); + + llvm::LLVMRustDIBuilderCreateUnionType( + DIB(cx), + containing_scope, + name.as_ptr(), + unknown_file_metadata(cx), + UNKNOWN_LINE_NUMBER, + bytes_to_bits(union_size), + bytes_to_bits(union_align), + DIFlags::FlagZero, + empty_array, + 0, // RuntimeLang + unique_type_id.as_ptr()) + }; + + return metadata_stub; +} + +/// Creates debug information for the given global variable. +/// +/// Adds the created metadata nodes directly to the crate's IR. +pub fn create_global_var_metadata(cx: &CrateContext, + node_id: ast::NodeId, + global: ValueRef) { + if cx.dbg_cx().is_none() { + return; + } + + let tcx = cx.tcx(); + + // Don't create debuginfo for globals inlined from other crates. The other + // crate should already contain debuginfo for it. More importantly, the + // global might not even exist in un-inlined form anywhere which would lead + // to a linker errors. + if tcx.map.is_inlined_node_id(node_id) { + return; + } + + let node_def_id = tcx.map.local_def_id(node_id); + let (var_scope, span) = get_namespace_and_span_for_item(cx, node_def_id); + + let (file_metadata, line_number) = if span != syntax_pos::DUMMY_SP { + let loc = span_start(cx, span); + (file_metadata(cx, &loc.file.name, &loc.file.abs_path), loc.line as c_uint) + } else { + (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER) + }; + + let is_local_to_unit = is_node_local_to_unit(cx, node_id); + let variable_type = tcx.erase_regions(&tcx.item_type(node_def_id)); + let type_metadata = type_metadata(cx, variable_type, span); + let var_name = tcx.item_name(node_def_id).to_string(); + let linkage_name = mangled_name_of_item(cx, node_def_id, ""); + + let var_name = CString::new(var_name).unwrap(); + let linkage_name = CString::new(linkage_name).unwrap(); + unsafe { + llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx), + var_scope, + var_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + line_number, + type_metadata, + is_local_to_unit, + global, + 
ptr::null_mut()); + } +} + +// Creates an "extension" of an existing DIScope into another file. +pub fn extend_scope_to_file(ccx: &CrateContext, + scope_metadata: DIScope, + file: &syntax_pos::FileMap) + -> DILexicalBlock { + let file_metadata = file_metadata(ccx, &file.name, &file.abs_path); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(ccx), + scope_metadata, + file_metadata) + } +} diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs new file mode 100644 index 0000000000000..e023e654d51ad --- /dev/null +++ b/src/librustc_trans/debuginfo/mod.rs @@ -0,0 +1,528 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// See doc.rs for documentation. 
+mod doc; + +use self::VariableAccess::*; +use self::VariableKind::*; + +use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; +use self::namespace::mangled_name_of_item; +use self::type_names::compute_debuginfo_type_name; +use self::metadata::{type_metadata, file_metadata, TypeMap}; +use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; + +use llvm; +use llvm::{ModuleRef, ContextRef, ValueRef}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray, DIFlags}; +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; + +use abi::Abi; +use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; +use monomorphize::{self, Instance}; +use rustc::ty::{self, Ty}; +use rustc::mir; +use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; +use util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; + +use libc::c_uint; +use std::cell::{Cell, RefCell}; +use std::ffi::CString; +use std::ptr; + +use syntax_pos::{self, Span, Pos}; +use syntax::ast; +use rustc::ty::layout; + +pub mod gdb; +mod utils; +mod namespace; +mod type_names; +pub mod metadata; +mod create_scope_map; +mod source_loc; + +pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; +pub use self::source_loc::start_emitting_source_locations; +pub use self::metadata::create_global_var_metadata; +pub use self::metadata::extend_scope_to_file; + +#[allow(non_upper_case_globals)] +const DW_TAG_auto_variable: c_uint = 0x100; +#[allow(non_upper_case_globals)] +const DW_TAG_arg_variable: c_uint = 0x101; + +/// A context object for maintaining all state needed by the debuginfo module. +pub struct CrateDebugContext<'tcx> { + llcontext: ContextRef, + builder: DIBuilderRef, + current_debug_location: Cell, + created_files: RefCell>, + created_enum_disr_types: RefCell>, + + type_map: RefCell>, + namespace_map: RefCell>, + + // This collection is used to assert that composite types (structs, enums, + // ...) 
have their members only set once: + composite_types_completed: RefCell>, +} + +impl<'tcx> CrateDebugContext<'tcx> { + pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> { + debug!("CrateDebugContext::new"); + let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) }; + // DIBuilder inherits context from the module, so we'd better use the same one + let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) }; + return CrateDebugContext { + llcontext: llcontext, + builder: builder, + current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation), + created_files: RefCell::new(FxHashMap()), + created_enum_disr_types: RefCell::new(FxHashMap()), + type_map: RefCell::new(TypeMap::new()), + namespace_map: RefCell::new(DefIdMap()), + composite_types_completed: RefCell::new(FxHashSet()), + }; + } +} + +pub enum FunctionDebugContext { + RegularContext(Box), + DebugInfoDisabled, + FunctionWithoutDebugInfo, +} + +impl FunctionDebugContext { + fn get_ref<'a>(&'a self, + span: Span) + -> &'a FunctionDebugContextData { + match *self { + FunctionDebugContext::RegularContext(box ref data) => data, + FunctionDebugContext::DebugInfoDisabled => { + span_bug!(span, + "{}", + FunctionDebugContext::debuginfo_disabled_message()); + } + FunctionDebugContext::FunctionWithoutDebugInfo => { + span_bug!(span, + "{}", + FunctionDebugContext::should_be_ignored_message()); + } + } + } + + fn debuginfo_disabled_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" + } + + fn should_be_ignored_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext for function that should be \ + ignored by debug info!" 
+ } +} + +pub struct FunctionDebugContextData { + fn_metadata: DISubprogram, + source_locations_enabled: Cell, + source_location_override: Cell, +} + +pub enum VariableAccess<'a> { + // The llptr given is an alloca containing the variable's value + DirectVariable { alloca: ValueRef }, + // The llptr given is an alloca containing the start of some pointer chain + // leading to the variable's content. + IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] } +} + +pub enum VariableKind { + ArgumentVariable(usize /*index*/), + LocalVariable, + CapturedVariable, +} + +/// Create any deferred debug metadata nodes +pub fn finalize(cx: &CrateContext) { + if cx.dbg_cx().is_none() { + return; + } + + debug!("finalize"); + + if gdb::needs_gdb_debug_scripts_section(cx) { + // Add a .debug_gdb_scripts section to this compile-unit. This will + // cause GDB to try and load the gdb_load_rust_pretty_printers.py file, + // which activates the Rust pretty printers for binary this section is + // contained in. + gdb::get_or_insert_gdb_debug_scripts_section_global(cx); + } + + unsafe { + llvm::LLVMRustDIBuilderFinalize(DIB(cx)); + llvm::LLVMRustDIBuilderDispose(DIB(cx)); + // Debuginfo generation in LLVM by default uses a higher + // version of dwarf than OS X currently understands. We can + // instruct LLVM to emit an older version of dwarf, however, + // for OS X to understand. For more info see #11352 + // This can be overridden using --llvm-opts -dwarf-version,N. + // Android has the same issue (#22398) + if cx.sess().target.target.options.is_like_osx || + cx.sess().target.target.options.is_like_android { + llvm::LLVMRustAddModuleFlag(cx.llmod(), + "Dwarf Version\0".as_ptr() as *const _, + 2) + } + + // Indicate that we want CodeView debug information on MSVC + if cx.sess().target.target.options.is_like_msvc { + llvm::LLVMRustAddModuleFlag(cx.llmod(), + "CodeView\0".as_ptr() as *const _, + 1) + } + + // Prevent bitcode readers from deleting the debug info. 
+ let ptr = "Debug Info Version\0".as_ptr(); + llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _, + llvm::LLVMRustDebugMetadataVersion()); + }; +} + +/// Creates a function-specific debug context for a function w/o debuginfo. +pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>) + -> FunctionDebugContext { + if cx.sess().opts.debuginfo == NoDebugInfo { + return FunctionDebugContext::DebugInfoDisabled; + } + + // Clear the debug location so we don't assign them in the function prelude. + source_loc::set_debug_location(cx, None, UnknownLocation); + FunctionDebugContext::FunctionWithoutDebugInfo +} + +/// Creates the function-specific debug context. +/// +/// Returns the FunctionDebugContext for the function which holds state needed +/// for debug info creation. The function may also return another variant of the +/// FunctionDebugContext enum which indicates why no debuginfo should be created +/// for the function. +pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + instance: Instance<'tcx>, + sig: &ty::FnSig<'tcx>, + abi: Abi, + llfn: ValueRef, + mir: &mir::Mir) -> FunctionDebugContext { + if cx.sess().opts.debuginfo == NoDebugInfo { + return FunctionDebugContext::DebugInfoDisabled; + } + + // Clear the debug location so we don't assign them in the function prelude. + // Do this here already, in case we do an early exit from this function. 
+ source_loc::set_debug_location(cx, None, UnknownLocation); + + let containing_scope = get_containing_scope(cx, instance); + let span = mir.span; + + // This can be the case for functions inlined from another crate + if span == syntax_pos::DUMMY_SP { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + + let loc = span_start(cx, span); + let file_metadata = file_metadata(cx, &loc.file.name, &loc.file.abs_path); + + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(cx, sig, abi); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) + }; + + // Find the enclosing function, in case this is a closure. + let def_key = cx.tcx().def_key(instance.def); + let mut name = def_key.disambiguated_data.data.to_string(); + let name_len = name.len(); + + let fn_def_id = cx.tcx().closure_base_def_id(instance.def); + + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. + let generics = cx.tcx().item_generics(fn_def_id); + let substs = instance.substs.truncate_to(cx.tcx(), generics); + let template_parameters = get_template_parameters(cx, + &generics, + substs, + file_metadata, + &mut name); + + // Build the linkage_name out of the item path and "template" parameters. 
+ let linkage_name = mangled_name_of_item(cx, instance.def, &name[name_len..]); + + let scope_line = span_start(cx, span).line; + + let local_id = cx.tcx().map.as_local_node_id(instance.def); + let is_local_to_unit = local_id.map_or(false, |id| is_node_local_to_unit(cx, id)); + + let function_name = CString::new(name).unwrap(); + let linkage_name = CString::new(linkage_name).unwrap(); + + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(cx), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + DIFlags::FlagPrototyped, + cx.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + ptr::null_mut()) + }; + + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = box FunctionDebugContextData { + fn_metadata: fn_metadata, + source_locations_enabled: Cell::new(false), + source_location_override: Cell::new(false), + }; + + return FunctionDebugContext::RegularContext(fn_debug_context); + + fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + sig: &ty::FnSig<'tcx>, + abi: Abi) -> DIArray { + if cx.sess().opts.debuginfo == LimitedDebugInfo { + return create_DIArray(DIB(cx), &[]); + } + + let mut signature = Vec::with_capacity(sig.inputs.len() + 1); + + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output.sty { + ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(), + _ => type_metadata(cx, sig.output, syntax_pos::DUMMY_SP) + }); + + let inputs = if abi == Abi::RustCall { + &sig.inputs[..sig.inputs.len()-1] + } else { + &sig.inputs[..] 
+ }; + + // Arguments types + for &argument_type in inputs { + signature.push(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)); + } + + if abi == Abi::RustCall && !sig.inputs.is_empty() { + if let ty::TyTuple(args) = sig.inputs[sig.inputs.len() - 1].sty { + for &argument_type in args { + signature.push(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)); + } + } + } + + return create_DIArray(DIB(cx), &signature[..]); + } + + fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + generics: &ty::Generics<'tcx>, + substs: &Substs<'tcx>, + file_metadata: DIFile, + name_to_append_suffix_to: &mut String) + -> DIArray + { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } + + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } + + let actual_type = cx.tcx().normalize_associated_type(&actual_type); + // Add actual type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); + } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == FullDebugInfo { + let names = get_type_parameter_names(cx, generics); + substs.types().zip(names).map(|(ty, name)| { + let actual_type = cx.tcx().normalize_associated_type(&ty); + let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = CString::new(name.as_str().as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + ptr::null_mut(), + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0) + } + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); + } + + fn get_type_parameter_names<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + 
generics: &ty::Generics<'tcx>) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_type_parameter_names(cx, cx.tcx().item_generics(def_id)) + }); + names.extend(generics.types.iter().map(|param| param.name)); + names + } + + fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, + instance: Instance<'tcx>) + -> DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. + let self_type = cx.tcx().impl_of_method(instance.def).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx().trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx().item_type(impl_def_id); + let impl_self_ty = cx.tcx().erase_regions(&impl_self_ty); + let impl_self_ty = monomorphize::apply_param_substs(cx.shared(), + instance.substs, + &impl_self_ty); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::TyAdt(..) 
=> { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } + } else { + // For trait method impls we still use the "parallel namespace" + // strategy + None + } + }); + + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def.krate, + index: cx.tcx() + .def_key(instance.def) + .parent + .expect("get_containing_scope: missing parent?") + }) + }) + } +} + +pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: DIScope, + variable_access: VariableAccess, + variable_kind: VariableKind, + span: Span) { + let cx: &CrateContext = bcx.ccx(); + + let file = span_start(cx, span).file; + let filename = file.name.clone(); + let file_metadata = file_metadata(cx, &filename[..], &file.abs_path); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable | + CapturedVariable => (0, DW_TAG_auto_variable) + }; + + let name = CString::new(variable_name.as_str().as_bytes()).unwrap(); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => { + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index) + }; + source_loc::set_debug_location(cx, None, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder()); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, 
+ bcx.llbb); + + llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr); + } + } + } + + match variable_kind { + ArgumentVariable(_) | CapturedVariable => { + assert!(!bcx.fcx + .debug_context + .get_ref(span) + .source_locations_enabled + .get()); + source_loc::set_debug_location(cx, None, UnknownLocation); + } + _ => { /* nothing to do */ } + } +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub enum DebugLoc { + ScopeAt(DIScope, Span), + None +} + +impl DebugLoc { + pub fn apply(self, fcx: &FunctionContext) { + source_loc::set_source_location(fcx, None, self); + } + + pub fn apply_to_bcx(self, bcx: &BlockAndBuilder) { + source_loc::set_source_location(bcx.fcx(), Some(bcx), self); + } +} diff --git a/src/librustc_trans/debuginfo/namespace.rs b/src/librustc_trans/debuginfo/namespace.rs new file mode 100644 index 0000000000000..521dd7530beea --- /dev/null +++ b/src/librustc_trans/debuginfo/namespace.rs @@ -0,0 +1,91 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Namespace Handling. 
+ +use super::metadata::{file_metadata, unknown_file_metadata, UNKNOWN_LINE_NUMBER}; +use super::utils::{DIB, debug_context, span_start}; + +use llvm; +use llvm::debuginfo::DIScope; +use rustc::hir::def_id::DefId; +use rustc::hir::map::DefPathData; +use common::CrateContext; + +use libc::c_uint; +use std::ffi::CString; +use std::ptr; +use syntax_pos::DUMMY_SP; + +pub fn mangled_name_of_item(ccx: &CrateContext, def_id: DefId, extra: &str) -> String { + fn fill_nested(ccx: &CrateContext, def_id: DefId, extra: &str, output: &mut String) { + let def_key = ccx.tcx().def_key(def_id); + if let Some(parent) = def_key.parent { + fill_nested(ccx, DefId { + krate: def_id.krate, + index: parent + }, "", output); + } + + let name = match def_key.disambiguated_data.data { + DefPathData::CrateRoot => ccx.tcx().crate_name(def_id.krate).as_str(), + data => data.as_interned_str() + }; + + output.push_str(&(name.len() + extra.len()).to_string()); + output.push_str(&name); + output.push_str(extra); + } + + let mut name = String::from("_ZN"); + fill_nested(ccx, def_id, extra, &mut name); + name.push('E'); + name +} + +pub fn item_namespace(ccx: &CrateContext, def_id: DefId) -> DIScope { + if let Some(&scope) = debug_context(ccx).namespace_map.borrow().get(&def_id) { + return scope; + } + + let def_key = ccx.tcx().def_key(def_id); + let parent_scope = def_key.parent.map_or(ptr::null_mut(), |parent| { + item_namespace(ccx, DefId { + krate: def_id.krate, + index: parent + }) + }); + + let namespace_name = match def_key.disambiguated_data.data { + DefPathData::CrateRoot => ccx.tcx().crate_name(def_id.krate).as_str(), + data => data.as_interned_str() + }; + + let namespace_name = CString::new(namespace_name.as_bytes()).unwrap(); + let span = ccx.tcx().def_span(def_id); + let (file, line) = if span != DUMMY_SP { + let loc = span_start(ccx, span); + (file_metadata(ccx, &loc.file.name, &loc.file.abs_path), loc.line as c_uint) + } else { + (unknown_file_metadata(ccx), UNKNOWN_LINE_NUMBER) + }; 
+ + let scope = unsafe { + llvm::LLVMRustDIBuilderCreateNameSpace( + DIB(ccx), + parent_scope, + namespace_name.as_ptr(), + file, + line as c_uint) + }; + + debug_context(ccx).namespace_map.borrow_mut().insert(def_id, scope); + scope +} diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs new file mode 100644 index 0000000000000..1aee27c144a36 --- /dev/null +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -0,0 +1,137 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use self::InternalDebugLocation::*; + +use super::utils::{debug_context, span_start}; +use super::metadata::{UNKNOWN_COLUMN_NUMBER}; +use super::{FunctionDebugContext, DebugLoc}; + +use llvm; +use llvm::debuginfo::DIScope; +use builder::Builder; +use common::{CrateContext, FunctionContext}; + +use libc::c_uint; +use std::ptr; +use syntax_pos::Pos; + +/// Sets the current debug location at the beginning of the span. +/// +/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). +pub fn set_source_location(fcx: &FunctionContext, + builder: Option<&Builder>, + debug_loc: DebugLoc) { + let builder = builder.map(|b| b.llbuilder); + let function_debug_context = match fcx.debug_context { + FunctionDebugContext::DebugInfoDisabled => return, + FunctionDebugContext::FunctionWithoutDebugInfo => { + set_debug_location(fcx.ccx, builder, UnknownLocation); + return; + } + FunctionDebugContext::RegularContext(box ref data) => data + }; + + if function_debug_context.source_location_override.get() { + // Just ignore any attempts to set a new debug location while + // the override is active. 
+ return; + } + + let dbg_loc = if function_debug_context.source_locations_enabled.get() { + let (scope, span) = match debug_loc { + DebugLoc::ScopeAt(scope, span) => (scope, span), + DebugLoc::None => { + set_debug_location(fcx.ccx, builder, UnknownLocation); + return; + } + }; + + debug!("set_source_location: {}", + fcx.ccx.sess().codemap().span_to_string(span)); + let loc = span_start(fcx.ccx, span); + InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) + } else { + UnknownLocation + }; + set_debug_location(fcx.ccx, builder, dbg_loc); +} + +/// Enables emitting source locations for the given functions. +/// +/// Since we don't want source locations to be emitted for the function prelude, +/// they are disabled when beginning to translate a new function. This functions +/// switches source location emitting on and must therefore be called before the +/// first real statement/expression of the function is translated. +pub fn start_emitting_source_locations(fcx: &FunctionContext) { + match fcx.debug_context { + FunctionDebugContext::RegularContext(box ref data) => { + data.source_locations_enabled.set(true) + }, + _ => { /* safe to ignore */ } + } +} + + +#[derive(Copy, Clone, PartialEq)] +pub enum InternalDebugLocation { + KnownLocation { scope: DIScope, line: usize, col: usize }, + UnknownLocation +} + +impl InternalDebugLocation { + pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation { + KnownLocation { + scope: scope, + line: line, + col: col, + } + } +} + +pub fn set_debug_location(cx: &CrateContext, + builder: Option, + debug_location: InternalDebugLocation) { + if builder.is_none() { + if debug_location == debug_context(cx).current_debug_location.get() { + return; + } + } + + let metadata_node = match debug_location { + KnownLocation { scope, line, .. 
} => { + // Always set the column to zero like Clang and GCC + let col = UNKNOWN_COLUMN_NUMBER; + debug!("setting debug location to {} {}", line, col); + + unsafe { + llvm::LLVMRustDIBuilderCreateDebugLocation( + debug_context(cx).llcontext, + line as c_uint, + col as c_uint, + scope, + ptr::null_mut()) + } + } + UnknownLocation => { + debug!("clearing debug location "); + ptr::null_mut() + } + }; + + if builder.is_none() { + debug_context(cx).current_debug_location.set(debug_location); + } + + let builder = builder.unwrap_or_else(|| cx.raw_builder()); + unsafe { + llvm::LLVMSetCurrentDebugLocation(builder, metadata_node); + } +} diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_trans/debuginfo/type_names.rs new file mode 100644 index 0000000000000..80e6bd7aa2984 --- /dev/null +++ b/src/librustc_trans/debuginfo/type_names.rs @@ -0,0 +1,195 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Type Names for Debug Info. + +use common::CrateContext; +use rustc::hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty}; + +use rustc::hir; + +// Compute the name of the type as it should be stored in debuginfo. Does not do +// any caching, i.e. calling the function twice with the same type will also do +// the work twice. The `qualified` parameter only affects the first level of the +// type name, further levels (i.e. type parameters) are always fully qualified. 
+pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + qualified: bool) + -> String { + let mut result = String::with_capacity(64); + push_debuginfo_type_name(cx, t, qualified, &mut result); + result +} + +// Pushes the name of the type as it should be stored in debuginfo on the +// `output` String. See also compute_debuginfo_type_name(). +pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + t: Ty<'tcx>, + qualified: bool, + output: &mut String) { + match t.sty { + ty::TyBool => output.push_str("bool"), + ty::TyChar => output.push_str("char"), + ty::TyStr => output.push_str("str"), + ty::TyNever => output.push_str("!"), + ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()), + ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()), + ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()), + ty::TyAdt(def, substs) => { + push_item_name(cx, def.did, qualified, output); + push_type_params(cx, substs, output); + }, + ty::TyTuple(component_types) => { + output.push('('); + for &component_type in component_types { + push_debuginfo_type_name(cx, component_type, true, output); + output.push_str(", "); + } + if !component_types.is_empty() { + output.pop(); + output.pop(); + } + output.push(')'); + }, + ty::TyBox(inner_type) => { + output.push_str("Box<"); + push_debuginfo_type_name(cx, inner_type, true, output); + output.push('>'); + }, + ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => { + output.push('*'); + match mutbl { + hir::MutImmutable => output.push_str("const "), + hir::MutMutable => output.push_str("mut "), + } + + push_debuginfo_type_name(cx, inner_type, true, output); + }, + ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => { + output.push('&'); + if mutbl == hir::MutMutable { + output.push_str("mut "); + } + + push_debuginfo_type_name(cx, inner_type, true, output); + }, + ty::TyArray(inner_type, len) => { + output.push('['); + push_debuginfo_type_name(cx, 
inner_type, true, output); + output.push_str(&format!("; {}", len)); + output.push(']'); + }, + ty::TySlice(inner_type) => { + output.push('['); + push_debuginfo_type_name(cx, inner_type, true, output); + output.push(']'); + }, + ty::TyDynamic(ref trait_data, ..) => { + if let Some(principal) = trait_data.principal() { + let principal = cx.tcx().erase_late_bound_regions_and_normalize( + &principal); + push_item_name(cx, principal.def_id, false, output); + push_type_params(cx, principal.substs, output); + } + }, + ty::TyFnDef(.., &ty::BareFnTy{ unsafety, abi, ref sig } ) | + ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => { + if unsafety == hir::Unsafety::Unsafe { + output.push_str("unsafe "); + } + + if abi != ::abi::Abi::Rust { + output.push_str("extern \""); + output.push_str(abi.name()); + output.push_str("\" "); + } + + output.push_str("fn("); + + let sig = cx.tcx().erase_late_bound_regions_and_normalize(sig); + if !sig.inputs.is_empty() { + for ¶meter_type in &sig.inputs { + push_debuginfo_type_name(cx, parameter_type, true, output); + output.push_str(", "); + } + output.pop(); + output.pop(); + } + + if sig.variadic { + if !sig.inputs.is_empty() { + output.push_str(", ..."); + } else { + output.push_str("..."); + } + } + + output.push(')'); + + if !sig.output.is_nil() { + output.push_str(" -> "); + push_debuginfo_type_name(cx, sig.output, true, output); + } + }, + ty::TyClosure(..) => { + output.push_str("closure"); + } + ty::TyError | + ty::TyInfer(_) | + ty::TyProjection(..) | + ty::TyAnon(..) 
| + ty::TyParam(_) => { + bug!("debuginfo: Trying to create type name for \ + unexpected type: {:?}", t); + } + } + + fn push_item_name(cx: &CrateContext, + def_id: DefId, + qualified: bool, + output: &mut String) { + if qualified { + output.push_str(&cx.tcx().crate_name(def_id.krate).as_str()); + for path_element in cx.tcx().def_path(def_id).data { + output.push_str("::"); + output.push_str(&path_element.data.as_interned_str()); + } + } else { + output.push_str(&cx.tcx().item_name(def_id).as_str()); + } + } + + // Pushes the type parameters in the given `Substs` to the output string. + // This ignores region parameters, since they can't reliably be + // reconstructed for items from non-local crates. For local crates, this + // would be possible but with inlining and LTO we have to use the least + // common denominator - otherwise we would run into conflicts. + fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + substs: &Substs<'tcx>, + output: &mut String) { + if substs.types().next().is_none() { + return; + } + + output.push('<'); + + for type_parameter in substs.types() { + push_debuginfo_type_name(cx, type_parameter, true, output); + output.push_str(", "); + } + + output.pop(); + output.pop(); + + output.push('>'); + } +} diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs new file mode 100644 index 0000000000000..7cac9172a9c8b --- /dev/null +++ b/src/librustc_trans/debuginfo/utils.rs @@ -0,0 +1,85 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Utility Functions. 
+ +use super::{CrateDebugContext}; +use super::namespace::item_namespace; + +use rustc::hir::def_id::DefId; + +use llvm; +use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; +use machine; +use common::{CrateContext}; +use type_::Type; + +use syntax_pos::{self, Span}; +use syntax::ast; + +pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool +{ + // The is_local_to_unit flag indicates whether a function is local to the + // current compilation unit (i.e. if it is *static* in the C-sense). The + // *reachable* set should provide a good approximation of this, as it + // contains everything that might leak out of the current crate (by being + // externally visible or by being inlined into something externally + // visible). It might better to use the `exported_items` set from + // `driver::CrateAnalysis` in the future, but (atm) this set is not + // available in the translation pass. + !cx.reachable().contains(&node_id) +} + +#[allow(non_snake_case)] +pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray { + return unsafe { + llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) + }; +} + +/// Return syntax_pos::Loc corresponding to the beginning of the span +pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc { + cx.sess().codemap().lookup_char_pos(span.lo) +} + +pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) { + (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64) +} + +pub fn bytes_to_bits(bytes: u64) -> u64 { + bytes * 8 +} + +#[inline] +pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>) + -> &'a CrateDebugContext<'tcx> { + let debug_context: &'a CrateDebugContext<'tcx> = cx.dbg_cx().as_ref().unwrap(); + debug_context +} + +#[inline] +#[allow(non_snake_case)] +pub fn DIB(cx: &CrateContext) -> DIBuilderRef { + cx.dbg_cx().as_ref().unwrap().builder +} + +pub fn 
get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId) + -> (DIScope, Span) { + let containing_scope = item_namespace(cx, DefId { + krate: def_id.krate, + index: cx.tcx().def_key(def_id).parent + .expect("get_namespace_and_span_for_item: missing parent?") + }); + + // Try to get some span information, if we have an inlined item. + let definition_span = cx.tcx().def_span(def_id); + + (containing_scope, definition_span) +} diff --git a/src/librustc_trans/declare.rs b/src/librustc_trans/declare.rs new file mode 100644 index 0000000000000..7d6a672077a07 --- /dev/null +++ b/src/librustc_trans/declare.rs @@ -0,0 +1,213 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +//! Declare various LLVM values. +//! +//! Prefer using functions and methods from this module rather than calling LLVM +//! functions directly. These functions do some additional work to ensure we do +//! the right thing given the preconceptions of trans. +//! +//! Some useful guidelines: +//! +//! * Use declare_* family of methods if you are declaring, but are not +//! interested in defining the ValueRef they return. +//! * Use define_* family of methods when you might be defining the ValueRef. +//! * When in doubt, define. + +use llvm::{self, ValueRef}; +use llvm::AttributePlace::Function; +use rustc::ty; +use abi::{Abi, FnType}; +use attributes; +use context::CrateContext; +use common; +use type_::Type; +use value::Value; +use syntax::attr; + +use std::ffi::CString; + + +/// Declare a global value. +/// +/// If there’s a value with the same name already declared, the function will +/// return its ValueRef instead. 
+pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRef { + debug!("declare_global(name={:?})", name); + let namebuf = CString::new(name).unwrap_or_else(|_|{ + bug!("name {:?} contains an interior null byte", name) + }); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(ccx.llmod(), namebuf.as_ptr(), ty.to_ref()) + } +} + + +/// Declare a function. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing ValueRef instead. +fn declare_raw_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef { + debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); + let namebuf = CString::new(name).unwrap_or_else(|_|{ + bug!("name {:?} contains an interior null byte", name) + }); + let llfn = unsafe { + llvm::LLVMRustGetOrInsertFunction(ccx.llmod(), namebuf.as_ptr(), ty.to_ref()) + }; + + llvm::SetFunctionCallConv(llfn, callconv); + // Function addresses in Rust are never significant, allowing functions to + // be merged. + llvm::SetUnnamedAddr(llfn, true); + + if ccx.tcx().sess.opts.cg.no_redzone + .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) { + llvm::Attribute::NoRedZone.apply_llfn(Function, llfn); + } + + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. 
+ if attr::contains_name(ccx.tcx().map.krate_attrs(), "compiler_builtins") { + unsafe { + llvm::LLVMSetVisibility(llfn, llvm::Visibility::Hidden); + } + } + + match ccx.tcx().sess.opts.cg.opt_level.as_ref().map(String::as_ref) { + Some("s") => { + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + }, + Some("z") => { + llvm::Attribute::MinSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + }, + _ => {}, + } + + llfn +} + + +/// Declare a C ABI function. +/// +/// Only use this for foreign function ABIs and glue. For Rust functions use +/// `declare_fn` instead. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing ValueRef instead. +pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef { + declare_raw_fn(ccx, name, llvm::CCallConv, fn_type) +} + + +/// Declare a Rust function. +/// +/// If there’s a value with the same name already declared, the function will +/// update the declaration and return existing ValueRef instead. +pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, + fn_type: ty::Ty<'tcx>) -> ValueRef { + debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); + let ty::BareFnTy { abi, ref sig, .. } = *common::ty_fn_ty(ccx, fn_type); + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + + let fty = FnType::new(ccx, abi, &sig, &[]); + let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx)); + + // FIXME(canndrew): This is_never should really be an is_uninhabited + if sig.output.is_never() { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } + + if abi != Abi::Rust && abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + + fty.apply_attrs_llfn(llfn); + + llfn +} + + +/// Declare a global with an intention to define it. 
+/// +/// Use this function when you intend to define a global. This function will +/// return None if the name already has a definition associated with it. In that +/// case an error should be reported to the user, because it usually happens due +/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). +pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option { + if get_defined_value(ccx, name).is_some() { + None + } else { + Some(declare_global(ccx, name, ty)) + } +} + +/// Declare a Rust function with an intention to define it. +/// +/// Use this function when you intend to define a function. This function will +/// return panic if the name already has a definition associated with it. This +/// can happen with #[no_mangle] or #[export_name], for example. +pub fn define_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + name: &str, + fn_type: ty::Ty<'tcx>) -> ValueRef { + if get_defined_value(ccx, name).is_some() { + ccx.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + declare_fn(ccx, name, fn_type) + } +} + +/// Declare a Rust function with an intention to define it. +/// +/// Use this function when you intend to define a function. This function will +/// return panic if the name already has a definition associated with it. This +/// can happen with #[no_mangle] or #[export_name], for example. +pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + name: &str, + fn_type: ty::Ty<'tcx>) -> ValueRef { + let llfn = define_fn(ccx, name, fn_type); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn +} + + +/// Get declared value by name. 
+pub fn get_declared_value(ccx: &CrateContext, name: &str) -> Option { + debug!("get_declared_value(name={:?})", name); + let namebuf = CString::new(name).unwrap_or_else(|_|{ + bug!("name {:?} contains an interior null byte", name) + }); + let val = unsafe { llvm::LLVMRustGetNamedValue(ccx.llmod(), namebuf.as_ptr()) }; + if val.is_null() { + debug!("get_declared_value: {:?} value is null", name); + None + } else { + debug!("get_declared_value: {:?} => {:?}", name, Value(val)); + Some(val) + } +} + +/// Get defined or externally defined (AvailableExternally linkage) value by +/// name. +pub fn get_defined_value(ccx: &CrateContext, name: &str) -> Option { + get_declared_value(ccx, name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) +} diff --git a/src/librustc_trans/diagnostics.rs b/src/librustc_trans/diagnostics.rs index 539b9a4171f94..18d31448b1a24 100644 --- a/src/librustc_trans/diagnostics.rs +++ b/src/librustc_trans/diagnostics.rs @@ -12,56 +12,29 @@ register_long_diagnostics! { -E0510: r##" -`return_address` was used in an invalid context. Erroneous code example: - -``` -extern "rust-intrinsic" { - fn return_address() -> *const u8; -} - -pub unsafe fn by_value() -> i32 { - let _ = return_address(); - // error: invalid use of `return_address` intrinsic: function does - // not use out pointer - 0 -} -``` - -Return values may be stored in a return register(s) or written into a so-called -out pointer. In case the returned value is too big (this is -target-ABI-dependent and generally not portable or future proof) to fit into -the return register(s), the compiler will return the value by writing it into -space allocated in the caller's stack frame. Example: - -``` -extern "rust-intrinsic" { - fn return_address() -> *const u8; -} - -pub unsafe fn by_pointer() -> String { - let _ = return_address(); - String::new() // ok! 
-} -``` -"##, - E0511: r##" Invalid monomorphization of an intrinsic function was used. Erroneous code example: -``` +```ignore +#![feature(platform_intrinsics)] + extern "platform-intrinsic" { fn simd_add(a: T, b: T) -> T; } -unsafe { simd_add(0, 1); } -// error: invalid monomorphization of `simd_add` intrinsic +fn main() { + unsafe { simd_add(0, 1); } + // error: invalid monomorphization of `simd_add` intrinsic +} ``` The generic type has to be a SIMD type. Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] #[derive(Copy, Clone)] struct i32x1(i32); @@ -73,45 +46,4 @@ extern "platform-intrinsic" { unsafe { simd_add(i32x1(0), i32x1(1)); } // ok! ``` "##, - -E0512: r##" -Transmute with two differently sized types was attempted. Erroneous code -example: - -``` -fn takes_u8(_: u8) {} - -fn main() { - unsafe { takes_u8(::std::mem::transmute(0u16)); } - // error: transmute called with differently sized types -} -``` - -Please use types with same size or use the expected type directly. Example: - -``` -fn takes_u8(_: u8) {} - -fn main() { - unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok! - // or: - unsafe { takes_u8(0u8); } // ok! -} -``` -"##, - -E0515: r##" -A constant index expression was out of bounds. Erroneous code example: - -``` -let x = &[0, 1, 2][7]; // error: const index-expr is out of bounds -``` - -Please specify a valid index (not inferior to 0 or superior to array length). 
-Example: - -``` -let x = &[0, 1, 2][2]; // ok -``` -"##, } diff --git a/src/librustc_trans/trans/disr.rs b/src/librustc_trans/disr.rs similarity index 90% rename from src/librustc_trans/trans/disr.rs rename to src/librustc_trans/disr.rs index 7cb10a8bc44c4..fc79fa813aa5a 100644 --- a/src/librustc_trans/trans/disr.rs +++ b/src/librustc_trans/disr.rs @@ -24,9 +24,9 @@ impl ::std::ops::BitAnd for Disr { } } -impl From<::middle::ty::Disr> for Disr { - fn from(i: ::middle::ty::Disr) -> Disr { - Disr(i) +impl From<::rustc::ty::Disr> for Disr { + fn from(i: ::rustc::ty::Disr) -> Disr { + Disr(i.to_u64_unchecked()) } } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs new file mode 100644 index 0000000000000..90bc29c39e9b5 --- /dev/null +++ b/src/librustc_trans/glue.rs @@ -0,0 +1,641 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! +// +// Code relating to drop glue. 
+ +use std; + +use llvm; +use llvm::{ValueRef, get_param}; +use middle::lang_items::ExchangeFreeFnLangItem; +use rustc::ty::subst::{Substs}; +use rustc::traits; +use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; +use adt; +use base::*; +use build::*; +use callee::{Callee}; +use common::*; +use debuginfo::DebugLoc; +use machine::*; +use monomorphize; +use trans_item::TransItem; +use tvec; +use type_of::{type_of, sizing_type_of, align_of}; +use type_::Type; +use value::Value; +use Disr; + +use arena::TypedArena; +use syntax_pos::DUMMY_SP; + +pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v: ValueRef, + size: ValueRef, + align: ValueRef, + debug_loc: DebugLoc) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("trans_exchange_free"); + + let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); + let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; + Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) + .call(bcx, debug_loc, &args, None).bcx +} + +pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + v: ValueRef, + size: u64, + align: u32, + debug_loc: DebugLoc) + -> Block<'blk, 'tcx> { + trans_exchange_free_dyn(cx, + v, + C_uint(cx.ccx(), size), + C_uint(cx.ccx(), align), + debug_loc) +} + +pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + ptr: ValueRef, + content_ty: Ty<'tcx>, + debug_loc: DebugLoc) + -> Block<'blk, 'tcx> { + assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); + let sizing_type = sizing_type_of(bcx.ccx(), content_ty); + let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); + + // `Box` does not allocate. 
+ if content_size != 0 { + let content_align = align_of(bcx.ccx(), content_ty); + trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc) + } else { + bcx + } +} + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>) -> bool { + tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment()) +} + +pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + t: Ty<'tcx>) -> Ty<'tcx> { + assert!(t.is_normalized_for_trans()); + + let t = tcx.erase_regions(&t); + + // Even if there is no dtor for t, there might be one deeper down and we + // might need to pass in the vtable ptr. + if !type_is_sized(tcx, t) { + return t; + } + + // FIXME (#22815): note that type_needs_drop conservatively + // approximates in some cases and may say a type expression + // requires drop glue when it actually does not. + // + // (In this case it is not clear whether any harm is done, i.e. + // erroneously returning `t` in some cases where we could have + // returned `tcx.types.i8` does not appear unsound. The impact on + // code quality is unknown at this time.) + + if !type_needs_drop(tcx, t) { + return tcx.types.i8; + } + match t.sty { + ty::TyBox(typ) if !type_needs_drop(tcx, typ) + && type_is_sized(tcx, typ) => { + tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { + let layout = t.layout(&infcx).unwrap(); + if layout.size(&tcx.data_layout).bytes() == 0 { + // `Box` does not allocate. + tcx.types.i8 + } else { + t + } + }) + } + _ => t + } +} + +pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v: ValueRef, + t: Ty<'tcx>, + debug_loc: DebugLoc) -> Block<'blk, 'tcx> { + drop_ty_core(bcx, v, t, debug_loc, false) +} + +pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v: ValueRef, + t: Ty<'tcx>, + debug_loc: DebugLoc, + skip_dtor: bool) + -> Block<'blk, 'tcx> { + // NB: v is an *alias* of type t here, not a direct value. 
+ debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); + let _icx = push_ctxt("drop_ty"); + if bcx.fcx.type_needs_drop(t) { + let ccx = bcx.ccx(); + let g = if skip_dtor { + DropGlueKind::TyContents(t) + } else { + DropGlueKind::Ty(t) + }; + let glue = get_drop_glue_core(ccx, g); + let glue_type = get_drop_glue_type(ccx.tcx(), t); + let ptr = if glue_type != t { + PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) + } else { + v + }; + + // No drop-hint ==> call standard drop glue + Call(bcx, glue, &[ptr], debug_loc); + } + bcx +} + +pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v: ValueRef, + t: Ty<'tcx>, + debug_loc: DebugLoc, + skip_dtor: bool) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("drop_ty_immediate"); + let vp = alloc_ty(bcx, t, ""); + call_lifetime_start(bcx, vp); + store_ty(bcx, v, vp, t); + let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); + call_lifetime_end(bcx, vp); + bcx +} + +pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { + get_drop_glue_core(ccx, DropGlueKind::Ty(t)) +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum DropGlueKind<'tcx> { + /// The normal path; runs the dtor, and then recurs on the contents + Ty(Ty<'tcx>), + /// Skips the dtor, if any, for ty; drops the contents directly. + /// Note that the dtor is only skipped at the most *shallow* + /// level, namely, an `impl Drop for Ty` itself. So, for example, + /// if Ty is Newtype(S) then only the Drop impl for Newtype itself + /// will be skipped, while the Drop impl for S, if any, will be + /// invoked. 
+ TyContents(Ty<'tcx>), +} + +impl<'tcx> DropGlueKind<'tcx> { + pub fn ty(&self) -> Ty<'tcx> { + match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t } + } + + pub fn map_ty(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> + { + match *self { + DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)), + DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)), + } + } +} + +fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + g: DropGlueKind<'tcx>) -> ValueRef { + let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t)); + match ccx.drop_glues().borrow().get(&g) { + Some(&(glue, _)) => glue, + None => { + bug!("Could not find drop glue for {:?} -- {} -- {}.", + g, + TransItem::DropGlue(g).to_raw_string(), + ccx.codegen_unit().name()); + } + } +} + +pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + g: DropGlueKind<'tcx>) { + let tcx = ccx.tcx(); + assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty())); + let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); + + let (arena, fcx): (TypedArena<_>, FunctionContext); + arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena); + + let bcx = fcx.init(false); + + ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); + // All glue functions take values passed *by alias*; this is a + // requirement since in many contexts glue is invoked indirectly and + // the caller has no idea if it's dealing with something that can be + // passed by value. + // + // llfn is expected be declared to take a parameter of the appropriate + // type, so we don't need to explicitly cast the function parameter. 
+ + let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); + fcx.finish(bcx, DebugLoc::None); +} + +fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + t: Ty<'tcx>, + v0: ValueRef, + shallow_drop: bool) + -> Block<'blk, 'tcx> +{ + debug!("trans_custom_dtor t: {}", t); + let tcx = bcx.tcx(); + let mut bcx = bcx; + + let def = t.ty_adt_def().unwrap(); + + // Be sure to put the contents into a scope so we can use an invoke + // instruction to call the user destructor but still call the field + // destructors if the user destructor panics. + // + // FIXME (#14875) panic-in-drop semantics might be unsupported; we + // might well consider changing below to more direct code. + let contents_scope = bcx.fcx.push_custom_cleanup_scope(); + + // Issue #23611: schedule cleanup of contents, re-inspecting the + // discriminant (if any) in case of variant swap in drop code. + if !shallow_drop { + bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); + } + + let (sized_args, unsized_args); + let args: &[ValueRef] = if type_is_sized(tcx, t) { + sized_args = [v0]; + &sized_args + } else { + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + unsized_args = [ + Load(bcx, get_dataptr(bcx, v0)), + Load(bcx, get_meta(bcx, v0)) + ]; + &unsized_args + }; + + let trait_ref = ty::Binder(ty::TraitRef { + def_id: tcx.lang_items.drop_trait().unwrap(), + substs: tcx.mk_substs_trait(t, &[]) + }); + let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) { + traits::VtableImpl(data) => data, + _ => bug!("dtor for {:?} is not an impl???", t) + }; + let dtor_did = def.destructor().unwrap(); + bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) + .call(bcx, DebugLoc::None, args, None).bcx; + + bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) +} + +pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, + t: Ty<'tcx>, info: ValueRef) + -> (ValueRef, ValueRef) { + debug!("calculate size of DST: {}; with 
lost info: {:?}", + t, Value(info)); + if type_is_sized(bcx.tcx(), t) { + let sizing_type = sizing_type_of(bcx.ccx(), t); + let size = llsize_of_alloc(bcx.ccx(), sizing_type); + let align = align_of(bcx.ccx(), t); + debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", + t, Value(info), size, align); + let size = C_uint(bcx.ccx(), size); + let align = C_uint(bcx.ccx(), align); + return (size, align); + } + if bcx.is_unreachable() { + let llty = Type::int(bcx.ccx()); + return (C_undef(llty), C_undef(llty)); + } + match t.sty { + ty::TyAdt(def, substs) => { + let ccx = bcx.ccx(); + // First get the size of all statically known fields. + // Don't use type_of::sizing_type_of because that expects t to be sized, + // and it also rounds up to alignment, which we want to avoid, + // as the unsized field's alignment could be smaller. + assert!(!t.is_simd()); + let layout = ccx.layout_of(t); + debug!("DST {} layout: {:?}", t, layout); + + let (sized_size, sized_align) = match *layout { + ty::layout::Layout::Univariant { ref variant, .. } => { + (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi()) + } + _ => { + bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}", + t, layout); + } + }; + debug!("DST {} statically sized prefix size: {} align: {}", + t, sized_size, sized_align); + let sized_size = C_uint(ccx, sized_size); + let sized_align = C_uint(ccx, sized_align); + + // Recurse to get the size of the dynamically sized field (must be + // the last field). + let last_field = def.struct_variant().fields.last().unwrap(); + let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); + let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); + + // FIXME (#26403, #27023): We should be adding padding + // to `sized_size` (to accommodate the `unsized_align` + // required of the unsized field that follows) before + // summing it with `sized_size`. 
(Note that since #26403 + // is unfixed, we do not yet add the necessary padding + // here. But this is where the add would go.) + + // Return the sum of sizes and max of aligns. + let size = bcx.add(sized_size, unsized_size); + + // Choose max of two known alignments (combined value must + // be aligned according to more restrictive of the two). + let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) { + (Some(sized_align), Some(unsized_align)) => { + // If both alignments are constant, (the sized_align should always be), then + // pick the correct alignment statically. + C_uint(ccx, std::cmp::max(sized_align, unsized_align)) + } + _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align), + sized_align, + unsized_align) + }; + + // Issue #27023: must add any necessary padding to `size` + // (to make it a multiple of `align`) before returning it. + // + // Namely, the returned size should be, in C notation: + // + // `size + ((size & (align-1)) ? align : 0)` + // + // emulated via the semi-standard fast bit trick: + // + // `(size + (align-1)) & -align` + + let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64)); + let size = bcx.and(bcx.add(size, addend), bcx.neg(align)); + + (size, align) + } + ty::TyDynamic(..) => { + // info points to the vtable and the second entry in the vtable is the + // dynamic size of the object. + let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to()); + let size_ptr = bcx.gepi(info, &[1]); + let align_ptr = bcx.gepi(info, &[2]); + (bcx.load(size_ptr), bcx.load(align_ptr)) + } + ty::TySlice(_) | ty::TyStr => { + let unit_ty = t.sequence_element_type(bcx.tcx()); + // The info in this case is the length of the str, so the size is that + // times the unit size. 
+ let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); + let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); + let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); + (bcx.mul(info, C_uint(bcx.ccx(), unit_size)), + C_uint(bcx.ccx(), unit_align)) + } + _ => bug!("Unexpected unsized type, found {}", t) + } +} + +fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + v0: ValueRef, + g: DropGlueKind<'tcx>) + -> Block<'blk, 'tcx> { + let t = g.ty(); + + let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; + // NB: v0 is an *alias* of type t here, not a direct value. + let _icx = push_ctxt("make_drop_glue"); + + // Only drop the value when it ... well, we used to check for + // non-null, (and maybe we need to continue doing so), but we now + // must definitely check for special bit-patterns corresponding to + // the special dtor markings. + + match t.sty { + ty::TyBox(content_ty) => { + // Support for TyBox is built-in and its drop glue is + // special. It may move to library and have Drop impl. As + // a safe-guard, assert TyBox not used with TyContents. + assert!(!skip_dtor); + if !type_is_sized(bcx.tcx(), content_ty) { + let llval = get_dataptr(bcx, v0); + let llbox = Load(bcx, llval); + let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + let info = get_meta(bcx, v0); + let info = Load(bcx, info); + let (llsize, llalign) = + size_and_align_of_dst(&bcx.build(), content_ty, info); + + // `Box` does not allocate. + let needs_free = ICmp(bcx, + llvm::IntNE, + llsize, + C_uint(bcx.ccx(), 0u64), + DebugLoc::None); + with_cond(bcx, needs_free, |bcx| { + trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) + }) + } else { + let llval = v0; + let llbox = Load(bcx, llval); + let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); + trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) + } + } + ty::TyDynamic(..) 
=> { + // No support in vtable for distinguishing destroying with + // versus without calling Drop::drop. Assert caller is + // okay with always calling the Drop impl, if any. + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + assert!(!skip_dtor); + let data_ptr = get_dataptr(bcx, v0); + let vtable_ptr = Load(bcx, get_meta(bcx, v0)); + let dtor = Load(bcx, vtable_ptr); + Call(bcx, + dtor, + &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], + DebugLoc::None); + bcx + } + ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { + trans_custom_dtor(bcx, t, v0, def.is_union()) + } + ty::TyAdt(def, ..) if def.is_union() => { + bcx + } + _ => { + if bcx.fcx.type_needs_drop(t) { + drop_structural_ty(bcx, v0, t) + } else { + bcx + } + } + } +} + +// Iterates through the elements of a structural type, dropping them. +fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + av: ValueRef, + t: Ty<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("drop_structural_ty"); + + fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, + t: Ty<'tcx>, + av: adt::MaybeSizedValue, + variant: &'tcx ty::VariantDef, + substs: &Substs<'tcx>) + -> Block<'blk, 'tcx> { + let _icx = push_ctxt("iter_variant"); + let tcx = cx.tcx(); + let mut cx = cx; + + for (i, field) in variant.fields.iter().enumerate() { + let arg = monomorphize::field_ty(tcx, substs, field); + cx = drop_ty(cx, + adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), + arg, DebugLoc::None); + } + return cx; + } + + let value = if type_is_sized(cx.tcx(), t) { + adt::MaybeSizedValue::sized(av) + } else { + // FIXME(#36457) -- we should pass unsized values as two arguments + let data = Load(cx, get_dataptr(cx, av)); + let info = Load(cx, get_meta(cx, av)); + adt::MaybeSizedValue::unsized_(data, info) + }; + + let mut cx = cx; + match t.sty { + ty::TyClosure(def_id, substs) => { + for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { + let 
llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); + cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); + } + } + ty::TyArray(_, n) => { + let base = get_dataptr(cx, value.value); + let len = C_uint(cx.ccx(), n); + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, base, unit_ty, len, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TySlice(_) | ty::TyStr => { + let unit_ty = t.sequence_element_type(cx.tcx()); + cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta, + |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + } + ty::TyTuple(ref args) => { + for (i, arg) in args.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); + cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); + } + } + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct => { + let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); + for (i, &Field(_, field_ty)) in fields.iter().enumerate() { + let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); + + let val = if type_is_sized(cx.tcx(), field_ty) { + llfld_a + } else { + // FIXME(#36457) -- we should pass unsized values as two arguments + let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); + Store(cx, llfld_a, get_dataptr(cx, scratch)); + Store(cx, value.meta, get_meta(cx, scratch)); + scratch + }; + cx = drop_ty(cx, val, field_ty, DebugLoc::None); + } + } + AdtKind::Union => { + bug!("Union in `glue::drop_structural_ty`"); + } + AdtKind::Enum => { + let fcx = cx.fcx; + let ccx = fcx.ccx; + let n_variants = adt.variants.len(); + + // NB: we must hit the discriminant first so that structural + // comparison know not to proceed when the discriminants differ. 
+ + match adt::trans_switch(cx, t, av, false) { + (adt::BranchKind::Single, None) => { + if n_variants != 0 { + assert!(n_variants == 1); + cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av), + &adt.variants[0], substs); + } + } + (adt::BranchKind::Switch, Some(lldiscrim_a)) => { + cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); + + // Create a fall-through basic block for the "else" case of + // the switch instruction we're about to generate. Note that + // we do **not** use an Unreachable instruction here, even + // though most of the time this basic block will never be hit. + // + // When an enum is dropped it's contents are currently + // overwritten to DTOR_DONE, which means the discriminant + // could have changed value to something not within the actual + // range of the discriminant. Currently this function is only + // used for drop glue so in this case we just return quickly + // from the outer function, and any other use case will only + // call this for an already-valid enum in which case the `ret + // void` will never be hit. 
+ let ret_void_cx = fcx.new_block("enum-iter-ret-void"); + RetVoid(ret_void_cx, DebugLoc::None); + let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); + let next_cx = fcx.new_block("enum-iter-next"); + + for variant in &adt.variants { + let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", + &variant.disr_val + .to_string())); + let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); + AddCase(llswitch, case_val, variant_cx.llbb); + let variant_cx = iter_variant(variant_cx, + t, + value, + variant, + substs); + Br(variant_cx, next_cx.llbb, DebugLoc::None); + } + cx = next_cx; + } + _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + } + } + }, + + _ => { + cx.sess().unimpl(&format!("type in drop_structural_ty: {}", t)) + } + } + return cx; +} diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs new file mode 100644 index 0000000000000..016a76a72531b --- /dev/null +++ b/src/librustc_trans/intrinsic.rs @@ -0,0 +1,1367 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![allow(non_upper_case_globals)] + +use arena::TypedArena; +use intrinsics::{self, Intrinsic}; +use libc; +use llvm; +use llvm::{ValueRef}; +use abi::{Abi, FnType}; +use adt; +use base::*; +use build::*; +use common::*; +use debuginfo::DebugLoc; +use declare; +use glue; +use type_of; +use machine; +use type_::Type; +use rustc::ty::{self, Ty}; +use Disr; +use rustc::hir; +use syntax::ast; +use syntax::symbol::Symbol; + +use rustc::session::Session; +use syntax_pos::{Span, DUMMY_SP}; + +use std::cmp::Ordering; + +fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { + let llvm_name = match name { + "sqrtf32" => "llvm.sqrt.f32", + "sqrtf64" => "llvm.sqrt.f64", + "powif32" => "llvm.powi.f32", + "powif64" => "llvm.powi.f64", + "sinf32" => "llvm.sin.f32", + "sinf64" => "llvm.sin.f64", + "cosf32" => "llvm.cos.f32", + "cosf64" => "llvm.cos.f64", + "powf32" => "llvm.pow.f32", + "powf64" => "llvm.pow.f64", + "expf32" => "llvm.exp.f32", + "expf64" => "llvm.exp.f64", + "exp2f32" => "llvm.exp2.f32", + "exp2f64" => "llvm.exp2.f64", + "logf32" => "llvm.log.f32", + "logf64" => "llvm.log.f64", + "log10f32" => "llvm.log10.f32", + "log10f64" => "llvm.log10.f64", + "log2f32" => "llvm.log2.f32", + "log2f64" => "llvm.log2.f64", + "fmaf32" => "llvm.fma.f32", + "fmaf64" => "llvm.fma.f64", + "fabsf32" => "llvm.fabs.f32", + "fabsf64" => "llvm.fabs.f64", + "copysignf32" => "llvm.copysign.f32", + "copysignf64" => "llvm.copysign.f64", + "floorf32" => "llvm.floor.f32", + "floorf64" => "llvm.floor.f64", + "ceilf32" => "llvm.ceil.f32", + "ceilf64" => "llvm.ceil.f64", + "truncf32" => "llvm.trunc.f32", + "truncf64" => "llvm.trunc.f64", + "rintf32" => "llvm.rint.f32", + "rintf64" => "llvm.rint.f64", + "nearbyintf32" => "llvm.nearbyint.f32", + "nearbyintf64" => "llvm.nearbyint.f64", + "roundf32" => "llvm.round.f32", + "roundf64" => "llvm.round.f64", + "assume" => "llvm.assume", + _ => return None + }; + Some(ccx.get_intrinsic(&llvm_name)) +} + +/// Remember to add all intrinsics 
here, in librustc_typeck/check/mod.rs, +/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, +/// add them to librustc_trans/trans/context.rs +pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, + callee_ty: Ty<'tcx>, + fn_ty: &FnType, + llargs: &[ValueRef], + llresult: ValueRef, + call_debug_location: DebugLoc) + -> Result<'blk, 'tcx> { + let fcx = bcx.fcx; + let ccx = fcx.ccx; + let tcx = bcx.tcx(); + + let _icx = push_ctxt("trans_intrinsic_call"); + + let (def_id, substs, fty) = match callee_ty.sty { + ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), + _ => bug!("expected fn item type, found {}", callee_ty) + }; + + let sig = tcx.erase_late_bound_regions_and_normalize(&fty.sig); + let arg_tys = sig.inputs; + let ret_ty = sig.output; + let name = &*tcx.item_name(def_id).as_str(); + + let span = match call_debug_location { + DebugLoc::ScopeAt(_, span) => span, + DebugLoc::None => { + span_bug!(fcx.span.unwrap_or(DUMMY_SP), + "intrinsic `{}` called with missing span", name); + } + }; + + // These are the only intrinsic functions that diverge. 
+ if name == "abort" { + let llfn = ccx.get_intrinsic(&("llvm.trap")); + Call(bcx, llfn, &[], call_debug_location); + Unreachable(bcx); + return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); + } else if name == "unreachable" { + Unreachable(bcx); + return Result::new(bcx, C_nil(ccx)); + } + + let llret_ty = type_of::type_of(ccx, ret_ty); + + let simple = get_simple_intrinsic(ccx, name); + let llval = match (simple, name) { + (Some(llfn), _) => { + Call(bcx, llfn, &llargs, call_debug_location) + } + (_, "likely") => { + let expect = ccx.get_intrinsic(&("llvm.expect.i1")); + Call(bcx, expect, &[llargs[0], C_bool(ccx, true)], call_debug_location) + } + (_, "unlikely") => { + let expect = ccx.get_intrinsic(&("llvm.expect.i1")); + Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location) + } + (_, "try") => { + bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, + call_debug_location); + C_nil(ccx) + } + (_, "breakpoint") => { + let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); + Call(bcx, llfn, &[], call_debug_location) + } + (_, "size_of") => { + let tp_ty = substs.type_at(0); + let lltp_ty = type_of::type_of(ccx, tp_ty); + C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + } + (_, "size_of_val") => { + let tp_ty = substs.type_at(0); + if !type_is_sized(tcx, tp_ty) { + let (llsize, _) = + glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + llsize + } else { + let lltp_ty = type_of::type_of(ccx, tp_ty); + C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + } + } + (_, "min_align_of") => { + let tp_ty = substs.type_at(0); + C_uint(ccx, type_of::align_of(ccx, tp_ty)) + } + (_, "min_align_of_val") => { + let tp_ty = substs.type_at(0); + if !type_is_sized(tcx, tp_ty) { + let (_, llalign) = + glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + llalign + } else { + C_uint(ccx, type_of::align_of(ccx, tp_ty)) + } + } + (_, "pref_align_of") => { + let tp_ty = substs.type_at(0); + let lltp_ty = 
type_of::type_of(ccx, tp_ty); + C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) + } + (_, "drop_in_place") => { + let tp_ty = substs.type_at(0); + let is_sized = type_is_sized(tcx, tp_ty); + let ptr = if is_sized { + llargs[0] + } else { + // FIXME(#36457) -- we should pass unsized values as two arguments + let scratch = alloc_ty(bcx, tp_ty, "drop"); + call_lifetime_start(bcx, scratch); + Store(bcx, llargs[0], get_dataptr(bcx, scratch)); + Store(bcx, llargs[1], get_meta(bcx, scratch)); + scratch + }; + glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); + if !is_sized { + call_lifetime_end(bcx, ptr); + } + C_nil(ccx) + } + (_, "type_name") => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + C_str_slice(ccx, ty_name) + } + (_, "type_id") => { + C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0))) + } + (_, "init") => { + let tp_ty = substs.type_at(0); + if !type_is_zero_size(ccx, tp_ty) { + // Just zero out the stack slot. (See comment on base::memzero for explanation) + init_zero_mem(bcx, llresult, tp_ty); + } + C_nil(ccx) + } + // Effectively no-ops + (_, "uninit") | (_, "forget") => { + C_nil(ccx) + } + (_, "needs_drop") => { + let tp_ty = substs.type_at(0); + + C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) + } + (_, "offset") => { + let ptr = llargs[0]; + let offset = llargs[1]; + InBoundsGEP(bcx, ptr, &[offset]) + } + (_, "arith_offset") => { + let ptr = llargs[0]; + let offset = llargs[1]; + GEP(bcx, ptr, &[offset]) + } + + (_, "copy_nonoverlapping") => { + copy_intrinsic(bcx, + false, + false, + substs.type_at(0), + llargs[1], + llargs[0], + llargs[2], + call_debug_location) + } + (_, "copy") => { + copy_intrinsic(bcx, + true, + false, + substs.type_at(0), + llargs[1], + llargs[0], + llargs[2], + call_debug_location) + } + (_, "write_bytes") => { + memset_intrinsic(bcx, + false, + substs.type_at(0), + llargs[0], + llargs[1], + llargs[2], + call_debug_location) + } + + (_, 
"volatile_copy_nonoverlapping_memory") => { + copy_intrinsic(bcx, + false, + true, + substs.type_at(0), + llargs[0], + llargs[1], + llargs[2], + call_debug_location) + } + (_, "volatile_copy_memory") => { + copy_intrinsic(bcx, + true, + true, + substs.type_at(0), + llargs[0], + llargs[1], + llargs[2], + call_debug_location) + } + (_, "volatile_set_memory") => { + memset_intrinsic(bcx, + true, + substs.type_at(0), + llargs[0], + llargs[1], + llargs[2], + call_debug_location) + } + (_, "volatile_load") => { + let tp_ty = substs.type_at(0); + let mut ptr = llargs[0]; + if let Some(ty) = fn_ty.ret.cast { + ptr = PointerCast(bcx, ptr, ty.ptr_to()); + } + let load = VolatileLoad(bcx, ptr); + unsafe { + llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); + } + to_immediate(bcx, load, tp_ty) + }, + (_, "volatile_store") => { + let tp_ty = substs.type_at(0); + if type_is_fat_ptr(bcx.tcx(), tp_ty) { + VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0])); + VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0])); + } else { + let val = if fn_ty.args[1].is_indirect() { + Load(bcx, llargs[1]) + } else { + from_immediate(bcx, llargs[1]) + }; + let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to()); + let store = VolatileStore(bcx, val, ptr); + unsafe { + llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); + } + } + C_nil(ccx) + }, + + (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") | + (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") | + (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") | + (_, "unchecked_div") | (_, "unchecked_rem") => { + let sty = &arg_tys[0].sty; + match int_type_width_signed(sty, ccx) { + Some((width, signed)) => + match name { + "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width), + llargs[0], call_debug_location), + "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), + llargs[0], call_debug_location), + "ctpop" => Call(bcx, 
ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &llargs, call_debug_location), + "bswap" => { + if width == 8 { + llargs[0] // byte swap a u8/i8 is just a no-op + } else { + Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), + &llargs, call_debug_location) + } + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult, + call_debug_location) + }, + "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location), + "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location), + "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location), + "unchecked_div" => + if signed { + SDiv(bcx, llargs[0], llargs[1], call_debug_location) + } else { + UDiv(bcx, llargs[0], llargs[1], call_debug_location) + }, + "unchecked_rem" => + if signed { + SRem(bcx, llargs[0], llargs[1], call_debug_location) + } else { + URem(bcx, llargs[0], llargs[1], call_debug_location) + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + C_nil(ccx) + } + } + + }, + (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") | + (_, "frem_fast") => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location), + "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location), + "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location), + "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location), + "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + 
tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); + C_nil(ccx) + } + } + + }, + + (_, "discriminant_value") => { + let val_ty = substs.type_at(0); + match val_ty.sty { + ty::TyAdt(adt, ..) if adt.is_enum() => { + adt::trans_get_discr(bcx, val_ty, llargs[0], + Some(llret_ty), true) + } + _ => C_null(llret_ty) + } + } + (_, name) if name.starts_with("simd_") => { + generic_simd_intrinsic(bcx, name, + callee_ty, + &llargs, + ret_ty, llret_ty, + call_debug_location, + span) + } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + (_, name) if name.starts_with("atomic_") => { + use llvm::AtomicOrdering::*; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => ccx.sess().fatal("unknown ordering in atomic intrinsic") + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => ccx.sess().fatal("unknown ordering in atomic intrinsic") + }, + _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), + }; + + match split[1] { + "cxchg" | "cxchgweak" => { + let sty = &substs.type_at(0).sty; + if int_type_width_signed(sty, ccx).is_some() { + let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; + let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2], + order, 
failorder, weak); + let result = ExtractValue(bcx, val, 0); + let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); + Store(bcx, result, StructGEP(bcx, llresult, 0)); + Store(bcx, success, StructGEP(bcx, llresult, 1)); + } else { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + } + C_nil(ccx) + } + + "load" => { + let sty = &substs.type_at(0).sty; + if int_type_width_signed(sty, ccx).is_some() { + AtomicLoad(bcx, llargs[0], order) + } else { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + C_nil(ccx) + } + } + + "store" => { + let sty = &substs.type_at(0).sty; + if int_type_width_signed(sty, ccx).is_some() { + AtomicStore(bcx, llargs[1], llargs[0], order); + } else { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + } + C_nil(ccx) + } + + "fence" => { + AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread); + C_nil(ccx) + } + + "singlethreadfence" => { + AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread); + C_nil(ccx) + } + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => llvm::AtomicXchg, + "xadd" => llvm::AtomicAdd, + "xsub" => llvm::AtomicSub, + "and" => llvm::AtomicAnd, + "nand" => llvm::AtomicNand, + "or" => llvm::AtomicOr, + "xor" => llvm::AtomicXor, + "max" => llvm::AtomicMax, + "min" => llvm::AtomicMin, + "umax" => llvm::AtomicUMax, + "umin" => llvm::AtomicUMin, + _ => ccx.sess().fatal("unknown atomic operation") + }; + + let sty = &substs.type_at(0).sty; + if int_type_width_signed(sty, ccx).is_some() { + AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order) + } else { + span_invalid_monomorphization_error( + 
tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + C_nil(ccx) + } + } + } + + } + + (..) => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, + any_changes_needed: &mut bool) -> Vec { + use intrinsics::Type::*; + match *t { + Void => vec![Type::void(ccx)], + Integer(_signed, width, llvm_width) => { + *any_changes_needed |= width != llvm_width; + vec![Type::ix(ccx, llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![Type::f32(ccx)], + 64 => vec![Type::f64(ccx)], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + *any_changes_needed |= llvm_elem.is_some(); + + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(ccx, t, + any_changes_needed)); + vec![elem.ptr_to()] + } + Vector(ref t, ref llvm_elem, length) => { + *any_changes_needed |= llvm_elem.is_some(); + + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(ccx, t, + any_changes_needed)); + vec![Type::vector(&elem, + length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(ccx, t, any_changes_needed))) + .collect::>(); + vec![Type::struct_(ccx, &elems, false)] + } + Aggregate(true, ref contents) => { + *any_changes_needed = true; + contents.iter() + .flat_map(|t| ty_to_type(ccx, t, any_changes_needed)) + .collect() + } + } + } + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. 
+ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + t: &intrinsics::Type, + arg_type: Ty<'tcx>, + llarg: ValueRef) + -> Vec + { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. + assert!(!bcx.fcx.type_needs_drop(arg_type)); + let arg = adt::MaybeSizedValue::sized(llarg); + (0..contents.len()) + .map(|i| { + Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) + }) + .collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); + vec![PointerCast(bcx, llarg, + llvm_elem.ptr_to())] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); + vec![BitCast(bcx, llarg, + Type::vector(&llvm_elem, length as u64))] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. 
+ vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))] + } + _ => vec![llarg], + } + } + + + let mut any_changes_needed = false; + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed)) + .collect::>(); + + let mut out_changes = false; + let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes)); + // outputting a flattened aggregate is nonsense + assert!(!out_changes); + + let llargs = if !any_changes_needed { + // no aggregates to flatten, so no change needed + llargs.to_vec() + } else { + // there are some aggregates that need to be flattened + // in the LLVM call, so we need to run over the types + // again to find them and extract the arguments + intr.inputs.iter() + .zip(llargs) + .zip(&arg_tys) + .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) + .collect() + }; + assert_eq!(inputs.len(), llargs.len()); + + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = declare::declare_cfn(ccx, + name, + Type::func(&inputs, &outputs)); + Call(bcx, f, &llargs, call_debug_location) + } + }; + + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); + + for i in 0..elems.len() { + let val = ExtractValue(bcx, val, i); + Store(bcx, val, StructGEP(bcx, llresult, i)); + } + C_nil(ccx) + } + _ => val, + } + } + }; + + if val_ty(llval) != Type::void(ccx) && + machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { + if let Some(ty) = fn_ty.ret.cast { + let ptr = PointerCast(bcx, llresult, ty.ptr_to()); + let store = Store(bcx, llval, ptr); + unsafe { + llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty)); + } + } else { + store_ty(bcx, llval, llresult, ret_ty); + } + } + + Result::new(bcx, llresult) +} + +fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + allow_overlap: bool, + volatile: bool, + tp_ty: Ty<'tcx>, + dst: ValueRef, + src: ValueRef, + count: 
ValueRef, + call_debug_location: DebugLoc) + -> ValueRef { + let ccx = bcx.ccx(); + let lltp_ty = type_of::type_of(ccx, tp_ty); + let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); + let size = machine::llsize_of(ccx, lltp_ty); + let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); + + let operation = if allow_overlap { + "memmove" + } else { + "memcpy" + }; + + let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); + + let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); + let src_ptr = PointerCast(bcx, src, Type::i8p(ccx)); + let llfn = ccx.get_intrinsic(&name); + + Call(bcx, + llfn, + &[dst_ptr, + src_ptr, + Mul(bcx, size, count, DebugLoc::None), + align, + C_bool(ccx, volatile)], + call_debug_location) +} + +fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + volatile: bool, + tp_ty: Ty<'tcx>, + dst: ValueRef, + val: ValueRef, + count: ValueRef, + call_debug_location: DebugLoc) + -> ValueRef { + let ccx = bcx.ccx(); + let lltp_ty = type_of::type_of(ccx, tp_ty); + let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); + let size = machine::llsize_of(ccx, lltp_ty); + let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); + + let name = format!("llvm.memset.p0i8.i{}", int_size); + + let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); + let llfn = ccx.get_intrinsic(&name); + + Call(bcx, + llfn, + &[dst_ptr, + val, + Mul(bcx, size, count, DebugLoc::None), + align, + C_bool(ccx, volatile)], + call_debug_location) +} + +fn count_zeros_intrinsic(bcx: Block, + name: &str, + val: ValueRef, + call_debug_location: DebugLoc) + -> ValueRef { + let y = C_bool(bcx.ccx(), false); + let llfn = bcx.ccx().get_intrinsic(&name); + Call(bcx, llfn, &[val, y], call_debug_location) +} + +fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + name: &str, + a: ValueRef, + b: ValueRef, + out: ValueRef, + call_debug_location: DebugLoc) + -> ValueRef { + let llfn = bcx.ccx().get_intrinsic(&name); + + // Convert `i1` to a 
`bool`, and write it to the out parameter + let val = Call(bcx, llfn, &[a, b], call_debug_location); + let result = ExtractValue(bcx, val, 0); + let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); + Store(bcx, result, StructGEP(bcx, out, 0)); + Store(bcx, overflow, StructGEP(bcx, out, 1)); + + C_nil(bcx.ccx()) +} + +fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef, + dloc: DebugLoc) -> Block<'blk, 'tcx> { + if bcx.sess().no_landing_pads() { + Call(bcx, func, &[data], dloc); + Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); + bcx + } else if wants_msvc_seh(bcx.sess()) { + trans_msvc_try(bcx, func, data, local_ptr, dest, dloc) + } else { + trans_gnu_try(bcx, func, data, local_ptr, dest, dloc) + } +} + +// MSVC's definition of the `rust_try` function. +// +// This implementation uses the new exception handling instructions in LLVM +// which have support in LLVM for SEH on MSVC targets. Although these +// instructions are meant to work for all targets, as of the time of this +// writing, however, LLVM does not recommend the usage of these new instructions +// as the old ones are still more optimized. 
+fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef, + dloc: DebugLoc) -> Block<'blk, 'tcx> { + let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { + let ccx = bcx.ccx(); + let dloc = DebugLoc::None; + + SetPersonalityFn(bcx, bcx.fcx.eh_personality()); + + let normal = bcx.fcx.new_block("normal"); + let catchswitch = bcx.fcx.new_block("catchswitch"); + let catchpad = bcx.fcx.new_block("catchpad"); + let caught = bcx.fcx.new_block("caught"); + + let func = llvm::get_param(bcx.fcx.llfn, 0); + let data = llvm::get_param(bcx.fcx.llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); + + // We're generating an IR snippet that looks like: + // + // declare i32 @rust_try(%func, %data, %ptr) { + // %slot = alloca i64* + // invoke %func(%data) to label %normal unwind label %catchswitch + // + // normal: + // ret i32 0 + // + // catchswitch: + // %cs = catchswitch within none [%catchpad] unwind to caller + // + // catchpad: + // %tok = catchpad within %cs [%type_descriptor, 0, %slot] + // %ptr[0] = %slot[0] + // %ptr[1] = %slot[1] + // catchret from %tok to label %caught + // + // caught: + // ret i32 1 + // } + // + // This structure follows the basic usage of throw/try/catch in LLVM. + // For example, compile this C++ snippet to see what LLVM generates: + // + // #include + // + // int bar(void (*foo)(void), uint64_t *ret) { + // try { + // foo(); + // return 0; + // } catch(uint64_t a[2]) { + // ret[0] = a[0]; + // ret[1] = a[1]; + // return 1; + // } + // } + // + // More information can be found in libstd's seh.rs implementation. 
+ let i64p = Type::i64(ccx).ptr_to(); + let slot = Alloca(bcx, i64p, "slot"); + Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc); + + Ret(normal, C_i32(ccx, 0), dloc); + + let cs = CatchSwitch(catchswitch, None, None, 1); + AddHandler(catchswitch, cs, catchpad.llbb); + + let tcx = ccx.tcx(); + let tydesc = match tcx.lang_items.msvc_try_filter() { + Some(did) => ::consts::get_static(ccx, did), + None => bug!("msvc_try_filter not defined"), + }; + let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); + let addr = Load(catchpad, slot); + let arg1 = Load(catchpad, addr); + let val1 = C_i32(ccx, 1); + let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1])); + let local_ptr = BitCast(catchpad, local_ptr, i64p); + Store(catchpad, arg1, local_ptr); + Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1])); + CatchRet(catchpad, tok, caught.llbb); + + Ret(caught, C_i32(ccx, 1), dloc); + }); + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). + let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); + Store(bcx, ret, dest); + return bcx +} + +// Definition of the standard "try" function for Rust using the GNU-like model +// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke +// instructions). +// +// This translation is a little surprising because we always call a shim +// function instead of inlining the call to `invoke` manually here. This is done +// because in LLVM we're only allowed to have one personality per function +// definition. The call to the `try` intrinsic is being inlined into the +// function calling it, and that function may already have other personality +// functions in play. By calling a shim we're guaranteed that our shim will have +// the right personality function. 
+fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef, + dloc: DebugLoc) -> Block<'blk, 'tcx> { + let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { + let ccx = bcx.ccx(); + let dloc = DebugLoc::None; + + // Translates the shims described above: + // + // bcx: + // invoke %func(%args...) normal %normal unwind %catch + // + // normal: + // ret 0 + // + // catch: + // (ptr, _) = landingpad + // store ptr, %local_ptr + // ret 1 + // + // Note that the `local_ptr` data passed into the `try` intrinsic is + // expected to be `*mut *mut u8` for this to actually work, but that's + // managed by the standard library. + + let then = bcx.fcx.new_block("then"); + let catch = bcx.fcx.new_block("catch"); + + let func = llvm::get_param(bcx.fcx.llfn, 0); + let data = llvm::get_param(bcx.fcx.llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); + Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc); + Ret(then, C_i32(ccx, 0), dloc); + + // Type indicator for the exception being thrown. + // + // The first value in this tuple is a pointer to the exception object + // being thrown. The second value is a "selector" indicating which of + // the landing pad clauses the exception's type had been matched to. + // rust_try ignores the selector. + let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], + false); + let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1); + AddClause(catch, vals, C_null(Type::i8p(ccx))); + let ptr = ExtractValue(catch, vals, 0); + Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to())); + Ret(catch, C_i32(ccx, 1), dloc); + }); + + // Note that no invoke is used here because by definition this function + // can't panic (that's what it's catching). + let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); + Store(bcx, ret, dest); + return bcx; +} + +// Helper function to give a Block to a closure to translate a shim function. 
+// This is currently primarily used for the `try` intrinsic functions above. +fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, + name: &str, + inputs: Vec>, + output: Ty<'tcx>, + trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + -> ValueRef { + let ccx = fcx.ccx; + let sig = ty::FnSig { + inputs: inputs, + output: output, + variadic: false, + }; + let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); + + let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::Rust, + sig: ty::Binder(sig) + })); + let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); + let (fcx, block_arena); + block_arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); + trans(fcx.init(true)); + fcx.cleanup(); + llfn +} + +// Helper function used to get a handle to the `__rust_try` function used to +// catch exceptions. +// +// This function is only generated once and is then cached. +fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, + trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + -> ValueRef { + let ccx = fcx.ccx; + if let Some(llfn) = ccx.rust_try_fn().get() { + return llfn; + } + + // Define the type up front for the signature of the rust_try function. 
+ let tcx = ccx.tcx(); + let i8p = tcx.mk_mut_ptr(tcx.types.i8); + let fn_ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::Rust, + sig: ty::Binder(ty::FnSig { + inputs: vec![i8p], + output: tcx.mk_nil(), + variadic: false, + }), + })); + let output = tcx.types.i32; + let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans); + ccx.rust_try_fn().set(Some(rust_try)); + return rust_try +} + +fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { + span_err!(a, b, E0511, "{}", c); +} + +fn generic_simd_intrinsic<'blk, 'tcx, 'a> + (bcx: Block<'blk, 'tcx>, + name: &str, + callee_ty: Ty<'tcx>, + llargs: &[ValueRef], + ret_ty: Ty<'tcx>, + llret_ty: Type, + call_debug_location: DebugLoc, + span: Span) -> ValueRef +{ + // macros for error handling: + macro_rules! emit_error { + ($msg: tt) => { + emit_error!($msg, ) + }; + ($msg: tt, $($fmt: tt)*) => { + span_invalid_monomorphization_error( + bcx.sess(), span, + &format!(concat!("invalid monomorphization of `{}` intrinsic: ", + $msg), + name, $($fmt)*)); + } + } + macro_rules! require { + ($cond: expr, $($fmt: tt)*) => { + if !$cond { + emit_error!($($fmt)*); + return C_nil(bcx.ccx()) + } + } + } + macro_rules! 
require_simd { + ($ty: expr, $position: expr) => { + require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty) + } + } + + + + let tcx = bcx.tcx(); + let sig = tcx.erase_late_bound_regions_and_normalize(callee_ty.fn_sig()); + let arg_tys = sig.inputs; + + // every intrinsic takes a SIMD vector as its first argument + require_simd!(arg_tys[0], "input"); + let in_ty = arg_tys[0]; + let in_elem = arg_tys[0].simd_type(tcx); + let in_len = arg_tys[0].simd_size(tcx); + + let comparison = match name { + "simd_eq" => Some(hir::BiEq), + "simd_ne" => Some(hir::BiNe), + "simd_lt" => Some(hir::BiLt), + "simd_le" => Some(hir::BiLe), + "simd_gt" => Some(hir::BiGt), + "simd_ge" => Some(hir::BiGe), + _ => None + }; + + if let Some(cmp_op) = comparison { + require_simd!(ret_ty, "return"); + + let out_len = ret_ty.simd_size(tcx); + require!(in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, in_ty, + ret_ty, out_len); + require!(llret_ty.element_type().kind() == llvm::Integer, + "expected return type with integer elements, found `{}` with non-integer `{}`", + ret_ty, + ret_ty.simd_type(tcx)); + + return compare_simd_types(bcx, + llargs[0], + llargs[1], + in_elem, + llret_ty, + cmp_op, + call_debug_location) + } + + if name.starts_with("simd_shuffle") { + let n: usize = match name["simd_shuffle".len()..].parse() { + Ok(n) => n, + Err(_) => span_bug!(span, + "bad `simd_shuffle` instruction only caught in trans?") + }; + + require_simd!(ret_ty, "return"); + + let out_len = ret_ty.simd_size(tcx); + require!(out_len == n, + "expected return type of length {}, found `{}` with length {}", + n, ret_ty, out_len); + require!(in_elem == ret_ty.simd_type(tcx), + "expected return element type `{}` (element of input `{}`), \ + found `{}` with element type `{}`", + in_elem, in_ty, + ret_ty, ret_ty.simd_type(tcx)); + + let total_len = in_len as u64 * 2; + + let vector = llargs[2]; + + let indices: 
Option> = (0..n) + .map(|i| { + let arg_idx = i; + let val = const_get_elt(vector, &[i as libc::c_uint]); + match const_to_opt_uint(val) { + None => { + emit_error!("shuffle index #{} is not a constant", arg_idx); + None + } + Some(idx) if idx >= total_len => { + emit_error!("shuffle index #{} is out of bounds (limit {})", + arg_idx, total_len); + None + } + Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)), + } + }) + .collect(); + let indices = match indices { + Some(i) => i, + None => return C_null(llret_ty) + }; + + return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices)) + } + + if name == "simd_insert" { + require!(in_elem == arg_tys[2], + "expected inserted type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, arg_tys[2]); + return InsertElement(bcx, llargs[0], llargs[2], llargs[1]) + } + if name == "simd_extract" { + require!(ret_ty == in_elem, + "expected return type `{}` (element of input `{}`), found `{}`", + in_elem, in_ty, ret_ty); + return ExtractElement(bcx, llargs[0], llargs[1]) + } + + if name == "simd_cast" { + require_simd!(ret_ty, "return"); + let out_len = ret_ty.simd_size(tcx); + require!(in_len == out_len, + "expected return type with length {} (same as input type `{}`), \ + found `{}` with length {}", + in_len, in_ty, + ret_ty, out_len); + // casting cares about nominal type, not just structural type + let out_elem = ret_ty.simd_type(tcx); + + if in_elem == out_elem { return llargs[0]; } + + enum Style { Float, Int(/* is signed? */ bool), Unsupported } + + let (in_style, in_width) = match in_elem.sty { + // vectors of pointer-sized integers should've been + // disallowed before here, so this unwrap is safe. 
+ ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::TyFloat(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0) + }; + let (out_style, out_width) = match out_elem.sty { + ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), + ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), + ty::TyFloat(f) => (Style::Float, f.bit_width()), + _ => (Style::Unsupported, 0) + }; + + match (in_style, out_style) { + (Style::Int(in_is_signed), Style::Int(_)) => { + return match in_width.cmp(&out_width) { + Ordering::Greater => Trunc(bcx, llargs[0], llret_ty), + Ordering::Equal => llargs[0], + Ordering::Less => if in_is_signed { + SExt(bcx, llargs[0], llret_ty) + } else { + ZExt(bcx, llargs[0], llret_ty) + } + } + } + (Style::Int(in_is_signed), Style::Float) => { + return if in_is_signed { + SIToFP(bcx, llargs[0], llret_ty) + } else { + UIToFP(bcx, llargs[0], llret_ty) + } + } + (Style::Float, Style::Int(out_is_signed)) => { + return if out_is_signed { + FPToSI(bcx, llargs[0], llret_ty) + } else { + FPToUI(bcx, llargs[0], llret_ty) + } + } + (Style::Float, Style::Float) => { + return match in_width.cmp(&out_width) { + Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty), + Ordering::Equal => llargs[0], + Ordering::Less => FPExt(bcx, llargs[0], llret_ty) + } + } + _ => {/* Unsupported. Fallthrough. */} + } + require!(false, + "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", + in_ty, in_elem, + ret_ty, out_elem); + } + macro_rules! arith { + ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => { + $( + if name == stringify!($name) { + match in_elem.sty { + $( + $(ty::$p(_))|* => { + return $call(bcx, llargs[0], llargs[1], call_debug_location) + } + )* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* + } + } + arith! 
{ + simd_add: TyUint, TyInt => Add, TyFloat => FAdd; + simd_sub: TyUint, TyInt => Sub, TyFloat => FSub; + simd_mul: TyUint, TyInt => Mul, TyFloat => FMul; + simd_div: TyFloat => FDiv; + simd_shl: TyUint, TyInt => Shl; + simd_shr: TyUint => LShr, TyInt => AShr; + simd_and: TyUint, TyInt => And; + simd_or: TyUint, TyInt => Or; + simd_xor: TyUint, TyInt => Xor; + } + span_bug!(span, "unknown SIMD intrinsic"); +} + +// Returns the width of an int TypeVariant, and if it's signed or not +// Returns None if the type is not an integer +fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) + -> Option<(u64, bool)> { + use rustc::ty::{TyInt, TyUint}; + match *sty { + TyInt(t) => Some((match t { + ast::IntTy::Is => { + match &ccx.tcx().sess.target.target.target_pointer_width[..] { + "16" => 16, + "32" => 32, + "64" => 64, + tws => bug!("Unsupported target word size for isize: {}", tws), + } + }, + ast::IntTy::I8 => 8, + ast::IntTy::I16 => 16, + ast::IntTy::I32 => 32, + ast::IntTy::I64 => 64, + }, true)), + TyUint(t) => Some((match t { + ast::UintTy::Us => { + match &ccx.tcx().sess.target.target.target_pointer_width[..] 
{ + "16" => 16, + "32" => 32, + "64" => 64, + tws => bug!("Unsupported target word size for usize: {}", tws), + } + }, + ast::UintTy::U8 => 8, + ast::UintTy::U16 => 16, + ast::UintTy::U32 => 32, + ast::UintTy::U64 => 64, + }, false)), + _ => None, + } +} + +// Returns the width of a float TypeVariant +// Returns None if the type is not a float +fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>) + -> Option { + use rustc::ty::TyFloat; + match *sty { + TyFloat(t) => Some(match t { + ast::FloatTy::F32 => 32, + ast::FloatTy::F64 => 64, + }), + _ => None, + } +} diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index c1ab0284ade52..0e7ead30a933a 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -21,13 +21,15 @@ #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] +#![feature(associated_consts)] #![feature(box_patterns)] #![feature(box_syntax)] +#![feature(cell_extras)] #![feature(const_fn)] #![feature(custom_attribute)] #![allow(unused_attributes)] -#![feature(iter_arith)] #![feature(libc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] @@ -36,50 +38,141 @@ #![feature(staged_api)] #![feature(unicode)] -#![allow(trivial_casts)] +use rustc::dep_graph::WorkProduct; extern crate arena; extern crate flate; extern crate getopts; extern crate graphviz; extern crate libc; -extern crate rustc; +#[macro_use] extern crate rustc; extern crate rustc_back; extern crate rustc_data_structures; -extern crate rustc_front; -extern crate rustc_llvm as llvm; -extern crate rustc_mir; +extern crate rustc_incremental; +pub extern crate rustc_llvm as llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate serialize; +extern crate rustc_const_math; +extern crate rustc_const_eval; +#[macro_use] +#[no_link] +extern crate rustc_bitflags; 
#[macro_use] extern crate log; #[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_errors as errors; pub use rustc::session; pub use rustc::middle; pub use rustc::lint; pub use rustc::util; +pub use base::trans_crate; +pub use disr::Disr; + pub mod back { - pub use rustc_back::abi; - pub use rustc_back::rpath; - pub use rustc_back::svh; + pub use rustc::hir::svh; pub mod archive; pub mod linker; pub mod link; pub mod lto; + pub mod symbol_names; pub mod write; pub mod msvc; + pub mod rpath; } pub mod diagnostics; -pub mod trans; -pub mod save; +#[macro_use] +mod macros; + +mod abi; +mod adt; +mod asm; +mod assert_module_sources; +mod attributes; +mod base; +mod basic_block; +mod build; +mod builder; +mod cabi_aarch64; +mod cabi_arm; +mod cabi_asmjs; +mod cabi_mips; +mod cabi_mips64; +mod cabi_msp430; +mod cabi_powerpc; +mod cabi_powerpc64; +mod cabi_s390x; +mod cabi_x86; +mod cabi_x86_64; +mod cabi_x86_win64; +mod callee; +mod cleanup; +mod collector; +mod common; +mod consts; +mod context; +mod debuginfo; +mod declare; +mod disr; +mod glue; +mod intrinsic; +mod machine; +mod meth; +mod mir; +mod monomorphize; +mod partitioning; +mod symbol_map; +mod symbol_names_test; +mod trans_item; +mod tvec; +mod type_; +mod type_of; +mod value; + +#[derive(Clone)] +pub struct ModuleTranslation { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + pub name: String, + pub symbol_name_hash: u64, + pub source: ModuleSource, +} + +#[derive(Clone)] +pub enum ModuleSource { + /// Copy the `.o` files or whatever from the incr. comp. directory. + Preexisting(WorkProduct), + + /// Rebuild from this LLVM module. 
+ Translated(ModuleLlvm), +} + +#[derive(Copy, Clone)] +pub struct ModuleLlvm { + pub llcx: llvm::ContextRef, + pub llmod: llvm::ModuleRef, +} + +unsafe impl Send for ModuleTranslation { } +unsafe impl Sync for ModuleTranslation { } -pub mod lib { - pub use llvm; +pub struct CrateTranslation { + pub modules: Vec, + pub metadata_module: ModuleTranslation, + pub link: middle::cstore::LinkMeta, + pub metadata: Vec, + pub reachable: Vec, + pub no_builtins: bool, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo } __build_diagnostic_array! { librustc_trans, DIAGNOSTICS } diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs new file mode 100644 index 0000000000000..cd31f02842add --- /dev/null +++ b/src/librustc_trans/machine.rs @@ -0,0 +1,79 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Information concerning the machine representation of various types. + +#![allow(non_camel_case_types)] + +use llvm::{self, ValueRef}; +use common::*; + +use type_::Type; + +pub type llbits = u64; +pub type llsize = u64; +pub type llalign = u32; + +// ______________________________________________________________________ +// compute sizeof / alignof + +// Returns the number of bytes between successive elements of type T in an +// array of T. This is the "ABI" size. It includes any ABI-mandated padding. +pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { + unsafe { + return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref()); + } +} + +/// Returns the "real" size of the type in bits. 
+pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { + unsafe { + llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()) + } +} + +/// Returns the size of the type as an LLVM constant integer value. +pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { + // Once upon a time, this called LLVMSizeOf, which does a + // getelementptr(1) on a null pointer and casts to an int, in + // order to obtain the type size as a value without requiring the + // target data layout. But we have the target data layout, so + // there's no need for that contrivance. The instruction + // selection DAG generator would flatten that GEP(1) node into a + // constant of the type's alloc size, so let's save it some work. + return C_uint(cx, llsize_of_alloc(cx, ty)); +} + +// Returns the preferred alignment of the given type for the current target. +// The preferred alignment may be larger than the alignment used when +// packing the type into structs. This will be used for things like +// allocations inside a stack frame, which LLVM has a free hand in. +pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign { + unsafe { + return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref()); + } +} + +// Returns the minimum alignment of a type required by the platform. +// This is the alignment that will be used for struct fields, arrays, +// and similar ABI-mandated things. 
+pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign { + unsafe { + return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref()); + } +} + +pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 { + unsafe { + return llvm::LLVMOffsetOfElement(cx.td(), + struct_ty.to_ref(), + element as u32); + } +} diff --git a/src/librustc_trans/trans/macros.rs b/src/librustc_trans/macros.rs similarity index 100% rename from src/librustc_trans/trans/macros.rs rename to src/librustc_trans/macros.rs diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs new file mode 100644 index 0000000000000..aa9b900fa4653 --- /dev/null +++ b/src/librustc_trans/meth.rs @@ -0,0 +1,157 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use attributes; +use arena::TypedArena; +use llvm::{ValueRef, get_params}; +use rustc::traits; +use abi::FnType; +use base::*; +use build::*; +use callee::Callee; +use common::*; +use consts; +use debuginfo::DebugLoc; +use declare; +use glue; +use machine; +use monomorphize::Instance; +use type_::Type; +use type_of::*; +use value::Value; +use rustc::ty; + +// drop_glue pointer, size, align. +const VTABLE_OFFSET: usize = 3; + +/// Extracts a method from a trait object's vtable, at the specified index. +pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, + llvtable: ValueRef, + vtable_index: usize) + -> ValueRef { + // Load the data pointer from the object. 
+ debug!("get_virtual_method(vtable_index={}, llvtable={:?})", + vtable_index, Value(llvtable)); + + Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])) +} + +/// Generate a shim function that allows an object type like `SomeTrait` to +/// implement the type `SomeTrait`. Imagine a trait definition: +/// +/// trait SomeTrait { fn get(&self) -> i32; ... } +/// +/// And a generic bit of code: +/// +/// fn foo(t: &T) { +/// let x = SomeTrait::get; +/// x(t) +/// } +/// +/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`? +/// The answer is that it is a shim function generated by this routine: +/// +/// fn shim(t: &SomeTrait) -> i32 { +/// // ... call t.get() virtually ... +/// } +/// +/// In fact, all virtual calls can be thought of as normal trait calls +/// that go through this shim function. +pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, + callee: Callee<'tcx>) + -> ValueRef { + let _icx = push_ctxt("trans_object_shim"); + let tcx = ccx.tcx(); + + debug!("trans_object_shim({:?})", callee); + + let (sig, abi, function_name) = match callee.ty.sty { + ty::TyFnDef(def_id, substs, f) => { + let instance = Instance::new(def_id, substs); + (&f.sig, f.abi, instance.symbol_name(ccx.shared())) + } + _ => bug!() + }; + + let sig = tcx.erase_late_bound_regions_and_normalize(sig); + let fn_ty = FnType::new(ccx, abi, &sig, &[]); + + let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); + attributes::set_frame_pointer_elimination(ccx, llfn); + + let (block_arena, fcx): (TypedArena<_>, FunctionContext); + block_arena = TypedArena::new(); + fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); + let mut bcx = fcx.init(false); + + let dest = fcx.llretslotptr.get(); + let llargs = get_params(fcx.llfn); + bcx = callee.call(bcx, DebugLoc::None, + &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; + + fcx.finish(bcx, DebugLoc::None); + + llfn +} + +/// Creates a dynamic vtable for the given type 
and vtable origin. +/// This is used only for objects. +/// +/// The vtables are cached instead of created on every call. +/// +/// The `trait_ref` encodes the erased self type. Hence if we are +/// making an object `Foo` from a value of type `Foo`, then +/// `trait_ref` would map `T:Trait`. +pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ty: ty::Ty<'tcx>, + trait_ref: Option>) + -> ValueRef +{ + let tcx = ccx.tcx(); + let _icx = push_ctxt("meth::get_vtable"); + + debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); + + // Check the cache. + if let Some(&val) = ccx.vtables().borrow().get(&(ty, trait_ref)) { + return val; + } + + // Not in the cache. Build it. + let nullptr = C_null(Type::nil(ccx).ptr_to()); + + let size_ty = sizing_type_of(ccx, ty); + let size = machine::llsize_of_alloc(ccx, size_ty); + let align = align_of(ccx, ty); + + let mut components: Vec<_> = [ + // Generate a destructor for the vtable. + glue::get_drop_glue(ccx, ty), + C_uint(ccx, size), + C_uint(ccx, align) + ].iter().cloned().collect(); + + if let Some(trait_ref) = trait_ref { + let trait_ref = trait_ref.with_self_ty(tcx, ty); + let methods = traits::get_vtable_methods(tcx, trait_ref).map(|opt_mth| { + opt_mth.map_or(nullptr, |(def_id, substs)| { + Callee::def(ccx, def_id, substs).reify(ccx) + }) + }); + components.extend(methods); + } + + let vtable_const = C_struct(ccx, &components, false); + let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); + let vtable = consts::addr_of(ccx, vtable_const, align, "vtable"); + + ccx.vtables().borrow_mut().insert((ty, trait_ref), vtable); + vtable +} diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs new file mode 100644 index 0000000000000..e4d0533ec8784 --- /dev/null +++ b/src/librustc_trans/mir/analyze.rs @@ -0,0 +1,296 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An analysis to determine which locals require allocas and +//! which do not. + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc::mir::{self, Location, TerminatorKind}; +use rustc::mir::visit::{Visitor, LvalueContext}; +use rustc::mir::traversal; +use common::{self, Block, BlockAndBuilder}; +use glue; +use super::rvalue; + +pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>, + mir: &mir::Mir<'tcx>) -> BitVector { + let bcx = bcx.build(); + let mut analyzer = LocalAnalyzer::new(mir, &bcx); + + analyzer.visit_mir(mir); + + for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { + let ty = bcx.monomorphize(&ty); + debug!("local {} has type {:?}", index, ty); + if ty.is_scalar() || + ty.is_unique() || + ty.is_region_ptr() || + ty.is_simd() || + common::type_is_zero_size(bcx.ccx(), ty) + { + // These sorts of types are immediates that we can store + // in an ValueRef without an alloca. + assert!(common::type_is_immediate(bcx.ccx(), ty) || + common::type_is_fat_ptr(bcx.tcx(), ty)); + } else if common::type_is_imm_pair(bcx.ccx(), ty) { + // We allow pairs and uses of any of their 2 fields. + } else { + // These sorts of types require an alloca. Note that + // type_is_immediate() may *still* be true, particularly + // for newtypes, but we currently force some types + // (e.g. structs) into an alloca unconditionally, just so + // that we don't have to deal with having two pathways + // (gep vs extractvalue etc). 
+ analyzer.mark_as_lvalue(mir::Local::new(index)); + } + } + + analyzer.lvalue_locals +} + +struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> { + mir: &'mir mir::Mir<'tcx>, + bcx: &'mir BlockAndBuilder<'bcx, 'tcx>, + lvalue_locals: BitVector, + seen_assigned: BitVector +} + +impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> { + fn new(mir: &'mir mir::Mir<'tcx>, + bcx: &'mir BlockAndBuilder<'bcx, 'tcx>) + -> LocalAnalyzer<'mir, 'bcx, 'tcx> { + LocalAnalyzer { + mir: mir, + bcx: bcx, + lvalue_locals: BitVector::new(mir.local_decls.len()), + seen_assigned: BitVector::new(mir.local_decls.len()) + } + } + + fn mark_as_lvalue(&mut self, local: mir::Local) { + debug!("marking {:?} as lvalue", local); + self.lvalue_locals.insert(local.index()); + } + + fn mark_assigned(&mut self, local: mir::Local) { + if !self.seen_assigned.insert(local.index()) { + self.mark_as_lvalue(local); + } + } +} + +impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { + fn visit_assign(&mut self, + block: mir::BasicBlock, + lvalue: &mir::Lvalue<'tcx>, + rvalue: &mir::Rvalue<'tcx>, + location: Location) { + debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue); + + if let mir::Lvalue::Local(index) = *lvalue { + self.mark_assigned(index); + if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) { + self.mark_as_lvalue(index); + } + } else { + self.visit_lvalue(lvalue, LvalueContext::Store, location); + } + + self.visit_rvalue(rvalue, location); + } + + fn visit_terminator_kind(&mut self, + block: mir::BasicBlock, + kind: &mir::TerminatorKind<'tcx>, + location: Location) { + match *kind { + mir::TerminatorKind::Call { + func: mir::Operand::Constant(mir::Constant { + literal: mir::Literal::Item { def_id, .. }, .. + }), + ref args, .. 
+ } if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => { + // box_free(x) shares with `drop x` the property that it + // is not guaranteed to be statically dominated by the + // definition of x, so x must always be in an alloca. + if let mir::Operand::Consume(ref lvalue) = args[0] { + self.visit_lvalue(lvalue, LvalueContext::Drop, location); + } + } + _ => {} + } + + self.super_terminator_kind(block, kind, location); + } + + fn visit_lvalue(&mut self, + lvalue: &mir::Lvalue<'tcx>, + context: LvalueContext<'tcx>, + location: Location) { + debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); + + // Allow uses of projections of immediate pair fields. + if let mir::Lvalue::Projection(ref proj) = *lvalue { + if let mir::Lvalue::Local(_) = proj.base { + let ty = proj.base.ty(self.mir, self.bcx.tcx()); + + let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx())); + if common::type_is_imm_pair(self.bcx.ccx(), ty) { + if let mir::ProjectionElem::Field(..) = proj.elem { + if let LvalueContext::Consume = context { + return; + } + } + } + } + } + + if let mir::Lvalue::Local(index) = *lvalue { + match context { + LvalueContext::Call => { + self.mark_assigned(index); + } + + LvalueContext::StorageLive | + LvalueContext::StorageDead | + LvalueContext::Consume => {} + + LvalueContext::Store | + LvalueContext::Inspect | + LvalueContext::Borrow { .. } | + LvalueContext::Projection(..) => { + self.mark_as_lvalue(index); + } + + LvalueContext::Drop => { + let ty = lvalue.ty(self.mir, self.bcx.tcx()); + let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx())); + + // Only need the lvalue if we're actually dropping it. + if glue::type_needs_drop(self.bcx.tcx(), ty) { + self.mark_as_lvalue(index); + } + } + } + } + + // A deref projection only reads the pointer, never needs the lvalue. 
+ if let mir::Lvalue::Projection(ref proj) = *lvalue { + if let mir::ProjectionElem::Deref = proj.elem { + return self.visit_lvalue(&proj.base, LvalueContext::Consume, location); + } + } + + self.super_lvalue(lvalue, context, location); + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum CleanupKind { + NotCleanup, + Funclet, + Internal { funclet: mir::BasicBlock } +} + +pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>, + mir: &mir::Mir<'tcx>) + -> IndexVec +{ + fn discover_masters<'tcx>(result: &mut IndexVec, + mir: &mir::Mir<'tcx>) { + for (bb, data) in mir.basic_blocks().iter_enumerated() { + match data.terminator().kind { + TerminatorKind::Goto { .. } | + TerminatorKind::Resume | + TerminatorKind::Return | + TerminatorKind::Unreachable | + TerminatorKind::If { .. } | + TerminatorKind::Switch { .. } | + TerminatorKind::SwitchInt { .. } => { + /* nothing to do */ + } + TerminatorKind::Call { cleanup: unwind, .. } | + TerminatorKind::Assert { cleanup: unwind, .. } | + TerminatorKind::DropAndReplace { unwind, .. } | + TerminatorKind::Drop { unwind, .. 
} => { + if let Some(unwind) = unwind { + debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet", + bb, data, unwind); + result[unwind] = CleanupKind::Funclet; + } + } + } + } + } + + fn propagate<'tcx>(result: &mut IndexVec, + mir: &mir::Mir<'tcx>) { + let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks()); + + let mut set_successor = |funclet: mir::BasicBlock, succ| { + match funclet_succs[funclet] { + ref mut s @ None => { + debug!("set_successor: updating successor of {:?} to {:?}", + funclet, succ); + *s = Some(succ); + }, + Some(s) => if s != succ { + span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}", + funclet, s, succ); + } + } + }; + + for (bb, data) in traversal::reverse_postorder(mir) { + let funclet = match result[bb] { + CleanupKind::NotCleanup => continue, + CleanupKind::Funclet => bb, + CleanupKind::Internal { funclet } => funclet, + }; + + debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}", + bb, data, result[bb], funclet); + + for &succ in data.terminator().successors().iter() { + let kind = result[succ]; + debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", + funclet, succ, kind); + match kind { + CleanupKind::NotCleanup => { + result[succ] = CleanupKind::Internal { funclet: funclet }; + } + CleanupKind::Funclet => { + set_successor(funclet, succ); + } + CleanupKind::Internal { funclet: succ_funclet } => { + if funclet != succ_funclet { + // `succ` has 2 different funclet going into it, so it must + // be a funclet by itself. 
+ + debug!("promoting {:?} to a funclet and updating {:?}", succ, + succ_funclet); + result[succ] = CleanupKind::Funclet; + set_successor(succ_funclet, succ); + set_successor(funclet, succ); + } + } + } + } + } + } + + let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks()); + + discover_masters(&mut result, mir); + propagate(&mut result, mir); + debug!("cleanup_kinds: result={:?}", result); + result +} diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs new file mode 100644 index 0000000000000..29e6f6af416bc --- /dev/null +++ b/src/librustc_trans/mir/block.rs @@ -0,0 +1,959 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::{self, ValueRef}; +use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; +use rustc::middle::lang_items; +use rustc::ty; +use rustc::mir; +use abi::{Abi, FnType, ArgType}; +use adt; +use base; +use build; +use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; +use common::{self, Block, BlockAndBuilder, LandingPad}; +use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; +use consts; +use debuginfo::DebugLoc; +use Disr; +use machine::{llalign_of_min, llbitsize_of_real}; +use meth; +use type_of; +use glue; +use type_::Type; + +use rustc_data_structures::fx::FxHashMap; +use syntax::symbol::Symbol; + +use super::{MirContext, LocalRef}; +use super::analyze::CleanupKind; +use super::constant::Const; +use super::lvalue::{LvalueRef}; +use super::operand::OperandRef; +use super::operand::OperandValue::{Pair, Ref, Immediate}; + +use std::cell::Ref as CellRef; + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_block(&mut self, 
bb: mir::BasicBlock) { + let mut bcx = self.bcx(bb); + let data = &CellRef::clone(&self.mir)[bb]; + + debug!("trans_block({:?}={:?})", bb, data); + + // Create the cleanup bundle, if needed. + let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad()); + let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle()); + + let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { + let lltarget = this.blocks[bb].llbb; + if let Some(cp) = cleanup_pad { + match this.cleanup_kinds[bb] { + CleanupKind::Funclet => { + // micro-optimization: generate a `ret` rather than a jump + // to a return block + bcx.cleanup_ret(cp, Some(lltarget)); + } + CleanupKind::Internal { .. } => bcx.br(lltarget), + CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb) + } + } else { + bcx.br(lltarget); + } + }; + + let llblock = |this: &mut Self, target: mir::BasicBlock| { + let lltarget = this.blocks[target].llbb; + + if let Some(cp) = cleanup_pad { + match this.cleanup_kinds[target] { + CleanupKind::Funclet => { + // MSVC cross-funclet jump - need a trampoline + + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); + let trampoline = this.fcx.new_block(name).build(); + trampoline.set_personality_fn(this.fcx.eh_personality()); + trampoline.cleanup_ret(cp, Some(lltarget)); + trampoline.llbb() + } + CleanupKind::Internal { .. 
} => lltarget, + CleanupKind::NotCleanup => + bug!("jump from cleanup bb {:?} to bb {:?}", bb, target) + } + } else { + if let (CleanupKind::NotCleanup, CleanupKind::Funclet) = + (this.cleanup_kinds[bb], this.cleanup_kinds[target]) + { + // jump *into* cleanup - need a landing pad if GNU + this.landing_pad_to(target).llbb + } else { + lltarget + } + } + }; + + for statement in &data.statements { + bcx = self.trans_statement(bcx, statement); + } + + let terminator = data.terminator(); + debug!("trans_block: terminator: {:?}", terminator); + + let span = terminator.source_info.span; + let debug_loc = self.debug_loc(terminator.source_info); + debug_loc.apply_to_bcx(&bcx); + debug_loc.apply(bcx.fcx()); + match terminator.kind { + mir::TerminatorKind::Resume => { + if let Some(cleanup_pad) = cleanup_pad { + bcx.cleanup_ret(cleanup_pad, None); + } else { + let llpersonality = bcx.fcx().eh_personality(); + bcx.set_personality_fn(llpersonality); + + let ps = self.get_personality_slot(&bcx); + let lp = bcx.load(ps); + bcx.with_block(|bcx| { + base::call_lifetime_end(bcx, ps); + base::trans_unwind_resume(bcx, lp); + }); + } + } + + mir::TerminatorKind::Goto { target } => { + funclet_br(self, bcx, target); + } + + mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => { + let cond = self.trans_operand(&bcx, cond); + + let lltrue = llblock(self, true_bb); + let llfalse = llblock(self, false_bb); + bcx.cond_br(cond.immediate(), lltrue, llfalse); + } + + mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { + let discr_lvalue = self.trans_lvalue(&bcx, discr); + let ty = discr_lvalue.ty.to_ty(bcx.tcx()); + let discr = bcx.with_block(|bcx| + adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true) + ); + + let mut bb_hist = FxHashMap(); + for target in targets { + *bb_hist.entry(target).or_insert(0) += 1; + } + let (default_bb, default_blk) = match bb_hist.iter().max_by_key(|&(_, c)| c) { + // If a single target basic blocks is predominant, 
promote that to be the + // default case for the switch instruction to reduce the size of the generated + // code. This is especially helpful in cases like an if-let on a huge enum. + // Note: This optimization is only valid for exhaustive matches. + Some((&&bb, &c)) if c > targets.len() / 2 => { + (Some(bb), llblock(self, bb)) + } + // We're generating an exhaustive switch, so the else branch + // can't be hit. Branching to an unreachable instruction + // lets LLVM know this + _ => (None, self.unreachable_block().llbb) + }; + let switch = bcx.switch(discr, default_blk, targets.len()); + assert_eq!(adt_def.variants.len(), targets.len()); + for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { + if default_bb != Some(target) { + let llbb = llblock(self, target); + let llval = bcx.with_block(|bcx| adt::trans_case( + bcx, ty, Disr::from(adt_variant.disr_val))); + build::AddCase(switch, llval, llbb) + } + } + } + + mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { + let (otherwise, targets) = targets.split_last().unwrap(); + let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); + let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); + let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); + for (value, target) in values.iter().zip(targets) { + let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); + let llbb = llblock(self, *target); + build::AddCase(switch, val.llval, llbb) + } + } + + mir::TerminatorKind::Return => { + let ret = bcx.fcx().fn_ty.ret; + if ret.is_ignore() || ret.is_indirect() { + bcx.ret_void(); + return; + } + + let llval = if let Some(cast_ty) = ret.cast { + let op = match self.locals[mir::RETURN_POINTER] { + LocalRef::Operand(Some(op)) => op, + LocalRef::Operand(None) => bug!("use of return before def"), + LocalRef::Lvalue(tr_lvalue) => { + OperandRef { + val: Ref(tr_lvalue.llval), + ty: tr_lvalue.ty.to_ty(bcx.tcx()) + } + } + }; + let 
llslot = match op.val { + Immediate(_) | Pair(..) => { + let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret"); + self.store_operand(&bcx, llscratch, op); + llscratch + } + Ref(llval) => llval + }; + let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to())); + let llalign = llalign_of_min(bcx.ccx(), ret.ty); + unsafe { + llvm::LLVMSetAlignment(load, llalign); + } + load + } else { + let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); + op.pack_if_pair(&bcx).immediate() + }; + bcx.ret(llval); + } + + mir::TerminatorKind::Unreachable => { + bcx.unreachable(); + } + + mir::TerminatorKind::Drop { ref location, target, unwind } => { + let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); + let ty = bcx.monomorphize(&ty); + + // Double check for necessity to drop + if !glue::type_needs_drop(bcx.tcx(), ty) { + funclet_br(self, bcx, target); + return; + } + + let lvalue = self.trans_lvalue(&bcx, location); + let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); + let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty); + let is_sized = common::type_is_sized(bcx.tcx(), ty); + let llvalue = if is_sized { + if drop_ty != ty { + bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + } else { + lvalue.llval + } + } else { + // FIXME(#36457) Currently drop glue takes sized + // values as a `*(data, meta)`, but elsewhere in + // MIR we pass `(data, meta)` as two separate + // arguments. It would be better to fix drop glue, + // but I am shooting for a quick fix to #35546 + // here that can be cleanly backported to beta, so + // I want to avoid touching all of trans. 
+ bcx.with_block(|bcx| { + let scratch = base::alloc_ty(bcx, ty, "drop"); + base::call_lifetime_start(bcx, scratch); + build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch)); + build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch)); + scratch + }) + }; + if let Some(unwind) = unwind { + bcx.invoke(drop_fn, + &[llvalue], + self.blocks[target].llbb, + llblock(self, unwind), + cleanup_bundle); + } else { + bcx.call(drop_fn, &[llvalue], cleanup_bundle); + funclet_br(self, bcx, target); + } + } + + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { + let cond = self.trans_operand(&bcx, cond).immediate(); + let mut const_cond = common::const_to_opt_uint(cond).map(|c| c == 1); + + // This case can currently arise only from functions marked + // with #[rustc_inherit_overflow_checks] and inlined from + // another crate (mostly core::num generic/#[inline] fns), + // while the current crate doesn't use overflow checks. + // NOTE: Unlike binops, negation doesn't have its own + // checked operation, just a comparison with the minimum + // value, so we have to check for the assert message. + if !bcx.ccx().check_overflow() { + use rustc_const_math::ConstMathErr::Overflow; + use rustc_const_math::Op::Neg; + + if let mir::AssertMessage::Math(Overflow(Neg)) = *msg { + const_cond = Some(expected); + } + } + + // Don't translate the panic block if success if known. + if const_cond == Some(expected) { + funclet_br(self, bcx, target); + return; + } + + // Pass the condition through llvm.expect for branch hinting. + let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); + let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx(), expected)], None); + + // Create the failure block and the conditional branch to it. 
+ let lltarget = llblock(self, target); + let panic_block = self.fcx.new_block("panic"); + if expected { + bcx.cond_br(cond, lltarget, panic_block.llbb); + } else { + bcx.cond_br(cond, panic_block.llbb, lltarget); + } + + // After this point, bcx is the block for the call to panic. + bcx = panic_block.build(); + debug_loc.apply_to_bcx(&bcx); + + // Get the location information. + let loc = bcx.sess().codemap().lookup_char_pos(span.lo); + let filename = Symbol::intern(&loc.file.name).as_str(); + let filename = C_str_slice(bcx.ccx(), filename); + let line = C_u32(bcx.ccx(), loc.line as u32); + + // Put together the arguments to the panic entry point. + let (lang_item, args, const_err) = match *msg { + mir::AssertMessage::BoundsCheck { ref len, ref index } => { + let len = self.trans_operand(&mut bcx, len).immediate(); + let index = self.trans_operand(&mut bcx, index).immediate(); + + let const_err = common::const_to_opt_uint(len).and_then(|len| { + common::const_to_opt_uint(index).map(|index| { + ErrKind::IndexOutOfBounds { + len: len, + index: index + } + }) + }); + + let file_line = C_struct(bcx.ccx(), &[filename, line], false); + let align = llalign_of_min(bcx.ccx(), common::val_ty(file_line)); + let file_line = consts::addr_of(bcx.ccx(), + file_line, + align, + "panic_bounds_check_loc"); + (lang_items::PanicBoundsCheckFnLangItem, + vec![file_line, index, len], + const_err) + } + mir::AssertMessage::Math(ref err) => { + let msg_str = Symbol::intern(err.description()).as_str(); + let msg_str = C_str_slice(bcx.ccx(), msg_str); + let msg_file_line = C_struct(bcx.ccx(), + &[msg_str, filename, line], + false); + let align = llalign_of_min(bcx.ccx(), common::val_ty(msg_file_line)); + let msg_file_line = consts::addr_of(bcx.ccx(), + msg_file_line, + align, + "panic_loc"); + (lang_items::PanicFnLangItem, + vec![msg_file_line], + Some(ErrKind::Math(err.clone()))) + } + }; + + // If we know we always panic, and the error message + // is also constant, then we can produce a 
warning. + if const_cond == Some(!expected) { + if let Some(err) = const_err { + let err = ConstEvalErr{ span: span, kind: err }; + let mut diag = bcx.tcx().sess.struct_span_warn( + span, "this expression will panic at run-time"); + note_const_eval_err(bcx.tcx(), &err, span, "expression", &mut diag); + diag.emit(); + } + } + + // Obtain the panic entry point. + let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); + let callee = Callee::def(bcx.ccx(), def_id, + bcx.ccx().empty_substs_for_def_id(def_id)); + let llfn = callee.reify(bcx.ccx()); + + // Translate the actual panic invoke/call. + if let Some(unwind) = cleanup { + bcx.invoke(llfn, + &args, + self.unreachable_block().llbb, + llblock(self, unwind), + cleanup_bundle); + } else { + bcx.call(llfn, &args, cleanup_bundle); + bcx.unreachable(); + } + } + + mir::TerminatorKind::DropAndReplace { .. } => { + bug!("undesugared DropAndReplace in trans: {:?}", data); + } + + mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => { + // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. + let callee = self.trans_operand(&bcx, func); + + let (mut callee, abi, sig) = match callee.ty.sty { + ty::TyFnDef(def_id, substs, f) => { + (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig) + } + ty::TyFnPtr(f) => { + (Callee { + data: Fn(callee.immediate()), + ty: callee.ty + }, f.abi, &f.sig) + } + _ => bug!("{} is not callable", callee.ty) + }; + + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(sig); + + // Handle intrinsics old trans wants Expr's for, ourselves. + let intrinsic = match (&callee.ty.sty, &callee.data) { + (&ty::TyFnDef(def_id, ..), &Intrinsic) => { + Some(bcx.tcx().item_name(def_id).as_str()) + } + _ => None + }; + let intrinsic = intrinsic.as_ref().map(|s| &s[..]); + + if intrinsic == Some("move_val_init") { + let &(_, target) = destination.as_ref().unwrap(); + // The first argument is a thin destination pointer. 
+ let llptr = self.trans_operand(&bcx, &args[0]).immediate(); + let val = self.trans_operand(&bcx, &args[1]); + self.store_operand(&bcx, llptr, val); + funclet_br(self, bcx, target); + return; + } + + if intrinsic == Some("transmute") { + let &(ref dest, target) = destination.as_ref().unwrap(); + self.with_lvalue_ref(&bcx, dest, |this, dest| { + this.trans_transmute(&bcx, &args[0], dest); + }); + + funclet_br(self, bcx, target); + return; + } + + let extra_args = &args[sig.inputs.len()..]; + let extra_args = extra_args.iter().map(|op_arg| { + let op_ty = op_arg.ty(&self.mir, bcx.tcx()); + bcx.monomorphize(&op_ty) + }).collect::>(); + let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args); + + // The arguments we'll be passing. Plus one to account for outptr, if used. + let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; + let mut llargs = Vec::with_capacity(arg_count); + + // Prepare the return value destination + let ret_dest = if let Some((ref dest, _)) = *destination { + let is_intrinsic = if let Intrinsic = callee.data { + true + } else { + false + }; + self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) + } else { + ReturnDest::Nothing + }; + + // Split the rust-call tupled arguments off. + let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + let is_shuffle = intrinsic.map_or(false, |name| { + name.starts_with("simd_shuffle") + }); + let mut idx = 0; + for arg in first_args { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. 
+ if is_shuffle && idx == 2 { + match *arg { + mir::Operand::Consume(_) => { + span_bug!(span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let val = self.trans_constant(&bcx, constant); + llargs.push(val.llval); + idx += 1; + continue; + } + } + } + + let op = self.trans_operand(&bcx, arg); + self.trans_argument(&bcx, op, &mut llargs, &fn_ty, + &mut idx, &mut callee.data); + } + if let Some(tup) = untuple { + self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, + &mut idx, &mut callee.data) + } + + let fn_ptr = match callee.data { + NamedTupleConstructor(_) => { + // FIXME translate this like mir::Rvalue::Aggregate. + callee.reify(bcx.ccx()) + } + Intrinsic => { + use intrinsic::trans_intrinsic_call; + + let (dest, llargs) = match ret_dest { + _ if fn_ty.ret.is_indirect() => { + (llargs[0], &llargs[1..]) + } + ReturnDest::Nothing => { + (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..]) + } + ReturnDest::IndirectOperand(dst, _) | + ReturnDest::Store(dst) => (dst, &llargs[..]), + ReturnDest::DirectOperand(_) => + bug!("Cannot use direct operand with an intrinsic call") + }; + + bcx.with_block(|bcx| { + trans_intrinsic_call(bcx, callee.ty, &fn_ty, + &llargs, dest, debug_loc); + }); + + if let ReturnDest::IndirectOperand(dst, _) = ret_dest { + // Make a fake operand for store_return + let op = OperandRef { + val: Ref(dst), + ty: sig.output, + }; + self.store_return(&bcx, ret_dest, fn_ty.ret, op); + } + + if let Some((_, target)) = *destination { + funclet_br(self, bcx, target); + } else { + // trans_intrinsic_call already used Unreachable. 
+ // bcx.unreachable(); + } + + return; + } + Fn(f) => f, + Virtual(_) => bug!("Virtual fn ptr not extracted") + }; + + // Many different ways to call a function handled here + if let &Some(cleanup) = cleanup { + let ret_bcx = if let Some((_, target)) = *destination { + self.blocks[target] + } else { + self.unreachable_block() + }; + let invokeret = bcx.invoke(fn_ptr, + &llargs, + ret_bcx.llbb, + llblock(self, cleanup), + cleanup_bundle); + fn_ty.apply_attrs_callsite(invokeret); + + if destination.is_some() { + let ret_bcx = ret_bcx.build(); + ret_bcx.at_start(|ret_bcx| { + debug_loc.apply_to_bcx(ret_bcx); + let op = OperandRef { + val: Immediate(invokeret), + ty: sig.output, + }; + self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op); + }); + } + } else { + let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); + fn_ty.apply_attrs_callsite(llret); + if let Some((_, target)) = *destination { + let op = OperandRef { + val: Immediate(llret), + ty: sig.output, + }; + self.store_return(&bcx, ret_dest, fn_ty.ret, op); + funclet_br(self, bcx, target); + } else { + bcx.unreachable(); + } + } + } + } + } + + fn trans_argument(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + op: OperandRef<'tcx>, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + callee: &mut CalleeData) { + if let Pair(a, b) = op.val { + // Treat the values in a fat pointer separately. + if common::type_is_fat_ptr(bcx.tcx(), op.ty) { + let (ptr, meta) = (a, b); + if *next_idx == 0 { + if let Virtual(idx) = *callee { + let llfn = bcx.with_block(|bcx| { + meth::get_virtual_method(bcx, meta, idx) + }); + let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + *callee = Fn(bcx.pointercast(llfn, llty)); + } + } + + let imm_op = |x| OperandRef { + val: Immediate(x), + // We won't be checking the type again. 
+ ty: bcx.tcx().types.err + }; + self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee); + self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee); + return; + } + } + + let arg = &fn_ty.args[*next_idx]; + *next_idx += 1; + + // Fill padding with undef value, where applicable. + if let Some(ty) = arg.pad { + llargs.push(C_undef(ty)); + } + + if arg.is_ignore() { + return; + } + + // Force by-ref if we have to load through a cast pointer. + let (mut llval, by_ref) = match op.val { + Immediate(_) | Pair(..) => { + if arg.is_indirect() || arg.cast.is_some() { + let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg"); + self.store_operand(bcx, llscratch, op); + (llscratch, true) + } else { + (op.pack_if_pair(bcx).immediate(), false) + } + } + Ref(llval) => (llval, true) + }; + + if by_ref && !arg.is_indirect() { + // Have to load the argument, maybe while casting it. + if arg.original_ty == Type::i1(bcx.ccx()) { + // We store bools as i8 so we need to truncate to i1. + llval = bcx.load_range_assert(llval, 0, 2, llvm::False); + llval = bcx.trunc(llval, arg.original_ty); + } else if let Some(ty) = arg.cast { + llval = bcx.load(bcx.pointercast(llval, ty.ptr_to())); + let llalign = llalign_of_min(bcx.ccx(), arg.ty); + unsafe { + llvm::LLVMSetAlignment(llval, llalign); + } + } else { + llval = bcx.load(llval); + } + } + + llargs.push(llval); + } + + fn trans_arguments_untupled(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + fn_ty: &FnType, + next_idx: &mut usize, + callee: &mut CalleeData) { + let tuple = self.trans_operand(bcx, operand); + + let arg_types = match tuple.ty.sty { + ty::TyTuple(ref tys) => tys, + _ => span_bug!(self.mir.span, + "bad final argument to \"rust-call\" fn {:?}", tuple.ty) + }; + + // Handle both by-ref and immediate tuples. 
+ match tuple.val { + Ref(llval) => { + let base = adt::MaybeSizedValue::sized(llval); + for (n, &ty) in arg_types.iter().enumerate() { + let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n); + let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { + let (lldata, llextra) = base::load_fat_ptr_builder(bcx, ptr, ty); + Pair(lldata, llextra) + } else { + // trans_argument will load this if it needs to + Ref(ptr) + }; + let op = OperandRef { + val: val, + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee); + } + + } + Immediate(llval) => { + for (n, &ty) in arg_types.iter().enumerate() { + let mut elem = bcx.extract_value(llval, n); + // Truncate bools to i1, if needed + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + } + // If the tuple is immediate, the elements are as well + let op = OperandRef { + val: Immediate(elem), + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee); + } + } + Pair(a, b) => { + let elems = [a, b]; + for (n, &ty) in arg_types.iter().enumerate() { + let mut elem = elems[n]; + // Truncate bools to i1, if needed + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + } + // Pair is always made up of immediates + let op = OperandRef { + val: Immediate(elem), + ty: ty + }; + self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee); + } + } + } + + } + + fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef { + let ccx = bcx.ccx(); + if let Some(slot) = self.llpersonalityslot { + slot + } else { + let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); + bcx.with_block(|bcx| { + let slot = base::alloca(bcx, llretty, "personalityslot"); + self.llpersonalityslot = Some(slot); + base::call_lifetime_start(bcx, slot); + slot + }) + } + } + + /// Return the landingpad wrapper around the given basic block + /// + 
/// No-op in MSVC SEH scheme. + fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx> + { + if let Some(block) = self.landing_pads[target_bb] { + return block; + } + + if base::wants_msvc_seh(self.fcx.ccx.sess()) { + return self.blocks[target_bb]; + } + + let target = self.bcx(target_bb); + + let block = self.fcx.new_block("cleanup"); + self.landing_pads[target_bb] = Some(block); + + let bcx = block.build(); + let ccx = bcx.ccx(); + let llpersonality = self.fcx.eh_personality(); + let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); + let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); + bcx.set_cleanup(llretval); + let slot = self.get_personality_slot(&bcx); + bcx.store(llretval, slot); + bcx.br(target.llbb()); + block + } + + pub fn init_cpad(&mut self, bb: mir::BasicBlock) { + let bcx = self.bcx(bb); + let data = &self.mir[bb]; + debug!("init_cpad({:?})", data); + + match self.cleanup_kinds[bb] { + CleanupKind::NotCleanup => { + bcx.set_lpad(None) + } + _ if !base::wants_msvc_seh(bcx.sess()) => { + bcx.set_lpad(Some(LandingPad::gnu())) + } + CleanupKind::Internal { funclet } => { + // FIXME: is this needed? 
+ bcx.set_personality_fn(self.fcx.eh_personality()); + bcx.set_lpad_ref(self.bcx(funclet).lpad()); + } + CleanupKind::Funclet => { + bcx.set_personality_fn(self.fcx.eh_personality()); + DebugLoc::None.apply_to_bcx(&bcx); + let cleanup_pad = bcx.cleanup_pad(None, &[]); + bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad))); + } + }; + } + + fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { + self.unreachable_block.unwrap_or_else(|| { + let bl = self.fcx.new_block("unreachable"); + bl.build().unreachable(); + self.unreachable_block = Some(bl); + bl + }) + } + + fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> { + self.blocks[bb].build() + } + + fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, + llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { + // If the return is ignored, we can just return a do-nothing ReturnDest + if fn_ret_ty.is_ignore() { + return ReturnDest::Nothing; + } + let dest = if let mir::Lvalue::Local(index) = *dest { + let ret_ty = self.monomorphized_lvalue_ty(dest); + match self.locals[index] { + LocalRef::Lvalue(dest) => dest, + LocalRef::Operand(None) => { + // Handle temporary lvalues, specifically Operand ones, as + // they don't have allocas + return if fn_ret_ty.is_indirect() { + // Odd, but possible, case, we have an operand temporary, + // but the calling convention has an indirect return. + let tmp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, ret_ty, "tmp_ret") + }); + llargs.push(tmp); + ReturnDest::IndirectOperand(tmp, index) + } else if is_intrinsic { + // Currently, intrinsics always need a location to store + // the result. 
so we create a temporary alloca for the + // result + let tmp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, ret_ty, "tmp_ret") + }); + ReturnDest::IndirectOperand(tmp, index) + } else { + ReturnDest::DirectOperand(index) + }; + } + LocalRef::Operand(Some(_)) => { + bug!("lvalue local already assigned to"); + } + } + } else { + self.trans_lvalue(bcx, dest) + }; + if fn_ret_ty.is_indirect() { + llargs.push(dest.llval); + ReturnDest::Nothing + } else { + ReturnDest::Store(dest.llval) + } + } + + fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { + let mut val = self.trans_operand(bcx, src); + if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { + let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx())); + let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype); + if out_type_size != 0 { + // FIXME #19925 Remove this hack after a release cycle. + let f = Callee::def(bcx.ccx(), def_id, substs); + let ty = match f.ty.sty { + ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), + _ => f.ty + }; + val = OperandRef { + val: Immediate(f.reify(bcx.ccx())), + ty: ty + }; + } + } + + let llty = type_of::type_of(bcx.ccx(), val.ty); + let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); + self.store_operand(bcx, cast_ptr, val); + } + + + // Stores the return value of a function call into it's final location. + fn store_return(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + dest: ReturnDest, + ret_ty: ArgType, + op: OperandRef<'tcx>) { + use self::ReturnDest::*; + + match dest { + Nothing => (), + Store(dst) => ret_ty.store(bcx, op.immediate(), dst), + IndirectOperand(tmp, index) => { + let op = self.trans_load(bcx, tmp, op.ty); + self.locals[index] = LocalRef::Operand(Some(op)); + } + DirectOperand(index) => { + // If there is a cast, we have to store and reload. 
+ let op = if ret_ty.cast.is_some() { + let tmp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, op.ty, "tmp_ret") + }); + ret_ty.store(bcx, op.immediate(), tmp); + self.trans_load(bcx, tmp, op.ty) + } else { + op.unpack_if_pair(bcx) + }; + self.locals[index] = LocalRef::Operand(Some(op)); + } + } + } +} + +enum ReturnDest { + // Do nothing, the return value is indirect or ignored + Nothing, + // Store the return value to the pointer + Store(ValueRef), + // Stores an indirect return value to an operand local lvalue + IndirectOperand(ValueRef, mir::Local), + // Stores a direct return value to an operand local lvalue + DirectOperand(mir::Local) +} diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs new file mode 100644 index 0000000000000..bca81fa36458f --- /dev/null +++ b/src/librustc_trans/mir/constant.rs @@ -0,0 +1,995 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use llvm::{self, ValueRef}; +use rustc::middle::const_val::ConstVal; +use rustc_const_eval::{ErrKind, ConstEvalErr, report_const_eval_err}; +use rustc_const_math::ConstInt::*; +use rustc_const_math::ConstFloat::*; +use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstMathErr}; +use rustc::hir::def_id::DefId; +use rustc::infer::TransNormalize; +use rustc::mir; +use rustc::mir::tcx::LvalueTy; +use rustc::traits; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::cast::{CastTy, IntTy}; +use rustc::ty::subst::Substs; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use {abi, adt, base, Disr, machine}; +use callee::Callee; +use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized}; +use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; +use common::{const_to_opt_int, const_to_opt_uint}; +use consts; +use monomorphize::{self, Instance}; +use type_of; +use type_::Type; +use value::Value; + +use syntax::ast; +use syntax_pos::{Span, DUMMY_SP}; + +use std::fmt; +use std::ptr; + +use super::operand::{OperandRef, OperandValue}; +use super::MirContext; + +/// A sized constant rvalue. +/// The LLVM type might not be the same for a single Rust type, +/// e.g. each enum variant would have its own LLVM struct type. +#[derive(Copy, Clone)] +pub struct Const<'tcx> { + pub llval: ValueRef, + pub ty: Ty<'tcx> +} + +impl<'tcx> Const<'tcx> { + pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> { + Const { + llval: llval, + ty: ty + } + } + + /// Translate ConstVal into a LLVM constant value. 
+ pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, + cv: ConstVal, + ty: Ty<'tcx>) + -> Const<'tcx> { + let llty = type_of::type_of(ccx, ty); + let val = match cv { + ConstVal::Float(F32(v)) => C_floating_f64(v as f64, llty), + ConstVal::Float(F64(v)) => C_floating_f64(v, llty), + ConstVal::Float(FInfer {..}) => bug!("MIR must not use `{:?}`", cv), + ConstVal::Bool(v) => C_bool(ccx, v), + ConstVal::Integral(I8(v)) => C_integral(Type::i8(ccx), v as u64, true), + ConstVal::Integral(I16(v)) => C_integral(Type::i16(ccx), v as u64, true), + ConstVal::Integral(I32(v)) => C_integral(Type::i32(ccx), v as u64, true), + ConstVal::Integral(I64(v)) => C_integral(Type::i64(ccx), v as u64, true), + ConstVal::Integral(Isize(v)) => { + let i = v.as_i64(ccx.tcx().sess.target.int_type); + C_integral(Type::int(ccx), i as u64, true) + }, + ConstVal::Integral(U8(v)) => C_integral(Type::i8(ccx), v as u64, false), + ConstVal::Integral(U16(v)) => C_integral(Type::i16(ccx), v as u64, false), + ConstVal::Integral(U32(v)) => C_integral(Type::i32(ccx), v as u64, false), + ConstVal::Integral(U64(v)) => C_integral(Type::i64(ccx), v, false), + ConstVal::Integral(Usize(v)) => { + let u = v.as_u64(ccx.tcx().sess.target.uint_type); + C_integral(Type::int(ccx), u, false) + }, + ConstVal::Integral(Infer(_)) | + ConstVal::Integral(InferSigned(_)) => bug!("MIR must not use `{:?}`", cv), + ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), + ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"), + ConstVal::Struct(_) | ConstVal::Tuple(_) | + ConstVal::Array(..) | ConstVal::Repeat(..) 
| + ConstVal::Function(_) => { + bug!("MIR must not use `{:?}` (which refers to a local ID)", cv) + } + ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false), + ConstVal::Dummy => bug!(), + }; + + assert!(!ty.has_erasable_regions()); + + Const::new(val, ty) + } + + fn get_pair(&self) -> (ValueRef, ValueRef) { + (const_get_elt(self.llval, &[0]), + const_get_elt(self.llval, &[1])) + } + + fn get_fat_ptr(&self) -> (ValueRef, ValueRef) { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.get_pair() + } + + fn as_lvalue(&self) -> ConstLvalue<'tcx> { + ConstLvalue { + base: Base::Value(self.llval), + llextra: ptr::null_mut(), + ty: self.ty + } + } + + pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { + let llty = type_of::immediate_type_of(ccx, self.ty); + let llvalty = val_ty(self.llval); + + let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { + let (a, b) = self.get_pair(); + OperandValue::Pair(a, b) + } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) { + // If the types match, we can use the value directly. + OperandValue::Immediate(self.llval) + } else { + // Otherwise, or if the value is not immediate, we create + // a constant LLVM global and cast its address if necessary. + let align = type_of::align_of(ccx, self.ty); + let ptr = consts::addr_of(ccx, self.llval, align, "const"); + OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to())) + }; + + OperandRef { + val: val, + ty: self.ty + } + } +} + +impl<'tcx> fmt::Debug for Const<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Const({:?}: {:?})", Value(self.llval), self.ty) + } +} + +#[derive(Copy, Clone)] +enum Base { + /// A constant value without an unique address. + Value(ValueRef), + + /// String literal base pointer (cast from array). + Str(ValueRef), + + /// The address of a static. + Static(ValueRef) +} + +/// An lvalue as seen from a constant. 
+#[derive(Copy, Clone)] +struct ConstLvalue<'tcx> { + base: Base, + llextra: ValueRef, + ty: Ty<'tcx> +} + +impl<'tcx> ConstLvalue<'tcx> { + fn to_const(&self, span: Span) -> Const<'tcx> { + match self.base { + Base::Value(val) => Const::new(val, self.ty), + Base::Str(ptr) => { + span_bug!(span, "loading from `str` ({:?}) in constant", + Value(ptr)) + } + Base::Static(val) => { + span_bug!(span, "loading from `static` ({:?}) in constant", + Value(val)) + } + } + } + + pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + match self.ty.sty { + ty::TyArray(_, n) => C_uint(ccx, n), + ty::TySlice(_) | ty::TyStr => { + assert!(self.llextra != ptr::null_mut()); + self.llextra + } + _ => bug!("unexpected type `{}` in ConstLvalue::len", self.ty) + } + } +} + +/// Machinery for translating a constant's MIR to LLVM values. +/// FIXME(eddyb) use miri and lower its allocations to LLVM. +struct MirConstContext<'a, 'tcx: 'a> { + ccx: &'a CrateContext<'a, 'tcx>, + mir: &'a mir::Mir<'tcx>, + + /// Type parameters for const fn and associated constants. + substs: &'tcx Substs<'tcx>, + + /// Values of locals in a constant or const fn. + locals: IndexVec>> +} + + +impl<'a, 'tcx> MirConstContext<'a, 'tcx> { + fn new(ccx: &'a CrateContext<'a, 'tcx>, + mir: &'a mir::Mir<'tcx>, + substs: &'tcx Substs<'tcx>, + args: IndexVec>) + -> MirConstContext<'a, 'tcx> { + let mut context = MirConstContext { + ccx: ccx, + mir: mir, + substs: substs, + locals: (0..mir.local_decls.len()).map(|_| None).collect(), + }; + for (i, arg) in args.into_iter().enumerate() { + // Locals after local 0 are the function arguments + let index = mir::Local::new(i + 1); + context.locals[index] = Some(arg); + } + context + } + + fn trans_def(ccx: &'a CrateContext<'a, 'tcx>, + mut instance: Instance<'tcx>, + args: IndexVec>) + -> Result, ConstEvalErr> { + // Try to resolve associated constants. 
+ if let Some(trait_id) = ccx.tcx().trait_of_item(instance.def) { + let trait_ref = ty::TraitRef::new(trait_id, instance.substs); + let trait_ref = ty::Binder(trait_ref); + let vtable = common::fulfill_obligation(ccx.shared(), DUMMY_SP, trait_ref); + if let traits::VtableImpl(vtable_impl) = vtable { + let name = ccx.tcx().item_name(instance.def); + let ac = ccx.tcx().associated_items(vtable_impl.impl_def_id) + .find(|item| item.kind == ty::AssociatedKind::Const && item.name == name); + if let Some(ac) = ac { + instance = Instance::new(ac.def_id, vtable_impl.substs); + } + } + } + + let mir = ccx.tcx().item_mir(instance.def); + MirConstContext::new(ccx, &mir, instance.substs, args).trans() + } + + fn monomorphize(&self, value: &T) -> T + where T: TransNormalize<'tcx> + { + monomorphize::apply_param_substs(self.ccx.shared(), + self.substs, + value) + } + + fn trans(&mut self) -> Result, ConstEvalErr> { + let tcx = self.ccx.tcx(); + let mut bb = mir::START_BLOCK; + + // Make sure to evaluate all statemenets to + // report as many errors as we possibly can. + let mut failure = Ok(()); + + loop { + let data = &self.mir[bb]; + for statement in &data.statements { + let span = statement.source_info.span; + match statement.kind { + mir::StatementKind::Assign(ref dest, ref rvalue) => { + let ty = dest.ty(self.mir, tcx); + let ty = self.monomorphize(&ty).to_ty(tcx); + match self.const_rvalue(rvalue, ty, span) { + Ok(value) => self.store(dest, value, span), + Err(err) => if failure.is_ok() { failure = Err(err); } + } + } + mir::StatementKind::StorageLive(_) | + mir::StatementKind::StorageDead(_) | + mir::StatementKind::Nop => {} + mir::StatementKind::SetDiscriminant{ .. } => { + span_bug!(span, "SetDiscriminant should not appear in constants?"); + } + } + } + + let terminator = data.terminator(); + let span = terminator.source_info.span; + bb = match terminator.kind { + mir::TerminatorKind::Drop { target, .. } | // No dropping. 
+ mir::TerminatorKind::Goto { target } => target, + mir::TerminatorKind::Return => { + failure?; + return Ok(self.locals[mir::RETURN_POINTER].unwrap_or_else(|| { + span_bug!(span, "no returned value in constant"); + })); + } + + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, .. } => { + let cond = self.const_operand(cond, span)?; + let cond_bool = common::const_to_uint(cond.llval) != 0; + if cond_bool != expected { + let err = match *msg { + mir::AssertMessage::BoundsCheck { ref len, ref index } => { + let len = self.const_operand(len, span)?; + let index = self.const_operand(index, span)?; + ErrKind::IndexOutOfBounds { + len: common::const_to_uint(len.llval), + index: common::const_to_uint(index.llval) + } + } + mir::AssertMessage::Math(ref err) => { + ErrKind::Math(err.clone()) + } + }; + + let err = ConstEvalErr{ span: span, kind: err }; + report_const_eval_err(tcx, &err, span, "expression").emit(); + failure = Err(err); + } + target + } + + mir::TerminatorKind::Call { ref func, ref args, ref destination, .. 
} => { + let fn_ty = func.ty(self.mir, tcx); + let fn_ty = self.monomorphize(&fn_ty); + let instance = match fn_ty.sty { + ty::TyFnDef(def_id, substs, _) => { + Instance::new(def_id, substs) + } + _ => span_bug!(span, "calling {:?} (of type {}) in constant", + func, fn_ty) + }; + + let mut const_args = IndexVec::with_capacity(args.len()); + for arg in args { + match self.const_operand(arg, span) { + Ok(arg) => { const_args.push(arg); }, + Err(err) => if failure.is_ok() { failure = Err(err); } + } + } + if let Some((ref dest, target)) = *destination { + match MirConstContext::trans_def(self.ccx, instance, const_args) { + Ok(value) => self.store(dest, value, span), + Err(err) => if failure.is_ok() { failure = Err(err); } + } + target + } else { + span_bug!(span, "diverging {:?} in constant", terminator.kind); + } + } + _ => span_bug!(span, "{:?} in constant", terminator.kind) + }; + } + } + + fn store(&mut self, dest: &mir::Lvalue<'tcx>, value: Const<'tcx>, span: Span) { + if let mir::Lvalue::Local(index) = *dest { + self.locals[index] = Some(value); + } else { + span_bug!(span, "assignment to {:?} in constant", dest); + } + } + + fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) + -> Result, ConstEvalErr> { + let tcx = self.ccx.tcx(); + + if let mir::Lvalue::Local(index) = *lvalue { + return Ok(self.locals[index].unwrap_or_else(|| { + span_bug!(span, "{:?} not initialized", lvalue) + }).as_lvalue()); + } + + let lvalue = match *lvalue { + mir::Lvalue::Local(_) => bug!(), // handled above + mir::Lvalue::Static(def_id) => { + ConstLvalue { + base: Base::Static(consts::get_static(self.ccx, def_id)), + llextra: ptr::null_mut(), + ty: lvalue.ty(self.mir, tcx).to_ty(tcx) + } + } + mir::Lvalue::Projection(ref projection) => { + let tr_base = self.const_lvalue(&projection.base, span)?; + let projected_ty = LvalueTy::Ty { ty: tr_base.ty } + .projection_ty(tcx, &projection.elem); + let base = tr_base.to_const(span); + let projected_ty = 
self.monomorphize(&projected_ty).to_ty(tcx); + let is_sized = common::type_is_sized(tcx, projected_ty); + + let (projected, llextra) = match projection.elem { + mir::ProjectionElem::Deref => { + let (base, extra) = if is_sized { + (base.llval, ptr::null_mut()) + } else { + base.get_fat_ptr() + }; + if self.ccx.statics().borrow().contains_key(&base) { + (Base::Static(base), extra) + } else if let ty::TyStr = projected_ty.sty { + (Base::Str(base), extra) + } else { + let v = base; + let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v); + let mut val = unsafe { llvm::LLVMGetInitializer(v) }; + if val.is_null() { + span_bug!(span, "dereference of non-constant pointer `{:?}`", + Value(base)); + } + if projected_ty.is_bool() { + unsafe { + val = llvm::LLVMConstTrunc(val, Type::i1(self.ccx).to_ref()); + } + } + (Base::Value(val), extra) + } + } + mir::ProjectionElem::Field(ref field, _) => { + let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval, + Disr(0), field.index()); + let llextra = if is_sized { + ptr::null_mut() + } else { + tr_base.llextra + }; + (Base::Value(llprojected), llextra) + } + mir::ProjectionElem::Index(ref index) => { + let llindex = self.const_operand(index, span)?.llval; + + let iv = if let Some(iv) = common::const_to_opt_uint(llindex) { + iv + } else { + span_bug!(span, "index is not an integer-constant expression") + }; + + // Produce an undef instead of a LLVM assertion on OOB. 
+ let len = common::const_to_uint(tr_base.len(self.ccx)); + let llelem = if iv < len { + const_get_elt(base.llval, &[iv as u32]) + } else { + C_undef(type_of::type_of(self.ccx, projected_ty)) + }; + + (Base::Value(llelem), ptr::null_mut()) + } + _ => span_bug!(span, "{:?} in constant", projection.elem) + }; + ConstLvalue { + base: projected, + llextra: llextra, + ty: projected_ty + } + } + }; + Ok(lvalue) + } + + fn const_operand(&self, operand: &mir::Operand<'tcx>, span: Span) + -> Result, ConstEvalErr> { + debug!("const_operand({:?} @ {:?})", operand, span); + let result = match *operand { + mir::Operand::Consume(ref lvalue) => { + Ok(self.const_lvalue(lvalue, span)?.to_const(span)) + } + + mir::Operand::Constant(ref constant) => { + let ty = self.monomorphize(&constant.ty); + match constant.literal.clone() { + mir::Literal::Item { def_id, substs } => { + // Shortcut for zero-sized types, including function item + // types, which would not work with MirConstContext. + if common::type_is_zero_size(self.ccx, ty) { + let llty = type_of::type_of(self.ccx, ty); + return Ok(Const::new(C_null(llty), ty)); + } + + let substs = self.monomorphize(&substs); + let instance = Instance::new(def_id, substs); + MirConstContext::trans_def(self.ccx, instance, IndexVec::new()) + } + mir::Literal::Promoted { index } => { + let mir = &self.mir.promoted[index]; + MirConstContext::new(self.ccx, mir, self.substs, IndexVec::new()).trans() + } + mir::Literal::Value { value } => { + Ok(Const::from_constval(self.ccx, value, ty)) + } + } + } + }; + debug!("const_operand({:?} @ {:?}) = {:?}", operand, span, + result.as_ref().ok()); + result + } + + fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef]) + -> Const<'tcx> + { + let elem_ty = array_ty.builtin_index().unwrap_or_else(|| { + bug!("bad array type {:?}", array_ty) + }); + let llunitty = type_of::type_of(self.ccx, elem_ty); + // If the array contains enums, an LLVM array won't work. 
+ let val = if fields.iter().all(|&f| val_ty(f) == llunitty) { + C_array(llunitty, fields) + } else { + C_struct(self.ccx, fields, false) + }; + Const::new(val, array_ty) + } + + fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, + dest_ty: Ty<'tcx>, span: Span) + -> Result, ConstEvalErr> { + let tcx = self.ccx.tcx(); + debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span); + let val = match *rvalue { + mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?, + + mir::Rvalue::Repeat(ref elem, ref count) => { + let elem = self.const_operand(elem, span)?; + let size = count.value.as_u64(tcx.sess.target.uint_type); + let fields = vec![elem.llval; size as usize]; + self.const_array(dest_ty, &fields) + } + + mir::Rvalue::Aggregate(ref kind, ref operands) => { + // Make sure to evaluate all operands to + // report as many errors as we possibly can. + let mut fields = Vec::with_capacity(operands.len()); + let mut failure = Ok(()); + for operand in operands { + match self.const_operand(operand, span) { + Ok(val) => fields.push(val.llval), + Err(err) => if failure.is_ok() { failure = Err(err); } + } + } + failure?; + + match *kind { + mir::AggregateKind::Array => { + self.const_array(dest_ty, &fields) + } + mir::AggregateKind::Adt(..) | + mir::AggregateKind::Closure(..) 
| + mir::AggregateKind::Tuple => { + let disr = match *kind { + mir::AggregateKind::Adt(adt_def, index, _, _) => { + Disr::from(adt_def.variants[index].disr_val) + } + _ => Disr(0) + }; + Const::new( + adt::trans_const(self.ccx, dest_ty, disr, &fields), + dest_ty + ) + } + } + } + + mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { + let operand = self.const_operand(source, span)?; + let cast_ty = self.monomorphize(&cast_ty); + + let val = match *kind { + mir::CastKind::ReifyFnPointer => { + match operand.ty.sty { + ty::TyFnDef(def_id, substs, _) => { + Callee::def(self.ccx, def_id, substs) + .reify(self.ccx) + } + _ => { + span_bug!(span, "{} cannot be reified to a fn ptr", + operand.ty) + } + } + } + mir::CastKind::UnsafeFnPointer => { + // this is a no-op at the LLVM level + operand.llval + } + mir::CastKind::Unsize => { + // unsize targets other than to a fat pointer currently + // can't be in constants. + assert!(common::type_is_fat_ptr(tcx, cast_ty)); + + let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference) + .expect("consts: unsizing got non-pointer type").ty; + let (base, old_info) = if !common::type_is_sized(tcx, pointee_ty) { + // Normally, the source is a thin pointer and we are + // adding extra info to make a fat pointer. The exception + // is when we are upcasting an existing object fat pointer + // to use a different vtable. In that case, we want to + // load out the original data pointer so we can repackage + // it. 
+ let (base, extra) = operand.get_fat_ptr(); + (base, Some(extra)) + } else { + (operand.llval, None) + }; + + let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference) + .expect("consts: unsizing got non-pointer target type").ty; + let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to(); + let base = consts::ptrcast(base, ptr_ty); + let info = base::unsized_info(self.ccx, pointee_ty, + unsized_ty, old_info); + + if old_info.is_none() { + let prev_const = self.ccx.const_unsized().borrow_mut() + .insert(base, operand.llval); + assert!(prev_const.is_none() || prev_const == Some(operand.llval)); + } + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + C_struct(self.ccx, &[base, info], false) + } + mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => { + debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); + let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); + let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty); + let llval = operand.llval; + let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { + let l = self.ccx.layout_of(operand.ty); + adt::is_discr_signed(&l) + } else { + operand.ty.is_signed() + }; + + unsafe { + match (r_t_in, r_t_out) { + (CastTy::Int(_), CastTy::Int(_)) => { + let s = signed as llvm::Bool; + llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s) + } + (CastTy::Int(_), CastTy::Float) => { + if signed { + llvm::LLVMConstSIToFP(llval, ll_t_out.to_ref()) + } else { + llvm::LLVMConstUIToFP(llval, ll_t_out.to_ref()) + } + } + (CastTy::Float, CastTy::Float) => { + llvm::LLVMConstFPCast(llval, ll_t_out.to_ref()) + } + (CastTy::Float, CastTy::Int(IntTy::I)) => { + llvm::LLVMConstFPToSI(llval, ll_t_out.to_ref()) + } + (CastTy::Float, CastTy::Int(_)) => { + llvm::LLVMConstFPToUI(llval, ll_t_out.to_ref()) + } + (CastTy::Ptr(_), CastTy::Ptr(_)) | + (CastTy::FnPtr, 
CastTy::Ptr(_)) | + (CastTy::RPtr(_), CastTy::Ptr(_)) => { + consts::ptrcast(llval, ll_t_out) + } + (CastTy::Int(_), CastTy::Ptr(_)) => { + llvm::LLVMConstIntToPtr(llval, ll_t_out.to_ref()) + } + (CastTy::Ptr(_), CastTy::Int(_)) | + (CastTy::FnPtr, CastTy::Int(_)) => { + llvm::LLVMConstPtrToInt(llval, ll_t_out.to_ref()) + } + _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) + } + } + } + mir::CastKind::Misc => { // Casts from a fat-ptr. + let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); + let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); + if common::type_is_fat_ptr(tcx, operand.ty) { + let (data_ptr, meta_ptr) = operand.get_fat_ptr(); + if common::type_is_fat_ptr(tcx, cast_ty) { + let ll_cft = ll_cast_ty.field_types(); + let ll_fft = ll_from_ty.field_types(); + let data_cast = consts::ptrcast(data_ptr, ll_cft[0]); + assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); + C_struct(self.ccx, &[data_cast, meta_ptr], false) + } else { // cast to thin-ptr + // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and + // pointer-cast of that pointer to desired pointer type. 
+ consts::ptrcast(data_ptr, ll_cast_ty) + } + } else { + bug!("Unexpected non-fat-pointer operand") + } + } + }; + Const::new(val, cast_ty) + } + + mir::Rvalue::Ref(_, bk, ref lvalue) => { + let tr_lvalue = self.const_lvalue(lvalue, span)?; + + let ty = tr_lvalue.ty; + let ref_ty = tcx.mk_ref(tcx.mk_region(ty::ReErased), + ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }); + + let base = match tr_lvalue.base { + Base::Value(llval) => { + // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug) + let align = if type_is_sized(self.ccx.tcx(), ty) { + type_of::align_of(self.ccx, ty) + } else { + self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + }; + if bk == mir::BorrowKind::Mut { + consts::addr_of_mut(self.ccx, llval, align, "ref_mut") + } else { + consts::addr_of(self.ccx, llval, align, "ref") + } + } + Base::Str(llval) | + Base::Static(llval) => llval + }; + + let ptr = if common::type_is_sized(tcx, ty) { + base + } else { + C_struct(self.ccx, &[base, tr_lvalue.llextra], false) + }; + Const::new(ptr, ref_ty) + } + + mir::Rvalue::Len(ref lvalue) => { + let tr_lvalue = self.const_lvalue(lvalue, span)?; + Const::new(tr_lvalue.len(self.ccx), tcx.types.usize) + } + + mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { + let lhs = self.const_operand(lhs, span)?; + let rhs = self.const_operand(rhs, span)?; + let ty = lhs.ty; + let binop_ty = op.ty(tcx, lhs.ty, rhs.ty); + let (lhs, rhs) = (lhs.llval, rhs.llval); + Const::new(const_scalar_binop(op, lhs, rhs, ty), binop_ty) + } + + mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { + let lhs = self.const_operand(lhs, span)?; + let rhs = self.const_operand(rhs, span)?; + let ty = lhs.ty; + let val_ty = op.ty(tcx, lhs.ty, rhs.ty); + let binop_ty = tcx.intern_tup(&[val_ty, tcx.types.bool]); + let (lhs, rhs) = (lhs.llval, rhs.llval); + assert!(!ty.is_fp()); + + match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) { + Some((llval, of)) => { + let llof = C_bool(self.ccx, of); + 
Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty) + } + None => { + span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}", + rvalue, Value(lhs), Value(rhs)); + } + } + } + + mir::Rvalue::UnaryOp(op, ref operand) => { + let operand = self.const_operand(operand, span)?; + let lloperand = operand.llval; + let llval = match op { + mir::UnOp::Not => { + unsafe { + llvm::LLVMConstNot(lloperand) + } + } + mir::UnOp::Neg => { + let is_float = operand.ty.is_fp(); + unsafe { + if is_float { + llvm::LLVMConstFNeg(lloperand) + } else { + llvm::LLVMConstNeg(lloperand) + } + } + } + }; + Const::new(llval, operand.ty) + } + + _ => span_bug!(span, "{:?} in constant", rvalue) + }; + + debug!("const_rvalue({:?}: {:?} @ {:?}) = {:?}", rvalue, dest_ty, span, val); + + Ok(val) + } + +} + +fn to_const_int(value: ValueRef, t: Ty, tcx: TyCtxt) -> Option { + match t.sty { + ty::TyInt(int_type) => const_to_opt_int(value).and_then(|input| match int_type { + ast::IntTy::I8 => { + assert_eq!(input as i8 as i64, input); + Some(ConstInt::I8(input as i8)) + }, + ast::IntTy::I16 => { + assert_eq!(input as i16 as i64, input); + Some(ConstInt::I16(input as i16)) + }, + ast::IntTy::I32 => { + assert_eq!(input as i32 as i64, input); + Some(ConstInt::I32(input as i32)) + }, + ast::IntTy::I64 => { + Some(ConstInt::I64(input)) + }, + ast::IntTy::Is => { + ConstIsize::new(input, tcx.sess.target.int_type) + .ok().map(ConstInt::Isize) + }, + }), + ty::TyUint(uint_type) => const_to_opt_uint(value).and_then(|input| match uint_type { + ast::UintTy::U8 => { + assert_eq!(input as u8 as u64, input); + Some(ConstInt::U8(input as u8)) + }, + ast::UintTy::U16 => { + assert_eq!(input as u16 as u64, input); + Some(ConstInt::U16(input as u16)) + }, + ast::UintTy::U32 => { + assert_eq!(input as u32 as u64, input); + Some(ConstInt::U32(input as u32)) + }, + ast::UintTy::U64 => { + Some(ConstInt::U64(input)) + }, + ast::UintTy::Us => { + ConstUsize::new(input, tcx.sess.target.uint_type) + 
.ok().map(ConstInt::Usize) + }, + }), + _ => None, + } +} + +pub fn const_scalar_binop(op: mir::BinOp, + lhs: ValueRef, + rhs: ValueRef, + input_ty: Ty) -> ValueRef { + assert!(!input_ty.is_simd()); + let is_float = input_ty.is_fp(); + let signed = input_ty.is_signed(); + + unsafe { + match op { + mir::BinOp::Add if is_float => llvm::LLVMConstFAdd(lhs, rhs), + mir::BinOp::Add => llvm::LLVMConstAdd(lhs, rhs), + + mir::BinOp::Sub if is_float => llvm::LLVMConstFSub(lhs, rhs), + mir::BinOp::Sub => llvm::LLVMConstSub(lhs, rhs), + + mir::BinOp::Mul if is_float => llvm::LLVMConstFMul(lhs, rhs), + mir::BinOp::Mul => llvm::LLVMConstMul(lhs, rhs), + + mir::BinOp::Div if is_float => llvm::LLVMConstFDiv(lhs, rhs), + mir::BinOp::Div if signed => llvm::LLVMConstSDiv(lhs, rhs), + mir::BinOp::Div => llvm::LLVMConstUDiv(lhs, rhs), + + mir::BinOp::Rem if is_float => llvm::LLVMConstFRem(lhs, rhs), + mir::BinOp::Rem if signed => llvm::LLVMConstSRem(lhs, rhs), + mir::BinOp::Rem => llvm::LLVMConstURem(lhs, rhs), + + mir::BinOp::BitXor => llvm::LLVMConstXor(lhs, rhs), + mir::BinOp::BitAnd => llvm::LLVMConstAnd(lhs, rhs), + mir::BinOp::BitOr => llvm::LLVMConstOr(lhs, rhs), + mir::BinOp::Shl => { + let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs); + llvm::LLVMConstShl(lhs, rhs) + } + mir::BinOp::Shr => { + let rhs = base::cast_shift_const_rhs(op.to_hir_binop(), lhs, rhs); + if signed { llvm::LLVMConstAShr(lhs, rhs) } + else { llvm::LLVMConstLShr(lhs, rhs) } + } + mir::BinOp::Eq | mir::BinOp::Ne | + mir::BinOp::Lt | mir::BinOp::Le | + mir::BinOp::Gt | mir::BinOp::Ge => { + if is_float { + let cmp = base::bin_op_to_fcmp_predicate(op.to_hir_binop()); + llvm::LLVMConstFCmp(cmp, lhs, rhs) + } else { + let cmp = base::bin_op_to_icmp_predicate(op.to_hir_binop(), + signed); + llvm::LLVMConstICmp(cmp, lhs, rhs) + } + } + } + } +} + +pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + op: mir::BinOp, + lllhs: ValueRef, + llrhs: ValueRef, + input_ty: Ty<'tcx>) 
+ -> Option<(ValueRef, bool)> { + if let (Some(lhs), Some(rhs)) = (to_const_int(lllhs, input_ty, tcx), + to_const_int(llrhs, input_ty, tcx)) { + let result = match op { + mir::BinOp::Add => lhs + rhs, + mir::BinOp::Sub => lhs - rhs, + mir::BinOp::Mul => lhs * rhs, + mir::BinOp::Shl => lhs << rhs, + mir::BinOp::Shr => lhs >> rhs, + _ => { + bug!("Operator `{:?}` is not a checkable operator", op) + } + }; + + let of = match result { + Ok(_) => false, + Err(ConstMathErr::Overflow(_)) | + Err(ConstMathErr::ShiftNegative) => true, + Err(err) => { + bug!("Operator `{:?}` on `{:?}` and `{:?}` errored: {}", + op, lhs, rhs, err.description()); + } + }; + + Some((const_scalar_binop(op, lllhs, llrhs, input_ty), of)) + } else { + None + } +} + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_constant(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + constant: &mir::Constant<'tcx>) + -> Const<'tcx> + { + debug!("trans_constant({:?})", constant); + let ty = bcx.monomorphize(&constant.ty); + let result = match constant.literal.clone() { + mir::Literal::Item { def_id, substs } => { + // Shortcut for zero-sized types, including function item + // types, which would not work with MirConstContext. + if common::type_is_zero_size(bcx.ccx(), ty) { + let llty = type_of::type_of(bcx.ccx(), ty); + return Const::new(C_null(llty), ty); + } + + let substs = bcx.monomorphize(&substs); + let instance = Instance::new(def_id, substs); + MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new()) + } + mir::Literal::Promoted { index } => { + let mir = &self.mir.promoted[index]; + MirConstContext::new(bcx.ccx(), mir, bcx.fcx().param_substs, + IndexVec::new()).trans() + } + mir::Literal::Value { value } => { + Ok(Const::from_constval(bcx.ccx(), value, ty)) + } + }; + + let result = result.unwrap_or_else(|_| { + // We've errored, so we don't have to produce working code. 
+ let llty = type_of::type_of(bcx.ccx(), ty); + Const::new(C_undef(llty), ty) + }); + + debug!("trans_constant({:?}) = {:?}", constant, result); + result + } +} + + +pub fn trans_static_initializer(ccx: &CrateContext, def_id: DefId) + -> Result { + let instance = Instance::mono(ccx.shared(), def_id); + MirConstContext::trans_def(ccx, instance, IndexVec::new()).map(|c| c.llval) +} diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs new file mode 100644 index 0000000000000..d28c466e230ba --- /dev/null +++ b/src/librustc_trans/mir/lvalue.rs @@ -0,0 +1,279 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::ValueRef; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::mir; +use rustc::mir::tcx::LvalueTy; +use rustc_data_structures::indexed_vec::Idx; +use adt; +use base; +use common::{self, BlockAndBuilder, CrateContext, C_uint, C_undef}; +use consts; +use machine; +use type_of::type_of; +use type_of; +use Disr; + +use std::ptr; + +use super::{MirContext, LocalRef}; +use super::operand::OperandValue; + +#[derive(Copy, Clone, Debug)] +pub struct LvalueRef<'tcx> { + /// Pointer to the contents of the lvalue + pub llval: ValueRef, + + /// This lvalue's extra data if it is unsized, or null + pub llextra: ValueRef, + + /// Monomorphized type of this lvalue, including variant information + pub ty: LvalueTy<'tcx>, +} + +impl<'tcx> LvalueRef<'tcx> { + pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { + LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } + } + + pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, + ty: Ty<'tcx>, + name: &str) + -> LvalueRef<'tcx> + { + 
assert!(!ty.has_erasable_regions()); + let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); + LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) + } + + pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { + let ty = self.ty.to_ty(ccx.tcx()); + match ty.sty { + ty::TyArray(_, n) => common::C_uint(ccx, n), + ty::TySlice(_) | ty::TyStr => { + assert!(self.llextra != ptr::null_mut()); + self.llextra + } + _ => bug!("unexpected type `{}` in LvalueRef::len", ty) + } + } +} + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_lvalue(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> LvalueRef<'tcx> { + debug!("trans_lvalue(lvalue={:?})", lvalue); + + let ccx = bcx.ccx(); + let tcx = bcx.tcx(); + + if let mir::Lvalue::Local(index) = *lvalue { + match self.locals[index] { + LocalRef::Lvalue(lvalue) => { + return lvalue; + } + LocalRef::Operand(..) => { + bug!("using operand local {:?} as lvalue", lvalue); + } + } + } + + let result = match *lvalue { + mir::Lvalue::Local(_) => bug!(), // handled above + mir::Lvalue::Static(def_id) => { + let const_ty = self.monomorphized_lvalue_ty(lvalue); + LvalueRef::new_sized(consts::get_static(ccx, def_id), + LvalueTy::from_ty(const_ty)) + }, + mir::Lvalue::Projection(box mir::Projection { + ref base, + elem: mir::ProjectionElem::Deref + }) => { + // Load the pointer from its location. 
+ let ptr = self.trans_consume(bcx, base); + let projected_ty = LvalueTy::from_ty(ptr.ty) + .projection_ty(tcx, &mir::ProjectionElem::Deref); + let projected_ty = bcx.monomorphize(&projected_ty); + let (llptr, llextra) = match ptr.val { + OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), + OperandValue::Pair(llptr, llextra) => (llptr, llextra), + OperandValue::Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty) + }; + LvalueRef { + llval: llptr, + llextra: llextra, + ty: projected_ty, + } + } + mir::Lvalue::Projection(ref projection) => { + let tr_base = self.trans_lvalue(bcx, &projection.base); + let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); + let projected_ty = bcx.monomorphize(&projected_ty); + + let project_index = |llindex| { + let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty { + // Slices already point to the array element type. + bcx.inbounds_gep(tr_base.llval, &[llindex]) + } else { + let zero = common::C_uint(bcx.ccx(), 0u64); + bcx.inbounds_gep(tr_base.llval, &[zero, llindex]) + }; + element + }; + + let (llprojected, llextra) = match projection.elem { + mir::ProjectionElem::Deref => bug!(), + mir::ProjectionElem::Field(ref field, _) => { + let base_ty = tr_base.ty.to_ty(tcx); + let discr = match tr_base.ty { + LvalueTy::Ty { .. 
} => 0, + LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, + }; + let discr = discr as u64; + let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx)); + let base = if is_sized { + adt::MaybeSizedValue::sized(tr_base.llval) + } else { + adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) + }; + let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base, + Disr(discr), field.index()); + let llextra = if is_sized { + ptr::null_mut() + } else { + tr_base.llextra + }; + (llprojected, llextra) + } + mir::ProjectionElem::Index(ref index) => { + let index = self.trans_operand(bcx, index); + (project_index(self.prepare_index(bcx, index.immediate())), ptr::null_mut()) + } + mir::ProjectionElem::ConstantIndex { offset, + from_end: false, + min_length: _ } => { + let lloffset = C_uint(bcx.ccx(), offset); + (project_index(lloffset), ptr::null_mut()) + } + mir::ProjectionElem::ConstantIndex { offset, + from_end: true, + min_length: _ } => { + let lloffset = C_uint(bcx.ccx(), offset); + let lllen = tr_base.len(bcx.ccx()); + let llindex = bcx.sub(lllen, lloffset); + (project_index(llindex), ptr::null_mut()) + } + mir::ProjectionElem::Subslice { from, to } => { + let llindex = C_uint(bcx.ccx(), from); + let llbase = project_index(llindex); + + let base_ty = tr_base.ty.to_ty(bcx.tcx()); + match base_ty.sty { + ty::TyArray(..) => { + // must cast the lvalue pointer type to the new + // array type (*[%_; new_len]). + let base_ty = self.monomorphized_lvalue_ty(lvalue); + let llbasety = type_of::type_of(bcx.ccx(), base_ty).ptr_to(); + let llbase = bcx.pointercast(llbase, llbasety); + (llbase, ptr::null_mut()) + } + ty::TySlice(..) => { + assert!(tr_base.llextra != ptr::null_mut()); + let lllen = bcx.sub(tr_base.llextra, + C_uint(bcx.ccx(), from+to)); + (llbase, lllen) + } + _ => bug!("unexpected type {:?} in Subslice", base_ty) + } + } + mir::ProjectionElem::Downcast(..) 
=> { + (tr_base.llval, tr_base.llextra) + } + }; + LvalueRef { + llval: llprojected, + llextra: llextra, + ty: projected_ty, + } + } + }; + debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result); + result + } + + // Perform an action using the given Lvalue. + // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot + // is created first, then used as an operand to update the Lvalue. + pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + lvalue: &mir::Lvalue<'tcx>, f: F) -> U + where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U + { + if let mir::Lvalue::Local(index) = *lvalue { + match self.locals[index] { + LocalRef::Lvalue(lvalue) => f(self, lvalue), + LocalRef::Operand(None) => { + let lvalue_ty = self.monomorphized_lvalue_ty(lvalue); + let lvalue = LvalueRef::alloca(bcx, + lvalue_ty, + "lvalue_temp"); + let ret = f(self, lvalue); + let op = self.trans_load(bcx, lvalue.llval, lvalue_ty); + self.locals[index] = LocalRef::Operand(Some(op)); + ret + } + LocalRef::Operand(Some(_)) => { + // See comments in LocalRef::new_operand as to why + // we always have Some in a ZST LocalRef::Operand. + let ty = self.monomorphized_lvalue_ty(lvalue); + if common::type_is_zero_size(bcx.ccx(), ty) { + // Pass an undef pointer as no stores can actually occur. + let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to()); + f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty))) + } else { + bug!("Lvalue local already set"); + } + } + } + } else { + let lvalue = self.trans_lvalue(bcx, lvalue); + f(self, lvalue) + } + } + + /// Adjust the bitwidth of an index since LLVM is less forgiving + /// than we are. + /// + /// nmatsakis: is this still necessary? Not sure. 
+ fn prepare_index(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + llindex: ValueRef) + -> ValueRef + { + let ccx = bcx.ccx(); + let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex)); + let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type()); + if index_size < int_size { + bcx.zext(llindex, ccx.int_type()) + } else if index_size > int_size { + bcx.trunc(llindex, ccx.int_type()) + } else { + llindex + } + } + + pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { + let tcx = self.fcx.ccx.tcx(); + let lvalue_ty = lvalue.ty(&self.mir, tcx); + self.fcx.monomorphize(&lvalue_ty.to_ty(tcx)) + } +} diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs new file mode 100644 index 0000000000000..12cbfcef7d26b --- /dev/null +++ b/src/librustc_trans/mir/mod.rs @@ -0,0 +1,541 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use libc::c_uint; +use llvm::{self, ValueRef}; +use rustc::ty; +use rustc::mir; +use rustc::mir::tcx::LvalueTy; +use session::config::FullDebugInfo; +use base; +use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null}; +use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext}; +use machine; +use type_of; + +use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; +use syntax::symbol::keywords; + +use std::cell::Ref; +use std::iter; + +use basic_block::BasicBlock; + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::indexed_vec::{IndexVec, Idx}; + +pub use self::constant::trans_static_initializer; + +use self::lvalue::{LvalueRef}; +use rustc::mir::traversal; + +use self::operand::{OperandRef, OperandValue}; + +/// Master context for translating MIR. +pub struct MirContext<'bcx, 'tcx:'bcx> { + mir: Ref<'tcx, mir::Mir<'tcx>>, + + /// Function context + fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, + + /// When unwinding is initiated, we have to store this personality + /// value somewhere so that we can load it and re-use it in the + /// resume instruction. The personality is (afaik) some kind of + /// value used for C++ unwinding, which must filter by type: we + /// don't really care about it very much. Anyway, this value + /// contains an alloca into which the personality is stored and + /// then later loaded when generating the DIVERGE_BLOCK. + llpersonalityslot: Option, + + /// A `Block` for each MIR `BasicBlock` + blocks: IndexVec>, + + /// The funclet status of each basic block + cleanup_kinds: IndexVec, + + /// This stores the landing-pad block for a given BB, computed lazily on GNU + /// and eagerly on MSVC. + landing_pads: IndexVec>>, + + /// Cached unreachable block + unreachable_block: Option>, + + /// The location where each MIR arg/var/tmp/ret is stored. 
This is + /// usually an `LvalueRef` representing an alloca, but not always: + /// sometimes we can skip the alloca and just store the value + /// directly using an `OperandRef`, which makes for tighter LLVM + /// IR. The conditions for using an `OperandRef` are as follows: + /// + /// - the type of the local must be judged "immediate" by `type_is_immediate` + /// - the operand must never be referenced indirectly + /// - we should not take its address using the `&` operator + /// - nor should it appear in an lvalue path like `tmp.a` + /// - the operand must be defined by an rvalue that can generate immediate + /// values + /// + /// Avoiding allocs can also be important for certain intrinsics, + /// notably `expect`. + locals: IndexVec>, + + /// Debug information for MIR scopes. + scopes: IndexVec, +} + +impl<'blk, 'tcx> MirContext<'blk, 'tcx> { + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { + // Bail out if debug info emission is not enabled. + match self.fcx.debug_context { + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call() + // relies on debug location to obtain span of the call site. + return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata, + source_info.span); + } + FunctionDebugContext::RegularContext(_) =>{} + } + + // In order to have a good line stepping behavior in debugger, we overwrite debug + // locations of macro expansions with that of the outermost expansion site + // (unless the crate is being compiled with `-Z debug-macros`). 
+ if source_info.span.expn_id == NO_EXPANSION || + source_info.span.expn_id == COMMAND_LINE_EXPN || + self.fcx.ccx.sess().opts.debugging_opts.debug_macros { + + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, + source_info.span.lo); + DebugLoc::ScopeAt(scope_metadata, source_info.span) + } else { + let cm = self.fcx.ccx.sess().codemap(); + // Walk up the macro expansion chain until we reach a non-expanded span. + let mut span = source_info.span; + while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN { + if let Some(callsite_span) = cm.with_expn_info(span.expn_id, + |ei| ei.map(|ei| ei.call_site.clone())) { + span = callsite_span; + } else { + break; + } + } + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo); + // Use span of the outermost call site, while keeping the original lexical scope + DebugLoc::ScopeAt(scope_metadata, span) + } + } + + // DILocations inherit source file name from the parent DIScope. Due to macro expansions + // it may so happen that the current span belongs to a different file than the DIScope + // corresponding to span's containing visibility scope. If so, we need to create a DIScope + // "extension" into that file. 
+ fn scope_metadata_for_loc(&self, scope_id: mir::VisibilityScope, pos: BytePos) + -> llvm::debuginfo::DIScope { + let scope_metadata = self.scopes[scope_id].scope_metadata; + if pos < self.scopes[scope_id].file_start_pos || + pos >= self.scopes[scope_id].file_end_pos { + let cm = self.fcx.ccx.sess().codemap(); + debuginfo::extend_scope_to_file(self.fcx.ccx, + scope_metadata, + &cm.lookup_char_pos(pos).file) + } else { + scope_metadata + } + } +} + +enum LocalRef<'tcx> { + Lvalue(LvalueRef<'tcx>), + Operand(Option>), +} + +impl<'tcx> LocalRef<'tcx> { + fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>, + ty: ty::Ty<'tcx>) -> LocalRef<'tcx> { + if common::type_is_zero_size(ccx, ty) { + // Zero-size temporaries aren't always initialized, which + // doesn't matter because they don't contain data, but + // we need something in the operand. + let llty = type_of::type_of(ccx, ty); + let val = if common::type_is_imm_pair(ccx, ty) { + let fields = llty.field_types(); + OperandValue::Pair(C_null(fields[0]), C_null(fields[1])) + } else { + OperandValue::Immediate(C_null(llty)) + }; + let op = OperandRef { + val: val, + ty: ty + }; + LocalRef::Operand(Some(op)) + } else { + LocalRef::Operand(None) + } + } +} + +/////////////////////////////////////////////////////////////////////////// + +pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { + let bcx = fcx.init(true).build(); + let mir = bcx.mir(); + + // Analyze the temps to determine which must be lvalues + // FIXME + let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| { + (analyze::lvalue_locals(bcx, &mir), + analyze::cleanup_kinds(bcx, &mir)) + }); + + // Allocate a `Block` for every basic block + let block_bcxs: IndexVec> = + mir.basic_blocks().indices().map(|bb| { + if bb == mir::START_BLOCK { + fcx.new_block("start") + } else { + fcx.new_block(&format!("{:?}", bb)) + } + }).collect(); + + // Compute debuginfo scopes from MIR scopes. 
+ let scopes = debuginfo::create_mir_scopes(fcx); + + let mut mircx = MirContext { + mir: Ref::clone(&mir), + fcx: fcx, + llpersonalityslot: None, + blocks: block_bcxs, + unreachable_block: None, + cleanup_kinds: cleanup_kinds, + landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), + scopes: scopes, + locals: IndexVec::new(), + }; + + // Allocate variable and temp allocas + mircx.locals = { + let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals); + + let mut allocate_local = |local| { + let decl = &mir.local_decls[local]; + let ty = bcx.monomorphize(&decl.ty); + + if let Some(name) = decl.name { + // User variable + let source_info = decl.source_info.unwrap(); + let debug_scope = mircx.scopes[source_info.scope]; + let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; + + if !lvalue_locals.contains(local.index()) && !dbg { + debug!("alloc: {:?} ({}) -> operand", local, name); + return LocalRef::new_operand(bcx.ccx(), ty); + } + + debug!("alloc: {:?} ({}) -> lvalue", local, name); + let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + if dbg { + let dbg_loc = mircx.debug_loc(source_info); + if let DebugLoc::ScopeAt(scope, span) = dbg_loc { + bcx.with_block(|bcx| { + declare_local(bcx, name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); + }); + } else { + panic!("Unexpected"); + } + } + LocalRef::Lvalue(lvalue) + } else { + // Temporary or return pointer + if local == mir::RETURN_POINTER && fcx.fn_ty.ret.is_indirect() { + debug!("alloc: {:?} (return pointer) -> lvalue", local); + let llretptr = llvm::get_param(fcx.llfn, 0); + LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) + } else if lvalue_locals.contains(local.index()) { + debug!("alloc: {:?} -> lvalue", local); + LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + } else { + // If this is an immediate local, we do not create an + // alloca in advance. 
Instead we wait until we see the + // definition and update the operand there. + debug!("alloc: {:?} -> operand", local); + LocalRef::new_operand(bcx.ccx(), ty) + } + } + }; + + let retptr = allocate_local(mir::RETURN_POINTER); + iter::once(retptr) + .chain(args.into_iter()) + .chain(mir.vars_and_temps_iter().map(allocate_local)) + .collect() + }; + + // Branch to the START block + let start_bcx = mircx.blocks[mir::START_BLOCK]; + bcx.br(start_bcx.llbb); + + // Up until here, IR instructions for this function have explicitly not been annotated with + // source code location, so we don't step into call setup code. From here on, source location + // emitting should be enabled. + debuginfo::start_emitting_source_locations(fcx); + + let mut visited = BitVector::new(mir.basic_blocks().len()); + + let mut rpo = traversal::reverse_postorder(&mir); + + // Prepare each block for translation. + for (bb, _) in rpo.by_ref() { + mircx.init_cpad(bb); + } + rpo.reset(); + + // Translate the body of each block using reverse postorder + for (bb, _) in rpo { + visited.insert(bb.index()); + mircx.trans_block(bb); + } + + // Remove blocks that haven't been visited, or have no + // predecessors. + for bb in mir.basic_blocks().indices() { + let block = mircx.blocks[bb]; + let block = BasicBlock(block.llbb); + // Unreachable block + if !visited.contains(bb.index()) { + debug!("trans_mir: block {:?} was not visited", bb); + block.delete(); + } + } + + DebugLoc::None.apply(fcx); + fcx.cleanup(); +} + +/// Produce, for each argument, a `ValueRef` pointing at the +/// argument's value. As arguments are lvalues, these are always +/// indirect. +fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, + mir: &mir::Mir<'tcx>, + scopes: &IndexVec, + lvalue_locals: &BitVector) + -> Vec> { + let fcx = bcx.fcx(); + let tcx = bcx.tcx(); + let mut idx = 0; + let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + + // Get the argument scope, if it exists and if we need it. 
+ let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; + let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { + Some(arg_scope.scope_metadata) + } else { + None + }; + + mir.args_iter().enumerate().map(|(arg_index, local)| { + let arg_decl = &mir.local_decls[local]; + let arg_ty = bcx.monomorphize(&arg_decl.ty); + + if Some(local) == mir.spread_arg { + // This argument (e.g. the last argument in the "rust-call" ABI) + // is a tuple that was spread at the ABI level and now we have + // to reconstruct it into a tuple local variable, from multiple + // individual LLVM function arguments. + + let tupled_arg_tys = match arg_ty.sty { + ty::TyTuple(ref tys) => tys, + _ => bug!("spread argument isn't a tuple?!") + }; + + let lltemp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) + }); + for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { + let dst = bcx.struct_gep(lltemp, i); + let arg = &fcx.fn_ty.args[idx]; + idx += 1; + if common::type_is_fat_ptr(tcx, tupled_arg_ty) { + // We pass fat pointers as two words, but inside the tuple + // they are the two sub-fields of a single aggregate field. + let meta = &fcx.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(bcx, &mut llarg_idx, + base::get_dataptr_builder(bcx, dst)); + meta.store_fn_arg(bcx, &mut llarg_idx, + base::get_meta_builder(bcx, dst)); + } else { + arg.store_fn_arg(bcx, &mut llarg_idx, dst); + } + } + + // Now that we have one alloca that contains the aggregate value, + // we can create one debuginfo entry for the argument. 
+ bcx.with_block(|bcx| arg_scope.map(|scope| { + let variable_access = VariableAccess::DirectVariable { + alloca: lltemp + }; + declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, scope, variable_access, + VariableKind::ArgumentVariable(arg_index + 1), + bcx.fcx().span.unwrap_or(DUMMY_SP)); + })); + + return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); + } + + let arg = &fcx.fn_ty.args[idx]; + idx += 1; + let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up, unless + // we emit extra-debug-info, which requires local allocas :(. + // FIXME: lifetimes + if arg.pad.is_some() { + llarg_idx += 1; + } + let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + llarg_idx += 1; + llarg + } else if !lvalue_locals.contains(local.index()) && + !arg.is_indirect() && arg.cast.is_none() && + arg_scope.is_none() { + if arg.is_ignore() { + return LocalRef::new_operand(bcx.ccx(), arg_ty); + } + + // We don't have to cast or keep the argument in the alloca. + // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead + // of putting everything in allocas just so we can use llvm.dbg.declare. 
+ if arg.pad.is_some() { + llarg_idx += 1; + } + let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + llarg_idx += 1; + let val = if common::type_is_fat_ptr(tcx, arg_ty) { + let meta = &fcx.fn_ty.args[idx]; + idx += 1; + assert_eq!((meta.cast, meta.pad), (None, None)); + let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); + llarg_idx += 1; + OperandValue::Pair(llarg, llmeta) + } else { + OperandValue::Immediate(llarg) + }; + let operand = OperandRef { + val: val, + ty: arg_ty + }; + return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); + } else { + let lltemp = bcx.with_block(|bcx| { + base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) + }); + if common::type_is_fat_ptr(tcx, arg_ty) { + // we pass fat pointers as two words, but we want to + // represent them internally as a pointer to two words, + // so make an alloca to store them in. + let meta = &fcx.fn_ty.args[idx]; + idx += 1; + arg.store_fn_arg(bcx, &mut llarg_idx, + base::get_dataptr_builder(bcx, lltemp)); + meta.store_fn_arg(bcx, &mut llarg_idx, + base::get_meta_builder(bcx, lltemp)); + } else { + // otherwise, arg is passed by value, so make a + // temporary and store it there + arg.store_fn_arg(bcx, &mut llarg_idx, lltemp); + } + lltemp + }; + bcx.with_block(|bcx| arg_scope.map(|scope| { + // Is this a regular argument? + if arg_index > 0 || mir.upvar_decls.is_empty() { + declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, + scope, VariableAccess::DirectVariable { alloca: llval }, + VariableKind::ArgumentVariable(arg_index + 1), + bcx.fcx().span.unwrap_or(DUMMY_SP)); + return; + } + + // Or is it the closure environment? 
+ let (closure_ty, env_ref) = if let ty::TyRef(_, mt) = arg_ty.sty { + (mt.ty, true) + } else { + (arg_ty, false) + }; + let upvar_tys = if let ty::TyClosure(def_id, substs) = closure_ty.sty { + substs.upvar_tys(def_id, tcx) + } else { + bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty); + }; + + // Store the pointer to closure data in an alloca for debuginfo + // because that's what the llvm.dbg.declare intrinsic expects. + + // FIXME(eddyb) this shouldn't be necessary but SROA seems to + // mishandle DW_OP_plus not preceded by DW_OP_deref, i.e. it + // doesn't actually strip the offset when splitting the closure + // environment into its components so it ends up out of bounds. + let env_ptr = if !env_ref { + use base::*; + use build::*; + use common::*; + let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr"); + Store(bcx, llval, alloc); + alloc + } else { + llval + }; + + let llclosurety = type_of::type_of(bcx.ccx(), closure_ty); + for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { + let byte_offset_of_var_in_env = + machine::llelement_offset(bcx.ccx(), llclosurety, i); + + let ops = unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlus(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] + }; + + // The environment and the capture can each be indirect. + + // FIXME(eddyb) see above why we have to keep + // a pointer in an alloca for debuginfo atm. + let mut ops = if env_ref || true { &ops[..] } else { &ops[1..] 
}; + + let ty = if let (true, &ty::TyRef(_, mt)) = (decl.by_ref, &ty.sty) { + mt.ty + } else { + ops = &ops[..ops.len() - 1]; + ty + }; + + let variable_access = VariableAccess::IndirectVariable { + alloca: env_ptr, + address_operations: &ops + }; + declare_local(bcx, decl.debug_name, ty, scope, variable_access, + VariableKind::CapturedVariable, + bcx.fcx().span.unwrap_or(DUMMY_SP)); + } + })); + LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) + }).collect() +} + +mod analyze; +mod block; +mod constant; +mod lvalue; +mod operand; +mod rvalue; +mod statement; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs new file mode 100644 index 0000000000000..62eda56e2e1ba --- /dev/null +++ b/src/librustc_trans/mir/operand.rs @@ -0,0 +1,275 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use llvm::ValueRef; +use rustc::ty::Ty; +use rustc::mir; +use rustc_data_structures::indexed_vec::Idx; + +use base; +use common::{self, Block, BlockAndBuilder}; +use value::Value; +use type_of; +use type_::Type; + +use std::fmt; + +use super::{MirContext, LocalRef}; + +/// The representation of a Rust value. The enum variant is in fact +/// uniquely determined by the value's type, but is kept as a +/// safety check. +#[derive(Copy, Clone)] +pub enum OperandValue { + /// A reference to the actual operand. The data is guaranteed + /// to be valid for the operand's lifetime. + Ref(ValueRef), + /// A single LLVM value. + Immediate(ValueRef), + /// A pair of immediate LLVM values. Used by fat pointers too. + Pair(ValueRef, ValueRef) +} + +/// An `OperandRef` is an "SSA" reference to a Rust value, along with +/// its type. 
+/// +/// NOTE: unless you know a value's type exactly, you should not +/// generate LLVM opcodes acting on it and instead act via methods, +/// to avoid nasty edge cases. In particular, using `Builder.store` +/// directly is sure to cause problems -- use `MirContext.store_operand` +/// instead. +#[derive(Copy, Clone)] +pub struct OperandRef<'tcx> { + // The value. + pub val: OperandValue, + + // The type of value being returned. + pub ty: Ty<'tcx> +} + +impl<'tcx> fmt::Debug for OperandRef<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.val { + OperandValue::Ref(r) => { + write!(f, "OperandRef(Ref({:?}) @ {:?})", + Value(r), self.ty) + } + OperandValue::Immediate(i) => { + write!(f, "OperandRef(Immediate({:?}) @ {:?})", + Value(i), self.ty) + } + OperandValue::Pair(a, b) => { + write!(f, "OperandRef(Pair({:?}, {:?}) @ {:?})", + Value(a), Value(b), self.ty) + } + } + } +} + +impl<'bcx, 'tcx> OperandRef<'tcx> { + /// Asserts that this operand refers to a scalar and returns + /// a reference to its value. + pub fn immediate(self) -> ValueRef { + match self.val { + OperandValue::Immediate(s) => s, + _ => bug!() + } + } + + /// If this operand is a Pair, we return an + /// Immediate aggregate with the two values. + pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + -> OperandRef<'tcx> { + if let OperandValue::Pair(a, b) = self.val { + // Reconstruct the immediate aggregate. + let llty = type_of::type_of(bcx.ccx(), self.ty); + let mut llpair = common::C_undef(llty); + let elems = [a, b]; + for i in 0..2 { + let mut elem = elems[i]; + // Extend boolean i1's to i8. + if common::val_ty(elem) == Type::i1(bcx.ccx()) { + elem = bcx.zext(elem, Type::i8(bcx.ccx())); + } + llpair = bcx.insert_value(llpair, elem, i); + } + self.val = OperandValue::Immediate(llpair); + } + self + } + + /// If this operand is a pair in an Immediate, + /// we return a Pair with the two halves. 
+ pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + -> OperandRef<'tcx> { + if let OperandValue::Immediate(llval) = self.val { + // Deconstruct the immediate aggregate. + if common::type_is_imm_pair(bcx.ccx(), self.ty) { + debug!("Operand::unpack_if_pair: unpacking {:?}", self); + + let mut a = bcx.extract_value(llval, 0); + let mut b = bcx.extract_value(llval, 1); + + let pair_fields = common::type_pair_fields(bcx.ccx(), self.ty); + if let Some([a_ty, b_ty]) = pair_fields { + if a_ty.is_bool() { + a = bcx.trunc(a, Type::i1(bcx.ccx())); + } + if b_ty.is_bool() { + b = bcx.trunc(b, Type::i1(bcx.ccx())); + } + } + + self.val = OperandValue::Pair(a, b); + } + } + self + } +} + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_load(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + llval: ValueRef, + ty: Ty<'tcx>) + -> OperandRef<'tcx> + { + debug!("trans_load: {:?} @ {:?}", Value(llval), ty); + + let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { + let (lldata, llextra) = base::load_fat_ptr_builder(bcx, llval, ty); + OperandValue::Pair(lldata, llextra) + } else if common::type_is_imm_pair(bcx.ccx(), ty) { + let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap(); + let a_ptr = bcx.struct_gep(llval, 0); + let b_ptr = bcx.struct_gep(llval, 1); + + OperandValue::Pair( + base::load_ty_builder(bcx, a_ptr, a_ty), + base::load_ty_builder(bcx, b_ptr, b_ty) + ) + } else if common::type_is_immediate(bcx.ccx(), ty) { + OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty)) + } else { + OperandValue::Ref(llval) + }; + + OperandRef { val: val, ty: ty } + } + + pub fn trans_consume(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> OperandRef<'tcx> + { + debug!("trans_consume(lvalue={:?})", lvalue); + + // watch out for locals that do not have an + // alloca; they are handled somewhat differently + if let mir::Lvalue::Local(index) = *lvalue { + match self.locals[index] { + LocalRef::Operand(Some(o)) => 
{ + return o; + } + LocalRef::Operand(None) => { + bug!("use of {:?} before def", lvalue); + } + LocalRef::Lvalue(..) => { + // use path below + } + } + } + + // Moves out of pair fields are trivial. + if let &mir::Lvalue::Projection(ref proj) = lvalue { + if let mir::Lvalue::Local(index) = proj.base { + if let LocalRef::Operand(Some(o)) = self.locals[index] { + match (o.val, &proj.elem) { + (OperandValue::Pair(a, b), + &mir::ProjectionElem::Field(ref f, ty)) => { + let llval = [a, b][f.index()]; + let op = OperandRef { + val: OperandValue::Immediate(llval), + ty: bcx.monomorphize(&ty) + }; + + // Handle nested pairs. + return op.unpack_if_pair(bcx); + } + _ => {} + } + } + } + } + + // for most lvalues, to consume them we just load them + // out from their home + let tr_lvalue = self.trans_lvalue(bcx, lvalue); + let ty = tr_lvalue.ty.to_ty(bcx.tcx()); + self.trans_load(bcx, tr_lvalue.llval, ty) + } + + pub fn trans_operand(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + operand: &mir::Operand<'tcx>) + -> OperandRef<'tcx> + { + debug!("trans_operand(operand={:?})", operand); + + match *operand { + mir::Operand::Consume(ref lvalue) => { + self.trans_consume(bcx, lvalue) + } + + mir::Operand::Constant(ref constant) => { + let val = self.trans_constant(bcx, constant); + let operand = val.to_operand(bcx.ccx()); + if let OperandValue::Ref(ptr) = operand.val { + // If this is a OperandValue::Ref to an immediate constant, load it. 
+ self.trans_load(bcx, ptr, operand.ty) + } else { + operand + } + } + } + } + + pub fn store_operand(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + lldest: ValueRef, + operand: OperandRef<'tcx>) + { + debug!("store_operand: operand={:?}", operand); + bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) + } + + pub fn store_operand_direct(&mut self, + bcx: Block<'bcx, 'tcx>, + lldest: ValueRef, + operand: OperandRef<'tcx>) + { + // Avoid generating stores of zero-sized values, because the only way to have a zero-sized + // value is through `undef`, and store itself is useless. + if common::type_is_zero_size(bcx.ccx(), operand.ty) { + return; + } + match operand.val { + OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty), + OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty), + OperandValue::Pair(a, b) => { + use build::*; + let a = base::from_immediate(bcx, a); + let b = base::from_immediate(bcx, b); + Store(bcx, a, StructGEP(bcx, lldest, 0)); + Store(bcx, b, StructGEP(bcx, lldest, 1)); + } + } + } +} diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs new file mode 100644 index 0000000000000..15cbbc720d6d4 --- /dev/null +++ b/src/librustc_trans/mir/rvalue.rs @@ -0,0 +1,781 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use llvm::{self, ValueRef}; +use rustc::ty::{self, Ty}; +use rustc::ty::cast::{CastTy, IntTy}; +use rustc::ty::layout::Layout; +use rustc::mir; + +use asm; +use base; +use callee::Callee; +use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; +use common::{C_integral}; +use debuginfo::DebugLoc; +use adt; +use machine; +use type_::Type; +use type_of; +use tvec; +use value::Value; +use Disr; + +use super::MirContext; +use super::constant::const_scalar_checked_binop; +use super::operand::{OperandRef, OperandValue}; +use super::lvalue::{LvalueRef}; + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_rvalue(&mut self, + bcx: BlockAndBuilder<'bcx, 'tcx>, + dest: LvalueRef<'tcx>, + rvalue: &mir::Rvalue<'tcx>, + debug_loc: DebugLoc) + -> BlockAndBuilder<'bcx, 'tcx> + { + debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", + Value(dest.llval), rvalue); + + match *rvalue { + mir::Rvalue::Use(ref operand) => { + let tr_operand = self.trans_operand(&bcx, operand); + // FIXME: consider not copying constants through stack. (fixable by translating + // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) + self.store_operand(&bcx, dest.llval, tr_operand); + bcx + } + + mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { + let cast_ty = bcx.monomorphize(&cast_ty); + + if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + // into-coerce of a thin pointer to a fat pointer - just + // use the operand path. + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + self.store_operand(&bcx, dest.llval, temp); + return bcx; + } + + // Unsize of a nontrivial struct. I would prefer for + // this to be eliminated by MIR translation, but + // `CoerceUnsized` can be passed by a where-clause, + // so the (generic) MIR may not be able to expand it. + let operand = self.trans_operand(&bcx, source); + let operand = operand.pack_if_pair(&bcx); + bcx.with_block(|bcx| { + match operand.val { + OperandValue::Pair(..) 
=> bug!(), + OperandValue::Immediate(llval) => { + // unsize from an immediate structure. We don't + // really need a temporary alloca here, but + // avoiding it would require us to have + // `coerce_unsized_into` use extractvalue to + // index into the struct, and this case isn't + // important enough for it. + debug!("trans_rvalue: creating ugly alloca"); + let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp"); + base::store_ty(bcx, llval, lltemp, operand.ty); + base::coerce_unsized_into(bcx, + lltemp, operand.ty, + dest.llval, cast_ty); + } + OperandValue::Ref(llref) => { + base::coerce_unsized_into(bcx, + llref, operand.ty, + dest.llval, cast_ty); + } + } + }); + bcx + } + + mir::Rvalue::Repeat(ref elem, ref count) => { + let tr_elem = self.trans_operand(&bcx, elem); + let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); + let size = C_uint(bcx.ccx(), size); + let base = base::get_dataptr_builder(&bcx, dest.llval); + let bcx = bcx.map_block(|block| { + tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { + self.store_operand_direct(block, llslot, tr_elem); + block + }) + }); + bcx + } + + mir::Rvalue::Aggregate(ref kind, ref operands) => { + match *kind { + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + let disr = Disr::from(adt_def.variants[variant_index].disr_val); + bcx.with_block(|bcx| { + adt::trans_set_discr(bcx, + dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); + }); + for (i, operand) in operands.iter().enumerate() { + let op = self.trans_operand(&bcx, operand); + // Do not generate stores and GEPis for zero-sized fields. 
+ if !common::type_is_zero_size(bcx.ccx(), op.ty) { + let val = adt::MaybeSizedValue::sized(dest.llval); + let field_index = active_field_index.unwrap_or(i); + let lldest_i = adt::trans_field_ptr_builder(&bcx, + dest.ty.to_ty(bcx.tcx()), + val, disr, field_index); + self.store_operand(&bcx, lldest_i, op); + } + } + }, + _ => { + for (i, operand) in operands.iter().enumerate() { + let op = self.trans_operand(&bcx, operand); + // Do not generate stores and GEPis for zero-sized fields. + if !common::type_is_zero_size(bcx.ccx(), op.ty) { + // Note: perhaps this should be StructGep, but + // note that in some cases the values here will + // not be structs but arrays. + let dest = bcx.gepi(dest.llval, &[0, i]); + self.store_operand(&bcx, dest, op); + } + } + } + } + bcx + } + + mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => { + let outputs = outputs.iter().map(|output| { + let lvalue = self.trans_lvalue(&bcx, output); + (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) + }).collect(); + + let input_vals = inputs.iter().map(|input| { + self.trans_operand(&bcx, input).immediate() + }).collect(); + + bcx.with_block(|bcx| { + asm::trans_inline_asm(bcx, asm, outputs, input_vals); + }); + + bcx + } + + _ => { + assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue)); + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + self.store_operand(&bcx, dest.llval, temp); + bcx + } + } + } + + pub fn trans_rvalue_operand(&mut self, + bcx: BlockAndBuilder<'bcx, 'tcx>, + rvalue: &mir::Rvalue<'tcx>, + debug_loc: DebugLoc) + -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>) + { + assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue), + "cannot trans {:?} to operand", rvalue); + + match *rvalue { + mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { + let operand = self.trans_operand(&bcx, source); + debug!("cast operand is {:?}", operand); + let cast_ty = bcx.monomorphize(&cast_ty); + + let val = match *kind { + mir::CastKind::ReifyFnPointer => { + match 
operand.ty.sty { + ty::TyFnDef(def_id, substs, _) => { + OperandValue::Immediate( + Callee::def(bcx.ccx(), def_id, substs) + .reify(bcx.ccx())) + } + _ => { + bug!("{} cannot be reified to a fn ptr", operand.ty) + } + } + } + mir::CastKind::UnsafeFnPointer => { + // this is a no-op at the LLVM level + operand.val + } + mir::CastKind::Unsize => { + // unsize targets other than to a fat pointer currently + // can't be operands. + assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty)); + + match operand.val { + OperandValue::Pair(lldata, llextra) => { + // unsize from a fat pointer - this is a + // "trait-object-to-supertrait" coercion, for + // example, + // &'a fmt::Debug+Send => &'a fmt::Debug, + // So we need to pointercast the base to ensure + // the types match up. + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty); + let lldata = bcx.pointercast(lldata, llcast_ty); + OperandValue::Pair(lldata, llextra) + } + OperandValue::Immediate(lldata) => { + // "standard" unsize + let (lldata, llextra) = bcx.with_block(|bcx| { + base::unsize_thin_ptr(bcx, lldata, + operand.ty, cast_ty) + }); + OperandValue::Pair(lldata, llextra) + } + OperandValue::Ref(_) => { + bug!("by-ref operand {:?} in trans_rvalue_operand", + operand); + } + } + } + mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => { + let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty); + if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { + if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + let ll_cft = ll_cast_ty.field_types(); + let ll_fft = ll_from_ty.field_types(); + let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); + assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); + OperandValue::Pair(data_cast, meta_ptr) + } else { // cast to thin-ptr + // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and + // pointer-cast of that pointer to desired pointer type. 
+ let llval = bcx.pointercast(data_ptr, ll_cast_ty); + OperandValue::Immediate(llval) + } + } else { + bug!("Unexpected non-Pair operand") + } + } + mir::CastKind::Misc => { + debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); + let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); + let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); + let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { + let l = bcx.ccx().layout_of(operand.ty); + let discr = match operand.val { + OperandValue::Immediate(llval) => llval, + OperandValue::Ref(llptr) => { + bcx.with_block(|bcx| { + adt::trans_get_discr(bcx, operand.ty, llptr, None, true) + }) + } + OperandValue::Pair(..) => bug!("Unexpected Pair operand") + }; + let (signed, min, max) = match l { + &Layout::CEnum { signed, min, max, .. } => { + (signed, min, max) + } + _ => bug!("CEnum {:?} is not an enum", operand) + }; + + if max > min { + // We want `table[e as usize]` to not + // have bound checks, and this is the most + // convenient place to put the `assume`. 
+ + base::call_assume(&bcx, bcx.icmp( + llvm::IntULE, + discr, + C_integral(common::val_ty(discr), max, false) + )) + } + + (discr, signed) + } else { + (operand.immediate(), operand.ty.is_signed()) + }; + + let newval = match (r_t_in, r_t_out) { + (CastTy::Int(_), CastTy::Int(_)) => { + let srcsz = ll_t_in.int_width(); + let dstsz = ll_t_out.int_width(); + if srcsz == dstsz { + bcx.bitcast(llval, ll_t_out) + } else if srcsz > dstsz { + bcx.trunc(llval, ll_t_out) + } else if signed { + bcx.sext(llval, ll_t_out) + } else { + bcx.zext(llval, ll_t_out) + } + } + (CastTy::Float, CastTy::Float) => { + let srcsz = ll_t_in.float_width(); + let dstsz = ll_t_out.float_width(); + if dstsz > srcsz { + bcx.fpext(llval, ll_t_out) + } else if srcsz > dstsz { + bcx.fptrunc(llval, ll_t_out) + } else { + llval + } + } + (CastTy::Ptr(_), CastTy::Ptr(_)) | + (CastTy::FnPtr, CastTy::Ptr(_)) | + (CastTy::RPtr(_), CastTy::Ptr(_)) => + bcx.pointercast(llval, ll_t_out), + (CastTy::Ptr(_), CastTy::Int(_)) | + (CastTy::FnPtr, CastTy::Int(_)) => + bcx.ptrtoint(llval, ll_t_out), + (CastTy::Int(_), CastTy::Ptr(_)) => + bcx.inttoptr(llval, ll_t_out), + (CastTy::Int(_), CastTy::Float) if signed => + bcx.sitofp(llval, ll_t_out), + (CastTy::Int(_), CastTy::Float) => + bcx.uitofp(llval, ll_t_out), + (CastTy::Float, CastTy::Int(IntTy::I)) => + bcx.fptosi(llval, ll_t_out), + (CastTy::Float, CastTy::Int(_)) => + bcx.fptoui(llval, ll_t_out), + _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) + }; + OperandValue::Immediate(newval) + } + }; + let operand = OperandRef { + val: val, + ty: cast_ty + }; + (bcx, operand) + } + + mir::Rvalue::Ref(_, bk, ref lvalue) => { + let tr_lvalue = self.trans_lvalue(&bcx, lvalue); + + let ty = tr_lvalue.ty.to_ty(bcx.tcx()); + let ref_ty = bcx.tcx().mk_ref( + bcx.tcx().mk_region(ty::ReErased), + ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } + ); + + // Note: lvalues are indirect, so storing the `llval` into the + // destination effectively creates 
a reference. + let operand = if common::type_is_sized(bcx.tcx(), ty) { + OperandRef { + val: OperandValue::Immediate(tr_lvalue.llval), + ty: ref_ty, + } + } else { + OperandRef { + val: OperandValue::Pair(tr_lvalue.llval, + tr_lvalue.llextra), + ty: ref_ty, + } + }; + (bcx, operand) + } + + mir::Rvalue::Len(ref lvalue) => { + let tr_lvalue = self.trans_lvalue(&bcx, lvalue); + let operand = OperandRef { + val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())), + ty: bcx.tcx().types.usize, + }; + (bcx, operand) + } + + mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { + let lhs = self.trans_operand(&bcx, lhs); + let rhs = self.trans_operand(&bcx, rhs); + let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) { + match (lhs.val, rhs.val) { + (OperandValue::Pair(lhs_addr, lhs_extra), + OperandValue::Pair(rhs_addr, rhs_extra)) => { + self.trans_fat_ptr_binop(&bcx, op, + lhs_addr, lhs_extra, + rhs_addr, rhs_extra, + lhs.ty) + } + _ => bug!() + } + + } else { + self.trans_scalar_binop(&bcx, op, + lhs.immediate(), rhs.immediate(), + lhs.ty) + }; + let operand = OperandRef { + val: OperandValue::Immediate(llresult), + ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), + }; + (bcx, operand) + } + mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { + let lhs = self.trans_operand(&bcx, lhs); + let rhs = self.trans_operand(&bcx, rhs); + let result = self.trans_scalar_checked_binop(&bcx, op, + lhs.immediate(), rhs.immediate(), + lhs.ty); + let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); + let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool]); + let operand = OperandRef { + val: result, + ty: operand_ty + }; + + (bcx, operand) + } + + mir::Rvalue::UnaryOp(op, ref operand) => { + let operand = self.trans_operand(&bcx, operand); + let lloperand = operand.immediate(); + let is_float = operand.ty.is_fp(); + let llval = match op { + mir::UnOp::Not => bcx.not(lloperand), + mir::UnOp::Neg => if is_float { + bcx.fneg(lloperand) + } else { + bcx.neg(lloperand) + } + }; + (bcx, 
OperandRef { + val: OperandValue::Immediate(llval), + ty: operand.ty, + }) + } + + mir::Rvalue::Box(content_ty) => { + let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty); + let llty = type_of::type_of(bcx.ccx(), content_ty); + let llsize = machine::llsize_of(bcx.ccx(), llty); + let align = type_of::align_of(bcx.ccx(), content_ty); + let llalign = C_uint(bcx.ccx(), align); + let llty_ptr = llty.ptr_to(); + let box_ty = bcx.tcx().mk_box(content_ty); + let mut llval = None; + let bcx = bcx.map_block(|bcx| { + let Result { bcx, val } = base::malloc_raw_dyn(bcx, + llty_ptr, + box_ty, + llsize, + llalign, + debug_loc); + llval = Some(val); + bcx + }); + let operand = OperandRef { + val: OperandValue::Immediate(llval.unwrap()), + ty: box_ty, + }; + (bcx, operand) + } + + mir::Rvalue::Use(ref operand) => { + let operand = self.trans_operand(&bcx, operand); + (bcx, operand) + } + mir::Rvalue::Repeat(..) | + mir::Rvalue::Aggregate(..) | + mir::Rvalue::InlineAsm { .. } => { + bug!("cannot generate operand from rvalue {:?}", rvalue); + + } + } + } + + pub fn trans_scalar_binop(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + op: mir::BinOp, + lhs: ValueRef, + rhs: ValueRef, + input_ty: Ty<'tcx>) -> ValueRef { + let is_float = input_ty.is_fp(); + let is_signed = input_ty.is_signed(); + let is_nil = input_ty.is_nil(); + let is_bool = input_ty.is_bool(); + match op { + mir::BinOp::Add => if is_float { + bcx.fadd(lhs, rhs) + } else { + bcx.add(lhs, rhs) + }, + mir::BinOp::Sub => if is_float { + bcx.fsub(lhs, rhs) + } else { + bcx.sub(lhs, rhs) + }, + mir::BinOp::Mul => if is_float { + bcx.fmul(lhs, rhs) + } else { + bcx.mul(lhs, rhs) + }, + mir::BinOp::Div => if is_float { + bcx.fdiv(lhs, rhs) + } else if is_signed { + bcx.sdiv(lhs, rhs) + } else { + bcx.udiv(lhs, rhs) + }, + mir::BinOp::Rem => if is_float { + bcx.frem(lhs, rhs) + } else if is_signed { + bcx.srem(lhs, rhs) + } else { + bcx.urem(lhs, rhs) + }, + mir::BinOp::BitOr => bcx.or(lhs, rhs), + mir::BinOp::BitAnd => 
bcx.and(lhs, rhs), + mir::BinOp::BitXor => bcx.xor(lhs, rhs), + mir::BinOp::Shl => { + bcx.with_block(|bcx| { + common::build_unchecked_lshift(bcx, + lhs, + rhs, + DebugLoc::None) + }) + } + mir::BinOp::Shr => { + bcx.with_block(|bcx| { + common::build_unchecked_rshift(bcx, + input_ty, + lhs, + rhs, + DebugLoc::None) + }) + } + mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | + mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { + C_bool(bcx.ccx(), match op { + mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, + mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, + _ => unreachable!() + }) + } else if is_float { + bcx.fcmp( + base::bin_op_to_fcmp_predicate(op.to_hir_binop()), + lhs, rhs + ) + } else { + let (lhs, rhs) = if is_bool { + // FIXME(#36856) -- extend the bools into `i8` because + // LLVM's i1 comparisons are broken. + (bcx.zext(lhs, Type::i8(bcx.ccx())), + bcx.zext(rhs, Type::i8(bcx.ccx()))) + } else { + (lhs, rhs) + }; + + bcx.icmp( + base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), + lhs, rhs + ) + } + } + } + + pub fn trans_fat_ptr_binop(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + op: mir::BinOp, + lhs_addr: ValueRef, + lhs_extra: ValueRef, + rhs_addr: ValueRef, + rhs_extra: ValueRef, + _input_ty: Ty<'tcx>) + -> ValueRef { + match op { + mir::BinOp::Eq => { + bcx.and( + bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), + bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) + ) + } + mir::BinOp::Ne => { + bcx.or( + bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr), + bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra) + ) + } + mir::BinOp::Le | mir::BinOp::Lt | + mir::BinOp::Ge | mir::BinOp::Gt => { + // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) + let (op, strict_op) = match op { + mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), + mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), + mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), + mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), + _ => bug!(), + }; + + bcx.or( + 
bcx.icmp(strict_op, lhs_addr, rhs_addr), + bcx.and( + bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), + bcx.icmp(op, lhs_extra, rhs_extra) + ) + ) + } + _ => { + bug!("unexpected fat ptr binop"); + } + } + } + + pub fn trans_scalar_checked_binop(&mut self, + bcx: &BlockAndBuilder<'bcx, 'tcx>, + op: mir::BinOp, + lhs: ValueRef, + rhs: ValueRef, + input_ty: Ty<'tcx>) -> OperandValue { + // This case can currently arise only from functions marked + // with #[rustc_inherit_overflow_checks] and inlined from + // another crate (mostly core::num generic/#[inline] fns), + // while the current crate doesn't use overflow checks. + if !bcx.ccx().check_overflow() { + let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); + return OperandValue::Pair(val, C_bool(bcx.ccx(), false)); + } + + // First try performing the operation on constants, which + // will only succeed if both operands are constant. + // This is necessary to determine when an overflow Assert + // will always panic at runtime, and produce a warning. 
+ if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { + return OperandValue::Pair(val, C_bool(bcx.ccx(), of)); + } + + let (val, of) = match op { + // These are checked using intrinsics + mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => { + let oop = match op { + mir::BinOp::Add => OverflowOp::Add, + mir::BinOp::Sub => OverflowOp::Sub, + mir::BinOp::Mul => OverflowOp::Mul, + _ => unreachable!() + }; + let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty); + let res = bcx.call(intrinsic, &[lhs, rhs], None); + + (bcx.extract_value(res, 0), + bcx.extract_value(res, 1)) + } + mir::BinOp::Shl | mir::BinOp::Shr => { + let lhs_llty = val_ty(lhs); + let rhs_llty = val_ty(rhs); + let invert_mask = bcx.with_block(|bcx| { + common::shift_mask_val(bcx, lhs_llty, rhs_llty, true) + }); + let outer_bits = bcx.and(rhs, invert_mask); + + let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); + let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); + + (val, of) + } + _ => { + bug!("Operator `{:?}` is not a checkable operator", op) + } + }; + + OperandValue::Pair(val, of) + } +} + +pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>, + _bcx: &BlockAndBuilder<'bcx, 'tcx>, + rvalue: &mir::Rvalue<'tcx>) -> bool { + match *rvalue { + mir::Rvalue::Ref(..) | + mir::Rvalue::Len(..) | + mir::Rvalue::Cast(..) | // (*) + mir::Rvalue::BinaryOp(..) | + mir::Rvalue::CheckedBinaryOp(..) | + mir::Rvalue::UnaryOp(..) | + mir::Rvalue::Box(..) | + mir::Rvalue::Use(..) => + true, + mir::Rvalue::Repeat(..) | + mir::Rvalue::Aggregate(..) | + mir::Rvalue::InlineAsm { .. 
} => + false, + } + + // (*) this is only true if the type is suitable +} + +#[derive(Copy, Clone)] +enum OverflowOp { + Add, Sub, Mul +} + +fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> ValueRef { + use syntax::ast::IntTy::*; + use syntax::ast::UintTy::*; + use rustc::ty::{TyInt, TyUint}; + + let tcx = bcx.tcx(); + + let new_sty = match ty.sty { + TyInt(Is) => match &tcx.sess.target.target.target_pointer_width[..] { + "16" => TyInt(I16), + "32" => TyInt(I32), + "64" => TyInt(I64), + _ => panic!("unsupported target word size") + }, + TyUint(Us) => match &tcx.sess.target.target.target_pointer_width[..] { + "16" => TyUint(U16), + "32" => TyUint(U32), + "64" => TyUint(U64), + _ => panic!("unsupported target word size") + }, + ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), + _ => panic!("tried to get overflow intrinsic for op applied to non-int type") + }; + + let name = match oop { + OverflowOp::Add => match new_sty { + TyInt(I8) => "llvm.sadd.with.overflow.i8", + TyInt(I16) => "llvm.sadd.with.overflow.i16", + TyInt(I32) => "llvm.sadd.with.overflow.i32", + TyInt(I64) => "llvm.sadd.with.overflow.i64", + + TyUint(U8) => "llvm.uadd.with.overflow.i8", + TyUint(U16) => "llvm.uadd.with.overflow.i16", + TyUint(U32) => "llvm.uadd.with.overflow.i32", + TyUint(U64) => "llvm.uadd.with.overflow.i64", + + _ => unreachable!(), + }, + OverflowOp::Sub => match new_sty { + TyInt(I8) => "llvm.ssub.with.overflow.i8", + TyInt(I16) => "llvm.ssub.with.overflow.i16", + TyInt(I32) => "llvm.ssub.with.overflow.i32", + TyInt(I64) => "llvm.ssub.with.overflow.i64", + + TyUint(U8) => "llvm.usub.with.overflow.i8", + TyUint(U16) => "llvm.usub.with.overflow.i16", + TyUint(U32) => "llvm.usub.with.overflow.i32", + TyUint(U64) => "llvm.usub.with.overflow.i64", + + _ => unreachable!(), + }, + OverflowOp::Mul => match new_sty { + TyInt(I8) => "llvm.smul.with.overflow.i8", + TyInt(I16) => "llvm.smul.with.overflow.i16", + TyInt(I32) => "llvm.smul.with.overflow.i32", + 
TyInt(I64) => "llvm.smul.with.overflow.i64", + + TyUint(U8) => "llvm.umul.with.overflow.i8", + TyUint(U16) => "llvm.umul.with.overflow.i16", + TyUint(U32) => "llvm.umul.with.overflow.i32", + TyUint(U64) => "llvm.umul.with.overflow.i64", + + _ => unreachable!(), + }, + }; + + bcx.ccx().get_intrinsic(&name) +} diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs new file mode 100644 index 0000000000000..296a0e8049e08 --- /dev/null +++ b/src/librustc_trans/mir/statement.rs @@ -0,0 +1,96 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::mir; + +use base; +use common::{self, BlockAndBuilder}; + +use super::MirContext; +use super::LocalRef; +use super::super::adt; +use super::super::disr::Disr; + +impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { + pub fn trans_statement(&mut self, + bcx: BlockAndBuilder<'bcx, 'tcx>, + statement: &mir::Statement<'tcx>) + -> BlockAndBuilder<'bcx, 'tcx> { + debug!("trans_statement(statement={:?})", statement); + + let debug_loc = self.debug_loc(statement.source_info); + debug_loc.apply_to_bcx(&bcx); + debug_loc.apply(bcx.fcx()); + match statement.kind { + mir::StatementKind::Assign(ref lvalue, ref rvalue) => { + if let mir::Lvalue::Local(index) = *lvalue { + match self.locals[index] { + LocalRef::Lvalue(tr_dest) => { + self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + } + LocalRef::Operand(None) => { + let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue, + debug_loc); + self.locals[index] = LocalRef::Operand(Some(operand)); + bcx + } + LocalRef::Operand(Some(_)) => { + let ty = self.monomorphized_lvalue_ty(lvalue); + + if !common::type_is_zero_size(bcx.ccx(), ty) { + 
span_bug!(statement.source_info.span, + "operand {:?} already assigned", + rvalue); + } else { + // If the type is zero-sized, it's already been set here, + // but we still need to make sure we translate the operand + self.trans_rvalue_operand(bcx, rvalue, debug_loc).0 + } + } + } + } else { + let tr_dest = self.trans_lvalue(&bcx, lvalue); + self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + } + } + mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { + let ty = self.monomorphized_lvalue_ty(lvalue); + let lvalue_transed = self.trans_lvalue(&bcx, lvalue); + bcx.with_block(|bcx| + adt::trans_set_discr(bcx, + ty, + lvalue_transed.llval, + Disr::from(variant_index)) + ); + bcx + } + mir::StatementKind::StorageLive(ref lvalue) => { + self.trans_storage_liveness(bcx, lvalue, base::Lifetime::Start) + } + mir::StatementKind::StorageDead(ref lvalue) => { + self.trans_storage_liveness(bcx, lvalue, base::Lifetime::End) + } + mir::StatementKind::Nop => bcx, + } + } + + fn trans_storage_liveness(&self, + bcx: BlockAndBuilder<'bcx, 'tcx>, + lvalue: &mir::Lvalue<'tcx>, + intrinsic: base::Lifetime) + -> BlockAndBuilder<'bcx, 'tcx> { + if let mir::Lvalue::Local(index) = *lvalue { + if let LocalRef::Lvalue(tr_lval) = self.locals[index] { + intrinsic.call(&bcx, tr_lval.llval); + } + } + bcx + } +} diff --git a/src/librustc_trans/monomorphize.rs b/src/librustc_trans/monomorphize.rs new file mode 100644 index 0000000000000..8f05cc793ef22 --- /dev/null +++ b/src/librustc_trans/monomorphize.rs @@ -0,0 +1,104 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use common::*; +use rustc::hir::def_id::DefId; +use rustc::infer::TransNormalize; +use rustc::ty::fold::{TypeFolder, TypeFoldable}; +use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::ppaux; +use rustc::util::common::MemoizationMap; +use std::fmt; + +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct Instance<'tcx> { + pub def: DefId, + pub substs: &'tcx Substs<'tcx>, +} + +impl<'tcx> fmt::Display for Instance<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + ppaux::parameterized(f, &self.substs, self.def, &[]) + } +} + +impl<'tcx> Instance<'tcx> { + pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) + -> Instance<'tcx> { + assert!(substs.regions().all(|&r| r == ty::ReErased)); + Instance { def: def_id, substs: substs } + } + pub fn mono<'a>(scx: &SharedCrateContext<'a, 'tcx>, def_id: DefId) -> Instance<'tcx> { + Instance::new(def_id, scx.empty_substs_for_def_id(def_id)) + } +} + +/// Monomorphizes a type from the AST by first applying the in-scope +/// substitutions and then normalizing any associated types. 
+pub fn apply_param_substs<'a, 'tcx, T>(scx: &SharedCrateContext<'a, 'tcx>, + param_substs: &Substs<'tcx>, + value: &T) + -> T + where T: TransNormalize<'tcx> +{ + let tcx = scx.tcx(); + debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value); + let substituted = value.subst(tcx, param_substs); + let substituted = scx.tcx().erase_regions(&substituted); + AssociatedTypeNormalizer::new(scx).fold(&substituted) +} + + +/// Returns the normalized type of a struct field +pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_substs: &Substs<'tcx>, + f: &'tcx ty::FieldDef) + -> Ty<'tcx> +{ + tcx.normalize_associated_type(&f.ty(tcx, param_substs)) +} + +struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b> { + shared: &'a SharedCrateContext<'b, 'gcx>, +} + +impl<'a, 'b, 'gcx> AssociatedTypeNormalizer<'a, 'b, 'gcx> { + fn new(shared: &'a SharedCrateContext<'b, 'gcx>) -> Self { + AssociatedTypeNormalizer { + shared: shared, + } + } + + fn fold>(&mut self, value: &T) -> T { + if !value.has_projection_types() { + value.clone() + } else { + value.fold_with(self) + } + } +} + +impl<'a, 'b, 'gcx> TypeFolder<'gcx, 'gcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx> { + fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'gcx> { + self.shared.tcx() + } + + fn fold_ty(&mut self, ty: Ty<'gcx>) -> Ty<'gcx> { + if !ty.has_projection_types() { + ty + } else { + self.shared.project_cache().memoize(ty, || { + debug!("AssociatedTypeNormalizer: ty={:?}", ty); + self.shared.tcx().normalize_associated_type(&ty) + }) + } + } +} diff --git a/src/librustc_trans/partitioning.rs b/src/librustc_trans/partitioning.rs new file mode 100644 index 0000000000000..a36960993e471 --- /dev/null +++ b/src/librustc_trans/partitioning.rs @@ -0,0 +1,581 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Partitioning Codegen Units for Incremental Compilation +//! ====================================================== +//! +//! The task of this module is to take the complete set of translation items of +//! a crate and produce a set of codegen units from it, where a codegen unit +//! is a named set of (translation-item, linkage) pairs. That is, this module +//! decides which translation item appears in which codegen units with which +//! linkage. The following paragraphs describe some of the background on the +//! partitioning scheme. +//! +//! The most important opportunity for saving on compilation time with +//! incremental compilation is to avoid re-translating and re-optimizing code. +//! Since the unit of translation and optimization for LLVM is "modules" or, how +//! we call them "codegen units", the particulars of how much time can be saved +//! by incremental compilation are tightly linked to how the output program is +//! partitioned into these codegen units prior to passing it to LLVM -- +//! especially because we have to treat codegen units as opaque entities once +//! they are created: There is no way for us to incrementally update an existing +//! LLVM module and so we have to build any such module from scratch if it was +//! affected by some change in the source code. +//! +//! From that point of view it would make sense to maximize the number of +//! codegen units by, for example, putting each function into its own module. +//! That way only those modules would have to be re-compiled that were actually +//! affected by some change, minimizing the number of functions that could have +//! been re-used but just happened to be located in a module that is +//! re-compiled. +//! +//! However, since LLVM optimization does not work across module boundaries, +//! 
using such a highly granular partitioning would lead to very slow runtime +//! code since it would effectively prohibit inlining and other inter-procedure +//! optimizations. We want to avoid that as much as possible. +//! +//! Thus we end up with a trade-off: The bigger the codegen units, the better +//! LLVM's optimizer can do its work, but also the smaller the compilation time +//! reduction we get from incremental compilation. +//! +//! Ideally, we would create a partitioning such that there are few big codegen +//! units with few interdependencies between them. For now though, we use the +//! following heuristic to determine the partitioning: +//! +//! - There are two codegen units for every source-level module: +//! - One for "stable", that is non-generic, code +//! - One for more "volatile" code, i.e. monomorphized instances of functions +//! defined in that module +//! - Code for monomorphized instances of functions from external crates gets +//! placed into every codegen unit that uses that instance. +//! +//! In order to see why this heuristic makes sense, let's take a look at when a +//! codegen unit can get invalidated: +//! +//! 1. The most straightforward case is when the BODY of a function or global +//! changes. Then any codegen unit containing the code for that item has to be +//! re-compiled. Note that this includes all codegen units where the function +//! has been inlined. +//! +//! 2. The next case is when the SIGNATURE of a function or global changes. In +//! this case, all codegen units containing a REFERENCE to that item have to be +//! re-compiled. This is a superset of case 1. +//! +//! 3. The final and most subtle case is when a REFERENCE to a generic function +//! is added or removed somewhere. Even though the definition of the function +//! might be unchanged, a new REFERENCE might introduce a new monomorphized +//! instance of this function which has to be placed and compiled somewhere. +//! 
Conversely, when removing a REFERENCE, it might have been the last one with +//! that particular set of generic arguments and thus we have to remove it. +//! +//! From the above we see that just using one codegen unit per source-level +//! module is not such a good idea, since just adding a REFERENCE to some +//! generic item somewhere else would invalidate everything within the module +//! containing the generic item. The heuristic above reduces this detrimental +//! side-effect of references a little by at least not touching the non-generic +//! code of the module. +//! +//! As another optimization, monomorphized functions from external crates get +//! some special handling. Since we assume that the definition of such a +//! function changes rather infrequently compared to local items, we can just +//! instantiate external functions in every codegen unit where it is referenced +//! -- without having to fear that doing this will cause a lot of unnecessary +//! re-compilations. If such a reference is added or removed, the codegen unit +//! has to be re-translated anyway. +//! (Note that this only makes sense if external crates actually don't change +//! frequently. For certain multi-crate projects this might not be a valid +//! assumption). +//! +//! A Note on Inlining +//! ------------------ +//! As briefly mentioned above, in order for LLVM to be able to inline a +//! function call, the body of the function has to be available in the LLVM +//! module where the call is made. This has a few consequences for partitioning: +//! +//! - The partitioning algorithm has to take care of placing functions into all +//! codegen units where they should be available for inlining. It also has to +//! decide on the correct linkage for these functions. +//! +//! - The partitioning algorithm has to know which functions are likely to get +//! inlined, so it can distribute function instantiations accordingly. Since +//! 
there is no way of knowing for sure which functions LLVM will decide to +//! inline in the end, we apply a heuristic here: Only functions marked with +//! #[inline] and (as stated above) functions from external crates are +//! considered for inlining by the partitioner. The current implementation +//! will not try to determine if a function is likely to be inlined by looking +//! at the functions definition. +//! +//! Note though that as a side-effect of creating a codegen units per +//! source-level module, functions from the same module will be available for +//! inlining, even when they are not marked #[inline]. + +use collector::InliningMap; +use context::SharedCrateContext; +use llvm; +use monomorphize; +use rustc::dep_graph::{DepNode, WorkProductId}; +use rustc::hir::def_id::DefId; +use rustc::hir::map::DefPathData; +use rustc::session::config::NUMBERED_CODEGEN_UNIT_MARKER; +use rustc::ty::TyCtxt; +use rustc::ty::item_path::characteristic_def_id_of_type; +use std::cmp::Ordering; +use std::hash::{Hash, Hasher}; +use std::sync::Arc; +use std::collections::hash_map::DefaultHasher; +use symbol_map::SymbolMap; +use syntax::ast::NodeId; +use syntax::symbol::{Symbol, InternedString}; +use trans_item::TransItem; +use util::nodemap::{FxHashMap, FxHashSet}; + +pub enum PartitioningStrategy { + /// Generate one codegen unit per source-level module. + PerModule, + + /// Partition the whole crate into a fixed number of codegen units. + FixedUnitCount(usize) +} + +pub struct CodegenUnit<'tcx> { + /// A name for this CGU. Incremental compilation requires that + /// name be unique amongst **all** crates. Therefore, it should + /// contain something unique to this crate (e.g., a module path) + /// as well as the crate name and disambiguator. 
+ name: InternedString, + + items: FxHashMap, llvm::Linkage>, +} + +impl<'tcx> CodegenUnit<'tcx> { + pub fn new(name: InternedString, + items: FxHashMap, llvm::Linkage>) + -> Self { + CodegenUnit { + name: name, + items: items, + } + } + + pub fn empty(name: InternedString) -> Self { + Self::new(name, FxHashMap()) + } + + pub fn contains_item(&self, item: &TransItem<'tcx>) -> bool { + self.items.contains_key(item) + } + + pub fn name(&self) -> &str { + &self.name + } + + pub fn items(&self) -> &FxHashMap, llvm::Linkage> { + &self.items + } + + pub fn work_product_id(&self) -> Arc { + Arc::new(WorkProductId(self.name().to_string())) + } + + pub fn work_product_dep_node(&self) -> DepNode { + DepNode::WorkProduct(self.work_product_id()) + } + + pub fn compute_symbol_name_hash(&self, tcx: TyCtxt, symbol_map: &SymbolMap) -> u64 { + let mut state = DefaultHasher::new(); + let all_items = self.items_in_deterministic_order(tcx, symbol_map); + for (item, _) in all_items { + let symbol_name = symbol_map.get(item).unwrap(); + symbol_name.hash(&mut state); + } + state.finish() + } + + pub fn items_in_deterministic_order(&self, + tcx: TyCtxt, + symbol_map: &SymbolMap) + -> Vec<(TransItem<'tcx>, llvm::Linkage)> { + let mut items: Vec<(TransItem<'tcx>, llvm::Linkage)> = + self.items.iter().map(|(item, linkage)| (*item, *linkage)).collect(); + + // The codegen tests rely on items being process in the same order as + // they appear in the file, so for local items, we sort by node_id first + items.sort_by(|&(trans_item1, _), &(trans_item2, _)| { + let node_id1 = local_node_id(tcx, trans_item1); + let node_id2 = local_node_id(tcx, trans_item2); + + match (node_id1, node_id2) { + (None, None) => { + let symbol_name1 = symbol_map.get(trans_item1).unwrap(); + let symbol_name2 = symbol_map.get(trans_item2).unwrap(); + symbol_name1.cmp(symbol_name2) + } + // In the following two cases we can avoid looking up the symbol + (None, Some(_)) => Ordering::Less, + (Some(_), None) => 
Ordering::Greater, + (Some(node_id1), Some(node_id2)) => { + let ordering = node_id1.cmp(&node_id2); + + if ordering != Ordering::Equal { + return ordering; + } + + let symbol_name1 = symbol_map.get(trans_item1).unwrap(); + let symbol_name2 = symbol_map.get(trans_item2).unwrap(); + symbol_name1.cmp(symbol_name2) + } + } + }); + + return items; + + fn local_node_id(tcx: TyCtxt, trans_item: TransItem) -> Option { + match trans_item { + TransItem::Fn(instance) => { + tcx.map.as_local_node_id(instance.def) + } + TransItem::Static(node_id) => Some(node_id), + TransItem::DropGlue(_) => None, + } + } + } +} + + +// Anything we can't find a proper codegen unit for goes into this. +const FALLBACK_CODEGEN_UNIT: &'static str = "__rustc_fallback_codegen_unit"; + +pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, + trans_items: I, + strategy: PartitioningStrategy, + inlining_map: &InliningMap<'tcx>) + -> Vec> + where I: Iterator> +{ + let tcx = scx.tcx(); + + // In the first step, we place all regular translation items into their + // respective 'home' codegen unit. Regular translation items are all + // functions and statics defined in the local crate. + let mut initial_partitioning = place_root_translation_items(scx, + trans_items); + + debug_dump(scx, "INITIAL PARTITONING:", initial_partitioning.codegen_units.iter()); + + // If the partitioning should produce a fixed count of codegen units, merge + // until that count is reached. + if let PartitioningStrategy::FixedUnitCount(count) = strategy { + merge_codegen_units(&mut initial_partitioning, count, &tcx.crate_name.as_str()); + + debug_dump(scx, "POST MERGING:", initial_partitioning.codegen_units.iter()); + } + + // In the next step, we use the inlining map to determine which addtional + // translation items have to go into each codegen unit. These additional + // translation items can be drop-glue, functions from external crates, and + // local functions the definition of which is marked with #[inline]. 
+ let post_inlining = place_inlined_translation_items(initial_partitioning, + inlining_map); + + debug_dump(scx, "POST INLINING:", post_inlining.0.iter()); + + // Finally, sort by codegen unit name, so that we get deterministic results + let mut result = post_inlining.0; + result.sort_by(|cgu1, cgu2| { + (&cgu1.name[..]).cmp(&cgu2.name[..]) + }); + + result +} + +struct PreInliningPartitioning<'tcx> { + codegen_units: Vec>, + roots: FxHashSet>, +} + +struct PostInliningPartitioning<'tcx>(Vec>); + +fn place_root_translation_items<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, + trans_items: I) + -> PreInliningPartitioning<'tcx> + where I: Iterator> +{ + let tcx = scx.tcx(); + let mut roots = FxHashSet(); + let mut codegen_units = FxHashMap(); + + for trans_item in trans_items { + let is_root = !trans_item.is_instantiated_only_on_demand(tcx); + + if is_root { + let characteristic_def_id = characteristic_def_id_of_trans_item(scx, trans_item); + let is_volatile = trans_item.is_generic_fn(); + + let codegen_unit_name = match characteristic_def_id { + Some(def_id) => compute_codegen_unit_name(tcx, def_id, is_volatile), + None => Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(), + }; + + let make_codegen_unit = || { + CodegenUnit::empty(codegen_unit_name.clone()) + }; + + let mut codegen_unit = codegen_units.entry(codegen_unit_name.clone()) + .or_insert_with(make_codegen_unit); + + let linkage = match trans_item.explicit_linkage(tcx) { + Some(explicit_linkage) => explicit_linkage, + None => { + match trans_item { + TransItem::Static(..) => llvm::ExternalLinkage, + TransItem::DropGlue(..) => unreachable!(), + // Is there any benefit to using ExternalLinkage?: + TransItem::Fn(ref instance) => { + if instance.substs.types().next().is_none() { + // This is a non-generic functions, we always + // make it visible externally on the chance that + // it might be used in another codegen unit. 
+ // Later on base::internalize_symbols() will + // assign "internal" linkage to those symbols + // that are not referenced from other codegen + // units (and are not publicly visible). + llvm::ExternalLinkage + } else { + // In the current setup, generic functions cannot + // be roots. + unreachable!() + } + } + } + } + }; + + codegen_unit.items.insert(trans_item, linkage); + roots.insert(trans_item); + } + } + + // always ensure we have at least one CGU; otherwise, if we have a + // crate with just types (for example), we could wind up with no CGU + if codegen_units.is_empty() { + let codegen_unit_name = Symbol::intern(FALLBACK_CODEGEN_UNIT).as_str(); + codegen_units.entry(codegen_unit_name.clone()) + .or_insert_with(|| CodegenUnit::empty(codegen_unit_name.clone())); + } + + PreInliningPartitioning { + codegen_units: codegen_units.into_iter() + .map(|(_, codegen_unit)| codegen_unit) + .collect(), + roots: roots, + } +} + +fn merge_codegen_units<'tcx>(initial_partitioning: &mut PreInliningPartitioning<'tcx>, + target_cgu_count: usize, + crate_name: &str) { + assert!(target_cgu_count >= 1); + let codegen_units = &mut initial_partitioning.codegen_units; + + // Merge the two smallest codegen units until the target size is reached. + // Note that "size" is estimated here rather inaccurately as the number of + // translation items in a given unit. This could be improved on. 
+ while codegen_units.len() > target_cgu_count { + // Sort small cgus to the back + codegen_units.sort_by_key(|cgu| -(cgu.items.len() as i64)); + let smallest = codegen_units.pop().unwrap(); + let second_smallest = codegen_units.last_mut().unwrap(); + + for (k, v) in smallest.items.into_iter() { + second_smallest.items.insert(k, v); + } + } + + for (index, cgu) in codegen_units.iter_mut().enumerate() { + cgu.name = numbered_codegen_unit_name(crate_name, index); + } + + // If the initial partitioning contained less than target_cgu_count to begin + // with, we won't have enough codegen units here, so add a empty units until + // we reach the target count + while codegen_units.len() < target_cgu_count { + let index = codegen_units.len(); + codegen_units.push( + CodegenUnit::empty(numbered_codegen_unit_name(crate_name, index))); + } +} + +fn place_inlined_translation_items<'tcx>(initial_partitioning: PreInliningPartitioning<'tcx>, + inlining_map: &InliningMap<'tcx>) + -> PostInliningPartitioning<'tcx> { + let mut new_partitioning = Vec::new(); + + for codegen_unit in &initial_partitioning.codegen_units[..] 
{ + // Collect all items that need to be available in this codegen unit + let mut reachable = FxHashSet(); + for root in codegen_unit.items.keys() { + follow_inlining(*root, inlining_map, &mut reachable); + } + + let mut new_codegen_unit = + CodegenUnit::empty(codegen_unit.name.clone()); + + // Add all translation items that are not already there + for trans_item in reachable { + if let Some(linkage) = codegen_unit.items.get(&trans_item) { + // This is a root, just copy it over + new_codegen_unit.items.insert(trans_item, *linkage); + } else if initial_partitioning.roots.contains(&trans_item) { + // This item will be instantiated in some other codegen unit, + // so we just add it here with AvailableExternallyLinkage + // FIXME(mw): I have not seen it happening yet but having + // available_externally here could potentially lead + // to the same problem with exception handling tables + // as in the case below. + new_codegen_unit.items.insert(trans_item, + llvm::AvailableExternallyLinkage); + } else if trans_item.is_from_extern_crate() && !trans_item.is_generic_fn() { + // FIXME(mw): It would be nice if we could mark these as + // `AvailableExternallyLinkage`, since they should have + // been instantiated in the extern crate. But this + // sometimes leads to crashes on Windows because LLVM + // does not handle exception handling table instantiation + // reliably in that case. + new_codegen_unit.items.insert(trans_item, llvm::InternalLinkage); + } else { + // We can't be sure if this will also be instantiated + // somewhere else, so we add an instance here with + // InternalLinkage so we don't get any conflicts. 
+ new_codegen_unit.items.insert(trans_item, + llvm::InternalLinkage); + } + } + + new_partitioning.push(new_codegen_unit); + } + + return PostInliningPartitioning(new_partitioning); + + fn follow_inlining<'tcx>(trans_item: TransItem<'tcx>, + inlining_map: &InliningMap<'tcx>, + visited: &mut FxHashSet>) { + if !visited.insert(trans_item) { + return; + } + + inlining_map.with_inlining_candidates(trans_item, |target| { + follow_inlining(target, inlining_map, visited); + }); + } +} + +fn characteristic_def_id_of_trans_item<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, + trans_item: TransItem<'tcx>) + -> Option { + let tcx = scx.tcx(); + match trans_item { + TransItem::Fn(instance) => { + // If this is a method, we want to put it into the same module as + // its self-type. If the self-type does not provide a characteristic + // DefId, we use the location of the impl after all. + + if tcx.trait_of_item(instance.def).is_some() { + let self_ty = instance.substs.type_at(0); + // This is an implementation of a trait method. 
+ return characteristic_def_id_of_type(self_ty).or(Some(instance.def)); + } + + if let Some(impl_def_id) = tcx.impl_of_method(instance.def) { + // This is a method within an inherent impl, find out what the + // self-type is: + let impl_self_ty = tcx.item_type(impl_def_id); + let impl_self_ty = tcx.erase_regions(&impl_self_ty); + let impl_self_ty = monomorphize::apply_param_substs(scx, + instance.substs, + &impl_self_ty); + + if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) { + return Some(def_id); + } + } + + Some(instance.def) + } + TransItem::DropGlue(dg) => characteristic_def_id_of_type(dg.ty()), + TransItem::Static(node_id) => Some(tcx.map.local_def_id(node_id)), + } +} + +fn compute_codegen_unit_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + volatile: bool) + -> InternedString { + // Unfortunately we cannot just use the `ty::item_path` infrastructure here + // because we need paths to modules and the DefIds of those are not + // available anymore for external items. + let mut mod_path = String::with_capacity(64); + + let def_path = tcx.def_path(def_id); + mod_path.push_str(&tcx.crate_name(def_path.krate).as_str()); + + for part in tcx.def_path(def_id) + .data + .iter() + .take_while(|part| { + match part.data { + DefPathData::Module(..) 
=> true, + _ => false, + } + }) { + mod_path.push_str("-"); + mod_path.push_str(&part.data.as_interned_str()); + } + + if volatile { + mod_path.push_str(".volatile"); + } + + return Symbol::intern(&mod_path[..]).as_str(); +} + +fn numbered_codegen_unit_name(crate_name: &str, index: usize) -> InternedString { + Symbol::intern(&format!("{}{}{}", crate_name, NUMBERED_CODEGEN_UNIT_MARKER, index)).as_str() +} + +fn debug_dump<'a, 'b, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>, + label: &str, + cgus: I) + where I: Iterator>, + 'tcx: 'a + 'b +{ + if cfg!(debug_assertions) { + debug!("{}", label); + for cgu in cgus { + let symbol_map = SymbolMap::build(scx, cgu.items + .iter() + .map(|(&trans_item, _)| trans_item)); + debug!("CodegenUnit {}:", cgu.name); + + for (trans_item, linkage) in &cgu.items { + let symbol_name = symbol_map.get_or_compute(scx, *trans_item); + let symbol_hash_start = symbol_name.rfind('h'); + let symbol_hash = symbol_hash_start.map(|i| &symbol_name[i ..]) + .unwrap_or(""); + + debug!(" - {} [{:?}] [{}]", + trans_item.to_string(scx.tcx()), + linkage, + symbol_hash); + } + + debug!(""); + } + } +} diff --git a/src/librustc_trans/save/dump_csv.rs b/src/librustc_trans/save/dump_csv.rs deleted file mode 100644 index c34013a7bbbb1..0000000000000 --- a/src/librustc_trans/save/dump_csv.rs +++ /dev/null @@ -1,1216 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Output a CSV file containing the output from rustc's analysis. The data is -//! primarily designed to be used as input to the DXR tool, specifically its -//! Rust plugin. It could also be used by IDEs or other code browsing, search, or -//! cross-referencing tools. -//! -//! 
Dumping the analysis is implemented by walking the AST and getting a bunch of -//! info out from all over the place. We use Def IDs to identify objects. The -//! tricky part is getting syntactic (span, source text) and semantic (reference -//! Def IDs) information for parts of expressions which the compiler has discarded. -//! E.g., in a path `foo::bar::baz`, the compiler only keeps a span for the whole -//! path and a reference to `baz`, but we want spans and references for all three -//! idents. -//! -//! SpanUtils is used to manipulate spans. In particular, to extract sub-spans -//! from spans (e.g., the span for `bar` from the above example path). -//! Recorder is used for recording the output in csv format. FmtStrs separates -//! the format of the output away from extracting it from the compiler. -//! DumpCsvVisitor walks the AST and processes it. - - -use super::{escape, generated_code, recorder, SaveContext, PathCollector, Data}; - -use session::Session; - -use middle::def; -use middle::def_id::DefId; -use middle::ty; - -use std::fs::File; - -use syntax::ast::{self, NodeId}; -use syntax::codemap::*; -use syntax::parse::token::{self, keywords}; -use syntax::visit::{self, Visitor}; -use syntax::print::pprust::{path_to_string, ty_to_string}; -use syntax::ptr::P; - -use rustc_front::lowering::{lower_expr, LoweringContext}; - -use super::span_utils::SpanUtils; -use super::recorder::{Recorder, FmtStrs}; - -macro_rules! 
down_cast_data { - ($id:ident, $kind:ident, $this:ident, $sp:expr) => { - let $id = if let super::Data::$kind(data) = $id { - data - } else { - $this.sess.span_bug($sp, &format!("unexpected data kind: {:?}", $id)); - } - }; -} - -pub struct DumpCsvVisitor<'l, 'tcx: 'l> { - save_ctxt: SaveContext<'l, 'tcx>, - sess: &'l Session, - tcx: &'l ty::ctxt<'tcx>, - analysis: &'l ty::CrateAnalysis<'l>, - - span: SpanUtils<'l>, - fmt: FmtStrs<'l, 'tcx>, - - cur_scope: NodeId, -} - -impl <'l, 'tcx> DumpCsvVisitor<'l, 'tcx> { - pub fn new(tcx: &'l ty::ctxt<'tcx>, - lcx: &'l LoweringContext<'l>, - analysis: &'l ty::CrateAnalysis<'l>, - output_file: Box) - -> DumpCsvVisitor<'l, 'tcx> { - let span_utils = SpanUtils::new(&tcx.sess); - DumpCsvVisitor { - sess: &tcx.sess, - tcx: tcx, - save_ctxt: SaveContext::from_span_utils(tcx, lcx, span_utils.clone()), - analysis: analysis, - span: span_utils.clone(), - fmt: FmtStrs::new(box Recorder { - out: output_file, - dump_spans: false, - }, - span_utils, - tcx), - cur_scope: 0, - } - } - - fn nest(&mut self, scope_id: NodeId, f: F) - where F: FnOnce(&mut DumpCsvVisitor<'l, 'tcx>) - { - let parent_scope = self.cur_scope; - self.cur_scope = scope_id; - f(self); - self.cur_scope = parent_scope; - } - - pub fn dump_crate_info(&mut self, name: &str, krate: &ast::Crate) { - let source_file = self.tcx.sess.local_crate_source_file.as_ref(); - let crate_root = match source_file { - Some(source_file) => match source_file.file_name() { - Some(_) => source_file.parent().unwrap().display().to_string(), - None => source_file.display().to_string(), - }, - None => "".to_owned(), - }; - - // The current crate. - self.fmt.crate_str(krate.span, name, &crate_root); - - // Dump info about all the external crates referenced from this crate. - for c in &self.save_ctxt.get_external_crates() { - self.fmt.external_crate_str(krate.span, &c.name, c.number); - } - self.fmt.recorder.record("end_external_crates\n"); - } - - // Return all non-empty prefixes of a path. 
- // For each prefix, we return the span for the last segment in the prefix and - // a str representation of the entire prefix. - fn process_path_prefixes(&self, path: &ast::Path) -> Vec<(Span, String)> { - let spans = self.span.spans_for_path_segments(path); - - // Paths to enums seem to not match their spans - the span includes all the - // variants too. But they seem to always be at the end, so I hope we can cope with - // always using the first ones. So, only error out if we don't have enough spans. - // What could go wrong...? - if spans.len() < path.segments.len() { - error!("Mis-calculated spans for path '{}'. Found {} spans, expected {}. Found spans:", - path_to_string(path), - spans.len(), - path.segments.len()); - for s in &spans { - let loc = self.sess.codemap().lookup_char_pos(s.lo); - error!(" '{}' in {}, line {}", - self.span.snippet(*s), - loc.file.name, - loc.line); - } - return vec!(); - } - - let mut result: Vec<(Span, String)> = vec!(); - - let mut segs = vec!(); - for (i, (seg, span)) in path.segments.iter().zip(&spans).enumerate() { - segs.push(seg.clone()); - let sub_path = ast::Path { - span: *span, // span for the last segment - global: path.global, - segments: segs, - }; - let qualname = if i == 0 && path.global { - format!("::{}", path_to_string(&sub_path)) - } else { - path_to_string(&sub_path) - }; - result.push((*span, qualname)); - segs = sub_path.segments; - } - - result - } - - // The global arg allows us to override the global-ness of the path (which - // actually means 'does the path start with `::`', rather than 'is the path - // semantically global). We use the override for `use` imports (etc.) where - // the syntax is non-global, but the semantics are global. 
- fn write_sub_paths(&mut self, path: &ast::Path, global: bool) { - let sub_paths = self.process_path_prefixes(path); - for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() { - let qualname = if i == 0 && global && !path.global { - format!("::{}", qualname) - } else { - qualname.clone() - }; - self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope); - } - } - - // As write_sub_paths, but does not process the last ident in the path (assuming it - // will be processed elsewhere). See note on write_sub_paths about global. - fn write_sub_paths_truncated(&mut self, path: &ast::Path, global: bool) { - let sub_paths = self.process_path_prefixes(path); - let len = sub_paths.len(); - if len <= 1 { - return; - } - - let sub_paths = &sub_paths[..len-1]; - for (i, &(ref span, ref qualname)) in sub_paths.iter().enumerate() { - let qualname = if i == 0 && global && !path.global { - format!("::{}", qualname) - } else { - qualname.clone() - }; - self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope); - } - } - - // As write_sub_paths, but expects a path of the form module_path::trait::method - // Where trait could actually be a struct too. - fn write_sub_path_trait_truncated(&mut self, path: &ast::Path) { - let sub_paths = self.process_path_prefixes(path); - let len = sub_paths.len(); - if len <= 1 { - return; - } - let sub_paths = &sub_paths[.. 
(len-1)]; - - // write the trait part of the sub-path - let (ref span, ref qualname) = sub_paths[len-2]; - self.fmt.sub_type_ref_str(path.span, *span, &qualname); - - // write the other sub-paths - if len <= 2 { - return; - } - let sub_paths = &sub_paths[..len-2]; - for &(ref span, ref qualname) in sub_paths { - self.fmt.sub_mod_ref_str(path.span, *span, &qualname, self.cur_scope); - } - } - - // looks up anything, not just a type - fn lookup_type_ref(&self, ref_id: NodeId) -> Option { - if !self.tcx.def_map.borrow().contains_key(&ref_id) { - self.sess.bug(&format!("def_map has no key for {} in lookup_type_ref", - ref_id)); - } - let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def(); - match def { - def::DefPrimTy(..) => None, - def::DefSelfTy(..) => None, - _ => Some(def.def_id()), - } - } - - fn lookup_def_kind(&self, ref_id: NodeId, span: Span) -> Option { - let def_map = self.tcx.def_map.borrow(); - if !def_map.contains_key(&ref_id) { - self.sess.span_bug(span, - &format!("def_map has no key for {} in lookup_def_kind", - ref_id)); - } - let def = def_map.get(&ref_id).unwrap().full_def(); - match def { - def::DefMod(_) | - def::DefForeignMod(_) => Some(recorder::ModRef), - def::DefStruct(_) => Some(recorder::TypeRef), - def::DefTy(..) | - def::DefAssociatedTy(..) | - def::DefTrait(_) => Some(recorder::TypeRef), - def::DefStatic(_, _) | - def::DefConst(_) | - def::DefAssociatedConst(..) | - def::DefLocal(..) | - def::DefVariant(_, _, _) | - def::DefUpvar(..) => Some(recorder::VarRef), - - def::DefFn(..) => Some(recorder::FnRef), - - def::DefSelfTy(..) | - def::DefLabel(_) | - def::DefTyParam(..) | - def::DefMethod(..) 
| - def::DefPrimTy(_) | - def::DefErr => { - self.sess.span_bug(span, - &format!("lookup_def_kind for unexpected item: {:?}", def)); - } - } - } - - fn process_formals(&mut self, formals: &Vec, qualname: &str) { - for arg in formals { - self.visit_pat(&arg.pat); - let mut collector = PathCollector::new(); - collector.visit_pat(&arg.pat); - let span_utils = self.span.clone(); - for &(id, ref p, _, _) in &collector.collected_paths { - let typ = self.tcx.node_types().get(&id).unwrap().to_string(); - // get the span only for the name of the variable (I hope the path is only ever a - // variable name, but who knows?) - self.fmt.formal_str(p.span, - span_utils.span_for_last_ident(p.span), - id, - qualname, - &path_to_string(p), - &typ); - } - } - } - - fn process_method(&mut self, - sig: &ast::MethodSig, - body: Option<&ast::Block>, - id: ast::NodeId, - name: ast::Name, - span: Span) { - if generated_code(span) { - return; - } - - debug!("process_method: {}:{}", id, name); - - let method_data = self.save_ctxt.get_method_data(id, name, span); - - if body.is_some() { - self.fmt.method_str(span, - Some(method_data.span), - method_data.id, - &method_data.qualname, - method_data.declaration, - method_data.scope); - self.process_formals(&sig.decl.inputs, &method_data.qualname); - } else { - self.fmt.method_decl_str(span, - Some(method_data.span), - method_data.id, - &method_data.qualname, - method_data.scope); - } - - // walk arg and return types - for arg in &sig.decl.inputs { - self.visit_ty(&arg.ty); - } - - if let ast::Return(ref ret_ty) = sig.decl.output { - self.visit_ty(ret_ty); - } - - // walk the fn body - if let Some(body) = body { - self.nest(id, |v| v.visit_block(body)); - } - - self.process_generic_params(&sig.generics, span, &method_data.qualname, id); - } - - fn process_trait_ref(&mut self, trait_ref: &ast::TraitRef) { - let trait_ref_data = self.save_ctxt.get_trait_ref_data(trait_ref, self.cur_scope); - if let Some(trait_ref_data) = trait_ref_data { - 
self.fmt.ref_str(recorder::TypeRef, - trait_ref.path.span, - Some(trait_ref_data.span), - trait_ref_data.ref_id, - trait_ref_data.scope); - visit::walk_path(self, &trait_ref.path); - } - } - - fn process_struct_field_def(&mut self, field: &ast::StructField, parent_id: NodeId) { - let field_data = self.save_ctxt.get_field_data(field, parent_id); - if let Some(field_data) = field_data { - self.fmt.field_str(field.span, - Some(field_data.span), - field_data.id, - &field_data.name, - &field_data.qualname, - &field_data.type_value, - field_data.scope); - } - } - - // Dump generic params bindings, then visit_generics - fn process_generic_params(&mut self, - generics: &ast::Generics, - full_span: Span, - prefix: &str, - id: NodeId) { - // We can't only use visit_generics since we don't have spans for param - // bindings, so we reparse the full_span to get those sub spans. - // However full span is the entire enum/fn/struct block, so we only want - // the first few to match the number of generics we're looking for. 
- let param_sub_spans = self.span.spans_for_ty_params(full_span, - (generics.ty_params.len() as isize)); - for (param, param_ss) in generics.ty_params.iter().zip(param_sub_spans) { - // Append $id to name to make sure each one is unique - let name = format!("{}::{}${}", - prefix, - escape(self.span.snippet(param_ss)), - id); - self.fmt.typedef_str(full_span, Some(param_ss), param.id, &name, ""); - } - self.visit_generics(generics); - } - - fn process_fn(&mut self, - item: &ast::Item, - decl: &ast::FnDecl, - ty_params: &ast::Generics, - body: &ast::Block) { - let fn_data = self.save_ctxt.get_item_data(item); - down_cast_data!(fn_data, FunctionData, self, item.span); - self.fmt.fn_str(item.span, - Some(fn_data.span), - fn_data.id, - &fn_data.qualname, - fn_data.scope); - - - self.process_formals(&decl.inputs, &fn_data.qualname); - self.process_generic_params(ty_params, item.span, &fn_data.qualname, item.id); - - for arg in &decl.inputs { - self.visit_ty(&arg.ty); - } - - if let ast::Return(ref ret_ty) = decl.output { - self.visit_ty(&ret_ty); - } - - self.nest(item.id, |v| v.visit_block(&body)); - } - - fn process_static_or_const_item(&mut self, item: &ast::Item, typ: &ast::Ty, expr: &ast::Expr) { - let var_data = self.save_ctxt.get_item_data(item); - down_cast_data!(var_data, VariableData, self, item.span); - self.fmt.static_str(item.span, - Some(var_data.span), - var_data.id, - &var_data.name, - &var_data.qualname, - &var_data.value, - &var_data.type_value, - var_data.scope); - - self.visit_ty(&typ); - self.visit_expr(expr); - } - - fn process_const(&mut self, - id: ast::NodeId, - name: ast::Name, - span: Span, - typ: &ast::Ty, - expr: &ast::Expr) { - let qualname = format!("::{}", self.tcx.map.path_to_string(id)); - - let sub_span = self.span.sub_span_after_keyword(span, keywords::Const); - - self.fmt.static_str(span, - sub_span, - id, - &name.as_str(), - &qualname, - &self.span.snippet(expr.span), - &ty_to_string(&*typ), - self.cur_scope); - - // walk type and 
init value - self.visit_ty(typ); - self.visit_expr(expr); - } - - fn process_struct(&mut self, - item: &ast::Item, - def: &ast::VariantData, - ty_params: &ast::Generics) { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - - let val = self.span.snippet(item.span); - let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Struct); - self.fmt.struct_str(item.span, - sub_span, - item.id, - def.id(), - &qualname, - self.cur_scope, - &val); - - // fields - for field in def.fields() { - self.process_struct_field_def(field, item.id); - self.visit_ty(&field.node.ty); - } - - self.process_generic_params(ty_params, item.span, &qualname, item.id); - } - - fn process_enum(&mut self, - item: &ast::Item, - enum_definition: &ast::EnumDef, - ty_params: &ast::Generics) { - let enum_data = self.save_ctxt.get_item_data(item); - down_cast_data!(enum_data, EnumData, self, item.span); - self.fmt.enum_str(item.span, - Some(enum_data.span), - enum_data.id, - &enum_data.qualname, - enum_data.scope, - &enum_data.value); - - for variant in &enum_definition.variants { - let name = &variant.node.name.name.as_str(); - let mut qualname = enum_data.qualname.clone(); - qualname.push_str("::"); - qualname.push_str(name); - let val = self.span.snippet(variant.span); - - self.fmt.struct_variant_str(variant.span, - self.span.span_for_first_ident(variant.span), - variant.node.data.id(), - variant.node.data.id(), - &qualname, - &enum_data.qualname, - &val, - enum_data.id); - - for field in variant.node.data.fields() { - self.process_struct_field_def(field, variant.node.data.id()); - self.visit_ty(&*field.node.ty); - } - } - self.process_generic_params(ty_params, item.span, &enum_data.qualname, enum_data.id); - } - - fn process_impl(&mut self, - item: &ast::Item, - type_parameters: &ast::Generics, - trait_ref: &Option, - typ: &ast::Ty, - impl_items: &[P]) { - let impl_data = self.save_ctxt.get_item_data(item); - down_cast_data!(impl_data, ImplData, self, item.span); - 
match impl_data.self_ref { - Some(ref self_ref) => { - self.fmt.ref_str(recorder::TypeRef, - item.span, - Some(self_ref.span), - self_ref.ref_id, - self_ref.scope); - } - None => { - self.visit_ty(&typ); - } - } - if let Some(ref trait_ref_data) = impl_data.trait_ref { - self.fmt.ref_str(recorder::TypeRef, - item.span, - Some(trait_ref_data.span), - trait_ref_data.ref_id, - trait_ref_data.scope); - visit::walk_path(self, &trait_ref.as_ref().unwrap().path); - } - - self.fmt.impl_str(item.span, - Some(impl_data.span), - impl_data.id, - impl_data.self_ref.map(|data| data.ref_id), - impl_data.trait_ref.map(|data| data.ref_id), - impl_data.scope); - - self.process_generic_params(type_parameters, item.span, "", item.id); - for impl_item in impl_items { - self.visit_impl_item(impl_item); - } - } - - fn process_trait(&mut self, - item: &ast::Item, - generics: &ast::Generics, - trait_refs: &ast::TyParamBounds, - methods: &[P]) { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - let val = self.span.snippet(item.span); - let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Trait); - self.fmt.trait_str(item.span, - sub_span, - item.id, - &qualname, - self.cur_scope, - &val); - - // super-traits - for super_bound in trait_refs.iter() { - let trait_ref = match *super_bound { - ast::TraitTyParamBound(ref trait_ref, _) => { - trait_ref - } - ast::RegionTyParamBound(..) 
=> { - continue; - } - }; - - let trait_ref = &trait_ref.trait_ref; - match self.lookup_type_ref(trait_ref.ref_id) { - Some(id) => { - let sub_span = self.span.sub_span_for_type_name(trait_ref.path.span); - self.fmt.ref_str(recorder::TypeRef, - trait_ref.path.span, - sub_span, - id, - self.cur_scope); - self.fmt.inherit_str(trait_ref.path.span, sub_span, id, item.id); - } - None => (), - } - } - - // walk generics and methods - self.process_generic_params(generics, item.span, &qualname, item.id); - for method in methods { - self.visit_trait_item(method) - } - } - - // `item` is the module in question, represented as an item. - fn process_mod(&mut self, item: &ast::Item) { - let mod_data = self.save_ctxt.get_item_data(item); - down_cast_data!(mod_data, ModData, self, item.span); - self.fmt.mod_str(item.span, - Some(mod_data.span), - mod_data.id, - &mod_data.qualname, - mod_data.scope, - &mod_data.filename); - } - - fn process_path(&mut self, id: NodeId, path: &ast::Path, ref_kind: Option) { - if generated_code(path.span) { - return; - } - - let path_data = self.save_ctxt.get_path_data(id, path); - let path_data = match path_data { - Some(pd) => pd, - None => { - self.tcx.sess.span_bug(path.span, - &format!("Unexpected def kind while looking up path in \ - `{}`", - self.span.snippet(path.span))) - } - }; - match path_data { - Data::VariableRefData(ref vrd) => { - self.fmt.ref_str(ref_kind.unwrap_or(recorder::VarRef), - path.span, - Some(vrd.span), - vrd.ref_id, - vrd.scope); - - } - Data::TypeRefData(ref trd) => { - self.fmt.ref_str(recorder::TypeRef, - path.span, - Some(trd.span), - trd.ref_id, - trd.scope); - } - Data::MethodCallData(ref mcd) => { - self.fmt.meth_call_str(path.span, - Some(mcd.span), - mcd.ref_id, - mcd.decl_id, - mcd.scope); - } - Data::FunctionCallData(fcd) => { - self.fmt.fn_call_str(path.span, Some(fcd.span), fcd.ref_id, fcd.scope); - } - _ => { - self.sess.span_bug(path.span, - &format!("Unexpected data: {:?}", path_data)); - } - } - - // 
Modules or types in the path prefix. - let def_map = self.tcx.def_map.borrow(); - let def = def_map.get(&id).unwrap().full_def(); - match def { - def::DefMethod(did) => { - let ti = self.tcx.impl_or_trait_item(did); - if let ty::MethodTraitItem(m) = ti { - if m.explicit_self == ty::ExplicitSelfCategory::Static { - self.write_sub_path_trait_truncated(path); - } - } - } - def::DefLocal(..) | - def::DefStatic(_,_) | - def::DefConst(..) | - def::DefAssociatedConst(..) | - def::DefStruct(_) | - def::DefVariant(..) | - def::DefFn(..) => self.write_sub_paths_truncated(path, false), - _ => {} - } - } - - fn process_struct_lit(&mut self, - ex: &ast::Expr, - path: &ast::Path, - fields: &Vec, - variant: ty::VariantDef, - base: &Option>) { - if generated_code(path.span) { - return - } - - self.write_sub_paths_truncated(path, false); - - if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) { - down_cast_data!(struct_lit_data, TypeRefData, self, ex.span); - self.fmt.ref_str(recorder::TypeRef, - ex.span, - Some(struct_lit_data.span), - struct_lit_data.ref_id, - struct_lit_data.scope); - let scope = self.save_ctxt.enclosing_scope(ex.id); - - for field in fields { - if generated_code(field.ident.span) { - continue; - } - - let field_data = self.save_ctxt.get_field_ref_data(field, variant, scope); - self.fmt.ref_str(recorder::VarRef, - field.ident.span, - Some(field_data.span), - field_data.ref_id, - field_data.scope); - - self.visit_expr(&field.expr) - } - } - - walk_list!(self, visit_expr, base); - } - - fn process_method_call(&mut self, ex: &ast::Expr, args: &Vec>) { - if let Some(call_data) = self.save_ctxt.get_expr_data(ex) { - down_cast_data!(call_data, MethodCallData, self, ex.span); - self.fmt.meth_call_str(ex.span, - Some(call_data.span), - call_data.ref_id, - call_data.decl_id, - call_data.scope); - } - - // walk receiver and args - walk_list!(self, visit_expr, args); - } - - fn process_pat(&mut self, p: &ast::Pat) { - if generated_code(p.span) { - return; - } - 
- match p.node { - ast::PatStruct(ref path, ref fields, _) => { - visit::walk_path(self, path); - let adt = self.tcx.node_id_to_type(p.id).ty_adt_def().unwrap(); - let def = self.tcx.def_map.borrow()[&p.id].full_def(); - let variant = adt.variant_of_def(def); - - for &Spanned { node: ref field, span } in fields { - if generated_code(span) { - continue; - } - - let sub_span = self.span.span_for_first_ident(span); - if let Some(f) = variant.find_field_named(field.ident.name) { - self.fmt.ref_str(recorder::VarRef, span, sub_span, f.did, self.cur_scope); - } - self.visit_pat(&field.pat); - } - } - _ => visit::walk_pat(self, p), - } - } - - - fn process_var_decl(&mut self, p: &ast::Pat, value: String) { - // The local could declare multiple new vars, we must walk the - // pattern and collect them all. - let mut collector = PathCollector::new(); - collector.visit_pat(&p); - self.visit_pat(&p); - - for &(id, ref p, immut, _) in &collector.collected_paths { - let value = if immut == ast::MutImmutable { - value.to_string() - } else { - "".to_string() - }; - let types = self.tcx.node_types(); - let typ = types.get(&id).unwrap().to_string(); - // Get the span only for the name of the variable (I hope the path - // is only ever a variable name, but who knows?). - let sub_span = self.span.span_for_last_ident(p.span); - // Rust uses the id of the pattern for var lookups, so we'll use it too. 
- self.fmt.variable_str(p.span, - sub_span, - id, - &path_to_string(p), - &value, - &typ); - } - } -} - -impl<'l, 'tcx, 'v> Visitor<'v> for DumpCsvVisitor<'l, 'tcx> { - fn visit_item(&mut self, item: &ast::Item) { - if generated_code(item.span) { - return - } - - match item.node { - ast::ItemUse(ref use_item) => { - match use_item.node { - ast::ViewPathSimple(ident, ref path) => { - let sub_span = self.span.span_for_last_ident(path.span); - let mod_id = match self.lookup_type_ref(item.id) { - Some(def_id) => { - match self.lookup_def_kind(item.id, path.span) { - Some(kind) => self.fmt.ref_str(kind, - path.span, - sub_span, - def_id, - self.cur_scope), - None => {} - } - Some(def_id) - } - None => None, - }; - - // 'use' always introduces an alias, if there is not an explicit - // one, there is an implicit one. - let sub_span = match self.span.sub_span_after_keyword(use_item.span, - keywords::As) { - Some(sub_span) => Some(sub_span), - None => sub_span, - }; - - self.fmt.use_alias_str(path.span, - sub_span, - item.id, - mod_id, - &ident.name.as_str(), - self.cur_scope); - self.write_sub_paths_truncated(path, true); - } - ast::ViewPathGlob(ref path) => { - // Make a comma-separated list of names of imported modules. - let mut name_string = String::new(); - let glob_map = &self.analysis.glob_map; - let glob_map = glob_map.as_ref().unwrap(); - if glob_map.contains_key(&item.id) { - for n in glob_map.get(&item.id).unwrap() { - if !name_string.is_empty() { - name_string.push_str(", "); - } - name_string.push_str(&n.as_str()); - } - } - - let sub_span = self.span - .sub_span_of_token(path.span, token::BinOp(token::Star)); - self.fmt.use_glob_str(path.span, - sub_span, - item.id, - &name_string, - self.cur_scope); - self.write_sub_paths(path, true); - } - ast::ViewPathList(ref path, ref list) => { - for plid in list { - match plid.node { - ast::PathListIdent { id, .. 
} => { - match self.lookup_type_ref(id) { - Some(def_id) => match self.lookup_def_kind(id, plid.span) { - Some(kind) => { - self.fmt.ref_str(kind, - plid.span, - Some(plid.span), - def_id, - self.cur_scope); - } - None => (), - }, - None => (), - } - } - ast::PathListMod { .. } => (), - } - } - - self.write_sub_paths(path, true); - } - } - } - ast::ItemExternCrate(ref s) => { - let location = match *s { - Some(s) => s.to_string(), - None => item.ident.to_string(), - }; - let alias_span = self.span.span_for_last_ident(item.span); - let cnum = match self.sess.cstore.extern_mod_stmt_cnum(item.id) { - Some(cnum) => cnum, - None => 0, - }; - self.fmt.extern_crate_str(item.span, - alias_span, - item.id, - cnum, - &item.ident.name.as_str(), - &location, - self.cur_scope); - } - ast::ItemFn(ref decl, _, _, _, ref ty_params, ref body) => - self.process_fn(item, &**decl, ty_params, &**body), - ast::ItemStatic(ref typ, _, ref expr) => - self.process_static_or_const_item(item, typ, expr), - ast::ItemConst(ref typ, ref expr) => - self.process_static_or_const_item(item, &typ, &expr), - ast::ItemStruct(ref def, ref ty_params) => self.process_struct(item, def, ty_params), - ast::ItemEnum(ref def, ref ty_params) => self.process_enum(item, def, ty_params), - ast::ItemImpl(_, _, - ref ty_params, - ref trait_ref, - ref typ, - ref impl_items) => { - self.process_impl(item, ty_params, trait_ref, &typ, impl_items) - } - ast::ItemTrait(_, ref generics, ref trait_refs, ref methods) => - self.process_trait(item, generics, trait_refs, methods), - ast::ItemMod(ref m) => { - self.process_mod(item); - self.nest(item.id, |v| visit::walk_mod(v, m)); - } - ast::ItemTy(ref ty, ref ty_params) => { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - let value = ty_to_string(&**ty); - let sub_span = self.span.sub_span_after_keyword(item.span, keywords::Type); - self.fmt.typedef_str(item.span, sub_span, item.id, &qualname, &value); - - self.visit_ty(&**ty); - 
self.process_generic_params(ty_params, item.span, &qualname, item.id); - } - ast::ItemMac(_) => (), - _ => visit::walk_item(self, item), - } - } - - fn visit_generics(&mut self, generics: &ast::Generics) { - for param in generics.ty_params.iter() { - for bound in param.bounds.iter() { - if let ast::TraitTyParamBound(ref trait_ref, _) = *bound { - self.process_trait_ref(&trait_ref.trait_ref); - } - } - if let Some(ref ty) = param.default { - self.visit_ty(&**ty); - } - } - } - - fn visit_trait_item(&mut self, trait_item: &ast::TraitItem) { - match trait_item.node { - ast::ConstTraitItem(ref ty, Some(ref expr)) => { - self.process_const(trait_item.id, - trait_item.ident.name, - trait_item.span, - &*ty, - &*expr); - } - ast::MethodTraitItem(ref sig, ref body) => { - self.process_method(sig, - body.as_ref().map(|x| &**x), - trait_item.id, - trait_item.ident.name, - trait_item.span); - } - ast::ConstTraitItem(_, None) | - ast::TypeTraitItem(..) => {} - } - } - - fn visit_impl_item(&mut self, impl_item: &ast::ImplItem) { - match impl_item.node { - ast::ImplItemKind::Const(ref ty, ref expr) => { - self.process_const(impl_item.id, - impl_item.ident.name, - impl_item.span, - &ty, - &expr); - } - ast::ImplItemKind::Method(ref sig, ref body) => { - self.process_method(sig, - Some(body), - impl_item.id, - impl_item.ident.name, - impl_item.span); - } - ast::ImplItemKind::Type(_) | - ast::ImplItemKind::Macro(_) => {} - } - } - - fn visit_ty(&mut self, t: &ast::Ty) { - if generated_code(t.span) { - return - } - - match t.node { - ast::TyPath(_, ref path) => { - match self.lookup_type_ref(t.id) { - Some(id) => { - let sub_span = self.span.sub_span_for_type_name(t.span); - self.fmt.ref_str(recorder::TypeRef, t.span, sub_span, id, self.cur_scope); - } - None => (), - } - - self.write_sub_paths_truncated(path, false); - - visit::walk_path(self, path); - } - _ => visit::walk_ty(self, t), - } - } - - fn visit_expr(&mut self, ex: &ast::Expr) { - if generated_code(ex.span) { - return - } 
- - match ex.node { - ast::ExprCall(ref _f, ref _args) => { - // Don't need to do anything for function calls, - // because just walking the callee path does what we want. - visit::walk_expr(self, ex); - } - ast::ExprPath(_, ref path) => { - self.process_path(ex.id, path, None); - visit::walk_expr(self, ex); - } - ast::ExprStruct(ref path, ref fields, ref base) => { - let hir_expr = lower_expr(self.save_ctxt.lcx, ex); - let adt = self.tcx.expr_ty(&hir_expr).ty_adt_def().unwrap(); - let def = self.tcx.resolve_expr(&hir_expr); - self.process_struct_lit(ex, path, fields, adt.variant_of_def(def), base) - } - ast::ExprMethodCall(_, _, ref args) => self.process_method_call(ex, args), - ast::ExprField(ref sub_ex, _) => { - if generated_code(sub_ex.span) { - return - } - - self.visit_expr(&sub_ex); - - if let Some(field_data) = self.save_ctxt.get_expr_data(ex) { - down_cast_data!(field_data, VariableRefData, self, ex.span); - self.fmt.ref_str(recorder::VarRef, - ex.span, - Some(field_data.span), - field_data.ref_id, - field_data.scope); - } - } - ast::ExprTupField(ref sub_ex, idx) => { - if generated_code(sub_ex.span) { - return - } - - self.visit_expr(&**sub_ex); - - let hir_node = lower_expr(self.save_ctxt.lcx, sub_ex); - let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty; - match *ty { - ty::TyStruct(def, _) => { - let sub_span = self.span.sub_span_after_token(ex.span, token::Dot); - self.fmt.ref_str(recorder::VarRef, - ex.span, - sub_span, - def.struct_variant().fields[idx.node].did, - self.cur_scope); - } - ty::TyTuple(_) => {} - _ => self.sess.span_bug(ex.span, - &format!("Expected struct or tuple type, found {:?}", - ty)), - } - } - ast::ExprClosure(_, ref decl, ref body) => { - if generated_code(body.span) { - return - } - - let mut id = String::from("$"); - id.push_str(&ex.id.to_string()); - self.process_formals(&decl.inputs, &id); - - // walk arg and return types - for arg in &decl.inputs { - self.visit_ty(&*arg.ty); - } - - if let ast::Return(ref ret_ty) = 
decl.output { - self.visit_ty(&**ret_ty); - } - - // walk the body - self.nest(ex.id, |v| v.visit_block(&**body)); - } - ast::ExprForLoop(ref pattern, ref subexpression, ref block, _) | - ast::ExprWhileLet(ref pattern, ref subexpression, ref block, _) => { - let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi)); - self.process_var_decl(pattern, value); - visit::walk_expr(self, subexpression); - visit::walk_block(self, block); - } - ast::ExprIfLet(ref pattern, ref subexpression, ref block, ref opt_else) => { - let value = self.span.snippet(mk_sp(ex.span.lo, subexpression.span.hi)); - self.process_var_decl(pattern, value); - visit::walk_expr(self, subexpression); - visit::walk_block(self, block); - opt_else.as_ref().map(|el| visit::walk_expr(self, el)); - } - _ => { - visit::walk_expr(self, ex) - } - } - } - - fn visit_mac(&mut self, _: &ast::Mac) { - // Just stop, macros are poison to us. - } - - fn visit_pat(&mut self, p: &ast::Pat) { - self.process_pat(p); - } - - fn visit_arm(&mut self, arm: &ast::Arm) { - let mut collector = PathCollector::new(); - for pattern in &arm.pats { - // collect paths from the arm's patterns - collector.visit_pat(&pattern); - self.visit_pat(&pattern); - } - - // This is to get around borrow checking, because we need mut self to call process_path. 
- let mut paths_to_process = vec![]; - - // process collected paths - for &(id, ref p, immut, ref_kind) in &collector.collected_paths { - let def_map = self.tcx.def_map.borrow(); - if !def_map.contains_key(&id) { - self.sess.span_bug(p.span, - &format!("def_map has no key for {} in visit_arm", id)); - } - let def = def_map.get(&id).unwrap().full_def(); - match def { - def::DefLocal(_, id) => { - let value = if immut == ast::MutImmutable { - self.span.snippet(p.span).to_string() - } else { - "".to_string() - }; - - assert!(p.segments.len() == 1, - "qualified path for local variable def in arm"); - self.fmt.variable_str(p.span, Some(p.span), id, &path_to_string(p), &value, "") - } - def::DefVariant(..) | def::DefTy(..) | def::DefStruct(..) => { - paths_to_process.push((id, p.clone(), Some(ref_kind))) - } - // FIXME(nrc) what are these doing here? - def::DefStatic(_, _) | - def::DefConst(..) | - def::DefAssociatedConst(..) => {} - _ => error!("unexpected definition kind when processing collected paths: {:?}", - def), - } - } - - for &(id, ref path, ref_kind) in &paths_to_process { - self.process_path(id, path, ref_kind); - } - walk_list!(self, visit_expr, &arm.guard); - self.visit_expr(&arm.body); - } - - fn visit_stmt(&mut self, s: &ast::Stmt) { - if generated_code(s.span) { - return - } - - visit::walk_stmt(self, s) - } - - fn visit_local(&mut self, l: &ast::Local) { - if generated_code(l.span) { - return - } - - let value = self.span.snippet(l.span); - self.process_var_decl(&l.pat, value); - - // Just walk the initialiser and type (don't want to walk the pattern again). - walk_list!(self, visit_ty, &l.ty); - walk_list!(self, visit_expr, &l.init); - } -} diff --git a/src/librustc_trans/save/mod.rs b/src/librustc_trans/save/mod.rs deleted file mode 100644 index e1343c73acfa9..0000000000000 --- a/src/librustc_trans/save/mod.rs +++ /dev/null @@ -1,786 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use middle::ty; -use middle::def; -use middle::def_id::DefId; - -use std::env; -use std::fs::{self, File}; -use std::path::{Path, PathBuf}; - -use rustc_front; -use rustc_front::{hir, lowering}; -use rustc::front::map::NodeItem; -use rustc::session::config::CrateType::CrateTypeExecutable; - -use syntax::ast::{self, NodeId}; -use syntax::ast_util; -use syntax::codemap::*; -use syntax::parse::token::{self, keywords}; -use syntax::visit::{self, Visitor}; -use syntax::print::pprust::ty_to_string; - -use self::span_utils::SpanUtils; - - -pub mod span_utils; -pub mod recorder; - -mod dump_csv; - -pub struct SaveContext<'l, 'tcx: 'l> { - tcx: &'l ty::ctxt<'tcx>, - lcx: &'l lowering::LoweringContext<'l>, - span_utils: SpanUtils<'l>, -} - -pub struct CrateData { - pub name: String, - pub number: u32, -} - -/// Data for any entity in the Rust language. The actual data contained varied -/// with the kind of entity being queried. See the nested structs for details. -#[derive(Debug)] -pub enum Data { - /// Data for all kinds of functions and methods. - FunctionData(FunctionData), - /// Data for local and global variables (consts and statics), and fields. - VariableData(VariableData), - /// Data for modules. - ModData(ModData), - /// Data for Enums. - EnumData(EnumData), - /// Data for impls. - ImplData(ImplData), - - /// Data for the use of some variable (e.g., the use of a local variable, which - /// will refere to that variables declaration). - VariableRefData(VariableRefData), - /// Data for a reference to a type or trait. - TypeRefData(TypeRefData), - /// Data for a reference to a module. - ModRefData(ModRefData), - /// Data about a function call. 
- FunctionCallData(FunctionCallData), - /// Data about a method call. - MethodCallData(MethodCallData), -} - -/// Data for all kinds of functions and methods. -#[derive(Debug)] -pub struct FunctionData { - pub id: NodeId, - pub name: String, - pub qualname: String, - pub declaration: Option, - pub span: Span, - pub scope: NodeId, -} - -/// Data for local and global variables (consts and statics). -#[derive(Debug)] -pub struct VariableData { - pub id: NodeId, - pub name: String, - pub qualname: String, - pub span: Span, - pub scope: NodeId, - pub value: String, - pub type_value: String, -} - -/// Data for modules. -#[derive(Debug)] -pub struct ModData { - pub id: NodeId, - pub name: String, - pub qualname: String, - pub span: Span, - pub scope: NodeId, - pub filename: String, -} - -/// Data for enum declarations. -#[derive(Debug)] -pub struct EnumData { - pub id: NodeId, - pub value: String, - pub qualname: String, - pub span: Span, - pub scope: NodeId, -} - -#[derive(Debug)] -pub struct ImplData { - pub id: NodeId, - pub span: Span, - pub scope: NodeId, - // FIXME: I'm not really sure inline data is the best way to do this. Seems - // OK in this case, but generalising leads to returning chunks of AST, which - // feels wrong. - pub trait_ref: Option, - pub self_ref: Option, -} - -/// Data for the use of some item (e.g., the use of a local variable, which -/// will refer to that variables declaration (by ref_id)). -#[derive(Debug)] -pub struct VariableRefData { - pub name: String, - pub span: Span, - pub scope: NodeId, - pub ref_id: DefId, -} - -/// Data for a reference to a type or trait. -#[derive(Debug)] -pub struct TypeRefData { - pub span: Span, - pub scope: NodeId, - pub ref_id: DefId, -} - -/// Data for a reference to a module. -#[derive(Debug)] -pub struct ModRefData { - pub span: Span, - pub scope: NodeId, - pub ref_id: DefId, -} - -/// Data about a function call. 
-#[derive(Debug)] -pub struct FunctionCallData { - pub span: Span, - pub scope: NodeId, - pub ref_id: DefId, -} - -/// Data about a method call. -#[derive(Debug)] -pub struct MethodCallData { - pub span: Span, - pub scope: NodeId, - pub ref_id: Option, - pub decl_id: Option, -} - - - -impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { - pub fn new(tcx: &'l ty::ctxt<'tcx>, - lcx: &'l lowering::LoweringContext<'l>) - -> SaveContext<'l, 'tcx> { - let span_utils = SpanUtils::new(&tcx.sess); - SaveContext::from_span_utils(tcx, lcx, span_utils) - } - - pub fn from_span_utils(tcx: &'l ty::ctxt<'tcx>, - lcx: &'l lowering::LoweringContext<'l>, - span_utils: SpanUtils<'l>) - -> SaveContext<'l, 'tcx> { - SaveContext { - tcx: tcx, - lcx: lcx, - span_utils: span_utils, - } - } - - // List external crates used by the current crate. - pub fn get_external_crates(&self) -> Vec { - let mut result = Vec::new(); - - for n in self.tcx.sess.cstore.crates() { - result.push(CrateData { - name: self.tcx.sess.cstore.crate_name(n), - number: n, - }); - } - - result - } - - pub fn get_item_data(&self, item: &ast::Item) -> Data { - match item.node { - ast::ItemFn(..) => { - let name = self.tcx.map.path_to_string(item.id); - let qualname = format!("::{}", name); - let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Fn); - - Data::FunctionData(FunctionData { - id: item.id, - name: name, - qualname: qualname, - declaration: None, - span: sub_span.unwrap(), - scope: self.enclosing_scope(item.id), - }) - } - ast::ItemStatic(ref typ, mt, ref expr) => { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - - // If the variable is immutable, save the initialising expression. 
- let (value, keyword) = match mt { - ast::MutMutable => (String::from(""), keywords::Mut), - ast::MutImmutable => (self.span_utils.snippet(expr.span), keywords::Static), - }; - - let sub_span = self.span_utils.sub_span_after_keyword(item.span, keyword); - - Data::VariableData(VariableData { - id: item.id, - name: item.ident.to_string(), - qualname: qualname, - span: sub_span.unwrap(), - scope: self.enclosing_scope(item.id), - value: value, - type_value: ty_to_string(&typ), - }) - } - ast::ItemConst(ref typ, ref expr) => { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Const); - - Data::VariableData(VariableData { - id: item.id, - name: item.ident.to_string(), - qualname: qualname, - span: sub_span.unwrap(), - scope: self.enclosing_scope(item.id), - value: self.span_utils.snippet(expr.span), - type_value: ty_to_string(&typ), - }) - } - ast::ItemMod(ref m) => { - let qualname = format!("::{}", self.tcx.map.path_to_string(item.id)); - - let cm = self.tcx.sess.codemap(); - let filename = cm.span_to_filename(m.inner); - - let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Mod); - - Data::ModData(ModData { - id: item.id, - name: item.ident.to_string(), - qualname: qualname, - span: sub_span.unwrap(), - scope: self.enclosing_scope(item.id), - filename: filename, - }) - } - ast::ItemEnum(..) 
=> { - let enum_name = format!("::{}", self.tcx.map.path_to_string(item.id)); - let val = self.span_utils.snippet(item.span); - let sub_span = self.span_utils.sub_span_after_keyword(item.span, keywords::Enum); - - Data::EnumData(EnumData { - id: item.id, - value: val, - span: sub_span.unwrap(), - qualname: enum_name, - scope: self.enclosing_scope(item.id), - }) - } - ast::ItemImpl(_, _, _, ref trait_ref, ref typ, _) => { - let mut type_data = None; - let sub_span; - - let parent = self.enclosing_scope(item.id); - - match typ.node { - // Common case impl for a struct or something basic. - ast::TyPath(None, ref path) => { - sub_span = self.span_utils.sub_span_for_type_name(path.span).unwrap(); - type_data = self.lookup_ref_id(typ.id).map(|id| { - TypeRefData { - span: sub_span, - scope: parent, - ref_id: id, - } - }); - } - _ => { - // Less useful case, impl for a compound type. - let span = typ.span; - sub_span = self.span_utils.sub_span_for_type_name(span).unwrap_or(span); - } - } - - let trait_data = trait_ref.as_ref() - .and_then(|tr| self.get_trait_ref_data(tr, parent)); - - Data::ImplData(ImplData { - id: item.id, - span: sub_span, - scope: parent, - trait_ref: trait_data, - self_ref: type_data, - }) - } - _ => { - // FIXME - unimplemented!(); - } - } - } - - pub fn get_field_data(&self, field: &ast::StructField, scope: NodeId) -> Option { - match field.node.kind { - ast::NamedField(ident, _) => { - let qualname = format!("::{}::{}", self.tcx.map.path_to_string(scope), ident); - let typ = self.tcx.node_types().get(&field.node.id).unwrap().to_string(); - let sub_span = self.span_utils.sub_span_before_token(field.span, token::Colon); - Some(VariableData { - id: field.node.id, - name: ident.to_string(), - qualname: qualname, - span: sub_span.unwrap(), - scope: scope, - value: "".to_owned(), - type_value: typ, - }) - } - _ => None, - } - } - - // FIXME would be nice to take a MethodItem here, but the ast provides both - // trait and impl flavours, so the caller 
must do the disassembly. - pub fn get_method_data(&self, id: ast::NodeId, name: ast::Name, span: Span) -> FunctionData { - // The qualname for a method is the trait name or name of the struct in an impl in - // which the method is declared in, followed by the method's name. - let qualname = match self.tcx.impl_of_method(self.tcx.map.local_def_id(id)) { - Some(impl_id) => match self.tcx.map.get_if_local(impl_id) { - Some(NodeItem(item)) => { - match item.node { - hir::ItemImpl(_, _, _, _, ref ty, _) => { - let mut result = String::from("<"); - result.push_str(&rustc_front::print::pprust::ty_to_string(&**ty)); - - match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) { - Some(def_id) => { - result.push_str(" as "); - result.push_str(&self.tcx.item_path_str(def_id)); - } - None => {} - } - result.push_str(">"); - result - } - _ => { - self.tcx.sess.span_bug(span, - &format!("Container {:?} for method {} not \ - an impl?", - impl_id, - id)); - } - } - } - r => { - self.tcx.sess.span_bug(span, - &format!("Container {:?} for method {} is not a node \ - item {:?}", - impl_id, - id, - r)); - } - }, - None => match self.tcx.trait_of_item(self.tcx.map.local_def_id(id)) { - Some(def_id) => { - match self.tcx.map.get_if_local(def_id) { - Some(NodeItem(_)) => { - format!("::{}", self.tcx.item_path_str(def_id)) - } - r => { - self.tcx.sess.span_bug(span, - &format!("Could not find container {:?} for \ - method {}, got {:?}", - def_id, - id, - r)); - } - } - } - None => { - self.tcx.sess.span_bug(span, - &format!("Could not find container for method {}", id)); - } - }, - }; - - let qualname = format!("{}::{}", qualname, name); - - let def_id = self.tcx.map.local_def_id(id); - let decl_id = self.tcx.trait_item_of_item(def_id).and_then(|new_id| { - let new_def_id = new_id.def_id(); - if new_def_id != def_id { - Some(new_def_id) - } else { - None - } - }); - - let sub_span = self.span_utils.sub_span_after_keyword(span, keywords::Fn); - - FunctionData { - id: id, - name: 
name.to_string(), - qualname: qualname, - declaration: decl_id, - span: sub_span.unwrap(), - scope: self.enclosing_scope(id), - } - } - - pub fn get_trait_ref_data(&self, - trait_ref: &ast::TraitRef, - parent: NodeId) - -> Option { - self.lookup_ref_id(trait_ref.ref_id).map(|def_id| { - let span = trait_ref.path.span; - let sub_span = self.span_utils.sub_span_for_type_name(span).unwrap_or(span); - TypeRefData { - span: sub_span, - scope: parent, - ref_id: def_id, - } - }) - } - - pub fn get_expr_data(&self, expr: &ast::Expr) -> Option { - match expr.node { - ast::ExprField(ref sub_ex, ident) => { - let hir_node = lowering::lower_expr(self.lcx, sub_ex); - let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty; - match *ty { - ty::TyStruct(def, _) => { - let f = def.struct_variant().field_named(ident.node.name); - let sub_span = self.span_utils.span_for_last_ident(expr.span); - return Some(Data::VariableRefData(VariableRefData { - name: ident.node.to_string(), - span: sub_span.unwrap(), - scope: self.enclosing_scope(expr.id), - ref_id: f.did, - })); - } - _ => { - debug!("Expected struct type, found {:?}", ty); - None - } - } - } - ast::ExprStruct(ref path, _, _) => { - let hir_node = lowering::lower_expr(self.lcx, expr); - let ty = &self.tcx.expr_ty_adjusted(&hir_node).sty; - match *ty { - ty::TyStruct(def, _) => { - let sub_span = self.span_utils.span_for_last_ident(path.span); - Some(Data::TypeRefData(TypeRefData { - span: sub_span.unwrap(), - scope: self.enclosing_scope(expr.id), - ref_id: def.did, - })) - } - _ => { - // FIXME ty could legitimately be a TyEnum, but then we will fail - // later if we try to look up the fields. - debug!("expected TyStruct, found {:?}", ty); - None - } - } - } - ast::ExprMethodCall(..) 
=> { - let method_call = ty::MethodCall::expr(expr.id); - let method_id = self.tcx.tables.borrow().method_map[&method_call].def_id; - let (def_id, decl_id) = match self.tcx.impl_or_trait_item(method_id).container() { - ty::ImplContainer(_) => (Some(method_id), None), - ty::TraitContainer(_) => (None, Some(method_id)), - }; - let sub_span = self.span_utils.sub_span_for_meth_name(expr.span); - let parent = self.enclosing_scope(expr.id); - Some(Data::MethodCallData(MethodCallData { - span: sub_span.unwrap(), - scope: parent, - ref_id: def_id, - decl_id: decl_id, - })) - } - ast::ExprPath(_, ref path) => { - self.get_path_data(expr.id, path) - } - _ => { - // FIXME - unimplemented!(); - } - } - } - - pub fn get_path_data(&self, id: NodeId, path: &ast::Path) -> Option { - let def_map = self.tcx.def_map.borrow(); - if !def_map.contains_key(&id) { - self.tcx.sess.span_bug(path.span, - &format!("def_map has no key for {} in visit_expr", id)); - } - let def = def_map.get(&id).unwrap().full_def(); - let sub_span = self.span_utils.span_for_last_ident(path.span); - match def { - def::DefUpvar(..) | - def::DefLocal(..) | - def::DefStatic(..) | - def::DefConst(..) | - def::DefAssociatedConst(..) | - def::DefVariant(..) 
=> { - Some(Data::VariableRefData(VariableRefData { - name: self.span_utils.snippet(sub_span.unwrap()), - span: sub_span.unwrap(), - scope: self.enclosing_scope(id), - ref_id: def.def_id(), - })) - } - def::DefStruct(def_id) | - def::DefTy(def_id, _) | - def::DefTrait(def_id) | - def::DefTyParam(_, _, def_id, _) => { - Some(Data::TypeRefData(TypeRefData { - span: sub_span.unwrap(), - ref_id: def_id, - scope: self.enclosing_scope(id), - })) - } - def::DefMethod(decl_id) => { - let sub_span = self.span_utils.sub_span_for_meth_name(path.span); - let def_id = if decl_id.is_local() { - let ti = self.tcx.impl_or_trait_item(decl_id); - match ti.container() { - ty::TraitContainer(def_id) => { - self.tcx - .trait_items(def_id) - .iter() - .find(|mr| mr.name() == ti.name() && self.trait_method_has_body(mr)) - .map(|mr| mr.def_id()) - } - ty::ImplContainer(def_id) => { - let impl_items = self.tcx.impl_items.borrow(); - Some(impl_items.get(&def_id) - .unwrap() - .iter() - .find(|mr| { - self.tcx.impl_or_trait_item(mr.def_id()).name() == - ti.name() - }) - .unwrap() - .def_id()) - } - } - } else { - None - }; - Some(Data::MethodCallData(MethodCallData { - span: sub_span.unwrap(), - scope: self.enclosing_scope(id), - ref_id: def_id, - decl_id: Some(decl_id), - })) - } - def::DefFn(def_id, _) => { - Some(Data::FunctionCallData(FunctionCallData { - ref_id: def_id, - span: sub_span.unwrap(), - scope: self.enclosing_scope(id), - })) - } - def::DefMod(def_id) => { - Some(Data::ModRefData(ModRefData { - ref_id: def_id, - span: sub_span.unwrap(), - scope: self.enclosing_scope(id), - })) - } - _ => None, - } - } - - fn trait_method_has_body(&self, mr: &ty::ImplOrTraitItem) -> bool { - let def_id = mr.def_id(); - if let Some(node_id) = self.tcx.map.as_local_node_id(def_id) { - let trait_item = self.tcx.map.expect_trait_item(node_id); - if let hir::TraitItem_::MethodTraitItem(_, Some(_)) = trait_item.node { - true - } else { - false - } - } else { - false - } - } - - pub fn 
get_field_ref_data(&self, - field_ref: &ast::Field, - variant: ty::VariantDef, - parent: NodeId) - -> VariableRefData { - let f = variant.field_named(field_ref.ident.node.name); - // We don't really need a sub-span here, but no harm done - let sub_span = self.span_utils.span_for_last_ident(field_ref.ident.span); - VariableRefData { - name: field_ref.ident.node.to_string(), - span: sub_span.unwrap(), - scope: parent, - ref_id: f.did, - } - } - - pub fn get_data_for_id(&self, _id: &NodeId) -> Data { - // FIXME - unimplemented!(); - } - - fn lookup_ref_id(&self, ref_id: NodeId) -> Option { - if !self.tcx.def_map.borrow().contains_key(&ref_id) { - self.tcx.sess.bug(&format!("def_map has no key for {} in lookup_type_ref", - ref_id)); - } - let def = self.tcx.def_map.borrow().get(&ref_id).unwrap().full_def(); - match def { - def::DefPrimTy(_) | def::DefSelfTy(..) => None, - _ => Some(def.def_id()), - } - } - - #[inline] - pub fn enclosing_scope(&self, id: NodeId) -> NodeId { - self.tcx.map.get_enclosing_scope(id).unwrap_or(0) - } -} - -// An AST visitor for collecting paths from patterns. -struct PathCollector { - // The Row field identifies the kind of pattern. 
- collected_paths: Vec<(NodeId, ast::Path, ast::Mutability, recorder::Row)>, -} - -impl PathCollector { - fn new() -> PathCollector { - PathCollector { collected_paths: vec![] } - } -} - -impl<'v> Visitor<'v> for PathCollector { - fn visit_pat(&mut self, p: &ast::Pat) { - if generated_code(p.span) { - return; - } - - match p.node { - ast::PatStruct(ref path, _, _) => { - self.collected_paths.push((p.id, path.clone(), ast::MutMutable, recorder::TypeRef)); - } - ast::PatEnum(ref path, _) | - ast::PatQPath(_, ref path) => { - self.collected_paths.push((p.id, path.clone(), ast::MutMutable, recorder::VarRef)); - } - ast::PatIdent(bm, ref path1, _) => { - debug!("PathCollector, visit ident in pat {}: {:?} {:?}", - path1.node, - p.span, - path1.span); - let immut = match bm { - // Even if the ref is mut, you can't change the ref, only - // the data pointed at, so showing the initialising expression - // is still worthwhile. - ast::BindingMode::ByRef(_) => ast::MutImmutable, - ast::BindingMode::ByValue(mt) => mt, - }; - // collect path for either visit_local or visit_arm - let path = ast_util::ident_to_path(path1.span, path1.node); - self.collected_paths.push((p.id, path, immut, recorder::VarRef)); - } - _ => {} - } - visit::walk_pat(self, p); - } -} - -pub fn process_crate<'l, 'tcx>(tcx: &'l ty::ctxt<'tcx>, - lcx: &'l lowering::LoweringContext<'l>, - krate: &ast::Crate, - analysis: &ty::CrateAnalysis, - cratename: &str, - odir: Option<&Path>) { - let _ignore = tcx.dep_graph.in_ignore(); - - if generated_code(krate.span) { - return; - } - - assert!(analysis.glob_map.is_some()); - - info!("Dumping crate {}", cratename); - - // find a path to dump our data to - let mut root_path = match env::var_os("DXR_RUST_TEMP_FOLDER") { - Some(val) => PathBuf::from(val), - None => match odir { - Some(val) => val.join("dxr"), - None => PathBuf::from("dxr-temp"), - }, - }; - - if let Err(e) = fs::create_dir_all(&root_path) { - tcx.sess.err(&format!("Could not create directory {}: {}", - 
root_path.display(), - e)); - } - - { - let disp = root_path.display(); - info!("Writing output to {}", disp); - } - - // Create output file. - let executable = tcx.sess.crate_types.borrow().iter().any(|ct| *ct == CrateTypeExecutable); - let mut out_name = if executable { - "".to_owned() - } else { - "lib".to_owned() - }; - out_name.push_str(&cratename); - out_name.push_str(&tcx.sess.opts.cg.extra_filename); - out_name.push_str(".csv"); - root_path.push(&out_name); - let output_file = match File::create(&root_path) { - Ok(f) => box f, - Err(e) => { - let disp = root_path.display(); - tcx.sess.fatal(&format!("Could not open {}: {}", disp, e)); - } - }; - root_path.pop(); - - let mut visitor = dump_csv::DumpCsvVisitor::new(tcx, lcx, analysis, output_file); - - visitor.dump_crate_info(cratename, krate); - visit::walk_crate(&mut visitor, krate); -} - -// Utility functions for the module. - -// Helper function to escape quotes in a string -fn escape(s: String) -> String { - s.replace("\"", "\"\"") -} - -// If the expression is a macro expansion or other generated code, run screaming -// and don't index. -pub fn generated_code(span: Span) -> bool { - span.expn_id != NO_EXPANSION || span == DUMMY_SP -} diff --git a/src/librustc_trans/save/recorder.rs b/src/librustc_trans/save/recorder.rs deleted file mode 100644 index 1db31baf30dcc..0000000000000 --- a/src/librustc_trans/save/recorder.rs +++ /dev/null @@ -1,689 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::Row::*; - -use super::escape; -use super::span_utils::SpanUtils; - -use middle::cstore::LOCAL_CRATE; -use middle::def_id::{CRATE_DEF_INDEX, DefId}; -use middle::ty; - -use std::io::Write; - -use syntax::ast; -use syntax::ast::NodeId; -use syntax::codemap::*; - -const CRATE_ROOT_DEF_ID: DefId = DefId { - krate: LOCAL_CRATE, - index: CRATE_DEF_INDEX, -}; - -pub struct Recorder { - // output file - pub out: Box, - pub dump_spans: bool, -} - -impl Recorder { - pub fn record(&mut self, info: &str) { - match write!(self.out, "{}", info) { - Err(_) => error!("Error writing output '{}'", info), - _ => (), - } - } - - pub fn dump_span(&mut self, su: SpanUtils, kind: &str, span: Span, _sub_span: Option) { - assert!(self.dump_spans); - let result = format!("span,kind,{},{},text,\"{}\"\n", - kind, - su.extent_str(span), - escape(su.snippet(span))); - self.record(&result[..]); - } -} - -pub struct FmtStrs<'a, 'tcx: 'a> { - pub recorder: Box, - span: SpanUtils<'a>, - tcx: &'a ty::ctxt<'tcx>, -} - -macro_rules! s { ($e:expr) => { format!("{}", $e) }} -macro_rules! svec { - ($($e:expr),*) => ({ - // leading _ to allow empty construction without a warning. - let mut _temp = ::std::vec::Vec::new(); - $(_temp.push(s!($e));)* - _temp - }) -} - -// FIXME recorder should operate on super::Data, rather than lots of ad hoc -// data. - -#[derive(Copy, Clone, Debug, Eq, PartialEq)] -pub enum Row { - Variable, - Enum, - Variant, - VariantStruct, - Function, - MethodDecl, - Struct, - Trait, - Impl, - Module, - UseAlias, - UseGlob, - ExternCrate, - Inheritance, - MethodCall, - Typedef, - ExternalCrate, - Crate, - FnCall, - ModRef, - VarRef, - TypeRef, - FnRef, -} - -impl<'a, 'tcx: 'a> FmtStrs<'a, 'tcx> { - pub fn new(rec: Box, - span: SpanUtils<'a>, - tcx: &'a ty::ctxt<'tcx>) - -> FmtStrs<'a, 'tcx> { - FmtStrs { - recorder: rec, - span: span, - tcx: tcx, - } - } - - // Emitted ids are used to cross-reference items across crates. 
DefIds and - // NodeIds do not usually correspond in any way. The strategy is to use the - // index from the DefId as a crate-local id. However, within a crate, DefId - // indices and NodeIds can overlap. So, we must adjust the NodeIds. If an - // item can be identified by a DefId as well as a NodeId, then we use the - // DefId index as the id. If it can't, then we have to use the NodeId, but - // need to adjust it so it will not clash with any possible DefId index. - fn normalize_node_id(&self, id: NodeId) -> usize { - match self.tcx.map.opt_local_def_id(id) { - Some(id) => id.index.as_usize(), - None => id as usize + self.tcx.map.num_local_def_ids() - } - } - - // A map from kind of item to a tuple of - // a string representation of the name - // a vector of field names - // whether this kind requires a span - // whether dump_spans should dump for this kind - fn lookup_row(r: Row) -> (&'static str, Vec<&'static str>, bool, bool) { - match r { - Variable => ("variable", - vec!("id", "name", "qualname", "value", "type", "scopeid"), - true, - true), - Enum => ("enum", - vec!("id", "qualname", "scopeid", "value"), - true, - true), - Variant => ("variant", - vec!("id", "name", "qualname", "type", "value", "scopeid"), - true, - true), - VariantStruct => ("variant_struct", - vec!("id", "ctor_id", "qualname", "type", "value", "scopeid"), - true, - true), - Function => ("function", - vec!("id", "qualname", "declid", "declidcrate", "scopeid"), - true, - true), - MethodDecl => ("method_decl", - vec!("id", "qualname", "scopeid"), - true, - true), - Struct => ("struct", - vec!("id", "ctor_id", "qualname", "scopeid", "value"), - true, - true), - Trait => ("trait", - vec!("id", "qualname", "scopeid", "value"), - true, - true), - Impl => ("impl", - vec!("id", - "refid", - "refidcrate", - "traitid", - "traitidcrate", - "scopeid"), - true, - true), - Module => ("module", - vec!("id", "qualname", "scopeid", "def_file"), - true, - false), - UseAlias => ("use_alias", - vec!("id", 
"refid", "refidcrate", "name", "scopeid"), - true, - true), - UseGlob => ("use_glob", vec!("id", "value", "scopeid"), true, true), - ExternCrate => ("extern_crate", - vec!("id", "name", "location", "crate", "scopeid"), - true, - true), - Inheritance => ("inheritance", - vec!("base", "basecrate", "derived", "derivedcrate"), - true, - false), - MethodCall => ("method_call", - vec!("refid", "refidcrate", "declid", "declidcrate", "scopeid"), - true, - true), - Typedef => ("typedef", vec!("id", "qualname", "value"), true, true), - ExternalCrate => ("external_crate", - vec!("name", "crate", "file_name"), - false, - false), - Crate => ("crate", vec!("name", "crate_root"), true, false), - FnCall => ("fn_call", - vec!("refid", "refidcrate", "qualname", "scopeid"), - true, - true), - ModRef => ("mod_ref", - vec!("refid", "refidcrate", "qualname", "scopeid"), - true, - true), - VarRef => ("var_ref", - vec!("refid", "refidcrate", "qualname", "scopeid"), - true, - true), - TypeRef => ("type_ref", - vec!("refid", "refidcrate", "qualname", "scopeid"), - true, - true), - FnRef => ("fn_ref", - vec!("refid", "refidcrate", "qualname", "scopeid"), - true, - true), - } - } - - pub fn make_values_str(&self, - kind: &'static str, - fields: &Vec<&'static str>, - values: Vec, - span: Span) - -> Option { - if values.len() != fields.len() { - self.span.sess.span_bug(span, - &format!("Mismatch between length of fields for '{}', \ - expected '{}', found '{}'", - kind, - fields.len(), - values.len())); - } - - let values = values.iter().map(|s| { - // Never take more than 1020 chars - if s.len() > 1020 { - &s[..1020] - } else { - &s[..] 
- } - }); - - let pairs = fields.iter().zip(values); - let strs = pairs.map(|(f, v)| format!(",{},\"{}\"", f, escape(String::from(v)))); - Some(strs.fold(String::new(), - |mut s, ss| { - s.push_str(&ss[..]); - s - })) - } - - pub fn record_without_span(&mut self, kind: Row, values: Vec, span: Span) { - let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind); - - if needs_span { - self.span.sess.span_bug(span, - &format!("Called record_without_span for '{}' which does \ - requires a span", - label)); - } - assert!(!dump_spans); - - if self.recorder.dump_spans { - return; - } - - let values_str = match self.make_values_str(label, fields, values, span) { - Some(vs) => vs, - None => return, - }; - - let mut result = String::from(label); - result.push_str(&values_str[..]); - result.push_str("\n"); - self.recorder.record(&result[..]); - } - - pub fn record_with_span(&mut self, - kind: Row, - span: Span, - sub_span: Span, - values: Vec) { - let (label, ref fields, needs_span, dump_spans) = FmtStrs::lookup_row(kind); - - if self.recorder.dump_spans { - if dump_spans { - self.recorder.dump_span(self.span.clone(), label, span, Some(sub_span)); - } - return; - } - - if !needs_span { - self.span.sess.span_bug(span, - &format!("Called record_with_span for '{}' which does not \ - require a span", - label)); - } - - let values_str = match self.make_values_str(label, fields, values, span) { - Some(vs) => vs, - None => return, - }; - let result = format!("{},{}{}\n", - label, - self.span.extent_str(sub_span), - values_str); - self.recorder.record(&result[..]); - } - - pub fn check_and_record(&mut self, - kind: Row, - span: Span, - sub_span: Option, - values: Vec) { - match sub_span { - Some(sub_span) => self.record_with_span(kind, span, sub_span, values), - None => { - let (label, _, _, _) = FmtStrs::lookup_row(kind); - self.span.report_span_err(label, span); - } - } - } - - pub fn variable_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: 
&str, - value: &str, - typ: &str) { - // Getting a fully qualified name for a variable is hard because in - // the local case they can be overridden in one block and there is no nice way - // to refer to such a scope in english, so we just hack it by appending the - // variable def's node id - let mut qualname = String::from(name); - qualname.push_str("$"); - qualname.push_str(&id.to_string()); - let id = self.normalize_node_id(id); - self.check_and_record(Variable, - span, - sub_span, - svec!(id, name, qualname, value, typ, 0)); - } - - // formal parameters - pub fn formal_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - fn_name: &str, - name: &str, - typ: &str) { - let mut qualname = String::from(fn_name); - qualname.push_str("::"); - qualname.push_str(name); - let id = self.normalize_node_id(id); - self.check_and_record(Variable, - span, - sub_span, - svec!(id, name, qualname, "", typ, 0)); - } - - // value is the initialising expression of the static if it is not mut, otherwise "". 
- pub fn static_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - qualname: &str, - value: &str, - typ: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(Variable, - span, - sub_span, - svec!(id, name, qualname, value, typ, scope_id)); - } - - pub fn field_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - qualname: &str, - typ: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(Variable, - span, - sub_span, - svec!(id, name, qualname, "", typ, scope_id)); - } - - pub fn enum_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - scope_id: NodeId, - value: &str) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(Enum, span, sub_span, svec!(id, name, scope_id, value)); - } - - pub fn tuple_variant_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - qualname: &str, - typ: &str, - val: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(Variant, - span, - sub_span, - svec!(id, name, qualname, typ, val, scope_id)); - } - - pub fn struct_variant_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - ctor_id: NodeId, - name: &str, - typ: &str, - val: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - let ctor_id = self.normalize_node_id(ctor_id); - self.check_and_record(VariantStruct, - span, - sub_span, - svec!(id, ctor_id, name, typ, val, scope_id)); - } - - pub fn fn_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); 
- self.check_and_record(Function, - span, - sub_span, - svec!(id, name, "", "", scope_id)); - } - - pub fn method_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - decl_id: Option, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - let values = match decl_id { - Some(decl_id) => svec!(id, - name, - decl_id.index.as_usize(), - decl_id.krate, - scope_id), - None => svec!(id, name, "", "", scope_id), - }; - self.check_and_record(Function, span, sub_span, values); - } - - pub fn method_decl_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(MethodDecl, span, sub_span, svec!(id, name, scope_id)); - } - - pub fn struct_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - ctor_id: NodeId, - name: &str, - scope_id: NodeId, - value: &str) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - let ctor_id = self.normalize_node_id(ctor_id); - self.check_and_record(Struct, - span, - sub_span, - svec!(id, ctor_id, name, scope_id, value)); - } - - pub fn trait_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - scope_id: NodeId, - value: &str) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(Trait, span, sub_span, svec!(id, name, scope_id, value)); - } - - pub fn impl_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - ref_id: Option, - trait_id: Option, - scope_id: NodeId) { - let id = self.normalize_node_id(id); - let scope_id = self.normalize_node_id(scope_id); - let ref_id = ref_id.unwrap_or(CRATE_ROOT_DEF_ID); - let trait_id = trait_id.unwrap_or(CRATE_ROOT_DEF_ID); - self.check_and_record(Impl, - span, - sub_span, - svec!(id, - ref_id.index.as_usize(), - ref_id.krate, - 
trait_id.index.as_usize(), - trait_id.krate, - scope_id)); - } - - pub fn mod_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - name: &str, - parent: NodeId, - filename: &str) { - let id = self.normalize_node_id(id); - let parent = self.normalize_node_id(parent); - self.check_and_record(Module, - span, - sub_span, - svec!(id, name, parent, filename)); - } - - pub fn use_alias_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - mod_id: Option, - name: &str, - parent: NodeId) { - let id = self.normalize_node_id(id); - let parent = self.normalize_node_id(parent); - let mod_id = mod_id.unwrap_or(CRATE_ROOT_DEF_ID); - self.check_and_record(UseAlias, - span, - sub_span, - svec!(id, mod_id.index.as_usize(), mod_id.krate, name, parent)); - } - - pub fn use_glob_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - values: &str, - parent: NodeId) { - let id = self.normalize_node_id(id); - let parent = self.normalize_node_id(parent); - self.check_and_record(UseGlob, span, sub_span, svec!(id, values, parent)); - } - - pub fn extern_crate_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - cnum: ast::CrateNum, - name: &str, - loc: &str, - parent: NodeId) { - let id = self.normalize_node_id(id); - let parent = self.normalize_node_id(parent); - self.check_and_record(ExternCrate, - span, - sub_span, - svec!(id, name, loc, cnum, parent)); - } - - pub fn inherit_str(&mut self, - span: Span, - sub_span: Option, - base_id: DefId, - deriv_id: NodeId) { - let deriv_id = self.normalize_node_id(deriv_id); - self.check_and_record(Inheritance, - span, - sub_span, - svec!(base_id.index.as_usize(), base_id.krate, deriv_id, 0)); - } - - pub fn fn_call_str(&mut self, - span: Span, - sub_span: Option, - id: DefId, - scope_id: NodeId) { - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(FnCall, - span, - sub_span, - svec!(id.index.as_usize(), id.krate, "", scope_id)); - } - - pub fn meth_call_str(&mut self, - span: Span, 
- sub_span: Option, - defid: Option, - declid: Option, - scope_id: NodeId) { - let scope_id = self.normalize_node_id(scope_id); - let defid = defid.unwrap_or(CRATE_ROOT_DEF_ID); - let (dcn, dck) = match declid { - Some(declid) => (s!(declid.index.as_usize()), s!(declid.krate)), - None => ("".to_string(), "".to_string()), - }; - self.check_and_record(MethodCall, - span, - sub_span, - svec!(defid.index.as_usize(), defid.krate, dcn, dck, scope_id)); - } - - pub fn sub_mod_ref_str(&mut self, span: Span, sub_span: Span, qualname: &str, parent: NodeId) { - let parent = self.normalize_node_id(parent); - self.record_with_span(ModRef, span, sub_span, svec!(0, 0, qualname, parent)); - } - - pub fn typedef_str(&mut self, - span: Span, - sub_span: Option, - id: NodeId, - qualname: &str, - value: &str) { - let id = self.normalize_node_id(id); - self.check_and_record(Typedef, span, sub_span, svec!(id, qualname, value)); - } - - pub fn crate_str(&mut self, span: Span, name: &str, crate_root: &str) { - self.record_with_span(Crate, span, span, svec!(name, crate_root)); - } - - pub fn external_crate_str(&mut self, span: Span, name: &str, num: ast::CrateNum) { - let lo_loc = self.span.sess.codemap().lookup_char_pos(span.lo); - self.record_without_span(ExternalCrate, - svec!(name, num, SpanUtils::make_path_string(&lo_loc.file.name)), - span); - } - - pub fn sub_type_ref_str(&mut self, span: Span, sub_span: Span, qualname: &str) { - self.record_with_span(TypeRef, span, sub_span, svec!(0, 0, qualname, 0)); - } - - // A slightly generic function for a reference to an item of any kind. 
- pub fn ref_str(&mut self, - kind: Row, - span: Span, - sub_span: Option, - id: DefId, - scope_id: NodeId) { - let scope_id = self.normalize_node_id(scope_id); - self.check_and_record(kind, - span, - sub_span, - svec!(id.index.as_usize(), id.krate, "", scope_id)); - } -} diff --git a/src/librustc_trans/save/span_utils.rs b/src/librustc_trans/save/span_utils.rs deleted file mode 100644 index 773d5caea5f1a..0000000000000 --- a/src/librustc_trans/save/span_utils.rs +++ /dev/null @@ -1,391 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::session::Session; - -use save::generated_code; - -use std::cell::Cell; -use std::env; -use std::path::Path; - -use syntax::ast; -use syntax::codemap::*; -use syntax::parse::lexer; -use syntax::parse::lexer::{Reader, StringReader}; -use syntax::parse::token; -use syntax::parse::token::{keywords, Token}; - -#[derive(Clone)] -pub struct SpanUtils<'a> { - pub sess: &'a Session, - pub err_count: Cell, -} - -impl<'a> SpanUtils<'a> { - pub fn new(sess: &'a Session) -> SpanUtils<'a> { - SpanUtils { - sess: sess, - err_count: Cell::new(0), - } - } - - pub fn make_path_string(file_name: &str) -> String { - let path = Path::new(file_name); - if path.is_absolute() { - path.clone().display().to_string() - } else { - env::current_dir().unwrap().join(&path).display().to_string() - } - } - - // Standard string for extents/location. 
- #[rustfmt_skip] - pub fn extent_str(&self, span: Span) -> String { - let lo_loc = self.sess.codemap().lookup_char_pos(span.lo); - let hi_loc = self.sess.codemap().lookup_char_pos(span.hi); - let lo_pos = self.sess.codemap().bytepos_to_file_charpos(span.lo); - let hi_pos = self.sess.codemap().bytepos_to_file_charpos(span.hi); - let lo_pos_byte = self.sess.codemap().lookup_byte_offset(span.lo).pos; - let hi_pos_byte = self.sess.codemap().lookup_byte_offset(span.hi).pos; - - format!("file_name,\"{}\",file_line,{},file_col,{},extent_start,{},extent_start_bytes,{},\ - file_line_end,{},file_col_end,{},extent_end,{},extent_end_bytes,{}", - SpanUtils::make_path_string(&lo_loc.file.name), - lo_loc.line, lo_loc.col.to_usize(), lo_pos.to_usize(), lo_pos_byte.to_usize(), - hi_loc.line, hi_loc.col.to_usize(), hi_pos.to_usize(), hi_pos_byte.to_usize()) - } - - // sub_span starts at span.lo, so we need to adjust the positions etc. - // If sub_span is None, we don't need to adjust. - pub fn make_sub_span(&self, span: Span, sub_span: Option) -> Option { - let loc = self.sess.codemap().lookup_char_pos(span.lo); - assert!(!generated_code(span), - "generated code; we should not be processing this `{}` in {}, line {}", - self.snippet(span), - loc.file.name, - loc.line); - - match sub_span { - None => None, - Some(sub) => { - let FileMapAndBytePos {fm, pos} = self.sess.codemap().lookup_byte_offset(span.lo); - let base = pos + fm.start_pos; - Some(Span { - lo: base + self.sess.codemap().lookup_byte_offset(sub.lo).pos, - hi: base + self.sess.codemap().lookup_byte_offset(sub.hi).pos, - expn_id: NO_EXPANSION, - }) - } - } - } - - pub fn snippet(&self, span: Span) -> String { - match self.sess.codemap().span_to_snippet(span) { - Ok(s) => s, - Err(_) => String::new(), - } - } - - pub fn retokenise_span(&self, span: Span) -> StringReader<'a> { - // sadness - we don't have spans for sub-expressions nor access to the tokens - // so in order to get extents for the function name itself (which 
dxr expects) - // we need to re-tokenise the fn definition - - // Note: this is a bit awful - it adds the contents of span to the end of - // the codemap as a new filemap. This is mostly OK, but means we should - // not iterate over the codemap. Also, any spans over the new filemap - // are incompatible with spans over other filemaps. - let filemap = self.sess - .codemap() - .new_filemap(String::from(""), self.snippet(span)); - let s = self.sess; - lexer::StringReader::new(s.diagnostic(), filemap) - } - - // Re-parses a path and returns the span for the last identifier in the path - pub fn span_for_last_ident(&self, span: Span) -> Option { - let mut result = None; - - let mut toks = self.retokenise_span(span); - let mut bracket_count = 0; - loop { - let ts = toks.real_token(); - if ts.tok == token::Eof { - return self.make_sub_span(span, result) - } - if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) { - result = Some(ts.sp); - } - - bracket_count += match ts.tok { - token::Lt => 1, - token::Gt => -1, - token::BinOp(token::Shr) => -2, - _ => 0, - } - } - } - - // Return the span for the first identifier in the path. - pub fn span_for_first_ident(&self, span: Span) -> Option { - let mut toks = self.retokenise_span(span); - let mut bracket_count = 0; - loop { - let ts = toks.real_token(); - if ts.tok == token::Eof { - return None; - } - if bracket_count == 0 && (ts.tok.is_ident() || ts.tok.is_keyword(keywords::SelfValue)) { - return self.make_sub_span(span, Some(ts.sp)); - } - - bracket_count += match ts.tok { - token::Lt => 1, - token::Gt => -1, - token::BinOp(token::Shr) => -2, - _ => 0, - } - } - } - - // Return the span for the last ident before a `(` or `<` or '::<' and outside any - // any brackets, or the last span. 
- pub fn sub_span_for_meth_name(&self, span: Span) -> Option { - let mut toks = self.retokenise_span(span); - let mut prev = toks.real_token(); - let mut result = None; - let mut bracket_count = 0; - let mut last_span = None; - while prev.tok != token::Eof { - last_span = None; - let mut next = toks.real_token(); - - if (next.tok == token::OpenDelim(token::Paren) || next.tok == token::Lt) && - bracket_count == 0 && prev.tok.is_ident() { - result = Some(prev.sp); - } - - if bracket_count == 0 && next.tok == token::ModSep { - let old = prev; - prev = next; - next = toks.real_token(); - if next.tok == token::Lt && old.tok.is_ident() { - result = Some(old.sp); - } - } - - bracket_count += match prev.tok { - token::OpenDelim(token::Paren) | token::Lt => 1, - token::CloseDelim(token::Paren) | token::Gt => -1, - token::BinOp(token::Shr) => -2, - _ => 0, - }; - - if prev.tok.is_ident() && bracket_count == 0 { - last_span = Some(prev.sp); - } - prev = next; - } - if result.is_none() && last_span.is_some() { - return self.make_sub_span(span, last_span); - } - return self.make_sub_span(span, result); - } - - // Return the span for the last ident before a `<` and outside any - // brackets, or the last span. - pub fn sub_span_for_type_name(&self, span: Span) -> Option { - let mut toks = self.retokenise_span(span); - let mut prev = toks.real_token(); - let mut result = None; - let mut bracket_count = 0; - loop { - let next = toks.real_token(); - - if (next.tok == token::Lt || next.tok == token::Colon) && bracket_count == 0 && - prev.tok.is_ident() { - result = Some(prev.sp); - } - - bracket_count += match prev.tok { - token::Lt => 1, - token::Gt => -1, - token::BinOp(token::Shl) => 2, - token::BinOp(token::Shr) => -2, - _ => 0, - }; - - if next.tok == token::Eof { - break; - } - prev = next; - } - if bracket_count != 0 { - let loc = self.sess.codemap().lookup_char_pos(span.lo); - self.sess.span_bug(span, - &format!("Mis-counted brackets when breaking path? 
Parsing '{}' \ - in {}, line {}", - self.snippet(span), - loc.file.name, - loc.line)); - } - if result.is_none() && prev.tok.is_ident() && bracket_count == 0 { - return self.make_sub_span(span, Some(prev.sp)); - } - self.make_sub_span(span, result) - } - - // Reparse span and return an owned vector of sub spans of the first limit - // identifier tokens in the given nesting level. - // example with Foo, Bar> - // Nesting = 0: all idents outside of brackets: [Foo] - // Nesting = 1: idents within one level of brackets: [Bar, Bar] - pub fn spans_with_brackets(&self, span: Span, nesting: isize, limit: isize) -> Vec { - let mut result: Vec = vec!(); - - let mut toks = self.retokenise_span(span); - // We keep track of how many brackets we're nested in - let mut bracket_count: isize = 0; - let mut found_ufcs_sep = false; - loop { - let ts = toks.real_token(); - if ts.tok == token::Eof { - if bracket_count != 0 { - let loc = self.sess.codemap().lookup_char_pos(span.lo); - self.sess.span_bug(span, - &format!("Mis-counted brackets when breaking path? \ - Parsing '{}' in {}, line {}", - self.snippet(span), - loc.file.name, - loc.line)); - } - return result - } - if (result.len() as isize) == limit { - return result; - } - bracket_count += match ts.tok { - token::Lt => 1, - token::Gt => -1, - token::BinOp(token::Shl) => 2, - token::BinOp(token::Shr) => -2, - _ => 0, - }; - - // Ignore the `>::` in `::AssocTy`. - - // The root cause of this hack is that the AST representation of - // qpaths is horrible. It treats ::C as a path with two - // segments, B and C and notes that there is also a self type A at - // position 0. Because we don't have spans for individual idents, - // only the whole path, we have to iterate over the tokens in the - // path, trying to pull out the non-nested idents (e.g., avoiding 'a - // in `>::C`). So we end up with a span for `B>::C` from - // the start of the first ident to the end of the path. 
- if !found_ufcs_sep && bracket_count == -1 { - found_ufcs_sep = true; - bracket_count += 1; - } - if ts.tok.is_ident() && bracket_count == nesting { - result.push(self.make_sub_span(span, Some(ts.sp)).unwrap()); - } - } - } - - pub fn sub_span_before_token(&self, span: Span, tok: Token) -> Option { - let mut toks = self.retokenise_span(span); - let mut prev = toks.real_token(); - loop { - if prev.tok == token::Eof { - return None; - } - let next = toks.real_token(); - if next.tok == tok { - return self.make_sub_span(span, Some(prev.sp)); - } - prev = next; - } - } - - pub fn sub_span_of_token(&self, span: Span, tok: Token) -> Option { - let mut toks = self.retokenise_span(span); - loop { - let next = toks.real_token(); - if next.tok == token::Eof { - return None; - } - if next.tok == tok { - return self.make_sub_span(span, Some(next.sp)); - } - } - } - - pub fn sub_span_after_keyword(&self, span: Span, keyword: keywords::Keyword) -> Option { - self.sub_span_after(span, |t| t.is_keyword(keyword)) - } - - pub fn sub_span_after_token(&self, span: Span, tok: Token) -> Option { - self.sub_span_after(span, |t| t == tok) - } - - fn sub_span_after bool>(&self, span: Span, f: F) -> Option { - let mut toks = self.retokenise_span(span); - loop { - let ts = toks.real_token(); - if ts.tok == token::Eof { - return None; - } - if f(ts.tok) { - let ts = toks.real_token(); - if ts.tok == token::Eof { - return None - } else { - return self.make_sub_span(span, Some(ts.sp)); - } - } - } - } - - - // Returns a list of the spans of idents in a path. - // E.g., For foo::bar::baz, we return [foo, bar, baz] (well, their spans) - pub fn spans_for_path_segments(&self, path: &ast::Path) -> Vec { - if generated_code(path.span) { - return vec!(); - } - - self.spans_with_brackets(path.span, 0, -1) - } - - // Return an owned vector of the subspans of the param identifier - // tokens found in span. 
- pub fn spans_for_ty_params(&self, span: Span, number: isize) -> Vec { - if generated_code(span) { - return vec!(); - } - // Type params are nested within one level of brackets: - // i.e. we want Vec from Foo> - self.spans_with_brackets(span, 1, number) - } - - pub fn report_span_err(&self, kind: &str, span: Span) { - let loc = self.sess.codemap().lookup_char_pos(span.lo); - info!("({}) Could not find sub_span in `{}` in {}, line {}", - kind, - self.snippet(span), - loc.file.name, - loc.line); - self.err_count.set(self.err_count.get() + 1); - if self.err_count.get() > 1000 { - self.sess.bug("span errors reached 1000, giving up"); - } - } -} diff --git a/src/librustc_trans/symbol_map.rs b/src/librustc_trans/symbol_map.rs new file mode 100644 index 0000000000000..c3e0ac1fee515 --- /dev/null +++ b/src/librustc_trans/symbol_map.rs @@ -0,0 +1,128 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use context::SharedCrateContext; +use monomorphize::Instance; +use rustc::ty::TyCtxt; +use std::borrow::Cow; +use syntax::codemap::Span; +use trans_item::TransItem; +use util::nodemap::FxHashMap; + +// In the SymbolMap we collect the symbol names of all translation items of +// the current crate. This map exists as a performance optimization. Symbol +// names of translation items are deterministic and fully defined by the item. +// Thus they could also always be recomputed if needed. 
+ +pub struct SymbolMap<'tcx> { + index: FxHashMap, (usize, usize)>, + arena: String, +} + +impl<'tcx> SymbolMap<'tcx> { + + pub fn build<'a, I>(scx: &SharedCrateContext<'a, 'tcx>, + trans_items: I) + -> SymbolMap<'tcx> + where I: Iterator> + { + // Check for duplicate symbol names + let mut symbols: Vec<_> = trans_items.map(|trans_item| { + (trans_item, trans_item.compute_symbol_name(scx)) + }).collect(); + + (&mut symbols[..]).sort_by(|&(_, ref sym1), &(_, ref sym2)|{ + sym1.cmp(sym2) + }); + + for pair in (&symbols[..]).windows(2) { + let sym1 = &pair[0].1; + let sym2 = &pair[1].1; + + if *sym1 == *sym2 { + let trans_item1 = pair[0].0; + let trans_item2 = pair[1].0; + + let span1 = get_span(scx.tcx(), trans_item1); + let span2 = get_span(scx.tcx(), trans_item2); + + // Deterministically select one of the spans for error reporting + let span = match (span1, span2) { + (Some(span1), Some(span2)) => { + Some(if span1.lo.0 > span2.lo.0 { + span1 + } else { + span2 + }) + } + (Some(span), None) | + (None, Some(span)) => Some(span), + _ => None + }; + + let error_message = format!("symbol `{}` is already defined", sym1); + + if let Some(span) = span { + scx.sess().span_fatal(span, &error_message) + } else { + scx.sess().fatal(&error_message) + } + } + } + + let mut symbol_map = SymbolMap { + index: FxHashMap(), + arena: String::with_capacity(1024), + }; + + for (trans_item, symbol) in symbols { + let start_index = symbol_map.arena.len(); + symbol_map.arena.push_str(&symbol[..]); + let end_index = symbol_map.arena.len(); + let prev_entry = symbol_map.index.insert(trans_item, + (start_index, end_index)); + if prev_entry.is_some() { + bug!("TransItem encountered twice?") + } + } + + fn get_span<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + trans_item: TransItem<'tcx>) -> Option { + match trans_item { + TransItem::Fn(Instance { def, .. 
}) => { + tcx.map.as_local_node_id(def) + } + TransItem::Static(node_id) => Some(node_id), + TransItem::DropGlue(_) => None, + }.map(|node_id| { + tcx.map.span(node_id) + }) + } + + symbol_map + } + + pub fn get(&self, trans_item: TransItem<'tcx>) -> Option<&str> { + self.index.get(&trans_item).map(|&(start_index, end_index)| { + &self.arena[start_index .. end_index] + }) + } + + pub fn get_or_compute<'map, 'scx>(&'map self, + scx: &SharedCrateContext<'scx, 'tcx>, + trans_item: TransItem<'tcx>) + -> Cow<'map, str> { + if let Some(sym) = self.get(trans_item) { + Cow::from(sym) + } else { + Cow::from(trans_item.compute_symbol_name(scx)) + } + } +} diff --git a/src/librustc_trans/symbol_names_test.rs b/src/librustc_trans/symbol_names_test.rs new file mode 100644 index 0000000000000..9ed5a5d148cd6 --- /dev/null +++ b/src/librustc_trans/symbol_names_test.rs @@ -0,0 +1,89 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Walks the crate looking for items/impl-items/trait-items that have +//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! generates an error giving, respectively, the symbol name or +//! item-path. This is used for unit testing the code that generates +//! paths etc in all kinds of annoying scenarios. 
+ +use rustc::hir; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use syntax::ast; + +use common::SharedCrateContext; +use monomorphize::Instance; + +const SYMBOL_NAME: &'static str = "rustc_symbol_name"; +const ITEM_PATH: &'static str = "rustc_item_path"; + +pub fn report_symbol_names(scx: &SharedCrateContext) { + // if the `rustc_attrs` feature is not enabled, then the + // attributes we are interested in cannot be present anyway, so + // skip the walk. + let tcx = scx.tcx(); + if !tcx.sess.features.borrow().rustc_attrs { + return; + } + + let _ignore = tcx.dep_graph.in_ignore(); + let mut visitor = SymbolNamesTest { scx: scx }; + // FIXME(#37712) could use ItemLikeVisitor if trait items were item-like + tcx.map.krate().visit_all_item_likes(&mut visitor.as_deep_visitor()); +} + +struct SymbolNamesTest<'a, 'tcx:'a> { + scx: &'a SharedCrateContext<'a, 'tcx>, +} + +impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> { + fn process_attrs(&mut self, + node_id: ast::NodeId) { + let tcx = self.scx.tcx(); + let def_id = tcx.map.local_def_id(node_id); + for attr in tcx.get_attrs(def_id).iter() { + if attr.check_name(SYMBOL_NAME) { + // for now, can only use on monomorphic names + let instance = Instance::mono(self.scx, def_id); + let name = instance.symbol_name(self.scx); + tcx.sess.span_err(attr.span, &format!("symbol-name({})", name)); + } else if attr.check_name(ITEM_PATH) { + let path = tcx.item_path_str(def_id); + tcx.sess.span_err(attr.span, &format!("item-path({})", path)); + } + + // (*) The formatting of `tag({})` is chosen so that tests can elect + // to test the entirety of the string, if they choose, or else just + // some subset. 
+ } + } +} + +impl<'a, 'tcx> Visitor<'tcx> for SymbolNamesTest<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::None + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + self.process_attrs(item.id); + intravisit::walk_item(self, item); + } + + fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { + self.process_attrs(ti.id); + intravisit::walk_trait_item(self, ti) + } + + fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { + self.process_attrs(ii.id); + intravisit::walk_impl_item(self, ii) + } +} + diff --git a/src/librustc_trans/trans/_match.rs b/src/librustc_trans/trans/_match.rs deleted file mode 100644 index 6c1a31738afb9..0000000000000 --- a/src/librustc_trans/trans/_match.rs +++ /dev/null @@ -1,2019 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Compilation of match statements -//! -//! I will endeavor to explain the code as best I can. I have only a loose -//! understanding of some parts of it. -//! -//! ## Matching -//! -//! The basic state of the code is maintained in an array `m` of `Match` -//! objects. Each `Match` describes some list of patterns, all of which must -//! match against the current list of values. If those patterns match, then -//! the arm listed in the match is the correct arm. A given arm may have -//! multiple corresponding match entries, one for each alternative that -//! remains. As we proceed these sets of matches are adjusted by the various -//! `enter_XXX()` functions, each of which adjusts the set of options given -//! some information about the value which has been matched. -//! -//! 
So, initially, there is one value and N matches, each of which have one -//! constituent pattern. N here is usually the number of arms but may be -//! greater, if some arms have multiple alternatives. For example, here: -//! -//! enum Foo { A, B(int), C(usize, usize) } -//! match foo { -//! A => ..., -//! B(x) => ..., -//! C(1, 2) => ..., -//! C(_) => ... -//! } -//! -//! The value would be `foo`. There would be four matches, each of which -//! contains one pattern (and, in one case, a guard). We could collect the -//! various options and then compile the code for the case where `foo` is an -//! `A`, a `B`, and a `C`. When we generate the code for `C`, we would (1) -//! drop the two matches that do not match a `C` and (2) expand the other two -//! into two patterns each. In the first case, the two patterns would be `1` -//! and `2`, and the in the second case the _ pattern would be expanded into -//! `_` and `_`. The two values are of course the arguments to `C`. -//! -//! Here is a quick guide to the various functions: -//! -//! - `compile_submatch()`: The main workhouse. It takes a list of values and -//! a list of matches and finds the various possibilities that could occur. -//! -//! - `enter_XXX()`: modifies the list of matches based on some information -//! about the value that has been matched. For example, -//! `enter_rec_or_struct()` adjusts the values given that a record or struct -//! has been matched. This is an infallible pattern, so *all* of the matches -//! must be either wildcards or record/struct patterns. `enter_opt()` -//! handles the fallible cases, and it is correspondingly more complex. -//! -//! ## Bindings -//! -//! We store information about the bound variables for each arm as part of the -//! per-arm `ArmData` struct. There is a mapping from identifiers to -//! `BindingInfo` structs. These structs contain the mode/id/type of the -//! binding, but they also contain an LLVM value which points at an alloca -//! called `llmatch`. 
For by value bindings that are Copy, we also create -//! an extra alloca that we copy the matched value to so that any changes -//! we do to our copy is not reflected in the original and vice-versa. -//! We don't do this if it's a move since the original value can't be used -//! and thus allowing us to cheat in not creating an extra alloca. -//! -//! The `llmatch` binding always stores a pointer into the value being matched -//! which points at the data for the binding. If the value being matched has -//! type `T`, then, `llmatch` will point at an alloca of type `T*` (and hence -//! `llmatch` has type `T**`). So, if you have a pattern like: -//! -//! let a: A = ...; -//! let b: B = ...; -//! match (a, b) { (ref c, d) => { ... } } -//! -//! For `c` and `d`, we would generate allocas of type `C*` and `D*` -//! respectively. These are called the `llmatch`. As we match, when we come -//! up against an identifier, we store the current pointer into the -//! corresponding alloca. -//! -//! Once a pattern is completely matched, and assuming that there is no guard -//! pattern, we will branch to a block that leads to the body itself. For any -//! by-value bindings, this block will first load the ptr from `llmatch` (the -//! one of type `D*`) and then load a second time to get the actual value (the -//! one of type `D`). For by ref bindings, the value of the local variable is -//! simply the first alloca. -//! -//! So, for the example above, we would generate a setup kind of like this: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +--------------------------------------------+ -//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of second half of tuple) | -//! +--------------------------------------------+ -//! | -//! +--------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! +--------------------------------------+ -//! -//! If there is a guard, the situation is slightly different, because we must -//! 
execute the guard code. Moreover, we need to do so once for each of the -//! alternatives that lead to the arm, because if the guard fails, they may -//! have different points from which to continue the search. Therefore, in that -//! case, we generate code that looks more like: -//! -//! +-------+ -//! | Entry | -//! +-------+ -//! | -//! +-------------------------------------------+ -//! | llmatch_c = (addr of first half of tuple) | -//! | llmatch_d = (addr of first half of tuple) | -//! +-------------------------------------------+ -//! | -//! +-------------------------------------------------+ -//! | *llbinding_d = **llmatch_d | -//! | check condition | -//! | if false { goto next case } | -//! | if true { goto body } | -//! +-------------------------------------------------+ -//! -//! The handling for the cleanups is a bit... sensitive. Basically, the body -//! is the one that invokes `add_clean()` for each binding. During the guard -//! evaluation, we add temporary cleanups and revoke them after the guard is -//! evaluated (it could fail, after all). Note that guards and moves are -//! just plain incompatible. -//! -//! Some relevant helper functions that manage bindings: -//! - `create_bindings_map()` -//! - `insert_lllocals()` -//! -//! -//! ## Notes on vector pattern matching. -//! -//! Vector pattern matching is surprisingly tricky. The problem is that -//! the structure of the vector isn't fully known, and slice matches -//! can be done on subparts of it. -//! -//! The way that vector pattern matches are dealt with, then, is as -//! follows. First, we make the actual condition associated with a -//! vector pattern simply a vector length comparison. So the pattern -//! [1, .. x] gets the condition "vec len >= 1", and the pattern -//! [.. x] gets the condition "vec len >= 0". The problem here is that -//! having the condition "vec len >= 1" hold clearly does not mean that -//! only a pattern that has exactly that condition will match. This -//! 
means that it may well be the case that a condition holds, but none -//! of the patterns matching that condition match; to deal with this, -//! when doing vector length matches, we have match failures proceed to -//! the next condition to check. -//! -//! There are a couple more subtleties to deal with. While the "actual" -//! condition associated with vector length tests is simply a test on -//! the vector length, the actual vec_len Opt entry contains more -//! information used to restrict which matches are associated with it. -//! So that all matches in a submatch are matching against the same -//! values from inside the vector, they are split up by how many -//! elements they match at the front and at the back of the vector. In -//! order to make sure that arms are properly checked in order, even -//! with the overmatching conditions, each vec_len Opt entry is -//! associated with a range of matches. -//! Consider the following: -//! -//! match &[1, 2, 3] { -//! [1, 1, .. _] => 0, -//! [1, 2, 2, .. _] => 1, -//! [1, 2, 3, .. _] => 2, -//! [1, 2, .. _] => 3, -//! _ => 4 -//! } -//! The proper arm to match is arm 2, but arms 0 and 3 both have the -//! condition "len >= 2". If arm 3 was lumped in with arm 0, then the -//! wrong branch would be taken. Instead, vec_len Opts are associated -//! with a contiguous range of matches that have the same "shape". -//! This is sort of ugly and requires a bunch of special handling of -//! vec_len options. 
- -pub use self::BranchKind::*; -pub use self::OptResult::*; -pub use self::TransBindingMode::*; -use self::Opt::*; -use self::FailureHandler::*; - -use llvm::{ValueRef, BasicBlockRef}; -use middle::check_match::StaticInliner; -use middle::check_match; -use middle::const_eval; -use middle::def::{self, DefMap}; -use middle::def_id::DefId; -use middle::expr_use_visitor as euv; -use middle::infer; -use middle::lang_items::StrEqFnLangItem; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; -use middle::pat_util::*; -use trans::adt; -use trans::base::*; -use trans::build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast}; -use trans::build::{Not, Store, Sub, add_comment}; -use trans::build; -use trans::callee; -use trans::cleanup::{self, CleanupMethods, DropHintMethods}; -use trans::common::*; -use trans::consts; -use trans::datum::*; -use trans::debuginfo::{self, DebugLoc, ToDebugLoc}; -use trans::expr::{self, Dest}; -use trans::monomorphize; -use trans::tvec; -use trans::type_of; -use trans::Disr; -use middle::ty::{self, Ty}; -use session::config::NoDebugInfo; -use util::common::indenter; -use util::nodemap::FnvHashMap; -use util::ppaux; - -use std; -use std::cell::RefCell; -use std::cmp::Ordering; -use std::fmt; -use std::rc::Rc; -use rustc_front::hir; -use syntax::ast::{self, DUMMY_NODE_ID, NodeId}; -use syntax::codemap::Span; -use rustc_front::fold::Folder; -use syntax::ptr::P; - -#[derive(Copy, Clone, Debug)] -struct ConstantExpr<'a>(&'a hir::Expr); - -impl<'a> ConstantExpr<'a> { - fn eq(self, other: ConstantExpr<'a>, tcx: &ty::ctxt) -> bool { - match const_eval::compare_lit_exprs(tcx, self.0, other.0) { - Some(result) => result == Ordering::Equal, - None => panic!("compare_list_exprs: type mismatch"), - } - } -} - -// An option identifying a branch (either a literal, an enum variant or a range) -#[derive(Debug)] -enum Opt<'a, 'tcx> { - ConstantValue(ConstantExpr<'a>, DebugLoc), - ConstantRange(ConstantExpr<'a>, 
ConstantExpr<'a>, DebugLoc), - Variant(Disr, Rc>, DefId, DebugLoc), - SliceLengthEqual(usize, DebugLoc), - SliceLengthGreaterOrEqual(/* prefix length */ usize, - /* suffix length */ usize, - DebugLoc), -} - -impl<'a, 'tcx> Opt<'a, 'tcx> { - fn eq(&self, other: &Opt<'a, 'tcx>, tcx: &ty::ctxt<'tcx>) -> bool { - match (self, other) { - (&ConstantValue(a, _), &ConstantValue(b, _)) => a.eq(b, tcx), - (&ConstantRange(a1, a2, _), &ConstantRange(b1, b2, _)) => { - a1.eq(b1, tcx) && a2.eq(b2, tcx) - } - (&Variant(a_disr, ref a_repr, a_def, _), - &Variant(b_disr, ref b_repr, b_def, _)) => { - a_disr == b_disr && *a_repr == *b_repr && a_def == b_def - } - (&SliceLengthEqual(a, _), &SliceLengthEqual(b, _)) => a == b, - (&SliceLengthGreaterOrEqual(a1, a2, _), - &SliceLengthGreaterOrEqual(b1, b2, _)) => { - a1 == b1 && a2 == b2 - } - _ => false - } - } - - fn trans<'blk>(&self, mut bcx: Block<'blk, 'tcx>) -> OptResult<'blk, 'tcx> { - use trans::consts::TrueConst::Yes; - let _icx = push_ctxt("match::trans_opt"); - let ccx = bcx.ccx(); - match *self { - ConstantValue(ConstantExpr(lit_expr), _) => { - let lit_ty = bcx.tcx().node_id_to_type(lit_expr.id); - let expr = consts::const_expr(ccx, &*lit_expr, bcx.fcx.param_substs, None, Yes); - let llval = match expr { - Ok((llval, _)) => llval, - Err(err) => bcx.ccx().sess().span_fatal(lit_expr.span, &err.description()), - }; - let lit_datum = immediate_rvalue(llval, lit_ty); - let lit_datum = unpack_datum!(bcx, lit_datum.to_appropriate_datum(bcx)); - SingleResult(Result::new(bcx, lit_datum.val)) - } - ConstantRange(ConstantExpr(ref l1), ConstantExpr(ref l2), _) => { - let l1 = match consts::const_expr(ccx, &**l1, bcx.fcx.param_substs, None, Yes) { - Ok((l1, _)) => l1, - Err(err) => bcx.ccx().sess().span_fatal(l1.span, &err.description()), - }; - let l2 = match consts::const_expr(ccx, &**l2, bcx.fcx.param_substs, None, Yes) { - Ok((l2, _)) => l2, - Err(err) => bcx.ccx().sess().span_fatal(l2.span, &err.description()), - }; - 
RangeResult(Result::new(bcx, l1), Result::new(bcx, l2)) - } - Variant(disr_val, ref repr, _, _) => { - SingleResult(Result::new(bcx, adt::trans_case(bcx, &**repr, disr_val))) - } - SliceLengthEqual(length, _) => { - SingleResult(Result::new(bcx, C_uint(ccx, length))) - } - SliceLengthGreaterOrEqual(prefix, suffix, _) => { - LowerBound(Result::new(bcx, C_uint(ccx, prefix + suffix))) - } - } - } - - fn debug_loc(&self) -> DebugLoc { - match *self { - ConstantValue(_,debug_loc) | - ConstantRange(_, _, debug_loc) | - Variant(_, _, _, debug_loc) | - SliceLengthEqual(_, debug_loc) | - SliceLengthGreaterOrEqual(_, _, debug_loc) => debug_loc - } - } -} - -#[derive(Copy, Clone, PartialEq)] -pub enum BranchKind { - NoBranch, - Single, - Switch, - Compare, - CompareSliceLength -} - -pub enum OptResult<'blk, 'tcx: 'blk> { - SingleResult(Result<'blk, 'tcx>), - RangeResult(Result<'blk, 'tcx>, Result<'blk, 'tcx>), - LowerBound(Result<'blk, 'tcx>) -} - -#[derive(Clone, Copy, PartialEq)] -pub enum TransBindingMode { - /// By-value binding for a copy type: copies from matched data - /// into a fresh LLVM alloca. - TrByCopy(/* llbinding */ ValueRef), - - /// By-value binding for a non-copy type where we copy into a - /// fresh LLVM alloca; this most accurately reflects the language - /// semantics (e.g. it properly handles overwrites of the matched - /// input), but potentially injects an unwanted copy. - TrByMoveIntoCopy(/* llbinding */ ValueRef), - - /// Binding a non-copy type by reference under the hood; this is - /// a codegen optimization to avoid unnecessary memory traffic. - TrByMoveRef, - - /// By-ref binding exposed in the original source input. - TrByRef, -} - -impl TransBindingMode { - /// if binding by making a fresh copy; returns the alloca that it - /// will copy into; otherwise None. 
- fn alloca_if_copy(&self) -> Option { - match *self { - TrByCopy(llbinding) | TrByMoveIntoCopy(llbinding) => Some(llbinding), - TrByMoveRef | TrByRef => None, - } - } -} - -/// Information about a pattern binding: -/// - `llmatch` is a pointer to a stack slot. The stack slot contains a -/// pointer into the value being matched. Hence, llmatch has type `T**` -/// where `T` is the value being matched. -/// - `trmode` is the trans binding mode -/// - `id` is the node id of the binding -/// - `ty` is the Rust type of the binding -#[derive(Clone, Copy)] -pub struct BindingInfo<'tcx> { - pub llmatch: ValueRef, - pub trmode: TransBindingMode, - pub id: ast::NodeId, - pub span: Span, - pub ty: Ty<'tcx>, -} - -type BindingsMap<'tcx> = FnvHashMap>; - -struct ArmData<'p, 'blk, 'tcx: 'blk> { - bodycx: Block<'blk, 'tcx>, - arm: &'p hir::Arm, - bindings_map: BindingsMap<'tcx> -} - -/// Info about Match. -/// If all `pats` are matched then arm `data` will be executed. -/// As we proceed `bound_ptrs` are filled with pointers to values to be bound, -/// these pointers are stored in llmatch variables just before executing `data` arm. 
-struct Match<'a, 'p: 'a, 'blk: 'a, 'tcx: 'blk> { - pats: Vec<&'p hir::Pat>, - data: &'a ArmData<'p, 'blk, 'tcx>, - bound_ptrs: Vec<(ast::Name, ValueRef)>, - // Thread along renamings done by the check_match::StaticInliner, so we can - // map back to original NodeIds - pat_renaming_map: Option<&'a FnvHashMap<(NodeId, Span), NodeId>> -} - -impl<'a, 'p, 'blk, 'tcx> fmt::Debug for Match<'a, 'p, 'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if ppaux::verbose() { - // for many programs, this just take too long to serialize - write!(f, "{:?}", self.pats) - } else { - write!(f, "{} pats", self.pats.len()) - } - } -} - -fn has_nested_bindings(m: &[Match], col: usize) -> bool { - for br in m { - match br.pats[col].node { - hir::PatIdent(_, _, Some(_)) => return true, - _ => () - } - } - return false; -} - -// As noted in `fn match_datum`, we should eventually pass around a -// `Datum` for the `val`; but until we get to that point, this -// `MatchInput` struct will serve -- it has everything `Datum` -// does except for the type field. 
-#[derive(Copy, Clone)] -pub struct MatchInput { val: ValueRef, lval: Lvalue } - -impl<'tcx> Datum<'tcx, Lvalue> { - pub fn match_input(&self) -> MatchInput { - MatchInput { - val: self.val, - lval: self.kind, - } - } -} - -impl MatchInput { - fn from_val(val: ValueRef) -> MatchInput { - MatchInput { - val: val, - lval: Lvalue::new("MatchInput::from_val"), - } - } - - fn to_datum<'tcx>(self, ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - Datum::new(self.val, ty, self.lval) - } -} - -fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec> { - debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); - let _indenter = indenter(); - - m.iter().map(|br| { - let mut bound_ptrs = br.bound_ptrs.clone(); - let mut pat = br.pats[col]; - loop { - pat = match pat.node { - hir::PatIdent(_, ref path, Some(ref inner)) => { - bound_ptrs.push((path.node.name, val.val)); - &**inner - }, - _ => break - } - } - - let mut pats = br.pats.clone(); - pats[col] = pat; - Match { - pats: pats, - data: &*br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }).collect() -} - -fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - dm: &RefCell, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput, - mut e: F) - -> Vec> where - F: FnMut(&[&'p hir::Pat]) -> Option>, -{ - debug!("enter_match(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); - let _indenter = indenter(); - - m.iter().filter_map(|br| { - e(&br.pats).map(|pats| { - let this = br.pats[col]; - let mut bound_ptrs = br.bound_ptrs.clone(); - match this.node { - hir::PatIdent(_, ref path, None) => { - if pat_is_binding(&dm.borrow(), &*this) { - bound_ptrs.push((path.node.name, val.val)); - } - } - hir::PatVec(ref before, Some(ref slice), ref after) => { - if let hir::PatIdent(_, ref path, 
None) = slice.node { - let subslice_val = bind_subslice_pat( - bcx, this.id, val, - before.len(), after.len()); - bound_ptrs.push((path.node.name, subslice_val)); - } - } - _ => {} - } - Match { - pats: pats, - data: br.data, - bound_ptrs: bound_ptrs, - pat_renaming_map: br.pat_renaming_map, - } - }) - }).collect() -} - -fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - dm: &RefCell, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize, - val: MatchInput) - -> Vec> { - debug!("enter_default(bcx={}, m={:?}, col={}, val={})", - bcx.to_str(), - m, - col, - bcx.val_to_string(val.val)); - let _indenter = indenter(); - - // Collect all of the matches that can match against anything. - enter_match(bcx, dm, m, col, val, |pats| { - if pat_is_binding_or_wild(&dm.borrow(), &*pats[col]) { - let mut r = pats[..col].to_vec(); - r.extend_from_slice(&pats[col + 1..]); - Some(r) - } else { - None - } - }) -} - -// nmatsakis: what does enter_opt do? -// in trans/match -// trans/match.rs is like stumbling around in a dark cave -// pcwalton: the enter family of functions adjust the set of -// patterns as needed -// yeah, at some point I kind of achieved some level of -// understanding -// anyhow, they adjust the patterns given that something of that -// kind has been found -// pcwalton: ok, right, so enter_XXX() adjusts the patterns, as I -// said -// enter_match() kind of embodies the generic code -// it is provided with a function that tests each pattern to see -// if it might possibly apply and so forth -// so, if you have a pattern like {a: _, b: _, _} and one like _ -// then _ would be expanded to (_, _) -// one spot for each of the sub-patterns -// enter_opt() is one of the more complex; it covers the fallible -// cases -// enter_rec_or_struct() or enter_tuple() are simpler, since they -// are infallible patterns -// so all patterns must either be records (resp. 
tuples) or -// wildcards - -/// The above is now outdated in that enter_match() now takes a function that -/// takes the complete row of patterns rather than just the first one. -/// Also, most of the enter_() family functions have been unified with -/// the check_match specialization step. -fn enter_opt<'a, 'p, 'blk, 'tcx>( - bcx: Block<'blk, 'tcx>, - _: ast::NodeId, - dm: &RefCell, - m: &[Match<'a, 'p, 'blk, 'tcx>], - opt: &Opt, - col: usize, - variant_size: usize, - val: MatchInput) - -> Vec> { - debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={})", - bcx.to_str(), - m, - *opt, - col, - bcx.val_to_string(val.val)); - let _indenter = indenter(); - - let ctor = match opt { - &ConstantValue(ConstantExpr(expr), _) => check_match::ConstantValue( - const_eval::eval_const_expr(bcx.tcx(), &*expr) - ), - &ConstantRange(ConstantExpr(lo), ConstantExpr(hi), _) => check_match::ConstantRange( - const_eval::eval_const_expr(bcx.tcx(), &*lo), - const_eval::eval_const_expr(bcx.tcx(), &*hi) - ), - &SliceLengthEqual(n, _) => - check_match::Slice(n), - &SliceLengthGreaterOrEqual(before, after, _) => - check_match::SliceWithSubslice(before, after), - &Variant(_, _, def_id, _) => - check_match::Constructor::Variant(def_id) - }; - - let param_env = bcx.tcx().empty_parameter_environment(); - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: param_env, - }; - enter_match(bcx, dm, m, col, val, |pats| - check_match::specialize(&mcx, &pats[..], &ctor, col, variant_size) - ) -} - -// Returns the options in one column of matches. An option is something that -// needs to be conditionally matched at runtime; for example, the discriminant -// on a set of enum variants or a literal. 
-fn get_branches<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - col: usize) - -> Vec> { - let tcx = bcx.tcx(); - - let mut found: Vec = vec![]; - for br in m { - let cur = br.pats[col]; - let debug_loc = match br.pat_renaming_map { - Some(pat_renaming_map) => { - match pat_renaming_map.get(&(cur.id, cur.span)) { - Some(&id) => DebugLoc::At(id, cur.span), - None => DebugLoc::At(cur.id, cur.span), - } - } - None => DebugLoc::None - }; - - let opt = match cur.node { - hir::PatLit(ref l) => { - ConstantValue(ConstantExpr(&**l), debug_loc) - } - hir::PatIdent(..) | hir::PatEnum(..) | hir::PatStruct(..) => { - // This is either an enum variant or a variable binding. - let opt_def = tcx.def_map.borrow().get(&cur.id).map(|d| d.full_def()); - match opt_def { - Some(def::DefVariant(enum_id, var_id, _)) => { - let variant = tcx.lookup_adt_def(enum_id).variant_with_id(var_id); - Variant(Disr::from(variant.disr_val), - adt::represent_node(bcx, cur.id), - var_id, - debug_loc) - } - _ => continue - } - } - hir::PatRange(ref l1, ref l2) => { - ConstantRange(ConstantExpr(&**l1), ConstantExpr(&**l2), debug_loc) - } - hir::PatVec(ref before, None, ref after) => { - SliceLengthEqual(before.len() + after.len(), debug_loc) - } - hir::PatVec(ref before, Some(_), ref after) => { - SliceLengthGreaterOrEqual(before.len(), after.len(), debug_loc) - } - _ => continue - }; - - if !found.iter().any(|x| x.eq(&opt, tcx)) { - found.push(opt); - } - } - found -} - -struct ExtractedBlock<'blk, 'tcx: 'blk> { - vals: Vec, - bcx: Block<'blk, 'tcx>, -} - -fn extract_variant_args<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - disr_val: Disr, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_variant_args"); - // Assume enums are always sized for now. 
- let val = adt::MaybeSizedValue::sized(val.val); - let args = (0..adt::num_args(repr, disr_val)).map(|i| { - adt::trans_field_ptr(bcx, repr, val, disr_val, i) - }).collect(); - - ExtractedBlock { vals: args, bcx: bcx } -} - -/// Helper for converting from the ValueRef that we pass around in the match code, which is always -/// an lvalue, into a Datum. Eventually we should just pass around a Datum and be done with it. -fn match_datum<'tcx>(val: MatchInput, left_ty: Ty<'tcx>) -> Datum<'tcx, Lvalue> { - val.to_datum(left_ty) -} - -fn bind_subslice_pat(bcx: Block, - pat_id: ast::NodeId, - val: MatchInput, - offset_left: usize, - offset_right: usize) -> ValueRef { - let _icx = push_ctxt("match::bind_subslice_pat"); - let vec_ty = node_id_type(bcx, pat_id); - let vec_ty_contents = match vec_ty.sty { - ty::TyBox(ty) => ty, - ty::TyRef(_, mt) | ty::TyRawPtr(mt) => mt.ty, - _ => vec_ty - }; - let unit_ty = vec_ty_contents.sequence_element_type(bcx.tcx()); - let vec_datum = match_datum(val, vec_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - - let slice_begin = InBoundsGEP(bcx, base, &[C_uint(bcx.ccx(), offset_left)]); - let slice_len_offset = C_uint(bcx.ccx(), offset_left + offset_right); - let slice_len = Sub(bcx, len, slice_len_offset, DebugLoc::None); - let slice_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), - bcx.tcx().mk_slice(unit_ty)); - let scratch = rvalue_scratch_datum(bcx, slice_ty, ""); - Store(bcx, slice_begin, expr::get_dataptr(bcx, scratch.val)); - Store(bcx, slice_len, expr::get_meta(bcx, scratch.val)); - scratch.val -} - -fn extract_vec_elems<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - left_ty: Ty<'tcx>, - before: usize, - after: usize, - val: MatchInput) - -> ExtractedBlock<'blk, 'tcx> { - let _icx = push_ctxt("match::extract_vec_elems"); - let vec_datum = match_datum(val, left_ty); - let (base, len) = vec_datum.get_vec_base_and_len(bcx); - let mut elems = vec![]; - elems.extend((0..before).map(|i| GEPi(bcx, base, &[i]))); - 
elems.extend((0..after).rev().map(|i| { - InBoundsGEP(bcx, base, &[ - Sub(bcx, len, C_uint(bcx.ccx(), i + 1), DebugLoc::None) - ]) - })); - ExtractedBlock { vals: elems, bcx: bcx } -} - -// Macro for deciding whether any of the remaining matches fit a given kind of -// pattern. Note that, because the macro is well-typed, either ALL of the -// matches should fit that sort of pattern or NONE (however, some of the -// matches may be wildcards like _ or identifiers). -macro_rules! any_pat { - ($m:expr, $col:expr, $pattern:pat) => ( - ($m).iter().any(|br| { - match br.pats[$col].node { - $pattern => true, - _ => false - } - }) - ) -} - -fn any_uniq_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, hir::PatBox(_)) -} - -fn any_region_pat(m: &[Match], col: usize) -> bool { - any_pat!(m, col, hir::PatRegion(..)) -} - -fn any_irrefutable_adt_pat(tcx: &ty::ctxt, m: &[Match], col: usize) -> bool { - m.iter().any(|br| { - let pat = br.pats[col]; - match pat.node { - hir::PatTup(_) => true, - hir::PatStruct(..) => { - match tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) { - Some(def::DefVariant(..)) => false, - _ => true, - } - } - hir::PatEnum(..) | hir::PatIdent(_, _, None) => { - match tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def()) { - Some(def::DefStruct(..)) => true, - _ => false - } - } - _ => false - } - }) -} - -/// What to do when the pattern match fails. 
-enum FailureHandler { - Infallible, - JumpToBasicBlock(BasicBlockRef), - Unreachable -} - -impl FailureHandler { - fn is_fallible(&self) -> bool { - match *self { - Infallible => false, - _ => true - } - } - - fn is_infallible(&self) -> bool { - !self.is_fallible() - } - - fn handle_fail(&self, bcx: Block) { - match *self { - Infallible => - panic!("attempted to panic in a non-panicking panic handler!"), - JumpToBasicBlock(basic_block) => - Br(bcx, basic_block, DebugLoc::None), - Unreachable => - build::Unreachable(bcx) - } - } -} - -fn pick_column_to_specialize(def_map: &RefCell, m: &[Match]) -> Option { - fn pat_score(def_map: &RefCell, pat: &hir::Pat) -> usize { - match pat.node { - hir::PatIdent(_, _, Some(ref inner)) => pat_score(def_map, &**inner), - _ if pat_is_refutable(&def_map.borrow(), pat) => 1, - _ => 0 - } - } - - let column_score = |m: &[Match], col: usize| -> usize { - let total_score = m.iter() - .map(|row| row.pats[col]) - .map(|pat| pat_score(def_map, pat)) - .sum(); - - // Irrefutable columns always go first, they'd only be duplicated in the branches. - if total_score == 0 { - std::usize::MAX - } else { - total_score - } - }; - - let column_contains_any_nonwild_patterns = |&col: &usize| -> bool { - m.iter().any(|row| match row.pats[col].node { - hir::PatWild => false, - _ => true - }) - }; - - (0..m[0].pats.len()) - .filter(column_contains_any_nonwild_patterns) - .map(|col| (col, column_score(m, col))) - .max_by_key(|&(_, score)| score) - .map(|(col, _)| col) -} - -// Compiles a comparison between two things. 
-fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - lhs_data: ValueRef, - lhs_len: ValueRef, - rhs_data: ValueRef, - rhs_len: ValueRef, - rhs_t: Ty<'tcx>, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let did = langcall(cx, - None, - &format!("comparison of `{}`", rhs_t), - StrEqFnLangItem); - callee::trans_lang_call(cx, did, &[lhs_data, lhs_len, rhs_data, rhs_len], None, debug_loc) - } - - let _icx = push_ctxt("compare_values"); - if rhs_t.is_scalar() { - let cmp = compare_scalar_types(cx, lhs, rhs, rhs_t, hir::BiEq, debug_loc); - return Result::new(cx, cmp); - } - - match rhs_t.sty { - ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyStr => { - let lhs_data = Load(cx, expr::get_dataptr(cx, lhs)); - let lhs_len = Load(cx, expr::get_meta(cx, lhs)); - let rhs_data = Load(cx, expr::get_dataptr(cx, rhs)); - let rhs_len = Load(cx, expr::get_meta(cx, rhs)); - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - } - ty::TyArray(ty, _) | ty::TySlice(ty) => match ty.sty { - ty::TyUint(ast::TyU8) => { - // NOTE: cast &[u8] and &[u8; N] to &str and abuse the str_eq lang item, - // which calls memcmp(). 
- let pat_len = val_ty(rhs).element_type().array_length(); - let ty_str_slice = cx.tcx().mk_static_str(); - - let rhs_data = GEPi(cx, rhs, &[0, 0]); - let rhs_len = C_uint(cx.ccx(), pat_len); - - let lhs_data; - let lhs_len; - if val_ty(lhs) == val_ty(rhs) { - // Both the discriminant and the pattern are thin pointers - lhs_data = GEPi(cx, lhs, &[0, 0]); - lhs_len = C_uint(cx.ccx(), pat_len); - } else { - // The discriminant is a fat pointer - let llty_str_slice = type_of::type_of(cx.ccx(), ty_str_slice).ptr_to(); - let lhs_str = PointerCast(cx, lhs, llty_str_slice); - lhs_data = Load(cx, expr::get_dataptr(cx, lhs_str)); - lhs_len = Load(cx, expr::get_meta(cx, lhs_str)); - } - - compare_str(cx, lhs_data, lhs_len, rhs_data, rhs_len, rhs_t, debug_loc) - }, - _ => cx.sess().bug("only byte strings supported in compare_values"), - }, - _ => cx.sess().bug("only string and byte strings supported in compare_values"), - }, - _ => cx.sess().bug("only scalars, byte strings, and strings supported in compare_values"), - } -} - -/// For each binding in `data.bindings_map`, adds an appropriate entry into the `fcx.lllocals` map -fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - bindings_map: &BindingsMap<'tcx>, - cs: Option) - -> Block<'blk, 'tcx> { - for (&name, &binding_info) in bindings_map { - let (llval, aliases_other_state) = match binding_info.trmode { - // By value mut binding for a copy type: load from the ptr - // into the matched value and copy to our alloca - TrByCopy(llbinding) | - TrByMoveIntoCopy(llbinding) => { - let llval = Load(bcx, binding_info.llmatch); - let lvalue = match binding_info.trmode { - TrByCopy(..) => - Lvalue::new("_match::insert_lllocals"), - TrByMoveIntoCopy(..) => { - // match_input moves from the input into a - // separate stack slot. - // - // E.g. 
consider moving the value `D(A)` out - // of the tuple `(D(A), D(B))` and into the - // local variable `x` via the pattern `(x,_)`, - // leaving the remainder of the tuple `(_, - // D(B))` still to be dropped in the future. - // - // Thus, here we must zero the place that we - // are moving *from*, because we do not yet - // track drop flags for a fragmented parent - // match input expression. - // - // Longer term we will be able to map the move - // into `(x, _)` up to the parent path that - // owns the whole tuple, and mark the - // corresponding stack-local drop-flag - // tracking the first component of the tuple. - let hint_kind = HintKind::ZeroAndMaintain; - Lvalue::new_with_hint("_match::insert_lllocals (match_input)", - bcx, binding_info.id, hint_kind) - } - _ => unreachable!(), - }; - let datum = Datum::new(llval, binding_info.ty, lvalue); - call_lifetime_start(bcx, llbinding); - bcx = datum.store_to(bcx, llbinding); - if let Some(cs) = cs { - bcx.fcx.schedule_lifetime_end(cs, llbinding); - } - - (llbinding, false) - }, - - // By value move bindings: load from the ptr into the matched value - TrByMoveRef => (Load(bcx, binding_info.llmatch), true), - - // By ref binding: use the ptr into the matched value - TrByRef => (binding_info.llmatch, true), - }; - - - // A local that aliases some other state must be zeroed, since - // the other state (e.g. some parent data that we matched - // into) will still have its subcomponents (such as this - // local) destructed at the end of the parent's scope. Longer - // term, we will properly map such parents to the set of - // unique drop flags for its fragments. 
- let hint_kind = if aliases_other_state { - HintKind::ZeroAndMaintain - } else { - HintKind::DontZeroJustUse - }; - let lvalue = Lvalue::new_with_hint("_match::insert_lllocals (local)", - bcx, - binding_info.id, - hint_kind); - let datum = Datum::new(llval, binding_info.ty, lvalue); - if let Some(cs) = cs { - let opt_datum = lvalue.dropflag_hint(bcx); - bcx.fcx.schedule_lifetime_end(cs, binding_info.llmatch); - bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum); - } - - debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval)); - bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum); - debuginfo::create_match_binding_metadata(bcx, name, binding_info); - } - bcx -} - -fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - guard_expr: &hir::Expr, - data: &ArmData<'p, 'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) - -> Block<'blk, 'tcx> { - debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals=[{}])", - bcx.to_str(), - guard_expr, - m, - vals.iter().map(|v| bcx.val_to_string(v.val)).collect::>().join(", ")); - let _indenter = indenter(); - - let mut bcx = insert_lllocals(bcx, &data.bindings_map, None); - - let val = unpack_datum!(bcx, expr::trans(bcx, guard_expr)); - let val = val.to_llbool(bcx); - - for (_, &binding_info) in &data.bindings_map { - if let Some(llbinding) = binding_info.trmode.alloca_if_copy() { - call_lifetime_end(bcx, llbinding) - } - } - - for (_, &binding_info) in &data.bindings_map { - bcx.fcx.lllocals.borrow_mut().remove(&binding_info.id); - } - - with_cond(bcx, Not(bcx, val, guard_expr.debug_loc()), |bcx| { - for (_, &binding_info) in &data.bindings_map { - call_lifetime_end(bcx, binding_info.llmatch); - } - match chk { - // If the default arm is the only one left, move on to the next - // condition explicitly rather than (possibly) falling back to - // the default arm. 
- &JumpToBasicBlock(_) if m.len() == 1 && has_genuine_default => { - chk.handle_fail(bcx); - } - _ => { - compile_submatch(bcx, m, vals, chk, has_genuine_default); - } - }; - bcx - }) -} - -fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - has_genuine_default: bool) { - debug!("compile_submatch(bcx={}, m={:?}, vals=[{}])", - bcx.to_str(), - m, - vals.iter().map(|v| bcx.val_to_string(v.val)).collect::>().join(", ")); - let _indenter = indenter(); - let _icx = push_ctxt("match::compile_submatch"); - let mut bcx = bcx; - if m.is_empty() { - if chk.is_fallible() { - chk.handle_fail(bcx); - } - return; - } - - let tcx = bcx.tcx(); - let def_map = &tcx.def_map; - match pick_column_to_specialize(def_map, m) { - Some(col) => { - let val = vals[col]; - if has_nested_bindings(m, col) { - let expanded = expand_nested_bindings(bcx, m, col, val); - compile_submatch_continue(bcx, - &expanded[..], - vals, - chk, - col, - val, - has_genuine_default) - } else { - compile_submatch_continue(bcx, m, vals, chk, col, val, has_genuine_default) - } - } - None => { - let data = &m[0].data; - for &(ref name, ref value_ptr) in &m[0].bound_ptrs { - let binfo = *data.bindings_map.get(name).unwrap(); - call_lifetime_start(bcx, binfo.llmatch); - if binfo.trmode == TrByRef && type_is_fat_ptr(bcx.tcx(), binfo.ty) { - expr::copy_fat_ptr(bcx, *value_ptr, binfo.llmatch); - } - else { - Store(bcx, *value_ptr, binfo.llmatch); - } - } - match data.arm.guard { - Some(ref guard_expr) => { - bcx = compile_guard(bcx, - &**guard_expr, - m[0].data, - &m[1..m.len()], - vals, - chk, - has_genuine_default); - } - _ => () - } - Br(bcx, data.bodycx.llbb, DebugLoc::None); - } - } -} - -fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - m: &[Match<'a, 'p, 'blk, 'tcx>], - vals: &[MatchInput], - chk: &FailureHandler, - col: usize, - val: MatchInput, - has_genuine_default: bool) { - let fcx = 
bcx.fcx; - let tcx = bcx.tcx(); - let dm = &tcx.def_map; - - let mut vals_left = vals[0..col].to_vec(); - vals_left.extend_from_slice(&vals[col + 1..]); - let ccx = bcx.fcx.ccx; - - // Find a real id (we're adding placeholder wildcard patterns, but - // each column is guaranteed to have at least one real pattern) - let pat_id = m.iter().map(|br| br.pats[col].id) - .find(|&id| id != DUMMY_NODE_ID) - .unwrap_or(DUMMY_NODE_ID); - - let left_ty = if pat_id == DUMMY_NODE_ID { - tcx.mk_nil() - } else { - node_id_type(bcx, pat_id) - }; - - let mcx = check_match::MatchCheckCtxt { - tcx: bcx.tcx(), - param_env: bcx.tcx().empty_parameter_environment(), - }; - let adt_vals = if any_irrefutable_adt_pat(bcx.tcx(), m, col) { - let repr = adt::represent_type(bcx.ccx(), left_ty); - let arg_count = adt::num_args(&*repr, Disr(0)); - let (arg_count, struct_val) = if type_is_sized(bcx.tcx(), left_ty) { - (arg_count, val.val) - } else { - // For an unsized ADT (i.e. DST struct), we need to treat - // the last field specially: instead of simply passing a - // ValueRef pointing to that field, as with all the others, - // we skip it and instead construct a 'fat ptr' below. - (arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val))) - }; - let mut field_vals: Vec = (0..arg_count).map(|ix| - // By definition, these are all sized - adt::trans_field_ptr(bcx, &*repr, adt::MaybeSizedValue::sized(struct_val), Disr(0), ix) - ).collect(); - - match left_ty.sty { - ty::TyStruct(def, substs) if !type_is_sized(bcx.tcx(), left_ty) => { - // The last field is technically unsized but - // since we can only ever match that field behind - // a reference we construct a fat ptr here. 
- let unsized_ty = def.struct_variant().fields.last().map(|field| { - monomorphize::field_ty(bcx.tcx(), substs, field) - }).unwrap(); - let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr"); - - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta); - - let data = adt::trans_field_ptr(bcx, &*repr, struct_val, Disr(0), arg_count); - Store(bcx, data, expr::get_dataptr(bcx, scratch)); - Store(bcx, meta, expr::get_meta(bcx, scratch)); - field_vals.push(scratch); - } - _ => {} - } - Some(field_vals) - } else if any_uniq_pat(m, col) || any_region_pat(m, col) { - Some(vec!(Load(bcx, val.val))) - } else { - match left_ty.sty { - ty::TyArray(_, n) => { - let args = extract_vec_elems(bcx, left_ty, n, 0, val); - Some(args.vals) - } - _ => None - } - }; - match adt_vals { - Some(field_vals) => { - let pats = enter_match(bcx, dm, m, col, val, |pats| - check_match::specialize(&mcx, pats, - &check_match::Single, col, - field_vals.len()) - ); - let mut vals: Vec<_> = field_vals.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - vals.extend_from_slice(&vals_left); - compile_submatch(bcx, &pats, &vals, chk, has_genuine_default); - return; - } - _ => () - } - - // Decide what kind of branch we need - let opts = get_branches(bcx, m, col); - debug!("options={:?}", opts); - let mut kind = NoBranch; - let mut test_val = val.val; - debug!("test_val={}", bcx.val_to_string(test_val)); - if !opts.is_empty() { - match opts[0] { - ConstantValue(..) | ConstantRange(..) => { - test_val = load_if_immediate(bcx, val.val, left_ty); - kind = if left_ty.is_integral() { - Switch - } else { - Compare - }; - } - Variant(_, ref repr, _, _) => { - let (the_kind, val_opt) = adt::trans_switch(bcx, &**repr, val.val); - kind = the_kind; - if let Some(tval) = val_opt { test_val = tval; } - } - SliceLengthEqual(..) | SliceLengthGreaterOrEqual(..) 
=> { - let (_, len) = tvec::get_base_and_len(bcx, val.val, left_ty); - test_val = len; - kind = Switch; - } - } - } - for o in &opts { - match *o { - ConstantRange(..) => { kind = Compare; break }, - SliceLengthGreaterOrEqual(..) => { kind = CompareSliceLength; break }, - _ => () - } - } - let else_cx = match kind { - NoBranch | Single => bcx, - _ => bcx.fcx.new_temp_block("match_else") - }; - let sw = if kind == Switch { - build::Switch(bcx, test_val, else_cx.llbb, opts.len()) - } else { - C_int(ccx, 0) // Placeholder for when not using a switch - }; - - let defaults = enter_default(else_cx, dm, m, col, val); - let exhaustive = chk.is_infallible() && defaults.is_empty(); - let len = opts.len(); - - if exhaustive && kind == Switch { - build::Unreachable(else_cx); - } - - // Compile subtrees for each option - for (i, opt) in opts.iter().enumerate() { - // In some cases of range and vector pattern matching, we need to - // override the failure case so that instead of failing, it proceeds - // to try more matching. branch_chk, then, is the proper failure case - // for the current conditional branch. - let mut branch_chk = None; - let mut opt_cx = else_cx; - let debug_loc = opt.debug_loc(); - - if kind == Switch || !exhaustive || i + 1 < len { - opt_cx = bcx.fcx.new_temp_block("match_case"); - match kind { - Single => Br(bcx, opt_cx.llbb, debug_loc), - Switch => { - match opt.trans(bcx) { - SingleResult(r) => { - AddCase(sw, r.val, opt_cx.llbb); - bcx = r.bcx; - } - _ => { - bcx.sess().bug( - "in compile_submatch, expected \ - opt.trans() to return a SingleResult") - } - } - } - Compare | CompareSliceLength => { - let t = if kind == Compare { - left_ty - } else { - tcx.types.usize // vector length - }; - let Result { bcx: after_cx, val: matches } = { - match opt.trans(bcx) { - SingleResult(Result { bcx, val }) => { - compare_values(bcx, test_val, val, t, debug_loc) - } - RangeResult(Result { val: vbegin, .. 
}, - Result { bcx, val: vend }) => { - let llge = compare_scalar_types(bcx, test_val, vbegin, - t, hir::BiGe, debug_loc); - let llle = compare_scalar_types(bcx, test_val, vend, - t, hir::BiLe, debug_loc); - Result::new(bcx, And(bcx, llge, llle, DebugLoc::None)) - } - LowerBound(Result { bcx, val }) => { - Result::new(bcx, compare_scalar_types(bcx, test_val, - val, t, hir::BiGe, - debug_loc)) - } - } - }; - bcx = fcx.new_temp_block("compare_next"); - - // If none of the sub-cases match, and the current condition - // is guarded or has multiple patterns, move on to the next - // condition, if there is any, rather than falling back to - // the default. - let guarded = m[i].data.arm.guard.is_some(); - let multi_pats = m[i].pats.len() > 1; - if i + 1 < len && (guarded || multi_pats || kind == CompareSliceLength) { - branch_chk = Some(JumpToBasicBlock(bcx.llbb)); - } - CondBr(after_cx, matches, opt_cx.llbb, bcx.llbb, debug_loc); - } - _ => () - } - } else if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, debug_loc); - } - - let mut size = 0; - let mut unpacked = Vec::new(); - match *opt { - Variant(disr_val, ref repr, _, _) => { - let ExtractedBlock {vals: argvals, bcx: new_bcx} = - extract_variant_args(opt_cx, &**repr, disr_val, val); - size = argvals.len(); - unpacked = argvals; - opt_cx = new_bcx; - } - SliceLengthEqual(len, _) => { - let args = extract_vec_elems(opt_cx, left_ty, len, 0, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - SliceLengthGreaterOrEqual(before, after, _) => { - let args = extract_vec_elems(opt_cx, left_ty, before, after, val); - size = args.vals.len(); - unpacked = args.vals.clone(); - opt_cx = args.bcx; - } - ConstantValue(..) | ConstantRange(..) 
=> () - } - let opt_ms = enter_opt(opt_cx, pat_id, dm, m, opt, col, size, val); - let mut opt_vals: Vec<_> = unpacked.into_iter() - .map(|v|MatchInput::from_val(v)) - .collect(); - opt_vals.extend_from_slice(&vals_left[..]); - compile_submatch(opt_cx, - &opt_ms[..], - &opt_vals[..], - branch_chk.as_ref().unwrap_or(chk), - has_genuine_default); - } - - // Compile the fall-through case, if any - if !exhaustive && kind != Single { - if kind == Compare || kind == CompareSliceLength { - Br(bcx, else_cx.llbb, DebugLoc::None); - } - match chk { - // If there is only one default arm left, move on to the next - // condition explicitly rather than (eventually) falling back to - // the last default arm. - &JumpToBasicBlock(_) if defaults.len() == 1 && has_genuine_default => { - chk.handle_fail(else_cx); - } - _ => { - compile_submatch(else_cx, - &defaults[..], - &vals_left[..], - chk, - has_genuine_default); - } - } - } -} - -pub fn trans_match<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - match_expr: &hir::Expr, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match"); - trans_match_inner(bcx, match_expr.id, discr_expr, arms, dest) -} - -/// Checks whether the binding in `discr` is assigned to anywhere in the expression `body` -fn is_discr_reassigned(bcx: Block, discr: &hir::Expr, body: &hir::Expr) -> bool { - let (vid, field) = match discr.node { - hir::ExprPath(..) 
=> match bcx.def(discr.id) { - def::DefLocal(_, vid) | def::DefUpvar(_, vid, _, _) => (vid, None), - _ => return false - }, - hir::ExprField(ref base, field) => { - let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) { - Some(def::DefLocal(_, vid)) | Some(def::DefUpvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::NamedField(field.node))) - }, - hir::ExprTupField(ref base, field) => { - let vid = match bcx.tcx().def_map.borrow().get(&base.id).map(|d| d.full_def()) { - Some(def::DefLocal(_, vid)) | Some(def::DefUpvar(_, vid, _, _)) => vid, - _ => return false - }; - (vid, Some(mc::PositionalField(field.node))) - }, - _ => return false - }; - - let mut rc = ReassignmentChecker { - node: vid, - field: field, - reassigned: false - }; - { - let infcx = infer::normalizing_infer_ctxt(bcx.tcx(), &bcx.tcx().tables); - let mut visitor = euv::ExprUseVisitor::new(&mut rc, &infcx); - visitor.walk_expr(body); - } - rc.reassigned -} - -struct ReassignmentChecker { - node: ast::NodeId, - field: Option, - reassigned: bool -} - -// Determine if the expression we're matching on is reassigned to within -// the body of the match's arm. -// We only care for the `mutate` callback since this check only matters -// for cases where the matched value is moved. -impl<'tcx> euv::Delegate<'tcx> for ReassignmentChecker { - fn consume(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: euv::ConsumeMode) {} - fn matched_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::MatchMode) {} - fn consume_pat(&mut self, _: &hir::Pat, _: mc::cmt, _: euv::ConsumeMode) {} - fn borrow(&mut self, _: ast::NodeId, _: Span, _: mc::cmt, _: ty::Region, - _: ty::BorrowKind, _: euv::LoanCause) {} - fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} - - fn mutate(&mut self, _: ast::NodeId, _: Span, cmt: mc::cmt, _: euv::MutateMode) { - match cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. 
}) | - Categorization::Local(vid) => self.reassigned |= self.node == vid, - Categorization::Interior(ref base_cmt, mc::InteriorField(field)) => { - match base_cmt.cat { - Categorization::Upvar(mc::Upvar { id: ty::UpvarId { var_id: vid, .. }, .. }) | - Categorization::Local(vid) => { - self.reassigned |= self.node == vid && - (self.field.is_none() || Some(field) == self.field) - }, - _ => {} - } - }, - _ => {} - } - } -} - -fn create_bindings_map<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pat: &hir::Pat, - discr: &hir::Expr, body: &hir::Expr) - -> BindingsMap<'tcx> { - // Create the bindings map, which is a mapping from each binding name - // to an alloca() that will be the value for that local variable. - // Note that we use the names because each binding will have many ids - // from the various alternatives. - let ccx = bcx.ccx(); - let tcx = bcx.tcx(); - let reassigned = is_discr_reassigned(bcx, discr, body); - let mut bindings_map = FnvHashMap(); - pat_bindings(&tcx.def_map, &*pat, |bm, p_id, span, path1| { - let name = path1.node; - let variable_ty = node_id_type(bcx, p_id); - let llvariable_ty = type_of::type_of(ccx, variable_ty); - let tcx = bcx.tcx(); - let param_env = tcx.empty_parameter_environment(); - - let llmatch; - let trmode; - let moves_by_default = variable_ty.moves_by_default(¶m_env, span); - match bm { - hir::BindByValue(_) if !moves_by_default || reassigned => - { - llmatch = alloca(bcx, llvariable_ty.ptr_to(), "__llmatch"); - let llcopy = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = if moves_by_default { - TrByMoveIntoCopy(llcopy) - } else { - TrByCopy(llcopy) - }; - } - hir::BindByValue(_) => { - // in this case, the final type of the variable will be T, - // but during matching we need to store a *T as explained - // above - llmatch = alloca(bcx, llvariable_ty.ptr_to(), &bcx.name(name)); - trmode = TrByMoveRef; - } - hir::BindByRef(_) => { - llmatch = alloca(bcx, llvariable_ty, &bcx.name(name)); - trmode = TrByRef; - } - }; - 
bindings_map.insert(name, BindingInfo { - llmatch: llmatch, - trmode: trmode, - id: p_id, - span: span, - ty: variable_ty - }); - }); - return bindings_map; -} - -fn trans_match_inner<'blk, 'tcx>(scope_cx: Block<'blk, 'tcx>, - match_id: ast::NodeId, - discr_expr: &hir::Expr, - arms: &[hir::Arm], - dest: Dest) -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::trans_match_inner"); - let fcx = scope_cx.fcx; - let mut bcx = scope_cx; - let tcx = bcx.tcx(); - - let discr_datum = unpack_datum!(bcx, expr::trans_to_lvalue(bcx, discr_expr, - "match")); - if bcx.unreachable.get() { - return bcx; - } - - let t = node_id_type(bcx, discr_expr.id); - let chk = if t.is_empty(tcx) { - Unreachable - } else { - Infallible - }; - - let arm_datas: Vec = arms.iter().map(|arm| ArmData { - bodycx: fcx.new_id_block("case_body", arm.body.id), - arm: arm, - bindings_map: create_bindings_map(bcx, &*arm.pats[0], discr_expr, &*arm.body) - }).collect(); - - let mut pat_renaming_map = if scope_cx.sess().opts.debuginfo != NoDebugInfo { - Some(FnvHashMap()) - } else { - None - }; - - let arm_pats: Vec>> = { - let mut static_inliner = StaticInliner::new(scope_cx.tcx(), - pat_renaming_map.as_mut()); - arm_datas.iter().map(|arm_data| { - arm_data.arm.pats.iter().map(|p| static_inliner.fold_pat((*p).clone())).collect() - }).collect() - }; - - let mut matches = Vec::new(); - for (arm_data, pats) in arm_datas.iter().zip(&arm_pats) { - matches.extend(pats.iter().map(|p| Match { - pats: vec![&**p], - data: arm_data, - bound_ptrs: Vec::new(), - pat_renaming_map: pat_renaming_map.as_ref() - })); - } - - // `compile_submatch` works one column of arm patterns a time and - // then peels that column off. So as we progress, it may become - // impossible to tell whether we have a genuine default arm, i.e. - // `_ => foo` or not. Sometimes it is important to know that in order - // to decide whether moving on to the next condition or falling back - // to the default arm. 
- let has_default = arms.last().map_or(false, |arm| { - arm.pats.len() == 1 - && arm.pats.last().unwrap().node == hir::PatWild - }); - - compile_submatch(bcx, &matches[..], &[discr_datum.match_input()], &chk, has_default); - - let mut arm_cxs = Vec::new(); - for arm_data in &arm_datas { - let mut bcx = arm_data.bodycx; - - // insert bindings into the lllocals map and add cleanups - let cs = fcx.push_custom_cleanup_scope(); - bcx = insert_lllocals(bcx, &arm_data.bindings_map, Some(cleanup::CustomScope(cs))); - bcx = expr::trans_into(bcx, &*arm_data.arm.body, dest); - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, cs); - arm_cxs.push(bcx); - } - - bcx = scope_cx.fcx.join_blocks(match_id, &arm_cxs[..]); - return bcx; -} - -/// Generates code for a local variable declaration like `let ;` or `let = -/// `. -pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - local: &hir::Local) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("match::store_local"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let pat = &*local.pat; - - fn create_dummy_locals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - pat: &hir::Pat) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("create_dummy_locals"); - // create dummy memory for the variables if we have no - // value to store into them immediately - let tcx = bcx.tcx(); - pat_bindings(&tcx.def_map, pat, |_, p_id, _, path1| { - let scope = cleanup::var_scope(tcx, p_id); - bcx = mk_binding_alloca( - bcx, p_id, path1.node, scope, (), - "_match::store_local::create_dummy_locals", - |(), bcx, Datum { val: llval, ty, kind }| { - // Dummy-locals start out uninitialized, so set their - // drop-flag hints (if any) to "moved." 
- if let Some(hint) = kind.dropflag_hint(bcx) { - let moved_hint = adt::DTOR_MOVED_HINT; - debug!("store moved_hint={} for hint={:?}, uninitialized dummy", - moved_hint, hint); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint), hint.to_value().value()); - } - - if kind.drop_flag_info.must_zero() { - // if no drop-flag hint, or the hint requires - // we maintain the embedded drop-flag, then - // mark embedded drop-flag(s) as moved - // (i.e. "already dropped"). - drop_done_fill_mem(bcx, llval, ty); - } - bcx - }); - }); - bcx - } - - match local.init { - Some(ref init_expr) => { - // Optimize the "let x = expr" case. This just writes - // the result of evaluating `expr` directly into the alloca - // for `x`. Often the general path results in similar or the - // same code post-optimization, but not always. In particular, - // in unsafe code, you can have expressions like - // - // let x = intrinsics::uninit(); - // - // In such cases, the more general path is unsafe, because - // it assumes it is matching against a valid value. - match simple_name(pat) { - Some(name) => { - let var_scope = cleanup::var_scope(tcx, local.id); - return mk_binding_alloca( - bcx, pat.id, name, var_scope, (), - "_match::store_local", - |(), bcx, Datum { val: v, .. }| expr::trans_into(bcx, &**init_expr, - expr::SaveIn(v))); - } - - None => {} - } - - // General path. 
- let init_datum = - unpack_datum!(bcx, expr::trans_to_lvalue(bcx, &**init_expr, "let")); - if bcx.sess().asm_comments() { - add_comment(bcx, "creating zeroable ref llval"); - } - let var_scope = cleanup::var_scope(tcx, local.id); - bind_irrefutable_pat(bcx, pat, init_datum.match_input(), var_scope) - } - None => { - create_dummy_locals(bcx, pat) - } - } -} - -fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - p_id: ast::NodeId, - name: ast::Name, - cleanup_scope: cleanup::ScopeId, - arg: A, - caller_name: &'static str, - populate: F) - -> Block<'blk, 'tcx> where - F: FnOnce(A, Block<'blk, 'tcx>, Datum<'tcx, Lvalue>) -> Block<'blk, 'tcx>, -{ - let var_ty = node_id_type(bcx, p_id); - - // Allocate memory on stack for the binding. - let llval = alloc_ty(bcx, var_ty, &bcx.name(name)); - let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse); - let datum = Datum::new(llval, var_ty, lvalue); - - debug!("mk_binding_alloca cleanup_scope={:?} llval={} var_ty={:?}", - cleanup_scope, bcx.ccx().tn().val_to_string(llval), var_ty); - - // Subtle: be sure that we *populate* the memory *before* - // we schedule the cleanup. - call_lifetime_start(bcx, llval); - let bcx = populate(arg, bcx, datum); - bcx.fcx.schedule_lifetime_end(cleanup_scope, llval); - bcx.fcx.schedule_drop_mem(cleanup_scope, llval, var_ty, lvalue.dropflag_hint(bcx)); - - // Now that memory is initialized and has cleanup scheduled, - // insert datum into the local variable map. - bcx.fcx.lllocals.borrow_mut().insert(p_id, datum); - bcx -} - -/// A simple version of the pattern matching code that only handles -/// irrefutable patterns. This is used in let/argument patterns, -/// not in match statements. Unifying this code with the code above -/// sounds nice, but in practice it produces very inefficient code, -/// since the match code is so much more general. 
In most cases, -/// LLVM is able to optimize the code, but it causes longer compile -/// times and makes the generated code nigh impossible to read. -/// -/// # Arguments -/// - bcx: starting basic block context -/// - pat: the irrefutable pattern being matched. -/// - val: the value being matched -- must be an lvalue (by ref, with cleanup) -pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - pat: &hir::Pat, - val: MatchInput, - cleanup_scope: cleanup::ScopeId) - -> Block<'blk, 'tcx> { - debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={})", - bcx.to_str(), - pat, - bcx.val_to_string(val.val)); - - if bcx.sess().asm_comments() { - add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})", - pat)); - } - - let _indenter = indenter(); - - let _icx = push_ctxt("match::bind_irrefutable_pat"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - let ccx = bcx.ccx(); - match pat.node { - hir::PatIdent(pat_binding_mode, ref path1, ref inner) => { - if pat_is_binding(&tcx.def_map.borrow(), &*pat) { - // Allocate the stack slot where the value of this - // binding will live and place it into the appropriate - // map. - bcx = mk_binding_alloca( - bcx, pat.id, path1.node.name, cleanup_scope, (), - "_match::bind_irrefutable_pat", - |(), bcx, Datum { val: llval, ty, kind: _ }| { - match pat_binding_mode { - hir::BindByValue(_) => { - // By value binding: move the value that `val` - // points at into the binding's stack slot. 
- let d = val.to_datum(ty); - d.store_to(bcx, llval) - } - - hir::BindByRef(_) => { - // By ref binding: the value of the variable - // is the pointer `val` itself or fat pointer referenced by `val` - if type_is_fat_ptr(bcx.tcx(), ty) { - expr::copy_fat_ptr(bcx, val.val, llval); - } - else { - Store(bcx, val.val, llval); - } - - bcx - } - } - }); - } - - if let Some(ref inner_pat) = *inner { - bcx = bind_irrefutable_pat(bcx, &**inner_pat, val, cleanup_scope); - } - } - hir::PatEnum(_, ref sub_pats) => { - let opt_def = bcx.tcx().def_map.borrow().get(&pat.id).map(|d| d.full_def()); - match opt_def { - Some(def::DefVariant(enum_id, var_id, _)) => { - let repr = adt::represent_node(bcx, pat.id); - let vinfo = ccx.tcx().lookup_adt_def(enum_id).variant_with_id(var_id); - let args = extract_variant_args(bcx, - &*repr, - Disr::from(vinfo.disr_val), - val); - if let Some(ref sub_pat) = *sub_pats { - for (i, &argval) in args.vals.iter().enumerate() { - bcx = bind_irrefutable_pat( - bcx, - &*sub_pat[i], - MatchInput::from_val(argval), - cleanup_scope); - } - } - } - Some(def::DefStruct(..)) => { - match *sub_pats { - None => { - // This is a unit-like struct. Nothing to do here. - } - Some(ref elems) => { - // This is the tuple struct case. - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in elems.iter().enumerate() { - let fldptr = adt::trans_field_ptr(bcx, &*repr, - val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &**elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - } - } - _ => { - // Nothing to do here. 
- } - } - } - hir::PatStruct(_, ref fields, _) => { - let tcx = bcx.tcx(); - let pat_ty = node_id_type(bcx, pat.id); - let pat_repr = adt::represent_type(bcx.ccx(), pat_ty); - let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id); - - let val = if type_is_sized(tcx, pat_ty) { - adt::MaybeSizedValue::sized(val.val) - } else { - let data = Load(bcx, expr::get_dataptr(bcx, val.val)); - let meta = Load(bcx, expr::get_meta(bcx, val.val)); - adt::MaybeSizedValue::unsized_(data, meta) - }; - - for f in fields { - let name = f.node.name; - let field_idx = pat_v.field_index(name); - let mut fldptr = adt::trans_field_ptr( - bcx, - &*pat_repr, - val, - pat_v.discr, - field_idx); - - let fty = pat_v.fields[field_idx].1; - // If it's not sized, then construct a fat pointer instead of - // a regular one - if !type_is_sized(tcx, fty) { - let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr"); - debug!("Creating fat pointer {}", bcx.val_to_string(scratch)); - Store(bcx, fldptr, expr::get_dataptr(bcx, scratch)); - Store(bcx, val.meta, expr::get_meta(bcx, scratch)); - fldptr = scratch; - } - bcx = bind_irrefutable_pat(bcx, - &*f.node.pat, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - hir::PatTup(ref elems) => { - let repr = adt::represent_node(bcx, pat.id); - let val = adt::MaybeSizedValue::sized(val.val); - for (i, elem) in elems.iter().enumerate() { - let fldptr = adt::trans_field_ptr(bcx, &*repr, val, Disr(0), i); - bcx = bind_irrefutable_pat( - bcx, - &**elem, - MatchInput::from_val(fldptr), - cleanup_scope); - } - } - hir::PatBox(ref inner) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. - let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. 
- if let hir::PatIdent(hir::BindByRef(_),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, &**inner, MatchInput::from_val(val), cleanup_scope); - } - hir::PatRegion(ref inner, _) => { - let pat_ty = node_id_type(bcx, inner.id); - // Pass along DSTs as fat pointers. - let val = if type_is_fat_ptr(tcx, pat_ty) { - // We need to check for this, as the pattern could be binding - // a fat pointer by-value. - if let hir::PatIdent(hir::BindByRef(_),_,_) = inner.node { - val.val - } else { - Load(bcx, val.val) - } - } else if type_is_sized(tcx, pat_ty) { - Load(bcx, val.val) - } else { - val.val - }; - bcx = bind_irrefutable_pat( - bcx, - &**inner, - MatchInput::from_val(val), - cleanup_scope); - } - hir::PatVec(ref before, ref slice, ref after) => { - let pat_ty = node_id_type(bcx, pat.id); - let mut extracted = extract_vec_elems(bcx, pat_ty, before.len(), after.len(), val); - match slice { - &Some(_) => { - extracted.vals.insert( - before.len(), - bind_subslice_pat(bcx, pat.id, val, before.len(), after.len()) - ); - } - &None => () - } - bcx = before - .iter() - .chain(slice.iter()) - .chain(after.iter()) - .zip(extracted.vals) - .fold(bcx, |bcx, (inner, elem)| { - bind_irrefutable_pat( - bcx, - &**inner, - MatchInput::from_val(elem), - cleanup_scope) - }); - } - hir::PatQPath(..) | hir::PatWild | hir::PatLit(_) | - hir::PatRange(_, _) => () - } - return bcx; -} diff --git a/src/librustc_trans/trans/adt.rs b/src/librustc_trans/trans/adt.rs deleted file mode 100644 index 59487c0362c89..0000000000000 --- a/src/librustc_trans/trans/adt.rs +++ /dev/null @@ -1,1509 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Representation of Algebraic Data Types -//! -//! This module determines how to represent enums, structs, and tuples -//! based on their monomorphized types; it is responsible both for -//! choosing a representation and translating basic operations on -//! values of those types. (Note: exporting the representations for -//! debuggers is handled in debuginfo.rs, not here.) -//! -//! Note that the interface treats everything as a general case of an -//! enum, so structs/tuples/etc. have one pseudo-variant with -//! discriminant 0; i.e., as if they were a univariant enum. -//! -//! Having everything in one place will enable improvements to data -//! structure representation; possibilities include: -//! -//! - User-specified alignment (e.g., cacheline-aligning parts of -//! concurrently accessed data structures); LLVM can't represent this -//! directly, so we'd have to insert padding fields in any structure -//! that might contain one and adjust GEP indices accordingly. See -//! issue #4578. -//! -//! - Store nested enums' discriminants in the same word. Rather, if -//! some variants start with enums, and those enums representations -//! have unused alignment padding between discriminant and body, the -//! outer enum's discriminant can be stored there and those variants -//! can start at offset 0. Kind of fancy, and might need work to -//! make copies of the inner enum type cooperate, but it could help -//! with `Option` or `Result` wrapped around another enum. -//! -//! - Tagged pointers would be neat, but given that any type can be -//! used unboxed and any field can have pointers (including mutable) -//! taken to it, implementing them for Rust seems difficult. 
- -pub use self::Repr::*; -use super::Disr; - -use std; -use std::rc::Rc; - -use llvm::{ValueRef, True, IntEQ, IntNE}; -use back::abi::FAT_PTR_ADDR; -use middle::subst; -use middle::ty::{self, Ty}; -use syntax::ast; -use syntax::attr; -use syntax::attr::IntType; -use trans::_match; -use trans::base::InitAlloca; -use trans::build::*; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::common::*; -use trans::datum; -use trans::debuginfo::DebugLoc; -use trans::glue; -use trans::machine; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of; - -type Hint = attr::ReprAttr; - -// Representation of the context surrounding an unsized type. I want -// to be able to track the drop flags that are injected by trans. -#[derive(Clone, Copy, PartialEq, Debug)] -pub struct TypeContext { - prefix: Type, - needs_drop_flag: bool, -} - -impl TypeContext { - pub fn prefix(&self) -> Type { self.prefix } - pub fn needs_drop_flag(&self) -> bool { self.needs_drop_flag } - - fn direct(t: Type) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: false } - } - fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { - TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } - } - pub fn to_string(self) -> String { - let TypeContext { prefix, needs_drop_flag } = self; - format!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}", - prefix.to_string(), needs_drop_flag) - } -} - -/// Representations. -#[derive(Eq, PartialEq, Debug)] -pub enum Repr<'tcx> { - /// C-like enums; basically an int. - CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType) - /// Single-case variants, and structs/tuples/records. - /// - /// Structs with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; this is included - /// in the `Struct` if present. - /// (The flag if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.) 
- Univariant(Struct<'tcx>, u8), - /// General-case enums: for each case there is a struct, and they - /// all start with a field for the discriminant. - /// - /// Types with destructors need a dynamic destroyedness flag to - /// avoid running the destructor too many times; the last argument - /// indicates whether such a flag is present. - /// (The flag, if nonzero, represents the initialization value to use; - /// if zero, then use no flag at all.) - General(IntType, Vec>, u8), - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` must have single field which is known to be nonnull due to its type. - /// The other case is known to be zero sized. Hence we represent the enum - /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, - /// otherwise it indicates the other case. - RawNullablePointer { - nndiscr: Disr, - nnty: Ty<'tcx>, - nullfields: Vec> - }, - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th - /// field is known to be nonnull due to its type; if that field is null, then - /// it represents the other case, which is inhabited by at most one value - /// (and all other fields are undefined/unused). - /// - /// For example, `std::option::Option` instantiated at a safe pointer type - /// is represented such that `None` is a null pointer and `Some` is the - /// identity function. - StructWrappedNullablePointer { - nonnull: Struct<'tcx>, - nndiscr: Disr, - discrfield: DiscrField, - nullfields: Vec>, - } -} - -/// For structs, and struct-like parts of anything fancier. -#[derive(Eq, PartialEq, Debug)] -pub struct Struct<'tcx> { - // If the struct is DST, then the size and alignment do not take into - // account the unsized fields of the struct. 
- pub size: u64, - pub align: u32, - pub sized: bool, - pub packed: bool, - pub fields: Vec>, -} - -#[derive(Copy, Clone)] -pub struct MaybeSizedValue { - pub value: ValueRef, - pub meta: ValueRef, -} - -impl MaybeSizedValue { - pub fn sized(value: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: std::ptr::null_mut() - } - } - - pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue { - MaybeSizedValue { - value: value, - meta: meta - } - } - - pub fn has_meta(&self) -> bool { - !self.meta.is_null() - } -} - -/// Convenience for `represent_type`. There should probably be more or -/// these, for places in trans where the `Ty` isn't directly -/// available. -pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node: ast::NodeId) -> Rc> { - represent_type(bcx.ccx(), node_id_type(bcx, node)) -} - -/// Decides how to represent a given type. -pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) - -> Rc> { - debug!("Representing: {}", t); - match cx.adt_reprs().borrow().get(&t) { - Some(repr) => return repr.clone(), - None => {} - } - - let repr = Rc::new(represent_type_uncached(cx, t)); - debug!("Represented as: {:?}", repr); - cx.adt_reprs().borrow_mut().insert(t, repr.clone()); - repr -} - -const fn repeat_u8_as_u32(val: u8) -> u32 { - (val as u32) << 24 | (val as u32) << 16 | (val as u32) << 8 | val as u32 -} - -const fn repeat_u8_as_u64(val: u8) -> u64 { - (repeat_u8_as_u32(val) as u64) << 32 | repeat_u8_as_u32(val) as u64 -} - -/// `DTOR_NEEDED_HINT` is a stack-local hint that just means -/// "we do not know whether the destructor has run or not; check the -/// drop-flag embedded in the value itself." -pub const DTOR_NEEDED_HINT: u8 = 0x3d; - -/// `DTOR_MOVED_HINT` is a stack-local hint that means "this value has -/// definitely been moved; you do not need to run its destructor." 
-/// -/// (However, for now, such values may still end up being explicitly -/// zeroed by the generated code; this is the distinction between -/// `datum::DropFlagInfo::ZeroAndMaintain` versus -/// `datum::DropFlagInfo::DontZeroJustUse`.) -pub const DTOR_MOVED_HINT: u8 = 0x2d; - -pub const DTOR_NEEDED: u8 = 0xd4; -pub const DTOR_NEEDED_U32: u32 = repeat_u8_as_u32(DTOR_NEEDED); -pub const DTOR_NEEDED_U64: u64 = repeat_u8_as_u64(DTOR_NEEDED); -#[allow(dead_code)] -pub fn dtor_needed_usize(ccx: &CrateContext) -> usize { - match &ccx.tcx().sess.target.target.target_pointer_width[..] { - "32" => DTOR_NEEDED_U32 as usize, - "64" => DTOR_NEEDED_U64 as usize, - tws => panic!("Unsupported target word size for int: {}", tws), - } -} - -pub const DTOR_DONE: u8 = 0x1d; -pub const DTOR_DONE_U32: u32 = repeat_u8_as_u32(DTOR_DONE); -pub const DTOR_DONE_U64: u64 = repeat_u8_as_u64(DTOR_DONE); -#[allow(dead_code)] -pub fn dtor_done_usize(ccx: &CrateContext) -> usize { - match &ccx.tcx().sess.target.target.target_pointer_width[..] 
{ - "32" => DTOR_DONE_U32 as usize, - "64" => DTOR_DONE_U64 as usize, - tws => panic!("Unsupported target word size for int: {}", tws), - } -} - -fn dtor_to_init_u8(dtor: bool) -> u8 { - if dtor { DTOR_NEEDED } else { 0 } -} - -pub trait GetDtorType<'tcx> { fn dtor_type(&self) -> Ty<'tcx>; } -impl<'tcx> GetDtorType<'tcx> for ty::ctxt<'tcx> { - fn dtor_type(&self) -> Ty<'tcx> { self.types.u8 } -} - -fn dtor_active(flag: u8) -> bool { - flag != 0 -} - -fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) -> Repr<'tcx> { - match t.sty { - ty::TyTuple(ref elems) => { - Univariant(mk_struct(cx, &elems[..], false, t), 0) - } - ty::TyStruct(def, substs) => { - let mut ftys = def.struct_variant().fields.iter().map(|field| { - monomorphize::field_ty(cx.tcx(), substs, field) - }).collect::>(); - let packed = cx.tcx().lookup_packed(def.did); - // FIXME(16758) don't add a drop flag to unsized structs, as it - // won't actually be in the location we say it is because it'll be after - // the unsized field. Several other pieces of code assume that the unsized - // field is definitely the last one. - let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t); - if dtor { - ftys.push(cx.tcx().dtor_type()); - } - - Univariant(mk_struct(cx, &ftys[..], packed, t), dtor_to_init_u8(dtor)) - } - ty::TyClosure(_, ref substs) => { - Univariant(mk_struct(cx, &substs.upvar_tys, false, t), 0) - } - ty::TyEnum(def, substs) => { - let cases = get_cases(cx.tcx(), def, substs); - let hint = *cx.tcx().lookup_repr_hints(def.did).get(0) - .unwrap_or(&attr::ReprAny); - - let dtor = def.dtor_kind().has_drop_flag(); - - if cases.is_empty() { - // Uninhabitable; represent as unit - // (Typechecking will reject discriminant-sizing attrs.) 
- assert_eq!(hint, attr::ReprAny); - let ftys = if dtor { vec!(cx.tcx().dtor_type()) } else { vec!() }; - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); - } - - if !dtor && cases.iter().all(|c| c.tys.is_empty()) { - // All bodies empty -> intlike - let discrs: Vec<_> = cases.iter().map(|c| Disr::from(c.discr)).collect(); - let bounds = IntBounds { - ulo: discrs.iter().min().unwrap().0, - uhi: discrs.iter().max().unwrap().0, - slo: discrs.iter().map(|n| n.0 as i64).min().unwrap(), - shi: discrs.iter().map(|n| n.0 as i64).max().unwrap() - }; - return mk_cenum(cx, hint, &bounds); - } - - // Since there's at least one - // non-empty body, explicit discriminants should have - // been rejected by a checker before this point. - if !cases.iter().enumerate().all(|(i,c)| c.discr == Disr::from(i)) { - cx.sess().bug(&format!("non-C-like enum {} with specified \ - discriminants", - cx.tcx().item_path_str(def.did))); - } - - if cases.len() == 1 { - // Equivalent to a struct/tuple/newtype. - // (Typechecking will reject discriminant-sizing attrs.) 
- assert_eq!(hint, attr::ReprAny); - let mut ftys = cases[0].tys.clone(); - if dtor { ftys.push(cx.tcx().dtor_type()); } - return Univariant(mk_struct(cx, &ftys[..], false, t), - dtor_to_init_u8(dtor)); - } - - if !dtor && cases.len() == 2 && hint == attr::ReprAny { - // Nullable pointer optimization - let mut discr = 0; - while discr < 2 { - if cases[1 - discr].is_zerolen(cx, t) { - let st = mk_struct(cx, &cases[discr].tys, - false, t); - match cases[discr].find_ptr(cx) { - Some(ref df) if df.len() == 1 && st.fields.len() == 1 => { - return RawNullablePointer { - nndiscr: Disr::from(discr), - nnty: st.fields[0], - nullfields: cases[1 - discr].tys.clone() - }; - } - Some(mut discrfield) => { - discrfield.push(0); - discrfield.reverse(); - return StructWrappedNullablePointer { - nndiscr: Disr::from(discr), - nonnull: st, - discrfield: discrfield, - nullfields: cases[1 - discr].tys.clone() - }; - } - None => {} - } - } - discr += 1; - } - } - - // The general case. - assert!((cases.len() - 1) as i64 >= 0); - let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64, - slo: 0, shi: (cases.len() - 1) as i64 }; - let min_ity = range_to_inttype(cx, hint, &bounds); - - // Create the set of structs that represent each variant - // Use the minimum integer type we figured out above - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), min_ity)); - ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } - mk_struct(cx, &ftys, false, t) - }).collect(); - - - // Check to see if we should use a different type for the - // discriminant. If the overall alignment of the type is - // the same as the first field in each variant, we can safely use - // an alignment-sized type. - // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. 
By using a - // bigger integer size, LLVM can be sure about it's contents and - // won't be so conservative. - // This check is needed to avoid increasing the size of types when - // the alignment of the first field is smaller than the overall - // alignment of the type. - let (_, align) = union_size_and_align(&fields); - let mut use_align = true; - for st in &fields { - // Get the first non-zero-sized field - let field = st.fields.iter().skip(1).filter(|ty| { - let t = type_of::sizing_type_of(cx, **ty); - machine::llsize_of_real(cx, t) != 0 || - // This case is only relevant for zero-sized types with large alignment - machine::llalign_of_min(cx, t) != 1 - }).next(); - - if let Some(field) = field { - let field_align = type_of::align_of(cx, *field); - if field_align != align { - use_align = false; - break; - } - } - } - let ity = if use_align { - // Use the overall alignment - match align { - 1 => attr::UnsignedInt(ast::TyU8), - 2 => attr::UnsignedInt(ast::TyU16), - 4 => attr::UnsignedInt(ast::TyU32), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - attr::UnsignedInt(ast::TyU64), - _ => min_ity // use min_ity as a fallback - } - } else { - min_ity - }; - - let fields : Vec<_> = cases.iter().map(|c| { - let mut ftys = vec!(ty_of_inttype(cx.tcx(), ity)); - ftys.extend_from_slice(&c.tys); - if dtor { ftys.push(cx.tcx().dtor_type()); } - mk_struct(cx, &ftys[..], false, t) - }).collect(); - - ensure_enum_fits_in_address_space(cx, &fields[..], t); - - General(ity, fields, dtor_to_init_u8(dtor)) - } - _ => cx.sess().bug(&format!("adt::represent_type called on non-ADT type: {}", t)) - } -} - -// this should probably all be in ty -struct Case<'tcx> { - discr: Disr, - tys: Vec> -} - -/// This represents the (GEP) indices to follow to get to the discriminant field -pub type DiscrField = Vec; - -fn find_discr_field_candidate<'tcx>(tcx: &ty::ctxt<'tcx>, - ty: Ty<'tcx>, - mut path: DiscrField) -> Option { - match ty.sty { - // Fat &T/&mut T/Box i.e. 
T is [T], str, or Trait - ty::TyRef(_, ty::TypeAndMut { ty, .. }) | ty::TyBox(ty) if !type_is_sized(tcx, ty) => { - path.push(FAT_PTR_ADDR); - Some(path) - }, - - // Regular thin pointer: &T/&mut T/Box - ty::TyRef(..) | ty::TyBox(..) => Some(path), - - // Functions are just pointers - ty::TyBareFn(..) => Some(path), - - // Is this the NonZero lang item wrapping a pointer or integer type? - ty::TyStruct(def, substs) if Some(def.did) == tcx.lang_items.non_zero() => { - let nonzero_fields = &def.struct_variant().fields; - assert_eq!(nonzero_fields.len(), 1); - let field_ty = monomorphize::field_ty(tcx, substs, &nonzero_fields[0]); - match field_ty.sty { - ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if !type_is_sized(tcx, ty) => { - path.extend_from_slice(&[0, FAT_PTR_ADDR]); - Some(path) - }, - ty::TyRawPtr(..) | ty::TyInt(..) | ty::TyUint(..) => { - path.push(0); - Some(path) - }, - _ => None - } - }, - - // Perhaps one of the fields of this struct is non-zero - // let's recurse and find out - ty::TyStruct(def, substs) => { - for (j, field) in def.struct_variant().fields.iter().enumerate() { - let field_ty = monomorphize::field_ty(tcx, substs, field); - if let Some(mut fpath) = find_discr_field_candidate(tcx, field_ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Perhaps one of the upvars of this struct is non-zero - // Let's recurse and find out! - ty::TyClosure(_, ref substs) => { - for (j, &ty) in substs.upvar_tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Can we use one of the fields in this tuple? - ty::TyTuple(ref tys) => { - for (j, &ty) in tys.iter().enumerate() { - if let Some(mut fpath) = find_discr_field_candidate(tcx, ty, path.clone()) { - fpath.push(j); - return Some(fpath); - } - } - None - }, - - // Is this a fixed-size array of something non-zero - // with at least one element? 
- ty::TyArray(ety, d) if d > 0 => { - if let Some(mut vpath) = find_discr_field_candidate(tcx, ety, path) { - vpath.push(0); - Some(vpath) - } else { - None - } - }, - - // Anything else is not a pointer - _ => None - } -} - -impl<'tcx> Case<'tcx> { - fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool { - mk_struct(cx, &self.tys, false, scapegoat).size == 0 - } - - fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option { - for (i, &ty) in self.tys.iter().enumerate() { - if let Some(mut path) = find_discr_field_candidate(cx.tcx(), ty, vec![]) { - path.push(i); - return Some(path); - } - } - None - } -} - -fn get_cases<'tcx>(tcx: &ty::ctxt<'tcx>, - adt: ty::AdtDef<'tcx>, - substs: &subst::Substs<'tcx>) - -> Vec> { - adt.variants.iter().map(|vi| { - let field_tys = vi.fields.iter().map(|field| { - monomorphize::field_ty(tcx, substs, field) - }).collect(); - Case { discr: Disr::from(vi.disr_val), tys: field_tys } - }).collect() -} - -fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - tys: &[Ty<'tcx>], packed: bool, - scapegoat: Ty<'tcx>) - -> Struct<'tcx> { - let sized = tys.iter().all(|&ty| type_is_sized(cx.tcx(), ty)); - let lltys : Vec = if sized { - tys.iter().map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - } else { - tys.iter().filter(|&ty| type_is_sized(cx.tcx(), *ty)) - .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - }; - - ensure_struct_fits_in_address_space(cx, &lltys[..], packed, scapegoat); - - let llty_rec = Type::struct_(cx, &lltys[..], packed); - Struct { - size: machine::llsize_of_alloc(cx, llty_rec), - align: machine::llalign_of_min(cx, llty_rec), - sized: sized, - packed: packed, - fields: tys.to_vec(), - } -} - -#[derive(Debug)] -struct IntBounds { - slo: i64, - shi: i64, - ulo: u64, - uhi: u64 -} - -fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - hint: Hint, bounds: &IntBounds) - -> Repr<'tcx> { - let it = range_to_inttype(cx, hint, bounds); - match it { - attr::SignedInt(_) => CEnum(it, 
Disr(bounds.slo as u64), Disr(bounds.shi as u64)), - attr::UnsignedInt(_) => CEnum(it, Disr(bounds.ulo), Disr(bounds.uhi)) - } -} - -fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType { - debug!("range_to_inttype: {:?} {:?}", hint, bounds); - // Lists of sizes to try. u64 is always allowed as a fallback. - #[allow(non_upper_case_globals)] - const choose_shortest: &'static [IntType] = &[ - attr::UnsignedInt(ast::TyU8), attr::SignedInt(ast::TyI8), - attr::UnsignedInt(ast::TyU16), attr::SignedInt(ast::TyI16), - attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)]; - #[allow(non_upper_case_globals)] - const at_least_32: &'static [IntType] = &[ - attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)]; - - let attempts; - match hint { - attr::ReprInt(span, ity) => { - if !bounds_usable(cx, ity, bounds) { - cx.sess().span_bug(span, "representation hint insufficient for discriminant range") - } - return ity; - } - attr::ReprExtern => { - attempts = match &cx.sess().target.target.arch[..] { - // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32` - // appears to be used on Linux and NetBSD, but some systems may use the variant - // corresponding to `choose_shortest`. However, we don't run on those yet...? 
- "arm" => at_least_32, - _ => at_least_32, - } - } - attr::ReprAny => { - attempts = choose_shortest; - }, - attr::ReprPacked => { - cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum"); - } - attr::ReprSimd => { - cx.tcx().sess.bug("range_to_inttype: found ReprSimd on an enum"); - } - } - for &ity in attempts { - if bounds_usable(cx, ity, bounds) { - return ity; - } - } - return attr::UnsignedInt(ast::TyU64); -} - -pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type { - match ity { - attr::SignedInt(t) => Type::int_from_ty(cx, t), - attr::UnsignedInt(t) => Type::uint_from_ty(cx, t) - } -} - -fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool { - debug!("bounds_usable: {:?} {:?}", ity, bounds); - match ity { - attr::SignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true); - let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true); - bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64 - } - attr::UnsignedInt(_) => { - let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false); - let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false); - bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64 - } - } -} - -pub fn ty_of_inttype<'tcx>(tcx: &ty::ctxt<'tcx>, ity: IntType) -> Ty<'tcx> { - match ity { - attr::SignedInt(t) => tcx.mk_mach_int(t), - attr::UnsignedInt(t) => tcx.mk_mach_uint(t) - } -} - -// LLVM doesn't like types that don't fit in the address space -fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Type], - packed: bool, - scapegoat: Ty<'tcx>) { - let mut offset = 0; - for &llty in fields { - // Invariant: offset < ccx.obj_size_bound() <= 1<<61 - if !packed { - let type_align = machine::llalign_of_min(ccx, llty); - offset = roundup(offset, type_align); - } - // type_align is a power-of-2, so still offset < ccx.obj_size_bound() - // llsize_of_alloc(ccx, llty) is 
also less than ccx.obj_size_bound() - // so the sum is less than 1<<62 (and therefore can't overflow). - offset += machine::llsize_of_alloc(ccx, llty); - - if offset >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); - } - } -} - -fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) { - let size = sts.iter().map(|st| st.size).max().unwrap(); - let align = sts.iter().map(|st| st.align).max().unwrap(); - (roundup(size, align), align) -} - -fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fields: &[Struct], - scapegoat: Ty<'tcx>) { - let (total_size, _) = union_size_and_align(fields); - - if total_size >= ccx.obj_size_bound() { - ccx.report_overbig_object(scapegoat); - } -} - - -/// LLVM-level types are a little complicated. -/// -/// C-like enums need to be actual ints, not wrapped in a struct, -/// because that changes the ABI on some platforms (see issue #10308). -/// -/// For nominal types, in some cases, we need to use LLVM named structs -/// and fill in the actual contents in a second pass to prevent -/// unbounded recursion; see also the comments in `trans::type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type { - let c = generic_type_of(cx, r, None, false, false, false); - assert!(!c.needs_drop_flag); - c.prefix -} - - -// Pass dst=true if the type you are passing is a DST. Yes, we could figure -// this out, but if you call this on an unsized type without realising it, you -// are going to get the wrong type (it will not include the unsized parts of it). 
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> Type { - let c = generic_type_of(cx, r, None, true, dst, false); - assert!(!c.needs_drop_flag); - c.prefix -} -pub fn sizing_type_context_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, dst: bool) -> TypeContext { - generic_type_of(cx, r, None, true, dst, true) -} -pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, name: &str) -> Type { - let c = generic_type_of(cx, r, Some(name), false, false, false); - assert!(!c.needs_drop_flag); - c.prefix -} -pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, llty: &mut Type) { - match *r { - CEnum(..) | General(..) | RawNullablePointer { .. } => { } - Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => - llty.set_struct_body(&struct_llfields(cx, st, false, false), - st.packed) - } -} - -fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - r: &Repr<'tcx>, - name: Option<&str>, - sizing: bool, - dst: bool, - delay_drop_flag: bool) -> TypeContext { - debug!("adt::generic_type_of r: {:?} name: {:?} sizing: {} dst: {} delay_drop_flag: {}", - r, name, sizing, dst, delay_drop_flag); - match *r { - CEnum(ity, _, _) => TypeContext::direct(ll_inttype(cx, ity)), - RawNullablePointer { nnty, .. } => - TypeContext::direct(type_of::sizing_type_of(cx, nnty)), - StructWrappedNullablePointer { nonnull: ref st, .. 
} => { - match name { - None => { - TypeContext::direct( - Type::struct_(cx, &struct_llfields(cx, st, sizing, dst), - st.packed)) - } - Some(name) => { - assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) - } - } - } - Univariant(ref st, dtor_needed) => { - let dtor_needed = dtor_needed != 0; - match name { - None => { - let mut fields = struct_llfields(cx, st, sizing, dst); - if delay_drop_flag && dtor_needed { - fields.pop(); - } - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields, - st.packed), - delay_drop_flag && dtor_needed) - } - Some(name) => { - // Hypothesis: named_struct's can never need a - // drop flag. (... needs validation.) - assert_eq!(sizing, false); - TypeContext::direct(Type::named_struct(cx, name)) - } - } - } - General(ity, ref sts, dtor_needed) => { - let dtor_needed = dtor_needed != 0; - // We need a representation that has: - // * The alignment of the most-aligned field - // * The size of the largest variant (rounded up to that alignment) - // * No alignment padding anywhere any variant has actual data - // (currently matters only for enums small enough to be immediate) - // * The discriminant in an obvious place. - // - // So we start with the discriminant, pad it up to the alignment with - // more of its own type, then use alignment-sized ints to get the rest - // of the size. - // - // FIXME #10604: this breaks when vector types are present. 
- let (size, align) = union_size_and_align(&sts[..]); - let align_s = align as u64; - assert_eq!(size % align_s, 0); - let align_units = size / align_s - 1; - - let discr_ty = ll_inttype(cx, ity); - let discr_size = machine::llsize_of_alloc(cx, discr_ty); - let fill_ty = match align_s { - 1 => Type::array(&Type::i8(cx), align_units), - 2 => Type::array(&Type::i16(cx), align_units), - 4 => Type::array(&Type::i32(cx), align_units), - 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 => - Type::array(&Type::i64(cx), align_units), - a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4), - align_units), - _ => panic!("unsupported enum alignment: {}", align) - }; - assert_eq!(machine::llalign_of_min(cx, fill_ty), align); - assert_eq!(align_s % discr_size, 0); - let mut fields: Vec = - [discr_ty, - Type::array(&discr_ty, align_s / discr_size - 1), - fill_ty].iter().cloned().collect(); - if delay_drop_flag && dtor_needed { - fields.pop(); - } - match name { - None => { - TypeContext::may_need_drop_flag( - Type::struct_(cx, &fields[..], false), - delay_drop_flag && dtor_needed) - } - Some(name) => { - let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&fields[..], false); - TypeContext::may_need_drop_flag( - llty, - delay_drop_flag && dtor_needed) - } - } - } - } -} - -fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>, - sizing: bool, dst: bool) -> Vec { - if sizing { - st.fields.iter().filter(|&ty| !dst || type_is_sized(cx.tcx(), *ty)) - .map(|&ty| type_of::sizing_type_of(cx, ty)).collect() - } else { - st.fields.iter().map(|&ty| type_of::in_memory_type_of(cx, ty)).collect() - } -} - -/// Obtain a representation of the discriminant sufficient to translate -/// destructuring; this may or may not involve the actual discriminant. -/// -/// This should ideally be less tightly tied to `_match`. 
-pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, scrutinee: ValueRef) - -> (_match::BranchKind, Option) { - match *r { - CEnum(..) | General(..) | - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None))) - } - Univariant(..) => { - // N.B.: Univariant means <= 1 enum variants (*not* == 1 variants). - (_match::Single, None) - } - } -} - -pub fn is_discr_signed<'tcx>(r: &Repr<'tcx>) -> bool { - match *r { - CEnum(ity, _, _) => ity.is_signed(), - General(ity, _, _) => ity.is_signed(), - Univariant(..) => false, - RawNullablePointer { .. } => false, - StructWrappedNullablePointer { .. } => false, - } -} - -/// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, - scrutinee: ValueRef, cast_to: Option) - -> ValueRef { - debug!("trans_get_discr r: {:?}", r); - let val = match *r { - CEnum(ity, min, max) => load_discr(bcx, ity, scrutinee, min, max), - General(ity, ref cases, _) => { - let ptr = StructGEP(bcx, scrutinee, 0); - load_discr(bcx, ity, ptr, Disr(0), Disr(cases.len() as u64 - 1)) - } - Univariant(..) => C_u8(bcx.ccx(), 0), - RawNullablePointer { nndiscr, nnty, .. } => { - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); - ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) - } - StructWrappedNullablePointer { nndiscr, ref discrfield, .. 
} => { - struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) - } - }; - match cast_to { - None => val, - Some(llty) => if is_discr_signed(r) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } - } -} - -fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, discrfield: &DiscrField, - scrutinee: ValueRef) -> ValueRef { - let llptrptr = GEPi(bcx, scrutinee, &discrfield[..]); - let llptr = Load(bcx, llptrptr); - let cmp = if nndiscr == Disr(0) { IntEQ } else { IntNE }; - ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) -} - -/// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr) - -> ValueRef { - let llty = ll_inttype(bcx.ccx(), ity); - assert_eq!(val_ty(ptr), llty.ptr_to()); - let bits = machine::llbitsize_of_real(bcx.ccx(), llty); - assert!(bits <= 64); - let bits = bits as usize; - let mask = Disr(!0u64 >> (64 - bits)); - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - if max.wrapping_add(Disr(1)) & mask == min & mask { - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - Load(bcx, ptr) - } else { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - LoadRangeAssert(bcx, ptr, min, max.wrapping_add(Disr(1)), /* signed: */ True) - } -} - -/// Yield information about how to dispatch a case of the -/// discriminant-like value returned by `trans_switch`. -/// -/// This should ideally be less tightly tied to `_match`. 
-pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr) - -> ValueRef { - match *r { - CEnum(ity, _, _) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) - } - General(ity, _, _) => { - C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true) - } - Univariant(..) => { - bcx.ccx().sess().bug("no cases for univariants or structs") - } - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. } => { - assert!(discr == Disr(0) || discr == Disr(1)); - C_bool(bcx.ccx(), discr != Disr(0)) - } - } -} - -/// Set the discriminant for a new value of the given case of the given -/// representation. -pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, - val: ValueRef, discr: Disr) { - match *r { - CEnum(ity, min, max) => { - assert_discr_in_range(ity, min, max, discr); - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), - val); - } - General(ity, ref cases, dtor) => { - if dtor_active(dtor) { - let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr, - cases[discr.0 as usize].fields.len() - 2); - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr); - } - Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr.0, true), - StructGEP(bcx, val, 0)); - } - Univariant(ref st, dtor) => { - assert_eq!(discr, Disr(0)); - if dtor_active(dtor) { - Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), - StructGEP(bcx, val, st.fields.len() - 1)); - } - } - RawNullablePointer { nndiscr, nnty, ..} => { - if discr != nndiscr { - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); - Store(bcx, C_null(llptrty), val); - } - } - StructWrappedNullablePointer { nndiscr, ref discrfield, .. 
} => { - if discr != nndiscr { - let llptrptr = GEPi(bcx, val, &discrfield[..]); - let llptrty = val_ty(llptrptr).element_type(); - Store(bcx, C_null(llptrty), llptrptr); - } - } - } -} - -fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) { - match ity { - attr::UnsignedInt(_) => { - assert!(min <= discr); - assert!(discr <= max) - }, - attr::SignedInt(_) => { - assert!(min.0 as i64 <= discr.0 as i64); - assert!(discr.0 as i64 <= max.0 as i64); - }, - } -} - -/// The number of fields in a given case; for use when obtaining this -/// information from the type or definition is less convenient. -pub fn num_args(r: &Repr, discr: Disr) -> usize { - match *r { - CEnum(..) => 0, - Univariant(ref st, dtor) => { - assert_eq!(discr, Disr(0)); - st.fields.len() - (if dtor_active(dtor) { 1 } else { 0 }) - } - General(_, ref cases, dtor) => { - cases[discr.0 as usize].fields.len() - 1 - (if dtor_active(dtor) { 1 } else { 0 }) - } - RawNullablePointer { nndiscr, ref nullfields, .. } => { - if discr == nndiscr { 1 } else { nullfields.len() } - } - StructWrappedNullablePointer { ref nonnull, nndiscr, - ref nullfields, .. } => { - if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() } - } - } -} - -/// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, - val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - // Note: if this ever needs to generate conditionals (e.g., if we - // decide to do some kind of cdr-coding-like non-unique repr - // someday), it will need to return a possibly-new bcx as well. - match *r { - CEnum(..) => { - bcx.ccx().sess().bug("element access in C-like enum") - } - Univariant(ref st, _dtor) => { - assert_eq!(discr, Disr(0)); - struct_field_ptr(bcx, st, val, ix, false) - } - General(_, ref cases, _) => { - struct_field_ptr(bcx, &cases[discr.0 as usize], val, ix + 1, true) - } - RawNullablePointer { nndiscr, ref nullfields, .. 
} | - StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => { - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); - // The contents of memory at this pointer can't matter, but use - // the value that's "reasonable" in case of pointer comparison. - PointerCast(bcx, val.value, ty.ptr_to()) - } - RawNullablePointer { nndiscr, nnty, .. } => { - assert_eq!(ix, 0); - assert_eq!(discr, nndiscr); - let ty = type_of::type_of(bcx.ccx(), nnty); - PointerCast(bcx, val.value, ty.ptr_to()) - } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr, nndiscr); - struct_field_ptr(bcx, nonnull, val, ix, false) - } - } -} - -pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue, - ix: usize, needs_cast: bool) -> ValueRef { - let ccx = bcx.ccx(); - let ptr_val = if needs_cast { - let fields = st.fields.iter().map(|&ty| { - type_of::in_memory_type_of(ccx, ty) - }).collect::>(); - let real_ty = Type::struct_(ccx, &fields[..], st.packed); - PointerCast(bcx, val.value, real_ty.ptr_to()) - } else { - val.value - }; - - let fty = st.fields[ix]; - // Simple case - we can just GEP the field - // * First field - Always aligned properly - // * Packed struct - There is no alignment padding - // * Field is sized - pointer is properly aligned already - if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) { - return StructGEP(bcx, ptr_val, ix); - } - - // If the type of the last field is [T] or str, then we don't need to do - // any adjusments - match fty.sty { - ty::TySlice(..) | ty::TyStr => { - return StructGEP(bcx, ptr_val, ix); - } - _ => () - } - - // There's no metadata available, log the case and just do the GEP. 
- if !val.has_meta() { - debug!("Unsized field `{}`, of `{}` has no metadata for adjustment", - ix, - bcx.val_to_string(ptr_val)); - return StructGEP(bcx, ptr_val, ix); - } - - let dbloc = DebugLoc::None; - - // We need to get the pointer manually now. - // We do this by casting to a *i8, then offsetting it by the appropriate amount. - // We do this instead of, say, simply adjusting the pointer from the result of a GEP - // because the field may have an arbitrary alignment in the LLVM representation - // anyway. - // - // To demonstrate: - // struct Foo { - // x: u16, - // y: T - // } - // - // The type Foo> is represented in LLVM as { u16, { u16, u8 }}, meaning that - // the `y` field has 16-bit alignment. - - let meta = val.meta; - - // Calculate the unaligned offset of the unsized field. - let mut offset = 0; - for &ty in &st.fields[0..ix] { - let llty = type_of::sizing_type_of(ccx, ty); - let type_align = type_of::align_of(ccx, ty); - offset = roundup(offset, type_align); - offset += machine::llsize_of_alloc(ccx, llty); - } - let unaligned_offset = C_uint(bcx.ccx(), offset); - - // Get the alignment of the field - let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); - - // Bump the unaligned offset up to the appropriate alignment using the - // following expression: - // - // (unaligned offset + (align - 1)) & -align - - // Calculate offset - let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc); - let offset = And(bcx, - Add(bcx, unaligned_offset, align_sub_1, dbloc), - Neg(bcx, align, dbloc), - dbloc); - - debug!("struct_field_ptr: DST field offset: {}", - bcx.val_to_string(offset)); - - // Cast and adjust pointer - let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx())); - let byte_ptr = GEP(bcx, byte_ptr, &[offset]); - - // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); - debug!("struct_field_ptr: Field type is {}", ll_fty.to_string()); - PointerCast(bcx, byte_ptr, 
ll_fty.ptr_to()) -} - -pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - value: ValueRef, - mut f: F) - -> Block<'blk, 'tcx> where - F: FnMut(Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - let fcx = bcx.fcx; - match *r { - Univariant(ref st, _) => { - f(bcx, st, value) - } - General(ity, ref cases, _) => { - let ccx = bcx.ccx(); - - // See the comments in trans/base.rs for more information (inside - // iter_structural_ty), but the gist here is that if the enum's - // discriminant is *not* in the range that we're expecting (in which - // case we'll take the fall-through branch on the switch - // instruction) then we can't just optimize this to an Unreachable - // block. - // - // Currently we still have filling drop, so this means that the drop - // glue for enums may be called when the enum has been paved over - // with the "I've been dropped" value. In this case the default - // branch of the switch instruction will actually be taken at - // runtime, so the basic block isn't actually unreachable, so we - // need to make it do something with defined behavior. In this case - // we just return early from the function. 
- let ret_void_cx = fcx.new_temp_block("enum-variant-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - - let discr_val = trans_get_discr(bcx, r, value, None); - let llswitch = Switch(bcx, discr_val, ret_void_cx.llbb, cases.len()); - let bcx_next = fcx.new_temp_block("enum-variant-iter-next"); - - for (discr, case) in cases.iter().enumerate() { - let mut variant_cx = fcx.new_temp_block( - &format!("enum-variant-iter-{}", &discr.to_string()) - ); - let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true); - AddCase(llswitch, rhs_val, variant_cx.llbb); - - let fields = case.fields.iter().map(|&ty| - type_of::type_of(bcx.ccx(), ty)).collect::>(); - let real_ty = Type::struct_(ccx, &fields[..], case.packed); - let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to()); - - variant_cx = f(variant_cx, case, variant_value); - Br(variant_cx, bcx_next.llbb, DebugLoc::None); - } - - bcx_next - } - _ => unreachable!() - } -} - -/// Access the struct drop flag, if present. 
-pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - r: &Repr<'tcx>, - val: ValueRef) - -> datum::DatumBlock<'blk, 'tcx, datum::Expr> -{ - let tcx = bcx.tcx(); - let ptr_ty = bcx.tcx().mk_imm_ptr(tcx.dtor_type()); - match *r { - Univariant(ref st, dtor) if dtor_active(dtor) => { - let flag_ptr = StructGEP(bcx, val, st.fields.len() - 1); - datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock() - } - General(_, _, dtor) if dtor_active(dtor) => { - let fcx = bcx.fcx; - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( - bcx, tcx.dtor_type(), "drop_flag", - InitAlloca::Uninit("drop flag itself has no dtor"), - cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| { - debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", - tcx.dtor_type()); - bcx - } - )); - bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { - let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value), - (st.fields.len() - 1), false); - datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) - .store_to(variant_cx, scratch.val) - }); - let expr_datum = scratch.to_expr_datum(); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - datum::DatumBlock::new(bcx, expr_datum) - } - _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type") - } -} - -/// Construct a constant value, suitable for initializing a -/// GlobalVariable, given a case and constant values for its fields. -/// Note that this may have a different LLVM type (and different -/// alignment!) from the representation's `type_of`, so it needs a -/// pointer cast before use. -/// -/// The LLVM type system does not directly support unions, and only -/// pointers can be bitcast, so a constant (and, by extension, the -/// GlobalVariable initialized by it) will have a type that can vary -/// depending on which case of an enum it is. 
-/// -/// To understand the alignment situation, consider `enum E { V64(u64), -/// V32(u32, u32) }` on Windows. The type has 8-byte alignment to -/// accommodate the u64, but `V32(x, y)` would have LLVM type `{i32, -/// i32, i32}`, which is 4-byte aligned. -/// -/// Currently the returned value has the same size as the type, but -/// this could be changed in the future to avoid allocating unnecessary -/// space after values of shorter-than-maximum cases. -pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr, - vals: &[ValueRef]) -> ValueRef { - match *r { - CEnum(ity, min, max) => { - assert_eq!(vals.len(), 0); - assert_discr_in_range(ity, min, max, discr); - C_integral(ll_inttype(ccx, ity), discr.0, true) - } - General(ity, ref cases, _) => { - let case = &cases[discr.0 as usize]; - let (max_sz, _) = union_size_and_align(&cases[..]); - let lldiscr = C_integral(ll_inttype(ccx, ity), discr.0 as u64, true); - let mut f = vec![lldiscr]; - f.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, case, &f[..]); - contents.extend_from_slice(&[padding(ccx, max_sz - case.size)]); - C_struct(ccx, &contents[..], false) - } - Univariant(ref st, _dro) => { - assert_eq!(discr, Disr(0)); - let contents = build_const_struct(ccx, st, vals); - C_struct(ccx, &contents[..], st.packed) - } - RawNullablePointer { nndiscr, nnty, .. } => { - if discr == nndiscr { - assert_eq!(vals.len(), 1); - vals[0] - } else { - C_null(type_of::sizing_type_of(ccx, nnty)) - } - } - StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if discr == nndiscr { - C_struct(ccx, &build_const_struct(ccx, - nonnull, - vals), - false) - } else { - let vals = nonnull.fields.iter().map(|&ty| { - // Always use null even if it's not the `discrfield`th - // field; see #8506. 
- C_null(type_of::sizing_type_of(ccx, ty)) - }).collect::>(); - C_struct(ccx, &build_const_struct(ccx, - nonnull, - &vals[..]), - false) - } - } - } -} - -/// Compute struct field offsets relative to struct begin. -fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>) -> Vec { - let mut offsets = vec!(); - - let mut offset = 0; - for &ty in &st.fields { - let llty = type_of::sizing_type_of(ccx, ty); - if !st.packed { - let type_align = type_of::align_of(ccx, ty); - offset = roundup(offset, type_align); - } - offsets.push(offset); - offset += machine::llsize_of_alloc(ccx, llty); - } - assert_eq!(st.fields.len(), offsets.len()); - offsets -} - -/// Building structs is a little complicated, because we might need to -/// insert padding if a field's value is less aligned than its type. -/// -/// Continuing the example from `trans_const`, a value of type `(u32, -/// E)` should have the `E` at offset 8, but if that field's -/// initializer is 4-byte aligned then simply translating the tuple as -/// a two-element struct will locate it at offset 4, and accesses to it -/// will read the wrong memory. 
-fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &Struct<'tcx>, vals: &[ValueRef]) - -> Vec { - assert_eq!(vals.len(), st.fields.len()); - - let target_offsets = compute_struct_field_offsets(ccx, st); - - // offset of current value - let mut offset = 0; - let mut cfields = Vec::new(); - for (&val, target_offset) in vals.iter().zip(target_offsets) { - if !st.packed { - let val_align = machine::llalign_of_min(ccx, val_ty(val)); - offset = roundup(offset, val_align); - } - if offset != target_offset { - cfields.push(padding(ccx, target_offset - offset)); - offset = target_offset; - } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); - } - - assert!(st.sized && offset <= st.size); - if offset != st.size { - cfields.push(padding(ccx, st.size - offset)); - } - - cfields -} - -fn padding(ccx: &CrateContext, size: u64) -> ValueRef { - C_undef(Type::array(&Type::i8(ccx), size)) -} - -// FIXME this utility routine should be somewhere more general -#[inline] -fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } - -/// Get the discriminant of a constant value. -pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { - match *r { - CEnum(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(val) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(val)), - } - } - General(ity, _, _) => { - match ity { - attr::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx, val, &[0])) as u64), - attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx, val, &[0]))) - } - } - Univariant(..) => Disr(0), - RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { - ccx.sess().bug("const discrim access of non c-like enum") - } - } -} - -/// Extract a field of a constant value, as appropriate for its -/// representation. 
-/// -/// (Not to be confused with `common::const_get_elt`, which operates on -/// raw LLVM-level structs and arrays.) -pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef, - _discr: Disr, ix: usize) -> ValueRef { - match *r { - CEnum(..) => ccx.sess().bug("element access in C-like enum const"), - Univariant(..) => const_struct_field(ccx, val, ix), - General(..) => const_struct_field(ccx, val, ix + 1), - RawNullablePointer { .. } => { - assert_eq!(ix, 0); - val - }, - StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix) - } -} - -/// Extract field of struct-like const, skipping our alignment padding. -fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef { - // Get the ix-th non-undef element of the struct. - let mut real_ix = 0; // actual position in the struct - let mut ix = ix; // logical index relative to real_ix - let mut field; - loop { - loop { - field = const_get_elt(ccx, val, &[real_ix]); - if !is_undef(field) { - break; - } - real_ix = real_ix + 1; - } - if ix == 0 { - return field; - } - ix = ix - 1; - real_ix = real_ix + 1; - } -} diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs deleted file mode 100644 index 69a8a84229d47..0000000000000 --- a/src/librustc_trans/trans/asm.rs +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Translation of inline assembly. 
- -use llvm; -use trans::build::*; -use trans::callee; -use trans::common::*; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::expr; -use trans::type_of; -use trans::type_::Type; - -use rustc_front::hir as ast; -use std::ffi::CString; -use syntax::ast::AsmDialect; -use libc::{c_uint, c_char}; - -// Take an inline assembly expression and splat it out via LLVM -pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) - -> Block<'blk, 'tcx> { - let fcx = bcx.fcx; - let mut bcx = bcx; - let mut constraints = Vec::new(); - let mut output_types = Vec::new(); - - let temp_scope = fcx.push_custom_cleanup_scope(); - - let mut ext_inputs = Vec::new(); - let mut ext_constraints = Vec::new(); - - // Prepare the output operands - let mut outputs = Vec::new(); - let mut inputs = Vec::new(); - for (i, out) in ia.outputs.iter().enumerate() { - constraints.push(out.constraint.clone()); - - let out_datum = unpack_datum!(bcx, expr::trans(bcx, &*out.expr)); - if out.is_indirect { - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &*out.expr), - out_datum, - cleanup::CustomScope(temp_scope), - callee::DontAutorefArg, - &mut inputs); - if out.is_rw { - ext_inputs.push(*inputs.last().unwrap()); - ext_constraints.push(i.to_string()); - } - } else { - output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty)); - outputs.push(out_datum.val); - if out.is_rw { - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &*out.expr), - out_datum, - cleanup::CustomScope(temp_scope), - callee::DontAutorefArg, - &mut ext_inputs); - ext_constraints.push(i.to_string()); - } - } - } - - // Now the input operands - for &(ref c, ref input) in &ia.inputs { - constraints.push((*c).clone()); - - let in_datum = unpack_datum!(bcx, expr::trans(bcx, &**input)); - bcx = callee::trans_arg_datum(bcx, - expr_ty(bcx, &**input), - in_datum, - cleanup::CustomScope(temp_scope), - callee::DontAutorefArg, - &mut inputs); - } - inputs.extend_from_slice(&ext_inputs[..]); - - // no 
failure occurred preparing operands, no need to cleanup - fcx.pop_custom_cleanup_scope(temp_scope); - - let clobbers = ia.clobbers.iter() - .map(|s| format!("~{{{}}}", &s)); - - // Default per-arch clobbers - // Basically what clang does - let arch_clobbers = match &bcx.sess().target.target.arch[..] { - "x86" | "x86_64" => vec!("~{dirflag}", "~{fpsr}", "~{flags}"), - _ => Vec::new() - }; - - let all_constraints= constraints.iter() - .map(|s| s.to_string()) - .chain(ext_constraints) - .chain(clobbers) - .chain(arch_clobbers.iter() - .map(|s| s.to_string())) - .collect::>() - .join(","); - - debug!("Asm Constraints: {}", &all_constraints[..]); - - // Depending on how many outputs we have, the return type is different - let num_outputs = outputs.len(); - let output_type = match num_outputs { - 0 => Type::void(bcx.ccx()), - 1 => output_types[0], - _ => Type::struct_(bcx.ccx(), &output_types[..], false) - }; - - let dialect = match ia.dialect { - AsmDialect::Att => llvm::AD_ATT, - AsmDialect::Intel => llvm::AD_Intel - }; - - let asm = CString::new(ia.asm.as_bytes()).unwrap(); - let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = InlineAsmCall(bcx, - asm.as_ptr(), - constraint_cstr.as_ptr(), - &inputs, - output_type, - ia.volatile, - ia.alignstack, - dialect); - - // Again, based on how many outputs we have - if num_outputs == 1 { - Store(bcx, r, outputs[0]); - } else { - for (i, o) in outputs.iter().enumerate() { - let v = ExtractValue(bcx, r, i); - Store(bcx, v, *o); - } - } - - // Store expn_id in a metadata node so we can map LLVM errors - // back to source locations. See #17552. 
- unsafe { - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(), - key.as_ptr() as *const c_char, key.len() as c_uint); - - let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32); - - llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); - } - - return bcx; - -} - diff --git a/src/librustc_trans/trans/assert_dep_graph.rs b/src/librustc_trans/trans/assert_dep_graph.rs deleted file mode 100644 index 3d6a6a8fa7770..0000000000000 --- a/src/librustc_trans/trans/assert_dep_graph.rs +++ /dev/null @@ -1,430 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This pass is only used for the UNIT TESTS and DEBUGGING NEEDS -//! around dependency graph construction. It serves two purposes; it -//! will dump graphs in graphviz form to disk, and it searches for -//! `#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]` -//! annotations. These annotations can be used to test whether paths -//! exist in the graph. We report errors on each -//! `rustc_if_this_changed` annotation. If a path exists in all -//! cases, then we would report "all path(s) exist". Otherwise, we -//! report: "no path to `foo`" for each case where no path exists. -//! `compile-fail` tests can then be used to check when paths exist or -//! do not. -//! -//! The full form of the `rustc_if_this_changed` annotation is -//! `#[rustc_if_this_changed(id)]`. The `"id"` is optional and -//! defaults to `"id"` if omitted. -//! -//! Example: -//! -//! ``` -//! #[rustc_if_this_changed] -//! fn foo() { } -//! -//! #[rustc_then_this_would_need("trans")] //~ ERROR no path from `foo` -//! fn bar() { } -//! -//! 
#[rustc_then_this_would_need("trans")] //~ ERROR OK -//! fn baz() { foo(); } -//! ``` - -use graphviz as dot; -use rustc::dep_graph::{DepGraphQuery, DepNode}; -use rustc::middle::def_id::DefId; -use rustc::middle::ty; -use rustc_data_structures::fnv::{FnvHashMap, FnvHashSet}; -use rustc_data_structures::graph::{Direction, INCOMING, OUTGOING, NodeIndex}; -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use graphviz::IntoCow; -use std::env; -use std::fs::File; -use std::io::Write; -use syntax::ast; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::Span; -use syntax::parse::token::InternedString; - -const IF_THIS_CHANGED: &'static str = "rustc_if_this_changed"; -const THEN_THIS_WOULD_NEED: &'static str = "rustc_then_this_would_need"; -const ID: &'static str = "id"; - -pub fn assert_dep_graph(tcx: &ty::ctxt) { - let _ignore = tcx.dep_graph.in_ignore(); - - if tcx.sess.opts.dump_dep_graph { - dump_graph(tcx); - } - - // Find annotations supplied by user (if any). - let (if_this_changed, then_this_would_need) = { - let mut visitor = IfThisChanged { tcx: tcx, - if_this_changed: FnvHashMap(), - then_this_would_need: FnvHashMap() }; - tcx.map.krate().visit_all_items(&mut visitor); - (visitor.if_this_changed, visitor.then_this_would_need) - }; - - // Check paths. 
- check_paths(tcx, &if_this_changed, &then_this_would_need); -} - -type SourceHashMap = FnvHashMap>; -type TargetHashMap = FnvHashMap>; - -struct IfThisChanged<'a, 'tcx:'a> { - tcx: &'a ty::ctxt<'tcx>, - if_this_changed: SourceHashMap, - then_this_would_need: TargetHashMap, -} - -impl<'a, 'tcx> IfThisChanged<'a, 'tcx> { - fn process_attrs(&mut self, node_id: ast::NodeId, def_id: DefId) { - for attr in self.tcx.get_attrs(def_id).iter() { - if attr.check_name(IF_THIS_CHANGED) { - let mut id = None; - for meta_item in attr.meta_item_list().unwrap_or_default() { - match meta_item.node { - ast::MetaWord(ref s) if id.is_none() => id = Some(s.clone()), - _ => { - self.tcx.sess.span_err( - meta_item.span, - &format!("unexpected meta-item {:?}", meta_item.node)); - } - } - } - let id = id.unwrap_or(InternedString::new(ID)); - self.if_this_changed.entry(id) - .or_insert(FnvHashSet()) - .insert((attr.span, def_id, DepNode::Hir(def_id))); - } else if attr.check_name(THEN_THIS_WOULD_NEED) { - let mut dep_node_interned = None; - let mut id = None; - for meta_item in attr.meta_item_list().unwrap_or_default() { - match meta_item.node { - ast::MetaWord(ref s) if dep_node_interned.is_none() => - dep_node_interned = Some(s.clone()), - ast::MetaWord(ref s) if id.is_none() => - id = Some(s.clone()), - _ => { - self.tcx.sess.span_err( - meta_item.span, - &format!("unexpected meta-item {:?}", meta_item.node)); - } - } - } - let dep_node_str = dep_node_interned.as_ref().map(|s| &**s); - macro_rules! match_depnode_name { - ($input:expr, $def_id:expr, match { $($variant:ident,)* } else $y:expr) => { - match $input { - $(Some(stringify!($variant)) => DepNode::$variant($def_id),)* - _ => $y - } - } - } - let dep_node = match_depnode_name! 
{ - dep_node_str, def_id, match { - CollectItem, - BorrowCheck, - TransCrateItem, - TypeckItemType, - TypeckItemBody, - ImplOrTraitItems, - ItemSignature, - FieldTy, - TraitItemDefIds, - InherentImpls, - ImplItems, - TraitImpls, - ReprHints, - } else { - self.tcx.sess.span_fatal( - attr.span, - &format!("unrecognized DepNode variant {:?}", dep_node_str)); - } - }; - let id = id.unwrap_or(InternedString::new(ID)); - self.then_this_would_need - .entry(id) - .or_insert(FnvHashSet()) - .insert((attr.span, dep_node_interned.clone().unwrap(), node_id, dep_node)); - } - } - } -} - -impl<'a, 'tcx> Visitor<'tcx> for IfThisChanged<'a, 'tcx> { - fn visit_item(&mut self, item: &'tcx hir::Item) { - let def_id = self.tcx.map.local_def_id(item.id); - self.process_attrs(item.id, def_id); - } -} - -fn check_paths(tcx: &ty::ctxt, - if_this_changed: &SourceHashMap, - then_this_would_need: &TargetHashMap) -{ - // Return early here so as not to construct the query, which is not cheap. - if if_this_changed.is_empty() { - return; - } - let query = tcx.dep_graph.query(); - for (id, sources) in if_this_changed { - let targets = match then_this_would_need.get(id) { - Some(targets) => targets, - None => { - for &(source_span, _, _) in sources.iter().take(1) { - tcx.sess.span_err( - source_span, - &format!("no targets for id `{}`", id)); - } - continue; - } - }; - - for &(_, source_def_id, source_dep_node) in sources { - let dependents = query.dependents(source_dep_node); - for &(target_span, ref target_pass, _, ref target_dep_node) in targets { - if !dependents.contains(&target_dep_node) { - tcx.sess.span_err( - target_span, - &format!("no path from `{}` to `{}`", - tcx.item_path_str(source_def_id), - target_pass)); - } else { - tcx.sess.span_err( - target_span, - &format!("OK")); - } - } - } - } -} - -fn dump_graph(tcx: &ty::ctxt) { - let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| format!("dep_graph")); - let query = tcx.dep_graph.query(); - - let nodes = match 
env::var("RUST_DEP_GRAPH_FILTER") { - Ok(string) => { - // Expect one of: "-> target", "source -> target", or "source ->". - let parts: Vec<_> = string.split("->").collect(); - if parts.len() > 2 { - panic!("Invalid RUST_DEP_GRAPH_FILTER: expected '[source] -> [target]'"); - } - let sources = node_set(&query, &parts[0]); - let targets = node_set(&query, &parts[1]); - filter_nodes(&query, &sources, &targets) - } - Err(_) => { - query.nodes() - .into_iter() - .collect() - } - }; - let edges = filter_edges(&query, &nodes); - - { // dump a .txt file with just the edges: - let txt_path = format!("{}.txt", path); - let mut file = File::create(&txt_path).unwrap(); - for &(source, target) in &edges { - write!(file, "{:?} -> {:?}\n", source, target).unwrap(); - } - } - - { // dump a .dot file in graphviz format: - let dot_path = format!("{}.dot", path); - let mut v = Vec::new(); - dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap(); - File::create(&dot_path).and_then(|mut f| f.write_all(&v)).unwrap(); - } -} - -pub struct GraphvizDepGraph(FnvHashSet, Vec<(DepNode, DepNode)>); - -impl<'a, 'tcx> dot::GraphWalk<'a, DepNode, (DepNode, DepNode)> for GraphvizDepGraph { - fn nodes(&self) -> dot::Nodes { - let nodes: Vec<_> = self.0.iter().cloned().collect(); - nodes.into_cow() - } - fn edges(&self) -> dot::Edges<(DepNode, DepNode)> { - self.1[..].into_cow() - } - fn source(&self, edge: &(DepNode, DepNode)) -> DepNode { - edge.0 - } - fn target(&self, edge: &(DepNode, DepNode)) -> DepNode { - edge.1 - } -} - -impl<'a, 'tcx> dot::Labeller<'a, DepNode, (DepNode, DepNode)> for GraphvizDepGraph { - fn graph_id(&self) -> dot::Id { - dot::Id::new("DependencyGraph").unwrap() - } - fn node_id(&self, n: &DepNode) -> dot::Id { - let s: String = - format!("{:?}", n).chars() - .map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' }) - .collect(); - debug!("n={:?} s={:?}", n, s); - dot::Id::new(s).unwrap() - } - fn node_label(&self, n: &DepNode) -> dot::LabelText { - 
dot::LabelText::label(format!("{:?}", n)) - } -} - -// Given an optional filter like `"x,y,z"`, returns either `None` (no -// filter) or the set of nodes whose labels contain all of those -// substrings. -fn node_set(query: &DepGraphQuery, filter: &str) -> Option> { - debug!("node_set(filter={:?})", filter); - - if filter.trim().is_empty() { - return None; - } - - let filters: Vec<&str> = filter.split("&").map(|s| s.trim()).collect(); - - debug!("node_set: filters={:?}", filters); - - Some(query.nodes() - .into_iter() - .filter(|n| { - let s = format!("{:?}", n); - filters.iter().all(|f| s.contains(f)) - }) - .collect()) -} - -fn filter_nodes(query: &DepGraphQuery, - sources: &Option>, - targets: &Option>) - -> FnvHashSet -{ - if let &Some(ref sources) = sources { - if let &Some(ref targets) = targets { - walk_between(query, sources, targets) - } else { - walk_nodes(query, sources, OUTGOING) - } - } else if let &Some(ref targets) = targets { - walk_nodes(query, targets, INCOMING) - } else { - query.nodes().into_iter().collect() - } -} - -fn walk_nodes(query: &DepGraphQuery, - starts: &FnvHashSet, - direction: Direction) - -> FnvHashSet -{ - let mut set = FnvHashSet(); - for start in starts { - debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING); - if set.insert(*start) { - let mut stack = vec![query.indices[start]]; - while let Some(index) = stack.pop() { - for (_, edge) in query.graph.adjacent_edges(index, direction) { - let neighbor_index = edge.source_or_target(direction); - let neighbor = query.graph.node_data(neighbor_index); - if set.insert(*neighbor) { - stack.push(neighbor_index); - } - } - } - } - } - set -} - -fn walk_between(query: &DepGraphQuery, - sources: &FnvHashSet, - targets: &FnvHashSet) - -> FnvHashSet -{ - // This is a bit tricky. We want to include a node only if it is: - // (a) reachable from a source and (b) will reach a target. And we - // have to be careful about cycles etc. 
Luckily efficiency is not - // a big concern! - - #[derive(Copy, Clone, PartialEq)] - enum State { Undecided, Deciding, Included, Excluded } - - let mut node_states = vec![State::Undecided; query.graph.len_nodes()]; - - for &target in targets { - node_states[query.indices[&target].0] = State::Included; - } - - for source in sources.iter().map(|n| query.indices[n]) { - recurse(query, &mut node_states, source); - } - - return query.nodes() - .into_iter() - .filter(|n| { - let index = query.indices[n]; - node_states[index.0] == State::Included - }) - .collect(); - - fn recurse(query: &DepGraphQuery, - node_states: &mut [State], - node: NodeIndex) - -> bool - { - match node_states[node.0] { - // known to reach a target - State::Included => return true, - - // known not to reach a target - State::Excluded => return false, - - // backedge, not yet known, say false - State::Deciding => return false, - - State::Undecided => { } - } - - node_states[node.0] = State::Deciding; - - for neighbor_index in query.graph.successor_nodes(node) { - if recurse(query, node_states, neighbor_index) { - node_states[node.0] = State::Included; - } - } - - // if we didn't find a path to target, then set to excluded - if node_states[node.0] == State::Deciding { - node_states[node.0] = State::Excluded; - false - } else { - assert!(node_states[node.0] == State::Included); - true - } - } -} - -fn filter_edges(query: &DepGraphQuery, - nodes: &FnvHashSet) - -> Vec<(DepNode, DepNode)> -{ - query.edges() - .into_iter() - .filter(|&(source, target)| nodes.contains(&source) && nodes.contains(&target)) - .collect() -} diff --git a/src/librustc_trans/trans/attributes.rs b/src/librustc_trans/trans/attributes.rs deleted file mode 100644 index 28dfa4e07e668..0000000000000 --- a/src/librustc_trans/trans/attributes.rs +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! Set and unset common attributes on LLVM values. - -use libc::{c_uint, c_ulonglong}; -use llvm::{self, ValueRef, AttrHelper}; -use middle::ty; -use middle::infer; -use session::config::NoDebugInfo; -use syntax::abi; -pub use syntax::attr::InlineAttr; -use syntax::ast; -use rustc_front::hir; -use trans::base; -use trans::common; -use trans::context::CrateContext; -use trans::machine; -use trans::type_of; - -/// Mark LLVM function to use provided inline heuristic. -#[inline] -pub fn inline(val: ValueRef, inline: InlineAttr) { - use self::InlineAttr::*; - match inline { - Hint => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint), - Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline), - Never => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline), - None => { - let attr = llvm::Attribute::InlineHint | - llvm::Attribute::AlwaysInline | - llvm::Attribute::NoInline; - unsafe { - llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong) - } - }, - }; -} - -/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function. -#[inline] -pub fn emit_uwtable(val: ValueRef, emit: bool) { - if emit { - llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable); - } else { - unsafe { - llvm::LLVMRemoveFunctionAttr( - val, - llvm::Attribute::UWTable.bits() as c_ulonglong, - ); - } - } -} - -/// Tell LLVM whether the function can or cannot unwind. 
-#[inline] -pub fn unwind(val: ValueRef, can_unwind: bool) { - if can_unwind { - unsafe { - llvm::LLVMRemoveFunctionAttr( - val, - llvm::Attribute::NoUnwind.bits() as c_ulonglong, - ); - } - } else { - llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind); - } -} - -/// Tell LLVM whether it should optimise function for size. -#[inline] -#[allow(dead_code)] // possibly useful function -pub fn set_optimize_for_size(val: ValueRef, optimize: bool) { - if optimize { - llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize); - } else { - unsafe { - llvm::LLVMRemoveFunctionAttr( - val, - llvm::Attribute::OptimizeForSize.bits() as c_ulonglong, - ); - } - } -} - -/// Composite function which sets LLVM attributes for function depending on its AST (#[attribute]) -/// attributes. -pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) { - use syntax::attr::*; - inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs)); - - // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a - // parameter. - let no_fp_elim = (ccx.sess().opts.debuginfo != NoDebugInfo) || - !ccx.sess().target.target.options.eliminate_frame_pointer; - if no_fp_elim { - unsafe { - let attr = "no-frame-pointer-elim\0".as_ptr() as *const _; - let val = "true\0".as_ptr() as *const _; - llvm::LLVMAddFunctionAttrStringValue(llfn, - llvm::FunctionIndex as c_uint, - attr, val); - } - } - - for attr in attrs { - if attr.check_name("cold") { - unsafe { - llvm::LLVMAddFunctionAttribute(llfn, - llvm::FunctionIndex as c_uint, - llvm::ColdAttribute as u64) - } - } else if attr.check_name("allocator") { - llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn); - } else if attr.check_name("unwind") { - unwind(llfn, true); - } - } -} - -/// Composite function which converts function type into LLVM attributes for the function. 
-pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>) - -> llvm::AttrBuilder { - use middle::ty::{BrAnon, ReLateBound}; - - let function_type; - let (fn_sig, abi, env_ty) = match fn_type.sty { - ty::TyBareFn(_, ref f) => (&f.sig, f.abi, None), - ty::TyClosure(closure_did, ref substs) => { - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables); - function_type = infcx.closure_type(closure_did, substs); - let self_type = base::self_type_for_closure(ccx, closure_did, fn_type); - (&function_type.sig, abi::RustCall, Some(self_type)) - } - _ => ccx.sess().bug("expected closure or function.") - }; - - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - - let mut attrs = llvm::AttrBuilder::new(); - let ret_ty = fn_sig.output; - - // These have an odd calling convention, so we need to manually - // unpack the input ty's - let input_tys = match fn_type.sty { - ty::TyClosure(..) => { - assert!(abi == abi::RustCall); - - match fn_sig.inputs[0].sty { - ty::TyTuple(ref inputs) => { - let mut full_inputs = vec![env_ty.expect("Missing closure environment")]; - full_inputs.extend_from_slice(inputs); - full_inputs - } - _ => ccx.sess().bug("expected tuple'd inputs") - } - }, - ty::TyBareFn(..) if abi == abi::RustCall => { - let mut inputs = vec![fn_sig.inputs[0]]; - - match fn_sig.inputs[1].sty { - ty::TyTuple(ref t_in) => { - inputs.extend_from_slice(&t_in[..]); - inputs - } - _ => ccx.sess().bug("expected tuple'd inputs") - } - } - _ => fn_sig.inputs.clone() - }; - - // Index 0 is the return value of the llvm func, so we start at 1 - let mut idx = 1; - if let ty::FnConverging(ret_ty) = ret_ty { - // A function pointer is called without the declaration - // available, so we have to apply any attributes with ABI - // implications directly to the call instruction. Right now, - // the only attribute we need to worry about is `sret`. 
- if type_of::return_uses_outptr(ccx, ret_ty) { - let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty)); - - // The outptr can be noalias and nocapture because it's entirely - // invisible to the program. We also know it's nonnull as well - // as how many bytes we can dereference - attrs.arg(1, llvm::Attribute::StructRet) - .arg(1, llvm::Attribute::NoAlias) - .arg(1, llvm::Attribute::NoCapture) - .arg(1, llvm::DereferenceableAttribute(llret_sz)); - - // Add one more since there's an outptr - idx += 1; - } else { - // The `noalias` attribute on the return value is useful to a - // function ptr caller. - match ret_ty.sty { - // `Box` pointer return values never alias because ownership - // is transferred - ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => { - attrs.ret(llvm::Attribute::NoAlias); - } - _ => {} - } - - // We can also mark the return value as `dereferenceable` in certain cases - match ret_ty.sty { - // These are not really pointers but pairs, (pointer, len) - ty::TyRef(_, ty::TypeAndMut { ty: inner, .. }) - | ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => { - let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner)); - attrs.ret(llvm::DereferenceableAttribute(llret_sz)); - } - _ => {} - } - - if let ty::TyBool = ret_ty.sty { - attrs.ret(llvm::Attribute::ZExt); - } - } - } - - for &t in input_tys.iter() { - match t.sty { - _ if type_of::arg_is_indirect(ccx, t) => { - let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t)); - - // For non-immediate arguments the callee gets its own copy of - // the value on the stack, so there are no aliases. 
It's also - // program-invisible so can't possibly capture - attrs.arg(idx, llvm::Attribute::NoAlias) - .arg(idx, llvm::Attribute::NoCapture) - .arg(idx, llvm::DereferenceableAttribute(llarg_sz)); - } - - ty::TyBool => { - attrs.arg(idx, llvm::Attribute::ZExt); - } - - // `Box` pointer parameters never alias because ownership is transferred - ty::TyBox(inner) => { - attrs.arg(idx, llvm::Attribute::NoAlias); - - if common::type_is_sized(ccx.tcx(), inner) { - let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner)); - attrs.arg(idx, llvm::DereferenceableAttribute(llsz)); - } else { - attrs.arg(idx, llvm::NonNullAttribute); - if inner.is_trait() { - attrs.arg(idx + 1, llvm::NonNullAttribute); - } - } - } - - ty::TyRef(b, mt) => { - // `&mut` pointer parameters never alias other parameters, or mutable global data - // - // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as - // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely - // on memory dependencies rather than pointer equality - let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe(); - - if mt.mutbl == hir::MutMutable || !interior_unsafe { - attrs.arg(idx, llvm::Attribute::NoAlias); - } - - if mt.mutbl == hir::MutImmutable && !interior_unsafe { - attrs.arg(idx, llvm::Attribute::ReadOnly); - } - - // & pointer parameters are also never null and for sized types we also know - // exactly how many bytes we can dereference - if common::type_is_sized(ccx.tcx(), mt.ty) { - let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty)); - attrs.arg(idx, llvm::DereferenceableAttribute(llsz)); - } else { - attrs.arg(idx, llvm::NonNullAttribute); - if mt.ty.is_trait() { - attrs.arg(idx + 1, llvm::NonNullAttribute); - } - } - - // When a reference in an argument has no named lifetime, it's - // impossible for that reference to escape this function - // (returned or stored beyond the call by a closure). 
- if let ReLateBound(_, BrAnon(_)) = *b { - attrs.arg(idx, llvm::Attribute::NoCapture); - } - } - - _ => () - } - - if common::type_is_fat_ptr(ccx.tcx(), t) { - idx += 2; - } else { - idx += 1; - } - } - - attrs -} diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs deleted file mode 100644 index 3909cb2df2364..0000000000000 --- a/src/librustc_trans/trans/base.rs +++ /dev/null @@ -1,3333 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! Translate the completed AST to the LLVM IR. -//! -//! Some functions here, such as trans_block and trans_expr, return a value -- -//! the result of the translation to LLVM -- while others, such as trans_fn, -//! trans_impl, and trans_item, are called only for the side effect of adding a -//! particular definition to the LLVM IR output we're producing. -//! -//! Hopefully useful general knowledge about trans: -//! -//! * There's no way to find out the Ty type of a ValueRef. Doing so -//! would be "trying to get the eggs out of an omelette" (credit: -//! pcwalton). You can, instead, find out its TypeRef by calling val_ty, -//! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, -//! int) and rec(x=int, y=int, z=int) will have the same TypeRef. 
- -#![allow(non_camel_case_types)] - -pub use self::ValueOrigin::*; - -use super::CrateTranslation; -use super::ModuleTranslation; - -use back::link::mangle_exported_name; -use back::{link, abi}; -use lint; -use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param}; -use llvm; -use middle::cfg; -use middle::cstore::CrateStore; -use middle::def_id::DefId; -use middle::infer; -use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; -use middle::weak_lang_items; -use middle::pat_util::simple_name; -use middle::subst::Substs; -use middle::ty::{self, Ty, TypeFoldable}; -use rustc::dep_graph::DepNode; -use rustc::front::map as hir_map; -use rustc::util::common::time; -use rustc_mir::mir_map::MirMap; -use session::config::{self, NoDebugInfo, FullDebugInfo}; -use session::Session; -use trans::_match; -use trans::adt; -use trans::assert_dep_graph; -use trans::attributes; -use trans::build::*; -use trans::builder::{Builder, noname}; -use trans::callee; -use trans::cleanup::{self, CleanupMethods, DropHint}; -use trans::closure; -use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral}; -use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; -use trans::common::{CrateContext, DropFlagHintsMap, Field, FunctionContext}; -use trans::common::{Result, NodeIdAndSpan, VariantInfo}; -use trans::common::{node_id_type, return_type_is_void}; -use trans::common::{type_is_immediate, type_is_zero_size, val_ty}; -use trans::common; -use trans::consts; -use trans::context::SharedCrateContext; -use trans::controlflow; -use trans::datum; -use trans::debuginfo::{self, DebugLoc, ToDebugLoc}; -use trans::declare; -use trans::expr; -use trans::foreign; -use trans::glue; -use trans::intrinsic; -use trans::machine; -use trans::machine::{llsize_of, llsize_of_real}; -use trans::meth; -use trans::mir; -use trans::monomorphize; -use trans::tvec; -use trans::type_::Type; -use trans::type_of; -use trans::type_of::*; -use 
trans::value::Value; -use trans::Disr; -use util::common::indenter; -use util::sha2::Sha256; -use util::nodemap::{NodeMap, NodeSet}; - -use arena::TypedArena; -use libc::c_uint; -use std::ffi::{CStr, CString}; -use std::cell::{Cell, RefCell}; -use std::collections::{HashMap, HashSet}; -use std::str; -use std::{i8, i16, i32, i64}; -use syntax::abi::{Rust, RustCall, RustIntrinsic, PlatformIntrinsic, Abi}; -use syntax::codemap::Span; -use syntax::parse::token::InternedString; -use syntax::attr::AttrMetaMethods; -use syntax::attr; -use rustc_front; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; -use syntax::ast; - -thread_local! { - static TASK_LOCAL_INSN_KEY: RefCell>> = { - RefCell::new(None) - } -} - -pub fn with_insn_ctxt(blk: F) - where F: FnOnce(&[&'static str]) -{ - TASK_LOCAL_INSN_KEY.with(move |slot| { - slot.borrow().as_ref().map(move |s| blk(s)); - }) -} - -pub fn init_insn_ctxt() { - TASK_LOCAL_INSN_KEY.with(|slot| { - *slot.borrow_mut() = Some(Vec::new()); - }); -} - -pub struct _InsnCtxt { - _cannot_construct_outside_of_this_module: (), -} - -impl Drop for _InsnCtxt { - fn drop(&mut self) { - TASK_LOCAL_INSN_KEY.with(|slot| { - match slot.borrow_mut().as_mut() { - Some(ctx) => { - ctx.pop(); - } - None => {} - } - }) - } -} - -pub fn push_ctxt(s: &'static str) -> _InsnCtxt { - debug!("new InsnCtxt: {}", s); - TASK_LOCAL_INSN_KEY.with(|slot| { - match slot.borrow_mut().as_mut() { - Some(ctx) => ctx.push(s), - None => {} - } - }); - _InsnCtxt { - _cannot_construct_outside_of_this_module: (), - } -} - -pub struct StatRecorder<'a, 'tcx: 'a> { - ccx: &'a CrateContext<'a, 'tcx>, - name: Option, - istart: usize, -} - -impl<'a, 'tcx> StatRecorder<'a, 'tcx> { - pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> { - let istart = ccx.stats().n_llvm_insns.get(); - StatRecorder { - ccx: ccx, - name: Some(name), - istart: istart, - } - } -} - -impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { - fn drop(&mut self) { 
- if self.ccx.sess().trans_stats() { - let iend = self.ccx.stats().n_llvm_insns.get(); - self.ccx - .stats() - .fn_stats - .borrow_mut() - .push((self.name.take().unwrap(), iend - self.istart)); - self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1); - // Reset LLVM insn count to avoid compound costs. - self.ccx.stats().n_llvm_insns.set(self.istart); - } - } -} - -fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_ty: Ty<'tcx>, - name: &str, - did: DefId) - -> ValueRef { - match ccx.externs().borrow().get(name) { - Some(n) => return *n, - None => (), - } - - let f = declare::declare_rust_fn(ccx, name, fn_ty); - - let attrs = ccx.sess().cstore.item_attrs(did); - attributes::from_fn_attrs(ccx, &attrs[..], f); - - ccx.externs().borrow_mut().insert(name.to_string(), f); - f -} - -pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - closure_id: DefId, - fn_ty: Ty<'tcx>) - -> Ty<'tcx> { - let closure_kind = ccx.tcx().closure_kind(closure_id); - match closure_kind { - ty::FnClosureKind => { - ccx.tcx().mk_imm_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty) - } - ty::FnMutClosureKind => { - ccx.tcx().mk_mut_ref(ccx.tcx().mk_region(ty::ReStatic), fn_ty) - } - ty::FnOnceClosureKind => fn_ty, - } -} - -pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind { - *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap() -} - -pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - t: Ty<'tcx>) - -> ValueRef { - let name = ccx.sess().cstore.item_symbol(did); - let ty = type_of(ccx, t); - match ccx.externs().borrow_mut().get(&name) { - Some(n) => return *n, - None => (), - } - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? 
- // FIXME(nagisa): investigate whether it can be changed into define_global - let c = declare::declare_global(ccx, &name[..], ty); - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - for attr in ccx.tcx().get_attrs(did).iter() { - if attr.check_name("thread_local") { - llvm::set_thread_local(c, true); - } - } - if ccx.use_dll_storage_attrs() { - llvm::SetDLLStorageClass(c, llvm::DLLImportStorageClass); - } - ccx.externs().borrow_mut().insert(name.to_string(), c); - return c; -} - -fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { - match bcx.tcx().lang_items.require(it) { - Ok(id) => id, - Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s)); - } - } -} - -// The following malloc_raw_dyn* functions allocate a box to contain -// a given type, but with a potentially dynamic size. 
- -pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llty_ptr: Type, - info_ty: Ty<'tcx>, - size: ValueRef, - align: ValueRef, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let _icx = push_ctxt("malloc_raw_exchange"); - - // Allocate space: - let r = callee::trans_lang_call(bcx, - require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem), - &[size, align], - None, - debug_loc); - - Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) -} - - -pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, - op: hir::BinOp_, - signed: bool) - -> llvm::IntPredicate { - match op { - hir::BiEq => llvm::IntEQ, - hir::BiNe => llvm::IntNE, - hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT }, - hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE }, - hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT }, - hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE }, - op => { - ccx.sess() - .bug(&format!("comparison_op_to_icmp_predicate: expected comparison operator, \ - found {:?}", - op)); - } - } -} - -pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: hir::BinOp_) -> llvm::RealPredicate { - match op { - hir::BiEq => llvm::RealOEQ, - hir::BiNe => llvm::RealUNE, - hir::BiLt => llvm::RealOLT, - hir::BiLe => llvm::RealOLE, - hir::BiGt => llvm::RealOGT, - hir::BiGe => llvm::RealOGE, - op => { - ccx.sess() - .bug(&format!("comparison_op_to_fcmp_predicate: expected comparison operator, \ - found {:?}", - op)); - } - } -} - -pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_addr: ValueRef, - lhs_extra: ValueRef, - rhs_addr: ValueRef, - rhs_extra: ValueRef, - _t: Ty<'tcx>, - op: hir::BinOp_, - debug_loc: DebugLoc) - -> ValueRef { - match op { - hir::BiEq => { - let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc); - let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc); - And(bcx, addr_eq, extra_eq, debug_loc) - } - hir::BiNe => { - let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, 
debug_loc); - let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc); - Or(bcx, addr_eq, extra_eq, debug_loc) - } - hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => { - // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) - let (op, strict_op) = match op { - hir::BiLt => (llvm::IntULT, llvm::IntULT), - hir::BiLe => (llvm::IntULE, llvm::IntULT), - hir::BiGt => (llvm::IntUGT, llvm::IntUGT), - hir::BiGe => (llvm::IntUGE, llvm::IntUGT), - _ => unreachable!(), - }; - - let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc); - let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc); - let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc); - - let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc); - Or(bcx, addr_strict, addr_eq_extra_op, debug_loc) - } - _ => { - bcx.tcx().sess.bug("unexpected fat ptr binop"); - } - } -} - -pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - t: Ty<'tcx>, - op: hir::BinOp_, - debug_loc: DebugLoc) - -> ValueRef { - match t.sty { - ty::TyTuple(ref tys) if tys.is_empty() => { - // We don't need to do actual comparisons for nil. - // () == () holds but () < () does not. - match op { - hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true), - hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false), - // refinements would be nice - _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator"), - } - } - ty::TyBareFn(..) 
| ty::TyBool | ty::TyUint(_) | ty::TyChar => { - ICmp(bcx, - bin_op_to_icmp_predicate(bcx.ccx(), op, false), - lhs, - rhs, - debug_loc) - } - ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => { - ICmp(bcx, - bin_op_to_icmp_predicate(bcx.ccx(), op, false), - lhs, - rhs, - debug_loc) - } - ty::TyRawPtr(_) => { - let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR])); - let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA])); - - let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR])); - let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA])); - compare_fat_ptrs(bcx, - lhs_addr, - lhs_extra, - rhs_addr, - rhs_extra, - t, - op, - debug_loc) - } - ty::TyInt(_) => { - ICmp(bcx, - bin_op_to_icmp_predicate(bcx.ccx(), op, true), - lhs, - rhs, - debug_loc) - } - ty::TyFloat(_) => { - FCmp(bcx, - bin_op_to_fcmp_predicate(bcx.ccx(), op), - lhs, - rhs, - debug_loc) - } - // Should never get here, because t is scalar. - _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types"), - } -} - -pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - t: Ty<'tcx>, - ret_ty: Type, - op: hir::BinOp_, - debug_loc: DebugLoc) - -> ValueRef { - let signed = match t.sty { - ty::TyFloat(_) => { - let cmp = bin_op_to_fcmp_predicate(bcx.ccx(), op); - return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty); - }, - ty::TyUint(_) => false, - ty::TyInt(_) => true, - _ => bcx.sess().bug("compare_simd_types: invalid SIMD type"), - }; - - let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed); - // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension - // to get the correctly sized type. This will compile to a single instruction - // once the IR is converted to assembly if the SIMD instruction is supported - // by the target architecture. - SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) -} - -// Iterates through the elements of a structural type. 
-pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - av: ValueRef, - t: Ty<'tcx>, - mut f: F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("iter_structural_ty"); - - fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>, - repr: &adt::Repr<'tcx>, - av: adt::MaybeSizedValue, - variant: ty::VariantDef<'tcx>, - substs: &Substs<'tcx>, - f: &mut F) - -> Block<'blk, 'tcx> - where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx> - { - let _icx = push_ctxt("iter_variant"); - let tcx = cx.tcx(); - let mut cx = cx; - - for (i, field) in variant.fields.iter().enumerate() { - let arg = monomorphize::field_ty(tcx, substs, field); - cx = f(cx, - adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i), - arg); - } - return cx; - } - - let value = if common::type_is_sized(cx.tcx(), t) { - adt::MaybeSizedValue::sized(av) - } else { - let data = Load(cx, expr::get_dataptr(cx, av)); - let info = Load(cx, expr::get_meta(cx, av)); - adt::MaybeSizedValue::unsized_(data, info) - }; - - let mut cx = cx; - match t.sty { - ty::TyStruct(..) 
=> { - let repr = adt::represent_type(cx.ccx(), t); - let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); - for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &*repr, value, Disr::from(discr), i); - - let val = if common::type_is_sized(cx.tcx(), field_ty) { - llfld_a - } else { - let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter"); - Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val)); - Store(cx, value.meta, expr::get_meta(cx, scratch.val)); - scratch.val - }; - cx = f(cx, val, field_ty); - } - } - ty::TyClosure(_, ref substs) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() { - let llupvar = adt::trans_field_ptr(cx, &*repr, value, Disr(0), i); - cx = f(cx, llupvar, upvar_ty); - } - } - ty::TyArray(_, n) => { - let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n); - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f); - } - ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f); - } - ty::TyTuple(ref args) => { - let repr = adt::represent_type(cx.ccx(), t); - for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, &*repr, value, Disr(0), i); - cx = f(cx, llfld_a, *arg); - } - } - ty::TyEnum(en, substs) => { - let fcx = cx.fcx; - let ccx = fcx.ccx; - - let repr = adt::represent_type(ccx, t); - let n_variants = en.variants.len(); - - // NB: we must hit the discriminant first so that structural - // comparison know not to proceed when the discriminants differ. 
- - match adt::trans_switch(cx, &*repr, av) { - (_match::Single, None) => { - if n_variants != 0 { - assert!(n_variants == 1); - cx = iter_variant(cx, &*repr, adt::MaybeSizedValue::sized(av), - &en.variants[0], substs, &mut f); - } - } - (_match::Switch, Some(lldiscrim_a)) => { - cx = f(cx, lldiscrim_a, cx.tcx().types.isize); - - // Create a fall-through basic block for the "else" case of - // the switch instruction we're about to generate. Note that - // we do **not** use an Unreachable instruction here, even - // though most of the time this basic block will never be hit. - // - // When an enum is dropped it's contents are currently - // overwritten to DTOR_DONE, which means the discriminant - // could have changed value to something not within the actual - // range of the discriminant. Currently this function is only - // used for drop glue so in this case we just return quickly - // from the outer function, and any other use case will only - // call this for an already-valid enum in which case the `ret - // void` will never be hit. - let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); - let next_cx = fcx.new_temp_block("enum-iter-next"); - - for variant in &en.variants { - let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}", - &variant.disr_val - .to_string())); - let case_val = adt::trans_case(cx, &*repr, Disr::from(variant.disr_val)); - AddCase(llswitch, case_val, variant_cx.llbb); - let variant_cx = iter_variant(variant_cx, - &*repr, - value, - variant, - substs, - &mut f); - Br(variant_cx, next_cx.llbb, DebugLoc::None); - } - cx = next_cx; - } - _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"), - } - } - _ => { - cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t)) - } - } - return cx; -} - - -/// Retrieve the information we are losing (making dynamic) in an unsizing -/// adjustment. 
-/// -/// The `old_info` argument is a bit funny. It is intended for use -/// in an upcast, where the new vtable for an object will be drived -/// from the old one. -pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, - source: Ty<'tcx>, - target: Ty<'tcx>, - old_info: Option, - param_substs: &'tcx Substs<'tcx>) - -> ValueRef { - let (source, target) = ccx.tcx().struct_lockstep_tails(source, target); - match (&source.sty, &target.sty) { - (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len), - (&ty::TyTrait(_), &ty::TyTrait(_)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - old_info.expect("unsized_info: missing old info for trait upcast") - } - (_, &ty::TyTrait(box ty::TraitTy { ref principal, .. })) => { - // Note that we preserve binding levels here: - let substs = principal.0.substs.with_self_ty(source).erase_regions(); - let substs = ccx.tcx().mk_substs(substs); - let trait_ref = ty::Binder(ty::TraitRef { - def_id: principal.def_id(), - substs: substs, - }); - consts::ptrcast(meth::get_vtable(ccx, trait_ref, param_substs), - Type::vtable_ptr(ccx)) - } - _ => ccx.sess().bug(&format!("unsized_info: invalid unsizing {:?} -> {:?}", - source, - target)), - } -} - -/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { - debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); - match (&src_ty.sty, &dst_ty.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. 
})) => { - assert!(common::type_is_sized(bcx.tcx(), a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); - (PointerCast(bcx, src, ptr_ty), - unsized_info(bcx.ccx(), a, b, None, bcx.fcx.param_substs)) - } - _ => bcx.sess().bug("unsize_thin_ptr: called on bad types"), - } -} - -/// Coerce `src`, which is a reference to a value of type `src_ty`, -/// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst: ValueRef, - dst_ty: Ty<'tcx>) { - match (&src_ty.sty, &dst_ty.sty) { - (&ty::TyBox(..), &ty::TyBox(..)) | - (&ty::TyRef(..), &ty::TyRef(..)) | - (&ty::TyRef(..), &ty::TyRawPtr(..)) | - (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { - let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) { - // fat-ptr to fat-ptr unsize preserves the vtable - load_fat_ptr(bcx, src, src_ty) - } else { - let base = load_ty(bcx, src, src_ty); - unsize_thin_ptr(bcx, base, src_ty, dst_ty) - }; - store_fat_ptr(bcx, base, info, dst, dst_ty); - } - - // This can be extended to enums and tuples in the future. 
- // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) | - (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => { - assert_eq!(def_a, def_b); - - let src_repr = adt::represent_type(bcx.ccx(), src_ty); - let src_fields = match &*src_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bcx.sess().bug("struct has non-univariant repr"), - }; - let dst_repr = adt::represent_type(bcx.ccx(), dst_ty); - let dst_fields = match &*dst_repr { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bcx.sess().bug("struct has non-univariant repr"), - }; - - let src = adt::MaybeSizedValue::sized(src); - let dst = adt::MaybeSizedValue::sized(dst); - - let iter = src_fields.iter().zip(dst_fields).enumerate(); - for (i, (src_fty, dst_fty)) in iter { - if type_is_zero_size(bcx.ccx(), dst_fty) { - continue; - } - - let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i); - let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i); - if src_fty == dst_fty { - memcpy_ty(bcx, dst_f, src_f, src_fty); - } else { - coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty); - } - } - } - _ => bcx.sess().bug(&format!("coerce_unsized_into: invalid coercion {:?} -> {:?}", - src_ty, - dst_ty)), - } -} - -pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b)) -} - -pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - cast_shift_rhs(op, - lhs, - rhs, - |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) }, - |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) }) -} - -fn cast_shift_rhs(op: hir::BinOp_, - lhs: ValueRef, - rhs: ValueRef, - trunc: F, - zext: G) - -> ValueRef - where F: FnOnce(ValueRef, Type) -> ValueRef, - G: FnOnce(ValueRef, Type) -> ValueRef -{ - // Shifts may have any size int on the rhs - if rustc_front::util::is_shift_binop(op) { - let mut rhs_llty = val_ty(rhs); - let mut lhs_llty = 
val_ty(lhs); - if rhs_llty.kind() == Vector { - rhs_llty = rhs_llty.element_type() - } - if lhs_llty.kind() == Vector { - lhs_llty = lhs_llty.element_type() - } - let rhs_sz = rhs_llty.int_width(); - let lhs_sz = lhs_llty.int_width(); - if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) - } else if lhs_sz > rhs_sz { - // FIXME (#1877: If shifting by negative - // values becomes not undefined then this is wrong. - zext(rhs, lhs_llty) - } else { - rhs - } - } else { - rhs - } -} - -pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - val_t: Ty<'tcx>) - -> (Type, u64) { - match val_t.sty { - ty::TyInt(t) => { - let llty = Type::int_from_ty(cx.ccx(), t); - let min = match t { - ast::TyIs if llty == Type::i32(cx.ccx()) => i32::MIN as u64, - ast::TyIs => i64::MIN as u64, - ast::TyI8 => i8::MIN as u64, - ast::TyI16 => i16::MIN as u64, - ast::TyI32 => i32::MIN as u64, - ast::TyI64 => i64::MIN as u64, - }; - (llty, min) - } - _ => unreachable!(), - } -} - -pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - divrem: hir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - rhs_t: Ty<'tcx>) - -> Block<'blk, 'tcx> { - let (zero_text, overflow_text) = if divrem.node == hir::BiDiv { - ("attempted to divide by zero", - "attempted to divide with overflow") - } else { - ("attempted remainder with a divisor of zero", - "attempted remainder with overflow") - }; - let debug_loc = call_info.debug_loc(); - - let (is_zero, is_signed) = match rhs_t.sty { - ty::TyInt(t) => { - let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true) - } - ty::TyUint(t) => { - let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false); - (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false) - } - ty::TyStruct(def, _) if def.is_simd() => { - let mut res = C_bool(cx.ccx(), false); - for i in 0..rhs_t.simd_size(cx.tcx()) { - res = Or(cx, - res, - IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i 
as i64))), - debug_loc); - } - (res, false) - } - _ => { - cx.sess().bug(&format!("fail-if-zero on unexpected type: {}", rhs_t)); - } - }; - let bcx = with_cond(cx, is_zero, |bcx| { - controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text)) - }); - - // To quote LLVM's documentation for the sdiv instruction: - // - // Division by zero leads to undefined behavior. Overflow also leads - // to undefined behavior; this is a rare case, but can occur, for - // example, by doing a 32-bit division of -2147483648 by -1. - // - // In order to avoid undefined behavior, we perform runtime checks for - // signed division/remainder which would trigger overflow. For unsigned - // integers, no action beyond checking for zero need be taken. - if is_signed { - let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t); - let minus_one = ICmp(bcx, - llvm::IntEQ, - rhs, - C_integral(llty, !0, false), - debug_loc); - with_cond(bcx, minus_one, |bcx| { - let is_min = ICmp(bcx, - llvm::IntEQ, - lhs, - C_integral(llty, min, true), - debug_loc); - with_cond(bcx, is_min, |bcx| { - controlflow::trans_fail(bcx, call_info, InternedString::new(overflow_text)) - }) - }) - } else { - bcx - } -} - -pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - t: Ty<'tcx>) - -> ValueRef { - let name = ccx.sess().cstore.item_symbol(did); - match t.sty { - ty::TyBareFn(_, ref fn_ty) => { - match ccx.sess().target.target.adjust_abi(fn_ty.abi) { - Rust | RustCall => { - get_extern_rust_fn(ccx, t, &name[..], did) - } - RustIntrinsic | PlatformIntrinsic => { - ccx.sess().bug("unexpected intrinsic in trans_external_path") - } - _ => { - let attrs = ccx.sess().cstore.item_attrs(did); - foreign::register_foreign_item_fn(ccx, fn_ty.abi, t, &name, &attrs) - } - } - } - _ => { - get_extern_const(ccx, did, t) - } - } -} - -pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llfn: ValueRef, - llargs: &[ValueRef], - fn_ty: Ty<'tcx>, - debug_loc: DebugLoc) - -> (ValueRef, 
Block<'blk, 'tcx>) { - let _icx = push_ctxt("invoke_"); - if bcx.unreachable.get() { - return (C_null(Type::i8(bcx.ccx())), bcx); - } - - let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty); - - match bcx.opt_node_id { - None => { - debug!("invoke at ???"); - } - Some(id) => { - debug!("invoke at {}", bcx.tcx().map.node_to_string(id)); - } - } - - if need_invoke(bcx) { - debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb); - for &llarg in llargs { - debug!("arg: {}", bcx.val_to_string(llarg)); - } - let normal_bcx = bcx.fcx.new_temp_block("normal-return"); - let landing_pad = bcx.fcx.get_landing_pad(); - - let llresult = Invoke(bcx, - llfn, - &llargs[..], - normal_bcx.llbb, - landing_pad, - Some(attributes), - debug_loc); - return (llresult, normal_bcx); - } else { - debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb); - for &llarg in llargs { - debug!("arg: {}", bcx.val_to_string(llarg)); - } - - let llresult = Call(bcx, llfn, &llargs[..], Some(attributes), debug_loc); - return (llresult, bcx); - } -} - -/// Returns whether this session's target will use SEH-based unwinding. -/// -/// This is only true for MSVC targets, and even then the 64-bit MSVC target -/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as -/// 64-bit MinGW) instead of "full SEH". -pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.target.options.is_like_msvc && sess.target.target.arch == "x86" -} - -pub fn avoid_invoke(bcx: Block) -> bool { - // FIXME(#25869) currently SEH-based unwinding is pretty buggy in LLVM and - // is being overhauled as this is being written. Until that - // time such that upstream LLVM's implementation is more solid - // and we start binding it we need to skip invokes for any - // target which wants SEH-based unwinding. - if bcx.sess().no_landing_pads() || wants_msvc_seh(bcx.sess()) { - true - } else if bcx.is_lpad { - // Avoid using invoke if we are already inside a landing pad. 
- true - } else { - false - } -} - -pub fn need_invoke(bcx: Block) -> bool { - if avoid_invoke(bcx) { - false - } else { - bcx.fcx.needs_invoke() - } -} - -pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef { - let _icx = push_ctxt("load_if_immediate"); - if type_is_immediate(cx.ccx(), t) { - return load_ty(cx, v, t); - } - return v; -} - -/// Helper for loading values from memory. Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. Also handles various special cases where the type -/// gives us better information about what we are loading. -pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { - if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) { - return C_undef(type_of::type_of(cx.ccx(), t)); - } - - let ptr = to_arg_ty_ptr(cx, ptr, t); - let align = type_of::align_of(cx.ccx(), t); - - if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() { - let load = Load(cx, ptr); - unsafe { - llvm::LLVMSetAlignment(load, align); - } - return load; - } - - unsafe { - let global = llvm::LLVMIsAGlobalVariable(ptr); - if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { - let val = llvm::LLVMGetInitializer(global); - if !val.is_null() { - return to_arg_ty(cx, val, t); - } - } - } - - let val = if t.is_bool() { - LoadRangeAssert(cx, ptr, Disr(0), Disr(2), llvm::False) - } else if t.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. - LoadRangeAssert(cx, ptr, Disr(0), Disr(0x10FFFF + 1), llvm::False) - } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(cx.tcx(), t) { - LoadNonNull(cx, ptr) - } else { - Load(cx, ptr) - }; - - unsafe { - llvm::LLVMSetAlignment(val, align); - } - - to_arg_ty(cx, val, t) -} - -/// Helper for storing values in memory. 
Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. -pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - - debug!("store_ty: {} : {:?} <- {}", - cx.val_to_string(dst), - t, - cx.val_to_string(v)); - - if common::type_is_fat_ptr(cx.tcx(), t) { - Store(cx, - ExtractValue(cx, v, abi::FAT_PTR_ADDR), - expr::get_dataptr(cx, dst)); - Store(cx, - ExtractValue(cx, v, abi::FAT_PTR_EXTRA), - expr::get_meta(cx, dst)); - } else { - let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t)); - unsafe { - llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t)); - } - } -} - -pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - data: ValueRef, - extra: ValueRef, - dst: ValueRef, - _ty: Ty<'tcx>) { - // FIXME: emit metadata - Store(cx, data, expr::get_dataptr(cx, dst)); - Store(cx, extra, expr::get_meta(cx, dst)); -} - -pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - src: ValueRef, - _ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { - // FIXME: emit metadata - (Load(cx, expr::get_dataptr(cx, src)), - Load(cx, expr::get_meta(cx, src))) -} - -pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { - if ty.is_bool() { - ZExt(bcx, val, Type::i8(bcx.ccx())) - } else { - val - } -} - -pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { - if ty.is_bool() { - Trunc(bcx, val, Type::i1(bcx.ccx())) - } else { - val - } -} - -pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef { - if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate LLVM type - // for this leads to bad optimizations, so its arg type is an appropriately sized integer - // and we have to convert it - BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to()) - } else { - ptr - } 
-} - -pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> { - debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id); - let _indenter = indenter(); - let _icx = push_ctxt("init_local"); - _match::store_local(bcx, local) -} - -pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - is_lpad: bool, - llbb: BasicBlockRef) - -> Block<'blk, 'tcx> { - common::BlockS::new(llbb, is_lpad, None, fcx) -} - -pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> - where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("with_cond"); - - if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { - return bcx; - } - - let fcx = bcx.fcx; - let next_cx = fcx.new_temp_block("next"); - let cond_cx = fcx.new_temp_block("cond"); - CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); - let after_cx = f(cond_cx); - if !after_cx.terminated.get() { - Br(after_cx, next_cx.llbb, DebugLoc::None); - } - next_cx -} - -enum Lifetime { Start, End } - -// If LLVM lifetime intrinsic support is enabled (i.e. optimizations -// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` -// and the intrinsic for `lt` and passes them to `emit`, which is in -// charge of generating code to call the passed intrinsic on whatever -// block of generated code is targetted for the intrinsic. -// -// If LLVM lifetime intrinsic support is disabled (i.e. optimizations -// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
-fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>, - ptr: ValueRef, - lt: Lifetime, - emit: F) - where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef) -{ - if ccx.sess().opts.optimize == config::OptLevel::No { - return; - } - - let _icx = push_ctxt(match lt { - Lifetime::Start => "lifetime_start", - Lifetime::End => "lifetime_end" - }); - - let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()); - if size == 0 { - return; - } - - let lifetime_intrinsic = ccx.get_intrinsic(match lt { - Lifetime::Start => "llvm.lifetime.start", - Lifetime::End => "llvm.lifetime.end" - }); - emit(ccx, size, lifetime_intrinsic) -} - -pub fn call_lifetime_start(cx: Block, ptr: ValueRef) { - core_lifetime_emit(cx.ccx(), ptr, Lifetime::Start, |ccx, size, lifetime_start| { - let ptr = PointerCast(cx, ptr, Type::i8p(ccx)); - Call(cx, - lifetime_start, - &[C_u64(ccx, size), ptr], - None, - DebugLoc::None); - }) -} - -pub fn call_lifetime_end(cx: Block, ptr: ValueRef) { - core_lifetime_emit(cx.ccx(), ptr, Lifetime::End, |ccx, size, lifetime_end| { - let ptr = PointerCast(cx, ptr, Type::i8p(ccx)); - Call(cx, - lifetime_end, - &[C_u64(ccx, size), ptr], - None, - DebugLoc::None); - }) -} - -// Generates code for resumption of unwind at the end of a landing pad. 
-pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { - if !bcx.sess().target.target.options.custom_unwind_resume { - Resume(bcx, lpval); - } else { - let exc_ptr = ExtractValue(bcx, lpval, 0); - let llunwresume = bcx.fcx.eh_unwind_resume(); - Call(bcx, llunwresume, &[exc_ptr], None, DebugLoc::None); - Unreachable(bcx); - } -} - - -pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) { - let _icx = push_ctxt("call_memcpy"); - let ccx = cx.ccx(); - let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; - let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); - let memcpy = ccx.get_intrinsic(&key); - let src_ptr = PointerCast(cx, src, Type::i8p(ccx)); - let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx)); - let size = IntCast(cx, n_bytes, ccx.int_type()); - let align = C_i32(ccx, align as i32); - let volatile = C_bool(ccx, false); - Call(cx, - memcpy, - &[dst_ptr, src_ptr, size, align, volatile], - None, - DebugLoc::None); -} - -pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { - let _icx = push_ctxt("memcpy_ty"); - let ccx = bcx.ccx(); - - if type_is_zero_size(ccx, t) { - return; - } - - if t.is_structural() { - let llty = type_of::type_of(ccx, t); - let llsz = llsize_of(ccx, llty); - let llalign = type_of::align_of(ccx, t); - call_memcpy(bcx, dst, src, llsz, llalign as u32); - } else if common::type_is_fat_ptr(bcx.tcx(), t) { - let (data, extra) = load_fat_ptr(bcx, src, t); - store_fat_ptr(bcx, data, extra, dst, t); - } else { - store_ty(bcx, load_ty(bcx, src, t), dst, t); - } -} - -pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - let _icx = push_ctxt("drop_done_fill_mem"); - let bcx = cx; - memfill(&B(bcx), llptr, t, adt::DTOR_DONE); -} - -pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - let 
_icx = push_ctxt("init_zero_mem"); - let bcx = cx; - memfill(&B(bcx), llptr, t, 0); -} - -// Always use this function instead of storing a constant byte to the memory -// in question. e.g. if you store a zero constant, LLVM will drown in vreg -// allocation for large data structures, and the generated code will be -// awful. (A telltale sign of this is large quantities of -// `mov [byte ptr foo],0` in the generated code.) -fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { - let _icx = push_ctxt("memfill"); - let ccx = b.ccx; - - let llty = type_of::type_of(ccx, ty); - let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - - let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); - let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); - let llzeroval = C_u8(ccx, byte); - let size = machine::llsize_of(ccx, llty); - let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); - let volatile = C_bool(ccx, false); - b.call(llintrinsicfn, - &[llptr, llzeroval, size, align, volatile], - None); -} - -/// In general, when we create an scratch value in an alloca, the -/// creator may not know if the block (that initializes the scratch -/// with the desired value) actually dominates the cleanup associated -/// with the scratch value. -/// -/// To deal with this, when we do an alloca (at the *start* of whole -/// function body), we optionally can also set the associated -/// dropped-flag state of the alloca to "dropped." -#[derive(Copy, Clone, Debug)] -pub enum InitAlloca { - /// Indicates that the state should have its associated drop flag - /// set to "dropped" at the point of allocation. - Dropped, - /// Indicates the value of the associated drop flag is irrelevant. - /// The embedded string literal is a programmer provided argument - /// for why. 
This is a safeguard forcing compiler devs to - /// document; it might be a good idea to also emit this as a - /// comment with the alloca itself when emitting LLVM output.ll. - Uninit(&'static str), -} - - -pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - name: &str) -> ValueRef { - // pnkfelix: I do not know why alloc_ty meets the assumptions for - // passing Uninit, but it was never needed (even back when we had - // the original boolean `zero` flag on `lvalue_scratch_datum`). - alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name) -} - -/// This variant of `fn alloc_ty` does not necessarily assume that the -/// alloca should be created with no initial value. Instead the caller -/// controls that assumption via the `init` flag. -/// -/// Note that if the alloca *is* initialized via `init`, then we will -/// also inject an `llvm.lifetime.start` before that initialization -/// occurs, and thus callers should not call_lifetime_start -/// themselves. But if `init` says "uninitialized", then callers are -/// in charge of choosing where to call_lifetime_start and -/// subsequently populate the alloca. -/// -/// (See related discussion on PR #30823.) 
-pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - init: InitAlloca, - name: &str) -> ValueRef { - let _icx = push_ctxt("alloc_ty"); - let ccx = bcx.ccx(); - let ty = type_of::type_of(ccx, t); - assert!(!t.has_param_types()); - match init { - InitAlloca::Dropped => alloca_dropped(bcx, t, name), - InitAlloca::Uninit(_) => alloca(bcx, ty, name), - } -} - -pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca_dropped"); - let llty = type_of::type_of(cx.ccx(), ty); - if cx.unreachable.get() { - unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); } - } - let p = alloca(cx, llty, name); - let b = cx.fcx.ccx.builder(); - b.position_before(cx.fcx.alloca_insert_pt.get().unwrap()); - - // This is just like `call_lifetime_start` (but latter expects a - // Block, which we do not have for `alloca_insert_pt`). - core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| { - let ptr = b.pointercast(p, Type::i8p(ccx)); - b.call(lifetime_start, &[C_u64(ccx, size), ptr], None); - }); - memfill(&b, p, ty, adt::DTOR_DONE); - p -} - -pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca"); - if cx.unreachable.get() { - unsafe { - return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); - } - } - debuginfo::clear_source_location(cx.fcx); - Alloca(cx, ty, name) -} - -pub fn set_value_name(val: ValueRef, name: &str) { - unsafe { - let name = CString::new(name).unwrap(); - llvm::LLVMSetValueName(val, name.as_ptr()); - } -} - -// Creates the alloca slot which holds the pointer to the slot for the final return value -pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - output_type: Ty<'tcx>) - -> ValueRef { - let lloutputtype = type_of::type_of(fcx.ccx, output_type); - - // We create an alloca to hold a pointer of type `output_type` - // which will hold the pointer to the right alloca which has the - // final ret 
value - if fcx.needs_ret_allocas { - // Let's create the stack slot - let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr"); - - // and if we're using an out pointer, then store that in our newly made slot - if type_of::return_uses_outptr(fcx.ccx, output_type) { - let outptr = get_param(fcx.llfn, 0); - - let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); - b.store(outptr, slot); - } - - slot - - // But if there are no nested returns, we skip the indirection and have a single - // retslot - } else { - if type_of::return_uses_outptr(fcx.ccx, output_type) { - get_param(fcx.llfn, 0) - } else { - AllocaFcx(fcx, lloutputtype, "sret_slot") - } - } -} - -struct FindNestedReturn { - found: bool, -} - -impl FindNestedReturn { - fn new() -> FindNestedReturn { - FindNestedReturn { - found: false, - } - } -} - -impl<'v> Visitor<'v> for FindNestedReturn { - fn visit_expr(&mut self, e: &hir::Expr) { - match e.node { - hir::ExprRet(..) => { - self.found = true; - } - _ => intravisit::walk_expr(self, e), - } - } -} - -fn build_cfg(tcx: &ty::ctxt, id: ast::NodeId) -> (ast::NodeId, Option) { - let blk = match tcx.map.find(id) { - Some(hir_map::NodeItem(i)) => { - match i.node { - hir::ItemFn(_, _, _, _, _, ref blk) => { - blk - } - _ => tcx.sess.bug("unexpected item variant in has_nested_returns"), - } - } - Some(hir_map::NodeTraitItem(trait_item)) => { - match trait_item.node { - hir::MethodTraitItem(_, Some(ref body)) => body, - _ => { - tcx.sess.bug("unexpected variant: trait item other than a provided method in \ - has_nested_returns") - } - } - } - Some(hir_map::NodeImplItem(impl_item)) => { - match impl_item.node { - hir::ImplItemKind::Method(_, ref body) => body, - _ => { - tcx.sess.bug("unexpected variant: non-method impl item in has_nested_returns") - } - } - } - Some(hir_map::NodeExpr(e)) => { - match e.node { - hir::ExprClosure(_, _, ref blk) => blk, - _ => tcx.sess.bug("unexpected expr variant in has_nested_returns"), - } - } - 
Some(hir_map::NodeVariant(..)) | - Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None), - - // glue, shims, etc - None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None), - - _ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}", - tcx.map.path_to_string(id))), - }; - - (blk.id, Some(cfg::CFG::new(tcx, blk))) -} - -// Checks for the presence of "nested returns" in a function. -// Nested returns are when the inner expression of a return expression -// (the 'expr' in 'return expr') contains a return expression. Only cases -// where the outer return is actually reachable are considered. Implicit -// returns from the end of blocks are considered as well. -// -// This check is needed to handle the case where the inner expression is -// part of a larger expression that may have already partially-filled the -// return slot alloca. This can cause errors related to clean-up due to -// the clobbering of the existing value in the return slot. -fn has_nested_returns(tcx: &ty::ctxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool { - for index in cfg.graph.depth_traverse(cfg.entry) { - let n = cfg.graph.node_data(index); - match tcx.map.find(n.id()) { - Some(hir_map::NodeExpr(ex)) => { - if let hir::ExprRet(Some(ref ret_expr)) = ex.node { - let mut visitor = FindNestedReturn::new(); - intravisit::walk_expr(&mut visitor, &**ret_expr); - if visitor.found { - return true; - } - } - } - Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => { - let mut visitor = FindNestedReturn::new(); - walk_list!(&mut visitor, visit_expr, &blk.expr); - if visitor.found { - return true; - } - } - _ => {} - } - } - - return false; -} - -// NB: must keep 4 fns in sync: -// -// - type_of_fn -// - create_datums_for_fn_args. -// - new_fn_ctxt -// - trans_args -// -// Be warned! You must call `init_function` before doing anything with the -// returned function context. 
-pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, - llfndecl: ValueRef, - id: ast::NodeId, - has_env: bool, - output_type: ty::FnOutput<'tcx>, - param_substs: &'tcx Substs<'tcx>, - sp: Option, - block_arena: &'a TypedArena>) - -> FunctionContext<'a, 'tcx> { - common::validate_substs(param_substs); - - debug!("new_fn_ctxt(path={}, id={}, param_substs={:?})", - if id == !0 { - "".to_string() - } else { - ccx.tcx().map.path_to_string(id).to_string() - }, - id, - param_substs); - - let uses_outptr = match output_type { - ty::FnConverging(output_type) => { - let substd_output_type = monomorphize::apply_param_substs(ccx.tcx(), - param_substs, - &output_type); - type_of::return_uses_outptr(ccx, substd_output_type) - } - ty::FnDiverging => false, - }; - let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl); - let (blk_id, cfg) = build_cfg(ccx.tcx(), id); - let nested_returns = if let Some(ref cfg) = cfg { - has_nested_returns(ccx.tcx(), cfg, blk_id) - } else { - false - }; - - let mir = ccx.mir_map().get(&id); - - let mut fcx = FunctionContext { - mir: mir, - llfn: llfndecl, - llenv: None, - llretslotptr: Cell::new(None), - param_env: ccx.tcx().empty_parameter_environment(), - alloca_insert_pt: Cell::new(None), - llreturn: Cell::new(None), - needs_ret_allocas: nested_returns, - personality: Cell::new(None), - caller_expects_out_pointer: uses_outptr, - lllocals: RefCell::new(NodeMap()), - llupvars: RefCell::new(NodeMap()), - lldropflag_hints: RefCell::new(DropFlagHintsMap::new()), - id: id, - param_substs: param_substs, - span: sp, - block_arena: block_arena, - ccx: ccx, - debug_context: debug_context, - scopes: RefCell::new(Vec::new()), - cfg: cfg, - }; - - if has_env { - fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint)) - } - - fcx -} - -/// Performs setup on a newly created function, creating the entry scope block -/// and allocating space for the return pointer. 
-pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>, - skip_retptr: bool, - output: ty::FnOutput<'tcx>) - -> Block<'a, 'tcx> { - let entry_bcx = fcx.new_temp_block("entry-block"); - - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in FunctionContext::cleanup. - fcx.alloca_insert_pt.set(Some(unsafe { - Load(entry_bcx, C_null(Type::i8p(fcx.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb) - })); - - if let ty::FnConverging(output_type) = output { - // This shouldn't need to recompute the return type, - // as new_fn_ctxt did it already. - let substd_output_type = fcx.monomorphize(&output_type); - if !return_type_is_void(fcx.ccx, substd_output_type) { - // If the function returns nil/bot, there is no real return - // value, so do not set `llretslotptr`. - if !skip_retptr || fcx.caller_expects_out_pointer { - // Otherwise, we normally allocate the llretslotptr, unless we - // have been instructed to skip it for immediate return - // values. - fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type))); - } - } - } - - // Create the drop-flag hints for every unfragmented path in the function. - let tcx = fcx.ccx.tcx(); - let fn_did = tcx.map.local_def_id(fcx.id); - let tables = tcx.tables.borrow(); - let mut hints = fcx.lldropflag_hints.borrow_mut(); - let fragment_infos = tcx.fragment_infos.borrow(); - - // Intern table for drop-flag hint datums. 
- let mut seen = HashMap::new(); - - if let Some(fragment_infos) = fragment_infos.get(&fn_did) { - for &info in fragment_infos { - - let make_datum = |id| { - let init_val = C_u8(fcx.ccx, adt::DTOR_NEEDED_HINT); - let llname = &format!("dropflag_hint_{}", id); - debug!("adding hint {}", llname); - let ty = tcx.types.u8; - let ptr = alloc_ty(entry_bcx, ty, llname); - Store(entry_bcx, init_val, ptr); - let flag = datum::Lvalue::new_dropflag_hint("base::init_function"); - datum::Datum::new(ptr, ty, flag) - }; - - let (var, datum) = match info { - ty::FragmentInfo::Moved { var, .. } | - ty::FragmentInfo::Assigned { var, .. } => { - let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| { - let ty = tables.node_types[&var]; - if fcx.type_needs_drop(ty) { - let datum = make_datum(var); - seen.insert(var, Some(datum.clone())); - Some(datum) - } else { - // No drop call needed, so we don't need a dropflag hint - None - } - }); - if let Some(datum) = opt_datum { - (var, datum) - } else { - continue - } - } - }; - match info { - ty::FragmentInfo::Moved { move_expr: expr_id, .. } => { - debug!("FragmentInfo::Moved insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => { - debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id); - hints.insert(expr_id, DropHint::new(var, datum)); - } - } - } - } - - entry_bcx -} - -// NB: must keep 4 fns in sync: -// -// - type_of_fn -// - create_datums_for_fn_args. -// - new_fn_ctxt -// - trans_args - -pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>) -> datum::Rvalue { - use trans::datum::{ByRef, ByValue}; - - datum::Rvalue { - mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue } - } -} - -// create_datums_for_fn_args: creates lvalue datums for each of the -// incoming function arguments. 
-pub fn create_datums_for_fn_args<'a, 'tcx>(mut bcx: Block<'a, 'tcx>, - args: &[hir::Arg], - arg_tys: &[Ty<'tcx>], - has_tupled_arg: bool, - arg_scope: cleanup::CustomScopeIndex) - -> Block<'a, 'tcx> { - let _icx = push_ctxt("create_datums_for_fn_args"); - let fcx = bcx.fcx; - let arg_scope_id = cleanup::CustomScope(arg_scope); - - debug!("create_datums_for_fn_args"); - - // Return an array wrapping the ValueRefs that we get from `get_param` for - // each argument into datums. - // - // For certain mode/type combinations, the raw llarg values are passed - // by value. However, within the fn body itself, we want to always - // have all locals and arguments be by-ref so that we can cancel the - // cleanup and for better interaction with LLVM's debug info. So, if - // the argument would be passed by value, we store it into an alloca. - // This alloca should be optimized away by LLVM's mem-to-reg pass in - // the event it's not truly needed. - let mut idx = fcx.arg_offset() as c_uint; - let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor"); - for (i, &arg_ty) in arg_tys.iter().enumerate() { - let arg_datum = if !has_tupled_arg || i < arg_tys.len() - 1 { - if type_of::arg_is_indirect(bcx.ccx(), arg_ty) && - bcx.sess().opts.debuginfo != FullDebugInfo { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. 
- let llarg = get_param(fcx.llfn, idx); - idx += 1; - bcx.fcx.schedule_lifetime_end(arg_scope_id, llarg); - bcx.fcx.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None); - - datum::Datum::new(llarg, - arg_ty, - datum::Lvalue::new("create_datum_for_fn_args")) - } else if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let data = get_param(fcx.llfn, idx); - let extra = get_param(fcx.llfn, idx + 1); - idx += 2; - unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "", uninit_reason, - arg_scope_id, (data, extra), - |(data, extra), bcx, dst| { - debug!("populate call for create_datum_for_fn_args \ - early fat arg, on arg[{}] ty={:?}", i, arg_ty); - - Store(bcx, data, expr::get_dataptr(bcx, dst)); - Store(bcx, extra, expr::get_meta(bcx, dst)); - bcx - })) - } else { - let llarg = get_param(fcx.llfn, idx); - idx += 1; - let tmp = datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty)); - unpack_datum!(bcx, - datum::lvalue_scratch_datum(bcx, - arg_ty, - "", - uninit_reason, - arg_scope_id, - tmp, - |tmp, bcx, dst| { - - debug!("populate call for create_datum_for_fn_args \ - early thin arg, on arg[{}] ty={:?}", i, arg_ty); - - tmp.store_to(bcx, dst) - })) - } - } else { - // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for. 
- match arg_ty.sty { - ty::TyTuple(ref tupled_arg_tys) => { - unpack_datum!(bcx, - datum::lvalue_scratch_datum(bcx, - arg_ty, - "tupled_args", - uninit_reason, - arg_scope_id, - (), - |(), - mut bcx, - llval| { - debug!("populate call for create_datum_for_fn_args \ - tupled_args, on arg[{}] ty={:?}", i, arg_ty); - for (j, &tupled_arg_ty) in - tupled_arg_tys.iter().enumerate() { - let lldest = StructGEP(bcx, llval, j); - if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) { - let data = get_param(bcx.fcx.llfn, idx); - let extra = get_param(bcx.fcx.llfn, idx + 1); - Store(bcx, data, expr::get_dataptr(bcx, lldest)); - Store(bcx, extra, expr::get_meta(bcx, lldest)); - idx += 2; - } else { - let datum = datum::Datum::new( - get_param(bcx.fcx.llfn, idx), - tupled_arg_ty, - arg_kind(bcx.fcx, tupled_arg_ty)); - idx += 1; - bcx = datum.store_to(bcx, lldest); - }; - } - bcx - })) - } - _ => { - bcx.tcx() - .sess - .bug("last argument of a function with `rust-call` ABI isn't a tuple?!") - } - } - }; - - let pat = &*args[i].pat; - bcx = if let Some(name) = simple_name(pat) { - // Generate nicer LLVM for the common case of fn a pattern - // like `x: T` - set_value_name(arg_datum.val, &bcx.name(name)); - bcx.fcx.lllocals.borrow_mut().insert(pat.id, arg_datum); - bcx - } else { - // General path. Copy out the values that are used in the - // pattern. - _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id) - }; - debuginfo::create_argument_metadata(bcx, &args[i]); - } - - bcx -} - -// Ties up the llstaticallocas -> llloadenv -> lltop edges, -// and builds the return block. 
-pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>, - last_bcx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_loc: DebugLoc) { - let _icx = push_ctxt("finish_fn"); - - let ret_cx = match fcx.llreturn.get() { - Some(llreturn) => { - if !last_bcx.terminated.get() { - Br(last_bcx, llreturn, DebugLoc::None); - } - raw_block(fcx, false, llreturn) - } - None => last_bcx, - }; - - // This shouldn't need to recompute the return type, - // as new_fn_ctxt did it already. - let substd_retty = fcx.monomorphize(&retty); - build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc); - - debuginfo::clear_source_location(fcx); - fcx.cleanup(); -} - -// Builds the return block for a function. -pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>, - ret_cx: Block<'blk, 'tcx>, - retty: ty::FnOutput<'tcx>, - ret_debug_location: DebugLoc) { - if fcx.llretslotptr.get().is_none() || - (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) { - return RetVoid(ret_cx, ret_debug_location); - } - - let retslot = if fcx.needs_ret_allocas { - Load(ret_cx, fcx.llretslotptr.get().unwrap()) - } else { - fcx.llretslotptr.get().unwrap() - }; - let retptr = Value(retslot); - match retptr.get_dominating_store(ret_cx) { - // If there's only a single store to the ret slot, we can directly return - // the value that was stored and omit the store and the alloca - Some(s) => { - let retval = s.get_operand(0).unwrap().get(); - s.erase_from_parent(); - - if retptr.has_no_uses() { - retptr.erase_from_parent(); - } - - let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) { - Trunc(ret_cx, retval, Type::i1(fcx.ccx)) - } else { - retval - }; - - if fcx.caller_expects_out_pointer { - if let ty::FnConverging(retty) = retty { - store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty); - } - RetVoid(ret_cx, ret_debug_location) - } else { - Ret(ret_cx, retval, ret_debug_location) - } - } - // Otherwise, copy the return value to the ret slot - None 
=> match retty { - ty::FnConverging(retty) => { - if fcx.caller_expects_out_pointer { - memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty); - RetVoid(ret_cx, ret_debug_location) - } else { - Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location) - } - } - ty::FnDiverging => { - if fcx.caller_expects_out_pointer { - RetVoid(ret_cx, ret_debug_location) - } else { - Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location) - } - } - }, - } -} - -/// Builds an LLVM function out of a source function. -/// -/// If the function closes over its environment a closure will be returned. -pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - llfndecl: ValueRef, - param_substs: &'tcx Substs<'tcx>, - fn_ast_id: ast::NodeId, - attributes: &[ast::Attribute], - output_type: ty::FnOutput<'tcx>, - abi: Abi, - closure_env: closure::ClosureEnv<'b>) { - ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1); - - let _icx = push_ctxt("trans_closure"); - attributes::emit_uwtable(llfndecl, true); - - debug!("trans_closure(..., param_substs={:?})", param_substs); - - let has_env = match closure_env { - closure::ClosureEnv::Closure(..) => true, - closure::ClosureEnv::NotClosure => false, - }; - - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfndecl, - fn_ast_id, - has_env, - output_type, - param_substs, - Some(body.span), - &arena); - let mut bcx = init_function(&fcx, false, output_type); - - if attributes.iter().any(|item| item.check_name("rustc_mir")) { - mir::trans_mir(bcx); - fcx.cleanup(); - return; - } - - // cleanup scope for the incoming arguments - let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, - fn_ast_id, - body.span, - true); - let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc); - - let block_ty = node_id_type(bcx, body.id); - - // Set up arguments to the function. 
- let monomorphized_arg_types = decl.inputs - .iter() - .map(|arg| node_id_type(bcx, arg.id)) - .collect::>(); - for monomorphized_arg_type in &monomorphized_arg_types { - debug!("trans_closure: monomorphized_arg_type: {:?}", - monomorphized_arg_type); - } - debug!("trans_closure: function lltype: {}", - bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn)); - - let has_tupled_arg = match closure_env { - closure::ClosureEnv::NotClosure => abi == RustCall, - _ => false, - }; - - bcx = create_datums_for_fn_args(bcx, - &decl.inputs, - &monomorphized_arg_types, - has_tupled_arg, - arg_scope); - - bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope)); - - // Up until here, IR instructions for this function have explicitly not been annotated with - // source code location, so we don't step into call setup code. From here on, source location - // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fcx); - - let dest = match fcx.llretslotptr.get() { - Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")), - None => { - assert!(type_is_zero_size(bcx.ccx(), block_ty)); - expr::Ignore - } - }; - - // This call to trans_block is the place where we bridge between - // translation calls that don't have a return value (trans_crate, - // trans_mod, trans_item, et cetera) and those that do - // (trans_block, trans_expr, et cetera). - bcx = controlflow::trans_block(bcx, body, dest); - - match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); - } - _ => {} - } - - match fcx.llreturn.get() { - Some(_) => { - Br(bcx, fcx.return_exit_block(), DebugLoc::None); - fcx.pop_custom_cleanup_scope(arg_scope); - } - None => { - // Microoptimization writ large: avoid creating a separate - // llreturn basic block - bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope); - } - }; - - // Put return block after all other blocks. 
- // This somewhat improves single-stepping experience in debugger. - unsafe { - let llreturn = fcx.llreturn.get(); - if let Some(llreturn) = llreturn { - llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb); - } - } - - let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id, fn_cleanup_debug_loc.span); - - // Insert the mandatory first few basic blocks before lltop. - finish_fn(&fcx, bcx, output_type, ret_debug_loc); -} - -/// Creates an LLVM function corresponding to a source language function. -pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - llfndecl: ValueRef, - param_substs: &'tcx Substs<'tcx>, - id: ast::NodeId, - attrs: &[ast::Attribute]) { - let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string()); - debug!("trans_fn(param_substs={:?})", param_substs); - let _icx = push_ctxt("trans_fn"); - let fn_ty = ccx.tcx().node_id_to_type(id); - let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fn_ty); - let sig = fn_ty.fn_sig(); - let sig = ccx.tcx().erase_late_bound_regions(&sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let output_type = sig.output; - let abi = fn_ty.fn_abi(); - trans_closure(ccx, - decl, - body, - llfndecl, - param_substs, - id, - attrs, - output_type, - abi, - closure::ClosureEnv::NotClosure); -} - -pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - disr: Disr, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { - let _icx = push_ctxt("trans_enum_variant"); - - trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, disr, param_substs, llfndecl); -} - -pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ctor_ty: Ty<'tcx>, - disr: Disr, - args: callee::CallArgs, - dest: expr::Dest, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - - let ccx = bcx.fcx.ccx; - - let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); - let sig = 
infer::normalize_associated_type(ccx.tcx(), &sig); - let result_ty = sig.output.unwrap(); - - // Get location to store the result. If the user does not care about - // the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, result_ty) { - let llresult = alloc_ty(bcx, result_ty, "constructor_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(type_of::type_of(ccx, result_ty).ptr_to()) - } - } - }; - - if !type_is_zero_size(ccx, result_ty) { - match args { - callee::ArgExprs(exprs) => { - let fields = exprs.iter().map(|x| &**x).enumerate().collect::>(); - bcx = expr::trans_adt(bcx, - result_ty, - disr, - &fields[..], - None, - expr::SaveIn(llresult), - debug_loc); - } - _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor"), - } - } else { - // Just eval all the expressions (if any). Since expressions in Rust can have arbitrary - // contents, there could be side-effects we need from them. 
- match args { - callee::ArgExprs(exprs) => { - for expr in exprs { - bcx = expr::trans_into(bcx, expr, expr::Ignore); - } - } - _ => (), - } - } - - // If the caller doesn't care about the result - // drop the temporary we made - let bcx = match dest { - expr::SaveIn(_) => bcx, - expr::Ignore => { - let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc); - if !type_is_zero_size(ccx, result_ty) { - call_lifetime_end(bcx, llresult); - } - bcx - } - }; - - Result::new(bcx, llresult) -} - -pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { - let _icx = push_ctxt("trans_tuple_struct"); - - trans_enum_variant_or_tuple_like_struct(ccx, ctor_id, Disr(0), param_substs, llfndecl); -} - -fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ctor_id: ast::NodeId, - disr: Disr, - param_substs: &'tcx Substs<'tcx>, - llfndecl: ValueRef) { - let ctor_ty = ccx.tcx().node_id_to_type(ctor_id); - let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty); - - let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig()); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let arg_tys = sig.inputs; - let result_ty = sig.output; - - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfndecl, - ctor_id, - false, - result_ty, - param_substs, - None, - &arena); - let bcx = init_function(&fcx, false, result_ty); - - assert!(!fcx.needs_ret_allocas); - - if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) { - let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot"); - let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let repr = adt::represent_type(ccx, result_ty.unwrap()); - let mut llarg_idx = fcx.arg_offset() as c_uint; - for (i, arg_ty) in arg_tys.into_iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, &*repr, 
dest_val, Disr::from(disr), i); - if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - Store(bcx, - get_param(fcx.llfn, llarg_idx), - expr::get_dataptr(bcx, lldestptr)); - Store(bcx, - get_param(fcx.llfn, llarg_idx + 1), - expr::get_meta(bcx, lldestptr)); - llarg_idx += 2; - } else { - let arg = get_param(fcx.llfn, llarg_idx); - llarg_idx += 1; - - if arg_is_indirect(ccx, arg_ty) { - memcpy_ty(bcx, lldestptr, arg, arg_ty); - } else { - store_ty(bcx, arg, lldestptr, arg_ty); - } - } - } - adt::trans_set_discr(bcx, &*repr, dest, disr); - } - - finish_fn(&fcx, bcx, result_ty, DebugLoc::None); -} - -fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &hir::EnumDef, sp: Span, id: ast::NodeId) { - let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully - - let print_info = ccx.sess().print_enum_sizes(); - - let levels = ccx.tcx().node_lint_levels.borrow(); - let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES); - let lvlsrc = levels.get(&(id, lint_id)); - let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow); - - if is_allow && !print_info { - // we're not interested in anything here - return; - } - - let ty = ccx.tcx().node_id_to_type(id); - let avar = adt::represent_type(ccx, ty); - match *avar { - adt::General(_, ref variants, _) => { - for var in variants { - let mut size = 0; - for field in var.fields.iter().skip(1) { - // skip the discriminant - size += llsize_of_real(ccx, sizing_type_of(ccx, *field)); - } - sizes.push(size); - } - }, - _ => { /* its size is either constant or unimportant */ } - } - - let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0), - |(l, s, li), (idx, &size)| - if size > l { - (size, l, idx) - } else if size > s { - (l, size, li) - } else { - (l, s, li) - } - ); - - // FIXME(#30505) Should use logging for this. 
- if print_info { - let llty = type_of::sizing_type_of(ccx, ty); - - let sess = &ccx.tcx().sess; - sess.span_note_without_error(sp, - &*format!("total size: {} bytes", llsize_of_real(ccx, llty))); - match *avar { - adt::General(..) => { - for (i, var) in enum_def.variants.iter().enumerate() { - ccx.tcx() - .sess - .span_note_without_error(var.span, - &*format!("variant data: {} bytes", sizes[i])); - } - } - _ => {} - } - } - - // we only warn if the largest variant is at least thrice as large as - // the second-largest. - if !is_allow && largest > slargest * 3 && slargest > 0 { - // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing - // pass for the latter already ran. - lint::raw_struct_lint(&ccx.tcx().sess, - &ccx.tcx().sess.lint_store.borrow(), - lint::builtin::VARIANT_SIZE_DIFFERENCES, - *lvlsrc.unwrap(), - Some(sp), - &format!("enum variant is more than three times larger ({} bytes) \ - than the next largest (ignoring padding)", - largest)) - .span_note(enum_def.variants[largest_index].span, - "this variant is the largest") - .emit(); - } -} - -pub fn llvm_linkage_by_name(name: &str) -> Option { - // Use the names from src/llvm/docs/LangRef.rst here. Most types are only - // applicable to variable declarations and may not really make sense for - // Rust code in the first place but whitelist them anyway and trust that - // the user knows what s/he's doing. Who knows, unanticipated use cases - // may pop up in the future. - // - // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported - // and don't have to be, LLVM treats them as no-ops. 
- match name { - "appending" => Some(llvm::AppendingLinkage), - "available_externally" => Some(llvm::AvailableExternallyLinkage), - "common" => Some(llvm::CommonLinkage), - "extern_weak" => Some(llvm::ExternalWeakLinkage), - "external" => Some(llvm::ExternalLinkage), - "internal" => Some(llvm::InternalLinkage), - "linkonce" => Some(llvm::LinkOnceAnyLinkage), - "linkonce_odr" => Some(llvm::LinkOnceODRLinkage), - "private" => Some(llvm::PrivateLinkage), - "weak" => Some(llvm::WeakAnyLinkage), - "weak_odr" => Some(llvm::WeakODRLinkage), - _ => None, - } -} - - -/// Enum describing the origin of an LLVM `Value`, for linkage purposes. -#[derive(Copy, Clone)] -pub enum ValueOrigin { - /// The LLVM `Value` is in this context because the corresponding item was - /// assigned to the current compilation unit. - OriginalTranslation, - /// The `Value`'s corresponding item was assigned to some other compilation - /// unit, but the `Value` was translated in this context anyway because the - /// item is marked `#[inline]`. - InlinedCopy, -} - -/// Set the appropriate linkage for an LLVM `ValueRef` (function or global). -/// If the `llval` is the direct translation of a specific Rust item, `id` -/// should be set to the `NodeId` of that item. (This mapping should be -/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to -/// `None`.) `llval_origin` indicates whether `llval` is the translation of an -/// item assigned to `ccx`'s compilation unit or an inlined copy of an item -/// assigned to a different compilation unit. -pub fn update_linkage(ccx: &CrateContext, - llval: ValueRef, - id: Option, - llval_origin: ValueOrigin) { - match llval_origin { - InlinedCopy => { - // `llval` is a translation of an item defined in a separate - // compilation unit. This only makes sense if there are at least - // two compilation units. 
- assert!(ccx.sess().opts.cg.codegen_units > 1); - // `llval` is a copy of something defined elsewhere, so use - // `AvailableExternallyLinkage` to avoid duplicating code in the - // output. - llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage); - return; - }, - OriginalTranslation => {}, - } - - if let Some(id) = id { - let item = ccx.tcx().map.get(id); - if let hir_map::NodeItem(i) = item { - if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") { - if let Some(linkage) = llvm_linkage_by_name(&name) { - llvm::SetLinkage(llval, linkage); - } else { - ccx.sess().span_fatal(i.span, "invalid linkage specified"); - } - return; - } - } - } - - match id { - Some(id) if ccx.reachable().contains(&id) => { - llvm::SetLinkage(llval, llvm::ExternalLinkage); - }, - _ => { - // `id` does not refer to an item in `ccx.reachable`. - if ccx.sess().opts.cg.codegen_units > 1 { - llvm::SetLinkage(llval, llvm::ExternalLinkage); - } else { - llvm::SetLinkage(llval, llvm::InternalLinkage); - } - }, - } -} - -fn set_global_section(ccx: &CrateContext, llval: ValueRef, i: &hir::Item) { - match attr::first_attr_value_str_by_name(&i.attrs, "link_section") { - Some(sect) => { - if contains_null(§) { - ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", §)); - } - unsafe { - let buf = CString::new(sect.as_bytes()).unwrap(); - llvm::LLVMSetSection(llval, buf.as_ptr()); - } - }, - None => () - } -} - -pub fn trans_item(ccx: &CrateContext, item: &hir::Item) { - let _icx = push_ctxt("trans_item"); - - let from_external = ccx.external_srcs().borrow().contains_key(&item.id); - - match item.node { - hir::ItemFn(ref decl, _, _, abi, ref generics, ref body) => { - if !generics.is_type_parameterized() { - let trans_everywhere = attr::requests_inline(&item.attrs); - // Ignore `trans_everywhere` for cross-crate inlined items - // (`from_external`). 
`trans_item` will be called once for each - // compilation unit that references the item, so it will still get - // translated everywhere it's needed. - for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) { - let llfn = get_item_val(ccx, item.id); - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - if abi != Rust { - foreign::trans_rust_fn_with_foreign_abi(ccx, - &**decl, - &**body, - &item.attrs, - llfn, - empty_substs, - item.id, - None); - } else { - trans_fn(ccx, - &**decl, - &**body, - llfn, - empty_substs, - item.id, - &item.attrs); - } - set_global_section(ccx, llfn, item); - update_linkage(ccx, - llfn, - Some(item.id), - if is_origin { - OriginalTranslation - } else { - InlinedCopy - }); - - if is_entry_fn(ccx.sess(), item.id) { - create_entry_wrapper(ccx, item.span, llfn); - // check for the #[rustc_error] annotation, which forces an - // error in trans. This is used to write compile-fail tests - // that actually test that compilation succeeds without - // reporting an error. - let item_def_id = ccx.tcx().map.local_def_id(item.id); - if ccx.tcx().has_attr(item_def_id, "rustc_error") { - ccx.tcx().sess.span_fatal(item.span, "compilation successful"); - } - } - } - } - } - hir::ItemImpl(_, _, ref generics, _, _, ref impl_items) => { - meth::trans_impl(ccx, item.name, impl_items, generics, item.id); - } - hir::ItemMod(_) => { - // modules have no equivalent at runtime, they just affect - // the mangled names of things contained within - } - hir::ItemEnum(ref enum_definition, ref gens) => { - if gens.ty_params.is_empty() { - // sizes only make sense for non-generic types - - enum_variant_size_lint(ccx, enum_definition, item.span, item.id); - } - } - hir::ItemConst(..) 
=> {} - hir::ItemStatic(_, m, ref expr) => { - let g = match consts::trans_static(ccx, m, expr, item.id, &item.attrs) { - Ok(g) => g, - Err(err) => ccx.tcx().sess.span_fatal(expr.span, &err.description()), - }; - set_global_section(ccx, g, item); - update_linkage(ccx, g, Some(item.id), OriginalTranslation); - } - hir::ItemForeignMod(ref foreign_mod) => { - foreign::trans_foreign_mod(ccx, foreign_mod); - } - hir::ItemTrait(..) => {} - _ => { - // fall through - } - } -} - -// only use this for foreign function ABIs and glue, use `register_fn` for Rust functions -pub fn register_fn_llvmty(ccx: &CrateContext, - sp: Span, - sym: String, - node_id: ast::NodeId, - cc: llvm::CallConv, - llfty: Type) - -> ValueRef { - debug!("register_fn_llvmty id={} sym={}", node_id, sym); - - let llfn = declare::define_fn(ccx, &sym[..], cc, llfty, - ty::FnConverging(ccx.tcx().mk_nil())).unwrap_or_else(||{ - ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym)); - }); - finish_register_fn(ccx, sym, node_id); - llfn -} - -fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId) { - ccx.item_symbols().borrow_mut().insert(node_id, sym); -} - -fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - sp: Span, - sym: String, - node_id: ast::NodeId, - node_type: Ty<'tcx>) - -> ValueRef { - if let ty::TyBareFn(_, ref f) = node_type.sty { - if f.abi != Rust && f.abi != RustCall { - ccx.sess().span_bug(sp, - &format!("only the `{}` or `{}` calling conventions are valid \ - for this function; `{}` was specified", - Rust.name(), - RustCall.name(), - f.abi.name())); - } - } else { - ccx.sess().span_bug(sp, "expected bare rust function") - } - - let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(|| { - ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym)); - }); - finish_register_fn(ccx, sym, node_id); - llfn -} - -pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool { - match *sess.entry_fn.borrow() { - 
Some((entry_id, _)) => node_id == entry_id, - None => false, - } -} - -/// Create the `main` function which will initialise the rust runtime and call users’ main -/// function. -pub fn create_entry_wrapper(ccx: &CrateContext, sp: Span, main_llfn: ValueRef) { - let et = ccx.sess().entry_type.get().unwrap(); - match et { - config::EntryMain => { - create_entry_fn(ccx, sp, main_llfn, true); - } - config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false), - config::EntryNone => {} // Do nothing. - } - - fn create_entry_fn(ccx: &CrateContext, - sp: Span, - rust_main: ValueRef, - use_start_lang_item: bool) { - let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type()); - - let llfn = declare::define_cfn(ccx, "main", llfty, ccx.tcx().mk_nil()).unwrap_or_else(|| { - // FIXME: We should be smart and show a better diagnostic here. - ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") - .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") - .emit(); - ccx.sess().abort_if_errors(); - panic!(); - }); - - let llbb = unsafe { - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _) - }; - let bld = ccx.raw_builder(); - unsafe { - llvm::LLVMPositionBuilderAtEnd(bld, llbb); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx); - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) { - Ok(id) => id, - Err(s) => { - ccx.sess().fatal(&s[..]); - } - }; - let start_fn = if let Some(start_node_id) = ccx.tcx() - .map - .as_local_node_id(start_def_id) { - get_item_val(ccx, start_node_id) - } else { - let start_fn_type = ccx.tcx().lookup_item_type(start_def_id).ty; - trans_external_path(ccx, start_def_id, start_fn_type) - }; - let args = { - let opaque_rust_main = - llvm::LLVMBuildPointerCast(bld, - rust_main, - Type::i8p(ccx).to_ref(), - "rust_main\0".as_ptr() as *const _); - - 
vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)] - }; - (start_fn, args) - } else { - debug!("using user-defined start fn"); - let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]; - - (rust_main, args) - }; - - let result = llvm::LLVMBuildCall(bld, - start_fn, - args.as_ptr(), - args.len() as c_uint, - noname()); - - llvm::LLVMBuildRet(bld, result); - } - } -} - -fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - id: ast::NodeId, - ty: Ty<'tcx>, - attrs: &[ast::Attribute]) - -> String { - match ccx.external_srcs().borrow().get(&id) { - Some(&did) => { - let sym = ccx.sess().cstore.item_symbol(did); - debug!("found item {} in other crate...", sym); - return sym; - } - None => {} - } - - match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) { - // Use provided name - Some(name) => name.to_string(), - _ => { - let path = ccx.tcx().map.def_path_from_id(id); - if attr::contains_name(attrs, "no_mangle") { - // Don't mangle - path.last().unwrap().data.to_string() - } else { - match weak_lang_items::link_name(attrs) { - Some(name) => name.to_string(), - None => { - // Usual name mangling - mangle_exported_name(ccx, path, ty, id) - } - } - } - } - } -} - -fn contains_null(s: &str) -> bool { - s.bytes().any(|b| b == 0) -} - -pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef { - debug!("get_item_val(id=`{}`)", id); - - match ccx.item_vals().borrow().get(&id).cloned() { - Some(v) => return v, - None => {} - } - - let item = ccx.tcx().map.get(id); - debug!("get_item_val: id={} item={:?}", id, item); - let val = match item { - hir_map::NodeItem(i) => { - let ty = ccx.tcx().node_id_to_type(i.id); - let sym = || exported_name(ccx, id, ty, &i.attrs); - - let v = match i.node { - hir::ItemStatic(..) 
=> { - // If this static came from an external crate, then - // we need to get the symbol from metadata instead of - // using the current crate's name/version - // information in the hash of the symbol - let sym = sym(); - debug!("making {}", sym); - - // Create the global before evaluating the initializer; - // this is necessary to allow recursive statics. - let llty = type_of(ccx, ty); - let g = declare::define_global(ccx, &sym[..], llty).unwrap_or_else(|| { - ccx.sess() - .span_fatal(i.span, &format!("symbol `{}` is already defined", sym)) - }); - - ccx.item_symbols().borrow_mut().insert(i.id, sym); - g - } - - hir::ItemFn(_, _, _, abi, _, _) => { - let sym = sym(); - let llfn = if abi == Rust { - register_fn(ccx, i.span, sym, i.id, ty) - } else { - foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id) - }; - attributes::from_fn_attrs(ccx, &i.attrs, llfn); - llfn - } - - _ => ccx.sess().bug("get_item_val: weird result in table"), - }; - - v - } - - hir_map::NodeTraitItem(trait_item) => { - debug!("get_item_val(): processing a NodeTraitItem"); - match trait_item.node { - hir::MethodTraitItem(_, Some(_)) => { - register_method(ccx, id, &trait_item.attrs, trait_item.span) - } - _ => { - ccx.sess().span_bug(trait_item.span, - "unexpected variant: trait item other than a provided \ - method in get_item_val()"); - } - } - } - - hir_map::NodeImplItem(impl_item) => { - match impl_item.node { - hir::ImplItemKind::Method(..) => { - register_method(ccx, id, &impl_item.attrs, impl_item.span) - } - _ => { - ccx.sess().span_bug(impl_item.span, - "unexpected variant: non-method impl item in \ - get_item_val()"); - } - } - } - - hir_map::NodeForeignItem(ni) => { - match ni.node { - hir::ForeignItemFn(..) => { - let abi = ccx.tcx().map.get_foreign_abi(id); - let ty = ccx.tcx().node_id_to_type(ni.id); - let name = foreign::link_name(&*ni); - foreign::register_foreign_item_fn(ccx, abi, ty, &name, &ni.attrs) - } - hir::ForeignItemStatic(..) 
=> { - foreign::register_static(ccx, &*ni) - } - } - } - - hir_map::NodeVariant(ref v) => { - let llfn; - let fields = if v.node.data.is_struct() { - ccx.sess().bug("struct variant kind unexpected in get_item_val") - } else { - v.node.data.fields() - }; - assert!(!fields.is_empty()); - let ty = ccx.tcx().node_id_to_type(id); - let parent = ccx.tcx().map.get_parent(id); - let enm = ccx.tcx().map.expect_item(parent); - let sym = exported_name(ccx, id, ty, &enm.attrs); - - llfn = match enm.node { - hir::ItemEnum(_, _) => { - register_fn(ccx, (*v).span, sym, id, ty) - } - _ => ccx.sess().bug("NodeVariant, shouldn't happen"), - }; - attributes::inline(llfn, attributes::InlineAttr::Hint); - llfn - } - - hir_map::NodeStructCtor(struct_def) => { - // Only register the constructor if this is a tuple-like struct. - let ctor_id = if struct_def.is_struct() { - ccx.sess().bug("attempt to register a constructor of a non-tuple-like struct") - } else { - struct_def.id() - }; - let parent = ccx.tcx().map.get_parent(id); - let struct_item = ccx.tcx().map.expect_item(parent); - let ty = ccx.tcx().node_id_to_type(ctor_id); - let sym = exported_name(ccx, id, ty, &struct_item.attrs); - let llfn = register_fn(ccx, struct_item.span, sym, ctor_id, ty); - attributes::inline(llfn, attributes::InlineAttr::Hint); - llfn - } - - ref variant => { - ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}", variant)) - } - }; - - // All LLVM globals and functions are initially created as external-linkage - // declarations. If `trans_item`/`trans_fn` later turns the declaration - // into a definition, it adjusts the linkage then (using `update_linkage`). - // - // The exception is foreign items, which have their linkage set inside the - // call to `foreign::register_*` above. We don't touch the linkage after - // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the - // other item translation functions do). 
- - ccx.item_vals().borrow_mut().insert(id, val); - val -} - -fn register_method(ccx: &CrateContext, - id: ast::NodeId, - attrs: &[ast::Attribute], - span: Span) - -> ValueRef { - let mty = ccx.tcx().node_id_to_type(id); - - let sym = exported_name(ccx, id, mty, &attrs); - - if let ty::TyBareFn(_, ref f) = mty.sty { - let llfn = if f.abi == Rust || f.abi == RustCall { - register_fn(ccx, span, sym, id, mty) - } else { - foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id) - }; - attributes::from_fn_attrs(ccx, &attrs, llfn); - return llfn; - } else { - ccx.sess().span_bug(span, "expected bare rust function"); - } -} - -pub fn write_metadata<'a, 'tcx>(cx: &SharedCrateContext<'a, 'tcx>, - krate: &hir::Crate, - reachable: &NodeSet, - mir_map: &MirMap<'tcx>) - -> Vec { - use flate; - - let any_library = cx.sess() - .crate_types - .borrow() - .iter() - .any(|ty| *ty != config::CrateTypeExecutable); - if !any_library { - return Vec::new(); - } - - let cstore = &cx.tcx().sess.cstore; - let metadata = cstore.encode_metadata(cx.tcx(), - cx.export_map(), - cx.item_symbols(), - cx.link_meta(), - reachable, - mir_map, - krate); - let mut compressed = cstore.metadata_encoding_version().to_vec(); - compressed.extend_from_slice(&flate::deflate_bytes(&metadata)); - - let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]); - let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false); - let name = format!("rust_metadata_{}_{}", - cx.link_meta().crate_name, - cx.link_meta().crate_hash); - let buf = CString::new(name).unwrap(); - let llglobal = unsafe { - llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr()) - }; - unsafe { - llvm::LLVMSetInitializer(llglobal, llconst); - let name = - cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target); - let name = CString::new(name).unwrap(); - llvm::LLVMSetSection(llglobal, name.as_ptr()) - } - return metadata; -} - -/// Find any symbols that are defined in one 
compilation unit, but not declared -/// in any other compilation unit. Give these symbols internal linkage. -fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<&str>) { - unsafe { - let mut declared = HashSet::new(); - - // Collect all external declarations in all compilation units. - for ccx in cx.iter() { - for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { - let linkage = llvm::LLVMGetLinkage(val); - // We only care about external declarations (not definitions) - // and available_externally definitions. - if !(linkage == llvm::ExternalLinkage as c_uint && - llvm::LLVMIsDeclaration(val) != 0) && - !(linkage == llvm::AvailableExternallyLinkage as c_uint) { - continue; - } - - let name = CStr::from_ptr(llvm::LLVMGetValueName(val)) - .to_bytes() - .to_vec(); - declared.insert(name); - } - } - - // Examine each external definition. If the definition is not used in - // any other compilation unit, and is not reachable from other crates, - // then give it internal linkage. - for ccx in cx.iter() { - for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) { - // We only care about external definitions. - if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint && - llvm::LLVMIsDeclaration(val) == 0) { - continue; - } - - let name = CStr::from_ptr(llvm::LLVMGetValueName(val)) - .to_bytes() - .to_vec(); - if !declared.contains(&name) && - !reachable.contains(str::from_utf8(&name).unwrap()) { - llvm::SetLinkage(val, llvm::InternalLinkage); - llvm::SetDLLStorageClass(val, llvm::DefaultStorageClass); - } - } - } - } -} - -// Create a `__imp_ = &symbol` global for every public static `symbol`. -// This is required to satisfy `dllimport` references to static data in .rlibs -// when using MSVC linker. We do this only for data, as linker can fix up -// code references on its own. 
-// See #26591, #27438 -fn create_imps(cx: &SharedCrateContext) { - // The x86 ABI seems to require that leading underscores are added to symbol - // names, so we need an extra underscore on 32-bit. There's also a leading - // '\x01' here which disables LLVM's symbol mangling (e.g. no extra - // underscores added in front). - let prefix = if cx.sess().target.target.target_pointer_width == "32" { - "\x01__imp__" - } else { - "\x01__imp_" - }; - unsafe { - for ccx in cx.iter() { - let exported: Vec<_> = iter_globals(ccx.llmod()) - .filter(|&val| { - llvm::LLVMGetLinkage(val) == - llvm::ExternalLinkage as c_uint && - llvm::LLVMIsDeclaration(val) == 0 - }) - .collect(); - - let i8p_ty = Type::i8p(&ccx); - for val in exported { - let name = CStr::from_ptr(llvm::LLVMGetValueName(val)); - let mut imp_name = prefix.as_bytes().to_vec(); - imp_name.extend(name.to_bytes()); - let imp_name = CString::new(imp_name).unwrap(); - let imp = llvm::LLVMAddGlobal(ccx.llmod(), - i8p_ty.to_ref(), - imp_name.as_ptr() as *const _); - let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref()); - llvm::LLVMSetInitializer(imp, init); - llvm::SetLinkage(imp, llvm::ExternalLinkage); - } - } - } -} - -struct ValueIter { - cur: ValueRef, - step: unsafe extern "C" fn(ValueRef) -> ValueRef, -} - -impl Iterator for ValueIter { - type Item = ValueRef; - - fn next(&mut self) -> Option { - let old = self.cur; - if !old.is_null() { - self.cur = unsafe { (self.step)(old) }; - Some(old) - } else { - None - } - } -} - -fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter { - unsafe { - ValueIter { - cur: llvm::LLVMGetFirstGlobal(llmod), - step: llvm::LLVMGetNextGlobal, - } - } -} - -fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter { - unsafe { - ValueIter { - cur: llvm::LLVMGetFirstFunction(llmod), - step: llvm::LLVMGetNextFunction, - } - } -} - -/// The context provided lists a set of reachable ids as calculated by -/// middle::reachable, but this contains far more ids and symbols than we're -/// 
actually exposing from the object file. This function will filter the set in -/// the context to the set of ids which correspond to symbols that are exposed -/// from the object file being generated. -/// -/// This list is later used by linkers to determine the set of symbols needed to -/// be exposed from a dynamic library and it's also encoded into the metadata. -pub fn filter_reachable_ids(ccx: &SharedCrateContext) -> NodeSet { - ccx.reachable().iter().map(|x| *x).filter(|id| { - // First, only worry about nodes which have a symbol name - ccx.item_symbols().borrow().contains_key(id) - }).filter(|&id| { - // Next, we want to ignore some FFI functions that are not exposed from - // this crate. Reachable FFI functions can be lumped into two - // categories: - // - // 1. Those that are included statically via a static library - // 2. Those included otherwise (e.g. dynamically or via a framework) - // - // Although our LLVM module is not literally emitting code for the - // statically included symbols, it's an export of our library which - // needs to be passed on to the linker and encoded in the metadata. - // - // As a result, if this id is an FFI item (foreign item) then we only - // let it through if it's included statically. - match ccx.tcx().map.get(id) { - hir_map::NodeForeignItem(..) => { - ccx.sess().cstore.is_statically_included_foreign_item(id) - } - _ => true, - } - }).collect() -} - -pub fn trans_crate<'tcx>(tcx: &ty::ctxt<'tcx>, - mir_map: &MirMap<'tcx>, - analysis: ty::CrateAnalysis) - -> CrateTranslation { - let _task = tcx.dep_graph.in_task(DepNode::TransCrate); - - // Be careful with this krate: obviously it gives access to the - // entire contents of the krate. So if you push any subtasks of - // `TransCrate`, you need to be careful to register "reads" of the - // particular items that will be processed. - let krate = tcx.map.krate(); - - let ty::CrateAnalysis { export_map, reachable, name, .. 
} = analysis; - - let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks { - v - } else { - tcx.sess.opts.debug_assertions - }; - - let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks { - v - } else { - tcx.sess.opts.debug_assertions - }; - - // Before we touch LLVM, make sure that multithreading is enabled. - unsafe { - use std::sync::Once; - static INIT: Once = Once::new(); - static mut POISONED: bool = false; - INIT.call_once(|| { - if llvm::LLVMStartMultithreaded() != 1 { - // use an extra bool to make sure that all future usage of LLVM - // cannot proceed despite the Once not running more than once. - POISONED = true; - } - - ::back::write::configure_llvm(&tcx.sess); - }); - - if POISONED { - tcx.sess.bug("couldn't enable multi-threaded LLVM"); - } - } - - let link_meta = link::build_link_meta(&tcx.sess, krate, name); - - let codegen_units = tcx.sess.opts.cg.codegen_units; - let shared_ccx = SharedCrateContext::new(&link_meta.crate_name, - codegen_units, - tcx, - &mir_map, - export_map, - Sha256::new(), - link_meta.clone(), - reachable, - check_overflow, - check_dropflag); - - { - let ccx = shared_ccx.get_ccx(0); - - // First, verify intrinsics. - intrinsic::check_intrinsics(&ccx); - - // Next, translate all items. See `TransModVisitor` for - // details on why we walk in this particular way. 
- { - let _icx = push_ctxt("text"); - intravisit::walk_mod(&mut TransItemsWithinModVisitor { ccx: &ccx }, &krate.module); - krate.visit_all_items(&mut TransModVisitor { ccx: &ccx }); - } - } - - for ccx in shared_ccx.iter() { - if ccx.sess().opts.debuginfo != NoDebugInfo { - debuginfo::finalize(&ccx); - } - for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() { - unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); - llvm::LLVMDeleteGlobal(old_g); - } - } - } - - let reachable_symbol_ids = filter_reachable_ids(&shared_ccx); - - // Translate the metadata. - let metadata = time(tcx.sess.time_passes(), "write metadata", || { - write_metadata(&shared_ccx, krate, &reachable_symbol_ids, mir_map) - }); - - if shared_ccx.sess().trans_stats() { - let stats = shared_ccx.stats(); - println!("--- trans stats ---"); - println!("n_glues_created: {}", stats.n_glues_created.get()); - println!("n_null_glues: {}", stats.n_null_glues.get()); - println!("n_real_glues: {}", stats.n_real_glues.get()); - - println!("n_fns: {}", stats.n_fns.get()); - println!("n_monos: {}", stats.n_monos.get()); - println!("n_inlines: {}", stats.n_inlines.get()); - println!("n_closures: {}", stats.n_closures.get()); - println!("fn stats:"); - stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| { - insns_b.cmp(&insns_a) - }); - for tuple in stats.fn_stats.borrow().iter() { - match *tuple { - (ref name, insns) => { - println!("{} insns, {}", insns, *name); - } - } - } - } - if shared_ccx.sess().count_llvm_insns() { - for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() { - println!("{:7} {}", *v, *k); - } - } - - let modules = shared_ccx.iter() - .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() }) - .collect(); - - let sess = shared_ccx.sess(); - let mut reachable_symbols = reachable_symbol_ids.iter().map(|id| { - shared_ccx.item_symbols().borrow()[id].to_string() - 
}).collect::>(); - if sess.entry_fn.borrow().is_some() { - reachable_symbols.push("main".to_string()); - } - - // For the purposes of LTO, we add to the reachable set all of the upstream - // reachable extern fns. These functions are all part of the public ABI of - // the final product, so LTO needs to preserve them. - if sess.lto() { - for cnum in sess.cstore.crates() { - let syms = sess.cstore.reachable_ids(cnum); - reachable_symbols.extend(syms.into_iter().filter(|did| { - sess.cstore.is_extern_fn(shared_ccx.tcx(), *did) || - sess.cstore.is_static(*did) - }).map(|did| { - sess.cstore.item_symbol(did) - })); - } - } - - if codegen_units > 1 { - internalize_symbols(&shared_ccx, - &reachable_symbols.iter().map(|x| &x[..]).collect()); - } - - if sess.target.target.options.is_like_msvc && - sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) { - create_imps(&shared_ccx); - } - - let metadata_module = ModuleTranslation { - llcx: shared_ccx.metadata_llcx(), - llmod: shared_ccx.metadata_llmod(), - }; - let no_builtins = attr::contains_name(&krate.attrs, "no_builtins"); - - assert_dep_graph::assert_dep_graph(tcx); - - CrateTranslation { - modules: modules, - metadata_module: metadata_module, - link: link_meta, - metadata: metadata, - reachable: reachable_symbols, - no_builtins: no_builtins, - } -} - -/// We visit all the items in the krate and translate them. We do -/// this in two walks. The first walk just finds module items. It then -/// walks the full contents of those module items and translates all -/// the items within. Note that this entire process is O(n). The -/// reason for this two phased walk is that each module is -/// (potentially) placed into a distinct codegen-unit. This walk also -/// ensures that the immediate contents of each module is processed -/// entirely before we proceed to find more modules, helping to ensure -/// an equitable distribution amongst codegen-units. 
-pub struct TransModVisitor<'a, 'tcx: 'a> { - pub ccx: &'a CrateContext<'a, 'tcx>, -} - -impl<'a, 'tcx, 'v> Visitor<'v> for TransModVisitor<'a, 'tcx> { - fn visit_item(&mut self, i: &hir::Item) { - match i.node { - hir::ItemMod(_) => { - let item_ccx = self.ccx.rotate(); - intravisit::walk_item(&mut TransItemsWithinModVisitor { ccx: &item_ccx }, i); - } - _ => { } - } - } -} - -/// Translates all the items within a given module. Expects owner to -/// invoke `walk_item` on a module item. Ignores nested modules. -pub struct TransItemsWithinModVisitor<'a, 'tcx: 'a> { - pub ccx: &'a CrateContext<'a, 'tcx>, -} - -impl<'a, 'tcx, 'v> Visitor<'v> for TransItemsWithinModVisitor<'a, 'tcx> { - fn visit_nested_item(&mut self, item_id: hir::ItemId) { - self.visit_item(self.ccx.tcx().map.expect_item(item_id.id)); - } - - fn visit_item(&mut self, i: &hir::Item) { - match i.node { - hir::ItemMod(..) => { - // skip modules, they will be uncovered by the TransModVisitor - } - _ => { - let def_id = self.ccx.tcx().map.local_def_id(i.id); - let tcx = self.ccx.tcx(); - - // Create a subtask for trans'ing a particular item. We are - // giving `trans_item` access to this item, so also record a read. - tcx.dep_graph.with_task(DepNode::TransCrateItem(def_id), || { - tcx.dep_graph.read(DepNode::Hir(def_id)); - trans_item(self.ccx, i); - }); - - intravisit::walk_item(self, i); - } - } - } -} diff --git a/src/librustc_trans/trans/build.rs b/src/librustc_trans/trans/build.rs deleted file mode 100644 index 89e5855717952..0000000000000 --- a/src/librustc_trans/trans/build.rs +++ /dev/null @@ -1,1071 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(dead_code)] // FFI wrappers -#![allow(non_snake_case)] - -use llvm; -use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; -use llvm::{Opcode, IntPredicate, RealPredicate}; -use llvm::{ValueRef, BasicBlockRef}; -use trans::common::*; -use syntax::codemap::Span; - -use trans::builder::Builder; -use trans::type_::Type; -use trans::debuginfo::DebugLoc; -use trans::Disr; - -use libc::{c_uint, c_char}; - -pub fn terminate(cx: Block, _: &str) { - debug!("terminate({})", cx.to_str()); - cx.terminated.set(true); -} - -pub fn check_not_terminated(cx: Block) { - if cx.terminated.get() { - panic!("already terminated!"); - } -} - -pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { - let b = cx.fcx.ccx.builder(); - b.position_at_end(cx.llbb); - b -} - -// The difference between a block being unreachable and being terminated is -// somewhat obscure, and has to do with error checking. When a block is -// terminated, we're saying that trying to add any further statements in the -// block is an error. On the other hand, if something is unreachable, that -// means that the block was terminated in some way that we don't want to check -// for (panic/break/return statements, call to diverging functions, etc), and -// further instructions to the block should simply be ignored. 
- -pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "RetVoid"); - debug_loc.apply(cx.fcx); - B(cx).ret_void(); -} - -pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Ret"); - debug_loc.apply(cx.fcx); - B(cx).ret(v); -} - -pub fn AggregateRet(cx: Block, - ret_vals: &[ValueRef], - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "AggregateRet"); - debug_loc.apply(cx.fcx); - B(cx).aggregate_ret(ret_vals); -} - -pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Br"); - debug_loc.apply(cx.fcx); - B(cx).br(dest); -} - -pub fn CondBr(cx: Block, - if_: ValueRef, - then: BasicBlockRef, - else_: BasicBlockRef, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "CondBr"); - debug_loc.apply(cx.fcx); - B(cx).cond_br(if_, then, else_); -} - -pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize) - -> ValueRef { - if cx.unreachable.get() { return _Undef(v); } - check_not_terminated(cx); - terminate(cx, "Switch"); - B(cx).switch(v, else_, num_cases) -} - -pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { - unsafe { - if llvm::LLVMIsUndef(s) == llvm::True { return; } - llvm::LLVMAddCase(s, on_val, dest); - } -} - -pub fn IndirectBr(cx: Block, - addr: ValueRef, - num_dests: usize, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "IndirectBr"); - debug_loc.apply(cx.fcx); - B(cx).indirect_br(addr, num_dests); -} - -pub fn Invoke(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - then: BasicBlockRef, - catch: BasicBlockRef, - attributes: Option, - debug_loc: DebugLoc) - -> ValueRef { - 
if cx.unreachable.get() { - return C_null(Type::i8(cx.ccx())); - } - check_not_terminated(cx); - terminate(cx, "Invoke"); - debug!("Invoke({} with arguments ({}))", - cx.val_to_string(fn_), - args.iter().map(|a| cx.val_to_string(*a)).collect::>().join(", ")); - debug_loc.apply(cx.fcx); - B(cx).invoke(fn_, args, then, catch, attributes) -} - -pub fn Unreachable(cx: Block) { - if cx.unreachable.get() { - return - } - cx.unreachable.set(true); - if !cx.terminated.get() { - B(cx).unreachable(); - } -} - -pub fn _Undef(val: ValueRef) -> ValueRef { - unsafe { - return llvm::LLVMGetUndef(val_ty(val).to_ref()); - } -} - -/* Arithmetic */ -pub fn Add(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).add(lhs, rhs) -} - -pub fn NSWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswadd(lhs, rhs) -} - -pub fn NUWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwadd(lhs, rhs) -} - -pub fn FAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fadd(lhs, rhs) -} - -pub fn Sub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).sub(lhs, rhs) -} - -pub fn NSWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswsub(lhs, rhs) -} - -pub fn NUWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if 
cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwsub(lhs, rhs) -} - -pub fn FSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fsub(lhs, rhs) -} - -pub fn Mul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).mul(lhs, rhs) -} - -pub fn NSWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswmul(lhs, rhs) -} - -pub fn NUWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwmul(lhs, rhs) -} - -pub fn FMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fmul(lhs, rhs) -} - -pub fn UDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).udiv(lhs, rhs) -} - -pub fn SDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).sdiv(lhs, rhs) -} - -pub fn ExactSDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).exactsdiv(lhs, rhs) -} - -pub fn FDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fdiv(lhs, rhs) -} - -pub fn URem(cx: Block, 
- lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).urem(lhs, rhs) -} - -pub fn SRem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).srem(lhs, rhs) -} - -pub fn FRem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).frem(lhs, rhs) -} - -pub fn Shl(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).shl(lhs, rhs) -} - -pub fn LShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).lshr(lhs, rhs) -} - -pub fn AShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).ashr(lhs, rhs) -} - -pub fn And(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).and(lhs, rhs) -} - -pub fn Or(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).or(lhs, rhs) -} - -pub fn Xor(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).xor(lhs, rhs) -} - -pub fn BinOp(cx: Block, - op: Opcode, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - 
debug_loc.apply(cx.fcx); - B(cx).binop(op, lhs, rhs) -} - -pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).neg(v) -} - -pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nswneg(v) -} - -pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nuwneg(v) -} -pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).fneg(v) -} - -pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).not(v) -} - -pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } - AllocaFcx(cx.fcx, ty, name) - } -} - -pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { - let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); - DebugLoc::None.apply(fcx); - b.alloca(ty, name) -} - -pub fn Free(cx: Block, pointer_val: ValueRef) { - if cx.unreachable.get() { return; } - B(cx).free(pointer_val) -} - -pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - return llvm::LLVMGetUndef(eltty.to_ref()); - } - B(cx).load(pointer_val) - } -} - -pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).volatile_load(pointer_val) - } -} - 
-pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - return llvm::LLVMGetUndef(ccx.int_type().to_ref()); - } - B(cx).atomic_load(pointer_val, order) - } -} - - -pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: Disr, - hi: Disr, signed: llvm::Bool) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_range_assert(pointer_val, lo.0, hi.0, signed) - } -} - -pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(ptr); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_nonnull(ptr) - } -} - -pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).store(val, ptr) -} - -pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).volatile_store(val, ptr) -} - -pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - if cx.unreachable.get() { return; } - B(cx).atomic_store(val, ptr, order) -} - -pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gep(pointer, indices) - } -} - -// Simple wrapper around GEP that takes an array of ints and wraps them -// in C_i32() -#[inline] -pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return 
llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gepi(base, ixs) - } -} - -pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).inbounds_gep(pointer, indices) - } -} - -pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).struct_gep(pointer, idx) - } -} - -pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string(_str) - } -} - -pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string_ptr(_str) - } -} - -/* Casts */ -pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc(val, dest_ty) - } -} - -pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext(val, dest_ty) - } -} - -pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext(val, dest_ty) - } -} - -pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptoui(val, dest_ty) - } -} - -pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptosi(val, dest_ty) - } -} - -pub fn UIToFP(cx: Block, val: 
ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).uitofp(val, dest_ty) - } -} - -pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sitofp(val, dest_ty) - } -} - -pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptrunc(val, dest_ty) - } -} - -pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fpext(val, dest_ty) - } -} - -pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).ptrtoint(val, dest_ty) - } -} - -pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).inttoptr(val, dest_ty) - } -} - -pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).bitcast(val, dest_ty) - } -} - -pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext_or_bitcast(val, dest_ty) - } -} - -pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext_or_bitcast(val, dest_ty) - } -} - -pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc_or_bitcast(val, dest_ty) - } -} - -pub fn Cast(cx: Block, op: 
Opcode, val: ValueRef, dest_ty: Type, - _: *const u8) - -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).cast(op, val, dest_ty) - } -} - -pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).pointercast(val, dest_ty) - } -} - -pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).intcast(val, dest_ty) - } -} - -pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fpcast(val, dest_ty) - } -} - - -/* Comparisons */ -pub fn ICmp(cx: Block, - op: IntPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).icmp(op, lhs, rhs) - } -} - -pub fn FCmp(cx: Block, - op: RealPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).fcmp(op, lhs, rhs) - } -} - -/* Miscellaneous instructions */ -pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).empty_phi(ty) - } -} - -pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef], - bbs: &[BasicBlockRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).phi(ty, vals, bbs) - } -} - -pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { - unsafe { - if llvm::LLVMIsUndef(phi) == llvm::True { return; } - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); - } -} - -pub fn 
_UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - let ty = val_ty(fn_); - let retty = if ty.kind() == llvm::Function { - ty.return_type() - } else { - ccx.int_type() - }; - B(cx).count_insn("ret_undef"); - llvm::LLVMGetUndef(retty.to_ref()) - } -} - -pub fn add_span_comment(cx: Block, sp: Span, text: &str) { - B(cx).add_span_comment(sp, text) -} - -pub fn add_comment(cx: Block, text: &str) { - B(cx).add_comment(text) -} - -pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, - inputs: &[ValueRef], output: Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> ValueRef { - B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) -} - -pub fn Call(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - attributes: Option, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _UndefReturn(cx, fn_); - } - debug_loc.apply(cx.fcx); - B(cx).call(fn_, args, attributes) -} - -pub fn CallWithConv(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - conv: CallConv, - attributes: Option, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _UndefReturn(cx, fn_); - } - debug_loc.apply(cx.fcx); - B(cx).call_with_conv(fn_, args, conv, attributes) -} - -pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { - if cx.unreachable.get() { return; } - B(cx).atomic_fence(order, scope) -} - -pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { - if cx.unreachable.get() { return _Undef(then); } - B(cx).select(if_, then, else_) -} - -pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).va_arg(list, ty) - } -} - -pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - 
B(cx).extract_element(vec_val, index) - } -} - -pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef, - index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_element(vec_val, elt_val, index) - } -} - -pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef, - mask: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).shuffle_vector(v1, v2, mask) - } -} - -pub fn VectorSplat(cx: Block, num_elts: usize, elt_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).vector_splat(num_elts, elt_val) - } -} - -pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).extract_value(agg_val, index) - } -} - -pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_value(agg_val, elt_val, index) - } -} - -pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_null(val) - } -} - -pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_not_null(val) - } -} - -pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } - B(cx).ptrdiff(lhs, rhs) - } -} - -pub fn Trap(cx: Block) { - if cx.unreachable.get() { return; } - B(cx).trap(); -} - -pub fn LandingPad(cx: Block, ty: Type, 
pers_fn: ValueRef, - num_clauses: usize) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn) -} - -pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) { - B(cx).add_clause(landing_pad, clause) -} - -pub fn SetCleanup(cx: Block, landing_pad: ValueRef) { - B(cx).set_cleanup(landing_pad) -} - -pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "Resume"); - B(cx).resume(exn) -} - -// Atomic Operations -pub fn AtomicCmpXchg(cx: Block, dst: ValueRef, - cmp: ValueRef, src: ValueRef, - order: AtomicOrdering, - failure_order: AtomicOrdering) -> ValueRef { - B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order) -} -pub fn AtomicRMW(cx: Block, op: AtomicBinOp, - dst: ValueRef, src: ValueRef, - order: AtomicOrdering) -> ValueRef { - B(cx).atomic_rmw(op, dst, src, order) -} diff --git a/src/librustc_trans/trans/cabi.rs b/src/librustc_trans/trans/cabi.rs deleted file mode 100644 index 4bfbb8b69f0ac..0000000000000 --- a/src/librustc_trans/trans/cabi.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -pub use self::ArgKind::*; - -use llvm::Attribute; -use std::option; -use trans::context::CrateContext; -use trans::cabi_x86; -use trans::cabi_x86_64; -use trans::cabi_x86_win64; -use trans::cabi_arm; -use trans::cabi_aarch64; -use trans::cabi_powerpc; -use trans::cabi_powerpc64; -use trans::cabi_mips; -use trans::type_::Type; - -#[derive(Clone, Copy, PartialEq)] -pub enum ArgKind { - /// Pass the argument directly using the normal converted - /// LLVM type or by coercing to another specified type - Direct, - /// Pass the argument indirectly via a hidden pointer - Indirect, - /// Ignore the argument (useful for empty struct) - Ignore, -} - -/// Information about how a specific C type -/// should be passed to or returned from a function -/// -/// This is borrowed from clang's ABIInfo.h -#[derive(Clone, Copy)] -pub struct ArgType { - pub kind: ArgKind, - /// Original LLVM type - pub ty: Type, - /// Coerced LLVM Type - pub cast: option::Option, - /// Dummy argument, which is emitted before the real argument - pub pad: option::Option, - /// LLVM attribute of argument - pub attr: option::Option -} - -impl ArgType { - pub fn direct(ty: Type, cast: option::Option, - pad: option::Option, - attr: option::Option) -> ArgType { - ArgType { - kind: Direct, - ty: ty, - cast: cast, - pad: pad, - attr: attr - } - } - - pub fn indirect(ty: Type, attr: option::Option) -> ArgType { - ArgType { - kind: Indirect, - ty: ty, - cast: option::Option::None, - pad: option::Option::None, - attr: attr - } - } - - pub fn ignore(ty: Type) -> ArgType { - ArgType { - kind: Ignore, - ty: ty, - cast: None, - pad: None, - attr: None, - } - } - - pub fn is_indirect(&self) -> bool { - return self.kind == Indirect; - } - - pub fn is_ignore(&self) -> bool { - return self.kind == Ignore; - } -} - -/// Metadata describing how the arguments to a native function -/// should be passed in order to respect the native ABI. 
-/// -/// I will do my best to describe this structure, but these -/// comments are reverse-engineered and may be inaccurate. -NDM -pub struct FnType { - /// The LLVM types of each argument. - pub arg_tys: Vec , - - /// LLVM return type. - pub ret_ty: ArgType, -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - match &ccx.sess().target.target.arch[..] { - "x86" => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def), - "x86_64" => if ccx.sess().target.target.options.is_like_windows { - cabi_x86_win64::compute_abi_info(ccx, atys, rty, ret_def) - } else { - cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def) - }, - "aarch64" => cabi_aarch64::compute_abi_info(ccx, atys, rty, ret_def), - "arm" => { - let flavor = if ccx.sess().target.target.target_os == "ios" { - cabi_arm::Flavor::Ios - } else { - cabi_arm::Flavor::General - }; - cabi_arm::compute_abi_info(ccx, atys, rty, ret_def, flavor) - }, - "mips" => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def), - "powerpc" => cabi_powerpc::compute_abi_info(ccx, atys, rty, ret_def), - "powerpc64" | "powerpc64le" => cabi_powerpc64::compute_abi_info(ccx, atys, rty, ret_def), - a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a) - ), - } -} diff --git a/src/librustc_trans/trans/cabi_aarch64.rs b/src/librustc_trans/trans/cabi_aarch64.rs deleted file mode 100644 index f2434ceee2b85..0000000000000 --- a/src/librustc_trans/trans/cabi_aarch64.rs +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(non_upper_case_globals)] - -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{FnType, ArgType}; -use trans::context::CrateContext; -use trans::type_::Type; - -use std::cmp; - -fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return align_up_to(off, a); -} - -fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - ty_align(elt) * len - } - _ => panic!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - _ => panic!("ty_size: unhandled type") - } -} - -fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { - fn check_array(ty: Type) -> Option<(Type, u64)> { - let len = ty.array_length() as u64; - if len == 0 { - return None - } - let elt = ty.element_type(); - - // if our element is an HFA/HVA, so are we; multiply members by our len - is_homogenous_aggregate_ty(elt).map(|(base_ty, 
members)| (base_ty, len * members)) - } - - fn check_struct(ty: Type) -> Option<(Type, u64)> { - let str_tys = ty.field_types(); - if str_tys.len() == 0 { - return None - } - - let mut prev_base_ty = None; - let mut members = 0; - for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { - match (prev_base_ty, opt_homog_agg) { - // field isn't itself an HFA, so we aren't either - (_, None) => return None, - - // first field - store its type and number of members - (None, Some((field_ty, field_members))) => { - prev_base_ty = Some(field_ty); - members = field_members; - }, - - // 2nd or later field - give up if it's a different type; otherwise incr. members - (Some(prev_ty), Some((field_ty, field_members))) => { - if prev_ty != field_ty { - return None; - } - members += field_members; - } - } - } - - // Because of previous checks, we know prev_base_ty is Some(...) because - // 1. str_tys has at least one element; and - // 2. prev_base_ty was filled in (or we would've returned early) - let (base_ty, members) = (prev_base_ty.unwrap(), members); - - // Ensure there is no padding. 
- if ty_size(ty) == ty_size(base_ty) * (members as usize) { - Some((base_ty, members)) - } else { - None - } - } - - let homog_agg = match ty.kind() { - Float => Some((ty, 1)), - Double => Some((ty, 1)), - Array => check_array(ty), - Struct => check_struct(ty), - Vector => match ty_size(ty) { - 4|8 => Some((ty, 1)), - _ => None - }, - _ => None - }; - - // Ensure we have at most four uniquely addressable members - homog_agg.and_then(|(base_ty, members)| { - if members > 0 && members <= 4 { - Some((base_ty, members)) - } else { - None - } - }) -} - -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); - } - let size = ty_size(ty); - if size <= 16 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) - } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) - }; - return ArgType::direct(ty, Some(llty), None, None); - } - ArgType::indirect(ty, Some(Attribute::StructRet)) -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); - } - let size = ty_size(ty); - if size <= 16 { - let llty = if size == 0 { - Type::array(&Type::i64(ccx), 0) - } else if size == 1 { - Type::i8(ccx) - } else if size == 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) - } 
else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) - }; - return ArgType::direct(ty, Some(llty), None, None); - } - ArgType::indirect(ty, None) -} - -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - } -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty); - arg_tys.push(ty); - } - - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_arm.rs b/src/librustc_trans/trans/cabi_arm.rs deleted file mode 100644 index c5116e738048d..0000000000000 --- a/src/librustc_trans/trans/cabi_arm.rs +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(non_upper_case_globals)] - -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{FnType, ArgType}; -use trans::context::CrateContext; -use trans::type_::Type; - -use std::cmp; - -pub enum Flavor { - General, - Ios -} - -type TyAlignFn = fn(ty: Type) -> usize; - -fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize { - let a = align_fn(ty); - return align_up_to(off, a); -} - -fn general_ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, general_ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - general_ty_align(elt) - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - general_ty_align(elt) * len - } - _ => panic!("ty_align: unhandled type") - } -} - -// For more information see: -// ARMv7 -// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual -// /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html -// ARMv6 -// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual -// /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html -fn ios_ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8), - Pointer => 4, - Float => 4, - Double => 4, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ios_ty_align(elt) - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - ios_ty_align(elt) * len - } - _ => panic!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type, 
align_fn: TyAlignFn) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter() - .fold(0, |s, t| { - align(s, *t, align_fn) + ty_size(*t, align_fn) - }); - align(size, ty, align_fn) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt, align_fn); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt, align_fn); - len * eltsz - } - _ => panic!("ty_size: unhandled type") - } -} - -fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - let size = ty_size(ty, align_fn); - if size <= 4 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else { - Type::i32(ccx) - }; - return ArgType::direct(ty, Some(llty), None, None); - } - ArgType::indirect(ty, Some(Attribute::StructRet)) -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - let align = align_fn(ty); - let size = ty_size(ty, align_fn); - let llty = if align <= 4 { - Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64) - } else { - Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64) - }; - ArgType::direct(ty, Some(llty), None, None) -} - -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - } -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: 
&[Type], - rty: Type, - ret_def: bool, - flavor: Flavor) -> FnType { - let align_fn = match flavor { - Flavor::General => general_ty_align as TyAlignFn, - Flavor::Ios => ios_ty_align as TyAlignFn, - }; - - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty, align_fn); - arg_tys.push(ty); - } - - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty, align_fn) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_mips.rs b/src/librustc_trans/trans/cabi_mips.rs deleted file mode 100644 index bcffb238f5950..0000000000000 --- a/src/librustc_trans/trans/cabi_mips.rs +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(non_upper_case_globals)] - -use libc::c_uint; -use std::cmp; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; -use trans::cabi::{ArgType, FnType}; -use trans::context::CrateContext; -use trans::type_::Type; - -fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return align_up_to(off, a); -} - -fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - ty_align(elt) * len - } - _ => panic!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - _ => panic!("ty_size: unhandled type") - } -} - -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::indirect(ty, Some(Attribute::StructRet)) - } -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type, 
offset: &mut usize) -> ArgType { - let orig_offset = *offset; - let size = ty_size(ty) * 8; - let mut align = ty_align(ty); - - align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - padding_ty(ccx, align, orig_offset), - None - ) - } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double - | Vector => true, - _ => false - }; -} - -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i32(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i32(ccx); - let mut args = Vec::new(); - - let mut n = size / 32; - while n > 0 { - args.push(int_ty); - n -= 1; - } - - let r = size % 32; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); - } - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let sret = ret_ty.is_indirect(); - let mut arg_tys = Vec::new(); - let mut offset = if sret { 4 } else { 0 }; - - for aty in atys { - let ty = classify_arg_ty(ccx, *aty, &mut offset); - arg_tys.push(ty); - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_powerpc.rs b/src/librustc_trans/trans/cabi_powerpc.rs deleted file mode 100644 index 1bcc8fd6bbb90..0000000000000 --- 
a/src/librustc_trans/trans/cabi_powerpc.rs +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use libc::c_uint; -use llvm; -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; -use trans::cabi::{FnType, ArgType}; -use trans::context::CrateContext; -use trans::type_::Type; - -use std::cmp; - -fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return align_up_to(off, a); -} - -fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => { - unsafe { - ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8 - } - } - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - _ => panic!("ty_size: unhandled type") - } -} - -fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => { - unsafe { - ((llvm::LLVMGetIntTypeWidth(ty.to_ref()) as usize) + 7) / 8 - } - } - Pointer => 4, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - _ => panic!("ty_size: unhandled type") - } -} - -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - 
if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::indirect(ty, Some(Attribute::StructRet)) - } -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType { - let orig_offset = *offset; - let size = ty_size(ty) * 8; - let mut align = ty_align(ty); - - align = cmp::min(cmp::max(align, 4), 8); - *offset = align_up_to(*offset, align); - *offset += align_up_to(size, align * 8) / 8; - - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } else { - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - padding_ty(ccx, align, orig_offset), - None - ) - } -} - -fn is_reg_ty(ty: Type) -> bool { - return match ty.kind() { - Integer - | Pointer - | Float - | Double => true, - _ => false - }; -} - -fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option { - if ((align - 1 ) & offset) > 0 { - Some(Type::i32(ccx)) - } else { - None - } -} - -fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec { - let int_ty = Type::i32(ccx); - let mut args = Vec::new(); - - let mut n = size / 32; - while n > 0 { - args.push(int_ty); - n -= 1; - } - - let r = size % 32; - if r > 0 { - unsafe { - args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint))); - } - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_int(ccx, size), false) -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let sret = ret_ty.is_indirect(); - let mut arg_tys = Vec::new(); - let mut offset = if sret { 4 } else { 0 }; - - for aty in atys { - let ty = classify_arg_ty(ccx, *aty, &mut offset); - 
arg_tys.push(ty); - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_powerpc64.rs b/src/librustc_trans/trans/cabi_powerpc64.rs deleted file mode 100644 index cba73004279d6..0000000000000 --- a/src/librustc_trans/trans/cabi_powerpc64.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// FIXME: The PowerPC64 ABI needs to zero or sign extend function -// call parameters, but compute_abi_info() is passed LLVM types -// which have no sign information. -// -// Alignment of 128 bit types is not currently handled, this will -// need to be fixed when PowerPC vector support is added. - -use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; -use trans::cabi::{FnType, ArgType}; -use trans::context::CrateContext; -use trans::type_::Type; - -use std::cmp; - -fn align_up_to(off: usize, a: usize) -> usize { - return (off + a - 1) / a * a; -} - -fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return align_up_to(off, a); -} - -fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - _ => panic!("ty_align: unhandled type") - } -} - -fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - let str_tys = 
ty.field_types(); - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let str_tys = ty.field_types(); - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - _ => panic!("ty_size: unhandled type") - } -} - -fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> { - fn check_array(ty: Type) -> Option<(Type, u64)> { - let len = ty.array_length() as u64; - if len == 0 { - return None - } - let elt = ty.element_type(); - - // if our element is an HFA/HVA, so are we; multiply members by our len - is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members)) - } - - fn check_struct(ty: Type) -> Option<(Type, u64)> { - let str_tys = ty.field_types(); - if str_tys.len() == 0 { - return None - } - - let mut prev_base_ty = None; - let mut members = 0; - for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) { - match (prev_base_ty, opt_homog_agg) { - // field isn't itself an HFA, so we aren't either - (_, None) => return None, - - // first field - store its type and number of members - (None, Some((field_ty, field_members))) => { - prev_base_ty = Some(field_ty); - members = field_members; - }, - - // 2nd or later field - give up if it's a different type; otherwise incr. members - (Some(prev_ty), Some((field_ty, field_members))) => { - if prev_ty != field_ty { - return None; - } - members += field_members; - } - } - } - - // Because of previous checks, we know prev_base_ty is Some(...) because - // 1. str_tys has at least one element; and - // 2. prev_base_ty was filled in (or we would've returned early) - let (base_ty, members) = (prev_base_ty.unwrap(), members); - - // Ensure there is no padding. 
- if ty_size(ty) == ty_size(base_ty) * (members as usize) { - Some((base_ty, members)) - } else { - None - } - } - - let homog_agg = match ty.kind() { - Float => Some((ty, 1)), - Double => Some((ty, 1)), - Array => check_array(ty), - Struct => check_struct(ty), - _ => None - }; - - // Ensure we have at most eight uniquely addressable members - homog_agg.and_then(|(base_ty, members)| { - if members > 0 && members <= 8 { - Some((base_ty, members)) - } else { - None - } - }) -} - -fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - - // The PowerPC64 big endian ABI doesn't return aggregates in registers - if ccx.sess().target.target.arch == "powerpc64" { - return ArgType::indirect(ty, Some(Attribute::StructRet)) - } - - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); - } - let size = ty_size(ty); - if size <= 16 { - let llty = if size <= 1 { - Type::i8(ccx) - } else if size <= 2 { - Type::i16(ccx) - } else if size <= 4 { - Type::i32(ccx) - } else if size <= 8 { - Type::i64(ccx) - } else { - Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) - }; - return ArgType::direct(ty, Some(llty), None, None); - } - - ArgType::indirect(ty, Some(Attribute::StructRet)) -} - -fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { - if is_reg_ty(ty) { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - return ArgType::direct(ty, None, None, attr); - } - if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { - let llty = Type::array(&base_ty, members); - return ArgType::direct(ty, Some(llty), None, None); - } - - ArgType::direct( - ty, - Some(struct_ty(ccx, ty)), - None, - None - ) -} - -fn is_reg_ty(ty: Type) -> bool { - match ty.kind() { - Integer - | 
Pointer - | Float - | Double => true, - _ => false - } -} - -fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec { - let long_ty = Type::i64(ccx); - let mut args = Vec::new(); - - let mut n = size / 64; - while n > 0 { - args.push(long_ty); - n -= 1; - } - - let r = size % 64; - if r > 0 { - args.push(Type::ix(ccx, r as u64)); - } - - args -} - -fn struct_ty(ccx: &CrateContext, ty: Type) -> Type { - let size = ty_size(ty) * 8; - Type::struct_(ccx, &coerce_to_long(ccx, size), false) -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let ret_ty = if ret_def { - classify_ret_ty(ccx, rty) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let mut arg_tys = Vec::new(); - for &aty in atys { - let ty = classify_arg_ty(ccx, aty); - arg_tys.push(ty); - }; - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_x86.rs b/src/librustc_trans/trans/cabi_x86.rs deleted file mode 100644 index 50a3095dea169..0000000000000 --- a/src/librustc_trans/trans/cabi_x86.rs +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use self::Strategy::*; -use llvm::*; -use trans::cabi::{ArgType, FnType}; -use trans::type_::Type; -use super::common::*; -use super::machine::*; - -enum Strategy { RetValue(Type), RetPointer } -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - - let ret_ty; - if !ret_def { - ret_ty = ArgType::direct(Type::void(ccx), None, None, None); - } else if rty.kind() == Struct { - // Returning a structure. 
Most often, this will use - // a hidden first argument. On some platforms, though, - // small structs are returned as integers. - // - // Some links: - // http://www.angelcode.com/dev/callconv/callconv.html - // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp - - let t = &ccx.sess().target.target; - let strategy = if t.options.is_like_osx || t.options.is_like_windows { - match llsize_of_alloc(ccx, rty) { - 1 => RetValue(Type::i8(ccx)), - 2 => RetValue(Type::i16(ccx)), - 4 => RetValue(Type::i32(ccx)), - 8 => RetValue(Type::i64(ccx)), - _ => RetPointer - } - } else { - RetPointer - }; - - match strategy { - RetValue(t) => { - ret_ty = ArgType::direct(rty, Some(t), None, None); - } - RetPointer => { - ret_ty = ArgType::indirect(rty, Some(Attribute::StructRet)); - } - } - } else { - let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ret_ty = ArgType::direct(rty, None, None, attr); - } - - for &t in atys { - let ty = match t.kind() { - Struct => { - let size = llsize_of_alloc(ccx, t); - if size == 0 { - ArgType::ignore(t) - } else { - ArgType::indirect(t, Some(Attribute::ByVal)) - } - } - _ => { - let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(t, None, None, attr) - } - }; - arg_tys.push(ty); - } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_x86_64.rs b/src/librustc_trans/trans/cabi_x86_64.rs deleted file mode 100644 index 00d8fdad32de1..0000000000000 --- a/src/librustc_trans/trans/cabi_x86_64.rs +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// The classification code for the x86_64 ABI is taken from the clay language -// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp - -#![allow(non_upper_case_globals)] -use self::RegClass::*; - -use llvm::{Integer, Pointer, Float, Double}; -use llvm::{Struct, Array, Attribute, Vector}; -use trans::cabi::{ArgType, FnType}; -use trans::context::CrateContext; -use trans::type_::Type; - -use std::cmp; - -#[derive(Clone, Copy, PartialEq)] -enum RegClass { - NoClass, - Int, - SSEFs, - SSEFv, - SSEDs, - SSEDv, - SSEInt(/* bitwidth */ u64), - /// Data that can appear in the upper half of an SSE register. - SSEUp, - X87, - X87Up, - ComplexX87, - Memory -} - -trait TypeMethods { - fn is_reg_ty(&self) -> bool; -} - -impl TypeMethods for Type { - fn is_reg_ty(&self) -> bool { - match self.kind() { - Integer | Pointer | Float | Double => true, - _ => false - } - } -} - -impl RegClass { - fn is_sse(&self) -> bool { - match *self { - SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true, - _ => false - } - } -} - -trait ClassList { - fn is_pass_byval(&self) -> bool; - fn is_ret_bysret(&self) -> bool; -} - -impl ClassList for [RegClass] { - fn is_pass_byval(&self) -> bool { - if self.is_empty() { return false; } - - let class = self[0]; - class == Memory - || class == X87 - || class == ComplexX87 - } - - fn is_ret_bysret(&self) -> bool { - if self.is_empty() { return false; } - - self[0] == Memory - } -} - -fn classify_ty(ty: Type) -> Vec { - fn align(off: usize, ty: Type) -> usize { - let a = ty_align(ty); - return (off + a - 1) / a * a; - } - - fn ty_align(ty: Type) -> usize { - match ty.kind() { - Integer => ((ty.int_width() as usize) + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - if ty.is_packed() { - 1 - } else { - let str_tys = ty.field_types(); - str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t))) - } - } - Array => { - let elt = ty.element_type(); - ty_align(elt) - } - Vector => { - let len = 
ty.vector_length(); - let elt = ty.element_type(); - ty_align(elt) * len - } - _ => panic!("ty_align: unhandled type") - } - } - - fn ty_size(ty: Type) -> usize { - match ty.kind() { - Integer => (ty.int_width() as usize + 7) / 8, - Pointer => 8, - Float => 4, - Double => 8, - Struct => { - let str_tys = ty.field_types(); - if ty.is_packed() { - str_tys.iter().fold(0, |s, t| s + ty_size(*t)) - } else { - let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t)); - align(size, ty) - } - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - len * eltsz - } - - _ => panic!("ty_size: unhandled type") - } - } - - fn all_mem(cls: &mut [RegClass]) { - for elt in cls { - *elt = Memory; - } - } - - fn unify(cls: &mut [RegClass], - i: usize, - newv: RegClass) { - if cls[i] == newv { return } - - let to_write = match (cls[i], newv) { - (NoClass, _) => newv, - (_, NoClass) => return, - - (Memory, _) | - (_, Memory) => Memory, - - (Int, _) | - (_, Int) => Int, - - (X87, _) | - (X87Up, _) | - (ComplexX87, _) | - (_, X87) | - (_, X87Up) | - (_, ComplexX87) => Memory, - - (SSEFv, SSEUp) | - (SSEFs, SSEUp) | - (SSEDv, SSEUp) | - (SSEDs, SSEUp) | - (SSEInt(_), SSEUp) => return, - - (_, _) => newv - }; - cls[i] = to_write; - } - - fn classify_struct(tys: &[Type], - cls: &mut [RegClass], - i: usize, - off: usize, - packed: bool) { - let mut field_off = off; - for ty in tys { - if !packed { - field_off = align(field_off, *ty); - } - classify(*ty, cls, i, field_off); - field_off += ty_size(*ty); - } - } - - fn classify(ty: Type, - cls: &mut [RegClass], ix: usize, - off: usize) { - let t_align = ty_align(ty); - let t_size = ty_size(ty); - - let misalign = off % t_align; - if misalign != 0 { - let mut i = off / 8; - let e = (off + t_size + 7) / 8; - while i < e { - unify(cls, ix + i, Memory); - i += 1; - 
} - return; - } - - match ty.kind() { - Integer | - Pointer => { - unify(cls, ix + off / 8, Int); - } - Float => { - if off % 8 == 4 { - unify(cls, ix + off / 8, SSEFv); - } else { - unify(cls, ix + off / 8, SSEFs); - } - } - Double => { - unify(cls, ix + off / 8, SSEDs); - } - Struct => { - classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed()); - } - Array => { - let len = ty.array_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - let mut i = 0; - while i < len { - classify(elt, cls, ix, off + i * eltsz); - i += 1; - } - } - Vector => { - let len = ty.vector_length(); - let elt = ty.element_type(); - let eltsz = ty_size(elt); - let mut reg = match elt.kind() { - Integer => SSEInt(elt.int_width()), - Float => SSEFv, - Double => SSEDv, - _ => panic!("classify: unhandled vector element type") - }; - - let mut i = 0; - while i < len { - unify(cls, ix + (off + i * eltsz) / 8, reg); - - // everything after the first one is the upper - // half of a register. - reg = SSEUp; - i += 1; - } - } - _ => panic!("classify: unhandled type") - } - } - - fn fixup(ty: Type, cls: &mut [RegClass]) { - let mut i = 0; - let ty_kind = ty.kind(); - let e = cls.len(); - if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) { - if cls[i].is_sse() { - i += 1; - while i < e { - if cls[i] != SSEUp { - all_mem(cls); - return; - } - i += 1; - } - } else { - all_mem(cls); - return - } - } else { - while i < e { - if cls[i] == Memory { - all_mem(cls); - return; - } - if cls[i] == X87Up { - // for darwin - // cls[i] = SSEDs; - all_mem(cls); - return; - } - if cls[i] == SSEUp { - cls[i] = SSEDv; - } else if cls[i].is_sse() { - i += 1; - while i != e && cls[i] == SSEUp { i += 1; } - } else if cls[i] == X87 { - i += 1; - while i != e && cls[i] == X87Up { i += 1; } - } else { - i += 1; - } - } - } - } - - let words = (ty_size(ty) + 7) / 8; - let mut cls = vec![NoClass; words]; - if words > 4 { - all_mem(&mut cls); - return cls; - } - 
classify(ty, &mut cls, 0, 0); - fixup(ty, &mut cls); - return cls; -} - -fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type { - fn llvec_len(cls: &[RegClass]) -> usize { - let mut len = 1; - for c in cls { - if *c != SSEUp { - break; - } - len += 1; - } - return len; - } - - let mut tys = Vec::new(); - let mut i = 0; - let e = cls.len(); - while i < e { - match cls[i] { - Int => { - tys.push(Type::i64(ccx)); - } - SSEFv | SSEDv | SSEInt(_) => { - let (elts_per_word, elt_ty) = match cls[i] { - SSEFv => (2, Type::f32(ccx)), - SSEDv => (1, Type::f64(ccx)), - SSEInt(bits) => { - assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64, - "llreg_ty: unsupported SSEInt width {}", bits); - (64 / bits, Type::ix(ccx, bits)) - } - _ => unreachable!(), - }; - let vec_len = llvec_len(&cls[i + 1..]); - let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word); - tys.push(vec_ty); - i += vec_len; - continue; - } - SSEFs => { - tys.push(Type::f32(ccx)); - } - SSEDs => { - tys.push(Type::f64(ccx)); - } - _ => panic!("llregtype: unhandled class") - } - i += 1; - } - if tys.len() == 1 && tys[0].kind() == Vector { - // if the type contains only a vector, pass it as that vector. 
- tys[0] - } else { - Type::struct_(ccx, &tys, false) - } -} - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - fn x86_64_ty(ccx: &CrateContext, - ty: Type, - is_mem_cls: F, - ind_attr: Attribute) - -> ArgType where - F: FnOnce(&[RegClass]) -> bool, - { - if !ty.is_reg_ty() { - let cls = classify_ty(ty); - if is_mem_cls(&cls) { - ArgType::indirect(ty, Some(ind_attr)) - } else { - ArgType::direct(ty, - Some(llreg_ty(ccx, &cls)), - None, - None) - } - } else { - let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(ty, None, None, attr) - } - } - - let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 - let mut sse_regs = 8; // XMM0-7 - - let ret_ty = if ret_def { - x86_64_ty(ccx, rty, |cls| { - if cls.is_ret_bysret() { - // `sret` parameter thus one less register available - int_regs -= 1; - true - } else { - false - } - }, Attribute::StructRet) - } else { - ArgType::direct(Type::void(ccx), None, None, None) - }; - - let mut arg_tys = Vec::new(); - for t in atys { - let ty = x86_64_ty(ccx, *t, |cls| { - let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize; - let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize; - let in_mem = cls.is_pass_byval() || - int_regs < needed_int || - sse_regs < needed_sse; - if in_mem { - // `byval` parameter thus one less integer register available - int_regs -= 1; - } else { - // split into sized chunks passed individually - int_regs -= needed_int; - sse_regs -= needed_sse; - } - in_mem - }, Attribute::ByVal); - arg_tys.push(ty); - - // An integer, pointer, double or float parameter - // thus the above closure passed to `x86_64_ty` won't - // get called. 
- if t.kind() == Integer || t.kind() == Pointer { - int_regs -= 1; - } else if t.kind() == Double || t.kind() == Float { - sse_regs -= 1; - } - } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/cabi_x86_win64.rs b/src/librustc_trans/trans/cabi_x86_win64.rs deleted file mode 100644 index 120c8dc0384ce..0000000000000 --- a/src/librustc_trans/trans/cabi_x86_win64.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::*; -use super::common::*; -use super::machine::*; -use trans::cabi::{ArgType, FnType}; -use trans::type_::Type; - -// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx - -pub fn compute_abi_info(ccx: &CrateContext, - atys: &[Type], - rty: Type, - ret_def: bool) -> FnType { - let mut arg_tys = Vec::new(); - - let ret_ty; - if !ret_def { - ret_ty = ArgType::direct(Type::void(ccx), None, None, None); - } else if rty.kind() == Struct { - ret_ty = match llsize_of_alloc(ccx, rty) { - 1 => ArgType::direct(rty, Some(Type::i8(ccx)), None, None), - 2 => ArgType::direct(rty, Some(Type::i16(ccx)), None, None), - 4 => ArgType::direct(rty, Some(Type::i32(ccx)), None, None), - 8 => ArgType::direct(rty, Some(Type::i64(ccx)), None, None), - _ => ArgType::indirect(rty, Some(Attribute::StructRet)) - }; - } else { - let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ret_ty = ArgType::direct(rty, None, None, attr); - } - - for &t in atys { - let ty = match t.kind() { - Struct => { - match llsize_of_alloc(ccx, t) { - 1 => ArgType::direct(t, Some(Type::i8(ccx)), None, None), - 2 => ArgType::direct(t, Some(Type::i16(ccx)), None, None), - 4 
=> ArgType::direct(t, Some(Type::i32(ccx)), None, None), - 8 => ArgType::direct(t, Some(Type::i64(ccx)), None, None), - _ => ArgType::indirect(t, None) - } - } - _ => { - let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; - ArgType::direct(t, None, None, attr) - } - }; - arg_tys.push(ty); - } - - return FnType { - arg_tys: arg_tys, - ret_ty: ret_ty, - }; -} diff --git a/src/librustc_trans/trans/callee.rs b/src/librustc_trans/trans/callee.rs deleted file mode 100644 index c7ec1c0955146..0000000000000 --- a/src/librustc_trans/trans/callee.rs +++ /dev/null @@ -1,1096 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Handles translation of callees as well as other call-related -//! things. Callees are a superset of normal rust values and sometimes -//! have different representations. In particular, top-level fn items -//! and methods are represented as just a fn ptr and not a full -//! closure. 
- -pub use self::AutorefArg::*; -pub use self::CalleeData::*; -pub use self::CallArgs::*; - -use arena::TypedArena; -use back::link; -use llvm::{self, ValueRef, get_params}; -use middle::cstore::LOCAL_CRATE; -use middle::def; -use middle::def_id::DefId; -use middle::infer; -use middle::subst; -use middle::subst::{Substs}; -use rustc::front::map as hir_map; -use trans::adt; -use trans::base; -use trans::base::*; -use trans::build::*; -use trans::callee; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::common::{self, Block, Result, NodeIdAndSpan, ExprId, CrateContext, - ExprOrMethodCall, FunctionContext, MethodCallKey}; -use trans::consts; -use trans::datum::*; -use trans::debuginfo::{DebugLoc, ToDebugLoc}; -use trans::declare; -use trans::expr; -use trans::glue; -use trans::inline; -use trans::foreign; -use trans::intrinsic; -use trans::meth; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of; -use trans::Disr; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::MethodCall; -use rustc_front::hir; - -use syntax::abi as synabi; -use syntax::ast; -use syntax::errors; -use syntax::ptr::P; - -#[derive(Copy, Clone)] -pub struct MethodData { - pub llfn: ValueRef, - pub llself: ValueRef, -} - -pub enum CalleeData<'tcx> { - // Constructor for enum variant/tuple-like-struct - // i.e. Some, Ok - NamedTupleConstructor(Disr), - - // Represents a (possibly monomorphized) top-level fn item or method - // item. Note that this is just the fn-ptr and is not a Rust closure - // value (which is a pair). 
- Fn(/* llfn */ ValueRef), - - Intrinsic(ast::NodeId, subst::Substs<'tcx>), - - TraitItem(MethodData) -} - -pub struct Callee<'blk, 'tcx: 'blk> { - pub bcx: Block<'blk, 'tcx>, - pub data: CalleeData<'tcx>, - pub ty: Ty<'tcx> -} - -fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &hir::Expr) - -> Callee<'blk, 'tcx> { - let _icx = push_ctxt("trans_callee"); - debug!("callee::trans(expr={:?})", expr); - - // pick out special kinds of expressions that can be called: - match expr.node { - hir::ExprPath(..) => { - return trans_def(bcx, bcx.def(expr.id), expr); - } - _ => {} - } - - // any other expressions are closures: - return datum_callee(bcx, expr); - - fn datum_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, expr: &hir::Expr) - -> Callee<'blk, 'tcx> { - let DatumBlock { bcx, datum, .. } = expr::trans(bcx, expr); - match datum.ty.sty { - ty::TyBareFn(..) => { - Callee { - bcx: bcx, - ty: datum.ty, - data: Fn(datum.to_llscalarish(bcx)) - } - } - _ => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("type of callee is neither bare-fn nor closure: {}", - datum.ty)); - } - } - } - - fn fn_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, Rvalue>) - -> Callee<'blk, 'tcx> { - Callee { - bcx: bcx, - data: Fn(datum.val), - ty: datum.ty - } - } - - fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - def: def::Def, - ref_expr: &hir::Expr) - -> Callee<'blk, 'tcx> { - debug!("trans_def(def={:?}, ref_expr={:?})", def, ref_expr); - let expr_ty = common::node_id_type(bcx, ref_expr.id); - match def { - def::DefFn(did, _) if { - let maybe_def_id = inline::get_local_instance(bcx.ccx(), did); - let maybe_ast_node = maybe_def_id.and_then(|def_id| { - let node_id = bcx.tcx().map.as_local_node_id(def_id).unwrap(); - bcx.tcx().map.find(node_id) - }); - match maybe_ast_node { - Some(hir_map::NodeStructCtor(_)) => true, - _ => false - } - } => { - Callee { - bcx: bcx, - data: NamedTupleConstructor(Disr(0)), - ty: expr_ty - } - } - def::DefFn(did, _) if match expr_ty.sty { - 
ty::TyBareFn(_, ref f) => f.abi == synabi::RustIntrinsic || - f.abi == synabi::PlatformIntrinsic, - _ => false - } => { - let substs = common::node_id_substs(bcx.ccx(), - ExprId(ref_expr.id), - bcx.fcx.param_substs); - let def_id = inline::maybe_instantiate_inline(bcx.ccx(), did); - let node_id = bcx.tcx().map.as_local_node_id(def_id).unwrap(); - Callee { bcx: bcx, data: Intrinsic(node_id, substs), ty: expr_ty } - } - def::DefFn(did, _) => { - fn_callee(bcx, trans_fn_ref(bcx.ccx(), did, ExprId(ref_expr.id), - bcx.fcx.param_substs)) - } - def::DefMethod(meth_did) => { - let method_item = bcx.tcx().impl_or_trait_item(meth_did); - let fn_datum = match method_item.container() { - ty::ImplContainer(_) => { - trans_fn_ref(bcx.ccx(), meth_did, - ExprId(ref_expr.id), - bcx.fcx.param_substs) - } - ty::TraitContainer(trait_did) => { - meth::trans_static_method_callee(bcx.ccx(), - meth_did, - trait_did, - ref_expr.id, - bcx.fcx.param_substs) - } - }; - fn_callee(bcx, fn_datum) - } - def::DefVariant(tid, vid, _) => { - let vinfo = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid); - assert_eq!(vinfo.kind(), ty::VariantKind::Tuple); - - Callee { - bcx: bcx, - data: NamedTupleConstructor(Disr::from(vinfo.disr_val)), - ty: expr_ty - } - } - def::DefStruct(_) => { - Callee { - bcx: bcx, - data: NamedTupleConstructor(Disr(0)), - ty: expr_ty - } - } - def::DefStatic(..) | - def::DefConst(..) | - def::DefAssociatedConst(..) | - def::DefLocal(..) | - def::DefUpvar(..) => { - datum_callee(bcx, ref_expr) - } - def::DefMod(..) | def::DefForeignMod(..) | def::DefTrait(..) | - def::DefTy(..) | def::DefPrimTy(..) | def::DefAssociatedTy(..) | - def::DefLabel(..) | def::DefTyParam(..) | - def::DefSelfTy(..) | def::DefErr => { - bcx.tcx().sess.span_bug( - ref_expr.span, - &format!("cannot translate def {:?} \ - to a callable thing!", def)); - } - } - } -} - -/// Translates a reference (with id `ref_id`) to the fn/method with id `def_id` into a function -/// pointer. 
This may require monomorphization or inlining. -pub fn trans_fn_ref<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - node: ExprOrMethodCall, - param_substs: &'tcx subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> { - let _icx = push_ctxt("trans_fn_ref"); - - let substs = common::node_id_substs(ccx, node, param_substs); - debug!("trans_fn_ref(def_id={:?}, node={:?}, substs={:?})", - def_id, - node, - substs); - trans_fn_ref_with_substs(ccx, def_id, node, param_substs, substs) -} - -/// Translates an adapter that implements the `Fn` trait for a fn -/// pointer. This is basically the equivalent of something like: -/// -/// ``` -/// impl<'a> Fn(&'a int) -> &'a int for fn(&int) -> &int { -/// extern "rust-abi" fn call(&self, args: (&'a int,)) -> &'a int { -/// (*self)(args.0) -/// } -/// } -/// ``` -/// -/// but for the bare function type given. -pub fn trans_fn_pointer_shim<'a, 'tcx>( - ccx: &'a CrateContext<'a, 'tcx>, - closure_kind: ty::ClosureKind, - bare_fn_ty: Ty<'tcx>) - -> ValueRef -{ - let _icx = push_ctxt("trans_fn_pointer_shim"); - let tcx = ccx.tcx(); - - // Normalize the type for better caching. - let bare_fn_ty = tcx.erase_regions(&bare_fn_ty); - - // If this is an impl of `Fn` or `FnMut` trait, the receiver is `&self`. - let is_by_ref = match closure_kind { - ty::FnClosureKind | ty::FnMutClosureKind => true, - ty::FnOnceClosureKind => false, - }; - let bare_fn_ty_maybe_ref = if is_by_ref { - tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), bare_fn_ty) - } else { - bare_fn_ty - }; - - // Check if we already trans'd this shim. - match ccx.fn_pointer_shims().borrow().get(&bare_fn_ty_maybe_ref) { - Some(&llval) => { return llval; } - None => { } - } - - debug!("trans_fn_pointer_shim(bare_fn_ty={:?})", - bare_fn_ty); - - // Construct the "tuply" version of `bare_fn_ty`. It takes two arguments: `self`, - // which is the fn pointer, and `args`, which is the arguments tuple. 
- let (opt_def_id, sig) = - match bare_fn_ty.sty { - ty::TyBareFn(opt_def_id, - &ty::BareFnTy { unsafety: hir::Unsafety::Normal, - abi: synabi::Rust, - ref sig }) => { - (opt_def_id, sig) - } - - _ => { - tcx.sess.bug(&format!("trans_fn_pointer_shim invoked on invalid type: {}", - bare_fn_ty)); - } - }; - let sig = tcx.erase_late_bound_regions(sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec()); - let tuple_fn_ty = tcx.mk_fn(opt_def_id, - tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Normal, - abi: synabi::RustCall, - sig: ty::Binder(ty::FnSig { - inputs: vec![bare_fn_ty_maybe_ref, - tuple_input_ty], - output: sig.output, - variadic: false - })})); - debug!("tuple_fn_ty: {:?}", tuple_fn_ty); - - // - let function_name = link::mangle_internal_name_by_type_and_seq(ccx, bare_fn_ty, - "fn_pointer_shim"); - let llfn = declare::declare_internal_rust_fn(ccx, &function_name[..], tuple_fn_ty); - - // - let empty_substs = tcx.mk_substs(Substs::trans_empty()); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfn, - ast::DUMMY_NODE_ID, - false, - sig.output, - empty_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); - - let llargs = get_params(fcx.llfn); - - let self_idx = fcx.arg_offset(); - // the first argument (`self`) will be ptr to the fn pointer - let llfnpointer = if is_by_ref { - Load(bcx, llargs[self_idx]) - } else { - llargs[self_idx] - }; - - assert!(!fcx.needs_ret_allocas); - - let dest = fcx.llretslotptr.get().map(|_| - expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot")) - ); - - bcx = trans_call_inner(bcx, DebugLoc::None, |bcx, _| { - Callee { - bcx: bcx, - data: Fn(llfnpointer), - ty: bare_fn_ty - } - }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx; - - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); - - 
ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); - - llfn -} - -/// Translates a reference to a fn/method item, monomorphizing and -/// inlining as it goes. -/// -/// # Parameters -/// -/// - `ccx`: the crate context -/// - `def_id`: def id of the fn or method item being referenced -/// - `node`: node id of the reference to the fn/method, if applicable. -/// This parameter may be zero; but, if so, the resulting value may not -/// have the right type, so it must be cast before being used. -/// - `param_substs`: if the `node` is in a polymorphic function, these -/// are the substitutions required to monomorphize its type -/// - `substs`: values for each of the fn/method's parameters -pub fn trans_fn_ref_with_substs<'a, 'tcx>( - ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - node: ExprOrMethodCall, - param_substs: &'tcx subst::Substs<'tcx>, - substs: subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> -{ - let _icx = push_ctxt("trans_fn_ref_with_substs"); - let tcx = ccx.tcx(); - - debug!("trans_fn_ref_with_substs(def_id={:?}, node={:?}, \ - param_substs={:?}, substs={:?})", - def_id, - node, - param_substs, - substs); - - assert!(!substs.types.needs_infer()); - assert!(!substs.types.has_escaping_regions()); - let substs = substs.erase_regions(); - - // Check whether this fn has an inlined copy and, if so, redirect - // def_id to the local id of the inlined copy. 
- let def_id = inline::maybe_instantiate_inline(ccx, def_id); - - fn is_named_tuple_constructor(tcx: &ty::ctxt, def_id: DefId) -> bool { - let node_id = match tcx.map.as_local_node_id(def_id) { - Some(n) => n, - None => { return false; } - }; - let map_node = errors::expect( - &tcx.sess.diagnostic(), - tcx.map.find(node_id), - || "local item should be in ast map".to_string()); - - match map_node { - hir_map::NodeVariant(v) => { - v.node.data.is_tuple() - } - hir_map::NodeStructCtor(_) => true, - _ => false - } - } - let must_monomorphise = - !substs.types.is_empty() || is_named_tuple_constructor(tcx, def_id); - - debug!("trans_fn_ref_with_substs({:?}) must_monomorphise: {}", - def_id, must_monomorphise); - - // Create a monomorphic version of generic functions - if must_monomorphise { - // Should be either intra-crate or inlined. - assert_eq!(def_id.krate, LOCAL_CRATE); - - let opt_ref_id = match node { - ExprId(id) => if id != 0 { Some(id) } else { None }, - MethodCallKey(_) => None, - }; - - let substs = tcx.mk_substs(substs); - let (val, fn_ty, must_cast) = - monomorphize::monomorphic_fn(ccx, def_id, substs, opt_ref_id); - if must_cast && node != ExprId(0) { - // Monotype of the REFERENCE to the function (type params - // are subst'd) - let ref_ty = match node { - ExprId(id) => tcx.node_id_to_type(id), - MethodCallKey(method_call) => { - tcx.tables.borrow().method_map[&method_call].ty - } - }; - let ref_ty = monomorphize::apply_param_substs(tcx, - param_substs, - &ref_ty); - let llptrty = type_of::type_of_fn_from_ty(ccx, ref_ty).ptr_to(); - if llptrty != common::val_ty(val) { - let val = consts::ptrcast(val, llptrty); - return Datum::new(val, ref_ty, Rvalue::new(ByValue)); - } - } - return Datum::new(val, fn_ty, Rvalue::new(ByValue)); - } - - // Type scheme of the function item (may have type params) - let fn_type_scheme = tcx.lookup_item_type(def_id); - let fn_type = infer::normalize_associated_type(tcx, &fn_type_scheme.ty); - - // Find the actual function 
pointer. - let mut val = { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) { - // Internal reference. - get_item_val(ccx, node_id) - } else { - // External reference. - trans_external_path(ccx, def_id, fn_type) - } - }; - - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. - // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. - // - // This can occur on either a crate-local or crate-external - // reference. It also occurs when testing libcore and in some - // other weird situations. Annoying. 
- let llty = type_of::type_of_fn_from_ty(ccx, fn_type); - let llptrty = llty.ptr_to(); - if common::val_ty(val) != llptrty { - debug!("trans_fn_ref_with_substs(): casting pointer!"); - val = consts::ptrcast(val, llptrty); - } else { - debug!("trans_fn_ref_with_substs(): not casting pointer!"); - } - - Datum::new(val, fn_type, Rvalue::new(ByValue)) -} - -// ______________________________________________________________________ -// Translating calls - -pub fn trans_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_expr: &hir::Expr, - f: &hir::Expr, - args: CallArgs<'a, 'tcx>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_call"); - trans_call_inner(bcx, - call_expr.debug_loc(), - |bcx, _| trans(bcx, f), - args, - Some(dest)).bcx -} - -pub fn trans_method_call<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_expr: &hir::Expr, - rcvr: &hir::Expr, - args: CallArgs<'a, 'tcx>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_method_call"); - debug!("trans_method_call(call_expr={:?})", call_expr); - let method_call = MethodCall::expr(call_expr.id); - trans_call_inner( - bcx, - call_expr.debug_loc(), - |cx, arg_cleanup_scope| { - meth::trans_method_callee(cx, method_call, Some(rcvr), arg_cleanup_scope) - }, - args, - Some(dest)).bcx -} - -pub fn trans_lang_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - did: DefId, - args: &[ValueRef], - dest: Option, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - callee::trans_call_inner(bcx, debug_loc, |bcx, _| { - let datum = trans_fn_ref_with_substs(bcx.ccx(), - did, - ExprId(0), - bcx.fcx.param_substs, - subst::Substs::trans_empty()); - Callee { - bcx: bcx, - data: Fn(datum.val), - ty: datum.ty - } - }, ArgVals(args), dest) -} - -/// This behemoth of a function translates function calls. Unfortunately, in -/// order to generate more efficient LLVM output at -O0, it has quite a complex -/// signature (refactoring this into two functions seems like a good idea). 
-/// -/// In particular, for lang items, it is invoked with a dest of None, and in -/// that case the return value contains the result of the fn. The lang item must -/// not return a structural type or else all heck breaks loose. -/// -/// For non-lang items, `dest` is always Some, and hence the result is written -/// into memory somewhere. Nonetheless we return the actual return value of the -/// function. -pub fn trans_call_inner<'a, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc, - get_callee: F, - args: CallArgs<'a, 'tcx>, - dest: Option) - -> Result<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, cleanup::ScopeId) -> Callee<'blk, 'tcx>, -{ - // Introduce a temporary cleanup scope that will contain cleanups - // for the arguments while they are being evaluated. The purpose - // this cleanup is to ensure that, should a panic occur while - // evaluating argument N, the values for arguments 0...N-1 are all - // cleaned up. If no panic occurs, the values are handed off to - // the callee, and hence none of the cleanups in this temporary - // scope will ever execute. 
- let fcx = bcx.fcx; - let ccx = fcx.ccx; - let arg_cleanup_scope = fcx.push_custom_cleanup_scope(); - - let callee = get_callee(bcx, cleanup::CustomScope(arg_cleanup_scope)); - let mut bcx = callee.bcx; - - let (abi, ret_ty) = match callee.ty.sty { - ty::TyBareFn(_, ref f) => { - let sig = bcx.tcx().erase_late_bound_regions(&f.sig); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - (f.abi, sig.output) - } - _ => panic!("expected bare rust fn or closure in trans_call_inner") - }; - - let (llfn, llself) = match callee.data { - Fn(llfn) => { - (llfn, None) - } - TraitItem(d) => { - (d.llfn, Some(d.llself)) - } - Intrinsic(node, substs) => { - assert!(abi == synabi::RustIntrinsic || abi == synabi::PlatformIntrinsic); - assert!(dest.is_some()); - - let call_info = match debug_loc { - DebugLoc::At(id, span) => NodeIdAndSpan { id: id, span: span }, - DebugLoc::None => { - bcx.sess().bug("No call info for intrinsic call?") - } - }; - - return intrinsic::trans_intrinsic_call(bcx, node, callee.ty, - arg_cleanup_scope, args, - dest.unwrap(), substs, - call_info); - } - NamedTupleConstructor(disr) => { - assert!(dest.is_some()); - fcx.pop_custom_cleanup_scope(arg_cleanup_scope); - - return base::trans_named_tuple_constructor(bcx, - callee.ty, - disr, - args, - dest.unwrap(), - debug_loc); - } - }; - - // Intrinsics should not become actual functions. - // We trans them in place in `trans_intrinsic_call` - assert!(abi != synabi::RustIntrinsic && abi != synabi::PlatformIntrinsic); - - let is_rust_fn = abi == synabi::Rust || abi == synabi::RustCall; - - // Generate a location to store the result. If the user does - // not care about the result, just make a stack slot. 
- let opt_llretslot = dest.and_then(|dest| match dest { - expr::SaveIn(dst) => Some(dst), - expr::Ignore => { - let ret_ty = match ret_ty { - ty::FnConverging(ret_ty) => ret_ty, - ty::FnDiverging => ccx.tcx().mk_nil() - }; - if !is_rust_fn || - type_of::return_uses_outptr(ccx, ret_ty) || - bcx.fcx.type_needs_drop(ret_ty) { - // Push the out-pointer if we use an out-pointer for this - // return type, otherwise push "undef". - if common::type_is_zero_size(ccx, ret_ty) { - let llty = type_of::type_of(ccx, ret_ty); - Some(common::C_undef(llty.ptr_to())) - } else { - let llresult = alloc_ty(bcx, ret_ty, "__llret"); - call_lifetime_start(bcx, llresult); - Some(llresult) - } - } else { - None - } - } - }); - - let mut llresult = unsafe { - llvm::LLVMGetUndef(Type::nil(ccx).ptr_to().to_ref()) - }; - - // The code below invokes the function, using either the Rust - // conventions (if it is a rust fn) or the native conventions - // (otherwise). The important part is that, when all is said - // and done, either the return value of the function will have been - // written in opt_llretslot (if it is Some) or `llresult` will be - // set appropriately (otherwise). - if is_rust_fn { - let mut llargs = Vec::new(); - - if let (ty::FnConverging(ret_ty), Some(mut llretslot)) = (ret_ty, opt_llretslot) { - if type_of::return_uses_outptr(ccx, ret_ty) { - let llformal_ret_ty = type_of::type_of(ccx, ret_ty).ptr_to(); - let llret_ty = common::val_ty(llretslot); - if llformal_ret_ty != llret_ty { - // this could happen due to e.g. subtyping - debug!("casting actual return type ({}) to match formal ({})", - bcx.llty_str(llret_ty), bcx.llty_str(llformal_ret_ty)); - llretslot = PointerCast(bcx, llretslot, llformal_ret_ty); - } - llargs.push(llretslot); - } - } - - // Push a trait object's self. - if let Some(llself) = llself { - llargs.push(llself); - } - - // Push the arguments. 
- bcx = trans_args(bcx, - args, - callee.ty, - &mut llargs, - cleanup::CustomScope(arg_cleanup_scope), - llself.is_some(), - abi); - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - - // Invoke the actual rust fn and update bcx/llresult. - let (llret, b) = base::invoke(bcx, - llfn, - &llargs[..], - callee.ty, - debug_loc); - bcx = b; - llresult = llret; - - // If the Rust convention for this type is return via - // the return value, copy it into llretslot. - match (opt_llretslot, ret_ty) { - (Some(llretslot), ty::FnConverging(ret_ty)) => { - if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) && - !common::type_is_zero_size(bcx.ccx(), ret_ty) - { - store_ty(bcx, llret, llretslot, ret_ty) - } - } - (_, _) => {} - } - } else { - // Lang items are the only case where dest is None, and - // they are always Rust fns. - assert!(dest.is_some()); - - let mut llargs = Vec::new(); - let arg_tys = match args { - ArgExprs(a) => a.iter().map(|x| common::expr_ty_adjusted(bcx, &**x)).collect(), - _ => panic!("expected arg exprs.") - }; - bcx = trans_args(bcx, - args, - callee.ty, - &mut llargs, - cleanup::CustomScope(arg_cleanup_scope), - false, - abi); - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - - bcx = foreign::trans_native_call(bcx, - callee.ty, - llfn, - opt_llretslot.unwrap(), - &llargs[..], - arg_tys, - debug_loc); - } - - fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_cleanup_scope); - - // If the caller doesn't care about the result of this fn call, - // drop the temporary slot we made. - match (dest, opt_llretslot, ret_ty) { - (Some(expr::Ignore), Some(llretslot), ty::FnConverging(ret_ty)) => { - // drop the value if it is not being saved. 
- bcx = glue::drop_ty(bcx, - llretslot, - ret_ty, - debug_loc); - call_lifetime_end(bcx, llretslot); - } - _ => {} - } - - if ret_ty == ty::FnDiverging { - Unreachable(bcx); - } - - Result::new(bcx, llresult) -} - -pub enum CallArgs<'a, 'tcx> { - // Supply value of arguments as a list of expressions that must be - // translated. This is used in the common case of `foo(bar, qux)`. - ArgExprs(&'a [P]), - - // Supply value of arguments as a list of LLVM value refs; frequently - // used with lang items and so forth, when the argument is an internal - // value. - ArgVals(&'a [ValueRef]), - - // For overloaded operators: `(lhs, Option(rhs, rhs_id), autoref)`. `lhs` - // is the left-hand-side and `rhs/rhs_id` is the datum/expr-id of - // the right-hand-side argument (if any). `autoref` indicates whether the `rhs` - // arguments should be auto-referenced - ArgOverloadedOp(Datum<'tcx, Expr>, Option<(Datum<'tcx, Expr>, ast::NodeId)>, bool), - - // Supply value of arguments as a list of expressions that must be - // translated, for overloaded call operators. - ArgOverloadedCall(Vec<&'a hir::Expr>), -} - -fn trans_args_under_call_abi<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: &[P], - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool) - -> Block<'blk, 'tcx> -{ - let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - let args = sig.inputs; - - // Translate the `self` argument first. - if !ignore_self { - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &*arg_exprs[0])); - bcx = trans_arg_datum(bcx, - args[0], - arg_datum, - arg_cleanup_scope, - DontAutorefArg, - llargs); - } - - // Now untuple the rest of the arguments. 
- let tuple_expr = &arg_exprs[1]; - let tuple_type = common::node_id_type(bcx, tuple_expr.id); - - match tuple_type.sty { - ty::TyTuple(ref field_types) => { - let tuple_datum = unpack_datum!(bcx, - expr::trans(bcx, &**tuple_expr)); - let tuple_lvalue_datum = - unpack_datum!(bcx, - tuple_datum.to_lvalue_datum(bcx, - "args", - tuple_expr.id)); - let repr = adt::represent_type(bcx.ccx(), tuple_type); - let repr_ptr = &*repr; - for (i, field_type) in field_types.iter().enumerate() { - let arg_datum = tuple_lvalue_datum.get_element( - bcx, - field_type, - |srcval| { - adt::trans_field_ptr(bcx, repr_ptr, srcval, Disr(0), i) - }).to_expr_datum(); - bcx = trans_arg_datum(bcx, - field_type, - arg_datum, - arg_cleanup_scope, - DontAutorefArg, - llargs); - } - } - _ => { - bcx.sess().span_bug(tuple_expr.span, - "argument to `.call()` wasn't a tuple?!") - } - }; - - bcx -} - -fn trans_overloaded_call_args<'blk, 'tcx>( - mut bcx: Block<'blk, 'tcx>, - arg_exprs: Vec<&hir::Expr>, - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool) - -> Block<'blk, 'tcx> { - // Translate the `self` argument first. - let sig = bcx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(bcx.tcx(), &sig); - let arg_tys = sig.inputs; - - if !ignore_self { - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, arg_exprs[0])); - bcx = trans_arg_datum(bcx, - arg_tys[0], - arg_datum, - arg_cleanup_scope, - DontAutorefArg, - llargs); - } - - // Now untuple the rest of the arguments. 
- let tuple_type = arg_tys[1]; - match tuple_type.sty { - ty::TyTuple(ref field_types) => { - for (i, &field_type) in field_types.iter().enumerate() { - let arg_datum = - unpack_datum!(bcx, expr::trans(bcx, arg_exprs[i + 1])); - bcx = trans_arg_datum(bcx, - field_type, - arg_datum, - arg_cleanup_scope, - DontAutorefArg, - llargs); - } - } - _ => { - bcx.sess().span_bug(arg_exprs[0].span, - "argument to `.call()` wasn't a tuple?!") - } - }; - - bcx -} - -pub fn trans_args<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>, - args: CallArgs<'a, 'tcx>, - fn_ty: Ty<'tcx>, - llargs: &mut Vec, - arg_cleanup_scope: cleanup::ScopeId, - ignore_self: bool, - abi: synabi::Abi) - -> Block<'blk, 'tcx> { - debug!("trans_args(abi={})", abi); - - let _icx = push_ctxt("trans_args"); - let sig = cx.tcx().erase_late_bound_regions(&fn_ty.fn_sig()); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - let arg_tys = sig.inputs; - let variadic = sig.variadic; - - let mut bcx = cx; - - // First we figure out the caller's view of the types of the arguments. - // This will be needed if this is a generic call, because the callee has - // to cast her view of the arguments to the caller's view. - match args { - ArgExprs(arg_exprs) => { - if abi == synabi::RustCall { - // This is only used for direct calls to the `call`, - // `call_mut` or `call_once` functions. 
- return trans_args_under_call_abi(cx, - arg_exprs, - fn_ty, - llargs, - arg_cleanup_scope, - ignore_self) - } - - let num_formal_args = arg_tys.len(); - for (i, arg_expr) in arg_exprs.iter().enumerate() { - if i == 0 && ignore_self { - continue; - } - let arg_ty = if i >= num_formal_args { - assert!(variadic); - common::expr_ty_adjusted(cx, &**arg_expr) - } else { - arg_tys[i] - }; - - let arg_datum = unpack_datum!(bcx, expr::trans(bcx, &**arg_expr)); - bcx = trans_arg_datum(bcx, arg_ty, arg_datum, - arg_cleanup_scope, - DontAutorefArg, - llargs); - } - } - ArgOverloadedCall(arg_exprs) => { - return trans_overloaded_call_args(cx, - arg_exprs, - fn_ty, - llargs, - arg_cleanup_scope, - ignore_self) - } - ArgOverloadedOp(lhs, rhs, autoref) => { - assert!(!variadic); - - bcx = trans_arg_datum(bcx, arg_tys[0], lhs, - arg_cleanup_scope, - DontAutorefArg, - llargs); - - if let Some((rhs, rhs_id)) = rhs { - assert_eq!(arg_tys.len(), 2); - bcx = trans_arg_datum(bcx, arg_tys[1], rhs, - arg_cleanup_scope, - if autoref { DoAutorefArg(rhs_id) } else { DontAutorefArg }, - llargs); - } else { - assert_eq!(arg_tys.len(), 1); - } - } - ArgVals(vs) => { - llargs.extend_from_slice(vs); - } - } - - bcx -} - -#[derive(Copy, Clone)] -pub enum AutorefArg { - DontAutorefArg, - DoAutorefArg(ast::NodeId) -} - -pub fn trans_arg_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - formal_arg_ty: Ty<'tcx>, - arg_datum: Datum<'tcx, Expr>, - arg_cleanup_scope: cleanup::ScopeId, - autoref_arg: AutorefArg, - llargs: &mut Vec) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_arg_datum"); - let mut bcx = bcx; - let ccx = bcx.ccx(); - - debug!("trans_arg_datum({:?})", - formal_arg_ty); - - let arg_datum_ty = arg_datum.ty; - - debug!(" arg datum: {}", arg_datum.to_string(bcx.ccx())); - - let mut val; - // FIXME(#3548) use the adjustments table - match autoref_arg { - DoAutorefArg(arg_id) => { - // We will pass argument by reference - // We want an lvalue, so that we can pass by reference and - let 
arg_datum = unpack_datum!( - bcx, arg_datum.to_lvalue_datum(bcx, "arg", arg_id)); - val = arg_datum.val; - } - DontAutorefArg if common::type_is_fat_ptr(bcx.tcx(), arg_datum_ty) && - !bcx.fcx.type_needs_drop(arg_datum_ty) => { - val = arg_datum.val - } - DontAutorefArg => { - // Make this an rvalue, since we are going to be - // passing ownership. - let arg_datum = unpack_datum!( - bcx, arg_datum.to_rvalue_datum(bcx, "arg")); - - // Now that arg_datum is owned, get it into the appropriate - // mode (ref vs value). - let arg_datum = unpack_datum!( - bcx, arg_datum.to_appropriate_datum(bcx)); - - // Technically, ownership of val passes to the callee. - // However, we must cleanup should we panic before the - // callee is actually invoked. - val = arg_datum.add_clean(bcx.fcx, arg_cleanup_scope); - } - } - - if type_of::arg_is_indirect(ccx, formal_arg_ty) && formal_arg_ty != arg_datum_ty { - // this could happen due to e.g. subtyping - let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty); - debug!("casting actual type ({}) to match formal ({})", - bcx.val_to_string(val), bcx.llty_str(llformal_arg_ty)); - debug!("Rust types: {:?}; {:?}", arg_datum_ty, - formal_arg_ty); - val = PointerCast(bcx, val, llformal_arg_ty); - } - - debug!("--- trans_arg_datum passing {}", bcx.val_to_string(val)); - - if common::type_is_fat_ptr(bcx.tcx(), formal_arg_ty) { - llargs.push(Load(bcx, expr::get_dataptr(bcx, val))); - llargs.push(Load(bcx, expr::get_meta(bcx, val))); - } else { - llargs.push(val); - } - - bcx -} diff --git a/src/librustc_trans/trans/cleanup.rs b/src/librustc_trans/trans/cleanup.rs deleted file mode 100644 index ffdc2701f8158..0000000000000 --- a/src/librustc_trans/trans/cleanup.rs +++ /dev/null @@ -1,1215 +0,0 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! ## The Cleanup module -//! -//! The cleanup module tracks what values need to be cleaned up as scopes -//! are exited, either via panic or just normal control flow. The basic -//! idea is that the function context maintains a stack of cleanup scopes -//! that are pushed/popped as we traverse the AST tree. There is typically -//! at least one cleanup scope per AST node; some AST nodes may introduce -//! additional temporary scopes. -//! -//! Cleanup items can be scheduled into any of the scopes on the stack. -//! Typically, when a scope is popped, we will also generate the code for -//! each of its cleanups at that time. This corresponds to a normal exit -//! from a block (for example, an expression completing evaluation -//! successfully without panic). However, it is also possible to pop a -//! block *without* executing its cleanups; this is typically used to -//! guard intermediate values that must be cleaned up on panic, but not -//! if everything goes right. See the section on custom scopes below for -//! more details. -//! -//! Cleanup scopes come in three kinds: -//! -//! - **AST scopes:** each AST node in a function body has a corresponding -//! AST scope. We push the AST scope when we start generate code for an AST -//! node and pop it once the AST node has been fully generated. -//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are -//! never scheduled into loop scopes; instead, they are used to record the -//! basic blocks that we should branch to when a `continue` or `break` statement -//! is encountered. -//! - **Custom scopes:** custom scopes are typically used to ensure cleanup -//! of intermediate values. -//! -//! ### When to schedule cleanup -//! -//! Although the cleanup system is intended to *feel* fairly declarative, -//! 
it's still important to time calls to `schedule_clean()` correctly. -//! Basically, you should not schedule cleanup for memory until it has -//! been initialized, because if an unwind should occur before the memory -//! is fully initialized, then the cleanup will run and try to free or -//! drop uninitialized memory. If the initialization itself produces -//! byproducts that need to be freed, then you should use temporary custom -//! scopes to ensure that those byproducts will get freed on unwind. For -//! example, an expression like `box foo()` will first allocate a box in the -//! heap and then call `foo()` -- if `foo()` should panic, this box needs -//! to be *shallowly* freed. -//! -//! ### Long-distance jumps -//! -//! In addition to popping a scope, which corresponds to normal control -//! flow exiting the scope, we may also *jump out* of a scope into some -//! earlier scope on the stack. This can occur in response to a `return`, -//! `break`, or `continue` statement, but also in response to panic. In -//! any of these cases, we will generate a series of cleanup blocks for -//! each of the scopes that is exited. So, if the stack contains scopes A -//! ... Z, and we break out of a loop whose corresponding cleanup scope is -//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z. -//! After cleanup is done we would branch to the exit point for scope X. -//! But if panic should occur, we would generate cleanups for all the -//! scopes from A to Z and then resume the unwind process afterwards. -//! -//! To avoid generating tons of code, we cache the cleanup blocks that we -//! create for breaks, returns, unwinds, and other jumps. Whenever a new -//! cleanup is scheduled, though, we must clear these cached blocks. A -//! possible improvement would be to keep the cached blocks but simply -//! generate a new block which performs the additional cleanup and then -//! branches to the existing cached blocks. -//! -//! ### AST and loop cleanup scopes -//! 
-//! AST cleanup scopes are pushed when we begin and end processing an AST -//! node. They are used to house cleanups related to rvalue temporary that -//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an -//! AST scope is popped, we always trans all the cleanups, adding the cleanup -//! code after the postdominator of the AST node. -//! -//! AST nodes that represent breakable loops also push a loop scope; the -//! loop scope never has any actual cleanups, it's just used to point to -//! the basic blocks where control should flow after a "continue" or -//! "break" statement. Popping a loop scope never generates code. -//! -//! ### Custom cleanup scopes -//! -//! Custom cleanup scopes are used for a variety of purposes. The most -//! common though is to handle temporary byproducts, where cleanup only -//! needs to occur on panic. The general strategy is to push a custom -//! cleanup scope, schedule *shallow* cleanups into the custom scope, and -//! then pop the custom scope (without transing the cleanups) when -//! execution succeeds normally. This way the cleanups are only trans'd on -//! unwind, and only up until the point where execution succeeded, at -//! which time the complete value should be stored in an lvalue or some -//! other place where normal cleanup applies. -//! -//! To spell it out, here is an example. Imagine an expression `box expr`. -//! We would basically: -//! -//! 1. Push a custom cleanup scope C. -//! 2. Allocate the box. -//! 3. Schedule a shallow free in the scope C. -//! 4. Trans `expr` into the box. -//! 5. Pop the scope C. -//! 6. Return the box as an rvalue. -//! -//! This way, if a panic occurs while transing `expr`, the custom -//! cleanup scope C is pushed and hence the box will be freed. The trans -//! code for `expr` itself is responsible for freeing any other byproducts -//! that may be in play. 
- -pub use self::ScopeId::*; -pub use self::CleanupScopeKind::*; -pub use self::EarlyExitLabel::*; -pub use self::Heap::*; - -use llvm::{BasicBlockRef, ValueRef}; -use trans::base; -use trans::build; -use trans::common; -use trans::common::{Block, FunctionContext, NodeIdAndSpan}; -use trans::datum::{Datum, Lvalue}; -use trans::debuginfo::{DebugLoc, ToDebugLoc}; -use trans::glue; -use middle::region; -use trans::type_::Type; -use middle::ty::{self, Ty}; -use std::fmt; -use syntax::ast; - -pub struct CleanupScope<'blk, 'tcx: 'blk> { - // The id of this cleanup scope. If the id is None, - // this is a *temporary scope* that is pushed during trans to - // cleanup miscellaneous garbage that trans may generate whose - // lifetime is a subset of some expression. See module doc for - // more details. - kind: CleanupScopeKind<'blk, 'tcx>, - - // Cleanups to run upon scope exit. - cleanups: Vec>, - - // The debug location any drop calls generated for this scope will be - // associated with. - debug_loc: DebugLoc, - - cached_early_exits: Vec, - cached_landing_pad: Option, -} - -#[derive(Copy, Clone, Debug)] -pub struct CustomScopeIndex { - index: usize -} - -pub const EXIT_BREAK: usize = 0; -pub const EXIT_LOOP: usize = 1; -pub const EXIT_MAX: usize = 2; - -pub enum CleanupScopeKind<'blk, 'tcx: 'blk> { - CustomScopeKind, - AstScopeKind(ast::NodeId), - LoopScopeKind(ast::NodeId, [Block<'blk, 'tcx>; EXIT_MAX]) -} - -impl<'blk, 'tcx: 'blk> fmt::Debug for CleanupScopeKind<'blk, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CustomScopeKind => write!(f, "CustomScopeKind"), - AstScopeKind(nid) => write!(f, "AstScopeKind({})", nid), - LoopScopeKind(nid, ref blks) => { - try!(write!(f, "LoopScopeKind({}, [", nid)); - for blk in blks { - try!(write!(f, "{:p}, ", blk)); - } - write!(f, "])") - } - } - } -} - -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum EarlyExitLabel { - UnwindExit, - ReturnExit, - LoopExit(ast::NodeId, usize) -} - 
-#[derive(Copy, Clone)] -pub struct CachedEarlyExit { - label: EarlyExitLabel, - cleanup_block: BasicBlockRef, -} - -pub trait Cleanup<'tcx> { - fn must_unwind(&self) -> bool; - fn is_lifetime_end(&self) -> bool; - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx>; -} - -pub type CleanupObj<'tcx> = Box+'tcx>; - -#[derive(Copy, Clone, Debug)] -pub enum ScopeId { - AstScope(ast::NodeId), - CustomScope(CustomScopeIndex) -} - -#[derive(Copy, Clone, Debug)] -pub struct DropHint(pub ast::NodeId, pub K); - -pub type DropHintDatum<'tcx> = DropHint>; -pub type DropHintValue = DropHint; - -impl DropHint { - pub fn new(id: ast::NodeId, k: K) -> DropHint { DropHint(id, k) } -} - -impl DropHint { - pub fn value(&self) -> ValueRef { self.1 } -} - -pub trait DropHintMethods { - type ValueKind; - fn to_value(&self) -> Self::ValueKind; -} -impl<'tcx> DropHintMethods for DropHintDatum<'tcx> { - type ValueKind = DropHintValue; - fn to_value(&self) -> DropHintValue { DropHint(self.0, self.1.val) } -} - -impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Invoked when we start to trans the code contained within a new cleanup scope. - fn push_ast_cleanup_scope(&self, debug_loc: NodeIdAndSpan) { - debug!("push_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(debug_loc.id)); - - // FIXME(#2202) -- currently closure bodies have a parent - // region, which messes up the assertion below, since there - // are no cleanup scopes on the stack at the start of - // trans'ing a closure body. I think though that this should - // eventually be fixed by closure bodies not having a parent - // region, though that's a touch unclear, and it might also be - // better just to narrow this assertion more (i.e., by - // excluding id's that correspond to closure bodies only). For - // now we just say that if there is already an AST scope on the stack, - // this new AST scope had better be its immediate child. 
- let top_scope = self.top_ast_scope(); - let region_maps = &self.ccx.tcx().region_maps; - if top_scope.is_some() { - assert!((region_maps - .opt_encl_scope(region_maps.node_extent(debug_loc.id)) - .map(|s|s.node_id(region_maps)) == top_scope) - || - (region_maps - .opt_encl_scope(region_maps.lookup_code_extent( - region::CodeExtentData::DestructionScope(debug_loc.id))) - .map(|s|s.node_id(region_maps)) == top_scope)); - } - - self.push_scope(CleanupScope::new(AstScopeKind(debug_loc.id), - debug_loc.debug_loc())); - } - - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]) { - debug!("push_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(id)); - assert_eq!(Some(id), self.top_ast_scope()); - - // Just copy the debuginfo source location from the enclosing scope - let debug_loc = self.scopes - .borrow() - .last() - .unwrap() - .debug_loc; - - self.push_scope(CleanupScope::new(LoopScopeKind(id, exits), debug_loc)); - } - - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); - - // Just copy the debuginfo source location from the enclosing scope - let debug_loc = self.scopes - .borrow() - .last() - .map(|opt_scope| opt_scope.debug_loc) - .unwrap_or(DebugLoc::None); - - self.push_scope(CleanupScope::new(CustomScopeKind, debug_loc)); - CustomScopeIndex { index: index } - } - - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); - - self.push_scope(CleanupScope::new(CustomScopeKind, - debug_loc.debug_loc())); - CustomScopeIndex { index: index } - } - - /// Removes the cleanup scope for id `cleanup_scope`, which must be at the top of the cleanup - /// stack, and generates the code to do its cleanups for normal exit. 
- fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx> { - debug!("pop_and_trans_ast_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_ast_with_id(cleanup_scope))); - - let scope = self.pop_scope(); - self.trans_scope_cleanups(bcx, &scope) - } - - /// Removes the loop cleanup scope for id `cleanup_scope`, which must be at the top of the - /// cleanup stack. Does not generate any cleanup code, since loop scopes should exit by - /// branching to a block generated by `normal_exit_block`. - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId) { - debug!("pop_loop_cleanup_scope({})", - self.ccx.tcx().map.node_to_string(cleanup_scope)); - - assert!(self.top_scope(|s| s.kind.is_loop_with_id(cleanup_scope))); - - let _ = self.pop_scope(); - } - - /// Removes the top cleanup scope from the stack without executing its cleanups. The top - /// cleanup scope must be the temporary scope `custom_scope`. - fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex) { - debug!("pop_custom_cleanup_scope({})", custom_scope.index); - assert!(self.is_valid_to_pop_custom_scope(custom_scope)); - let _ = self.pop_scope(); - } - - /// Removes the top cleanup scope from the stack, which must be a temporary scope, and - /// generates the code to do its cleanups for normal exit. 
- fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { - debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); - assert!(self.is_valid_to_pop_custom_scope(custom_scope)); - - let scope = self.pop_scope(); - self.trans_scope_cleanups(bcx, &scope) - } - - /// Returns the id of the top-most loop scope - fn top_loop_scope(&self) -> ast::NodeId { - for scope in self.scopes.borrow().iter().rev() { - if let LoopScopeKind(id, _) = scope.kind { - return id; - } - } - self.ccx.sess().bug("no loop scope found"); - } - - /// Returns a block to branch to which will perform all pending cleanups and then - /// break/continue (depending on `exit`) out of the loop with id `cleanup_scope` - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(LoopExit(cleanup_scope, exit)) - } - - /// Returns a block to branch to which will perform all pending cleanups and then return from - /// this function - fn return_exit_block(&'blk self) -> BasicBlockRef { - self.trans_cleanups_to_exit_scope(ReturnExit) - } - - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef) { - let drop = box LifetimeEnd { - ptr: val, - }; - - debug!("schedule_lifetime_end({:?}, val={})", - cleanup_scope, - self.ccx.tn().val_to_string(val)); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { - if !self.type_needs_drop(ty) { return; } - let drop_hint = drop_hint.map(|hint|hint.to_value()); - let drop = box DropValue { - is_immediate: false, - val: val, - ty: ty, - fill_on_drop: false, - skip_dtor: false, - drop_hint: drop_hint, - }; - - debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", - 
cleanup_scope, - self.ccx.tn().val_to_string(val), - ty, - drop.fill_on_drop, - drop.skip_dtor); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a (deep) drop and filling of `val`, which is a pointer to an instance of `ty` - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>) { - if !self.type_needs_drop(ty) { return; } - - let drop_hint = drop_hint.map(|datum|datum.to_value()); - let drop = box DropValue { - is_immediate: false, - val: val, - ty: ty, - fill_on_drop: true, - skip_dtor: false, - drop_hint: drop_hint, - }; - - debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, - fill_on_drop={}, skip_dtor={}, has_drop_hint={})", - cleanup_scope, - self.ccx.tn().val_to_string(val), - ty, - drop.fill_on_drop, - drop.skip_dtor, - drop_hint.is_some()); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Issue #23611: Schedules a (deep) drop of the contents of - /// `val`, which is a pointer to an instance of struct/enum type - /// `ty`. The scheduled code handles extracting the discriminant - /// and dropping the contents associated with that variant - /// *without* executing any associated drop implementation. - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { - // `if` below could be "!contents_needs_drop"; skipping drop - // is just an optimization, so sound to be conservative. 
- if !self.type_needs_drop(ty) { return; } - - let drop = box DropValue { - is_immediate: false, - val: val, - ty: ty, - fill_on_drop: false, - skip_dtor: true, - drop_hint: None, - }; - - debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", - cleanup_scope, - self.ccx.tn().val_to_string(val), - ty, - drop.fill_on_drop, - drop.skip_dtor); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a (deep) drop of `val`, which is an instance of `ty` - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { - - if !self.type_needs_drop(ty) { return; } - let drop = Box::new(DropValue { - is_immediate: true, - val: val, - ty: ty, - fill_on_drop: false, - skip_dtor: false, - drop_hint: None, - }); - - debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", - cleanup_scope, - self.ccx.tn().val_to_string(val), - ty, - drop.fill_on_drop, - drop.skip_dtor); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - /// Schedules a call to `free(val)`. Note that this is a shallow operation. - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>) { - let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; - - debug!("schedule_free_value({:?}, val={}, heap={:?})", - cleanup_scope, - self.ccx.tn().val_to_string(val), - heap); - - self.schedule_clean(cleanup_scope, drop as CleanupObj); - } - - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>) { - match cleanup_scope { - AstScope(id) => self.schedule_clean_in_ast_scope(id, cleanup), - CustomScope(id) => self.schedule_clean_in_custom_scope(id, cleanup), - } - } - - /// Schedules a cleanup to occur upon exit from `cleanup_scope`. If `cleanup_scope` is not - /// provided, then the cleanup is scheduled in the topmost scope, which must be a temporary - /// scope. 
- fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>) { - debug!("schedule_clean_in_ast_scope(cleanup_scope={})", - cleanup_scope); - - for scope in self.scopes.borrow_mut().iter_mut().rev() { - if scope.kind.is_ast_with_id(cleanup_scope) { - scope.cleanups.push(cleanup); - scope.clear_cached_exits(); - return; - } else { - // will be adding a cleanup to some enclosing scope - scope.clear_cached_exits(); - } - } - - self.ccx.sess().bug( - &format!("no cleanup scope {} found", - self.ccx.tcx().map.node_to_string(cleanup_scope))); - } - - /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>) { - debug!("schedule_clean_in_custom_scope(custom_scope={})", - custom_scope.index); - - assert!(self.is_valid_custom_scope(custom_scope)); - - let mut scopes = self.scopes.borrow_mut(); - let scope = &mut (*scopes)[custom_scope.index]; - scope.cleanups.push(cleanup); - scope.clear_cached_exits(); - } - - /// Returns true if there are pending cleanups that should execute on panic. - fn needs_invoke(&self) -> bool { - self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) - } - - /// Returns a basic block to branch to in the event of a panic. This block will run the panic - /// cleanups and eventually invoke the LLVM `Resume` instruction. 
- fn get_landing_pad(&'blk self) -> BasicBlockRef { - let _icx = base::push_ctxt("get_landing_pad"); - - debug!("get_landing_pad"); - - let orig_scopes_len = self.scopes_len(); - assert!(orig_scopes_len > 0); - - // Remove any scopes that do not have cleanups on panic: - let mut popped_scopes = vec!(); - while !self.top_scope(|s| s.needs_invoke()) { - debug!("top scope does not need invoke"); - popped_scopes.push(self.pop_scope()); - } - - // Check for an existing landing pad in the new topmost scope: - let llbb = self.get_or_create_landing_pad(); - - // Push the scopes we removed back on: - loop { - match popped_scopes.pop() { - Some(scope) => self.push_scope(scope), - None => break - } - } - - assert_eq!(self.scopes_len(), orig_scopes_len); - - return llbb; - } -} - -impl<'blk, 'tcx> CleanupHelperMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> { - /// Returns the id of the current top-most AST scope, if any. - fn top_ast_scope(&self) -> Option { - for scope in self.scopes.borrow().iter().rev() { - match scope.kind { - CustomScopeKind | LoopScopeKind(..) 
=> {} - AstScopeKind(i) => { - return Some(i); - } - } - } - None - } - - fn top_nonempty_cleanup_scope(&self) -> Option { - self.scopes.borrow().iter().rev().position(|s| !s.cleanups.is_empty()) - } - - fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - self.is_valid_custom_scope(custom_scope) && - custom_scope.index == self.scopes.borrow().len() - 1 - } - - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() && - (*scopes)[custom_scope.index].kind.is_temp() - } - - /// Generates the cleanups for `scope` into `bcx` - fn trans_scope_cleanups(&self, // cannot borrow self, will recurse - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx> { - - let mut bcx = bcx; - if !bcx.unreachable.get() { - for cleanup in scope.cleanups.iter().rev() { - bcx = cleanup.trans(bcx, scope.debug_loc); - } - } - bcx - } - - fn scopes_len(&self) -> usize { - self.scopes.borrow().len() - } - - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>) { - self.scopes.borrow_mut().push(scope) - } - - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx> { - debug!("popping cleanup scope {}, {} scopes remaining", - self.top_scope(|s| s.block_name("")), - self.scopes_len() - 1); - - self.scopes.borrow_mut().pop().unwrap() - } - - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R { - f(self.scopes.borrow().last().unwrap()) - } - - /// Used when the caller wishes to jump to an early exit, such as a return, break, continue, or - /// unwind. This function will generate all cleanups between the top of the stack and the exit - /// `label` and return a basic block that the caller can branch to. 
- /// - /// For example, if the current stack of cleanups were as follows: - /// - /// AST 22 - /// Custom 1 - /// AST 23 - /// Loop 23 - /// Custom 2 - /// AST 24 - /// - /// and the `label` specifies a break from `Loop 23`, then this function would generate a - /// series of basic blocks as follows: - /// - /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk - /// - /// where `break_blk` is the block specified in `Loop 23` as the target for breaks. The return - /// value would be the first basic block in that sequence (`Cleanup(AST 24)`). The caller could - /// then branch to `Cleanup(AST 24)` and it will perform all cleanups and finally branch to the - /// `break_blk`. - fn trans_cleanups_to_exit_scope(&'blk self, - label: EarlyExitLabel) - -> BasicBlockRef { - debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", - label, self.scopes_len()); - - let orig_scopes_len = self.scopes_len(); - let mut prev_llbb; - let mut popped_scopes = vec!(); - - // First we pop off all the cleanup stacks that are - // traversed until the exit is reached, pushing them - // onto the side vector `popped_scopes`. No code is - // generated at this time. - // - // So, continuing the example from above, we would wind up - // with a `popped_scopes` vector of `[AST 24, Custom 2]`. - // (Presuming that there are no cached exits) - loop { - if self.scopes_len() == 0 { - match label { - UnwindExit => { - // Generate a block that will `Resume`. 
- let prev_bcx = self.new_block(true, "resume", None); - let personality = self.personality.get().expect( - "create_landing_pad() should have set this"); - let lp = build::Load(prev_bcx, personality); - base::call_lifetime_end(prev_bcx, personality); - base::trans_unwind_resume(prev_bcx, lp); - prev_llbb = prev_bcx.llbb; - break; - } - - ReturnExit => { - prev_llbb = self.get_llreturn(); - break; - } - - LoopExit(id, _) => { - self.ccx.sess().bug(&format!( - "cannot exit from scope {}, \ - not in scope", id)); - } - } - } - - // Check if we have already cached the unwinding of this - // scope for this label. If so, we can stop popping scopes - // and branch to the cached label, since it contains the - // cleanups for any subsequent scopes. - match self.top_scope(|s| s.cached_early_exit(label)) { - Some(cleanup_block) => { - prev_llbb = cleanup_block; - break; - } - None => { } - } - - // Pop off the scope, since we will be generating - // unwinding code for it. If we are searching for a loop exit, - // and this scope is that loop, then stop popping and set - // `prev_llbb` to the appropriate exit block from the loop. - popped_scopes.push(self.pop_scope()); - let scope = popped_scopes.last().unwrap(); - match label { - UnwindExit | ReturnExit => { } - LoopExit(id, exit) => { - match scope.kind.early_exit_block(id, exit) { - Some(exitllbb) => { - prev_llbb = exitllbb; - break; - } - - None => { } - } - } - } - } - - debug!("trans_cleanups_to_exit_scope: popped {} scopes", - popped_scopes.len()); - - // Now push the popped scopes back on. As we go, - // we track in `prev_llbb` the exit to which this scope - // should branch when it's done. - // - // So, continuing with our example, we will start out with - // `prev_llbb` being set to `break_blk` (or possibly a cached - // early exit). We will then pop the scopes from `popped_scopes` - // and generate a basic block for each one, prepending it in the - // series and updating `prev_llbb`. 
So we begin by popping `Custom 2` - // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)` - // branch to `prev_llbb == break_blk`, giving us a sequence like: - // - // Cleanup(Custom 2) -> prev_llbb - // - // We then pop `AST 24` and repeat the process, giving us the sequence: - // - // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb - // - // At this point, `popped_scopes` is empty, and so the final block - // that we return to the user is `Cleanup(AST 24)`. - while let Some(mut scope) = popped_scopes.pop() { - if !scope.cleanups.is_empty() { - let name = scope.block_name("clean"); - debug!("generating cleanups for {}", name); - let bcx_in = self.new_block(label.is_unwind(), - &name[..], - None); - let mut bcx_out = bcx_in; - for cleanup in scope.cleanups.iter().rev() { - bcx_out = cleanup.trans(bcx_out, - scope.debug_loc); - } - build::Br(bcx_out, prev_llbb, DebugLoc::None); - prev_llbb = bcx_in.llbb; - - scope.add_cached_early_exit(label, prev_llbb); - } - self.push_scope(scope); - } - - debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb); - - assert_eq!(self.scopes_len(), orig_scopes_len); - prev_llbb - } - - /// Creates a landing pad for the top scope, if one does not exist. The landing pad will - /// perform all cleanups necessary for an unwind and then `resume` to continue error - /// propagation: - /// - /// landing_pad -> ... cleanups ... -> [resume] - /// - /// (The cleanups and resume instruction are created by `trans_cleanups_to_exit_scope()`, not - /// in this function itself.) - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { - let pad_bcx; - - debug!("get_or_create_landing_pad"); - - // Check if a landing pad block exists; if not, create one. 
- { - let mut scopes = self.scopes.borrow_mut(); - let last_scope = scopes.last_mut().unwrap(); - match last_scope.cached_landing_pad { - Some(llbb) => { return llbb; } - None => { - let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(true, &name[..], None); - last_scope.cached_landing_pad = Some(pad_bcx.llbb); - } - } - } - - // The landing pad return type (the type being propagated). Not sure what - // this represents but it's determined by the personality function and - // this is what the EH proposal example uses. - let llretty = Type::struct_(self.ccx, - &[Type::i8p(self.ccx), Type::i32(self.ccx)], - false); - - let llpersonality = pad_bcx.fcx.eh_personality(); - - // The only landing pad clause will be 'cleanup' - let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1); - - // The landing pad block is a cleanup - build::SetCleanup(pad_bcx, llretval); - - // We store the retval in a function-central alloca, so that calls to - // Resume can find it. - match self.personality.get() { - Some(addr) => { - build::Store(pad_bcx, llretval, addr); - } - None => { - let addr = base::alloca(pad_bcx, common::val_ty(llretval), ""); - base::call_lifetime_start(pad_bcx, addr); - self.personality.set(Some(addr)); - build::Store(pad_bcx, llretval, addr); - } - } - - // Generate the cleanup block and branch to it. 
- let cleanup_llbb = self.trans_cleanups_to_exit_scope(UnwindExit); - build::Br(pad_bcx, cleanup_llbb, DebugLoc::None); - - return pad_bcx.llbb; - } -} - -impl<'blk, 'tcx> CleanupScope<'blk, 'tcx> { - fn new(kind: CleanupScopeKind<'blk, 'tcx>, - debug_loc: DebugLoc) - -> CleanupScope<'blk, 'tcx> { - CleanupScope { - kind: kind, - debug_loc: debug_loc, - cleanups: vec!(), - cached_early_exits: vec!(), - cached_landing_pad: None, - } - } - - fn clear_cached_exits(&mut self) { - self.cached_early_exits = vec!(); - self.cached_landing_pad = None; - } - - fn cached_early_exit(&self, - label: EarlyExitLabel) - -> Option { - self.cached_early_exits.iter(). - find(|e| e.label == label). - map(|e| e.cleanup_block) - } - - fn add_cached_early_exit(&mut self, - label: EarlyExitLabel, - blk: BasicBlockRef) { - self.cached_early_exits.push( - CachedEarlyExit { label: label, - cleanup_block: blk }); - } - - /// True if this scope has cleanups that need unwinding - fn needs_invoke(&self) -> bool { - - self.cached_landing_pad.is_some() || - self.cleanups.iter().any(|c| c.must_unwind()) - } - - /// Returns a suitable name to use for the basic block that handles this cleanup scope - fn block_name(&self, prefix: &str) -> String { - match self.kind { - CustomScopeKind => format!("{}_custom_", prefix), - AstScopeKind(id) => format!("{}_ast_{}_", prefix, id), - LoopScopeKind(id, _) => format!("{}_loop_{}_", prefix, id), - } - } - - /// Manipulate cleanup scope for call arguments. Conceptually, each - /// argument to a call is an lvalue, and performing the call moves each - /// of the arguments into a new rvalue (which gets cleaned up by the - /// callee). As an optimization, instead of actually performing all of - /// those moves, trans just manipulates the cleanup scope to obtain the - /// same effect. 
- pub fn drop_non_lifetime_clean(&mut self) { - self.cleanups.retain(|c| c.is_lifetime_end()); - self.clear_cached_exits(); - } -} - -impl<'blk, 'tcx> CleanupScopeKind<'blk, 'tcx> { - fn is_temp(&self) -> bool { - match *self { - CustomScopeKind => true, - LoopScopeKind(..) | AstScopeKind(..) => false, - } - } - - fn is_ast_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | LoopScopeKind(..) => false, - AstScopeKind(i) => i == id - } - } - - fn is_loop_with_id(&self, id: ast::NodeId) -> bool { - match *self { - CustomScopeKind | AstScopeKind(..) => false, - LoopScopeKind(i, _) => i == id - } - } - - /// If this is a loop scope with id `id`, return the early exit block `exit`, else `None` - fn early_exit_block(&self, - id: ast::NodeId, - exit: usize) -> Option { - match *self { - LoopScopeKind(i, ref exits) if id == i => Some(exits[exit].llbb), - _ => None, - } - } -} - -impl EarlyExitLabel { - fn is_unwind(&self) -> bool { - match *self { - UnwindExit => true, - _ => false - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// Cleanup types - -#[derive(Copy, Clone)] -pub struct DropValue<'tcx> { - is_immediate: bool, - val: ValueRef, - ty: Ty<'tcx>, - fill_on_drop: bool, - skip_dtor: bool, - drop_hint: Option, -} - -impl<'tcx> Cleanup<'tcx> for DropValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - let skip_dtor = self.skip_dtor; - let _icx = if skip_dtor { - base::push_ctxt("::trans skip_dtor=true") - } else { - base::push_ctxt("::trans skip_dtor=false") - }; - let bcx = if self.is_immediate { - glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) - } else { - glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor, self.drop_hint) - }; - if self.fill_on_drop { - base::drop_done_fill_mem(bcx, self.val, 
self.ty); - } - bcx - } -} - -#[derive(Copy, Clone, Debug)] -pub enum Heap { - HeapExchange -} - -#[derive(Copy, Clone)] -pub struct FreeValue<'tcx> { - ptr: ValueRef, - heap: Heap, - content_ty: Ty<'tcx> -} - -impl<'tcx> Cleanup<'tcx> for FreeValue<'tcx> { - fn must_unwind(&self) -> bool { - true - } - - fn is_lifetime_end(&self) -> bool { - false - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - match self.heap { - HeapExchange => { - glue::trans_exchange_free_ty(bcx, - self.ptr, - self.content_ty, - debug_loc) - } - } - } -} - -#[derive(Copy, Clone)] -pub struct LifetimeEnd { - ptr: ValueRef, -} - -impl<'tcx> Cleanup<'tcx> for LifetimeEnd { - fn must_unwind(&self) -> bool { - false - } - - fn is_lifetime_end(&self) -> bool { - true - } - - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - debug_loc.apply(bcx.fcx); - base::call_lifetime_end(bcx, self.ptr); - bcx - } -} - -pub fn temporary_scope(tcx: &ty::ctxt, - id: ast::NodeId) - -> ScopeId { - match tcx.region_maps.temporary_scope(id) { - Some(scope) => { - let r = AstScope(scope.node_id(&tcx.region_maps)); - debug!("temporary_scope({}) = {:?}", id, r); - r - } - None => { - tcx.sess.bug(&format!("no temporary scope available for expr {}", - id)) - } - } -} - -pub fn var_scope(tcx: &ty::ctxt, - id: ast::NodeId) - -> ScopeId { - let r = AstScope(tcx.region_maps.var_scope(id).node_id(&tcx.region_maps)); - debug!("var_scope({}) = {:?}", id, r); - r -} - -/////////////////////////////////////////////////////////////////////////// -// These traits just exist to put the methods into this file. 
- -pub trait CleanupMethods<'blk, 'tcx> { - fn push_ast_cleanup_scope(&self, id: NodeIdAndSpan); - fn push_loop_cleanup_scope(&self, - id: ast::NodeId, - exits: [Block<'blk, 'tcx>; EXIT_MAX]); - fn push_custom_cleanup_scope(&self) -> CustomScopeIndex; - fn push_custom_cleanup_scope_with_debug_loc(&self, - debug_loc: NodeIdAndSpan) - -> CustomScopeIndex; - fn pop_and_trans_ast_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - cleanup_scope: ast::NodeId) - -> Block<'blk, 'tcx>; - fn pop_loop_cleanup_scope(&self, - cleanup_scope: ast::NodeId); - fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex); - fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx>; - fn top_loop_scope(&self) -> ast::NodeId; - fn normal_exit_block(&'blk self, - cleanup_scope: ast::NodeId, - exit: usize) -> BasicBlockRef; - fn return_exit_block(&'blk self) -> BasicBlockRef; - fn schedule_lifetime_end(&self, - cleanup_scope: ScopeId, - val: ValueRef); - fn schedule_drop_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_and_fill_mem(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>, - drop_hint: Option>); - fn schedule_drop_adt_contents(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_drop_immediate(&self, - cleanup_scope: ScopeId, - val: ValueRef, - ty: Ty<'tcx>); - fn schedule_free_value(&self, - cleanup_scope: ScopeId, - val: ValueRef, - heap: Heap, - content_ty: Ty<'tcx>); - fn schedule_clean(&self, - cleanup_scope: ScopeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_ast_scope(&self, - cleanup_scope: ast::NodeId, - cleanup: CleanupObj<'tcx>); - fn schedule_clean_in_custom_scope(&self, - custom_scope: CustomScopeIndex, - cleanup: CleanupObj<'tcx>); - fn needs_invoke(&self) -> bool; - fn get_landing_pad(&'blk self) -> BasicBlockRef; -} - -trait CleanupHelperMethods<'blk, 'tcx> { - fn 
top_ast_scope(&self) -> Option; - fn top_nonempty_cleanup_scope(&self) -> Option; - fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool; - fn trans_scope_cleanups(&self, - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'blk, 'tcx>) -> Block<'blk, 'tcx>; - fn trans_cleanups_to_exit_scope(&'blk self, - label: EarlyExitLabel) - -> BasicBlockRef; - fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef; - fn scopes_len(&self) -> usize; - fn push_scope(&self, scope: CleanupScope<'blk, 'tcx>); - fn pop_scope(&self) -> CleanupScope<'blk, 'tcx>; - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'blk, 'tcx>) -> R; -} diff --git a/src/librustc_trans/trans/closure.rs b/src/librustc_trans/trans/closure.rs deleted file mode 100644 index 5bdfc099f0880..0000000000000 --- a/src/librustc_trans/trans/closure.rs +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use arena::TypedArena; -use back::link::{self, mangle_internal_name_by_path_and_seq}; -use llvm::{ValueRef, get_params}; -use middle::def_id::DefId; -use middle::infer; -use trans::adt; -use trans::attributes; -use trans::base::*; -use trans::build::*; -use trans::callee::{self, ArgVals, Callee, TraitItem, MethodData}; -use trans::cleanup::{CleanupMethods, CustomScope, ScopeId}; -use trans::common::*; -use trans::datum::{self, Datum, rvalue_scratch_datum, Rvalue}; -use trans::debuginfo::{self, DebugLoc}; -use trans::declare; -use trans::expr; -use trans::monomorphize::{MonoId}; -use trans::type_of::*; -use trans::Disr; -use middle::ty; -use session::config::FullDebugInfo; - -use syntax::abi::RustCall; -use syntax::ast; -use syntax::attr::{ThinAttributes, ThinAttributesExt}; - -use rustc_front::hir; - - -fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - closure_def_id: DefId, - arg_scope_id: ScopeId, - freevars: &[ty::Freevar]) - -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("closure::load_closure_environment"); - - // Special case for small by-value selfs. 
- let closure_ty = node_id_type(bcx, bcx.fcx.id); - let self_type = self_type_for_closure(bcx.ccx(), closure_def_id, closure_ty); - let kind = kind_for_closure(bcx.ccx(), closure_def_id); - let llenv = if kind == ty::FnOnceClosureKind && - !arg_is_indirect(bcx.ccx(), self_type) { - let datum = rvalue_scratch_datum(bcx, - self_type, - "closure_env"); - store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type); - datum.val - } else { - bcx.fcx.llenv.unwrap() - }; - - // Store the pointer to closure data in an alloca for debug info because that's what the - // llvm.dbg.declare intrinsic expects - let env_pointer_alloca = if bcx.sess().opts.debuginfo == FullDebugInfo { - let alloc = alloca(bcx, val_ty(llenv), "__debuginfo_env_ptr"); - Store(bcx, llenv, alloc); - Some(alloc) - } else { - None - }; - - for (i, freevar) in freevars.iter().enumerate() { - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: bcx.fcx.id }; - let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); - let mut upvar_ptr = StructGEP(bcx, llenv, i); - let captured_by_ref = match upvar_capture { - ty::UpvarCapture::ByValue => false, - ty::UpvarCapture::ByRef(..) 
=> { - upvar_ptr = Load(bcx, upvar_ptr); - true - } - }; - let node_id = freevar.def.var_id(); - bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr); - - if kind == ty::FnOnceClosureKind && !captured_by_ref { - let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); - bcx.fcx.schedule_drop_mem(arg_scope_id, - upvar_ptr, - node_id_type(bcx, node_id), - hint) - } - - if let Some(env_pointer_alloca) = env_pointer_alloca { - debuginfo::create_captured_var_metadata( - bcx, - node_id, - env_pointer_alloca, - i, - captured_by_ref, - freevar.span); - } - } - - bcx -} - -pub enum ClosureEnv<'a> { - NotClosure, - Closure(DefId, &'a [ty::Freevar]), -} - -impl<'a> ClosureEnv<'a> { - pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) - -> Block<'blk, 'tcx> - { - match self { - ClosureEnv::NotClosure => bcx, - ClosureEnv::Closure(def_id, freevars) => { - if freevars.is_empty() { - bcx - } else { - load_closure_environment(bcx, def_id, arg_scope, freevars) - } - } - } - } -} - -/// Returns the LLVM function declaration for a closure, creating it if -/// necessary. If the ID does not correspond to a closure ID, returns None. 
-pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - closure_id: DefId, - substs: &ty::ClosureSubsts<'tcx>) - -> ValueRef { - // Normalize type so differences in regions and typedefs don't cause - // duplicate declarations - let substs = ccx.tcx().erase_regions(substs); - let mono_id = MonoId { - def: closure_id, - params: &substs.func_substs.types - }; - - if let Some(&llfn) = ccx.closure_vals().borrow().get(&mono_id) { - debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}", - mono_id, ccx.tn().val_to_string(llfn)); - return llfn; - } - - let path = ccx.tcx().def_path(closure_id); - let symbol = mangle_internal_name_by_path_and_seq(path, "closure"); - - let function_type = ccx.tcx().mk_closure_from_closure_substs(closure_id, Box::new(substs)); - let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type); - - // set an inline hint for all closures - attributes::inline(llfn, attributes::InlineAttr::Hint); - - debug!("get_or_create_declaration_if_closure(): inserting new \ - closure {:?} (type {}): {:?}", - mono_id, - ccx.tn().type_to_string(val_ty(llfn)), - ccx.tn().val_to_string(llfn)); - ccx.closure_vals().borrow_mut().insert(mono_id, llfn); - - llfn -} - -pub enum Dest<'a, 'tcx: 'a> { - SaveIn(Block<'a, 'tcx>, ValueRef), - Ignore(&'a CrateContext<'a, 'tcx>) -} - -pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - id: ast::NodeId, - closure_def_id: DefId, // (*) - closure_substs: &'tcx ty::ClosureSubsts<'tcx>, - closure_expr_attrs: &ThinAttributes) - -> Option> -{ - // (*) Note that in the case of inlined functions, the `closure_def_id` will be the - // defid of the closure in its original crate, whereas `id` will be the id of the local - // inlined copy. 
- - let param_substs = closure_substs.func_substs; - - let ccx = match dest { - Dest::SaveIn(bcx, _) => bcx.ccx(), - Dest::Ignore(ccx) => ccx - }; - let tcx = ccx.tcx(); - let _icx = push_ctxt("closure::trans_closure_expr"); - - debug!("trans_closure_expr(id={:?}, closure_def_id={:?}, closure_substs={:?})", - id, closure_def_id, closure_substs); - - let llfn = get_or_create_closure_declaration(ccx, closure_def_id, closure_substs); - - // Get the type of this closure. Use the current `param_substs` as - // the closure substitutions. This makes sense because the closure - // takes the same set of type arguments as the enclosing fn, and - // this function (`trans_closure`) is invoked at the point - // of the closure expression. - - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables); - let function_type = infcx.closure_type(closure_def_id, closure_substs); - - let freevars: Vec = - tcx.with_freevars(id, |fv| fv.iter().cloned().collect()); - - let sig = tcx.erase_late_bound_regions(&function_type.sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - - trans_closure(ccx, - decl, - body, - llfn, - param_substs, - id, - closure_expr_attrs.as_attr_slice(), - sig.output, - function_type.abi, - ClosureEnv::Closure(closure_def_id, &freevars)); - - // Don't hoist this to the top of the function. It's perfectly legitimate - // to have a zero-size closure (in which case dest will be `Ignore`) and - // we must still generate the closure body. - let (mut bcx, dest_addr) = match dest { - Dest::SaveIn(bcx, p) => (bcx, p), - Dest::Ignore(_) => { - debug!("trans_closure_expr() ignoring result"); - return None; - } - }; - - let repr = adt::represent_type(ccx, node_id_type(bcx, id)); - - // Create the closure. 
- for (i, freevar) in freevars.iter().enumerate() { - let datum = expr::trans_local_var(bcx, freevar.def); - let upvar_slot_dest = adt::trans_field_ptr( - bcx, &*repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); - let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), - closure_expr_id: id }; - match tcx.upvar_capture(upvar_id).unwrap() { - ty::UpvarCapture::ByValue => { - bcx = datum.store_to(bcx, upvar_slot_dest); - } - ty::UpvarCapture::ByRef(..) => { - Store(bcx, datum.to_llref(), upvar_slot_dest); - } - } - } - adt::trans_set_discr(bcx, &*repr, dest_addr, Disr(0)); - - Some(bcx) -} - -pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, - closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>, - trait_closure_kind: ty::ClosureKind) - -> ValueRef -{ - // If this is a closure, redirect to it. - let llfn = get_or_create_closure_declaration(ccx, closure_def_id, &substs); - - // If the closure is a Fn closure, but a FnOnce is needed (etc), - // then adapt the self type - let closure_kind = ccx.tcx().closure_kind(closure_def_id); - trans_closure_adapter_shim(ccx, - closure_def_id, - substs, - closure_kind, - trait_closure_kind, - llfn) -} - -fn trans_closure_adapter_shim<'a, 'tcx>( - ccx: &'a CrateContext<'a, 'tcx>, - closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>, - llfn_closure_kind: ty::ClosureKind, - trait_closure_kind: ty::ClosureKind, - llfn: ValueRef) - -> ValueRef -{ - let _icx = push_ctxt("trans_closure_adapter_shim"); - let tcx = ccx.tcx(); - - debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ - trait_closure_kind={:?}, \ - llfn={})", - llfn_closure_kind, - trait_closure_kind, - ccx.tn().val_to_string(llfn)); - - match (llfn_closure_kind, trait_closure_kind) { - (ty::FnClosureKind, ty::FnClosureKind) | - (ty::FnMutClosureKind, ty::FnMutClosureKind) | - (ty::FnOnceClosureKind, ty::FnOnceClosureKind) => { - // No adapter needed. 
- llfn - } - (ty::FnClosureKind, ty::FnMutClosureKind) => { - // The closure fn `llfn` is a `fn(&self, ...)`. We want a - // `fn(&mut self, ...)`. In fact, at trans time, these are - // basically the same thing, so we can just return llfn. - llfn - } - (ty::FnClosureKind, ty::FnOnceClosureKind) | - (ty::FnMutClosureKind, ty::FnOnceClosureKind) => { - // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut - // self, ...)`. We want a `fn(self, ...)`. We can produce - // this by doing something like: - // - // fn call_once(self, ...) { call_mut(&self, ...) } - // fn call_once(mut self, ...) { call_mut(&mut self, ...) } - // - // These are both the same at trans time. - trans_fn_once_adapter_shim(ccx, closure_def_id, substs, llfn) - } - _ => { - tcx.sess.bug(&format!("trans_closure_adapter_shim: cannot convert {:?} to {:?}", - llfn_closure_kind, - trait_closure_kind)); - } - } -} - -fn trans_fn_once_adapter_shim<'a, 'tcx>( - ccx: &'a CrateContext<'a, 'tcx>, - closure_def_id: DefId, - substs: ty::ClosureSubsts<'tcx>, - llreffn: ValueRef) - -> ValueRef -{ - debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={})", - closure_def_id, - substs, - ccx.tn().val_to_string(llreffn)); - - let tcx = ccx.tcx(); - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables); - - // Find a version of the closure type. Substitute static for the - // region since it doesn't really matter. - let closure_ty = tcx.mk_closure_from_closure_substs(closure_def_id, Box::new(substs.clone())); - let ref_closure_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), closure_ty); - - // Make a version with the type of by-ref closure. 
- let ty::ClosureTy { unsafety, abi, mut sig } = infcx.closure_type(closure_def_id, &substs); - sig.0.inputs.insert(0, ref_closure_ty); // sig has no self type as of yet - let llref_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety, - abi: abi, - sig: sig.clone() }); - let llref_fn_ty = tcx.mk_fn(None, llref_bare_fn_ty); - debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}", - llref_fn_ty); - - // Make a version of the closure type with the same arguments, but - // with argument #0 being by value. - assert_eq!(abi, RustCall); - sig.0.inputs[0] = closure_ty; - let llonce_bare_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: unsafety, - abi: abi, - sig: sig }); - let llonce_fn_ty = tcx.mk_fn(None, llonce_bare_fn_ty); - - // Create the by-value helper. - let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim"); - let lloncefn = declare::define_internal_rust_fn(ccx, &function_name, - llonce_fn_ty); - let sig = tcx.erase_late_bound_regions(&llonce_bare_fn_ty.sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - lloncefn, - ast::DUMMY_NODE_ID, - false, - sig.output, - substs.func_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); - - let llargs = get_params(fcx.llfn); - - // the first argument (`self`) will be the (by value) closure env. 
- let self_scope = fcx.push_custom_cleanup_scope(); - let self_scope_id = CustomScope(self_scope); - let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty); - let self_idx = fcx.arg_offset(); - let llself = llargs[self_idx]; - let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode)); - let env_datum = unpack_datum!(bcx, - env_datum.to_lvalue_datum_in_scope(bcx, "self", - self_scope_id)); - - debug!("trans_fn_once_adapter_shim: env_datum={}", - bcx.val_to_string(env_datum.val)); - - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))); - - let callee_data = TraitItem(MethodData { llfn: llreffn, - llself: env_datum.val }); - - bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| { - Callee { - bcx: bcx, - data: callee_data, - ty: llref_fn_ty - } - }, ArgVals(&llargs[(self_idx + 1)..]), dest).bcx; - - fcx.pop_custom_cleanup_scope(self_scope); - - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); - - lloncefn -} diff --git a/src/librustc_trans/trans/common.rs b/src/librustc_trans/trans/common.rs deleted file mode 100644 index b73e5ff3e038e..0000000000000 --- a/src/librustc_trans/trans/common.rs +++ /dev/null @@ -1,1187 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(non_camel_case_types, non_snake_case)] - -//! Code that is useful in various trans modules. 
- -pub use self::ExprOrMethodCall::*; - -use session::Session; -use llvm; -use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; -use llvm::{True, False, Bool}; -use middle::cfg; -use middle::def; -use middle::def_id::DefId; -use middle::infer; -use middle::lang_items::LangItem; -use middle::subst::{self, Substs}; -use trans::base; -use trans::build; -use trans::callee; -use trans::cleanup; -use trans::consts; -use trans::datum; -use trans::debuginfo::{self, DebugLoc}; -use trans::declare; -use trans::machine; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of; -use middle::traits; -use middle::ty::{self, Ty}; -use middle::ty::fold::{TypeFolder, TypeFoldable}; -use rustc_front::hir; -use rustc::mir::repr::Mir; -use util::nodemap::{FnvHashMap, NodeMap}; - -use arena::TypedArena; -use libc::{c_uint, c_char}; -use std::ffi::CString; -use std::cell::{Cell, RefCell}; -use std::vec::Vec; -use syntax::ast; -use syntax::codemap::{DUMMY_SP, Span}; -use syntax::parse::token::InternedString; -use syntax::parse::token; - -pub use trans::context::CrateContext; - -/// Is the type's representation size known at compile time? 
-pub fn type_is_sized<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(&tcx.empty_parameter_environment(), DUMMY_SP) -} - -pub fn type_is_fat_ptr<'tcx>(cx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyBox(ty) => { - !type_is_sized(cx, ty) - } - _ => { - false - } - } -} - -fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::TyStruct(def, substs) => { - let fields = &def.struct_variant().fields; - fields.len() == 1 && { - type_is_immediate(ccx, monomorphize::field_ty(ccx.tcx(), substs, &fields[0])) - } - } - _ => false - } -} - -pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - use trans::machine::llsize_of_alloc; - use trans::type_of::sizing_type_of; - - let tcx = ccx.tcx(); - let simple = ty.is_scalar() || - ty.is_unique() || ty.is_region_ptr() || - type_is_newtype_immediate(ccx, ty) || - ty.is_simd(); - if simple && !type_is_fat_ptr(tcx, ty) { - return true; - } - if !type_is_sized(tcx, ty) { - return false; - } - match ty.sty { - ty::TyStruct(..) | ty::TyEnum(..) | ty::TyTuple(..) | ty::TyArray(_, _) | - ty::TyClosure(..) => { - let llty = sizing_type_of(ccx, ty); - llsize_of_alloc(ccx, llty) <= llsize_of_alloc(ccx, ccx.int_type()) - } - _ => type_is_zero_size(ccx, ty) - } -} - -/// Identify types which have size zero at runtime. -pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - use trans::machine::llsize_of_alloc; - use trans::type_of::sizing_type_of; - let llty = sizing_type_of(ccx, ty); - llsize_of_alloc(ccx, llty) == 0 -} - -/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function -/// return types. These are `()`, bot, uninhabited enums and all other zero-sized types. 
-pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty) -} - -/// Generates a unique symbol based off the name given. This is used to create -/// unique symbols for things like closures. -pub fn gensym_name(name: &str) -> ast::Name { - let num = token::gensym(name).0; - // use one colon which will get translated to a period by the mangler, and - // we're guaranteed that `num` is globally unique for this crate. - token::gensym(&format!("{}:{}", name, num)) -} - -/* -* A note on nomenclature of linking: "extern", "foreign", and "upcall". -* -* An "extern" is an LLVM symbol we wind up emitting an undefined external -* reference to. This means "we don't have the thing in this compilation unit, -* please make sure you link it in at runtime". This could be a reference to -* C code found in a C library, or rust code found in a rust crate. -* -* Most "externs" are implicitly declared (automatically) as a result of a -* user declaring an extern _module_ dependency; this causes the rust driver -* to locate an extern crate, scan its compilation metadata, and emit extern -* declarations for any symbols used by the declaring crate. -* -* A "foreign" is an extern that references C (or other non-rust ABI) code. -* There is no metadata to scan for extern references so in these cases either -* a header-digester like bindgen, or manual function prototypes, have to -* serve as declarators. So these are usually given explicitly as prototype -* declarations, in rust code, with ABI attributes on them noting which ABI to -* link via. -* -* An "upcall" is a foreign call generated by the compiler (not corresponding -* to any user-written call in the code) into the runtime library, to perform -* some helper task such as bringing a task to life, allocating memory, etc. 
-* -*/ - -use trans::Disr; - -#[derive(Copy, Clone)] -pub struct NodeIdAndSpan { - pub id: ast::NodeId, - pub span: Span, -} - -pub fn expr_info(expr: &hir::Expr) -> NodeIdAndSpan { - NodeIdAndSpan { id: expr.id, span: expr.span } -} - -/// The concrete version of ty::FieldDef. The name is the field index if -/// the field is numeric. -pub struct Field<'tcx>(pub ast::Name, pub Ty<'tcx>); - -/// The concrete version of ty::VariantDef -pub struct VariantInfo<'tcx> { - pub discr: Disr, - pub fields: Vec> -} - -impl<'tcx> VariantInfo<'tcx> { - pub fn from_ty(tcx: &ty::ctxt<'tcx>, - ty: Ty<'tcx>, - opt_def: Option) - -> Self - { - match ty.sty { - ty::TyStruct(adt, substs) | ty::TyEnum(adt, substs) => { - let variant = match opt_def { - None => adt.struct_variant(), - Some(def) => adt.variant_of_def(def) - }; - - VariantInfo { - discr: Disr::from(variant.disr_val), - fields: variant.fields.iter().map(|f| { - Field(f.name, monomorphize::field_ty(tcx, substs, f)) - }).collect() - } - } - - ty::TyTuple(ref v) => { - VariantInfo { - discr: Disr(0), - fields: v.iter().enumerate().map(|(i, &t)| { - Field(token::intern(&i.to_string()), t) - }).collect() - } - } - - _ => { - tcx.sess.bug(&format!( - "cannot get field types from the type {:?}", - ty)); - } - } - } - - /// Return the variant corresponding to a given node (e.g. 
expr) - pub fn of_node(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>, id: ast::NodeId) -> Self { - let node_def = tcx.def_map.borrow().get(&id).map(|v| v.full_def()); - Self::from_ty(tcx, ty, node_def) - } - - pub fn field_index(&self, name: ast::Name) -> usize { - self.fields.iter().position(|&Field(n,_)| n == name).unwrap_or_else(|| { - panic!("unknown field `{}`", name) - }) - } -} - -pub struct BuilderRef_res { - pub b: BuilderRef, -} - -impl Drop for BuilderRef_res { - fn drop(&mut self) { - unsafe { - llvm::LLVMDisposeBuilder(self.b); - } - } -} - -pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { - BuilderRef_res { - b: b - } -} - -pub type ExternMap = FnvHashMap; - -pub fn validate_substs(substs: &Substs) { - assert!(!substs.types.needs_infer()); -} - -// work around bizarre resolve errors -type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>; -pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>; - -#[derive(Clone, Debug)] -struct HintEntry<'tcx> { - // The datum for the dropflag-hint itself; note that many - // source-level Lvalues will be associated with the same - // dropflag-hint datum. - datum: cleanup::DropHintDatum<'tcx>, -} - -pub struct DropFlagHintsMap<'tcx> { - // Maps NodeId for expressions that read/write unfragmented state - // to that state's drop-flag "hint." (A stack-local hint - // indicates either that (1.) it is certain that no-drop is - // needed, or (2.) inline drop-flag must be consulted.) - node_map: NodeMap>, -} - -impl<'tcx> DropFlagHintsMap<'tcx> { - pub fn new() -> DropFlagHintsMap<'tcx> { DropFlagHintsMap { node_map: NodeMap() } } - pub fn has_hint(&self, id: ast::NodeId) -> bool { self.node_map.contains_key(&id) } - pub fn insert(&mut self, id: ast::NodeId, datum: cleanup::DropHintDatum<'tcx>) { - self.node_map.insert(id, HintEntry { datum: datum }); - } - pub fn hint_datum(&self, id: ast::NodeId) -> Option> { - self.node_map.get(&id).map(|t|t.datum) - } -} - -// Function context. 
Every LLVM function we create will have one of -// these. -pub struct FunctionContext<'a, 'tcx: 'a> { - // The MIR for this function. At present, this is optional because - // we only have MIR available for things that are local to the - // crate. - pub mir: Option<&'a Mir<'tcx>>, - - // The ValueRef returned from a call to llvm::LLVMAddFunction; the - // address of the first instruction in the sequence of - // instructions for this function that will go in the .text - // section of the executable we're generating. - pub llfn: ValueRef, - - // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv - pub param_env: ty::ParameterEnvironment<'a, 'tcx>, - - // The environment argument in a closure. - pub llenv: Option, - - // A pointer to where to store the return value. If the return type is - // immediate, this points to an alloca in the function. Otherwise, it's a - // pointer to the hidden first parameter of the function. After function - // construction, this should always be Some. - pub llretslotptr: Cell>, - - // These pub elements: "hoisted basic blocks" containing - // administrative activities that have to happen in only one place in - // the function, due to LLVM's quirks. - // A marker for the place where we want to insert the function's static - // allocas, so that LLVM will coalesce them into a single alloca call. - pub alloca_insert_pt: Cell>, - pub llreturn: Cell>, - - // If the function has any nested return's, including something like: - // fn foo() -> Option { Some(Foo { x: return None }) }, then - // we use a separate alloca for each return - pub needs_ret_allocas: bool, - - // The a value alloca'd for calls to upcalls.rust_personality. Used when - // outputting the resume instruction. - pub personality: Cell>, - - // True if the caller expects this fn to use the out pointer to - // return. Either way, your code should write into the slot llretslotptr - // points to, but if this value is false, that slot will be a local alloca. 
- pub caller_expects_out_pointer: bool, - - // Maps the DefId's for local variables to the allocas created for - // them in llallocas. - pub lllocals: RefCell>>, - - // Same as above, but for closure upvars - pub llupvars: RefCell>, - - // Carries info about drop-flags for local bindings (longer term, - // paths) for the code being compiled. - pub lldropflag_hints: RefCell>, - - // The NodeId of the function, or -1 if it doesn't correspond to - // a user-defined function. - pub id: ast::NodeId, - - // If this function is being monomorphized, this contains the type - // substitutions used. - pub param_substs: &'tcx Substs<'tcx>, - - // The source span and nesting context where this function comes from, for - // error reporting and symbol generation. - pub span: Option, - - // The arena that blocks are allocated from. - pub block_arena: &'a TypedArena>, - - // This function's enclosing crate context. - pub ccx: &'a CrateContext<'a, 'tcx>, - - // Used and maintained by the debuginfo module. - pub debug_context: debuginfo::FunctionDebugContext, - - // Cleanup scopes. 
- pub scopes: RefCell>>, - - pub cfg: Option, -} - -impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - pub fn mir(&self) -> &'a Mir<'tcx> { - self.mir.unwrap() - } - - pub fn arg_offset(&self) -> usize { - self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 } - } - - pub fn env_arg_pos(&self) -> usize { - if self.caller_expects_out_pointer { - 1 - } else { - 0 - } - } - - pub fn cleanup(&self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt - .get() - .unwrap()); - } - } - - pub fn get_llreturn(&self) -> BasicBlockRef { - if self.llreturn.get().is_none() { - - self.llreturn.set(Some(unsafe { - llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), self.llfn, - "return\0".as_ptr() as *const _) - })) - } - - self.llreturn.get().unwrap() - } - - pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, - output: ty::FnOutput<'tcx>, - name: &str) -> ValueRef { - if self.needs_ret_allocas { - base::alloca(bcx, match output { - ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type), - ty::FnDiverging => Type::void(bcx.ccx()) - }, name) - } else { - self.llretslotptr.get().unwrap() - } - } - - pub fn new_block(&'a self, - is_lpad: bool, - name: &str, - opt_node_id: Option) - -> Block<'a, 'tcx> { - unsafe { - let name = CString::new(name).unwrap(); - let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), - self.llfn, - name.as_ptr()); - BlockS::new(llbb, is_lpad, opt_node_id, self) - } - } - - pub fn new_id_block(&'a self, - name: &str, - node_id: ast::NodeId) - -> Block<'a, 'tcx> { - self.new_block(false, name, Some(node_id)) - } - - pub fn new_temp_block(&'a self, - name: &str) - -> Block<'a, 'tcx> { - self.new_block(false, name, None) - } - - pub fn join_blocks(&'a self, - id: ast::NodeId, - in_cxs: &[Block<'a, 'tcx>]) - -> Block<'a, 'tcx> { - let out = self.new_id_block("join", id); - let mut reachable = false; - for bcx in in_cxs { - if !bcx.unreachable.get() { - build::Br(*bcx, out.llbb, DebugLoc::None); - reachable = 
true; - } - } - if !reachable { - build::Unreachable(out); - } - return out; - } - - pub fn monomorphize(&self, value: &T) -> T - where T : TypeFoldable<'tcx> - { - monomorphize::apply_param_substs(self.ccx.tcx(), - self.param_substs, - value) - } - - /// This is the same as `common::type_needs_drop`, except that it - /// may use or update caches within this `FunctionContext`. - pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env) - } - - pub fn eh_personality(&self) -> ValueRef { - // The exception handling personality function. - // - // If our compilation unit has the `eh_personality` lang item somewhere - // within it, then we just need to translate that. Otherwise, we're - // building an rlib which will depend on some upstream implementation of - // this function, so we just codegen a generic reference to it. We don't - // specify any of the types for the function, we just make it a symbol - // that LLVM can later use. - // - // Note that MSVC is a little special here in that we don't use the - // `eh_personality` lang item at all. Currently LLVM has support for - // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the - // *name of the personality function* to decide what kind of unwind side - // tables/landing pads to emit. It looks like Dwarf is used by default, - // injecting a dependency on the `_Unwind_Resume` symbol for resuming - // an "exception", but for MSVC we want to force SEH. This means that we - // can't actually have the personality function be our standard - // `rust_eh_personality` function, but rather we wired it up to the - // CRT's custom personality function, which forces LLVM to consider - // landing pads as "landing pads for SEH". 
- let target = &self.ccx.sess().target.target; - match self.ccx.tcx().lang_items.eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.ccx.sess()) => { - callee::trans_fn_ref(self.ccx, def_id, ExprId(0), - self.param_substs).val - } - _ => { - let mut personality = self.ccx.eh_personality().borrow_mut(); - match *personality { - Some(llpersonality) => llpersonality, - None => { - let name = if !base::wants_msvc_seh(self.ccx.sess()) { - "rust_eh_personality" - } else if target.arch == "x86" { - "_except_handler3" - } else { - "__C_specific_handler" - }; - let fty = Type::variadic_func(&[], &Type::i32(self.ccx)); - let f = declare::declare_cfn(self.ccx, name, fty, - self.ccx.tcx().types.i32); - *personality = Some(f); - f - } - } - } - } - } - - // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, - // otherwise declares it as an external funtion. - pub fn eh_unwind_resume(&self) -> ValueRef { - use trans::attributes; - assert!(self.ccx.sess().target.target.options.custom_unwind_resume); - match self.ccx.tcx().lang_items.eh_unwind_resume() { - Some(def_id) => { - callee::trans_fn_ref(self.ccx, def_id, ExprId(0), - self.param_substs).val - } - None => { - let mut unwresume = self.ccx.eh_unwind_resume().borrow_mut(); - match *unwresume { - Some(llfn) => llfn, - None => { - let fty = Type::func(&[Type::i8p(self.ccx)], &Type::void(self.ccx)); - let llfn = declare::declare_fn(self.ccx, - "rust_eh_unwind_resume", - llvm::CCallConv, - fty, ty::FnDiverging); - attributes::unwind(llfn, true); - *unwresume = Some(llfn); - llfn - } - } - } - } - } -} - -// Basic block context. We create a block context for each basic block -// (single-entry, single-exit sequence of instructions) we generate from Rust -// code. Each basic block we generate is attached to a function, typically -// with many basic blocks per function. All the basic blocks attached to a -// function are organized as a directed graph. 
-pub struct BlockS<'blk, 'tcx: 'blk> { - // The BasicBlockRef returned from a call to - // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic - // block to the function pointed to by llfn. We insert - // instructions into that block by way of this block context. - // The block pointing to this one in the function's digraph. - pub llbb: BasicBlockRef, - pub terminated: Cell, - pub unreachable: Cell, - - // Is this block part of a landing pad? - pub is_lpad: bool, - - // AST node-id associated with this block, if any. Used for - // debugging purposes only. - pub opt_node_id: Option, - - // The function context for the function to which this block is - // attached. - pub fcx: &'blk FunctionContext<'blk, 'tcx>, -} - -pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>; - -impl<'blk, 'tcx> BlockS<'blk, 'tcx> { - pub fn new(llbb: BasicBlockRef, - is_lpad: bool, - opt_node_id: Option, - fcx: &'blk FunctionContext<'blk, 'tcx>) - -> Block<'blk, 'tcx> { - fcx.block_arena.alloc(BlockS { - llbb: llbb, - terminated: Cell::new(false), - unreachable: Cell::new(false), - is_lpad: is_lpad, - opt_node_id: opt_node_id, - fcx: fcx - }) - } - - pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { - self.fcx.ccx - } - pub fn tcx(&self) -> &'blk ty::ctxt<'tcx> { - self.fcx.ccx.tcx() - } - pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() } - - pub fn mir(&self) -> &'blk Mir<'tcx> { - self.fcx.mir() - } - - pub fn name(&self, name: ast::Name) -> String { - name.to_string() - } - - pub fn node_id_to_string(&self, id: ast::NodeId) -> String { - self.tcx().map.node_to_string(id).to_string() - } - - pub fn def(&self, nid: ast::NodeId) -> def::Def { - match self.tcx().def_map.borrow().get(&nid) { - Some(v) => v.full_def(), - None => { - self.tcx().sess.bug(&format!( - "no def associated with node id {}", nid)); - } - } - } - - pub fn val_to_string(&self, val: ValueRef) -> String { - self.ccx().tn().val_to_string(val) - } - - pub fn llty_str(&self, ty: Type) -> String { - 
self.ccx().tn().type_to_string(ty) - } - - pub fn to_str(&self) -> String { - format!("[block {:p}]", self) - } - - pub fn monomorphize(&self, value: &T) -> T - where T : TypeFoldable<'tcx> - { - monomorphize::apply_param_substs(self.tcx(), - self.fcx.param_substs, - value) - } -} - -pub struct Result<'blk, 'tcx: 'blk> { - pub bcx: Block<'blk, 'tcx>, - pub val: ValueRef -} - -impl<'b, 'tcx> Result<'b, 'tcx> { - pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> { - Result { - bcx: bcx, - val: val, - } - } -} - -pub fn val_ty(v: ValueRef) -> Type { - unsafe { - Type::from_ref(llvm::LLVMTypeOf(v)) - } -} - -// LLVM constant constructors. -pub fn C_null(t: Type) -> ValueRef { - unsafe { - llvm::LLVMConstNull(t.to_ref()) - } -} - -pub fn C_undef(t: Type) -> ValueRef { - unsafe { - llvm::LLVMGetUndef(t.to_ref()) - } -} - -pub fn C_integral(t: Type, u: u64, sign_extend: bool) -> ValueRef { - unsafe { - llvm::LLVMConstInt(t.to_ref(), u, sign_extend as Bool) - } -} - -pub fn C_floating(s: &str, t: Type) -> ValueRef { - unsafe { - let s = CString::new(s).unwrap(); - llvm::LLVMConstRealOfString(t.to_ref(), s.as_ptr()) - } -} - -pub fn C_floating_f64(f: f64, t: Type) -> ValueRef { - unsafe { - llvm::LLVMConstReal(t.to_ref(), f) - } -} - -pub fn C_nil(ccx: &CrateContext) -> ValueRef { - C_struct(ccx, &[], false) -} - -pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { - C_integral(Type::i1(ccx), val as u64, false) -} - -pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef { - C_integral(Type::i32(ccx), i as u64, true) -} - -pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef { - C_integral(Type::i32(ccx), i as u64, false) -} - -pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { - C_integral(Type::i64(ccx), i, false) -} - -pub fn C_int(ccx: &CrateContext, i: I) -> ValueRef { - let v = i.as_i64(); - - let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - if bit_size < 64 { - // make sure it doesn't overflow - assert!(v < 
(1<<(bit_size-1)) && v >= -(1<<(bit_size-1))); - } - - C_integral(ccx.int_type(), v as u64, true) -} - -pub fn C_uint(ccx: &CrateContext, i: I) -> ValueRef { - let v = i.as_u64(); - - let bit_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - if bit_size < 64 { - // make sure it doesn't overflow - assert!(v < (1< i64; } -pub trait AsU64 { fn as_u64(self) -> u64; } - -// FIXME: remove the intptr conversions, because they -// are host-architecture-dependent -impl AsI64 for i64 { fn as_i64(self) -> i64 { self as i64 }} -impl AsI64 for i32 { fn as_i64(self) -> i64 { self as i64 }} -impl AsI64 for isize { fn as_i64(self) -> i64 { self as i64 }} - -impl AsU64 for u64 { fn as_u64(self) -> u64 { self as u64 }} -impl AsU64 for u32 { fn as_u64(self) -> u64 { self as u64 }} -impl AsU64 for usize { fn as_u64(self) -> u64 { self as u64 }} - -pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef { - C_integral(Type::i8(ccx), i as u64, false) -} - - -// This is a 'c-like' raw string, which differs from -// our boxed-and-length-annotated strings. -pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef { - unsafe { - match cx.const_cstr_cache().borrow().get(&s) { - Some(&llval) => return llval, - None => () - } - - let sc = llvm::LLVMConstStringInContext(cx.llcx(), - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - - let gsym = token::gensym("str"); - let sym = format!("str{}", gsym.0); - let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{ - cx.sess().bug(&format!("symbol `{}` is already defined", sym)); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::SetLinkage(g, llvm::InternalLinkage); - - cx.const_cstr_cache().borrow_mut().insert(s, g); - g - } -} - -// NB: Do not use `do_spill_noroot` to make this into a constant string, or -// you will be kicked off fast isel. See issue #4352 for an example of this. 
-pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { - let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx)); - C_named_struct(cx.tn().find_type("str_slice").unwrap(), &[cs, C_uint(cx, len)]) -} - -pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { - C_struct_in_context(cx.llcx(), elts, packed) -} - -pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) - } -} - -pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef { - unsafe { - llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint) - } -} - -pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef { - unsafe { - return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint); - } -} - -pub fn C_vector(elts: &[ValueRef]) -> ValueRef { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); - } -} - -pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef { - C_bytes_in_context(cx.llcx(), bytes) -} - -pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); - } -} - -pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint]) - -> ValueRef { - unsafe { - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - - debug!("const_get_elt(v={}, us={:?}, r={})", - cx.tn().val_to_string(v), us, cx.tn().val_to_string(r)); - - return r; - } -} - -pub fn const_to_int(v: ValueRef) -> i64 { - unsafe { - llvm::LLVMConstIntGetSExtValue(v) - } -} - -pub fn const_to_uint(v: ValueRef) -> u64 { - unsafe { - llvm::LLVMConstIntGetZExtValue(v) - } -} - -fn is_const_integral(v: ValueRef) -> bool { - unsafe { - !llvm::LLVMIsAConstantInt(v).is_null() - } -} - -pub fn 
const_to_opt_int(v: ValueRef) -> Option { - unsafe { - if is_const_integral(v) { - Some(llvm::LLVMConstIntGetSExtValue(v)) - } else { - None - } - } -} - -pub fn const_to_opt_uint(v: ValueRef) -> Option { - unsafe { - if is_const_integral(v) { - Some(llvm::LLVMConstIntGetZExtValue(v)) - } else { - None - } - } -} - -pub fn is_undef(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsUndef(val) != False - } -} - -#[allow(dead_code)] // potentially useful -pub fn is_null(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsNull(val) != False - } -} - -pub fn monomorphize_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { - bcx.fcx.monomorphize(&t) -} - -pub fn node_id_type<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, id: ast::NodeId) -> Ty<'tcx> { - let tcx = bcx.tcx(); - let t = tcx.node_id_to_type(id); - monomorphize_type(bcx, t) -} - -pub fn expr_ty<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> { - node_id_type(bcx, ex.id) -} - -pub fn expr_ty_adjusted<'blk, 'tcx>(bcx: &BlockS<'blk, 'tcx>, ex: &hir::Expr) -> Ty<'tcx> { - monomorphize_type(bcx, bcx.tcx().expr_ty_adjusted(ex)) -} - -/// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we -/// do not (necessarily) resolve all nested obligations on the impl. Note that type check should -/// guarantee to us that all nested obligations *could be* resolved if we wanted to. -pub fn fulfill_obligation<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - span: Span, - trait_ref: ty::PolyTraitRef<'tcx>) - -> traits::Vtable<'tcx, ()> -{ - let tcx = ccx.tcx(); - - // Remove any references to regions; this helps improve caching. - let trait_ref = tcx.erase_regions(&trait_ref); - - // First check the cache. 
- match ccx.trait_cache().borrow().get(&trait_ref) { - Some(vtable) => { - info!("Cache hit: {:?}", trait_ref); - return (*vtable).clone(); - } - None => { } - } - - debug!("trans fulfill_obligation: trait_ref={:?} def_id={:?}", - trait_ref, trait_ref.def_id()); - - - // Do the initial selection for the obligation. This yields the - // shallow result we are looking for -- that is, what specific impl. - let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables); - let mut selcx = traits::SelectionContext::new(&infcx); - - let obligation = - traits::Obligation::new(traits::ObligationCause::misc(span, ast::DUMMY_NODE_ID), - trait_ref.to_poly_trait_predicate()); - let selection = match selcx.select(&obligation) { - Ok(Some(selection)) => selection, - Ok(None) => { - // Ambiguity can happen when monomorphizing during trans - // expands to some humongo type that never occurred - // statically -- this humongo type can then overflow, - // leading to an ambiguous result. So report this as an - // overflow bug, since I believe this is the only case - // where ambiguity can result. - debug!("Encountered ambiguity selecting `{:?}` during trans, \ - presuming due to overflow", - trait_ref); - ccx.sess().span_fatal( - span, - "reached the recursion limit during monomorphization (selection ambiguity)"); - } - Err(e) => { - tcx.sess.span_bug( - span, - &format!("Encountered error `{:?}` selecting `{:?}` during trans", - e, - trait_ref)) - } - }; - - // Currently, we use a fulfillment context to completely resolve - // all nested obligations. This is because they can inform the - // inference of the impl's type parameters. 
- let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut(); - let vtable = selection.map(|predicate| { - fulfill_cx.register_predicate_obligation(&infcx, predicate); - }); - let vtable = infer::drain_fulfillment_cx_or_panic( - span, &infcx, &mut fulfill_cx, &vtable - ); - - info!("Cache miss: {:?} => {:?}", trait_ref, vtable); - - ccx.trait_cache().borrow_mut().insert(trait_ref, vtable.clone()); - - vtable -} - -/// Normalizes the predicates and checks whether they hold. If this -/// returns false, then either normalize encountered an error or one -/// of the predicates did not hold. Used when creating vtables to -/// check for unsatisfiable methods. -pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - predicates: Vec>) - -> bool -{ - debug!("normalize_and_test_predicates(predicates={:?})", - predicates); - - let tcx = ccx.tcx(); - let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables); - let mut selcx = traits::SelectionContext::new(&infcx); - let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut(); - let cause = traits::ObligationCause::dummy(); - let traits::Normalized { value: predicates, obligations } = - traits::normalize(&mut selcx, cause.clone(), &predicates); - for obligation in obligations { - fulfill_cx.register_predicate_obligation(&infcx, obligation); - } - for predicate in predicates { - let obligation = traits::Obligation::new(cause.clone(), predicate); - fulfill_cx.register_predicate_obligation(&infcx, obligation); - } - - infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok() -} - -// Key used to lookup values supplied for type parameters in an expr. 
-#[derive(Copy, Clone, PartialEq, Debug)] -pub enum ExprOrMethodCall { - // Type parameters for a path like `None::` - ExprId(ast::NodeId), - - // Type parameters for a method call like `a.foo::()` - MethodCallKey(ty::MethodCall) -} - -pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node: ExprOrMethodCall, - param_substs: &subst::Substs<'tcx>) - -> subst::Substs<'tcx> { - let tcx = ccx.tcx(); - - let substs = match node { - ExprId(id) => { - tcx.node_id_item_substs(id).substs - } - MethodCallKey(method_call) => { - tcx.tables.borrow().method_map[&method_call].substs.clone() - } - }; - - if substs.types.needs_infer() { - tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}", - node, substs)); - } - - monomorphize::apply_param_substs(tcx, - param_substs, - &substs.erase_regions()) -} - -pub fn langcall(bcx: Block, - span: Option, - msg: &str, - li: LangItem) - -> DefId { - match bcx.tcx().lang_items.require(li) { - Ok(id) => id, - Err(s) => { - let msg = format!("{} {}", msg, s); - match span { - Some(span) => bcx.tcx().sess.span_fatal(span, &msg[..]), - None => bcx.tcx().sess.fatal(&msg[..]), - } - } - } -} - -/// Return the VariantDef corresponding to an inlined variant node -pub fn inlined_variant_def<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - inlined_vid: ast::NodeId) - -> ty::VariantDef<'tcx> -{ - - let ctor_ty = ccx.tcx().node_id_to_type(inlined_vid); - debug!("inlined_variant_def: ctor_ty={:?} inlined_vid={:?}", ctor_ty, - inlined_vid); - let adt_def = match ctor_ty.sty { - ty::TyBareFn(_, &ty::BareFnTy { sig: ty::Binder(ty::FnSig { - output: ty::FnConverging(ty), .. 
- }), ..}) => ty, - _ => ctor_ty - }.ty_adt_def().unwrap(); - let inlined_vid_def_id = ccx.tcx().map.local_def_id(inlined_vid); - adt_def.variants.iter().find(|v| { - inlined_vid_def_id == v.did || - ccx.external().borrow().get(&v.did) == Some(&Some(inlined_vid)) - }).unwrap_or_else(|| { - ccx.sess().bug(&format!("no variant for {:?}::{}", adt_def, inlined_vid)) - }) -} - -// To avoid UB from LLVM, these two functions mask RHS with an -// appropriate mask unconditionally (i.e. the fallback behavior for -// all shifts). For 32- and 64-bit types, this matches the semantics -// of Java. (See related discussion on #1877 and #10183.) - -pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { - let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); - build::Shl(bcx, lhs, rhs, binop_debug_loc) -} - -pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { - let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); - let is_signed = lhs_t.is_signed(); - if is_signed { - build::AShr(bcx, lhs, rhs, binop_debug_loc) - } else { - build::LShr(bcx, lhs, rhs, binop_debug_loc) - } -} - -fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - rhs: ValueRef, - debug_loc: DebugLoc) -> ValueRef { - let rhs_llty = val_ty(rhs); - build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc) -} - -pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llty: Type, - mask_llty: Type, - invert: bool) -> ValueRef { - let kind = llty.kind(); - match kind { - TypeKind::Integer => { - // i8/u8 can shift by at most 7, i16/u16 by at most 15, 
etc. - let val = llty.int_width() - 1; - if invert { - C_integral(mask_llty, !val, true) - } else { - C_integral(mask_llty, val, false) - } - }, - TypeKind::Vector => { - let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert); - build::VectorSplat(bcx, mask_llty.vector_length(), mask) - }, - _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind), - } -} - -pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - did: DefId, - ty: Ty<'tcx>) - -> ValueRef { - if let Some(node_id) = ccx.tcx().map.as_local_node_id(did) { - base::get_item_val(ccx, node_id) - } else { - base::get_extern_const(ccx, did, ty) - } -} diff --git a/src/librustc_trans/trans/consts.rs b/src/librustc_trans/trans/consts.rs deleted file mode 100644 index 0fc879707331d..0000000000000 --- a/src/librustc_trans/trans/consts.rs +++ /dev/null @@ -1,1081 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- - -use back::abi; -use llvm; -use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr}; -use llvm::{InternalLinkage, ValueRef, Bool, True}; -use middle::{check_const, def}; -use middle::cstore::LOCAL_CRATE; -use middle::const_eval::{self, ConstVal, ConstEvalErr}; -use middle::const_eval::{const_int_checked_neg, const_uint_checked_neg}; -use middle::const_eval::{const_int_checked_add, const_uint_checked_add}; -use middle::const_eval::{const_int_checked_sub, const_uint_checked_sub}; -use middle::const_eval::{const_int_checked_mul, const_uint_checked_mul}; -use middle::const_eval::{const_int_checked_div, const_uint_checked_div}; -use middle::const_eval::{const_int_checked_rem, const_uint_checked_rem}; -use middle::const_eval::{const_int_checked_shl, const_uint_checked_shl}; -use middle::const_eval::{const_int_checked_shr, const_uint_checked_shr}; -use middle::const_eval::EvalHint::ExprTypeChecked; -use middle::const_eval::eval_const_expr_partial; -use middle::def_id::DefId; -use trans::{adt, closure, debuginfo, expr, inline, machine}; -use trans::base::{self, push_ctxt}; -use trans::common::{self, type_is_sized, ExprOrMethodCall, node_id_substs, C_nil, const_get_elt}; -use trans::common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty}; -use trans::common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint}; -use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null, ExprId, MethodCallKey}; -use trans::declare; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of; -use trans::Disr; -use middle::subst::Substs; -use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; -use middle::ty::adjustment::AdjustUnsafeFnPointer; -use middle::ty::{self, Ty}; -use middle::ty::cast::{CastTy,IntTy}; -use util::nodemap::NodeMap; - -use rustc_front::hir; - -use std::ffi::{CStr, CString}; -use std::borrow::Cow; -use libc::c_uint; -use syntax::ast; -use syntax::attr; -use 
syntax::parse::token; -use syntax::ptr::P; - -pub type FnArgMap<'a> = Option<&'a NodeMap>; - -pub fn const_lit(cx: &CrateContext, e: &hir::Expr, lit: &ast::Lit) - -> ValueRef { - let _icx = push_ctxt("trans_lit"); - debug!("const_lit: {:?}", lit); - match lit.node { - ast::LitByte(b) => C_integral(Type::uint_from_ty(cx, ast::TyU8), b as u64, false), - ast::LitChar(i) => C_integral(Type::char(cx), i as u64, false), - ast::LitInt(i, ast::SignedIntLit(t, _)) => { - C_integral(Type::int_from_ty(cx, t), i, true) - } - ast::LitInt(u, ast::UnsignedIntLit(t)) => { - C_integral(Type::uint_from_ty(cx, t), u, false) - } - ast::LitInt(i, ast::UnsuffixedIntLit(_)) => { - let lit_int_ty = cx.tcx().node_id_to_type(e.id); - match lit_int_ty.sty { - ty::TyInt(t) => { - C_integral(Type::int_from_ty(cx, t), i as u64, true) - } - ty::TyUint(t) => { - C_integral(Type::uint_from_ty(cx, t), i as u64, false) - } - _ => cx.sess().span_bug(lit.span, - &format!("integer literal has type {:?} (expected int \ - or usize)", - lit_int_ty)) - } - } - ast::LitFloat(ref fs, t) => { - C_floating(&fs, Type::float_from_ty(cx, t)) - } - ast::LitFloatUnsuffixed(ref fs) => { - let lit_float_ty = cx.tcx().node_id_to_type(e.id); - match lit_float_ty.sty { - ty::TyFloat(t) => { - C_floating(&fs, Type::float_from_ty(cx, t)) - } - _ => { - cx.sess().span_bug(lit.span, - "floating point literal doesn't have the right type"); - } - } - } - ast::LitBool(b) => C_bool(cx, b), - ast::LitStr(ref s, _) => C_str_slice(cx, (*s).clone()), - ast::LitByteStr(ref data) => { - addr_of(cx, C_bytes(cx, &data[..]), 1, "byte_str") - } - } -} - -pub fn ptrcast(val: ValueRef, ty: Type) -> ValueRef { - unsafe { - llvm::LLVMConstPointerCast(val, ty.to_ref()) - } -} - -fn addr_of_mut(ccx: &CrateContext, - cv: ValueRef, - align: machine::llalign, - kind: &str) - -> ValueRef { - unsafe { - // FIXME: this totally needs a better name generation scheme, perhaps a simple global - // counter? Also most other uses of gensym in trans. 
- let gsym = token::gensym("_"); - let name = format!("{}{}", kind, gsym.0); - let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{ - ccx.sess().bug(&format!("symbol `{}` is already defined", name)); - }); - llvm::LLVMSetInitializer(gv, cv); - llvm::LLVMSetAlignment(gv, align); - SetLinkage(gv, InternalLinkage); - SetUnnamedAddr(gv, true); - gv - } -} - -pub fn addr_of(ccx: &CrateContext, - cv: ValueRef, - align: machine::llalign, - kind: &str) - -> ValueRef { - match ccx.const_globals().borrow().get(&cv) { - Some(&gv) => { - unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // alignment requirements - if align > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, align); - } - } - return gv; - } - None => {} - } - let gv = addr_of_mut(ccx, cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } - ccx.const_globals().borrow_mut().insert(cv, gv); - gv -} - -fn const_deref_ptr(cx: &CrateContext, v: ValueRef) -> ValueRef { - let v = match cx.const_unsized().borrow().get(&v) { - Some(&v) => v, - None => v - }; - unsafe { - llvm::LLVMGetInitializer(v) - } -} - -fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - v: ValueRef, - ty: Ty<'tcx>) - -> (ValueRef, Ty<'tcx>) { - match ty.builtin_deref(true, ty::NoPreference) { - Some(mt) => { - if type_is_sized(cx.tcx(), mt.ty) { - (const_deref_ptr(cx, v), mt.ty) - } else { - // Derefing a fat pointer does not change the representation, - // just the type to the unsized contents. 
- (v, mt.ty) - } - } - None => { - cx.sess().bug(&format!("unexpected dereferenceable type {:?}", - ty)) - } - } -} - -fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - node: ExprOrMethodCall, - def_id: DefId, - arg_vals: &[ValueRef], - param_substs: &'tcx Substs<'tcx>, - trueconst: TrueConst) -> Result { - let fn_like = const_eval::lookup_const_fn_by_id(ccx.tcx(), def_id); - let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call"); - - let args = &fn_like.decl().inputs; - assert_eq!(args.len(), arg_vals.len()); - - let arg_ids = args.iter().map(|arg| arg.pat.id); - let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect(); - - let substs = ccx.tcx().mk_substs(node_id_substs(ccx, node, param_substs)); - match fn_like.body().expr { - Some(ref expr) => { - const_expr(ccx, &**expr, substs, Some(&fn_args), trueconst).map(|(res, _)| res) - }, - None => Ok(C_nil(ccx)), - } -} - -pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - ref_expr: &hir::Expr, - param_substs: &'tcx Substs<'tcx>) - -> &'tcx hir::Expr { - let def_id = inline::maybe_instantiate_inline(ccx, def_id); - - if def_id.krate != LOCAL_CRATE { - ccx.sess().span_bug(ref_expr.span, - "cross crate constant could not be inlined"); - } - - match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(ref_expr.id), Some(param_substs)) { - Some(ref expr) => expr, - None => { - ccx.sess().span_bug(ref_expr.span, "constant item not found") - } - } -} - -pub enum ConstEvalFailure { - /// in case the const evaluator failed on something that panic at runtime - /// as defined in RFC 1229 - Runtime(ConstEvalErr), - // in case we found a true constant - Compiletime(ConstEvalErr), -} - -impl ConstEvalFailure { - fn into_inner(self) -> ConstEvalErr { - match self { - Runtime(e) => e, - Compiletime(e) => e, - } - } - pub fn description(&self) -> Cow { - match self { - &Runtime(ref e) => e.description(), - &Compiletime(ref e) => e.description(), - } - } -} - 
-#[derive(Copy, Clone)] -pub enum TrueConst { - Yes, No -} - -use self::ConstEvalFailure::*; - -fn get_const_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - ref_expr: &hir::Expr, - param_substs: &'tcx Substs<'tcx>) - -> Result { - let expr = get_const_expr(ccx, def_id, ref_expr, param_substs); - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - match get_const_expr_as_global(ccx, expr, check_const::ConstQualif::empty(), - empty_substs, TrueConst::Yes) { - Err(Runtime(err)) => { - ccx.tcx().sess.span_err(expr.span, &err.description()); - Err(Compiletime(err)) - }, - other => other, - } -} - -pub fn get_const_expr_as_global<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - expr: &hir::Expr, - qualif: check_const::ConstQualif, - param_substs: &'tcx Substs<'tcx>, - trueconst: TrueConst) - -> Result { - debug!("get_const_expr_as_global: {:?}", expr.id); - // Special-case constants to cache a common global for all uses. - if let hir::ExprPath(..) = expr.node { - // `def` must be its own statement and cannot be in the `match` - // otherwise the `def_map` will be borrowed for the entire match instead - // of just to get the `def` value - let def = ccx.tcx().def_map.borrow().get(&expr.id).unwrap().full_def(); - match def { - def::DefConst(def_id) | def::DefAssociatedConst(def_id) => { - if !ccx.tcx().tables.borrow().adjustments.contains_key(&expr.id) { - debug!("get_const_expr_as_global ({:?}): found const {:?}", - expr.id, def_id); - return get_const_val(ccx, def_id, expr, param_substs); - } - }, - _ => {}, - } - } - - let key = (expr.id, param_substs); - if let Some(&val) = ccx.const_values().borrow().get(&key) { - return Ok(val); - } - let ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, - &ccx.tcx().expr_ty(expr)); - let val = if qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS) { - // Avoid autorefs as they would create global instead of stack - // references, even when only the latter are correct. 
- try!(const_expr_unadjusted(ccx, expr, ty, param_substs, None, trueconst)) - } else { - try!(const_expr(ccx, expr, param_substs, None, trueconst)).0 - }; - - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let val = unsafe { - if llvm::LLVMTypeOf(val) == Type::i1(ccx).to_ref() { - llvm::LLVMConstZExt(val, Type::i8(ccx).to_ref()) - } else { - val - } - }; - - let lvalue = addr_of(ccx, val, type_of::align_of(ccx, ty), "const"); - ccx.const_values().borrow_mut().insert(key, lvalue); - Ok(lvalue) -} - -pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - e: &hir::Expr, - param_substs: &'tcx Substs<'tcx>, - fn_args: FnArgMap, - trueconst: TrueConst) - -> Result<(ValueRef, Ty<'tcx>), ConstEvalFailure> { - let ety = monomorphize::apply_param_substs(cx.tcx(), param_substs, - &cx.tcx().expr_ty(e)); - let llconst = try!(const_expr_unadjusted(cx, e, ety, param_substs, fn_args, trueconst)); - let mut llconst = llconst; - let mut ety_adjusted = monomorphize::apply_param_substs(cx.tcx(), param_substs, - &cx.tcx().expr_ty_adjusted(e)); - let opt_adj = cx.tcx().tables.borrow().adjustments.get(&e.id).cloned(); - match opt_adj { - Some(AdjustReifyFnPointer) => { - // FIXME(#19925) once fn item types are - // zero-sized, we'll need to do something here - } - Some(AdjustUnsafeFnPointer) => { - // purely a type-level thing - } - Some(AdjustDerefRef(adj)) => { - let mut ty = ety; - // Save the last autoderef in case we can avoid it. - if adj.autoderefs > 0 { - for _ in 0..adj.autoderefs-1 { - let (dv, dt) = const_deref(cx, llconst, ty); - llconst = dv; - ty = dt; - } - } - - if adj.autoref.is_some() { - if adj.autoderefs == 0 { - // Don't copy data to do a deref+ref - // (i.e., skip the last auto-deref). 
- llconst = addr_of(cx, llconst, type_of::align_of(cx, ty), "autoref"); - ty = cx.tcx().mk_imm_ref(cx.tcx().mk_region(ty::ReStatic), ty); - } - } else { - let (dv, dt) = const_deref(cx, llconst, ty); - llconst = dv; - - // If we derefed a fat pointer then we will have an - // open type here. So we need to update the type with - // the one returned from const_deref. - ety_adjusted = dt; - } - - if let Some(target) = adj.unsize { - let target = monomorphize::apply_param_substs(cx.tcx(), - param_substs, - &target); - - let pointee_ty = ty.builtin_deref(true, ty::NoPreference) - .expect("consts: unsizing got non-pointer type").ty; - let (base, old_info) = if !type_is_sized(cx.tcx(), pointee_ty) { - // Normally, the source is a thin pointer and we are - // adding extra info to make a fat pointer. The exception - // is when we are upcasting an existing object fat pointer - // to use a different vtable. In that case, we want to - // load out the original data pointer so we can repackage - // it. 
- (const_get_elt(cx, llconst, &[abi::FAT_PTR_ADDR as u32]), - Some(const_get_elt(cx, llconst, &[abi::FAT_PTR_EXTRA as u32]))) - } else { - (llconst, None) - }; - - let unsized_ty = target.builtin_deref(true, ty::NoPreference) - .expect("consts: unsizing got non-pointer target type").ty; - let ptr_ty = type_of::in_memory_type_of(cx, unsized_ty).ptr_to(); - let base = ptrcast(base, ptr_ty); - let info = base::unsized_info(cx, pointee_ty, unsized_ty, - old_info, param_substs); - - if old_info.is_none() { - let prev_const = cx.const_unsized().borrow_mut() - .insert(base, llconst); - assert!(prev_const.is_none() || prev_const == Some(llconst)); - } - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - llconst = C_struct(cx, &[base, info], false); - } - } - None => {} - }; - - let llty = type_of::sizing_type_of(cx, ety_adjusted); - let csize = machine::llsize_of_alloc(cx, val_ty(llconst)); - let tsize = machine::llsize_of_alloc(cx, llty); - if csize != tsize { - cx.sess().abort_if_errors(); - unsafe { - // FIXME these values could use some context - llvm::LLVMDumpValue(llconst); - llvm::LLVMDumpValue(C_undef(llty)); - } - cx.sess().bug(&format!("const {:?} of type {:?} has size {} instead of {}", - e, ety_adjusted, - csize, tsize)); - } - Ok((llconst, ety_adjusted)) -} - -fn check_unary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty, - te: ValueRef, trueconst: TrueConst) -> Result<(), ConstEvalFailure> { - // The only kind of unary expression that we check for validity - // here is `-expr`, to check if it "overflows" (e.g. `-i32::MIN`). - if let hir::ExprUnary(hir::UnNeg, ref inner_e) = e.node { - - // An unfortunate special case: we parse e.g. -128 as a - // negation of the literal 128, which means if we're expecting - // a i8 (or if it was already suffixed, e.g. `-128_i8`), then - // 128 will have already overflowed to -128, and so then the - // constant evaluator thinks we're trying to negate -128. 
- // - // Catch this up front by looking for ExprLit directly, - // and just accepting it. - if let hir::ExprLit(_) = inner_e.node { return Ok(()); } - - let result = match t.sty { - ty::TyInt(int_type) => { - let input = match const_to_opt_int(te) { - Some(v) => v, - None => return Ok(()), - }; - const_int_checked_neg( - input, e, Some(const_eval::IntTy::from(cx.tcx(), int_type))) - } - ty::TyUint(uint_type) => { - let input = match const_to_opt_uint(te) { - Some(v) => v, - None => return Ok(()), - }; - const_uint_checked_neg( - input, e, Some(const_eval::UintTy::from(cx.tcx(), uint_type))) - } - _ => return Ok(()), - }; - const_err(cx, e, result, trueconst) - } else { - Ok(()) - } -} - -fn const_err(cx: &CrateContext, - e: &hir::Expr, - result: Result, - trueconst: TrueConst) - -> Result<(), ConstEvalFailure> { - match (result, trueconst) { - (Ok(_), _) => { - // We do not actually care about a successful result. - Ok(()) - }, - (Err(err), TrueConst::Yes) => { - cx.tcx().sess.span_err(e.span, &err.description()); - Err(Compiletime(err)) - }, - (Err(err), TrueConst::No) => { - cx.tcx().sess.span_warn(e.span, &err.description()); - Err(Runtime(err)) - }, - } -} - -fn check_binary_expr_validity(cx: &CrateContext, e: &hir::Expr, t: Ty, - te1: ValueRef, te2: ValueRef, - trueconst: TrueConst) -> Result<(), ConstEvalFailure> { - let b = if let hir::ExprBinary(b, _, _) = e.node { b } else { unreachable!() }; - - let result = match t.sty { - ty::TyInt(int_type) => { - let (lhs, rhs) = match (const_to_opt_int(te1), - const_to_opt_int(te2)) { - (Some(v1), Some(v2)) => (v1, v2), - _ => return Ok(()), - }; - - let opt_ety = Some(const_eval::IntTy::from(cx.tcx(), int_type)); - match b.node { - hir::BiAdd => const_int_checked_add(lhs, rhs, e, opt_ety), - hir::BiSub => const_int_checked_sub(lhs, rhs, e, opt_ety), - hir::BiMul => const_int_checked_mul(lhs, rhs, e, opt_ety), - hir::BiDiv => const_int_checked_div(lhs, rhs, e, opt_ety), - hir::BiRem => const_int_checked_rem(lhs, 
rhs, e, opt_ety), - hir::BiShl => const_int_checked_shl(lhs, rhs, e, opt_ety), - hir::BiShr => const_int_checked_shr(lhs, rhs, e, opt_ety), - _ => return Ok(()), - } - } - ty::TyUint(uint_type) => { - let (lhs, rhs) = match (const_to_opt_uint(te1), - const_to_opt_uint(te2)) { - (Some(v1), Some(v2)) => (v1, v2), - _ => return Ok(()), - }; - - let opt_ety = Some(const_eval::UintTy::from(cx.tcx(), uint_type)); - match b.node { - hir::BiAdd => const_uint_checked_add(lhs, rhs, e, opt_ety), - hir::BiSub => const_uint_checked_sub(lhs, rhs, e, opt_ety), - hir::BiMul => const_uint_checked_mul(lhs, rhs, e, opt_ety), - hir::BiDiv => const_uint_checked_div(lhs, rhs, e, opt_ety), - hir::BiRem => const_uint_checked_rem(lhs, rhs, e, opt_ety), - hir::BiShl => const_uint_checked_shl(lhs, rhs, e, opt_ety), - hir::BiShr => const_uint_checked_shr(lhs, rhs, e, opt_ety), - _ => return Ok(()), - } - } - _ => return Ok(()), - }; - const_err(cx, e, result, trueconst) -} - -fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - e: &hir::Expr, - ety: Ty<'tcx>, - param_substs: &'tcx Substs<'tcx>, - fn_args: FnArgMap, - trueconst: TrueConst) - -> Result -{ - debug!("const_expr_unadjusted(e={:?}, ety={:?}, param_substs={:?})", - e, - ety, - param_substs); - - let map_list = |exprs: &[P]| -> Result, ConstEvalFailure> { - exprs.iter() - .map(|e| const_expr(cx, &**e, param_substs, fn_args, trueconst).map(|(l, _)| l)) - .collect::>>() - .into_iter() - .collect() - // this dance is necessary to eagerly run const_expr so all errors are reported - }; - let _icx = push_ctxt("const_expr"); - Ok(match e.node { - hir::ExprLit(ref lit) => const_lit(cx, e, &**lit), - hir::ExprBinary(b, ref e1, ref e2) => { - /* Neither type is bottom, and we expect them to be unified - * already, so the following is safe. 
*/ - let (te1, ty) = try!(const_expr(cx, &**e1, param_substs, fn_args, trueconst)); - debug!("const_expr_unadjusted: te1={}, ty={:?}", - cx.tn().val_to_string(te1), - ty); - assert!(!ty.is_simd()); - let is_float = ty.is_fp(); - let signed = ty.is_signed(); - - let (te2, _) = try!(const_expr(cx, &**e2, param_substs, fn_args, trueconst)); - - try!(check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)); - - unsafe { match b.node { - hir::BiAdd if is_float => llvm::LLVMConstFAdd(te1, te2), - hir::BiAdd => llvm::LLVMConstAdd(te1, te2), - - hir::BiSub if is_float => llvm::LLVMConstFSub(te1, te2), - hir::BiSub => llvm::LLVMConstSub(te1, te2), - - hir::BiMul if is_float => llvm::LLVMConstFMul(te1, te2), - hir::BiMul => llvm::LLVMConstMul(te1, te2), - - hir::BiDiv if is_float => llvm::LLVMConstFDiv(te1, te2), - hir::BiDiv if signed => llvm::LLVMConstSDiv(te1, te2), - hir::BiDiv => llvm::LLVMConstUDiv(te1, te2), - - hir::BiRem if is_float => llvm::LLVMConstFRem(te1, te2), - hir::BiRem if signed => llvm::LLVMConstSRem(te1, te2), - hir::BiRem => llvm::LLVMConstURem(te1, te2), - - hir::BiAnd => llvm::LLVMConstAnd(te1, te2), - hir::BiOr => llvm::LLVMConstOr(te1, te2), - hir::BiBitXor => llvm::LLVMConstXor(te1, te2), - hir::BiBitAnd => llvm::LLVMConstAnd(te1, te2), - hir::BiBitOr => llvm::LLVMConstOr(te1, te2), - hir::BiShl => { - let te2 = base::cast_shift_const_rhs(b.node, te1, te2); - llvm::LLVMConstShl(te1, te2) - }, - hir::BiShr => { - let te2 = base::cast_shift_const_rhs(b.node, te1, te2); - if signed { llvm::LLVMConstAShr(te1, te2) } - else { llvm::LLVMConstLShr(te1, te2) } - }, - hir::BiEq | hir::BiNe | hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => { - if is_float { - let cmp = base::bin_op_to_fcmp_predicate(cx, b.node); - ConstFCmp(cmp, te1, te2) - } else { - let cmp = base::bin_op_to_icmp_predicate(cx, b.node, signed); - ConstICmp(cmp, te1, te2) - } - }, - } } // unsafe { match b.node { - }, - hir::ExprUnary(u, ref inner_e) => { - let (te, ty) = 
try!(const_expr(cx, &**inner_e, param_substs, fn_args, trueconst)); - - try!(check_unary_expr_validity(cx, e, ty, te, trueconst)); - - let is_float = ty.is_fp(); - unsafe { match u { - hir::UnDeref => const_deref(cx, te, ty).0, - hir::UnNot => llvm::LLVMConstNot(te), - hir::UnNeg if is_float => llvm::LLVMConstFNeg(te), - hir::UnNeg => llvm::LLVMConstNeg(te), - } } - }, - hir::ExprField(ref base, field) => { - let (bv, bt) = try!(const_expr(cx, &**base, param_substs, fn_args, trueconst)); - let brepr = adt::represent_type(cx, bt); - let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None); - let ix = vinfo.field_index(field.node); - adt::const_get_field(cx, &*brepr, bv, vinfo.discr, ix) - }, - hir::ExprTupField(ref base, idx) => { - let (bv, bt) = try!(const_expr(cx, &**base, param_substs, fn_args, trueconst)); - let brepr = adt::represent_type(cx, bt); - let vinfo = VariantInfo::from_ty(cx.tcx(), bt, None); - adt::const_get_field(cx, &*brepr, bv, vinfo.discr, idx.node) - }, - hir::ExprIndex(ref base, ref index) => { - let (bv, bt) = try!(const_expr(cx, &**base, param_substs, fn_args, trueconst)); - let iv = match eval_const_expr_partial(cx.tcx(), &index, ExprTypeChecked, None) { - Ok(ConstVal::Int(i)) => i as u64, - Ok(ConstVal::Uint(u)) => u, - _ => cx.sess().span_bug(index.span, - "index is not an integer-constant expression") - }; - let (arr, len) = match bt.sty { - ty::TyArray(_, u) => (bv, C_uint(cx, u)), - ty::TySlice(_) | ty::TyStr => { - let e1 = const_get_elt(cx, bv, &[0]); - (const_deref_ptr(cx, e1), const_get_elt(cx, bv, &[1])) - }, - ty::TyRef(_, mt) => match mt.ty.sty { - ty::TyArray(_, u) => { - (const_deref_ptr(cx, bv), C_uint(cx, u)) - }, - _ => cx.sess().span_bug(base.span, - &format!("index-expr base must be a vector \ - or string type, found {:?}", - bt)), - }, - _ => cx.sess().span_bug(base.span, - &format!("index-expr base must be a vector \ - or string type, found {:?}", - bt)), - }; - - let len = unsafe { llvm::LLVMConstIntGetZExtValue(len) as 
u64 }; - let len = match bt.sty { - ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => match ty.sty { - ty::TyStr => { - assert!(len > 0); - len - 1 - }, - _ => len, - }, - _ => len, - }; - if iv >= len { - // FIXME #3170: report this earlier on in the const-eval - // pass. Reporting here is a bit late. - span_err!(cx.sess(), e.span, E0515, - "const index-expr is out of bounds"); - C_undef(val_ty(arr).element_type()) - } else { - const_get_elt(cx, arr, &[iv as c_uint]) - } - }, - hir::ExprCast(ref base, _) => { - let t_cast = ety; - let llty = type_of::type_of(cx, t_cast); - let (v, t_expr) = try!(const_expr(cx, &**base, param_substs, fn_args, trueconst)); - debug!("trans_const_cast({:?} as {:?})", t_expr, t_cast); - if expr::cast_is_noop(cx.tcx(), base, t_expr, t_cast) { - return Ok(v); - } - if type_is_fat_ptr(cx.tcx(), t_expr) { - // Fat pointer casts. - let t_cast_inner = - t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty; - let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to(); - let addr = ptrcast(const_get_elt(cx, v, &[abi::FAT_PTR_ADDR as u32]), - ptr_ty); - if type_is_fat_ptr(cx.tcx(), t_cast) { - let info = const_get_elt(cx, v, &[abi::FAT_PTR_EXTRA as u32]); - return Ok(C_struct(cx, &[addr, info], false)) - } else { - return Ok(addr); - } - } - unsafe { match ( - CastTy::from_ty(t_expr).expect("bad input type for cast"), - CastTy::from_ty(t_cast).expect("bad output type for cast"), - ) { - (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => { - let repr = adt::represent_type(cx, t_expr); - let discr = adt::const_get_discrim(cx, &*repr, v); - let iv = C_integral(cx.int_type(), discr.0, false); - let s = adt::is_discr_signed(&*repr) as Bool; - llvm::LLVMConstIntCast(iv, llty.to_ref(), s) - }, - (CastTy::Int(_), CastTy::Int(_)) => { - let s = t_expr.is_signed() as Bool; - llvm::LLVMConstIntCast(v, llty.to_ref(), s) - }, - (CastTy::Int(_), CastTy::Float) => { - if t_expr.is_signed() { - llvm::LLVMConstSIToFP(v, 
llty.to_ref()) - } else { - llvm::LLVMConstUIToFP(v, llty.to_ref()) - } - }, - (CastTy::Float, CastTy::Float) => llvm::LLVMConstFPCast(v, llty.to_ref()), - (CastTy::Float, CastTy::Int(IntTy::I)) => llvm::LLVMConstFPToSI(v, llty.to_ref()), - (CastTy::Float, CastTy::Int(_)) => llvm::LLVMConstFPToUI(v, llty.to_ref()), - (CastTy::Ptr(_), CastTy::Ptr(_)) | (CastTy::FnPtr, CastTy::Ptr(_)) - | (CastTy::RPtr(_), CastTy::Ptr(_)) => { - ptrcast(v, llty) - }, - (CastTy::FnPtr, CastTy::FnPtr) => ptrcast(v, llty), // isn't this a coercion? - (CastTy::Int(_), CastTy::Ptr(_)) => llvm::LLVMConstIntToPtr(v, llty.to_ref()), - (CastTy::Ptr(_), CastTy::Int(_)) | (CastTy::FnPtr, CastTy::Int(_)) => { - llvm::LLVMConstPtrToInt(v, llty.to_ref()) - }, - _ => { - cx.sess().impossible_case(e.span, - "bad combination of types for cast") - }, - } } // unsafe { match ( ... ) { - }, - hir::ExprAddrOf(hir::MutImmutable, ref sub) => { - // If this is the address of some static, then we need to return - // the actual address of the static itself (short circuit the rest - // of const eval). - let mut cur = sub; - loop { - match cur.node { - hir::ExprBlock(ref blk) => { - if let Some(ref sub) = blk.expr { - cur = sub; - } else { - break; - } - }, - _ => break, - } - } - let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def()); - if let Some(def::DefStatic(def_id, _)) = opt_def { - common::get_static_val(cx, def_id, ety) - } else { - // If this isn't the address of a static, then keep going through - // normal constant evaluation. 
- let (v, ty) = try!(const_expr(cx, &**sub, param_substs, fn_args, trueconst)); - addr_of(cx, v, type_of::align_of(cx, ty), "ref") - } - }, - hir::ExprAddrOf(hir::MutMutable, ref sub) => { - let (v, ty) = try!(const_expr(cx, &**sub, param_substs, fn_args, trueconst)); - addr_of_mut(cx, v, type_of::align_of(cx, ty), "ref_mut_slice") - }, - hir::ExprTup(ref es) => { - let repr = adt::represent_type(cx, ety); - let vals = try!(map_list(&es[..])); - adt::trans_const(cx, &*repr, Disr(0), &vals[..]) - }, - hir::ExprStruct(_, ref fs, ref base_opt) => { - let repr = adt::represent_type(cx, ety); - - let base_val = match *base_opt { - Some(ref base) => Some(try!(const_expr( - cx, - &**base, - param_substs, - fn_args, - trueconst, - ))), - None => None - }; - - let VariantInfo { discr, fields } = VariantInfo::of_node(cx.tcx(), ety, e.id); - let cs = fields.iter().enumerate().map(|(ix, &Field(f_name, _))| { - match (fs.iter().find(|f| f_name == f.name.node), base_val) { - (Some(ref f), _) => { - const_expr(cx, &*f.expr, param_substs, fn_args, trueconst).map(|(l, _)| l) - }, - (_, Some((bv, _))) => Ok(adt::const_get_field(cx, &*repr, bv, discr, ix)), - (_, None) => cx.sess().span_bug(e.span, "missing struct field"), - } - }) - .collect::>>() - .into_iter() - .collect::,ConstEvalFailure>>(); - let cs = try!(cs); - if ety.is_simd() { - C_vector(&cs[..]) - } else { - adt::trans_const(cx, &*repr, discr, &cs[..]) - } - }, - hir::ExprVec(ref es) => { - let unit_ty = ety.sequence_element_type(cx.tcx()); - let llunitty = type_of::type_of(cx, unit_ty); - let vs = es.iter() - .map(|e| const_expr( - cx, - &**e, - param_substs, - fn_args, - trueconst, - ).map(|(l, _)| l)) - .collect::>>() - .into_iter() - .collect::, ConstEvalFailure>>(); - let vs = try!(vs); - // If the vector contains enums, an LLVM array won't work. 
- if vs.iter().any(|vi| val_ty(*vi) != llunitty) { - C_struct(cx, &vs[..], false) - } else { - C_array(llunitty, &vs[..]) - } - }, - hir::ExprRepeat(ref elem, ref count) => { - let unit_ty = ety.sequence_element_type(cx.tcx()); - let llunitty = type_of::type_of(cx, unit_ty); - let n = cx.tcx().eval_repeat_count(count); - let unit_val = try!(const_expr(cx, &**elem, param_substs, fn_args, trueconst)).0; - let vs = vec![unit_val; n]; - if val_ty(unit_val) != llunitty { - C_struct(cx, &vs[..], false) - } else { - C_array(llunitty, &vs[..]) - } - }, - hir::ExprPath(..) => { - let def = cx.tcx().def_map.borrow().get(&e.id).unwrap().full_def(); - match def { - def::DefLocal(_, id) => { - if let Some(val) = fn_args.and_then(|args| args.get(&id).cloned()) { - val - } else { - cx.sess().span_bug(e.span, "const fn argument not found") - } - } - def::DefFn(..) | def::DefMethod(..) => { - expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val - } - def::DefConst(def_id) | def::DefAssociatedConst(def_id) => { - const_deref_ptr(cx, try!(get_const_val(cx, def_id, e, param_substs))) - } - def::DefVariant(enum_did, variant_did, _) => { - let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did); - match vinfo.kind() { - ty::VariantKind::Unit => { - let repr = adt::represent_type(cx, ety); - adt::trans_const(cx, &*repr, Disr::from(vinfo.disr_val), &[]) - } - ty::VariantKind::Tuple => { - expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val - } - ty::VariantKind::Struct => { - cx.sess().span_bug(e.span, "path-expr refers to a dict variant!") - } - } - } - def::DefStruct(_) => { - if let ty::TyBareFn(..) = ety.sty { - // Tuple struct. - expr::trans_def_fn_unadjusted(cx, e, def, param_substs).val - } else { - // Unit struct. 
- C_null(type_of::type_of(cx, ety)) - } - } - _ => { - cx.sess().span_bug(e.span, "expected a const, fn, struct, \ - or variant def") - } - } - }, - hir::ExprCall(ref callee, ref args) => { - let mut callee = &**callee; - loop { - callee = match callee.node { - hir::ExprBlock(ref block) => match block.expr { - Some(ref tail) => &**tail, - None => break, - }, - _ => break, - }; - } - let def = cx.tcx().def_map.borrow()[&callee.id].full_def(); - let arg_vals = try!(map_list(args)); - match def { - def::DefFn(did, _) | def::DefMethod(did) => { - try!(const_fn_call( - cx, - ExprId(callee.id), - did, - &arg_vals, - param_substs, - trueconst, - )) - } - def::DefStruct(_) => { - if ety.is_simd() { - C_vector(&arg_vals[..]) - } else { - let repr = adt::represent_type(cx, ety); - adt::trans_const(cx, &*repr, Disr(0), &arg_vals[..]) - } - } - def::DefVariant(enum_did, variant_did, _) => { - let repr = adt::represent_type(cx, ety); - let vinfo = cx.tcx().lookup_adt_def(enum_did).variant_with_id(variant_did); - adt::trans_const(cx, - &*repr, - Disr::from(vinfo.disr_val), - &arg_vals[..]) - } - _ => cx.sess().span_bug(e.span, "expected a struct, variant, or const fn def"), - } - }, - hir::ExprMethodCall(_, _, ref args) => { - let arg_vals = try!(map_list(args)); - let method_call = ty::MethodCall::expr(e.id); - let method_did = cx.tcx().tables.borrow().method_map[&method_call].def_id; - try!(const_fn_call(cx, MethodCallKey(method_call), - method_did, &arg_vals, param_substs, trueconst)) - }, - hir::ExprType(ref e, _) => try!(const_expr(cx, &**e, param_substs, fn_args, trueconst)).0, - hir::ExprBlock(ref block) => { - match block.expr { - Some(ref expr) => try!(const_expr( - cx, - &**expr, - param_substs, - fn_args, - trueconst, - )).0, - None => C_nil(cx), - } - }, - hir::ExprClosure(_, ref decl, ref body) => { - match ety.sty { - ty::TyClosure(def_id, ref substs) => { - closure::trans_closure_expr(closure::Dest::Ignore(cx), - decl, - body, - e.id, - def_id, - substs, - 
&e.attrs); - } - _ => - cx.sess().span_bug( - e.span, - &format!("bad type for closure expr: {:?}", ety)) - } - C_null(type_of::type_of(cx, ety)) - }, - _ => cx.sess().span_bug(e.span, - "bad constant expression type in consts::const_expr"), - }) -} - -pub fn trans_static(ccx: &CrateContext, - m: hir::Mutability, - expr: &hir::Expr, - id: ast::NodeId, - attrs: &[ast::Attribute]) - -> Result { - unsafe { - let _icx = push_ctxt("trans_static"); - let g = base::get_item_val(ccx, id); - - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - let (v, _) = try!(const_expr( - ccx, - expr, - empty_substs, - None, - TrueConst::Yes, - ).map_err(|e| e.into_inner())); - - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = llvm::LLVMTypeOf(v); - let v = if val_llty == Type::i1(ccx).to_ref() { - val_llty = Type::i8(ccx).to_ref(); - llvm::LLVMConstZExt(v, val_llty) - } else { - v - }; - - let ty = ccx.tcx().node_id_to_type(id); - let llty = type_of::type_of(ccx, ty); - let g = if val_llty == llty.to_ref() { - g - } else { - // If we created the global with the wrong type, - // correct the type. - let empty_string = CString::new("").unwrap(); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); - let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); - let new_g = llvm::LLVMGetOrInsertGlobal( - ccx.llmod(), name_string.as_ptr(), val_llty); - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::trans_crate.) 
- ccx.statics_to_rauw().borrow_mut().push((g, new_g)); - new_g - }; - llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); - llvm::LLVMSetInitializer(g, v); - - // As an optimization, all shared statics which do not have interior - // mutability are placed into read-only memory. - if m != hir::MutMutable { - let tcontents = ty.type_contents(ccx.tcx()); - if !tcontents.interior_unsafe() { - llvm::LLVMSetGlobalConstant(g, llvm::True); - } - } - - debuginfo::create_global_var_metadata(ccx, id, g); - - if attr::contains_name(attrs, - "thread_local") { - llvm::set_thread_local(g, true); - } - Ok(g) - } -} diff --git a/src/librustc_trans/trans/context.rs b/src/librustc_trans/trans/context.rs deleted file mode 100644 index d4d2f01f77426..0000000000000 --- a/src/librustc_trans/trans/context.rs +++ /dev/null @@ -1,1020 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm; -use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef}; -use middle::cstore::LinkMeta; -use middle::def::ExportMap; -use middle::def_id::DefId; -use middle::traits; -use rustc_mir::mir_map::MirMap; -use trans::adt; -use trans::base; -use trans::builder::Builder; -use trans::common::{ExternMap,BuilderRef_res}; -use trans::debuginfo; -use trans::declare; -use trans::glue::DropGlueKind; -use trans::monomorphize::MonoId; -use trans::type_::{Type, TypeNames}; -use middle::subst::Substs; -use middle::ty::{self, Ty}; -use session::config::NoDebugInfo; -use session::Session; -use util::sha2::Sha256; -use util::nodemap::{NodeMap, NodeSet, DefIdMap, FnvHashMap, FnvHashSet}; - -use std::ffi::CString; -use std::cell::{Cell, RefCell}; -use std::ptr; -use std::rc::Rc; -use syntax::ast; -use syntax::parse::token::InternedString; - -pub struct Stats { - pub n_glues_created: Cell, - pub n_null_glues: Cell, - pub n_real_glues: Cell, - pub n_fns: Cell, - pub n_monos: Cell, - pub n_inlines: Cell, - pub n_closures: Cell, - pub n_llvm_insns: Cell, - pub llvm_insns: RefCell>, - // (ident, llvm-instructions) - pub fn_stats: RefCell >, -} - -/// The shared portion of a `CrateContext`. There is one `SharedCrateContext` -/// per crate. The data here is shared between all compilation units of the -/// crate, so it must not contain references to any LLVM data structures -/// (aside from metadata-related ones). -pub struct SharedCrateContext<'a, 'tcx: 'a> { - local_ccxs: Vec>, - - metadata_llmod: ModuleRef, - metadata_llcx: ContextRef, - - export_map: ExportMap, - reachable: NodeSet, - item_symbols: RefCell>, - link_meta: LinkMeta, - symbol_hasher: RefCell, - tcx: &'a ty::ctxt<'tcx>, - stats: Stats, - check_overflow: bool, - check_drop_flag_for_sanity: bool, - mir_map: &'a MirMap<'tcx>, - - available_drop_glues: RefCell, String>>, - use_dll_storage_attrs: bool, -} - -/// The local portion of a `CrateContext`. There is one `LocalCrateContext` -/// per compilation unit. 
Each one has its own LLVM `ContextRef` so that -/// several compilation units may be optimized in parallel. All other LLVM -/// data structures in the `LocalCrateContext` are tied to that `ContextRef`. -pub struct LocalCrateContext<'tcx> { - llmod: ModuleRef, - llcx: ContextRef, - tn: TypeNames, - externs: RefCell, - item_vals: RefCell>, - needs_unwind_cleanup_cache: RefCell, bool>>, - fn_pointer_shims: RefCell, ValueRef>>, - drop_glues: RefCell, ValueRef>>, - /// Track mapping of external ids to local items imported for inlining - external: RefCell>>, - /// Backwards version of the `external` map (inlined items to where they - /// came from) - external_srcs: RefCell>, - /// Cache instances of monomorphized functions - monomorphized: RefCell, ValueRef>>, - monomorphizing: RefCell>, - available_monomorphizations: RefCell>, - /// Cache generated vtables - vtables: RefCell, ValueRef>>, - /// Cache of constant strings, - const_cstr_cache: RefCell>, - - /// Reverse-direction for const ptrs cast from globals. - /// Key is a ValueRef holding a *T, - /// Val is a ValueRef holding a *[T]. - /// - /// Needed because LLVM loses pointer->pointee association - /// when we ptrcast, and we have to ptrcast during translation - /// of a [T] const because we form a slice, a (*T,usize) pair, not - /// a pointer to an LLVM array type. Similar for trait objects. - const_unsized: RefCell>, - - /// Cache of emitted const globals (value -> global) - const_globals: RefCell>, - - /// Cache of emitted const values - const_values: RefCell), ValueRef>>, - - /// Cache of external const values - extern_const_values: RefCell>, - - impl_method_cache: RefCell>, - - /// Cache of closure wrappers for bare fn's. - closure_bare_wrapper_cache: RefCell>, - - /// List of globals for static variables which need to be passed to the - /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete. - /// (We have to make sure we don't invalidate any ValueRefs referring - /// to constants.) 
- statics_to_rauw: RefCell>, - - lltypes: RefCell, Type>>, - llsizingtypes: RefCell, Type>>, - adt_reprs: RefCell, Rc>>>, - type_hashcodes: RefCell, String>>, - int_type: Type, - opaque_vec_type: Type, - builder: BuilderRef_res, - - /// Holds the LLVM values for closure IDs. - closure_vals: RefCell, ValueRef>>, - - dbg_cx: Option>, - - eh_personality: RefCell>, - eh_unwind_resume: RefCell>, - rust_try_fn: RefCell>, - - intrinsics: RefCell>, - - /// Number of LLVM instructions translated into this `LocalCrateContext`. - /// This is used to perform some basic load-balancing to keep all LLVM - /// contexts around the same size. - n_llvm_insns: Cell, - - /// Depth of the current type-of computation - used to bail out - type_of_depth: Cell, - - trait_cache: RefCell, - traits::Vtable<'tcx, ()>>>, -} - -pub struct CrateContext<'a, 'tcx: 'a> { - shared: &'a SharedCrateContext<'a, 'tcx>, - local: &'a LocalCrateContext<'tcx>, - /// The index of `local` in `shared.local_ccxs`. This is used in - /// `maybe_iter(true)` to identify the original `LocalCrateContext`. - index: usize, -} - -pub struct CrateContextIterator<'a, 'tcx: 'a> { - shared: &'a SharedCrateContext<'a, 'tcx>, - index: usize, -} - -impl<'a, 'tcx> Iterator for CrateContextIterator<'a,'tcx> { - type Item = CrateContext<'a, 'tcx>; - - fn next(&mut self) -> Option> { - if self.index >= self.shared.local_ccxs.len() { - return None; - } - - let index = self.index; - self.index += 1; - - Some(CrateContext { - shared: self.shared, - local: &self.shared.local_ccxs[index], - index: index, - }) - } -} - -/// The iterator produced by `CrateContext::maybe_iter`. 
-pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> { - shared: &'a SharedCrateContext<'a, 'tcx>, - index: usize, - single: bool, - origin: usize, -} - -impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> { - type Item = (CrateContext<'a, 'tcx>, bool); - - fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> { - if self.index >= self.shared.local_ccxs.len() { - return None; - } - - let index = self.index; - self.index += 1; - if self.single { - self.index = self.shared.local_ccxs.len(); - } - - let ccx = CrateContext { - shared: self.shared, - local: &self.shared.local_ccxs[index], - index: index, - }; - Some((ccx, index == self.origin)) - } -} - - -unsafe fn create_context_and_module(sess: &Session, mod_name: &str) -> (ContextRef, ModuleRef) { - let llcx = llvm::LLVMContextCreate(); - let mod_name = CString::new(mod_name).unwrap(); - let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx); - - if let Some(ref custom_data_layout) = sess.target.target.options.data_layout { - let data_layout = CString::new(&custom_data_layout[..]).unwrap(); - llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr()); - } else { - let tm = ::back::write::create_target_machine(sess); - llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm); - llvm::LLVMRustDisposeTargetMachine(tm); - } - - let llvm_target = sess.target.target.llvm_target.as_bytes(); - let llvm_target = CString::new(llvm_target).unwrap(); - llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr()); - (llcx, llmod) -} - -impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> { - pub fn new(crate_name: &str, - local_count: usize, - tcx: &'b ty::ctxt<'tcx>, - mir_map: &'b MirMap<'tcx>, - export_map: ExportMap, - symbol_hasher: Sha256, - link_meta: LinkMeta, - reachable: NodeSet, - check_overflow: bool, - check_drop_flag_for_sanity: bool) - -> SharedCrateContext<'b, 'tcx> { - let (metadata_llcx, metadata_llmod) = unsafe { - create_context_and_module(&tcx.sess, "metadata") - }; - - // An 
interesting part of Windows which MSVC forces our hand on (and - // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` - // attributes in LLVM IR as well as native dependencies (in C these - // correspond to `__declspec(dllimport)`). - // - // Whenever a dynamic library is built by MSVC it must have its public - // interface specified by functions tagged with `dllexport` or otherwise - // they're not available to be linked against. This poses a few problems - // for the compiler, some of which are somewhat fundamental, but we use - // the `use_dll_storage_attrs` variable below to attach the `dllexport` - // attribute to all LLVM functions that are reachable (e.g. they're - // already tagged with external linkage). This is suboptimal for a few - // reasons: - // - // * If an object file will never be included in a dynamic library, - // there's no need to attach the dllexport attribute. Most object - // files in Rust are not destined to become part of a dll as binaries - // are statically linked by default. - // * If the compiler is emitting both an rlib and a dylib, the same - // source object file is currently used but with MSVC this may be less - // feasible. The compiler may be able to get around this, but it may - // involve some invasive changes to deal with this. - // - // The flipside of this situation is that whenever you link to a dll and - // you import a function from it, the import should be tagged with - // `dllimport`. At this time, however, the compiler does not emit - // `dllimport` for any declarations other than constants (where it is - // required), which is again suboptimal for even more reasons! - // - // * Calling a function imported from another dll without using - // `dllimport` causes the linker/compiler to have extra overhead (one - // `jmp` instruction on x86) when calling the function. 
- // * The same object file may be used in different circumstances, so a - // function may be imported from a dll if the object is linked into a - // dll, but it may be just linked against if linked into an rlib. - // * The compiler has no knowledge about whether native functions should - // be tagged dllimport or not. - // - // For now the compiler takes the perf hit (I do not have any numbers to - // this effect) by marking very little as `dllimport` and praying the - // linker will take care of everything. Fixing this problem will likely - // require adding a few attributes to Rust itself (feature gated at the - // start) and then strongly recommending static linkage on MSVC! - let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_msvc; - - let mut shared_ccx = SharedCrateContext { - local_ccxs: Vec::with_capacity(local_count), - metadata_llmod: metadata_llmod, - metadata_llcx: metadata_llcx, - export_map: export_map, - reachable: reachable, - item_symbols: RefCell::new(NodeMap()), - link_meta: link_meta, - symbol_hasher: RefCell::new(symbol_hasher), - tcx: tcx, - mir_map: mir_map, - stats: Stats { - n_glues_created: Cell::new(0), - n_null_glues: Cell::new(0), - n_real_glues: Cell::new(0), - n_fns: Cell::new(0), - n_monos: Cell::new(0), - n_inlines: Cell::new(0), - n_closures: Cell::new(0), - n_llvm_insns: Cell::new(0), - llvm_insns: RefCell::new(FnvHashMap()), - fn_stats: RefCell::new(Vec::new()), - }, - check_overflow: check_overflow, - check_drop_flag_for_sanity: check_drop_flag_for_sanity, - available_drop_glues: RefCell::new(FnvHashMap()), - use_dll_storage_attrs: use_dll_storage_attrs, - }; - - for i in 0..local_count { - // Append ".rs" to crate name as LLVM module identifier. - // - // LLVM code generator emits a ".file filename" directive - // for ELF backends. Value of the "filename" is set as the - // LLVM module identifier. 
Due to a LLVM MC bug[1], LLVM - // crashes if the module identifier is same as other symbols - // such as a function name in the module. - // 1. http://llvm.org/bugs/show_bug.cgi?id=11479 - let llmod_id = format!("{}.{}.rs", crate_name, i); - let local_ccx = LocalCrateContext::new(&shared_ccx, &llmod_id[..]); - shared_ccx.local_ccxs.push(local_ccx); - } - - shared_ccx - } - - pub fn iter<'a>(&'a self) -> CrateContextIterator<'a, 'tcx> { - CrateContextIterator { - shared: self, - index: 0, - } - } - - pub fn get_ccx<'a>(&'a self, index: usize) -> CrateContext<'a, 'tcx> { - CrateContext { - shared: self, - local: &self.local_ccxs[index], - index: index, - } - } - - fn get_smallest_ccx<'a>(&'a self) -> CrateContext<'a, 'tcx> { - let (local_ccx, index) = - self.local_ccxs - .iter() - .zip(0..self.local_ccxs.len()) - .min_by_key(|&(local_ccx, _idx)| local_ccx.n_llvm_insns.get()) - .unwrap(); - CrateContext { - shared: self, - local: local_ccx, - index: index, - } - } - - - pub fn metadata_llmod(&self) -> ModuleRef { - self.metadata_llmod - } - - pub fn metadata_llcx(&self) -> ContextRef { - self.metadata_llcx - } - - pub fn export_map<'a>(&'a self) -> &'a ExportMap { - &self.export_map - } - - pub fn reachable<'a>(&'a self) -> &'a NodeSet { - &self.reachable - } - - pub fn item_symbols<'a>(&'a self) -> &'a RefCell> { - &self.item_symbols - } - - pub fn link_meta<'a>(&'a self) -> &'a LinkMeta { - &self.link_meta - } - - pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { - self.tcx - } - - pub fn sess<'a>(&'a self) -> &'a Session { - &self.tcx.sess - } - - pub fn stats<'a>(&'a self) -> &'a Stats { - &self.stats - } - - pub fn use_dll_storage_attrs(&self) -> bool { - self.use_dll_storage_attrs - } -} - -impl<'tcx> LocalCrateContext<'tcx> { - fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>, - name: &str) - -> LocalCrateContext<'tcx> { - unsafe { - let (llcx, llmod) = create_context_and_module(&shared.tcx.sess, name); - - let dbg_cx = if shared.tcx.sess.opts.debuginfo != 
NoDebugInfo { - Some(debuginfo::CrateDebugContext::new(llmod)) - } else { - None - }; - - let mut local_ccx = LocalCrateContext { - llmod: llmod, - llcx: llcx, - tn: TypeNames::new(), - externs: RefCell::new(FnvHashMap()), - item_vals: RefCell::new(NodeMap()), - needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()), - fn_pointer_shims: RefCell::new(FnvHashMap()), - drop_glues: RefCell::new(FnvHashMap()), - external: RefCell::new(DefIdMap()), - external_srcs: RefCell::new(NodeMap()), - monomorphized: RefCell::new(FnvHashMap()), - monomorphizing: RefCell::new(DefIdMap()), - available_monomorphizations: RefCell::new(FnvHashSet()), - vtables: RefCell::new(FnvHashMap()), - const_cstr_cache: RefCell::new(FnvHashMap()), - const_unsized: RefCell::new(FnvHashMap()), - const_globals: RefCell::new(FnvHashMap()), - const_values: RefCell::new(FnvHashMap()), - extern_const_values: RefCell::new(DefIdMap()), - impl_method_cache: RefCell::new(FnvHashMap()), - closure_bare_wrapper_cache: RefCell::new(FnvHashMap()), - statics_to_rauw: RefCell::new(Vec::new()), - lltypes: RefCell::new(FnvHashMap()), - llsizingtypes: RefCell::new(FnvHashMap()), - adt_reprs: RefCell::new(FnvHashMap()), - type_hashcodes: RefCell::new(FnvHashMap()), - int_type: Type::from_ref(ptr::null_mut()), - opaque_vec_type: Type::from_ref(ptr::null_mut()), - builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), - closure_vals: RefCell::new(FnvHashMap()), - dbg_cx: dbg_cx, - eh_personality: RefCell::new(None), - eh_unwind_resume: RefCell::new(None), - rust_try_fn: RefCell::new(None), - intrinsics: RefCell::new(FnvHashMap()), - n_llvm_insns: Cell::new(0), - type_of_depth: Cell::new(0), - trait_cache: RefCell::new(FnvHashMap()), - }; - - local_ccx.int_type = Type::int(&local_ccx.dummy_ccx(shared)); - local_ccx.opaque_vec_type = Type::opaque_vec(&local_ccx.dummy_ccx(shared)); - - // Done mutating local_ccx directly. (The rest of the - // initialization goes through RefCell.) 
- { - let ccx = local_ccx.dummy_ccx(shared); - - let mut str_slice_ty = Type::named_struct(&ccx, "str_slice"); - str_slice_ty.set_struct_body(&[Type::i8p(&ccx), ccx.int_type()], false); - ccx.tn().associate_type("str_slice", &str_slice_ty); - - if ccx.sess().count_llvm_insns() { - base::init_insn_ctxt() - } - } - - local_ccx - } - } - - /// Create a dummy `CrateContext` from `self` and the provided - /// `SharedCrateContext`. This is somewhat dangerous because `self` may - /// not actually be an element of `shared.local_ccxs`, which can cause some - /// operations to panic unexpectedly. - /// - /// This is used in the `LocalCrateContext` constructor to allow calling - /// functions that expect a complete `CrateContext`, even before the local - /// portion is fully initialized and attached to the `SharedCrateContext`. - fn dummy_ccx<'a>(&'a self, shared: &'a SharedCrateContext<'a, 'tcx>) - -> CrateContext<'a, 'tcx> { - CrateContext { - shared: shared, - local: self, - index: !0 as usize, - } - } -} - -impl<'b, 'tcx> CrateContext<'b, 'tcx> { - pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> { - self.shared - } - - pub fn local(&self) -> &'b LocalCrateContext<'tcx> { - self.local - } - - - /// Get a (possibly) different `CrateContext` from the same - /// `SharedCrateContext`. - pub fn rotate(&self) -> CrateContext<'b, 'tcx> { - self.shared.get_smallest_ccx() - } - - /// Either iterate over only `self`, or iterate over all `CrateContext`s in - /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)` - /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false` - /// otherwise. This method is useful for avoiding code duplication in - /// cases where it may or may not be necessary to translate code into every - /// context. 
- pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> { - CrateContextMaybeIterator { - shared: self.shared, - index: if iter_all { 0 } else { self.index }, - single: !iter_all, - origin: self.index, - } - } - - - pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { - self.shared.tcx - } - - pub fn sess<'a>(&'a self) -> &'a Session { - &self.shared.tcx.sess - } - - pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> { - Builder::new(self) - } - - pub fn raw_builder<'a>(&'a self) -> BuilderRef { - self.local.builder.b - } - - pub fn get_intrinsic(&self, key: &str) -> ValueRef { - if let Some(v) = self.intrinsics().borrow().get(key).cloned() { - return v; - } - match declare_intrinsic(self, key) { - Some(v) => return v, - None => panic!("unknown intrinsic '{}'", key) - } - } - - pub fn llmod(&self) -> ModuleRef { - self.local.llmod - } - - pub fn llcx(&self) -> ContextRef { - self.local.llcx - } - - pub fn td(&self) -> llvm::TargetDataRef { - unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) } - } - - pub fn tn<'a>(&'a self) -> &'a TypeNames { - &self.local.tn - } - - pub fn externs<'a>(&'a self) -> &'a RefCell { - &self.local.externs - } - - pub fn item_vals<'a>(&'a self) -> &'a RefCell> { - &self.local.item_vals - } - - pub fn export_map<'a>(&'a self) -> &'a ExportMap { - &self.shared.export_map - } - - pub fn reachable<'a>(&'a self) -> &'a NodeSet { - &self.shared.reachable - } - - pub fn item_symbols<'a>(&'a self) -> &'a RefCell> { - &self.shared.item_symbols - } - - pub fn link_meta<'a>(&'a self) -> &'a LinkMeta { - &self.shared.link_meta - } - - pub fn needs_unwind_cleanup_cache(&self) -> &RefCell, bool>> { - &self.local.needs_unwind_cleanup_cache - } - - pub fn fn_pointer_shims(&self) -> &RefCell, ValueRef>> { - &self.local.fn_pointer_shims - } - - pub fn drop_glues<'a>(&'a self) -> &'a RefCell, ValueRef>> { - &self.local.drop_glues - } - - pub fn external<'a>(&'a self) -> &'a RefCell>> { - &self.local.external - } - - pub fn 
external_srcs<'a>(&'a self) -> &'a RefCell> { - &self.local.external_srcs - } - - pub fn monomorphized<'a>(&'a self) -> &'a RefCell, ValueRef>> { - &self.local.monomorphized - } - - pub fn monomorphizing<'a>(&'a self) -> &'a RefCell> { - &self.local.monomorphizing - } - - pub fn vtables<'a>(&'a self) -> &'a RefCell, ValueRef>> { - &self.local.vtables - } - - pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell> { - &self.local.const_cstr_cache - } - - pub fn const_unsized<'a>(&'a self) -> &'a RefCell> { - &self.local.const_unsized - } - - pub fn const_globals<'a>(&'a self) -> &'a RefCell> { - &self.local.const_globals - } - - pub fn const_values<'a>(&'a self) -> &'a RefCell), - ValueRef>> { - &self.local.const_values - } - - pub fn extern_const_values<'a>(&'a self) -> &'a RefCell> { - &self.local.extern_const_values - } - - pub fn impl_method_cache<'a>(&'a self) - -> &'a RefCell> { - &self.local.impl_method_cache - } - - pub fn closure_bare_wrapper_cache<'a>(&'a self) -> &'a RefCell> { - &self.local.closure_bare_wrapper_cache - } - - pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell> { - &self.local.statics_to_rauw - } - - pub fn lltypes<'a>(&'a self) -> &'a RefCell, Type>> { - &self.local.lltypes - } - - pub fn llsizingtypes<'a>(&'a self) -> &'a RefCell, Type>> { - &self.local.llsizingtypes - } - - pub fn adt_reprs<'a>(&'a self) -> &'a RefCell, Rc>>> { - &self.local.adt_reprs - } - - pub fn symbol_hasher<'a>(&'a self) -> &'a RefCell { - &self.shared.symbol_hasher - } - - pub fn type_hashcodes<'a>(&'a self) -> &'a RefCell, String>> { - &self.local.type_hashcodes - } - - pub fn stats<'a>(&'a self) -> &'a Stats { - &self.shared.stats - } - - pub fn available_monomorphizations<'a>(&'a self) -> &'a RefCell> { - &self.local.available_monomorphizations - } - - pub fn available_drop_glues(&self) -> &RefCell, String>> { - &self.shared.available_drop_glues - } - - pub fn int_type(&self) -> Type { - self.local.int_type - } - - pub fn opaque_vec_type(&self) -> Type { - 
self.local.opaque_vec_type - } - - pub fn closure_vals<'a>(&'a self) -> &'a RefCell, ValueRef>> { - &self.local.closure_vals - } - - pub fn dbg_cx<'a>(&'a self) -> &'a Option> { - &self.local.dbg_cx - } - - pub fn eh_personality<'a>(&'a self) -> &'a RefCell> { - &self.local.eh_personality - } - - pub fn eh_unwind_resume<'a>(&'a self) -> &'a RefCell> { - &self.local.eh_unwind_resume - } - - pub fn rust_try_fn<'a>(&'a self) -> &'a RefCell> { - &self.local.rust_try_fn - } - - fn intrinsics<'a>(&'a self) -> &'a RefCell> { - &self.local.intrinsics - } - - pub fn count_llvm_insn(&self) { - self.local.n_llvm_insns.set(self.local.n_llvm_insns.get() + 1); - } - - pub fn trait_cache(&self) -> &RefCell, - traits::Vtable<'tcx, ()>>> { - &self.local.trait_cache - } - - /// Return exclusive upper bound on object size. - /// - /// The theoretical maximum object size is defined as the maximum positive `int` value. This - /// ensures that the `offset` semantics remain well-defined by allowing it to correctly index - /// every address within an object along with one byte past the end, along with allowing `int` - /// to store the difference between any two pointers into an object. - /// - /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer to - /// represent object size in bits. It would need to be 1 << 61 to account for this, but is - /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable - /// address space on 64-bit ARMv8 and x86_64. - pub fn obj_size_bound(&self) -> u64 { - match &self.sess().target.target.target_pointer_width[..] { - "32" => 1 << 31, - "64" => 1 << 47, - _ => unreachable!() // error handled by config::build_target_config - } - } - - pub fn report_overbig_object(&self, obj: Ty<'tcx>) -> ! 
{ - self.sess().fatal( - &format!("the type `{:?}` is too big for the current architecture", - obj)) - } - - pub fn enter_type_of(&self, ty: Ty<'tcx>) -> TypeOfDepthLock<'b, 'tcx> { - let current_depth = self.local.type_of_depth.get(); - debug!("enter_type_of({:?}) at depth {:?}", ty, current_depth); - if current_depth > self.sess().recursion_limit.get() { - self.sess().fatal( - &format!("overflow representing the type `{}`", ty)) - } - self.local.type_of_depth.set(current_depth + 1); - TypeOfDepthLock(self.local) - } - - pub fn check_overflow(&self) -> bool { - self.shared.check_overflow - } - - pub fn check_drop_flag_for_sanity(&self) -> bool { - // This controls whether we emit a conditional llvm.debugtrap - // guarded on whether the dropflag is one of its (two) valid - // values. - self.shared.check_drop_flag_for_sanity - } - - pub fn use_dll_storage_attrs(&self) -> bool { - self.shared.use_dll_storage_attrs() - } - - pub fn mir_map(&self) -> &'b MirMap<'tcx> { - self.shared.mir_map - } -} - -pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); - -impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> { - fn drop(&mut self) { - self.0.type_of_depth.set(self.0.type_of_depth.get() - 1); - } -} - -/// Declare any llvm intrinsics that you might need -fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option { - macro_rules! ifn { - ($name:expr, fn() -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret), - ccx.tcx().mk_nil()); - llvm::SetUnnamedAddr(f, false); - ccx.intrinsics().borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret), - ccx.tcx().mk_nil()); - llvm::SetUnnamedAddr(f, false); - ccx.intrinsics().borrow_mut().insert($name, f.clone()); - return Some(f); - } - ) - } - macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false)) - } - - let i8p = Type::i8p(ccx); - let void = Type::void(ccx); - let i1 = Type::i1(ccx); - let t_i8 = Type::i8(ccx); - let t_i16 = Type::i16(ccx); - let t_i32 = Type::i32(ccx); - let t_i64 = Type::i64(ccx); - let t_f32 = Type::f32(ccx); - let t_f64 = Type::f64(ccx); - - ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); - - ifn!("llvm.trap", fn() -> void); - ifn!("llvm.debugtrap", fn() -> void); - - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); - - ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); - ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); - ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); - 
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); - ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); - - ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); - ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); - ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); - ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); - ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); - ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); - ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.round.f32", fn(t_f32) -> t_f32); - ifn!("llvm.round.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); - ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); - ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); - ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); - ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); - - ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); - - ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); - - ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); - - ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - 
ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - - ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); - ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); - - ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); - - // Some intrinsics were introduced in later versions of LLVM, but they have - // fallbacks in libc or libm and such. - macro_rules! 
compatible_ifn { - ($name:expr, noop($cname:ident ($($arg:expr),*) -> void), $llvm_version:expr) => ( - if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } { - // The `if key == $name` is already in ifn! - ifn!($name, fn($($arg),*) -> void); - } else if key == $name { - let f = declare::declare_cfn(ccx, stringify!($cname), - Type::func(&[$($arg),*], &void), - ccx.tcx().mk_nil()); - llvm::SetLinkage(f, llvm::InternalLinkage); - - let bld = ccx.builder(); - let llbb = unsafe { - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), f, - "entry-block\0".as_ptr() as *const _) - }; - - bld.position_at_end(llbb); - bld.ret_void(); - - ccx.intrinsics().borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, $cname:ident ($($arg:expr),*) -> $ret:expr, $llvm_version:expr) => ( - if unsafe { llvm::LLVMVersionMinor() >= $llvm_version } { - // The `if key == $name` is already in ifn! - ifn!($name, fn($($arg),*) -> $ret); - } else if key == $name { - let f = declare::declare_cfn(ccx, stringify!($cname), - Type::func(&[$($arg),*], &$ret), - ccx.tcx().mk_nil()); - ccx.intrinsics().borrow_mut().insert($name, f.clone()); - return Some(f); - } - ) - } - - compatible_ifn!("llvm.assume", noop(llvmcompat_assume(i1) -> void), 6); - - if ccx.sess().opts.debuginfo != NoDebugInfo { - ifn!("llvm.dbg.declare", fn(Type::metadata(ccx), Type::metadata(ccx)) -> void); - ifn!("llvm.dbg.value", fn(Type::metadata(ccx), t_i64, Type::metadata(ccx)) -> void); - } - return None; -} diff --git a/src/librustc_trans/trans/controlflow.rs b/src/librustc_trans/trans/controlflow.rs deleted file mode 100644 index 45f46410068be..0000000000000 --- a/src/librustc_trans/trans/controlflow.rs +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::ValueRef; -use middle::def; -use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem}; -use trans::base::*; -use trans::basic_block::BasicBlock; -use trans::build::*; -use trans::callee; -use trans::cleanup::CleanupMethods; -use trans::cleanup; -use trans::common::*; -use trans::consts; -use trans::debuginfo; -use trans::debuginfo::{DebugLoc, ToDebugLoc}; -use trans::expr; -use trans::machine; -use trans; -use middle::ty; - -use rustc_front::hir; -use rustc_front::util as ast_util; - -use syntax::ast; -use syntax::parse::token::InternedString; -use syntax::parse::token; - -pub fn trans_stmt<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - s: &hir::Stmt) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_stmt"); - let fcx = cx.fcx; - debug!("trans_stmt({:?})", s); - - if cx.unreachable.get() { - return cx; - } - - if cx.sess().asm_comments() { - add_span_comment(cx, s.span, &format!("{:?}", s)); - } - - let mut bcx = cx; - - let id = ast_util::stmt_id(s); - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), id, s.span, false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - match s.node { - hir::StmtExpr(ref e, _) | hir::StmtSemi(ref e, _) => { - bcx = trans_stmt_semi(bcx, &**e); - } - hir::StmtDecl(ref d, _) => { - match d.node { - hir::DeclLocal(ref local) => { - bcx = init_local(bcx, &**local); - debuginfo::create_local_var_metadata(bcx, &**local); - } - // Inner items are visited by `trans_item`/`trans_meth`. 
- hir::DeclItem(_) => {}, - } - } - } - - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, ast_util::stmt_id(s)); - - return bcx; -} - -pub fn trans_stmt_semi<'blk, 'tcx>(cx: Block<'blk, 'tcx>, e: &hir::Expr) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_stmt_semi"); - - if cx.unreachable.get() { - return cx; - } - - let ty = expr_ty(cx, e); - if cx.fcx.type_needs_drop(ty) { - expr::trans_to_lvalue(cx, e, "stmt").bcx - } else { - expr::trans_into(cx, e, expr::Ignore) - } -} - -pub fn trans_block<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - b: &hir::Block, - mut dest: expr::Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_block"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let mut bcx = bcx; - - let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), b.id, b.span, true); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - for s in &b.stmts { - bcx = trans_stmt(bcx, s); - } - - if dest != expr::Ignore { - let block_ty = node_id_type(bcx, b.id); - - if b.expr.is_none() || type_is_zero_size(bcx.ccx(), block_ty) { - dest = expr::Ignore; - } else if b.expr.is_some() { - // If the block has an expression, but that expression isn't reachable, - // don't save into the destination given, ignore it. 
- if let Some(ref cfg) = bcx.fcx.cfg { - if !cfg.node_is_reachable(b.expr.as_ref().unwrap().id) { - dest = expr::Ignore; - } - } - } - } - - match b.expr { - Some(ref e) => { - if !bcx.unreachable.get() { - bcx = expr::trans_into(bcx, &**e, dest); - } - } - None => { - assert!(dest == expr::Ignore || bcx.unreachable.get()); - } - } - - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, b.id); - - return bcx; -} - -pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - if_id: ast::NodeId, - cond: &hir::Expr, - thn: &hir::Block, - els: Option<&hir::Expr>, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={})", - bcx.to_str(), if_id, cond, thn.id, - dest.to_string(bcx.ccx())); - let _icx = push_ctxt("trans_if"); - - if bcx.unreachable.get() { - return bcx; - } - - let mut bcx = bcx; - - let cond_val = unpack_result!(bcx, expr::trans(bcx, cond).to_llbool()); - - // Drop branches that are known to be impossible - if let Some(cv) = const_to_opt_uint(cond_val) { - if cv == 1 { - // if true { .. } [else { .. 
}] - bcx = trans_block(bcx, &*thn, dest); - trans::debuginfo::clear_source_location(bcx.fcx); - } else { - if let Some(elexpr) = els { - bcx = expr::trans_into(bcx, &*elexpr, dest); - trans::debuginfo::clear_source_location(bcx.fcx); - } - } - - return bcx; - } - - let name = format!("then-block-{}-", thn.id); - let then_bcx_in = bcx.fcx.new_id_block(&name[..], thn.id); - let then_bcx_out = trans_block(then_bcx_in, &*thn, dest); - trans::debuginfo::clear_source_location(bcx.fcx); - - let cond_source_loc = cond.debug_loc(); - - let next_bcx; - match els { - Some(elexpr) => { - let else_bcx_in = bcx.fcx.new_id_block("else-block", elexpr.id); - let else_bcx_out = expr::trans_into(else_bcx_in, &*elexpr, dest); - next_bcx = bcx.fcx.join_blocks(if_id, - &[then_bcx_out, else_bcx_out]); - CondBr(bcx, cond_val, then_bcx_in.llbb, else_bcx_in.llbb, cond_source_loc); - } - - None => { - next_bcx = bcx.fcx.new_id_block("next-block", if_id); - Br(then_bcx_out, next_bcx.llbb, DebugLoc::None); - CondBr(bcx, cond_val, then_bcx_in.llbb, next_bcx.llbb, cond_source_loc); - } - } - - // Clear the source location because it is still set to whatever has been translated - // right before. 
- trans::debuginfo::clear_source_location(next_bcx.fcx); - - next_bcx -} - -pub fn trans_while<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &hir::Expr, - cond: &hir::Expr, - body: &hir::Block) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_while"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // bcx - // | - // cond_bcx_in <--------+ - // | | - // cond_bcx_out | - // | | | - // | body_bcx_in | - // cleanup_blk | | - // | body_bcx_out --+ - // next_bcx_in - - let next_bcx_in = fcx.new_id_block("while_exit", loop_expr.id); - let cond_bcx_in = fcx.new_id_block("while_cond", cond.id); - let body_bcx_in = fcx.new_id_block("while_body", body.id); - - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, cond_bcx_in]); - - Br(bcx, cond_bcx_in.llbb, loop_expr.debug_loc()); - - // compile the block where we will handle loop cleanups - let cleanup_llbb = fcx.normal_exit_block(loop_expr.id, cleanup::EXIT_BREAK); - - // compile the condition - let Result {bcx: cond_bcx_out, val: cond_val} = - expr::trans(cond_bcx_in, cond).to_llbool(); - - CondBr(cond_bcx_out, cond_val, body_bcx_in.llbb, cleanup_llbb, cond.debug_loc()); - - // loop body: - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, cond_bcx_in.llbb, DebugLoc::None); - - fcx.pop_loop_cleanup_scope(loop_expr.id); - return next_bcx_in; -} - -pub fn trans_loop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - loop_expr: &hir::Expr, - body: &hir::Block) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_loop"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // bcx - // | - // body_bcx_in - // | - // body_bcx_out - // - // next_bcx - // - // Links between body_bcx_in and next_bcx are created by - // break statements. 
- - let next_bcx_in = bcx.fcx.new_id_block("loop_exit", loop_expr.id); - let body_bcx_in = bcx.fcx.new_id_block("loop_body", body.id); - - fcx.push_loop_cleanup_scope(loop_expr.id, [next_bcx_in, body_bcx_in]); - - Br(bcx, body_bcx_in.llbb, loop_expr.debug_loc()); - let body_bcx_out = trans_block(body_bcx_in, body, expr::Ignore); - Br(body_bcx_out, body_bcx_in.llbb, DebugLoc::None); - - fcx.pop_loop_cleanup_scope(loop_expr.id); - - // If there are no predecessors for the next block, we just translated an endless loop and the - // next block is unreachable - if BasicBlock(next_bcx_in.llbb).pred_iter().next().is_none() { - Unreachable(next_bcx_in); - } - - return next_bcx_in; -} - -pub fn trans_break_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - opt_label: Option, - exit: usize) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_break_cont"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - - // Locate loop that we will break to - let loop_id = match opt_label { - None => fcx.top_loop_scope(), - Some(_) => { - match bcx.tcx().def_map.borrow().get(&expr.id).map(|d| d.full_def()) { - Some(def::DefLabel(loop_id)) => loop_id, - r => { - bcx.tcx().sess.bug(&format!("{:?} in def-map for label", r)) - } - } - } - }; - - // Generate appropriate cleanup code and branch - let cleanup_llbb = fcx.normal_exit_block(loop_id, exit); - Br(bcx, cleanup_llbb, expr.debug_loc()); - Unreachable(bcx); // anything afterwards should be ignored - return bcx; -} - -pub fn trans_break<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { - return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_BREAK); -} - -pub fn trans_cont<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - label_opt: Option) - -> Block<'blk, 'tcx> { - return trans_break_cont(bcx, expr, label_opt, cleanup::EXIT_LOOP); -} - -pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - return_expr: &hir::Expr, - retval_expr: 
Option<&hir::Expr>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_ret"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let mut bcx = bcx; - let dest = match (fcx.llretslotptr.get(), retval_expr) { - (Some(_), Some(retval_expr)) => { - let ret_ty = expr_ty_adjusted(bcx, &*retval_expr); - expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot")) - } - _ => expr::Ignore, - }; - if let Some(x) = retval_expr { - bcx = expr::trans_into(bcx, &*x, dest); - match dest { - expr::SaveIn(slot) if fcx.needs_ret_allocas => { - Store(bcx, slot, fcx.llretslotptr.get().unwrap()); - } - _ => {} - } - } - let cleanup_llbb = fcx.return_exit_block(); - Br(bcx, cleanup_llbb, return_expr.debug_loc()); - Unreachable(bcx); - return bcx; -} - -pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - fail_str: InternedString) - -> Block<'blk, 'tcx> { - let ccx = bcx.ccx(); - let _icx = push_ctxt("trans_fail_value"); - - if bcx.unreachable.get() { - return bcx; - } - - let v_str = C_str_slice(ccx, fail_str); - let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo); - let filename = token::intern_and_get_ident(&loc.file.name); - let filename = C_str_slice(ccx, filename); - let line = C_u32(ccx, loc.line as u32); - let expr_file_line_const = C_struct(ccx, &[v_str, filename, line], false); - let align = machine::llalign_of_min(ccx, val_ty(expr_file_line_const)); - let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc"); - let args = vec!(expr_file_line); - let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; -} - -pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - call_info: NodeIdAndSpan, - index: ValueRef, - len: ValueRef) - -> Block<'blk, 'tcx> { - let ccx = bcx.ccx(); - let _icx = 
push_ctxt("trans_fail_bounds_check"); - - if bcx.unreachable.get() { - return bcx; - } - - // Extract the file/line from the span - let loc = bcx.sess().codemap().lookup_char_pos(call_info.span.lo); - let filename = token::intern_and_get_ident(&loc.file.name); - - // Invoke the lang item - let filename = C_str_slice(ccx, filename); - let line = C_u32(ccx, loc.line as u32); - let file_line_const = C_struct(ccx, &[filename, line], false); - let align = machine::llalign_of_min(ccx, val_ty(file_line_const)); - let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc"); - let args = vec!(file_line, index, len); - let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem); - let bcx = callee::trans_lang_call(bcx, - did, - &args[..], - Some(expr::Ignore), - call_info.debug_loc()).bcx; - Unreachable(bcx); - return bcx; -} diff --git a/src/librustc_trans/trans/datum.rs b/src/librustc_trans/trans/datum.rs deleted file mode 100644 index 32f263746d31e..0000000000000 --- a/src/librustc_trans/trans/datum.rs +++ /dev/null @@ -1,829 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! ## The Datum module -//! -//! A `Datum` encapsulates the result of evaluating a Rust expression. It -//! contains a `ValueRef` indicating the result, a `Ty` describing -//! the Rust type, but also a *kind*. The kind indicates whether the datum -//! has cleanup scheduled (lvalue) or not (rvalue) and -- in the case of -//! rvalues -- whether or not the value is "by ref" or "by value". -//! -//! The datum API is designed to try and help you avoid memory errors like -//! forgetting to arrange cleanup or duplicating a value. 
The type of the -//! datum incorporates the kind, and thus reflects whether it has cleanup -//! scheduled: -//! -//! - `Datum` -- by ref, cleanup scheduled -//! - `Datum` -- by value or by ref, no cleanup scheduled -//! - `Datum` -- either `Datum` or `Datum` -//! -//! Rvalue and expr datums are noncopyable, and most of the methods on -//! datums consume the datum itself (with some notable exceptions). This -//! reflects the fact that datums may represent affine values which ought -//! to be consumed exactly once, and if you were to try to (for example) -//! store an affine value multiple times, you would be duplicating it, -//! which would certainly be a bug. -//! -//! Some of the datum methods, however, are designed to work only on -//! copyable values such as ints or pointers. Those methods may borrow the -//! datum (`&self`) rather than consume it, but they always include -//! assertions on the type of the value represented to check that this -//! makes sense. An example is `shallow_copy()`, which duplicates -//! a datum value. -//! -//! Translating an expression always yields a `Datum` result, but -//! the methods `to_[lr]value_datum()` can be used to coerce a -//! `Datum` into a `Datum` or `Datum` as -//! needed. Coercing to an lvalue is fairly common, and generally occurs -//! whenever it is necessary to inspect a value and pull out its -//! subcomponents (for example, a match, or indexing expression). Coercing -//! to an rvalue is more unusual; it occurs when moving values from place -//! to place, such as in an assignment expression or parameter passing. -//! -//! ### Lvalues in detail -//! -//! An lvalue datum is one for which cleanup has been scheduled. Lvalue -//! datums are always located in memory, and thus the `ValueRef` for an -//! LLVM value is always a pointer to the actual Rust value. This means -//! that if the Datum has a Rust type of `int`, then the LLVM type of the -//! `ValueRef` will be `int*` (pointer to int). -//! -//! 
Because lvalues already have cleanups scheduled, the memory must be -//! zeroed to prevent the cleanup from taking place (presuming that the -//! Rust type needs drop in the first place, otherwise it doesn't -//! matter). The Datum code automatically performs this zeroing when the -//! value is stored to a new location, for example. -//! -//! Lvalues usually result from evaluating lvalue expressions. For -//! example, evaluating a local variable `x` yields an lvalue, as does a -//! reference to a field like `x.f` or an index `x[i]`. -//! -//! Lvalue datums can also arise by *converting* an rvalue into an lvalue. -//! This is done with the `to_lvalue_datum` method defined on -//! `Datum`. Basically this method just schedules cleanup if the -//! datum is an rvalue, possibly storing the value into a stack slot first -//! if needed. Converting rvalues into lvalues occurs in constructs like -//! `&foo()` or `match foo() { ref x => ... }`, where the user is -//! implicitly requesting a temporary. -//! -//! ### Rvalues in detail -//! -//! Rvalues datums are values with no cleanup scheduled. One must be -//! careful with rvalue datums to ensure that cleanup is properly -//! arranged, usually by converting to an lvalue datum or by invoking the -//! `add_clean` method. -//! -//! ### Scratch datums -//! -//! Sometimes you need some temporary scratch space. The functions -//! `[lr]value_scratch_datum()` can be used to get temporary stack -//! space. As their name suggests, they yield lvalues and rvalues -//! respectively. That is, the slot from `lvalue_scratch_datum` will have -//! cleanup arranged, and the slot from `rvalue_scratch_datum` does not. 
- -pub use self::Expr::*; -pub use self::RvalueMode::*; - -use llvm::ValueRef; -use trans::adt; -use trans::base::*; -use trans::build::{Load, Store}; -use trans::common::*; -use trans::cleanup; -use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; -use trans::expr; -use trans::tvec; -use middle::ty::Ty; - -use std::fmt; -use syntax::ast; -use syntax::codemap::DUMMY_SP; - -/// A `Datum` encapsulates the result of evaluating an expression. It -/// describes where the value is stored, what Rust type the value has, -/// whether it is addressed by reference, and so forth. Please refer -/// the section on datums in `README.md` for more details. -#[derive(Clone, Copy, Debug)] -pub struct Datum<'tcx, K> { - /// The llvm value. This is either a pointer to the Rust value or - /// the value itself, depending on `kind` below. - pub val: ValueRef, - - /// The rust type of the value. - pub ty: Ty<'tcx>, - - /// Indicates whether this is by-ref or by-value. - pub kind: K, -} - -pub struct DatumBlock<'blk, 'tcx: 'blk, K> { - pub bcx: Block<'blk, 'tcx>, - pub datum: Datum<'tcx, K>, -} - -#[derive(Debug)] -pub enum Expr { - /// a fresh value that was produced and which has no cleanup yet - /// because it has not yet "landed" into its permanent home - RvalueExpr(Rvalue), - - /// `val` is a pointer into memory for which a cleanup is scheduled - /// (and thus has type *T). If you move out of an Lvalue, you must - /// zero out the memory (FIXME #5016). - LvalueExpr(Lvalue), -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DropFlagInfo { - DontZeroJustUse(ast::NodeId), - ZeroAndMaintain(ast::NodeId), - None, -} - -impl DropFlagInfo { - pub fn must_zero(&self) -> bool { - match *self { - DropFlagInfo::DontZeroJustUse(..) => false, - DropFlagInfo::ZeroAndMaintain(..) 
=> true, - DropFlagInfo::None => true, - } - } - - pub fn hint_datum<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - let id = match *self { - DropFlagInfo::None => return None, - DropFlagInfo::DontZeroJustUse(id) | - DropFlagInfo::ZeroAndMaintain(id) => id, - }; - - let hints = bcx.fcx.lldropflag_hints.borrow(); - let retval = hints.hint_datum(id); - assert!(retval.is_some(), "An id (={}) means must have a hint", id); - retval - } -} - -// FIXME: having Lvalue be `Copy` is a bit of a footgun, since clients -// may not realize that subparts of an Lvalue can have a subset of -// drop-flags associated with them, while this as written will just -// memcpy the drop_flag_info. But, it is an easier way to get `_match` -// off the ground to just let this be `Copy` for now. -#[derive(Copy, Clone, Debug)] -pub struct Lvalue { - pub source: &'static str, - pub drop_flag_info: DropFlagInfo -} - -#[derive(Debug)] -pub struct Rvalue { - pub mode: RvalueMode -} - -/// Classifies what action we should take when a value is moved away -/// with respect to its drop-flag. -/// -/// Long term there will be no need for this classification: all flags -/// (which will be stored on the stack frame) will have the same -/// interpretation and maintenance code associated with them. -#[derive(Copy, Clone, Debug)] -pub enum HintKind { - /// When the value is moved, set the drop-flag to "dropped" - /// (i.e. "zero the flag", even when the specific representation - /// is not literally 0) and when it is reinitialized, set the - /// drop-flag back to "initialized". - ZeroAndMaintain, - - /// When the value is moved, do not set the drop-flag to "dropped" - /// However, continue to read the drop-flag in deciding whether to - /// drop. 
(In essence, the path/fragment in question will never - /// need to be dropped at the points where it is moved away by - /// this code, but we are defending against the scenario where - /// some *other* code could move away (or drop) the value and thus - /// zero-the-flag, which is why we will still read from it. - DontZeroJustUse, -} - -impl Lvalue { // Constructors for various Lvalues. - pub fn new<'blk, 'tcx>(source: &'static str) -> Lvalue { - debug!("Lvalue at {} no drop flag info", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_dropflag_hint(source: &'static str) -> Lvalue { - debug!("Lvalue at {} is drop flag hint", source); - Lvalue { source: source, drop_flag_info: DropFlagInfo::None } - } - - pub fn new_with_hint<'blk, 'tcx>(source: &'static str, - bcx: Block<'blk, 'tcx>, - id: ast::NodeId, - k: HintKind) -> Lvalue { - let (opt_id, info) = { - let hint_available = Lvalue::has_dropflag_hint(bcx, id) && - bcx.tcx().sess.nonzeroing_move_hints(); - let info = match k { - HintKind::ZeroAndMaintain if hint_available => - DropFlagInfo::ZeroAndMaintain(id), - HintKind::DontZeroJustUse if hint_available => - DropFlagInfo::DontZeroJustUse(id), - _ => - DropFlagInfo::None, - }; - (Some(id), info) - }; - debug!("Lvalue at {}, id: {:?} info: {:?}", source, opt_id, info); - Lvalue { source: source, drop_flag_info: info } - } -} // end Lvalue constructor methods. - -impl Lvalue { - fn has_dropflag_hint<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - id: ast::NodeId) -> bool { - let hints = bcx.fcx.lldropflag_hints.borrow(); - hints.has_hint(id) - } - pub fn dropflag_hint<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>) - -> Option> { - self.drop_flag_info.hint_datum(bcx) - } -} - -impl Rvalue { - pub fn new(m: RvalueMode) -> Rvalue { - Rvalue { mode: m } - } -} - -// Make Datum linear for more type safety. 
-impl Drop for Rvalue { - fn drop(&mut self) { } -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum RvalueMode { - /// `val` is a pointer to the actual value (and thus has type *T) - ByRef, - - /// `val` is the actual value (*only used for immediates* like ints, ptrs) - ByValue, -} - -pub fn immediate_rvalue<'tcx>(val: ValueRef, ty: Ty<'tcx>) -> Datum<'tcx, Rvalue> { - return Datum::new(val, ty, Rvalue::new(ByValue)); -} - -pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - return DatumBlock::new(bcx, immediate_rvalue(val, ty)) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. The memory will be dropped upon exit from `scope`. The callback `populate` should -/// initialize the memory. -/// -/// The flag `zero` indicates how the temporary space itself should be -/// initialized at the outset of the function; the only time that -/// `InitAlloca::Uninit` is a valid value for `zero` is when the -/// caller can prove that either (1.) the code injected by `populate` -/// onto `bcx` always dominates the end of `scope`, or (2.) the data -/// being allocated has no associated destructor. -pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str, - zero: InitAlloca, - scope: cleanup::ScopeId, - arg: A, - populate: F) - -> DatumBlock<'blk, 'tcx, Lvalue> where - F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed. - // (See discussion at Issue 30530.) - let scratch = alloc_ty_init(bcx, ty, zero, name); - debug!("lvalue_scratch_datum scope={:?} scratch={} ty={:?}", - scope, bcx.ccx().tn().val_to_string(scratch), ty); - - // Subtle. Populate the scratch memory *before* scheduling cleanup. 
- let bcx = populate(arg, bcx, scratch); - bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); - - DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) -} - -/// Allocates temporary space on the stack using alloca() and returns a by-ref Datum pointing to -/// it. If `zero` is true, the space will be zeroed when it is allocated; this is normally not -/// necessary, but in the case of automatic rooting in match statements it is possible to have -/// temporaries that may not get initialized if a certain arm is not taken, so we must zero them. -/// You must arrange any cleanups etc yourself! -pub fn rvalue_scratch_datum<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> Datum<'tcx, Rvalue> { - let scratch = alloc_ty(bcx, ty, name); - call_lifetime_start(bcx, scratch); - Datum::new(scratch, ty, Rvalue::new(ByRef)) -} - -/// Indicates the "appropriate" mode for this value, which is either by ref or by value, depending -/// on whether type is immediate or not. -pub fn appropriate_rvalue_mode<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> RvalueMode { - if type_is_immediate(ccx, ty) { - ByValue - } else { - ByRef - } -} - -fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId, - val: ValueRef, - ty: Ty<'tcx>) { - debug!("add_rvalue_clean scope={:?} val={} ty={:?}", - scope, fcx.ccx.tn().val_to_string(val), ty); - match mode { - ByValue => { fcx.schedule_drop_immediate(scope, val, ty); } - ByRef => { - fcx.schedule_lifetime_end(scope, val); - fcx.schedule_drop_mem(scope, val, ty, None); - } - } -} - -pub trait KindOps { - - /// Take appropriate action after the value in `datum` has been - /// stored to a new location. 
- fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx>; - - /// True if this mode is a reference mode, meaning that the datum's - /// val field is a pointer to the actual value - fn is_by_ref(&self) -> bool; - - /// Converts to an Expr kind - fn to_expr_kind(self) -> Expr; - -} - -impl KindOps for Rvalue { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - _val: ValueRef, - _ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - // No cleanup is scheduled for an rvalue, so we don't have - // to do anything after a move to cancel or duplicate it. - if self.is_by_ref() { - call_lifetime_end(bcx, _val); - } - bcx - } - - fn is_by_ref(&self) -> bool { - self.mode == ByRef - } - - fn to_expr_kind(self) -> Expr { - RvalueExpr(self) - } -} - -impl KindOps for Lvalue { - /// If an lvalue is moved, we must zero out the memory in which it resides so as to cancel - /// cleanup. If an @T lvalue is copied, we must increment the reference count. - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("::post_store"); - if bcx.fcx.type_needs_drop(ty) { - // cancel cleanup of affine values: - // 1. if it has drop-hint, mark as moved; then code - // aware of drop-hint won't bother calling the - // drop-glue itself. - if let Some(hint_datum) = self.drop_flag_info.hint_datum(bcx) { - let moved_hint_byte = adt::DTOR_MOVED_HINT; - let hint_llval = hint_datum.to_value().value(); - Store(bcx, C_u8(bcx.fcx.ccx, moved_hint_byte), hint_llval); - } - // 2. if the drop info says its necessary, drop-fill the memory. - if self.drop_flag_info.must_zero() { - let () = drop_done_fill_mem(bcx, val, ty); - } - bcx - } else { - // FIXME (#5016) would be nice to assert this, but we have - // to allow for e.g. DontZeroJustUse flags, for now. 
- // - // (The dropflag hint construction should be taking - // !type_needs_drop into account; earlier analysis phases - // may not have all the info they need to include such - // information properly, I think; in particular the - // fragments analysis works on a non-monomorphized view of - // the code.) - // - // assert_eq!(self.drop_flag_info, DropFlagInfo::None); - bcx - } - } - - fn is_by_ref(&self) -> bool { - true - } - - fn to_expr_kind(self) -> Expr { - LvalueExpr(self) - } -} - -impl KindOps for Expr { - fn post_store<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - val: ValueRef, - ty: Ty<'tcx>) - -> Block<'blk, 'tcx> { - match *self { - LvalueExpr(ref l) => l.post_store(bcx, val, ty), - RvalueExpr(ref r) => r.post_store(bcx, val, ty), - } - } - - fn is_by_ref(&self) -> bool { - match *self { - LvalueExpr(ref l) => l.is_by_ref(), - RvalueExpr(ref r) => r.is_by_ref() - } - } - - fn to_expr_kind(self) -> Expr { - self - } -} - -impl<'tcx> Datum<'tcx, Rvalue> { - /// Schedules a cleanup for this datum in the given scope. That means that this datum is no - /// longer an rvalue datum; hence, this function consumes the datum and returns the contained - /// ValueRef. - pub fn add_clean<'a>(self, - fcx: &FunctionContext<'a, 'tcx>, - scope: cleanup::ScopeId) - -> ValueRef { - add_rvalue_clean(self.kind.mode, fcx, scope, self.val, self.ty); - self.val - } - - /// Returns an lvalue datum (that is, a by ref datum with cleanup scheduled). If `self` is not - /// already an lvalue, cleanup will be scheduled in the temporary scope for `expr_id`. 
- pub fn to_lvalue_datum_in_scope<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - scope: cleanup::ScopeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let fcx = bcx.fcx; - - match self.kind.mode { - ByRef => { - add_rvalue_clean(ByRef, fcx, scope, self.val, self.ty); - DatumBlock::new(bcx, Datum::new( - self.val, - self.ty, - Lvalue::new("datum::to_lvalue_datum_in_scope"))) - } - - ByValue => { - lvalue_scratch_datum( - bcx, self.ty, name, InitAlloca::Dropped, scope, self, - |this, bcx, llval| { - debug!("populate call for Datum::to_lvalue_datum_in_scope \ - self.ty={:?}", this.ty); - // do not call_lifetime_start here; the - // `InitAlloc::Dropped` will start scratch - // value's lifetime at open of function body. - let bcx = this.store_to(bcx, llval); - bcx.fcx.schedule_lifetime_end(scope, llval); - bcx - }) - } - } - } - - pub fn to_ref_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - let mut bcx = bcx; - match self.kind.mode { - ByRef => DatumBlock::new(bcx, self), - ByValue => { - let scratch = rvalue_scratch_datum(bcx, self.ty, "to_ref"); - bcx = self.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - } - } - - pub fn to_appropriate_datum<'blk>(self, bcx: Block<'blk, 'tcx>) - -> DatumBlock<'blk, 'tcx, Rvalue> { - match self.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - self.to_ref_datum(bcx) - } - ByValue => { - match self.kind.mode { - ByValue => DatumBlock::new(bcx, self), - ByRef => { - let llval = load_ty(bcx, self.val, self.ty); - call_lifetime_end(bcx, self.val); - DatumBlock::new(bcx, Datum::new(llval, self.ty, Rvalue::new(ByValue))) - } - } - } - } - } -} - -/// Methods suitable for "expr" datums that could be either lvalues or -/// rvalues. These include coercions into lvalues/rvalues but also a number -/// of more general operations. 
(Some of those operations could be moved to -/// the more general `impl Datum`, but it's convenient to have them -/// here since we can `match self.kind` rather than having to implement -/// generic methods in `KindOps`.) -impl<'tcx> Datum<'tcx, Expr> { - fn match_kind(self, if_lvalue: F, if_rvalue: G) -> R where - F: FnOnce(Datum<'tcx, Lvalue>) -> R, - G: FnOnce(Datum<'tcx, Rvalue>) -> R, - { - let Datum { val, ty, kind } = self; - match kind { - LvalueExpr(l) => if_lvalue(Datum::new(val, ty, l)), - RvalueExpr(r) => if_rvalue(Datum::new(val, ty, r)), - } - } - - /// Asserts that this datum *is* an lvalue and returns it. - #[allow(dead_code)] // potentially useful - pub fn assert_lvalue(self, bcx: Block) -> Datum<'tcx, Lvalue> { - self.match_kind( - |d| d, - |_| bcx.sess().bug("assert_lvalue given rvalue")) - } - - pub fn store_to_dest<'blk>(self, - bcx: Block<'blk, 'tcx>, - dest: expr::Dest, - expr_id: ast::NodeId) - -> Block<'blk, 'tcx> { - match dest { - expr::Ignore => { - self.add_clean_if_rvalue(bcx, expr_id); - bcx - } - expr::SaveIn(addr) => { - self.store_to(bcx, addr) - } - } - } - - /// Arranges cleanup for `self` if it is an rvalue. Use when you are done working with a value - /// that may need drop. - pub fn add_clean_if_rvalue<'blk>(self, - bcx: Block<'blk, 'tcx>, - expr_id: ast::NodeId) { - self.match_kind( - |_| { /* Nothing to do, cleanup already arranged */ }, - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.add_clean(bcx.fcx, scope); - }) - } - - pub fn to_lvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &str, - expr_id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Lvalue> { - debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx())); - - self.match_kind( - |l| DatumBlock::new(bcx, l), - |r| { - let scope = cleanup::temporary_scope(bcx.tcx(), expr_id); - r.to_lvalue_datum_in_scope(bcx, name, scope) - }) - } - - /// Ensures that we have an rvalue datum (that is, a datum with no cleanup scheduled). 
- pub fn to_rvalue_datum<'blk>(self, - bcx: Block<'blk, 'tcx>, - name: &'static str) - -> DatumBlock<'blk, 'tcx, Rvalue> { - self.match_kind( - |l| { - let mut bcx = bcx; - match l.appropriate_rvalue_mode(bcx.ccx()) { - ByRef => { - let scratch = rvalue_scratch_datum(bcx, l.ty, name); - bcx = l.store_to(bcx, scratch.val); - DatumBlock::new(bcx, scratch) - } - ByValue => { - let v = load_ty(bcx, l.val, l.ty); - bcx = l.kind.post_store(bcx, l.val, l.ty); - DatumBlock::new(bcx, Datum::new(v, l.ty, Rvalue::new(ByValue))) - } - } - }, - |r| DatumBlock::new(bcx, r)) - } - -} - -/// Methods suitable only for lvalues. These include the various -/// operations to extract components out of compound data structures, -/// such as extracting the field from a struct or a particular element -/// from an array. -impl<'tcx> Datum<'tcx, Lvalue> { - /// Converts a datum into a by-ref value. The datum type must be one which is always passed by - /// reference. - pub fn to_llref(self) -> ValueRef { - self.val - } - - // Extracts a component of a compound data structure (e.g., a field from a - // struct). Note that if self is an opened, unsized type then the returned - // datum may also be unsized _without the size information_. It is the - // callers responsibility to package the result in some way to make a valid - // datum in that case (e.g., by making a fat pointer or opened pair). 
- pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>, - gep: F) - -> Datum<'tcx, Lvalue> where - F: FnOnce(adt::MaybeSizedValue) -> ValueRef, - { - let val = if type_is_sized(bcx.tcx(), self.ty) { - let val = adt::MaybeSizedValue::sized(self.val); - gep(val) - } else { - let val = adt::MaybeSizedValue::unsized_( - Load(bcx, expr::get_dataptr(bcx, self.val)), - Load(bcx, expr::get_meta(bcx, self.val))); - gep(val) - }; - Datum { - val: val, - kind: Lvalue::new("Datum::get_element"), - ty: ty, - } - } - - pub fn get_vec_base_and_len<'blk>(&self, bcx: Block<'blk, 'tcx>) - -> (ValueRef, ValueRef) { - //! Converts a vector into the slice pair. - - tvec::get_base_and_len(bcx, self.val, self.ty) - } -} - -/// Generic methods applicable to any sort of datum. -impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> { - pub fn new(val: ValueRef, ty: Ty<'tcx>, kind: K) -> Datum<'tcx, K> { - Datum { val: val, ty: ty, kind: kind } - } - - pub fn to_expr_datum(self) -> Datum<'tcx, Expr> { - let Datum { val, ty, kind } = self; - Datum { val: val, ty: ty, kind: kind.to_expr_kind() } - } - - /// Moves or copies this value into a new home, as appropriate depending on the type of the - /// datum. This method consumes the datum, since it would be incorrect to go on using the datum - /// if the value represented is affine (and hence the value is moved). - pub fn store_to<'blk>(self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - self.shallow_copy_raw(bcx, dst); - - self.kind.post_store(bcx, self.val, self.ty) - } - - /// Helper function that performs a shallow copy of this value into `dst`, which should be a - /// pointer to a memory location suitable for `self.ty`. `dst` should contain uninitialized - /// memory (either newly allocated, zeroed, or dropped). - /// - /// This function is private to datums because it leaves memory in an unstable state, where the - /// source value has been copied but not zeroed. 
Public methods are `store_to` (if you no - /// longer need the source value) or `shallow_copy` (if you wish the source value to remain - /// valid). - fn shallow_copy_raw<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("copy_to_no_check"); - - if type_is_zero_size(bcx.ccx(), self.ty) { - return bcx; - } - - if self.kind.is_by_ref() { - memcpy_ty(bcx, dst, self.val, self.ty); - } else { - store_ty(bcx, self.val, dst, self.ty); - } - - return bcx; - } - - /// Copies the value into a new location. This function always preserves the existing datum as - /// a valid value. Therefore, it does not consume `self` and, also, cannot be applied to affine - /// values (since they must never be duplicated). - pub fn shallow_copy<'blk>(&self, - bcx: Block<'blk, 'tcx>, - dst: ValueRef) - -> Block<'blk, 'tcx> { - /*! - * Copies the value into a new location. This function always - * preserves the existing datum as a valid value. Therefore, - * it does not consume `self` and, also, cannot be applied to - * affine values (since they must never be duplicated). - */ - - assert!(!self.ty - .moves_by_default(&bcx.tcx().empty_parameter_environment(), DUMMY_SP)); - self.shallow_copy_raw(bcx, dst) - } - - #[allow(dead_code)] // useful for debugging - pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String { - format!("Datum({}, {:?}, {:?})", - ccx.tn().val_to_string(self.val), - self.ty, - self.kind) - } - - /// See the `appropriate_rvalue_mode()` function - pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>) - -> RvalueMode { - appropriate_rvalue_mode(ccx, self.ty) - } - - /// Converts `self` into a by-value `ValueRef`. Consumes this datum (i.e., absolves you of - /// responsibility to cleanup the value). For this to work, the value must be something - /// scalar-ish (like an int or a pointer) which (1) does not require drop glue and (2) is - /// naturally passed around by value, and not by reference. 
- pub fn to_llscalarish<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(!bcx.fcx.type_needs_drop(self.ty)); - assert!(self.appropriate_rvalue_mode(bcx.ccx()) == ByValue); - if self.kind.is_by_ref() { - load_ty(bcx, self.val, self.ty) - } else { - self.val - } - } - - pub fn to_llbool<'blk>(self, bcx: Block<'blk, 'tcx>) -> ValueRef { - assert!(self.ty.is_bool()); - self.to_llscalarish(bcx) - } -} - -impl<'blk, 'tcx, K> DatumBlock<'blk, 'tcx, K> { - pub fn new(bcx: Block<'blk, 'tcx>, datum: Datum<'tcx, K>) - -> DatumBlock<'blk, 'tcx, K> { - DatumBlock { bcx: bcx, datum: datum } - } -} - -impl<'blk, 'tcx, K: KindOps + fmt::Debug> DatumBlock<'blk, 'tcx, K> { - pub fn to_expr_datumblock(self) -> DatumBlock<'blk, 'tcx, Expr> { - DatumBlock::new(self.bcx, self.datum.to_expr_datum()) - } -} - -impl<'blk, 'tcx> DatumBlock<'blk, 'tcx, Expr> { - pub fn store_to_dest(self, - dest: expr::Dest, - expr_id: ast::NodeId) -> Block<'blk, 'tcx> { - let DatumBlock { bcx, datum } = self; - datum.store_to_dest(bcx, dest, expr_id) - } - - pub fn to_llbool(self) -> Result<'blk, 'tcx> { - let DatumBlock { datum, bcx } = self; - Result::new(bcx, datum.to_llbool(bcx)) - } -} diff --git a/src/librustc_trans/trans/debuginfo/create_scope_map.rs b/src/librustc_trans/trans/debuginfo/create_scope_map.rs deleted file mode 100644 index 237d31c47783d..0000000000000 --- a/src/librustc_trans/trans/debuginfo/create_scope_map.rs +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use super::metadata::file_metadata; -use super::utils::DIB; - -use llvm; -use llvm::debuginfo::{DIScope, DISubprogram}; -use trans::common::CrateContext; -use middle::pat_util; -use rustc::util::nodemap::NodeMap; - -use libc::c_uint; -use syntax::codemap::{Span, Pos}; -use syntax::{ast, codemap}; - -use rustc_front; -use rustc_front::hir; - -// This procedure builds the *scope map* for a given function, which maps any -// given ast::NodeId in the function's AST to the correct DIScope metadata instance. -// -// This builder procedure walks the AST in execution order and keeps track of -// what belongs to which scope, creating DIScope DIEs along the way, and -// introducing *artificial* lexical scope descriptors where necessary. These -// artificial scopes allow GDB to correctly handle name shadowing. -pub fn create_scope_map(cx: &CrateContext, - args: &[hir::Arg], - fn_entry_block: &hir::Block, - fn_metadata: DISubprogram, - fn_ast_id: ast::NodeId) - -> NodeMap { - let mut scope_map = NodeMap(); - - let def_map = &cx.tcx().def_map; - - let mut scope_stack = vec!(ScopeStackEntry { scope_metadata: fn_metadata, name: None }); - scope_map.insert(fn_ast_id, fn_metadata); - - // Push argument identifiers onto the stack so arguments integrate nicely - // with variable shadowing. - for arg in args { - pat_util::pat_bindings_ident(def_map, &*arg.pat, |_, node_id, _, path1| { - scope_stack.push(ScopeStackEntry { scope_metadata: fn_metadata, - name: Some(path1.node.unhygienic_name) }); - scope_map.insert(node_id, fn_metadata); - }) - } - - // Clang creates a separate scope for function bodies, so let's do this too. - with_new_scope(cx, - fn_entry_block.span, - &mut scope_stack, - &mut scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, fn_entry_block, scope_stack, scope_map); - }); - - return scope_map; -} - -// local helper functions for walking the AST. 
-fn with_new_scope(cx: &CrateContext, - scope_span: Span, - scope_stack: &mut Vec , - scope_map: &mut NodeMap, - inner_walk: F) where - F: FnOnce(&CrateContext, &mut Vec, &mut NodeMap), -{ - // Create a new lexical scope and push it onto the stack - let loc = cx.sess().codemap().lookup_char_pos(scope_span.lo); - let file_metadata = file_metadata(cx, &loc.file.name); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) - }; - - scope_stack.push(ScopeStackEntry { scope_metadata: scope_metadata, name: None }); - - inner_walk(cx, scope_stack, scope_map); - - // pop artificial scopes - while scope_stack.last().unwrap().name.is_some() { - scope_stack.pop(); - } - - if scope_stack.last().unwrap().scope_metadata != scope_metadata { - cx.sess().span_bug(scope_span, "debuginfo: Inconsistency in scope management."); - } - - scope_stack.pop(); -} - -struct ScopeStackEntry { - scope_metadata: DIScope, - name: Option -} - -fn walk_block(cx: &CrateContext, - block: &hir::Block, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - scope_map.insert(block.id, scope_stack.last().unwrap().scope_metadata); - - // The interesting things here are statements and the concluding expression. 
- for statement in &block.stmts { - scope_map.insert(rustc_front::util::stmt_id(statement), - scope_stack.last().unwrap().scope_metadata); - - match statement.node { - hir::StmtDecl(ref decl, _) => - walk_decl(cx, &**decl, scope_stack, scope_map), - hir::StmtExpr(ref exp, _) | - hir::StmtSemi(ref exp, _) => - walk_expr(cx, &**exp, scope_stack, scope_map), - } - } - - if let Some(ref exp) = block.expr { - walk_expr(cx, &**exp, scope_stack, scope_map); - } -} - -fn walk_decl(cx: &CrateContext, - decl: &hir::Decl, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - match *decl { - codemap::Spanned { node: hir::DeclLocal(ref local), .. } => { - scope_map.insert(local.id, scope_stack.last().unwrap().scope_metadata); - - walk_pattern(cx, &*local.pat, scope_stack, scope_map); - - if let Some(ref exp) = local.init { - walk_expr(cx, &**exp, scope_stack, scope_map); - } - } - _ => () - } -} - -fn walk_pattern(cx: &CrateContext, - pat: &hir::Pat, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - - let def_map = &cx.tcx().def_map; - - // Unfortunately, we cannot just use pat_util::pat_bindings() or - // ast_util::walk_pat() here because we have to visit *all* nodes in - // order to put them into the scope map. The above functions don't do that. - match pat.node { - hir::PatIdent(_, ref path1, ref sub_pat_opt) => { - - // Check if this is a binding. If so we need to put it on the - // scope stack and maybe introduce an artificial scope - if pat_util::pat_is_binding(&def_map.borrow(), &*pat) { - - let name = path1.node.unhygienic_name; - - // LLVM does not properly generate 'DW_AT_start_scope' fields - // for variable DIEs. For this reason we have to introduce - // an artificial scope at bindings whenever a variable with - // the same name is declared in *any* parent scope. 
- // - // Otherwise the following error occurs: - // - // let x = 10; - // - // do_something(); // 'gdb print x' correctly prints 10 - // - // { - // do_something(); // 'gdb print x' prints 0, because it - // // already reads the uninitialized 'x' - // // from the next line... - // let x = 100; - // do_something(); // 'gdb print x' correctly prints 100 - // } - - // Is there already a binding with that name? - // N.B.: this comparison must be UNhygienic... because - // gdb knows nothing about the context, so any two - // variables with the same name will cause the problem. - let need_new_scope = scope_stack - .iter() - .any(|entry| entry.name == Some(name)); - - if need_new_scope { - // Create a new lexical scope and push it onto the stack - let loc = cx.sess().codemap().lookup_char_pos(pat.span.lo); - let file_metadata = file_metadata(cx, &loc.file.name); - let parent_scope = scope_stack.last().unwrap().scope_metadata; - - let scope_metadata = unsafe { - llvm::LLVMDIBuilderCreateLexicalBlock( - DIB(cx), - parent_scope, - file_metadata, - loc.line as c_uint, - loc.col.to_usize() as c_uint) - }; - - scope_stack.push(ScopeStackEntry { - scope_metadata: scope_metadata, - name: Some(name) - }); - - } else { - // Push a new entry anyway so the name can be found - let prev_metadata = scope_stack.last().unwrap().scope_metadata; - scope_stack.push(ScopeStackEntry { - scope_metadata: prev_metadata, - name: Some(name) - }); - } - } - - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - if let Some(ref sub_pat) = *sub_pat_opt { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - } - - hir::PatWild => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - hir::PatEnum(_, ref sub_pats_opt) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - if let Some(ref sub_pats) = *sub_pats_opt { - for p in sub_pats { - walk_pattern(cx, &**p, scope_stack, scope_map); - } - } - } - - hir::PatQPath(..) 
=> { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - } - - hir::PatStruct(_, ref field_pats, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for &codemap::Spanned { - node: hir::FieldPat { pat: ref sub_pat, .. }, - .. - } in field_pats { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - } - - hir::PatTup(ref sub_pats) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in sub_pats { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - } - - hir::PatBox(ref sub_pat) | hir::PatRegion(ref sub_pat, _) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - - hir::PatLit(ref exp) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &**exp, scope_stack, scope_map); - } - - hir::PatRange(ref exp1, ref exp2) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - walk_expr(cx, &**exp1, scope_stack, scope_map); - walk_expr(cx, &**exp2, scope_stack, scope_map); - } - - hir::PatVec(ref front_sub_pats, ref middle_sub_pats, ref back_sub_pats) => { - scope_map.insert(pat.id, scope_stack.last().unwrap().scope_metadata); - - for sub_pat in front_sub_pats { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - - if let Some(ref sub_pat) = *middle_sub_pats { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - - for sub_pat in back_sub_pats { - walk_pattern(cx, &**sub_pat, scope_stack, scope_map); - } - } - } -} - -fn walk_expr(cx: &CrateContext, - exp: &hir::Expr, - scope_stack: &mut Vec , - scope_map: &mut NodeMap) { - - scope_map.insert(exp.id, scope_stack.last().unwrap().scope_metadata); - - match exp.node { - hir::ExprLit(_) | - hir::ExprBreak(_) | - hir::ExprAgain(_) | - hir::ExprPath(..) 
=> {} - - hir::ExprCast(ref sub_exp, _) | - hir::ExprType(ref sub_exp, _) | - hir::ExprAddrOf(_, ref sub_exp) | - hir::ExprField(ref sub_exp, _) | - hir::ExprTupField(ref sub_exp, _) => - walk_expr(cx, &**sub_exp, scope_stack, scope_map), - - hir::ExprBox(ref sub_expr) => { - walk_expr(cx, &**sub_expr, scope_stack, scope_map); - } - - hir::ExprRet(ref exp_opt) => match *exp_opt { - Some(ref sub_exp) => walk_expr(cx, &**sub_exp, scope_stack, scope_map), - None => () - }, - - hir::ExprUnary(_, ref sub_exp) => { - walk_expr(cx, &**sub_exp, scope_stack, scope_map); - } - - hir::ExprAssignOp(_, ref lhs, ref rhs) | - hir::ExprIndex(ref lhs, ref rhs) | - hir::ExprBinary(_, ref lhs, ref rhs) => { - walk_expr(cx, &**lhs, scope_stack, scope_map); - walk_expr(cx, &**rhs, scope_stack, scope_map); - } - - hir::ExprRange(ref start, ref end) => { - start.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map)); - end.as_ref().map(|e| walk_expr(cx, &**e, scope_stack, scope_map)); - } - - hir::ExprVec(ref init_expressions) | - hir::ExprTup(ref init_expressions) => { - for ie in init_expressions { - walk_expr(cx, &**ie, scope_stack, scope_map); - } - } - - hir::ExprAssign(ref sub_exp1, ref sub_exp2) | - hir::ExprRepeat(ref sub_exp1, ref sub_exp2) => { - walk_expr(cx, &**sub_exp1, scope_stack, scope_map); - walk_expr(cx, &**sub_exp2, scope_stack, scope_map); - } - - hir::ExprIf(ref cond_exp, ref then_block, ref opt_else_exp) => { - walk_expr(cx, &**cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - then_block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &**then_block, scope_stack, scope_map); - }); - - match *opt_else_exp { - Some(ref else_exp) => - walk_expr(cx, &**else_exp, scope_stack, scope_map), - _ => () - } - } - - hir::ExprWhile(ref cond_exp, ref loop_body, _) => { - walk_expr(cx, &**cond_exp, scope_stack, scope_map); - - with_new_scope(cx, - loop_body.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - 
walk_block(cx, &**loop_body, scope_stack, scope_map); - }) - } - - hir::ExprLoop(ref block, _) | - hir::ExprBlock(ref block) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - walk_block(cx, &**block, scope_stack, scope_map); - }) - } - - hir::ExprClosure(_, ref decl, ref block) => { - with_new_scope(cx, - block.span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for &hir::Arg { pat: ref pattern, .. } in &decl.inputs { - walk_pattern(cx, &**pattern, scope_stack, scope_map); - } - - walk_block(cx, &**block, scope_stack, scope_map); - }) - } - - hir::ExprCall(ref fn_exp, ref args) => { - walk_expr(cx, &**fn_exp, scope_stack, scope_map); - - for arg_exp in args { - walk_expr(cx, &**arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMethodCall(_, _, ref args) => { - for arg_exp in args { - walk_expr(cx, &**arg_exp, scope_stack, scope_map); - } - } - - hir::ExprMatch(ref discriminant_exp, ref arms, _) => { - walk_expr(cx, &**discriminant_exp, scope_stack, scope_map); - - // For each arm we have to first walk the pattern as these might - // introduce new artificial scopes. It should be sufficient to - // walk only one pattern per arm, as they all must contain the - // same binding names. - - for arm_ref in arms { - let arm_span = arm_ref.pats[0].span; - - with_new_scope(cx, - arm_span, - scope_stack, - scope_map, - |cx, scope_stack, scope_map| { - for pat in &arm_ref.pats { - walk_pattern(cx, &**pat, scope_stack, scope_map); - } - - if let Some(ref guard_exp) = arm_ref.guard { - walk_expr(cx, &**guard_exp, scope_stack, scope_map) - } - - walk_expr(cx, &*arm_ref.body, scope_stack, scope_map); - }) - } - } - - hir::ExprStruct(_, ref fields, ref base_exp) => { - for &hir::Field { expr: ref exp, .. 
} in fields { - walk_expr(cx, &**exp, scope_stack, scope_map); - } - - match *base_exp { - Some(ref exp) => walk_expr(cx, &**exp, scope_stack, scope_map), - None => () - } - } - - hir::ExprInlineAsm(hir::InlineAsm { ref inputs, - ref outputs, - .. }) => { - // inputs, outputs: Vec<(String, P)> - for &(_, ref exp) in inputs { - walk_expr(cx, &**exp, scope_stack, scope_map); - } - - for out in outputs { - walk_expr(cx, &*out.expr, scope_stack, scope_map); - } - } - } -} diff --git a/src/librustc_trans/trans/debuginfo/metadata.rs b/src/librustc_trans/trans/debuginfo/metadata.rs deleted file mode 100644 index 128d0601167f3..0000000000000 --- a/src/librustc_trans/trans/debuginfo/metadata.rs +++ /dev/null @@ -1,2136 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use self::RecursiveTypeDescription::*; -use self::MemberOffset::*; -use self::MemberDescriptionFactory::*; -use self::EnumDiscriminantInfo::*; - -use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, - get_namespace_and_span_for_item, create_DIArray, - fn_should_be_ignored, is_node_local_to_unit}; -use super::namespace::namespace_for_item; -use super::type_names::{compute_debuginfo_type_name, push_debuginfo_type_name}; -use super::{declare_local, VariableKind, VariableAccess}; - -use llvm::{self, ValueRef}; -use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType}; - -use middle::def_id::DefId; -use middle::infer; -use middle::pat_util; -use middle::subst::{self, Substs}; -use rustc::front::map as hir_map; -use rustc_front::hir; -use trans::{type_of, adt, machine, monomorphize}; -use trans::common::{self, CrateContext, FunctionContext, Block}; -use trans::_match::{BindingInfo, TransBindingMode}; -use trans::type_::Type; -use middle::ty::{self, Ty}; -use session::config::{self, FullDebugInfo}; -use util::nodemap::FnvHashMap; -use util::common::path2cstr; - -use libc::{c_uint, c_longlong}; -use std::ffi::CString; -use std::path::Path; -use std::ptr; -use std::rc::Rc; -use syntax; -use syntax::util::interner::Interner; -use syntax::codemap::Span; -use syntax::{ast, codemap}; -use syntax::parse::token; - - -const DW_LANG_RUST: c_uint = 0x9000; -#[allow(non_upper_case_globals)] -const DW_ATE_boolean: c_uint = 0x02; -#[allow(non_upper_case_globals)] -const DW_ATE_float: c_uint = 0x04; -#[allow(non_upper_case_globals)] -const DW_ATE_signed: c_uint = 0x05; -#[allow(non_upper_case_globals)] -const DW_ATE_unsigned: c_uint = 0x07; -#[allow(non_upper_case_globals)] -const DW_ATE_unsigned_char: c_uint = 0x08; - -pub const UNKNOWN_LINE_NUMBER: c_uint = 0; -pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0; - -// ptr::null() doesn't work :( -const NO_FILE_METADATA: DIFile = (0 as DIFile); -const NO_SCOPE_METADATA: DIScope = (0 as 
DIScope); - -const FLAGS_NONE: c_uint = 0; - -#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] -pub struct UniqueTypeId(ast::Name); - -// The TypeMap is where the CrateDebugContext holds the type metadata nodes -// created so far. The metadata nodes are indexed by UniqueTypeId, and, for -// faster lookup, also by Ty. The TypeMap is responsible for creating -// UniqueTypeIds. -pub struct TypeMap<'tcx> { - // The UniqueTypeIds created so far - unique_id_interner: Interner>, - // A map from UniqueTypeId to debuginfo metadata for that type. This is a 1:1 mapping. - unique_id_to_metadata: FnvHashMap, - // A map from types to debuginfo metadata. This is a N:1 mapping. - type_to_metadata: FnvHashMap, DIType>, - // A map from types to UniqueTypeId. This is a N:1 mapping. - type_to_unique_id: FnvHashMap, UniqueTypeId> -} - -impl<'tcx> TypeMap<'tcx> { - pub fn new() -> TypeMap<'tcx> { - TypeMap { - unique_id_interner: Interner::new(), - type_to_metadata: FnvHashMap(), - unique_id_to_metadata: FnvHashMap(), - type_to_unique_id: FnvHashMap(), - } - } - - // Adds a Ty to metadata mapping to the TypeMap. The method will fail if - // the mapping already exists. - fn register_type_with_metadata<'a>(&mut self, - cx: &CrateContext<'a, 'tcx>, - type_: Ty<'tcx>, - metadata: DIType) { - if self.type_to_metadata.insert(type_, metadata).is_some() { - cx.sess().bug(&format!("Type metadata for Ty '{}' is already in the TypeMap!", - type_)); - } - } - - // Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will - // fail if the mapping already exists. 
- fn register_unique_id_with_metadata(&mut self, - cx: &CrateContext, - unique_type_id: UniqueTypeId, - metadata: DIType) { - if self.unique_id_to_metadata.insert(unique_type_id, metadata).is_some() { - let unique_type_id_str = self.get_unique_type_id_as_string(unique_type_id); - cx.sess().bug(&format!("Type metadata for unique id '{}' is already in the TypeMap!", - &unique_type_id_str[..])); - } - } - - fn find_metadata_for_type(&self, type_: Ty<'tcx>) -> Option { - self.type_to_metadata.get(&type_).cloned() - } - - fn find_metadata_for_unique_id(&self, unique_type_id: UniqueTypeId) -> Option { - self.unique_id_to_metadata.get(&unique_type_id).cloned() - } - - // Get the string representation of a UniqueTypeId. This method will fail if - // the id is unknown. - fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> Rc { - let UniqueTypeId(interner_key) = unique_type_id; - self.unique_id_interner.get(interner_key) - } - - // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given - // type has been requested before, this is just a table lookup. Otherwise an - // ID will be generated and stored for later lookup. 
- fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>, - type_: Ty<'tcx>) -> UniqueTypeId { - - // basic type -> {:name of the type:} - // tuple -> {tuple_(:param-uid:)*} - // struct -> {struct_:svh: / :node-id:_<(:param-uid:),*> } - // enum -> {enum_:svh: / :node-id:_<(:param-uid:),*> } - // enum variant -> {variant_:variant-name:_:enum-uid:} - // reference (&) -> {& :pointee-uid:} - // mut reference (&mut) -> {&mut :pointee-uid:} - // ptr (*) -> {* :pointee-uid:} - // mut ptr (*mut) -> {*mut :pointee-uid:} - // unique ptr (box) -> {box :pointee-uid:} - // @-ptr (@) -> {@ :pointee-uid:} - // sized vec ([T; x]) -> {[:size:] :element-uid:} - // unsized vec ([T]) -> {[] :element-uid:} - // trait (T) -> {trait_:svh: / :node-id:_<(:param-uid:),*> } - // closure -> { :store-sigil: |(:param-uid:),* <,_...>| -> \ - // :return-type-uid: : (:bounds:)*} - // function -> { fn( (:param-uid:)* <,_...> ) -> \ - // :return-type-uid:} - - match self.type_to_unique_id.get(&type_).cloned() { - Some(unique_type_id) => return unique_type_id, - None => { /* generate one */} - }; - - let mut unique_type_id = String::with_capacity(256); - unique_type_id.push('{'); - - match type_.sty { - ty::TyBool | - ty::TyChar | - ty::TyStr | - ty::TyInt(_) | - ty::TyUint(_) | - ty::TyFloat(_) => { - push_debuginfo_type_name(cx, type_, false, &mut unique_type_id); - }, - ty::TyEnum(def, substs) => { - unique_type_id.push_str("enum "); - from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id); - }, - ty::TyStruct(def, substs) => { - unique_type_id.push_str("struct "); - from_def_id_and_substs(self, cx, def.did, substs, &mut unique_type_id); - }, - ty::TyTuple(ref component_types) if component_types.is_empty() => { - push_debuginfo_type_name(cx, type_, false, &mut unique_type_id); - }, - ty::TyTuple(ref component_types) => { - unique_type_id.push_str("tuple "); - for &component_type in component_types { - let component_type_id = - self.get_unique_type_id_of_type(cx, 
component_type); - let component_type_id = - self.get_unique_type_id_as_string(component_type_id); - unique_type_id.push_str(&component_type_id[..]); - } - }, - ty::TyBox(inner_type) => { - unique_type_id.push_str("box "); - let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type); - let inner_type_id = self.get_unique_type_id_as_string(inner_type_id); - unique_type_id.push_str(&inner_type_id[..]); - }, - ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => { - unique_type_id.push('*'); - if mutbl == hir::MutMutable { - unique_type_id.push_str("mut"); - } - - let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type); - let inner_type_id = self.get_unique_type_id_as_string(inner_type_id); - unique_type_id.push_str(&inner_type_id[..]); - }, - ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => { - unique_type_id.push('&'); - if mutbl == hir::MutMutable { - unique_type_id.push_str("mut"); - } - - let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type); - let inner_type_id = self.get_unique_type_id_as_string(inner_type_id); - unique_type_id.push_str(&inner_type_id[..]); - }, - ty::TyArray(inner_type, len) => { - unique_type_id.push_str(&format!("[{}]", len)); - - let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type); - let inner_type_id = self.get_unique_type_id_as_string(inner_type_id); - unique_type_id.push_str(&inner_type_id[..]); - }, - ty::TySlice(inner_type) => { - unique_type_id.push_str("[]"); - - let inner_type_id = self.get_unique_type_id_of_type(cx, inner_type); - let inner_type_id = self.get_unique_type_id_as_string(inner_type_id); - unique_type_id.push_str(&inner_type_id[..]); - }, - ty::TyTrait(ref trait_data) => { - unique_type_id.push_str("trait "); - - let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal); - - from_def_id_and_substs(self, - cx, - principal.def_id, - principal.substs, - &mut unique_type_id); - }, - ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => { 
- if unsafety == hir::Unsafety::Unsafe { - unique_type_id.push_str("unsafe "); - } - - unique_type_id.push_str(abi.name()); - - unique_type_id.push_str(" fn("); - - let sig = cx.tcx().erase_late_bound_regions(sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - - for ¶meter_type in &sig.inputs { - let parameter_type_id = - self.get_unique_type_id_of_type(cx, parameter_type); - let parameter_type_id = - self.get_unique_type_id_as_string(parameter_type_id); - unique_type_id.push_str(¶meter_type_id[..]); - unique_type_id.push(','); - } - - if sig.variadic { - unique_type_id.push_str("..."); - } - - unique_type_id.push_str(")->"); - match sig.output { - ty::FnConverging(ret_ty) => { - let return_type_id = self.get_unique_type_id_of_type(cx, ret_ty); - let return_type_id = self.get_unique_type_id_as_string(return_type_id); - unique_type_id.push_str(&return_type_id[..]); - } - ty::FnDiverging => { - unique_type_id.push_str("!"); - } - } - }, - ty::TyClosure(_, ref substs) if substs.upvar_tys.is_empty() => { - push_debuginfo_type_name(cx, type_, false, &mut unique_type_id); - }, - ty::TyClosure(_, ref substs) => { - unique_type_id.push_str("closure "); - for upvar_type in &substs.upvar_tys { - let upvar_type_id = - self.get_unique_type_id_of_type(cx, upvar_type); - let upvar_type_id = - self.get_unique_type_id_as_string(upvar_type_id); - unique_type_id.push_str(&upvar_type_id[..]); - } - }, - _ => { - cx.sess().bug(&format!("get_unique_type_id_of_type() - unexpected type: {:?}", - type_)) - } - }; - - unique_type_id.push('}'); - - // Trim to size before storing permanently - unique_type_id.shrink_to_fit(); - - let key = self.unique_id_interner.intern(Rc::new(unique_type_id)); - self.type_to_unique_id.insert(type_, UniqueTypeId(key)); - - return UniqueTypeId(key); - - fn from_def_id_and_substs<'a, 'tcx>(type_map: &mut TypeMap<'tcx>, - cx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: &subst::Substs<'tcx>, - output: &mut String) { - // First, find out 
the 'real' def_id of the type. Items inlined from - // other crates have to be mapped back to their source. - let source_def_id = if let Some(node_id) = cx.tcx().map.as_local_node_id(def_id) { - match cx.external_srcs().borrow().get(&node_id).cloned() { - Some(source_def_id) => { - // The given def_id identifies the inlined copy of a - // type definition, let's take the source of the copy. - source_def_id - } - None => def_id - } - } else { - def_id - }; - - // Get the crate hash as first part of the identifier. - let crate_hash = if source_def_id.is_local() { - cx.link_meta().crate_hash.clone() - } else { - cx.sess().cstore.crate_hash(source_def_id.krate) - }; - - output.push_str(crate_hash.as_str()); - output.push_str("/"); - output.push_str(&format!("{:x}", def_id.index.as_usize())); - - // Maybe check that there is no self type here. - - let tps = substs.types.get_slice(subst::TypeSpace); - if !tps.is_empty() { - output.push('<'); - - for &type_parameter in tps { - let param_type_id = - type_map.get_unique_type_id_of_type(cx, type_parameter); - let param_type_id = - type_map.get_unique_type_id_as_string(param_type_id); - output.push_str(¶m_type_id[..]); - output.push(','); - } - - output.push('>'); - } - } - } - - // Get the UniqueTypeId for an enum variant. Enum variants are not really - // types of their own, so they need special handling. We still need a - // UniqueTypeId for them, since to debuginfo they *are* real types. - fn get_unique_type_id_of_enum_variant<'a>(&mut self, - cx: &CrateContext<'a, 'tcx>, - enum_type: Ty<'tcx>, - variant_name: &str) - -> UniqueTypeId { - let enum_type_id = self.get_unique_type_id_of_type(cx, enum_type); - let enum_variant_type_id = format!("{}::{}", - &self.get_unique_type_id_as_string(enum_type_id), - variant_name); - let interner_key = self.unique_id_interner.intern(Rc::new(enum_variant_type_id)); - UniqueTypeId(interner_key) - } -} - -// A description of some recursive type. 
It can either be already finished (as -// with FinalMetadata) or it is not yet finished, but contains all information -// needed to generate the missing parts of the description. See the -// documentation section on Recursive Types at the top of this file for more -// information. -enum RecursiveTypeDescription<'tcx> { - UnfinishedMetadata { - unfinished_type: Ty<'tcx>, - unique_type_id: UniqueTypeId, - metadata_stub: DICompositeType, - llvm_type: Type, - member_description_factory: MemberDescriptionFactory<'tcx>, - }, - FinalMetadata(DICompositeType) -} - -fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( - cx: &CrateContext<'a, 'tcx>, - unfinished_type: Ty<'tcx>, - unique_type_id: UniqueTypeId, - metadata_stub: DICompositeType, - llvm_type: Type, - member_description_factory: MemberDescriptionFactory<'tcx>) - -> RecursiveTypeDescription<'tcx> { - - // Insert the stub into the TypeMap in order to allow for recursive references - let mut type_map = debug_context(cx).type_map.borrow_mut(); - type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata_stub); - type_map.register_type_with_metadata(cx, unfinished_type, metadata_stub); - - UnfinishedMetadata { - unfinished_type: unfinished_type, - unique_type_id: unique_type_id, - metadata_stub: metadata_stub, - llvm_type: llvm_type, - member_description_factory: member_description_factory, - } -} - -impl<'tcx> RecursiveTypeDescription<'tcx> { - // Finishes up the description of the type in question (mostly by providing - // descriptions of the fields of the given type) and returns the final type - // metadata. - fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult { - match *self { - FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false), - UnfinishedMetadata { - unfinished_type, - unique_type_id, - metadata_stub, - llvm_type, - ref member_description_factory, - .. 
- } => { - // Make sure that we have a forward declaration of the type in - // the TypeMap so that recursive references are possible. This - // will always be the case if the RecursiveTypeDescription has - // been properly created through the - // create_and_register_recursive_type_forward_declaration() - // function. - { - let type_map = debug_context(cx).type_map.borrow(); - if type_map.find_metadata_for_unique_id(unique_type_id).is_none() || - type_map.find_metadata_for_type(unfinished_type).is_none() { - cx.sess().bug(&format!("Forward declaration of potentially recursive type \ - '{:?}' was not found in TypeMap!", - unfinished_type) - ); - } - } - - // ... then create the member descriptions ... - let member_descriptions = - member_description_factory.create_member_descriptions(cx); - - // ... and attach them to the stub to complete it. - set_members_of_composite_type(cx, - metadata_stub, - llvm_type, - &member_descriptions[..]); - return MetadataCreationResult::new(metadata_stub, true); - } - } - } -} - -// Returns from the enclosing function if the type metadata with the given -// unique id can be found in the type map -macro_rules! 
return_if_metadata_created_in_meantime { - ($cx: expr, $unique_type_id: expr) => ( - match debug_context($cx).type_map - .borrow() - .find_metadata_for_unique_id($unique_type_id) { - Some(metadata) => return MetadataCreationResult::new(metadata, true), - None => { /* proceed normally */ } - } - ) -} - -fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - unique_type_id: UniqueTypeId, - element_type: Ty<'tcx>, - len: Option, - span: Span) - -> MetadataCreationResult { - let element_type_metadata = type_metadata(cx, element_type, span); - - return_if_metadata_created_in_meantime!(cx, unique_type_id); - - let element_llvm_type = type_of::type_of(cx, element_type); - let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type); - - let (array_size_in_bytes, upper_bound) = match len { - Some(len) => (element_type_size * len, len as c_longlong), - None => (0, -1) - }; - - let subrange = unsafe { - llvm::LLVMDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound) - }; - - let subscripts = create_DIArray(DIB(cx), &[subrange]); - let metadata = unsafe { - llvm::LLVMDIBuilderCreateArrayType( - DIB(cx), - bytes_to_bits(array_size_in_bytes), - bytes_to_bits(element_type_align), - element_type_metadata, - subscripts) - }; - - return MetadataCreationResult::new(metadata, false); -} - -fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - vec_type: Ty<'tcx>, - element_type: Ty<'tcx>, - unique_type_id: UniqueTypeId, - span: Span) - -> MetadataCreationResult { - let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: element_type, - mutbl: hir::MutImmutable - }); - - let element_type_metadata = type_metadata(cx, data_ptr_type, span); - - return_if_metadata_created_in_meantime!(cx, unique_type_id); - - let slice_llvm_type = type_of::type_of(cx, vec_type); - let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true); - - let member_llvm_types = slice_llvm_type.field_types(); - assert!(slice_layout_is_correct(cx, - 
&member_llvm_types[..], - element_type)); - let member_descriptions = [ - MemberDescription { - name: "data_ptr".to_string(), - llvm_type: member_llvm_types[0], - type_metadata: element_type_metadata, - offset: ComputedMemberOffset, - flags: FLAGS_NONE - }, - MemberDescription { - name: "length".to_string(), - llvm_type: member_llvm_types[1], - type_metadata: type_metadata(cx, cx.tcx().types.usize, span), - offset: ComputedMemberOffset, - flags: FLAGS_NONE - }, - ]; - - assert!(member_descriptions.len() == member_llvm_types.len()); - - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name); - - let metadata = composite_type_metadata(cx, - slice_llvm_type, - &slice_type_name[..], - unique_type_id, - &member_descriptions, - NO_SCOPE_METADATA, - file_metadata, - span); - return MetadataCreationResult::new(metadata, false); - - fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - member_llvm_types: &[Type], - element_type: Ty<'tcx>) - -> bool { - member_llvm_types.len() == 2 && - member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() && - member_llvm_types[1] == cx.int_type() - } -} - -fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - unique_type_id: UniqueTypeId, - signature: &ty::PolyFnSig<'tcx>, - span: Span) - -> MetadataCreationResult -{ - let signature = cx.tcx().erase_late_bound_regions(signature); - - let mut signature_metadata: Vec = Vec::with_capacity(signature.inputs.len() + 1); - - // return type - signature_metadata.push(match signature.output { - ty::FnConverging(ret_ty) => match ret_ty.sty { - ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(), - _ => type_metadata(cx, ret_ty, span) - }, - ty::FnDiverging => diverging_type_metadata(cx) - }); - - // regular arguments - for &argument_type in &signature.inputs { - signature_metadata.push(type_metadata(cx, argument_type, span)); - } - - return_if_metadata_created_in_meantime!(cx, unique_type_id); - - return 
MetadataCreationResult::new( - unsafe { - llvm::LLVMDIBuilderCreateSubroutineType( - DIB(cx), - NO_FILE_METADATA, - create_DIArray(DIB(cx), &signature_metadata[..])) - }, - false); -} - -// FIXME(1563) This is all a bit of a hack because 'trait pointer' is an ill- -// defined concept. For the case of an actual trait pointer (i.e., Box, -// &Trait), trait_object_type should be the whole thing (e.g, Box) and -// trait_type should be the actual trait (e.g., Trait). Where the trait is part -// of a DST struct, there is no trait_object_type and the results of this -// function will be a little bit weird. -fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - trait_type: Ty<'tcx>, - trait_object_type: Option>, - unique_type_id: UniqueTypeId) - -> DIType { - // The implementation provided here is a stub. It makes sure that the trait - // type is assigned the correct name, size, namespace, and source location. - // But it does not describe the trait's methods. - - let def_id = match trait_type.sty { - ty::TyTrait(ref data) => data.principal_def_id(), - _ => { - cx.sess().bug(&format!("debuginfo: Unexpected trait-object type in \ - trait_pointer_metadata(): {:?}", - trait_type)); - } - }; - - let trait_object_type = trait_object_type.unwrap_or(trait_type); - let trait_type_name = - compute_debuginfo_type_name(cx, trait_object_type, false); - - let (containing_scope, _) = get_namespace_and_span_for_item(cx, def_id); - - let trait_llvm_type = type_of::type_of(cx, trait_object_type); - - composite_type_metadata(cx, - trait_llvm_type, - &trait_type_name[..], - unique_type_id, - &[], - containing_scope, - NO_FILE_METADATA, - codemap::DUMMY_SP) -} - -pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - usage_site_span: Span) - -> DIType { - // Get the unique type id of this type. - let unique_type_id = { - let mut type_map = debug_context(cx).type_map.borrow_mut(); - // First, try to find the type in TypeMap. 
If we have seen it before, we - // can exit early here. - match type_map.find_metadata_for_type(t) { - Some(metadata) => { - return metadata; - }, - None => { - // The Ty is not in the TypeMap but maybe we have already seen - // an equivalent type (e.g. only differing in region arguments). - // In order to find out, generate the unique type id and look - // that up. - let unique_type_id = type_map.get_unique_type_id_of_type(cx, t); - match type_map.find_metadata_for_unique_id(unique_type_id) { - Some(metadata) => { - // There is already an equivalent type in the TypeMap. - // Register this Ty as an alias in the cache and - // return the cached metadata. - type_map.register_type_with_metadata(cx, t, metadata); - return metadata; - }, - None => { - // There really is no type metadata for this type, so - // proceed by creating it. - unique_type_id - } - } - } - } - }; - - debug!("type_metadata: {:?}", t); - - let sty = &t.sty; - let MetadataCreationResult { metadata, already_stored_in_typemap } = match *sty { - ty::TyBool | - ty::TyChar | - ty::TyInt(_) | - ty::TyUint(_) | - ty::TyFloat(_) => { - MetadataCreationResult::new(basic_type_metadata(cx, t), false) - } - ty::TyTuple(ref elements) if elements.is_empty() => { - MetadataCreationResult::new(basic_type_metadata(cx, t), false) - } - ty::TyEnum(def, _) => { - prepare_enum_metadata(cx, - t, - def.did, - unique_type_id, - usage_site_span).finalize(cx) - } - ty::TyArray(typ, len) => { - fixed_vec_metadata(cx, unique_type_id, typ, Some(len as u64), usage_site_span) - } - ty::TySlice(typ) => { - fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) - } - ty::TyStr => { - fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span) - } - ty::TyTrait(..) 
=> { - MetadataCreationResult::new( - trait_pointer_metadata(cx, t, None, unique_type_id), - false) - } - ty::TyBox(ty) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | - ty::TyRef(_, ty::TypeAndMut{ty, ..}) => { - match ty.sty { - ty::TySlice(typ) => { - vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span) - } - ty::TyStr => { - vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span) - } - ty::TyTrait(..) => { - MetadataCreationResult::new( - trait_pointer_metadata(cx, ty, Some(t), unique_type_id), - false) - } - _ => { - let pointee_metadata = type_metadata(cx, ty, usage_site_span); - - match debug_context(cx).type_map - .borrow() - .find_metadata_for_unique_id(unique_type_id) { - Some(metadata) => return metadata, - None => { /* proceed normally */ } - }; - - MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata), - false) - } - } - } - ty::TyBareFn(_, ref barefnty) => { - let fn_metadata = subroutine_type_metadata(cx, - unique_type_id, - &barefnty.sig, - usage_site_span).metadata; - match debug_context(cx).type_map - .borrow() - .find_metadata_for_unique_id(unique_type_id) { - Some(metadata) => return metadata, - None => { /* proceed normally */ } - }; - - // This is actually a function pointer, so wrap it in pointer DI - MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false) - - } - ty::TyClosure(_, ref substs) => { - prepare_tuple_metadata(cx, - t, - &substs.upvar_tys, - unique_type_id, - usage_site_span).finalize(cx) - } - ty::TyStruct(..) 
=> { - prepare_struct_metadata(cx, - t, - unique_type_id, - usage_site_span).finalize(cx) - } - ty::TyTuple(ref elements) => { - prepare_tuple_metadata(cx, - t, - &elements[..], - unique_type_id, - usage_site_span).finalize(cx) - } - _ => { - cx.sess().bug(&format!("debuginfo: unexpected type in type_metadata: {:?}", - sty)) - } - }; - - { - let mut type_map = debug_context(cx).type_map.borrow_mut(); - - if already_stored_in_typemap { - // Also make sure that we already have a TypeMap entry for the unique type id. - let metadata_for_uid = match type_map.find_metadata_for_unique_id(unique_type_id) { - Some(metadata) => metadata, - None => { - let unique_type_id_str = - type_map.get_unique_type_id_as_string(unique_type_id); - let error_message = format!("Expected type metadata for unique \ - type id '{}' to already be in \ - the debuginfo::TypeMap but it \ - was not. (Ty = {})", - &unique_type_id_str[..], - t); - cx.sess().span_bug(usage_site_span, &error_message[..]); - } - }; - - match type_map.find_metadata_for_type(t) { - Some(metadata) => { - if metadata != metadata_for_uid { - let unique_type_id_str = - type_map.get_unique_type_id_as_string(unique_type_id); - let error_message = format!("Mismatch between Ty and \ - UniqueTypeId maps in \ - debuginfo::TypeMap. 
\ - UniqueTypeId={}, Ty={}", - &unique_type_id_str[..], - t); - cx.sess().span_bug(usage_site_span, &error_message[..]); - } - } - None => { - type_map.register_type_with_metadata(cx, t, metadata); - } - } - } else { - type_map.register_type_with_metadata(cx, t, metadata); - type_map.register_unique_id_with_metadata(cx, unique_type_id, metadata); - } - } - - metadata -} - -pub fn file_metadata(cx: &CrateContext, full_path: &str) -> DIFile { - // FIXME (#9639): This needs to handle non-utf8 paths - let work_dir = cx.sess().working_dir.to_str().unwrap(); - let file_name = - if full_path.starts_with(work_dir) { - &full_path[work_dir.len() + 1..full_path.len()] - } else { - full_path - }; - - file_metadata_(cx, full_path, file_name, &work_dir) -} - -pub fn unknown_file_metadata(cx: &CrateContext) -> DIFile { - // Regular filenames should not be empty, so we abuse an empty name as the - // key for the special unknown file metadata - file_metadata_(cx, "", "", "") - -} - -fn file_metadata_(cx: &CrateContext, key: &str, file_name: &str, work_dir: &str) -> DIFile { - match debug_context(cx).created_files.borrow().get(key) { - Some(file_metadata) => return *file_metadata, - None => () - } - - debug!("file_metadata: file_name: {}, work_dir: {}", file_name, work_dir); - - let file_name = CString::new(file_name).unwrap(); - let work_dir = CString::new(work_dir).unwrap(); - let file_metadata = unsafe { - llvm::LLVMDIBuilderCreateFile(DIB(cx), file_name.as_ptr(), - work_dir.as_ptr()) - }; - - let mut created_files = debug_context(cx).created_files.borrow_mut(); - created_files.insert(key.to_string(), file_metadata); - file_metadata -} - -/// Finds the scope metadata node for the given AST node. 
-pub fn scope_metadata(fcx: &FunctionContext, - node_id: ast::NodeId, - error_reporting_span: Span) - -> DIScope { - let scope_map = &fcx.debug_context - .get_ref(fcx.ccx, error_reporting_span) - .scope_map; - match scope_map.borrow().get(&node_id).cloned() { - Some(scope_metadata) => scope_metadata, - None => { - let node = fcx.ccx.tcx().map.get(node_id); - - fcx.ccx.sess().span_bug(error_reporting_span, - &format!("debuginfo: Could not find scope info for node {:?}", - node)); - } - } -} - -pub fn diverging_type_metadata(cx: &CrateContext) -> DIType { - unsafe { - llvm::LLVMDIBuilderCreateBasicType( - DIB(cx), - "!\0".as_ptr() as *const _, - bytes_to_bits(0), - bytes_to_bits(0), - DW_ATE_unsigned) - } -} - -fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) -> DIType { - - debug!("basic_type_metadata: {:?}", t); - - let (name, encoding) = match t.sty { - ty::TyTuple(ref elements) if elements.is_empty() => - ("()", DW_ATE_unsigned), - ty::TyBool => ("bool", DW_ATE_boolean), - ty::TyChar => ("char", DW_ATE_unsigned_char), - ty::TyInt(int_ty) => { - (int_ty.ty_to_string(), DW_ATE_signed) - }, - ty::TyUint(uint_ty) => { - (uint_ty.ty_to_string(), DW_ATE_unsigned) - }, - ty::TyFloat(float_ty) => { - (float_ty.ty_to_string(), DW_ATE_float) - }, - _ => cx.sess().bug("debuginfo::basic_type_metadata - t is invalid type") - }; - - let llvm_type = type_of::type_of(cx, t); - let (size, align) = size_and_align_of(cx, llvm_type); - let name = CString::new(name).unwrap(); - let ty_metadata = unsafe { - llvm::LLVMDIBuilderCreateBasicType( - DIB(cx), - name.as_ptr(), - bytes_to_bits(size), - bytes_to_bits(align), - encoding) - }; - - return ty_metadata; -} - -fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - pointer_type: Ty<'tcx>, - pointee_type_metadata: DIType) - -> DIType { - let pointer_llvm_type = type_of::type_of(cx, pointer_type); - let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type); - let name = 
compute_debuginfo_type_name(cx, pointer_type, false); - let name = CString::new(name).unwrap(); - let ptr_metadata = unsafe { - llvm::LLVMDIBuilderCreatePointerType( - DIB(cx), - pointee_type_metadata, - bytes_to_bits(pointer_size), - bytes_to_bits(pointer_align), - name.as_ptr()) - }; - return ptr_metadata; -} - -pub fn compile_unit_metadata(cx: &CrateContext) -> DIDescriptor { - let work_dir = &cx.sess().working_dir; - let compile_unit_name = match cx.sess().local_crate_source_file { - None => fallback_path(cx), - Some(ref abs_path) => { - if abs_path.is_relative() { - cx.sess().warn("debuginfo: Invalid path to crate's local root source file!"); - fallback_path(cx) - } else { - match abs_path.strip_prefix(work_dir) { - Ok(ref p) if p.is_relative() => { - if p.starts_with(Path::new("./")) { - path2cstr(p) - } else { - path2cstr(&Path::new(".").join(p)) - } - } - _ => fallback_path(cx) - } - } - } - }; - - debug!("compile_unit_metadata: {:?}", compile_unit_name); - let producer = format!("rustc version {}", - (option_env!("CFG_VERSION")).expect("CFG_VERSION")); - - let compile_unit_name = compile_unit_name.as_ptr(); - let work_dir = path2cstr(&work_dir); - let producer = CString::new(producer).unwrap(); - let flags = "\0"; - let split_name = "\0"; - return unsafe { - llvm::LLVMDIBuilderCreateCompileUnit( - debug_context(cx).builder, - DW_LANG_RUST, - compile_unit_name, - work_dir.as_ptr(), - producer.as_ptr(), - cx.sess().opts.optimize != config::OptLevel::No, - flags.as_ptr() as *const _, - 0, - split_name.as_ptr() as *const _) - }; - - fn fallback_path(cx: &CrateContext) -> CString { - CString::new(cx.link_meta().crate_name.clone()).unwrap() - } -} - -struct MetadataCreationResult { - metadata: DIType, - already_stored_in_typemap: bool -} - -impl MetadataCreationResult { - fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationResult { - MetadataCreationResult { - metadata: metadata, - already_stored_in_typemap: already_stored_in_typemap - } 
- } -} - -#[derive(Debug)] -enum MemberOffset { - FixedMemberOffset { bytes: usize }, - // For ComputedMemberOffset, the offset is read from the llvm type definition. - ComputedMemberOffset -} - -// Description of a type member, which can either be a regular field (as in -// structs or tuples) or an enum variant. -#[derive(Debug)] -struct MemberDescription { - name: String, - llvm_type: Type, - type_metadata: DIType, - offset: MemberOffset, - flags: c_uint -} - -// A factory for MemberDescriptions. It produces a list of member descriptions -// for some record-like type. MemberDescriptionFactories are used to defer the -// creation of type member descriptions in order to break cycles arising from -// recursive type definitions. -enum MemberDescriptionFactory<'tcx> { - StructMDF(StructMemberDescriptionFactory<'tcx>), - TupleMDF(TupleMemberDescriptionFactory<'tcx>), - EnumMDF(EnumMemberDescriptionFactory<'tcx>), - VariantMDF(VariantMemberDescriptionFactory<'tcx>) -} - -impl<'tcx> MemberDescriptionFactory<'tcx> { - fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) - -> Vec { - match *self { - StructMDF(ref this) => { - this.create_member_descriptions(cx) - } - TupleMDF(ref this) => { - this.create_member_descriptions(cx) - } - EnumMDF(ref this) => { - this.create_member_descriptions(cx) - } - VariantMDF(ref this) => { - this.create_member_descriptions(cx) - } - } - } -} - -//=----------------------------------------------------------------------------- -// Structs -//=----------------------------------------------------------------------------- - -// Creates MemberDescriptions for the fields of a struct -struct StructMemberDescriptionFactory<'tcx> { - variant: ty::VariantDef<'tcx>, - substs: &'tcx subst::Substs<'tcx>, - is_simd: bool, - span: Span, -} - -impl<'tcx> StructMemberDescriptionFactory<'tcx> { - fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) - -> Vec { - if let ty::VariantKind::Unit = self.variant.kind() { - return 
Vec::new(); - } - - let field_size = if self.is_simd { - let fty = monomorphize::field_ty(cx.tcx(), - self.substs, - &self.variant.fields[0]); - Some(machine::llsize_of_alloc( - cx, - type_of::type_of(cx, fty) - ) as usize) - } else { - None - }; - - self.variant.fields.iter().enumerate().map(|(i, f)| { - let name = if let ty::VariantKind::Tuple = self.variant.kind() { - format!("__{}", i) - } else { - f.name.to_string() - }; - let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); - - let offset = if self.is_simd { - FixedMemberOffset { bytes: i * field_size.unwrap() } - } else { - ComputedMemberOffset - }; - - MemberDescription { - name: name, - llvm_type: type_of::type_of(cx, fty), - type_metadata: type_metadata(cx, fty, self.span), - offset: offset, - flags: FLAGS_NONE, - } - }).collect() - } -} - - -fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - struct_type: Ty<'tcx>, - unique_type_id: UniqueTypeId, - span: Span) - -> RecursiveTypeDescription<'tcx> { - let struct_name = compute_debuginfo_type_name(cx, struct_type, false); - let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); - - let (variant, substs) = match struct_type.sty { - ty::TyStruct(def, substs) => (def.struct_variant(), substs), - _ => cx.tcx().sess.bug("prepare_struct_metadata on a non-struct") - }; - - let (containing_scope, _) = get_namespace_and_span_for_item(cx, variant.did); - - let struct_metadata_stub = create_struct_stub(cx, - struct_llvm_type, - &struct_name, - unique_type_id, - containing_scope); - - create_and_register_recursive_type_forward_declaration( - cx, - struct_type, - unique_type_id, - struct_metadata_stub, - struct_llvm_type, - StructMDF(StructMemberDescriptionFactory { - variant: variant, - substs: substs, - is_simd: struct_type.is_simd(), - span: span, - }) - ) -} - - -//=----------------------------------------------------------------------------- -// Tuples 
-//=----------------------------------------------------------------------------- - -// Creates MemberDescriptions for the fields of a tuple -struct TupleMemberDescriptionFactory<'tcx> { - component_types: Vec>, - span: Span, -} - -impl<'tcx> TupleMemberDescriptionFactory<'tcx> { - fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) - -> Vec { - self.component_types - .iter() - .enumerate() - .map(|(i, &component_type)| { - MemberDescription { - name: format!("__{}", i), - llvm_type: type_of::type_of(cx, component_type), - type_metadata: type_metadata(cx, component_type, self.span), - offset: ComputedMemberOffset, - flags: FLAGS_NONE, - } - }).collect() - } -} - -fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - tuple_type: Ty<'tcx>, - component_types: &[Ty<'tcx>], - unique_type_id: UniqueTypeId, - span: Span) - -> RecursiveTypeDescription<'tcx> { - let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); - let tuple_llvm_type = type_of::type_of(cx, tuple_type); - - create_and_register_recursive_type_forward_declaration( - cx, - tuple_type, - unique_type_id, - create_struct_stub(cx, - tuple_llvm_type, - &tuple_name[..], - unique_type_id, - NO_SCOPE_METADATA), - tuple_llvm_type, - TupleMDF(TupleMemberDescriptionFactory { - component_types: component_types.to_vec(), - span: span, - }) - ) -} - - -//=----------------------------------------------------------------------------- -// Enums -//=----------------------------------------------------------------------------- - -// Describes the members of an enum value: An enum is described as a union of -// structs in DWARF. This MemberDescriptionFactory provides the description for -// the members of this union; so for every variant of the given enum, this -// factory will produce one MemberDescription (all with no name and a fixed -// offset of zero bytes). 
-struct EnumMemberDescriptionFactory<'tcx> { - enum_type: Ty<'tcx>, - type_rep: Rc>, - discriminant_type_metadata: Option, - containing_scope: DIScope, - file_metadata: DIFile, - span: Span, -} - -impl<'tcx> EnumMemberDescriptionFactory<'tcx> { - fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) - -> Vec { - let adt = &self.enum_type.ty_adt_def().unwrap(); - match *self.type_rep { - adt::General(_, ref struct_defs, _) => { - let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata - .expect("")); - struct_defs - .iter() - .enumerate() - .map(|(i, struct_def)| { - let (variant_type_metadata, - variant_llvm_type, - member_desc_factory) = - describe_enum_variant(cx, - self.enum_type, - struct_def, - &adt.variants[i], - discriminant_info, - self.containing_scope, - self.span); - - let member_descriptions = member_desc_factory - .create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - variant_llvm_type, - &member_descriptions); - MemberDescription { - name: "".to_string(), - llvm_type: variant_llvm_type, - type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: FLAGS_NONE - } - }).collect() - }, - adt::Univariant(ref struct_def, _) => { - assert!(adt.variants.len() <= 1); - - if adt.variants.is_empty() { - vec![] - } else { - let (variant_type_metadata, - variant_llvm_type, - member_description_factory) = - describe_enum_variant(cx, - self.enum_type, - struct_def, - &adt.variants[0], - NoDiscriminant, - self.containing_scope, - self.span); - - let member_descriptions = - member_description_factory.create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - variant_llvm_type, - &member_descriptions[..]); - vec![ - MemberDescription { - name: "".to_string(), - llvm_type: variant_llvm_type, - type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: FLAGS_NONE - } - ] - } - } - 
adt::RawNullablePointer { nndiscr: non_null_variant_index, nnty, .. } => { - // As far as debuginfo is concerned, the pointer this enum - // represents is still wrapped in a struct. This is to make the - // DWARF representation of enums uniform. - - // First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[non_null_variant_index.0 as usize]; - let non_null_variant_name = non_null_variant.name.as_str(); - - // The llvm type and metadata of the pointer - let non_null_llvm_type = type_of::type_of(cx, nnty); - let non_null_type_metadata = type_metadata(cx, nnty, self.span); - - // The type of the artificial struct wrapping the pointer - let artificial_struct_llvm_type = Type::struct_(cx, - &[non_null_llvm_type], - false); - - // For the metadata of the wrapper struct, we need to create a - // MemberDescription of the struct's single field. - let sole_struct_member_description = MemberDescription { - name: match non_null_variant.kind() { - ty::VariantKind::Tuple => "__0".to_string(), - ty::VariantKind::Struct => { - non_null_variant.fields[0].name.to_string() - } - ty::VariantKind::Unit => unreachable!() - }, - llvm_type: non_null_llvm_type, - type_metadata: non_null_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: FLAGS_NONE - }; - - let unique_type_id = debug_context(cx).type_map - .borrow_mut() - .get_unique_type_id_of_enum_variant( - cx, - self.enum_type, - &non_null_variant_name); - - // Now we can create the metadata of the artificial struct - let artificial_struct_metadata = - composite_type_metadata(cx, - artificial_struct_llvm_type, - &non_null_variant_name, - unique_type_id, - &[sole_struct_member_description], - self.containing_scope, - self.file_metadata, - codemap::DUMMY_SP); - - // Encode the information about the null variant in the union - // member's name. 
- let null_variant_index = (1 - non_null_variant_index.0) as usize; - let null_variant_name = adt.variants[null_variant_index].name; - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - 0, - null_variant_name); - - // Finally create the (singleton) list of descriptions of union - // members. - vec![ - MemberDescription { - name: union_member_name, - llvm_type: artificial_struct_llvm_type, - type_metadata: artificial_struct_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: FLAGS_NONE - } - ] - }, - adt::StructWrappedNullablePointer { nonnull: ref struct_def, - nndiscr, - ref discrfield, ..} => { - // Create a description of the non-null variant - let (variant_type_metadata, variant_llvm_type, member_description_factory) = - describe_enum_variant(cx, - self.enum_type, - struct_def, - &adt.variants[nndiscr.0 as usize], - OptimizedDiscriminant, - self.containing_scope, - self.span); - - let variant_member_descriptions = - member_description_factory.create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - variant_llvm_type, - &variant_member_descriptions[..]); - - // Encode the information about the null variant in the union - // member's name. - let null_variant_index = (1 - nndiscr.0) as usize; - let null_variant_name = adt.variants[null_variant_index].name; - let discrfield = discrfield.iter() - .skip(1) - .map(|x| x.to_string()) - .collect::>().join("$"); - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - discrfield, - null_variant_name); - - // Create the (singleton) list of descriptions of union members. - vec![ - MemberDescription { - name: union_member_name, - llvm_type: variant_llvm_type, - type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: FLAGS_NONE - } - ] - }, - adt::CEnum(..) => cx.sess().span_bug(self.span, "This should be unreachable.") - } - } -} - -// Creates MemberDescriptions for the fields of a single enum variant. 
-struct VariantMemberDescriptionFactory<'tcx> { - args: Vec<(String, Ty<'tcx>)>, - discriminant_type_metadata: Option, - span: Span, -} - -impl<'tcx> VariantMemberDescriptionFactory<'tcx> { - fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) - -> Vec { - self.args.iter().enumerate().map(|(i, &(ref name, ty))| { - MemberDescription { - name: name.to_string(), - llvm_type: type_of::type_of(cx, ty), - type_metadata: match self.discriminant_type_metadata { - Some(metadata) if i == 0 => metadata, - _ => type_metadata(cx, ty, self.span) - }, - offset: ComputedMemberOffset, - flags: FLAGS_NONE - } - }).collect() - } -} - -#[derive(Copy, Clone)] -enum EnumDiscriminantInfo { - RegularDiscriminant(DIType), - OptimizedDiscriminant, - NoDiscriminant -} - -// Returns a tuple of (1) type_metadata_stub of the variant, (2) the llvm_type -// of the variant, and (3) a MemberDescriptionFactory for producing the -// descriptions of the fields of the variant. This is a rudimentary version of a -// full RecursiveTypeDescription. 
-fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - enum_type: Ty<'tcx>, - struct_def: &adt::Struct<'tcx>, - variant: ty::VariantDef<'tcx>, - discriminant_info: EnumDiscriminantInfo, - containing_scope: DIScope, - span: Span) - -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { - let variant_llvm_type = - Type::struct_(cx, &struct_def.fields - .iter() - .map(|&t| type_of::type_of(cx, t)) - .collect::>() - , - struct_def.packed); - // Could do some consistency checks here: size, align, field count, discr type - - let variant_name = variant.name.as_str(); - let unique_type_id = debug_context(cx).type_map - .borrow_mut() - .get_unique_type_id_of_enum_variant( - cx, - enum_type, - &variant_name); - - let metadata_stub = create_struct_stub(cx, - variant_llvm_type, - &variant_name, - unique_type_id, - containing_scope); - - // Get the argument names from the enum variant info - let mut arg_names: Vec<_> = match variant.kind() { - ty::VariantKind::Unit => vec![], - ty::VariantKind::Tuple => { - variant.fields - .iter() - .enumerate() - .map(|(i, _)| format!("__{}", i)) - .collect() - } - ty::VariantKind::Struct => { - variant.fields - .iter() - .map(|f| f.name.to_string()) - .collect() - } - }; - - // If this is not a univariant enum, there is also the discriminant field. - match discriminant_info { - RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()), - _ => { /* do nothing */ } - }; - - // Build an array of (field name, field type) pairs to be captured in the factory closure. 
- let args: Vec<(String, Ty)> = arg_names.iter() - .zip(&struct_def.fields) - .map(|(s, &t)| (s.to_string(), t)) - .collect(); - - let member_description_factory = - VariantMDF(VariantMemberDescriptionFactory { - args: args, - discriminant_type_metadata: match discriminant_info { - RegularDiscriminant(discriminant_type_metadata) => { - Some(discriminant_type_metadata) - } - _ => None - }, - span: span, - }); - - (metadata_stub, variant_llvm_type, member_description_factory) -} - -fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - enum_type: Ty<'tcx>, - enum_def_id: DefId, - unique_type_id: UniqueTypeId, - span: Span) - -> RecursiveTypeDescription<'tcx> { - let enum_name = compute_debuginfo_type_name(cx, enum_type, false); - - let (containing_scope, _) = get_namespace_and_span_for_item(cx, enum_def_id); - // FIXME: This should emit actual file metadata for the enum, but we - // currently can't get the necessary information when it comes to types - // imported from other crates. 
Formerly we violated the ODR when performing - // LTO because we emitted debuginfo for the same type with varying file - // metadata, so as a workaround we pretend that the type comes from - // - let file_metadata = unknown_file_metadata(cx); - - let variants = &enum_type.ty_adt_def().unwrap().variants; - - let enumerators_metadata: Vec = variants - .iter() - .map(|v| { - let token = v.name.as_str(); - let name = CString::new(token.as_bytes()).unwrap(); - unsafe { - llvm::LLVMDIBuilderCreateEnumerator( - DIB(cx), - name.as_ptr(), - v.disr_val as u64) - } - }) - .collect(); - - let discriminant_type_metadata = |inttype: syntax::attr::IntType| { - let disr_type_key = (enum_def_id, inttype); - let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types - .borrow() - .get(&disr_type_key).cloned(); - match cached_discriminant_type_metadata { - Some(discriminant_type_metadata) => discriminant_type_metadata, - None => { - let discriminant_llvm_type = adt::ll_inttype(cx, inttype); - let (discriminant_size, discriminant_align) = - size_and_align_of(cx, discriminant_llvm_type); - let discriminant_base_type_metadata = - type_metadata(cx, - adt::ty_of_inttype(cx.tcx(), inttype), - codemap::DUMMY_SP); - let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); - - let name = CString::new(discriminant_name.as_bytes()).unwrap(); - let discriminant_type_metadata = unsafe { - llvm::LLVMDIBuilderCreateEnumerationType( - DIB(cx), - containing_scope, - name.as_ptr(), - NO_FILE_METADATA, - UNKNOWN_LINE_NUMBER, - bytes_to_bits(discriminant_size), - bytes_to_bits(discriminant_align), - create_DIArray(DIB(cx), &enumerators_metadata), - discriminant_base_type_metadata) - }; - - debug_context(cx).created_enum_disr_types - .borrow_mut() - .insert(disr_type_key, discriminant_type_metadata); - - discriminant_type_metadata - } - } - }; - - let type_rep = adt::represent_type(cx, enum_type); - - let discriminant_type_metadata = match *type_rep { - 
adt::CEnum(inttype, _, _) => { - return FinalMetadata(discriminant_type_metadata(inttype)) - }, - adt::RawNullablePointer { .. } | - adt::StructWrappedNullablePointer { .. } | - adt::Univariant(..) => None, - adt::General(inttype, _, _) => Some(discriminant_type_metadata(inttype)), - }; - - let enum_llvm_type = type_of::type_of(cx, enum_type); - let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type); - - let unique_type_id_str = debug_context(cx) - .type_map - .borrow() - .get_unique_type_id_as_string(unique_type_id); - - let enum_name = CString::new(enum_name).unwrap(); - let unique_type_id_str = CString::new(unique_type_id_str.as_bytes()).unwrap(); - let enum_metadata = unsafe { - llvm::LLVMDIBuilderCreateUnionType( - DIB(cx), - containing_scope, - enum_name.as_ptr(), - file_metadata, - UNKNOWN_LINE_NUMBER, - bytes_to_bits(enum_type_size), - bytes_to_bits(enum_type_align), - 0, // Flags - ptr::null_mut(), - 0, // RuntimeLang - unique_type_id_str.as_ptr()) - }; - - return create_and_register_recursive_type_forward_declaration( - cx, - enum_type, - unique_type_id, - enum_metadata, - enum_llvm_type, - EnumMDF(EnumMemberDescriptionFactory { - enum_type: enum_type, - type_rep: type_rep.clone(), - discriminant_type_metadata: discriminant_type_metadata, - containing_scope: containing_scope, - file_metadata: file_metadata, - span: span, - }), - ); - - fn get_enum_discriminant_name(cx: &CrateContext, - def_id: DefId) - -> token::InternedString { - cx.tcx().item_name(def_id).as_str() - } -} - -/// Creates debug information for a composite type, that is, anything that -/// results in a LLVM struct. -/// -/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. 
-fn composite_type_metadata(cx: &CrateContext, - composite_llvm_type: Type, - composite_type_name: &str, - composite_type_unique_id: UniqueTypeId, - member_descriptions: &[MemberDescription], - containing_scope: DIScope, - - // Ignore source location information as long as it - // can't be reconstructed for non-local crates. - _file_metadata: DIFile, - _definition_span: Span) - -> DICompositeType { - // Create the (empty) struct metadata node ... - let composite_type_metadata = create_struct_stub(cx, - composite_llvm_type, - composite_type_name, - composite_type_unique_id, - containing_scope); - // ... and immediately create and add the member descriptions. - set_members_of_composite_type(cx, - composite_type_metadata, - composite_llvm_type, - member_descriptions); - - return composite_type_metadata; -} - -fn set_members_of_composite_type(cx: &CrateContext, - composite_type_metadata: DICompositeType, - composite_llvm_type: Type, - member_descriptions: &[MemberDescription]) { - // In some rare cases LLVM metadata uniquing would lead to an existing type - // description being used instead of a new one created in - // create_struct_stub. This would cause a hard to trace assertion in - // DICompositeType::SetTypeArray(). The following check makes sure that we - // get a better error message if this should happen again due to some - // regression. 
- { - let mut composite_types_completed = - debug_context(cx).composite_types_completed.borrow_mut(); - if composite_types_completed.contains(&composite_type_metadata) { - cx.sess().bug("debuginfo::set_members_of_composite_type() - \ - Already completed forward declaration re-encountered."); - } else { - composite_types_completed.insert(composite_type_metadata); - } - } - - let member_metadata: Vec = member_descriptions - .iter() - .enumerate() - .map(|(i, member_description)| { - let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type); - let member_offset = match member_description.offset { - FixedMemberOffset { bytes } => bytes as u64, - ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i) - }; - - let member_name = member_description.name.as_bytes(); - let member_name = CString::new(member_name).unwrap(); - unsafe { - llvm::LLVMDIBuilderCreateMemberType( - DIB(cx), - composite_type_metadata, - member_name.as_ptr(), - NO_FILE_METADATA, - UNKNOWN_LINE_NUMBER, - bytes_to_bits(member_size), - bytes_to_bits(member_align), - bytes_to_bits(member_offset), - member_description.flags, - member_description.type_metadata) - } - }) - .collect(); - - unsafe { - let type_array = create_DIArray(DIB(cx), &member_metadata[..]); - llvm::LLVMDICompositeTypeSetTypeArray(DIB(cx), composite_type_metadata, type_array); - } -} - -// A convenience wrapper around LLVMDIBuilderCreateStructType(). Does not do any -// caching, does not add any fields to the struct. This can be done later with -// set_members_of_composite_type(). 
-fn create_struct_stub(cx: &CrateContext, - struct_llvm_type: Type, - struct_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type); - - let unique_type_id_str = debug_context(cx).type_map - .borrow() - .get_unique_type_id_as_string(unique_type_id); - let name = CString::new(struct_type_name).unwrap(); - let unique_type_id = CString::new(unique_type_id_str.as_bytes()).unwrap(); - let metadata_stub = unsafe { - // LLVMDIBuilderCreateStructType() wants an empty array. A null - // pointer will lead to hard to trace and debug LLVM assertions - // later on in llvm/lib/IR/Value.cpp. - let empty_array = create_DIArray(DIB(cx), &[]); - - llvm::LLVMDIBuilderCreateStructType( - DIB(cx), - containing_scope, - name.as_ptr(), - NO_FILE_METADATA, - UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), - 0, - ptr::null_mut(), - empty_array, - 0, - ptr::null_mut(), - unique_type_id.as_ptr()) - }; - - return metadata_stub; -} - -/// Creates debug information for the given global variable. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_global_var_metadata(cx: &CrateContext, - node_id: ast::NodeId, - global: ValueRef) { - if cx.dbg_cx().is_none() { - return; - } - - // Don't create debuginfo for globals inlined from other crates. The other - // crate should already contain debuginfo for it. More importantly, the - // global might not even exist in un-inlined form anywhere which would lead - // to a linker errors. - if cx.external_srcs().borrow().contains_key(&node_id) { - return; - } - - let var_item = cx.tcx().map.get(node_id); - - let (name, span) = match var_item { - hir_map::NodeItem(item) => { - match item.node { - hir::ItemStatic(..) => (item.name, item.span), - hir::ItemConst(..) 
=> (item.name, item.span), - _ => { - cx.sess() - .span_bug(item.span, - &format!("debuginfo::\ - create_global_var_metadata() - - Captured var-id refers to \ - unexpected ast_item variant: {:?}", - var_item)) - } - } - }, - _ => cx.sess().bug(&format!("debuginfo::create_global_var_metadata() \ - - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - var_item)) - }; - - let (file_metadata, line_number) = if span != codemap::DUMMY_SP { - let loc = span_start(cx, span); - (file_metadata(cx, &loc.file.name), loc.line as c_uint) - } else { - (NO_FILE_METADATA, UNKNOWN_LINE_NUMBER) - }; - - let is_local_to_unit = is_node_local_to_unit(cx, node_id); - let variable_type = cx.tcx().node_id_to_type(node_id); - let type_metadata = type_metadata(cx, variable_type, span); - let node_def_id = cx.tcx().map.local_def_id(node_id); - let namespace_node = namespace_for_item(cx, node_def_id); - let var_name = name.to_string(); - let linkage_name = - namespace_node.mangled_name_of_contained_item(&var_name[..]); - let var_scope = namespace_node.scope; - - let var_name = CString::new(var_name).unwrap(); - let linkage_name = CString::new(linkage_name).unwrap(); - unsafe { - llvm::LLVMDIBuilderCreateStaticVariable(DIB(cx), - var_scope, - var_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - line_number, - type_metadata, - is_local_to_unit, - global, - ptr::null_mut()); - } -} - -/// Creates debug information for the given local variable. -/// -/// This function assumes that there's a datum for each pattern component of the -/// local in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. 
-pub fn create_local_var_metadata(bcx: Block, local: &hir::Local) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let cx = bcx.ccx(); - let def_map = &cx.tcx().def_map; - let locals = bcx.fcx.lllocals.borrow(); - - pat_util::pat_bindings(def_map, &*local.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(datum) => datum, - None => { - bcx.sess().span_bug(span, - &format!("no entry in lllocals table for {}", - node_id)); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - cx.sess().span_bug(span, "debuginfo::create_local_var_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let scope_metadata = scope_metadata(bcx.fcx, node_id, span); - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - VariableKind::LocalVariable, - span); - }) -} - -/// Creates debug information for a variable captured in a closure. -/// -/// Adds the created metadata nodes directly to the crate's IR. 
-pub fn create_captured_var_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node_id: ast::NodeId, - env_pointer: ValueRef, - env_index: usize, - captured_by_ref: bool, - span: Span) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let cx = bcx.ccx(); - - let ast_item = cx.tcx().map.find(node_id); - - let variable_name = match ast_item { - None => { - cx.sess().span_bug(span, "debuginfo::create_captured_var_metadata: node not found"); - } - Some(hir_map::NodeLocal(pat)) => { - match pat.node { - hir::PatIdent(_, ref path1, _) => { - path1.node.name - } - _ => { - cx.sess() - .span_bug(span, - &format!( - "debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item)); - } - } - } - _ => { - cx.sess() - .span_bug(span, - &format!("debuginfo::create_captured_var_metadata() - \ - Captured var-id refers to unexpected \ - hir_map variant: {:?}", - ast_item)); - } - }; - - let variable_type = common::node_id_type(bcx, node_id); - let scope_metadata = bcx.fcx.debug_context.get_ref(cx, span).fn_metadata; - - // env_pointer is the alloca containing the pointer to the environment, - // so it's type is **EnvironmentType. In order to find out the type of - // the environment we have to "dereference" two times. 
- let llvm_env_data_type = common::val_ty(env_pointer).element_type() - .element_type(); - let byte_offset_of_var_in_env = machine::llelement_offset(cx, - llvm_env_data_type, - env_index); - - let address_operations = unsafe { - [llvm::LLVMDIBuilderCreateOpDeref(), - llvm::LLVMDIBuilderCreateOpPlus(), - byte_offset_of_var_in_env as i64, - llvm::LLVMDIBuilderCreateOpDeref()] - }; - - let address_op_count = if captured_by_ref { - address_operations.len() - } else { - address_operations.len() - 1 - }; - - let variable_access = VariableAccess::IndirectVariable { - alloca: env_pointer, - address_operations: &address_operations[..address_op_count] - }; - - declare_local(bcx, - variable_name, - variable_type, - scope_metadata, - variable_access, - VariableKind::CapturedVariable, - span); -} - -/// Creates debug information for a local variable introduced in the head of a -/// match-statement arm. -/// -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_match_binding_metadata<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - binding: BindingInfo<'tcx>) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let scope_metadata = scope_metadata(bcx.fcx, binding.id, binding.span); - let aops = unsafe { - [llvm::LLVMDIBuilderCreateOpDeref()] - }; - // Regardless of the actual type (`T`) we're always passed the stack slot - // (alloca) for the binding. For ByRef bindings that's a `T*` but for ByMove - // bindings we actually have `T**`. So to get the actual variable we need to - // dereference once more. For ByCopy we just use the stack slot we created - // for the binding. 
- let var_access = match binding.trmode { - TransBindingMode::TrByCopy(llbinding) | - TransBindingMode::TrByMoveIntoCopy(llbinding) => VariableAccess::DirectVariable { - alloca: llbinding - }, - TransBindingMode::TrByMoveRef => VariableAccess::IndirectVariable { - alloca: binding.llmatch, - address_operations: &aops - }, - TransBindingMode::TrByRef => VariableAccess::DirectVariable { - alloca: binding.llmatch - } - }; - - declare_local(bcx, - variable_name, - binding.ty, - scope_metadata, - var_access, - VariableKind::LocalVariable, - binding.span); -} - -/// Creates debug information for the given function argument. -/// -/// This function assumes that there's a datum for each pattern component of the -/// argument in `bcx.fcx.lllocals`. -/// Adds the created metadata nodes directly to the crate's IR. -pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) { - if bcx.unreachable.get() || - fn_should_be_ignored(bcx.fcx) || - bcx.sess().opts.debuginfo != FullDebugInfo { - return; - } - - let def_map = &bcx.tcx().def_map; - let scope_metadata = bcx - .fcx - .debug_context - .get_ref(bcx.ccx(), arg.pat.span) - .fn_metadata; - let locals = bcx.fcx.lllocals.borrow(); - - pat_util::pat_bindings(def_map, &*arg.pat, |_, node_id, span, var_name| { - let datum = match locals.get(&node_id) { - Some(v) => v, - None => { - bcx.sess().span_bug(span, - &format!("no entry in lllocals table for {}", - node_id)); - } - }; - - if unsafe { llvm::LLVMIsAAllocaInst(datum.val) } == ptr::null_mut() { - bcx.sess().span_bug(span, "debuginfo::create_argument_metadata() - \ - Referenced variable location is not an alloca!"); - } - - let argument_index = { - let counter = &bcx - .fcx - .debug_context - .get_ref(bcx.ccx(), span) - .argument_counter; - let argument_index = counter.get(); - counter.set(argument_index + 1); - argument_index - }; - - declare_local(bcx, - var_name.node, - datum.ty, - scope_metadata, - VariableAccess::DirectVariable { alloca: datum.val }, - 
VariableKind::ArgumentVariable(argument_index), - span); - }) -} diff --git a/src/librustc_trans/trans/debuginfo/mod.rs b/src/librustc_trans/trans/debuginfo/mod.rs deleted file mode 100644 index 5e11a50be2273..0000000000000 --- a/src/librustc_trans/trans/debuginfo/mod.rs +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// See doc.rs for documentation. -mod doc; - -use self::VariableAccess::*; -use self::VariableKind::*; - -use self::utils::{DIB, span_start, assert_type_for_node_id, contains_nodebug_attribute, - create_DIArray, is_node_local_to_unit}; -use self::namespace::{namespace_for_item, NamespaceTreeNode}; -use self::type_names::compute_debuginfo_type_name; -use self::metadata::{type_metadata, diverging_type_metadata}; -use self::metadata::{file_metadata, scope_metadata, TypeMap, compile_unit_metadata}; -use self::source_loc::InternalDebugLocation; - -use llvm; -use llvm::{ModuleRef, ContextRef, ValueRef}; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilderRef, DISubprogram, DIArray, - DIDescriptor, FlagPrototyped}; -use middle::def_id::DefId; -use middle::infer::normalize_associated_type; -use middle::subst::{self, Substs}; -use rustc_front; -use rustc_front::hir; - -use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block}; -use trans; -use trans::{monomorphize, type_of}; -use middle::infer; -use middle::ty::{self, Ty}; -use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; -use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet}; -use rustc::front::map as hir_map; - -use libc::c_uint; -use std::cell::{Cell, RefCell}; -use std::ffi::CString; -use std::ptr; -use std::rc::Rc; - 
-use syntax::codemap::{Span, Pos}; -use syntax::{abi, ast, codemap}; -use syntax::attr::IntType; -use syntax::parse::token::{self, special_idents}; - -pub mod gdb; -mod utils; -mod namespace; -mod type_names; -mod metadata; -mod create_scope_map; -mod source_loc; - -pub use self::source_loc::set_source_location; -pub use self::source_loc::clear_source_location; -pub use self::source_loc::start_emitting_source_locations; -pub use self::source_loc::get_cleanup_debug_loc_for_ast_node; -pub use self::source_loc::with_source_location_override; -pub use self::metadata::create_match_binding_metadata; -pub use self::metadata::create_argument_metadata; -pub use self::metadata::create_captured_var_metadata; -pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_local_var_metadata; - -#[allow(non_upper_case_globals)] -const DW_TAG_auto_variable: c_uint = 0x100; -#[allow(non_upper_case_globals)] -const DW_TAG_arg_variable: c_uint = 0x101; - -/// A context object for maintaining all state needed by the debuginfo module. -pub struct CrateDebugContext<'tcx> { - llcontext: ContextRef, - builder: DIBuilderRef, - current_debug_location: Cell, - created_files: RefCell>, - created_enum_disr_types: RefCell>, - - type_map: RefCell>, - namespace_map: RefCell, Rc>>, - - // This collection is used to assert that composite types (structs, enums, - // ...) 
have their members only set once: - composite_types_completed: RefCell>, -} - -impl<'tcx> CrateDebugContext<'tcx> { - pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> { - debug!("CrateDebugContext::new"); - let builder = unsafe { llvm::LLVMDIBuilderCreate(llmod) }; - // DIBuilder inherits context from the module, so we'd better use the same one - let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) }; - return CrateDebugContext { - llcontext: llcontext, - builder: builder, - current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation), - created_files: RefCell::new(FnvHashMap()), - created_enum_disr_types: RefCell::new(FnvHashMap()), - type_map: RefCell::new(TypeMap::new()), - namespace_map: RefCell::new(FnvHashMap()), - composite_types_completed: RefCell::new(FnvHashSet()), - }; - } -} - -pub enum FunctionDebugContext { - RegularContext(Box), - DebugInfoDisabled, - FunctionWithoutDebugInfo, -} - -impl FunctionDebugContext { - fn get_ref<'a>(&'a self, - cx: &CrateContext, - span: Span) - -> &'a FunctionDebugContextData { - match *self { - FunctionDebugContext::RegularContext(box ref data) => data, - FunctionDebugContext::DebugInfoDisabled => { - cx.sess().span_bug(span, - FunctionDebugContext::debuginfo_disabled_message()); - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - cx.sess().span_bug(span, - FunctionDebugContext::should_be_ignored_message()); - } - } - } - - fn debuginfo_disabled_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" - } - - fn should_be_ignored_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext for function that should be \ - ignored by debug info!" 
- } -} - -pub struct FunctionDebugContextData { - scope_map: RefCell>, - fn_metadata: DISubprogram, - argument_counter: Cell, - source_locations_enabled: Cell, - source_location_override: Cell, -} - -pub enum VariableAccess<'a> { - // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: ValueRef }, - // The llptr given is an alloca containing the start of some pointer chain - // leading to the variable's content. - IndirectVariable { alloca: ValueRef, address_operations: &'a [i64] } -} - -pub enum VariableKind { - ArgumentVariable(usize /*index*/), - LocalVariable, - CapturedVariable, -} - -/// Create any deferred debug metadata nodes -pub fn finalize(cx: &CrateContext) { - if cx.dbg_cx().is_none() { - return; - } - - debug!("finalize"); - let _ = compile_unit_metadata(cx); - - if gdb::needs_gdb_debug_scripts_section(cx) { - // Add a .debug_gdb_scripts section to this compile-unit. This will - // cause GDB to try and load the gdb_load_rust_pretty_printers.py file, - // which activates the Rust pretty printers for binary this section is - // contained in. - gdb::get_or_insert_gdb_debug_scripts_section_global(cx); - } - - unsafe { - llvm::LLVMDIBuilderFinalize(DIB(cx)); - llvm::LLVMDIBuilderDispose(DIB(cx)); - // Debuginfo generation in LLVM by default uses a higher - // version of dwarf than OS X currently understands. We can - // instruct LLVM to emit an older version of dwarf, however, - // for OS X to understand. For more info see #11352 - // This can be overridden using --llvm-opts -dwarf-version,N. - // Android has the same issue (#22398) - if cx.sess().target.target.options.is_like_osx || - cx.sess().target.target.options.is_like_android { - llvm::LLVMRustAddModuleFlag(cx.llmod(), - "Dwarf Version\0".as_ptr() as *const _, - 2) - } - - // Prevent bitcode readers from deleting the debug info. 
- let ptr = "Debug Info Version\0".as_ptr(); - llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _, - llvm::LLVMRustDebugMetadataVersion()); - }; -} - -/// Creates the function-specific debug context. -/// -/// Returns the FunctionDebugContext for the function which holds state needed -/// for debug info creation. The function may also return another variant of the -/// FunctionDebugContext enum which indicates why no debuginfo should be created -/// for the function. -pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - fn_ast_id: ast::NodeId, - param_substs: &Substs<'tcx>, - llfn: ValueRef) -> FunctionDebugContext { - if cx.sess().opts.debuginfo == NoDebugInfo { - return FunctionDebugContext::DebugInfoDisabled; - } - - // Clear the debug location so we don't assign them in the function prelude. - // Do this here already, in case we do an early exit from this function. - source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation); - - if fn_ast_id == ast::DUMMY_NODE_ID { - // This is a function not linked to any source location, so don't - // generate debuginfo for it. 
- return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - let empty_generics = rustc_front::util::empty_generics(); - - let fnitem = cx.tcx().map.get(fn_ast_id); - - let (name, fn_decl, generics, top_level_block, span, has_path) = match fnitem { - hir_map::NodeItem(ref item) => { - if contains_nodebug_attribute(&item.attrs) { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - match item.node { - hir::ItemFn(ref fn_decl, _, _, _, ref generics, ref top_level_block) => { - (item.name, fn_decl, generics, top_level_block, item.span, true) - } - _ => { - cx.sess().span_bug(item.span, - "create_function_debug_context: item bound to non-function"); - } - } - } - hir_map::NodeImplItem(impl_item) => { - match impl_item.node { - hir::ImplItemKind::Method(ref sig, ref body) => { - if contains_nodebug_attribute(&impl_item.attrs) { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - (impl_item.name, - &sig.decl, - &sig.generics, - body, - impl_item.span, - true) - } - _ => { - cx.sess().span_bug(impl_item.span, - "create_function_debug_context() \ - called on non-method impl item?!") - } - } - } - hir_map::NodeExpr(ref expr) => { - match expr.node { - hir::ExprClosure(_, ref fn_decl, ref top_level_block) => { - let name = format!("fn{}", token::gensym("fn")); - let name = token::intern(&name[..]); - (name, fn_decl, - // This is not quite right. It should actually inherit - // the generics of the enclosing function. 
- &empty_generics, - top_level_block, - expr.span, - // Don't try to lookup the item path: - false) - } - _ => cx.sess().span_bug(expr.span, - "create_function_debug_context: expected an expr_fn_block here") - } - } - hir_map::NodeTraitItem(trait_item) => { - match trait_item.node { - hir::MethodTraitItem(ref sig, Some(ref body)) => { - if contains_nodebug_attribute(&trait_item.attrs) { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - (trait_item.name, - &sig.decl, - &sig.generics, - body, - trait_item.span, - true) - } - _ => { - cx.sess() - .bug(&format!("create_function_debug_context: \ - unexpected sort of node: {:?}", - fnitem)) - } - } - } - hir_map::NodeForeignItem(..) | - hir_map::NodeVariant(..) | - hir_map::NodeStructCtor(..) => { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - _ => cx.sess().bug(&format!("create_function_debug_context: \ - unexpected sort of node: {:?}", - fnitem)) - }; - - // This can be the case for functions inlined from another crate - if span == codemap::DUMMY_SP { - return FunctionDebugContext::FunctionWithoutDebugInfo; - } - - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name); - - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(cx, - fn_ast_id, - param_substs, - span); - llvm::LLVMDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) - }; - - // Get_template_parameters() will append a `<...>` clause to the function - // name if necessary. - let mut function_name = name.to_string(); - let template_parameters = get_template_parameters(cx, - generics, - param_substs, - file_metadata, - &mut function_name); - - // There is no hir_map::Path for hir::ExprClosure-type functions. For now, - // just don't put them into a namespace. In the future this could be improved - // somehow (storing a path in the hir_map, or construct a path using the - // enclosing function). 
- let (linkage_name, containing_scope) = if has_path { - let fn_ast_def_id = cx.tcx().map.local_def_id(fn_ast_id); - let namespace_node = namespace_for_item(cx, fn_ast_def_id); - let linkage_name = namespace_node.mangled_name_of_contained_item( - &function_name[..]); - let containing_scope = namespace_node.scope; - (linkage_name, containing_scope) - } else { - (function_name.clone(), file_metadata) - }; - - // Clang sets this parameter to the opening brace of the function's block, - // so let's do this too. - let scope_line = span_start(cx, top_level_block.span).line; - - let is_local_to_unit = is_node_local_to_unit(cx, fn_ast_id); - - let function_name = CString::new(function_name).unwrap(); - let linkage_name = CString::new(linkage_name).unwrap(); - let fn_metadata = unsafe { - llvm::LLVMDIBuilderCreateFunction( - DIB(cx), - containing_scope, - function_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - loc.line as c_uint, - function_type_metadata, - is_local_to_unit, - true, - scope_line as c_uint, - FlagPrototyped as c_uint, - cx.sess().opts.optimize != config::OptLevel::No, - llfn, - template_parameters, - ptr::null_mut()) - }; - - let scope_map = create_scope_map::create_scope_map(cx, - &fn_decl.inputs, - &*top_level_block, - fn_metadata, - fn_ast_id); - - // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = box FunctionDebugContextData { - scope_map: RefCell::new(scope_map), - fn_metadata: fn_metadata, - argument_counter: Cell::new(1), - source_locations_enabled: Cell::new(false), - source_location_override: Cell::new(false), - }; - - - - return FunctionDebugContext::RegularContext(fn_debug_context); - - fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - fn_ast_id: ast::NodeId, - param_substs: &Substs<'tcx>, - error_reporting_span: Span) -> DIArray { - if cx.sess().opts.debuginfo == LimitedDebugInfo { - return create_DIArray(DIB(cx), &[]); - } - - // Return type -- llvm::DIBuilder wants this 
at index 0 - assert_type_for_node_id(cx, fn_ast_id, error_reporting_span); - let fn_type = cx.tcx().node_id_to_type(fn_ast_id); - let fn_type = monomorphize::apply_param_substs(cx.tcx(), param_substs, &fn_type); - - let (sig, abi) = match fn_type.sty { - ty::TyBareFn(_, ref barefnty) => { - let sig = cx.tcx().erase_late_bound_regions(&barefnty.sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - (sig, barefnty.abi) - } - ty::TyClosure(def_id, ref substs) => { - let closure_type = cx.tcx().closure_type(def_id, substs); - let sig = cx.tcx().erase_late_bound_regions(&closure_type.sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - (sig, closure_type.abi) - } - - _ => cx.sess().bug("get_function_metdata: Expected a function type!") - }; - - let mut signature = Vec::with_capacity(sig.inputs.len() + 1); - - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(match sig.output { - ty::FnConverging(ret_ty) => match ret_ty.sty { - ty::TyTuple(ref tys) if tys.is_empty() => ptr::null_mut(), - _ => type_metadata(cx, ret_ty, codemap::DUMMY_SP) - }, - ty::FnDiverging => diverging_type_metadata(cx) - }); - - let inputs = &if abi == abi::RustCall { - type_of::untuple_arguments(cx, &sig.inputs) - } else { - sig.inputs - }; - - // Arguments types - for &argument_type in inputs { - signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP)); - } - - return create_DIArray(DIB(cx), &signature[..]); - } - - fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - generics: &hir::Generics, - param_substs: &Substs<'tcx>, - file_metadata: DIFile, - name_to_append_suffix_to: &mut String) - -> DIArray - { - let self_type = param_substs.self_ty(); - let self_type = normalize_associated_type(cx.tcx(), &self_type); - - // Only true for static default methods: - let has_self_type = self_type.is_some(); - - if !generics.is_type_parameterized() && !has_self_type { - return create_DIArray(DIB(cx), &[]); - } - - 
name_to_append_suffix_to.push('<'); - - // The list to be filled with template parameters: - let mut template_params: Vec = - Vec::with_capacity(generics.ty_params.len() + 1); - - // Handle self type - if has_self_type { - let actual_self_type = self_type.unwrap(); - // Add self type name to <...> clause of function name - let actual_self_type_name = compute_debuginfo_type_name( - cx, - actual_self_type, - true); - - name_to_append_suffix_to.push_str(&actual_self_type_name[..]); - - if generics.is_type_parameterized() { - name_to_append_suffix_to.push_str(","); - } - - // Only create type information if full debuginfo is enabled - if cx.sess().opts.debuginfo == FullDebugInfo { - let actual_self_type_metadata = type_metadata(cx, - actual_self_type, - codemap::DUMMY_SP); - - let name = special_idents::type_self.name.as_str(); - - let name = CString::new(name.as_bytes()).unwrap(); - let param_metadata = unsafe { - llvm::LLVMDIBuilderCreateTemplateTypeParameter( - DIB(cx), - ptr::null_mut(), - name.as_ptr(), - actual_self_type_metadata, - file_metadata, - 0, - 0) - }; - - template_params.push(param_metadata); - } - } - - // Handle other generic parameters - let actual_types = param_substs.types.get_slice(subst::FnSpace); - for (index, &hir::TyParam{ name, .. 
}) in generics.ty_params.iter().enumerate() { - let actual_type = actual_types[index]; - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx, - actual_type, - true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); - - if index != generics.ty_params.len() - 1 { - name_to_append_suffix_to.push_str(","); - } - - // Again, only create type information if full debuginfo is enabled - if cx.sess().opts.debuginfo == FullDebugInfo { - let actual_type_metadata = type_metadata(cx, actual_type, codemap::DUMMY_SP); - let name = CString::new(name.as_str().as_bytes()).unwrap(); - let param_metadata = unsafe { - llvm::LLVMDIBuilderCreateTemplateTypeParameter( - DIB(cx), - ptr::null_mut(), - name.as_ptr(), - actual_type_metadata, - file_metadata, - 0, - 0) - }; - template_params.push(param_metadata); - } - } - - name_to_append_suffix_to.push('>'); - - return create_DIArray(DIB(cx), &template_params[..]); - } -} - -fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: DIScope, - variable_access: VariableAccess, - variable_kind: VariableKind, - span: Span) { - let cx: &CrateContext = bcx.ccx(); - - let filename = span_start(cx, span).file.name.clone(); - let file_metadata = file_metadata(cx, &filename[..]); - - let loc = span_start(cx, span); - let type_metadata = type_metadata(cx, variable_type, span); - - let (argument_index, dwarf_tag) = match variable_kind { - ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), - LocalVariable | - CapturedVariable => (0, DW_TAG_auto_variable) - }; - - let name = CString::new(variable_name.as_str().as_bytes()).unwrap(); - match (variable_access, &[][..]) { - (DirectVariable { alloca }, address_operations) | - (IndirectVariable {alloca, address_operations}, _) => { - let metadata = unsafe { - llvm::LLVMDIBuilderCreateVariable( - DIB(cx), - dwarf_tag, - scope_metadata, - name.as_ptr(), - 
file_metadata, - loc.line as c_uint, - type_metadata, - cx.sess().opts.optimize != config::OptLevel::No, - 0, - address_operations.as_ptr(), - address_operations.len() as c_uint, - argument_index) - }; - source_loc::set_debug_location(cx, InternalDebugLocation::new(scope_metadata, - loc.line, - loc.col.to_usize())); - unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder()); - let instr = llvm::LLVMDIBuilderInsertDeclareAtEnd( - DIB(cx), - alloca, - metadata, - address_operations.as_ptr(), - address_operations.len() as c_uint, - debug_loc, - bcx.llbb); - - llvm::LLVMSetInstDebugLocation(trans::build::B(bcx).llbuilder, instr); - } - } - } - - match variable_kind { - ArgumentVariable(_) | CapturedVariable => { - assert!(!bcx.fcx - .debug_context - .get_ref(cx, span) - .source_locations_enabled - .get()); - source_loc::set_debug_location(cx, InternalDebugLocation::UnknownLocation); - } - _ => { /* nothing to do */ } - } -} - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DebugLoc { - At(ast::NodeId, Span), - None -} - -impl DebugLoc { - pub fn apply(&self, fcx: &FunctionContext) { - match *self { - DebugLoc::At(node_id, span) => { - source_loc::set_source_location(fcx, node_id, span); - } - DebugLoc::None => { - source_loc::clear_source_location(fcx); - } - } - } -} - -pub trait ToDebugLoc { - fn debug_loc(&self) -> DebugLoc; -} - -impl ToDebugLoc for hir::Expr { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for NodeIdAndSpan { - fn debug_loc(&self) -> DebugLoc { - DebugLoc::At(self.id, self.span) - } -} - -impl ToDebugLoc for Option { - fn debug_loc(&self) -> DebugLoc { - match *self { - Some(NodeIdAndSpan { id, span }) => DebugLoc::At(id, span), - None => DebugLoc::None - } - } -} diff --git a/src/librustc_trans/trans/debuginfo/namespace.rs b/src/librustc_trans/trans/debuginfo/namespace.rs deleted file mode 100644 index 533f8d7bad7ad..0000000000000 --- 
a/src/librustc_trans/trans/debuginfo/namespace.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Namespace Handling. - -use super::utils::{DIB, debug_context}; - -use llvm; -use llvm::debuginfo::DIScope; -use rustc::middle::def_id::DefId; -use rustc::front::map as hir_map; -use trans::common::CrateContext; - -use std::ffi::CString; -use std::ptr; -use std::rc::{Rc, Weak}; -use syntax::ast; -use syntax::parse::token; - -pub struct NamespaceTreeNode { - pub name: ast::Name, - pub scope: DIScope, - pub parent: Option>, -} - -impl NamespaceTreeNode { - pub fn mangled_name_of_contained_item(&self, item_name: &str) -> String { - fn fill_nested(node: &NamespaceTreeNode, output: &mut String) { - match node.parent { - Some(ref parent) => fill_nested(&*parent.upgrade().unwrap(), output), - None => {} - } - let string = node.name.as_str(); - output.push_str(&string.len().to_string()); - output.push_str(&string); - } - - let mut name = String::from("_ZN"); - fill_nested(self, &mut name); - name.push_str(&item_name.len().to_string()); - name.push_str(item_name); - name.push('E'); - name - } -} - -pub fn crate_root_namespace<'a>(cx: &'a CrateContext) -> &'a str { - &cx.link_meta().crate_name -} - -pub fn namespace_for_item(cx: &CrateContext, def_id: DefId) -> Rc { - cx.tcx().with_path(def_id, |path| { - // prepend crate name if not already present - let krate = if def_id.is_local() { - let crate_namespace_name = token::intern(crate_root_namespace(cx)); - Some(hir_map::PathMod(crate_namespace_name)) - } else { - None - }; - let mut path = krate.into_iter().chain(path).peekable(); - - let mut current_key = Vec::new(); - let mut 
parent_node: Option> = None; - - // Create/Lookup namespace for each element of the path. - loop { - // Emulate a for loop so we can use peek below. - let path_element = match path.next() { - Some(e) => e, - None => break - }; - // Ignore the name of the item (the last path element). - if path.peek().is_none() { - break; - } - - let name = path_element.name(); - current_key.push(name); - - let existing_node = debug_context(cx).namespace_map.borrow() - .get(¤t_key).cloned(); - let current_node = match existing_node { - Some(existing_node) => existing_node, - None => { - // create and insert - let parent_scope = match parent_node { - Some(ref node) => node.scope, - None => ptr::null_mut() - }; - let namespace_name = name.as_str(); - let namespace_name = CString::new(namespace_name.as_bytes()).unwrap(); - let scope = unsafe { - llvm::LLVMDIBuilderCreateNameSpace( - DIB(cx), - parent_scope, - namespace_name.as_ptr(), - // cannot reconstruct file ... - ptr::null_mut(), - // ... or line information, but that's not so important. - 0) - }; - - let node = Rc::new(NamespaceTreeNode { - name: name, - scope: scope, - parent: parent_node.map(|parent| Rc::downgrade(&parent)), - }); - - debug_context(cx).namespace_map.borrow_mut() - .insert(current_key.clone(), node.clone()); - - node - } - }; - - parent_node = Some(current_node); - } - - match parent_node { - Some(node) => node, - None => { - cx.sess().bug(&format!("debuginfo::namespace_for_item(): \ - path too short for {:?}", - def_id)); - } - } - }) -} diff --git a/src/librustc_trans/trans/debuginfo/source_loc.rs b/src/librustc_trans/trans/debuginfo/source_loc.rs deleted file mode 100644 index 981a23fd664a9..0000000000000 --- a/src/librustc_trans/trans/debuginfo/source_loc.rs +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use self::InternalDebugLocation::*; - -use super::utils::{debug_context, span_start, fn_should_be_ignored}; -use super::metadata::{scope_metadata,UNKNOWN_COLUMN_NUMBER}; -use super::{FunctionDebugContext, DebugLoc}; - -use llvm; -use llvm::debuginfo::DIScope; -use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext}; - -use libc::c_uint; -use std::ptr; -use syntax::codemap::{Span, Pos}; -use syntax::{ast, codemap}; - -pub fn get_cleanup_debug_loc_for_ast_node<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - node_id: ast::NodeId, - node_span: Span, - is_block: bool) - -> NodeIdAndSpan { - // A debug location needs two things: - // (1) A span (of which only the beginning will actually be used) - // (2) An AST node-id which will be used to look up the lexical scope - // for the location in the functions scope-map - // - // This function will calculate the debug location for compiler-generated - // cleanup calls that are executed when control-flow leaves the - // scope identified by `node_id`. - // - // For everything but block-like things we can simply take id and span of - // the given expression, meaning that from a debugger's view cleanup code is - // executed at the same source location as the statement/expr itself. - // - // Blocks are a special case. Here we want the cleanup to be linked to the - // closing curly brace of the block. The *scope* the cleanup is executed in - // is up to debate: It could either still be *within* the block being - // cleaned up, meaning that locals from the block are still visible in the - // debugger. - // Or it could be in the scope that the block is contained in, so any locals - // from within the block are already considered out-of-scope and thus not - // accessible in the debugger anymore. 
- // - // The current implementation opts for the second option: cleanup of a block - // already happens in the parent scope of the block. The main reason for - // this decision is that scoping becomes controlflow dependent when variable - // shadowing is involved and it's impossible to decide statically which - // scope is actually left when the cleanup code is executed. - // In practice it shouldn't make much of a difference. - - let mut cleanup_span = node_span; - - if is_block { - // Not all blocks actually have curly braces (e.g. simple closure - // bodies), in which case we also just want to return the span of the - // whole expression. - let code_snippet = cx.sess().codemap().span_to_snippet(node_span); - if let Ok(code_snippet) = code_snippet { - let bytes = code_snippet.as_bytes(); - - if !bytes.is_empty() && &bytes[bytes.len()-1..] == b"}" { - cleanup_span = Span { - lo: node_span.hi - codemap::BytePos(1), - hi: node_span.hi, - expn_id: node_span.expn_id - }; - } - } - } - - NodeIdAndSpan { - id: node_id, - span: cleanup_span - } -} - - -/// Sets the current debug location at the beginning of the span. -/// -/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). The node_id -/// parameter is used to reliably find the correct visibility scope for the code -/// position. -pub fn set_source_location(fcx: &FunctionContext, - node_id: ast::NodeId, - span: Span) { - match fcx.debug_context { - FunctionDebugContext::DebugInfoDisabled => return, - FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, UnknownLocation); - return; - } - FunctionDebugContext::RegularContext(box ref function_debug_context) => { - if function_debug_context.source_location_override.get() { - // Just ignore any attempts to set a new debug location while - // the override is active. 
- return; - } - - let cx = fcx.ccx; - - debug!("set_source_location: {}", cx.sess().codemap().span_to_string(span)); - - if function_debug_context.source_locations_enabled.get() { - let loc = span_start(cx, span); - let scope = scope_metadata(fcx, node_id, span); - - set_debug_location(cx, InternalDebugLocation::new(scope, - loc.line, - loc.col.to_usize())); - } else { - set_debug_location(cx, UnknownLocation); - } - } - } -} - -/// This function makes sure that all debug locations emitted while executing -/// `wrapped_function` are set to the given `debug_loc`. -pub fn with_source_location_override(fcx: &FunctionContext, - debug_loc: DebugLoc, - wrapped_function: F) -> R - where F: FnOnce() -> R -{ - match fcx.debug_context { - FunctionDebugContext::DebugInfoDisabled => { - wrapped_function() - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, UnknownLocation); - wrapped_function() - } - FunctionDebugContext::RegularContext(box ref function_debug_context) => { - if function_debug_context.source_location_override.get() { - wrapped_function() - } else { - debug_loc.apply(fcx); - function_debug_context.source_location_override.set(true); - let result = wrapped_function(); - function_debug_context.source_location_override.set(false); - result - } - } - } -} - -/// Clears the current debug location. -/// -/// Instructions generated hereafter won't be assigned a source location. -pub fn clear_source_location(fcx: &FunctionContext) { - if fn_should_be_ignored(fcx) { - return; - } - - set_debug_location(fcx.ccx, UnknownLocation); -} - -/// Enables emitting source locations for the given functions. -/// -/// Since we don't want source locations to be emitted for the function prelude, -/// they are disabled when beginning to translate a new function. This functions -/// switches source location emitting on and must therefore be called before the -/// first real statement/expression of the function is translated. 
-pub fn start_emitting_source_locations(fcx: &FunctionContext) { - match fcx.debug_context { - FunctionDebugContext::RegularContext(box ref data) => { - data.source_locations_enabled.set(true) - }, - _ => { /* safe to ignore */ } - } -} - - -#[derive(Copy, Clone, PartialEq)] -pub enum InternalDebugLocation { - KnownLocation { scope: DIScope, line: usize, col: usize }, - UnknownLocation -} - -impl InternalDebugLocation { - pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation { - KnownLocation { - scope: scope, - line: line, - col: col, - } - } -} - -pub fn set_debug_location(cx: &CrateContext, debug_location: InternalDebugLocation) { - if debug_location == debug_context(cx).current_debug_location.get() { - return; - } - - let metadata_node; - - match debug_location { - KnownLocation { scope, line, .. } => { - // Always set the column to zero like Clang and GCC - let col = UNKNOWN_COLUMN_NUMBER; - debug!("setting debug location to {} {}", line, col); - - unsafe { - metadata_node = llvm::LLVMDIBuilderCreateDebugLocation( - debug_context(cx).llcontext, - line as c_uint, - col as c_uint, - scope, - ptr::null_mut()); - } - } - UnknownLocation => { - debug!("clearing debug location "); - metadata_node = ptr::null_mut(); - } - }; - - unsafe { - llvm::LLVMSetCurrentDebugLocation(cx.raw_builder(), metadata_node); - } - - debug_context(cx).current_debug_location.set(debug_location); -} diff --git a/src/librustc_trans/trans/debuginfo/type_names.rs b/src/librustc_trans/trans/debuginfo/type_names.rs deleted file mode 100644 index 518a78f8fd4c7..0000000000000 --- a/src/librustc_trans/trans/debuginfo/type_names.rs +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -// Type Names for Debug Info. - -use super::namespace::crate_root_namespace; - -use trans::common::CrateContext; -use middle::def_id::DefId; -use middle::infer; -use middle::subst::{self, Substs}; -use middle::ty::{self, Ty}; - -use rustc_front::hir; - -// Compute the name of the type as it should be stored in debuginfo. Does not do -// any caching, i.e. calling the function twice with the same type will also do -// the work twice. The `qualified` parameter only affects the first level of the -// type name, further levels (i.e. type parameters) are always fully qualified. -pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - qualified: bool) - -> String { - let mut result = String::with_capacity(64); - push_debuginfo_type_name(cx, t, qualified, &mut result); - result -} - -// Pushes the name of the type as it should be stored in debuginfo on the -// `output` String. See also compute_debuginfo_type_name(). 
-pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - qualified: bool, - output: &mut String) { - match t.sty { - ty::TyBool => output.push_str("bool"), - ty::TyChar => output.push_str("char"), - ty::TyStr => output.push_str("str"), - ty::TyInt(int_ty) => output.push_str(int_ty.ty_to_string()), - ty::TyUint(uint_ty) => output.push_str(uint_ty.ty_to_string()), - ty::TyFloat(float_ty) => output.push_str(float_ty.ty_to_string()), - ty::TyStruct(def, substs) | - ty::TyEnum(def, substs) => { - push_item_name(cx, def.did, qualified, output); - push_type_params(cx, substs, output); - }, - ty::TyTuple(ref component_types) => { - output.push('('); - for &component_type in component_types { - push_debuginfo_type_name(cx, component_type, true, output); - output.push_str(", "); - } - if !component_types.is_empty() { - output.pop(); - output.pop(); - } - output.push(')'); - }, - ty::TyBox(inner_type) => { - output.push_str("Box<"); - push_debuginfo_type_name(cx, inner_type, true, output); - output.push('>'); - }, - ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => { - output.push('*'); - match mutbl { - hir::MutImmutable => output.push_str("const "), - hir::MutMutable => output.push_str("mut "), - } - - push_debuginfo_type_name(cx, inner_type, true, output); - }, - ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => { - output.push('&'); - if mutbl == hir::MutMutable { - output.push_str("mut "); - } - - push_debuginfo_type_name(cx, inner_type, true, output); - }, - ty::TyArray(inner_type, len) => { - output.push('['); - push_debuginfo_type_name(cx, inner_type, true, output); - output.push_str(&format!("; {}", len)); - output.push(']'); - }, - ty::TySlice(inner_type) => { - output.push('['); - push_debuginfo_type_name(cx, inner_type, true, output); - output.push(']'); - }, - ty::TyTrait(ref trait_data) => { - let principal = cx.tcx().erase_late_bound_regions(&trait_data.principal); - push_item_name(cx, principal.def_id, false, 
output); - push_type_params(cx, principal.substs, output); - }, - ty::TyBareFn(_, &ty::BareFnTy{ unsafety, abi, ref sig } ) => { - if unsafety == hir::Unsafety::Unsafe { - output.push_str("unsafe "); - } - - if abi != ::syntax::abi::Rust { - output.push_str("extern \""); - output.push_str(abi.name()); - output.push_str("\" "); - } - - output.push_str("fn("); - - let sig = cx.tcx().erase_late_bound_regions(sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - if !sig.inputs.is_empty() { - for ¶meter_type in &sig.inputs { - push_debuginfo_type_name(cx, parameter_type, true, output); - output.push_str(", "); - } - output.pop(); - output.pop(); - } - - if sig.variadic { - if !sig.inputs.is_empty() { - output.push_str(", ..."); - } else { - output.push_str("..."); - } - } - - output.push(')'); - - match sig.output { - ty::FnConverging(result_type) if result_type.is_nil() => {} - ty::FnConverging(result_type) => { - output.push_str(" -> "); - push_debuginfo_type_name(cx, result_type, true, output); - } - ty::FnDiverging => { - output.push_str(" -> !"); - } - } - }, - ty::TyClosure(..) => { - output.push_str("closure"); - } - ty::TyError | - ty::TyInfer(_) | - ty::TyProjection(..) 
| - ty::TyParam(_) => { - cx.sess().bug(&format!("debuginfo: Trying to create type name for \ - unexpected type: {:?}", t)); - } - } - - fn push_item_name(cx: &CrateContext, - def_id: DefId, - qualified: bool, - output: &mut String) { - cx.tcx().with_path(def_id, |path| { - if qualified { - if def_id.is_local() { - output.push_str(crate_root_namespace(cx)); - output.push_str("::"); - } - - let mut path_element_count = 0; - for path_element in path { - output.push_str(&path_element.name().as_str()); - output.push_str("::"); - path_element_count += 1; - } - - if path_element_count == 0 { - cx.sess().bug("debuginfo: Encountered empty item path!"); - } - - output.pop(); - output.pop(); - } else { - let name = path.last().expect("debuginfo: Empty item path?").name(); - output.push_str(&name.as_str()); - } - }); - } - - // Pushes the type parameters in the given `Substs` to the output string. - // This ignores region parameters, since they can't reliably be - // reconstructed for items from non-local crates. For local crates, this - // would be possible but with inlining and LTO we have to use the least - // common denominator - otherwise we would run into conflicts. - fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - substs: &subst::Substs<'tcx>, - output: &mut String) { - if substs.types.is_empty() { - return; - } - - output.push('<'); - - for &type_parameter in &substs.types { - push_debuginfo_type_name(cx, type_parameter, true, output); - output.push_str(", "); - } - - output.pop(); - output.pop(); - - output.push('>'); - } -} diff --git a/src/librustc_trans/trans/debuginfo/utils.rs b/src/librustc_trans/trans/debuginfo/utils.rs deleted file mode 100644 index 276f9936ac52a..0000000000000 --- a/src/librustc_trans/trans/debuginfo/utils.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Utility Functions. - -use super::{FunctionDebugContext, CrateDebugContext}; -use super::namespace::namespace_for_item; - -use middle::def_id::DefId; - -use llvm; -use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; -use trans::machine; -use trans::common::{CrateContext, FunctionContext}; -use trans::type_::Type; - -use syntax::codemap::Span; -use syntax::{ast, codemap}; - -pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool -{ - // The is_local_to_unit flag indicates whether a function is local to the - // current compilation unit (i.e. if it is *static* in the C-sense). The - // *reachable* set should provide a good approximation of this, as it - // contains everything that might leak out of the current crate (by being - // externally visible or by being inlined into something externally - // visible). It might better to use the `exported_items` set from - // `driver::CrateAnalysis` in the future, but (atm) this set is not - // available in the translation pass. - !cx.reachable().contains(&node_id) -} - -#[allow(non_snake_case)] -pub fn create_DIArray(builder: DIBuilderRef, arr: &[DIDescriptor]) -> DIArray { - return unsafe { - llvm::LLVMDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) - }; -} - -pub fn contains_nodebug_attribute(attributes: &[ast::Attribute]) -> bool { - attributes.iter().any(|attr| { - let meta_item: &ast::MetaItem = &*attr.node.value; - match meta_item.node { - ast::MetaWord(ref value) => &value[..] 
== "no_debug", - _ => false - } - }) -} - -/// Return codemap::Loc corresponding to the beginning of the span -pub fn span_start(cx: &CrateContext, span: Span) -> codemap::Loc { - cx.sess().codemap().lookup_char_pos(span.lo) -} - -pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u64) { - (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type) as u64) -} - -pub fn bytes_to_bits(bytes: u64) -> u64 { - bytes * 8 -} - -#[inline] -pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>) - -> &'a CrateDebugContext<'tcx> { - let debug_context: &'a CrateDebugContext<'tcx> = cx.dbg_cx().as_ref().unwrap(); - debug_context -} - -#[inline] -#[allow(non_snake_case)] -pub fn DIB(cx: &CrateContext) -> DIBuilderRef { - cx.dbg_cx().as_ref().unwrap().builder -} - -pub fn fn_should_be_ignored(fcx: &FunctionContext) -> bool { - match fcx.debug_context { - FunctionDebugContext::RegularContext(_) => false, - _ => true - } -} - -pub fn assert_type_for_node_id(cx: &CrateContext, - node_id: ast::NodeId, - error_reporting_span: Span) { - if !cx.tcx().node_types().contains_key(&node_id) { - cx.sess().span_bug(error_reporting_span, - "debuginfo: Could not find type for node id!"); - } -} - -pub fn get_namespace_and_span_for_item(cx: &CrateContext, def_id: DefId) - -> (DIScope, Span) { - let containing_scope = namespace_for_item(cx, def_id).scope; - let definition_span = cx.tcx().map.def_id_span(def_id, codemap::DUMMY_SP /* (1) */ ); - - // (1) For external items there is no span information - - (containing_scope, definition_span) -} diff --git a/src/librustc_trans/trans/declare.rs b/src/librustc_trans/trans/declare.rs deleted file mode 100644 index b9e74beaf55ae..0000000000000 --- a/src/librustc_trans/trans/declare.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -//! Declare various LLVM values. -//! -//! Prefer using functions and methods from this module rather than calling LLVM -//! functions directly. These functions do some additional work to ensure we do -//! the right thing given the preconceptions of trans. -//! -//! Some useful guidelines: -//! -//! * Use declare_* family of methods if you are declaring, but are not -//! interested in defining the ValueRef they return. -//! * Use define_* family of methods when you might be defining the ValueRef. -//! * When in doubt, define. -use llvm::{self, ValueRef}; -use middle::ty; -use middle::infer; -use syntax::abi; -use trans::attributes; -use trans::base; -use trans::context::CrateContext; -use trans::type_::Type; -use trans::type_of; - -use std::ffi::CString; -use libc::c_uint; - - -/// Declare a global value. -/// -/// If there’s a value with the same name already declared, the function will -/// return its ValueRef instead. -pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRef { - debug!("declare_global(name={:?})", name); - let namebuf = CString::new(name).unwrap_or_else(|_|{ - ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) - }); - unsafe { - llvm::LLVMGetOrInsertGlobal(ccx.llmod(), namebuf.as_ptr(), ty.to_ref()) - } -} - - -/// Declare a function. -/// -/// For rust functions use `declare_rust_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing ValueRef instead. 
-pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, - ty: Type, output: ty::FnOutput) -> ValueRef { - debug!("declare_fn(name={:?})", name); - let namebuf = CString::new(name).unwrap_or_else(|_|{ - ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) - }); - let llfn = unsafe { - llvm::LLVMGetOrInsertFunction(ccx.llmod(), namebuf.as_ptr(), ty.to_ref()) - }; - - llvm::SetFunctionCallConv(llfn, callconv); - // Function addresses in Rust are never significant, allowing functions to - // be merged. - llvm::SetUnnamedAddr(llfn, true); - - if output == ty::FnDiverging { - llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn); - } - - if ccx.tcx().sess.opts.cg.no_redzone - .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) { - llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone) - } - - llfn -} - - -/// Declare a C ABI function. -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_rust_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing ValueRef instead. -pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type, - output: ty::Ty) -> ValueRef { - declare_fn(ccx, name, llvm::CCallConv, fn_type, ty::FnConverging(output)) -} - - -/// Declare a Rust function. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing ValueRef instead. 
-pub fn declare_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { - debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, - fn_type); - - let function_type; // placeholder so that the memory ownership works out ok - let (sig, abi, env) = match fn_type.sty { - ty::TyBareFn(_, ref f) => { - (&f.sig, f.abi, None) - } - ty::TyClosure(closure_did, ref substs) => { - let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables); - function_type = infcx.closure_type(closure_did, substs); - let self_type = base::self_type_for_closure(ccx, closure_did, fn_type); - let llenvironment_type = type_of::type_of_explicit_arg(ccx, self_type); - debug!("declare_rust_fn function_type={:?} self_type={:?}", - function_type, self_type); - (&function_type.sig, abi::RustCall, Some(llenvironment_type)) - } - _ => ccx.sess().bug("expected closure or fn") - }; - - let sig = ccx.tcx().erase_late_bound_regions(sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - let llfty = type_of::type_of_rust_fn(ccx, env, &sig, abi); - debug!("declare_rust_fn llfty={}", ccx.tn().type_to_string(llfty)); - - // it is ok to directly access sig.0.output because we erased all - // late-bound-regions above - let llfn = declare_fn(ccx, name, llvm::CCallConv, llfty, sig.output); - attributes::from_fn_type(ccx, fn_type).apply_llfn(llfn); - llfn -} - - -/// Declare a Rust function with internal linkage. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing ValueRef instead. -pub fn declare_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { - let llfn = declare_rust_fn(ccx, name, fn_type); - llvm::SetLinkage(llfn, llvm::InternalLinkage); - llfn -} - - -/// Declare a global with an intention to define it. 
-/// -/// Use this function when you intend to define a global. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_global(ccx, name, ty)) - } -} - - -/// Declare a function with an intention to define it. -/// -/// For rust functions use `define_rust_fn` instead. -/// -/// Use this function when you intend to define a function. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, - fn_type: Type, output: ty::FnOutput) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_fn(ccx, name, callconv, fn_type, output)) - } -} - - -/// Declare a C ABI function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_rust_fn` instead. 
-pub fn define_cfn(ccx: &CrateContext, name: &str, fn_type: Type, - output: ty::Ty) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_cfn(ccx, name, fn_type, output)) - } -} - - -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, - fn_type: ty::Ty<'tcx>) -> Option { - if get_defined_value(ccx, name).is_some() { - None - } else { - Some(declare_rust_fn(ccx, name, fn_type)) - } -} - - -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - name: &str, - fn_type: ty::Ty<'tcx>) -> ValueRef { - if get_defined_value(ccx, name).is_some() { - ccx.sess().fatal(&format!("symbol `{}` already defined", name)) - } else { - declare_internal_rust_fn(ccx, name, fn_type) - } -} - - -/// Get defined or externally defined (AvailableExternally linkage) value by -/// name. 
-fn get_defined_value(ccx: &CrateContext, name: &str) -> Option { - debug!("get_defined_value(name={:?})", name); - let namebuf = CString::new(name).unwrap_or_else(|_|{ - ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) - }); - let val = unsafe { llvm::LLVMGetNamedValue(ccx.llmod(), namebuf.as_ptr()) }; - if val.is_null() { - debug!("get_defined_value: {:?} value is null", name); - None - } else { - let (declaration, aext_link) = unsafe { - let linkage = llvm::LLVMGetLinkage(val); - (llvm::LLVMIsDeclaration(val) != 0, - linkage == llvm::AvailableExternallyLinkage as c_uint) - }; - debug!("get_defined_value: found {:?} value (declaration: {}, \ - aext_link: {})", name, declaration, aext_link); - if !declaration || aext_link { - Some(val) - } else { - None - } - } -} diff --git a/src/librustc_trans/trans/expr.rs b/src/librustc_trans/trans/expr.rs deleted file mode 100644 index ab2f4462757d5..0000000000000 --- a/src/librustc_trans/trans/expr.rs +++ /dev/null @@ -1,2677 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Translation of Expressions -//! -//! The expr module handles translation of expressions. The most general -//! translation routine is `trans()`, which will translate an expression -//! into a datum. `trans_into()` is also available, which will translate -//! an expression and write the result directly into memory, sometimes -//! avoiding the need for a temporary stack slot. Finally, -//! `trans_to_lvalue()` is available if you'd like to ensure that the -//! result has cleanup scheduled. -//! -//! Internally, each of these functions dispatches to various other -//! 
expression functions depending on the kind of expression. We divide -//! up expressions into: -//! -//! - **Datum expressions:** Those that most naturally yield values. -//! Examples would be `22`, `box x`, or `a + b` (when not overloaded). -//! - **DPS expressions:** Those that most naturally write into a location -//! in memory. Examples would be `foo()` or `Point { x: 3, y: 4 }`. -//! - **Statement expressions:** That that do not generate a meaningful -//! result. Examples would be `while { ... }` or `return 44`. -//! -//! Public entry points: -//! -//! - `trans_into(bcx, expr, dest) -> bcx`: evaluates an expression, -//! storing the result into `dest`. This is the preferred form, if you -//! can manage it. -//! -//! - `trans(bcx, expr) -> DatumBlock`: evaluates an expression, yielding -//! `Datum` with the result. You can then store the datum, inspect -//! the value, etc. This may introduce temporaries if the datum is a -//! structural type. -//! -//! - `trans_to_lvalue(bcx, expr, "...") -> DatumBlock`: evaluates an -//! expression and ensures that the result has a cleanup associated with it, -//! creating a temporary stack slot if necessary. -//! -//! - `trans_local_var -> Datum`: looks up a local variable or upvar. 
- -#![allow(non_camel_case_types)] - -pub use self::Dest::*; -use self::lazy_binop_ty::*; - -use back::abi; -use llvm::{self, ValueRef, TypeKind}; -use middle::check_const; -use middle::def; -use middle::lang_items::CoerceUnsizedTraitLangItem; -use middle::subst::{Substs, VecPerParamSpace}; -use middle::traits; -use trans::{_match, adt, asm, base, callee, closure, consts, controlflow}; -use trans::base::*; -use trans::build::*; -use trans::cleanup::{self, CleanupMethods, DropHintMethods}; -use trans::common::*; -use trans::datum::*; -use trans::debuginfo::{self, DebugLoc, ToDebugLoc}; -use trans::declare; -use trans::glue; -use trans::machine; -use trans::meth; -use trans::tvec; -use trans::type_of; -use trans::Disr; -use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; -use middle::ty::adjustment::{AdjustUnsafeFnPointer, CustomCoerceUnsized}; -use middle::ty::{self, Ty}; -use middle::ty::MethodCall; -use middle::ty::cast::{CastKind, CastTy}; -use util::common::indenter; -use trans::machine::{llsize_of, llsize_of_alloc}; -use trans::type_::Type; - -use rustc_front; -use rustc_front::hir; - -use syntax::{ast, codemap}; -use syntax::parse::token::InternedString; -use syntax::ptr::P; -use syntax::parse::token; -use std::mem; - -// Destinations - -// These are passed around by the code generating functions to track the -// destination of a computation's value. - -#[derive(Copy, Clone, PartialEq)] -pub enum Dest { - SaveIn(ValueRef), - Ignore, -} - -impl Dest { - pub fn to_string(&self, ccx: &CrateContext) -> String { - match *self { - SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)), - Ignore => "Ignore".to_string() - } - } -} - -/// This function is equivalent to `trans(bcx, expr).store_to_dest(dest)` but it may generate -/// better optimized LLVM code. 
-pub fn trans_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - - debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - - if adjustment_required(bcx, expr) { - // use trans, which may be less efficient but - // which will perform the adjustments: - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.store_to_dest(bcx, dest, expr.id); - } - - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - if !qualif.intersects( - check_const::ConstQualif::NOT_CONST | - check_const::ConstQualif::NEEDS_DROP - ) { - if !qualif.intersects(check_const::ConstQualif::PREFER_IN_PLACE) { - if let SaveIn(lldest) = dest { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - // Cast pointer to destination, because constants - // have different types. - let lldest = PointerCast(bcx, lldest, val_ty(global)); - memcpy_ty(bcx, lldest, global, expr_ty_adjusted(bcx, expr)); - return bcx; - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - return bcx; - }, - } - } - // Even if we don't have a value to emit, and the expression - // doesn't have any side-effects, we still have to translate the - // body of any closures. - // FIXME: Find a better way of handling this case. - } else { - // The only way we're going to see a `const` at this point is if - // it prefers in-place instantiation, likely because it contains - // `[x; N]` somewhere within. - match expr.node { - hir::ExprPath(..) 
=> { - match bcx.def(expr.id) { - def::DefConst(did) => { - let empty_substs = bcx.tcx().mk_substs(Substs::trans_empty()); - let const_expr = consts::get_const_expr(bcx.ccx(), did, expr, - empty_substs); - // Temporarily get cleanup scopes out of the way, - // as they require sub-expressions to be contained - // inside the current AST scope. - // These should record no cleanups anyways, `const` - // can't have destructors. - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - vec![]); - // Lock emitted debug locations to the location of - // the constant reference expression. - debuginfo::with_source_location_override(bcx.fcx, - expr.debug_loc(), - || { - bcx = trans_into(bcx, const_expr, dest) - }); - let scopes = mem::replace(&mut *bcx.fcx.scopes.borrow_mut(), - scopes); - assert!(scopes.is_empty()); - return bcx; - } - _ => {} - } - } - _ => {} - } - } - } - - debug!("trans_into() expr={:?}", expr); - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - bcx.fcx.push_ast_cleanup_scope(cleanup_debug_loc); - - let kind = expr_kind(bcx.tcx(), expr); - bcx = match kind { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - trans_unadjusted(bcx, expr).store_to_dest(dest, expr.id) - } - ExprKind::RvalueDps => { - trans_rvalue_dps_unadjusted(bcx, expr, dest) - } - ExprKind::RvalueStmt => { - trans_rvalue_stmt_unadjusted(bcx, expr) - } - }; - - bcx.fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id) -} - -/// Translates an expression, returning a datum (and new block) encapsulating the result. When -/// possible, it is preferred to use `trans_into`, as that may avoid creating a temporary on the -/// stack. 
-pub fn trans<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - debug!("trans(expr={:?})", expr); - - let mut bcx = bcx; - let fcx = bcx.fcx; - let qualif = *bcx.tcx().const_qualif_map.borrow().get(&expr.id).unwrap(); - let adjusted_global = !qualif.intersects(check_const::ConstQualif::NON_STATIC_BORROWS); - let global = if !qualif.intersects( - check_const::ConstQualif::NOT_CONST | - check_const::ConstQualif::NEEDS_DROP - ) { - match consts::get_const_expr_as_global(bcx.ccx(), expr, qualif, - bcx.fcx.param_substs, - consts::TrueConst::No) { - Ok(global) => { - if qualif.intersects(check_const::ConstQualif::HAS_STATIC_BORROWS) { - // Is borrowed as 'static, must return lvalue. - - // Cast pointer to global, because constants have different types. - let const_ty = expr_ty_adjusted(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - let datum = Datum::new(global, const_ty, Lvalue::new("expr::trans")); - return DatumBlock::new(bcx, datum.to_expr_datum()); - } - - // Otherwise, keep around and perform adjustments, if needed. - let const_ty = if adjusted_global { - expr_ty_adjusted(bcx, expr) - } else { - expr_ty(bcx, expr) - }; - - // This could use a better heuristic. - Some(if type_is_immediate(bcx.ccx(), const_ty) { - // Cast pointer to global, because constants have different types. - let llty = type_of::type_of(bcx.ccx(), const_ty); - let global = PointerCast(bcx, global, llty.ptr_to()); - // Maybe just get the value directly, instead of loading it? - immediate_rvalue(load_ty(bcx, global, const_ty), const_ty) - } else { - let scratch = alloc_ty(bcx, const_ty, "const"); - call_lifetime_start(bcx, scratch); - let lldest = if !const_ty.is_structural() { - // Cast pointer to slot, because constants have different types. 
- PointerCast(bcx, scratch, val_ty(global)) - } else { - // In this case, memcpy_ty calls llvm.memcpy after casting both - // source and destination to i8*, so we don't need any casts. - scratch - }; - memcpy_ty(bcx, lldest, global, const_ty); - Datum::new(scratch, const_ty, Rvalue::new(ByRef)) - }) - }, - Err(consts::ConstEvalFailure::Runtime(_)) => { - // in case const evaluation errors, translate normally - // debug assertions catch the same errors - // see RFC 1229 - None - }, - Err(consts::ConstEvalFailure::Compiletime(_)) => { - // generate a dummy llvm value - let const_ty = expr_ty(bcx, expr); - let llty = type_of::type_of(bcx.ccx(), const_ty); - let dummy = C_undef(llty.ptr_to()); - Some(Datum::new(dummy, const_ty, Rvalue::new(ByRef))) - }, - } - } else { - None - }; - - let cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - expr.id, - expr.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = match global { - Some(rvalue) => rvalue.to_expr_datum(), - None => unpack_datum!(bcx, trans_unadjusted(bcx, expr)) - }; - let datum = if adjusted_global { - datum // trans::consts already performed adjustments. 
- } else { - unpack_datum!(bcx, apply_adjustments(bcx, expr, datum)) - }; - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, expr.id); - return DatumBlock::new(bcx, datum); -} - -pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) -} - -pub fn copy_fat_ptr(bcx: Block, src_ptr: ValueRef, dst_ptr: ValueRef) { - Store(bcx, Load(bcx, get_dataptr(bcx, src_ptr)), get_dataptr(bcx, dst_ptr)); - Store(bcx, Load(bcx, get_meta(bcx, src_ptr)), get_meta(bcx, dst_ptr)); -} - -fn adjustment_required<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) -> bool { - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { return false; } - Some(adj) => adj - }; - - // Don't skip a conversion from Box to &T, etc. - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - return true; - } - - match adjustment { - AdjustReifyFnPointer => { - // FIXME(#19925) once fn item types are - // zero-sized, we'll need to return true here - false - } - AdjustUnsafeFnPointer => { - // purely a type-level thing - false - } - AdjustDerefRef(ref adj) => { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - !(adj.autoderefs == 1 && adj.autoref.is_some() && adj.unsize.is_none()) - } - } -} - -/// Helper for trans that apply adjustments from `expr` to `datum`, which should be the unadjusted -/// translation of `expr`. 
-fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let mut bcx = bcx; - let mut datum = datum; - let adjustment = match bcx.tcx().tables.borrow().adjustments.get(&expr.id).cloned() { - None => { - return DatumBlock::new(bcx, datum); - } - Some(adj) => { adj } - }; - debug!("unadjusted datum for expr {:?}: {} adjustment={:?}", - expr, - datum.to_string(bcx.ccx()), - adjustment); - match adjustment { - AdjustReifyFnPointer => { - // FIXME(#19925) once fn item types are - // zero-sized, we'll need to do something here - } - AdjustUnsafeFnPointer => { - // purely a type-level thing - } - AdjustDerefRef(ref adj) => { - let skip_reborrows = if adj.autoderefs == 1 && adj.autoref.is_some() { - // We are a bit paranoid about adjustments and thus might have a re- - // borrow here which merely derefs and then refs again (it might have - // a different region or mutability, but we don't care here). - match datum.ty.sty { - // Don't skip a conversion from Box to &T, etc. - ty::TyRef(..) => { - if bcx.tcx().is_overloaded_autoderef(expr.id, 0) { - // Don't skip an overloaded deref. - 0 - } else { - 1 - } - } - _ => 0 - } - } else { - 0 - }; - - if adj.autoderefs > skip_reborrows { - // Schedule cleanup. - let lval = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "auto_deref", expr.id)); - datum = unpack_datum!(bcx, deref_multiple(bcx, expr, - lval.to_expr_datum(), - adj.autoderefs - skip_reborrows)); - } - - // (You might think there is a more elegant way to do this than a - // skip_reborrows bool, but then you remember that the borrow checker exists). 
- if skip_reborrows == 0 && adj.autoref.is_some() { - datum = unpack_datum!(bcx, auto_ref(bcx, datum, expr)); - } - - if let Some(target) = adj.unsize { - // We do not arrange cleanup ourselves; if we already are an - // L-value, then cleanup will have already been scheduled (and - // the `datum.to_rvalue_datum` call below will emit code to zero - // the drop flag when moving out of the L-value). If we are an - // R-value, then we do not need to schedule cleanup. - let source_datum = unpack_datum!(bcx, - datum.to_rvalue_datum(bcx, "__coerce_source")); - - let target = bcx.monomorphize(&target); - - let scratch = alloc_ty(bcx, target, "__coerce_target"); - call_lifetime_start(bcx, scratch); - let target_datum = Datum::new(scratch, target, - Rvalue::new(ByRef)); - bcx = coerce_unsized(bcx, expr.span, source_datum, target_datum); - datum = Datum::new(scratch, target, - RvalueExpr(Rvalue::new(ByRef))); - } - } - } - debug!("after adjustments, datum={}", datum.to_string(bcx.ccx())); - DatumBlock::new(bcx, datum) -} - -fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - span: codemap::Span, - source: Datum<'tcx, Rvalue>, - target: Datum<'tcx, Rvalue>) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - debug!("coerce_unsized({} -> {})", - source.to_string(bcx.ccx()), - target.to_string(bcx.ccx())); - - match (&source.ty.sty, &target.ty.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - let (inner_source, inner_target) = (a, b); - - let (base, old_info) = if !type_is_sized(bcx.tcx(), inner_source) { - // Normally, the source is a thin pointer and we are - // adding extra info to make a fat pointer. 
The exception - // is when we are upcasting an existing object fat pointer - // to use a different vtable. In that case, we want to - // load out the original data pointer so we can repackage - // it. - (Load(bcx, get_dataptr(bcx, source.val)), - Some(Load(bcx, get_meta(bcx, source.val)))) - } else { - let val = if source.kind.is_by_ref() { - load_ty(bcx, source.val, source.ty) - } else { - source.val - }; - (val, None) - }; - - let info = unsized_info(bcx.ccx(), inner_source, inner_target, - old_info, bcx.fcx.param_substs); - - // Compute the base pointer. This doesn't change the pointer value, - // but merely its type. - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), inner_target).ptr_to(); - let base = PointerCast(bcx, base, ptr_ty); - - Store(bcx, base, get_dataptr(bcx, target.val)); - Store(bcx, info, get_meta(bcx, target.val)); - } - - // This can be extended to enums and tuples in the future. - // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) | - (&ty::TyStruct(def_id_a, _), &ty::TyStruct(def_id_b, _)) => { - assert_eq!(def_id_a, def_id_b); - - // The target is already by-ref because it's to be written to. - let source = unpack_datum!(bcx, source.to_ref_datum(bcx)); - assert!(target.kind.is_by_ref()); - - let trait_substs = Substs::erased(VecPerParamSpace::new(vec![target.ty], - vec![source.ty], - Vec::new())); - let trait_ref = ty::Binder(ty::TraitRef { - def_id: langcall(bcx, Some(span), "coercion", - CoerceUnsizedTraitLangItem), - substs: bcx.tcx().mk_substs(trait_substs) - }); - - let kind = match fulfill_obligation(bcx.ccx(), span, trait_ref) { - traits::VtableImpl(traits::VtableImplData { impl_def_id, .. 
}) => { - bcx.tcx().custom_coerce_unsized_kind(impl_def_id) - } - vtable => { - bcx.sess().span_bug(span, &format!("invalid CoerceUnsized vtable: {:?}", - vtable)); - } - }; - - let repr_source = adt::represent_type(bcx.ccx(), source.ty); - let src_fields = match &*repr_source { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bcx.sess().span_bug(span, - &format!("Non univariant struct? (repr_source: {:?})", - repr_source)), - }; - let repr_target = adt::represent_type(bcx.ccx(), target.ty); - let target_fields = match &*repr_target { - &adt::Repr::Univariant(ref s, _) => &s.fields, - _ => bcx.sess().span_bug(span, - &format!("Non univariant struct? (repr_target: {:?})", - repr_target)), - }; - - let coerce_index = match kind { - CustomCoerceUnsized::Struct(i) => i - }; - assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len()); - - let source_val = adt::MaybeSizedValue::sized(source.val); - let target_val = adt::MaybeSizedValue::sized(target.val); - - let iter = src_fields.iter().zip(target_fields).enumerate(); - for (i, (src_ty, target_ty)) in iter { - let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, Disr(0), i); - let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, Disr(0), i); - - // If this is the field we need to coerce, recurse on it. - if i == coerce_index { - coerce_unsized(bcx, span, - Datum::new(ll_source, src_ty, - Rvalue::new(ByRef)), - Datum::new(ll_target, target_ty, - Rvalue::new(ByRef))); - } else { - // Otherwise, simply copy the data from the source. - assert!(src_ty.is_phantom_data() || src_ty == target_ty); - memcpy_ty(bcx, ll_target, ll_source, src_ty); - } - } - } - _ => bcx.sess().bug(&format!("coerce_unsized: invalid coercion {:?} -> {:?}", - source.ty, - target.ty)) - } - bcx -} - -/// Translates an expression in "lvalue" mode -- meaning that it returns a reference to the memory -/// that the expr represents. 
-/// -/// If this expression is an rvalue, this implies introducing a temporary. In other words, -/// something like `x().f` is translated into roughly the equivalent of -/// -/// { tmp = x(); tmp.f } -pub fn trans_to_lvalue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - name: &str) - -> DatumBlock<'blk, 'tcx, Lvalue> { - let mut bcx = bcx; - let datum = unpack_datum!(bcx, trans(bcx, expr)); - return datum.to_lvalue_datum(bcx, name, expr.id); -} - -/// A version of `trans` that ignores adjustments. You almost certainly do not want to call this -/// directly. -fn trans_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - debug!("trans_unadjusted(expr={:?})", expr); - let _indenter = indenter(); - - debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - - return match expr_kind(bcx.tcx(), expr) { - ExprKind::Lvalue | ExprKind::RvalueDatum => { - let datum = unpack_datum!(bcx, { - trans_datum_unadjusted(bcx, expr) - }); - - DatumBlock {bcx: bcx, datum: datum} - } - - ExprKind::RvalueStmt => { - bcx = trans_rvalue_stmt_unadjusted(bcx, expr); - nil(bcx, expr_ty(bcx, expr)) - } - - ExprKind::RvalueDps => { - let ty = expr_ty(bcx, expr); - if type_is_zero_size(bcx.ccx(), ty) { - bcx = trans_rvalue_dps_unadjusted(bcx, expr, Ignore); - nil(bcx, ty) - } else { - let scratch = rvalue_scratch_datum(bcx, ty, ""); - bcx = trans_rvalue_dps_unadjusted( - bcx, expr, SaveIn(scratch.val)); - - // Note: this is not obviously a good idea. It causes - // immediate values to be loaded immediately after a - // return from a call or other similar expression, - // which in turn leads to alloca's having shorter - // lifetimes and hence larger stack frames. However, - // in turn it can lead to more register pressure. - // Still, in practice it seems to increase - // performance, since we have fewer problems with - // morestack churn. 
- let scratch = unpack_datum!( - bcx, scratch.to_appropriate_datum(bcx)); - - DatumBlock::new(bcx, scratch.to_expr_datum()) - } - } - }; - - fn nil<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let llval = C_undef(type_of::type_of(bcx.ccx(), ty)); - let datum = immediate_rvalue(llval, ty); - DatumBlock::new(bcx, datum.to_expr_datum()) - } -} - -fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let fcx = bcx.fcx; - let _icx = push_ctxt("trans_datum_unadjusted"); - - match expr.node { - hir::ExprType(ref e, _) => { - trans(bcx, &**e) - } - hir::ExprPath(..) => { - trans_def(bcx, expr, bcx.def(expr.id)) - } - hir::ExprField(ref base, name) => { - trans_rec_field(bcx, &**base, name.node) - } - hir::ExprTupField(ref base, idx) => { - trans_rec_tup_field(bcx, &**base, idx.node) - } - hir::ExprIndex(ref base, ref idx) => { - trans_index(bcx, expr, &**base, &**idx, MethodCall::expr(expr.id)) - } - hir::ExprBox(ref contents) => { - // Special case for `Box` - let box_ty = expr_ty(bcx, expr); - let contents_ty = expr_ty(bcx, &**contents); - match box_ty.sty { - ty::TyBox(..) => { - trans_uniq_expr(bcx, expr, box_ty, &**contents, contents_ty) - } - _ => bcx.sess().span_bug(expr.span, - "expected unique box") - } - - } - hir::ExprLit(ref lit) => trans_immediate_lit(bcx, expr, &**lit), - hir::ExprBinary(op, ref lhs, ref rhs) => { - trans_binary(bcx, expr, op, &**lhs, &**rhs) - } - hir::ExprUnary(op, ref x) => { - trans_unary(bcx, expr, op, &**x) - } - hir::ExprAddrOf(_, ref x) => { - match x.node { - hir::ExprRepeat(..) | hir::ExprVec(..) => { - // Special case for slices. 
- let cleanup_debug_loc = - debuginfo::get_cleanup_debug_loc_for_ast_node(bcx.ccx(), - x.id, - x.span, - false); - fcx.push_ast_cleanup_scope(cleanup_debug_loc); - let datum = unpack_datum!( - bcx, tvec::trans_slice_vec(bcx, expr, &**x)); - bcx = fcx.pop_and_trans_ast_cleanup_scope(bcx, x.id); - DatumBlock::new(bcx, datum) - } - _ => { - trans_addr_of(bcx, expr, &**x) - } - } - } - hir::ExprCast(ref val, _) => { - // Datum output mode means this is a scalar cast: - trans_imm_cast(bcx, &**val, expr.id) - } - _ => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("trans_rvalue_datum_unadjusted reached \ - fall-through case: {:?}", - expr.node)); - } - } -} - -fn trans_field<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - get_idx: F) - -> DatumBlock<'blk, 'tcx, Expr> where - F: FnOnce(&'blk ty::ctxt<'tcx>, &VariantInfo<'tcx>) -> usize, -{ - let mut bcx = bcx; - let _icx = push_ctxt("trans_rec_field"); - - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, base, "field")); - let bare_ty = base_datum.ty; - let repr = adt::represent_type(bcx.ccx(), bare_ty); - let vinfo = VariantInfo::from_ty(bcx.tcx(), bare_ty, None); - - let ix = get_idx(bcx.tcx(), &vinfo); - let d = base_datum.get_element( - bcx, - vinfo.fields[ix].1, - |srcval| { - adt::trans_field_ptr(bcx, &*repr, srcval, vinfo.discr, ix) - }); - - if type_is_sized(bcx.tcx(), d.ty) { - DatumBlock { datum: d.to_expr_datum(), bcx: bcx } - } else { - let scratch = rvalue_scratch_datum(bcx, d.ty, ""); - Store(bcx, d.val, get_dataptr(bcx, scratch.val)); - let info = Load(bcx, get_meta(bcx, base_datum.val)); - Store(bcx, info, get_meta(bcx, scratch.val)); - - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. - DatumBlock::new(bcx, Datum::new(scratch.val, scratch.ty, LvalueExpr(d.kind))) - } -} - -/// Translates `base.field`. 
-fn trans_rec_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - field: ast::Name) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, vinfo| vinfo.field_index(field)) -} - -/// Translates `base.`. -fn trans_rec_tup_field<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - base: &hir::Expr, - idx: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - trans_field(bcx, base, |_, _| idx) -} - -fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - index_expr: &hir::Expr, - base: &hir::Expr, - idx: &hir::Expr, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - //! Translates `base[idx]`. - - let _icx = push_ctxt("trans_index"); - let ccx = bcx.ccx(); - let mut bcx = bcx; - - let index_expr_debug_loc = index_expr.debug_loc(); - - // Check for overloaded index. - let method_ty = ccx.tcx() - .tables - .borrow() - .method_map - .get(&method_call) - .map(|method| method.ty); - let elt_datum = match method_ty { - Some(method_ty) => { - let method_ty = monomorphize_type(bcx, method_ty); - - let base_datum = unpack_datum!(bcx, trans(bcx, base)); - - // Translate index expression. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - - let ref_ty = // invoked methods have LB regions instantiated: - bcx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap(); - let elt_ty = match ref_ty.builtin_deref(true, ty::NoPreference) { - None => { - bcx.tcx().sess.span_bug(index_expr.span, - "index method didn't return a \ - dereferenceable type?!") - } - Some(elt_tm) => elt_tm.ty, - }; - - // Overloaded. Evaluate `trans_overloaded_op`, which will - // invoke the user's index() method, which basically yields - // a `&T` pointer. We can then proceed down the normal - // path (below) to dereference that `&T`. 
- let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_index_elt"); - unpack_result!(bcx, - trans_overloaded_op(bcx, - index_expr, - method_call, - base_datum, - Some((ix_datum, idx.id)), - Some(SaveIn(scratch.val)), - false)); - let datum = scratch.to_expr_datum(); - let lval = Lvalue::new("expr::trans_index overload"); - if type_is_sized(bcx.tcx(), elt_ty) { - Datum::new(datum.to_llscalarish(bcx), elt_ty, LvalueExpr(lval)) - } else { - Datum::new(datum.val, elt_ty, LvalueExpr(lval)) - } - } - None => { - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, - base, - "index")); - - // Translate index expression and cast to a suitable LLVM integer. - // Rust is less strict than LLVM in this regard. - let ix_datum = unpack_datum!(bcx, trans(bcx, idx)); - let ix_val = ix_datum.to_llscalarish(bcx); - let ix_size = machine::llbitsize_of_real(bcx.ccx(), - val_ty(ix_val)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), - ccx.int_type()); - let ix_val = { - if ix_size < int_size { - if expr_ty(bcx, idx).is_signed() { - SExt(bcx, ix_val, ccx.int_type()) - } else { ZExt(bcx, ix_val, ccx.int_type()) } - } else if ix_size > int_size { - Trunc(bcx, ix_val, ccx.int_type()) - } else { - ix_val - } - }; - - let unit_ty = base_datum.ty.sequence_element_type(bcx.tcx()); - - let (base, len) = base_datum.get_vec_base_and_len(bcx); - - debug!("trans_index: base {}", bcx.val_to_string(base)); - debug!("trans_index: len {}", bcx.val_to_string(len)); - - let bounds_check = ICmp(bcx, - llvm::IntUGE, - ix_val, - len, - index_expr_debug_loc); - let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - let expected = Call(bcx, - expect, - &[bounds_check, C_bool(ccx, false)], - None, - index_expr_debug_loc); - bcx = with_cond(bcx, expected, |bcx| { - controlflow::trans_fail_bounds_check(bcx, - expr_info(index_expr), - ix_val, - len) - }); - let elt = InBoundsGEP(bcx, base, &[ix_val]); - let elt = PointerCast(bcx, elt, type_of::type_of(ccx, unit_ty).ptr_to()); - let lval = 
Lvalue::new("expr::trans_index fallback"); - Datum::new(elt, unit_ty, LvalueExpr(lval)) - } - }; - - DatumBlock::new(bcx, elt_datum) -} - -fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &hir::Expr, - def: def::Def) - -> DatumBlock<'blk, 'tcx, Expr> { - //! Translates a reference to a path. - - let _icx = push_ctxt("trans_def_lvalue"); - match def { - def::DefFn(..) | def::DefMethod(..) | - def::DefStruct(_) | def::DefVariant(..) => { - let datum = trans_def_fn_unadjusted(bcx.ccx(), ref_expr, def, - bcx.fcx.param_substs); - DatumBlock::new(bcx, datum.to_expr_datum()) - } - def::DefStatic(did, _) => { - let const_ty = expr_ty(bcx, ref_expr); - let val = get_static_val(bcx.ccx(), did, const_ty); - let lval = Lvalue::new("expr::trans_def"); - DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval))) - } - def::DefConst(_) => { - bcx.sess().span_bug(ref_expr.span, - "constant expression should not reach expr::trans_def") - } - _ => { - DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum()) - } - } -} - -fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr) - -> Block<'blk, 'tcx> { - let mut bcx = bcx; - let _icx = push_ctxt("trans_rvalue_stmt"); - - if bcx.unreachable.get() { - return bcx; - } - - debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - - match expr.node { - hir::ExprBreak(label_opt) => { - controlflow::trans_break(bcx, expr, label_opt.map(|l| l.node.name)) - } - hir::ExprType(ref e, _) => { - trans_into(bcx, &**e, Ignore) - } - hir::ExprAgain(label_opt) => { - controlflow::trans_cont(bcx, expr, label_opt.map(|l| l.node.name)) - } - hir::ExprRet(ref ex) => { - // Check to see if the return expression itself is reachable. 
- // This can occur when the inner expression contains a return - let reachable = if let Some(ref cfg) = bcx.fcx.cfg { - cfg.node_is_reachable(expr.id) - } else { - true - }; - - if reachable { - controlflow::trans_ret(bcx, expr, ex.as_ref().map(|e| &**e)) - } else { - // If it's not reachable, just translate the inner expression - // directly. This avoids having to manage a return slot when - // it won't actually be used anyway. - if let &Some(ref x) = ex { - bcx = trans_into(bcx, &**x, Ignore); - } - // Mark the end of the block as unreachable. Once we get to - // a return expression, there's no more we should be doing - // after this. - Unreachable(bcx); - bcx - } - } - hir::ExprWhile(ref cond, ref body, _) => { - controlflow::trans_while(bcx, expr, &**cond, &**body) - } - hir::ExprLoop(ref body, _) => { - controlflow::trans_loop(bcx, expr, &**body) - } - hir::ExprAssign(ref dst, ref src) => { - let src_datum = unpack_datum!(bcx, trans(bcx, &**src)); - let dst_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &**dst, "assign")); - - if bcx.fcx.type_needs_drop(dst_datum.ty) { - // If there are destructors involved, make sure we - // are copying from an rvalue, since that cannot possible - // alias an lvalue. We are concerned about code like: - // - // a = a - // - // but also - // - // a = a.b - // - // where e.g. a : Option and a.b : - // Option. In that case, freeing `a` before the - // assignment may also free `a.b`! - // - // We could avoid this intermediary with some analysis - // to determine whether `dst` may possibly own `src`. - debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - let src_datum = unpack_datum!( - bcx, src_datum.to_rvalue_datum(bcx, "ExprAssign")); - let opt_hint_datum = dst_datum.kind.drop_flag_info.hint_datum(bcx); - let opt_hint_val = opt_hint_datum.map(|d|d.to_value()); - - // 1. Drop the data at the destination, passing the - // drop-hint in case the lvalue has already been - // dropped or moved. 
- bcx = glue::drop_ty_core(bcx, - dst_datum.val, - dst_datum.ty, - expr.debug_loc(), - false, - opt_hint_val); - - // 2. We are overwriting the destination; ensure that - // its drop-hint (if any) says "initialized." - if let Some(hint_val) = opt_hint_val { - let hint_llval = hint_val.value(); - let drop_needed = C_u8(bcx.fcx.ccx, adt::DTOR_NEEDED_HINT); - Store(bcx, drop_needed, hint_llval); - } - src_datum.store_to(bcx, dst_datum.val) - } else { - src_datum.store_to(bcx, dst_datum.val) - } - } - hir::ExprAssignOp(op, ref dst, ref src) => { - let has_method_map = bcx.tcx() - .tables - .borrow() - .method_map - .contains_key(&MethodCall::expr(expr.id)); - - if has_method_map { - let dst = unpack_datum!(bcx, trans(bcx, &**dst)); - let src_datum = unpack_datum!(bcx, trans(bcx, &**src)); - trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), dst, - Some((src_datum, src.id)), None, false).bcx - } else { - trans_assign_op(bcx, expr, op, &**dst, &**src) - } - } - hir::ExprInlineAsm(ref a) => { - asm::trans_inline_asm(bcx, a) - } - _ => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("trans_rvalue_stmt_unadjusted reached \ - fall-through case: {:?}", - expr.node)); - } - } -} - -fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_rvalue_dps_unadjusted"); - let mut bcx = bcx; - let tcx = bcx.tcx(); - - debuginfo::set_source_location(bcx.fcx, expr.id, expr.span); - - match expr.node { - hir::ExprType(ref e, _) => { - trans_into(bcx, &**e, dest) - } - hir::ExprPath(..) 
=> { - trans_def_dps_unadjusted(bcx, expr, bcx.def(expr.id), dest) - } - hir::ExprIf(ref cond, ref thn, ref els) => { - controlflow::trans_if(bcx, expr.id, &**cond, &**thn, els.as_ref().map(|e| &**e), dest) - } - hir::ExprMatch(ref discr, ref arms, _) => { - _match::trans_match(bcx, expr, &**discr, &arms[..], dest) - } - hir::ExprBlock(ref blk) => { - controlflow::trans_block(bcx, &**blk, dest) - } - hir::ExprStruct(_, ref fields, ref base) => { - trans_struct(bcx, - &fields[..], - base.as_ref().map(|e| &**e), - expr.span, - expr.id, - node_id_type(bcx, expr.id), - dest) - } - hir::ExprRange(ref start, ref end) => { - // FIXME it is just not right that we are synthesising ast nodes in - // trans. Shudder. - fn make_field(field_name: &str, expr: P) -> hir::Field { - hir::Field { - name: codemap::dummy_spanned(token::intern(field_name)), - expr: expr, - span: codemap::DUMMY_SP, - } - } - - // A range just desugars into a struct. - // Note that the type of the start and end may not be the same, but - // they should only differ in their lifetime, which should not matter - // in trans. 
- let (did, fields, ty_params) = match (start, end) { - (&Some(ref start), &Some(ref end)) => { - // Desugar to Range - let fields = vec![make_field("start", start.clone()), - make_field("end", end.clone())]; - (tcx.lang_items.range_struct(), fields, vec![node_id_type(bcx, start.id)]) - } - (&Some(ref start), &None) => { - // Desugar to RangeFrom - let fields = vec![make_field("start", start.clone())]; - (tcx.lang_items.range_from_struct(), fields, vec![node_id_type(bcx, start.id)]) - } - (&None, &Some(ref end)) => { - // Desugar to RangeTo - let fields = vec![make_field("end", end.clone())]; - (tcx.lang_items.range_to_struct(), fields, vec![node_id_type(bcx, end.id)]) - } - _ => { - // Desugar to RangeFull - (tcx.lang_items.range_full_struct(), vec![], vec![]) - } - }; - - if let Some(did) = did { - let substs = Substs::new_type(ty_params, vec![]); - trans_struct(bcx, - &fields, - None, - expr.span, - expr.id, - tcx.mk_struct(tcx.lookup_adt_def(did), - tcx.mk_substs(substs)), - dest) - } else { - tcx.sess.span_bug(expr.span, - "No lang item for ranges (how did we get this far?)") - } - } - hir::ExprTup(ref args) => { - let numbered_fields: Vec<(usize, &hir::Expr)> = - args.iter().enumerate().map(|(i, arg)| (i, &**arg)).collect(); - trans_adt(bcx, - expr_ty(bcx, expr), - Disr(0), - &numbered_fields[..], - None, - dest, - expr.debug_loc()) - } - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitStr(ref s, _) => { - tvec::trans_lit_str(bcx, expr, (*s).clone(), dest) - } - _ => { - bcx.tcx() - .sess - .span_bug(expr.span, - "trans_rvalue_dps_unadjusted shouldn't be \ - translating this type of literal") - } - } - } - hir::ExprVec(..) | hir::ExprRepeat(..) => { - tvec::trans_fixed_vstore(bcx, expr, dest) - } - hir::ExprClosure(_, ref decl, ref body) => { - let dest = match dest { - SaveIn(lldest) => closure::Dest::SaveIn(bcx, lldest), - Ignore => closure::Dest::Ignore(bcx.ccx()) - }; - - // NB. 
To get the id of the closure, we don't use - // `local_def_id(id)`, but rather we extract the closure - // def-id from the expr's type. This is because this may - // be an inlined expression from another crate, and we - // want to get the ORIGINAL closure def-id, since that is - // the key we need to find the closure-kind and - // closure-type etc. - let (def_id, substs) = match expr_ty(bcx, expr).sty { - ty::TyClosure(def_id, ref substs) => (def_id, substs), - ref t => - bcx.tcx().sess.span_bug( - expr.span, - &format!("closure expr without closure type: {:?}", t)), - }; - - closure::trans_closure_expr(dest, - decl, - body, - expr.id, - def_id, - substs, - &expr.attrs).unwrap_or(bcx) - } - hir::ExprCall(ref f, ref args) => { - if bcx.tcx().is_method_call(expr.id) { - trans_overloaded_call(bcx, - expr, - &**f, - &args[..], - Some(dest)) - } else { - callee::trans_call(bcx, - expr, - &**f, - callee::ArgExprs(&args[..]), - dest) - } - } - hir::ExprMethodCall(_, _, ref args) => { - callee::trans_method_call(bcx, - expr, - &*args[0], - callee::ArgExprs(&args[..]), - dest) - } - hir::ExprBinary(op, ref lhs, ref rhs) => { - // if not overloaded, would be RvalueDatumExpr - let lhs = unpack_datum!(bcx, trans(bcx, &**lhs)); - let rhs_datum = unpack_datum!(bcx, trans(bcx, &**rhs)); - trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), lhs, - Some((rhs_datum, rhs.id)), Some(dest), - !rustc_front::util::is_by_value_binop(op.node)).bcx - } - hir::ExprUnary(op, ref subexpr) => { - // if not overloaded, would be RvalueDatumExpr - let arg = unpack_datum!(bcx, trans(bcx, &**subexpr)); - trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), - arg, None, Some(dest), !rustc_front::util::is_by_value_unop(op)).bcx - } - hir::ExprIndex(ref base, ref idx) => { - // if not overloaded, would be RvalueDatumExpr - let base = unpack_datum!(bcx, trans(bcx, &**base)); - let idx_datum = unpack_datum!(bcx, trans(bcx, &**idx)); - trans_overloaded_op(bcx, expr, MethodCall::expr(expr.id), 
base, - Some((idx_datum, idx.id)), Some(dest), true).bcx - } - hir::ExprCast(..) => { - // Trait casts used to come this way, now they should be coercions. - bcx.tcx().sess.span_bug(expr.span, "DPS expr_cast (residual trait cast?)") - } - hir::ExprAssignOp(op, _, _) => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("augmented assignment `{}=` should always be a rvalue_stmt", - rustc_front::util::binop_to_string(op.node))) - } - _ => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("trans_rvalue_dps_unadjusted reached fall-through \ - case: {:?}", - expr.node)); - } - } -} - -fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ref_expr: &hir::Expr, - def: def::Def, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_def_dps_unadjusted"); - - let lldest = match dest { - SaveIn(lldest) => lldest, - Ignore => { return bcx; } - }; - - match def { - def::DefVariant(tid, vid, _) => { - let variant = bcx.tcx().lookup_adt_def(tid).variant_with_id(vid); - if let ty::VariantKind::Tuple = variant.kind() { - // N-ary variant. - let llfn = callee::trans_fn_ref(bcx.ccx(), vid, - ExprId(ref_expr.id), - bcx.fcx.param_substs).val; - Store(bcx, llfn, lldest); - return bcx; - } else { - // Nullary variant. 
- let ty = expr_ty(bcx, ref_expr); - let repr = adt::represent_type(bcx.ccx(), ty); - adt::trans_set_discr(bcx, &*repr, lldest, Disr::from(variant.disr_val)); - return bcx; - } - } - def::DefStruct(_) => { - let ty = expr_ty(bcx, ref_expr); - match ty.sty { - ty::TyStruct(def, _) if def.has_dtor() => { - let repr = adt::represent_type(bcx.ccx(), ty); - adt::trans_set_discr(bcx, &*repr, lldest, Disr(0)); - } - _ => {} - } - bcx - } - _ => { - bcx.tcx().sess.span_bug(ref_expr.span, &format!( - "Non-DPS def {:?} referened by {}", - def, bcx.node_id_to_string(ref_expr.id))); - } - } -} - -pub fn trans_def_fn_unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ref_expr: &hir::Expr, - def: def::Def, - param_substs: &'tcx Substs<'tcx>) - -> Datum<'tcx, Rvalue> { - let _icx = push_ctxt("trans_def_datum_unadjusted"); - - match def { - def::DefFn(did, _) | - def::DefStruct(did) | def::DefVariant(_, did, _) => { - callee::trans_fn_ref(ccx, did, ExprId(ref_expr.id), param_substs) - } - def::DefMethod(method_did) => { - match ccx.tcx().impl_or_trait_item(method_did).container() { - ty::ImplContainer(_) => { - callee::trans_fn_ref(ccx, method_did, - ExprId(ref_expr.id), - param_substs) - } - ty::TraitContainer(trait_did) => { - meth::trans_static_method_callee(ccx, method_did, - trait_did, ref_expr.id, - param_substs) - } - } - } - _ => { - ccx.tcx().sess.span_bug(ref_expr.span, &format!( - "trans_def_fn_unadjusted invoked on: {:?} for {:?}", - def, - ref_expr)); - } - } -} - -/// Translates a reference to a local variable or argument. This always results in an lvalue datum. -pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - def: def::Def) - -> Datum<'tcx, Lvalue> { - let _icx = push_ctxt("trans_local_var"); - - match def { - def::DefUpvar(_, nid, _, _) => { - // Can't move upvars, so this is never a ZeroMemLastUse. 
- let local_ty = node_id_type(bcx, nid); - let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)", - bcx, nid, HintKind::ZeroAndMaintain); - match bcx.fcx.llupvars.borrow().get(&nid) { - Some(&val) => Datum::new(val, local_ty, lval), - None => { - bcx.sess().bug(&format!( - "trans_local_var: no llval for upvar {} found", - nid)); - } - } - } - def::DefLocal(_, nid) => { - let datum = match bcx.fcx.lllocals.borrow().get(&nid) { - Some(&v) => v, - None => { - bcx.sess().bug(&format!( - "trans_local_var: no datum for local/arg {} found", - nid)); - } - }; - debug!("take_local(nid={}, v={}, ty={})", - nid, bcx.val_to_string(datum.val), datum.ty); - datum - } - _ => { - bcx.sess().unimpl(&format!( - "unsupported def type in trans_local_var: {:?}", - def)); - } - } -} - -fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - fields: &[hir::Field], - base: Option<&hir::Expr>, - expr_span: codemap::Span, - expr_id: ast::NodeId, - ty: Ty<'tcx>, - dest: Dest) -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_rec"); - - let tcx = bcx.tcx(); - let vinfo = VariantInfo::of_node(tcx, ty, expr_id); - - let mut need_base = vec![true; vinfo.fields.len()]; - - let numbered_fields = fields.iter().map(|field| { - let pos = vinfo.field_index(field.name.node); - need_base[pos] = false; - (pos, &*field.expr) - }).collect::>(); - - let optbase = match base { - Some(base_expr) => { - let mut leftovers = Vec::new(); - for (i, b) in need_base.iter().enumerate() { - if *b { - leftovers.push((i, vinfo.fields[i].1)); - } - } - Some(StructBaseInfo {expr: base_expr, - fields: leftovers }) - } - None => { - if need_base.iter().any(|b| *b) { - tcx.sess.span_bug(expr_span, "missing fields and no base expr") - } - None - } - }; - - trans_adt(bcx, - ty, - vinfo.discr, - &numbered_fields, - optbase, - dest, - DebugLoc::At(expr_id, expr_span)) -} - -/// Information that `trans_adt` needs in order to fill in the fields -/// of a struct copied from a base struct (e.g., from an expression -/// 
like `Foo { a: b, ..base }`. -/// -/// Note that `fields` may be empty; the base expression must always be -/// evaluated for side-effects. -pub struct StructBaseInfo<'a, 'tcx> { - /// The base expression; will be evaluated after all explicit fields. - expr: &'a hir::Expr, - /// The indices of fields to copy paired with their types. - fields: Vec<(usize, Ty<'tcx>)> -} - -/// Constructs an ADT instance: -/// -/// - `fields` should be a list of field indices paired with the -/// expression to store into that field. The initializers will be -/// evaluated in the order specified by `fields`. -/// -/// - `optbase` contains information on the base struct (if any) from -/// which remaining fields are copied; see comments on `StructBaseInfo`. -pub fn trans_adt<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - discr: Disr, - fields: &[(usize, &hir::Expr)], - optbase: Option>, - dest: Dest, - debug_location: DebugLoc) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_adt"); - let fcx = bcx.fcx; - let repr = adt::represent_type(bcx.ccx(), ty); - - debug_location.apply(bcx.fcx); - - // If we don't care about the result, just make a - // temporary stack slot - let addr = match dest { - SaveIn(pos) => pos, - Ignore => { - let llresult = alloc_ty(bcx, ty, "temp"); - call_lifetime_start(bcx, llresult); - llresult - } - }; - - debug!("trans_adt"); - - // This scope holds intermediates that must be cleaned should - // panic occur before the ADT as a whole is ready. - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - - if ty.is_simd() { - // Issue 23112: The original logic appeared vulnerable to same - // order-of-eval bug. But, SIMD values are tuple-structs; - // i.e. functional record update (FRU) syntax is unavailable. - // - // To be safe, double-check that we did not get here via FRU. 
- assert!(optbase.is_none()); - - // This is the constructor of a SIMD type, such types are - // always primitive machine types and so do not have a - // destructor or require any clean-up. - let llty = type_of::type_of(bcx.ccx(), ty); - - // keep a vector as a register, and running through the field - // `insertelement`ing them directly into that register - // (i.e. avoid GEPi and `store`s to an alloca) . - let mut vec_val = C_undef(llty); - - for &(i, ref e) in fields { - let block_datum = trans(bcx, &**e); - bcx = block_datum.bcx; - let position = C_uint(bcx.ccx(), i); - let value = block_datum.datum.to_llscalarish(bcx); - vec_val = InsertElement(bcx, vec_val, value, position); - } - Store(bcx, vec_val, addr); - } else if let Some(base) = optbase { - // Issue 23112: If there is a base, then order-of-eval - // requires field expressions eval'ed before base expression. - - // First, trans field expressions to temporary scratch values. - let scratch_vals: Vec<_> = fields.iter().map(|&(i, ref e)| { - let datum = unpack_datum!(bcx, trans(bcx, &**e)); - (i, datum) - }).collect(); - - debug_location.apply(bcx.fcx); - - // Second, trans the base to the dest. 
- assert_eq!(discr, Disr(0)); - - let addr = adt::MaybeSizedValue::sized(addr); - match expr_kind(bcx.tcx(), &*base.expr) { - ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => { - bcx = trans_into(bcx, &*base.expr, SaveIn(addr.value)); - }, - ExprKind::RvalueStmt => { - bcx.tcx().sess.bug("unexpected expr kind for struct base expr") - } - _ => { - let base_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, &*base.expr, "base")); - for &(i, t) in &base.fields { - let datum = base_datum.get_element( - bcx, t, |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, discr, i)); - assert!(type_is_sized(bcx.tcx(), datum.ty)); - let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); - } - } - } - - // Finally, move scratch field values into actual field locations - for (i, datum) in scratch_vals { - let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); - bcx = datum.store_to(bcx, dest); - } - } else { - // No base means we can write all fields directly in place. 
- let addr = adt::MaybeSizedValue::sized(addr); - for &(i, ref e) in fields { - let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i); - let e_ty = expr_ty_adjusted(bcx, &**e); - bcx = trans_into(bcx, &**e, SaveIn(dest)); - let scope = cleanup::CustomScope(custom_cleanup_scope); - fcx.schedule_lifetime_end(scope, dest); - // FIXME: nonzeroing move should generalize to fields - fcx.schedule_drop_mem(scope, dest, e_ty, None); - } - } - - adt::trans_set_discr(bcx, &*repr, addr, discr); - - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - - // If we don't care about the result drop the temporary we made - match dest { - SaveIn(_) => bcx, - Ignore => { - bcx = glue::drop_ty(bcx, addr, ty, debug_location); - base::call_lifetime_end(bcx, addr); - bcx - } - } -} - - -fn trans_immediate_lit<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - lit: &ast::Lit) - -> DatumBlock<'blk, 'tcx, Expr> { - // must not be a string constant, that is a RvalueDpsExpr - let _icx = push_ctxt("trans_immediate_lit"); - let ty = expr_ty(bcx, expr); - let v = consts::const_lit(bcx.ccx(), expr, lit); - immediate_rvalue_bcx(bcx, v, ty).to_expr_datumblock() -} - -fn trans_unary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::UnOp, - sub_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - let mut bcx = bcx; - let _icx = push_ctxt("trans_unary_datum"); - - let method_call = MethodCall::expr(expr.id); - - // The only overloaded operator that is translated to a datum - // is an overloaded deref, since it is always yields a `&T`. - // Otherwise, we should be in the RvalueDpsExpr path. 
- assert!(op == hir::UnDeref || !ccx.tcx().is_method_call(expr.id)); - - let un_ty = expr_ty(bcx, expr); - - let debug_loc = expr.debug_loc(); - - match op { - hir::UnNot => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let llresult = Not(bcx, datum.to_llscalarish(bcx), debug_loc); - immediate_rvalue_bcx(bcx, llresult, un_ty).to_expr_datumblock() - } - hir::UnNeg => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - let val = datum.to_llscalarish(bcx); - let (bcx, llneg) = { - if un_ty.is_fp() { - let result = FNeg(bcx, val, debug_loc); - (bcx, result) - } else { - let is_signed = un_ty.is_signed(); - let result = Neg(bcx, val, debug_loc); - let bcx = if bcx.ccx().check_overflow() && is_signed { - let (llty, min) = base::llty_and_min_for_signed_ty(bcx, un_ty); - let is_min = ICmp(bcx, llvm::IntEQ, val, - C_integral(llty, min, true), debug_loc); - with_cond(bcx, is_min, |bcx| { - let msg = InternedString::new( - "attempted to negate with overflow"); - controlflow::trans_fail(bcx, expr_info(expr), msg) - }) - } else { - bcx - }; - (bcx, result) - } - }; - immediate_rvalue_bcx(bcx, llneg, un_ty).to_expr_datumblock() - } - hir::UnDeref => { - let datum = unpack_datum!(bcx, trans(bcx, sub_expr)); - deref_once(bcx, expr, datum, method_call) - } - } -} - -fn trans_uniq_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - box_expr: &hir::Expr, - box_ty: Ty<'tcx>, - contents: &hir::Expr, - contents_ty: Ty<'tcx>) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_uniq_expr"); - let fcx = bcx.fcx; - assert!(type_is_sized(bcx.tcx(), contents_ty)); - let llty = type_of::type_of(bcx.ccx(), contents_ty); - let size = llsize_of(bcx.ccx(), llty); - let align = C_uint(bcx.ccx(), type_of::align_of(bcx.ccx(), contents_ty)); - let llty_ptr = llty.ptr_to(); - let Result { bcx, val } = malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - size, - align, - box_expr.debug_loc()); - // Unique boxes do not allocate for zero-size types. 
The standard library - // may assume that `free` is never called on the pointer returned for - // `Box`. - let bcx = if llsize_of_alloc(bcx.ccx(), llty) == 0 { - trans_into(bcx, contents, SaveIn(val)) - } else { - let custom_cleanup_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_free_value(cleanup::CustomScope(custom_cleanup_scope), - val, cleanup::HeapExchange, contents_ty); - let bcx = trans_into(bcx, contents, SaveIn(val)); - fcx.pop_custom_cleanup_scope(custom_cleanup_scope); - bcx - }; - immediate_rvalue_bcx(bcx, val, box_ty).to_expr_datumblock() -} - -fn trans_addr_of<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - subexpr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_addr_of"); - let mut bcx = bcx; - let sub_datum = unpack_datum!(bcx, trans_to_lvalue(bcx, subexpr, "addr_of")); - let ty = expr_ty(bcx, expr); - if !type_is_sized(bcx.tcx(), sub_datum.ty) { - // Always generate an lvalue datum, because this pointer doesn't own - // the data and cleanup is scheduled elsewhere. 
- DatumBlock::new(bcx, Datum::new(sub_datum.val, ty, LvalueExpr(sub_datum.kind))) - } else { - // Sized value, ref to a thin pointer - immediate_rvalue_bcx(bcx, sub_datum.val, ty).to_expr_datumblock() - } -} - -fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - binop_ty: Ty<'tcx>, - op: hir::BinOp, - lhs: Datum<'tcx, Rvalue>, - rhs: Datum<'tcx, Rvalue>) - -> DatumBlock<'blk, 'tcx, Expr> -{ - let _icx = push_ctxt("trans_scalar_binop"); - - let tcx = bcx.tcx(); - let lhs_t = lhs.ty; - assert!(!lhs_t.is_simd()); - let is_float = lhs_t.is_fp(); - let is_signed = lhs_t.is_signed(); - let info = expr_info(binop_expr); - - let binop_debug_loc = binop_expr.debug_loc(); - - let mut bcx = bcx; - let lhs = lhs.to_llscalarish(bcx); - let rhs = rhs.to_llscalarish(bcx); - let val = match op.node { - hir::BiAdd => { - if is_float { - FAdd(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Add, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiSub => { - if is_float { - FSub(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Sub, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiMul => { - if is_float { - FMul(bcx, lhs, rhs, binop_debug_loc) - } else { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Mul, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - } - hir::BiDiv => { - if is_float { - FDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - // Only zero-check integers; fp /0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, - lhs, - rhs, - lhs_t); - if is_signed { - SDiv(bcx, lhs, rhs, binop_debug_loc) - } else { - UDiv(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiRem => { - if is_float { - // LLVM currently always lowers the `frem` instructions appropriate - // library calls typically found in libm. 
Notably f64 gets wired up - // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for - // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's - // instead just an inline function in a header that goes up to a - // f64, uses `fmod`, and then comes back down to a f32. - // - // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will - // still unconditionally lower frem instructions over 32-bit floats - // to a call to `fmodf`. To work around this we special case MSVC - // 32-bit float rem instructions and instead do the call out to - // `fmod` ourselves. - // - // Note that this is currently duplicated with src/libcore/ops.rs - // which does the same thing, and it would be nice to perhaps unify - // these two implementations on day! Also note that we call `fmod` - // for both 32 and 64-bit floats because if we emit any FRem - // instruction at all then LLVM is capable of optimizing it into a - // 32-bit FRem (which we're trying to avoid). - let use_fmod = tcx.sess.target.target.options.is_like_msvc && - tcx.sess.target.target.arch == "x86"; - if use_fmod { - let f64t = Type::f64(bcx.ccx()); - let fty = Type::func(&[f64t, f64t], &f64t); - let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, - tcx.types.f64); - if lhs_t == tcx.types.f32 { - let lhs = FPExt(bcx, lhs, f64t); - let rhs = FPExt(bcx, rhs, f64t); - let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); - FPTrunc(bcx, res, Type::f32(bcx.ccx())) - } else { - Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc) - } - } else { - FRem(bcx, lhs, rhs, binop_debug_loc) - } - } else { - // Only zero-check integers; fp %0 is NaN - bcx = base::fail_if_zero_or_overflows(bcx, - expr_info(binop_expr), - op, lhs, rhs, lhs_t); - if is_signed { - SRem(bcx, lhs, rhs, binop_debug_loc) - } else { - URem(bcx, lhs, rhs, binop_debug_loc) - } - } - } - hir::BiBitOr => Or(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitAnd => And(bcx, lhs, rhs, binop_debug_loc), - hir::BiBitXor => Xor(bcx, lhs, 
rhs, binop_debug_loc), - hir::BiShl => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shl, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - hir::BiShr => { - let (newbcx, res) = with_overflow_check( - bcx, OverflowOp::Shr, info, lhs_t, lhs, rhs, binop_debug_loc); - bcx = newbcx; - res - } - hir::BiEq | hir::BiNe | hir::BiLt | hir::BiGe | hir::BiLe | hir::BiGt => { - base::compare_scalar_types(bcx, lhs, rhs, lhs_t, op.node, binop_debug_loc) - } - _ => { - bcx.tcx().sess.span_bug(binop_expr.span, "unexpected binop"); - } - }; - - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() -} - -// refinement types would obviate the need for this -enum lazy_binop_ty { - lazy_and, - lazy_or, -} - -fn trans_lazy_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - binop_expr: &hir::Expr, - op: lazy_binop_ty, - a: &hir::Expr, - b: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_lazy_binop"); - let binop_ty = expr_ty(bcx, binop_expr); - let fcx = bcx.fcx; - - let DatumBlock {bcx: past_lhs, datum: lhs} = trans(bcx, a); - let lhs = lhs.to_llscalarish(past_lhs); - - if past_lhs.unreachable.get() { - return immediate_rvalue_bcx(past_lhs, lhs, binop_ty).to_expr_datumblock(); - } - - let join = fcx.new_id_block("join", binop_expr.id); - let before_rhs = fcx.new_id_block("before_rhs", b.id); - - match op { - lazy_and => CondBr(past_lhs, lhs, before_rhs.llbb, join.llbb, DebugLoc::None), - lazy_or => CondBr(past_lhs, lhs, join.llbb, before_rhs.llbb, DebugLoc::None) - } - - let DatumBlock {bcx: past_rhs, datum: rhs} = trans(before_rhs, b); - let rhs = rhs.to_llscalarish(past_rhs); - - if past_rhs.unreachable.get() { - return immediate_rvalue_bcx(join, lhs, binop_ty).to_expr_datumblock(); - } - - Br(past_rhs, join.llbb, DebugLoc::None); - let phi = Phi(join, Type::i1(bcx.ccx()), &[lhs, rhs], - &[past_lhs.llbb, past_rhs.llbb]); - - return immediate_rvalue_bcx(join, phi, binop_ty).to_expr_datumblock(); -} - -fn 
trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - lhs: &hir::Expr, - rhs: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let _icx = push_ctxt("trans_binary"); - let ccx = bcx.ccx(); - - // if overloaded, would be RvalueDpsExpr - assert!(!ccx.tcx().is_method_call(expr.id)); - - match op.node { - hir::BiAnd => { - trans_lazy_binop(bcx, expr, lazy_and, lhs, rhs) - } - hir::BiOr => { - trans_lazy_binop(bcx, expr, lazy_or, lhs, rhs) - } - _ => { - let mut bcx = bcx; - let binop_ty = expr_ty(bcx, expr); - - let lhs = unpack_datum!(bcx, trans(bcx, lhs)); - let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs")); - debug!("trans_binary (expr {}): lhs={}", - expr.id, lhs.to_string(ccx)); - let rhs = unpack_datum!(bcx, trans(bcx, rhs)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs")); - debug!("trans_binary (expr {}): rhs={}", - expr.id, rhs.to_string(ccx)); - - if type_is_fat_ptr(ccx.tcx(), lhs.ty) { - assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - assert_eq!(binop_ty, bcx.tcx().types.bool); - let val = base::compare_scalar_types( - bcx, - lhs.val, - rhs.val, - lhs.ty, - op.node, - expr.debug_loc()); - immediate_rvalue_bcx(bcx, val, binop_ty).to_expr_datumblock() - } else { - assert!(!type_is_fat_ptr(ccx.tcx(), rhs.ty), - "built-in binary operators on fat pointers are homogeneous"); - trans_scalar_binop(bcx, expr, binop_ty, op, lhs, rhs) - } - } - } -} - -fn trans_overloaded_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - method_call: MethodCall, - lhs: Datum<'tcx, Expr>, - rhs: Option<(Datum<'tcx, Expr>, ast::NodeId)>, - dest: Option, - autoref: bool) - -> Result<'blk, 'tcx> { - callee::trans_call_inner(bcx, - expr.debug_loc(), - |bcx, arg_cleanup_scope| { - meth::trans_method_callee(bcx, - method_call, - None, - arg_cleanup_scope) - }, - callee::ArgOverloadedOp(lhs, rhs, autoref), - dest) -} - -fn trans_overloaded_call<'a, 
'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - callee: &'a hir::Expr, - args: &'a [P], - dest: Option) - -> Block<'blk, 'tcx> { - debug!("trans_overloaded_call {}", expr.id); - let method_call = MethodCall::expr(expr.id); - let mut all_args = vec!(callee); - all_args.extend(args.iter().map(|e| &**e)); - unpack_result!(bcx, - callee::trans_call_inner(bcx, - expr.debug_loc(), - |bcx, arg_cleanup_scope| { - meth::trans_method_callee( - bcx, - method_call, - None, - arg_cleanup_scope) - }, - callee::ArgOverloadedCall(all_args), - dest)); - bcx -} - -pub fn cast_is_noop<'tcx>(tcx: &ty::ctxt<'tcx>, - expr: &hir::Expr, - t_in: Ty<'tcx>, - t_out: Ty<'tcx>) - -> bool { - if let Some(&CastKind::CoercionCast) = tcx.cast_kinds.borrow().get(&expr.id) { - return true; - } - - match (t_in.builtin_deref(true, ty::NoPreference), - t_out.builtin_deref(true, ty::NoPreference)) { - (Some(ty::TypeAndMut{ ty: t_in, .. }), Some(ty::TypeAndMut{ ty: t_out, .. })) => { - t_in == t_out - } - _ => { - // This condition isn't redundant with the check for CoercionCast: - // different types can be substituted into the same type, and - // == equality can be overconservative if there are regions. 
- t_in == t_out - } - } -} - -fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - id: ast::NodeId) - -> DatumBlock<'blk, 'tcx, Expr> -{ - use middle::ty::cast::CastTy::*; - use middle::ty::cast::IntTy::*; - - fn int_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef, - signed: bool) - -> ValueRef - { - let _icx = push_ctxt("int_cast"); - let srcsz = llsrctype.int_width(); - let dstsz = lldsttype.int_width(); - return if dstsz == srcsz { - BitCast(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - TruncOrBitCast(bcx, llsrc, lldsttype) - } else if signed { - SExtOrBitCast(bcx, llsrc, lldsttype) - } else { - ZExtOrBitCast(bcx, llsrc, lldsttype) - } - } - - fn float_cast(bcx: Block, - lldsttype: Type, - llsrctype: Type, - llsrc: ValueRef) - -> ValueRef - { - let _icx = push_ctxt("float_cast"); - let srcsz = llsrctype.float_width(); - let dstsz = lldsttype.float_width(); - return if dstsz > srcsz { - FPExt(bcx, llsrc, lldsttype) - } else if srcsz > dstsz { - FPTrunc(bcx, llsrc, lldsttype) - } else { llsrc }; - } - - let _icx = push_ctxt("trans_cast"); - let mut bcx = bcx; - let ccx = bcx.ccx(); - - let t_in = expr_ty_adjusted(bcx, expr); - let t_out = node_id_type(bcx, id); - - debug!("trans_cast({:?} as {:?})", t_in, t_out); - let mut ll_t_in = type_of::arg_type_of(ccx, t_in); - let ll_t_out = type_of::arg_type_of(ccx, t_out); - // Convert the value to be cast into a ValueRef, either by-ref or - // by-value as appropriate given its type: - let mut datum = unpack_datum!(bcx, trans(bcx, expr)); - - let datum_ty = monomorphize_type(bcx, datum.ty); - - if cast_is_noop(bcx.tcx(), expr, datum_ty, t_out) { - datum.ty = t_out; - return DatumBlock::new(bcx, datum); - } - - if type_is_fat_ptr(bcx.tcx(), t_in) { - assert!(datum.kind.is_by_ref()); - if type_is_fat_ptr(bcx.tcx(), t_out) { - return DatumBlock::new(bcx, Datum::new( - PointerCast(bcx, datum.val, ll_t_out.ptr_to()), - t_out, - Rvalue::new(ByRef) - )).to_expr_datumblock(); 
- } else { - // Return the address - return immediate_rvalue_bcx(bcx, - PointerCast(bcx, - Load(bcx, get_dataptr(bcx, datum.val)), - ll_t_out), - t_out).to_expr_datumblock(); - } - } - - let r_t_in = CastTy::from_ty(t_in).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(t_out).expect("bad output type for cast"); - - let (llexpr, signed) = if let Int(CEnum) = r_t_in { - let repr = adt::represent_type(ccx, t_in); - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "trans_imm_cast", expr.id)); - let llexpr_ptr = datum.to_llref(); - let discr = adt::trans_get_discr(bcx, &*repr, llexpr_ptr, Some(Type::i64(ccx))); - ll_t_in = val_ty(discr); - (discr, adt::is_discr_signed(&*repr)) - } else { - (datum.to_llscalarish(bcx), t_in.is_signed()) - }; - - let newval = match (r_t_in, r_t_out) { - (Ptr(_), Ptr(_)) | (FnPtr, Ptr(_)) | (RPtr(_), Ptr(_)) => { - PointerCast(bcx, llexpr, ll_t_out) - } - (Ptr(_), Int(_)) | (FnPtr, Int(_)) => PtrToInt(bcx, llexpr, ll_t_out), - (Int(_), Ptr(_)) => IntToPtr(bcx, llexpr, ll_t_out), - - (Int(_), Int(_)) => int_cast(bcx, ll_t_out, ll_t_in, llexpr, signed), - (Float, Float) => float_cast(bcx, ll_t_out, ll_t_in, llexpr), - (Int(_), Float) if signed => SIToFP(bcx, llexpr, ll_t_out), - (Int(_), Float) => UIToFP(bcx, llexpr, ll_t_out), - (Float, Int(I)) => FPToSI(bcx, llexpr, ll_t_out), - (Float, Int(_)) => FPToUI(bcx, llexpr, ll_t_out), - - _ => ccx.sess().span_bug(expr.span, - &format!("translating unsupported cast: \ - {:?} -> {:?}", - t_in, - t_out) - ) - }; - return immediate_rvalue_bcx(bcx, newval, t_out).to_expr_datumblock(); -} - -fn trans_assign_op<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - op: hir::BinOp, - dst: &hir::Expr, - src: &hir::Expr) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_assign_op"); - let mut bcx = bcx; - - debug!("trans_assign_op(expr={:?})", expr); - - // User-defined operator methods cannot be used with `+=` etc right now - 
assert!(!bcx.tcx().is_method_call(expr.id)); - - // Evaluate LHS (destination), which should be an lvalue - let dst = unpack_datum!(bcx, trans_to_lvalue(bcx, dst, "assign_op")); - assert!(!bcx.fcx.type_needs_drop(dst.ty)); - let lhs = load_ty(bcx, dst.val, dst.ty); - let lhs = immediate_rvalue(lhs, dst.ty); - - // Evaluate RHS - FIXME(#28160) this sucks - let rhs = unpack_datum!(bcx, trans(bcx, &*src)); - let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "assign_op_rhs")); - - // Perform computation and store the result - let result_datum = unpack_datum!( - bcx, trans_scalar_binop(bcx, expr, dst.ty, op, lhs, rhs)); - return result_datum.store_to(bcx, dst.val); -} - -fn auto_ref<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - datum: Datum<'tcx, Expr>, - expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - - // Ensure cleanup of `datum` if not already scheduled and obtain - // a "by ref" pointer. - let lv_datum = unpack_datum!(bcx, datum.to_lvalue_datum(bcx, "autoref", expr.id)); - - // Compute final type. Note that we are loose with the region and - // mutability, since those things don't matter in trans. - let referent_ty = lv_datum.ty; - let ptr_ty = bcx.tcx().mk_imm_ref(bcx.tcx().mk_region(ty::ReStatic), referent_ty); - - // Construct the resulting datum. The right datum to return here would be an Lvalue datum, - // because there is cleanup scheduled and the datum doesn't own the data, but for thin pointers - // we microoptimize it to be an Rvalue datum to avoid the extra alloca and level of - // indirection and for thin pointers, this has no ill effects. - let kind = if type_is_sized(bcx.tcx(), referent_ty) { - RvalueExpr(Rvalue::new(ByValue)) - } else { - LvalueExpr(lv_datum.kind) - }; - - // Get the pointer. 
- let llref = lv_datum.to_llref(); - DatumBlock::new(bcx, Datum::new(llref, ptr_ty, kind)) -} - -fn deref_multiple<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - times: usize) - -> DatumBlock<'blk, 'tcx, Expr> { - let mut bcx = bcx; - let mut datum = datum; - for i in 0..times { - let method_call = MethodCall::autoderef(expr.id, i as u32); - datum = unpack_datum!(bcx, deref_once(bcx, expr, datum, method_call)); - } - DatumBlock { bcx: bcx, datum: datum } -} - -fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - datum: Datum<'tcx, Expr>, - method_call: MethodCall) - -> DatumBlock<'blk, 'tcx, Expr> { - let ccx = bcx.ccx(); - - debug!("deref_once(expr={:?}, datum={}, method_call={:?})", - expr, - datum.to_string(ccx), - method_call); - - let mut bcx = bcx; - - // Check for overloaded deref. - let method_ty = ccx.tcx() - .tables - .borrow() - .method_map - .get(&method_call).map(|method| method.ty); - - let datum = match method_ty { - Some(method_ty) => { - let method_ty = monomorphize_type(bcx, method_ty); - - // Overloaded. Evaluate `trans_overloaded_op`, which will - // invoke the user's deref() method, which basically - // converts from the `Smaht` pointer that we have into - // a `&T` pointer. We can then proceed down the normal - // path (below) to dereference that `&T`. - let datum = if method_call.autoderef == 0 { - datum - } else { - // Always perform an AutoPtr when applying an overloaded auto-deref - unpack_datum!(bcx, auto_ref(bcx, datum, expr)) - }; - - let ref_ty = // invoked methods have their LB regions instantiated - ccx.tcx().no_late_bound_regions(&method_ty.fn_ret()).unwrap().unwrap(); - let scratch = rvalue_scratch_datum(bcx, ref_ty, "overloaded_deref"); - - unpack_result!(bcx, trans_overloaded_op(bcx, expr, method_call, - datum, None, Some(SaveIn(scratch.val)), - false)); - scratch.to_expr_datum() - } - None => { - // Not overloaded. We already have a pointer we know how to deref. 
- datum - } - }; - - let r = match datum.ty.sty { - ty::TyBox(content_ty) => { - // Make sure we have an lvalue datum here to get the - // proper cleanups scheduled - let datum = unpack_datum!( - bcx, datum.to_lvalue_datum(bcx, "deref", expr.id)); - - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = load_ty(bcx, datum.val, datum.ty); - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(datum.kind))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. Since there is no temporary for `*e` - // here (because it is unsized), we cannot emulate the sized - // object code path for running drop glue and free. Instead, - // we schedule cleanup for `e`, turning it into an lvalue. - - let lval = Lvalue::new("expr::deref_once ty_uniq"); - let datum = Datum::new(datum.val, content_ty, LvalueExpr(lval)); - DatumBlock::new(bcx, datum) - } - } - - ty::TyRawPtr(ty::TypeAndMut { ty: content_ty, .. }) | - ty::TyRef(_, ty::TypeAndMut { ty: content_ty, .. }) => { - let lval = Lvalue::new("expr::deref_once ptr"); - if type_is_sized(bcx.tcx(), content_ty) { - let ptr = datum.to_llscalarish(bcx); - - // Always generate an lvalue datum, even if datum.mode is - // an rvalue. This is because datum.mode is only an - // rvalue for non-owning pointers like &T or *T, in which - // case cleanup *is* scheduled elsewhere, by the true - // owner (or, in the case of *T, by the user). - DatumBlock::new(bcx, Datum::new(ptr, content_ty, LvalueExpr(lval))) - } else { - // A fat pointer and a DST lvalue have the same representation - // just different types. 
- DatumBlock::new(bcx, Datum::new(datum.val, content_ty, LvalueExpr(lval))) - } - } - - _ => { - bcx.tcx().sess.span_bug( - expr.span, - &format!("deref invoked on expr of invalid type {:?}", - datum.ty)); - } - }; - - debug!("deref_once(expr={}, method_call={:?}, result={})", - expr.id, method_call, r.datum.to_string(ccx)); - - return r; -} - -#[derive(Debug)] -enum OverflowOp { - Add, - Sub, - Mul, - Shl, - Shr, -} - -impl OverflowOp { - fn codegen_strategy(&self) -> OverflowCodegen { - use self::OverflowCodegen::{ViaIntrinsic, ViaInputCheck}; - match *self { - OverflowOp::Add => ViaIntrinsic(OverflowOpViaIntrinsic::Add), - OverflowOp::Sub => ViaIntrinsic(OverflowOpViaIntrinsic::Sub), - OverflowOp::Mul => ViaIntrinsic(OverflowOpViaIntrinsic::Mul), - - OverflowOp::Shl => ViaInputCheck(OverflowOpViaInputCheck::Shl), - OverflowOp::Shr => ViaInputCheck(OverflowOpViaInputCheck::Shr), - } - } -} - -enum OverflowCodegen { - ViaIntrinsic(OverflowOpViaIntrinsic), - ViaInputCheck(OverflowOpViaInputCheck), -} - -enum OverflowOpViaInputCheck { Shl, Shr, } - -#[derive(Debug)] -enum OverflowOpViaIntrinsic { Add, Sub, Mul, } - -impl OverflowOpViaIntrinsic { - fn to_intrinsic<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, lhs_ty: Ty) -> ValueRef { - let name = self.to_intrinsic_name(bcx.tcx(), lhs_ty); - bcx.ccx().get_intrinsic(&name) - } - fn to_intrinsic_name(&self, tcx: &ty::ctxt, ty: Ty) -> &'static str { - use syntax::ast::IntTy::*; - use syntax::ast::UintTy::*; - use middle::ty::{TyInt, TyUint}; - - let new_sty = match ty.sty { - TyInt(TyIs) => match &tcx.sess.target.target.target_pointer_width[..] { - "32" => TyInt(TyI32), - "64" => TyInt(TyI64), - _ => panic!("unsupported target word size") - }, - TyUint(TyUs) => match &tcx.sess.target.target.target_pointer_width[..] 
{ - "32" => TyUint(TyU32), - "64" => TyUint(TyU64), - _ => panic!("unsupported target word size") - }, - ref t @ TyUint(_) | ref t @ TyInt(_) => t.clone(), - _ => panic!("tried to get overflow intrinsic for {:?} applied to non-int type", - *self) - }; - - match *self { - OverflowOpViaIntrinsic::Add => match new_sty { - TyInt(TyI8) => "llvm.sadd.with.overflow.i8", - TyInt(TyI16) => "llvm.sadd.with.overflow.i16", - TyInt(TyI32) => "llvm.sadd.with.overflow.i32", - TyInt(TyI64) => "llvm.sadd.with.overflow.i64", - - TyUint(TyU8) => "llvm.uadd.with.overflow.i8", - TyUint(TyU16) => "llvm.uadd.with.overflow.i16", - TyUint(TyU32) => "llvm.uadd.with.overflow.i32", - TyUint(TyU64) => "llvm.uadd.with.overflow.i64", - - _ => unreachable!(), - }, - OverflowOpViaIntrinsic::Sub => match new_sty { - TyInt(TyI8) => "llvm.ssub.with.overflow.i8", - TyInt(TyI16) => "llvm.ssub.with.overflow.i16", - TyInt(TyI32) => "llvm.ssub.with.overflow.i32", - TyInt(TyI64) => "llvm.ssub.with.overflow.i64", - - TyUint(TyU8) => "llvm.usub.with.overflow.i8", - TyUint(TyU16) => "llvm.usub.with.overflow.i16", - TyUint(TyU32) => "llvm.usub.with.overflow.i32", - TyUint(TyU64) => "llvm.usub.with.overflow.i64", - - _ => unreachable!(), - }, - OverflowOpViaIntrinsic::Mul => match new_sty { - TyInt(TyI8) => "llvm.smul.with.overflow.i8", - TyInt(TyI16) => "llvm.smul.with.overflow.i16", - TyInt(TyI32) => "llvm.smul.with.overflow.i32", - TyInt(TyI64) => "llvm.smul.with.overflow.i64", - - TyUint(TyU8) => "llvm.umul.with.overflow.i8", - TyUint(TyU16) => "llvm.umul.with.overflow.i16", - TyUint(TyU32) => "llvm.umul.with.overflow.i32", - TyUint(TyU64) => "llvm.umul.with.overflow.i64", - - _ => unreachable!(), - }, - } - } - - fn build_intrinsic_call<'blk, 'tcx>(&self, bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - let llfn = self.to_intrinsic(bcx, lhs_t); - - let val = Call(bcx, llfn, &[lhs, rhs], 
None, binop_debug_loc); - let result = ExtractValue(bcx, val, 0); // iN operation result - let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" - - let cond = ICmp(bcx, llvm::IntEQ, overflow, C_integral(Type::i1(bcx.ccx()), 1, false), - binop_debug_loc); - - let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], - None, binop_debug_loc); - - let bcx = - base::with_cond(bcx, cond, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new("arithmetic operation overflowed"))); - - (bcx, result) - } -} - -impl OverflowOpViaInputCheck { - fn build_with_input_check<'blk, 'tcx>(&self, - bcx: Block<'blk, 'tcx>, - info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) - { - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - - // Panic if any bits are set outside of bits that we always - // mask in. - // - // Note that the mask's value is derived from the LHS type - // (since that is where the 32/64 distinction is relevant) but - // the mask's type must match the RHS type (since they will - // both be fed into an and-binop) - let invert_mask = shift_mask_val(bcx, lhs_llty, rhs_llty, true); - - let outer_bits = And(bcx, rhs, invert_mask, binop_debug_loc); - let cond = build_nonzero_check(bcx, outer_bits, binop_debug_loc); - let result = match *self { - OverflowOpViaInputCheck::Shl => - build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), - OverflowOpViaInputCheck::Shr => - build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), - }; - let bcx = - base::with_cond(bcx, cond, |bcx| - controlflow::trans_fail(bcx, info, - InternedString::new("shift operation overflowed"))); - - (bcx, result) - } -} - -// Check if an integer or vector contains a nonzero element. 
-fn build_nonzero_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - value: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { - let llty = val_ty(value); - let kind = llty.kind(); - match kind { - TypeKind::Integer => ICmp(bcx, llvm::IntNE, value, C_null(llty), binop_debug_loc), - TypeKind::Vector => { - // Check if any elements of the vector are nonzero by treating - // it as a wide integer and checking if the integer is nonzero. - let width = llty.vector_length() as u64 * llty.element_type().int_width(); - let int_value = BitCast(bcx, value, Type::ix(bcx.ccx(), width)); - build_nonzero_check(bcx, int_value, binop_debug_loc) - }, - _ => panic!("build_nonzero_check: expected Integer or Vector, found {:?}", kind), - } -} - -fn with_overflow_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, oop: OverflowOp, info: NodeIdAndSpan, - lhs_t: Ty<'tcx>, lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) - -> (Block<'blk, 'tcx>, ValueRef) { - if bcx.unreachable.get() { return (bcx, _Undef(lhs)); } - if bcx.ccx().check_overflow() { - - match oop.codegen_strategy() { - OverflowCodegen::ViaIntrinsic(oop) => - oop.build_intrinsic_call(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - OverflowCodegen::ViaInputCheck(oop) => - oop.build_with_input_check(bcx, info, lhs_t, lhs, rhs, binop_debug_loc), - } - } else { - let res = match oop { - OverflowOp::Add => Add(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Sub => Sub(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Mul => Mul(bcx, lhs, rhs, binop_debug_loc), - - OverflowOp::Shl => - build_unchecked_lshift(bcx, lhs, rhs, binop_debug_loc), - OverflowOp::Shr => - build_unchecked_rshift(bcx, lhs_t, lhs, rhs, binop_debug_loc), - }; - (bcx, res) - } -} - -/// We categorize expressions into three kinds. The distinction between -/// lvalue/rvalue is fundamental to the language. The distinction between the -/// two kinds of rvalues is an artifact of trans which reflects how we will -/// generate code for that kind of expression. 
See trans/expr.rs for more -/// information. -#[derive(Copy, Clone)] -enum ExprKind { - Lvalue, - RvalueDps, - RvalueDatum, - RvalueStmt -} - -fn expr_kind(tcx: &ty::ctxt, expr: &hir::Expr) -> ExprKind { - if tcx.is_method_call(expr.id) { - // Overloaded operations are generally calls, and hence they are - // generated via DPS, but there are a few exceptions: - return match expr.node { - // `a += b` has a unit result. - hir::ExprAssignOp(..) => ExprKind::RvalueStmt, - - // the deref method invoked for `*a` always yields an `&T` - hir::ExprUnary(hir::UnDeref, _) => ExprKind::Lvalue, - - // the index method invoked for `a[i]` always yields an `&T` - hir::ExprIndex(..) => ExprKind::Lvalue, - - // in the general case, result could be any type, use DPS - _ => ExprKind::RvalueDps - }; - } - - match expr.node { - hir::ExprPath(..) => { - match tcx.resolve_expr(expr) { - def::DefStruct(_) | def::DefVariant(..) => { - if let ty::TyBareFn(..) = tcx.node_id_to_type(expr.id).sty { - // ctor function - ExprKind::RvalueDatum - } else { - ExprKind::RvalueDps - } - } - - // Special case: A unit like struct's constructor must be called without () at the - // end (like `UnitStruct`) which means this is an ExprPath to a DefFn. But in case - // of unit structs this is should not be interpreted as function pointer but as - // call to the constructor. - def::DefFn(_, true) => ExprKind::RvalueDps, - - // Fn pointers are just scalar values. - def::DefFn(..) | def::DefMethod(..) => ExprKind::RvalueDatum, - - // Note: there is actually a good case to be made that - // DefArg's, particularly those of immediate type, ought to - // considered rvalues. - def::DefStatic(..) | - def::DefUpvar(..) | - def::DefLocal(..) => ExprKind::Lvalue, - - def::DefConst(..) | - def::DefAssociatedConst(..) 
=> ExprKind::RvalueDatum, - - def => { - tcx.sess.span_bug( - expr.span, - &format!("uncategorized def for expr {}: {:?}", - expr.id, - def)); - } - } - } - - hir::ExprType(ref expr, _) => { - expr_kind(tcx, expr) - } - - hir::ExprUnary(hir::UnDeref, _) | - hir::ExprField(..) | - hir::ExprTupField(..) | - hir::ExprIndex(..) => { - ExprKind::Lvalue - } - - hir::ExprCall(..) | - hir::ExprMethodCall(..) | - hir::ExprStruct(..) | - hir::ExprRange(..) | - hir::ExprTup(..) | - hir::ExprIf(..) | - hir::ExprMatch(..) | - hir::ExprClosure(..) | - hir::ExprBlock(..) | - hir::ExprRepeat(..) | - hir::ExprVec(..) => { - ExprKind::RvalueDps - } - - hir::ExprLit(ref lit) if lit.node.is_str() => { - ExprKind::RvalueDps - } - - hir::ExprBreak(..) | - hir::ExprAgain(..) | - hir::ExprRet(..) | - hir::ExprWhile(..) | - hir::ExprLoop(..) | - hir::ExprAssign(..) | - hir::ExprInlineAsm(..) | - hir::ExprAssignOp(..) => { - ExprKind::RvalueStmt - } - - hir::ExprLit(_) | // Note: LitStr is carved out above - hir::ExprUnary(..) | - hir::ExprBox(_) | - hir::ExprAddrOf(..) | - hir::ExprBinary(..) | - hir::ExprCast(..) => { - ExprKind::RvalueDatum - } - } -} diff --git a/src/librustc_trans/trans/foreign.rs b/src/librustc_trans/trans/foreign.rs deleted file mode 100644 index 217310d6610b7..0000000000000 --- a/src/librustc_trans/trans/foreign.rs +++ /dev/null @@ -1,1059 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- - -use back::{abi, link}; -use llvm::{ValueRef, CallConv, get_param}; -use llvm; -use middle::weak_lang_items; -use trans::attributes; -use trans::base::{llvm_linkage_by_name, push_ctxt}; -use trans::base; -use trans::build::*; -use trans::cabi; -use trans::common::*; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::machine; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of::*; -use trans::type_of; -use trans::Disr; -use middle::infer; -use middle::ty::{self, Ty}; -use middle::subst::Substs; - -use std::cmp; -use std::iter::once; -use libc::c_uint; -use syntax::abi::{Cdecl, Aapcs, C, Win64, Abi}; -use syntax::abi::{PlatformIntrinsic, RustIntrinsic, Rust, RustCall, Stdcall}; -use syntax::abi::{Fastcall, Vectorcall, System}; -use syntax::attr; -use syntax::codemap::Span; -use syntax::parse::token::{InternedString, special_idents}; -use syntax::ast; - -use rustc_front::print::pprust; -use rustc_front::hir; - -/////////////////////////////////////////////////////////////////////////// -// Type definitions - -struct ForeignTypes<'tcx> { - /// Rust signature of the function - fn_sig: ty::FnSig<'tcx>, - - /// Adapter object for handling native ABI rules (trust me, you - /// don't want to know) - fn_ty: cabi::FnType, - - /// LLVM types that will appear on the foreign function - llsig: LlvmSignature, -} - -struct LlvmSignature { - // LLVM versions of the types of this function's arguments. - llarg_tys: Vec , - - // LLVM version of the type that this function returns. Note that - // this *may not be* the declared return type of the foreign - // function, because the foreign function may opt to return via an - // out pointer. 
- llret_ty: Type, - - /// True if there is a return value (not bottom, not unit) - ret_def: bool, -} - - -/////////////////////////////////////////////////////////////////////////// -// Calls to external functions - -pub fn llvm_calling_convention(ccx: &CrateContext, - abi: Abi) -> CallConv { - match ccx.sess().target.target.adjust_abi(abi) { - RustIntrinsic => { - // Intrinsics are emitted at the call site - ccx.sess().bug("asked to register intrinsic fn"); - } - PlatformIntrinsic => { - // Intrinsics are emitted at the call site - ccx.sess().bug("asked to register platform intrinsic fn"); - } - - Rust => { - // FIXME(#3678) Implement linking to foreign fns with Rust ABI - ccx.sess().unimpl("foreign functions with Rust ABI"); - } - - RustCall => { - // FIXME(#3678) Implement linking to foreign fns with Rust ABI - ccx.sess().unimpl("foreign functions with RustCall ABI"); - } - - // It's the ABI's job to select this, not us. - System => ccx.sess().bug("system abi should be selected elsewhere"), - - Stdcall => llvm::X86StdcallCallConv, - Fastcall => llvm::X86FastcallCallConv, - Vectorcall => llvm::X86_VectorCall, - C => llvm::CCallConv, - Win64 => llvm::X86_64_Win64, - - // These API constants ought to be more specific... - Cdecl => llvm::CCallConv, - Aapcs => llvm::CCallConv, - } -} - -pub fn register_static(ccx: &CrateContext, - foreign_item: &hir::ForeignItem) -> ValueRef { - let ty = ccx.tcx().node_id_to_type(foreign_item.id); - let llty = type_of::type_of(ccx, ty); - - let ident = link_name(foreign_item); - match attr::first_attr_value_str_by_name(&foreign_item.attrs, - "linkage") { - // If this is a static with a linkage specified, then we need to handle - // it a little specially. The typesystem prevents things like &T and - // extern "C" fn() from being non-null, so we can't just declare a - // static and call it a day. Some linkages (like weak) will make it such - // that the static actually has a null value. 
- Some(name) => { - let linkage = match llvm_linkage_by_name(&name) { - Some(linkage) => linkage, - None => { - ccx.sess().span_fatal(foreign_item.span, - "invalid linkage specified"); - } - }; - let llty2 = match ty.sty { - ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), - _ => { - ccx.sess().span_fatal(foreign_item.span, - "must have type `*T` or `*mut T`"); - } - }; - unsafe { - // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(ccx, &ident[..], llty2); - llvm::SetLinkage(g1, linkage); - - // Declare an internal global `extern_with_linkage_foo` which - // is initialized with the address of `foo`. If `foo` is - // discarded during linking (for example, if `foo` has weak - // linkage and there are no definitions), then - // `extern_with_linkage_foo` will instead be initialized to - // zero. - let mut real_name = "_rust_extern_with_linkage_".to_string(); - real_name.push_str(&ident); - let g2 = declare::define_global(ccx, &real_name[..], llty).unwrap_or_else(||{ - ccx.sess().span_fatal(foreign_item.span, - &format!("symbol `{}` is already defined", ident)) - }); - llvm::SetLinkage(g2, llvm::InternalLinkage); - llvm::LLVMSetInitializer(g2, g1); - g2 - } - } - None => // Generate an external declaration. - declare::declare_global(ccx, &ident[..], llty), - } -} - -// only use this for foreign function ABIs and glue, use `get_extern_rust_fn` for Rust functions -pub fn get_extern_fn(ccx: &CrateContext, - externs: &mut ExternMap, - name: &str, - cc: llvm::CallConv, - ty: Type, - output: Ty) - -> ValueRef { - match externs.get(name) { - Some(n) => return *n, - None => {} - } - let f = declare::declare_fn(ccx, name, cc, ty, ty::FnConverging(output)); - externs.insert(name.to_string(), f); - f -} - -/// Registers a foreign function found in a library. Just adds a LLVM global. 
-pub fn register_foreign_item_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - abi: Abi, fty: Ty<'tcx>, - name: &str, - attrs: &[ast::Attribute])-> ValueRef { - debug!("register_foreign_item_fn(abi={:?}, \ - ty={:?}, \ - name={})", - abi, - fty, - name); - - let cc = llvm_calling_convention(ccx, abi); - - // Register the function as a C extern fn - let tys = foreign_types_for_fn_ty(ccx, fty); - - // Make sure the calling convention is right for variadic functions - // (should've been caught if not in typeck) - if tys.fn_sig.variadic { - assert!(cc == llvm::CCallConv); - } - - // Create the LLVM value for the C extern fn - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - - let llfn = get_extern_fn(ccx, &mut *ccx.externs().borrow_mut(), name, cc, llfn_ty, fty); - attributes::unwind(llfn, false); - add_argument_attributes(&tys, llfn); - attributes::from_fn_attrs(ccx, attrs, llfn); - llfn -} - -/// Prepares a call to a native function. This requires adapting -/// from the Rust argument passing rules to the native rules. -/// -/// # Parameters -/// -/// - `callee_ty`: Rust type for the function we are calling -/// - `llfn`: the function pointer we are calling -/// - `llretptr`: where to store the return value of the function -/// - `llargs_rust`: a list of the argument values, prepared -/// as they would be if calling a Rust function -/// - `passed_arg_tys`: Rust type for the arguments. Normally we -/// can derive these from callee_ty but in the case of variadic -/// functions passed_arg_tys will include the Rust type of all -/// the arguments including the ones not specified in the fn's signature. 
-pub fn trans_native_call<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - callee_ty: Ty<'tcx>, - llfn: ValueRef, - llretptr: ValueRef, - llargs_rust: &[ValueRef], - passed_arg_tys: Vec>, - call_debug_loc: DebugLoc) - -> Block<'blk, 'tcx> -{ - let ccx = bcx.ccx(); - - debug!("trans_native_call(callee_ty={:?}, \ - llfn={}, \ - llretptr={})", - callee_ty, - ccx.tn().val_to_string(llfn), - ccx.tn().val_to_string(llretptr)); - - let (fn_abi, fn_sig) = match callee_ty.sty { - ty::TyBareFn(_, ref fn_ty) => (fn_ty.abi, &fn_ty.sig), - _ => ccx.sess().bug("trans_native_call called on non-function type") - }; - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - let llsig = foreign_signature(ccx, &fn_sig, &passed_arg_tys[..]); - let fn_type = cabi::compute_abi_info(ccx, - &llsig.llarg_tys, - llsig.llret_ty, - llsig.ret_def); - - let arg_tys: &[cabi::ArgType] = &fn_type.arg_tys; - - let mut llargs_foreign = Vec::new(); - - // If the foreign ABI expects return value by pointer, supply the - // pointer that Rust gave us. Sometimes we have to bitcast - // because foreign fns return slightly different (but equivalent) - // views on the same type (e.g., i64 in place of {i32,i32}). - if fn_type.ret_ty.is_indirect() { - match fn_type.ret_ty.cast { - Some(ty) => { - let llcastedretptr = - BitCast(bcx, llretptr, ty.ptr_to()); - llargs_foreign.push(llcastedretptr); - } - None => { - llargs_foreign.push(llretptr); - } - } - } - - let mut offset = 0; - for (i, arg_ty) in arg_tys.iter().enumerate() { - let mut llarg_rust = llargs_rust[i + offset]; - - if arg_ty.is_ignore() { - continue; - } - - // Does Rust pass this argument by pointer? 
- let rust_indirect = type_of::arg_is_indirect(ccx, passed_arg_tys[i]); - - debug!("argument {}, llarg_rust={}, rust_indirect={}, arg_ty={}", - i, - ccx.tn().val_to_string(llarg_rust), - rust_indirect, - ccx.tn().type_to_string(arg_ty.ty)); - - // Ensure that we always have the Rust value indirectly, - // because it makes bitcasting easier. - if !rust_indirect { - let scratch = base::alloc_ty(bcx, passed_arg_tys[i], "__arg"); - if type_is_fat_ptr(ccx.tcx(), passed_arg_tys[i]) { - Store(bcx, llargs_rust[i + offset], expr::get_dataptr(bcx, scratch)); - Store(bcx, llargs_rust[i + offset + 1], expr::get_meta(bcx, scratch)); - offset += 1; - } else { - base::store_ty(bcx, llarg_rust, scratch, passed_arg_tys[i]); - } - llarg_rust = scratch; - } - - debug!("llarg_rust={} (after indirection)", - ccx.tn().val_to_string(llarg_rust)); - - // Check whether we need to do any casting - match arg_ty.cast { - Some(ty) => llarg_rust = BitCast(bcx, llarg_rust, ty.ptr_to()), - None => () - } - - debug!("llarg_rust={} (after casting)", - ccx.tn().val_to_string(llarg_rust)); - - // Finally, load the value if needed for the foreign ABI - let foreign_indirect = arg_ty.is_indirect(); - let llarg_foreign = if foreign_indirect { - llarg_rust - } else { - if passed_arg_tys[i].is_bool() { - let val = LoadRangeAssert(bcx, llarg_rust, Disr(0), Disr(2), llvm::False); - Trunc(bcx, val, Type::i1(bcx.ccx())) - } else { - Load(bcx, llarg_rust) - } - }; - - debug!("argument {}, llarg_foreign={}", - i, ccx.tn().val_to_string(llarg_foreign)); - - // fill padding with undef value - match arg_ty.pad { - Some(ty) => llargs_foreign.push(C_undef(ty)), - None => () - } - llargs_foreign.push(llarg_foreign); - } - - let cc = llvm_calling_convention(ccx, fn_abi); - - // A function pointer is called without the declaration available, so we have to apply - // any attributes with ABI implications directly to the call instruction. 
- let mut attrs = llvm::AttrBuilder::new(); - - // Add attributes that are always applicable, independent of the concrete foreign ABI - if fn_type.ret_ty.is_indirect() { - let llret_sz = machine::llsize_of_real(ccx, fn_type.ret_ty.ty); - - // The outptr can be noalias and nocapture because it's entirely - // invisible to the program. We also know it's nonnull as well - // as how many bytes we can dereference - attrs.arg(1, llvm::Attribute::NoAlias) - .arg(1, llvm::Attribute::NoCapture) - .arg(1, llvm::DereferenceableAttribute(llret_sz)); - }; - - // Add attributes that depend on the concrete foreign ABI - let mut arg_idx = if fn_type.ret_ty.is_indirect() { 1 } else { 0 }; - match fn_type.ret_ty.attr { - Some(attr) => { attrs.arg(arg_idx, attr); }, - _ => () - } - - arg_idx += 1; - for arg_ty in &fn_type.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // skip padding - if arg_ty.pad.is_some() { arg_idx += 1; } - - if let Some(attr) = arg_ty.attr { - attrs.arg(arg_idx, attr); - } - - arg_idx += 1; - } - - let llforeign_retval = CallWithConv(bcx, - llfn, - &llargs_foreign[..], - cc, - Some(attrs), - call_debug_loc); - - // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. e.g., a {u32,u32} struct could be returned as - // u64. 
- if llsig.ret_def && !fn_type.ret_ty.is_indirect() { - let llrust_ret_ty = llsig.llret_ty; - let llforeign_ret_ty = match fn_type.ret_ty.cast { - Some(ty) => ty, - None => fn_type.ret_ty.ty - }; - - debug!("llretptr={}", ccx.tn().val_to_string(llretptr)); - debug!("llforeign_retval={}", ccx.tn().val_to_string(llforeign_retval)); - debug!("llrust_ret_ty={}", ccx.tn().type_to_string(llrust_ret_ty)); - debug!("llforeign_ret_ty={}", ccx.tn().type_to_string(llforeign_ret_ty)); - - if llrust_ret_ty == llforeign_ret_ty { - match fn_sig.output { - ty::FnConverging(result_ty) => { - base::store_ty(bcx, llforeign_retval, llretptr, result_ty) - } - ty::FnDiverging => {} - } - } else { - // The actual return type is a struct, but the ABI - // adaptation code has cast it into some scalar type. The - // code that follows is the only reliable way I have - // found to do a transform like i64 -> {i32,i32}. - // Basically we dump the data onto the stack then memcpy it. - // - // Other approaches I tried: - // - Casting rust ret pointer to the foreign type and using Store - // is (a) unsafe if size of foreign type > size of rust type and - // (b) runs afoul of strict aliasing rules, yielding invalid - // assembly under -O (specifically, the store gets removed). - // - Truncating foreign type to correct integral type and then - // bitcasting to the struct type yields invalid cast errors. 
- let llscratch = base::alloca(bcx, llforeign_ret_ty, "__cast"); - base::call_lifetime_start(bcx, llscratch); - Store(bcx, llforeign_retval, llscratch); - let llscratch_i8 = BitCast(bcx, llscratch, Type::i8(ccx).ptr_to()); - let llretptr_i8 = BitCast(bcx, llretptr, Type::i8(ccx).ptr_to()); - let llrust_size = machine::llsize_of_store(ccx, llrust_ret_ty); - let llforeign_align = machine::llalign_of_min(ccx, llforeign_ret_ty); - let llrust_align = machine::llalign_of_min(ccx, llrust_ret_ty); - let llalign = cmp::min(llforeign_align, llrust_align); - debug!("llrust_size={}", llrust_size); - base::call_memcpy(bcx, llretptr_i8, llscratch_i8, - C_uint(ccx, llrust_size), llalign as u32); - base::call_lifetime_end(bcx, llscratch); - } - } - - return bcx; -} - -// feature gate SIMD types in FFI, since I (huonw) am not sure the -// ABIs are handled at all correctly. -fn gate_simd_ffi(tcx: &ty::ctxt, decl: &hir::FnDecl, ty: &ty::BareFnTy) { - if !tcx.sess.features.borrow().simd_ffi { - let check = |ast_ty: &hir::Ty, ty: ty::Ty| { - if ty.is_simd() { - tcx.sess.struct_span_err(ast_ty.span, - &format!("use of SIMD type `{}` in FFI is highly experimental and \ - may result in invalid code", - pprust::ty_to_string(ast_ty))) - .fileline_help(ast_ty.span, - "add #![feature(simd_ffi)] to the crate attributes to enable") - .emit(); - } - }; - let sig = &ty.sig.0; - for (input, ty) in decl.inputs.iter().zip(&sig.inputs) { - check(&*input.ty, *ty) - } - if let hir::Return(ref ty) = decl.output { - check(&**ty, sig.output.unwrap()) - } - } -} - -pub fn trans_foreign_mod(ccx: &CrateContext, foreign_mod: &hir::ForeignMod) { - let _icx = push_ctxt("foreign::trans_foreign_mod"); - for foreign_item in &foreign_mod.items { - let lname = link_name(foreign_item); - - if let hir::ForeignItemFn(ref decl, _) = foreign_item.node { - match foreign_mod.abi { - Rust | RustIntrinsic | PlatformIntrinsic => {} - abi => { - let ty = ccx.tcx().node_id_to_type(foreign_item.id); - match ty.sty { - 
ty::TyBareFn(_, bft) => gate_simd_ffi(ccx.tcx(), &**decl, bft), - _ => ccx.tcx().sess.span_bug(foreign_item.span, - "foreign fn's sty isn't a bare_fn_ty?") - } - - register_foreign_item_fn(ccx, abi, ty, &lname, &foreign_item.attrs); - // Unlike for other items, we shouldn't call - // `base::update_linkage` here. Foreign items have - // special linkage requirements, which are handled - // inside `foreign::register_*`. - } - } - } - - ccx.item_symbols().borrow_mut().insert(foreign_item.id, - lname.to_string()); - } -} - -/////////////////////////////////////////////////////////////////////////// -// Rust functions with foreign ABIs -// -// These are normal Rust functions defined with foreign ABIs. For -// now, and perhaps forever, we translate these using a "layer of -// indirection". That is, given a Rust declaration like: -// -// extern "C" fn foo(i: u32) -> u32 { ... } -// -// we will generate a function like: -// -// S foo(T i) { -// S r; -// foo0(&r, NULL, i); -// return r; -// } -// -// #[inline_always] -// void foo0(uint32_t *r, void *env, uint32_t i) { ... } -// -// Here the (internal) `foo0` function follows the Rust ABI as normal, -// where the `foo` function follows the C ABI. We rely on LLVM to -// inline the one into the other. Of course we could just generate the -// correct code in the first place, but this is much simpler. 
- -pub fn decl_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - name: &str) - -> ValueRef { - let tys = foreign_types_for_fn_ty(ccx, t); - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - let cconv = match t.sty { - ty::TyBareFn(_, ref fn_ty) => { - llvm_calling_convention(ccx, fn_ty.abi) - } - _ => panic!("expected bare fn in decl_rust_fn_with_foreign_abi") - }; - let llfn = declare::declare_fn(ccx, name, cconv, llfn_ty, - ty::FnConverging(ccx.tcx().mk_nil())); - add_argument_attributes(&tys, llfn); - debug!("decl_rust_fn_with_foreign_abi(llfn_ty={}, llfn={})", - ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn)); - llfn -} - -pub fn register_rust_fn_with_foreign_abi(ccx: &CrateContext, - sp: Span, - sym: String, - node_id: ast::NodeId) - -> ValueRef { - let _icx = push_ctxt("foreign::register_foreign_fn"); - - let t = ccx.tcx().node_id_to_type(node_id); - let cconv = match t.sty { - ty::TyBareFn(_, ref fn_ty) => { - llvm_calling_convention(ccx, fn_ty.abi) - } - _ => panic!("expected bare fn in register_rust_fn_with_foreign_abi") - }; - let tys = foreign_types_for_fn_ty(ccx, t); - let llfn_ty = lltype_for_fn_from_foreign_types(ccx, &tys); - let llfn = base::register_fn_llvmty(ccx, sp, sym, node_id, cconv, llfn_ty); - add_argument_attributes(&tys, llfn); - debug!("register_rust_fn_with_foreign_abi(node_id={}, llfn_ty={}, llfn={})", - node_id, ccx.tn().type_to_string(llfn_ty), ccx.tn().val_to_string(llfn)); - llfn -} - -pub fn trans_rust_fn_with_foreign_abi<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - attrs: &[ast::Attribute], - llwrapfn: ValueRef, - param_substs: &'tcx Substs<'tcx>, - id: ast::NodeId, - hash: Option<&str>) { - let _icx = push_ctxt("foreign::build_foreign_fn"); - - let fnty = ccx.tcx().node_id_to_type(id); - let mty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &fnty); - let tys = foreign_types_for_fn_ty(ccx, mty); - - unsafe { // unsafe 
because we call LLVM operations - // Build up the Rust function (`foo0` above). - let llrustfn = build_rust_fn(ccx, decl, body, param_substs, attrs, id, hash); - - // Build up the foreign wrapper (`foo` above). - return build_wrap_fn(ccx, llrustfn, llwrapfn, &tys, mty); - } - - fn build_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - decl: &hir::FnDecl, - body: &hir::Block, - param_substs: &'tcx Substs<'tcx>, - attrs: &[ast::Attribute], - id: ast::NodeId, - hash: Option<&str>) - -> ValueRef - { - let _icx = push_ctxt("foreign::foreign::build_rust_fn"); - let tcx = ccx.tcx(); - let t = tcx.node_id_to_type(id); - let t = monomorphize::apply_param_substs(tcx, param_substs, &t); - - let path = - tcx.map.def_path_from_id(id) - .into_iter() - .map(|e| e.data.as_interned_str()) - .chain(once(special_idents::clownshoe_abi.name.as_str())); - let ps = link::mangle(path, hash); - - // Compute the type that the function would have if it were just a - // normal Rust function. This will be the type of the wrappee fn. 
- match t.sty { - ty::TyBareFn(_, ref f) => { - assert!(f.abi != Rust && f.abi != RustIntrinsic && f.abi != PlatformIntrinsic); - } - _ => { - ccx.sess().bug(&format!("build_rust_fn: extern fn {} has ty {:?}, \ - expected a bare fn ty", - ccx.tcx().map.path_to_string(id), - t)); - } - }; - - debug!("build_rust_fn: path={} id={} t={:?}", - ccx.tcx().map.path_to_string(id), - id, t); - - let llfn = declare::define_internal_rust_fn(ccx, &ps, t); - attributes::from_fn_attrs(ccx, attrs, llfn); - base::trans_fn(ccx, decl, body, llfn, param_substs, id, attrs); - llfn - } - - unsafe fn build_wrap_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - llrustfn: ValueRef, - llwrapfn: ValueRef, - tys: &ForeignTypes<'tcx>, - t: Ty<'tcx>) { - let _icx = push_ctxt( - "foreign::trans_rust_fn_with_foreign_abi::build_wrap_fn"); - - debug!("build_wrap_fn(llrustfn={}, llwrapfn={}, t={:?})", - ccx.tn().val_to_string(llrustfn), - ccx.tn().val_to_string(llwrapfn), - t); - - // Avoid all the Rust generation stuff and just generate raw - // LLVM here. - // - // We want to generate code like this: - // - // S foo(T i) { - // S r; - // foo0(&r, NULL, i); - // return r; - // } - - if llvm::LLVMCountBasicBlocks(llwrapfn) != 0 { - ccx.sess().bug("wrapping a function inside non-empty wrapper, most likely cause is \ - multiple functions being wrapped"); - } - - let ptr = "the block\0".as_ptr(); - let the_block = llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llwrapfn, - ptr as *const _); - - let builder = ccx.builder(); - builder.position_at_end(the_block); - - // Array for the arguments we will pass to the rust function. 
- let mut llrust_args = Vec::new(); - let mut next_foreign_arg_counter: c_uint = 0; - let mut next_foreign_arg = |pad: bool| -> c_uint { - next_foreign_arg_counter += if pad { - 2 - } else { - 1 - }; - next_foreign_arg_counter - 1 - }; - - // If there is an out pointer on the foreign function - let foreign_outptr = { - if tys.fn_ty.ret_ty.is_indirect() { - Some(get_param(llwrapfn, next_foreign_arg(false))) - } else { - None - } - }; - - let rustfn_ty = Type::from_ref(llvm::LLVMTypeOf(llrustfn)).element_type(); - let mut rust_param_tys = rustfn_ty.func_params().into_iter(); - // Push Rust return pointer, using null if it will be unused. - let rust_uses_outptr = match tys.fn_sig.output { - ty::FnConverging(ret_ty) => type_of::return_uses_outptr(ccx, ret_ty), - ty::FnDiverging => false - }; - let return_alloca: Option; - let llrust_ret_ty = if rust_uses_outptr { - rust_param_tys.next().expect("Missing return type!").element_type() - } else { - rustfn_ty.return_type() - }; - if rust_uses_outptr { - // Rust expects to use an outpointer. If the foreign fn - // also uses an outpointer, we can reuse it, but the types - // may vary, so cast first to the Rust type. If the - // foreign fn does NOT use an outpointer, we will have to - // alloca some scratch space on the stack. 
- match foreign_outptr { - Some(llforeign_outptr) => { - debug!("out pointer, foreign={}", - ccx.tn().val_to_string(llforeign_outptr)); - let llrust_retptr = - builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to()); - debug!("out pointer, foreign={} (casted)", - ccx.tn().val_to_string(llrust_retptr)); - llrust_args.push(llrust_retptr); - return_alloca = None; - } - - None => { - let slot = builder.alloca(llrust_ret_ty, "return_alloca"); - debug!("out pointer, \ - allocad={}, \ - llrust_ret_ty={}, \ - return_ty={:?}", - ccx.tn().val_to_string(slot), - ccx.tn().type_to_string(llrust_ret_ty), - tys.fn_sig.output); - llrust_args.push(slot); - return_alloca = Some(slot); - } - } - } else { - // Rust does not expect an outpointer. If the foreign fn - // does use an outpointer, then we will do a store of the - // value that the Rust fn returns. - return_alloca = None; - }; - - // Build up the arguments to the call to the rust function. - // Careful to adapt for cases where the native convention uses - // a pointer and Rust does not or vice versa. - for i in 0..tys.fn_sig.inputs.len() { - let rust_ty = tys.fn_sig.inputs[i]; - let rust_indirect = type_of::arg_is_indirect(ccx, rust_ty); - let llty = rust_param_tys.next().expect("Not enough parameter types!"); - let llrust_ty = if rust_indirect { - llty.element_type() - } else { - llty - }; - let llforeign_arg_ty = tys.fn_ty.arg_tys[i]; - let foreign_indirect = llforeign_arg_ty.is_indirect(); - - if llforeign_arg_ty.is_ignore() { - debug!("skipping ignored arg #{}", i); - llrust_args.push(C_undef(llrust_ty)); - continue; - } - - // skip padding - let foreign_index = next_foreign_arg(llforeign_arg_ty.pad.is_some()); - let mut llforeign_arg = get_param(llwrapfn, foreign_index); - - debug!("llforeign_arg {}{}: {}", "#", - i, ccx.tn().val_to_string(llforeign_arg)); - debug!("rust_indirect = {}, foreign_indirect = {}", - rust_indirect, foreign_indirect); - - // Ensure that the foreign argument is indirect (by - // pointer). 
It makes adapting types easier, since we can - // always just bitcast pointers. - if !foreign_indirect { - llforeign_arg = if rust_ty.is_bool() { - let lltemp = builder.alloca(Type::bool(ccx), ""); - builder.store(builder.zext(llforeign_arg, Type::bool(ccx)), lltemp); - lltemp - } else { - let lltemp = builder.alloca(val_ty(llforeign_arg), ""); - builder.store(llforeign_arg, lltemp); - lltemp - } - } - - // If the types in the ABI and the Rust types don't match, - // bitcast the llforeign_arg pointer so it matches the types - // Rust expects. - if llforeign_arg_ty.cast.is_some() && !type_is_fat_ptr(ccx.tcx(), rust_ty){ - assert!(!foreign_indirect); - llforeign_arg = builder.bitcast(llforeign_arg, llrust_ty.ptr_to()); - } - - let llrust_arg = if rust_indirect || type_is_fat_ptr(ccx.tcx(), rust_ty) { - llforeign_arg - } else { - if rust_ty.is_bool() { - let tmp = builder.load_range_assert(llforeign_arg, 0, 2, llvm::False); - builder.trunc(tmp, Type::i1(ccx)) - } else if type_of::type_of(ccx, rust_ty).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate - // LLVM type for this leads to bad optimizations, so its arg type is an - // appropriately sized integer and we have to convert it - let tmp = builder.bitcast(llforeign_arg, - type_of::arg_type_of(ccx, rust_ty).ptr_to()); - let load = builder.load(tmp); - llvm::LLVMSetAlignment(load, type_of::align_of(ccx, rust_ty)); - load - } else { - builder.load(llforeign_arg) - } - }; - - debug!("llrust_arg {}{}: {}", "#", - i, ccx.tn().val_to_string(llrust_arg)); - if type_is_fat_ptr(ccx.tcx(), rust_ty) { - let next_llrust_ty = rust_param_tys.next().expect("Not enough parameter types!"); - llrust_args.push(builder.load(builder.bitcast(builder.struct_gep( - llrust_arg, abi::FAT_PTR_ADDR), llrust_ty.ptr_to()))); - llrust_args.push(builder.load(builder.bitcast(builder.struct_gep( - llrust_arg, abi::FAT_PTR_EXTRA), next_llrust_ty.ptr_to()))); - } else { - llrust_args.push(llrust_arg); - 
} - } - - // Perform the call itself - debug!("calling llrustfn = {}, t = {:?}", - ccx.tn().val_to_string(llrustfn), t); - let attributes = attributes::from_fn_type(ccx, t); - let llrust_ret_val = builder.call(llrustfn, &llrust_args, Some(attributes)); - - // Get the return value where the foreign fn expects it. - let llforeign_ret_ty = match tys.fn_ty.ret_ty.cast { - Some(ty) => ty, - None => tys.fn_ty.ret_ty.ty - }; - match foreign_outptr { - None if !tys.llsig.ret_def => { - // Function returns `()` or `bot`, which in Rust is the LLVM - // type "{}" but in foreign ABIs is "Void". - builder.ret_void(); - } - - None if rust_uses_outptr => { - // Rust uses an outpointer, but the foreign ABI does not. Load. - let llrust_outptr = return_alloca.unwrap(); - let llforeign_outptr_casted = - builder.bitcast(llrust_outptr, llforeign_ret_ty.ptr_to()); - let llforeign_retval = builder.load(llforeign_outptr_casted); - builder.ret(llforeign_retval); - } - - None if llforeign_ret_ty != llrust_ret_ty => { - // Neither ABI uses an outpointer, but the types don't - // quite match. Must cast. Probably we should try and - // examine the types and use a concrete llvm cast, but - // right now we just use a temp memory location and - // bitcast the pointer, which is the same thing the - // old wrappers used to do. - let lltemp = builder.alloca(llforeign_ret_ty, ""); - let lltemp_casted = builder.bitcast(lltemp, llrust_ret_ty.ptr_to()); - builder.store(llrust_ret_val, lltemp_casted); - let llforeign_retval = builder.load(lltemp); - builder.ret(llforeign_retval); - } - - None => { - // Neither ABI uses an outpointer, and the types - // match. Easy peasy. - builder.ret(llrust_ret_val); - } - - Some(llforeign_outptr) if !rust_uses_outptr => { - // Foreign ABI requires an out pointer, but Rust doesn't. - // Store Rust return value. 
- let llforeign_outptr_casted = - builder.bitcast(llforeign_outptr, llrust_ret_ty.ptr_to()); - builder.store(llrust_ret_val, llforeign_outptr_casted); - builder.ret_void(); - } - - Some(_) => { - // Both ABIs use outpointers. Easy peasy. - builder.ret_void(); - } - } - } -} - -/////////////////////////////////////////////////////////////////////////// -// General ABI Support -// -// This code is kind of a confused mess and needs to be reworked given -// the massive simplifications that have occurred. - -pub fn link_name(i: &hir::ForeignItem) -> InternedString { - match attr::first_attr_value_str_by_name(&i.attrs, "link_name") { - Some(ln) => ln.clone(), - None => match weak_lang_items::link_name(&i.attrs) { - Some(name) => name, - None => i.name.as_str(), - } - } -} - -/// The ForeignSignature is the LLVM types of the arguments/return type of a function. Note that -/// these LLVM types are not quite the same as the LLVM types would be for a native Rust function -/// because foreign functions just plain ignore modes. They also don't pass aggregate values by -/// pointer like we do. 
-fn foreign_signature<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_sig: &ty::FnSig<'tcx>, - arg_tys: &[Ty<'tcx>]) - -> LlvmSignature { - let llarg_tys = arg_tys.iter().map(|&arg| foreign_arg_type_of(ccx, arg)).collect(); - let (llret_ty, ret_def) = match fn_sig.output { - ty::FnConverging(ret_ty) => - (type_of::foreign_arg_type_of(ccx, ret_ty), !return_type_is_void(ccx, ret_ty)), - ty::FnDiverging => - (Type::nil(ccx), false) - }; - LlvmSignature { - llarg_tys: llarg_tys, - llret_ty: llret_ty, - ret_def: ret_def - } -} - -fn foreign_types_for_fn_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> ForeignTypes<'tcx> { - let fn_sig = match ty.sty { - ty::TyBareFn(_, ref fn_ty) => &fn_ty.sig, - _ => ccx.sess().bug("foreign_types_for_fn_ty called on non-function type") - }; - let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig); - let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig); - let llsig = foreign_signature(ccx, &fn_sig, &fn_sig.inputs); - let fn_ty = cabi::compute_abi_info(ccx, - &llsig.llarg_tys, - llsig.llret_ty, - llsig.ret_def); - debug!("foreign_types_for_fn_ty(\ - ty={:?}, \ - llsig={} -> {}, \ - fn_ty={} -> {}, \ - ret_def={}", - ty, - ccx.tn().types_to_str(&llsig.llarg_tys), - ccx.tn().type_to_string(llsig.llret_ty), - ccx.tn().types_to_str(&fn_ty.arg_tys.iter().map(|t| t.ty).collect::>()), - ccx.tn().type_to_string(fn_ty.ret_ty.ty), - llsig.ret_def); - - ForeignTypes { - fn_sig: fn_sig, - llsig: llsig, - fn_ty: fn_ty - } -} - -fn lltype_for_fn_from_foreign_types(ccx: &CrateContext, tys: &ForeignTypes) -> Type { - let mut llargument_tys = Vec::new(); - - let ret_ty = tys.fn_ty.ret_ty; - let llreturn_ty = if ret_ty.is_indirect() { - llargument_tys.push(ret_ty.ty.ptr_to()); - Type::void(ccx) - } else { - match ret_ty.cast { - Some(ty) => ty, - None => ret_ty.ty - } - }; - - for &arg_ty in &tys.fn_ty.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // add padding - match arg_ty.pad { - Some(ty) => llargument_tys.push(ty), - 
None => () - } - - let llarg_ty = if arg_ty.is_indirect() { - arg_ty.ty.ptr_to() - } else { - match arg_ty.cast { - Some(ty) => ty, - None => arg_ty.ty - } - }; - - llargument_tys.push(llarg_ty); - } - - if tys.fn_sig.variadic { - Type::variadic_func(&llargument_tys, &llreturn_ty) - } else { - Type::func(&llargument_tys[..], &llreturn_ty) - } -} - -pub fn lltype_for_foreign_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> Type { - lltype_for_fn_from_foreign_types(ccx, &foreign_types_for_fn_ty(ccx, ty)) -} - -fn add_argument_attributes(tys: &ForeignTypes, - llfn: ValueRef) { - let mut i = if tys.fn_ty.ret_ty.is_indirect() { - 1 - } else { - 0 - }; - - match tys.fn_ty.ret_ty.attr { - Some(attr) => unsafe { - llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64); - }, - None => {} - } - - i += 1; - - for &arg_ty in &tys.fn_ty.arg_tys { - if arg_ty.is_ignore() { - continue; - } - // skip padding - if arg_ty.pad.is_some() { i += 1; } - - match arg_ty.attr { - Some(attr) => unsafe { - llvm::LLVMAddFunctionAttribute(llfn, i as c_uint, attr.bits() as u64); - }, - None => () - } - - i += 1; - } -} diff --git a/src/librustc_trans/trans/glue.rs b/src/librustc_trans/trans/glue.rs deleted file mode 100644 index a1165ffe171d0..0000000000000 --- a/src/librustc_trans/trans/glue.rs +++ /dev/null @@ -1,604 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! -// -// Code relating to drop glue. 
- -use std; - -use back::link::*; -use llvm; -use llvm::{ValueRef, get_param}; -use middle::lang_items::ExchangeFreeFnLangItem; -use middle::subst::{Substs}; -use middle::traits; -use middle::ty::{self, Ty}; -use trans::adt; -use trans::adt::GetDtorType; // for tcx.dtor_type() -use trans::base::*; -use trans::build::*; -use trans::callee; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::common::*; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::machine::*; -use trans::monomorphize; -use trans::type_of::{type_of, sizing_type_of, align_of}; -use trans::type_::Type; - -use arena::TypedArena; -use libc::c_uint; -use syntax::ast; -use syntax::codemap::DUMMY_SP; - -pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - v: ValueRef, - size: ValueRef, - align: ValueRef, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_exchange_free"); - let ccx = cx.ccx(); - callee::trans_lang_call(cx, - langcall(cx, None, "", ExchangeFreeFnLangItem), - &[PointerCast(cx, v, Type::i8p(ccx)), size, align], - Some(expr::Ignore), - debug_loc).bcx -} - -pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - v: ValueRef, - size: u64, - align: u32, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - trans_exchange_free_dyn(cx, - v, - C_uint(cx.ccx(), size), - C_uint(cx.ccx(), align), - debug_loc) -} - -pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ptr: ValueRef, - content_ty: Ty<'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); - let sizing_type = sizing_type_of(bcx.ccx(), content_ty); - let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); - - // `Box` does not allocate. 
- if content_size != 0 { - let content_align = align_of(bcx.ccx(), content_ty); - trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc) - } else { - bcx - } -} - -fn type_needs_drop<'tcx>(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> bool { - tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment()) -} - -pub fn get_drop_glue_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>) -> Ty<'tcx> { - let tcx = ccx.tcx(); - // Even if there is no dtor for t, there might be one deeper down and we - // might need to pass in the vtable ptr. - if !type_is_sized(tcx, t) { - return t - } - - // FIXME (#22815): note that type_needs_drop conservatively - // approximates in some cases and may say a type expression - // requires drop glue when it actually does not. - // - // (In this case it is not clear whether any harm is done, i.e. - // erroneously returning `t` in some cases where we could have - // returned `tcx.types.i8` does not appear unsound. The impact on - // code quality is unknown at this time.) - - if !type_needs_drop(&tcx, t) { - return tcx.types.i8; - } - match t.sty { - ty::TyBox(typ) if !type_needs_drop(&tcx, typ) - && type_is_sized(tcx, typ) => { - let llty = sizing_type_of(ccx, typ); - // `Box` does not allocate. - if llsize_of_alloc(ccx, llty) == 0 { - tcx.types.i8 - } else { - t - } - } - _ => t - } -} - -pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc) -> Block<'blk, 'tcx> { - drop_ty_core(bcx, v, t, debug_loc, false, None) -} - -pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc, - skip_dtor: bool, - drop_hint: Option) - -> Block<'blk, 'tcx> { - // NB: v is an *alias* of type t here, not a direct value. 
- debug!("drop_ty_core(t={:?}, skip_dtor={} drop_hint={:?})", t, skip_dtor, drop_hint); - let _icx = push_ctxt("drop_ty"); - let mut bcx = bcx; - if bcx.fcx.type_needs_drop(t) { - let ccx = bcx.ccx(); - let g = if skip_dtor { - DropGlueKind::TyContents(t) - } else { - DropGlueKind::Ty(t) - }; - let glue = get_drop_glue_core(ccx, g); - let glue_type = get_drop_glue_type(ccx, t); - let ptr = if glue_type != t { - PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) - } else { - v - }; - - match drop_hint { - Some(drop_hint) => { - let hint_val = load_ty(bcx, drop_hint.value(), bcx.tcx().types.u8); - let moved_val = - C_integral(Type::i8(bcx.ccx()), adt::DTOR_MOVED_HINT as u64, false); - let may_need_drop = - ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None); - bcx = with_cond(bcx, may_need_drop, |cx| { - Call(cx, glue, &[ptr], None, debug_loc); - cx - }) - } - None => { - // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], None, debug_loc); - } - } - } - bcx -} - -pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc, - skip_dtor: bool) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("drop_ty_immediate"); - let vp = alloc_ty(bcx, t, ""); - call_lifetime_start(bcx, vp); - store_ty(bcx, v, vp, t); - let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor, None); - call_lifetime_end(bcx, vp); - bcx -} - -pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { - get_drop_glue_core(ccx, DropGlueKind::Ty(t)) -} - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] -pub enum DropGlueKind<'tcx> { - /// The normal path; runs the dtor, and then recurs on the contents - Ty(Ty<'tcx>), - /// Skips the dtor, if any, for ty; drops the contents directly. - /// Note that the dtor is only skipped at the most *shallow* - /// level, namely, an `impl Drop for Ty` itself. 
So, for example, - /// if Ty is Newtype(S) then only the Drop impl for Newtype itself - /// will be skipped, while the Drop impl for S, if any, will be - /// invoked. - TyContents(Ty<'tcx>), -} - -impl<'tcx> DropGlueKind<'tcx> { - fn ty(&self) -> Ty<'tcx> { - match *self { DropGlueKind::Ty(t) | DropGlueKind::TyContents(t) => t } - } - - fn map_ty(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) -> Ty<'tcx> - { - match *self { - DropGlueKind::Ty(t) => DropGlueKind::Ty(f(t)), - DropGlueKind::TyContents(t) => DropGlueKind::TyContents(f(t)), - } - } -} - -fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - g: DropGlueKind<'tcx>) -> ValueRef { - debug!("make drop glue for {:?}", g); - let g = g.map_ty(|t| get_drop_glue_type(ccx, t)); - debug!("drop glue type {:?}", g); - match ccx.drop_glues().borrow().get(&g) { - Some(&glue) => return glue, - _ => { } - } - let t = g.ty(); - - let llty = if type_is_sized(ccx.tcx(), t) { - type_of(ccx, t).ptr_to() - } else { - type_of(ccx, ccx.tcx().mk_box(t)).ptr_to() - }; - - let llfnty = Type::glue_fn(ccx, llty); - - // To avoid infinite recursion, don't `make_drop_glue` until after we've - // added the entry to the `drop_glues` cache. 
- if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) { - let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil()); - ccx.drop_glues().borrow_mut().insert(g, llfn); - return llfn; - }; - - let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop"); - let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{ - ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm)); - }); - ccx.available_drop_glues().borrow_mut().insert(g, fn_nm); - - let _s = StatRecorder::new(ccx, format!("drop {:?}", t)); - - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false, - ty::FnConverging(ccx.tcx().mk_nil()), - empty_substs, None, &arena); - - let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil())); - - update_linkage(ccx, llfn, None, OriginalTranslation); - - ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); - // All glue functions take values passed *by alias*; this is a - // requirement since in many contexts glue is invoked indirectly and - // the caller has no idea if it's dealing with something that can be - // passed by value. - // - // llfn is expected be declared to take a parameter of the appropriate - // type, so we don't need to explicitly cast the function parameter. 
- - let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint); - let bcx = make_drop_glue(bcx, llrawptr0, g); - finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None); - - llfn -} - -fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - struct_data: ValueRef) - -> Block<'blk, 'tcx> { - assert!(type_is_sized(bcx.tcx(), t), "Precondition: caller must ensure t is sized"); - - let repr = adt::represent_type(bcx.ccx(), t); - let drop_flag = unpack_datum!(bcx, adt::trans_drop_flag_ptr(bcx, &*repr, struct_data)); - let loaded = load_ty(bcx, drop_flag.val, bcx.tcx().dtor_type()); - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let init_val = C_integral(drop_flag_llty, adt::DTOR_NEEDED as u64, false); - - let bcx = if !bcx.ccx().check_drop_flag_for_sanity() { - bcx - } else { - let drop_flag_llty = type_of(bcx.fcx.ccx, bcx.tcx().dtor_type()); - let done_val = C_integral(drop_flag_llty, adt::DTOR_DONE as u64, false); - let not_init = ICmp(bcx, llvm::IntNE, loaded, init_val, DebugLoc::None); - let not_done = ICmp(bcx, llvm::IntNE, loaded, done_val, DebugLoc::None); - let drop_flag_neither_initialized_nor_cleared = - And(bcx, not_init, not_done, DebugLoc::None); - with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { - let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); - Call(cx, llfn, &[], None, DebugLoc::None); - cx - }) - }; - - let drop_flag_dtor_needed = ICmp(bcx, llvm::IntEQ, loaded, init_val, DebugLoc::None); - with_cond(bcx, drop_flag_dtor_needed, |cx| { - trans_struct_drop(cx, t, struct_data) - }) -} -fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - v0: ValueRef) - -> Block<'blk, 'tcx> -{ - debug!("trans_struct_drop t: {}", t); - let tcx = bcx.tcx(); - let mut bcx = bcx; - - let def = t.ty_adt_def().unwrap(); - - // Be sure to put the contents into a scope so we can use an invoke - // instruction to call the user destructor but still call the field - // 
destructors if the user destructor panics. - // - // FIXME (#14875) panic-in-drop semantics might be unsupported; we - // might well consider changing below to more direct code. - let contents_scope = bcx.fcx.push_custom_cleanup_scope(); - - // Issue #23611: schedule cleanup of contents, re-inspecting the - // discriminant (if any) in case of variant swap in drop code. - bcx.fcx.schedule_drop_adt_contents(cleanup::CustomScope(contents_scope), v0, t); - - let (sized_args, unsized_args); - let args: &[ValueRef] = if type_is_sized(tcx, t) { - sized_args = [v0]; - &sized_args - } else { - unsized_args = [Load(bcx, expr::get_dataptr(bcx, v0)), Load(bcx, expr::get_meta(bcx, v0))]; - &unsized_args - }; - - bcx = callee::trans_call_inner(bcx, DebugLoc::None, |bcx, _| { - let trait_ref = ty::Binder(ty::TraitRef { - def_id: tcx.lang_items.drop_trait().unwrap(), - substs: tcx.mk_substs(Substs::trans_empty().with_self_ty(t)) - }); - let vtbl = match fulfill_obligation(bcx.ccx(), DUMMY_SP, trait_ref) { - traits::VtableImpl(data) => data, - _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t)) - }; - let dtor_did = def.destructor().unwrap(); - let datum = callee::trans_fn_ref_with_substs(bcx.ccx(), - dtor_did, - ExprId(0), - bcx.fcx.param_substs, - vtbl.substs); - callee::Callee { - bcx: bcx, - data: callee::Fn(datum.val), - ty: datum.ty - } - }, callee::ArgVals(args), Some(expr::Ignore)).bcx; - - bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) -} - -pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef) - -> (ValueRef, ValueRef) { - debug!("calculate size of DST: {}; with lost info: {}", - t, bcx.val_to_string(info)); - if type_is_sized(bcx.tcx(), t) { - let sizing_type = sizing_type_of(bcx.ccx(), t); - let size = llsize_of_alloc(bcx.ccx(), sizing_type); - let align = align_of(bcx.ccx(), t); - debug!("size_and_align_of_dst t={} info={} size: {} align: {}", - t, bcx.val_to_string(info), size, align); - let size = 
C_uint(bcx.ccx(), size); - let align = C_uint(bcx.ccx(), align); - return (size, align); - } - match t.sty { - ty::TyStruct(def, substs) => { - let ccx = bcx.ccx(); - // First get the size of all statically known fields. - // Don't use type_of::sizing_type_of because that expects t to be sized. - assert!(!t.is_simd()); - let repr = adt::represent_type(ccx, t); - let sizing_type = adt::sizing_type_context_of(ccx, &*repr, true); - debug!("DST {} sizing_type: {}", t, sizing_type.to_string()); - let sized_size = llsize_of_alloc(ccx, sizing_type.prefix()); - let sized_align = llalign_of_min(ccx, sizing_type.prefix()); - debug!("DST {} statically sized prefix size: {} align: {}", - t, sized_size, sized_align); - let sized_size = C_uint(ccx, sized_size); - let sized_align = C_uint(ccx, sized_align); - - // Recurse to get the size of the dynamically sized field (must be - // the last field). - let last_field = def.struct_variant().fields.last().unwrap(); - let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); - let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); - - let dbloc = DebugLoc::None; - - // FIXME (#26403, #27023): We should be adding padding - // to `sized_size` (to accommodate the `unsized_align` - // required of the unsized field that follows) before - // summing it with `sized_size`. (Note that since #26403 - // is unfixed, we do not yet add the necessary padding - // here. But this is where the add would go.) - - // Return the sum of sizes and max of aligns. - let mut size = Add(bcx, sized_size, unsized_size, dbloc); - - // Issue #27023: If there is a drop flag, *now* we add 1 - // to the size. (We can do this without adding any - // padding because drop flags do not have any alignment - // constraints.) 
- if sizing_type.needs_drop_flag() { - size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc); - } - - // Choose max of two known alignments (combined value must - // be aligned according to more restrictive of the two). - let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) { - (Some(sized_align), Some(unsized_align)) => { - // If both alignments are constant, (the sized_align should always be), then - // pick the correct alignment statically. - C_uint(ccx, std::cmp::max(sized_align, unsized_align)) - } - _ => Select(bcx, - ICmp(bcx, - llvm::IntUGT, - sized_align, - unsized_align, - dbloc), - sized_align, - unsized_align) - }; - - // Issue #27023: must add any necessary padding to `size` - // (to make it a multiple of `align`) before returning it. - // - // Namely, the returned size should be, in C notation: - // - // `size + ((size & (align-1)) ? align : 0)` - // - // emulated via the semi-standard fast bit trick: - // - // `(size + (align-1)) & -align` - - let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc); - let size = And( - bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc); - - (size, align) - } - ty::TyTrait(..) => { - // info points to the vtable and the second entry in the vtable is the - // dynamic size of the object. - let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to()); - let size_ptr = GEPi(bcx, info, &[1]); - let align_ptr = GEPi(bcx, info, &[2]); - (Load(bcx, size_ptr), Load(bcx, align_ptr)) - } - ty::TySlice(_) | ty::TyStr => { - let unit_ty = t.sequence_element_type(bcx.tcx()); - // The info in this case is the length of the str, so the size is that - // times the unit size. 
- let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); - let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); - let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); - (Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None), - C_uint(bcx.ccx(), unit_align)) - } - _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t)) - } -} - -fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueKind<'tcx>) - -> Block<'blk, 'tcx> { - let t = g.ty(); - let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; - // NB: v0 is an *alias* of type t here, not a direct value. - let _icx = push_ctxt("make_drop_glue"); - - // Only drop the value when it ... well, we used to check for - // non-null, (and maybe we need to continue doing so), but we now - // must definitely check for special bit-patterns corresponding to - // the special dtor markings. - - let inttype = Type::int(bcx.ccx()); - let dropped_pattern = C_integral(inttype, adt::dtor_done_usize(bcx.fcx.ccx) as u64, false); - - match t.sty { - ty::TyBox(content_ty) => { - // Support for TyBox is built-in and its drop glue is - // special. It may move to library and have Drop impl. As - // a safe-guard, assert TyBox not used with TyContents. - assert!(!skip_dtor); - if !type_is_sized(bcx.tcx(), content_ty) { - let llval = expr::get_dataptr(bcx, v0); - let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, Type::int(bcx.ccx())); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); - let info = expr::get_meta(bcx, v0); - let info = Load(bcx, info); - let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info); - - // `Box` does not allocate. 
- let needs_free = ICmp(bcx, - llvm::IntNE, - llsize, - C_uint(bcx.ccx(), 0u64), - DebugLoc::None); - with_cond(bcx, needs_free, |bcx| { - trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) - }) - }) - } else { - let llval = v0; - let llbox = Load(bcx, llval); - let llbox_as_usize = PtrToInt(bcx, llbox, inttype); - let drop_flag_not_dropped_already = - ICmp(bcx, llvm::IntNE, llbox_as_usize, dropped_pattern, DebugLoc::None); - with_cond(bcx, drop_flag_not_dropped_already, |bcx| { - let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); - trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) - }) - } - } - ty::TyStruct(def, _) | ty::TyEnum(def, _) => { - match (def.dtor_kind(), skip_dtor) { - (ty::TraitDtor(true), false) => { - // FIXME(16758) Since the struct is unsized, it is hard to - // find the drop flag (which is at the end of the struct). - // Lets just ignore the flag and pretend everything will be - // OK. - if type_is_sized(bcx.tcx(), t) { - trans_struct_drop_flag(bcx, t, v0) - } else { - // Give the user a heads up that we are doing something - // stupid and dangerous. - bcx.sess().warn(&format!("Ignoring drop flag in destructor for {}\ - because the struct is unsized. See issue\ - #16758", t)); - trans_struct_drop(bcx, t, v0) - } - } - (ty::TraitDtor(false), false) => { - trans_struct_drop(bcx, t, v0) - } - (ty::NoDtor, _) | (_, true) => { - // No dtor? Just the default case - iter_structural_ty(bcx, v0, t, |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) - } - } - } - ty::TyTrait(..) => { - // No support in vtable for distinguishing destroying with - // versus without calling Drop::drop. Assert caller is - // okay with always calling the Drop impl, if any. 
- assert!(!skip_dtor); - let data_ptr = expr::get_dataptr(bcx, v0); - let vtable_ptr = Load(bcx, expr::get_meta(bcx, v0)); - let dtor = Load(bcx, vtable_ptr); - Call(bcx, - dtor, - &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], - None, - DebugLoc::None); - bcx - } - _ => { - if bcx.fcx.type_needs_drop(t) { - iter_structural_ty(bcx, - v0, - t, - |bb, vv, tt| drop_ty(bb, vv, tt, DebugLoc::None)) - } else { - bcx - } - } - } -} diff --git a/src/librustc_trans/trans/inline.rs b/src/librustc_trans/trans/inline.rs deleted file mode 100644 index baf244c2e7960..0000000000000 --- a/src/librustc_trans/trans/inline.rs +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage}; -use middle::cstore::{CrateStore, FoundAst, InlinedItem}; -use middle::def_id::DefId; -use middle::subst::Substs; -use trans::base::{push_ctxt, trans_item, get_item_val, trans_fn}; -use trans::common::*; - -use rustc::dep_graph::DepNode; -use rustc_front::hir; - -fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option { - debug!("instantiate_inline({:?})", fn_id); - let _icx = push_ctxt("instantiate_inline"); - let _task = ccx.tcx().dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); - - match ccx.external().borrow().get(&fn_id) { - Some(&Some(node_id)) => { - // Already inline - debug!("instantiate_inline({}): already inline as node id {}", - ccx.tcx().item_path_str(fn_id), node_id); - let node_def_id = ccx.tcx().map.local_def_id(node_id); - return Some(node_def_id); - } - Some(&None) => { - return None; // Not inlinable - } - None => { - // Not seen yet - } - } - - let inlined = ccx.tcx().sess.cstore.maybe_get_item_ast(ccx.tcx(), fn_id); - let inline_id = match inlined { - FoundAst::NotFound => { - ccx.external().borrow_mut().insert(fn_id, None); - return None; - } - FoundAst::Found(&InlinedItem::Item(ref item)) => { - ccx.external().borrow_mut().insert(fn_id, Some(item.id)); - ccx.external_srcs().borrow_mut().insert(item.id, fn_id); - - ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1); - trans_item(ccx, item); - - let linkage = match item.node { - hir::ItemFn(_, _, _, _, ref generics, _) => { - if generics.is_type_parameterized() { - // Generics have no symbol, so they can't be given any - // linkage. - None - } else { - if ccx.sess().opts.cg.codegen_units == 1 { - // We could use AvailableExternallyLinkage here, - // but InternalLinkage allows LLVM to optimize more - // aggressively (at the cost of sometimes - // duplicating code). - Some(InternalLinkage) - } else { - // With multiple compilation units, duplicated code - // is more of a problem. 
Also, `codegen_units > 1` - // means the user is okay with losing some - // performance. - Some(AvailableExternallyLinkage) - } - } - } - hir::ItemConst(..) => None, - _ => unreachable!(), - }; - - match linkage { - Some(linkage) => { - let g = get_item_val(ccx, item.id); - SetLinkage(g, linkage); - } - None => {} - } - - item.id - } - FoundAst::Found(&InlinedItem::Foreign(ref item)) => { - ccx.external().borrow_mut().insert(fn_id, Some(item.id)); - ccx.external_srcs().borrow_mut().insert(item.id, fn_id); - item.id - } - FoundAst::FoundParent(parent_id, &InlinedItem::Item(ref item)) => { - ccx.external().borrow_mut().insert(parent_id, Some(item.id)); - ccx.external_srcs().borrow_mut().insert(item.id, parent_id); - - let mut my_id = 0; - match item.node { - hir::ItemEnum(ref ast_def, _) => { - let ast_vs = &ast_def.variants; - let ty_vs = &ccx.tcx().lookup_adt_def(parent_id).variants; - assert_eq!(ast_vs.len(), ty_vs.len()); - for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) { - if ty_v.did == fn_id { my_id = ast_v.node.data.id(); } - ccx.external().borrow_mut().insert(ty_v.did, Some(ast_v.node.data.id())); - } - } - hir::ItemStruct(ref struct_def, _) => { - if struct_def.is_struct() { - ccx.sess().bug("instantiate_inline: called on a \ - non-tuple struct") - } else { - ccx.external().borrow_mut().insert(fn_id, Some(struct_def.id())); - my_id = struct_def.id(); - } - } - _ => ccx.sess().bug("instantiate_inline: item has a \ - non-enum, non-struct parent") - } - trans_item(ccx, &**item); - my_id - } - FoundAst::FoundParent(_, _) => { - ccx.sess().bug("maybe_get_item_ast returned a FoundParent \ - with a non-item parent"); - } - FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => { - ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id)); - ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id); - - ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1); - - // Associated consts already have to be evaluated in `typeck`, so - // the 
logic to do that already exists in `middle`. In order to - // reuse that code, it needs to be able to look up the traits for - // inlined items. - let ty_trait_item = ccx.tcx().impl_or_trait_item(fn_id).clone(); - let trait_item_def_id = ccx.tcx().map.local_def_id(trait_item.id); - ccx.tcx().impl_or_trait_items.borrow_mut() - .insert(trait_item_def_id, ty_trait_item); - - // If this is a default method, we can't look up the - // impl type. But we aren't going to translate anyways, so - // don't. - trait_item.id - } - FoundAst::Found(&InlinedItem::ImplItem(impl_did, ref impl_item)) => { - ccx.external().borrow_mut().insert(fn_id, Some(impl_item.id)); - ccx.external_srcs().borrow_mut().insert(impl_item.id, fn_id); - - ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1); - - // Translate monomorphic impl methods immediately. - if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node { - let impl_tpt = ccx.tcx().lookup_item_type(impl_did); - if impl_tpt.generics.types.is_empty() && - sig.generics.ty_params.is_empty() { - let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); - let llfn = get_item_val(ccx, impl_item.id); - trans_fn(ccx, - &sig.decl, - body, - llfn, - empty_substs, - impl_item.id, - &impl_item.attrs); - // See linkage comments on items. 
- if ccx.sess().opts.cg.codegen_units == 1 { - SetLinkage(llfn, InternalLinkage); - } else { - SetLinkage(llfn, AvailableExternallyLinkage); - } - } - } - - impl_item.id - } - }; - - let inline_def_id = ccx.tcx().map.local_def_id(inline_id); - Some(inline_def_id) -} - -pub fn get_local_instance(ccx: &CrateContext, fn_id: DefId) - -> Option { - if let Some(_) = ccx.tcx().map.as_local_node_id(fn_id) { - Some(fn_id) - } else { - instantiate_inline(ccx, fn_id) - } -} - -pub fn maybe_instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> DefId { - get_local_instance(ccx, fn_id).unwrap_or(fn_id) -} diff --git a/src/librustc_trans/trans/intrinsic.rs b/src/librustc_trans/trans/intrinsic.rs deleted file mode 100644 index 0c9b076cb650a..0000000000000 --- a/src/librustc_trans/trans/intrinsic.rs +++ /dev/null @@ -1,1590 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(non_upper_case_globals)] - -use arena::TypedArena; -use intrinsics::{self, Intrinsic}; -use libc; -use llvm; -use llvm::{ValueRef, TypeKind}; -use middle::infer; -use middle::subst; -use middle::subst::FnSpace; -use trans::adt; -use trans::attributes; -use trans::base::*; -use trans::build::*; -use trans::callee; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::common::*; -use trans::consts; -use trans::datum::*; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::glue; -use trans::type_of; -use trans::machine; -use trans::type_::Type; -use middle::ty::{self, Ty, TypeFoldable}; -use trans::Disr; -use middle::subst::Substs; -use rustc::dep_graph::DepNode; -use rustc_front::hir; -use syntax::abi::{self, RustIntrinsic}; -use syntax::ast; -use syntax::ptr::P; -use syntax::parse::token; - -use rustc::session::Session; -use syntax::codemap::Span; - -use std::cmp::Ordering; - -pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option { - let name = match &*item.name.as_str() { - "sqrtf32" => "llvm.sqrt.f32", - "sqrtf64" => "llvm.sqrt.f64", - "powif32" => "llvm.powi.f32", - "powif64" => "llvm.powi.f64", - "sinf32" => "llvm.sin.f32", - "sinf64" => "llvm.sin.f64", - "cosf32" => "llvm.cos.f32", - "cosf64" => "llvm.cos.f64", - "powf32" => "llvm.pow.f32", - "powf64" => "llvm.pow.f64", - "expf32" => "llvm.exp.f32", - "expf64" => "llvm.exp.f64", - "exp2f32" => "llvm.exp2.f32", - "exp2f64" => "llvm.exp2.f64", - "logf32" => "llvm.log.f32", - "logf64" => "llvm.log.f64", - "log10f32" => "llvm.log10.f32", - "log10f64" => "llvm.log10.f64", - "log2f32" => "llvm.log2.f32", - "log2f64" => "llvm.log2.f64", - "fmaf32" => "llvm.fma.f32", - "fmaf64" => "llvm.fma.f64", - "fabsf32" => "llvm.fabs.f32", - "fabsf64" => "llvm.fabs.f64", - "copysignf32" => "llvm.copysign.f32", - "copysignf64" => "llvm.copysign.f64", - "floorf32" => "llvm.floor.f32", - "floorf64" => "llvm.floor.f64", - "ceilf32" => 
"llvm.ceil.f32", - "ceilf64" => "llvm.ceil.f64", - "truncf32" => "llvm.trunc.f32", - "truncf64" => "llvm.trunc.f64", - "rintf32" => "llvm.rint.f32", - "rintf64" => "llvm.rint.f64", - "nearbyintf32" => "llvm.nearbyint.f32", - "nearbyintf64" => "llvm.nearbyint.f64", - "roundf32" => "llvm.round.f32", - "roundf64" => "llvm.round.f64", - "assume" => "llvm.assume", - _ => return None - }; - Some(ccx.get_intrinsic(&name)) -} - -pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) { - span_err!(a, b, E0512, "{}", msg); -} - -/// Performs late verification that intrinsics are used correctly. At present, -/// the only intrinsic that needs such verification is `transmute`. -pub fn check_intrinsics(ccx: &CrateContext) { - let _task = ccx.tcx().dep_graph.in_task(DepNode::IntrinsicUseCheck); - let mut last_failing_id = None; - for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() { - // Sometimes, a single call to transmute will push multiple - // type pairs to test in order to exhaustively test the - // possibility around a type parameter. If one of those fails, - // there is no sense reporting errors on the others. 
- if last_failing_id == Some(transmute_restriction.id) { - continue; - } - - debug!("transmute_restriction: {:?}", transmute_restriction); - - assert!(!transmute_restriction.substituted_from.has_param_types()); - assert!(!transmute_restriction.substituted_to.has_param_types()); - - let llfromtype = type_of::sizing_type_of(ccx, - transmute_restriction.substituted_from); - let lltotype = type_of::sizing_type_of(ccx, - transmute_restriction.substituted_to); - let from_type_size = machine::llbitsize_of_real(ccx, llfromtype); - let to_type_size = machine::llbitsize_of_real(ccx, lltotype); - if from_type_size != to_type_size { - last_failing_id = Some(transmute_restriction.id); - - if transmute_restriction.original_from != transmute_restriction.substituted_from { - span_transmute_size_error(ccx.sess(), transmute_restriction.span, - &format!("transmute called with differently sized types: \ - {} (could be {} bit{}) to {} (could be {} bit{})", - transmute_restriction.original_from, - from_type_size as usize, - if from_type_size == 1 {""} else {"s"}, - transmute_restriction.original_to, - to_type_size as usize, - if to_type_size == 1 {""} else {"s"})); - } else { - span_transmute_size_error(ccx.sess(), transmute_restriction.span, - &format!("transmute called with differently sized types: \ - {} ({} bit{}) to {} ({} bit{})", - transmute_restriction.original_from, - from_type_size as usize, - if from_type_size == 1 {""} else {"s"}, - transmute_restriction.original_to, - to_type_size as usize, - if to_type_size == 1 {""} else {"s"})); - } - } - } - ccx.sess().abort_if_errors(); -} - -/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, -/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, -/// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - node: ast::NodeId, - callee_ty: Ty<'tcx>, - cleanup_scope: cleanup::CustomScopeIndex, - args: callee::CallArgs<'a, 'tcx>, - 
dest: expr::Dest, - substs: subst::Substs<'tcx>, - call_info: NodeIdAndSpan) - -> Result<'blk, 'tcx> { - let fcx = bcx.fcx; - let ccx = fcx.ccx; - let tcx = bcx.tcx(); - - let _icx = push_ctxt("trans_intrinsic_call"); - - let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig()); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - let arg_tys = sig.inputs; - let ret_ty = sig.output; - let foreign_item = tcx.map.expect_foreign_item(node); - let name = foreign_item.name.as_str(); - - // For `transmute` we can just trans the input expr directly into dest - if name == "transmute" { - let llret_ty = type_of::type_of(ccx, ret_ty.unwrap()); - match args { - callee::ArgExprs(arg_exprs) => { - assert_eq!(arg_exprs.len(), 1); - - let (in_type, out_type) = (*substs.types.get(FnSpace, 0), - *substs.types.get(FnSpace, 1)); - let llintype = type_of::type_of(ccx, in_type); - let llouttype = type_of::type_of(ccx, out_type); - - let in_type_size = machine::llbitsize_of_real(ccx, llintype); - let out_type_size = machine::llbitsize_of_real(ccx, llouttype); - - // This should be caught by the intrinsicck pass - assert_eq!(in_type_size, out_type_size); - - let nonpointer_nonaggregate = |llkind: TypeKind| -> bool { - use llvm::TypeKind::*; - match llkind { - Half | Float | Double | X86_FP80 | FP128 | - PPC_FP128 | Integer | Vector | X86_MMX => true, - _ => false - } - }; - - // An approximation to which types can be directly cast via - // LLVM's bitcast. This doesn't cover pointer -> pointer casts, - // but does, importantly, cover SIMD types. 
- let in_kind = llintype.kind(); - let ret_kind = llret_ty.kind(); - let bitcast_compatible = - (nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || { - in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer - }; - - let dest = if bitcast_compatible { - // if we're here, the type is scalar-like (a primitive, a - // SIMD type or a pointer), and so can be handled as a - // by-value ValueRef and can also be directly bitcast to the - // target type. Doing this special case makes conversions - // like `u32x4` -> `u64x2` much nicer for LLVM and so more - // efficient (these are done efficiently implicitly in C - // with the `__m128i` type and so this means Rust doesn't - // lose out there). - let expr = &*arg_exprs[0]; - let datum = unpack_datum!(bcx, expr::trans(bcx, expr)); - let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp")); - let val = if datum.kind.is_by_ref() { - load_ty(bcx, datum.val, datum.ty) - } else { - from_arg_ty(bcx, datum.val, datum.ty) - }; - - let cast_val = BitCast(bcx, val, llret_ty); - - match dest { - expr::SaveIn(d) => { - // this often occurs in a sequence like `Store(val, - // d); val2 = Load(d)`, so disappears easily. - Store(bcx, cast_val, d); - } - expr::Ignore => {} - } - dest - } else { - // The types are too complicated to do with a by-value - // bitcast, so pointer cast instead. We need to cast the - // dest so the types work out. 
- let dest = match dest { - expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())), - expr::Ignore => expr::Ignore - }; - bcx = expr::trans_into(bcx, &*arg_exprs[0], dest); - dest - }; - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return match dest { - expr::SaveIn(d) => Result::new(bcx, d), - expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to())) - }; - - } - - _ => { - ccx.sess().bug("expected expr as argument for transmute"); - } - } - } - - // For `move_val_init` we can evaluate the destination address - // (the first argument) and then trans the source value (the - // second argument) directly into the resulting destination - // address. - if name == "move_val_init" { - if let callee::ArgExprs(ref exprs) = args { - let (dest_expr, source_expr) = if exprs.len() != 2 { - ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic"); - } else { - (&exprs[0], &exprs[1]) - }; - - // evaluate destination address - let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr)); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_rvalue_datum(bcx, "arg")); - let dest_datum = unpack_datum!( - bcx, dest_datum.to_appropriate_datum(bcx)); - - // `expr::trans_into(bcx, expr, dest)` is equiv to - // - // `trans(bcx, expr).store_to_dest(dest)`, - // - // which for `dest == expr::SaveIn(addr)`, is equivalent to: - // - // `trans(bcx, expr).store_to(bcx, addr)`. - let lldest = expr::Dest::SaveIn(dest_datum.val); - bcx = expr::trans_into(bcx, source_expr, lldest); - - let llresult = C_nil(ccx); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - return Result::new(bcx, llresult); - } else { - ccx.sess().bug("expected two exprs as arguments for `move_val_init` intrinsic"); - } - } - - let call_debug_location = DebugLoc::At(call_info.id, call_info.span); - - // For `try` we need some custom control flow - if &name[..] 
== "try" { - if let callee::ArgExprs(ref exprs) = args { - let (func, data) = if exprs.len() != 2 { - ccx.sess().bug("expected two exprs as arguments for \ - `try` intrinsic"); - } else { - (&exprs[0], &exprs[1]) - }; - - // translate arguments - let func = unpack_datum!(bcx, expr::trans(bcx, func)); - let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func")); - let data = unpack_datum!(bcx, expr::trans(bcx, data)); - let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data")); - - let dest = match dest { - expr::SaveIn(d) => d, - expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8), - "try_result"), - }; - - // do the invoke - bcx = try_intrinsic(bcx, func.val, data.val, dest, - call_debug_location); - - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - return Result::new(bcx, dest); - } else { - ccx.sess().bug("expected two exprs as arguments for \ - `try` intrinsic"); - } - } - - // save the actual AST arguments for later (some places need to do - // const-evaluation on them) - let expr_arguments = match args { - callee::ArgExprs(args) => Some(args), - _ => None, - }; - - // Push the arguments. - let mut llargs = Vec::new(); - bcx = callee::trans_args(bcx, - args, - callee_ty, - &mut llargs, - cleanup::CustomScope(cleanup_scope), - false, - RustIntrinsic); - - fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); - - // These are the only intrinsic functions that diverge. - if name == "abort" { - let llfn = ccx.get_intrinsic(&("llvm.trap")); - Call(bcx, llfn, &[], None, call_debug_location); - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - Unreachable(bcx); - return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); - } else if &name[..] 
== "unreachable" { - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - Unreachable(bcx); - return Result::new(bcx, C_nil(ccx)); - } - - let ret_ty = match ret_ty { - ty::FnConverging(ret_ty) => ret_ty, - ty::FnDiverging => unreachable!() - }; - - let llret_ty = type_of::type_of(ccx, ret_ty); - - // Get location to store the result. If the user does - // not care about the result, just make a stack slot - let llresult = match dest { - expr::SaveIn(d) => d, - expr::Ignore => { - if !type_is_zero_size(ccx, ret_ty) { - let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result"); - call_lifetime_start(bcx, llresult); - llresult - } else { - C_undef(llret_ty.ptr_to()) - } - } - }; - - let simple = get_simple_intrinsic(ccx, &*foreign_item); - let llval = match (simple, &*name) { - (Some(llfn), _) => { - Call(bcx, llfn, &llargs, None, call_debug_location) - } - (_, "breakpoint") => { - let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); - Call(bcx, llfn, &[], None, call_debug_location) - } - (_, "size_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) - } - (_, "size_of_val") => { - let tp_ty = *substs.types.get(FnSpace, 0); - if !type_is_sized(tcx, tp_ty) { - let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); - llsize - } else { - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) - } - } - (_, "min_align_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); - C_uint(ccx, type_of::align_of(ccx, tp_ty)) - } - (_, "min_align_of_val") => { - let tp_ty = *substs.types.get(FnSpace, 0); - if !type_is_sized(tcx, tp_ty) { - let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); - llalign - } else { - C_uint(ccx, type_of::align_of(ccx, tp_ty)) - } - } - (_, "pref_align_of") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_uint(ccx, 
machine::llalign_of_pref(ccx, lltp_ty)) - } - (_, "drop_in_place") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = if type_is_sized(tcx, tp_ty) { - llargs[0] - } else { - let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp"); - Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val)); - Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val)); - fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val); - scratch.val - }; - glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); - C_nil(ccx) - } - (_, "type_name") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ty_name = token::intern_and_get_ident(&tp_ty.to_string()); - C_str_slice(ccx, ty_name) - } - (_, "type_id") => { - let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0), - &ccx.link_meta().crate_hash); - C_u64(ccx, hash) - } - (_, "init_dropped") => { - let tp_ty = *substs.types.get(FnSpace, 0); - if !return_type_is_void(ccx, tp_ty) { - drop_done_fill_mem(bcx, llresult, tp_ty); - } - C_nil(ccx) - } - (_, "init") => { - let tp_ty = *substs.types.get(FnSpace, 0); - if !return_type_is_void(ccx, tp_ty) { - // Just zero out the stack slot. 
(See comment on base::memzero for explanation) - init_zero_mem(bcx, llresult, tp_ty); - } - C_nil(ccx) - } - // Effectively no-ops - (_, "uninit") | (_, "forget") => { - C_nil(ccx) - } - (_, "needs_drop") => { - let tp_ty = *substs.types.get(FnSpace, 0); - - C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) - } - (_, "offset") => { - let ptr = llargs[0]; - let offset = llargs[1]; - InBoundsGEP(bcx, ptr, &[offset]) - } - (_, "arith_offset") => { - let ptr = llargs[0]; - let offset = llargs[1]; - GEP(bcx, ptr, &[offset]) - } - - (_, "copy_nonoverlapping") => { - copy_intrinsic(bcx, - false, - false, - *substs.types.get(FnSpace, 0), - llargs[1], - llargs[0], - llargs[2], - call_debug_location) - } - (_, "copy") => { - copy_intrinsic(bcx, - true, - false, - *substs.types.get(FnSpace, 0), - llargs[1], - llargs[0], - llargs[2], - call_debug_location) - } - (_, "write_bytes") => { - memset_intrinsic(bcx, - false, - *substs.types.get(FnSpace, 0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) - } - - (_, "volatile_copy_nonoverlapping_memory") => { - copy_intrinsic(bcx, - false, - true, - *substs.types.get(FnSpace, 0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) - } - (_, "volatile_copy_memory") => { - copy_intrinsic(bcx, - true, - true, - *substs.types.get(FnSpace, 0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) - } - (_, "volatile_set_memory") => { - memset_intrinsic(bcx, - true, - *substs.types.get(FnSpace, 0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) - } - (_, "volatile_load") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let load = VolatileLoad(bcx, ptr); - unsafe { - llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); - } - to_arg_ty(bcx, load, tp_ty) - }, - (_, "volatile_store") => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let val = from_arg_ty(bcx, llargs[1], tp_ty); - let store = 
VolatileStore(bcx, val, ptr); - unsafe { - llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); - } - C_nil(ccx) - }, - - (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") | - (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") | - (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") | - (_, "unchecked_div") | (_, "unchecked_rem") => { - let sty = &arg_tys[0].sty; - match int_type_width_signed(sty, ccx) { - Some((width, signed)) => - match &*name { - "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width), - llargs[0], call_debug_location), - "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), - llargs[0], call_debug_location), - "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, None, call_debug_location), - "bswap" => { - if width == 8 { - llargs[0] // byte swap a u8/i8 is just a no-op - } else { - Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, None, call_debug_location) - } - } - "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let intrinsic = format!("llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name[..3], width); - with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult, - call_debug_location) - }, - "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location), - "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location), - "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location), - "unchecked_div" => - if signed { - SDiv(bcx, llargs[0], llargs[1], call_debug_location) - } else { - UDiv(bcx, llargs[0], llargs[1], call_debug_location) - }, - "unchecked_rem" => - if signed { - SRem(bcx, llargs[0], llargs[1], call_debug_location) - } else { - URem(bcx, llargs[0], llargs[1], call_debug_location) - }, - _ => unreachable!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, call_info.span, - 
&format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); - C_null(llret_ty) - } - } - - }, - - - (_, "return_address") => { - if !fcx.caller_expects_out_pointer { - span_err!(tcx.sess, call_info.span, E0510, - "invalid use of `return_address` intrinsic: function \ - does not use out pointer"); - C_null(Type::i8p(ccx)) - } else { - PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx)) - } - } - - (_, "discriminant_value") => { - let val_ty = substs.types.get(FnSpace, 0); - match val_ty.sty { - ty::TyEnum(..) => { - let repr = adt::represent_type(ccx, *val_ty); - adt::trans_get_discr(bcx, &*repr, llargs[0], Some(llret_ty)) - } - _ => C_null(llret_ty) - } - } - (_, name) if name.starts_with("simd_") => { - generic_simd_intrinsic(bcx, name, - substs, - callee_ty, - expr_arguments, - &llargs, - ret_ty, llret_ty, - call_debug_location, - call_info) - } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_[_]", and no ordering means SeqCst - (_, name) if name.starts_with("atomic_") => { - let split: Vec<&str> = name.split('_').collect(); - assert!(split.len() >= 2, "Atomic intrinsic not correct format"); - - let order = if split.len() == 2 { - llvm::SequentiallyConsistent - } else { - match split[2] { - "unordered" => llvm::Unordered, - "relaxed" => llvm::Monotonic, - "acq" => llvm::Acquire, - "rel" => llvm::Release, - "acqrel" => llvm::AcquireRelease, - _ => ccx.sess().fatal("unknown ordering in atomic intrinsic") - } - }; - - match split[1] { - "cxchg" => { - // See include/llvm/IR/Instructions.h for their implementation - // of this, I assume that it's good enough for us to use for - // now. 
- let strongest_failure_ordering = match order { - llvm::NotAtomic | llvm::Unordered => - ccx.sess().fatal("cmpxchg must be atomic"), - - llvm::Monotonic | llvm::Release => - llvm::Monotonic, - - llvm::Acquire | llvm::AcquireRelease => - llvm::Acquire, - - llvm::SequentiallyConsistent => - llvm::SequentiallyConsistent - }; - - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let cmp = from_arg_ty(bcx, llargs[1], tp_ty); - let src = from_arg_ty(bcx, llargs[2], tp_ty); - let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, - strongest_failure_ordering); - ExtractValue(bcx, res, 0) - } - - "load" => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty) - } - "store" => { - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let val = from_arg_ty(bcx, llargs[1], tp_ty); - AtomicStore(bcx, val, ptr, order); - C_nil(ccx) - } - - "fence" => { - AtomicFence(bcx, order, llvm::CrossThread); - C_nil(ccx) - } - - "singlethreadfence" => { - AtomicFence(bcx, order, llvm::SingleThread); - C_nil(ccx) - } - - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => llvm::AtomicXchg, - "xadd" => llvm::AtomicAdd, - "xsub" => llvm::AtomicSub, - "and" => llvm::AtomicAnd, - "nand" => llvm::AtomicNand, - "or" => llvm::AtomicOr, - "xor" => llvm::AtomicXor, - "max" => llvm::AtomicMax, - "min" => llvm::AtomicMin, - "umax" => llvm::AtomicUMax, - "umin" => llvm::AtomicUMin, - _ => ccx.sess().fatal("unknown atomic operation") - }; - - let tp_ty = *substs.types.get(FnSpace, 0); - let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); - let val = from_arg_ty(bcx, llargs[1], tp_ty); - AtomicRMW(bcx, atom_op, ptr, val, order) - } - } - - } - - (_, _) => { - let intr = match Intrinsic::find(tcx, &name) { - Some(intr) => intr, - None => ccx.sess().span_bug(foreign_item.span, - &format!("unknown intrinsic 
'{}'", name)), - }; - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, - any_changes_needed: &mut bool) -> Vec { - use intrinsics::Type::*; - match *t { - Void => vec![Type::void(ccx)], - Integer(_signed, width, llvm_width) => { - *any_changes_needed |= width != llvm_width; - vec![Type::ix(ccx, llvm_width as u64)] - } - Float(x) => { - match x { - 32 => vec![Type::f32(ccx)], - 64 => vec![Type::f64(ccx)], - _ => unreachable!() - } - } - Pointer(ref t, ref llvm_elem, _const) => { - *any_changes_needed |= llvm_elem.is_some(); - - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); - vec![elem.ptr_to()] - } - Vector(ref t, ref llvm_elem, length) => { - *any_changes_needed |= llvm_elem.is_some(); - - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); - vec![Type::vector(&elem, - length as u64)] - } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(ccx, t, any_changes_needed))) - .collect::>(); - vec![Type::struct_(ccx, &elems, false)] - } - Aggregate(true, ref contents) => { - *any_changes_needed = true; - contents.iter() - .flat_map(|t| ty_to_type(ccx, t, any_changes_needed)) - .collect() - } - } - } - - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. - fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: &intrinsics::Type, - arg_type: Ty<'tcx>, - llarg: ValueRef) - -> Vec - { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e. no - // destructors, and the contents are SIMD - // etc. 
- assert!(!bcx.fcx.type_needs_drop(arg_type)); - - let repr = adt::represent_type(bcx.ccx(), arg_type); - let repr_ptr = &*repr; - let arg = adt::MaybeSizedValue::sized(llarg); - (0..contents.len()) - .map(|i| { - Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i)) - }) - .collect() - } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); - vec![PointerCast(bcx, llarg, - llvm_elem.ptr_to())] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); - vec![BitCast(bcx, llarg, - Type::vector(&llvm_elem, length as u64))] - } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. - vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))] - } - _ => vec![llarg], - } - } - - - let mut any_changes_needed = false; - let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed)) - .collect::>(); - - let mut out_changes = false; - let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes)); - // outputting a flattened aggregate is nonsense - assert!(!out_changes); - - let llargs = if !any_changes_needed { - // no aggregates to flatten, so no change needed - llargs - } else { - // there are some aggregates that need to be flattened - // in the LLVM call, so we need to run over the types - // again to find them and extract the arguments - intr.inputs.iter() - .zip(&llargs) - .zip(&arg_tys) - .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) - .collect() - }; - assert_eq!(inputs.len(), llargs.len()); - - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(ccx, - name, - Type::func(&inputs, &outputs), - tcx.mk_nil()); - Call(bcx, f, &llargs, None, 
call_debug_location) - } - }; - - match intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); - - for i in 0..elems.len() { - let val = ExtractValue(bcx, val, i); - Store(bcx, val, StructGEP(bcx, llresult, i)); - } - C_nil(ccx) - } - _ => val, - } - } - }; - - if val_ty(llval) != Type::void(ccx) && - machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { - store_ty(bcx, llval, llresult, ret_ty); - } - - // If we made a temporary stack slot, let's clean it up - match dest { - expr::Ignore => { - bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location); - call_lifetime_end(bcx, llresult); - } - expr::SaveIn(_) => {} - } - - fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); - - Result::new(bcx, llresult) -} - -fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - allow_overlap: bool, - volatile: bool, - tp_ty: Ty<'tcx>, - dst: ValueRef, - src: ValueRef, - count: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let ccx = bcx.ccx(); - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); - let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - let operation = if allow_overlap { - "memmove" - } else { - "memcpy" - }; - - let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); - - let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); - let src_ptr = PointerCast(bcx, src, Type::i8p(ccx)); - let llfn = ccx.get_intrinsic(&name); - - Call(bcx, - llfn, - &[dst_ptr, - src_ptr, - Mul(bcx, size, count, DebugLoc::None), - align, - C_bool(ccx, volatile)], - None, - call_debug_location) -} - -fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - volatile: bool, - tp_ty: Ty<'tcx>, - dst: ValueRef, - val: ValueRef, - count: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let ccx = bcx.ccx(); - let lltp_ty = 
type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); - let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - let name = format!("llvm.memset.p0i8.i{}", int_size); - - let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); - let llfn = ccx.get_intrinsic(&name); - - Call(bcx, - llfn, - &[dst_ptr, - val, - Mul(bcx, size, count, DebugLoc::None), - align, - C_bool(ccx, volatile)], - None, - call_debug_location) -} - -fn count_zeros_intrinsic(bcx: Block, - name: &str, - val: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let y = C_bool(bcx.ccx(), false); - let llfn = bcx.ccx().get_intrinsic(&name); - Call(bcx, llfn, &[val, y], None, call_debug_location) -} - -fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - name: &str, - a: ValueRef, - b: ValueRef, - out: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let llfn = bcx.ccx().get_intrinsic(&name); - - // Convert `i1` to a `bool`, and write it to the out parameter - let val = Call(bcx, llfn, &[a, b], None, call_debug_location); - let result = ExtractValue(bcx, val, 0); - let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); - Store(bcx, result, StructGEP(bcx, out, 0)); - Store(bcx, overflow, StructGEP(bcx, out, 1)); - - C_nil(bcx.ccx()) -} - -fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - if bcx.sess().no_landing_pads() { - Call(bcx, func, &[data], None, dloc); - Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); - bcx - } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, dest, dloc) - } else { - trans_gnu_try(bcx, func, data, dest, dloc) - } -} - -// MSVC's definition of the `rust_try` function. 
The exact implementation here -// is a little different than the GNU (standard) version below, not only because -// of the personality function but also because of the other fiddly bits about -// SEH. LLVM also currently requires us to structure this in a very particular -// way as explained below. -// -// Like with the GNU version we generate a shim wrapper -fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| { - let ccx = bcx.ccx(); - let dloc = DebugLoc::None; - let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", - try_fn_ty); - let (fcx, block_arena); - block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false, - output, ccx.tcx().mk_substs(Substs::trans_empty()), - None, &block_arena); - let bcx = init_function(&fcx, true, output); - let then = fcx.new_temp_block("then"); - let catch = fcx.new_temp_block("catch"); - let catch_return = fcx.new_temp_block("catch-return"); - let catch_resume = fcx.new_temp_block("catch-resume"); - let personality = fcx.eh_personality(); - - let eh_typeid_for = ccx.get_intrinsic(&"llvm.eh.typeid.for"); - let rust_try_filter = match bcx.tcx().lang_items.msvc_try_filter() { - Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), - bcx.fcx.param_substs).val, - None => bcx.sess().bug("msvc_try_filter not defined"), - }; - - // Type indicator for the exception being thrown, not entirely sure - // what's going on here but it's what all the examples in LLVM use. 
- let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], - false); - - llvm::SetFunctionAttribute(rust_try, llvm::Attribute::NoInline); - llvm::SetFunctionAttribute(rust_try, llvm::Attribute::OptimizeNone); - let func = llvm::get_param(rust_try, 0); - let data = llvm::get_param(rust_try, 1); - - // Invoke the function, specifying our two temporary landing pads as the - // ext point. After the invoke we've terminated our basic block. - Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc); - - // All the magic happens in this landing pad, and this is basically the - // only landing pad in rust tagged with "catch" to indicate that we're - // catching an exception. The other catch handlers in the GNU version - // below just catch *all* exceptions, but that's because most exceptions - // are already filtered out by the gnu personality function. - // - // For MSVC we're just using a standard personality function that we - // can't customize (e.g. _except_handler3 or __C_specific_handler), so - // we need to do the exception filtering ourselves. This is currently - // performed by the `__rust_try_filter` function. This function, - // specified in the landingpad instruction, will be invoked by Windows - // SEH routines and will return whether the exception in question can be - // caught (aka the Rust runtime is the one that threw the exception). - // - // To get this to compile (currently LLVM segfaults if it's not in this - // particular structure), when the landingpad is executing we test to - // make sure that the ID of the exception being thrown is indeed the one - // that we were expecting. If it's not, we resume the exception, and - // otherwise we return the pointer that we got Full disclosure: It's not - // clear to me what this `llvm.eh.typeid` stuff is doing *other* then - // just allowing LLVM to compile this file without segfaulting. I would - // expect the entire landing pad to just be: - // - // %vals = landingpad ... 
- // %ehptr = extractvalue { i8*, i32 } %vals, 0 - // ret i8* %ehptr - // - // but apparently LLVM chokes on this, so we do the more complicated - // thing to placate it. - let vals = LandingPad(catch, lpad_ty, personality, 1); - let rust_try_filter = BitCast(catch, rust_try_filter, Type::i8p(ccx)); - AddClause(catch, vals, rust_try_filter); - let ehptr = ExtractValue(catch, vals, 0); - let sel = ExtractValue(catch, vals, 1); - let filter_sel = Call(catch, eh_typeid_for, &[rust_try_filter], None, - dloc); - let is_filter = ICmp(catch, llvm::IntEQ, sel, filter_sel, dloc); - CondBr(catch, is_filter, catch_return.llbb, catch_resume.llbb, dloc); - - // Our "catch-return" basic block is where we've determined that we - // actually need to catch this exception, in which case we just return - // the exception pointer. - Ret(catch_return, ehptr, dloc); - - // The "catch-resume" block is where we're running this landing pad but - // we actually need to not catch the exception, so just resume the - // exception to return. - trans_unwind_resume(catch_resume, vals); - - // On the successful branch we just return null. - Ret(then, C_null(Type::i8p(ccx)), dloc); - - return rust_try - }); - - // Note that no invoke is used here because by definition this function - // can't panic (that's what it's catching). - let ret = Call(bcx, llfn, &[func, data], None, dloc); - Store(bcx, ret, dest); - return bcx; -} - -// Definition of the standard "try" function for Rust using the GNU-like model -// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke -// instructions). -// -// This translation is a little surprising because -// we always call a shim function instead of inlining the call to `invoke` -// manually here. This is done because in LLVM we're only allowed to have one -// personality per function definition. 
The call to the `try` intrinsic is -// being inlined into the function calling it, and that function may already -// have other personality functions in play. By calling a shim we're -// guaranteed that our shim will have the right personality function. -// -fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |try_fn_ty, output| { - let ccx = bcx.ccx(); - let dloc = DebugLoc::None; - - // Translates the shims described above: - // - // bcx: - // invoke %func(%args...) normal %normal unwind %catch - // - // normal: - // ret null - // - // catch: - // (ptr, _) = landingpad - // ret ptr - - let rust_try = declare::define_internal_rust_fn(ccx, "__rust_try", try_fn_ty); - attributes::emit_uwtable(rust_try, true); - let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() { - Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), - bcx.fcx.param_substs).val, - None => bcx.tcx().sess.bug("eh_personality_catch not defined"), - }; - - let (fcx, block_arena); - block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, rust_try, ast::DUMMY_NODE_ID, false, - output, ccx.tcx().mk_substs(Substs::trans_empty()), - None, &block_arena); - let bcx = init_function(&fcx, true, output); - let then = bcx.fcx.new_temp_block("then"); - let catch = bcx.fcx.new_temp_block("catch"); - - let func = llvm::get_param(rust_try, 0); - let data = llvm::get_param(rust_try, 1); - Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc); - Ret(then, C_null(Type::i8p(ccx)), dloc); - - // Type indicator for the exception being thrown. - // The first value in this tuple is a pointer to the exception object being thrown. - // The second value is a "selector" indicating which of the landing pad clauses - // the exception's type had been matched to. rust_try ignores the selector. 
- let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], - false); - let vals = LandingPad(catch, lpad_ty, catch_pers, 1); - AddClause(catch, vals, C_null(Type::i8p(ccx))); - let ptr = ExtractValue(catch, vals, 0); - Ret(catch, ptr, dloc); - fcx.cleanup(); - - return rust_try - }); - - // Note that no invoke is used here because by definition this function - // can't panic (that's what it's catching). - let ret = Call(bcx, llfn, &[func, data], None, dloc); - Store(bcx, ret, dest); - return bcx; -} - -// Helper to generate the `Ty` associated with `rust_try` -fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - f: &mut FnMut(Ty<'tcx>, - ty::FnOutput<'tcx>) -> ValueRef) - -> ValueRef { - let ccx = fcx.ccx; - if let Some(llfn) = *ccx.rust_try_fn().borrow() { - return llfn - } - - // Define the type up front for the signature of the rust_try function. - let tcx = ccx.tcx(); - let i8p = tcx.mk_mut_ptr(tcx.types.i8); - let fn_ty = tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: vec![i8p], - output: ty::FnOutput::FnConverging(tcx.mk_nil()), - variadic: false, - }), - }); - let fn_ty = tcx.mk_fn(None, fn_ty); - let output = ty::FnOutput::FnConverging(i8p); - let try_fn_ty = tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: abi::Rust, - sig: ty::Binder(ty::FnSig { - inputs: vec![fn_ty, i8p], - output: output, - variadic: false, - }), - }); - let rust_try = f(tcx.mk_fn(None, try_fn_ty), output); - *ccx.rust_try_fn().borrow_mut() = Some(rust_try); - return rust_try -} - -fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { - span_err!(a, b, E0511, "{}", c); -} - -fn generic_simd_intrinsic<'blk, 'tcx, 'a> - (bcx: Block<'blk, 'tcx>, - name: &str, - substs: subst::Substs<'tcx>, - callee_ty: Ty<'tcx>, - args: Option<&[P]>, - llargs: &[ValueRef], - ret_ty: Ty<'tcx>, - llret_ty: Type, - call_debug_location: DebugLoc, - call_info: NodeIdAndSpan) 
-> ValueRef -{ - // macros for error handling: - macro_rules! emit_error { - ($msg: tt) => { - emit_error!($msg, ) - }; - ($msg: tt, $($fmt: tt)*) => { - span_invalid_monomorphization_error( - bcx.sess(), call_info.span, - &format!(concat!("invalid monomorphization of `{}` intrinsic: ", - $msg), - name, $($fmt)*)); - } - } - macro_rules! require { - ($cond: expr, $($fmt: tt)*) => { - if !$cond { - emit_error!($($fmt)*); - return C_null(llret_ty) - } - } - } - macro_rules! require_simd { - ($ty: expr, $position: expr) => { - require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty) - } - } - - - - let tcx = bcx.tcx(); - let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig()); - let sig = infer::normalize_associated_type(tcx, &sig); - let arg_tys = sig.inputs; - - // every intrinsic takes a SIMD vector as its first argument - require_simd!(arg_tys[0], "input"); - let in_ty = arg_tys[0]; - let in_elem = arg_tys[0].simd_type(tcx); - let in_len = arg_tys[0].simd_size(tcx); - - let comparison = match name { - "simd_eq" => Some(hir::BiEq), - "simd_ne" => Some(hir::BiNe), - "simd_lt" => Some(hir::BiLt), - "simd_le" => Some(hir::BiLe), - "simd_gt" => Some(hir::BiGt), - "simd_ge" => Some(hir::BiGe), - _ => None - }; - - if let Some(cmp_op) = comparison { - require_simd!(ret_ty, "return"); - - let out_len = ret_ty.simd_size(tcx); - require!(in_len == out_len, - "expected return type with length {} (same as input type `{}`), \ - found `{}` with length {}", - in_len, in_ty, - ret_ty, out_len); - require!(llret_ty.element_type().kind() == llvm::Integer, - "expected return type with integer elements, found `{}` with non-integer `{}`", - ret_ty, - ret_ty.simd_type(tcx)); - - return compare_simd_types(bcx, - llargs[0], - llargs[1], - in_elem, - llret_ty, - cmp_op, - call_debug_location) - } - - if name.starts_with("simd_shuffle") { - let n: usize = match name["simd_shuffle".len()..].parse() { - Ok(n) => n, - Err(_) => tcx.sess.span_bug(call_info.span, 
- "bad `simd_shuffle` instruction only caught in trans?") - }; - - require_simd!(ret_ty, "return"); - - let out_len = ret_ty.simd_size(tcx); - require!(out_len == n, - "expected return type of length {}, found `{}` with length {}", - n, ret_ty, out_len); - require!(in_elem == ret_ty.simd_type(tcx), - "expected return element type `{}` (element of input `{}`), \ - found `{}` with element type `{}`", - in_elem, in_ty, - ret_ty, ret_ty.simd_type(tcx)); - - let total_len = in_len as u64 * 2; - - let vector = match args { - Some(args) => &args[2], - None => bcx.sess().span_bug(call_info.span, - "intrinsic call with unexpected argument shape"), - }; - let vector = match consts::const_expr( - bcx.ccx(), - vector, - tcx.mk_substs(substs), - None, - consts::TrueConst::Yes, // this should probably help simd error reporting - ) { - Ok((vector, _)) => vector, - Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()), - }; - - let indices: Option> = (0..n) - .map(|i| { - let arg_idx = i; - let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]); - let c = const_to_opt_uint(val); - match c { - None => { - emit_error!("shuffle index #{} is not a constant", arg_idx); - None - } - Some(idx) if idx >= total_len => { - emit_error!("shuffle index #{} is out of bounds (limit {})", - arg_idx, total_len); - None - } - Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)), - } - }) - .collect(); - let indices = match indices { - Some(i) => i, - None => return C_null(llret_ty) - }; - - return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices)) - } - - if name == "simd_insert" { - require!(in_elem == arg_tys[2], - "expected inserted type `{}` (element of input `{}`), found `{}`", - in_elem, in_ty, arg_tys[2]); - return InsertElement(bcx, llargs[0], llargs[2], llargs[1]) - } - if name == "simd_extract" { - require!(ret_ty == in_elem, - "expected return type `{}` (element of input `{}`), found `{}`", - in_elem, in_ty, ret_ty); - return ExtractElement(bcx, llargs[0], 
llargs[1]) - } - - if name == "simd_cast" { - require_simd!(ret_ty, "return"); - let out_len = ret_ty.simd_size(tcx); - require!(in_len == out_len, - "expected return type with length {} (same as input type `{}`), \ - found `{}` with length {}", - in_len, in_ty, - ret_ty, out_len); - // casting cares about nominal type, not just structural type - let out_elem = ret_ty.simd_type(tcx); - - if in_elem == out_elem { return llargs[0]; } - - enum Style { Float, Int(/* is signed? */ bool), Unsupported } - - let (in_style, in_width) = match in_elem.sty { - // vectors of pointer-sized integers should've been - // disallowed before here, so this unwrap is safe. - ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), - ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), - ty::TyFloat(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0) - }; - let (out_style, out_width) = match out_elem.sty { - ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()), - ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()), - ty::TyFloat(f) => (Style::Float, f.bit_width()), - _ => (Style::Unsupported, 0) - }; - - match (in_style, out_style) { - (Style::Int(in_is_signed), Style::Int(_)) => { - return match in_width.cmp(&out_width) { - Ordering::Greater => Trunc(bcx, llargs[0], llret_ty), - Ordering::Equal => llargs[0], - Ordering::Less => if in_is_signed { - SExt(bcx, llargs[0], llret_ty) - } else { - ZExt(bcx, llargs[0], llret_ty) - } - } - } - (Style::Int(in_is_signed), Style::Float) => { - return if in_is_signed { - SIToFP(bcx, llargs[0], llret_ty) - } else { - UIToFP(bcx, llargs[0], llret_ty) - } - } - (Style::Float, Style::Int(out_is_signed)) => { - return if out_is_signed { - FPToSI(bcx, llargs[0], llret_ty) - } else { - FPToUI(bcx, llargs[0], llret_ty) - } - } - (Style::Float, Style::Float) => { - return match in_width.cmp(&out_width) { - Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty), - Ordering::Equal => llargs[0], - Ordering::Less => 
FPExt(bcx, llargs[0], llret_ty) - } - } - _ => {/* Unsupported. Fallthrough. */} - } - require!(false, - "unsupported cast from `{}` with element `{}` to `{}` with element `{}`", - in_ty, in_elem, - ret_ty, out_elem); - } - macro_rules! arith { - ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => { - $( - if name == stringify!($name) { - match in_elem.sty { - $( - $(ty::$p(_))|* => { - return $call(bcx, llargs[0], llargs[1], call_debug_location) - } - )* - _ => {}, - } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) - })* - } - } - arith! { - simd_add: TyUint, TyInt => Add, TyFloat => FAdd; - simd_sub: TyUint, TyInt => Sub, TyFloat => FSub; - simd_mul: TyUint, TyInt => Mul, TyFloat => FMul; - simd_div: TyFloat => FDiv; - simd_shl: TyUint, TyInt => Shl; - simd_shr: TyUint => LShr, TyInt => AShr; - simd_and: TyUint, TyInt => And; - simd_or: TyUint, TyInt => Or; - simd_xor: TyUint, TyInt => Xor; - } - bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic"); -} - -// Returns the width of an int TypeVariant, and if it's signed or not -// Returns None if the type is not an integer -fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) - -> Option<(u64, bool)> { - use rustc::middle::ty::{TyInt, TyUint}; - match *sty { - TyInt(t) => Some((match t { - ast::TyIs => { - match &ccx.tcx().sess.target.target.target_pointer_width[..] { - "32" => 32, - "64" => 64, - tws => panic!("Unsupported target word size for isize: {}", tws), - } - }, - ast::TyI8 => 8, - ast::TyI16 => 16, - ast::TyI32 => 32, - ast::TyI64 => 64, - }, true)), - TyUint(t) => Some((match t { - ast::TyUs => { - match &ccx.tcx().sess.target.target.target_pointer_width[..] 
{ - "32" => 32, - "64" => 64, - tws => panic!("Unsupported target word size for usize: {}", tws), - } - }, - ast::TyU8 => 8, - ast::TyU16 => 16, - ast::TyU32 => 32, - ast::TyU64 => 64, - }, false)), - _ => None, - } -} diff --git a/src/librustc_trans/trans/llrepr.rs b/src/librustc_trans/trans/llrepr.rs deleted file mode 100644 index 6b785e7edfd6a..0000000000000 --- a/src/librustc_trans/trans/llrepr.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use trans::context::CrateContext; -use trans::type_::Type; -use llvm::ValueRef; - -pub trait LlvmRepr { - fn llrepr(&self, ccx: &CrateContext) -> String; -} - -impl LlvmRepr for [T] { - fn llrepr(&self, ccx: &CrateContext) -> String { - let reprs: Vec = self.iter().map(|t| t.llrepr(ccx)).collect(); - format!("[{}]", reprs.join(",")) - } -} - -impl LlvmRepr for Type { - fn llrepr(&self, ccx: &CrateContext) -> String { - ccx.tn().type_to_string(*self) - } -} - -impl LlvmRepr for ValueRef { - fn llrepr(&self, ccx: &CrateContext) -> String { - ccx.tn().val_to_string(*self) - } -} diff --git a/src/librustc_trans/trans/machine.rs b/src/librustc_trans/trans/machine.rs deleted file mode 100644 index 691fba42d5705..0000000000000 --- a/src/librustc_trans/trans/machine.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// Information concerning the machine representation of various types. - -#![allow(non_camel_case_types)] - -use llvm::{self, ValueRef}; -use trans::common::*; - -use trans::type_::Type; - -pub type llbits = u64; -pub type llsize = u64; -pub type llalign = u32; - -// ______________________________________________________________________ -// compute sizeof / alignof - -// Returns the number of bytes clobbered by a Store to this type. -pub fn llsize_of_store(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMStoreSizeOfType(cx.td(), ty.to_ref()); - } -} - -// Returns the number of bytes between successive elements of type T in an -// array of T. This is the "ABI" size. It includes any ABI-mandated padding. -pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref()); - } -} - -// Returns, as near as we can figure, the "real" size of a type. As in, the -// bits in this number of bytes actually carry data related to the datum -// with the type. Not junk, accidentally-damaged words, or whatever. -// Note that padding of the type will be included for structs, but not for the -// other types (i.e. SIMD types). -// Rounds up to the nearest byte though, so if you have a 1-bit -// value, we return 1 here, not 0. Most of rustc works in bytes. Be warned -// that LLVM *does* distinguish between e.g. a 1-bit value and an 8-bit value -// at the codegen level! In general you should prefer `llbitsize_of_real` -// below. -pub fn llsize_of_real(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - let nbits = llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()); - if nbits & 7 != 0 { - // Not an even number of bytes, spills into "next" byte. - 1 + (nbits >> 3) - } else { - nbits >> 3 - } - } -} - -/// Returns the "real" size of the type in bits. 
-pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { - unsafe { - llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()) - } -} - -/// Returns the size of the type as an LLVM constant integer value. -pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { - // Once upon a time, this called LLVMSizeOf, which does a - // getelementptr(1) on a null pointer and casts to an int, in - // order to obtain the type size as a value without requiring the - // target data layout. But we have the target data layout, so - // there's no need for that contrivance. The instruction - // selection DAG generator would flatten that GEP(1) node into a - // constant of the type's alloc size, so let's save it some work. - return C_uint(cx, llsize_of_alloc(cx, ty)); -} - -// Returns the preferred alignment of the given type for the current target. -// The preferred alignment may be larger than the alignment used when -// packing the type into structs. This will be used for things like -// allocations inside a stack frame, which LLVM has a free hand in. -pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -// Returns the minimum alignment of a type required by the platform. -// This is the alignment that will be used for struct fields, arrays, -// and similar ABI-mandated things. -pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 { - unsafe { - return llvm::LLVMOffsetOfElement(cx.td(), - struct_ty.to_ref(), - element as u32); - } -} diff --git a/src/librustc_trans/trans/meth.rs b/src/librustc_trans/trans/meth.rs deleted file mode 100644 index bd12dd8c3effc..0000000000000 --- a/src/librustc_trans/trans/meth.rs +++ /dev/null @@ -1,754 +0,0 @@ -// Copyright 2012 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use arena::TypedArena; -use back::link; -use llvm::{ValueRef, get_params}; -use middle::def_id::DefId; -use middle::infer; -use middle::subst::{Subst, Substs}; -use middle::subst::VecPerParamSpace; -use middle::subst; -use middle::traits; -use trans::base::*; -use trans::build::*; -use trans::callee::*; -use trans::callee; -use trans::cleanup; -use trans::closure; -use trans::common::*; -use trans::consts; -use trans::datum::*; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::glue; -use trans::machine; -use trans::monomorphize; -use trans::type_::Type; -use trans::type_of::*; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::MethodCall; - -use syntax::ast; -use syntax::attr; -use syntax::codemap::DUMMY_SP; - -use rustc_front::hir; - -// drop_glue pointer, size, align. -const VTABLE_OFFSET: usize = 3; - -/// The main "translation" pass for methods. Generates code -/// for non-monomorphized methods only. Other methods will -/// be generated once they are invoked with specific type parameters, -/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`. -pub fn trans_impl(ccx: &CrateContext, - name: ast::Name, - impl_items: &[hir::ImplItem], - generics: &hir::Generics, - id: ast::NodeId) { - let _icx = push_ctxt("meth::trans_impl"); - let tcx = ccx.tcx(); - - debug!("trans_impl(name={}, id={})", name, id); - - // Both here and below with generic methods, be sure to recurse and look for - // items that we need to translate. 
- if !generics.ty_params.is_empty() { - return; - } - - for impl_item in impl_items { - match impl_item.node { - hir::ImplItemKind::Method(ref sig, ref body) => { - if sig.generics.ty_params.is_empty() { - let trans_everywhere = attr::requests_inline(&impl_item.attrs); - for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) { - let llfn = get_item_val(ccx, impl_item.id); - let empty_substs = tcx.mk_substs(Substs::trans_empty()); - trans_fn(ccx, - &sig.decl, - body, - llfn, - empty_substs, - impl_item.id, - &impl_item.attrs); - update_linkage(ccx, - llfn, - Some(impl_item.id), - if is_origin { OriginalTranslation } else { InlinedCopy }); - } - } - } - _ => {} - } - } -} - -pub fn trans_method_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - method_call: MethodCall, - self_expr: Option<&hir::Expr>, - arg_cleanup_scope: cleanup::ScopeId) - -> Callee<'blk, 'tcx> { - let _icx = push_ctxt("meth::trans_method_callee"); - - let method = bcx.tcx().tables.borrow().method_map[&method_call]; - - match bcx.tcx().impl_or_trait_item(method.def_id).container() { - ty::ImplContainer(_) => { - debug!("trans_method_callee: static, {:?}", method.def_id); - let datum = callee::trans_fn_ref(bcx.ccx(), - method.def_id, - MethodCallKey(method_call), - bcx.fcx.param_substs); - Callee { - bcx: bcx, - data: Fn(datum.val), - ty: datum.ty - } - } - - ty::TraitContainer(trait_def_id) => { - let trait_substs = method.substs.clone().method_to_trait(); - let trait_substs = bcx.tcx().mk_substs(trait_substs); - let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); - - let trait_ref = ty::Binder(bcx.monomorphize(&trait_ref)); - let span = bcx.tcx().map.span(method_call.expr_id); - debug!("method_call={:?} trait_ref={:?} trait_ref id={:?} substs={:?}", - method_call, - trait_ref, - trait_ref.0.def_id, - trait_ref.0.substs); - let origin = fulfill_obligation(bcx.ccx(), - span, - trait_ref.clone()); - debug!("origin = {:?}", origin); - trans_monomorphized_callee(bcx, - method_call, - 
self_expr, - trait_def_id, - method.def_id, - method.ty, - origin, - arg_cleanup_scope) - } - } -} - -pub fn trans_static_method_callee<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - method_id: DefId, - trait_id: DefId, - expr_id: ast::NodeId, - param_substs: &'tcx subst::Substs<'tcx>) - -> Datum<'tcx, Rvalue> -{ - let _icx = push_ctxt("meth::trans_static_method_callee"); - let tcx = ccx.tcx(); - - debug!("trans_static_method_callee(method_id={:?}, trait_id={}, \ - expr_id={})", - method_id, - tcx.item_path_str(trait_id), - expr_id); - - let mname = tcx.item_name(method_id); - - debug!("trans_static_method_callee: method_id={:?}, expr_id={}, \ - name={}", method_id, expr_id, mname); - - // Find the substitutions for the fn itself. This includes - // type parameters that belong to the trait but also some that - // belong to the method: - let rcvr_substs = node_id_substs(ccx, ExprId(expr_id), param_substs); - let subst::SeparateVecsPerParamSpace { - types: rcvr_type, - selfs: rcvr_self, - fns: rcvr_method - } = rcvr_substs.types.split(); - - // Lookup the precise impl being called. To do that, we need to - // create a trait reference identifying the self type and other - // input type parameters. To create that trait reference, we have - // to pick apart the type parameters to identify just those that - // pertain to the trait. This is easiest to explain by example: - // - // trait Convert { - // fn from(n: U) -> Option; - // } - // ... - // let f = as Convert>::from::(...) - // - // Here, in this call, which I've written with explicit UFCS - // notation, the set of type parameters will be: - // - // rcvr_type: [] <-- nothing declared on the trait itself - // rcvr_self: [Vec] <-- the self type - // rcvr_method: [String] <-- method type parameter - // - // So we create a trait reference using the first two, - // basically corresponding to ` as Convert>`. - // The remaining type parameters (`rcvr_method`) will be used below. 
- let trait_substs = - Substs::erased(VecPerParamSpace::new(rcvr_type, - rcvr_self, - Vec::new())); - let trait_substs = tcx.mk_substs(trait_substs); - debug!("trait_substs={:?}", trait_substs); - let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, trait_substs)); - let vtbl = fulfill_obligation(ccx, - DUMMY_SP, - trait_ref); - - // Now that we know which impl is being used, we can dispatch to - // the actual function: - match vtbl { - traits::VtableImpl(traits::VtableImplData { - impl_def_id: impl_did, - substs: impl_substs, - nested: _ }) => - { - assert!(!impl_substs.types.needs_infer()); - - // Create the substitutions that are in scope. This combines - // the type parameters from the impl with those declared earlier. - // To see what I mean, consider a possible impl: - // - // impl Convert for Vec { - // fn from(n: U) { ... } - // } - // - // Recall that we matched ` as Convert>`. Trait - // resolution will have given us a substitution - // containing `impl_substs=[[T=i32],[],[]]` (the type - // parameters defined on the impl). We combine - // that with the `rcvr_method` from before, which tells us - // the type parameters from the *method*, to yield - // `callee_substs=[[T=i32],[],[U=String]]`. 
- let subst::SeparateVecsPerParamSpace { - types: impl_type, - selfs: impl_self, - fns: _ - } = impl_substs.types.split(); - let callee_substs = - Substs::erased(VecPerParamSpace::new(impl_type, - impl_self, - rcvr_method)); - - let mth = tcx.get_impl_method(impl_did, callee_substs, mname); - trans_fn_ref_with_substs(ccx, mth.method.def_id, ExprId(expr_id), - param_substs, - mth.substs) - } - traits::VtableObject(ref data) => { - let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id); - trans_object_shim(ccx, - data.upcast_trait_ref.clone(), - method_id, - idx) - } - _ => { - tcx.sess.bug(&format!("static call to invalid vtable: {:?}", - vtbl)); - } - } -} - -fn trans_monomorphized_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - method_call: MethodCall, - self_expr: Option<&hir::Expr>, - trait_id: DefId, - method_id: DefId, - method_ty: Ty<'tcx>, - vtable: traits::Vtable<'tcx, ()>, - arg_cleanup_scope: cleanup::ScopeId) - -> Callee<'blk, 'tcx> { - let _icx = push_ctxt("meth::trans_monomorphized_callee"); - match vtable { - traits::VtableImpl(vtable_impl) => { - let ccx = bcx.ccx(); - let impl_did = vtable_impl.impl_def_id; - let mname = match ccx.tcx().impl_or_trait_item(method_id) { - ty::MethodTraitItem(method) => method.name, - _ => { - bcx.tcx().sess.bug("can't monomorphize a non-method trait \ - item") - } - }; - // create a concatenated set of substitutions which includes - // those from the impl and those from the method: - let callee_substs = - combine_impl_and_methods_tps( - bcx, MethodCallKey(method_call), vtable_impl.substs); - - let mth = bcx.tcx().get_impl_method(impl_did, callee_substs, mname); - // translate the function - let datum = trans_fn_ref_with_substs(bcx.ccx(), - mth.method.def_id, - MethodCallKey(method_call), - bcx.fcx.param_substs, - mth.substs); - - Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty } - } - traits::VtableClosure(vtable_closure) => { - // The substitutions should have no type parameters remaining - // 
after passing through fulfill_obligation - let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = closure::trans_closure_method(bcx.ccx(), - vtable_closure.closure_def_id, - vtable_closure.substs, - trait_closure_kind); - Callee { - bcx: bcx, - data: Fn(llfn), - ty: monomorphize_type(bcx, method_ty) - } - } - traits::VtableFnPointer(fn_ty) => { - let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = trans_fn_pointer_shim(bcx.ccx(), trait_closure_kind, fn_ty); - Callee { - bcx: bcx, - data: Fn(llfn), - ty: monomorphize_type(bcx, method_ty) - } - } - traits::VtableObject(ref data) => { - let idx = traits::get_vtable_index_of_object_method(bcx.tcx(), data, method_id); - if let Some(self_expr) = self_expr { - if let ty::TyBareFn(_, ref fty) = monomorphize_type(bcx, method_ty).sty { - let ty = bcx.tcx().mk_fn(None, opaque_method_ty(bcx.tcx(), fty)); - return trans_trait_callee(bcx, ty, idx, self_expr, arg_cleanup_scope); - } - } - let datum = trans_object_shim(bcx.ccx(), - data.upcast_trait_ref.clone(), - method_id, - idx); - Callee { bcx: bcx, data: Fn(datum.val), ty: datum.ty } - } - traits::VtableBuiltin(..) | - traits::VtableDefaultImpl(..) | - traits::VtableParam(..) => { - bcx.sess().bug( - &format!("resolved vtable bad vtable {:?} in trans", - vtable)); - } - } -} - - /// Creates a concatenated set of substitutions which includes those from the impl and those from - /// the method. This are some subtle complications here. Statically, we have a list of type - /// parameters like `[T0, T1, T2, M1, M2, M3]` where `Tn` are type parameters that appear on the - /// receiver. For example, if the receiver is a method parameter `A` with a bound like - /// `trait` then `Tn` would be `[B,C,D]`. - /// - /// The weird part is that the type `A` might now be bound to any other type, such as `foo`. - /// In that case, the vector we want is: `[X, M1, M2, M3]`. 
Therefore, what we do now is to slice - /// off the method type parameters and append them to the type parameters from the type that the - /// receiver is mapped to. -fn combine_impl_and_methods_tps<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - node: ExprOrMethodCall, - rcvr_substs: subst::Substs<'tcx>) - -> subst::Substs<'tcx> -{ - let ccx = bcx.ccx(); - - let node_substs = node_id_substs(ccx, node, bcx.fcx.param_substs); - - debug!("rcvr_substs={:?}", rcvr_substs); - debug!("node_substs={:?}", node_substs); - - // Break apart the type parameters from the node and type - // parameters from the receiver. - let node_method = node_substs.types.split().fns; - let subst::SeparateVecsPerParamSpace { - types: rcvr_type, - selfs: rcvr_self, - fns: rcvr_method - } = rcvr_substs.types.clone().split(); - assert!(rcvr_method.is_empty()); - subst::Substs { - regions: subst::ErasedRegions, - types: subst::VecPerParamSpace::new(rcvr_type, rcvr_self, node_method) - } -} - -/// Create a method callee where the method is coming from a trait object (e.g., Box type). -/// In this case, we must pull the fn pointer out of the vtable that is packaged up with the -/// object. Objects are represented as a pair, so we first evaluate the self expression and then -/// extract the self data and vtable out of the pair. -fn trans_trait_callee<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - opaque_fn_ty: Ty<'tcx>, - vtable_index: usize, - self_expr: &hir::Expr, - arg_cleanup_scope: cleanup::ScopeId) - -> Callee<'blk, 'tcx> { - let _icx = push_ctxt("meth::trans_trait_callee"); - let mut bcx = bcx; - - // Translate self_datum and take ownership of the value by - // converting to an rvalue. - let self_datum = unpack_datum!( - bcx, expr::trans(bcx, self_expr)); - - let llval = if bcx.fcx.type_needs_drop(self_datum.ty) { - let self_datum = unpack_datum!( - bcx, self_datum.to_rvalue_datum(bcx, "trait_callee")); - - // Convert to by-ref since `trans_trait_callee_from_llval` wants it - // that way. 
- let self_datum = unpack_datum!( - bcx, self_datum.to_ref_datum(bcx)); - - // Arrange cleanup in case something should go wrong before the - // actual call occurs. - self_datum.add_clean(bcx.fcx, arg_cleanup_scope) - } else { - // We don't have to do anything about cleanups for &Trait and &mut Trait. - assert!(self_datum.kind.is_by_ref()); - self_datum.val - }; - - let llself = Load(bcx, expr::get_dataptr(bcx, llval)); - let llvtable = Load(bcx, expr::get_meta(bcx, llval)); - trans_trait_callee_from_llval(bcx, opaque_fn_ty, vtable_index, llself, llvtable) -} - -/// Same as `trans_trait_callee()` above, except that it is given a by-ref pointer to the object -/// pair. -fn trans_trait_callee_from_llval<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - opaque_fn_ty: Ty<'tcx>, - vtable_index: usize, - llself: ValueRef, - llvtable: ValueRef) - -> Callee<'blk, 'tcx> { - let _icx = push_ctxt("meth::trans_trait_callee"); - let ccx = bcx.ccx(); - - // Load the data pointer from the object. - debug!("trans_trait_callee_from_llval(callee_ty={}, vtable_index={}, llself={}, llvtable={})", - opaque_fn_ty, - vtable_index, - bcx.val_to_string(llself), - bcx.val_to_string(llvtable)); - - // Replace the self type (&Self or Box) with an opaque pointer. - let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])); - let llcallee_ty = type_of_fn_from_ty(ccx, opaque_fn_ty); - - Callee { - bcx: bcx, - data: TraitItem(MethodData { - llfn: PointerCast(bcx, mptr, llcallee_ty.ptr_to()), - llself: PointerCast(bcx, llself, Type::i8p(ccx)), - }), - ty: opaque_fn_ty - } -} - -/// Generate a shim function that allows an object type like `SomeTrait` to -/// implement the type `SomeTrait`. Imagine a trait definition: -/// -/// trait SomeTrait { fn get(&self) -> i32; ... } -/// -/// And a generic bit of code: -/// -/// fn foo(t: &T) { -/// let x = SomeTrait::get; -/// x(t) -/// } -/// -/// What is the value of `x` when `foo` is invoked with `T=SomeTrait`? 
-/// The answer is that it is a shim function generated by this routine: -/// -/// fn shim(t: &SomeTrait) -> i32 { -/// // ... call t.get() virtually ... -/// } -/// -/// In fact, all virtual calls can be thought of as normal trait calls -/// that go through this shim function. -pub fn trans_object_shim<'a, 'tcx>( - ccx: &'a CrateContext<'a, 'tcx>, - upcast_trait_ref: ty::PolyTraitRef<'tcx>, - method_id: DefId, - vtable_index: usize) - -> Datum<'tcx, Rvalue> -{ - let _icx = push_ctxt("trans_object_shim"); - let tcx = ccx.tcx(); - - debug!("trans_object_shim(upcast_trait_ref={:?}, method_id={:?})", - upcast_trait_ref, - method_id); - - // Upcast to the trait in question and extract out the substitutions. - let upcast_trait_ref = tcx.erase_late_bound_regions(&upcast_trait_ref); - let object_substs = upcast_trait_ref.substs.clone().erase_regions(); - debug!("trans_object_shim: object_substs={:?}", object_substs); - - // Lookup the type of this method as declared in the trait and apply substitutions. 
- let method_ty = match tcx.impl_or_trait_item(method_id) { - ty::MethodTraitItem(method) => method, - _ => { - tcx.sess.bug("can't create a method shim for a non-method item") - } - }; - let fty = monomorphize::apply_param_substs(tcx, &object_substs, &method_ty.fty); - let fty = tcx.mk_bare_fn(fty); - let method_ty = opaque_method_ty(tcx, fty); - debug!("trans_object_shim: fty={:?} method_ty={:?}", fty, method_ty); - - // - let shim_fn_ty = tcx.mk_fn(None, fty); - let method_bare_fn_ty = tcx.mk_fn(None, method_ty); - let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim"); - let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty); - - let sig = ccx.tcx().erase_late_bound_regions(&fty.sig); - let sig = infer::normalize_associated_type(ccx.tcx(), &sig); - - let empty_substs = tcx.mk_substs(Substs::trans_empty()); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = new_fn_ctxt(ccx, - llfn, - ast::DUMMY_NODE_ID, - false, - sig.output, - empty_substs, - None, - &block_arena); - let mut bcx = init_function(&fcx, false, sig.output); - - let llargs = get_params(fcx.llfn); - - let self_idx = fcx.arg_offset(); - let llself = llargs[self_idx]; - let llvtable = llargs[self_idx + 1]; - - debug!("trans_object_shim: llself={}, llvtable={}", - bcx.val_to_string(llself), bcx.val_to_string(llvtable)); - - assert!(!fcx.needs_ret_allocas); - - let dest = - fcx.llretslotptr.get().map( - |_| expr::SaveIn(fcx.get_ret_slot(bcx, sig.output, "ret_slot"))); - - debug!("trans_object_shim: method_offset_in_vtable={}", - vtable_index); - - bcx = trans_call_inner(bcx, - DebugLoc::None, - |bcx, _| trans_trait_callee_from_llval(bcx, - method_bare_fn_ty, - vtable_index, - llself, llvtable), - ArgVals(&llargs[(self_idx + 2)..]), - dest).bcx; - - finish_fn(&fcx, bcx, sig.output, DebugLoc::None); - - immediate_rvalue(llfn, shim_fn_ty) -} - -/// Creates a returns a dynamic vtable for the 
given type and vtable origin. -/// This is used only for objects. -/// -/// The `trait_ref` encodes the erased self type. Hence if we are -/// making an object `Foo` from a value of type `Foo`, then -/// `trait_ref` would map `T:Trait`. -pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, - param_substs: &'tcx subst::Substs<'tcx>) - -> ValueRef -{ - let tcx = ccx.tcx(); - let _icx = push_ctxt("meth::get_vtable"); - - debug!("get_vtable(trait_ref={:?})", trait_ref); - - // Check the cache. - match ccx.vtables().borrow().get(&trait_ref) { - Some(&val) => { return val } - None => { } - } - - // Not in the cache. Build it. - let methods = traits::supertraits(tcx, trait_ref.clone()).flat_map(|trait_ref| { - let vtable = fulfill_obligation(ccx, DUMMY_SP, trait_ref.clone()); - match vtable { - // Should default trait error here? - traits::VtableDefaultImpl(_) | - traits::VtableBuiltin(_) => { - Vec::new().into_iter() - } - traits::VtableImpl( - traits::VtableImplData { - impl_def_id: id, - substs, - nested: _ }) => { - emit_vtable_methods(ccx, id, substs, param_substs).into_iter() - } - traits::VtableClosure( - traits::VtableClosureData { - closure_def_id, - substs, - nested: _ }) => { - let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap(); - let llfn = closure::trans_closure_method(ccx, - closure_def_id, - substs, - trait_closure_kind); - vec![llfn].into_iter() - } - traits::VtableFnPointer(bare_fn_ty) => { - let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_ref.def_id()).unwrap(); - vec![trans_fn_pointer_shim(ccx, trait_closure_kind, bare_fn_ty)].into_iter() - } - traits::VtableObject(ref data) => { - // this would imply that the Self type being erased is - // an object type; this cannot happen because we - // cannot cast an unsized type into a trait object - tcx.sess.bug( - &format!("cannot get vtable for an object type: {:?}", - data)); - } - traits::VtableParam(..) 
=> { - tcx.sess.bug( - &format!("resolved vtable for {:?} to bad vtable {:?} in trans", - trait_ref, - vtable)); - } - } - }); - - let size_ty = sizing_type_of(ccx, trait_ref.self_ty()); - let size = machine::llsize_of_alloc(ccx, size_ty); - let align = align_of(ccx, trait_ref.self_ty()); - - let components: Vec<_> = vec![ - // Generate a destructor for the vtable. - glue::get_drop_glue(ccx, trait_ref.self_ty()), - C_uint(ccx, size), - C_uint(ccx, align) - ].into_iter().chain(methods).collect(); - - let vtable_const = C_struct(ccx, &components, false); - let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); - let vtable = consts::addr_of(ccx, vtable_const, align, "vtable"); - - ccx.vtables().borrow_mut().insert(trait_ref, vtable); - vtable -} - -fn emit_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - impl_id: DefId, - substs: subst::Substs<'tcx>, - param_substs: &'tcx subst::Substs<'tcx>) - -> Vec -{ - let tcx = ccx.tcx(); - - debug!("emit_vtable_methods(impl_id={:?}, substs={:?}, param_substs={:?})", - impl_id, - substs, - param_substs); - - let trt_id = match tcx.impl_trait_ref(impl_id) { - Some(t_id) => t_id.def_id, - None => ccx.sess().bug("make_impl_vtable: don't know how to \ - make a vtable for a type impl!") - }; - - tcx.populate_implementations_for_trait_if_necessary(trt_id); - - let nullptr = C_null(Type::nil(ccx).ptr_to()); - let trait_item_def_ids = tcx.trait_item_def_ids(trt_id); - trait_item_def_ids - .iter() - - // Filter out non-method items. - .filter_map(|item_def_id| { - match *item_def_id { - ty::MethodTraitItemId(def_id) => Some(def_id), - _ => None, - } - }) - - // Now produce pointers for each remaining method. If the - // method could never be called from this object, just supply - // null. 
- .map(|trait_method_def_id| { - debug!("emit_vtable_methods: trait_method_def_id={:?}", - trait_method_def_id); - - let trait_method_type = match tcx.impl_or_trait_item(trait_method_def_id) { - ty::MethodTraitItem(m) => m, - _ => ccx.sess().bug("should be a method, not other assoc item"), - }; - let name = trait_method_type.name; - - // Some methods cannot be called on an object; skip those. - if !traits::is_vtable_safe_method(tcx, trt_id, &trait_method_type) { - debug!("emit_vtable_methods: not vtable safe"); - return nullptr; - } - - debug!("emit_vtable_methods: trait_method_type={:?}", - trait_method_type); - - // The substitutions we have are on the impl, so we grab - // the method type from the impl to substitute into. - let mth = tcx.get_impl_method(impl_id, substs.clone(), name); - - debug!("emit_vtable_methods: mth={:?}", mth); - - // If this is a default method, it's possible that it - // relies on where clauses that do not hold for this - // particular set of type parameters. Note that this - // method could then never be called, so we do not want to - // try and trans it, in that case. Issue #23435. - if mth.is_provided { - let predicates = mth.method.predicates.predicates.subst(tcx, &mth.substs); - if !normalize_and_test_predicates(ccx, predicates.into_vec()) { - debug!("emit_vtable_methods: predicates do not hold"); - return nullptr; - } - } - - trans_fn_ref_with_substs(ccx, - mth.method.def_id, - ExprId(0), - param_substs, - mth.substs).val - }) - .collect() -} - -/// Replace the self type (&Self or Box) with an opaque pointer. 
-fn opaque_method_ty<'tcx>(tcx: &ty::ctxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>) - -> &'tcx ty::BareFnTy<'tcx> { - let mut inputs = method_ty.sig.0.inputs.clone(); - inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::TyI8)); - - tcx.mk_bare_fn(ty::BareFnTy { - unsafety: method_ty.unsafety, - abi: method_ty.abi, - sig: ty::Binder(ty::FnSig { - inputs: inputs, - output: method_ty.sig.0.output, - variadic: method_ty.sig.0.variadic, - }), - }) -} diff --git a/src/librustc_trans/trans/mir/analyze.rs b/src/librustc_trans/trans/mir/analyze.rs deleted file mode 100644 index 9d4c7663cb0d3..0000000000000 --- a/src/librustc_trans/trans/mir/analyze.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! An analysis to determine which temporaries require allocas and -//! which do not. - -use rustc_data_structures::fnv::FnvHashSet; -use rustc::mir::repr as mir; -use rustc::mir::visit::{Visitor, LvalueContext}; -use trans::common::{self, Block}; -use super::rvalue; - -pub fn lvalue_temps<'bcx,'tcx>(bcx: Block<'bcx,'tcx>, - mir: &mir::Mir<'tcx>) - -> FnvHashSet { - let mut analyzer = TempAnalyzer::new(); - - analyzer.visit_mir(mir); - - for (index, temp_decl) in mir.temp_decls.iter().enumerate() { - let ty = bcx.monomorphize(&temp_decl.ty); - debug!("temp {:?} has type {:?}", index, ty); - if ty.is_scalar() || - ty.is_unique() || - ty.is_region_ptr() || - ty.is_simd() - { - // These sorts of types are immediates that we can store - // in an ValueRef without an alloca. - assert!(common::type_is_immediate(bcx.ccx(), ty) || - common::type_is_fat_ptr(bcx.tcx(), ty)); - } else { - // These sorts of types require an alloca. 
Note that - // type_is_immediate() may *still* be true, particularly - // for newtypes, but we currently force some types - // (e.g. structs) into an alloca unconditionally, just so - // that we don't have to deal with having two pathways - // (gep vs extractvalue etc). - analyzer.mark_as_lvalue(index); - } - } - - analyzer.lvalue_temps -} - -struct TempAnalyzer { - lvalue_temps: FnvHashSet, -} - -impl TempAnalyzer { - fn new() -> TempAnalyzer { - TempAnalyzer { lvalue_temps: FnvHashSet() } - } - - fn mark_as_lvalue(&mut self, temp: usize) { - debug!("marking temp {} as lvalue", temp); - self.lvalue_temps.insert(temp); - } -} - -impl<'tcx> Visitor<'tcx> for TempAnalyzer { - fn visit_assign(&mut self, - block: mir::BasicBlock, - lvalue: &mir::Lvalue<'tcx>, - rvalue: &mir::Rvalue<'tcx>) { - debug!("visit_assign(block={:?}, lvalue={:?}, rvalue={:?})", block, lvalue, rvalue); - - match *lvalue { - mir::Lvalue::Temp(index) => { - if !rvalue::rvalue_creates_operand(rvalue) { - self.mark_as_lvalue(index as usize); - } - } - _ => { - self.visit_lvalue(lvalue, LvalueContext::Store); - } - } - - self.visit_rvalue(rvalue); - } - - fn visit_lvalue(&mut self, - lvalue: &mir::Lvalue<'tcx>, - context: LvalueContext) { - debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); - - match *lvalue { - mir::Lvalue::Temp(index) => { - match context { - LvalueContext::Consume => { - } - LvalueContext::Store | - LvalueContext::Drop | - LvalueContext::Inspect | - LvalueContext::Borrow { .. } | - LvalueContext::Slice { .. } | - LvalueContext::Projection => { - self.mark_as_lvalue(index as usize); - } - } - } - _ => { - } - } - - self.super_lvalue(lvalue, context); - } -} diff --git a/src/librustc_trans/trans/mir/block.rs b/src/librustc_trans/trans/mir/block.rs deleted file mode 100644 index 5446bbda4c26d..0000000000000 --- a/src/librustc_trans/trans/mir/block.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::{BasicBlockRef, ValueRef}; -use rustc::mir::repr as mir; -use trans::adt; -use trans::base; -use trans::build; -use trans::attributes; -use trans::common::{self, Block}; -use trans::debuginfo::DebugLoc; -use trans::type_of; -use trans::type_::Type; -use trans::Disr; - -use super::MirContext; -use super::operand::OperandValue::{FatPtr, Immediate, Ref}; - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_block(&mut self, bb: mir::BasicBlock) { - debug!("trans_block({:?})", bb); - - let mut bcx = self.bcx(bb); - let data = self.mir.basic_block_data(bb); - - for statement in &data.statements { - bcx = self.trans_statement(bcx, statement); - } - - debug!("trans_block: terminator: {:?}", data.terminator()); - - match *data.terminator() { - mir::Terminator::Goto { target } => { - build::Br(bcx, self.llblock(target), DebugLoc::None) - } - - mir::Terminator::If { ref cond, targets: (true_bb, false_bb) } => { - let cond = self.trans_operand(bcx, cond); - let lltrue = self.llblock(true_bb); - let llfalse = self.llblock(false_bb); - build::CondBr(bcx, cond.immediate(), lltrue, llfalse, DebugLoc::None); - } - - mir::Terminator::Switch { ref discr, ref adt_def, ref targets } => { - let discr_lvalue = self.trans_lvalue(bcx, discr); - let ty = discr_lvalue.ty.to_ty(bcx.tcx()); - let repr = adt::represent_type(bcx.ccx(), ty); - let discr = adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None); - - // The else branch of the Switch can't be hit, so branch to an unreachable - // instruction so LLVM knows that - let unreachable_blk = self.unreachable_block(); - - let switch = build::Switch(bcx, discr, unreachable_blk.llbb, targets.len()); - 
assert_eq!(adt_def.variants.len(), targets.len()); - for (adt_variant, target) in adt_def.variants.iter().zip(targets) { - let llval = adt::trans_case(bcx, &*repr, Disr::from(adt_variant.disr_val)); - let llbb = self.llblock(*target); - - build::AddCase(switch, llval, llbb) - } - } - - mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let (otherwise, targets) = targets.split_last().unwrap(); - let discr = build::Load(bcx, self.trans_lvalue(bcx, discr).llval); - let switch = build::Switch(bcx, discr, self.llblock(*otherwise), values.len()); - for (value, target) in values.iter().zip(targets) { - let llval = self.trans_constval(bcx, value, switch_ty).immediate(); - let llbb = self.llblock(*target); - build::AddCase(switch, llval, llbb) - } - } - - mir::Terminator::Resume => { - let ps = self.get_personality_slot(bcx); - let lp = build::Load(bcx, ps); - base::call_lifetime_end(bcx, ps); - base::trans_unwind_resume(bcx, lp); - } - - mir::Terminator::Return => { - let return_ty = bcx.monomorphize(&self.mir.return_ty); - base::build_return_block(bcx.fcx, bcx, return_ty, DebugLoc::None); - } - - mir::Terminator::Call { ref func, ref args, ref kind } => { - // Create the callee. This will always be a fn ptr and hence a kind of scalar. - let callee = self.trans_operand(bcx, func); - let attrs = attributes::from_fn_type(bcx.ccx(), callee.ty); - let debugloc = DebugLoc::None; - // The arguments we'll be passing. Plus one to account for outptr, if used. 
- let mut llargs = Vec::with_capacity(args.len() + 1); - - // Prepare the return value destination - let (ret_dest_ty, must_copy_dest) = if let Some(d) = kind.destination() { - let dest = self.trans_lvalue(bcx, d); - let ret_ty = dest.ty.to_ty(bcx.tcx()); - if type_of::return_uses_outptr(bcx.ccx(), ret_ty) { - llargs.push(dest.llval); - (Some((dest, ret_ty)), false) - } else { - (Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty)) - } - } else { - (None, false) - }; - - // Process the rest of the args. - for arg in args { - match self.trans_operand(bcx, arg).val { - Ref(llval) | Immediate(llval) => llargs.push(llval), - FatPtr(b, e) => { - llargs.push(b); - llargs.push(e); - } - } - } - - // Many different ways to call a function handled here - match (base::avoid_invoke(bcx), kind) { - // The two cases below are the only ones to use LLVM’s `invoke`. - (false, &mir::CallKind::DivergingCleanup(cleanup)) => { - let cleanup = self.bcx(cleanup); - let landingpad = self.make_landing_pad(cleanup); - build::Invoke(bcx, - callee.immediate(), - &llargs[..], - self.unreachable_block().llbb, - landingpad.llbb, - Some(attrs), - debugloc); - }, - (false, &mir::CallKind::ConvergingCleanup { ref targets, .. }) => { - let cleanup = self.bcx(targets.1); - let landingpad = self.make_landing_pad(cleanup); - let (target, postinvoke) = if must_copy_dest { - (bcx.fcx.new_block(false, "", None), Some(self.bcx(targets.0))) - } else { - (self.bcx(targets.0), None) - }; - let invokeret = build::Invoke(bcx, - callee.immediate(), - &llargs[..], - target.llbb, - landingpad.llbb, - Some(attrs), - debugloc); - if let Some(postinvoketarget) = postinvoke { - // We translate the copy into a temoprary block. 
The temporary block is - // necessary because the current block has already been terminated (by - // `invoke`) and we cannot really translate into the target block - // because: - // * The target block may have more than a single precedesor; - // * Some LLVM insns cannot have a preceeding store insn (phi, - // cleanuppad), and adding/prepending the store now may render - // those other instructions invalid. - // - // NB: This approach still may break some LLVM code. For example if the - // target block starts with a `phi` (which may only match on immediate - // precedesors), it cannot know about this temporary block thus - // resulting in an invalid code: - // - // this: - // … - // %0 = … - // %1 = invoke to label %temp … - // temp: - // store ty %1, ty* %dest - // br label %actualtargetblock - // actualtargetblock: ; preds: %temp, … - // phi … [%this, …], [%0, …] ; ERROR: phi requires to match only on - // ; immediate precedesors - let (ret_dest, ret_ty) = ret_dest_ty - .expect("return destination and type not set"); - base::store_ty(target, invokeret, ret_dest.llval, ret_ty); - build::Br(target, postinvoketarget.llbb, debugloc); - } - }, - (_, &mir::CallKind::DivergingCleanup(_)) | - (_, &mir::CallKind::Diverging) => { - build::Call(bcx, callee.immediate(), &llargs[..], Some(attrs), debugloc); - build::Unreachable(bcx); - } - (_, k@&mir::CallKind::ConvergingCleanup { .. }) | - (_, k@&mir::CallKind::Converging { .. }) => { - // Bug #20046 - let target = match *k { - mir::CallKind::ConvergingCleanup { targets, .. } => targets.0, - mir::CallKind::Converging { target, .. 
} => target, - _ => unreachable!() - }; - let llret = build::Call(bcx, - callee.immediate(), - &llargs[..], - Some(attrs), - debugloc); - if must_copy_dest { - let (ret_dest, ret_ty) = ret_dest_ty - .expect("return destination and type not set"); - base::store_ty(bcx, llret, ret_dest.llval, ret_ty); - } - build::Br(bcx, self.llblock(target), debugloc); - } - } - } - } - } - - fn get_personality_slot(&mut self, bcx: Block<'bcx, 'tcx>) -> ValueRef { - let ccx = bcx.ccx(); - if let Some(slot) = self.llpersonalityslot { - slot - } else { - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = base::alloca(bcx, llretty, "personalityslot"); - self.llpersonalityslot = Some(slot); - base::call_lifetime_start(bcx, slot); - slot - } - } - - fn make_landing_pad(&mut self, cleanup: Block<'bcx, 'tcx>) -> Block<'bcx, 'tcx> { - let bcx = cleanup.fcx.new_block(true, "cleanup", None); - let ccx = bcx.ccx(); - let llpersonality = bcx.fcx.eh_personality(); - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = build::LandingPad(bcx, llretty, llpersonality, 1); - build::SetCleanup(bcx, llretval); - let slot = self.get_personality_slot(bcx); - build::Store(bcx, llretval, slot); - build::Br(bcx, cleanup.llbb, DebugLoc::None); - bcx - } - - fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { - match self.unreachable_block { - Some(b) => b, - None => { - let bl = self.fcx.new_block(false, "unreachable", None); - build::Unreachable(bl); - self.unreachable_block = Some(bl); - bl - } - } - } - - fn bcx(&self, bb: mir::BasicBlock) -> Block<'bcx, 'tcx> { - self.blocks[bb.index()] - } - - fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef { - self.blocks[bb.index()].llbb - } -} diff --git a/src/librustc_trans/trans/mir/constant.rs b/src/librustc_trans/trans/mir/constant.rs deleted file mode 100644 index 84cc87e9b1385..0000000000000 --- a/src/librustc_trans/trans/mir/constant.rs +++ /dev/null @@ -1,96 +0,0 @@ -// 
Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use back::abi; -use llvm::ValueRef; -use middle::subst::Substs; -use middle::ty::{Ty, TypeFoldable}; -use rustc::middle::const_eval::ConstVal; -use rustc::mir::repr as mir; -use trans::common::{self, Block, C_bool, C_bytes, C_floating_f64, C_integral, C_str_slice}; -use trans::consts; -use trans::expr; -use trans::type_of; - -use super::operand::{OperandRef, OperandValue}; -use super::MirContext; - - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_constval(&mut self, - bcx: Block<'bcx, 'tcx>, - cv: &ConstVal, - ty: Ty<'tcx>) - -> OperandRef<'tcx> - { - let ccx = bcx.ccx(); - let val = self.trans_constval_inner(bcx, cv, ty, bcx.fcx.param_substs); - let val = if common::type_is_immediate(ccx, ty) { - OperandValue::Immediate(val) - } else if common::type_is_fat_ptr(bcx.tcx(), ty) { - let data = common::const_get_elt(ccx, val, &[abi::FAT_PTR_ADDR as u32]); - let extra = common::const_get_elt(ccx, val, &[abi::FAT_PTR_EXTRA as u32]); - OperandValue::FatPtr(data, extra) - } else { - OperandValue::Ref(val) - }; - - assert!(!ty.has_erasable_regions()); - - OperandRef { - ty: ty, - val: val - } - } - - /// Translate ConstVal into a bare LLVM ValueRef. 
- fn trans_constval_inner(&mut self, - bcx: common::Block<'bcx, 'tcx>, - cv: &ConstVal, - ty: Ty<'tcx>, - param_substs: &'tcx Substs<'tcx>) - -> ValueRef - { - let ccx = bcx.ccx(); - let llty = type_of::type_of(ccx, ty); - match *cv { - ConstVal::Float(v) => C_floating_f64(v, llty), - ConstVal::Bool(v) => C_bool(ccx, v), - ConstVal::Int(v) => C_integral(llty, v as u64, true), - ConstVal::Uint(v) => C_integral(llty, v, false), - ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), - ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"), - ConstVal::Struct(id) | ConstVal::Tuple(id) | - ConstVal::Array(id, _) | ConstVal::Repeat(id, _) => { - let expr = bcx.tcx().map.expect_expr(id); - expr::trans(bcx, expr).datum.val - }, - ConstVal::Function(did) => - self.trans_fn_ref(bcx, ty, param_substs, did).immediate() - } - } - - pub fn trans_constant(&mut self, - bcx: Block<'bcx, 'tcx>, - constant: &mir::Constant<'tcx>) - -> OperandRef<'tcx> - { - let ty = bcx.monomorphize(&constant.ty); - match constant.literal { - mir::Literal::Item { def_id, kind, substs } => { - let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs)); - self.trans_item_ref(bcx, ty, kind, substs, def_id) - } - mir::Literal::Value { ref value } => { - self.trans_constval(bcx, value, ty) - } - } - } -} diff --git a/src/librustc_trans/trans/mir/did.rs b/src/librustc_trans/trans/mir/did.rs deleted file mode 100644 index 3238869cac5c1..0000000000000 --- a/src/librustc_trans/trans/mir/did.rs +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Code for translating references to other items (DefIds). 
- -use syntax::codemap::DUMMY_SP; -use rustc::front::map; -use rustc::middle::ty::{self, Ty, TypeFoldable}; -use rustc::middle::subst::Substs; -use rustc::middle::const_eval; -use rustc::middle::def_id::DefId; -use rustc::middle::subst; -use rustc::middle::traits; -use rustc::mir::repr::ItemKind; -use trans::common::{Block, fulfill_obligation}; -use trans::base; -use trans::closure; -use trans::expr; -use trans::monomorphize; -use trans::meth; -use trans::inline; - -use super::MirContext; -use super::operand::{OperandRef, OperandValue}; - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - /// Translate reference to item. - pub fn trans_item_ref(&mut self, - bcx: Block<'bcx, 'tcx>, - ty: Ty<'tcx>, - kind: ItemKind, - substs: &'tcx Substs<'tcx>, - did: DefId) - -> OperandRef<'tcx> { - debug!("trans_item_ref(ty={:?}, kind={:?}, substs={:?}, did={})", - ty, kind, substs, bcx.tcx().item_path_str(did)); - - match kind { - ItemKind::Function => self.trans_fn_ref(bcx, ty, substs, did), - ItemKind::Method => match bcx.tcx().impl_or_trait_item(did).container() { - ty::ImplContainer(_) => self.trans_fn_ref(bcx, ty, substs, did), - ty::TraitContainer(tdid) => self.trans_static_method(bcx, ty, did, tdid, substs) - }, - ItemKind::Constant => { - let did = inline::maybe_instantiate_inline(bcx.ccx(), did); - let expr = const_eval::lookup_const_by_id(bcx.tcx(), did, None, Some(substs)) - .expect("def was const, but lookup_const_by_id failed"); - // FIXME: this is falling back to translating from HIR. This is not easy to fix, - // because we would have somehow adapt const_eval to work on MIR rather than HIR. - let d = expr::trans(bcx, expr); - OperandRef::from_rvalue_datum(d.datum.to_rvalue_datum(d.bcx, "").datum) - } - } - } - - /// Translates references to a function-like items. - /// - /// That includes regular functions, non-static methods, struct and enum variant constructors, - /// closures and possibly more. - /// - /// This is an adaptation of callee::trans_fn_ref_with_substs. 
- pub fn trans_fn_ref(&mut self, - bcx: Block<'bcx, 'tcx>, - ty: Ty<'tcx>, - substs: &'tcx Substs<'tcx>, - did: DefId) - -> OperandRef<'tcx> { - debug!("trans_fn_ref(ty={:?}, substs={:?}, did={})", - ty, substs, bcx.tcx().item_path_str(did)); - - let did = inline::maybe_instantiate_inline(bcx.ccx(), did); - - if !substs.types.is_empty() || is_named_tuple_constructor(bcx.tcx(), did) { - let (val, fn_ty, _) = monomorphize::monomorphic_fn(bcx.ccx(), did, substs, None); - // FIXME: cast fnptr to proper type if necessary - OperandRef { - ty: fn_ty, - val: OperandValue::Immediate(val) - } - } else { - let val = if let Some(node_id) = bcx.tcx().map.as_local_node_id(did) { - base::get_item_val(bcx.ccx(), node_id) - } else { - base::trans_external_path(bcx.ccx(), did, ty) - }; - // FIXME: cast fnptr to proper type if necessary - OperandRef { - ty: ty, - val: OperandValue::Immediate(val) - } - } - } - - /// Translates references to static methods. - /// - /// This is an adaptation of meth::trans_static_method_callee - pub fn trans_static_method(&mut self, - bcx: Block<'bcx, 'tcx>, - ty: Ty<'tcx>, - method_id: DefId, - trait_id: DefId, - substs: &'tcx Substs<'tcx>) - -> OperandRef<'tcx> { - debug!("trans_static_method(ty={:?}, method={}, trait={}, substs={:?})", - ty, - bcx.tcx().item_path_str(method_id), - bcx.tcx().item_path_str(trait_id), - substs); - - let ccx = bcx.ccx(); - let tcx = bcx.tcx(); - let subst::SeparateVecsPerParamSpace { - types: rcvr_type, - selfs: rcvr_self, - fns: rcvr_method - } = substs.clone().types.split(); - let trait_substs = Substs::erased( - subst::VecPerParamSpace::new(rcvr_type, rcvr_self, Vec::new()) - ); - let trait_substs = tcx.mk_substs(trait_substs); - let trait_ref = ty::Binder(ty::TraitRef::new(trait_id, trait_substs)); - let vtbl = fulfill_obligation(ccx, DUMMY_SP, trait_ref); - match vtbl { - traits::VtableImpl(traits::VtableImplData { impl_def_id, substs: imp_substs, .. 
}) => { - assert!(!imp_substs.types.needs_infer()); - - let mname = tcx.item_name(method_id); - - let subst::SeparateVecsPerParamSpace { - types: impl_type, - selfs: impl_self, - fns: _ - } = imp_substs.types.split(); - let callee_substs = Substs::erased( - subst::VecPerParamSpace::new(impl_type, impl_self, rcvr_method) - ); - let mth = tcx.get_impl_method(impl_def_id, callee_substs, mname); - let mthsubsts = tcx.mk_substs(mth.substs); - self.trans_fn_ref(bcx, ty, mthsubsts, mth.method.def_id) - }, - traits::VtableClosure(data) => { - let trait_closure_kind = bcx.tcx().lang_items.fn_trait_kind(trait_id).unwrap(); - let llfn = closure::trans_closure_method(bcx.ccx(), - data.closure_def_id, - data.substs, - trait_closure_kind); - OperandRef { - ty: ty, - val: OperandValue::Immediate(llfn) - } - }, - traits::VtableObject(ref data) => { - let idx = traits::get_vtable_index_of_object_method(tcx, data, method_id); - OperandRef::from_rvalue_datum( - meth::trans_object_shim(ccx, data.upcast_trait_ref.clone(), method_id, idx) - ) - } - _ => { - tcx.sess.bug(&format!("static call to invalid vtable: {:?}", vtbl)); - } - } - } -} - -fn is_named_tuple_constructor(tcx: &ty::ctxt, def_id: DefId) -> bool { - let node_id = match tcx.map.as_local_node_id(def_id) { - Some(n) => n, - None => { return false; } - }; - match tcx.map.find(node_id).expect("local item should be in ast map") { - map::NodeVariant(v) => { - v.node.data.is_tuple() - } - map::NodeStructCtor(_) => true, - _ => false - } -} diff --git a/src/librustc_trans/trans/mir/lvalue.rs b/src/librustc_trans/trans/mir/lvalue.rs deleted file mode 100644 index a6ba069742d91..0000000000000 --- a/src/librustc_trans/trans/mir/lvalue.rs +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::ValueRef; -use rustc::middle::ty::{self, Ty, TypeFoldable}; -use rustc::mir::repr as mir; -use rustc::mir::tcx::LvalueTy; -use trans::adt; -use trans::base; -use trans::build; -use trans::common::{self, Block}; -use trans::debuginfo::DebugLoc; -use trans::machine; -use trans::type_of; -use llvm; -use trans::Disr; - -use std::ptr; - -use super::{MirContext, TempRef}; - -#[derive(Copy, Clone)] -pub struct LvalueRef<'tcx> { - /// Pointer to the contents of the lvalue - pub llval: ValueRef, - - /// This lvalue's extra data if it is unsized, or null - pub llextra: ValueRef, - - /// Monomorphized type of this lvalue, including variant information - pub ty: LvalueTy<'tcx>, -} - -impl<'tcx> LvalueRef<'tcx> { - pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> { - LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } - } - - pub fn alloca<'bcx>(bcx: Block<'bcx, 'tcx>, - ty: Ty<'tcx>, - name: &str) - -> LvalueRef<'tcx> - { - assert!(!ty.has_erasable_regions()); - let lltemp = base::alloc_ty(bcx, ty, name); - LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) - } -} - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn lvalue_len(&mut self, - bcx: Block<'bcx, 'tcx>, - lvalue: LvalueRef<'tcx>) - -> ValueRef { - match lvalue.ty.to_ty(bcx.tcx()).sty { - ty::TyArray(_, n) => common::C_uint(bcx.ccx(), n), - ty::TySlice(_) | ty::TyStr => { - assert!(lvalue.llextra != ptr::null_mut()); - lvalue.llextra - } - _ => bcx.sess().bug("unexpected type in get_base_and_len"), - } - } - - pub fn trans_lvalue(&mut self, - bcx: Block<'bcx, 'tcx>, - lvalue: &mir::Lvalue<'tcx>) - -> LvalueRef<'tcx> { - debug!("trans_lvalue(lvalue={:?})", lvalue); - - let fcx = bcx.fcx; - let ccx = fcx.ccx; - let tcx = bcx.tcx(); - match *lvalue { - 
mir::Lvalue::Var(index) => self.vars[index as usize], - mir::Lvalue::Temp(index) => match self.temps[index as usize] { - TempRef::Lvalue(lvalue) => - lvalue, - TempRef::Operand(..) => - tcx.sess.bug(&format!("using operand temp {:?} as lvalue", lvalue)), - }, - mir::Lvalue::Arg(index) => self.args[index as usize], - mir::Lvalue::Static(def_id) => { - let const_ty = self.mir.lvalue_ty(tcx, lvalue); - LvalueRef::new_sized( - common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)), - const_ty) - }, - mir::Lvalue::ReturnPointer => { - let fn_return_ty = bcx.monomorphize(&self.mir.return_ty); - let return_ty = fn_return_ty.unwrap(); - let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) { - fcx.get_ret_slot(bcx, fn_return_ty, "") - } else { - // This is a void return; that is, there’s no place to store the value and - // there cannot really be one (or storing into it doesn’t make sense, anyway). - // Ergo, we return an undef ValueRef, so we do not have to special-case every - // place using lvalues, and could use it the same way you use a regular - // ReturnPointer LValue (i.e. store into it, load from it etc). 
- let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to(); - unsafe { - llvm::LLVMGetUndef(llty.to_ref()) - } - }; - LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty)) - }, - mir::Lvalue::Projection(ref projection) => { - let tr_base = self.trans_lvalue(bcx, &projection.base); - let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); - let (llprojected, llextra) = match projection.elem { - mir::ProjectionElem::Deref => { - let base_ty = tr_base.ty.to_ty(tcx); - if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) { - (base::load_ty(bcx, tr_base.llval, base_ty), - ptr::null_mut()) - } else { - base::load_fat_ptr(bcx, tr_base.llval, base_ty) - } - } - mir::ProjectionElem::Field(ref field) => { - let base_ty = tr_base.ty.to_ty(tcx); - let base_repr = adt::represent_type(ccx, base_ty); - let discr = match tr_base.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, - }; - let discr = discr as u64; - let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx)); - let base = if is_sized { - adt::MaybeSizedValue::sized(tr_base.llval) - } else { - adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) - }; - (adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index()), - if is_sized { - ptr::null_mut() - } else { - tr_base.llextra - }) - } - mir::ProjectionElem::Index(ref index) => { - let index = self.trans_operand(bcx, index); - let llindex = self.prepare_index(bcx, index.immediate()); - let zero = common::C_uint(bcx.ccx(), 0u64); - (build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]), - ptr::null_mut()) - } - mir::ProjectionElem::ConstantIndex { offset, - from_end: false, - min_length: _ } => { - let lloffset = common::C_u32(bcx.ccx(), offset); - let llindex = self.prepare_index(bcx, lloffset); - let zero = common::C_uint(bcx.ccx(), 0u64); - (build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]), - ptr::null_mut()) - } - mir::ProjectionElem::ConstantIndex { 
offset, - from_end: true, - min_length: _ } => { - let lloffset = common::C_u32(bcx.ccx(), offset); - let lllen = self.lvalue_len(bcx, tr_base); - let llindex = build::Sub(bcx, lllen, lloffset, DebugLoc::None); - let llindex = self.prepare_index(bcx, llindex); - let zero = common::C_uint(bcx.ccx(), 0u64); - (build::InBoundsGEP(bcx, tr_base.llval, &[zero, llindex]), - ptr::null_mut()) - } - mir::ProjectionElem::Downcast(..) => { - (tr_base.llval, tr_base.llextra) - } - }; - LvalueRef { - llval: llprojected, - llextra: llextra, - ty: projected_ty, - } - } - } - } - - /// Adjust the bitwidth of an index since LLVM is less forgiving - /// than we are. - /// - /// nmatsakis: is this still necessary? Not sure. - fn prepare_index(&mut self, - bcx: Block<'bcx, 'tcx>, - llindex: ValueRef) - -> ValueRef - { - let ccx = bcx.ccx(); - let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type()); - if index_size < int_size { - build::ZExt(bcx, llindex, ccx.int_type()) - } else if index_size > int_size { - build::Trunc(bcx, llindex, ccx.int_type()) - } else { - llindex - } - } -} diff --git a/src/librustc_trans/trans/mir/mod.rs b/src/librustc_trans/trans/mir/mod.rs deleted file mode 100644 index 75ce33da2c9b9..0000000000000 --- a/src/librustc_trans/trans/mir/mod.rs +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use libc::c_uint; -use llvm::{self, ValueRef}; -use rustc::mir::repr as mir; -use rustc::mir::tcx::LvalueTy; -use trans::base; -use trans::build; -use trans::common::{self, Block}; -use trans::debuginfo::DebugLoc; -use trans::expr; -use trans::type_of; - -use self::lvalue::LvalueRef; -use self::operand::OperandRef; - -// FIXME DebugLoc is always None right now - -/// Master context for translating MIR. -pub struct MirContext<'bcx, 'tcx:'bcx> { - mir: &'bcx mir::Mir<'tcx>, - - /// Function context - fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, - - /// When unwinding is initiated, we have to store this personality - /// value somewhere so that we can load it and re-use it in the - /// resume instruction. The personality is (afaik) some kind of - /// value used for C++ unwinding, which must filter by type: we - /// don't really care about it very much. Anyway, this value - /// contains an alloca into which the personality is stored and - /// then later loaded when generating the DIVERGE_BLOCK. - llpersonalityslot: Option, - - /// A `Block` for each MIR `BasicBlock` - blocks: Vec>, - - /// Cached unreachable block - unreachable_block: Option>, - - /// An LLVM alloca for each MIR `VarDecl` - vars: Vec>, - - /// The location where each MIR `TempDecl` is stored. This is - /// usually an `LvalueRef` representing an alloca, but not always: - /// sometimes we can skip the alloca and just store the value - /// directly using an `OperandRef`, which makes for tighter LLVM - /// IR. 
The conditions for using an `OperandRef` are as follows: - /// - /// - the type of the temporary must be judged "immediate" by `type_is_immediate` - /// - the operand must never be referenced indirectly - /// - we should not take its address using the `&` operator - /// - nor should it appear in an lvalue path like `tmp.a` - /// - the operand must be defined by an rvalue that can generate immediate - /// values - /// - /// Avoiding allocs can also be important for certain intrinsics, - /// notably `expect`. - temps: Vec>, - - /// The arguments to the function; as args are lvalues, these are - /// always indirect, though we try to avoid creating an alloca - /// when we can (and just reuse the pointer the caller provided). - args: Vec>, -} - -enum TempRef<'tcx> { - Lvalue(LvalueRef<'tcx>), - Operand(Option>), -} - -/////////////////////////////////////////////////////////////////////////// - -pub fn trans_mir<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>) { - let fcx = bcx.fcx; - let mir = bcx.mir(); - - let mir_blocks = bcx.mir().all_basic_blocks(); - - // Analyze the temps to determine which must be lvalues - // FIXME - let lvalue_temps = analyze::lvalue_temps(bcx, mir); - - // Allocate variable and temp allocas - let vars = mir.var_decls.iter() - .map(|decl| (bcx.monomorphize(&decl.ty), decl.name)) - .map(|(mty, name)| LvalueRef::alloca(bcx, mty, &name.as_str())) - .collect(); - let temps = mir.temp_decls.iter() - .map(|decl| bcx.monomorphize(&decl.ty)) - .enumerate() - .map(|(i, mty)| if lvalue_temps.contains(&i) { - TempRef::Lvalue(LvalueRef::alloca(bcx, - mty, - &format!("temp{:?}", i))) - } else { - // If this is an immediate temp, we do not create an - // alloca in advance. Instead we wait until we see the - // definition and update the operand there. 
- TempRef::Operand(None) - }) - .collect(); - let args = arg_value_refs(bcx, mir); - - // Allocate a `Block` for every basic block - let block_bcxs: Vec> = - mir_blocks.iter() - .map(|&bb|{ - let is_cleanup = mir.basic_block_data(bb).is_cleanup; - fcx.new_block(is_cleanup, &format!("{:?}", bb), None) - }) - .collect(); - - // Branch to the START block - let start_bcx = block_bcxs[mir::START_BLOCK.index()]; - build::Br(bcx, start_bcx.llbb, DebugLoc::None); - - let mut mircx = MirContext { - mir: mir, - fcx: fcx, - llpersonalityslot: None, - blocks: block_bcxs, - unreachable_block: None, - vars: vars, - temps: temps, - args: args, - }; - - // Translate the body of each block - for &bb in &mir_blocks { - mircx.trans_block(bb); - } -} - -/// Produce, for each argument, a `ValueRef` pointing at the -/// argument's value. As arguments are lvalues, these are always -/// indirect. -fn arg_value_refs<'bcx, 'tcx>(bcx: Block<'bcx, 'tcx>, - mir: &mir::Mir<'tcx>) - -> Vec> { - // FIXME tupled_args? I think I'd rather that mapping is done in MIR land though - let fcx = bcx.fcx; - let tcx = bcx.tcx(); - let mut idx = fcx.arg_offset() as c_uint; - mir.arg_decls - .iter() - .enumerate() - .map(|(arg_index, arg_decl)| { - let arg_ty = bcx.monomorphize(&arg_decl.ty); - let llval = if type_of::arg_is_indirect(bcx.ccx(), arg_ty) { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up, unless - // we emit extra-debug-info, which requires local allocas :(. - // FIXME: lifetimes, debug info - let llarg = llvm::get_param(fcx.llfn, idx); - idx += 1; - llarg - } else if common::type_is_fat_ptr(tcx, arg_ty) { - // we pass fat pointers as two words, but we want to - // represent them internally as a pointer to two words, - // so make an alloca to store them in. 
- let lldata = llvm::get_param(fcx.llfn, idx); - let llextra = llvm::get_param(fcx.llfn, idx + 1); - idx += 2; - let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); - build::Store(bcx, lldata, expr::get_dataptr(bcx, lltemp)); - build::Store(bcx, llextra, expr::get_meta(bcx, lltemp)); - lltemp - } else { - // otherwise, arg is passed by value, so make a - // temporary and store it there - let llarg = llvm::get_param(fcx.llfn, idx); - idx += 1; - let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); - base::store_ty(bcx, llarg, lltemp, arg_ty); - lltemp - }; - LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)) - }) - .collect() -} - -mod analyze; -mod block; -mod constant; -mod lvalue; -mod rvalue; -mod operand; -mod statement; -mod did; diff --git a/src/librustc_trans/trans/mir/operand.rs b/src/librustc_trans/trans/mir/operand.rs deleted file mode 100644 index 114e78b05bddd..0000000000000 --- a/src/librustc_trans/trans/mir/operand.rs +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::ValueRef; -use rustc::middle::ty::{Ty, TypeFoldable}; -use rustc::mir::repr as mir; -use trans::base; -use trans::common::{self, Block}; -use trans::datum; - -use super::{MirContext, TempRef}; - -/// The representation of a Rust value. The enum variant is in fact -/// uniquely determined by the value's type, but is kept as a -/// safety check. -#[derive(Copy, Clone)] -pub enum OperandValue { - /// A reference to the actual operand. The data is guaranteed - /// to be valid for the operand's lifetime. - Ref(ValueRef), - /// A single LLVM value. - Immediate(ValueRef), - /// A fat pointer. 
The first ValueRef is the data and the second - /// is the extra. - FatPtr(ValueRef, ValueRef) -} - -/// An `OperandRef` is an "SSA" reference to a Rust value, along with -/// its type. -/// -/// NOTE: unless you know a value's type exactly, you should not -/// generate LLVM opcodes acting on it and instead act via methods, -/// to avoid nasty edge cases. In particular, using `build::Store` -/// directly is sure to cause problems - use `store_operand` instead. -#[derive(Copy, Clone)] -pub struct OperandRef<'tcx> { - // The value. - pub val: OperandValue, - - // The type of value being returned. - pub ty: Ty<'tcx> -} - -impl<'tcx> OperandRef<'tcx> { - /// Asserts that this operand refers to a scalar and returns - /// a reference to its value. - pub fn immediate(self) -> ValueRef { - match self.val { - OperandValue::Immediate(s) => s, - _ => unreachable!() - } - } - - pub fn repr<'bcx>(self, bcx: Block<'bcx, 'tcx>) -> String { - match self.val { - OperandValue::Ref(r) => { - format!("OperandRef(Ref({}) @ {:?})", - bcx.val_to_string(r), self.ty) - } - OperandValue::Immediate(i) => { - format!("OperandRef(Immediate({}) @ {:?})", - bcx.val_to_string(i), self.ty) - } - OperandValue::FatPtr(a, d) => { - format!("OperandRef(FatPtr({}, {}) @ {:?})", - bcx.val_to_string(a), - bcx.val_to_string(d), - self.ty) - } - } - } - - pub fn from_rvalue_datum(datum: datum::Datum<'tcx, datum::Rvalue>) -> OperandRef { - OperandRef { - ty: datum.ty, - val: match datum.kind.mode { - datum::RvalueMode::ByRef => OperandValue::Ref(datum.val), - datum::RvalueMode::ByValue => OperandValue::Immediate(datum.val), - } - } - } -} - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_operand(&mut self, - bcx: Block<'bcx, 'tcx>, - operand: &mir::Operand<'tcx>) - -> OperandRef<'tcx> - { - debug!("trans_operand(operand={:?})", operand); - - match *operand { - mir::Operand::Consume(ref lvalue) => { - // watch out for temporaries that do not have an - // alloca; they are handled somewhat 
differently - if let &mir::Lvalue::Temp(index) = lvalue { - match self.temps[index as usize] { - TempRef::Operand(Some(o)) => { - return o; - } - TempRef::Operand(None) => { - bcx.tcx().sess.bug( - &format!("use of {:?} before def", lvalue)); - } - TempRef::Lvalue(..) => { - // use path below - } - } - } - - // for most lvalues, to consume them we just load them - // out from their home - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - debug!("trans_operand: tr_lvalue={} @ {:?}", - bcx.val_to_string(tr_lvalue.llval), - ty); - let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) { - datum::ByValue => { - OperandValue::Immediate(base::load_ty(bcx, tr_lvalue.llval, ty)) - } - datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => { - let (lldata, llextra) = base::load_fat_ptr(bcx, tr_lvalue.llval, ty); - OperandValue::FatPtr(lldata, llextra) - } - datum::ByRef => OperandValue::Ref(tr_lvalue.llval) - }; - - assert!(!ty.has_erasable_regions()); - - OperandRef { - val: val, - ty: ty - } - } - - mir::Operand::Constant(ref constant) => { - self.trans_constant(bcx, constant) - } - } - } - - pub fn trans_operand_into(&mut self, - bcx: Block<'bcx, 'tcx>, - lldest: ValueRef, - operand: &mir::Operand<'tcx>) - { - debug!("trans_operand_into(lldest={}, operand={:?})", - bcx.val_to_string(lldest), - operand); - - // FIXME: consider not copying constants through the - // stack. - - let o = self.trans_operand(bcx, operand); - self.store_operand(bcx, lldest, o); - } - - pub fn store_operand(&mut self, - bcx: Block<'bcx, 'tcx>, - lldest: ValueRef, - operand: OperandRef<'tcx>) - { - debug!("store_operand: operand={}", operand.repr(bcx)); - // Avoid generating stores of zero-sized values, because the only way to have a zero-sized - // value is through `undef`, and store itself is useless. 
- if common::type_is_zero_size(bcx.ccx(), operand.ty) { - return; - } - match operand.val { - OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty), - OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty), - OperandValue::FatPtr(data, extra) => { - base::store_fat_ptr(bcx, data, extra, lldest, operand.ty); - } - } - } -} diff --git a/src/librustc_trans/trans/mir/rvalue.rs b/src/librustc_trans/trans/mir/rvalue.rs deleted file mode 100644 index f53653d7cad6a..0000000000000 --- a/src/librustc_trans/trans/mir/rvalue.rs +++ /dev/null @@ -1,529 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::ValueRef; -use rustc::middle::ty::{self, Ty}; -use middle::ty::cast::{CastTy, IntTy}; -use rustc::mir::repr as mir; - -use trans::asm; -use trans::base; -use trans::build; -use trans::common::{self, Block, Result}; -use trans::debuginfo::DebugLoc; -use trans::declare; -use trans::expr; -use trans::adt; -use trans::machine; -use trans::type_::Type; -use trans::type_of; -use trans::tvec; -use trans::Disr; - -use super::MirContext; -use super::operand::{OperandRef, OperandValue}; -use super::lvalue::LvalueRef; - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_rvalue(&mut self, - bcx: Block<'bcx, 'tcx>, - dest: LvalueRef<'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Block<'bcx, 'tcx> - { - debug!("trans_rvalue(dest.llval={}, rvalue={:?})", - bcx.val_to_string(dest.llval), - rvalue); - - match *rvalue { - mir::Rvalue::Use(ref operand) => { - self.trans_operand_into(bcx, dest.llval, operand); - bcx - } - - mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => { - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { 
- // into-coerce of a thin pointer to a fat pointer - just - // use the operand path. - let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(bcx, dest.llval, temp); - return bcx; - } - - // Unsize of a nontrivial struct. I would prefer for - // this to be eliminated by MIR translation, but - // `CoerceUnsized` can be passed by a where-clause, - // so the (generic) MIR may not be able to expand it. - let operand = self.trans_operand(bcx, operand); - match operand.val { - OperandValue::FatPtr(..) => unreachable!(), - OperandValue::Immediate(llval) => { - // unsize from an immediate structure. We don't - // really need a temporary alloca here, but - // avoiding it would require us to have - // `coerce_unsized_into` use extractvalue to - // index into the struct, and this case isn't - // important enough for it. - debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp"); - base::store_ty(bcx, llval, lltemp, operand.ty); - base::coerce_unsized_into(bcx, - lltemp, operand.ty, - dest.llval, cast_ty); - } - OperandValue::Ref(llref) => { - base::coerce_unsized_into(bcx, - llref, operand.ty, - dest.llval, cast_ty); - } - } - bcx - } - - mir::Rvalue::Repeat(ref elem, ref count) => { - let elem = self.trans_operand(bcx, elem); - let size = self.trans_constant(bcx, count).immediate(); - let base = expr::get_dataptr(bcx, dest.llval); - tvec::iter_vec_raw(bcx, base, elem.ty, size, |bcx, llslot, _| { - self.store_operand(bcx, llslot, elem); - bcx - }) - } - - mir::Rvalue::Aggregate(ref kind, ref operands) => { - match *kind { - mir::AggregateKind::Adt(adt_def, index, _) => { - let repr = adt::represent_type(bcx.ccx(), dest.ty.to_ty(bcx.tcx())); - let disr = Disr::from(adt_def.variants[index].disr_val); - adt::trans_set_discr(bcx, &*repr, dest.llval, Disr::from(disr)); - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(bcx, operand); - // Do not generate stores and GEPis 
for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx(), op.ty) { - let val = adt::MaybeSizedValue::sized(dest.llval); - let lldest_i = adt::trans_field_ptr(bcx, &*repr, val, disr, i); - self.store_operand(bcx, lldest_i, op); - } - } - }, - _ => { - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(bcx, operand); - // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx(), op.ty) { - // Note: perhaps this should be StructGep, but - // note that in some cases the values here will - // not be structs but arrays. - let dest = build::GEPi(bcx, dest.llval, &[0, i]); - self.store_operand(bcx, dest, op); - } - } - } - } - bcx - } - - mir::Rvalue::Slice { ref input, from_start, from_end } => { - let ccx = bcx.ccx(); - let input = self.trans_lvalue(bcx, input); - let (llbase, lllen) = tvec::get_base_and_len(bcx, - input.llval, - input.ty.to_ty(bcx.tcx())); - let llbase1 = build::GEPi(bcx, llbase, &[from_start]); - let adj = common::C_uint(ccx, from_start + from_end); - let lllen1 = build::Sub(bcx, lllen, adj, DebugLoc::None); - let lladdrdest = expr::get_dataptr(bcx, dest.llval); - build::Store(bcx, llbase1, lladdrdest); - let llmetadest = expr::get_meta(bcx, dest.llval); - build::Store(bcx, lllen1, llmetadest); - bcx - } - - mir::Rvalue::InlineAsm(ref inline_asm) => { - asm::trans_inline_asm(bcx, inline_asm) - } - - _ => { - assert!(rvalue_creates_operand(rvalue)); - let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(bcx, dest.llval, temp); - bcx - } - } - } - - pub fn trans_rvalue_operand(&mut self, - bcx: Block<'bcx, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> (Block<'bcx, 'tcx>, OperandRef<'tcx>) - { - assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); - - match *rvalue { - mir::Rvalue::Use(ref operand) => { - let operand = self.trans_operand(bcx, operand); - (bcx, operand) - } - - mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => 
{ - let operand = self.trans_operand(bcx, operand); - debug!("cast operand is {}", operand.repr(bcx)); - let cast_ty = bcx.monomorphize(&cast_ty); - - let val = match *kind { - mir::CastKind::ReifyFnPointer | - mir::CastKind::UnsafeFnPointer => { - // these are no-ops at the LLVM level - operand.val - } - mir::CastKind::Unsize => { - // unsize targets other than to a fat pointer currently - // can't be operands. - assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty)); - - match operand.val { - OperandValue::FatPtr(..) => { - // unsize from a fat pointer - this is a - // "trait-object-to-supertrait" coercion, for - // example, - // &'a fmt::Debug+Send => &'a fmt::Debug, - // and is a no-op at the LLVM level - operand.val - } - OperandValue::Immediate(lldata) => { - // "standard" unsize - let (lldata, llextra) = - base::unsize_thin_ptr(bcx, lldata, - operand.ty, cast_ty); - OperandValue::FatPtr(lldata, llextra) - } - OperandValue::Ref(_) => { - bcx.sess().bug( - &format!("by-ref operand {} in trans_rvalue_operand", - operand.repr(bcx))); - } - } - } - mir::CastKind::Misc if common::type_is_immediate(bcx.ccx(), operand.ty) => { - debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); - let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty); - let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty); - let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let repr = adt::represent_type(bcx.ccx(), operand.ty); - let llval = operand.immediate(); - let discr = adt::trans_get_discr(bcx, &*repr, llval, None); - (discr, common::val_ty(discr), adt::is_discr_signed(&*repr)) - } else { - (operand.immediate(), ll_t_in, operand.ty.is_signed()) - }; - - let newval = match (r_t_in, r_t_out) { - (CastTy::Int(_), CastTy::Int(_)) => { - let srcsz = ll_t_in.int_width(); - let dstsz = 
ll_t_out.int_width(); - if srcsz == dstsz { - build::BitCast(bcx, llval, ll_t_out) - } else if srcsz > dstsz { - build::Trunc(bcx, llval, ll_t_out) - } else if signed { - build::SExt(bcx, llval, ll_t_out) - } else { - build::ZExt(bcx, llval, ll_t_out) - } - } - (CastTy::Float, CastTy::Float) => { - let srcsz = ll_t_in.float_width(); - let dstsz = ll_t_out.float_width(); - if dstsz > srcsz { - build::FPExt(bcx, llval, ll_t_out) - } else if srcsz > dstsz { - build::FPTrunc(bcx, llval, ll_t_out) - } else { - llval - } - } - (CastTy::Ptr(_), CastTy::Ptr(_)) | - (CastTy::FnPtr, CastTy::Ptr(_)) | - (CastTy::RPtr(_), CastTy::Ptr(_)) => - build::PointerCast(bcx, llval, ll_t_out), - (CastTy::Ptr(_), CastTy::Int(_)) | - (CastTy::FnPtr, CastTy::Int(_)) => - build::PtrToInt(bcx, llval, ll_t_out), - (CastTy::Int(_), CastTy::Ptr(_)) => - build::IntToPtr(bcx, llval, ll_t_out), - (CastTy::Int(_), CastTy::Float) if signed => - build::SIToFP(bcx, llval, ll_t_out), - (CastTy::Int(_), CastTy::Float) => - build::UIToFP(bcx, llval, ll_t_out), - (CastTy::Float, CastTy::Int(IntTy::I)) => - build::FPToSI(bcx, llval, ll_t_out), - (CastTy::Float, CastTy::Int(_)) => - build::FPToUI(bcx, llval, ll_t_out), - _ => bcx.ccx().sess().bug( - &format!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) - ) - }; - OperandValue::Immediate(newval) - } - mir::CastKind::Misc => { // Casts from a fat-ptr. 
- let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty); - let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty); - if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val { - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { - let ll_cft = ll_cast_ty.field_types(); - let ll_fft = ll_from_ty.field_types(); - let data_cast = build::PointerCast(bcx, data_ptr, ll_cft[0]); - assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); - OperandValue::FatPtr(data_cast, meta_ptr) - } else { // cast to thin-ptr - // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and - // pointer-cast of that pointer to desired pointer type. - let llval = build::PointerCast(bcx, data_ptr, ll_cast_ty); - OperandValue::Immediate(llval) - } - } else { - panic!("Unexpected non-FatPtr operand") - } - } - }; - (bcx, OperandRef { - val: val, - ty: cast_ty - }) - } - - mir::Rvalue::Ref(_, bk, ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let ref_ty = bcx.tcx().mk_ref( - bcx.tcx().mk_region(ty::ReStatic), - ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } - ); - - // Note: lvalues are indirect, so storing the `llval` into the - // destination effectively creates a reference. 
- if common::type_is_sized(bcx.tcx(), ty) { - (bcx, OperandRef { - val: OperandValue::Immediate(tr_lvalue.llval), - ty: ref_ty, - }) - } else { - (bcx, OperandRef { - val: OperandValue::FatPtr(tr_lvalue.llval, - tr_lvalue.llextra), - ty: ref_ty, - }) - } - } - - mir::Rvalue::Len(ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - (bcx, OperandRef { - val: OperandValue::Immediate(self.lvalue_len(bcx, tr_lvalue)), - ty: bcx.tcx().types.usize, - }) - } - - mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.trans_operand(bcx, lhs); - let rhs = self.trans_operand(bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) { - match (lhs.val, rhs.val) { - (OperandValue::FatPtr(lhs_addr, lhs_extra), - OperandValue::FatPtr(rhs_addr, rhs_extra)) => { - base::compare_fat_ptrs(bcx, - lhs_addr, lhs_extra, - rhs_addr, rhs_extra, - lhs.ty, op.to_hir_binop(), - DebugLoc::None) - } - _ => unreachable!() - } - - } else { - self.trans_scalar_binop(bcx, op, - lhs.immediate(), rhs.immediate(), - lhs.ty, DebugLoc::None) - }; - (bcx, OperandRef { - val: OperandValue::Immediate(llresult), - ty: self.mir.binop_ty(bcx.tcx(), op, lhs.ty, rhs.ty), - }) - } - - mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.trans_operand(bcx, operand); - let lloperand = operand.immediate(); - let is_float = operand.ty.is_fp(); - let debug_loc = DebugLoc::None; - let llval = match op { - mir::UnOp::Not => build::Not(bcx, lloperand, debug_loc), - mir::UnOp::Neg => if is_float { - build::FNeg(bcx, lloperand, debug_loc) - } else { - build::Neg(bcx, lloperand, debug_loc) - } - }; - (bcx, OperandRef { - val: OperandValue::Immediate(llval), - ty: operand.ty, - }) - } - - mir::Rvalue::Box(content_ty) => { - let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx(), content_ty); - let llsize = machine::llsize_of(bcx.ccx(), llty); - let align = type_of::align_of(bcx.ccx(), content_ty); - let llalign = 
common::C_uint(bcx.ccx(), align); - let llty_ptr = llty.ptr_to(); - let box_ty = bcx.tcx().mk_box(content_ty); - let Result { bcx, val: llval } = base::malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - llsize, - llalign, - DebugLoc::None); - (bcx, OperandRef { - val: OperandValue::Immediate(llval), - ty: box_ty, - }) - } - - mir::Rvalue::Repeat(..) | - mir::Rvalue::Aggregate(..) | - mir::Rvalue::Slice { .. } | - mir::Rvalue::InlineAsm(..) => { - bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue)); - } - } - } - - pub fn trans_scalar_binop(&mut self, - bcx: Block<'bcx, 'tcx>, - op: mir::BinOp, - lhs: ValueRef, - rhs: ValueRef, - input_ty: Ty<'tcx>, - debug_loc: DebugLoc) -> ValueRef { - let is_float = input_ty.is_fp(); - let is_signed = input_ty.is_signed(); - match op { - mir::BinOp::Add => if is_float { - build::FAdd(bcx, lhs, rhs, debug_loc) - } else { - build::Add(bcx, lhs, rhs, debug_loc) - }, - mir::BinOp::Sub => if is_float { - build::FSub(bcx, lhs, rhs, debug_loc) - } else { - build::Sub(bcx, lhs, rhs, debug_loc) - }, - mir::BinOp::Mul => if is_float { - build::FMul(bcx, lhs, rhs, debug_loc) - } else { - build::Mul(bcx, lhs, rhs, debug_loc) - }, - mir::BinOp::Div => if is_float { - build::FDiv(bcx, lhs, rhs, debug_loc) - } else if is_signed { - build::SDiv(bcx, lhs, rhs, debug_loc) - } else { - build::UDiv(bcx, lhs, rhs, debug_loc) - }, - mir::BinOp::Rem => if is_float { - // LLVM currently always lowers the `frem` instructions appropriate - // library calls typically found in libm. Notably f64 gets wired up - // to `fmod` and f32 gets wired up to `fmodf`. Inconveniently for - // us, 32-bit MSVC does not actually have a `fmodf` symbol, it's - // instead just an inline function in a header that goes up to a - // f64, uses `fmod`, and then comes back down to a f32. - // - // Although LLVM knows that `fmodf` doesn't exist on MSVC, it will - // still unconditionally lower frem instructions over 32-bit floats - // to a call to `fmodf`. 
To work around this we special case MSVC - // 32-bit float rem instructions and instead do the call out to - // `fmod` ourselves. - // - // Note that this is currently duplicated with src/libcore/ops.rs - // which does the same thing, and it would be nice to perhaps unify - // these two implementations one day! Also note that we call `fmod` - // for both 32 and 64-bit floats because if we emit any FRem - // instruction at all then LLVM is capable of optimizing it into a - // 32-bit FRem (which we're trying to avoid). - let tcx = bcx.tcx(); - let use_fmod = tcx.sess.target.target.options.is_like_msvc && - tcx.sess.target.target.arch == "x86"; - if use_fmod { - let f64t = Type::f64(bcx.ccx()); - let fty = Type::func(&[f64t, f64t], &f64t); - let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, - tcx.types.f64); - if input_ty == tcx.types.f32 { - let lllhs = build::FPExt(bcx, lhs, f64t); - let llrhs = build::FPExt(bcx, rhs, f64t); - let llres = build::Call(bcx, llfn, &[lllhs, llrhs], - None, debug_loc); - build::FPTrunc(bcx, llres, Type::f32(bcx.ccx())) - } else { - build::Call(bcx, llfn, &[lhs, rhs], - None, debug_loc) - } - } else { - build::FRem(bcx, lhs, rhs, debug_loc) - } - } else if is_signed { - build::SRem(bcx, lhs, rhs, debug_loc) - } else { - build::URem(bcx, lhs, rhs, debug_loc) - }, - mir::BinOp::BitOr => build::Or(bcx, lhs, rhs, debug_loc), - mir::BinOp::BitAnd => build::And(bcx, lhs, rhs, debug_loc), - mir::BinOp::BitXor => build::Xor(bcx, lhs, rhs, debug_loc), - mir::BinOp::Shl => common::build_unchecked_lshift(bcx, - lhs, - rhs, - debug_loc), - mir::BinOp::Shr => common::build_unchecked_rshift(bcx, - input_ty, - lhs, - rhs, - debug_loc), - mir::BinOp::Eq | mir::BinOp::Lt | mir::BinOp::Gt | - mir::BinOp::Ne | mir::BinOp::Le | mir::BinOp::Ge => { - base::compare_scalar_types(bcx, lhs, rhs, input_ty, - op.to_hir_binop(), debug_loc) - } - } - } -} - -pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool { - match *rvalue { - 
mir::Rvalue::Use(..) | // (*) - mir::Rvalue::Ref(..) | - mir::Rvalue::Len(..) | - mir::Rvalue::Cast(..) | // (*) - mir::Rvalue::BinaryOp(..) | - mir::Rvalue::UnaryOp(..) | - mir::Rvalue::Box(..) => - true, - mir::Rvalue::Repeat(..) | - mir::Rvalue::Aggregate(..) | - mir::Rvalue::Slice { .. } | - mir::Rvalue::InlineAsm(..) => - false, - } - - // (*) this is only true if the type is suitable -} diff --git a/src/librustc_trans/trans/mir/statement.rs b/src/librustc_trans/trans/mir/statement.rs deleted file mode 100644 index dae0d3b55c0ba..0000000000000 --- a/src/librustc_trans/trans/mir/statement.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::middle::ty::LvaluePreference; -use rustc::mir::repr as mir; -use trans::common::Block; -use trans::debuginfo::DebugLoc; -use trans::glue; - -use super::MirContext; -use super::TempRef; - -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_statement(&mut self, - bcx: Block<'bcx, 'tcx>, - statement: &mir::Statement<'tcx>) - -> Block<'bcx, 'tcx> { - debug!("trans_statement(statement={:?})", statement); - - match statement.kind { - mir::StatementKind::Assign(ref lvalue, ref rvalue) => { - match *lvalue { - mir::Lvalue::Temp(index) => { - let index = index as usize; - match self.temps[index as usize] { - TempRef::Lvalue(tr_dest) => { - self.trans_rvalue(bcx, tr_dest, rvalue) - } - TempRef::Operand(None) => { - let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue); - self.temps[index] = TempRef::Operand(Some(operand)); - bcx - } - TempRef::Operand(Some(_)) => { - bcx.tcx().sess.span_bug( - statement.span, - &format!("operand {:?} already assigned", rvalue)); - } - } - 
} - _ => { - let tr_dest = self.trans_lvalue(bcx, lvalue); - self.trans_rvalue(bcx, tr_dest, rvalue) - } - } - } - - mir::StatementKind::Drop(mir::DropKind::Deep, ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - glue::drop_ty(bcx, tr_lvalue.llval, ty, DebugLoc::None) - } - - mir::StatementKind::Drop(mir::DropKind::Free, ref lvalue) => { - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let content_ty = ty.builtin_deref(true, LvaluePreference::NoPreference); - let content_ty = content_ty.unwrap().ty; - glue::trans_exchange_free_ty(bcx, tr_lvalue.llval, content_ty, DebugLoc::None) - } - } - } -} diff --git a/src/librustc_trans/trans/mod.rs b/src/librustc_trans/trans/mod.rs deleted file mode 100644 index d87c17cbf88d4..0000000000000 --- a/src/librustc_trans/trans/mod.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm::{ContextRef, ModuleRef}; -use middle::cstore::LinkMeta; - -pub use self::base::trans_crate; -pub use self::context::CrateContext; -pub use self::common::gensym_name; -pub use self::disr::Disr; - -#[macro_use] -mod macros; - -mod adt; -mod asm; -mod assert_dep_graph; -mod attributes; -mod base; -mod basic_block; -mod build; -mod builder; -mod cabi; -mod cabi_aarch64; -mod cabi_arm; -mod cabi_mips; -mod cabi_powerpc; -mod cabi_powerpc64; -mod cabi_x86; -mod cabi_x86_64; -mod cabi_x86_win64; -mod callee; -mod cleanup; -mod closure; -mod common; -mod consts; -mod context; -mod controlflow; -mod datum; -mod debuginfo; -mod declare; -mod disr; -mod expr; -mod foreign; -mod glue; -mod inline; -mod intrinsic; -mod llrepr; -mod machine; -mod _match; -mod meth; -mod mir; -mod monomorphize; -mod tvec; -mod type_; -mod type_of; -mod value; - -#[derive(Copy, Clone)] -pub struct ModuleTranslation { - pub llcx: ContextRef, - pub llmod: ModuleRef, -} - -unsafe impl Send for ModuleTranslation { } -unsafe impl Sync for ModuleTranslation { } - -pub struct CrateTranslation { - pub modules: Vec, - pub metadata_module: ModuleTranslation, - pub link: LinkMeta, - pub metadata: Vec, - pub reachable: Vec, - pub no_builtins: bool, -} diff --git a/src/librustc_trans/trans/monomorphize.rs b/src/librustc_trans/trans/monomorphize.rs deleted file mode 100644 index 62e69cbb85e6f..0000000000000 --- a/src/librustc_trans/trans/monomorphize.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use back::link::exported_name; -use llvm::ValueRef; -use llvm; -use middle::def_id::DefId; -use middle::infer::normalize_associated_type; -use middle::subst; -use middle::subst::{Subst, Substs}; -use middle::ty::fold::{TypeFolder, TypeFoldable}; -use trans::attributes; -use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; -use trans::base::trans_fn; -use trans::base; -use trans::common::*; -use trans::declare; -use trans::foreign; -use middle::ty::{self, Ty}; -use trans::Disr; -use rustc::front::map as hir_map; - -use rustc_front::hir; - -use syntax::abi; -use syntax::ast; -use syntax::attr; -use syntax::errors; -use std::hash::{Hasher, Hash, SipHasher}; - -pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - fn_id: DefId, - psubsts: &'tcx subst::Substs<'tcx>, - ref_id: Option) - -> (ValueRef, Ty<'tcx>, bool) { - debug!("monomorphic_fn(\ - fn_id={:?}, \ - real_substs={:?}, \ - ref_id={:?})", - fn_id, - psubsts, - ref_id); - - assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types()); - - // we can only monomorphize things in this crate (or inlined into it) - let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap(); - - let _icx = push_ctxt("monomorphic_fn"); - - let hash_id = MonoId { - def: fn_id, - params: &psubsts.types - }; - - let item_ty = ccx.tcx().lookup_item_type(fn_id).ty; - - debug!("monomorphic_fn about to subst into {:?}", item_ty); - let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty); - debug!("mono_ty = {:?} (post-substitution)", mono_ty); - - match ccx.monomorphized().borrow().get(&hash_id) { - Some(&val) => { - debug!("leaving monomorphic fn {}", - ccx.tcx().item_path_str(fn_id)); - return (val, mono_ty, false); - } - None => () - } - - debug!("monomorphic_fn(\ - fn_id={:?}, \ - psubsts={:?}, \ - hash_id={:?})", - fn_id, - psubsts, - hash_id); - - - let map_node = errors::expect( - ccx.sess().diagnostic(), - ccx.tcx().map.find(fn_node_id), - || { - format!("while monomorphizing {:?}, 
couldn't find it in \ - the item map (may have attempted to monomorphize \ - an item defined in a different crate?)", - fn_id) - }); - - if let hir_map::NodeForeignItem(_) = map_node { - let abi = ccx.tcx().map.get_foreign_abi(fn_node_id); - if abi != abi::RustIntrinsic && abi != abi::PlatformIntrinsic { - // Foreign externs don't have to be monomorphized. - return (get_item_val(ccx, fn_node_id), mono_ty, true); - } - } - - ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); - - let depth; - { - let mut monomorphizing = ccx.monomorphizing().borrow_mut(); - depth = match monomorphizing.get(&fn_id) { - Some(&d) => d, None => 0 - }; - - debug!("monomorphic_fn: depth for fn_id={:?} is {:?}", fn_id, depth+1); - - // Random cut-off -- code that needs to instantiate the same function - // recursively more than thirty times can probably safely be assumed - // to be causing an infinite expansion. - if depth > ccx.sess().recursion_limit.get() { - ccx.sess().span_fatal(ccx.tcx().map.span(fn_node_id), - "reached the recursion limit during monomorphization"); - } - - monomorphizing.insert(fn_id, depth + 1); - } - - let hash; - let s = { - let mut state = SipHasher::new(); - hash_id.hash(&mut state); - mono_ty.hash(&mut state); - - hash = format!("h{}", state.finish()); - let path = ccx.tcx().map.def_path_from_id(fn_node_id); - exported_name(path, &hash[..]) - }; - - debug!("monomorphize_fn mangled to {}", s); - - // This shouldn't need to option dance. - let mut hash_id = Some(hash_id); - let mut mk_lldecl = |abi: abi::Abi| { - let lldecl = if abi != abi::Rust { - foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s) - } else { - // FIXME(nagisa): perhaps needs a more fine grained selection? See - // setup_lldecl below. 
- declare::define_internal_rust_fn(ccx, &s, mono_ty) - }; - - ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); - lldecl - }; - let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| { - base::update_linkage(ccx, lldecl, None, base::OriginalTranslation); - attributes::from_fn_attrs(ccx, attrs, lldecl); - - let is_first = !ccx.available_monomorphizations().borrow().contains(&s); - if is_first { - ccx.available_monomorphizations().borrow_mut().insert(s.clone()); - } - - let trans_everywhere = attr::requests_inline(attrs); - if trans_everywhere && !is_first { - llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); - } - - // If `true`, then `lldecl` should be given a function body. - // Otherwise, it should be left as a declaration of an external - // function, with no definition in the current compilation unit. - trans_everywhere || is_first - }; - - let lldecl = match map_node { - hir_map::NodeItem(i) => { - match *i { - hir::Item { - node: hir::ItemFn(ref decl, _, _, abi, _, ref body), - .. 
- } => { - let d = mk_lldecl(abi); - let needs_body = setup_lldecl(d, &i.attrs); - if needs_body { - if abi != abi::Rust { - foreign::trans_rust_fn_with_foreign_abi( - ccx, &**decl, &**body, &[], d, psubsts, fn_node_id, - Some(&hash[..])); - } else { - trans_fn(ccx, - &**decl, - &**body, - d, - psubsts, - fn_node_id, - &i.attrs); - } - } - - d - } - _ => { - ccx.sess().bug("Can't monomorphize this kind of item") - } - } - } - hir_map::NodeVariant(v) => { - let variant = inlined_variant_def(ccx, fn_node_id); - assert_eq!(v.node.name, variant.name); - let d = mk_lldecl(abi::Rust); - attributes::inline(d, attributes::InlineAttr::Hint); - trans_enum_variant(ccx, fn_node_id, Disr::from(variant.disr_val), psubsts, d); - d - } - hir_map::NodeImplItem(impl_item) => { - match impl_item.node { - hir::ImplItemKind::Method(ref sig, ref body) => { - let d = mk_lldecl(abi::Rust); - let needs_body = setup_lldecl(d, &impl_item.attrs); - if needs_body { - trans_fn(ccx, - &sig.decl, - body, - d, - psubsts, - impl_item.id, - &impl_item.attrs); - } - d - } - _ => { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) - } - } - } - hir_map::NodeTraitItem(trait_item) => { - match trait_item.node { - hir::MethodTraitItem(ref sig, Some(ref body)) => { - let d = mk_lldecl(abi::Rust); - let needs_body = setup_lldecl(d, &trait_item.attrs); - if needs_body { - trans_fn(ccx, - &sig.decl, - body, - d, - psubsts, - trait_item.id, - &trait_item.attrs); - } - d - } - _ => { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) - } - } - } - hir_map::NodeStructCtor(struct_def) => { - let d = mk_lldecl(abi::Rust); - attributes::inline(d, attributes::InlineAttr::Hint); - if struct_def.is_struct() { - panic!("ast-mapped struct didn't have a ctor id") - } - base::trans_tuple_struct(ccx, - struct_def.id(), - psubsts, - d); - d - } - - // Ugh -- but this ensures any new variants won't be forgotten - hir_map::NodeForeignItem(..) | - hir_map::NodeLifetime(..) 
| - hir_map::NodeTyParam(..) | - hir_map::NodeExpr(..) | - hir_map::NodeStmt(..) | - hir_map::NodeBlock(..) | - hir_map::NodePat(..) | - hir_map::NodeLocal(..) => { - ccx.sess().bug(&format!("can't monomorphize a {:?}", - map_node)) - } - }; - - ccx.monomorphizing().borrow_mut().insert(fn_id, depth); - - debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id)); - (lldecl, mono_ty, true) -} - -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct MonoId<'tcx> { - pub def: DefId, - pub params: &'tcx subst::VecPerParamSpace> -} - -/// Monomorphizes a type from the AST by first applying the in-scope -/// substitutions and then normalizing any associated types. -pub fn apply_param_substs<'tcx,T>(tcx: &ty::ctxt<'tcx>, - param_substs: &Substs<'tcx>, - value: &T) - -> T - where T : TypeFoldable<'tcx> -{ - let substituted = value.subst(tcx, param_substs); - normalize_associated_type(tcx, &substituted) -} - - -/// Returns the normalized type of a struct field -pub fn field_ty<'tcx>(tcx: &ty::ctxt<'tcx>, - param_substs: &Substs<'tcx>, - f: ty::FieldDef<'tcx>) - -> Ty<'tcx> -{ - normalize_associated_type(tcx, &f.ty(tcx, param_substs)) -} diff --git a/src/librustc_trans/trans/tvec.rs b/src/librustc_trans/trans/tvec.rs deleted file mode 100644 index 3a1568a70c992..0000000000000 --- a/src/librustc_trans/trans/tvec.rs +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![allow(non_camel_case_types)] - -use llvm; -use llvm::ValueRef; -use trans::base::*; -use trans::base; -use trans::build::*; -use trans::cleanup; -use trans::cleanup::CleanupMethods; -use trans::common::*; -use trans::consts; -use trans::datum::*; -use trans::debuginfo::DebugLoc; -use trans::expr::{Dest, Ignore, SaveIn}; -use trans::expr; -use trans::machine::llsize_of_alloc; -use trans::type_::Type; -use trans::type_of; -use middle::ty::{self, Ty}; - -use rustc_front::hir; - -use syntax::ast; -use syntax::parse::token::InternedString; - -#[derive(Copy, Clone)] -struct VecTypes<'tcx> { - unit_ty: Ty<'tcx>, - llunit_ty: Type -} - -impl<'tcx> VecTypes<'tcx> { - pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String { - format!("VecTypes {{unit_ty={}, llunit_ty={}}}", - self.unit_ty, - ccx.tn().type_to_string(self.llunit_ty)) - } -} - -pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - expr: &hir::Expr, - dest: expr::Dest) - -> Block<'blk, 'tcx> { - //! - // - // [...] allocates a fixed-size array and moves it around "by value". - // In this case, it means that the caller has already given us a location - // to store the array of the suitable size, so all we have to do is - // generate the content. - - debug!("trans_fixed_vstore(expr={:?}, dest={})", - expr, dest.to_string(bcx.ccx())); - - let vt = vec_types_from_expr(bcx, expr); - - return match dest { - Ignore => write_content(bcx, &vt, expr, expr, dest), - SaveIn(lldest) => { - // lldest will have type *[T x N], but we want the type *T, - // so use GEP to convert: - let lldest = StructGEP(bcx, lldest, 0); - write_content(bcx, &vt, expr, expr, SaveIn(lldest)) - } - }; -} - -/// &[...] allocates memory on the stack and writes the values into it, returning the vector (the -/// caller must make the reference). "..." is similar except that the memory can be statically -/// allocated and we return a reference (strings are always by-ref). 
-pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - slice_expr: &hir::Expr, - content_expr: &hir::Expr) - -> DatumBlock<'blk, 'tcx, Expr> { - let fcx = bcx.fcx; - let ccx = fcx.ccx; - let mut bcx = bcx; - - debug!("trans_slice_vec(slice_expr={:?})", - slice_expr); - - let vec_ty = node_id_type(bcx, slice_expr.id); - - // Handle the "..." case (returns a slice since strings are always unsized): - if let hir::ExprLit(ref lit) = content_expr.node { - if let ast::LitStr(ref s, _) = lit.node { - let scratch = rvalue_scratch_datum(bcx, vec_ty, ""); - bcx = trans_lit_str(bcx, - content_expr, - s.clone(), - SaveIn(scratch.val)); - return DatumBlock::new(bcx, scratch.to_expr_datum()); - } - } - - // Handle the &[...] case: - let vt = vec_types_from_expr(bcx, content_expr); - let count = elements_required(bcx, content_expr); - debug!(" vt={}, count={}", vt.to_string(ccx), count); - - let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); - - // Always create an alloca even if zero-sized, to preserve - // the non-null invariant of the inner slice ptr - let llfixed; - // Issue 30018: ensure state is initialized as dropped if necessary. - if fcx.type_needs_drop(vt.unit_ty) { - llfixed = base::alloc_ty_init(bcx, fixed_ty, InitAlloca::Dropped, ""); - } else { - let uninit = InitAlloca::Uninit("fcx says vt.unit_ty is non-drop"); - llfixed = base::alloc_ty_init(bcx, fixed_ty, uninit, ""); - call_lifetime_start(bcx, llfixed); - }; - - if count > 0 { - // Arrange for the backing array to be cleaned up. - let cleanup_scope = cleanup::temporary_scope(bcx.tcx(), content_expr.id); - fcx.schedule_lifetime_end(cleanup_scope, llfixed); - fcx.schedule_drop_mem(cleanup_scope, llfixed, fixed_ty, None); - - // Generate the content into the backing array. 
- // llfixed has type *[T x N], but we want the type *T, - // so use GEP to convert - bcx = write_content(bcx, &vt, slice_expr, content_expr, - SaveIn(StructGEP(bcx, llfixed, 0))); - }; - - immediate_rvalue_bcx(bcx, llfixed, vec_ty).to_expr_datumblock() -} - -/// Literal strings translate to slices into static memory. This is different from -/// trans_slice_vstore() above because it doesn't need to copy the content anywhere. -pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lit_expr: &hir::Expr, - str_lit: InternedString, - dest: Dest) - -> Block<'blk, 'tcx> { - debug!("trans_lit_str(lit_expr={:?}, dest={})", - lit_expr, - dest.to_string(bcx.ccx())); - - match dest { - Ignore => bcx, - SaveIn(lldest) => { - let bytes = str_lit.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), str_lit, false); - let llcstr = consts::ptrcast(llcstr, Type::i8p(bcx.ccx())); - Store(bcx, llcstr, expr::get_dataptr(bcx, lldest)); - Store(bcx, llbytes, expr::get_meta(bcx, lldest)); - bcx - } - } -} - -fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - vt: &VecTypes<'tcx>, - vstore_expr: &hir::Expr, - content_expr: &hir::Expr, - dest: Dest) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("tvec::write_content"); - let fcx = bcx.fcx; - let mut bcx = bcx; - - debug!("write_content(vt={}, dest={}, vstore_expr={:?})", - vt.to_string(bcx.ccx()), - dest.to_string(bcx.ccx()), - vstore_expr); - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitStr(ref s, _) => { - match dest { - Ignore => return bcx, - SaveIn(lldest) => { - let bytes = s.len(); - let llbytes = C_uint(bcx.ccx(), bytes); - let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false); - base::call_memcpy(bcx, - lldest, - llcstr, - llbytes, - 1); - return bcx; - } - } - } - _ => { - bcx.tcx().sess.span_bug(content_expr.span, - "unexpected evec content"); - } - } - } - hir::ExprVec(ref elements) => { - match dest { - Ignore => { - for element in elements { - bcx = 
expr::trans_into(bcx, &**element, Ignore); - } - } - - SaveIn(lldest) => { - let temp_scope = fcx.push_custom_cleanup_scope(); - for (i, element) in elements.iter().enumerate() { - let lleltptr = GEPi(bcx, lldest, &[i]); - debug!("writing index {} with lleltptr={}", - i, bcx.val_to_string(lleltptr)); - bcx = expr::trans_into(bcx, &**element, - SaveIn(lleltptr)); - let scope = cleanup::CustomScope(temp_scope); - // Issue #30822: mark memory as dropped after running destructor - fcx.schedule_drop_and_fill_mem(scope, lleltptr, vt.unit_ty, None); - } - fcx.pop_custom_cleanup_scope(temp_scope); - } - } - return bcx; - } - hir::ExprRepeat(ref element, ref count_expr) => { - match dest { - Ignore => { - return expr::trans_into(bcx, &**element, Ignore); - } - SaveIn(lldest) => { - match bcx.tcx().eval_repeat_count(&**count_expr) { - 0 => expr::trans_into(bcx, &**element, Ignore), - 1 => expr::trans_into(bcx, &**element, SaveIn(lldest)), - count => { - let elem = unpack_datum!(bcx, expr::trans(bcx, &**element)); - let bcx = iter_vec_loop(bcx, lldest, vt, - C_uint(bcx.ccx(), count), - |set_bcx, lleltptr, _| { - elem.shallow_copy(set_bcx, lleltptr) - }); - bcx - } - } - } - } - } - _ => { - bcx.tcx().sess.span_bug(content_expr.span, - "unexpected vec content"); - } - } -} - -fn vec_types_from_expr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, vec_expr: &hir::Expr) - -> VecTypes<'tcx> { - let vec_ty = node_id_type(bcx, vec_expr.id); - vec_types(bcx, vec_ty.sequence_element_type(bcx.tcx())) -} - -fn vec_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, unit_ty: Ty<'tcx>) - -> VecTypes<'tcx> { - VecTypes { - unit_ty: unit_ty, - llunit_ty: type_of::type_of(bcx.ccx(), unit_ty) - } -} - -fn elements_required(bcx: Block, content_expr: &hir::Expr) -> usize { - //! 
Figure out the number of elements we need to store this content - - match content_expr.node { - hir::ExprLit(ref lit) => { - match lit.node { - ast::LitStr(ref s, _) => s.len(), - _ => { - bcx.tcx().sess.span_bug(content_expr.span, - "unexpected evec content") - } - } - }, - hir::ExprVec(ref es) => es.len(), - hir::ExprRepeat(_, ref count_expr) => { - bcx.tcx().eval_repeat_count(&**count_expr) - } - _ => bcx.tcx().sess.span_bug(content_expr.span, - "unexpected vec content") - } -} - -/// Converts a fixed-length vector into the slice pair. The vector should be stored in `llval` -/// which should be by ref. -pub fn get_fixed_base_and_len(bcx: Block, - llval: ValueRef, - vec_length: usize) - -> (ValueRef, ValueRef) { - let ccx = bcx.ccx(); - - let base = expr::get_dataptr(bcx, llval); - let len = C_uint(ccx, vec_length); - (base, len) -} - -/// Converts a vector into the slice pair. The vector should be stored in `llval` which should be -/// by-reference. If you have a datum, you would probably prefer to call -/// `Datum::get_base_and_len()` which will handle any conversions for you. -pub fn get_base_and_len<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llval: ValueRef, - vec_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { - let ccx = bcx.ccx(); - - match vec_ty.sty { - ty::TyArray(_, n) => get_fixed_base_and_len(bcx, llval, n), - ty::TySlice(_) | ty::TyStr => { - let base = Load(bcx, expr::get_dataptr(bcx, llval)); - let len = Load(bcx, expr::get_meta(bcx, llval)); - (base, len) - } - - // Only used for pattern matching. 
- ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) => { - let inner = if type_is_sized(bcx.tcx(), ty) { - Load(bcx, llval) - } else { - llval - }; - get_base_and_len(bcx, inner, ty) - }, - _ => ccx.sess().bug("unexpected type in get_base_and_len"), - } -} - -fn iter_vec_loop<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - vt: &VecTypes<'tcx>, - count: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_loop"); - - if bcx.unreachable.get() { - return bcx; - } - - let fcx = bcx.fcx; - let loop_bcx = fcx.new_temp_block("expr_repeat"); - let next_bcx = fcx.new_temp_block("expr_repeat: next"); - - Br(bcx, loop_bcx.llbb, DebugLoc::None); - - let loop_counter = Phi(loop_bcx, bcx.ccx().int_type(), - &[C_uint(bcx.ccx(), 0 as usize)], &[bcx.llbb]); - - let bcx = loop_bcx; - - let lleltptr = if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - data_ptr - } else { - InBoundsGEP(bcx, data_ptr, &[loop_counter]) - }; - let bcx = f(bcx, lleltptr, vt.unit_ty); - let plusone = Add(bcx, loop_counter, C_uint(bcx.ccx(), 1usize), DebugLoc::None); - AddIncomingToPhi(loop_counter, plusone, bcx.llbb); - - let cond_val = ICmp(bcx, llvm::IntULT, plusone, count, DebugLoc::None); - CondBr(bcx, cond_val, loop_bcx.llbb, next_bcx.llbb, DebugLoc::None); - - next_bcx -} - -pub fn iter_vec_raw<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::iter_vec_raw"); - let fcx = bcx.fcx; - - let vt = vec_types(bcx, unit_ty); - - if llsize_of_alloc(bcx.ccx(), vt.llunit_ty) == 0 { - // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - iter_vec_loop(bcx, data_ptr, &vt, len, f) - } else { - // Calculate the last pointer address we want to handle. 
- let data_end_ptr = InBoundsGEP(bcx, data_ptr, &[len]); - - // Now perform the iteration. - let header_bcx = fcx.new_temp_block("iter_vec_loop_header"); - Br(bcx, header_bcx.llbb, DebugLoc::None); - let data_ptr = - Phi(header_bcx, val_ty(data_ptr), &[data_ptr], &[bcx.llbb]); - let not_yet_at_end = - ICmp(header_bcx, llvm::IntULT, data_ptr, data_end_ptr, DebugLoc::None); - let body_bcx = fcx.new_temp_block("iter_vec_loop_body"); - let next_bcx = fcx.new_temp_block("iter_vec_next"); - CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); - let body_bcx = f(body_bcx, data_ptr, unit_ty); - AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr, - &[C_int(bcx.ccx(), 1)]), - body_bcx.llbb); - Br(body_bcx, header_bcx.llbb, DebugLoc::None); - next_bcx - } -} diff --git a/src/librustc_trans/trans/type_of.rs b/src/librustc_trans/trans/type_of.rs deleted file mode 100644 index 8696bdd60e291..0000000000000 --- a/src/librustc_trans/trans/type_of.rs +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(non_camel_case_types)] - -use middle::def_id::DefId; -use middle::infer; -use middle::subst; -use trans::adt; -use trans::common::*; -use trans::foreign; -use trans::machine; -use middle::ty::{self, Ty, TypeFoldable}; - -use trans::type_::Type; - -use syntax::abi; -use syntax::ast; - -// LLVM doesn't like objects that are too big. 
Issue #17913 -fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - llet: Type, - size: machine::llsize, - scapegoat: Ty<'tcx>) { - let esz = machine::llsize_of_alloc(ccx, llet); - match esz.checked_mul(size) { - Some(n) if n < ccx.obj_size_bound() => {} - _ => { ccx.report_overbig_object(scapegoat) } - } -} - -pub fn arg_is_indirect<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - arg_ty: Ty<'tcx>) -> bool { - !type_is_immediate(ccx, arg_ty) && !type_is_fat_ptr(ccx.tcx(), arg_ty) -} - -pub fn return_uses_outptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> bool { - arg_is_indirect(ccx, ty) -} - -pub fn type_of_explicit_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - arg_ty: Ty<'tcx>) -> Type { - let llty = arg_type_of(ccx, arg_ty); - if arg_is_indirect(ccx, arg_ty) { - llty.ptr_to() - } else { - llty - } -} - -/// Yields the types of the "real" arguments for a function using the `RustCall` -/// ABI by untupling the arguments of the function. -pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - inputs: &[Ty<'tcx>]) - -> Vec> { - if inputs.is_empty() { - return Vec::new() - } - - let mut result = Vec::new(); - for (i, &arg_prior_to_tuple) in inputs.iter().enumerate() { - if i < inputs.len() - 1 { - result.push(arg_prior_to_tuple); - } - } - - match inputs[inputs.len() - 1].sty { - ty::TyTuple(ref tupled_arguments) => { - debug!("untuple_arguments(): untupling arguments"); - for &tupled_argument in tupled_arguments { - result.push(tupled_argument); - } - } - _ => { - ccx.tcx().sess.bug("argument to function with \"rust-call\" ABI \ - is neither a tuple nor unit") - } - } - - result -} - -pub fn type_of_rust_fn<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - llenvironment_type: Option, - sig: &ty::FnSig<'tcx>, - abi: abi::Abi) - -> Type -{ - debug!("type_of_rust_fn(sig={:?},abi={:?})", - sig, - abi); - - assert!(!sig.variadic); // rust fns are never variadic - - let mut atys: Vec = Vec::new(); - - // First, munge the inputs, if 
this has the `rust-call` ABI. - let inputs_temp; - let inputs = if abi == abi::RustCall { - inputs_temp = untuple_arguments(cx, &sig.inputs); - &inputs_temp - } else { - &sig.inputs - }; - - // Arg 0: Output pointer. - // (if the output type is non-immediate) - let lloutputtype = match sig.output { - ty::FnConverging(output) => { - let use_out_pointer = return_uses_outptr(cx, output); - let lloutputtype = arg_type_of(cx, output); - // Use the output as the actual return value if it's immediate. - if use_out_pointer { - atys.push(lloutputtype.ptr_to()); - Type::void(cx) - } else if return_type_is_void(cx, output) { - Type::void(cx) - } else { - lloutputtype - } - } - ty::FnDiverging => Type::void(cx) - }; - - // Arg 1: Environment - match llenvironment_type { - None => {} - Some(llenvironment_type) => atys.push(llenvironment_type), - } - - // ... then explicit args. - for input in inputs { - let arg_ty = type_of_explicit_arg(cx, input); - - if type_is_fat_ptr(cx.tcx(), input) { - atys.extend(arg_ty.field_types()); - } else { - atys.push(arg_ty); - } - } - - Type::func(&atys[..], &lloutputtype) -} - -// Given a function type and a count of ty params, construct an llvm type -pub fn type_of_fn_from_ty<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fty: Ty<'tcx>) -> Type { - match fty.sty { - ty::TyBareFn(_, ref f) => { - // FIXME(#19925) once fn item types are - // zero-sized, we'll need to do something here - if f.abi == abi::Rust || f.abi == abi::RustCall { - let sig = cx.tcx().erase_late_bound_regions(&f.sig); - let sig = infer::normalize_associated_type(cx.tcx(), &sig); - type_of_rust_fn(cx, None, &sig, f.abi) - } else { - foreign::lltype_for_foreign_fn(cx, fty) - } - } - _ => { - cx.sess().bug("type_of_fn_from_ty given non-closure, non-bare-fn") - } - } -} - -// A "sizing type" is an LLVM type, the size and alignment of which are -// guaranteed to be equivalent to what you would get out of `type_of()`. 
It's -// useful because: -// -// (1) It may be cheaper to compute the sizing type than the full type if all -// you're interested in is the size and/or alignment; -// -// (2) It won't make any recursive calls to determine the structure of the -// type behind pointers. This can help prevent infinite loops for -// recursive types. For example, enum types rely on this behavior. - -pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - match cx.llsizingtypes().borrow().get(&t).cloned() { - Some(t) => return t, - None => () - } - - debug!("sizing_type_of {:?}", t); - let _recursion_lock = cx.enter_type_of(t); - - let llsizingty = match t.sty { - _ if !type_is_sized(cx.tcx(), t) => { - Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) - } - - ty::TyBool => Type::bool(cx), - ty::TyChar => Type::char(cx), - ty::TyInt(t) => Type::int_from_ty(cx, t), - ty::TyUint(t) => Type::uint_from_ty(cx, t), - ty::TyFloat(t) => Type::float_from_ty(cx, t), - - ty::TyBox(ty) | - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - if type_is_sized(cx.tcx(), ty) { - Type::i8p(cx) - } else { - Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) - } - } - - ty::TyBareFn(..) => Type::i8p(cx), - - ty::TyArray(ty, size) => { - let llty = sizing_type_of(cx, ty); - let size = size as u64; - ensure_array_fits_in_address_space(cx, llty, size, t); - Type::array(&llty, size) - } - - ty::TyTuple(ref tys) if tys.is_empty() => { - Type::nil(cx) - } - - ty::TyTuple(..) | ty::TyEnum(..) | ty::TyClosure(..) => { - let repr = adt::represent_type(cx, t); - adt::sizing_type_of(cx, &*repr, false) - } - - ty::TyStruct(..) 
=> { - if t.is_simd() { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); - Type::vector(&llet, n) - } else { - let repr = adt::represent_type(cx, t); - adt::sizing_type_of(cx, &*repr, false) - } - } - - ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | ty::TyError => { - cx.sess().bug(&format!("fictitious type {:?} in sizing_type_of()", - t)) - } - ty::TySlice(_) | ty::TyTrait(..) | ty::TyStr => unreachable!() - }; - - debug!("--> mapped t={:?} to llsizingty={}", - t, - cx.tn().type_to_string(llsizingty)); - - cx.llsizingtypes().borrow_mut().insert(t, llsizingty); - llsizingty -} - -pub fn foreign_arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if t.is_bool() { - Type::i1(cx) - } else { - type_of(cx, t) - } -} - -pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if t.is_bool() { - Type::i1(cx) - } else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() { - // We want to pass small aggregates as immediate values, but using an aggregate LLVM type - // for this leads to bad optimizations, so its arg type is an appropriately sized integer - match machine::llsize_of_alloc(cx, sizing_type_of(cx, t)) { - 0 => type_of(cx, t), - n => Type::ix(cx, n * 8), - } - } else { - type_of(cx, t) - } -} - -/// Get the LLVM type corresponding to a Rust type, i.e. `middle::ty::Ty`. -/// This is the right LLVM type for an alloca containing a value of that type, -/// and the pointee of an Lvalue Datum (which is always a LLVM pointer). -/// For unsized types, the returned type is a fat pointer, thus the resulting -/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double -/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`. 
-/// This is needed due to the treatment of immediate values, as a fat pointer -/// is too large for it to be placed in SSA value (by our rules). -/// For the raw type without far pointer indirection, see `in_memory_type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let ty = if !type_is_sized(cx.tcx(), ty) { - cx.tcx().mk_imm_ptr(ty) - } else { - ty - }; - in_memory_type_of(cx, ty) -} - -/// Get the LLVM type corresponding to a Rust type, i.e. `middle::ty::Ty`. -/// This is the right LLVM type for a field/array element of that type, -/// and is the same as `type_of` for all Sized types. -/// Unsized types, however, are represented by a "minimal unit", e.g. -/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this -/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. -/// If the type is an unsized struct, the regular layout is generated, -/// with the inner-most trailing unsized field using the "minimal unit" -/// of that field's type - this is useful for taking the address of -/// that field and ensuring the struct has the right alignment. -/// For the LLVM type of a value as a whole, see `type_of`. -/// NB: If you update this, be sure to update `sizing_type_of()` as well. -pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - // Check the cache. - match cx.lltypes().borrow().get(&t) { - Some(&llty) => return llty, - None => () - } - - debug!("type_of {:?}", t); - - assert!(!t.has_escaping_regions()); - - // Replace any typedef'd types with their equivalent non-typedef - // type. This ensures that all LLVM nominal types that contain - // Rust types are defined as the same LLVM types. If we don't do - // this then, e.g. `Option<{myfield: bool}>` would be a different - // type than `Option`. 
- let t_norm = cx.tcx().erase_regions(&t); - - if t != t_norm { - let llty = in_memory_type_of(cx, t_norm); - debug!("--> normalized {:?} {:?} to {:?} {:?} llty={}", - t, - t, - t_norm, - t_norm, - cx.tn().type_to_string(llty)); - cx.lltypes().borrow_mut().insert(t, llty); - return llty; - } - - let mut llty = match t.sty { - ty::TyBool => Type::bool(cx), - ty::TyChar => Type::char(cx), - ty::TyInt(t) => Type::int_from_ty(cx, t), - ty::TyUint(t) => Type::uint_from_ty(cx, t), - ty::TyFloat(t) => Type::float_from_ty(cx, t), - ty::TyEnum(def, ref substs) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. This - // avoids creating more than one copy of the enum when one - // of the enum's variants refers to the enum itself. - let repr = adt::represent_type(cx, t); - let tps = substs.types.get_slice(subst::TypeSpace); - let name = llvm_type_name(cx, def.did, tps); - adt::incomplete_type_of(cx, &*repr, &name[..]) - } - ty::TyClosure(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. - let repr = adt::represent_type(cx, t); - // Unboxed closures can have substitutions in all spaces - // inherited from their environment, so we use entire - // contents of the VecPerParamSpace to construct the llvm - // name - adt::incomplete_type_of(cx, &*repr, "closure") - } - - ty::TyBox(ty) | - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - if !type_is_sized(cx.tcx(), ty) { - if let ty::TyStr = ty.sty { - // This means we get a nicer name in the output (str is always - // unsized). - cx.tn().find_type("str_slice").unwrap() - } else { - let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); - let unsized_part = cx.tcx().struct_tail(ty); - let info_ty = match unsized_part.sty { - ty::TyStr | ty::TyArray(..) 
| ty::TySlice(_) => { - Type::uint_from_ty(cx, ast::TyUs) - } - ty::TyTrait(_) => Type::vtable_ptr(cx), - _ => panic!("Unexpected type returned from \ - struct_tail: {:?} for ty={:?}", - unsized_part, ty) - }; - Type::struct_(cx, &[ptr_ty, info_ty], false) - } - } else { - in_memory_type_of(cx, ty).ptr_to() - } - } - - ty::TyArray(ty, size) => { - let size = size as u64; - // we must use `sizing_type_of` here as the type may - // not be fully initialized. - let szty = sizing_type_of(cx, ty); - ensure_array_fits_in_address_space(cx, szty, size, t); - - let llty = in_memory_type_of(cx, ty); - Type::array(&llty, size) - } - - // Unsized slice types (and str) have the type of their element, and - // traits have the type of u8. This is so that the data pointer inside - // fat pointers is of the right type (e.g. for array accesses), even - // when taking the address of an unsized field in a struct. - ty::TySlice(ty) => in_memory_type_of(cx, ty), - ty::TyStr | ty::TyTrait(..) => Type::i8(cx), - - ty::TyBareFn(..) => { - type_of_fn_from_ty(cx, t).ptr_to() - } - ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), - ty::TyTuple(..) => { - let repr = adt::represent_type(cx, t); - adt::type_of(cx, &*repr) - } - ty::TyStruct(def, ref substs) => { - if t.is_simd() { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = in_memory_type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - ensure_array_fits_in_address_space(cx, llet, n, t); - Type::vector(&llet, n) - } else { - // Only create the named struct, but don't fill it in. We fill it - // in *after* placing it into the type cache. This prevents - // infinite recursion with recursive struct types. 
- let repr = adt::represent_type(cx, t); - let tps = substs.types.get_slice(subst::TypeSpace); - let name = llvm_type_name(cx, def.did, tps); - adt::incomplete_type_of(cx, &*repr, &name[..]) - } - } - - ty::TyInfer(..) => cx.sess().bug("type_of with TyInfer"), - ty::TyProjection(..) => cx.sess().bug("type_of with TyProjection"), - ty::TyParam(..) => cx.sess().bug("type_of with ty_param"), - ty::TyError => cx.sess().bug("type_of with TyError"), - }; - - debug!("--> mapped t={:?} to llty={}", - t, - cx.tn().type_to_string(llty)); - - cx.lltypes().borrow_mut().insert(t, llty); - - // If this was an enum or struct, fill in the type now. - match t.sty { - ty::TyEnum(..) | ty::TyStruct(..) | ty::TyClosure(..) - if !t.is_simd() => { - let repr = adt::represent_type(cx, t); - adt::finish_type_of(cx, &*repr, &mut llty); - } - _ => () - } - - llty -} - -pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) - -> machine::llalign { - let llty = sizing_type_of(cx, t); - machine::llalign_of_min(cx, llty) -} - -fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - did: DefId, - tps: &[Ty<'tcx>]) - -> String { - let base = cx.tcx().item_path_str(did); - let strings: Vec = tps.iter().map(|t| t.to_string()).collect(); - let tstr = if strings.is_empty() { - base - } else { - format!("{}<{}>", base, strings.join(", ")) - }; - - if did.krate == 0 { - tstr - } else { - format!("{}.{}", did.krate, tstr) - } -} diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs new file mode 100644 index 0000000000000..322c5eb6e182a --- /dev/null +++ b/src/librustc_trans/trans_item.rs @@ -0,0 +1,596 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//! Walks the crate looking for items/impl-items/trait-items that have +//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! generates an error giving, respectively, the symbol name or +//! item-path. This is used for unit testing the code that generates +//! paths etc in all kinds of annoying scenarios. + +use attributes; +use base; +use consts; +use context::{CrateContext, SharedCrateContext}; +use common; +use declare; +use glue::DropGlueKind; +use llvm; +use monomorphize::{self, Instance}; +use rustc::dep_graph::DepNode; +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::subst::Substs; +use rustc_const_eval::fatal_const_eval_err; +use syntax::ast::{self, NodeId}; +use syntax::attr; +use type_of; +use glue; +use abi::{Abi, FnType}; +use back::symbol_names; +use std::fmt::Write; +use std::iter; + +#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] +pub enum TransItem<'tcx> { + DropGlue(DropGlueKind<'tcx>), + Fn(Instance<'tcx>), + Static(NodeId) +} + +impl<'a, 'tcx> TransItem<'tcx> { + + pub fn define(&self, ccx: &CrateContext<'a, 'tcx>) { + debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(ccx.tcx()), + self.to_raw_string(), + ccx.codegen_unit().name()); + + // (*) This code executes in the context of a dep-node for the + // entire CGU. In some cases, we introduce dep-nodes for + // particular items that we are translating (these nodes will + // have read edges coming into the CGU node). These smaller + // nodes are not needed for correctness -- we always + // invalidate an entire CGU at a time -- but they enable + // finer-grained testing, since you can write tests that check + // that the incoming edges to a particular fn are from a + // particular set. 
+ + match *self { + TransItem::Static(node_id) => { + let def_id = ccx.tcx().map.local_def_id(node_id); + let _task = ccx.tcx().dep_graph.in_task(DepNode::TransCrateItem(def_id)); // (*) + let item = ccx.tcx().map.expect_item(node_id); + if let hir::ItemStatic(_, m, _) = item.node { + match consts::trans_static(&ccx, m, item.id, &item.attrs) { + Ok(_) => { /* Cool, everything's alright. */ }, + Err(err) => { + // FIXME: shouldn't this be a `span_err`? + fatal_const_eval_err( + ccx.tcx(), &err, item.span, "static"); + } + }; + } else { + span_bug!(item.span, "Mismatch between hir::Item type and TransItem type") + } + } + TransItem::Fn(instance) => { + let _task = ccx.tcx().dep_graph.in_task( + DepNode::TransCrateItem(instance.def)); // (*) + + base::trans_instance(&ccx, instance); + } + TransItem::DropGlue(dg) => { + glue::implement_drop_glue(&ccx, dg); + } + } + + debug!("END IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(ccx.tcx()), + self.to_raw_string(), + ccx.codegen_unit().name()); + } + + pub fn predefine(&self, + ccx: &CrateContext<'a, 'tcx>, + linkage: llvm::Linkage) { + debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", + self.to_string(ccx.tcx()), + self.to_raw_string(), + ccx.codegen_unit().name()); + + let symbol_name = ccx.symbol_map() + .get_or_compute(ccx.shared(), *self); + + debug!("symbol {}", &symbol_name); + + match *self { + TransItem::Static(node_id) => { + TransItem::predefine_static(ccx, node_id, linkage, &symbol_name); + } + TransItem::Fn(instance) => { + TransItem::predefine_fn(ccx, instance, linkage, &symbol_name); + } + TransItem::DropGlue(dg) => { + TransItem::predefine_drop_glue(ccx, dg, linkage, &symbol_name); + } + } + + debug!("END PREDEFINING '{} ({})' in cgu {}", + self.to_string(ccx.tcx()), + self.to_raw_string(), + ccx.codegen_unit().name()); + } + + fn predefine_static(ccx: &CrateContext<'a, 'tcx>, + node_id: ast::NodeId, + linkage: llvm::Linkage, + symbol_name: &str) { + let def_id = ccx.tcx().map.local_def_id(node_id); + 
let ty = ccx.tcx().item_type(def_id); + let llty = type_of::type_of(ccx, ty); + + let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { + ccx.sess().span_fatal(ccx.tcx().map.span(node_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); + + unsafe { llvm::LLVMRustSetLinkage(g, linkage) }; + + let instance = Instance::mono(ccx.shared(), def_id); + ccx.instances().borrow_mut().insert(instance, g); + ccx.statics().borrow_mut().insert(g, def_id); + } + + fn predefine_fn(ccx: &CrateContext<'a, 'tcx>, + instance: Instance<'tcx>, + linkage: llvm::Linkage, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let item_ty = ccx.tcx().item_type(instance.def); + let item_ty = ccx.tcx().erase_regions(&item_ty); + let mono_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &item_ty); + + let attrs = ccx.tcx().get_attrs(instance.def); + let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, linkage) }; + base::set_link_section(ccx, lldecl, &attrs); + if linkage == llvm::Linkage::LinkOnceODRLinkage || + linkage == llvm::Linkage::WeakODRLinkage { + llvm::SetUniqueComdat(ccx.llmod(), lldecl); + } + + if let ty::TyClosure(..) 
= mono_ty.sty { + // set an inline hint for all closures + attributes::inline(lldecl, attributes::InlineAttr::Hint); + } + + attributes::from_fn_attrs(ccx, &attrs, lldecl); + + ccx.instances().borrow_mut().insert(instance, lldecl); + } + + fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>, + dg: glue::DropGlueKind<'tcx>, + linkage: llvm::Linkage, + symbol_name: &str) { + let tcx = ccx.tcx(); + assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty())); + let t = dg.ty(); + + let sig = ty::FnSig { + inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)], + output: tcx.mk_nil(), + variadic: false, + }; + + // Create a FnType for fn(*mut i8) and substitute the real type in + // later - that prevents FnType from splitting fat pointers up. + let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); + fn_ty.args[0].original_ty = type_of::type_of(ccx, t).ptr_to(); + let llfnty = fn_ty.llvm_type(ccx); + + assert!(declare::get_defined_value(ccx, symbol_name).is_none()); + let llfn = declare::declare_cfn(ccx, symbol_name, llfnty); + unsafe { llvm::LLVMRustSetLinkage(llfn, linkage) }; + if linkage == llvm::Linkage::LinkOnceODRLinkage || + linkage == llvm::Linkage::WeakODRLinkage { + llvm::SetUniqueComdat(ccx.llmod(), llfn); + } + attributes::set_frame_pointer_elimination(ccx, llfn); + ccx.drop_glues().borrow_mut().insert(dg, (llfn, fn_ty)); + } + + pub fn compute_symbol_name(&self, + scx: &SharedCrateContext<'a, 'tcx>) -> String { + match *self { + TransItem::Fn(instance) => instance.symbol_name(scx), + TransItem::Static(node_id) => { + let def_id = scx.tcx().map.local_def_id(node_id); + Instance::mono(scx, def_id).symbol_name(scx) + } + TransItem::DropGlue(dg) => { + let prefix = match dg { + DropGlueKind::Ty(_) => "drop", + DropGlueKind::TyContents(_) => "drop_contents", + }; + symbol_names::exported_name_from_type_and_prefix(scx, dg.ty(), prefix) + } + } + } + + pub fn is_from_extern_crate(&self) -> bool { + match *self { + TransItem::Fn(ref instance) => !instance.def.is_local(), + 
TransItem::DropGlue(..) | + TransItem::Static(..) => false, + } + } + + /// True if the translation item should only be translated to LLVM IR if + /// it is referenced somewhere (like inline functions, for example). + pub fn is_instantiated_only_on_demand(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + if self.explicit_linkage(tcx).is_some() { + return false; + } + + match *self { + TransItem::Fn(ref instance) => { + !instance.def.is_local() || + instance.substs.types().next().is_some() || + common::is_closure(tcx, instance.def) || + attr::requests_inline(&tcx.get_attrs(instance.def)[..]) + } + TransItem::DropGlue(..) => true, + TransItem::Static(..) => false, + } + } + + pub fn is_generic_fn(&self) -> bool { + match *self { + TransItem::Fn(ref instance) => { + instance.substs.types().next().is_some() + } + TransItem::DropGlue(..) | + TransItem::Static(..) => false, + } + } + + /// Returns true if there has to be a local copy of this TransItem in every + /// codegen unit that references it (as with inlined functions, for example) + pub fn needs_local_copy(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> bool { + // Currently everything that is instantiated only on demand is done so + // with "internal" linkage, so we need a copy to be present in every + // codegen unit. + // This is coincidental: We could also instantiate something only if it + // is referenced (e.g. a regular, private function) but place it in its + // own codegen unit with "external" linkage. + self.is_instantiated_only_on_demand(tcx) + } + + pub fn explicit_linkage(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { + let def_id = match *self { + TransItem::Fn(ref instance) => instance.def, + TransItem::Static(node_id) => tcx.map.local_def_id(node_id), + TransItem::DropGlue(..) 
=> return None, + }; + + let attributes = tcx.get_attrs(def_id); + if let Some(name) = attr::first_attr_value_str_by_name(&attributes, "linkage") { + if let Some(linkage) = base::llvm_linkage_by_name(&name.as_str()) { + Some(linkage) + } else { + let span = tcx.map.span_if_local(def_id); + if let Some(span) = span { + tcx.sess.span_fatal(span, "invalid linkage specified") + } else { + tcx.sess.fatal(&format!("invalid linkage specified: {}", name)) + } + } + } else { + None + } + } + + pub fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String { + let hir_map = &tcx.map; + + return match *self { + TransItem::DropGlue(dg) => { + let mut s = String::with_capacity(32); + match dg { + DropGlueKind::Ty(_) => s.push_str("drop-glue "), + DropGlueKind::TyContents(_) => s.push_str("drop-glue-contents "), + }; + let printer = DefPathBasedNames::new(tcx, false, false); + printer.push_type_name(dg.ty(), &mut s); + s + } + TransItem::Fn(instance) => { + to_string_internal(tcx, "fn ", instance) + }, + TransItem::Static(node_id) => { + let def_id = hir_map.local_def_id(node_id); + let instance = Instance::new(def_id, tcx.intern_substs(&[])); + to_string_internal(tcx, "static ", instance) + }, + }; + + fn to_string_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + prefix: &str, + instance: Instance<'tcx>) + -> String { + let mut result = String::with_capacity(32); + result.push_str(prefix); + let printer = DefPathBasedNames::new(tcx, false, false); + printer.push_instance_as_string(instance, &mut result); + result + } + } + + pub fn to_raw_string(&self) -> String { + match *self { + TransItem::DropGlue(dg) => { + let prefix = match dg { + DropGlueKind::Ty(_) => "Ty", + DropGlueKind::TyContents(_) => "TyContents", + }; + format!("DropGlue({}: {})", prefix, dg.ty() as *const _ as usize) + } + TransItem::Fn(instance) => { + format!("Fn({:?}, {})", + instance.def, + instance.substs.as_ptr() as usize) + } + TransItem::Static(id) => { + format!("Static({:?})", id) + } + } + } +} + + 
+//=----------------------------------------------------------------------------- +// TransItem String Keys +//=----------------------------------------------------------------------------- + +// The code below allows for producing a unique string key for a trans item. +// These keys are used by the handwritten auto-tests, so they need to be +// predictable and human-readable. +// +// Note: A lot of this could looks very similar to what's already in the +// ppaux module. It would be good to refactor things so we only have one +// parameterizable implementation for printing types. + +/// Same as `unique_type_name()` but with the result pushed onto the given +/// `output` parameter. +pub struct DefPathBasedNames<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + omit_disambiguators: bool, + omit_local_crate_name: bool, +} + +impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, + omit_disambiguators: bool, + omit_local_crate_name: bool) + -> Self { + DefPathBasedNames { + tcx: tcx, + omit_disambiguators: omit_disambiguators, + omit_local_crate_name: omit_local_crate_name, + } + } + + pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String) { + match t.sty { + ty::TyBool => output.push_str("bool"), + ty::TyChar => output.push_str("char"), + ty::TyStr => output.push_str("str"), + ty::TyNever => output.push_str("!"), + ty::TyInt(ast::IntTy::Is) => output.push_str("isize"), + ty::TyInt(ast::IntTy::I8) => output.push_str("i8"), + ty::TyInt(ast::IntTy::I16) => output.push_str("i16"), + ty::TyInt(ast::IntTy::I32) => output.push_str("i32"), + ty::TyInt(ast::IntTy::I64) => output.push_str("i64"), + ty::TyUint(ast::UintTy::Us) => output.push_str("usize"), + ty::TyUint(ast::UintTy::U8) => output.push_str("u8"), + ty::TyUint(ast::UintTy::U16) => output.push_str("u16"), + ty::TyUint(ast::UintTy::U32) => output.push_str("u32"), + ty::TyUint(ast::UintTy::U64) => output.push_str("u64"), + ty::TyFloat(ast::FloatTy::F32) => output.push_str("f32"), 
+ ty::TyFloat(ast::FloatTy::F64) => output.push_str("f64"), + ty::TyAdt(adt_def, substs) => { + self.push_def_path(adt_def.did, output); + self.push_type_params(substs, iter::empty(), output); + }, + ty::TyTuple(component_types) => { + output.push('('); + for &component_type in component_types { + self.push_type_name(component_type, output); + output.push_str(", "); + } + if !component_types.is_empty() { + output.pop(); + output.pop(); + } + output.push(')'); + }, + ty::TyBox(inner_type) => { + output.push_str("Box<"); + self.push_type_name(inner_type, output); + output.push('>'); + }, + ty::TyRawPtr(ty::TypeAndMut { ty: inner_type, mutbl } ) => { + output.push('*'); + match mutbl { + hir::MutImmutable => output.push_str("const "), + hir::MutMutable => output.push_str("mut "), + } + + self.push_type_name(inner_type, output); + }, + ty::TyRef(_, ty::TypeAndMut { ty: inner_type, mutbl }) => { + output.push('&'); + if mutbl == hir::MutMutable { + output.push_str("mut "); + } + + self.push_type_name(inner_type, output); + }, + ty::TyArray(inner_type, len) => { + output.push('['); + self.push_type_name(inner_type, output); + write!(output, "; {}", len).unwrap(); + output.push(']'); + }, + ty::TySlice(inner_type) => { + output.push('['); + self.push_type_name(inner_type, output); + output.push(']'); + }, + ty::TyDynamic(ref trait_data, ..) 
=> { + if let Some(principal) = trait_data.principal() { + self.push_def_path(principal.def_id(), output); + self.push_type_params(principal.skip_binder().substs, + trait_data.projection_bounds(), + output); + } + }, + ty::TyFnDef(.., &ty::BareFnTy{ unsafety, abi, ref sig } ) | + ty::TyFnPtr(&ty::BareFnTy{ unsafety, abi, ref sig } ) => { + if unsafety == hir::Unsafety::Unsafe { + output.push_str("unsafe "); + } + + if abi != ::abi::Abi::Rust { + output.push_str("extern \""); + output.push_str(abi.name()); + output.push_str("\" "); + } + + output.push_str("fn("); + + let ty::FnSig { + inputs: sig_inputs, + output: sig_output, + variadic: sig_variadic + } = self.tcx.erase_late_bound_regions_and_normalize(sig); + + if !sig_inputs.is_empty() { + for ¶meter_type in &sig_inputs { + self.push_type_name(parameter_type, output); + output.push_str(", "); + } + output.pop(); + output.pop(); + } + + if sig_variadic { + if !sig_inputs.is_empty() { + output.push_str(", ..."); + } else { + output.push_str("..."); + } + } + + output.push(')'); + + if !sig_output.is_nil() { + output.push_str(" -> "); + self.push_type_name(sig_output, output); + } + }, + ty::TyClosure(def_id, ref closure_substs) => { + self.push_def_path(def_id, output); + let generics = self.tcx.item_generics(self.tcx.closure_base_def_id(def_id)); + let substs = closure_substs.substs.truncate_to(self.tcx, generics); + self.push_type_params(substs, iter::empty(), output); + } + ty::TyError | + ty::TyInfer(_) | + ty::TyProjection(..) | + ty::TyParam(_) | + ty::TyAnon(..) 
=> { + bug!("DefPathBasedNames: Trying to create type name for \ + unexpected type: {:?}", t); + } + } + } + + pub fn push_def_path(&self, + def_id: DefId, + output: &mut String) { + let def_path = self.tcx.def_path(def_id); + + // some_crate:: + if !(self.omit_local_crate_name && def_id.is_local()) { + output.push_str(&self.tcx.crate_name(def_path.krate).as_str()); + output.push_str("::"); + } + + // foo::bar::ItemName:: + for part in self.tcx.def_path(def_id).data { + if self.omit_disambiguators { + write!(output, "{}::", part.data.as_interned_str()).unwrap(); + } else { + write!(output, "{}[{}]::", + part.data.as_interned_str(), + part.disambiguator).unwrap(); + } + } + + // remove final "::" + output.pop(); + output.pop(); + } + + fn push_type_params(&self, + substs: &Substs<'tcx>, + projections: I, + output: &mut String) + where I: Iterator> + { + let mut projections = projections.peekable(); + if substs.types().next().is_none() && projections.peek().is_none() { + return; + } + + output.push('<'); + + for type_parameter in substs.types() { + self.push_type_name(type_parameter, output); + output.push_str(", "); + } + + for projection in projections { + let projection = projection.skip_binder(); + let name = &projection.item_name.as_str(); + output.push_str(name); + output.push_str("="); + self.push_type_name(projection.ty, output); + output.push_str(", "); + } + + output.pop(); + output.pop(); + + output.push('>'); + } + + pub fn push_instance_as_string(&self, + instance: Instance<'tcx>, + output: &mut String) { + self.push_def_path(instance.def, output); + self.push_type_params(instance.substs, iter::empty(), output); + } +} diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs new file mode 100644 index 0000000000000..cf897fc5a1518 --- /dev/null +++ b/src/librustc_trans/tvec.rs @@ -0,0 +1,63 @@ +// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_camel_case_types)] + +use llvm; +use llvm::ValueRef; +use base::*; +use build::*; +use common::*; +use debuginfo::DebugLoc; +use rustc::ty::Ty; + +pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, + data_ptr: ValueRef, + unit_ty: Ty<'tcx>, + len: ValueRef, + f: F) + -> Block<'blk, 'tcx> where + F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, +{ + let _icx = push_ctxt("tvec::slice_for_each"); + let fcx = bcx.fcx; + + // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) + let zst = type_is_zero_size(bcx.ccx(), unit_ty); + let add = |bcx, a, b| if zst { + Add(bcx, a, b, DebugLoc::None) + } else { + InBoundsGEP(bcx, a, &[b]) + }; + + let header_bcx = fcx.new_block("slice_loop_header"); + let body_bcx = fcx.new_block("slice_loop_body"); + let next_bcx = fcx.new_block("slice_loop_next"); + + let start = if zst { + C_uint(bcx.ccx(), 0 as usize) + } else { + data_ptr + }; + let end = add(bcx, start, len); + + Br(bcx, header_bcx.llbb, DebugLoc::None); + let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); + + let keep_going = + ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None); + CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); + + let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); + let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); + AddIncomingToPhi(current, next, body_bcx.llbb); + Br(body_bcx, header_bcx.llbb, DebugLoc::None); + next_bcx +} diff --git a/src/librustc_trans/trans/type_.rs b/src/librustc_trans/type_.rs similarity index 75% rename from src/librustc_trans/trans/type_.rs rename to 
src/librustc_trans/type_.rs index c635d1ba233fd..2b2776acab869 100644 --- a/src/librustc_trans/trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -11,27 +11,35 @@ #![allow(non_upper_case_globals)] use llvm; -use llvm::{TypeRef, Bool, False, True, TypeKind, ValueRef}; +use llvm::{TypeRef, Bool, False, True, TypeKind}; use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128}; -use trans::context::CrateContext; -use util::nodemap::FnvHashMap; +use context::CrateContext; use syntax::ast; +use rustc::ty::layout; use std::ffi::CString; +use std::fmt; use std::mem; use std::ptr; -use std::cell::RefCell; use libc::c_uint; -#[derive(Clone, Copy, PartialEq, Debug)] +#[derive(Clone, Copy, PartialEq)] #[repr(C)] pub struct Type { rf: TypeRef } +impl fmt::Debug for Type { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMRustWriteTypeToString(self.to_ref(), s); + }).expect("non-UTF8 type description from LLVM")) + } +} + macro_rules! ty { ($e:expr) => ( Type::from_ref(unsafe { $e })) } @@ -50,12 +58,6 @@ impl Type { self.rf } - pub fn to_string(self: Type) -> String { - llvm::build_string(|s| unsafe { - llvm::LLVMWriteTypeToString(self.to_ref(), s); - }).expect("non-UTF8 type description from LLVM") - } - pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] { unsafe { mem::transmute(slice) } } @@ -69,7 +71,7 @@ impl Type { } pub fn metadata(ccx: &CrateContext) -> Type { - ty!(llvm::LLVMMetadataTypeInContext(ccx.llcx())) + ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx())) } pub fn i1(ccx: &CrateContext) -> Type { @@ -119,36 +121,37 @@ impl Type { pub fn int(ccx: &CrateContext) -> Type { match &ccx.tcx().sess.target.target.target_pointer_width[..] 
{ + "16" => Type::i16(ccx), "32" => Type::i32(ccx), "64" => Type::i64(ccx), - tws => panic!("Unsupported target word size for int: {}", tws), + tws => bug!("Unsupported target word size for int: {}", tws), } } pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type { match t { - ast::TyIs => ccx.int_type(), - ast::TyI8 => Type::i8(ccx), - ast::TyI16 => Type::i16(ccx), - ast::TyI32 => Type::i32(ccx), - ast::TyI64 => Type::i64(ccx) + ast::IntTy::Is => ccx.int_type(), + ast::IntTy::I8 => Type::i8(ccx), + ast::IntTy::I16 => Type::i16(ccx), + ast::IntTy::I32 => Type::i32(ccx), + ast::IntTy::I64 => Type::i64(ccx) } } pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type { match t { - ast::TyUs => ccx.int_type(), - ast::TyU8 => Type::i8(ccx), - ast::TyU16 => Type::i16(ccx), - ast::TyU32 => Type::i32(ccx), - ast::TyU64 => Type::i64(ccx) + ast::UintTy::Us => ccx.int_type(), + ast::UintTy::U8 => Type::i8(ccx), + ast::UintTy::U16 => Type::i16(ccx), + ast::UintTy::U32 => Type::i32(ccx), + ast::UintTy::U64 => Type::i64(ccx) } } pub fn float_from_ty(ccx: &CrateContext, t: ast::FloatTy) -> Type { match t { - ast::TyF32 => Type::f32(ccx), - ast::TyF64 => Type::f64(ccx), + ast::FloatTy::F32 => Type::f32(ccx), + ast::FloatTy::F64 => Type::f64(ccx), } } @@ -180,10 +183,6 @@ impl Type { Type::struct_(ccx, &[], false) } - pub fn glue_fn(ccx: &CrateContext, t: Type) -> Type { - Type::func(&[t], &Type::void(ccx)) - } - pub fn array(ty: &Type, len: u64) -> Type { ty!(llvm::LLVMRustArrayType(ty.to_ref(), len)) } @@ -203,12 +202,12 @@ impl Type { } pub fn vtable_ptr(ccx: &CrateContext) -> Type { - Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to().ptr_to() + Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() } pub fn kind(&self) -> TypeKind { unsafe { - llvm::LLVMGetTypeKind(self.to_ref()) + llvm::LLVMRustGetTypeKind(self.to_ref()) } } @@ -289,7 +288,7 @@ impl Type { Double => 64, X86_FP80 => 80, FP128 | PPC_FP128 => 128, - _ => panic!("llvm_float_width called on a 
non-float type") + _ => bug!("llvm_float_width called on a non-float type") } } @@ -299,43 +298,24 @@ impl Type { llvm::LLVMGetIntTypeWidth(self.to_ref()) as u64 } } -} - -/* Memory-managed object interface to type handles. */ - -pub struct TypeNames { - named_types: RefCell>, -} - -impl TypeNames { - pub fn new() -> TypeNames { - TypeNames { - named_types: RefCell::new(FnvHashMap()) + pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type { + use rustc::ty::layout::Integer::*; + match i { + I1 => Type::i1(cx), + I8 => Type::i8(cx), + I16 => Type::i16(cx), + I32 => Type::i32(cx), + I64 => Type::i64(cx), } } - pub fn associate_type(&self, s: &str, t: &Type) { - assert!(self.named_types.borrow_mut().insert(s.to_string(), - t.to_ref()).is_none()); - } - - pub fn find_type(&self, s: &str) -> Option { - self.named_types.borrow().get(s).map(|x| Type::from_ref(*x)) - } - - pub fn type_to_string(&self, ty: Type) -> String { - ty.to_string() - } - - pub fn types_to_str(&self, tys: &[Type]) -> String { - let strs: Vec = tys.iter().map(|t| self.type_to_string(*t)).collect(); - format!("[{}]", strs.join(",")) - } - - pub fn val_to_string(&self, val: ValueRef) -> String { - llvm::build_string(|s| unsafe { - llvm::LLVMWriteValueToString(val, s); - }).expect("nun-UTF8 value description from LLVM") + pub fn from_primitive(ccx: &CrateContext, p: layout::Primitive) -> Type { + match p { + layout::Int(i) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => bug!("It is not possible to convert Pointer directly to Type.") + } } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs new file mode 100644 index 0000000000000..22c405fe254a6 --- /dev/null +++ b/src/librustc_trans/type_of.rs @@ -0,0 +1,325 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_camel_case_types)] + +use abi::FnType; +use adt; +use common::*; +use machine; +use rustc::ty::{self, Ty, TypeFoldable}; +use trans_item::DefPathBasedNames; +use type_::Type; + +use syntax::ast; + + +// A "sizing type" is an LLVM type, the size and alignment of which are +// guaranteed to be equivalent to what you would get out of `type_of()`. It's +// useful because: +// +// (1) It may be cheaper to compute the sizing type than the full type if all +// you're interested in is the size and/or alignment; +// +// (2) It won't make any recursive calls to determine the structure of the +// type behind pointers. This can help prevent infinite loops for +// recursive types. For example, enum types rely on this behavior. + +pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + if let Some(t) = cx.llsizingtypes().borrow().get(&t).cloned() { + return t; + } + + debug!("sizing_type_of {:?}", t); + let _recursion_lock = cx.enter_type_of(t); + + let llsizingty = match t.sty { + _ if !type_is_sized(cx.tcx(), t) => { + Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false) + } + + ty::TyBool => Type::bool(cx), + ty::TyChar => Type::char(cx), + ty::TyInt(t) => Type::int_from_ty(cx, t), + ty::TyUint(t) => Type::uint_from_ty(cx, t), + ty::TyFloat(t) => Type::float_from_ty(cx, t), + ty::TyNever => Type::nil(cx), + + ty::TyBox(ty) | + ty::TyRef(_, ty::TypeAndMut{ty, ..}) | + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { + if type_is_sized(cx.tcx(), ty) { + Type::i8p(cx) + } else { + Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false) + } + } + + ty::TyFnDef(..) 
=> Type::nil(cx), + ty::TyFnPtr(_) => Type::i8p(cx), + + ty::TyArray(ty, size) => { + let llty = sizing_type_of(cx, ty); + let size = size as u64; + Type::array(&llty, size) + } + + ty::TyTuple(ref tys) if tys.is_empty() => { + Type::nil(cx) + } + + ty::TyAdt(..) if t.is_simd() => { + let e = t.simd_type(cx.tcx()); + if !e.is_machine() { + cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + t, e)) + } + let llet = type_of(cx, e); + let n = t.simd_size(cx.tcx()) as u64; + Type::vector(&llet, n) + } + + ty::TyTuple(..) | ty::TyAdt(..) | ty::TyClosure(..) => { + adt::sizing_type_of(cx, t, false) + } + + ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) | + ty::TyAnon(..) | ty::TyError => { + bug!("fictitious type {:?} in sizing_type_of()", t) + } + ty::TySlice(_) | ty::TyDynamic(..) | ty::TyStr => bug!() + }; + + debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty); + + cx.llsizingtypes().borrow_mut().insert(t, llsizingty); + + // FIXME(eddyb) Temporary sanity check for ty::layout. + let layout = cx.layout_of(t); + if !type_is_sized(cx.tcx(), t) { + if !layout.is_unsized() { + bug!("layout should be unsized for type `{}` / {:#?}", + t, layout); + } + + // Unsized types get turned into a fat pointer for LLVM. + return llsizingty; + } + + let r = layout.size(&cx.tcx().data_layout).bytes(); + let l = machine::llsize_of_alloc(cx, llsizingty); + if r != l { + bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", + r, l, t, layout); + } + + let r = layout.align(&cx.tcx().data_layout).abi(); + let l = machine::llalign_of_min(cx, llsizingty) as u64; + if r != l { + bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}", + r, l, t, layout); + } + + llsizingty +} + +pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { + match ty.sty { + ty::TyBox(t) | + ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty: t, .. 
}) if !type_is_sized(ccx.tcx(), t) => { + in_memory_type_of(ccx, t).ptr_to() + } + _ => bug!("expected fat ptr ty but got {:?}", ty) + } +} + +fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { + let unsized_part = ccx.tcx().struct_tail(ty); + match unsized_part.sty { + ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { + Type::uint_from_ty(ccx, ast::UintTy::Us) + } + ty::TyDynamic(..) => Type::vtable_ptr(ccx), + _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}", + unsized_part, ty) + } +} + +pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + if t.is_bool() { + Type::i1(cx) + } else { + type_of(cx, t) + } +} + +/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. +/// This is the right LLVM type for an alloca containing a value of that type, +/// and the pointee of an Lvalue Datum (which is always a LLVM pointer). +/// For unsized types, the returned type is a fat pointer, thus the resulting +/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double +/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`. +/// This is needed due to the treatment of immediate values, as a fat pointer +/// is too large for it to be placed in SSA value (by our rules). +/// For the raw type without far pointer indirection, see `in_memory_type_of`. +pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { + let ty = if !type_is_sized(cx.tcx(), ty) { + cx.tcx().mk_imm_ptr(ty) + } else { + ty + }; + in_memory_type_of(cx, ty) +} + +/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. +/// This is the right LLVM type for a field/array element of that type, +/// and is the same as `type_of` for all Sized types. +/// Unsized types, however, are represented by a "minimal unit", e.g. 
+/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this +/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. +/// If the type is an unsized struct, the regular layout is generated, +/// with the inner-most trailing unsized field using the "minimal unit" +/// of that field's type - this is useful for taking the address of +/// that field and ensuring the struct has the right alignment. +/// For the LLVM type of a value as a whole, see `type_of`. +/// NB: If you update this, be sure to update `sizing_type_of()` as well. +pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { + // Check the cache. + if let Some(&llty) = cx.lltypes().borrow().get(&t) { + return llty; + } + + debug!("type_of {:?}", t); + + assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t); + + // Replace any typedef'd types with their equivalent non-typedef + // type. This ensures that all LLVM nominal types that contain + // Rust types are defined as the same LLVM types. If we don't do + // this then, e.g. `Option<{myfield: bool}>` would be a different + // type than `Option`. + let t_norm = cx.tcx().erase_regions(&t); + + if t != t_norm { + let llty = in_memory_type_of(cx, t_norm); + debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty); + cx.lltypes().borrow_mut().insert(t, llty); + return llty; + } + + let mut llty = match t.sty { + ty::TyBool => Type::bool(cx), + ty::TyChar => Type::char(cx), + ty::TyInt(t) => Type::int_from_ty(cx, t), + ty::TyUint(t) => Type::uint_from_ty(cx, t), + ty::TyFloat(t) => Type::float_from_ty(cx, t), + ty::TyNever => Type::nil(cx), + ty::TyClosure(..) => { + // Only create the named struct, but don't fill it in. We + // fill it in *after* placing it into the type cache. 
+ adt::incomplete_type_of(cx, t, "closure") + } + + ty::TyBox(ty) | + ty::TyRef(_, ty::TypeAndMut{ty, ..}) | + ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { + if !type_is_sized(cx.tcx(), ty) { + if let ty::TyStr = ty.sty { + // This means we get a nicer name in the output (str is always + // unsized). + cx.str_slice_type() + } else { + let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); + let info_ty = unsized_info_ty(cx, ty); + Type::struct_(cx, &[ptr_ty, info_ty], false) + } + } else { + in_memory_type_of(cx, ty).ptr_to() + } + } + + ty::TyArray(ty, size) => { + let size = size as u64; + let llty = in_memory_type_of(cx, ty); + Type::array(&llty, size) + } + + // Unsized slice types (and str) have the type of their element, and + // traits have the type of u8. This is so that the data pointer inside + // fat pointers is of the right type (e.g. for array accesses), even + // when taking the address of an unsized field in a struct. + ty::TySlice(ty) => in_memory_type_of(cx, ty), + ty::TyStr | ty::TyDynamic(..) => Type::i8(cx), + + ty::TyFnDef(..) => Type::nil(cx), + ty::TyFnPtr(f) => { + let sig = cx.tcx().erase_late_bound_regions_and_normalize(&f.sig); + FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to() + } + ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), + ty::TyTuple(..) => { + adt::type_of(cx, t) + } + ty::TyAdt(..) if t.is_simd() => { + let e = t.simd_type(cx.tcx()); + if !e.is_machine() { + cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + t, e)) + } + let llet = in_memory_type_of(cx, e); + let n = t.simd_size(cx.tcx()) as u64; + Type::vector(&llet, n) + } + ty::TyAdt(..) => { + // Only create the named struct, but don't fill it in. We + // fill it in *after* placing it into the type cache. This + // avoids creating more than one copy of the enum when one + // of the enum's variants refers to the enum itself. 
+ let name = llvm_type_name(cx, t); + adt::incomplete_type_of(cx, t, &name[..]) + } + + ty::TyInfer(..) | + ty::TyProjection(..) | + ty::TyParam(..) | + ty::TyAnon(..) | + ty::TyError => bug!("type_of with {:?}", t), + }; + + debug!("--> mapped t={:?} to llty={:?}", t, llty); + + cx.lltypes().borrow_mut().insert(t, llty); + + // If this was an enum or struct, fill in the type now. + match t.sty { + ty::TyAdt(..) | ty::TyClosure(..) if !t.is_simd() => { + adt::finish_type_of(cx, t, &mut llty); + } + _ => () + } + + llty +} + +pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) + -> machine::llalign { + let layout = cx.layout_of(t); + layout.align(&cx.tcx().data_layout).abi() as machine::llalign +} + +fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String { + let mut name = String::with_capacity(32); + let printer = DefPathBasedNames::new(cx.tcx(), true, true); + printer.push_type_name(ty, &mut name); + name +} diff --git a/src/librustc_trans/trans/value.rs b/src/librustc_trans/value.rs similarity index 92% rename from src/librustc_trans/trans/value.rs rename to src/librustc_trans/value.rs index bc71278c15743..79e0c11515fc4 100644 --- a/src/librustc_trans/trans/value.rs +++ b/src/librustc_trans/value.rs @@ -10,13 +10,24 @@ use llvm; use llvm::{UseRef, ValueRef}; -use trans::basic_block::BasicBlock; -use trans::common::Block; +use basic_block::BasicBlock; +use common::Block; + +use std::fmt; + use libc::c_uint; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, PartialEq)] pub struct Value(pub ValueRef); +impl fmt::Debug for Value { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.write_str(&llvm::build_string(|s| unsafe { + llvm::LLVMRustWriteValueToString(self.0, s); + }).expect("nun-UTF8 value description from LLVM")) + } +} + macro_rules! 
opt_val { ($e:expr) => ( unsafe { match $e { diff --git a/src/librustc_typeck/Cargo.toml b/src/librustc_typeck/Cargo.toml new file mode 100644 index 0000000000000..f08d26373e50e --- /dev/null +++ b/src/librustc_typeck/Cargo.toml @@ -0,0 +1,24 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_typeck" +version = "0.0.0" + +[lib] +name = "rustc_typeck" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +log = { path = "../liblog" } +syntax = { path = "../libsyntax" } +arena = { path = "../libarena" } +fmt_macros = { path = "../libfmt_macros" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_platform_intrinsics = { path = "../librustc_platform_intrinsics" } +syntax_pos = { path = "../libsyntax_pos" } +rustc_errors = { path = "../librustc_errors" } diff --git a/src/librustc_typeck/astconv.rs b/src/librustc_typeck/astconv.rs index 98effeefad2a7..b5531b8bb9ec9 100644 --- a/src/librustc_typeck/astconv.rs +++ b/src/librustc_typeck/astconv.rs @@ -16,12 +16,12 @@ //! somewhat differently during the collect and check phases, //! particularly with respect to looking up the types of top-level //! items. In the collect phase, the crate context is used as the -//! `AstConv` instance; in this phase, the `get_item_type_scheme()` -//! function triggers a recursive call to `type_scheme_of_item()` +//! `AstConv` instance; in this phase, the `get_item_type()` +//! function triggers a recursive call to `type_of_item()` //! (note that `ast_ty_to_ty()` will detect recursive types and report //! an error). In the check phase, when the FnCtxt is used as the -//! `AstConv`, `get_item_type_scheme()` just looks up the item type in -//! `tcx.tcache` (using `ty::lookup_item_type`). +//! 
`AstConv`, `get_item_type()` just looks up the item type in +//! `tcx.types` (using `TyCtxt::item_type`). //! //! The `RegionScope` trait controls what happens when the user does //! not specify a region in some location where a region is required @@ -48,47 +48,52 @@ //! case but `&a` in the second. Basically, defaults that appear inside //! an rptr (`&r.T`) use the region `r` that appears in the rptr. -use middle::astconv_util::{prim_ty_to_ty, prohibit_type_params, prohibit_projection}; -use middle::const_eval::{self, ConstVal}; -use middle::const_eval::EvalHint::UncheckedExprHint; -use middle::def; -use middle::def_id::DefId; +use rustc_const_eval::eval_length; +use rustc_data_structures::accumulate_vec::AccumulateVec; +use hir::{self, SelfKind}; +use hir::def::Def; +use hir::def_id::DefId; +use hir::print as pprust; use middle::resolve_lifetime as rl; -use middle::privacy::{AllPublic, LastMod}; -use middle::subst::{FnSpace, TypeSpace, SelfSpace, Subst, Substs, ParamSpace}; -use middle::traits; -use middle::ty::{self, Ty, ToPredicate, TypeFoldable}; -use middle::ty::wf::object_region_bounds; +use rustc::lint; +use rustc::ty::subst::{Kind, Subst, Substs}; +use rustc::traits; +use rustc::ty::{self, Ty, TyCtxt, ToPredicate, TypeFoldable}; +use rustc::ty::wf::object_region_bounds; +use rustc_back::slice; use require_c_abi_if_variadic; use rscope::{self, UnelidableRscope, RegionScope, ElidableRscope, ObjectLifetimeDefaultRscope, ShiftedRscope, BindingRscope, ElisionFailureInfo, ElidedLifetime}; +use rscope::{AnonTypeScope, MaybeWithAnonTypes}; use util::common::{ErrorReported, FN_OUTPUT_NAME}; -use util::nodemap::FnvHashSet; +use util::nodemap::{NodeMap, FxHashSet}; +use std::cell::RefCell; +use std::iter; use syntax::{abi, ast}; -use syntax::codemap::{Span, Pos}; -use syntax::errors::DiagnosticBuilder; use syntax::feature_gate::{GateIssue, emit_feature_err}; -use syntax::parse::token; +use syntax::symbol::{Symbol, keywords}; +use syntax_pos::{Span, Pos}; +use 
errors::DiagnosticBuilder; -use rustc_front::print::pprust; -use rustc_front::hir; -use rustc_back::slice; +pub trait AstConv<'gcx, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>; -pub trait AstConv<'tcx> { - fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx>; + /// A cache used for the result of `ast_ty_to_ty_cache` + fn ast_ty_to_ty_cache(&self) -> &RefCell>>; - /// Identify the type scheme for an item with a type, like a type - /// alias, fn, or struct. This allows you to figure out the set of - /// type parameters defined on the item. - fn get_item_type_scheme(&self, span: Span, id: DefId) - -> Result, ErrorReported>; + /// Returns the generic type and lifetime parameters for an item. + fn get_generics(&self, span: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported>; + + /// Identify the type for an item, like a type alias, fn, or struct. + fn get_item_type(&self, span: Span, id: DefId) -> Result, ErrorReported>; /// Returns the `TraitDef` for a given trait. This allows you to /// figure out the set of type parameters defined on the trait. fn get_trait_def(&self, span: Span, id: DefId) - -> Result<&'tcx ty::TraitDef<'tcx>, ErrorReported>; + -> Result<&'tcx ty::TraitDef, ErrorReported>; /// Ensure that the super-predicates for the trait with the given /// id are available and also for the transitive set of @@ -101,25 +106,22 @@ pub trait AstConv<'tcx> { fn get_type_parameter_bounds(&self, span: Span, def_id: ast::NodeId) -> Result>, ErrorReported>; - /// Returns true if the trait with id `trait_def_id` defines an - /// associated type with the name `name`. - fn trait_defines_associated_type_named(&self, trait_def_id: DefId, name: ast::Name) - -> bool; - /// Return an (optional) substitution to convert bound type parameters that /// are in scope into free ones. This function should only return Some /// within a fn body. /// See ParameterEnvironment::free_substs for more information. 
- fn get_free_substs(&self) -> Option<&Substs<'tcx>> { - None - } + fn get_free_substs(&self) -> Option<&Substs<'tcx>>; /// What type should we use when a type is omitted? - fn ty_infer(&self, - param_and_substs: Option>, - substs: Option<&mut Substs<'tcx>>, - space: Option, - span: Span) -> Ty<'tcx>; + fn ty_infer(&self, span: Span) -> Ty<'tcx>; + + /// Same as ty_infer, but with a known type parameter definition. + fn ty_infer_for_def(&self, + _def: &ty::TypeParameterDef<'tcx>, + _substs: &[Kind<'tcx>], + span: Span) -> Ty<'tcx> { + self.ty_infer(span) + } /// Projecting an associated type from a (potentially) /// higher-ranked trait reference is more complicated, because of @@ -132,18 +134,7 @@ pub trait AstConv<'tcx> { span: Span, poly_trait_ref: ty::PolyTraitRef<'tcx>, item_name: ast::Name) - -> Ty<'tcx> - { - if let Some(trait_ref) = self.tcx().no_late_bound_regions(&poly_trait_ref) { - self.projected_ty(span, trait_ref, item_name) - } else { - // no late-bound regions, we can just ignore the binder - span_err!(self.tcx().sess, span, E0212, - "cannot extract an associated type from a higher-ranked trait bound \ - in this context"); - self.tcx().types.err - } - } + -> Ty<'tcx>; /// Project an associated type from a non-higher-ranked trait reference. /// This is fairly straightforward and can be accommodated in any context. @@ -152,14 +143,32 @@ pub trait AstConv<'tcx> { _trait_ref: ty::TraitRef<'tcx>, _item_name: ast::Name) -> Ty<'tcx>; + + /// Invoked when we encounter an error from some prior pass + /// (e.g. resolve) that is translated into a ty-error. This is + /// used to help suppress derived errors typeck might otherwise + /// report. 
+ fn set_tainted_by_errors(&self); +} + +struct ConvertedBinding<'tcx> { + item_name: ast::Name, + ty: Ty<'tcx>, + span: Span, } -pub fn ast_region_to_region(tcx: &ty::ctxt, lifetime: &hir::Lifetime) - -> ty::Region { - let r = match tcx.named_region_map.get(&lifetime.id) { +/// Dummy type used for the `Self` of a `TraitRef` created for converting +/// a trait object, and which gets removed in `ExistentialTraitRef`. +/// This type must not appear anywhere in other converted types. +const TRAIT_OBJECT_DUMMY_SELF: ty::TypeVariants<'static> = ty::TyInfer(ty::FreshTy(0)); + +pub fn ast_region_to_region<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + lifetime: &hir::Lifetime) + -> &'tcx ty::Region { + let r = match tcx.named_region_map.defs.get(&lifetime.id) { None => { // should have been recorded by the `resolve_lifetime` pass - tcx.sess.span_bug(lifetime.span, "unresolved lifetime"); + span_bug!(lifetime.span, "unresolved lifetime"); } Some(&rl::DefStaticRegion) => { @@ -167,23 +176,45 @@ pub fn ast_region_to_region(tcx: &ty::ctxt, lifetime: &hir::Lifetime) } Some(&rl::DefLateBoundRegion(debruijn, id)) => { - ty::ReLateBound(debruijn, ty::BrNamed(tcx.map.local_def_id(id), lifetime.name)) - } - - Some(&rl::DefEarlyBoundRegion(space, index, _)) => { + // If this region is declared on a function, it will have + // an entry in `late_bound`, but if it comes from + // `for<'a>` in some type or something, it won't + // necessarily have one. In that case though, we won't be + // changed from late to early bound, so we can just + // substitute false. 
+ let issue_32330 = tcx.named_region_map + .late_bound + .get(&id) + .cloned() + .unwrap_or(ty::Issue32330::WontChange); + ty::ReLateBound(debruijn, ty::BrNamed(tcx.map.local_def_id(id), + lifetime.name, + issue_32330)) + } + + Some(&rl::DefEarlyBoundRegion(index, _)) => { ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, index: index, name: lifetime.name }) } Some(&rl::DefFreeRegion(scope, id)) => { + // As in DefLateBoundRegion above, could be missing for some late-bound + // regions, but also for early-bound regions. + let issue_32330 = tcx.named_region_map + .late_bound + .get(&id) + .cloned() + .unwrap_or(ty::Issue32330::WontChange); ty::ReFree(ty::FreeRegion { scope: scope.to_code_extent(&tcx.region_maps), bound_region: ty::BrNamed(tcx.map.local_def_id(id), - lifetime.name) - }) + lifetime.name, + issue_32330) + }) + + // (*) -- not late-bound, won't change } }; @@ -192,25 +223,27 @@ pub fn ast_region_to_region(tcx: &ty::ctxt, lifetime: &hir::Lifetime) lifetime.id, r); - r + tcx.mk_region(r) } fn report_elision_failure( db: &mut DiagnosticBuilder, - default_span: Span, params: Vec) { let mut m = String::new(); let len = params.len(); - let mut any_lifetimes = false; - for (i, info) in params.into_iter().enumerate() { + let elided_params: Vec<_> = params.into_iter() + .filter(|info| info.lifetime_count > 0) + .collect(); + + let elided_len = elided_params.len(); + + for (i, info) in elided_params.into_iter().enumerate() { let ElisionFailureInfo { name, lifetime_count: n, have_bound_regions } = info; - any_lifetimes = any_lifetimes || (n > 0); - let help_name = if name.is_empty() { format!("argument {}", i + 1) } else { @@ -224,1980 +257,1787 @@ fn report_elision_failure( if have_bound_regions { "free " } else { "" } ) })[..]); - if len == 2 && i == 0 { + if elided_len == 2 && i == 0 { m.push_str(" or "); - } else if i + 2 == len { + } else if i + 2 == elided_len { m.push_str(", or "); - } else if i + 1 != len { + } else if i != elided_len - 1 { 
m.push_str(", "); } + } if len == 0 { - fileline_help!(db, default_span, - "this function's return type contains a borrowed value, but \ - there is no value for it to be borrowed from"); - fileline_help!(db, default_span, - "consider giving it a 'static lifetime"); - } else if !any_lifetimes { - fileline_help!(db, default_span, - "this function's return type contains a borrowed value with \ - an elided lifetime, but the lifetime cannot be derived from \ - the arguments"); - fileline_help!(db, default_span, - "consider giving it an explicit bounded or 'static \ - lifetime"); - } else if len == 1 { - fileline_help!(db, default_span, - "this function's return type contains a borrowed value, but \ - the signature does not say which {} it is borrowed from", - m); + help!(db, + "this function's return type contains a borrowed value, but \ + there is no value for it to be borrowed from"); + help!(db, + "consider giving it a 'static lifetime"); + } else if elided_len == 0 { + help!(db, + "this function's return type contains a borrowed value with \ + an elided lifetime, but the lifetime cannot be derived from \ + the arguments"); + help!(db, + "consider giving it an explicit bounded or 'static \ + lifetime"); + } else if elided_len == 1 { + help!(db, + "this function's return type contains a borrowed value, but \ + the signature does not say which {} it is borrowed from", + m); } else { - fileline_help!(db, default_span, - "this function's return type contains a borrowed value, but \ - the signature does not say whether it is borrowed from {}", - m); + help!(db, + "this function's return type contains a borrowed value, but \ + the signature does not say whether it is borrowed from {}", + m); } } -pub fn opt_ast_region_to_region<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - default_span: Span, - opt_lifetime: &Option) -> ty::Region -{ - let r = match *opt_lifetime { - Some(ref lifetime) => { - ast_region_to_region(this.tcx(), lifetime) - } - - None => match 
rscope.anon_regions(default_span, 1) { - Ok(rs) => rs[0], - Err(params) => { - let mut err = struct_span_err!(this.tcx().sess, default_span, E0106, - "missing lifetime specifier"); - if let Some(params) = params { - report_elision_failure(&mut err, default_span, params); - } - err.emit(); - ty::ReStatic +impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o { + pub fn opt_ast_region_to_region(&self, + rscope: &RegionScope, + default_span: Span, + opt_lifetime: &Option) -> &'tcx ty::Region + { + let r = match *opt_lifetime { + Some(ref lifetime) => { + ast_region_to_region(self.tcx(), lifetime) } - } - }; - - debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}", - opt_lifetime, - r); - - r -} - -/// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`, -/// returns an appropriate set of substitutions for this particular reference to `I`. -pub fn ast_path_substs_for_ty<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - decl_generics: &ty::Generics<'tcx>, - item_segment: &hir::PathSegment) - -> Substs<'tcx> -{ - let tcx = this.tcx(); - - // ast_path_substs() is only called to convert paths that are - // known to refer to traits, types, or structs. In these cases, - // all type parameters defined for the item being referenced will - // be in the TypeSpace or SelfSpace. - // - // Note: in the case of traits, the self parameter is also - // defined, but we don't currently create a `type_param_def` for - // `Self` because it is implicit. - assert!(decl_generics.regions.all(|d| d.space == TypeSpace)); - assert!(decl_generics.types.all(|d| d.space != FnSpace)); - - let (regions, types, assoc_bindings) = match item_segment.parameters { - hir::AngleBracketedParameters(ref data) => { - convert_angle_bracketed_parameters(this, rscope, span, decl_generics, data) - } - hir::ParenthesizedParameters(..) 
=> { - span_err!(tcx.sess, span, E0214, - "parenthesized parameters may only be used with a trait"); - let ty_param_defs = decl_generics.types.get_slice(TypeSpace); - (Substs::empty(), - ty_param_defs.iter().map(|_| tcx.types.err).collect(), - vec![]) - } - }; - prohibit_projections(this.tcx(), &assoc_bindings); + None => self.tcx().mk_region(match rscope.anon_regions(default_span, 1) { + Ok(rs) => rs[0], + Err(params) => { + let ampersand_span = Span { hi: default_span.lo, ..default_span}; - create_substs_for_ast_path(this, - span, - param_mode, - decl_generics, - None, - types, - regions) -} + let mut err = struct_span_err!(self.tcx().sess, ampersand_span, E0106, + "missing lifetime specifier"); + err.span_label(ampersand_span, &format!("expected lifetime parameter")); -#[derive(PartialEq, Eq)] -pub enum PathParamMode { - // Any path in a type context. - Explicit, - // The `module::Type` in `module::Type::method` in an expression. - Optional -} + if let Some(params) = params { + report_elision_failure(&mut err, params); + } + err.emit(); + ty::ReStatic + } + }) + }; -fn create_region_substs<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - decl_generics: &ty::Generics<'tcx>, - regions_provided: Vec) - -> Substs<'tcx> -{ - let tcx = this.tcx(); - - // If the type is parameterized by this region, then replace this - // region with the current anon region binding (in other words, - // whatever & would get replaced with). 
- let expected_num_region_params = decl_generics.regions.len(TypeSpace); - let supplied_num_region_params = regions_provided.len(); - let regions = if expected_num_region_params == supplied_num_region_params { - regions_provided - } else { - let anon_regions = - rscope.anon_regions(span, expected_num_region_params); + debug!("opt_ast_region_to_region(opt_lifetime={:?}) yields {:?}", + opt_lifetime, + r); - if supplied_num_region_params != 0 || anon_regions.is_err() { - report_lifetime_number_error(tcx, span, - supplied_num_region_params, - expected_num_region_params); - } + r + } - match anon_regions { - Ok(anon_regions) => anon_regions, - Err(_) => (0..expected_num_region_params).map(|_| ty::ReStatic).collect() + /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`, + /// returns an appropriate set of substitutions for this particular reference to `I`. + pub fn ast_path_substs_for_ty(&self, + rscope: &RegionScope, + span: Span, + def_id: DefId, + item_segment: &hir::PathSegment) + -> &'tcx Substs<'tcx> + { + let tcx = self.tcx(); + + match item_segment.parameters { + hir::AngleBracketedParameters(_) => {} + hir::ParenthesizedParameters(..) => { + struct_span_err!(tcx.sess, span, E0214, + "parenthesized parameters may only be used with a trait") + .span_label(span, &format!("only traits may use parentheses")) + .emit(); + + return Substs::for_item(tcx, def_id, |_, _| { + tcx.mk_region(ty::ReStatic) + }, |_, _| { + tcx.types.err + }); + } } - }; - Substs::new_type(vec![], regions) -} - -/// Given the type/region arguments provided to some path (along with -/// an implicit Self, if this is a trait reference) returns the complete -/// set of substitutions. This may involve applying defaulted type parameters. -/// -/// Note that the type listing given here is *exactly* what the user provided. 
-/// -/// The `region_substs` should be the result of `create_region_substs` -/// -- that is, a substitution with no types but the correct number of -/// regions. -fn create_substs_for_ast_path<'tcx>( - this: &AstConv<'tcx>, - span: Span, - param_mode: PathParamMode, - decl_generics: &ty::Generics<'tcx>, - self_ty: Option>, - types_provided: Vec>, - region_substs: Substs<'tcx>) - -> Substs<'tcx> -{ - let tcx = this.tcx(); - debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}, \ - types_provided={:?}, region_substs={:?})", - decl_generics, self_ty, types_provided, - region_substs); - - assert_eq!(region_substs.regions().len(TypeSpace), decl_generics.regions.len(TypeSpace)); - assert!(region_substs.types.is_empty()); + let (substs, assoc_bindings) = + self.create_substs_for_ast_path(rscope, + span, + def_id, + &item_segment.parameters, + None); - // Convert the type parameters supplied by the user. - let ty_param_defs = decl_generics.types.get_slice(TypeSpace); - let formal_ty_param_count = ty_param_defs.len(); - let required_ty_param_count = ty_param_defs.iter() - .take_while(|x| x.default.is_none()) - .count(); + assoc_bindings.first().map(|b| self.tcx().prohibit_projection(b.span)); - let mut type_substs = get_type_substs_for_defs(this, - span, - types_provided, - param_mode, - ty_param_defs, - region_substs.clone(), - self_ty); - - let supplied_ty_param_count = type_substs.len(); - check_type_argument_count(this.tcx(), span, supplied_ty_param_count, - required_ty_param_count, formal_ty_param_count); - - if supplied_ty_param_count < required_ty_param_count { - while type_substs.len() < required_ty_param_count { - type_substs.push(tcx.types.err); - } - } else if supplied_ty_param_count > formal_ty_param_count { - type_substs.truncate(formal_ty_param_count); + substs } - assert!(type_substs.len() >= required_ty_param_count && - type_substs.len() <= formal_ty_param_count); - let mut substs = region_substs; - substs.types.extend(TypeSpace, 
type_substs.into_iter()); + /// Given the type/region arguments provided to some path (along with + /// an implicit Self, if this is a trait reference) returns the complete + /// set of substitutions. This may involve applying defaulted type parameters. + /// + /// Note that the type listing given here is *exactly* what the user provided. + fn create_substs_for_ast_path(&self, + rscope: &RegionScope, + span: Span, + def_id: DefId, + parameters: &hir::PathParameters, + self_ty: Option>) + -> (&'tcx Substs<'tcx>, Vec>) + { + let tcx = self.tcx(); - match self_ty { - None => { - // If no self-type is provided, it's still possible that - // one was declared, because this could be an object type. - } - Some(ty) => { - // If a self-type is provided, one should have been - // "declared" (in other words, this should be a - // trait-ref). - assert!(decl_generics.types.get_self().is_some()); - substs.types.push(SelfSpace, ty); - } - } + debug!("create_substs_for_ast_path(def_id={:?}, self_ty={:?}, \ + parameters={:?})", + def_id, self_ty, parameters); - let actual_supplied_ty_param_count = substs.types.len(TypeSpace); - for param in &ty_param_defs[actual_supplied_ty_param_count..] { - if let Some(default) = param.default { - // If we are converting an object type, then the - // `Self` parameter is unknown. However, some of the - // other type parameters may reference `Self` in their - // defaults. This will lead to an ICE if we are not - // careful! - if self_ty.is_none() && default.has_self_ty() { - span_err!(tcx.sess, span, E0393, - "the type parameter `{}` must be explicitly specified \ - in an object type because its default value `{}` references \ - the type `Self`", - param.name, - default); - substs.types.push(TypeSpace, tcx.types.err); - } else { - // This is a default type parameter. 
- let default = default.subst_spanned(tcx, - &substs, - Some(span)); - substs.types.push(TypeSpace, default); + let (lifetimes, num_types_provided, infer_types) = match *parameters { + hir::AngleBracketedParameters(ref data) => { + (&data.lifetimes[..], data.types.len(), data.infer_types) } - } else { - tcx.sess.span_bug(span, "extra parameter without default"); - } - } + hir::ParenthesizedParameters(_) => (&[][..], 1, false) + }; - debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}", - decl_generics, self_ty, substs); + // If the type is parameterized by this region, then replace this + // region with the current anon region binding (in other words, + // whatever & would get replaced with). + let decl_generics = match self.get_generics(span, def_id) { + Ok(generics) => generics, + Err(ErrorReported) => { + // No convenient way to recover from a cycle here. Just bail. Sorry! + self.tcx().sess.abort_if_errors(); + bug!("ErrorReported returned, but no errors reports?") + } + }; + let expected_num_region_params = decl_generics.regions.len(); + let supplied_num_region_params = lifetimes.len(); + let regions = if expected_num_region_params == supplied_num_region_params { + lifetimes.iter().map(|l| *ast_region_to_region(tcx, l)).collect() + } else { + let anon_regions = + rscope.anon_regions(span, expected_num_region_params); - substs -} + if supplied_num_region_params != 0 || anon_regions.is_err() { + report_lifetime_number_error(tcx, span, + supplied_num_region_params, + expected_num_region_params); + } -/// Returns types_provided if it is not empty, otherwise populating the -/// type parameters with inference variables as appropriate. 
-fn get_type_substs_for_defs<'tcx>(this: &AstConv<'tcx>, - span: Span, - types_provided: Vec>, - param_mode: PathParamMode, - ty_param_defs: &[ty::TypeParameterDef<'tcx>], - mut substs: Substs<'tcx>, - self_ty: Option>) - -> Vec> -{ - fn default_type_parameter<'tcx>(p: &ty::TypeParameterDef<'tcx>, self_ty: Option>) - -> Option> - { - if let Some(ref default) = p.default { - if self_ty.is_none() && default.has_self_ty() { - // There is no suitable inference default for a type parameter - // that references self with no self-type provided. - return None; + match anon_regions { + Ok(anon_regions) => anon_regions, + Err(_) => (0..expected_num_region_params).map(|_| ty::ReStatic).collect() } - } + }; - Some(p.clone()) - } + // If a self-type was declared, one should be provided. + assert_eq!(decl_generics.has_self, self_ty.is_some()); - if param_mode == PathParamMode::Optional && types_provided.is_empty() { - ty_param_defs - .iter() - .map(|p| this.ty_infer(default_type_parameter(p, self_ty), Some(&mut substs), - Some(TypeSpace), span)) - .collect() - } else { - types_provided - } -} + // Check the number of type parameters supplied by the user. + let ty_param_defs = &decl_generics.types[self_ty.is_some() as usize..]; + if !infer_types || num_types_provided > ty_param_defs.len() { + check_type_argument_count(tcx, span, num_types_provided, ty_param_defs); + } -struct ConvertedBinding<'tcx> { - item_name: ast::Name, - ty: Ty<'tcx>, - span: Span, -} + let is_object = self_ty.map_or(false, |ty| ty.sty == TRAIT_OBJECT_DUMMY_SELF); + let default_needs_object_self = |p: &ty::TypeParameterDef<'tcx>| { + if let Some(ref default) = p.default { + if is_object && default.has_self_ty() { + // There is no suitable inference default for a type parameter + // that references self, in an object type. 
+ return true; + } + } -fn convert_angle_bracketed_parameters<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - decl_generics: &ty::Generics<'tcx>, - data: &hir::AngleBracketedParameterData) - -> (Substs<'tcx>, - Vec>, - Vec>) -{ - let regions: Vec<_> = - data.lifetimes.iter() - .map(|l| ast_region_to_region(this.tcx(), l)) - .collect(); - - let region_substs = - create_region_substs(this, rscope, span, decl_generics, regions); - - let types: Vec<_> = - data.types.iter() - .enumerate() - .map(|(i,t)| ast_ty_arg_to_ty(this, rscope, decl_generics, - i, ®ion_substs, t)) - .collect(); - - let assoc_bindings: Vec<_> = - data.bindings.iter() - .map(|b| ConvertedBinding { item_name: b.name, - ty: ast_ty_to_ty(this, rscope, &*b.ty), - span: b.span }) - .collect(); - - (region_substs, types, assoc_bindings) -} + false + }; -/// Returns the appropriate lifetime to use for any output lifetimes -/// (if one exists) and a vector of the (pattern, number of lifetimes) -/// corresponding to each input type/pattern. -fn find_implied_output_region<'tcx>(tcx: &ty::ctxt<'tcx>, - input_tys: &[Ty<'tcx>], - input_pats: Vec) -> ElidedLifetime -{ - let mut lifetimes_for_params = Vec::new(); - let mut possible_implied_output_region = None; + let mut output_assoc_binding = None; + let substs = Substs::for_item(tcx, def_id, |def, _| { + let i = def.index as usize - self_ty.is_some() as usize; + tcx.mk_region(regions[i]) + }, |def, substs| { + let i = def.index as usize; - for (input_type, input_pat) in input_tys.iter().zip(input_pats) { - let mut regions = FnvHashSet(); - let have_bound_regions = tcx.collect_regions(input_type, &mut regions); + // Handle Self first, so we can adjust the index to match the AST. 
+ if let (0, Some(ty)) = (i, self_ty) { + return ty; + } - debug!("find_implied_output_regions: collected {:?} from {:?} \ - have_bound_regions={:?}", ®ions, input_type, have_bound_regions); + let i = i - self_ty.is_some() as usize - decl_generics.regions.len(); + if i < num_types_provided { + // A provided type parameter. + match *parameters { + hir::AngleBracketedParameters(ref data) => { + self.ast_ty_arg_to_ty(rscope, Some(def), substs, &data.types[i]) + } + hir::ParenthesizedParameters(ref data) => { + assert_eq!(i, 0); + let (ty, assoc) = + self.convert_parenthesized_parameters(rscope, substs, data); + output_assoc_binding = Some(assoc); + ty + } + } + } else if infer_types { + // No type parameters were provided, we can infer all. + let ty_var = if !default_needs_object_self(def) { + self.ty_infer_for_def(def, substs, span) + } else { + self.ty_infer(span) + }; + ty_var + } else if let Some(default) = def.default { + // No type parameter provided, but a default exists. + + // If we are converting an object type, then the + // `Self` parameter is unknown. However, some of the + // other type parameters may reference `Self` in their + // defaults. This will lead to an ICE if we are not + // careful! + if default_needs_object_self(def) { + struct_span_err!(tcx.sess, span, E0393, + "the type parameter `{}` must be explicitly specified", + def.name) + .span_label(span, &format!("missing reference to `{}`", def.name)) + .note(&format!("because of the default `Self` reference, \ + type parameters must be specified on object types")) + .emit(); + tcx.types.err + } else { + // This is a default type parameter. + default.subst_spanned(tcx, substs, Some(span)) + } + } else { + // We've already errored above about the mismatch. + tcx.types.err + } + }); - if regions.len() == 1 { - // there's a chance that the unique lifetime of this - // iteration will be the appropriate lifetime for output - // parameters, so lets store it. 
- possible_implied_output_region = regions.iter().cloned().next(); - } + let assoc_bindings = match *parameters { + hir::AngleBracketedParameters(ref data) => { + data.bindings.iter().map(|b| { + ConvertedBinding { + item_name: b.name, + ty: self.ast_ty_to_ty(rscope, &b.ty), + span: b.span + } + }).collect() + } + hir::ParenthesizedParameters(ref data) => { + vec![output_assoc_binding.unwrap_or_else(|| { + // This is an error condition, but we should + // get the associated type binding anyway. + self.convert_parenthesized_parameters(rscope, substs, data).1 + })] + } + }; - lifetimes_for_params.push(ElisionFailureInfo { - name: input_pat, - lifetime_count: regions.len(), - have_bound_regions: have_bound_regions - }); - } + debug!("create_substs_for_ast_path(decl_generics={:?}, self_ty={:?}) -> {:?}", + decl_generics, self_ty, substs); - if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::() == 1 { - Ok(possible_implied_output_region.unwrap()) - } else { - Err(Some(lifetimes_for_params)) + (substs, assoc_bindings) } -} -fn convert_ty_with_lifetime_elision<'tcx>(this: &AstConv<'tcx>, - elided_lifetime: ElidedLifetime, - ty: &hir::Ty) - -> Ty<'tcx> -{ - match elided_lifetime { - Ok(implied_output_region) => { - let rb = ElidableRscope::new(implied_output_region); - ast_ty_to_ty(this, &rb, ty) + /// Returns the appropriate lifetime to use for any output lifetimes + /// (if one exists) and a vector of the (pattern, number of lifetimes) + /// corresponding to each input type/pattern. 
+ fn find_implied_output_region(&self, + input_tys: &[Ty<'tcx>], + input_pats: F) -> ElidedLifetime + where F: FnOnce() -> Vec + { + let tcx = self.tcx(); + let mut lifetimes_for_params = Vec::new(); + let mut possible_implied_output_region = None; + + for input_type in input_tys.iter() { + let mut regions = FxHashSet(); + let have_bound_regions = tcx.collect_regions(input_type, &mut regions); + + debug!("find_implied_output_regions: collected {:?} from {:?} \ + have_bound_regions={:?}", ®ions, input_type, have_bound_regions); + + if regions.len() == 1 { + // there's a chance that the unique lifetime of this + // iteration will be the appropriate lifetime for output + // parameters, so lets store it. + possible_implied_output_region = regions.iter().cloned().next(); + } + + // Use a placeholder for `name` because computing it can be + // expensive and we don't want to do it until we know it's + // necessary. + lifetimes_for_params.push(ElisionFailureInfo { + name: String::new(), + lifetime_count: regions.len(), + have_bound_regions: have_bound_regions + }); } - Err(param_lifetimes) => { - // All regions must be explicitly specified in the output - // if the lifetime elision rules do not apply. This saves - // the user from potentially-confusing errors. - let rb = UnelidableRscope::new(param_lifetimes); - ast_ty_to_ty(this, &rb, ty) + + if lifetimes_for_params.iter().map(|e| e.lifetime_count).sum::() == 1 { + Ok(*possible_implied_output_region.unwrap()) + } else { + // Fill in the expensive `name` fields now that we know they're + // needed. 
+ for (info, input_pat) in lifetimes_for_params.iter_mut().zip(input_pats()) { + info.name = input_pat; + } + Err(Some(lifetimes_for_params)) } } -} -fn convert_parenthesized_parameters<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - decl_generics: &ty::Generics<'tcx>, - data: &hir::ParenthesizedParameterData) - -> (Substs<'tcx>, - Vec>, - Vec>) -{ - let region_substs = - create_region_substs(this, rscope, span, decl_generics, Vec::new()); - - let binding_rscope = BindingRscope::new(); - let inputs = - data.inputs.iter() - .map(|a_t| ast_ty_arg_to_ty(this, &binding_rscope, decl_generics, - 0, ®ion_substs, a_t)) - .collect::>>(); - - let input_params = vec![String::new(); inputs.len()]; - let implied_output_region = find_implied_output_region(this.tcx(), &inputs, input_params); - - let input_ty = this.tcx().mk_tup(inputs); - - let (output, output_span) = match data.output { - Some(ref output_ty) => { - (convert_ty_with_lifetime_elision(this, - implied_output_region, - &output_ty), - output_ty.span) - } - None => { - (this.tcx().mk_nil(), data.span) + fn convert_ty_with_lifetime_elision(&self, + elided_lifetime: ElidedLifetime, + ty: &hir::Ty, + anon_scope: Option) + -> Ty<'tcx> + { + match elided_lifetime { + Ok(implied_output_region) => { + let rb = ElidableRscope::new(implied_output_region); + self.ast_ty_to_ty(&MaybeWithAnonTypes::new(rb, anon_scope), ty) + } + Err(param_lifetimes) => { + // All regions must be explicitly specified in the output + // if the lifetime elision rules do not apply. This saves + // the user from potentially-confusing errors. 
+ let rb = UnelidableRscope::new(param_lifetimes); + self.ast_ty_to_ty(&MaybeWithAnonTypes::new(rb, anon_scope), ty) + } } - }; - - let output_binding = ConvertedBinding { - item_name: token::intern(FN_OUTPUT_NAME), - ty: output, - span: output_span - }; - - (region_substs, vec![input_ty], vec![output_binding]) -} + } -pub fn instantiate_poly_trait_ref<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - ast_trait_ref: &hir::PolyTraitRef, - self_ty: Option>, - poly_projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> -{ - let trait_ref = &ast_trait_ref.trait_ref; - let trait_def_id = trait_def_id(this, trait_ref); - ast_path_to_poly_trait_ref(this, - rscope, - trait_ref.path.span, - PathParamMode::Explicit, - trait_def_id, - self_ty, - trait_ref.path.segments.last().unwrap(), - poly_projections) -} + fn convert_parenthesized_parameters(&self, + rscope: &RegionScope, + region_substs: &[Kind<'tcx>], + data: &hir::ParenthesizedParameterData) + -> (Ty<'tcx>, ConvertedBinding<'tcx>) + { + let anon_scope = rscope.anon_type_scope(); + let binding_rscope = MaybeWithAnonTypes::new(BindingRscope::new(), anon_scope); + let inputs = self.tcx().mk_type_list(data.inputs.iter().map(|a_t| { + self.ast_ty_arg_to_ty(&binding_rscope, None, region_substs, a_t) + })); + let inputs_len = inputs.len(); + let input_params = || vec![String::new(); inputs_len]; + let implied_output_region = self.find_implied_output_region(&inputs, input_params); + + let (output, output_span) = match data.output { + Some(ref output_ty) => { + (self.convert_ty_with_lifetime_elision(implied_output_region, + &output_ty, + anon_scope), + output_ty.span) + } + None => { + (self.tcx().mk_nil(), data.span) + } + }; -/// Instantiates the path for the given trait reference, assuming that it's -/// bound to a valid trait type. Returns the def_id for the defining trait. -/// Fails if the type is a type other than a trait type. 
-/// -/// If the `projections` argument is `None`, then assoc type bindings like `Foo` -/// are disallowed. Otherwise, they are pushed onto the vector given. -pub fn instantiate_mono_trait_ref<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - trait_ref: &hir::TraitRef, - self_ty: Option>) - -> ty::TraitRef<'tcx> -{ - let trait_def_id = trait_def_id(this, trait_ref); - ast_path_to_mono_trait_ref(this, - rscope, - trait_ref.path.span, - PathParamMode::Explicit, - trait_def_id, - self_ty, - trait_ref.path.segments.last().unwrap()) -} + let output_binding = ConvertedBinding { + item_name: Symbol::intern(FN_OUTPUT_NAME), + ty: output, + span: output_span + }; -fn trait_def_id<'tcx>(this: &AstConv<'tcx>, trait_ref: &hir::TraitRef) -> DefId { - let path = &trait_ref.path; - match ::lookup_full_def(this.tcx(), path.span, trait_ref.ref_id) { - def::DefTrait(trait_def_id) => trait_def_id, - def::DefErr => { - this.tcx().sess.fatal("cannot continue compilation due to previous error"); - } - _ => { - span_fatal!(this.tcx().sess, path.span, E0245, "`{}` is not a trait", - path); - } + (self.tcx().mk_ty(ty::TyTuple(inputs)), output_binding) } -} -fn object_path_to_poly_trait_ref<'a,'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - trait_def_id: DefId, - trait_segment: &hir::PathSegment, - mut projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> -{ - ast_path_to_poly_trait_ref(this, - rscope, - span, - param_mode, - trait_def_id, - None, - trait_segment, - projections) -} + pub fn instantiate_poly_trait_ref(&self, + rscope: &RegionScope, + ast_trait_ref: &hir::PolyTraitRef, + self_ty: Ty<'tcx>, + poly_projections: &mut Vec>) + -> ty::PolyTraitRef<'tcx> + { + let trait_ref = &ast_trait_ref.trait_ref; + let trait_def_id = self.trait_def_id(trait_ref); + self.ast_path_to_poly_trait_ref(rscope, + trait_ref.path.span, + trait_def_id, + self_ty, + trait_ref.ref_id, + trait_ref.path.segments.last().unwrap(), + poly_projections) + } 
-fn ast_path_to_poly_trait_ref<'a,'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - trait_def_id: DefId, - self_ty: Option>, - trait_segment: &hir::PathSegment, - poly_projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> -{ - debug!("ast_path_to_poly_trait_ref(trait_segment={:?})", trait_segment); - // The trait reference introduces a binding level here, so - // we need to shift the `rscope`. It'd be nice if we could - // do away with this rscope stuff and work this knowledge - // into resolve_lifetimes, as we do with non-omitted - // lifetimes. Oh well, not there yet. - let shifted_rscope = &ShiftedRscope::new(rscope); - - let (substs, assoc_bindings) = - create_substs_for_ast_trait_ref(this, - shifted_rscope, - span, - param_mode, + /// Instantiates the path for the given trait reference, assuming that it's + /// bound to a valid trait type. Returns the def_id for the defining trait. + /// Fails if the type is a type other than a trait type. + /// + /// If the `projections` argument is `None`, then assoc type bindings like `Foo` + /// are disallowed. Otherwise, they are pushed onto the vector given. 
+ pub fn instantiate_mono_trait_ref(&self, + rscope: &RegionScope, + trait_ref: &hir::TraitRef, + self_ty: Ty<'tcx>) + -> ty::TraitRef<'tcx> + { + let trait_def_id = self.trait_def_id(trait_ref); + self.ast_path_to_mono_trait_ref(rscope, + trait_ref.path.span, trait_def_id, self_ty, - trait_segment); - let poly_trait_ref = ty::Binder(ty::TraitRef::new(trait_def_id, substs)); + trait_ref.path.segments.last().unwrap()) + } + + fn trait_def_id(&self, trait_ref: &hir::TraitRef) -> DefId { + let path = &trait_ref.path; + match path.def { + Def::Trait(trait_def_id) => trait_def_id, + Def::Err => { + self.tcx().sess.fatal("cannot continue compilation due to previous error"); + } + _ => { + span_fatal!(self.tcx().sess, path.span, E0245, "`{}` is not a trait", + path); + } + } + } + fn ast_path_to_poly_trait_ref(&self, + rscope: &RegionScope, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + path_id: ast::NodeId, + trait_segment: &hir::PathSegment, + poly_projections: &mut Vec>) + -> ty::PolyTraitRef<'tcx> { - let converted_bindings = - assoc_bindings - .iter() - .filter_map(|binding| { - // specify type to assert that error was already reported in Err case: - let predicate: Result<_, ErrorReported> = - ast_type_binding_to_poly_projection_predicate(this, - poly_trait_ref.clone(), - self_ty, - binding); - predicate.ok() // ok to ignore Err() because ErrorReported (see above) - }); - poly_projections.extend(converted_bindings); + debug!("ast_path_to_poly_trait_ref(trait_segment={:?})", trait_segment); + // The trait reference introduces a binding level here, so + // we need to shift the `rscope`. It'd be nice if we could + // do away with this rscope stuff and work this knowledge + // into resolve_lifetimes, as we do with non-omitted + // lifetimes. Oh well, not there yet. 
+ let shifted_rscope = &ShiftedRscope::new(rscope); + + let (substs, assoc_bindings) = + self.create_substs_for_ast_trait_ref(shifted_rscope, + span, + trait_def_id, + self_ty, + trait_segment); + let poly_trait_ref = ty::Binder(ty::TraitRef::new(trait_def_id, substs)); + + poly_projections.extend(assoc_bindings.iter().filter_map(|binding| { + // specify type to assert that error was already reported in Err case: + let predicate: Result<_, ErrorReported> = + self.ast_type_binding_to_poly_projection_predicate(path_id, + poly_trait_ref, + binding); + predicate.ok() // ok to ignore Err() because ErrorReported (see above) + })); + + debug!("ast_path_to_poly_trait_ref(trait_segment={:?}, projections={:?}) -> {:?}", + trait_segment, poly_projections, poly_trait_ref); + poly_trait_ref } - debug!("ast_path_to_poly_trait_ref(trait_segment={:?}, projections={:?}) -> {:?}", - trait_segment, poly_projections, poly_trait_ref); - poly_trait_ref -} + fn ast_path_to_mono_trait_ref(&self, + rscope: &RegionScope, + span: Span, + trait_def_id: DefId, + self_ty: Ty<'tcx>, + trait_segment: &hir::PathSegment) + -> ty::TraitRef<'tcx> + { + let (substs, assoc_bindings) = + self.create_substs_for_ast_trait_ref(rscope, + span, + trait_def_id, + self_ty, + trait_segment); + assoc_bindings.first().map(|b| self.tcx().prohibit_projection(b.span)); + ty::TraitRef::new(trait_def_id, substs) + } -fn ast_path_to_mono_trait_ref<'a,'tcx>(this: &AstConv<'tcx>, + fn create_substs_for_ast_trait_ref(&self, rscope: &RegionScope, span: Span, - param_mode: PathParamMode, trait_def_id: DefId, - self_ty: Option>, + self_ty: Ty<'tcx>, trait_segment: &hir::PathSegment) - -> ty::TraitRef<'tcx> -{ - let (substs, assoc_bindings) = - create_substs_for_ast_trait_ref(this, - rscope, - span, - param_mode, - trait_def_id, - self_ty, - trait_segment); - prohibit_projections(this.tcx(), &assoc_bindings); - ty::TraitRef::new(trait_def_id, substs) -} - -fn create_substs_for_ast_trait_ref<'a,'tcx>(this: &AstConv<'tcx>, - 
rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - trait_def_id: DefId, - self_ty: Option>, - trait_segment: &hir::PathSegment) - -> (&'tcx Substs<'tcx>, Vec>) -{ - debug!("create_substs_for_ast_trait_ref(trait_segment={:?})", - trait_segment); - - let trait_def = match this.get_trait_def(span, trait_def_id) { - Ok(trait_def) => trait_def, - Err(ErrorReported) => { - // No convenient way to recover from a cycle here. Just bail. Sorry! - this.tcx().sess.abort_if_errors(); - this.tcx().sess.bug("ErrorReported returned, but no errors reports?") - } - }; - - let (regions, types, assoc_bindings) = match trait_segment.parameters { - hir::AngleBracketedParameters(ref data) => { - // For now, require that parenthetical notation be used - // only with `Fn()` etc. - if !this.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar { - emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic, - "unboxed_closures", span, GateIssue::Language, - "\ - the precise format of `Fn`-family traits' type parameters is \ - subject to change. Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead"); + -> (&'tcx Substs<'tcx>, Vec>) + { + debug!("create_substs_for_ast_trait_ref(trait_segment={:?})", + trait_segment); + + let trait_def = match self.get_trait_def(span, trait_def_id) { + Ok(trait_def) => trait_def, + Err(ErrorReported) => { + // No convenient way to recover from a cycle here. Just bail. Sorry! + self.tcx().sess.abort_if_errors(); + bug!("ErrorReported returned, but no errors reports?") } + }; - convert_angle_bracketed_parameters(this, rscope, span, &trait_def.generics, data) - } - hir::ParenthesizedParameters(ref data) => { - // For now, require that parenthetical notation be used - // only with `Fn()` etc. 
- if !this.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar { - emit_feature_err(&this.tcx().sess.parse_sess.span_diagnostic, - "unboxed_closures", span, GateIssue::Language, - "\ - parenthetical notation is only stable when used with `Fn`-family traits"); + match trait_segment.parameters { + hir::AngleBracketedParameters(_) => { + // For now, require that parenthetical notation be used + // only with `Fn()` etc. + if !self.tcx().sess.features.borrow().unboxed_closures && trait_def.paren_sugar { + emit_feature_err(&self.tcx().sess.parse_sess, + "unboxed_closures", span, GateIssue::Language, + "\ + the precise format of `Fn`-family traits' \ + type parameters is subject to change. \ + Use parenthetical notation (Fn(Foo, Bar) -> Baz) instead"); + } + } + hir::ParenthesizedParameters(_) => { + // For now, require that parenthetical notation be used + // only with `Fn()` etc. + if !self.tcx().sess.features.borrow().unboxed_closures && !trait_def.paren_sugar { + emit_feature_err(&self.tcx().sess.parse_sess, + "unboxed_closures", span, GateIssue::Language, + "\ + parenthetical notation is only stable when used with `Fn`-family traits"); + } } - - convert_parenthesized_parameters(this, rscope, span, &trait_def.generics, data) } - }; - - let substs = create_substs_for_ast_path(this, - span, - param_mode, - &trait_def.generics, - self_ty, - types, - regions); - - (this.tcx().mk_substs(substs), assoc_bindings) -} -fn ast_type_binding_to_poly_projection_predicate<'tcx>( - this: &AstConv<'tcx>, - mut trait_ref: ty::PolyTraitRef<'tcx>, - self_ty: Option>, - binding: &ConvertedBinding<'tcx>) - -> Result, ErrorReported> -{ - let tcx = this.tcx(); - - // Given something like `U : SomeTrait`, we want to produce a - // predicate like `::T = X`. This is somewhat - // subtle in the event that `T` is defined in a supertrait of - // `SomeTrait`, because in that case we need to upcast. 
- // - // That is, consider this case: - // - // ``` - // trait SubTrait : SuperTrait { } - // trait SuperTrait { type T; } - // - // ... B : SubTrait ... - // ``` - // - // We want to produce `>::T == foo`. - - // Simple case: X is defined in the current trait. - if this.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) { - return Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------+ - projection_ty: ty::ProjectionTy { // | - trait_ref: trait_ref.skip_binder().clone(), // Binder moved here --+ - item_name: binding.item_name, - }, - ty: binding.ty, - })); + self.create_substs_for_ast_path(rscope, + span, + trait_def_id, + &trait_segment.parameters, + Some(self_ty)) } - // Otherwise, we have to walk through the supertraits to find - // those that do. This is complicated by the fact that, for an - // object type, the `Self` type is not present in the - // substitutions (after all, it's being constructed right now), - // but the `supertraits` iterator really wants one. To handle - // this, we currently insert a dummy type and then remove it - // later. Yuck. 
- - let dummy_self_ty = tcx.mk_infer(ty::FreshTy(0)); - if self_ty.is_none() { // if converting for an object type - let mut dummy_substs = trait_ref.skip_binder().substs.clone(); // binder moved here -+ - assert!(dummy_substs.self_ty().is_none()); // | - dummy_substs.types.push(SelfSpace, dummy_self_ty); // | - trait_ref = ty::Binder(ty::TraitRef::new(trait_ref.def_id(), // <------------+ - tcx.mk_substs(dummy_substs))); + fn trait_defines_associated_type_named(&self, + trait_def_id: DefId, + assoc_name: ast::Name) + -> bool + { + self.tcx().associated_items(trait_def_id).any(|item| { + item.kind == ty::AssociatedKind::Type && item.name == assoc_name + }) } - try!(this.ensure_super_predicates(binding.span, trait_ref.def_id())); - - let mut candidates: Vec = - traits::supertraits(tcx, trait_ref.clone()) - .filter(|r| this.trait_defines_associated_type_named(r.def_id(), binding.item_name)) - .collect(); - - // If converting for an object type, then remove the dummy-ty from `Self` now. - // Yuckety yuck. - if self_ty.is_none() { - for candidate in &mut candidates { - let mut dummy_substs = candidate.0.substs.clone(); - assert!(dummy_substs.self_ty() == Some(dummy_self_ty)); - dummy_substs.types.pop(SelfSpace); - *candidate = ty::Binder(ty::TraitRef::new(candidate.def_id(), - tcx.mk_substs(dummy_substs))); + fn ast_type_binding_to_poly_projection_predicate( + &self, + path_id: ast::NodeId, + trait_ref: ty::PolyTraitRef<'tcx>, + binding: &ConvertedBinding<'tcx>) + -> Result, ErrorReported> + { + let tcx = self.tcx(); + + // Given something like `U : SomeTrait`, we want to produce a + // predicate like `::T = X`. This is somewhat + // subtle in the event that `T` is defined in a supertrait of + // `SomeTrait`, because in that case we need to upcast. + // + // That is, consider this case: + // + // ``` + // trait SubTrait : SuperTrait { } + // trait SuperTrait { type T; } + // + // ... B : SubTrait ... + // ``` + // + // We want to produce `>::T == foo`. 
+ + // Find any late-bound regions declared in `ty` that are not + // declared in the trait-ref. These are not wellformed. + // + // Example: + // + // for<'a> ::Item = &'a str // <-- 'a is bad + // for<'a> >::Output = &'a str // <-- 'a is ok + let late_bound_in_trait_ref = tcx.collect_constrained_late_bound_regions(&trait_ref); + let late_bound_in_ty = tcx.collect_referenced_late_bound_regions(&ty::Binder(binding.ty)); + debug!("late_bound_in_trait_ref = {:?}", late_bound_in_trait_ref); + debug!("late_bound_in_ty = {:?}", late_bound_in_ty); + for br in late_bound_in_ty.difference(&late_bound_in_trait_ref) { + let br_name = match *br { + ty::BrNamed(_, name, _) => name, + _ => { + span_bug!( + binding.span, + "anonymous bound region {:?} in binding but not trait ref", + br); + } + }; + tcx.sess.add_lint( + lint::builtin::HR_LIFETIME_IN_ASSOC_TYPE, + path_id, + binding.span, + format!("binding for associated type `{}` references lifetime `{}`, \ + which does not appear in the trait input types", + binding.item_name, br_name)); + } + + // Simple case: X is defined in the current trait. + if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) { + return Ok(trait_ref.map_bound(|trait_ref| { + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref, + item_name: binding.item_name, + }, + ty: binding.ty, + } + })); } - } - let candidate = try!(one_bound_for_assoc_type(tcx, - candidates, - &trait_ref.to_string(), - &binding.item_name.as_str(), - binding.span)); - - Ok(ty::Binder(ty::ProjectionPredicate { // <-------------------------+ - projection_ty: ty::ProjectionTy { // | - trait_ref: candidate.skip_binder().clone(), // binder is moved up here --+ - item_name: binding.item_name, - }, - ty: binding.ty, - })) -} + // Otherwise, we have to walk through the supertraits to find + // those that do. 
+ self.ensure_super_predicates(binding.span, trait_ref.def_id())?; -fn ast_path_to_ty<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - did: DefId, - item_segment: &hir::PathSegment) - -> Ty<'tcx> -{ - let tcx = this.tcx(); - let (generics, decl_ty) = match this.get_item_type_scheme(span, did) { - Ok(ty::TypeScheme { generics, ty: decl_ty }) => { - (generics, decl_ty) - } - Err(ErrorReported) => { - return tcx.types.err; - } - }; + let candidates = + traits::supertraits(tcx, trait_ref.clone()) + .filter(|r| self.trait_defines_associated_type_named(r.def_id(), binding.item_name)); - let substs = ast_path_substs_for_ty(this, - rscope, - span, - param_mode, - &generics, - item_segment); - - // FIXME(#12938): This is a hack until we have full support for DST. - if Some(did) == this.tcx().lang_items.owned_box() { - assert_eq!(substs.types.len(TypeSpace), 1); - return this.tcx().mk_box(*substs.types.get(TypeSpace, 0)); + let candidate = self.one_bound_for_assoc_type(candidates, + &trait_ref.to_string(), + &binding.item_name.as_str(), + binding.span)?; + + Ok(candidate.map_bound(|trait_ref| { + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref, + item_name: binding.item_name, + }, + ty: binding.ty, + } + })) } - decl_ty.subst(this.tcx(), &substs) -} + fn ast_path_to_ty(&self, + rscope: &RegionScope, + span: Span, + did: DefId, + item_segment: &hir::PathSegment) + -> Ty<'tcx> + { + let tcx = self.tcx(); + let decl_ty = match self.get_item_type(span, did) { + Ok(ty) => ty, + Err(ErrorReported) => { + return tcx.types.err; + } + }; -type TraitAndProjections<'tcx> = (ty::PolyTraitRef<'tcx>, Vec>); + let substs = self.ast_path_substs_for_ty(rscope, + span, + did, + item_segment); -fn ast_ty_to_trait_ref<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - ty: &hir::Ty, - bounds: &[hir::TyParamBound]) - -> Result, ErrorReported> -{ - /*! 
- * In a type like `Foo + Send`, we want to wait to collect the - * full set of bounds before we make the object type, because we - * need them to infer a region bound. (For example, if we tried - * made a type from just `Foo`, then it wouldn't be enough to - * infer a 'static bound, and hence the user would get an error.) - * So this function is used when we're dealing with a sum type to - * convert the LHS. It only accepts a type that refers to a trait - * name, and reports an error otherwise. - */ - - match ty.node { - hir::TyPath(None, ref path) => { - let def = match this.tcx().def_map.borrow().get(&ty.id) { - Some(&def::PathResolution { base_def, depth: 0, .. }) => Some(base_def), - _ => None - }; - match def { - Some(def::DefTrait(trait_def_id)) => { - let mut projection_bounds = Vec::new(); - let trait_ref = object_path_to_poly_trait_ref(this, - rscope, - path.span, - PathParamMode::Explicit, - trait_def_id, - path.segments.last().unwrap(), - &mut projection_bounds); - Ok((trait_ref, projection_bounds)) - } - _ => { - span_err!(this.tcx().sess, ty.span, E0172, "expected a reference to a trait"); - Err(ErrorReported) - } - } + // FIXME(#12938): This is a hack until we have full support for DST. 
+ if Some(did) == self.tcx().lang_items.owned_box() { + assert_eq!(substs.types().count(), 1); + return self.tcx().mk_box(substs.type_at(0)); } - _ => { - let mut err = struct_span_err!(this.tcx().sess, ty.span, E0178, - "expected a path on the left-hand side of `+`, not `{}`", - pprust::ty_to_string(ty)); - let hi = bounds.iter().map(|x| match *x { - hir::TraitTyParamBound(ref tr, _) => tr.span.hi, - hir::RegionTyParamBound(ref r) => r.span.hi, - }).max_by_key(|x| x.to_usize()); - let full_span = hi.map(|hi| Span { - lo: ty.span.lo, - hi: hi, - expn_id: ty.span.expn_id, - }); - match (&ty.node, full_span) { - (&hir::TyRptr(None, ref mut_ty), Some(full_span)) => { - let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" }; - err.span_suggestion(full_span, "try adding parentheses (per RFC 438):", - format!("&{}({} +{})", - mutbl_str, - pprust::ty_to_string(&*mut_ty.ty), - pprust::bounds_to_string(bounds))); - } - (&hir::TyRptr(Some(ref lt), ref mut_ty), Some(full_span)) => { - let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" }; - err.span_suggestion(full_span, "try adding parentheses (per RFC 438):", - format!("&{} {}({} +{})", - pprust::lifetime_to_string(lt), - mutbl_str, - pprust::ty_to_string(&*mut_ty.ty), - pprust::bounds_to_string(bounds))); - } - _ => { - fileline_help!(&mut err, ty.span, - "perhaps you forgot parentheses? 
(per RFC 438)"); - } - } - err.emit(); - Err(ErrorReported) - } + decl_ty.subst(self.tcx(), substs) } -} -fn trait_ref_to_object_type<'tcx>(this: &AstConv<'tcx>, + fn ast_ty_to_object_trait_ref(&self, rscope: &RegionScope, span: Span, - trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, + ty: &hir::Ty, bounds: &[hir::TyParamBound]) -> Ty<'tcx> -{ - let existential_bounds = conv_existential_bounds(this, - rscope, - span, - trait_ref.clone(), - projection_bounds, - bounds); - - let result = make_object_type(this, span, trait_ref, existential_bounds); - debug!("trait_ref_to_object_type: result={:?}", - result); - - result -} - -fn make_object_type<'tcx>(this: &AstConv<'tcx>, - span: Span, - principal: ty::PolyTraitRef<'tcx>, - bounds: ty::ExistentialBounds<'tcx>) - -> Ty<'tcx> { - let tcx = this.tcx(); - let object = ty::TraitTy { - principal: principal, - bounds: bounds - }; - let object_trait_ref = - object.principal_trait_ref_with_self_ty(tcx, tcx.types.err); - - // ensure the super predicates and stop if we encountered an error - if this.ensure_super_predicates(span, principal.def_id()).is_err() { - return tcx.types.err; - } + { + /*! + * In a type like `Foo + Send`, we want to wait to collect the + * full set of bounds before we make the object type, because we + * need them to infer a region bound. (For example, if we tried + * made a type from just `Foo`, then it wouldn't be enough to + * infer a 'static bound, and hence the user would get an error.) + * So this function is used when we're dealing with a sum type to + * convert the LHS. It only accepts a type that refers to a trait + * name, and reports an error otherwise. 
+ */ + + let tcx = self.tcx(); + match ty.node { + hir::TyPath(hir::QPath::Resolved(None, ref path)) => { + if let Def::Trait(trait_def_id) = path.def { + self.trait_path_to_object_type(rscope, + path.span, + trait_def_id, + ty.id, + path.segments.last().unwrap(), + span, + partition_bounds(bounds)) + } else { + struct_span_err!(tcx.sess, ty.span, E0172, + "expected a reference to a trait") + .span_label(ty.span, &format!("expected a trait")) + .emit(); + tcx.types.err + } + } + _ => { + let mut err = struct_span_err!(tcx.sess, ty.span, E0178, + "expected a path on the left-hand side \ + of `+`, not `{}`", + pprust::ty_to_string(ty)); + err.span_label(ty.span, &format!("expected a path")); + let hi = bounds.iter().map(|x| match *x { + hir::TraitTyParamBound(ref tr, _) => tr.span.hi, + hir::RegionTyParamBound(ref r) => r.span.hi, + }).max_by_key(|x| x.to_usize()); + let full_span = hi.map(|hi| Span { + lo: ty.span.lo, + hi: hi, + expn_id: ty.span.expn_id, + }); + match (&ty.node, full_span) { + (&hir::TyRptr(None, ref mut_ty), Some(full_span)) => { + let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" }; + err.span_suggestion(full_span, "try adding parentheses (per RFC 438):", + format!("&{}({} +{})", + mutbl_str, + pprust::ty_to_string(&mut_ty.ty), + pprust::bounds_to_string(bounds))); + } + (&hir::TyRptr(Some(ref lt), ref mut_ty), Some(full_span)) => { + let mutbl_str = if mut_ty.mutbl == hir::MutMutable { "mut " } else { "" }; + err.span_suggestion(full_span, "try adding parentheses (per RFC 438):", + format!("&{} {}({} +{})", + pprust::lifetime_to_string(lt), + mutbl_str, + pprust::ty_to_string(&mut_ty.ty), + pprust::bounds_to_string(bounds))); + } - // check that there are no gross object safety violations, - // most importantly, that the supertraits don't contain Self, - // to avoid ICE-s. 
- let object_safety_violations = - traits::astconv_object_safety_violations(tcx, principal.def_id()); - if !object_safety_violations.is_empty() { - traits::report_object_safety_error( - tcx, span, principal.def_id(), object_safety_violations) - .emit(); - return tcx.types.err; + _ => { + help!(&mut err, + "perhaps you forgot parentheses? (per RFC 438)"); + } + } + err.emit(); + tcx.types.err + } + } } - let mut associated_types: FnvHashSet<(DefId, ast::Name)> = - traits::supertraits(tcx, object_trait_ref) - .flat_map(|tr| { - let trait_def = tcx.lookup_trait_def(tr.def_id()); - trait_def.associated_type_names - .clone() - .into_iter() - .map(move |associated_type_name| (tr.def_id(), associated_type_name)) - }) - .collect(); - - for projection_bound in &object.bounds.projection_bounds { - let pair = (projection_bound.0.projection_ty.trait_ref.def_id, - projection_bound.0.projection_ty.item_name); - associated_types.remove(&pair); + /// Transform a PolyTraitRef into a PolyExistentialTraitRef by + /// removing the dummy Self type (TRAIT_OBJECT_DUMMY_SELF). 
+ fn trait_ref_to_existential(&self, trait_ref: ty::TraitRef<'tcx>) + -> ty::ExistentialTraitRef<'tcx> { + assert_eq!(trait_ref.self_ty().sty, TRAIT_OBJECT_DUMMY_SELF); + ty::ExistentialTraitRef::erase_self_ty(self.tcx(), trait_ref) } - for (trait_def_id, name) in associated_types { - span_err!(tcx.sess, span, E0191, - "the value of the associated type `{}` (from the trait `{}`) must be specified", - name, - tcx.item_path_str(trait_def_id)); - } + fn trait_path_to_object_type(&self, + rscope: &RegionScope, + path_span: Span, + trait_def_id: DefId, + trait_path_ref_id: ast::NodeId, + trait_segment: &hir::PathSegment, + span: Span, + partitioned_bounds: PartitionedBounds) + -> Ty<'tcx> { + let tcx = self.tcx(); + + let mut projection_bounds = vec![]; + let dummy_self = tcx.mk_ty(TRAIT_OBJECT_DUMMY_SELF); + let principal = self.ast_path_to_poly_trait_ref(rscope, + path_span, + trait_def_id, + dummy_self, + trait_path_ref_id, + trait_segment, + &mut projection_bounds); + + let PartitionedBounds { trait_bounds, + region_bounds } = + partitioned_bounds; + + let (auto_traits, trait_bounds) = split_auto_traits(tcx, trait_bounds); + + if !trait_bounds.is_empty() { + let b = &trait_bounds[0]; + let span = b.trait_ref.path.span; + struct_span_err!(self.tcx().sess, span, E0225, + "only Send/Sync traits can be used as additional traits in a trait object") + .span_label(span, &format!("non-Send/Sync additional trait")) + .emit(); + } + + // Erase the dummy_self (TRAIT_OBJECT_DUMMY_SELF) used above. 
+ let existential_principal = principal.map_bound(|trait_ref| { + self.trait_ref_to_existential(trait_ref) + }); + let existential_projections = projection_bounds.iter().map(|bound| { + bound.map_bound(|b| { + let p = b.projection_ty; + ty::ExistentialProjection { + trait_ref: self.trait_ref_to_existential(p.trait_ref), + item_name: p.item_name, + ty: b.ty + } + }) + }); - tcx.mk_trait(object.principal, object.bounds) -} + // ensure the super predicates and stop if we encountered an error + if self.ensure_super_predicates(span, principal.def_id()).is_err() { + return tcx.types.err; + } -fn report_ambiguous_associated_type(tcx: &ty::ctxt, - span: Span, - type_str: &str, - trait_str: &str, - name: &str) { - span_err!(tcx.sess, span, E0223, - "ambiguous associated type; specify the type using the syntax \ - `<{} as {}>::{}`", - type_str, trait_str, name); -} + // check that there are no gross object safety violations, + // most importantly, that the supertraits don't contain Self, + // to avoid ICE-s. + let object_safety_violations = + tcx.astconv_object_safety_violations(principal.def_id()); + if !object_safety_violations.is_empty() { + tcx.report_object_safety_error( + span, principal.def_id(), object_safety_violations) + .emit(); + return tcx.types.err; + } -// Search for a bound on a type parameter which includes the associated item -// given by assoc_name. ty_param_node_id is the node id for the type parameter -// (which might be `Self`, but only if it is the `Self` of a trait, not an -// impl). This function will fail if there are no suitable bounds or there is -// any ambiguity. 
-fn find_bound_for_assoc_item<'tcx>(this: &AstConv<'tcx>, - ty_param_node_id: ast::NodeId, - ty_param_name: ast::Name, - assoc_name: ast::Name, - span: Span) - -> Result, ErrorReported> -{ - let tcx = this.tcx(); + let mut associated_types = FxHashSet::default(); + for tr in traits::supertraits(tcx, principal) { + associated_types.extend(tcx.associated_items(tr.def_id()) + .filter(|item| item.kind == ty::AssociatedKind::Type) + .map(|item| (tr.def_id(), item.name))); + } + + for projection_bound in &projection_bounds { + let pair = (projection_bound.0.projection_ty.trait_ref.def_id, + projection_bound.0.projection_ty.item_name); + associated_types.remove(&pair); + } + + for (trait_def_id, name) in associated_types { + struct_span_err!(tcx.sess, span, E0191, + "the value of the associated type `{}` (from the trait `{}`) must be specified", + name, + tcx.item_path_str(trait_def_id)) + .span_label(span, &format!( + "missing associated type `{}` value", name)) + .emit(); + } + + let mut v = + iter::once(ty::ExistentialPredicate::Trait(*existential_principal.skip_binder())) + .chain(auto_traits.into_iter().map(ty::ExistentialPredicate::AutoTrait)) + .chain(existential_projections + .map(|x| ty::ExistentialPredicate::Projection(*x.skip_binder()))) + .collect::>(); + v.sort_by(|a, b| a.cmp(tcx, b)); + let existential_predicates = ty::Binder(tcx.mk_existential_predicates(v.into_iter())); + + let region_bound = self.compute_object_lifetime_bound(span, + ®ion_bounds, + existential_predicates); + + let region_bound = match region_bound { + Some(r) => r, + None => { + tcx.mk_region(match rscope.object_lifetime_default(span) { + Some(r) => r, + None => { + span_err!(self.tcx().sess, span, E0228, + "the lifetime bound for this object type cannot be deduced \ + from context; please supply an explicit bound"); + ty::ReStatic + } + }) + } + }; - let bounds = match this.get_type_parameter_bounds(span, ty_param_node_id) { - Ok(v) => v, - Err(ErrorReported) => { - return 
Err(ErrorReported); - } - }; + debug!("region_bound: {:?}", region_bound); - // Ensure the super predicates and stop if we encountered an error. - if bounds.iter().any(|b| this.ensure_super_predicates(span, b.def_id()).is_err()) { - return Err(ErrorReported); + let ty = tcx.mk_dynamic(existential_predicates, region_bound); + debug!("trait_object_type: {:?}", ty); + ty } - // Check that there is exactly one way to find an associated type with the - // correct name. - let suitable_bounds: Vec<_> = - traits::transitive_bounds(tcx, &bounds) - .filter(|b| this.trait_defines_associated_type_named(b.def_id(), assoc_name)) - .collect(); - - one_bound_for_assoc_type(tcx, - suitable_bounds, - &ty_param_name.as_str(), - &assoc_name.as_str(), - span) -} - + fn report_ambiguous_associated_type(&self, + span: Span, + type_str: &str, + trait_str: &str, + name: &str) { + struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type") + .span_label(span, &format!("ambiguous associated type")) + .note(&format!("specify the type using the syntax `<{} as {}>::{}`", + type_str, trait_str, name)) + .emit(); -// Checks that bounds contains exactly one element and reports appropriate -// errors otherwise. -fn one_bound_for_assoc_type<'tcx>(tcx: &ty::ctxt<'tcx>, - bounds: Vec>, - ty_param_name: &str, - assoc_name: &str, - span: Span) - -> Result, ErrorReported> -{ - if bounds.is_empty() { - span_err!(tcx.sess, span, E0220, - "associated type `{}` not found for `{}`", - assoc_name, - ty_param_name); - return Err(ErrorReported); } - if bounds.len() > 1 { - let mut err = struct_span_err!(tcx.sess, span, E0221, - "ambiguous associated type `{}` in bounds of `{}`", - assoc_name, - ty_param_name); - - for bound in &bounds { - span_note!(&mut err, span, - "associated type `{}` could derive from `{}`", - ty_param_name, - bound); + // Search for a bound on a type parameter which includes the associated item + // given by assoc_name. 
ty_param_node_id is the node id for the type parameter + // (which might be `Self`, but only if it is the `Self` of a trait, not an + // impl). This function will fail if there are no suitable bounds or there is + // any ambiguity. + fn find_bound_for_assoc_item(&self, + ty_param_node_id: ast::NodeId, + ty_param_name: ast::Name, + assoc_name: ast::Name, + span: Span) + -> Result, ErrorReported> + { + let tcx = self.tcx(); + + let bounds = match self.get_type_parameter_bounds(span, ty_param_node_id) { + Ok(v) => v, + Err(ErrorReported) => { + return Err(ErrorReported); + } + }; + + // Ensure the super predicates and stop if we encountered an error. + if bounds.iter().any(|b| self.ensure_super_predicates(span, b.def_id()).is_err()) { + return Err(ErrorReported); } - err.emit(); - } - Ok(bounds[0].clone()) -} + // Check that there is exactly one way to find an associated type with the + // correct name. + let suitable_bounds = + traits::transitive_bounds(tcx, &bounds) + .filter(|b| self.trait_defines_associated_type_named(b.def_id(), assoc_name)); + + self.one_bound_for_assoc_type(suitable_bounds, + &ty_param_name.as_str(), + &assoc_name.as_str(), + span) + } -// Create a type from a path to an associated type. -// For a path A::B::C::D, ty and ty_path_def are the type and def for A::B::C -// and item_segment is the path segment for D. We return a type and a def for -// the whole path. -// Will fail except for T::A and Self::A; i.e., if ty/ty_path_def are not a type -// parameter or Self. -fn associated_path_def_to_ty<'tcx>(this: &AstConv<'tcx>, - span: Span, - ty: Ty<'tcx>, - ty_path_def: def::Def, - item_segment: &hir::PathSegment) - -> (Ty<'tcx>, def::Def) -{ - let tcx = this.tcx(); - let assoc_name = item_segment.identifier.name; - - debug!("associated_path_def_to_ty: {:?}::{}", ty, assoc_name); - - prohibit_type_params(tcx, slice::ref_slice(item_segment)); - - // Find the type of the associated item, and the trait where the associated - // item is declared. 
- let bound = match (&ty.sty, ty_path_def) { - (_, def::DefSelfTy(Some(trait_did), Some((impl_id, _)))) => { - // `Self` in an impl of a trait - we have a concrete self type and a - // trait reference. - let trait_ref = tcx.impl_trait_ref(tcx.map.local_def_id(impl_id)).unwrap(); - let trait_ref = if let Some(free_substs) = this.get_free_substs() { - trait_ref.subst(tcx, free_substs) - } else { - trait_ref - }; - if this.ensure_super_predicates(span, trait_did).is_err() { - return (tcx.types.err, ty_path_def); + // Checks that bounds contains exactly one element and reports appropriate + // errors otherwise. + fn one_bound_for_assoc_type(&self, + mut bounds: I, + ty_param_name: &str, + assoc_name: &str, + span: Span) + -> Result, ErrorReported> + where I: Iterator> + { + let bound = match bounds.next() { + Some(bound) => bound, + None => { + struct_span_err!(self.tcx().sess, span, E0220, + "associated type `{}` not found for `{}`", + assoc_name, + ty_param_name) + .span_label(span, &format!("associated type `{}` not found", assoc_name)) + .emit(); + return Err(ErrorReported); } + }; - let candidates: Vec = - traits::supertraits(tcx, ty::Binder(trait_ref)) - .filter(|r| this.trait_defines_associated_type_named(r.def_id(), - assoc_name)) - .collect(); + if let Some(bound2) = bounds.next() { + let bounds = iter::once(bound).chain(iter::once(bound2)).chain(bounds); + let mut err = struct_span_err!( + self.tcx().sess, span, E0221, + "ambiguous associated type `{}` in bounds of `{}`", + assoc_name, + ty_param_name); + err.span_label(span, &format!("ambiguous associated type `{}`", assoc_name)); + + for bound in bounds { + let bound_span = self.tcx().associated_items(bound.def_id()).find(|item| { + item.kind == ty::AssociatedKind::Type && item.name == assoc_name + }) + .and_then(|item| self.tcx().map.span_if_local(item.def_id)); - match one_bound_for_assoc_type(tcx, - candidates, - "Self", - &assoc_name.as_str(), - span) { - Ok(bound) => bound, - Err(ErrorReported) => 
return (tcx.types.err, ty_path_def), - } - } - (&ty::TyParam(_), def::DefSelfTy(Some(trait_did), None)) => { - let trait_node_id = tcx.map.as_local_node_id(trait_did).unwrap(); - match find_bound_for_assoc_item(this, - trait_node_id, - token::special_idents::type_self.name, - assoc_name, - span) { - Ok(bound) => bound, - Err(ErrorReported) => return (tcx.types.err, ty_path_def), - } - } - (&ty::TyParam(_), def::DefTyParam(_, _, param_did, param_name)) => { - let param_node_id = tcx.map.as_local_node_id(param_did).unwrap(); - match find_bound_for_assoc_item(this, - param_node_id, - param_name, - assoc_name, - span) { - Ok(bound) => bound, - Err(ErrorReported) => return (tcx.types.err, ty_path_def), + if let Some(span) = bound_span { + err.span_label(span, &format!("ambiguous `{}` from `{}`", + assoc_name, + bound)); + } else { + span_note!(&mut err, span, + "associated type `{}` could derive from `{}`", + ty_param_name, + bound); + } } + err.emit(); } - _ => { - report_ambiguous_associated_type(tcx, - span, - &ty.to_string(), - "Trait", - &assoc_name.as_str()); - return (tcx.types.err, ty_path_def); - } - }; - let trait_did = bound.0.def_id; - let ty = this.projected_ty_from_poly_trait_ref(span, bound, assoc_name); + return Ok(bound); + } - let item_did = if let Some(trait_id) = tcx.map.as_local_node_id(trait_did) { - // `ty::trait_items` used below requires information generated - // by type collection, which may be in progress at this point. - match tcx.map.expect_item(trait_id).node { - hir::ItemTrait(_, _, _, ref trait_items) => { - let item = trait_items.iter() - .find(|i| i.name == assoc_name) - .expect("missing associated type"); - tcx.map.local_def_id(item.id) - } - _ => unreachable!() - } - } else { - let trait_items = tcx.trait_items(trait_did); - let item = trait_items.iter().find(|i| i.name() == assoc_name); - item.expect("missing associated type").def_id() - }; + // Create a type from a path to an associated type. 
+ // For a path A::B::C::D, ty and ty_path_def are the type and def for A::B::C + // and item_segment is the path segment for D. We return a type and a def for + // the whole path. + // Will fail except for T::A and Self::A; i.e., if ty/ty_path_def are not a type + // parameter or Self. + pub fn associated_path_def_to_ty(&self, + ref_id: ast::NodeId, + span: Span, + ty: Ty<'tcx>, + ty_path_def: Def, + item_segment: &hir::PathSegment) + -> (Ty<'tcx>, Def) + { + let tcx = self.tcx(); + let assoc_name = item_segment.name; + + debug!("associated_path_def_to_ty: {:?}::{}", ty, assoc_name); + + tcx.prohibit_type_params(slice::ref_slice(item_segment)); + + // Find the type of the associated item, and the trait where the associated + // item is declared. + let bound = match (&ty.sty, ty_path_def) { + (_, Def::SelfTy(Some(_), Some(impl_def_id))) => { + // `Self` in an impl of a trait - we have a concrete self type and a + // trait reference. + let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); + let trait_ref = if let Some(free_substs) = self.get_free_substs() { + trait_ref.subst(tcx, free_substs) + } else { + trait_ref + }; - (ty, def::DefAssociatedTy(trait_did, item_did)) -} + if self.ensure_super_predicates(span, trait_ref.def_id).is_err() { + return (tcx.types.err, Def::Err); + } -fn qpath_to_ty<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - opt_self_ty: Option>, - trait_def_id: DefId, - trait_segment: &hir::PathSegment, - item_segment: &hir::PathSegment) - -> Ty<'tcx> -{ - let tcx = this.tcx(); + let candidates = + traits::supertraits(tcx, ty::Binder(trait_ref)) + .filter(|r| self.trait_defines_associated_type_named(r.def_id(), + assoc_name)); + + match self.one_bound_for_assoc_type(candidates, + "Self", + &assoc_name.as_str(), + span) { + Ok(bound) => bound, + Err(ErrorReported) => return (tcx.types.err, Def::Err), + } + } + (&ty::TyParam(_), Def::SelfTy(Some(trait_did), None)) => { + let trait_node_id = 
tcx.map.as_local_node_id(trait_did).unwrap(); + match self.find_bound_for_assoc_item(trait_node_id, + keywords::SelfType.name(), + assoc_name, + span) { + Ok(bound) => bound, + Err(ErrorReported) => return (tcx.types.err, Def::Err), + } + } + (&ty::TyParam(_), Def::TyParam(param_did)) => { + let param_node_id = tcx.map.as_local_node_id(param_did).unwrap(); + let param_name = tcx.type_parameter_def(param_node_id).name; + match self.find_bound_for_assoc_item(param_node_id, + param_name, + assoc_name, + span) { + Ok(bound) => bound, + Err(ErrorReported) => return (tcx.types.err, Def::Err), + } + } + _ => { + // Don't print TyErr to the user. + if !ty.references_error() { + self.report_ambiguous_associated_type(span, + &ty.to_string(), + "Trait", + &assoc_name.as_str()); + } + return (tcx.types.err, Def::Err); + } + }; - prohibit_type_params(tcx, slice::ref_slice(item_segment)); + let trait_did = bound.0.def_id; + let ty = self.projected_ty_from_poly_trait_ref(span, bound, assoc_name); - let self_ty = if let Some(ty) = opt_self_ty { - ty - } else { - let path_str = tcx.item_path_str(trait_def_id); - report_ambiguous_associated_type(tcx, - span, - "Type", - &path_str, - &item_segment.identifier.name.as_str()); - return tcx.types.err; - }; + let item = tcx.associated_items(trait_did).find(|i| i.name == assoc_name); + let def_id = item.expect("missing associated type").def_id; + tcx.check_stability(def_id, ref_id, span); + (ty, Def::AssociatedTy(def_id)) + } - debug!("qpath_to_ty: self_type={:?}", self_ty); + fn qpath_to_ty(&self, + rscope: &RegionScope, + span: Span, + opt_self_ty: Option>, + trait_def_id: DefId, + trait_segment: &hir::PathSegment, + item_segment: &hir::PathSegment) + -> Ty<'tcx> + { + let tcx = self.tcx(); - let trait_ref = ast_path_to_mono_trait_ref(this, - rscope, - span, - param_mode, - trait_def_id, - Some(self_ty), - trait_segment); + tcx.prohibit_type_params(slice::ref_slice(item_segment)); - debug!("qpath_to_ty: trait_ref={:?}", trait_ref); + let 
self_ty = if let Some(ty) = opt_self_ty { + ty + } else { + let path_str = tcx.item_path_str(trait_def_id); + self.report_ambiguous_associated_type(span, + "Type", + &path_str, + &item_segment.name.as_str()); + return tcx.types.err; + }; - this.projected_ty(span, trait_ref, item_segment.identifier.name) -} + debug!("qpath_to_ty: self_type={:?}", self_ty); -/// Convert a type supplied as value for a type argument from AST into our -/// our internal representation. This is the same as `ast_ty_to_ty` but that -/// it applies the object lifetime default. -/// -/// # Parameters -/// -/// * `this`, `rscope`: the surrounding context -/// * `decl_generics`: the generics of the struct/enum/trait declaration being -/// referenced -/// * `index`: the index of the type parameter being instantiated from the list -/// (we assume it is in the `TypeSpace`) -/// * `region_substs`: a partial substitution consisting of -/// only the region type parameters being supplied to this type. -/// * `ast_ty`: the ast representation of the type being supplied -pub fn ast_ty_arg_to_ty<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - decl_generics: &ty::Generics<'tcx>, - index: usize, - region_substs: &Substs<'tcx>, - ast_ty: &hir::Ty) - -> Ty<'tcx> -{ - let tcx = this.tcx(); + let trait_ref = self.ast_path_to_mono_trait_ref(rscope, + span, + trait_def_id, + self_ty, + trait_segment); - if let Some(def) = decl_generics.types.opt_get(TypeSpace, index) { - let object_lifetime_default = def.object_lifetime_default.subst(tcx, region_substs); - let rscope1 = &ObjectLifetimeDefaultRscope::new(rscope, object_lifetime_default); - ast_ty_to_ty(this, rscope1, ast_ty) - } else { - ast_ty_to_ty(this, rscope, ast_ty) + debug!("qpath_to_ty: trait_ref={:?}", trait_ref); + + self.projected_ty(span, trait_ref, item_segment.name) } -} -// Check the base def in a PathResolution and convert it to a Ty. If there are -// associated types in the PathResolution, these will need to be separately -// resolved. 
-fn base_def_to_ty<'tcx>(this: &AstConv<'tcx>, + /// Convert a type supplied as value for a type argument from AST into our + /// our internal representation. This is the same as `ast_ty_to_ty` but that + /// it applies the object lifetime default. + /// + /// # Parameters + /// + /// * `this`, `rscope`: the surrounding context + /// * `def`: the type parameter being instantiated (if available) + /// * `region_substs`: a partial substitution consisting of + /// only the region type parameters being supplied to this type. + /// * `ast_ty`: the ast representation of the type being supplied + fn ast_ty_arg_to_ty(&self, rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - def: &def::Def, - opt_self_ty: Option>, - base_segments: &[hir::PathSegment]) - -> Ty<'tcx> { - let tcx = this.tcx(); - - match *def { - def::DefTrait(trait_def_id) => { - // N.B. this case overlaps somewhat with - // TyObjectSum, see that fn for details - let mut projection_bounds = Vec::new(); - - let trait_ref = object_path_to_poly_trait_ref(this, - rscope, - span, - param_mode, - trait_def_id, - base_segments.last().unwrap(), - &mut projection_bounds); - - prohibit_type_params(tcx, base_segments.split_last().unwrap().1); - trait_ref_to_object_type(this, - rscope, - span, - trait_ref, - projection_bounds, - &[]) - } - def::DefTy(did, _) | def::DefStruct(did) => { - prohibit_type_params(tcx, base_segments.split_last().unwrap().1); - ast_path_to_ty(this, - rscope, - span, - param_mode, - did, - base_segments.last().unwrap()) - } - def::DefTyParam(space, index, _, name) => { - prohibit_type_params(tcx, base_segments); - tcx.mk_param(space, index, name) + def: Option<&ty::TypeParameterDef<'tcx>>, + region_substs: &[Kind<'tcx>], + ast_ty: &hir::Ty) + -> Ty<'tcx> + { + let tcx = self.tcx(); + + if let Some(def) = def { + let object_lifetime_default = def.object_lifetime_default.subst(tcx, region_substs); + let rscope1 = &ObjectLifetimeDefaultRscope::new(rscope, object_lifetime_default); + 
self.ast_ty_to_ty(rscope1, ast_ty) + } else { + self.ast_ty_to_ty(rscope, ast_ty) } - def::DefSelfTy(_, Some((_, self_ty_id))) => { - // Self in impl (we know the concrete type). - prohibit_type_params(tcx, base_segments); - if let Some(&ty) = tcx.ast_ty_to_ty_cache.borrow().get(&self_ty_id) { - if let Some(free_substs) = this.get_free_substs() { + } + + // Check a type Path and convert it to a Ty. + pub fn def_to_ty(&self, + rscope: &RegionScope, + opt_self_ty: Option>, + path: &hir::Path, + path_id: ast::NodeId, + permit_variants: bool) + -> Ty<'tcx> { + let tcx = self.tcx(); + + debug!("base_def_to_ty(def={:?}, opt_self_ty={:?}, path_segments={:?})", + path.def, opt_self_ty, path.segments); + + let span = path.span; + match path.def { + Def::Trait(trait_def_id) => { + // N.B. this case overlaps somewhat with + // TyObjectSum, see that fn for details + + assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(path.segments.split_last().unwrap().1); + + self.trait_path_to_object_type(rscope, + span, + trait_def_id, + path_id, + path.segments.last().unwrap(), + span, + partition_bounds(&[])) + } + Def::Enum(did) | Def::TyAlias(did) | Def::Struct(did) | Def::Union(did) => { + assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(path.segments.split_last().unwrap().1); + self.ast_path_to_ty(rscope, span, did, path.segments.last().unwrap()) + } + Def::Variant(did) if permit_variants => { + // Convert "variant type" as if it were a real type. + // The resulting `Ty` is type of the variant's enum for now. 
+ assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(path.segments.split_last().unwrap().1); + self.ast_path_to_ty(rscope, + span, + tcx.parent_def_id(did).unwrap(), + path.segments.last().unwrap()) + } + Def::TyParam(did) => { + assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(&path.segments); + + let node_id = tcx.map.as_local_node_id(did).unwrap(); + let param = tcx.ty_param_defs.borrow().get(&node_id) + .map(ty::ParamTy::for_def); + if let Some(p) = param { + p.to_ty(tcx) + } else { + // Only while computing defaults of earlier type + // parameters can a type parameter be missing its def. + struct_span_err!(tcx.sess, span, E0128, + "type parameters with a default cannot use \ + forward declared identifiers") + .span_label(span, &format!("defaulted type parameters \ + cannot be forward declared")) + .emit(); + tcx.types.err + } + } + Def::SelfTy(_, Some(def_id)) => { + // Self in impl (we know the concrete type). + + assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(&path.segments); + let ty = tcx.item_type(def_id); + if let Some(free_substs) = self.get_free_substs() { ty.subst(tcx, free_substs) } else { ty } - } else { - tcx.sess.span_bug(span, "self type has not been fully resolved") + } + Def::SelfTy(Some(_), None) => { + // Self in trait. 
+ assert_eq!(opt_self_ty, None); + tcx.prohibit_type_params(&path.segments); + tcx.mk_self_type() + } + Def::AssociatedTy(def_id) => { + tcx.prohibit_type_params(&path.segments[..path.segments.len()-2]); + let trait_did = tcx.parent_def_id(def_id).unwrap(); + self.qpath_to_ty(rscope, + span, + opt_self_ty, + trait_did, + &path.segments[path.segments.len()-2], + path.segments.last().unwrap()) + } + Def::PrimTy(prim_ty) => { + assert_eq!(opt_self_ty, None); + tcx.prim_ty_to_ty(&path.segments, prim_ty) + } + Def::Err => { + self.set_tainted_by_errors(); + return self.tcx().types.err; + } + _ => { + struct_span_err!(tcx.sess, span, E0248, + "found value `{}` used as a type", + tcx.item_path_str(path.def.def_id())) + .span_label(span, &format!("value used as a type")) + .emit(); + return self.tcx().types.err; } } - def::DefSelfTy(Some(_), None) => { - // Self in trait. - prohibit_type_params(tcx, base_segments); - tcx.mk_self_type() - } - def::DefAssociatedTy(trait_did, _) => { - prohibit_type_params(tcx, &base_segments[..base_segments.len()-2]); - qpath_to_ty(this, - rscope, - span, - param_mode, - opt_self_ty, - trait_did, - &base_segments[base_segments.len()-2], - base_segments.last().unwrap()) - } - def::DefMod(id) => { - // Used as sentinel by callers to indicate the `::A::B::C` form. - // FIXME(#22519) This part of the resolution logic should be - // avoided entirely for that form, once we stop needed a Def - // for `associated_path_def_to_ty`. - // Fixing this will also let use resolve ::Foo the same way we - // resolve Self::Foo, at the moment we can't resolve the former because - // we don't have the trait information around, which is just sad. 
- - if !base_segments.is_empty() { - let id_node = tcx.map.as_local_node_id(id).unwrap(); - span_err!(tcx.sess, - span, - E0247, - "found module name used as a type: {}", - tcx.map.node_to_user_string(id_node)); - return this.tcx().types.err; - } - - opt_self_ty.expect("missing T in ::a::b::c") - } - def::DefPrimTy(prim_ty) => { - prim_ty_to_ty(tcx, base_segments, prim_ty) - } - def::DefErr => { - return this.tcx().types.err; - } - _ => { - let id_node = tcx.map.as_local_node_id(def.def_id()).unwrap(); - span_err!(tcx.sess, span, E0248, - "found value `{}` used as a type", - tcx.map.path_to_string(id_node)); - return this.tcx().types.err; - } - } -} - -// Note that both base_segments and assoc_segments may be empty, although not at -// the same time. -pub fn finish_resolving_def_to_ty<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - param_mode: PathParamMode, - def: &def::Def, - opt_self_ty: Option>, - base_segments: &[hir::PathSegment], - assoc_segments: &[hir::PathSegment]) - -> Ty<'tcx> { - let mut ty = base_def_to_ty(this, - rscope, - span, - param_mode, - def, - opt_self_ty, - base_segments); - let mut def = *def; - // If any associated type segments remain, attempt to resolve them. - for segment in assoc_segments { - if ty.sty == ty::TyError { - break; - } - // This is pretty bad (it will fail except for T::A and Self::A). - let (a_ty, a_def) = associated_path_def_to_ty(this, - span, - ty, - def, - segment); - ty = a_ty; - def = a_def; } - ty -} -/// Parses the programmer's textual representation of a type into our -/// internal notion of a type. -pub fn ast_ty_to_ty<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - ast_ty: &hir::Ty) - -> Ty<'tcx> -{ - debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})", - ast_ty.id, ast_ty); + /// Parses the programmer's textual representation of a type into our + /// internal notion of a type. 
+ pub fn ast_ty_to_ty(&self, rscope: &RegionScope, ast_ty: &hir::Ty) -> Ty<'tcx> { + debug!("ast_ty_to_ty(id={:?}, ast_ty={:?})", + ast_ty.id, ast_ty); - let tcx = this.tcx(); + let tcx = self.tcx(); - if let Some(&ty) = tcx.ast_ty_to_ty_cache.borrow().get(&ast_ty.id) { - debug!("ast_ty_to_ty: id={:?} ty={:?} (cached)", ast_ty.id, ty); - return ty; - } - - let typ = match ast_ty.node { - hir::TyVec(ref ty) => { - tcx.mk_slice(ast_ty_to_ty(this, rscope, &**ty)) + let cache = self.ast_ty_to_ty_cache(); + if let Some(ty) = cache.borrow().get(&ast_ty.id) { + return ty; } - hir::TyObjectSum(ref ty, ref bounds) => { - match ast_ty_to_trait_ref(this, rscope, &**ty, bounds) { - Ok((trait_ref, projection_bounds)) => { - trait_ref_to_object_type(this, - rscope, - ast_ty.span, - trait_ref, - projection_bounds, - bounds) - } - Err(ErrorReported) => { - this.tcx().types.err + + let result_ty = match ast_ty.node { + hir::TySlice(ref ty) => { + tcx.mk_slice(self.ast_ty_to_ty(rscope, &ty)) + } + hir::TyObjectSum(ref ty, ref bounds) => { + self.ast_ty_to_object_trait_ref(rscope, ast_ty.span, ty, bounds) + } + hir::TyPtr(ref mt) => { + tcx.mk_ptr(ty::TypeAndMut { + ty: self.ast_ty_to_ty(rscope, &mt.ty), + mutbl: mt.mutbl + }) + } + hir::TyRptr(ref region, ref mt) => { + let r = self.opt_ast_region_to_region(rscope, ast_ty.span, region); + debug!("TyRef r={:?}", r); + let rscope1 = + &ObjectLifetimeDefaultRscope::new( + rscope, + ty::ObjectLifetimeDefault::Specific(r)); + let t = self.ast_ty_to_ty(rscope1, &mt.ty); + tcx.mk_ref(r, ty::TypeAndMut {ty: t, mutbl: mt.mutbl}) + } + hir::TyNever => { + tcx.types.never + }, + hir::TyTup(ref fields) => { + tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(rscope, &t))) + } + hir::TyBareFn(ref bf) => { + require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); + let anon_scope = rscope.anon_type_scope(); + let bare_fn_ty = self.ty_of_method_or_bare_fn(bf.unsafety, + bf.abi, + None, + &bf.decl, + anon_scope, + anon_scope); + + // Find 
any late-bound regions declared in return type that do + // not appear in the arguments. These are not wellformed. + // + // Example: + // + // for<'a> fn() -> &'a str <-- 'a is bad + // for<'a> fn(&'a String) -> &'a str <-- 'a is ok + // + // Note that we do this check **here** and not in + // `ty_of_bare_fn` because the latter is also used to make + // the types for fn items, and we do not want to issue a + // warning then. (Once we fix #32330, the regions we are + // checking for here would be considered early bound + // anyway.) + let inputs = bare_fn_ty.sig.inputs(); + let late_bound_in_args = tcx.collect_constrained_late_bound_regions(&inputs); + let output = bare_fn_ty.sig.output(); + let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output); + for br in late_bound_in_ret.difference(&late_bound_in_args) { + let br_name = match *br { + ty::BrNamed(_, name, _) => name, + _ => { + span_bug!( + bf.decl.output.span(), + "anonymous bound region {:?} in return but not args", + br); + } + }; + tcx.sess.add_lint( + lint::builtin::HR_LIFETIME_IN_ASSOC_TYPE, + ast_ty.id, + ast_ty.span, + format!("return type references lifetime `{}`, \ + which does not appear in the trait input types", + br_name)); } + tcx.mk_fn_ptr(bare_fn_ty) } - } - hir::TyPtr(ref mt) => { - tcx.mk_ptr(ty::TypeAndMut { - ty: ast_ty_to_ty(this, rscope, &*mt.ty), - mutbl: mt.mutbl - }) - } - hir::TyRptr(ref region, ref mt) => { - let r = opt_ast_region_to_region(this, rscope, ast_ty.span, region); - debug!("TyRef r={:?}", r); - let rscope1 = - &ObjectLifetimeDefaultRscope::new( - rscope, - ty::ObjectLifetimeDefault::Specific(r)); - let t = ast_ty_to_ty(this, rscope1, &*mt.ty); - tcx.mk_ref(tcx.mk_region(r), ty::TypeAndMut {ty: t, mutbl: mt.mutbl}) - } - hir::TyTup(ref fields) => { - let flds = fields.iter() - .map(|t| ast_ty_to_ty(this, rscope, &**t)) - .collect(); - tcx.mk_tup(flds) - } - hir::TyBareFn(ref bf) => { - require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); - 
let bare_fn = ty_of_bare_fn(this, bf.unsafety, bf.abi, &*bf.decl); - tcx.mk_fn(None, tcx.mk_bare_fn(bare_fn)) - } - hir::TyPolyTraitRef(ref bounds) => { - conv_ty_poly_trait_ref(this, rscope, ast_ty.span, bounds) - } - hir::TyPath(ref maybe_qself, ref path) => { - let path_res = if let Some(&d) = tcx.def_map.borrow().get(&ast_ty.id) { - d - } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself { - // Create some fake resolution that can't possibly be a type. - def::PathResolution { - base_def: def::DefMod(tcx.map.local_def_id(ast::CRATE_NODE_ID)), - last_private: LastMod(AllPublic), - depth: path.segments.len() + hir::TyPolyTraitRef(ref bounds) => { + self.conv_object_ty_poly_trait_ref(rscope, ast_ty.span, bounds) + } + hir::TyImplTrait(ref bounds) => { + use collect::{compute_bounds, SizedByDefault}; + + // Create the anonymized type. + let def_id = tcx.map.local_def_id(ast_ty.id); + if let Some(anon_scope) = rscope.anon_type_scope() { + let substs = anon_scope.fresh_substs(self, ast_ty.span); + let ty = tcx.mk_anon(tcx.map.local_def_id(ast_ty.id), substs); + + // Collect the bounds, i.e. the `A+B+'c` in `impl A+B+'c`. 
+ let bounds = compute_bounds(self, ty, bounds, + SizedByDefault::Yes, + Some(anon_scope), + ast_ty.span); + let predicates = bounds.predicates(tcx, ty); + let predicates = tcx.lift_to_global(&predicates).unwrap(); + tcx.predicates.borrow_mut().insert(def_id, ty::GenericPredicates { + parent: None, + predicates: predicates + }); + + ty + } else { + span_err!(tcx.sess, ast_ty.span, E0562, + "`impl Trait` not allowed outside of function \ + and inherent method return types"); + tcx.types.err } - } else { - tcx.sess.span_bug(ast_ty.span, &format!("unbound path {:?}", ast_ty)) - }; - let def = path_res.base_def; - let base_ty_end = path.segments.len() - path_res.depth; - let opt_self_ty = maybe_qself.as_ref().map(|qself| { - ast_ty_to_ty(this, rscope, &qself.ty) - }); - let ty = finish_resolving_def_to_ty(this, - rscope, - ast_ty.span, - PathParamMode::Explicit, - &def, - opt_self_ty, - &path.segments[..base_ty_end], - &path.segments[base_ty_end..]); - - if path_res.depth != 0 && ty.sty != ty::TyError { - // Write back the new resolution. 
- tcx.def_map.borrow_mut().insert(ast_ty.id, def::PathResolution { - base_def: def, - last_private: path_res.last_private, - depth: 0 + } + hir::TyPath(hir::QPath::Resolved(ref maybe_qself, ref path)) => { + debug!("ast_ty_to_ty: maybe_qself={:?} path={:?}", maybe_qself, path); + let opt_self_ty = maybe_qself.as_ref().map(|qself| { + self.ast_ty_to_ty(rscope, qself) }); + self.def_to_ty(rscope, opt_self_ty, path, ast_ty.id, false) } + hir::TyPath(hir::QPath::TypeRelative(ref qself, ref segment)) => { + debug!("ast_ty_to_ty: qself={:?} segment={:?}", qself, segment); + let ty = self.ast_ty_to_ty(rscope, qself); - ty - } - hir::TyFixedLengthVec(ref ty, ref e) => { - let hint = UncheckedExprHint(tcx.types.usize); - match const_eval::eval_const_expr_partial(tcx, &e, hint, None) { - Ok(r) => { - match r { - ConstVal::Int(i) => - tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty), - i as usize), - ConstVal::Uint(i) => - tcx.mk_array(ast_ty_to_ty(this, rscope, &**ty), - i as usize), - _ => { - span_err!(tcx.sess, ast_ty.span, E0249, - "expected constant integer expression \ - for array length"); - this.tcx().types.err - } - } - } - Err(ref r) => { - let mut err = struct_span_err!(tcx.sess, r.span, E0250, - "array length constant evaluation error: {}", - r.description()); - if !ast_ty.span.contains(r.span) { - span_note!(&mut err, ast_ty.span, "for array length here") - } - err.emit(); - this.tcx().types.err + let def = if let hir::TyPath(hir::QPath::Resolved(_, ref path)) = qself.node { + path.def + } else { + Def::Err + }; + self.associated_path_def_to_ty(ast_ty.id, ast_ty.span, ty, def, segment).0 + } + hir::TyArray(ref ty, ref e) => { + if let Ok(length) = eval_length(tcx.global_tcx(), &e, "array length") { + tcx.mk_array(self.ast_ty_to_ty(rscope, &ty), length) + } else { + self.tcx().types.err } } - } - hir::TyTypeof(ref _e) => { - span_err!(tcx.sess, ast_ty.span, E0516, - "`typeof` is a reserved keyword but unimplemented"); - tcx.types.err - } - hir::TyInfer => { - // 
TyInfer also appears as the type of arguments or return - // values in a ExprClosure, or as - // the type of local variables. Both of these cases are - // handled specially and will not descend into this routine. - this.ty_infer(None, None, None, ast_ty.span) - } - }; + hir::TyTypeof(ref _e) => { + struct_span_err!(tcx.sess, ast_ty.span, E0516, + "`typeof` is a reserved keyword but unimplemented") + .span_label(ast_ty.span, &format!("reserved keyword")) + .emit(); - debug!("ast_ty_to_ty: id={:?} ty={:?}", ast_ty.id, typ); - tcx.ast_ty_to_ty_cache.borrow_mut().insert(ast_ty.id, typ); - return typ; -} + tcx.types.err + } + hir::TyInfer => { + // TyInfer also appears as the type of arguments or return + // values in a ExprClosure, or as + // the type of local variables. Both of these cases are + // handled specially and will not descend into this routine. + self.ty_infer(ast_ty.span) + } + }; -pub fn ty_of_arg<'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - a: &hir::Arg, - expected_ty: Option>) - -> Ty<'tcx> -{ - match a.ty.node { - hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(), - hir::TyInfer => this.ty_infer(None, None, None, a.ty.span), - _ => ast_ty_to_ty(this, rscope, &*a.ty), + cache.borrow_mut().insert(ast_ty.id, result_ty); + + result_ty } -} -struct SelfInfo<'a, 'tcx> { - untransformed_self_ty: Ty<'tcx>, - explicit_self: &'a hir::ExplicitSelf, -} + pub fn ty_of_arg(&self, + rscope: &RegionScope, + a: &hir::Arg, + expected_ty: Option>) + -> Ty<'tcx> + { + match a.ty.node { + hir::TyInfer if expected_ty.is_some() => expected_ty.unwrap(), + hir::TyInfer => self.ty_infer(a.ty.span), + _ => self.ast_ty_to_ty(rscope, &a.ty), + } + } -pub fn ty_of_method<'tcx>(this: &AstConv<'tcx>, - sig: &hir::MethodSig, - untransformed_self_ty: Ty<'tcx>) - -> (ty::BareFnTy<'tcx>, ty::ExplicitSelfCategory) { - let self_info = Some(SelfInfo { - untransformed_self_ty: untransformed_self_ty, - explicit_self: &sig.explicit_self, - }); - let (bare_fn_ty, 
optional_explicit_self_category) = - ty_of_method_or_bare_fn(this, - sig.unsafety, - sig.abi, - self_info, - &sig.decl); - (bare_fn_ty, optional_explicit_self_category.unwrap()) -} + pub fn ty_of_method(&self, + sig: &hir::MethodSig, + untransformed_self_ty: Ty<'tcx>, + anon_scope: Option) + -> &'tcx ty::BareFnTy<'tcx> { + self.ty_of_method_or_bare_fn(sig.unsafety, + sig.abi, + Some(untransformed_self_ty), + &sig.decl, + None, + anon_scope) + } -pub fn ty_of_bare_fn<'tcx>(this: &AstConv<'tcx>, unsafety: hir::Unsafety, abi: abi::Abi, - decl: &hir::FnDecl) -> ty::BareFnTy<'tcx> { - let (bare_fn_ty, _) = ty_of_method_or_bare_fn(this, unsafety, abi, None, decl); - bare_fn_ty -} + pub fn ty_of_bare_fn(&self, + unsafety: hir::Unsafety, + abi: abi::Abi, + decl: &hir::FnDecl, + anon_scope: Option) + -> &'tcx ty::BareFnTy<'tcx> { + self.ty_of_method_or_bare_fn(unsafety, abi, None, decl, None, anon_scope) + } -fn ty_of_method_or_bare_fn<'a, 'tcx>(this: &AstConv<'tcx>, - unsafety: hir::Unsafety, - abi: abi::Abi, - opt_self_info: Option>, - decl: &hir::FnDecl) - -> (ty::BareFnTy<'tcx>, Option) -{ - debug!("ty_of_method_or_bare_fn"); - - // New region names that appear inside of the arguments of the function - // declaration are bound to that function type. - let rb = rscope::BindingRscope::new(); - - // `implied_output_region` is the region that will be assumed for any - // region parameters in the return type. In accordance with the rules for - // lifetime elision, we can determine it in two ways. First (determined - // here), if self is by-reference, then the implied output region is the - // region of the self parameter. 
- let (self_ty, explicit_self_category) = match opt_self_info { - None => (None, None), - Some(self_info) => determine_self_type(this, &rb, self_info) - }; + fn ty_of_method_or_bare_fn(&self, + unsafety: hir::Unsafety, + abi: abi::Abi, + opt_untransformed_self_ty: Option>, + decl: &hir::FnDecl, + arg_anon_scope: Option, + ret_anon_scope: Option) + -> &'tcx ty::BareFnTy<'tcx> + { + debug!("ty_of_method_or_bare_fn"); + + // New region names that appear inside of the arguments of the function + // declaration are bound to that function type. + let rb = MaybeWithAnonTypes::new(BindingRscope::new(), arg_anon_scope); + + // `implied_output_region` is the region that will be assumed for any + // region parameters in the return type. In accordance with the rules for + // lifetime elision, we can determine it in two ways. First (determined + // here), if self is by-reference, then the implied output region is the + // region of the self parameter. + let (self_ty, explicit_self) = match (opt_untransformed_self_ty, decl.get_self()) { + (Some(untransformed_self_ty), Some(explicit_self)) => { + let self_type = self.determine_self_type(&rb, untransformed_self_ty, + &explicit_self); + (Some(self_type), Some(ExplicitSelf::determine(untransformed_self_ty, self_type))) + } + _ => (None, None), + }; - // HACK(eddyb) replace the fake self type in the AST with the actual type. - let arg_params = if self_ty.is_some() { - &decl.inputs[1..] - } else { - &decl.inputs[..] - }; - let arg_tys: Vec = - arg_params.iter().map(|a| ty_of_arg(this, &rb, a, None)).collect(); - let arg_pats: Vec = - arg_params.iter().map(|a| pprust::pat_to_string(&*a.pat)).collect(); - - // Second, if there was exactly one lifetime (either a substitution or a - // reference) in the arguments, then any anonymous regions in the output - // have that lifetime. 
- let implied_output_region = match explicit_self_category { - Some(ty::ExplicitSelfCategory::ByReference(region, _)) => Ok(region), - _ => find_implied_output_region(this.tcx(), &arg_tys, arg_pats) - }; + // HACK(eddyb) replace the fake self type in the AST with the actual type. + let arg_params = if self_ty.is_some() { + &decl.inputs[1..] + } else { + &decl.inputs[..] + }; + let arg_tys: Vec = + arg_params.iter().map(|a| self.ty_of_arg(&rb, a, None)).collect(); + + // Second, if there was exactly one lifetime (either a substitution or a + // reference) in the arguments, then any anonymous regions in the output + // have that lifetime. + let implied_output_region = match explicit_self { + Some(ExplicitSelf::ByReference(region, _)) => Ok(*region), + _ => { + // `pat_to_string` is expensive and + // `find_implied_output_region` only needs its result when + // there's an error. So we wrap it in a closure to avoid + // calling it until necessary. + let arg_pats = || { + arg_params.iter().map(|a| pprust::pat_to_string(&a.pat)).collect() + }; + self.find_implied_output_region(&arg_tys, arg_pats) + } + }; - let output_ty = match decl.output { - hir::Return(ref output) => - ty::FnConverging(convert_ty_with_lifetime_elision(this, - implied_output_region, - &output)), - hir::DefaultReturn(..) => ty::FnConverging(this.tcx().mk_nil()), - hir::NoReturn(..) => ty::FnDiverging - }; + let output_ty = match decl.output { + hir::Return(ref output) => + self.convert_ty_with_lifetime_elision(implied_output_region, + &output, + ret_anon_scope), + hir::DefaultReturn(..) 
=> self.tcx().mk_nil(), + }; - (ty::BareFnTy { - unsafety: unsafety, - abi: abi, - sig: ty::Binder(ty::FnSig { - inputs: self_ty.into_iter().chain(arg_tys).collect(), - output: output_ty, - variadic: decl.variadic - }), - }, explicit_self_category) -} + let input_tys = self_ty.into_iter().chain(arg_tys).collect(); -fn determine_self_type<'a, 'tcx>(this: &AstConv<'tcx>, - rscope: &RegionScope, - self_info: SelfInfo<'a, 'tcx>) - -> (Option>, Option) -{ - let self_ty = self_info.untransformed_self_ty; - return match self_info.explicit_self.node { - hir::SelfStatic => (None, Some(ty::ExplicitSelfCategory::Static)), - hir::SelfValue(_) => { - (Some(self_ty), Some(ty::ExplicitSelfCategory::ByValue)) - } - hir::SelfRegion(ref lifetime, mutability, _) => { - let region = - opt_ast_region_to_region(this, - rscope, - self_info.explicit_self.span, - lifetime); - (Some(this.tcx().mk_ref( - this.tcx().mk_region(region), - ty::TypeAndMut { - ty: self_ty, - mutbl: mutability - })), - Some(ty::ExplicitSelfCategory::ByReference(region, mutability))) - } - hir::SelfExplicit(ref ast_type, _) => { - let explicit_type = ast_ty_to_ty(this, rscope, &**ast_type); - - // We wish to (for now) categorize an explicit self - // declaration like `self: SomeType` into either `self`, - // `&self`, `&mut self`, or `Box`. We do this here - // by some simple pattern matching. A more precise check - // is done later in `check_method_self_type()`. - // - // Examples: - // - // ``` - // impl Foo for &T { - // // Legal declarations: - // fn method1(self: &&T); // ExplicitSelfCategory::ByReference - // fn method2(self: &T); // ExplicitSelfCategory::ByValue - // fn method3(self: Box<&T>); // ExplicitSelfCategory::ByBox - // - // // Invalid cases will be caught later by `check_method_self_type`: - // fn method_err1(self: &mut T); // ExplicitSelfCategory::ByReference - // } - // ``` - // - // To do the check we just count the number of "modifiers" - // on each type and compare them. 
If they are the same or - // the impl has more, we call it "by value". Otherwise, we - // look at the outermost modifier on the method decl and - // call it by-ref, by-box as appropriate. For method1, for - // example, the impl type has one modifier, but the method - // type has two, so we end up with - // ExplicitSelfCategory::ByReference. - - let impl_modifiers = count_modifiers(self_info.untransformed_self_ty); - let method_modifiers = count_modifiers(explicit_type); - - debug!("determine_explicit_self_category(self_info.untransformed_self_ty={:?} \ - explicit_type={:?} \ - modifiers=({},{})", - self_info.untransformed_self_ty, - explicit_type, - impl_modifiers, - method_modifiers); - - let category = if impl_modifiers >= method_modifiers { - ty::ExplicitSelfCategory::ByValue - } else { - match explicit_type.sty { - ty::TyRef(r, mt) => ty::ExplicitSelfCategory::ByReference(*r, mt.mutbl), - ty::TyBox(_) => ty::ExplicitSelfCategory::ByBox, - _ => ty::ExplicitSelfCategory::ByValue, - } - }; + debug!("ty_of_method_or_bare_fn: input_tys={:?}", input_tys); + debug!("ty_of_method_or_bare_fn: output_ty={:?}", output_ty); - (Some(explicit_type), Some(category)) - } - }; + self.tcx().mk_bare_fn(ty::BareFnTy { + unsafety: unsafety, + abi: abi, + sig: ty::Binder(ty::FnSig { + inputs: input_tys, + output: output_ty, + variadic: decl.variadic + }), + }) + } - fn count_modifiers(ty: Ty) -> usize { - match ty.sty { - ty::TyRef(_, mt) => count_modifiers(mt.ty) + 1, - ty::TyBox(t) => count_modifiers(t) + 1, - _ => 0, + fn determine_self_type<'a>(&self, + rscope: &RegionScope, + untransformed_self_ty: Ty<'tcx>, + explicit_self: &hir::ExplicitSelf) + -> Ty<'tcx> + { + match explicit_self.node { + SelfKind::Value(..) 
=> untransformed_self_ty, + SelfKind::Region(ref lifetime, mutability) => { + let region = + self.opt_ast_region_to_region( + rscope, + explicit_self.span, + lifetime); + self.tcx().mk_ref(region, + ty::TypeAndMut { + ty: untransformed_self_ty, + mutbl: mutability + }) + } + SelfKind::Explicit(ref ast_type, _) => self.ast_ty_to_ty(rscope, &ast_type) } } -} -pub fn ty_of_closure<'tcx>( - this: &AstConv<'tcx>, - unsafety: hir::Unsafety, - decl: &hir::FnDecl, - abi: abi::Abi, - expected_sig: Option>) - -> ty::ClosureTy<'tcx> -{ - debug!("ty_of_closure(expected_sig={:?})", - expected_sig); - - // new region names that appear inside of the fn decl are bound to - // that function type - let rb = rscope::BindingRscope::new(); - - let input_tys: Vec<_> = decl.inputs.iter().enumerate().map(|(i, a)| { - let expected_arg_ty = expected_sig.as_ref().and_then(|e| { - // no guarantee that the correct number of expected args - // were supplied - if i < e.inputs.len() { - Some(e.inputs[i]) - } else { - None - } - }); - ty_of_arg(this, &rb, a, expected_arg_ty) - }).collect(); + pub fn ty_of_closure(&self, + unsafety: hir::Unsafety, + decl: &hir::FnDecl, + abi: abi::Abi, + expected_sig: Option>) + -> ty::ClosureTy<'tcx> + { + debug!("ty_of_closure(expected_sig={:?})", + expected_sig); + + // new region names that appear inside of the fn decl are bound to + // that function type + let rb = rscope::BindingRscope::new(); + + let input_tys: Vec<_> = decl.inputs.iter().enumerate().map(|(i, a)| { + let expected_arg_ty = expected_sig.as_ref().and_then(|e| { + // no guarantee that the correct number of expected args + // were supplied + if i < e.inputs.len() { + Some(e.inputs[i]) + } else { + None + } + }); + self.ty_of_arg(&rb, a, expected_arg_ty) + }).collect(); - let expected_ret_ty = expected_sig.map(|e| e.output); + let expected_ret_ty = expected_sig.map(|e| e.output); - let is_infer = match decl.output { - hir::Return(ref output) if output.node == hir::TyInfer => true, - 
hir::DefaultReturn(..) => true, - _ => false - }; + let is_infer = match decl.output { + hir::Return(ref output) if output.node == hir::TyInfer => true, + hir::DefaultReturn(..) => true, + _ => false + }; - let output_ty = match decl.output { - _ if is_infer && expected_ret_ty.is_some() => - expected_ret_ty.unwrap(), - _ if is_infer => - ty::FnConverging(this.ty_infer(None, None, None, decl.output.span())), - hir::Return(ref output) => - ty::FnConverging(ast_ty_to_ty(this, &rb, &**output)), - hir::DefaultReturn(..) => unreachable!(), - hir::NoReturn(..) => ty::FnDiverging - }; + let output_ty = match decl.output { + _ if is_infer && expected_ret_ty.is_some() => + expected_ret_ty.unwrap(), + _ if is_infer => self.ty_infer(decl.output.span()), + hir::Return(ref output) => + self.ast_ty_to_ty(&rb, &output), + hir::DefaultReturn(..) => bug!(), + }; - debug!("ty_of_closure: input_tys={:?}", input_tys); - debug!("ty_of_closure: output_ty={:?}", output_ty); + debug!("ty_of_closure: input_tys={:?}", input_tys); + debug!("ty_of_closure: output_ty={:?}", output_ty); - ty::ClosureTy { - unsafety: unsafety, - abi: abi, - sig: ty::Binder(ty::FnSig {inputs: input_tys, - output: output_ty, - variadic: decl.variadic}), + ty::ClosureTy { + unsafety: unsafety, + abi: abi, + sig: ty::Binder(ty::FnSig {inputs: input_tys, + output: output_ty, + variadic: decl.variadic}), + } } -} -/// Given an existential type like `Foo+'a+Bar`, this routine converts the `'a` and `Bar` intos an -/// `ExistentialBounds` struct. The `main_trait_refs` argument specifies the `Foo` -- it is absent -/// for closures. Eventually this should all be normalized, I think, so that there is no "main -/// trait ref" and instead we just have a flat list of bounds as the existential type. 
-fn conv_existential_bounds<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - principal_trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, - ast_bounds: &[hir::TyParamBound]) - -> ty::ExistentialBounds<'tcx> -{ - let partitioned_bounds = - partition_bounds(this.tcx(), span, ast_bounds); + fn conv_object_ty_poly_trait_ref(&self, + rscope: &RegionScope, + span: Span, + ast_bounds: &[hir::TyParamBound]) + -> Ty<'tcx> + { + let mut partitioned_bounds = partition_bounds(ast_bounds); - conv_existential_bounds_from_partitioned_bounds( - this, rscope, span, principal_trait_ref, projection_bounds, partitioned_bounds) -} + let trait_bound = if !partitioned_bounds.trait_bounds.is_empty() { + partitioned_bounds.trait_bounds.remove(0) + } else { + span_err!(self.tcx().sess, span, E0224, + "at least one non-builtin trait is required for an object type"); + return self.tcx().types.err; + }; -fn conv_ty_poly_trait_ref<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - ast_bounds: &[hir::TyParamBound]) - -> Ty<'tcx> -{ - let mut partitioned_bounds = partition_bounds(this.tcx(), span, &ast_bounds[..]); - - let mut projection_bounds = Vec::new(); - let main_trait_bound = if !partitioned_bounds.trait_bounds.is_empty() { - let trait_bound = partitioned_bounds.trait_bounds.remove(0); - instantiate_poly_trait_ref(this, - rscope, - trait_bound, - None, - &mut projection_bounds) - } else { - span_err!(this.tcx().sess, span, E0224, - "at least one non-builtin trait is required for an object type"); - return this.tcx().types.err; - }; + let trait_ref = &trait_bound.trait_ref; + let trait_def_id = self.trait_def_id(trait_ref); + self.trait_path_to_object_type(rscope, + trait_ref.path.span, + trait_def_id, + trait_ref.ref_id, + trait_ref.path.segments.last().unwrap(), + span, + partitioned_bounds) + } - let bounds = - conv_existential_bounds_from_partitioned_bounds(this, - rscope, - span, - main_trait_bound.clone(), - projection_bounds, - 
partitioned_bounds); + /// Given the bounds on an object, determines what single region bound (if any) we can + /// use to summarize this type. The basic idea is that we will use the bound the user + /// provided, if they provided one, and otherwise search the supertypes of trait bounds + /// for region bounds. It may be that we can derive no bound at all, in which case + /// we return `None`. + fn compute_object_lifetime_bound(&self, + span: Span, + explicit_region_bounds: &[&hir::Lifetime], + existential_predicates: ty::Binder<&'tcx ty::Slice>>) + -> Option<&'tcx ty::Region> // if None, use the default + { + let tcx = self.tcx(); - make_object_type(this, span, main_trait_bound, bounds) -} + debug!("compute_opt_region_bound(explicit_region_bounds={:?}, \ + existential_predicates={:?})", + explicit_region_bounds, + existential_predicates); -pub fn conv_existential_bounds_from_partitioned_bounds<'tcx>( - this: &AstConv<'tcx>, - rscope: &RegionScope, - span: Span, - principal_trait_ref: ty::PolyTraitRef<'tcx>, - projection_bounds: Vec>, // Empty for boxed closures - partitioned_bounds: PartitionedBounds) - -> ty::ExistentialBounds<'tcx> -{ - let PartitionedBounds { builtin_bounds, - trait_bounds, - region_bounds } = - partitioned_bounds; - - if !trait_bounds.is_empty() { - let b = &trait_bounds[0]; - span_err!(this.tcx().sess, b.trait_ref.path.span, E0225, - "only the builtin traits can be used as closure or object bounds"); - } + if explicit_region_bounds.len() > 1 { + span_err!(tcx.sess, explicit_region_bounds[1].span, E0226, + "only a single explicit lifetime bound is permitted"); + } - let region_bound = - compute_object_lifetime_bound(this, - span, - ®ion_bounds, - principal_trait_ref, - builtin_bounds); + if let Some(&r) = explicit_region_bounds.get(0) { + // Explicitly specified region bound. Use that. 
+ return Some(ast_region_to_region(tcx, r)); + } - let region_bound = match region_bound { - Some(r) => r, - None => { - match rscope.object_lifetime_default(span) { - Some(r) => r, - None => { - span_err!(this.tcx().sess, span, E0228, - "the lifetime bound for this object type cannot be deduced \ - from context; please supply an explicit bound"); - ty::ReStatic - } + if let Some(principal) = existential_predicates.principal() { + if let Err(ErrorReported) = self.ensure_super_predicates(span, principal.def_id()) { + return Some(tcx.mk_region(ty::ReStatic)); } } - }; - - debug!("region_bound: {:?}", region_bound); - - ty::ExistentialBounds::new(region_bound, builtin_bounds, projection_bounds) -} -/// Given the bounds on an object, determines what single region bound -/// (if any) we can use to summarize this type. The basic idea is that we will use the bound the -/// user provided, if they provided one, and otherwise search the supertypes of trait bounds for -/// region bounds. It may be that we can derive no bound at all, in which case we return `None`. -fn compute_object_lifetime_bound<'tcx>( - this: &AstConv<'tcx>, - span: Span, - explicit_region_bounds: &[&hir::Lifetime], - principal_trait_ref: ty::PolyTraitRef<'tcx>, - builtin_bounds: ty::BuiltinBounds) - -> Option // if None, use the default -{ - let tcx = this.tcx(); - - debug!("compute_opt_region_bound(explicit_region_bounds={:?}, \ - principal_trait_ref={:?}, builtin_bounds={:?})", - explicit_region_bounds, - principal_trait_ref, - builtin_bounds); - - if explicit_region_bounds.len() > 1 { - span_err!(tcx.sess, explicit_region_bounds[1].span, E0226, - "only a single explicit lifetime bound is permitted"); - } - - if !explicit_region_bounds.is_empty() { - // Explicitly specified region bound. Use that. - let r = explicit_region_bounds[0]; - return Some(ast_region_to_region(tcx, r)); - } + // No explicit region bound specified. 
Therefore, examine trait + // bounds and see if we can derive region bounds from those. + let derived_region_bounds = + object_region_bounds(tcx, existential_predicates); - if let Err(ErrorReported) = this.ensure_super_predicates(span,principal_trait_ref.def_id()) { - return Some(ty::ReStatic); - } - - // No explicit region bound specified. Therefore, examine trait - // bounds and see if we can derive region bounds from those. - let derived_region_bounds = - object_region_bounds(tcx, &principal_trait_ref, builtin_bounds); - - // If there are no derived region bounds, then report back that we - // can find no region bound. The caller will use the default. - if derived_region_bounds.is_empty() { - return None; - } + // If there are no derived region bounds, then report back that we + // can find no region bound. The caller will use the default. + if derived_region_bounds.is_empty() { + return None; + } - // If any of the derived region bounds are 'static, that is always - // the best choice. - if derived_region_bounds.iter().any(|r| ty::ReStatic == *r) { - return Some(ty::ReStatic); - } + // If any of the derived region bounds are 'static, that is always + // the best choice. + if derived_region_bounds.iter().any(|&r| ty::ReStatic == *r) { + return Some(tcx.mk_region(ty::ReStatic)); + } - // Determine whether there is exactly one unique region in the set - // of derived region bounds. If so, use that. Otherwise, report an - // error. - let r = derived_region_bounds[0]; - if derived_region_bounds[1..].iter().any(|r1| r != *r1) { - span_err!(tcx.sess, span, E0227, - "ambiguous lifetime bound, explicit lifetime bound required"); + // Determine whether there is exactly one unique region in the set + // of derived region bounds. If so, use that. Otherwise, report an + // error. 
+ let r = derived_region_bounds[0]; + if derived_region_bounds[1..].iter().any(|r1| r != *r1) { + span_err!(tcx.sess, span, E0227, + "ambiguous lifetime bound, explicit lifetime bound required"); + } + return Some(r); } - return Some(r); } pub struct PartitionedBounds<'a> { - pub builtin_bounds: ty::BuiltinBounds, pub trait_bounds: Vec<&'a hir::PolyTraitRef>, pub region_bounds: Vec<&'a hir::Lifetime>, } -/// Divides a list of bounds from the AST into three groups: builtin bounds (Copy, Sized etc), -/// general trait bounds, and region bounds. -pub fn partition_bounds<'a>(tcx: &ty::ctxt, - _span: Span, - ast_bounds: &'a [hir::TyParamBound]) - -> PartitionedBounds<'a> +/// Divides a list of general trait bounds into two groups: builtin bounds (Sync/Send) and the +/// remaining general trait bounds. +fn split_auto_traits<'a, 'b, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_bounds: Vec<&'b hir::PolyTraitRef>) + -> (Vec, Vec<&'b hir::PolyTraitRef>) +{ + let (auto_traits, trait_bounds): (Vec<_>, _) = trait_bounds.into_iter().partition(|bound| { + match bound.trait_ref.path.def { + Def::Trait(trait_did) => { + // Checks whether `trait_did` refers to one of the builtin + // traits, like `Send`, and adds it to `auto_traits` if so. 
+ if Some(trait_did) == tcx.lang_items.send_trait() || + Some(trait_did) == tcx.lang_items.sync_trait() { + let segments = &bound.trait_ref.path.segments; + let parameters = &segments[segments.len() - 1].parameters; + if !parameters.types().is_empty() { + check_type_argument_count(tcx, bound.trait_ref.path.span, + parameters.types().len(), &[]); + } + if !parameters.lifetimes().is_empty() { + report_lifetime_number_error(tcx, bound.trait_ref.path.span, + parameters.lifetimes().len(), 0); + } + true + } else { + false + } + } + _ => false + } + }); + + let auto_traits = auto_traits.into_iter().map(|tr| { + if let Def::Trait(trait_did) = tr.trait_ref.path.def { + trait_did + } else { + unreachable!() + } + }).collect::>(); + + (auto_traits, trait_bounds) +} + +/// Divides a list of bounds from the AST into two groups: general trait bounds and region bounds +pub fn partition_bounds<'a, 'b, 'gcx, 'tcx>(ast_bounds: &'b [hir::TyParamBound]) + -> PartitionedBounds<'b> { - let mut builtin_bounds = ty::BuiltinBounds::empty(); let mut region_bounds = Vec::new(); let mut trait_bounds = Vec::new(); for ast_bound in ast_bounds { match *ast_bound { hir::TraitTyParamBound(ref b, hir::TraitBoundModifier::None) => { - match ::lookup_full_def(tcx, b.trait_ref.path.span, b.trait_ref.ref_id) { - def::DefTrait(trait_did) => { - if tcx.try_add_builtin_trait(trait_did, - &mut builtin_bounds) { - let segments = &b.trait_ref.path.segments; - let parameters = &segments[segments.len() - 1].parameters; - if !parameters.types().is_empty() { - check_type_argument_count(tcx, b.trait_ref.path.span, - parameters.types().len(), 0, 0); - } - if !parameters.lifetimes().is_empty() { - report_lifetime_number_error(tcx, b.trait_ref.path.span, - parameters.lifetimes().len(), 0); - } - continue; // success - } - } - _ => { - // Not a trait? that's an error, but it'll get - // reported later. 
- } - } trait_bounds.push(b); } hir::TraitTyParamBound(_, hir::TraitBoundModifier::Maybe) => {} @@ -2208,80 +2048,106 @@ pub fn partition_bounds<'a>(tcx: &ty::ctxt, } PartitionedBounds { - builtin_bounds: builtin_bounds, trait_bounds: trait_bounds, region_bounds: region_bounds, } } -fn prohibit_projections<'tcx>(tcx: &ty::ctxt<'tcx>, - bindings: &[ConvertedBinding<'tcx>]) -{ - for binding in bindings.iter().take(1) { - prohibit_projection(tcx, binding.span); - } -} - -fn check_type_argument_count(tcx: &ty::ctxt, span: Span, supplied: usize, - required: usize, accepted: usize) { +fn check_type_argument_count(tcx: TyCtxt, span: Span, supplied: usize, + ty_param_defs: &[ty::TypeParameterDef]) { + let accepted = ty_param_defs.len(); + let required = ty_param_defs.iter().take_while(|x| x.default.is_none()) .count(); if supplied < required { let expected = if required < accepted { "expected at least" } else { "expected" }; - span_err!(tcx.sess, span, E0243, - "wrong number of type arguments: {} {}, found {}", - expected, required, supplied); + let arguments_plural = if required == 1 { "" } else { "s" }; + + struct_span_err!(tcx.sess, span, E0243, + "wrong number of type arguments: {} {}, found {}", + expected, required, supplied) + .span_label(span, + &format!("{} {} type argument{}", + expected, + required, + arguments_plural)) + .emit(); } else if supplied > accepted { let expected = if required < accepted { - "expected at most" + format!("expected at most {}", accepted) } else { - "expected" + format!("expected {}", accepted) }; - span_err!(tcx.sess, span, E0244, - "wrong number of type arguments: {} {}, found {}", - expected, - accepted, - supplied); + let arguments_plural = if accepted == 1 { "" } else { "s" }; + + struct_span_err!(tcx.sess, span, E0244, + "wrong number of type arguments: {}, found {}", + expected, supplied) + .span_label( + span, + &format!("{} type argument{}", + if accepted == 0 { "expected no" } else { &expected }, + arguments_plural) + ) + 
.emit(); } } -fn report_lifetime_number_error(tcx: &ty::ctxt, span: Span, number: usize, expected: usize) { - span_err!(tcx.sess, span, E0107, - "wrong number of lifetime parameters: expected {}, found {}", - expected, number); +fn report_lifetime_number_error(tcx: TyCtxt, span: Span, number: usize, expected: usize) { + let label = if number < expected { + if expected == 1 { + format!("expected {} lifetime parameter", expected) + } else { + format!("expected {} lifetime parameters", expected) + } + } else { + let additional = number - expected; + if additional == 1 { + "unexpected lifetime parameter".to_string() + } else { + format!("{} unexpected lifetime parameters", additional) + } + }; + struct_span_err!(tcx.sess, span, E0107, + "wrong number of lifetime parameters: expected {}, found {}", + expected, number) + .span_label(span, &label) + .emit(); } // A helper struct for conveniently grouping a set of bounds which we pass to // and return from functions in multiple places. #[derive(PartialEq, Eq, Clone, Debug)] pub struct Bounds<'tcx> { - pub region_bounds: Vec, - pub builtin_bounds: ty::BuiltinBounds, + pub region_bounds: Vec<&'tcx ty::Region>, + pub implicitly_sized: bool, pub trait_bounds: Vec>, pub projection_bounds: Vec>, } -impl<'tcx> Bounds<'tcx> { - pub fn predicates(&self, - tcx: &ty::ctxt<'tcx>, - param_ty: Ty<'tcx>) - -> Vec> +impl<'a, 'gcx, 'tcx> Bounds<'tcx> { + pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, param_ty: Ty<'tcx>) + -> Vec> { let mut vec = Vec::new(); - for builtin_bound in &self.builtin_bounds { - match traits::trait_ref_for_builtin_bound(tcx, builtin_bound, param_ty) { - Ok(trait_ref) => { vec.push(trait_ref.to_predicate()); } - Err(ErrorReported) => { } + // If it could be sized, and is, add the sized predicate + if self.implicitly_sized { + if let Some(sized) = tcx.lang_items.sized_trait() { + let trait_ref = ty::TraitRef { + def_id: sized, + substs: tcx.mk_substs_trait(param_ty, &[]) + }; + 
vec.push(trait_ref.to_predicate()); } } for ®ion_bound in &self.region_bounds { // account for the binder being introduced below; no need to shift `param_ty` // because, at present at least, it can only refer to early-bound regions - let region_bound = ty::fold::shift_region(region_bound, 1); + let region_bound = tcx.mk_region(ty::fold::shift_region(*region_bound, 1)); vec.push(ty::Binder(ty::OutlivesPredicate(param_ty, region_bound)).to_predicate()); } @@ -2296,3 +2162,64 @@ impl<'tcx> Bounds<'tcx> { vec } } + +pub enum ExplicitSelf<'tcx> { + ByValue, + ByReference(&'tcx ty::Region, hir::Mutability), + ByBox +} + +impl<'tcx> ExplicitSelf<'tcx> { + /// We wish to (for now) categorize an explicit self + /// declaration like `self: SomeType` into either `self`, + /// `&self`, `&mut self`, or `Box`. We do this here + /// by some simple pattern matching. A more precise check + /// is done later in `check_method_self_type()`. + /// + /// Examples: + /// + /// ``` + /// impl Foo for &T { + /// // Legal declarations: + /// fn method1(self: &&T); // ExplicitSelf::ByReference + /// fn method2(self: &T); // ExplicitSelf::ByValue + /// fn method3(self: Box<&T>); // ExplicitSelf::ByBox + /// + /// // Invalid cases will be caught later by `check_method_self_type`: + /// fn method_err1(self: &mut T); // ExplicitSelf::ByReference + /// } + /// ``` + /// + /// To do the check we just count the number of "modifiers" + /// on each type and compare them. If they are the same or + /// the impl has more, we call it "by value". Otherwise, we + /// look at the outermost modifier on the method decl and + /// call it by-ref, by-box as appropriate. For method1, for + /// example, the impl type has one modifier, but the method + /// type has two, so we end up with + /// ExplicitSelf::ByReference. 
+ pub fn determine(untransformed_self_ty: Ty<'tcx>, + self_arg_ty: Ty<'tcx>) + -> ExplicitSelf<'tcx> { + fn count_modifiers(ty: Ty) -> usize { + match ty.sty { + ty::TyRef(_, mt) => count_modifiers(mt.ty) + 1, + ty::TyBox(t) => count_modifiers(t) + 1, + _ => 0, + } + } + + let impl_modifiers = count_modifiers(untransformed_self_ty); + let method_modifiers = count_modifiers(self_arg_ty); + + if impl_modifiers >= method_modifiers { + ExplicitSelf::ByValue + } else { + match self_arg_ty.sty { + ty::TyRef(r, mt) => ExplicitSelf::ByReference(r, mt.mutbl), + ty::TyBox(_) => ExplicitSelf::ByBox, + _ => ExplicitSelf::ByValue, + } + } + } +} diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index dfa144699b217..15f383c5787d5 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -8,822 +8,704 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def; -use middle::infer::{self, TypeOrigin}; -use middle::pat_util::{PatIdMap, pat_id_map, pat_is_binding}; -use middle::pat_util::pat_is_resolved_const; -use middle::privacy::{AllPublic, LastMod}; -use middle::subst::Substs; -use middle::ty::{self, Ty, TypeFoldable, LvaluePreference}; -use check::{check_expr, check_expr_has_type, check_expr_with_expectation}; -use check::{check_expr_coercable_to_type, demand, FnCtxt, Expectation}; -use check::{check_expr_with_lvalue_pref}; -use check::{instantiate_path, resolve_ty_and_def_ufcs, structurally_resolved_type}; -use lint; -use require_same_types; -use util::nodemap::FnvHashMap; -use session::Session; +use rustc::hir::{self, PatKind}; +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::pat_util::EnumerateAndAdjustIterator; +use rustc::infer; +use rustc::traits::ObligationCauseCode; +use rustc::ty::{self, Ty, TypeFoldable, LvaluePreference}; +use check::{FnCtxt, Expectation, Diverges}; +use util::nodemap::FxHashMap; -use std::cmp; use 
std::collections::hash_map::Entry::{Occupied, Vacant}; +use std::cmp; use syntax::ast; -use syntax::codemap::{Span, Spanned}; +use syntax::codemap::Spanned; use syntax::ptr::P; +use syntax_pos::Span; -use rustc_front::hir; -use rustc_front::print::pprust; -use rustc_front::util as hir_util; - -pub fn check_pat<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, - pat: &'tcx hir::Pat, - expected: Ty<'tcx>) -{ - let fcx = pcx.fcx; - let tcx = pcx.fcx.ccx.tcx; +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn check_pat(&self, pat: &'gcx hir::Pat, expected: Ty<'tcx>) { + let tcx = self.tcx; - debug!("check_pat(pat={:?},expected={:?})", - pat, - expected); + debug!("check_pat(pat={:?},expected={:?})", pat, expected); - match pat.node { - hir::PatWild => { - fcx.write_ty(pat.id, expected); - } - hir::PatLit(ref lt) => { - check_expr(fcx, &**lt); - let expr_ty = fcx.expr_ty(&**lt); - - // Byte string patterns behave the same way as array patterns - // They can denote both statically and dynamically sized byte arrays - let mut pat_ty = expr_ty; - if let hir::ExprLit(ref lt) = lt.node { - if let ast::LitByteStr(_) = lt.node { - let expected_ty = structurally_resolved_type(fcx, pat.span, expected); - if let ty::TyRef(_, mt) = expected_ty.sty { - if let ty::TySlice(_) = mt.ty.sty { - pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), - tcx.mk_slice(tcx.types.u8)) + let ty = match pat.node { + PatKind::Wild => { + expected + } + PatKind::Lit(ref lt) => { + let ty = self.check_expr(<); + + // Byte string patterns behave the same way as array patterns + // They can denote both statically and dynamically sized byte arrays + let mut pat_ty = ty; + if let hir::ExprLit(ref lt) = lt.node { + if let ast::LitKind::ByteStr(_) = lt.node { + let expected_ty = self.structurally_resolved_type(pat.span, expected); + if let ty::TyRef(_, mt) = expected_ty.sty { + if let ty::TySlice(_) = mt.ty.sty { + pat_ty = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), + tcx.mk_slice(tcx.types.u8)) + } } } } - } - - 
fcx.write_ty(pat.id, pat_ty); - - // somewhat surprising: in this case, the subtyping - // relation goes the opposite way as the other - // cases. Actually what we really want is not a subtyping - // relation at all but rather that there exists a LUB (so - // that they can be compared). However, in practice, - // constants are always scalars or strings. For scalars - // subtyping is irrelevant, and for strings `expr_ty` is - // type is `&'static str`, so if we say that - // - // &'static str <: expected - // - // that's equivalent to there existing a LUB. - demand::suptype(fcx, pat.span, expected, pat_ty); - } - hir::PatRange(ref begin, ref end) => { - check_expr(fcx, begin); - check_expr(fcx, end); - - let lhs_ty = fcx.expr_ty(begin); - let rhs_ty = fcx.expr_ty(end); - - // Check that both end-points are of numeric or char type. - let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char(); - let lhs_compat = numeric_or_char(lhs_ty); - let rhs_compat = numeric_or_char(rhs_ty); - - if !lhs_compat || !rhs_compat { - let span = if !lhs_compat && !rhs_compat { - pat.span - } else if !lhs_compat { - begin.span - } else { - end.span - }; - // Note: spacing here is intentional, we want a space before "start" and "end". - span_err!(tcx.sess, span, E0029, - "only char and numeric types are allowed in range patterns\n \ - start type: {}\n end type: {}", - fcx.infcx().ty_to_string(lhs_ty), - fcx.infcx().ty_to_string(rhs_ty) - ); - return; + // somewhat surprising: in this case, the subtyping + // relation goes the opposite way as the other + // cases. Actually what we really want is not a subtyping + // relation at all but rather that there exists a LUB (so + // that they can be compared). However, in practice, + // constants are always scalars or strings. For scalars + // subtyping is irrelevant, and for strings `ty` is + // type is `&'static str`, so if we say that + // + // &'static str <: expected + // + // that's equivalent to there existing a LUB. 
+ self.demand_suptype(pat.span, expected, pat_ty); + pat_ty } - - // Check that the types of the end-points can be unified. - let types_unify = require_same_types( - tcx, Some(fcx.infcx()), false, pat.span, rhs_ty, lhs_ty, - || "mismatched types in range".to_string() - ); - - // It's ok to return without a message as `require_same_types` prints an error. - if !types_unify { - return; - } - - // Now that we know the types can be unified we find the unified type and use - // it to type the entire expression. - let common_type = fcx.infcx().resolve_type_vars_if_possible(&lhs_ty); - - fcx.write_ty(pat.id, common_type); - - // subtyping doesn't matter here, as the value is some kind of scalar - demand::eqtype(fcx, pat.span, expected, lhs_ty); - } - hir::PatEnum(..) | hir::PatIdent(..) - if pat_is_resolved_const(&tcx.def_map.borrow(), pat) => { - if let hir::PatEnum(ref path, ref subpats) = pat.node { - if !(subpats.is_some() && subpats.as_ref().unwrap().is_empty()) { - bad_struct_kind_err(tcx.sess, pat, path, false); + PatKind::Range(ref begin, ref end) => { + let lhs_ty = self.check_expr(begin); + let rhs_ty = self.check_expr(end); + + // Check that both end-points are of numeric or char type. + let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char(); + let lhs_compat = numeric_or_char(lhs_ty); + let rhs_compat = numeric_or_char(rhs_ty); + + if !lhs_compat || !rhs_compat { + let span = if !lhs_compat && !rhs_compat { + pat.span + } else if !lhs_compat { + begin.span + } else { + end.span + }; + + struct_span_err!(tcx.sess, span, E0029, + "only char and numeric types are allowed in range patterns") + .span_label(span, &format!("ranges require char or numeric types")) + .note(&format!("start type: {}", self.ty_to_string(lhs_ty))) + .note(&format!("end type: {}", self.ty_to_string(rhs_ty))) + .emit(); return; } + + // Now that we know the types can be unified we find the unified type and use + // it to type the entire expression. 
+ let common_type = self.resolve_type_vars_if_possible(&lhs_ty); + + // subtyping doesn't matter here, as the value is some kind of scalar + self.demand_eqtype(pat.span, expected, lhs_ty); + self.demand_eqtype(pat.span, expected, rhs_ty); + common_type } - if let Some(pat_def) = tcx.def_map.borrow().get(&pat.id) { - let const_did = pat_def.def_id(); - let const_scheme = tcx.lookup_item_type(const_did); - assert!(const_scheme.generics.is_empty()); - let const_ty = pcx.fcx.instantiate_type_scheme(pat.span, - &Substs::empty(), - &const_scheme.ty); - fcx.write_ty(pat.id, const_ty); - - // FIXME(#20489) -- we should limit the types here to scalars or something! - - // As with PatLit, what we really want here is that there - // exist a LUB, but for the cases that can occur, subtype - // is good enough. - demand::suptype(fcx, pat.span, expected, const_ty); - } else { - fcx.write_error(pat.id); - } - } - hir::PatIdent(bm, ref path, ref sub) if pat_is_binding(&tcx.def_map.borrow(), pat) => { - let typ = fcx.local_ty(pat.span, pat.id); - match bm { - hir::BindByRef(mutbl) => { - // if the binding is like - // ref x | ref const x | ref mut x - // then `x` is assigned a value of type `&M T` where M is the mutability - // and T is the expected type. - let region_var = fcx.infcx().next_region_var(infer::PatternRegion(pat.span)); - let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl }; - let region_ty = tcx.mk_ref(tcx.mk_region(region_var), mt); - - // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is - // required. However, we use equality, which is stronger. See (*) for - // an explanation. - demand::eqtype(fcx, pat.span, region_ty, typ); - } - // otherwise the type of x is the expected type T - hir::BindByValue(_) => { - // As above, `T <: typeof(x)` is required but we - // use equality, see (*) below. 
- demand::eqtype(fcx, pat.span, expected, typ); + PatKind::Binding(bm, def_id, _, ref sub) => { + let typ = self.local_ty(pat.span, pat.id); + match bm { + hir::BindByRef(mutbl) => { + // if the binding is like + // ref x | ref const x | ref mut x + // then `x` is assigned a value of type `&M T` where M is the mutability + // and T is the expected type. + let region_var = self.next_region_var(infer::PatternRegion(pat.span)); + let mt = ty::TypeAndMut { ty: expected, mutbl: mutbl }; + let region_ty = tcx.mk_ref(region_var, mt); + + // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is + // required. However, we use equality, which is stronger. See (*) for + // an explanation. + self.demand_eqtype(pat.span, region_ty, typ); + } + // otherwise the type of x is the expected type T + hir::BindByValue(_) => { + // As above, `T <: typeof(x)` is required but we + // use equality, see (*) below. + self.demand_eqtype(pat.span, expected, typ); + } } - } - - fcx.write_ty(pat.id, typ); - // if there are multiple arms, make sure they all agree on - // what the type of the binding `x` ought to be - if let Some(&canon_id) = pcx.map.get(&path.node.name) { - if canon_id != pat.id { - let ct = fcx.local_ty(pat.span, canon_id); - demand::eqtype(fcx, pat.span, ct, typ); + // if there are multiple arms, make sure they all agree on + // what the type of the binding `x` ought to be + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); + if var_id != pat.id { + let vt = self.local_ty(pat.span, var_id); + self.demand_eqtype(pat.span, vt, typ); } if let Some(ref p) = *sub { - check_pat(pcx, &**p, expected); + self.check_pat(&p, expected); } + + typ } - } - hir::PatIdent(_, ref path, _) => { - let path = hir_util::ident_to_path(path.span, path.node); - check_pat_enum(pcx, pat, &path, Some(&[]), expected, false); - } - hir::PatEnum(ref path, ref subpats) => { - let subpats = subpats.as_ref().map(|v| &v[..]); - let is_tuple_struct_pat = !(subpats.is_some() && 
subpats.unwrap().is_empty()); - check_pat_enum(pcx, pat, path, subpats, expected, is_tuple_struct_pat); - } - hir::PatQPath(ref qself, ref path) => { - let self_ty = fcx.to_ty(&qself.ty); - let path_res = if let Some(&d) = tcx.def_map.borrow().get(&pat.id) { - if d.base_def == def::DefErr { - fcx.write_error(pat.id); - return; + PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => { + self.check_pat_tuple_struct(pat, qpath, &subpats, ddpos, expected) + } + PatKind::Path(ref qpath) => { + self.check_pat_path(pat, qpath, expected) + } + PatKind::Struct(ref qpath, ref fields, etc) => { + self.check_pat_struct(pat, qpath, fields, etc, expected) + } + PatKind::Tuple(ref elements, ddpos) => { + let mut expected_len = elements.len(); + if ddpos.is_some() { + // Require known type only when `..` is present + if let ty::TyTuple(ref tys) = + self.structurally_resolved_type(pat.span, expected).sty { + expected_len = tys.len(); + } } - d - } else if qself.position == 0 { - // This is just a sentinel for finish_resolving_def_to_ty. 
- let sentinel = fcx.tcx().map.local_def_id(ast::CRATE_NODE_ID); - def::PathResolution { - base_def: def::DefMod(sentinel), - last_private: LastMod(AllPublic), - depth: path.segments.len() + let max_len = cmp::max(expected_len, elements.len()); + + let element_tys_iter = (0..max_len).map(|_| self.next_ty_var()); + let element_tys = tcx.mk_type_list(element_tys_iter); + let pat_ty = tcx.mk_ty(ty::TyTuple(element_tys)); + self.demand_eqtype(pat.span, expected, pat_ty); + for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { + self.check_pat(elem, &element_tys[i]); } - } else { - debug!("unbound path {:?}", pat); - fcx.write_error(pat.id); - return; - }; - if let Some((opt_ty, segments, def)) = - resolve_ty_and_def_ufcs(fcx, path_res, Some(self_ty), - path, pat.span, pat.id) { - if check_assoc_item_is_const(pcx, def, pat.span) { - let scheme = tcx.lookup_item_type(def.def_id()); - let predicates = tcx.lookup_predicates(def.def_id()); - instantiate_path(fcx, segments, - scheme, &predicates, - opt_ty, def, pat.span, pat.id); - let const_ty = fcx.node_ty(pat.id); - demand::suptype(fcx, pat.span, expected, const_ty); + pat_ty + } + PatKind::Box(ref inner) => { + let inner_ty = self.next_ty_var(); + let uniq_ty = tcx.mk_box(inner_ty); + + if self.check_dereferencable(pat.span, expected, &inner) { + // Here, `demand::subtype` is good enough, but I don't + // think any errors can be introduced by using + // `demand::eqtype`. 
+ self.demand_eqtype(pat.span, expected, uniq_ty); + self.check_pat(&inner, inner_ty); + uniq_ty } else { - fcx.write_error(pat.id) + self.check_pat(&inner, tcx.types.err); + tcx.types.err } } - } - hir::PatStruct(ref path, ref fields, etc) => { - check_pat_struct(pcx, pat, path, fields, etc, expected); - } - hir::PatTup(ref elements) => { - let element_tys: Vec<_> = - (0..elements.len()).map(|_| fcx.infcx().next_ty_var()) - .collect(); - let pat_ty = tcx.mk_tup(element_tys.clone()); - fcx.write_ty(pat.id, pat_ty); - demand::eqtype(fcx, pat.span, expected, pat_ty); - for (element_pat, element_ty) in elements.iter().zip(element_tys) { - check_pat(pcx, &**element_pat, element_ty); - } - } - hir::PatBox(ref inner) => { - let inner_ty = fcx.infcx().next_ty_var(); - let uniq_ty = tcx.mk_box(inner_ty); - - if check_dereferencable(pcx, pat.span, expected, &**inner) { - // Here, `demand::subtype` is good enough, but I don't - // think any errors can be introduced by using - // `demand::eqtype`. - demand::eqtype(fcx, pat.span, expected, uniq_ty); - fcx.write_ty(pat.id, uniq_ty); - check_pat(pcx, &**inner, inner_ty); - } else { - fcx.write_error(pat.id); - check_pat(pcx, &**inner, tcx.types.err); + PatKind::Ref(ref inner, mutbl) => { + let expected = self.shallow_resolve(expected); + if self.check_dereferencable(pat.span, expected, &inner) { + // `demand::subtype` would be good enough, but using + // `eqtype` turns out to be equally general. See (*) + // below for details. + + // Take region, inner-type from expected type if we + // can, to avoid creating needless variables. This + // also helps with the bad interactions of the given + // hack detailed in (*) below. 
+ let (rptr_ty, inner_ty) = match expected.sty { + ty::TyRef(_, mt) if mt.mutbl == mutbl => { + (expected, mt.ty) + } + _ => { + let inner_ty = self.next_ty_var(); + let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; + let region = self.next_region_var(infer::PatternRegion(pat.span)); + let rptr_ty = tcx.mk_ref(region, mt); + self.demand_eqtype(pat.span, expected, rptr_ty); + (rptr_ty, inner_ty) + } + }; + + self.check_pat(&inner, inner_ty); + rptr_ty + } else { + self.check_pat(&inner, tcx.types.err); + tcx.types.err + } } - } - hir::PatRegion(ref inner, mutbl) => { - let expected = fcx.infcx().shallow_resolve(expected); - if check_dereferencable(pcx, pat.span, expected, &**inner) { - // `demand::subtype` would be good enough, but using - // `eqtype` turns out to be equally general. See (*) - // below for details. - - // Take region, inner-type from expected type if we - // can, to avoid creating needless variables. This - // also helps with the bad interactions of the given - // hack detailed in (*) below. 
- let (rptr_ty, inner_ty) = match expected.sty { - ty::TyRef(_, mt) if mt.mutbl == mutbl => { - (expected, mt.ty) + PatKind::Slice(ref before, ref slice, ref after) => { + let expected_ty = self.structurally_resolved_type(pat.span, expected); + let (inner_ty, slice_ty) = match expected_ty.sty { + ty::TyArray(inner_ty, size) => { + let min_len = before.len() + after.len(); + if slice.is_none() { + if min_len != size { + struct_span_err!( + tcx.sess, pat.span, E0527, + "pattern requires {} elements but array has {}", + min_len, size) + .span_label(pat.span, &format!("expected {} elements",size)) + .emit(); + } + (inner_ty, tcx.types.err) + } else if let Some(rest) = size.checked_sub(min_len) { + (inner_ty, tcx.mk_array(inner_ty, rest)) + } else { + struct_span_err!(tcx.sess, pat.span, E0528, + "pattern requires at least {} elements but array has {}", + min_len, size) + .span_label(pat.span, + &format!("pattern cannot match array of {} elements", size)) + .emit(); + (inner_ty, tcx.types.err) + } } + ty::TySlice(inner_ty) => (inner_ty, expected_ty), _ => { - let inner_ty = fcx.infcx().next_ty_var(); - let mt = ty::TypeAndMut { ty: inner_ty, mutbl: mutbl }; - let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span)); - let rptr_ty = tcx.mk_ref(tcx.mk_region(region), mt); - demand::eqtype(fcx, pat.span, expected, rptr_ty); - (rptr_ty, inner_ty) + if !expected_ty.references_error() { + let mut err = struct_span_err!( + tcx.sess, pat.span, E0529, + "expected an array or slice, found `{}`", + expected_ty); + if let ty::TyRef(_, ty::TypeAndMut { mutbl: _, ty }) = expected_ty.sty { + match ty.sty { + ty::TyArray(..) | ty::TySlice(..) 
=> { + err.help("the semantics of slice patterns changed \ + recently; see issue #23121"); + } + _ => {} + } + } + + err.span_label( pat.span, + &format!("pattern cannot match with input type `{}`", expected_ty) + ).emit(); + } + (tcx.types.err, tcx.types.err) } }; - fcx.write_ty(pat.id, rptr_ty); - check_pat(pcx, &**inner, inner_ty); - } else { - fcx.write_error(pat.id); - check_pat(pcx, &**inner, tcx.types.err); - } - } - hir::PatVec(ref before, ref slice, ref after) => { - let expected_ty = structurally_resolved_type(fcx, pat.span, expected); - let inner_ty = fcx.infcx().next_ty_var(); - let pat_ty = match expected_ty.sty { - ty::TyArray(_, size) => tcx.mk_array(inner_ty, { - let min_len = before.len() + after.len(); - match *slice { - Some(_) => cmp::max(min_len, size), - None => min_len - } - }), - _ => { - let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span)); - tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut { - ty: tcx.mk_slice(inner_ty), - mutbl: expected_ty.builtin_deref(true, ty::NoPreference).map(|mt| mt.mutbl) - .unwrap_or(hir::MutImmutable) - }) + for elt in before { + self.check_pat(&elt, inner_ty); } - }; - - fcx.write_ty(pat.id, pat_ty); - - // `demand::subtype` would be good enough, but using - // `eqtype` turns out to be equally general. See (*) - // below for details. 
- demand::eqtype(fcx, pat.span, expected, pat_ty); - - for elt in before { - check_pat(pcx, &**elt, inner_ty); - } - if let Some(ref slice) = *slice { - let region = fcx.infcx().next_region_var(infer::PatternRegion(pat.span)); - let mutbl = expected_ty.builtin_deref(true, ty::NoPreference) - .map_or(hir::MutImmutable, |mt| mt.mutbl); - - let slice_ty = tcx.mk_ref(tcx.mk_region(region), ty::TypeAndMut { - ty: tcx.mk_slice(inner_ty), - mutbl: mutbl - }); - check_pat(pcx, &**slice, slice_ty); - } - for elt in after { - check_pat(pcx, &**elt, inner_ty); + if let Some(ref slice) = *slice { + self.check_pat(&slice, slice_ty); + } + for elt in after { + self.check_pat(&elt, inner_ty); + } + expected_ty } - } - } - - - // (*) In most of the cases above (literals and constants being - // the exception), we relate types using strict equality, evewn - // though subtyping would be sufficient. There are a few reasons - // for this, some of which are fairly subtle and which cost me - // (nmatsakis) an hour or two debugging to remember, so I thought - // I'd write them down this time. - // - // 1. There is no loss of expressiveness here, though it does - // cause some inconvenience. What we are saying is that the type - // of `x` becomes *exactly* what is expected. This can cause unnecessary - // errors in some cases, such as this one: - // it will cause errors in a case like this: - // - // ``` - // fn foo<'x>(x: &'x int) { - // let a = 1; - // let mut z = x; - // z = &a; - // } - // ``` - // - // The reason we might get an error is that `z` might be - // assigned a type like `&'x int`, and then we would have - // a problem when we try to assign `&a` to `z`, because - // the lifetime of `&a` (i.e., the enclosing block) is - // shorter than `'x`. - // - // HOWEVER, this code works fine. The reason is that the - // expected type here is whatever type the user wrote, not - // the initializer's type. 
In this case the user wrote - // nothing, so we are going to create a type variable `Z`. - // Then we will assign the type of the initializer (`&'x - // int`) as a subtype of `Z`: `&'x int <: Z`. And hence we - // will instantiate `Z` as a type `&'0 int` where `'0` is - // a fresh region variable, with the constraint that `'x : - // '0`. So basically we're all set. - // - // Note that there are two tests to check that this remains true - // (`regions-reassign-{match,let}-bound-pointer.rs`). - // - // 2. Things go horribly wrong if we use subtype. The reason for - // THIS is a fairly subtle case involving bound regions. See the - // `givens` field in `region_inference`, as well as the test - // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`, - // for details. Short version is that we must sometimes detect - // relationships between specific region variables and regions - // bound in a closure signature, and that detection gets thrown - // off when we substitute fresh region variables here to enable - // subtyping. -} + }; -fn check_assoc_item_is_const(pcx: &pat_ctxt, def: def::Def, span: Span) -> bool { - match def { - def::DefAssociatedConst(..) => true, - def::DefMethod(..) => { - span_err!(pcx.fcx.ccx.tcx.sess, span, E0327, - "associated items in match patterns must be constants"); - false - } - _ => { - pcx.fcx.ccx.tcx.sess.span_bug(span, "non-associated item in - check_assoc_item_is_const"); - } + self.write_ty(pat.id, ty); + + // (*) In most of the cases above (literals and constants being + // the exception), we relate types using strict equality, evewn + // though subtyping would be sufficient. There are a few reasons + // for this, some of which are fairly subtle and which cost me + // (nmatsakis) an hour or two debugging to remember, so I thought + // I'd write them down this time. + // + // 1. There is no loss of expressiveness here, though it does + // cause some inconvenience. 
What we are saying is that the type + // of `x` becomes *exactly* what is expected. This can cause unnecessary + // errors in some cases, such as this one: + // it will cause errors in a case like this: + // + // ``` + // fn foo<'x>(x: &'x int) { + // let a = 1; + // let mut z = x; + // z = &a; + // } + // ``` + // + // The reason we might get an error is that `z` might be + // assigned a type like `&'x int`, and then we would have + // a problem when we try to assign `&a` to `z`, because + // the lifetime of `&a` (i.e., the enclosing block) is + // shorter than `'x`. + // + // HOWEVER, this code works fine. The reason is that the + // expected type here is whatever type the user wrote, not + // the initializer's type. In this case the user wrote + // nothing, so we are going to create a type variable `Z`. + // Then we will assign the type of the initializer (`&'x + // int`) as a subtype of `Z`: `&'x int <: Z`. And hence we + // will instantiate `Z` as a type `&'0 int` where `'0` is + // a fresh region variable, with the constraint that `'x : + // '0`. So basically we're all set. + // + // Note that there are two tests to check that this remains true + // (`regions-reassign-{match,let}-bound-pointer.rs`). + // + // 2. Things go horribly wrong if we use subtype. The reason for + // THIS is a fairly subtle case involving bound regions. See the + // `givens` field in `region_inference`, as well as the test + // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`, + // for details. Short version is that we must sometimes detect + // relationships between specific region variables and regions + // bound in a closure signature, and that detection gets thrown + // off when we substitute fresh region variables here to enable + // subtyping. 
} -} -pub fn check_dereferencable<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, - span: Span, expected: Ty<'tcx>, - inner: &hir::Pat) -> bool { - let fcx = pcx.fcx; - let tcx = pcx.fcx.ccx.tcx; - if pat_is_binding(&tcx.def_map.borrow(), inner) { - let expected = fcx.infcx().shallow_resolve(expected); - expected.builtin_deref(true, ty::NoPreference).map_or(true, |mt| match mt.ty.sty { - ty::TyTrait(_) => { - // This is "x = SomeTrait" being reduced from - // "let &x = &SomeTrait" or "let box x = Box", an error. - span_err!(tcx.sess, span, E0033, - "type `{}` cannot be dereferenced", - fcx.infcx().ty_to_string(expected)); - false + pub fn check_dereferencable(&self, span: Span, expected: Ty<'tcx>, inner: &hir::Pat) -> bool { + if let PatKind::Binding(..) = inner.node { + if let Some(mt) = self.shallow_resolve(expected).builtin_deref(true, ty::NoPreference) { + if let ty::TyDynamic(..) = mt.ty.sty { + // This is "x = SomeTrait" being reduced from + // "let &x = &SomeTrait" or "let box x = Box", an error. + let type_str = self.ty_to_string(expected); + struct_span_err!(self.tcx.sess, span, E0033, + "type `{}` cannot be dereferenced", type_str) + .span_label(span, &format!("type `{}` cannot be dereferenced", type_str)) + .emit(); + return false + } } - _ => true - }) - } else { + } true } -} -pub fn check_match<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - discrim: &'tcx hir::Expr, - arms: &'tcx [hir::Arm], - expected: Expectation<'tcx>, - match_src: hir::MatchSource) { - let tcx = fcx.ccx.tcx; - - // Not entirely obvious: if matches may create ref bindings, we - // want to use the *precise* type of the discriminant, *not* some - // supertype, as the "discriminant type" (issue #23116). 
- let contains_ref_bindings = arms.iter() - .filter_map(|a| tcx.arm_contains_ref_binding(a)) - .max_by_key(|m| match *m { - hir::MutMutable => 1, - hir::MutImmutable => 0, - }); - let discrim_ty; - if let Some(m) = contains_ref_bindings { - check_expr_with_lvalue_pref(fcx, discrim, LvaluePreference::from_mutbl(m)); - discrim_ty = fcx.expr_ty(discrim); - } else { - // ...but otherwise we want to use any supertype of the - // discriminant. This is sort of a workaround, see note (*) in - // `check_pat` for some details. - discrim_ty = fcx.infcx().next_ty_var(); - check_expr_has_type(fcx, discrim, discrim_ty); - }; - - // Typecheck the patterns first, so that we get types for all the - // bindings. - for arm in arms { - let mut pcx = pat_ctxt { - fcx: fcx, - map: pat_id_map(&tcx.def_map, &*arm.pats[0]), + pub fn check_match(&self, + expr: &'gcx hir::Expr, + discrim: &'gcx hir::Expr, + arms: &'gcx [hir::Arm], + expected: Expectation<'tcx>, + match_src: hir::MatchSource) -> Ty<'tcx> { + let tcx = self.tcx; + + // Not entirely obvious: if matches may create ref bindings, we + // want to use the *precise* type of the discriminant, *not* some + // supertype, as the "discriminant type" (issue #23116). + let contains_ref_bindings = arms.iter() + .filter_map(|a| a.contains_ref_binding()) + .max_by_key(|m| match *m { + hir::MutMutable => 1, + hir::MutImmutable => 0, + }); + let discrim_ty; + if let Some(m) = contains_ref_bindings { + discrim_ty = self.check_expr_with_lvalue_pref(discrim, LvaluePreference::from_mutbl(m)); + } else { + // ...but otherwise we want to use any supertype of the + // discriminant. This is sort of a workaround, see note (*) in + // `check_pat` for some details. + discrim_ty = self.next_ty_var(); + self.check_expr_has_type(discrim, discrim_ty); }; - for p in &arm.pats { - check_pat(&mut pcx, &**p, discrim_ty); - } - } - - // Now typecheck the blocks. - // - // The result of the match is the common supertype of all the - // arms. 
Start out the value as bottom, since it's the, well, - // bottom the type lattice, and we'll be moving up the lattice as - // we process each arm. (Note that any match with 0 arms is matching - // on any empty type and is therefore unreachable; should the flow - // of execution reach it, we will panic, so bottom is an appropriate - // type in that case) - let expected = expected.adjust_for_branches(fcx); - let result_ty = arms.iter().fold(fcx.infcx().next_diverging_ty_var(), |result_ty, arm| { - let bty = match expected { + let discrim_diverges = self.diverges.get(); + self.diverges.set(Diverges::Maybe); + + // Typecheck the patterns first, so that we get types for all the + // bindings. + let all_arm_pats_diverge: Vec<_> = arms.iter().map(|arm| { + let mut all_pats_diverge = Diverges::WarnedAlways; + for p in &arm.pats { + self.diverges.set(Diverges::Maybe); + self.check_pat(&p, discrim_ty); + all_pats_diverge &= self.diverges.get(); + } + all_pats_diverge + }).collect(); + + // Now typecheck the blocks. + // + // The result of the match is the common supertype of all the + // arms. Start out the value as bottom, since it's the, well, + // bottom the type lattice, and we'll be moving up the lattice as + // we process each arm. (Note that any match with 0 arms is matching + // on any empty type and is therefore unreachable; should the flow + // of execution reach it, we will panic, so bottom is an appropriate + // type in that case) + let expected = expected.adjust_for_branches(self); + let mut result_ty = self.next_diverging_ty_var(); + let mut all_arms_diverge = Diverges::WarnedAlways; + let coerce_first = match expected { // We don't coerce to `()` so that if the match expression is a // statement it's branches can have any consistent type. That allows // us to give better error messages (pointing to a usually better // arm for inconsistent arms or to the whole match when a `()` type // is required). 
- Expectation::ExpectHasType(ety) if ety != fcx.tcx().mk_nil() => { - check_expr_coercable_to_type(fcx, &*arm.body, ety); + Expectation::ExpectHasType(ety) if ety != self.tcx.mk_nil() => { ety } - _ => { - check_expr_with_expectation(fcx, &*arm.body, expected); - fcx.node_ty(arm.body.id) - } + _ => result_ty }; - if let Some(ref e) = arm.guard { - check_expr_has_type(fcx, &**e, tcx.types.bool); - } + for (i, (arm, pats_diverge)) in arms.iter().zip(all_arm_pats_diverge).enumerate() { + if let Some(ref e) = arm.guard { + self.diverges.set(pats_diverge); + self.check_expr_has_type(e, tcx.types.bool); + } - if result_ty.references_error() || bty.references_error() { - tcx.types.err - } else { - let (origin, expected, found) = match match_src { - /* if-let construct without an else block */ - hir::MatchSource::IfLetDesugar { contains_else_clause } - if !contains_else_clause => ( - TypeOrigin::IfExpressionWithNoElse(expr.span), - bty, - result_ty, - ), - _ => ( - TypeOrigin::MatchExpressionArm(expr.span, arm.body.span, match_src), - result_ty, - bty, - ), - }; + self.diverges.set(pats_diverge); + let arm_ty = self.check_expr_with_expectation(&arm.body, expected); + all_arms_diverge &= self.diverges.get(); - infer::common_supertype( - fcx.infcx(), - origin, - true, - expected, - found, - ) - } - }); + if result_ty.references_error() || arm_ty.references_error() { + result_ty = tcx.types.err; + continue; + } - fcx.write_ty(expr.id, result_ty); -} + // Handle the fallback arm of a desugared if-let like a missing else. 
+ let is_if_let_fallback = match match_src { + hir::MatchSource::IfLetDesugar { contains_else_clause: false } => { + i == arms.len() - 1 && arm_ty.is_nil() + } + _ => false + }; -pub struct pat_ctxt<'a, 'tcx: 'a> { - pub fcx: &'a FnCtxt<'a, 'tcx>, - pub map: PatIdMap, -} + let cause = if is_if_let_fallback { + self.cause(expr.span, ObligationCauseCode::IfExpressionWithNoElse) + } else { + self.cause(expr.span, ObligationCauseCode::MatchExpressionArm { + arm_span: arm.body.span, + source: match_src + }) + }; -pub fn check_pat_struct<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, pat: &'tcx hir::Pat, - path: &hir::Path, fields: &'tcx [Spanned], - etc: bool, expected: Ty<'tcx>) { - let fcx = pcx.fcx; - let tcx = pcx.fcx.ccx.tcx; - - let def = tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); - let variant = match fcx.def_struct_variant(def, path.span) { - Some((_, variant)) => variant, - None => { - let name = pprust::path_to_string(path); - span_err!(tcx.sess, pat.span, E0163, - "`{}` does not name a struct or a struct variant", name); - fcx.write_error(pat.id); + let result = if is_if_let_fallback { + self.eq_types(true, &cause, arm_ty, result_ty) + .map(|infer_ok| { + self.register_infer_ok_obligations(infer_ok); + arm_ty + }) + } else if i == 0 { + // Special-case the first arm, as it has no "previous expressions". 
+ self.try_coerce(&arm.body, arm_ty, coerce_first) + } else { + let prev_arms = || arms[..i].iter().map(|arm| &*arm.body); + self.try_find_coercion_lub(&cause, prev_arms, result_ty, &arm.body, arm_ty) + }; - for field in fields { - check_pat(pcx, &field.node.pat, tcx.types.err); - } - return; + result_ty = match result { + Ok(ty) => ty, + Err(e) => { + let (expected, found) = if is_if_let_fallback { + (arm_ty, result_ty) + } else { + (result_ty, arm_ty) + }; + self.report_mismatched_types(&cause, expected, found, e); + self.tcx.types.err + } + }; } - }; - - let pat_ty = pcx.fcx.instantiate_type(def.def_id(), path); - let item_substs = match pat_ty.sty { - ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, - _ => tcx.sess.span_bug(pat.span, "struct variant is not an ADT") - }; - demand::eqtype(fcx, pat.span, expected, pat_ty); - check_struct_pat_fields(pcx, pat.span, fields, variant, &item_substs, etc); - - fcx.write_ty(pat.id, pat_ty); - fcx.write_substs(pat.id, ty::ItemSubsts { substs: item_substs.clone() }); -} -// This function exists due to the warning "diagnostic code E0164 already used" -fn bad_struct_kind_err(sess: &Session, pat: &hir::Pat, path: &hir::Path, lint: bool) { - let name = pprust::path_to_string(path); - let msg = format!("`{}` does not name a tuple variant or a tuple struct", name); - if lint { - sess.add_lint(lint::builtin::MATCH_OF_UNIT_VARIANT_VIA_PAREN_DOTDOT, - pat.id, - pat.span, - msg); - } else { - span_err!(sess, pat.span, E0164, "{}", msg); + // We won't diverge unless the discriminant or all arms diverge. + self.diverges.set(discrim_diverges | all_arms_diverge); + + result_ty } -} -pub fn check_pat_enum<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, - pat: &hir::Pat, - path: &hir::Path, - subpats: Option<&'tcx [P]>, - expected: Ty<'tcx>, - is_tuple_struct_pat: bool) -{ - // Typecheck the path. 
- let fcx = pcx.fcx; - let tcx = pcx.fcx.ccx.tcx; - - let path_res = match tcx.def_map.borrow().get(&pat.id) { - Some(&path_res) if path_res.base_def != def::DefErr => path_res, - _ => { - fcx.write_error(pat.id); - - if let Some(subpats) = subpats { - for pat in subpats { - check_pat(pcx, &**pat, tcx.types.err); - } + fn check_pat_struct(&self, + pat: &'gcx hir::Pat, + qpath: &hir::QPath, + fields: &'gcx [Spanned], + etc: bool, + expected: Ty<'tcx>) -> Ty<'tcx> + { + // Resolve the path and check the definition for errors. + let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(qpath, pat.id) { + variant_ty + } else { + for field in fields { + self.check_pat(&field.node.pat, self.tcx.types.err); } + return self.tcx.types.err; + }; - return; - } - }; - - let (opt_ty, segments, def) = match resolve_ty_and_def_ufcs(fcx, path_res, - None, path, - pat.span, pat.id) { - Some(resolution) => resolution, - // Error handling done inside resolve_ty_and_def_ufcs, so if - // resolution fails just return. - None => {return;} - }; - - // Items that were partially resolved before should have been resolved to - // associated constants (i.e. not methods). - if path_res.depth != 0 && !check_assoc_item_is_const(pcx, def, pat.span) { - fcx.write_error(pat.id); - return; - } - - let enum_def = def.variant_def_ids() - .map_or_else(|| def.def_id(), |(enum_def, _)| enum_def); + // Type check the path. 
+ self.demand_eqtype(pat.span, expected, pat_ty); - let ctor_scheme = tcx.lookup_item_type(enum_def); - let ctor_predicates = tcx.lookup_predicates(enum_def); - let path_scheme = if ctor_scheme.ty.is_fn() { - let fn_ret = tcx.no_late_bound_regions(&ctor_scheme.ty.fn_ret()).unwrap(); - ty::TypeScheme { - ty: fn_ret.unwrap(), - generics: ctor_scheme.generics, - } - } else { - ctor_scheme - }; - instantiate_path(pcx.fcx, segments, - path_scheme, &ctor_predicates, - opt_ty, def, pat.span, pat.id); - - let report_bad_struct_kind = |is_warning| { - bad_struct_kind_err(tcx.sess, pat, path, is_warning); - if is_warning { return; } - fcx.write_error(pat.id); - if let Some(subpats) = subpats { - for pat in subpats { - check_pat(pcx, &**pat, tcx.types.err); - } - } - }; - - // If we didn't have a fully resolved path to start with, we had an - // associated const, and we should quit now, since the rest of this - // function uses checks specific to structs and enums. - if path_res.depth != 0 { - if is_tuple_struct_pat { - report_bad_struct_kind(false); - } else { - let pat_ty = fcx.node_ty(pat.id); - demand::suptype(fcx, pat.span, expected, pat_ty); - } - return; + // Type check subpatterns. + self.check_struct_pat_fields(pat_ty, pat.id, pat.span, variant, fields, etc); + pat_ty } - let pat_ty = fcx.node_ty(pat.id); - demand::eqtype(fcx, pat.span, expected, pat_ty); - - let real_path_ty = fcx.node_ty(pat.id); - let (arg_tys, kind_name): (Vec<_>, &'static str) = match real_path_ty.sty { - ty::TyEnum(enum_def, expected_substs) - if def == def::DefVariant(enum_def.did, def.def_id(), false) => - { - let variant = enum_def.variant_of_def(def); - if is_tuple_struct_pat && variant.kind() != ty::VariantKind::Tuple { - // Matching unit variants with tuple variant patterns (`UnitVariant(..)`) - // is allowed for backward compatibility. 
- let is_special_case = variant.kind() == ty::VariantKind::Unit; - report_bad_struct_kind(is_special_case); - if !is_special_case { - return - } + fn check_pat_path(&self, + pat: &hir::Pat, + qpath: &hir::QPath, + expected: Ty<'tcx>) -> Ty<'tcx> + { + let tcx = self.tcx; + let report_unexpected_def = |def: Def| { + span_err!(tcx.sess, pat.span, E0533, + "expected unit struct/variant or constant, found {} `{}`", + def.kind_name(), qpath); + }; + + // Resolve the path and check the definition for errors. + let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + match def { + Def::Err => { + self.set_tainted_by_errors(); + return tcx.types.err; } - (variant.fields - .iter() - .map(|f| fcx.instantiate_type_scheme(pat.span, - expected_substs, - &f.unsubst_ty())) - .collect(), - "variant") - } - ty::TyStruct(struct_def, expected_substs) => { - let variant = struct_def.struct_variant(); - if is_tuple_struct_pat && variant.kind() != ty::VariantKind::Tuple { - // Matching unit structs with tuple variant patterns (`UnitVariant(..)`) - // is allowed for backward compatibility. - let is_special_case = variant.kind() == ty::VariantKind::Unit; - report_bad_struct_kind(is_special_case); - return; + Def::Method(..) => { + report_unexpected_def(def); + return tcx.types.err; } - (variant.fields - .iter() - .map(|f| fcx.instantiate_type_scheme(pat.span, - expected_substs, - &f.unsubst_ty())) - .collect(), - "struct") - } - _ => { - report_bad_struct_kind(false); - return; + Def::VariantCtor(_, CtorKind::Const) | + Def::StructCtor(_, CtorKind::Const) | + Def::Const(..) | Def::AssociatedConst(..) 
=> {} // OK + _ => bug!("unexpected pattern definition: {:?}", def) } - }; - if let Some(subpats) = subpats { - if subpats.len() == arg_tys.len() { - for (subpat, arg_ty) in subpats.iter().zip(arg_tys) { - check_pat(pcx, &**subpat, arg_ty); - } - } else if arg_tys.is_empty() { - span_err!(tcx.sess, pat.span, E0024, - "this pattern has {} field{}, but the corresponding {} has no fields", - subpats.len(), if subpats.len() == 1 {""} else {"s"}, kind_name); + // Type check the path. + let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id); + self.demand_suptype(pat.span, expected, pat_ty); + pat_ty + } + fn check_pat_tuple_struct(&self, + pat: &hir::Pat, + qpath: &hir::QPath, + subpats: &'gcx [P], + ddpos: Option, + expected: Ty<'tcx>) -> Ty<'tcx> + { + let tcx = self.tcx; + let on_error = || { for pat in subpats { - check_pat(pcx, &**pat, tcx.types.err); + self.check_pat(&pat, tcx.types.err); } - } else { - span_err!(tcx.sess, pat.span, E0023, - "this pattern has {} field{}, but the corresponding {} has {} field{}", - subpats.len(), if subpats.len() == 1 {""} else {"s"}, - kind_name, - arg_tys.len(), if arg_tys.len() == 1 {""} else {"s"}); + }; + let report_unexpected_def = |def: Def| { + let msg = format!("expected tuple struct/variant, found {} `{}`", + def.kind_name(), qpath); + struct_span_err!(tcx.sess, pat.span, E0164, "{}", msg) + .span_label(pat.span, &format!("not a tuple variant or struct")).emit(); + on_error(); + }; - for pat in subpats { - check_pat(pcx, &**pat, tcx.types.err); + // Resolve the path and check the definition for errors. + let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + let variant = match def { + Def::Err => { + self.set_tainted_by_errors(); + on_error(); + return tcx.types.err; } - } - } -} - -/// `path` is the AST path item naming the type of this struct. -/// `fields` is the field patterns of the struct pattern. 
-/// `struct_fields` describes the type of each field of the struct. -/// `struct_id` is the ID of the struct. -/// `etc` is true if the pattern said '...' and false otherwise. -pub fn check_struct_pat_fields<'a, 'tcx>(pcx: &pat_ctxt<'a, 'tcx>, - span: Span, - fields: &'tcx [Spanned], - variant: ty::VariantDef<'tcx>, - substs: &Substs<'tcx>, - etc: bool) { - let tcx = pcx.fcx.ccx.tcx; - - // Index the struct fields' types. - let field_map = variant.fields - .iter() - .map(|field| (field.name, field)) - .collect::>(); - - // Keep track of which fields have already appeared in the pattern. - let mut used_fields = FnvHashMap(); - - // Typecheck each field. - for &Spanned { node: ref field, span } in fields { - let field_ty = match used_fields.entry(field.name) { - Occupied(occupied) => { - let mut err = struct_span_err!(tcx.sess, span, E0025, - "field `{}` bound multiple times in the pattern", - field.name); - span_note!(&mut err, *occupied.get(), - "field `{}` previously bound here", - field.name); - err.emit(); - tcx.types.err + Def::AssociatedConst(..) | Def::Method(..) => { + report_unexpected_def(def); + return tcx.types.err; } - Vacant(vacant) => { - vacant.insert(span); - field_map.get(&field.name) - .map(|f| pcx.fcx.field_ty(span, f, substs)) - .unwrap_or_else(|| { - span_err!(tcx.sess, span, E0026, - "struct `{}` does not have a field named `{}`", - tcx.item_path_str(variant.did), - field.name); - tcx.types.err - }) + Def::VariantCtor(_, CtorKind::Fn) | + Def::StructCtor(_, CtorKind::Fn) => { + tcx.expect_variant_def(def) } + _ => bug!("unexpected pattern definition: {:?}", def) }; - check_pat(pcx, &*field.pat, field_ty); + // Type check the path. + let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id); + // Replace constructor type with constructed type for tuple struct patterns. 
+ let pat_ty = tcx.no_late_bound_regions(&pat_ty.fn_ret()).expect("expected fn type"); + self.demand_eqtype(pat.span, expected, pat_ty); + + // Type check subpatterns. + if subpats.len() == variant.fields.len() || + subpats.len() < variant.fields.len() && ddpos.is_some() { + let substs = match pat_ty.sty { + ty::TyAdt(_, substs) => substs, + ref ty => bug!("unexpected pattern type {:?}", ty), + }; + for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { + let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs); + self.check_pat(&subpat, field_ty); + + self.tcx.check_stability(variant.fields[i].did, pat.id, subpat.span); + } + } else { + let subpats_ending = if subpats.len() == 1 { "" } else { "s" }; + let fields_ending = if variant.fields.len() == 1 { "" } else { "s" }; + struct_span_err!(tcx.sess, pat.span, E0023, + "this pattern has {} field{}, but the corresponding {} has {} field{}", + subpats.len(), subpats_ending, def.kind_name(), + variant.fields.len(), fields_ending) + .span_label(pat.span, &format!("expected {} field{}, found {}", + variant.fields.len(), fields_ending, subpats.len())) + .emit(); + on_error(); + return tcx.types.err; + } + pat_ty } - // Report an error if not all the fields were specified. - if !etc { - for field in variant.fields + fn check_struct_pat_fields(&self, + adt_ty: Ty<'tcx>, + pat_id: ast::NodeId, + span: Span, + variant: &'tcx ty::VariantDef, + fields: &'gcx [Spanned], + etc: bool) { + let tcx = self.tcx; + + let (substs, kind_name) = match adt_ty.sty { + ty::TyAdt(adt, substs) => (substs, adt.variant_descr()), + _ => span_bug!(span, "struct pattern is not an ADT") + }; + + // Index the struct fields' types. 
+ let field_map = variant.fields .iter() - .filter(|field| !used_fields.contains_key(&field.name)) { - span_err!(tcx.sess, span, E0027, - "pattern does not mention field `{}`", - field.name); + .map(|field| (field.name, field)) + .collect::>(); + + // Keep track of which fields have already appeared in the pattern. + let mut used_fields = FxHashMap(); + + // Typecheck each field. + for &Spanned { node: ref field, span } in fields { + let field_ty = match used_fields.entry(field.name) { + Occupied(occupied) => { + struct_span_err!(tcx.sess, span, E0025, + "field `{}` bound multiple times \ + in the pattern", + field.name) + .span_label(span, + &format!("multiple uses of `{}` in pattern", field.name)) + .span_label(*occupied.get(), &format!("first use of `{}`", field.name)) + .emit(); + tcx.types.err + } + Vacant(vacant) => { + vacant.insert(span); + field_map.get(&field.name) + .map(|f| { + self.tcx.check_stability(f.did, pat_id, span); + + self.field_ty(span, f, substs) + }) + .unwrap_or_else(|| { + struct_span_err!(tcx.sess, span, E0026, + "{} `{}` does not have a field named `{}`", + kind_name, + tcx.item_path_str(variant.did), + field.name) + .span_label(span, + &format!("{} `{}` does not have field `{}`", + kind_name, + tcx.item_path_str(variant.did), + field.name)) + .emit(); + + tcx.types.err + }) + } + }; + + self.check_pat(&field.pat, field_ty); + } + + // Report an error if incorrect number of the fields were specified. 
+ if kind_name == "union" { + if fields.len() != 1 { + tcx.sess.span_err(span, "union patterns should have exactly one field"); + } + if etc { + tcx.sess.span_err(span, "`..` cannot be used in union patterns"); + } + } else if !etc { + for field in variant.fields + .iter() + .filter(|field| !used_fields.contains_key(&field.name)) { + struct_span_err!(tcx.sess, span, E0027, + "pattern does not mention field `{}`", + field.name) + .span_label(span, &format!("missing field `{}`", field.name)) + .emit(); + } } } } diff --git a/src/librustc_typeck/check/assoc.rs b/src/librustc_typeck/check/assoc.rs index 91916efa88277..9610477d8fd91 100644 --- a/src/librustc_typeck/check/assoc.rs +++ b/src/librustc_typeck/check/assoc.rs @@ -8,20 +8,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::infer::InferCtxt; -use middle::traits::{self, FulfillmentContext, Normalized, MiscObligation, - SelectionContext, ObligationCause}; -use middle::ty::fold::TypeFoldable; +use rustc::infer::InferCtxt; +use rustc::traits::{self, FulfillmentContext, Normalized, MiscObligation, SelectionContext, + ObligationCause}; +use rustc::ty::fold::TypeFoldable; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; + +// FIXME(@jroesch): Ideally we should be able to drop the fulfillment_cx argument. +pub fn normalize_associated_types_in<'a, 'gcx, 'tcx, T>( + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + fulfillment_cx: &mut FulfillmentContext<'tcx>, + span: Span, + body_id: ast::NodeId, + value: &T) -> T -//FIXME(@jroesch): Ideally we should be able to drop the fulfillment_cx argument. 
-pub fn normalize_associated_types_in<'a,'tcx,T>(infcx: &InferCtxt<'a,'tcx>, - fulfillment_cx: &mut FulfillmentContext<'tcx>, - span: Span, - body_id: ast::NodeId, - value: &T) - -> T where T : TypeFoldable<'tcx> { debug!("normalize_associated_types_in(value={:?})", value); diff --git a/src/librustc_typeck/check/autoderef.rs b/src/librustc_typeck/check/autoderef.rs new file mode 100644 index 0000000000000..e72dba858c562 --- /dev/null +++ b/src/librustc_typeck/check/autoderef.rs @@ -0,0 +1,224 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use astconv::AstConv; + +use super::FnCtxt; + +use rustc::traits; +use rustc::ty::{self, Ty, TraitRef}; +use rustc::ty::{ToPredicate, TypeFoldable}; +use rustc::ty::{MethodCall, MethodCallee}; +use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue}; +use rustc::hir; + +use syntax_pos::Span; +use syntax::symbol::Symbol; + +#[derive(Copy, Clone, Debug)] +enum AutoderefKind { + Builtin, + Overloaded, +} + +pub struct Autoderef<'a, 'gcx: 'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + steps: Vec<(Ty<'tcx>, AutoderefKind)>, + cur_ty: Ty<'tcx>, + obligations: Vec>, + at_start: bool, + span: Span, +} + +impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { + type Item = (Ty<'tcx>, usize); + + fn next(&mut self) -> Option { + let tcx = self.fcx.tcx; + + debug!("autoderef: steps={:?}, cur_ty={:?}", + self.steps, + self.cur_ty); + if self.at_start { + self.at_start = false; + debug!("autoderef stage #0 is {:?}", self.cur_ty); + return Some((self.cur_ty, 0)); + } + + if self.steps.len() == tcx.sess.recursion_limit.get() { + // We've reached the recursion limit, error gracefully. 
+ struct_span_err!(tcx.sess, + self.span, + E0055, + "reached the recursion limit while auto-dereferencing {:?}", + self.cur_ty) + .span_label(self.span, &format!("deref recursion limit reached")) + .emit(); + return None; + } + + if self.cur_ty.is_ty_var() { + return None; + } + + // Otherwise, deref if type is derefable: + let (kind, new_ty) = if let Some(mt) = self.cur_ty.builtin_deref(false, NoPreference) { + (AutoderefKind::Builtin, mt.ty) + } else { + match self.overloaded_deref_ty(self.cur_ty) { + Some(ty) => (AutoderefKind::Overloaded, ty), + _ => return None, + } + }; + + if new_ty.references_error() { + return None; + } + + self.steps.push((self.cur_ty, kind)); + debug!("autoderef stage #{:?} is {:?} from {:?}", + self.steps.len(), + new_ty, + (self.cur_ty, kind)); + self.cur_ty = new_ty; + + Some((self.cur_ty, self.steps.len())) + } +} + +impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { + fn overloaded_deref_ty(&mut self, ty: Ty<'tcx>) -> Option> { + debug!("overloaded_deref_ty({:?})", ty); + + let tcx = self.fcx.tcx(); + + // + let trait_ref = TraitRef { + def_id: match tcx.lang_items.deref_trait() { + Some(f) => f, + None => return None, + }, + substs: tcx.mk_substs_trait(self.cur_ty, &[]), + }; + + let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); + + let mut selcx = traits::SelectionContext::new(self.fcx); + let obligation = traits::Obligation::new(cause.clone(), trait_ref.to_predicate()); + if !selcx.evaluate_obligation(&obligation) { + debug!("overloaded_deref_ty: cannot match obligation"); + return None; + } + + let normalized = traits::normalize_projection_type(&mut selcx, + ty::ProjectionTy { + trait_ref: trait_ref, + item_name: Symbol::intern("Target"), + }, + cause, + 0); + + debug!("overloaded_deref_ty({:?}) = {:?}", ty, normalized); + self.obligations.extend(normalized.obligations); + + Some(self.fcx.resolve_type_vars_if_possible(&normalized.value)) + } + + pub fn unambiguous_final_ty(&self) -> Ty<'tcx> { + 
self.fcx.structurally_resolved_type(self.span, self.cur_ty) + } + + pub fn finalize<'b, I>(self, pref: LvaluePreference, exprs: I) + where I: IntoIterator + { + let methods: Vec<_> = self.steps + .iter() + .map(|&(ty, kind)| { + if let AutoderefKind::Overloaded = kind { + self.fcx.try_overloaded_deref(self.span, None, ty, pref) + } else { + None + } + }) + .collect(); + + debug!("finalize({:?}) - {:?},{:?}", + pref, + methods, + self.obligations); + + for expr in exprs { + debug!("finalize - finalizing #{} - {:?}", expr.id, expr); + for (n, method) in methods.iter().enumerate() { + if let &Some(method) = method { + let method_call = MethodCall::autoderef(expr.id, n as u32); + self.fcx.tables.borrow_mut().method_map.insert(method_call, method); + } + } + } + + for obligation in self.obligations { + self.fcx.register_predicate(obligation); + } + } +} + +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn autoderef(&'a self, span: Span, base_ty: Ty<'tcx>) -> Autoderef<'a, 'gcx, 'tcx> { + Autoderef { + fcx: self, + steps: vec![], + cur_ty: self.resolve_type_vars_if_possible(&base_ty), + obligations: vec![], + at_start: true, + span: span, + } + } + + pub fn try_overloaded_deref(&self, + span: Span, + base_expr: Option<&hir::Expr>, + base_ty: Ty<'tcx>, + lvalue_pref: LvaluePreference) + -> Option> { + debug!("try_overloaded_deref({:?},{:?},{:?},{:?})", + span, + base_expr, + base_ty, + lvalue_pref); + // Try DerefMut first, if preferred. + let method = match (lvalue_pref, self.tcx.lang_items.deref_mut_trait()) { + (PreferMutLvalue, Some(trait_did)) => { + self.lookup_method_in_trait(span, + base_expr, + Symbol::intern("deref_mut"), + trait_did, + base_ty, + None) + } + _ => None, + }; + + // Otherwise, fall back to Deref. 
+ let method = match (method, self.tcx.lang_items.deref_trait()) { + (None, Some(trait_did)) => { + self.lookup_method_in_trait(span, + base_expr, + Symbol::intern("deref"), + trait_did, + base_ty, + None) + } + (method, _) => method, + }; + + method + } +} diff --git a/src/librustc_typeck/check/callee.rs b/src/librustc_typeck/check/callee.rs index a1b378d84d001..6d00f481fa263 100644 --- a/src/librustc_typeck/check/callee.rs +++ b/src/librustc_typeck/check/callee.rs @@ -8,364 +8,344 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::autoderef; -use super::check_argument_types; -use super::check_expr; -use super::check_method_argument_types; -use super::demand; -use super::DeferredCallResolution; -use super::err_args; -use super::Expectation; -use super::expected_types_for_fn_args; -use super::FnCtxt; -use super::method; -use super::structurally_resolved_type; -use super::TupleArgumentsFlag; -use super::UnresolvedTypeAction; -use super::write_call; +use super::{DeferredCallResolution, Expectation, FnCtxt, TupleArgumentsFlag}; use CrateCtxt; -use middle::cstore::LOCAL_CRATE; -use middle::def; -use middle::def_id::DefId; -use middle::infer; -use middle::ty::{self, LvaluePreference, Ty}; -use syntax::codemap::Span; -use syntax::parse::token; -use syntax::ptr::P; +use hir::def::Def; +use hir::def_id::{DefId, LOCAL_CRATE}; +use hir::print; +use rustc::{infer, traits}; +use rustc::ty::{self, LvaluePreference, Ty}; +use syntax::symbol::Symbol; +use syntax_pos::Span; -use rustc_front::hir; +use rustc::hir; /// Check that it is legal to call methods of the trait corresponding /// to `trait_id` (this only cares about the trait, not the specific /// method that is called) pub fn check_legal_trait_for_method_call(ccx: &CrateCtxt, span: Span, trait_id: DefId) { - let tcx = ccx.tcx; - let did = Some(trait_id); - let li = &tcx.lang_items; - - if did == li.drop_trait() { - span_err!(tcx.sess, span, E0040, "explicit 
use of destructor method"); - } else if !tcx.sess.features.borrow().unboxed_closures { - // the #[feature(unboxed_closures)] feature isn't - // activated so we need to enforce the closure - // restrictions. - - let method = if did == li.fn_trait() { - "call" - } else if did == li.fn_mut_trait() { - "call_mut" - } else if did == li.fn_once_trait() { - "call_once" - } else { - return // not a closure method, everything is OK. - }; - - struct_span_err!(tcx.sess, span, E0174, - "explicit use of unboxed closure method `{}` is experimental", - method) - .fileline_help(span, "add `#![feature(unboxed_closures)]` to the crate \ - attributes to enable") + if ccx.tcx.lang_items.drop_trait() == Some(trait_id) { + struct_span_err!(ccx.tcx.sess, + span, + E0040, + "explicit use of destructor method") + .span_label(span, &format!("explicit destructor calls not allowed")) .emit(); } } -pub fn check_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &'tcx hir::Expr, - callee_expr: &'tcx hir::Expr, - arg_exprs: &'tcx [P], - expected: Expectation<'tcx>) -{ - check_expr(fcx, callee_expr); - let original_callee_ty = fcx.expr_ty(callee_expr); - let (callee_ty, _, result) = - autoderef(fcx, - callee_expr.span, - original_callee_ty, - Some(callee_expr), - UnresolvedTypeAction::Error, - LvaluePreference::NoPreference, - |adj_ty, idx| { - try_overloaded_call_step(fcx, call_expr, callee_expr, adj_ty, idx) - }); - - match result { - None => { - // this will report an error since original_callee_ty is not a fn - confirm_builtin_call(fcx, call_expr, original_callee_ty, arg_exprs, expected); - } - - Some(CallStep::Builtin) => { - confirm_builtin_call(fcx, call_expr, callee_ty, arg_exprs, expected); - } - - Some(CallStep::DeferredClosure(fn_sig)) => { - confirm_deferred_closure_call(fcx, call_expr, arg_exprs, expected, fn_sig); - } - - Some(CallStep::Overloaded(method_callee)) => { - confirm_overloaded_call(fcx, call_expr, callee_expr, - arg_exprs, expected, method_callee); - } - } -} - enum 
CallStep<'tcx> { Builtin, DeferredClosure(ty::FnSig<'tcx>), - Overloaded(ty::MethodCallee<'tcx>) + Overloaded(ty::MethodCallee<'tcx>), } -fn try_overloaded_call_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &'tcx hir::Expr, - callee_expr: &'tcx hir::Expr, - adjusted_ty: Ty<'tcx>, - autoderefs: usize) - -> Option> -{ - debug!("try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?}, autoderefs={})", - call_expr, - adjusted_ty, - autoderefs); - - // If the callee is a bare function or a closure, then we're all set. - match structurally_resolved_type(fcx, callee_expr.span, adjusted_ty).sty { - ty::TyBareFn(..) => { - fcx.write_autoderef_adjustment(callee_expr.id, autoderefs); - return Some(CallStep::Builtin); - } +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn check_call(&self, + call_expr: &'gcx hir::Expr, + callee_expr: &'gcx hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>) + -> Ty<'tcx> { + let original_callee_ty = self.check_expr(callee_expr); + let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty); + + let mut autoderef = self.autoderef(callee_expr.span, expr_ty); + let result = autoderef.by_ref() + .flat_map(|(adj_ty, idx)| { + self.try_overloaded_call_step(call_expr, callee_expr, adj_ty, idx) + }) + .next(); + let callee_ty = autoderef.unambiguous_final_ty(); + autoderef.finalize(LvaluePreference::NoPreference, Some(callee_expr)); + + let output = match result { + None => { + // this will report an error since original_callee_ty is not a fn + self.confirm_builtin_call(call_expr, original_callee_ty, arg_exprs, expected) + } - ty::TyClosure(def_id, ref substs) => { - assert_eq!(def_id.krate, LOCAL_CRATE); - - // Check whether this is a call to a closure where we - // haven't yet decided on whether the closure is fn vs - // fnmut vs fnonce. If so, we have to defer further processing. 
- if fcx.infcx().closure_kind(def_id).is_none() { - let closure_ty = - fcx.infcx().closure_type(def_id, substs); - let fn_sig = - fcx.infcx().replace_late_bound_regions_with_fresh_var(call_expr.span, - infer::FnCall, - &closure_ty.sig).0; - fcx.record_deferred_call_resolution(def_id, Box::new(CallResolution { - call_expr: call_expr, - callee_expr: callee_expr, - adjusted_ty: adjusted_ty, - autoderefs: autoderefs, - fn_sig: fn_sig.clone(), - closure_def_id: def_id - })); - return Some(CallStep::DeferredClosure(fn_sig)); + Some(CallStep::Builtin) => { + self.confirm_builtin_call(call_expr, callee_ty, arg_exprs, expected) } - } - // Hack: we know that there are traits implementing Fn for &F - // where F:Fn and so forth. In the particular case of types - // like `x: &mut FnMut()`, if there is a call `x()`, we would - // normally translate to `FnMut::call_mut(&mut x, ())`, but - // that winds up requiring `mut x: &mut FnMut()`. A little - // over the top. The simplest fix by far is to just ignore - // this case and deref again, so we wind up with - // `FnMut::call_mut(&mut *x, ())`. - ty::TyRef(..) 
if autoderefs == 0 => { - return None; - } + Some(CallStep::DeferredClosure(fn_sig)) => { + self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, fn_sig) + } - _ => {} + Some(CallStep::Overloaded(method_callee)) => { + self.confirm_overloaded_call(call_expr, + callee_expr, + arg_exprs, + expected, + method_callee) + } + }; + + // we must check that return type of called functions is WF: + self.register_wf_obligation(output, call_expr.span, traits::MiscObligation); + + output } - try_overloaded_call_traits(fcx, call_expr, callee_expr, adjusted_ty, autoderefs) - .map(|method_callee| CallStep::Overloaded(method_callee)) -} + fn try_overloaded_call_step(&self, + call_expr: &'gcx hir::Expr, + callee_expr: &'gcx hir::Expr, + adjusted_ty: Ty<'tcx>, + autoderefs: usize) + -> Option> { + debug!("try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?}, autoderefs={})", + call_expr, + adjusted_ty, + autoderefs); + + // If the callee is a bare function or a closure, then we're all set. + match self.structurally_resolved_type(callee_expr.span, adjusted_ty).sty { + ty::TyFnDef(..) | ty::TyFnPtr(_) => { + self.write_autoderef_adjustment(callee_expr.id, autoderefs, adjusted_ty); + return Some(CallStep::Builtin); + } -fn try_overloaded_call_traits<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &hir::Expr, - callee_expr: &hir::Expr, - adjusted_ty: Ty<'tcx>, - autoderefs: usize) - -> Option> -{ - // Try the options that are least restrictive on the caller first. 
- for &(opt_trait_def_id, method_name) in &[ - (fcx.tcx().lang_items.fn_trait(), token::intern("call")), - (fcx.tcx().lang_items.fn_mut_trait(), token::intern("call_mut")), - (fcx.tcx().lang_items.fn_once_trait(), token::intern("call_once")), - ] { - let trait_def_id = match opt_trait_def_id { - Some(def_id) => def_id, - None => continue, - }; + ty::TyClosure(def_id, substs) => { + assert_eq!(def_id.krate, LOCAL_CRATE); + + // Check whether this is a call to a closure where we + // haven't yet decided on whether the closure is fn vs + // fnmut vs fnonce. If so, we have to defer further processing. + if self.closure_kind(def_id).is_none() { + let closure_ty = self.closure_type(def_id, substs); + let fn_sig = self.replace_late_bound_regions_with_fresh_var(call_expr.span, + infer::FnCall, + &closure_ty.sig) + .0; + self.record_deferred_call_resolution(def_id, + Box::new(CallResolution { + call_expr: call_expr, + callee_expr: callee_expr, + adjusted_ty: adjusted_ty, + autoderefs: autoderefs, + fn_sig: fn_sig.clone(), + closure_def_id: def_id, + })); + return Some(CallStep::DeferredClosure(fn_sig)); + } + } - match method::lookup_in_trait_adjusted(fcx, - call_expr.span, - Some(&*callee_expr), - method_name, - trait_def_id, - autoderefs, - false, - adjusted_ty, - None) { - None => continue, - Some(method_callee) => { - return Some(method_callee); + // Hack: we know that there are traits implementing Fn for &F + // where F:Fn and so forth. In the particular case of types + // like `x: &mut FnMut()`, if there is a call `x()`, we would + // normally translate to `FnMut::call_mut(&mut x, ())`, but + // that winds up requiring `mut x: &mut FnMut()`. A little + // over the top. The simplest fix by far is to just ignore + // this case and deref again, so we wind up with + // `FnMut::call_mut(&mut *x, ())`. + ty::TyRef(..) 
if autoderefs == 0 => { + return None; } + + _ => {} } - } - None -} + self.try_overloaded_call_traits(call_expr, callee_expr, adjusted_ty, autoderefs) + .map(|method_callee| CallStep::Overloaded(method_callee)) + } -fn confirm_builtin_call<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - call_expr: &hir::Expr, - callee_ty: Ty<'tcx>, - arg_exprs: &'tcx [P], - expected: Expectation<'tcx>) -{ - let error_fn_sig; - - let fn_sig = match callee_ty.sty { - ty::TyBareFn(_, &ty::BareFnTy {ref sig, ..}) => { - sig + fn try_overloaded_call_traits(&self, + call_expr: &hir::Expr, + callee_expr: &hir::Expr, + adjusted_ty: Ty<'tcx>, + autoderefs: usize) + -> Option> { + // Try the options that are least restrictive on the caller first. + for &(opt_trait_def_id, method_name) in + &[(self.tcx.lang_items.fn_trait(), Symbol::intern("call")), + (self.tcx.lang_items.fn_mut_trait(), Symbol::intern("call_mut")), + (self.tcx.lang_items.fn_once_trait(), Symbol::intern("call_once"))] { + let trait_def_id = match opt_trait_def_id { + Some(def_id) => def_id, + None => continue, + }; + + match self.lookup_method_in_trait_adjusted(call_expr.span, + Some(&callee_expr), + method_name, + trait_def_id, + autoderefs, + false, + adjusted_ty, + None) { + None => continue, + Some(method_callee) => { + return Some(method_callee); + } + } } - _ => { - let mut err = fcx.type_error_struct(call_expr.span, |actual| { - format!("expected function, found `{}`", actual) - }, callee_ty, None); - - if let hir::ExprCall(ref expr, _) = call_expr.node { - let tcx = fcx.tcx(); - if let Some(pr) = tcx.def_map.borrow().get(&expr.id) { - if pr.depth == 0 && pr.base_def != def::DefErr { - if let Some(span) = tcx.map.span_if_local(pr.def_id()) { + + None + } + + fn confirm_builtin_call(&self, + call_expr: &hir::Expr, + callee_ty: Ty<'tcx>, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>) + -> Ty<'tcx> { + let error_fn_sig; + + let fn_sig = match callee_ty.sty { + ty::TyFnDef(.., &ty::BareFnTy {ref sig, ..}) | + 
ty::TyFnPtr(&ty::BareFnTy {ref sig, ..}) => sig, + ref t => { + let mut unit_variant = None; + if let &ty::TyAdt(adt_def, ..) = t { + if adt_def.is_enum() { + if let hir::ExprCall(ref expr, _) = call_expr.node { + unit_variant = Some(print::expr_to_string(expr)) + } + } + } + let mut err = if let Some(path) = unit_variant { + let mut err = self.type_error_struct(call_expr.span, |_| { + format!("`{}` is being called, but it is not a function", path) + }, callee_ty); + err.help(&format!("did you mean to write `{}`?", path)); + err + } else { + self.type_error_struct(call_expr.span, |actual| { + format!("expected function, found `{}`", actual) + }, callee_ty) + }; + + if let hir::ExprCall(ref expr, _) = call_expr.node { + let def = if let hir::ExprPath(ref qpath) = expr.node { + self.tables.borrow().qpath_def(qpath, expr.id) + } else { + Def::Err + }; + if def != Def::Err { + if let Some(span) = self.tcx.map.span_if_local(def.def_id()) { err.span_note(span, "defined here"); } } } - } - err.emit(); + err.emit(); - // This is the "default" function signature, used in case of error. - // In that case, we check each argument against "error" in order to - // set up all the node type bindings. - error_fn_sig = ty::Binder(ty::FnSig { - inputs: err_args(fcx.tcx(), arg_exprs.len()), - output: ty::FnConverging(fcx.tcx().types.err), - variadic: false - }); + // This is the "default" function signature, used in case of error. + // In that case, we check each argument against "error" in order to + // set up all the node type bindings. + error_fn_sig = ty::Binder(ty::FnSig { + inputs: self.err_args(arg_exprs.len()), + output: self.tcx.types.err, + variadic: false, + }); - &error_fn_sig - } - }; - - // Replace any late-bound regions that appear in the function - // signature with region variables. We also have to - // renormalize the associated types at this point, since they - // previously appeared within a `Binder<>` and hence would not - // have been normalized before. 
- let fn_sig = - fcx.infcx().replace_late_bound_regions_with_fresh_var(call_expr.span, - infer::FnCall, - fn_sig).0; - let fn_sig = - fcx.normalize_associated_types_in(call_expr.span, &fn_sig); - - // Call the generic checker. - let expected_arg_tys = expected_types_for_fn_args(fcx, - call_expr.span, - expected, - fn_sig.output, - &fn_sig.inputs); - check_argument_types(fcx, - call_expr.span, - &fn_sig.inputs, - &expected_arg_tys[..], - arg_exprs, - fn_sig.variadic, - TupleArgumentsFlag::DontTupleArguments); - - write_call(fcx, call_expr, fn_sig.output); -} + &error_fn_sig + } + }; -fn confirm_deferred_closure_call<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - call_expr: &hir::Expr, - arg_exprs: &'tcx [P], - expected: Expectation<'tcx>, - fn_sig: ty::FnSig<'tcx>) -{ - // `fn_sig` is the *signature* of the cosure being called. We - // don't know the full details yet (`Fn` vs `FnMut` etc), but we - // do know the types expected for each argument and the return - // type. - - let expected_arg_tys = - expected_types_for_fn_args(fcx, - call_expr.span, - expected, - fn_sig.output.clone(), - &*fn_sig.inputs); - - check_argument_types(fcx, - call_expr.span, - &*fn_sig.inputs, - &*expected_arg_tys, - arg_exprs, - fn_sig.variadic, - TupleArgumentsFlag::TupleArguments); - - write_call(fcx, call_expr, fn_sig.output); -} + // Replace any late-bound regions that appear in the function + // signature with region variables. We also have to + // renormalize the associated types at this point, since they + // previously appeared within a `Binder<>` and hence would not + // have been normalized before. + let fn_sig = + self.replace_late_bound_regions_with_fresh_var(call_expr.span, infer::FnCall, fn_sig) + .0; + let fn_sig = self.normalize_associated_types_in(call_expr.span, &fn_sig); + + // Call the generic checker. 
+ let expected_arg_tys = + self.expected_types_for_fn_args(call_expr.span, + expected, + fn_sig.output, + &fn_sig.inputs); + self.check_argument_types(call_expr.span, + &fn_sig.inputs, + &expected_arg_tys[..], + arg_exprs, + fn_sig.variadic, + TupleArgumentsFlag::DontTupleArguments); + + fn_sig.output + } -fn confirm_overloaded_call<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &hir::Expr, - callee_expr: &'tcx hir::Expr, - arg_exprs: &'tcx [P], - expected: Expectation<'tcx>, - method_callee: ty::MethodCallee<'tcx>) -{ - let output_type = - check_method_argument_types(fcx, - call_expr.span, - method_callee.ty, - callee_expr, - arg_exprs, - TupleArgumentsFlag::TupleArguments, - expected); - write_call(fcx, call_expr, output_type); - - write_overloaded_call_method_map(fcx, call_expr, method_callee); -} + fn confirm_deferred_closure_call(&self, + call_expr: &hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + fn_sig: ty::FnSig<'tcx>) + -> Ty<'tcx> { + // `fn_sig` is the *signature* of the cosure being called. We + // don't know the full details yet (`Fn` vs `FnMut` etc), but we + // do know the types expected for each argument and the return + // type. 
+ + let expected_arg_tys = self.expected_types_for_fn_args(call_expr.span, + expected, + fn_sig.output.clone(), + &fn_sig.inputs); + + self.check_argument_types(call_expr.span, + &fn_sig.inputs, + &expected_arg_tys, + arg_exprs, + fn_sig.variadic, + TupleArgumentsFlag::TupleArguments); + + fn_sig.output + } -fn write_overloaded_call_method_map<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &hir::Expr, - method_callee: ty::MethodCallee<'tcx>) { - let method_call = ty::MethodCall::expr(call_expr.id); - fcx.inh.tables.borrow_mut().method_map.insert(method_call, method_callee); + fn confirm_overloaded_call(&self, + call_expr: &hir::Expr, + callee_expr: &'gcx hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + method_callee: ty::MethodCallee<'tcx>) + -> Ty<'tcx> { + let output_type = self.check_method_argument_types(call_expr.span, + method_callee.ty, + callee_expr, + arg_exprs, + TupleArgumentsFlag::TupleArguments, + expected); + + self.write_overloaded_call_method_map(call_expr, method_callee); + output_type + } + + fn write_overloaded_call_method_map(&self, + call_expr: &hir::Expr, + method_callee: ty::MethodCallee<'tcx>) { + let method_call = ty::MethodCall::expr(call_expr.id); + self.tables.borrow_mut().method_map.insert(method_call, method_callee); + } } #[derive(Debug)] -struct CallResolution<'tcx> { - call_expr: &'tcx hir::Expr, - callee_expr: &'tcx hir::Expr, +struct CallResolution<'gcx: 'tcx, 'tcx> { + call_expr: &'gcx hir::Expr, + callee_expr: &'gcx hir::Expr, adjusted_ty: Ty<'tcx>, autoderefs: usize, fn_sig: ty::FnSig<'tcx>, closure_def_id: DefId, } -impl<'tcx> DeferredCallResolution<'tcx> for CallResolution<'tcx> { - fn resolve<'a>(&mut self, fcx: &FnCtxt<'a,'tcx>) { - debug!("DeferredCallResolution::resolve() {:?}", - self); +impl<'gcx, 'tcx> DeferredCallResolution<'gcx, 'tcx> for CallResolution<'gcx, 'tcx> { + fn resolve<'a>(&mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) { + debug!("DeferredCallResolution::resolve() {:?}", self); // we 
should not be invoked until the closure kind has been // determined by upvar inference - assert!(fcx.infcx().closure_kind(self.closure_def_id).is_some()); + assert!(fcx.closure_kind(self.closure_def_id).is_some()); // We may now know enough to figure out fn vs fnmut etc. - match try_overloaded_call_traits(fcx, self.call_expr, self.callee_expr, - self.adjusted_ty, self.autoderefs) { + match fcx.try_overloaded_call_traits(self.call_expr, + self.callee_expr, + self.adjusted_ty, + self.autoderefs) { Some(method_callee) => { // One problem is that when we get here, we are going // to have a newly instantiated function signature @@ -375,30 +355,24 @@ impl<'tcx> DeferredCallResolution<'tcx> for CallResolution<'tcx> { // can't because of the annoying need for a TypeTrace. // (This always bites me, should find a way to // refactor it.) - let method_sig = fcx.tcx().no_late_bound_regions(method_callee.ty.fn_sig()) - .unwrap(); + let method_sig = fcx.tcx + .no_late_bound_regions(method_callee.ty.fn_sig()) + .unwrap(); - debug!("attempt_resolution: method_callee={:?}", - method_callee); + debug!("attempt_resolution: method_callee={:?}", method_callee); for (&method_arg_ty, &self_arg_ty) in - method_sig.inputs[1..].iter().zip(&self.fn_sig.inputs) - { - demand::eqtype(fcx, self.call_expr.span, self_arg_ty, method_arg_ty); + method_sig.inputs[1..].iter().zip(&self.fn_sig.inputs) { + fcx.demand_eqtype(self.call_expr.span, self_arg_ty, method_arg_ty); } - let nilty = fcx.tcx().mk_nil(); - demand::eqtype(fcx, - self.call_expr.span, - method_sig.output.unwrap_or(nilty), - self.fn_sig.output.unwrap_or(nilty)); + fcx.demand_eqtype(self.call_expr.span, method_sig.output, self.fn_sig.output); - write_overloaded_call_method_map(fcx, self.call_expr, method_callee); + fcx.write_overloaded_call_method_map(self.call_expr, method_callee); } None => { - fcx.tcx().sess.span_bug( - self.call_expr.span, - "failed to find an overloaded call trait for closure call"); + span_bug!(self.call_expr.span, 
+ "failed to find an overloaded call trait for closure call"); } } } diff --git a/src/librustc_typeck/check/cast.rs b/src/librustc_typeck/check/cast.rs index fd6c4f44ba428..f2c8ef46a7e25 100644 --- a/src/librustc_typeck/check/cast.rs +++ b/src/librustc_typeck/check/cast.rs @@ -38,27 +38,26 @@ //! expression, `e as U2` is not necessarily so (in fact it will only be valid if //! `U1` coerces to `U2`). -use super::coercion; -use super::demand; use super::FnCtxt; -use super::structurally_resolved_type; use lint; -use middle::def_id::DefId; -use middle::ty::{self, Ty, TypeFoldable}; -use middle::ty::cast::{CastKind, CastTy}; -use syntax::codemap::Span; -use rustc_front::hir; +use hir::def_id::DefId; +use rustc::hir; +use rustc::traits; +use rustc::ty::{self, Ty, TypeFoldable}; +use rustc::ty::cast::{CastKind, CastTy}; +use rustc::middle::lang_items; use syntax::ast; -use syntax::ast::UintTy::TyU8; - +use syntax_pos::Span; +use util::common::ErrorReported; /// Reifies a cast check to be checked once we have full type information for /// a function context. pub struct CastCheck<'tcx> { - expr: hir::Expr, + expr: &'tcx hir::Expr, expr_ty: Ty<'tcx>, cast_ty: Ty<'tcx>, + cast_span: Span, span: Span, } @@ -66,33 +65,34 @@ pub struct CastCheck<'tcx> { /// fat pointers if their unsize-infos have the same kind. #[derive(Copy, Clone, PartialEq, Eq)] enum UnsizeKind<'tcx> { - Vtable(DefId), + Vtable(Option), Length, /// The unsize info of this projection OfProjection(&'tcx ty::ProjectionTy<'tcx>), /// The unsize info of this parameter - OfParam(&'tcx ty::ParamTy) + OfParam(&'tcx ty::ParamTy), } -/// Returns the kind of unsize information of t, or None -/// if t is sized or it is unknown. 
-fn unsize_kind<'a,'tcx>(fcx: &FnCtxt<'a, 'tcx>, - t: Ty<'tcx>) - -> Option> { - match t.sty { - ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length), - ty::TyTrait(ref tty) => Some(UnsizeKind::Vtable(tty.principal_def_id())), - ty::TyStruct(def, substs) => { - // FIXME(arielb1): do some kind of normalization - match def.struct_variant().fields.last() { - None => None, - Some(f) => unsize_kind(fcx, f.ty(fcx.tcx(), substs)) +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + /// Returns the kind of unsize information of t, or None + /// if t is sized or it is unknown. + fn unsize_kind(&self, t: Ty<'tcx>) -> Option> { + match t.sty { + ty::TySlice(_) | ty::TyStr => Some(UnsizeKind::Length), + ty::TyDynamic(ref tty, ..) => + Some(UnsizeKind::Vtable(tty.principal().map(|p| p.def_id()))), + ty::TyAdt(def, substs) if def.is_struct() => { + // FIXME(arielb1): do some kind of normalization + match def.struct_variant().fields.last() { + None => None, + Some(f) => self.unsize_kind(f.ty(self.tcx, substs)), + } } + // We should really try to normalize here. + ty::TyProjection(ref pi) => Some(UnsizeKind::OfProjection(pi)), + ty::TyParam(ref p) => Some(UnsizeKind::OfParam(p)), + _ => None, } - // We should really try to normalize here. - ty::TyProjection(ref pi) => Some(UnsizeKind::OfProjection(pi)), - ty::TyParam(ref p) => Some(UnsizeKind::OfParam(p)), - _ => None } } @@ -101,144 +101,288 @@ enum CastError { CastToBool, CastToChar, DifferingKinds, + /// Cast of thin to fat raw ptr (eg. 
`*const () as *const [u8]`) + SizedUnsizedCast, IllegalCast, + NeedDeref, NeedViaPtr, NeedViaThinPtr, NeedViaInt, - NeedViaUsize, NonScalar, } -impl<'tcx> CastCheck<'tcx> { - pub fn new(expr: hir::Expr, expr_ty: Ty<'tcx>, cast_ty: Ty<'tcx>, span: Span) - -> CastCheck<'tcx> { - CastCheck { +impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { + pub fn new(fcx: &FnCtxt<'a, 'gcx, 'tcx>, + expr: &'tcx hir::Expr, + expr_ty: Ty<'tcx>, + cast_ty: Ty<'tcx>, + cast_span: Span, + span: Span) + -> Result, ErrorReported> { + let check = CastCheck { expr: expr, expr_ty: expr_ty, cast_ty: cast_ty, + cast_span: cast_span, span: span, + }; + + // For better error messages, check for some obviously unsized + // cases now. We do a more thorough check at the end, once + // inference is more completely known. + match cast_ty.sty { + ty::TyDynamic(..) | ty::TySlice(..) => { + check.report_cast_to_unsized_type(fcx); + Err(ErrorReported) + } + _ => Ok(check), } } - fn report_cast_error<'a>(&self, fcx: &FnCtxt<'a, 'tcx>, - e: CastError) { + fn report_cast_error(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, e: CastError) { match e { - CastError::NeedViaPtr | + CastError::NeedDeref => { + let cast_ty = fcx.ty_to_string(self.cast_ty); + let mut err = fcx.type_error_struct(self.cast_span, + |actual| { + format!("casting `{}` as `{}` is invalid", + actual, + cast_ty) + }, + self.expr_ty); + err.span_label(self.expr.span, + &format!("cannot cast `{}` as `{}`", + fcx.ty_to_string(self.expr_ty), + cast_ty)); + if let Ok(snippet) = fcx.sess().codemap().span_to_snippet(self.expr.span) { + err.span_label(self.expr.span, + &format!("did you mean `*{}`?", snippet)); + } + err.emit(); + } CastError::NeedViaThinPtr | - CastError::NeedViaInt | - CastError::NeedViaUsize => { - fcx.type_error_struct(self.span, |actual| { - format!("casting `{}` as `{}` is invalid", - actual, - fcx.infcx().ty_to_string(self.cast_ty)) - }, self.expr_ty, None) - .fileline_help(self.span, - &format!("cast through {} first", match e { - 
CastError::NeedViaPtr => "a raw pointer", - CastError::NeedViaThinPtr => "a thin pointer", - CastError::NeedViaInt => "an integer", - CastError::NeedViaUsize => "a usize", - _ => unreachable!() - })) - .emit(); + CastError::NeedViaPtr => { + let mut err = fcx.type_error_struct(self.span, + |actual| { + format!("casting `{}` as `{}` is invalid", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty); + if self.cast_ty.is_uint() { + err.help(&format!("cast through {} first", + match e { + CastError::NeedViaPtr => "a raw pointer", + CastError::NeedViaThinPtr => "a thin pointer", + _ => bug!(), + })); + } + err.emit(); + } + CastError::NeedViaInt => { + fcx.type_error_struct(self.span, + |actual| { + format!("casting `{}` as `{}` is invalid", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty) + .help(&format!("cast through {} first", + match e { + CastError::NeedViaInt => "an integer", + _ => bug!(), + })) + .emit(); } CastError::CastToBool => { - struct_span_err!(fcx.tcx().sess, self.span, E0054, "cannot cast as `bool`") - .fileline_help(self.span, "compare with zero instead") + struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`") + .span_label(self.span, &format!("unsupported cast")) + .help("compare with zero instead") .emit(); } CastError::CastToChar => { - fcx.type_error_message(self.span, |actual| { - format!("only `u8` can be cast as `char`, not `{}`", actual) - }, self.expr_ty, None); + fcx.type_error_message(self.span, + |actual| { + format!("only `u8` can be cast as `char`, not `{}`", + actual) + }, + self.expr_ty); } CastError::NonScalar => { - fcx.type_error_message(self.span, |actual| { - format!("non-scalar cast: `{}` as `{}`", - actual, - fcx.infcx().ty_to_string(self.cast_ty)) - }, self.expr_ty, None); + fcx.type_error_message(self.span, + |actual| { + format!("non-scalar cast: `{}` as `{}`", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty); } CastError::IllegalCast => { - 
fcx.type_error_message(self.span, |actual| { - format!("casting `{}` as `{}` is invalid", - actual, - fcx.infcx().ty_to_string(self.cast_ty)) - }, self.expr_ty, None); + fcx.type_error_message(self.span, + |actual| { + format!("casting `{}` as `{}` is invalid", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty); + } + CastError::SizedUnsizedCast => { + fcx.type_error_message(self.span, + |actual| { + format!("cannot cast thin pointer `{}` to fat pointer \ + `{}`", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty) } CastError::DifferingKinds => { - fcx.type_error_struct(self.span, |actual| { - format!("casting `{}` as `{}` is invalid", - actual, - fcx.infcx().ty_to_string(self.cast_ty)) - }, self.expr_ty, None) - .fileline_note(self.span, "vtable kinds may not match") + fcx.type_error_struct(self.span, + |actual| { + format!("casting `{}` as `{}` is invalid", + actual, + fcx.ty_to_string(self.cast_ty)) + }, + self.expr_ty) + .note("vtable kinds may not match") .emit(); } } } - fn trivial_cast_lint<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) { + fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) { + if self.cast_ty.references_error() || self.expr_ty.references_error() { + return; + } + + let tstr = fcx.ty_to_string(self.cast_ty); + let mut err = + fcx.type_error_struct(self.span, + |actual| { + format!("cast to unsized type: `{}` as `{}`", actual, tstr) + }, + self.expr_ty); + match self.expr_ty.sty { + ty::TyRef(_, ty::TypeAndMut { mutbl: mt, .. 
}) => { + let mtstr = match mt { + hir::MutMutable => "mut ", + hir::MutImmutable => "", + }; + if self.cast_ty.is_trait() { + match fcx.tcx.sess.codemap().span_to_snippet(self.cast_span) { + Ok(s) => { + err.span_suggestion(self.cast_span, + "try casting to a reference instead:", + format!("&{}{}", mtstr, s)); + } + Err(_) => { + span_help!(err, self.cast_span, "did you mean `&{}{}`?", mtstr, tstr) + } + } + } else { + span_help!(err, + self.span, + "consider using an implicit coercion to `&{}{}` instead", + mtstr, + tstr); + } + } + ty::TyBox(..) => { + match fcx.tcx.sess.codemap().span_to_snippet(self.cast_span) { + Ok(s) => { + err.span_suggestion(self.cast_span, + "try casting to a `Box` instead:", + format!("Box<{}>", s)); + } + Err(_) => span_help!(err, self.cast_span, "did you mean `Box<{}>`?", tstr), + } + } + _ => { + span_help!(err, + self.expr.span, + "consider using a box or reference as appropriate"); + } + } + err.emit(); + } + + fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) { let t_cast = self.cast_ty; let t_expr = self.expr_ty; if t_cast.is_numeric() && t_expr.is_numeric() { - fcx.tcx().sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS, - self.expr.id, - self.span, - format!("trivial numeric cast: `{}` as `{}`. Cast can be \ - replaced by coercion, this might require type \ - ascription or a temporary variable", - fcx.infcx().ty_to_string(t_expr), - fcx.infcx().ty_to_string(t_cast))); + fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_NUMERIC_CASTS, + self.expr.id, + self.span, + format!("trivial numeric cast: `{}` as `{}`. Cast can be \ + replaced by coercion, this might require type \ + ascription or a temporary variable", + fcx.ty_to_string(t_expr), + fcx.ty_to_string(t_cast))); } else { - fcx.tcx().sess.add_lint(lint::builtin::TRIVIAL_CASTS, - self.expr.id, - self.span, - format!("trivial cast: `{}` as `{}`. 
Cast can be \ - replaced by coercion, this might require type \ - ascription or a temporary variable", - fcx.infcx().ty_to_string(t_expr), - fcx.infcx().ty_to_string(t_cast))); + fcx.tcx.sess.add_lint(lint::builtin::TRIVIAL_CASTS, + self.expr.id, + self.span, + format!("trivial cast: `{}` as `{}`. Cast can be \ + replaced by coercion, this might require type \ + ascription or a temporary variable", + fcx.ty_to_string(t_expr), + fcx.ty_to_string(t_cast))); } } - pub fn check<'a>(mut self, fcx: &FnCtxt<'a, 'tcx>) { - self.expr_ty = structurally_resolved_type(fcx, self.span, self.expr_ty); - self.cast_ty = structurally_resolved_type(fcx, self.span, self.cast_ty); + pub fn check(mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) { + self.expr_ty = fcx.structurally_resolved_type(self.span, self.expr_ty); + self.cast_ty = fcx.structurally_resolved_type(self.span, self.cast_ty); - debug!("check_cast({}, {:?} as {:?})", self.expr.id, self.expr_ty, + debug!("check_cast({}, {:?} as {:?})", + self.expr.id, + self.expr_ty, self.cast_ty); - if self.expr_ty.references_error() || self.cast_ty.references_error() { + if !fcx.type_is_known_to_be_sized(self.cast_ty, self.span) { + self.report_cast_to_unsized_type(fcx); + } else if self.expr_ty.references_error() || self.cast_ty.references_error() { // No sense in giving duplicate error messages } else if self.try_coercion_cast(fcx) { self.trivial_cast_lint(fcx); debug!(" -> CoercionCast"); - fcx.tcx().cast_kinds.borrow_mut().insert(self.expr.id, - CastKind::CoercionCast); - } else { match self.do_check(fcx) { - Ok(k) => { - debug!(" -> {:?}", k); - fcx.tcx().cast_kinds.borrow_mut().insert(self.expr.id, k); - } - Err(e) => self.report_cast_error(fcx, e) - };} + fcx.tcx.cast_kinds.borrow_mut().insert(self.expr.id, CastKind::CoercionCast); + } else { + match self.do_check(fcx) { + Ok(k) => { + debug!(" -> {:?}", k); + fcx.tcx.cast_kinds.borrow_mut().insert(self.expr.id, k); + } + Err(e) => self.report_cast_error(fcx, e), + }; + } } /// Check a 
cast, and report an error if one exists. In some cases, this /// can return Ok and create type errors in the fcx rather than returning /// directly. coercion-cast is handled in check instead of here. - fn do_check<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result { - use middle::ty::cast::IntTy::*; - use middle::ty::cast::CastTy::*; + fn do_check(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Result { + use rustc::ty::cast::IntTy::*; + use rustc::ty::cast::CastTy::*; let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty), CastTy::from_ty(self.cast_ty)) { (Some(t_from), Some(t_cast)) => (t_from, t_cast), - _ => { - return Err(CastError::NonScalar) + // Function item types may need to be reified before casts. + (None, Some(t_cast)) => { + if let ty::TyFnDef(.., f) = self.expr_ty.sty { + // Attempt a coercion to a fn pointer type. + let res = fcx.try_coerce(self.expr, self.expr_ty, fcx.tcx.mk_fn_ptr(f)); + if !res.is_ok() { + return Err(CastError::NonScalar); + } + (FnPtr, t_cast) + } else { + return Err(CastError::NonScalar); + } } + _ => return Err(CastError::NonScalar), }; match (t_from, t_cast) { @@ -249,47 +393,69 @@ impl<'tcx> CastCheck<'tcx> { (_, Int(Bool)) => Err(CastError::CastToBool), // * -> Char - (Int(U(ast::TyU8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast + (Int(U(ast::UintTy::U8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast (_, Int(Char)) => Err(CastError::CastToChar), // prim -> float,ptr - (Int(Bool), Float) | (Int(CEnum), Float) | (Int(Char), Float) - => Err(CastError::NeedViaInt), - (Int(Bool), Ptr(_)) | (Int(CEnum), Ptr(_)) | (Int(Char), Ptr(_)) - => Err(CastError::NeedViaUsize), + (Int(Bool), Float) | + (Int(CEnum), Float) | + (Int(Char), Float) => Err(CastError::NeedViaInt), + + (Int(Bool), Ptr(_)) | + (Int(CEnum), Ptr(_)) | + (Int(Char), Ptr(_)) | + (Ptr(_), Float) | + (FnPtr, Float) | + (Float, Ptr(_)) => Err(CastError::IllegalCast), // ptr -> * (Ptr(m_e), Ptr(m_c)) => self.check_ptr_ptr_cast(fcx, m_e, m_c), // 
ptr-ptr-cast (Ptr(m_expr), Int(_)) => self.check_ptr_addr_cast(fcx, m_expr), // ptr-addr-cast - (Ptr(_), Float) | (FnPtr, Float) => Err(CastError::NeedViaUsize), (FnPtr, Int(_)) => Ok(CastKind::FnPtrAddrCast), - (RPtr(_), Int(_)) | (RPtr(_), Float) => Err(CastError::NeedViaPtr), + (RPtr(p), Int(_)) | + (RPtr(p), Float) => { + match p.ty.sty { + ty::TypeVariants::TyInt(_) | + ty::TypeVariants::TyUint(_) | + ty::TypeVariants::TyFloat(_) => { + Err(CastError::NeedDeref) + } + ty::TypeVariants::TyInfer(t) => { + match t { + ty::InferTy::IntVar(_) | + ty::InferTy::FloatVar(_) | + ty::InferTy::FreshIntTy(_) | + ty::InferTy::FreshFloatTy(_) => { + Err(CastError::NeedDeref) + } + _ => Err(CastError::NeedViaPtr), + } + } + _ => Err(CastError::NeedViaPtr), + } + } // * -> ptr (Int(_), Ptr(mt)) => self.check_addr_ptr_cast(fcx, mt), // addr-ptr-cast (FnPtr, Ptr(mt)) => self.check_fptr_ptr_cast(fcx, mt), - (Float, Ptr(_)) => Err(CastError::NeedViaUsize), (RPtr(rmt), Ptr(mt)) => self.check_ref_cast(fcx, rmt, mt), // array-ptr-cast // prim -> prim (Int(CEnum), Int(_)) => Ok(CastKind::EnumCast), - (Int(Char), Int(_)) | (Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast), - - (Int(_), Int(_)) | - (Int(_), Float) | - (Float, Int(_)) | - (Float, Float) => Ok(CastKind::NumericCast), + (Int(Char), Int(_)) | + (Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast), + (Int(_), Int(_)) | (Int(_), Float) | (Float, Int(_)) | (Float, Float) => { + Ok(CastKind::NumericCast) + } } } - fn check_ptr_ptr_cast<'a>(&self, - fcx: &FnCtxt<'a, 'tcx>, - m_expr: &'tcx ty::TypeAndMut<'tcx>, - m_cast: &'tcx ty::TypeAndMut<'tcx>) - -> Result - { - debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", - m_expr, m_cast); + fn check_ptr_ptr_cast(&self, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + m_expr: &'tcx ty::TypeAndMut<'tcx>, + m_cast: &'tcx ty::TypeAndMut<'tcx>) + -> Result { + debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", m_expr, m_cast); // ptr-ptr cast. vtables must match. 
// Cast to sized is OK @@ -299,21 +465,20 @@ impl<'tcx> CastCheck<'tcx> { // sized -> unsized? report invalid cast (don't complain about vtable kinds) if fcx.type_is_known_to_be_sized(m_expr.ty, self.span) { - return Err(CastError::IllegalCast); + return Err(CastError::SizedUnsizedCast); } // vtable kinds must match - match (unsize_kind(fcx, m_cast.ty), unsize_kind(fcx, m_expr.ty)) { + match (fcx.unsize_kind(m_cast.ty), fcx.unsize_kind(m_expr.ty)) { (Some(a), Some(b)) if a == b => Ok(CastKind::PtrPtrCast), - _ => Err(CastError::DifferingKinds) + _ => Err(CastError::DifferingKinds), } } - fn check_fptr_ptr_cast<'a>(&self, - fcx: &FnCtxt<'a, 'tcx>, - m_cast: &'tcx ty::TypeAndMut<'tcx>) - -> Result - { + fn check_fptr_ptr_cast(&self, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + m_cast: &'tcx ty::TypeAndMut<'tcx>) + -> Result { // fptr-ptr cast. must be to sized ptr if fcx.type_is_known_to_be_sized(m_cast.ty, self.span) { @@ -323,11 +488,10 @@ impl<'tcx> CastCheck<'tcx> { } } - fn check_ptr_addr_cast<'a>(&self, - fcx: &FnCtxt<'a, 'tcx>, - m_expr: &'tcx ty::TypeAndMut<'tcx>) - -> Result - { + fn check_ptr_addr_cast(&self, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + m_expr: &'tcx ty::TypeAndMut<'tcx>) + -> Result { // ptr-addr cast. must be from sized ptr if fcx.type_is_known_to_be_sized(m_expr.ty, self.span) { @@ -337,12 +501,11 @@ impl<'tcx> CastCheck<'tcx> { } } - fn check_ref_cast<'a>(&self, - fcx: &FnCtxt<'a, 'tcx>, - m_expr: &'tcx ty::TypeAndMut<'tcx>, - m_cast: &'tcx ty::TypeAndMut<'tcx>) - -> Result - { + fn check_ref_cast(&self, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + m_expr: &'tcx ty::TypeAndMut<'tcx>, + m_cast: &'tcx ty::TypeAndMut<'tcx>) + -> Result { // array-ptr-cast. if m_expr.mutbl == hir::MutImmutable && m_cast.mutbl == hir::MutImmutable { @@ -355,7 +518,7 @@ impl<'tcx> CastCheck<'tcx> { // from a region pointer to a vector. 
// this will report a type mismatch if needed - demand::eqtype(fcx, self.span, ety, m_cast.ty); + fcx.demand_eqtype(self.span, ety, m_cast.ty); return Ok(CastKind::ArrayPtrCast); } } @@ -363,28 +526,26 @@ impl<'tcx> CastCheck<'tcx> { Err(CastError::IllegalCast) } - fn check_addr_ptr_cast<'a>(&self, - fcx: &FnCtxt<'a, 'tcx>, - m_cast: &'tcx ty::TypeAndMut<'tcx>) - -> Result - { + fn check_addr_ptr_cast(&self, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + m_cast: &'tcx ty::TypeAndMut<'tcx>) + -> Result { // ptr-addr cast. pointer must be thin. if fcx.type_is_known_to_be_sized(m_cast.ty, self.span) { - Ok(CastKind::AddrPtrCast) + Ok(CastKind::AddrPtrCast) } else { - Err(CastError::IllegalCast) + Err(CastError::IllegalCast) } } - fn try_coercion_cast<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> bool { - if let Ok(()) = coercion::mk_assignty(fcx, - &self.expr, - self.expr_ty, - self.cast_ty) { - true - } else { - false - } + fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> bool { + fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty).is_ok() } +} +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + fn type_is_known_to_be_sized(&self, ty: Ty<'tcx>, span: Span) -> bool { + let lang_item = self.tcx.require_lang_item(lang_items::SizedTraitLangItem); + traits::type_known_to_meet_bound(self, ty, lang_item, span) + } } diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index 3b7cb2bd4b466..486f8fc25bb32 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -12,247 +12,239 @@ use super::{check_fn, Expectation, FnCtxt}; -use astconv; -use middle::subst; -use middle::ty::{self, ToPolyTraitRef, Ty}; +use astconv::AstConv; +use rustc::ty::{self, ToPolyTraitRef, Ty}; use std::cmp; -use syntax::abi; -use rustc_front::hir; - -pub fn check_expr_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &hir::Expr, - _capture: hir::CaptureClause, - decl: &'tcx hir::FnDecl, - body: &'tcx hir::Block, - expected: Expectation<'tcx>) 
{ - debug!("check_expr_closure(expr={:?},expected={:?})", - expr, - expected); - - // It's always helpful for inference if we know the kind of - // closure sooner rather than later, so first examine the expected - // type, and see if can glean a closure kind from there. - let (expected_sig,expected_kind) = match expected.to_option(fcx) { - Some(ty) => deduce_expectations_from_expected_type(fcx, ty), - None => (None, None) - }; - check_closure(fcx, expr, expected_kind, decl, body, expected_sig) -} - -fn check_closure<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &hir::Expr, - opt_kind: Option, - decl: &'tcx hir::FnDecl, - body: &'tcx hir::Block, - expected_sig: Option>) { - let expr_def_id = fcx.tcx().map.local_def_id(expr.id); - - debug!("check_closure opt_kind={:?} expected_sig={:?}", - opt_kind, - expected_sig); - - let mut fn_ty = astconv::ty_of_closure(fcx, - hir::Unsafety::Normal, - decl, - abi::RustCall, - expected_sig); - - // Create type variables (for now) to represent the transformed - // types of upvars. These will be unified during the upvar - // inference phase (`upvar.rs`). - let num_upvars = fcx.tcx().with_freevars(expr.id, |fv| fv.len()); - let upvar_tys = fcx.infcx().next_ty_vars(num_upvars); - - debug!("check_closure: expr.id={:?} upvar_tys={:?}", - expr.id, upvar_tys); - - let closure_type = - fcx.ccx.tcx.mk_closure( - expr_def_id, - fcx.ccx.tcx.mk_substs(fcx.inh.infcx.parameter_environment.free_substs.clone()), - upvar_tys); - - fcx.write_ty(expr.id, closure_type); - - let fn_sig = fcx.tcx().liberate_late_bound_regions( - fcx.tcx().region_maps.call_site_extent(expr.id, body.id), &fn_ty.sig); - - check_fn(fcx.ccx, - hir::Unsafety::Normal, - expr.id, - &fn_sig, - decl, - expr.id, - &*body, - fcx.inh); - - // Tuple up the arguments and insert the resulting function type into - // the `closures` table. 
- fn_ty.sig.0.inputs = vec![fcx.tcx().mk_tup(fn_ty.sig.0.inputs)]; - - debug!("closure for {:?} --> sig={:?} opt_kind={:?}", - expr_def_id, - fn_ty.sig, - opt_kind); - - fcx.inh.tables.borrow_mut().closure_tys.insert(expr_def_id, fn_ty); - match opt_kind { - Some(kind) => { fcx.inh.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); } - None => { } +use syntax::abi::Abi; +use rustc::hir; + +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn check_expr_closure(&self, + expr: &hir::Expr, + _capture: hir::CaptureClause, + decl: &'gcx hir::FnDecl, + body_id: hir::ExprId, + expected: Expectation<'tcx>) + -> Ty<'tcx> { + debug!("check_expr_closure(expr={:?},expected={:?})", + expr, + expected); + + // It's always helpful for inference if we know the kind of + // closure sooner rather than later, so first examine the expected + // type, and see if can glean a closure kind from there. + let (expected_sig, expected_kind) = match expected.to_option(self) { + Some(ty) => self.deduce_expectations_from_expected_type(ty), + None => (None, None), + }; + let body = self.tcx.map.expr(body_id); + self.check_closure(expr, expected_kind, decl, body, expected_sig) } -} -fn deduce_expectations_from_expected_type<'a,'tcx>( - fcx: &FnCtxt<'a,'tcx>, - expected_ty: Ty<'tcx>) - -> (Option>,Option) -{ - debug!("deduce_expectations_from_expected_type(expected_ty={:?})", - expected_ty); - - match expected_ty.sty { - ty::TyTrait(ref object_type) => { - let proj_bounds = object_type.projection_bounds_with_self_ty(fcx.tcx(), - fcx.tcx().types.err); - let sig = proj_bounds.iter() - .filter_map(|pb| deduce_sig_from_projection(fcx, pb)) - .next(); - let kind = fcx.tcx().lang_items.fn_trait_kind(object_type.principal_def_id()); - (sig, kind) - } - ty::TyInfer(ty::TyVar(vid)) => { - deduce_expectations_from_obligations(fcx, vid) - } - _ => { - (None, None) + fn check_closure(&self, + expr: &hir::Expr, + opt_kind: Option, + decl: &'gcx hir::FnDecl, + body: &'gcx hir::Expr, + expected_sig: 
Option>) + -> Ty<'tcx> { + debug!("check_closure opt_kind={:?} expected_sig={:?}", + opt_kind, + expected_sig); + + let expr_def_id = self.tcx.map.local_def_id(expr.id); + let mut fn_ty = AstConv::ty_of_closure(self, + hir::Unsafety::Normal, + decl, + Abi::RustCall, + expected_sig); + + // Create type variables (for now) to represent the transformed + // types of upvars. These will be unified during the upvar + // inference phase (`upvar.rs`). + let closure_type = self.tcx.mk_closure(expr_def_id, + self.parameter_environment.free_substs.extend_to(self.tcx, expr_def_id, + |_, _| span_bug!(expr.span, "closure has region param"), + |_, _| self.infcx.next_ty_var() + ) + ); + + debug!("check_closure: expr.id={:?} closure_type={:?}", expr.id, closure_type); + + let fn_sig = self.tcx + .liberate_late_bound_regions(self.tcx.region_maps.call_site_extent(expr.id, body.id), + &fn_ty.sig); + let fn_sig = (**self).normalize_associated_types_in(body.span, body.id, &fn_sig); + + check_fn(self, + hir::Unsafety::Normal, + expr.id, + &fn_sig, + decl, + expr.id, + &body); + + // Tuple up the arguments and insert the resulting function type into + // the `closures` table. + fn_ty.sig.0.inputs = vec![self.tcx.intern_tup(&fn_ty.sig.0.inputs[..])]; + + debug!("closure for {:?} --> sig={:?} opt_kind={:?}", + expr_def_id, + fn_ty.sig, + opt_kind); + + self.tables.borrow_mut().closure_tys.insert(expr_def_id, fn_ty); + match opt_kind { + Some(kind) => { + self.tables.borrow_mut().closure_kinds.insert(expr_def_id, kind); + } + None => {} } + + closure_type } -} -fn deduce_expectations_from_obligations<'a,'tcx>( - fcx: &FnCtxt<'a,'tcx>, - expected_vid: ty::TyVid) - -> (Option>, Option) -{ - let fulfillment_cx = fcx.inh.infcx.fulfillment_cx.borrow(); - // Here `expected_ty` is known to be a type inference variable. 
- - let expected_sig = - fulfillment_cx - .pending_obligations() - .iter() - .map(|obligation| &obligation.obligation) - .filter_map(|obligation| { - debug!("deduce_expectations_from_obligations: obligation.predicate={:?}", - obligation.predicate); - - match obligation.predicate { - // Given a Projection predicate, we can potentially infer - // the complete signature. - ty::Predicate::Projection(ref proj_predicate) => { - let trait_ref = proj_predicate.to_poly_trait_ref(); - self_type_matches_expected_vid(fcx, trait_ref, expected_vid) - .and_then(|_| deduce_sig_from_projection(fcx, proj_predicate)) - } - _ => { - None - } + fn deduce_expectations_from_expected_type + (&self, + expected_ty: Ty<'tcx>) + -> (Option>, Option) { + debug!("deduce_expectations_from_expected_type(expected_ty={:?})", + expected_ty); + + match expected_ty.sty { + ty::TyDynamic(ref object_type, ..) => { + let sig = object_type.projection_bounds() + .filter_map(|pb| { + let pb = pb.with_self_ty(self.tcx, self.tcx.types.err); + self.deduce_sig_from_projection(&pb) + }) + .next(); + let kind = object_type.principal() + .and_then(|p| self.tcx.lang_items.fn_trait_kind(p.def_id())); + (sig, kind) } - }) - .next(); - - // Even if we can't infer the full signature, we may be able to - // infer the kind. This can occur if there is a trait-reference - // like `F : Fn`. Note that due to subtyping we could encounter - // many viable options, so pick the most restrictive. - let expected_kind = - fulfillment_cx - .pending_obligations() - .iter() - .map(|obligation| &obligation.obligation) - .filter_map(|obligation| { - let opt_trait_ref = match obligation.predicate { - ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref()), - ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()), - ty::Predicate::Equate(..) => None, - ty::Predicate::RegionOutlives(..) => None, - ty::Predicate::TypeOutlives(..) => None, - ty::Predicate::WellFormed(..) => None, - ty::Predicate::ObjectSafe(..) 
=> None, - }; - opt_trait_ref - .and_then(|trait_ref| self_type_matches_expected_vid(fcx, trait_ref, expected_vid)) - .and_then(|trait_ref| fcx.tcx().lang_items.fn_trait_kind(trait_ref.def_id())) - }) - .fold(None, pick_most_restrictive_closure_kind); - - (expected_sig, expected_kind) -} - -fn pick_most_restrictive_closure_kind(best: Option, - cur: ty::ClosureKind) - -> Option -{ - match best { - None => Some(cur), - Some(best) => Some(cmp::min(best, cur)) + ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), + _ => (None, None), + } } -} - -/// Given a projection like "::Result == Y", we can deduce -/// everything we need to know about a closure. -fn deduce_sig_from_projection<'a,'tcx>( - fcx: &FnCtxt<'a,'tcx>, - projection: &ty::PolyProjectionPredicate<'tcx>) - -> Option> -{ - let tcx = fcx.tcx(); - debug!("deduce_sig_from_projection({:?})", - projection); - - let trait_ref = projection.to_poly_trait_ref(); - - if tcx.lang_items.fn_trait_kind(trait_ref.def_id()).is_none() { - return None; + fn deduce_expectations_from_obligations + (&self, + expected_vid: ty::TyVid) + -> (Option>, Option) { + let fulfillment_cx = self.fulfillment_cx.borrow(); + // Here `expected_ty` is known to be a type inference variable. + + let expected_sig = fulfillment_cx.pending_obligations() + .iter() + .map(|obligation| &obligation.obligation) + .filter_map(|obligation| { + debug!("deduce_expectations_from_obligations: obligation.predicate={:?}", + obligation.predicate); + + match obligation.predicate { + // Given a Projection predicate, we can potentially infer + // the complete signature. + ty::Predicate::Projection(ref proj_predicate) => { + let trait_ref = proj_predicate.to_poly_trait_ref(); + self.self_type_matches_expected_vid(trait_ref, expected_vid) + .and_then(|_| self.deduce_sig_from_projection(proj_predicate)) + } + _ => None, + } + }) + .next(); + + // Even if we can't infer the full signature, we may be able to + // infer the kind. 
This can occur if there is a trait-reference + // like `F : Fn`. Note that due to subtyping we could encounter + // many viable options, so pick the most restrictive. + let expected_kind = fulfillment_cx.pending_obligations() + .iter() + .map(|obligation| &obligation.obligation) + .filter_map(|obligation| { + let opt_trait_ref = match obligation.predicate { + ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref()), + ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()), + ty::Predicate::Equate(..) => None, + ty::Predicate::RegionOutlives(..) => None, + ty::Predicate::TypeOutlives(..) => None, + ty::Predicate::WellFormed(..) => None, + ty::Predicate::ObjectSafe(..) => None, + + // NB: This predicate is created by breaking down a + // `ClosureType: FnFoo()` predicate, where + // `ClosureType` represents some `TyClosure`. It can't + // possibly be referring to the current closure, + // because we haven't produced the `TyClosure` for + // this closure yet; this is exactly why the other + // code is looking for a self type of a unresolved + // inference variable. + ty::Predicate::ClosureKind(..) => None, + }; + opt_trait_ref.and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid)) + .and_then(|tr| self.tcx.lang_items.fn_trait_kind(tr.def_id())) + }) + .fold(None, + |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur)))); + + (expected_sig, expected_kind) } - let arg_param_ty = *trait_ref.substs().types.get(subst::TypeSpace, 0); - let arg_param_ty = fcx.infcx().resolve_type_vars_if_possible(&arg_param_ty); - debug!("deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty); + /// Given a projection like "::Result == Y", we can deduce + /// everything we need to know about a closure. 
+ fn deduce_sig_from_projection(&self, + projection: &ty::PolyProjectionPredicate<'tcx>) + -> Option> { + let tcx = self.tcx; - let input_tys = match arg_param_ty.sty { - ty::TyTuple(ref tys) => { (*tys).clone() } - _ => { return None; } - }; - debug!("deduce_sig_from_projection: input_tys {:?}", input_tys); + debug!("deduce_sig_from_projection({:?})", projection); - let ret_param_ty = projection.0.ty; - let ret_param_ty = fcx.infcx().resolve_type_vars_if_possible(&ret_param_ty); - debug!("deduce_sig_from_projection: ret_param_ty {:?}", ret_param_ty); + let trait_ref = projection.to_poly_trait_ref(); - let fn_sig = ty::FnSig { - inputs: input_tys, - output: ty::FnConverging(ret_param_ty), - variadic: false - }; - debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig); + if tcx.lang_items.fn_trait_kind(trait_ref.def_id()).is_none() { + return None; + } - Some(fn_sig) -} + let arg_param_ty = trait_ref.substs().type_at(1); + let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty); + debug!("deduce_sig_from_projection: arg_param_ty {:?}", + arg_param_ty); + + let input_tys = match arg_param_ty.sty { + ty::TyTuple(tys) => tys.to_vec(), + _ => { + return None; + } + }; + debug!("deduce_sig_from_projection: input_tys {:?}", input_tys); + + let ret_param_ty = projection.0.ty; + let ret_param_ty = self.resolve_type_vars_if_possible(&ret_param_ty); + debug!("deduce_sig_from_projection: ret_param_ty {:?}", + ret_param_ty); + + let fn_sig = ty::FnSig { + inputs: input_tys, + output: ret_param_ty, + variadic: false, + }; + debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig); + + Some(fn_sig) + } -fn self_type_matches_expected_vid<'a,'tcx>( - fcx: &FnCtxt<'a,'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, - expected_vid: ty::TyVid) - -> Option> -{ - let self_ty = fcx.infcx().shallow_resolve(trait_ref.self_ty()); - debug!("self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})", - trait_ref, - self_ty); - match self_ty.sty { - ty::TyInfer(ty::TyVar(v)) if 
expected_vid == v => Some(trait_ref), - _ => None, + fn self_type_matches_expected_vid(&self, + trait_ref: ty::PolyTraitRef<'tcx>, + expected_vid: ty::TyVid) + -> Option> { + let self_ty = self.shallow_resolve(trait_ref.self_ty()); + debug!("self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})", + trait_ref, + self_ty); + match self_ty.sty { + ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref), + _ => None, + } } } diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 8f64e85de4b0f..718c273785ae9 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -60,62 +60,104 @@ //! sort of a minor point so I've opted to leave it for later---after all //! we may want to adjust precisely when coercions occur. -use check::{autoderef, FnCtxt, UnresolvedTypeAction}; - -use middle::infer::{self, Coercion, TypeOrigin}; -use middle::traits::{self, ObligationCause}; -use middle::traits::{predicate_for_trait_def, report_selection_error}; -use middle::ty::adjustment::{AutoAdjustment, AutoDerefRef, AdjustDerefRef}; -use middle::ty::adjustment::{AutoPtr, AutoUnsafe, AdjustReifyFnPointer}; -use middle::ty::adjustment::{AdjustUnsafeFnPointer}; -use middle::ty::{self, LvaluePreference, TypeAndMut, Ty}; -use middle::ty::fold::TypeFoldable; -use middle::ty::error::TypeError; -use middle::ty::relate::RelateResult; +use check::FnCtxt; + +use rustc::hir; +use rustc::infer::{Coercion, InferOk, TypeTrace}; +use rustc::traits::{self, ObligationCause, ObligationCauseCode}; +use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow}; +use rustc::ty::{self, LvaluePreference, TypeAndMut, Ty}; +use rustc::ty::fold::TypeFoldable; +use rustc::ty::error::TypeError; +use rustc::ty::relate::RelateResult; use util::common::indent; use std::cell::RefCell; use std::collections::VecDeque; -use rustc_front::hir; +use std::ops::Deref; -struct Coerce<'a, 'tcx: 'a> { - fcx: &'a FnCtxt<'a, 'tcx>, - origin: 
infer::TypeOrigin, +struct Coerce<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + use_lub: bool, unsizing_obligations: RefCell>>, } -type CoerceResult<'tcx> = RelateResult<'tcx, Option>>; +impl<'a, 'gcx, 'tcx> Deref for Coerce<'a, 'gcx, 'tcx> { + type Target = FnCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.fcx + } +} -impl<'f, 'tcx> Coerce<'f, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { - self.fcx.tcx() +type CoerceResult<'tcx> = RelateResult<'tcx, (Ty<'tcx>, Adjust<'tcx>)>; + +fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability, + to_mutbl: hir::Mutability) + -> RelateResult<'tcx, ()> { + match (from_mutbl, to_mutbl) { + (hir::MutMutable, hir::MutMutable) | + (hir::MutImmutable, hir::MutImmutable) | + (hir::MutMutable, hir::MutImmutable) => Ok(()), + (hir::MutImmutable, hir::MutMutable) => Err(TypeError::Mutability), } +} - fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { - try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b)); - Ok(None) // No coercion required. 
+impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { + fn new(fcx: &'f FnCtxt<'f, 'gcx, 'tcx>, cause: ObligationCause<'tcx>) -> Self { + Coerce { + fcx: fcx, + cause: cause, + use_lub: false, + unsizing_obligations: RefCell::new(vec![]), + } } - fn unpack_actual_value(&self, a: Ty<'tcx>, f: F) -> T where - F: FnOnce(Ty<'tcx>) -> T, - { - f(self.fcx.infcx().shallow_resolve(a)) + fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + self.commit_if_ok(|_| { + let trace = TypeTrace::types(&self.cause, false, a, b); + if self.use_lub { + self.lub(false, trace, &a, &b) + .map(|ok| self.register_infer_ok_obligations(ok)) + } else { + self.sub(false, trace, &a, &b) + .map(|InferOk { value, obligations }| { + self.fcx.register_predicates(obligations); + value + }) + } + }) } - fn coerce(&self, - expr_a: &hir::Expr, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> CoerceResult<'tcx> { - debug!("Coerce.tys({:?} => {:?})", - a, - b); + /// Unify two types (using sub or lub) and produce a noop coercion. + fn unify_and_identity(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { + self.unify(&a, &b).and_then(|ty| self.identity(ty)) + } + + /// Synthesize an identity adjustment. + fn identity(&self, ty: Ty<'tcx>) -> CoerceResult<'tcx> { + Ok((ty, Adjust::DerefRef { + autoderefs: 0, + autoref: None, + unsize: false, + })) + } - let a = self.fcx.infcx().shallow_resolve(a); + fn coerce<'a, E, I>(&self, exprs: &E, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> + where E: Fn() -> I, + I: IntoIterator + { + + let a = self.shallow_resolve(a); + debug!("Coerce.tys({:?} => {:?})", a, b); // Just ignore error types. 
if a.references_error() || b.references_error() { - return Ok(None); + return self.identity(b); + } + + if a.is_never() { + return Ok((b, Adjust::NeverToAny)); } // Consider coercing the subtype to a DST @@ -133,28 +175,30 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { return self.coerce_unsafe_ptr(a, b, mt_b.mutbl); } - ty::TyRef(_, mt_b) => { - return self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl); + ty::TyRef(r_b, mt_b) => { + return self.coerce_borrowed_pointer(exprs, a, b, r_b, mt_b); } _ => {} } match a.sty { - ty::TyBareFn(Some(_), a_f) => { + ty::TyFnDef(.., a_f) => { // Function items are coercible to any closure // type; function pointers are not (that would // require double indirection). + // Additionally, we permit coercion of function + // items to drop the unsafe qualifier. self.coerce_from_fn_item(a, a_f, b) } - ty::TyBareFn(None, a_f) => { + ty::TyFnPtr(a_f) => { // We permit coercion of fn pointers to drop the // unsafe qualifier. self.coerce_from_fn_pointer(a, a_f, b) } _ => { - // Otherwise, just use subtyping rules. - self.subtype(a, b) + // Otherwise, just use unification rules. + self.unify_and_identity(a, b) } } } @@ -162,15 +206,18 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`. /// To match `A` with `B`, autoderef will be performed, /// calling `deref`/`deref_mut` where necessary. 
- fn coerce_borrowed_pointer(&self, - expr_a: &hir::Expr, - a: Ty<'tcx>, - b: Ty<'tcx>, - mutbl_b: hir::Mutability) - -> CoerceResult<'tcx> { - debug!("coerce_borrowed_pointer(a={:?}, b={:?})", - a, - b); + fn coerce_borrowed_pointer<'a, E, I>(&self, + exprs: &E, + a: Ty<'tcx>, + b: Ty<'tcx>, + r_b: &'tcx ty::Region, + mt_b: TypeAndMut<'tcx>) + -> CoerceResult<'tcx> + where E: Fn() -> I, + I: IntoIterator + { + + debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b); // If we have a parameter of type `&M T_a` and the value // provided is `expr`, we will be adding an implicit borrow, @@ -178,74 +225,189 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // to type check, we will construct the type that `&M*expr` would // yield. - match a.sty { - ty::TyRef(_, mt_a) => { - try!(coerce_mutbls(mt_a.mutbl, mutbl_b)); + let (r_a, mt_a) = match a.sty { + ty::TyRef(r_a, mt_a) => { + coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?; + (r_a, mt_a) } - _ => return self.subtype(a, b) - } + _ => return self.unify_and_identity(a, b), + }; - let coercion = Coercion(self.origin.span()); - let r_borrow = self.fcx.infcx().next_region_var(coercion); - let r_borrow = self.tcx().mk_region(r_borrow); - let autoref = Some(AutoPtr(r_borrow, mutbl_b)); + let span = self.cause.span; - let lvalue_pref = LvaluePreference::from_mutbl(mutbl_b); let mut first_error = None; - let (_, autoderefs, success) = autoderef(self.fcx, - expr_a.span, - a, - Some(expr_a), - UnresolvedTypeAction::Ignore, - lvalue_pref, - |inner_ty, autoderef| { - if autoderef == 0 { + let mut r_borrow_var = None; + let mut autoderef = self.autoderef(span, a); + let mut success = None; + + for (referent_ty, autoderefs) in autoderef.by_ref() { + if autoderefs == 0 { // Don't let this pass, otherwise it would cause // &T to autoref to &&T. 
- return None; + continue; } - let ty = self.tcx().mk_ref(r_borrow, - TypeAndMut {ty: inner_ty, mutbl: mutbl_b}); - if let Err(err) = self.subtype(ty, b) { - if first_error.is_none() { - first_error = Some(err); - } - None + + // At this point, we have deref'd `a` to `referent_ty`. So + // imagine we are coercing from `&'a mut Vec` to `&'b mut [T]`. + // In the autoderef loop for `&'a mut Vec`, we would get + // three callbacks: + // + // - `&'a mut Vec` -- 0 derefs, just ignore it + // - `Vec` -- 1 deref + // - `[T]` -- 2 deref + // + // At each point after the first callback, we want to + // check to see whether this would match out target type + // (`&'b mut [T]`) if we autoref'd it. We can't just + // compare the referent types, though, because we still + // have to consider the mutability. E.g., in the case + // we've been considering, we have an `&mut` reference, so + // the `T` in `[T]` needs to be unified with equality. + // + // Therefore, we construct reference types reflecting what + // the types will be after we do the final auto-ref and + // compare those. Note that this means we use the target + // mutability [1], since it may be that we are coercing + // from `&mut T` to `&U`. + // + // One fine point concerns the region that we use. We + // choose the region such that the region of the final + // type that results from `unify` will be the region we + // want for the autoref: + // + // - if in sub mode, that means we want to use `'b` (the + // region from the target reference) for both + // pointers [2]. This is because sub mode (somewhat + // arbitrarily) returns the subtype region. In the case + // where we are coercing to a target type, we know we + // want to use that target type region (`'b`) because -- + // for the program to type-check -- it must be the + // smaller of the two. + // - One fine point. It may be surprising that we can + // use `'b` without relating `'a` and `'b`. 
The reason + // that this is ok is that what we produce is + // effectively a `&'b *x` expression (if you could + // annotate the region of a borrow), and regionck has + // code that adds edges from the region of a borrow + // (`'b`, here) into the regions in the borrowed + // expression (`*x`, here). (Search for "link".) + // - if in lub mode, things can get fairly complicated. The + // easiest thing is just to make a fresh + // region variable [4], which effectively means we defer + // the decision to region inference (and regionck, which will add + // some more edges to this variable). However, this can wind up + // creating a crippling number of variables in some cases -- + // e.g. #32278 -- so we optimize one particular case [3]. + // Let me try to explain with some examples: + // - The "running example" above represents the simple case, + // where we have one `&` reference at the outer level and + // ownership all the rest of the way down. In this case, + // we want `LUB('a, 'b)` as the resulting region. + // - However, if there are nested borrows, that region is + // too strong. Consider a coercion from `&'a &'x Rc` to + // `&'b T`. In this case, `'a` is actually irrelevant. + // The pointer we want is `LUB('x, 'b`). If we choose `LUB('a,'b)` + // we get spurious errors (`run-pass/regions-lub-ref-ref-rc.rs`). + // (The errors actually show up in borrowck, typically, because + // this extra edge causes the region `'a` to be inferred to something + // too big, which then results in borrowck errors.) + // - We could track the innermost shared reference, but there is already + // code in regionck that has the job of creating links between + // the region of a borrow and the regions in the thing being + // borrowed (here, `'a` and `'x`), and it knows how to handle + // all the various cases. So instead we just make a region variable + // and let regionck figure it out. 
+ let r = if !self.use_lub { + r_b // [2] above + } else if autoderefs == 1 { + r_a // [3] above } else { - Some(()) + if r_borrow_var.is_none() { + // create var lazilly, at most once + let coercion = Coercion(span); + let r = self.next_region_var(coercion); + r_borrow_var = Some(r); // [4] above + } + r_borrow_var.unwrap() + }; + let derefd_ty_a = self.tcx.mk_ref(r, + TypeAndMut { + ty: referent_ty, + mutbl: mt_b.mutbl, // [1] above + }); + match self.unify(derefd_ty_a, b) { + Ok(ty) => { + success = Some((ty, autoderefs)); + break; + } + Err(err) => { + if first_error.is_none() { + first_error = Some(err); + } + } } - }); + } - match success { - Some(_) => { - Ok(Some(AdjustDerefRef(AutoDerefRef { - autoderefs: autoderefs, - autoref: autoref, - unsize: None - }))) - } + // Extract type or return an error. We return the first error + // we got, which should be from relating the "base" type + // (e.g., in example above, the failure from relating `Vec` + // to the target type), since that should be the least + // confusing. + let (ty, autoderefs) = match success { + Some(d) => d, None => { - // Return original error as if overloaded deref was never - // attempted, to avoid irrelevant/confusing error messages. - Err(first_error.expect("coerce_borrowed_pointer failed with no error?")) + let err = first_error.expect("coerce_borrowed_pointer had no error"); + debug!("coerce_borrowed_pointer: failed with err = {:?}", err); + return Err(err); } + }; + + // This commits the obligations to the fulfillcx. After this succeeds, + // this snapshot can't be rolled back. + autoderef.finalize(LvaluePreference::from_mutbl(mt_b.mutbl), exprs()); + + // Now apply the autoref. We have to extract the region out of + // the final ref type we got. + if ty == a && mt_a.mutbl == hir::MutImmutable && autoderefs == 1 { + // As a special case, if we would produce `&'a *x`, that's + // a total no-op. We end up with the type `&'a T` just as + // we started with. 
In that case, just skip it + // altogether. This is just an optimization. + // + // Note that for `&mut`, we DO want to reborrow -- + // otherwise, this would be a move, which might be an + // error. For example `foo(self.x)` where `self` and + // `self.x` both have `&mut `type would be a move of + // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`, + // which is a borrow. + assert_eq!(mt_b.mutbl, hir::MutImmutable); // can only coerce &T -> &U + return self.identity(ty); } + let r_borrow = match ty.sty { + ty::TyRef(r_borrow, _) => r_borrow, + _ => span_bug!(span, "expected a ref type, got {:?}", ty), + }; + let autoref = Some(AutoBorrow::Ref(r_borrow, mt_b.mutbl)); + debug!("coerce_borrowed_pointer: succeeded ty={:?} autoderefs={:?} autoref={:?}", + ty, + autoderefs, + autoref); + Ok((ty, Adjust::DerefRef { + autoderefs: autoderefs, + autoref: autoref, + unsize: false, + })) } // &[T; n] or &mut [T; n] -> &[T] // or &mut [T; n] -> &mut [T] // or &Concrete -> &Trait, etc. - fn coerce_unsized(&self, - source: Ty<'tcx>, - target: Ty<'tcx>) - -> CoerceResult<'tcx> { - debug!("coerce_unsized(source={:?}, target={:?})", - source, - target); + fn coerce_unsized(&self, source: Ty<'tcx>, target: Ty<'tcx>) -> CoerceResult<'tcx> { + debug!("coerce_unsized(source={:?}, target={:?})", source, target); - let traits = (self.tcx().lang_items.unsize_trait(), - self.tcx().lang_items.coerce_unsized_trait()); + let traits = (self.tcx.lang_items.unsize_trait(), + self.tcx.lang_items.coerce_unsized_trait()); let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits { (u, cu) } else { @@ -261,35 +423,30 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // Handle reborrows before selecting `Source: CoerceUnsized`. 
let (source, reborrow) = match (&source.sty, &target.sty) { (&ty::TyRef(_, mt_a), &ty::TyRef(_, mt_b)) => { - try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl)); + coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?; - let coercion = Coercion(self.origin.span()); - let r_borrow = self.fcx.infcx().next_region_var(coercion); - let region = self.tcx().mk_region(r_borrow); - (mt_a.ty, Some(AutoPtr(region, mt_b.mutbl))) + let coercion = Coercion(self.cause.span); + let r_borrow = self.next_region_var(coercion); + (mt_a.ty, Some(AutoBorrow::Ref(r_borrow, mt_b.mutbl))) } (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => { - try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl)); - (mt_a.ty, Some(AutoUnsafe(mt_b.mutbl))) + coerce_mutbls(mt_a.mutbl, mt_b.mutbl)?; + (mt_a.ty, Some(AutoBorrow::RawPtr(mt_b.mutbl))) } - _ => (source, None) + _ => (source, None), }; - let source = source.adjust_for_autoref(self.tcx(), reborrow); + let source = source.adjust_for_autoref(self.tcx, reborrow); - let mut selcx = traits::SelectionContext::new(self.fcx.infcx()); + let mut selcx = traits::SelectionContext::new(self); // Use a FIFO queue for this custom fulfillment procedure. let mut queue = VecDeque::new(); let mut leftover_predicates = vec![]; // Create an obligation for `Source: CoerceUnsized`. 
- let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id); - queue.push_back(predicate_for_trait_def(self.tcx(), - cause, - coerce_unsized_did, - 0, - source, - vec![target])); + let cause = ObligationCause::misc(self.cause.span, self.body_id); + queue.push_back(self.tcx + .predicate_for_trait_def(cause, coerce_unsized_did, 0, source, &[target])); // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where @@ -297,10 +454,8 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { let traits = [coerce_unsized_did, unsize_did]; while let Some(obligation) = queue.pop_front() { debug!("coerce_unsized resolve step: {:?}", obligation); - let trait_ref = match obligation.predicate { - ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => { - tr.clone() - } + let trait_ref = match obligation.predicate { + ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => tr.clone(), _ => { leftover_predicates.push(obligation); continue; @@ -308,14 +463,15 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { }; match selcx.select(&obligation.with(trait_ref)) { // Uncertain or unimplemented. - Ok(None) | Err(traits::Unimplemented) => { + Ok(None) | + Err(traits::Unimplemented) => { debug!("coerce_unsized: early return - can't prove obligation"); return Err(TypeError::Mismatch); } // Object safety violations or miscellaneous. Err(err) => { - report_selection_error(self.fcx.infcx(), &obligation, &err); + self.report_selection_error(&obligation, &err); // Treat this like an obligation and follow through // with the unsizing - the lack of a coercion should // be silent, as it causes a type mismatch later. 
@@ -329,46 +485,48 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { } } - let mut obligations = self.unsizing_obligations.borrow_mut(); - assert!(obligations.is_empty()); - *obligations = leftover_predicates; + *self.unsizing_obligations.borrow_mut() = leftover_predicates; - let adjustment = AutoDerefRef { + let adjustment = Adjust::DerefRef { autoderefs: if reborrow.is_some() { 1 } else { 0 }, autoref: reborrow, - unsize: Some(target) + unsize: true, }; debug!("Success, coerced with {:?}", adjustment); - Ok(Some(AdjustDerefRef(adjustment))) + Ok((target, adjustment)) } - fn coerce_from_fn_pointer(&self, + fn coerce_from_safe_fn(&self, a: Ty<'tcx>, fn_ty_a: &'tcx ty::BareFnTy<'tcx>, b: Ty<'tcx>) - -> CoerceResult<'tcx> - { - /*! - * Attempts to coerce from the type of a Rust function item - * into a closure or a `proc`. - */ - - self.unpack_actual_value(b, |b| { - debug!("coerce_from_fn_pointer(a={:?}, b={:?})", - a, b); - - if let ty::TyBareFn(None, fn_ty_b) = b.sty { - match (fn_ty_a.unsafety, fn_ty_b.unsafety) { - (hir::Unsafety::Normal, hir::Unsafety::Unsafe) => { - let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a); - try!(self.subtype(unsafe_a, b)); - return Ok(Some(AdjustUnsafeFnPointer)); - } - _ => {} + -> CoerceResult<'tcx> { + if let ty::TyFnPtr(fn_ty_b) = b.sty { + match (fn_ty_a.unsafety, fn_ty_b.unsafety) { + (hir::Unsafety::Normal, hir::Unsafety::Unsafe) => { + let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a); + return self.unify_and_identity(unsafe_a, b) + .map(|(ty, _)| (ty, Adjust::UnsafeFnPointer)); } + _ => {} } - self.subtype(a, b) - }) + } + self.unify_and_identity(a, b) + } + + fn coerce_from_fn_pointer(&self, + a: Ty<'tcx>, + fn_ty_a: &'tcx ty::BareFnTy<'tcx>, + b: Ty<'tcx>) + -> CoerceResult<'tcx> { + //! Attempts to coerce from the type of a Rust function item + //! into a closure or a `proc`. + //! 
+ + let b = self.shallow_resolve(b); + debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b); + + self.coerce_from_safe_fn(a, fn_ty_a, b) } fn coerce_from_fn_item(&self, @@ -376,24 +534,21 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { fn_ty_a: &'tcx ty::BareFnTy<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { - /*! - * Attempts to coerce from the type of a Rust function item - * into a closure or a `proc`. - */ - - self.unpack_actual_value(b, |b| { - debug!("coerce_from_fn_item(a={:?}, b={:?})", - a, b); - - match b.sty { - ty::TyBareFn(None, _) => { - let a_fn_pointer = self.tcx().mk_fn(None, fn_ty_a); - try!(self.subtype(a_fn_pointer, b)); - Ok(Some(AdjustReifyFnPointer)) - } - _ => self.subtype(a, b) + //! Attempts to coerce from the type of a Rust function item + //! into a closure or a `proc`. + //! + + let b = self.shallow_resolve(b); + debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b); + + match b.sty { + ty::TyFnPtr(_) => { + let a_fn_pointer = self.tcx.mk_fn_ptr(fn_ty_a); + self.coerce_from_safe_fn(a_fn_pointer, fn_ty_a, b) + .map(|(ty, _)| (ty, Adjust::ReifyFnPointer)) } - }) + _ => self.unify_and_identity(a, b), + } } fn coerce_unsafe_ptr(&self, @@ -401,80 +556,231 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { b: Ty<'tcx>, mutbl_b: hir::Mutability) -> CoerceResult<'tcx> { - debug!("coerce_unsafe_ptr(a={:?}, b={:?})", - a, - b); + debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b); let (is_ref, mt_a) = match a.sty { ty::TyRef(_, mt) => (true, mt), ty::TyRawPtr(mt) => (false, mt), _ => { - return self.subtype(a, b); + return self.unify_and_identity(a, b); } }; // Check that the types which they point at are compatible. 
- let a_unsafe = self.tcx().mk_ptr(ty::TypeAndMut{ mutbl: mutbl_b, ty: mt_a.ty }); - try!(self.subtype(a_unsafe, b)); - try!(coerce_mutbls(mt_a.mutbl, mutbl_b)); + let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { + mutbl: mutbl_b, + ty: mt_a.ty, + }); + let (ty, noop) = self.unify_and_identity(a_unsafe, b)?; + coerce_mutbls(mt_a.mutbl, mutbl_b)?; // Although references and unsafe ptrs have the same - // representation, we still register an AutoDerefRef so that + // representation, we still register an Adjust::DerefRef so that // regionck knows that the region for `a` must be valid here. - if is_ref { - Ok(Some(AdjustDerefRef(AutoDerefRef { - autoderefs: 1, - autoref: Some(AutoUnsafe(mutbl_b)), - unsize: None - }))) - } else { - Ok(None) + Ok((ty, + if is_ref { + Adjust::DerefRef { + autoderefs: 1, + autoref: Some(AutoBorrow::RawPtr(mutbl_b)), + unsize: false, + } + } else if mt_a.mutbl != mutbl_b { + Adjust::MutToConstPointer + } else { + noop + })) + } +} + +fn apply<'a, 'b, 'gcx, 'tcx, E, I>(coerce: &mut Coerce<'a, 'gcx, 'tcx>, + exprs: &E, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> RelateResult<'tcx, Adjustment<'tcx>> + where E: Fn() -> I, + I: IntoIterator +{ + + let (ty, adjust) = indent(|| coerce.coerce(exprs, a, b))?; + + let fcx = coerce.fcx; + if let Adjust::DerefRef { unsize: true, .. } = adjust { + let mut obligations = coerce.unsizing_obligations.borrow_mut(); + for obligation in obligations.drain(..) 
{ + fcx.register_predicate(obligation); } } + + Ok(Adjustment { + kind: adjust, + target: ty + }) } -pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &hir::Expr, - a: Ty<'tcx>, - b: Ty<'tcx>) - -> RelateResult<'tcx, ()> { - debug!("mk_assignty({:?} -> {:?})", a, b); - let mut unsizing_obligations = vec![]; - let adjustment = try!(indent(|| { - fcx.infcx().commit_if_ok(|_| { - let coerce = Coerce { - fcx: fcx, - origin: TypeOrigin::ExprAssignable(expr.span), - unsizing_obligations: RefCell::new(vec![]) - }; - let adjustment = try!(coerce.coerce(expr, a, b)); - unsizing_obligations = coerce.unsizing_obligations.into_inner(); - Ok(adjustment) +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + /// Attempt to coerce an expression to a type, and return the + /// adjusted type of the expression, if successful. + /// Adjustments are only recorded if the coercion succeeded. + /// The expressions *must not* have any pre-existing adjustments. + pub fn try_coerce(&self, + expr: &hir::Expr, + expr_ty: Ty<'tcx>, + target: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> { + let source = self.resolve_type_vars_with_obligations(expr_ty); + debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target); + + let cause = self.cause(expr.span, ObligationCauseCode::ExprAssignable); + let mut coerce = Coerce::new(self, cause); + self.commit_if_ok(|_| { + let adjustment = apply(&mut coerce, &|| Some(expr), source, target)?; + if !adjustment.is_identity() { + debug!("Success, coerced with {:?}", adjustment); + match self.tables.borrow().adjustments.get(&expr.id) { + None | + Some(&Adjustment { kind: Adjust::NeverToAny, .. 
}) => (), + _ => bug!("expr already has an adjustment on it!"), + }; + self.write_adjustment(expr.id, adjustment); + } + Ok(adjustment.target) }) - })); + } - if let Some(AdjustDerefRef(auto)) = adjustment { - if auto.unsize.is_some() { - for obligation in unsizing_obligations { - fcx.register_predicate(obligation); + /// Given some expressions, their known unified type and another expression, + /// tries to unify the types, potentially inserting coercions on any of the + /// provided expressions and returns their LUB (aka "common supertype"). + pub fn try_find_coercion_lub<'b, E, I>(&self, + cause: &ObligationCause<'tcx>, + exprs: E, + prev_ty: Ty<'tcx>, + new: &'b hir::Expr, + new_ty: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> + where E: Fn() -> I, + I: IntoIterator + { + + let prev_ty = self.resolve_type_vars_with_obligations(prev_ty); + let new_ty = self.resolve_type_vars_with_obligations(new_ty); + debug!("coercion::try_find_lub({:?}, {:?})", prev_ty, new_ty); + + let trace = TypeTrace::types(cause, true, prev_ty, new_ty); + + // Special-case that coercion alone cannot handle: + // Two function item types of differing IDs or Substs. + match (&prev_ty.sty, &new_ty.sty) { + (&ty::TyFnDef(a_def_id, a_substs, a_fty), &ty::TyFnDef(b_def_id, b_substs, b_fty)) => { + // The signature must always match. + let fty = self.lub(true, trace.clone(), &a_fty, &b_fty) + .map(|ok| self.register_infer_ok_obligations(ok))?; + + if a_def_id == b_def_id { + // Same function, maybe the parameters match. + let substs = self.commit_if_ok(|_| { + self.lub(true, trace.clone(), &a_substs, &b_substs) + .map(|ok| self.register_infer_ok_obligations(ok)) + }); + + if let Ok(substs) = substs { + // We have a LUB of prev_ty and new_ty, just return it. + return Ok(self.tcx.mk_fn_def(a_def_id, substs, fty)); + } + } + + // Reify both sides and return the reified fn pointer type. 
+ let fn_ptr = self.tcx.mk_fn_ptr(fty); + for expr in exprs().into_iter().chain(Some(new)) { + // No adjustments can produce a fn item, so this should never trip. + assert!(!self.tables.borrow().adjustments.contains_key(&expr.id)); + self.write_adjustment(expr.id, Adjustment { + kind: Adjust::ReifyFnPointer, + target: fn_ptr + }); + } + return Ok(fn_ptr); } + _ => {} } - } - if let Some(adjustment) = adjustment { - debug!("Success, coerced with {:?}", adjustment); - fcx.write_adjustment(expr.id, adjustment); - } - Ok(()) -} + let mut coerce = Coerce::new(self, cause.clone()); + coerce.use_lub = true; -fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability, - to_mutbl: hir::Mutability) - -> CoerceResult<'tcx> { - match (from_mutbl, to_mutbl) { - (hir::MutMutable, hir::MutMutable) | - (hir::MutImmutable, hir::MutImmutable) | - (hir::MutMutable, hir::MutImmutable) => Ok(None), - (hir::MutImmutable, hir::MutMutable) => Err(TypeError::Mutability) + // First try to coerce the new expression to the type of the previous ones, + // but only if the new expression has no coercion already applied to it. + let mut first_error = None; + if !self.tables.borrow().adjustments.contains_key(&new.id) { + let result = self.commit_if_ok(|_| apply(&mut coerce, &|| Some(new), new_ty, prev_ty)); + match result { + Ok(adjustment) => { + if !adjustment.is_identity() { + self.write_adjustment(new.id, adjustment); + } + return Ok(adjustment.target); + } + Err(e) => first_error = Some(e), + } + } + + // Then try to coerce the previous expressions to the type of the new one. + // This requires ensuring there are no coercions applied to *any* of the + // previous expressions, other than noop reborrows (ignoring lifetimes). 
+ for expr in exprs() { + let noop = match self.tables.borrow().adjustments.get(&expr.id).map(|adj| adj.kind) { + Some(Adjust::DerefRef { + autoderefs: 1, + autoref: Some(AutoBorrow::Ref(_, mutbl_adj)), + unsize: false + }) => { + match self.node_ty(expr.id).sty { + ty::TyRef(_, mt_orig) => { + // Reborrow that we can safely ignore. + mutbl_adj == mt_orig.mutbl + } + _ => false, + } + } + Some(Adjust::NeverToAny) => true, + Some(_) => false, + None => true, + }; + + if !noop { + return self.commit_if_ok(|_| { + self.lub(true, trace.clone(), &prev_ty, &new_ty) + .map(|ok| self.register_infer_ok_obligations(ok)) + }); + } + } + + match self.commit_if_ok(|_| apply(&mut coerce, &exprs, prev_ty, new_ty)) { + Err(_) => { + // Avoid giving strange errors on failed attempts. + if let Some(e) = first_error { + Err(e) + } else { + self.commit_if_ok(|_| { + self.lub(true, trace, &prev_ty, &new_ty) + .map(|ok| self.register_infer_ok_obligations(ok)) + }) + } + } + Ok(adjustment) => { + if !adjustment.is_identity() { + let mut tables = self.tables.borrow_mut(); + for expr in exprs() { + if let Some(&mut Adjustment { + kind: Adjust::NeverToAny, + ref mut target + }) = tables.adjustments.get_mut(&expr.id) { + *target = adjustment.target; + continue; + } + tables.adjustments.insert(expr.id, adjustment); + } + } + Ok(adjustment.target) + } + } } } diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index d5f2422018925..2602ff05badd4 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -8,16 +8,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::free_region::FreeRegionMap; -use middle::infer::{self, TypeOrigin}; -use middle::traits; -use middle::ty::{self}; -use middle::subst::{self, Subst, Substs, VecPerParamSpace}; +use rustc::hir; +use rustc::infer::{self, InferOk}; +use rustc::middle::free_region::FreeRegionMap; +use rustc::ty; +use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; +use rustc::ty::error::{ExpectedFound, TypeError}; +use rustc::ty::subst::{Subst, Substs}; +use rustc::hir::{ImplItemKind, TraitItem_, Ty_}; +use rustc::util::common::ErrorReported; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; +use CrateCtxt; use super::assoc; +use super::{Inherited, FnCtxt}; +use astconv::ExplicitSelf; /// Checks that a method from an impl conforms to the signature of /// the same method as declared in the trait. @@ -30,79 +37,74 @@ use super::assoc; /// - trait_m: the method in the trait /// - impl_trait_ref: the TraitRef corresponding to the trait implementation -pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>, - impl_m: &ty::Method<'tcx>, - impl_m_span: Span, - impl_m_body_id: ast::NodeId, - trait_m: &ty::Method<'tcx>, - impl_trait_ref: &ty::TraitRef<'tcx>) { +pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_m: &ty::AssociatedItem, + impl_m_span: Span, + impl_m_body_id: ast::NodeId, + trait_m: &ty::AssociatedItem, + impl_trait_ref: ty::TraitRef<'tcx>, + trait_item_span: Option, + old_broken_mode: bool) { debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref); - debug!("compare_impl_method: impl_trait_ref (liberated) = {:?}", - impl_trait_ref); - - let mut infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None); - let mut fulfillment_cx = infcx.fulfillment_cx.borrow_mut(); - - let trait_to_impl_substs = &impl_trait_ref.substs; + if let Err(ErrorReported) = compare_self_type(ccx, + impl_m, + impl_m_span, + trait_m, + impl_trait_ref) { + return; + } - // Try to give more informative error messages about self typing 
- // mismatches. Note that any mismatch will also be detected - // below, where we construct a canonical function type that - // includes the self parameter as a normal parameter. It's just - // that the error messages you get out of this code are a bit more - // inscrutable, particularly for cases where one method has no - // self. - match (&trait_m.explicit_self, &impl_m.explicit_self) { - (&ty::ExplicitSelfCategory::Static, - &ty::ExplicitSelfCategory::Static) => {} - (&ty::ExplicitSelfCategory::Static, _) => { - span_err!(tcx.sess, impl_m_span, E0185, - "method `{}` has a `{}` declaration in the impl, \ - but not in the trait", - trait_m.name, - impl_m.explicit_self); - return; - } - (_, &ty::ExplicitSelfCategory::Static) => { - span_err!(tcx.sess, impl_m_span, E0186, - "method `{}` has a `{}` declaration in the trait, \ - but not in the impl", - trait_m.name, - trait_m.explicit_self); - return; - } - _ => { - // Let the type checker catch other errors below - } + if let Err(ErrorReported) = compare_number_of_generics(ccx, + impl_m, + impl_m_span, + trait_m, + trait_item_span) { + return; } - let num_impl_m_type_params = impl_m.generics.types.len(subst::FnSpace); - let num_trait_m_type_params = trait_m.generics.types.len(subst::FnSpace); - if num_impl_m_type_params != num_trait_m_type_params { - span_err!(tcx.sess, impl_m_span, E0049, - "method `{}` has {} type parameter{} \ - but its trait declaration has {} type parameter{}", - trait_m.name, - num_impl_m_type_params, - if num_impl_m_type_params == 1 {""} else {"s"}, - num_trait_m_type_params, - if num_trait_m_type_params == 1 {""} else {"s"}); + if let Err(ErrorReported) = compare_number_of_method_arguments(ccx, + impl_m, + impl_m_span, + trait_m, + trait_item_span) { return; } - if impl_m.fty.sig.0.inputs.len() != trait_m.fty.sig.0.inputs.len() { - span_err!(tcx.sess, impl_m_span, E0050, - "method `{}` has {} parameter{} \ - but the declaration in trait `{}` has {}", - trait_m.name, - 
impl_m.fty.sig.0.inputs.len(), - if impl_m.fty.sig.0.inputs.len() == 1 {""} else {"s"}, - tcx.item_path_str(trait_m.def_id), - trait_m.fty.sig.0.inputs.len()); + if let Err(ErrorReported) = compare_predicate_entailment(ccx, + impl_m, + impl_m_span, + impl_m_body_id, + trait_m, + impl_trait_ref, + old_broken_mode) { return; } +} + +fn compare_predicate_entailment<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_m: &ty::AssociatedItem, + impl_m_span: Span, + impl_m_body_id: ast::NodeId, + trait_m: &ty::AssociatedItem, + impl_trait_ref: ty::TraitRef<'tcx>, + old_broken_mode: bool) + -> Result<(), ErrorReported> { + let tcx = ccx.tcx; + + let trait_to_impl_substs = impl_trait_ref.substs; + + let cause = ObligationCause { + span: impl_m_span, + body_id: impl_m_body_id, + code: ObligationCauseCode::CompareImplMethodObligation { + item_name: impl_m.name, + impl_item_def_id: impl_m.def_id, + trait_item_def_id: trait_m.def_id, + lint_id: if !old_broken_mode { Some(impl_m_body_id) } else { None }, + }, + }; // This code is best explained by example. Consider a trait: // @@ -176,316 +178,679 @@ pub fn compare_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>, let impl_to_skol_substs = &impl_param_env.free_substs; // Create mapping from trait to skolemized. - let trait_to_skol_substs = - trait_to_impl_substs - .subst(tcx, impl_to_skol_substs) - .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(), - impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec()); + let trait_to_skol_substs = impl_to_skol_substs.rebase_onto(tcx, + impl_m.container.id(), + trait_to_impl_substs.subst(tcx, + impl_to_skol_substs)); debug!("compare_impl_method: trait_to_skol_substs={:?}", trait_to_skol_substs); - // Check region bounds. FIXME(@jroesch) refactor this away when removing - // ParamBounds. 
- if !check_region_bounds_on_impl_method(tcx, - impl_m_span, - impl_m, - &trait_m.generics, - &impl_m.generics, - &trait_to_skol_substs, - impl_to_skol_substs) { - return; - } + let impl_m_generics = tcx.item_generics(impl_m.def_id); + let trait_m_generics = tcx.item_generics(trait_m.def_id); + let impl_m_predicates = tcx.item_predicates(impl_m.def_id); + let trait_m_predicates = tcx.item_predicates(trait_m.def_id); + + // Check region bounds. + check_region_bounds_on_impl_method(ccx, + impl_m_span, + impl_m, + &trait_m_generics, + &impl_m_generics, + trait_to_skol_substs, + impl_to_skol_substs)?; // Create obligations for each predicate declared by the impl // definition in the context of the trait's parameter // environment. We can't just use `impl_env.caller_bounds`, // however, because we want to replace all late-bound regions with // region variables. - let impl_bounds = - impl_m.predicates.instantiate(tcx, impl_to_skol_substs); - - let (impl_bounds, _) = - infcx.replace_late_bound_regions_with_fresh_var( - impl_m_span, - infer::HigherRankedType, - &ty::Binder(impl_bounds)); - debug!("compare_impl_method: impl_bounds={:?}", - impl_bounds); - - // Normalize the associated types in the trait_bounds. - let trait_bounds = trait_m.predicates.instantiate(tcx, &trait_to_skol_substs); + let impl_predicates = tcx.item_predicates(impl_m_predicates.parent.unwrap()); + let mut hybrid_preds = impl_predicates.instantiate(tcx, impl_to_skol_substs); - // Obtain the predicate split predicate sets for each. 
- let trait_pred = trait_bounds.predicates.split(); - let impl_pred = impl_bounds.predicates.split(); + debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds); // This is the only tricky bit of the new way we check implementation methods - // We need to build a set of predicates where only the FnSpace bounds + // We need to build a set of predicates where only the method-level bounds // are from the trait and we assume all other bounds from the implementation // to be previously satisfied. // // We then register the obligations from the impl_m and check to see // if all constraints hold. - let hybrid_preds = VecPerParamSpace::new( - impl_pred.types, - impl_pred.selfs, - trait_pred.fns - ); + hybrid_preds.predicates + .extend(trait_m_predicates.instantiate_own(tcx, trait_to_skol_substs).predicates); // Construct trait parameter environment and then shift it into the skolemized viewpoint. // The key step here is to update the caller_bounds's predicates to be // the new hybrid bounds we computed. 
let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_body_id); - let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.into_vec()); - let trait_param_env = traits::normalize_param_env_or_error(trait_param_env, + let trait_param_env = impl_param_env.with_caller_bounds(hybrid_preds.predicates); + let trait_param_env = traits::normalize_param_env_or_error(tcx, + trait_param_env, normalize_cause.clone()); - // FIXME(@jroesch) this seems ugly, but is a temporary change - infcx.parameter_environment = trait_param_env; - debug!("compare_impl_method: trait_bounds={:?}", - infcx.parameter_environment.caller_bounds); + tcx.infer_ctxt(None, Some(trait_param_env), Reveal::NotSpecializable).enter(|infcx| { + let inh = Inherited::new(ccx, infcx); + let infcx = &inh.infcx; + let fulfillment_cx = &inh.fulfillment_cx; - let mut selcx = traits::SelectionContext::new(&infcx); + debug!("compare_impl_method: caller_bounds={:?}", + infcx.parameter_environment.caller_bounds); - for predicate in impl_pred.fns { - let traits::Normalized { value: predicate, .. } = - traits::normalize(&mut selcx, normalize_cause.clone(), &predicate); + let mut selcx = traits::SelectionContext::new(&infcx); - let cause = traits::ObligationCause { - span: impl_m_span, - body_id: impl_m_body_id, - code: traits::ObligationCauseCode::CompareImplMethodObligation - }; + let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_skol_substs); + let (impl_m_own_bounds, _) = infcx.replace_late_bound_regions_with_fresh_var(impl_m_span, + infer::HigherRankedType, + &ty::Binder(impl_m_own_bounds.predicates)); + for predicate in impl_m_own_bounds { + let traits::Normalized { value: predicate, .. 
} = + traits::normalize(&mut selcx, normalize_cause.clone(), &predicate); - fulfillment_cx.register_predicate_obligation( - &infcx, - traits::Obligation::new(cause, predicate)); - } + fulfillment_cx.borrow_mut().register_predicate_obligation( + &infcx, + traits::Obligation::new(cause.clone(), predicate)); + } - // We now need to check that the signature of the impl method is - // compatible with that of the trait method. We do this by - // checking that `impl_fty <: trait_fty`. - // - // FIXME. Unfortunately, this doesn't quite work right now because - // associated type normalization is not integrated into subtype - // checks. For the comparison to be valid, we need to - // normalize the associated types in the impl/trait methods - // first. However, because function types bind regions, just - // calling `normalize_associated_types_in` would have no effect on - // any associated types appearing in the fn arguments or return - // type. - - // Compute skolemized form of impl and trait method tys. - let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(impl_m.fty.clone())); - let impl_fty = impl_fty.subst(tcx, impl_to_skol_substs); - let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(trait_m.fty.clone())); - let trait_fty = trait_fty.subst(tcx, &trait_to_skol_substs); - - let err = infcx.commit_if_ok(|snapshot| { - let origin = TypeOrigin::MethodCompatCheck(impl_m_span); + // We now need to check that the signature of the impl method is + // compatible with that of the trait method. We do this by + // checking that `impl_fty <: trait_fty`. + // + // FIXME. Unfortunately, this doesn't quite work right now because + // associated type normalization is not integrated into subtype + // checks. For the comparison to be valid, we need to + // normalize the associated types in the impl/trait methods + // first. 
However, because function types bind regions, just + // calling `normalize_associated_types_in` would have no effect on + // any associated types appearing in the fn arguments or return + // type. + + // Compute skolemized form of impl and trait method tys. + let tcx = infcx.tcx; + + let m_fty = |method: &ty::AssociatedItem| { + match tcx.item_type(method.def_id).sty { + ty::TyFnDef(_, _, f) => f, + _ => bug!() + } + }; + let impl_m_fty = m_fty(impl_m); + let trait_m_fty = m_fty(trait_m); let (impl_sig, _) = infcx.replace_late_bound_regions_with_fresh_var(impl_m_span, infer::HigherRankedType, - &impl_m.fty.sig); + &impl_m_fty.sig); let impl_sig = impl_sig.subst(tcx, impl_to_skol_substs); let impl_sig = assoc::normalize_associated_types_in(&infcx, - &mut fulfillment_cx, + &mut fulfillment_cx.borrow_mut(), impl_m_span, impl_m_body_id, &impl_sig); - let impl_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy { - unsafety: impl_m.fty.unsafety, - abi: impl_m.fty.abi, - sig: ty::Binder(impl_sig) + let impl_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: impl_m_fty.unsafety, + abi: impl_m_fty.abi, + sig: ty::Binder(impl_sig.clone()), })); - debug!("compare_impl_method: impl_fty={:?}", - impl_fty); + debug!("compare_impl_method: impl_fty={:?}", impl_fty); - let (trait_sig, skol_map) = - infcx.skolemize_late_bound_regions(&trait_m.fty.sig, snapshot); + let trait_sig = tcx.liberate_late_bound_regions( + infcx.parameter_environment.free_id_outlive, + &trait_m_fty.sig); let trait_sig = - trait_sig.subst(tcx, &trait_to_skol_substs); + trait_sig.subst(tcx, trait_to_skol_substs); let trait_sig = assoc::normalize_associated_types_in(&infcx, - &mut fulfillment_cx, + &mut fulfillment_cx.borrow_mut(), impl_m_span, impl_m_body_id, &trait_sig); - let trait_fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy { - unsafety: trait_m.fty.unsafety, - abi: trait_m.fty.abi, - sig: ty::Binder(trait_sig) + let trait_fty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: 
trait_m_fty.unsafety, + abi: trait_m_fty.abi, + sig: ty::Binder(trait_sig.clone()), })); - debug!("compare_impl_method: trait_fty={:?}", - trait_fty); + debug!("compare_impl_method: trait_fty={:?}", trait_fty); - try!(infer::mk_subty(&infcx, false, origin, impl_fty, trait_fty)); + let sub_result = infcx.sub_types(false, &cause, impl_fty, trait_fty) + .map(|InferOk { obligations, .. }| { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + }); - infcx.leak_check(&skol_map, snapshot) - }); - - match err { - Ok(()) => { } - Err(terr) => { - debug!("checking trait method for compatibility: impl ty {:?}, trait ty {:?}", + if let Err(terr) = sub_result { + debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty); - span_err!(tcx.sess, impl_m_span, E0053, - "method `{}` has an incompatible type for trait: {}", - trait_m.name, - terr); - return; + + let (impl_err_span, trait_err_span) = extract_spans_for_error_reporting(&infcx, + &terr, + &cause, + impl_m, + impl_sig, + trait_m, + trait_sig); + + let cause = ObligationCause { + span: impl_err_span, + ..cause.clone() + }; + + let mut diag = struct_span_err!(tcx.sess, + cause.span, + E0053, + "method `{}` has an incompatible type for trait", + trait_m.name); + + infcx.note_type_err(&mut diag, + &cause, + trait_err_span.map(|sp| (sp, format!("type in trait"))), + Some(infer::ValuePairs::Types(ExpectedFound { + expected: trait_fty, + found: impl_fty, + })), + &terr); + diag.emit(); + return Err(ErrorReported); } - } - // Check that all obligations are satisfied by the implementation's - // version. - match fulfillment_cx.select_all_or_error(&infcx) { - Err(ref errors) => { traits::report_fulfillment_errors(&infcx, errors) } - Ok(_) => {} - } + // Check that all obligations are satisfied by the implementation's + // version. 
+ if let Err(ref errors) = fulfillment_cx.borrow_mut().select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(errors); + return Err(ErrorReported); + } - // Finally, resolve all regions. This catches wily misuses of - // lifetime parameters. We have to build up a plausible lifetime - // environment based on what we find in the trait. We could also - // include the obligations derived from the method argument types, - // but I don't think it's necessary -- after all, those are still - // in effect when type-checking the body, and all the - // where-clauses in the header etc should be implied by the trait - // anyway, so it shouldn't be needed there either. Anyway, we can - // always add more relations later (it's backwards compat). - let mut free_regions = FreeRegionMap::new(); - free_regions.relate_free_regions_from_predicates(tcx, - &infcx.parameter_environment.caller_bounds); - - infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id); - - fn check_region_bounds_on_impl_method<'tcx>(tcx: &ty::ctxt<'tcx>, + // Finally, resolve all regions. This catches wily misuses of + // lifetime parameters. + if old_broken_mode { + // FIXME(#18937) -- this is how the code used to + // work. This is buggy because the fulfillment cx creates + // region obligations that get overlooked. The right + // thing to do is the code below. But we keep this old + // pass around temporarily. 
+ let mut free_regions = FreeRegionMap::new(); + free_regions.relate_free_regions_from_predicates( + &infcx.parameter_environment.caller_bounds); + infcx.resolve_regions_and_report_errors(&free_regions, impl_m_body_id); + } else { + let fcx = FnCtxt::new(&inh, tcx.types.err, impl_m_body_id); + fcx.regionck_item(impl_m_body_id, impl_m_span, &[]); + } + + Ok(()) + }) +} + +fn check_region_bounds_on_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, span: Span, - impl_m: &ty::Method<'tcx>, + impl_m: &ty::AssociatedItem, trait_generics: &ty::Generics<'tcx>, impl_generics: &ty::Generics<'tcx>, trait_to_skol_substs: &Substs<'tcx>, impl_to_skol_substs: &Substs<'tcx>) - -> bool - { - - let trait_params = trait_generics.regions.get_slice(subst::FnSpace); - let impl_params = impl_generics.regions.get_slice(subst::FnSpace); - - debug!("check_region_bounds_on_impl_method: \ - trait_generics={:?} \ - impl_generics={:?} \ - trait_to_skol_substs={:?} \ - impl_to_skol_substs={:?}", - trait_generics, - impl_generics, - trait_to_skol_substs, - impl_to_skol_substs); - - // Must have same number of early-bound lifetime parameters. - // Unfortunately, if the user screws up the bounds, then this - // will change classification between early and late. E.g., - // if in trait we have `<'a,'b:'a>`, and in impl we just have - // `<'a,'b>`, then we have 2 early-bound lifetime parameters - // in trait but 0 in the impl. But if we report "expected 2 - // but found 0" it's confusing, because it looks like there - // are zero. Since I don't quite know how to phrase things at - // the moment, give a kind of vague error message. 
- if trait_params.len() != impl_params.len() { - span_err!(tcx.sess, span, E0195, - "lifetime parameters or bounds on method `{}` do \ - not match the trait declaration", - impl_m.name); - return false; - } + -> Result<(), ErrorReported> { + let trait_params = &trait_generics.regions[..]; + let impl_params = &impl_generics.regions[..]; + + debug!("check_region_bounds_on_impl_method: \ + trait_generics={:?} \ + impl_generics={:?} \ + trait_to_skol_substs={:?} \ + impl_to_skol_substs={:?}", + trait_generics, + impl_generics, + trait_to_skol_substs, + impl_to_skol_substs); + + // Must have same number of early-bound lifetime parameters. + // Unfortunately, if the user screws up the bounds, then this + // will change classification between early and late. E.g., + // if in trait we have `<'a,'b:'a>`, and in impl we just have + // `<'a,'b>`, then we have 2 early-bound lifetime parameters + // in trait but 0 in the impl. But if we report "expected 2 + // but found 0" it's confusing, because it looks like there + // are zero. Since I don't quite know how to phrase things at + // the moment, give a kind of vague error message. 
+ if trait_params.len() != impl_params.len() { + struct_span_err!(ccx.tcx.sess, + span, + E0195, + "lifetime parameters or bounds on method `{}` do not match the \ + trait declaration", + impl_m.name) + .span_label(span, &format!("lifetimes do not match trait")) + .emit(); + return Err(ErrorReported); + } + + return Ok(()); +} - return true; +fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, + terr: &TypeError, + cause: &ObligationCause<'tcx>, + impl_m: &ty::AssociatedItem, + impl_sig: ty::FnSig<'tcx>, + trait_m: &ty::AssociatedItem, + trait_sig: ty::FnSig<'tcx>) + -> (Span, Option) { + let tcx = infcx.tcx; + let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap(); + let (impl_m_output, impl_m_iter) = match tcx.map.expect_impl_item(impl_m_node_id).node { + ImplItemKind::Method(ref impl_m_sig, _) => { + (&impl_m_sig.decl.output, impl_m_sig.decl.inputs.iter()) + } + _ => bug!("{:?} is not a method", impl_m), + }; + + match *terr { + TypeError::Mutability => { + if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) { + let trait_m_iter = match tcx.map.expect_trait_item(trait_m_node_id).node { + TraitItem_::MethodTraitItem(ref trait_m_sig, _) => { + trait_m_sig.decl.inputs.iter() + } + _ => bug!("{:?} is not a MethodTraitItem", trait_m), + }; + + impl_m_iter.zip(trait_m_iter) + .find(|&(ref impl_arg, ref trait_arg)| { + match (&impl_arg.ty.node, &trait_arg.ty.node) { + (&Ty_::TyRptr(_, ref impl_mt), &Ty_::TyRptr(_, ref trait_mt)) | + (&Ty_::TyPtr(ref impl_mt), &Ty_::TyPtr(ref trait_mt)) => { + impl_mt.mutbl != trait_mt.mutbl + } + _ => false, + } + }) + .map(|(ref impl_arg, ref trait_arg)| { + match (impl_arg.to_self(), trait_arg.to_self()) { + (Some(impl_self), Some(trait_self)) => { + (impl_self.span, Some(trait_self.span)) + } + (None, None) => (impl_arg.ty.span, Some(trait_arg.ty.span)), + _ => { + bug!("impl and trait fns have different first args, impl: \ + {:?}, trait: {:?}", + 
impl_arg, + trait_arg) + } + } + }) + .unwrap_or((cause.span, tcx.map.span_if_local(trait_m.def_id))) + } else { + (cause.span, tcx.map.span_if_local(trait_m.def_id)) + } + } + TypeError::Sorts(ExpectedFound { .. }) => { + if let Some(trait_m_node_id) = tcx.map.as_local_node_id(trait_m.def_id) { + let (trait_m_output, trait_m_iter) = + match tcx.map.expect_trait_item(trait_m_node_id).node { + TraitItem_::MethodTraitItem(ref trait_m_sig, _) => { + (&trait_m_sig.decl.output, trait_m_sig.decl.inputs.iter()) + } + _ => bug!("{:?} is not a MethodTraitItem", trait_m), + }; + + let impl_iter = impl_sig.inputs.iter(); + let trait_iter = trait_sig.inputs.iter(); + impl_iter.zip(trait_iter) + .zip(impl_m_iter) + .zip(trait_m_iter) + .filter_map(|(((impl_arg_ty, trait_arg_ty), impl_arg), trait_arg)| { + match infcx.sub_types(true, &cause, trait_arg_ty, impl_arg_ty) { + Ok(_) => None, + Err(_) => Some((impl_arg.ty.span, Some(trait_arg.ty.span))), + } + }) + .next() + .unwrap_or_else(|| { + if infcx.sub_types(false, &cause, impl_sig.output, trait_sig.output) + .is_err() { + (impl_m_output.span(), Some(trait_m_output.span())) + } else { + (cause.span, tcx.map.span_if_local(trait_m.def_id)) + } + }) + } else { + (cause.span, tcx.map.span_if_local(trait_m.def_id)) + } + } + _ => (cause.span, tcx.map.span_if_local(trait_m.def_id)), } } -pub fn compare_const_impl<'tcx>(tcx: &ty::ctxt<'tcx>, - impl_c: &ty::AssociatedConst<'tcx>, - impl_c_span: Span, - trait_c: &ty::AssociatedConst<'tcx>, - impl_trait_ref: &ty::TraitRef<'tcx>) { - debug!("compare_const_impl(impl_trait_ref={:?})", - impl_trait_ref); +fn compare_self_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_m: &ty::AssociatedItem, + impl_m_span: Span, + trait_m: &ty::AssociatedItem, + impl_trait_ref: ty::TraitRef<'tcx>) + -> Result<(), ErrorReported> +{ + let tcx = ccx.tcx; + // Try to give more informative error messages about self typing + // mismatches. 
Note that any mismatch will also be detected + // below, where we construct a canonical function type that + // includes the self parameter as a normal parameter. It's just + // that the error messages you get out of this code are a bit more + // inscrutable, particularly for cases where one method has no + // self. - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None); - let mut fulfillment_cx = infcx.fulfillment_cx.borrow_mut(); + let self_string = |method: &ty::AssociatedItem| { + let untransformed_self_ty = match method.container { + ty::ImplContainer(_) => impl_trait_ref.self_ty(), + ty::TraitContainer(_) => tcx.mk_self_type() + }; + let method_ty = tcx.item_type(method.def_id); + let self_arg_ty = *method_ty.fn_sig().input(0).skip_binder(); + match ExplicitSelf::determine(untransformed_self_ty, self_arg_ty) { + ExplicitSelf::ByValue => "self".to_string(), + ExplicitSelf::ByReference(_, hir::MutImmutable) => "&self".to_string(), + ExplicitSelf::ByReference(_, hir::MutMutable) => "&mut self".to_string(), + _ => format!("self: {}", self_arg_ty) + } + }; - // The below is for the most part highly similar to the procedure - // for methods above. It is simpler in many respects, especially - // because we shouldn't really have to deal with lifetimes or - // predicates. In fact some of this should probably be put into - // shared functions because of DRY violations... - let trait_to_impl_substs = &impl_trait_ref.substs; + match (trait_m.method_has_self_argument, impl_m.method_has_self_argument) { + (false, false) | (true, true) => {} - // Create a parameter environment that represents the implementation's - // method. 
- let impl_c_node_id = tcx.map.as_local_node_id(impl_c.def_id).unwrap(); - let impl_param_env = ty::ParameterEnvironment::for_item(tcx, impl_c_node_id); + (false, true) => { + let self_descr = self_string(impl_m); + let mut err = struct_span_err!(tcx.sess, + impl_m_span, + E0185, + "method `{}` has a `{}` declaration in the impl, but \ + not in the trait", + trait_m.name, + self_descr); + err.span_label(impl_m_span, &format!("`{}` used in impl", self_descr)); + if let Some(span) = tcx.map.span_if_local(trait_m.def_id) { + err.span_label(span, &format!("trait declared without `{}`", self_descr)); + } + err.emit(); + return Err(ErrorReported); + } - // Create mapping from impl to skolemized. - let impl_to_skol_substs = &impl_param_env.free_substs; + (true, false) => { + let self_descr = self_string(trait_m); + let mut err = struct_span_err!(tcx.sess, + impl_m_span, + E0186, + "method `{}` has a `{}` declaration in the trait, but \ + not in the impl", + trait_m.name, + self_descr); + err.span_label(impl_m_span, + &format!("expected `{}` in impl", self_descr)); + if let Some(span) = tcx.map.span_if_local(trait_m.def_id) { + err.span_label(span, &format!("`{}` used in trait", self_descr)); + } + err.emit(); + return Err(ErrorReported); + } + } - // Create mapping from trait to skolemized. - let trait_to_skol_substs = - trait_to_impl_substs - .subst(tcx, impl_to_skol_substs) - .with_method(impl_to_skol_substs.types.get_slice(subst::FnSpace).to_vec(), - impl_to_skol_substs.regions().get_slice(subst::FnSpace).to_vec()); - debug!("compare_const_impl: trait_to_skol_substs={:?}", - trait_to_skol_substs); + Ok(()) +} - // Compute skolemized form of impl and trait const tys. 
- let impl_ty = impl_c.ty.subst(tcx, impl_to_skol_substs); - let trait_ty = trait_c.ty.subst(tcx, &trait_to_skol_substs); +fn compare_number_of_generics<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_m: &ty::AssociatedItem, + impl_m_span: Span, + trait_m: &ty::AssociatedItem, + trait_item_span: Option) + -> Result<(), ErrorReported> { + let tcx = ccx.tcx; + let impl_m_generics = tcx.item_generics(impl_m.def_id); + let trait_m_generics = tcx.item_generics(trait_m.def_id); + let num_impl_m_type_params = impl_m_generics.types.len(); + let num_trait_m_type_params = trait_m_generics.types.len(); + if num_impl_m_type_params != num_trait_m_type_params { + let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap(); + let span = match tcx.map.expect_impl_item(impl_m_node_id).node { + ImplItemKind::Method(ref impl_m_sig, _) => { + if impl_m_sig.generics.is_parameterized() { + impl_m_sig.generics.span + } else { + impl_m_span + } + } + _ => bug!("{:?} is not a method", impl_m), + }; - let err = infcx.commit_if_ok(|_| { - let origin = TypeOrigin::Misc(impl_c_span); + let mut err = struct_span_err!(tcx.sess, + span, + E0049, + "method `{}` has {} type parameter{} but its trait \ + declaration has {} type parameter{}", + trait_m.name, + num_impl_m_type_params, + if num_impl_m_type_params == 1 { "" } else { "s" }, + num_trait_m_type_params, + if num_trait_m_type_params == 1 { + "" + } else { + "s" + }); + + let mut suffix = None; + + if let Some(span) = trait_item_span { + err.span_label(span, + &format!("expected {}", + &if num_trait_m_type_params != 1 { + format!("{} type parameters", num_trait_m_type_params) + } else { + format!("{} type parameter", num_trait_m_type_params) + })); + } else { + suffix = Some(format!(", expected {}", num_trait_m_type_params)); + } - // There is no "body" here, so just pass dummy id. 
- let impl_ty = - assoc::normalize_associated_types_in(&infcx, - &mut fulfillment_cx, - impl_c_span, - 0, - &impl_ty); + err.span_label(span, + &format!("found {}{}", + &if num_impl_m_type_params != 1 { + format!("{} type parameters", num_impl_m_type_params) + } else { + format!("1 type parameter") + }, + suffix.as_ref().map(|s| &s[..]).unwrap_or(""))); - debug!("compare_const_impl: impl_ty={:?}", - impl_ty); + err.emit(); - let trait_ty = - assoc::normalize_associated_types_in(&infcx, - &mut fulfillment_cx, - impl_c_span, - 0, - &trait_ty); + return Err(ErrorReported); + } + + Ok(()) +} - debug!("compare_const_impl: trait_ty={:?}", - trait_ty); +fn compare_number_of_method_arguments<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_m: &ty::AssociatedItem, + impl_m_span: Span, + trait_m: &ty::AssociatedItem, + trait_item_span: Option) + -> Result<(), ErrorReported> { + let tcx = ccx.tcx; + let m_fty = |method: &ty::AssociatedItem| { + match tcx.item_type(method.def_id).sty { + ty::TyFnDef(_, _, f) => f, + _ => bug!() + } + }; + let impl_m_fty = m_fty(impl_m); + let trait_m_fty = m_fty(trait_m); + if impl_m_fty.sig.0.inputs.len() != trait_m_fty.sig.0.inputs.len() { + let trait_number_args = trait_m_fty.sig.0.inputs.len(); + let impl_number_args = impl_m_fty.sig.0.inputs.len(); + let trait_m_node_id = tcx.map.as_local_node_id(trait_m.def_id); + let trait_span = if let Some(trait_id) = trait_m_node_id { + match tcx.map.expect_trait_item(trait_id).node { + TraitItem_::MethodTraitItem(ref trait_m_sig, _) => { + if let Some(arg) = trait_m_sig.decl.inputs.get(if trait_number_args > 0 { + trait_number_args - 1 + } else { + 0 + }) { + Some(arg.pat.span) + } else { + trait_item_span + } + } + _ => bug!("{:?} is not a method", impl_m), + } + } else { + trait_item_span + }; + let impl_m_node_id = tcx.map.as_local_node_id(impl_m.def_id).unwrap(); + let impl_span = match tcx.map.expect_impl_item(impl_m_node_id).node { + ImplItemKind::Method(ref impl_m_sig, _) => { + if let Some(arg) = 
impl_m_sig.decl.inputs.get(if impl_number_args > 0 { + impl_number_args - 1 + } else { + 0 + }) { + arg.pat.span + } else { + impl_m_span + } + } + _ => bug!("{:?} is not a method", impl_m), + }; + let mut err = struct_span_err!(tcx.sess, + impl_span, + E0050, + "method `{}` has {} parameter{} but the declaration in \ + trait `{}` has {}", + trait_m.name, + impl_number_args, + if impl_number_args == 1 { "" } else { "s" }, + tcx.item_path_str(trait_m.def_id), + trait_number_args); + if let Some(trait_span) = trait_span { + err.span_label(trait_span, + &format!("trait requires {}", + &if trait_number_args != 1 { + format!("{} parameters", trait_number_args) + } else { + format!("{} parameter", trait_number_args) + })); + } + err.span_label(impl_span, + &format!("expected {}, found {}", + &if trait_number_args != 1 { + format!("{} parameters", trait_number_args) + } else { + format!("{} parameter", trait_number_args) + }, + impl_number_args)); + err.emit(); + return Err(ErrorReported); + } - infer::mk_subty(&infcx, false, origin, impl_ty, trait_ty) - }); + Ok(()) +} - match err { - Ok(()) => { } - Err(terr) => { +pub fn compare_const_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_c: &ty::AssociatedItem, + impl_c_span: Span, + trait_c: &ty::AssociatedItem, + impl_trait_ref: ty::TraitRef<'tcx>) { + debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref); + + let tcx = ccx.tcx; + tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { + let mut fulfillment_cx = traits::FulfillmentContext::new(); + + // The below is for the most part highly similar to the procedure + // for methods above. It is simpler in many respects, especially + // because we shouldn't really have to deal with lifetimes or + // predicates. In fact some of this should probably be put into + // shared functions because of DRY violations... + let trait_to_impl_substs = impl_trait_ref.substs; + + // Create a parameter environment that represents the implementation's + // method. 
+ let impl_c_node_id = tcx.map.as_local_node_id(impl_c.def_id).unwrap(); + let impl_param_env = ty::ParameterEnvironment::for_item(tcx, impl_c_node_id); + + // Create mapping from impl to skolemized. + let impl_to_skol_substs = &impl_param_env.free_substs; + + // Create mapping from trait to skolemized. + let trait_to_skol_substs = impl_to_skol_substs.rebase_onto(tcx, + impl_c.container.id(), + trait_to_impl_substs.subst(tcx, + impl_to_skol_substs)); + debug!("compare_const_impl: trait_to_skol_substs={:?}", + trait_to_skol_substs); + + // Compute skolemized form of impl and trait const tys. + let impl_ty = tcx.item_type(impl_c.def_id).subst(tcx, impl_to_skol_substs); + let trait_ty = tcx.item_type(trait_c.def_id).subst(tcx, trait_to_skol_substs); + let mut cause = ObligationCause::misc(impl_c_span, impl_c_node_id); + + let err = infcx.commit_if_ok(|_| { + // There is no "body" here, so just pass dummy id. + let impl_ty = assoc::normalize_associated_types_in(&infcx, + &mut fulfillment_cx, + impl_c_span, + ast::CRATE_NODE_ID, + &impl_ty); + + debug!("compare_const_impl: impl_ty={:?}", impl_ty); + + let trait_ty = assoc::normalize_associated_types_in(&infcx, + &mut fulfillment_cx, + impl_c_span, + ast::CRATE_NODE_ID, + &trait_ty); + + debug!("compare_const_impl: trait_ty={:?}", trait_ty); + + infcx.sub_types(false, &cause, impl_ty, trait_ty) + .map(|InferOk { obligations, value: () }| { + for obligation in obligations { + fulfillment_cx.register_predicate_obligation(&infcx, obligation); + } + }) + }); + + if let Err(terr) = err { debug!("checking associated const for compatibility: impl ty {:?}, trait ty {:?}", impl_ty, trait_ty); - span_err!(tcx.sess, impl_c_span, E0326, - "implemented const `{}` has an incompatible type for \ - trait: {}", - trait_c.name, - terr); - return; + + // Locate the Span containing just the type of the offending impl + match tcx.map.expect_impl_item(impl_c_node_id).node { + ImplItemKind::Const(ref ty, _) => cause.span = ty.span, + _ => 
bug!("{:?} is not a impl const", impl_c), + } + + let mut diag = struct_span_err!(tcx.sess, + cause.span, + E0326, + "implemented const `{}` has an incompatible type for \ + trait", + trait_c.name); + + // Add a label to the Span containing just the type of the item + let trait_c_node_id = tcx.map.as_local_node_id(trait_c.def_id).unwrap(); + let trait_c_span = match tcx.map.expect_trait_item(trait_c_node_id).node { + TraitItem_::ConstTraitItem(ref ty, _) => ty.span, + _ => bug!("{:?} is not a trait const", trait_c), + }; + + infcx.note_type_err(&mut diag, + &cause, + Some((trait_c_span, format!("type in trait"))), + Some(infer::ValuePairs::Types(ExpectedFound { + expected: trait_ty, + found: impl_ty, + })), + &terr); + diag.emit(); } - } + }); } diff --git a/src/librustc_typeck/check/demand.rs b/src/librustc_typeck/check/demand.rs index 63dac49b384a7..ef1c08bdab549 100644 --- a/src/librustc_typeck/check/demand.rs +++ b/src/librustc_typeck/check/demand.rs @@ -9,64 +9,55 @@ // except according to those terms. -use check::{coercion, FnCtxt}; -use middle::ty::{self, Ty}; -use middle::infer::{self, TypeOrigin}; +use check::FnCtxt; +use rustc::ty::Ty; +use rustc::infer::{InferOk}; +use rustc::traits::ObligationCause; -use std::result::Result::{Err, Ok}; -use syntax::codemap::Span; -use rustc_front::hir; +use syntax_pos::Span; +use rustc::hir; -// Requires that the two types unify, and prints an error message if -// they don't. -pub fn suptype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span, - ty_expected: Ty<'tcx>, ty_actual: Ty<'tcx>) { - suptype_with_fn(fcx, sp, false, ty_expected, ty_actual, - |sp, e, a, s| { fcx.report_mismatched_types(sp, e, a, s) }) -} +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + // Requires that the two types unify, and prints an error message if + // they don't. 
+ pub fn demand_suptype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) { + let cause = self.misc(sp); + match self.sub_types(false, &cause, actual, expected) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + }, + Err(e) => { + self.report_mismatched_types(&cause, expected, actual, e); + } + } + } -/// As `suptype`, but call `handle_err` if unification for subtyping fails. -pub fn suptype_with_fn<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - b_is_expected: bool, - ty_a: Ty<'tcx>, - ty_b: Ty<'tcx>, - handle_err: F) where - F: FnOnce(Span, Ty<'tcx>, Ty<'tcx>, &ty::error::TypeError<'tcx>), -{ - // n.b.: order of actual, expected is reversed - match infer::mk_subty(fcx.infcx(), b_is_expected, TypeOrigin::Misc(sp), - ty_b, ty_a) { - Ok(()) => { /* ok */ } - Err(ref err) => { - handle_err(sp, ty_a, ty_b, err); - } + pub fn demand_eqtype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) { + self.demand_eqtype_with_origin(&self.misc(sp), expected, actual); } -} -pub fn eqtype<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, sp: Span, - expected: Ty<'tcx>, actual: Ty<'tcx>) { - match infer::mk_eqty(fcx.infcx(), false, TypeOrigin::Misc(sp), actual, expected) { - Ok(()) => { /* ok */ } - Err(ref err) => { fcx.report_mismatched_types(sp, expected, actual, err); } + pub fn demand_eqtype_with_origin(&self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>) + { + match self.eq_types(false, cause, actual, expected) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + }, + Err(e) => { + self.report_mismatched_types(cause, expected, actual, e); + } + } } -} -// Checks that the type of `expr` can be coerced to `expected`. 
-pub fn coerce<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - expected: Ty<'tcx>, - expr: &hir::Expr) { - let expr_ty = fcx.expr_ty(expr); - debug!("demand::coerce(expected = {:?}, expr_ty = {:?})", - expected, - expr_ty); - let expr_ty = fcx.resolve_type_vars_if_possible(expr_ty); - let expected = fcx.resolve_type_vars_if_possible(expected); - match coercion::mk_assignty(fcx, expr, expr_ty, expected) { - Ok(()) => { /* ok */ } - Err(ref err) => { - fcx.report_mismatched_types(sp, expected, expr_ty, err); - } + // Checks that the type of `expr` can be coerced to `expected`. + pub fn demand_coerce(&self, expr: &hir::Expr, checked_ty: Ty<'tcx>, expected: Ty<'tcx>) { + let expected = self.resolve_type_vars_with_obligations(expected); + if let Err(e) = self.try_coerce(expr, checked_ty, expected) { + let cause = self.misc(expr.span); + let expr_ty = self.resolve_type_vars_with_obligations(checked_ty); + self.report_mismatched_types(&cause, expected, expr_ty, e); + } } } diff --git a/src/librustc_typeck/check/dropck.rs b/src/librustc_typeck/check/dropck.rs index deda0b818ee06..e13c4ea314f06 100644 --- a/src/librustc_typeck/check/dropck.rs +++ b/src/librustc_typeck/check/dropck.rs @@ -8,20 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use check::regionck::{self, Rcx}; +use CrateCtxt; +use check::regionck::RegionCtxt; -use middle::def_id::DefId; +use hir::def_id::DefId; use middle::free_region::FreeRegionMap; -use middle::infer; +use rustc::infer::{self, InferOk}; use middle::region; -use middle::subst::{self, Subst}; -use middle::traits; -use middle::ty::{self, Ty}; -use util::nodemap::FnvHashSet; +use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::{self, AdtKind, Ty, TyCtxt}; +use rustc::traits::{self, ObligationCause, Reveal}; +use util::nodemap::FxHashSet; use syntax::ast; -use syntax::codemap::{self, Span}; -use syntax::parse::token::special_idents; +use syntax_pos::Span; /// check_drop_impl confirms that the Drop implementation identfied by /// `drop_impl_did` is not any more specialized than the type it is @@ -40,20 +40,17 @@ use syntax::parse::token::special_idents; /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S; impl Drop for S { ... }`). /// -pub fn check_drop_impl(tcx: &ty::ctxt, drop_impl_did: DefId) -> Result<(), ()> { - let ty::TypeScheme { generics: ref dtor_generics, - ty: dtor_self_type } = tcx.lookup_item_type(drop_impl_did); - let dtor_predicates = tcx.lookup_predicates(drop_impl_did); +pub fn check_drop_impl(ccx: &CrateCtxt, drop_impl_did: DefId) -> Result<(), ()> { + let dtor_self_type = ccx.tcx.item_type(drop_impl_did); + let dtor_predicates = ccx.tcx.item_predicates(drop_impl_did); match dtor_self_type.sty { - ty::TyEnum(adt_def, self_to_impl_substs) | - ty::TyStruct(adt_def, self_to_impl_substs) => { - try!(ensure_drop_params_and_item_params_correspond(tcx, - drop_impl_did, - dtor_generics, - &dtor_self_type, - adt_def.did)); - - ensure_drop_predicates_are_implied_by_item_defn(tcx, + ty::TyAdt(adt_def, self_to_impl_substs) => { + ensure_drop_params_and_item_params_correspond(ccx, + drop_impl_did, + dtor_self_type, + adt_def.did)?; + + ensure_drop_predicates_are_implied_by_item_defn(ccx, drop_impl_did, &dtor_predicates, 
adt_def.did, @@ -62,68 +59,80 @@ pub fn check_drop_impl(tcx: &ty::ctxt, drop_impl_did: DefId) -> Result<(), ()> { _ => { // Destructors only work on nominal types. This was // already checked by coherence, so we can panic here. - let span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); - tcx.sess.span_bug( - span, &format!("should have been rejected by coherence check: {}", - dtor_self_type)); + let span = ccx.tcx.def_span(drop_impl_did); + span_bug!(span, + "should have been rejected by coherence check: {}", + dtor_self_type); } } } -fn ensure_drop_params_and_item_params_correspond<'tcx>( - tcx: &ty::ctxt<'tcx>, +fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( + ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, - drop_impl_generics: &ty::Generics<'tcx>, - drop_impl_ty: &ty::Ty<'tcx>, - self_type_did: DefId) -> Result<(), ()> + drop_impl_ty: Ty<'tcx>, + self_type_did: DefId) + -> Result<(), ()> { + let tcx = ccx.tcx; let drop_impl_node_id = tcx.map.as_local_node_id(drop_impl_did).unwrap(); let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); // check that the impl type can be made to match the trait type. 
let impl_param_env = ty::ParameterEnvironment::for_item(tcx, self_type_node_id); - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, Some(impl_param_env)); - - let named_type = tcx.lookup_item_type(self_type_did).ty; - let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); - - let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); - let fresh_impl_substs = - infcx.fresh_substs_for_generics(drop_impl_span, drop_impl_generics); - let fresh_impl_self_ty = drop_impl_ty.subst(tcx, &fresh_impl_substs); - - if let Err(_) = infer::mk_eqty(&infcx, true, infer::TypeOrigin::Misc(drop_impl_span), - named_type, fresh_impl_self_ty) { - let item_span = tcx.map.span(self_type_node_id); - struct_span_err!(tcx.sess, drop_impl_span, E0366, - "Implementations of Drop cannot be specialized") - .span_note(item_span, - "Use same sequence of generic type and region \ - parameters that is on the struct/enum definition") - .emit(); - return Err(()); - } + tcx.infer_ctxt(None, Some(impl_param_env), Reveal::NotSpecializable).enter(|infcx| { + let tcx = infcx.tcx; + let mut fulfillment_cx = traits::FulfillmentContext::new(); + + let named_type = tcx.item_type(self_type_did); + let named_type = named_type.subst(tcx, &infcx.parameter_environment.free_substs); + + let drop_impl_span = tcx.def_span(drop_impl_did); + let fresh_impl_substs = + infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); + let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); + + let cause = &ObligationCause::misc(drop_impl_span, drop_impl_node_id); + match infcx.eq_types(true, cause, named_type, fresh_impl_self_ty) { + Ok(InferOk { obligations, .. 
}) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + } + Err(_) => { + let item_span = tcx.map.span(self_type_node_id); + struct_span_err!(tcx.sess, drop_impl_span, E0366, + "Implementations of Drop cannot be specialized") + .span_note(item_span, + "Use same sequence of generic type and region \ + parameters that is on the struct/enum definition") + .emit(); + return Err(()); + } + } - if let Err(ref errors) = infcx.fulfillment_cx.borrow_mut().select_all_or_error(&infcx) { - // this could be reached when we get lazy normalization - traits::report_fulfillment_errors(&infcx, errors); - return Err(()); - } + if let Err(ref errors) = fulfillment_cx.select_all_or_error(&infcx) { + // this could be reached when we get lazy normalization + infcx.report_fulfillment_errors(errors); + return Err(()); + } - let free_regions = FreeRegionMap::new(); - infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); - Ok(()) + let free_regions = FreeRegionMap::new(); + infcx.resolve_regions_and_report_errors(&free_regions, drop_impl_node_id); + Ok(()) + }) } /// Confirms that every predicate imposed by dtor_predicates is /// implied by assuming the predicates attached to self_type_did. -fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( - tcx: &ty::ctxt<'tcx>, +fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( + ccx: &CrateCtxt<'a, 'tcx>, drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, - self_to_impl_substs: &subst::Substs<'tcx>) -> Result<(), ()> { + self_to_impl_substs: &Substs<'tcx>) + -> Result<(), ()> +{ // Here is an example, analogous to that from // `compare_impl_method`. @@ -160,19 +169,18 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. 
+ let tcx = ccx.tcx; + let self_type_node_id = tcx.map.as_local_node_id(self_type_did).unwrap(); - let drop_impl_span = tcx.map.def_id_span(drop_impl_did, codemap::DUMMY_SP); + let drop_impl_span = tcx.def_span(drop_impl_did); // We can assume the predicates attached to struct/enum definition // hold. - let generic_assumptions = tcx.lookup_predicates(self_type_did); + let generic_assumptions = tcx.item_predicates(self_type_did); let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs); - assert!(assumptions_in_impl_context.predicates.is_empty_in(subst::SelfSpace)); - assert!(assumptions_in_impl_context.predicates.is_empty_in(subst::FnSpace)); - let assumptions_in_impl_context = - assumptions_in_impl_context.predicates.get_slice(subst::TypeSpace); + let assumptions_in_impl_context = assumptions_in_impl_context.predicates; // An earlier version of this code attempted to do this checking // via the traits::fulfill machinery. However, it ran into trouble @@ -180,10 +188,8 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( // 'a:'b and T:'b into region inference constraints. It is simpler // just to look for all the predicates directly. - assert!(dtor_predicates.predicates.is_empty_in(subst::SelfSpace)); - assert!(dtor_predicates.predicates.is_empty_in(subst::FnSpace)); - let predicates = dtor_predicates.predicates.get_slice(subst::TypeSpace); - for predicate in predicates { + assert_eq!(dtor_predicates.parent, None); + for predicate in &dtor_predicates.predicates { // (We do not need to worry about deep analysis of type // expressions etc because the Drop impls are already forced // to take on a structure that is roughly an alpha-renaming of @@ -265,16 +271,17 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). 
/// -pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>, - typ: ty::Ty<'tcx>, - span: Span, - scope: region::CodeExtent) { +pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( + rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, + typ: ty::Ty<'tcx>, + span: Span, + scope: region::CodeExtent) +{ debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", typ, scope); - let parent_scope = rcx.tcx().region_maps.opt_encl_scope(scope).unwrap_or_else(|| { - rcx.tcx().sess.span_bug( - span, &format!("no enclosing scope found for scope: {:?}", scope)) + let parent_scope = rcx.tcx.region_maps.opt_encl_scope(scope).unwrap_or_else(|| { + span_bug!(span, "no enclosing scope found for scope: {:?}", scope) }); let result = iterate_over_potentially_unsafe_regions_in_type( @@ -282,7 +289,7 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx> rcx: rcx, span: span, parent_scope: parent_scope, - breadcrumbs: FnvHashSet() + breadcrumbs: FxHashSet() }, TypeContext::Root, typ, @@ -290,7 +297,7 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx> match result { Ok(()) => {} Err(Error::Overflow(ref ctxt, ref detected_on_typ)) => { - let tcx = rcx.tcx(); + let tcx = rcx.tcx; let mut err = struct_span_err!(tcx.sess, span, E0320, "overflow while adding drop-check rules for {}", typ); match *ctxt { @@ -298,26 +305,23 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx> // no need for an additional note if the overflow // was somehow on the root. 
} - TypeContext::ADT { def_id, variant, field, field_index } => { + TypeContext::ADT { def_id, variant, field } => { let adt = tcx.lookup_adt_def(def_id); let variant_name = match adt.adt_kind() { - ty::AdtKind::Enum => format!("enum {} variant {}", - tcx.item_path_str(def_id), - variant), - ty::AdtKind::Struct => format!("struct {}", - tcx.item_path_str(def_id)) - }; - let field_name = if field == special_idents::unnamed_field.name { - format!("#{}", field_index) - } else { - format!("`{}`", field) + AdtKind::Enum => format!("enum {} variant {}", + tcx.item_path_str(def_id), + variant), + AdtKind::Struct => format!("struct {}", + tcx.item_path_str(def_id)), + AdtKind::Union => format!("union {}", + tcx.item_path_str(def_id)), }; span_note!( &mut err, span, "overflowed on {} field {} type: {}", variant_name, - field_name, + field, detected_on_typ); } } @@ -337,14 +341,13 @@ enum TypeContext { def_id: DefId, variant: ast::Name, field: ast::Name, - field_index: usize } } -struct DropckContext<'a, 'b: 'a, 'tcx: 'b> { - rcx: &'a mut Rcx<'b, 'tcx>, +struct DropckContext<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { + rcx: &'a mut RegionCtxt<'b, 'gcx, 'tcx>, /// types that have already been traversed - breadcrumbs: FnvHashSet>, + breadcrumbs: FxHashSet>, /// span for error reporting span: Span, /// the scope reachable dtorck types must outlive @@ -352,13 +355,14 @@ struct DropckContext<'a, 'b: 'a, 'tcx: 'b> { } // `context` is used for reporting overflow errors -fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( - cx: &mut DropckContext<'a, 'b, 'tcx>, +fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'gcx, 'tcx>( + cx: &mut DropckContext<'a, 'b, 'gcx, 'tcx>, context: TypeContext, ty: Ty<'tcx>, - depth: usize) -> Result<(), Error<'tcx>> + depth: usize) + -> Result<(), Error<'tcx>> { - let tcx = cx.rcx.tcx(); + let tcx = cx.rcx.tcx; // Issue #22443: Watch out for overflow. 
While we are careful to // handle regular types properly, non-regular ones cause problems. let recursion_limit = tcx.sess.recursion_limit.get(); @@ -371,7 +375,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( // canoncialize the regions in `ty` before inserting - infinitely many // region variables can refer to the same region. - let ty = cx.rcx.infcx().resolve_type_and_region_vars_if_possible(&ty); + let ty = cx.rcx.resolve_type_and_region_vars_if_possible(&ty); if !cx.breadcrumbs.insert(ty) { debug!("iterate_over_potentially_unsafe_regions_in_type \ @@ -408,18 +412,27 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( // unbounded type parameter `T`, we must resume the recursive // analysis on `T` (since it would be ignored by // type_must_outlive). - if has_dtor_of_interest(tcx, ty) { - debug!("iterate_over_potentially_unsafe_regions_in_type \ - {}ty: {} - is a dtorck type!", - (0..depth).map(|_| ' ').collect::(), - ty); - - regionck::type_must_outlive(cx.rcx, - infer::SubregionOrigin::SafeDestructor(cx.span), - ty, - ty::ReScope(cx.parent_scope)); - - return Ok(()); + let dropck_kind = has_dtor_of_interest(tcx, ty); + debug!("iterate_over_potentially_unsafe_regions_in_type \ + ty: {:?} dropck_kind: {:?}", ty, dropck_kind); + match dropck_kind { + DropckKind::NoBorrowedDataAccessedInMyDtor => { + // The maximally blind attribute. 
+ } + DropckKind::BorrowedDataMustStrictlyOutliveSelf => { + cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), + ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); + return Ok(()); + } + DropckKind::RevisedSelf(revised_ty) => { + cx.rcx.type_must_outlive(infer::SubregionOrigin::SafeDestructor(cx.span), + revised_ty, tcx.mk_region(ty::ReScope(cx.parent_scope))); + // Do not return early from this case; we want + // to recursively process the internal structure of Self + // (because even though the Drop for Self has been asserted + // safe, the types instantiated for the generics of Self + // may themselves carry dropck constraints.) + } } debug!("iterate_over_potentially_unsafe_regions_in_type \ @@ -430,7 +443,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( // We still need to ensure all referenced data is safe. match ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | - ty::TyFloat(_) | ty::TyStr => { + ty::TyFloat(_) | ty::TyStr | ty::TyNever => { // primitive - definitely safe Ok(()) } @@ -441,40 +454,44 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( cx, context, ity, depth+1) } - ty::TyStruct(def, substs) if def.is_phantom_data() => { + ty::TyAdt(def, substs) if def.is_phantom_data() => { // PhantomData - behaves identically to T - let ity = *substs.types.get(subst::TypeSpace, 0); + let ity = substs.type_at(0); iterate_over_potentially_unsafe_regions_in_type( cx, context, ity, depth+1) } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let did = def.did; for variant in &def.variants { - for (i, field) in variant.fields.iter().enumerate() { + for field in variant.fields.iter() { let fty = field.ty(tcx, substs); - let fty = cx.rcx.fcx.resolve_type_vars_if_possible( + let fty = cx.rcx.fcx.resolve_type_vars_with_obligations( cx.rcx.fcx.normalize_associated_types_in(cx.span, &fty)); - try!(iterate_over_potentially_unsafe_regions_in_type( + 
iterate_over_potentially_unsafe_regions_in_type( cx, TypeContext::ADT { def_id: did, field: field.name, variant: variant.name, - field_index: i }, fty, - depth+1)) + depth+1)? } } Ok(()) } - ty::TyTuple(ref tys) | - ty::TyClosure(_, box ty::ClosureSubsts { upvar_tys: ref tys, .. }) => { + ty::TyClosure(def_id, substs) => { + for ty in substs.upvar_tys(def_id, tcx) { + iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? + } + Ok(()) + } + + ty::TyTuple(tys) => { for ty in tys { - try!(iterate_over_potentially_unsafe_regions_in_type( - cx, context, ty, depth+1)) + iterate_over_potentially_unsafe_regions_in_type(cx, context, ty, depth+1)? } Ok(()) } @@ -486,7 +503,7 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( Ok(()) } - ty::TyBareFn(..) => { + ty::TyFnDef(..) | ty::TyFnPtr(_) => { // FIXME(#26656): this type is always destruction-safe, but // it implicitly witnesses Self: Fn, which can be false. Ok(()) @@ -498,20 +515,144 @@ fn iterate_over_potentially_unsafe_regions_in_type<'a, 'b, 'tcx>( } // these are always dtorck - ty::TyTrait(..) | ty::TyProjection(_) => unreachable!(), + ty::TyDynamic(..) | ty::TyProjection(_) | ty::TyAnon(..) => bug!(), } } -fn has_dtor_of_interest<'tcx>(tcx: &ty::ctxt<'tcx>, - ty: ty::Ty<'tcx>) -> bool { +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum DropckKind<'tcx> { + /// The "safe" kind; i.e. conservatively assume any borrow + /// accessed by dtor, and therefore such data must strictly + /// outlive self. + /// + /// Equivalent to RevisedTy with no change to the self type. + BorrowedDataMustStrictlyOutliveSelf, + + /// The nearly completely-unsafe kind. + /// + /// Equivalent to RevisedSelf with *all* parameters remapped to () + /// (maybe...?) + NoBorrowedDataAccessedInMyDtor, + + /// Assume all borrowed data access by dtor occurs as if Self has the + /// type carried by this variant. 
In practice this means that some + /// of the type parameters are remapped to `()` (and some lifetime + /// parameters remapped to `'static`), because the developer has asserted + /// that the destructor will not access their contents. + RevisedSelf(Ty<'tcx>), +} + +/// Returns the classification of what kind of check should be applied +/// to `ty`, which may include a revised type where some of the type +/// parameters are re-mapped to `()` to reflect the destructor's +/// "purity" with respect to their actual contents. +fn has_dtor_of_interest<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'tcx>) + -> DropckKind<'tcx> { match ty.sty { - ty::TyEnum(def, _) | ty::TyStruct(def, _) => { - def.is_dtorck(tcx) + ty::TyAdt(adt_def, substs) => { + if !adt_def.is_dtorck(tcx) { + return DropckKind::NoBorrowedDataAccessedInMyDtor; + } + + // Find the `impl<..> Drop for _` to inspect any + // attributes attached to the impl's generics. + let dtor_method = adt_def.destructor() + .expect("dtorck type without destructor impossible"); + let method = tcx.associated_item(dtor_method); + let impl_def_id = method.container.id(); + let revised_ty = revise_self_ty(tcx, adt_def, impl_def_id, substs); + return DropckKind::RevisedSelf(revised_ty); } - ty::TyTrait(..) | ty::TyProjection(..) => { + ty::TyDynamic(..) | ty::TyProjection(..) | ty::TyAnon(..) => { debug!("ty: {:?} isn't known, and therefore is a dropck type", ty); - true + return DropckKind::BorrowedDataMustStrictlyOutliveSelf; }, - _ => false + _ => { + return DropckKind::NoBorrowedDataAccessedInMyDtor; + } } } + +// Constructs new Ty just like the type defined by `adt_def` coupled +// with `substs`, except each type and lifetime parameter marked as +// `#[may_dangle]` in the Drop impl (identified by `impl_def_id`) is +// respectively mapped to `()` or `'static`. +// +// For example: If the `adt_def` maps to: +// +// enum Foo<'a, X, Y> { ... 
} +// +// and the `impl_def_id` maps to: +// +// impl<#[may_dangle] 'a, X, #[may_dangle] Y> Drop for Foo<'a, X, Y> { ... } +// +// then revises input: `Foo<'r,i64,&'r i64>` to: `Foo<'static,i64,()>` +fn revise_self_ty<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + adt_def: &'tcx ty::AdtDef, + impl_def_id: DefId, + substs: &Substs<'tcx>) + -> Ty<'tcx> { + // Get generics for `impl Drop` to query for `#[may_dangle]` attr. + let impl_bindings = tcx.item_generics(impl_def_id); + + // Get Substs attached to Self on `impl Drop`; process in parallel + // with `substs`, replacing dangling entries as appropriate. + let self_substs = { + let impl_self_ty: Ty<'tcx> = tcx.item_type(impl_def_id); + if let ty::TyAdt(self_adt_def, self_substs) = impl_self_ty.sty { + assert_eq!(adt_def, self_adt_def); + self_substs + } else { + bug!("Self in `impl Drop for _` must be an Adt."); + } + }; + + // Walk `substs` + `self_substs`, build new substs appropriate for + // `adt_def`; each non-dangling param reuses entry from `substs`. + // + // Note: The manner we map from a right-hand side (i.e. Region or + // Ty) for a given `def` to generic parameter associated with that + // right-hand side is tightly coupled to `Drop` impl constraints. + // + // E.g. we know such a Ty must be `TyParam`, because a destructor + // for `struct Foo` is defined via `impl Drop for Foo`, + // and never by (for example) `impl Drop for Foo>`. 
+ let substs = Substs::for_item( + tcx, + adt_def.did, + |def, _| { + let r_orig = substs.region_for_def(def); + let impl_self_orig = self_substs.region_for_def(def); + let r = if let ty::Region::ReEarlyBound(ref ebr) = *impl_self_orig { + if impl_bindings.region_param(ebr).pure_wrt_drop { + tcx.mk_region(ty::ReStatic) + } else { + r_orig + } + } else { + bug!("substs for an impl must map regions to ReEarlyBound"); + }; + debug!("has_dtor_of_interest mapping def {:?} orig {:?} to {:?}", + def, r_orig, r); + r + }, + |def, _| { + let t_orig = substs.type_for_def(def); + let impl_self_orig = self_substs.type_for_def(def); + let t = if let ty::TypeVariants::TyParam(ref pt) = impl_self_orig.sty { + if impl_bindings.type_param(pt).pure_wrt_drop { + tcx.mk_nil() + } else { + t_orig + } + } else { + bug!("substs for an impl must map types to TyParam"); + }; + debug!("has_dtor_of_interest mapping def {:?} orig {:?} {:?} to {:?} {:?}", + def, t_orig, t_orig.sty, t, t.sty); + t + }); + + tcx.mk_adt(adt_def, &substs) +} diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index b9fec44ec4030..a07573a7b9eab 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -11,29 +11,35 @@ //! Type-checking for the rust-intrinsic and platform-intrinsic //! intrinsics that the compiler exposes. 
-use astconv::AstConv; use intrinsics; -use middle::subst; -use middle::ty::FnSig; -use middle::ty::{self, Ty}; -use middle::ty::fold::TypeFolder; +use rustc::traits::{ObligationCause, ObligationCauseCode}; +use rustc::ty::subst::Substs; +use rustc::ty::FnSig; +use rustc::ty::{self, Ty}; +use rustc::util::nodemap::FxHashMap; use {CrateCtxt, require_same_types}; -use std::collections::{HashMap}; -use syntax::abi; +use syntax::abi::Abi; use syntax::ast; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::Span; -use syntax::parse::token; +use syntax::symbol::Symbol; +use syntax_pos::Span; -use rustc_front::hir; +use rustc::hir; -fn equate_intrinsic_type<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, it: &hir::ForeignItem, +fn equate_intrinsic_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::ForeignItem, n_tps: usize, - abi: abi::Abi, - inputs: Vec>, - output: ty::FnOutput<'tcx>) { - let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy { + abi: Abi, + inputs: Vec>, + output: Ty<'tcx>) { + let tcx = ccx.tcx; + let def_id = tcx.map.local_def_id(it.id); + + let substs = Substs::for_item(tcx, def_id, + |_, _| tcx.mk_region(ty::ReErased), + |def, _| tcx.mk_param_from_def(def)); + + let fty = tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Unsafe, abi: abi, sig: ty::Binder(FnSig { @@ -42,24 +48,26 @@ fn equate_intrinsic_type<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, it: &hir::ForeignItem, variadic: false, }), })); - let i_ty = tcx.lookup_item_type(tcx.map.local_def_id(it.id)); - let i_n_tps = i_ty.generics.types.len(subst::FnSpace); + let i_n_tps = tcx.item_generics(def_id).types.len(); if i_n_tps != n_tps { - span_err!(tcx.sess, it.span, E0094, - "intrinsic has wrong number of type \ - parameters: found {}, expected {}", - i_n_tps, n_tps); + let span = match it.node { + hir::ForeignItemFn(_, ref generics) => generics.span, + hir::ForeignItemStatic(..) 
=> it.span + }; + + struct_span_err!(tcx.sess, span, E0094, + "intrinsic has wrong number of type \ + parameters: found {}, expected {}", + i_n_tps, n_tps) + .span_label(span, &format!("expected {} type parameter", n_tps)) + .emit(); } else { - require_same_types(tcx, - None, - false, - it.span, - i_ty.ty, - fty, - || { - format!("intrinsic has wrong type: expected `{}`", - fty) - }); + require_same_types(ccx, + &ObligationCause::new(it.span, + it.id, + ObligationCauseCode::IntrinsicType), + tcx.item_type(def_id), + fty); } } @@ -67,8 +75,8 @@ fn equate_intrinsic_type<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, it: &hir::ForeignItem, /// and in libcore/intrinsics.rs pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { fn param<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, n: u32) -> Ty<'tcx> { - let name = token::intern(&format!("P{}", n)); - ccx.tcx.mk_param(subst::FnSpace, n, name) + let name = Symbol::intern(&format!("P{}", n)); + ccx.tcx.mk_param(n, name) } let tcx = ccx.tcx; @@ -79,32 +87,34 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { //We only care about the operation here let (n_tps, inputs, output) = match split[1] { - "cxchg" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), - param(ccx, 0), - param(ccx, 0)), - param(ccx, 0)), - "load" => (1, vec!(tcx.mk_imm_ptr(param(ccx, 0))), + "cxchg" | "cxchgweak" => (1, vec![tcx.mk_mut_ptr(param(ccx, 0)), + param(ccx, 0), + param(ccx, 0)], + tcx.intern_tup(&[param(ccx, 0), tcx.types.bool])), + "load" => (1, vec![tcx.mk_imm_ptr(param(ccx, 0))], param(ccx, 0)), - "store" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)), + "store" => (1, vec![tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)], tcx.mk_nil()), "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax" | "umin" => { - (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)), + (1, vec![tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)], param(ccx, 0)) } "fence" | "singlethreadfence" => { (0, Vec::new(), tcx.mk_nil()) } 
op => { - span_err!(tcx.sess, it.span, E0092, - "unrecognized atomic operation function: `{}`", op); + struct_span_err!(tcx.sess, it.span, E0092, + "unrecognized atomic operation function: `{}`", op) + .span_label(it.span, &format!("unrecognized atomic operation")) + .emit(); return; } }; - (n_tps, inputs, ty::FnConverging(output)) + (n_tps, inputs, output) } else if &name[..] == "abort" || &name[..] == "unreachable" { - (0, Vec::new(), ty::FnDiverging) + (0, Vec::new(), tcx.types.never) } else { let (n_tps, inputs, output) = match &name[..] { "breakpoint" => (0, Vec::new(), tcx.mk_nil()), @@ -117,16 +127,17 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { param(ccx, 0)) ], ccx.tcx.types.usize) } - "init" | "init_dropped" => (1, Vec::new(), param(ccx, 0)), + "rustc_peek" => (1, vec![param(ccx, 0)], param(ccx, 0)), + "init" => (1, Vec::new(), param(ccx, 0)), "uninit" => (1, Vec::new(), param(ccx, 0)), - "forget" => (1, vec!( param(ccx, 0) ), tcx.mk_nil()), - "transmute" => (2, vec!( param(ccx, 0) ), param(ccx, 1)), + "forget" => (1, vec![ param(ccx, 0) ], tcx.mk_nil()), + "transmute" => (2, vec![ param(ccx, 0) ], param(ccx, 1)), "move_val_init" => { (1, - vec!( + vec![ tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0) - ), + ], tcx.mk_nil()) } "drop_in_place" => { @@ -138,13 +149,13 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { "type_id" => (1, Vec::new(), ccx.tcx.types.u64), "offset" | "arith_offset" => { (1, - vec!( + vec![ tcx.mk_ptr(ty::TypeAndMut { ty: param(ccx, 0), mutbl: hir::MutImmutable }), ccx.tcx.types.isize - ), + ], tcx.mk_ptr(ty::TypeAndMut { ty: param(ccx, 0), mutbl: hir::MutImmutable @@ -152,7 +163,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { } "copy" | "copy_nonoverlapping" => { (1, - vec!( + vec![ tcx.mk_ptr(ty::TypeAndMut { ty: param(ccx, 0), mutbl: hir::MutImmutable @@ -162,12 +173,12 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { mutbl: 
hir::MutMutable }), tcx.types.usize, - ), + ], tcx.mk_nil()) } "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => { (1, - vec!( + vec![ tcx.mk_ptr(ty::TypeAndMut { ty: param(ccx, 0), mutbl: hir::MutMutable @@ -177,104 +188,106 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { mutbl: hir::MutImmutable }), tcx.types.usize, - ), + ], tcx.mk_nil()) } "write_bytes" | "volatile_set_memory" => { (1, - vec!( + vec![ tcx.mk_ptr(ty::TypeAndMut { ty: param(ccx, 0), mutbl: hir::MutMutable }), tcx.types.u8, tcx.types.usize, - ), + ], tcx.mk_nil()) } - "sqrtf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "sqrtf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), + "sqrtf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "sqrtf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), "powif32" => { (0, - vec!( tcx.types.f32, tcx.types.i32 ), + vec![ tcx.types.f32, tcx.types.i32 ], tcx.types.f32) } "powif64" => { (0, - vec!( tcx.types.f64, tcx.types.i32 ), + vec![ tcx.types.f64, tcx.types.i32 ], tcx.types.f64) } - "sinf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "sinf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "cosf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "cosf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), + "sinf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "sinf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "cosf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "cosf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), "powf32" => { (0, - vec!( tcx.types.f32, tcx.types.f32 ), + vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32) } "powf64" => { (0, - vec!( tcx.types.f64, tcx.types.f64 ), + vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64) } - "expf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "expf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "exp2f32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "exp2f64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "logf32" => (0, vec!( tcx.types.f32 
), tcx.types.f32), - "logf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "log10f32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "log10f64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "log2f32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "log2f64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), + "expf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "expf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "exp2f32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "exp2f64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "logf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "logf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "log10f32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "log10f64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "log2f32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "log2f64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), "fmaf32" => { (0, - vec!( tcx.types.f32, tcx.types.f32, tcx.types.f32 ), + vec![ tcx.types.f32, tcx.types.f32, tcx.types.f32 ], tcx.types.f32) } "fmaf64" => { (0, - vec!( tcx.types.f64, tcx.types.f64, tcx.types.f64 ), + vec![ tcx.types.f64, tcx.types.f64, tcx.types.f64 ], tcx.types.f64) } - "fabsf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "fabsf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "copysignf32" => (0, vec!( tcx.types.f32, tcx.types.f32 ), tcx.types.f32), - "copysignf64" => (0, vec!( tcx.types.f64, tcx.types.f64 ), tcx.types.f64), - "floorf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "floorf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "ceilf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "ceilf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "truncf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "truncf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "rintf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "rintf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), - "nearbyintf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "nearbyintf64" => 
(0, vec!( tcx.types.f64 ), tcx.types.f64), - "roundf32" => (0, vec!( tcx.types.f32 ), tcx.types.f32), - "roundf64" => (0, vec!( tcx.types.f64 ), tcx.types.f64), + "fabsf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "fabsf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "copysignf32" => (0, vec![ tcx.types.f32, tcx.types.f32 ], tcx.types.f32), + "copysignf64" => (0, vec![ tcx.types.f64, tcx.types.f64 ], tcx.types.f64), + "floorf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "floorf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "ceilf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "ceilf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "truncf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "truncf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "rintf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "rintf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "nearbyintf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "nearbyintf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), + "roundf32" => (0, vec![ tcx.types.f32 ], tcx.types.f32), + "roundf64" => (0, vec![ tcx.types.f64 ], tcx.types.f64), "volatile_load" => - (1, vec!( tcx.mk_imm_ptr(param(ccx, 0)) ), param(ccx, 0)), + (1, vec![ tcx.mk_imm_ptr(param(ccx, 0)) ], param(ccx, 0)), "volatile_store" => - (1, vec!( tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0) ), tcx.mk_nil()), + (1, vec![ tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0) ], tcx.mk_nil()), - "ctpop" | "ctlz" | "cttz" | "bswap" => (1, vec!(param(ccx, 0)), param(ccx, 0)), + "ctpop" | "ctlz" | "cttz" | "bswap" => (1, vec![param(ccx, 0)], param(ccx, 0)), "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => - (1, vec!(param(ccx, 0), param(ccx, 0)), - tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))), + (1, vec![param(ccx, 0), param(ccx, 0)], + tcx.intern_tup(&[param(ccx, 0), tcx.types.bool])), "unchecked_div" | "unchecked_rem" => (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)), "overflowing_add" | 
"overflowing_sub" | "overflowing_mul" => (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)), - - "return_address" => (0, vec![], tcx.mk_imm_ptr(tcx.types.u8)), + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => + (1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)), "assume" => (0, vec![tcx.types.bool], tcx.mk_nil()), + "likely" => (0, vec![tcx.types.bool], tcx.types.bool), + "unlikely" => (0, vec![tcx.types.bool], tcx.types.bool), "discriminant_value" => (1, vec![ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), @@ -283,48 +296,43 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { "try" => { let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8); - let fn_ty = ty::BareFnTy { + let fn_ty = tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, - abi: abi::Rust, + abi: Abi::Rust, sig: ty::Binder(FnSig { inputs: vec![mut_u8], - output: ty::FnOutput::FnConverging(tcx.mk_nil()), + output: tcx.mk_nil(), variadic: false, }), - }; - let fn_ty = tcx.mk_bare_fn(fn_ty); - (0, vec![tcx.mk_fn(None, fn_ty), mut_u8], mut_u8) + }); + (0, vec![tcx.mk_fn_ptr(fn_ty), mut_u8, mut_u8], tcx.types.i32) } ref other => { - span_err!(tcx.sess, it.span, E0093, - "unrecognized intrinsic function: `{}`", *other); + struct_span_err!(tcx.sess, it.span, E0093, + "unrecognized intrinsic function: `{}`", + *other) + .span_label(it.span, &format!("unrecognized intrinsic")) + .emit(); return; } }; - (n_tps, inputs, ty::FnConverging(output)) + (n_tps, inputs, output) }; - equate_intrinsic_type( - tcx, - it, - n_tps, - abi::RustIntrinsic, - inputs, - output - ) + equate_intrinsic_type(ccx, it, n_tps, Abi::RustIntrinsic, inputs, output) } /// Type-check `extern "platform-intrinsic" { ... }` functions. 
pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) { let param = |n| { - let name = token::intern(&format!("P{}", n)); - ccx.tcx.mk_param(subst::FnSpace, n, name) + let name = Symbol::intern(&format!("P{}", n)); + ccx.tcx.mk_param(n, name) }; let tcx = ccx.tcx; - let i_ty = tcx.lookup_item_type(tcx.map.local_def_id(it.id)); - let i_n_tps = i_ty.generics.types.len(subst::FnSpace); + let def_id = tcx.map.local_def_id(it.id); + let i_n_tps = tcx.item_generics(def_id).types.len(); let name = it.name.as_str(); let (n_tps, inputs, output) = match &*name { @@ -354,7 +362,7 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, } } _ => { - match intrinsics::Intrinsic::find(tcx, &name) { + match intrinsics::Intrinsic::find(&name) { Some(intr) => { // this function is a platform specific intrinsic if i_n_tps != 0 { @@ -365,24 +373,25 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, return } - let mut structural_to_nomimal = HashMap::new(); + let mut structural_to_nomimal = FxHashMap(); - let sig = tcx.no_late_bound_regions(i_ty.ty.fn_sig()).unwrap(); + let sig = tcx.item_type(def_id).fn_sig(); + let sig = tcx.no_late_bound_regions(sig).unwrap(); if intr.inputs.len() != sig.inputs.len() { span_err!(tcx.sess, it.span, E0444, "platform-specific intrinsic has invalid number of \ arguments: found {}, expected {}", - intr.inputs.len(), sig.inputs.len()); + sig.inputs.len(), intr.inputs.len()); return } let input_pairs = intr.inputs.iter().zip(&sig.inputs); for (i, (expected_arg, arg)) in input_pairs.enumerate() { - match_intrinsic_type_to_type(tcx, &format!("argument {}", i + 1), it.span, + match_intrinsic_type_to_type(ccx, &format!("argument {}", i + 1), it.span, &mut structural_to_nomimal, expected_arg, arg); } - match_intrinsic_type_to_type(tcx, "return value", it.span, + match_intrinsic_type_to_type(ccx, "return value", it.span, &mut structural_to_nomimal, - &intr.output, sig.output.unwrap()); + &intr.output, sig.output); return } None => { 
@@ -394,30 +403,24 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, } }; - equate_intrinsic_type( - tcx, - it, - n_tps, - abi::PlatformIntrinsic, - inputs, - ty::FnConverging(output) - ) + equate_intrinsic_type(ccx, it, n_tps, Abi::PlatformIntrinsic, + inputs, output) } // walk the expected type and the actual type in lock step, checking they're // the same, in a kinda-structural way, i.e. `Vector`s have to be simd structs with // exactly the right element type fn match_intrinsic_type_to_type<'tcx, 'a>( - tcx: &ty::ctxt<'tcx>, + ccx: &CrateCtxt<'a, 'tcx>, position: &str, span: Span, - structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, + structural_to_nominal: &mut FxHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, expected: &'a intrinsics::Type, t: ty::Ty<'tcx>) { use intrinsics::Type::*; let simple_error = |real: &str, expected: &str| { - span_err!(tcx.sess, span, E0442, + span_err!(ccx.tcx.sess, span, E0442, "intrinsic {} has wrong type: found {}, expected {}", position, real, expected) }; @@ -429,22 +432,22 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( }, // (The width we pass to LLVM doesn't concern the type checker.) 
Integer(signed, bits, _llvm_width) => match (signed, bits, &t.sty) { - (true, 8, &ty::TyInt(ast::IntTy::TyI8)) | - (false, 8, &ty::TyUint(ast::UintTy::TyU8)) | - (true, 16, &ty::TyInt(ast::IntTy::TyI16)) | - (false, 16, &ty::TyUint(ast::UintTy::TyU16)) | - (true, 32, &ty::TyInt(ast::IntTy::TyI32)) | - (false, 32, &ty::TyUint(ast::UintTy::TyU32)) | - (true, 64, &ty::TyInt(ast::IntTy::TyI64)) | - (false, 64, &ty::TyUint(ast::UintTy::TyU64)) => {}, + (true, 8, &ty::TyInt(ast::IntTy::I8)) | + (false, 8, &ty::TyUint(ast::UintTy::U8)) | + (true, 16, &ty::TyInt(ast::IntTy::I16)) | + (false, 16, &ty::TyUint(ast::UintTy::U16)) | + (true, 32, &ty::TyInt(ast::IntTy::I32)) | + (false, 32, &ty::TyUint(ast::UintTy::U32)) | + (true, 64, &ty::TyInt(ast::IntTy::I64)) | + (false, 64, &ty::TyUint(ast::UintTy::U64)) => {}, _ => simple_error(&format!("`{}`", t), &format!("`{}{n}`", if signed {"i"} else {"u"}, n = bits)), }, Float(bits) => match (bits, &t.sty) { - (32, &ty::TyFloat(ast::FloatTy::TyF32)) | - (64, &ty::TyFloat(ast::FloatTy::TyF64)) => {}, + (32, &ty::TyFloat(ast::FloatTy::F32)) | + (64, &ty::TyFloat(ast::FloatTy::F64)) => {}, _ => simple_error(&format!("`{}`", t), &format!("`f{n}`", n = bits)), }, @@ -455,7 +458,7 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( simple_error(&format!("`{}`", t), if const_ {"const pointer"} else {"mut pointer"}) } - match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal, + match_intrinsic_type_to_type(ccx, position, span, structural_to_nominal, inner_expected, ty) } _ => simple_error(&format!("`{}`", t), "raw pointer"), @@ -466,19 +469,19 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( simple_error(&format!("non-simd type `{}`", t), "simd type"); return; } - let t_len = t.simd_size(tcx); + let t_len = t.simd_size(ccx.tcx); if len as usize != t_len { simple_error(&format!("vector with length {}", t_len), &format!("length {}", len)); return; } - let t_ty = t.simd_type(tcx); + let t_ty = t.simd_type(ccx.tcx); { // check that a 
given structural type always has the same an intrinsic definition let previous = structural_to_nominal.entry(expected).or_insert(t); if *previous != t { // this gets its own error code because it is non-trivial - span_err!(tcx.sess, span, E0443, + span_err!(ccx.tcx.sess, span, E0443, "intrinsic {} has wrong type: found `{}`, expected `{}` which \ was used for this vector type previously in this signature", position, @@ -487,7 +490,7 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( return; } } - match_intrinsic_type_to_type(tcx, + match_intrinsic_type_to_type(ccx, position, span, structural_to_nominal, @@ -496,14 +499,14 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( } Aggregate(_flatten, ref expected_contents) => { match t.sty { - ty::TyTuple(ref contents) => { + ty::TyTuple(contents) => { if contents.len() != expected_contents.len() { simple_error(&format!("tuple with length {}", contents.len()), &format!("tuple with length {}", expected_contents.len())); return } for (e, c) in expected_contents.iter().zip(contents) { - match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal, + match_intrinsic_type_to_type(ccx, position, span, structural_to_nominal, e, c) } } diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index b2462a3612ca4..ff9eaa012ba41 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -10,74 +10,71 @@ use super::probe; -use check::{self, FnCtxt, callee, demand}; -use check::UnresolvedTypeAction; -use middle::def_id::DefId; -use middle::subst::{self}; -use middle::traits; -use middle::ty::{self, NoPreference, PreferMutLvalue, Ty}; -use middle::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr}; -use middle::ty::fold::TypeFoldable; -use middle::infer; -use middle::infer::{InferCtxt, TypeOrigin}; -use syntax::codemap::Span; -use rustc_front::hir; - -struct ConfirmContext<'a, 'tcx:'a> { - fcx: &'a FnCtxt<'a, 'tcx>, +use 
check::{FnCtxt, callee}; +use hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::traits; +use rustc::ty::{self, LvaluePreference, NoPreference, PreferMutLvalue, Ty}; +use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow}; +use rustc::ty::fold::TypeFoldable; +use rustc::infer::{self, InferOk}; +use syntax_pos::Span; +use rustc::hir; + +use std::ops::Deref; + +struct ConfirmContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, - self_expr: &'tcx hir::Expr, - call_expr: &'tcx hir::Expr, + self_expr: &'gcx hir::Expr, + call_expr: &'gcx hir::Expr, } -struct InstantiatedMethodSig<'tcx> { - /// Function signature of the method being invoked. The 0th - /// argument is the receiver. - method_sig: ty::FnSig<'tcx>, - - /// Substitutions for all types/early-bound-regions declared on - /// the method. - all_substs: subst::Substs<'tcx>, - - /// Generic bounds on the method's parameters which must be added - /// as pending obligations. - method_predicates: ty::InstantiatedPredicates<'tcx>, +impl<'a, 'gcx, 'tcx> Deref for ConfirmContext<'a, 'gcx, 'tcx> { + type Target = FnCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.fcx + } } -pub fn confirm<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - self_expr: &'tcx hir::Expr, - call_expr: &'tcx hir::Expr, - unadjusted_self_ty: Ty<'tcx>, - pick: probe::Pick<'tcx>, - supplied_method_types: Vec>) - -> ty::MethodCallee<'tcx> -{ - debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})", - unadjusted_self_ty, - pick, - supplied_method_types); - - let mut confirm_cx = ConfirmContext::new(fcx, span, self_expr, call_expr); - confirm_cx.confirm(unadjusted_self_ty, pick, supplied_method_types) +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn confirm_method(&self, + span: Span, + self_expr: &'gcx hir::Expr, + call_expr: &'gcx hir::Expr, + unadjusted_self_ty: Ty<'tcx>, + pick: probe::Pick<'tcx>, + supplied_method_types: Vec>) + -> 
ty::MethodCallee<'tcx> { + debug!("confirm(unadjusted_self_ty={:?}, pick={:?}, supplied_method_types={:?})", + unadjusted_self_ty, + pick, + supplied_method_types); + + let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr); + confirm_cx.confirm(unadjusted_self_ty, pick, supplied_method_types) + } } -impl<'a,'tcx> ConfirmContext<'a,'tcx> { - fn new(fcx: &'a FnCtxt<'a, 'tcx>, +impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { + fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, - self_expr: &'tcx hir::Expr, - call_expr: &'tcx hir::Expr) - -> ConfirmContext<'a, 'tcx> - { - ConfirmContext { fcx: fcx, span: span, self_expr: self_expr, call_expr: call_expr } + self_expr: &'gcx hir::Expr, + call_expr: &'gcx hir::Expr) + -> ConfirmContext<'a, 'gcx, 'tcx> { + ConfirmContext { + fcx: fcx, + span: span, + self_expr: self_expr, + call_expr: call_expr, + } } fn confirm(&mut self, unadjusted_self_ty: Ty<'tcx>, pick: probe::Pick<'tcx>, supplied_method_types: Vec>) - -> ty::MethodCallee<'tcx> - { + -> ty::MethodCallee<'tcx> { // Adjust the self expression the user provided and obtain the adjusted type. let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick); @@ -86,44 +83,29 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { // Create substitutions for the method's type parameters. let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick); - let all_substs = - self.instantiate_method_substs( - &pick, - supplied_method_types, - rcvr_substs); + let all_substs = self.instantiate_method_substs(&pick, supplied_method_types, rcvr_substs); debug!("all_substs={:?}", all_substs); // Create the final signature for the method, replacing late-bound regions. 
- let InstantiatedMethodSig { - method_sig, all_substs, method_predicates - } = self.instantiate_method_sig(&pick, all_substs); - let method_self_ty = method_sig.inputs[0]; + let (method_ty, method_predicates) = self.instantiate_method_sig(&pick, all_substs); // Unify the (adjusted) self type with what the method expects. - self.unify_receivers(self_ty, method_self_ty); - - // Create the method type - let method_ty = pick.item.as_opt_method().unwrap(); - let fty = self.tcx().mk_fn(None, self.tcx().mk_bare_fn(ty::BareFnTy { - sig: ty::Binder(method_sig), - unsafety: method_ty.fty.unsafety, - abi: method_ty.fty.abi.clone(), - })); + self.unify_receivers(self_ty, method_ty.fn_sig().input(0).skip_binder()); // Add any trait/regions obligations specified on the method's type parameters. - self.add_obligations(fty, &all_substs, &method_predicates); + self.add_obligations(method_ty, all_substs, &method_predicates); // Create the final `MethodCallee`. let callee = ty::MethodCallee { - def_id: pick.item.def_id(), - ty: fty, - substs: self.tcx().mk_substs(all_substs) + def_id: pick.item.def_id, + ty: method_ty, + substs: all_substs, }; - // If this is an `&mut self` method, bias the receiver - // expression towards mutability (this will switch - // e.g. `Deref` to `DerefMut` in overloaded derefs and so on). 
- self.fixup_derefs_on_method_receiver_if_necessary(&callee); + + if let Some(hir::MutMutable) = pick.autoref { + self.convert_lvalue_derefs_to_mutable(); + } callee } @@ -134,54 +116,43 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { fn adjust_self_ty(&mut self, unadjusted_self_ty: Ty<'tcx>, pick: &probe::Pick<'tcx>) - -> Ty<'tcx> - { - let (autoref, unsize) = if let Some(mutbl) = pick.autoref { - let region = self.infcx().next_region_var(infer::Autoref(self.span)); - let autoref = AutoPtr(self.tcx().mk_region(region), mutbl); - (Some(autoref), pick.unsize.map(|target| { - target.adjust_for_autoref(self.tcx(), Some(autoref)) - })) + -> Ty<'tcx> { + let autoref = if let Some(mutbl) = pick.autoref { + let region = self.next_region_var(infer::Autoref(self.span)); + Some(AutoBorrow::Ref(region, mutbl)) } else { // No unsizing should be performed without autoref (at // least during method dispach). This is because we // currently only unsize `[T;N]` to `[T]`, and naturally // that must occur being a reference. assert!(pick.unsize.is_none()); - (None, None) + None }; - // Commit the autoderefs by calling `autoderef again, but this + + // Commit the autoderefs by calling `autoderef` again, but this // time writing the results into the various tables. - let (autoderefd_ty, n, result) = check::autoderef(self.fcx, - self.span, - unadjusted_self_ty, - Some(self.self_expr), - UnresolvedTypeAction::Error, - NoPreference, - |_, n| { - if n == pick.autoderefs { - Some(()) - } else { - None - } - }); + let mut autoderef = self.autoderef(self.span, unadjusted_self_ty); + let (autoderefd_ty, n) = autoderef.nth(pick.autoderefs).unwrap(); assert_eq!(n, pick.autoderefs); - assert_eq!(result, Some(())); + + autoderef.unambiguous_final_ty(); + autoderef.finalize(LvaluePreference::NoPreference, Some(self.self_expr)); + + let target = pick.unsize.unwrap_or(autoderefd_ty); + let target = target.adjust_for_autoref(self.tcx, autoref); // Write out the final adjustment. 
- self.fcx.write_adjustment(self.self_expr.id, - AdjustDerefRef(AutoDerefRef { - autoderefs: pick.autoderefs, - autoref: autoref, - unsize: unsize - })); + self.write_adjustment(self.self_expr.id, Adjustment { + kind: Adjust::DerefRef { + autoderefs: pick.autoderefs, + autoref: autoref, + unsize: pick.unsize.is_some(), + }, + target: target + }); - if let Some(target) = unsize { - target - } else { - autoderefd_ty.adjust_for_autoref(self.tcx(), autoref) - } + target } /////////////////////////////////////////////////////////////////////////// @@ -196,19 +167,19 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { fn fresh_receiver_substs(&mut self, self_ty: Ty<'tcx>, pick: &probe::Pick<'tcx>) - -> subst::Substs<'tcx> - { + -> &'tcx Substs<'tcx> { match pick.kind { probe::InherentImplPick => { - let impl_def_id = pick.item.container().id(); - assert!(self.tcx().impl_trait_ref(impl_def_id).is_none(), - "impl {:?} is not an inherent impl", impl_def_id); - check::impl_self_ty(self.fcx, self.span, impl_def_id).substs + let impl_def_id = pick.item.container.id(); + assert!(self.tcx.impl_trait_ref(impl_def_id).is_none(), + "impl {:?} is not an inherent impl", + impl_def_id); + self.impl_self_ty(self.span, impl_def_id).substs } probe::ObjectPick => { - let trait_def_id = pick.item.container().id(); - self.extract_trait_ref(self_ty, |this, object_ty, data| { + let trait_def_id = pick.item.container.id(); + self.extract_existential_trait_ref(self_ty, |this, object_ty, principal| { // The object data has no entry for the Self // Type. For the purposes of this method call, we // substitute the object type itself. This @@ -219,17 +190,15 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { // argument type), but those cases have already // been ruled out when we deemed the trait to be // "object safe". 
- let original_poly_trait_ref = - data.principal_trait_ref_with_self_ty(this.tcx(), object_ty); - let upcast_poly_trait_ref = - this.upcast(original_poly_trait_ref.clone(), trait_def_id); + let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty); + let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id); let upcast_trait_ref = this.replace_late_bound_regions_with_fresh_var(&upcast_poly_trait_ref); debug!("original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}", original_poly_trait_ref, upcast_trait_ref, trait_def_id); - upcast_trait_ref.substs.clone() + upcast_trait_ref.substs }) } @@ -243,137 +212,133 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { // respectively, then we want to return the type // parameters from the trait ([$A,$B]), not those from // the impl ([$A,$B,$C]) not the receiver type ([$C]). - let impl_polytype = check::impl_self_ty(self.fcx, self.span, impl_def_id); + let impl_polytype = self.impl_self_ty(self.span, impl_def_id); let impl_trait_ref = - self.fcx.instantiate_type_scheme( - self.span, - &impl_polytype.substs, - &self.tcx().impl_trait_ref(impl_def_id).unwrap()); - impl_trait_ref.substs.clone() + self.instantiate_type_scheme(self.span, + impl_polytype.substs, + &self.tcx.impl_trait_ref(impl_def_id).unwrap()); + impl_trait_ref.substs } probe::TraitPick => { - let trait_def_id = pick.item.container().id(); - let trait_def = self.tcx().lookup_trait_def(trait_def_id); + let trait_def_id = pick.item.container.id(); // Make a trait reference `$0 : Trait<$1...$n>` // consisting entirely of type variables. Later on in // the process we will unify the transformed-self-type // of the method with the actual type in order to // unify some of these variables. 
- self.infcx().fresh_substs_for_trait(self.span, - &trait_def.generics, - self.infcx().next_ty_var()) + self.fresh_substs_for_item(self.span, trait_def_id) } probe::WhereClausePick(ref poly_trait_ref) => { // Where clauses can have bound regions in them. We need to instantiate // those to convert from a poly-trait-ref to a trait-ref. - self.replace_late_bound_regions_with_fresh_var(&*poly_trait_ref).substs.clone() + self.replace_late_bound_regions_with_fresh_var(&poly_trait_ref).substs } } } - fn extract_trait_ref(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R where - F: FnMut(&mut ConfirmContext<'a, 'tcx>, Ty<'tcx>, &ty::TraitTy<'tcx>) -> R, + fn extract_existential_trait_ref(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R + where F: FnMut(&mut ConfirmContext<'a, 'gcx, 'tcx>, + Ty<'tcx>, + ty::PolyExistentialTraitRef<'tcx>) + -> R { // If we specified that this is an object method, then the // self-type ought to be something that can be dereferenced to // yield an object-type (e.g., `&Object` or `Box` // etc). - let (_, _, result) = check::autoderef(self.fcx, - self.span, - self_ty, - None, - UnresolvedTypeAction::Error, - NoPreference, - |ty, _| { - match ty.sty { - ty::TyTrait(ref data) => Some(closure(self, ty, &**data)), - _ => None, - } - }); - - match result { - Some(r) => r, - None => { - self.tcx().sess.span_bug( - self.span, - &format!("self-type `{}` for ObjectPick never dereferenced to an object", - self_ty)) - } - } + // FIXME: this feels, like, super dubious + self.fcx + .autoderef(self.span, self_ty) + .filter_map(|(ty, _)| { + match ty.sty { + ty::TyDynamic(ref data, ..) 
=> data.principal().map(|p| closure(self, ty, p)), + _ => None, + } + }) + .next() + .unwrap_or_else(|| { + span_bug!(self.span, + "self-type `{}` for ObjectPick never dereferenced to an object", + self_ty) + }) } fn instantiate_method_substs(&mut self, pick: &probe::Pick<'tcx>, - supplied_method_types: Vec>, - substs: subst::Substs<'tcx>) - -> subst::Substs<'tcx> - { + mut supplied_method_types: Vec>, + substs: &Substs<'tcx>) + -> &'tcx Substs<'tcx> { // Determine the values for the generic parameters of the method. // If they were not explicitly supplied, just construct fresh // variables. let num_supplied_types = supplied_method_types.len(); - let method = pick.item.as_opt_method().unwrap(); - let method_types = method.generics.types.get_slice(subst::FnSpace); - let num_method_types = method_types.len(); - + let method_generics = self.tcx.item_generics(pick.item.def_id); + let num_method_types = method_generics.types.len(); + + if num_supplied_types > 0 && num_supplied_types != num_method_types { + if num_method_types == 0 { + struct_span_err!(self.tcx.sess, + self.span, + E0035, + "does not take type parameters") + .span_label(self.span, &"called with unneeded type parameters") + .emit(); + } else { + struct_span_err!(self.tcx.sess, + self.span, + E0036, + "incorrect number of type parameters given for this method: \ + expected {}, found {}", + num_method_types, + num_supplied_types) + .span_label(self.span, + &format!("Passed {} type argument{}, expected {}", + num_supplied_types, + if num_supplied_types != 1 { "s" } else { "" }, + num_method_types)) + .emit(); + } + supplied_method_types = vec![self.tcx.types.err; num_method_types]; + } // Create subst for early-bound lifetime parameters, combining // parameters from the type and those from the method. 
// // FIXME -- permit users to manually specify lifetimes - let method_regions = - self.fcx.infcx().region_vars_for_defs( - self.span, - pick.item.as_opt_method().unwrap() - .generics.regions.get_slice(subst::FnSpace)); - - let subst::Substs { types, regions } = substs; - let regions = regions.map(|r| r.with_vec(subst::FnSpace, method_regions)); - let mut final_substs = subst::Substs { types: types, regions: regions }; - - if num_supplied_types == 0 { - self.fcx.infcx().type_vars_for_defs( - self.span, - subst::FnSpace, - &mut final_substs, - method_types); - } else if num_method_types == 0 { - span_err!(self.tcx().sess, self.span, E0035, - "does not take type parameters"); - self.fcx.infcx().type_vars_for_defs( - self.span, - subst::FnSpace, - &mut final_substs, - method_types); - } else if num_supplied_types != num_method_types { - span_err!(self.tcx().sess, self.span, E0036, - "incorrect number of type parameters given for this method: expected {}, found {}", - num_method_types, num_supplied_types); - final_substs.types.replace( - subst::FnSpace, - vec![self.tcx().types.err; num_method_types]); - } else { - final_substs.types.replace(subst::FnSpace, supplied_method_types); - } - - return final_substs; + let supplied_start = substs.params().len() + method_generics.regions.len(); + Substs::for_item(self.tcx, pick.item.def_id, |def, _| { + let i = def.index as usize; + if i < substs.params().len() { + substs.region_at(i) + } else { + self.region_var_for_def(self.span, def) + } + }, |def, cur_substs| { + let i = def.index as usize; + if i < substs.params().len() { + substs.type_at(i) + } else if supplied_method_types.is_empty() { + self.type_var_for_def(self.span, def, cur_substs) + } else { + supplied_method_types[i - supplied_start] + } + }) } - fn unify_receivers(&mut self, - self_ty: Ty<'tcx>, - method_self_ty: Ty<'tcx>) - { - match self.fcx.mk_subty(false, TypeOrigin::Misc(self.span), self_ty, method_self_ty) { - Ok(_) => {} + fn unify_receivers(&mut self, 
self_ty: Ty<'tcx>, method_self_ty: Ty<'tcx>) { + match self.sub_types(false, &self.misc(self.span), self_ty, method_self_ty) { + Ok(InferOk { obligations, value: () }) => { + self.register_predicates(obligations); + } Err(_) => { - self.tcx().sess.span_bug( - self.span, - &format!("{} was a subtype of {} but now is not?", - self_ty, method_self_ty)); + span_bug!(self.span, + "{} was a subtype of {} but now is not?", + self_ty, + method_self_ty); } } } @@ -383,9 +348,8 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { fn instantiate_method_sig(&mut self, pick: &probe::Pick<'tcx>, - all_substs: subst::Substs<'tcx>) - -> InstantiatedMethodSig<'tcx> - { + all_substs: &'tcx Substs<'tcx>) + -> (Ty<'tcx>, ty::InstantiatedPredicates<'tcx>) { debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs); @@ -393,13 +357,18 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { // Instantiate the bounds on the method with the // type/early-bound-regions substitutions performed. There can // be no late-bound regions appearing here. - let method_predicates = pick.item.as_opt_method().unwrap() - .predicates.instantiate(self.tcx(), &all_substs); - let method_predicates = self.fcx.normalize_associated_types_in(self.span, - &method_predicates); + let def_id = pick.item.def_id; + let method_predicates = self.tcx.item_predicates(def_id) + .instantiate(self.tcx, all_substs); + let method_predicates = self.normalize_associated_types_in(self.span, + &method_predicates); - debug!("method_predicates after subst = {:?}", - method_predicates); + debug!("method_predicates after subst = {:?}", method_predicates); + + let fty = match self.tcx.item_type(def_id).sty { + ty::TyFnDef(_, _, f) => f, + _ => bug!() + }; // Instantiate late-bound regions and substitute the trait // parameters into the method type to get the actual method type. 
@@ -407,68 +376,52 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { // NB: Instantiate late-bound regions first so that // `instantiate_type_scheme` can normalize associated types that // may reference those regions. - let method_sig = self.replace_late_bound_regions_with_fresh_var( - &pick.item.as_opt_method().unwrap().fty.sig); + let method_sig = self.replace_late_bound_regions_with_fresh_var(&fty.sig); debug!("late-bound lifetimes from method instantiated, method_sig={:?}", method_sig); - let method_sig = self.fcx.instantiate_type_scheme(self.span, &all_substs, &method_sig); - debug!("type scheme substituted, method_sig={:?}", - method_sig); + let method_sig = self.instantiate_type_scheme(self.span, all_substs, &method_sig); + debug!("type scheme substituted, method_sig={:?}", method_sig); - InstantiatedMethodSig { - method_sig: method_sig, - all_substs: all_substs, - method_predicates: method_predicates, - } + let method_ty = self.tcx.mk_fn_def(def_id, all_substs, + self.tcx.mk_bare_fn(ty::BareFnTy { + sig: ty::Binder(method_sig), + unsafety: fty.unsafety, + abi: fty.abi, + })); + + (method_ty, method_predicates) } fn add_obligations(&mut self, fty: Ty<'tcx>, - all_substs: &subst::Substs<'tcx>, + all_substs: &Substs<'tcx>, method_predicates: &ty::InstantiatedPredicates<'tcx>) { debug!("add_obligations: fty={:?} all_substs={:?} method_predicates={:?}", fty, all_substs, method_predicates); - self.fcx.add_obligations_for_parameters( - traits::ObligationCause::misc(self.span, self.fcx.body_id), - method_predicates); + self.add_obligations_for_parameters(traits::ObligationCause::misc(self.span, self.body_id), + method_predicates); // this is a projection from a trait reference, so we have to // make sure that the trait reference inputs are well-formed. 
- self.fcx.add_wf_bounds( - all_substs, - self.call_expr); + self.add_wf_bounds(all_substs, self.call_expr); // the function type must also be well-formed (this is not // implied by the substs being well-formed because of inherent // impls and late-bound regions - see issue #28609). - self.fcx.register_wf_obligation(fty, self.span, traits::MiscObligation); + self.register_wf_obligation(fty, self.span, traits::MiscObligation); } /////////////////////////////////////////////////////////////////////////// // RECONCILIATION - /// When we select a method with an `&mut self` receiver, we have to go convert any + /// When we select a method with a mutable autoref, we have to go convert any /// auto-derefs, indices, etc from `Deref` and `Index` into `DerefMut` and `IndexMut` /// respectively. - fn fixup_derefs_on_method_receiver_if_necessary(&self, - method_callee: &ty::MethodCallee) { - let sig = match method_callee.ty.sty { - ty::TyBareFn(_, ref f) => f.sig.clone(), - _ => return, - }; - - match sig.0.inputs[0].sty { - ty::TyRef(_, ty::TypeAndMut { - ty: _, - mutbl: hir::MutMutable, - }) => {} - _ => return, - } - + fn convert_lvalue_derefs_to_mutable(&self) { // Gather up expressions we want to munge. let mut exprs = Vec::new(); exprs.push(self.self_expr); @@ -478,134 +431,130 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { hir::ExprField(ref expr, _) | hir::ExprTupField(ref expr, _) | hir::ExprIndex(ref expr, _) | - hir::ExprUnary(hir::UnDeref, ref expr) => exprs.push(&**expr), + hir::ExprUnary(hir::UnDeref, ref expr) => exprs.push(&expr), _ => break, } } - debug!("fixup_derefs_on_method_receiver_if_necessary: exprs={:?}", - exprs); + debug!("convert_lvalue_derefs_to_mutable: exprs={:?}", exprs); // Fix up autoderefs and derefs. for (i, &expr) in exprs.iter().rev().enumerate() { + debug!("convert_lvalue_derefs_to_mutable: i={} expr={:?}", i, expr); + // Count autoderefs. 
- let autoderef_count = match self.fcx - .inh - .tables - .borrow() - .adjustments - .get(&expr.id) { - Some(&AdjustDerefRef(ref adj)) => adj.autoderefs, - Some(_) | None => 0, - }; - - debug!("fixup_derefs_on_method_receiver_if_necessary: i={} expr={:?} \ - autoderef_count={}", - i, expr, autoderef_count); - - if autoderef_count > 0 { - check::autoderef(self.fcx, - expr.span, - self.fcx.expr_ty(expr), - Some(expr), - UnresolvedTypeAction::Error, - PreferMutLvalue, - |_, autoderefs| { - if autoderefs == autoderef_count + 1 { - Some(()) - } else { - None - } - }); + let adjustment = self.tables.borrow().adjustments.get(&expr.id).cloned(); + match adjustment { + Some(Adjustment { kind: Adjust::DerefRef { autoderefs, .. }, .. }) => { + if autoderefs > 0 { + let mut autoderef = self.autoderef(expr.span, self.node_ty(expr.id)); + autoderef.nth(autoderefs).unwrap_or_else(|| { + span_bug!(expr.span, + "expr was deref-able {} times but now isn't?", + autoderefs); + }); + autoderef.finalize(PreferMutLvalue, Some(expr)); + } + } + Some(_) | None => {} } // Don't retry the first one or we might infinite loop! - if i != 0 { - match expr.node { - hir::ExprIndex(ref base_expr, ref index_expr) => { - // If this is an overloaded index, the - // adjustment will include an extra layer of - // autoref because the method is an &self/&mut - // self method. We have to peel it off to get - // the raw adjustment that `try_index_step` - // expects. This is annoying and horrible. We - // ought to recode this routine so it doesn't - // (ab)use the normal type checking paths. 
- let adj = self.fcx.inh.tables.borrow().adjustments.get(&base_expr.id) - .cloned(); - let (autoderefs, unsize) = match adj { - Some(AdjustDerefRef(adr)) => match adr.autoref { + if i == 0 { + continue; + } + match expr.node { + hir::ExprIndex(ref base_expr, ref index_expr) => { + // If this is an overloaded index, the + // adjustment will include an extra layer of + // autoref because the method is an &self/&mut + // self method. We have to peel it off to get + // the raw adjustment that `try_index_step` + // expects. This is annoying and horrible. We + // ought to recode this routine so it doesn't + // (ab)use the normal type checking paths. + let adj = self.tables.borrow().adjustments.get(&base_expr.id).cloned(); + let (autoderefs, unsize, adjusted_base_ty) = match adj { + Some(Adjustment { + kind: Adjust::DerefRef { autoderefs, autoref, unsize }, + target + }) => { + match autoref { None => { - assert!(adr.unsize.is_none()); - (adr.autoderefs, None) - } - Some(AutoPtr(_, _)) => { - (adr.autoderefs, adr.unsize.map(|target| { - target.builtin_deref(false, NoPreference) - .expect("fixup: AutoPtr is not &T").ty - })) + assert!(!unsize); } + Some(AutoBorrow::Ref(..)) => {} Some(_) => { - self.tcx().sess.span_bug( - base_expr.span, - &format!("unexpected adjustment autoref {:?}", - adr)); + span_bug!(base_expr.span, + "unexpected adjustment autoref {:?}", + adj); } - }, - None => (0, None), - Some(_) => { - self.tcx().sess.span_bug( - base_expr.span, - "unexpected adjustment type"); } - }; - - let (adjusted_base_ty, unsize) = if let Some(target) = unsize { - (target, true) - } else { - (self.fcx.adjust_expr_ty(base_expr, - Some(&AdjustDerefRef(AutoDerefRef { - autoderefs: autoderefs, - autoref: None, - unsize: None - }))), false) - }; - let index_expr_ty = self.fcx.expr_ty(&**index_expr); - - let result = check::try_index_step( - self.fcx, - ty::MethodCall::expr(expr.id), - expr, - &**base_expr, - adjusted_base_ty, - autoderefs, - unsize, - PreferMutLvalue, - 
index_expr_ty); - - if let Some((input_ty, return_ty)) = result { - demand::suptype(self.fcx, index_expr.span, input_ty, index_expr_ty); - - let expr_ty = self.fcx.expr_ty(&*expr); - demand::suptype(self.fcx, expr.span, expr_ty, return_ty); + + (autoderefs, unsize, if unsize { + target.builtin_deref(false, NoPreference) + .expect("fixup: AutoBorrow::Ref is not &T") + .ty + } else { + let ty = self.node_ty(base_expr.id); + let mut ty = self.shallow_resolve(ty); + let mut method_type = |method_call: ty::MethodCall| { + self.tables.borrow().method_map.get(&method_call).map(|m| { + self.resolve_type_vars_if_possible(&m.ty) + }) + }; + + if !ty.references_error() { + for i in 0..autoderefs { + ty = ty.adjust_for_autoderef(self.tcx, + base_expr.id, + base_expr.span, + i as u32, + &mut method_type); + } + } + + ty + }) } - } - hir::ExprUnary(hir::UnDeref, ref base_expr) => { - // if this is an overloaded deref, then re-evaluate with - // a preference for mut - let method_call = ty::MethodCall::expr(expr.id); - if self.fcx.inh.tables.borrow().method_map.contains_key(&method_call) { - check::try_overloaded_deref( - self.fcx, - expr.span, - Some(method_call), - Some(&**base_expr), - self.fcx.expr_ty(&**base_expr), - PreferMutLvalue); + None => (0, false, self.node_ty(base_expr.id)), + Some(_) => { + span_bug!(base_expr.span, "unexpected adjustment type"); } + }; + + let index_expr_ty = self.node_ty(index_expr.id); + + let result = self.try_index_step(ty::MethodCall::expr(expr.id), + expr, + &base_expr, + adjusted_base_ty, + autoderefs, + unsize, + PreferMutLvalue, + index_expr_ty); + + if let Some((input_ty, return_ty)) = result { + self.demand_suptype(index_expr.span, input_ty, index_expr_ty); + + let expr_ty = self.node_ty(expr.id); + self.demand_suptype(expr.span, expr_ty, return_ty); } - _ => {} } + hir::ExprUnary(hir::UnDeref, ref base_expr) => { + // if this is an overloaded deref, then re-evaluate with + // a preference for mut + let method_call = 
ty::MethodCall::expr(expr.id); + if self.tables.borrow().method_map.contains_key(&method_call) { + let method = self.try_overloaded_deref(expr.span, + Some(&base_expr), + self.node_ty(base_expr.id), + PreferMutLvalue); + let method = method.expect("re-trying deref failed"); + self.tables.borrow_mut().method_map.insert(method_call, method); + } + } + _ => {} } } } @@ -613,19 +562,11 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { /////////////////////////////////////////////////////////////////////////// // MISCELLANY - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.fcx.tcx() - } - - fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> { - self.fcx.infcx() - } - fn enforce_illegal_method_limitations(&self, pick: &probe::Pick) { // Disallow calls to the method `drop` defined in the `Drop` trait. - match pick.item.container() { + match pick.item.container { ty::TraitContainer(trait_def_id) => { - callee::check_legal_trait_for_method_call(self.fcx.ccx, self.span, trait_def_id) + callee::check_legal_trait_for_method_call(self.ccx, self.span, trait_def_id) } ty::ImplContainer(..) 
=> {} } @@ -634,29 +575,27 @@ impl<'a,'tcx> ConfirmContext<'a,'tcx> { fn upcast(&mut self, source_trait_ref: ty::PolyTraitRef<'tcx>, target_trait_def_id: DefId) - -> ty::PolyTraitRef<'tcx> - { - let upcast_trait_refs = traits::upcast(self.tcx(), - source_trait_ref.clone(), - target_trait_def_id); + -> ty::PolyTraitRef<'tcx> { + let upcast_trait_refs = self.tcx + .upcast_choices(source_trait_ref.clone(), target_trait_def_id); // must be exactly one trait ref or we'd get an ambig error etc if upcast_trait_refs.len() != 1 { - self.tcx().sess.span_bug( - self.span, - &format!("cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`", - source_trait_ref, - target_trait_def_id, - upcast_trait_refs)); + span_bug!(self.span, + "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`", + source_trait_ref, + target_trait_def_id, + upcast_trait_refs); } upcast_trait_refs.into_iter().next().unwrap() } fn replace_late_bound_regions_with_fresh_var(&self, value: &ty::Binder) -> T - where T : TypeFoldable<'tcx> + where T: TypeFoldable<'tcx> { - self.infcx().replace_late_bound_regions_with_fresh_var( - self.span, infer::FnCall, value).0 + self.fcx + .replace_late_bound_regions_with_fresh_var(self.span, infer::FnCall, value) + .0 } } diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index d462e2b45b281..2e66f6290a022 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -10,26 +10,24 @@ //! Method lookup: the secret sauce of Rust. See `README.md`. 
-use astconv::AstConv; use check::FnCtxt; -use middle::def; -use middle::def_id::DefId; -use middle::privacy::{AllPublic, DependsOn, LastPrivate, LastMod}; -use middle::subst; -use middle::traits; -use middle::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; -use middle::ty::adjustment::{AdjustDerefRef, AutoDerefRef, AutoPtr}; -use middle::infer; +use hir::def::Def; +use hir::def_id::DefId; +use rustc::ty::subst::Substs; +use rustc::traits; +use rustc::ty::{self, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; +use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow}; +use rustc::infer; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; -use rustc_front::hir; +use rustc::hir; pub use self::MethodError::*; pub use self::CandidateSource::*; -pub use self::suggest::{report_error, AllTraitsVec}; +pub use self::suggest::AllTraitsVec; mod confirm; mod probe; @@ -43,7 +41,11 @@ pub enum MethodError<'tcx> { Ambiguity(Vec), // Using a `Fn`/`FnMut`/etc method on a raw closure type before we have inferred its kind. - ClosureAmbiguity(/* DefId of fn trait */ DefId), + ClosureAmbiguity(// DefId of fn trait + DefId), + + // Found an applicable method, but it is not visible. 
+ PrivateMatch(Def), } // Contains a list of static methods that may apply, a list of unsatisfied trait predicates which @@ -52,19 +54,20 @@ pub struct NoMatchData<'tcx> { pub static_candidates: Vec, pub unsatisfied_predicates: Vec>, pub out_of_scope_traits: Vec, - pub mode: probe::Mode + pub mode: probe::Mode, } impl<'tcx> NoMatchData<'tcx> { pub fn new(static_candidates: Vec, unsatisfied_predicates: Vec>, out_of_scope_traits: Vec, - mode: probe::Mode) -> Self { + mode: probe::Mode) + -> Self { NoMatchData { static_candidates: static_candidates, unsatisfied_predicates: unsatisfied_predicates, out_of_scope_traits: out_of_scope_traits, - mode: mode + mode: mode, } } } @@ -74,310 +77,280 @@ impl<'tcx> NoMatchData<'tcx> { #[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] pub enum CandidateSource { ImplSource(DefId), - TraitSource(/* trait id */ DefId), + TraitSource(// trait id + DefId), } -/// Determines whether the type `self_ty` supports a method name `method_name` or not. -pub fn exists<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - method_name: ast::Name, - self_ty: ty::Ty<'tcx>, - call_expr_id: ast::NodeId) - -> bool -{ - let mode = probe::Mode::MethodCall; - match probe::probe(fcx, span, mode, method_name, self_ty, call_expr_id) { - Ok(..) => true, - Err(NoMatch(..)) => false, - Err(Ambiguity(..)) => true, - Err(ClosureAmbiguity(..)) => true, +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + /// Determines whether the type `self_ty` supports a method name `method_name` or not. + pub fn method_exists(&self, + span: Span, + method_name: ast::Name, + self_ty: ty::Ty<'tcx>, + call_expr_id: ast::NodeId, + allow_private: bool) + -> bool { + let mode = probe::Mode::MethodCall; + match self.probe_method(span, mode, method_name, self_ty, call_expr_id) { + Ok(..) => true, + Err(NoMatch(..)) => false, + Err(Ambiguity(..)) => true, + Err(ClosureAmbiguity(..)) => true, + Err(PrivateMatch(..)) => allow_private, + } } -} -/// Performs method lookup. 
If lookup is successful, it will return the callee and store an -/// appropriate adjustment for the self-expr. In some cases it may report an error (e.g., invoking -/// the `drop` method). -/// -/// # Arguments -/// -/// Given a method call like `foo.bar::(...)`: -/// -/// * `fcx`: the surrounding `FnCtxt` (!) -/// * `span`: the span for the method call -/// * `method_name`: the name of the method being called (`bar`) -/// * `self_ty`: the (unadjusted) type of the self expression (`foo`) -/// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`) -/// * `self_expr`: the self expression (`foo`) -pub fn lookup<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - method_name: ast::Name, - self_ty: ty::Ty<'tcx>, - supplied_method_types: Vec>, - call_expr: &'tcx hir::Expr, - self_expr: &'tcx hir::Expr) - -> Result, MethodError<'tcx>> -{ - debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})", - method_name, - self_ty, - call_expr, - self_expr); - - let mode = probe::Mode::MethodCall; - let self_ty = fcx.infcx().resolve_type_vars_if_possible(&self_ty); - let pick = try!(probe::probe(fcx, span, mode, method_name, self_ty, call_expr.id)); - Ok(confirm::confirm(fcx, span, self_expr, call_expr, self_ty, pick, supplied_method_types)) -} + /// Performs method lookup. If lookup is successful, it will return the callee + /// and store an appropriate adjustment for the self-expr. In some cases it may + /// report an error (e.g., invoking the `drop` method). + /// + /// # Arguments + /// + /// Given a method call like `foo.bar::(...)`: + /// + /// * `fcx`: the surrounding `FnCtxt` (!) 
+ /// * `span`: the span for the method call + /// * `method_name`: the name of the method being called (`bar`) + /// * `self_ty`: the (unadjusted) type of the self expression (`foo`) + /// * `supplied_method_types`: the explicit method type parameters, if any (`T1..Tn`) + /// * `self_expr`: the self expression (`foo`) + pub fn lookup_method(&self, + span: Span, + method_name: ast::Name, + self_ty: ty::Ty<'tcx>, + supplied_method_types: Vec>, + call_expr: &'gcx hir::Expr, + self_expr: &'gcx hir::Expr) + -> Result, MethodError<'tcx>> { + debug!("lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})", + method_name, + self_ty, + call_expr, + self_expr); + + let mode = probe::Mode::MethodCall; + let self_ty = self.resolve_type_vars_if_possible(&self_ty); + let pick = self.probe_method(span, mode, method_name, self_ty, call_expr.id)?; + + if let Some(import_id) = pick.import_id { + self.tcx.used_trait_imports.borrow_mut().insert(import_id); + } -pub fn lookup_in_trait<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - self_expr: Option<&hir::Expr>, - m_name: ast::Name, - trait_def_id: DefId, - self_ty: ty::Ty<'tcx>, - opt_input_types: Option>>) - -> Option> -{ - lookup_in_trait_adjusted(fcx, span, self_expr, m_name, trait_def_id, - 0, false, self_ty, opt_input_types) -} + self.tcx.check_stability(pick.item.def_id, call_expr.id, span); -/// `lookup_in_trait_adjusted` is used for overloaded operators. It does a very narrow slice of -/// what the normal probe/confirm path does. In particular, it doesn't really do any probing: it -/// simply constructs an obligation for a particular trait with the given self-type and checks -/// whether that trait is implemented. -/// -/// FIXME(#18741) -- It seems likely that we can consolidate some of this code with the other -/// method-lookup code. In particular, autoderef on index is basically identical to autoderef with -/// normal probes, except that the test also looks for built-in indexing. 
Also, the second half of -/// this method is basically the same as confirmation. -pub fn lookup_in_trait_adjusted<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - self_expr: Option<&hir::Expr>, - m_name: ast::Name, - trait_def_id: DefId, - autoderefs: usize, - unsize: bool, - self_ty: ty::Ty<'tcx>, - opt_input_types: Option>>) - -> Option> -{ - debug!("lookup_in_trait_adjusted(self_ty={:?}, self_expr={:?}, m_name={}, trait_def_id={:?})", - self_ty, - self_expr, - m_name, - trait_def_id); - - let trait_def = fcx.tcx().lookup_trait_def(trait_def_id); - - let type_parameter_defs = trait_def.generics.types.get_slice(subst::TypeSpace); - let expected_number_of_input_types = type_parameter_defs.len(); - - assert_eq!(trait_def.generics.types.len(subst::FnSpace), 0); - assert!(trait_def.generics.regions.is_empty()); - - // Construct a trait-reference `self_ty : Trait` - let mut substs = subst::Substs::new_trait(Vec::new(), Vec::new(), self_ty); - - match opt_input_types { - Some(input_types) => { - assert_eq!(expected_number_of_input_types, input_types.len()); - substs.types.replace(subst::ParamSpace::TypeSpace, input_types); - } + Ok(self.confirm_method(span, + self_expr, + call_expr, + self_ty, + pick, + supplied_method_types)) + } - None => { - fcx.inh.infcx.type_vars_for_defs( - span, - subst::ParamSpace::TypeSpace, - &mut substs, - type_parameter_defs); - } + pub fn lookup_method_in_trait(&self, + span: Span, + self_expr: Option<&hir::Expr>, + m_name: ast::Name, + trait_def_id: DefId, + self_ty: ty::Ty<'tcx>, + opt_input_types: Option>>) + -> Option> { + self.lookup_method_in_trait_adjusted(span, + self_expr, + m_name, + trait_def_id, + 0, + false, + self_ty, + opt_input_types) } - let trait_ref = ty::TraitRef::new(trait_def_id, fcx.tcx().mk_substs(substs)); + /// `lookup_in_trait_adjusted` is used for overloaded operators. + /// It does a very narrow slice of what the normal probe/confirm path does. 
+ /// In particular, it doesn't really do any probing: it simply constructs + /// an obligation for a particular trait with the given self-type and checks + /// whether that trait is implemented. + /// + /// FIXME(#18741) -- It seems likely that we can consolidate some of this + /// code with the other method-lookup code. In particular, autoderef on + /// index is basically identical to autoderef with normal probes, except + /// that the test also looks for built-in indexing. Also, the second half of + /// this method is basically the same as confirmation. + pub fn lookup_method_in_trait_adjusted(&self, + span: Span, + self_expr: Option<&hir::Expr>, + m_name: ast::Name, + trait_def_id: DefId, + autoderefs: usize, + unsize: bool, + self_ty: ty::Ty<'tcx>, + opt_input_types: Option>>) + -> Option> { + debug!("lookup_in_trait_adjusted(self_ty={:?}, self_expr={:?}, \ + m_name={}, trait_def_id={:?})", + self_ty, + self_expr, + m_name, + trait_def_id); + + // Construct a trait-reference `self_ty : Trait` + let substs = Substs::for_item(self.tcx, + trait_def_id, + |def, _| self.region_var_for_def(span, def), + |def, substs| { + if def.index == 0 { + self_ty + } else if let Some(ref input_types) = opt_input_types { + input_types[def.index as usize - 1] + } else { + self.type_var_for_def(span, def, substs) + } + }); - // Construct an obligation - let poly_trait_ref = trait_ref.to_poly_trait_ref(); - let obligation = traits::Obligation::misc(span, - fcx.body_id, - poly_trait_ref.to_predicate()); + let trait_ref = ty::TraitRef::new(trait_def_id, substs); - // Now we want to know if this can be matched - let mut selcx = traits::SelectionContext::new(fcx.infcx()); - if !selcx.evaluate_obligation(&obligation) { - debug!("--> Cannot match obligation"); - return None; // Cannot be matched, no such method resolution is possible.
- } + // Construct an obligation + let poly_trait_ref = trait_ref.to_poly_trait_ref(); + let obligation = + traits::Obligation::misc(span, self.body_id, poly_trait_ref.to_predicate()); - // Trait must have a method named `m_name` and it should not have - // type parameters or early-bound regions. - let tcx = fcx.tcx(); - let method_item = trait_item(tcx, trait_def_id, m_name).unwrap(); - let method_ty = method_item.as_opt_method().unwrap(); - assert_eq!(method_ty.generics.types.len(subst::FnSpace), 0); - assert_eq!(method_ty.generics.regions.len(subst::FnSpace), 0); - - debug!("lookup_in_trait_adjusted: method_item={:?} method_ty={:?}", - method_item, method_ty); - - // Instantiate late-bound regions and substitute the trait - // parameters into the method type to get the actual method type. - // - // NB: Instantiate late-bound regions first so that - // `instantiate_type_scheme` can normalize associated types that - // may reference those regions. - let fn_sig = fcx.infcx().replace_late_bound_regions_with_fresh_var(span, - infer::FnCall, - &method_ty.fty.sig).0; - let fn_sig = fcx.instantiate_type_scheme(span, trait_ref.substs, &fn_sig); - let transformed_self_ty = fn_sig.inputs[0]; - let fty = tcx.mk_fn(None, tcx.mk_bare_fn(ty::BareFnTy { - sig: ty::Binder(fn_sig), - unsafety: method_ty.fty.unsafety, - abi: method_ty.fty.abi.clone(), - })); - - debug!("lookup_in_trait_adjusted: matched method fty={:?} obligation={:?}", - fty, - obligation); - - // Register obligations for the parameters. This will include the - // `Self` parameter, which in turn has a bound of the main trait, - // so this also effectively registers `obligation` as well. (We - // used to register `obligation` explicitly, but that resulted in - // double error messages being reported.) - // - // Note that as the method comes from a trait, it should not have - // any late-bound regions appearing in its bounds. 
- let method_bounds = fcx.instantiate_bounds(span, trait_ref.substs, &method_ty.predicates); - assert!(!method_bounds.has_escaping_regions()); - fcx.add_obligations_for_parameters( - traits::ObligationCause::misc(span, fcx.body_id), - &method_bounds); - - // Also register an obligation for the method type being well-formed. - fcx.register_wf_obligation(fty, span, traits::MiscObligation); - - // FIXME(#18653) -- Try to resolve obligations, giving us more - // typing information, which can sometimes be needed to avoid - // pathological region inference failures. - fcx.select_obligations_where_possible(); - - // Insert any adjustments needed (always an autoref of some mutability). - match self_expr { - None => { } - - Some(self_expr) => { - debug!("lookup_in_trait_adjusted: inserting adjustment if needed \ - (self-id={}, autoderefs={}, unsize={}, explicit_self={:?})", - self_expr.id, autoderefs, unsize, - method_ty.explicit_self); + // Now we want to know if this can be matched + let mut selcx = traits::SelectionContext::new(self); + if !selcx.evaluate_obligation(&obligation) { + debug!("--> Cannot match obligation"); + return None; // Cannot be matched, no such method resolution is possible. + } - match method_ty.explicit_self { - ty::ExplicitSelfCategory::ByValue => { - // Trait method is fn(self), no transformation needed. - assert!(!unsize); - fcx.write_autoderef_adjustment(self_expr.id, autoderefs); - } + // Trait must have a method named `m_name` and it should not have + // type parameters or early-bound regions. + let tcx = self.tcx; + let method_item = self.associated_item(trait_def_id, m_name).unwrap(); + let def_id = method_item.def_id; + let generics = tcx.item_generics(def_id); + assert_eq!(generics.types.len(), 0); + assert_eq!(generics.regions.len(), 0); + + debug!("lookup_in_trait_adjusted: method_item={:?}", method_item); + + // Instantiate late-bound regions and substitute the trait + // parameters into the method type to get the actual method type. 
+ // + // NB: Instantiate late-bound regions first so that + // `instantiate_type_scheme` can normalize associated types that + // may reference those regions. + let original_method_ty = tcx.item_type(def_id); + let fty = match original_method_ty.sty { + ty::TyFnDef(_, _, f) => f, + _ => bug!() + }; + let fn_sig = self.replace_late_bound_regions_with_fresh_var(span, + infer::FnCall, + &fty.sig).0; + let fn_sig = self.instantiate_type_scheme(span, trait_ref.substs, &fn_sig); + let transformed_self_ty = fn_sig.inputs[0]; + let method_ty = tcx.mk_fn_def(def_id, trait_ref.substs, + tcx.mk_bare_fn(ty::BareFnTy { + sig: ty::Binder(fn_sig), + unsafety: fty.unsafety, + abi: fty.abi + })); + + debug!("lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}", + method_ty, + obligation); + + // Register obligations for the parameters. This will include the + // `Self` parameter, which in turn has a bound of the main trait, + // so this also effectively registers `obligation` as well. (We + // used to register `obligation` explicitly, but that resulted in + // double error messages being reported.) + // + // Note that as the method comes from a trait, it should not have + // any late-bound regions appearing in its bounds. + let method_bounds = self.instantiate_bounds(span, def_id, trait_ref.substs); + assert!(!method_bounds.has_escaping_regions()); + self.add_obligations_for_parameters(traits::ObligationCause::misc(span, self.body_id), + &method_bounds); + + // Also register an obligation for the method type being well-formed. + self.register_wf_obligation(method_ty, span, traits::MiscObligation); + + // FIXME(#18653) -- Try to resolve obligations, giving us more + // typing information, which can sometimes be needed to avoid + // pathological region inference failures. + self.select_obligations_where_possible(); + + // Insert any adjustments needed (always an autoref of some mutability). 
+ if let Some(self_expr) = self_expr { + debug!("lookup_in_trait_adjusted: inserting adjustment if needed \ + (self-id={}, autoderefs={}, unsize={}, fty={:?})", + self_expr.id, autoderefs, unsize, original_method_ty); - ty::ExplicitSelfCategory::ByReference(..) => { + let original_sig = original_method_ty.fn_sig(); + let autoref = match (&original_sig.input(0).skip_binder().sty, + &transformed_self_ty.sty) { + (&ty::TyRef(..), &ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ })) => { // Trait method is fn(&self) or fn(&mut self), need an // autoref. Pull the region etc out of the type of first argument. - match transformed_self_ty.sty { - ty::TyRef(region, ty::TypeAndMut { mutbl, ty: _ }) => { - fcx.write_adjustment(self_expr.id, - AdjustDerefRef(AutoDerefRef { - autoderefs: autoderefs, - autoref: Some(AutoPtr(region, mutbl)), - unsize: if unsize { - Some(transformed_self_ty) - } else { - None - } - })); - } - - _ => { - fcx.tcx().sess.span_bug( - span, - &format!( - "trait method is &self but first arg is: {}", - transformed_self_ty)); - } - } + Some(AutoBorrow::Ref(region, mutbl)) } - _ => { - fcx.tcx().sess.span_bug( - span, - &format!( - "unexpected explicit self type in operator method: {:?}", - method_ty.explicit_self)); + // Trait method is fn(self), no transformation needed. 
+ assert!(!unsize); + None } - } + }; + + self.write_adjustment(self_expr.id, Adjustment { + kind: Adjust::DerefRef { + autoderefs: autoderefs, + autoref: autoref, + unsize: unsize + }, + target: transformed_self_ty + }); } - } - let callee = ty::MethodCallee { - def_id: method_item.def_id(), - ty: fty, - substs: trait_ref.substs - }; + let callee = ty::MethodCallee { + def_id: def_id, + ty: method_ty, + substs: trait_ref.substs, + }; - debug!("callee = {:?}", callee); + debug!("callee = {:?}", callee); - Some(callee) -} - -pub fn resolve_ufcs<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - method_name: ast::Name, - self_ty: ty::Ty<'tcx>, - expr_id: ast::NodeId) - -> Result<(def::Def, LastPrivate), MethodError<'tcx>> -{ - let mode = probe::Mode::Path; - let pick = try!(probe::probe(fcx, span, mode, method_name, self_ty, expr_id)); - let def_id = pick.item.def_id(); - let mut lp = LastMod(AllPublic); - if let probe::InherentImplPick = pick.kind { - if pick.item.vis() != hir::Public { - lp = LastMod(DependsOn(def_id)); - } + Some(callee) } - let def_result = match pick.item { - ty::ImplOrTraitItem::MethodTraitItem(..) => def::DefMethod(def_id), - ty::ImplOrTraitItem::ConstTraitItem(..) => def::DefAssociatedConst(def_id), - ty::ImplOrTraitItem::TypeTraitItem(..) => { - fcx.tcx().sess.span_bug(span, "resolve_ufcs: probe picked associated type"); + + pub fn resolve_ufcs(&self, + span: Span, + method_name: ast::Name, + self_ty: ty::Ty<'tcx>, + expr_id: ast::NodeId) + -> Result> { + let mode = probe::Mode::Path; + let pick = self.probe_method(span, mode, method_name, self_ty, expr_id)?; + + if let Some(import_id) = pick.import_id { + self.tcx.used_trait_imports.borrow_mut().insert(import_id); } - }; - Ok((def_result, lp)) -} + let def = pick.item.def(); -/// Find item with name `item_name` defined in `trait_def_id` -/// and return it, or `None`, if no such item. 
-fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - item_name: ast::Name) - -> Option> -{ - let trait_items = tcx.trait_items(trait_def_id); - trait_items.iter() - .find(|item| item.name() == item_name) - .cloned() -} + self.tcx.check_stability(def.def_id(), expr_id, span); -fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>, - impl_def_id: DefId, - item_name: ast::Name) - -> Option> -{ - let impl_items = tcx.impl_items.borrow(); - let impl_items = impl_items.get(&impl_def_id).unwrap(); - impl_items - .iter() - .map(|&did| tcx.impl_or_trait_item(did.def_id())) - .find(|m| m.name() == item_name) + if let probe::InherentImplPick = pick.kind { + if !pick.item.vis.is_accessible_from(self.body_id, &self.tcx.map) { + let msg = format!("{} `{}` is private", def.kind_name(), method_name); + self.tcx.sess.span_err(span, &msg); + } + } + Ok(def) + } + + /// Find item with name `item_name` defined in impl/trait `def_id` + /// and return it, or `None`, if no such item was defined there. + pub fn associated_item(&self, def_id: DefId, item_name: ast::Name) + -> Option { + self.tcx.associated_items(def_id).find(|item| item.name == item_name) + } } diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index 44dd0ef7b17d8..b0787d75c9cb4 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -13,27 +13,26 @@ use super::NoMatchData; use super::{CandidateSource, ImplSource, TraitSource}; use super::suggest; -use check; -use check::{FnCtxt, UnresolvedTypeAction}; -use middle::def_id::DefId; -use middle::subst; -use middle::subst::Subst; -use middle::traits; -use middle::ty::{self, NoPreference, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; -use middle::infer; -use middle::infer::{InferCtxt, TypeOrigin}; +use check::FnCtxt; +use hir::def_id::DefId; +use hir::def::Def; +use rustc::infer::InferOk; +use rustc::ty::subst::{Subst, Substs}; +use rustc::traits::{self, ObligationCause}; +use 
rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; +use rustc::util::nodemap::FxHashSet; use syntax::ast; -use syntax::codemap::{Span, DUMMY_SP}; -use rustc_front::hir; -use std::collections::HashSet; +use syntax_pos::Span; +use rustc::hir; use std::mem; +use std::ops::Deref; use std::rc::Rc; use self::CandidateKind::*; pub use self::PickKind::*; -struct ProbeContext<'a, 'tcx:'a> { - fcx: &'a FnCtxt<'a, 'tcx>, +struct ProbeContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, mode: Mode, item_name: ast::Name, @@ -41,46 +40,64 @@ struct ProbeContext<'a, 'tcx:'a> { opt_simplified_steps: Option>, inherent_candidates: Vec>, extension_candidates: Vec>, - impl_dups: HashSet, + impl_dups: FxHashSet, + import_id: Option, /// Collects near misses when the candidate functions are missing a `self` keyword and is only /// used for error reporting static_candidates: Vec, + /// Some(candidate) if there is a private candidate + private_candidate: Option, + /// Collects near misses when trait bounds for type parameters are unsatisfied and is only used /// for error reporting - unsatisfied_predicates: Vec> + unsatisfied_predicates: Vec>, +} + +impl<'a, 'gcx, 'tcx> Deref for ProbeContext<'a, 'gcx, 'tcx> { + type Target = FnCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.fcx + } } #[derive(Debug)] struct CandidateStep<'tcx> { self_ty: Ty<'tcx>, autoderefs: usize, - unsize: bool + unsize: bool, } #[derive(Debug)] struct Candidate<'tcx> { xform_self_ty: Ty<'tcx>, - item: ty::ImplOrTraitItem<'tcx>, + item: ty::AssociatedItem, kind: CandidateKind<'tcx>, + import_id: Option, } #[derive(Debug)] enum CandidateKind<'tcx> { - InherentImplCandidate(subst::Substs<'tcx>, - /* Normalize obligations */ Vec>), - ExtensionImplCandidate(/* Impl */ DefId, subst::Substs<'tcx>, - /* Normalize obligations */ Vec>), + InherentImplCandidate(&'tcx Substs<'tcx>, + // Normalize obligations + Vec>), + ExtensionImplCandidate(// Impl + DefId, + 
&'tcx Substs<'tcx>, + // Normalize obligations + Vec>), ObjectCandidate, TraitCandidate, - WhereClauseCandidate(/* Trait */ ty::PolyTraitRef<'tcx>), + WhereClauseCandidate(// Trait + ty::PolyTraitRef<'tcx>), } #[derive(Debug)] pub struct Pick<'tcx> { - pub item: ty::ImplOrTraitItem<'tcx>, + pub item: ty::AssociatedItem, pub kind: PickKind<'tcx>, + pub import_id: Option, // Indicates that the source expression should be autoderef'd N times // @@ -103,10 +120,12 @@ pub struct Pick<'tcx> { #[derive(Clone,Debug)] pub enum PickKind<'tcx> { InherentImplPick, - ExtensionImplPick(/* Impl */ DefId), + ExtensionImplPick(// Impl + DefId), ObjectPick, TraitPick, - WhereClausePick(/* Trait */ ty::PolyTraitRef<'tcx>), + WhereClausePick(// Trait + ty::PolyTraitRef<'tcx>), } pub type PickResult<'tcx> = Result, MethodError<'tcx>>; @@ -120,122 +139,123 @@ pub enum Mode { // An expression of the form `Type::item` or `::item`. // No autoderefs are performed, lookup is done based on the type each // implementation is for, and static methods are included. - Path + Path, } -pub fn probe<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - mode: Mode, - item_name: ast::Name, - self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId) - -> PickResult<'tcx> -{ - debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", - self_ty, - item_name, - scope_expr_id); - - // FIXME(#18741) -- right now, creating the steps involves evaluating the - // `*` operator, which registers obligations that then escape into - // the global fulfillment context and thus has global - // side-effects. This is a bit of a pain to refactor. So just let - // it ride, although it's really not great, and in fact could I - // think cause spurious errors. Really though this part should - // take place in the `fcx.infcx().probe` below. 
- let steps = if mode == Mode::MethodCall { - match create_steps(fcx, span, self_ty) { - Some(steps) => steps, - None =>return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), Vec::new(), - Vec::new(), mode))), - } - } else { - vec![CandidateStep { - self_ty: self_ty, - autoderefs: 0, - unsize: false - }] - }; - - // Create a list of simplified self types, if we can. - let mut simplified_steps = Vec::new(); - for step in &steps { - match ty::fast_reject::simplify_type(fcx.tcx(), step.self_ty, true) { - None => { break; } - Some(simplified_type) => { simplified_steps.push(simplified_type); } +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn probe_method(&self, + span: Span, + mode: Mode, + item_name: ast::Name, + self_ty: Ty<'tcx>, + scope_expr_id: ast::NodeId) + -> PickResult<'tcx> { + debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", + self_ty, + item_name, + scope_expr_id); + + // FIXME(#18741) -- right now, creating the steps involves evaluating the + // `*` operator, which registers obligations that then escape into + // the global fulfillment context and thus has global + // side-effects. This is a bit of a pain to refactor. So just let + // it ride, although it's really not great, and in fact could I + // think cause spurious errors. Really though this part should + // take place in the `self.probe` below. + let steps = if mode == Mode::MethodCall { + match self.create_steps(span, self_ty) { + Some(steps) => steps, + None => { + return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), + Vec::new(), + Vec::new(), + mode))) + } + } + } else { + vec![CandidateStep { + self_ty: self_ty, + autoderefs: 0, + unsize: false, + }] + }; + + // Create a list of simplified self types, if we can. 
+ let mut simplified_steps = Vec::new(); + for step in &steps { + match ty::fast_reject::simplify_type(self.tcx, step.self_ty, true) { + None => { + break; + } + Some(simplified_type) => { + simplified_steps.push(simplified_type); + } + } } - } - let opt_simplified_steps = - if simplified_steps.len() < steps.len() { + let opt_simplified_steps = if simplified_steps.len() < steps.len() { None // failed to convert at least one of the steps } else { Some(simplified_steps) }; - debug!("ProbeContext: steps for self_ty={:?} are {:?}", - self_ty, - steps); - - // this creates one big transaction so that all type variables etc - // that we create during the probe process are removed later - fcx.infcx().probe(|_| { - let mut probe_cx = ProbeContext::new(fcx, - span, - mode, - item_name, - steps, - opt_simplified_steps); - probe_cx.assemble_inherent_candidates(); - try!(probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)); - probe_cx.pick() - }) -} + debug!("ProbeContext: steps for self_ty={:?} are {:?}", + self_ty, + steps); + + // this creates one big transaction so that all type variables etc + // that we create during the probe process are removed later + self.probe(|_| { + let mut probe_cx = + ProbeContext::new(self, span, mode, item_name, steps, opt_simplified_steps); + probe_cx.assemble_inherent_candidates(); + probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)?; + probe_cx.pick() + }) + } -fn create_steps<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - self_ty: Ty<'tcx>) - -> Option>> { - let mut steps = Vec::new(); - - let (final_ty, dereferences, _) = check::autoderef(fcx, - span, - self_ty, - None, - UnresolvedTypeAction::Error, - NoPreference, - |t, d| { - steps.push(CandidateStep { - self_ty: t, - autoderefs: d, - unsize: false - }); - None::<()> // keep iterating until we can't anymore - }); - - match final_ty.sty { - ty::TyArray(elem_ty, _) => { - steps.push(CandidateStep { - self_ty: fcx.tcx().mk_slice(elem_ty), - 
autoderefs: dereferences, - unsize: true - }); + fn create_steps(&self, span: Span, self_ty: Ty<'tcx>) -> Option>> { + // FIXME: we don't need to create the entire steps in one pass + + let mut autoderef = self.autoderef(span, self_ty); + let mut steps: Vec<_> = autoderef.by_ref() + .map(|(ty, d)| { + CandidateStep { + self_ty: ty, + autoderefs: d, + unsize: false, + } + }) + .collect(); + + let final_ty = autoderef.unambiguous_final_ty(); + match final_ty.sty { + ty::TyArray(elem_ty, _) => { + let dereferences = steps.len() - 1; + + steps.push(CandidateStep { + self_ty: self.tcx.mk_slice(elem_ty), + autoderefs: dereferences, + unsize: true, + }); + } + ty::TyError => return None, + _ => (), } - ty::TyError => return None, - _ => (), - } - Some(steps) + debug!("create_steps: steps={:?}", steps); + + Some(steps) + } } -impl<'a,'tcx> ProbeContext<'a,'tcx> { - fn new(fcx: &'a FnCtxt<'a,'tcx>, +impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { + fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, mode: Mode, item_name: ast::Name, steps: Vec>, opt_simplified_steps: Option>) - -> ProbeContext<'a,'tcx> - { + -> ProbeContext<'a, 'gcx, 'tcx> { ProbeContext { fcx: fcx, span: span, @@ -243,10 +263,12 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { item_name: item_name, inherent_candidates: Vec::new(), extension_candidates: Vec::new(), - impl_dups: HashSet::new(), + impl_dups: FxHashSet(), + import_id: None, steps: Rc::new(steps), opt_simplified_steps: opt_simplified_steps, static_candidates: Vec::new(), + private_candidate: None, unsatisfied_predicates: Vec::new(), } } @@ -256,14 +278,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.extension_candidates.clear(); self.impl_dups.clear(); self.static_candidates.clear(); - } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.fcx.tcx() - } - - fn infcx(&self) -> &'a InferCtxt<'a, 'tcx> { - self.fcx.infcx() + self.private_candidate = None; } /////////////////////////////////////////////////////////////////////////// @@ -277,20 +292,20 @@ 
impl<'a,'tcx> ProbeContext<'a,'tcx> { } fn assemble_probe(&mut self, self_ty: Ty<'tcx>) { - debug!("assemble_probe: self_ty={:?}", - self_ty); + debug!("assemble_probe: self_ty={:?}", self_ty); match self_ty.sty { - ty::TyTrait(box ref data) => { - self.assemble_inherent_candidates_from_object(self_ty, data); - self.assemble_inherent_impl_candidates_for_type(data.principal_def_id()); + ty::TyDynamic(ref data, ..) => { + if let Some(p) = data.principal() { + self.assemble_inherent_candidates_from_object(self_ty, p); + self.assemble_inherent_impl_candidates_for_type(p.def_id()); + } } - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { self.assemble_inherent_impl_candidates_for_type(def.did); } ty::TyBox(_) => { - if let Some(box_did) = self.tcx().lang_items.owned_box() { + if let Some(box_did) = self.tcx.lang_items.owned_box() { self.assemble_inherent_impl_candidates_for_type(box_did); } } @@ -298,82 +313,79 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.assemble_inherent_candidates_from_param(self_ty, p); } ty::TyChar => { - let lang_def_id = self.tcx().lang_items.char_impl(); + let lang_def_id = self.tcx.lang_items.char_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyStr => { - let lang_def_id = self.tcx().lang_items.str_impl(); + let lang_def_id = self.tcx.lang_items.str_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TySlice(_) => { - let lang_def_id = self.tcx().lang_items.slice_impl(); + let lang_def_id = self.tcx.lang_items.slice_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { - let lang_def_id = self.tcx().lang_items.const_ptr_impl(); + let lang_def_id = self.tcx.lang_items.const_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } ty::TyRawPtr(ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { - let lang_def_id = self.tcx().lang_items.mut_ptr_impl(); + let lang_def_id = 
self.tcx.lang_items.mut_ptr_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyInt(ast::TyI8) => { - let lang_def_id = self.tcx().lang_items.i8_impl(); + ty::TyInt(ast::IntTy::I8) => { + let lang_def_id = self.tcx.lang_items.i8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyInt(ast::TyI16) => { - let lang_def_id = self.tcx().lang_items.i16_impl(); + ty::TyInt(ast::IntTy::I16) => { + let lang_def_id = self.tcx.lang_items.i16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyInt(ast::TyI32) => { - let lang_def_id = self.tcx().lang_items.i32_impl(); + ty::TyInt(ast::IntTy::I32) => { + let lang_def_id = self.tcx.lang_items.i32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyInt(ast::TyI64) => { - let lang_def_id = self.tcx().lang_items.i64_impl(); + ty::TyInt(ast::IntTy::I64) => { + let lang_def_id = self.tcx.lang_items.i64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyInt(ast::TyIs) => { - let lang_def_id = self.tcx().lang_items.isize_impl(); + ty::TyInt(ast::IntTy::Is) => { + let lang_def_id = self.tcx.lang_items.isize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyUint(ast::TyU8) => { - let lang_def_id = self.tcx().lang_items.u8_impl(); + ty::TyUint(ast::UintTy::U8) => { + let lang_def_id = self.tcx.lang_items.u8_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyUint(ast::TyU16) => { - let lang_def_id = self.tcx().lang_items.u16_impl(); + ty::TyUint(ast::UintTy::U16) => { + let lang_def_id = self.tcx.lang_items.u16_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyUint(ast::TyU32) => { - let lang_def_id = self.tcx().lang_items.u32_impl(); + ty::TyUint(ast::UintTy::U32) => { + let lang_def_id = self.tcx.lang_items.u32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyUint(ast::TyU64) => { - let lang_def_id = self.tcx().lang_items.u64_impl(); + 
ty::TyUint(ast::UintTy::U64) => { + let lang_def_id = self.tcx.lang_items.u64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyUint(ast::TyUs) => { - let lang_def_id = self.tcx().lang_items.usize_impl(); + ty::TyUint(ast::UintTy::Us) => { + let lang_def_id = self.tcx.lang_items.usize_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyFloat(ast::TyF32) => { - let lang_def_id = self.tcx().lang_items.f32_impl(); + ty::TyFloat(ast::FloatTy::F32) => { + let lang_def_id = self.tcx.lang_items.f32_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - ty::TyFloat(ast::TyF64) => { - let lang_def_id = self.tcx().lang_items.f64_impl(); + ty::TyFloat(ast::FloatTy::F64) => { + let lang_def_id = self.tcx.lang_items.f64_impl(); self.assemble_inherent_impl_for_primitive(lang_def_id); } - _ => { - } + _ => {} } } fn assemble_inherent_impl_for_primitive(&mut self, lang_def_id: Option) { if let Some(impl_def_id) = lang_def_id { - self.tcx().populate_implementations_for_primitive_if_necessary(impl_def_id); - self.assemble_inherent_impl_probe(impl_def_id); } } @@ -381,9 +393,9 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) { // Read the inherent implementation candidates for this type from the // metadata if necessary. 
- self.tcx().populate_inherent_implementations_for_type_if_necessary(def_id); + self.tcx.populate_inherent_implementations_for_type_if_necessary(def_id); - if let Some(impl_infos) = self.tcx().inherent_impls.borrow().get(&def_id) { + if let Some(impl_infos) = self.tcx.inherent_impls.borrow().get(&def_id) { for &impl_def_id in impl_infos.iter() { self.assemble_inherent_impl_probe(impl_def_id); } @@ -397,9 +409,11 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { debug!("assemble_inherent_impl_probe {:?}", impl_def_id); - let item = match impl_item(self.tcx(), impl_def_id, self.item_name) { + let item = match self.associated_item(impl_def_id) { Some(m) => m, - None => { return; } // No method with correct name on this impl + None => { + return; + } // No method with correct name on this impl }; if !self.has_applicable_self(&item) { @@ -407,16 +421,21 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { return self.record_static_candidate(ImplSource(impl_def_id)); } + if !item.vis.is_accessible_from(self.body_id, &self.tcx.map) { + self.private_candidate = Some(item.def()); + return; + } + let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id); - let impl_ty = impl_ty.subst(self.tcx(), &impl_substs); + let impl_ty = impl_ty.subst(self.tcx, impl_substs); // Determine the receiver type that the method itself expects. - let xform_self_ty = self.xform_self_ty(&item, impl_ty, &impl_substs); + let xform_self_ty = self.xform_self_ty(&item, impl_ty, impl_substs); // We can't use normalize_associated_types_in as it will pollute the // fcx's fulfillment context after this probe is over. 
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); - let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx()); + let cause = traits::ObligationCause::misc(self.span, self.body_id); + let mut selcx = &mut traits::SelectionContext::new(self.fcx); let traits::Normalized { value: xform_self_ty, obligations } = traits::normalize(selcx, cause, &xform_self_ty); debug!("assemble_inherent_impl_probe: xform_self_ty = {:?}", @@ -425,13 +444,14 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item, - kind: InherentImplCandidate(impl_substs, obligations) + kind: InherentImplCandidate(impl_substs, obligations), + import_id: self.import_id, }); } fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>, - data: &ty::TraitTy<'tcx>) { + principal: ty::PolyExistentialTraitRef<'tcx>) { debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty); @@ -442,18 +462,18 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { // a substitution that replaces `Self` with the object type // itself. Hence, a `&self` method will wind up with an // argument type like `&Trait`. - let trait_ref = data.principal_trait_ref_with_self_ty(self.tcx(), self_ty); + let trait_ref = principal.with_self_ty(self.tcx, self_ty); self.elaborate_bounds(&[trait_ref], |this, new_trait_ref, item| { let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); - let xform_self_ty = this.xform_self_ty(&item, - new_trait_ref.self_ty(), - new_trait_ref.substs); + let xform_self_ty = + this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); this.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item, - kind: ObjectCandidate + kind: ObjectCandidate, + import_id: this.import_id, }); }); } @@ -463,8 +483,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { param_ty: ty::ParamTy) { // FIXME -- Do we want to commit to this behavior for param bounds? 
- let bounds: Vec<_> = - self.fcx.inh.infcx.parameter_environment.caller_bounds + let bounds: Vec<_> = self.parameter_environment + .caller_bounds .iter() .filter_map(|predicate| { match *predicate { @@ -473,7 +493,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { ty::TyParam(ref p) if *p == param_ty => { Some(trait_predicate.to_poly_trait_ref()) } - _ => None + _ => None, } } ty::Predicate::Equate(..) | @@ -481,74 +501,49 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { ty::Predicate::RegionOutlives(..) | ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | - ty::Predicate::TypeOutlives(..) => { - None - } + ty::Predicate::ClosureKind(..) | + ty::Predicate::TypeOutlives(..) => None, } }) .collect(); self.elaborate_bounds(&bounds, |this, poly_trait_ref, item| { - let trait_ref = - this.erase_late_bound_regions(&poly_trait_ref); + let trait_ref = this.erase_late_bound_regions(&poly_trait_ref); - let xform_self_ty = - this.xform_self_ty(&item, - trait_ref.self_ty(), - trait_ref.substs); - - if let Some(ref m) = item.as_opt_method() { - debug!("found match: trait_ref={:?} substs={:?} m={:?}", - trait_ref, - trait_ref.substs, - m); - assert_eq!(m.generics.types.get_slice(subst::TypeSpace).len(), - trait_ref.substs.types.get_slice(subst::TypeSpace).len()); - assert_eq!(m.generics.regions.get_slice(subst::TypeSpace).len(), - trait_ref.substs.regions().get_slice(subst::TypeSpace).len()); - assert_eq!(m.generics.types.get_slice(subst::SelfSpace).len(), - trait_ref.substs.types.get_slice(subst::SelfSpace).len()); - assert_eq!(m.generics.regions.get_slice(subst::SelfSpace).len(), - trait_ref.substs.regions().get_slice(subst::SelfSpace).len()); - } + let xform_self_ty = this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs); // Because this trait derives from a where-clause, it // should not contain any inference variables or other // artifacts. This means it is safe to put into the // `WhereClauseCandidate` and (eventually) into the // `WhereClausePick`. 
- assert!(!trait_ref.substs.types.needs_infer()); + assert!(!trait_ref.substs.needs_infer()); this.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item, - kind: WhereClauseCandidate(poly_trait_ref) + kind: WhereClauseCandidate(poly_trait_ref), + import_id: this.import_id, }); }); } // Do a search through a list of bounds, using a callback to actually // create the candidates. - fn elaborate_bounds( - &mut self, - bounds: &[ty::PolyTraitRef<'tcx>], - mut mk_cand: F, - ) where - F: for<'b> FnMut( - &mut ProbeContext<'b, 'tcx>, - ty::PolyTraitRef<'tcx>, - ty::ImplOrTraitItem<'tcx>, - ), + fn elaborate_bounds(&mut self, bounds: &[ty::PolyTraitRef<'tcx>], mut mk_cand: F) + where F: for<'b> FnMut(&mut ProbeContext<'b, 'gcx, 'tcx>, + ty::PolyTraitRef<'tcx>, + ty::AssociatedItem) { debug!("elaborate_bounds(bounds={:?})", bounds); - let tcx = self.tcx(); + let tcx = self.tcx; for bound_trait_ref in traits::transitive_bounds(tcx, bounds) { - let item = match trait_item(tcx, - bound_trait_ref.def_id(), - self.item_name) { + let item = match self.associated_item(bound_trait_ref.def_id()) { Some(v) => v, - None => { continue; } + None => { + continue; + } }; if !self.has_applicable_self(&item) { @@ -561,14 +556,17 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_id: ast::NodeId) - -> Result<(), MethodError<'tcx>> - { - let mut duplicates = HashSet::new(); - let opt_applicable_traits = self.fcx.ccx.trait_map.get(&expr_id); + -> Result<(), MethodError<'tcx>> { + let mut duplicates = FxHashSet(); + let opt_applicable_traits = self.tcx.trait_map.get(&expr_id); if let Some(applicable_traits) = opt_applicable_traits { - for &trait_did in applicable_traits { + for trait_candidate in applicable_traits { + let trait_did = trait_candidate.def_id; if duplicates.insert(trait_did) { - try!(self.assemble_extension_candidates_for_trait(trait_did)); + self.import_id = trait_candidate.import_id; + let result = 
self.assemble_extension_candidates_for_trait(trait_did); + self.import_id = None; + result?; } } } @@ -576,10 +574,10 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { } fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> { - let mut duplicates = HashSet::new(); - for trait_info in suggest::all_traits(self.fcx.ccx) { + let mut duplicates = FxHashSet(); + for trait_info in suggest::all_traits(self.ccx) { if duplicates.insert(trait_info.def_id) { - try!(self.assemble_extension_candidates_for_trait(trait_info.def_id)); + self.assemble_extension_candidates_for_trait(trait_info.def_id)?; } } Ok(()) @@ -587,24 +585,22 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_extension_candidates_for_trait(&mut self, trait_def_id: DefId) - -> Result<(), MethodError<'tcx>> - { + -> Result<(), MethodError<'tcx>> { debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id); // Check whether `trait_def_id` defines a method with suitable name: - let trait_items = - self.tcx().trait_items(trait_def_id); - let maybe_item = - trait_items.iter() - .find(|item| item.name() == self.item_name); + let maybe_item = self.tcx.associated_items(trait_def_id) + .find(|item| item.name == self.item_name); let item = match maybe_item { Some(i) => i, - None => { return Ok(()); } + None => { + return Ok(()); + } }; // Check whether `trait_def_id` defines a method with suitable name: - if !self.has_applicable_self(item) { + if !self.has_applicable_self(&item) { debug!("method has inapplicable self"); self.record_static_candidate(TraitSource(trait_def_id)); return Ok(()); @@ -612,7 +608,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.assemble_extension_candidates_for_trait_impls(trait_def_id, item.clone()); - try!(self.assemble_closure_candidates(trait_def_id, item.clone())); + self.assemble_closure_candidates(trait_def_id, item.clone())?; self.assemble_projection_candidates(trait_def_id, item.clone()); @@ -623,12 +619,11 @@ impl<'a,'tcx> 
ProbeContext<'a,'tcx> { fn assemble_extension_candidates_for_trait_impls(&mut self, trait_def_id: DefId, - item: ty::ImplOrTraitItem<'tcx>) - { - let trait_def = self.tcx().lookup_trait_def(trait_def_id); + item: ty::AssociatedItem) { + let trait_def = self.tcx.lookup_trait_def(trait_def_id); // FIXME(arielb1): can we use for_each_relevant_impl here? - trait_def.for_each_impl(self.tcx(), |impl_def_id| { + trait_def.for_each_impl(self.tcx, |impl_def_id| { debug!("assemble_extension_candidates_for_trait_impl: trait_def_id={:?} \ impl_def_id={:?}", trait_def_id, @@ -642,24 +637,21 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { debug!("impl_substs={:?}", impl_substs); - let impl_trait_ref = - self.tcx().impl_trait_ref(impl_def_id) + let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id) .unwrap() // we know this is a trait impl - .subst(self.tcx(), &impl_substs); + .subst(self.tcx, impl_substs); debug!("impl_trait_ref={:?}", impl_trait_ref); // Determine the receiver type that the method itself expects. let xform_self_ty = - self.xform_self_ty(&item, - impl_trait_ref.self_ty(), - impl_trait_ref.substs); + self.xform_self_ty(&item, impl_trait_ref.self_ty(), impl_trait_ref.substs); // Normalize the receiver. We can't use normalize_associated_types_in // as it will pollute the fcx's fulfillment context after this probe // is over. 
- let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); - let mut selcx = &mut traits::SelectionContext::new(self.fcx.infcx()); + let cause = traits::ObligationCause::misc(self.span, self.body_id); + let mut selcx = &mut traits::SelectionContext::new(self.fcx); let traits::Normalized { value: xform_self_ty, obligations } = traits::normalize(selcx, cause, &xform_self_ty); @@ -668,7 +660,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.extension_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item.clone(), - kind: ExtensionImplCandidate(impl_def_id, impl_substs, obligations) + kind: ExtensionImplCandidate(impl_def_id, impl_substs, obligations), + import_id: self.import_id, }); }); } @@ -676,14 +669,18 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn impl_can_possibly_match(&self, impl_def_id: DefId) -> bool { let simplified_steps = match self.opt_simplified_steps { Some(ref simplified_steps) => simplified_steps, - None => { return true; } + None => { + return true; + } }; - let impl_type = self.tcx().lookup_item_type(impl_def_id); + let impl_type = self.tcx.item_type(impl_def_id); let impl_simplified_type = - match ty::fast_reject::simplify_type(self.tcx(), impl_type.ty, false) { + match ty::fast_reject::simplify_type(self.tcx, impl_type, false) { Some(simplified_type) => simplified_type, - None => { return true; } + None => { + return true; + } }; simplified_steps.contains(&impl_simplified_type) @@ -691,17 +688,16 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_closure_candidates(&mut self, trait_def_id: DefId, - item: ty::ImplOrTraitItem<'tcx>) - -> Result<(), MethodError<'tcx>> - { + item: ty::AssociatedItem) + -> Result<(), MethodError<'tcx>> { // Check if this is one of the Fn,FnMut,FnOnce traits. 
- let tcx = self.tcx(); + let tcx = self.tcx; let kind = if Some(trait_def_id) == tcx.lang_items.fn_trait() { - ty::FnClosureKind + ty::ClosureKind::Fn } else if Some(trait_def_id) == tcx.lang_items.fn_mut_trait() { - ty::FnMutClosureKind + ty::ClosureKind::FnMut } else if Some(trait_def_id) == tcx.lang_items.fn_once_trait() { - ty::FnOnceClosureKind + ty::ClosureKind::FnOnce } else { return Ok(()); }; @@ -715,7 +711,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { _ => continue, }; - let closure_kinds = &self.fcx.inh.tables.borrow().closure_kinds; + let closure_kinds = &self.tables.borrow().closure_kinds; let closure_kind = match closure_kinds.get(&closure_def_id) { Some(&k) => k, None => { @@ -732,18 +728,23 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { // for the purposes of our method lookup, we only take // receiver type into account, so we can just substitute // fresh types here to use during substitution and subtyping. - let trait_def = self.tcx().lookup_trait_def(trait_def_id); - let substs = self.infcx().fresh_substs_for_trait(self.span, - &trait_def.generics, - step.self_ty); - - let xform_self_ty = self.xform_self_ty(&item, - step.self_ty, - &substs); + let substs = Substs::for_item(self.tcx, + trait_def_id, + |def, _| self.region_var_for_def(self.span, def), + |def, substs| { + if def.index == 0 { + step.self_ty + } else { + self.type_var_for_def(self.span, def, substs) + } + }); + + let xform_self_ty = self.xform_self_ty(&item, step.self_ty, substs); self.inherent_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item.clone(), - kind: TraitCandidate + kind: TraitCandidate, + import_id: self.import_id, }); } @@ -752,8 +753,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_projection_candidates(&mut self, trait_def_id: DefId, - item: ty::ImplOrTraitItem<'tcx>) - { + item: ty::AssociatedItem) { debug!("assemble_projection_candidates(\ trait_def_id={:?}, \ item={:?})", @@ -761,37 +761,35 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { item); for step in 
self.steps.iter() { - debug!("assemble_projection_candidates: step={:?}", - step); + debug!("assemble_projection_candidates: step={:?}", step); - let projection_trait_ref = match step.self_ty.sty { - ty::TyProjection(ref data) => &data.trait_ref, + let (def_id, substs) = match step.self_ty.sty { + ty::TyProjection(ref data) => (data.trait_ref.def_id, data.trait_ref.substs), + ty::TyAnon(def_id, substs) => (def_id, substs), _ => continue, }; - debug!("assemble_projection_candidates: projection_trait_ref={:?}", - projection_trait_ref); + debug!("assemble_projection_candidates: def_id={:?} substs={:?}", + def_id, + substs); - let trait_predicates = self.tcx().lookup_predicates(projection_trait_ref.def_id); - let bounds = trait_predicates.instantiate(self.tcx(), projection_trait_ref.substs); - let predicates = bounds.predicates.into_vec(); + let trait_predicates = self.tcx.item_predicates(def_id); + let bounds = trait_predicates.instantiate(self.tcx, substs); + let predicates = bounds.predicates; debug!("assemble_projection_candidates: predicates={:?}", predicates); - for poly_bound in - traits::elaborate_predicates(self.tcx(), predicates) + for poly_bound in traits::elaborate_predicates(self.tcx, predicates) .filter_map(|p| p.to_opt_poly_trait_ref()) - .filter(|b| b.def_id() == trait_def_id) - { + .filter(|b| b.def_id() == trait_def_id) { let bound = self.erase_late_bound_regions(&poly_bound); - debug!("assemble_projection_candidates: projection_trait_ref={:?} bound={:?}", - projection_trait_ref, + debug!("assemble_projection_candidates: def_id={:?} substs={:?} bound={:?}", + def_id, + substs, bound); - if self.infcx().can_equate(&step.self_ty, &bound.self_ty()).is_ok() { - let xform_self_ty = self.xform_self_ty(&item, - bound.self_ty(), - bound.substs); + if self.can_equate(&step.self_ty, &bound.self_ty()).is_ok() { + let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs); debug!("assemble_projection_candidates: bound={:?} xform_self_ty={:?}", 
bound, @@ -800,7 +798,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.extension_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item.clone(), - kind: TraitCandidate + kind: TraitCandidate, + import_id: self.import_id, }); } } @@ -809,20 +808,16 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { fn assemble_where_clause_candidates(&mut self, trait_def_id: DefId, - item: ty::ImplOrTraitItem<'tcx>) - { + item: ty::AssociatedItem) { debug!("assemble_where_clause_candidates(trait_def_id={:?})", trait_def_id); - let caller_predicates = self.fcx.inh.infcx.parameter_environment.caller_bounds.clone(); - for poly_bound in traits::elaborate_predicates(self.tcx(), caller_predicates) - .filter_map(|p| p.to_opt_poly_trait_ref()) - .filter(|b| b.def_id() == trait_def_id) - { + let caller_predicates = self.parameter_environment.caller_bounds.clone(); + for poly_bound in traits::elaborate_predicates(self.tcx, caller_predicates) + .filter_map(|p| p.to_opt_poly_trait_ref()) + .filter(|b| b.def_id() == trait_def_id) { let bound = self.erase_late_bound_regions(&poly_bound); - let xform_self_ty = self.xform_self_ty(&item, - bound.self_ty(), - bound.substs); + let xform_self_ty = self.xform_self_ty(&item, bound.self_ty(), bound.substs); debug!("assemble_where_clause_candidates: bound={:?} xform_self_ty={:?}", bound, @@ -831,7 +826,8 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { self.extension_candidates.push(Candidate { xform_self_ty: xform_self_ty, item: item.clone(), - kind: WhereClauseCandidate(poly_bound) + kind: WhereClauseCandidate(poly_bound), + import_id: self.import_id, }); } } @@ -840,50 +836,61 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { // THE ACTUAL SEARCH fn pick(mut self) -> PickResult<'tcx> { - match self.pick_core() { - Some(r) => return r, - None => {} + if let Some(r) = self.pick_core() { + return r; } let static_candidates = mem::replace(&mut self.static_candidates, vec![]); + let private_candidate = mem::replace(&mut self.private_candidate, None); let 
unsatisfied_predicates = mem::replace(&mut self.unsatisfied_predicates, vec![]); // things failed, so lets look at all traits, for diagnostic purposes now: self.reset(); let span = self.span; - let tcx = self.tcx(); + let tcx = self.tcx; - try!(self.assemble_extension_candidates_for_all_traits()); + self.assemble_extension_candidates_for_all_traits()?; let out_of_scope_traits = match self.pick_core() { - Some(Ok(p)) => vec![p.item.container().id()], - Some(Err(MethodError::Ambiguity(v))) => v.into_iter().map(|source| { - match source { - TraitSource(id) => id, - ImplSource(impl_id) => { - match tcx.trait_id_of_impl(impl_id) { - Some(id) => id, - None => - tcx.sess.span_bug(span, + Some(Ok(p)) => vec![p.item.container.id()], + Some(Err(MethodError::Ambiguity(v))) => { + v.into_iter() + .map(|source| { + match source { + TraitSource(id) => id, + ImplSource(impl_id) => { + match tcx.trait_id_of_impl(impl_id) { + Some(id) => id, + None => { + span_bug!(span, "found inherent method when looking at traits") + } + } + } } - } - } - }).collect(), + }) + .collect() + } Some(Err(MethodError::NoMatch(NoMatchData { out_of_scope_traits: others, .. 
}))) => { assert!(others.is_empty()); vec![] } Some(Err(MethodError::ClosureAmbiguity(..))) => { // this error only occurs when assembling candidates - tcx.sess.span_bug(span, "encountered ClosureAmbiguity from pick_core"); + span_bug!(span, "encountered ClosureAmbiguity from pick_core"); } - None => vec![], + _ => vec![], }; - Err(MethodError::NoMatch(NoMatchData::new(static_candidates, unsatisfied_predicates, - out_of_scope_traits, self.mode))) + if let Some(def) = private_candidate { + return Err(MethodError::PrivateMatch(def)); + } + + Err(MethodError::NoMatch(NoMatchData::new(static_candidates, + unsatisfied_predicates, + out_of_scope_traits, + self.mode))) } fn pick_core(&mut self) -> Option> { @@ -900,90 +907,88 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { return None; } - match self.pick_by_value_method(step) { - Some(result) => return Some(result), - None => {} + if let Some(result) = self.pick_by_value_method(step) { + return Some(result); } self.pick_autorefd_method(step) } - fn pick_by_value_method(&mut self, - step: &CandidateStep<'tcx>) - -> Option> - { - /*! - * For each type `T` in the step list, this attempts to find a - * method where the (transformed) self type is exactly `T`. We - * do however do one transformation on the adjustment: if we - * are passing a region pointer in, we will potentially - * *reborrow* it to a shorter lifetime. This allows us to - * transparently pass `&mut` pointers, in particular, without - * consuming them for their entire lifetime. - */ + fn pick_by_value_method(&mut self, step: &CandidateStep<'tcx>) -> Option> { + //! For each type `T` in the step list, this attempts to find a + //! method where the (transformed) self type is exactly `T`. We + //! do however do one transformation on the adjustment: if we + //! are passing a region pointer in, we will potentially + //! *reborrow* it to a shorter lifetime. This allows us to + //! transparently pass `&mut` pointers, in particular, without + //! 
consuming them for their entire lifetime. if step.unsize { return None; } - self.pick_method(step.self_ty).map(|r| r.map(|mut pick| { - pick.autoderefs = step.autoderefs; + self.pick_method(step.self_ty).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; - // Insert a `&*` or `&mut *` if this is a reference type: - if let ty::TyRef(_, mt) = step.self_ty.sty { - pick.autoderefs += 1; - pick.autoref = Some(mt.mutbl); - } + // Insert a `&*` or `&mut *` if this is a reference type: + if let ty::TyRef(_, mt) = step.self_ty.sty { + pick.autoderefs += 1; + pick.autoref = Some(mt.mutbl); + } - pick - })) + pick + }) + }) } - fn pick_autorefd_method(&mut self, - step: &CandidateStep<'tcx>) - -> Option> - { - let tcx = self.tcx(); + fn pick_autorefd_method(&mut self, step: &CandidateStep<'tcx>) -> Option> { + let tcx = self.tcx; // In general, during probing we erase regions. See // `impl_self_ty()` for an explanation. - let region = tcx.mk_region(ty::ReStatic); + let region = tcx.mk_region(ty::ReErased); // Search through mutabilities in order to find one where pick works: - [hir::MutImmutable, hir::MutMutable].iter().filter_map(|&m| { - let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { - ty: step.self_ty, - mutbl: m - }); - self.pick_method(autoref_ty).map(|r| r.map(|mut pick| { - pick.autoderefs = step.autoderefs; - pick.autoref = Some(m); - pick.unsize = if step.unsize { - Some(step.self_ty) - } else { - None - }; - pick - })) - }).nth(0) + [hir::MutImmutable, hir::MutMutable] + .iter() + .filter_map(|&m| { + let autoref_ty = tcx.mk_ref(region, + ty::TypeAndMut { + ty: step.self_ty, + mutbl: m, + }); + self.pick_method(autoref_ty).map(|r| { + r.map(|mut pick| { + pick.autoderefs = step.autoderefs; + pick.autoref = Some(m); + pick.unsize = if step.unsize { + Some(step.self_ty) + } else { + None + }; + pick + }) + }) + }) + .nth(0) } fn pick_method(&mut self, self_ty: Ty<'tcx>) -> Option> { - debug!("pick_method(self_ty={})", 
self.infcx().ty_to_string(self_ty)); + debug!("pick_method(self_ty={})", self.ty_to_string(self_ty)); let mut possibly_unsatisfied_predicates = Vec::new(); debug!("searching inherent candidates"); - match self.consider_candidates(self_ty, &self.inherent_candidates, - &mut possibly_unsatisfied_predicates) { - None => {} - Some(pick) => { - return Some(pick); - } + if let Some(pick) = self.consider_candidates(self_ty, + &self.inherent_candidates, + &mut possibly_unsatisfied_predicates) { + return Some(pick); } debug!("searching extension candidates"); - let res = self.consider_candidates(self_ty, &self.extension_candidates, + let res = self.consider_candidates(self_ty, + &self.extension_candidates, &mut possibly_unsatisfied_predicates); if let None = res { self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates); @@ -996,18 +1001,18 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { probes: &[Candidate<'tcx>], possibly_unsatisfied_predicates: &mut Vec>) -> Option> { - let mut applicable_candidates: Vec<_> = - probes.iter() - .filter(|&probe| self.consider_probe(self_ty, - probe,possibly_unsatisfied_predicates)) - .collect(); + let mut applicable_candidates: Vec<_> = probes.iter() + .filter(|&probe| self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates)) + .collect(); debug!("applicable_candidates: {:?}", applicable_candidates); if applicable_candidates.len() > 1 { match self.collapse_candidates_to_trait_pick(&applicable_candidates[..]) { - Some(pick) => { return Some(Ok(pick)); } - None => { } + Some(pick) => { + return Some(Ok(pick)); + } + None => {} } } @@ -1016,21 +1021,26 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { return Some(Err(MethodError::Ambiguity(sources))); } - applicable_candidates.pop().map(|probe| { - Ok(probe.to_unadjusted_pick()) - }) + applicable_candidates.pop().map(|probe| Ok(probe.to_unadjusted_pick())) } - fn consider_probe(&self, self_ty: Ty<'tcx>, probe: &Candidate<'tcx>, - possibly_unsatisfied_predicates: &mut Vec>) -> bool { - 
debug!("consider_probe: self_ty={:?} probe={:?}", - self_ty, - probe); + fn consider_probe(&self, + self_ty: Ty<'tcx>, + probe: &Candidate<'tcx>, + possibly_unsatisfied_predicates: &mut Vec>) + -> bool { + debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe); - self.infcx().probe(|_| { + self.probe(|_| { // First check that the self type can be related. - match self.make_sub_ty(self_ty, probe.xform_self_ty) { - Ok(()) => { } + match self.sub_types(false, + &ObligationCause::dummy(), + self_ty, + probe.xform_self_ty) { + Ok(InferOk { obligations, value: () }) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()) + } Err(_) => { debug!("--> cannot relate self-types"); return false; @@ -1043,7 +1053,7 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { // don't have enough information to fully evaluate). let (impl_def_id, substs, ref_obligations) = match probe.kind { InherentImplCandidate(ref substs, ref ref_obligations) => { - (probe.item.container().id(), substs, ref_obligations) + (probe.item.container.id(), substs, ref_obligations) } ExtensionImplCandidate(impl_def_id, ref substs, ref ref_obligations) => { @@ -1058,20 +1068,17 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { } }; - let selcx = &mut traits::SelectionContext::new(self.infcx()); - let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); + let selcx = &mut traits::SelectionContext::new(self); + let cause = traits::ObligationCause::misc(self.span, self.body_id); // Check whether the impl imposes obligations we have to worry about. 
- let impl_bounds = self.tcx().lookup_predicates(impl_def_id); - let impl_bounds = impl_bounds.instantiate(self.tcx(), substs); - let traits::Normalized { value: impl_bounds, - obligations: norm_obligations } = + let impl_bounds = self.tcx.item_predicates(impl_def_id); + let impl_bounds = impl_bounds.instantiate(self.tcx, substs); + let traits::Normalized { value: impl_bounds, obligations: norm_obligations } = traits::normalize(selcx, cause.clone(), &impl_bounds); // Convert the bounds into obligations. - let obligations = - traits::predicates_for_generics(cause.clone(), - &impl_bounds); + let obligations = traits::predicates_for_generics(cause.clone(), &impl_bounds); debug!("impl_obligations={:?}", obligations); // Evaluate those obligations to see if they might possibly hold. @@ -1107,16 +1114,14 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { /// /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we /// use, so it's ok to just commit to "using the method from the trait Foo". - fn collapse_candidates_to_trait_pick(&self, - probes: &[&Candidate<'tcx>]) - -> Option> { + fn collapse_candidates_to_trait_pick(&self, probes: &[&Candidate<'tcx>]) -> Option> { // Do all probes correspond to the same trait? 
- let container = probes[0].item.container(); + let container = probes[0].item.container; match container { ty::TraitContainer(_) => {} - ty::ImplContainer(_) => return None + ty::ImplContainer(_) => return None, } - if probes[1..].iter().any(|p| p.item.container() != container) { + if probes[1..].iter().any(|p| p.item.container != container) { return None; } @@ -1124,31 +1129,20 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { Some(Pick { item: probes[0].item.clone(), kind: TraitPick, + import_id: probes[0].import_id, autoderefs: 0, autoref: None, - unsize: None + unsize: None, }) } /////////////////////////////////////////////////////////////////////////// // MISCELLANY - - fn make_sub_ty(&self, sub: Ty<'tcx>, sup: Ty<'tcx>) -> infer::UnitResult<'tcx> { - self.infcx().sub_types(false, TypeOrigin::Misc(DUMMY_SP), sub, sup) - } - - fn has_applicable_self(&self, item: &ty::ImplOrTraitItem) -> bool { + fn has_applicable_self(&self, item: &ty::AssociatedItem) -> bool { // "fast track" -- check for usage of sugar - match *item { - ty::ImplOrTraitItem::MethodTraitItem(ref method) => - match method.explicit_self { - ty::ExplicitSelfCategory::Static => self.mode == Mode::Path, - ty::ExplicitSelfCategory::ByValue | - ty::ExplicitSelfCategory::ByReference(..) | - ty::ExplicitSelfCategory::ByBox => true, - }, - ty::ImplOrTraitItem::ConstTraitItem(..) => self.mode == Mode::Path, - _ => false, + match self.mode { + Mode::MethodCall => item.method_has_self_argument, + Mode::Path => true } // FIXME -- check for types that deref to `Self`, // like `Rc` and so on. 
@@ -1163,27 +1157,26 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { } fn xform_self_ty(&self, - item: &ty::ImplOrTraitItem<'tcx>, + item: &ty::AssociatedItem, impl_ty: Ty<'tcx>, - substs: &subst::Substs<'tcx>) - -> Ty<'tcx> - { - match item.as_opt_method() { - Some(ref method) => self.xform_method_self_ty(method, impl_ty, - substs), - None => impl_ty, + substs: &Substs<'tcx>) + -> Ty<'tcx> { + if item.kind == ty::AssociatedKind::Method && self.mode == Mode::MethodCall { + self.xform_method_self_ty(item.def_id, impl_ty, substs) + } else { + impl_ty } } fn xform_method_self_ty(&self, - method: &Rc>, + method: DefId, impl_ty: Ty<'tcx>, - substs: &subst::Substs<'tcx>) - -> Ty<'tcx> - { + substs: &Substs<'tcx>) + -> Ty<'tcx> { + let self_ty = self.tcx.item_type(method).fn_sig().input(0); debug!("xform_self_ty(impl_ty={:?}, self_ty={:?}, substs={:?})", impl_ty, - method.fty.sig.0.inputs.get(0), + self_ty, substs); assert!(!substs.has_escaping_regions()); @@ -1193,64 +1186,48 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { // are given do not include type/lifetime parameters for the // method yet. So create fresh variables here for those too, // if there are any. - assert_eq!(substs.types.len(subst::FnSpace), 0); - assert_eq!(substs.regions().len(subst::FnSpace), 0); - - if self.mode == Mode::Path { - return impl_ty; - } - - let mut placeholder; - let mut substs = substs; - if - !method.generics.types.is_empty_in(subst::FnSpace) || - !method.generics.regions.is_empty_in(subst::FnSpace) - { - // In general, during probe we erase regions. See - // `impl_self_ty()` for an explanation. 
- let method_regions = - method.generics.regions.get_slice(subst::FnSpace) - .iter() - .map(|_| ty::ReStatic) - .collect(); - - placeholder = (*substs).clone().with_method(Vec::new(), method_regions); - - self.infcx().type_vars_for_defs( - self.span, - subst::FnSpace, - &mut placeholder, - method.generics.types.get_slice(subst::FnSpace)); - - substs = &placeholder; - } + let generics = self.tcx.item_generics(method); + assert_eq!(substs.types().count(), generics.parent_types as usize); + assert_eq!(substs.regions().count(), generics.parent_regions as usize); // Erase any late-bound regions from the method and substitute // in the values from the substitution. - let xform_self_ty = method.fty.sig.input(0); - let xform_self_ty = self.erase_late_bound_regions(&xform_self_ty); - let xform_self_ty = xform_self_ty.subst(self.tcx(), substs); + let xform_self_ty = self.erase_late_bound_regions(&self_ty); - xform_self_ty + if generics.types.is_empty() && generics.regions.is_empty() { + xform_self_ty.subst(self.tcx, substs) + } else { + let substs = Substs::for_item(self.tcx, method, |def, _| { + let i = def.index as usize; + if i < substs.params().len() { + substs.region_at(i) + } else { + // In general, during probe we erase regions. See + // `impl_self_ty()` for an explanation. + self.tcx.mk_region(ty::ReErased) + } + }, |def, cur_substs| { + let i = def.index as usize; + if i < substs.params().len() { + substs.type_at(i) + } else { + self.type_var_for_def(self.span, def, cur_substs) + } + }); + xform_self_ty.subst(self.tcx, substs) + } } /// Get the type of an impl and generate substitutions with placeholders. 
- fn impl_ty_and_substs(&self, - impl_def_id: DefId) - -> (Ty<'tcx>, subst::Substs<'tcx>) - { - let impl_pty = self.tcx().lookup_item_type(impl_def_id); + fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, &'tcx Substs<'tcx>) { + let impl_ty = self.tcx.item_type(impl_def_id); - let type_vars = - impl_pty.generics.types.map( - |_| self.infcx().next_ty_var()); + let substs = Substs::for_item(self.tcx, + impl_def_id, + |_, _| self.tcx.mk_region(ty::ReErased), + |_, _| self.next_ty_var()); - let region_placeholders = - impl_pty.generics.regions.map( - |_| ty::ReStatic); // see erase_late_bound_regions() for an expl of why 'static - - let substs = subst::Substs::new(type_vars, region_placeholders); - (impl_pty.ty, substs) + (impl_ty, substs) } /// Replace late-bound-regions bound by `value` with `'static` using @@ -1272,37 +1249,16 @@ impl<'a,'tcx> ProbeContext<'a,'tcx> { /// and/or tracking the substitution and /// so forth. fn erase_late_bound_regions(&self, value: &ty::Binder) -> T - where T : TypeFoldable<'tcx> + where T: TypeFoldable<'tcx> { - self.tcx().erase_late_bound_regions(value) + self.tcx.erase_late_bound_regions(value) } -} - -fn impl_item<'tcx>(tcx: &ty::ctxt<'tcx>, - impl_def_id: DefId, - item_name: ast::Name) - -> Option> -{ - let impl_items = tcx.impl_items.borrow(); - let impl_items = impl_items.get(&impl_def_id).unwrap(); - impl_items - .iter() - .map(|&did| tcx.impl_or_trait_item(did.def_id())) - .find(|item| item.name() == item_name) -} -/// Find item with name `item_name` defined in `trait_def_id` -/// and return it, or `None`, if no such item. 
-fn trait_item<'tcx>(tcx: &ty::ctxt<'tcx>, - trait_def_id: DefId, - item_name: ast::Name) - -> Option> -{ - let trait_items = tcx.trait_items(trait_def_id); - debug!("trait_method; items: {:?}", trait_items); - trait_items.iter() - .find(|item| item.name() == item_name) - .cloned() + /// Find item with name `item_name` defined in impl/trait `def_id` + /// and return it, or `None`, if no such item was defined there. + fn associated_item(&self, def_id: DefId) -> Option { + self.fcx.associated_item(def_id, self.item_name) + } } impl<'tcx> Candidate<'tcx> { @@ -1310,10 +1266,8 @@ impl<'tcx> Candidate<'tcx> { Pick { item: self.item.clone(), kind: match self.kind { - InherentImplCandidate(_, _) => InherentImplPick, - ExtensionImplCandidate(def_id, _, _) => { - ExtensionImplPick(def_id) - } + InherentImplCandidate(..) => InherentImplPick, + ExtensionImplCandidate(def_id, ..) => ExtensionImplPick(def_id), ObjectCandidate => ObjectPick, TraitCandidate => TraitPick, WhereClauseCandidate(ref trait_ref) => { @@ -1322,26 +1276,25 @@ impl<'tcx> Candidate<'tcx> { // inference variables or other artifacts. This // means they are safe to put into the // `WhereClausePick`. - assert!(!trait_ref.substs().types.needs_infer()); + assert!(!trait_ref.substs().needs_infer()); WhereClausePick(trait_ref.clone()) } }, + import_id: self.import_id, autoderefs: 0, autoref: None, - unsize: None + unsize: None, } } fn to_source(&self) -> CandidateSource { match self.kind { - InherentImplCandidate(_, _) => { - ImplSource(self.item.container().id()) - } - ExtensionImplCandidate(def_id, _, _) => ImplSource(def_id), + InherentImplCandidate(..) => ImplSource(self.item.container.id()), + ExtensionImplCandidate(def_id, ..) 
=> ImplSource(def_id), ObjectCandidate | TraitCandidate | - WhereClauseCandidate(_) => TraitSource(self.item.container().id()), + WhereClauseCandidate(_) => TraitSource(self.item.container.id()), } } } diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 560e84b52d1d6..7cfefefc0d964 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -13,355 +13,397 @@ use CrateCtxt; -use astconv::AstConv; -use check::{self, FnCtxt}; -use front::map as hir_map; -use middle::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable}; -use middle::cstore::{self, CrateStore, DefLike}; -use middle::def; -use middle::def_id::DefId; +use check::FnCtxt; +use rustc::hir::map as hir_map; +use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TypeFoldable}; +use hir::def::Def; +use hir::def_id::{CRATE_DEF_INDEX, DefId}; use middle::lang_items::FnOnceTraitLangItem; -use middle::subst::Substs; -use middle::traits::{Obligation, SelectionContext}; -use util::nodemap::{FnvHashSet}; +use rustc::traits::{Obligation, SelectionContext}; +use util::nodemap::FxHashSet; use syntax::ast; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; -use rustc_front::print::pprust; -use rustc_front::hir; +use errors::DiagnosticBuilder; +use syntax_pos::Span; + +use rustc::hir::print as pprust; +use rustc::hir; use std::cell; use std::cmp::Ordering; -use super::{MethodError, NoMatchData, CandidateSource, impl_item, trait_item}; +use super::{MethodError, NoMatchData, CandidateSource}; use super::probe::Mode; -pub fn report_error<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - rcvr_ty: Ty<'tcx>, - item_name: ast::Name, - rcvr_expr: Option<&hir::Expr>, - error: MethodError<'tcx>) -{ - // avoid suggestions when we don't know what's going on. 
- if rcvr_ty.references_error() { - return +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + fn is_fn_ty(&self, ty: &Ty<'tcx>, span: Span) -> bool { + let tcx = self.tcx; + match ty.sty { + // Not all of these (e.g. unsafe fns) implement FnOnce + // so we look for these beforehand + ty::TyClosure(..) | + ty::TyFnDef(..) | + ty::TyFnPtr(_) => true, + // If it's not a simple function, look for things which implement FnOnce + _ => { + let fn_once = match tcx.lang_items.require(FnOnceTraitLangItem) { + Ok(fn_once) => fn_once, + Err(..) => return false, + }; + + self.autoderef(span, ty).any(|(ty, _)| { + self.probe(|_| { + let fn_once_substs = tcx.mk_substs_trait(ty, &[self.next_ty_var()]); + let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs); + let poly_trait_ref = trait_ref.to_poly_trait_ref(); + let obligation = + Obligation::misc(span, self.body_id, poly_trait_ref.to_predicate()); + SelectionContext::new(self).evaluate_obligation(&obligation) + }) + }) + } + } } - match error { - MethodError::NoMatch(NoMatchData { static_candidates: static_sources, - unsatisfied_predicates, - out_of_scope_traits, - mode }) => { - let cx = fcx.tcx(); - - let mut err = fcx.type_error_struct( - span, - |actual| { - format!("no {} named `{}` found for type `{}` \ - in the current scope", - if mode == Mode::MethodCall { "method" } - else { "associated item" }, + pub fn report_method_error(&self, + span: Span, + rcvr_ty: Ty<'tcx>, + item_name: ast::Name, + rcvr_expr: Option<&hir::Expr>, + error: MethodError<'tcx>) { + // avoid suggestions when we don't know what's going on. + if rcvr_ty.references_error() { + return; + } + + let report_candidates = |err: &mut DiagnosticBuilder, mut sources: Vec| { + + sources.sort(); + sources.dedup(); + // Dynamic limit to avoid hiding just one candidate, which is silly. 
+ let limit = if sources.len() == 5 { 5 } else { 4 }; + + for (idx, source) in sources.iter().take(limit).enumerate() { + match *source { + CandidateSource::ImplSource(impl_did) => { + // Provide the best span we can. Use the item, if local to crate, else + // the impl, if local to crate (item may be defaulted), else nothing. + let item = self.associated_item(impl_did, item_name) + .or_else(|| { + self.associated_item( + self.tcx.impl_trait_ref(impl_did).unwrap().def_id, + + item_name + ) + }).unwrap(); + let note_span = self.tcx.map.span_if_local(item.def_id).or_else(|| { + self.tcx.map.span_if_local(impl_did) + }); + + let impl_ty = self.impl_self_ty(span, impl_did).ty; + + let insertion = match self.tcx.impl_trait_ref(impl_did) { + None => format!(""), + Some(trait_ref) => { + format!(" of the trait `{}`", + self.tcx.item_path_str(trait_ref.def_id)) + } + }; + + let note_str = format!("candidate #{} is defined in an impl{} \ + for the type `{}`", + idx + 1, + insertion, + impl_ty); + if let Some(note_span) = note_span { + // We have a span pointing to the method. Show note with snippet. + err.span_note(note_span, ¬e_str); + } else { + err.note(¬e_str); + } + } + CandidateSource::TraitSource(trait_did) => { + let item = self.associated_item(trait_did, item_name).unwrap(); + let item_span = self.tcx.def_span(item.def_id); + span_note!(err, + item_span, + "candidate #{} is defined in the trait `{}`", + idx + 1, + self.tcx.item_path_str(trait_did)); + } + } + } + if sources.len() > limit { + err.note(&format!("and {} others", sources.len() - limit)); + } + }; + + match error { + MethodError::NoMatch(NoMatchData { static_candidates: static_sources, + unsatisfied_predicates, + out_of_scope_traits, + mode, + .. 
}) => { + let tcx = self.tcx; + + let mut err = self.type_error_struct(span, + |actual| { + format!("no {} named `{}` found for type `{}` in the current scope", + if mode == Mode::MethodCall { + "method" + } else { + "associated item" + }, item_name, actual) }, - rcvr_ty, - None); - - // If the item has the name of a field, give a help note - if let (&ty::TyStruct(def, substs), Some(expr)) = (&rcvr_ty.sty, rcvr_expr) { - if let Some(field) = def.struct_variant().find_field_named(item_name) { - let expr_string = match cx.sess.codemap().span_to_snippet(expr.span) { - Ok(expr_string) => expr_string, - _ => "s".into() // Default to a generic placeholder for the - // expression when we can't generate a string - // snippet - }; - - macro_rules! span_stored_function { - () => { - err.span_note(span, - &format!("use `({0}.{1})(...)` if you meant to call \ - the function stored in the `{1}` field", - expr_string, item_name)); + rcvr_ty); + + // If the method name is the name of a field with a function or closure type, + // give a helping note that it has to be called as (x.f)(...). + if let Some(expr) = rcvr_expr { + for (ty, _) in self.autoderef(span, rcvr_ty) { + match ty.sty { + ty::TyAdt(def, substs) if !def.is_enum() => { + if let Some(field) = def.struct_variant() + .find_field_named(item_name) { + let snippet = tcx.sess.codemap().span_to_snippet(expr.span); + let expr_string = match snippet { + Ok(expr_string) => expr_string, + _ => "s".into(), // Default to a generic placeholder for the + // expression when we can't generate a + // string snippet + }; + + let field_ty = field.ty(tcx, substs); + + if self.is_fn_ty(&field_ty, span) { + err.span_note(span, + &format!("use `({0}.{1})(...)` if you \ + meant to call the function \ + stored in the `{1}` field", + expr_string, + item_name)); + } else { + err.span_note(span, + &format!("did you mean to write `{0}.{1}`?", + expr_string, + item_name)); + } + break; + } + } + _ => {} } } + } - macro_rules! 
span_did_you_mean { - () => { - err.span_note(span, &format!("did you mean to write `{0}.{1}`?", - expr_string, item_name)); + if self.is_fn_ty(&rcvr_ty, span) { + macro_rules! report_function { + ($span:expr, $name:expr) => { + err.note(&format!("{} is a function, perhaps you wish to call it", + $name)); } } - // Determine if the field can be used as a function in some way - let field_ty = field.ty(cx, substs); - - match field_ty.sty { - // Not all of these (e.g. unsafe fns) implement FnOnce - // so we look for these beforehand - ty::TyClosure(..) | ty::TyBareFn(..) => { - span_stored_function!(); - } - // If it's not a simple function, look for things which implement FnOnce - _ => { - if let Ok(fn_once_trait_did) = - cx.lang_items.require(FnOnceTraitLangItem) { - let infcx = fcx.infcx(); - infcx.probe(|_| { - let fn_once_substs = - Substs::new_trait(vec![infcx.next_ty_var()], - Vec::new(), - field_ty); - let trait_ref = - ty::TraitRef::new(fn_once_trait_did, - cx.mk_substs(fn_once_substs)); - let poly_trait_ref = trait_ref.to_poly_trait_ref(); - let obligation = Obligation::misc(span, - fcx.body_id, - poly_trait_ref - .to_predicate()); - let mut selcx = SelectionContext::new(infcx); - - if selcx.evaluate_obligation(&obligation) { - span_stored_function!(); - } else { - span_did_you_mean!(); - } - }); - } else { - span_did_you_mean!(); + if let Some(expr) = rcvr_expr { + if let Ok(expr_string) = tcx.sess.codemap().span_to_snippet(expr.span) { + report_function!(expr.span, expr_string); + } else if let hir::ExprPath(hir::QPath::Resolved(_, ref path)) = expr.node { + if let Some(segment) = path.segments.last() { + report_function!(expr.span, segment.name); } } } } - } - if !static_sources.is_empty() { - err.fileline_note( - span, - "found defined static methods, maybe a `self` is missing?"); + if !static_sources.is_empty() { + err.note("found the following associated functions; to be used as methods, \ + functions must have a `self` parameter"); - 
report_candidates(fcx, &mut err, span, item_name, static_sources); - } + report_candidates(&mut err, static_sources); + } + + if !unsatisfied_predicates.is_empty() { + let bound_list = unsatisfied_predicates.iter() + .map(|p| format!("`{} : {}`", p.self_ty(), p)) + .collect::>() + .join(", "); + err.note(&format!("the method `{}` exists but the following trait bounds \ + were not satisfied: {}", + item_name, + bound_list)); + } - if !unsatisfied_predicates.is_empty() { - let bound_list = unsatisfied_predicates.iter() - .map(|p| format!("`{} : {}`", - p.self_ty(), - p)) - .collect::>() - .join(", "); - err.fileline_note( - span, - &format!("the method `{}` exists but the \ - following trait bounds were not satisfied: {}", - item_name, - bound_list)); + self.suggest_traits_to_import(&mut err, + span, + rcvr_ty, + item_name, + rcvr_expr, + out_of_scope_traits); + err.emit(); } - suggest_traits_to_import(fcx, &mut err, span, rcvr_ty, item_name, - rcvr_expr, out_of_scope_traits); - err.emit(); - } + MethodError::Ambiguity(sources) => { + let mut err = struct_span_err!(self.sess(), + span, + E0034, + "multiple applicable items in scope"); + err.span_label(span, &format!("multiple `{}` found", item_name)); - MethodError::Ambiguity(sources) => { - let mut err = struct_span_err!(fcx.sess(), span, E0034, - "multiple applicable items in scope"); + report_candidates(&mut err, sources); + err.emit(); + } - report_candidates(fcx, &mut err, span, item_name, sources); - err.emit(); - } + MethodError::ClosureAmbiguity(trait_def_id) => { + let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \ + invoked on this closure as we have not yet inferred what \ + kind of closure it is", + item_name, + self.tcx.item_path_str(trait_def_id)); + let msg = if let Some(callee) = rcvr_expr { + format!("{}; use overloaded call notation instead (e.g., `{}()`)", + msg, + pprust::expr_to_string(callee)) + } else { + msg + }; + self.sess().span_err(span, &msg); + } - 
MethodError::ClosureAmbiguity(trait_def_id) => { - let msg = format!("the `{}` method from the `{}` trait cannot be explicitly \ - invoked on this closure as we have not yet inferred what \ - kind of closure it is", - item_name, - fcx.tcx().item_path_str(trait_def_id)); - let msg = if let Some(callee) = rcvr_expr { - format!("{}; use overloaded call notation instead (e.g., `{}()`)", - msg, pprust::expr_to_string(callee)) - } else { - msg - }; - fcx.sess().span_err(span, &msg); + MethodError::PrivateMatch(def) => { + let msg = format!("{} `{}` is private", def.kind_name(), item_name); + self.tcx.sess.span_err(span, &msg); + } } } - fn report_candidates(fcx: &FnCtxt, - err: &mut DiagnosticBuilder, - span: Span, - item_name: ast::Name, - mut sources: Vec) { - sources.sort(); - sources.dedup(); - - for (idx, source) in sources.iter().enumerate() { - match *source { - CandidateSource::ImplSource(impl_did) => { - // Provide the best span we can. Use the item, if local to crate, else - // the impl, if local to crate (item may be defaulted), else the call site. 
- let item = impl_item(fcx.tcx(), impl_did, item_name) - .or_else(|| { - trait_item( - fcx.tcx(), - fcx.tcx().impl_trait_ref(impl_did).unwrap().def_id, - item_name - ) - }).unwrap(); - let impl_span = fcx.tcx().map.def_id_span(impl_did, span); - let item_span = fcx.tcx().map.def_id_span(item.def_id(), impl_span); - - let impl_ty = check::impl_self_ty(fcx, span, impl_did).ty; - - let insertion = match fcx.tcx().impl_trait_ref(impl_did) { - None => format!(""), - Some(trait_ref) => { - format!(" of the trait `{}`", - fcx.tcx().item_path_str(trait_ref.def_id)) - } - }; - - span_note!(err, item_span, - "candidate #{} is defined in an impl{} for the type `{}`", - idx + 1, - insertion, - impl_ty); - } - CandidateSource::TraitSource(trait_did) => { - let item = trait_item(fcx.tcx(), trait_did, item_name).unwrap(); - let item_span = fcx.tcx().map.def_id_span(item.def_id(), span); - span_note!(err, item_span, - "candidate #{} is defined in the trait `{}`", - idx + 1, - fcx.tcx().item_path_str(trait_did)); - } + fn suggest_traits_to_import(&self, + err: &mut DiagnosticBuilder, + span: Span, + rcvr_ty: Ty<'tcx>, + item_name: ast::Name, + rcvr_expr: Option<&hir::Expr>, + valid_out_of_scope_traits: Vec) { + if !valid_out_of_scope_traits.is_empty() { + let mut candidates = valid_out_of_scope_traits; + candidates.sort(); + candidates.dedup(); + let msg = format!("items from traits can only be used if the trait is in scope; the \ + following {traits_are} implemented but not in scope, perhaps add \ + a `use` for {one_of_them}:", + traits_are = if candidates.len() == 1 { + "trait is" + } else { + "traits are" + }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one of them" + }); + + err.help(&msg[..]); + + let limit = if candidates.len() == 5 { 5 } else { 4 }; + for (i, trait_did) in candidates.iter().take(limit).enumerate() { + err.help(&format!("candidate #{}: `use {};`", + i + 1, + self.tcx.item_path_str(*trait_did))); + } + if candidates.len() > limit { + 
err.note(&format!("and {} others", candidates.len() - limit)); } + return; } - } -} - -pub type AllTraitsVec = Vec; - -fn suggest_traits_to_import<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - err: &mut DiagnosticBuilder, - span: Span, - rcvr_ty: Ty<'tcx>, - item_name: ast::Name, - rcvr_expr: Option<&hir::Expr>, - valid_out_of_scope_traits: Vec) -{ - let tcx = fcx.tcx(); - - if !valid_out_of_scope_traits.is_empty() { - let mut candidates = valid_out_of_scope_traits; - candidates.sort(); - candidates.dedup(); - let msg = format!( - "items from traits can only be used if the trait is in scope; \ - the following {traits_are} implemented but not in scope, \ - perhaps add a `use` for {one_of_them}:", - traits_are = if candidates.len() == 1 {"trait is"} else {"traits are"}, - one_of_them = if candidates.len() == 1 {"it"} else {"one of them"}); - - err.fileline_help(span, &msg[..]); - - for (i, trait_did) in candidates.iter().enumerate() { - err.fileline_help(span, - &*format!("candidate #{}: use `{}`", - i + 1, - fcx.tcx().item_path_str(*trait_did))); + let type_is_local = self.type_derefs_to_local(span, rcvr_ty, rcvr_expr); + + // there's no implemented traits, so lets suggest some traits to + // implement, by finding ones that have the item name, and are + // legal to implement. + let mut candidates = all_traits(self.ccx) + .filter(|info| { + // we approximate the coherence rules to only suggest + // traits that are legal to implement by requiring that + // either the type or trait is local. Multidispatch means + // this isn't perfect (that is, there are cases when + // implementing a trait would be legal but is rejected + // here). 
+ (type_is_local || info.def_id.is_local()) + && self.associated_item(info.def_id, item_name).is_some() + }) + .collect::>(); + + if !candidates.is_empty() { + // sort from most relevant to least relevant + candidates.sort_by(|a, b| a.cmp(b).reverse()); + candidates.dedup(); + + // FIXME #21673 this help message could be tuned to the case + // of a type parameter: suggest adding a trait bound rather + // than implementing. + let msg = format!("items from traits can only be used if the trait is implemented \ + and in scope; the following {traits_define} an item `{name}`, \ + perhaps you need to implement {one_of_them}:", + traits_define = if candidates.len() == 1 { + "trait defines" + } else { + "traits define" + }, + one_of_them = if candidates.len() == 1 { + "it" + } else { + "one of them" + }, + name = item_name); + + err.help(&msg[..]); + + for (i, trait_info) in candidates.iter().enumerate() { + err.help(&format!("candidate #{}: `{}`", + i + 1, + self.tcx.item_path_str(trait_info.def_id))); + } } - return } - let type_is_local = type_derefs_to_local(fcx, span, rcvr_ty, rcvr_expr); - - // there's no implemented traits, so lets suggest some traits to - // implement, by finding ones that have the item name, and are - // legal to implement. - let mut candidates = all_traits(fcx.ccx) - .filter(|info| { - // we approximate the coherence rules to only suggest - // traits that are legal to implement by requiring that - // either the type or trait is local. Multidispatch means - // this isn't perfect (that is, there are cases when - // implementing a trait would be legal but is rejected - // here). 
- (type_is_local || info.def_id.is_local()) - && trait_item(tcx, info.def_id, item_name).is_some() - }) - .collect::>(); - - if !candidates.is_empty() { - // sort from most relevant to least relevant - candidates.sort_by(|a, b| a.cmp(b).reverse()); - candidates.dedup(); - - // FIXME #21673 this help message could be tuned to the case - // of a type parameter: suggest adding a trait bound rather - // than implementing. - let msg = format!( - "items from traits can only be used if the trait is implemented and in scope; \ - the following {traits_define} an item `{name}`, \ - perhaps you need to implement {one_of_them}:", - traits_define = if candidates.len() == 1 {"trait defines"} else {"traits define"}, - one_of_them = if candidates.len() == 1 {"it"} else {"one of them"}, - name = item_name); - - err.fileline_help(span, &msg[..]); - - for (i, trait_info) in candidates.iter().enumerate() { - err.fileline_help(span, - &*format!("candidate #{}: `{}`", - i + 1, - fcx.tcx().item_path_str(trait_info.def_id))); + /// Checks whether there is a local type somewhere in the chain of + /// autoderefs of `rcvr_ty`. + fn type_derefs_to_local(&self, + span: Span, + rcvr_ty: Ty<'tcx>, + rcvr_expr: Option<&hir::Expr>) + -> bool { + fn is_local(ty: Ty) -> bool { + match ty.sty { + ty::TyAdt(def, _) => def.did.is_local(), + + ty::TyDynamic(ref tr, ..) => tr.principal() + .map_or(false, |p| p.def_id().is_local()), + + ty::TyParam(_) => true, + + // everything else (primitive types etc.) is effectively + // non-local (there are "edge" cases, e.g. (LocalType,), but + // the noise from these sort of types is usually just really + // annoying, rather than any sort of help). + _ => false, + } } - } -} -/// Checks whether there is a local type somewhere in the chain of -/// autoderefs of `rcvr_ty`. 
-fn type_derefs_to_local<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - rcvr_ty: Ty<'tcx>, - rcvr_expr: Option<&hir::Expr>) -> bool { - fn is_local(ty: Ty) -> bool { - match ty.sty { - ty::TyEnum(def, _) | ty::TyStruct(def, _) => def.did.is_local(), - - ty::TyTrait(ref tr) => tr.principal_def_id().is_local(), - - ty::TyParam(_) => true, - - // everything else (primitive types etc.) is effectively - // non-local (there are "edge" cases, e.g. (LocalType,), but - // the noise from these sort of types is usually just really - // annoying, rather than any sort of help). - _ => false + // This occurs for UFCS desugaring of `T::method`, where there is no + // receiver expression for the method call, and thus no autoderef. + if rcvr_expr.is_none() { + return is_local(self.resolve_type_vars_with_obligations(rcvr_ty)); } - } - // This occurs for UFCS desugaring of `T::method`, where there is no - // receiver expression for the method call, and thus no autoderef. - if rcvr_expr.is_none() { - return is_local(fcx.resolve_type_vars_if_possible(rcvr_ty)); + self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty)) } - - check::autoderef(fcx, span, rcvr_ty, None, - check::UnresolvedTypeAction::Ignore, ty::NoPreference, - |ty, _| { - if is_local(ty) { - Some(()) - } else { - None - } - }).2.is_some() } +pub type AllTraitsVec = Vec; + #[derive(Copy, Clone)] pub struct TraitInfo { pub def_id: DefId, @@ -369,9 +411,7 @@ pub struct TraitInfo { impl TraitInfo { fn new(def_id: DefId) -> TraitInfo { - TraitInfo { - def_id: def_id, - } + TraitInfo { def_id: def_id } } } impl PartialEq for TraitInfo { @@ -381,7 +421,9 @@ impl PartialEq for TraitInfo { } impl Eq for TraitInfo {} impl PartialOrd for TraitInfo { - fn partial_cmp(&self, other: &TraitInfo) -> Option { Some(self.cmp(other)) } + fn partial_cmp(&self, other: &TraitInfo) -> Option { + Some(self.cmp(other)) + } } impl Ord for TraitInfo { fn cmp(&self, other: &TraitInfo) -> Ordering { @@ -397,18 +439,18 @@ impl Ord for TraitInfo { 
/// Retrieve all traits in this crate and any dependent crates. pub fn all_traits<'a>(ccx: &'a CrateCtxt) -> AllTraits<'a> { if ccx.all_traits.borrow().is_none() { - use rustc_front::intravisit; + use rustc::hir::itemlikevisit; let mut traits = vec![]; // Crate-local: // // meh. - struct Visitor<'a, 'tcx:'a> { + struct Visitor<'a, 'tcx: 'a> { map: &'a hir_map::Map<'tcx>, traits: &'a mut AllTraitsVec, } - impl<'v, 'a, 'tcx> intravisit::Visitor<'v> for Visitor<'a, 'tcx> { + impl<'v, 'a, 'tcx> itemlikevisit::ItemLikeVisitor<'v> for Visitor<'a, 'tcx> { fn visit_item(&mut self, i: &'v hir::Item) { match i.node { hir::ItemTrait(..) => { @@ -418,42 +460,43 @@ pub fn all_traits<'a>(ccx: &'a CrateCtxt) -> AllTraits<'a> { _ => {} } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } } - ccx.tcx.map.krate().visit_all_items(&mut Visitor { + ccx.tcx.map.krate().visit_all_item_likes(&mut Visitor { map: &ccx.tcx.map, - traits: &mut traits + traits: &mut traits, }); // Cross-crate: - let mut external_mods = FnvHashSet(); - fn handle_external_def(traits: &mut AllTraitsVec, - external_mods: &mut FnvHashSet, - ccx: &CrateCtxt, - cstore: &for<'a> cstore::CrateStore<'a>, - dl: cstore::DefLike) { - match dl { - cstore::DlDef(def::DefTrait(did)) => { - traits.push(TraitInfo::new(did)); + let mut external_mods = FxHashSet(); + fn handle_external_def(ccx: &CrateCtxt, + traits: &mut AllTraitsVec, + external_mods: &mut FxHashSet, + def: Def) { + let def_id = def.def_id(); + match def { + Def::Trait(..) => { + traits.push(TraitInfo::new(def_id)); } - cstore::DlDef(def::DefMod(did)) => { - if !external_mods.insert(did) { + Def::Mod(..) 
=> { + if !external_mods.insert(def_id) { return; } - for child in cstore.item_children(did) { - handle_external_def(traits, external_mods, - ccx, cstore, child.def) + for child in ccx.tcx.sess.cstore.item_children(def_id) { + handle_external_def(ccx, traits, external_mods, child.def) } } _ => {} } } - let cstore = &*ccx.tcx.sess.cstore; - for cnum in ccx.tcx.sess.cstore.crates() { - for child in cstore.crate_top_level_items(cnum) { - handle_external_def(&mut traits, &mut external_mods, - ccx, cstore, child.def) - } + let def_id = DefId { + krate: cnum, + index: CRATE_DEF_INDEX, + }; + handle_external_def(ccx, &mut traits, &mut external_mods, Def::Mod(def_id)); } *ccx.all_traits.borrow_mut() = Some(traits); @@ -463,13 +506,13 @@ pub fn all_traits<'a>(ccx: &'a CrateCtxt) -> AllTraits<'a> { assert!(borrow.is_some()); AllTraits { borrow: borrow, - idx: 0 + idx: 0, } } pub struct AllTraits<'a> { borrow: cell::Ref<'a, Option>, - idx: usize + idx: usize, } impl<'a> Iterator for AllTraits<'a> { diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 03508b07b7adc..764ea9445568e 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -80,58 +80,55 @@ pub use self::Expectation::*; pub use self::compare_method::{compare_impl_method, compare_const_impl}; use self::TupleArgumentsFlag::*; -use astconv::{self, ast_region_to_region, ast_ty_to_ty, AstConv, PathParamMode}; -use check::_match::pat_ctxt; +use astconv::{AstConv, ast_region_to_region}; use dep_graph::DepNode; use fmt_macros::{Parser, Piece, Position}; -use middle::astconv_util::prohibit_type_params; -use middle::cstore::LOCAL_CRATE; -use middle::def; -use middle::def_id::DefId; -use middle::infer; -use middle::infer::{TypeOrigin, type_variable}; -use middle::pat_util::{self, pat_id_map}; -use middle::privacy::{AllPublic, LastMod}; -use middle::subst::{self, Subst, Substs, VecPerParamSpace, ParamSpace, TypeSpace}; -use middle::traits::{self, 
report_fulfillment_errors}; -use middle::ty::{GenericPredicates, TypeScheme}; -use middle::ty::{Disr, ParamTy, ParameterEnvironment}; -use middle::ty::{LvaluePreference, NoPreference, PreferMutLvalue}; -use middle::ty::{self, ToPolyTraitRef, Ty}; -use middle::ty::{MethodCall, MethodCallee}; -use middle::ty::adjustment; -use middle::ty::error::TypeError; -use middle::ty::fold::{TypeFolder, TypeFoldable}; -use middle::ty::util::Representability; +use hir::def::{Def, CtorKind}; +use hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin, + TypeTrace, type_variable}; +use rustc::ty::subst::{Kind, Subst, Substs}; +use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; +use rustc::ty::{ParamTy, ParameterEnvironment}; +use rustc::ty::{LvaluePreference, NoPreference, PreferMutLvalue}; +use rustc::ty::{self, ToPolyTraitRef, Ty, TyCtxt, Visibility}; +use rustc::ty::{MethodCall, MethodCallee}; +use rustc::ty::adjustment; +use rustc::ty::fold::{BottomUpFolder, TypeFoldable}; +use rustc::ty::util::{Representability, IntTypeExt}; use require_c_abi_if_variadic; use rscope::{ElisionFailureInfo, RegionScope}; -use session::Session; -use {CrateCtxt, lookup_full_def}; +use session::{Session, CompileResult}; +use CrateCtxt; use TypeAndSubsts; use lint; -use util::common::{block_query, ErrorReported, indenter, loop_query}; -use util::nodemap::{DefIdMap, FnvHashMap, NodeMap}; +use util::common::{ErrorReported, indenter}; +use util::nodemap::{DefIdMap, FxHashMap, FxHashSet, NodeMap}; use std::cell::{Cell, Ref, RefCell}; -use std::collections::{HashSet}; +use std::cmp; use std::mem::replace; -use syntax::abi; +use std::ops::{self, Deref}; +use syntax::abi::Abi; use syntax::ast; use syntax::attr; -use syntax::attr::AttrMetaMethods; -use syntax::codemap::{self, Span, Spanned}; -use syntax::errors::DiagnosticBuilder; -use syntax::parse::token::{self, InternedString}; +use syntax::codemap::{self, original_sp, Spanned}; +use 
syntax::feature_gate::{GateIssue, emit_feature_err}; use syntax::ptr::P; +use syntax::symbol::{Symbol, InternedString, keywords}; use syntax::util::lev_distance::find_best_match_for_name; +use syntax_pos::{self, BytePos, Span}; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; -use rustc_front::hir::Visibility; -use rustc_front::print::pprust; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::{self, PatKind}; +use rustc::hir::print as pprust; +use rustc::middle::lang_items; use rustc_back::slice; +use rustc_const_eval::eval_length; mod assoc; +mod autoderef; pub mod dropck; pub mod _match; pub mod writeback; @@ -157,11 +154,12 @@ mod op; /// Here, the function `foo()` and the closure passed to /// `bar()` will each have their own `FnCtxt`, but they will /// share the inherited fields. -pub struct Inherited<'a, 'tcx: 'a> { - infcx: infer::InferCtxt<'a, 'tcx>, +pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + ccx: &'a CrateCtxt<'a, 'gcx>, + infcx: InferCtxt<'a, 'gcx, 'tcx>, locals: RefCell>>, - tables: &'a RefCell>, + fulfillment_cx: RefCell>, // When we process a call like `c()` where `c` is a closure type, // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or @@ -170,16 +168,33 @@ pub struct Inherited<'a, 'tcx: 'a> { // decision. We keep these deferred resolutions grouped by the // def-id of the closure, so that once we decide, we can easily go // back and process them. - deferred_call_resolutions: RefCell>>>, + deferred_call_resolutions: RefCell>>>, deferred_cast_checks: RefCell>>, + + // Anonymized types found in explicit return types and their + // associated fresh inference variable. Writeback resolves these + // variables to get the concrete type, which can be used to + // deanonymize TyAnon, after typeck is done with all functions. 
+ anon_types: RefCell>>, + + // Obligations which will have to be checked at the end of + // type-checking, after all functions have been inferred. + deferred_obligations: RefCell>>, +} + +impl<'a, 'gcx, 'tcx> Deref for Inherited<'a, 'gcx, 'tcx> { + type Target = InferCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.infcx + } } -trait DeferredCallResolution<'tcx> { - fn resolve<'a>(&mut self, fcx: &FnCtxt<'a,'tcx>); +trait DeferredCallResolution<'gcx, 'tcx> { + fn resolve<'a>(&mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>); } -type DeferredCallResolutionHandler<'tcx> = Box+'tcx>; +type DeferredCallResolutionHandler<'gcx, 'tcx> = Box+'tcx>; /// When type-checking an expression, we propagate downward /// whatever type hint we are able in the form of an `Expectation`. @@ -199,7 +214,7 @@ pub enum Expectation<'tcx> { ExpectRvalueLikeUnsized(Ty<'tcx>), } -impl<'tcx> Expectation<'tcx> { +impl<'a, 'gcx, 'tcx> Expectation<'tcx> { // Disregard "castable to" expectations because they // can lead us astray. Consider for example `if cond // {22} else {c} as u8` -- if we propagate the @@ -216,10 +231,10 @@ impl<'tcx> Expectation<'tcx> { // an expected type. Otherwise, we might write parts of the type // when checking the 'then' block which are incompatible with the // 'else' branch. - fn adjust_for_branches<'a>(&self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> { + fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> { match *self { ExpectHasType(ety) => { - let ety = fcx.infcx().shallow_resolve(ety); + let ety = fcx.shallow_resolve(ety); if !ety.is_ty_var() { ExpectHasType(ety) } else { @@ -232,6 +247,70 @@ impl<'tcx> Expectation<'tcx> { _ => NoExpectation } } + + /// Provide an expectation for an rvalue expression given an *optional* + /// hint, which is not required for type safety (the resulting type might + /// be checked higher up, as is the case with `&expr` and `box expr`), but + /// is useful in determining the concrete type. 
+ /// + /// The primary use case is where the expected type is a fat pointer, + /// like `&[isize]`. For example, consider the following statement: + /// + /// let x: &[isize] = &[1, 2, 3]; + /// + /// In this case, the expected type for the `&[1, 2, 3]` expression is + /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the + /// expectation `ExpectHasType([isize])`, that would be too strong -- + /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`. + /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced + /// to the type `&[isize]`. Therefore, we propagate this more limited hint, + /// which still is useful, because it informs integer literals and the like. + /// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169 + /// for examples of where this comes up,. + fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> { + match fcx.tcx.struct_tail(ty).sty { + ty::TySlice(_) | ty::TyStr | ty::TyDynamic(..) => { + ExpectRvalueLikeUnsized(ty) + } + _ => ExpectHasType(ty) + } + } + + // Resolves `expected` by a single level if it is a variable. If + // there is no expected type or resolution is not possible (e.g., + // no constraints yet present), just returns `None`. 
+ fn resolve(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> { + match self { + NoExpectation => { + NoExpectation + } + ExpectCastableToType(t) => { + ExpectCastableToType(fcx.resolve_type_vars_if_possible(&t)) + } + ExpectHasType(t) => { + ExpectHasType(fcx.resolve_type_vars_if_possible(&t)) + } + ExpectRvalueLikeUnsized(t) => { + ExpectRvalueLikeUnsized(fcx.resolve_type_vars_if_possible(&t)) + } + } + } + + fn to_option(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option> { + match self.resolve(fcx) { + NoExpectation => None, + ExpectCastableToType(ty) | + ExpectHasType(ty) | + ExpectRvalueLikeUnsized(ty) => Some(ty), + } + } + + fn only_has_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option> { + match self.resolve(fcx) { + ExpectHasType(ty) => Some(ty), + _ => None + } + } } #[derive(Copy, Clone)] @@ -263,7 +342,7 @@ impl UnsafetyState { (unsafety, blk.id, self.unsafe_push_count.checked_sub(1).unwrap()), hir::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.id, self.unsafe_push_count), - hir::DefaultBlock | hir::PushUnstableBlock | hir:: PopUnstableBlock => + hir::DefaultBlock => (unsafety, self.def, self.unsafe_push_count), }; UnsafetyState{ def: def, @@ -275,8 +354,91 @@ impl UnsafetyState { } } +/// Whether a node ever exits normally or not. +/// Tracked semi-automatically (through type variables +/// marked as diverging), with some manual adjustments +/// for control-flow primitives (approximating a CFG). +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +enum Diverges { + /// Potentially unknown, some cases converge, + /// others require a CFG to determine them. + Maybe, + + /// Definitely known to diverge and therefore + /// not reach the next sibling or its parent. + Always, + + /// Same as `Always` but with a reachability + /// warning already emitted + WarnedAlways +} + +// Convenience impls for combinig `Diverges`. 
+ +impl ops::BitAnd for Diverges { + type Output = Self; + fn bitand(self, other: Self) -> Self { + cmp::min(self, other) + } +} + +impl ops::BitOr for Diverges { + type Output = Self; + fn bitor(self, other: Self) -> Self { + cmp::max(self, other) + } +} + +impl ops::BitAndAssign for Diverges { + fn bitand_assign(&mut self, other: Self) { + *self = *self & other; + } +} + +impl ops::BitOrAssign for Diverges { + fn bitor_assign(&mut self, other: Self) { + *self = *self | other; + } +} + +impl Diverges { + fn always(self) -> bool { + self >= Diverges::Always + } +} + +#[derive(Clone)] +pub struct LoopCtxt<'gcx, 'tcx> { + unified: Ty<'tcx>, + coerce_to: Ty<'tcx>, + break_exprs: Vec<&'gcx hir::Expr>, + may_break: bool, +} + +#[derive(Clone)] +pub struct EnclosingLoops<'gcx, 'tcx> { + stack: Vec>, + by_id: NodeMap, +} + +impl<'gcx, 'tcx> EnclosingLoops<'gcx, 'tcx> { + fn find_loop(&mut self, id: Option) -> Option<&mut LoopCtxt<'gcx, 'tcx>> { + if let Some(id) = id { + if let Some(ix) = self.by_id.get(&id).cloned() { + Some(&mut self.stack[ix]) + } else { + None + } + } else { + self.stack.last_mut() + } + } +} + #[derive(Clone)] -pub struct FnCtxt<'a, 'tcx: 'a> { +pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + ast_ty_to_ty_cache: RefCell>>, + body_id: ast::NodeId, // This flag is set to true if, during the writeback phase, we encounter @@ -289,27 +451,71 @@ pub struct FnCtxt<'a, 'tcx: 'a> { // expects the types within the function to be consistent. err_count_on_creation: usize, - ret_ty: ty::FnOutput<'tcx>, + ret_ty: Ty<'tcx>, ps: RefCell, - inh: &'a Inherited<'a, 'tcx>, + /// Whether the last checked node can ever exit. + diverges: Cell, + + /// Whether any child nodes have any type errors. 
+ has_errors: Cell, + + enclosing_loops: RefCell>, + + inh: &'a Inherited<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx, 'tcx> Deref for FnCtxt<'a, 'gcx, 'tcx> { + type Target = Inherited<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.inh + } +} - ccx: &'a CrateCtxt<'a, 'tcx>, +/// Helper type of a temporary returned by ccx.inherited(...). +/// Necessary because we can't write the following bound: +/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>). +pub struct InheritedBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + ccx: &'a CrateCtxt<'a, 'gcx>, + infcx: infer::InferCtxtBuilder<'a, 'gcx, 'tcx> +} + +impl<'a, 'gcx, 'tcx> CrateCtxt<'a, 'gcx> { + pub fn inherited(&'a self, id: ast::NodeId) + -> InheritedBuilder<'a, 'gcx, 'tcx> { + let param_env = ParameterEnvironment::for_item(self.tcx, id); + InheritedBuilder { + ccx: self, + infcx: self.tcx.infer_ctxt(Some(ty::Tables::empty()), + Some(param_env), + Reveal::NotSpecializable) + } + } } -impl<'a, 'tcx> Inherited<'a, 'tcx> { - fn new(tcx: &'a ty::ctxt<'tcx>, - tables: &'a RefCell>, - param_env: ty::ParameterEnvironment<'a, 'tcx>) - -> Inherited<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> InheritedBuilder<'a, 'gcx, 'tcx> { + fn enter(&'tcx mut self, f: F) -> R + where F: for<'b> FnOnce(Inherited<'b, 'gcx, 'tcx>) -> R + { + let ccx = self.ccx; + self.infcx.enter(|infcx| f(Inherited::new(ccx, infcx))) + } +} +impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { + pub fn new(ccx: &'a CrateCtxt<'a, 'gcx>, + infcx: InferCtxt<'a, 'gcx, 'tcx>) + -> Self { Inherited { - infcx: infer::new_infer_ctxt(tcx, tables, Some(param_env)), + ccx: ccx, + infcx: infcx, + fulfillment_cx: RefCell::new(traits::FulfillmentContext::new()), locals: RefCell::new(NodeMap()), - tables: tables, deferred_call_resolutions: RefCell::new(DefIdMap()), deferred_cast_checks: RefCell::new(Vec::new()), + anon_types: RefCell::new(DefIdMap()), + deferred_obligations: RefCell::new(Vec::new()), } } @@ -320,9 +526,8 @@ impl<'a, 'tcx> Inherited<'a, 'tcx> { -> 
T where T : TypeFoldable<'tcx> { - let mut fulfillment_cx = self.infcx.fulfillment_cx.borrow_mut(); - assoc::normalize_associated_types_in(&self.infcx, - &mut fulfillment_cx, + assoc::normalize_associated_types_in(self, + &mut self.fulfillment_cx.borrow_mut(), span, body_id, value) @@ -330,36 +535,14 @@ impl<'a, 'tcx> Inherited<'a, 'tcx> { } -// Used by check_const and check_enum_variants -pub fn blank_fn_ctxt<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, - inh: &'a Inherited<'a, 'tcx>, - rty: ty::FnOutput<'tcx>, - body_id: ast::NodeId) - -> FnCtxt<'a, 'tcx> { - FnCtxt { - body_id: body_id, - writeback_errors: Cell::new(false), - err_count_on_creation: ccx.tcx.sess.err_count(), - ret_ty: rty, - ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, 0)), - inh: inh, - ccx: ccx - } -} - -fn static_inherited_fields<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, - tables: &'a RefCell>) - -> Inherited<'a, 'tcx> { - // It's kind of a kludge to manufacture a fake function context - // and statement context, but we might as well do write the code only once - let param_env = ccx.tcx.empty_parameter_environment(); - Inherited::new(ccx.tcx, &tables, param_env) -} - struct CheckItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> } struct CheckItemBodiesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> } impl<'a, 'tcx> Visitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.ccx.tcx.map) + } + fn visit_item(&mut self, i: &'tcx hir::Item) { check_item_type(self.ccx, i); intravisit::walk_item(self, i); @@ -367,8 +550,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> { fn visit_ty(&mut self, t: &'tcx hir::Ty) { match t.node { - hir::TyFixedLengthVec(_, ref expr) => { - check_const_in_type(self.ccx, &**expr, self.ccx.tcx.types.usize); + hir::TyArray(_, ref expr) => { + check_const_with_type(self.ccx, &expr, self.ccx.tcx.types.usize, expr.id); 
} _ => {} } @@ -377,35 +560,61 @@ impl<'a, 'tcx> Visitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> { } } -impl<'a, 'tcx> Visitor<'tcx> for CheckItemBodiesVisitor<'a, 'tcx> { +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for CheckItemBodiesVisitor<'a, 'tcx> { fn visit_item(&mut self, i: &'tcx hir::Item) { check_item_body(self.ccx, i); } + + fn visit_impl_item(&mut self, _item: &'tcx hir::ImplItem) { + // done as part of `visit_item` above + } } -pub fn check_wf_new(ccx: &CrateCtxt) { - ccx.tcx.sess.abort_if_new_errors(|| { +pub fn check_wf_new(ccx: &CrateCtxt) -> CompileResult { + ccx.tcx.sess.track_errors(|| { let mut visit = wfcheck::CheckTypeWellFormedVisitor::new(ccx); - ccx.tcx.visit_all_items_in_krate(DepNode::WfCheck, &mut visit); - }); + ccx.tcx.visit_all_item_likes_in_krate(DepNode::WfCheck, &mut visit.as_deep_visitor()); + }) } -pub fn check_item_types(ccx: &CrateCtxt) { - ccx.tcx.sess.abort_if_new_errors(|| { +pub fn check_item_types(ccx: &CrateCtxt) -> CompileResult { + ccx.tcx.sess.track_errors(|| { let mut visit = CheckItemTypesVisitor { ccx: ccx }; - ccx.tcx.visit_all_items_in_krate(DepNode::TypeckItemType, &mut visit); - }); + ccx.tcx.visit_all_item_likes_in_krate(DepNode::TypeckItemType, + &mut visit.as_deep_visitor()); + }) } -pub fn check_item_bodies(ccx: &CrateCtxt) { - ccx.tcx.sess.abort_if_new_errors(|| { +pub fn check_item_bodies(ccx: &CrateCtxt) -> CompileResult { + ccx.tcx.sess.track_errors(|| { let mut visit = CheckItemBodiesVisitor { ccx: ccx }; - ccx.tcx.visit_all_items_in_krate(DepNode::TypeckItemBody, &mut visit); - }); + ccx.tcx.visit_all_item_likes_in_krate(DepNode::TypeckItemBody, &mut visit); + + // Process deferred obligations, now that all functions + // bodies have been fully inferred. + for (&item_id, obligations) in ccx.deferred_obligations.borrow().iter() { + // Use the same DepNode as for the body of the original function/item. 
+ let def_id = ccx.tcx.map.local_def_id(item_id); + let _task = ccx.tcx.dep_graph.in_task(DepNode::TypeckItemBody(def_id)); + + let param_env = ParameterEnvironment::for_item(ccx.tcx, item_id); + ccx.tcx.infer_ctxt(None, Some(param_env), + Reveal::NotSpecializable).enter(|infcx| { + let mut fulfillment_cx = traits::FulfillmentContext::new(); + for obligation in obligations.iter().map(|o| o.to_obligation()) { + fulfillment_cx.register_predicate_obligation(&infcx, obligation); + } + + if let Err(errors) = fulfillment_cx.select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(&errors); + } + }); + } + }) } -pub fn check_drop_impls(ccx: &CrateCtxt) { - ccx.tcx.sess.abort_if_new_errors(|| { +pub fn check_drop_impls(ccx: &CrateCtxt) -> CompileResult { + ccx.tcx.sess.track_errors(|| { let _task = ccx.tcx.dep_graph.in_task(DepNode::Dropck); let drop_trait = match ccx.tcx.lang_items.drop_trait() { Some(id) => ccx.tcx.lookup_trait_def(id), None => { return } @@ -413,7 +622,7 @@ pub fn check_drop_impls(ccx: &CrateCtxt) { drop_trait.for_each_impl(ccx.tcx, |drop_impl_did| { let _task = ccx.tcx.dep_graph.in_task(DepNode::DropckImpl(drop_impl_did)); if drop_impl_did.is_local() { - match dropck::check_drop_impl(ccx.tcx, drop_impl_did) { + match dropck::check_drop_impl(ccx, drop_impl_did) { Ok(()) => {} Err(()) => { assert!(ccx.tcx.sess.has_errors()); @@ -421,107 +630,113 @@ pub fn check_drop_impls(ccx: &CrateCtxt) { } } }); - }); + }) } fn check_bare_fn<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, decl: &'tcx hir::FnDecl, - body: &'tcx hir::Block, + body_id: hir::ExprId, fn_id: ast::NodeId, - fn_span: Span, - raw_fty: Ty<'tcx>, - param_env: ty::ParameterEnvironment<'a, 'tcx>) -{ - match raw_fty.sty { - ty::TyBareFn(_, ref fn_ty) => { - let tables = RefCell::new(ty::Tables::empty()); - let inh = Inherited::new(ccx.tcx, &tables, param_env); - - // Compute the fty from point of view of inside fn. 
- let fn_scope = ccx.tcx.region_maps.call_site_extent(fn_id, body.id); - let fn_sig = - fn_ty.sig.subst(ccx.tcx, &inh.infcx.parameter_environment.free_substs); - let fn_sig = - ccx.tcx.liberate_late_bound_regions(fn_scope, &fn_sig); - let fn_sig = - inh.normalize_associated_types_in(body.span, - body.id, - &fn_sig); - - let fcx = check_fn(ccx, fn_ty.unsafety, fn_id, &fn_sig, - decl, fn_id, body, &inh); - - fcx.select_all_obligations_and_apply_defaults(); - upvar::closure_analyze_fn(&fcx, fn_id, decl, body); - fcx.select_obligations_where_possible(); - fcx.check_casts(); - fcx.select_all_obligations_or_error(); // Casts can introduce new obligations. - - regionck::regionck_fn(&fcx, fn_id, fn_span, decl, body); - writeback::resolve_type_vars_in_fn(&fcx, decl, body); - } - _ => ccx.tcx.sess.impossible_case(body.span, - "check_bare_fn: function type expected") + span: Span) { + let body = ccx.tcx.map.expr(body_id); + + let raw_fty = ccx.tcx.item_type(ccx.tcx.map.local_def_id(fn_id)); + let fn_ty = match raw_fty.sty { + ty::TyFnDef(.., f) => f, + _ => span_bug!(body.span, "check_bare_fn: function type expected") + }; + + check_abi(ccx, span, fn_ty.abi); + + ccx.inherited(fn_id).enter(|inh| { + // Compute the fty from point of view of inside fn. + let fn_scope = inh.tcx.region_maps.call_site_extent(fn_id, body_id.node_id()); + let fn_sig = + fn_ty.sig.subst(inh.tcx, &inh.parameter_environment.free_substs); + let fn_sig = + inh.tcx.liberate_late_bound_regions(fn_scope, &fn_sig); + let fn_sig = + inh.normalize_associated_types_in(body.span, body_id.node_id(), &fn_sig); + + let fcx = check_fn(&inh, fn_ty.unsafety, fn_id, &fn_sig, decl, fn_id, body); + + fcx.select_all_obligations_and_apply_defaults(); + fcx.closure_analyze(body); + fcx.select_obligations_where_possible(); + fcx.check_casts(); + fcx.select_all_obligations_or_error(); // Casts can introduce new obligations. 
+ + fcx.regionck_fn(fn_id, decl, body_id); + fcx.resolve_type_vars_in_fn(decl, body, fn_id); + }); +} + +fn check_abi<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, span: Span, abi: Abi) { + if !ccx.tcx.sess.target.target.is_abi_supported(abi) { + struct_span_err!(ccx.tcx.sess, span, E0570, + "The ABI `{}` is not supported for the current target", abi).emit() } } -struct GatherLocalsVisitor<'a, 'tcx: 'a> { - fcx: &'a FnCtxt<'a, 'tcx> +struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx> } -impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> { fn assign(&mut self, _span: Span, nid: ast::NodeId, ty_opt: Option>) -> Ty<'tcx> { match ty_opt { None => { // infer the variable's type - let var_ty = self.fcx.infcx().next_ty_var(); - self.fcx.inh.locals.borrow_mut().insert(nid, var_ty); + let var_ty = self.fcx.next_ty_var(); + self.fcx.locals.borrow_mut().insert(nid, var_ty); var_ty } Some(typ) => { // take type that the user specified - self.fcx.inh.locals.borrow_mut().insert(nid, typ); + self.fcx.locals.borrow_mut().insert(nid, typ); typ } } } } -impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::None + } + // Add explicitly-declared locals. 
- fn visit_local(&mut self, local: &'tcx hir::Local) { + fn visit_local(&mut self, local: &'gcx hir::Local) { let o_ty = match local.ty { - Some(ref ty) => Some(self.fcx.to_ty(&**ty)), + Some(ref ty) => Some(self.fcx.to_ty(&ty)), None => None }; self.assign(local.span, local.id, o_ty); debug!("Local variable {:?} is assigned type {}", local.pat, - self.fcx.infcx().ty_to_string( - self.fcx.inh.locals.borrow().get(&local.id).unwrap().clone())); + self.fcx.ty_to_string( + self.fcx.locals.borrow().get(&local.id).unwrap().clone())); intravisit::walk_local(self, local); } // Add pattern bindings. - fn visit_pat(&mut self, p: &'tcx hir::Pat) { - if let hir::PatIdent(_, ref path1, _) = p.node { - if pat_util::pat_is_binding(&self.fcx.ccx.tcx.def_map.borrow(), p) { - let var_ty = self.assign(p.span, p.id, None); - - self.fcx.require_type_is_sized(var_ty, p.span, - traits::VariableType(p.id)); - - debug!("Pattern binding {} is assigned to {} with type {:?}", - path1.node, - self.fcx.infcx().ty_to_string( - self.fcx.inh.locals.borrow().get(&p.id).unwrap().clone()), - var_ty); - } + fn visit_pat(&mut self, p: &'gcx hir::Pat) { + if let PatKind::Binding(_, _, ref path1, _) = p.node { + let var_ty = self.assign(p.span, p.id, None); + + self.fcx.require_type_is_sized(var_ty, p.span, + traits::VariableType(p.id)); + + debug!("Pattern binding {} is assigned to {} with type {:?}", + path1.node, + self.fcx.ty_to_string( + self.fcx.locals.borrow().get(&p.id).unwrap().clone()), + var_ty); } intravisit::walk_pat(self, p); } - fn visit_block(&mut self, b: &'tcx hir::Block) { + fn visit_block(&mut self, b: &'gcx hir::Block) { // non-obvious: the `blk` variable maps to region lb, so // we have to keep this up-to-date. This // is... unfortunate. It'd be nice to not need this. 
@@ -530,11 +745,11 @@ impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { // Since an expr occurs as part of the type fixed size arrays we // need to record the type for that node - fn visit_ty(&mut self, t: &'tcx hir::Ty) { + fn visit_ty(&mut self, t: &'gcx hir::Ty) { match t.node { - hir::TyFixedLengthVec(ref ty, ref count_expr) => { - self.visit_ty(&**ty); - check_expr_with_hint(self.fcx, &**count_expr, self.fcx.tcx().types.usize); + hir::TyArray(ref ty, ref count_expr) => { + self.visit_ty(&ty); + self.fcx.check_expr_with_hint(&count_expr, self.fcx.tcx.types.usize); } hir::TyBareFn(ref function_declaration) => { intravisit::walk_fn_decl_nopat(self, &function_declaration.decl); @@ -545,8 +760,8 @@ impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { } // Don't descend into the bodies of nested closures - fn visit_fn(&mut self, _: intravisit::FnKind<'tcx>, _: &'tcx hir::FnDecl, - _: &'tcx hir::Block, _: Span, _: ast::NodeId) { } + fn visit_fn(&mut self, _: intravisit::FnKind<'gcx>, _: &'gcx hir::FnDecl, + _: hir::ExprId, _: Span, _: ast::NodeId) { } } /// Helper used by check_bare_fn and check_expr_fn. Does the grungy work of checking a function @@ -555,52 +770,33 @@ impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> { /// /// * ... 
/// * inherited: other fields inherited from the enclosing fn (if any) -fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, - unsafety: hir::Unsafety, - unsafety_id: ast::NodeId, - fn_sig: &ty::FnSig<'tcx>, - decl: &'tcx hir::FnDecl, - fn_id: ast::NodeId, - body: &'tcx hir::Block, - inherited: &'a Inherited<'a, 'tcx>) - -> FnCtxt<'a, 'tcx> +fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, + unsafety: hir::Unsafety, + unsafety_id: ast::NodeId, + fn_sig: &ty::FnSig<'tcx>, + decl: &'gcx hir::FnDecl, + fn_id: ast::NodeId, + body: &'gcx hir::Expr) + -> FnCtxt<'a, 'gcx, 'tcx> { - let tcx = ccx.tcx; - let err_count_on_creation = tcx.sess.err_count(); + let mut fn_sig = fn_sig.clone(); - let arg_tys = &fn_sig.inputs; - let ret_ty = fn_sig.output; - - debug!("check_fn(arg_tys={:?}, ret_ty={:?}, fn_id={})", - arg_tys, - ret_ty, - fn_id); + debug!("check_fn(sig={:?}, fn_id={})", fn_sig, fn_id); // Create the function context. This is either derived from scratch or, // in the case of function expressions, based on the outer context. - let fcx = FnCtxt { - body_id: body.id, - writeback_errors: Cell::new(false), - err_count_on_creation: err_count_on_creation, - ret_ty: ret_ty, - ps: RefCell::new(UnsafetyState::function(unsafety, unsafety_id)), - inh: inherited, - ccx: ccx - }; + let mut fcx = FnCtxt::new(inherited, fn_sig.output, body.id); + *fcx.ps.borrow_mut() = UnsafetyState::function(unsafety, unsafety_id); - if let ty::FnConverging(ret_ty) = ret_ty { - fcx.require_type_is_sized(ret_ty, decl.output.span(), traits::ReturnType); - } - - debug!("fn-sig-map: fn_id={} fn_sig={:?}", fn_id, fn_sig); - - inherited.tables.borrow_mut().liberated_fn_sigs.insert(fn_id, fn_sig.clone()); + fcx.require_type_is_sized(fcx.ret_ty, decl.output.span(), traits::ReturnType); + fcx.ret_ty = fcx.instantiate_anon_types(&fcx.ret_ty); + fn_sig.output = fcx.ret_ty; { let mut visit = GatherLocalsVisitor { fcx: &fcx, }; // Add formal parameters. 
- for (arg_ty, input) in arg_tys.iter().zip(&decl.inputs) { + for (arg_ty, input) in fn_sig.inputs.iter().zip(&decl.inputs) { // The type of the argument must be well-formed. // // NB -- this is now checked in wfcheck, but that @@ -610,48 +806,39 @@ fn check_fn<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, fcx.register_old_wf_obligation(arg_ty, input.ty.span, traits::MiscObligation); // Create type variables for each argument. - pat_util::pat_bindings( - &tcx.def_map, - &*input.pat, - |_bm, pat_id, sp, _path| { - let var_ty = visit.assign(sp, pat_id, None); - fcx.require_type_is_sized(var_ty, sp, - traits::VariableType(pat_id)); - }); + input.pat.each_binding(|_bm, pat_id, sp, _path| { + let var_ty = visit.assign(sp, pat_id, None); + fcx.require_type_is_sized(var_ty, sp, traits::VariableType(pat_id)); + }); // Check the pattern. - let pcx = pat_ctxt { - fcx: &fcx, - map: pat_id_map(&tcx.def_map, &*input.pat), - }; - _match::check_pat(&pcx, &*input.pat, *arg_ty); + fcx.check_pat(&input.pat, arg_ty); + fcx.write_ty(input.id, arg_ty); } - visit.visit_block(body); + visit.visit_expr(body); } - check_block_with_expected(&fcx, body, match ret_ty { - ty::FnConverging(result_type) => ExpectHasType(result_type), - ty::FnDiverging => NoExpectation - }); + inherited.tables.borrow_mut().liberated_fn_sigs.insert(fn_id, fn_sig); - for (input, arg) in decl.inputs.iter().zip(arg_tys) { - fcx.write_ty(input.id, arg); - } + fcx.check_expr_coercable_to_type(body, fcx.ret_ty); fcx } -pub fn check_struct(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { - let tcx = ccx.tcx; - - check_representable(tcx, span, id, "struct"); +fn check_struct(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { + let def_id = ccx.tcx.map.local_def_id(id); + check_representable(ccx.tcx, span, def_id); - if tcx.lookup_simd(ccx.tcx.map.local_def_id(id)) { - check_simd(tcx, span, id); + if ccx.tcx.lookup_simd(def_id) { + check_simd(ccx.tcx, span, def_id); } } +fn check_union(ccx: &CrateCtxt, id: ast::NodeId, span: Span) { 
+ check_representable(ccx.tcx, span, ccx.tcx.map.local_def_id(id)); +} + pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { debug!("check_item_type(it.id={}, it.name={})", it.id, @@ -659,8 +846,8 @@ pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { let _indenter = indenter(); match it.node { // Consts can play a role in type-checking, so they are included here. - hir::ItemStatic(_, _, ref e) | - hir::ItemConst(_, ref e) => check_const(ccx, it.span, &**e, it.id), + hir::ItemStatic(.., ref e) | + hir::ItemConst(_, ref e) => check_const(ccx, &e, it.id), hir::ItemEnum(ref enum_definition, _) => { check_enum_variants(ccx, it.span, @@ -668,41 +855,49 @@ pub fn check_item_type<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { it.id); } hir::ItemFn(..) => {} // entirely within check_item_body - hir::ItemImpl(_, _, _, _, _, ref impl_items) => { + hir::ItemImpl(.., ref impl_item_refs) => { debug!("ItemImpl {} with id {}", it.name, it.id); - match ccx.tcx.impl_trait_ref(ccx.tcx.map.local_def_id(it.id)) { - Some(impl_trait_ref) => { - check_impl_items_against_trait(ccx, - it.span, - &impl_trait_ref, - impl_items); - } - None => { } + let impl_def_id = ccx.tcx.map.local_def_id(it.id); + if let Some(impl_trait_ref) = ccx.tcx.impl_trait_ref(impl_def_id) { + check_impl_items_against_trait(ccx, + it.span, + impl_def_id, + impl_trait_ref, + impl_item_refs); + let trait_def_id = impl_trait_ref.def_id; + check_on_unimplemented(ccx, trait_def_id, it); } } - hir::ItemTrait(_, ref generics, _, _) => { - check_trait_on_unimplemented(ccx, generics, it); + hir::ItemTrait(..) => { + let def_id = ccx.tcx.map.local_def_id(it.id); + check_on_unimplemented(ccx, def_id, it); } hir::ItemStruct(..) => { check_struct(ccx, it.id, it.span); } + hir::ItemUnion(..) 
=> { + check_union(ccx, it.id, it.span); + } hir::ItemTy(_, ref generics) => { - let pty_ty = ccx.tcx.node_id_to_type(it.id); - check_bounds_are_used(ccx, &generics.ty_params, pty_ty); + let def_id = ccx.tcx.map.local_def_id(it.id); + let pty_ty = ccx.tcx.item_type(def_id); + check_bounds_are_used(ccx, generics, pty_ty); } hir::ItemForeignMod(ref m) => { - if m.abi == abi::RustIntrinsic { + check_abi(ccx, it.span, m.abi); + + if m.abi == Abi::RustIntrinsic { for item in &m.items { intrinsic::check_intrinsic_type(ccx, item); } - } else if m.abi == abi::PlatformIntrinsic { + } else if m.abi == Abi::PlatformIntrinsic { for item in &m.items { intrinsic::check_platform_intrinsic_type(ccx, item); } } else { for item in &m.items { - let pty = ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(item.id)); - if !pty.generics.types.is_empty() { + let generics = ccx.tcx.item_generics(ccx.tcx.map.local_def_id(item.id)); + if !generics.types.is_empty() { let mut err = struct_span_err!(ccx.tcx.sess, item.span, E0044, "foreign items may not have type parameters"); span_help!(&mut err, item.span, @@ -727,24 +922,20 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { ccx.tcx.item_path_str(ccx.tcx.map.local_def_id(it.id))); let _indenter = indenter(); match it.node { - hir::ItemFn(ref decl, _, _, _, _, ref body) => { - let fn_pty = ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(it.id)); - let param_env = ParameterEnvironment::for_item(ccx.tcx, it.id); - check_bare_fn(ccx, &**decl, &**body, it.id, it.span, fn_pty.ty, param_env); + hir::ItemFn(ref decl, .., body_id) => { + check_bare_fn(ccx, &decl, body_id, it.id, it.span); } - hir::ItemImpl(_, _, _, _, _, ref impl_items) => { + hir::ItemImpl(.., ref impl_item_refs) => { debug!("ItemImpl {} with id {}", it.name, it.id); - let impl_pty = ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(it.id)); - - for impl_item in impl_items { + for impl_item_ref in impl_item_refs { + let impl_item = 
ccx.tcx.map.impl_item(impl_item_ref.id); match impl_item.node { hir::ImplItemKind::Const(_, ref expr) => { - check_const(ccx, impl_item.span, &*expr, impl_item.id) + check_const(ccx, &expr, impl_item.id) } - hir::ImplItemKind::Method(ref sig, ref body) => { - check_method_body(ccx, &impl_pty.generics, sig, body, - impl_item.id, impl_item.span); + hir::ImplItemKind::Method(ref sig, body_id) => { + check_bare_fn(ccx, &sig.decl, body_id, impl_item.id, impl_item.span); } hir::ImplItemKind::Type(_) => { // Nothing to do here. @@ -752,22 +943,16 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { } } } - hir::ItemTrait(_, _, _, ref trait_items) => { - let trait_def = ccx.tcx.lookup_trait_def(ccx.tcx.map.local_def_id(it.id)); + hir::ItemTrait(.., ref trait_items) => { for trait_item in trait_items { match trait_item.node { hir::ConstTraitItem(_, Some(ref expr)) => { - check_const(ccx, trait_item.span, &*expr, trait_item.id) - } - hir::MethodTraitItem(ref sig, Some(ref body)) => { - check_trait_fn_not_const(ccx, trait_item.span, sig.constness); - - check_method_body(ccx, &trait_def.generics, sig, body, - trait_item.id, trait_item.span); + check_const(ccx, &expr, trait_item.id) } - hir::MethodTraitItem(ref sig, None) => { - check_trait_fn_not_const(ccx, trait_item.span, sig.constness); + hir::MethodTraitItem(ref sig, Some(body_id)) => { + check_bare_fn(ccx, &sig.decl, body_id, trait_item.id, trait_item.span); } + hir::MethodTraitItem(_, None) | hir::ConstTraitItem(_, None) | hir::TypeTraitItem(..) => { // Nothing to do. 
@@ -779,29 +964,17 @@ pub fn check_item_body<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, it: &'tcx hir::Item) { } } -fn check_trait_fn_not_const<'a,'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - span: Span, - constness: hir::Constness) -{ - match constness { - hir::Constness::NotConst => { - // good - } - hir::Constness::Const => { - span_err!(ccx.tcx.sess, span, E0379, "trait fns cannot be declared const"); - } - } -} - -fn check_trait_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - generics: &hir::Generics, - item: &hir::Item) { +fn check_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId, + item: &hir::Item) { + let generics = ccx.tcx.item_generics(def_id); if let Some(ref attr) = item.attrs.iter().find(|a| { a.check_name("rustc_on_unimplemented") }) { - if let Some(ref istring) = attr.value_str() { + if let Some(istring) = attr.value_str() { + let istring = istring.as_str(); let parser = Parser::new(&istring); - let types = &*generics.ty_params; + let types = &generics.types; for token in parser { match token { Piece::String(_) => (), // Normal string, no need to check it @@ -810,18 +983,19 @@ fn check_trait_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, Position::ArgumentNamed(s) if s == "Self" => (), // So is `{A}` if A is a type parameter Position::ArgumentNamed(s) => match types.iter().find(|t| { - t.name.as_str() == s + t.name == s }) { Some(_) => (), None => { + let name = ccx.tcx.item_name(def_id); span_err!(ccx.tcx.sess, attr.span, E0230, "there is no type parameter \ {} on trait {}", - s, item.name); + s, name); } }, // `{:1}` and `{}` are not to be used - Position::ArgumentIs(_) | Position::ArgumentNext => { + Position::ArgumentIs(_) => { span_err!(ccx.tcx.sess, attr.span, E0231, "only named substitution \ parameters are allowed"); @@ -830,193 +1004,227 @@ fn check_trait_on_unimplemented<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } } } else { - span_err!(ccx.tcx.sess, attr.span, E0232, - "this attribute must have a value, \ - eg 
`#[rustc_on_unimplemented = \"foo\"]`") + struct_span_err!( + ccx.tcx.sess, attr.span, E0232, + "this attribute must have a value") + .span_label(attr.span, &format!("attribute requires a value")) + .note(&format!("eg `#[rustc_on_unimplemented = \"foo\"]`")) + .emit(); } } } -/// Type checks a method body. -/// -/// # Parameters -/// -/// * `item_generics`: generics defined on the impl/trait that contains -/// the method -/// * `self_bound`: bound for the `Self` type parameter, if any -/// * `method`: the method definition -fn check_method_body<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - item_generics: &ty::Generics<'tcx>, - sig: &'tcx hir::MethodSig, - body: &'tcx hir::Block, - id: ast::NodeId, span: Span) { - debug!("check_method_body(item_generics={:?}, id={})", - item_generics, id); - let param_env = ParameterEnvironment::for_item(ccx.tcx, id); - - let fty = ccx.tcx.node_id_to_type(id); - debug!("check_method_body: fty={:?}", fty); - - check_bare_fn(ccx, &sig.decl, body, id, span, fty, param_env); +fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + impl_item: &hir::ImplItem, + parent_impl: DefId) +{ + let mut err = struct_span_err!( + tcx.sess, impl_item.span, E0520, + "`{}` specializes an item from a parent `impl`, but \ + that item is not marked `default`", + impl_item.name); + err.span_label(impl_item.span, &format!("cannot specialize default item `{}`", + impl_item.name)); + + match tcx.span_of_impl(parent_impl) { + Ok(span) => { + err.span_label(span, &"parent `impl` is here"); + err.note(&format!("to specialize, `{}` in the parent `impl` must be marked `default`", + impl_item.name)); + } + Err(cname) => { + err.note(&format!("parent implementation is in crate `{}`", cname)); + } + } + + err.emit(); +} + +fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + trait_def: &ty::TraitDef, + impl_id: DefId, + impl_item: &hir::ImplItem) +{ + let ancestors = trait_def.ancestors(impl_id); + + let kind = match impl_item.node { 
+ hir::ImplItemKind::Const(..) => ty::AssociatedKind::Const, + hir::ImplItemKind::Method(..) => ty::AssociatedKind::Method, + hir::ImplItemKind::Type(_) => ty::AssociatedKind::Type + }; + let parent = ancestors.defs(tcx, impl_item.name, kind).skip(1).next() + .map(|node_item| node_item.map(|parent| parent.defaultness)); + + if let Some(parent) = parent { + if parent.item.is_final() { + report_forbidden_specialization(tcx, impl_item, parent.node.def_id()); + } + } + } fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, impl_span: Span, - impl_trait_ref: &ty::TraitRef<'tcx>, - impl_items: &[hir::ImplItem]) { - // Locate trait methods + impl_id: DefId, + impl_trait_ref: ty::TraitRef<'tcx>, + impl_item_refs: &[hir::ImplItemRef]) { + // If the trait reference itself is erroneous (so the compilation is going + // to fail), skip checking the items here -- the `impl_item` table in `tcx` + // isn't populated for such impls. + if impl_trait_ref.references_error() { return; } + + // Locate trait definition and items let tcx = ccx.tcx; - let trait_items = tcx.trait_items(impl_trait_ref.def_id); + let trait_def = tcx.lookup_trait_def(impl_trait_ref.def_id); let mut overridden_associated_type = None; + let impl_items = || impl_item_refs.iter().map(|iiref| ccx.tcx.map.impl_item(iiref.id)); + // Check existing impl methods to see if they are both present in trait // and compatible with trait signature - for impl_item in impl_items { - let ty_impl_item = ccx.tcx.impl_or_trait_item(ccx.tcx.map.local_def_id(impl_item.id)); - let ty_trait_item = trait_items.iter() - .find(|ac| ac.name() == ty_impl_item.name()); + for impl_item in impl_items() { + let ty_impl_item = tcx.associated_item(tcx.map.local_def_id(impl_item.id)); + let ty_trait_item = tcx.associated_items(impl_trait_ref.def_id) + .find(|ac| ac.name == ty_impl_item.name); + // Check that impl definition matches trait definition if let Some(ty_trait_item) = ty_trait_item { match impl_item.node { 
hir::ImplItemKind::Const(..) => { - let impl_const = match ty_impl_item { - ty::ConstTraitItem(ref cti) => cti, - _ => tcx.sess.span_bug(impl_item.span, "non-const impl-item for const") - }; - // Find associated const definition. - if let &ty::ConstTraitItem(ref trait_const) = ty_trait_item { - compare_const_impl(ccx.tcx, - &impl_const, + if ty_trait_item.kind == ty::AssociatedKind::Const { + compare_const_impl(ccx, + &ty_impl_item, impl_item.span, - trait_const, - &*impl_trait_ref); + &ty_trait_item, + impl_trait_ref); } else { - span_err!(tcx.sess, impl_item.span, E0323, + let mut err = struct_span_err!(tcx.sess, impl_item.span, E0323, "item `{}` is an associated const, \ - which doesn't match its trait `{:?}`", - impl_const.name, - impl_trait_ref) + which doesn't match its trait `{}`", + ty_impl_item.name, + impl_trait_ref); + err.span_label(impl_item.span, &format!("does not match trait")); + // We can only get the spans from local trait definition + // Same for E0324 and E0325 + if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id) { + err.span_label(trait_span, &format!("item in trait")); + } + err.emit() } } - hir::ImplItemKind::Method(ref sig, ref body) => { - check_trait_fn_not_const(ccx, impl_item.span, sig.constness); - - let impl_method = match ty_impl_item { - ty::MethodTraitItem(ref mti) => mti, - _ => tcx.sess.span_bug(impl_item.span, "non-method impl-item for method") - }; - - if let &ty::MethodTraitItem(ref trait_method) = ty_trait_item { - compare_impl_method(ccx.tcx, - &impl_method, + hir::ImplItemKind::Method(_, body_id) => { + let trait_span = tcx.map.span_if_local(ty_trait_item.def_id); + if ty_trait_item.kind == ty::AssociatedKind::Method { + let err_count = tcx.sess.err_count(); + compare_impl_method(ccx, + &ty_impl_item, impl_item.span, - body.id, - &trait_method, - &impl_trait_ref); + body_id.node_id(), + &ty_trait_item, + impl_trait_ref, + trait_span, + true); // start with old-broken-mode + if err_count == 
tcx.sess.err_count() { + // old broken mode did not report an error. Try with the new mode. + compare_impl_method(ccx, + &ty_impl_item, + impl_item.span, + body_id.node_id(), + &ty_trait_item, + impl_trait_ref, + trait_span, + false); // use the new mode + } } else { - span_err!(tcx.sess, impl_item.span, E0324, + let mut err = struct_span_err!(tcx.sess, impl_item.span, E0324, "item `{}` is an associated method, \ - which doesn't match its trait `{:?}`", - impl_method.name, - impl_trait_ref) + which doesn't match its trait `{}`", + ty_impl_item.name, + impl_trait_ref); + err.span_label(impl_item.span, &format!("does not match trait")); + if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id) { + err.span_label(trait_span, &format!("item in trait")); + } + err.emit() } } hir::ImplItemKind::Type(_) => { - let impl_type = match ty_impl_item { - ty::TypeTraitItem(ref tti) => tti, - _ => tcx.sess.span_bug(impl_item.span, "non-type impl-item for type") - }; - - if let &ty::TypeTraitItem(ref at) = ty_trait_item { - if let Some(_) = at.ty { + if ty_trait_item.kind == ty::AssociatedKind::Type { + if ty_trait_item.defaultness.has_value() { overridden_associated_type = Some(impl_item); } } else { - span_err!(tcx.sess, impl_item.span, E0325, + let mut err = struct_span_err!(tcx.sess, impl_item.span, E0325, "item `{}` is an associated type, \ - which doesn't match its trait `{:?}`", - impl_type.name, - impl_trait_ref) + which doesn't match its trait `{}`", + ty_impl_item.name, + impl_trait_ref); + err.span_label(impl_item.span, &format!("does not match trait")); + if let Some(trait_span) = tcx.map.span_if_local(ty_trait_item.def_id) { + err.span_label(trait_span, &format!("item in trait")); + } + err.emit() } } } } + + check_specialization_validity(tcx, trait_def, impl_id, impl_item); } // Check for missing items from trait - let provided_methods = tcx.provided_trait_methods(impl_trait_ref.def_id); let mut missing_items = Vec::new(); let mut invalidated_items = 
Vec::new(); let associated_type_overridden = overridden_associated_type.is_some(); - for trait_item in trait_items.iter() { - match *trait_item { - ty::ConstTraitItem(ref associated_const) => { - let is_implemented = impl_items.iter().any(|ii| { - match ii.node { - hir::ImplItemKind::Const(..) => { - ii.name == associated_const.name - } - _ => false, - } - }); - let is_provided = associated_const.has_value; - - if !is_implemented { - if !is_provided { - missing_items.push(associated_const.name); - } else if associated_type_overridden { - invalidated_items.push(associated_const.name); - } - } + for trait_item in tcx.associated_items(impl_trait_ref.def_id) { + let is_implemented = trait_def.ancestors(impl_id) + .defs(tcx, trait_item.name, trait_item.kind) + .next() + .map(|node_item| !node_item.node.is_from_trait()) + .unwrap_or(false); + + if !is_implemented { + if !trait_item.defaultness.has_value() { + missing_items.push(trait_item); + } else if associated_type_overridden { + invalidated_items.push(trait_item.name); } - ty::MethodTraitItem(ref trait_method) => { - let is_implemented = - impl_items.iter().any(|ii| { - match ii.node { - hir::ImplItemKind::Method(..) 
=> { - ii.name == trait_method.name - } - _ => false, - } - }); - let is_provided = - provided_methods.iter().any(|m| m.name == trait_method.name); - if !is_implemented { - if !is_provided { - missing_items.push(trait_method.name); - } else if associated_type_overridden { - invalidated_items.push(trait_method.name); - } - } + } + } + + let signature = |item: &ty::AssociatedItem| { + match item.kind { + ty::AssociatedKind::Method => { + format!("{}", tcx.item_type(item.def_id).fn_sig().0) } - ty::TypeTraitItem(ref associated_type) => { - let is_implemented = impl_items.iter().any(|ii| { - match ii.node { - hir::ImplItemKind::Type(_) => { - ii.name == associated_type.name - } - _ => false, - } - }); - let is_provided = associated_type.ty.is_some(); - if !is_implemented { - if !is_provided { - missing_items.push(associated_type.name); - } else if associated_type_overridden { - invalidated_items.push(associated_type.name); - } - } + ty::AssociatedKind::Type => format!("type {};", item.name.to_string()), + ty::AssociatedKind::Const => { + format!("const {}: {:?};", item.name.to_string(), tcx.item_type(item.def_id)) } } - } + }; if !missing_items.is_empty() { - span_err!(tcx.sess, impl_span, E0046, + let mut err = struct_span_err!(tcx.sess, impl_span, E0046, "not all trait items implemented, missing: `{}`", missing_items.iter() - .map(|name| name.to_string()) - .collect::>().join("`, `")) + .map(|trait_item| trait_item.name.to_string()) + .collect::>().join("`, `")); + err.span_label(impl_span, &format!("missing `{}` in implementation", + missing_items.iter() + .map(|trait_item| trait_item.name.to_string()) + .collect::>().join("`, `"))); + for trait_item in missing_items { + if let Some(span) = tcx.map.span_if_local(trait_item.def_id) { + err.span_label(span, &format!("`{}` from trait", trait_item.name)); + } else { + err.note(&format!("`{}` from trait: `{}`", + trait_item.name, + signature(&trait_item))); + } + } + err.emit(); } if !invalidated_items.is_empty() { @@ 
-1031,75 +1239,169 @@ fn check_impl_items_against_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, } } -fn report_cast_to_unsized_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - t_span: Span, - e_span: Span, - t_cast: Ty<'tcx>, - t_expr: Ty<'tcx>, - id: ast::NodeId) { - if t_cast.references_error() || t_expr.references_error() { - return; - } - let tstr = fcx.infcx().ty_to_string(t_cast); - let mut err = fcx.type_error_struct(span, |actual| { - format!("cast to unsized type: `{}` as `{}`", actual, tstr) - }, t_expr, None); - match t_expr.sty { - ty::TyRef(_, ty::TypeAndMut { mutbl: mt, .. }) => { - let mtstr = match mt { - hir::MutMutable => "mut ", - hir::MutImmutable => "" - }; - if t_cast.is_trait() { - match fcx.tcx().sess.codemap().span_to_snippet(t_span) { - Ok(s) => { - err.span_suggestion(t_span, - "try casting to a reference instead:", - format!("&{}{}", mtstr, s)); - }, - Err(_) => - span_help!(err, t_span, - "did you mean `&{}{}`?", mtstr, tstr), - } - } else { - span_help!(err, span, - "consider using an implicit coercion to `&{}{}` instead", - mtstr, tstr); - } - } - ty::TyBox(..) => { - match fcx.tcx().sess.codemap().span_to_snippet(t_span) { - Ok(s) => { - err.span_suggestion(t_span, - "try casting to a `Box` instead:", - format!("Box<{}>", s)); - }, - Err(_) => - span_help!(err, t_span, "did you mean `Box<{}>`?", tstr), - } - } - _ => { - span_help!(err, e_span, - "consider using a box or reference as appropriate"); - } - } - err.emit(); - fcx.write_error(id); +/// Checks a constant with a given type. +fn check_const_with_type<'a, 'tcx>(ccx: &'a CrateCtxt<'a, 'tcx>, + expr: &'tcx hir::Expr, + expected_type: Ty<'tcx>, + id: ast::NodeId) { + ccx.inherited(id).enter(|inh| { + let fcx = FnCtxt::new(&inh, expected_type, expr.id); + fcx.require_type_is_sized(expected_type, expr.span, traits::ConstSized); + + // Gather locals in statics (because of block expressions). 
+ // This is technically unnecessary because locals in static items are forbidden, + // but prevents type checking from blowing up before const checking can properly + // emit an error. + GatherLocalsVisitor { fcx: &fcx }.visit_expr(expr); + + fcx.check_expr_coercable_to_type(expr, expected_type); + + fcx.select_all_obligations_and_apply_defaults(); + fcx.closure_analyze(expr); + fcx.select_obligations_where_possible(); + fcx.check_casts(); + fcx.select_all_obligations_or_error(); + + fcx.regionck_expr(expr); + fcx.resolve_type_vars_in_expr(expr, id); + }); } +fn check_const<'a, 'tcx>(ccx: &CrateCtxt<'a,'tcx>, + expr: &'tcx hir::Expr, + id: ast::NodeId) { + let decl_ty = ccx.tcx.item_type(ccx.tcx.map.local_def_id(id)); + check_const_with_type(ccx, expr, decl_ty, id); +} -impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx } +/// Checks whether a type can be represented in memory. In particular, it +/// identifies types that contain themselves without indirection through a +/// pointer, which would mean their size is unbounded. +fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + sp: Span, + item_def_id: DefId) + -> bool { + let rty = tcx.item_type(item_def_id); - fn get_item_type_scheme(&self, _: Span, id: DefId) - -> Result, ErrorReported> - { - Ok(self.tcx().lookup_item_type(id)) + // Check that it is possible to represent this type. This call identifies + // (1) types that contain themselves and (2) types that contain a different + // recursive type. It is only necessary to throw an error on those that + // contain themselves. For case 2, there must be an inner type that will be + // caught by case 1. 
+ match rty.is_representable(tcx, sp) { + Representability::SelfRecursive => { + tcx.recursive_type_with_infinite_size_error(item_def_id).emit(); + return false + } + Representability::Representable | Representability::ContainsRecursive => (), } - + return true +} + +pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) { + let t = tcx.item_type(def_id); + match t.sty { + ty::TyAdt(def, substs) if def.is_struct() => { + let fields = &def.struct_variant().fields; + if fields.is_empty() { + span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty"); + return; + } + let e = fields[0].ty(tcx, substs); + if !fields.iter().all(|f| f.ty(tcx, substs) == e) { + struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous") + .span_label(sp, &format!("SIMD elements must have the same type")) + .emit(); + return; + } + match e.sty { + ty::TyParam(_) => { /* struct(T, T, T, T) is ok */ } + _ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ } + _ => { + span_err!(tcx.sess, sp, E0077, + "SIMD vector element type should be machine type"); + return; + } + } + } + _ => () + } +} + +#[allow(trivial_numeric_casts)] +pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, + sp: Span, + vs: &'tcx [hir::Variant], + id: ast::NodeId) { + let def_id = ccx.tcx.map.local_def_id(id); + let hint = *ccx.tcx.lookup_repr_hints(def_id).get(0).unwrap_or(&attr::ReprAny); + + if hint != attr::ReprAny && vs.is_empty() { + struct_span_err!( + ccx.tcx.sess, sp, E0084, + "unsupported representation for zero-variant enum") + .span_label(sp, &format!("unsupported enum representation")) + .emit(); + } + + let repr_type_ty = ccx.tcx.enum_repr_type(Some(&hint)).to_ty(ccx.tcx); + for v in vs { + if let Some(ref e) = v.node.disr_expr { + check_const_with_type(ccx, e, repr_type_ty, e.id); + } + } + + let def_id = ccx.tcx.map.local_def_id(id); + + let variants = &ccx.tcx.lookup_adt_def(def_id).variants; + let mut disr_vals: Vec = Vec::new(); + for (v, 
variant) in vs.iter().zip(variants.iter()) { + let current_disr_val = variant.disr_val; + + // Check for duplicate discriminant values + if let Some(i) = disr_vals.iter().position(|&x| x == current_disr_val) { + let variant_i_node_id = ccx.tcx.map.as_local_node_id(variants[i].did).unwrap(); + let variant_i = ccx.tcx.map.expect_variant(variant_i_node_id); + let i_span = match variant_i.node.disr_expr { + Some(ref expr) => expr.span, + None => ccx.tcx.map.span(variant_i_node_id) + }; + let span = match v.node.disr_expr { + Some(ref expr) => expr.span, + None => v.span + }; + struct_span_err!(ccx.tcx.sess, span, E0081, + "discriminant value `{}` already exists", disr_vals[i]) + .span_label(i_span, &format!("first use of `{}`", disr_vals[i])) + .span_label(span , &format!("enum already has `{}`", disr_vals[i])) + .emit(); + } + disr_vals.push(current_disr_val); + } + + check_representable(ccx.tcx, sp, def_id); +} + +impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn ast_ty_to_ty_cache(&self) -> &RefCell>> { + &self.ast_ty_to_ty_cache + } + + fn get_generics(&self, _: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported> + { + Ok(self.tcx().item_generics(id)) + } + + fn get_item_type(&self, _: Span, id: DefId) -> Result, ErrorReported> + { + Ok(self.tcx().item_type(id)) + } + fn get_trait_def(&self, _: Span, id: DefId) - -> Result<&'tcx ty::TraitDef<'tcx>, ErrorReported> + -> Result<&'tcx ty::TraitDef, ErrorReported> { Ok(self.tcx().lookup_trait_def(id)) } @@ -1110,7 +1412,7 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { } fn get_free_substs(&self) -> Option<&Substs<'tcx>> { - Some(&self.inh.infcx.parameter_environment.free_substs) + Some(&self.parameter_environment.free_substs) } fn get_type_parameter_bounds(&self, @@ -1118,14 +1420,14 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { node_id: ast::NodeId) -> Result>, ErrorReported> { - let def = 
self.tcx().type_parameter_def(node_id); - let r = self.inh.infcx.parameter_environment + let def = self.tcx.type_parameter_def(node_id); + let r = self.parameter_environment .caller_bounds .iter() .filter_map(|predicate| { match *predicate { ty::Predicate::Trait(ref data) => { - if data.0.self_ty().is_param(def.space, def.index) { + if data.0.self_ty().is_param(def.index) { Some(data.to_poly_trait_ref()) } else { None @@ -1140,36 +1442,15 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { Ok(r) } - fn trait_defines_associated_type_named(&self, - trait_def_id: DefId, - assoc_name: ast::Name) - -> bool - { - let trait_def = self.ccx.tcx.lookup_trait_def(trait_def_id); - trait_def.associated_type_names.contains(&assoc_name) - } - - fn ty_infer(&self, - ty_param_def: Option>, - substs: Option<&mut subst::Substs<'tcx>>, - space: Option, - span: Span) -> Ty<'tcx> { - // Grab the default doing subsitution - let default = ty_param_def.and_then(|def| { - def.default.map(|ty| type_variable::Default { - ty: ty.subst_spanned(self.tcx(), substs.as_ref().unwrap(), Some(span)), - origin_span: span, - def_id: def.default_def_id - }) - }); - - let ty_var = self.infcx().next_ty_var_with_default(default); + fn ty_infer(&self, _span: Span) -> Ty<'tcx> { + self.next_ty_var() + } - // Finally we add the type variable to the substs - match substs { - None => ty_var, - Some(substs) => { substs.types.push(space.unwrap(), ty_var); ty_var } - } + fn ty_infer_for_def(&self, + ty_param_def: &ty::TypeParameterDef<'tcx>, + substs: &[Kind<'tcx>], + span: Span) -> Ty<'tcx> { + self.type_var_for_def(span, ty_param_def, substs) } fn projected_ty_from_poly_trait_ref(&self, @@ -1179,7 +1460,7 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { -> Ty<'tcx> { let (trait_ref, _) = - self.infcx().replace_late_bound_regions_with_fresh_var( + self.replace_late_bound_regions_with_fresh_var( span, infer::LateBoundRegionConversionTime::AssocTypeProjection(item_name), &poly_trait_ref); @@ -1195,43 
+1476,133 @@ impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> { { self.normalize_associated_type(span, trait_ref, item_name) } + + fn set_tainted_by_errors(&self) { + self.infcx.set_tainted_by_errors() + } +} + +impl<'a, 'gcx, 'tcx> RegionScope for FnCtxt<'a, 'gcx, 'tcx> { + fn object_lifetime_default(&self, span: Span) -> Option { + Some(self.base_object_lifetime_default(span)) + } + + fn base_object_lifetime_default(&self, span: Span) -> ty::Region { + // RFC #599 specifies that object lifetime defaults take + // precedence over other defaults. But within a fn body we + // don't have a *default* region, rather we use inference to + // find the *correct* region, which is strictly more general + // (and anyway, within a fn body the right region may not even + // be something the user can write explicitly, since it might + // be some expression). + *self.next_region_var(infer::MiscVariable(span)) + } + + fn anon_regions(&self, span: Span, count: usize) + -> Result, Option>> { + Ok((0..count).map(|_| { + *self.next_region_var(infer::MiscVariable(span)) + }).collect()) + } } -impl<'a, 'tcx> FnCtxt<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx } +/// Controls whether the arguments are tupled. This is used for the call +/// operator. +/// +/// Tupling means that all call-side arguments are packed into a tuple and +/// passed as a single parameter. 
For example, if tupling is enabled, this +/// function: +/// +/// fn f(x: (isize, isize)) +/// +/// Can be called as: +/// +/// f(1, 2); +/// +/// Instead of: +/// +/// f((1, 2)); +#[derive(Clone, Eq, PartialEq)] +enum TupleArgumentsFlag { + DontTupleArguments, + TupleArguments, +} - pub fn infcx(&self) -> &infer::InferCtxt<'a,'tcx> { - &self.inh.infcx +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>, + rty: Ty<'tcx>, + body_id: ast::NodeId) + -> FnCtxt<'a, 'gcx, 'tcx> { + FnCtxt { + ast_ty_to_ty_cache: RefCell::new(NodeMap()), + body_id: body_id, + writeback_errors: Cell::new(false), + err_count_on_creation: inh.tcx.sess.err_count(), + ret_ty: rty, + ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, + ast::CRATE_NODE_ID)), + diverges: Cell::new(Diverges::Maybe), + has_errors: Cell::new(false), + enclosing_loops: RefCell::new(EnclosingLoops { + stack: Vec::new(), + by_id: NodeMap(), + }), + inh: inh, + } } - pub fn param_env(&self) -> &ty::ParameterEnvironment<'a,'tcx> { - &self.inh.infcx.parameter_environment + pub fn param_env(&self) -> &ty::ParameterEnvironment<'gcx> { + &self.parameter_environment } pub fn sess(&self) -> &Session { - &self.tcx().sess + &self.tcx.sess } pub fn err_count_since_creation(&self) -> usize { - self.ccx.tcx.sess.err_count() - self.err_count_on_creation + self.tcx.sess.err_count() - self.err_count_on_creation + } + + /// Produce warning on the given node, if the current point in the + /// function is unreachable, and there hasn't been another warning. 
+ fn warn_if_unreachable(&self, id: ast::NodeId, span: Span, kind: &str) { + if self.diverges.get() == Diverges::Always { + self.diverges.set(Diverges::WarnedAlways); + + self.tcx.sess.add_lint(lint::builtin::UNREACHABLE_CODE, + id, span, + format!("unreachable {}", kind)); + } + } + + pub fn cause(&self, + span: Span, + code: ObligationCauseCode<'tcx>) + -> ObligationCause<'tcx> { + ObligationCause::new(span, self.body_id, code) + } + + pub fn misc(&self, span: Span) -> ObligationCause<'tcx> { + self.cause(span, ObligationCauseCode::MiscObligation) } /// Resolves type variables in `ty` if possible. Unlike the infcx - /// version, this version will also select obligations if it seems - /// useful, in an effort to get more type information. - fn resolve_type_vars_if_possible(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> { - debug!("resolve_type_vars_if_possible(ty={:?})", ty); + /// version (resolve_type_vars_if_possible), this version will + /// also select obligations if it seems useful, in an effort + /// to get more type information. + fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> { + debug!("resolve_type_vars_with_obligations(ty={:?})", ty); // No TyInfer()? Nothing needs doing. if !ty.has_infer_types() { - debug!("resolve_type_vars_if_possible: ty={:?}", ty); + debug!("resolve_type_vars_with_obligations: ty={:?}", ty); return ty; } // If `ty` is a type variable, see whether we already know what it is. - ty = self.infcx().resolve_type_vars_if_possible(&ty); + ty = self.resolve_type_vars_if_possible(&ty); if !ty.has_infer_types() { - debug!("resolve_type_vars_if_possible: ty={:?}", ty); + debug!("resolve_type_vars_with_obligations: ty={:?}", ty); return ty; } @@ -1240,24 +1611,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // indirect dependencies that don't seem worth tracking // precisely. 
self.select_obligations_where_possible(); - ty = self.infcx().resolve_type_vars_if_possible(&ty); + ty = self.resolve_type_vars_if_possible(&ty); - debug!("resolve_type_vars_if_possible: ty={:?}", ty); + debug!("resolve_type_vars_with_obligations: ty={:?}", ty); ty } fn record_deferred_call_resolution(&self, closure_def_id: DefId, - r: DeferredCallResolutionHandler<'tcx>) { - let mut deferred_call_resolutions = self.inh.deferred_call_resolutions.borrow_mut(); + r: DeferredCallResolutionHandler<'gcx, 'tcx>) { + let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut(); deferred_call_resolutions.entry(closure_def_id).or_insert(vec![]).push(r); } fn remove_deferred_call_resolutions(&self, closure_def_id: DefId) - -> Vec> + -> Vec> { - let mut deferred_call_resolutions = self.inh.deferred_call_resolutions.borrow_mut(); + let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut(); deferred_call_resolutions.remove(&closure_def_id).unwrap_or(Vec::new()) } @@ -1267,13 +1638,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } pub fn local_ty(&self, span: Span, nid: ast::NodeId) -> Ty<'tcx> { - match self.inh.locals.borrow().get(&nid) { + match self.locals.borrow().get(&nid) { Some(&t) => t, None => { - span_err!(self.tcx().sess, span, E0513, - "no type for local variable {}", - nid); - self.tcx().types.err + struct_span_err!(self.tcx.sess, span, E0513, + "no type for local variable {}", + self.tcx.map.node_to_string(nid)) + .span_label(span, &"no type for variable") + .emit(); + self.tcx.types.err } } } @@ -1282,7 +1655,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub fn write_ty(&self, node_id: ast::NodeId, ty: Ty<'tcx>) { debug!("write_ty({}, {:?}) in fcx {}", node_id, ty, self.tag()); - self.inh.tables.borrow_mut().node_types.insert(node_id, ty); + self.tables.borrow_mut().node_types.insert(node_id, ty); + + if ty.references_error() { + self.has_errors.set(true); + } + + // FIXME(canndrew): This is_never should probably be an is_uninhabited + 
if ty.is_never() || self.type_var_diverges(ty) { + self.diverges.set(self.diverges.get() | Diverges::Always); + } } pub fn write_substs(&self, node_id: ast::NodeId, substs: ty::ItemSubsts<'tcx>) { @@ -1292,33 +1674,34 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { substs, self.tag()); - self.inh.tables.borrow_mut().item_substs.insert(node_id, substs); + self.tables.borrow_mut().item_substs.insert(node_id, substs); } } pub fn write_autoderef_adjustment(&self, node_id: ast::NodeId, - derefs: usize) { - self.write_adjustment( - node_id, - adjustment::AdjustDerefRef(adjustment::AutoDerefRef { + derefs: usize, + adjusted_ty: Ty<'tcx>) { + self.write_adjustment(node_id, adjustment::Adjustment { + kind: adjustment::Adjust::DerefRef { autoderefs: derefs, autoref: None, - unsize: None - }) - ); + unsize: false + }, + target: adjusted_ty + }); } pub fn write_adjustment(&self, node_id: ast::NodeId, - adj: adjustment::AutoAdjustment<'tcx>) { + adj: adjustment::Adjustment<'tcx>) { debug!("write_adjustment(node_id={}, adj={:?})", node_id, adj); if adj.is_identity() { return; } - self.inh.tables.borrow_mut().adjustments.insert(node_id, adj); + self.tables.borrow_mut().adjustments.insert(node_id, adj); } /// Basically whenever we are converting from a type scheme into @@ -1331,7 +1714,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { -> T where T : TypeFoldable<'tcx> { - let value = value.subst(self.tcx(), substs); + let value = value.subst(self.tcx, substs); let result = self.normalize_associated_types_in(span, &value); debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}", value, @@ -1342,17 +1725,55 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// As `instantiate_type_scheme`, but for the bounds found in a /// generic type scheme. 
- fn instantiate_bounds(&self, - span: Span, - substs: &Substs<'tcx>, - bounds: &ty::GenericPredicates<'tcx>) - -> ty::InstantiatedPredicates<'tcx> - { + fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: &Substs<'tcx>) + -> ty::InstantiatedPredicates<'tcx> { + let bounds = self.tcx.item_predicates(def_id); + let result = bounds.instantiate(self.tcx, substs); + let result = self.normalize_associated_types_in(span, &result.predicates); + debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}", + bounds, + substs, + result); ty::InstantiatedPredicates { - predicates: self.instantiate_type_scheme(span, substs, &bounds.predicates) + predicates: result } } + /// Replace all anonymized types with fresh inference variables + /// and record them for writeback. + fn instantiate_anon_types>(&self, value: &T) -> T { + value.fold_with(&mut BottomUpFolder { tcx: self.tcx, fldop: |ty| { + if let ty::TyAnon(def_id, substs) = ty.sty { + // Use the same type variable if the exact same TyAnon appears more + // than once in the return type (e.g. if it's pased to a type alias). + if let Some(ty_var) = self.anon_types.borrow().get(&def_id) { + return ty_var; + } + let ty_var = self.next_ty_var(); + self.anon_types.borrow_mut().insert(def_id, ty_var); + + let item_predicates = self.tcx.item_predicates(def_id); + let bounds = item_predicates.instantiate(self.tcx, substs); + + let span = self.tcx.def_span(def_id); + for predicate in bounds.predicates { + // Change the predicate to refer to the type variable, + // which will be the concrete type, instead of the TyAnon. + // This also instantiates nested `impl Trait`. + let predicate = self.instantiate_anon_types(&predicate); + + // Require that the predicate holds for the concrete type. 
+ let cause = traits::ObligationCause::new(span, self.body_id, + traits::ReturnType); + self.register_predicate(traits::Obligation::new(cause, predicate)); + } + + ty_var + } else { + ty + } + }}) + } fn normalize_associated_types_in(&self, span: Span, value: &T) -> T where T : TypeFoldable<'tcx> @@ -1369,11 +1790,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let cause = traits::ObligationCause::new(span, self.body_id, traits::ObligationCauseCode::MiscObligation); - self.inh - .infcx - .fulfillment_cx + self.fulfillment_cx .borrow_mut() - .normalize_projection_type(self.infcx(), + .normalize_projection_type(self, ty::ProjectionTy { trait_ref: trait_ref, item_name: item_name, @@ -1381,96 +1800,27 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { cause) } - /// Instantiates the type in `did` with the generics in `path` and returns - /// it (registering the necessary trait obligations along the way). - /// - /// Note that this function is only intended to be used with type-paths, - /// not with value-paths. - pub fn instantiate_type(&self, - did: DefId, - path: &hir::Path) - -> Ty<'tcx> - { - debug!("instantiate_type(did={:?}, path={:?})", did, path); - let type_scheme = - self.tcx().lookup_item_type(did); - let type_predicates = - self.tcx().lookup_predicates(did); - let substs = astconv::ast_path_substs_for_ty(self, self, - path.span, - PathParamMode::Optional, - &type_scheme.generics, - path.segments.last().unwrap()); - debug!("instantiate_type: ty={:?} substs={:?}", &type_scheme.ty, &substs); - let bounds = - self.instantiate_bounds(path.span, &substs, &type_predicates); - self.add_obligations_for_parameters( - traits::ObligationCause::new( - path.span, - self.body_id, - traits::ItemObligation(did)), - &bounds); - - self.instantiate_type_scheme(path.span, &substs, &type_scheme.ty) + pub fn write_nil(&self, node_id: ast::NodeId) { + self.write_ty(node_id, self.tcx.mk_nil()); } - /// Return the dict-like variant corresponding to a given `Def`. 
- pub fn def_struct_variant(&self, - def: def::Def, - span: Span) - -> Option<(ty::AdtDef<'tcx>, ty::VariantDef<'tcx>)> - { - let (adt, variant) = match def { - def::DefVariant(enum_id, variant_id, _) => { - let adt = self.tcx().lookup_adt_def(enum_id); - (adt, adt.variant_with_id(variant_id)) - } - def::DefTy(did, _) | def::DefStruct(did) => { - let typ = self.tcx().lookup_item_type(did); - if let ty::TyStruct(adt, _) = typ.ty.sty { - (adt, adt.struct_variant()) - } else { - return None; - } - } - _ => return None - }; - - let var_kind = variant.kind(); - if var_kind == ty::VariantKind::Struct { - Some((adt, variant)) - } else if var_kind == ty::VariantKind::Unit { - if !self.tcx().sess.features.borrow().braced_empty_structs { - let mut err = self.tcx().sess.struct_span_err(span, - "empty structs and enum variants \ - with braces are unstable"); - fileline_help!(&mut err, span, "add #![feature(braced_empty_structs)] to \ - the crate features to enable"); - err.emit(); - } - - Some((adt, variant)) - } else { - None - } + pub fn write_never(&self, node_id: ast::NodeId) { + self.write_ty(node_id, self.tcx.types.never); } - pub fn write_nil(&self, node_id: ast::NodeId) { - self.write_ty(node_id, self.tcx().mk_nil()); - } pub fn write_error(&self, node_id: ast::NodeId) { - self.write_ty(node_id, self.tcx().types.err); + self.write_ty(node_id, self.tcx.types.err); } pub fn require_type_meets(&self, ty: Ty<'tcx>, span: Span, code: traits::ObligationCauseCode<'tcx>, - bound: ty::BuiltinBound) + def_id: DefId) { - self.register_builtin_bound( + self.register_bound( ty, - bound, + def_id, traits::ObligationCause::new(span, self.body_id, code)); } @@ -1479,34 +1829,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span: Span, code: traits::ObligationCauseCode<'tcx>) { - self.require_type_meets(ty, span, code, ty::BoundSized); - } - - pub fn require_expr_have_sized_type(&self, - expr: &hir::Expr, - code: traits::ObligationCauseCode<'tcx>) - { - 
self.require_type_is_sized(self.expr_ty(expr), expr.span, code); - } - - pub fn type_is_known_to_be_sized(&self, - ty: Ty<'tcx>, - span: Span) - -> bool - { - traits::type_known_to_meet_builtin_bound(self.infcx(), - ty, - ty::BoundSized, - span) + let lang_item = self.tcx.require_lang_item(lang_items::SizedTraitLangItem); + self.require_type_meets(ty, span, code, lang_item); } - pub fn register_builtin_bound(&self, + pub fn register_bound(&self, ty: Ty<'tcx>, - builtin_bound: ty::BuiltinBound, + def_id: DefId, cause: traits::ObligationCause<'tcx>) { - self.inh.infcx.fulfillment_cx.borrow_mut() - .register_builtin_bound(self.infcx(), ty, builtin_bound, cause); + self.fulfillment_cx.borrow_mut() + .register_bound(self, ty, def_id, cause); } pub fn register_predicate(&self, @@ -1514,51 +1847,38 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { debug!("register_predicate({:?})", obligation); - self.inh.infcx.fulfillment_cx + self.fulfillment_cx .borrow_mut() - .register_predicate_obligation(self.infcx(), obligation); + .register_predicate_obligation(self, obligation); } - pub fn to_ty(&self, ast_t: &hir::Ty) -> Ty<'tcx> { - let t = ast_ty_to_ty(self, self, ast_t); - self.register_wf_obligation(t, ast_t.span, traits::MiscObligation); - t + pub fn register_predicates(&self, + obligations: Vec>) + { + for obligation in obligations { + self.register_predicate(obligation); + } } - pub fn expr_ty(&self, ex: &hir::Expr) -> Ty<'tcx> { - match self.inh.tables.borrow().node_types.get(&ex.id) { - Some(&t) => t, - None => { - self.tcx().sess.bug(&format!("no type for expr in fcx {}", - self.tag())); - } - } + pub fn register_infer_ok_obligations(&self, infer_ok: InferOk<'tcx, T>) -> T { + self.register_predicates(infer_ok.obligations); + infer_ok.value } - /// Apply `adjustment` to the type of `expr` - pub fn adjust_expr_ty(&self, - expr: &hir::Expr, - adjustment: Option<&adjustment::AutoAdjustment<'tcx>>) - -> Ty<'tcx> - { - let raw_ty = self.expr_ty(expr); - let raw_ty = 
self.infcx().shallow_resolve(raw_ty); - let resolve_ty = |ty: Ty<'tcx>| self.infcx().resolve_type_vars_if_possible(&ty); - raw_ty.adjust(self.tcx(), expr.span, expr.id, adjustment, |method_call| { - self.inh.tables.borrow().method_map.get(&method_call) - .map(|method| resolve_ty(method.ty)) - }) + pub fn to_ty(&self, ast_t: &hir::Ty) -> Ty<'tcx> { + let t = AstConv::ast_ty_to_ty(self, self, ast_t); + self.register_wf_obligation(t, ast_t.span, traits::MiscObligation); + t } pub fn node_ty(&self, id: ast::NodeId) -> Ty<'tcx> { - match self.inh.tables.borrow().node_types.get(&id) { + match self.tables.borrow().node_types.get(&id) { Some(&t) => t, - None if self.err_count_since_creation() != 0 => self.tcx().types.err, + None if self.err_count_since_creation() != 0 => self.tcx.types.err, None => { - self.tcx().sess.bug( - &format!("no type for node {}: {} in fcx {}", - id, self.tcx().map.node_to_string(id), - self.tag())); + bug!("no type for node {}: {} in fcx {}", + id, self.tcx.map.node_to_string(id), + self.tag()); } } } @@ -1571,7 +1891,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { &tables.item_substs } - Ref::map(self.inh.tables.borrow(), project_item_susbts) + Ref::map(self.tables.borrow(), project_item_susbts) } pub fn opt_node_ty_substs(&self, @@ -1579,74 +1899,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { f: F) where F: FnOnce(&ty::ItemSubsts<'tcx>), { - match self.inh.tables.borrow().item_substs.get(&id) { - Some(s) => { f(s) } - None => { } + if let Some(s) = self.tables.borrow().item_substs.get(&id) { + f(s); } } - pub fn mk_subty(&self, - a_is_expected: bool, - origin: TypeOrigin, - sub: Ty<'tcx>, - sup: Ty<'tcx>) - -> Result<(), TypeError<'tcx>> { - infer::mk_subty(self.infcx(), a_is_expected, origin, sub, sup) - } - - pub fn mk_eqty(&self, - a_is_expected: bool, - origin: TypeOrigin, - sub: Ty<'tcx>, - sup: Ty<'tcx>) - -> Result<(), TypeError<'tcx>> { - infer::mk_eqty(self.infcx(), a_is_expected, origin, sub, sup) - } - - pub fn mk_subr(&self, - origin: 
infer::SubregionOrigin<'tcx>, - sub: ty::Region, - sup: ty::Region) { - infer::mk_subr(self.infcx(), origin, sub, sup) - } - - pub fn type_error_message(&self, - sp: Span, - mk_msg: M, - actual_ty: Ty<'tcx>, - err: Option<&TypeError<'tcx>>) - where M: FnOnce(String) -> String, - { - self.infcx().type_error_message(sp, mk_msg, actual_ty, err); - } - - pub fn type_error_struct(&self, - sp: Span, - mk_msg: M, - actual_ty: Ty<'tcx>, - err: Option<&TypeError<'tcx>>) - -> DiagnosticBuilder<'tcx> - where M: FnOnce(String) -> String, - { - self.infcx().type_error_struct(sp, mk_msg, actual_ty, err) - } - - pub fn report_mismatched_types(&self, - sp: Span, - e: Ty<'tcx>, - a: Ty<'tcx>, - err: &TypeError<'tcx>) { - self.infcx().report_mismatched_types(sp, e, a, err) - } - /// Registers an obligation for checking later, during regionck, that the type `ty` must /// outlive the region `r`. pub fn register_region_obligation(&self, ty: Ty<'tcx>, - region: ty::Region, + region: &'tcx ty::Region, cause: traits::ObligationCause<'tcx>) { - let mut fulfillment_cx = self.inh.infcx.fulfillment_cx.borrow_mut(); + let mut fulfillment_cx = self.fulfillment_cx.borrow_mut(); fulfillment_cx.register_region_obligation(ty, region, cause); } @@ -1674,13 +1939,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // // FIXME(#27579) all uses of this should be migrated to register_wf_obligation eventually let cause = traits::ObligationCause::new(span, self.body_id, code); - self.register_region_obligation(ty, ty::ReEmpty, cause); + self.register_region_obligation(ty, self.tcx.mk_region(ty::ReEmpty), cause); } /// Registers obligations that all types appearing in `substs` are well-formed. 
pub fn add_wf_bounds(&self, substs: &Substs<'tcx>, expr: &hir::Expr) { - for &ty in &substs.types { + for ty in substs.types() { self.register_wf_obligation(ty, expr.span, traits::MiscObligation); } } @@ -1718,20 +1983,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } // FIXME(arielb1): use this instead of field.ty everywhere + // Only for fields! Returns for methods> + // Indifferent to privacy flags pub fn field_ty(&self, span: Span, - field: ty::FieldDef<'tcx>, + field: &'tcx ty::FieldDef, substs: &Substs<'tcx>) -> Ty<'tcx> { self.normalize_associated_types_in(span, - &field.ty(self.tcx(), substs)) + &field.ty(self.tcx, substs)) } - // Only for fields! Returns for methods> - // Indifferent to privacy flags fn check_casts(&self) { - let mut deferred_cast_checks = self.inh.deferred_cast_checks.borrow_mut(); + let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); for cast in deferred_cast_checks.drain(..) { cast.check(self); } @@ -1740,19 +2005,41 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// Apply "fallbacks" to some types /// ! gets replaced with (), unconstrained ints with i32, and unconstrained floats with f64. fn default_type_parameters(&self) { - use middle::ty::error::UnconstrainedNumeric::Neither; - use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; - for ty in &self.infcx().unsolved_variables() { - let resolved = self.infcx().resolve_type_vars_if_possible(ty); - if self.infcx().type_var_diverges(resolved) { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil()); + use rustc::ty::error::UnconstrainedNumeric::Neither; + use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; + + // Defaulting inference variables becomes very dubious if we have + // encountered type-checking errors. Therefore, if we think we saw + // some errors in this function, just resolve all uninstanted type + // varibles to TyError. 
+ if self.is_tainted_by_errors() { + for ty in &self.unsolved_variables() { + if let ty::TyInfer(_) = self.shallow_resolve(ty).sty { + debug!("default_type_parameters: defaulting `{:?}` to error", ty); + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx().types.err); + } + } + return; + } + + for ty in &self.unsolved_variables() { + let resolved = self.resolve_type_vars_if_possible(ty); + if self.type_var_diverges(resolved) { + debug!("default_type_parameters: defaulting `{:?}` to `!` because it diverges", + resolved); + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, + self.tcx.mk_diverging_default()); } else { - match self.infcx().type_is_unconstrained_numeric(resolved) { + match self.type_is_unconstrained_numeric(resolved) { UnconstrainedInt => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32) + debug!("default_type_parameters: defaulting `{:?}` to `i32`", + resolved); + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.i32) }, UnconstrainedFloat => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64) + debug!("default_type_parameters: defaulting `{:?}` to `f32`", + resolved); + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.f64) } Neither => { } } @@ -1761,7 +2048,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } fn select_all_obligations_and_apply_defaults(&self) { - if self.tcx().sess.features.borrow().default_type_parameter_fallback { + if self.tcx.sess.features.borrow().default_type_parameter_fallback { self.new_select_all_obligations_and_apply_defaults(); } else { self.old_select_all_obligations_and_apply_defaults(); @@ -1776,16 +2063,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } fn new_select_all_obligations_and_apply_defaults(&self) { - use middle::ty::error::UnconstrainedNumeric::Neither; - use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; + use rustc::ty::error::UnconstrainedNumeric::Neither; + use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, 
UnconstrainedFloat}; // For the time being this errs on the side of being memory wasteful but provides better // error reporting. - // let type_variables = self.infcx().type_variables.clone(); + // let type_variables = self.type_variables.clone(); // There is a possibility that this algorithm will have to run an arbitrary number of times // to terminate so we bound it by the compiler's recursion limit. - for _ in 0..self.tcx().sess.recursion_limit.get() { + for _ in 0..self.tcx.sess.recursion_limit.get() { // First we try to solve all obligations, it is possible that the last iteration // has made it possible to make more progress. self.select_obligations_where_possible(); @@ -1793,31 +2080,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let mut conflicts = Vec::new(); // Collect all unsolved type, integral and floating point variables. - let unsolved_variables = self.inh.infcx.unsolved_variables(); + let unsolved_variables = self.unsolved_variables(); // We must collect the defaults *before* we do any unification. Because we have // directly attached defaults to the type variables any unification that occurs // will erase defaults causing conflicting defaults to be completely ignored. - let default_map: FnvHashMap<_, _> = + let default_map: FxHashMap<_, _> = unsolved_variables .iter() - .filter_map(|t| self.infcx().default(t).map(|d| (t, d))) + .filter_map(|t| self.default(t).map(|d| (t, d))) .collect(); - let mut unbound_tyvars = HashSet::new(); + let mut unbound_tyvars = FxHashSet(); debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map); // We loop over the unsolved variables, resolving them and if they are - // and unconstrainted numberic type we add them to the set of unbound + // and unconstrainted numeric type we add them to the set of unbound // variables. We do this so we only apply literal fallback to type // variables without defaults. 
for ty in &unsolved_variables { - let resolved = self.infcx().resolve_type_vars_if_possible(ty); - if self.infcx().type_var_diverges(resolved) { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil()); + let resolved = self.resolve_type_vars_if_possible(ty); + if self.type_var_diverges(resolved) { + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, + self.tcx.mk_diverging_default()); } else { - match self.infcx().type_is_unconstrained_numeric(resolved) { + match self.type_is_unconstrained_numeric(resolved) { UnconstrainedInt | UnconstrainedFloat => { unbound_tyvars.insert(resolved); }, @@ -1830,9 +2118,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // the type variable with a defined fallback. for ty in &unsolved_variables { if let Some(_default) = default_map.get(ty) { - let resolved = self.infcx().resolve_type_vars_if_possible(ty); + let resolved = self.resolve_type_vars_if_possible(ty); - debug!("select_all_obligations_and_apply_defaults: ty: {:?} with default: {:?}", + debug!("select_all_obligations_and_apply_defaults: \ + ty: {:?} with default: {:?}", ty, _default); match resolved.sty { @@ -1869,28 +2158,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // for conflicts and correctly report them. 
- let _ = self.infcx().commit_if_ok(|_: &infer::CombinedSnapshot| { + let _ = self.commit_if_ok(|_: &infer::CombinedSnapshot| { for ty in &unbound_tyvars { - if self.infcx().type_var_diverges(ty) { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil()); + if self.type_var_diverges(ty) { + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, + self.tcx.mk_diverging_default()); } else { - match self.infcx().type_is_unconstrained_numeric(ty) { + match self.type_is_unconstrained_numeric(ty) { UnconstrainedInt => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32) + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.i32) }, UnconstrainedFloat => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64) + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.f64) } Neither => { if let Some(default) = default_map.get(ty) { let default = default.clone(); - match infer::mk_eqty(self.infcx(), false, - TypeOrigin::Misc(default.origin_span), - ty, default.ty) { - Ok(()) => {} - Err(_) => { - conflicts.push((*ty, default)); - } + match self.eq_types(false, + &self.misc(default.origin_span), + ty, + default.ty) { + Ok(ok) => self.register_infer_ok_obligations(ok), + Err(_) => conflicts.push((*ty, default)), } } } @@ -1913,9 +2202,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let conflicting_default = self.find_conflicting_default(&unbound_tyvars, &default_map, conflict) .unwrap_or(type_variable::Default { - ty: self.infcx().next_ty_var(), - origin_span: codemap::DUMMY_SP, - def_id: self.tcx().map.local_def_id(0) // what do I put here? + ty: self.next_ty_var(), + origin_span: syntax_pos::DUMMY_SP, + // what do I put here? 
+ def_id: self.tcx.map.local_def_id(ast::CRATE_NODE_ID) }); // This is to ensure that we elimnate any non-determinism from the error @@ -1929,8 +2219,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; - self.infcx().report_conflicting_default_types( + self.report_conflicting_default_types( first_default.origin_span, + self.body_id, first_default, second_default) } @@ -1945,12 +2236,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // table then apply defaults until we find a conflict. That default must be the one // that caused conflict earlier. fn find_conflicting_default(&self, - unbound_vars: &HashSet>, - default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>, + unbound_vars: &FxHashSet>, + default_map: &FxHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>, conflict: Ty<'tcx>) -> Option> { - use middle::ty::error::UnconstrainedNumeric::Neither; - use middle::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; + use rustc::ty::error::UnconstrainedNumeric::Neither; + use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat}; // Ensure that we apply the conflicting default first let mut unbound_tyvars = Vec::with_capacity(unbound_vars.len() + 1); @@ -1964,23 +2255,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // We also run this inside snapshot that never commits so we can do error // reporting for more then one conflict. 
for ty in &unbound_tyvars { - if self.infcx().type_var_diverges(ty) { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().mk_nil()); + if self.type_var_diverges(ty) { + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, + self.tcx.mk_diverging_default()); } else { - match self.infcx().type_is_unconstrained_numeric(ty) { + match self.type_is_unconstrained_numeric(ty) { UnconstrainedInt => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.i32) + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.i32) }, UnconstrainedFloat => { - demand::eqtype(self, codemap::DUMMY_SP, *ty, self.tcx().types.f64) + self.demand_eqtype(syntax_pos::DUMMY_SP, *ty, self.tcx.types.f64) }, Neither => { if let Some(default) = default_map.get(ty) { let default = default.clone(); - match infer::mk_eqty(self.infcx(), false, - TypeOrigin::Misc(default.origin_span), - ty, default.ty) { - Ok(()) => {} + match self.eq_types(false, + &self.misc(default.origin_span), + ty, + default.ty) { + Ok(ok) => self.register_infer_ok_obligations(ok), Err(_) => { result = Some(default); } @@ -1999,1048 +2292,859 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // upvar inference should have ensured that all deferred call // resolutions are handled by now. - assert!(self.inh.deferred_call_resolutions.borrow().is_empty()); + assert!(self.deferred_call_resolutions.borrow().is_empty()); self.select_all_obligations_and_apply_defaults(); - let mut fulfillment_cx = self.inh.infcx.fulfillment_cx.borrow_mut(); - match fulfillment_cx.select_all_or_error(self.infcx()) { + let mut fulfillment_cx = self.fulfillment_cx.borrow_mut(); + + // Steal the deferred obligations before the fulfillment + // context can turn all of them into errors. 
+ let obligations = fulfillment_cx.take_deferred_obligations(); + self.deferred_obligations.borrow_mut().extend(obligations); + + match fulfillment_cx.select_all_or_error(self) { Ok(()) => { } - Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); } + Err(errors) => { self.report_fulfillment_errors(&errors); } } } /// Select as many obligations as we can at present. fn select_obligations_where_possible(&self) { - match - self.inh.infcx.fulfillment_cx - .borrow_mut() - .select_where_possible(self.infcx()) - { + match self.fulfillment_cx.borrow_mut().select_where_possible(self) { Ok(()) => { } - Err(errors) => { report_fulfillment_errors(self.infcx(), &errors); } + Err(errors) => { self.report_fulfillment_errors(&errors); } } } -} - -impl<'a, 'tcx> RegionScope for FnCtxt<'a, 'tcx> { - fn object_lifetime_default(&self, span: Span) -> Option { - Some(self.base_object_lifetime_default(span)) - } - fn base_object_lifetime_default(&self, span: Span) -> ty::Region { - // RFC #599 specifies that object lifetime defaults take - // precedence over other defaults. But within a fn body we - // don't have a *default* region, rather we use inference to - // find the *correct* region, which is strictly more general - // (and anyway, within a fn body the right region may not even - // be something the user can write explicitly, since it might - // be some expression). - self.infcx().next_region_var(infer::MiscVariable(span)) - } + /// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait + /// returns a type of `&T`, but the actual type we assign to the + /// *expression* is `T`. So this function just peels off the return + /// type by one layer to yield `T`. 
+ fn make_overloaded_lvalue_return_type(&self, + method: MethodCallee<'tcx>) + -> ty::TypeAndMut<'tcx> + { + // extract method return type, which will be &T; + // all LB regions should have been instantiated during method lookup + let ret_ty = method.ty.fn_ret(); + let ret_ty = self.tcx.no_late_bound_regions(&ret_ty).unwrap(); + + // method returns &T, but the type as visible to user is T, so deref + ret_ty.builtin_deref(true, NoPreference).unwrap() + } + + fn lookup_indexing(&self, + expr: &hir::Expr, + base_expr: &'gcx hir::Expr, + base_ty: Ty<'tcx>, + idx_ty: Ty<'tcx>, + lvalue_pref: LvaluePreference) + -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> + { + // FIXME(#18741) -- this is almost but not quite the same as the + // autoderef that normal method probing does. They could likely be + // consolidated. + + let mut autoderef = self.autoderef(base_expr.span, base_ty); + + while let Some((adj_ty, autoderefs)) = autoderef.next() { + if let Some(final_mt) = self.try_index_step( + MethodCall::expr(expr.id), + expr, base_expr, adj_ty, autoderefs, + false, lvalue_pref, idx_ty) + { + autoderef.finalize(lvalue_pref, Some(base_expr)); + return Some(final_mt); + } - fn anon_regions(&self, span: Span, count: usize) - -> Result, Option>> { - Ok((0..count).map(|_| { - self.infcx().next_region_var(infer::MiscVariable(span)) - }).collect()) + if let ty::TyArray(element_ty, _) = adj_ty.sty { + autoderef.finalize(lvalue_pref, Some(base_expr)); + let adjusted_ty = self.tcx.mk_slice(element_ty); + return self.try_index_step( + MethodCall::expr(expr.id), expr, base_expr, + adjusted_ty, autoderefs, true, lvalue_pref, idx_ty); + } + } + autoderef.unambiguous_final_ty(); + None } -} - -/// Whether `autoderef` requires types to resolve. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum UnresolvedTypeAction { - /// Produce an error and return `TyError` whenever a type cannot - /// be resolved (i.e. it is `TyInfer`). 
- Error, - /// Go on without emitting any errors, and return the unresolved - /// type. Useful for probing, e.g. in coercions. - Ignore -} -/// Executes an autoderef loop for the type `t`. At each step, invokes `should_stop` to decide -/// whether to terminate the loop. Returns the final type and number of derefs that it performed. -/// -/// Note: this method does not modify the adjustments table. The caller is responsible for -/// inserting an AutoAdjustment record into the `fcx` using one of the suitable methods. -pub fn autoderef<'a, 'tcx, T, F>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - base_ty: Ty<'tcx>, - opt_expr: Option<&hir::Expr>, - unresolved_type_action: UnresolvedTypeAction, - mut lvalue_pref: LvaluePreference, - mut should_stop: F) - -> (Ty<'tcx>, usize, Option) - where F: FnMut(Ty<'tcx>, usize) -> Option, -{ - debug!("autoderef(base_ty={:?}, opt_expr={:?}, lvalue_pref={:?})", - base_ty, - opt_expr, - lvalue_pref); - - let mut t = base_ty; - for autoderefs in 0..fcx.tcx().sess.recursion_limit.get() { - let resolved_t = match unresolved_type_action { - UnresolvedTypeAction::Error => { - structurally_resolved_type(fcx, sp, t) - } - UnresolvedTypeAction::Ignore => { - // We can continue even when the type cannot be resolved - // (i.e. it is an inference variable) because `Ty::builtin_deref` - // and `try_overloaded_deref` both simply return `None` - // in such a case without producing spurious errors. - fcx.resolve_type_vars_if_possible(t) + /// To type-check `base_expr[index_expr]`, we progressively autoderef + /// (and otherwise adjust) `base_expr`, looking for a type which either + /// supports builtin indexing or overloaded indexing. + /// This loop implements one step in that search; the autoderef loop + /// is implemented by `lookup_indexing`. 
+ fn try_index_step(&self, + method_call: MethodCall, + expr: &hir::Expr, + base_expr: &'gcx hir::Expr, + adjusted_ty: Ty<'tcx>, + autoderefs: usize, + unsize: bool, + lvalue_pref: LvaluePreference, + index_ty: Ty<'tcx>) + -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> + { + let tcx = self.tcx; + debug!("try_index_step(expr={:?}, base_expr.id={:?}, adjusted_ty={:?}, \ + autoderefs={}, unsize={}, index_ty={:?})", + expr, + base_expr, + adjusted_ty, + autoderefs, + unsize, + index_ty); + + let input_ty = self.next_ty_var(); + + // First, try built-in indexing. + match (adjusted_ty.builtin_index(), &index_ty.sty) { + (Some(ty), &ty::TyUint(ast::UintTy::Us)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => { + debug!("try_index_step: success, using built-in indexing"); + // If we had `[T; N]`, we should've caught it before unsizing to `[T]`. + assert!(!unsize); + self.write_autoderef_adjustment(base_expr.id, autoderefs, adjusted_ty); + return Some((tcx.types.usize, ty)); } - }; - if resolved_t.references_error() { - return (resolved_t, autoderefs, None); + _ => {} } - match should_stop(resolved_t, autoderefs) { - Some(x) => return (resolved_t, autoderefs, Some(x)), - None => {} - } + // Try `IndexMut` first, if preferred. 
+ let method = match (lvalue_pref, tcx.lang_items.index_mut_trait()) { + (PreferMutLvalue, Some(trait_did)) => { + self.lookup_method_in_trait_adjusted(expr.span, + Some(&base_expr), + Symbol::intern("index_mut"), + trait_did, + autoderefs, + unsize, + adjusted_ty, + Some(vec![input_ty])) + } + _ => None, + }; - // Otherwise, deref if type is derefable: - let mt = match resolved_t.builtin_deref(false, lvalue_pref) { - Some(mt) => Some(mt), - None => { - let method_call = - opt_expr.map(|expr| MethodCall::autoderef(expr.id, autoderefs as u32)); - - // Super subtle: it might seem as though we should - // pass `opt_expr` to `try_overloaded_deref`, so that - // the (implicit) autoref of using an overloaded deref - // would get added to the adjustment table. However we - // do not do that, because it's kind of a - // "meta-adjustment" -- instead, we just leave it - // unrecorded and know that there "will be" an - // autoref. regionck and other bits of the code base, - // when they encounter an overloaded autoderef, have - // to do some reconstructive surgery. This is a pretty - // complex mess that is begging for a proper MIR. - try_overloaded_deref(fcx, sp, method_call, None, resolved_t, lvalue_pref) + // Otherwise, fall back to `Index`. + let method = match (method, tcx.lang_items.index_trait()) { + (None, Some(trait_did)) => { + self.lookup_method_in_trait_adjusted(expr.span, + Some(&base_expr), + Symbol::intern("index"), + trait_did, + autoderefs, + unsize, + adjusted_ty, + Some(vec![input_ty])) } + (method, _) => method, }; - match mt { - Some(mt) => { - t = mt.ty; - if mt.mutbl == hir::MutImmutable { - lvalue_pref = NoPreference; + + // If some lookup succeeds, write callee into table and extract index/element + // type from the method signature. 
+ // If some lookup succeeded, install method in table + method.map(|method| { + debug!("try_index_step: success, using overloaded indexing"); + self.tables.borrow_mut().method_map.insert(method_call, method); + (input_ty, self.make_overloaded_lvalue_return_type(method).ty) + }) + } + + fn check_method_argument_types(&self, + sp: Span, + method_fn_ty: Ty<'tcx>, + callee_expr: &'gcx hir::Expr, + args_no_rcvr: &'gcx [hir::Expr], + tuple_arguments: TupleArgumentsFlag, + expected: Expectation<'tcx>) + -> Ty<'tcx> { + if method_fn_ty.references_error() { + let err_inputs = self.err_args(args_no_rcvr.len()); + + let err_inputs = match tuple_arguments { + DontTupleArguments => err_inputs, + TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])], + }; + + self.check_argument_types(sp, &err_inputs[..], &[], args_no_rcvr, + false, tuple_arguments); + self.tcx.types.err + } else { + match method_fn_ty.sty { + ty::TyFnDef(.., ref fty) => { + // HACK(eddyb) ignore self in the definition (see above). + let expected_arg_tys = self.expected_types_for_fn_args(sp, expected, + fty.sig.0.output, + &fty.sig.0.inputs[1..]); + self.check_argument_types(sp, &fty.sig.0.inputs[1..], &expected_arg_tys[..], + args_no_rcvr, fty.sig.0.variadic, tuple_arguments); + fty.sig.0.output + } + _ => { + span_bug!(callee_expr.span, "method without bare fn type"); } } - None => return (resolved_t, autoderefs, None) } } - // We've reached the recursion limit, error gracefully. - span_err!(fcx.tcx().sess, sp, E0055, - "reached the recursion limit while auto-dereferencing {:?}", - base_ty); - (fcx.tcx().types.err, 0, None) -} + /// Generic function that factors out common logic from function calls, + /// method calls and overloaded operators. 
+ fn check_argument_types(&self, + sp: Span, + fn_inputs: &[Ty<'tcx>], + expected_arg_tys: &[Ty<'tcx>], + args: &'gcx [hir::Expr], + variadic: bool, + tuple_arguments: TupleArgumentsFlag) { + let tcx = self.tcx; + + // Grab the argument types, supplying fresh type variables + // if the wrong number of arguments were supplied + let supplied_arg_count = if tuple_arguments == DontTupleArguments { + args.len() + } else { + 1 + }; -fn try_overloaded_deref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, - method_call: Option, - base_expr: Option<&hir::Expr>, - base_ty: Ty<'tcx>, - lvalue_pref: LvaluePreference) - -> Option> -{ - // Try DerefMut first, if preferred. - let method = match (lvalue_pref, fcx.tcx().lang_items.deref_mut_trait()) { - (PreferMutLvalue, Some(trait_did)) => { - method::lookup_in_trait(fcx, span, base_expr, - token::intern("deref_mut"), trait_did, - base_ty, None) + // All the input types from the fn signature must outlive the call + // so as to validate implied bounds. + for &fn_input_ty in fn_inputs { + self.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation); } - _ => None - }; - // Otherwise, fall back to Deref. - let method = match (method, fcx.tcx().lang_items.deref_trait()) { - (None, Some(trait_did)) => { - method::lookup_in_trait(fcx, span, base_expr, - token::intern("deref"), trait_did, - base_ty, None) - } - (method, _) => method - }; + let mut expected_arg_tys = expected_arg_tys; + let expected_arg_count = fn_inputs.len(); - make_overloaded_lvalue_return_type(fcx, method_call, method) -} - -/// For the overloaded lvalue expressions (`*x`, `x[3]`), the trait returns a type of `&T`, but the -/// actual type we assign to the *expression* is `T`. So this function just peels off the return -/// type by one layer to yield `T`. It also inserts the `method-callee` into the method map. 
-fn make_overloaded_lvalue_return_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - method_call: Option, - method: Option>) - -> Option> -{ - match method { - Some(method) => { - // extract method return type, which will be &T; - // all LB regions should have been instantiated during method lookup - let ret_ty = method.ty.fn_ret(); - let ret_ty = fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap(); - - if let Some(method_call) = method_call { - fcx.inh.tables.borrow_mut().method_map.insert(method_call, method); - } - - // method returns &T, but the type as visible to user is T, so deref - ret_ty.builtin_deref(true, NoPreference) - } - None => None, - } -} - -fn lookup_indexing<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &hir::Expr, - base_expr: &'tcx hir::Expr, - base_ty: Ty<'tcx>, - idx_ty: Ty<'tcx>, - lvalue_pref: LvaluePreference) - -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> -{ - // FIXME(#18741) -- this is almost but not quite the same as the - // autoderef that normal method probing does. They could likely be - // consolidated. - - let (ty, autoderefs, final_mt) = autoderef(fcx, - base_expr.span, - base_ty, - Some(base_expr), - UnresolvedTypeAction::Error, - lvalue_pref, - |adj_ty, idx| { - try_index_step(fcx, MethodCall::expr(expr.id), expr, base_expr, - adj_ty, idx, false, lvalue_pref, idx_ty) - }); - - if final_mt.is_some() { - return final_mt; - } - - // After we have fully autoderef'd, if the resulting type is [T; n], then - // do a final unsized coercion to yield [T]. - if let ty::TyArray(element_ty, _) = ty.sty { - let adjusted_ty = fcx.tcx().mk_slice(element_ty); - try_index_step(fcx, MethodCall::expr(expr.id), expr, base_expr, - adjusted_ty, autoderefs, true, lvalue_pref, idx_ty) - } else { - None - } -} - -/// To type-check `base_expr[index_expr]`, we progressively autoderef (and otherwise adjust) -/// `base_expr`, looking for a type which either supports builtin indexing or overloaded indexing. 
-/// This loop implements one step in that search; the autoderef loop is implemented by -/// `lookup_indexing`. -fn try_index_step<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - method_call: MethodCall, - expr: &hir::Expr, - base_expr: &'tcx hir::Expr, - adjusted_ty: Ty<'tcx>, - autoderefs: usize, - unsize: bool, - lvalue_pref: LvaluePreference, - index_ty: Ty<'tcx>) - -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> -{ - let tcx = fcx.tcx(); - debug!("try_index_step(expr={:?}, base_expr.id={:?}, adjusted_ty={:?}, \ - autoderefs={}, unsize={}, index_ty={:?})", - expr, - base_expr, - adjusted_ty, - autoderefs, - unsize, - index_ty); - - let input_ty = fcx.infcx().next_ty_var(); - - // First, try built-in indexing. - match (adjusted_ty.builtin_index(), &index_ty.sty) { - (Some(ty), &ty::TyUint(ast::TyUs)) | (Some(ty), &ty::TyInfer(ty::IntVar(_))) => { - debug!("try_index_step: success, using built-in indexing"); - // If we had `[T; N]`, we should've caught it before unsizing to `[T]`. - assert!(!unsize); - fcx.write_autoderef_adjustment(base_expr.id, autoderefs); - return Some((tcx.types.usize, ty)); - } - _ => {} - } - - // Try `IndexMut` first, if preferred. - let method = match (lvalue_pref, tcx.lang_items.index_mut_trait()) { - (PreferMutLvalue, Some(trait_did)) => { - method::lookup_in_trait_adjusted(fcx, - expr.span, - Some(&*base_expr), - token::intern("index_mut"), - trait_did, - autoderefs, - unsize, - adjusted_ty, - Some(vec![input_ty])) - } - _ => None, - }; - - // Otherwise, fall back to `Index`. - let method = match (method, tcx.lang_items.index_trait()) { - (None, Some(trait_did)) => { - method::lookup_in_trait_adjusted(fcx, - expr.span, - Some(&*base_expr), - token::intern("index"), - trait_did, - autoderefs, - unsize, - adjusted_ty, - Some(vec![input_ty])) - } - (method, _) => method, - }; - - // If some lookup succeeds, write callee into table and extract index/element - // type from the method signature. 
- // If some lookup succeeded, install method in table - method.and_then(|method| { - debug!("try_index_step: success, using overloaded indexing"); - make_overloaded_lvalue_return_type(fcx, Some(method_call), Some(method)). - map(|ret| (input_ty, ret.ty)) - }) -} - -fn check_method_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - method_fn_ty: Ty<'tcx>, - callee_expr: &'tcx hir::Expr, - args_no_rcvr: &'tcx [P], - tuple_arguments: TupleArgumentsFlag, - expected: Expectation<'tcx>) - -> ty::FnOutput<'tcx> { - if method_fn_ty.references_error() { - let err_inputs = err_args(fcx.tcx(), args_no_rcvr.len()); - - let err_inputs = match tuple_arguments { - DontTupleArguments => err_inputs, - TupleArguments => vec![fcx.tcx().mk_tup(err_inputs)], + let sp_args = if args.len() > 0 { + let (first, args) = args.split_at(1); + let mut sp_tmp = first[0].span; + for arg in args { + let sp_opt = self.sess().codemap().merge_spans(sp_tmp, arg.span); + if ! sp_opt.is_some() { + break; + } + sp_tmp = sp_opt.unwrap(); + }; + sp_tmp + } else { + sp }; - check_argument_types(fcx, - sp, - &err_inputs[..], - &[], - args_no_rcvr, - false, - tuple_arguments); - ty::FnConverging(fcx.tcx().types.err) - } else { - match method_fn_ty.sty { - ty::TyBareFn(_, ref fty) => { - // HACK(eddyb) ignore self in the definition (see above). 
- let expected_arg_tys = expected_types_for_fn_args(fcx, - sp, - expected, - fty.sig.0.output, - &fty.sig.0.inputs[1..]); - check_argument_types(fcx, - sp, - &fty.sig.0.inputs[1..], - &expected_arg_tys[..], - args_no_rcvr, - fty.sig.0.variadic, - tuple_arguments); - fty.sig.0.output - } - _ => { - fcx.tcx().sess.span_bug(callee_expr.span, - "method without bare fn type"); + fn parameter_count_error<'tcx>(sess: &Session, sp: Span, fn_inputs: &[Ty<'tcx>], + expected_count: usize, arg_count: usize, error_code: &str, + variadic: bool) { + let mut err = sess.struct_span_err_with_code(sp, + &format!("this function takes {}{} parameter{} but {} parameter{} supplied", + if variadic {"at least "} else {""}, + expected_count, + if expected_count == 1 {""} else {"s"}, + arg_count, + if arg_count == 1 {" was"} else {"s were"}), + error_code); + + let input_types = fn_inputs.iter().map(|i| format!("{:?}", i)).collect::>(); + if input_types.len() > 1 { + err.note("the following parameter types were expected:"); + err.note(&input_types.join(", ")); + } else if input_types.len() > 0 { + err.note(&format!("the following parameter type was expected: {}", + input_types[0])); + } else { + err.span_label(sp, &format!("expected {}{} parameter{}", + if variadic {"at least "} else {""}, + expected_count, + if expected_count == 1 {""} else {"s"})); } + err.emit(); } - } -} - -/// Generic function that factors out common logic from function calls, method calls and overloaded -/// operators. 
-fn check_argument_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - fn_inputs: &[Ty<'tcx>], - expected_arg_tys: &[Ty<'tcx>], - args: &'tcx [P], - variadic: bool, - tuple_arguments: TupleArgumentsFlag) { - let tcx = fcx.ccx.tcx; - - // Grab the argument types, supplying fresh type variables - // if the wrong number of arguments were supplied - let supplied_arg_count = if tuple_arguments == DontTupleArguments { - args.len() - } else { - 1 - }; - // All the input types from the fn signature must outlive the call - // so as to validate implied bounds. - for &fn_input_ty in fn_inputs { - fcx.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation); - } - - let mut expected_arg_tys = expected_arg_tys; - let expected_arg_count = fn_inputs.len(); - let formal_tys = if tuple_arguments == TupleArguments { - let tuple_type = structurally_resolved_type(fcx, sp, fn_inputs[0]); - match tuple_type.sty { - ty::TyTuple(ref arg_types) => { - if arg_types.len() != args.len() { - span_err!(tcx.sess, sp, E0057, - "this function takes {} parameter{} but {} parameter{} supplied", - arg_types.len(), - if arg_types.len() == 1 {""} else {"s"}, - args.len(), - if args.len() == 1 {" was"} else {"s were"}); + let formal_tys = if tuple_arguments == TupleArguments { + let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]); + match tuple_type.sty { + ty::TyTuple(arg_types) if arg_types.len() != args.len() => { + parameter_count_error(tcx.sess, sp_args, fn_inputs, arg_types.len(), args.len(), + "E0057", false); expected_arg_tys = &[]; - err_args(fcx.tcx(), args.len()) - } else { + self.err_args(args.len()) + } + ty::TyTuple(arg_types) => { expected_arg_tys = match expected_arg_tys.get(0) { Some(&ty) => match ty.sty { - ty::TyTuple(ref tys) => &**tys, + ty::TyTuple(ref tys) => &tys, _ => &[] }, None => &[] }; - (*arg_types).clone() + arg_types.to_vec() + } + _ => { + span_err!(tcx.sess, sp, E0059, + "cannot use call notation; the first type parameter \ + for the function 
trait is neither a tuple nor unit"); + expected_arg_tys = &[]; + self.err_args(args.len()) } } - _ => { - span_err!(tcx.sess, sp, E0059, - "cannot use call notation; the first type parameter \ - for the function trait is neither a tuple nor unit"); + } else if expected_arg_count == supplied_arg_count { + fn_inputs.to_vec() + } else if variadic { + if supplied_arg_count >= expected_arg_count { + fn_inputs.to_vec() + } else { + parameter_count_error(tcx.sess, sp_args, fn_inputs, expected_arg_count, + supplied_arg_count, "E0060", true); expected_arg_tys = &[]; - err_args(fcx.tcx(), args.len()) + self.err_args(supplied_arg_count) } - } - } else if expected_arg_count == supplied_arg_count { - fn_inputs.to_vec() - } else if variadic { - if supplied_arg_count >= expected_arg_count { - fn_inputs.to_vec() } else { - span_err!(tcx.sess, sp, E0060, - "this function takes at least {} parameter{} \ - but {} parameter{} supplied", - expected_arg_count, - if expected_arg_count == 1 {""} else {"s"}, - supplied_arg_count, - if supplied_arg_count == 1 {" was"} else {"s were"}); + parameter_count_error(tcx.sess, sp_args, fn_inputs, expected_arg_count, + supplied_arg_count, "E0061", false); expected_arg_tys = &[]; - err_args(fcx.tcx(), supplied_arg_count) - } - } else { - span_err!(tcx.sess, sp, E0061, - "this function takes {} parameter{} but {} parameter{} supplied", - expected_arg_count, - if expected_arg_count == 1 {""} else {"s"}, - supplied_arg_count, - if supplied_arg_count == 1 {" was"} else {"s were"}); - expected_arg_tys = &[]; - err_args(fcx.tcx(), supplied_arg_count) - }; - - debug!("check_argument_types: formal_tys={:?}", - formal_tys.iter().map(|t| fcx.infcx().ty_to_string(*t)).collect::>()); - - // Check the arguments. - // We do this in a pretty awful way: first we typecheck any arguments - // that are not anonymous functions, then we typecheck the anonymous - // functions. 
This is so that we have more information about the types - // of arguments when we typecheck the functions. This isn't really the - // right way to do this. - let xs = [false, true]; - let mut any_diverges = false; // has any of the arguments diverged? - let mut warned = false; // have we already warned about unreachable code? - for check_blocks in &xs { - let check_blocks = *check_blocks; - debug!("check_blocks={}", check_blocks); - - // More awful hacks: before we check argument types, try to do - // an "opportunistic" vtable resolution of any trait bounds on - // the call. This helps coercions. - if check_blocks { - fcx.select_obligations_where_possible(); - } - - // For variadic functions, we don't have a declared type for all of - // the arguments hence we only do our usual type checking with - // the arguments who's types we do know. - let t = if variadic { - expected_arg_count - } else if tuple_arguments == TupleArguments { - args.len() - } else { - supplied_arg_count + self.err_args(supplied_arg_count) }; - for (i, arg) in args.iter().take(t).enumerate() { - if any_diverges && !warned { - fcx.ccx - .tcx - .sess - .add_lint(lint::builtin::UNREACHABLE_CODE, - arg.id, - arg.span, - "unreachable expression".to_string()); - warned = true; + + debug!("check_argument_types: formal_tys={:?}", + formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::>()); + + // Check the arguments. + // We do this in a pretty awful way: first we typecheck any arguments + // that are not closures, then we typecheck the closures. This is so + // that we have more information about the types of arguments when we + // typecheck the functions. This isn't really the right way to do this. + for &check_closures in &[false, true] { + debug!("check_closures={}", check_closures); + + // More awful hacks: before we check argument types, try to do + // an "opportunistic" vtable resolution of any trait bounds on + // the call. This helps coercions. 
+ if check_closures { + self.select_obligations_where_possible(); } - let is_block = match arg.node { - hir::ExprClosure(..) => true, - _ => false + + // For variadic functions, we don't have a declared type for all of + // the arguments hence we only do our usual type checking with + // the arguments who's types we do know. + let t = if variadic { + expected_arg_count + } else if tuple_arguments == TupleArguments { + args.len() + } else { + supplied_arg_count }; + for (i, arg) in args.iter().take(t).enumerate() { + // Warn only for the first loop (the "no closures" one). + // Closure arguments themselves can't be diverging, but + // a previous argument can, e.g. `foo(panic!(), || {})`. + if !check_closures { + self.warn_if_unreachable(arg.id, arg.span, "expression"); + } + + let is_closure = match arg.node { + hir::ExprClosure(..) => true, + _ => false + }; + + if is_closure != check_closures { + continue; + } - if is_block == check_blocks { debug!("checking the argument"); let formal_ty = formal_tys[i]; // The special-cased logic below has three functions: // 1. Provide as good of an expected type as possible. let expected = expected_arg_tys.get(i).map(|&ty| { - Expectation::rvalue_hint(fcx.tcx(), ty) + Expectation::rvalue_hint(self, ty) }); - check_expr_with_unifier(fcx, - &**arg, - expected.unwrap_or(ExpectHasType(formal_ty)), - NoPreference, || { - // 2. Coerce to the most detailed type that could be coerced - // to, which is `expected_ty` if `rvalue_hint` returns an - // `ExprHasType(expected_ty)`, or the `formal_ty` otherwise. - let coerce_ty = expected.and_then(|e| e.only_has_type(fcx)); - demand::coerce(fcx, arg.span, coerce_ty.unwrap_or(formal_ty), &**arg); - - // 3. Relate the expected type and the formal one, - // if the expected type was used for the coercion. 
- coerce_ty.map(|ty| demand::suptype(fcx, arg.span, formal_ty, ty)); - }); - } - - if let Some(&arg_ty) = fcx.inh.tables.borrow().node_types.get(&arg.id) { - any_diverges = any_diverges || fcx.infcx().type_var_diverges(arg_ty); + let checked_ty = self.check_expr_with_expectation(&arg, + expected.unwrap_or(ExpectHasType(formal_ty))); + // 2. Coerce to the most detailed type that could be coerced + // to, which is `expected_ty` if `rvalue_hint` returns an + // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise. + let coerce_ty = expected.and_then(|e| e.only_has_type(self)); + self.demand_coerce(&arg, checked_ty, coerce_ty.unwrap_or(formal_ty)); + + // 3. Relate the expected type and the formal one, + // if the expected type was used for the coercion. + coerce_ty.map(|ty| self.demand_suptype(arg.span, formal_ty, ty)); } } - if any_diverges && !warned { - let parent = fcx.ccx.tcx.map.get_parent_node(args[0].id); - fcx.ccx - .tcx - .sess - .add_lint(lint::builtin::UNREACHABLE_CODE, - parent, - sp, - "unreachable call".to_string()); - warned = true; - } - } - - // We also need to make sure we at least write the ty of the other - // arguments which we skipped above. - if variadic { - for arg in args.iter().skip(expected_arg_count) { - check_expr(fcx, &**arg); - - // There are a few types which get autopromoted when passed via varargs - // in C but we just error out instead and require explicit casts. 
- let arg_ty = structurally_resolved_type(fcx, arg.span, - fcx.expr_ty(&**arg)); - match arg_ty.sty { - ty::TyFloat(ast::TyF32) => { - fcx.type_error_message(arg.span, - |t| { - format!("can't pass an {} to variadic \ - function, cast to c_double", t) - }, arg_ty, None); - } - ty::TyInt(ast::TyI8) | ty::TyInt(ast::TyI16) | ty::TyBool => { - fcx.type_error_message(arg.span, |t| { - format!("can't pass {} to variadic \ - function, cast to c_int", - t) - }, arg_ty, None); - } - ty::TyUint(ast::TyU8) | ty::TyUint(ast::TyU16) => { - fcx.type_error_message(arg.span, |t| { - format!("can't pass {} to variadic \ - function, cast to c_uint", - t) - }, arg_ty, None); + // We also need to make sure we at least write the ty of the other + // arguments which we skipped above. + if variadic { + for arg in args.iter().skip(expected_arg_count) { + let arg_ty = self.check_expr(&arg); + + // There are a few types which get autopromoted when passed via varargs + // in C but we just error out instead and require explicit casts. 
+ let arg_ty = self.structurally_resolved_type(arg.span, + arg_ty); + match arg_ty.sty { + ty::TyFloat(ast::FloatTy::F32) => { + self.type_error_message(arg.span, |t| { + format!("can't pass an `{}` to variadic \ + function, cast to `c_double`", t) + }, arg_ty); + } + ty::TyInt(ast::IntTy::I8) | ty::TyInt(ast::IntTy::I16) | ty::TyBool => { + self.type_error_message(arg.span, |t| { + format!("can't pass `{}` to variadic \ + function, cast to `c_int`", + t) + }, arg_ty); + } + ty::TyUint(ast::UintTy::U8) | ty::TyUint(ast::UintTy::U16) => { + self.type_error_message(arg.span, |t| { + format!("can't pass `{}` to variadic \ + function, cast to `c_uint`", + t) + }, arg_ty); + } + ty::TyFnDef(.., f) => { + let ptr_ty = self.tcx.mk_fn_ptr(f); + let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty); + self.type_error_message(arg.span, + |t| { + format!("can't pass `{}` to variadic \ + function, cast to `{}`", t, ptr_ty) + }, arg_ty); + } + _ => {} } - _ => {} } } } -} - -// FIXME(#17596) Ty<'tcx> is incorrectly invariant w.r.t 'tcx. -fn err_args<'tcx>(tcx: &ty::ctxt<'tcx>, len: usize) -> Vec> { - (0..len).map(|_| tcx.types.err).collect() -} -fn write_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_expr: &hir::Expr, - output: ty::FnOutput<'tcx>) { - fcx.write_ty(call_expr.id, match output { - ty::FnConverging(output_ty) => output_ty, - ty::FnDiverging => fcx.infcx().next_diverging_ty_var() - }); -} + fn err_args(&self, len: usize) -> Vec> { + (0..len).map(|_| self.tcx.types.err).collect() + } -// AST fragment checking -fn check_lit<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - lit: &ast::Lit, - expected: Expectation<'tcx>) - -> Ty<'tcx> -{ - let tcx = fcx.ccx.tcx; + // AST fragment checking + fn check_lit(&self, + lit: &ast::Lit, + expected: Expectation<'tcx>) + -> Ty<'tcx> + { + let tcx = self.tcx; - match lit.node { - ast::LitStr(..) 
=> tcx.mk_static_str(), - ast::LitByteStr(ref v) => { - tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), - tcx.mk_array(tcx.types.u8, v.len())) - } - ast::LitByte(_) => tcx.types.u8, - ast::LitChar(_) => tcx.types.char, - ast::LitInt(_, ast::SignedIntLit(t, _)) => tcx.mk_mach_int(t), - ast::LitInt(_, ast::UnsignedIntLit(t)) => tcx.mk_mach_uint(t), - ast::LitInt(_, ast::UnsuffixedIntLit(_)) => { - let opt_ty = expected.to_option(fcx).and_then(|ty| { - match ty.sty { - ty::TyInt(_) | ty::TyUint(_) => Some(ty), - ty::TyChar => Some(tcx.types.u8), - ty::TyRawPtr(..) => Some(tcx.types.usize), - ty::TyBareFn(..) => Some(tcx.types.usize), - _ => None - } - }); - opt_ty.unwrap_or_else( - || tcx.mk_int_var(fcx.infcx().next_int_var_id())) - } - ast::LitFloat(_, t) => tcx.mk_mach_float(t), - ast::LitFloatUnsuffixed(_) => { - let opt_ty = expected.to_option(fcx).and_then(|ty| { - match ty.sty { - ty::TyFloat(_) => Some(ty), - _ => None - } - }); - opt_ty.unwrap_or_else( - || tcx.mk_float_var(fcx.infcx().next_float_var_id())) + match lit.node { + ast::LitKind::Str(..) => tcx.mk_static_str(), + ast::LitKind::ByteStr(ref v) => { + tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), + tcx.mk_array(tcx.types.u8, v.len())) + } + ast::LitKind::Byte(_) => tcx.types.u8, + ast::LitKind::Char(_) => tcx.types.char, + ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t), + ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t), + ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => { + let opt_ty = expected.to_option(self).and_then(|ty| { + match ty.sty { + ty::TyInt(_) | ty::TyUint(_) => Some(ty), + ty::TyChar => Some(tcx.types.u8), + ty::TyRawPtr(..) => Some(tcx.types.usize), + ty::TyFnDef(..) 
| ty::TyFnPtr(_) => Some(tcx.types.usize), + _ => None + } + }); + opt_ty.unwrap_or_else( + || tcx.mk_int_var(self.next_int_var_id())) + } + ast::LitKind::Float(_, t) => tcx.mk_mach_float(t), + ast::LitKind::FloatUnsuffixed(_) => { + let opt_ty = expected.to_option(self).and_then(|ty| { + match ty.sty { + ty::TyFloat(_) => Some(ty), + _ => None + } + }); + opt_ty.unwrap_or_else( + || tcx.mk_float_var(self.next_float_var_id())) + } + ast::LitKind::Bool(_) => tcx.types.bool } - ast::LitBool(_) => tcx.types.bool } -} -fn check_expr_eq_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Ty<'tcx>) { - check_expr_with_unifier( - fcx, expr, ExpectHasType(expected), NoPreference, - || demand::eqtype(fcx, expr.span, expected, fcx.expr_ty(expr))); -} + fn check_expr_eq_type(&self, + expr: &'gcx hir::Expr, + expected: Ty<'tcx>) { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_eqtype(expr.span, expected, ty); + } -pub fn check_expr_has_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Ty<'tcx>) { - check_expr_with_unifier( - fcx, expr, ExpectHasType(expected), NoPreference, - || demand::suptype(fcx, expr.span, expected, fcx.expr_ty(expr))); -} + pub fn check_expr_has_type(&self, + expr: &'gcx hir::Expr, + expected: Ty<'tcx>) -> Ty<'tcx> { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_suptype(expr.span, expected, ty); + ty + } -fn check_expr_coercable_to_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Ty<'tcx>) { - check_expr_with_unifier( - fcx, expr, ExpectHasType(expected), NoPreference, - || demand::coerce(fcx, expr.span, expected, expr)); -} + fn check_expr_coercable_to_type(&self, + expr: &'gcx hir::Expr, + expected: Ty<'tcx>) -> Ty<'tcx> { + let ty = self.check_expr_with_hint(expr, expected); + self.demand_coerce(expr, ty, expected); + ty + } -fn check_expr_with_hint<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, expr: &'tcx hir::Expr, - expected: Ty<'tcx>) { - 
check_expr_with_unifier( - fcx, expr, ExpectHasType(expected), NoPreference, - || ()) -} + fn check_expr_with_hint(&self, expr: &'gcx hir::Expr, + expected: Ty<'tcx>) -> Ty<'tcx> { + self.check_expr_with_expectation(expr, ExpectHasType(expected)) + } -fn check_expr_with_expectation<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Expectation<'tcx>) { - check_expr_with_unifier( - fcx, expr, expected, NoPreference, - || ()) -} + fn check_expr_with_expectation(&self, + expr: &'gcx hir::Expr, + expected: Expectation<'tcx>) -> Ty<'tcx> { + self.check_expr_with_expectation_and_lvalue_pref(expr, expected, NoPreference) + } -fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Expectation<'tcx>, - lvalue_pref: LvaluePreference) -{ - check_expr_with_unifier(fcx, expr, expected, lvalue_pref, || ()) -} + fn check_expr(&self, expr: &'gcx hir::Expr) -> Ty<'tcx> { + self.check_expr_with_expectation(expr, NoExpectation) + } -fn check_expr<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr) { - check_expr_with_unifier(fcx, expr, NoExpectation, NoPreference, || ()) -} + fn check_expr_with_lvalue_pref(&self, expr: &'gcx hir::Expr, + lvalue_pref: LvaluePreference) -> Ty<'tcx> { + self.check_expr_with_expectation_and_lvalue_pref(expr, NoExpectation, lvalue_pref) + } -fn check_expr_with_lvalue_pref<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, expr: &'tcx hir::Expr, - lvalue_pref: LvaluePreference) { - check_expr_with_unifier(fcx, expr, NoExpectation, lvalue_pref, || ()) -} + // determine the `self` type, using fresh variables for all variables + // declared on the impl declaration e.g., `impl for Vec<(A,B)>` + // would return ($0, $1) where $0 and $1 are freshly instantiated type + // variables. 
+ pub fn impl_self_ty(&self, + span: Span, // (potential) receiver for this impl + did: DefId) + -> TypeAndSubsts<'tcx> { + let ity = self.tcx.item_type(did); + debug!("impl_self_ty: ity={:?}", ity); -// determine the `self` type, using fresh variables for all variables -// declared on the impl declaration e.g., `impl for Vec<(A,B)>` -// would return ($0, $1) where $0 and $1 are freshly instantiated type -// variables. -pub fn impl_self_ty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - span: Span, // (potential) receiver for this impl - did: DefId) - -> TypeAndSubsts<'tcx> { - let tcx = fcx.tcx(); - - let ity = tcx.lookup_item_type(did); - let (tps, rps, raw_ty) = - (ity.generics.types.get_slice(subst::TypeSpace), - ity.generics.regions.get_slice(subst::TypeSpace), - ity.ty); - - debug!("impl_self_ty: tps={:?} rps={:?} raw_ty={:?}", tps, rps, raw_ty); - - let rps = fcx.inh.infcx.region_vars_for_defs(span, rps); - let mut substs = subst::Substs::new( - VecPerParamSpace::empty(), - VecPerParamSpace::new(rps, Vec::new(), Vec::new())); - fcx.inh.infcx.type_vars_for_defs(span, ParamSpace::TypeSpace, &mut substs, tps); - let substd_ty = fcx.instantiate_type_scheme(span, &substs, &raw_ty); - - TypeAndSubsts { substs: substs, ty: substd_ty } -} + let substs = self.fresh_substs_for_item(span, did); + let substd_ty = self.instantiate_type_scheme(span, &substs, &ity); -/// Controls whether the arguments are tupled. This is used for the call -/// operator. -/// -/// Tupling means that all call-side arguments are packed into a tuple and -/// passed as a single parameter. 
For example, if tupling is enabled, this -/// function: -/// -/// fn f(x: (isize, isize)) -/// -/// Can be called as: -/// -/// f(1, 2); -/// -/// Instead of: -/// -/// f((1, 2)); -#[derive(Clone, Eq, PartialEq)] -enum TupleArgumentsFlag { - DontTupleArguments, - TupleArguments, -} + TypeAndSubsts { substs: substs, ty: substd_ty } + } -/// Unifies the return type with the expected type early, for more coercions -/// and forward type information on the argument expressions. -fn expected_types_for_fn_args<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - call_span: Span, - expected_ret: Expectation<'tcx>, - formal_ret: ty::FnOutput<'tcx>, - formal_args: &[Ty<'tcx>]) - -> Vec> { - let expected_args = expected_ret.only_has_type(fcx).and_then(|ret_ty| { - if let ty::FnConverging(formal_ret_ty) = formal_ret { - fcx.infcx().commit_regions_if_ok(|| { + /// Unifies the return type with the expected type early, for more coercions + /// and forward type information on the argument expressions. + fn expected_types_for_fn_args(&self, + call_span: Span, + expected_ret: Expectation<'tcx>, + formal_ret: Ty<'tcx>, + formal_args: &[Ty<'tcx>]) + -> Vec> { + let expected_args = expected_ret.only_has_type(self).and_then(|ret_ty| { + self.fudge_regions_if_ok(&RegionVariableOrigin::Coercion(call_span), || { // Attempt to apply a subtyping relationship between the formal // return type (likely containing type variables if the function // is polymorphic) and the expected return type. // No argument expectations are produced if unification fails. - let origin = TypeOrigin::Misc(call_span); - let ures = fcx.infcx().sub_types(false, origin, formal_ret_ty, ret_ty); + let origin = self.misc(call_span); + let ures = self.sub_types(false, &origin, formal_ret, ret_ty); // FIXME(#15760) can't use try! here, FromError doesn't default // to identity so the resulting type is not constrained. 
- if let Err(e) = ures { - return Err(e); + match ures { + Ok(ok) => self.register_infer_ok_obligations(ok), + Err(e) => return Err(e), } // Record all the argument types, with the substitutions // produced from the above subtyping unification. Ok(formal_args.iter().map(|ty| { - fcx.infcx().resolve_type_vars_if_possible(ty) + self.resolve_type_vars_if_possible(ty) }).collect()) }).ok() - } else { - None - } - }).unwrap_or(vec![]); - debug!("expected_types_for_fn_args(formal={:?} -> {:?}, expected={:?} -> {:?})", - formal_args, formal_ret, - expected_args, expected_ret); - expected_args -} - -/// Invariant: -/// If an expression has any sub-expressions that result in a type error, -/// inspecting that expression's type with `ty.references_error()` will return -/// true. Likewise, if an expression is known to diverge, inspecting its -/// type with `ty::type_is_bot` will return true (n.b.: since Rust is -/// strict, _|_ can appear in the type of an expression that does not, -/// itself, diverge: for example, fn() -> _|_.) -/// Note that inspecting a type's structure *directly* may expose the fact -/// that there are actually multiple representations for `TyError`, so avoid -/// that when err needs to be handled differently. -fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - expected: Expectation<'tcx>, - lvalue_pref: LvaluePreference, - unifier: F) where - F: FnOnce(), -{ - debug!(">> typechecking: expr={:?} expected={:?}", - expr, expected); + }).unwrap_or(vec![]); + debug!("expected_types_for_fn_args(formal={:?} -> {:?}, expected={:?} -> {:?})", + formal_args, formal_ret, + expected_args, expected_ret); + expected_args + } // Checks a method call. 
- fn check_method_call<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - method_name: Spanned, - args: &'tcx [P], - tps: &[P], - expected: Expectation<'tcx>, - lvalue_pref: LvaluePreference) { - let rcvr = &*args[0]; - check_expr_with_lvalue_pref(fcx, &*rcvr, lvalue_pref); + fn check_method_call(&self, + expr: &'gcx hir::Expr, + method_name: Spanned, + args: &'gcx [hir::Expr], + tps: &[P], + expected: Expectation<'tcx>, + lvalue_pref: LvaluePreference) -> Ty<'tcx> { + let rcvr = &args[0]; + let rcvr_t = self.check_expr_with_lvalue_pref(&rcvr, lvalue_pref); // no need to check for bot/err -- callee does that - let expr_t = structurally_resolved_type(fcx, - expr.span, - fcx.expr_ty(&*rcvr)); - - let tps = tps.iter().map(|ast_ty| fcx.to_ty(&**ast_ty)).collect::>(); - let fn_ty = match method::lookup(fcx, - method_name.span, - method_name.node, - expr_t, - tps, - expr, - rcvr) { + let expr_t = self.structurally_resolved_type(expr.span, rcvr_t); + + let tps = tps.iter().map(|ast_ty| self.to_ty(&ast_ty)).collect::>(); + let fn_ty = match self.lookup_method(method_name.span, + method_name.node, + expr_t, + tps, + expr, + rcvr) { Ok(method) => { let method_ty = method.ty; let method_call = MethodCall::expr(expr.id); - fcx.inh.tables.borrow_mut().method_map.insert(method_call, method); + self.tables.borrow_mut().method_map.insert(method_call, method); method_ty } Err(error) => { - method::report_error(fcx, method_name.span, expr_t, - method_name.node, Some(rcvr), error); - fcx.write_error(expr.id); - fcx.tcx().types.err + if method_name.node != keywords::Invalid.name() { + self.report_method_error(method_name.span, expr_t, + method_name.node, Some(rcvr), error); + } + self.write_error(expr.id); + self.tcx.types.err } }; // Call the generic checker. 
- let ret_ty = check_method_argument_types(fcx, - method_name.span, - fn_ty, - expr, - &args[1..], - DontTupleArguments, - expected); + let ret_ty = self.check_method_argument_types(method_name.span, fn_ty, + expr, &args[1..], + DontTupleArguments, + expected); - write_call(fcx, expr, ret_ty); + ret_ty } // A generic function for checking the then and else in an if // or if-else. - fn check_then_else<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - cond_expr: &'tcx hir::Expr, - then_blk: &'tcx hir::Block, - opt_else_expr: Option<&'tcx hir::Expr>, - id: ast::NodeId, - sp: Span, - expected: Expectation<'tcx>) { - check_expr_has_type(fcx, cond_expr, fcx.tcx().types.bool); - - let expected = expected.adjust_for_branches(fcx); - check_block_with_expected(fcx, then_blk, expected); - let then_ty = fcx.node_ty(then_blk.id); - - let branches_ty = match opt_else_expr { - Some(ref else_expr) => { - check_expr_with_expectation(fcx, &**else_expr, expected); - let else_ty = fcx.expr_ty(&**else_expr); - infer::common_supertype(fcx.infcx(), - TypeOrigin::IfExpression(sp), - true, - then_ty, - else_ty) - } - None => { - infer::common_supertype(fcx.infcx(), - TypeOrigin::IfExpressionWithNoElse(sp), - false, - then_ty, - fcx.tcx().mk_nil()) - } - }; + fn check_then_else(&self, + cond_expr: &'gcx hir::Expr, + then_blk: &'gcx hir::Block, + opt_else_expr: Option<&'gcx hir::Expr>, + sp: Span, + expected: Expectation<'tcx>) -> Ty<'tcx> { + let cond_ty = self.check_expr_has_type(cond_expr, self.tcx.types.bool); + let cond_diverges = self.diverges.get(); + self.diverges.set(Diverges::Maybe); + + let expected = expected.adjust_for_branches(self); + let then_ty = self.check_block_with_expected(then_blk, expected); + let then_diverges = self.diverges.get(); + self.diverges.set(Diverges::Maybe); + + let unit = self.tcx.mk_nil(); + let (cause, expected_ty, found_ty, result); + if let Some(else_expr) = opt_else_expr { + let else_ty = self.check_expr_with_expectation(else_expr, expected); + let else_diverges = 
self.diverges.get(); + cause = self.cause(sp, ObligationCauseCode::IfExpression); + + // Only try to coerce-unify if we have a then expression + // to assign coercions to, otherwise it's () or diverging. + expected_ty = then_ty; + found_ty = else_ty; + result = if let Some(ref then) = then_blk.expr { + let res = self.try_find_coercion_lub(&cause, || Some(&**then), + then_ty, else_expr, else_ty); + + // In case we did perform an adjustment, we have to update + // the type of the block, because old trans still uses it. + if res.is_ok() { + let adj = self.tables.borrow().adjustments.get(&then.id).cloned(); + if let Some(adj) = adj { + self.write_ty(then_blk.id, adj.target); + } + } + + res + } else { + self.commit_if_ok(|_| { + let trace = TypeTrace::types(&cause, true, then_ty, else_ty); + self.lub(true, trace, &then_ty, &else_ty) + .map(|ok| self.register_infer_ok_obligations(ok)) + }) + }; - let cond_ty = fcx.expr_ty(cond_expr); - let if_ty = if cond_ty.references_error() { - fcx.tcx().types.err + // We won't diverge unless both branches do (or the condition does). + self.diverges.set(cond_diverges | then_diverges & else_diverges); } else { - branches_ty - }; + // If the condition is false we can't diverge. 
+ self.diverges.set(cond_diverges); + + cause = self.cause(sp, ObligationCauseCode::IfExpressionWithNoElse); + expected_ty = unit; + found_ty = then_ty; + result = self.eq_types(true, &cause, unit, then_ty) + .map(|ok| { + self.register_infer_ok_obligations(ok); + unit + }); + } - fcx.write_ty(id, if_ty); + match result { + Ok(ty) => { + if cond_ty.references_error() { + self.tcx.types.err + } else { + ty + } + } + Err(e) => { + self.report_mismatched_types(&cause, expected_ty, found_ty, e); + self.tcx.types.err + } + } } // Check field access expressions - fn check_field<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &'tcx hir::Expr, - lvalue_pref: LvaluePreference, - base: &'tcx hir::Expr, - field: &Spanned) { - check_expr_with_lvalue_pref(fcx, base, lvalue_pref); - let expr_t = structurally_resolved_type(fcx, expr.span, - fcx.expr_ty(base)); - // FIXME(eddyb) #12808 Integrate privacy into this auto-deref loop. - let (_, autoderefs, field_ty) = autoderef(fcx, - expr.span, - expr_t, - Some(base), - UnresolvedTypeAction::Error, - lvalue_pref, - |base_t, _| { - match base_t.sty { - ty::TyStruct(base_def, substs) => { - debug!("struct named {:?}", base_t); - base_def.struct_variant() - .find_field_named(field.node) - .map(|f| fcx.field_ty(expr.span, f, substs)) + fn check_field(&self, + expr: &'gcx hir::Expr, + lvalue_pref: LvaluePreference, + base: &'gcx hir::Expr, + field: &Spanned) -> Ty<'tcx> { + let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref); + let expr_t = self.structurally_resolved_type(expr.span, + expr_t); + let mut private_candidate = None; + let mut autoderef = self.autoderef(expr.span, expr_t); + while let Some((base_t, autoderefs)) = autoderef.next() { + match base_t.sty { + ty::TyAdt(base_def, substs) if !base_def.is_enum() => { + debug!("struct named {:?}", base_t); + if let Some(field) = base_def.struct_variant().find_field_named(field.node) { + let field_ty = self.field_ty(expr.span, field, substs); + if 
field.vis.is_accessible_from(self.body_id, &self.tcx().map) { + autoderef.finalize(lvalue_pref, Some(base)); + self.write_autoderef_adjustment(base.id, autoderefs, base_t); + + self.tcx.check_stability(field.did, expr.id, expr.span); + + return field_ty; + } + private_candidate = Some((base_def.did, field_ty)); } - _ => None } - }); - match field_ty { - Some(field_ty) => { - fcx.write_ty(expr.id, field_ty); - fcx.write_autoderef_adjustment(base.id, autoderefs); - return; + _ => {} } - None => {} } - - if method::exists(fcx, field.span, field.node, expr_t, expr.id) { - fcx.type_error_struct(field.span, - |actual| { - format!("attempted to take value of method `{}` on type \ - `{}`", field.node, actual) - }, - expr_t, None) - .fileline_help(field.span, - "maybe a `()` to call it is missing? \ - If not, try an anonymous function") + autoderef.unambiguous_final_ty(); + + if let Some((did, field_ty)) = private_candidate { + let struct_path = self.tcx().item_path_str(did); + let msg = format!("field `{}` of struct `{}` is private", field.node, struct_path); + let mut err = self.tcx().sess.struct_span_err(expr.span, &msg); + // Also check if an accessible method exists, which is often what is meant. + if self.method_exists(field.span, field.node, expr_t, expr.id, false) { + err.note(&format!("a method `{}` also exists, perhaps you wish to call it", + field.node)); + } + err.emit(); + field_ty + } else if field.node == keywords::Invalid.name() { + self.tcx().types.err + } else if self.method_exists(field.span, field.node, expr_t, expr.id, true) { + self.type_error_struct(field.span, |actual| { + format!("attempted to take value of method `{}` on type \ + `{}`", field.node, actual) + }, expr_t) + .help("maybe a `()` to call it is missing? 
\ + If not, try an anonymous function") .emit(); + self.tcx().types.err } else { - let mut err = fcx.type_error_struct( - expr.span, - |actual| { - format!("attempted access of field `{}` on \ - type `{}`, but no field with that \ - name was found", - field.node, - actual) - }, - expr_t, None); - if let ty::TyStruct(def, _) = expr_t.sty { - suggest_field_names(&mut err, def.struct_variant(), field, vec![]); + let mut err = self.type_error_struct(field.span, |actual| { + format!("no field `{}` on type `{}`", + field.node, actual) + }, expr_t); + match expr_t.sty { + ty::TyAdt(def, _) if !def.is_enum() => { + if let Some(suggested_field_name) = + Self::suggest_field_name(def.struct_variant(), field, vec![]) { + err.span_label(field.span, + &format!("did you mean `{}`?", suggested_field_name)); + } else { + err.span_label(field.span, + &format!("unknown field")); + }; + } + ty::TyRawPtr(..) => { + err.note(&format!("`{0}` is a native pointer; perhaps you need to deref with \ + `(*{0}).{1}`", pprust::expr_to_string(base), field.node)); + } + _ => {} } err.emit(); + self.tcx().types.err } - - fcx.write_error(expr.id); } - // displays hints about the closest matches in field names - fn suggest_field_names<'tcx>(err: &mut DiagnosticBuilder, - variant: ty::VariantDef<'tcx>, - field: &Spanned, - skip : Vec) { + // Return an hint about the closest match in field names + fn suggest_field_name(variant: &'tcx ty::VariantDef, + field: &Spanned, + skip : Vec) + -> Option { let name = field.node.as_str(); - let names = variant.fields - .iter() - .filter_map(|ref field| { - // ignore already set fields and private fields from non-local crates - if skip.iter().any(|x| *x == field.name.as_str()) || - (variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) { - None - } else { - Some(&field.name) - } - }); + let names = variant.fields.iter().filter_map(|field| { + // ignore already set fields and private fields from non-local crates + if skip.iter().any(|x| *x == 
field.name.as_str()) || + (variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) { + None + } else { + Some(&field.name) + } + }); // only find fits with at least one matching letter - if let Some(name) = find_best_match_for_name(names, &name, Some(name.len())) { - err.span_help(field.span, - &format!("did you mean `{}`?", name)); - } + find_best_match_for_name(names, &name, Some(name.len())) } // Check tuple index expressions - fn check_tup_field<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &'tcx hir::Expr, - lvalue_pref: LvaluePreference, - base: &'tcx hir::Expr, - idx: codemap::Spanned) { - check_expr_with_lvalue_pref(fcx, base, lvalue_pref); - let expr_t = structurally_resolved_type(fcx, expr.span, - fcx.expr_ty(base)); + fn check_tup_field(&self, + expr: &'gcx hir::Expr, + lvalue_pref: LvaluePreference, + base: &'gcx hir::Expr, + idx: codemap::Spanned) -> Ty<'tcx> { + let expr_t = self.check_expr_with_lvalue_pref(base, lvalue_pref); + let expr_t = self.structurally_resolved_type(expr.span, + expr_t); + let mut private_candidate = None; let mut tuple_like = false; - // FIXME(eddyb) #12808 Integrate privacy into this auto-deref loop. 
- let (_, autoderefs, field_ty) = autoderef(fcx, - expr.span, - expr_t, - Some(base), - UnresolvedTypeAction::Error, - lvalue_pref, - |base_t, _| { - match base_t.sty { - ty::TyStruct(base_def, substs) => { - tuple_like = base_def.struct_variant().is_tuple_struct(); - if tuple_like { - debug!("tuple struct named {:?}", base_t); - base_def.struct_variant() - .fields - .get(idx.node) - .map(|f| fcx.field_ty(expr.span, f, substs)) + let mut autoderef = self.autoderef(expr.span, expr_t); + while let Some((base_t, autoderefs)) = autoderef.next() { + let field = match base_t.sty { + ty::TyAdt(base_def, substs) if base_def.is_struct() => { + tuple_like = base_def.struct_variant().ctor_kind == CtorKind::Fn; + if !tuple_like { continue } + + debug!("tuple struct named {:?}", base_t); + base_def.struct_variant().fields.get(idx.node).and_then(|field| { + let field_ty = self.field_ty(expr.span, field, substs); + private_candidate = Some((base_def.did, field_ty)); + if field.vis.is_accessible_from(self.body_id, &self.tcx().map) { + self.tcx.check_stability(field.did, expr.id, expr.span); + Some(field_ty) } else { None } - } - ty::TyTuple(ref v) => { - tuple_like = true; - if idx.node < v.len() { Some(v[idx.node]) } else { None } - } - _ => None + }) } - }); - match field_ty { - Some(field_ty) => { - fcx.write_ty(expr.id, field_ty); - fcx.write_autoderef_adjustment(base.id, autoderefs); - return; + ty::TyTuple(ref v) => { + tuple_like = true; + v.get(idx.node).cloned() + } + _ => continue + }; + + if let Some(field_ty) = field { + autoderef.finalize(lvalue_pref, Some(base)); + self.write_autoderef_adjustment(base.id, autoderefs, base_t); + return field_ty; } - None => {} } - fcx.type_error_message( + autoderef.unambiguous_final_ty(); + + if let Some((did, field_ty)) = private_candidate { + let struct_path = self.tcx().item_path_str(did); + let msg = format!("field `{}` of struct `{}` is private", idx.node, struct_path); + self.tcx().sess.span_err(expr.span, &msg); + return 
field_ty; + } + + self.type_error_message( expr.span, |actual| { if tuple_like { @@ -3055,50 +3159,73 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>, actual) } }, - expr_t, None); + expr_t); - fcx.write_error(expr.id); + self.tcx().types.err } - fn report_unknown_field<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - ty: Ty<'tcx>, - variant: ty::VariantDef<'tcx>, - field: &hir::Field, - skip_fields: &[hir::Field]) { - let mut err = fcx.type_error_struct( + fn report_unknown_field(&self, + ty: Ty<'tcx>, + variant: &'tcx ty::VariantDef, + field: &hir::Field, + skip_fields: &[hir::Field], + kind_name: &str) { + let mut err = self.type_error_struct_with_diag( field.name.span, - |actual| if let ty::TyEnum(..) = ty.sty { - format!("struct variant `{}::{}` has no field named `{}`", - actual, variant.name.as_str(), field.name.node) - } else { - format!("structure `{}` has no field named `{}`", - actual, field.name.node) + |actual| match ty.sty { + ty::TyAdt(adt, ..) if adt.is_enum() => { + struct_span_err!(self.tcx.sess, field.name.span, E0559, + "{} `{}::{}` has no field named `{}`", + kind_name, actual, variant.name, field.name.node) + } + _ => { + struct_span_err!(self.tcx.sess, field.name.span, E0560, + "{} `{}` has no field named `{}`", + kind_name, actual, field.name.node) + } }, - ty, - None); + ty); // prevent all specified fields from being suggested let skip_fields = skip_fields.iter().map(|ref x| x.name.node.as_str()); - suggest_field_names(&mut err, variant, &field.name, skip_fields.collect()); + if let Some(field_name) = Self::suggest_field_name(variant, + &field.name, + skip_fields.collect()) { + err.span_label(field.name.span, + &format!("field does not exist - did you mean `{}`?", field_name)); + } else { + match ty.sty { + ty::TyAdt(adt, ..) 
if adt.is_enum() => { + err.span_label(field.name.span, &format!("`{}::{}` does not have this field", + ty, variant.name)); + } + _ => { + err.span_label(field.name.span, &format!("`{}` does not have this field", ty)); + } + } + }; err.emit(); } - fn check_expr_struct_fields<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - adt_ty: Ty<'tcx>, - span: Span, - variant: ty::VariantDef<'tcx>, - ast_fields: &'tcx [hir::Field], - check_completeness: bool) { - let tcx = fcx.ccx.tcx; - let substs = match adt_ty.sty { - ty::TyStruct(_, substs) | ty::TyEnum(_, substs) => substs, - _ => tcx.sess.span_bug(span, "non-ADT passed to check_expr_struct_fields") + fn check_expr_struct_fields(&self, + adt_ty: Ty<'tcx>, + expr_id: ast::NodeId, + span: Span, + variant: &'tcx ty::VariantDef, + ast_fields: &'gcx [hir::Field], + check_completeness: bool) { + let tcx = self.tcx; + let (substs, adt_kind, kind_name) = match adt_ty.sty { + ty::TyAdt(adt, substs) => (substs, adt.adt_kind(), adt.variant_descr()), + _ => span_bug!(span, "non-ADT passed to check_expr_struct_fields") }; - let mut remaining_fields = FnvHashMap(); + let mut remaining_fields = FxHashMap(); for field in &variant.fields { remaining_fields.insert(field.name, field); } + let mut seen_fields = FxHashMap(); + let mut error_happened = false; // Typecheck each field. @@ -3106,1785 +3233,1483 @@ fn check_expr_with_unifier<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>, let expected_field_type; if let Some(v_field) = remaining_fields.remove(&field.name.node) { - expected_field_type = fcx.field_ty(field.span, v_field, substs); + expected_field_type = self.field_ty(field.span, v_field, substs); + + seen_fields.insert(field.name.node, field.span); + + // we don't look at stability attributes on + // struct-like enums (yet...), but it's definitely not + // a bug to have construct one. 
+ if adt_kind != ty::AdtKind::Enum { + tcx.check_stability(v_field.did, expr_id, field.span); + } } else { error_happened = true; expected_field_type = tcx.types.err; if let Some(_) = variant.find_field_named(field.name.node) { - span_err!(fcx.tcx().sess, field.name.span, E0062, - "field `{}` specified more than once", - field.name.node); + let mut err = struct_span_err!(self.tcx.sess, + field.name.span, + E0062, + "field `{}` specified more than once", + field.name.node); + + err.span_label(field.name.span, &format!("used more than once")); + + if let Some(prev_span) = seen_fields.get(&field.name.node) { + err.span_label(*prev_span, &format!("first use of `{}`", field.name.node)); + } + + err.emit(); } else { - report_unknown_field(fcx, adt_ty, variant, field, ast_fields); + self.report_unknown_field(adt_ty, variant, field, ast_fields, kind_name); } } // Make sure to give a type to the field even if there's // an error, so we can continue typechecking - check_expr_coercable_to_type(fcx, &*field.expr, expected_field_type); + self.check_expr_coercable_to_type(&field.expr, expected_field_type); } - // Make sure the programmer specified all the fields. - if check_completeness && - !error_happened && - !remaining_fields.is_empty() - { - span_err!(tcx.sess, span, E0063, - "missing field{} {} in initializer of `{}`", - if remaining_fields.len() == 1 {""} else {"s"}, - remaining_fields.keys() - .map(|n| format!("`{}`", n)) - .collect::>() - .join(", "), - adt_ty); - } + // Make sure the programmer specified correct number of fields. 
+ if kind_name == "union" { + if ast_fields.len() != 1 { + tcx.sess.span_err(span, "union expressions should have exactly one field"); + } + } else if check_completeness && !error_happened && !remaining_fields.is_empty() { + let len = remaining_fields.len(); + let mut displayable_field_names = remaining_fields + .keys() + .map(|x| x.as_str()) + .collect::>(); + + displayable_field_names.sort(); + + let truncated_fields_error = if len <= 3 { + "".to_string() + } else { + format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"}) + }; + + let remaining_fields_names = displayable_field_names.iter().take(3) + .map(|n| format!("`{}`", n)) + .collect::>() + .join(", "); + + struct_span_err!(tcx.sess, span, E0063, + "missing field{} {}{} in initializer of `{}`", + if remaining_fields.len() == 1 {""} else {"s"}, + remaining_fields_names, + truncated_fields_error, + adt_ty) + .span_label(span, &format!("missing {}{}", + remaining_fields_names, + truncated_fields_error)) + .emit(); + } } - fn check_struct_fields_on_error<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - id: ast::NodeId, - fields: &'tcx [hir::Field], - base_expr: &'tcx Option>) { - // Make sure to still write the types - // otherwise we might ICE - fcx.write_error(id); + fn check_struct_fields_on_error(&self, + fields: &'gcx [hir::Field], + base_expr: &'gcx Option>) { for field in fields { - check_expr(fcx, &*field.expr); + self.check_expr(&field.expr); } match *base_expr { - Some(ref base) => check_expr(fcx, &**base), + Some(ref base) => { + self.check_expr(&base); + }, None => {} } } - fn check_expr_struct<'a, 'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &hir::Expr, - path: &hir::Path, - fields: &'tcx [hir::Field], - base_expr: &'tcx Option>) - { - let tcx = fcx.tcx(); - - // Find the relevant variant - let def = lookup_full_def(tcx, path.span, expr.id); - if def == def::DefErr { - check_struct_fields_on_error(fcx, expr.id, fields, base_expr); - return; - } - let (adt, variant) = match fcx.def_struct_variant(def, 
path.span) { - Some((adt, variant)) => (adt, variant), - None => { - span_err!(fcx.tcx().sess, path.span, E0071, - "`{}` does not name a structure", - pprust::path_to_string(path)); - check_struct_fields_on_error(fcx, expr.id, fields, base_expr); - return; - } + pub fn check_struct_path(&self, + qpath: &hir::QPath, + node_id: ast::NodeId) + -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> { + let path_span = match *qpath { + hir::QPath::Resolved(_, ref path) => path.span, + hir::QPath::TypeRelative(ref qself, _) => qself.span }; - - let expr_ty = fcx.instantiate_type(def.def_id(), path); - fcx.write_ty(expr.id, expr_ty); - - check_expr_struct_fields(fcx, expr_ty, expr.span, variant, fields, - base_expr.is_none()); - - if let &Some(ref base_expr) = base_expr { - check_expr_has_type(fcx, base_expr, expr_ty); - if adt.adt_kind() == ty::AdtKind::Enum { - span_err!(tcx.sess, base_expr.span, E0436, - "functional record update syntax requires a struct"); + let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, node_id); + let variant = match def { + Def::Err => { + self.set_tainted_by_errors(); + return None; } - } - } - - type ExprCheckerWithTy = fn(&FnCtxt, &hir::Expr, Ty); - - let tcx = fcx.ccx.tcx; - let id = expr.id; - match expr.node { - hir::ExprBox(ref subexpr) => { - let expected_inner = expected.to_option(fcx).map_or(NoExpectation, |ty| { - match ty.sty { - ty::TyBox(ty) => Expectation::rvalue_hint(tcx, ty), - _ => NoExpectation - } - }); - check_expr_with_expectation(fcx, subexpr, expected_inner); - let referent_ty = fcx.expr_ty(&**subexpr); - fcx.write_ty(id, tcx.mk_box(referent_ty)); - } - - hir::ExprLit(ref lit) => { - let typ = check_lit(fcx, &**lit, expected); - fcx.write_ty(id, typ); - } - hir::ExprBinary(op, ref lhs, ref rhs) => { - op::check_binop(fcx, expr, op, lhs, rhs); - } - hir::ExprAssignOp(op, ref lhs, ref rhs) => { - op::check_binop_assign(fcx, expr, op, lhs, rhs); - } - hir::ExprUnary(unop, ref oprnd) => { - let expected_inner = match 
unop { - hir::UnNot | hir::UnNeg => { - expected - } - hir::UnDeref => { - NoExpectation - } - }; - let lvalue_pref = match unop { - hir::UnDeref => lvalue_pref, - _ => NoPreference - }; - check_expr_with_expectation_and_lvalue_pref( - fcx, &**oprnd, expected_inner, lvalue_pref); - let mut oprnd_t = fcx.expr_ty(&**oprnd); - - if !oprnd_t.references_error() { - match unop { - hir::UnDeref => { - oprnd_t = structurally_resolved_type(fcx, expr.span, oprnd_t); - oprnd_t = match oprnd_t.builtin_deref(true, NoPreference) { - Some(mt) => mt.ty, - None => match try_overloaded_deref(fcx, expr.span, - Some(MethodCall::expr(expr.id)), - Some(&**oprnd), oprnd_t, lvalue_pref) { - Some(mt) => mt.ty, - None => { - fcx.type_error_message(expr.span, |actual| { - format!("type `{}` cannot be \ - dereferenced", actual) - }, oprnd_t, None); - tcx.types.err - } - } - }; - } - hir::UnNot => { - oprnd_t = structurally_resolved_type(fcx, oprnd.span, - oprnd_t); - if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) { - oprnd_t = op::check_user_unop(fcx, "!", "not", - tcx.lang_items.not_trait(), - expr, &**oprnd, oprnd_t, unop); + Def::Variant(..) => { + match ty.sty { + ty::TyAdt(adt, substs) => { + Some((adt.variant_of_def(def), adt.did, substs)) } + _ => bug!("unexpected type: {:?}", ty.sty) } - hir::UnNeg => { - oprnd_t = structurally_resolved_type(fcx, oprnd.span, - oprnd_t); - if !(oprnd_t.is_integral() || oprnd_t.is_fp()) { - oprnd_t = op::check_user_unop(fcx, "-", "neg", - tcx.lang_items.neg_trait(), - expr, &**oprnd, oprnd_t, unop); + } + Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) | + Def::AssociatedTy(..) | Def::SelfTy(..) => { + match def { + Def::AssociatedTy(..) | Def::SelfTy(..) 
+ if !self.tcx.sess.features.borrow().more_struct_aliases => { + emit_feature_err(&self.tcx.sess.parse_sess, + "more_struct_aliases", path_span, GateIssue::Language, + "`Self` and associated types in struct \ + expressions and patterns are unstable"); } + _ => {} } - } - } - fcx.write_ty(id, oprnd_t); - } - hir::ExprAddrOf(mutbl, ref oprnd) => { - let hint = expected.only_has_type(fcx).map_or(NoExpectation, |ty| { - match ty.sty { - ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => { - if fcx.tcx().expr_is_lval(&**oprnd) { - // Lvalues may legitimately have unsized types. - // For example, dereferences of a fat pointer and - // the last field of a struct can be unsized. - ExpectHasType(mt.ty) - } else { - Expectation::rvalue_hint(tcx, mt.ty) + match ty.sty { + ty::TyAdt(adt, substs) if !adt.is_enum() => { + Some((adt.struct_variant(), adt.did, substs)) } + _ => None, } - _ => NoExpectation } - }); - let lvalue_pref = LvaluePreference::from_mutbl(mutbl); - check_expr_with_expectation_and_lvalue_pref(fcx, - &**oprnd, - hint, - lvalue_pref); - - let tm = ty::TypeAndMut { ty: fcx.expr_ty(&**oprnd), mutbl: mutbl }; - let oprnd_t = if tm.ty.references_error() { - tcx.types.err - } else { - // Note: at this point, we cannot say what the best lifetime - // is to use for resulting pointer. We want to use the - // shortest lifetime possible so as to avoid spurious borrowck - // errors. Moreover, the longest lifetime will depend on the - // precise details of the value whose address is being taken - // (and how long it is valid), which we don't know yet until type - // inference is complete. - // - // Therefore, here we simply generate a region variable. The - // region inferencer will then select the ultimate value. - // Finally, borrowck is charged with guaranteeing that the - // value whose address was taken can actually be made to live - // as long as it needs to live. 
- let region = fcx.infcx().next_region_var(infer::AddrOfRegion(expr.span)); - tcx.mk_ref(tcx.mk_region(region), tm) + _ => bug!("unexpected definition: {:?}", def) }; - fcx.write_ty(id, oprnd_t); - } - hir::ExprPath(ref maybe_qself, ref path) => { - let opt_self_ty = maybe_qself.as_ref().map(|qself| { - fcx.to_ty(&qself.ty) - }); - - let path_res = if let Some(&d) = tcx.def_map.borrow().get(&id) { - d - } else if let Some(hir::QSelf { position: 0, .. }) = *maybe_qself { - // Create some fake resolution that can't possibly be a type. - def::PathResolution { - base_def: def::DefMod(tcx.map.local_def_id(ast::CRATE_NODE_ID)), - last_private: LastMod(AllPublic), - depth: path.segments.len() - } - } else { - tcx.sess.span_bug(expr.span, - &format!("unbound path {:?}", expr)) - }; - - if let Some((opt_ty, segments, def)) = - resolve_ty_and_def_ufcs(fcx, path_res, opt_self_ty, path, - expr.span, expr.id) { - if def != def::DefErr { - let (scheme, predicates) = type_scheme_and_predicates_for_def(fcx, - expr.span, - def); - instantiate_path(fcx, - segments, - scheme, - &predicates, - opt_ty, - def, - expr.span, - id); - } else { - fcx.write_ty(id, fcx.tcx().types.err); - } - } - // We always require that the type provided as the value for - // a type parameter outlives the moment of instantiation. 
- fcx.opt_node_ty_substs(expr.id, |item_substs| { - fcx.add_wf_bounds(&item_substs.substs, expr); - }); - } - hir::ExprInlineAsm(ref ia) => { - for &(_, ref input) in &ia.inputs { - check_expr(fcx, &**input); - } - for out in &ia.outputs { - check_expr(fcx, &*out.expr); - } - fcx.write_nil(id); - } - hir::ExprBreak(_) => { fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); } - hir::ExprAgain(_) => { fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); } - hir::ExprRet(ref expr_opt) => { - match fcx.ret_ty { - ty::FnConverging(result_type) => { - match *expr_opt { - None => - if let Err(_) = fcx.mk_eqty(false, TypeOrigin::Misc(expr.span), - result_type, fcx.tcx().mk_nil()) { - span_err!(tcx.sess, expr.span, E0069, - "`return;` in a function whose return type is \ - not `()`"); - }, - Some(ref e) => { - check_expr_coercable_to_type(fcx, &**e, result_type); - } - } - } - ty::FnDiverging => { - if let Some(ref e) = *expr_opt { - check_expr(fcx, &**e); - } - span_err!(tcx.sess, expr.span, E0166, - "`return` in a function declared as diverging"); - } - } - fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); - } - hir::ExprAssign(ref lhs, ref rhs) => { - check_expr_with_lvalue_pref(fcx, &**lhs, PreferMutLvalue); + if let Some((variant, did, substs)) = variant { + // Check bounds on type arguments used in the path. 
+ let bounds = self.instantiate_bounds(path_span, did, substs); + let cause = traits::ObligationCause::new(path_span, self.body_id, + traits::ItemObligation(did)); + self.add_obligations_for_parameters(cause, &bounds); - let tcx = fcx.tcx(); - if !tcx.expr_is_lval(&**lhs) { - span_err!(tcx.sess, expr.span, E0070, - "invalid left-hand side expression"); + Some((variant, ty)) + } else { + struct_span_err!(self.tcx.sess, path_span, E0071, + "expected struct, variant or union type, found {}", + ty.sort_string(self.tcx)) + .span_label(path_span, &format!("not a struct")) + .emit(); + None } + } - let lhs_ty = fcx.expr_ty(&**lhs); - check_expr_coercable_to_type(fcx, &**rhs, lhs_ty); - let rhs_ty = fcx.expr_ty(&**rhs); + fn check_expr_struct(&self, + expr: &hir::Expr, + qpath: &hir::QPath, + fields: &'gcx [hir::Field], + base_expr: &'gcx Option>) -> Ty<'tcx> + { + // Find the relevant variant + let (variant, struct_ty) = + if let Some(variant_ty) = self.check_struct_path(qpath, expr.id) { + variant_ty + } else { + self.check_struct_fields_on_error(fields, base_expr); + return self.tcx.types.err; + }; - fcx.require_expr_have_sized_type(&**lhs, traits::AssignmentLhsSized); + let path_span = match *qpath { + hir::QPath::Resolved(_, ref path) => path.span, + hir::QPath::TypeRelative(ref qself, _) => qself.span + }; - if lhs_ty.references_error() || rhs_ty.references_error() { - fcx.write_error(id); - } else { - fcx.write_nil(id); - } - } - hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => { - check_then_else(fcx, &**cond, &**then_blk, opt_else_expr.as_ref().map(|e| &**e), - id, expr.span, expected); - } - hir::ExprWhile(ref cond, ref body, _) => { - check_expr_has_type(fcx, &**cond, tcx.types.bool); - check_block_no_value(fcx, &**body); - let cond_ty = fcx.expr_ty(&**cond); - let body_ty = fcx.node_ty(body.id); - if cond_ty.references_error() || body_ty.references_error() { - fcx.write_error(id); + self.check_expr_struct_fields(struct_ty, expr.id, path_span, variant, 
fields, + base_expr.is_none()); + if let &Some(ref base_expr) = base_expr { + self.check_expr_has_type(base_expr, struct_ty); + match struct_ty.sty { + ty::TyAdt(adt, substs) if adt.is_struct() => { + self.tables.borrow_mut().fru_field_types.insert( + expr.id, + adt.struct_variant().fields.iter().map(|f| { + self.normalize_associated_types_in( + expr.span, &f.ty(self.tcx, substs) + ) + }).collect() + ); + } + _ => { + span_err!(self.tcx.sess, base_expr.span, E0436, + "functional record update syntax requires a struct"); + } + } } - else { - fcx.write_nil(id); + self.require_type_is_sized(struct_ty, expr.span, traits::StructInitializerSized); + struct_ty + } + + + /// Invariant: + /// If an expression has any sub-expressions that result in a type error, + /// inspecting that expression's type with `ty.references_error()` will return + /// true. Likewise, if an expression is known to diverge, inspecting its + /// type with `ty::type_is_bot` will return true (n.b.: since Rust is + /// strict, _|_ can appear in the type of an expression that does not, + /// itself, diverge: for example, fn() -> _|_.) + /// Note that inspecting a type's structure *directly* may expose the fact + /// that there are actually multiple representations for `TyError`, so avoid + /// that when err needs to be handled differently. + fn check_expr_with_expectation_and_lvalue_pref(&self, + expr: &'gcx hir::Expr, + expected: Expectation<'tcx>, + lvalue_pref: LvaluePreference) -> Ty<'tcx> { + debug!(">> typechecking: expr={:?} expected={:?}", + expr, expected); + + // Warn for expressions after diverging siblings. + self.warn_if_unreachable(expr.id, expr.span, "expression"); + + // Hide the outer diverging and has_errors flags. 
+ let old_diverges = self.diverges.get(); + let old_has_errors = self.has_errors.get(); + self.diverges.set(Diverges::Maybe); + self.has_errors.set(false); + + let ty = self.check_expr_kind(expr, expected, lvalue_pref); + + // Warn for non-block expressions with diverging children. + match expr.node { + hir::ExprBlock(_) | + hir::ExprLoop(..) | hir::ExprWhile(..) | + hir::ExprIf(..) | hir::ExprMatch(..) => {} + + _ => self.warn_if_unreachable(expr.id, expr.span, "expression") } - } - hir::ExprLoop(ref body, _) => { - check_block_no_value(fcx, &**body); - if !may_break(tcx, expr.id, &**body) { - fcx.write_ty(id, fcx.infcx().next_diverging_ty_var()); - } else { - fcx.write_nil(id); - } - } - hir::ExprMatch(ref discrim, ref arms, match_src) => { - _match::check_match(fcx, expr, &**discrim, arms, expected, match_src); - } - hir::ExprClosure(capture, ref decl, ref body) => { - closure::check_expr_closure(fcx, expr, capture, &**decl, &**body, expected); - } - hir::ExprBlock(ref b) => { - check_block_with_expected(fcx, &**b, expected); - fcx.write_ty(id, fcx.node_ty(b.id)); - } - hir::ExprCall(ref callee, ref args) => { - callee::check_call(fcx, expr, &**callee, &args[..], expected); - // we must check that return type of called functions is WF: - let ret_ty = fcx.expr_ty(expr); - fcx.register_wf_obligation(ret_ty, expr.span, traits::MiscObligation); - } - hir::ExprMethodCall(name, ref tps, ref args) => { - check_method_call(fcx, expr, name, &args[..], &tps[..], expected, lvalue_pref); - let arg_tys = args.iter().map(|a| fcx.expr_ty(&**a)); - let args_err = arg_tys.fold(false, |rest_err, a| rest_err || a.references_error()); - if args_err { - fcx.write_error(id); - } - } - hir::ExprCast(ref e, ref t) => { - if let hir::TyFixedLengthVec(_, ref count_expr) = t.node { - check_expr_with_hint(fcx, &**count_expr, tcx.types.usize); + // Record the type, which applies it effects. 
+ // We need to do this after the warning above, so that + // we don't warn for the diverging expression itself. + self.write_ty(expr.id, ty); + + // Combine the diverging and has_error flags. + self.diverges.set(self.diverges.get() | old_diverges); + self.has_errors.set(self.has_errors.get() | old_has_errors); + + debug!("type of expr({}) {} is...", expr.id, + pprust::expr_to_string(expr)); + debug!("... {:?}, expected is {:?}", + ty, + expected); + + // Add adjustments to !-expressions + if ty.is_never() { + if let Some(hir::map::NodeExpr(_)) = self.tcx.map.find(expr.id) { + let adj_ty = self.next_diverging_ty_var(); + self.write_adjustment(expr.id, adjustment::Adjustment { + kind: adjustment::Adjust::NeverToAny, + target: adj_ty + }); + return adj_ty; + } } + ty + } - // Find the type of `e`. Supply hints based on the type we are casting to, - // if appropriate. - let t_cast = fcx.to_ty(t); - let t_cast = structurally_resolved_type(fcx, expr.span, t_cast); - check_expr_with_expectation(fcx, e, ExpectCastableToType(t_cast)); - let t_expr = fcx.expr_ty(e); - let t_cast = fcx.infcx().resolve_type_vars_if_possible(&t_cast); - - // Eagerly check for some obvious errors. - if t_expr.references_error() || t_cast.references_error() { - fcx.write_error(id); - } else if !fcx.type_is_known_to_be_sized(t_cast, expr.span) { - report_cast_to_unsized_type(fcx, expr.span, t.span, e.span, t_cast, t_expr, id); - } else { - // Write a type for the whole expression, assuming everything is going - // to work out Ok. - fcx.write_ty(id, t_cast); - - // Defer other checks until we're done type checking. 
- let mut deferred_cast_checks = fcx.inh.deferred_cast_checks.borrow_mut(); - let cast_check = cast::CastCheck::new((**e).clone(), t_expr, t_cast, expr.span); - deferred_cast_checks.push(cast_check); - } - } - hir::ExprType(ref e, ref t) => { - let typ = fcx.to_ty(&**t); - check_expr_eq_type(fcx, &**e, typ); - fcx.write_ty(id, typ); - } - hir::ExprVec(ref args) => { - let uty = expected.to_option(fcx).and_then(|uty| { - match uty.sty { - ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty), - _ => None - } - }); + fn check_expr_kind(&self, + expr: &'gcx hir::Expr, + expected: Expectation<'tcx>, + lvalue_pref: LvaluePreference) -> Ty<'tcx> { + let tcx = self.tcx; + let id = expr.id; + match expr.node { + hir::ExprBox(ref subexpr) => { + let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| { + match ty.sty { + ty::TyBox(ty) => Expectation::rvalue_hint(self, ty), + _ => NoExpectation + } + }); + let referent_ty = self.check_expr_with_expectation(subexpr, expected_inner); + tcx.mk_box(referent_ty) + } - let typ = match uty { - Some(uty) => { - for e in args { - check_expr_coercable_to_type(fcx, &**e, uty); + hir::ExprLit(ref lit) => { + self.check_lit(&lit, expected) + } + hir::ExprBinary(op, ref lhs, ref rhs) => { + self.check_binop(expr, op, lhs, rhs) + } + hir::ExprAssignOp(op, ref lhs, ref rhs) => { + self.check_binop_assign(expr, op, lhs, rhs) + } + hir::ExprUnary(unop, ref oprnd) => { + let expected_inner = match unop { + hir::UnNot | hir::UnNeg => { + expected } - uty - } - None => { - let t: Ty = fcx.infcx().next_ty_var(); - for e in args { - check_expr_has_type(fcx, &**e, t); + hir::UnDeref => { + NoExpectation } - t - } - }; - let typ = tcx.mk_array(typ, args.len()); - fcx.write_ty(id, typ); - } - hir::ExprRepeat(ref element, ref count_expr) => { - check_expr_has_type(fcx, &**count_expr, tcx.types.usize); - let count = fcx.tcx().eval_repeat_count(&**count_expr); - - let uty = match expected { - ExpectHasType(uty) => { - match uty.sty { - 
ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty), - _ => None + }; + let lvalue_pref = match unop { + hir::UnDeref => lvalue_pref, + _ => NoPreference + }; + let mut oprnd_t = self.check_expr_with_expectation_and_lvalue_pref(&oprnd, + expected_inner, + lvalue_pref); + + if !oprnd_t.references_error() { + match unop { + hir::UnDeref => { + oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t); + + if let Some(mt) = oprnd_t.builtin_deref(true, NoPreference) { + oprnd_t = mt.ty; + } else if let Some(method) = self.try_overloaded_deref( + expr.span, Some(&oprnd), oprnd_t, lvalue_pref) { + oprnd_t = self.make_overloaded_lvalue_return_type(method).ty; + self.tables.borrow_mut().method_map.insert(MethodCall::expr(expr.id), + method); + } else { + self.type_error_message(expr.span, |actual| { + format!("type `{}` cannot be \ + dereferenced", actual) + }, oprnd_t); + oprnd_t = tcx.types.err; + } + } + hir::UnNot => { + oprnd_t = self.structurally_resolved_type(oprnd.span, + oprnd_t); + if !(oprnd_t.is_integral() || oprnd_t.sty == ty::TyBool) { + oprnd_t = self.check_user_unop("!", "not", + tcx.lang_items.not_trait(), + expr, &oprnd, oprnd_t, unop); + } + } + hir::UnNeg => { + oprnd_t = self.structurally_resolved_type(oprnd.span, + oprnd_t); + if !(oprnd_t.is_integral() || oprnd_t.is_fp()) { + oprnd_t = self.check_user_unop("-", "neg", + tcx.lang_items.neg_trait(), + expr, &oprnd, oprnd_t, unop); + } + } } } - _ => None - }; + oprnd_t + } + hir::ExprAddrOf(mutbl, ref oprnd) => { + let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| { + match ty.sty { + ty::TyRef(_, ref mt) | ty::TyRawPtr(ref mt) => { + if self.tcx.expr_is_lval(&oprnd) { + // Lvalues may legitimately have unsized types. + // For example, dereferences of a fat pointer and + // the last field of a struct can be unsized. 
+ ExpectHasType(mt.ty) + } else { + Expectation::rvalue_hint(self, mt.ty) + } + } + _ => NoExpectation + } + }); + let lvalue_pref = LvaluePreference::from_mutbl(mutbl); + let ty = self.check_expr_with_expectation_and_lvalue_pref(&oprnd, hint, lvalue_pref); - let (element_ty, t) = match uty { - Some(uty) => { - check_expr_coercable_to_type(fcx, &**element, uty); - (uty, uty) - } - None => { - let t: Ty = fcx.infcx().next_ty_var(); - check_expr_has_type(fcx, &**element, t); - (fcx.expr_ty(&**element), t) + let tm = ty::TypeAndMut { ty: ty, mutbl: mutbl }; + if tm.ty.references_error() { + tcx.types.err + } else { + // Note: at this point, we cannot say what the best lifetime + // is to use for resulting pointer. We want to use the + // shortest lifetime possible so as to avoid spurious borrowck + // errors. Moreover, the longest lifetime will depend on the + // precise details of the value whose address is being taken + // (and how long it is valid), which we don't know yet until type + // inference is complete. + // + // Therefore, here we simply generate a region variable. The + // region inferencer will then select the ultimate value. + // Finally, borrowck is charged with guaranteeing that the + // value whose address was taken can actually be made to live + // as long as it needs to live. + let region = self.next_region_var(infer::AddrOfRegion(expr.span)); + tcx.mk_ref(region, tm) } - }; + } + hir::ExprPath(ref qpath) => { + let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, + expr.id, expr.span); + let ty = if def != Def::Err { + self.instantiate_value_path(segments, opt_ty, def, expr.span, id) + } else { + self.set_tainted_by_errors(); + tcx.types.err + }; - if count > 1 { - // For [foo, ..n] where n > 1, `foo` must have - // Copy type: - fcx.require_type_meets( - t, - expr.span, - traits::RepeatVec, - ty::BoundCopy); - } + // We always require that the type provided as the value for + // a type parameter outlives the moment of instantiation. 
+ self.opt_node_ty_substs(expr.id, |item_substs| { + self.add_wf_bounds(&item_substs.substs, expr); + }); - if element_ty.references_error() { - fcx.write_error(id); - } else { - let t = tcx.mk_array(t, count); - fcx.write_ty(id, t); - } - } - hir::ExprTup(ref elts) => { - let flds = expected.only_has_type(fcx).and_then(|ty| { - match ty.sty { - ty::TyTuple(ref flds) => Some(&flds[..]), - _ => None - } - }); - let mut err_field = false; - - let elt_ts = elts.iter().enumerate().map(|(i, e)| { - let t = match flds { - Some(ref fs) if i < fs.len() => { - let ety = fs[i]; - check_expr_coercable_to_type(fcx, &**e, ety); - ety - } - _ => { - check_expr_with_expectation(fcx, &**e, NoExpectation); - fcx.expr_ty(&**e) - } - }; - err_field = err_field || t.references_error(); - t - }).collect(); - if err_field { - fcx.write_error(id); - } else { - let typ = tcx.mk_tup(elt_ts); - fcx.write_ty(id, typ); - } - } - hir::ExprStruct(ref path, ref fields, ref base_expr) => { - check_expr_struct(fcx, expr, path, fields, base_expr); - - fcx.require_expr_have_sized_type(expr, traits::StructInitializerSized); - } - hir::ExprField(ref base, ref field) => { - check_field(fcx, expr, lvalue_pref, &**base, field); - } - hir::ExprTupField(ref base, idx) => { - check_tup_field(fcx, expr, lvalue_pref, &**base, idx); - } - hir::ExprIndex(ref base, ref idx) => { - check_expr_with_lvalue_pref(fcx, &**base, lvalue_pref); - check_expr(fcx, &**idx); - - let base_t = fcx.expr_ty(&**base); - let idx_t = fcx.expr_ty(&**idx); - - if base_t.references_error() { - fcx.write_ty(id, base_t); - } else if idx_t.references_error() { - fcx.write_ty(id, idx_t); - } else { - let base_t = structurally_resolved_type(fcx, expr.span, base_t); - match lookup_indexing(fcx, expr, base, base_t, idx_t, lvalue_pref) { - Some((index_ty, element_ty)) => { - let idx_expr_ty = fcx.expr_ty(idx); - demand::eqtype(fcx, expr.span, index_ty, idx_expr_ty); - fcx.write_ty(id, element_ty); - } - None => { - check_expr_has_type(fcx, 
&**idx, fcx.tcx().types.err); - fcx.type_error_message( - expr.span, - |actual| { - format!("cannot index a value of type `{}`", - actual) - }, - base_t, - None); - fcx.write_ty(id, fcx.tcx().types.err); - } - } + ty } - } - hir::ExprRange(ref start, ref end) => { - let t_start = start.as_ref().map(|e| { - check_expr(fcx, &**e); - fcx.expr_ty(&**e) - }); - let t_end = end.as_ref().map(|e| { - check_expr(fcx, &**e); - fcx.expr_ty(&**e) - }); - - let idx_type = match (t_start, t_end) { - (Some(ty), None) | (None, Some(ty)) => { - Some(ty) - } - (Some(t_start), Some(t_end)) if (t_start.references_error() || - t_end.references_error()) => { - Some(fcx.tcx().types.err) + hir::ExprInlineAsm(_, ref outputs, ref inputs) => { + for output in outputs { + self.check_expr(output); } - (Some(t_start), Some(t_end)) => { - Some(infer::common_supertype(fcx.infcx(), - TypeOrigin::RangeExpression(expr.span), - true, - t_start, - t_end)) + for input in inputs { + self.check_expr(input); } - _ => None - }; - - // Note that we don't check the type of start/end satisfy any - // bounds because right now the range structs do not have any. If we add - // some bounds, then we'll need to check `t_start` against them here. + tcx.mk_nil() + } + hir::ExprBreak(label, ref expr_opt) => { + let loop_id = label.map(|l| l.loop_id); + let coerce_to = { + let mut enclosing_loops = self.enclosing_loops.borrow_mut(); + enclosing_loops.find_loop(loop_id).map(|ctxt| ctxt.coerce_to) + }; + if let Some(coerce_to) = coerce_to { + let e_ty; + let cause; + if let Some(ref e) = *expr_opt { + // Recurse without `enclosing_loops` borrowed. + e_ty = self.check_expr_with_hint(e, coerce_to); + cause = self.misc(e.span); + // Notably, the recursive call may alter coerce_to - must not keep using it! + } else { + // `break` without argument acts like `break ()`. 
+ e_ty = tcx.mk_nil(); + cause = self.misc(expr.span); + } + let mut enclosing_loops = self.enclosing_loops.borrow_mut(); + let ctxt = enclosing_loops.find_loop(loop_id).unwrap(); - let range_type = match idx_type { - Some(idx_type) if idx_type.references_error() => { - fcx.tcx().types.err - } - Some(idx_type) => { - // Find the did from the appropriate lang item. - let did = match (start, end) { - (&Some(_), &Some(_)) => tcx.lang_items.range_struct(), - (&Some(_), &None) => tcx.lang_items.range_from_struct(), - (&None, &Some(_)) => tcx.lang_items.range_to_struct(), - (&None, &None) => { - tcx.sess.span_bug(expr.span, "full range should be dealt with above") - } - }; + let result = if let Some(ref e) = *expr_opt { + // Special-case the first element, as it has no "previous expressions". + let result = if !ctxt.may_break { + self.try_coerce(e, e_ty, ctxt.coerce_to) + } else { + self.try_find_coercion_lub(&cause, || ctxt.break_exprs.iter().cloned(), + ctxt.unified, e, e_ty) + }; - if let Some(did) = did { - let def = tcx.lookup_adt_def(did); - let predicates = tcx.lookup_predicates(did); - let substs = Substs::new_type(vec![idx_type], vec![]); - let bounds = fcx.instantiate_bounds(expr.span, &substs, &predicates); - fcx.add_obligations_for_parameters( - traits::ObligationCause::new(expr.span, - fcx.body_id, - traits::ItemObligation(did)), - &bounds); - - tcx.mk_struct(def, tcx.mk_substs(substs)) + ctxt.break_exprs.push(e); + result } else { - span_err!(tcx.sess, expr.span, E0236, "no lang item for range syntax"); - fcx.tcx().types.err + self.eq_types(true, &cause, e_ty, ctxt.unified) + .map(|InferOk { obligations, .. 
}| { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + e_ty + }) + }; + match result { + Ok(ty) => ctxt.unified = ty, + Err(err) => { + self.report_mismatched_types(&cause, ctxt.unified, e_ty, err); + } } + + ctxt.may_break = true; } - None => { - // Neither start nor end => RangeFull - if let Some(did) = tcx.lang_items.range_full_struct() { - tcx.mk_struct( - tcx.lookup_adt_def(did), - tcx.mk_substs(Substs::empty()) - ) - } else { - span_err!(tcx.sess, expr.span, E0237, "no lang item for range syntax"); - fcx.tcx().types.err + // Otherwise, we failed to find the enclosing loop; this can only happen if the + // `break` was not inside a loop at all, which is caught by the loop-checking pass. + tcx.types.never + } + hir::ExprAgain(_) => { tcx.types.never } + hir::ExprRet(ref expr_opt) => { + if let Some(ref e) = *expr_opt { + self.check_expr_coercable_to_type(&e, self.ret_ty); + } else { + match self.eq_types(false, + &self.misc(expr.span), + self.ret_ty, + tcx.mk_nil()) + { + Ok(ok) => self.register_infer_ok_obligations(ok), + Err(_) => { + struct_span_err!(tcx.sess, expr.span, E0069, + "`return;` in a function whose return type is not `()`") + .span_label(expr.span, &format!("return type is not ()")) + .emit(); + } } } - }; - - fcx.write_ty(id, range_type); - } + tcx.types.never + } + hir::ExprAssign(ref lhs, ref rhs) => { + let lhs_ty = self.check_expr_with_lvalue_pref(&lhs, PreferMutLvalue); + + let tcx = self.tcx; + if !tcx.expr_is_lval(&lhs) { + struct_span_err!( + tcx.sess, expr.span, E0070, + "invalid left-hand side expression") + .span_label( + expr.span, + &format!("left-hand of expression not valid")) + .emit(); + } - } + let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty); - debug!("type of expr({}) {} is...", expr.id, - pprust::expr_to_string(expr)); - debug!("... 
{:?}, expected is {:?}", - fcx.expr_ty(expr), - expected); + self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized); - unifier(); -} + if lhs_ty.references_error() || rhs_ty.references_error() { + tcx.types.err + } else { + tcx.mk_nil() + } + } + hir::ExprIf(ref cond, ref then_blk, ref opt_else_expr) => { + self.check_then_else(&cond, &then_blk, opt_else_expr.as_ref().map(|e| &**e), + expr.span, expected) + } + hir::ExprWhile(ref cond, ref body, _) => { + let unified = self.tcx.mk_nil(); + let coerce_to = unified; + let ctxt = LoopCtxt { + unified: unified, + coerce_to: coerce_to, + break_exprs: vec![], + may_break: true, + }; + self.with_loop_ctxt(expr.id, ctxt, || { + self.check_expr_has_type(&cond, tcx.types.bool); + let cond_diverging = self.diverges.get(); + self.check_block_no_value(&body); -pub fn resolve_ty_and_def_ufcs<'a, 'b, 'tcx>(fcx: &FnCtxt<'b, 'tcx>, - path_res: def::PathResolution, - opt_self_ty: Option>, - path: &'a hir::Path, - span: Span, - node_id: ast::NodeId) - -> Option<(Option>, - &'a [hir::PathSegment], - def::Def)> -{ + // We may never reach the body so it diverging means nothing. + self.diverges.set(cond_diverging); + }); - // If fully resolved already, we don't have to do anything. - if path_res.depth == 0 { - Some((opt_self_ty, &path.segments, path_res.base_def)) - } else { - let mut def = path_res.base_def; - let ty_segments = path.segments.split_last().unwrap().1; - let base_ty_end = path.segments.len() - path_res.depth; - let ty = astconv::finish_resolving_def_to_ty(fcx, fcx, span, - PathParamMode::Optional, - &mut def, - opt_self_ty, - &ty_segments[..base_ty_end], - &ty_segments[base_ty_end..]); - let item_segment = path.segments.last().unwrap(); - let item_name = item_segment.identifier.name; - match method::resolve_ufcs(fcx, span, item_name, ty, node_id) { - Ok((def, lp)) => { - // Write back the new resolution. 
- fcx.ccx.tcx.def_map.borrow_mut() - .insert(node_id, def::PathResolution { - base_def: def, - last_private: path_res.last_private.or(lp), - depth: 0 - }); - Some((Some(ty), slice::ref_slice(item_segment), def)) - } - Err(error) => { - method::report_error(fcx, span, ty, - item_name, None, error); - fcx.write_error(node_id); - None + if self.has_errors.get() { + tcx.types.err + } else { + tcx.mk_nil() } - } - } -} + } + hir::ExprLoop(ref body, _, _) => { + let unified = self.next_ty_var(); + let coerce_to = expected.only_has_type(self).unwrap_or(unified); + let ctxt = LoopCtxt { + unified: unified, + coerce_to: coerce_to, + break_exprs: vec![], + may_break: false, + }; -impl<'tcx> Expectation<'tcx> { - /// Provide an expectation for an rvalue expression given an *optional* - /// hint, which is not required for type safety (the resulting type might - /// be checked higher up, as is the case with `&expr` and `box expr`), but - /// is useful in determining the concrete type. - /// - /// The primary use case is where the expected type is a fat pointer, - /// like `&[isize]`. For example, consider the following statement: - /// - /// let x: &[isize] = &[1, 2, 3]; - /// - /// In this case, the expected type for the `&[1, 2, 3]` expression is - /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the - /// expectation `ExpectHasType([isize])`, that would be too strong -- - /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`. - /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced - /// to the type `&[isize]`. Therefore, we propagate this more limited hint, - /// which still is useful, because it informs integer literals and the like. - /// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169 - /// for examples of where this comes up,. - fn rvalue_hint(tcx: &ty::ctxt<'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> { - match tcx.struct_tail(ty).sty { - ty::TySlice(_) | ty::TyTrait(..) 
=> { - ExpectRvalueLikeUnsized(ty) - } - _ => ExpectHasType(ty) - } - } + let ctxt = self.with_loop_ctxt(expr.id, ctxt, || { + self.check_block_no_value(&body); + }); + if ctxt.may_break { + // No way to know whether it's diverging because + // of a `break` or an outer `break` or `return. + self.diverges.set(Diverges::Maybe); - // Resolves `expected` by a single level if it is a variable. If - // there is no expected type or resolution is not possible (e.g., - // no constraints yet present), just returns `None`. - fn resolve<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> { - match self { - NoExpectation => { - NoExpectation - } - ExpectCastableToType(t) => { - ExpectCastableToType( - fcx.infcx().resolve_type_vars_if_possible(&t)) - } - ExpectHasType(t) => { - ExpectHasType( - fcx.infcx().resolve_type_vars_if_possible(&t)) + ctxt.unified + } else { + tcx.types.never } - ExpectRvalueLikeUnsized(t) => { - ExpectRvalueLikeUnsized( - fcx.infcx().resolve_type_vars_if_possible(&t)) + } + hir::ExprMatch(ref discrim, ref arms, match_src) => { + self.check_match(expr, &discrim, arms, expected, match_src) + } + hir::ExprClosure(capture, ref decl, body_id, _) => { + self.check_expr_closure(expr, capture, &decl, body_id, expected) + } + hir::ExprBlock(ref b) => { + self.check_block_with_expected(&b, expected) + } + hir::ExprCall(ref callee, ref args) => { + self.check_call(expr, &callee, args, expected) + } + hir::ExprMethodCall(name, ref tps, ref args) => { + self.check_method_call(expr, name, args, &tps[..], expected, lvalue_pref) + } + hir::ExprCast(ref e, ref t) => { + if let hir::TyArray(_, ref count_expr) = t.node { + self.check_expr_with_hint(&count_expr, tcx.types.usize); } - } - } - fn to_option<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Option> { - match self.resolve(fcx) { - NoExpectation => None, - ExpectCastableToType(ty) | - ExpectHasType(ty) | - ExpectRvalueLikeUnsized(ty) => Some(ty), - } - } + // Find the type of `e`. 
Supply hints based on the type we are casting to, + // if appropriate. + let t_cast = self.to_ty(t); + let t_cast = self.resolve_type_vars_if_possible(&t_cast); + let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast)); + let t_cast = self.resolve_type_vars_if_possible(&t_cast); - fn only_has_type<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Option> { - match self.resolve(fcx) { - ExpectHasType(ty) => Some(ty), - _ => None - } - } -} - -pub fn check_decl_initializer<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - local: &'tcx hir::Local, - init: &'tcx hir::Expr) -{ - let ref_bindings = fcx.tcx().pat_contains_ref_binding(&local.pat); - - let local_ty = fcx.local_ty(init.span, local.id); - if let Some(m) = ref_bindings { - // Somewhat subtle: if we have a `ref` binding in the pattern, - // we want to avoid introducing coercions for the RHS. This is - // both because it helps preserve sanity and, in the case of - // ref mut, for soundness (issue #23116). In particular, in - // the latter case, we need to be clear that the type of the - // referent for the reference that results is *equal to* the - // type of the lvalue it is referencing, and not some - // supertype thereof. - check_expr_with_lvalue_pref(fcx, init, LvaluePreference::from_mutbl(m)); - let init_ty = fcx.expr_ty(init); - demand::eqtype(fcx, init.span, init_ty, local_ty); - } else { - check_expr_coercable_to_type(fcx, init, local_ty) - }; -} + // Eagerly check for some obvious errors. + if t_expr.references_error() || t_cast.references_error() { + tcx.types.err + } else { + // Defer other checks until we're done type checking. 
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); + match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) { + Ok(cast_check) => { + deferred_cast_checks.push(cast_check); + t_cast + } + Err(ErrorReported) => { + tcx.types.err + } + } + } + } + hir::ExprType(ref e, ref t) => { + let typ = self.to_ty(&t); + self.check_expr_eq_type(&e, typ); + typ + } + hir::ExprArray(ref args) => { + let uty = expected.to_option(self).and_then(|uty| { + match uty.sty { + ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty), + _ => None + } + }); -pub fn check_decl_local<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, local: &'tcx hir::Local) { - let tcx = fcx.ccx.tcx; + let mut unified = self.next_ty_var(); + let coerce_to = uty.unwrap_or(unified); - let t = fcx.local_ty(local.span, local.id); - fcx.write_ty(local.id, t); + for (i, e) in args.iter().enumerate() { + let e_ty = self.check_expr_with_hint(e, coerce_to); + let cause = self.misc(e.span); - if let Some(ref init) = local.init { - check_decl_initializer(fcx, local, &**init); - let init_ty = fcx.expr_ty(&**init); - if init_ty.references_error() { - fcx.write_ty(local.id, init_ty); - } - } - - let pcx = pat_ctxt { - fcx: fcx, - map: pat_id_map(&tcx.def_map, &*local.pat), - }; - _match::check_pat(&pcx, &*local.pat, t); - let pat_ty = fcx.node_ty(local.pat.id); - if pat_ty.references_error() { - fcx.write_ty(local.id, pat_ty); - } -} + // Special-case the first element, as it has no "previous expressions". 
+ let result = if i == 0 { + self.try_coerce(e, e_ty, coerce_to) + } else { + let prev_elems = || args[..i].iter().map(|e| &*e); + self.try_find_coercion_lub(&cause, prev_elems, unified, e, e_ty) + }; -pub fn check_stmt<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, stmt: &'tcx hir::Stmt) { - let node_id; - let mut saw_bot = false; - let mut saw_err = false; - match stmt.node { - hir::StmtDecl(ref decl, id) => { - node_id = id; - match decl.node { - hir::DeclLocal(ref l) => { - check_decl_local(fcx, &**l); - let l_t = fcx.node_ty(l.id); - saw_bot = saw_bot || fcx.infcx().type_var_diverges(l_t); - saw_err = saw_err || l_t.references_error(); + match result { + Ok(ty) => unified = ty, + Err(e) => { + self.report_mismatched_types(&cause, unified, e_ty, e); + } + } + } + tcx.mk_array(unified, args.len()) } - hir::DeclItem(_) => {/* ignore for now */ } - } - } - hir::StmtExpr(ref expr, id) => { - node_id = id; - // Check with expected type of () - check_expr_has_type(fcx, &**expr, fcx.tcx().mk_nil()); - let expr_ty = fcx.expr_ty(&**expr); - saw_bot = saw_bot || fcx.infcx().type_var_diverges(expr_ty); - saw_err = saw_err || expr_ty.references_error(); - } - hir::StmtSemi(ref expr, id) => { - node_id = id; - check_expr(fcx, &**expr); - let expr_ty = fcx.expr_ty(&**expr); - saw_bot |= fcx.infcx().type_var_diverges(expr_ty); - saw_err |= expr_ty.references_error(); - } - } - if saw_bot { - fcx.write_ty(node_id, fcx.infcx().next_diverging_ty_var()); - } - else if saw_err { - fcx.write_error(node_id); - } - else { - fcx.write_nil(node_id) - } -} - -pub fn check_block_no_value<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, blk: &'tcx hir::Block) { - check_block_with_expected(fcx, blk, ExpectHasType(fcx.tcx().mk_nil())); - let blkty = fcx.node_ty(blk.id); - if blkty.references_error() { - fcx.write_error(blk.id); - } else { - let nilty = fcx.tcx().mk_nil(); - demand::suptype(fcx, blk.span, nilty, blkty); - } -} - -fn check_block_with_expected<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - blk: &'tcx hir::Block, - 
expected: Expectation<'tcx>) { - let prev = { - let mut fcx_ps = fcx.ps.borrow_mut(); - let unsafety_state = fcx_ps.recurse(blk); - replace(&mut *fcx_ps, unsafety_state) - }; + hir::ExprRepeat(ref element, ref count_expr) => { + self.check_expr_has_type(&count_expr, tcx.types.usize); + let count = eval_length(self.tcx.global_tcx(), &count_expr, "repeat count") + .unwrap_or(0); + + let uty = match expected { + ExpectHasType(uty) => { + match uty.sty { + ty::TyArray(ty, _) | ty::TySlice(ty) => Some(ty), + _ => None + } + } + _ => None + }; - let mut warned = false; - let mut any_diverges = false; - let mut any_err = false; - for s in &blk.stmts { - check_stmt(fcx, s); - let s_id = ::rustc_front::util::stmt_id(s); - let s_ty = fcx.node_ty(s_id); - if any_diverges && !warned && match s.node { - hir::StmtDecl(ref decl, _) => { - match decl.node { - hir::DeclLocal(_) => true, - _ => false, + let (element_ty, t) = match uty { + Some(uty) => { + self.check_expr_coercable_to_type(&element, uty); + (uty, uty) + } + None => { + let t: Ty = self.next_ty_var(); + let element_ty = self.check_expr_has_type(&element, t); + (element_ty, t) } + }; + + if count > 1 { + // For [foo, ..n] where n > 1, `foo` must have + // Copy type: + let lang_item = self.tcx.require_lang_item(lang_items::CopyTraitLangItem); + self.require_type_meets(t, expr.span, traits::RepeatVec, lang_item); } - hir::StmtExpr(_, _) | hir::StmtSemi(_, _) => true, - } { - fcx.ccx - .tcx - .sess - .add_lint(lint::builtin::UNREACHABLE_CODE, - s_id, - s.span, - "unreachable statement".to_string()); - warned = true; - } - any_diverges = any_diverges || fcx.infcx().type_var_diverges(s_ty); - any_err = any_err || s_ty.references_error(); - } - match blk.expr { - None => if any_err { - fcx.write_error(blk.id); - } else if any_diverges { - fcx.write_ty(blk.id, fcx.infcx().next_diverging_ty_var()); - } else { - fcx.write_nil(blk.id); - }, - Some(ref e) => { - if any_diverges && !warned { - fcx.ccx - .tcx - .sess - 
.add_lint(lint::builtin::UNREACHABLE_CODE, - e.id, - e.span, - "unreachable expression".to_string()); + + if element_ty.references_error() { + tcx.types.err + } else { + tcx.mk_array(t, count) } - let ety = match expected { - ExpectHasType(ety) => { - check_expr_coercable_to_type(fcx, &**e, ety); - ety - } - _ => { - check_expr_with_expectation(fcx, &**e, expected); - fcx.expr_ty(&**e) + } + hir::ExprTup(ref elts) => { + let flds = expected.only_has_type(self).and_then(|ty| { + match ty.sty { + ty::TyTuple(ref flds) => Some(&flds[..]), + _ => None } - }; + }); - if any_err { - fcx.write_error(blk.id); - } else if any_diverges { - fcx.write_ty(blk.id, fcx.infcx().next_diverging_ty_var()); + let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| { + let t = match flds { + Some(ref fs) if i < fs.len() => { + let ety = fs[i]; + self.check_expr_coercable_to_type(&e, ety); + ety + } + _ => { + self.check_expr_with_expectation(&e, NoExpectation) + } + }; + t + }); + let tuple = tcx.mk_tup(elt_ts_iter); + if tuple.references_error() { + tcx.types.err } else { - fcx.write_ty(blk.id, ety); + tuple } + } + hir::ExprStruct(ref qpath, ref fields, ref base_expr) => { + self.check_expr_struct(expr, qpath, fields, base_expr) + } + hir::ExprField(ref base, ref field) => { + self.check_field(expr, lvalue_pref, &base, field) + } + hir::ExprTupField(ref base, idx) => { + self.check_tup_field(expr, lvalue_pref, &base, idx) + } + hir::ExprIndex(ref base, ref idx) => { + let base_t = self.check_expr_with_lvalue_pref(&base, lvalue_pref); + let idx_t = self.check_expr(&idx); + + if base_t.references_error() { + base_t + } else if idx_t.references_error() { + idx_t + } else { + let base_t = self.structurally_resolved_type(expr.span, base_t); + match self.lookup_indexing(expr, base, base_t, idx_t, lvalue_pref) { + Some((index_ty, element_ty)) => { + self.demand_eqtype(expr.span, index_ty, idx_t); + element_ty + } + None => { + self.check_expr_has_type(&idx, self.tcx.types.err); + let mut err 
= self.type_error_struct( + expr.span, + |actual| { + format!("cannot index a value of type `{}`", + actual) + }, + base_t); + // Try to give some advice about indexing tuples. + if let ty::TyTuple(_) = base_t.sty { + let mut needs_note = true; + // If the index is an integer, we can show the actual + // fixed expression: + if let hir::ExprLit(ref lit) = idx.node { + if let ast::LitKind::Int(i, + ast::LitIntType::Unsuffixed) = lit.node { + let snip = tcx.sess.codemap().span_to_snippet(base.span); + if let Ok(snip) = snip { + err.span_suggestion(expr.span, + "to access tuple elements, \ + use tuple indexing syntax \ + as shown", + format!("{}.{}", snip, i)); + needs_note = false; + } + } + } + if needs_note { + err.help("to access tuple elements, use tuple indexing \ + syntax (e.g. `tuple.0`)"); + } + } + err.emit(); + self.tcx.types.err + } + } + } + } } - }; - - *fcx.ps.borrow_mut() = prev; -} - -/// Checks a constant appearing in a type. At the moment this is just the -/// length expression in a fixed-length vector, but someday it might be -/// extended to type-level numeric literals. -fn check_const_in_type<'a,'tcx>(ccx: &'a CrateCtxt<'a,'tcx>, - expr: &'tcx hir::Expr, - expected_type: Ty<'tcx>) { - let tables = RefCell::new(ty::Tables::empty()); - let inh = static_inherited_fields(ccx, &tables); - let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(expected_type), expr.id); - check_const_with_ty(&fcx, expr.span, expr, expected_type); -} + } -fn check_const<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - sp: Span, - e: &'tcx hir::Expr, - id: ast::NodeId) { - let tables = RefCell::new(ty::Tables::empty()); - let inh = static_inherited_fields(ccx, &tables); - let rty = ccx.tcx.node_id_to_type(id); - let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), e.id); - let declty = fcx.ccx.tcx.lookup_item_type(ccx.tcx.map.local_def_id(id)).ty; - check_const_with_ty(&fcx, sp, e, declty); -} + // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary. 
+ // The newly resolved definition is written into `type_relative_path_defs`. + fn finish_resolving_struct_path(&self, + qpath: &hir::QPath, + path_span: Span, + node_id: ast::NodeId) + -> (Def, Ty<'tcx>) + { + match *qpath { + hir::QPath::Resolved(ref maybe_qself, ref path) => { + let opt_self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself)); + let ty = AstConv::def_to_ty(self, self, opt_self_ty, path, node_id, true); + (path.def, ty) + } + hir::QPath::TypeRelative(ref qself, ref segment) => { + let ty = self.to_ty(qself); -fn check_const_with_ty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - _: Span, - e: &'tcx hir::Expr, - declty: Ty<'tcx>) { - // Gather locals in statics (because of block expressions). - // This is technically unnecessary because locals in static items are forbidden, - // but prevents type checking from blowing up before const checking can properly - // emit an error. - GatherLocalsVisitor { fcx: fcx }.visit_expr(e); - - check_expr_with_hint(fcx, e, declty); - demand::coerce(fcx, e.span, declty, e); - - fcx.select_all_obligations_and_apply_defaults(); - upvar::closure_analyze_const(&fcx, e); - fcx.select_obligations_where_possible(); - fcx.check_casts(); - fcx.select_all_obligations_or_error(); - - regionck::regionck_expr(fcx, e); - writeback::resolve_type_vars_in_expr(fcx, e); -} + let def = if let hir::TyPath(hir::QPath::Resolved(_, ref path)) = qself.node { + path.def + } else { + Def::Err + }; + let (ty, def) = AstConv::associated_path_def_to_ty(self, node_id, path_span, + ty, def, segment); -/// Checks whether a type can be represented in memory. In particular, it -/// identifies types that contain themselves without indirection through a -/// pointer, which would mean their size is unbounded. -pub fn check_representable(tcx: &ty::ctxt, - sp: Span, - item_id: ast::NodeId, - _designation: &str) -> bool { - let rty = tcx.node_id_to_type(item_id); + // Write back the new resolution. 
+ self.tables.borrow_mut().type_relative_path_defs.insert(node_id, def); - // Check that it is possible to represent this type. This call identifies - // (1) types that contain themselves and (2) types that contain a different - // recursive type. It is only necessary to throw an error on those that - // contain themselves. For case 2, there must be an inner type that will be - // caught by case 1. - match rty.is_representable(tcx, sp) { - Representability::SelfRecursive => { - let item_def_id = tcx.map.local_def_id(item_id); - traits::recursive_type_with_infinite_size_error(tcx, item_def_id).emit(); - return false + (def, ty) + } } - Representability::Representable | Representability::ContainsRecursive => (), } - return true -} -pub fn check_simd(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) { - let t = tcx.node_id_to_type(id); - match t.sty { - ty::TyStruct(def, substs) => { - let fields = &def.struct_variant().fields; - if fields.is_empty() { - span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty"); - return; + // Resolve associated value path into a base type and associated constant or method definition. + // The newly resolved definition is written into `type_relative_path_defs`. 
+ pub fn resolve_ty_and_def_ufcs<'b>(&self, + qpath: &'b hir::QPath, + node_id: ast::NodeId, + span: Span) + -> (Def, Option>, &'b [hir::PathSegment]) + { + let (ty, item_segment) = match *qpath { + hir::QPath::Resolved(ref opt_qself, ref path) => { + return (path.def, + opt_qself.as_ref().map(|qself| self.to_ty(qself)), + &path.segments[..]); } - let e = fields[0].ty(tcx, substs); - if !fields.iter().all(|f| f.ty(tcx, substs) == e) { - span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous"); - return; + hir::QPath::TypeRelative(ref qself, ref segment) => { + (self.to_ty(qself), segment) } - match e.sty { - ty::TyParam(_) => { /* struct(T, T, T, T) is ok */ } - _ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ } - _ => { - span_err!(tcx.sess, sp, E0077, - "SIMD vector element type should be machine type"); - return; + }; + let item_name = item_segment.name; + let def = match self.resolve_ufcs(span, item_name, ty, node_id) { + Ok(def) => def, + Err(error) => { + let def = match error { + method::MethodError::PrivateMatch(def) => def, + _ => Def::Err, + }; + if item_name != keywords::Invalid.name() { + self.report_method_error(span, ty, item_name, None, error); } + def } - } - _ => () + }; + + // Write back the new resolution. 
+ self.tables.borrow_mut().type_relative_path_defs.insert(node_id, def); + (def, Some(ty), slice::ref_slice(&**item_segment)) } -} -pub fn check_enum_variants<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - sp: Span, - vs: &'tcx [hir::Variant], - id: ast::NodeId) { - // disr_in_range should be removed once we have forced type hints for consts - fn disr_in_range(ccx: &CrateCtxt, - ty: attr::IntType, - disr: ty::Disr) -> bool { - fn uint_in_range(ccx: &CrateCtxt, ty: ast::UintTy, disr: ty::Disr) -> bool { - match ty { - ast::TyU8 => disr as u8 as Disr == disr, - ast::TyU16 => disr as u16 as Disr == disr, - ast::TyU32 => disr as u32 as Disr == disr, - ast::TyU64 => disr as u64 as Disr == disr, - ast::TyUs => uint_in_range(ccx, ccx.tcx.sess.target.uint_type, disr) - } - } - fn int_in_range(ccx: &CrateCtxt, ty: ast::IntTy, disr: ty::Disr) -> bool { - match ty { - ast::TyI8 => disr as i8 as Disr == disr, - ast::TyI16 => disr as i16 as Disr == disr, - ast::TyI32 => disr as i32 as Disr == disr, - ast::TyI64 => disr as i64 as Disr == disr, - ast::TyIs => int_in_range(ccx, ccx.tcx.sess.target.int_type, disr) - } - } - match ty { - attr::UnsignedInt(ty) => uint_in_range(ccx, ty, disr), - attr::SignedInt(ty) => int_in_range(ccx, ty, disr) + pub fn check_decl_initializer(&self, + local: &'gcx hir::Local, + init: &'gcx hir::Expr) -> Ty<'tcx> + { + let ref_bindings = local.pat.contains_ref_binding(); + + let local_ty = self.local_ty(init.span, local.id); + if let Some(m) = ref_bindings { + // Somewhat subtle: if we have a `ref` binding in the pattern, + // we want to avoid introducing coercions for the RHS. This is + // both because it helps preserve sanity and, in the case of + // ref mut, for soundness (issue #23116). In particular, in + // the latter case, we need to be clear that the type of the + // referent for the reference that results is *equal to* the + // type of the lvalue it is referencing, and not some + // supertype thereof. 
+ let init_ty = self.check_expr_with_lvalue_pref(init, LvaluePreference::from_mutbl(m)); + self.demand_eqtype(init.span, init_ty, local_ty); + init_ty + } else { + self.check_expr_coercable_to_type(init, local_ty) } } - fn do_check<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - vs: &'tcx [hir::Variant], - id: ast::NodeId, - hint: attr::ReprAttr) { - #![allow(trivial_numeric_casts)] + pub fn check_decl_local(&self, local: &'gcx hir::Local) { + let t = self.local_ty(local.span, local.id); + self.write_ty(local.id, t); - let rty = ccx.tcx.node_id_to_type(id); - let mut disr_vals: Vec = Vec::new(); - - let tables = RefCell::new(ty::Tables::empty()); - let inh = static_inherited_fields(ccx, &tables); - let fcx = blank_fn_ctxt(ccx, &inh, ty::FnConverging(rty), id); - - let (_, repr_type_ty) = ccx.tcx.enum_repr_type(Some(&hint)); - for v in vs { - if let Some(ref e) = v.node.disr_expr { - check_const_with_ty(&fcx, e.span, e, repr_type_ty); + if let Some(ref init) = local.init { + let init_ty = self.check_decl_initializer(local, &init); + if init_ty.references_error() { + self.write_ty(local.id, init_ty); } } - let def_id = ccx.tcx.map.local_def_id(id); - - let variants = &ccx.tcx.lookup_adt_def(def_id).variants; - for (v, variant) in vs.iter().zip(variants.iter()) { - let current_disr_val = variant.disr_val; + self.check_pat(&local.pat, t); + let pat_ty = self.node_ty(local.pat.id); + if pat_ty.references_error() { + self.write_ty(local.id, pat_ty); + } + } - // Check for duplicate discriminant values - match disr_vals.iter().position(|&x| x == current_disr_val) { - Some(i) => { - let mut err = struct_span_err!(ccx.tcx.sess, v.span, E0081, - "discriminant value `{}` already exists", disr_vals[i]); - let variant_i_node_id = ccx.tcx.map.as_local_node_id(variants[i].did).unwrap(); - span_note!(&mut err, ccx.tcx.map.span(variant_i_node_id), - "conflicting discriminant here"); - err.emit(); - } - None => {} - } - // Check for unrepresentable discriminant values - match hint { - 
attr::ReprAny | attr::ReprExtern => (), - attr::ReprInt(sp, ity) => { - if !disr_in_range(ccx, ity, current_disr_val) { - let mut err = struct_span_err!(ccx.tcx.sess, v.span, E0082, - "discriminant value outside specified type"); - span_note!(&mut err, sp, - "discriminant type specified here"); - err.emit(); + pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) { + // Don't do all the complex logic below for DeclItem. + match stmt.node { + hir::StmtDecl(ref decl, id) => { + match decl.node { + hir::DeclLocal(_) => {} + hir::DeclItem(_) => { + self.write_nil(id); + return; } } - attr::ReprSimd => { - ccx.tcx.sess.bug("range_to_inttype: found ReprSimd on an enum"); - } - attr::ReprPacked => { - ccx.tcx.sess.bug("range_to_inttype: found ReprPacked on an enum"); - } } - disr_vals.push(current_disr_val); + hir::StmtExpr(..) | hir::StmtSemi(..) => {} } - } - let def_id = ccx.tcx.map.local_def_id(id); - let hint = *ccx.tcx.lookup_repr_hints(def_id).get(0).unwrap_or(&attr::ReprAny); + self.warn_if_unreachable(stmt.node.id(), stmt.span, "statement"); - if hint != attr::ReprAny && vs.len() <= 1 { - if vs.len() == 1 { - span_err!(ccx.tcx.sess, sp, E0083, - "unsupported representation for univariant enum"); - } else { - span_err!(ccx.tcx.sess, sp, E0084, - "unsupported representation for zero-variant enum"); + // Hide the outer diverging and has_errors flags. 
+ let old_diverges = self.diverges.get(); + let old_has_errors = self.has_errors.get(); + self.diverges.set(Diverges::Maybe); + self.has_errors.set(false); + + let node_id = match stmt.node { + hir::StmtDecl(ref decl, id) => { + match decl.node { + hir::DeclLocal(ref l) => { + self.check_decl_local(&l); + } + hir::DeclItem(_) => {/* ignore for now */ } + } + id + } + hir::StmtExpr(ref expr, id) => { + // Check with expected type of () + self.check_expr_has_type(&expr, self.tcx.mk_nil()); + id + } + hir::StmtSemi(ref expr, id) => { + self.check_expr(&expr); + id + } }; - } - do_check(ccx, vs, id, hint); + if self.has_errors.get() { + self.write_error(node_id); + } else if self.diverges.get().always() { + self.write_ty(node_id, self.next_diverging_ty_var()); + } else { + self.write_nil(node_id); + } - check_representable(ccx.tcx, sp, id, "enum"); -} + // Combine the diverging and has_error flags. + self.diverges.set(self.diverges.get() | old_diverges); + self.has_errors.set(self.has_errors.get() | old_has_errors); + } -// Returns the type parameter count and the type for the given definition. -fn type_scheme_and_predicates_for_def<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - defn: def::Def) - -> (TypeScheme<'tcx>, GenericPredicates<'tcx>) { - match defn { - def::DefLocal(_, nid) | def::DefUpvar(_, nid, _, _) => { - let typ = fcx.local_ty(sp, nid); - (ty::TypeScheme { generics: ty::Generics::empty(), ty: typ }, - ty::GenericPredicates::empty()) - } - def::DefFn(id, _) | def::DefMethod(id) | - def::DefStatic(id, _) | def::DefVariant(_, id, _) | - def::DefStruct(id) | def::DefConst(id) | def::DefAssociatedConst(id) => { - (fcx.tcx().lookup_item_type(id), fcx.tcx().lookup_predicates(id)) - } - def::DefTrait(_) | - def::DefTy(..) | - def::DefAssociatedTy(..) | - def::DefPrimTy(_) | - def::DefTyParam(..) | - def::DefMod(..) | - def::DefForeignMod(..) | - def::DefLabel(..) | - def::DefSelfTy(..) 
| - def::DefErr => { - fcx.ccx.tcx.sess.span_bug(sp, &format!("expected value, found {:?}", defn)); - } + pub fn check_block_no_value(&self, blk: &'gcx hir::Block) { + let unit = self.tcx.mk_nil(); + let ty = self.check_block_with_expected(blk, ExpectHasType(unit)); + self.demand_suptype(blk.span, unit, ty); } -} -// Instantiates the given path, which must refer to an item with the given -// number of type parameters and type. -pub fn instantiate_path<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - segments: &[hir::PathSegment], - type_scheme: TypeScheme<'tcx>, - type_predicates: &ty::GenericPredicates<'tcx>, - opt_self_ty: Option>, - def: def::Def, - span: Span, - node_id: ast::NodeId) { - debug!("instantiate_path(path={:?}, def={:?}, node_id={}, type_scheme={:?})", - segments, - def, - node_id, - type_scheme); - - // We need to extract the type parameters supplied by the user in - // the path `path`. Due to the current setup, this is a bit of a - // tricky-process; the problem is that resolve only tells us the - // end-point of the path resolution, and not the intermediate steps. - // Luckily, we can (at least for now) deduce the intermediate steps - // just from the end-point. - // - // There are basically four cases to consider: - // - // 1. Reference to a *type*, such as a struct or enum: - // - // mod a { struct Foo { ... } } - // - // Because we don't allow types to be declared within one - // another, a path that leads to a type will always look like - // `a::b::Foo` where `a` and `b` are modules. This implies - // that only the final segment can have type parameters, and - // they are located in the TypeSpace. - // - // *Note:* Generally speaking, references to types don't - // actually pass through this function, but rather the - // `ast_ty_to_ty` function in `astconv`. However, in the case - // of struct patterns (and maybe literals) we do invoke - // `instantiate_path` to get the general type of an instance of - // a struct. 
(In these cases, there are actually no type - // parameters permitted at present, but perhaps we will allow - // them in the future.) - // - // 1b. Reference to an enum variant or tuple-like struct: - // - // struct foo(...) - // enum E { foo(...) } - // - // In these cases, the parameters are declared in the type - // space. - // - // 2. Reference to a *fn item*: - // - // fn foo() { } - // - // In this case, the path will again always have the form - // `a::b::foo::` where only the final segment should have - // type parameters. However, in this case, those parameters are - // declared on a value, and hence are in the `FnSpace`. - // - // 3. Reference to a *method*: - // - // impl SomeStruct { - // fn foo(...) - // } - // - // Here we can have a path like - // `a::b::SomeStruct::::foo::`, in which case parameters - // may appear in two places. The penultimate segment, - // `SomeStruct::`, contains parameters in TypeSpace, and the - // final segment, `foo::` contains parameters in fn space. - // - // 4. Reference to an *associated const*: - // - // impl AnotherStruct { - // const FOO: B = BAR; - // } - // - // The path in this case will look like - // `a::b::AnotherStruct::::FOO`, so the penultimate segment - // only will have parameters in TypeSpace. - // - // The first step then is to categorize the segments appropriately. - - assert!(!segments.is_empty()); - - let mut ufcs_associated = None; - let mut segment_spaces: Vec<_>; - match def { - // Case 1 and 1b. Reference to a *type* or *enum variant*. - def::DefSelfTy(..) | - def::DefStruct(..) | - def::DefVariant(..) | - def::DefTy(..) | - def::DefAssociatedTy(..) | - def::DefTrait(..) | - def::DefPrimTy(..) | - def::DefTyParam(..) => { - // Everything but the final segment should have no - // parameters at all. 
- segment_spaces = vec![None; segments.len() - 1]; - segment_spaces.push(Some(subst::TypeSpace)); - } + fn check_block_with_expected(&self, + blk: &'gcx hir::Block, + expected: Expectation<'tcx>) -> Ty<'tcx> { + let prev = { + let mut fcx_ps = self.ps.borrow_mut(); + let unsafety_state = fcx_ps.recurse(blk); + replace(&mut *fcx_ps, unsafety_state) + }; - // Case 2. Reference to a top-level value. - def::DefFn(..) | - def::DefConst(..) | - def::DefStatic(..) => { - segment_spaces = vec![None; segments.len() - 1]; - segment_spaces.push(Some(subst::FnSpace)); + for s in &blk.stmts { + self.check_stmt(s); } - // Case 3. Reference to a method. - def::DefMethod(def_id) => { - let container = fcx.tcx().impl_or_trait_item(def_id).container(); - match container { - ty::TraitContainer(trait_did) => { - callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did) + let mut ty = match blk.expr { + Some(ref e) => self.check_expr_with_expectation(e, expected), + None => self.tcx.mk_nil() + }; + + if self.diverges.get().always() { + if let ExpectHasType(ety) = expected { + // Avoid forcing a type (only `!` for now) in unreachable code. + // FIXME(aburka) do we need this special case? and should it be is_uninhabited? + if !ety.is_never() { + if let Some(ref e) = blk.expr { + // Coerce the tail expression to the right type. + self.demand_coerce(e, ty, ety); + } } - ty::ImplContainer(_) => {} } - if segments.len() >= 2 { - segment_spaces = vec![None; segments.len() - 2]; - segment_spaces.push(Some(subst::TypeSpace)); - segment_spaces.push(Some(subst::FnSpace)); + ty = self.next_diverging_ty_var(); + } else if let ExpectHasType(ety) = expected { + if let Some(ref e) = blk.expr { + // Coerce the tail expression to the right type. + self.demand_coerce(e, ty, ety); } else { - // `::method` will end up here, and so can `T::method`. 
- let self_ty = opt_self_ty.expect("UFCS sugared method missing Self"); - segment_spaces = vec![Some(subst::FnSpace)]; - ufcs_associated = Some((container, self_ty)); - } - } + // We're not diverging and there's an expected type, which, + // in case it's not `()`, could result in an error higher-up. + // We have a chance to error here early and be more helpful. + let cause = self.misc(blk.span); + let trace = TypeTrace::types(&cause, false, ty, ety); + match self.sub_types(false, &cause, ty, ety) { + Ok(InferOk { obligations, .. }) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + }, + Err(err) => { + let mut err = self.report_and_explain_type_error(trace, &err); + + // Be helpful when the user wrote `{... expr;}` and + // taking the `;` off is enough to fix the error. + let mut extra_semi = None; + if let Some(stmt) = blk.stmts.last() { + if let hir::StmtSemi(ref e, _) = stmt.node { + if self.can_sub_types(self.node_ty(e.id), ety).is_ok() { + extra_semi = Some(stmt); + } + } + } + if let Some(last_stmt) = extra_semi { + let original_span = original_sp(self.tcx.sess.codemap(), + last_stmt.span, blk.span); + let span_semi = Span { + lo: original_span.hi - BytePos(1), + hi: original_span.hi, + expn_id: original_span.expn_id + }; + err.span_help(span_semi, "consider removing this semicolon:"); + } - def::DefAssociatedConst(def_id) => { - let container = fcx.tcx().impl_or_trait_item(def_id).container(); - match container { - ty::TraitContainer(trait_did) => { - callee::check_legal_trait_for_method_call(fcx.ccx, span, trait_did) + err.emit(); + } } - ty::ImplContainer(_) => {} } - if segments.len() >= 2 { - segment_spaces = vec![None; segments.len() - 2]; - segment_spaces.push(Some(subst::TypeSpace)); - segment_spaces.push(None); - } else { - // `::CONST` will end up here, and so can `T::CONST`. 
- let self_ty = opt_self_ty.expect("UFCS sugared const missing Self"); - segment_spaces = vec![None]; - ufcs_associated = Some((container, self_ty)); - } + // We already applied the type (and potentially errored), + // use the expected type to avoid further errors out. + ty = ety; } - // Other cases. Various nonsense that really shouldn't show up - // here. If they do, an error will have been reported - // elsewhere. (I hope) - def::DefMod(..) | - def::DefForeignMod(..) | - def::DefLocal(..) | - def::DefLabel(..) | - def::DefUpvar(..) | - def::DefErr => { - segment_spaces = vec![None; segments.len()]; + if self.has_errors.get() || ty.references_error() { + ty = self.tcx.types.err } - } - assert_eq!(segment_spaces.len(), segments.len()); - // In `>::method`, `A` and `B` are mandatory, but - // `opt_self_ty` can also be Some for `Foo::method`, where Foo's - // type parameters are not mandatory. - let require_type_space = opt_self_ty.is_some() && ufcs_associated.is_none(); + self.write_ty(blk.id, ty); - debug!("segment_spaces={:?}", segment_spaces); - - // Next, examine the definition, and determine how many type - // parameters we expect from each space. - let type_defs = &type_scheme.generics.types; - let region_defs = &type_scheme.generics.regions; + *self.ps.borrow_mut() = prev; + ty + } - // Now that we have categorized what space the parameters for each - // segment belong to, let's sort out the parameters that the user - // provided (if any) into their appropriate spaces. We'll also report - // errors if type parameters are provided in an inappropriate place. - let mut substs = Substs::empty(); - for (opt_space, segment) in segment_spaces.iter().zip(segments) { - match *opt_space { - None => { - prohibit_type_params(fcx.tcx(), slice::ref_slice(segment)); + // Instantiates the given path, which must refer to an item with the given + // number of type parameters and type. 
+ pub fn instantiate_value_path(&self, + segments: &[hir::PathSegment], + opt_self_ty: Option>, + def: Def, + span: Span, + node_id: ast::NodeId) + -> Ty<'tcx> { + debug!("instantiate_value_path(path={:?}, def={:?}, node_id={})", + segments, + def, + node_id); + + // We need to extract the type parameters supplied by the user in + // the path `path`. Due to the current setup, this is a bit of a + // tricky-process; the problem is that resolve only tells us the + // end-point of the path resolution, and not the intermediate steps. + // Luckily, we can (at least for now) deduce the intermediate steps + // just from the end-point. + // + // There are basically four cases to consider: + // + // 1. Reference to a constructor of enum variant or struct: + // + // struct Foo(...) + // enum E { Foo(...) } + // + // In these cases, the parameters are declared in the type + // space. + // + // 2. Reference to a fn item or a free constant: + // + // fn foo() { } + // + // In this case, the path will again always have the form + // `a::b::foo::` where only the final segment should have + // type parameters. However, in this case, those parameters are + // declared on a value, and hence are in the `FnSpace`. + // + // 3. Reference to a method or an associated constant: + // + // impl SomeStruct { + // fn foo(...) + // } + // + // Here we can have a path like + // `a::b::SomeStruct::::foo::`, in which case parameters + // may appear in two places. The penultimate segment, + // `SomeStruct::`, contains parameters in TypeSpace, and the + // final segment, `foo::` contains parameters in fn space. + // + // 4. Reference to a local variable + // + // Local variables can't have any type parameters. + // + // The first step then is to categorize the segments appropriately. + + assert!(!segments.is_empty()); + + let mut ufcs_associated = None; + let mut type_segment = None; + let mut fn_segment = None; + match def { + // Case 1. Reference to a struct/variant constructor. 
+ Def::StructCtor(def_id, ..) | + Def::VariantCtor(def_id, ..) => { + // Everything but the final segment should have no + // parameters at all. + let mut generics = self.tcx.item_generics(def_id); + if let Some(def_id) = generics.parent { + // Variant and struct constructors use the + // generics of their parent type definition. + generics = self.tcx.item_generics(def_id); + } + type_segment = Some((segments.last().unwrap(), generics)); } - Some(space) => { - push_explicit_parameters_from_segment_to_substs(fcx, - space, - span, - type_defs, - region_defs, - segment, - &mut substs); + // Case 2. Reference to a top-level value. + Def::Fn(def_id) | + Def::Const(def_id) | + Def::Static(def_id, _) => { + fn_segment = Some((segments.last().unwrap(), + self.tcx.item_generics(def_id))); } - } - } - if let Some(self_ty) = opt_self_ty { - if type_defs.len(subst::SelfSpace) == 1 { - substs.types.push(subst::SelfSpace, self_ty); - } - } - - // Now we have to compare the types that the user *actually* - // provided against the types that were *expected*. If the user - // did not provide any types, then we want to substitute inference - // variables. If the user provided some types, we may still need - // to add defaults. If the user provided *too many* types, that's - // a problem. - for &space in &[subst::SelfSpace, subst::TypeSpace, subst::FnSpace] { - adjust_type_parameters(fcx, span, space, type_defs, - require_type_space, &mut substs); - assert_eq!(substs.types.len(space), type_defs.len(space)); - - adjust_region_parameters(fcx, span, space, region_defs, &mut substs); - assert_eq!(substs.regions().len(space), region_defs.len(space)); - } - - // The things we are substituting into the type should not contain - // escaping late-bound regions, and nor should the base type scheme. - assert!(!substs.has_regions_escaping_depth(0)); - assert!(!type_scheme.has_escaping_regions()); - - // Add all the obligations that are required, substituting and - // normalized appropriately. 
- let bounds = fcx.instantiate_bounds(span, &substs, &type_predicates); - fcx.add_obligations_for_parameters( - traits::ObligationCause::new(span, fcx.body_id, traits::ItemObligation(def.def_id())), - &bounds); - - // Substitute the values for the type parameters into the type of - // the referenced item. - let ty_substituted = fcx.instantiate_type_scheme(span, &substs, &type_scheme.ty); - - - if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated { - // In the case of `Foo::method` and `>::method`, if `method` - // is inherent, there is no `Self` parameter, instead, the impl needs - // type parameters, which we can infer by unifying the provided `Self` - // with the substituted impl type. - let impl_scheme = fcx.tcx().lookup_item_type(impl_def_id); - assert_eq!(substs.types.len(subst::TypeSpace), - impl_scheme.generics.types.len(subst::TypeSpace)); - assert_eq!(substs.regions().len(subst::TypeSpace), - impl_scheme.generics.regions.len(subst::TypeSpace)); - - let impl_ty = fcx.instantiate_type_scheme(span, &substs, &impl_scheme.ty); - if fcx.mk_subty(false, TypeOrigin::Misc(span), self_ty, impl_ty).is_err() { - fcx.tcx().sess.span_bug(span, - &format!( - "instantiate_path: (UFCS) {:?} was a subtype of {:?} but now is not?", - self_ty, - impl_ty)); - } - } - debug!("instantiate_path: type of {:?} is {:?}", - node_id, - ty_substituted); - fcx.write_ty(node_id, ty_substituted); - fcx.write_substs(node_id, ty::ItemSubsts { substs: substs }); - return; + // Case 3. Reference to a method or associated const. + Def::Method(def_id) | + Def::AssociatedConst(def_id) => { + let container = self.tcx.associated_item(def_id).container; + match container { + ty::TraitContainer(trait_did) => { + callee::check_legal_trait_for_method_call(self.ccx, span, trait_did) + } + ty::ImplContainer(_) => {} + } - /// Finds the parameters that the user provided and adds them to `substs`. 
If too many - /// parameters are provided, then reports an error and clears the output vector. - /// - /// We clear the output vector because that will cause the `adjust_XXX_parameters()` later to - /// use inference variables. This seems less likely to lead to derived errors. - /// - /// Note that we *do not* check for *too few* parameters here. Due to the presence of defaults - /// etc that is more complicated. I wanted however to do the reporting of *too many* parameters - /// here because we can easily use the precise span of the N+1'th parameter. - fn push_explicit_parameters_from_segment_to_substs<'a, 'tcx>( - fcx: &FnCtxt<'a, 'tcx>, - space: subst::ParamSpace, - span: Span, - type_defs: &VecPerParamSpace>, - region_defs: &VecPerParamSpace, - segment: &hir::PathSegment, - substs: &mut Substs<'tcx>) - { - match segment.parameters { - hir::AngleBracketedParameters(ref data) => { - push_explicit_angle_bracketed_parameters_from_segment_to_substs( - fcx, space, type_defs, region_defs, data, substs); + let generics = self.tcx.item_generics(def_id); + if segments.len() >= 2 { + let parent_generics = self.tcx.item_generics(generics.parent.unwrap()); + type_segment = Some((&segments[segments.len() - 2], parent_generics)); + } else { + // `::assoc` will end up here, and so can `T::assoc`. + let self_ty = opt_self_ty.expect("UFCS sugared assoc missing Self"); + ufcs_associated = Some((container, self_ty)); + } + fn_segment = Some((segments.last().unwrap(), generics)); } - hir::ParenthesizedParameters(ref data) => { - span_err!(fcx.tcx().sess, span, E0238, - "parenthesized parameters may only be used with a trait"); - push_explicit_parenthesized_parameters_from_segment_to_substs( - fcx, space, span, type_defs, data, substs); - } + // Case 4. Local variable, no generics. + Def::Local(..) | Def::Upvar(..) 
=> {} + + _ => bug!("unexpected definition: {:?}", def), } - } - fn push_explicit_angle_bracketed_parameters_from_segment_to_substs<'a, 'tcx>( - fcx: &FnCtxt<'a, 'tcx>, - space: subst::ParamSpace, - type_defs: &VecPerParamSpace>, - region_defs: &VecPerParamSpace, - data: &hir::AngleBracketedParameterData, - substs: &mut Substs<'tcx>) - { - { - let type_count = type_defs.len(space); - assert_eq!(substs.types.len(space), 0); - for (i, typ) in data.types.iter().enumerate() { - let t = fcx.to_ty(&**typ); - if i < type_count { - substs.types.push(space, t); - } else if i == type_count { - span_err!(fcx.tcx().sess, typ.span, E0087, - "too many type parameters provided: \ - expected at most {} parameter{}, \ - found {} parameter{}", - type_count, - if type_count == 1 {""} else {"s"}, - data.types.len(), - if data.types.len() == 1 {""} else {"s"}); - substs.types.truncate(space, 0); - break; - } + debug!("type_segment={:?} fn_segment={:?}", type_segment, fn_segment); + + // Now that we have categorized what space the parameters for each + // segment belong to, let's sort out the parameters that the user + // provided (if any) into their appropriate spaces. We'll also report + // errors if type parameters are provided in an inappropriate place. + let poly_segments = type_segment.is_some() as usize + + fn_segment.is_some() as usize; + self.tcx.prohibit_type_params(&segments[..segments.len() - poly_segments]); + + match def { + Def::Local(def_id) | Def::Upvar(def_id, ..) 
=> { + let nid = self.tcx.map.as_local_node_id(def_id).unwrap(); + let ty = self.local_ty(span, nid); + let ty = self.normalize_associated_types_in(span, &ty); + self.write_ty(node_id, ty); + self.write_substs(node_id, ty::ItemSubsts { + substs: self.tcx.intern_substs(&[]) + }); + return ty; } + _ => {} } - if !data.bindings.is_empty() { - span_err!(fcx.tcx().sess, data.bindings[0].span, E0182, - "unexpected binding of associated item in expression path \ - (only allowed in type paths)"); - } + // Now we have to compare the types that the user *actually* + // provided against the types that were *expected*. If the user + // did not provide any types, then we want to substitute inference + // variables. If the user provided some types, we may still need + // to add defaults. If the user provided *too many* types, that's + // a problem. + self.check_path_parameter_count(span, &mut type_segment); + self.check_path_parameter_count(span, &mut fn_segment); + + let (fn_start, has_self) = match (type_segment, fn_segment) { + (_, Some((_, generics))) => { + (generics.parent_count(), generics.has_self) + } + (Some((_, generics)), None) => { + (generics.own_count(), generics.has_self) + } + (None, None) => (0, false) + }; + let substs = Substs::for_item(self.tcx, def.def_id(), |def, _| { + let mut i = def.index as usize; - { - let region_count = region_defs.len(space); - assert_eq!(substs.regions().len(space), 0); - for (i, lifetime) in data.lifetimes.iter().enumerate() { - let r = ast_region_to_region(fcx.tcx(), lifetime); - if i < region_count { - substs.mut_regions().push(space, r); - } else if i == region_count { - span_err!(fcx.tcx().sess, lifetime.span, E0088, - "too many lifetime parameters provided: \ - expected {} parameter{}, found {} parameter{}", - region_count, - if region_count == 1 {""} else {"s"}, - data.lifetimes.len(), - if data.lifetimes.len() == 1 {""} else {"s"}); - substs.mut_regions().truncate(space, 0); - break; + let segment = if i < fn_start { + i -= 
has_self as usize; + type_segment + } else { + i -= fn_start; + fn_segment + }; + let lifetimes = match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => &data.lifetimes[..], + Some(&hir::ParenthesizedParameters(_)) => bug!(), + None => &[] + }; + + if let Some(ast_lifetime) = lifetimes.get(i) { + ast_region_to_region(self.tcx, ast_lifetime) + } else { + self.region_var_for_def(span, def) + } + }, |def, substs| { + let mut i = def.index as usize; + + let segment = if i < fn_start { + // Handle Self first, so we can adjust the index to match the AST. + if has_self && i == 0 { + return opt_self_ty.unwrap_or_else(|| { + self.type_var_for_def(span, def, substs) + }); + } + i -= has_self as usize; + type_segment + } else { + i -= fn_start; + fn_segment + }; + let (types, infer_types) = match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => { + (&data.types[..], data.infer_types) } + Some(&hir::ParenthesizedParameters(_)) => bug!(), + None => (&[][..], true) + }; + + // Skip over the lifetimes in the same segment. + if let Some((_, generics)) = segment { + i -= generics.regions.len(); } - } - } - /// As with - /// `push_explicit_angle_bracketed_parameters_from_segment_to_substs`, - /// but intended for `Foo(A,B) -> C` form. This expands to - /// roughly the same thing as `Foo<(A,B),C>`. One important - /// difference has to do with the treatment of anonymous - /// regions, which are translated into bound regions (NYI). 
- fn push_explicit_parenthesized_parameters_from_segment_to_substs<'a, 'tcx>( - fcx: &FnCtxt<'a, 'tcx>, - space: subst::ParamSpace, - span: Span, - type_defs: &VecPerParamSpace>, - data: &hir::ParenthesizedParameterData, - substs: &mut Substs<'tcx>) - { - let type_count = type_defs.len(space); - if type_count < 2 { - span_err!(fcx.tcx().sess, span, E0167, - "parenthesized form always supplies 2 type parameters, \ - but only {} parameter(s) were expected", - type_count); - } + if let Some(ast_ty) = types.get(i) { + // A provided type parameter. + self.to_ty(ast_ty) + } else if let (false, Some(default)) = (infer_types, def.default) { + // No type parameter provided, but a default exists. + default.subst_spanned(self.tcx, substs, Some(span)) + } else { + // No type parameters were provided, we can infer all. + // This can also be reached in some error cases: + // We prefer to use inference variables instead of + // TyError to let type inference recover somewhat. + self.type_var_for_def(span, def, substs) + } + }); - let input_tys: Vec = - data.inputs.iter().map(|ty| fcx.to_ty(&**ty)).collect(); + // The things we are substituting into the type should not contain + // escaping late-bound regions, and nor should the base type scheme. + let ty = self.tcx.item_type(def.def_id()); + assert!(!substs.has_escaping_regions()); + assert!(!ty.has_escaping_regions()); - let tuple_ty = fcx.tcx().mk_tup(input_tys); + // Add all the obligations that are required, substituting and + // normalized appropriately. + let bounds = self.instantiate_bounds(span, def.def_id(), &substs); + self.add_obligations_for_parameters( + traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def.def_id())), + &bounds); - if type_count >= 1 { - substs.types.push(space, tuple_ty); + // Substitute the values for the type parameters into the type of + // the referenced item. 
+ let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty); + + if let Some((ty::ImplContainer(impl_def_id), self_ty)) = ufcs_associated { + // In the case of `Foo::method` and `>::method`, if `method` + // is inherent, there is no `Self` parameter, instead, the impl needs + // type parameters, which we can infer by unifying the provided `Self` + // with the substituted impl type. + let ty = self.tcx.item_type(impl_def_id); + + let impl_ty = self.instantiate_type_scheme(span, &substs, &ty); + match self.sub_types(false, &self.misc(span), self_ty, impl_ty) { + Ok(ok) => self.register_infer_ok_obligations(ok), + Err(_) => { + span_bug!(span, + "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?", + self_ty, + impl_ty); + } + } } - let output_ty: Option = - data.output.as_ref().map(|ty| fcx.to_ty(&**ty)); + debug!("instantiate_value_path: type of {:?} is {:?}", + node_id, + ty_substituted); + self.write_substs(node_id, ty::ItemSubsts { + substs: substs + }); + ty_substituted + } - let output_ty = - output_ty.unwrap_or(fcx.tcx().mk_nil()); + /// Report errors if the provided parameters are too few or too many. 
+ fn check_path_parameter_count(&self, + span: Span, + segment: &mut Option<(&hir::PathSegment, &ty::Generics)>) { + let (lifetimes, types, infer_types, bindings) = { + match segment.map(|(s, _)| &s.parameters) { + Some(&hir::AngleBracketedParameters(ref data)) => { + (&data.lifetimes[..], &data.types[..], data.infer_types, &data.bindings[..]) + } + Some(&hir::ParenthesizedParameters(_)) => { + span_bug!(span, "parenthesized parameters cannot appear in ExprPath"); + } + None => (&[][..], &[][..], true, &[][..]) + } + }; - if type_count >= 2 { - substs.types.push(space, output_ty); - } - } + let count = |n| { + format!("{} parameter{}", n, if n == 1 { "" } else { "s" }) + }; - fn adjust_type_parameters<'a, 'tcx>( - fcx: &FnCtxt<'a, 'tcx>, - span: Span, - space: ParamSpace, - defs: &VecPerParamSpace>, - require_type_space: bool, - substs: &mut Substs<'tcx>) - { - let provided_len = substs.types.len(space); - let desired = defs.get_slice(space); - let required_len = desired.iter() - .take_while(|d| d.default.is_none()) - .count(); - - debug!("adjust_type_parameters(space={:?}, \ - provided_len={}, \ - desired_len={}, \ - required_len={})", - space, - provided_len, - desired.len(), - required_len); - - // Enforced by `push_explicit_parameters_from_segment_to_substs()`. - assert!(provided_len <= desired.len()); - - // Nothing specified at all: supply inference variables for - // everything. - if provided_len == 0 && !(require_type_space && space == subst::TypeSpace) { - substs.types.replace(space, Vec::new()); - fcx.infcx().type_vars_for_defs(span, space, substs, &desired[..]); - return; + // Check provided lifetime parameters. 
+ let lifetime_defs = segment.map_or(&[][..], |(_, generics)| &generics.regions); + if lifetimes.len() > lifetime_defs.len() { + let span = lifetimes[lifetime_defs.len()].span; + struct_span_err!(self.tcx.sess, span, E0088, + "too many lifetime parameters provided: \ + expected {}, found {}", + count(lifetime_defs.len()), + count(lifetimes.len())) + .span_label(span, &format!("unexpected lifetime parameter{}", + match lifetimes.len() { 1 => "", _ => "s" })) + .emit(); } - // Too few parameters specified: report an error and use Err - // for everything. - if provided_len < required_len { - let qualifier = - if desired.len() != required_len { "at least " } else { "" }; - span_err!(fcx.tcx().sess, span, E0089, - "too few type parameters provided: expected {}{} parameter{}, \ - found {} parameter{}", - qualifier, required_len, - if required_len == 1 {""} else {"s"}, - provided_len, - if provided_len == 1 {""} else {"s"}); - substs.types.replace(space, vec![fcx.tcx().types.err; desired.len()]); - return; - } + // The case where there is not enough lifetime parameters is not checked, + // because this is not possible - a function never takes lifetime parameters. + // See discussion for Pull Request 36208. - // Otherwise, add in any optional parameters that the user - // omitted. The case of *too many* parameters is handled - // already by - // push_explicit_parameters_from_segment_to_substs(). Note - // that the *default* type are expressed in terms of all prior - // parameters, so we have to substitute as we go with the - // partial substitution that we have built up. - for i in provided_len..desired.len() { - let default = desired[i].default.unwrap(); - let default = default.subst_spanned(fcx.tcx(), substs, Some(span)); - substs.types.push(space, default); + // Check provided type parameters. + let type_defs = segment.map_or(&[][..], |(_, generics)| { + if generics.parent.is_none() { + &generics.types[generics.has_self as usize..] 
+ } else { + &generics.types + } + }); + let required_len = type_defs.iter() + .take_while(|d| d.default.is_none()) + .count(); + if types.len() > type_defs.len() { + let span = types[type_defs.len()].span; + struct_span_err!(self.tcx.sess, span, E0087, + "too many type parameters provided: \ + expected at most {}, found {}", + count(type_defs.len()), + count(types.len())) + .span_label(span, &format!("too many type parameters")).emit(); + + // To prevent derived errors to accumulate due to extra + // type parameters, we force instantiate_value_path to + // use inference variables instead of the provided types. + *segment = None; + } else if !infer_types && types.len() < required_len { + let adjust = |len| if len > 1 { "parameters" } else { "parameter" }; + let required_param_str = adjust(required_len); + let actual_param_str = adjust(types.len()); + struct_span_err!(self.tcx.sess, span, E0089, + "too few type parameters provided: \ + expected {} {}, found {} {}", + count(required_len), + required_param_str, + count(types.len()), + actual_param_str) + .span_label(span, &format!("expected {} type {}", required_len, required_param_str)) + .emit(); } - assert_eq!(substs.types.len(space), desired.len()); - debug!("Final substs: {:?}", substs); + if !bindings.is_empty() { + span_err!(self.tcx.sess, bindings[0].span, E0182, + "unexpected binding of associated item in expression path \ + (only allowed in type paths)"); + } } - fn adjust_region_parameters( - fcx: &FnCtxt, - span: Span, - space: ParamSpace, - defs: &VecPerParamSpace, - substs: &mut Substs) + fn structurally_resolve_type_or_else(&self, sp: Span, ty: Ty<'tcx>, f: F) + -> Ty<'tcx> + where F: Fn() -> Ty<'tcx> { - let provided_len = substs.mut_regions().len(space); - let desired = defs.get_slice(space); + let mut ty = self.resolve_type_vars_with_obligations(ty); - // Enforced by `push_explicit_parameters_from_segment_to_substs()`. 
- assert!(provided_len <= desired.len()); - - // If nothing was provided, just use inference variables. - if provided_len == 0 { - substs.mut_regions().replace( - space, - fcx.infcx().region_vars_for_defs(span, desired)); - return; - } + if ty.is_ty_var() { + let alternative = f(); - // If just the right number were provided, everybody is happy. - if provided_len == desired.len() { - return; + // If not, error. + if alternative.is_ty_var() || alternative.references_error() { + if !self.is_tainted_by_errors() { + self.type_error_message(sp, |_actual| { + "the type of this value must be known in this context".to_string() + }, ty); + } + self.demand_suptype(sp, self.tcx.types.err, ty); + ty = self.tcx.types.err; + } else { + self.demand_suptype(sp, alternative, ty); + ty = alternative; + } } - // Otherwise, too few were provided. Report an error and then - // use inference variables. - span_err!(fcx.tcx().sess, span, E0090, - "too few lifetime parameters provided: expected {} parameter{}, \ - found {} parameter{}", - desired.len(), - if desired.len() == 1 {""} else {"s"}, - provided_len, - if provided_len == 1 {""} else {"s"}); - - substs.mut_regions().replace( - space, - fcx.infcx().region_vars_for_defs(span, desired)); + ty } -} -fn structurally_resolve_type_or_else<'a, 'tcx, F>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - ty: Ty<'tcx>, - f: F) -> Ty<'tcx> - where F: Fn() -> Ty<'tcx> -{ - let mut ty = fcx.resolve_type_vars_if_possible(ty); - - if ty.is_ty_var() { - let alternative = f(); - - // If not, error. - if alternative.is_ty_var() || alternative.references_error() { - fcx.type_error_message(sp, |_actual| { - "the type of this value must be known in this context".to_string() - }, ty, None); - demand::suptype(fcx, sp, fcx.tcx().types.err, ty); - ty = fcx.tcx().types.err; - } else { - demand::suptype(fcx, sp, alternative, ty); - ty = alternative; - } + // Resolves `typ` by a single level if `typ` is a type variable. 
If no + // resolution is possible, then an error is reported. + pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> { + self.structurally_resolve_type_or_else(sp, ty, || { + self.tcx.types.err + }) } - ty -} - -// Resolves `typ` by a single level if `typ` is a type variable. If no -// resolution is possible, then an error is reported. -pub fn structurally_resolved_type<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - sp: Span, - ty: Ty<'tcx>) - -> Ty<'tcx> -{ - structurally_resolve_type_or_else(fcx, sp, ty, || { - fcx.tcx().types.err - }) -} - -// Returns true if b contains a break that can exit from b -pub fn may_break(cx: &ty::ctxt, id: ast::NodeId, b: &hir::Block) -> bool { - // First: is there an unlabeled break immediately - // inside the loop? - (loop_query(&*b, |e| { - match *e { - hir::ExprBreak(None) => true, - _ => false + fn with_loop_ctxt(&self, id: ast::NodeId, ctxt: LoopCtxt<'gcx, 'tcx>, f: F) + -> LoopCtxt<'gcx, 'tcx> { + let index; + { + let mut enclosing_loops = self.enclosing_loops.borrow_mut(); + index = enclosing_loops.stack.len(); + enclosing_loops.by_id.insert(id, index); + enclosing_loops.stack.push(ctxt); } - })) || - // Second: is there a labeled break with label - // nested anywhere inside the loop? 
- (block_query(b, |e| { - if let hir::ExprBreak(Some(_)) = e.node { - lookup_full_def(cx, e.span, e.id) == def::DefLabel(id) - } else { - false + f(); + { + let mut enclosing_loops = self.enclosing_loops.borrow_mut(); + debug_assert!(enclosing_loops.stack.len() == index + 1); + enclosing_loops.by_id.remove(&id).expect("missing loop context"); + (enclosing_loops.stack.pop().expect("missing loop context")) } - })) + } } pub fn check_bounds_are_used<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - tps: &[hir::TyParam], + generics: &hir::Generics, ty: Ty<'tcx>) { debug!("check_bounds_are_used(n_tps={}, ty={:?})", - tps.len(), ty); + generics.ty_params.len(), ty); // make a vector of booleans initially false, set to true when used - if tps.is_empty() { return; } - let mut tps_used = vec![false; tps.len()]; + if generics.ty_params.is_empty() { return; } + let mut tps_used = vec![false; generics.ty_params.len()]; for leaf_ty in ty.walk() { if let ty::TyParam(ParamTy {idx, ..}) = leaf_ty.sty { debug!("Found use of ty param num {}", idx); - tps_used[idx as usize] = true; + tps_used[idx as usize - generics.lifetimes.len()] = true; } } - for (i, b) in tps_used.iter().enumerate() { - if !*b { - span_err!(ccx.tcx.sess, tps[i].span, E0091, + for (&used, param) in tps_used.iter().zip(&generics.ty_params) { + if !used { + struct_span_err!(ccx.tcx.sess, param.span, E0091, "type parameter `{}` is unused", - tps[i].name); + param.name) + .span_label(param.span, &format!("unused type parameter")) + .emit(); } } } diff --git a/src/librustc_typeck/check/op.rs b/src/librustc_typeck/check/op.rs index f4841b75d13d5..adb8c6be42bc1 100644 --- a/src/librustc_typeck/check/op.rs +++ b/src/librustc_typeck/check/op.rs @@ -10,341 +10,356 @@ //! Code related to processing overloaded binary and unary operators. 
-use super::{ - check_expr, - check_expr_coercable_to_type, - check_expr_with_lvalue_pref, - demand, - method, - FnCtxt, -}; -use middle::def_id::DefId; -use middle::ty::{Ty, TypeFoldable, PreferMutLvalue}; +use super::FnCtxt; +use hir::def_id::DefId; +use rustc::ty::{Ty, TypeFoldable, PreferMutLvalue}; use syntax::ast; -use syntax::parse::token; -use rustc_front::hir; -use rustc_front::util as hir_util; - -/// Check a `a = b` -pub fn check_binop_assign<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - expr: &'tcx hir::Expr, - op: hir::BinOp, - lhs_expr: &'tcx hir::Expr, - rhs_expr: &'tcx hir::Expr) -{ - check_expr_with_lvalue_pref(fcx, lhs_expr, PreferMutLvalue); - - let lhs_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(lhs_expr)); - let (rhs_ty, return_ty) = - check_overloaded_binop(fcx, expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::Yes); - let rhs_ty = fcx.resolve_type_vars_if_possible(rhs_ty); - - if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) { - enforce_builtin_binop_types(fcx, lhs_expr, lhs_ty, rhs_expr, rhs_ty, op); - fcx.write_nil(expr.id); - } else { - fcx.write_ty(expr.id, return_ty); - } - - let tcx = fcx.tcx(); - if !tcx.expr_is_lval(lhs_expr) { - span_err!(tcx.sess, lhs_expr.span, E0067, "invalid left-hand side expression"); +use syntax::symbol::Symbol; +use rustc::hir; + +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + /// Check a `a = b` + pub fn check_binop_assign(&self, + expr: &'gcx hir::Expr, + op: hir::BinOp, + lhs_expr: &'gcx hir::Expr, + rhs_expr: &'gcx hir::Expr) -> Ty<'tcx> + { + let lhs_ty = self.check_expr_with_lvalue_pref(lhs_expr, PreferMutLvalue); + + let lhs_ty = self.resolve_type_vars_with_obligations(lhs_ty); + let (rhs_ty, return_ty) = + self.check_overloaded_binop(expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::Yes); + let rhs_ty = self.resolve_type_vars_with_obligations(rhs_ty); + + let ty = if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() + && is_builtin_binop(lhs_ty, rhs_ty, op) { + 
self.enforce_builtin_binop_types(lhs_expr, lhs_ty, rhs_expr, rhs_ty, op); + self.tcx.mk_nil() + } else { + return_ty + }; + + let tcx = self.tcx; + if !tcx.expr_is_lval(lhs_expr) { + struct_span_err!( + tcx.sess, lhs_expr.span, + E0067, "invalid left-hand side expression") + .span_label( + lhs_expr.span, + &format!("invalid expression for left-hand side")) + .emit(); + } + ty } -} - -/// Check a potentially overloaded binary operator. -pub fn check_binop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - op: hir::BinOp, - lhs_expr: &'tcx hir::Expr, - rhs_expr: &'tcx hir::Expr) -{ - let tcx = fcx.ccx.tcx; - debug!("check_binop(expr.id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})", - expr.id, - expr, - op, - lhs_expr, - rhs_expr); - - check_expr(fcx, lhs_expr); - let lhs_ty = fcx.resolve_type_vars_if_possible(fcx.expr_ty(lhs_expr)); - - match BinOpCategory::from(op) { - BinOpCategory::Shortcircuit => { - // && and || are a simple case. - demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty); - check_expr_coercable_to_type(fcx, rhs_expr, tcx.mk_bool()); - fcx.write_ty(expr.id, tcx.mk_bool()); - } - _ => { - // Otherwise, we always treat operators as if they are - // overloaded. This is the way to be most flexible w/r/t - // types that get inferred. - let (rhs_ty, return_ty) = - check_overloaded_binop(fcx, expr, lhs_expr, lhs_ty, rhs_expr, op, IsAssign::No); - - // Supply type inference hints if relevant. Probably these - // hints should be enforced during select as part of the - // `consider_unification_despite_ambiguity` routine, but this - // more convenient for now. - // - // The basic idea is to help type inference by taking - // advantage of things we know about how the impls for - // scalar types are arranged. 
This is important in a - // scenario like `1_u32 << 2`, because it lets us quickly - // deduce that the result type should be `u32`, even - // though we don't know yet what type 2 has and hence - // can't pin this down to a specific impl. - let rhs_ty = fcx.resolve_type_vars_if_possible(rhs_ty); - if - !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && - is_builtin_binop(lhs_ty, rhs_ty, op) - { - let builtin_return_ty = - enforce_builtin_binop_types(fcx, lhs_expr, lhs_ty, rhs_expr, rhs_ty, op); - demand::suptype(fcx, expr.span, builtin_return_ty, return_ty); + /// Check a potentially overloaded binary operator. + pub fn check_binop(&self, + expr: &'gcx hir::Expr, + op: hir::BinOp, + lhs_expr: &'gcx hir::Expr, + rhs_expr: &'gcx hir::Expr) -> Ty<'tcx> + { + let tcx = self.tcx; + + debug!("check_binop(expr.id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})", + expr.id, + expr, + op, + lhs_expr, + rhs_expr); + + let lhs_ty = self.check_expr(lhs_expr); + let lhs_ty = self.resolve_type_vars_with_obligations(lhs_ty); + + match BinOpCategory::from(op) { + BinOpCategory::Shortcircuit => { + // && and || are a simple case. + let lhs_diverges = self.diverges.get(); + self.demand_suptype(lhs_expr.span, tcx.mk_bool(), lhs_ty); + self.check_expr_coercable_to_type(rhs_expr, tcx.mk_bool()); + + // Depending on the LHS' value, the RHS can never execute. + self.diverges.set(lhs_diverges); + + tcx.mk_bool() } + _ => { + // Otherwise, we always treat operators as if they are + // overloaded. This is the way to be most flexible w/r/t + // types that get inferred. + let (rhs_ty, return_ty) = + self.check_overloaded_binop(expr, lhs_expr, lhs_ty, + rhs_expr, op, IsAssign::No); + + // Supply type inference hints if relevant. Probably these + // hints should be enforced during select as part of the + // `consider_unification_despite_ambiguity` routine, but this + // more convenient for now. 
+ // + // The basic idea is to help type inference by taking + // advantage of things we know about how the impls for + // scalar types are arranged. This is important in a + // scenario like `1_u32 << 2`, because it lets us quickly + // deduce that the result type should be `u32`, even + // though we don't know yet what type 2 has and hence + // can't pin this down to a specific impl. + let rhs_ty = self.resolve_type_vars_with_obligations(rhs_ty); + if + !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && + is_builtin_binop(lhs_ty, rhs_ty, op) + { + let builtin_return_ty = + self.enforce_builtin_binop_types(lhs_expr, lhs_ty, rhs_expr, rhs_ty, op); + self.demand_suptype(expr.span, builtin_return_ty, return_ty); + } - fcx.write_ty(expr.id, return_ty); + return_ty + } } } -} -fn enforce_builtin_binop_types<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - lhs_expr: &'tcx hir::Expr, - lhs_ty: Ty<'tcx>, - rhs_expr: &'tcx hir::Expr, - rhs_ty: Ty<'tcx>, - op: hir::BinOp) - -> Ty<'tcx> -{ - debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op)); - - let tcx = fcx.tcx(); - match BinOpCategory::from(op) { - BinOpCategory::Shortcircuit => { - demand::suptype(fcx, lhs_expr.span, tcx.mk_bool(), lhs_ty); - demand::suptype(fcx, rhs_expr.span, tcx.mk_bool(), rhs_ty); - tcx.mk_bool() - } + fn enforce_builtin_binop_types(&self, + lhs_expr: &'gcx hir::Expr, + lhs_ty: Ty<'tcx>, + rhs_expr: &'gcx hir::Expr, + rhs_ty: Ty<'tcx>, + op: hir::BinOp) + -> Ty<'tcx> + { + debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op)); + + let tcx = self.tcx; + match BinOpCategory::from(op) { + BinOpCategory::Shortcircuit => { + self.demand_suptype(lhs_expr.span, tcx.mk_bool(), lhs_ty); + self.demand_suptype(rhs_expr.span, tcx.mk_bool(), rhs_ty); + tcx.mk_bool() + } - BinOpCategory::Shift => { - // result type is same as LHS always - lhs_ty - } + BinOpCategory::Shift => { + // result type is same as LHS always + lhs_ty + } - BinOpCategory::Math | - BinOpCategory::Bitwise => { - // both LHS and RHS and result will have the same 
type - demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty); - lhs_ty - } + BinOpCategory::Math | + BinOpCategory::Bitwise => { + // both LHS and RHS and result will have the same type + self.demand_suptype(rhs_expr.span, lhs_ty, rhs_ty); + lhs_ty + } - BinOpCategory::Comparison => { - // both LHS and RHS and result will have the same type - demand::suptype(fcx, rhs_expr.span, lhs_ty, rhs_ty); - tcx.mk_bool() + BinOpCategory::Comparison => { + // both LHS and RHS and result will have the same type + self.demand_suptype(rhs_expr.span, lhs_ty, rhs_ty); + tcx.mk_bool() + } } } -} -fn check_overloaded_binop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - lhs_expr: &'tcx hir::Expr, - lhs_ty: Ty<'tcx>, - rhs_expr: &'tcx hir::Expr, - op: hir::BinOp, - is_assign: IsAssign) - -> (Ty<'tcx>, Ty<'tcx>) -{ - debug!("check_overloaded_binop(expr.id={}, lhs_ty={:?}, is_assign={:?})", - expr.id, - lhs_ty, - is_assign); - - let (name, trait_def_id) = name_and_trait_def_id(fcx, op, is_assign); - - // NB: As we have not yet type-checked the RHS, we don't have the - // type at hand. Make a variable to represent it. The whole reason - // for this indirection is so that, below, we can check the expr - // using this variable as the expected type, which sometimes lets - // us do better coercions than we would be able to do otherwise, - // particularly for things like `String + &String`. 
- let rhs_ty_var = fcx.infcx().next_ty_var(); - - let return_ty = match lookup_op_method(fcx, expr, lhs_ty, vec![rhs_ty_var], - token::intern(name), trait_def_id, - lhs_expr) { - Ok(return_ty) => return_ty, - Err(()) => { - // error types are considered "builtin" - if !lhs_ty.references_error() { - if let IsAssign::Yes = is_assign { - span_err!(fcx.tcx().sess, lhs_expr.span, E0368, - "binary assignment operation `{}=` cannot be applied to type `{}`", - hir_util::binop_to_string(op.node), - lhs_ty); - } else { - let mut err = struct_span_err!(fcx.tcx().sess, lhs_expr.span, E0369, - "binary operation `{}` cannot be applied to type `{}`", - hir_util::binop_to_string(op.node), - lhs_ty); - let missing_trait = match op.node { - hir::BiAdd => Some("std::ops::Add"), - hir::BiSub => Some("std::ops::Sub"), - hir::BiMul => Some("std::ops::Mul"), - hir::BiDiv => Some("std::ops::Div"), - hir::BiRem => Some("std::ops::Rem"), - hir::BiBitAnd => Some("std::ops::BitAnd"), - hir::BiBitOr => Some("std::ops::BitOr"), - hir::BiShl => Some("std::ops::Shl"), - hir::BiShr => Some("std::ops::Shr"), - hir::BiEq | hir::BiNe => Some("std::cmp::PartialEq"), - hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => - Some("std::cmp::PartialOrd"), - _ => None - }; - - if let Some(missing_trait) = missing_trait { - span_note!(&mut err, lhs_expr.span, - "an implementation of `{}` might be missing for `{}`", - missing_trait, lhs_ty); + fn check_overloaded_binop(&self, + expr: &'gcx hir::Expr, + lhs_expr: &'gcx hir::Expr, + lhs_ty: Ty<'tcx>, + rhs_expr: &'gcx hir::Expr, + op: hir::BinOp, + is_assign: IsAssign) + -> (Ty<'tcx>, Ty<'tcx>) + { + debug!("check_overloaded_binop(expr.id={}, lhs_ty={:?}, is_assign={:?})", + expr.id, + lhs_ty, + is_assign); + + let (name, trait_def_id) = self.name_and_trait_def_id(op, is_assign); + + // NB: As we have not yet type-checked the RHS, we don't have the + // type at hand. Make a variable to represent it. 
The whole reason + // for this indirection is so that, below, we can check the expr + // using this variable as the expected type, which sometimes lets + // us do better coercions than we would be able to do otherwise, + // particularly for things like `String + &String`. + let rhs_ty_var = self.next_ty_var(); + + let return_ty = match self.lookup_op_method(expr, lhs_ty, vec![rhs_ty_var], + Symbol::intern(name), trait_def_id, + lhs_expr) { + Ok(return_ty) => return_ty, + Err(()) => { + // error types are considered "builtin" + if !lhs_ty.references_error() { + if let IsAssign::Yes = is_assign { + struct_span_err!(self.tcx.sess, lhs_expr.span, E0368, + "binary assignment operation `{}=` \ + cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty) + .span_label(lhs_expr.span, + &format!("cannot use `{}=` on type `{}`", + op.node.as_str(), lhs_ty)) + .emit(); + } else { + let mut err = struct_span_err!(self.tcx.sess, lhs_expr.span, E0369, + "binary operation `{}` cannot be applied to type `{}`", + op.node.as_str(), + lhs_ty); + let missing_trait = match op.node { + hir::BiAdd => Some("std::ops::Add"), + hir::BiSub => Some("std::ops::Sub"), + hir::BiMul => Some("std::ops::Mul"), + hir::BiDiv => Some("std::ops::Div"), + hir::BiRem => Some("std::ops::Rem"), + hir::BiBitAnd => Some("std::ops::BitAnd"), + hir::BiBitOr => Some("std::ops::BitOr"), + hir::BiShl => Some("std::ops::Shl"), + hir::BiShr => Some("std::ops::Shr"), + hir::BiEq | hir::BiNe => Some("std::cmp::PartialEq"), + hir::BiLt | hir::BiLe | hir::BiGt | hir::BiGe => + Some("std::cmp::PartialOrd"), + _ => None + }; + + if let Some(missing_trait) = missing_trait { + span_note!(&mut err, lhs_expr.span, + "an implementation of `{}` might be missing for `{}`", + missing_trait, lhs_ty); + } + err.emit(); } - err.emit(); } + self.tcx.types.err } - fcx.tcx().types.err - } - }; + }; - // see `NB` above - check_expr_coercable_to_type(fcx, rhs_expr, rhs_ty_var); + // see `NB` above + 
self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var); - (rhs_ty_var, return_ty) -} + (rhs_ty_var, return_ty) + } -pub fn check_user_unop<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - op_str: &str, - mname: &str, - trait_did: Option, - ex: &'tcx hir::Expr, - operand_expr: &'tcx hir::Expr, - operand_ty: Ty<'tcx>, - op: hir::UnOp) - -> Ty<'tcx> -{ - assert!(hir_util::is_by_value_unop(op)); - match lookup_op_method(fcx, ex, operand_ty, vec![], - token::intern(mname), trait_did, - operand_expr) { - Ok(t) => t, - Err(()) => { - fcx.type_error_message(ex.span, |actual| { - format!("cannot apply unary operator `{}` to type `{}`", - op_str, actual) - }, operand_ty, None); - fcx.tcx().types.err + pub fn check_user_unop(&self, + op_str: &str, + mname: &str, + trait_did: Option, + ex: &'gcx hir::Expr, + operand_expr: &'gcx hir::Expr, + operand_ty: Ty<'tcx>, + op: hir::UnOp) + -> Ty<'tcx> + { + assert!(op.is_by_value()); + let mname = Symbol::intern(mname); + match self.lookup_op_method(ex, operand_ty, vec![], mname, trait_did, operand_expr) { + Ok(t) => t, + Err(()) => { + self.type_error_message(ex.span, |actual| { + format!("cannot apply unary operator `{}` to type `{}`", + op_str, actual) + }, operand_ty); + self.tcx.types.err + } } } -} -fn name_and_trait_def_id(fcx: &FnCtxt, - op: hir::BinOp, - is_assign: IsAssign) - -> (&'static str, Option) { - let lang = &fcx.tcx().lang_items; - - if let IsAssign::Yes = is_assign { - match op.node { - hir::BiAdd => ("add_assign", lang.add_assign_trait()), - hir::BiSub => ("sub_assign", lang.sub_assign_trait()), - hir::BiMul => ("mul_assign", lang.mul_assign_trait()), - hir::BiDiv => ("div_assign", lang.div_assign_trait()), - hir::BiRem => ("rem_assign", lang.rem_assign_trait()), - hir::BiBitXor => ("bitxor_assign", lang.bitxor_assign_trait()), - hir::BiBitAnd => ("bitand_assign", lang.bitand_assign_trait()), - hir::BiBitOr => ("bitor_assign", lang.bitor_assign_trait()), - hir::BiShl => ("shl_assign", lang.shl_assign_trait()), - hir::BiShr => 
("shr_assign", lang.shr_assign_trait()), - hir::BiLt | hir::BiLe | hir::BiGe | hir::BiGt | hir::BiEq | hir::BiNe | hir::BiAnd | - hir::BiOr => { - fcx.tcx().sess.span_bug(op.span, &format!("impossible assignment operation: {}=", - hir_util::binop_to_string(op.node))) + fn name_and_trait_def_id(&self, + op: hir::BinOp, + is_assign: IsAssign) + -> (&'static str, Option) { + let lang = &self.tcx.lang_items; + + if let IsAssign::Yes = is_assign { + match op.node { + hir::BiAdd => ("add_assign", lang.add_assign_trait()), + hir::BiSub => ("sub_assign", lang.sub_assign_trait()), + hir::BiMul => ("mul_assign", lang.mul_assign_trait()), + hir::BiDiv => ("div_assign", lang.div_assign_trait()), + hir::BiRem => ("rem_assign", lang.rem_assign_trait()), + hir::BiBitXor => ("bitxor_assign", lang.bitxor_assign_trait()), + hir::BiBitAnd => ("bitand_assign", lang.bitand_assign_trait()), + hir::BiBitOr => ("bitor_assign", lang.bitor_assign_trait()), + hir::BiShl => ("shl_assign", lang.shl_assign_trait()), + hir::BiShr => ("shr_assign", lang.shr_assign_trait()), + hir::BiLt | hir::BiLe | + hir::BiGe | hir::BiGt | + hir::BiEq | hir::BiNe | + hir::BiAnd | hir::BiOr => { + span_bug!(op.span, + "impossible assignment operation: {}=", + op.node.as_str()) + } } - } - } else { - match op.node { - hir::BiAdd => ("add", lang.add_trait()), - hir::BiSub => ("sub", lang.sub_trait()), - hir::BiMul => ("mul", lang.mul_trait()), - hir::BiDiv => ("div", lang.div_trait()), - hir::BiRem => ("rem", lang.rem_trait()), - hir::BiBitXor => ("bitxor", lang.bitxor_trait()), - hir::BiBitAnd => ("bitand", lang.bitand_trait()), - hir::BiBitOr => ("bitor", lang.bitor_trait()), - hir::BiShl => ("shl", lang.shl_trait()), - hir::BiShr => ("shr", lang.shr_trait()), - hir::BiLt => ("lt", lang.ord_trait()), - hir::BiLe => ("le", lang.ord_trait()), - hir::BiGe => ("ge", lang.ord_trait()), - hir::BiGt => ("gt", lang.ord_trait()), - hir::BiEq => ("eq", lang.eq_trait()), - hir::BiNe => ("ne", lang.eq_trait()), - hir::BiAnd 
| hir::BiOr => { - fcx.tcx().sess.span_bug(op.span, "&& and || are not overloadable") + } else { + match op.node { + hir::BiAdd => ("add", lang.add_trait()), + hir::BiSub => ("sub", lang.sub_trait()), + hir::BiMul => ("mul", lang.mul_trait()), + hir::BiDiv => ("div", lang.div_trait()), + hir::BiRem => ("rem", lang.rem_trait()), + hir::BiBitXor => ("bitxor", lang.bitxor_trait()), + hir::BiBitAnd => ("bitand", lang.bitand_trait()), + hir::BiBitOr => ("bitor", lang.bitor_trait()), + hir::BiShl => ("shl", lang.shl_trait()), + hir::BiShr => ("shr", lang.shr_trait()), + hir::BiLt => ("lt", lang.ord_trait()), + hir::BiLe => ("le", lang.ord_trait()), + hir::BiGe => ("ge", lang.ord_trait()), + hir::BiGt => ("gt", lang.ord_trait()), + hir::BiEq => ("eq", lang.eq_trait()), + hir::BiNe => ("ne", lang.eq_trait()), + hir::BiAnd | hir::BiOr => { + span_bug!(op.span, "&& and || are not overloadable") + } } } } -} -fn lookup_op_method<'a, 'tcx>(fcx: &'a FnCtxt<'a, 'tcx>, - expr: &'tcx hir::Expr, - lhs_ty: Ty<'tcx>, - other_tys: Vec>, - opname: ast::Name, - trait_did: Option, - lhs_expr: &'a hir::Expr) - -> Result,()> -{ - debug!("lookup_op_method(expr={:?}, lhs_ty={:?}, opname={:?}, trait_did={:?}, lhs_expr={:?})", - expr, - lhs_ty, - opname, - trait_did, - lhs_expr); - - let method = match trait_did { - Some(trait_did) => { - method::lookup_in_trait_adjusted(fcx, - expr.span, - Some(lhs_expr), - opname, - trait_did, - 0, - false, - lhs_ty, - Some(other_tys)) - } - None => None - }; + fn lookup_op_method(&self, + expr: &'gcx hir::Expr, + lhs_ty: Ty<'tcx>, + other_tys: Vec>, + opname: ast::Name, + trait_did: Option, + lhs_expr: &'a hir::Expr) + -> Result,()> + { + debug!("lookup_op_method(expr={:?}, lhs_ty={:?}, opname={:?}, \ + trait_did={:?}, lhs_expr={:?})", + expr, + lhs_ty, + opname, + trait_did, + lhs_expr); + + let method = match trait_did { + Some(trait_did) => { + self.lookup_method_in_trait_adjusted(expr.span, + Some(lhs_expr), + opname, + trait_did, + 0, + false, + 
lhs_ty, + Some(other_tys)) + } + None => None + }; - match method { - Some(method) => { - let method_ty = method.ty; + match method { + Some(method) => { + let method_ty = method.ty; - // HACK(eddyb) Fully qualified path to work around a resolve bug. - let method_call = ::middle::ty::MethodCall::expr(expr.id); - fcx.inh.tables.borrow_mut().method_map.insert(method_call, method); + // HACK(eddyb) Fully qualified path to work around a resolve bug. + let method_call = ::rustc::ty::MethodCall::expr(expr.id); + self.tables.borrow_mut().method_map.insert(method_call, method); - // extract return type for method; all late bound regions - // should have been instantiated by now - let ret_ty = method_ty.fn_ret(); - Ok(fcx.tcx().no_late_bound_regions(&ret_ty).unwrap().unwrap()) - } - None => { - Err(()) + // extract return type for method; all late bound regions + // should have been instantiated by now + let ret_ty = method_ty.fn_ret(); + Ok(self.tcx.no_late_bound_regions(&ret_ty).unwrap()) + } + None => { + Err(()) + } } } } @@ -428,11 +443,7 @@ enum IsAssign { /// Reason #2 is the killer. I tried for a while to always use /// overloaded logic and just check the types in constants/trans after /// the fact, and it worked fine, except for SIMD types. -nmatsakis -fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, - rhs: Ty<'tcx>, - op: hir::BinOp) - -> bool -{ +fn is_builtin_binop(lhs: Ty, rhs: Ty, op: hir::BinOp) -> bool { match BinOpCategory::from(op) { BinOpCategory::Shortcircuit => { true diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 47cd31d9898d1..c0bf5773ed56a 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -82,28 +82,25 @@ //! relation, except that a borrowed pointer never owns its //! contents. 
-use astconv::AstConv; use check::dropck; use check::FnCtxt; use middle::free_region::FreeRegionMap; -use middle::implicator::{self, Implication}; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; use middle::region::{self, CodeExtent}; -use middle::subst::Substs; -use middle::traits; -use middle::ty::{self, Ty, MethodCall, TypeFoldable}; -use middle::infer::{self, GenericKind, InferCtxt, SubregionOrigin, TypeOrigin, VerifyBound}; -use middle::pat_util; -use middle::ty::adjustment; -use middle::ty::wf::ImpliedBound; +use rustc::ty::subst::Substs; +use rustc::traits; +use rustc::ty::{self, Ty, MethodCall, TypeFoldable}; +use rustc::infer::{self, GenericKind, SubregionOrigin, VerifyBound}; +use rustc::ty::adjustment; +use rustc::ty::wf::ImpliedBound; use std::mem; +use std::ops::Deref; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; -use rustc_front::util as hir_util; +use syntax_pos::Span; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::{self, PatKind}; use self::SubjectNode::Subject; @@ -115,64 +112,64 @@ macro_rules! 
ignore_err { /////////////////////////////////////////////////////////////////////////// // PUBLIC ENTRY POINTS -pub fn regionck_expr(fcx: &FnCtxt, e: &hir::Expr) { - let mut rcx = Rcx::new(fcx, RepeatingScope(e.id), e.id, Subject(e.id)); - if fcx.err_count_since_creation() == 0 { - // regionck assumes typeck succeeded - rcx.visit_expr(e); - rcx.visit_region_obligations(e.id); +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn regionck_expr(&self, e: &'gcx hir::Expr) { + let mut rcx = RegionCtxt::new(self, RepeatingScope(e.id), e.id, Subject(e.id)); + if self.err_count_since_creation() == 0 { + // regionck assumes typeck succeeded + rcx.visit_expr(e); + rcx.visit_region_obligations(e.id); + } + rcx.resolve_regions_and_report_errors(); } - rcx.resolve_regions_and_report_errors(); -} - -/// Region checking during the WF phase for items. `wf_tys` are the -/// types from which we should derive implied bounds, if any. -pub fn regionck_item<'a,'tcx>(fcx: &FnCtxt<'a,'tcx>, - item_id: ast::NodeId, - span: Span, - wf_tys: &[Ty<'tcx>]) { - debug!("regionck_item(item.id={:?}, wf_tys={:?}", item_id, wf_tys); - let mut rcx = Rcx::new(fcx, RepeatingScope(item_id), item_id, Subject(item_id)); - let tcx = fcx.tcx(); - rcx.free_region_map - .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds); - rcx.relate_free_regions(wf_tys, item_id, span); - rcx.visit_region_obligations(item_id); - rcx.resolve_regions_and_report_errors(); -} -pub fn regionck_fn(fcx: &FnCtxt, - fn_id: ast::NodeId, - fn_span: Span, - decl: &hir::FnDecl, - blk: &hir::Block) { - debug!("regionck_fn(id={})", fn_id); - let mut rcx = Rcx::new(fcx, RepeatingScope(blk.id), blk.id, Subject(fn_id)); - - if fcx.err_count_since_creation() == 0 { - // regionck assumes typeck succeeded - rcx.visit_fn_body(fn_id, decl, blk, fn_span); + /// Region checking during the WF phase for items. `wf_tys` are the + /// types from which we should derive implied bounds, if any. 
+ pub fn regionck_item(&self, + item_id: ast::NodeId, + span: Span, + wf_tys: &[Ty<'tcx>]) { + debug!("regionck_item(item.id={:?}, wf_tys={:?}", item_id, wf_tys); + let mut rcx = RegionCtxt::new(self, RepeatingScope(item_id), item_id, Subject(item_id)); + rcx.free_region_map.relate_free_regions_from_predicates( + &self.parameter_environment.caller_bounds); + rcx.relate_free_regions(wf_tys, item_id, span); + rcx.visit_region_obligations(item_id); + rcx.resolve_regions_and_report_errors(); } - let tcx = fcx.tcx(); - rcx.free_region_map - .relate_free_regions_from_predicates(tcx, &fcx.infcx().parameter_environment.caller_bounds); + pub fn regionck_fn(&self, + fn_id: ast::NodeId, + decl: &hir::FnDecl, + body_id: hir::ExprId) { + debug!("regionck_fn(id={})", fn_id); + let node_id = body_id.node_id(); + let mut rcx = RegionCtxt::new(self, RepeatingScope(node_id), node_id, Subject(fn_id)); + + if self.err_count_since_creation() == 0 { + // regionck assumes typeck succeeded + rcx.visit_fn_body(fn_id, decl, body_id, self.tcx.map.span(fn_id)); + } + + rcx.free_region_map.relate_free_regions_from_predicates( + &self.parameter_environment.caller_bounds); - rcx.resolve_regions_and_report_errors(); + rcx.resolve_regions_and_report_errors(); - // For the top-level fn, store the free-region-map. We don't store - // any map for closures; they just share the same map as the - // function that created them. - fcx.tcx().store_free_region_map(fn_id, rcx.free_region_map); + // For the top-level fn, store the free-region-map. We don't store + // any map for closures; they just share the same map as the + // function that created them. 
+ self.tcx.store_free_region_map(fn_id, rcx.free_region_map); + } } /////////////////////////////////////////////////////////////////////////// // INTERNALS -pub struct Rcx<'a, 'tcx: 'a> { - pub fcx: &'a FnCtxt<'a, 'tcx>, +pub struct RegionCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, - region_bound_pairs: Vec<(ty::Region, GenericKind<'tcx>)>, + region_bound_pairs: Vec<(&'tcx ty::Region, GenericKind<'tcx>)>, free_region_map: FreeRegionMap, @@ -190,33 +187,33 @@ pub struct Rcx<'a, 'tcx: 'a> { } +impl<'a, 'gcx, 'tcx> Deref for RegionCtxt<'a, 'gcx, 'tcx> { + type Target = FnCtxt<'a, 'gcx, 'tcx>; + fn deref(&self) -> &Self::Target { + &self.fcx + } +} + pub struct RepeatingScope(ast::NodeId); pub enum SubjectNode { Subject(ast::NodeId), None } -impl<'a, 'tcx> Rcx<'a, 'tcx> { - pub fn new(fcx: &'a FnCtxt<'a, 'tcx>, +impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { + pub fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, initial_repeating_scope: RepeatingScope, initial_body_id: ast::NodeId, - subject: SubjectNode) -> Rcx<'a, 'tcx> { + subject: SubjectNode) -> RegionCtxt<'a, 'gcx, 'tcx> { let RepeatingScope(initial_repeating_scope) = initial_repeating_scope; - Rcx { fcx: fcx, - repeating_scope: initial_repeating_scope, - body_id: initial_body_id, - call_site_scope: None, - subject: subject, - region_bound_pairs: Vec::new(), - free_region_map: FreeRegionMap::new(), + RegionCtxt { + fcx: fcx, + repeating_scope: initial_repeating_scope, + body_id: initial_body_id, + call_site_scope: None, + subject: subject, + region_bound_pairs: Vec::new(), + free_region_map: FreeRegionMap::new(), } } - pub fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.fcx.ccx.tcx - } - - pub fn infcx(&self) -> &InferCtxt<'a,'tcx> { - self.fcx.infcx() - } - fn set_call_site_scope(&mut self, call_site_scope: Option) -> Option { mem::replace(&mut self.call_site_scope, call_site_scope) } @@ -253,54 +250,40 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { /// of b will be `&.i32` and then `*b` will require 
that `` be bigger than the let and /// the `*b` expression, so we will effectively resolve `` to be the block B. pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> { - self.fcx.infcx().resolve_type_vars_if_possible(&unresolved_ty) + self.resolve_type_vars_if_possible(&unresolved_ty) } /// Try to resolve the type for the given node. fn resolve_node_type(&self, id: ast::NodeId) -> Ty<'tcx> { - let t = self.fcx.node_ty(id); + let t = self.node_ty(id); self.resolve_type(t) } - fn resolve_method_type(&self, method_call: MethodCall) -> Option> { - let method_ty = self.fcx.inh.tables.borrow().method_map - .get(&method_call).map(|method| method.ty); - method_ty.map(|method_ty| self.resolve_type(method_ty)) - } - /// Try to resolve the type for the given node. pub fn resolve_expr_type_adjusted(&mut self, expr: &hir::Expr) -> Ty<'tcx> { - let ty_unadjusted = self.resolve_node_type(expr.id); - if ty_unadjusted.references_error() { - ty_unadjusted - } else { - ty_unadjusted.adjust( - self.fcx.tcx(), expr.span, expr.id, - self.fcx.inh.tables.borrow().adjustments.get(&expr.id), - |method_call| self.resolve_method_type(method_call)) - } + let ty = self.tables.borrow().expr_ty_adjusted(expr); + self.resolve_type(ty) } fn visit_fn_body(&mut self, id: ast::NodeId, // the id of the fn itself fn_decl: &hir::FnDecl, - body: &hir::Block, + body_id: hir::ExprId, span: Span) { // When we enter a function, we can derive debug!("visit_fn_body(id={})", id); - let call_site = self.fcx.tcx().region_maps.lookup_code_extent( - region::CodeExtentData::CallSiteScope { fn_id: id, body_id: body.id }); + let call_site = self.tcx.region_maps.lookup_code_extent( + region::CodeExtentData::CallSiteScope { fn_id: id, body_id: body_id.node_id() }); let old_call_site_scope = self.set_call_site_scope(Some(call_site)); let fn_sig = { - let fn_sig_map = &self.infcx().tables.borrow().liberated_fn_sigs; + let fn_sig_map = &self.tables.borrow().liberated_fn_sigs; match fn_sig_map.get(&id) { Some(f) 
=> f.clone(), None => { - self.tcx().sess.bug( - &format!("No fn-sig entry for id={}", id)); + bug!("No fn-sig entry for id={}", id); } } }; @@ -315,24 +298,24 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { let fn_sig_tys: Vec<_> = fn_sig.inputs.iter() .cloned() - .chain(Some(fn_sig.output.unwrap_or(self.tcx().types.bool))) + .chain(Some(fn_sig.output)) .collect(); - let old_body_id = self.set_body_id(body.id); - self.relate_free_regions(&fn_sig_tys[..], body.id, span); - link_fn_args(self, - self.tcx().region_maps.node_extent(body.id), - &fn_decl.inputs[..]); - self.visit_block(body); - self.visit_region_obligations(body.id); + let old_body_id = self.set_body_id(body_id.node_id()); + self.relate_free_regions(&fn_sig_tys[..], body_id.node_id(), span); + self.link_fn_args(self.tcx.region_maps.node_extent(body_id.node_id()), + &fn_decl.inputs[..]); + let body = self.tcx.map.expr(body_id); + self.visit_expr(body); + self.visit_region_obligations(body_id.node_id()); let call_site_scope = self.call_site_scope.unwrap(); debug!("visit_fn_body body.id {} call_site_scope: {:?}", body.id, call_site_scope); - type_of_node_must_outlive(self, - infer::CallReturn(span), - body.id, - ty::ReScope(call_site_scope)); + let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope)); + self.type_of_node_must_outlive(infer::CallReturn(span), + body_id.node_id(), + call_site_region); self.region_bound_pairs.truncate(old_region_bounds_pairs_len); @@ -347,15 +330,12 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { // region checking can introduce new pending obligations // which, when processed, might generate new region // obligations. So make sure we process those. - self.fcx.select_all_obligations_or_error(); + self.select_all_obligations_or_error(); // Make a copy of the region obligations vec because we'll need // to be able to borrow the fulfillment-cx below when projecting. 
let region_obligations = - self.fcx - .inh - .infcx - .fulfillment_cx + self.fulfillment_cx .borrow() .region_obligations(node_id) .to_vec(); @@ -364,72 +344,21 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { debug!("visit_region_obligations: r_o={:?} cause={:?}", r_o, r_o.cause); let sup_type = self.resolve_type(r_o.sup_type); - let origin = self.code_to_origin(r_o.cause.span, sup_type, &r_o.cause.code); - - if r_o.sub_region != ty::ReEmpty { - type_must_outlive(self, origin, sup_type, r_o.sub_region); - } else { - self.visit_old_school_wf(node_id, sup_type, origin); - } + let origin = self.code_to_origin(&r_o.cause, sup_type); + self.type_must_outlive(origin, sup_type, r_o.sub_region); } // Processing the region obligations should not cause the list to grow further: assert_eq!(region_obligations.len(), - self.fcx.inh.infcx.fulfillment_cx.borrow().region_obligations(node_id).len()); - } - - fn visit_old_school_wf(&mut self, - body_id: ast::NodeId, - ty: Ty<'tcx>, - origin: infer::SubregionOrigin<'tcx>) { - // As a weird kind of hack, we use a region of empty as a signal - // to mean "old-school WF rules". The only reason the old-school - // WF rules are not encoded using WF is that this leads to errors, - // and we want to phase those in gradually. 
- - // FIXME(#27579) remove this weird special case once we phase in new WF rules completely - let implications = implicator::implications(self.infcx(), - body_id, - ty, - ty::ReEmpty, - origin.span()); - let origin_for_ty = |ty: Option>| match ty { - None => origin.clone(), - Some(ty) => infer::ReferenceOutlivesReferent(ty, origin.span()), - }; - for implication in implications { - match implication { - Implication::RegionSubRegion(ty, r1, r2) => { - self.fcx.mk_subr(origin_for_ty(ty), r1, r2); - } - Implication::RegionSubGeneric(ty, r1, GenericKind::Param(param_ty)) => { - param_ty_must_outlive(self, origin_for_ty(ty), r1, param_ty); - } - Implication::RegionSubGeneric(ty, r1, GenericKind::Projection(proj_ty)) => { - projection_must_outlive(self, origin_for_ty(ty), r1, proj_ty); - } - Implication::Predicate(def_id, predicate) => { - let cause = traits::ObligationCause::new(origin.span(), - body_id, - traits::ItemObligation(def_id)); - let obligation = traits::Obligation::new(cause, predicate); - self.fcx.register_predicate(obligation); - } - } - } + self.fulfillment_cx.borrow().region_obligations(node_id).len()); } fn code_to_origin(&self, - span: Span, - sup_type: Ty<'tcx>, - code: &traits::ObligationCauseCode<'tcx>) + cause: &traits::ObligationCause<'tcx>, + sup_type: Ty<'tcx>) -> SubregionOrigin<'tcx> { - match *code { - traits::ObligationCauseCode::ReferenceOutlivesReferent(ref_type) => - infer::ReferenceOutlivesReferent(ref_type, span), - _ => - infer::RelateParamBound(span, sup_type), - } + SubregionOrigin::from_obligation_cause(cause, + || infer::RelateParamBound(cause.span, sup_type)) } /// This method populates the region map's `free_region_map`. 
It walks over the transformed @@ -451,7 +380,7 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { for &ty in fn_sig_tys { let ty = self.resolve_type(ty); debug!("relate_free_regions(t={:?})", ty); - let implied_bounds = ty::wf::implied_bounds(self.fcx.infcx(), body_id, ty, span); + let implied_bounds = ty::wf::implied_bounds(self, body_id, ty, span); // Record any relations between free regions that we observe into the free-region-map. self.free_region_map.relate_free_regions_from_implied_bounds(&implied_bounds); @@ -462,9 +391,9 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { for implication in implied_bounds { debug!("implication: {:?}", implication); match implication { - ImpliedBound::RegionSubRegion(ty::ReFree(free_a), - ty::ReVar(vid_b)) => { - self.fcx.inh.infcx.add_given(free_a, vid_b); + ImpliedBound::RegionSubRegion(&ty::ReFree(free_a), + &ty::ReVar(vid_b)) => { + self.add_given(free_a, vid_b); } ImpliedBound::RegionSubParam(r_a, param_b) => { self.region_bound_pairs.push((r_a, GenericKind::Param(param_b))); @@ -494,17 +423,55 @@ impl<'a, 'tcx> Rcx<'a, 'tcx> { let subject_node_id = match self.subject { Subject(s) => s, SubjectNode::None => { - self.tcx().sess.bug("cannot resolve_regions_and_report_errors \ - without subject node"); + bug!("cannot resolve_regions_and_report_errors \ + without subject node"); } }; - self.fcx.infcx().resolve_regions_and_report_errors(&self.free_region_map, - subject_node_id); + self.fcx.resolve_regions_and_report_errors(&self.free_region_map, + subject_node_id); + } + + fn constrain_bindings_in_pat(&mut self, pat: &hir::Pat) { + let tcx = self.tcx; + debug!("regionck::visit_pat(pat={:?})", pat); + pat.each_binding(|_, id, span, _| { + // If we have a variable that contains region'd data, that + // data will be accessible from anywhere that the variable is + // accessed. 
We must be wary of loops like this: + // + // // from src/test/compile-fail/borrowck-lend-flow.rs + // let mut v = box 3, w = box 4; + // let mut x = &mut w; + // loop { + // **x += 1; // (2) + // borrow(v); //~ ERROR cannot borrow + // x = &mut v; // (1) + // } + // + // Typically, we try to determine the region of a borrow from + // those points where it is dereferenced. In this case, one + // might imagine that the lifetime of `x` need only be the + // body of the loop. But of course this is incorrect because + // the pointer that is created at point (1) is consumed at + // point (2), meaning that it must be live across the loop + // iteration. The easiest way to guarantee this is to require + // that the lifetime of any regions that appear in a + // variable's type enclose at least the variable's scope. + + let var_scope = tcx.region_maps.var_scope(id); + let var_region = self.tcx.mk_region(ty::ReScope(var_scope)); + + let origin = infer::BindingTypeIsNotValidAtDecl(span); + self.type_of_node_must_outlive(origin, id, var_region); + + let typ = self.resolve_node_type(id); + dropck::check_safety_of_destructor_if_necessary(self, typ, span, var_scope); + }) } } -impl<'a, 'tcx, 'v> Visitor<'v> for Rcx<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { // (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local, // However, right now we run into an issue whereby some free // regions are not properly related if they appear within the @@ -513,379 +480,326 @@ impl<'a, 'tcx, 'v> Visitor<'v> for Rcx<'a, 'tcx> { // hierarchy, and in particular the relationships between free // regions, until regionck, as described in #3238. 
- fn visit_fn(&mut self, _fk: intravisit::FnKind<'v>, fd: &'v hir::FnDecl, - b: &'v hir::Block, span: Span, id: ast::NodeId) { - self.visit_fn_body(id, fd, b, span) + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.tcx.map) } - fn visit_expr(&mut self, ex: &hir::Expr) { visit_expr(self, ex); } + fn visit_fn(&mut self, _fk: intravisit::FnKind<'gcx>, fd: &'gcx hir::FnDecl, + b: hir::ExprId, span: Span, id: ast::NodeId) { + self.visit_fn_body(id, fd, b, span) + } //visit_pat: visit_pat, // (..) see above - fn visit_arm(&mut self, a: &hir::Arm) { visit_arm(self, a); } - - fn visit_local(&mut self, l: &hir::Local) { visit_local(self, l); } - - fn visit_block(&mut self, b: &hir::Block) { visit_block(self, b); } -} - -fn visit_block(rcx: &mut Rcx, b: &hir::Block) { - intravisit::walk_block(rcx, b); -} - -fn visit_arm(rcx: &mut Rcx, arm: &hir::Arm) { - // see above - for p in &arm.pats { - constrain_bindings_in_pat(&**p, rcx); + fn visit_arm(&mut self, arm: &'gcx hir::Arm) { + // see above + for p in &arm.pats { + self.constrain_bindings_in_pat(p); + } + intravisit::walk_arm(self, arm); } - intravisit::walk_arm(rcx, arm); -} - -fn visit_local(rcx: &mut Rcx, l: &hir::Local) { - // see above - constrain_bindings_in_pat(&*l.pat, rcx); - link_local(rcx, l); - intravisit::walk_local(rcx, l); -} - -fn constrain_bindings_in_pat(pat: &hir::Pat, rcx: &mut Rcx) { - let tcx = rcx.fcx.tcx(); - debug!("regionck::visit_pat(pat={:?})", pat); - pat_util::pat_bindings(&tcx.def_map, pat, |_, id, span, _| { - // If we have a variable that contains region'd data, that - // data will be accessible from anywhere that the variable is - // accessed. 
We must be wary of loops like this: - // - // // from src/test/compile-fail/borrowck-lend-flow.rs - // let mut v = box 3, w = box 4; - // let mut x = &mut w; - // loop { - // **x += 1; // (2) - // borrow(v); //~ ERROR cannot borrow - // x = &mut v; // (1) - // } - // - // Typically, we try to determine the region of a borrow from - // those points where it is dereferenced. In this case, one - // might imagine that the lifetime of `x` need only be the - // body of the loop. But of course this is incorrect because - // the pointer that is created at point (1) is consumed at - // point (2), meaning that it must be live across the loop - // iteration. The easiest way to guarantee this is to require - // that the lifetime of any regions that appear in a - // variable's type enclose at least the variable's scope. - - let var_scope = tcx.region_maps.var_scope(id); - - let origin = infer::BindingTypeIsNotValidAtDecl(span); - type_of_node_must_outlive(rcx, origin, id, ty::ReScope(var_scope)); - - let typ = rcx.resolve_node_type(id); - dropck::check_safety_of_destructor_if_necessary(rcx, typ, span, var_scope); - }) -} + fn visit_local(&mut self, l: &'gcx hir::Local) { + // see above + self.constrain_bindings_in_pat(&l.pat); + self.link_local(l); + intravisit::walk_local(self, l); + } -fn visit_expr(rcx: &mut Rcx, expr: &hir::Expr) { - debug!("regionck::visit_expr(e={:?}, repeating_scope={})", - expr, rcx.repeating_scope); - - // No matter what, the type of each expression must outlive the - // scope of that expression. This also guarantees basic WF. 
- let expr_ty = rcx.resolve_node_type(expr.id); - // the region corresponding to this expression - let expr_region = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id)); - type_must_outlive(rcx, infer::ExprTypeIsNotInScope(expr_ty, expr.span), - expr_ty, expr_region); - - let method_call = MethodCall::expr(expr.id); - let opt_method_callee = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).cloned(); - let has_method_map = opt_method_callee.is_some(); - - // If we are calling a method (either explicitly or via an - // overloaded operator), check that all of the types provided as - // arguments for its type parameters are well-formed, and all the regions - // provided as arguments outlive the call. - if let Some(callee) = opt_method_callee { - let origin = match expr.node { - hir::ExprMethodCall(..) => - infer::ParameterOrigin::MethodCall, - hir::ExprUnary(op, _) if op == hir::UnDeref => - infer::ParameterOrigin::OverloadedDeref, - _ => - infer::ParameterOrigin::OverloadedOperator - }; + fn visit_expr(&mut self, expr: &'gcx hir::Expr) { + debug!("regionck::visit_expr(e={:?}, repeating_scope={})", + expr, self.repeating_scope); + + // No matter what, the type of each expression must outlive the + // scope of that expression. This also guarantees basic WF. + let expr_ty = self.resolve_node_type(expr.id); + // the region corresponding to this expression + let expr_region = self.tcx.node_scope_region(expr.id); + self.type_must_outlive(infer::ExprTypeIsNotInScope(expr_ty, expr.span), + expr_ty, expr_region); + + let method_call = MethodCall::expr(expr.id); + let opt_method_callee = self.tables.borrow().method_map.get(&method_call).cloned(); + let has_method_map = opt_method_callee.is_some(); + + // If we are calling a method (either explicitly or via an + // overloaded operator), check that all of the types provided as + // arguments for its type parameters are well-formed, and all the regions + // provided as arguments outlive the call. 
+ if let Some(callee) = opt_method_callee { + let origin = match expr.node { + hir::ExprMethodCall(..) => + infer::ParameterOrigin::MethodCall, + hir::ExprUnary(op, _) if op == hir::UnDeref => + infer::ParameterOrigin::OverloadedDeref, + _ => + infer::ParameterOrigin::OverloadedOperator + }; - substs_wf_in_scope(rcx, origin, &callee.substs, expr.span, expr_region); - type_must_outlive(rcx, infer::ExprTypeIsNotInScope(callee.ty, expr.span), - callee.ty, expr_region); - } + self.substs_wf_in_scope(origin, &callee.substs, expr.span, expr_region); + self.type_must_outlive(infer::ExprTypeIsNotInScope(callee.ty, expr.span), + callee.ty, expr_region); + } - // Check any autoderefs or autorefs that appear. - let adjustment = rcx.fcx.inh.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone()); - if let Some(adjustment) = adjustment { - debug!("adjustment={:?}", adjustment); - match adjustment { - adjustment::AdjustDerefRef(adjustment::AutoDerefRef { - autoderefs, ref autoref, .. - }) => { - let expr_ty = rcx.resolve_node_type(expr.id); - constrain_autoderefs(rcx, expr, autoderefs, expr_ty); - if let Some(ref autoref) = *autoref { - link_autoref(rcx, expr, autoderefs, autoref); - - // Require that the resulting region encompasses - // the current node. - // - // FIXME(#6268) remove to support nested method calls - type_of_node_must_outlive( - rcx, infer::AutoBorrow(expr.span), - expr.id, expr_region); + // Check any autoderefs or autorefs that appear. + let adjustment = self.tables.borrow().adjustments.get(&expr.id).map(|a| a.clone()); + if let Some(adjustment) = adjustment { + debug!("adjustment={:?}", adjustment); + match adjustment.kind { + adjustment::Adjust::DerefRef { autoderefs, ref autoref, .. 
} => { + let expr_ty = self.resolve_node_type(expr.id); + self.constrain_autoderefs(expr, autoderefs, expr_ty); + if let Some(ref autoref) = *autoref { + self.link_autoref(expr, autoderefs, autoref); + + // Require that the resulting region encompasses + // the current node. + // + // FIXME(#6268) remove to support nested method calls + self.type_of_node_must_outlive(infer::AutoBorrow(expr.span), + expr.id, expr_region); + } + } + /* + adjustment::AutoObject(_, ref bounds, ..) => { + // Determine if we are casting `expr` to a trait + // instance. If so, we have to be sure that the type + // of the source obeys the new region bound. + let source_ty = self.resolve_node_type(expr.id); + self.type_must_outlive(infer::RelateObjectBound(expr.span), + source_ty, bounds.region_bound); } + */ + _ => {} } - /* - adjustment::AutoObject(_, ref bounds, _, _) => { - // Determine if we are casting `expr` to a trait - // instance. If so, we have to be sure that the type - // of the source obeys the new region bound. - let source_ty = rcx.resolve_node_type(expr.id); - type_must_outlive(rcx, infer::RelateObjectBound(expr.span), - source_ty, bounds.region_bound); + + // If necessary, constrain destructors in the unadjusted form of this + // expression. + let cmt_result = { + let mc = mc::MemCategorizationContext::new(self); + mc.cat_expr_unadjusted(expr) + }; + match cmt_result { + Ok(head_cmt) => { + self.check_safety_of_rvalue_destructor_if_necessary(head_cmt, + expr.span); + } + Err(..) => { + self.tcx.sess.delay_span_bug(expr.span, "cat_expr_unadjusted Errd"); + } } - */ - _ => {} } - // If necessary, constrain destructors in the unadjusted form of this - // expression. + // If necessary, constrain destructors in this expression. This will be + // the adjusted form if there is an adjustment. 
let cmt_result = { - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - mc.cat_expr_unadjusted(expr) + let mc = mc::MemCategorizationContext::new(self); + mc.cat_expr(expr) }; match cmt_result { Ok(head_cmt) => { - check_safety_of_rvalue_destructor_if_necessary(rcx, - head_cmt, - expr.span); + self.check_safety_of_rvalue_destructor_if_necessary(head_cmt, expr.span); } Err(..) => { - let tcx = rcx.fcx.tcx(); - tcx.sess.delay_span_bug(expr.span, "cat_expr_unadjusted Errd"); + self.tcx.sess.delay_span_bug(expr.span, "cat_expr Errd"); } } - } - // If necessary, constrain destructors in this expression. This will be - // the adjusted form if there is an adjustment. - let cmt_result = { - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - mc.cat_expr(expr) - }; - match cmt_result { - Ok(head_cmt) => { - check_safety_of_rvalue_destructor_if_necessary(rcx, head_cmt, expr.span); - } - Err(..) => { - let tcx = rcx.fcx.tcx(); - tcx.sess.delay_span_bug(expr.span, "cat_expr Errd"); - } - } - - debug!("regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs", - expr, rcx.repeating_scope); - match expr.node { - hir::ExprPath(..) 
=> { - rcx.fcx.opt_node_ty_substs(expr.id, |item_substs| { - let origin = infer::ParameterOrigin::Path; - substs_wf_in_scope(rcx, origin, &item_substs.substs, expr.span, expr_region); - }); - } - - hir::ExprCall(ref callee, ref args) => { - if has_method_map { - constrain_call(rcx, expr, Some(&**callee), - args.iter().map(|e| &**e), false); - } else { - constrain_callee(rcx, callee.id, expr, &**callee); - constrain_call(rcx, expr, None, - args.iter().map(|e| &**e), false); + debug!("regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs", + expr, self.repeating_scope); + match expr.node { + hir::ExprPath(_) => { + self.fcx.opt_node_ty_substs(expr.id, |item_substs| { + let origin = infer::ParameterOrigin::Path; + self.substs_wf_in_scope(origin, &item_substs.substs, expr.span, expr_region); + }); } - intravisit::walk_expr(rcx, expr); - } + hir::ExprCall(ref callee, ref args) => { + if has_method_map { + self.constrain_call(expr, Some(&callee), + args.iter().map(|e| &*e), false); + } else { + self.constrain_callee(callee.id, expr, &callee); + self.constrain_call(expr, None, + args.iter().map(|e| &*e), false); + } - hir::ExprMethodCall(_, _, ref args) => { - constrain_call(rcx, expr, Some(&*args[0]), - args[1..].iter().map(|e| &**e), false); + intravisit::walk_expr(self, expr); + } - intravisit::walk_expr(rcx, expr); - } + hir::ExprMethodCall(.., ref args) => { + self.constrain_call(expr, Some(&args[0]), + args[1..].iter().map(|e| &*e), false); - hir::ExprAssignOp(_, ref lhs, ref rhs) => { - if has_method_map { - constrain_call(rcx, expr, Some(&**lhs), - Some(&**rhs).into_iter(), false); + intravisit::walk_expr(self, expr); } - intravisit::walk_expr(rcx, expr); - } + hir::ExprAssignOp(_, ref lhs, ref rhs) => { + if has_method_map { + self.constrain_call(expr, Some(&lhs), + Some(&**rhs).into_iter(), false); + } - hir::ExprIndex(ref lhs, ref rhs) if has_method_map => { - constrain_call(rcx, expr, Some(&**lhs), - Some(&**rhs).into_iter(), true); + 
intravisit::walk_expr(self, expr); + } - intravisit::walk_expr(rcx, expr); - }, + hir::ExprIndex(ref lhs, ref rhs) if has_method_map => { + self.constrain_call(expr, Some(&lhs), + Some(&**rhs).into_iter(), true); - hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => { - let implicitly_ref_args = !hir_util::is_by_value_binop(op.node); + intravisit::walk_expr(self, expr); + }, - // As `expr_method_call`, but the call is via an - // overloaded op. Note that we (sadly) currently use an - // implicit "by ref" sort of passing style here. This - // should be converted to an adjustment! - constrain_call(rcx, expr, Some(&**lhs), - Some(&**rhs).into_iter(), implicitly_ref_args); + hir::ExprBinary(op, ref lhs, ref rhs) if has_method_map => { + let implicitly_ref_args = !op.node.is_by_value(); - intravisit::walk_expr(rcx, expr); - } + // As `expr_method_call`, but the call is via an + // overloaded op. Note that we (sadly) currently use an + // implicit "by ref" sort of passing style here. This + // should be converted to an adjustment! + self.constrain_call(expr, Some(&lhs), + Some(&**rhs).into_iter(), implicitly_ref_args); - hir::ExprBinary(_, ref lhs, ref rhs) => { - // If you do `x OP y`, then the types of `x` and `y` must - // outlive the operation you are performing. - let lhs_ty = rcx.resolve_expr_type_adjusted(&**lhs); - let rhs_ty = rcx.resolve_expr_type_adjusted(&**rhs); - for &ty in &[lhs_ty, rhs_ty] { - type_must_outlive(rcx, - infer::Operand(expr.span), - ty, - expr_region); + intravisit::walk_expr(self, expr); } - intravisit::walk_expr(rcx, expr); - } - hir::ExprUnary(op, ref lhs) if has_method_map => { - let implicitly_ref_args = !hir_util::is_by_value_unop(op); + hir::ExprBinary(_, ref lhs, ref rhs) => { + // If you do `x OP y`, then the types of `x` and `y` must + // outlive the operation you are performing. 
+ let lhs_ty = self.resolve_expr_type_adjusted(&lhs); + let rhs_ty = self.resolve_expr_type_adjusted(&rhs); + for &ty in &[lhs_ty, rhs_ty] { + self.type_must_outlive(infer::Operand(expr.span), + ty, expr_region); + } + intravisit::walk_expr(self, expr); + } - // As above. - constrain_call(rcx, expr, Some(&**lhs), - None::.iter(), implicitly_ref_args); + hir::ExprUnary(op, ref lhs) if has_method_map => { + let implicitly_ref_args = !op.is_by_value(); - intravisit::walk_expr(rcx, expr); - } + // As above. + self.constrain_call(expr, Some(&lhs), + None::.iter(), implicitly_ref_args); - hir::ExprUnary(hir::UnDeref, ref base) => { - // For *a, the lifetime of a must enclose the deref - let method_call = MethodCall::expr(expr.id); - let base_ty = match rcx.fcx.inh.tables.borrow().method_map.get(&method_call) { - Some(method) => { - constrain_call(rcx, expr, Some(&**base), - None::.iter(), true); - let fn_ret = // late-bound regions in overloaded method calls are instantiated - rcx.tcx().no_late_bound_regions(&method.ty.fn_ret()).unwrap(); - fn_ret.unwrap() - } - None => rcx.resolve_node_type(base.id) - }; - if let ty::TyRef(r_ptr, _) = base_ty.sty { - mk_subregion_due_to_dereference( - rcx, expr.span, expr_region, *r_ptr); + intravisit::walk_expr(self, expr); } - intravisit::walk_expr(rcx, expr); - } + hir::ExprUnary(hir::UnDeref, ref base) => { + // For *a, the lifetime of a must enclose the deref + let method_call = MethodCall::expr(expr.id); + let base_ty = match self.tables.borrow().method_map.get(&method_call) { + Some(method) => { + self.constrain_call(expr, Some(&base), + None::.iter(), true); + // late-bound regions in overloaded method calls are instantiated + let fn_ret = self.tcx.no_late_bound_regions(&method.ty.fn_ret()); + fn_ret.unwrap() + } + None => self.resolve_node_type(base.id) + }; + if let ty::TyRef(r_ptr, _) = base_ty.sty { + self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr); + } - hir::ExprIndex(ref vec_expr, _) => { - // For 
a[b], the lifetime of a must enclose the deref - let vec_type = rcx.resolve_expr_type_adjusted(&**vec_expr); - constrain_index(rcx, expr, vec_type); + intravisit::walk_expr(self, expr); + } - intravisit::walk_expr(rcx, expr); - } + hir::ExprIndex(ref vec_expr, _) => { + // For a[b], the lifetime of a must enclose the deref + let vec_type = self.resolve_expr_type_adjusted(&vec_expr); + self.constrain_index(expr, vec_type); - hir::ExprCast(ref source, _) => { - // Determine if we are casting `source` to a trait - // instance. If so, we have to be sure that the type of - // the source obeys the trait's region bound. - constrain_cast(rcx, expr, &**source); - intravisit::walk_expr(rcx, expr); - } + intravisit::walk_expr(self, expr); + } - hir::ExprAddrOf(m, ref base) => { - link_addr_of(rcx, expr, m, &**base); + hir::ExprCast(ref source, _) => { + // Determine if we are casting `source` to a trait + // instance. If so, we have to be sure that the type of + // the source obeys the trait's region bound. + self.constrain_cast(expr, &source); + intravisit::walk_expr(self, expr); + } - // Require that when you write a `&expr` expression, the - // resulting pointer has a lifetime that encompasses the - // `&expr` expression itself. Note that we constraining - // the type of the node expr.id here *before applying - // adjustments*. - // - // FIXME(#6268) nested method calls requires that this rule change - let ty0 = rcx.resolve_node_type(expr.id); - type_must_outlive(rcx, infer::AddrOf(expr.span), ty0, expr_region); - intravisit::walk_expr(rcx, expr); - } + hir::ExprAddrOf(m, ref base) => { + self.link_addr_of(expr, m, &base); + + // Require that when you write a `&expr` expression, the + // resulting pointer has a lifetime that encompasses the + // `&expr` expression itself. Note that we constraining + // the type of the node expr.id here *before applying + // adjustments*. 
+ // + // FIXME(#6268) nested method calls requires that this rule change + let ty0 = self.resolve_node_type(expr.id); + self.type_must_outlive(infer::AddrOf(expr.span), ty0, expr_region); + intravisit::walk_expr(self, expr); + } - hir::ExprMatch(ref discr, ref arms, _) => { - link_match(rcx, &**discr, &arms[..]); + hir::ExprMatch(ref discr, ref arms, _) => { + self.link_match(&discr, &arms[..]); - intravisit::walk_expr(rcx, expr); - } + intravisit::walk_expr(self, expr); + } - hir::ExprClosure(_, _, ref body) => { - check_expr_fn_block(rcx, expr, &**body); - } + hir::ExprClosure(.., body_id, _) => { + self.check_expr_fn_block(expr, body_id); + } - hir::ExprLoop(ref body, _) => { - let repeating_scope = rcx.set_repeating_scope(body.id); - intravisit::walk_expr(rcx, expr); - rcx.set_repeating_scope(repeating_scope); - } + hir::ExprLoop(ref body, _, _) => { + let repeating_scope = self.set_repeating_scope(body.id); + intravisit::walk_expr(self, expr); + self.set_repeating_scope(repeating_scope); + } - hir::ExprWhile(ref cond, ref body, _) => { - let repeating_scope = rcx.set_repeating_scope(cond.id); - rcx.visit_expr(&**cond); + hir::ExprWhile(ref cond, ref body, _) => { + let repeating_scope = self.set_repeating_scope(cond.id); + self.visit_expr(&cond); - rcx.set_repeating_scope(body.id); - rcx.visit_block(&**body); + self.set_repeating_scope(body.id); + self.visit_block(&body); - rcx.set_repeating_scope(repeating_scope); - } + self.set_repeating_scope(repeating_scope); + } - hir::ExprRet(Some(ref ret_expr)) => { - let call_site_scope = rcx.call_site_scope; - debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: {:?}", - ret_expr.id, call_site_scope); - type_of_node_must_outlive(rcx, - infer::CallReturn(ret_expr.span), - ret_expr.id, - ty::ReScope(call_site_scope.unwrap())); - intravisit::walk_expr(rcx, expr); - } + hir::ExprRet(Some(ref ret_expr)) => { + let call_site_scope = self.call_site_scope; + debug!("visit_expr ExprRet ret_expr.id {} call_site_scope: 
{:?}", + ret_expr.id, call_site_scope); + let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope.unwrap())); + self.type_of_node_must_outlive(infer::CallReturn(ret_expr.span), + ret_expr.id, + call_site_region); + intravisit::walk_expr(self, expr); + } - _ => { - intravisit::walk_expr(rcx, expr); + _ => { + intravisit::walk_expr(self, expr); + } } } } -fn constrain_cast(rcx: &mut Rcx, - cast_expr: &hir::Expr, - source_expr: &hir::Expr) -{ - debug!("constrain_cast(cast_expr={:?}, source_expr={:?})", - cast_expr, - source_expr); +impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { + fn constrain_cast(&mut self, + cast_expr: &hir::Expr, + source_expr: &hir::Expr) + { + debug!("constrain_cast(cast_expr={:?}, source_expr={:?})", + cast_expr, + source_expr); - let source_ty = rcx.resolve_node_type(source_expr.id); - let target_ty = rcx.resolve_node_type(cast_expr.id); + let source_ty = self.resolve_node_type(source_expr.id); + let target_ty = self.resolve_node_type(cast_expr.id); - walk_cast(rcx, cast_expr, source_ty, target_ty); + self.walk_cast(cast_expr, source_ty, target_ty); + } - fn walk_cast<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>, - cast_expr: &hir::Expr, - from_ty: Ty<'tcx>, - to_ty: Ty<'tcx>) { + fn walk_cast(&mut self, + cast_expr: &hir::Expr, + from_ty: Ty<'tcx>, + to_ty: Ty<'tcx>) { debug!("walk_cast(from_ty={:?}, to_ty={:?})", from_ty, to_ty); @@ -893,1016 +807,980 @@ fn constrain_cast(rcx: &mut Rcx, /*From:*/ (&ty::TyRef(from_r, ref from_mt), /*To: */ &ty::TyRef(to_r, ref to_mt)) => { // Target cannot outlive source, naturally. - rcx.fcx.mk_subr(infer::Reborrow(cast_expr.span), *to_r, *from_r); - walk_cast(rcx, cast_expr, from_mt.ty, to_mt.ty); + self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r); + self.walk_cast(cast_expr, from_mt.ty, to_mt.ty); } /*From:*/ (_, - /*To: */ &ty::TyTrait(box ty::TraitTy { ref bounds, .. 
})) => { + /*To: */ &ty::TyDynamic(.., r)) => { // When T is existentially quantified as a trait // `Foo+'to`, it must outlive the region bound `'to`. - type_must_outlive(rcx, infer::RelateObjectBound(cast_expr.span), - from_ty, bounds.region_bound); + self.type_must_outlive(infer::RelateObjectBound(cast_expr.span), from_ty, r); } /*From:*/ (&ty::TyBox(from_referent_ty), /*To: */ &ty::TyBox(to_referent_ty)) => { - walk_cast(rcx, cast_expr, from_referent_ty, to_referent_ty); + self.walk_cast(cast_expr, from_referent_ty, to_referent_ty); } _ => { } } } -} -fn check_expr_fn_block(rcx: &mut Rcx, - expr: &hir::Expr, - body: &hir::Block) { - let repeating_scope = rcx.set_repeating_scope(body.id); - intravisit::walk_expr(rcx, expr); - rcx.set_repeating_scope(repeating_scope); -} + fn check_expr_fn_block(&mut self, + expr: &'gcx hir::Expr, + body_id: hir::ExprId) { + let repeating_scope = self.set_repeating_scope(body_id.node_id()); + intravisit::walk_expr(self, expr); + self.set_repeating_scope(repeating_scope); + } -fn constrain_callee(rcx: &mut Rcx, - callee_id: ast::NodeId, - _call_expr: &hir::Expr, - _callee_expr: &hir::Expr) { - let callee_ty = rcx.resolve_node_type(callee_id); - match callee_ty.sty { - ty::TyBareFn(..) => { } - _ => { - // this should not happen, but it does if the program is - // erroneous - // - // tcx.sess.span_bug( - // callee_expr.span, - // format!("Calling non-function: {}", callee_ty)); + fn constrain_callee(&mut self, + callee_id: ast::NodeId, + _call_expr: &hir::Expr, + _callee_expr: &hir::Expr) { + let callee_ty = self.resolve_node_type(callee_id); + match callee_ty.sty { + ty::TyFnDef(..) 
| ty::TyFnPtr(_) => { } + _ => { + // this should not happen, but it does if the program is + // erroneous + // + // bug!( + // callee_expr.span, + // "Calling non-function: {}", + // callee_ty); + } } } -} -fn constrain_call<'a, I: Iterator>(rcx: &mut Rcx, - call_expr: &hir::Expr, - receiver: Option<&hir::Expr>, - arg_exprs: I, - implicitly_ref_args: bool) { - //! Invoked on every call site (i.e., normal calls, method calls, - //! and overloaded operators). Constrains the regions which appear - //! in the type of the function. Also constrains the regions that - //! appear in the arguments appropriately. - - debug!("constrain_call(call_expr={:?}, \ - receiver={:?}, \ - implicitly_ref_args={})", - call_expr, - receiver, - implicitly_ref_args); - - // `callee_region` is the scope representing the time in which the - // call occurs. - // - // FIXME(#6268) to support nested method calls, should be callee_id - let callee_scope = rcx.tcx().region_maps.node_extent(call_expr.id); - let callee_region = ty::ReScope(callee_scope); - - debug!("callee_region={:?}", callee_region); - - for arg_expr in arg_exprs { - debug!("Argument: {:?}", arg_expr); - - // ensure that any regions appearing in the argument type are - // valid for at least the lifetime of the function: - type_of_node_must_outlive( - rcx, infer::CallArg(arg_expr.span), - arg_expr.id, callee_region); - - // unfortunately, there are two means of taking implicit - // references, and we need to propagate constraints as a - // result. modes are going away and the "DerefArgs" code - // should be ported to use adjustments - if implicitly_ref_args { - link_by_ref(rcx, arg_expr, callee_scope); + fn constrain_call<'b, I: Iterator>(&mut self, + call_expr: &hir::Expr, + receiver: Option<&hir::Expr>, + arg_exprs: I, + implicitly_ref_args: bool) { + //! Invoked on every call site (i.e., normal calls, method calls, + //! and overloaded operators). Constrains the regions which appear + //! in the type of the function. 
Also constrains the regions that + //! appear in the arguments appropriately. + + debug!("constrain_call(call_expr={:?}, \ + receiver={:?}, \ + implicitly_ref_args={})", + call_expr, + receiver, + implicitly_ref_args); + + // `callee_region` is the scope representing the time in which the + // call occurs. + // + // FIXME(#6268) to support nested method calls, should be callee_id + let callee_scope = self.tcx.region_maps.node_extent(call_expr.id); + let callee_region = self.tcx.mk_region(ty::ReScope(callee_scope)); + + debug!("callee_region={:?}", callee_region); + + for arg_expr in arg_exprs { + debug!("Argument: {:?}", arg_expr); + + // ensure that any regions appearing in the argument type are + // valid for at least the lifetime of the function: + self.type_of_node_must_outlive(infer::CallArg(arg_expr.span), + arg_expr.id, callee_region); + + // unfortunately, there are two means of taking implicit + // references, and we need to propagate constraints as a + // result. modes are going away and the "DerefArgs" code + // should be ported to use adjustments + if implicitly_ref_args { + self.link_by_ref(arg_expr, callee_scope); + } } - } - // as loop above, but for receiver - if let Some(r) = receiver { - debug!("receiver: {:?}", r); - type_of_node_must_outlive( - rcx, infer::CallRcvr(r.span), - r.id, callee_region); - if implicitly_ref_args { - link_by_ref(rcx, &*r, callee_scope); + // as loop above, but for receiver + if let Some(r) = receiver { + debug!("receiver: {:?}", r); + self.type_of_node_must_outlive(infer::CallRcvr(r.span), + r.id, callee_region); + if implicitly_ref_args { + self.link_by_ref(&r, callee_scope); + } } } -} -/// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being -/// dereferenced, the lifetime of the pointer includes the deref expr. 
-fn constrain_autoderefs<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>, - deref_expr: &hir::Expr, - derefs: usize, - mut derefd_ty: Ty<'tcx>) -{ - debug!("constrain_autoderefs(deref_expr={:?}, derefs={}, derefd_ty={:?})", - deref_expr, - derefs, - derefd_ty); - - let s_deref_expr = rcx.tcx().region_maps.node_extent(deref_expr.id); - let r_deref_expr = ty::ReScope(s_deref_expr); - for i in 0..derefs { - let method_call = MethodCall::autoderef(deref_expr.id, i as u32); - debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs); - - let method = rcx.fcx.inh.tables.borrow().method_map.get(&method_call).map(|m| m.clone()); - - derefd_ty = match method { - Some(method) => { - debug!("constrain_autoderefs: #{} is overloaded, method={:?}", - i, method); - - let origin = infer::ParameterOrigin::OverloadedDeref; - substs_wf_in_scope(rcx, origin, method.substs, deref_expr.span, r_deref_expr); - - // Treat overloaded autoderefs as if an AutoRef adjustment - // was applied on the base type, as that is always the case. - let fn_sig = method.ty.fn_sig(); - let fn_sig = // late-bound regions should have been instantiated - rcx.tcx().no_late_bound_regions(fn_sig).unwrap(); - let self_ty = fn_sig.inputs[0]; - let (m, r) = match self_ty.sty { - ty::TyRef(r, ref m) => (m.mutbl, r), - _ => { - rcx.tcx().sess.span_bug( - deref_expr.span, - &format!("bad overloaded deref type {:?}", - method.ty)) - } - }; + /// Invoked on any auto-dereference that occurs. Checks that if this is a region pointer being + /// dereferenced, the lifetime of the pointer includes the deref expr. 
+ fn constrain_autoderefs(&mut self, + deref_expr: &hir::Expr, + derefs: usize, + mut derefd_ty: Ty<'tcx>) + { + debug!("constrain_autoderefs(deref_expr={:?}, derefs={}, derefd_ty={:?})", + deref_expr, + derefs, + derefd_ty); - debug!("constrain_autoderefs: receiver r={:?} m={:?}", - r, m); + let r_deref_expr = self.tcx.node_scope_region(deref_expr.id); + for i in 0..derefs { + let method_call = MethodCall::autoderef(deref_expr.id, i as u32); + debug!("constrain_autoderefs: method_call={:?} (of {:?} total)", method_call, derefs); - { - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i)); - debug!("constrain_autoderefs: self_cmt={:?}", - self_cmt); - link_region(rcx, deref_expr.span, r, - ty::BorrowKind::from_mutbl(m), self_cmt); - } + let method = self.tables.borrow().method_map.get(&method_call).map(|m| m.clone()); - // Specialized version of constrain_call. - type_must_outlive(rcx, infer::CallRcvr(deref_expr.span), - self_ty, r_deref_expr); - match fn_sig.output { - ty::FnConverging(return_type) => { - type_must_outlive(rcx, infer::CallReturn(deref_expr.span), - return_type, r_deref_expr); - return_type + derefd_ty = match method { + Some(method) => { + debug!("constrain_autoderefs: #{} is overloaded, method={:?}", + i, method); + + let origin = infer::ParameterOrigin::OverloadedDeref; + self.substs_wf_in_scope(origin, method.substs, deref_expr.span, r_deref_expr); + + // Treat overloaded autoderefs as if an AutoBorrow adjustment + // was applied on the base type, as that is always the case. 
+ let fn_sig = method.ty.fn_sig(); + let fn_sig = // late-bound regions should have been instantiated + self.tcx.no_late_bound_regions(fn_sig).unwrap(); + let self_ty = fn_sig.inputs[0]; + let (m, r) = match self_ty.sty { + ty::TyRef(r, ref m) => (m.mutbl, r), + _ => { + span_bug!( + deref_expr.span, + "bad overloaded deref type {:?}", + method.ty) + } + }; + + debug!("constrain_autoderefs: receiver r={:?} m={:?}", + r, m); + + { + let mc = mc::MemCategorizationContext::new(self); + let self_cmt = ignore_err!(mc.cat_expr_autoderefd(deref_expr, i)); + debug!("constrain_autoderefs: self_cmt={:?}", + self_cmt); + self.link_region(deref_expr.span, r, + ty::BorrowKind::from_mutbl(m), self_cmt); } - ty::FnDiverging => unreachable!() + + // Specialized version of constrain_call. + self.type_must_outlive(infer::CallRcvr(deref_expr.span), + self_ty, r_deref_expr); + self.type_must_outlive(infer::CallReturn(deref_expr.span), + fn_sig.output, r_deref_expr); + fn_sig.output } - } - None => derefd_ty - }; + None => derefd_ty + }; - if let ty::TyRef(r_ptr, _) = derefd_ty.sty { - mk_subregion_due_to_dereference(rcx, deref_expr.span, - r_deref_expr, *r_ptr); - } + if let ty::TyRef(r_ptr, _) = derefd_ty.sty { + self.mk_subregion_due_to_dereference(deref_expr.span, + r_deref_expr, r_ptr); + } - match derefd_ty.builtin_deref(true, ty::NoPreference) { - Some(mt) => derefd_ty = mt.ty, - /* if this type can't be dereferenced, then there's already an error - in the session saying so. Just bail out for now */ - None => break + match derefd_ty.builtin_deref(true, ty::NoPreference) { + Some(mt) => derefd_ty = mt.ty, + /* if this type can't be dereferenced, then there's already an error + in the session saying so. 
Just bail out for now */ + None => break + } } } -} -pub fn mk_subregion_due_to_dereference(rcx: &mut Rcx, - deref_span: Span, - minimum_lifetime: ty::Region, - maximum_lifetime: ty::Region) { - rcx.fcx.mk_subr(infer::DerefPointer(deref_span), - minimum_lifetime, maximum_lifetime) -} + pub fn mk_subregion_due_to_dereference(&mut self, + deref_span: Span, + minimum_lifetime: &'tcx ty::Region, + maximum_lifetime: &'tcx ty::Region) { + self.sub_regions(infer::DerefPointer(deref_span), + minimum_lifetime, maximum_lifetime) + } -fn check_safety_of_rvalue_destructor_if_necessary<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>, - cmt: mc::cmt<'tcx>, - span: Span) { - match cmt.cat { - Categorization::Rvalue(region) => { - match region { - ty::ReScope(rvalue_scope) => { - let typ = rcx.resolve_type(cmt.ty); - dropck::check_safety_of_destructor_if_necessary(rcx, - typ, - span, - rvalue_scope); - } - ty::ReStatic => {} - region => { - rcx.tcx() - .sess - .span_bug(span, - &format!("unexpected rvalue region in rvalue \ - destructor safety checking: `{:?}`", - region)); + fn check_safety_of_rvalue_destructor_if_necessary(&mut self, + cmt: mc::cmt<'tcx>, + span: Span) { + match cmt.cat { + Categorization::Rvalue(region) => { + match *region { + ty::ReScope(rvalue_scope) => { + let typ = self.resolve_type(cmt.ty); + dropck::check_safety_of_destructor_if_necessary(self, + typ, + span, + rvalue_scope); + } + ty::ReStatic => {} + _ => { + span_bug!(span, + "unexpected rvalue region in rvalue \ + destructor safety checking: `{:?}`", + region); + } } } + _ => {} } - _ => {} } -} -/// Invoked on any index expression that occurs. Checks that if this is a slice being indexed, the -/// lifetime of the pointer includes the deref expr. 
-fn constrain_index<'a, 'tcx>(rcx: &mut Rcx<'a, 'tcx>, - index_expr: &hir::Expr, - indexed_ty: Ty<'tcx>) -{ - debug!("constrain_index(index_expr=?, indexed_ty={}", - rcx.fcx.infcx().ty_to_string(indexed_ty)); - - let r_index_expr = ty::ReScope(rcx.tcx().region_maps.node_extent(index_expr.id)); - if let ty::TyRef(r_ptr, mt) = indexed_ty.sty { - match mt.ty.sty { - ty::TySlice(_) | ty::TyStr => { - rcx.fcx.mk_subr(infer::IndexSlice(index_expr.span), - r_index_expr, *r_ptr); + /// Invoked on any index expression that occurs. Checks that if this is a slice + /// being indexed, the lifetime of the pointer includes the deref expr. + fn constrain_index(&mut self, + index_expr: &hir::Expr, + indexed_ty: Ty<'tcx>) + { + debug!("constrain_index(index_expr=?, indexed_ty={}", + self.ty_to_string(indexed_ty)); + + let r_index_expr = ty::ReScope(self.tcx.region_maps.node_extent(index_expr.id)); + if let ty::TyRef(r_ptr, mt) = indexed_ty.sty { + match mt.ty.sty { + ty::TySlice(_) | ty::TyStr => { + self.sub_regions(infer::IndexSlice(index_expr.span), + self.tcx.mk_region(r_index_expr), r_ptr); + } + _ => {} } - _ => {} } } -} -/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying -/// adjustments) are valid for at least `minimum_lifetime` -fn type_of_node_must_outlive<'a, 'tcx>( - rcx: &mut Rcx<'a, 'tcx>, - origin: infer::SubregionOrigin<'tcx>, - id: ast::NodeId, - minimum_lifetime: ty::Region) -{ - let tcx = rcx.fcx.tcx(); - - // Try to resolve the type. If we encounter an error, then typeck - // is going to fail anyway, so just stop here and let typeck - // report errors later on in the writeback phase. 
- let ty0 = rcx.resolve_node_type(id); - let ty = ty0.adjust(tcx, origin.span(), id, - rcx.fcx.inh.tables.borrow().adjustments.get(&id), - |method_call| rcx.resolve_method_type(method_call)); - debug!("constrain_regions_in_type_of_node(\ - ty={}, ty0={}, id={}, minimum_lifetime={:?})", - ty, ty0, - id, minimum_lifetime); - type_must_outlive(rcx, origin, ty, minimum_lifetime); -} + /// Guarantees that any lifetimes which appear in the type of the node `id` (after applying + /// adjustments) are valid for at least `minimum_lifetime` + fn type_of_node_must_outlive(&mut self, + origin: infer::SubregionOrigin<'tcx>, + id: ast::NodeId, + minimum_lifetime: &'tcx ty::Region) + { + // Try to resolve the type. If we encounter an error, then typeck + // is going to fail anyway, so just stop here and let typeck + // report errors later on in the writeback phase. + let ty0 = self.resolve_node_type(id); + let ty = self.tables.borrow().adjustments.get(&id).map_or(ty0, |adj| adj.target); + let ty = self.resolve_type(ty); + debug!("constrain_regions_in_type_of_node(\ + ty={}, ty0={}, id={}, minimum_lifetime={:?})", + ty, ty0, + id, minimum_lifetime); + self.type_must_outlive(origin, ty, minimum_lifetime); + } -/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the -/// resulting pointer is linked to the lifetime of its guarantor (if any). -fn link_addr_of(rcx: &mut Rcx, expr: &hir::Expr, - mutability: hir::Mutability, base: &hir::Expr) { - debug!("link_addr_of(expr={:?}, base={:?})", expr, base); + /// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the + /// resulting pointer is linked to the lifetime of its guarantor (if any). 
+ fn link_addr_of(&mut self, expr: &hir::Expr, + mutability: hir::Mutability, base: &hir::Expr) { + debug!("link_addr_of(expr={:?}, base={:?})", expr, base); - let cmt = { - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - ignore_err!(mc.cat_expr(base)) - }; + let cmt = { + let mc = mc::MemCategorizationContext::new(self); + ignore_err!(mc.cat_expr(base)) + }; - debug!("link_addr_of: cmt={:?}", cmt); + debug!("link_addr_of: cmt={:?}", cmt); - link_region_from_node_type(rcx, expr.span, expr.id, mutability, cmt); -} + self.link_region_from_node_type(expr.span, expr.id, mutability, cmt); + } -/// Computes the guarantors for any ref bindings in a `let` and -/// then ensures that the lifetime of the resulting pointer is -/// linked to the lifetime of the initialization expression. -fn link_local(rcx: &Rcx, local: &hir::Local) { - debug!("regionck::for_local()"); - let init_expr = match local.init { - None => { return; } - Some(ref expr) => &**expr, - }; - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - let discr_cmt = ignore_err!(mc.cat_expr(init_expr)); - link_pattern(rcx, mc, discr_cmt, &*local.pat); -} + /// Computes the guarantors for any ref bindings in a `let` and + /// then ensures that the lifetime of the resulting pointer is + /// linked to the lifetime of the initialization expression. + fn link_local(&self, local: &hir::Local) { + debug!("regionck::for_local()"); + let init_expr = match local.init { + None => { return; } + Some(ref expr) => &**expr, + }; + let mc = mc::MemCategorizationContext::new(self); + let discr_cmt = ignore_err!(mc.cat_expr(init_expr)); + self.link_pattern(mc, discr_cmt, &local.pat); + } -/// Computes the guarantors for any ref bindings in a match and -/// then ensures that the lifetime of the resulting pointer is -/// linked to the lifetime of its guarantor (if any). 
-fn link_match(rcx: &Rcx, discr: &hir::Expr, arms: &[hir::Arm]) { - debug!("regionck::for_match()"); - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - let discr_cmt = ignore_err!(mc.cat_expr(discr)); - debug!("discr_cmt={:?}", discr_cmt); - for arm in arms { - for root_pat in &arm.pats { - link_pattern(rcx, mc, discr_cmt.clone(), &**root_pat); + /// Computes the guarantors for any ref bindings in a match and + /// then ensures that the lifetime of the resulting pointer is + /// linked to the lifetime of its guarantor (if any). + fn link_match(&self, discr: &hir::Expr, arms: &[hir::Arm]) { + debug!("regionck::for_match()"); + let mc = mc::MemCategorizationContext::new(self); + let discr_cmt = ignore_err!(mc.cat_expr(discr)); + debug!("discr_cmt={:?}", discr_cmt); + for arm in arms { + for root_pat in &arm.pats { + self.link_pattern(mc, discr_cmt.clone(), &root_pat); + } } } -} -/// Computes the guarantors for any ref bindings in a match and -/// then ensures that the lifetime of the resulting pointer is -/// linked to the lifetime of its guarantor (if any). -fn link_fn_args(rcx: &Rcx, body_scope: CodeExtent, args: &[hir::Arg]) { - debug!("regionck::link_fn_args(body_scope={:?})", body_scope); - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - for arg in args { - let arg_ty = rcx.fcx.node_ty(arg.id); - let re_scope = ty::ReScope(body_scope); - let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty); - debug!("arg_ty={:?} arg_cmt={:?} arg={:?}", - arg_ty, - arg_cmt, - arg); - link_pattern(rcx, mc, arg_cmt, &*arg.pat); + /// Computes the guarantors for any ref bindings in a match and + /// then ensures that the lifetime of the resulting pointer is + /// linked to the lifetime of its guarantor (if any). 
+ fn link_fn_args(&self, body_scope: CodeExtent, args: &[hir::Arg]) { + debug!("regionck::link_fn_args(body_scope={:?})", body_scope); + let mc = mc::MemCategorizationContext::new(self); + for arg in args { + let arg_ty = self.node_ty(arg.id); + let re_scope = self.tcx.mk_region(ty::ReScope(body_scope)); + let arg_cmt = mc.cat_rvalue(arg.id, arg.ty.span, re_scope, arg_ty); + debug!("arg_ty={:?} arg_cmt={:?} arg={:?}", + arg_ty, + arg_cmt, + arg); + self.link_pattern(mc, arg_cmt, &arg.pat); + } } -} -/// Link lifetimes of any ref bindings in `root_pat` to the pointers found in the discriminant, if -/// needed. -fn link_pattern<'t, 'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - mc: mc::MemCategorizationContext<'t, 'a, 'tcx>, - discr_cmt: mc::cmt<'tcx>, - root_pat: &hir::Pat) { - debug!("link_pattern(discr_cmt={:?}, root_pat={:?})", - discr_cmt, - root_pat); - let _ = mc.cat_pattern(discr_cmt, root_pat, |mc, sub_cmt, sub_pat| { - match sub_pat.node { - // `ref x` pattern - hir::PatIdent(hir::BindByRef(mutbl), _, _) => { - link_region_from_node_type( - rcx, sub_pat.span, sub_pat.id, - mutbl, sub_cmt); - } - - // `[_, ..slice, _]` pattern - hir::PatVec(_, Some(ref slice_pat), _) => { - match mc.cat_slice_pattern(sub_cmt, &**slice_pat) { - Ok((slice_cmt, slice_mutbl, slice_r)) => { - link_region(rcx, sub_pat.span, &slice_r, - ty::BorrowKind::from_mutbl(slice_mutbl), - slice_cmt); - } - Err(()) => {} + /// Link lifetimes of any ref bindings in `root_pat` to the pointers found + /// in the discriminant, if needed. + fn link_pattern<'t>(&self, + mc: mc::MemCategorizationContext<'a, 'gcx, 'tcx>, + discr_cmt: mc::cmt<'tcx>, + root_pat: &hir::Pat) { + debug!("link_pattern(discr_cmt={:?}, root_pat={:?})", + discr_cmt, + root_pat); + let _ = mc.cat_pattern(discr_cmt, root_pat, |_, sub_cmt, sub_pat| { + match sub_pat.node { + // `ref x` pattern + PatKind::Binding(hir::BindByRef(mutbl), ..) 
=> { + self.link_region_from_node_type(sub_pat.span, sub_pat.id, + mutbl, sub_cmt); } + _ => {} } - _ => {} - } - }); -} + }); + } -/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being -/// autoref'd. -fn link_autoref(rcx: &Rcx, - expr: &hir::Expr, - autoderefs: usize, - autoref: &adjustment::AutoRef) -{ - debug!("link_autoref(autoref={:?})", autoref); - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs)); - debug!("expr_cmt={:?}", expr_cmt); - - match *autoref { - adjustment::AutoPtr(r, m) => { - link_region(rcx, expr.span, r, - ty::BorrowKind::from_mutbl(m), expr_cmt); - } + /// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being + /// autoref'd. + fn link_autoref(&self, + expr: &hir::Expr, + autoderefs: usize, + autoref: &adjustment::AutoBorrow<'tcx>) + { + debug!("link_autoref(autoderefs={}, autoref={:?})", autoderefs, autoref); + let mc = mc::MemCategorizationContext::new(self); + let expr_cmt = ignore_err!(mc.cat_expr_autoderefd(expr, autoderefs)); + debug!("expr_cmt={:?}", expr_cmt); + + match *autoref { + adjustment::AutoBorrow::Ref(r, m) => { + self.link_region(expr.span, r, + ty::BorrowKind::from_mutbl(m), expr_cmt); + } - adjustment::AutoUnsafe(m) => { - let r = ty::ReScope(rcx.tcx().region_maps.node_extent(expr.id)); - link_region(rcx, expr.span, &r, ty::BorrowKind::from_mutbl(m), expr_cmt); + adjustment::AutoBorrow::RawPtr(m) => { + let r = self.tcx.node_scope_region(expr.id); + self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m), expr_cmt); + } } } -} -/// Computes the guarantor for cases where the `expr` is being passed by implicit reference and -/// must outlive `callee_scope`. 
-fn link_by_ref(rcx: &Rcx, - expr: &hir::Expr, - callee_scope: CodeExtent) { - debug!("link_by_ref(expr={:?}, callee_scope={:?})", - expr, callee_scope); - let mc = mc::MemCategorizationContext::new(rcx.fcx.infcx()); - let expr_cmt = ignore_err!(mc.cat_expr(expr)); - let borrow_region = ty::ReScope(callee_scope); - link_region(rcx, expr.span, &borrow_region, ty::ImmBorrow, expr_cmt); -} + /// Computes the guarantor for cases where the `expr` is being passed by implicit reference and + /// must outlive `callee_scope`. + fn link_by_ref(&self, + expr: &hir::Expr, + callee_scope: CodeExtent) { + debug!("link_by_ref(expr={:?}, callee_scope={:?})", + expr, callee_scope); + let mc = mc::MemCategorizationContext::new(self); + let expr_cmt = ignore_err!(mc.cat_expr(expr)); + let borrow_region = self.tcx.mk_region(ty::ReScope(callee_scope)); + self.link_region(expr.span, borrow_region, ty::ImmBorrow, expr_cmt); + } -/// Like `link_region()`, except that the region is extracted from the type of `id`, which must be -/// some reference (`&T`, `&str`, etc). -fn link_region_from_node_type<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - id: ast::NodeId, - mutbl: hir::Mutability, - cmt_borrowed: mc::cmt<'tcx>) { - debug!("link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})", - id, mutbl, cmt_borrowed); - - let rptr_ty = rcx.resolve_node_type(id); - if let ty::TyRef(&r, _) = rptr_ty.sty { - debug!("rptr_ty={}", rptr_ty); - link_region(rcx, span, &r, ty::BorrowKind::from_mutbl(mutbl), - cmt_borrowed); + /// Like `link_region()`, except that the region is extracted from the type of `id`, + /// which must be some reference (`&T`, `&str`, etc). 
+ fn link_region_from_node_type(&self, + span: Span, + id: ast::NodeId, + mutbl: hir::Mutability, + cmt_borrowed: mc::cmt<'tcx>) { + debug!("link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})", + id, mutbl, cmt_borrowed); + + let rptr_ty = self.resolve_node_type(id); + if let ty::TyRef(r, _) = rptr_ty.sty { + debug!("rptr_ty={}", rptr_ty); + self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl), + cmt_borrowed); + } } -} -/// Informs the inference engine that `borrow_cmt` is being borrowed with kind `borrow_kind` and -/// lifetime `borrow_region`. In order to ensure borrowck is satisfied, this may create constraints -/// between regions, as explained in `link_reborrowed_region()`. -fn link_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - borrow_region: &ty::Region, - borrow_kind: ty::BorrowKind, - borrow_cmt: mc::cmt<'tcx>) { - let mut borrow_cmt = borrow_cmt; - let mut borrow_kind = borrow_kind; + /// Informs the inference engine that `borrow_cmt` is being borrowed with + /// kind `borrow_kind` and lifetime `borrow_region`. + /// In order to ensure borrowck is satisfied, this may create constraints + /// between regions, as explained in `link_reborrowed_region()`. 
+ fn link_region(&self, + span: Span, + borrow_region: &'tcx ty::Region, + borrow_kind: ty::BorrowKind, + borrow_cmt: mc::cmt<'tcx>) { + let mut borrow_cmt = borrow_cmt; + let mut borrow_kind = borrow_kind; + + let origin = infer::DataBorrowed(borrow_cmt.ty, span); + self.type_must_outlive(origin, borrow_cmt.ty, borrow_region); + + loop { + debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})", + borrow_region, + borrow_kind, + borrow_cmt); + match borrow_cmt.cat.clone() { + Categorization::Deref(ref_cmt, _, + mc::Implicit(ref_kind, ref_region)) | + Categorization::Deref(ref_cmt, _, + mc::BorrowedPtr(ref_kind, ref_region)) => { + match self.link_reborrowed_region(span, + borrow_region, borrow_kind, + ref_cmt, ref_region, ref_kind, + borrow_cmt.note) { + Some((c, k)) => { + borrow_cmt = c; + borrow_kind = k; + } + None => { + return; + } + } + } - let origin = infer::DataBorrowed(borrow_cmt.ty, span); - type_must_outlive(rcx, origin, borrow_cmt.ty, *borrow_region); + Categorization::Downcast(cmt_base, _) | + Categorization::Deref(cmt_base, _, mc::Unique) | + Categorization::Interior(cmt_base, _) => { + // Borrowing interior or owned data requires the base + // to be valid and borrowable in the same fashion. + borrow_cmt = cmt_base; + borrow_kind = borrow_kind; + } - loop { - debug!("link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})", - borrow_region, - borrow_kind, - borrow_cmt); - match borrow_cmt.cat.clone() { - Categorization::Deref(ref_cmt, _, - mc::Implicit(ref_kind, ref_region)) | - Categorization::Deref(ref_cmt, _, - mc::BorrowedPtr(ref_kind, ref_region)) => { - match link_reborrowed_region(rcx, span, - borrow_region, borrow_kind, - ref_cmt, ref_region, ref_kind, - borrow_cmt.note) { - Some((c, k)) => { - borrow_cmt = c; - borrow_kind = k; + Categorization::Deref(.., mc::UnsafePtr(..)) | + Categorization::StaticItem | + Categorization::Upvar(..) | + Categorization::Local(..) | + Categorization::Rvalue(..) 
=> { + // These are all "base cases" with independent lifetimes + // that are not subject to inference + return; + } + } + } + } + + /// This is the most complicated case: the path being borrowed is + /// itself the referent of a borrowed pointer. Let me give an + /// example fragment of code to make clear(er) the situation: + /// + /// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a + /// ... + /// &'z *r // the reborrow has lifetime 'z + /// + /// Now, in this case, our primary job is to add the inference + /// constraint that `'z <= 'a`. Given this setup, let's clarify the + /// parameters in (roughly) terms of the example: + /// + /// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T` + /// borrow_region ^~ ref_region ^~ + /// borrow_kind ^~ ref_kind ^~ + /// ref_cmt ^ + /// + /// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc). + /// + /// Unfortunately, there are some complications beyond the simple + /// scenario I just painted: + /// + /// 1. The reference `r` might in fact be a "by-ref" upvar. In that + /// case, we have two jobs. First, we are inferring whether this reference + /// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must + /// adjust that based on this borrow (e.g., if this is an `&mut` borrow, + /// then `r` must be an `&mut` reference). Second, whenever we link + /// two regions (here, `'z <= 'a`), we supply a *cause*, and in this + /// case we adjust the cause to indicate that the reference being + /// "reborrowed" is itself an upvar. This provides a nicer error message + /// should something go wrong. + /// + /// 2. There may in fact be more levels of reborrowing. In the + /// example, I said the borrow was like `&'z *r`, but it might + /// in fact be a borrow like `&'z **q` where `q` has type `&'a + /// &'b mut T`. In that case, we want to ensure that `'z <= 'a` + /// and `'z <= 'b`. This is explained more below. 
+ /// + /// The return value of this function indicates whether we need to + /// recurse and process `ref_cmt` (see case 2 above). + fn link_reborrowed_region(&self, + span: Span, + borrow_region: &'tcx ty::Region, + borrow_kind: ty::BorrowKind, + ref_cmt: mc::cmt<'tcx>, + ref_region: &'tcx ty::Region, + mut ref_kind: ty::BorrowKind, + note: mc::Note) + -> Option<(mc::cmt<'tcx>, ty::BorrowKind)> + { + // Possible upvar ID we may need later to create an entry in the + // maybe link map. + + // Detect by-ref upvar `x`: + let cause = match note { + mc::NoteUpvarRef(ref upvar_id) => { + let upvar_capture_map = &self.tables.borrow_mut().upvar_capture_map; + match upvar_capture_map.get(upvar_id) { + Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => { + // The mutability of the upvar may have been modified + // by the above adjustment, so update our local variable. + ref_kind = upvar_borrow.kind; + + infer::ReborrowUpvar(span, *upvar_id) } - None => { - return; + _ => { + span_bug!( span, "Illegal upvar id: {:?}", upvar_id); } } } + mc::NoteClosureEnv(ref upvar_id) => { + // We don't have any mutability changes to propagate, but + // we do want to note that an upvar reborrow caused this + // link + infer::ReborrowUpvar(span, *upvar_id) + } + _ => { + infer::Reborrow(span) + } + }; - Categorization::Downcast(cmt_base, _) | - Categorization::Deref(cmt_base, _, mc::Unique) | - Categorization::Interior(cmt_base, _) => { - // Borrowing interior or owned data requires the base - // to be valid and borrowable in the same fashion. - borrow_cmt = cmt_base; - borrow_kind = borrow_kind; + debug!("link_reborrowed_region: {:?} <= {:?}", + borrow_region, + ref_region); + self.sub_regions(cause, borrow_region, ref_region); + + // If we end up needing to recurse and establish a region link + // with `ref_cmt`, calculate what borrow kind we will end up + // needing. This will be used below. 
+ // + // One interesting twist is that we can weaken the borrow kind + // when we recurse: to reborrow an `&mut` referent as mutable, + // borrowck requires a unique path to the `&mut` reference but not + // necessarily a *mutable* path. + let new_borrow_kind = match borrow_kind { + ty::ImmBorrow => + ty::ImmBorrow, + ty::MutBorrow | ty::UniqueImmBorrow => + ty::UniqueImmBorrow + }; + + // Decide whether we need to recurse and link any regions within + // the `ref_cmt`. This is concerned for the case where the value + // being reborrowed is in fact a borrowed pointer found within + // another borrowed pointer. For example: + // + // let p: &'b &'a mut T = ...; + // ... + // &'z **p + // + // What makes this case particularly tricky is that, if the data + // being borrowed is a `&mut` or `&uniq` borrow, borrowck requires + // not only that `'z <= 'a`, (as before) but also `'z <= 'b` + // (otherwise the user might mutate through the `&mut T` reference + // after `'b` expires and invalidate the borrow we are looking at + // now). + // + // So let's re-examine our parameters in light of this more + // complicated (possible) scenario: + // + // A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T` + // borrow_region ^~ ref_region ^~ + // borrow_kind ^~ ref_kind ^~ + // ref_cmt ^~~ + // + // (Note that since we have not examined `ref_cmt.cat`, we don't + // know whether this scenario has occurred; but I wanted to show + // how all the types get adjusted.) + match ref_kind { + ty::ImmBorrow => { + // The reference being reborrowed is a sharable ref of + // type `&'a T`. In this case, it doesn't matter where we + // *found* the `&T` pointer, the memory it references will + // be valid and immutable for `'a`. So we can stop here. + // + // (Note that the `borrow_kind` must also be ImmBorrow or + // else the user is borrowed imm memory as mut memory, + // which means they'll get an error downstream in borrowck + // anyhow.) 
+ return None; } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | - Categorization::StaticItem | - Categorization::Upvar(..) | - Categorization::Local(..) | - Categorization::Rvalue(..) => { - // These are all "base cases" with independent lifetimes - // that are not subject to inference - return; + ty::MutBorrow | ty::UniqueImmBorrow => { + // The reference being reborrowed is either an `&mut T` or + // `&uniq T`. This is the case where recursion is needed. + return Some((ref_cmt, new_borrow_kind)); } } } -} -/// This is the most complicated case: the path being borrowed is -/// itself the referent of a borrowed pointer. Let me give an -/// example fragment of code to make clear(er) the situation: -/// -/// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a -/// ... -/// &'z *r // the reborrow has lifetime 'z -/// -/// Now, in this case, our primary job is to add the inference -/// constraint that `'z <= 'a`. Given this setup, let's clarify the -/// parameters in (roughly) terms of the example: -/// -/// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T` -/// borrow_region ^~ ref_region ^~ -/// borrow_kind ^~ ref_kind ^~ -/// ref_cmt ^ -/// -/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc). -/// -/// Unfortunately, there are some complications beyond the simple -/// scenario I just painted: -/// -/// 1. The reference `r` might in fact be a "by-ref" upvar. In that -/// case, we have two jobs. First, we are inferring whether this reference -/// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must -/// adjust that based on this borrow (e.g., if this is an `&mut` borrow, -/// then `r` must be an `&mut` reference). Second, whenever we link -/// two regions (here, `'z <= 'a`), we supply a *cause*, and in this -/// case we adjust the cause to indicate that the reference being -/// "reborrowed" is itself an upvar. This provides a nicer error message -/// should something go wrong. -/// -/// 2. 
There may in fact be more levels of reborrowing. In the -/// example, I said the borrow was like `&'z *r`, but it might -/// in fact be a borrow like `&'z **q` where `q` has type `&'a -/// &'b mut T`. In that case, we want to ensure that `'z <= 'a` -/// and `'z <= 'b`. This is explained more below. -/// -/// The return value of this function indicates whether we need to -/// recurse and process `ref_cmt` (see case 2 above). -fn link_reborrowed_region<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - borrow_region: &ty::Region, - borrow_kind: ty::BorrowKind, - ref_cmt: mc::cmt<'tcx>, - ref_region: ty::Region, - mut ref_kind: ty::BorrowKind, - note: mc::Note) - -> Option<(mc::cmt<'tcx>, ty::BorrowKind)> -{ - // Possible upvar ID we may need later to create an entry in the - // maybe link map. - - // Detect by-ref upvar `x`: - let cause = match note { - mc::NoteUpvarRef(ref upvar_id) => { - let upvar_capture_map = &rcx.fcx.inh.tables.borrow_mut().upvar_capture_map; - match upvar_capture_map.get(upvar_id) { - Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => { - // The mutability of the upvar may have been modified - // by the above adjustment, so update our local variable. - ref_kind = upvar_borrow.kind; - - infer::ReborrowUpvar(span, *upvar_id) - } - _ => { - rcx.tcx().sess.span_bug( - span, - &format!("Illegal upvar id: {:?}", - upvar_id)); - } - } - } - mc::NoteClosureEnv(ref upvar_id) => { - // We don't have any mutability changes to propagate, but - // we do want to note that an upvar reborrow caused this - // link - infer::ReborrowUpvar(span, *upvar_id) - } - _ => { - infer::Reborrow(span) - } - }; - - debug!("link_reborrowed_region: {:?} <= {:?}", - borrow_region, - ref_region); - rcx.fcx.mk_subr(cause, *borrow_region, ref_region); - - // If we end up needing to recurse and establish a region link - // with `ref_cmt`, calculate what borrow kind we will end up - // needing. This will be used below. 
- // - // One interesting twist is that we can weaken the borrow kind - // when we recurse: to reborrow an `&mut` referent as mutable, - // borrowck requires a unique path to the `&mut` reference but not - // necessarily a *mutable* path. - let new_borrow_kind = match borrow_kind { - ty::ImmBorrow => - ty::ImmBorrow, - ty::MutBorrow | ty::UniqueImmBorrow => - ty::UniqueImmBorrow - }; - - // Decide whether we need to recurse and link any regions within - // the `ref_cmt`. This is concerned for the case where the value - // being reborrowed is in fact a borrowed pointer found within - // another borrowed pointer. For example: - // - // let p: &'b &'a mut T = ...; - // ... - // &'z **p - // - // What makes this case particularly tricky is that, if the data - // being borrowed is a `&mut` or `&uniq` borrow, borrowck requires - // not only that `'z <= 'a`, (as before) but also `'z <= 'b` - // (otherwise the user might mutate through the `&mut T` reference - // after `'b` expires and invalidate the borrow we are looking at - // now). - // - // So let's re-examine our parameters in light of this more - // complicated (possible) scenario: - // - // A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T` - // borrow_region ^~ ref_region ^~ - // borrow_kind ^~ ref_kind ^~ - // ref_cmt ^~~ - // - // (Note that since we have not examined `ref_cmt.cat`, we don't - // know whether this scenario has occurred; but I wanted to show - // how all the types get adjusted.) - match ref_kind { - ty::ImmBorrow => { - // The reference being reborrowed is a sharable ref of - // type `&'a T`. In this case, it doesn't matter where we - // *found* the `&T` pointer, the memory it references will - // be valid and immutable for `'a`. So we can stop here. - // - // (Note that the `borrow_kind` must also be ImmBorrow or - // else the user is borrowed imm memory as mut memory, - // which means they'll get an error downstream in borrowck - // anyhow.) 
- return None; + /// Checks that the values provided for type/region arguments in a given + /// expression are well-formed and in-scope. + fn substs_wf_in_scope(&mut self, + origin: infer::ParameterOrigin, + substs: &Substs<'tcx>, + expr_span: Span, + expr_region: &'tcx ty::Region) { + debug!("substs_wf_in_scope(substs={:?}, \ + expr_region={:?}, \ + origin={:?}, \ + expr_span={:?})", + substs, expr_region, origin, expr_span); + + let origin = infer::ParameterInScope(origin, expr_span); + + for region in substs.regions() { + self.sub_regions(origin.clone(), expr_region, region); } - ty::MutBorrow | ty::UniqueImmBorrow => { - // The reference being reborrowed is either an `&mut T` or - // `&uniq T`. This is the case where recursion is needed. - return Some((ref_cmt, new_borrow_kind)); + for ty in substs.types() { + let ty = self.resolve_type(ty); + self.type_must_outlive(origin.clone(), ty, expr_region); } } -} -/// Checks that the values provided for type/region arguments in a given -/// expression are well-formed and in-scope. -pub fn substs_wf_in_scope<'a,'tcx>(rcx: &mut Rcx<'a,'tcx>, - origin: infer::ParameterOrigin, - substs: &Substs<'tcx>, - expr_span: Span, - expr_region: ty::Region) { - debug!("substs_wf_in_scope(substs={:?}, \ - expr_region={:?}, \ - origin={:?}, \ - expr_span={:?})", - substs, expr_region, origin, expr_span); - - let origin = infer::ParameterInScope(origin, expr_span); - - for ®ion in substs.regions() { - rcx.fcx.mk_subr(origin.clone(), expr_region, region); - } + /// Ensures that type is well-formed in `region`, which implies (among + /// other things) that all borrowed data reachable via `ty` outlives + /// `region`. 
+ pub fn type_must_outlive(&self, + origin: infer::SubregionOrigin<'tcx>, + ty: Ty<'tcx>, + region: &'tcx ty::Region) + { + let ty = self.resolve_type(ty); - for &ty in &substs.types { - let ty = rcx.resolve_type(ty); - type_must_outlive(rcx, origin.clone(), ty, expr_region); - } -} + debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})", + ty, + region, + origin); -/// Ensures that type is well-formed in `region`, which implies (among -/// other things) that all borrowed data reachable via `ty` outlives -/// `region`. -pub fn type_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - origin: infer::SubregionOrigin<'tcx>, - ty: Ty<'tcx>, - region: ty::Region) -{ - let ty = rcx.resolve_type(ty); - - debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})", - ty, - region, - origin); - - assert!(!ty.has_escaping_regions()); - - let components = ty::outlives::components(rcx.infcx(), ty); - components_must_outlive(rcx, origin, components, region); -} + assert!(!ty.has_escaping_regions()); -fn components_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - origin: infer::SubregionOrigin<'tcx>, - components: Vec>, - region: ty::Region) -{ - for component in components { - let origin = origin.clone(); - match component { - ty::outlives::Component::Region(region1) => { - rcx.fcx.mk_subr(origin, region, region1); - } - ty::outlives::Component::Param(param_ty) => { - param_ty_must_outlive(rcx, origin, region, param_ty); - } - ty::outlives::Component::Projection(projection_ty) => { - projection_must_outlive(rcx, origin, region, projection_ty); - } - ty::outlives::Component::EscapingProjection(subcomponents) => { - components_must_outlive(rcx, origin, subcomponents, region); - } - ty::outlives::Component::UnresolvedInferenceVariable(v) => { - // ignore this, we presume it will yield an error - // later, since if a type variable is not resolved by - // this point it never will be - rcx.tcx().sess.delay_span_bug( - origin.span(), - &format!("unresolved inference variable in 
outlives: {:?}", v)); + let components = self.tcx.outlives_components(ty); + self.components_must_outlive(origin, components, region); + } + + fn components_must_outlive(&self, + origin: infer::SubregionOrigin<'tcx>, + components: Vec>, + region: &'tcx ty::Region) + { + for component in components { + let origin = origin.clone(); + match component { + ty::outlives::Component::Region(region1) => { + self.sub_regions(origin, region, region1); + } + ty::outlives::Component::Param(param_ty) => { + self.param_ty_must_outlive(origin, region, param_ty); + } + ty::outlives::Component::Projection(projection_ty) => { + self.projection_must_outlive(origin, region, projection_ty); + } + ty::outlives::Component::EscapingProjection(subcomponents) => { + self.components_must_outlive(origin, subcomponents, region); + } + ty::outlives::Component::UnresolvedInferenceVariable(v) => { + // ignore this, we presume it will yield an error + // later, since if a type variable is not resolved by + // this point it never will be + self.tcx.sess.delay_span_bug( + origin.span(), + &format!("unresolved inference variable in outlives: {:?}", v)); + } } } } -} -fn param_ty_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - origin: infer::SubregionOrigin<'tcx>, - region: ty::Region, - param_ty: ty::ParamTy) { - debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})", - region, param_ty, origin); + fn param_ty_must_outlive(&self, + origin: infer::SubregionOrigin<'tcx>, + region: &'tcx ty::Region, + param_ty: ty::ParamTy) { + debug!("param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})", + region, param_ty, origin); - let verify_bound = param_bound(rcx, param_ty); - let generic = GenericKind::Param(param_ty); - rcx.fcx.infcx().verify_generic_bound(origin, generic, region, verify_bound); -} - -fn projection_must_outlive<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - origin: infer::SubregionOrigin<'tcx>, - region: ty::Region, - projection_ty: ty::ProjectionTy<'tcx>) -{ - 
debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})", - region, projection_ty, origin); - - // This case is thorny for inference. The fundamental problem is - // that there are many cases where we have choice, and inference - // doesn't like choice (the current region inference in - // particular). :) First off, we have to choose between using the - // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and - // OutlivesProjectionComponent rules, any one of which is - // sufficient. If there are no inference variables involved, it's - // not hard to pick the right rule, but if there are, we're in a - // bit of a catch 22: if we picked which rule we were going to - // use, we could add constraints to the region inference graph - // that make it apply, but if we don't add those constraints, the - // rule might not apply (but another rule might). For now, we err - // on the side of adding too few edges into the graph. - - // Compute the bounds we can derive from the environment or trait - // definition. We know that the projection outlives all the - // regions in this list. - let env_bounds = projection_declared_bounds(rcx, origin.span(), projection_ty); - - debug!("projection_must_outlive: env_bounds={:?}", - env_bounds); - - // If we know that the projection outlives 'static, then we're - // done here. - if env_bounds.contains(&ty::ReStatic) { - debug!("projection_must_outlive: 'static as declared bound"); - return; + let verify_bound = self.param_bound(param_ty); + let generic = GenericKind::Param(param_ty); + self.verify_generic_bound(origin, generic, region, verify_bound); } - // If declared bounds list is empty, the only applicable rule is - // OutlivesProjectionComponent. If there are inference variables, - // then, we can break down the outlives into more primitive - // components without adding unnecessary edges. 
- // - // If there are *no* inference variables, however, we COULD do - // this, but we choose not to, because the error messages are less - // good. For example, a requirement like `T::Item: 'r` would be - // translated to a requirement that `T: 'r`; when this is reported - // to the user, it will thus say "T: 'r must hold so that T::Item: - // 'r holds". But that makes it sound like the only way to fix - // the problem is to add `T: 'r`, which isn't true. So, if there are no - // inference variables, we use a verify constraint instead of adding - // edges, which winds up enforcing the same condition. - let needs_infer = { - projection_ty.trait_ref.substs.types.iter().any(|t| t.needs_infer()) || - projection_ty.trait_ref.substs.regions().iter().any(|r| r.needs_infer()) - }; - if env_bounds.is_empty() && needs_infer { - debug!("projection_must_outlive: no declared bounds"); - - for &component_ty in &projection_ty.trait_ref.substs.types { - type_must_outlive(rcx, origin.clone(), component_ty, region); + fn projection_must_outlive(&self, + origin: infer::SubregionOrigin<'tcx>, + region: &'tcx ty::Region, + projection_ty: ty::ProjectionTy<'tcx>) + { + debug!("projection_must_outlive(region={:?}, projection_ty={:?}, origin={:?})", + region, projection_ty, origin); + + // This case is thorny for inference. The fundamental problem is + // that there are many cases where we have choice, and inference + // doesn't like choice (the current region inference in + // particular). :) First off, we have to choose between using the + // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and + // OutlivesProjectionComponent rules, any one of which is + // sufficient. 
If there are no inference variables involved, it's + // not hard to pick the right rule, but if there are, we're in a + // bit of a catch 22: if we picked which rule we were going to + // use, we could add constraints to the region inference graph + // that make it apply, but if we don't add those constraints, the + // rule might not apply (but another rule might). For now, we err + // on the side of adding too few edges into the graph. + + // Compute the bounds we can derive from the environment or trait + // definition. We know that the projection outlives all the + // regions in this list. + let env_bounds = self.projection_declared_bounds(origin.span(), projection_ty); + + debug!("projection_must_outlive: env_bounds={:?}", + env_bounds); + + // If we know that the projection outlives 'static, then we're + // done here. + if env_bounds.contains(&&ty::ReStatic) { + debug!("projection_must_outlive: 'static as declared bound"); + return; } - for &r in projection_ty.trait_ref.substs.regions() { - rcx.fcx.mk_subr(origin.clone(), region, r); - } + // If declared bounds list is empty, the only applicable rule is + // OutlivesProjectionComponent. If there are inference variables, + // then, we can break down the outlives into more primitive + // components without adding unnecessary edges. + // + // If there are *no* inference variables, however, we COULD do + // this, but we choose not to, because the error messages are less + // good. For example, a requirement like `T::Item: 'r` would be + // translated to a requirement that `T: 'r`; when this is reported + // to the user, it will thus say "T: 'r must hold so that T::Item: + // 'r holds". But that makes it sound like the only way to fix + // the problem is to add `T: 'r`, which isn't true. So, if there are no + // inference variables, we use a verify constraint instead of adding + // edges, which winds up enforcing the same condition. 
+ let needs_infer = projection_ty.trait_ref.needs_infer(); + if env_bounds.is_empty() && needs_infer { + debug!("projection_must_outlive: no declared bounds"); + + for component_ty in projection_ty.trait_ref.substs.types() { + self.type_must_outlive(origin.clone(), component_ty, region); + } - return; - } + for r in projection_ty.trait_ref.substs.regions() { + self.sub_regions(origin.clone(), region, r); + } - // If we find that there is a unique declared bound `'b`, and this bound - // appears in the trait reference, then the best action is to require that `'b:'r`, - // so do that. This is best no matter what rule we use: - // - // - OutlivesProjectionEnv or OutlivesProjectionTraitDef: these would translate to - // the requirement that `'b:'r` - // - OutlivesProjectionComponent: this would require `'b:'r` in addition to other conditions - if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) { - let unique_bound = env_bounds[0]; - debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound); - if projection_ty.trait_ref.substs.regions() - .iter() - .any(|r| env_bounds.contains(r)) - { - debug!("projection_must_outlive: unique declared bound appears in trait ref"); - rcx.fcx.mk_subr(origin.clone(), region, unique_bound); return; } - } - // Fallback to verifying after the fact that there exists a - // declared bound, or that all the components appearing in the - // projection outlive; in some cases, this may add insufficient - // edges into the inference graph, leading to inference failures - // even though a satisfactory solution exists. 
- let verify_bound = projection_bound(rcx, origin.span(), env_bounds, projection_ty); - let generic = GenericKind::Projection(projection_ty); - rcx.fcx.infcx().verify_generic_bound(origin, generic.clone(), region, verify_bound); -} - -fn type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, span: Span, ty: Ty<'tcx>) -> VerifyBound { - match ty.sty { - ty::TyParam(p) => { - param_bound(rcx, p) - } - ty::TyProjection(data) => { - let declared_bounds = projection_declared_bounds(rcx, span, data); - projection_bound(rcx, span, declared_bounds, data) + // If we find that there is a unique declared bound `'b`, and this bound + // appears in the trait reference, then the best action is to require that `'b:'r`, + // so do that. This is best no matter what rule we use: + // + // - OutlivesProjectionEnv or OutlivesProjectionTraitDef: these would translate to + // the requirement that `'b:'r` + // - OutlivesProjectionComponent: this would require `'b:'r` in addition to + // other conditions + if !env_bounds.is_empty() && env_bounds[1..].iter().all(|b| *b == env_bounds[0]) { + let unique_bound = env_bounds[0]; + debug!("projection_must_outlive: unique declared bound = {:?}", unique_bound); + if projection_ty.trait_ref.substs.regions().any(|r| env_bounds.contains(&r)) { + debug!("projection_must_outlive: unique declared bound appears in trait ref"); + self.sub_regions(origin.clone(), region, unique_bound); + return; + } } - _ => { - recursive_type_bound(rcx, span, ty) + + // Fallback to verifying after the fact that there exists a + // declared bound, or that all the components appearing in the + // projection outlive; in some cases, this may add insufficient + // edges into the inference graph, leading to inference failures + // even though a satisfactory solution exists. 
+ let verify_bound = self.projection_bound(origin.span(), env_bounds, projection_ty); + let generic = GenericKind::Projection(projection_ty); + self.verify_generic_bound(origin, generic.clone(), region, verify_bound); + } + + fn type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> { + match ty.sty { + ty::TyParam(p) => { + self.param_bound(p) + } + ty::TyProjection(data) => { + let declared_bounds = self.projection_declared_bounds(span, data); + self.projection_bound(span, declared_bounds, data) + } + _ => { + self.recursive_type_bound(span, ty) + } } } -} -fn param_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, param_ty: ty::ParamTy) -> VerifyBound { - let param_env = &rcx.infcx().parameter_environment; + fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> { + let param_env = &self.parameter_environment; - debug!("param_bound(param_ty={:?})", - param_ty); + debug!("param_bound(param_ty={:?})", + param_ty); - let mut param_bounds = declared_generic_bounds_from_env(rcx, GenericKind::Param(param_ty)); + let mut param_bounds = self.declared_generic_bounds_from_env(GenericKind::Param(param_ty)); - // Add in the default bound of fn body that applies to all in - // scope type parameters: - param_bounds.push(param_env.implicit_region_bound); + // Add in the default bound of fn body that applies to all in + // scope type parameters: + param_bounds.push(param_env.implicit_region_bound); - VerifyBound::AnyRegion(param_bounds) -} + VerifyBound::AnyRegion(param_bounds) + } -fn projection_declared_bounds<'a, 'tcx>(rcx: &Rcx<'a,'tcx>, - span: Span, - projection_ty: ty::ProjectionTy<'tcx>) - -> Vec -{ - // First assemble bounds from where clauses and traits. + fn projection_declared_bounds(&self, + span: Span, + projection_ty: ty::ProjectionTy<'tcx>) + -> Vec<&'tcx ty::Region> + { + // First assemble bounds from where clauses and traits. 
- let mut declared_bounds = - declared_generic_bounds_from_env(rcx, GenericKind::Projection(projection_ty)); + let mut declared_bounds = + self.declared_generic_bounds_from_env(GenericKind::Projection(projection_ty)); - declared_bounds.extend_from_slice( - &declared_projection_bounds_from_trait(rcx, span, projection_ty)); + declared_bounds.extend_from_slice( + &self.declared_projection_bounds_from_trait(span, projection_ty)); - declared_bounds -} + declared_bounds + } -fn projection_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - declared_bounds: Vec, - projection_ty: ty::ProjectionTy<'tcx>) - -> VerifyBound { - debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})", - declared_bounds, projection_ty); + fn projection_bound(&self, + span: Span, + declared_bounds: Vec<&'tcx ty::Region>, + projection_ty: ty::ProjectionTy<'tcx>) + -> VerifyBound<'tcx> { + debug!("projection_bound(declared_bounds={:?}, projection_ty={:?})", + declared_bounds, projection_ty); - // see the extensive comment in projection_must_outlive + // see the extensive comment in projection_must_outlive - let ty = rcx.tcx().mk_projection(projection_ty.trait_ref, projection_ty.item_name); - let recursive_bound = recursive_type_bound(rcx, span, ty); + let ty = self.tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name); + let recursive_bound = self.recursive_type_bound(span, ty); - VerifyBound::AnyRegion(declared_bounds).or(recursive_bound) -} + VerifyBound::AnyRegion(declared_bounds).or(recursive_bound) + } -fn recursive_type_bound<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - ty: Ty<'tcx>) - -> VerifyBound { - let mut bounds = vec![]; + fn recursive_type_bound(&self, span: Span, ty: Ty<'tcx>) -> VerifyBound<'tcx> { + let mut bounds = vec![]; - for subty in ty.walk_shallow() { - bounds.push(type_bound(rcx, span, subty)); - } + for subty in ty.walk_shallow() { + bounds.push(self.type_bound(span, subty)); + } - let mut regions = ty.regions(); - regions.retain(|r| 
!r.is_bound()); // ignore late-bound regions - bounds.push(VerifyBound::AllRegions(regions)); + let mut regions = ty.regions(); + regions.retain(|r| !r.is_bound()); // ignore late-bound regions + bounds.push(VerifyBound::AllRegions(regions)); - // remove bounds that must hold, since they are not interesting - bounds.retain(|b| !b.must_hold()); + // remove bounds that must hold, since they are not interesting + bounds.retain(|b| !b.must_hold()); - if bounds.len() == 1 { - bounds.pop().unwrap() - } else { - VerifyBound::AllBounds(bounds) + if bounds.len() == 1 { + bounds.pop().unwrap() + } else { + VerifyBound::AllBounds(bounds) + } } -} -fn declared_generic_bounds_from_env<'a, 'tcx>(rcx: &Rcx<'a, 'tcx>, - generic: GenericKind<'tcx>) - -> Vec -{ - let param_env = &rcx.infcx().parameter_environment; - - // To start, collect bounds from user: - let mut param_bounds = rcx.tcx().required_region_bounds(generic.to_ty(rcx.tcx()), - param_env.caller_bounds.clone()); - - // Next, collect regions we scraped from the well-formedness - // constraints in the fn signature. To do that, we walk the list - // of known relations from the fn ctxt. - // - // This is crucial because otherwise code like this fails: - // - // fn foo<'a, A>(x: &'a A) { x.bar() } - // - // The problem is that the type of `x` is `&'a A`. To be - // well-formed, then, A must be lower-generic by `'a`, but we - // don't know that this holds from first principles. - for &(r, p) in &rcx.region_bound_pairs { - debug!("generic={:?} p={:?}", - generic, - p); - if generic == p { - param_bounds.push(r); + fn declared_generic_bounds_from_env(&self, generic: GenericKind<'tcx>) + -> Vec<&'tcx ty::Region> + { + let param_env = &self.parameter_environment; + + // To start, collect bounds from user: + let mut param_bounds = self.tcx.required_region_bounds(generic.to_ty(self.tcx), + param_env.caller_bounds.clone()); + + // Next, collect regions we scraped from the well-formedness + // constraints in the fn signature. 
To do that, we walk the list + // of known relations from the fn ctxt. + // + // This is crucial because otherwise code like this fails: + // + // fn foo<'a, A>(x: &'a A) { x.bar() } + // + // The problem is that the type of `x` is `&'a A`. To be + // well-formed, then, A must be lower-generic by `'a`, but we + // don't know that this holds from first principles. + for &(r, p) in &self.region_bound_pairs { + debug!("generic={:?} p={:?}", + generic, + p); + if generic == p { + param_bounds.push(r); + } } - } - param_bounds -} + param_bounds + } -fn declared_projection_bounds_from_trait<'a,'tcx>(rcx: &Rcx<'a, 'tcx>, - span: Span, - projection_ty: ty::ProjectionTy<'tcx>) - -> Vec -{ - let fcx = rcx.fcx; - let tcx = fcx.tcx(); - let infcx = fcx.infcx(); - - debug!("projection_bounds(projection_ty={:?})", - projection_ty); - - let ty = tcx.mk_projection(projection_ty.trait_ref.clone(), projection_ty.item_name); - - // Say we have a projection `>::SomeType`. We are interested - // in looking for a trait definition like: - // - // ``` - // trait SomeTrait<'a> { - // type SomeType : 'a; - // } - // ``` - // - // we can thus deduce that `>::SomeType : 'a`. 
- let trait_predicates = tcx.lookup_predicates(projection_ty.trait_ref.def_id); - let predicates = trait_predicates.predicates.as_slice().to_vec(); - traits::elaborate_predicates(tcx, predicates) - .filter_map(|predicate| { - // we're only interesting in `T : 'a` style predicates: - let outlives = match predicate { - ty::Predicate::TypeOutlives(data) => data, - _ => { return None; } - }; + fn declared_projection_bounds_from_trait(&self, + span: Span, + projection_ty: ty::ProjectionTy<'tcx>) + -> Vec<&'tcx ty::Region> + { + debug!("projection_bounds(projection_ty={:?})", + projection_ty); - debug!("projection_bounds: outlives={:?} (1)", - outlives); + let ty = self.tcx.mk_projection(projection_ty.trait_ref.clone(), + projection_ty.item_name); - // apply the substitutions (and normalize any projected types) - let outlives = fcx.instantiate_type_scheme(span, - projection_ty.trait_ref.substs, - &outlives); + // Say we have a projection `>::SomeType`. We are interested + // in looking for a trait definition like: + // + // ``` + // trait SomeTrait<'a> { + // type SomeType : 'a; + // } + // ``` + // + // we can thus deduce that `>::SomeType : 'a`. 
+ let trait_predicates = self.tcx.item_predicates(projection_ty.trait_ref.def_id); + assert_eq!(trait_predicates.parent, None); + let predicates = trait_predicates.predicates.as_slice().to_vec(); + traits::elaborate_predicates(self.tcx, predicates) + .filter_map(|predicate| { + // we're only interesting in `T : 'a` style predicates: + let outlives = match predicate { + ty::Predicate::TypeOutlives(data) => data, + _ => { return None; } + }; - debug!("projection_bounds: outlives={:?} (2)", - outlives); + debug!("projection_bounds: outlives={:?} (1)", + outlives); - let region_result = infcx.commit_if_ok(|_| { - let (outlives, _) = - infcx.replace_late_bound_regions_with_fresh_var( - span, - infer::AssocTypeProjection(projection_ty.item_name), - &outlives); + // apply the substitutions (and normalize any projected types) + let outlives = self.instantiate_type_scheme(span, + projection_ty.trait_ref.substs, + &outlives); - debug!("projection_bounds: outlives={:?} (3)", + debug!("projection_bounds: outlives={:?} (2)", outlives); - // check whether this predicate applies to our current projection - match infer::mk_eqty(infcx, false, TypeOrigin::Misc(span), ty, outlives.0) { - Ok(()) => { Ok(outlives.1) } - Err(_) => { Err(()) } - } - }); + let region_result = self.commit_if_ok(|_| { + let (outlives, _) = + self.replace_late_bound_regions_with_fresh_var( + span, + infer::AssocTypeProjection(projection_ty.item_name), + &outlives); + + debug!("projection_bounds: outlives={:?} (3)", + outlives); + + // check whether this predicate applies to our current projection + let cause = self.fcx.misc(span); + match self.eq_types(false, &cause, ty, outlives.0) { + Ok(ok) => { + self.register_infer_ok_obligations(ok); + Ok(outlives.1) + } + Err(_) => { Err(()) } + } + }); - debug!("projection_bounds: region_result={:?}", - region_result); + debug!("projection_bounds: region_result={:?}", + region_result); - region_result.ok() - }) - .collect() + region_result.ok() + }) + .collect() + } 
} diff --git a/src/librustc_typeck/check/upvar.rs b/src/librustc_typeck/check/upvar.rs index 0b77935771e44..63d20416bded5 100644 --- a/src/librustc_typeck/check/upvar.rs +++ b/src/librustc_typeck/check/upvar.rs @@ -42,64 +42,50 @@ use super::FnCtxt; -use check::demand; use middle::expr_use_visitor as euv; use middle::mem_categorization as mc; use middle::mem_categorization::Categorization; -use middle::ty::{self, Ty}; -use middle::infer::{InferCtxt, UpvarRegion}; -use std::collections::HashSet; +use rustc::ty::{self, Ty}; +use rustc::infer::UpvarRegion; use syntax::ast; -use syntax::codemap::Span; -use rustc_front::hir; -use rustc_front::intravisit::{self, Visitor}; +use syntax_pos::Span; +use rustc::hir; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::util::nodemap::NodeMap; /////////////////////////////////////////////////////////////////////////// // PUBLIC ENTRY POINTS -pub fn closure_analyze_fn(fcx: &FnCtxt, - _id: ast::NodeId, - _decl: &hir::FnDecl, - body: &hir::Block) -{ - let mut seed = SeedBorrowKind::new(fcx); - seed.visit_block(body); - let closures_with_inferred_kinds = seed.closures_with_inferred_kinds; +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn closure_analyze(&self, body: &'gcx hir::Expr) { + let mut seed = SeedBorrowKind::new(self); + seed.visit_expr(body); - let mut adjust = AdjustBorrowKind::new(fcx, &closures_with_inferred_kinds); - adjust.visit_block(body); + let mut adjust = AdjustBorrowKind::new(self, seed.temp_closure_kinds); + adjust.visit_expr(body); - // it's our job to process these. 
- assert!(fcx.inh.deferred_call_resolutions.borrow().is_empty()); -} - -pub fn closure_analyze_const(fcx: &FnCtxt, - body: &hir::Expr) -{ - let mut seed = SeedBorrowKind::new(fcx); - seed.visit_expr(body); - let closures_with_inferred_kinds = seed.closures_with_inferred_kinds; - - let mut adjust = AdjustBorrowKind::new(fcx, &closures_with_inferred_kinds); - adjust.visit_expr(body); - - // it's our job to process these. - assert!(fcx.inh.deferred_call_resolutions.borrow().is_empty()); + // it's our job to process these. + assert!(self.deferred_call_resolutions.borrow().is_empty()); + } } /////////////////////////////////////////////////////////////////////////// // SEED BORROW KIND -struct SeedBorrowKind<'a,'tcx:'a> { - fcx: &'a FnCtxt<'a,'tcx>, - closures_with_inferred_kinds: HashSet, +struct SeedBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + temp_closure_kinds: NodeMap, } -impl<'a, 'tcx, 'v> Visitor<'v> for SeedBorrowKind<'a, 'tcx> { - fn visit_expr(&mut self, expr: &hir::Expr) { +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for SeedBorrowKind<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.fcx.tcx.map) + } + + fn visit_expr(&mut self, expr: &'gcx hir::Expr) { match expr.node { - hir::ExprClosure(cc, _, ref body) => { - self.check_closure(expr, cc, &**body); + hir::ExprClosure(cc, _, body_id, _) => { + self.check_closure(expr, cc, body_id); } _ => { } @@ -109,36 +95,26 @@ impl<'a, 'tcx, 'v> Visitor<'v> for SeedBorrowKind<'a, 'tcx> { } } -impl<'a,'tcx> SeedBorrowKind<'a,'tcx> { - fn new(fcx: &'a FnCtxt<'a,'tcx>) -> SeedBorrowKind<'a,'tcx> { - SeedBorrowKind { fcx: fcx, closures_with_inferred_kinds: HashSet::new() } - } - - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.fcx.tcx() - } - - fn infcx(&self) -> &'a InferCtxt<'a,'tcx> { - self.fcx.infcx() +impl<'a, 'gcx, 'tcx> SeedBorrowKind<'a, 'gcx, 'tcx> { + fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>) -> 
SeedBorrowKind<'a, 'gcx, 'tcx> { + SeedBorrowKind { fcx: fcx, temp_closure_kinds: NodeMap() } } fn check_closure(&mut self, expr: &hir::Expr, capture_clause: hir::CaptureClause, - _body: &hir::Block) + _body_id: hir::ExprId) { - let closure_def_id = self.tcx().map.local_def_id(expr.id); - if !self.fcx.inh.tables.borrow().closure_kinds.contains_key(&closure_def_id) { - self.closures_with_inferred_kinds.insert(expr.id); - self.fcx.inh.tables.borrow_mut().closure_kinds - .insert(closure_def_id, ty::FnClosureKind); - debug!("check_closure: adding closure_id={:?} to closures_with_inferred_kinds", - closure_def_id); + let closure_def_id = self.fcx.tcx.map.local_def_id(expr.id); + if !self.fcx.tables.borrow().closure_kinds.contains_key(&closure_def_id) { + self.temp_closure_kinds.insert(expr.id, ty::ClosureKind::Fn); + debug!("check_closure: adding closure {:?} as Fn", expr.id); } - self.tcx().with_freevars(expr.id, |freevars| { + self.fcx.tcx.with_freevars(expr.id, |freevars| { for freevar in freevars { - let var_node_id = freevar.def.var_id(); + let def_id = freevar.def.def_id(); + let var_node_id = self.fcx.tcx.map.as_local_node_id(def_id).unwrap(); let upvar_id = ty::UpvarId { var_id: var_node_id, closure_expr_id: expr.id }; debug!("seed upvar_id {:?}", upvar_id); @@ -149,14 +125,14 @@ impl<'a,'tcx> SeedBorrowKind<'a,'tcx> { } hir::CaptureByRef => { let origin = UpvarRegion(upvar_id, expr.span); - let freevar_region = self.infcx().next_region_var(origin); + let freevar_region = self.fcx.next_region_var(origin); let upvar_borrow = ty::UpvarBorrow { kind: ty::ImmBorrow, region: freevar_region }; ty::UpvarCapture::ByRef(upvar_borrow) } }; - self.fcx.inh.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind); + self.fcx.tables.borrow_mut().upvar_capture_map.insert(upvar_id, capture_kind); } }); } @@ -165,31 +141,37 @@ impl<'a,'tcx> SeedBorrowKind<'a,'tcx> { /////////////////////////////////////////////////////////////////////////// // ADJUST BORROW KIND 
-struct AdjustBorrowKind<'a,'tcx:'a> { - fcx: &'a FnCtxt<'a,'tcx>, - closures_with_inferred_kinds: &'a HashSet, +struct AdjustBorrowKind<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + temp_closure_kinds: NodeMap, } -impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { - fn new(fcx: &'a FnCtxt<'a,'tcx>, - closures_with_inferred_kinds: &'a HashSet) - -> AdjustBorrowKind<'a,'tcx> { - AdjustBorrowKind { fcx: fcx, closures_with_inferred_kinds: closures_with_inferred_kinds } +impl<'a, 'gcx, 'tcx> AdjustBorrowKind<'a, 'gcx, 'tcx> { + fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + temp_closure_kinds: NodeMap) + -> AdjustBorrowKind<'a, 'gcx, 'tcx> { + AdjustBorrowKind { fcx: fcx, temp_closure_kinds: temp_closure_kinds } } fn analyze_closure(&mut self, id: ast::NodeId, span: Span, decl: &hir::FnDecl, - body: &hir::Block) { + body_id: hir::ExprId) { /*! * Analysis starting point. */ - debug!("analyze_closure(id={:?}, body.id={:?})", id, body.id); + debug!("analyze_closure(id={:?}, body.id={:?})", id, body_id); { - let mut euv = euv::ExprUseVisitor::new(self, self.fcx.infcx()); + let body = self.fcx.tcx.map.expr(body_id); + let mut euv = + euv::ExprUseVisitor::with_options(self, + self.fcx, + mc::MemCategorizationOptions { + during_closure_kind_inference: true + }); euv.walk_fn(decl, body); } @@ -206,13 +188,13 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { // inference algorithm will reject it). // Extract the type variables UV0...UVn. 
- let closure_substs = match self.fcx.node_ty(id).sty { - ty::TyClosure(_, ref substs) => substs, + let (def_id, closure_substs) = match self.fcx.node_ty(id).sty { + ty::TyClosure(def_id, substs) => (def_id, substs), ref t => { - self.fcx.tcx().sess.span_bug( + span_bug!( span, - &format!("type of closure expr {:?} is not a closure {:?}", - id, t)); + "type of closure expr {:?} is not a closure {:?}", + id, t); } }; @@ -220,14 +202,20 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { let final_upvar_tys = self.final_upvar_tys(id); debug!("analyze_closure: id={:?} closure_substs={:?} final_upvar_tys={:?}", id, closure_substs, final_upvar_tys); - for (&upvar_ty, final_upvar_ty) in closure_substs.upvar_tys.iter().zip(final_upvar_tys) { - demand::eqtype(self.fcx, span, final_upvar_ty, upvar_ty); + for (upvar_ty, final_upvar_ty) in + closure_substs.upvar_tys(def_id, self.fcx.tcx).zip(final_upvar_tys) + { + self.fcx.demand_eqtype(span, final_upvar_ty, upvar_ty); } - // Now we must process and remove any deferred resolutions, - // since we have a concrete closure kind. - let closure_def_id = self.fcx.tcx().map.local_def_id(id); - if self.closures_with_inferred_kinds.contains(&id) { + // If we are also inferred the closure kind here, update the + // main table and process any deferred resolutions. + let closure_def_id = self.fcx.tcx.map.local_def_id(id); + if let Some(&kind) = self.temp_closure_kinds.get(&id) { + self.fcx.tables.borrow_mut().closure_kinds + .insert(closure_def_id, kind); + debug!("closure_kind({:?}) = {:?}", closure_def_id, kind); + let mut deferred_call_resolutions = self.fcx.remove_deferred_call_resolutions(closure_def_id); for deferred_call_resolution in &mut deferred_call_resolutions { @@ -243,36 +231,35 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { // local crate or were inlined into it along with some function. // This may change if abstract return types of some sort are // implemented. 
- let tcx = self.fcx.tcx(); + let tcx = self.fcx.tcx; tcx.with_freevars(closure_id, |freevars| { - freevars.iter() - .map(|freevar| { - let freevar_node_id = freevar.def.var_id(); - let freevar_ty = self.fcx.node_ty(freevar_node_id); - let upvar_id = ty::UpvarId { - var_id: freevar_node_id, - closure_expr_id: closure_id - }; - let capture = self.fcx.infcx().upvar_capture(upvar_id).unwrap(); - - debug!("freevar_node_id={:?} freevar_ty={:?} capture={:?}", - freevar_node_id, freevar_ty, capture); - - match capture { - ty::UpvarCapture::ByValue => freevar_ty, - ty::UpvarCapture::ByRef(borrow) => - tcx.mk_ref(tcx.mk_region(borrow.region), - ty::TypeAndMut { - ty: freevar_ty, - mutbl: borrow.kind.to_mutbl_lossy(), - }), - } - }) - .collect() - }) + freevars.iter().map(|freevar| { + let def_id = freevar.def.def_id(); + let var_id = tcx.map.as_local_node_id(def_id).unwrap(); + let freevar_ty = self.fcx.node_ty(var_id); + let upvar_id = ty::UpvarId { + var_id: var_id, + closure_expr_id: closure_id + }; + let capture = self.fcx.upvar_capture(upvar_id).unwrap(); + + debug!("var_id={:?} freevar_ty={:?} capture={:?}", + var_id, freevar_ty, capture); + + match capture { + ty::UpvarCapture::ByValue => freevar_ty, + ty::UpvarCapture::ByRef(borrow) => + tcx.mk_ref(borrow.region, + ty::TypeAndMut { + ty: freevar_ty, + mutbl: borrow.kind.to_mutbl_lossy(), + }), + } + }).collect() + }) } - fn adjust_upvar_borrow_kind_for_consume(&self, + fn adjust_upvar_borrow_kind_for_consume(&mut self, cmt: mc::cmt<'tcx>, mode: euv::ConsumeMode) { @@ -292,8 +279,8 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { debug!("adjust_upvar_borrow_kind_for_consume: guarantor={:?}", guarantor); match guarantor.cat { - Categorization::Deref(_, _, mc::BorrowedPtr(..)) | - Categorization::Deref(_, _, mc::Implicit(..)) => { + Categorization::Deref(.., mc::BorrowedPtr(..)) | + Categorization::Deref(.., mc::Implicit(..)) => { match cmt.note { mc::NoteUpvarRef(upvar_id) => { 
debug!("adjust_upvar_borrow_kind_for_consume: \ @@ -301,10 +288,11 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { upvar_id); // to move out of an upvar, this must be a FnOnce closure - self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnOnceClosureKind); + self.adjust_closure_kind(upvar_id.closure_expr_id, + ty::ClosureKind::FnOnce); let upvar_capture_map = - &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map; + &mut self.fcx.tables.borrow_mut().upvar_capture_map; upvar_capture_map.insert(upvar_id, ty::UpvarCapture::ByValue); } mc::NoteClosureEnv(upvar_id) => { @@ -314,7 +302,8 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { // must still adjust the kind of the closure // to be a FnOnce closure to permit moves out // of the environment. - self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnOnceClosureKind); + self.adjust_closure_kind(upvar_id.closure_expr_id, + ty::ClosureKind::FnOnce); } mc::NoteNone => { } @@ -351,7 +340,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem | Categorization::Rvalue(_) | Categorization::Local(_) | @@ -361,7 +350,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - fn adjust_upvar_borrow_kind_for_unique(&self, cmt: mc::cmt<'tcx>) { + fn adjust_upvar_borrow_kind_for_unique(&mut self, cmt: mc::cmt<'tcx>) { debug!("adjust_upvar_borrow_kind_for_unique(cmt={:?})", cmt); @@ -383,7 +372,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - Categorization::Deref(_, _, mc::UnsafePtr(..)) | + Categorization::Deref(.., mc::UnsafePtr(..)) | Categorization::StaticItem | Categorization::Rvalue(_) | Categorization::Local(_) | @@ -392,7 +381,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - fn try_adjust_upvar_deref(&self, + fn try_adjust_upvar_deref(&mut self, note: &mc::Note, borrow_kind: ty::BorrowKind) -> bool @@ -412,13 +401,13 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { // borrow_kind of the upvar to make 
sure it // is inferred to mutable if necessary { - let upvar_capture_map = &mut self.fcx.inh.tables.borrow_mut().upvar_capture_map; + let upvar_capture_map = &mut self.fcx.tables.borrow_mut().upvar_capture_map; let ub = upvar_capture_map.get_mut(&upvar_id).unwrap(); self.adjust_upvar_borrow_kind(upvar_id, ub, borrow_kind); } // also need to be in an FnMut closure since this is not an ImmBorrow - self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnMutClosureKind); + self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnMut); true } @@ -426,7 +415,7 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { // this kind of deref occurs in a `move` closure, or // for a by-value upvar; in either case, to mutate an // upvar, we need to be an FnMut closure - self.adjust_closure_kind(upvar_id.closure_expr_id, ty::FnMutClosureKind); + self.adjust_closure_kind(upvar_id.closure_expr_id, ty::ClosureKind::FnMut); true } @@ -436,11 +425,12 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - /// We infer the borrow_kind with which to borrow upvars in a stack closure. The borrow_kind - /// basically follows a lattice of `imm < unique-imm < mut`, moving from left to right as needed - /// (but never right to left). Here the argument `mutbl` is the borrow_kind that is required by + /// We infer the borrow_kind with which to borrow upvars in a stack closure. + /// The borrow_kind basically follows a lattice of `imm < unique-imm < mut`, + /// moving from left to right as needed (but never right to left). + /// Here the argument `mutbl` is the borrow_kind that is required by /// some particular use. 
- fn adjust_upvar_borrow_kind(&self, + fn adjust_upvar_borrow_kind(&mut self, upvar_id: ty::UpvarId, upvar_capture: &mut ty::UpvarCapture, kind: ty::BorrowKind) { @@ -470,55 +460,53 @@ impl<'a,'tcx> AdjustBorrowKind<'a,'tcx> { } } - fn adjust_closure_kind(&self, + fn adjust_closure_kind(&mut self, closure_id: ast::NodeId, new_kind: ty::ClosureKind) { debug!("adjust_closure_kind(closure_id={}, new_kind={:?})", closure_id, new_kind); - if !self.closures_with_inferred_kinds.contains(&closure_id) { - return; - } - - let closure_def_id = self.fcx.tcx().map.local_def_id(closure_id); - let closure_kinds = &mut self.fcx.inh.tables.borrow_mut().closure_kinds; - let existing_kind = *closure_kinds.get(&closure_def_id).unwrap(); - - debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}", - closure_id, existing_kind, new_kind); + if let Some(&existing_kind) = self.temp_closure_kinds.get(&closure_id) { + debug!("adjust_closure_kind: closure_id={}, existing_kind={:?}, new_kind={:?}", + closure_id, existing_kind, new_kind); - match (existing_kind, new_kind) { - (ty::FnClosureKind, ty::FnClosureKind) | - (ty::FnMutClosureKind, ty::FnClosureKind) | - (ty::FnMutClosureKind, ty::FnMutClosureKind) | - (ty::FnOnceClosureKind, _) => { - // no change needed - } + match (existing_kind, new_kind) { + (ty::ClosureKind::Fn, ty::ClosureKind::Fn) | + (ty::ClosureKind::FnMut, ty::ClosureKind::Fn) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) | + (ty::ClosureKind::FnOnce, _) => { + // no change needed + } - (ty::FnClosureKind, ty::FnMutClosureKind) | - (ty::FnClosureKind, ty::FnOnceClosureKind) | - (ty::FnMutClosureKind, ty::FnOnceClosureKind) => { - // new kind is stronger than the old kind - closure_kinds.insert(closure_def_id, new_kind); + (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) | + (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) | + (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => { + // new kind is stronger than the old kind + 
self.temp_closure_kinds.insert(closure_id, new_kind); + } } } } } -impl<'a, 'tcx, 'v> Visitor<'v> for AdjustBorrowKind<'a, 'tcx> { +impl<'a, 'gcx, 'tcx> Visitor<'gcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.fcx.tcx.map) + } + fn visit_fn(&mut self, - fn_kind: intravisit::FnKind<'v>, - decl: &'v hir::FnDecl, - body: &'v hir::Block, + fn_kind: intravisit::FnKind<'gcx>, + decl: &'gcx hir::FnDecl, + body: hir::ExprId, span: Span, id: ast::NodeId) { - intravisit::walk_fn(self, fn_kind, decl, body, span); + intravisit::walk_fn(self, fn_kind, decl, body, span, id); self.analyze_closure(id, span, decl, body); } } -impl<'a,'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a,'tcx> { +impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a, 'gcx, 'tcx> { fn consume(&mut self, _consume_id: ast::NodeId, _consume_span: Span, @@ -548,7 +536,7 @@ impl<'a,'tcx> euv::Delegate<'tcx> for AdjustBorrowKind<'a,'tcx> { borrow_id: ast::NodeId, _borrow_span: Span, cmt: mc::cmt<'tcx>, - _loan_region: ty::Region, + _loan_region: &'tcx ty::Region, bk: ty::BorrowKind, _loan_cause: euv::LoanCause) { diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 4f3f716c20d61..7870b3677d0d0 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -8,41 +8,70 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use astconv::AstConv; -use check::{FnCtxt, Inherited, blank_fn_ctxt, regionck}; +use astconv::ExplicitSelf; +use check::FnCtxt; use constrained_type_params::{identify_constrained_type_params, Parameter}; use CrateCtxt; -use middle::def_id::DefId; + +use hir::def_id::DefId; use middle::region::{CodeExtent}; -use middle::subst::{self, TypeSpace, FnSpace, ParamSpace, SelfSpace}; -use middle::traits; -use middle::ty::{self, Ty}; -use middle::ty::fold::{TypeFolder}; +use rustc::traits::{self, ObligationCauseCode}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::nodemap::{FxHashSet, FxHashMap}; +use rustc::middle::lang_items; -use std::cell::RefCell; -use std::collections::HashSet; use syntax::ast; -use syntax::codemap::{Span}; -use syntax::errors::DiagnosticBuilder; -use syntax::parse::token::{special_idents}; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::hir; +use syntax_pos::Span; +use errors::DiagnosticBuilder; + +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir; pub struct CheckTypeWellFormedVisitor<'ccx, 'tcx:'ccx> { ccx: &'ccx CrateCtxt<'ccx, 'tcx>, - code: traits::ObligationCauseCode<'tcx>, + code: ObligationCauseCode<'tcx>, } -impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { - pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'tcx>) - -> CheckTypeWellFormedVisitor<'ccx, 'tcx> { +/// Helper type of a temporary returned by .for_item(...). +/// Necessary because we can't write the following bound: +/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(FnCtxt<'b, 'gcx, 'tcx>). 
+struct CheckWfFcxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + inherited: super::InheritedBuilder<'a, 'gcx, 'tcx>, + code: ObligationCauseCode<'gcx>, + id: ast::NodeId, + span: Span +} + +impl<'a, 'gcx, 'tcx> CheckWfFcxBuilder<'a, 'gcx, 'tcx> { + fn with_fcx(&'tcx mut self, f: F) where + F: for<'b> FnOnce(&FnCtxt<'b, 'gcx, 'tcx>, + &mut CheckTypeWellFormedVisitor<'b, 'gcx>) -> Vec> + { + let code = self.code.clone(); + let id = self.id; + let span = self.span; + self.inherited.enter(|inh| { + let fcx = FnCtxt::new(&inh, inh.ccx.tcx.types.never, id); + let wf_tys = f(&fcx, &mut CheckTypeWellFormedVisitor { + ccx: fcx.ccx, + code: code + }); + fcx.select_all_obligations_or_error(); + fcx.regionck_item(id, span, &wf_tys); + }); + } +} + +impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { + pub fn new(ccx: &'ccx CrateCtxt<'ccx, 'gcx>) + -> CheckTypeWellFormedVisitor<'ccx, 'gcx> { CheckTypeWellFormedVisitor { ccx: ccx, - code: traits::ObligationCauseCode::MiscObligation + code: ObligationCauseCode::MiscObligation } } - fn tcx(&self) -> &ty::ctxt<'tcx> { + fn tcx(&self) -> TyCtxt<'ccx, 'gcx, 'gcx> { self.ccx.tcx } @@ -85,22 +114,16 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { ref trait_ref, ref self_ty, _) => { self.check_impl(item, self_ty, trait_ref); } - hir::ItemImpl(_, hir::ImplPolarity::Negative, _, Some(_), _, _) => { + hir::ItemImpl(_, hir::ImplPolarity::Negative, _, Some(_), ..) => { // FIXME(#27579) what amount of WF checking do we need for neg impls? 
let trait_ref = ccx.tcx.impl_trait_ref(ccx.tcx.map.local_def_id(item.id)).unwrap(); - ccx.tcx.populate_implementations_for_trait_if_necessary(trait_ref.def_id); - match ccx.tcx.lang_items.to_builtin_kind(trait_ref.def_id) { - Some(ty::BoundSend) | Some(ty::BoundSync) => {} - Some(_) | None => { - if !ccx.tcx.trait_has_default_impl(trait_ref.def_id) { - error_192(ccx, item.span); - } - } + if !ccx.tcx.trait_has_default_impl(trait_ref.def_id) { + error_192(ccx, item.span); } } - hir::ItemFn(_, _, _, _, _, ref body) => { - self.check_item_fn(item, body); + hir::ItemFn(.., body_id) => { + self.check_item_fn(item, body_id); } hir::ItemStatic(..) => { self.check_item_type(item); @@ -109,57 +132,75 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { self.check_item_type(item); } hir::ItemStruct(ref struct_def, ref ast_generics) => { - self.check_type_defn(item, |fcx| { - vec![struct_variant(fcx, struct_def)] + self.check_type_defn(item, false, |fcx| { + vec![fcx.struct_variant(struct_def)] + }); + + self.check_variances_for_type_defn(item, ast_generics); + } + hir::ItemUnion(ref struct_def, ref ast_generics) => { + self.check_type_defn(item, true, |fcx| { + vec![fcx.struct_variant(struct_def)] }); self.check_variances_for_type_defn(item, ast_generics); } hir::ItemEnum(ref enum_def, ref ast_generics) => { - self.check_type_defn(item, |fcx| { - enum_variants(fcx, enum_def) + self.check_type_defn(item, true, |fcx| { + fcx.enum_variants(enum_def) }); self.check_variances_for_type_defn(item, ast_generics); } - hir::ItemTrait(_, _, _, ref items) => { - self.check_trait(item, items); + hir::ItemTrait(..) 
=> { + self.check_trait(item); } _ => {} } } - fn check_trait_or_impl_item(&mut self, item_id: ast::NodeId, span: Span) { + fn check_trait_or_impl_item(&mut self, + item_id: ast::NodeId, + span: Span, + sig_if_method: Option<&hir::MethodSig>) { let code = self.code.clone(); - self.with_fcx(item_id, span, |fcx, this| { - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let free_id_outlive = fcx.inh.infcx.parameter_environment.free_id_outlive; + self.for_id(item_id, span).with_fcx(|fcx, this| { + let free_substs = &fcx.parameter_environment.free_substs; + let free_id_outlive = fcx.parameter_environment.free_id_outlive; - let item = fcx.tcx().impl_or_trait_item(fcx.tcx().map.local_def_id(item_id)); + let item = fcx.tcx.associated_item(fcx.tcx.map.local_def_id(item_id)); - let (mut implied_bounds, self_ty) = match item.container() { - ty::TraitContainer(_) => (vec![], fcx.tcx().mk_self_type()), - ty::ImplContainer(def_id) => (impl_implied_bounds(fcx, def_id, span), - fcx.tcx().lookup_item_type(def_id).ty) + let (mut implied_bounds, self_ty) = match item.container { + ty::TraitContainer(_) => (vec![], fcx.tcx.mk_self_type()), + ty::ImplContainer(def_id) => (fcx.impl_implied_bounds(def_id, span), + fcx.tcx.item_type(def_id)) }; - match item { - ty::ConstTraitItem(assoc_const) => { - let ty = fcx.instantiate_type_scheme(span, free_substs, &assoc_const.ty); + match item.kind { + ty::AssociatedKind::Const => { + let ty = fcx.tcx.item_type(item.def_id); + let ty = fcx.instantiate_type_scheme(span, free_substs, &ty); fcx.register_wf_obligation(ty, span, code.clone()); } - ty::MethodTraitItem(method) => { - reject_shadowing_type_parameters(fcx.tcx(), span, &method.generics); - let method_ty = fcx.instantiate_type_scheme(span, free_substs, &method.fty); - let predicates = fcx.instantiate_bounds(span, free_substs, &method.predicates); - this.check_fn_or_method(fcx, span, &method_ty, &predicates, + ty::AssociatedKind::Method => { + 
reject_shadowing_type_parameters(fcx.tcx, item.def_id); + let method_ty = fcx.tcx.item_type(item.def_id); + let method_ty = fcx.instantiate_type_scheme(span, free_substs, &method_ty); + let predicates = fcx.instantiate_bounds(span, item.def_id, free_substs); + let fty = match method_ty.sty { + ty::TyFnDef(_, _, f) => f, + _ => bug!() + }; + this.check_fn_or_method(fcx, span, fty, &predicates, free_id_outlive, &mut implied_bounds); - this.check_method_receiver(fcx, span, &method, + let sig_if_method = sig_if_method.expect("bad signature for method"); + this.check_method_receiver(fcx, sig_if_method, &item, free_id_outlive, self_ty); } - ty::TypeTraitItem(assoc_type) => { - if let Some(ref ty) = assoc_type.ty { - let ty = fcx.instantiate_type_scheme(span, free_substs, ty); + ty::AssociatedKind::Type => { + if item.defaultness.has_value() { + let ty = fcx.tcx.item_type(item.def_id); + let ty = fcx.instantiate_type_scheme(span, free_substs, &ty); fcx.register_wf_obligation(ty, span, code.clone()); } } @@ -169,45 +210,38 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { }) } - fn with_item_fcx(&mut self, item: &hir::Item, f: F) where - F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>, - &mut CheckTypeWellFormedVisitor<'ccx,'tcx>) -> Vec>, - { - self.with_fcx(item.id, item.span, f) + fn for_item<'tcx>(&self, item: &hir::Item) + -> CheckWfFcxBuilder<'ccx, 'gcx, 'tcx> { + self.for_id(item.id, item.span) } - fn with_fcx(&mut self, id: ast::NodeId, span: Span, mut f: F) where - F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>, - &mut CheckTypeWellFormedVisitor<'ccx,'tcx>) -> Vec>, - { - let ccx = self.ccx; - let param_env = ty::ParameterEnvironment::for_item(ccx.tcx, id); - let tables = RefCell::new(ty::Tables::empty()); - let inh = Inherited::new(ccx.tcx, &tables, param_env); - let fcx = blank_fn_ctxt(ccx, &inh, ty::FnDiverging, id); - let wf_tys = f(&fcx, self); - fcx.select_all_obligations_or_error(); - regionck::regionck_item(&fcx, id, span, &wf_tys); + fn for_id<'tcx>(&self, id: 
ast::NodeId, span: Span) + -> CheckWfFcxBuilder<'ccx, 'gcx, 'tcx> { + CheckWfFcxBuilder { + inherited: self.ccx.inherited(id), + code: self.code.clone(), + id: id, + span: span + } } /// In a type definition, we check that to ensure that the types of the fields are well-formed. - fn check_type_defn(&mut self, item: &hir::Item, mut lookup_fields: F) where - F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>) -> Vec>, + fn check_type_defn(&mut self, item: &hir::Item, all_sized: bool, mut lookup_fields: F) + where F: for<'fcx, 'tcx> FnMut(&FnCtxt<'fcx, 'gcx, 'tcx>) -> Vec> { - self.with_item_fcx(item, |fcx, this| { + self.for_item(item).with_fcx(|fcx, this| { let variants = lookup_fields(fcx); for variant in &variants { // For DST, all intermediate types must be sized. - if let Some((_, fields)) = variant.fields.split_last() { - for field in fields { - fcx.register_builtin_bound( - field.ty, - ty::BoundSized, - traits::ObligationCause::new(field.span, - fcx.body_id, - traits::FieldSized)); - } + let unsized_len = if all_sized || variant.fields.is_empty() { 0 } else { 1 }; + for field in &variant.fields[..variant.fields.len() - unsized_len] { + fcx.register_bound( + field.ty, + fcx.tcx.require_lang_item(lang_items::SizedTraitLangItem), + traits::ObligationCause::new(field.span, + fcx.body_id, + traits::FieldSized)); } // All field types must be well-formed. 
@@ -216,31 +250,86 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { } } - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let predicates = fcx.tcx().lookup_predicates(fcx.tcx().map.local_def_id(item.id)); - let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates); + let free_substs = &fcx.parameter_environment.free_substs; + let def_id = fcx.tcx.map.local_def_id(item.id); + let predicates = fcx.instantiate_bounds(item.span, def_id, free_substs); this.check_where_clauses(fcx, item.span, &predicates); vec![] // no implied bounds in a struct def'n }); } - fn check_trait(&mut self, - item: &hir::Item, - items: &[hir::TraitItem]) - { - let trait_def_id = self.tcx().map.local_def_id(item.id); + fn check_auto_trait(&mut self, trait_def_id: DefId, span: Span) { + // We want to ensure: + // + // 1) that there are no items contained within + // the trait defintion + // + // 2) that the definition doesn't violate the no-super trait rule + // for auto traits. + // + // 3) that the trait definition does not have any type parameters + + let predicates = self.tcx().item_predicates(trait_def_id); + + // We must exclude the Self : Trait predicate contained by all + // traits. + let has_predicates = + predicates.predicates.iter().any(|predicate| { + match predicate { + &ty::Predicate::Trait(ref poly_trait_ref) => { + let self_ty = poly_trait_ref.0.self_ty(); + !(self_ty.is_self() && poly_trait_ref.def_id() == trait_def_id) + }, + _ => true, + } + }); + + let has_ty_params = self.tcx().item_generics(trait_def_id).types.len() > 1; + + // We use an if-else here, since the generics will also trigger + // an extraneous error message when we find predicates like + // `T : Sized` for a trait like: `trait Magic`. + // + // We also put the check on the number of items here, + // as it seems confusing to report an error about + // extraneous predicates created by things like + // an associated type inside the trait. 
+ let mut err = None; + if !self.tcx().associated_item_def_ids(trait_def_id).is_empty() { + error_380(self.ccx, span); + } else if has_ty_params { + err = Some(struct_span_err!(self.tcx().sess, span, E0567, + "traits with auto impls (`e.g. impl \ + Trait for ..`) can not have type parameters")); + } else if has_predicates { + err = Some(struct_span_err!(self.tcx().sess, span, E0568, + "traits with auto impls (`e.g. impl \ + Trait for ..`) cannot have predicates")); + } - if self.ccx.tcx.trait_has_default_impl(trait_def_id) { - if !items.is_empty() { - error_380(self.ccx, item.span); + // Finally if either of the above conditions apply we should add a note + // indicating that this error is the result of a recent soundness fix. + match err { + None => {}, + Some(mut e) => { + e.note("the new auto trait rules are the result of a \ + recent soundness fix; see #29859 for more details"); + e.emit(); } } + } - self.with_item_fcx(item, |fcx, this| { - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let predicates = fcx.tcx().lookup_predicates(trait_def_id); - let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates); + fn check_trait(&mut self, item: &hir::Item) { + let trait_def_id = self.tcx().map.local_def_id(item.id); + + if self.tcx().trait_has_default_impl(trait_def_id) { + self.check_auto_trait(trait_def_id, item.span); + } + + self.for_item(item).with_fcx(|fcx, this| { + let free_substs = &fcx.parameter_environment.free_substs; + let predicates = fcx.instantiate_bounds(item.span, trait_def_id, free_substs); this.check_where_clauses(fcx, item.span, &predicates); vec![] }); @@ -248,24 +337,24 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { fn check_item_fn(&mut self, item: &hir::Item, - body: &hir::Block) + body_id: hir::ExprId) { - self.with_item_fcx(item, |fcx, this| { - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let type_scheme = 
fcx.tcx().lookup_item_type(fcx.tcx().map.local_def_id(item.id)); - let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &type_scheme.ty); + self.for_item(item).with_fcx(|fcx, this| { + let free_substs = &fcx.parameter_environment.free_substs; + let def_id = fcx.tcx.map.local_def_id(item.id); + let ty = fcx.tcx.item_type(def_id); + let item_ty = fcx.instantiate_type_scheme(item.span, free_substs, &ty); let bare_fn_ty = match item_ty.sty { - ty::TyBareFn(_, ref bare_fn_ty) => bare_fn_ty, + ty::TyFnDef(.., ref bare_fn_ty) => bare_fn_ty, _ => { - this.tcx().sess.span_bug(item.span, "Fn item without bare fn type"); + span_bug!(item.span, "Fn item without fn type"); } }; - let predicates = fcx.tcx().lookup_predicates(fcx.tcx().map.local_def_id(item.id)); - let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates); + let predicates = fcx.instantiate_bounds(item.span, def_id, free_substs); let mut implied_bounds = vec![]; - let free_id_outlive = fcx.tcx().region_maps.call_site_extent(item.id, body.id); + let free_id_outlive = fcx.tcx.region_maps.call_site_extent(item.id, body_id.node_id()); this.check_fn_or_method(fcx, item.span, bare_fn_ty, &predicates, free_id_outlive, &mut implied_bounds); implied_bounds @@ -277,14 +366,12 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { { debug!("check_item_type: {:?}", item); - self.with_item_fcx(item, |fcx, this| { - let type_scheme = fcx.tcx().lookup_item_type(fcx.tcx().map.local_def_id(item.id)); + self.for_item(item).with_fcx(|fcx, this| { + let ty = fcx.tcx.item_type(fcx.tcx.map.local_def_id(item.id)); let item_ty = fcx.instantiate_type_scheme(item.span, - &fcx.inh - .infcx - .parameter_environment + &fcx.parameter_environment .free_substs, - &type_scheme.ty); + &ty); fcx.register_wf_obligation(item_ty, item.span, this.code.clone()); @@ -299,18 +386,18 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { { debug!("check_impl: {:?}", item); - self.with_item_fcx(item, |fcx, this| 
{ - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let item_def_id = fcx.tcx().map.local_def_id(item.id); + self.for_item(item).with_fcx(|fcx, this| { + let free_substs = &fcx.parameter_environment.free_substs; + let item_def_id = fcx.tcx.map.local_def_id(item.id); match *ast_trait_ref { Some(ref ast_trait_ref) => { - let trait_ref = fcx.tcx().impl_trait_ref(item_def_id).unwrap(); + let trait_ref = fcx.tcx.impl_trait_ref(item_def_id).unwrap(); let trait_ref = fcx.instantiate_type_scheme( ast_trait_ref.path.span, free_substs, &trait_ref); let obligations = - ty::wf::trait_obligations(fcx.infcx(), + ty::wf::trait_obligations(fcx, fcx.body_id, &trait_ref, ast_trait_ref.path.span); @@ -319,29 +406,28 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { } } None => { - let self_ty = fcx.tcx().node_id_to_type(item.id); + let self_ty = fcx.tcx.item_type(item_def_id); let self_ty = fcx.instantiate_type_scheme(item.span, free_substs, &self_ty); fcx.register_wf_obligation(self_ty, ast_self_ty.span, this.code.clone()); } } - let predicates = fcx.tcx().lookup_predicates(item_def_id); - let predicates = fcx.instantiate_bounds(item.span, free_substs, &predicates); + let predicates = fcx.instantiate_bounds(item.span, item_def_id, free_substs); this.check_where_clauses(fcx, item.span, &predicates); - impl_implied_bounds(fcx, fcx.tcx().map.local_def_id(item.id), item.span) + fcx.impl_implied_bounds(item_def_id, item.span) }); } - fn check_where_clauses<'fcx>(&mut self, - fcx: &FnCtxt<'fcx,'tcx>, - span: Span, - predicates: &ty::InstantiatedPredicates<'tcx>) + fn check_where_clauses<'fcx, 'tcx>(&mut self, + fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, + span: Span, + predicates: &ty::InstantiatedPredicates<'tcx>) { let obligations = predicates.predicates .iter() - .flat_map(|p| ty::wf::predicate_obligations(fcx.infcx(), + .flat_map(|p| ty::wf::predicate_obligations(fcx, fcx.body_id, p, span)); @@ -351,75 +437,75 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 
'tcx> { } } - fn check_fn_or_method<'fcx>(&mut self, - fcx: &FnCtxt<'fcx,'tcx>, - span: Span, - fty: &ty::BareFnTy<'tcx>, - predicates: &ty::InstantiatedPredicates<'tcx>, - free_id_outlive: CodeExtent, - implied_bounds: &mut Vec>) + fn check_fn_or_method<'fcx, 'tcx>(&mut self, + fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, + span: Span, + fty: &'tcx ty::BareFnTy<'tcx>, + predicates: &ty::InstantiatedPredicates<'tcx>, + free_id_outlive: CodeExtent, + implied_bounds: &mut Vec>) { - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let fty = fcx.instantiate_type_scheme(span, free_substs, fty); - let sig = fcx.tcx().liberate_late_bound_regions(free_id_outlive, &fty.sig); + let free_substs = &fcx.parameter_environment.free_substs; + let fty = fcx.instantiate_type_scheme(span, free_substs, &fty); + let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.sig); for &input_ty in &sig.inputs { fcx.register_wf_obligation(input_ty, span, self.code.clone()); } implied_bounds.extend(sig.inputs); - match sig.output { - ty::FnConverging(output) => { - fcx.register_wf_obligation(output, span, self.code.clone()); + fcx.register_wf_obligation(sig.output, span, self.code.clone()); - // FIXME(#25759) return types should not be implied bounds - implied_bounds.push(output); - } - ty::FnDiverging => { } - } + // FIXME(#25759) return types should not be implied bounds + implied_bounds.push(sig.output); self.check_where_clauses(fcx, span, predicates); } - fn check_method_receiver<'fcx>(&mut self, - fcx: &FnCtxt<'fcx,'tcx>, - span: Span, - method: &ty::Method<'tcx>, - free_id_outlive: CodeExtent, - self_ty: ty::Ty<'tcx>) + fn check_method_receiver<'fcx, 'tcx>(&mut self, + fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, + method_sig: &hir::MethodSig, + method: &ty::AssociatedItem, + free_id_outlive: CodeExtent, + self_ty: ty::Ty<'tcx>) { // check that the type of the method's receiver matches the // method's first parameter. 
+ debug!("check_method_receiver({:?}, self_ty={:?})", + method, self_ty); - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - let fty = fcx.instantiate_type_scheme(span, free_substs, &method.fty); - let sig = fcx.tcx().liberate_late_bound_regions(free_id_outlive, &fty.sig); + if !method.method_has_self_argument { + return; + } - debug!("check_method_receiver({:?},cat={:?},self_ty={:?},sig={:?})", - method.name, method.explicit_self, self_ty, sig); + let span = method_sig.decl.inputs[0].pat.span; - let rcvr_ty = match method.explicit_self { - ty::ExplicitSelfCategory::Static => return, - ty::ExplicitSelfCategory::ByValue => self_ty, - ty::ExplicitSelfCategory::ByReference(region, mutability) => { - fcx.tcx().mk_ref(fcx.tcx().mk_region(region), ty::TypeAndMut { + let free_substs = &fcx.parameter_environment.free_substs; + let method_ty = fcx.tcx.item_type(method.def_id); + let fty = fcx.instantiate_type_scheme(span, free_substs, &method_ty); + let sig = fcx.tcx.liberate_late_bound_regions(free_id_outlive, &fty.fn_sig()); + + debug!("check_method_receiver: sig={:?}", sig); + + let self_arg_ty = sig.inputs[0]; + let rcvr_ty = match ExplicitSelf::determine(self_ty, self_arg_ty) { + ExplicitSelf::ByValue => self_ty, + ExplicitSelf::ByReference(region, mutbl) => { + fcx.tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, - mutbl: mutability + mutbl: mutbl }) } - ty::ExplicitSelfCategory::ByBox => fcx.tcx().mk_box(self_ty) + ExplicitSelf::ByBox => fcx.tcx.mk_box(self_ty) }; let rcvr_ty = fcx.instantiate_type_scheme(span, free_substs, &rcvr_ty); - let rcvr_ty = fcx.tcx().liberate_late_bound_regions(free_id_outlive, - &ty::Binder(rcvr_ty)); + let rcvr_ty = fcx.tcx.liberate_late_bound_regions(free_id_outlive, + &ty::Binder(rcvr_ty)); debug!("check_method_receiver: receiver ty = {:?}", rcvr_ty); - let _ = ::require_same_types( - fcx.tcx(), Some(fcx.infcx()), false, span, - sig.inputs[0], rcvr_ty, - || "mismatched method receiver".to_owned() - ); + let cause = 
fcx.cause(span, ObligationCauseCode::MethodReceiver); + fcx.demand_eqtype_with_origin(&cause, rcvr_ty, self_arg_ty); } fn check_variances_for_type_defn(&self, @@ -427,83 +513,52 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { ast_generics: &hir::Generics) { let item_def_id = self.tcx().map.local_def_id(item.id); - let ty_predicates = self.tcx().lookup_predicates(item_def_id); + let ty = self.tcx().item_type(item_def_id); + if self.tcx().has_error_field(ty) { + return; + } + + let ty_predicates = self.tcx().item_predicates(item_def_id); + assert_eq!(ty_predicates.parent, None); let variances = self.tcx().item_variances(item_def_id); - let mut constrained_parameters: HashSet<_> = - variances.types - .iter_enumerated() - .filter(|&(_, _, &variance)| variance != ty::Bivariant) - .map(|(space, index, _)| self.param_ty(ast_generics, space, index)) - .map(|p| Parameter::Type(p)) + let mut constrained_parameters: FxHashSet<_> = + variances.iter().enumerate() + .filter(|&(_, &variance)| variance != ty::Bivariant) + .map(|(index, _)| Parameter(index as u32)) .collect(); - identify_constrained_type_params(self.tcx(), - ty_predicates.predicates.as_slice(), + identify_constrained_type_params(ty_predicates.predicates.as_slice(), None, &mut constrained_parameters); - for (space, index, _) in variances.types.iter_enumerated() { - let param_ty = self.param_ty(ast_generics, space, index); - if constrained_parameters.contains(&Parameter::Type(param_ty)) { + for (index, _) in variances.iter().enumerate() { + if constrained_parameters.contains(&Parameter(index as u32)) { continue; } - let span = self.ty_param_span(ast_generics, item, space, index); - self.report_bivariance(span, param_ty.name); - } - for (space, index, &variance) in variances.regions.iter_enumerated() { - if variance != ty::Bivariant { - continue; - } - - assert_eq!(space, TypeSpace); - let span = ast_generics.lifetimes[index].lifetime.span; - let name = ast_generics.lifetimes[index].lifetime.name; + let 
(span, name) = if index < ast_generics.lifetimes.len() { + (ast_generics.lifetimes[index].lifetime.span, + ast_generics.lifetimes[index].lifetime.name) + } else { + let index = index - ast_generics.lifetimes.len(); + (ast_generics.ty_params[index].span, + ast_generics.ty_params[index].name) + }; self.report_bivariance(span, name); } } - fn param_ty(&self, - ast_generics: &hir::Generics, - space: ParamSpace, - index: usize) - -> ty::ParamTy - { - let name = match space { - TypeSpace => ast_generics.ty_params[index].name, - SelfSpace => special_idents::type_self.name, - FnSpace => self.tcx().sess.bug("Fn space occupied?"), - }; - - ty::ParamTy { space: space, idx: index as u32, name: name } - } - - fn ty_param_span(&self, - ast_generics: &hir::Generics, - item: &hir::Item, - space: ParamSpace, - index: usize) - -> Span - { - match space { - TypeSpace => ast_generics.ty_params[index].span, - SelfSpace => item.span, - FnSpace => self.tcx().sess.span_bug(item.span, "Fn space occupied?"), - } - } - fn report_bivariance(&self, span: Span, param_name: ast::Name) { - let mut err = error_392(self.tcx(), span, param_name); + let mut err = error_392(self.ccx, span, param_name); let suggested_marker_id = self.tcx().lang_items.phantom_data(); match suggested_marker_id { Some(def_id) => { - err.fileline_help( - span, + err.help( &format!("consider removing `{}` or using a marker such as `{}`", param_name, self.tcx().item_path_str(def_id))); @@ -516,20 +571,33 @@ impl<'ccx, 'tcx> CheckTypeWellFormedVisitor<'ccx, 'tcx> { } } -fn reject_shadowing_type_parameters<'tcx>(tcx: &ty::ctxt<'tcx>, - span: Span, - generics: &ty::Generics<'tcx>) { - let impl_params = generics.types.get_slice(subst::TypeSpace).iter() - .map(|tp| tp.name).collect::>(); - - for method_param in generics.types.get_slice(subst::FnSpace) { - if impl_params.contains(&method_param.name) { - error_194(tcx, span, method_param.name); +fn reject_shadowing_type_parameters(tcx: TyCtxt, def_id: DefId) { + let generics = 
tcx.item_generics(def_id); + let parent = tcx.item_generics(generics.parent.unwrap()); + let impl_params: FxHashMap<_, _> = parent.types + .iter() + .map(|tp| (tp.name, tp.def_id)) + .collect(); + + for method_param in &generics.types { + if impl_params.contains_key(&method_param.name) { + // Tighten up the span to focus on only the shadowing type + let type_span = tcx.def_span(method_param.def_id); + + // The expectation here is that the original trait declaration is + // local so it should be okay to just unwrap everything. + let trait_def_id = impl_params[&method_param.name]; + let trait_decl_span = tcx.def_span(trait_def_id); + error_194(tcx, type_span, trait_decl_span, method_param.name); } } } impl<'ccx, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'ccx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { + NestedVisitorMap::None + } + fn visit_item(&mut self, i: &hir::Item) { debug!("visit_item: {:?}", i); self.check_item_well_formed(i); @@ -538,13 +606,21 @@ impl<'ccx, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'ccx, 'tcx> { fn visit_trait_item(&mut self, trait_item: &'v hir::TraitItem) { debug!("visit_trait_item: {:?}", trait_item); - self.check_trait_or_impl_item(trait_item.id, trait_item.span); + let method_sig = match trait_item.node { + hir::TraitItem_::MethodTraitItem(ref sig, _) => Some(sig), + _ => None + }; + self.check_trait_or_impl_item(trait_item.id, trait_item.span, method_sig); intravisit::walk_trait_item(self, trait_item) } fn visit_impl_item(&mut self, impl_item: &'v hir::ImplItem) { debug!("visit_impl_item: {:?}", impl_item); - self.check_trait_or_impl_item(impl_item.id, impl_item.span); + let method_sig = match impl_item.node { + hir::ImplItemKind::Method(ref sig, _) => Some(sig), + _ => None + }; + self.check_trait_or_impl_item(impl_item.id, impl_item.span, method_sig); intravisit::walk_impl_item(self, impl_item) } } @@ -561,76 +637,73 @@ struct AdtField<'tcx> { span: Span, } -fn 
struct_variant<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - struct_def: &hir::VariantData) - -> AdtVariant<'tcx> { - let fields = - struct_def.fields().iter() - .map(|field| { - let field_ty = fcx.tcx().node_id_to_type(field.node.id); - let field_ty = fcx.instantiate_type_scheme(field.span, - &fcx.inh - .infcx - .parameter_environment - .free_substs, - &field_ty); - AdtField { ty: field_ty, span: field.span } - }) - .collect(); - AdtVariant { fields: fields } -} +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + fn struct_variant(&self, struct_def: &hir::VariantData) -> AdtVariant<'tcx> { + let fields = + struct_def.fields().iter() + .map(|field| { + let field_ty = self.tcx.item_type(self.tcx.map.local_def_id(field.id)); + let field_ty = self.instantiate_type_scheme(field.span, + &self.parameter_environment + .free_substs, + &field_ty); + AdtField { ty: field_ty, span: field.span } + }) + .collect(); + AdtVariant { fields: fields } + } -fn enum_variants<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>, - enum_def: &hir::EnumDef) - -> Vec> { - enum_def.variants.iter() - .map(|variant| struct_variant(fcx, &variant.node.data)) - .collect() -} + fn enum_variants(&self, enum_def: &hir::EnumDef) -> Vec> { + enum_def.variants.iter() + .map(|variant| self.struct_variant(&variant.node.data)) + .collect() + } -fn impl_implied_bounds<'fcx,'tcx>(fcx: &FnCtxt<'fcx, 'tcx>, - impl_def_id: DefId, - span: Span) - -> Vec> -{ - let free_substs = &fcx.inh.infcx.parameter_environment.free_substs; - match fcx.tcx().impl_trait_ref(impl_def_id) { - Some(ref trait_ref) => { - // Trait impl: take implied bounds from all types that - // appear in the trait reference. 
- let trait_ref = fcx.instantiate_type_scheme(span, free_substs, trait_ref); - trait_ref.substs.types.as_slice().to_vec() - } + fn impl_implied_bounds(&self, impl_def_id: DefId, span: Span) -> Vec> { + let free_substs = &self.parameter_environment.free_substs; + match self.tcx.impl_trait_ref(impl_def_id) { + Some(ref trait_ref) => { + // Trait impl: take implied bounds from all types that + // appear in the trait reference. + let trait_ref = self.instantiate_type_scheme(span, free_substs, trait_ref); + trait_ref.substs.types().collect() + } - None => { - // Inherent impl: take implied bounds from the self type. - let self_ty = fcx.tcx().lookup_item_type(impl_def_id).ty; - let self_ty = fcx.instantiate_type_scheme(span, free_substs, &self_ty); - vec![self_ty] + None => { + // Inherent impl: take implied bounds from the self type. + let self_ty = self.tcx.item_type(impl_def_id); + let self_ty = self.instantiate_type_scheme(span, free_substs, &self_ty); + vec![self_ty] + } } } } -pub fn error_192<'ccx,'tcx>(ccx: &'ccx CrateCtxt<'ccx, 'tcx>, span: Span) { +fn error_192(ccx: &CrateCtxt, span: Span) { span_err!(ccx.tcx.sess, span, E0192, "negative impls are only allowed for traits with \ default impls (e.g., `Send` and `Sync`)") } -pub fn error_380<'ccx,'tcx>(ccx: &'ccx CrateCtxt<'ccx, 'tcx>, span: Span) { +fn error_380(ccx: &CrateCtxt, span: Span) { span_err!(ccx.tcx.sess, span, E0380, - "traits with default impls (`e.g. unsafe impl \ + "traits with default impls (`e.g. 
impl \ Trait for ..`) must have no methods or associated items") } -pub fn error_392<'tcx>(tcx: &ty::ctxt<'tcx>, span: Span, param_name: ast::Name) +fn error_392<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, span: Span, param_name: ast::Name) -> DiagnosticBuilder<'tcx> { - struct_span_err!(tcx.sess, span, E0392, - "parameter `{}` is never used", param_name) + let mut err = struct_span_err!(ccx.tcx.sess, span, E0392, + "parameter `{}` is never used", param_name); + err.span_label(span, &format!("unused type parameter")); + err } -pub fn error_194<'tcx>(tcx: &ty::ctxt<'tcx>, span: Span, name: ast::Name) { - span_err!(tcx.sess, span, E0194, +fn error_194(tcx: TyCtxt, span: Span, trait_decl_span: Span, name: ast::Name) { + struct_span_err!(tcx.sess, span, E0194, "type parameter `{}` shadows another type parameter of the same name", - name); + name) + .span_label(span, &format!("shadows another type parameter")) + .span_label(trait_decl_span, &format!("first `{}` declared here", name)) + .emit(); } diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index c2abb074efa13..56de75995fd2e 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -13,57 +13,63 @@ // substitutions. 
use self::ResolveReason::*; -use astconv::AstConv; use check::FnCtxt; -use middle::def_id::DefId; -use middle::pat_util; -use middle::ty::{self, Ty, MethodCall, MethodCallee}; -use middle::ty::adjustment; -use middle::ty::fold::{TypeFolder,TypeFoldable}; -use middle::infer; -use write_substs_to_tcx; -use write_ty_to_tcx; +use hir::def_id::DefId; +use rustc::ty::{self, Ty, TyCtxt, MethodCall, MethodCallee}; +use rustc::ty::adjustment; +use rustc::ty::fold::{TypeFolder,TypeFoldable}; +use rustc::infer::{InferCtxt, FixupError}; +use rustc::util::nodemap::DefIdMap; use std::cell::Cell; use syntax::ast; -use syntax::codemap::{DUMMY_SP, Span}; -use rustc_front::print::pprust::pat_to_string; -use rustc_front::intravisit::{self, Visitor}; -use rustc_front::util as hir_util; -use rustc_front::hir; +use syntax_pos::Span; + +use rustc::hir::print::pat_to_string; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::{self, PatKind}; /////////////////////////////////////////////////////////////////////////// // Entry point functions -pub fn resolve_type_vars_in_expr(fcx: &FnCtxt, e: &hir::Expr) { - assert_eq!(fcx.writeback_errors.get(), false); - let mut wbcx = WritebackCx::new(fcx); - wbcx.visit_expr(e); - wbcx.visit_upvar_borrow_map(); - wbcx.visit_closures(); - wbcx.visit_liberated_fn_sigs(); -} +impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { + pub fn resolve_type_vars_in_expr(&self, e: &'gcx hir::Expr, item_id: ast::NodeId) { + assert_eq!(self.writeback_errors.get(), false); + let mut wbcx = WritebackCx::new(self); + wbcx.visit_expr(e); + wbcx.visit_upvar_borrow_map(); + wbcx.visit_closures(); + wbcx.visit_liberated_fn_sigs(); + wbcx.visit_fru_field_types(); + wbcx.visit_deferred_obligations(item_id); + wbcx.visit_type_nodes(); + } -pub fn resolve_type_vars_in_fn(fcx: &FnCtxt, - decl: &hir::FnDecl, - blk: &hir::Block) { - assert_eq!(fcx.writeback_errors.get(), false); - let mut wbcx = WritebackCx::new(fcx); - wbcx.visit_block(blk); - for arg in 
&decl.inputs { - wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id); - wbcx.visit_pat(&*arg.pat); - - // Privacy needs the type for the whole pattern, not just each binding - if !pat_util::pat_is_binding(&fcx.tcx().def_map.borrow(), &*arg.pat) { - wbcx.visit_node_id(ResolvingPattern(arg.pat.span), - arg.pat.id); + pub fn resolve_type_vars_in_fn(&self, + decl: &'gcx hir::FnDecl, + body: &'gcx hir::Expr, + item_id: ast::NodeId) { + assert_eq!(self.writeback_errors.get(), false); + let mut wbcx = WritebackCx::new(self); + wbcx.visit_expr(body); + for arg in &decl.inputs { + wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.id); + wbcx.visit_pat(&arg.pat); + + // Privacy needs the type for the whole pattern, not just each binding + if let PatKind::Binding(..) = arg.pat.node {} else { + wbcx.visit_node_id(ResolvingPattern(arg.pat.span), arg.pat.id); + } } + wbcx.visit_upvar_borrow_map(); + wbcx.visit_closures(); + wbcx.visit_liberated_fn_sigs(); + wbcx.visit_fru_field_types(); + wbcx.visit_anon_types(); + wbcx.visit_deferred_obligations(item_id); + wbcx.visit_type_nodes(); } - wbcx.visit_upvar_borrow_map(); - wbcx.visit_closures(); - wbcx.visit_liberated_fn_sigs(); } /////////////////////////////////////////////////////////////////////////// @@ -74,17 +80,63 @@ pub fn resolve_type_vars_in_fn(fcx: &FnCtxt, // there, it applies a few ad-hoc checks that were not convenient to // do elsewhere. -struct WritebackCx<'cx, 'tcx: 'cx> { - fcx: &'cx FnCtxt<'cx, 'tcx>, +struct WritebackCx<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { + fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>, + + // Mapping from free regions of the function to the + // early-bound versions of them, visible from the + // outside of the function. This is needed by, and + // only populated if there are any `impl Trait`. 
+ free_to_bound_regions: DefIdMap<&'gcx ty::Region> } -impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { - fn new(fcx: &'cx FnCtxt<'cx, 'tcx>) -> WritebackCx<'cx, 'tcx> { - WritebackCx { fcx: fcx } +impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { + fn new(fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>) -> WritebackCx<'cx, 'gcx, 'tcx> { + let mut wbcx = WritebackCx { + fcx: fcx, + free_to_bound_regions: DefIdMap() + }; + + // Only build the reverse mapping if `impl Trait` is used. + if fcx.anon_types.borrow().is_empty() { + return wbcx; + } + + let gcx = fcx.tcx.global_tcx(); + let free_substs = fcx.parameter_environment.free_substs; + for (i, k) in free_substs.params().iter().enumerate() { + let r = if let Some(r) = k.as_region() { + r + } else { + continue; + }; + match *r { + ty::ReFree(ty::FreeRegion { + bound_region: ty::BoundRegion::BrNamed(def_id, name, _), .. + }) => { + let bound_region = gcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { + index: i as u32, + name: name, + })); + wbcx.free_to_bound_regions.insert(def_id, bound_region); + } + _ => { + bug!("{:?} is not a free region for an early-bound lifetime", r); + } + } + } + + wbcx } - fn tcx(&self) -> &'cx ty::ctxt<'tcx> { - self.fcx.tcx() + fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> { + self.fcx.tcx + } + + fn write_ty_to_tcx(&self, node_id: ast::NodeId, ty: Ty<'gcx>) { + debug!("write_ty_to_tcx({}, {:?})", node_id, ty); + assert!(!ty.needs_infer()); + self.tcx().tables.borrow_mut().node_types.insert(node_id, ty); } // Hacky hack: During type-checking, we treat *all* operators @@ -96,13 +148,13 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { hir::ExprBinary(ref op, ref lhs, ref rhs) | hir::ExprAssignOp(ref op, ref lhs, ref rhs) => { let lhs_ty = self.fcx.node_ty(lhs.id); - let lhs_ty = self.fcx.infcx().resolve_type_vars_if_possible(&lhs_ty); + let lhs_ty = self.fcx.resolve_type_vars_if_possible(&lhs_ty); let rhs_ty = self.fcx.node_ty(rhs.id); - let rhs_ty = self.fcx.infcx().resolve_type_vars_if_possible(&rhs_ty); + let 
rhs_ty = self.fcx.resolve_type_vars_if_possible(&rhs_ty); if lhs_ty.is_scalar() && rhs_ty.is_scalar() { - self.fcx.inh.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id)); + self.fcx.tables.borrow_mut().method_map.remove(&MethodCall::expr(e.id)); // weird but true: the by-ref binops put an // adjustment on the lhs but not the rhs; the @@ -110,34 +162,15 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { // system. match e.node { hir::ExprBinary(..) => { - if !hir_util::is_by_value_binop(op.node) { - self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id); + if !op.node.is_by_value() { + self.fcx.tables.borrow_mut().adjustments.remove(&lhs.id); } }, hir::ExprAssignOp(..) => { - self.fcx.inh.tables.borrow_mut().adjustments.remove(&lhs.id); + self.fcx.tables.borrow_mut().adjustments.remove(&lhs.id); }, _ => {}, } - } else { - let tcx = self.tcx(); - - if let hir::ExprAssignOp(_, ref lhs, ref rhs) = e.node { - if - !tcx.sess.features.borrow().augmented_assignments && - !self.fcx.expr_ty(e).references_error() && - !self.fcx.expr_ty(lhs).references_error() && - !self.fcx.expr_ty(rhs).references_error() - { - tcx.sess.struct_span_err(e.span, - "overloaded augmented assignments \ - are not stable") - .fileline_help(e.span, - "add #![feature(augmented_assignments)] to the \ - crate root to enable") - .emit() - } - } } } _ => {}, @@ -153,17 +186,21 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { // below. In general, a function is made into a `visitor` if it must // traffic in node-ids or update tables in the type context etc. 
-impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { - fn visit_stmt(&mut self, s: &hir::Stmt) { +impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> { + NestedVisitorMap::OnlyBodies(&self.fcx.tcx.map) + } + + fn visit_stmt(&mut self, s: &'gcx hir::Stmt) { if self.fcx.writeback_errors.get() { return; } - self.visit_node_id(ResolvingExpr(s.span), hir_util::stmt_id(s)); + self.visit_node_id(ResolvingExpr(s.span), s.node.id()); intravisit::walk_stmt(self, s); } - fn visit_expr(&mut self, e: &hir::Expr) { + fn visit_expr(&mut self, e: &'gcx hir::Expr) { if self.fcx.writeback_errors.get() { return; } @@ -174,7 +211,7 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { self.visit_method_map_entry(ResolvingExpr(e.span), MethodCall::expr(e.id)); - if let hir::ExprClosure(_, ref decl, _) = e.node { + if let hir::ExprClosure(_, ref decl, ..) = e.node { for input in &decl.inputs { self.visit_node_id(ResolvingExpr(e.span), input.id); } @@ -183,7 +220,7 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { intravisit::walk_expr(self, e); } - fn visit_block(&mut self, b: &hir::Block) { + fn visit_block(&mut self, b: &'gcx hir::Block) { if self.fcx.writeback_errors.get() { return; } @@ -192,7 +229,7 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { intravisit::walk_block(self, b); } - fn visit_pat(&mut self, p: &hir::Pat) { + fn visit_pat(&mut self, p: &'gcx hir::Pat) { if self.fcx.writeback_errors.get() { return; } @@ -202,27 +239,27 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { debug!("Type for pattern binding {} (id {}) resolved to {:?}", pat_to_string(p), p.id, - self.tcx().node_id_to_type(p.id)); + self.tcx().tables().node_id_to_type(p.id)); intravisit::walk_pat(self, p); } - fn visit_local(&mut self, l: &hir::Local) { + fn visit_local(&mut self, l: &'gcx hir::Local) { if self.fcx.writeback_errors.get() { return; } 
let var_ty = self.fcx.local_ty(l.span, l.id); let var_ty = self.resolve(&var_ty, ResolvingLocal(l.span)); - write_ty_to_tcx(self.tcx(), l.id, var_ty); + self.write_ty_to_tcx(l.id, var_ty); intravisit::walk_local(self, l); } - fn visit_ty(&mut self, t: &hir::Ty) { + fn visit_ty(&mut self, t: &'gcx hir::Ty) { match t.node { - hir::TyFixedLengthVec(ref ty, ref count_expr) => { - self.visit_ty(&**ty); - write_ty_to_tcx(self.tcx(), count_expr.id, self.tcx().types.usize); + hir::TyArray(ref ty, ref count_expr) => { + self.visit_ty(&ty); + self.write_ty_to_tcx(count_expr.id, self.tcx().types.usize); } hir::TyBareFn(ref function_declaration) => { intravisit::walk_fn_decl_nopat(self, &function_declaration.decl); @@ -233,13 +270,13 @@ impl<'cx, 'tcx, 'v> Visitor<'v> for WritebackCx<'cx, 'tcx> { } } -impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { +impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { fn visit_upvar_borrow_map(&self) { if self.fcx.writeback_errors.get() { return; } - for (upvar_id, upvar_capture) in self.fcx.inh.tables.borrow().upvar_capture_map.iter() { + for (upvar_id, upvar_capture) in self.fcx.tables.borrow().upvar_capture_map.iter() { let new_upvar_capture = match *upvar_capture { ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue, ty::UpvarCapture::ByRef(ref upvar_borrow) => { @@ -252,11 +289,11 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { debug!("Upvar capture for {:?} resolved to {:?}", upvar_id, new_upvar_capture); - self.fcx.tcx() - .tables - .borrow_mut() - .upvar_capture_map - .insert(*upvar_id, new_upvar_capture); + self.tcx() + .tables + .borrow_mut() + .upvar_capture_map + .insert(*upvar_id, new_upvar_capture); } } @@ -265,63 +302,133 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { return } - for (def_id, closure_ty) in self.fcx.inh.tables.borrow().closure_tys.iter() { + for (def_id, closure_ty) in self.fcx.tables.borrow().closure_tys.iter() { let closure_ty = self.resolve(closure_ty, ResolvingClosure(*def_id)); - 
self.fcx.tcx().tables.borrow_mut().closure_tys.insert(*def_id, closure_ty); + self.tcx().tables.borrow_mut().closure_tys.insert(*def_id, closure_ty); + } + + for (def_id, &closure_kind) in self.fcx.tables.borrow().closure_kinds.iter() { + self.tcx().tables.borrow_mut().closure_kinds.insert(*def_id, closure_kind); } + } + + fn visit_anon_types(&self) { + if self.fcx.writeback_errors.get() { + return + } + + let gcx = self.tcx().global_tcx(); + for (&def_id, &concrete_ty) in self.fcx.anon_types.borrow().iter() { + let reason = ResolvingAnonTy(def_id); + let inside_ty = self.resolve(&concrete_ty, reason); + + // Convert the type from the function into a type valid outside + // the function, by replacing free regions with early-bound ones. + let outside_ty = gcx.fold_regions(&inside_ty, &mut false, |r, _| { + match *r { + // 'static is valid everywhere. + ty::ReStatic | + ty::ReEmpty => gcx.mk_region(*r), + + // Free regions that come from early-bound regions are valid. + ty::ReFree(ty::FreeRegion { + bound_region: ty::BoundRegion::BrNamed(def_id, ..), .. + }) if self.free_to_bound_regions.contains_key(&def_id) => { + self.free_to_bound_regions[&def_id] + } + + ty::ReFree(_) | + ty::ReEarlyBound(_) | + ty::ReLateBound(..) | + ty::ReScope(_) | + ty::ReSkolemized(..) => { + let span = reason.span(self.tcx()); + span_err!(self.tcx().sess, span, E0564, + "only named lifetimes are allowed in `impl Trait`, \ + but `{}` was found in the type `{}`", r, inside_ty); + gcx.mk_region(ty::ReStatic) + } + + ty::ReVar(_) | + ty::ReErased => { + let span = reason.span(self.tcx()); + span_bug!(span, "invalid region in impl Trait: {:?}", r); + } + } + }); - for (def_id, &closure_kind) in self.fcx.inh.tables.borrow().closure_kinds.iter() { - self.fcx.tcx().tables.borrow_mut().closure_kinds.insert(*def_id, closure_kind); + gcx.item_types.borrow_mut().insert(def_id, outside_ty); } } fn visit_node_id(&self, reason: ResolveReason, id: ast::NodeId) { + // Export associated path extensions. 
+ if let Some(def) = self.fcx.tables.borrow_mut().type_relative_path_defs.remove(&id) { + self.tcx().tables.borrow_mut().type_relative_path_defs.insert(id, def); + } + // Resolve any borrowings for the node with id `id` self.visit_adjustments(reason, id); // Resolve the type of the node with id `id` let n_ty = self.fcx.node_ty(id); let n_ty = self.resolve(&n_ty, reason); - write_ty_to_tcx(self.tcx(), id, n_ty); + self.write_ty_to_tcx(id, n_ty); debug!("Node {} has type {:?}", id, n_ty); // Resolve any substitutions self.fcx.opt_node_ty_substs(id, |item_substs| { - write_substs_to_tcx(self.tcx(), id, - self.resolve(item_substs, reason)); + let item_substs = self.resolve(item_substs, reason); + if !item_substs.is_noop() { + debug!("write_substs_to_tcx({}, {:?})", id, item_substs); + assert!(!item_substs.substs.needs_infer()); + self.tcx().tables.borrow_mut().item_substs.insert(id, item_substs); + } }); } fn visit_adjustments(&self, reason: ResolveReason, id: ast::NodeId) { - let adjustments = self.fcx.inh.tables.borrow_mut().adjustments.remove(&id); + let adjustments = self.fcx.tables.borrow_mut().adjustments.remove(&id); match adjustments { None => { debug!("No adjustments for node {}", id); } Some(adjustment) => { - let resolved_adjustment = match adjustment { - adjustment::AdjustReifyFnPointer => { - adjustment::AdjustReifyFnPointer + let resolved_adjustment = match adjustment.kind { + adjustment::Adjust::NeverToAny => { + adjustment::Adjust::NeverToAny + } + + adjustment::Adjust::ReifyFnPointer => { + adjustment::Adjust::ReifyFnPointer + } + + adjustment::Adjust::MutToConstPointer => { + adjustment::Adjust::MutToConstPointer } - adjustment::AdjustUnsafeFnPointer => { - adjustment::AdjustUnsafeFnPointer + adjustment::Adjust::UnsafeFnPointer => { + adjustment::Adjust::UnsafeFnPointer } - adjustment::AdjustDerefRef(adj) => { - for autoderef in 0..adj.autoderefs { + adjustment::Adjust::DerefRef { autoderefs, autoref, unsize } => { + for autoderef in 0..autoderefs { 
let method_call = MethodCall::autoderef(id, autoderef as u32); self.visit_method_map_entry(reason, method_call); } - adjustment::AdjustDerefRef(adjustment::AutoDerefRef { - autoderefs: adj.autoderefs, - autoref: self.resolve(&adj.autoref, reason), - unsize: self.resolve(&adj.unsize, reason), - }) + adjustment::Adjust::DerefRef { + autoderefs: autoderefs, + autoref: self.resolve(&autoref, reason), + unsize: unsize, + } } }; + let resolved_adjustment = adjustment::Adjustment { + kind: resolved_adjustment, + target: self.resolve(&adjustment.target, reason) + }; debug!("Adjustments for node {}: {:?}", id, resolved_adjustment); self.tcx().tables.borrow_mut().adjustments.insert( id, resolved_adjustment); @@ -333,7 +440,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { reason: ResolveReason, method_call: MethodCall) { // Resolve any method map entry - let new_method = match self.fcx.inh.tables.borrow_mut().method_map.remove(&method_call) { + let new_method = match self.fcx.tables.borrow_mut().method_map.remove(&method_call) { Some(method) => { debug!("writeback::resolve_method_map_entry(call={:?}, entry={:?})", method_call, @@ -341,7 +448,7 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { let new_method = MethodCallee { def_id: method.def_id, ty: self.resolve(&method.ty, reason), - substs: self.tcx().mk_substs(self.resolve(method.substs, reason)), + substs: self.resolve(&method.substs, reason), }; Some(new_method) @@ -350,32 +457,62 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { }; //NB(jroesch): We need to match twice to avoid a double borrow which would cause an ICE - match new_method { - Some(method) => { - self.tcx().tables.borrow_mut().method_map.insert( - method_call, - method); - } - None => {} + if let Some(method) = new_method { + self.tcx().tables.borrow_mut().method_map.insert(method_call, method); } } fn visit_liberated_fn_sigs(&self) { - for (&node_id, fn_sig) in self.fcx.inh.tables.borrow().liberated_fn_sigs.iter() { + for (&node_id, fn_sig) in 
self.fcx.tables.borrow().liberated_fn_sigs.iter() { let fn_sig = self.resolve(fn_sig, ResolvingFnSig(node_id)); self.tcx().tables.borrow_mut().liberated_fn_sigs.insert(node_id, fn_sig.clone()); } } - fn resolve>(&self, t: &T, reason: ResolveReason) -> T { - t.fold_with(&mut Resolver::new(self.fcx, reason)) + fn visit_fru_field_types(&self) { + for (&node_id, ftys) in self.fcx.tables.borrow().fru_field_types.iter() { + let ftys = self.resolve(ftys, ResolvingFieldTypes(node_id)); + self.tcx().tables.borrow_mut().fru_field_types.insert(node_id, ftys); + } + } + + fn visit_deferred_obligations(&self, item_id: ast::NodeId) { + let deferred_obligations = self.fcx.deferred_obligations.borrow(); + let obligations: Vec<_> = deferred_obligations.iter().map(|obligation| { + let reason = ResolvingDeferredObligation(obligation.cause.span); + self.resolve(obligation, reason) + }).collect(); + + if !obligations.is_empty() { + assert!(self.fcx.ccx.deferred_obligations.borrow_mut() + .insert(item_id, obligations).is_none()); + } + } + + fn visit_type_nodes(&self) { + for (&id, ty) in self.fcx.ast_ty_to_ty_cache.borrow().iter() { + let ty = self.resolve(ty, ResolvingTyNode(id)); + self.fcx.ccx.ast_ty_to_ty_cache.borrow_mut().insert(id, ty); + } + } + + fn resolve(&self, x: &T, reason: ResolveReason) -> T::Lifted + where T: TypeFoldable<'tcx> + ty::Lift<'gcx> + { + let x = x.fold_with(&mut Resolver::new(self.fcx, reason)); + if let Some(lifted) = self.tcx().lift_to_global(&x) { + lifted + } else { + span_bug!(reason.span(self.tcx()), + "writeback: `{:?}` missing from the global type context", x); + } } } /////////////////////////////////////////////////////////////////////////// // Resolution reason. 
-#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] enum ResolveReason { ResolvingExpr(Span), ResolvingLocal(Span), @@ -383,10 +520,14 @@ enum ResolveReason { ResolvingUpvar(ty::UpvarId), ResolvingClosure(DefId), ResolvingFnSig(ast::NodeId), + ResolvingFieldTypes(ast::NodeId), + ResolvingAnonTy(DefId), + ResolvingDeferredObligation(Span), + ResolvingTyNode(ast::NodeId), } -impl ResolveReason { - fn span(&self, tcx: &ty::ctxt) -> Span { +impl<'a, 'gcx, 'tcx> ResolveReason { + fn span(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Span { match *self { ResolvingExpr(s) => s, ResolvingLocal(s) => s, @@ -394,16 +535,16 @@ impl ResolveReason { ResolvingUpvar(upvar_id) => { tcx.expr_span(upvar_id.closure_expr_id) } - ResolvingFnSig(id) => { + ResolvingFnSig(id) | + ResolvingFieldTypes(id) | + ResolvingTyNode(id) => { tcx.map.span(id) } - ResolvingClosure(did) => { - if let Some(node_id) = tcx.map.as_local_node_id(did) { - tcx.expr_span(node_id) - } else { - DUMMY_SP - } + ResolvingClosure(did) | + ResolvingAnonTy(did) => { + tcx.def_span(did) } + ResolvingDeferredObligation(span) => span } } } @@ -412,25 +553,25 @@ impl ResolveReason { // The Resolver. This is the type folding engine that detects // unresolved types and so forth. 
-struct Resolver<'cx, 'tcx: 'cx> { - tcx: &'cx ty::ctxt<'tcx>, - infcx: &'cx infer::InferCtxt<'cx, 'tcx>, +struct Resolver<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, writeback_errors: &'cx Cell, reason: ResolveReason, } -impl<'cx, 'tcx> Resolver<'cx, 'tcx> { - fn new(fcx: &'cx FnCtxt<'cx, 'tcx>, +impl<'cx, 'gcx, 'tcx> Resolver<'cx, 'gcx, 'tcx> { + fn new(fcx: &'cx FnCtxt<'cx, 'gcx, 'tcx>, reason: ResolveReason) - -> Resolver<'cx, 'tcx> + -> Resolver<'cx, 'gcx, 'tcx> { - Resolver::from_infcx(fcx.infcx(), &fcx.writeback_errors, reason) + Resolver::from_infcx(fcx, &fcx.writeback_errors, reason) } - fn from_infcx(infcx: &'cx infer::InferCtxt<'cx, 'tcx>, + fn from_infcx(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, writeback_errors: &'cx Cell, reason: ResolveReason) - -> Resolver<'cx, 'tcx> + -> Resolver<'cx, 'gcx, 'tcx> { Resolver { infcx: infcx, tcx: infcx.tcx, @@ -438,34 +579,36 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> { reason: reason } } - fn report_error(&self, e: infer::FixupError) { + fn report_error(&self, e: FixupError) { self.writeback_errors.set(true); if !self.tcx.sess.has_errors() { match self.reason { ResolvingExpr(span) => { - span_err!(self.tcx.sess, span, E0101, - "cannot determine a type for this expression: {}", - infer::fixup_err_to_string(e)); + struct_span_err!( + self.tcx.sess, span, E0101, + "cannot determine a type for this expression: {}", e) + .span_label(span, &format!("cannot resolve type of expression")) + .emit(); } ResolvingLocal(span) => { - span_err!(self.tcx.sess, span, E0102, - "cannot determine a type for this local variable: {}", - infer::fixup_err_to_string(e)); + struct_span_err!( + self.tcx.sess, span, E0102, + "cannot determine a type for this local variable: {}", e) + .span_label(span, &format!("cannot resolve type of variable")) + .emit(); } ResolvingPattern(span) => { span_err!(self.tcx.sess, span, E0103, - "cannot determine a type for this pattern binding: {}", - 
infer::fixup_err_to_string(e)); + "cannot determine a type for this pattern binding: {}", e); } ResolvingUpvar(upvar_id) => { let span = self.reason.span(self.tcx); span_err!(self.tcx.sess, span, E0104, "cannot resolve lifetime for captured variable `{}`: {}", - self.tcx.local_var_name_str(upvar_id.var_id).to_string(), - infer::fixup_err_to_string(e)); + self.tcx.local_var_name_str(upvar_id.var_id), e); } ResolvingClosure(_) => { @@ -474,22 +617,32 @@ impl<'cx, 'tcx> Resolver<'cx, 'tcx> { "cannot determine a type for this closure") } - ResolvingFnSig(id) => { + ResolvingFnSig(_) | + ResolvingFieldTypes(_) | + ResolvingDeferredObligation(_) | + ResolvingTyNode(_) => { // any failures here should also fail when // resolving the patterns, closure types, or // something else. let span = self.reason.span(self.tcx); self.tcx.sess.delay_span_bug( span, - &format!("cannot resolve some aspect of fn sig for {:?}", id)); + &format!("cannot resolve some aspect of data for {:?}: {}", + self.reason, e)); + } + + ResolvingAnonTy(_) => { + let span = self.reason.span(self.tcx); + span_err!(self.tcx.sess, span, E0563, + "cannot determine a type for this `impl Trait`: {}", e) } } } } } -impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> { - fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { +impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for Resolver<'cx, 'gcx, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx } @@ -505,12 +658,12 @@ impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> { } } - fn fold_region(&mut self, r: ty::Region) -> ty::Region { + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { match self.infcx.fully_resolve(&r) { Ok(r) => r, Err(e) => { self.report_error(e); - ty::ReStatic + self.tcx.mk_region(ty::ReStatic) } } } diff --git a/src/librustc_typeck/check_unused.rs b/src/librustc_typeck/check_unused.rs new file mode 100644 index 0000000000000..0034a85f8e29e --- /dev/null +++ b/src/librustc_typeck/check_unused.rs @@ -0,0 
+1,61 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use lint; +use rustc::dep_graph::DepNode; +use rustc::ty::TyCtxt; + +use syntax::ast; +use syntax_pos::{Span, DUMMY_SP}; + +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; + +struct UnusedTraitImportVisitor<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx> UnusedTraitImportVisitor<'a, 'tcx> { + fn check_import(&self, id: ast::NodeId, span: Span) { + if !self.tcx.maybe_unused_trait_imports.contains(&id) { + return; + } + if self.tcx.used_trait_imports.borrow().contains(&id) { + return; + } + + let msg = if let Ok(snippet) = self.tcx.sess.codemap().span_to_snippet(span) { + format!("unused import: `{}`", snippet) + } else { + "unused import".to_string() + }; + self.tcx.sess.add_lint(lint::builtin::UNUSED_IMPORTS, id, span, msg); + } +} + +impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for UnusedTraitImportVisitor<'a, 'tcx> { + fn visit_item(&mut self, item: &hir::Item) { + if item.vis == hir::Public || item.span == DUMMY_SP { + return; + } + if let hir::ItemUse(ref path, _) = item.node { + self.check_import(item.id, path.span); + } + } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} + +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let _task = tcx.dep_graph.in_task(DepNode::UnusedTraitCheck); + let mut visitor = UnusedTraitImportVisitor { tcx: tcx }; + tcx.map.krate().visit_all_item_likes(&mut visitor); +} diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index 7e63fd47d61e1..f575d4d8bab7a 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -15,112 +15,85 @@ 
// done by the orphan and overlap modules. Then we build up various // mappings. That mapping code resides here. - -use middle::def_id::DefId; +use hir::def_id::DefId; use middle::lang_items::UnsizeTraitLangItem; -use middle::subst::{self, Subst}; -use middle::traits; -use middle::ty::{self, TypeFoldable}; -use middle::ty::{ImplOrTraitItemId, ConstTraitItemId}; -use middle::ty::{MethodTraitItemId, TypeTraitItemId, ParameterEnvironment}; -use middle::ty::{Ty, TyBool, TyChar, TyEnum, TyError}; -use middle::ty::{TyParam, TyRawPtr}; -use middle::ty::{TyRef, TyStruct, TyTrait, TyTuple}; -use middle::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt}; -use middle::ty::{TyUint, TyClosure, TyBox, TyBareFn}; -use middle::ty::TyProjection; -use middle::ty::util::CopyImplementationError; +use rustc::ty::subst::Subst; +use rustc::ty::{self, TyCtxt, TypeFoldable}; +use rustc::traits::{self, ObligationCause, Reveal}; +use rustc::ty::ParameterEnvironment; +use rustc::ty::{Ty, TyBool, TyChar, TyError}; +use rustc::ty::{TyParam, TyRawPtr}; +use rustc::ty::{TyRef, TyAdt, TyDynamic, TyNever, TyTuple}; +use rustc::ty::{TyStr, TyArray, TySlice, TyFloat, TyInfer, TyInt}; +use rustc::ty::{TyUint, TyClosure, TyBox, TyFnDef, TyFnPtr}; +use rustc::ty::{TyProjection, TyAnon}; +use rustc::ty::util::CopyImplementationError; use middle::free_region::FreeRegionMap; use CrateCtxt; -use middle::infer::{self, InferCtxt, TypeOrigin, new_infer_ctxt}; -use std::cell::RefCell; -use std::rc::Rc; -use syntax::codemap::Span; -use syntax::parse::token; -use util::nodemap::{DefIdMap, FnvHashMap}; +use rustc::infer::{self, InferCtxt}; +use syntax_pos::Span; use rustc::dep_graph::DepNode; -use rustc::front::map as hir_map; -use rustc_front::intravisit; -use rustc_front::hir::{Item, ItemImpl}; -use rustc_front::hir; +use rustc::hir::map as hir_map; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::{Item, ItemImpl}; +use rustc::hir; mod orphan; mod overlap; mod unsafety; -// Returns the def 
ID of the base type, if there is one. -fn get_base_type_def_id<'a, 'tcx>(inference_context: &InferCtxt<'a, 'tcx>, - span: Span, - ty: Ty<'tcx>) - -> Option { - match ty.sty { - TyEnum(def, _) | - TyStruct(def, _) => { - Some(def.did) - } - - TyTrait(ref t) => { - Some(t.principal_def_id()) - } +struct CoherenceChecker<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + crate_context: &'a CrateCtxt<'a, 'gcx>, + inference_context: InferCtxt<'a, 'gcx, 'tcx>, +} - TyBox(_) => { - inference_context.tcx.lang_items.owned_box() - } +struct CoherenceCheckVisitor<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + cc: &'a CoherenceChecker<'a, 'gcx, 'tcx>, +} - TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | - TyStr | TyArray(..) | TySlice(..) | TyBareFn(..) | TyTuple(..) | - TyParam(..) | TyError | - TyRawPtr(_) | TyRef(_, _) | TyProjection(..) => { - None +impl<'a, 'gcx, 'tcx, 'v> ItemLikeVisitor<'v> for CoherenceCheckVisitor<'a, 'gcx, 'tcx> { + fn visit_item(&mut self, item: &Item) { + if let ItemImpl(..) = item.node { + self.cc.check_implementation(item) } + } - TyInfer(..) | TyClosure(..) => { - // `ty` comes from a user declaration so we should only expect types - // that the user can type - inference_context.tcx.sess.span_bug( - span, - &format!("coherence encountered unexpected type searching for base type: {}", - ty)); - } + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { } } -struct CoherenceChecker<'a, 'tcx: 'a> { - crate_context: &'a CrateCtxt<'a, 'tcx>, - inference_context: InferCtxt<'a, 'tcx>, - inherent_impls: RefCell>>>>, -} +impl<'a, 'gcx, 'tcx> CoherenceChecker<'a, 'gcx, 'tcx> { + // Returns the def ID of the base type, if there is one. + fn get_base_type_def_id(&self, span: Span, ty: Ty<'tcx>) -> Option { + match ty.sty { + TyAdt(def, _) => Some(def.did), -struct CoherenceCheckVisitor<'a, 'tcx: 'a> { - cc: &'a CoherenceChecker<'a, 'tcx> -} + TyDynamic(ref t, ..) 
=> t.principal().map(|p| p.def_id()), -impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for CoherenceCheckVisitor<'a, 'tcx> { - fn visit_item(&mut self, item: &Item) { - if let ItemImpl(..) = item.node { - self.cc.check_implementation(item) + TyBox(_) => self.inference_context.tcx.lang_items.owned_box(), + + TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyStr | TyArray(..) | + TySlice(..) | TyFnDef(..) | TyFnPtr(_) | TyTuple(..) | TyParam(..) | TyError | + TyNever | TyRawPtr(_) | TyRef(..) | TyProjection(..) => None, + + TyInfer(..) | TyClosure(..) | TyAnon(..) => { + // `ty` comes from a user declaration so we should only expect types + // that the user can type + span_bug!(span, + "coherence encountered unexpected type searching for base type: {}", + ty); + } } } -} -impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { fn check(&self) { // Check implementations and traits. This populates the tables // containing the inherent methods and extension methods. It also // builds up the trait inheritance table. - self.crate_context.tcx.visit_all_items_in_krate( + self.crate_context.tcx.visit_all_item_likes_in_krate( DepNode::CoherenceCheckImpl, &mut CoherenceCheckVisitor { cc: self }); - // Copy over the inherent impls we gathered up during the walk into - // the tcx. - let mut tcx_inherent_impls = - self.crate_context.tcx.inherent_impls.borrow_mut(); - for (k, v) in self.inherent_impls.borrow().iter() { - tcx_inherent_impls.insert((*k).clone(), - Rc::new((*v.borrow()).clone())); - } - // Populate the table of destructors. It might seem a bit strange to // do this here, but it's actually the most convenient place, since // the coherence tables contain the trait -> type mappings. 
@@ -137,13 +110,11 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { fn check_implementation(&self, item: &Item) { let tcx = self.crate_context.tcx; let impl_did = tcx.map.local_def_id(item.id); - let self_type = tcx.lookup_item_type(impl_did); + let self_type = tcx.item_type(impl_did); // If there are no traits, then this implementation must have a // base type. - let impl_items = self.create_impl_from_item(item); - if let Some(trait_ref) = self.crate_context.tcx.impl_trait_ref(impl_did) { debug!("(checking implementation) adding impl for trait '{:?}', item '{}'", trait_ref, @@ -162,114 +133,82 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { } else { // Skip inherent impls where the self type is an error // type. This occurs with e.g. resolve failures (#30589). - if self_type.ty.references_error() { + if self_type.references_error() { return; } // Add the implementation to the mapping from implementation to base // type def ID, if there is a base type for this implementation and // the implementation does not have any associated traits. 
- if let Some(base_type_def_id) = get_base_type_def_id( - &self.inference_context, item.span, self_type.ty) { - self.add_inherent_impl(base_type_def_id, impl_did); + if let Some(base_def_id) = self.get_base_type_def_id(item.span, self_type) { + self.add_inherent_impl(base_def_id, impl_did); } } - - tcx.impl_items.borrow_mut().insert(impl_did, impl_items); } fn add_inherent_impl(&self, base_def_id: DefId, impl_def_id: DefId) { - match self.inherent_impls.borrow().get(&base_def_id) { - Some(implementation_list) => { - implementation_list.borrow_mut().push(impl_def_id); - return; - } - None => {} - } - - self.inherent_impls.borrow_mut().insert( - base_def_id, - Rc::new(RefCell::new(vec!(impl_def_id)))); + let tcx = self.crate_context.tcx; + tcx.inherent_impls.borrow_mut().push(base_def_id, impl_def_id); } - fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'tcx>, impl_def_id: DefId) { + fn add_trait_impl(&self, impl_trait_ref: ty::TraitRef<'gcx>, impl_def_id: DefId) { debug!("add_trait_impl: impl_trait_ref={:?} impl_def_id={:?}", - impl_trait_ref, impl_def_id); + impl_trait_ref, + impl_def_id); let trait_def = self.crate_context.tcx.lookup_trait_def(impl_trait_ref.def_id); - trait_def.record_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref); - } - - // Converts an implementation in the AST to a vector of items. - fn create_impl_from_item(&self, item: &Item) -> Vec { - match item.node { - ItemImpl(_, _, _, _, _, ref impl_items) => { - impl_items.iter().map(|impl_item| { - let impl_def_id = self.crate_context.tcx.map.local_def_id(impl_item.id); - match impl_item.node { - hir::ImplItemKind::Const(..) => { - ConstTraitItemId(impl_def_id) - } - hir::ImplItemKind::Method(..) 
=> { - MethodTraitItemId(impl_def_id) - } - hir::ImplItemKind::Type(_) => { - TypeTraitItemId(impl_def_id) - } - } - }).collect() - } - _ => { - self.crate_context.tcx.sess.span_bug(item.span, - "can't convert a non-impl \ - to an impl"); - } - } + trait_def.record_local_impl(self.crate_context.tcx, impl_def_id, impl_trait_ref); } - // // Destructors // fn populate_destructors(&self) { let tcx = self.crate_context.tcx; let drop_trait = match tcx.lang_items.drop_trait() { - Some(id) => id, None => { return } + Some(id) => id, + None => return, }; tcx.populate_implementations_for_trait_if_necessary(drop_trait); let drop_trait = tcx.lookup_trait_def(drop_trait); - let impl_items = tcx.impl_items.borrow(); - drop_trait.for_each_impl(tcx, |impl_did| { - let items = impl_items.get(&impl_did).unwrap(); + let items = tcx.associated_item_def_ids(impl_did); if items.is_empty() { // We'll error out later. For now, just don't ICE. return; } let method_def_id = items[0]; - let self_type = tcx.lookup_item_type(impl_did); - match self_type.ty.sty { - ty::TyEnum(type_def, _) | - ty::TyStruct(type_def, _) => { - type_def.set_destructor(method_def_id.def_id()); + let self_type = tcx.item_type(impl_did); + match self_type.sty { + ty::TyAdt(type_def, _) => { + type_def.set_destructor(method_def_id); } _ => { // Destructors only work on nominal types. 
if let Some(impl_node_id) = tcx.map.as_local_node_id(impl_did) { match tcx.map.find(impl_node_id) { Some(hir_map::NodeItem(item)) => { - span_err!(tcx.sess, item.span, E0120, - "the Drop trait may only be implemented on structures"); + let span = match item.node { + ItemImpl(.., ref ty, _) => ty.span, + _ => item.span, + }; + struct_span_err!(tcx.sess, + span, + E0120, + "the Drop trait may only be implemented on \ + structures") + .span_label(span, + &format!("implementing Drop requires a struct")) + .emit(); } _ => { - tcx.sess.bug("didn't find impl in ast \ - map"); + bug!("didn't find impl in ast map"); } } } else { - tcx.sess.bug("found external impl of Drop trait on \ - something other than a struct"); + bug!("found external impl of Drop trait on \ + something other than a struct"); } } } @@ -287,55 +226,77 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { let copy_trait = tcx.lookup_trait_def(copy_trait); copy_trait.for_each_impl(tcx, |impl_did| { - debug!("check_implementations_of_copy: impl_did={:?}", - impl_did); + debug!("check_implementations_of_copy: impl_did={:?}", impl_did); let impl_node_id = if let Some(n) = tcx.map.as_local_node_id(impl_did) { n } else { debug!("check_implementations_of_copy(): impl not in this \ crate"); - return + return; }; - let self_type = tcx.lookup_item_type(impl_did); + let self_type = tcx.item_type(impl_did); debug!("check_implementations_of_copy: self_type={:?} (bound)", self_type); let span = tcx.map.span(impl_node_id); let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); - let self_type = self_type.ty.subst(tcx, ¶m_env.free_substs); + let self_type = self_type.subst(tcx, ¶m_env.free_substs); assert!(!self_type.has_escaping_regions()); debug!("check_implementations_of_copy: self_type={:?} (free)", self_type); - match param_env.can_type_implement_copy(self_type, span) { + match param_env.can_type_implement_copy(tcx, self_type, span) { Ok(()) => {} Err(CopyImplementationError::InfrigingField(name)) => { - 
span_err!(tcx.sess, span, E0204, - "the trait `Copy` may not be \ - implemented for this type; field \ - `{}` does not implement `Copy`", - name) + struct_span_err!(tcx.sess, + span, + E0204, + "the trait `Copy` may not be implemented for this type") + .span_label(span, &format!("field `{}` does not implement `Copy`", name)) + .emit() } Err(CopyImplementationError::InfrigingVariant(name)) => { - span_err!(tcx.sess, span, E0205, - "the trait `Copy` may not be \ - implemented for this type; variant \ - `{}` does not implement `Copy`", - name) + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., Some(ref tr), _, _) = item.node { + tr.path.span + } else { + span + }; + + struct_span_err!(tcx.sess, + span, + E0205, + "the trait `Copy` may not be implemented for this type") + .span_label(span, + &format!("variant `{}` does not implement `Copy`", name)) + .emit() } Err(CopyImplementationError::NotAnAdt) => { - span_err!(tcx.sess, span, E0206, - "the trait `Copy` may not be implemented \ - for this type; type is not a structure or \ - enumeration") + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., ref ty, _) = item.node { + ty.span + } else { + span + }; + + struct_span_err!(tcx.sess, + span, + E0206, + "the trait `Copy` may not be implemented for this type") + .span_label(span, &format!("type is not a structure or enumeration")) + .emit(); } Err(CopyImplementationError::HasDestructor) => { - span_err!(tcx.sess, span, E0184, - "the trait `Copy` may not be implemented for this type; \ - the type has a destructor"); + struct_span_err!(tcx.sess, + span, + E0184, + "the trait `Copy` may not be implemented for this type; the \ + type has a destructor") + .span_label(span, &format!("Copy not allowed on types with destructors")) + .emit(); } } }); @@ -369,11 +330,12 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { return; }; - let source = tcx.lookup_item_type(impl_did).ty; + let source = tcx.item_type(impl_did); let 
trait_ref = self.crate_context.tcx.impl_trait_ref(impl_did).unwrap(); - let target = *trait_ref.substs.types.get(subst::TypeSpace, 0); + let target = trait_ref.substs.type_at(1); debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (bound)", - source, target); + source, + target); let span = tcx.map.span(impl_node_id); let param_env = ParameterEnvironment::for_item(tcx, impl_node_id); @@ -382,127 +344,158 @@ impl<'a, 'tcx> CoherenceChecker<'a, 'tcx> { assert!(!source.has_escaping_regions()); debug!("check_implementations_of_coerce_unsized: {:?} -> {:?} (free)", - source, target); - - let infcx = new_infer_ctxt(tcx, &tcx.tables, Some(param_env)); + source, + target); + + tcx.infer_ctxt(None, Some(param_env), Reveal::ExactMatch).enter(|infcx| { + let cause = ObligationCause::misc(span, impl_node_id); + let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>, + mt_b: ty::TypeAndMut<'gcx>, + mk_ptr: &Fn(Ty<'gcx>) -> Ty<'gcx>| { + if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) { + infcx.report_mismatched_types(&cause, + mk_ptr(mt_b.ty), + target, + ty::error::TypeError::Mutability); + } + (mt_a.ty, mt_b.ty, unsize_trait, None) + }; + let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) { + (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None), + + (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => { + infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a); + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) + } - let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>, mt_b: ty::TypeAndMut<'tcx>, - mk_ptr: &Fn(Ty<'tcx>) -> Ty<'tcx>| { - if (mt_a.mutbl, mt_b.mutbl) == (hir::MutImmutable, hir::MutMutable) { - infcx.report_mismatched_types(span, mk_ptr(mt_b.ty), - target, &ty::error::TypeError::Mutability); - } - (mt_a.ty, mt_b.ty, unsize_trait, None) - }; - let (source, target, trait_def_id, kind) = match (&source.sty, &target.sty) { - (&ty::TyBox(a), &ty::TyBox(b)) => (a, b, unsize_trait, None), + (&ty::TyRef(_, mt_a), 
&ty::TyRawPtr(mt_b)) | + (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => { + check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) + } - (&ty::TyRef(r_a, mt_a), &ty::TyRef(r_b, mt_b)) => { - infer::mk_subr(&infcx, infer::RelateObjectBound(span), *r_b, *r_a); - check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty)) - } + (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) + if def_a.is_struct() && def_b.is_struct() => { + if def_a != def_b { + let source_path = tcx.item_path_str(def_a.did); + let target_path = tcx.item_path_str(def_b.did); + span_err!(tcx.sess, + span, + E0377, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with the same \ + definition; expected {}, found {}", + source_path, + target_path); + return; + } - (&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) | - (&ty::TyRawPtr(mt_a), &ty::TyRawPtr(mt_b)) => { - check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty)) - } + let fields = &def_a.struct_variant().fields; + let diff_fields = fields.iter() + .enumerate() + .filter_map(|(i, f)| { + let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b)); + + if tcx.item_type(f.did).is_phantom_data() { + // Ignore PhantomData fields + return None; + } + + // Ignore fields that aren't significantly changed + if let Ok(ok) = infcx.sub_types(false, &cause, b, a) { + if ok.obligations.is_empty() { + return None; + } + } + + // Collect up all fields that were significantly changed + // i.e. 
those that contain T in coerce_unsized T -> U + Some((i, a, b)) + }) + .collect::>(); + + if diff_fields.is_empty() { + span_err!(tcx.sess, + span, + E0374, + "the trait `CoerceUnsized` may only be implemented \ + for a coercion between structures with one field \ + being coerced, none found"); + return; + } else if diff_fields.len() > 1 { + let item = tcx.map.expect_item(impl_node_id); + let span = if let ItemImpl(.., Some(ref t), _, _) = item.node { + t.path.span + } else { + tcx.map.span(impl_node_id) + }; + + let mut err = struct_span_err!(tcx.sess, + span, + E0375, + "implementing the trait \ + `CoerceUnsized` requires multiple \ + coercions"); + err.note("`CoerceUnsized` may only be implemented for \ + a coercion between structures with one field being coerced"); + err.note(&format!("currently, {} fields need coercions: {}", + diff_fields.len(), + diff_fields.iter() + .map(|&(i, a, b)| { + format!("{} ({} to {})", fields[i].name, a, b) + }) + .collect::>() + .join(", "))); + err.span_label(span, &format!("requires multiple coercions")); + err.emit(); + return; + } - (&ty::TyStruct(def_a, substs_a), &ty::TyStruct(def_b, substs_b)) => { - if def_a != def_b { - let source_path = tcx.item_path_str(def_a.did); - let target_path = tcx.item_path_str(def_b.did); - span_err!(tcx.sess, span, E0377, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with the same \ - definition; expected {}, found {}", - source_path, target_path); - return; + let (i, a, b) = diff_fields[0]; + let kind = ty::adjustment::CustomCoerceUnsized::Struct(i); + (a, b, coerce_unsized_trait, Some(kind)) } - let origin = TypeOrigin::Misc(span); - let fields = &def_a.struct_variant().fields; - let diff_fields = fields.iter().enumerate().filter_map(|(i, f)| { - let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b)); - - if f.unsubst_ty().is_phantom_data() { - // Ignore PhantomData fields - None - } else if infcx.sub_types(false, origin, b, a).is_ok() { - // 
Ignore fields that aren't significantly changed - None - } else { - // Collect up all fields that were significantly changed - // i.e. those that contain T in coerce_unsized T -> U - Some((i, a, b)) - } - }).collect::>(); - - if diff_fields.is_empty() { - span_err!(tcx.sess, span, E0374, + _ => { + span_err!(tcx.sess, + span, + E0376, "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with one field \ - being coerced, none found"); - return; - } else if diff_fields.len() > 1 { - span_err!(tcx.sess, span, E0375, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with one field \ - being coerced, but {} fields need coercions: {}", - diff_fields.len(), diff_fields.iter().map(|&(i, a, b)| { - let name = fields[i].name; - format!("{} ({} to {})", - if name == token::special_names::unnamed_field { - i.to_string() - } else { - name.to_string() - }, a, b) - }).collect::>().join(", ")); + for a coercion between structures"); return; } + }; - let (i, a, b) = diff_fields[0]; - let kind = ty::adjustment::CustomCoerceUnsized::Struct(i); - (a, b, coerce_unsized_trait, Some(kind)) - } - - _ => { - span_err!(tcx.sess, span, E0376, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures"); - return; - } - }; + let mut fulfill_cx = traits::FulfillmentContext::new(); - let mut fulfill_cx = infcx.fulfillment_cx.borrow_mut(); + // Register an obligation for `A: Trait`. + let cause = traits::ObligationCause::misc(span, impl_node_id); + let predicate = + tcx.predicate_for_trait_def(cause, trait_def_id, 0, source, &[target]); + fulfill_cx.register_predicate_obligation(&infcx, predicate); - // Register an obligation for `A: Trait`. 
- let cause = traits::ObligationCause::misc(span, impl_node_id); - let predicate = traits::predicate_for_trait_def(tcx, cause, trait_def_id, - 0, source, vec![target]); - fulfill_cx.register_predicate_obligation(&infcx, predicate); - - // Check that all transitive obligations are satisfied. - if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) { - traits::report_fulfillment_errors(&infcx, &errors); - } + // Check that all transitive obligations are satisfied. + if let Err(errors) = fulfill_cx.select_all_or_error(&infcx) { + infcx.report_fulfillment_errors(&errors); + } - // Finally, resolve all regions. - let mut free_regions = FreeRegionMap::new(); - free_regions.relate_free_regions_from_predicates(tcx, &infcx.parameter_environment - .caller_bounds); - infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id); + // Finally, resolve all regions. + let mut free_regions = FreeRegionMap::new(); + free_regions.relate_free_regions_from_predicates(&infcx.parameter_environment + .caller_bounds); + infcx.resolve_regions_and_report_errors(&free_regions, impl_node_id); - if let Some(kind) = kind { - tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind); - } + if let Some(kind) = kind { + tcx.custom_coerce_unsized_kinds.borrow_mut().insert(impl_did, kind); + } + }); }); } } -fn enforce_trait_manually_implementable(tcx: &ty::ctxt, sp: Span, trait_def_id: DefId) { +fn enforce_trait_manually_implementable(tcx: TyCtxt, sp: Span, trait_def_id: DefId) { if tcx.sess.features.borrow().unboxed_closures { // the feature gate allows all of them - return + return; } let did = Some(trait_def_id); let li = &tcx.lang_items; @@ -514,27 +507,28 @@ fn enforce_trait_manually_implementable(tcx: &ty::ctxt, sp: Span, trait_def_id: } else if did == li.fn_once_trait() { "FnOnce" } else { - return // everything OK + return; // everything OK }; let mut err = struct_span_err!(tcx.sess, sp, E0183, "manual implementations of `{}` are experimental", trait_name); - 
fileline_help!(&mut err, sp, - "add `#![feature(unboxed_closures)]` to the crate attributes to enable"); + help!(&mut err, + "add `#![feature(unboxed_closures)]` to the crate attributes to enable"); err.emit(); } -pub fn check_coherence(crate_context: &CrateCtxt) { - let _task = crate_context.tcx.dep_graph.in_task(DepNode::Coherence); - let infcx = new_infer_ctxt(crate_context.tcx, &crate_context.tcx.tables, None); - CoherenceChecker { - crate_context: crate_context, - inference_context: infcx, - inherent_impls: RefCell::new(FnvHashMap()), - }.check(); - unsafety::check(crate_context.tcx); - orphan::check(crate_context.tcx); - overlap::check(crate_context.tcx); +pub fn check_coherence(ccx: &CrateCtxt) { + let _task = ccx.tcx.dep_graph.in_task(DepNode::Coherence); + ccx.tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| { + CoherenceChecker { + crate_context: ccx, + inference_context: infcx, + } + .check(); + }); + unsafety::check(ccx.tcx); + orphan::check(ccx.tcx); + overlap::check(ccx.tcx); } diff --git a/src/librustc_typeck/coherence/orphan.rs b/src/librustc_typeck/coherence/orphan.rs index 69eb7f51f3785..2e8206ec95967 100644 --- a/src/librustc_typeck/coherence/orphan.rs +++ b/src/librustc_typeck/coherence/orphan.rs @@ -11,32 +11,36 @@ //! Orphan checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. 
-use middle::cstore::LOCAL_CRATE; -use middle::def_id::DefId; -use middle::traits; -use middle::ty; +use hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::traits; +use rustc::ty::{self, TyCtxt}; use syntax::ast; -use syntax::codemap::Span; +use syntax_pos::Span; use rustc::dep_graph::DepNode; -use rustc_front::intravisit; -use rustc_front::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir; -pub fn check(tcx: &ty::ctxt) { +pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let mut orphan = OrphanChecker { tcx: tcx }; - tcx.visit_all_items_in_krate(DepNode::CoherenceOrphanCheck, &mut orphan); + tcx.visit_all_item_likes_in_krate(DepNode::CoherenceOrphanCheck, &mut orphan); } -struct OrphanChecker<'cx, 'tcx:'cx> { - tcx: &'cx ty::ctxt<'tcx> +struct OrphanChecker<'cx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'tcx, 'tcx>, } impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { fn check_def_id(&self, item: &hir::Item, def_id: DefId) { if def_id.krate != LOCAL_CRATE { - span_err!(self.tcx.sess, item.span, E0116, - "cannot define inherent `impl` for a type outside of the \ - crate where the type is defined; define and implement \ - a trait or new type instead"); + struct_span_err!(self.tcx.sess, + item.span, + E0116, + "cannot define inherent `impl` for a type outside of the crate \ + where the type is defined") + .span_label(item.span, + &format!("impl for type defined outside of crate.")) + .note("define and implement a trait or new type instead") + .emit(); } } @@ -47,11 +51,17 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { ty: &str, span: Span) { match lang_def_id { - Some(lang_def_id) if lang_def_id == impl_def_id => { /* OK */ }, + Some(lang_def_id) if lang_def_id == impl_def_id => { + // OK + } _ => { - struct_span_err!(self.tcx.sess, span, E0390, - "only a single inherent implementation marked with `#[lang = \"{}\"]` \ - is allowed for the `{}` primitive", lang, ty) + struct_span_err!(self.tcx.sess, + span, + E0390, + "only a single inherent implementation marked 
with `#[lang = \ + \"{}\"]` is allowed for the `{}` primitive", + lang, + ty) .span_help(span, "consider using a trait to implement these methods") .emit(); } @@ -66,19 +76,18 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { fn check_item(&self, item: &hir::Item) { let def_id = self.tcx.map.local_def_id(item.id); match item.node { - hir::ItemImpl(_, _, _, None, _, _) => { + hir::ItemImpl(.., None, ref ty, _) => { // For inherent impls, self type must be a nominal type // defined in this crate. debug!("coherence2::orphan check: inherent impl {}", self.tcx.map.node_to_string(item.id)); - let self_ty = self.tcx.lookup_item_type(def_id).ty; + let self_ty = self.tcx.item_type(def_id); match self_ty.sty { - ty::TyEnum(def, _) | - ty::TyStruct(def, _) => { + ty::TyAdt(def, _) => { self.check_def_id(item, def.did); } - ty::TyTrait(ref data) => { - self.check_def_id(item, data.principal_def_id()); + ty::TyDynamic(ref data, ..) if data.principal().is_some() => { + self.check_def_id(item, data.principal().unwrap().def_id()); } ty::TyBox(..) 
=> { match self.tcx.lang_items.require_owned_box() { @@ -121,84 +130,84 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { "*mut T", item.span); } - ty::TyInt(ast::TyI8) => { + ty::TyInt(ast::IntTy::I8) => { self.check_primitive_impl(def_id, self.tcx.lang_items.i8_impl(), "i8", "i8", item.span); } - ty::TyInt(ast::TyI16) => { + ty::TyInt(ast::IntTy::I16) => { self.check_primitive_impl(def_id, self.tcx.lang_items.i16_impl(), "i16", "i16", item.span); } - ty::TyInt(ast::TyI32) => { + ty::TyInt(ast::IntTy::I32) => { self.check_primitive_impl(def_id, self.tcx.lang_items.i32_impl(), "i32", "i32", item.span); } - ty::TyInt(ast::TyI64) => { + ty::TyInt(ast::IntTy::I64) => { self.check_primitive_impl(def_id, self.tcx.lang_items.i64_impl(), "i64", "i64", item.span); } - ty::TyInt(ast::TyIs) => { + ty::TyInt(ast::IntTy::Is) => { self.check_primitive_impl(def_id, self.tcx.lang_items.isize_impl(), "isize", "isize", item.span); } - ty::TyUint(ast::TyU8) => { + ty::TyUint(ast::UintTy::U8) => { self.check_primitive_impl(def_id, self.tcx.lang_items.u8_impl(), "u8", "u8", item.span); } - ty::TyUint(ast::TyU16) => { + ty::TyUint(ast::UintTy::U16) => { self.check_primitive_impl(def_id, self.tcx.lang_items.u16_impl(), "u16", "u16", item.span); } - ty::TyUint(ast::TyU32) => { + ty::TyUint(ast::UintTy::U32) => { self.check_primitive_impl(def_id, self.tcx.lang_items.u32_impl(), "u32", "u32", item.span); } - ty::TyUint(ast::TyU64) => { + ty::TyUint(ast::UintTy::U64) => { self.check_primitive_impl(def_id, self.tcx.lang_items.u64_impl(), "u64", "u64", item.span); } - ty::TyUint(ast::TyUs) => { + ty::TyUint(ast::UintTy::Us) => { self.check_primitive_impl(def_id, self.tcx.lang_items.usize_impl(), "usize", "usize", item.span); } - ty::TyFloat(ast::TyF32) => { + ty::TyFloat(ast::FloatTy::F32) => { self.check_primitive_impl(def_id, self.tcx.lang_items.f32_impl(), "f32", "f32", item.span); } - ty::TyFloat(ast::TyF64) => { + ty::TyFloat(ast::FloatTy::F64) => { self.check_primitive_impl(def_id, 
self.tcx.lang_items.f64_impl(), "f64", @@ -209,32 +218,42 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { return; } _ => { - span_err!(self.tcx.sess, item.span, E0118, - "no base type found for inherent implementation; \ - implement a trait or new type instead"); + struct_span_err!(self.tcx.sess, + ty.span, + E0118, + "no base type found for inherent implementation") + .span_label(ty.span, &format!("impl requires a base type")) + .note(&format!("either implement a trait on it or create a newtype \ + to wrap it instead")) + .emit(); return; } } } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { // "Trait" impl debug!("coherence2::orphan check: trait impl {}", self.tcx.map.node_to_string(item.id)); let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap(); let trait_def_id = trait_ref.def_id; match traits::orphan_check(self.tcx, def_id) { - Ok(()) => { } + Ok(()) => {} Err(traits::OrphanCheckErr::NoLocalInputType) => { - span_err!( - self.tcx.sess, item.span, E0117, - "the impl does not reference any \ - types defined in this crate; \ - only traits defined in the current crate can be \ - implemented for arbitrary types"); + struct_span_err!(self.tcx.sess, + item.span, + E0117, + "only traits defined in the current crate can be \ + implemented for arbitrary types") + .span_label(item.span, &format!("impl doesn't use types inside crate")) + .note(&format!("the impl does not reference any types defined in \ + this crate")) + .emit(); return; } Err(traits::OrphanCheckErr::UncoveredTy(param_ty)) => { - span_err!(self.tcx.sess, item.span, E0210, + span_err!(self.tcx.sess, + item.span, + E0210, "type parameter `{}` must be used as the type parameter for \ some local type (e.g. 
`MyStruct`); only traits defined in \ the current crate can be implemented for a type parameter", @@ -280,18 +299,13 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { trait_ref, trait_def_id, self.tcx.trait_has_default_impl(trait_def_id)); - if - self.tcx.trait_has_default_impl(trait_def_id) && - trait_def_id.krate != LOCAL_CRATE - { + if self.tcx.trait_has_default_impl(trait_def_id) && + trait_def_id.krate != LOCAL_CRATE { let self_ty = trait_ref.self_ty(); let opt_self_def_id = match self_ty.sty { - ty::TyStruct(self_def, _) | ty::TyEnum(self_def, _) => - Some(self_def.did), - ty::TyBox(..) => - self.tcx.lang_items.owned_box(), - _ => - None + ty::TyAdt(self_def, _) => Some(self_def.did), + ty::TyBox(..) => self.tcx.lang_items.owned_box(), + _ => None, }; let msg = match opt_self_def_id { @@ -303,20 +317,17 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { if self_def_id.is_local() { None } else { - Some(format!( - "cross-crate traits with a default impl, like `{}`, \ - can only be implemented for a struct/enum type \ - defined in the current crate", - self.tcx.item_path_str(trait_def_id))) + Some(format!("cross-crate traits with a default impl, like `{}`, \ + can only be implemented for a struct/enum type \ + defined in the current crate", + self.tcx.item_path_str(trait_def_id))) } } _ => { - Some(format!( - "cross-crate traits with a default impl, like `{}`, \ - can only be implemented for a struct/enum type, \ - not `{}`", - self.tcx.item_path_str(trait_def_id), - self_ty)) + Some(format!("cross-crate traits with a default impl, like `{}`, can \ + only be implemented for a struct/enum type, not `{}`", + self.tcx.item_path_str(trait_def_id), + self_ty)) } }; @@ -328,25 +339,37 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { // Disallow *all* explicit impls of `Sized` and `Unsize` for now. 
if Some(trait_def_id) == self.tcx.lang_items.sized_trait() { - span_err!(self.tcx.sess, item.span, E0322, - "explicit impls for the `Sized` trait are not permitted"); + struct_span_err!(self.tcx.sess, + item.span, + E0322, + "explicit impls for the `Sized` trait are not permitted") + .span_label(item.span, &format!("impl of 'Sized' not allowed")) + .emit(); return; } if Some(trait_def_id) == self.tcx.lang_items.unsize_trait() { - span_err!(self.tcx.sess, item.span, E0328, + span_err!(self.tcx.sess, + item.span, + E0328, "explicit impls for the `Unsize` trait are not permitted"); return; } } - hir::ItemDefaultImpl(..) => { + hir::ItemDefaultImpl(_, ref item_trait_ref) => { // "Trait" impl debug!("coherence2::orphan check: default trait impl {}", self.tcx.map.node_to_string(item.id)); let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap(); if trait_ref.def_id.krate != LOCAL_CRATE { - span_err!(self.tcx.sess, item.span, E0318, - "cannot create default implementations for traits outside the \ - crate they're defined in; define a new trait instead"); + struct_span_err!(self.tcx.sess, + item_trait_ref.path.span, + E0318, + "cannot create default implementations for traits outside \ + the crate they're defined in; define a new trait instead") + .span_label(item_trait_ref.path.span, + &format!("`{}` trait not defined in this crate", + item_trait_ref.path)) + .emit(); return; } } @@ -357,8 +380,11 @@ impl<'cx, 'tcx> OrphanChecker<'cx, 'tcx> { } } -impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OrphanChecker<'cx, 'tcx> { +impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { self.check_item(item); } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } } diff --git a/src/librustc_typeck/coherence/overlap.rs b/src/librustc_typeck/coherence/overlap.rs index 470e954781f8b..815811675a54b 100644 --- a/src/librustc_typeck/coherence/overlap.rs +++ b/src/librustc_typeck/coherence/overlap.rs @@ -9,175 
+9,102 @@ // except according to those terms. //! Overlap: No two impls for the same trait are implemented for the -//! same type. +//! same type. Likewise, no two inherent impls for a given type +//! constructor provide a method with the same name. -use middle::cstore::{CrateStore, LOCAL_CRATE}; -use middle::def_id::DefId; -use middle::traits; -use middle::ty; -use middle::infer; +use hir::def_id::DefId; +use rustc::traits::{self, Reveal}; +use rustc::ty::{self, TyCtxt, TypeFoldable}; use syntax::ast; -use syntax::codemap::Span; use rustc::dep_graph::DepNode; -use rustc_front::hir; -use rustc_front::intravisit; -use util::nodemap::{DefIdMap, DefIdSet}; +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use util::nodemap::DefIdMap; +use lint; -pub fn check(tcx: &ty::ctxt) { - let mut overlap = OverlapChecker { tcx: tcx, - traits_checked: DefIdSet(), - default_impls: DefIdMap() }; +pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut overlap = OverlapChecker { + tcx: tcx, + default_impls: DefIdMap(), + }; // this secondary walk specifically checks for some other cases, // like defaulted traits, for which additional overlap rules exist - tcx.visit_all_items_in_krate(DepNode::CoherenceOverlapCheckSpecial, &mut overlap); + tcx.visit_all_item_likes_in_krate(DepNode::CoherenceOverlapCheckSpecial, &mut overlap); } -struct OverlapChecker<'cx, 'tcx:'cx> { - tcx: &'cx ty::ctxt<'tcx>, - - // The set of traits where we have checked for overlap. This is - // used to avoid checking the same trait twice. - // - // NB. It's ok to skip tracking this set because we fully - // encapsulate it, and we always create a task - // (`CoherenceOverlapCheck`) corresponding to each entry. 
- traits_checked: DefIdSet, +struct OverlapChecker<'cx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'tcx, 'tcx>, // maps from a trait def-id to an impl id default_impls: DefIdMap, } impl<'cx, 'tcx> OverlapChecker<'cx, 'tcx> { - fn check_for_overlapping_impls_of_trait(&mut self, trait_def_id: DefId) { - debug!("check_for_overlapping_impls_of_trait(trait_def_id={:?})", - trait_def_id); - - let _task = self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id)); - if !self.traits_checked.insert(trait_def_id) { - return; + fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId) { + #[derive(Copy, Clone, PartialEq)] + enum Namespace { + Type, + Value, } - let trait_def = self.tcx.lookup_trait_def(trait_def_id); - self.tcx.populate_implementations_for_trait_if_necessary( - trait_def.trait_ref.def_id); - - // We should already know all impls of this trait, so these - // borrows are safe. - let (blanket_impls, nonblanket_impls) = trait_def.borrow_impl_lists(self.tcx); - - // Conflicts can only occur between a blanket impl and another impl, - // or between 2 non-blanket impls of the same kind. + let name_and_namespace = |def_id| { + let item = self.tcx.associated_item(def_id); + (item.name, match item.kind { + ty::AssociatedKind::Type => Namespace::Type, + ty::AssociatedKind::Const | + ty::AssociatedKind::Method => Namespace::Value, + }) + }; - for (i, &impl1_def_id) in blanket_impls.iter().enumerate() { - for &impl2_def_id in &blanket_impls[(i+1)..] { - self.check_if_impls_overlap(impl1_def_id, - impl2_def_id); - } + let impl_items1 = self.tcx.associated_item_def_ids(impl1); + let impl_items2 = self.tcx.associated_item_def_ids(impl2); - for v in nonblanket_impls.values() { - for &impl2_def_id in v { - self.check_if_impls_overlap(impl1_def_id, - impl2_def_id); - } - } - } + for &item1 in &impl_items1[..] 
{ + let (name, namespace) = name_and_namespace(item1); - for impl_group in nonblanket_impls.values() { - for (i, &impl1_def_id) in impl_group.iter().enumerate() { - for &impl2_def_id in &impl_group[(i+1)..] { - self.check_if_impls_overlap(impl1_def_id, - impl2_def_id); + for &item2 in &impl_items2[..] { + if (name, namespace) == name_and_namespace(item2) { + let msg = format!("duplicate definitions with name `{}`", name); + let node_id = self.tcx.map.as_local_node_id(item1).unwrap(); + self.tcx.sess.add_lint(lint::builtin::OVERLAPPING_INHERENT_IMPLS, + node_id, + self.tcx.span_of_impl(item1).unwrap(), + msg); } } } } - // We need to coherently pick which impl will be displayed - // as causing the error message, and it must be the in the current - // crate. Just pick the smaller impl in the file. - fn order_impls(&self, impl1_def_id: DefId, impl2_def_id: DefId) - -> Option<(DefId, DefId)> { - if impl1_def_id.krate != LOCAL_CRATE { - if impl2_def_id.krate != LOCAL_CRATE { - // we don't need to check impls if both are external; - // that's the other crate's job. 
- None - } else { - Some((impl2_def_id, impl1_def_id)) - } - } else if impl2_def_id.krate != LOCAL_CRATE { - Some((impl1_def_id, impl2_def_id)) - } else if impl1_def_id < impl2_def_id { - Some((impl1_def_id, impl2_def_id)) - } else { - Some((impl2_def_id, impl1_def_id)) - } - } - + fn check_for_overlapping_inherent_impls(&self, ty_def_id: DefId) { + let _task = self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapInherentCheck(ty_def_id)); - fn check_if_impls_overlap(&self, - impl1_def_id: DefId, - impl2_def_id: DefId) - { - if let Some((impl1_def_id, impl2_def_id)) = self.order_impls( - impl1_def_id, impl2_def_id) - { - debug!("check_if_impls_overlap({:?}, {:?})", - impl1_def_id, - impl2_def_id); - - let infcx = infer::new_infer_ctxt(self.tcx, &self.tcx.tables, None); - if let Some(trait_ref) = traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id) { - self.report_overlap_error(impl1_def_id, impl2_def_id, trait_ref); - } - } - } - - fn report_overlap_error(&self, - impl1: DefId, - impl2: DefId, - trait_ref: ty::TraitRef) - { - // only print the Self type if it's concrete; otherwise, it's not adding much information. 
- let self_type = { - trait_ref.substs.self_ty().and_then(|ty| { - if let ty::TyInfer(_) = ty.sty { - None - } else { - Some(format!(" for type `{}`", ty)) - } - }).unwrap_or(String::new()) + let inherent_impls = self.tcx.inherent_impls.borrow(); + let impls = match inherent_impls.get(&ty_def_id) { + Some(impls) => impls, + None => return, }; - let mut err = struct_span_err!(self.tcx.sess, self.span_of_impl(impl1), E0119, - "conflicting implementations of trait `{}`{}:", - trait_ref, - self_type); - - if impl2.is_local() { - span_note!(&mut err, self.span_of_impl(impl2), - "conflicting implementation is here:"); - } else { - let cname = self.tcx.sess.cstore.crate_name(impl2.krate); - err.note(&format!("conflicting implementation in crate `{}`", cname)); + for (i, &impl1_def_id) in impls.iter().enumerate() { + for &impl2_def_id in &impls[(i + 1)..] { + self.tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| { + if traits::overlapping_impls(&infcx, impl1_def_id, impl2_def_id).is_some() { + self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id) + } + }); + } } - err.emit(); - } - - fn span_of_impl(&self, impl_did: DefId) -> Span { - let node_id = self.tcx.map.as_local_node_id(impl_did).unwrap(); - self.tcx.map.span(node_id) } } - -impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> { +impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OverlapChecker<'cx, 'tcx> { fn visit_item(&mut self, item: &'v hir::Item) { match item.node { - hir::ItemTrait(..) => { - let trait_def_id = self.tcx.map.local_def_id(item.id); - self.check_for_overlapping_impls_of_trait(trait_def_id); + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) => { + let type_def_id = self.tcx.map.local_def_id(item.id); + self.check_for_overlapping_inherent_impls(type_def_id); } hir::ItemDefaultImpl(..) 
=> { @@ -187,50 +114,97 @@ impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for OverlapChecker<'cx, 'tcx> { let impl_def_id = self.tcx.map.local_def_id(item.id); let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap(); - self.check_for_overlapping_impls_of_trait(trait_ref.def_id); - let prev_default_impl = self.default_impls.insert(trait_ref.def_id, item.id); - match prev_default_impl { - Some(prev_id) => { - self.report_overlap_error(impl_def_id, - self.tcx.map.local_def_id(prev_id), - trait_ref); - } - None => { } + if let Some(prev_id) = prev_default_impl { + let mut err = struct_span_err!(self.tcx.sess, + self.tcx.span_of_impl(impl_def_id).unwrap(), + E0521, + "redundant default implementations of trait \ + `{}`:", + trait_ref); + err.span_note(self.tcx + .span_of_impl(self.tcx.map.local_def_id(prev_id)) + .unwrap(), + "redundant implementation is here:"); + err.emit(); } } - hir::ItemImpl(_, _, _, Some(_), _, _) => { + hir::ItemImpl(.., Some(_), _, _) => { let impl_def_id = self.tcx.map.local_def_id(item.id); let trait_ref = self.tcx.impl_trait_ref(impl_def_id).unwrap(); let trait_def_id = trait_ref.def_id; - self.check_for_overlapping_impls_of_trait(trait_def_id); - match trait_ref.self_ty().sty { - ty::TyTrait(ref data) => { - // This is something like impl Trait1 for Trait2. Illegal - // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe. - - if !traits::is_object_safe(self.tcx, data.principal_def_id()) { - // This is an error, but it will be - // reported by wfcheck. Ignore it - // here. This is tested by - // `coherence-impl-trait-for-trait-object-safe.rs`. 
- } else { - let mut supertrait_def_ids = - traits::supertrait_def_ids(self.tcx, data.principal_def_id()); - if supertrait_def_ids.any(|d| d == trait_def_id) { - span_err!(self.tcx.sess, item.span, E0371, - "the object type `{}` automatically \ - implements the trait `{}`", - trait_ref.self_ty(), - self.tcx.item_path_str(trait_def_id)); - } + + if trait_ref.references_error() { + debug!("coherence: skipping impl {:?} with error {:?}", + impl_def_id, trait_ref); + return + } + + let _task = + self.tcx.dep_graph.in_task(DepNode::CoherenceOverlapCheck(trait_def_id)); + + let def = self.tcx.lookup_trait_def(trait_def_id); + + // attempt to insert into the specialization graph + let insert_result = def.add_impl_for_specialization(self.tcx, impl_def_id); + + // insertion failed due to overlap + if let Err(overlap) = insert_result { + let mut err = struct_span_err!(self.tcx.sess, + self.tcx.span_of_impl(impl_def_id).unwrap(), + E0119, + "conflicting implementations of trait `{}`{}:", + overlap.trait_desc, + overlap.self_desc.clone().map_or(String::new(), + |ty| { + format!(" for type `{}`", ty) + })); + + match self.tcx.span_of_impl(overlap.with_impl) { + Ok(span) => { + err.span_label(span, &format!("first implementation here")); + err.span_label(self.tcx.span_of_impl(impl_def_id).unwrap(), + &format!("conflicting implementation{}", + overlap.self_desc + .map_or(String::new(), + |ty| format!(" for `{}`", ty)))); + } + Err(cname) => { + err.note(&format!("conflicting implementation in crate `{}`", cname)); + } + } + + err.emit(); + } + + // check for overlap with the automatic `impl Trait for Trait` + if let ty::TyDynamic(ref data, ..) = trait_ref.self_ty().sty { + // This is something like impl Trait1 for Trait2. Illegal + // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe. + + if data.principal().map_or(true, |p| !self.tcx.is_object_safe(p.def_id())) { + // This is an error, but it will be reported by wfcheck. Ignore it here. 
+ // This is tested by `coherence-impl-trait-for-trait-object-safe.rs`. + } else { + let mut supertrait_def_ids = + traits::supertrait_def_ids(self.tcx, + data.principal().unwrap().def_id()); + if supertrait_def_ids.any(|d| d == trait_def_id) { + span_err!(self.tcx.sess, + item.span, + E0371, + "the object type `{}` automatically \ + implements the trait `{}`", + trait_ref.self_ty(), + self.tcx.item_path_str(trait_def_id)); } } - _ => { } } } - _ => { - } + _ => {} } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } } diff --git a/src/librustc_typeck/coherence/unsafety.rs b/src/librustc_typeck/coherence/unsafety.rs index 936d26f920850..6d5de8f250655 100644 --- a/src/librustc_typeck/coherence/unsafety.rs +++ b/src/librustc_typeck/coherence/unsafety.rs @@ -11,30 +11,36 @@ //! Unsafety checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. -use middle::ty; -use rustc_front::intravisit; -use rustc_front::hir; +use rustc::ty::TyCtxt; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::{self, Unsafety}; -pub fn check(tcx: &ty::ctxt) { - let mut orphan = UnsafetyChecker { tcx: tcx }; - tcx.map.krate().visit_all_items(&mut orphan); +pub fn check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut unsafety = UnsafetyChecker { tcx: tcx }; + tcx.map.krate().visit_all_item_likes(&mut unsafety); } -struct UnsafetyChecker<'cx, 'tcx:'cx> { - tcx: &'cx ty::ctxt<'tcx> +struct UnsafetyChecker<'cx, 'tcx: 'cx> { + tcx: TyCtxt<'cx, 'tcx, 'tcx>, } impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> { - fn check_unsafety_coherence(&mut self, item: &'v hir::Item, + fn check_unsafety_coherence(&mut self, + item: &'v hir::Item, + impl_generics: Option<&hir::Generics>, unsafety: hir::Unsafety, polarity: hir::ImplPolarity) { match self.tcx.impl_trait_ref(self.tcx.map.local_def_id(item.id)) { None => { // Inherent impl. 
match unsafety { - hir::Unsafety::Normal => { /* OK */ } + hir::Unsafety::Normal => { + // OK + } hir::Unsafety::Unsafe => { - span_err!(self.tcx.sess, item.span, E0197, + span_err!(self.tcx.sess, + item.span, + E0197, "inherent impls cannot be declared as unsafe"); } } @@ -42,32 +48,45 @@ impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> { Some(trait_ref) => { let trait_def = self.tcx.lookup_trait_def(trait_ref.def_id); - match (trait_def.unsafety, unsafety, polarity) { - (hir::Unsafety::Unsafe, - hir::Unsafety::Unsafe, hir::ImplPolarity::Negative) => { - span_err!(self.tcx.sess, item.span, E0198, + let unsafe_attr = impl_generics.and_then(|g| g.carries_unsafe_attr()); + match (trait_def.unsafety, unsafe_attr, unsafety, polarity) { + (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative) => { + span_err!(self.tcx.sess, + item.span, + E0198, "negative implementations are not unsafe"); } - (hir::Unsafety::Normal, hir::Unsafety::Unsafe, _) => { - span_err!(self.tcx.sess, item.span, E0199, + (Unsafety::Normal, None, Unsafety::Unsafe, _) => { + span_err!(self.tcx.sess, + item.span, + E0199, "implementing the trait `{}` is not unsafe", trait_ref); } - (hir::Unsafety::Unsafe, - hir::Unsafety::Normal, hir::ImplPolarity::Positive) => { - span_err!(self.tcx.sess, item.span, E0200, + (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => { + span_err!(self.tcx.sess, + item.span, + E0200, "the trait `{}` requires an `unsafe impl` declaration", trait_ref); } - (hir::Unsafety::Unsafe, - hir::Unsafety::Normal, hir::ImplPolarity::Negative) | - (hir::Unsafety::Unsafe, - hir::Unsafety::Unsafe, hir::ImplPolarity::Positive) | - (hir::Unsafety::Normal, hir::Unsafety::Normal, _) => { - /* OK */ + (Unsafety::Normal, Some(g), Unsafety::Normal, hir::ImplPolarity::Positive) => + { + span_err!(self.tcx.sess, + item.span, + E0569, + "requires an `unsafe impl` declaration due to `#[{}]` attribute", + g.attr_name()); + } + + (_, _, Unsafety::Normal, hir::ImplPolarity::Negative) | 
+ (Unsafety::Unsafe, _, Unsafety::Unsafe, hir::ImplPolarity::Positive) | + (Unsafety::Normal, Some(_), Unsafety::Unsafe, hir::ImplPolarity::Positive) | + (Unsafety::Normal, None, Unsafety::Normal, _) => { + // OK } } } @@ -75,16 +94,19 @@ impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> { } } -impl<'cx, 'tcx,'v> intravisit::Visitor<'v> for UnsafetyChecker<'cx, 'tcx> { +impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for UnsafetyChecker<'cx, 'tcx> { fn visit_item(&mut self, item: &'v hir::Item) { match item.node { hir::ItemDefaultImpl(unsafety, _) => { - self.check_unsafety_coherence(item, unsafety, hir::ImplPolarity::Positive); + self.check_unsafety_coherence(item, None, unsafety, hir::ImplPolarity::Positive); } - hir::ItemImpl(unsafety, polarity, _, _, _, _) => { - self.check_unsafety_coherence(item, unsafety, polarity); + hir::ItemImpl(unsafety, polarity, ref generics, ..) => { + self.check_unsafety_coherence(item, Some(generics), unsafety, polarity); } - _ => { } + _ => {} } } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } } diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index eb204c5641495..3bb7d6a77ba5c 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -13,7 +13,7 @@ # Collect phase The collect phase of type check has the job of visiting all items, -determining their type, and writing that type into the `tcx.tcache` +determining their type, and writing that type into the `tcx.types` table. Despite its name, this table does not really operate as a *cache*, at least not for the types of items defined within the current crate: we assume that after the collect phase, the types of @@ -22,8 +22,7 @@ all local items will be present in the table. Unlike most of the types that are present in Rust, the types computed for each item are in fact type schemes. This means that they are generic types that may have type parameters. TypeSchemes are -represented by an instance of `ty::TypeScheme`. 
This combines the -core type along with a list of the bounds for each parameter. Type +represented by a pair of `Generics` and `Ty`. Type parameters themselves are represented as `ty_param()` instances. The phasing of type conversion is somewhat complicated. There is no @@ -51,69 +50,53 @@ There are some shortcomings in this design: - Before walking the set of supertraits for a given trait, you must call `ensure_super_predicates` on that trait def-id. Otherwise, - `lookup_super_predicates` will result in ICEs. -- Because the type scheme includes defaults, cycles through type + `item_super_predicates` will result in ICEs. +- Because the item generics include defaults, cycles through type parameter defaults are illegal even if those defaults are never employed. This is not necessarily a bug. */ -use astconv::{self, AstConv, ty_of_arg, ast_ty_to_ty, ast_region_to_region}; +use astconv::{AstConv, ast_region_to_region, Bounds, PartitionedBounds, partition_bounds}; use lint; -use middle::def; -use middle::def_id::DefId; use constrained_type_params as ctp; use middle::lang_items::SizedTraitLangItem; -use middle::resolve_lifetime; -use middle::const_eval::{self, ConstVal}; -use middle::const_eval::EvalHint::UncheckedExprHint; -use middle::subst::{Substs, FnSpace, ParamSpace, SelfSpace, TypeSpace, VecPerParamSpace}; -use middle::ty::{ToPredicate, ImplContainer, ImplOrTraitItemContainer, TraitContainer}; -use middle::ty::{self, ToPolyTraitRef, Ty, TypeScheme}; -use middle::ty::{VariantKind}; -use middle::ty::fold::{TypeFolder}; -use middle::ty::util::IntTypeExt; +use middle::const_val::ConstVal; +use rustc_const_eval::EvalHint::UncheckedExprHint; +use rustc_const_eval::{eval_const_expr_partial, report_const_eval_err}; +use rustc::ty::subst::Substs; +use rustc::ty::{ToPredicate, ImplContainer, AssociatedItemContainer, TraitContainer}; +use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt}; +use rustc::ty::util::IntTypeExt; use rscope::*; use rustc::dep_graph::DepNode; 
-use rustc::front::map as hir_map; use util::common::{ErrorReported, MemoizationMap}; -use util::nodemap::{FnvHashMap, FnvHashSet}; -use write_ty_to_tcx; +use util::nodemap::{NodeMap, FxHashMap, FxHashSet}; +use CrateCtxt; + +use rustc_const_math::ConstInt; use std::cell::RefCell; -use std::collections::HashSet; -use std::rc::Rc; - -use syntax::abi; -use syntax::ast; -use syntax::attr; -use syntax::codemap::Span; -use syntax::parse::token::special_idents; -use syntax::ptr::P; -use rustc_front::hir; -use rustc_front::intravisit; -use rustc_front::print::pprust; + +use syntax::{abi, ast, attr}; +use syntax::symbol::{Symbol, keywords}; +use syntax_pos::Span; + +use rustc::hir::{self, map as hir_map, print as pprust}; +use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def_id::DefId; /////////////////////////////////////////////////////////////////////////// // Main entry point -pub fn collect_item_types(tcx: &ty::ctxt) { - let ccx = &CrateCtxt { tcx: tcx, stack: RefCell::new(Vec::new()) }; - - let mut visitor = CollectItemTypesVisitor{ ccx: ccx }; - ccx.tcx.map.krate().visit_all_items(&mut visitor); +pub fn collect_item_types(ccx: &CrateCtxt) { + let mut visitor = CollectItemTypesVisitor { ccx: ccx }; + ccx.tcx.visit_all_item_likes_in_krate(DepNode::CollectItem, &mut visitor.as_deep_visitor()); } /////////////////////////////////////////////////////////////////////////// -struct CrateCtxt<'a,'tcx:'a> { - tcx: &'a ty::ctxt<'tcx>, - - // This stack is used to identify cycles in the user's source. - // Note that these cycles can cross multiple items. - stack: RefCell>, -} - /// Context specific to some particular item. This is what implements /// AstConv. It has information about the predicates that are defined /// on the trait. 
Unfortunately, this predicate information is @@ -131,7 +114,8 @@ struct ItemCtxt<'a,'tcx:'a> { } #[derive(Copy, Clone, PartialEq, Eq)] -enum AstConvRequest { +pub enum AstConvRequest { + GetGenerics(DefId), GetItemTypeScheme(DefId), GetTraitDef(DefId), EnsureSuperPredicates(DefId), @@ -144,12 +128,87 @@ struct CollectItemTypesVisitor<'a, 'tcx: 'a> { ccx: &'a CrateCtxt<'a, 'tcx> } -impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for CollectItemTypesVisitor<'a, 'tcx> { - fn visit_item(&mut self, item: &hir::Item) { - let tcx = self.ccx.tcx; - let item_def_id = tcx.map.local_def_id(item.id); - let _task = tcx.dep_graph.in_task(DepNode::CollectItem(item_def_id)); - convert_item(self.ccx, item); +impl<'a, 'tcx> CollectItemTypesVisitor<'a, 'tcx> { + /// Collect item types is structured into two tasks. The outer + /// task, `CollectItem`, walks the entire content of an item-like + /// thing, including its body. It also spawns an inner task, + /// `CollectItemSig`, which walks only the signature. This inner + /// task is the one that writes the item-type into the various + /// maps. This setup ensures that the item body is never + /// accessible to the task that computes its signature, so that + /// changes to the body don't affect the signature. + /// + /// Consider an example function `foo` that also has a closure in its body: + /// + /// ``` + /// fn foo() { + /// ... + /// let bar = || ...; // we'll label this closure as "bar" below + /// } + /// ``` + /// + /// This results in a dep-graph like so. I've labeled the edges to + /// document where they arise. + /// + /// ``` + /// [HirBody(foo)] -2--> [CollectItem(foo)] -4-> [ItemSignature(bar)] + /// ^ ^ + /// 1 3 + /// [Hir(foo)] -----------+-6-> [CollectItemSig(foo)] -5-> [ItemSignature(foo)] + /// ``` + /// + /// 1. This is added by the `visit_all_item_likes_in_krate`. + /// 2. This is added when we fetch the item body. + /// 3. This is added because `CollectItem` launches `CollectItemSig`. 
+ /// - it is arguably false; if we refactor the `with_task` system; + /// we could get probably rid of it, but it is also harmless enough. + /// 4. This is added by the code in `visit_expr` when we write to `item_types`. + /// 5. This is added by the code in `convert_item` when we write to `item_types`; + /// note that this write occurs inside the `CollectItemSig` task. + /// 6. Added by explicit `read` below + fn with_collect_item_sig(&self, id: ast::NodeId, op: OP) + where OP: FnOnce() + { + let def_id = self.ccx.tcx.map.local_def_id(id); + self.ccx.tcx.dep_graph.with_task(DepNode::CollectItemSig(def_id), || { + self.ccx.tcx.map.read(id); + op(); + }); + } +} + +impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> { + fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { + NestedVisitorMap::OnlyBodies(&self.ccx.tcx.map) + } + + fn visit_item(&mut self, item: &'tcx hir::Item) { + self.with_collect_item_sig(item.id, || convert_item(self.ccx, item)); + intravisit::walk_item(self, item); + } + + fn visit_expr(&mut self, expr: &'tcx hir::Expr) { + if let hir::ExprClosure(..) = expr.node { + let def_id = self.ccx.tcx.map.local_def_id(expr.id); + generics_of_def_id(self.ccx, def_id); + type_of_def_id(self.ccx, def_id); + } + intravisit::walk_expr(self, expr); + } + + fn visit_ty(&mut self, ty: &'tcx hir::Ty) { + if let hir::TyImplTrait(..) 
= ty.node { + let def_id = self.ccx.tcx.map.local_def_id(ty.id); + generics_of_def_id(self.ccx, def_id); + } + intravisit::walk_ty(self, ty); + } + + fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { + self.with_collect_item_sig(impl_item.id, || { + convert_impl_item(self.ccx, impl_item) + }); + intravisit::walk_impl_item(self, impl_item); } } @@ -158,7 +217,10 @@ impl<'a, 'tcx, 'v> intravisit::Visitor<'v> for CollectItemTypesVisitor<'a, 'tcx> impl<'a,'tcx> CrateCtxt<'a,'tcx> { fn icx(&'a self, param_bounds: &'a GetTypeParameterBounds<'tcx>) -> ItemCtxt<'a,'tcx> { - ItemCtxt { ccx: self, param_bounds: param_bounds } + ItemCtxt { + ccx: self, + param_bounds: param_bounds, + } } fn cycle_check(&self, @@ -170,13 +232,10 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { { { let mut stack = self.stack.borrow_mut(); - match stack.iter().enumerate().rev().find(|&(_, r)| *r == request) { - None => { } - Some((i, _)) => { - let cycle = &stack[i..]; - self.report_cycle(span, cycle); - return Err(ErrorReported); - } + if let Some((i, _)) = stack.iter().enumerate().rev().find(|&(_, r)| *r == request) { + let cycle = &stack[i..]; + self.report_cycle(span, cycle); + return Err(ErrorReported); } stack.push(request); } @@ -196,8 +255,10 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { let mut err = struct_span_err!(tcx.sess, span, E0391, "unsupported cyclic reference between types/traits detected"); + err.span_label(span, &format!("cyclic reference")); match cycle[0] { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -220,6 +281,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { for request in &cycle[1..] 
{ match *request { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -242,6 +304,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { } match cycle[0] { + AstConvRequest::GetGenerics(def_id) | AstConvRequest::GetItemTypeScheme(def_id) | AstConvRequest::GetTraitDef(def_id) => { err.note( @@ -266,20 +329,21 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { } /// Loads the trait def for a given trait, returning ErrorReported if a cycle arises. - fn get_trait_def(&self, trait_id: DefId) - -> &'tcx ty::TraitDef<'tcx> + fn get_trait_def(&self, def_id: DefId) + -> &'tcx ty::TraitDef { let tcx = self.tcx; - if let Some(trait_id) = tcx.map.as_local_node_id(trait_id) { + if let Some(trait_id) = tcx.map.as_local_node_id(def_id) { let item = match tcx.map.get(trait_id) { hir_map::NodeItem(item) => item, - _ => tcx.sess.bug(&format!("get_trait_def({:?}): not an item", trait_id)) + _ => bug!("get_trait_def({:?}): not an item", trait_id) }; - trait_def_of_item(self, &*item) + generics_of_def_id(self, def_id); + trait_def_of_item(self, &item) } else { - tcx.lookup_trait_def(trait_id) + tcx.lookup_trait_def(def_id) } } @@ -293,7 +357,7 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { let def_ids = ensure_super_predicates_step(self, trait_def_id); for def_id in def_ids { - try!(self.ensure_super_predicates(span, def_id)); + self.ensure_super_predicates(span, def_id)?; } Ok(()) @@ -303,23 +367,33 @@ impl<'a,'tcx> CrateCtxt<'a,'tcx> { impl<'a,'tcx> ItemCtxt<'a,'tcx> { fn to_ty(&self, rs: &RS, ast_ty: &hir::Ty) -> Ty<'tcx> { - ast_ty_to_ty(self, rs, ast_ty) + AstConv::ast_ty_to_ty(self, rs, ast_ty) } } -impl<'a, 'tcx> AstConv<'tcx> for ItemCtxt<'a, 'tcx> { - fn tcx(&self) -> &ty::ctxt<'tcx> { self.ccx.tcx } +impl<'a, 'tcx> AstConv<'tcx, 'tcx> for ItemCtxt<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { self.ccx.tcx } + + fn ast_ty_to_ty_cache(&self) -> &RefCell>> { + &self.ccx.ast_ty_to_ty_cache + } - fn 
get_item_type_scheme(&self, span: Span, id: DefId) - -> Result, ErrorReported> + fn get_generics(&self, span: Span, id: DefId) + -> Result<&'tcx ty::Generics<'tcx>, ErrorReported> { + self.ccx.cycle_check(span, AstConvRequest::GetGenerics(id), || { + Ok(generics_of_def_id(self.ccx, id)) + }) + } + + fn get_item_type(&self, span: Span, id: DefId) -> Result, ErrorReported> { self.ccx.cycle_check(span, AstConvRequest::GetItemTypeScheme(id), || { - Ok(type_scheme_of_def_id(self.ccx, id)) + Ok(type_of_def_id(self.ccx, id)) }) } fn get_trait_def(&self, span: Span, id: DefId) - -> Result<&'tcx ty::TraitDef<'tcx>, ErrorReported> + -> Result<&'tcx ty::TraitDef, ErrorReported> { self.ccx.cycle_check(span, AstConvRequest::GetTraitDef(id), || { Ok(self.ccx.get_trait_def(id)) @@ -352,29 +426,38 @@ impl<'a, 'tcx> AstConv<'tcx> for ItemCtxt<'a, 'tcx> { }) } - fn trait_defines_associated_type_named(&self, - trait_def_id: DefId, - assoc_name: ast::Name) - -> bool - { - if let Some(trait_id) = self.tcx().map.as_local_node_id(trait_def_id) { - trait_defines_associated_type_named(self.ccx, trait_id, assoc_name) - } else { - let trait_def = self.tcx().lookup_trait_def(trait_def_id); - trait_def.associated_type_names.contains(&assoc_name) - } + fn get_free_substs(&self) -> Option<&Substs<'tcx>> { + None } - fn ty_infer(&self, - _ty_param_def: Option>, - _substs: Option<&mut Substs<'tcx>>, - _space: Option, - span: Span) -> Ty<'tcx> { - span_err!(self.tcx().sess, span, E0121, - "the type placeholder `_` is not allowed within types on item signatures"); + fn ty_infer(&self, span: Span) -> Ty<'tcx> { + struct_span_err!( + self.tcx().sess, + span, + E0121, + "the type placeholder `_` is not allowed within types on item signatures" + ).span_label(span, &format!("not allowed in type signatures")) + .emit(); self.tcx().types.err } + fn projected_ty_from_poly_trait_ref(&self, + span: Span, + poly_trait_ref: ty::PolyTraitRef<'tcx>, + item_name: ast::Name) + -> Ty<'tcx> + { + if let 
Some(trait_ref) = self.tcx().no_late_bound_regions(&poly_trait_ref) { + self.projected_ty(span, trait_ref, item_name) + } else { + // no late-bound regions, we can just ignore the binder + span_err!(self.tcx().sess, span, E0212, + "cannot extract an associated type from a higher-ranked trait bound \ + in this context"); + self.tcx().types.err + } + } + fn projected_ty(&self, _span: Span, trait_ref: ty::TraitRef<'tcx>, @@ -383,13 +466,17 @@ impl<'a, 'tcx> AstConv<'tcx> for ItemCtxt<'a, 'tcx> { { self.tcx().mk_projection(trait_ref, item_name) } + + fn set_tainted_by_errors(&self) { + // no obvious place to track this, just let it go + } } /// Interface used to find the bounds on a type parameter from within /// an `ItemCtxt`. This allows us to use multiple kinds of sources. trait GetTypeParameterBounds<'tcx> { fn get_type_parameter_bounds(&self, - astconv: &AstConv<'tcx>, + astconv: &AstConv<'tcx, 'tcx>, span: Span, node_id: ast::NodeId) -> Vec>; @@ -400,7 +487,7 @@ impl<'a,'b,'tcx,A,B> GetTypeParameterBounds<'tcx> for (&'a A,&'b B) where A : GetTypeParameterBounds<'tcx>, B : GetTypeParameterBounds<'tcx> { fn get_type_parameter_bounds(&self, - astconv: &AstConv<'tcx>, + astconv: &AstConv<'tcx, 'tcx>, span: Span, node_id: ast::NodeId) -> Vec> @@ -414,7 +501,7 @@ impl<'a,'b,'tcx,A,B> GetTypeParameterBounds<'tcx> for (&'a A,&'b B) /// Empty set of bounds. impl<'tcx> GetTypeParameterBounds<'tcx> for () { fn get_type_parameter_bounds(&self, - _astconv: &AstConv<'tcx>, + _astconv: &AstConv<'tcx, 'tcx>, _span: Span, _node_id: ast::NodeId) -> Vec> @@ -428,34 +515,38 @@ impl<'tcx> GetTypeParameterBounds<'tcx> for () { /// from the trait/impl have been fully converted. 
impl<'tcx> GetTypeParameterBounds<'tcx> for ty::GenericPredicates<'tcx> { fn get_type_parameter_bounds(&self, - astconv: &AstConv<'tcx>, - _span: Span, + astconv: &AstConv<'tcx, 'tcx>, + span: Span, node_id: ast::NodeId) -> Vec> { let def = astconv.tcx().type_parameter_def(node_id); - self.predicates - .iter() - .filter(|predicate| { - match **predicate { - ty::Predicate::Trait(ref data) => { - data.skip_binder().self_ty().is_param(def.space, def.index) - } - ty::Predicate::TypeOutlives(ref data) => { - data.skip_binder().0.is_param(def.space, def.index) - } - ty::Predicate::Equate(..) | - ty::Predicate::RegionOutlives(..) | - ty::Predicate::WellFormed(..) | - ty::Predicate::ObjectSafe(..) | - ty::Predicate::Projection(..) => { - false - } + let mut results = self.parent.map_or(vec![], |def_id| { + let parent = astconv.tcx().item_predicates(def_id); + parent.get_type_parameter_bounds(astconv, span, node_id) + }); + + results.extend(self.predicates.iter().filter(|predicate| { + match **predicate { + ty::Predicate::Trait(ref data) => { + data.skip_binder().self_ty().is_param(def.index) + } + ty::Predicate::TypeOutlives(ref data) => { + data.skip_binder().0.is_param(def.index) + } + ty::Predicate::Equate(..) | + ty::Predicate::RegionOutlives(..) | + ty::Predicate::WellFormed(..) | + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::Projection(..) => { + false } - }) - .cloned() - .collect() + } + }).cloned()); + + results } } @@ -465,7 +556,7 @@ impl<'tcx> GetTypeParameterBounds<'tcx> for ty::GenericPredicates<'tcx> { /// bounds for a type parameter `X` if `X::Foo` is used. impl<'tcx> GetTypeParameterBounds<'tcx> for hir::Generics { fn get_type_parameter_bounds(&self, - astconv: &AstConv<'tcx>, + astconv: &AstConv<'tcx, 'tcx>, _: Span, node_id: ast::NodeId) -> Vec> @@ -504,138 +595,94 @@ impl<'tcx> GetTypeParameterBounds<'tcx> for hir::Generics { /// parameter with id `param_id`. 
We use this so as to avoid running /// `ast_ty_to_ty`, because we want to avoid triggering an all-out /// conversion of the type to avoid inducing unnecessary cycles. -fn is_param<'tcx>(tcx: &ty::ctxt<'tcx>, - ast_ty: &hir::Ty, - param_id: ast::NodeId) - -> bool +fn is_param<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ast_ty: &hir::Ty, + param_id: ast::NodeId) + -> bool { - if let hir::TyPath(None, _) = ast_ty.node { - let path_res = *tcx.def_map.borrow().get(&ast_ty.id).unwrap(); - match path_res.base_def { - def::DefSelfTy(Some(def_id), None) => { - path_res.depth == 0 && def_id == tcx.map.local_def_id(param_id) - } - def::DefTyParam(_, _, def_id, _) => { - path_res.depth == 0 && def_id == tcx.map.local_def_id(param_id) - } - _ => { - false + if let hir::TyPath(hir::QPath::Resolved(None, ref path)) = ast_ty.node { + match path.def { + Def::SelfTy(Some(def_id), None) | + Def::TyParam(def_id) => { + def_id == tcx.map.local_def_id(param_id) } + _ => false } } else { false } } +fn convert_field<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + struct_generics: &'tcx ty::Generics<'tcx>, + struct_predicates: &ty::GenericPredicates<'tcx>, + field: &hir::StructField, + ty_f: &'tcx ty::FieldDef) +{ + let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &field.ty); + ccx.tcx.item_types.borrow_mut().insert(ty_f.did, tt); + + let def_id = ccx.tcx.map.local_def_id(field.id); + ccx.tcx.item_types.borrow_mut().insert(def_id, tt); + ccx.tcx.generics.borrow_mut().insert(def_id, struct_generics); + ccx.tcx.predicates.borrow_mut().insert(def_id, struct_predicates.clone()); +} fn convert_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - container: ImplOrTraitItemContainer, - name: ast::Name, + container: AssociatedItemContainer, id: ast::NodeId, - vis: hir::Visibility, sig: &hir::MethodSig, untransformed_rcvr_ty: Ty<'tcx>, - rcvr_ty_generics: &ty::Generics<'tcx>, rcvr_ty_predicates: &ty::GenericPredicates<'tcx>) { - let ty_generics = ty_generics_for_fn(ccx, &sig.generics, rcvr_ty_generics); - - 
let ty_generic_predicates = - ty_generic_predicates_for_fn(ccx, &sig.generics, rcvr_ty_predicates); - - let (fty, explicit_self_category) = - astconv::ty_of_method(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)), - sig, untransformed_rcvr_ty); - let def_id = ccx.tcx.map.local_def_id(id); - let ty_method = ty::Method::new(name, - ty_generics, - ty_generic_predicates, - fty, - explicit_self_category, - vis, - def_id, - container); - - let fty = ccx.tcx.mk_fn(Some(def_id), - ccx.tcx.mk_bare_fn(ty_method.fty.clone())); - debug!("method {} (id {}) has type {:?}", - name, id, fty); - ccx.tcx.register_item_type(def_id, TypeScheme { - generics: ty_method.generics.clone(), - ty: fty - }); - ccx.tcx.predicates.borrow_mut().insert(def_id, ty_method.predicates.clone()); + let ty_generics = generics_of_def_id(ccx, def_id); - write_ty_to_tcx(ccx.tcx, id, fty); - - debug!("writing method type: def_id={:?} mty={:?}", - def_id, ty_method); - - ccx.tcx.impl_or_trait_items.borrow_mut().insert(def_id, - ty::MethodTraitItem(Rc::new(ty_method))); -} + let ty_generic_predicates = + ty_generic_predicates(ccx, &sig.generics, ty_generics.parent, vec![], false); -fn convert_field<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - struct_generics: &ty::Generics<'tcx>, - struct_predicates: &ty::GenericPredicates<'tcx>, - v: &hir::StructField, - ty_f: ty::FieldDefMaster<'tcx>) -{ - let tt = ccx.icx(struct_predicates).to_ty(&ExplicitRscope, &*v.node.ty); - ty_f.fulfill_ty(tt); - write_ty_to_tcx(ccx.tcx, v.node.id, tt); - - /* add the field to the tcache */ - ccx.tcx.register_item_type(ccx.tcx.map.local_def_id(v.node.id), - ty::TypeScheme { - generics: struct_generics.clone(), - ty: tt - }); - ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(v.node.id), - struct_predicates.clone()); + let anon_scope = match container { + ImplContainer(_) => Some(AnonTypeScope::new(def_id)), + TraitContainer(_) => None + }; + let fty = AstConv::ty_of_method(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)), + sig, 
untransformed_rcvr_ty, anon_scope); + + let substs = mk_item_substs(&ccx.icx(&(rcvr_ty_predicates, &sig.generics)), + ccx.tcx.map.span(id), def_id); + let fty = ccx.tcx.mk_fn_def(def_id, substs, fty); + ccx.tcx.item_types.borrow_mut().insert(def_id, fty); + ccx.tcx.predicates.borrow_mut().insert(def_id, ty_generic_predicates); } fn convert_associated_const<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - container: ImplOrTraitItemContainer, - name: ast::Name, + container: AssociatedItemContainer, id: ast::NodeId, - vis: hir::Visibility, - ty: ty::Ty<'tcx>, - has_value: bool) + ty: ty::Ty<'tcx>) { - ccx.tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(id), - ty::GenericPredicates::empty()); - - write_ty_to_tcx(ccx.tcx, id, ty); - - let associated_const = Rc::new(ty::AssociatedConst { - name: name, - vis: vis, - def_id: ccx.tcx.map.local_def_id(id), - container: container, - ty: ty, - has_value: has_value - }); - ccx.tcx.impl_or_trait_items.borrow_mut() - .insert(ccx.tcx.map.local_def_id(id), ty::ConstTraitItem(associated_const)); + let predicates = ty::GenericPredicates { + parent: Some(container.id()), + predicates: vec![] + }; + let def_id = ccx.tcx.map.local_def_id(id); + ccx.tcx.predicates.borrow_mut().insert(def_id, predicates); + ccx.tcx.item_types.borrow_mut().insert(def_id, ty); } fn convert_associated_type<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - container: ImplOrTraitItemContainer, - name: ast::Name, + container: AssociatedItemContainer, id: ast::NodeId, - vis: hir::Visibility, ty: Option>) { - let associated_type = Rc::new(ty::AssociatedType { - name: name, - vis: vis, - ty: ty, - def_id: ccx.tcx.map.local_def_id(id), - container: container - }); - ccx.tcx.impl_or_trait_items.borrow_mut() - .insert(ccx.tcx.map.local_def_id(id), ty::TypeTraitItem(associated_type)); + let predicates = ty::GenericPredicates { + parent: Some(container.id()), + predicates: vec![] + }; + let def_id = ccx.tcx.map.local_def_id(id); + ccx.tcx.predicates.borrow_mut().insert(def_id, 
predicates); + + if let Some(ty) = ty { + ccx.tcx.item_types.borrow_mut().insert(def_id, ty); + } } fn ensure_no_ty_param_bounds(ccx: &CrateCtxt, @@ -670,9 +717,10 @@ fn ensure_no_ty_param_bounds(ccx: &CrateCtxt, fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { let tcx = ccx.tcx; debug!("convert: item {} with id {}", it.name, it.id); + let def_id = ccx.tcx.map.local_def_id(it.id); match it.node { // These don't define types. - hir::ItemExternCrate(_) | hir::ItemUse(_) | hir::ItemMod(_) => { + hir::ItemExternCrate(_) | hir::ItemUse(..) | hir::ItemMod(_) => { } hir::ItemForeignMod(ref foreign_mod) => { for item in &foreign_mod.items { @@ -680,150 +728,70 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { } } hir::ItemEnum(ref enum_definition, _) => { - let (scheme, predicates) = convert_typed_item(ccx, it); - write_ty_to_tcx(tcx, it.id, scheme.ty); + let ty = type_of_def_id(ccx, def_id); + let generics = generics_of_def_id(ccx, def_id); + let predicates = predicates_of_item(ccx, it); convert_enum_variant_types(ccx, - tcx.lookup_adt_def_master(ccx.tcx.map.local_def_id(it.id)), - scheme, + tcx.lookup_adt_def(ccx.tcx.map.local_def_id(it.id)), + ty, + generics, predicates, &enum_definition.variants); }, hir::ItemDefaultImpl(_, ref ast_trait_ref) => { let trait_ref = - astconv::instantiate_mono_trait_ref(&ccx.icx(&()), + AstConv::instantiate_mono_trait_ref(&ccx.icx(&()), &ExplicitRscope, ast_trait_ref, - None); + tcx.mk_self_type()); tcx.record_trait_has_default_impl(trait_ref.def_id); tcx.impl_trait_refs.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), Some(trait_ref)); } - hir::ItemImpl(_, _, + hir::ItemImpl(.., ref generics, ref opt_trait_ref, ref selfty, - ref impl_items) => { + _) => { // Create generics from the generics specified in the impl head. 
debug!("convert: ast_generics={:?}", generics); - let def_id = ccx.tcx.map.local_def_id(it.id); - let ty_generics = ty_generics_for_type_or_impl(ccx, generics); - let mut ty_predicates = ty_generic_predicates_for_type_or_impl(ccx, generics); + generics_of_def_id(ccx, def_id); + let mut ty_predicates = + ty_generic_predicates(ccx, generics, None, vec![], false); debug!("convert: impl_bounds={:?}", ty_predicates); - let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &**selfty); - write_ty_to_tcx(tcx, it.id, selfty); - - tcx.register_item_type(def_id, - TypeScheme { generics: ty_generics.clone(), - ty: selfty }); - if let &Some(ref ast_trait_ref) = opt_trait_ref { - tcx.impl_trait_refs.borrow_mut().insert( - def_id, - Some(astconv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates), - &ExplicitRscope, - ast_trait_ref, - Some(selfty))) - ); - } else { - tcx.impl_trait_refs.borrow_mut().insert(def_id, None); - } - - enforce_impl_params_are_constrained(tcx, generics, &mut ty_predicates, def_id); - tcx.predicates.borrow_mut().insert(def_id, ty_predicates.clone()); - - - // If there is a trait reference, treat the methods as always public. - // This is to work around some incorrect behavior in privacy checking: - // when the method belongs to a trait, it should acquire the privacy - // from the trait, not the impl. Forcing the visibility to be public - // makes things sorta work. - let parent_visibility = if opt_trait_ref.is_some() { - hir::Public - } else { - it.vis - }; - - // Convert all the associated consts. 
- // Also, check if there are any duplicate associated items - let mut seen_type_items = FnvHashSet(); - let mut seen_value_items = FnvHashSet(); - - for impl_item in impl_items { - let seen_items = match impl_item.node { - hir::ImplItemKind::Type(_) => &mut seen_type_items, - _ => &mut seen_value_items, - }; - if !seen_items.insert(impl_item.name) { - let desc = match impl_item.node { - hir::ImplItemKind::Const(_, _) => "associated constant", - hir::ImplItemKind::Type(_) => "associated type", - hir::ImplItemKind::Method(ref sig, _) => - match sig.explicit_self.node { - hir::SelfStatic => "associated function", - _ => "method", - }, - }; - - span_err!(tcx.sess, impl_item.span, E0201, "duplicate {}", desc); - } - - if let hir::ImplItemKind::Const(ref ty, _) = impl_item.node { - let ty = ccx.icx(&ty_predicates) - .to_ty(&ExplicitRscope, &*ty); - tcx.register_item_type(ccx.tcx.map.local_def_id(impl_item.id), - TypeScheme { - generics: ty_generics.clone(), - ty: ty, - }); - convert_associated_const(ccx, ImplContainer(def_id), - impl_item.name, impl_item.id, - impl_item.vis.inherit_from(parent_visibility), - ty, true /* has_value */); - } - } - - // Convert all the associated types. 
- for impl_item in impl_items { - if let hir::ImplItemKind::Type(ref ty) = impl_item.node { - if opt_trait_ref.is_none() { - span_err!(tcx.sess, impl_item.span, E0202, - "associated types are not allowed in inherent impls"); - } - - let typ = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, ty); + let selfty = ccx.icx(&ty_predicates).to_ty(&ExplicitRscope, &selfty); + tcx.item_types.borrow_mut().insert(def_id, selfty); - convert_associated_type(ccx, ImplContainer(def_id), - impl_item.name, impl_item.id, impl_item.vis, - Some(typ)); - } - } + let trait_ref = opt_trait_ref.as_ref().map(|ast_trait_ref| { + AstConv::instantiate_mono_trait_ref(&ccx.icx(&ty_predicates), + &ExplicitRscope, + ast_trait_ref, + selfty) + }); + tcx.impl_trait_refs.borrow_mut().insert(def_id, trait_ref); - for impl_item in impl_items { - if let hir::ImplItemKind::Method(ref sig, _) = impl_item.node { - // if the method specifies a visibility, use that, otherwise - // inherit the visibility from the impl (so `foo` in `pub impl - // { fn foo(); }` is public, but private in `impl { fn - // foo(); }`). - let method_vis = impl_item.vis.inherit_from(parent_visibility); - - convert_method(ccx, ImplContainer(def_id), - impl_item.name, impl_item.id, method_vis, - sig, selfty, &ty_generics, &ty_predicates); - } - } + // Subtle: before we store the predicates into the tcx, we + // sort them so that predicates like `T: Foo` come + // before uses of `U`. This avoids false ambiguity errors + // in trait checking. See `setup_constraining_predicates` + // for details. 
+ ctp::setup_constraining_predicates(&mut ty_predicates.predicates, + trait_ref, + &mut ctp::parameters_for_impl(selfty, trait_ref)); - enforce_impl_lifetimes_are_constrained(tcx, generics, def_id, impl_items); + tcx.predicates.borrow_mut().insert(def_id, ty_predicates.clone()); }, - hir::ItemTrait(_, _, _, ref trait_items) => { - let trait_def = trait_def_of_item(ccx, it); - let def_id = trait_def.trait_ref.def_id; + hir::ItemTrait(.., ref trait_items) => { + generics_of_def_id(ccx, def_id); + trait_def_of_item(ccx, it); let _: Result<(), ErrorReported> = // any error is already reported, can ignore ccx.ensure_super_predicates(it.span, def_id); convert_trait_predicates(ccx, it); - let trait_predicates = tcx.lookup_predicates(def_id); + let trait_predicates = tcx.item_predicates(def_id); debug!("convert: trait_bounds={:?}", trait_predicates); @@ -832,37 +800,27 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { // Convert all the associated constants. for trait_item in trait_items { - if let hir::ConstTraitItem(ref ty, ref default) = trait_item.node { + if let hir::ConstTraitItem(ref ty, _) = trait_item.node { + let const_def_id = ccx.tcx.map.local_def_id(trait_item.id); + generics_of_def_id(ccx, const_def_id); let ty = ccx.icx(&trait_predicates) .to_ty(&ExplicitRscope, ty); - tcx.register_item_type(ccx.tcx.map.local_def_id(trait_item.id), - TypeScheme { - generics: trait_def.generics.clone(), - ty: ty, - }); - convert_associated_const(ccx, - container, - trait_item.name, - trait_item.id, - hir::Public, - ty, - default.is_some()) + tcx.item_types.borrow_mut().insert(const_def_id, ty); + convert_associated_const(ccx, container, trait_item.id, ty) } } // Convert all the associated types. 
for trait_item in trait_items { if let hir::TypeTraitItem(_, ref opt_ty) = trait_item.node { + let type_def_id = ccx.tcx.map.local_def_id(trait_item.id); + generics_of_def_id(ccx, type_def_id); + let typ = opt_ty.as_ref().map({ |ty| ccx.icx(&trait_predicates).to_ty(&ExplicitRscope, &ty) }); - convert_associated_type(ccx, - container, - trait_item.name, - trait_item.id, - hir::Public, - typ); + convert_associated_type(ccx, container, trait_item.id, typ); } } @@ -871,261 +829,296 @@ fn convert_item(ccx: &CrateCtxt, it: &hir::Item) { if let hir::MethodTraitItem(ref sig, _) = trait_item.node { convert_method(ccx, container, - trait_item.name, trait_item.id, - hir::Inherited, sig, tcx.mk_self_type(), - &trait_def.generics, &trait_predicates); - } } - - // Add an entry mapping - let trait_item_def_ids = Rc::new(trait_items.iter().map(|trait_item| { - let def_id = ccx.tcx.map.local_def_id(trait_item.id); - match trait_item.node { - hir::ConstTraitItem(..) => ty::ConstTraitItemId(def_id), - hir::MethodTraitItem(..) => ty::MethodTraitItemId(def_id), - hir::TypeTraitItem(..) 
=> ty::TypeTraitItemId(def_id) - } - }).collect()); - tcx.trait_item_def_ids.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - trait_item_def_ids); }, - hir::ItemStruct(ref struct_def, _) => { - let (scheme, predicates) = convert_typed_item(ccx, it); - write_ty_to_tcx(tcx, it.id, scheme.ty); + hir::ItemStruct(ref struct_def, _) | + hir::ItemUnion(ref struct_def, _) => { + let ty = type_of_def_id(ccx, def_id); + let generics = generics_of_def_id(ccx, def_id); + let predicates = predicates_of_item(ccx, it); - let it_def_id = ccx.tcx.map.local_def_id(it.id); - let variant = tcx.lookup_adt_def_master(it_def_id).struct_variant(); + let variant = tcx.lookup_adt_def(def_id).struct_variant(); for (f, ty_f) in struct_def.fields().iter().zip(variant.fields.iter()) { - convert_field(ccx, &scheme.generics, &predicates, f, ty_f) + convert_field(ccx, generics, &predicates, f, ty_f) } if !struct_def.is_struct() { - convert_variant_ctor(tcx, struct_def.id(), variant, scheme, predicates); + convert_variant_ctor(ccx, struct_def.id(), variant, ty, predicates); } }, hir::ItemTy(_, ref generics) => { ensure_no_ty_param_bounds(ccx, it.span, generics, "type"); - let (scheme, _) = convert_typed_item(ccx, it); - write_ty_to_tcx(tcx, it.id, scheme.ty); + type_of_def_id(ccx, def_id); + generics_of_def_id(ccx, def_id); + predicates_of_item(ccx, it); }, _ => { - // This call populates the type cache with the converted type - // of the item in passing. All we have to do here is to write - // it into the node type table. 
- let (scheme, _) = convert_typed_item(ccx, it); - write_ty_to_tcx(tcx, it.id, scheme.ty); + type_of_def_id(ccx, def_id); + generics_of_def_id(ccx, def_id); + predicates_of_item(ccx, it); }, } } -fn convert_variant_ctor<'a, 'tcx>(tcx: &ty::ctxt<'tcx>, +fn convert_impl_item(ccx: &CrateCtxt, impl_item: &hir::ImplItem) { + let tcx = ccx.tcx; + + // we can lookup details about the impl because items are visited + // before impl-items + let impl_def_id = tcx.map.get_parent_did(impl_item.id); + let impl_predicates = tcx.item_predicates(impl_def_id); + let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); + let impl_self_ty = tcx.item_type(impl_def_id); + + match impl_item.node { + hir::ImplItemKind::Const(ref ty, _) => { + let const_def_id = ccx.tcx.map.local_def_id(impl_item.id); + generics_of_def_id(ccx, const_def_id); + let ty = ccx.icx(&impl_predicates) + .to_ty(&ExplicitRscope, &ty); + tcx.item_types.borrow_mut().insert(const_def_id, ty); + convert_associated_const(ccx, ImplContainer(impl_def_id), + impl_item.id, ty); + } + + hir::ImplItemKind::Type(ref ty) => { + let type_def_id = ccx.tcx.map.local_def_id(impl_item.id); + generics_of_def_id(ccx, type_def_id); + + if impl_trait_ref.is_none() { + span_err!(tcx.sess, impl_item.span, E0202, + "associated types are not allowed in inherent impls"); + } + + let typ = ccx.icx(&impl_predicates).to_ty(&ExplicitRscope, ty); + + convert_associated_type(ccx, ImplContainer(impl_def_id), impl_item.id, Some(typ)); + } + + hir::ImplItemKind::Method(ref sig, _) => { + convert_method(ccx, ImplContainer(impl_def_id), + impl_item.id, sig, impl_self_ty, + &impl_predicates); + } + } +} + +fn convert_variant_ctor<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, ctor_id: ast::NodeId, - variant: ty::VariantDef<'tcx>, - scheme: ty::TypeScheme<'tcx>, + variant: &'tcx ty::VariantDef, + ty: Ty<'tcx>, predicates: ty::GenericPredicates<'tcx>) { - let ctor_ty = match variant.kind() { - VariantKind::Unit | VariantKind::Struct => scheme.ty, - VariantKind::Tuple 
=> { + let tcx = ccx.tcx; + let def_id = tcx.map.local_def_id(ctor_id); + generics_of_def_id(ccx, def_id); + let ctor_ty = match variant.ctor_kind { + CtorKind::Fictive | CtorKind::Const => ty, + CtorKind::Fn => { let inputs: Vec<_> = variant.fields .iter() - .map(|field| field.unsubst_ty()) + .map(|field| tcx.item_type(field.did)) .collect(); - tcx.mk_ctor_fn(tcx.map.local_def_id(ctor_id), - &inputs[..], - scheme.ty) + let substs = mk_item_substs(&ccx.icx(&predicates), + ccx.tcx.map.span(ctor_id), def_id); + tcx.mk_fn_def(def_id, substs, tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: abi::Abi::Rust, + sig: ty::Binder(ty::FnSig { + inputs: inputs, + output: ty, + variadic: false + }) + })) } }; - write_ty_to_tcx(tcx, ctor_id, ctor_ty); + tcx.item_types.borrow_mut().insert(def_id, ctor_ty); tcx.predicates.borrow_mut().insert(tcx.map.local_def_id(ctor_id), predicates); - tcx.register_item_type(tcx.map.local_def_id(ctor_id), - TypeScheme { - generics: scheme.generics, - ty: ctor_ty - }); } fn convert_enum_variant_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - def: ty::AdtDefMaster<'tcx>, - scheme: ty::TypeScheme<'tcx>, + def: &'tcx ty::AdtDef, + ty: Ty<'tcx>, + generics: &'tcx ty::Generics<'tcx>, predicates: ty::GenericPredicates<'tcx>, variants: &[hir::Variant]) { // fill the field types for (variant, ty_variant) in variants.iter().zip(def.variants.iter()) { for (f, ty_f) in variant.node.data.fields().iter().zip(ty_variant.fields.iter()) { - convert_field(ccx, &scheme.generics, &predicates, f, ty_f) + convert_field(ccx, generics, &predicates, f, ty_f) } // Convert the ctor, if any. This also registers the variant as // an item. 
convert_variant_ctor( - ccx.tcx, + ccx, variant.node.data.id(), ty_variant, - scheme.clone(), + ty, predicates.clone() ); } } -fn convert_struct_variant<'tcx>(tcx: &ty::ctxt<'tcx>, - did: DefId, - name: ast::Name, - disr_val: ty::Disr, - def: &hir::VariantData) -> ty::VariantDefData<'tcx, 'tcx> { - let mut seen_fields: FnvHashMap = FnvHashMap(); +fn convert_struct_variant<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + did: DefId, + name: ast::Name, + disr_val: ty::Disr, + def: &hir::VariantData) + -> ty::VariantDef { + let mut seen_fields: FxHashMap = FxHashMap(); + let node_id = ccx.tcx.map.as_local_node_id(did).unwrap(); let fields = def.fields().iter().map(|f| { - let fid = tcx.map.local_def_id(f.node.id); - match f.node.kind { - hir::NamedField(name, vis) => { - let dup_span = seen_fields.get(&name).cloned(); - if let Some(prev_span) = dup_span { - let mut err = struct_span_err!(tcx.sess, f.span, E0124, - "field `{}` is already declared", - name); - span_note!(&mut err, prev_span, "previously declared here"); - err.emit(); - } else { - seen_fields.insert(name, f.span); - } + let fid = ccx.tcx.map.local_def_id(f.id); + let dup_span = seen_fields.get(&f.name).cloned(); + if let Some(prev_span) = dup_span { + struct_span_err!(ccx.tcx.sess, f.span, E0124, + "field `{}` is already declared", + f.name) + .span_label(f.span, &"field already declared") + .span_label(prev_span, &format!("`{}` first declared here", f.name)) + .emit(); + } else { + seen_fields.insert(f.name, f.span); + } - ty::FieldDefData::new(fid, name, vis) - }, - hir::UnnamedField(vis) => { - ty::FieldDefData::new(fid, special_idents::unnamed_field.name, vis) - } + ty::FieldDef { + did: fid, + name: f.name, + vis: ty::Visibility::from_hir(&f.vis, node_id, ccx.tcx) } }).collect(); - ty::VariantDefData { + ty::VariantDef { did: did, name: name, disr_val: disr_val, fields: fields, - kind: match *def { - hir::VariantData::Struct(..) => ty::VariantKind::Struct, - hir::VariantData::Tuple(..) 
=> ty::VariantKind::Tuple, - hir::VariantData::Unit(..) => ty::VariantKind::Unit, - } + ctor_kind: CtorKind::from_hir(def), } } -fn convert_struct_def<'tcx>(tcx: &ty::ctxt<'tcx>, - it: &hir::Item, - def: &hir::VariantData) - -> ty::AdtDefMaster<'tcx> +fn convert_struct_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::Item, + def: &hir::VariantData) + -> &'tcx ty::AdtDef { + let did = ccx.tcx.map.local_def_id(it.id); + // Use separate constructor id for unit/tuple structs and reuse did for braced structs. + let ctor_id = if !def.is_struct() { Some(ccx.tcx.map.local_def_id(def.id())) } else { None }; + let variants = vec![convert_struct_variant(ccx, ctor_id.unwrap_or(did), it.name, + ConstInt::Infer(0), def)]; + let adt = ccx.tcx.alloc_adt_def(did, AdtKind::Struct, variants); + if let Some(ctor_id) = ctor_id { + // Make adt definition available through constructor id as well. + ccx.tcx.adt_defs.borrow_mut().insert(ctor_id, adt); + } - let did = tcx.map.local_def_id(it.id); - let ctor_id = if !def.is_struct() { - tcx.map.local_def_id(def.id()) - } else { - did - }; - tcx.intern_adt_def( - did, - ty::AdtKind::Struct, - vec![convert_struct_variant(tcx, ctor_id, it.name, 0, def)] - ) + ccx.tcx.adt_defs.borrow_mut().insert(did, adt); + adt } -fn convert_enum_def<'tcx>(tcx: &ty::ctxt<'tcx>, - it: &hir::Item, - def: &hir::EnumDef) - -> ty::AdtDefMaster<'tcx> +fn convert_union_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::Item, + def: &hir::VariantData) + -> &'tcx ty::AdtDef { - fn evaluate_disr_expr<'tcx>(tcx: &ty::ctxt<'tcx>, - repr_ty: Ty<'tcx>, - e: &hir::Expr) -> Option { + let did = ccx.tcx.map.local_def_id(it.id); + let variants = vec![convert_struct_variant(ccx, did, it.name, ConstInt::Infer(0), def)]; + + let adt = ccx.tcx.alloc_adt_def(did, AdtKind::Union, variants); + ccx.tcx.adt_defs.borrow_mut().insert(did, adt); + adt +} + + fn evaluate_disr_expr(ccx: &CrateCtxt, repr_ty: attr::IntType, e: &hir::Expr) + -> Option { debug!("disr expr, checking {}", 
pprust::expr_to_string(e)); - let hint = UncheckedExprHint(repr_ty); - match const_eval::eval_const_expr_partial(tcx, e, hint, None) { - Ok(ConstVal::Int(val)) => Some(val as ty::Disr), - Ok(ConstVal::Uint(val)) => Some(val as ty::Disr), - Ok(_) => { - let sign_desc = if repr_ty.is_signed() { - "signed" - } else { - "unsigned" - }; - span_err!(tcx.sess, e.span, E0079, - "expected {} integer constant", - sign_desc); + let ty_hint = repr_ty.to_ty(ccx.tcx); + let print_err = |cv: ConstVal| { + struct_span_err!(ccx.tcx.sess, e.span, E0079, "mismatched types") + .note_expected_found(&"type", &ty_hint, &format!("{}", cv.description())) + .span_label(e.span, &format!("expected '{}' type", ty_hint)) + .emit(); + }; + + let hint = UncheckedExprHint(ty_hint); + match eval_const_expr_partial(ccx.tcx, e, hint, None) { + Ok(ConstVal::Integral(i)) => { + // FIXME: eval_const_expr_partial should return an error if the hint is wrong + match (repr_ty, i) { + (attr::SignedInt(ast::IntTy::I8), ConstInt::I8(_)) | + (attr::SignedInt(ast::IntTy::I16), ConstInt::I16(_)) | + (attr::SignedInt(ast::IntTy::I32), ConstInt::I32(_)) | + (attr::SignedInt(ast::IntTy::I64), ConstInt::I64(_)) | + (attr::SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) | + (attr::UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) | + (attr::UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) | + (attr::UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) | + (attr::UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) | + (attr::UnsignedInt(ast::UintTy::Us), ConstInt::Usize(_)) => Some(i), + (_, i) => { + print_err(ConstVal::Integral(i)); + None + }, + } + }, + Ok(cv) => { + print_err(cv); None }, + // enum variant evaluation happens before the global constant check + // so we need to report the real error Err(err) => { - let mut diag = struct_span_err!(tcx.sess, err.span, E0080, - "constant evaluation error: {}", - err.description()); - if !e.span.contains(err.span) { - diag.span_note(e.span, "for enum discriminant here"); - } + let 
mut diag = report_const_eval_err( + ccx.tcx, &err, e.span, "enum discriminant"); diag.emit(); None } } } - fn report_discrim_overflow(tcx: &ty::ctxt, - variant_span: Span, - variant_name: &str, - repr_type: attr::IntType, - prev_val: ty::Disr) { - let computed_value = repr_type.disr_wrap_incr(Some(prev_val)); - let computed_value = repr_type.disr_string(computed_value); - let prev_val = repr_type.disr_string(prev_val); - let repr_type = repr_type.to_ty(tcx); - span_err!(tcx.sess, variant_span, E0370, - "enum discriminant overflowed on value after {}: {}; \ - set explicitly via {} = {} if that is desired outcome", - prev_val, repr_type, variant_name, computed_value); - } - - fn next_disr(tcx: &ty::ctxt, - v: &hir::Variant, - repr_type: attr::IntType, - prev_disr_val: Option) -> Option { - if let Some(prev_disr_val) = prev_disr_val { - let result = repr_type.disr_incr(prev_disr_val); - if let None = result { - report_discrim_overflow(tcx, v.span, &v.node.name.as_str(), - repr_type, prev_disr_val); - } - result - } else { - Some(ty::INITIAL_DISCRIMINANT_VALUE) - } - } - fn convert_enum_variant<'tcx>(tcx: &ty::ctxt<'tcx>, - v: &hir::Variant, - disr: ty::Disr) - -> ty::VariantDefData<'tcx, 'tcx> - { - let did = tcx.map.local_def_id(v.node.data.id()); - let name = v.node.name; - convert_struct_variant(tcx, did, name, disr, &v.node.data) - } +fn convert_enum_def<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::Item, + def: &hir::EnumDef) + -> &'tcx ty::AdtDef +{ + let tcx = ccx.tcx; let did = tcx.map.local_def_id(it.id); let repr_hints = tcx.lookup_repr_hints(did); - let (repr_type, repr_type_ty) = tcx.enum_repr_type(repr_hints.get(0)); - let mut prev_disr = None; + let repr_type = tcx.enum_repr_type(repr_hints.get(0)); + let initial = repr_type.initial_discriminant(tcx); + let mut prev_disr = None::; let variants = def.variants.iter().map(|v| { - let disr = match v.node.disr_expr { - Some(ref e) => evaluate_disr_expr(tcx, repr_type_ty, e), - None => next_disr(tcx, v, 
repr_type, prev_disr) - }.unwrap_or(repr_type.disr_wrap_incr(prev_disr)); - - let v = convert_enum_variant(tcx, v, disr); + let wrapped_disr = prev_disr.map_or(initial, |d| d.wrap_incr()); + let disr = if let Some(ref e) = v.node.disr_expr { + evaluate_disr_expr(ccx, repr_type, e) + } else if let Some(disr) = repr_type.disr_incr(tcx, prev_disr) { + Some(disr) + } else { + struct_span_err!(tcx.sess, v.span, E0370, + "enum discriminant overflowed") + .span_label(v.span, &format!("overflowed on value after {}", prev_disr.unwrap())) + .note(&format!("explicitly set `{} = {}` if that is desired outcome", + v.node.name, wrapped_disr)) + .emit(); + None + }.unwrap_or(wrapped_disr); prev_disr = Some(disr); - v + + let did = tcx.map.local_def_id(v.node.data.id()); + convert_struct_variant(ccx, did, v.node.name, disr, &v.node.data) }).collect(); - tcx.intern_adt_def(tcx.map.local_def_id(it.id), ty::AdtKind::Enum, variants) + + let adt = tcx.alloc_adt_def(did, AdtKind::Enum, variants); + tcx.adt_defs.borrow_mut().insert(did, adt); + adt } /// Ensures that the super-predicates of the trait with def-id @@ -1158,33 +1151,38 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt, let superpredicates = superpredicates.unwrap_or_else(|| { let item = match ccx.tcx.map.get(trait_node_id) { hir_map::NodeItem(item) => item, - _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not an item", trait_node_id)) + _ => bug!("trait_node_id {} is not an item", trait_node_id) }; let (generics, bounds) = match item.node { hir::ItemTrait(_, ref generics, ref supertraits, _) => (generics, supertraits), - _ => tcx.sess.span_bug(item.span, - "ensure_super_predicates_step invoked on non-trait"), + _ => span_bug!(item.span, + "ensure_super_predicates_step invoked on non-trait"), }; // In-scope when converting the superbounds for `Trait` are // that `Self:Trait` as well as any bounds that appear on the // generic types: - let trait_def = trait_def_of_item(ccx, item); + generics_of_def_id(ccx, 
trait_def_id); + trait_def_of_item(ccx, item); + let trait_ref = ty::TraitRef { + def_id: trait_def_id, + substs: Substs::identity_for_item(tcx, trait_def_id) + }; let self_predicate = ty::GenericPredicates { - predicates: VecPerParamSpace::new(vec![], - vec![trait_def.trait_ref.to_predicate()], - vec![]) + parent: None, + predicates: vec![trait_ref.to_predicate()] }; let scope = &(generics, &self_predicate); // Convert the bounds that follow the colon, e.g. `Bar+Zed` in `trait Foo : Bar+Zed`. let self_param_ty = tcx.mk_self_type(); let superbounds1 = compute_bounds(&ccx.icx(scope), - self_param_ty, - bounds, - SizedByDefault::No, - item.span); + self_param_ty, + bounds, + SizedByDefault::No, + None, + item.span); let superbounds1 = superbounds1.predicates(tcx, self_param_ty); @@ -1195,7 +1193,8 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt, // Combine the two lists to form the complete set of superbounds: let superbounds = superbounds1.into_iter().chain(superbounds2).collect(); let superpredicates = ty::GenericPredicates { - predicates: VecPerParamSpace::new(superbounds, vec![], vec![]) + parent: None, + predicates: superbounds }; debug!("superpredicates for trait {:?} = {:?}", tcx.map.local_def_id(item.id), @@ -1217,154 +1216,79 @@ fn ensure_super_predicates_step(ccx: &CrateCtxt, def_ids } -fn trait_def_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - it: &hir::Item) - -> &'tcx ty::TraitDef<'tcx> -{ +fn trait_def_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) -> &'tcx ty::TraitDef { let def_id = ccx.tcx.map.local_def_id(it.id); let tcx = ccx.tcx; - if let Some(def) = tcx.trait_defs.borrow().get(&def_id) { - return def.clone(); - } - - let (unsafety, generics, items) = match it.node { - hir::ItemTrait(unsafety, ref generics, _, ref items) => (unsafety, generics, items), - _ => tcx.sess.span_bug(it.span, "trait_def_of_item invoked on non-trait"), - }; - - let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar"); - if paren_sugar && 
!ccx.tcx.sess.features.borrow().unboxed_closures { - let mut err = ccx.tcx.sess.struct_span_err( - it.span, - "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \ - which traits can use parenthetical notation"); - fileline_help!(&mut err, it.span, - "add `#![feature(unboxed_closures)]` to \ - the crate attributes to use it"); - err.emit(); - } - - let substs = ccx.tcx.mk_substs(mk_trait_substs(ccx, generics)); - - let ty_generics = ty_generics_for_trait(ccx, it.id, substs, generics); + tcx.trait_defs.memoize(def_id, || { + let unsafety = match it.node { + hir::ItemTrait(unsafety, ..) => unsafety, + _ => span_bug!(it.span, "trait_def_of_item invoked on non-trait"), + }; - let associated_type_names: Vec<_> = items.iter().filter_map(|trait_item| { - match trait_item.node { - hir::TypeTraitItem(..) => Some(trait_item.name), - _ => None, + let paren_sugar = tcx.has_attr(def_id, "rustc_paren_sugar"); + if paren_sugar && !ccx.tcx.sess.features.borrow().unboxed_closures { + let mut err = ccx.tcx.sess.struct_span_err( + it.span, + "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \ + which traits can use parenthetical notation"); + help!(&mut err, + "add `#![feature(unboxed_closures)]` to \ + the crate attributes to use it"); + err.emit(); } - }).collect(); - - let trait_ref = ty::TraitRef { - def_id: def_id, - substs: substs, - }; - - let trait_def = ty::TraitDef::new(unsafety, - paren_sugar, - ty_generics, - trait_ref, - associated_type_names); - - return tcx.intern_trait_def(trait_def); - - fn mk_trait_substs<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - generics: &hir::Generics) - -> Substs<'tcx> - { - let tcx = ccx.tcx; - - // Creates a no-op substitution for the trait's type parameters. 
- let regions = - generics.lifetimes - .iter() - .enumerate() - .map(|(i, def)| ty::ReEarlyBound(ty::EarlyBoundRegion { - space: TypeSpace, - index: i as u32, - name: def.lifetime.name - })) - .collect(); - - // Start with the generics in the type parameters... - let types: Vec<_> = - generics.ty_params - .iter() - .enumerate() - .map(|(i, def)| tcx.mk_param(TypeSpace, - i as u32, def.name)) - .collect(); - - // ...and also create the `Self` parameter. - let self_ty = tcx.mk_self_type(); - - Substs::new_trait(types, regions, self_ty) - } -} - -fn trait_defines_associated_type_named(ccx: &CrateCtxt, - trait_node_id: ast::NodeId, - assoc_name: ast::Name) - -> bool -{ - let item = match ccx.tcx.map.get(trait_node_id) { - hir_map::NodeItem(item) => item, - _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not an item", trait_node_id)) - }; - - let trait_items = match item.node { - hir::ItemTrait(_, _, _, ref trait_items) => trait_items, - _ => ccx.tcx.sess.bug(&format!("trait_node_id {} is not a trait", trait_node_id)) - }; - trait_items.iter().any(|trait_item| { - match trait_item.node { - hir::TypeTraitItem(..) 
=> trait_item.name == assoc_name, - _ => false, - } + let def_path_hash = tcx.def_path(def_id).deterministic_hash(tcx); + tcx.alloc_trait_def(ty::TraitDef::new(def_id, unsafety, paren_sugar, def_path_hash)) }) } fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) { let tcx = ccx.tcx; - let trait_def = trait_def_of_item(ccx, it); let def_id = ccx.tcx.map.local_def_id(it.id); + generics_of_def_id(ccx, def_id); + trait_def_of_item(ccx, it); + let (generics, items) = match it.node { hir::ItemTrait(_, ref generics, _, ref items) => (generics, items), ref s => { - tcx.sess.span_bug( + span_bug!( it.span, - &format!("trait_def_of_item invoked on {:?}", s)); + "trait_def_of_item invoked on {:?}", + s); } }; - let super_predicates = ccx.tcx.lookup_super_predicates(def_id); + let super_predicates = ccx.tcx.item_super_predicates(def_id); // `ty_generic_predicates` below will consider the bounds on the type // parameters (including `Self`) and the explicit where-clauses, // but to get the full set of predicates on a trait we need to add // in the supertrait bounds and anything declared on the // associated types. - let mut base_predicates = super_predicates; + let mut base_predicates = super_predicates.predicates; // Add in a predicate that `Self:Trait` (where `Trait` is the // current trait). This is needed for builtin bounds. 
- let self_predicate = trait_def.trait_ref.to_poly_trait_ref().to_predicate(); - base_predicates.predicates.push(SelfSpace, self_predicate); + let trait_ref = ty::TraitRef { + def_id: def_id, + substs: Substs::identity_for_item(tcx, def_id) + }; + let self_predicate = trait_ref.to_poly_trait_ref().to_predicate(); + base_predicates.push(self_predicate); // add in the explicit where-clauses let mut trait_predicates = - ty_generic_predicates(ccx, TypeSpace, generics, &base_predicates); + ty_generic_predicates(ccx, generics, None, base_predicates, true); let assoc_predicates = predicates_for_associated_types(ccx, generics, &trait_predicates, - trait_def.trait_ref, + trait_ref, items); - trait_predicates.predicates.extend(TypeSpace, assoc_predicates.into_iter()); + trait_predicates.predicates.extend(assoc_predicates); let prev_predicates = tcx.predicates.borrow_mut().insert(def_id, trait_predicates); assert!(prev_predicates.is_none()); @@ -1382,7 +1306,7 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) let bounds = match trait_item.node { hir::TypeTraitItem(ref bounds, _) => bounds, _ => { - return vec!().into_iter(); + return vec![].into_iter(); } }; @@ -1393,6 +1317,7 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) assoc_ty, bounds, SizedByDefault::Yes, + None, trait_item.span); bounds.predicates(ccx.tcx, assoc_ty).into_iter() @@ -1400,285 +1325,331 @@ fn convert_trait_predicates<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, it: &hir::Item) } } -fn type_scheme_of_def_id<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - def_id: DefId) - -> ty::TypeScheme<'tcx> -{ - if let Some(node_id) = ccx.tcx.map.as_local_node_id(def_id) { - match ccx.tcx.map.find(node_id) { - Some(hir_map::NodeItem(item)) => { - type_scheme_of_item(ccx, &*item) +fn generics_of_def_id<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId) + -> &'tcx ty::Generics<'tcx> { + let tcx = ccx.tcx; + let node_id = if let Some(id) = 
tcx.map.as_local_node_id(def_id) { + id + } else { + return tcx.item_generics(def_id); + }; + tcx.generics.memoize(def_id, || { + use rustc::hir::map::*; + use rustc::hir::*; + + let node = tcx.map.get(node_id); + let parent_def_id = match node { + NodeImplItem(_) | + NodeTraitItem(_) | + NodeVariant(_) | + NodeStructCtor(_) => { + let parent_id = tcx.map.get_parent(node_id); + Some(tcx.map.local_def_id(parent_id)) } - Some(hir_map::NodeForeignItem(foreign_item)) => { - let abi = ccx.tcx.map.get_foreign_abi(node_id); - type_scheme_of_foreign_item(ccx, &*foreign_item, abi) + NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) => { + Some(tcx.closure_base_def_id(def_id)) } - x => { - ccx.tcx.sess.bug(&format!("unexpected sort of node \ - in get_item_type_scheme(): {:?}", - x)); + NodeTy(&hir::Ty { node: hir::TyImplTrait(..), .. }) => { + let mut parent_id = node_id; + loop { + match tcx.map.get(parent_id) { + NodeItem(_) | NodeImplItem(_) | NodeTraitItem(_) => break, + _ => { + parent_id = tcx.map.get_parent_node(parent_id); + } + } + } + Some(tcx.map.local_def_id(parent_id)) } - } - } else { - ccx.tcx.lookup_item_type(def_id) - } -} + _ => None + }; -fn type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - item: &hir::Item) - -> ty::TypeScheme<'tcx> -{ - let item_def_id = ccx.tcx.map.local_def_id(item.id); - ccx.tcx.tcache.memoize(item_def_id, || { - // NB. Since the `memoized` function enters a new task, and we - // are giving this task access to the item `item`, we must - // register a read. 
- ccx.tcx.dep_graph.read(DepNode::Hir(item_def_id)); - compute_type_scheme_of_item(ccx, item) - }) -} + let mut opt_self = None; + let mut allow_defaults = false; -fn compute_type_scheme_of_item<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - it: &hir::Item) - -> ty::TypeScheme<'tcx> -{ - let tcx = ccx.tcx; - match it.node { - hir::ItemStatic(ref t, _, _) | hir::ItemConst(ref t, _) => { - let ty = ccx.icx(&()).to_ty(&ExplicitRscope, &**t); - ty::TypeScheme { ty: ty, generics: ty::Generics::empty() } - } - hir::ItemFn(ref decl, unsafety, _, abi, ref generics, _) => { - let ty_generics = ty_generics_for_fn(ccx, generics, &ty::Generics::empty()); - let tofd = astconv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &**decl); - let ty = tcx.mk_fn(Some(ccx.tcx.map.local_def_id(it.id)), tcx.mk_bare_fn(tofd)); - ty::TypeScheme { ty: ty, generics: ty_generics } - } - hir::ItemTy(ref t, ref generics) => { - let ty_generics = ty_generics_for_type_or_impl(ccx, generics); - let ty = ccx.icx(generics).to_ty(&ExplicitRscope, &**t); - ty::TypeScheme { ty: ty, generics: ty_generics } - } - hir::ItemEnum(ref ei, ref generics) => { - let ty_generics = ty_generics_for_type_or_impl(ccx, generics); - let substs = mk_item_substs(ccx, &ty_generics); - let def = convert_enum_def(tcx, it, ei); - let t = tcx.mk_enum(def, tcx.mk_substs(substs)); - ty::TypeScheme { ty: t, generics: ty_generics } - } - hir::ItemStruct(ref si, ref generics) => { - let ty_generics = ty_generics_for_type_or_impl(ccx, generics); - let substs = mk_item_substs(ccx, &ty_generics); - let def = convert_struct_def(tcx, it, si); - let t = tcx.mk_struct(def, tcx.mk_substs(substs)); - ty::TypeScheme { ty: t, generics: ty_generics } - } - hir::ItemDefaultImpl(..) | - hir::ItemTrait(..) | - hir::ItemImpl(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) | - hir::ItemExternCrate(..) | - hir::ItemUse(..) 
=> { - tcx.sess.span_bug( - it.span, - &format!("compute_type_scheme_of_item: unexpected item type: {:?}", - it.node)); - } - } -} + let no_generics = hir::Generics::empty(); + let ast_generics = match node { + NodeTraitItem(item) => { + match item.node { + MethodTraitItem(ref sig, _) => &sig.generics, + _ => &no_generics + } + } -fn convert_typed_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - it: &hir::Item) - -> (ty::TypeScheme<'tcx>, ty::GenericPredicates<'tcx>) -{ - let tcx = ccx.tcx; + NodeImplItem(item) => { + match item.node { + ImplItemKind::Method(ref sig, _) => &sig.generics, + _ => &no_generics + } + } - let tag = type_scheme_of_item(ccx, it); - let scheme = TypeScheme { generics: tag.generics, ty: tag.ty }; - let predicates = match it.node { - hir::ItemStatic(..) | hir::ItemConst(..) => { - ty::GenericPredicates::empty() - } - hir::ItemFn(_, _, _, _, ref ast_generics, _) => { - ty_generic_predicates_for_fn(ccx, ast_generics, &ty::GenericPredicates::empty()) - } - hir::ItemTy(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemEnum(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemStruct(_, ref generics) => { - ty_generic_predicates_for_type_or_impl(ccx, generics) - } - hir::ItemDefaultImpl(..) | - hir::ItemTrait(..) | - hir::ItemExternCrate(..) | - hir::ItemUse(..) | - hir::ItemImpl(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) => { - tcx.sess.span_bug( - it.span, - &format!("compute_type_scheme_of_item: unexpected item type: {:?}", - it.node)); - } - }; + NodeItem(item) => { + match item.node { + ItemFn(.., ref generics, _) | + ItemImpl(_, _, ref generics, ..) 
=> generics, + + ItemTy(_, ref generics) | + ItemEnum(_, ref generics) | + ItemStruct(_, ref generics) | + ItemUnion(_, ref generics) => { + allow_defaults = true; + generics + } - let prev_predicates = tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - predicates.clone()); - assert!(prev_predicates.is_none()); + ItemTrait(_, ref generics, ..) => { + // Add in the self type parameter. + // + // Something of a hack: use the node id for the trait, also as + // the node id for the Self type parameter. + let param_id = item.id; + + let parent = ccx.tcx.map.get_parent(param_id); + + let def = ty::TypeParameterDef { + index: 0, + name: keywords::SelfType.name(), + def_id: tcx.map.local_def_id(param_id), + default_def_id: tcx.map.local_def_id(parent), + default: None, + object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault, + pure_wrt_drop: false, + }; + tcx.ty_param_defs.borrow_mut().insert(param_id, def.clone()); + opt_self = Some(def); + + allow_defaults = true; + generics + } - // Debugging aid. - if tcx.has_attr(ccx.tcx.map.local_def_id(it.id), "rustc_object_lifetime_default") { - let object_lifetime_default_reprs: String = - scheme.generics.types.iter() - .map(|t| match t.object_lifetime_default { - ty::ObjectLifetimeDefault::Specific(r) => r.to_string(), - d => format!("{:?}", d), - }) - .collect::>() - .join(","); - - tcx.sess.span_err(it.span, &object_lifetime_default_reprs); - } + _ => &no_generics + } + } - return (scheme, predicates); -} + NodeForeignItem(item) => { + match item.node { + ForeignItemStatic(..) => &no_generics, + ForeignItemFn(_, ref generics) => generics + } + } -fn type_scheme_of_foreign_item<'a, 'tcx>( - ccx: &CrateCtxt<'a, 'tcx>, - item: &hir::ForeignItem, - abi: abi::Abi) - -> ty::TypeScheme<'tcx> -{ - let item_def_id = ccx.tcx.map.local_def_id(item.id); - ccx.tcx.tcache.memoize(item_def_id, || { - // NB. 
Since the `memoized` function enters a new task, and we - // are giving this task access to the item `item`, we must - // register a read. - ccx.tcx.dep_graph.read(DepNode::Hir(item_def_id)); - compute_type_scheme_of_foreign_item(ccx, item, abi) - }) -} + _ => &no_generics + }; -fn compute_type_scheme_of_foreign_item<'a, 'tcx>( - ccx: &CrateCtxt<'a, 'tcx>, - it: &hir::ForeignItem, - abi: abi::Abi) - -> ty::TypeScheme<'tcx> -{ - match it.node { - hir::ForeignItemFn(ref fn_decl, ref generics) => { - compute_type_scheme_of_foreign_fn_decl(ccx, fn_decl, generics, abi) - } - hir::ForeignItemStatic(ref t, _) => { - ty::TypeScheme { - generics: ty::Generics::empty(), - ty: ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, t) + let has_self = opt_self.is_some(); + let mut parent_has_self = false; + let mut own_start = has_self as u32; + let (parent_regions, parent_types) = parent_def_id.map_or((0, 0), |def_id| { + let generics = generics_of_def_id(ccx, def_id); + assert_eq!(has_self, false); + parent_has_self = generics.has_self; + own_start = generics.count() as u32; + (generics.parent_regions + generics.regions.len() as u32, + generics.parent_types + generics.types.len() as u32) + }); + + let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics); + let regions = early_lifetimes.iter().enumerate().map(|(i, l)| { + ty::RegionParameterDef { + name: l.lifetime.name, + index: own_start + i as u32, + def_id: tcx.map.local_def_id(l.lifetime.id), + bounds: l.bounds.iter().map(|l| { + ast_region_to_region(tcx, l) + }).collect(), + pure_wrt_drop: l.pure_wrt_drop, } + }).collect::>(); + + // Now create the real type parameters. 
+ let type_start = own_start + regions.len() as u32; + let types = ast_generics.ty_params.iter().enumerate().map(|(i, p)| { + let i = type_start + i as u32; + get_or_create_type_parameter_def(ccx, ast_generics, i, p, allow_defaults) + }); + let mut types: Vec<_> = opt_self.into_iter().chain(types).collect(); + + // provide junk type parameter defs - the only place that + // cares about anything but the length is instantiation, + // and we don't do that for closures. + if let NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. }) = node { + tcx.with_freevars(node_id, |fv| { + types.extend(fv.iter().enumerate().map(|(i, _)| ty::TypeParameterDef { + index: type_start + i as u32, + name: Symbol::intern(""), + def_id: def_id, + default_def_id: parent_def_id.unwrap(), + default: None, + object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault, + pure_wrt_drop: false, + })); + }); } - } -} - -fn convert_foreign_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - it: &hir::ForeignItem) -{ - // For reasons I cannot fully articulate, I do so hate the AST - // map, and I regard each time that I use it as a personal and - // moral failing, but at the moment it seems like the only - // convenient way to extract the ABI. - ndm - let tcx = ccx.tcx; - let abi = tcx.map.get_foreign_abi(it.id); - - let scheme = type_scheme_of_foreign_item(ccx, it, abi); - write_ty_to_tcx(ccx.tcx, it.id, scheme.ty); - let predicates = match it.node { - hir::ForeignItemFn(_, ref generics) => { - ty_generic_predicates_for_fn(ccx, generics, &ty::GenericPredicates::empty()) - } - hir::ForeignItemStatic(..) => { - ty::GenericPredicates::empty() + // Debugging aid. 
+ if tcx.has_attr(def_id, "rustc_object_lifetime_default") { + let object_lifetime_default_reprs: String = + types.iter().map(|t| { + match t.object_lifetime_default { + ty::ObjectLifetimeDefault::Specific(r) => r.to_string(), + d => format!("{:?}", d), + } + }).collect::>().join(","); + tcx.sess.span_err(tcx.map.span(node_id), &object_lifetime_default_reprs); } - }; - - let prev_predicates = tcx.predicates.borrow_mut().insert(ccx.tcx.map.local_def_id(it.id), - predicates); - assert!(prev_predicates.is_none()); -} - -fn ty_generics_for_type_or_impl<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - generics: &hir::Generics) - -> ty::Generics<'tcx> { - ty_generics(ccx, TypeSpace, generics, &ty::Generics::empty()) -} -fn ty_generic_predicates_for_type_or_impl<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics) - -> ty::GenericPredicates<'tcx> -{ - ty_generic_predicates(ccx, TypeSpace, generics, &ty::GenericPredicates::empty()) + tcx.alloc_generics(ty::Generics { + parent: parent_def_id, + parent_regions: parent_regions, + parent_types: parent_types, + regions: regions, + types: types, + has_self: has_self || parent_has_self + }) + }) } -fn ty_generics_for_trait<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - trait_id: ast::NodeId, - substs: &'tcx Substs<'tcx>, - ast_generics: &hir::Generics) - -> ty::Generics<'tcx> -{ - debug!("ty_generics_for_trait(trait_id={:?}, substs={:?})", - ccx.tcx.map.local_def_id(trait_id), substs); +fn type_of_def_id<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId) + -> Ty<'tcx> { + let node_id = if let Some(id) = ccx.tcx.map.as_local_node_id(def_id) { + id + } else { + return ccx.tcx.item_type(def_id); + }; + ccx.tcx.item_types.memoize(def_id, || { + use rustc::hir::map::*; + use rustc::hir::*; + + // Alway bring in generics, as computing the type needs them. + generics_of_def_id(ccx, def_id); + + let ty = match ccx.tcx.map.get(node_id) { + NodeItem(item) => { + match item.node { + ItemStatic(ref t, ..) 
| ItemConst(ref t, _) => { + ccx.icx(&()).to_ty(&StaticRscope::new(&ccx.tcx), &t) + } + ItemFn(ref decl, unsafety, _, abi, ref generics, _) => { + let tofd = AstConv::ty_of_bare_fn(&ccx.icx(generics), unsafety, abi, &decl, + Some(AnonTypeScope::new(def_id))); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_fn_def(def_id, substs, tofd) + } + ItemTy(ref t, ref generics) => { + ccx.icx(generics).to_ty(&ExplicitRscope, &t) + } + ItemEnum(ref ei, ref generics) => { + let def = convert_enum_def(ccx, item, ei); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemStruct(ref si, ref generics) => { + let def = convert_struct_def(ccx, item, si); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemUnion(ref un, ref generics) => { + let def = convert_union_def(ccx, item, un); + let substs = mk_item_substs(&ccx.icx(generics), item.span, def_id); + ccx.tcx.mk_adt(def, substs) + } + ItemDefaultImpl(..) | + ItemTrait(..) | + ItemImpl(..) | + ItemMod(..) | + ItemForeignMod(..) | + ItemExternCrate(..) | + ItemUse(..) => { + span_bug!( + item.span, + "compute_type_of_item: unexpected item type: {:?}", + item.node); + } + } + } + NodeForeignItem(foreign_item) => { + let abi = ccx.tcx.map.get_foreign_abi(node_id); - let mut generics = ty_generics_for_type_or_impl(ccx, ast_generics); + match foreign_item.node { + ForeignItemFn(ref fn_decl, ref generics) => { + compute_type_of_foreign_fn_decl( + ccx, ccx.tcx.map.local_def_id(foreign_item.id), + fn_decl, generics, abi) + } + ForeignItemStatic(ref t, _) => { + ccx.icx(&()).to_ty(&ExplicitRscope, t) + } + } + } + NodeExpr(&hir::Expr { node: hir::ExprClosure(..), .. 
}) => { + ccx.tcx.mk_closure(def_id, Substs::for_item( + ccx.tcx, def_id, + |def, _| { + let region = def.to_early_bound_region_data(); + ccx.tcx.mk_region(ty::ReEarlyBound(region)) + }, + |def, _| ccx.tcx.mk_param_from_def(def) + )) + } + x => { + bug!("unexpected sort of node in type_of_def_id(): {:?}", x); + } + }; - // Add in the self type parameter. - // - // Something of a hack: use the node id for the trait, also as - // the node id for the Self type parameter. - let param_id = trait_id; + ty + }) +} - let parent = ccx.tcx.map.get_parent(param_id); +fn predicates_of_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::Item) + -> ty::GenericPredicates<'tcx> { + let def_id = ccx.tcx.map.local_def_id(it.id); - let def = ty::TypeParameterDef { - space: SelfSpace, - index: 0, - name: special_idents::type_self.name, - def_id: ccx.tcx.map.local_def_id(param_id), - default_def_id: ccx.tcx.map.local_def_id(parent), - default: None, - object_lifetime_default: ty::ObjectLifetimeDefault::BaseDefault, + let no_generics = hir::Generics::empty(); + let generics = match it.node { + hir::ItemFn(.., ref generics, _) | + hir::ItemTy(_, ref generics) | + hir::ItemEnum(_, ref generics) | + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => generics, + _ => &no_generics }; - ccx.tcx.ty_param_defs.borrow_mut().insert(param_id, def.clone()); - - generics.types.push(SelfSpace, def); + let predicates = ty_generic_predicates(ccx, generics, None, vec![], false); + let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id, + predicates.clone()); + assert!(prev_predicates.is_none()); - return generics; + predicates } -fn ty_generics_for_fn<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics, - base_generics: &ty::Generics<'tcx>) - -> ty::Generics<'tcx> +fn convert_foreign_item<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + it: &hir::ForeignItem) { - ty_generics(ccx, FnSpace, generics, base_generics) -} + // For reasons I cannot fully articulate, I do so 
hate the AST + // map, and I regard each time that I use it as a personal and + // moral failing, but at the moment it seems like the only + // convenient way to extract the ABI. - ndm + let def_id = ccx.tcx.map.local_def_id(it.id); + type_of_def_id(ccx, def_id); + generics_of_def_id(ccx, def_id); -fn ty_generic_predicates_for_fn<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - generics: &hir::Generics, - base_predicates: &ty::GenericPredicates<'tcx>) - -> ty::GenericPredicates<'tcx> -{ - ty_generic_predicates(ccx, FnSpace, generics, base_predicates) + let no_generics = hir::Generics::empty(); + let generics = match it.node { + hir::ForeignItemFn(_, ref generics) => generics, + hir::ForeignItemStatic(..) => &no_generics + }; + + let predicates = ty_generic_predicates(ccx, generics, None, vec![], false); + let prev_predicates = ccx.tcx.predicates.borrow_mut().insert(def_id, predicates); + assert!(prev_predicates.is_none()); } -// Add the Sized bound, unless the type parameter is marked as `?Sized`. -fn add_unsized_bound<'tcx>(astconv: &AstConv<'tcx>, - bounds: &mut ty::BuiltinBounds, - ast_bounds: &[hir::TyParamBound], - span: Span) +// Is it marked with ?Sized +fn is_unsized<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, + ast_bounds: &[hir::TyParamBound], + span: Span) -> bool { let tcx = astconv.tcx(); @@ -1701,24 +1672,23 @@ fn add_unsized_bound<'tcx>(astconv: &AstConv<'tcx>, match unbound { Some(ref tpb) => { // FIXME(#8559) currently requires the unbound to be built-in. - let trait_def_id = tcx.trait_ref_to_def_id(tpb); - match kind_id { - Ok(kind_id) if trait_def_id != kind_id => { + if let Ok(kind_id) = kind_id { + if tpb.path.def != Def::Trait(kind_id) { tcx.sess.span_warn(span, "default bound relaxed for a type parameter, but \ this does nothing because the given bound is not \ a default. 
Only `?Sized` is supported"); - tcx.try_add_builtin_trait(kind_id, bounds); } - _ => {} } } _ if kind_id.is_ok() => { - tcx.try_add_builtin_trait(kind_id.unwrap(), bounds); + return false; } // No lang item for Sized, so we can't add it as a bound. None => {} } + + true } /// Returns the early-bound lifetimes declared in this generics @@ -1726,66 +1696,88 @@ fn add_unsized_bound<'tcx>(astconv: &AstConv<'tcx>, /// the lifetimes that are declared. For fns or methods, we have to /// screen out those that do not appear in any where-clauses etc using /// `resolve_lifetime::early_bound_lifetimes`. -fn early_bound_lifetimes_from_generics(space: ParamSpace, - ast_generics: &hir::Generics) - -> Vec +fn early_bound_lifetimes_from_generics<'a, 'tcx, 'hir>( + ccx: &CrateCtxt<'a, 'tcx>, + ast_generics: &'hir hir::Generics) + -> Vec<&'hir hir::LifetimeDef> { - match space { - SelfSpace | TypeSpace => ast_generics.lifetimes.to_vec(), - FnSpace => resolve_lifetime::early_bound_lifetimes(ast_generics), - } + ast_generics + .lifetimes + .iter() + .filter(|l| !ccx.tcx.named_region_map.late_bound.contains_key(&l.lifetime.id)) + .collect() } fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - space: ParamSpace, ast_generics: &hir::Generics, - base_predicates: &ty::GenericPredicates<'tcx>) + parent: Option, + super_predicates: Vec>, + has_self: bool) -> ty::GenericPredicates<'tcx> { let tcx = ccx.tcx; - let mut result = base_predicates.clone(); - - // Collect the predicates that were written inline by the user on each - // type parameter (e.g., ``). 
- for (index, param) in ast_generics.ty_params.iter().enumerate() { - let index = index as u32; - let param_ty = ty::ParamTy::new(space, index, param.name).to_ty(ccx.tcx); - let bounds = compute_bounds(&ccx.icx(&(base_predicates, ast_generics)), - param_ty, - ¶m.bounds, - SizedByDefault::Yes, - param.span); - let predicates = bounds.predicates(ccx.tcx, param_ty); - result.predicates.extend(space, predicates.into_iter()); - } + let parent_count = parent.map_or(0, |def_id| { + let generics = generics_of_def_id(ccx, def_id); + assert_eq!(generics.parent, None); + assert_eq!(generics.parent_regions, 0); + assert_eq!(generics.parent_types, 0); + generics.count() as u32 + }); + let ref base_predicates = match parent { + Some(def_id) => { + assert_eq!(super_predicates, vec![]); + tcx.item_predicates(def_id) + } + None => { + ty::GenericPredicates { + parent: None, + predicates: super_predicates.clone() + } + } + }; + let mut predicates = super_predicates; // Collect the region predicates that were declared inline as // well. In the case of parameters declared on a fn or method, we // have to be careful to only iterate over early-bound regions. 
- let early_lifetimes = early_bound_lifetimes_from_generics(space, ast_generics); + let own_start = parent_count + has_self as u32; + let early_lifetimes = early_bound_lifetimes_from_generics(ccx, ast_generics); for (index, param) in early_lifetimes.iter().enumerate() { - let index = index as u32; - let region = - ty::ReEarlyBound(ty::EarlyBoundRegion { - space: space, - index: index, - name: param.lifetime.name - }); + let index = own_start + index as u32; + let region = ccx.tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { + index: index, + name: param.lifetime.name + })); for bound in ¶m.bounds { let bound_region = ast_region_to_region(ccx.tcx, bound); let outlives = ty::Binder(ty::OutlivesPredicate(region, bound_region)); - result.predicates.push(space, outlives.to_predicate()); + predicates.push(outlives.to_predicate()); } } + // Collect the predicates that were written inline by the user on each + // type parameter (e.g., ``). + let type_start = own_start + early_lifetimes.len() as u32; + for (index, param) in ast_generics.ty_params.iter().enumerate() { + let index = type_start + index as u32; + let param_ty = ty::ParamTy::new(index, param.name).to_ty(ccx.tcx); + let bounds = compute_bounds(&ccx.icx(&(base_predicates, ast_generics)), + param_ty, + ¶m.bounds, + SizedByDefault::Yes, + None, + param.span); + predicates.extend(bounds.predicates(ccx.tcx, param_ty)); + } + // Add in the bounds that appear in the where-clause let where_clause = &ast_generics.where_clause; for predicate in &where_clause.predicates { match predicate { &hir::WherePredicate::BoundPredicate(ref bound_pred) => { - let ty = ast_ty_to_ty(&ccx.icx(&(base_predicates, ast_generics)), - &ExplicitRscope, - &*bound_pred.bounded_ty); + let ty = AstConv::ast_ty_to_ty(&ccx.icx(&(base_predicates, ast_generics)), + &ExplicitRscope, + &bound_pred.bounded_ty); for bound in bound_pred.bounds.iter() { match bound { @@ -1793,22 +1785,24 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, let 
mut projections = Vec::new(); let trait_ref = - conv_poly_trait_ref(&ccx.icx(&(base_predicates, ast_generics)), - ty, - poly_trait_ref, - &mut projections); + AstConv::instantiate_poly_trait_ref(&ccx.icx(&(base_predicates, + ast_generics)), + &ExplicitRscope, + poly_trait_ref, + ty, + &mut projections); - result.predicates.push(space, trait_ref.to_predicate()); + predicates.push(trait_ref.to_predicate()); for projection in &projections { - result.predicates.push(space, projection.to_predicate()); + predicates.push(projection.to_predicate()); } } &hir::TyParamBound::RegionTyParamBound(ref lifetime) => { let region = ast_region_to_region(tcx, lifetime); let pred = ty::Binder(ty::OutlivesPredicate(ty, region)); - result.predicates.push(space, ty::Predicate::TypeOutlives(pred)) + predicates.push(ty::Predicate::TypeOutlives(pred)) } } } @@ -1819,96 +1813,40 @@ fn ty_generic_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, for bound in ®ion_pred.bounds { let r2 = ast_region_to_region(tcx, bound); let pred = ty::Binder(ty::OutlivesPredicate(r1, r2)); - result.predicates.push(space, ty::Predicate::RegionOutlives(pred)) + predicates.push(ty::Predicate::RegionOutlives(pred)) } } &hir::WherePredicate::EqPredicate(ref eq_pred) => { // FIXME(#20041) - tcx.sess.span_bug(eq_pred.span, - "Equality constraints are not yet \ - implemented (#20041)") + span_bug!(eq_pred.span, + "Equality constraints are not yet \ + implemented (#20041)") } } } - return result; -} - -fn ty_generics<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, - space: ParamSpace, - ast_generics: &hir::Generics, - base_generics: &ty::Generics<'tcx>) - -> ty::Generics<'tcx> -{ - let tcx = ccx.tcx; - let mut result = base_generics.clone(); - - let early_lifetimes = early_bound_lifetimes_from_generics(space, ast_generics); - for (i, l) in early_lifetimes.iter().enumerate() { - let bounds = l.bounds.iter() - .map(|l| ast_region_to_region(tcx, l)) - .collect(); - let def = ty::RegionParameterDef { name: l.lifetime.name, - space: space, 
- index: i as u32, - def_id: ccx.tcx.map.local_def_id(l.lifetime.id), - bounds: bounds }; - result.regions.push(space, def); - } - - assert!(result.types.is_empty_in(space)); - - // Now create the real type parameters. - for i in 0..ast_generics.ty_params.len() { - let def = get_or_create_type_parameter_def(ccx, ast_generics, space, i as u32); - debug!("ty_generics: def for type param: {:?}, {:?}", def, space); - result.types.push(space, def); - } - - result -} - -fn convert_default_type_parameter<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - path: &P, - space: ParamSpace, - index: u32) - -> Ty<'tcx> -{ - let ty = ast_ty_to_ty(&ccx.icx(&()), &ExplicitRscope, &path); - - for leaf_ty in ty.walk() { - if let ty::TyParam(p) = leaf_ty.sty { - if p.space == space && p.idx >= index { - span_err!(ccx.tcx.sess, path.span, E0128, - "type parameters with a default cannot use \ - forward declared identifiers"); - - return ccx.tcx.types.err - } - } + ty::GenericPredicates { + parent: parent, + predicates: predicates } - - ty } fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, ast_generics: &hir::Generics, - space: ParamSpace, - index: u32) + index: u32, + param: &hir::TyParam, + allow_defaults: bool) -> ty::TypeParameterDef<'tcx> { - let param = &ast_generics.ty_params[index as usize]; - let tcx = ccx.tcx; match tcx.ty_param_defs.borrow().get(¶m.id) { Some(d) => { return d.clone(); } None => { } } - let default = param.default.as_ref().map( - |def| convert_default_type_parameter(ccx, def, space, index) - ); + let default = + param.default.as_ref().map(|def| ccx.icx(&()).to_ty(&ExplicitRscope, def)); let object_lifetime_default = compute_object_lifetime_default(ccx, param.id, @@ -1916,29 +1854,35 @@ fn get_or_create_type_parameter_def<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, let parent = tcx.map.get_parent(param.id); - if space != TypeSpace && default.is_some() { + if !allow_defaults && default.is_some() { if !tcx.sess.features.borrow().default_type_parameter_fallback { 
tcx.sess.add_lint( lint::builtin::INVALID_TYPE_PARAM_DEFAULT, param.id, param.span, - format!("defaults for type parameters are only allowed on type definitions, \ - like `struct` or `enum`")); + format!("defaults for type parameters are only allowed in `struct`, \ + `enum`, `type`, or `trait` definitions.")); } } let def = ty::TypeParameterDef { - space: space, index: index, name: param.name, def_id: ccx.tcx.map.local_def_id(param.id), default_def_id: ccx.tcx.map.local_def_id(parent), default: default, object_lifetime_default: object_lifetime_default, + pure_wrt_drop: param.pure_wrt_drop, }; + if def.name == keywords::SelfType.name() { + span_bug!(param.span, "`Self` should not be the name of a regular parameter"); + } + tcx.ty_param_defs.borrow_mut().insert(param.id, def.clone()); + debug!("get_or_create_type_parameter_def: def for type param: {:?}", def); + def } @@ -1952,13 +1896,13 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, param_id: ast::NodeId, param_bounds: &[hir::TyParamBound], where_clause: &hir::WhereClause) - -> ty::ObjectLifetimeDefault + -> ty::ObjectLifetimeDefault<'tcx> { let inline_bounds = from_bounds(ccx, param_bounds); let where_bounds = from_predicates(ccx, param_id, &where_clause.predicates); - let all_bounds: HashSet<_> = inline_bounds.into_iter() - .chain(where_bounds) - .collect(); + let all_bounds: FxHashSet<_> = inline_bounds.into_iter() + .chain(where_bounds) + .collect(); return if all_bounds.len() > 1 { ty::ObjectLifetimeDefault::Ambiguous } else if all_bounds.len() == 0 { @@ -1970,7 +1914,7 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, fn from_bounds<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, bounds: &[hir::TyParamBound]) - -> Vec + -> Vec<&'tcx ty::Region> { bounds.iter() .filter_map(|bound| { @@ -1978,7 +1922,7 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, hir::TraitTyParamBound(..) 
=> None, hir::RegionTyParamBound(ref lifetime) => - Some(astconv::ast_region_to_region(ccx.tcx, lifetime)), + Some(ast_region_to_region(ccx.tcx, lifetime)), } }) .collect() @@ -1987,7 +1931,7 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, fn from_predicates<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, param_id: ast::NodeId, predicates: &[hir::WherePredicate]) - -> Vec + -> Vec<&'tcx ty::Region> { predicates.iter() .flat_map(|predicate| { @@ -2011,34 +1955,53 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, } } -enum SizedByDefault { Yes, No, } +pub enum SizedByDefault { Yes, No, } /// Translate the AST's notion of ty param bounds (which are an enum consisting of a newtyped Ty or /// a region) to ty's notion of ty param bounds, which can either be user-defined traits, or the /// built-in trait (formerly known as kind): Send. -fn compute_bounds<'tcx>(astconv: &AstConv<'tcx>, - param_ty: ty::Ty<'tcx>, - ast_bounds: &[hir::TyParamBound], - sized_by_default: SizedByDefault, - span: Span) - -> astconv::Bounds<'tcx> +pub fn compute_bounds<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, + param_ty: ty::Ty<'tcx>, + ast_bounds: &[hir::TyParamBound], + sized_by_default: SizedByDefault, + anon_scope: Option, + span: Span) + -> Bounds<'tcx> { - let mut bounds = - conv_param_bounds(astconv, - span, - param_ty, - ast_bounds); - - if let SizedByDefault::Yes = sized_by_default { - add_unsized_bound(astconv, - &mut bounds.builtin_bounds, - ast_bounds, - span); - } + let tcx = astconv.tcx(); + let PartitionedBounds { + trait_bounds, + region_bounds + } = partition_bounds(&ast_bounds); + + let mut projection_bounds = vec![]; + + let rscope = MaybeWithAnonTypes::new(ExplicitRscope, anon_scope); + let mut trait_bounds: Vec<_> = trait_bounds.iter().map(|&bound| { + astconv.instantiate_poly_trait_ref(&rscope, + bound, + param_ty, + &mut projection_bounds) + }).collect(); + + let region_bounds = region_bounds.into_iter().map(|r| { + 
ast_region_to_region(tcx, r) + }).collect(); - bounds.trait_bounds.sort_by(|a,b| a.def_id().cmp(&b.def_id())); + trait_bounds.sort_by(|a,b| a.def_id().cmp(&b.def_id())); - bounds + let implicitly_sized = if let SizedByDefault::Yes = sized_by_default { + !is_unsized(astconv, ast_bounds, span) + } else { + false + }; + + Bounds { + region_bounds: region_bounds, + implicitly_sized: implicitly_sized, + trait_bounds: trait_bounds, + projection_bounds: projection_bounds, + } } /// Converts a specific TyParamBound from the AST into a set of @@ -2046,7 +2009,7 @@ fn compute_bounds<'tcx>(astconv: &AstConv<'tcx>, /// because this can be anywhere from 0 predicates (`T:?Sized` adds no /// predicates) to 1 (`T:Foo`) to many (`T:Bar` adds `T:Bar` /// and `::X == i32`). -fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx>, +fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx, 'tcx>, param_ty: Ty<'tcx>, bound: &hir::TyParamBound) -> Vec> @@ -2054,7 +2017,10 @@ fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx>, match *bound { hir::TraitTyParamBound(ref tr, hir::TraitBoundModifier::None) => { let mut projections = Vec::new(); - let pred = conv_poly_trait_ref(astconv, param_ty, tr, &mut projections); + let pred = astconv.instantiate_poly_trait_ref(&ExplicitRscope, + tr, + param_ty, + &mut projections); projections.into_iter() .map(|p| p.to_predicate()) .chain(Some(pred.to_predicate())) @@ -2071,228 +2037,71 @@ fn predicates_from_bound<'tcx>(astconv: &AstConv<'tcx>, } } -fn conv_poly_trait_ref<'tcx>(astconv: &AstConv<'tcx>, - param_ty: Ty<'tcx>, - trait_ref: &hir::PolyTraitRef, - projections: &mut Vec>) - -> ty::PolyTraitRef<'tcx> -{ - astconv::instantiate_poly_trait_ref(astconv, - &ExplicitRscope, - trait_ref, - Some(param_ty), - projections) -} - -fn conv_param_bounds<'a,'tcx>(astconv: &AstConv<'tcx>, - span: Span, - param_ty: ty::Ty<'tcx>, - ast_bounds: &[hir::TyParamBound]) - -> astconv::Bounds<'tcx> -{ - let tcx = astconv.tcx(); - let astconv::PartitionedBounds { - 
builtin_bounds, - trait_bounds, - region_bounds - } = astconv::partition_bounds(tcx, span, &ast_bounds); - - let mut projection_bounds = Vec::new(); - - let trait_bounds: Vec = - trait_bounds.iter() - .map(|bound| conv_poly_trait_ref(astconv, - param_ty, - *bound, - &mut projection_bounds)) - .collect(); - - let region_bounds: Vec = - region_bounds.into_iter() - .map(|r| ast_region_to_region(tcx, r)) - .collect(); - - astconv::Bounds { - region_bounds: region_bounds, - builtin_bounds: builtin_bounds, - trait_bounds: trait_bounds, - projection_bounds: projection_bounds, - } -} - -fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>( +fn compute_type_of_foreign_fn_decl<'a, 'tcx>( ccx: &CrateCtxt<'a, 'tcx>, + def_id: DefId, decl: &hir::FnDecl, ast_generics: &hir::Generics, abi: abi::Abi) - -> ty::TypeScheme<'tcx> + -> Ty<'tcx> { - for i in &decl.inputs { - match (*i).pat.node { - hir::PatIdent(_, _, _) => (), - hir::PatWild => (), - _ => { - span_err!(ccx.tcx.sess, (*i).pat.span, E0130, - "patterns aren't allowed in foreign function declarations"); - } - } - } - - let ty_generics = ty_generics_for_fn(ccx, ast_generics, &ty::Generics::empty()); - let rb = BindingRscope::new(); let input_tys = decl.inputs .iter() - .map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None)) - .collect(); + .map(|a| AstConv::ty_of_arg(&ccx.icx(ast_generics), &rb, a, None)) + .collect::>(); let output = match decl.output { hir::Return(ref ty) => - ty::FnConverging(ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &**ty)), + AstConv::ast_ty_to_ty(&ccx.icx(ast_generics), &rb, &ty), hir::DefaultReturn(..) => - ty::FnConverging(ccx.tcx.mk_nil()), - hir::NoReturn(..) 
=> - ty::FnDiverging + ccx.tcx.mk_nil(), }; - let t_fn = ccx.tcx.mk_fn(None, - ccx.tcx.mk_bare_fn(ty::BareFnTy { - abi: abi, - unsafety: hir::Unsafety::Unsafe, - sig: ty::Binder(ty::FnSig {inputs: input_tys, - output: output, - variadic: decl.variadic}), - })); - - ty::TypeScheme { - generics: ty_generics, - ty: t_fn - } -} - -fn mk_item_substs<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, - ty_generics: &ty::Generics<'tcx>) - -> Substs<'tcx> -{ - let types = - ty_generics.types.map( - |def| ccx.tcx.mk_param_from_def(def)); - - let regions = - ty_generics.regions.map( - |def| def.to_early_bound_region()); - - Substs::new(types, regions) -} - -/// Checks that all the type parameters on an impl -fn enforce_impl_params_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>, - ast_generics: &hir::Generics, - impl_predicates: &mut ty::GenericPredicates<'tcx>, - impl_def_id: DefId) -{ - let impl_scheme = tcx.lookup_item_type(impl_def_id); - let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); - - assert!(impl_predicates.predicates.is_empty_in(FnSpace)); - assert!(impl_predicates.predicates.is_empty_in(SelfSpace)); - - // The trait reference is an input, so find all type parameters - // reachable from there, to start (if this is an inherent impl, - // then just examine the self type). 
- let mut input_parameters: HashSet<_> = - ctp::parameters_for_type(impl_scheme.ty, false).into_iter().collect(); - if let Some(ref trait_ref) = impl_trait_ref { - input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false)); - } - - ctp::setup_constraining_predicates(tcx, - impl_predicates.predicates.get_mut_slice(TypeSpace), - impl_trait_ref, - &mut input_parameters); - - for (index, ty_param) in ast_generics.ty_params.iter().enumerate() { - let param_ty = ty::ParamTy { space: TypeSpace, - idx: index as u32, - name: ty_param.name }; - if !input_parameters.contains(&ctp::Parameter::Type(param_ty)) { - report_unused_parameter(tcx, ty_param.span, "type", ¶m_ty.to_string()); + // feature gate SIMD types in FFI, since I (huonw) am not sure the + // ABIs are handled at all correctly. + if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic + && !ccx.tcx.sess.features.borrow().simd_ffi { + let check = |ast_ty: &hir::Ty, ty: ty::Ty| { + if ty.is_simd() { + ccx.tcx.sess.struct_span_err(ast_ty.span, + &format!("use of SIMD type `{}` in FFI is highly experimental and \ + may result in invalid code", + pprust::ty_to_string(ast_ty))) + .help("add #![feature(simd_ffi)] to the crate attributes to enable") + .emit(); + } + }; + for (input, ty) in decl.inputs.iter().zip(&input_tys) { + check(&input.ty, ty) } - } -} - -fn enforce_impl_lifetimes_are_constrained<'tcx>(tcx: &ty::ctxt<'tcx>, - ast_generics: &hir::Generics, - impl_def_id: DefId, - impl_items: &[hir::ImplItem]) -{ - // Every lifetime used in an associated type must be constrained. 
- let impl_scheme = tcx.lookup_item_type(impl_def_id); - let impl_predicates = tcx.lookup_predicates(impl_def_id); - let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); - - let mut input_parameters: HashSet<_> = - ctp::parameters_for_type(impl_scheme.ty, false).into_iter().collect(); - if let Some(ref trait_ref) = impl_trait_ref { - input_parameters.extend(ctp::parameters_for_trait_ref(trait_ref, false)); - } - ctp::identify_constrained_type_params(tcx, - &impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters); - - let lifetimes_in_associated_types: HashSet<_> = - impl_items.iter() - .map(|item| tcx.impl_or_trait_item(tcx.map.local_def_id(item.id))) - .filter_map(|item| match item { - ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty, - ty::ConstTraitItem(..) | ty::MethodTraitItem(..) => None - }) - .flat_map(|ty| ctp::parameters_for_type(ty, true)) - .filter_map(|p| match p { - ctp::Parameter::Type(_) => None, - ctp::Parameter::Region(r) => Some(r), - }) - .collect(); - - for (index, lifetime_def) in ast_generics.lifetimes.iter().enumerate() { - let region = ty::EarlyBoundRegion { space: TypeSpace, - index: index as u32, - name: lifetime_def.lifetime.name }; - if - lifetimes_in_associated_types.contains(®ion) && // (*) - !input_parameters.contains(&ctp::Parameter::Region(region)) - { - report_unused_parameter(tcx, lifetime_def.lifetime.span, - "lifetime", ®ion.name.to_string()); + if let hir::Return(ref ty) = decl.output { + check(&ty, output) } } - // (*) This is a horrible concession to reality. I think it'd be - // better to just ban unconstrianed lifetimes outright, but in - // practice people do non-hygenic macros like: - // - // ``` - // macro_rules! __impl_slice_eq1 { - // ($Lhs: ty, $Rhs: ty, $Bound: ident) => { - // impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq { - // .... 
- // } - // } - // } - // ``` - // - // In a concession to backwards compatbility, we continue to - // permit those, so long as the lifetimes aren't used in - // associated types. I believe this is sound, because lifetimes - // used elsewhere are not projected back out. + let id = ccx.tcx.map.as_local_node_id(def_id).unwrap(); + let substs = mk_item_substs(&ccx.icx(ast_generics), ccx.tcx.map.span(id), def_id); + ccx.tcx.mk_fn_def(def_id, substs, ccx.tcx.mk_bare_fn(ty::BareFnTy { + abi: abi, + unsafety: hir::Unsafety::Unsafe, + sig: ty::Binder(ty::FnSig {inputs: input_tys, + output: output, + variadic: decl.variadic}), + })) } -fn report_unused_parameter(tcx: &ty::ctxt, - span: Span, - kind: &str, - name: &str) -{ - span_err!(tcx.sess, span, E0207, - "the {} parameter `{}` is not constrained by the \ - impl trait, self type, or predicates", - kind, name); +pub fn mk_item_substs<'gcx: 'tcx, 'tcx>(astconv: &AstConv<'gcx, 'tcx>, + span: Span, + def_id: DefId) + -> &'tcx Substs<'tcx> { + let tcx = astconv.tcx(); + // FIXME(eddyb) Do this request from Substs::for_item in librustc. + if let Err(ErrorReported) = astconv.get_generics(span, def_id) { + // No convenient way to recover from a cycle here. Just bail. Sorry! + tcx.sess.abort_if_errors(); + bug!("ErrorReported returned, but no errors reports?") + } + + Substs::identity_for_item(tcx, def_id) } diff --git a/src/librustc_typeck/constrained_type_params.rs b/src/librustc_typeck/constrained_type_params.rs index 9abe101e2d9d6..22be4491273ef 100644 --- a/src/librustc_typeck/constrained_type_params.rs +++ b/src/librustc_typeck/constrained_type_params.rs @@ -8,98 +8,90 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::subst; -use middle::ty::{self, Ty}; - -use std::collections::HashSet; +use rustc::ty::{self, Ty}; +use rustc::ty::fold::{TypeFoldable, TypeVisitor}; +use rustc::util::nodemap::FxHashSet; #[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub enum Parameter { - Type(ty::ParamTy), - Region(ty::EarlyBoundRegion), +pub struct Parameter(pub u32); + +impl From for Parameter { + fn from(param: ty::ParamTy) -> Self { Parameter(param.idx) } +} + +impl From for Parameter { + fn from(param: ty::EarlyBoundRegion) -> Self { Parameter(param.index) } +} + +/// Return the set of parameters constrained by the impl header. +pub fn parameters_for_impl<'tcx>(impl_self_ty: Ty<'tcx>, + impl_trait_ref: Option>) + -> FxHashSet +{ + let vec = match impl_trait_ref { + Some(tr) => parameters_for(&tr, false), + None => parameters_for(&impl_self_ty, false), + }; + vec.into_iter().collect() } /// If `include_projections` is false, returns the list of parameters that are -/// constrained by the type `ty` - i.e. the value of each parameter in the list is -/// uniquely determined by `ty` (see RFC 447). If it is true, return the list +/// constrained by `t` - i.e. the value of each parameter in the list is +/// uniquely determined by `t` (see RFC 447). If it is true, return the list /// of parameters whose values are needed in order to constrain `ty` - these /// differ, with the latter being a superset, in the presence of projections. -pub fn parameters_for_type<'tcx>(ty: Ty<'tcx>, - include_projections: bool) -> Vec { - let mut result = vec![]; - ty.maybe_walk(|t| match t.sty { - ty::TyProjection(..) if !include_projections => { +pub fn parameters_for<'tcx, T>(t: &T, + include_nonconstraining: bool) + -> Vec + where T: TypeFoldable<'tcx> +{ - false // projections are not injective. - } - _ => { - result.append(&mut parameters_for_type_shallow(t)); - // non-projection type constructors are injective. 
- true - } - }); - result + let mut collector = ParameterCollector { + parameters: vec![], + include_nonconstraining: include_nonconstraining + }; + t.visit_with(&mut collector); + collector.parameters } -pub fn parameters_for_trait_ref<'tcx>(trait_ref: &ty::TraitRef<'tcx>, - include_projections: bool) -> Vec { - let mut region_parameters = - parameters_for_regions_in_substs(&trait_ref.substs); - - let type_parameters = - trait_ref.substs - .types - .iter() - .flat_map(|ty| parameters_for_type(ty, include_projections)); - - region_parameters.extend(type_parameters); - - region_parameters +struct ParameterCollector { + parameters: Vec, + include_nonconstraining: bool } -fn parameters_for_type_shallow<'tcx>(ty: Ty<'tcx>) -> Vec { - match ty.sty { - ty::TyParam(ref d) => - vec![Parameter::Type(d.clone())], - ty::TyRef(region, _) => - parameters_for_region(region).into_iter().collect(), - ty::TyStruct(_, substs) | - ty::TyEnum(_, substs) => - parameters_for_regions_in_substs(substs), - ty::TyTrait(ref data) => - parameters_for_regions_in_substs(&data.principal.skip_binder().substs), - ty::TyProjection(ref pi) => - parameters_for_regions_in_substs(&pi.trait_ref.substs), - ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) | - ty::TyFloat(..) | ty::TyBox(..) | ty::TyStr | - ty::TyArray(..) | ty::TySlice(..) | ty::TyBareFn(..) | - ty::TyTuple(..) | ty::TyRawPtr(..) | - ty::TyInfer(..) | ty::TyClosure(..) | ty::TyError => - vec![] - } -} +impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { + fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { + match t.sty { + ty::TyProjection(..) | ty::TyAnon(..) 
if !self.include_nonconstraining => { + // projections are not injective + return false; + } + ty::TyParam(data) => { + self.parameters.push(Parameter::from(data)); + } + _ => {} + } -fn parameters_for_regions_in_substs(substs: &subst::Substs) -> Vec { - substs.regions() - .iter() - .filter_map(|r| parameters_for_region(r)) - .collect() -} + t.super_visit_with(self) + } -fn parameters_for_region(region: &ty::Region) -> Option { - match *region { - ty::ReEarlyBound(data) => Some(Parameter::Region(data)), - _ => None, + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { + ty::ReEarlyBound(data) => { + self.parameters.push(Parameter::from(data)); + } + _ => {} + } + false } } -pub fn identify_constrained_type_params<'tcx>(_tcx: &ty::ctxt<'tcx>, - predicates: &[ty::Predicate<'tcx>], +pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FxHashSet) { let mut predicates = predicates.to_owned(); - setup_constraining_predicates(_tcx, &mut predicates, impl_trait_ref, input_parameters); + setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters); } @@ -143,10 +135,9 @@ pub fn identify_constrained_type_params<'tcx>(_tcx: &ty::ctxt<'tcx>, /// which is determined by 1, which requires `U`, that is determined /// by 0. I should probably pick a less tangled example, but I can't /// think of any. 
-pub fn setup_constraining_predicates<'tcx>(_tcx: &ty::ctxt<'tcx>, - predicates: &mut [ty::Predicate<'tcx>], +pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FxHashSet) { // The canonical way of doing the needed topological sort // would be a DFS, but getting the graph and its ownership @@ -167,13 +158,15 @@ pub fn setup_constraining_predicates<'tcx>(_tcx: &ty::ctxt<'tcx>, // * ::Item = T // * T: Debug // * U: Iterator + debug!("setup_constraining_predicates: predicates={:?} \ + impl_trait_ref={:?} input_parameters={:?}", + predicates, impl_trait_ref, input_parameters); let mut i = 0; let mut changed = true; while changed { changed = false; for j in i..predicates.len() { - if let ty::Predicate::Projection(ref poly_projection) = predicates[j] { // Note that we can skip binder here because the impl // trait ref never contains any late-bound regions. @@ -193,12 +186,12 @@ pub fn setup_constraining_predicates<'tcx>(_tcx: &ty::ctxt<'tcx>, // Then the projection only applies if `T` is known, but it still // does not determine `U`. 
- let inputs = parameters_for_trait_ref(&projection.projection_ty.trait_ref, true); + let inputs = parameters_for(&projection.projection_ty.trait_ref, true); let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p)); if !relies_only_on_inputs { continue; } - input_parameters.extend(parameters_for_type(projection.ty, false)); + input_parameters.extend(parameters_for(&projection.ty, false)); } else { continue; } @@ -207,5 +200,8 @@ pub fn setup_constraining_predicates<'tcx>(_tcx: &ty::ctxt<'tcx>, i += 1; changed = true; } + debug!("setup_constraining_predicates: predicates={:?} \ + i={} impl_trait_ref={:?} input_parameters={:?}", + predicates, i, impl_trait_ref, input_parameters); } } diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index 55a1021f0fb94..01e99a296e886 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -19,64 +19,54 @@ extract an incorrect number of fields from a variant. ``` enum Fruit { - Apple(String, String) - Pear(u32) + Apple(String, String), + Pear(u32), } ``` Here the `Apple` variant has two fields, and should be matched against like so: ``` -// Correct. -match x { - Apple(a, b) => ... +enum Fruit { + Apple(String, String), + Pear(u32), } -``` -Matching with the wrong number of fields has no sensible interpretation: +let x = Fruit::Apple(String::new(), String::new()); -``` -// Incorrect. +// Correct. match x { - Apple(a) => ..., - Apple(a, b, c) => ... + Fruit::Apple(a, b) => {}, + _ => {} } ``` -Check how many fields the enum was declared with and ensure that your pattern -uses the same number. -"##, - -E0024: r##" -This error indicates that a pattern attempted to extract the fields of an enum -variant with no fields. Here's a tiny example of this error: +Matching with the wrong number of fields has no sensible interpretation: -``` -// This enum has two variants. -enum Number { - // This variant has no fields. 
- Zero, - // This variant has one field. - One(u32) +```compile_fail,E0023 +enum Fruit { + Apple(String, String), + Pear(u32), } -// Assuming x is a Number we can pattern match on its contents. +let x = Fruit::Apple(String::new(), String::new()); + +// Incorrect. match x { - Zero(inside) => ..., - One(inside) => ... + Fruit::Apple(a) => {}, + Fruit::Apple(a, b, c) => {}, } ``` -The pattern match `Zero(inside)` is incorrect because the `Zero` variant -contains no fields, yet the `inside` name attempts to bind the first field of -the enum. +Check how many fields the enum was declared with and ensure that your pattern +uses the same number. "##, E0025: r##" Each field of a struct can only be bound once in a pattern. Erroneous code example: -``` +```compile_fail,E0025 struct Foo { a: u8, b: u8, @@ -122,23 +112,42 @@ struct Thing { } let thing = Thing { x: 1, y: 2 }; + match thing { - Thing { x: xfield, y: yfield } => ... + Thing { x: xfield, y: yfield } => {} } ``` If you are using shorthand field patterns but want to refer to the struct field by a different name, you should rename it explicitly. -``` -// Change this: +Change this: + +```compile_fail,E0026 +struct Thing { + x: u32, + y: u32 +} + +let thing = Thing { x: 0, y: 0 }; + match thing { - Thing { x, z } => ... + Thing { x, z } => {} +} +``` + +To this: + +``` +struct Thing { + x: u32, + y: u32 } -// To this: +let thing = Thing { x: 0, y: 0 }; + match thing { - Thing { x, y: z } => ... + Thing { x, y: z } => {} } ``` "##, @@ -150,27 +159,37 @@ definition is mentioned in the pattern, or use `..` to ignore unwanted fields. For example: -``` +```compile_fail,E0027 struct Dog { name: String, - age: u32 + age: u32, } let d = Dog { name: "Rusty".to_string(), age: 8 }; // This is incorrect. match d { - Dog { age: x } => ... + Dog { age: x } => {} +} +``` + +This is correct (explicit): + +``` +struct Dog { + name: String, + age: u32, } -// This is correct (explicit). 
+let d = Dog { name: "Rusty".to_string(), age: 8 }; + match d { - Dog { name: n, age: x } => ... + Dog { name: ref n, age: x } => {} } // This is also correct (ignore unused fields). match d { - Dog { age: x, .. } => ... + Dog { age: x, .. } => {} } ``` "##, @@ -182,18 +201,20 @@ compile-time, and is unable to evaluate arbitrary comparison functions. If you want to capture values of an orderable type between two end-points, you can use a guard. -``` +```compile_fail,E0029 +let string = "salutations !"; + // The ordering relation for strings can't be evaluated at compile time, // so this doesn't work: match string { - "hello" ... "world" => ... - _ => ... + "hello" ... "world" => {} + _ => {} } // This is a more general version, using a guard: match string { - s if s >= "hello" && s <= "world" => ... - _ => ... + s if s >= "hello" && s <= "world" => {} + _ => {} } ``` "##, @@ -205,7 +226,7 @@ size of trait implementors isn't fixed, this type has no compile-time size. Therefore, all accesses to trait types must be through pointers. If you encounter this error you should try to avoid dereferencing the pointer. -``` +```ignore let trait_obj: &SomeTrait = ...; // This tries to implicitly dereference to create an unsized local variable. @@ -224,9 +245,9 @@ https://doc.rust-lang.org/reference.html#trait-objects E0034: r##" The compiler doesn't know what method to call because more than one method -has the same prototype. Example: +has the same prototype. Erroneous code example: -``` +```compile_fail,E0034 struct Test; trait Trait1 { @@ -283,12 +304,37 @@ fn main() { ::foo() } ``` + +One last example: + +``` +trait F { + fn m(&self); +} + +trait G { + fn m(&self); +} + +struct X; + +impl F for X { fn m(&self) { println!("I am F"); } } +impl G for X { fn m(&self) { println!("I am G"); } } + +fn main() { + let f = X; + + F::m(&f); // it displays "I am F" + G::m(&f); // it displays "I am G" +} +``` "##, E0035: r##" -You tried to give a type parameter where it wasn't needed. 
Bad example: +You tried to give a type parameter where it wasn't needed. Erroneous code +example: -``` +```compile_fail,E0035 struct Test; impl Test { @@ -321,9 +367,9 @@ fn main() { E0036: r##" This error occurrs when you pass too many or not enough type parameters to -a method. Example: +a method. Erroneous code example: -``` +```compile_fail,E0036 struct Test; impl Test { @@ -334,7 +380,7 @@ impl Test { fn main() { let x = Test; - let v = &[0i32]; + let v = &[0]; x.method::(v); // error: only one type parameter is expected! } @@ -353,7 +399,7 @@ impl Test { fn main() { let x = Test; - let v = &[0i32]; + let v = &[0]; x.method::(v); // OK, we're good! } @@ -361,7 +407,7 @@ fn main() { Please note on the last example that we could have called `method` like this: -``` +```ignore x.method(v); ``` "##, @@ -373,7 +419,7 @@ out of scope. Here's an example of this error: -``` +```compile_fail,E0040 struct Foo { x: i32, } @@ -394,7 +440,7 @@ fn main() { E0044: r##" You can't use type parameters on foreign items. Example of erroneous code: -``` +```compile_fail,E0044 extern { fn some_func(x: T); } ``` @@ -412,17 +458,19 @@ Rust only supports variadic parameters for interoperability with C code in its FFI. As such, variadic parameters can only be used with functions which are using the C ABI. Examples of erroneous code: -``` +```compile_fail +#![feature(unboxed_closures)] + extern "rust-call" { fn foo(x: u8, ...); } + // or + fn foo(x: u8, ...) {} ``` To fix such code, put them in an extern "C" block: ``` -extern "C" fn foo (x: u8, ...); -// or: extern "C" { fn foo (x: u8, ...); } @@ -432,7 +480,7 @@ extern "C" { E0046: r##" Items are missing in a trait implementation. Erroneous code example: -``` +```compile_fail,E0046 trait Foo { fn foo(); } @@ -468,7 +516,7 @@ has the wrong number of type parameters. 
For example, the trait below has a method `foo` with a type parameter `T`, but the implementation of `foo` for the type `Bar` is missing this parameter: -``` +```compile_fail,E0049 trait Foo { fn foo(x: T) -> Self; } @@ -491,7 +539,7 @@ For example, the trait below has a method `foo` with two function parameters (`&self` and `u8`), but the implementation of `foo` for the type `Bar` omits the `u8` parameter: -``` +```compile_fail,E0050 trait Foo { fn foo(&self, x: u8) -> bool; } @@ -512,7 +560,7 @@ and the trait definition. Here are a couple examples of this error: -``` +```compile_fail,E0053 trait Foo { fn foo(x: u16); fn bar(&self); @@ -524,7 +572,7 @@ impl Foo for Bar { // error, expected u16, found i16 fn foo(x: i16) { } - // error, values differ in mutability + // error, types differ in mutability fn bar(&mut self) { } } ``` @@ -534,14 +582,18 @@ E0054: r##" It is not allowed to cast to a bool. If you are trying to cast a numeric type to a bool, you can compare it with zero instead: +```compile_fail,E0054 +let x = 5; + +// Not allowed, won't compile +let x_is_nonzero = x as bool; +``` + ``` let x = 5; // Ok let x_is_nonzero = x != 0; - -// Not allowed, won't compile -let x_is_nonzero = x as bool; ``` "##, @@ -553,7 +605,7 @@ recursion limit (which can be set via the `recursion_limit` attribute). For a somewhat artificial example: -``` +```compile_fail,E0055 #![recursion_limit="2"] struct Foo; @@ -583,7 +635,7 @@ function must match its definition. An example using a closure: -``` +```compile_fail,E0057 let f = |x| x * 3; let a = f(); // invalid, too few parameters let b = f(4); // this works! @@ -609,13 +661,17 @@ implemented by closures. 
The most likely source of this error is using angle-bracket notation without wrapping the function argument type into a tuple, for example: -``` +```compile_fail,E0059 +#![feature(unboxed_closures)] + fn foo>(f: F) -> F::Output { f(3) } ``` It can be fixed by adjusting the trait bound like this: ``` +#![feature(unboxed_closures)] + fn foo>(f: F) -> F::Output { f(3) } ``` @@ -628,7 +684,7 @@ External C functions are allowed to be variadic. However, a variadic function takes a minimum number of arguments. For example, consider C's variadic `printf` function: -``` +```ignore extern crate libc; use libc::{ c_char, c_int }; @@ -640,7 +696,7 @@ extern "C" { Using this declaration, it must be called with at least one argument, so simply calling `printf()` is invalid. But the following uses are allowed: -``` +```ignore unsafe { use std::ffi::CString; @@ -655,15 +711,15 @@ E0061: r##" The number of arguments passed to a function must match the number of arguments specified in the function signature. -For example, a function like +For example, a function like: ``` fn f(a: u16, b: &str) {} ``` -must always be called with exactly two arguments, e.g. `f(2, "test")`. +Must always be called with exactly two arguments, e.g. `f(2, "test")`. -Note, that Rust does not have a notion of optional function arguments or +Note that Rust does not have a notion of optional function arguments or variadic functions (except for its C-FFI). "##, @@ -672,9 +728,9 @@ This error indicates that during an attempt to build a struct or struct-like enum variant, one of the fields was specified more than once. Erroneous code example: -``` +```compile_fail,E0062 struct Foo { - x: i32 + x: i32, } fn main() { @@ -689,7 +745,7 @@ Each field should be specified exactly one time. Example: ``` struct Foo { - x: i32 + x: i32, } fn main() { @@ -702,10 +758,10 @@ E0063: r##" This error indicates that during an attempt to build a struct or struct-like enum variant, one of the fields was not provided. 
Erroneous code example: -``` +```compile_fail,E0063 struct Foo { x: i32, - y: i32 + y: i32, } fn main() { @@ -718,7 +774,7 @@ Each field should be specified exactly once. Example: ``` struct Foo { x: i32, - y: i32 + y: i32, } fn main() { @@ -743,9 +799,9 @@ expression. An lvalue expression represents a memory location and includes item paths (ie, namespaced variables), dereferences, indexing expressions, and field references. -Let's start with some bad examples: +Let's start with some erroneous code examples: -``` +```compile_fail,E0067 use std::collections::LinkedList; // Bad: assignment to non-lvalue expression @@ -758,7 +814,7 @@ fn some_func(i: &mut i32) { } ``` -And now some good examples: +And now some working examples: ``` let mut i : i32 = 0; @@ -777,7 +833,7 @@ E0069: r##" The compiler found a function whose body contains a `return;` statement but whose return type is not `()`. An example of this is: -``` +```compile_fail,E0069 // error fn foo() -> u8 { return; @@ -797,13 +853,14 @@ reference. More details can be found here: https://doc.rust-lang.org/reference.html#lvalues-rvalues-and-temporaries -Now, we can go further. Here are some bad examples: +Now, we can go further. Here are some erroneous code examples: -``` +```compile_fail,E0070 struct SomeStruct { x: i32, y: i32 } + const SOME_CONST : i32 = 12; fn some_other_func() {} @@ -817,7 +874,7 @@ fn some_function() { } ``` -And now let's give good examples: +And now let's give working examples: ``` struct SomeStruct { @@ -838,17 +895,14 @@ fn some_func(x: &mut i32) { E0071: r##" You tried to use structure-literal syntax to create an item that is -not a struct-style structure or enum variant. +not a structure or enum variant. Example of erroneous code: -``` -enum Foo { FirstValue(i32) }; - -let u = Foo::FirstValue { value: 0i32 }; // error: Foo::FirstValue - // isn't a structure! -// or even simpler, if the name doesn't refer to a structure at all. 
-let t = u32 { value: 4 }; // error: `u32` does not name a structure. +```compile_fail,E0071 +type U32 = u32; +let t = U32 { value: 4 }; // error: expected struct, variant or union type, + // found builtin type `u32` ``` To fix this, ensure that the name was correctly spelled, and that @@ -876,7 +930,7 @@ first instance of `Foo` could be made to initialize another instance! Here's an example of a struct that has this problem: -``` +```ignore struct Foo { x: Box } // error ``` @@ -895,12 +949,22 @@ tuple struct must all be of a concrete, nongeneric type so the compiler can reason about how to use SIMD with them. This error will occur if the types are generic. +This will cause an error: + +```ignore +#![feature(repr_simd)] + +#[repr(simd)] +struct Bad(T, T, T); +``` + +This will not: + ``` -#[simd] -struct Bad(T, T, T); // This will cause an error +#![feature(repr_simd)] -#[simd] -struct Good(u32, u32, u32); // This will not +#[repr(simd)] +struct Good(u32, u32, u32); ``` "##, @@ -909,12 +973,22 @@ The `#[simd]` attribute can only be applied to non empty tuple structs, because it doesn't make sense to try to use SIMD operations when there are no values to operate on. +This will cause an error: + +```compile_fail,E0075 +#![feature(repr_simd)] + +#[repr(simd)] +struct Bad; +``` + +This will not: + ``` -#[simd] -struct Bad; // This will cause an error +#![feature(repr_simd)] -#[simd] -struct Good(u32); // This will not +#[repr(simd)] +struct Good(u32); ``` "##, @@ -923,26 +997,45 @@ When using the `#[simd]` attribute to automatically use SIMD operations in tuple struct, the types in the struct must all be of the same type, or the compiler will trigger this error. 
+This will cause an error: + +```compile_fail,E0076 +#![feature(repr_simd)] + +#[repr(simd)] +struct Bad(u16, u32, u32); ``` -#[simd] -struct Bad(u16, u32, u32); // This will cause an error -#[simd] -struct Good(u32, u32, u32); // This will not +This will not: + ``` +#![feature(repr_simd)] +#[repr(simd)] +struct Good(u32, u32, u32); +``` "##, E0077: r##" When using the `#[simd]` attribute on a tuple struct, the elements in the tuple must be machine types so SIMD operations can be applied to them. +This will cause an error: + +```compile_fail,E0077 +#![feature(repr_simd)] + +#[repr(simd)] +struct Bad(String); +``` + +This will not: + ``` -#[simd] -struct Bad(String); // This will cause an error +#![feature(repr_simd)] -#[simd] -struct Good(u32, u32, u32); // This will not +#[repr(simd)] +struct Good(u32, u32, u32); ``` "##, @@ -951,26 +1044,26 @@ Enum variants which contain no data can be given a custom integer representation. This error indicates that the value provided is not an integer literal and is therefore invalid. -For example, in the following code, +For example, in the following code: -``` +```compile_fail,E0079 enum Foo { - Q = "32" + Q = "32", } ``` -we try to set the representation to a string. +We try to set the representation to a string. There's no general fix for this; if you can work with an integer then just set it to one: ``` enum Foo { - Q = 32 + Q = 32, } ``` -however if you actually wanted a mapping between variants and non-integer +However if you actually wanted a mapping between variants and non-integer objects, it may be preferable to use a method with a match instead: ``` @@ -985,43 +1078,26 @@ impl Foo { ``` "##, -E0080: r##" -This error indicates that the compiler was unable to sensibly evaluate an -integer expression provided as an enum discriminant. Attempting to divide by 0 -or causing integer overflow are two ways to induce this error. 
For example: - -``` -enum Enum { - X = (1 << 500), - Y = (1 / 0) -} -``` - -Ensure that the expressions given can be evaluated as the desired integer type. -See the FFI section of the Reference for more information about using a custom -integer type: - -https://doc.rust-lang.org/reference.html#ffi-attributes -"##, - E0081: r##" Enum discriminants are used to differentiate enum variants stored in memory. This error indicates that the same value was used for two or more variants, making them impossible to tell apart. -``` -// Good. +```compile_fail,E0081 +// Bad. enum Enum { - P, + P = 3, X = 3, - Y = 5 + Y = 5, } +``` -// Bad. +``` +// Good. enum Enum { - P = 3, + P, X = 3, - Y = 5 + Y = 5, } ``` @@ -1029,60 +1105,82 @@ Note that variants without a manually specified discriminant are numbered from top to bottom starting from 0, so clashes can occur with seemingly unrelated variants. -``` +```compile_fail,E0081 enum Bad { X, Y = 0 } ``` -Here `X` will have already been assigned the discriminant 0 by the time `Y` is +Here `X` will have already been specified the discriminant 0 by the time `Y` is encountered, so a conflict occurs. "##, E0082: r##" -The default type for enum discriminants is `isize`, but it can be adjusted by -adding the `repr` attribute to the enum declaration. This error indicates that -an integer literal given as a discriminant is not a member of the discriminant -type. For example: +When you specify enum discriminants with `=`, the compiler expects `isize` +values by default. Or you can add the `repr` attibute to the enum declaration +for an explicit choice of the discriminant type. In either cases, the +discriminant values must fall within a valid range for the expected type; +otherwise this error is raised. For example: -``` +```ignore #[repr(u8)] enum Thing { A = 1024, - B = 5 + B = 5, } ``` Here, 1024 lies outside the valid range for `u8`, so the discriminant for `A` is -invalid. 
You may want to change representation types to fix this, or else change -invalid discriminant values so that they fit within the existing type. +invalid. Here is another, more subtle example which depends on target word size: -Note also that without a representation manually defined, the compiler will -optimize by using the smallest integer type possible. -"##, +```ignore +enum DependsOnPointerSize { + A = 1 << 32, +} +``` -E0083: r##" -At present, it's not possible to define a custom representation for an enum with -a single variant. As a workaround you can add a `Dummy` variant. +Here, `1 << 32` is interpreted as an `isize` value. So it is invalid for 32 bit +target (`target_pointer_width = "32"`) but valid for 64 bit target. -See: https://github.com/rust-lang/rust/issues/10292 +You may want to change representation types to fix this, or else change invalid +discriminant values so that they fit within the existing type. "##, E0084: r##" +An unsupported representation was attempted on a zero-variant enum. + +Erroneous code example: + +```compile_fail,E0084 +#[repr(i32)] +enum NightsWatch {} // error: unsupported representation for zero-variant enum +``` + It is impossible to define an integer type to be used to represent zero-variant enum values because there are no zero-variant enum values. There is no way to -construct an instance of the following type using only safe code: +construct an instance of the following type using only safe code. So you have +two solutions. Either you add variants in your enum: + +``` +#[repr(i32)] +enum NightsWatch { + JonSnow, + Commander, +} +``` + +or you remove the integer represention of your enum: ``` -enum Empty {} +enum NightsWatch {} ``` "##, E0087: r##" Too many type parameters were supplied for a function. For example: -``` +```compile_fail,E0087 fn foo() {} fn main() { @@ -1097,7 +1195,7 @@ parameters. E0088: r##" You gave too many lifetime parameters. 
Erroneous code example: -``` +```compile_fail,E0088 fn f() {} fn main() { @@ -1142,7 +1240,7 @@ fn main() { E0089: r##" Not enough type parameters were supplied for a function. For example: -``` +```compile_fail,E0089 fn foo() {} fn main() { @@ -1153,7 +1251,7 @@ fn main() { Note that if a function takes multiple type parameters but you want the compiler to infer some of them, you can use type placeholders: -``` +```compile_fail,E0089 fn foo(x: T) {} fn main() { @@ -1168,7 +1266,7 @@ E0091: r##" You gave an unnecessary type parameter in a type alias. Erroneous code example: -``` +```compile_fail,E0091 type Foo = u32; // error: type parameter `T` is unused // or: type Foo = Box; // error: type parameter `B` is unused @@ -1178,7 +1276,7 @@ Please check you didn't write too many type parameters. Example: ``` type Foo = u32; // ok! -type Foo = Box; // ok! +type Foo2 = Box; // ok! ``` "##, @@ -1186,7 +1284,7 @@ E0092: r##" You tried to declare an undefined atomic operation function. Erroneous code example: -``` +```compile_fail,E0092 #![feature(intrinsics)] extern "rust-intrinsic" { @@ -1211,7 +1309,7 @@ extern "rust-intrinsic" { E0093: r##" You declared an unknown intrinsic function. Erroneous code example: -``` +```compile_fail,E0093 #![feature(intrinsics)] extern "rust-intrinsic" { @@ -1248,7 +1346,7 @@ E0094: r##" You gave an invalid number of type parameters to an intrinsic function. Erroneous code example: -``` +```compile_fail,E0094 #![feature(intrinsics)] extern "rust-intrinsic" { @@ -1257,7 +1355,7 @@ extern "rust-intrinsic" { } ``` -Please check that you provided the right number of lifetime parameters +Please check that you provided the right number of type parameters and verify with the function declaration in the Rust source code. Example: @@ -1274,66 +1372,42 @@ E0101: r##" You hit this error because the compiler lacks the information to determine a type for this expression. 
Erroneous code example: -``` -fn main() { - let x = |_| {}; // error: cannot determine a type for this expression -} +```compile_fail,E0101 +let x = |_| {}; // error: cannot determine a type for this expression ``` You have two possibilities to solve this situation: + * Give an explicit definition of the expression * Infer the expression Examples: ``` -fn main() { - let x = |_ : u32| {}; // ok! - // or: - let x = |_| {}; - x(0u32); -} +let x = |_ : u32| {}; // ok! +// or: +let x = |_| {}; +x(0u32); ``` "##, E0102: r##" -You hit this error because the compiler lacks information to -determine a type for this variable. Erroneous code example: - -``` -fn demo(devil: fn () -> !) { - let x: &_ = devil(); - // error: cannot determine a type for this local variable -} - -fn oh_no() -> ! { panic!("the devil is in the details") } +You hit this error because the compiler lacks the information to +determine the type of this variable. Erroneous code example: -fn main() { - demo(oh_no); -} +```compile_fail,E0102 +// could be an array of anything +let x = []; // error: cannot determine a type for this local variable ``` To solve this situation, constrain the type of the variable. Examples: ``` -fn some_func(x: &u32) { - // some code -} - -fn demo(devil: fn () -> !) { - let x: &u32 = devil(); - // Here we defined the type at the variable creation - - let x: &_ = devil(); - some_func(x); - // Here, the type is determined by the function argument type -} - -fn oh_no() -> ! { panic!("the devil is in the details") } +#![allow(unused_variables)] fn main() { - demo(oh_no); + let x: [u8; 0] = []; } ``` "##, @@ -1345,7 +1419,7 @@ lifetime elision rules (see below). Here are some simple examples of where you'll run into this error: -``` +```compile_fail,E0106 struct Foo { x: &bool } // error struct Foo<'a> { x: &'a bool } // correct @@ -1373,15 +1447,15 @@ same as the lifetime on `&self` or `&mut self`. 
Here are some examples of elision errors: -``` +```compile_fail,E0106 // error, no input lifetimes -fn foo() -> &str { ... } +fn foo() -> &str { } // error, `x` and `y` have distinct lifetimes inferred -fn bar(x: &str, y: &str) -> &str { ... } +fn bar(x: &str, y: &str) -> &str { } // error, `y`'s lifetime is inferred to be distinct from `x`'s -fn baz<'a>(x: &'a str, y: &str) -> &str { ... } +fn baz<'a>(x: &'a str, y: &str) -> &str { } ``` [book-le]: https://doc.rust-lang.org/nightly/book/lifetimes.html#lifetime-elision @@ -1393,7 +1467,7 @@ for a type (like a struct or enum) or trait. Some basic examples include: -``` +```compile_fail,E0107 struct Foo<'a>(&'a str); enum Bar { A, B, C } @@ -1406,7 +1480,7 @@ struct Baz<'a> { Here's an example that is currently an error, but may work in a future version of Rust: -``` +```compile_fail,E0107 struct Foo<'a>(&'a str); trait Quux { } @@ -1424,8 +1498,8 @@ You can only define an inherent implementation for a type in the same crate where the type was defined. For example, an `impl` block as below is not allowed since `Vec` is defined in the standard library: -``` -impl Vec { ... } // error +```compile_fail,E0116 +impl Vec { } // error ``` To fix this problem, you can do either of these things: @@ -1438,10 +1512,10 @@ To fix this problem, you can do either of these things: Note that using the `type` keyword does not work here because `type` only introduces a type alias: -``` +```compile_fail,E0116 type Bytes = Vec; -impl Bytes { ... } // error, same as above +impl Bytes { } // error, same as above ``` "##, @@ -1456,14 +1530,14 @@ trait defined in another crate) where Here's one example of this error: -``` +```compile_fail,E0117 impl Drop for u32 {} ``` To avoid this kind of error, ensure that at least one local type is referenced by the `impl`: -``` +```ignore pub struct Foo; // you define your type in your crate impl Drop for Foo { // and you can implement the trait on it! 
@@ -1496,22 +1570,46 @@ For information on the design of the orphan rules, see [RFC 1023]. "##, E0118: r##" -Rust can't find a base type for an implementation you are providing, or the type -cannot have an implementation. For example, only a named type or a trait can -have an implementation: +You're trying to write an inherent implementation for something which isn't a +struct nor an enum. Erroneous code example: + +```compile_fail,E0118 +impl (u8, u8) { // error: no base type found for inherent implementation + fn get_state(&self) -> String { + // ... + } +} +``` + +To fix this error, please implement a trait on the type or wrap it in a struct. +Example: ``` -type NineString = [char, ..9] // This isn't a named type (struct, enum or trait) -impl NineString { - // Some code here +// we create a trait here +trait LiveLongAndProsper { + fn get_state(&self) -> String; +} + +// and now you can implement it on (u8, u8) +impl LiveLongAndProsper for (u8, u8) { + fn get_state(&self) -> String { + "He's dead, Jim!".to_owned() + } } ``` -In the other, simpler case, Rust just can't find the type you are providing an -impelementation for: +Alternatively, you can create a newtype. A newtype is a wrapping tuple-struct. +For example, `NewType` is a newtype over `Foo` in `struct NewType(Foo)`. +Example: ``` -impl SomeTypeThatDoesntExist { } +struct TypeWrapper((u8, u8)); + +impl TypeWrapper { + fn get_state(&self) -> String { + "Fascinating!".to_owned() + } +} ``` "##, @@ -1519,7 +1617,7 @@ E0119: r##" There are conflicting trait implementations for the same type. Example of erroneous code: -``` +```compile_fail,E0119 trait MyTrait { fn get(&self) -> usize; } @@ -1544,6 +1642,10 @@ MyTrait for Foo`. Since a trait cannot be implemented multiple times, this is an error. 
So, when you write: ``` +trait MyTrait { + fn get(&self) -> usize; +} + impl MyTrait for T { fn get(&self) -> usize { 0 } } @@ -1576,7 +1678,7 @@ E0120: r##" An attempt was made to implement Drop on a trait, which is not allowed: only structs and enums can implement Drop. An example causing this error: -``` +```compile_fail,E0120 trait MyTrait {} impl Drop for MyTrait { @@ -1617,7 +1719,7 @@ placeholders are disallowed by design in item signatures. Examples of this error include: -``` +```compile_fail,E0121 fn foo() -> _ { 5 } // error, explicitly write out the return type instead static BAR: _ = "test"; // error, explicitly write out the type instead @@ -1648,10 +1750,10 @@ E0124: r##" You declared two fields of a struct with the same name. Erroneous code example: -``` +```compile_fail,E0124 struct Foo { field1: i32, - field1: i32 // error: field is already declared + field1: i32, // error: field is already declared } ``` @@ -1660,7 +1762,7 @@ Please verify that the field names have been correctly spelled. Example: ``` struct Foo { field1: i32, - field2: i32 // ok! + field2: i32, // ok! } ``` "##, @@ -1669,8 +1771,8 @@ E0128: r##" Type parameter defaults can only use parameters that occur before them. Erroneous code example: -``` -pub struct Foo { +```compile_fail,E0128 +struct Foo { field1: T, filed2: U, } @@ -1682,7 +1784,7 @@ Since type parameters are evaluated in-order, you may be able to fix this issue by doing: ``` -pub struct Foo { +struct Foo { field1: T, filed2: U, } @@ -1692,91 +1794,59 @@ Please also verify that this wasn't because of a name-clash and rename the type parameter if so. "##, -E0130: r##" -You declared a pattern as an argument in a foreign function declaration. -Erroneous code example: - -``` -extern { - fn foo((a, b): (u32, u32)); // error: patterns aren't allowed in foreign - // function declarations -} -``` - -Please replace the pattern argument with a regular one. 
Example: - -``` -struct SomeStruct { - a: u32, - b: u32, -} - -extern { - fn foo(s: SomeStruct); // ok! -} -// or -extern { - fn foo(a: (u32, u32)); // ok! -} -``` -"##, - E0131: r##" It is not possible to define `main` with type parameters, or even with function parameters. When `main` is present, it must take no arguments and return `()`. Erroneous code example: -``` +```compile_fail,E0131 fn main() { // error: main function is not allowed to have type parameters } ``` "##, E0132: r##" -It is not possible to declare type parameters on a function that has the `start` -attribute. Such a function must have the following type signature: +A function with the `start` attribute was declared with type parameters. -``` -fn(isize, *const *const u8) -> isize; -``` -"##, +Erroneous code example: -E0163: r##" -This error means that an attempt was made to match an enum variant as a -struct type when the variant isn't a struct type: +```compile_fail,E0132 +#![feature(start)] +#[start] +fn f() {} ``` -enum Foo { B(u32) } -fn bar(foo: Foo) -> u32 { - match foo { - Foo::B{i} => i // error 0163 - } -} +It is not possible to declare type parameters on a function that has the `start` +attribute. 
Such a function must have the following type signature (for more +information: http://doc.rust-lang.org/stable/book/no-stdlib.html): + +```ignore +fn(isize, *const *const u8) -> isize; ``` -Try using `()` instead: +Example: ``` -fn bar(foo: Foo) -> u32 { - match foo { - Foo::B(i) => i - } +#![feature(start)] + +#[start] +fn my_start(argc: isize, argv: *const *const u8) -> isize { + 0 } ``` "##, E0164: r##" - This error means that an attempt was made to match a struct type enum variant as a non-struct type: -``` -enum Foo { B{ i: u32 } } +```compile_fail,E0164 +enum Foo { B { i: u32 } } fn bar(foo: Foo) -> u32 { match foo { - Foo::B(i) => i // error 0164 + Foo::B(i) => i, // error E0164 } } ``` @@ -1784,34 +1854,21 @@ fn bar(foo: Foo) -> u32 { Try using `{}` instead: ``` +enum Foo { B { i: u32 } } + fn bar(foo: Foo) -> u32 { match foo { - Foo::B{i} => i + Foo::B{i} => i, } } ``` "##, - -E0166: r##" -This error means that the compiler found a return expression in a function -marked as diverging. A function diverges if it has `!` in the place of the -return type in its signature. For example: - -``` -fn foo() -> ! { return; } // error -``` - -For a function that diverges, every control path in the function must never -return, for example with a `loop` that never breaks or a call to another -diverging function (such as `panic!()`). -"##, - E0172: r##" This error means that an attempt was made to specify the type of a variable with a combination of a concrete type and a trait. Consider the following example: -``` +```compile_fail,E0172 fn foo(bar: i32+std::fmt::Display) {} ``` @@ -1840,7 +1897,7 @@ to use parentheses. For example: -``` +```compile_fail,E0178 trait Foo {} struct Bar<'a> { @@ -1856,6 +1913,45 @@ More details can be found in [RFC 438]. [RFC 438]: https://github.com/rust-lang/rfcs/pull/438 "##, +E0182: r##" +You bound an associated type in an expression path which is not +allowed. 
+ +Erroneous code example: + +```compile_fail,E0182 +trait Foo { + type A; + fn bar() -> isize; +} + +impl Foo for isize { + type A = usize; + fn bar() -> isize { 42 } +} + +// error: unexpected binding of associated item in expression path +let x: isize = Foo::::bar(); +``` + +To give a concrete type when using the Universal Function Call Syntax, +use "Type as Trait". Example: + +``` +trait Foo { + type A; + fn bar() -> isize; +} + +impl Foo for isize { + type A = usize; + fn bar() -> isize { 42 } +} + +let x: isize = ::bar(); // ok! +``` +"##, + E0184: r##" Explicitly implementing both Drop and Copy for a type is currently disallowed. This feature can make some sense in theory, but the current implementation is @@ -1872,7 +1968,7 @@ take a `self` parameter). Here's an example of this error: -``` +```compile_fail,E0185 trait Foo { fn foo(); } @@ -1884,6 +1980,7 @@ impl Foo for Bar { // the trait fn foo(&self) {} } +``` "##, E0186: r##" @@ -1893,7 +1990,7 @@ to be static. Here's an example of this error: -``` +```compile_fail,E0186 trait Foo { fn foo(&self); } @@ -1912,7 +2009,7 @@ E0191: r##" Trait objects need to have all associated types specified. Erroneous code example: -``` +```compile_fail,E0191 trait Trait { type Bar; } @@ -1943,7 +2040,7 @@ E0193: r##" `where` clauses must use generic type parameters: it does not make sense to use them otherwise. An example causing this error: -``` +```ignore trait Foo { fn bar(&self); } @@ -1962,6 +2059,14 @@ This use of a `where` clause is strange - a more common usage would look something like the following: ``` +trait Foo { + fn bar(&self); +} + +#[derive(Copy,Clone)] +struct Wrapper { + Wrapped: T +} impl Foo for Wrapper where Wrapper: Clone { fn bar(&self) { } } @@ -1980,7 +2085,7 @@ E0194: r##" A type parameter was declared which shadows an existing one. 
An example of this error: -``` +```compile_fail,E0194 trait Foo { fn do_something(&self) -> T; fn do_something_else(&self, bar: T); @@ -1996,7 +2101,7 @@ E0195: r##" Your method's lifetime parameters do not match the trait declaration. Erroneous code example: -``` +```compile_fail,E0195 trait Trait { fn bar<'a,'b:'a>(x: &'a str, y: &'b str); } @@ -2035,7 +2140,7 @@ methods associated with a type) are always safe because they are not implementing an unsafe trait. Removing the `unsafe` keyword from the inherent implementation will resolve this error. -``` +```compile_fail,E0197 struct Foo; // this will cause this error @@ -2051,22 +2156,38 @@ particular trait. Not being able to use a trait is always a safe operation, so negative implementations are always safe and never need to be marked as unsafe. -``` +```compile_fail +#![feature(optin_builtin_traits)] + struct Foo; // unsafe is unnecessary unsafe impl !Clone for Foo { } -// this will compile -impl !Clone for Foo { } ``` + +This will compile: + +``` +#![feature(optin_builtin_traits)] + +struct Foo; + +trait Enterprise {} + +impl Enterprise for .. { } + +impl !Enterprise for Foo { } +``` + +Please note that negative impls are only allowed for traits with default impls. "##, E0199: r##" Safe traits should not have unsafe implementations, therefore marking an -implementation for a safe trait unsafe will cause a compiler error. Removing the -unsafe marker on the trait noted in the error will resolve this problem. +implementation for a safe trait unsafe will cause a compiler error. Removing +the unsafe marker on the trait noted in the error will resolve this problem. -``` +```compile_fail,E0199 struct Foo; trait Bar { } @@ -2083,7 +2204,7 @@ Unsafe traits must have unsafe implementations. This error occurs when an implementation for an unsafe trait isn't marked as unsafe. This may be resolved by marking the unsafe implementation as unsafe. 
-``` +```compile_fail,E0200 struct Foo; unsafe trait Bar { } @@ -2101,7 +2222,7 @@ associated functions, etc.) with the same identifier. For example: -``` +```compile_fail,E0201 struct Foo(u8); impl Foo { @@ -2126,6 +2247,21 @@ impl Baz for Foo { type Quux = u32; } ``` + +Note, however, that items with the same name are allowed for inherent `impl` +blocks that don't overlap: + +``` +struct Foo(T); + +impl Foo { + fn bar(&self) -> bool { self.0 > 5 } +} + +impl Foo { + fn bar(&self) -> bool { self.0 } +} +``` "##, E0202: r##" @@ -2141,7 +2277,7 @@ An attempt to implement the `Copy` trait for a struct failed because one of the fields does not implement `Copy`. To fix this, you must implement `Copy` for the mentioned field. Note that this may not be possible, as in the example of -``` +```compile_fail,E0204 struct Foo { foo : Vec, } @@ -2153,7 +2289,7 @@ This fails because `Vec` does not implement `Copy` for any `T`. Here's another example that will fail: -``` +```compile_fail,E0204 #[derive(Copy)] struct Foo<'a> { ty: &'a mut bool, @@ -2169,7 +2305,7 @@ An attempt to implement the `Copy` trait for an enum failed because one of the variants does not implement `Copy`. To fix this, you must implement `Copy` for the mentioned variant. Note that this may not be possible, as in the example of -``` +```compile_fail,E0205 enum Foo { Bar(Vec), Baz, @@ -2182,11 +2318,11 @@ This fails because `Vec` does not implement `Copy` for any `T`. Here's another example that will fail: -``` +```compile_fail,E0205 #[derive(Copy)] enum Foo<'a> { Bar(&'a mut bool), - Baz + Baz, } ``` @@ -2199,7 +2335,7 @@ You can only implement `Copy` for a struct or enum. 
Both of the following examples will fail, because neither `i32` (primitive type) nor `&'static Bar` (reference to `Bar`) is a struct or enum: -``` +```compile_fail,E0206 type Foo = i32; impl Copy for Foo { } // error @@ -2210,39 +2346,135 @@ impl Copy for &'static Bar { } // error "##, E0207: r##" -You declared an unused type parameter when implementing a trait on an object. -Erroneous code example: +Any type parameter or lifetime parameter of an `impl` must meet at least one of +the following criteria: -``` -trait MyTrait { - fn get(&self) -> usize; + - it appears in the self type of the impl + - for a trait impl, it appears in the trait reference + - it is bound as an associated type + +### Error example 1 + +Suppose we have a struct `Foo` and we would like to define some methods for it. +The following definition leads to a compiler error: + +```compile_fail,E0207 +struct Foo; + +impl Foo { +// error: the type parameter `T` is not constrained by the impl trait, self +// type, or predicates [E0207] + fn get(&self) -> T { + ::default() + } } +``` + +The problem is that the parameter `T` does not appear in the self type (`Foo`) +of the impl. 
In this case, we can fix the error by moving the type parameter +from the `impl` to the method `get`: + +``` struct Foo; -impl MyTrait for Foo { - fn get(&self) -> usize { - 0 +// Move the type parameter from the impl to the method +impl Foo { + fn get(&self) -> T { + ::default() + } +} +``` + +### Error example 2 + +As another example, suppose we have a `Maker` trait and want to establish a +type `FooMaker` that makes `Foo`s: + +```compile_fail,E0207 +trait Maker { + type Item; + fn make(&mut self) -> Self::Item; +} + +struct Foo { + foo: T +} + +struct FooMaker; + +impl Maker for FooMaker { +// error: the type parameter `T` is not constrained by the impl trait, self +// type, or predicates [E0207] + type Item = Foo; + + fn make(&mut self) -> Foo { + Foo { foo: ::default() } } } ``` -Please check your object definition and remove unused type -parameter(s). Example: +This fails to compile because `T` does not appear in the trait or in the +implementing type. + +One way to work around this is to introduce a phantom type parameter into +`FooMaker`, like so: ``` -trait MyTrait { - fn get(&self) -> usize; +use std::marker::PhantomData; + +trait Maker { + type Item; + fn make(&mut self) -> Self::Item; } -struct Foo; +struct Foo { + foo: T +} -impl MyTrait for Foo { - fn get(&self) -> usize { - 0 +// Add a type parameter to `FooMaker` +struct FooMaker { + phantom: PhantomData, +} + +impl Maker for FooMaker { + type Item = Foo; + + fn make(&mut self) -> Foo { + Foo { + foo: ::default(), + } + } +} +``` + +Another way is to do away with the associated type in `Maker` and use an input +type parameter instead: + +``` +// Use a type parameter instead of an associated type here +trait Maker { + fn make(&mut self) -> Item; +} + +struct Foo { + foo: T +} + +struct FooMaker; + +impl Maker> for FooMaker { + fn make(&mut self) -> Foo { + Foo { foo: ::default() } } } ``` + +### Additional information + +For more information, please see [RFC 447]. 
+ +[RFC 447]: https://github.com/rust-lang/rfcs/blob/master/text/0447-no-unused-impl-parameters.md "##, E0210: r##" @@ -2255,27 +2487,31 @@ what this means, it is perhaps easiest to consider a few examples. If `ForeignTrait` is a trait defined in some external crate `foo`, then the following trait `impl` is an error: -``` -extern crate foo; -use foo::ForeignTrait; +```compile_fail,E0210 +extern crate collections; +use collections::range::RangeArgument; -impl ForeignTrait for T { ... } // error +impl RangeArgument for T { } // error + +fn main() {} ``` To work around this, it can be covered with a local type, `MyType`: -``` +```ignore struct MyType(T); -impl ForeignTrait for MyType { ... } // Ok +impl ForeignTrait for MyType { } // Ok ``` +Please note that a type alias is not sufficient. + For another example of an error, suppose there's another trait defined in `foo` named `ForeignTrait2` that takes two type parameters. Then this `impl` results in the same rule violation: -``` +```compile_fail struct MyType2; -impl ForeignTrait2> for MyType2 { ... } // error +impl ForeignTrait2> for MyType2 { } // error ``` The reason for this is that there are two appearances of type parameter `T` in @@ -2284,8 +2520,8 @@ is uncovered, and so runs afoul of the orphan rule. Consider one more example: -``` -impl ForeignTrait2, T> for MyType2 { ... } // Ok +```ignore +impl ForeignTrait2, T> for MyType2 { } // Ok ``` This only differs from the previous `impl` in that the parameters `T` and @@ -2295,7 +2531,7 @@ violate the orphan rule; it is permitted. To see why that last example was allowed, you need to understand the general rule. Unfortunately this rule is a bit tricky to state. Consider an `impl`: -``` +```ignore impl ForeignTrait for T0 { ... } ``` @@ -2310,11 +2546,12 @@ For information on the design of the orphan rules, see [RFC 1023]. 
[RFC 1023]: https://github.com/rust-lang/rfcs/pull/1023 "##, +/* E0211: r##" You used a function or type which doesn't fit the requirements for where it was used. Erroneous code examples: -``` +```compile_fail #![feature(intrinsics)] extern "rust-intrinsic" { @@ -2360,7 +2597,7 @@ extern "rust-intrinsic" { The second case example is a bit particular : the main function must always have this definition: -``` +```compile_fail fn main(); ``` @@ -2371,6 +2608,7 @@ as the type you're matching on. Example: ``` let x = 1u8; + match x { 0u8...3u8 => (), // ok! _ => () @@ -2388,12 +2626,13 @@ impl Foo { } ``` "##, + */ E0214: r##" A generic type was described using parentheses rather than angle brackets. For example: -``` +```compile_fail,E0214 fn main() { let v: Vec(&str) = vec!["foo"]; } @@ -2408,24 +2647,43 @@ E0220: r##" You used an associated type which isn't defined in the trait. Erroneous code example: -``` -trait Trait { +```compile_fail,E0220 +trait T1 { type Bar; } -type Foo = Trait; // error: associated type `F` not found for - // `Trait` +type Foo = T1; // error: associated type `F` not found for `T1` + +// or: + +trait T2 { + type Bar; + + // error: Baz is used but not declared + fn return_bool(&self, &Self::Bar, &Self::Baz) -> bool; +} ``` -Please verify you used the right trait or you didn't misspell the +Make sure that you have defined the associated type in the trait body. +Also, verify that you used the right trait or you didn't misspell the associated type name. Example: ``` -trait Trait { +trait T1 { type Bar; } -type Foo = Trait; // ok! +type Foo = T1; // ok! + +// or: + +trait T2 { + type Bar; + type Baz; // we declare `Baz` in our trait. + + // and now we can use it here: + fn return_bool(&self, &Self::Bar, &Self::Baz) -> bool; +} ``` "##, @@ -2433,7 +2691,7 @@ E0221: r##" An attempt was made to retrieve an associated type, but the type was ambiguous. 
For example: -``` +```compile_fail,E0221 trait T1 {} trait T2 {} @@ -2459,8 +2717,18 @@ one of the types. Alternatively, one can specify the intended type using the following syntax: ``` -fn do_something() { - let _: ::A; +trait T1 {} +trait T2 {} + +trait Foo { + type A: T1; +} + +trait Bar : Foo { + type A: T2; + fn do_something() { + let _: ::A; + } } ``` "##, @@ -2469,7 +2737,7 @@ E0223: r##" An attempt was made to retrieve an associated type, but the type was ambiguous. For example: -``` +```compile_fail,E0223 trait MyTrait {type X; } fn main() { @@ -2504,27 +2772,53 @@ E0225: r##" You attempted to use multiple types as bounds for a closure or trait object. Rust does not currently support this. A simple example that causes this error: -``` +```compile_fail,E0225 fn main() { - let _: Box; + let _: Box; } ``` -Builtin traits are an exception to this rule: it's possible to have bounds of -one non-builtin type, plus any number of builtin types. For example, the +Send and Sync are an exception to this rule: it's possible to have bounds of +one non-builtin trait, plus either or both of Send and Sync. For example, the following compiles correctly: ``` fn main() { - let _: Box; + let _: Box; } ``` "##, +E0230: r##" +The trait has more type parameters specified than appear in its definition. + +Erroneous example code: + +```compile_fail,E0230 +#![feature(on_unimplemented)] +#[rustc_on_unimplemented = "Trait error on `{Self}` with `<{A},{B},{C}>`"] +// error: there is no type parameter C on trait TraitWithThreeParams +trait TraitWithThreeParams +{} +``` + +Include the correct number of type parameters and the compilation should +proceed: + +``` +#![feature(on_unimplemented)] +#[rustc_on_unimplemented = "Trait error on `{Self}` with `<{A},{B},{C}>`"] +trait TraitWithThreeParams // ok! +{} +``` +"##, + E0232: r##" The attribute must have a value. 
Erroneous code example: -``` +```compile_fail,E0232 +#![feature(on_unimplemented)] + #[rustc_on_unimplemented] // error: this attribute must have a value trait Bar {} ``` @@ -2532,6 +2826,8 @@ trait Bar {} Please supply the missing value of the attribute. Example: ``` +#![feature(on_unimplemented)] + #[rustc_on_unimplemented = "foo"] // ok! trait Bar {} ``` @@ -2544,7 +2840,7 @@ trait. For example, the `Foo` struct below is defined to be generic in `T`, but the type parameter is missing in the definition of `Bar`: -``` +```compile_fail,E0243 struct Foo { x: T } struct Bar { x: Foo } @@ -2558,36 +2854,18 @@ trait. For example, the `Foo` struct below has no type parameters, but is supplied with two in the definition of `Bar`: -``` +```compile_fail,E0244 struct Foo { x: bool } struct Bar { x: Foo } ``` "##, -//NB: not currently reachable -E0247: r##" -This error indicates an attempt to use a module name where a type is expected. -For example: - -``` -mod MyMod { - mod MySubMod { } -} - -fn do_something(x: MyMod::MySubMod) { } -``` - -In this example, we're attempting to take a parameter of type `MyMod::MySubMod` -in the do_something function. This is not legal: `MyMod::MySubMod` is a module -name, not a type. -"##, - E0248: r##" This error indicates an attempt to use a value where a type is expected. For example: -``` +```compile_fail,E0248 enum Foo { Bar(u32) } @@ -2602,36 +2880,24 @@ not a distinct static type. Likewise, it's not legal to attempt to behavior for specific enum variants. "##, -E0249: r##" -This error indicates a constant expression for the array length was found, but -it was not an integer (signed or unsigned) expression. - -Some examples of code that produces this error are: - -``` -const A: [u32; "hello"] = []; // error -const B: [u32; true] = []; // error -const C: [u32; 0.0] = []; // error -"##, - -E0250: r##" -There was an error while evaluating the expression for the length of a fixed- -size array type. 
+E0569: r##" +If an impl has a generic parameter with the `#[may_dangle]` attribute, then +that impl must be declared as an `unsafe impl. For example: -Some examples of this error are: +```compile_fail,E0569 +#![feature(generic_param_attrs)] +#![feature(dropck_eyepatch)] +struct Foo(X); +impl<#[may_dangle] X> Drop for Foo { + fn drop(&mut self) { } +} ``` -// divide by zero in the length expression -const A: [u32; 1/0] = []; -// Rust currently will not evaluate the function `foo` at compile time -fn foo() -> usize { 12 } -const B: [u32; foo()] = []; - -// it is an error to try to add `u8` and `f64` -use std::{f64, u8}; -const C: [u32; u8::MAX + f64::EPSILON] = []; -``` +In this example, we are asserting that the destructor for `Foo` will not +access any data of type `X`, and require this assertion to be true for +overall safety in our program. The compiler does not currently attempt to +verify this assertion; therefore we must tag this `impl` as unsafe. "##, E0318: r##" @@ -2644,14 +2910,14 @@ E0321: r##" A cross-crate opt-out trait was implemented on something which wasn't a struct or enum type. Erroneous code example: -``` +```compile_fail,E0321 #![feature(optin_builtin_traits)] struct Foo; impl !Sync for Foo {} -unsafe impl Send for &'static Foo { +unsafe impl Send for &'static Foo {} // error: cross-crate traits with a default impl, like `core::marker::Send`, // can only be implemented for a struct/enum type, not // `&'static Foo` @@ -2673,7 +2939,9 @@ E0323: r##" An associated const was implemented when another trait item was expected. Erroneous code example: -``` +```compile_fail,E0323 +#![feature(associated_consts)] + trait Foo { type N; } @@ -2700,8 +2968,15 @@ trait Foo { impl Foo for Bar { type N = u32; // ok! } +``` + +Or: + +``` +#![feature(associated_consts)] + +struct Bar; -// or: trait Foo { const N : u32; } @@ -2716,7 +2991,9 @@ E0324: r##" A method was implemented when another trait item was expected. 
Erroneous code example: -``` +```compile_fail,E0324 +#![feature(associated_consts)] + struct Bar; trait Foo { @@ -2736,6 +3013,8 @@ To fix this error, please verify that the method name wasn't misspelled and verify that you are indeed implementing the correct trait items. Example: ``` +#![feature(associated_consts)] + struct Bar; trait Foo { @@ -2756,7 +3035,9 @@ E0325: r##" An associated type was implemented when another trait item was expected. Erroneous code example: -``` +```compile_fail,E0325 +#![feature(associated_consts)] + struct Bar; trait Foo { @@ -2783,8 +3064,15 @@ trait Foo { impl Foo for Bar { type N = u32; // ok! } +``` + +Or: + +``` +#![feature(associated_consts)] + +struct Bar; -//or: trait Foo { const N : u32; } @@ -2801,7 +3089,9 @@ types in the trait definition. This error indicates that there was a mismatch. Here's an example of this error: -``` +```compile_fail,E0326 +#![feature(associated_consts)] + trait Foo { const BAR: bool; } @@ -2814,67 +3104,41 @@ impl Foo for Bar { ``` "##, -E0327: r##" -You cannot use associated items other than constant items as patterns. This -includes method items. Example of erroneous code: +E0329: r##" +An attempt was made to access an associated constant through either a generic +type parameter or `Self`. This is not supported yet. An example causing this +error is shown below: -``` -enum B {} +```ignore +#![feature(associated_consts)] -impl B { - fn bb() -> i32 { 0 } +trait Foo { + const BAR: f64; } -fn main() { - match 0 { - B::bb => {} // error: associated items in match patterns must - // be constants - } -} -``` - -Please check that you're not using a method as a pattern. Example: +struct MyStruct; -``` -enum B { - ba, - bb +impl Foo for MyStruct { + const BAR: f64 = 0f64; } -fn main() { - match B::ba { - B::bb => {} // ok! - _ => {} - } +fn get_bar_bad(t: F) -> f64 { + F::BAR } ``` -"##, -E0329: r##" -An attempt was made to access an associated constant through either a generic -type parameter or `Self`. 
This is not supported yet. An example causing this -error is shown below: +Currently, the value of `BAR` for a particular type can only be accessed +through a concrete type, as shown below: + +```ignore +#![feature(associated_consts)] -``` trait Foo { const BAR: f64; } struct MyStruct; -impl Foo for MyStruct { - const BAR: f64 = 0f64; -} - -fn get_bar_bad(t: F) -> f64 { - F::BAR -} -``` - -Currently, the value of `BAR` for a particular type can only be accessed through -a concrete type, as shown below: - -``` fn get_bar_good() -> f64 { ::BAR } @@ -2885,7 +3149,7 @@ E0366: r##" An attempt was made to implement `Drop` on a concrete specialization of a generic type. An example is shown below: -``` +```compile_fail,E0366 struct Foo { t: T } @@ -2918,7 +3182,7 @@ E0367: r##" An attempt was made to implement `Drop` on a specialization of a generic type. An example is shown below: -``` +```compile_fail,E0367 trait Foo{} struct MyStruct { @@ -2956,9 +3220,9 @@ E0368: r##" This error indicates that a binary assignment operator like `+=` or `^=` was applied to a type that doesn't support it. For example: -``` +```compile_fail,E0368 let mut x = 12f32; // error: binary operation `<<` cannot be applied to - // type `f32` + // type `f32` x <<= 2; ``` @@ -2967,7 +3231,7 @@ To fix this error, please check that this type implements this binary operation. Example: ``` -let x = 12u32; // the `u32` type does implement the `ShlAssign` trait +let mut x = 12u32; // the `u32` type does implement the `ShlAssign` trait x <<= 2; // ok! ``` @@ -2979,7 +3243,7 @@ Another problem you might be facing is this: suppose you've overloaded the `+` operator for some type `Foo` by implementing the `std::ops::Add` trait for `Foo`, but you find that using `+=` does not work, as in this example: -``` +```compile_fail,E0368 use std::ops::Add; struct Foo(u32); @@ -3006,7 +3270,7 @@ E0369: r##" A binary operation was attempted on a type which doesn't support it. 
Erroneous code example: -``` +```compile_fail,E0369 let x = 12f32; // error: binary operation `<<` cannot be applied to // type `f32` @@ -3031,12 +3295,13 @@ E0370: r##" The maximum value of an enum was reached, so it cannot be automatically set in the next enum value. Erroneous code example: -``` +```compile_fail +#[deny(overflowing_literals)] enum Foo { X = 0x7fffffffffffffff, - Y // error: enum discriminant overflowed on value after - // 9223372036854775807: i64; set explicitly via - // Y = -9223372036854775808 if that is desired outcome + Y, // error: enum discriminant overflowed on value after + // 9223372036854775807: i64; set explicitly via + // Y = -9223372036854775808 if that is desired outcome } ``` @@ -3048,8 +3313,11 @@ enum Foo { X = 0x7fffffffffffffff, Y = 0, // ok! } +``` -// or: +Or: + +``` enum Foo { Y = 0, // ok! X = 0x7fffffffffffffff, @@ -3065,7 +3333,7 @@ definition, so it is not useful to do this. Example: -``` +```compile_fail,E0371 trait Foo { fn foo(&self) { } } trait Bar: Foo { } trait Baz: Bar { } @@ -3077,11 +3345,162 @@ impl Baz for Bar { } // Note: This is OK ``` "##, -E0379: r##" -Trait methods cannot be declared `const` by design. For more information, see -[RFC 911]. +E0374: r##" +A struct without a field containing an unsized type cannot implement +`CoerceUnsized`. An +[unsized type](https://doc.rust-lang.org/book/unsized-types.html) +is any type that the compiler doesn't know the length or alignment of at +compile time. Any struct containing an unsized type is also unsized. + +Example of erroneous code: + +```compile_fail,E0374 +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +struct Foo { + a: i32, +} + +// error: Struct `Foo` has no unsized fields that need `CoerceUnsized`. +impl CoerceUnsized> for Foo + where T: CoerceUnsized {} +``` + +`CoerceUnsized` is used to coerce one struct containing an unsized type +into another struct containing a different unsized type. 
If the struct +doesn't have any fields of unsized types then you don't need explicit +coercion to get the types you want. To fix this you can either +not try to implement `CoerceUnsized` or you can add a field that is +unsized to the struct. + +Example: + +``` +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +// We don't need to impl `CoerceUnsized` here. +struct Foo { + a: i32, +} + +// We add the unsized type field to the struct. +struct Bar { + a: i32, + b: T, +} + +// The struct has an unsized field so we can implement +// `CoerceUnsized` for it. +impl CoerceUnsized> for Bar + where T: CoerceUnsized {} +``` + +Note that `CoerceUnsized` is mainly used by smart pointers like `Box`, `Rc` +and `Arc` to be able to mark that they can coerce unsized types that they +are pointing at. +"##, + +E0375: r##" +A struct with more than one field containing an unsized type cannot implement +`CoerceUnsized`. This only occurs when you are trying to coerce one of the +types in your struct to another type in the struct. In this case we try to +impl `CoerceUnsized` from `T` to `U` which are both types that the struct +takes. An [unsized type](https://doc.rust-lang.org/book/unsized-types.html) +is any type that the compiler doesn't know the length or alignment of at +compile time. Any struct containing an unsized type is also unsized. + +Example of erroneous code: + +```compile_fail,E0375 +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +struct Foo { + a: i32, + b: T, + c: U, +} + +// error: Struct `Foo` has more than one unsized field. +impl CoerceUnsized> for Foo {} +``` + +`CoerceUnsized` only allows for coercion from a structure with a single +unsized type field to another struct with a single unsized type field. +In fact Rust only allows for a struct to have one unsized type in a struct +and that unsized type must be the last field in the struct. So having two +unsized types in a single struct is not allowed by the compiler. 
To fix this +use only one field containing an unsized type in the struct and then use +multiple structs to manage each unsized type field you need. + +Example: + +``` +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +struct Foo { + a: i32, + b: T, +} + +impl CoerceUnsized> for Foo + where T: CoerceUnsized {} + +fn coerce_foo, U>(t: T) -> Foo { + Foo { a: 12i32, b: t } // we use coercion to get the `Foo` type we need +} +``` + +"##, + +E0376: r##" +The type you are trying to impl `CoerceUnsized` for is not a struct. +`CoerceUnsized` can only be implemented for a struct. Unsized types are +already able to be coerced without an implementation of `CoerceUnsized` +whereas a struct containing an unsized type needs to know the unsized type +field it's containing is able to be coerced. An +[unsized type](https://doc.rust-lang.org/book/unsized-types.html) +is any type that the compiler doesn't know the length or alignment of at +compile time. Any struct containing an unsized type is also unsized. + +Example of erroneous code: -[RFC 911]: https://github.com/rust-lang/rfcs/pull/911 +```compile_fail,E0376 +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +struct Foo { + a: T, +} + +// error: The type `U` is not a struct +impl CoerceUnsized for Foo {} +``` + +The `CoerceUnsized` trait takes a struct type. Make sure the type you are +providing to `CoerceUnsized` is a struct with only the last field containing an +unsized type. + +Example: + +``` +#![feature(coerce_unsized)] +use std::ops::CoerceUnsized; + +struct Foo { + a: T, +} + +// The `Foo` is a struct so `CoerceUnsized` can be implemented +impl CoerceUnsized> for Foo where T: CoerceUnsized {} +``` + +Note that in Rust, structs can only contain an unsized type if the field +containing the unsized type is the last and only unsized type field in the +struct. 
"##, E0380: r##" @@ -3093,7 +3512,7 @@ For more information see the [opt-in builtin traits RFC](https://github.com/rust E0390: r##" You tried to implement methods for a primitive type. Erroneous code example: -``` +```compile_fail,E0390 struct Foo { x: i32 } @@ -3127,7 +3546,7 @@ and therefore cannot be constructed. The following example contains a circular dependency between two traits: -``` +```compile_fail,E0391 trait FirstTrait : SecondTrait { } @@ -3140,11 +3559,11 @@ trait SecondTrait : FirstTrait { E0392: r##" This error indicates that a type or lifetime parameter has been declared -but not actually used. Here is an example that demonstrates the error: +but not actually used. Here is an example that demonstrates the error: -``` +```compile_fail,E0392 enum Foo { - Bar + Bar, } ``` @@ -3153,7 +3572,7 @@ by simply removing the type parameter, as shown below: ``` enum Foo { - Bar + Bar, } ``` @@ -3162,7 +3581,7 @@ used. A simple fix is shown below: ``` enum Foo { - Bar(T) + Bar(T), } ``` @@ -3171,9 +3590,9 @@ example, when using raw pointers one may wish to specify the lifetime for which the pointed-at data is valid. An initial attempt (below) causes this error: -``` +```compile_fail,E0392 struct Foo<'a, T> { - x: *const T + x: *const T, } ``` @@ -3198,11 +3617,82 @@ parameters. You can read more about it in the API documentation: https://doc.rust-lang.org/std/marker/struct.PhantomData.html "##, +E0393: r##" +A type parameter which references `Self` in its default value was not specified. +Example of erroneous code: + +```compile_fail,E0393 +trait A {} + +fn together_we_will_rule_the_galaxy(son: &A) {} +// error: the type parameter `T` must be explicitly specified in an +// object type because its default value `Self` references the +// type `Self` +``` + +A trait object is defined over a single, fully-defined trait. With a regular +default parameter, this parameter can just be substituted in. 
However, if the +default parameter is `Self`, the trait changes for each concrete type; i.e. +`i32` will be expected to implement `A`, `bool` will be expected to +implement `A`, etc... These types will not share an implementation of a +fully-defined trait; instead they share implementations of a trait with +different parameters substituted in for each implementation. This is +irreconcilable with what we need to make a trait object work, and is thus +disallowed. Making the trait concrete by explicitly specifying the value of the +defaulted parameter will fix this issue. Fixed example: + +``` +trait A {} + +fn together_we_will_rule_the_galaxy(son: &A) {} // Ok! +``` +"##, + +E0399: r##" +You implemented a trait, overriding one or more of its associated types but did +not reimplement its default methods. + +Example of erroneous code: + +```compile_fail,E0399 +#![feature(associated_type_defaults)] + +pub trait Foo { + type Assoc = u8; + fn bar(&self) {} +} + +impl Foo for i32 { + // error - the following trait items need to be reimplemented as + // `Assoc` was overridden: `bar` + type Assoc = i32; +} +``` + +To fix this, add an implementation for each default method from the trait: + +``` +#![feature(associated_type_defaults)] + +pub trait Foo { + type Assoc = u8; + fn bar(&self) {} +} + +impl Foo for i32 { + type Assoc = i32; + fn bar(&self) {} // ok! +} +``` +"##, + E0439: r##" The length of the platform-intrinsic function `simd_shuffle` wasn't specified. Erroneous code example: -``` +```compile_fail,E0439 +#![feature(platform_intrinsics)] + extern "platform-intrinsic" { fn simd_shuffle(a: A, b: A, c: [u32; 8]) -> B; // error: invalid `simd_shuffle`, needs length: `simd_shuffle` @@ -3213,6 +3703,8 @@ The `simd_shuffle` function needs the length of the array passed as last parameter in its name. 
Example: ``` +#![feature(platform_intrinsics)] + extern "platform-intrinsic" { fn simd_shuffle8(a: A, b: A, c: [u32; 8]) -> B; } @@ -3223,7 +3715,10 @@ E0440: r##" A platform-specific intrinsic function has the wrong number of type parameters. Erroneous code example: -``` +```compile_fail,E0440 +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct f64x2(f64, f64); @@ -3238,6 +3733,9 @@ Please refer to the function declaration to see if it corresponds with yours. Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct f64x2(f64, f64); @@ -3251,7 +3749,10 @@ E0441: r##" An unknown platform-specific intrinsic function was used. Erroneous code example: -``` +```compile_fail,E0441 +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); @@ -3266,6 +3767,9 @@ that it is declared in the rust source code (in the file src/librustc_platform_intrinsics/x86.rs). Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); @@ -3279,7 +3783,10 @@ E0442: r##" Intrinsic argument(s) and/or return value have the wrong type. Erroneous code example: -``` +```compile_fail,E0442 +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i8x16(i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8); @@ -3298,6 +3805,9 @@ To fix this error, please refer to the function declaration to give it the awaited types. Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); @@ -3311,7 +3821,10 @@ E0443: r##" Intrinsic argument(s) and/or return value have the wrong type. 
Erroneous code example: -``` +```compile_fail,E0443 +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); #[repr(simd)] @@ -3327,6 +3840,9 @@ To fix this error, please refer to the function declaration to give it the awaited types. Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); @@ -3340,7 +3856,10 @@ E0444: r##" A platform-specific intrinsic function has wrong number of arguments. Erroneous code example: -``` +```compile_fail,E0444 +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct f64x2(f64, f64); @@ -3354,6 +3873,9 @@ Please refer to the function declaration to see if it corresponds with yours. Example: ``` +#![feature(repr_simd)] +#![feature(platform_intrinsics)] + #[repr(simd)] struct f64x2(f64, f64); @@ -3363,11 +3885,50 @@ extern "platform-intrinsic" { ``` "##, +E0513: r##" +The type of the variable couldn't be found out. + +Erroneous code example: + +```compile_fail,E0513 +use std::mem; + +unsafe { + let size = mem::size_of::(); + mem::transmute_copy::(&8_8); + // error: no type for local variable +} +``` + +To fix this error, please use a constant size instead of `size`. To make +this error more obvious, you could run: + +```compile_fail,E0080 +use std::mem; + +unsafe { + mem::transmute_copy::()]>(&8_8); + // error: constant evaluation error +} +``` + +So now, you can fix your code by setting the size directly: + +``` +use std::mem; + +unsafe { + mem::transmute_copy::(&8_8); + // `u32` is 4 bytes so we replace the `mem::size_of` call with its size +} +``` +"##, + E0516: r##" The `typeof` keyword is currently reserved but unimplemented. 
Erroneous code example: -``` +```compile_fail,E0516 fn main() { let x: typeof(92) = 92; } @@ -3382,6 +3943,227 @@ fn main() { ``` "##, +E0520: r##" +A non-default implementation was already made on this type so it cannot be +specialized further. Erroneous code example: + +```compile_fail,E0520 +#![feature(specialization)] + +trait SpaceLlama { + fn fly(&self); +} + +// applies to all T +impl SpaceLlama for T { + default fn fly(&self) {} +} + +// non-default impl +// applies to all `Clone` T and overrides the previous impl +impl SpaceLlama for T { + fn fly(&self) {} +} + +// since `i32` is clone, this conflicts with the previous implementation +impl SpaceLlama for i32 { + default fn fly(&self) {} + // error: item `fly` is provided by an `impl` that specializes + // another, but the item in the parent `impl` is not marked + // `default` and so it cannot be specialized. +} +``` + +Specialization only allows you to override `default` functions in +implementations. + +To fix this error, you need to mark all the parent implementations as default. +Example: + +``` +#![feature(specialization)] + +trait SpaceLlama { + fn fly(&self); +} + +// applies to all T +impl SpaceLlama for T { + default fn fly(&self) {} // This is a parent implementation. +} + +// applies to all `Clone` T; overrides the previous impl +impl SpaceLlama for T { + default fn fly(&self) {} // This is a parent implementation but was + // previously not a default one, causing the error +} + +// applies to i32, overrides the previous two impls +impl SpaceLlama for i32 { + fn fly(&self) {} // And now that's ok! +} +``` +"##, + +E0527: r##" +The number of elements in an array or slice pattern differed from the number of +elements in the array being matched. 
+ +Example of erroneous code: + +```compile_fail,E0527 +#![feature(slice_patterns)] + +let r = &[1, 2, 3, 4]; +match r { + &[a, b] => { // error: pattern requires 2 elements but array + // has 4 + println!("a={}, b={}", a, b); + } +} +``` + +Ensure that the pattern is consistent with the size of the matched +array. Additional elements can be matched with `..`: + +``` +#![feature(slice_patterns)] + +let r = &[1, 2, 3, 4]; +match r { + &[a, b, ..] => { // ok! + println!("a={}, b={}", a, b); + } +} +``` +"##, + +E0528: r##" +An array or slice pattern required more elements than were present in the +matched array. + +Example of erroneous code: + +```compile_fail,E0528 +#![feature(slice_patterns)] + +let r = &[1, 2]; +match r { + &[a, b, c, rest..] => { // error: pattern requires at least 3 + // elements but array has 2 + println!("a={}, b={}, c={} rest={:?}", a, b, c, rest); + } +} +``` + +Ensure that the matched array has at least as many elements as the pattern +requires. You can match an arbitrary number of remaining elements with `..`: + +``` +#![feature(slice_patterns)] + +let r = &[1, 2, 3, 4, 5]; +match r { + &[a, b, c, rest..] => { // ok! + // prints `a=1, b=2, c=3 rest=[4, 5]` + println!("a={}, b={}, c={} rest={:?}", a, b, c, rest); + } +} +``` +"##, + +E0529: r##" +An array or slice pattern was matched against some other type. + +Example of erroneous code: + +```compile_fail,E0529 +#![feature(slice_patterns)] + +let r: f32 = 1.0; +match r { + [a, b] => { // error: expected an array or slice, found `f32` + println!("a={}, b={}", a, b); + } +} +``` + +Ensure that the pattern and the expression being matched on are of consistent +types: + +``` +#![feature(slice_patterns)] + +let r = [1.0, 2.0]; +match r { + [a, b] => { // ok! + println!("a={}, b={}", a, b); + } +} +``` +"##, + +E0559: r##" +An unknown field was specified into an enum's structure variant. 
+ +Erroneous code example: + +```compile_fail,E0559 +enum Field { + Fool { x: u32 }, +} + +let s = Field::Fool { joke: 0 }; +// error: struct variant `Field::Fool` has no field named `joke` +``` + +Verify you didn't misspell the field's name or that the field exists. Example: + +``` +enum Field { + Fool { joke: u32 }, +} + +let s = Field::Fool { joke: 0 }; // ok! +``` +"##, + +E0560: r##" +An unknown field was specified into a structure. + +Erroneous code example: + +```compile_fail,E0560 +struct Simba { + mother: u32, +} + +let s = Simba { mother: 1, father: 0 }; +// error: structure `Simba` has no field named `father` +``` + +Verify you didn't misspell the field's name or that the field exists. Example: + +``` +struct Simba { + mother: u32, + father: u32, +} + +let s = Simba { mother: 1, father: 0 }; // ok! +``` +"##, + +E0570: r##" +The requested ABI is unsupported by the current target. + +The rust compiler maintains for each target a blacklist of ABIs unsupported on +that target. If an ABI is present in such a list this usually means that the +target / ABI combination is currently unsupported by llvm. + +If necessary, you can circumvent this check using custom target specifications. +"##, + } register_diagnostics! { @@ -3396,11 +4178,11 @@ register_diagnostics! { // E0129, // E0141, // E0159, // use of trait `{}` as struct constructor - E0167, +// E0163, // merged into E0071 +// E0167, // E0168, // E0173, // manual implementations of unboxed closure traits are experimental - E0174, // explicit use of unboxed closure methods are experimental - E0182, +// E0174, E0183, // E0187, // can't infer the kind of the closure // E0188, // can not cast an immutable reference to a mutable pointer @@ -3424,37 +4206,35 @@ register_diagnostics! 
{ E0226, // only a single explicit lifetime bound is permitted E0227, // ambiguous lifetime bound, explicit lifetime bound required E0228, // explicit lifetime bound required - E0230, // there is no type parameter on trait E0231, // only named substitution parameters are allowed // E0233, // E0234, // E0235, // structure constructor specifies a structure of type but - E0236, // no lang item for range syntax - E0237, // no lang item for range syntax - E0238, // parenthesized parameters may only be used with a trait +// E0236, // no lang item for range syntax +// E0237, // no lang item for range syntax +// E0238, // parenthesized parameters may only be used with a trait // E0239, // `next` method of `Iterator` trait has unexpected type // E0240, // E0241, - E0242, // internal error looking up a definition +// E0242, E0245, // not a trait // E0246, // invalid recursive type +// E0247, +// E0249, // E0319, // trait impls for defaulted traits allowed just for structs/enums E0320, // recursive overflow during dropck E0328, // cannot implement Unsize explicitly // E0372, // coherence not object safe - E0374, // the trait `CoerceUnsized` may only be implemented for a coercion - // between structures with one field being coerced, none found - E0375, // the trait `CoerceUnsized` may only be implemented for a coercion - // between structures with one field being coerced, but multiple - // fields need coercions - E0376, // the trait `CoerceUnsized` may only be implemented for a coercion - // between structures E0377, // the trait `CoerceUnsized` may only be implemented for a coercion // between structures with the same definition - E0393, // the type parameter `{}` must be explicitly specified in an object - // type because its default value `{}` references the type `Self`" - E0399, // trait items need to be implemented because the associated - // type `{}` was overridden E0436, // functional record update requires a struct - E0513 // no type for local variable .. 
+ E0521, // redundant default implementations of trait + E0533, // `{}` does not name a unit variant, unit struct or a constant + E0562, // `impl Trait` not allowed outside of function + // and inherent method return types + E0563, // cannot determine a type for this `impl Trait`: {} + E0564, // only named lifetimes are allowed in `impl Trait`, + // but `{}` was found in the type `{}` + E0567, // auto traits can not have type parameters + E0568, // auto-traits can not have predicates, } diff --git a/src/librustc_typeck/impl_wf_check.rs b/src/librustc_typeck/impl_wf_check.rs new file mode 100644 index 0000000000000..9f5b73d9b3075 --- /dev/null +++ b/src/librustc_typeck/impl_wf_check.rs @@ -0,0 +1,203 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This pass enforces various "well-formedness constraints" on impls. +//! Logically, it is part of wfcheck -- but we do it early so that we +//! can stop compilation afterwards, since part of the trait matching +//! infrastructure gets very grumpy if these conditions don't hold. In +//! particular, if there are type parameters that are not part of the +//! impl, then coherence will report strange inference ambiguity +//! errors; if impls have duplicate items, we get misleading +//! specialization errors. These things can (and probably should) be +//! fixed, but for the moment it's easier to do these checks early. 
+ +use constrained_type_params as ctp; +use rustc::dep_graph::DepNode; +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::def_id::DefId; +use rustc::ty; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use std::collections::hash_map::Entry::{Occupied, Vacant}; + +use syntax_pos::Span; + +use CrateCtxt; + +/// Checks that all the type/lifetime parameters on an impl also +/// appear in the trait ref or self-type (or are constrained by a +/// where-clause). These rules are needed to ensure that, given a +/// trait ref like `>`, we can derive the values of all +/// parameters on the impl (which is needed to make specialization +/// possible). +/// +/// However, in the case of lifetimes, we only enforce these rules if +/// the lifetime parameter is used in an associated type. This is a +/// concession to backwards compatibility; see comment at the end of +/// the fn for details. +/// +/// Example: +/// +/// ``` +/// impl Trait for Bar { ... } +/// ^ T does not appear in `Foo` or `Bar`, error! +/// +/// impl Trait> for Bar { ... } +/// ^ T appears in `Foo`, ok. +/// +/// impl Trait for Bar where Bar: Iterator { ... } +/// ^ T is bound to `::Item`, ok. +/// +/// impl<'a> Trait for Bar { } +/// ^ 'a is unused, but for back-compat we allow it +/// +/// impl<'a> Trait for Bar { type X = &'a i32; } +/// ^ 'a is unused and appears in assoc type, error +/// ``` +pub fn impl_wf_check<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>) { + // We will tag this as part of the WF check -- logically, it is, + // but it's one that we must perform earlier than the rest of + // WfCheck. 
+ ccx.tcx.visit_all_item_likes_in_krate(DepNode::WfCheck, &mut ImplWfCheck { ccx: ccx }); +} + +struct ImplWfCheck<'a, 'tcx: 'a> { + ccx: &'a CrateCtxt<'a, 'tcx>, +} + +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for ImplWfCheck<'a, 'tcx> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + match item.node { + hir::ItemImpl(.., ref generics, _, _, ref impl_item_refs) => { + let impl_def_id = self.ccx.tcx.map.local_def_id(item.id); + enforce_impl_params_are_constrained(self.ccx, + generics, + impl_def_id, + impl_item_refs); + enforce_impl_items_are_distinct(self.ccx, impl_item_refs); + } + _ => { } + } + } + + fn visit_impl_item(&mut self, _impl_item: &'tcx hir::ImplItem) { } +} + +fn enforce_impl_params_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_hir_generics: &hir::Generics, + impl_def_id: DefId, + impl_item_refs: &[hir::ImplItemRef]) +{ + // Every lifetime used in an associated type must be constrained. + let impl_self_ty = ccx.tcx.item_type(impl_def_id); + let impl_generics = ccx.tcx.item_generics(impl_def_id); + let impl_predicates = ccx.tcx.item_predicates(impl_def_id); + let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id); + + let mut input_parameters = ctp::parameters_for_impl(impl_self_ty, impl_trait_ref); + ctp::identify_constrained_type_params( + &impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters); + + // Disallow ANY unconstrained type parameters. + for (ty_param, param) in impl_generics.types.iter().zip(&impl_hir_generics.ty_params) { + let param_ty = ty::ParamTy::for_def(ty_param); + if !input_parameters.contains(&ctp::Parameter::from(param_ty)) { + report_unused_parameter(ccx, param.span, "type", ¶m_ty.to_string()); + } + } + + // Disallow unconstrained lifetimes, but only if they appear in assoc types. 
+ let lifetimes_in_associated_types: FxHashSet<_> = impl_item_refs.iter() + .map(|item_ref| ccx.tcx.map.local_def_id(item_ref.id.node_id)) + .filter(|&def_id| { + let item = ccx.tcx.associated_item(def_id); + item.kind == ty::AssociatedKind::Type && item.defaultness.has_value() + }) + .flat_map(|def_id| { + ctp::parameters_for(&ccx.tcx.item_type(def_id), true) + }).collect(); + for (ty_lifetime, lifetime) in impl_generics.regions.iter() + .zip(&impl_hir_generics.lifetimes) + { + let param = ctp::Parameter::from(ty_lifetime.to_early_bound_region_data()); + + if + lifetimes_in_associated_types.contains(¶m) && // (*) + !input_parameters.contains(¶m) + { + report_unused_parameter(ccx, lifetime.lifetime.span, + "lifetime", &lifetime.lifetime.name.to_string()); + } + } + + // (*) This is a horrible concession to reality. I think it'd be + // better to just ban unconstrianed lifetimes outright, but in + // practice people do non-hygenic macros like: + // + // ``` + // macro_rules! __impl_slice_eq1 { + // ($Lhs: ty, $Rhs: ty, $Bound: ident) => { + // impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq { + // .... + // } + // } + // } + // ``` + // + // In a concession to backwards compatbility, we continue to + // permit those, so long as the lifetimes aren't used in + // associated types. I believe this is sound, because lifetimes + // used elsewhere are not projected back out. +} + +fn report_unused_parameter(ccx: &CrateCtxt, + span: Span, + kind: &str, + name: &str) +{ + struct_span_err!( + ccx.tcx.sess, span, E0207, + "the {} parameter `{}` is not constrained by the \ + impl trait, self type, or predicates", + kind, name) + .span_label(span, &format!("unconstrained {} parameter", kind)) + .emit(); +} + +/// Enforce that we do not have two items in an impl with the same name. 
+fn enforce_impl_items_are_distinct<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + impl_item_refs: &[hir::ImplItemRef]) +{ + let tcx = ccx.tcx; + let mut seen_type_items = FxHashMap(); + let mut seen_value_items = FxHashMap(); + for impl_item_ref in impl_item_refs { + let impl_item = tcx.map.impl_item(impl_item_ref.id); + let seen_items = match impl_item.node { + hir::ImplItemKind::Type(_) => &mut seen_type_items, + _ => &mut seen_value_items, + }; + match seen_items.entry(impl_item.name) { + Occupied(entry) => { + let mut err = struct_span_err!(tcx.sess, impl_item.span, E0201, + "duplicate definitions with name `{}`:", + impl_item.name); + err.span_label(*entry.get(), + &format!("previous definition of `{}` here", + impl_item.name)); + err.span_label(impl_item.span, &format!("duplicate definition")); + err.emit(); + } + Vacant(entry) => { + entry.insert(impl_item.span); + } + } + } +} diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index acffbeabb24c1..dfa662590297f 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -44,7 +44,7 @@ independently: into the `ty` representation - collect: computes the types of each top-level item and enters them into - the `cx.tcache` table for later use + the `tcx.types` table for later use - coherence: enforces coherence rules, builds some tables @@ -70,190 +70,175 @@ This API is completely unstable and subject to change. 
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] +#![cfg_attr(not(stage0), deny(warnings))] #![allow(non_camel_case_types)] #![feature(box_patterns)] #![feature(box_syntax)] -#![feature(iter_arith)] +#![feature(conservative_impl_trait)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] #![feature(staged_api)] -#![feature(cell_extras)] #[macro_use] extern crate log; #[macro_use] extern crate syntax; +extern crate syntax_pos; extern crate arena; extern crate fmt_macros; -extern crate rustc; +#[macro_use] extern crate rustc; extern crate rustc_platform_intrinsics as intrinsics; -extern crate rustc_front; extern crate rustc_back; +extern crate rustc_const_math; +extern crate rustc_const_eval; +extern crate rustc_data_structures; +extern crate rustc_errors as errors; pub use rustc::dep_graph; -pub use rustc::front; +pub use rustc::hir; pub use rustc::lint; pub use rustc::middle; pub use rustc::session; pub use rustc::util; -use front::map as hir_map; -use middle::def; -use middle::infer::{self, TypeOrigin}; -use middle::subst; -use middle::ty::{self, Ty, TypeFoldable}; +use dep_graph::DepNode; +use hir::map as hir_map; +use rustc::infer::InferOk; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; use session::config; use util::common::time; -use rustc_front::hir; -use syntax::codemap::Span; -use syntax::{ast, abi}; +use syntax::ast; +use syntax::abi::Abi; +use syntax_pos::Span; use std::cell::RefCell; +use util::nodemap::NodeMap; // NB: This module needs to be declared first so diagnostics are // registered before they are used. 
pub mod diagnostics; pub mod check; +pub mod check_unused; mod rscope; mod astconv; pub mod collect; mod constrained_type_params; +mod impl_wf_check; pub mod coherence; pub mod variance; pub struct TypeAndSubsts<'tcx> { - pub substs: subst::Substs<'tcx>, + pub substs: &'tcx Substs<'tcx>, pub ty: Ty<'tcx>, } pub struct CrateCtxt<'a, 'tcx: 'a> { - // A mapping from method call sites to traits that have that method. - pub trait_map: ty::TraitMap, + ast_ty_to_ty_cache: RefCell>>, + /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). This is used only for /// error reporting, and so is lazily initialised and generally /// shouldn't taint the common path (hence the RefCell). pub all_traits: RefCell>, - pub tcx: &'a ty::ctxt<'tcx>, -} - -// Functions that write types into the node type table -fn write_ty_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, node_id: ast::NodeId, ty: Ty<'tcx>) { - debug!("write_ty_to_tcx({}, {:?})", node_id, ty); - assert!(!ty.needs_infer()); - tcx.node_type_insert(node_id, ty); -} - -fn write_substs_to_tcx<'tcx>(tcx: &ty::ctxt<'tcx>, - node_id: ast::NodeId, - item_substs: ty::ItemSubsts<'tcx>) { - if !item_substs.is_noop() { - debug!("write_substs_to_tcx({}, {:?})", - node_id, - item_substs); - assert!(!item_substs.substs.types.needs_infer()); + /// This stack is used to identify cycles in the user's source. + /// Note that these cycles can cross multiple items. + pub stack: RefCell>, - tcx.tables.borrow_mut().item_substs.insert(node_id, item_substs); - } -} + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, -fn lookup_full_def(tcx: &ty::ctxt, sp: Span, id: ast::NodeId) -> def::Def { - match tcx.def_map.borrow().get(&id) { - Some(x) => x.full_def(), - None => { - span_fatal!(tcx.sess, sp, E0242, "internal error looking up a definition") - } - } + /// Obligations which will have to be checked at the end of + /// type-checking, after all functions have been inferred. 
+ /// The key is the NodeId of the item the obligations were from. + pub deferred_obligations: RefCell>>>, } -fn require_c_abi_if_variadic(tcx: &ty::ctxt, +fn require_c_abi_if_variadic(tcx: TyCtxt, decl: &hir::FnDecl, - abi: abi::Abi, + abi: Abi, span: Span) { - if decl.variadic && abi != abi::C { - span_err!(tcx.sess, span, E0045, + if decl.variadic && abi != Abi::C { + let mut err = struct_span_err!(tcx.sess, span, E0045, "variadic function must have C calling convention"); + err.span_label(span, &("variadics require C calling conventions").to_string()) + .emit(); } } -fn require_same_types<'a, 'tcx, M>(tcx: &ty::ctxt<'tcx>, - maybe_infcx: Option<&infer::InferCtxt<'a, 'tcx>>, - t1_is_expected: bool, - span: Span, - t1: Ty<'tcx>, - t2: Ty<'tcx>, - msg: M) - -> bool where - M: FnOnce() -> String, -{ - let result = match maybe_infcx { - None => { - let infcx = infer::new_infer_ctxt(tcx, &tcx.tables, None); - infer::mk_eqty(&infcx, t1_is_expected, TypeOrigin::Misc(span), t1, t2) - } - Some(infcx) => { - infer::mk_eqty(infcx, t1_is_expected, TypeOrigin::Misc(span), t1, t2) - } - }; - - match result { - Ok(_) => true, - Err(ref terr) => { - let mut err = struct_span_err!(tcx.sess, span, E0211, "{}: {}", msg(), terr); - tcx.note_and_explain_type_err(&mut err, terr, span); - err.emit(); - false +fn require_same_types<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + actual: Ty<'tcx>) + -> bool { + ccx.tcx.infer_ctxt(None, None, Reveal::NotSpecializable).enter(|infcx| { + match infcx.eq_types(false, &cause, expected, actual) { + Ok(InferOk { obligations, .. 
}) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()); + true + } + Err(err) => { + infcx.report_mismatched_types(cause, expected, actual, err); + false + } } - } + }) } fn check_main_fn_ty(ccx: &CrateCtxt, main_id: ast::NodeId, main_span: Span) { let tcx = ccx.tcx; - let main_t = tcx.node_id_to_type(main_id); + let main_def_id = tcx.map.local_def_id(main_id); + let main_t = tcx.item_type(main_def_id); match main_t.sty { - ty::TyBareFn(..) => { + ty::TyFnDef(..) => { match tcx.map.find(main_id) { Some(hir_map::NodeItem(it)) => { match it.node { - hir::ItemFn(_, _, _, _, ref ps, _) - if ps.is_parameterized() => { - span_err!(ccx.tcx.sess, main_span, E0131, - "main function is not allowed to have type parameters"); - return; + hir::ItemFn(.., ref generics, _) => { + if generics.is_parameterized() { + struct_span_err!(ccx.tcx.sess, generics.span, E0131, + "main function is not allowed to have type parameters") + .span_label(generics.span, + &format!("main cannot have type parameters")) + .emit(); + return; + } } _ => () } } _ => () } - let main_def_id = tcx.map.local_def_id(main_id); - let se_ty = tcx.mk_fn(Some(main_def_id), tcx.mk_bare_fn(ty::BareFnTy { + let substs = tcx.intern_substs(&[]); + let se_ty = tcx.mk_fn_def(main_def_id, substs, + tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, - abi: abi::Rust, + abi: Abi::Rust, sig: ty::Binder(ty::FnSig { inputs: Vec::new(), - output: ty::FnConverging(tcx.mk_nil()), + output: tcx.mk_nil(), variadic: false }) })); - require_same_types(tcx, None, false, main_span, main_t, se_ty, - || { - format!("main function expects type: `{}`", - se_ty) - }); + require_same_types( + ccx, + &ObligationCause::new(main_span, main_id, ObligationCauseCode::MainFunctionType), + se_ty, + main_t); } _ => { - tcx.sess.span_bug(main_span, - &format!("main has a non-function type: found `{}`", - main_t)); + span_bug!(main_span, + "main has a non-function type: found `{}`", + main_t); } } } @@ -262,16 
+247,20 @@ fn check_start_fn_ty(ccx: &CrateCtxt, start_id: ast::NodeId, start_span: Span) { let tcx = ccx.tcx; - let start_t = tcx.node_id_to_type(start_id); + let start_def_id = ccx.tcx.map.local_def_id(start_id); + let start_t = tcx.item_type(start_def_id); match start_t.sty { - ty::TyBareFn(..) => { + ty::TyFnDef(..) => { match tcx.map.find(start_id) { Some(hir_map::NodeItem(it)) => { match it.node { - hir::ItemFn(_,_,_,_,ref ps,_) + hir::ItemFn(..,ref ps,_) if ps.is_parameterized() => { - span_err!(tcx.sess, start_span, E0132, - "start function is not allowed to have type parameters"); + struct_span_err!(tcx.sess, ps.span, E0132, + "start function is not allowed to have type parameters") + .span_label(ps.span, + &format!("start function cannot have type parameters")) + .emit(); return; } _ => () @@ -280,86 +269,97 @@ fn check_start_fn_ty(ccx: &CrateCtxt, _ => () } - let se_ty = tcx.mk_fn(Some(ccx.tcx.map.local_def_id(start_id)), - tcx.mk_bare_fn(ty::BareFnTy { + let substs = tcx.intern_substs(&[]); + let se_ty = tcx.mk_fn_def(start_def_id, substs, + tcx.mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Normal, - abi: abi::Rust, + abi: Abi::Rust, sig: ty::Binder(ty::FnSig { - inputs: vec!( + inputs: vec![ tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8)) - ), - output: ty::FnConverging(tcx.types.isize), + ], + output: tcx.types.isize, variadic: false, }), })); - require_same_types(tcx, None, false, start_span, start_t, se_ty, - || { - format!("start function expects type: `{}`", - se_ty) - }); - + require_same_types( + ccx, + &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType), + se_ty, + start_t); } _ => { - tcx.sess.span_bug(start_span, - &format!("start has a non-function type: found `{}`", - start_t)); + span_bug!(start_span, + "start has a non-function type: found `{}`", + start_t); } } } fn check_for_entry_fn(ccx: &CrateCtxt) { let tcx = ccx.tcx; - match *tcx.sess.entry_fn.borrow() { - Some((id, sp)) => match 
tcx.sess.entry_type.get() { + let _task = tcx.dep_graph.in_task(DepNode::CheckEntryFn); + if let Some((id, sp)) = *tcx.sess.entry_fn.borrow() { + match tcx.sess.entry_type.get() { Some(config::EntryMain) => check_main_fn_ty(ccx, id, sp), Some(config::EntryStart) => check_start_fn_ty(ccx, id, sp), Some(config::EntryNone) => {} - None => tcx.sess.bug("entry function without a type") - }, - None => {} + None => bug!("entry function without a type") + } } } -pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) { +pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> Result>, usize> { let time_passes = tcx.sess.time_passes(); let ccx = CrateCtxt { - trait_map: trait_map, + ast_ty_to_ty_cache: RefCell::new(NodeMap()), all_traits: RefCell::new(None), - tcx: tcx + stack: RefCell::new(Vec::new()), + tcx: tcx, + deferred_obligations: RefCell::new(NodeMap()), }; // this ensures that later parts of type checking can assume that items // have valid types and not error - tcx.sess.abort_if_new_errors(|| { + tcx.sess.track_errors(|| { time(time_passes, "type collecting", || - collect::collect_item_types(tcx)); + collect::collect_item_types(&ccx)); - }); + })?; time(time_passes, "variance inference", || variance::infer_variance(tcx)); - tcx.sess.abort_if_new_errors(|| { + tcx.sess.track_errors(|| { + time(time_passes, "impl wf inference", || + impl_wf_check::impl_wf_check(&ccx)); + })?; + + tcx.sess.track_errors(|| { time(time_passes, "coherence checking", || coherence::check_coherence(&ccx)); - }); + })?; - time(time_passes, "wf checking", || - check::check_wf_new(&ccx)); + time(time_passes, "wf checking", || check::check_wf_new(&ccx))?; - time(time_passes, "item-types checking", || - check::check_item_types(&ccx)); + time(time_passes, "item-types checking", || check::check_item_types(&ccx))?; - time(time_passes, "item-bodies checking", || - check::check_item_bodies(&ccx)); + time(time_passes, "item-bodies checking", || check::check_item_bodies(&ccx))?; - 
time(time_passes, "drop-impl checking", || - check::check_drop_impls(&ccx)); + time(time_passes, "drop-impl checking", || check::check_drop_impls(&ccx))?; + check_unused::check_crate(tcx); check_for_entry_fn(&ccx); - tcx.sess.abort_if_errors(); + + let err_count = tcx.sess.err_count(); + if err_count == 0 { + Ok(ccx.ast_ty_to_ty_cache.into_inner()) + } else { + Err(err_count) + } } __build_diagnostic_array! { librustc_typeck, DIAGNOSTICS } diff --git a/src/librustc_typeck/rscope.rs b/src/librustc_typeck/rscope.rs index 1b02c736dce3b..131ecfc6e0c78 100644 --- a/src/librustc_typeck/rscope.rs +++ b/src/librustc_typeck/rscope.rs @@ -8,11 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use rustc::hir::def_id::DefId; +use rustc::ty; +use rustc::ty::subst::Substs; -use middle::ty; +use astconv::AstConv; use std::cell::Cell; -use syntax::codemap::Span; +use syntax_pos::Span; #[derive(Clone)] pub struct ElisionFailureInfo { @@ -50,6 +53,80 @@ pub trait RegionScope { /// computing `object_lifetime_default` (in particular, in legacy /// modes, it may not be relevant). fn base_object_lifetime_default(&self, span: Span) -> ty::Region; + + /// If this scope allows anonymized types, return the generics in + /// scope, that anonymized types will close over. For example, + /// if you have a function like: + /// + /// fn foo<'a, T>() -> impl Trait { ... } + /// + /// then, for the rscope that is used when handling the return type, + /// `anon_type_scope()` would return a `Some(AnonTypeScope {...})`, + /// on which `.fresh_substs(...)` can be used to obtain identity + /// Substs for `'a` and `T`, to track them in `TyAnon`. This property + /// is controlled by the region scope because it's fine-grained enough + /// to allow restriction of anonymized types to the syntactical extent + /// of a function's return type. 
+ fn anon_type_scope(&self) -> Option { + None + } +} + +#[derive(Copy, Clone)] +pub struct AnonTypeScope { + enclosing_item: DefId +} + +impl<'gcx: 'tcx, 'tcx> AnonTypeScope { + pub fn new(enclosing_item: DefId) -> AnonTypeScope { + AnonTypeScope { + enclosing_item: enclosing_item + } + } + + pub fn fresh_substs(&self, astconv: &AstConv<'gcx, 'tcx>, span: Span) + -> &'tcx Substs<'tcx> { + use collect::mk_item_substs; + + mk_item_substs(astconv, span, self.enclosing_item) + } +} + +/// A scope wrapper which optionally allows anonymized types. +#[derive(Copy, Clone)] +pub struct MaybeWithAnonTypes { + base_scope: R, + anon_scope: Option +} + +impl MaybeWithAnonTypes { + pub fn new(base_scope: R, anon_scope: Option) -> Self { + MaybeWithAnonTypes { + base_scope: base_scope, + anon_scope: anon_scope + } + } +} + +impl RegionScope for MaybeWithAnonTypes { + fn object_lifetime_default(&self, span: Span) -> Option { + self.base_scope.object_lifetime_default(span) + } + + fn anon_regions(&self, + span: Span, + count: usize) + -> Result, Option>> { + self.base_scope.anon_regions(span, count) + } + + fn base_object_lifetime_default(&self, span: Span) -> ty::Region { + self.base_scope.base_object_lifetime_default(span) + } + + fn anon_type_scope(&self) -> Option { + self.anon_scope + } } // A scope in which all regions must be explicitly named. This is used @@ -136,6 +213,45 @@ impl RegionScope for ElidableRscope { } } +/// A scope that behaves as an ElidabeRscope with a `'static` default region +/// that should also warn if the `static_in_const` feature is unset. 
+#[derive(Copy, Clone)] +pub struct StaticRscope<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { + tcx: &'a ty::TyCtxt<'a, 'gcx, 'tcx>, +} + +impl<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> StaticRscope<'a, 'gcx, 'tcx> { + /// create a new StaticRscope from a reference to the `TyCtxt` + pub fn new(tcx: &'a ty::TyCtxt<'a, 'gcx, 'tcx>) -> Self { + StaticRscope { tcx: tcx } + } +} + +impl<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> RegionScope for StaticRscope<'a, 'gcx, 'tcx> { + fn anon_regions(&self, + span: Span, + count: usize) + -> Result, Option>> { + if !self.tcx.sess.features.borrow().static_in_const { + self.tcx + .sess + .struct_span_err(span, + "this needs a `'static` lifetime or the \ + `static_in_const` feature, see #35897") + .emit(); + } + Ok(vec![ty::ReStatic; count]) + } + + fn object_lifetime_default(&self, span: Span) -> Option { + Some(self.base_object_lifetime_default(span)) + } + + fn base_object_lifetime_default(&self, _span: Span) -> ty::Region { + ty::ReStatic + } +} + /// A scope in which we generate anonymous, late-bound regions for /// omitted regions. This occurs in function signatures. pub struct BindingRscope { @@ -180,12 +296,12 @@ impl RegionScope for BindingRscope { /// A scope which overrides the default object lifetime but has no other effect. 
pub struct ObjectLifetimeDefaultRscope<'r> { base_scope: &'r (RegionScope+'r), - default: ty::ObjectLifetimeDefault, + default: ty::ObjectLifetimeDefault<'r>, } impl<'r> ObjectLifetimeDefaultRscope<'r> { pub fn new(base_scope: &'r (RegionScope+'r), - default: ty::ObjectLifetimeDefault) + default: ty::ObjectLifetimeDefault<'r>) -> ObjectLifetimeDefaultRscope<'r> { ObjectLifetimeDefaultRscope { @@ -206,7 +322,7 @@ impl<'r> RegionScope for ObjectLifetimeDefaultRscope<'r> { Some(self.base_object_lifetime_default(span)), ty::ObjectLifetimeDefault::Specific(r) => - Some(r), + Some(*r), } } @@ -221,6 +337,10 @@ impl<'r> RegionScope for ObjectLifetimeDefaultRscope<'r> { { self.base_scope.anon_regions(span, count) } + + fn anon_type_scope(&self) -> Option { + self.base_scope.anon_type_scope() + } } /// A scope which simply shifts the Debruijn index of other scopes @@ -262,4 +382,8 @@ impl<'r> RegionScope for ShiftedRscope<'r> { } } } + + fn anon_type_scope(&self) -> Option { + self.base_scope.anon_type_scope() + } } diff --git a/src/librustc_typeck/variance.rs b/src/librustc_typeck/variance.rs deleted file mode 100644 index ce0e9e14035f5..0000000000000 --- a/src/librustc_typeck/variance.rs +++ /dev/null @@ -1,1250 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This file infers the variance of type and lifetime parameters. The -//! algorithm is taken from Section 4 of the paper "Taming the Wildcards: -//! Combining Definition- and Use-Site Variance" published in PLDI'11 and -//! written by Altidor et al., and hereafter referred to as The Paper. -//! -//! This inference is explicitly designed *not* to consider the uses of -//! types within code. 
To determine the variance of type parameters -//! defined on type `X`, we only consider the definition of the type `X` -//! and the definitions of any types it references. -//! -//! We only infer variance for type parameters found on *data types* -//! like structs and enums. In these cases, there is fairly straightforward -//! explanation for what variance means. The variance of the type -//! or lifetime parameters defines whether `T` is a subtype of `T` -//! (resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B` -//! (resp. `'a` and `'b`). -//! -//! We do not infer variance for type parameters found on traits, fns, -//! or impls. Variance on trait parameters can make indeed make sense -//! (and we used to compute it) but it is actually rather subtle in -//! meaning and not that useful in practice, so we removed it. See the -//! addendum for some details. Variances on fn/impl parameters, otoh, -//! doesn't make sense because these parameters are instantiated and -//! then forgotten, they don't persist in types or compiled -//! byproducts. -//! -//! ### The algorithm -//! -//! The basic idea is quite straightforward. We iterate over the types -//! defined and, for each use of a type parameter X, accumulate a -//! constraint indicating that the variance of X must be valid for the -//! variance of that use site. We then iteratively refine the variance of -//! X until all constraints are met. There is *always* a sol'n, because at -//! the limit we can declare all type parameters to be invariant and all -//! constraints will be satisfied. -//! -//! As a simple example, consider: -//! -//! enum Option { Some(A), None } -//! enum OptionalFn { Some(|B|), None } -//! enum OptionalMap { Some(|C| -> C), None } -//! -//! Here, we will generate the constraints: -//! -//! 1. V(A) <= + -//! 2. V(B) <= - -//! 3. V(C) <= + -//! 4. V(C) <= - -//! -//! These indicate that (1) the variance of A must be at most covariant; -//! 
(2) the variance of B must be at most contravariant; and (3, 4) the -//! variance of C must be at most covariant *and* contravariant. All of these -//! results are based on a variance lattice defined as follows: -//! -//! * Top (bivariant) -//! - + -//! o Bottom (invariant) -//! -//! Based on this lattice, the solution V(A)=+, V(B)=-, V(C)=o is the -//! optimal solution. Note that there is always a naive solution which -//! just declares all variables to be invariant. -//! -//! You may be wondering why fixed-point iteration is required. The reason -//! is that the variance of a use site may itself be a function of the -//! variance of other type parameters. In full generality, our constraints -//! take the form: -//! -//! V(X) <= Term -//! Term := + | - | * | o | V(X) | Term x Term -//! -//! Here the notation V(X) indicates the variance of a type/region -//! parameter `X` with respect to its defining class. `Term x Term` -//! represents the "variance transform" as defined in the paper: -//! -//! If the variance of a type variable `X` in type expression `E` is `V2` -//! and the definition-site variance of the [corresponding] type parameter -//! of a class `C` is `V1`, then the variance of `X` in the type expression -//! `C` is `V3 = V1.xform(V2)`. -//! -//! ### Constraints -//! -//! If I have a struct or enum with where clauses: -//! -//! struct Foo { ... } -//! -//! you might wonder whether the variance of `T` with respect to `Bar` -//! affects the variance `T` with respect to `Foo`. I claim no. The -//! reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t -//! `Foo`. And then we have a `Foo` that is upcast to `Foo`, where -//! `X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that -//! case, the upcast will be illegal, but not because of a variance -//! failure, but rather because the target type `Foo` is itself just -//! not well-formed. Basically we get to assume well-formedness of all -//! 
types involved before considering variance. -//! -//! ### Addendum: Variance on traits -//! -//! As mentioned above, we used to permit variance on traits. This was -//! computed based on the appearance of trait type parameters in -//! method signatures and was used to represent the compatibility of -//! vtables in trait objects (and also "virtual" vtables or dictionary -//! in trait bounds). One complication was that variance for -//! associated types is less obvious, since they can be projected out -//! and put to myriad uses, so it's not clear when it is safe to allow -//! `X::Bar` to vary (or indeed just what that means). Moreover (as -//! covered below) all inputs on any trait with an associated type had -//! to be invariant, limiting the applicability. Finally, the -//! annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all -//! trait type parameters had a variance were confusing and annoying -//! for little benefit. -//! -//! Just for historical reference,I am going to preserve some text indicating -//! how one could interpret variance and trait matching. -//! -//! #### Variance and object types -//! -//! Just as with structs and enums, we can decide the subtyping -//! relationship between two object types `&Trait` and `&Trait` -//! based on the relationship of `A` and `B`. Note that for object -//! types we ignore the `Self` type parameter -- it is unknown, and -//! the nature of dynamic dispatch ensures that we will always call a -//! function that is expected the appropriate `Self` type. However, we -//! must be careful with the other type parameters, or else we could -//! end up calling a function that is expecting one type but provided -//! another. -//! -//! To see what I mean, consider a trait like so: -//! -//! trait ConvertTo { -//! fn convertTo(&self) -> A; -//! } -//! -//! Intuitively, If we had one object `O=&ConvertTo` and another -//! `S=&ConvertTo`, then `S <: O` because `String <: Object` -//! 
(presuming Java-like "string" and "object" types, my go to examples -//! for subtyping). The actual algorithm would be to compare the -//! (explicit) type parameters pairwise respecting their variance: here, -//! the type parameter A is covariant (it appears only in a return -//! position), and hence we require that `String <: Object`. -//! -//! You'll note though that we did not consider the binding for the -//! (implicit) `Self` type parameter: in fact, it is unknown, so that's -//! good. The reason we can ignore that parameter is precisely because we -//! don't need to know its value until a call occurs, and at that time (as -//! you said) the dynamic nature of virtual dispatch means the code we run -//! will be correct for whatever value `Self` happens to be bound to for -//! the particular object whose method we called. `Self` is thus different -//! from `A`, because the caller requires that `A` be known in order to -//! know the return type of the method `convertTo()`. (As an aside, we -//! have rules preventing methods where `Self` appears outside of the -//! receiver position from being called via an object.) -//! -//! #### Trait variance and vtable resolution -//! -//! But traits aren't only used with objects. They're also used when -//! deciding whether a given impl satisfies a given trait bound. To set the -//! scene here, imagine I had a function: -//! -//! fn convertAll>(v: &[T]) { -//! ... -//! } -//! -//! Now imagine that I have an implementation of `ConvertTo` for `Object`: -//! -//! impl ConvertTo for Object { ... } -//! -//! And I want to call `convertAll` on an array of strings. Suppose -//! further that for whatever reason I specifically supply the value of -//! `String` for the type parameter `T`: -//! -//! let mut vector = vec!["string", ...]; -//! convertAll::(vector); -//! -//! Is this legal? To put another way, can we apply the `impl` for -//! `Object` to the type `String`? The answer is yes, but to see why -//! 
we have to expand out what will happen: -//! -//! - `convertAll` will create a pointer to one of the entries in the -//! vector, which will have type `&String` -//! - It will then call the impl of `convertTo()` that is intended -//! for use with objects. This has the type: -//! -//! fn(self: &Object) -> i32 -//! -//! It is ok to provide a value for `self` of type `&String` because -//! `&String <: &Object`. -//! -//! OK, so intuitively we want this to be legal, so let's bring this back -//! to variance and see whether we are computing the correct result. We -//! must first figure out how to phrase the question "is an impl for -//! `Object,i32` usable where an impl for `String,i32` is expected?" -//! -//! Maybe it's helpful to think of a dictionary-passing implementation of -//! type classes. In that case, `convertAll()` takes an implicit parameter -//! representing the impl. In short, we *have* an impl of type: -//! -//! V_O = ConvertTo for Object -//! -//! and the function prototype expects an impl of type: -//! -//! V_S = ConvertTo for String -//! -//! As with any argument, this is legal if the type of the value given -//! (`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`? -//! The answer will depend on the variance of the various parameters. In -//! this case, because the `Self` parameter is contravariant and `A` is -//! covariant, it means that: -//! -//! V_O <: V_S iff -//! i32 <: i32 -//! String <: Object -//! -//! These conditions are satisfied and so we are happy. -//! -//! #### Variance and associated types -//! -//! Traits with associated types -- or at minimum projection -//! expressions -- must be invariant with respect to all of their -//! inputs. To see why this makes sense, consider what subtyping for a -//! trait reference means: -//! -//! <: -//! -//! means that if I know that `T as Trait`, I also know that `U as -//! Trait`. Moreover, if you think of it as dictionary passing style, -//! 
it means that a dictionary for `` is safe to use where -//! a dictionary for `` is expected. -//! -//! The problem is that when you can project types out from ``, the relationship to types projected out of `` -//! is completely unknown unless `T==U` (see #21726 for more -//! details). Making `Trait` invariant ensures that this is true. -//! -//! Another related reason is that if we didn't make traits with -//! associated types invariant, then projection is no longer a -//! function with a single result. Consider: -//! -//! ``` -//! trait Identity { type Out; fn foo(&self); } -//! impl Identity for T { type Out = T; ... } -//! ``` -//! -//! Now if I have `<&'static () as Identity>::Out`, this can be -//! validly derived as `&'a ()` for any `'a`: -//! -//! <&'a () as Identity> <: <&'static () as Identity> -//! if &'static () < : &'a () -- Identity is contravariant in Self -//! if 'static : 'a -- Subtyping rules for relations -//! -//! This change otoh means that `<'static () as Identity>::Out` is -//! always `&'static ()` (which might then be upcast to `'a ()`, -//! separately). This was helpful in solving #21750. 
- -use self::VarianceTerm::*; -use self::ParamKind::*; - -use arena; -use arena::TypedArena; -use dep_graph::DepNode; -use middle::def_id::DefId; -use middle::resolve_lifetime as rl; -use middle::subst; -use middle::subst::{ParamSpace, FnSpace, TypeSpace, SelfSpace, VecPerParamSpace}; -use middle::ty::{self, Ty}; -use rustc::front::map as hir_map; -use std::fmt; -use std::rc::Rc; -use syntax::ast; -use rustc_front::hir; -use rustc_front::intravisit::Visitor; -use util::nodemap::NodeMap; - -pub fn infer_variance(tcx: &ty::ctxt) { - let _task = tcx.dep_graph.in_task(DepNode::Variance); - let krate = tcx.map.krate(); - let mut arena = arena::TypedArena::new(); - let terms_cx = determine_parameters_to_be_inferred(tcx, &mut arena, krate); - let constraints_cx = add_constraints_from_crate(terms_cx, krate); - solve_constraints(constraints_cx); - tcx.variance_computed.set(true); -} - -// Representing terms -// -// Terms are structured as a straightforward tree. Rather than rely on -// GC, we allocate terms out of a bounded arena (the lifetime of this -// arena is the lifetime 'a that is threaded around). -// -// We assign a unique index to each type/region parameter whose variance -// is to be inferred. We refer to such variables as "inferreds". An -// `InferredIndex` is a newtype'd int representing the index of such -// a variable. 
- -type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; - -#[derive(Copy, Clone, Debug)] -struct InferredIndex(usize); - -#[derive(Copy, Clone)] -enum VarianceTerm<'a> { - ConstantTerm(ty::Variance), - TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>), - InferredTerm(InferredIndex), -} - -impl<'a> fmt::Debug for VarianceTerm<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - ConstantTerm(c1) => write!(f, "{:?}", c1), - TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), - InferredTerm(id) => write!(f, "[{}]", { let InferredIndex(i) = id; i }) - } - } -} - -// The first pass over the crate simply builds up the set of inferreds. - -struct TermsContext<'a, 'tcx: 'a> { - tcx: &'a ty::ctxt<'tcx>, - arena: &'a TypedArena>, - - empty_variances: Rc, - - // For marker types, UnsafeCell, and other lang items where - // variance is hardcoded, records the item-id and the hardcoded - // variance. - lang_items: Vec<(ast::NodeId, Vec)>, - - // Maps from the node id of a type/generic parameter to the - // corresponding inferred index. - inferred_map: NodeMap, - - // Maps from an InferredIndex to the info for that variable. - inferred_infos: Vec> , -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum ParamKind { - TypeParam, - RegionParam, -} - -struct InferredInfo<'a> { - item_id: ast::NodeId, - kind: ParamKind, - space: ParamSpace, - index: usize, - param_id: ast::NodeId, - term: VarianceTermPtr<'a>, - - // Initial value to use for this parameter when inferring - // variance. For most parameters, this is Bivariant. But for lang - // items and input type parameters on traits, it is different. 
- initial_variance: ty::Variance, -} - -fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: &'a ty::ctxt<'tcx>, - arena: &'a mut TypedArena>, - krate: &hir::Crate) - -> TermsContext<'a, 'tcx> { - let mut terms_cx = TermsContext { - tcx: tcx, - arena: arena, - inferred_map: NodeMap(), - inferred_infos: Vec::new(), - - lang_items: lang_items(tcx), - - // cache and share the variance struct used for items with - // no type/region parameters - empty_variances: Rc::new(ty::ItemVariances { - types: VecPerParamSpace::empty(), - regions: VecPerParamSpace::empty() - }) - }; - - krate.visit_all_items(&mut terms_cx); - - terms_cx -} - -fn lang_items(tcx: &ty::ctxt) -> Vec<(ast::NodeId,Vec)> { - let all = vec![ - (tcx.lang_items.phantom_data(), vec![ty::Covariant]), - (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]), - - // Deprecated: - (tcx.lang_items.covariant_type(), vec![ty::Covariant]), - (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]), - (tcx.lang_items.invariant_type(), vec![ty::Invariant]), - (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]), - (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]), - (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]), - - ]; - - all.into_iter() // iterating over (Option, Variance) - .filter(|&(ref d,_)| d.is_some()) - .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance) - .filter_map(|(d, v)| tcx.map.as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance) - .collect() -} - -impl<'a, 'tcx> TermsContext<'a, 'tcx> { - fn add_inferreds_for_item(&mut self, - item_id: ast::NodeId, - has_self: bool, - generics: &hir::Generics) - { - /*! - * Add "inferreds" for the generic parameters declared on this - * item. This has a lot of annoying parameters because we are - * trying to drive this from the AST, rather than the - * ty::Generics, so that we can get span info -- but this - * means we must accommodate syntactic distinctions. 
- */ - - // NB: In the code below for writing the results back into the - // tcx, we rely on the fact that all inferreds for a particular - // item are assigned continuous indices. - - let inferreds_on_entry = self.num_inferred(); - - if has_self { - self.add_inferred(item_id, TypeParam, SelfSpace, 0, item_id); - } - - for (i, p) in generics.lifetimes.iter().enumerate() { - let id = p.lifetime.id; - self.add_inferred(item_id, RegionParam, TypeSpace, i, id); - } - - for (i, p) in generics.ty_params.iter().enumerate() { - self.add_inferred(item_id, TypeParam, TypeSpace, i, p.id); - } - - // If this item has no type or lifetime parameters, - // then there are no variances to infer, so just - // insert an empty entry into the variance map. - // Arguably we could just leave the map empty in this - // case but it seems cleaner to be able to distinguish - // "invalid item id" from "item id with no - // parameters". - if self.num_inferred() == inferreds_on_entry { - let item_def_id = self.tcx.map.local_def_id(item_id); - let newly_added = - self.tcx.item_variance_map.borrow_mut().insert( - item_def_id, - self.empty_variances.clone()).is_none(); - assert!(newly_added); - } - } - - fn add_inferred(&mut self, - item_id: ast::NodeId, - kind: ParamKind, - space: ParamSpace, - index: usize, - param_id: ast::NodeId) { - let inf_index = InferredIndex(self.inferred_infos.len()); - let term = self.arena.alloc(InferredTerm(inf_index)); - let initial_variance = self.pick_initial_variance(item_id, space, index); - self.inferred_infos.push(InferredInfo { item_id: item_id, - kind: kind, - space: space, - index: index, - param_id: param_id, - term: term, - initial_variance: initial_variance }); - let newly_added = self.inferred_map.insert(param_id, inf_index).is_none(); - assert!(newly_added); - - debug!("add_inferred(item_path={}, \ - item_id={}, \ - kind={:?}, \ - space={:?}, \ - index={}, \ - param_id={}, \ - inf_index={:?}, \ - initial_variance={:?})", - 
self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)), - item_id, kind, space, index, param_id, inf_index, - initial_variance); - } - - fn pick_initial_variance(&self, - item_id: ast::NodeId, - space: ParamSpace, - index: usize) - -> ty::Variance - { - match space { - SelfSpace | FnSpace => { - ty::Bivariant - } - - TypeSpace => { - match self.lang_items.iter().find(|&&(n, _)| n == item_id) { - Some(&(_, ref variances)) => variances[index], - None => ty::Bivariant - } - } - } - } - - fn num_inferred(&self) -> usize { - self.inferred_infos.len() - } -} - -impl<'a, 'tcx, 'v> Visitor<'v> for TermsContext<'a, 'tcx> { - fn visit_item(&mut self, item: &hir::Item) { - debug!("add_inferreds for item {}", self.tcx.map.node_to_string(item.id)); - - match item.node { - hir::ItemEnum(_, ref generics) | - hir::ItemStruct(_, ref generics) => { - self.add_inferreds_for_item(item.id, false, generics); - } - hir::ItemTrait(_, ref generics, _, _) => { - // Note: all inputs for traits are ultimately - // constrained to be invariant. See `visit_item` in - // the impl for `ConstraintContext` below. - self.add_inferreds_for_item(item.id, true, generics); - } - - hir::ItemExternCrate(_) | - hir::ItemUse(_) | - hir::ItemDefaultImpl(..) | - hir::ItemImpl(..) | - hir::ItemStatic(..) | - hir::ItemConst(..) | - hir::ItemFn(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) | - hir::ItemTy(..) => { - } - } - } -} - -// Constraint construction and representation -// -// The second pass over the AST determines the set of constraints. -// We walk the set of items and, for each member, generate new constraints. 
- -struct ConstraintContext<'a, 'tcx: 'a> { - terms_cx: TermsContext<'a, 'tcx>, - - // These are pointers to common `ConstantTerm` instances - covariant: VarianceTermPtr<'a>, - contravariant: VarianceTermPtr<'a>, - invariant: VarianceTermPtr<'a>, - bivariant: VarianceTermPtr<'a>, - - constraints: Vec> , -} - -/// Declares that the variable `decl_id` appears in a location with -/// variance `variance`. -#[derive(Copy, Clone)] -struct Constraint<'a> { - inferred: InferredIndex, - variance: &'a VarianceTerm<'a>, -} - -fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>, - krate: &hir::Crate) - -> ConstraintContext<'a, 'tcx> -{ - let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant)); - let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant)); - let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant)); - let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant)); - let mut constraint_cx = ConstraintContext { - terms_cx: terms_cx, - covariant: covariant, - contravariant: contravariant, - invariant: invariant, - bivariant: bivariant, - constraints: Vec::new(), - }; - krate.visit_all_items(&mut constraint_cx); - constraint_cx -} - -impl<'a, 'tcx, 'v> Visitor<'v> for ConstraintContext<'a, 'tcx> { - fn visit_item(&mut self, item: &hir::Item) { - let tcx = self.terms_cx.tcx; - let did = tcx.map.local_def_id(item.id); - - debug!("visit_item item={}", tcx.map.node_to_string(item.id)); - - match item.node { - hir::ItemEnum(..) | hir::ItemStruct(..) => { - let scheme = tcx.lookup_item_type(did); - - // Not entirely obvious: constraints on structs/enums do not - // affect the variance of their type parameters. See discussion - // in comment at top of module. - // - // self.add_constraints_from_generics(&scheme.generics); - - for field in tcx.lookup_adt_def(did).all_fields() { - self.add_constraints_from_ty(&scheme.generics, - field.unsubst_ty(), - self.covariant); - } - } - hir::ItemTrait(..) 
=> { - let trait_def = tcx.lookup_trait_def(did); - self.add_constraints_from_trait_ref(&trait_def.generics, - trait_def.trait_ref, - self.invariant); - } - - hir::ItemExternCrate(_) | - hir::ItemUse(_) | - hir::ItemStatic(..) | - hir::ItemConst(..) | - hir::ItemFn(..) | - hir::ItemMod(..) | - hir::ItemForeignMod(..) | - hir::ItemTy(..) | - hir::ItemImpl(..) | - hir::ItemDefaultImpl(..) => { - } - } - } -} - -/// Is `param_id` a lifetime according to `map`? -fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool { - match map.find(param_id) { - Some(hir_map::NodeLifetime(..)) => true, _ => false - } -} - -impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { - fn tcx(&self) -> &'a ty::ctxt<'tcx> { - self.terms_cx.tcx - } - - fn inferred_index(&self, param_id: ast::NodeId) -> InferredIndex { - match self.terms_cx.inferred_map.get(¶m_id) { - Some(&index) => index, - None => { - self.tcx().sess.bug(&format!( - "no inferred index entry for {}", - self.tcx().map.node_to_string(param_id))); - } - } - } - - fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId { - let tcx = self.terms_cx.tcx; - assert!(is_lifetime(&tcx.map, param_id)); - match tcx.named_region_map.get(¶m_id) { - Some(&rl::DefEarlyBoundRegion(_, _, lifetime_decl_id)) - => lifetime_decl_id, - Some(_) => panic!("should not encounter non early-bound cases"), - - // The lookup should only fail when `param_id` is - // itself a lifetime binding: use it as the decl_id. - None => param_id, - } - - } - - /// Is `param_id` a type parameter for which we infer variance? - fn is_to_be_inferred(&self, param_id: ast::NodeId) -> bool { - let result = self.terms_cx.inferred_map.contains_key(¶m_id); - - // To safe-guard against invalid inferred_map constructions, - // double-check if variance is inferred at some use of a type - // parameter (by inspecting parent of its binding declaration - // to see if it is introduced by a type or by a fn/impl). 
- - let check_result = |this:&ConstraintContext| -> bool { - let tcx = this.terms_cx.tcx; - let decl_id = this.find_binding_for_lifetime(param_id); - // Currently only called on lifetimes; double-checking that. - assert!(is_lifetime(&tcx.map, param_id)); - let parent_id = tcx.map.get_parent(decl_id); - let parent = tcx.map.find(parent_id).unwrap_or_else( - || panic!("tcx.map missing entry for id: {}", parent_id)); - - let is_inferred; - macro_rules! cannot_happen { () => { { - panic!("invalid parent: {} for {}", - tcx.map.node_to_string(parent_id), - tcx.map.node_to_string(param_id)); - } } } - - match parent { - hir_map::NodeItem(p) => { - match p.node { - hir::ItemTy(..) | - hir::ItemEnum(..) | - hir::ItemStruct(..) | - hir::ItemTrait(..) => is_inferred = true, - hir::ItemFn(..) => is_inferred = false, - _ => cannot_happen!(), - } - } - hir_map::NodeTraitItem(..) => is_inferred = false, - hir_map::NodeImplItem(..) => is_inferred = false, - _ => cannot_happen!(), - } - - return is_inferred; - }; - - assert_eq!(result, check_result(self)); - - return result; - } - - /// Returns a variance term representing the declared variance of the type/region parameter - /// with the given id. - fn declared_variance(&self, - param_def_id: DefId, - item_def_id: DefId, - kind: ParamKind, - space: ParamSpace, - index: usize) - -> VarianceTermPtr<'a> { - assert_eq!(param_def_id.krate, item_def_id.krate); - - if let Some(param_node_id) = self.tcx().map.as_local_node_id(param_def_id) { - // Parameter on an item defined within current crate: - // variance not yet inferred, so return a symbolic - // variance. - let InferredIndex(index) = self.inferred_index(param_node_id); - self.terms_cx.inferred_infos[index].term - } else { - // Parameter on an item defined within another crate: - // variance already inferred, just look it up. 
- let variances = self.tcx().item_variances(item_def_id); - let variance = match kind { - TypeParam => *variances.types.get(space, index), - RegionParam => *variances.regions.get(space, index), - }; - self.constant_term(variance) - } - } - - fn add_constraint(&mut self, - InferredIndex(index): InferredIndex, - variance: VarianceTermPtr<'a>) { - debug!("add_constraint(index={}, variance={:?})", - index, variance); - self.constraints.push(Constraint { inferred: InferredIndex(index), - variance: variance }); - } - - fn contravariant(&mut self, - variance: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { - self.xform(variance, self.contravariant) - } - - fn invariant(&mut self, - variance: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { - self.xform(variance, self.invariant) - } - - fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> { - match v { - ty::Covariant => self.covariant, - ty::Invariant => self.invariant, - ty::Contravariant => self.contravariant, - ty::Bivariant => self.bivariant, - } - } - - fn xform(&mut self, - v1: VarianceTermPtr<'a>, - v2: VarianceTermPtr<'a>) - -> VarianceTermPtr<'a> { - match (*v1, *v2) { - (_, ConstantTerm(ty::Covariant)) => { - // Applying a "covariant" transform is always a no-op - v1 - } - - (ConstantTerm(c1), ConstantTerm(c2)) => { - self.constant_term(c1.xform(c2)) - } - - _ => { - &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)) - } - } - } - - fn add_constraints_from_trait_ref(&mut self, - generics: &ty::Generics<'tcx>, - trait_ref: ty::TraitRef<'tcx>, - variance: VarianceTermPtr<'a>) { - debug!("add_constraints_from_trait_ref: trait_ref={:?} variance={:?}", - trait_ref, - variance); - - let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id); - - self.add_constraints_from_substs( - generics, - trait_ref.def_id, - trait_def.generics.types.as_slice(), - trait_def.generics.regions.as_slice(), - trait_ref.substs, - variance); - } - - /// Adds constraints appropriate for an instance of `ty` appearing - /// in a 
context with the generics defined in `generics` and - /// ambient variance `variance` - fn add_constraints_from_ty(&mut self, - generics: &ty::Generics<'tcx>, - ty: Ty<'tcx>, - variance: VarianceTermPtr<'a>) { - debug!("add_constraints_from_ty(ty={:?}, variance={:?})", - ty, - variance); - - match ty.sty { - ty::TyBool | - ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | - ty::TyFloat(_) | ty::TyStr => { - /* leaf type -- noop */ - } - - ty::TyClosure(..) => { - self.tcx().sess.bug("Unexpected closure type in variance computation"); - } - - ty::TyRef(region, ref mt) => { - let contra = self.contravariant(variance); - self.add_constraints_from_region(generics, *region, contra); - self.add_constraints_from_mt(generics, mt, variance); - } - - ty::TyBox(typ) | ty::TyArray(typ, _) | ty::TySlice(typ) => { - self.add_constraints_from_ty(generics, typ, variance); - } - - - ty::TyRawPtr(ref mt) => { - self.add_constraints_from_mt(generics, mt, variance); - } - - ty::TyTuple(ref subtys) => { - for &subty in subtys { - self.add_constraints_from_ty(generics, subty, variance); - } - } - - ty::TyEnum(def, substs) | - ty::TyStruct(def, substs) => { - let item_type = self.tcx().lookup_item_type(def.did); - - // All type parameters on enums and structs should be - // in the TypeSpace. 
- assert!(item_type.generics.types.is_empty_in(subst::SelfSpace)); - assert!(item_type.generics.types.is_empty_in(subst::FnSpace)); - assert!(item_type.generics.regions.is_empty_in(subst::SelfSpace)); - assert!(item_type.generics.regions.is_empty_in(subst::FnSpace)); - - self.add_constraints_from_substs( - generics, - def.did, - item_type.generics.types.get_slice(subst::TypeSpace), - item_type.generics.regions.get_slice(subst::TypeSpace), - substs, - variance); - } - - ty::TyProjection(ref data) => { - let trait_ref = &data.trait_ref; - let trait_def = self.tcx().lookup_trait_def(trait_ref.def_id); - self.add_constraints_from_substs( - generics, - trait_ref.def_id, - trait_def.generics.types.as_slice(), - trait_def.generics.regions.as_slice(), - trait_ref.substs, - variance); - } - - ty::TyTrait(ref data) => { - let poly_trait_ref = - data.principal_trait_ref_with_self_ty(self.tcx(), - self.tcx().types.err); - - // The type `Foo` is contravariant w/r/t `'a`: - let contra = self.contravariant(variance); - self.add_constraints_from_region(generics, data.bounds.region_bound, contra); - - // Ignore the SelfSpace, it is erased. - self.add_constraints_from_trait_ref(generics, poly_trait_ref.0, variance); - - let projections = data.projection_bounds_with_self_ty(self.tcx(), - self.tcx().types.err); - for projection in &projections { - self.add_constraints_from_ty(generics, projection.0.ty, self.invariant); - } - } - - ty::TyParam(ref data) => { - let def_id = generics.types.get(data.space, data.idx as usize).def_id; - let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); - match self.terms_cx.inferred_map.get(&node_id) { - Some(&index) => { - self.add_constraint(index, variance); - } - None => { - // We do not infer variance for type parameters - // declared on methods. They will not be present - // in the inferred_map. - } - } - } - - ty::TyBareFn(_, &ty::BareFnTy { ref sig, .. 
}) => { - self.add_constraints_from_sig(generics, sig, variance); - } - - ty::TyError => { - // we encounter this when walking the trait references for object - // types, where we use TyError as the Self type - } - - ty::TyInfer(..) => { - self.tcx().sess.bug( - &format!("unexpected type encountered in \ - variance inference: {}", ty)); - } - } - } - - - /// Adds constraints appropriate for a nominal type (enum, struct, - /// object, etc) appearing in a context with ambient variance `variance` - fn add_constraints_from_substs(&mut self, - generics: &ty::Generics<'tcx>, - def_id: DefId, - type_param_defs: &[ty::TypeParameterDef<'tcx>], - region_param_defs: &[ty::RegionParameterDef], - substs: &subst::Substs<'tcx>, - variance: VarianceTermPtr<'a>) { - debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})", - def_id, - substs, - variance); - - for p in type_param_defs { - let variance_decl = - self.declared_variance(p.def_id, def_id, TypeParam, - p.space, p.index as usize); - let variance_i = self.xform(variance, variance_decl); - let substs_ty = *substs.types.get(p.space, p.index as usize); - debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}", - variance_decl, variance_i); - self.add_constraints_from_ty(generics, substs_ty, variance_i); - } - - for p in region_param_defs { - let variance_decl = - self.declared_variance(p.def_id, def_id, - RegionParam, p.space, p.index as usize); - let variance_i = self.xform(variance, variance_decl); - let substs_r = *substs.regions().get(p.space, p.index as usize); - self.add_constraints_from_region(generics, substs_r, variance_i); - } - } - - /// Adds constraints appropriate for a function with signature - /// `sig` appearing in a context with ambient variance `variance` - fn add_constraints_from_sig(&mut self, - generics: &ty::Generics<'tcx>, - sig: &ty::PolyFnSig<'tcx>, - variance: VarianceTermPtr<'a>) { - let contra = self.contravariant(variance); - for &input in &sig.0.inputs { - 
self.add_constraints_from_ty(generics, input, contra); - } - if let ty::FnConverging(result_type) = sig.0.output { - self.add_constraints_from_ty(generics, result_type, variance); - } - } - - /// Adds constraints appropriate for a region appearing in a - /// context with ambient variance `variance` - fn add_constraints_from_region(&mut self, - generics: &ty::Generics<'tcx>, - region: ty::Region, - variance: VarianceTermPtr<'a>) { - match region { - ty::ReEarlyBound(ref data) => { - let def_id = - generics.regions.get(data.space, data.index as usize).def_id; - let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); - if self.is_to_be_inferred(node_id) { - let index = self.inferred_index(node_id); - self.add_constraint(index, variance); - } - } - - ty::ReStatic => { } - - ty::ReLateBound(..) => { - // We do not infer variance for region parameters on - // methods or in fn types. - } - - ty::ReFree(..) | ty::ReScope(..) | ty::ReVar(..) | - ty::ReSkolemized(..) | ty::ReEmpty => { - // We don't expect to see anything but 'static or bound - // regions when visiting member types or method types. - self.tcx() - .sess - .bug(&format!("unexpected region encountered in variance \ - inference: {:?}", - region)); - } - } - } - - /// Adds constraints appropriate for a mutability-type pair - /// appearing in a context with ambient variance `variance` - fn add_constraints_from_mt(&mut self, - generics: &ty::Generics<'tcx>, - mt: &ty::TypeAndMut<'tcx>, - variance: VarianceTermPtr<'a>) { - match mt.mutbl { - hir::MutMutable => { - let invar = self.invariant(variance); - self.add_constraints_from_ty(generics, mt.ty, invar); - } - - hir::MutImmutable => { - self.add_constraints_from_ty(generics, mt.ty, variance); - } - } - } -} - -// Constraint solving -// -// The final phase iterates over the constraints, refining the variance -// for each inferred until a fixed point is reached. This will be the -// optimal solution to the constraints. 
The final variance for each -// inferred is then written into the `variance_map` in the tcx. - -struct SolveContext<'a, 'tcx: 'a> { - terms_cx: TermsContext<'a, 'tcx>, - constraints: Vec> , - - // Maps from an InferredIndex to the inferred value for that variable. - solutions: Vec } - -fn solve_constraints(constraints_cx: ConstraintContext) { - let ConstraintContext { terms_cx, constraints, .. } = constraints_cx; - - let solutions = - terms_cx.inferred_infos.iter() - .map(|ii| ii.initial_variance) - .collect(); - - let mut solutions_cx = SolveContext { - terms_cx: terms_cx, - constraints: constraints, - solutions: solutions - }; - solutions_cx.solve(); - solutions_cx.write(); -} - -impl<'a, 'tcx> SolveContext<'a, 'tcx> { - fn solve(&mut self) { - // Propagate constraints until a fixed point is reached. Note - // that the maximum number of iterations is 2C where C is the - // number of constraints (each variable can change values at most - // twice). Since number of constraints is linear in size of the - // input, so is the inference process. - let mut changed = true; - while changed { - changed = false; - - for constraint in &self.constraints { - let Constraint { inferred, variance: term } = *constraint; - let InferredIndex(inferred) = inferred; - let variance = self.evaluate(term); - let old_value = self.solutions[inferred]; - let new_value = glb(variance, old_value); - if old_value != new_value { - debug!("Updating inferred {} (node {}) \ - from {:?} to {:?} due to {:?}", - inferred, - self.terms_cx - .inferred_infos[inferred] - .param_id, - old_value, - new_value, - term); - - self.solutions[inferred] = new_value; - changed = true; - } - } - } - } - - fn write(&self) { - // Collect all the variances for a particular item and stick - // them into the variance map. 
We rely on the fact that we - // generate all the inferreds for a particular item - // consecutively (that is, we collect solutions for an item - // until we see a new item id, and we assume (1) the solutions - // are in the same order as the type parameters were declared - // and (2) all solutions or a given item appear before a new - // item id). - - let tcx = self.terms_cx.tcx; - let solutions = &self.solutions; - let inferred_infos = &self.terms_cx.inferred_infos; - let mut index = 0; - let num_inferred = self.terms_cx.num_inferred(); - while index < num_inferred { - let item_id = inferred_infos[index].item_id; - let mut types = VecPerParamSpace::empty(); - let mut regions = VecPerParamSpace::empty(); - - while index < num_inferred && inferred_infos[index].item_id == item_id { - let info = &inferred_infos[index]; - let variance = solutions[index]; - debug!("Index {} Info {} / {:?} / {:?} Variance {:?}", - index, info.index, info.kind, info.space, variance); - match info.kind { - TypeParam => { types.push(info.space, variance); } - RegionParam => { regions.push(info.space, variance); } - } - - index += 1; - } - - let item_variances = ty::ItemVariances { - types: types, - regions: regions - }; - debug!("item_id={} item_variances={:?}", - item_id, - item_variances); - - let item_def_id = tcx.map.local_def_id(item_id); - - // For unit testing: check for a special "rustc_variance" - // attribute and report an error with various results if found. 
- if tcx.has_attr(item_def_id, "rustc_variance") { - span_err!(tcx.sess, tcx.map.span(item_id), E0208, "{:?}", item_variances); - } - - let newly_added = tcx.item_variance_map.borrow_mut() - .insert(item_def_id, Rc::new(item_variances)).is_none(); - assert!(newly_added); - } - } - - fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance { - match *term { - ConstantTerm(v) => { - v - } - - TransformTerm(t1, t2) => { - let v1 = self.evaluate(t1); - let v2 = self.evaluate(t2); - v1.xform(v2) - } - - InferredTerm(InferredIndex(index)) => { - self.solutions[index] - } - } - } -} - -// Miscellany transformations on variance - -trait Xform { - fn xform(self, v: Self) -> Self; -} - -impl Xform for ty::Variance { - fn xform(self, v: ty::Variance) -> ty::Variance { - // "Variance transformation", Figure 1 of The Paper - match (self, v) { - // Figure 1, column 1. - (ty::Covariant, ty::Covariant) => ty::Covariant, - (ty::Covariant, ty::Contravariant) => ty::Contravariant, - (ty::Covariant, ty::Invariant) => ty::Invariant, - (ty::Covariant, ty::Bivariant) => ty::Bivariant, - - // Figure 1, column 2. - (ty::Contravariant, ty::Covariant) => ty::Contravariant, - (ty::Contravariant, ty::Contravariant) => ty::Covariant, - (ty::Contravariant, ty::Invariant) => ty::Invariant, - (ty::Contravariant, ty::Bivariant) => ty::Bivariant, - - // Figure 1, column 3. - (ty::Invariant, _) => ty::Invariant, - - // Figure 1, column 4. 
- (ty::Bivariant, _) => ty::Bivariant, - } - } -} - -fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { - // Greatest lower bound of the variance lattice as - // defined in The Paper: - // - // * - // - + - // o - match (v1, v2) { - (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant, - - (ty::Covariant, ty::Contravariant) => ty::Invariant, - (ty::Contravariant, ty::Covariant) => ty::Invariant, - - (ty::Covariant, ty::Covariant) => ty::Covariant, - - (ty::Contravariant, ty::Contravariant) => ty::Contravariant, - - (x, ty::Bivariant) | (ty::Bivariant, x) => x, - } -} diff --git a/src/librustc_typeck/variance/README.md b/src/librustc_typeck/variance/README.md new file mode 100644 index 0000000000000..ac785e4058bde --- /dev/null +++ b/src/librustc_typeck/variance/README.md @@ -0,0 +1,304 @@ +## Variance of type and lifetime parameters + +This file infers the variance of type and lifetime parameters. The +algorithm is taken from Section 4 of the paper "Taming the Wildcards: +Combining Definition- and Use-Site Variance" published in PLDI'11 and +written by Altidor et al., and hereafter referred to as The Paper. + +This inference is explicitly designed *not* to consider the uses of +types within code. To determine the variance of type parameters +defined on type `X`, we only consider the definition of the type `X` +and the definitions of any types it references. + +We only infer variance for type parameters found on *data types* +like structs and enums. In these cases, there is fairly straightforward +explanation for what variance means. The variance of the type +or lifetime parameters defines whether `T` is a subtype of `T` +(resp. `T<'a>` and `T<'b>`) based on the relationship of `A` and `B` +(resp. `'a` and `'b`). + +We do not infer variance for type parameters found on traits, fns, +or impls. 
Variance on trait parameters can make indeed make sense +(and we used to compute it) but it is actually rather subtle in +meaning and not that useful in practice, so we removed it. See the +addendum for some details. Variances on fn/impl parameters, otoh, +doesn't make sense because these parameters are instantiated and +then forgotten, they don't persist in types or compiled +byproducts. + +### The algorithm + +The basic idea is quite straightforward. We iterate over the types +defined and, for each use of a type parameter X, accumulate a +constraint indicating that the variance of X must be valid for the +variance of that use site. We then iteratively refine the variance of +X until all constraints are met. There is *always* a sol'n, because at +the limit we can declare all type parameters to be invariant and all +constraints will be satisfied. + +As a simple example, consider: + + enum Option { Some(A), None } + enum OptionalFn { Some(|B|), None } + enum OptionalMap { Some(|C| -> C), None } + +Here, we will generate the constraints: + + 1. V(A) <= + + 2. V(B) <= - + 3. V(C) <= + + 4. V(C) <= - + +These indicate that (1) the variance of A must be at most covariant; +(2) the variance of B must be at most contravariant; and (3, 4) the +variance of C must be at most covariant *and* contravariant. All of these +results are based on a variance lattice defined as follows: + + * Top (bivariant) + - + + o Bottom (invariant) + +Based on this lattice, the solution `V(A)=+`, `V(B)=-`, `V(C)=o` is the +optimal solution. Note that there is always a naive solution which +just declares all variables to be invariant. + +You may be wondering why fixed-point iteration is required. The reason +is that the variance of a use site may itself be a function of the +variance of other type parameters. 
In full generality, our constraints +take the form: + + V(X) <= Term + Term := + | - | * | o | V(X) | Term x Term + +Here the notation `V(X)` indicates the variance of a type/region +parameter `X` with respect to its defining class. `Term x Term` +represents the "variance transform" as defined in the paper: + +> If the variance of a type variable `X` in type expression `E` is `V2` + and the definition-site variance of the [corresponding] type parameter + of a class `C` is `V1`, then the variance of `X` in the type expression + `C` is `V3 = V1.xform(V2)`. + +### Constraints + +If I have a struct or enum with where clauses: + + struct Foo { ... } + +you might wonder whether the variance of `T` with respect to `Bar` +affects the variance `T` with respect to `Foo`. I claim no. The +reason: assume that `T` is invariant w/r/t `Bar` but covariant w/r/t +`Foo`. And then we have a `Foo` that is upcast to `Foo`, where +`X <: Y`. However, while `X : Bar`, `Y : Bar` does not hold. In that +case, the upcast will be illegal, but not because of a variance +failure, but rather because the target type `Foo` is itself just +not well-formed. Basically we get to assume well-formedness of all +types involved before considering variance. + +#### Dependency graph management + +Because variance works in two phases, if we are not careful, we wind +up with a muddled mess of a dep-graph. Basically, when gathering up +the constraints, things are fairly well-structured, but then we do a +fixed-point iteration and write the results back where they +belong. You can't give this fixed-point iteration a single task +because it reads from (and writes to) the variance of all types in the +crate. In principle, we *could* switch the "current task" in a very +fine-grained way while propagating constraints in the fixed-point +iteration and everything would be automatically tracked, but that +would add some overhead and isn't really necessary anyway. 
+ +Instead what we do is to add edges into the dependency graph as we +construct the constraint set: so, if computing the constraints for +node `X` requires loading the inference variables from node `Y`, then +we can add an edge `Y -> X`, since the variance we ultimately infer +for `Y` will affect the variance we ultimately infer for `X`. + +At this point, we've basically mirrored the inference graph in the +dependency graph. This means we can just completely ignore the +fixed-point iteration, since it is just shuffling values along this +graph. In other words, if we added the fine-grained switching of tasks +I described earlier, all it would show is that we repeatedly read the +values described by the constraints, but those edges were already +added when building the constraints in the first place. + +Here is how this is implemented (at least as of the time of this +writing). The associated `DepNode` for the variance map is (at least +presently) `Signature(DefId)`. This means that, in `constraints.rs`, +when we visit an item to load up its constraints, we set +`Signature(DefId)` as the current task (the "memoization" pattern +described in the `dep-graph` README). Then whenever we find an +embedded type or trait, we add a synthetic read of `Signature(DefId)`, +which covers the variances we will compute for all of its +parameters. This read is synthetic (i.e., we call +`variance_map.read()`) because, in fact, the final variance is not yet +computed -- the read *will* occur (repeatedly) during the fixed-point +iteration phase. + +In fact, we don't really *need* this synthetic read. That's because we +do wind up looking up the `TypeScheme` or `TraitDef` for all +references types/traits, and those reads add an edge from +`Signature(DefId)` (that is, they share the same dep node as +variance). 
However, I've kept the synthetic reads in place anyway, +just for future-proofing (in case we change the dep-nodes in the +future), and because it makes the intention a bit clearer I think. + +### Addendum: Variance on traits + +As mentioned above, we used to permit variance on traits. This was +computed based on the appearance of trait type parameters in +method signatures and was used to represent the compatibility of +vtables in trait objects (and also "virtual" vtables or dictionary +in trait bounds). One complication was that variance for +associated types is less obvious, since they can be projected out +and put to myriad uses, so it's not clear when it is safe to allow +`X::Bar` to vary (or indeed just what that means). Moreover (as +covered below) all inputs on any trait with an associated type had +to be invariant, limiting the applicability. Finally, the +annotations (`MarkerTrait`, `PhantomFn`) needed to ensure that all +trait type parameters had a variance were confusing and annoying +for little benefit. + +Just for historical reference, I am going to preserve some text indicating +how one could interpret variance and trait matching. + +#### Variance and object types + +Just as with structs and enums, we can decide the subtyping +relationship between two object types `&Trait<A>` and `&Trait<B>` +based on the relationship of `A` and `B`. Note that for object +types we ignore the `Self` type parameter -- it is unknown, and +the nature of dynamic dispatch ensures that we will always call a +function that is expecting the appropriate `Self` type. However, we +must be careful with the other type parameters, or else we could +end up calling a function that is expecting one type but provided +another. 
+ +To see what I mean, consider a trait like so: + + trait ConvertTo<A> { + fn convertTo(&self) -> A; + } + +Intuitively, if we had one object `O=&ConvertTo<Object>` and another +`S=&ConvertTo<String>`, then `S <: O` because `String <: Object` +(presuming Java-like "string" and "object" types, my go-to examples +for subtyping). The actual algorithm would be to compare the +(explicit) type parameters pairwise respecting their variance: here, +the type parameter A is covariant (it appears only in a return +position), and hence we require that `String <: Object`. + +You'll note though that we did not consider the binding for the +(implicit) `Self` type parameter: in fact, it is unknown, so that's +good. The reason we can ignore that parameter is precisely because we +don't need to know its value until a call occurs, and at that time (as +you said) the dynamic nature of virtual dispatch means the code we run +will be correct for whatever value `Self` happens to be bound to for +the particular object whose method we called. `Self` is thus different +from `A`, because the caller requires that `A` be known in order to +know the return type of the method `convertTo()`. (As an aside, we +have rules preventing methods where `Self` appears outside of the +receiver position from being called via an object.) + +#### Trait variance and vtable resolution + +But traits aren't only used with objects. They're also used when +deciding whether a given impl satisfies a given trait bound. To set the +scene here, imagine I had a function: + + fn convertAll<T: ConvertTo<i32>>(v: &[T]) { + ... + } + +Now imagine that I have an implementation of `ConvertTo<i32>` for `Object`: + + impl ConvertTo<i32> for Object { ... } + +And I want to call `convertAll` on an array of strings. Suppose +further that for whatever reason I specifically supply the value of +`String` for the type parameter `T`: + + let mut vector = vec!["string", ...]; + convertAll::<String>(vector); + +Is this legal? 
To put it another way, can we apply the `impl` for +`Object` to the type `String`? The answer is yes, but to see why +we have to expand out what will happen: + +- `convertAll` will create a pointer to one of the entries in the + vector, which will have type `&String` +- It will then call the impl of `convertTo()` that is intended + for use with objects. This has the type: + + fn(self: &Object) -> i32 + + It is ok to provide a value for `self` of type `&String` because + `&String <: &Object`. + +OK, so intuitively we want this to be legal, so let's bring this back +to variance and see whether we are computing the correct result. We +must first figure out how to phrase the question "is an impl for +`Object,i32` usable where an impl for `String,i32` is expected?" + +Maybe it's helpful to think of a dictionary-passing implementation of +type classes. In that case, `convertAll()` takes an implicit parameter +representing the impl. In short, we *have* an impl of type: + + V_O = ConvertTo<i32> for Object + +and the function prototype expects an impl of type: + + V_S = ConvertTo<i32> for String + +As with any argument, this is legal if the type of the value given +(`V_O`) is a subtype of the type expected (`V_S`). So is `V_O <: V_S`? +The answer will depend on the variance of the various parameters. In +this case, because the `Self` parameter is contravariant and `A` is +covariant, it means that: + + V_O <: V_S iff + i32 <: i32 + String <: Object + +These conditions are satisfied and so we are happy. + +#### Variance and associated types + +Traits with associated types -- or at minimum projection +expressions -- must be invariant with respect to all of their +inputs. To see why this makes sense, consider what subtyping for a +trait reference means: + + <T as Trait> <: <U as Trait> + +means that if I know that `T as Trait`, I also know that `U as +Trait`. Moreover, if you think of it as dictionary passing style, +it means that a dictionary for `<T as Trait>` is safe to use where +a dictionary for `<U as Trait>` is expected. 
+ +The problem is that when you can project types out from `<T as +Trait>`, the relationship to types projected out of `<U as Trait>` +is completely unknown unless `T==U` (see #21726 for more +details). Making `Trait` invariant ensures that this is true. + +Another related reason is that if we didn't make traits with +associated types invariant, then projection is no longer a +function with a single result. Consider: + +``` +trait Identity { type Out; fn foo(&self); } +impl<T> Identity for T { type Out = T; ... } +``` + +Now if I have `<&'static () as Identity>::Out`, this can be +validly derived as `&'a ()` for any `'a`: + + <&'a () as Identity> <: <&'static () as Identity> + if &'static () <: &'a () -- Identity is contravariant in Self + if 'static : 'a -- Subtyping rules for relations + +This change otoh means that `<&'static () as Identity>::Out` is +always `&'static ()` (which might then be upcast to `&'a ()`, +separately). This was helpful in solving #21750. + + diff --git a/src/librustc_typeck/variance/constraints.rs b/src/librustc_typeck/variance/constraints.rs new file mode 100644 index 0000000000000..ded9df25d5c66 --- /dev/null +++ b/src/librustc_typeck/variance/constraints.rs @@ -0,0 +1,533 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Constraint construction and representation +//! +//! The second pass over the AST determines the set of constraints. +//! We walk the set of items and, for each member, generate new constraints. 
+ +use dep_graph::DepTrackingMapConfig; +use hir::def_id::DefId; +use middle::resolve_lifetime as rl; +use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::maps::ItemVariances; +use rustc::hir::map as hir_map; +use syntax::ast; +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; + +use super::terms::*; +use super::terms::VarianceTerm::*; +use super::xform::*; + +pub struct ConstraintContext<'a, 'tcx: 'a> { + pub terms_cx: TermsContext<'a, 'tcx>, + + // These are pointers to common `ConstantTerm` instances + covariant: VarianceTermPtr<'a>, + contravariant: VarianceTermPtr<'a>, + invariant: VarianceTermPtr<'a>, + bivariant: VarianceTermPtr<'a>, + + pub constraints: Vec>, +} + +/// Declares that the variable `decl_id` appears in a location with +/// variance `variance`. +#[derive(Copy, Clone)] +pub struct Constraint<'a> { + pub inferred: InferredIndex, + pub variance: &'a VarianceTerm<'a>, +} + +pub fn add_constraints_from_crate<'a, 'tcx>(terms_cx: TermsContext<'a, 'tcx>) + -> ConstraintContext<'a, 'tcx> { + let tcx = terms_cx.tcx; + let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant)); + let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant)); + let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant)); + let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant)); + let mut constraint_cx = ConstraintContext { + terms_cx: terms_cx, + covariant: covariant, + contravariant: contravariant, + invariant: invariant, + bivariant: bivariant, + constraints: Vec::new(), + }; + + // See README.md for a discussion on dep-graph management. 
+ tcx.visit_all_item_likes_in_krate(|def_id| ItemVariances::to_dep_node(&def_id), + &mut constraint_cx); + + constraint_cx +} + +impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ConstraintContext<'a, 'tcx> { + fn visit_item(&mut self, item: &hir::Item) { + let tcx = self.terms_cx.tcx; + let did = tcx.map.local_def_id(item.id); + + debug!("visit_item item={}", tcx.map.node_to_string(item.id)); + + match item.node { + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) => { + let generics = tcx.item_generics(did); + + // Not entirely obvious: constraints on structs/enums do not + // affect the variance of their type parameters. See discussion + // in comment at top of module. + // + // self.add_constraints_from_generics(generics); + + for field in tcx.lookup_adt_def(did).all_fields() { + self.add_constraints_from_ty(generics, + tcx.item_type(field.did), + self.covariant); + } + } + hir::ItemTrait(..) => { + let generics = tcx.item_generics(did); + let trait_ref = ty::TraitRef { + def_id: did, + substs: Substs::identity_for_item(tcx, did) + }; + self.add_constraints_from_trait_ref(generics, + trait_ref, + self.invariant); + } + + hir::ItemExternCrate(_) | + hir::ItemUse(..) | + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemMod(..) | + hir::ItemForeignMod(..) | + hir::ItemTy(..) | + hir::ItemImpl(..) | + hir::ItemDefaultImpl(..) => {} + } + } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} + +/// Is `param_id` a lifetime according to `map`? 
+fn is_lifetime(map: &hir_map::Map, param_id: ast::NodeId) -> bool { + match map.find(param_id) { + Some(hir_map::NodeLifetime(..)) => true, + _ => false, + } +} + +impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.terms_cx.tcx + } + + fn inferred_index(&self, param_id: ast::NodeId) -> InferredIndex { + match self.terms_cx.inferred_map.get(¶m_id) { + Some(&index) => index, + None => { + bug!("no inferred index entry for {}", + self.tcx().map.node_to_string(param_id)); + } + } + } + + fn find_binding_for_lifetime(&self, param_id: ast::NodeId) -> ast::NodeId { + let tcx = self.terms_cx.tcx; + assert!(is_lifetime(&tcx.map, param_id)); + match tcx.named_region_map.defs.get(¶m_id) { + Some(&rl::DefEarlyBoundRegion(_, lifetime_decl_id)) => lifetime_decl_id, + Some(_) => bug!("should not encounter non early-bound cases"), + + // The lookup should only fail when `param_id` is + // itself a lifetime binding: use it as the decl_id. + None => param_id, + } + + } + + /// Is `param_id` a type parameter for which we infer variance? + fn is_to_be_inferred(&self, param_id: ast::NodeId) -> bool { + let result = self.terms_cx.inferred_map.contains_key(¶m_id); + + // To safe-guard against invalid inferred_map constructions, + // double-check if variance is inferred at some use of a type + // parameter (by inspecting parent of its binding declaration + // to see if it is introduced by a type or by a fn/impl). + + let check_result = |this: &ConstraintContext| -> bool { + let tcx = this.terms_cx.tcx; + let decl_id = this.find_binding_for_lifetime(param_id); + // Currently only called on lifetimes; double-checking that. + assert!(is_lifetime(&tcx.map, param_id)); + let parent_id = tcx.map.get_parent(decl_id); + let parent = tcx.map + .find(parent_id) + .unwrap_or_else(|| bug!("tcx.map missing entry for id: {}", parent_id)); + + let is_inferred; + macro_rules! 
cannot_happen { () => { { + bug!("invalid parent: {} for {}", + tcx.map.node_to_string(parent_id), + tcx.map.node_to_string(param_id)); + } } } + + match parent { + hir_map::NodeItem(p) => { + match p.node { + hir::ItemTy(..) | + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemTrait(..) => is_inferred = true, + hir::ItemFn(..) => is_inferred = false, + _ => cannot_happen!(), + } + } + hir_map::NodeTraitItem(..) => is_inferred = false, + hir_map::NodeImplItem(..) => is_inferred = false, + _ => cannot_happen!(), + } + + return is_inferred; + }; + + assert_eq!(result, check_result(self)); + + return result; + } + + /// Returns a variance term representing the declared variance of the type/region parameter + /// with the given id. + fn declared_variance(&self, + param_def_id: DefId, + item_def_id: DefId, + index: usize) + -> VarianceTermPtr<'a> { + assert_eq!(param_def_id.krate, item_def_id.krate); + + if let Some(param_node_id) = self.tcx().map.as_local_node_id(param_def_id) { + // Parameter on an item defined within current crate: + // variance not yet inferred, so return a symbolic + // variance. + let InferredIndex(index) = self.inferred_index(param_node_id); + self.terms_cx.inferred_infos[index].term + } else { + // Parameter on an item defined within another crate: + // variance already inferred, just look it up. 
+ let variances = self.tcx().item_variances(item_def_id); + self.constant_term(variances[index]) + } + } + + fn add_constraint(&mut self, + InferredIndex(index): InferredIndex, + variance: VarianceTermPtr<'a>) { + debug!("add_constraint(index={}, variance={:?})", index, variance); + self.constraints.push(Constraint { + inferred: InferredIndex(index), + variance: variance, + }); + } + + fn contravariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + self.xform(variance, self.contravariant) + } + + fn invariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + self.xform(variance, self.invariant) + } + + fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> { + match v { + ty::Covariant => self.covariant, + ty::Invariant => self.invariant, + ty::Contravariant => self.contravariant, + ty::Bivariant => self.bivariant, + } + } + + fn xform(&mut self, v1: VarianceTermPtr<'a>, v2: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> { + match (*v1, *v2) { + (_, ConstantTerm(ty::Covariant)) => { + // Applying a "covariant" transform is always a no-op + v1 + } + + (ConstantTerm(c1), ConstantTerm(c2)) => self.constant_term(c1.xform(c2)), + + _ => &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)), + } + } + + fn add_constraints_from_trait_ref(&mut self, + generics: &ty::Generics<'tcx>, + trait_ref: ty::TraitRef<'tcx>, + variance: VarianceTermPtr<'a>) { + debug!("add_constraints_from_trait_ref: trait_ref={:?} variance={:?}", + trait_ref, + variance); + + let trait_generics = self.tcx().item_generics(trait_ref.def_id); + + // This edge is actually implied by the call to + // `lookup_trait_def`, but I'm trying to be future-proof. See + // README.md for a discussion on dep-graph management. 
+ self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id)); + + self.add_constraints_from_substs(generics, + trait_ref.def_id, + &trait_generics.types, + &trait_generics.regions, + trait_ref.substs, + variance); + } + + /// Adds constraints appropriate for an instance of `ty` appearing + /// in a context with the generics defined in `generics` and + /// ambient variance `variance` + fn add_constraints_from_ty(&mut self, + generics: &ty::Generics<'tcx>, + ty: Ty<'tcx>, + variance: VarianceTermPtr<'a>) { + debug!("add_constraints_from_ty(ty={:?}, variance={:?})", + ty, + variance); + + match ty.sty { + ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | + ty::TyStr | ty::TyNever => { + // leaf type -- noop + } + + ty::TyClosure(..) | + ty::TyAnon(..) => { + bug!("Unexpected closure type in variance computation"); + } + + ty::TyRef(region, ref mt) => { + let contra = self.contravariant(variance); + self.add_constraints_from_region(generics, region, contra); + self.add_constraints_from_mt(generics, mt, variance); + } + + ty::TyBox(typ) | + ty::TyArray(typ, _) | + ty::TySlice(typ) => { + self.add_constraints_from_ty(generics, typ, variance); + } + + ty::TyRawPtr(ref mt) => { + self.add_constraints_from_mt(generics, mt, variance); + } + + ty::TyTuple(subtys) => { + for &subty in subtys { + self.add_constraints_from_ty(generics, subty, variance); + } + } + + ty::TyAdt(def, substs) => { + let adt_generics = self.tcx().item_generics(def.did); + + // This edge is actually implied by the call to + // `lookup_trait_def`, but I'm trying to be future-proof. See + // README.md for a discussion on dep-graph management. 
+ self.tcx().dep_graph.read(ItemVariances::to_dep_node(&def.did)); + + self.add_constraints_from_substs(generics, + def.did, + &adt_generics.types, + &adt_generics.regions, + substs, + variance); + } + + ty::TyProjection(ref data) => { + let trait_ref = &data.trait_ref; + let trait_generics = self.tcx().item_generics(trait_ref.def_id); + + // This edge is actually implied by the call to + // `lookup_trait_def`, but I'm trying to be future-proof. See + // README.md for a discussion on dep-graph management. + self.tcx().dep_graph.read(ItemVariances::to_dep_node(&trait_ref.def_id)); + + self.add_constraints_from_substs(generics, + trait_ref.def_id, + &trait_generics.types, + &trait_generics.regions, + trait_ref.substs, + variance); + } + + ty::TyDynamic(ref data, r) => { + // The type `Foo` is contravariant w/r/t `'a`: + let contra = self.contravariant(variance); + self.add_constraints_from_region(generics, r, contra); + + if let Some(p) = data.principal() { + let poly_trait_ref = p.with_self_ty(self.tcx(), self.tcx().types.err); + self.add_constraints_from_trait_ref(generics, poly_trait_ref.0, variance); + } + + for projection in data.projection_bounds() { + self.add_constraints_from_ty(generics, projection.0.ty, self.invariant); + } + } + + ty::TyParam(ref data) => { + assert_eq!(generics.parent, None); + let mut i = data.idx as usize; + if !generics.has_self || i > 0 { + i -= generics.regions.len(); + } + let def_id = generics.types[i].def_id; + let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); + match self.terms_cx.inferred_map.get(&node_id) { + Some(&index) => { + self.add_constraint(index, variance); + } + None => { + // We do not infer variance for type parameters + // declared on methods. They will not be present + // in the inferred_map. + } + } + } + + ty::TyFnDef(.., &ty::BareFnTy { ref sig, .. }) | + ty::TyFnPtr(&ty::BareFnTy { ref sig, .. 
}) => { + self.add_constraints_from_sig(generics, sig, variance); + } + + ty::TyError => { + // we encounter this when walking the trait references for object + // types, where we use TyError as the Self type + } + + ty::TyInfer(..) => { + bug!("unexpected type encountered in \ + variance inference: {}", + ty); + } + } + } + + /// Adds constraints appropriate for a nominal type (enum, struct, + /// object, etc) appearing in a context with ambient variance `variance` + fn add_constraints_from_substs(&mut self, + generics: &ty::Generics<'tcx>, + def_id: DefId, + type_param_defs: &[ty::TypeParameterDef<'tcx>], + region_param_defs: &[ty::RegionParameterDef], + substs: &Substs<'tcx>, + variance: VarianceTermPtr<'a>) { + debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})", + def_id, + substs, + variance); + + for p in type_param_defs { + let variance_decl = self.declared_variance(p.def_id, def_id, p.index as usize); + let variance_i = self.xform(variance, variance_decl); + let substs_ty = substs.type_for_def(p); + debug!("add_constraints_from_substs: variance_decl={:?} variance_i={:?}", + variance_decl, + variance_i); + self.add_constraints_from_ty(generics, substs_ty, variance_i); + } + + for p in region_param_defs { + let variance_decl = self.declared_variance(p.def_id, def_id, p.index as usize); + let variance_i = self.xform(variance, variance_decl); + let substs_r = substs.region_for_def(p); + self.add_constraints_from_region(generics, substs_r, variance_i); + } + } + + /// Adds constraints appropriate for a function with signature + /// `sig` appearing in a context with ambient variance `variance` + fn add_constraints_from_sig(&mut self, + generics: &ty::Generics<'tcx>, + sig: &ty::PolyFnSig<'tcx>, + variance: VarianceTermPtr<'a>) { + let contra = self.contravariant(variance); + for &input in &sig.0.inputs { + self.add_constraints_from_ty(generics, input, contra); + } + self.add_constraints_from_ty(generics, sig.0.output, variance); + } + + 
/// Adds constraints appropriate for a region appearing in a + /// context with ambient variance `variance` + fn add_constraints_from_region(&mut self, + generics: &ty::Generics<'tcx>, + region: &'tcx ty::Region, + variance: VarianceTermPtr<'a>) { + match *region { + ty::ReEarlyBound(ref data) => { + assert_eq!(generics.parent, None); + let i = data.index as usize - generics.has_self as usize; + let def_id = generics.regions[i].def_id; + let node_id = self.tcx().map.as_local_node_id(def_id).unwrap(); + if self.is_to_be_inferred(node_id) { + let index = self.inferred_index(node_id); + self.add_constraint(index, variance); + } + } + + ty::ReStatic => {} + + ty::ReLateBound(..) => { + // We do not infer variance for region parameters on + // methods or in fn types. + } + + ty::ReFree(..) | + ty::ReScope(..) | + ty::ReVar(..) | + ty::ReSkolemized(..) | + ty::ReEmpty | + ty::ReErased => { + // We don't expect to see anything but 'static or bound + // regions when visiting member types or method types. + bug!("unexpected region encountered in variance \ + inference: {:?}", + region); + } + } + } + + /// Adds constraints appropriate for a mutability-type pair + /// appearing in a context with ambient variance `variance` + fn add_constraints_from_mt(&mut self, + generics: &ty::Generics<'tcx>, + mt: &ty::TypeAndMut<'tcx>, + variance: VarianceTermPtr<'a>) { + match mt.mutbl { + hir::MutMutable => { + let invar = self.invariant(variance); + self.add_constraints_from_ty(generics, mt.ty, invar); + } + + hir::MutImmutable => { + self.add_constraints_from_ty(generics, mt.ty, variance); + } + } + } +} diff --git a/src/librustc_typeck/variance/mod.rs b/src/librustc_typeck/variance/mod.rs new file mode 100644 index 0000000000000..cd0ab1cbb9543 --- /dev/null +++ b/src/librustc_typeck/variance/mod.rs @@ -0,0 +1,36 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Module for inferring the variance of type and lifetime +//! parameters. See README.md for details. + +use arena; +use rustc::ty::TyCtxt; + +/// Defines the `TermsContext` basically houses an arena where we can +/// allocate terms. +mod terms; + +/// Code to gather up constraints. +mod constraints; + +/// Code to solve constraints and write out the results. +mod solve; + +/// Code for transforming variances. +mod xform; + +pub fn infer_variance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + let mut arena = arena::TypedArena::new(); + let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &mut arena); + let constraints_cx = constraints::add_constraints_from_crate(terms_cx); + solve::solve_constraints(constraints_cx); + tcx.variance_computed.set(true); +} diff --git a/src/librustc_typeck/variance/solve.rs b/src/librustc_typeck/variance/solve.rs new file mode 100644 index 0000000000000..a5c53b4c6291c --- /dev/null +++ b/src/librustc_typeck/variance/solve.rs @@ -0,0 +1,161 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Constraint solving +//! +//! The final phase iterates over the constraints, refining the variance +//! for each inferred until a fixed point is reached. This will be the +//! optimal solution to the constraints. The final variance for each +//! inferred is then written into the `variance_map` in the tcx. 
+ +use rustc::ty; +use std::rc::Rc; + +use super::constraints::*; +use super::terms::*; +use super::terms::VarianceTerm::*; +use super::xform::*; + +struct SolveContext<'a, 'tcx: 'a> { + terms_cx: TermsContext<'a, 'tcx>, + constraints: Vec>, + + // Maps from an InferredIndex to the inferred value for that variable. + solutions: Vec, +} + +pub fn solve_constraints(constraints_cx: ConstraintContext) { + let ConstraintContext { terms_cx, constraints, .. } = constraints_cx; + + let solutions = terms_cx.inferred_infos + .iter() + .map(|ii| ii.initial_variance) + .collect(); + + let mut solutions_cx = SolveContext { + terms_cx: terms_cx, + constraints: constraints, + solutions: solutions, + }; + solutions_cx.solve(); + solutions_cx.write(); +} + +impl<'a, 'tcx> SolveContext<'a, 'tcx> { + fn solve(&mut self) { + // Propagate constraints until a fixed point is reached. Note + // that the maximum number of iterations is 2C where C is the + // number of constraints (each variable can change values at most + // twice). Since number of constraints is linear in size of the + // input, so is the inference process. + let mut changed = true; + while changed { + changed = false; + + for constraint in &self.constraints { + let Constraint { inferred, variance: term } = *constraint; + let InferredIndex(inferred) = inferred; + let variance = self.evaluate(term); + let old_value = self.solutions[inferred]; + let new_value = glb(variance, old_value); + if old_value != new_value { + debug!("Updating inferred {} (node {}) \ + from {:?} to {:?} due to {:?}", + inferred, + self.terms_cx + .inferred_infos[inferred] + .param_id, + old_value, + new_value, + term); + + self.solutions[inferred] = new_value; + changed = true; + } + } + } + } + + fn write(&self) { + // Collect all the variances for a particular item and stick + // them into the variance map. 
We rely on the fact that we + // generate all the inferreds for a particular item + // consecutively (that is, we collect solutions for an item + // until we see a new item id, and we assume (1) the solutions + // are in the same order as the type parameters were declared + // and (2) all solutions or a given item appear before a new + // item id). + + let tcx = self.terms_cx.tcx; + + // Ignore the writes here because the relevant edges were + // already accounted for in `constraints.rs`. See the section + // on dependency graph management in README.md for more + // information. + let _ignore = tcx.dep_graph.in_ignore(); + + let solutions = &self.solutions; + let inferred_infos = &self.terms_cx.inferred_infos; + let mut index = 0; + let num_inferred = self.terms_cx.num_inferred(); + while index < num_inferred { + let item_id = inferred_infos[index].item_id; + + let mut item_variances = vec![]; + + while index < num_inferred && inferred_infos[index].item_id == item_id { + let info = &inferred_infos[index]; + let variance = solutions[index]; + debug!("Index {} Info {} Variance {:?}", + index, + info.index, + variance); + + assert_eq!(item_variances.len(), info.index); + item_variances.push(variance); + index += 1; + } + + debug!("item_id={} item_variances={:?}", item_id, item_variances); + + let item_def_id = tcx.map.local_def_id(item_id); + + // For unit testing: check for a special "rustc_variance" + // attribute and report an error with various results if found. 
+ if tcx.has_attr(item_def_id, "rustc_variance") { + span_err!(tcx.sess, + tcx.map.span(item_id), + E0208, + "{:?}", + item_variances); + } + + let newly_added = tcx.item_variance_map + .borrow_mut() + .insert(item_def_id, Rc::new(item_variances)) + .is_none(); + assert!(newly_added); + } + } + + fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance { + match *term { + ConstantTerm(v) => v, + + TransformTerm(t1, t2) => { + let v1 = self.evaluate(t1); + let v2 = self.evaluate(t2); + v1.xform(v2) + } + + InferredTerm(InferredIndex(index)) => self.solutions[index], + } + } +} diff --git a/src/librustc_typeck/variance/terms.rs b/src/librustc_typeck/variance/terms.rs new file mode 100644 index 0000000000000..851cfcd87231f --- /dev/null +++ b/src/librustc_typeck/variance/terms.rs @@ -0,0 +1,263 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Representing terms +// +// Terms are structured as a straightforward tree. Rather than rely on +// GC, we allocate terms out of a bounded arena (the lifetime of this +// arena is the lifetime 'a that is threaded around). +// +// We assign a unique index to each type/region parameter whose variance +// is to be inferred. We refer to such variables as "inferreds". An +// `InferredIndex` is a newtype'd int representing the index of such +// a variable. 
+ +use arena::TypedArena; +use dep_graph::DepTrackingMapConfig; +use rustc::ty::{self, TyCtxt}; +use rustc::ty::maps::ItemVariances; +use std::fmt; +use std::rc::Rc; +use syntax::ast; +use rustc::hir; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use util::nodemap::NodeMap; + +use self::VarianceTerm::*; + +pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>; + +#[derive(Copy, Clone, Debug)] +pub struct InferredIndex(pub usize); + +#[derive(Copy, Clone)] +pub enum VarianceTerm<'a> { + ConstantTerm(ty::Variance), + TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>), + InferredTerm(InferredIndex), +} + +impl<'a> fmt::Debug for VarianceTerm<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConstantTerm(c1) => write!(f, "{:?}", c1), + TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), + InferredTerm(id) => { + write!(f, "[{}]", { + let InferredIndex(i) = id; + i + }) + } + } + } +} + +// The first pass over the crate simply builds up the set of inferreds. + +pub struct TermsContext<'a, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + pub arena: &'a TypedArena>, + + pub empty_variances: Rc>, + + // For marker types, UnsafeCell, and other lang items where + // variance is hardcoded, records the item-id and the hardcoded + // variance. + pub lang_items: Vec<(ast::NodeId, Vec)>, + + // Maps from the node id of a type/generic parameter to the + // corresponding inferred index. + pub inferred_map: NodeMap, + + // Maps from an InferredIndex to the info for that variable. + pub inferred_infos: Vec>, +} + +pub struct InferredInfo<'a> { + pub item_id: ast::NodeId, + pub index: usize, + pub param_id: ast::NodeId, + pub term: VarianceTermPtr<'a>, + + // Initial value to use for this parameter when inferring + // variance. For most parameters, this is Bivariant. But for lang + // items and input type parameters on traits, it is different. 
+ pub initial_variance: ty::Variance, +} + +pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + arena: &'a mut TypedArena>) + -> TermsContext<'a, 'tcx> { + let mut terms_cx = TermsContext { + tcx: tcx, + arena: arena, + inferred_map: NodeMap(), + inferred_infos: Vec::new(), + + lang_items: lang_items(tcx), + + // cache and share the variance struct used for items with + // no type/region parameters + empty_variances: Rc::new(vec![]), + }; + + // See README.md for a discussion on dep-graph management. + tcx.visit_all_item_likes_in_krate(|def_id| ItemVariances::to_dep_node(&def_id), &mut terms_cx); + + terms_cx +} + +fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec)> { + let all = vec![ + (tcx.lang_items.phantom_data(), vec![ty::Covariant]), + (tcx.lang_items.unsafe_cell_type(), vec![ty::Invariant]), + + // Deprecated: + (tcx.lang_items.covariant_type(), vec![ty::Covariant]), + (tcx.lang_items.contravariant_type(), vec![ty::Contravariant]), + (tcx.lang_items.invariant_type(), vec![ty::Invariant]), + (tcx.lang_items.covariant_lifetime(), vec![ty::Covariant]), + (tcx.lang_items.contravariant_lifetime(), vec![ty::Contravariant]), + (tcx.lang_items.invariant_lifetime(), vec![ty::Invariant]), + + ]; + + all.into_iter() // iterating over (Option, Variance) + .filter(|&(ref d,_)| d.is_some()) + .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance) + .filter_map(|(d, v)| tcx.map.as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance) + .collect() +} + +impl<'a, 'tcx> TermsContext<'a, 'tcx> { + fn add_inferreds_for_item(&mut self, + item_id: ast::NodeId, + has_self: bool, + generics: &hir::Generics) { + //! Add "inferreds" for the generic parameters declared on this + //! item. This has a lot of annoying parameters because we are + //! trying to drive this from the AST, rather than the + //! ty::Generics, so that we can get span info -- but this + //! means we must accommodate syntactic distinctions. + //! 
+ + // NB: In the code below for writing the results back into the + // tcx, we rely on the fact that all inferreds for a particular + // item are assigned continuous indices. + + let inferreds_on_entry = self.num_inferred(); + + if has_self { + self.add_inferred(item_id, 0, item_id); + } + + for (i, p) in generics.lifetimes.iter().enumerate() { + let id = p.lifetime.id; + let i = has_self as usize + i; + self.add_inferred(item_id, i, id); + } + + for (i, p) in generics.ty_params.iter().enumerate() { + let i = has_self as usize + generics.lifetimes.len() + i; + self.add_inferred(item_id, i, p.id); + } + + // If this item has no type or lifetime parameters, + // then there are no variances to infer, so just + // insert an empty entry into the variance map. + // Arguably we could just leave the map empty in this + // case but it seems cleaner to be able to distinguish + // "invalid item id" from "item id with no + // parameters". + if self.num_inferred() == inferreds_on_entry { + let item_def_id = self.tcx.map.local_def_id(item_id); + let newly_added = self.tcx + .item_variance_map + .borrow_mut() + .insert(item_def_id, self.empty_variances.clone()) + .is_none(); + assert!(newly_added); + } + } + + fn add_inferred(&mut self, item_id: ast::NodeId, index: usize, param_id: ast::NodeId) { + let inf_index = InferredIndex(self.inferred_infos.len()); + let term = self.arena.alloc(InferredTerm(inf_index)); + let initial_variance = self.pick_initial_variance(item_id, index); + self.inferred_infos.push(InferredInfo { + item_id: item_id, + index: index, + param_id: param_id, + term: term, + initial_variance: initial_variance, + }); + let newly_added = self.inferred_map.insert(param_id, inf_index).is_none(); + assert!(newly_added); + + debug!("add_inferred(item_path={}, \ + item_id={}, \ + index={}, \ + param_id={}, \ + inf_index={:?}, \ + initial_variance={:?})", + self.tcx.item_path_str(self.tcx.map.local_def_id(item_id)), + item_id, + index, + param_id, + inf_index, + 
initial_variance); + } + + fn pick_initial_variance(&self, item_id: ast::NodeId, index: usize) -> ty::Variance { + match self.lang_items.iter().find(|&&(n, _)| n == item_id) { + Some(&(_, ref variances)) => variances[index], + None => ty::Bivariant, + } + } + + pub fn num_inferred(&self) -> usize { + self.inferred_infos.len() + } +} + +impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for TermsContext<'a, 'tcx> { + fn visit_item(&mut self, item: &hir::Item) { + debug!("add_inferreds for item {}", + self.tcx.map.node_to_string(item.id)); + + match item.node { + hir::ItemEnum(_, ref generics) | + hir::ItemStruct(_, ref generics) | + hir::ItemUnion(_, ref generics) => { + self.add_inferreds_for_item(item.id, false, generics); + } + hir::ItemTrait(_, ref generics, ..) => { + // Note: all inputs for traits are ultimately + // constrained to be invariant. See `visit_item` in + // the impl for `ConstraintContext` in `constraints.rs`. + self.add_inferreds_for_item(item.id, true, generics); + } + + hir::ItemExternCrate(_) | + hir::ItemUse(..) | + hir::ItemDefaultImpl(..) | + hir::ItemImpl(..) | + hir::ItemStatic(..) | + hir::ItemConst(..) | + hir::ItemFn(..) | + hir::ItemMod(..) | + hir::ItemForeignMod(..) | + hir::ItemTy(..) => {} + } + } + + fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { + } +} diff --git a/src/librustc_typeck/variance/xform.rs b/src/librustc_typeck/variance/xform.rs new file mode 100644 index 0000000000000..507734ce35e44 --- /dev/null +++ b/src/librustc_typeck/variance/xform.rs @@ -0,0 +1,63 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use rustc::ty; + +pub trait Xform { + fn xform(self, v: Self) -> Self; +} + +impl Xform for ty::Variance { + fn xform(self, v: ty::Variance) -> ty::Variance { + // "Variance transformation", Figure 1 of The Paper + match (self, v) { + // Figure 1, column 1. + (ty::Covariant, ty::Covariant) => ty::Covariant, + (ty::Covariant, ty::Contravariant) => ty::Contravariant, + (ty::Covariant, ty::Invariant) => ty::Invariant, + (ty::Covariant, ty::Bivariant) => ty::Bivariant, + + // Figure 1, column 2. + (ty::Contravariant, ty::Covariant) => ty::Contravariant, + (ty::Contravariant, ty::Contravariant) => ty::Covariant, + (ty::Contravariant, ty::Invariant) => ty::Invariant, + (ty::Contravariant, ty::Bivariant) => ty::Bivariant, + + // Figure 1, column 3. + (ty::Invariant, _) => ty::Invariant, + + // Figure 1, column 4. + (ty::Bivariant, _) => ty::Bivariant, + } + } +} + +pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { + // Greatest lower bound of the variance lattice as + // defined in The Paper: + // + // * + // - + + // o + match (v1, v2) { + (ty::Invariant, _) | + (_, ty::Invariant) => ty::Invariant, + + (ty::Covariant, ty::Contravariant) => ty::Invariant, + (ty::Contravariant, ty::Covariant) => ty::Invariant, + + (ty::Covariant, ty::Covariant) => ty::Covariant, + + (ty::Contravariant, ty::Contravariant) => ty::Contravariant, + + (x, ty::Bivariant) | + (ty::Bivariant, x) => x, + } +} diff --git a/src/librustc_unicode/Cargo.toml b/src/librustc_unicode/Cargo.toml new file mode 100644 index 0000000000000..e2b4afb2a5150 --- /dev/null +++ b/src/librustc_unicode/Cargo.toml @@ -0,0 +1,13 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_unicode" +version = "0.0.0" + +[lib] +name = "rustc_unicode" +path = "lib.rs" +test = false +bench = false + +[dependencies] +core = { path = "../libcore" } diff --git a/src/librustc_unicode/char.rs b/src/librustc_unicode/char.rs index 46ecd3a80b5d1..94599216db6a8 100644 --- a/src/librustc_unicode/char.rs 
+++ b/src/librustc_unicode/char.rs @@ -19,7 +19,7 @@ //! [Unicode code point]: http://www.unicode.org/glossary/#code_point //! //! This module exists for technical reasons, the primary documentation for -//! `char` is directly on [the `char` primitive type](../primitive.char.html) +//! `char` is directly on [the `char` primitive type](../../std/primitive.char.html) //! itself. //! //! This module is the home of the iterator implementations for the iterators @@ -29,15 +29,21 @@ #![stable(feature = "rust1", since = "1.0.0")] use core::char::CharExt as C; -use core::option::Option::{self, Some, None}; -use core::iter::Iterator; -use tables::{derived_property, property, general_category, conversions}; +use core::iter::FusedIterator; +use core::fmt; +use tables::{conversions, derived_property, general_category, property}; // stable reexports #[stable(feature = "rust1", since = "1.0.0")] -pub use core::char::{MAX, from_u32, from_u32_unchecked, from_digit, EscapeUnicode, EscapeDefault}; +pub use core::char::{MAX, from_digit, from_u32, from_u32_unchecked}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use core::char::{EscapeDebug, EscapeDefault, EscapeUnicode}; // unstable reexports +#[unstable(feature = "try_from", issue = "33417")] +pub use core::char::CharTryFromError; +#[unstable(feature = "decode_utf8", issue = "33906")] +pub use core::char::{DecodeUtf8, decode_utf8}; #[unstable(feature = "unicode", issue = "27783")] pub use tables::UNICODE_VERSION; @@ -46,8 +52,8 @@ pub use tables::UNICODE_VERSION; /// This `struct` is created by the [`to_lowercase()`] method on [`char`]. See /// its documentation for more. 
/// -/// [`to_lowercase()`]: ../primitive.char.html#method.to_lowercase -/// [`char`]: ../primitive.char.html +/// [`to_lowercase()`]: ../../std/primitive.char.html#method.to_lowercase +/// [`char`]: ../../std/primitive.char.html #[stable(feature = "rust1", since = "1.0.0")] pub struct ToLowercase(CaseMappingIter); @@ -59,13 +65,16 @@ impl Iterator for ToLowercase { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ToLowercase {} + /// Returns an iterator that yields the uppercase equivalent of a `char`. /// /// This `struct` is created by the [`to_uppercase()`] method on [`char`]. See /// its documentation for more. /// -/// [`to_uppercase()`]: ../primitive.char.html#method.to_uppercase -/// [`char`]: ../primitive.char.html +/// [`to_uppercase()`]: ../../std/primitive.char.html#method.to_uppercase +/// [`char`]: ../../std/primitive.char.html #[stable(feature = "rust1", since = "1.0.0")] pub struct ToUppercase(CaseMappingIter); @@ -77,6 +86,8 @@ impl Iterator for ToUppercase { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for ToUppercase {} enum CaseMappingIter { Three(char, char, char), @@ -127,7 +138,7 @@ impl char { /// A 'radix' here is sometimes also called a 'base'. A radix of two /// indicates a binary number, a radix of ten, decimal, and a radix of /// sixteen, hexadecimal, to give some common values. Arbitrary - /// radicum are supported. + /// radices are supported. /// /// Compared to `is_numeric()`, this function only recognizes the characters /// `0-9`, `a-z` and `A-Z`. 
@@ -151,14 +162,9 @@ impl char { /// Basic usage: /// /// ``` - /// let d = '1'; - /// - /// assert!(d.is_digit(10)); - /// - /// let d = 'f'; - /// - /// assert!(d.is_digit(16)); - /// assert!(!d.is_digit(10)); + /// assert!('1'.is_digit(10)); + /// assert!('f'.is_digit(16)); + /// assert!(!'f'.is_digit(10)); /// ``` /// /// Passing a large radix, causing a panic: @@ -167,10 +173,8 @@ impl char { /// use std::thread; /// /// let result = thread::spawn(|| { - /// let d = '1'; - /// /// // this panics - /// d.is_digit(37); + /// '1'.is_digit(37); /// }).join(); /// /// assert!(result.is_err()); @@ -186,7 +190,7 @@ impl char { /// A 'radix' here is sometimes also called a 'base'. A radix of two /// indicates a binary number, a radix of ten, decimal, and a radix of /// sixteen, hexadecimal, to give some common values. Arbitrary - /// radicum are supported. + /// radices are supported. /// /// 'Digit' is defined to be only the following characters: /// @@ -194,7 +198,7 @@ impl char { /// * `a-z` /// * `A-Z` /// - /// # Failure + /// # Errors /// /// Returns `None` if the `char` does not refer to a digit in the given radix. 
/// @@ -207,25 +211,15 @@ impl char { /// Basic usage: /// /// ``` - /// let d = '1'; - /// - /// assert_eq!(d.to_digit(10), Some(1)); - /// - /// let d = 'f'; - /// - /// assert_eq!(d.to_digit(16), Some(15)); + /// assert_eq!('1'.to_digit(10), Some(1)); + /// assert_eq!('f'.to_digit(16), Some(15)); /// ``` /// /// Passing a non-digit results in failure: /// /// ``` - /// let d = 'f'; - /// - /// assert_eq!(d.to_digit(10), None); - /// - /// let d = 'z'; - /// - /// assert_eq!(d.to_digit(16), None); + /// assert_eq!('f'.to_digit(10), None); + /// assert_eq!('z'.to_digit(16), None); /// ``` /// /// Passing a large radix, causing a panic: @@ -234,9 +228,7 @@ impl char { /// use std::thread; /// /// let result = thread::spawn(|| { - /// let d = '1'; - /// - /// d.to_digit(37); + /// '1'.to_digit(37); /// }).join(); /// /// assert!(result.is_err()); @@ -250,8 +242,8 @@ impl char { /// Returns an iterator that yields the hexadecimal Unicode escape of a /// character, as `char`s. /// - /// All characters are escaped with Rust syntax of the form `\\u{NNNN}` - /// where `NNNN` is the shortest hexadecimal representation. + /// All characters are escaped with Rust syntax of the form `\u{NNNNNN}` + /// where `NNNNNN` is the shortest hexadecimal representation. /// /// # Examples /// @@ -283,6 +275,41 @@ impl char { C::escape_unicode(self) } + /// Returns an iterator that yields the literal escape code of a `char`. + /// + /// This will escape the characters similar to the `Debug` implementations + /// of `str` or `char`. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// for i in '\n'.escape_default() { + /// println!("{}", i); + /// } + /// ``` + /// + /// This prints: + /// + /// ```text + /// \ + /// n + /// ``` + /// + /// Collecting into a `String`: + /// + /// ``` + /// let quote: String = '\n'.escape_default().collect(); + /// + /// assert_eq!(quote, "\\n"); + /// ``` + #[unstable(feature = "char_escape_debug", issue = "35068")] + #[inline] + pub fn escape_debug(self) -> EscapeDebug { + C::escape_debug(self) + } + /// Returns an iterator that yields the literal escape code of a `char`. /// /// The default is chosen with a bias toward producing literals that are @@ -408,12 +435,13 @@ impl char { C::len_utf16(self) } - /// Encodes this character as UTF-8 into the provided byte buffer, and then - /// returns the number of bytes written. + /// Encodes this character as UTF-8 into the provided byte buffer, + /// and then returns the subslice of the buffer that contains the encoded character. + /// + /// # Panics /// - /// If the buffer is not large enough, nothing will be written into it and a - /// `None` will be returned. A buffer of length four is large enough to - /// encode any `char`. + /// Panics if the buffer is not large enough. + /// A buffer of length four is large enough to encode any `char`. 
/// /// # Examples /// @@ -426,65 +454,76 @@ impl char { /// /// let result = 'ß'.encode_utf8(&mut b); /// - /// assert_eq!(result, Some(2)); + /// assert_eq!(result, "ß"); + /// + /// assert_eq!(result.len(), 2); /// ``` /// /// A buffer that's too small: /// /// ``` /// #![feature(unicode)] + /// use std::thread; /// - /// let mut b = [0; 1]; + /// let result = thread::spawn(|| { + /// let mut b = [0; 1]; /// - /// let result = 'ß'.encode_utf8(&mut b); + /// // this panics + /// 'ß'.encode_utf8(&mut b); + /// }).join(); /// - /// assert_eq!(result, None); + /// assert!(result.is_err()); /// ``` #[unstable(feature = "unicode", reason = "pending decision about Iterator/Writer/Reader", issue = "27784")] #[inline] - pub fn encode_utf8(self, dst: &mut [u8]) -> Option { + pub fn encode_utf8(self, dst: &mut [u8]) -> &mut str { C::encode_utf8(self, dst) } - /// Encodes this character as UTF-16 into the provided `u16` buffer, and - /// then returns the number of `u16`s written. + /// Encodes this character as UTF-16 into the provided `u16` buffer, + /// and then returns the subslice of the buffer that contains the encoded character. /// - /// If the buffer is not large enough, nothing will be written into it and a - /// `None` will be returned. A buffer of length 2 is large enough to encode - /// any `char`. + /// # Panics + /// + /// Panics if the buffer is not large enough. + /// A buffer of length 2 is large enough to encode any `char`. /// /// # Examples /// - /// In both of these examples, 'ß' takes one `u16` to encode. + /// In both of these examples, '𝕊' takes two `u16`s to encode. 
/// /// ``` /// #![feature(unicode)] /// - /// let mut b = [0; 1]; + /// let mut b = [0; 2]; /// - /// let result = 'ß'.encode_utf16(&mut b); + /// let result = '𝕊'.encode_utf16(&mut b); /// - /// assert_eq!(result, Some(1)); + /// assert_eq!(result.len(), 2); /// ``` /// /// A buffer that's too small: /// /// ``` /// #![feature(unicode)] + /// use std::thread; /// - /// let mut b = [0; 0]; + /// let result = thread::spawn(|| { + /// let mut b = [0; 1]; /// - /// let result = 'ß'.encode_utf8(&mut b); + /// // this panics + /// '𝕊'.encode_utf16(&mut b); + /// }).join(); /// - /// assert_eq!(result, None); + /// assert!(result.is_err()); /// ``` #[unstable(feature = "unicode", reason = "pending decision about Iterator/Writer/Reader", issue = "27784")] #[inline] - pub fn encode_utf16(self, dst: &mut [u16]) -> Option { + pub fn encode_utf16(self, dst: &mut [u16]) -> &mut [u16] { C::encode_utf16(self, dst) } @@ -495,12 +534,8 @@ impl char { /// Basic usage: /// /// ``` - /// let c = 'a'; - /// - /// assert!(c.is_alphabetic()); - /// - /// let c = '京'; - /// assert!(c.is_alphabetic()); + /// assert!('a'.is_alphabetic()); + /// assert!('京'.is_alphabetic()); /// /// let c = '💝'; /// // love is many things, but it is not alphabetic @@ -554,21 +589,13 @@ impl char { /// Basic usage: /// /// ``` - /// let c = 'a'; - /// assert!(c.is_lowercase()); - /// - /// let c = 'δ'; - /// assert!(c.is_lowercase()); - /// - /// let c = 'A'; - /// assert!(!c.is_lowercase()); - /// - /// let c = 'Δ'; - /// assert!(!c.is_lowercase()); + /// assert!('a'.is_lowercase()); + /// assert!('δ'.is_lowercase()); + /// assert!(!'A'.is_lowercase()); + /// assert!(!'Δ'.is_lowercase()); /// /// // The various Chinese scripts do not have case, and so: - /// let c = '中'; - /// assert!(!c.is_lowercase()); + /// assert!(!'中'.is_lowercase()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -590,21 +617,13 @@ impl char { /// Basic usage: /// /// ``` - /// let c = 'a'; - /// 
assert!(!c.is_uppercase()); - /// - /// let c = 'δ'; - /// assert!(!c.is_uppercase()); - /// - /// let c = 'A'; - /// assert!(c.is_uppercase()); - /// - /// let c = 'Δ'; - /// assert!(c.is_uppercase()); + /// assert!(!'a'.is_uppercase()); + /// assert!(!'δ'.is_uppercase()); + /// assert!('A'.is_uppercase()); + /// assert!('Δ'.is_uppercase()); /// /// // The various Chinese scripts do not have case, and so: - /// let c = '中'; - /// assert!(!c.is_uppercase()); + /// assert!(!'中'.is_uppercase()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -626,15 +645,12 @@ impl char { /// Basic usage: /// /// ``` - /// let c = ' '; - /// assert!(c.is_whitespace()); + /// assert!(' '.is_whitespace()); /// /// // a non-breaking space - /// let c = '\u{A0}'; - /// assert!(c.is_whitespace()); + /// assert!('\u{A0}'.is_whitespace()); /// - /// let c = '越'; - /// assert!(!c.is_whitespace()); + /// assert!(!'越'.is_whitespace()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -656,29 +672,14 @@ impl char { /// Basic usage: /// /// ``` - /// let c = '٣'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = '7'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = '৬'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = 'K'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = 'و'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = '藏'; - /// assert!(c.is_alphanumeric()); - /// - /// let c = '¾'; - /// assert!(!c.is_alphanumeric()); - /// - /// let c = '①'; - /// assert!(!c.is_alphanumeric()); + /// assert!('٣'.is_alphanumeric()); + /// assert!('7'.is_alphanumeric()); + /// assert!('৬'.is_alphanumeric()); + /// assert!('K'.is_alphanumeric()); + /// assert!('و'.is_alphanumeric()); + /// assert!('藏'.is_alphanumeric()); + /// assert!(!'¾'.is_alphanumeric()); + /// assert!(!'①'.is_alphanumeric()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -697,11 +698,8 @@ impl char { /// /// ``` /// // U+009C, STRING TERMINATOR 
- /// let c = 'œ'; - /// assert!(c.is_control()); - /// - /// let c = 'q'; - /// assert!(!c.is_control()); + /// assert!('œ'.is_control()); + /// assert!(!'q'.is_control()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -719,29 +717,14 @@ impl char { /// Basic usage: /// /// ``` - /// let c = '٣'; - /// assert!(c.is_numeric()); - /// - /// let c = '7'; - /// assert!(c.is_numeric()); - /// - /// let c = '৬'; - /// assert!(c.is_numeric()); - /// - /// let c = 'K'; - /// assert!(!c.is_numeric()); - /// - /// let c = 'و'; - /// assert!(!c.is_numeric()); - /// - /// let c = '藏'; - /// assert!(!c.is_numeric()); - /// - /// let c = '¾'; - /// assert!(!c.is_numeric()); - /// - /// let c = '①'; - /// assert!(!c.is_numeric()); + /// assert!('٣'.is_numeric()); + /// assert!('7'.is_numeric()); + /// assert!('৬'.is_numeric()); + /// assert!(!'K'.is_numeric()); + /// assert!(!'و'.is_numeric()); + /// assert!(!'藏'.is_numeric()); + /// assert!(!'¾'.is_numeric()); + /// assert!(!'①'.is_numeric()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -776,13 +759,13 @@ impl char { /// Basic usage: /// /// ``` - /// let c = 'c'; + /// assert_eq!('C'.to_lowercase().collect::(), "c"); /// - /// assert_eq!(c.to_uppercase().next(), Some('C')); + /// // Sometimes the result is more than one character: + /// assert_eq!('İ'.to_lowercase().collect::(), "i\u{307}"); /// /// // Japanese scripts do not have case, and so: - /// let c = '山'; - /// assert_eq!(c.to_uppercase().next(), Some('山')); + /// assert_eq!('山'.to_lowercase().collect::(), "山"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] @@ -813,12 +796,13 @@ impl char { /// Basic usage: /// /// ``` - /// let c = 'c'; - /// assert_eq!(c.to_uppercase().next(), Some('C')); + /// assert_eq!('c'.to_uppercase().collect::(), "C"); + /// + /// // Sometimes the result is more than one character: + /// assert_eq!('ß'.to_uppercase().collect::(), "SS"); /// /// // Japanese does not have case, and 
so: - /// let c = '山'; - /// assert_eq!(c.to_uppercase().next(), Some('山')); + /// assert_eq!('山'.to_uppercase().collect::(), "山"); /// ``` /// /// In Turkish, the equivalent of 'i' in Latin has five forms instead of two: @@ -829,21 +813,17 @@ impl char { /// Note that the lowercase dotted 'i' is the same as the Latin. Therefore: /// /// ``` - /// let i = 'i'; - /// - /// let upper_i = i.to_uppercase().next(); + /// let upper_i: String = 'i'.to_uppercase().collect(); /// ``` /// /// The value of `upper_i` here relies on the language of the text: if we're - /// in `en-US`, it should be `Some('I')`, but if we're in `tr_TR`, it should - /// be `Some('İ')`. `to_uppercase()` does not take this into account, and so: + /// in `en-US`, it should be `"I"`, but if we're in `tr_TR`, it should + /// be `"İ"`. `to_uppercase()` does not take this into account, and so: /// /// ``` - /// let i = 'i'; + /// let upper_i: String = 'i'.to_uppercase().collect(); /// - /// let upper_i = i.to_uppercase().next(); - /// - /// assert_eq!(Some('I'), upper_i); + /// assert_eq!(upper_i, "I"); /// ``` /// /// holds across languages. @@ -855,7 +835,7 @@ impl char { } /// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. -#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] +#[stable(feature = "decode_utf16", since = "1.9.0")] #[derive(Clone)] pub struct DecodeUtf16 where I: Iterator @@ -864,7 +844,14 @@ pub struct DecodeUtf16 buf: Option, } -/// Create an iterator over the UTF-16 encoded code points in `iterable`, +/// An iterator that decodes UTF-16 encoded code points from an iterator of `u16`s. +#[stable(feature = "decode_utf16", since = "1.9.0")] +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct DecodeUtf16Error { + code: u16, +} + +/// Create an iterator over the UTF-16 encoded code points in `iter`, /// returning unpaired surrogates as `Err`s. 
/// /// # Examples @@ -872,8 +859,6 @@ pub struct DecodeUtf16 /// Basic usage: /// /// ``` -/// #![feature(decode_utf16)] -/// /// use std::char::decode_utf16; /// /// fn main() { @@ -882,7 +867,9 @@ pub struct DecodeUtf16 /// 0x0073, 0xDD1E, 0x0069, 0x0063, /// 0xD834]; /// -/// assert_eq!(decode_utf16(v.iter().cloned()).collect::>(), +/// assert_eq!(decode_utf16(v.iter().cloned()) +/// .map(|r| r.map_err(|e| e.unpaired_surrogate())) +/// .collect::>(), /// vec![Ok('𝄞'), /// Ok('m'), Ok('u'), Ok('s'), /// Err(0xDD1E), @@ -894,8 +881,6 @@ pub struct DecodeUtf16 /// A lossy decoder can be obtained by replacing `Err` results with the replacement character: /// /// ``` -/// #![feature(decode_utf16)] -/// /// use std::char::{decode_utf16, REPLACEMENT_CHARACTER}; /// /// fn main() { @@ -910,26 +895,28 @@ pub struct DecodeUtf16 /// "𝄞mus�ic�"); /// } /// ``` -#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] +#[stable(feature = "decode_utf16", since = "1.9.0")] #[inline] -pub fn decode_utf16>(iterable: I) -> DecodeUtf16 { +pub fn decode_utf16>(iter: I) -> DecodeUtf16 { DecodeUtf16 { - iter: iterable.into_iter(), + iter: iter.into_iter(), buf: None, } } -#[unstable(feature = "decode_utf16", reason = "recently exposed", issue = "27830")] -impl> Iterator for DecodeUtf16 { - type Item = Result; +#[stable(feature = "decode_utf16", since = "1.9.0")] +impl> Iterator for DecodeUtf16 { + type Item = Result; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { let u = match self.buf.take() { Some(buf) => buf, - None => match self.iter.next() { - Some(u) => u, - None => return None, - }, + None => { + match self.iter.next() { + Some(u) => u, + None => return None, + } + } }; if u < 0xD800 || 0xDFFF < u { @@ -937,18 +924,18 @@ impl> Iterator for DecodeUtf16 { Some(Ok(unsafe { from_u32_unchecked(u as u32) })) } else if u >= 0xDC00 { // a trailing surrogate - Some(Err(u)) + Some(Err(DecodeUtf16Error { code: u })) } else { let u2 = match 
self.iter.next() { Some(u2) => u2, // eof - None => return Some(Err(u)), + None => return Some(Err(DecodeUtf16Error { code: u })), }; if u2 < 0xDC00 || u2 > 0xDFFF { // not a trailing surrogate so we're not a valid // surrogate pair, so rewind to redecode u2 next time. self.buf = Some(u2); - return Some(Err(u)); + return Some(Err(DecodeUtf16Error { code: u })); } // all ok, so lets decode it. @@ -966,8 +953,25 @@ impl> Iterator for DecodeUtf16 { } } -/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a decoding error. +impl DecodeUtf16Error { + /// Returns the unpaired surrogate which caused this error. + #[stable(feature = "decode_utf16", since = "1.9.0")] + pub fn unpaired_surrogate(&self) -> u16 { + self.code + } +} + +#[stable(feature = "decode_utf16", since = "1.9.0")] +impl fmt::Display for DecodeUtf16Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "unpaired surrogate found: {:x}", self.code) + } +} + +/// `U+FFFD REPLACEMENT CHARACTER` (�) is used in Unicode to represent a +/// decoding error. +/// /// It can occur, for example, when giving ill-formed UTF-8 bytes to -/// [`String::from_utf8_lossy`](../string/struct.String.html#method.from_utf8_lossy). -#[unstable(feature = "decode_utf16", reason = "recently added", issue = "27830")] +/// [`String::from_utf8_lossy`](../../std/string/struct.String.html#method.from_utf8_lossy). 
+#[stable(feature = "decode_utf16", since = "1.9.0")] pub const REPLACEMENT_CHARACTER: char = '\u{FFFD}'; diff --git a/src/librustc_unicode/lib.rs b/src/librustc_unicode/lib.rs index 161da07911061..65bd717e01a82 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/librustc_unicode/lib.rs @@ -29,11 +29,17 @@ html_playground_url = "https://play.rust-lang.org/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/", test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] +#![cfg_attr(not(stage0), deny(warnings))] #![no_std] +#![feature(char_escape_debug)] #![feature(core_char_ext)] +#![feature(decode_utf8)] +#![feature(fused)] #![feature(lang_items)] #![feature(staged_api)] +#![feature(try_from)] +#![feature(unicode)] mod tables; mod u_str; @@ -41,12 +47,17 @@ pub mod char; #[allow(deprecated)] pub mod str { - pub use u_str::{UnicodeStr, SplitWhitespace}; - pub use u_str::{utf8_char_width, is_utf16}; - pub use u_str::{Utf16Encoder}; + pub use u_str::{SplitWhitespace, UnicodeStr}; + pub use u_str::{is_utf16, utf8_char_width}; + pub use u_str::Utf16Encoder; } // For use in libcollections, not re-exported in libstd. pub mod derived_property { - pub use tables::derived_property::{Cased, Case_Ignorable}; + pub use tables::derived_property::{Case_Ignorable, Cased}; +} + +// For use in libsyntax +pub mod property { + pub use tables::property::Pattern_White_Space; } diff --git a/src/librustc_unicode/tables.rs b/src/librustc_unicode/tables.rs index a147bea791c47..21543e2ad073e 100644 --- a/src/librustc_unicode/tables.rs +++ b/src/librustc_unicode/tables.rs @@ -1,4 +1,4 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. 
// @@ -14,1180 +14,1335 @@ /// The version of [Unicode](http://www.unicode.org/) /// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on. -pub const UNICODE_VERSION: (u64, u64, u64) = (8, 0, 0); +pub const UNICODE_VERSION: (u64, u64, u64) = (9, 0, 0); -fn bsearch_range_table(c: char, r: &'static [(char, char)]) -> bool { - use core::cmp::Ordering::{Equal, Less, Greater}; - r.binary_search_by(|&(lo, hi)| { - if c < lo { - Greater - } else if hi < c { - Less - } else { - Equal - } - }) - .is_ok() + +// BoolTrie is a trie for representing a set of Unicode codepoints. It is +// implemented with postfix compression (sharing of identical child nodes), +// which gives both compact size and fast lookup. +// +// The space of Unicode codepoints is divided into 3 subareas, each +// represented by a trie with different depth. In the first (0..0x800), there +// is no trie structure at all; each u64 entry corresponds to a bitvector +// effectively holding 64 bool values. +// +// In the second (0x800..0x10000), each child of the root node represents a +// 64-wide subrange, but instead of storing the full 64-bit value of the leaf, +// the trie stores an 8-bit index into a shared table of leaf values. This +// exploits the fact that in reasonable sets, many such leaves can be shared. +// +// In the third (0x10000..0x110000), each child of the root node represents a +// 4096-wide subrange, and the trie stores an 8-bit index into a 64-byte slice +// of a child tree. Each of these 64 bytes represents an index into the table +// of shared 64-bit leaf values. This exploits the sparse structure in the +// non-BMP range of most Unicode sets. 
+pub struct BoolTrie { + // 0..0x800 (corresponding to 1 and 2 byte utf-8 sequences) + r1: [u64; 32], // leaves + + // 0x800..0x10000 (corresponding to 3 byte utf-8 sequences) + r2: [u8; 992], // first level + r3: &'static [u64], // leaves + + // 0x10000..0x110000 (corresponding to 4 byte utf-8 sequences) + r4: [u8; 256], // first level + r5: &'static [u8], // second level + r6: &'static [u64], // leaves +} + +fn trie_range_leaf(c: usize, bitmap_chunk: u64) -> bool { + ((bitmap_chunk >> (c & 63)) & 1) != 0 +} + +fn trie_lookup_range_table(c: char, r: &'static BoolTrie) -> bool { + let c = c as usize; + if c < 0x800 { + trie_range_leaf(c, r.r1[c >> 6]) + } else if c < 0x10000 { + let child = r.r2[(c >> 6) - 0x20]; + trie_range_leaf(c, r.r3[child as usize]) + } else { + let child = r.r4[(c >> 12) - 0x10]; + let leaf = r.r5[((child as usize) << 6) + ((c >> 6) & 0x3f)]; + trie_range_leaf(c, r.r6[leaf as usize]) + } } pub mod general_category { - pub const Cc_table: &'static [(char, char)] = &[ - ('\0', '\u{1f}'), ('\u{7f}', '\u{9f}') - ]; + pub const Cc_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x00000000ffffffff, 0x8000000000000000, 0x00000000ffffffff, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000 + ], + r4: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000 + ], + }; pub fn Cc(c: char) -> bool { - super::bsearch_range_table(c, 
Cc_table) + super::trie_lookup_range_table(c, Cc_table) } - pub const N_table: &'static [(char, char)] = &[ - ('\u{30}', '\u{39}'), ('\u{660}', '\u{669}'), ('\u{6f0}', '\u{6f9}'), ('\u{7c0}', - '\u{7c9}'), ('\u{966}', '\u{96f}'), ('\u{9e6}', '\u{9ef}'), ('\u{a66}', '\u{a6f}'), - ('\u{ae6}', '\u{aef}'), ('\u{b66}', '\u{b6f}'), ('\u{be6}', '\u{bef}'), ('\u{c66}', - '\u{c6f}'), ('\u{ce6}', '\u{cef}'), ('\u{d66}', '\u{d6f}'), ('\u{de6}', '\u{def}'), - ('\u{e50}', '\u{e59}'), ('\u{ed0}', '\u{ed9}'), ('\u{f20}', '\u{f29}'), ('\u{1040}', - '\u{1049}'), ('\u{1090}', '\u{1099}'), ('\u{16ee}', '\u{16f0}'), ('\u{17e0}', '\u{17e9}'), - ('\u{1810}', '\u{1819}'), ('\u{1946}', '\u{194f}'), ('\u{19d0}', '\u{19d9}'), ('\u{1a80}', - '\u{1a89}'), ('\u{1a90}', '\u{1a99}'), ('\u{1b50}', '\u{1b59}'), ('\u{1bb0}', '\u{1bb9}'), - ('\u{1c40}', '\u{1c49}'), ('\u{1c50}', '\u{1c59}'), ('\u{2160}', '\u{2182}'), ('\u{2185}', - '\u{2188}'), ('\u{3007}', '\u{3007}'), ('\u{3021}', '\u{3029}'), ('\u{3038}', '\u{303a}'), - ('\u{a620}', '\u{a629}'), ('\u{a6e6}', '\u{a6ef}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a900}', - '\u{a909}'), ('\u{a9d0}', '\u{a9d9}'), ('\u{a9f0}', '\u{a9f9}'), ('\u{aa50}', '\u{aa59}'), - ('\u{abf0}', '\u{abf9}'), ('\u{ff10}', '\u{ff19}'), ('\u{10140}', '\u{10174}'), - ('\u{10341}', '\u{10341}'), ('\u{1034a}', '\u{1034a}'), ('\u{103d1}', '\u{103d5}'), - ('\u{104a0}', '\u{104a9}'), ('\u{11066}', '\u{1106f}'), ('\u{110f0}', '\u{110f9}'), - ('\u{11136}', '\u{1113f}'), ('\u{111d0}', '\u{111d9}'), ('\u{112f0}', '\u{112f9}'), - ('\u{114d0}', '\u{114d9}'), ('\u{11650}', '\u{11659}'), ('\u{116c0}', '\u{116c9}'), - ('\u{11730}', '\u{11739}'), ('\u{118e0}', '\u{118e9}'), ('\u{12400}', '\u{1246e}'), - ('\u{16a60}', '\u{16a69}'), ('\u{16b50}', '\u{16b59}'), ('\u{1d7ce}', '\u{1d7ff}') - ]; + pub const N_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x03ff000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 
0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x000003ff00000000, 0x0000000000000000, 0x03ff000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x00000000000003ff + ], + r2: [ + 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 2, 0, 2, 3, + 0, 0, 0, 0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 5, 0, 0, 0, 3, 2, 0, 0, 0, 0, 6, 0, 2, 0, 0, 7, 0, 0, 2, 8, 0, 0, 7, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 2, 4, 0, 0, 12, 0, 2, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0x0000ffc000000000, 0x0000000003ff0000, 0x000003ff00000000, + 0x00000000000003ff, 0x0001c00000000000, 0x000000000000ffc0, 0x0000000003ff03ff, + 0x03ff000000000000, 0xffffffff00000000, 0x00000000000001e7, 0x070003fe00000080, + 0x03ff000003ff0000 + ], + r4: [ + 0, 1, 2, 3, 3, 3, 4, 3, 3, 3, 3, 3, 3, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 + ], + r5: &[ + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 3, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 5, 0, 6, 7, 0, 0, 8, 0, 0, 0, 6, 0, 0, 0, 0, 0, 8, 0, 8, 0, 0, 0, + 0, 0, 8, 0, 9, 6, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, + 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x001fffffffffffff, 0x0000000000000402, 0x00000000003e0000, + 0x000003ff00000000, 0x0000ffc000000000, 0x03ff000000000000, 0xffc0000000000000, + 0x0000000003ff0000, 0x00000000000003ff, 0xffffffffffffffff, 0x00007fffffffffff, + 0xffffffffffffc000 + ], + }; pub fn N(c: char) -> bool { - super::bsearch_range_table(c, N_table) + super::trie_lookup_range_table(c, N_table) } } pub mod derived_property { - pub const Alphabetic_table: &'static [(char, char)] = &[ - ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), - ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), - ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', - '\u{2ee}'), ('\u{345}', '\u{345}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), - ('\u{37a}', '\u{37d}'), ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', - '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), - ('\u{3f7}', '\u{481}'), ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', - '\u{559}'), ('\u{561}', '\u{587}'), ('\u{5b0}', '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'), ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', - '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{610}', '\u{61a}'), ('\u{620}', '\u{657}'), - ('\u{659}', '\u{65f}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'), ('\u{6e1}', - '\u{6e8}'), ('\u{6ed}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), - ('\u{710}', '\u{73f}'), ('\u{74d}', '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', - '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), ('\u{800}', '\u{817}'), ('\u{81a}', '\u{82c}'), - ('\u{840}', '\u{858}'), ('\u{8a0}', '\u{8b4}'), ('\u{8e3}', '\u{8e9}'), ('\u{8f0}', - '\u{93b}'), ('\u{93d}', '\u{94c}'), ('\u{94e}', '\u{950}'), ('\u{955}', '\u{963}'), - ('\u{971}', '\u{983}'), ('\u{985}', '\u{98c}'), 
('\u{98f}', '\u{990}'), ('\u{993}', - '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), - ('\u{9bd}', '\u{9c4}'), ('\u{9c7}', '\u{9c8}'), ('\u{9cb}', '\u{9cc}'), ('\u{9ce}', - '\u{9ce}'), ('\u{9d7}', '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e3}'), - ('\u{9f0}', '\u{9f1}'), ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', - '\u{a10}'), ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), - ('\u{a35}', '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a3e}', '\u{a42}'), ('\u{a47}', - '\u{a48}'), ('\u{a4b}', '\u{a4c}'), ('\u{a51}', '\u{a51}'), ('\u{a59}', '\u{a5c}'), - ('\u{a5e}', '\u{a5e}'), ('\u{a70}', '\u{a75}'), ('\u{a81}', '\u{a83}'), ('\u{a85}', - '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), - ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abd}', '\u{ac5}'), ('\u{ac7}', - '\u{ac9}'), ('\u{acb}', '\u{acc}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae3}'), - ('\u{af9}', '\u{af9}'), ('\u{b01}', '\u{b03}'), ('\u{b05}', '\u{b0c}'), ('\u{b0f}', - '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', '\u{b30}'), ('\u{b32}', '\u{b33}'), - ('\u{b35}', '\u{b39}'), ('\u{b3d}', '\u{b44}'), ('\u{b47}', '\u{b48}'), ('\u{b4b}', - '\u{b4c}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b63}'), - ('\u{b71}', '\u{b71}'), ('\u{b82}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', - '\u{b90}'), ('\u{b92}', '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), - ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', - '\u{bb9}'), ('\u{bbe}', '\u{bc2}'), ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcc}'), - ('\u{bd0}', '\u{bd0}'), ('\u{bd7}', '\u{bd7}'), ('\u{c00}', '\u{c03}'), ('\u{c05}', - '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), - ('\u{c3d}', '\u{c44}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4c}'), ('\u{c55}', - '\u{c56}'), ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c63}'), 
('\u{c81}', '\u{c83}'), - ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', - '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbd}', '\u{cc4}'), ('\u{cc6}', '\u{cc8}'), - ('\u{cca}', '\u{ccc}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', - '\u{ce3}'), ('\u{cf1}', '\u{cf2}'), ('\u{d01}', '\u{d03}'), ('\u{d05}', '\u{d0c}'), - ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'), ('\u{d3d}', '\u{d44}'), ('\u{d46}', - '\u{d48}'), ('\u{d4a}', '\u{d4c}'), ('\u{d4e}', '\u{d4e}'), ('\u{d57}', '\u{d57}'), - ('\u{d5f}', '\u{d63}'), ('\u{d7a}', '\u{d7f}'), ('\u{d82}', '\u{d83}'), ('\u{d85}', - '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), - ('\u{dc0}', '\u{dc6}'), ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), ('\u{dd8}', - '\u{ddf}'), ('\u{df2}', '\u{df3}'), ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e46}'), - ('\u{e4d}', '\u{e4d}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e87}', - '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', '\u{e8d}'), ('\u{e94}', '\u{e97}'), - ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', - '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', '\u{eb9}'), ('\u{ebb}', '\u{ebd}'), - ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), ('\u{ecd}', '\u{ecd}'), ('\u{edc}', - '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}', '\u{f47}'), ('\u{f49}', '\u{f6c}'), - ('\u{f71}', '\u{f81}'), ('\u{f88}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), ('\u{1000}', - '\u{1036}'), ('\u{1038}', '\u{1038}'), ('\u{103b}', '\u{103f}'), ('\u{1050}', '\u{1062}'), - ('\u{1065}', '\u{1068}'), ('\u{106e}', '\u{1086}'), ('\u{108e}', '\u{108e}'), ('\u{109c}', - '\u{109d}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), - ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}', - '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'), - ('\u{128a}', '\u{128d}'), ('\u{1290}', 
'\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}', - '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'), - ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{135f}', - '\u{135f}'), ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), - ('\u{1401}', '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', - '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1713}'), - ('\u{1720}', '\u{1733}'), ('\u{1740}', '\u{1753}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', - '\u{1770}'), ('\u{1772}', '\u{1773}'), ('\u{1780}', '\u{17b3}'), ('\u{17b6}', '\u{17c8}'), - ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}', '\u{1877}'), ('\u{1880}', - '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}', '\u{192b}'), - ('\u{1930}', '\u{1938}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', - '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), - ('\u{1a61}', '\u{1a74}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1b00}', '\u{1b33}'), ('\u{1b35}', - '\u{1b43}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b80}', '\u{1ba9}'), ('\u{1bac}', '\u{1baf}'), - ('\u{1bba}', '\u{1be5}'), ('\u{1be7}', '\u{1bf1}'), ('\u{1c00}', '\u{1c35}'), ('\u{1c4d}', - '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1ce9}', '\u{1cec}'), ('\u{1cee}', '\u{1cf3}'), - ('\u{1cf5}', '\u{1cf6}'), ('\u{1d00}', '\u{1dbf}'), ('\u{1de7}', '\u{1df4}'), ('\u{1e00}', - '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), - ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', - '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), - ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', - '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', 
'\u{1ff4}'), - ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', - '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), - ('\u{2115}', '\u{2115}'), ('\u{2119}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', - '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{212d}'), ('\u{212f}', '\u{2139}'), - ('\u{213c}', '\u{213f}'), ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', - '\u{2188}'), ('\u{24b6}', '\u{24e9}'), ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), - ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cee}'), ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', - '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), - ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', - '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), - ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}', - '\u{2dff}'), ('\u{2e2f}', '\u{2e2f}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'), - ('\u{3031}', '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}', - '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'), - ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', - '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), - ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}', - '\u{a66e}'), ('\u{a674}', '\u{a67b}'), ('\u{a67f}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'), - ('\u{a722}', '\u{a788}'), ('\u{a78b}', '\u{a7ad}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', - '\u{a801}'), ('\u{a803}', '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a827}'), - ('\u{a840}', '\u{a873}'), ('\u{a880}', '\u{a8c3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}', - '\u{a8fb}'), ('\u{a8fd}', 
'\u{a8fd}'), ('\u{a90a}', '\u{a92a}'), ('\u{a930}', '\u{a952}'), - ('\u{a960}', '\u{a97c}'), ('\u{a980}', '\u{a9b2}'), ('\u{a9b4}', '\u{a9bf}'), ('\u{a9cf}', - '\u{a9cf}'), ('\u{a9e0}', '\u{a9e4}'), ('\u{a9e6}', '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), - ('\u{aa00}', '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', - '\u{aa7a}'), ('\u{aa7e}', '\u{aabe}'), ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), - ('\u{aadb}', '\u{aadd}'), ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', '\u{aaf5}'), ('\u{ab01}', - '\u{ab06}'), ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), - ('\u{ab28}', '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', - '\u{abea}'), ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), - ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', - '\u{fb17}'), ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), - ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', - '\u{fbb1}'), ('\u{fbd3}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), - ('\u{fdf0}', '\u{fdfb}'), ('\u{fe70}', '\u{fe74}'), ('\u{fe76}', '\u{fefc}'), ('\u{ff21}', - '\u{ff3a}'), ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), - ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', - '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', - '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', - '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', - '\u{102d0}'), ('\u{10300}', '\u{1031f}'), ('\u{10330}', '\u{1034a}'), ('\u{10350}', - '\u{1037a}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', - '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{10500}', - '\u{10527}'), 
('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', - '\u{10755}'), ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', - '\u{10808}'), ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', - '\u{1083c}'), ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', - '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', - '\u{10915}'), ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', - '\u{109bf}'), ('\u{10a00}', '\u{10a03}'), ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', - '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), ('\u{10a19}', '\u{10a33}'), ('\u{10a60}', - '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', - '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', - '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', - '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{11000}', '\u{11045}'), ('\u{11082}', - '\u{110b8}'), ('\u{110d0}', '\u{110e8}'), ('\u{11100}', '\u{11132}'), ('\u{11150}', - '\u{11172}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111bf}'), ('\u{111c1}', - '\u{111c4}'), ('\u{111da}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'), ('\u{11200}', - '\u{11211}'), ('\u{11213}', '\u{11234}'), ('\u{11237}', '\u{11237}'), ('\u{11280}', - '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), ('\u{1128f}', - '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112e8}'), ('\u{11300}', - '\u{11303}'), ('\u{11305}', '\u{1130c}'), ('\u{1130f}', '\u{11310}'), ('\u{11313}', - '\u{11328}'), ('\u{1132a}', '\u{11330}'), ('\u{11332}', '\u{11333}'), ('\u{11335}', - '\u{11339}'), ('\u{1133d}', '\u{11344}'), ('\u{11347}', '\u{11348}'), ('\u{1134b}', - '\u{1134c}'), ('\u{11350}', '\u{11350}'), ('\u{11357}', '\u{11357}'), ('\u{1135d}', - '\u{11363}'), ('\u{11480}', '\u{114c1}'), ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', - '\u{114c7}'), ('\u{11580}', 
'\u{115b5}'), ('\u{115b8}', '\u{115be}'), ('\u{115d8}', - '\u{115dd}'), ('\u{11600}', '\u{1163e}'), ('\u{11640}', '\u{11640}'), ('\u{11644}', - '\u{11644}'), ('\u{11680}', '\u{116b5}'), ('\u{11700}', '\u{11719}'), ('\u{1171d}', - '\u{1172a}'), ('\u{118a0}', '\u{118df}'), ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', - '\u{11af8}'), ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', - '\u{12543}'), ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', - '\u{16a38}'), ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', - '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', - '\u{16b8f}'), ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f7e}'), ('\u{16f93}', - '\u{16f9f}'), ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', - '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1bc9e}', - '\u{1bc9e}'), ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', - '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', - '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', - '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', - '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', - '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', - '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', - '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', - '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', - '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', - '\u{1d7cb}'), ('\u{1e800}', '\u{1e8c4}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', - '\u{1ee1f}'), ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', - '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), 
('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', - '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', - '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', - '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', - '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', - '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', - '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', - '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', - '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', - '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), ('\u{1f130}', '\u{1f149}'), ('\u{1f150}', - '\u{1f169}'), ('\u{1f170}', '\u{1f189}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', - '\u{2b734}'), ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', - '\u{2fa1d}') - ]; + pub const Alphabetic_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0x0000000000000000, 0xbcdf000000000020, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xfffffffe027fffff, 0xbfff0000000000ff, 0x000707ffffff00b6, + 0xffffffff07ff0000, 0xffffc000feffffff, 0xffffffffffffffff, 0x9c00e1fe1fefffff, + 0xffffffffffff0000, 0xffffffffffffe000, 0x0003ffffffffffff, 0x043007fffffffc00 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 36, 36, 36, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 36, 36, 36, 36, 36, 36, 36, 36, 45, 46, 47, 48, 
49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 31, 63, 64, 65, 66, 55, 67, 68, 69, 36, 36, 36, 70, 36, 36, + 36, 36, 71, 72, 73, 74, 31, 75, 76, 31, 77, 78, 68, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 79, 80, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 81, 82, 36, 83, 84, 85, 86, 87, 88, 31, 31, 31, + 31, 31, 31, 31, 89, 44, 90, 91, 92, 36, 93, 94, 31, 31, 31, 31, 31, 31, 31, 31, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 55, 31, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 
36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 95, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 96, 97, 36, 36, 36, 36, 98, 99, 36, 100, 101, 36, 102, + 103, 104, 105, 36, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 36, 117, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 118, 119, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, + 36, 36, 36, 36, 36, 120, 36, 121, 122, 123, 124, 125, 36, 36, 36, 36, 126, 127, 128, + 129, 31, 130, 36, 131, 132, 133, 113, 134 + ], + r3: &[ + 0x00001ffffcffffff, 0x0000000001ffffff, 0x3fdfffff00000000, 0xffff03f8fff00000, + 0xefffffffffffffff, 0xfffe000fffe1dfff, 
0xe3c5fdfffff99fef, 0x0003000fb080599f, + 0xc36dfdfffff987ee, 0x003f00005e021987, 0xe3edfdfffffbbfee, 0x0200000f00011bbf, + 0xe3edfdfffff99fee, 0x0002000fb0c0199f, 0xc3ffc718d63dc7ec, 0x0000000000811dc7, + 0xe3fffdfffffddfef, 0x0000000f07601ddf, 0xe3effdfffffddfef, 0x0006000f40601ddf, + 0xe7fffffffffddfee, 0xfc00000f80f05ddf, 0x2ffbfffffc7fffec, 0x000c0000ff5f807f, + 0x07fffffffffffffe, 0x000000000000207f, 0x3bffecaefef02596, 0x00000000f000205f, + 0x0000000000000001, 0xfffe1ffffffffeff, 0x1ffffffffeffff03, 0x0000000000000000, + 0xf97fffffffffffff, 0xffffc1e7ffff0000, 0xffffffff3000407f, 0xf7ffffffffff20bf, + 0xffffffffffffffff, 0xffffffff3d7f3dff, 0x7f3dffffffff3dff, 0xffffffffff7fff3d, + 0xffffffffff3dffff, 0x0000000087ffffff, 0xffffffff0000ffff, 0x3f3fffffffffffff, + 0xfffffffffffffffe, 0xffff9fffffffffff, 0xffffffff07fffffe, 0x01ffc7ffffffffff, + 0x000fffff000fdfff, 0x000ddfff000fffff, 0xffcfffffffffffff, 0x00000000108001ff, + 0xffffffff00000000, 0x00ffffffffffffff, 0xffff07ffffffffff, 0x003fffffffffffff, + 0x01ff0fff7fffffff, 0x001f3fffffff0000, 0xffff0fffffffffff, 0x00000000000003ff, + 0xffffffff0fffffff, 0x001ffffe7fffffff, 0x0000008000000000, 0xffefffffffffffff, + 0x0000000000000fef, 0xfc00f3ffffffffff, 0x0003ffbfffffffff, 0x3ffffffffc00e000, + 0x00000000000001ff, 0x006fde0000000000, 0x001fff8000000000, 0xffffffff3f3fffff, + 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, 0x8002000000000000, + 0x000000001fff0000, 0xf3ffbd503e2ffc84, 0xffffffff000043e0, 0xffc0000000000000, + 0x000003ffffffffff, 0xffff7fffffffffff, 0xffffffff7fffffff, 0x000c781fffffffff, + 0xffff20bfffffffff, 0x000080ffffffffff, 0x7f7f7f7f007fffff, 0xffffffff7f7f7f7f, + 0x0000800000000000, 0x1f3e03fe000000e0, 0xfffffffee07fffff, 0xf7ffffffffffffff, + 0xfffe3fffffffffe0, 0x07ffffff00007fff, 0xffff000000000000, 0x00000000003fffff, + 0x0000000000001fff, 0x3fffffffffff0000, 0x00000c00ffff1fff, 0x8ff07fffffffffff, + 0x0000ffffffffffff, 0xfffffffcff800000, 0x00ff7ffffffff9ff, 
0xff80000000000000, + 0x000000fffffff7bb, 0x000fffffffffffff, 0x28fc00000000002f, 0xffff07fffffffc00, + 0x1fffffff0007ffff, 0xfff7ffffffffffff, 0x7c00ffdf00008000, 0x007fffffffffffff, + 0xc47fffff00003fff, 0x7fffffffffffffff, 0x003cffff38000005, 0xffff7f7f007e7e7e, + 0xffff003ff7ffffff, 0x000007ffffffffff, 0xffff000fffffffff, 0x0ffffffffffff87f, + 0xffff3fffffffffff, 0x0000000003ffffff, 0x5f7ffdffe0f8007f, 0xffffffffffffffdb, + 0x0003ffffffffffff, 0xfffffffffff80000, 0x3fffffffffffffff, 0xffffffffffff0000, + 0xfffffffffffcffff, 0x0fff0000000000ff, 0xffdf000000000000, 0x1fffffffffffffff, + 0x07fffffe00000000, 0xffffffc007fffffe, 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 12, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 13, 14, + 15, 5, 5, 16, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, 4, 4, 2, 2, 2, + 2, 16, 17, 4, 4, 18, 19, 20, 21, 22, 4, 23, 4, 24, 25, 26, 27, 28, 29, 30, 4, 2, 31, 32, + 32, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 33, 34, 35, 32, 36, 2, 37, 38, 4, 39, 40, 41, + 42, 4, 4, 2, 43, 2, 44, 4, 4, 45, 46, 47, 48, 28, 4, 49, 4, 4, 4, 4, 4, 50, 51, 4, 4, 4, + 4, 4, 4, 4, 52, 4, 4, 4, 4, 53, 54, 55, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 56, 4, 2, 57, 2, 2, 2, 58, 4, 4, 4, 4, 
4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 57, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 59, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, + 2, 2, 2, 2, 52, 20, 4, 60, 16, 61, 62, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 63, 64, + 65, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 66, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 32, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 67, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 2, 68, 69, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 2, 70, 71, 72, 73, 74, 2, 2, 2, 2, 75, 76, 77, 78, 79, 80, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 81, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 2, 2, 2, 82, 2, 83, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 84, 85, + 86, 4, 4, 4, 4, 4, 4, 4, 4, 4, 87, 88, 89, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 90, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 10, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 91, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 92, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0xffffffff1fffffff, 0x000000000001ffff, + 0xffff0000ffffffff, 0x07ffffffffff07ff, 0xffffffff3fffffff, 0x00000000003eff0f, + 0xffff00003fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, 0x0000000fffffffff, + 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, 0x007fffff003fffff, + 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, 0xc0ffffffffffffff, + 0x000ffffffeeff06f, 0x1fffffff00000000, 0x000000001fffffff, 0x0000001ffffffeff, + 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, 0x00000000000001ff, + 0x0007ffffffffffff, 0x000000000000003f, 0x01fffffffffffffc, 0x000001ffffff0000, + 0x0047ffffffff0000, 0x000000001400001e, 0x409ffffffffbffff, 0xffff01ffbfffbd7f, + 0x000001ffffffffff, 0xe3edfdfffff99fef, 0x0000000fe081199f, 0x00000000000007bb, + 0x00000000000000b3, 0x7f3fffffffffffff, 0x000000003f000000, 0x7fffffffffffffff, + 0x0000000000000011, 0x000007ffe3ffffff, 0xffffffff00000000, 0x80000000ffffffff, + 
0x01ffffffffffffff, 0x7f7ffffffffffdff, 0xfffc000000000001, 0x007ffefffffcffff, + 0x0000000003ffffff, 0x00007fffffffffff, 0x000000000000000f, 0x000000000000007f, + 0x00003fffffff0000, 0xe0fffff80000000f, 0x000000000000ffff, 0x7fffffffffff001f, + 0x00000000fff80000, 0x0000000100000000, 0x00001fffffffffff, 0x0000000000000003, + 0x1fff07ffffffffff, 0x0000000043ff01ff, 0xffffffffffdfffff, 0xebffde64dfffffff, + 0xffffffffffffffef, 0x7bffffffdfdfe7bf, 0xfffffffffffdfc5f, 0xffffff3fffffffff, + 0xf7fffffff7fffffd, 0xffdfffffffdfffff, 0xffff7fffffff7fff, 0xfffffdfffffffdff, + 0x0000000000000ff7, 0x000007dbf9ffff7f, 0x000000000000001f, 0x000000000000008f, + 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, 0x0ffffbee0ffffbff, 0xffff000000000000, + 0xffff03ffffff03ff, 0x00000000000003ff, 0x00000000007fffff, 0x00000003ffffffff, + 0x000000003fffffff + ], + }; pub fn Alphabetic(c: char) -> bool { - super::bsearch_range_table(c, Alphabetic_table) + super::trie_lookup_range_table(c, Alphabetic_table) } - pub const Case_Ignorable_table: &'static [(char, char)] = &[ - ('\u{27}', '\u{27}'), ('\u{2e}', '\u{2e}'), ('\u{3a}', '\u{3a}'), ('\u{5e}', '\u{5e}'), - ('\u{60}', '\u{60}'), ('\u{a8}', '\u{a8}'), ('\u{ad}', '\u{ad}'), ('\u{af}', '\u{af}'), - ('\u{b4}', '\u{b4}'), ('\u{b7}', '\u{b8}'), ('\u{2b0}', '\u{36f}'), ('\u{374}', '\u{375}'), - ('\u{37a}', '\u{37a}'), ('\u{384}', '\u{385}'), ('\u{387}', '\u{387}'), ('\u{483}', - '\u{489}'), ('\u{559}', '\u{559}'), ('\u{591}', '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), - ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'), ('\u{5c7}', '\u{5c7}'), ('\u{5f4}', - '\u{5f4}'), ('\u{600}', '\u{605}'), ('\u{610}', '\u{61a}'), ('\u{61c}', '\u{61c}'), - ('\u{640}', '\u{640}'), ('\u{64b}', '\u{65f}'), ('\u{670}', '\u{670}'), ('\u{6d6}', - '\u{6dd}'), ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6ed}'), ('\u{70f}', '\u{70f}'), - ('\u{711}', '\u{711}'), ('\u{730}', '\u{74a}'), ('\u{7a6}', '\u{7b0}'), ('\u{7eb}', - '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), ('\u{816}', '\u{82d}'), 
('\u{859}', '\u{85b}'), - ('\u{8e3}', '\u{902}'), ('\u{93a}', '\u{93a}'), ('\u{93c}', '\u{93c}'), ('\u{941}', - '\u{948}'), ('\u{94d}', '\u{94d}'), ('\u{951}', '\u{957}'), ('\u{962}', '\u{963}'), - ('\u{971}', '\u{971}'), ('\u{981}', '\u{981}'), ('\u{9bc}', '\u{9bc}'), ('\u{9c1}', - '\u{9c4}'), ('\u{9cd}', '\u{9cd}'), ('\u{9e2}', '\u{9e3}'), ('\u{a01}', '\u{a02}'), - ('\u{a3c}', '\u{a3c}'), ('\u{a41}', '\u{a42}'), ('\u{a47}', '\u{a48}'), ('\u{a4b}', - '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a70}', '\u{a71}'), ('\u{a75}', '\u{a75}'), - ('\u{a81}', '\u{a82}'), ('\u{abc}', '\u{abc}'), ('\u{ac1}', '\u{ac5}'), ('\u{ac7}', - '\u{ac8}'), ('\u{acd}', '\u{acd}'), ('\u{ae2}', '\u{ae3}'), ('\u{b01}', '\u{b01}'), - ('\u{b3c}', '\u{b3c}'), ('\u{b3f}', '\u{b3f}'), ('\u{b41}', '\u{b44}'), ('\u{b4d}', - '\u{b4d}'), ('\u{b56}', '\u{b56}'), ('\u{b62}', '\u{b63}'), ('\u{b82}', '\u{b82}'), - ('\u{bc0}', '\u{bc0}'), ('\u{bcd}', '\u{bcd}'), ('\u{c00}', '\u{c00}'), ('\u{c3e}', - '\u{c40}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'), - ('\u{c62}', '\u{c63}'), ('\u{c81}', '\u{c81}'), ('\u{cbc}', '\u{cbc}'), ('\u{cbf}', - '\u{cbf}'), ('\u{cc6}', '\u{cc6}'), ('\u{ccc}', '\u{ccd}'), ('\u{ce2}', '\u{ce3}'), - ('\u{d01}', '\u{d01}'), ('\u{d41}', '\u{d44}'), ('\u{d4d}', '\u{d4d}'), ('\u{d62}', - '\u{d63}'), ('\u{dca}', '\u{dca}'), ('\u{dd2}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), - ('\u{e31}', '\u{e31}'), ('\u{e34}', '\u{e3a}'), ('\u{e46}', '\u{e4e}'), ('\u{eb1}', - '\u{eb1}'), ('\u{eb4}', '\u{eb9}'), ('\u{ebb}', '\u{ebc}'), ('\u{ec6}', '\u{ec6}'), - ('\u{ec8}', '\u{ecd}'), ('\u{f18}', '\u{f19}'), ('\u{f35}', '\u{f35}'), ('\u{f37}', - '\u{f37}'), ('\u{f39}', '\u{f39}'), ('\u{f71}', '\u{f7e}'), ('\u{f80}', '\u{f84}'), - ('\u{f86}', '\u{f87}'), ('\u{f8d}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), ('\u{fc6}', - '\u{fc6}'), ('\u{102d}', '\u{1030}'), ('\u{1032}', '\u{1037}'), ('\u{1039}', '\u{103a}'), - ('\u{103d}', '\u{103e}'), ('\u{1058}', '\u{1059}'), ('\u{105e}', 
'\u{1060}'), ('\u{1071}', - '\u{1074}'), ('\u{1082}', '\u{1082}'), ('\u{1085}', '\u{1086}'), ('\u{108d}', '\u{108d}'), - ('\u{109d}', '\u{109d}'), ('\u{10fc}', '\u{10fc}'), ('\u{135d}', '\u{135f}'), ('\u{1712}', - '\u{1714}'), ('\u{1732}', '\u{1734}'), ('\u{1752}', '\u{1753}'), ('\u{1772}', '\u{1773}'), - ('\u{17b4}', '\u{17b5}'), ('\u{17b7}', '\u{17bd}'), ('\u{17c6}', '\u{17c6}'), ('\u{17c9}', - '\u{17d3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dd}', '\u{17dd}'), ('\u{180b}', '\u{180e}'), - ('\u{1843}', '\u{1843}'), ('\u{18a9}', '\u{18a9}'), ('\u{1920}', '\u{1922}'), ('\u{1927}', - '\u{1928}'), ('\u{1932}', '\u{1932}'), ('\u{1939}', '\u{193b}'), ('\u{1a17}', '\u{1a18}'), - ('\u{1a1b}', '\u{1a1b}'), ('\u{1a56}', '\u{1a56}'), ('\u{1a58}', '\u{1a5e}'), ('\u{1a60}', - '\u{1a60}'), ('\u{1a62}', '\u{1a62}'), ('\u{1a65}', '\u{1a6c}'), ('\u{1a73}', '\u{1a7c}'), - ('\u{1a7f}', '\u{1a7f}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1ab0}', '\u{1abe}'), ('\u{1b00}', - '\u{1b03}'), ('\u{1b34}', '\u{1b34}'), ('\u{1b36}', '\u{1b3a}'), ('\u{1b3c}', '\u{1b3c}'), - ('\u{1b42}', '\u{1b42}'), ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1b81}'), ('\u{1ba2}', - '\u{1ba5}'), ('\u{1ba8}', '\u{1ba9}'), ('\u{1bab}', '\u{1bad}'), ('\u{1be6}', '\u{1be6}'), - ('\u{1be8}', '\u{1be9}'), ('\u{1bed}', '\u{1bed}'), ('\u{1bef}', '\u{1bf1}'), ('\u{1c2c}', - '\u{1c33}'), ('\u{1c36}', '\u{1c37}'), ('\u{1c78}', '\u{1c7d}'), ('\u{1cd0}', '\u{1cd2}'), - ('\u{1cd4}', '\u{1ce0}'), ('\u{1ce2}', '\u{1ce8}'), ('\u{1ced}', '\u{1ced}'), ('\u{1cf4}', - '\u{1cf4}'), ('\u{1cf8}', '\u{1cf9}'), ('\u{1d2c}', '\u{1d6a}'), ('\u{1d78}', '\u{1d78}'), - ('\u{1d9b}', '\u{1df5}'), ('\u{1dfc}', '\u{1dff}'), ('\u{1fbd}', '\u{1fbd}'), ('\u{1fbf}', - '\u{1fc1}'), ('\u{1fcd}', '\u{1fcf}'), ('\u{1fdd}', '\u{1fdf}'), ('\u{1fed}', '\u{1fef}'), - ('\u{1ffd}', '\u{1ffe}'), ('\u{200b}', '\u{200f}'), ('\u{2018}', '\u{2019}'), ('\u{2024}', - '\u{2024}'), ('\u{2027}', '\u{2027}'), ('\u{202a}', '\u{202e}'), ('\u{2060}', '\u{2064}'), - ('\u{2066}', 
'\u{206f}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', - '\u{209c}'), ('\u{20d0}', '\u{20f0}'), ('\u{2c7c}', '\u{2c7d}'), ('\u{2cef}', '\u{2cf1}'), - ('\u{2d6f}', '\u{2d6f}'), ('\u{2d7f}', '\u{2d7f}'), ('\u{2de0}', '\u{2dff}'), ('\u{2e2f}', - '\u{2e2f}'), ('\u{3005}', '\u{3005}'), ('\u{302a}', '\u{302d}'), ('\u{3031}', '\u{3035}'), - ('\u{303b}', '\u{303b}'), ('\u{3099}', '\u{309e}'), ('\u{30fc}', '\u{30fe}'), ('\u{a015}', - '\u{a015}'), ('\u{a4f8}', '\u{a4fd}'), ('\u{a60c}', '\u{a60c}'), ('\u{a66f}', '\u{a672}'), - ('\u{a674}', '\u{a67d}'), ('\u{a67f}', '\u{a67f}'), ('\u{a69c}', '\u{a69f}'), ('\u{a6f0}', - '\u{a6f1}'), ('\u{a700}', '\u{a721}'), ('\u{a770}', '\u{a770}'), ('\u{a788}', '\u{a78a}'), - ('\u{a7f8}', '\u{a7f9}'), ('\u{a802}', '\u{a802}'), ('\u{a806}', '\u{a806}'), ('\u{a80b}', - '\u{a80b}'), ('\u{a825}', '\u{a826}'), ('\u{a8c4}', '\u{a8c4}'), ('\u{a8e0}', '\u{a8f1}'), - ('\u{a926}', '\u{a92d}'), ('\u{a947}', '\u{a951}'), ('\u{a980}', '\u{a982}'), ('\u{a9b3}', - '\u{a9b3}'), ('\u{a9b6}', '\u{a9b9}'), ('\u{a9bc}', '\u{a9bc}'), ('\u{a9cf}', '\u{a9cf}'), - ('\u{a9e5}', '\u{a9e6}'), ('\u{aa29}', '\u{aa2e}'), ('\u{aa31}', '\u{aa32}'), ('\u{aa35}', - '\u{aa36}'), ('\u{aa43}', '\u{aa43}'), ('\u{aa4c}', '\u{aa4c}'), ('\u{aa70}', '\u{aa70}'), - ('\u{aa7c}', '\u{aa7c}'), ('\u{aab0}', '\u{aab0}'), ('\u{aab2}', '\u{aab4}'), ('\u{aab7}', - '\u{aab8}'), ('\u{aabe}', '\u{aabf}'), ('\u{aac1}', '\u{aac1}'), ('\u{aadd}', '\u{aadd}'), - ('\u{aaec}', '\u{aaed}'), ('\u{aaf3}', '\u{aaf4}'), ('\u{aaf6}', '\u{aaf6}'), ('\u{ab5b}', - '\u{ab5f}'), ('\u{abe5}', '\u{abe5}'), ('\u{abe8}', '\u{abe8}'), ('\u{abed}', '\u{abed}'), - ('\u{fb1e}', '\u{fb1e}'), ('\u{fbb2}', '\u{fbc1}'), ('\u{fe00}', '\u{fe0f}'), ('\u{fe13}', - '\u{fe13}'), ('\u{fe20}', '\u{fe2f}'), ('\u{fe52}', '\u{fe52}'), ('\u{fe55}', '\u{fe55}'), - ('\u{feff}', '\u{feff}'), ('\u{ff07}', '\u{ff07}'), ('\u{ff0e}', '\u{ff0e}'), ('\u{ff1a}', - '\u{ff1a}'), ('\u{ff3e}', '\u{ff3e}'), ('\u{ff40}', 
'\u{ff40}'), ('\u{ff70}', '\u{ff70}'), - ('\u{ff9e}', '\u{ff9f}'), ('\u{ffe3}', '\u{ffe3}'), ('\u{fff9}', '\u{fffb}'), ('\u{101fd}', - '\u{101fd}'), ('\u{102e0}', '\u{102e0}'), ('\u{10376}', '\u{1037a}'), ('\u{10a01}', - '\u{10a03}'), ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', '\u{10a0f}'), ('\u{10a38}', - '\u{10a3a}'), ('\u{10a3f}', '\u{10a3f}'), ('\u{10ae5}', '\u{10ae6}'), ('\u{11001}', - '\u{11001}'), ('\u{11038}', '\u{11046}'), ('\u{1107f}', '\u{11081}'), ('\u{110b3}', - '\u{110b6}'), ('\u{110b9}', '\u{110ba}'), ('\u{110bd}', '\u{110bd}'), ('\u{11100}', - '\u{11102}'), ('\u{11127}', '\u{1112b}'), ('\u{1112d}', '\u{11134}'), ('\u{11173}', - '\u{11173}'), ('\u{11180}', '\u{11181}'), ('\u{111b6}', '\u{111be}'), ('\u{111ca}', - '\u{111cc}'), ('\u{1122f}', '\u{11231}'), ('\u{11234}', '\u{11234}'), ('\u{11236}', - '\u{11237}'), ('\u{112df}', '\u{112df}'), ('\u{112e3}', '\u{112ea}'), ('\u{11300}', - '\u{11301}'), ('\u{1133c}', '\u{1133c}'), ('\u{11340}', '\u{11340}'), ('\u{11366}', - '\u{1136c}'), ('\u{11370}', '\u{11374}'), ('\u{114b3}', '\u{114b8}'), ('\u{114ba}', - '\u{114ba}'), ('\u{114bf}', '\u{114c0}'), ('\u{114c2}', '\u{114c3}'), ('\u{115b2}', - '\u{115b5}'), ('\u{115bc}', '\u{115bd}'), ('\u{115bf}', '\u{115c0}'), ('\u{115dc}', - '\u{115dd}'), ('\u{11633}', '\u{1163a}'), ('\u{1163d}', '\u{1163d}'), ('\u{1163f}', - '\u{11640}'), ('\u{116ab}', '\u{116ab}'), ('\u{116ad}', '\u{116ad}'), ('\u{116b0}', - '\u{116b5}'), ('\u{116b7}', '\u{116b7}'), ('\u{1171d}', '\u{1171f}'), ('\u{11722}', - '\u{11725}'), ('\u{11727}', '\u{1172b}'), ('\u{16af0}', '\u{16af4}'), ('\u{16b30}', - '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), ('\u{16f8f}', '\u{16f9f}'), ('\u{1bc9d}', - '\u{1bc9e}'), ('\u{1bca0}', '\u{1bca3}'), ('\u{1d167}', '\u{1d169}'), ('\u{1d173}', - '\u{1d182}'), ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', '\u{1d1ad}'), ('\u{1d242}', - '\u{1d244}'), ('\u{1da00}', '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'), ('\u{1da75}', - '\u{1da75}'), ('\u{1da84}', '\u{1da84}'), ('\u{1da9b}', 
'\u{1da9f}'), ('\u{1daa1}', - '\u{1daaf}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1f3fb}', '\u{1f3ff}'), ('\u{e0001}', - '\u{e0001}'), ('\u{e0020}', '\u{e007f}'), ('\u{e0100}', '\u{e01ef}') - ]; + pub const Case_Ignorable_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0400408000000000, 0x0000000140000000, 0x0190a10000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0xffff000000000000, 0xffffffffffffffff, + 0xffffffffffffffff, 0x0430ffffffffffff, 0x00000000000000b0, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x00000000000003f8, 0x0000000000000000, + 0x0000000000000000, 0x0000000002000000, 0xbffffffffffe0000, 0x00100000000000b6, + 0x0000000017ff003f, 0x00010000fffff801, 0x0000000000000000, 0x00003dffbfc00000, + 0xffff000000028000, 0x00000000000007ff, 0x0001ffc000000000, 0x043ff80000000000 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 10, 11, 12, 13, 14, 15, 16, 11, 17, 18, 7, 2, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2, 2, 2, 2, 2, 2, 2, 2, 2, 32, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 34, 35, 36, 37, 38, 39, 2, 40, 2, 2, 2, 41, 42, 43, 2, + 44, 45, 46, 47, 48, 49, 2, 50, 51, 52, 53, 54, 2, 2, 2, 2, 2, 2, 55, 56, 57, 58, 59, 60, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 61, 2, 62, 2, 63, 2, 64, 65, 2, 2, 2, 2, + 2, 2, 2, 66, 2, 67, 68, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 49, 2, 2, 2, 2, 70, 71, 72, 73, 74, 75, 76, 77, 78, 2, 2, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 2, 88, 2, 89, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 
2, 2, 2, 2, 2, 90, 2, 91, 92, 2, 2, 2, 2, 2, 2, 2, 2, 93, 94, 2, 95, + 96, 97, 98, 99 + ], + r3: &[ + 0x00003fffffc00000, 0x000000000e000000, 0x0000000000000000, 0xfffffffffff00000, + 0x1400000000000007, 0x0002000c00fe21fe, 0x1000000000000002, 0x0000000c0000201e, + 0x1000000000000006, 0x0023000000023986, 0x0000000c000021be, 0x9000000000000002, + 0x0000000c0040201e, 0x0000000000000004, 0x0000000000002001, 0xc000000000000001, + 0x0000000c00603dc1, 0x0000000c00003040, 0x0000000000000002, 0x00000000005c0400, + 0x07f2000000000000, 0x0000000000007fc0, 0x1bf2000000000000, 0x0000000000003f40, + 0x02a0000003000000, 0x7ffe000000000000, 0x1ffffffffeffe0df, 0x0000000000000040, + 0x66fde00000000000, 0x001e0001c3000000, 0x0000000020002064, 0x1000000000000000, + 0x00000000e0000000, 0x001c0000001c0000, 0x000c0000000c0000, 0x3fb0000000000000, + 0x00000000208ffe40, 0x0000000000007800, 0x0000000000000008, 0x0000020000000060, + 0x0e04018700000000, 0x0000000009800000, 0x9ff81fe57f400000, 0x7fff008000000000, + 0x17d000000000000f, 0x000ff80000000004, 0x00003b3c00000003, 0x0003a34000000000, + 0x00cff00000000000, 0x3f00000000000000, 0x031021fdfff70000, 0xfffff00000000000, + 0x010007ffffffffff, 0xfffffffff8000000, 0xf83fffffffffffff, 0xa000000000000000, + 0x6000e000e000e003, 0x00007c900300f800, 0x8002ffdf00000000, 0x000000001fff0000, + 0x0001ffffffff0000, 0x3000000000000000, 0x0003800000000000, 0x8000800000000000, + 0xffffffff00000000, 0x0000800000000000, 0x083e3c0000000020, 0x000000007e000000, + 0x7000000000000000, 0x0000000000200000, 0x0000000000001000, 0xbff7800000000000, + 0x00000000f0000000, 0x0003000000000000, 0x00000003ffffffff, 0x0001000000000000, + 0x0000000000000700, 0x0300000000000000, 0x0000006000000844, 0x0003ffff00000030, + 0x00003fc000000000, 0x000000000003ff80, 0x13c8000000000007, 0x0000006000008000, + 0x00667e0000000000, 0x1001000000001008, 0xc19d000000000000, 0x0058300020000002, + 0x00000000f8000000, 0x0000212000000000, 0x0000000040000000, 0xfffc000000000000, + 
0x0000000000000003, 0x0000ffff0008ffff, 0x0000000000240000, 0x8000000000000000, + 0x4000000004004080, 0x0001000000000001, 0x00000000c0000000, 0x0e00000800000000 + ], + r4: [ + 0, 1, 2, 2, 2, 2, 3, 2, 2, 2, 2, 4, 2, 5, 6, 7, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 6, 7, 8, 0, 9, 10, 11, 12, 13, 0, 0, 14, 15, 16, 0, 0, 17, 18, 19, 20, + 0, 0, 21, 22, 23, 24, 25, 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 27, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 29, 30, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, + 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 33, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 
35, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 38, 39, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, 0, 42, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 44, 45, + 0, 0, 45, 45, 45, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x2000000000000000, 0x0000000100000000, 0x07c0000000000000, + 0x870000000000f06e, 0x0000006000000000, 0xff00000000000002, 0x800000000000007f, + 0x2678000000000003, 0x001fef8000000007, 0x0008000000000000, 0x7fc0000000000003, + 0x0000000000001c00, 0x40d3800000000000, 0x000007f880000000, 0x1000000000000003, + 0x001f1fc000000001, 0xff00000000000000, 0x000000000000005c, 0x85f8000000000000, + 0x000000000000000d, 0xb03c000000000000, 0x0000000030000001, 0xa7f8000000000000, + 0x0000000000000001, 0x00bf280000000000, 0x00000fbce0000000, 0xbf7f000000000000, + 0x006dfcfffffc0000, 0x001f000000000000, 0x007f000000000000, 0x000000000000000f, + 0x00000000ffff8000, 0x0000000f60000000, 0xfff8038000000000, 0x00003c0000000fe7, + 0x000000000000001c, 0xf87fffffffffffff, 0x00201fffffffffff, 0x0000fffef8000010, + 0x000007dbf9ffff7f, 0x00000000007f0000, 0x00000000000007f0, 0xf800000000000000, + 0xffffffff00000002, 0xffffffffffffffff, 0x0000ffffffffffff + ], + }; pub fn Case_Ignorable(c: char) -> bool { - super::bsearch_range_table(c, Case_Ignorable_table) + super::trie_lookup_range_table(c, Case_Ignorable_table) } - pub const Cased_table: &'static [(char, char)] = &[ - ('\u{41}', 
'\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), - ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{1ba}'), - ('\u{1bc}', '\u{1bf}'), ('\u{1c4}', '\u{293}'), ('\u{295}', '\u{2b8}'), ('\u{2c0}', - '\u{2c1}'), ('\u{2e0}', '\u{2e4}'), ('\u{345}', '\u{345}'), ('\u{370}', '\u{373}'), - ('\u{376}', '\u{377}'), ('\u{37a}', '\u{37d}'), ('\u{37f}', '\u{37f}'), ('\u{386}', - '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), - ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{48a}', '\u{52f}'), ('\u{531}', - '\u{556}'), ('\u{561}', '\u{587}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), - ('\u{10cd}', '\u{10cd}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1d00}', - '\u{1dbf}'), ('\u{1e00}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), - ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', - '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), - ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', - '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), - ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', - '\u{207f}'), ('\u{2090}', '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), - ('\u{210a}', '\u{2113}'), ('\u{2115}', '\u{2115}'), ('\u{2119}', '\u{211d}'), ('\u{2124}', - '\u{2124}'), ('\u{2126}', '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{212d}'), - ('\u{212f}', '\u{2134}'), ('\u{2139}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}', - '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{217f}'), ('\u{2183}', '\u{2184}'), - ('\u{24b6}', '\u{24e9}'), ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', - '\u{2ce4}'), ('\u{2ceb}', '\u{2cee}'), ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), - 
('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{a640}', '\u{a66d}'), ('\u{a680}', - '\u{a69d}'), ('\u{a722}', '\u{a787}'), ('\u{a78b}', '\u{a78e}'), ('\u{a790}', '\u{a7ad}'), - ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f8}', '\u{a7fa}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', - '\u{ab65}'), ('\u{ab70}', '\u{abbf}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), - ('\u{ff21}', '\u{ff3a}'), ('\u{ff41}', '\u{ff5a}'), ('\u{10400}', '\u{1044f}'), - ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{118a0}', '\u{118df}'), - ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), - ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), - ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), - ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), - ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), - ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), - ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), - ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), - ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), - ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), - ('\u{1f130}', '\u{1f149}'), ('\u{1f150}', '\u{1f169}'), ('\u{1f170}', '\u{1f189}') - ]; + pub const Cased_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xf7ffffffffffffff, 0xfffffffffffffff0, + 0xffffffffffffffff, 0xffffffffffffffff, 0x01ffffffffefffff, 0x0000001f00000003, + 0x0000000000000000, 0xbccf000000000020, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 
0xfffffffe007fffff, 0x00000000000000ff, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 5, 5, 5, + 0, 5, 5, 5, 5, 6, 7, 8, 9, 0, 10, 11, 0, 12, 13, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 15, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 17, 18, 5, 19, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 22, + 0, 23, 5, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 27, 5, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 29, 30, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0xffffffff00000000, 0x00000000000020bf, 0x3f3fffffffffffff, + 0x00000000000001ff, 0xffffffffffffffff, 0xffffffff3f3fffff, 0x3fffffffaaff3f3f, + 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, 0x8002000000000000, 0x000000001fff0000, + 0xf21fbd503e2ffc84, 0xffffffff000043e0, 0x0000000000000018, 0xffc0000000000000, + 0x000003ffffffffff, 0xffff7fffffffffff, 0xffffffff7fffffff, 0x000c781fffffffff, + 0x000020bfffffffff, 0x00003fffffffffff, 0x000000003fffffff, 0xfffffffc00000000, + 0x00ff7fffffff78ff, 0x0700000000000000, 0xffff000000000000, 0xffff003ff7ffffff, + 0x0000000000f8007f, 0x07fffffe00000000, 0x0000000007fffffe + ], + r4: [ + 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, 2, 3, 4, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 8, 9, 10, 11, 12, 1, 1, 1, 1, 13, 14, 15, 16, 17, 18, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 3, 20, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 
0xffffffffffffffff, 0x000000000000ffff, 0xffff000000000000, + 0x0fffffffff0fffff, 0x0007ffffffffffff, 0xffffffff00000000, 0x00000000ffffffff, + 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, 0x7bffffffdfdfe7bf, + 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, 0xffdfffffffdfffff, + 0xffff7fffffff7fff, 0xfffffdfffffffdff, 0x0000000000000ff7, 0x000000000000000f, + 0xffff03ffffff03ff, 0x00000000000003ff + ], + }; pub fn Cased(c: char) -> bool { - super::bsearch_range_table(c, Cased_table) + super::trie_lookup_range_table(c, Cased_table) } - pub const Lowercase_table: &'static [(char, char)] = &[ - ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{ba}', '\u{ba}'), - ('\u{df}', '\u{f6}'), ('\u{f8}', '\u{ff}'), ('\u{101}', '\u{101}'), ('\u{103}', '\u{103}'), - ('\u{105}', '\u{105}'), ('\u{107}', '\u{107}'), ('\u{109}', '\u{109}'), ('\u{10b}', - '\u{10b}'), ('\u{10d}', '\u{10d}'), ('\u{10f}', '\u{10f}'), ('\u{111}', '\u{111}'), - ('\u{113}', '\u{113}'), ('\u{115}', '\u{115}'), ('\u{117}', '\u{117}'), ('\u{119}', - '\u{119}'), ('\u{11b}', '\u{11b}'), ('\u{11d}', '\u{11d}'), ('\u{11f}', '\u{11f}'), - ('\u{121}', '\u{121}'), ('\u{123}', '\u{123}'), ('\u{125}', '\u{125}'), ('\u{127}', - '\u{127}'), ('\u{129}', '\u{129}'), ('\u{12b}', '\u{12b}'), ('\u{12d}', '\u{12d}'), - ('\u{12f}', '\u{12f}'), ('\u{131}', '\u{131}'), ('\u{133}', '\u{133}'), ('\u{135}', - '\u{135}'), ('\u{137}', '\u{138}'), ('\u{13a}', '\u{13a}'), ('\u{13c}', '\u{13c}'), - ('\u{13e}', '\u{13e}'), ('\u{140}', '\u{140}'), ('\u{142}', '\u{142}'), ('\u{144}', - '\u{144}'), ('\u{146}', '\u{146}'), ('\u{148}', '\u{149}'), ('\u{14b}', '\u{14b}'), - ('\u{14d}', '\u{14d}'), ('\u{14f}', '\u{14f}'), ('\u{151}', '\u{151}'), ('\u{153}', - '\u{153}'), ('\u{155}', '\u{155}'), ('\u{157}', '\u{157}'), ('\u{159}', '\u{159}'), - ('\u{15b}', '\u{15b}'), ('\u{15d}', '\u{15d}'), ('\u{15f}', '\u{15f}'), ('\u{161}', - '\u{161}'), ('\u{163}', '\u{163}'), ('\u{165}', '\u{165}'), 
('\u{167}', '\u{167}'), - ('\u{169}', '\u{169}'), ('\u{16b}', '\u{16b}'), ('\u{16d}', '\u{16d}'), ('\u{16f}', - '\u{16f}'), ('\u{171}', '\u{171}'), ('\u{173}', '\u{173}'), ('\u{175}', '\u{175}'), - ('\u{177}', '\u{177}'), ('\u{17a}', '\u{17a}'), ('\u{17c}', '\u{17c}'), ('\u{17e}', - '\u{180}'), ('\u{183}', '\u{183}'), ('\u{185}', '\u{185}'), ('\u{188}', '\u{188}'), - ('\u{18c}', '\u{18d}'), ('\u{192}', '\u{192}'), ('\u{195}', '\u{195}'), ('\u{199}', - '\u{19b}'), ('\u{19e}', '\u{19e}'), ('\u{1a1}', '\u{1a1}'), ('\u{1a3}', '\u{1a3}'), - ('\u{1a5}', '\u{1a5}'), ('\u{1a8}', '\u{1a8}'), ('\u{1aa}', '\u{1ab}'), ('\u{1ad}', - '\u{1ad}'), ('\u{1b0}', '\u{1b0}'), ('\u{1b4}', '\u{1b4}'), ('\u{1b6}', '\u{1b6}'), - ('\u{1b9}', '\u{1ba}'), ('\u{1bd}', '\u{1bf}'), ('\u{1c6}', '\u{1c6}'), ('\u{1c9}', - '\u{1c9}'), ('\u{1cc}', '\u{1cc}'), ('\u{1ce}', '\u{1ce}'), ('\u{1d0}', '\u{1d0}'), - ('\u{1d2}', '\u{1d2}'), ('\u{1d4}', '\u{1d4}'), ('\u{1d6}', '\u{1d6}'), ('\u{1d8}', - '\u{1d8}'), ('\u{1da}', '\u{1da}'), ('\u{1dc}', '\u{1dd}'), ('\u{1df}', '\u{1df}'), - ('\u{1e1}', '\u{1e1}'), ('\u{1e3}', '\u{1e3}'), ('\u{1e5}', '\u{1e5}'), ('\u{1e7}', - '\u{1e7}'), ('\u{1e9}', '\u{1e9}'), ('\u{1eb}', '\u{1eb}'), ('\u{1ed}', '\u{1ed}'), - ('\u{1ef}', '\u{1f0}'), ('\u{1f3}', '\u{1f3}'), ('\u{1f5}', '\u{1f5}'), ('\u{1f9}', - '\u{1f9}'), ('\u{1fb}', '\u{1fb}'), ('\u{1fd}', '\u{1fd}'), ('\u{1ff}', '\u{1ff}'), - ('\u{201}', '\u{201}'), ('\u{203}', '\u{203}'), ('\u{205}', '\u{205}'), ('\u{207}', - '\u{207}'), ('\u{209}', '\u{209}'), ('\u{20b}', '\u{20b}'), ('\u{20d}', '\u{20d}'), - ('\u{20f}', '\u{20f}'), ('\u{211}', '\u{211}'), ('\u{213}', '\u{213}'), ('\u{215}', - '\u{215}'), ('\u{217}', '\u{217}'), ('\u{219}', '\u{219}'), ('\u{21b}', '\u{21b}'), - ('\u{21d}', '\u{21d}'), ('\u{21f}', '\u{21f}'), ('\u{221}', '\u{221}'), ('\u{223}', - '\u{223}'), ('\u{225}', '\u{225}'), ('\u{227}', '\u{227}'), ('\u{229}', '\u{229}'), - ('\u{22b}', '\u{22b}'), ('\u{22d}', '\u{22d}'), ('\u{22f}', '\u{22f}'), 
('\u{231}', - '\u{231}'), ('\u{233}', '\u{239}'), ('\u{23c}', '\u{23c}'), ('\u{23f}', '\u{240}'), - ('\u{242}', '\u{242}'), ('\u{247}', '\u{247}'), ('\u{249}', '\u{249}'), ('\u{24b}', - '\u{24b}'), ('\u{24d}', '\u{24d}'), ('\u{24f}', '\u{293}'), ('\u{295}', '\u{2b8}'), - ('\u{2c0}', '\u{2c1}'), ('\u{2e0}', '\u{2e4}'), ('\u{345}', '\u{345}'), ('\u{371}', - '\u{371}'), ('\u{373}', '\u{373}'), ('\u{377}', '\u{377}'), ('\u{37a}', '\u{37d}'), - ('\u{390}', '\u{390}'), ('\u{3ac}', '\u{3ce}'), ('\u{3d0}', '\u{3d1}'), ('\u{3d5}', - '\u{3d7}'), ('\u{3d9}', '\u{3d9}'), ('\u{3db}', '\u{3db}'), ('\u{3dd}', '\u{3dd}'), - ('\u{3df}', '\u{3df}'), ('\u{3e1}', '\u{3e1}'), ('\u{3e3}', '\u{3e3}'), ('\u{3e5}', - '\u{3e5}'), ('\u{3e7}', '\u{3e7}'), ('\u{3e9}', '\u{3e9}'), ('\u{3eb}', '\u{3eb}'), - ('\u{3ed}', '\u{3ed}'), ('\u{3ef}', '\u{3f3}'), ('\u{3f5}', '\u{3f5}'), ('\u{3f8}', - '\u{3f8}'), ('\u{3fb}', '\u{3fc}'), ('\u{430}', '\u{45f}'), ('\u{461}', '\u{461}'), - ('\u{463}', '\u{463}'), ('\u{465}', '\u{465}'), ('\u{467}', '\u{467}'), ('\u{469}', - '\u{469}'), ('\u{46b}', '\u{46b}'), ('\u{46d}', '\u{46d}'), ('\u{46f}', '\u{46f}'), - ('\u{471}', '\u{471}'), ('\u{473}', '\u{473}'), ('\u{475}', '\u{475}'), ('\u{477}', - '\u{477}'), ('\u{479}', '\u{479}'), ('\u{47b}', '\u{47b}'), ('\u{47d}', '\u{47d}'), - ('\u{47f}', '\u{47f}'), ('\u{481}', '\u{481}'), ('\u{48b}', '\u{48b}'), ('\u{48d}', - '\u{48d}'), ('\u{48f}', '\u{48f}'), ('\u{491}', '\u{491}'), ('\u{493}', '\u{493}'), - ('\u{495}', '\u{495}'), ('\u{497}', '\u{497}'), ('\u{499}', '\u{499}'), ('\u{49b}', - '\u{49b}'), ('\u{49d}', '\u{49d}'), ('\u{49f}', '\u{49f}'), ('\u{4a1}', '\u{4a1}'), - ('\u{4a3}', '\u{4a3}'), ('\u{4a5}', '\u{4a5}'), ('\u{4a7}', '\u{4a7}'), ('\u{4a9}', - '\u{4a9}'), ('\u{4ab}', '\u{4ab}'), ('\u{4ad}', '\u{4ad}'), ('\u{4af}', '\u{4af}'), - ('\u{4b1}', '\u{4b1}'), ('\u{4b3}', '\u{4b3}'), ('\u{4b5}', '\u{4b5}'), ('\u{4b7}', - '\u{4b7}'), ('\u{4b9}', '\u{4b9}'), ('\u{4bb}', '\u{4bb}'), ('\u{4bd}', '\u{4bd}'), - 
('\u{4bf}', '\u{4bf}'), ('\u{4c2}', '\u{4c2}'), ('\u{4c4}', '\u{4c4}'), ('\u{4c6}', - '\u{4c6}'), ('\u{4c8}', '\u{4c8}'), ('\u{4ca}', '\u{4ca}'), ('\u{4cc}', '\u{4cc}'), - ('\u{4ce}', '\u{4cf}'), ('\u{4d1}', '\u{4d1}'), ('\u{4d3}', '\u{4d3}'), ('\u{4d5}', - '\u{4d5}'), ('\u{4d7}', '\u{4d7}'), ('\u{4d9}', '\u{4d9}'), ('\u{4db}', '\u{4db}'), - ('\u{4dd}', '\u{4dd}'), ('\u{4df}', '\u{4df}'), ('\u{4e1}', '\u{4e1}'), ('\u{4e3}', - '\u{4e3}'), ('\u{4e5}', '\u{4e5}'), ('\u{4e7}', '\u{4e7}'), ('\u{4e9}', '\u{4e9}'), - ('\u{4eb}', '\u{4eb}'), ('\u{4ed}', '\u{4ed}'), ('\u{4ef}', '\u{4ef}'), ('\u{4f1}', - '\u{4f1}'), ('\u{4f3}', '\u{4f3}'), ('\u{4f5}', '\u{4f5}'), ('\u{4f7}', '\u{4f7}'), - ('\u{4f9}', '\u{4f9}'), ('\u{4fb}', '\u{4fb}'), ('\u{4fd}', '\u{4fd}'), ('\u{4ff}', - '\u{4ff}'), ('\u{501}', '\u{501}'), ('\u{503}', '\u{503}'), ('\u{505}', '\u{505}'), - ('\u{507}', '\u{507}'), ('\u{509}', '\u{509}'), ('\u{50b}', '\u{50b}'), ('\u{50d}', - '\u{50d}'), ('\u{50f}', '\u{50f}'), ('\u{511}', '\u{511}'), ('\u{513}', '\u{513}'), - ('\u{515}', '\u{515}'), ('\u{517}', '\u{517}'), ('\u{519}', '\u{519}'), ('\u{51b}', - '\u{51b}'), ('\u{51d}', '\u{51d}'), ('\u{51f}', '\u{51f}'), ('\u{521}', '\u{521}'), - ('\u{523}', '\u{523}'), ('\u{525}', '\u{525}'), ('\u{527}', '\u{527}'), ('\u{529}', - '\u{529}'), ('\u{52b}', '\u{52b}'), ('\u{52d}', '\u{52d}'), ('\u{52f}', '\u{52f}'), - ('\u{561}', '\u{587}'), ('\u{13f8}', '\u{13fd}'), ('\u{1d00}', '\u{1dbf}'), ('\u{1e01}', - '\u{1e01}'), ('\u{1e03}', '\u{1e03}'), ('\u{1e05}', '\u{1e05}'), ('\u{1e07}', '\u{1e07}'), - ('\u{1e09}', '\u{1e09}'), ('\u{1e0b}', '\u{1e0b}'), ('\u{1e0d}', '\u{1e0d}'), ('\u{1e0f}', - '\u{1e0f}'), ('\u{1e11}', '\u{1e11}'), ('\u{1e13}', '\u{1e13}'), ('\u{1e15}', '\u{1e15}'), - ('\u{1e17}', '\u{1e17}'), ('\u{1e19}', '\u{1e19}'), ('\u{1e1b}', '\u{1e1b}'), ('\u{1e1d}', - '\u{1e1d}'), ('\u{1e1f}', '\u{1e1f}'), ('\u{1e21}', '\u{1e21}'), ('\u{1e23}', '\u{1e23}'), - ('\u{1e25}', '\u{1e25}'), ('\u{1e27}', '\u{1e27}'), ('\u{1e29}', 
'\u{1e29}'), ('\u{1e2b}', - '\u{1e2b}'), ('\u{1e2d}', '\u{1e2d}'), ('\u{1e2f}', '\u{1e2f}'), ('\u{1e31}', '\u{1e31}'), - ('\u{1e33}', '\u{1e33}'), ('\u{1e35}', '\u{1e35}'), ('\u{1e37}', '\u{1e37}'), ('\u{1e39}', - '\u{1e39}'), ('\u{1e3b}', '\u{1e3b}'), ('\u{1e3d}', '\u{1e3d}'), ('\u{1e3f}', '\u{1e3f}'), - ('\u{1e41}', '\u{1e41}'), ('\u{1e43}', '\u{1e43}'), ('\u{1e45}', '\u{1e45}'), ('\u{1e47}', - '\u{1e47}'), ('\u{1e49}', '\u{1e49}'), ('\u{1e4b}', '\u{1e4b}'), ('\u{1e4d}', '\u{1e4d}'), - ('\u{1e4f}', '\u{1e4f}'), ('\u{1e51}', '\u{1e51}'), ('\u{1e53}', '\u{1e53}'), ('\u{1e55}', - '\u{1e55}'), ('\u{1e57}', '\u{1e57}'), ('\u{1e59}', '\u{1e59}'), ('\u{1e5b}', '\u{1e5b}'), - ('\u{1e5d}', '\u{1e5d}'), ('\u{1e5f}', '\u{1e5f}'), ('\u{1e61}', '\u{1e61}'), ('\u{1e63}', - '\u{1e63}'), ('\u{1e65}', '\u{1e65}'), ('\u{1e67}', '\u{1e67}'), ('\u{1e69}', '\u{1e69}'), - ('\u{1e6b}', '\u{1e6b}'), ('\u{1e6d}', '\u{1e6d}'), ('\u{1e6f}', '\u{1e6f}'), ('\u{1e71}', - '\u{1e71}'), ('\u{1e73}', '\u{1e73}'), ('\u{1e75}', '\u{1e75}'), ('\u{1e77}', '\u{1e77}'), - ('\u{1e79}', '\u{1e79}'), ('\u{1e7b}', '\u{1e7b}'), ('\u{1e7d}', '\u{1e7d}'), ('\u{1e7f}', - '\u{1e7f}'), ('\u{1e81}', '\u{1e81}'), ('\u{1e83}', '\u{1e83}'), ('\u{1e85}', '\u{1e85}'), - ('\u{1e87}', '\u{1e87}'), ('\u{1e89}', '\u{1e89}'), ('\u{1e8b}', '\u{1e8b}'), ('\u{1e8d}', - '\u{1e8d}'), ('\u{1e8f}', '\u{1e8f}'), ('\u{1e91}', '\u{1e91}'), ('\u{1e93}', '\u{1e93}'), - ('\u{1e95}', '\u{1e9d}'), ('\u{1e9f}', '\u{1e9f}'), ('\u{1ea1}', '\u{1ea1}'), ('\u{1ea3}', - '\u{1ea3}'), ('\u{1ea5}', '\u{1ea5}'), ('\u{1ea7}', '\u{1ea7}'), ('\u{1ea9}', '\u{1ea9}'), - ('\u{1eab}', '\u{1eab}'), ('\u{1ead}', '\u{1ead}'), ('\u{1eaf}', '\u{1eaf}'), ('\u{1eb1}', - '\u{1eb1}'), ('\u{1eb3}', '\u{1eb3}'), ('\u{1eb5}', '\u{1eb5}'), ('\u{1eb7}', '\u{1eb7}'), - ('\u{1eb9}', '\u{1eb9}'), ('\u{1ebb}', '\u{1ebb}'), ('\u{1ebd}', '\u{1ebd}'), ('\u{1ebf}', - '\u{1ebf}'), ('\u{1ec1}', '\u{1ec1}'), ('\u{1ec3}', '\u{1ec3}'), ('\u{1ec5}', '\u{1ec5}'), - ('\u{1ec7}', 
'\u{1ec7}'), ('\u{1ec9}', '\u{1ec9}'), ('\u{1ecb}', '\u{1ecb}'), ('\u{1ecd}', - '\u{1ecd}'), ('\u{1ecf}', '\u{1ecf}'), ('\u{1ed1}', '\u{1ed1}'), ('\u{1ed3}', '\u{1ed3}'), - ('\u{1ed5}', '\u{1ed5}'), ('\u{1ed7}', '\u{1ed7}'), ('\u{1ed9}', '\u{1ed9}'), ('\u{1edb}', - '\u{1edb}'), ('\u{1edd}', '\u{1edd}'), ('\u{1edf}', '\u{1edf}'), ('\u{1ee1}', '\u{1ee1}'), - ('\u{1ee3}', '\u{1ee3}'), ('\u{1ee5}', '\u{1ee5}'), ('\u{1ee7}', '\u{1ee7}'), ('\u{1ee9}', - '\u{1ee9}'), ('\u{1eeb}', '\u{1eeb}'), ('\u{1eed}', '\u{1eed}'), ('\u{1eef}', '\u{1eef}'), - ('\u{1ef1}', '\u{1ef1}'), ('\u{1ef3}', '\u{1ef3}'), ('\u{1ef5}', '\u{1ef5}'), ('\u{1ef7}', - '\u{1ef7}'), ('\u{1ef9}', '\u{1ef9}'), ('\u{1efb}', '\u{1efb}'), ('\u{1efd}', '\u{1efd}'), - ('\u{1eff}', '\u{1f07}'), ('\u{1f10}', '\u{1f15}'), ('\u{1f20}', '\u{1f27}'), ('\u{1f30}', - '\u{1f37}'), ('\u{1f40}', '\u{1f45}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f60}', '\u{1f67}'), - ('\u{1f70}', '\u{1f7d}'), ('\u{1f80}', '\u{1f87}'), ('\u{1f90}', '\u{1f97}'), ('\u{1fa0}', - '\u{1fa7}'), ('\u{1fb0}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fb7}'), ('\u{1fbe}', '\u{1fbe}'), - ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fc7}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', - '\u{1fd7}'), ('\u{1fe0}', '\u{1fe7}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ff7}'), - ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', '\u{209c}'), ('\u{210a}', - '\u{210a}'), ('\u{210e}', '\u{210f}'), ('\u{2113}', '\u{2113}'), ('\u{212f}', '\u{212f}'), - ('\u{2134}', '\u{2134}'), ('\u{2139}', '\u{2139}'), ('\u{213c}', '\u{213d}'), ('\u{2146}', - '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2170}', '\u{217f}'), ('\u{2184}', '\u{2184}'), - ('\u{24d0}', '\u{24e9}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c61}', '\u{2c61}'), ('\u{2c65}', - '\u{2c66}'), ('\u{2c68}', '\u{2c68}'), ('\u{2c6a}', '\u{2c6a}'), ('\u{2c6c}', '\u{2c6c}'), - ('\u{2c71}', '\u{2c71}'), ('\u{2c73}', '\u{2c74}'), ('\u{2c76}', '\u{2c7d}'), ('\u{2c81}', - '\u{2c81}'), ('\u{2c83}', '\u{2c83}'), ('\u{2c85}', 
'\u{2c85}'), ('\u{2c87}', '\u{2c87}'), - ('\u{2c89}', '\u{2c89}'), ('\u{2c8b}', '\u{2c8b}'), ('\u{2c8d}', '\u{2c8d}'), ('\u{2c8f}', - '\u{2c8f}'), ('\u{2c91}', '\u{2c91}'), ('\u{2c93}', '\u{2c93}'), ('\u{2c95}', '\u{2c95}'), - ('\u{2c97}', '\u{2c97}'), ('\u{2c99}', '\u{2c99}'), ('\u{2c9b}', '\u{2c9b}'), ('\u{2c9d}', - '\u{2c9d}'), ('\u{2c9f}', '\u{2c9f}'), ('\u{2ca1}', '\u{2ca1}'), ('\u{2ca3}', '\u{2ca3}'), - ('\u{2ca5}', '\u{2ca5}'), ('\u{2ca7}', '\u{2ca7}'), ('\u{2ca9}', '\u{2ca9}'), ('\u{2cab}', - '\u{2cab}'), ('\u{2cad}', '\u{2cad}'), ('\u{2caf}', '\u{2caf}'), ('\u{2cb1}', '\u{2cb1}'), - ('\u{2cb3}', '\u{2cb3}'), ('\u{2cb5}', '\u{2cb5}'), ('\u{2cb7}', '\u{2cb7}'), ('\u{2cb9}', - '\u{2cb9}'), ('\u{2cbb}', '\u{2cbb}'), ('\u{2cbd}', '\u{2cbd}'), ('\u{2cbf}', '\u{2cbf}'), - ('\u{2cc1}', '\u{2cc1}'), ('\u{2cc3}', '\u{2cc3}'), ('\u{2cc5}', '\u{2cc5}'), ('\u{2cc7}', - '\u{2cc7}'), ('\u{2cc9}', '\u{2cc9}'), ('\u{2ccb}', '\u{2ccb}'), ('\u{2ccd}', '\u{2ccd}'), - ('\u{2ccf}', '\u{2ccf}'), ('\u{2cd1}', '\u{2cd1}'), ('\u{2cd3}', '\u{2cd3}'), ('\u{2cd5}', - '\u{2cd5}'), ('\u{2cd7}', '\u{2cd7}'), ('\u{2cd9}', '\u{2cd9}'), ('\u{2cdb}', '\u{2cdb}'), - ('\u{2cdd}', '\u{2cdd}'), ('\u{2cdf}', '\u{2cdf}'), ('\u{2ce1}', '\u{2ce1}'), ('\u{2ce3}', - '\u{2ce4}'), ('\u{2cec}', '\u{2cec}'), ('\u{2cee}', '\u{2cee}'), ('\u{2cf3}', '\u{2cf3}'), - ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{a641}', - '\u{a641}'), ('\u{a643}', '\u{a643}'), ('\u{a645}', '\u{a645}'), ('\u{a647}', '\u{a647}'), - ('\u{a649}', '\u{a649}'), ('\u{a64b}', '\u{a64b}'), ('\u{a64d}', '\u{a64d}'), ('\u{a64f}', - '\u{a64f}'), ('\u{a651}', '\u{a651}'), ('\u{a653}', '\u{a653}'), ('\u{a655}', '\u{a655}'), - ('\u{a657}', '\u{a657}'), ('\u{a659}', '\u{a659}'), ('\u{a65b}', '\u{a65b}'), ('\u{a65d}', - '\u{a65d}'), ('\u{a65f}', '\u{a65f}'), ('\u{a661}', '\u{a661}'), ('\u{a663}', '\u{a663}'), - ('\u{a665}', '\u{a665}'), ('\u{a667}', '\u{a667}'), ('\u{a669}', '\u{a669}'), ('\u{a66b}', - 
'\u{a66b}'), ('\u{a66d}', '\u{a66d}'), ('\u{a681}', '\u{a681}'), ('\u{a683}', '\u{a683}'), - ('\u{a685}', '\u{a685}'), ('\u{a687}', '\u{a687}'), ('\u{a689}', '\u{a689}'), ('\u{a68b}', - '\u{a68b}'), ('\u{a68d}', '\u{a68d}'), ('\u{a68f}', '\u{a68f}'), ('\u{a691}', '\u{a691}'), - ('\u{a693}', '\u{a693}'), ('\u{a695}', '\u{a695}'), ('\u{a697}', '\u{a697}'), ('\u{a699}', - '\u{a699}'), ('\u{a69b}', '\u{a69d}'), ('\u{a723}', '\u{a723}'), ('\u{a725}', '\u{a725}'), - ('\u{a727}', '\u{a727}'), ('\u{a729}', '\u{a729}'), ('\u{a72b}', '\u{a72b}'), ('\u{a72d}', - '\u{a72d}'), ('\u{a72f}', '\u{a731}'), ('\u{a733}', '\u{a733}'), ('\u{a735}', '\u{a735}'), - ('\u{a737}', '\u{a737}'), ('\u{a739}', '\u{a739}'), ('\u{a73b}', '\u{a73b}'), ('\u{a73d}', - '\u{a73d}'), ('\u{a73f}', '\u{a73f}'), ('\u{a741}', '\u{a741}'), ('\u{a743}', '\u{a743}'), - ('\u{a745}', '\u{a745}'), ('\u{a747}', '\u{a747}'), ('\u{a749}', '\u{a749}'), ('\u{a74b}', - '\u{a74b}'), ('\u{a74d}', '\u{a74d}'), ('\u{a74f}', '\u{a74f}'), ('\u{a751}', '\u{a751}'), - ('\u{a753}', '\u{a753}'), ('\u{a755}', '\u{a755}'), ('\u{a757}', '\u{a757}'), ('\u{a759}', - '\u{a759}'), ('\u{a75b}', '\u{a75b}'), ('\u{a75d}', '\u{a75d}'), ('\u{a75f}', '\u{a75f}'), - ('\u{a761}', '\u{a761}'), ('\u{a763}', '\u{a763}'), ('\u{a765}', '\u{a765}'), ('\u{a767}', - '\u{a767}'), ('\u{a769}', '\u{a769}'), ('\u{a76b}', '\u{a76b}'), ('\u{a76d}', '\u{a76d}'), - ('\u{a76f}', '\u{a778}'), ('\u{a77a}', '\u{a77a}'), ('\u{a77c}', '\u{a77c}'), ('\u{a77f}', - '\u{a77f}'), ('\u{a781}', '\u{a781}'), ('\u{a783}', '\u{a783}'), ('\u{a785}', '\u{a785}'), - ('\u{a787}', '\u{a787}'), ('\u{a78c}', '\u{a78c}'), ('\u{a78e}', '\u{a78e}'), ('\u{a791}', - '\u{a791}'), ('\u{a793}', '\u{a795}'), ('\u{a797}', '\u{a797}'), ('\u{a799}', '\u{a799}'), - ('\u{a79b}', '\u{a79b}'), ('\u{a79d}', '\u{a79d}'), ('\u{a79f}', '\u{a79f}'), ('\u{a7a1}', - '\u{a7a1}'), ('\u{a7a3}', '\u{a7a3}'), ('\u{a7a5}', '\u{a7a5}'), ('\u{a7a7}', '\u{a7a7}'), - ('\u{a7a9}', '\u{a7a9}'), ('\u{a7b5}', 
'\u{a7b5}'), ('\u{a7b7}', '\u{a7b7}'), ('\u{a7f8}', - '\u{a7fa}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abbf}'), - ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{ff41}', '\u{ff5a}'), ('\u{10428}', - '\u{1044f}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{118c0}', '\u{118df}'), ('\u{1d41a}', - '\u{1d433}'), ('\u{1d44e}', '\u{1d454}'), ('\u{1d456}', '\u{1d467}'), ('\u{1d482}', - '\u{1d49b}'), ('\u{1d4b6}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', - '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d4cf}'), ('\u{1d4ea}', '\u{1d503}'), ('\u{1d51e}', - '\u{1d537}'), ('\u{1d552}', '\u{1d56b}'), ('\u{1d586}', '\u{1d59f}'), ('\u{1d5ba}', - '\u{1d5d3}'), ('\u{1d5ee}', '\u{1d607}'), ('\u{1d622}', '\u{1d63b}'), ('\u{1d656}', - '\u{1d66f}'), ('\u{1d68a}', '\u{1d6a5}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', - '\u{1d6e1}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d71b}'), ('\u{1d736}', - '\u{1d74e}'), ('\u{1d750}', '\u{1d755}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', - '\u{1d78f}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7c9}'), ('\u{1d7cb}', - '\u{1d7cb}') - ]; + pub const Lowercase_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe00000000, 0x0420040000000000, 0xff7fffff80000000, + 0x55aaaaaaaaaaaaaa, 0xd4aaaaaaaaaaab55, 0xe6512d2a4e243129, 0xaa29aaaab5555240, + 0x93faaaaaaaaaaaaa, 0xffffffffffffaa85, 0x01ffffffffefffff, 0x0000001f00000003, + 0x0000000000000000, 0x3c8a000000000020, 0xfffff00000010000, 0x192faaaaaae37fff, + 0xffff000000000000, 0xaaaaaaaaffffffff, 0xaaaaaaaaaaaaa802, 0xaaaaaaaaaaaad554, + 0x0000aaaaaaaaaaaa, 0xfffffffe00000000, 0x00000000000000ff, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 3, 3, 3, + 0, 4, 4, 5, 4, 6, 7, 8, 9, 0, 10, 11, 0, 12, 13, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 16, 17, 4, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 0, + 22, 23, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 26, 3, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 28, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0x3f00000000000000, 0x00000000000001ff, 0xffffffffffffffff, + 0xaaaaaaaaaaaaaaaa, 0xaaaaaaaabfeaaaaa, 0x00ff00ff003f00ff, 0x3fff00ff00ff003f, + 0x40df00ff00ff00ff, 0x00dc00ff00cf00dc, 0x8002000000000000, 0x000000001fff0000, + 0x321080000008c400, 0xffff0000000043c0, 0x0000000000000010, 0x000003ffffff0000, + 0xffff000000000000, 0x3fda15627fffffff, 0x0008501aaaaaaaaa, 0x000020bfffffffff, + 0x00002aaaaaaaaaaa, 0x000000003aaaaaaa, 0xaaabaaa800000000, 0x95ffaaaaaaaaaaaa, + 0x00a002aaaaba50aa, 0x0700000000000000, 0xffff003ff7ffffff, 0x0000000000f8007f, + 0x0000000007fffffe + ], + r4: [ + 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 
2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0xffffff0000000000, 0x000000000000ffff, 0x0fffffffff000000, + 0x0007ffffffffffff, 0x00000000ffffffff, 0x000ffffffc000000, 0x000000ffffdfc000, + 0xebc000000ffffffc, 0xfffffc000000ffef, 0x00ffffffc000000f, 0x00000ffffffc0000, + 0xfc000000ffffffc0, 0xffffc000000fffff, 0x0ffffffc000000ff, 0x0000ffffffc00000, + 0x0000003ffffffc00, 0xf0000003f7fffffc, 0xffc000000fdfffff, 0xffff0000003f7fff, + 0xfffffc000000fdff, 0x0000000000000bf7, 0xfffffffc00000000, 0x000000000000000f + ], + }; pub fn Lowercase(c: char) -> bool { - super::bsearch_range_table(c, Lowercase_table) + super::trie_lookup_range_table(c, 
Lowercase_table) } - pub const Uppercase_table: &'static [(char, char)] = &[ - ('\u{41}', '\u{5a}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{de}'), ('\u{100}', '\u{100}'), - ('\u{102}', '\u{102}'), ('\u{104}', '\u{104}'), ('\u{106}', '\u{106}'), ('\u{108}', - '\u{108}'), ('\u{10a}', '\u{10a}'), ('\u{10c}', '\u{10c}'), ('\u{10e}', '\u{10e}'), - ('\u{110}', '\u{110}'), ('\u{112}', '\u{112}'), ('\u{114}', '\u{114}'), ('\u{116}', - '\u{116}'), ('\u{118}', '\u{118}'), ('\u{11a}', '\u{11a}'), ('\u{11c}', '\u{11c}'), - ('\u{11e}', '\u{11e}'), ('\u{120}', '\u{120}'), ('\u{122}', '\u{122}'), ('\u{124}', - '\u{124}'), ('\u{126}', '\u{126}'), ('\u{128}', '\u{128}'), ('\u{12a}', '\u{12a}'), - ('\u{12c}', '\u{12c}'), ('\u{12e}', '\u{12e}'), ('\u{130}', '\u{130}'), ('\u{132}', - '\u{132}'), ('\u{134}', '\u{134}'), ('\u{136}', '\u{136}'), ('\u{139}', '\u{139}'), - ('\u{13b}', '\u{13b}'), ('\u{13d}', '\u{13d}'), ('\u{13f}', '\u{13f}'), ('\u{141}', - '\u{141}'), ('\u{143}', '\u{143}'), ('\u{145}', '\u{145}'), ('\u{147}', '\u{147}'), - ('\u{14a}', '\u{14a}'), ('\u{14c}', '\u{14c}'), ('\u{14e}', '\u{14e}'), ('\u{150}', - '\u{150}'), ('\u{152}', '\u{152}'), ('\u{154}', '\u{154}'), ('\u{156}', '\u{156}'), - ('\u{158}', '\u{158}'), ('\u{15a}', '\u{15a}'), ('\u{15c}', '\u{15c}'), ('\u{15e}', - '\u{15e}'), ('\u{160}', '\u{160}'), ('\u{162}', '\u{162}'), ('\u{164}', '\u{164}'), - ('\u{166}', '\u{166}'), ('\u{168}', '\u{168}'), ('\u{16a}', '\u{16a}'), ('\u{16c}', - '\u{16c}'), ('\u{16e}', '\u{16e}'), ('\u{170}', '\u{170}'), ('\u{172}', '\u{172}'), - ('\u{174}', '\u{174}'), ('\u{176}', '\u{176}'), ('\u{178}', '\u{179}'), ('\u{17b}', - '\u{17b}'), ('\u{17d}', '\u{17d}'), ('\u{181}', '\u{182}'), ('\u{184}', '\u{184}'), - ('\u{186}', '\u{187}'), ('\u{189}', '\u{18b}'), ('\u{18e}', '\u{191}'), ('\u{193}', - '\u{194}'), ('\u{196}', '\u{198}'), ('\u{19c}', '\u{19d}'), ('\u{19f}', '\u{1a0}'), - ('\u{1a2}', '\u{1a2}'), ('\u{1a4}', '\u{1a4}'), ('\u{1a6}', '\u{1a7}'), ('\u{1a9}', - '\u{1a9}'), 
('\u{1ac}', '\u{1ac}'), ('\u{1ae}', '\u{1af}'), ('\u{1b1}', '\u{1b3}'), - ('\u{1b5}', '\u{1b5}'), ('\u{1b7}', '\u{1b8}'), ('\u{1bc}', '\u{1bc}'), ('\u{1c4}', - '\u{1c4}'), ('\u{1c7}', '\u{1c7}'), ('\u{1ca}', '\u{1ca}'), ('\u{1cd}', '\u{1cd}'), - ('\u{1cf}', '\u{1cf}'), ('\u{1d1}', '\u{1d1}'), ('\u{1d3}', '\u{1d3}'), ('\u{1d5}', - '\u{1d5}'), ('\u{1d7}', '\u{1d7}'), ('\u{1d9}', '\u{1d9}'), ('\u{1db}', '\u{1db}'), - ('\u{1de}', '\u{1de}'), ('\u{1e0}', '\u{1e0}'), ('\u{1e2}', '\u{1e2}'), ('\u{1e4}', - '\u{1e4}'), ('\u{1e6}', '\u{1e6}'), ('\u{1e8}', '\u{1e8}'), ('\u{1ea}', '\u{1ea}'), - ('\u{1ec}', '\u{1ec}'), ('\u{1ee}', '\u{1ee}'), ('\u{1f1}', '\u{1f1}'), ('\u{1f4}', - '\u{1f4}'), ('\u{1f6}', '\u{1f8}'), ('\u{1fa}', '\u{1fa}'), ('\u{1fc}', '\u{1fc}'), - ('\u{1fe}', '\u{1fe}'), ('\u{200}', '\u{200}'), ('\u{202}', '\u{202}'), ('\u{204}', - '\u{204}'), ('\u{206}', '\u{206}'), ('\u{208}', '\u{208}'), ('\u{20a}', '\u{20a}'), - ('\u{20c}', '\u{20c}'), ('\u{20e}', '\u{20e}'), ('\u{210}', '\u{210}'), ('\u{212}', - '\u{212}'), ('\u{214}', '\u{214}'), ('\u{216}', '\u{216}'), ('\u{218}', '\u{218}'), - ('\u{21a}', '\u{21a}'), ('\u{21c}', '\u{21c}'), ('\u{21e}', '\u{21e}'), ('\u{220}', - '\u{220}'), ('\u{222}', '\u{222}'), ('\u{224}', '\u{224}'), ('\u{226}', '\u{226}'), - ('\u{228}', '\u{228}'), ('\u{22a}', '\u{22a}'), ('\u{22c}', '\u{22c}'), ('\u{22e}', - '\u{22e}'), ('\u{230}', '\u{230}'), ('\u{232}', '\u{232}'), ('\u{23a}', '\u{23b}'), - ('\u{23d}', '\u{23e}'), ('\u{241}', '\u{241}'), ('\u{243}', '\u{246}'), ('\u{248}', - '\u{248}'), ('\u{24a}', '\u{24a}'), ('\u{24c}', '\u{24c}'), ('\u{24e}', '\u{24e}'), - ('\u{370}', '\u{370}'), ('\u{372}', '\u{372}'), ('\u{376}', '\u{376}'), ('\u{37f}', - '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}', '\u{38c}'), - ('\u{38e}', '\u{38f}'), ('\u{391}', '\u{3a1}'), ('\u{3a3}', '\u{3ab}'), ('\u{3cf}', - '\u{3cf}'), ('\u{3d2}', '\u{3d4}'), ('\u{3d8}', '\u{3d8}'), ('\u{3da}', '\u{3da}'), - ('\u{3dc}', '\u{3dc}'), 
('\u{3de}', '\u{3de}'), ('\u{3e0}', '\u{3e0}'), ('\u{3e2}', - '\u{3e2}'), ('\u{3e4}', '\u{3e4}'), ('\u{3e6}', '\u{3e6}'), ('\u{3e8}', '\u{3e8}'), - ('\u{3ea}', '\u{3ea}'), ('\u{3ec}', '\u{3ec}'), ('\u{3ee}', '\u{3ee}'), ('\u{3f4}', - '\u{3f4}'), ('\u{3f7}', '\u{3f7}'), ('\u{3f9}', '\u{3fa}'), ('\u{3fd}', '\u{42f}'), - ('\u{460}', '\u{460}'), ('\u{462}', '\u{462}'), ('\u{464}', '\u{464}'), ('\u{466}', - '\u{466}'), ('\u{468}', '\u{468}'), ('\u{46a}', '\u{46a}'), ('\u{46c}', '\u{46c}'), - ('\u{46e}', '\u{46e}'), ('\u{470}', '\u{470}'), ('\u{472}', '\u{472}'), ('\u{474}', - '\u{474}'), ('\u{476}', '\u{476}'), ('\u{478}', '\u{478}'), ('\u{47a}', '\u{47a}'), - ('\u{47c}', '\u{47c}'), ('\u{47e}', '\u{47e}'), ('\u{480}', '\u{480}'), ('\u{48a}', - '\u{48a}'), ('\u{48c}', '\u{48c}'), ('\u{48e}', '\u{48e}'), ('\u{490}', '\u{490}'), - ('\u{492}', '\u{492}'), ('\u{494}', '\u{494}'), ('\u{496}', '\u{496}'), ('\u{498}', - '\u{498}'), ('\u{49a}', '\u{49a}'), ('\u{49c}', '\u{49c}'), ('\u{49e}', '\u{49e}'), - ('\u{4a0}', '\u{4a0}'), ('\u{4a2}', '\u{4a2}'), ('\u{4a4}', '\u{4a4}'), ('\u{4a6}', - '\u{4a6}'), ('\u{4a8}', '\u{4a8}'), ('\u{4aa}', '\u{4aa}'), ('\u{4ac}', '\u{4ac}'), - ('\u{4ae}', '\u{4ae}'), ('\u{4b0}', '\u{4b0}'), ('\u{4b2}', '\u{4b2}'), ('\u{4b4}', - '\u{4b4}'), ('\u{4b6}', '\u{4b6}'), ('\u{4b8}', '\u{4b8}'), ('\u{4ba}', '\u{4ba}'), - ('\u{4bc}', '\u{4bc}'), ('\u{4be}', '\u{4be}'), ('\u{4c0}', '\u{4c1}'), ('\u{4c3}', - '\u{4c3}'), ('\u{4c5}', '\u{4c5}'), ('\u{4c7}', '\u{4c7}'), ('\u{4c9}', '\u{4c9}'), - ('\u{4cb}', '\u{4cb}'), ('\u{4cd}', '\u{4cd}'), ('\u{4d0}', '\u{4d0}'), ('\u{4d2}', - '\u{4d2}'), ('\u{4d4}', '\u{4d4}'), ('\u{4d6}', '\u{4d6}'), ('\u{4d8}', '\u{4d8}'), - ('\u{4da}', '\u{4da}'), ('\u{4dc}', '\u{4dc}'), ('\u{4de}', '\u{4de}'), ('\u{4e0}', - '\u{4e0}'), ('\u{4e2}', '\u{4e2}'), ('\u{4e4}', '\u{4e4}'), ('\u{4e6}', '\u{4e6}'), - ('\u{4e8}', '\u{4e8}'), ('\u{4ea}', '\u{4ea}'), ('\u{4ec}', '\u{4ec}'), ('\u{4ee}', - '\u{4ee}'), ('\u{4f0}', '\u{4f0}'), 
('\u{4f2}', '\u{4f2}'), ('\u{4f4}', '\u{4f4}'), - ('\u{4f6}', '\u{4f6}'), ('\u{4f8}', '\u{4f8}'), ('\u{4fa}', '\u{4fa}'), ('\u{4fc}', - '\u{4fc}'), ('\u{4fe}', '\u{4fe}'), ('\u{500}', '\u{500}'), ('\u{502}', '\u{502}'), - ('\u{504}', '\u{504}'), ('\u{506}', '\u{506}'), ('\u{508}', '\u{508}'), ('\u{50a}', - '\u{50a}'), ('\u{50c}', '\u{50c}'), ('\u{50e}', '\u{50e}'), ('\u{510}', '\u{510}'), - ('\u{512}', '\u{512}'), ('\u{514}', '\u{514}'), ('\u{516}', '\u{516}'), ('\u{518}', - '\u{518}'), ('\u{51a}', '\u{51a}'), ('\u{51c}', '\u{51c}'), ('\u{51e}', '\u{51e}'), - ('\u{520}', '\u{520}'), ('\u{522}', '\u{522}'), ('\u{524}', '\u{524}'), ('\u{526}', - '\u{526}'), ('\u{528}', '\u{528}'), ('\u{52a}', '\u{52a}'), ('\u{52c}', '\u{52c}'), - ('\u{52e}', '\u{52e}'), ('\u{531}', '\u{556}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', - '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), ('\u{13a0}', '\u{13f5}'), ('\u{1e00}', '\u{1e00}'), - ('\u{1e02}', '\u{1e02}'), ('\u{1e04}', '\u{1e04}'), ('\u{1e06}', '\u{1e06}'), ('\u{1e08}', - '\u{1e08}'), ('\u{1e0a}', '\u{1e0a}'), ('\u{1e0c}', '\u{1e0c}'), ('\u{1e0e}', '\u{1e0e}'), - ('\u{1e10}', '\u{1e10}'), ('\u{1e12}', '\u{1e12}'), ('\u{1e14}', '\u{1e14}'), ('\u{1e16}', - '\u{1e16}'), ('\u{1e18}', '\u{1e18}'), ('\u{1e1a}', '\u{1e1a}'), ('\u{1e1c}', '\u{1e1c}'), - ('\u{1e1e}', '\u{1e1e}'), ('\u{1e20}', '\u{1e20}'), ('\u{1e22}', '\u{1e22}'), ('\u{1e24}', - '\u{1e24}'), ('\u{1e26}', '\u{1e26}'), ('\u{1e28}', '\u{1e28}'), ('\u{1e2a}', '\u{1e2a}'), - ('\u{1e2c}', '\u{1e2c}'), ('\u{1e2e}', '\u{1e2e}'), ('\u{1e30}', '\u{1e30}'), ('\u{1e32}', - '\u{1e32}'), ('\u{1e34}', '\u{1e34}'), ('\u{1e36}', '\u{1e36}'), ('\u{1e38}', '\u{1e38}'), - ('\u{1e3a}', '\u{1e3a}'), ('\u{1e3c}', '\u{1e3c}'), ('\u{1e3e}', '\u{1e3e}'), ('\u{1e40}', - '\u{1e40}'), ('\u{1e42}', '\u{1e42}'), ('\u{1e44}', '\u{1e44}'), ('\u{1e46}', '\u{1e46}'), - ('\u{1e48}', '\u{1e48}'), ('\u{1e4a}', '\u{1e4a}'), ('\u{1e4c}', '\u{1e4c}'), ('\u{1e4e}', - '\u{1e4e}'), ('\u{1e50}', '\u{1e50}'), ('\u{1e52}', 
'\u{1e52}'), ('\u{1e54}', '\u{1e54}'), - ('\u{1e56}', '\u{1e56}'), ('\u{1e58}', '\u{1e58}'), ('\u{1e5a}', '\u{1e5a}'), ('\u{1e5c}', - '\u{1e5c}'), ('\u{1e5e}', '\u{1e5e}'), ('\u{1e60}', '\u{1e60}'), ('\u{1e62}', '\u{1e62}'), - ('\u{1e64}', '\u{1e64}'), ('\u{1e66}', '\u{1e66}'), ('\u{1e68}', '\u{1e68}'), ('\u{1e6a}', - '\u{1e6a}'), ('\u{1e6c}', '\u{1e6c}'), ('\u{1e6e}', '\u{1e6e}'), ('\u{1e70}', '\u{1e70}'), - ('\u{1e72}', '\u{1e72}'), ('\u{1e74}', '\u{1e74}'), ('\u{1e76}', '\u{1e76}'), ('\u{1e78}', - '\u{1e78}'), ('\u{1e7a}', '\u{1e7a}'), ('\u{1e7c}', '\u{1e7c}'), ('\u{1e7e}', '\u{1e7e}'), - ('\u{1e80}', '\u{1e80}'), ('\u{1e82}', '\u{1e82}'), ('\u{1e84}', '\u{1e84}'), ('\u{1e86}', - '\u{1e86}'), ('\u{1e88}', '\u{1e88}'), ('\u{1e8a}', '\u{1e8a}'), ('\u{1e8c}', '\u{1e8c}'), - ('\u{1e8e}', '\u{1e8e}'), ('\u{1e90}', '\u{1e90}'), ('\u{1e92}', '\u{1e92}'), ('\u{1e94}', - '\u{1e94}'), ('\u{1e9e}', '\u{1e9e}'), ('\u{1ea0}', '\u{1ea0}'), ('\u{1ea2}', '\u{1ea2}'), - ('\u{1ea4}', '\u{1ea4}'), ('\u{1ea6}', '\u{1ea6}'), ('\u{1ea8}', '\u{1ea8}'), ('\u{1eaa}', - '\u{1eaa}'), ('\u{1eac}', '\u{1eac}'), ('\u{1eae}', '\u{1eae}'), ('\u{1eb0}', '\u{1eb0}'), - ('\u{1eb2}', '\u{1eb2}'), ('\u{1eb4}', '\u{1eb4}'), ('\u{1eb6}', '\u{1eb6}'), ('\u{1eb8}', - '\u{1eb8}'), ('\u{1eba}', '\u{1eba}'), ('\u{1ebc}', '\u{1ebc}'), ('\u{1ebe}', '\u{1ebe}'), - ('\u{1ec0}', '\u{1ec0}'), ('\u{1ec2}', '\u{1ec2}'), ('\u{1ec4}', '\u{1ec4}'), ('\u{1ec6}', - '\u{1ec6}'), ('\u{1ec8}', '\u{1ec8}'), ('\u{1eca}', '\u{1eca}'), ('\u{1ecc}', '\u{1ecc}'), - ('\u{1ece}', '\u{1ece}'), ('\u{1ed0}', '\u{1ed0}'), ('\u{1ed2}', '\u{1ed2}'), ('\u{1ed4}', - '\u{1ed4}'), ('\u{1ed6}', '\u{1ed6}'), ('\u{1ed8}', '\u{1ed8}'), ('\u{1eda}', '\u{1eda}'), - ('\u{1edc}', '\u{1edc}'), ('\u{1ede}', '\u{1ede}'), ('\u{1ee0}', '\u{1ee0}'), ('\u{1ee2}', - '\u{1ee2}'), ('\u{1ee4}', '\u{1ee4}'), ('\u{1ee6}', '\u{1ee6}'), ('\u{1ee8}', '\u{1ee8}'), - ('\u{1eea}', '\u{1eea}'), ('\u{1eec}', '\u{1eec}'), ('\u{1eee}', '\u{1eee}'), ('\u{1ef0}', - 
'\u{1ef0}'), ('\u{1ef2}', '\u{1ef2}'), ('\u{1ef4}', '\u{1ef4}'), ('\u{1ef6}', '\u{1ef6}'), - ('\u{1ef8}', '\u{1ef8}'), ('\u{1efa}', '\u{1efa}'), ('\u{1efc}', '\u{1efc}'), ('\u{1efe}', - '\u{1efe}'), ('\u{1f08}', '\u{1f0f}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f28}', '\u{1f2f}'), - ('\u{1f38}', '\u{1f3f}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', - '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f5f}'), ('\u{1f68}', '\u{1f6f}'), - ('\u{1fb8}', '\u{1fbb}'), ('\u{1fc8}', '\u{1fcb}'), ('\u{1fd8}', '\u{1fdb}'), ('\u{1fe8}', - '\u{1fec}'), ('\u{1ff8}', '\u{1ffb}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), - ('\u{210b}', '\u{210d}'), ('\u{2110}', '\u{2112}'), ('\u{2115}', '\u{2115}'), ('\u{2119}', - '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'), ('\u{2128}', '\u{2128}'), - ('\u{212a}', '\u{212d}'), ('\u{2130}', '\u{2133}'), ('\u{213e}', '\u{213f}'), ('\u{2145}', - '\u{2145}'), ('\u{2160}', '\u{216f}'), ('\u{2183}', '\u{2183}'), ('\u{24b6}', '\u{24cf}'), - ('\u{2c00}', '\u{2c2e}'), ('\u{2c60}', '\u{2c60}'), ('\u{2c62}', '\u{2c64}'), ('\u{2c67}', - '\u{2c67}'), ('\u{2c69}', '\u{2c69}'), ('\u{2c6b}', '\u{2c6b}'), ('\u{2c6d}', '\u{2c70}'), - ('\u{2c72}', '\u{2c72}'), ('\u{2c75}', '\u{2c75}'), ('\u{2c7e}', '\u{2c80}'), ('\u{2c82}', - '\u{2c82}'), ('\u{2c84}', '\u{2c84}'), ('\u{2c86}', '\u{2c86}'), ('\u{2c88}', '\u{2c88}'), - ('\u{2c8a}', '\u{2c8a}'), ('\u{2c8c}', '\u{2c8c}'), ('\u{2c8e}', '\u{2c8e}'), ('\u{2c90}', - '\u{2c90}'), ('\u{2c92}', '\u{2c92}'), ('\u{2c94}', '\u{2c94}'), ('\u{2c96}', '\u{2c96}'), - ('\u{2c98}', '\u{2c98}'), ('\u{2c9a}', '\u{2c9a}'), ('\u{2c9c}', '\u{2c9c}'), ('\u{2c9e}', - '\u{2c9e}'), ('\u{2ca0}', '\u{2ca0}'), ('\u{2ca2}', '\u{2ca2}'), ('\u{2ca4}', '\u{2ca4}'), - ('\u{2ca6}', '\u{2ca6}'), ('\u{2ca8}', '\u{2ca8}'), ('\u{2caa}', '\u{2caa}'), ('\u{2cac}', - '\u{2cac}'), ('\u{2cae}', '\u{2cae}'), ('\u{2cb0}', '\u{2cb0}'), ('\u{2cb2}', '\u{2cb2}'), - ('\u{2cb4}', '\u{2cb4}'), ('\u{2cb6}', 
'\u{2cb6}'), ('\u{2cb8}', '\u{2cb8}'), ('\u{2cba}', - '\u{2cba}'), ('\u{2cbc}', '\u{2cbc}'), ('\u{2cbe}', '\u{2cbe}'), ('\u{2cc0}', '\u{2cc0}'), - ('\u{2cc2}', '\u{2cc2}'), ('\u{2cc4}', '\u{2cc4}'), ('\u{2cc6}', '\u{2cc6}'), ('\u{2cc8}', - '\u{2cc8}'), ('\u{2cca}', '\u{2cca}'), ('\u{2ccc}', '\u{2ccc}'), ('\u{2cce}', '\u{2cce}'), - ('\u{2cd0}', '\u{2cd0}'), ('\u{2cd2}', '\u{2cd2}'), ('\u{2cd4}', '\u{2cd4}'), ('\u{2cd6}', - '\u{2cd6}'), ('\u{2cd8}', '\u{2cd8}'), ('\u{2cda}', '\u{2cda}'), ('\u{2cdc}', '\u{2cdc}'), - ('\u{2cde}', '\u{2cde}'), ('\u{2ce0}', '\u{2ce0}'), ('\u{2ce2}', '\u{2ce2}'), ('\u{2ceb}', - '\u{2ceb}'), ('\u{2ced}', '\u{2ced}'), ('\u{2cf2}', '\u{2cf2}'), ('\u{a640}', '\u{a640}'), - ('\u{a642}', '\u{a642}'), ('\u{a644}', '\u{a644}'), ('\u{a646}', '\u{a646}'), ('\u{a648}', - '\u{a648}'), ('\u{a64a}', '\u{a64a}'), ('\u{a64c}', '\u{a64c}'), ('\u{a64e}', '\u{a64e}'), - ('\u{a650}', '\u{a650}'), ('\u{a652}', '\u{a652}'), ('\u{a654}', '\u{a654}'), ('\u{a656}', - '\u{a656}'), ('\u{a658}', '\u{a658}'), ('\u{a65a}', '\u{a65a}'), ('\u{a65c}', '\u{a65c}'), - ('\u{a65e}', '\u{a65e}'), ('\u{a660}', '\u{a660}'), ('\u{a662}', '\u{a662}'), ('\u{a664}', - '\u{a664}'), ('\u{a666}', '\u{a666}'), ('\u{a668}', '\u{a668}'), ('\u{a66a}', '\u{a66a}'), - ('\u{a66c}', '\u{a66c}'), ('\u{a680}', '\u{a680}'), ('\u{a682}', '\u{a682}'), ('\u{a684}', - '\u{a684}'), ('\u{a686}', '\u{a686}'), ('\u{a688}', '\u{a688}'), ('\u{a68a}', '\u{a68a}'), - ('\u{a68c}', '\u{a68c}'), ('\u{a68e}', '\u{a68e}'), ('\u{a690}', '\u{a690}'), ('\u{a692}', - '\u{a692}'), ('\u{a694}', '\u{a694}'), ('\u{a696}', '\u{a696}'), ('\u{a698}', '\u{a698}'), - ('\u{a69a}', '\u{a69a}'), ('\u{a722}', '\u{a722}'), ('\u{a724}', '\u{a724}'), ('\u{a726}', - '\u{a726}'), ('\u{a728}', '\u{a728}'), ('\u{a72a}', '\u{a72a}'), ('\u{a72c}', '\u{a72c}'), - ('\u{a72e}', '\u{a72e}'), ('\u{a732}', '\u{a732}'), ('\u{a734}', '\u{a734}'), ('\u{a736}', - '\u{a736}'), ('\u{a738}', '\u{a738}'), ('\u{a73a}', '\u{a73a}'), ('\u{a73c}', 
'\u{a73c}'), - ('\u{a73e}', '\u{a73e}'), ('\u{a740}', '\u{a740}'), ('\u{a742}', '\u{a742}'), ('\u{a744}', - '\u{a744}'), ('\u{a746}', '\u{a746}'), ('\u{a748}', '\u{a748}'), ('\u{a74a}', '\u{a74a}'), - ('\u{a74c}', '\u{a74c}'), ('\u{a74e}', '\u{a74e}'), ('\u{a750}', '\u{a750}'), ('\u{a752}', - '\u{a752}'), ('\u{a754}', '\u{a754}'), ('\u{a756}', '\u{a756}'), ('\u{a758}', '\u{a758}'), - ('\u{a75a}', '\u{a75a}'), ('\u{a75c}', '\u{a75c}'), ('\u{a75e}', '\u{a75e}'), ('\u{a760}', - '\u{a760}'), ('\u{a762}', '\u{a762}'), ('\u{a764}', '\u{a764}'), ('\u{a766}', '\u{a766}'), - ('\u{a768}', '\u{a768}'), ('\u{a76a}', '\u{a76a}'), ('\u{a76c}', '\u{a76c}'), ('\u{a76e}', - '\u{a76e}'), ('\u{a779}', '\u{a779}'), ('\u{a77b}', '\u{a77b}'), ('\u{a77d}', '\u{a77e}'), - ('\u{a780}', '\u{a780}'), ('\u{a782}', '\u{a782}'), ('\u{a784}', '\u{a784}'), ('\u{a786}', - '\u{a786}'), ('\u{a78b}', '\u{a78b}'), ('\u{a78d}', '\u{a78d}'), ('\u{a790}', '\u{a790}'), - ('\u{a792}', '\u{a792}'), ('\u{a796}', '\u{a796}'), ('\u{a798}', '\u{a798}'), ('\u{a79a}', - '\u{a79a}'), ('\u{a79c}', '\u{a79c}'), ('\u{a79e}', '\u{a79e}'), ('\u{a7a0}', '\u{a7a0}'), - ('\u{a7a2}', '\u{a7a2}'), ('\u{a7a4}', '\u{a7a4}'), ('\u{a7a6}', '\u{a7a6}'), ('\u{a7a8}', - '\u{a7a8}'), ('\u{a7aa}', '\u{a7ad}'), ('\u{a7b0}', '\u{a7b4}'), ('\u{a7b6}', '\u{a7b6}'), - ('\u{ff21}', '\u{ff3a}'), ('\u{10400}', '\u{10427}'), ('\u{10c80}', '\u{10cb2}'), - ('\u{118a0}', '\u{118bf}'), ('\u{1d400}', '\u{1d419}'), ('\u{1d434}', '\u{1d44d}'), - ('\u{1d468}', '\u{1d481}'), ('\u{1d49c}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), - ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), - ('\u{1d4ae}', '\u{1d4b5}'), ('\u{1d4d0}', '\u{1d4e9}'), ('\u{1d504}', '\u{1d505}'), - ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), - ('\u{1d538}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), - ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d56c}', 
'\u{1d585}'), - ('\u{1d5a0}', '\u{1d5b9}'), ('\u{1d5d4}', '\u{1d5ed}'), ('\u{1d608}', '\u{1d621}'), - ('\u{1d63c}', '\u{1d655}'), ('\u{1d670}', '\u{1d689}'), ('\u{1d6a8}', '\u{1d6c0}'), - ('\u{1d6e2}', '\u{1d6fa}'), ('\u{1d71c}', '\u{1d734}'), ('\u{1d756}', '\u{1d76e}'), - ('\u{1d790}', '\u{1d7a8}'), ('\u{1d7ca}', '\u{1d7ca}'), ('\u{1f130}', '\u{1f149}'), - ('\u{1f150}', '\u{1f169}'), ('\u{1f170}', '\u{1f189}') - ]; + pub const Uppercase_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x0000000007fffffe, 0x0000000000000000, 0x000000007f7fffff, + 0xaa55555555555555, 0x2b555555555554aa, 0x11aed2d5b1dbced6, 0x55d255554aaaa490, + 0x6c05555555555555, 0x000000000000557a, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x8045000000000000, 0x00000ffbfffed740, 0xe6905555551c8000, + 0x0000ffffffffffff, 0x5555555500000000, 0x5555555555555401, 0x5555555555552aab, + 0xfffe555555555555, 0x00000000007fffff, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 4, 5, 4, 6, 7, 8, 9, 0, 0, 0, 0, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, + 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 15, 16, 4, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 18, 19, 0, + 20, 21, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 23, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0xffffffff00000000, 0x00000000000020bf, 0x003fffffffffffff, + 0x5555555555555555, 0x5555555540155555, 0xff00ff003f00ff00, 0x0000ff00aa003f00, + 0x0f00000000000000, 0x0f001f000f000f00, 0xc00f3d503e273884, 0x0000ffff00000020, + 0x0000000000000008, 0xffc0000000000000, 0x000000000000ffff, 0x00007fffffffffff, + 0xc025ea9d00000000, 0x0004280555555555, 0x0000155555555555, 0x0000000005555555, + 0x5554555400000000, 0x6a00555555555555, 0x005f7d5555452855, 0x07fffffe00000000 + ], + r4: [ + 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 23, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000, 0x000000ffffffffff, 0xffff000000000000, 0x00000000000fffff, + 0x0007ffffffffffff, 0xffffffff00000000, 0xfff0000003ffffff, 0xffffff0000003fff, + 0x003fde64d0000003, 0x000003ffffff0000, 0x7b0000001fdfe7b0, 0xfffff0000001fc5f, + 0x03ffffff0000003f, 0x00003ffffff00000, 0xf0000003ffffff00, 0xffff0000003fffff, + 0xffffff00000003ff, 0x07fffffc00000001, 0x001ffffff0000000, 0x00007fffffc00000, + 0x000001ffffff0000, 0x0000000000000400, 0x00000003ffffffff, 0xffff03ffffff03ff, + 0x00000000000003ff + ], + }; pub fn Uppercase(c: char) -> bool { - super::bsearch_range_table(c, Uppercase_table) + super::trie_lookup_range_table(c, Uppercase_table) } - pub const XID_Continue_table: &'static [(char, char)] = &[ - ('\u{30}', '\u{39}'), ('\u{41}', '\u{5a}'), ('\u{5f}', '\u{5f}'), ('\u{61}', '\u{7a}'), - ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{b7}', '\u{b7}'), ('\u{ba}', '\u{ba}'), - ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), ('\u{2c6}', '\u{2d1}'), - ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', '\u{2ee}'), ('\u{300}', - '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), ('\u{37f}', '\u{37f}'), - ('\u{386}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', 
'\u{3a1}'), ('\u{3a3}', - '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{483}', '\u{487}'), ('\u{48a}', '\u{52f}'), - ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', '\u{587}'), ('\u{591}', - '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'), - ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{610}', - '\u{61a}'), ('\u{620}', '\u{669}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'), - ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), ('\u{710}', - '\u{74a}'), ('\u{74d}', '\u{7b1}'), ('\u{7c0}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{8a0}', '\u{8b4}'), ('\u{8e3}', - '\u{963}'), ('\u{966}', '\u{96f}'), ('\u{971}', '\u{983}'), ('\u{985}', '\u{98c}'), - ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', - '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bc}', '\u{9c4}'), ('\u{9c7}', '\u{9c8}'), - ('\u{9cb}', '\u{9ce}'), ('\u{9d7}', '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', - '\u{9e3}'), ('\u{9e6}', '\u{9f1}'), ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), - ('\u{a0f}', '\u{a10}'), ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', - '\u{a33}'), ('\u{a35}', '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), - ('\u{a3e}', '\u{a42}'), ('\u{a47}', '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', - '\u{a51}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a66}', '\u{a75}'), - ('\u{a81}', '\u{a83}'), ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', - '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), - ('\u{abc}', '\u{ac5}'), ('\u{ac7}', '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', - '\u{ad0}'), ('\u{ae0}', '\u{ae3}'), ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{af9}'), - ('\u{b01}', '\u{b03}'), ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', - '\u{b28}'), ('\u{b2a}', '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', 
'\u{b39}'), - ('\u{b3c}', '\u{b44}'), ('\u{b47}', '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', - '\u{b57}'), ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), - ('\u{b71}', '\u{b71}'), ('\u{b82}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', - '\u{b90}'), ('\u{b92}', '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), - ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', - '\u{bb9}'), ('\u{bbe}', '\u{bc2}'), ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), - ('\u{bd0}', '\u{bd0}'), ('\u{bd7}', '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', - '\u{c03}'), ('\u{c05}', '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), - ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c44}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', - '\u{c4d}'), ('\u{c55}', '\u{c56}'), ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c63}'), - ('\u{c66}', '\u{c6f}'), ('\u{c81}', '\u{c83}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', - '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), - ('\u{cbc}', '\u{cc4}'), ('\u{cc6}', '\u{cc8}'), ('\u{cca}', '\u{ccd}'), ('\u{cd5}', - '\u{cd6}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', '\u{ce3}'), ('\u{ce6}', '\u{cef}'), - ('\u{cf1}', '\u{cf2}'), ('\u{d01}', '\u{d03}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', - '\u{d10}'), ('\u{d12}', '\u{d3a}'), ('\u{d3d}', '\u{d44}'), ('\u{d46}', '\u{d48}'), - ('\u{d4a}', '\u{d4e}'), ('\u{d57}', '\u{d57}'), ('\u{d5f}', '\u{d63}'), ('\u{d66}', - '\u{d6f}'), ('\u{d7a}', '\u{d7f}'), ('\u{d82}', '\u{d83}'), ('\u{d85}', '\u{d96}'), - ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', - '\u{dc6}'), ('\u{dca}', '\u{dca}'), ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), - ('\u{dd8}', '\u{ddf}'), ('\u{de6}', '\u{def}'), ('\u{df2}', '\u{df3}'), ('\u{e01}', - '\u{e3a}'), ('\u{e40}', '\u{e4e}'), ('\u{e50}', '\u{e59}'), ('\u{e81}', '\u{e82}'), - ('\u{e84}', '\u{e84}'), ('\u{e87}', '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', - 
'\u{e8d}'), ('\u{e94}', '\u{e97}'), ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), - ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', - '\u{eb9}'), ('\u{ebb}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), - ('\u{ec8}', '\u{ecd}'), ('\u{ed0}', '\u{ed9}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', - '\u{f00}'), ('\u{f18}', '\u{f19}'), ('\u{f20}', '\u{f29}'), ('\u{f35}', '\u{f35}'), - ('\u{f37}', '\u{f37}'), ('\u{f39}', '\u{f39}'), ('\u{f3e}', '\u{f47}'), ('\u{f49}', - '\u{f6c}'), ('\u{f71}', '\u{f84}'), ('\u{f86}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), - ('\u{fc6}', '\u{fc6}'), ('\u{1000}', '\u{1049}'), ('\u{1050}', '\u{109d}'), ('\u{10a0}', - '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), - ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}', '\u{1256}'), ('\u{1258}', - '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'), ('\u{128a}', '\u{128d}'), - ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}', '\u{12be}'), ('\u{12c0}', - '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', '\u{1310}'), - ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{135d}', '\u{135f}'), ('\u{1369}', - '\u{1371}'), ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), - ('\u{1401}', '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', - '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1714}'), - ('\u{1720}', '\u{1734}'), ('\u{1740}', '\u{1753}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', - '\u{1770}'), ('\u{1772}', '\u{1773}'), ('\u{1780}', '\u{17d3}'), ('\u{17d7}', '\u{17d7}'), - ('\u{17dc}', '\u{17dd}'), ('\u{17e0}', '\u{17e9}'), ('\u{180b}', '\u{180d}'), ('\u{1810}', - '\u{1819}'), ('\u{1820}', '\u{1877}'), ('\u{1880}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), - ('\u{1900}', '\u{191e}'), ('\u{1920}', '\u{192b}'), ('\u{1930}', '\u{193b}'), ('\u{1946}', - 
'\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), - ('\u{19d0}', '\u{19da}'), ('\u{1a00}', '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), ('\u{1a60}', - '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'), ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}', '\u{1aa7}'), - ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}', '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'), ('\u{1b6b}', - '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'), ('\u{1c00}', '\u{1c37}'), ('\u{1c40}', '\u{1c49}'), - ('\u{1c4d}', '\u{1c7d}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cf6}'), ('\u{1cf8}', - '\u{1cf9}'), ('\u{1d00}', '\u{1df5}'), ('\u{1dfc}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), - ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', - '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), - ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', - '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), - ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', - '\u{2040}'), ('\u{2054}', '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), - ('\u{2090}', '\u{209c}'), ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', - '\u{20f0}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), - ('\u{2115}', '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', - '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), - ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', - '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), - ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', - '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), - ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', 
'\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', - '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), - ('\u{2de0}', '\u{2dff}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', - '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), - ('\u{309d}', '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', - '\u{312d}'), ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), - ('\u{3400}', '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', - '\u{a4fd}'), ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), - ('\u{a674}', '\u{a67d}'), ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', - '\u{a788}'), ('\u{a78b}', '\u{a7ad}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', '\u{a827}'), - ('\u{a840}', '\u{a873}'), ('\u{a880}', '\u{a8c4}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', - '\u{a8f7}'), ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a900}', '\u{a92d}'), - ('\u{a930}', '\u{a953}'), ('\u{a960}', '\u{a97c}'), ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', - '\u{a9d9}'), ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}', '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), - ('\u{aa50}', '\u{aa59}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', - '\u{aadd}'), ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), - ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', - '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abea}'), - ('\u{abec}', '\u{abed}'), ('\u{abf0}', '\u{abf9}'), ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', - '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), - ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', - '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', 
'\u{fb41}'), - ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', - '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), - ('\u{fe00}', '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'), ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', - '\u{fe4f}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), - ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', - '\u{fefc}'), ('\u{ff10}', '\u{ff19}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), - ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', - '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'), - ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', '\u{1003d}'), - ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', '\u{100fa}'), - ('\u{10140}', '\u{10174}'), ('\u{101fd}', '\u{101fd}'), ('\u{10280}', '\u{1029c}'), - ('\u{102a0}', '\u{102d0}'), ('\u{102e0}', '\u{102e0}'), ('\u{10300}', '\u{1031f}'), - ('\u{10330}', '\u{1034a}'), ('\u{10350}', '\u{1037a}'), ('\u{10380}', '\u{1039d}'), - ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), - ('\u{10400}', '\u{1049d}'), ('\u{104a0}', '\u{104a9}'), ('\u{10500}', '\u{10527}'), - ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), - ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), - ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), - ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), - ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), - ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), - ('\u{10a00}', '\u{10a03}'), ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', '\u{10a13}'), - ('\u{10a15}', '\u{10a17}'), 
('\u{10a19}', '\u{10a33}'), ('\u{10a38}', '\u{10a3a}'), - ('\u{10a3f}', '\u{10a3f}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), - ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}', '\u{10b35}'), - ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), - ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), - ('\u{11000}', '\u{11046}'), ('\u{11066}', '\u{1106f}'), ('\u{1107f}', '\u{110ba}'), - ('\u{110d0}', '\u{110e8}'), ('\u{110f0}', '\u{110f9}'), ('\u{11100}', '\u{11134}'), - ('\u{11136}', '\u{1113f}'), ('\u{11150}', '\u{11173}'), ('\u{11176}', '\u{11176}'), - ('\u{11180}', '\u{111c4}'), ('\u{111ca}', '\u{111cc}'), ('\u{111d0}', '\u{111da}'), - ('\u{111dc}', '\u{111dc}'), ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{11237}'), - ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), - ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'), - ('\u{112f0}', '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'), - ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'), - ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133c}', '\u{11344}'), - ('\u{11347}', '\u{11348}'), ('\u{1134b}', '\u{1134d}'), ('\u{11350}', '\u{11350}'), - ('\u{11357}', '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'), - ('\u{11370}', '\u{11374}'), ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), - ('\u{114d0}', '\u{114d9}'), ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), - ('\u{115d8}', '\u{115dd}'), ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), - ('\u{11650}', '\u{11659}'), ('\u{11680}', '\u{116b7}'), ('\u{116c0}', '\u{116c9}'), - ('\u{11700}', '\u{11719}'), ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), - ('\u{118a0}', '\u{118e9}'), ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), - ('\u{12000}', '\u{12399}'), ('\u{12400}', 
'\u{1246e}'), ('\u{12480}', '\u{12543}'), - ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), - ('\u{16a40}', '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'), - ('\u{16af0}', '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), - ('\u{16b50}', '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), - ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f7e}'), ('\u{16f8f}', '\u{16f9f}'), - ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), - ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'), - ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}', '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'), - ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'), - ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), - ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), - ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), - ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), - ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), - ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), - ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), - ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), - ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), - ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), - ('\u{1d7ce}', '\u{1d7ff}'), ('\u{1da00}', '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'), - ('\u{1da75}', '\u{1da75}'), ('\u{1da84}', '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'), - ('\u{1daa1}', '\u{1daaf}'), ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), - ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'), 
('\u{1ee21}', '\u{1ee22}'), - ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), - ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), - ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), - ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), - ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), - ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), - ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), - ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), - ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), - ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), - ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'), ('\u{2b740}', '\u{2b81d}'), - ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}'), ('\u{e0100}', '\u{e01ef}') - ]; + pub const XID_Continue_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x03ff000000000000, 0x07fffffe87fffffe, 0x04a0040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0xffffffffffffffff, 0xb8dfffffffffffff, 0xfffffffbffffd7c0, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffcfb, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xfffffffe027fffff, 0xbffffffffffe00ff, 0x000707ffffff00b6, + 0xffffffff07ff0000, 0xffffc3ffffffffff, 0xffffffffffffffff, 0x9ffffdff9fefffff, + 0xffffffffffff0000, 0xffffffffffffe7ff, 0x0003ffffffffffff, 0x043fffffffffffff + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 4, 32, 33, 34, 4, 4, 4, 4, 4, 35, 36, 37, 38, 39, 40, + 41, 
42, 4, 4, 4, 4, 4, 4, 4, 4, 43, 44, 45, 46, 47, 4, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 4, 61, 4, 62, 50, 63, 64, 65, 4, 4, 4, 66, 4, 4, 4, 4, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 64, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 77, 78, 4, 79, 80, 81, 82, 83, 60, 60, 60, 60, 60, 60, 60, 60, 84, + 42, 85, 86, 87, 4, 88, 89, 60, 60, 60, 60, 60, 60, 60, 60, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 52, 60, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 90, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 91, 92, 4, 4, 4, 4, 93, 94, 4, 95, 96, 4, 97, 98, 99, 62, 4, 100, 101, + 102, 4, 103, 104, 105, 4, 106, 107, 
108, 4, 109, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 110, 111, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 4, 4, 4, 4, 4, 101, 4, 112, + 113, 114, 95, 115, 4, 116, 4, 4, 117, 118, 119, 120, 121, 122, 4, 123, 124, 125, 126, + 127 + ], + r3: &[ + 0x00003fffffffffff, 0x000000000fffffff, 0x3fdfffff00000000, 0xfffffffbfff00000, + 0xffffffffffffffff, 0xfffeffcfffffffff, 0xf3c5fdfffff99fef, 0x0003ffcfb080799f, + 0xd36dfdfffff987ee, 0x003fffc05e023987, 0xf3edfdfffffbbfee, 0x0200ffcf00013bbf, + 0xf3edfdfffff99fee, 0x0002ffcfb0c0399f, 0xc3ffc718d63dc7ec, 0x0000ffc000813dc7, + 0xe3fffdfffffddfef, 0x0000ffcf07603ddf, 0xf3effdfffffddfef, 0x0006ffcf40603ddf, + 0xe7fffffffffddfee, 0xfc00ffcf80f07ddf, 0x2ffbfffffc7fffec, 0x000cffc0ff5f847f, + 0x07fffffffffffffe, 0x0000000003ff7fff, 0x3bffecaefef02596, 0x00000000f3ff3f5f, + 0xc2a003ff03000001, 0xfffe1ffffffffeff, 0x1ffffffffeffffdf, 0x0000000000000040, + 0xffffffffffff03ff, 0xffffffff3fffffff, 0xf7ffffffffff20bf, 0xffffffff3d7f3dff, + 0x7f3dffffffff3dff, 
0xffffffffff7fff3d, 0xffffffffff3dffff, 0x0003fe00e7ffffff, + 0xffffffff0000ffff, 0x3f3fffffffffffff, 0xfffffffffffffffe, 0xffff9fffffffffff, + 0xffffffff07fffffe, 0x01ffc7ffffffffff, 0x001fffff001fdfff, 0x000ddfff000fffff, + 0x000003ff308fffff, 0xffffffff03ff3800, 0x00ffffffffffffff, 0xffff07ffffffffff, + 0x003fffffffffffff, 0x0fff0fff7fffffff, 0x001f3fffffffffc0, 0xffff0fffffffffff, + 0x0000000007ff03ff, 0xffffffff0fffffff, 0x9fffffff7fffffff, 0x3fff008003ff03ff, + 0x0000000000000000, 0x000ff80003ff0fff, 0x000fffffffffffff, 0x3fffffffffffe3ff, + 0x00000000000001ff, 0x037ffffffff70000, 0xf83fffffffffffff, 0xffffffff3f3fffff, + 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, 0x1fdc1fff0fcf1fdc, 0x8000000000000000, + 0x8002000000100001, 0x000000001fff0000, 0x0001ffe21fff0000, 0xf3fffd503f2ffc84, + 0xffffffff000043e0, 0xffff7fffffffffff, 0xffffffff7fffffff, 0x000ff81fffffffff, + 0xffff20bfffffffff, 0x800080ffffffffff, 0x7f7f7f7f007fffff, 0xffffffff7f7f7f7f, + 0x1f3efffe000000e0, 0xfffffffee67fffff, 0xf7ffffffffffffff, 0xfffe3fffffffffe0, + 0x07ffffff00007fff, 0xffff000000000000, 0x00000000003fffff, 0x0000000000001fff, + 0x3fffffffffff0000, 0x00000fffffff1fff, 0xbff0ffffffffffff, 0x0003ffffffffffff, + 0xfffffffcff800000, 0x00ff7ffffffff9ff, 0xff80000000000000, 0x000000ffffffffff, + 0x28ffffff03ff003f, 0xffff3fffffffffff, 0x1fffffff000fffff, 0x7fffffff03ff8001, + 0x007fffffffffffff, 0xfc7fffff03ff3fff, 0x007cffff38000007, 0xffff7f7f007e7e7e, + 0xffff003ff7ffffff, 0x03ff37ffffffffff, 0xffff000fffffffff, 0x0ffffffffffff87f, + 0x0000000003ffffff, 0x5f7ffdffe0f8007f, 0xffffffffffffffdb, 0xfffffffffff80000, + 0xfffffff03fffffff, 0x3fffffffffffffff, 0xffffffffffff0000, 0xfffffffffffcffff, + 0x03ff0000000000ff, 0x0018ffff0000ffff, 0xaa8a00000000e000, 0x1fffffffffffffff, + 0x87fffffe03ff0000, 0xffffffc007fffffe, 0x7fffffffffffffff, 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 13, + 14, 5, 5, 15, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 16, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 6, 4, 4, 7, 8, 9, 10, 11, 12, 2, 2, 13, 14, 15, 16, 4, 4, 2, 2, 2, + 2, 17, 18, 4, 4, 19, 20, 21, 22, 23, 4, 24, 4, 25, 26, 27, 28, 29, 30, 31, 4, 2, 32, 33, + 33, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 34, 3, 35, 36, 37, 2, 38, 39, 4, 40, 41, 42, + 43, 4, 4, 2, 44, 2, 45, 4, 4, 46, 47, 2, 48, 49, 50, 51, 4, 4, 4, 4, 4, 52, 53, 4, 4, 4, + 4, 4, 4, 4, 54, 4, 4, 4, 4, 55, 56, 57, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 58, 4, 2, 59, 2, 2, 2, 60, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 59, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 61, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 2, 2, 2, 2, + 2, 2, 2, 2, 54, 62, 4, 63, 17, 64, 65, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 66, 67, + 68, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 69, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 33, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 70, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 2, 71, 72, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 73, 74, 4, 4, + 75, 4, 4, 4, 4, 4, 4, 2, 76, 77, 78, 79, 80, 2, 2, 2, 2, 81, 82, 83, 84, 85, 86, 4, 4, + 4, 4, 4, 4, 4, 4, 87, 88, 89, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 90, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 2, 2, 2, 91, 2, 44, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 92, 93, 94, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 95, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 11, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 96, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 97, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 98, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0x2000000000000000, 0xffffffff1fffffff, + 0x000000010001ffff, 0xffff0000ffffffff, 0x07ffffffffff07ff, 0xffffffff3fffffff, + 0x00000000003eff0f, 0xffff03ff3fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, + 0x0000000fffffffff, 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, + 0x007fffff003fffff, 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, + 0xc0ffffffffffffff, 0x870ffffffeeff06f, 0x1fffffff00000000, 0x000000001fffffff, + 0x0000007ffffffeff, 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, + 0x00000000000001ff, 0x0007ffffffffffff, 0x8000ffc00000007f, 0x03ff01ffffff0000, + 0xffdfffffffffffff, 0x004fffffffff0000, 0x0000000017ff1c1f, 0x40fffffffffbffff, + 0xffff01ffbfffbd7f, 0x03ff07ffffffffff, 0xf3edfdfffff99fef, 0x001f1fcfe081399f, + 0x0000000003ff07ff, 0x0000000003ff00bf, 0xff3fffffffffffff, 0x000000003f000001, + 0x0000000003ff0011, 0x00ffffffffffffff, 0x00000000000003ff, 0x03ff0fffe3ffffff, + 0xffffffff00000000, 0x800003ffffffffff, 0x01ffffffffffffff, 0xff7ffffffffffdff, + 0xfffc000003ff0001, 0x007ffefffffcffff, 0x0000000003ffffff, 0x00007fffffffffff, + 0x000000000000000f, 0x000000000000007f, 0x000003ff7fffffff, 0x001f3fffffff0000, + 0xe0fffff803ff000f, 0x000000000000ffff, 0x7fffffffffff001f, 0x00000000ffff8000, + 0x0000000100000000, 0x00001fffffffffff, 0x0000000000000003, 0x1fff07ffffffffff, + 0x0000000063ff01ff, 0xf807e3e000000000, 0x00003c0000000fe7, 0x000000000000001c, + 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, 0x7bffffffdfdfe7bf, + 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, 0xffdfffffffdfffff, + 0xffff7fffffff7fff, 0xfffffdfffffffdff, 0xffffffffffffcff7, 0xf87fffffffffffff, + 0x00201fffffffffff, 0x0000fffef8000010, 0x000007dbf9ffff7f, 
0x00000000007f001f, + 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, 0x0ffffbee0ffffbff, 0x00000000007fffff, + 0x00000003ffffffff, 0x000000003fffffff, 0x0000ffffffffffff + ], + }; pub fn XID_Continue(c: char) -> bool { - super::bsearch_range_table(c, XID_Continue_table) + super::trie_lookup_range_table(c, XID_Continue_table) } - pub const XID_Start_table: &'static [(char, char)] = &[ - ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), - ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), - ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', - '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), - ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}', - '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'), - ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', - '\u{587}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{620}', '\u{64a}'), - ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}', - '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), - ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}', - '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'), - ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}', - '\u{828}'), ('\u{840}', '\u{858}'), ('\u{8a0}', '\u{8b4}'), ('\u{904}', '\u{939}'), - ('\u{93d}', '\u{93d}'), ('\u{950}', '\u{950}'), ('\u{958}', '\u{961}'), ('\u{971}', - '\u{980}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), - ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bd}', - '\u{9bd}'), ('\u{9ce}', '\u{9ce}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e1}'), - ('\u{9f0}', '\u{9f1}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', 
'\u{a10}'), ('\u{a13}', - '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'), - ('\u{a38}', '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a72}', - '\u{a74}'), ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), - ('\u{aaa}', '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abd}', - '\u{abd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'), ('\u{af9}', '\u{af9}'), - ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', - '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3d}', '\u{b3d}'), - ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}', '\u{b71}'), ('\u{b83}', - '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'), - ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', - '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bd0}', '\u{bd0}'), - ('\u{c05}', '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', - '\u{c39}'), ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c61}'), - ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', - '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbd}', '\u{cbd}'), ('\u{cde}', '\u{cde}'), - ('\u{ce0}', '\u{ce1}'), ('\u{cf1}', '\u{cf2}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', - '\u{d10}'), ('\u{d12}', '\u{d3a}'), ('\u{d3d}', '\u{d3d}'), ('\u{d4e}', '\u{d4e}'), - ('\u{d5f}', '\u{d61}'), ('\u{d7a}', '\u{d7f}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', - '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), - ('\u{e01}', '\u{e30}'), ('\u{e32}', '\u{e32}'), ('\u{e40}', '\u{e46}'), ('\u{e81}', - '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e87}', '\u{e88}'), ('\u{e8a}', '\u{e8a}'), - ('\u{e8d}', '\u{e8d}'), ('\u{e94}', '\u{e97}'), ('\u{e99}', '\u{e9f}'), ('\u{ea1}', - '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ea7}'), ('\u{eaa}', 
'\u{eab}'), - ('\u{ead}', '\u{eb0}'), ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', - '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), - ('\u{f40}', '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', - '\u{102a}'), ('\u{103f}', '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), - ('\u{1061}', '\u{1061}'), ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', - '\u{1081}'), ('\u{108e}', '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), - ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', - '\u{124d}'), ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), - ('\u{1260}', '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', - '\u{12b5}'), ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), - ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', - '\u{135a}'), ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), - ('\u{1401}', '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', - '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1711}'), - ('\u{1720}', '\u{1731}'), ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', - '\u{1770}'), ('\u{1780}', '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), - ('\u{1820}', '\u{1877}'), ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', - '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), - ('\u{1980}', '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', - '\u{1a54}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), - ('\u{1b83}', '\u{1ba0}'), ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', - '\u{1c23}'), ('\u{1c4d}', '\u{1c4f}'), ('\u{1c5a}', 
'\u{1c7d}'), ('\u{1ce9}', '\u{1cec}'), - ('\u{1cee}', '\u{1cf1}'), ('\u{1cf5}', '\u{1cf6}'), ('\u{1d00}', '\u{1dbf}'), ('\u{1e00}', - '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), - ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', - '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), - ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', - '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), - ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', - '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), - ('\u{2115}', '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', - '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), - ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', - '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cee}'), - ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', - '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}', '\u{2d96}'), - ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', - '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), - ('\u{2dd8}', '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'), ('\u{3031}', - '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}', '\u{309f}'), - ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'), ('\u{3131}', - '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', '\u{4db5}'), - ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), ('\u{a500}', - 
'\u{a60c}'), ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}', '\u{a66e}'), - ('\u{a67f}', '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', - '\u{a788}'), ('\u{a78b}', '\u{a7ad}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', '\u{a801}'), - ('\u{a803}', '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'), ('\u{a840}', - '\u{a873}'), ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}', '\u{a8fb}'), - ('\u{a8fd}', '\u{a8fd}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'), ('\u{a960}', - '\u{a97c}'), ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}', '\u{a9e4}'), - ('\u{a9e6}', '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'), ('\u{aa40}', - '\u{aa42}'), ('\u{aa44}', '\u{aa4b}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aa7a}'), - ('\u{aa7e}', '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'), ('\u{aab9}', - '\u{aabd}'), ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), - ('\u{aae0}', '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', - '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), - ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abe2}'), ('\u{ac00}', - '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), - ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', - '\u{fb1d}'), ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), - ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', - '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), - ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', - '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), - ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', 
'\u{fefc}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff41}', - '\u{ff5a}'), ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), - ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', - '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', - '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', - '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', - '\u{102d0}'), ('\u{10300}', '\u{1031f}'), ('\u{10330}', '\u{1034a}'), ('\u{10350}', - '\u{10375}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', - '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{10500}', - '\u{10527}'), ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', - '\u{10755}'), ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', - '\u{10808}'), ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', - '\u{1083c}'), ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', - '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', - '\u{10915}'), ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', - '\u{109bf}'), ('\u{10a00}', '\u{10a00}'), ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', - '\u{10a17}'), ('\u{10a19}', '\u{10a33}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', - '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', - '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', - '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', - '\u{10cf2}'), ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'), ('\u{110d0}', - '\u{110e8}'), ('\u{11103}', '\u{11126}'), ('\u{11150}', '\u{11172}'), ('\u{11176}', - '\u{11176}'), ('\u{11183}', '\u{111b2}'), ('\u{111c1}', '\u{111c4}'), ('\u{111da}', - '\u{111da}'), ('\u{111dc}', 
'\u{111dc}'), ('\u{11200}', '\u{11211}'), ('\u{11213}', - '\u{1122b}'), ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', - '\u{1128d}'), ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', - '\u{112de}'), ('\u{11305}', '\u{1130c}'), ('\u{1130f}', '\u{11310}'), ('\u{11313}', - '\u{11328}'), ('\u{1132a}', '\u{11330}'), ('\u{11332}', '\u{11333}'), ('\u{11335}', - '\u{11339}'), ('\u{1133d}', '\u{1133d}'), ('\u{11350}', '\u{11350}'), ('\u{1135d}', - '\u{11361}'), ('\u{11480}', '\u{114af}'), ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', - '\u{114c7}'), ('\u{11580}', '\u{115ae}'), ('\u{115d8}', '\u{115db}'), ('\u{11600}', - '\u{1162f}'), ('\u{11644}', '\u{11644}'), ('\u{11680}', '\u{116aa}'), ('\u{11700}', - '\u{11719}'), ('\u{118a0}', '\u{118df}'), ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', - '\u{11af8}'), ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', - '\u{12543}'), ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', - '\u{16a38}'), ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', - '\u{16b2f}'), ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', - '\u{16b8f}'), ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f50}'), ('\u{16f93}', - '\u{16f9f}'), ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', - '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', - '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', - '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', - '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', - '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', - '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', - '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', - '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), 
('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', - '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', - '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', - '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e800}', - '\u{1e8c4}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'), ('\u{1ee21}', - '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'), ('\u{1ee29}', - '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'), ('\u{1ee3b}', - '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'), ('\u{1ee49}', - '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'), ('\u{1ee51}', - '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'), ('\u{1ee59}', - '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'), ('\u{1ee5f}', - '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'), ('\u{1ee67}', - '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'), ('\u{1ee79}', - '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'), ('\u{1ee8b}', - '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'), ('\u{1eeab}', - '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'), ('\u{2b740}', - '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}') - ]; + pub const XID_Start_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000000000000, 0x07fffffe07fffffe, 0x0420040000000000, 0xff7fffffff7fffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0x0000501f0003ffc3, + 0x0000000000000000, 0xb8df000000000000, 0xfffffffbffffd740, 0xffbfffffffffffff, + 0xffffffffffffffff, 0xffffffffffffffff, 0xfffffffffffffc03, 0xffffffffffffffff, + 0xfffeffffffffffff, 0xfffffffe027fffff, 0x00000000000000ff, 0x000707ffffff0000, + 
0xffffffff00000000, 0xfffec000000007ff, 0xffffffffffffffff, 0x9c00c060002fffff, + 0x0000fffffffd0000, 0xffffffffffffe000, 0x0002003fffffffff, 0x043007fffffffc00 + ], + r2: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 23, 25, 26, 27, 28, 29, 3, 30, 31, 32, 33, 34, 34, 34, 34, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 34, 34, 34, 34, 34, 34, 34, 34, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 3, 61, 62, 63, 64, 65, 66, 67, 68, 34, 34, 34, 3, 34, 34, + 34, 34, 69, 70, 71, 72, 3, 73, 74, 3, 75, 76, 67, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 77, + 78, 34, 79, 80, 81, 82, 83, 3, 3, 3, 3, 3, 3, 3, 3, 84, 42, 85, 86, 87, 34, 88, 89, 3, + 3, 3, 3, 3, 3, 3, 3, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 53, 3, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 
34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 90, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 91, 92, 34, 34, 34, 34, 93, + 94, 95, 96, 97, 34, 98, 99, 100, 48, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 112, 34, 113, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 114, 115, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 34, 34, 34, 34, 34, + 116, 34, 117, 118, 119, 120, 121, 34, 122, 34, 34, 
123, 124, 125, 126, 3, 127, 34, 128, + 129, 130, 131, 132 + ], + r3: &[ + 0x00000110043fffff, 0x0000000001ffffff, 0x3fdfffff00000000, 0x0000000000000000, + 0x23fffffffffffff0, 0xfffe0003ff010000, 0x23c5fdfffff99fe1, 0x00030003b0004000, + 0x036dfdfffff987e0, 0x001c00005e000000, 0x23edfdfffffbbfe0, 0x0200000300010000, + 0x23edfdfffff99fe0, 0x00020003b0000000, 0x03ffc718d63dc7e8, 0x0000000000010000, + 0x23fffdfffffddfe0, 0x0000000307000000, 0x23effdfffffddfe1, 0x0006000340000000, + 0x27fffffffffddfe0, 0xfc00000380704000, 0x2ffbfffffc7fffe0, 0x000000000000007f, + 0x0005fffffffffffe, 0x2005ecaefef02596, 0x00000000f000005f, 0x0000000000000001, + 0x00001ffffffffeff, 0x0000000000001f00, 0x800007ffffffffff, 0xffe1c0623c3f0000, + 0xffffffff00004003, 0xf7ffffffffff20bf, 0xffffffffffffffff, 0xffffffff3d7f3dff, + 0x7f3dffffffff3dff, 0xffffffffff7fff3d, 0xffffffffff3dffff, 0x0000000007ffffff, + 0xffffffff0000ffff, 0x3f3fffffffffffff, 0xfffffffffffffffe, 0xffff9fffffffffff, + 0xffffffff07fffffe, 0x01ffc7ffffffffff, 0x0003ffff0003dfff, 0x0001dfff0003ffff, + 0x000fffffffffffff, 0x0000000010800000, 0xffffffff00000000, 0x00ffffffffffffff, + 0xffff05ffffffffff, 0x003fffffffffffff, 0x000000007fffffff, 0x001f3fffffff0000, + 0xffff0fffffffffff, 0x00000000000003ff, 0xffffffff007fffff, 0x00000000001fffff, + 0x0000008000000000, 0x000fffffffffffe0, 0x0000000000000fe0, 0xfc00c001fffffff8, + 0x0000003fffffffff, 0x0000000fffffffff, 0x3ffffffffc00e000, 0x00000000000001ff, + 0x0063de0000000000, 0xffffffff3f3fffff, 0x3fffffffaaff3f3f, 0x5fdfffffffffffff, + 0x1fdc1fff0fcf1fdc, 0x8002000000000000, 0x000000001fff0000, 0xf3fffd503f2ffc84, + 0xffffffff000043e0, 0xffff7fffffffffff, 0xffffffff7fffffff, 0x000c781fffffffff, + 0xffff20bfffffffff, 0x000080ffffffffff, 0x7f7f7f7f007fffff, 0x000000007f7f7f7f, + 0x1f3e03fe000000e0, 0xfffffffee07fffff, 0xf7ffffffffffffff, 0xfffe3fffffffffe0, + 0x07ffffff00007fff, 0xffff000000000000, 0x00000000003fffff, 0x0000000000001fff, + 0x3fffffffffff0000, 0x00000c00ffff1fff, 
0x80007fffffffffff, 0xffffffff3fffffff, + 0x0000ffffffffffff, 0xfffffffcff800000, 0x00ff7ffffffff9ff, 0xff80000000000000, + 0x00000007fffff7bb, 0x000ffffffffffffc, 0x28fc000000000000, 0xffff003ffffffc00, + 0x1fffffff0000007f, 0x0007fffffffffff0, 0x7c00ffdf00008000, 0x000001ffffffffff, + 0xc47fffff00000ff7, 0x3e62ffffffffffff, 0x001c07ff38000005, 0xffff7f7f007e7e7e, + 0xffff003ff7ffffff, 0x00000007ffffffff, 0xffff000fffffffff, 0x0ffffffffffff87f, + 0xffff3fffffffffff, 0x0000000003ffffff, 0x5f7ffdffa0f8007f, 0xffffffffffffffdb, + 0x0003ffffffffffff, 0xfffffffffff80000, 0xfffffff03fffffff, 0x3fffffffffffffff, + 0xffffffffffff0000, 0xfffffffffffcffff, 0x03ff0000000000ff, 0xaa8a000000000000, + 0x1fffffffffffffff, 0x07fffffe00000000, 0xffffffc007fffffe, 0x7fffffff3fffffff, + 0x000000001cfcfcfc + ], + r4: [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 5, 5, 9, 5, 10, 11, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 13, + 14, 5, 5, 15, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 + ], + r5: &[ + 0, 1, 2, 3, 4, 5, 4, 4, 4, 4, 6, 7, 8, 9, 10, 11, 2, 2, 12, 13, 14, 15, 4, 4, 2, 2, 2, + 2, 16, 17, 4, 4, 18, 19, 20, 21, 22, 4, 23, 4, 24, 25, 26, 27, 28, 29, 30, 4, 2, 31, 32, + 32, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 33, 4, 34, 35, 36, 37, 38, 39, 40, 4, 41, 20, + 42, 43, 4, 4, 5, 44, 45, 46, 4, 4, 47, 48, 45, 49, 50, 4, 51, 4, 4, 4, 4, 4, 52, 53, 4, + 4, 4, 4, 4, 4, 4, 54, 
4, 4, 4, 4, 55, 56, 57, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 51, 4, 2, 47, 2, 2, 2, 58, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 47, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 59, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, + 2, 2, 2, 2, 2, 2, 54, 20, 4, 60, 45, 61, 57, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, + 62, 63, 64, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 65, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 32, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 66, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 2, 67, 68, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 69, 70, 71, 72, 73, 2, 2, 2, 2, 74, 75, 76, 77, 78, 79, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 2, 2, 2, 80, 2, 58, 4, 4, 4, 4, 4, 4, 
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 81, 82, 83, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 84, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2, 10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 85, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 2, 2, 2, 2, 2, 2, 2, 2, 86, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 4, 4 + ], + r6: &[ + 0xb7ffff7fffffefff, 0x000000003fff3fff, 0xffffffffffffffff, 0x07ffffffffffffff, + 0x0000000000000000, 0x001fffffffffffff, 0xffffffff1fffffff, 0x000000000001ffff, + 0xffff0000ffffffff, 0x003fffffffff07ff, 0xffffffff3fffffff, 0x00000000003eff0f, + 0xffff00003fffffff, 0x0fffffffff0fffff, 0xffff00ffffffffff, 0x0000000fffffffff, + 0x007fffffffffffff, 0x000000ff003fffff, 0x91bffffffffffd3f, 0x007fffff003fffff, + 0x000000007fffffff, 0x0037ffff00000000, 0x03ffffff003fffff, 0xc0ffffffffffffff, + 0x000ffffffeef0001, 0x1fffffff00000000, 0x000000001fffffff, 0x0000001ffffffeff, + 0x003fffffffffffff, 0x0007ffff003fffff, 0x000000000003ffff, 0x00000000000001ff, + 0x0007ffffffffffff, 0x00fffffffffffff8, 0x0000fffffffffff8, 0x000001ffffff0000, + 0x0000007ffffffff8, 0x0047ffffffff0000, 0x0007fffffffffff8, 0x000000001400001e, + 0x00000ffffffbffff, 0xffff01ffbfffbd7f, 0x23edfdfffff99fe0, 0x00000003e0010000, + 0x0000000000000780, 0x0000ffffffffffff, 0x00000000000000b0, 0x00007fffffffffff, + 0x000000000f000000, 0x0000000000000010, 0x000007ffffffffff, 0x0000000003ffffff, + 0xffffffff00000000, 0x80000000ffffffff, 0x01ffffffffffffff, 
0x00007ffffffffdff, + 0xfffc000000000001, 0x000000000000ffff, 0x000000000000000f, 0x000000000000007f, + 0x00003fffffff0000, 0xe0fffff80000000f, 0x000000000001001f, 0x00000000fff80000, + 0x0000000100000000, 0x00001fffffffffff, 0x0000000000000003, 0x1fff07ffffffffff, + 0x0000000003ff01ff, 0xffffffffffdfffff, 0xebffde64dfffffff, 0xffffffffffffffef, + 0x7bffffffdfdfe7bf, 0xfffffffffffdfc5f, 0xffffff3fffffffff, 0xf7fffffff7fffffd, + 0xffdfffffffdfffff, 0xffff7fffffff7fff, 0xfffffdfffffffdff, 0x0000000000000ff7, + 0x000000000000001f, 0x0af7fe96ffffffef, 0x5ef7f796aa96ea84, 0x0ffffbee0ffffbff, + 0x00000000007fffff, 0x00000003ffffffff, 0x000000003fffffff + ], + }; pub fn XID_Start(c: char) -> bool { - super::bsearch_range_table(c, XID_Start_table) + super::trie_lookup_range_table(c, XID_Start_table) } } pub mod property { - pub const White_Space_table: &'static [(char, char)] = &[ - ('\u{9}', '\u{d}'), ('\u{20}', '\u{20}'), ('\u{85}', '\u{85}'), ('\u{a0}', '\u{a0}'), - ('\u{1680}', '\u{1680}'), ('\u{2000}', '\u{200a}'), ('\u{2028}', '\u{2029}'), ('\u{202f}', - '\u{202f}'), ('\u{205f}', '\u{205f}'), ('\u{3000}', '\u{3000}') - ]; + pub const Pattern_White_Space_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000100003e00, 0x0000000000000000, 0x0000000000000020, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ], + r3: &[ + 0x0000000000000000, 0x000003000000c000 + ], + r4: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000 + ], + }; + + pub fn Pattern_White_Space(c: char) -> bool { + super::trie_lookup_range_table(c, Pattern_White_Space_table) + } + + pub const White_Space_table: &'static super::BoolTrie = &super::BoolTrie { + r1: [ + 0x0000000100003e00, 0x0000000000000000, 0x0000000100000020, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, + 0x0000000000000000, 0x0000000000000000, 0x0000000000000000, 0x0000000000000000 + ], + r2: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ], + r3: &[ + 
0x0000000000000000, 0x0000000000000001, 0x00008300000007ff, 0x0000000080000000 + ], + r4: [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], + r5: &[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0 + ], + r6: &[ + 0x0000000000000000 + ], + }; pub fn White_Space(c: char) -> bool { - super::bsearch_range_table(c, White_Space_table) + super::trie_lookup_range_table(c, White_Space_table) } } @@ -1684,83 +1839,118 @@ pub mod conversions { ('\u{a7a0}', ['\u{a7a1}', '\0', '\0']), ('\u{a7a2}', ['\u{a7a3}', '\0', '\0']), ('\u{a7a4}', ['\u{a7a5}', '\0', '\0']), ('\u{a7a6}', ['\u{a7a7}', '\0', '\0']), ('\u{a7a8}', ['\u{a7a9}', '\0', '\0']), ('\u{a7aa}', ['\u{266}', '\0', '\0']), ('\u{a7ab}', ['\u{25c}', '\0', '\0']), - ('\u{a7ac}', ['\u{261}', '\0', '\0']), ('\u{a7ad}', ['\u{26c}', '\0', '\0']), ('\u{a7b0}', - ['\u{29e}', '\0', '\0']), ('\u{a7b1}', ['\u{287}', '\0', '\0']), ('\u{a7b2}', ['\u{29d}', - '\0', '\0']), ('\u{a7b3}', ['\u{ab53}', '\0', '\0']), ('\u{a7b4}', ['\u{a7b5}', '\0', - '\0']), ('\u{a7b6}', ['\u{a7b7}', '\0', '\0']), ('\u{ff21}', ['\u{ff41}', '\0', '\0']), - ('\u{ff22}', ['\u{ff42}', 
'\0', '\0']), ('\u{ff23}', ['\u{ff43}', '\0', '\0']), ('\u{ff24}', - ['\u{ff44}', '\0', '\0']), ('\u{ff25}', ['\u{ff45}', '\0', '\0']), ('\u{ff26}', ['\u{ff46}', - '\0', '\0']), ('\u{ff27}', ['\u{ff47}', '\0', '\0']), ('\u{ff28}', ['\u{ff48}', '\0', - '\0']), ('\u{ff29}', ['\u{ff49}', '\0', '\0']), ('\u{ff2a}', ['\u{ff4a}', '\0', '\0']), - ('\u{ff2b}', ['\u{ff4b}', '\0', '\0']), ('\u{ff2c}', ['\u{ff4c}', '\0', '\0']), ('\u{ff2d}', - ['\u{ff4d}', '\0', '\0']), ('\u{ff2e}', ['\u{ff4e}', '\0', '\0']), ('\u{ff2f}', ['\u{ff4f}', - '\0', '\0']), ('\u{ff30}', ['\u{ff50}', '\0', '\0']), ('\u{ff31}', ['\u{ff51}', '\0', - '\0']), ('\u{ff32}', ['\u{ff52}', '\0', '\0']), ('\u{ff33}', ['\u{ff53}', '\0', '\0']), - ('\u{ff34}', ['\u{ff54}', '\0', '\0']), ('\u{ff35}', ['\u{ff55}', '\0', '\0']), ('\u{ff36}', - ['\u{ff56}', '\0', '\0']), ('\u{ff37}', ['\u{ff57}', '\0', '\0']), ('\u{ff38}', ['\u{ff58}', - '\0', '\0']), ('\u{ff39}', ['\u{ff59}', '\0', '\0']), ('\u{ff3a}', ['\u{ff5a}', '\0', - '\0']), ('\u{10400}', ['\u{10428}', '\0', '\0']), ('\u{10401}', ['\u{10429}', '\0', '\0']), - ('\u{10402}', ['\u{1042a}', '\0', '\0']), ('\u{10403}', ['\u{1042b}', '\0', '\0']), - ('\u{10404}', ['\u{1042c}', '\0', '\0']), ('\u{10405}', ['\u{1042d}', '\0', '\0']), - ('\u{10406}', ['\u{1042e}', '\0', '\0']), ('\u{10407}', ['\u{1042f}', '\0', '\0']), - ('\u{10408}', ['\u{10430}', '\0', '\0']), ('\u{10409}', ['\u{10431}', '\0', '\0']), - ('\u{1040a}', ['\u{10432}', '\0', '\0']), ('\u{1040b}', ['\u{10433}', '\0', '\0']), - ('\u{1040c}', ['\u{10434}', '\0', '\0']), ('\u{1040d}', ['\u{10435}', '\0', '\0']), - ('\u{1040e}', ['\u{10436}', '\0', '\0']), ('\u{1040f}', ['\u{10437}', '\0', '\0']), - ('\u{10410}', ['\u{10438}', '\0', '\0']), ('\u{10411}', ['\u{10439}', '\0', '\0']), - ('\u{10412}', ['\u{1043a}', '\0', '\0']), ('\u{10413}', ['\u{1043b}', '\0', '\0']), - ('\u{10414}', ['\u{1043c}', '\0', '\0']), ('\u{10415}', ['\u{1043d}', '\0', '\0']), - ('\u{10416}', ['\u{1043e}', '\0', '\0']), ('\u{10417}', 
['\u{1043f}', '\0', '\0']), - ('\u{10418}', ['\u{10440}', '\0', '\0']), ('\u{10419}', ['\u{10441}', '\0', '\0']), - ('\u{1041a}', ['\u{10442}', '\0', '\0']), ('\u{1041b}', ['\u{10443}', '\0', '\0']), - ('\u{1041c}', ['\u{10444}', '\0', '\0']), ('\u{1041d}', ['\u{10445}', '\0', '\0']), - ('\u{1041e}', ['\u{10446}', '\0', '\0']), ('\u{1041f}', ['\u{10447}', '\0', '\0']), - ('\u{10420}', ['\u{10448}', '\0', '\0']), ('\u{10421}', ['\u{10449}', '\0', '\0']), - ('\u{10422}', ['\u{1044a}', '\0', '\0']), ('\u{10423}', ['\u{1044b}', '\0', '\0']), - ('\u{10424}', ['\u{1044c}', '\0', '\0']), ('\u{10425}', ['\u{1044d}', '\0', '\0']), - ('\u{10426}', ['\u{1044e}', '\0', '\0']), ('\u{10427}', ['\u{1044f}', '\0', '\0']), - ('\u{10c80}', ['\u{10cc0}', '\0', '\0']), ('\u{10c81}', ['\u{10cc1}', '\0', '\0']), - ('\u{10c82}', ['\u{10cc2}', '\0', '\0']), ('\u{10c83}', ['\u{10cc3}', '\0', '\0']), - ('\u{10c84}', ['\u{10cc4}', '\0', '\0']), ('\u{10c85}', ['\u{10cc5}', '\0', '\0']), - ('\u{10c86}', ['\u{10cc6}', '\0', '\0']), ('\u{10c87}', ['\u{10cc7}', '\0', '\0']), - ('\u{10c88}', ['\u{10cc8}', '\0', '\0']), ('\u{10c89}', ['\u{10cc9}', '\0', '\0']), - ('\u{10c8a}', ['\u{10cca}', '\0', '\0']), ('\u{10c8b}', ['\u{10ccb}', '\0', '\0']), - ('\u{10c8c}', ['\u{10ccc}', '\0', '\0']), ('\u{10c8d}', ['\u{10ccd}', '\0', '\0']), - ('\u{10c8e}', ['\u{10cce}', '\0', '\0']), ('\u{10c8f}', ['\u{10ccf}', '\0', '\0']), - ('\u{10c90}', ['\u{10cd0}', '\0', '\0']), ('\u{10c91}', ['\u{10cd1}', '\0', '\0']), - ('\u{10c92}', ['\u{10cd2}', '\0', '\0']), ('\u{10c93}', ['\u{10cd3}', '\0', '\0']), - ('\u{10c94}', ['\u{10cd4}', '\0', '\0']), ('\u{10c95}', ['\u{10cd5}', '\0', '\0']), - ('\u{10c96}', ['\u{10cd6}', '\0', '\0']), ('\u{10c97}', ['\u{10cd7}', '\0', '\0']), - ('\u{10c98}', ['\u{10cd8}', '\0', '\0']), ('\u{10c99}', ['\u{10cd9}', '\0', '\0']), - ('\u{10c9a}', ['\u{10cda}', '\0', '\0']), ('\u{10c9b}', ['\u{10cdb}', '\0', '\0']), - ('\u{10c9c}', ['\u{10cdc}', '\0', '\0']), ('\u{10c9d}', ['\u{10cdd}', '\0', 
'\0']), - ('\u{10c9e}', ['\u{10cde}', '\0', '\0']), ('\u{10c9f}', ['\u{10cdf}', '\0', '\0']), - ('\u{10ca0}', ['\u{10ce0}', '\0', '\0']), ('\u{10ca1}', ['\u{10ce1}', '\0', '\0']), - ('\u{10ca2}', ['\u{10ce2}', '\0', '\0']), ('\u{10ca3}', ['\u{10ce3}', '\0', '\0']), - ('\u{10ca4}', ['\u{10ce4}', '\0', '\0']), ('\u{10ca5}', ['\u{10ce5}', '\0', '\0']), - ('\u{10ca6}', ['\u{10ce6}', '\0', '\0']), ('\u{10ca7}', ['\u{10ce7}', '\0', '\0']), - ('\u{10ca8}', ['\u{10ce8}', '\0', '\0']), ('\u{10ca9}', ['\u{10ce9}', '\0', '\0']), - ('\u{10caa}', ['\u{10cea}', '\0', '\0']), ('\u{10cab}', ['\u{10ceb}', '\0', '\0']), - ('\u{10cac}', ['\u{10cec}', '\0', '\0']), ('\u{10cad}', ['\u{10ced}', '\0', '\0']), - ('\u{10cae}', ['\u{10cee}', '\0', '\0']), ('\u{10caf}', ['\u{10cef}', '\0', '\0']), - ('\u{10cb0}', ['\u{10cf0}', '\0', '\0']), ('\u{10cb1}', ['\u{10cf1}', '\0', '\0']), - ('\u{10cb2}', ['\u{10cf2}', '\0', '\0']), ('\u{118a0}', ['\u{118c0}', '\0', '\0']), - ('\u{118a1}', ['\u{118c1}', '\0', '\0']), ('\u{118a2}', ['\u{118c2}', '\0', '\0']), - ('\u{118a3}', ['\u{118c3}', '\0', '\0']), ('\u{118a4}', ['\u{118c4}', '\0', '\0']), - ('\u{118a5}', ['\u{118c5}', '\0', '\0']), ('\u{118a6}', ['\u{118c6}', '\0', '\0']), - ('\u{118a7}', ['\u{118c7}', '\0', '\0']), ('\u{118a8}', ['\u{118c8}', '\0', '\0']), - ('\u{118a9}', ['\u{118c9}', '\0', '\0']), ('\u{118aa}', ['\u{118ca}', '\0', '\0']), - ('\u{118ab}', ['\u{118cb}', '\0', '\0']), ('\u{118ac}', ['\u{118cc}', '\0', '\0']), - ('\u{118ad}', ['\u{118cd}', '\0', '\0']), ('\u{118ae}', ['\u{118ce}', '\0', '\0']), - ('\u{118af}', ['\u{118cf}', '\0', '\0']), ('\u{118b0}', ['\u{118d0}', '\0', '\0']), - ('\u{118b1}', ['\u{118d1}', '\0', '\0']), ('\u{118b2}', ['\u{118d2}', '\0', '\0']), - ('\u{118b3}', ['\u{118d3}', '\0', '\0']), ('\u{118b4}', ['\u{118d4}', '\0', '\0']), - ('\u{118b5}', ['\u{118d5}', '\0', '\0']), ('\u{118b6}', ['\u{118d6}', '\0', '\0']), - ('\u{118b7}', ['\u{118d7}', '\0', '\0']), ('\u{118b8}', ['\u{118d8}', '\0', '\0']), - 
('\u{118b9}', ['\u{118d9}', '\0', '\0']), ('\u{118ba}', ['\u{118da}', '\0', '\0']), - ('\u{118bb}', ['\u{118db}', '\0', '\0']), ('\u{118bc}', ['\u{118dc}', '\0', '\0']), - ('\u{118bd}', ['\u{118dd}', '\0', '\0']), ('\u{118be}', ['\u{118de}', '\0', '\0']), - ('\u{118bf}', ['\u{118df}', '\0', '\0']) + ('\u{a7ac}', ['\u{261}', '\0', '\0']), ('\u{a7ad}', ['\u{26c}', '\0', '\0']), ('\u{a7ae}', + ['\u{26a}', '\0', '\0']), ('\u{a7b0}', ['\u{29e}', '\0', '\0']), ('\u{a7b1}', ['\u{287}', + '\0', '\0']), ('\u{a7b2}', ['\u{29d}', '\0', '\0']), ('\u{a7b3}', ['\u{ab53}', '\0', '\0']), + ('\u{a7b4}', ['\u{a7b5}', '\0', '\0']), ('\u{a7b6}', ['\u{a7b7}', '\0', '\0']), ('\u{ff21}', + ['\u{ff41}', '\0', '\0']), ('\u{ff22}', ['\u{ff42}', '\0', '\0']), ('\u{ff23}', ['\u{ff43}', + '\0', '\0']), ('\u{ff24}', ['\u{ff44}', '\0', '\0']), ('\u{ff25}', ['\u{ff45}', '\0', + '\0']), ('\u{ff26}', ['\u{ff46}', '\0', '\0']), ('\u{ff27}', ['\u{ff47}', '\0', '\0']), + ('\u{ff28}', ['\u{ff48}', '\0', '\0']), ('\u{ff29}', ['\u{ff49}', '\0', '\0']), ('\u{ff2a}', + ['\u{ff4a}', '\0', '\0']), ('\u{ff2b}', ['\u{ff4b}', '\0', '\0']), ('\u{ff2c}', ['\u{ff4c}', + '\0', '\0']), ('\u{ff2d}', ['\u{ff4d}', '\0', '\0']), ('\u{ff2e}', ['\u{ff4e}', '\0', + '\0']), ('\u{ff2f}', ['\u{ff4f}', '\0', '\0']), ('\u{ff30}', ['\u{ff50}', '\0', '\0']), + ('\u{ff31}', ['\u{ff51}', '\0', '\0']), ('\u{ff32}', ['\u{ff52}', '\0', '\0']), ('\u{ff33}', + ['\u{ff53}', '\0', '\0']), ('\u{ff34}', ['\u{ff54}', '\0', '\0']), ('\u{ff35}', ['\u{ff55}', + '\0', '\0']), ('\u{ff36}', ['\u{ff56}', '\0', '\0']), ('\u{ff37}', ['\u{ff57}', '\0', + '\0']), ('\u{ff38}', ['\u{ff58}', '\0', '\0']), ('\u{ff39}', ['\u{ff59}', '\0', '\0']), + ('\u{ff3a}', ['\u{ff5a}', '\0', '\0']), ('\u{10400}', ['\u{10428}', '\0', '\0']), + ('\u{10401}', ['\u{10429}', '\0', '\0']), ('\u{10402}', ['\u{1042a}', '\0', '\0']), + ('\u{10403}', ['\u{1042b}', '\0', '\0']), ('\u{10404}', ['\u{1042c}', '\0', '\0']), + ('\u{10405}', ['\u{1042d}', '\0', '\0']), ('\u{10406}', 
['\u{1042e}', '\0', '\0']), + ('\u{10407}', ['\u{1042f}', '\0', '\0']), ('\u{10408}', ['\u{10430}', '\0', '\0']), + ('\u{10409}', ['\u{10431}', '\0', '\0']), ('\u{1040a}', ['\u{10432}', '\0', '\0']), + ('\u{1040b}', ['\u{10433}', '\0', '\0']), ('\u{1040c}', ['\u{10434}', '\0', '\0']), + ('\u{1040d}', ['\u{10435}', '\0', '\0']), ('\u{1040e}', ['\u{10436}', '\0', '\0']), + ('\u{1040f}', ['\u{10437}', '\0', '\0']), ('\u{10410}', ['\u{10438}', '\0', '\0']), + ('\u{10411}', ['\u{10439}', '\0', '\0']), ('\u{10412}', ['\u{1043a}', '\0', '\0']), + ('\u{10413}', ['\u{1043b}', '\0', '\0']), ('\u{10414}', ['\u{1043c}', '\0', '\0']), + ('\u{10415}', ['\u{1043d}', '\0', '\0']), ('\u{10416}', ['\u{1043e}', '\0', '\0']), + ('\u{10417}', ['\u{1043f}', '\0', '\0']), ('\u{10418}', ['\u{10440}', '\0', '\0']), + ('\u{10419}', ['\u{10441}', '\0', '\0']), ('\u{1041a}', ['\u{10442}', '\0', '\0']), + ('\u{1041b}', ['\u{10443}', '\0', '\0']), ('\u{1041c}', ['\u{10444}', '\0', '\0']), + ('\u{1041d}', ['\u{10445}', '\0', '\0']), ('\u{1041e}', ['\u{10446}', '\0', '\0']), + ('\u{1041f}', ['\u{10447}', '\0', '\0']), ('\u{10420}', ['\u{10448}', '\0', '\0']), + ('\u{10421}', ['\u{10449}', '\0', '\0']), ('\u{10422}', ['\u{1044a}', '\0', '\0']), + ('\u{10423}', ['\u{1044b}', '\0', '\0']), ('\u{10424}', ['\u{1044c}', '\0', '\0']), + ('\u{10425}', ['\u{1044d}', '\0', '\0']), ('\u{10426}', ['\u{1044e}', '\0', '\0']), + ('\u{10427}', ['\u{1044f}', '\0', '\0']), ('\u{104b0}', ['\u{104d8}', '\0', '\0']), + ('\u{104b1}', ['\u{104d9}', '\0', '\0']), ('\u{104b2}', ['\u{104da}', '\0', '\0']), + ('\u{104b3}', ['\u{104db}', '\0', '\0']), ('\u{104b4}', ['\u{104dc}', '\0', '\0']), + ('\u{104b5}', ['\u{104dd}', '\0', '\0']), ('\u{104b6}', ['\u{104de}', '\0', '\0']), + ('\u{104b7}', ['\u{104df}', '\0', '\0']), ('\u{104b8}', ['\u{104e0}', '\0', '\0']), + ('\u{104b9}', ['\u{104e1}', '\0', '\0']), ('\u{104ba}', ['\u{104e2}', '\0', '\0']), + ('\u{104bb}', ['\u{104e3}', '\0', '\0']), ('\u{104bc}', ['\u{104e4}', '\0', 
'\0']), + ('\u{104bd}', ['\u{104e5}', '\0', '\0']), ('\u{104be}', ['\u{104e6}', '\0', '\0']), + ('\u{104bf}', ['\u{104e7}', '\0', '\0']), ('\u{104c0}', ['\u{104e8}', '\0', '\0']), + ('\u{104c1}', ['\u{104e9}', '\0', '\0']), ('\u{104c2}', ['\u{104ea}', '\0', '\0']), + ('\u{104c3}', ['\u{104eb}', '\0', '\0']), ('\u{104c4}', ['\u{104ec}', '\0', '\0']), + ('\u{104c5}', ['\u{104ed}', '\0', '\0']), ('\u{104c6}', ['\u{104ee}', '\0', '\0']), + ('\u{104c7}', ['\u{104ef}', '\0', '\0']), ('\u{104c8}', ['\u{104f0}', '\0', '\0']), + ('\u{104c9}', ['\u{104f1}', '\0', '\0']), ('\u{104ca}', ['\u{104f2}', '\0', '\0']), + ('\u{104cb}', ['\u{104f3}', '\0', '\0']), ('\u{104cc}', ['\u{104f4}', '\0', '\0']), + ('\u{104cd}', ['\u{104f5}', '\0', '\0']), ('\u{104ce}', ['\u{104f6}', '\0', '\0']), + ('\u{104cf}', ['\u{104f7}', '\0', '\0']), ('\u{104d0}', ['\u{104f8}', '\0', '\0']), + ('\u{104d1}', ['\u{104f9}', '\0', '\0']), ('\u{104d2}', ['\u{104fa}', '\0', '\0']), + ('\u{104d3}', ['\u{104fb}', '\0', '\0']), ('\u{10c80}', ['\u{10cc0}', '\0', '\0']), + ('\u{10c81}', ['\u{10cc1}', '\0', '\0']), ('\u{10c82}', ['\u{10cc2}', '\0', '\0']), + ('\u{10c83}', ['\u{10cc3}', '\0', '\0']), ('\u{10c84}', ['\u{10cc4}', '\0', '\0']), + ('\u{10c85}', ['\u{10cc5}', '\0', '\0']), ('\u{10c86}', ['\u{10cc6}', '\0', '\0']), + ('\u{10c87}', ['\u{10cc7}', '\0', '\0']), ('\u{10c88}', ['\u{10cc8}', '\0', '\0']), + ('\u{10c89}', ['\u{10cc9}', '\0', '\0']), ('\u{10c8a}', ['\u{10cca}', '\0', '\0']), + ('\u{10c8b}', ['\u{10ccb}', '\0', '\0']), ('\u{10c8c}', ['\u{10ccc}', '\0', '\0']), + ('\u{10c8d}', ['\u{10ccd}', '\0', '\0']), ('\u{10c8e}', ['\u{10cce}', '\0', '\0']), + ('\u{10c8f}', ['\u{10ccf}', '\0', '\0']), ('\u{10c90}', ['\u{10cd0}', '\0', '\0']), + ('\u{10c91}', ['\u{10cd1}', '\0', '\0']), ('\u{10c92}', ['\u{10cd2}', '\0', '\0']), + ('\u{10c93}', ['\u{10cd3}', '\0', '\0']), ('\u{10c94}', ['\u{10cd4}', '\0', '\0']), + ('\u{10c95}', ['\u{10cd5}', '\0', '\0']), ('\u{10c96}', ['\u{10cd6}', '\0', '\0']), + 
('\u{10c97}', ['\u{10cd7}', '\0', '\0']), ('\u{10c98}', ['\u{10cd8}', '\0', '\0']), + ('\u{10c99}', ['\u{10cd9}', '\0', '\0']), ('\u{10c9a}', ['\u{10cda}', '\0', '\0']), + ('\u{10c9b}', ['\u{10cdb}', '\0', '\0']), ('\u{10c9c}', ['\u{10cdc}', '\0', '\0']), + ('\u{10c9d}', ['\u{10cdd}', '\0', '\0']), ('\u{10c9e}', ['\u{10cde}', '\0', '\0']), + ('\u{10c9f}', ['\u{10cdf}', '\0', '\0']), ('\u{10ca0}', ['\u{10ce0}', '\0', '\0']), + ('\u{10ca1}', ['\u{10ce1}', '\0', '\0']), ('\u{10ca2}', ['\u{10ce2}', '\0', '\0']), + ('\u{10ca3}', ['\u{10ce3}', '\0', '\0']), ('\u{10ca4}', ['\u{10ce4}', '\0', '\0']), + ('\u{10ca5}', ['\u{10ce5}', '\0', '\0']), ('\u{10ca6}', ['\u{10ce6}', '\0', '\0']), + ('\u{10ca7}', ['\u{10ce7}', '\0', '\0']), ('\u{10ca8}', ['\u{10ce8}', '\0', '\0']), + ('\u{10ca9}', ['\u{10ce9}', '\0', '\0']), ('\u{10caa}', ['\u{10cea}', '\0', '\0']), + ('\u{10cab}', ['\u{10ceb}', '\0', '\0']), ('\u{10cac}', ['\u{10cec}', '\0', '\0']), + ('\u{10cad}', ['\u{10ced}', '\0', '\0']), ('\u{10cae}', ['\u{10cee}', '\0', '\0']), + ('\u{10caf}', ['\u{10cef}', '\0', '\0']), ('\u{10cb0}', ['\u{10cf0}', '\0', '\0']), + ('\u{10cb1}', ['\u{10cf1}', '\0', '\0']), ('\u{10cb2}', ['\u{10cf2}', '\0', '\0']), + ('\u{118a0}', ['\u{118c0}', '\0', '\0']), ('\u{118a1}', ['\u{118c1}', '\0', '\0']), + ('\u{118a2}', ['\u{118c2}', '\0', '\0']), ('\u{118a3}', ['\u{118c3}', '\0', '\0']), + ('\u{118a4}', ['\u{118c4}', '\0', '\0']), ('\u{118a5}', ['\u{118c5}', '\0', '\0']), + ('\u{118a6}', ['\u{118c6}', '\0', '\0']), ('\u{118a7}', ['\u{118c7}', '\0', '\0']), + ('\u{118a8}', ['\u{118c8}', '\0', '\0']), ('\u{118a9}', ['\u{118c9}', '\0', '\0']), + ('\u{118aa}', ['\u{118ca}', '\0', '\0']), ('\u{118ab}', ['\u{118cb}', '\0', '\0']), + ('\u{118ac}', ['\u{118cc}', '\0', '\0']), ('\u{118ad}', ['\u{118cd}', '\0', '\0']), + ('\u{118ae}', ['\u{118ce}', '\0', '\0']), ('\u{118af}', ['\u{118cf}', '\0', '\0']), + ('\u{118b0}', ['\u{118d0}', '\0', '\0']), ('\u{118b1}', ['\u{118d1}', '\0', '\0']), + ('\u{118b2}', 
['\u{118d2}', '\0', '\0']), ('\u{118b3}', ['\u{118d3}', '\0', '\0']), + ('\u{118b4}', ['\u{118d4}', '\0', '\0']), ('\u{118b5}', ['\u{118d5}', '\0', '\0']), + ('\u{118b6}', ['\u{118d6}', '\0', '\0']), ('\u{118b7}', ['\u{118d7}', '\0', '\0']), + ('\u{118b8}', ['\u{118d8}', '\0', '\0']), ('\u{118b9}', ['\u{118d9}', '\0', '\0']), + ('\u{118ba}', ['\u{118da}', '\0', '\0']), ('\u{118bb}', ['\u{118db}', '\0', '\0']), + ('\u{118bc}', ['\u{118dc}', '\0', '\0']), ('\u{118bd}', ['\u{118dd}', '\0', '\0']), + ('\u{118be}', ['\u{118de}', '\0', '\0']), ('\u{118bf}', ['\u{118df}', '\0', '\0']), + ('\u{1e900}', ['\u{1e922}', '\0', '\0']), ('\u{1e901}', ['\u{1e923}', '\0', '\0']), + ('\u{1e902}', ['\u{1e924}', '\0', '\0']), ('\u{1e903}', ['\u{1e925}', '\0', '\0']), + ('\u{1e904}', ['\u{1e926}', '\0', '\0']), ('\u{1e905}', ['\u{1e927}', '\0', '\0']), + ('\u{1e906}', ['\u{1e928}', '\0', '\0']), ('\u{1e907}', ['\u{1e929}', '\0', '\0']), + ('\u{1e908}', ['\u{1e92a}', '\0', '\0']), ('\u{1e909}', ['\u{1e92b}', '\0', '\0']), + ('\u{1e90a}', ['\u{1e92c}', '\0', '\0']), ('\u{1e90b}', ['\u{1e92d}', '\0', '\0']), + ('\u{1e90c}', ['\u{1e92e}', '\0', '\0']), ('\u{1e90d}', ['\u{1e92f}', '\0', '\0']), + ('\u{1e90e}', ['\u{1e930}', '\0', '\0']), ('\u{1e90f}', ['\u{1e931}', '\0', '\0']), + ('\u{1e910}', ['\u{1e932}', '\0', '\0']), ('\u{1e911}', ['\u{1e933}', '\0', '\0']), + ('\u{1e912}', ['\u{1e934}', '\0', '\0']), ('\u{1e913}', ['\u{1e935}', '\0', '\0']), + ('\u{1e914}', ['\u{1e936}', '\0', '\0']), ('\u{1e915}', ['\u{1e937}', '\0', '\0']), + ('\u{1e916}', ['\u{1e938}', '\0', '\0']), ('\u{1e917}', ['\u{1e939}', '\0', '\0']), + ('\u{1e918}', ['\u{1e93a}', '\0', '\0']), ('\u{1e919}', ['\u{1e93b}', '\0', '\0']), + ('\u{1e91a}', ['\u{1e93c}', '\0', '\0']), ('\u{1e91b}', ['\u{1e93d}', '\0', '\0']), + ('\u{1e91c}', ['\u{1e93e}', '\0', '\0']), ('\u{1e91d}', ['\u{1e93f}', '\0', '\0']), + ('\u{1e91e}', ['\u{1e940}', '\0', '\0']), ('\u{1e91f}', ['\u{1e941}', '\0', '\0']), + ('\u{1e920}', ['\u{1e942}', '\0', 
'\0']), ('\u{1e921}', ['\u{1e943}', '\0', '\0']) ]; const to_uppercase_table: &'static [(char, [char; 3])] = &[ @@ -1860,499 +2050,538 @@ pub mod conversions { ('\u{260}', ['\u{193}', '\0', '\0']), ('\u{261}', ['\u{a7ac}', '\0', '\0']), ('\u{263}', ['\u{194}', '\0', '\0']), ('\u{265}', ['\u{a78d}', '\0', '\0']), ('\u{266}', ['\u{a7aa}', '\0', '\0']), ('\u{268}', ['\u{197}', '\0', '\0']), ('\u{269}', ['\u{196}', '\0', '\0']), - ('\u{26b}', ['\u{2c62}', '\0', '\0']), ('\u{26c}', ['\u{a7ad}', '\0', '\0']), ('\u{26f}', - ['\u{19c}', '\0', '\0']), ('\u{271}', ['\u{2c6e}', '\0', '\0']), ('\u{272}', ['\u{19d}', - '\0', '\0']), ('\u{275}', ['\u{19f}', '\0', '\0']), ('\u{27d}', ['\u{2c64}', '\0', '\0']), - ('\u{280}', ['\u{1a6}', '\0', '\0']), ('\u{283}', ['\u{1a9}', '\0', '\0']), ('\u{287}', - ['\u{a7b1}', '\0', '\0']), ('\u{288}', ['\u{1ae}', '\0', '\0']), ('\u{289}', ['\u{244}', - '\0', '\0']), ('\u{28a}', ['\u{1b1}', '\0', '\0']), ('\u{28b}', ['\u{1b2}', '\0', '\0']), - ('\u{28c}', ['\u{245}', '\0', '\0']), ('\u{292}', ['\u{1b7}', '\0', '\0']), ('\u{29d}', - ['\u{a7b2}', '\0', '\0']), ('\u{29e}', ['\u{a7b0}', '\0', '\0']), ('\u{345}', ['\u{399}', - '\0', '\0']), ('\u{371}', ['\u{370}', '\0', '\0']), ('\u{373}', ['\u{372}', '\0', '\0']), - ('\u{377}', ['\u{376}', '\0', '\0']), ('\u{37b}', ['\u{3fd}', '\0', '\0']), ('\u{37c}', - ['\u{3fe}', '\0', '\0']), ('\u{37d}', ['\u{3ff}', '\0', '\0']), ('\u{390}', ['\u{399}', - '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\0', '\0']), ('\u{3ad}', ['\u{388}', '\0', - '\0']), ('\u{3ae}', ['\u{389}', '\0', '\0']), ('\u{3af}', ['\u{38a}', '\0', '\0']), - ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']), ('\u{3b1}', ['\u{391}', '\0', '\0']), - ('\u{3b2}', ['\u{392}', '\0', '\0']), ('\u{3b3}', ['\u{393}', '\0', '\0']), ('\u{3b4}', - ['\u{394}', '\0', '\0']), ('\u{3b5}', ['\u{395}', '\0', '\0']), ('\u{3b6}', ['\u{396}', - '\0', '\0']), ('\u{3b7}', ['\u{397}', '\0', '\0']), ('\u{3b8}', ['\u{398}', '\0', '\0']), - ('\u{3b9}', ['\u{399}', 
'\0', '\0']), ('\u{3ba}', ['\u{39a}', '\0', '\0']), ('\u{3bb}', - ['\u{39b}', '\0', '\0']), ('\u{3bc}', ['\u{39c}', '\0', '\0']), ('\u{3bd}', ['\u{39d}', - '\0', '\0']), ('\u{3be}', ['\u{39e}', '\0', '\0']), ('\u{3bf}', ['\u{39f}', '\0', '\0']), - ('\u{3c0}', ['\u{3a0}', '\0', '\0']), ('\u{3c1}', ['\u{3a1}', '\0', '\0']), ('\u{3c2}', - ['\u{3a3}', '\0', '\0']), ('\u{3c3}', ['\u{3a3}', '\0', '\0']), ('\u{3c4}', ['\u{3a4}', - '\0', '\0']), ('\u{3c5}', ['\u{3a5}', '\0', '\0']), ('\u{3c6}', ['\u{3a6}', '\0', '\0']), - ('\u{3c7}', ['\u{3a7}', '\0', '\0']), ('\u{3c8}', ['\u{3a8}', '\0', '\0']), ('\u{3c9}', - ['\u{3a9}', '\0', '\0']), ('\u{3ca}', ['\u{3aa}', '\0', '\0']), ('\u{3cb}', ['\u{3ab}', - '\0', '\0']), ('\u{3cc}', ['\u{38c}', '\0', '\0']), ('\u{3cd}', ['\u{38e}', '\0', '\0']), - ('\u{3ce}', ['\u{38f}', '\0', '\0']), ('\u{3d0}', ['\u{392}', '\0', '\0']), ('\u{3d1}', - ['\u{398}', '\0', '\0']), ('\u{3d5}', ['\u{3a6}', '\0', '\0']), ('\u{3d6}', ['\u{3a0}', - '\0', '\0']), ('\u{3d7}', ['\u{3cf}', '\0', '\0']), ('\u{3d9}', ['\u{3d8}', '\0', '\0']), - ('\u{3db}', ['\u{3da}', '\0', '\0']), ('\u{3dd}', ['\u{3dc}', '\0', '\0']), ('\u{3df}', - ['\u{3de}', '\0', '\0']), ('\u{3e1}', ['\u{3e0}', '\0', '\0']), ('\u{3e3}', ['\u{3e2}', - '\0', '\0']), ('\u{3e5}', ['\u{3e4}', '\0', '\0']), ('\u{3e7}', ['\u{3e6}', '\0', '\0']), - ('\u{3e9}', ['\u{3e8}', '\0', '\0']), ('\u{3eb}', ['\u{3ea}', '\0', '\0']), ('\u{3ed}', - ['\u{3ec}', '\0', '\0']), ('\u{3ef}', ['\u{3ee}', '\0', '\0']), ('\u{3f0}', ['\u{39a}', - '\0', '\0']), ('\u{3f1}', ['\u{3a1}', '\0', '\0']), ('\u{3f2}', ['\u{3f9}', '\0', '\0']), - ('\u{3f3}', ['\u{37f}', '\0', '\0']), ('\u{3f5}', ['\u{395}', '\0', '\0']), ('\u{3f8}', - ['\u{3f7}', '\0', '\0']), ('\u{3fb}', ['\u{3fa}', '\0', '\0']), ('\u{430}', ['\u{410}', - '\0', '\0']), ('\u{431}', ['\u{411}', '\0', '\0']), ('\u{432}', ['\u{412}', '\0', '\0']), - ('\u{433}', ['\u{413}', '\0', '\0']), ('\u{434}', ['\u{414}', '\0', '\0']), ('\u{435}', - ['\u{415}', '\0', '\0']), 
('\u{436}', ['\u{416}', '\0', '\0']), ('\u{437}', ['\u{417}', - '\0', '\0']), ('\u{438}', ['\u{418}', '\0', '\0']), ('\u{439}', ['\u{419}', '\0', '\0']), - ('\u{43a}', ['\u{41a}', '\0', '\0']), ('\u{43b}', ['\u{41b}', '\0', '\0']), ('\u{43c}', - ['\u{41c}', '\0', '\0']), ('\u{43d}', ['\u{41d}', '\0', '\0']), ('\u{43e}', ['\u{41e}', - '\0', '\0']), ('\u{43f}', ['\u{41f}', '\0', '\0']), ('\u{440}', ['\u{420}', '\0', '\0']), - ('\u{441}', ['\u{421}', '\0', '\0']), ('\u{442}', ['\u{422}', '\0', '\0']), ('\u{443}', - ['\u{423}', '\0', '\0']), ('\u{444}', ['\u{424}', '\0', '\0']), ('\u{445}', ['\u{425}', - '\0', '\0']), ('\u{446}', ['\u{426}', '\0', '\0']), ('\u{447}', ['\u{427}', '\0', '\0']), - ('\u{448}', ['\u{428}', '\0', '\0']), ('\u{449}', ['\u{429}', '\0', '\0']), ('\u{44a}', - ['\u{42a}', '\0', '\0']), ('\u{44b}', ['\u{42b}', '\0', '\0']), ('\u{44c}', ['\u{42c}', - '\0', '\0']), ('\u{44d}', ['\u{42d}', '\0', '\0']), ('\u{44e}', ['\u{42e}', '\0', '\0']), - ('\u{44f}', ['\u{42f}', '\0', '\0']), ('\u{450}', ['\u{400}', '\0', '\0']), ('\u{451}', - ['\u{401}', '\0', '\0']), ('\u{452}', ['\u{402}', '\0', '\0']), ('\u{453}', ['\u{403}', - '\0', '\0']), ('\u{454}', ['\u{404}', '\0', '\0']), ('\u{455}', ['\u{405}', '\0', '\0']), - ('\u{456}', ['\u{406}', '\0', '\0']), ('\u{457}', ['\u{407}', '\0', '\0']), ('\u{458}', - ['\u{408}', '\0', '\0']), ('\u{459}', ['\u{409}', '\0', '\0']), ('\u{45a}', ['\u{40a}', - '\0', '\0']), ('\u{45b}', ['\u{40b}', '\0', '\0']), ('\u{45c}', ['\u{40c}', '\0', '\0']), - ('\u{45d}', ['\u{40d}', '\0', '\0']), ('\u{45e}', ['\u{40e}', '\0', '\0']), ('\u{45f}', - ['\u{40f}', '\0', '\0']), ('\u{461}', ['\u{460}', '\0', '\0']), ('\u{463}', ['\u{462}', - '\0', '\0']), ('\u{465}', ['\u{464}', '\0', '\0']), ('\u{467}', ['\u{466}', '\0', '\0']), - ('\u{469}', ['\u{468}', '\0', '\0']), ('\u{46b}', ['\u{46a}', '\0', '\0']), ('\u{46d}', - ['\u{46c}', '\0', '\0']), ('\u{46f}', ['\u{46e}', '\0', '\0']), ('\u{471}', ['\u{470}', - '\0', '\0']), ('\u{473}', 
['\u{472}', '\0', '\0']), ('\u{475}', ['\u{474}', '\0', '\0']), - ('\u{477}', ['\u{476}', '\0', '\0']), ('\u{479}', ['\u{478}', '\0', '\0']), ('\u{47b}', - ['\u{47a}', '\0', '\0']), ('\u{47d}', ['\u{47c}', '\0', '\0']), ('\u{47f}', ['\u{47e}', - '\0', '\0']), ('\u{481}', ['\u{480}', '\0', '\0']), ('\u{48b}', ['\u{48a}', '\0', '\0']), - ('\u{48d}', ['\u{48c}', '\0', '\0']), ('\u{48f}', ['\u{48e}', '\0', '\0']), ('\u{491}', - ['\u{490}', '\0', '\0']), ('\u{493}', ['\u{492}', '\0', '\0']), ('\u{495}', ['\u{494}', - '\0', '\0']), ('\u{497}', ['\u{496}', '\0', '\0']), ('\u{499}', ['\u{498}', '\0', '\0']), - ('\u{49b}', ['\u{49a}', '\0', '\0']), ('\u{49d}', ['\u{49c}', '\0', '\0']), ('\u{49f}', - ['\u{49e}', '\0', '\0']), ('\u{4a1}', ['\u{4a0}', '\0', '\0']), ('\u{4a3}', ['\u{4a2}', - '\0', '\0']), ('\u{4a5}', ['\u{4a4}', '\0', '\0']), ('\u{4a7}', ['\u{4a6}', '\0', '\0']), - ('\u{4a9}', ['\u{4a8}', '\0', '\0']), ('\u{4ab}', ['\u{4aa}', '\0', '\0']), ('\u{4ad}', - ['\u{4ac}', '\0', '\0']), ('\u{4af}', ['\u{4ae}', '\0', '\0']), ('\u{4b1}', ['\u{4b0}', - '\0', '\0']), ('\u{4b3}', ['\u{4b2}', '\0', '\0']), ('\u{4b5}', ['\u{4b4}', '\0', '\0']), - ('\u{4b7}', ['\u{4b6}', '\0', '\0']), ('\u{4b9}', ['\u{4b8}', '\0', '\0']), ('\u{4bb}', - ['\u{4ba}', '\0', '\0']), ('\u{4bd}', ['\u{4bc}', '\0', '\0']), ('\u{4bf}', ['\u{4be}', - '\0', '\0']), ('\u{4c2}', ['\u{4c1}', '\0', '\0']), ('\u{4c4}', ['\u{4c3}', '\0', '\0']), - ('\u{4c6}', ['\u{4c5}', '\0', '\0']), ('\u{4c8}', ['\u{4c7}', '\0', '\0']), ('\u{4ca}', - ['\u{4c9}', '\0', '\0']), ('\u{4cc}', ['\u{4cb}', '\0', '\0']), ('\u{4ce}', ['\u{4cd}', - '\0', '\0']), ('\u{4cf}', ['\u{4c0}', '\0', '\0']), ('\u{4d1}', ['\u{4d0}', '\0', '\0']), - ('\u{4d3}', ['\u{4d2}', '\0', '\0']), ('\u{4d5}', ['\u{4d4}', '\0', '\0']), ('\u{4d7}', - ['\u{4d6}', '\0', '\0']), ('\u{4d9}', ['\u{4d8}', '\0', '\0']), ('\u{4db}', ['\u{4da}', - '\0', '\0']), ('\u{4dd}', ['\u{4dc}', '\0', '\0']), ('\u{4df}', ['\u{4de}', '\0', '\0']), - ('\u{4e1}', ['\u{4e0}', '\0', 
'\0']), ('\u{4e3}', ['\u{4e2}', '\0', '\0']), ('\u{4e5}', - ['\u{4e4}', '\0', '\0']), ('\u{4e7}', ['\u{4e6}', '\0', '\0']), ('\u{4e9}', ['\u{4e8}', - '\0', '\0']), ('\u{4eb}', ['\u{4ea}', '\0', '\0']), ('\u{4ed}', ['\u{4ec}', '\0', '\0']), - ('\u{4ef}', ['\u{4ee}', '\0', '\0']), ('\u{4f1}', ['\u{4f0}', '\0', '\0']), ('\u{4f3}', - ['\u{4f2}', '\0', '\0']), ('\u{4f5}', ['\u{4f4}', '\0', '\0']), ('\u{4f7}', ['\u{4f6}', - '\0', '\0']), ('\u{4f9}', ['\u{4f8}', '\0', '\0']), ('\u{4fb}', ['\u{4fa}', '\0', '\0']), - ('\u{4fd}', ['\u{4fc}', '\0', '\0']), ('\u{4ff}', ['\u{4fe}', '\0', '\0']), ('\u{501}', - ['\u{500}', '\0', '\0']), ('\u{503}', ['\u{502}', '\0', '\0']), ('\u{505}', ['\u{504}', - '\0', '\0']), ('\u{507}', ['\u{506}', '\0', '\0']), ('\u{509}', ['\u{508}', '\0', '\0']), - ('\u{50b}', ['\u{50a}', '\0', '\0']), ('\u{50d}', ['\u{50c}', '\0', '\0']), ('\u{50f}', - ['\u{50e}', '\0', '\0']), ('\u{511}', ['\u{510}', '\0', '\0']), ('\u{513}', ['\u{512}', - '\0', '\0']), ('\u{515}', ['\u{514}', '\0', '\0']), ('\u{517}', ['\u{516}', '\0', '\0']), - ('\u{519}', ['\u{518}', '\0', '\0']), ('\u{51b}', ['\u{51a}', '\0', '\0']), ('\u{51d}', - ['\u{51c}', '\0', '\0']), ('\u{51f}', ['\u{51e}', '\0', '\0']), ('\u{521}', ['\u{520}', - '\0', '\0']), ('\u{523}', ['\u{522}', '\0', '\0']), ('\u{525}', ['\u{524}', '\0', '\0']), - ('\u{527}', ['\u{526}', '\0', '\0']), ('\u{529}', ['\u{528}', '\0', '\0']), ('\u{52b}', - ['\u{52a}', '\0', '\0']), ('\u{52d}', ['\u{52c}', '\0', '\0']), ('\u{52f}', ['\u{52e}', - '\0', '\0']), ('\u{561}', ['\u{531}', '\0', '\0']), ('\u{562}', ['\u{532}', '\0', '\0']), - ('\u{563}', ['\u{533}', '\0', '\0']), ('\u{564}', ['\u{534}', '\0', '\0']), ('\u{565}', - ['\u{535}', '\0', '\0']), ('\u{566}', ['\u{536}', '\0', '\0']), ('\u{567}', ['\u{537}', - '\0', '\0']), ('\u{568}', ['\u{538}', '\0', '\0']), ('\u{569}', ['\u{539}', '\0', '\0']), - ('\u{56a}', ['\u{53a}', '\0', '\0']), ('\u{56b}', ['\u{53b}', '\0', '\0']), ('\u{56c}', - ['\u{53c}', '\0', '\0']), 
('\u{56d}', ['\u{53d}', '\0', '\0']), ('\u{56e}', ['\u{53e}', - '\0', '\0']), ('\u{56f}', ['\u{53f}', '\0', '\0']), ('\u{570}', ['\u{540}', '\0', '\0']), - ('\u{571}', ['\u{541}', '\0', '\0']), ('\u{572}', ['\u{542}', '\0', '\0']), ('\u{573}', - ['\u{543}', '\0', '\0']), ('\u{574}', ['\u{544}', '\0', '\0']), ('\u{575}', ['\u{545}', - '\0', '\0']), ('\u{576}', ['\u{546}', '\0', '\0']), ('\u{577}', ['\u{547}', '\0', '\0']), - ('\u{578}', ['\u{548}', '\0', '\0']), ('\u{579}', ['\u{549}', '\0', '\0']), ('\u{57a}', - ['\u{54a}', '\0', '\0']), ('\u{57b}', ['\u{54b}', '\0', '\0']), ('\u{57c}', ['\u{54c}', - '\0', '\0']), ('\u{57d}', ['\u{54d}', '\0', '\0']), ('\u{57e}', ['\u{54e}', '\0', '\0']), - ('\u{57f}', ['\u{54f}', '\0', '\0']), ('\u{580}', ['\u{550}', '\0', '\0']), ('\u{581}', - ['\u{551}', '\0', '\0']), ('\u{582}', ['\u{552}', '\0', '\0']), ('\u{583}', ['\u{553}', - '\0', '\0']), ('\u{584}', ['\u{554}', '\0', '\0']), ('\u{585}', ['\u{555}', '\0', '\0']), - ('\u{586}', ['\u{556}', '\0', '\0']), ('\u{587}', ['\u{535}', '\u{552}', '\0']), - ('\u{13f8}', ['\u{13f0}', '\0', '\0']), ('\u{13f9}', ['\u{13f1}', '\0', '\0']), ('\u{13fa}', - ['\u{13f2}', '\0', '\0']), ('\u{13fb}', ['\u{13f3}', '\0', '\0']), ('\u{13fc}', ['\u{13f4}', - '\0', '\0']), ('\u{13fd}', ['\u{13f5}', '\0', '\0']), ('\u{1d79}', ['\u{a77d}', '\0', - '\0']), ('\u{1d7d}', ['\u{2c63}', '\0', '\0']), ('\u{1e01}', ['\u{1e00}', '\0', '\0']), - ('\u{1e03}', ['\u{1e02}', '\0', '\0']), ('\u{1e05}', ['\u{1e04}', '\0', '\0']), ('\u{1e07}', - ['\u{1e06}', '\0', '\0']), ('\u{1e09}', ['\u{1e08}', '\0', '\0']), ('\u{1e0b}', ['\u{1e0a}', - '\0', '\0']), ('\u{1e0d}', ['\u{1e0c}', '\0', '\0']), ('\u{1e0f}', ['\u{1e0e}', '\0', - '\0']), ('\u{1e11}', ['\u{1e10}', '\0', '\0']), ('\u{1e13}', ['\u{1e12}', '\0', '\0']), - ('\u{1e15}', ['\u{1e14}', '\0', '\0']), ('\u{1e17}', ['\u{1e16}', '\0', '\0']), ('\u{1e19}', - ['\u{1e18}', '\0', '\0']), ('\u{1e1b}', ['\u{1e1a}', '\0', '\0']), ('\u{1e1d}', ['\u{1e1c}', - '\0', '\0']), 
('\u{1e1f}', ['\u{1e1e}', '\0', '\0']), ('\u{1e21}', ['\u{1e20}', '\0', - '\0']), ('\u{1e23}', ['\u{1e22}', '\0', '\0']), ('\u{1e25}', ['\u{1e24}', '\0', '\0']), - ('\u{1e27}', ['\u{1e26}', '\0', '\0']), ('\u{1e29}', ['\u{1e28}', '\0', '\0']), ('\u{1e2b}', - ['\u{1e2a}', '\0', '\0']), ('\u{1e2d}', ['\u{1e2c}', '\0', '\0']), ('\u{1e2f}', ['\u{1e2e}', - '\0', '\0']), ('\u{1e31}', ['\u{1e30}', '\0', '\0']), ('\u{1e33}', ['\u{1e32}', '\0', - '\0']), ('\u{1e35}', ['\u{1e34}', '\0', '\0']), ('\u{1e37}', ['\u{1e36}', '\0', '\0']), - ('\u{1e39}', ['\u{1e38}', '\0', '\0']), ('\u{1e3b}', ['\u{1e3a}', '\0', '\0']), ('\u{1e3d}', - ['\u{1e3c}', '\0', '\0']), ('\u{1e3f}', ['\u{1e3e}', '\0', '\0']), ('\u{1e41}', ['\u{1e40}', - '\0', '\0']), ('\u{1e43}', ['\u{1e42}', '\0', '\0']), ('\u{1e45}', ['\u{1e44}', '\0', - '\0']), ('\u{1e47}', ['\u{1e46}', '\0', '\0']), ('\u{1e49}', ['\u{1e48}', '\0', '\0']), - ('\u{1e4b}', ['\u{1e4a}', '\0', '\0']), ('\u{1e4d}', ['\u{1e4c}', '\0', '\0']), ('\u{1e4f}', - ['\u{1e4e}', '\0', '\0']), ('\u{1e51}', ['\u{1e50}', '\0', '\0']), ('\u{1e53}', ['\u{1e52}', - '\0', '\0']), ('\u{1e55}', ['\u{1e54}', '\0', '\0']), ('\u{1e57}', ['\u{1e56}', '\0', - '\0']), ('\u{1e59}', ['\u{1e58}', '\0', '\0']), ('\u{1e5b}', ['\u{1e5a}', '\0', '\0']), - ('\u{1e5d}', ['\u{1e5c}', '\0', '\0']), ('\u{1e5f}', ['\u{1e5e}', '\0', '\0']), ('\u{1e61}', - ['\u{1e60}', '\0', '\0']), ('\u{1e63}', ['\u{1e62}', '\0', '\0']), ('\u{1e65}', ['\u{1e64}', - '\0', '\0']), ('\u{1e67}', ['\u{1e66}', '\0', '\0']), ('\u{1e69}', ['\u{1e68}', '\0', - '\0']), ('\u{1e6b}', ['\u{1e6a}', '\0', '\0']), ('\u{1e6d}', ['\u{1e6c}', '\0', '\0']), - ('\u{1e6f}', ['\u{1e6e}', '\0', '\0']), ('\u{1e71}', ['\u{1e70}', '\0', '\0']), ('\u{1e73}', - ['\u{1e72}', '\0', '\0']), ('\u{1e75}', ['\u{1e74}', '\0', '\0']), ('\u{1e77}', ['\u{1e76}', - '\0', '\0']), ('\u{1e79}', ['\u{1e78}', '\0', '\0']), ('\u{1e7b}', ['\u{1e7a}', '\0', - '\0']), ('\u{1e7d}', ['\u{1e7c}', '\0', '\0']), ('\u{1e7f}', ['\u{1e7e}', '\0', 
'\0']), - ('\u{1e81}', ['\u{1e80}', '\0', '\0']), ('\u{1e83}', ['\u{1e82}', '\0', '\0']), ('\u{1e85}', - ['\u{1e84}', '\0', '\0']), ('\u{1e87}', ['\u{1e86}', '\0', '\0']), ('\u{1e89}', ['\u{1e88}', - '\0', '\0']), ('\u{1e8b}', ['\u{1e8a}', '\0', '\0']), ('\u{1e8d}', ['\u{1e8c}', '\0', - '\0']), ('\u{1e8f}', ['\u{1e8e}', '\0', '\0']), ('\u{1e91}', ['\u{1e90}', '\0', '\0']), - ('\u{1e93}', ['\u{1e92}', '\0', '\0']), ('\u{1e95}', ['\u{1e94}', '\0', '\0']), ('\u{1e96}', - ['\u{48}', '\u{331}', '\0']), ('\u{1e97}', ['\u{54}', '\u{308}', '\0']), ('\u{1e98}', - ['\u{57}', '\u{30a}', '\0']), ('\u{1e99}', ['\u{59}', '\u{30a}', '\0']), ('\u{1e9a}', - ['\u{41}', '\u{2be}', '\0']), ('\u{1e9b}', ['\u{1e60}', '\0', '\0']), ('\u{1ea1}', - ['\u{1ea0}', '\0', '\0']), ('\u{1ea3}', ['\u{1ea2}', '\0', '\0']), ('\u{1ea5}', ['\u{1ea4}', - '\0', '\0']), ('\u{1ea7}', ['\u{1ea6}', '\0', '\0']), ('\u{1ea9}', ['\u{1ea8}', '\0', - '\0']), ('\u{1eab}', ['\u{1eaa}', '\0', '\0']), ('\u{1ead}', ['\u{1eac}', '\0', '\0']), - ('\u{1eaf}', ['\u{1eae}', '\0', '\0']), ('\u{1eb1}', ['\u{1eb0}', '\0', '\0']), ('\u{1eb3}', - ['\u{1eb2}', '\0', '\0']), ('\u{1eb5}', ['\u{1eb4}', '\0', '\0']), ('\u{1eb7}', ['\u{1eb6}', - '\0', '\0']), ('\u{1eb9}', ['\u{1eb8}', '\0', '\0']), ('\u{1ebb}', ['\u{1eba}', '\0', - '\0']), ('\u{1ebd}', ['\u{1ebc}', '\0', '\0']), ('\u{1ebf}', ['\u{1ebe}', '\0', '\0']), - ('\u{1ec1}', ['\u{1ec0}', '\0', '\0']), ('\u{1ec3}', ['\u{1ec2}', '\0', '\0']), ('\u{1ec5}', - ['\u{1ec4}', '\0', '\0']), ('\u{1ec7}', ['\u{1ec6}', '\0', '\0']), ('\u{1ec9}', ['\u{1ec8}', - '\0', '\0']), ('\u{1ecb}', ['\u{1eca}', '\0', '\0']), ('\u{1ecd}', ['\u{1ecc}', '\0', - '\0']), ('\u{1ecf}', ['\u{1ece}', '\0', '\0']), ('\u{1ed1}', ['\u{1ed0}', '\0', '\0']), - ('\u{1ed3}', ['\u{1ed2}', '\0', '\0']), ('\u{1ed5}', ['\u{1ed4}', '\0', '\0']), ('\u{1ed7}', - ['\u{1ed6}', '\0', '\0']), ('\u{1ed9}', ['\u{1ed8}', '\0', '\0']), ('\u{1edb}', ['\u{1eda}', - '\0', '\0']), ('\u{1edd}', ['\u{1edc}', '\0', '\0']), ('\u{1edf}', 
['\u{1ede}', '\0', - '\0']), ('\u{1ee1}', ['\u{1ee0}', '\0', '\0']), ('\u{1ee3}', ['\u{1ee2}', '\0', '\0']), - ('\u{1ee5}', ['\u{1ee4}', '\0', '\0']), ('\u{1ee7}', ['\u{1ee6}', '\0', '\0']), ('\u{1ee9}', - ['\u{1ee8}', '\0', '\0']), ('\u{1eeb}', ['\u{1eea}', '\0', '\0']), ('\u{1eed}', ['\u{1eec}', - '\0', '\0']), ('\u{1eef}', ['\u{1eee}', '\0', '\0']), ('\u{1ef1}', ['\u{1ef0}', '\0', - '\0']), ('\u{1ef3}', ['\u{1ef2}', '\0', '\0']), ('\u{1ef5}', ['\u{1ef4}', '\0', '\0']), - ('\u{1ef7}', ['\u{1ef6}', '\0', '\0']), ('\u{1ef9}', ['\u{1ef8}', '\0', '\0']), ('\u{1efb}', - ['\u{1efa}', '\0', '\0']), ('\u{1efd}', ['\u{1efc}', '\0', '\0']), ('\u{1eff}', ['\u{1efe}', - '\0', '\0']), ('\u{1f00}', ['\u{1f08}', '\0', '\0']), ('\u{1f01}', ['\u{1f09}', '\0', - '\0']), ('\u{1f02}', ['\u{1f0a}', '\0', '\0']), ('\u{1f03}', ['\u{1f0b}', '\0', '\0']), - ('\u{1f04}', ['\u{1f0c}', '\0', '\0']), ('\u{1f05}', ['\u{1f0d}', '\0', '\0']), ('\u{1f06}', - ['\u{1f0e}', '\0', '\0']), ('\u{1f07}', ['\u{1f0f}', '\0', '\0']), ('\u{1f10}', ['\u{1f18}', - '\0', '\0']), ('\u{1f11}', ['\u{1f19}', '\0', '\0']), ('\u{1f12}', ['\u{1f1a}', '\0', - '\0']), ('\u{1f13}', ['\u{1f1b}', '\0', '\0']), ('\u{1f14}', ['\u{1f1c}', '\0', '\0']), - ('\u{1f15}', ['\u{1f1d}', '\0', '\0']), ('\u{1f20}', ['\u{1f28}', '\0', '\0']), ('\u{1f21}', - ['\u{1f29}', '\0', '\0']), ('\u{1f22}', ['\u{1f2a}', '\0', '\0']), ('\u{1f23}', ['\u{1f2b}', - '\0', '\0']), ('\u{1f24}', ['\u{1f2c}', '\0', '\0']), ('\u{1f25}', ['\u{1f2d}', '\0', - '\0']), ('\u{1f26}', ['\u{1f2e}', '\0', '\0']), ('\u{1f27}', ['\u{1f2f}', '\0', '\0']), - ('\u{1f30}', ['\u{1f38}', '\0', '\0']), ('\u{1f31}', ['\u{1f39}', '\0', '\0']), ('\u{1f32}', - ['\u{1f3a}', '\0', '\0']), ('\u{1f33}', ['\u{1f3b}', '\0', '\0']), ('\u{1f34}', ['\u{1f3c}', - '\0', '\0']), ('\u{1f35}', ['\u{1f3d}', '\0', '\0']), ('\u{1f36}', ['\u{1f3e}', '\0', - '\0']), ('\u{1f37}', ['\u{1f3f}', '\0', '\0']), ('\u{1f40}', ['\u{1f48}', '\0', '\0']), - ('\u{1f41}', ['\u{1f49}', '\0', '\0']), 
('\u{1f42}', ['\u{1f4a}', '\0', '\0']), ('\u{1f43}', - ['\u{1f4b}', '\0', '\0']), ('\u{1f44}', ['\u{1f4c}', '\0', '\0']), ('\u{1f45}', ['\u{1f4d}', - '\0', '\0']), ('\u{1f50}', ['\u{3a5}', '\u{313}', '\0']), ('\u{1f51}', ['\u{1f59}', '\0', - '\0']), ('\u{1f52}', ['\u{3a5}', '\u{313}', '\u{300}']), ('\u{1f53}', ['\u{1f5b}', '\0', - '\0']), ('\u{1f54}', ['\u{3a5}', '\u{313}', '\u{301}']), ('\u{1f55}', ['\u{1f5d}', '\0', - '\0']), ('\u{1f56}', ['\u{3a5}', '\u{313}', '\u{342}']), ('\u{1f57}', ['\u{1f5f}', '\0', - '\0']), ('\u{1f60}', ['\u{1f68}', '\0', '\0']), ('\u{1f61}', ['\u{1f69}', '\0', '\0']), - ('\u{1f62}', ['\u{1f6a}', '\0', '\0']), ('\u{1f63}', ['\u{1f6b}', '\0', '\0']), ('\u{1f64}', - ['\u{1f6c}', '\0', '\0']), ('\u{1f65}', ['\u{1f6d}', '\0', '\0']), ('\u{1f66}', ['\u{1f6e}', - '\0', '\0']), ('\u{1f67}', ['\u{1f6f}', '\0', '\0']), ('\u{1f70}', ['\u{1fba}', '\0', - '\0']), ('\u{1f71}', ['\u{1fbb}', '\0', '\0']), ('\u{1f72}', ['\u{1fc8}', '\0', '\0']), - ('\u{1f73}', ['\u{1fc9}', '\0', '\0']), ('\u{1f74}', ['\u{1fca}', '\0', '\0']), ('\u{1f75}', - ['\u{1fcb}', '\0', '\0']), ('\u{1f76}', ['\u{1fda}', '\0', '\0']), ('\u{1f77}', ['\u{1fdb}', - '\0', '\0']), ('\u{1f78}', ['\u{1ff8}', '\0', '\0']), ('\u{1f79}', ['\u{1ff9}', '\0', - '\0']), ('\u{1f7a}', ['\u{1fea}', '\0', '\0']), ('\u{1f7b}', ['\u{1feb}', '\0', '\0']), - ('\u{1f7c}', ['\u{1ffa}', '\0', '\0']), ('\u{1f7d}', ['\u{1ffb}', '\0', '\0']), ('\u{1f80}', - ['\u{1f08}', '\u{399}', '\0']), ('\u{1f81}', ['\u{1f09}', '\u{399}', '\0']), ('\u{1f82}', - ['\u{1f0a}', '\u{399}', '\0']), ('\u{1f83}', ['\u{1f0b}', '\u{399}', '\0']), ('\u{1f84}', - ['\u{1f0c}', '\u{399}', '\0']), ('\u{1f85}', ['\u{1f0d}', '\u{399}', '\0']), ('\u{1f86}', - ['\u{1f0e}', '\u{399}', '\0']), ('\u{1f87}', ['\u{1f0f}', '\u{399}', '\0']), ('\u{1f88}', - ['\u{1f08}', '\u{399}', '\0']), ('\u{1f89}', ['\u{1f09}', '\u{399}', '\0']), ('\u{1f8a}', - ['\u{1f0a}', '\u{399}', '\0']), ('\u{1f8b}', ['\u{1f0b}', '\u{399}', '\0']), ('\u{1f8c}', - 
['\u{1f0c}', '\u{399}', '\0']), ('\u{1f8d}', ['\u{1f0d}', '\u{399}', '\0']), ('\u{1f8e}', - ['\u{1f0e}', '\u{399}', '\0']), ('\u{1f8f}', ['\u{1f0f}', '\u{399}', '\0']), ('\u{1f90}', - ['\u{1f28}', '\u{399}', '\0']), ('\u{1f91}', ['\u{1f29}', '\u{399}', '\0']), ('\u{1f92}', - ['\u{1f2a}', '\u{399}', '\0']), ('\u{1f93}', ['\u{1f2b}', '\u{399}', '\0']), ('\u{1f94}', - ['\u{1f2c}', '\u{399}', '\0']), ('\u{1f95}', ['\u{1f2d}', '\u{399}', '\0']), ('\u{1f96}', - ['\u{1f2e}', '\u{399}', '\0']), ('\u{1f97}', ['\u{1f2f}', '\u{399}', '\0']), ('\u{1f98}', - ['\u{1f28}', '\u{399}', '\0']), ('\u{1f99}', ['\u{1f29}', '\u{399}', '\0']), ('\u{1f9a}', - ['\u{1f2a}', '\u{399}', '\0']), ('\u{1f9b}', ['\u{1f2b}', '\u{399}', '\0']), ('\u{1f9c}', - ['\u{1f2c}', '\u{399}', '\0']), ('\u{1f9d}', ['\u{1f2d}', '\u{399}', '\0']), ('\u{1f9e}', - ['\u{1f2e}', '\u{399}', '\0']), ('\u{1f9f}', ['\u{1f2f}', '\u{399}', '\0']), ('\u{1fa0}', - ['\u{1f68}', '\u{399}', '\0']), ('\u{1fa1}', ['\u{1f69}', '\u{399}', '\0']), ('\u{1fa2}', - ['\u{1f6a}', '\u{399}', '\0']), ('\u{1fa3}', ['\u{1f6b}', '\u{399}', '\0']), ('\u{1fa4}', - ['\u{1f6c}', '\u{399}', '\0']), ('\u{1fa5}', ['\u{1f6d}', '\u{399}', '\0']), ('\u{1fa6}', - ['\u{1f6e}', '\u{399}', '\0']), ('\u{1fa7}', ['\u{1f6f}', '\u{399}', '\0']), ('\u{1fa8}', - ['\u{1f68}', '\u{399}', '\0']), ('\u{1fa9}', ['\u{1f69}', '\u{399}', '\0']), ('\u{1faa}', - ['\u{1f6a}', '\u{399}', '\0']), ('\u{1fab}', ['\u{1f6b}', '\u{399}', '\0']), ('\u{1fac}', - ['\u{1f6c}', '\u{399}', '\0']), ('\u{1fad}', ['\u{1f6d}', '\u{399}', '\0']), ('\u{1fae}', - ['\u{1f6e}', '\u{399}', '\0']), ('\u{1faf}', ['\u{1f6f}', '\u{399}', '\0']), ('\u{1fb0}', - ['\u{1fb8}', '\0', '\0']), ('\u{1fb1}', ['\u{1fb9}', '\0', '\0']), ('\u{1fb2}', ['\u{1fba}', - '\u{399}', '\0']), ('\u{1fb3}', ['\u{391}', '\u{399}', '\0']), ('\u{1fb4}', ['\u{386}', - '\u{399}', '\0']), ('\u{1fb6}', ['\u{391}', '\u{342}', '\0']), ('\u{1fb7}', ['\u{391}', - '\u{342}', '\u{399}']), ('\u{1fbc}', ['\u{391}', '\u{399}', '\0']), 
('\u{1fbe}', ['\u{399}', - '\0', '\0']), ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\0']), ('\u{1fc3}', ['\u{397}', - '\u{399}', '\0']), ('\u{1fc4}', ['\u{389}', '\u{399}', '\0']), ('\u{1fc6}', ['\u{397}', - '\u{342}', '\0']), ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']), ('\u{1fcc}', ['\u{397}', - '\u{399}', '\0']), ('\u{1fd0}', ['\u{1fd8}', '\0', '\0']), ('\u{1fd1}', ['\u{1fd9}', '\0', - '\0']), ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']), ('\u{1fd3}', ['\u{399}', '\u{308}', - '\u{301}']), ('\u{1fd6}', ['\u{399}', '\u{342}', '\0']), ('\u{1fd7}', ['\u{399}', '\u{308}', - '\u{342}']), ('\u{1fe0}', ['\u{1fe8}', '\0', '\0']), ('\u{1fe1}', ['\u{1fe9}', '\0', '\0']), - ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']), ('\u{1fe3}', ['\u{3a5}', '\u{308}', - '\u{301}']), ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\0']), ('\u{1fe5}', ['\u{1fec}', '\0', - '\0']), ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\0']), ('\u{1fe7}', ['\u{3a5}', '\u{308}', - '\u{342}']), ('\u{1ff2}', ['\u{1ffa}', '\u{399}', '\0']), ('\u{1ff3}', ['\u{3a9}', - '\u{399}', '\0']), ('\u{1ff4}', ['\u{38f}', '\u{399}', '\0']), ('\u{1ff6}', ['\u{3a9}', - '\u{342}', '\0']), ('\u{1ff7}', ['\u{3a9}', '\u{342}', '\u{399}']), ('\u{1ffc}', ['\u{3a9}', - '\u{399}', '\0']), ('\u{214e}', ['\u{2132}', '\0', '\0']), ('\u{2170}', ['\u{2160}', '\0', - '\0']), ('\u{2171}', ['\u{2161}', '\0', '\0']), ('\u{2172}', ['\u{2162}', '\0', '\0']), - ('\u{2173}', ['\u{2163}', '\0', '\0']), ('\u{2174}', ['\u{2164}', '\0', '\0']), ('\u{2175}', - ['\u{2165}', '\0', '\0']), ('\u{2176}', ['\u{2166}', '\0', '\0']), ('\u{2177}', ['\u{2167}', - '\0', '\0']), ('\u{2178}', ['\u{2168}', '\0', '\0']), ('\u{2179}', ['\u{2169}', '\0', - '\0']), ('\u{217a}', ['\u{216a}', '\0', '\0']), ('\u{217b}', ['\u{216b}', '\0', '\0']), - ('\u{217c}', ['\u{216c}', '\0', '\0']), ('\u{217d}', ['\u{216d}', '\0', '\0']), ('\u{217e}', - ['\u{216e}', '\0', '\0']), ('\u{217f}', ['\u{216f}', '\0', '\0']), ('\u{2184}', ['\u{2183}', - '\0', '\0']), ('\u{24d0}', ['\u{24b6}', 
'\0', '\0']), ('\u{24d1}', ['\u{24b7}', '\0', - '\0']), ('\u{24d2}', ['\u{24b8}', '\0', '\0']), ('\u{24d3}', ['\u{24b9}', '\0', '\0']), - ('\u{24d4}', ['\u{24ba}', '\0', '\0']), ('\u{24d5}', ['\u{24bb}', '\0', '\0']), ('\u{24d6}', - ['\u{24bc}', '\0', '\0']), ('\u{24d7}', ['\u{24bd}', '\0', '\0']), ('\u{24d8}', ['\u{24be}', - '\0', '\0']), ('\u{24d9}', ['\u{24bf}', '\0', '\0']), ('\u{24da}', ['\u{24c0}', '\0', - '\0']), ('\u{24db}', ['\u{24c1}', '\0', '\0']), ('\u{24dc}', ['\u{24c2}', '\0', '\0']), - ('\u{24dd}', ['\u{24c3}', '\0', '\0']), ('\u{24de}', ['\u{24c4}', '\0', '\0']), ('\u{24df}', - ['\u{24c5}', '\0', '\0']), ('\u{24e0}', ['\u{24c6}', '\0', '\0']), ('\u{24e1}', ['\u{24c7}', - '\0', '\0']), ('\u{24e2}', ['\u{24c8}', '\0', '\0']), ('\u{24e3}', ['\u{24c9}', '\0', - '\0']), ('\u{24e4}', ['\u{24ca}', '\0', '\0']), ('\u{24e5}', ['\u{24cb}', '\0', '\0']), - ('\u{24e6}', ['\u{24cc}', '\0', '\0']), ('\u{24e7}', ['\u{24cd}', '\0', '\0']), ('\u{24e8}', - ['\u{24ce}', '\0', '\0']), ('\u{24e9}', ['\u{24cf}', '\0', '\0']), ('\u{2c30}', ['\u{2c00}', - '\0', '\0']), ('\u{2c31}', ['\u{2c01}', '\0', '\0']), ('\u{2c32}', ['\u{2c02}', '\0', - '\0']), ('\u{2c33}', ['\u{2c03}', '\0', '\0']), ('\u{2c34}', ['\u{2c04}', '\0', '\0']), - ('\u{2c35}', ['\u{2c05}', '\0', '\0']), ('\u{2c36}', ['\u{2c06}', '\0', '\0']), ('\u{2c37}', - ['\u{2c07}', '\0', '\0']), ('\u{2c38}', ['\u{2c08}', '\0', '\0']), ('\u{2c39}', ['\u{2c09}', - '\0', '\0']), ('\u{2c3a}', ['\u{2c0a}', '\0', '\0']), ('\u{2c3b}', ['\u{2c0b}', '\0', - '\0']), ('\u{2c3c}', ['\u{2c0c}', '\0', '\0']), ('\u{2c3d}', ['\u{2c0d}', '\0', '\0']), - ('\u{2c3e}', ['\u{2c0e}', '\0', '\0']), ('\u{2c3f}', ['\u{2c0f}', '\0', '\0']), ('\u{2c40}', - ['\u{2c10}', '\0', '\0']), ('\u{2c41}', ['\u{2c11}', '\0', '\0']), ('\u{2c42}', ['\u{2c12}', - '\0', '\0']), ('\u{2c43}', ['\u{2c13}', '\0', '\0']), ('\u{2c44}', ['\u{2c14}', '\0', - '\0']), ('\u{2c45}', ['\u{2c15}', '\0', '\0']), ('\u{2c46}', ['\u{2c16}', '\0', '\0']), - ('\u{2c47}', 
['\u{2c17}', '\0', '\0']), ('\u{2c48}', ['\u{2c18}', '\0', '\0']), ('\u{2c49}', - ['\u{2c19}', '\0', '\0']), ('\u{2c4a}', ['\u{2c1a}', '\0', '\0']), ('\u{2c4b}', ['\u{2c1b}', - '\0', '\0']), ('\u{2c4c}', ['\u{2c1c}', '\0', '\0']), ('\u{2c4d}', ['\u{2c1d}', '\0', - '\0']), ('\u{2c4e}', ['\u{2c1e}', '\0', '\0']), ('\u{2c4f}', ['\u{2c1f}', '\0', '\0']), - ('\u{2c50}', ['\u{2c20}', '\0', '\0']), ('\u{2c51}', ['\u{2c21}', '\0', '\0']), ('\u{2c52}', - ['\u{2c22}', '\0', '\0']), ('\u{2c53}', ['\u{2c23}', '\0', '\0']), ('\u{2c54}', ['\u{2c24}', - '\0', '\0']), ('\u{2c55}', ['\u{2c25}', '\0', '\0']), ('\u{2c56}', ['\u{2c26}', '\0', - '\0']), ('\u{2c57}', ['\u{2c27}', '\0', '\0']), ('\u{2c58}', ['\u{2c28}', '\0', '\0']), - ('\u{2c59}', ['\u{2c29}', '\0', '\0']), ('\u{2c5a}', ['\u{2c2a}', '\0', '\0']), ('\u{2c5b}', - ['\u{2c2b}', '\0', '\0']), ('\u{2c5c}', ['\u{2c2c}', '\0', '\0']), ('\u{2c5d}', ['\u{2c2d}', - '\0', '\0']), ('\u{2c5e}', ['\u{2c2e}', '\0', '\0']), ('\u{2c61}', ['\u{2c60}', '\0', - '\0']), ('\u{2c65}', ['\u{23a}', '\0', '\0']), ('\u{2c66}', ['\u{23e}', '\0', '\0']), - ('\u{2c68}', ['\u{2c67}', '\0', '\0']), ('\u{2c6a}', ['\u{2c69}', '\0', '\0']), ('\u{2c6c}', - ['\u{2c6b}', '\0', '\0']), ('\u{2c73}', ['\u{2c72}', '\0', '\0']), ('\u{2c76}', ['\u{2c75}', - '\0', '\0']), ('\u{2c81}', ['\u{2c80}', '\0', '\0']), ('\u{2c83}', ['\u{2c82}', '\0', - '\0']), ('\u{2c85}', ['\u{2c84}', '\0', '\0']), ('\u{2c87}', ['\u{2c86}', '\0', '\0']), - ('\u{2c89}', ['\u{2c88}', '\0', '\0']), ('\u{2c8b}', ['\u{2c8a}', '\0', '\0']), ('\u{2c8d}', - ['\u{2c8c}', '\0', '\0']), ('\u{2c8f}', ['\u{2c8e}', '\0', '\0']), ('\u{2c91}', ['\u{2c90}', - '\0', '\0']), ('\u{2c93}', ['\u{2c92}', '\0', '\0']), ('\u{2c95}', ['\u{2c94}', '\0', - '\0']), ('\u{2c97}', ['\u{2c96}', '\0', '\0']), ('\u{2c99}', ['\u{2c98}', '\0', '\0']), - ('\u{2c9b}', ['\u{2c9a}', '\0', '\0']), ('\u{2c9d}', ['\u{2c9c}', '\0', '\0']), ('\u{2c9f}', - ['\u{2c9e}', '\0', '\0']), ('\u{2ca1}', ['\u{2ca0}', '\0', '\0']), ('\u{2ca3}', 
['\u{2ca2}', - '\0', '\0']), ('\u{2ca5}', ['\u{2ca4}', '\0', '\0']), ('\u{2ca7}', ['\u{2ca6}', '\0', - '\0']), ('\u{2ca9}', ['\u{2ca8}', '\0', '\0']), ('\u{2cab}', ['\u{2caa}', '\0', '\0']), - ('\u{2cad}', ['\u{2cac}', '\0', '\0']), ('\u{2caf}', ['\u{2cae}', '\0', '\0']), ('\u{2cb1}', - ['\u{2cb0}', '\0', '\0']), ('\u{2cb3}', ['\u{2cb2}', '\0', '\0']), ('\u{2cb5}', ['\u{2cb4}', - '\0', '\0']), ('\u{2cb7}', ['\u{2cb6}', '\0', '\0']), ('\u{2cb9}', ['\u{2cb8}', '\0', - '\0']), ('\u{2cbb}', ['\u{2cba}', '\0', '\0']), ('\u{2cbd}', ['\u{2cbc}', '\0', '\0']), - ('\u{2cbf}', ['\u{2cbe}', '\0', '\0']), ('\u{2cc1}', ['\u{2cc0}', '\0', '\0']), ('\u{2cc3}', - ['\u{2cc2}', '\0', '\0']), ('\u{2cc5}', ['\u{2cc4}', '\0', '\0']), ('\u{2cc7}', ['\u{2cc6}', - '\0', '\0']), ('\u{2cc9}', ['\u{2cc8}', '\0', '\0']), ('\u{2ccb}', ['\u{2cca}', '\0', - '\0']), ('\u{2ccd}', ['\u{2ccc}', '\0', '\0']), ('\u{2ccf}', ['\u{2cce}', '\0', '\0']), - ('\u{2cd1}', ['\u{2cd0}', '\0', '\0']), ('\u{2cd3}', ['\u{2cd2}', '\0', '\0']), ('\u{2cd5}', - ['\u{2cd4}', '\0', '\0']), ('\u{2cd7}', ['\u{2cd6}', '\0', '\0']), ('\u{2cd9}', ['\u{2cd8}', - '\0', '\0']), ('\u{2cdb}', ['\u{2cda}', '\0', '\0']), ('\u{2cdd}', ['\u{2cdc}', '\0', - '\0']), ('\u{2cdf}', ['\u{2cde}', '\0', '\0']), ('\u{2ce1}', ['\u{2ce0}', '\0', '\0']), - ('\u{2ce3}', ['\u{2ce2}', '\0', '\0']), ('\u{2cec}', ['\u{2ceb}', '\0', '\0']), ('\u{2cee}', - ['\u{2ced}', '\0', '\0']), ('\u{2cf3}', ['\u{2cf2}', '\0', '\0']), ('\u{2d00}', ['\u{10a0}', - '\0', '\0']), ('\u{2d01}', ['\u{10a1}', '\0', '\0']), ('\u{2d02}', ['\u{10a2}', '\0', - '\0']), ('\u{2d03}', ['\u{10a3}', '\0', '\0']), ('\u{2d04}', ['\u{10a4}', '\0', '\0']), - ('\u{2d05}', ['\u{10a5}', '\0', '\0']), ('\u{2d06}', ['\u{10a6}', '\0', '\0']), ('\u{2d07}', - ['\u{10a7}', '\0', '\0']), ('\u{2d08}', ['\u{10a8}', '\0', '\0']), ('\u{2d09}', ['\u{10a9}', - '\0', '\0']), ('\u{2d0a}', ['\u{10aa}', '\0', '\0']), ('\u{2d0b}', ['\u{10ab}', '\0', - '\0']), ('\u{2d0c}', ['\u{10ac}', '\0', '\0']), 
('\u{2d0d}', ['\u{10ad}', '\0', '\0']), - ('\u{2d0e}', ['\u{10ae}', '\0', '\0']), ('\u{2d0f}', ['\u{10af}', '\0', '\0']), ('\u{2d10}', - ['\u{10b0}', '\0', '\0']), ('\u{2d11}', ['\u{10b1}', '\0', '\0']), ('\u{2d12}', ['\u{10b2}', - '\0', '\0']), ('\u{2d13}', ['\u{10b3}', '\0', '\0']), ('\u{2d14}', ['\u{10b4}', '\0', - '\0']), ('\u{2d15}', ['\u{10b5}', '\0', '\0']), ('\u{2d16}', ['\u{10b6}', '\0', '\0']), - ('\u{2d17}', ['\u{10b7}', '\0', '\0']), ('\u{2d18}', ['\u{10b8}', '\0', '\0']), ('\u{2d19}', - ['\u{10b9}', '\0', '\0']), ('\u{2d1a}', ['\u{10ba}', '\0', '\0']), ('\u{2d1b}', ['\u{10bb}', - '\0', '\0']), ('\u{2d1c}', ['\u{10bc}', '\0', '\0']), ('\u{2d1d}', ['\u{10bd}', '\0', - '\0']), ('\u{2d1e}', ['\u{10be}', '\0', '\0']), ('\u{2d1f}', ['\u{10bf}', '\0', '\0']), - ('\u{2d20}', ['\u{10c0}', '\0', '\0']), ('\u{2d21}', ['\u{10c1}', '\0', '\0']), ('\u{2d22}', - ['\u{10c2}', '\0', '\0']), ('\u{2d23}', ['\u{10c3}', '\0', '\0']), ('\u{2d24}', ['\u{10c4}', - '\0', '\0']), ('\u{2d25}', ['\u{10c5}', '\0', '\0']), ('\u{2d27}', ['\u{10c7}', '\0', - '\0']), ('\u{2d2d}', ['\u{10cd}', '\0', '\0']), ('\u{a641}', ['\u{a640}', '\0', '\0']), - ('\u{a643}', ['\u{a642}', '\0', '\0']), ('\u{a645}', ['\u{a644}', '\0', '\0']), ('\u{a647}', - ['\u{a646}', '\0', '\0']), ('\u{a649}', ['\u{a648}', '\0', '\0']), ('\u{a64b}', ['\u{a64a}', - '\0', '\0']), ('\u{a64d}', ['\u{a64c}', '\0', '\0']), ('\u{a64f}', ['\u{a64e}', '\0', - '\0']), ('\u{a651}', ['\u{a650}', '\0', '\0']), ('\u{a653}', ['\u{a652}', '\0', '\0']), - ('\u{a655}', ['\u{a654}', '\0', '\0']), ('\u{a657}', ['\u{a656}', '\0', '\0']), ('\u{a659}', - ['\u{a658}', '\0', '\0']), ('\u{a65b}', ['\u{a65a}', '\0', '\0']), ('\u{a65d}', ['\u{a65c}', - '\0', '\0']), ('\u{a65f}', ['\u{a65e}', '\0', '\0']), ('\u{a661}', ['\u{a660}', '\0', - '\0']), ('\u{a663}', ['\u{a662}', '\0', '\0']), ('\u{a665}', ['\u{a664}', '\0', '\0']), - ('\u{a667}', ['\u{a666}', '\0', '\0']), ('\u{a669}', ['\u{a668}', '\0', '\0']), ('\u{a66b}', - ['\u{a66a}', '\0', 
'\0']), ('\u{a66d}', ['\u{a66c}', '\0', '\0']), ('\u{a681}', ['\u{a680}', - '\0', '\0']), ('\u{a683}', ['\u{a682}', '\0', '\0']), ('\u{a685}', ['\u{a684}', '\0', - '\0']), ('\u{a687}', ['\u{a686}', '\0', '\0']), ('\u{a689}', ['\u{a688}', '\0', '\0']), - ('\u{a68b}', ['\u{a68a}', '\0', '\0']), ('\u{a68d}', ['\u{a68c}', '\0', '\0']), ('\u{a68f}', - ['\u{a68e}', '\0', '\0']), ('\u{a691}', ['\u{a690}', '\0', '\0']), ('\u{a693}', ['\u{a692}', - '\0', '\0']), ('\u{a695}', ['\u{a694}', '\0', '\0']), ('\u{a697}', ['\u{a696}', '\0', - '\0']), ('\u{a699}', ['\u{a698}', '\0', '\0']), ('\u{a69b}', ['\u{a69a}', '\0', '\0']), - ('\u{a723}', ['\u{a722}', '\0', '\0']), ('\u{a725}', ['\u{a724}', '\0', '\0']), ('\u{a727}', - ['\u{a726}', '\0', '\0']), ('\u{a729}', ['\u{a728}', '\0', '\0']), ('\u{a72b}', ['\u{a72a}', - '\0', '\0']), ('\u{a72d}', ['\u{a72c}', '\0', '\0']), ('\u{a72f}', ['\u{a72e}', '\0', - '\0']), ('\u{a733}', ['\u{a732}', '\0', '\0']), ('\u{a735}', ['\u{a734}', '\0', '\0']), - ('\u{a737}', ['\u{a736}', '\0', '\0']), ('\u{a739}', ['\u{a738}', '\0', '\0']), ('\u{a73b}', - ['\u{a73a}', '\0', '\0']), ('\u{a73d}', ['\u{a73c}', '\0', '\0']), ('\u{a73f}', ['\u{a73e}', - '\0', '\0']), ('\u{a741}', ['\u{a740}', '\0', '\0']), ('\u{a743}', ['\u{a742}', '\0', - '\0']), ('\u{a745}', ['\u{a744}', '\0', '\0']), ('\u{a747}', ['\u{a746}', '\0', '\0']), - ('\u{a749}', ['\u{a748}', '\0', '\0']), ('\u{a74b}', ['\u{a74a}', '\0', '\0']), ('\u{a74d}', - ['\u{a74c}', '\0', '\0']), ('\u{a74f}', ['\u{a74e}', '\0', '\0']), ('\u{a751}', ['\u{a750}', - '\0', '\0']), ('\u{a753}', ['\u{a752}', '\0', '\0']), ('\u{a755}', ['\u{a754}', '\0', - '\0']), ('\u{a757}', ['\u{a756}', '\0', '\0']), ('\u{a759}', ['\u{a758}', '\0', '\0']), - ('\u{a75b}', ['\u{a75a}', '\0', '\0']), ('\u{a75d}', ['\u{a75c}', '\0', '\0']), ('\u{a75f}', - ['\u{a75e}', '\0', '\0']), ('\u{a761}', ['\u{a760}', '\0', '\0']), ('\u{a763}', ['\u{a762}', - '\0', '\0']), ('\u{a765}', ['\u{a764}', '\0', '\0']), ('\u{a767}', ['\u{a766}', 
'\0', - '\0']), ('\u{a769}', ['\u{a768}', '\0', '\0']), ('\u{a76b}', ['\u{a76a}', '\0', '\0']), - ('\u{a76d}', ['\u{a76c}', '\0', '\0']), ('\u{a76f}', ['\u{a76e}', '\0', '\0']), ('\u{a77a}', - ['\u{a779}', '\0', '\0']), ('\u{a77c}', ['\u{a77b}', '\0', '\0']), ('\u{a77f}', ['\u{a77e}', - '\0', '\0']), ('\u{a781}', ['\u{a780}', '\0', '\0']), ('\u{a783}', ['\u{a782}', '\0', - '\0']), ('\u{a785}', ['\u{a784}', '\0', '\0']), ('\u{a787}', ['\u{a786}', '\0', '\0']), - ('\u{a78c}', ['\u{a78b}', '\0', '\0']), ('\u{a791}', ['\u{a790}', '\0', '\0']), ('\u{a793}', - ['\u{a792}', '\0', '\0']), ('\u{a797}', ['\u{a796}', '\0', '\0']), ('\u{a799}', ['\u{a798}', - '\0', '\0']), ('\u{a79b}', ['\u{a79a}', '\0', '\0']), ('\u{a79d}', ['\u{a79c}', '\0', - '\0']), ('\u{a79f}', ['\u{a79e}', '\0', '\0']), ('\u{a7a1}', ['\u{a7a0}', '\0', '\0']), - ('\u{a7a3}', ['\u{a7a2}', '\0', '\0']), ('\u{a7a5}', ['\u{a7a4}', '\0', '\0']), ('\u{a7a7}', - ['\u{a7a6}', '\0', '\0']), ('\u{a7a9}', ['\u{a7a8}', '\0', '\0']), ('\u{a7b5}', ['\u{a7b4}', - '\0', '\0']), ('\u{a7b7}', ['\u{a7b6}', '\0', '\0']), ('\u{ab53}', ['\u{a7b3}', '\0', - '\0']), ('\u{ab70}', ['\u{13a0}', '\0', '\0']), ('\u{ab71}', ['\u{13a1}', '\0', '\0']), - ('\u{ab72}', ['\u{13a2}', '\0', '\0']), ('\u{ab73}', ['\u{13a3}', '\0', '\0']), ('\u{ab74}', - ['\u{13a4}', '\0', '\0']), ('\u{ab75}', ['\u{13a5}', '\0', '\0']), ('\u{ab76}', ['\u{13a6}', - '\0', '\0']), ('\u{ab77}', ['\u{13a7}', '\0', '\0']), ('\u{ab78}', ['\u{13a8}', '\0', - '\0']), ('\u{ab79}', ['\u{13a9}', '\0', '\0']), ('\u{ab7a}', ['\u{13aa}', '\0', '\0']), - ('\u{ab7b}', ['\u{13ab}', '\0', '\0']), ('\u{ab7c}', ['\u{13ac}', '\0', '\0']), ('\u{ab7d}', - ['\u{13ad}', '\0', '\0']), ('\u{ab7e}', ['\u{13ae}', '\0', '\0']), ('\u{ab7f}', ['\u{13af}', - '\0', '\0']), ('\u{ab80}', ['\u{13b0}', '\0', '\0']), ('\u{ab81}', ['\u{13b1}', '\0', - '\0']), ('\u{ab82}', ['\u{13b2}', '\0', '\0']), ('\u{ab83}', ['\u{13b3}', '\0', '\0']), - ('\u{ab84}', ['\u{13b4}', '\0', '\0']), ('\u{ab85}', 
['\u{13b5}', '\0', '\0']), ('\u{ab86}', - ['\u{13b6}', '\0', '\0']), ('\u{ab87}', ['\u{13b7}', '\0', '\0']), ('\u{ab88}', ['\u{13b8}', - '\0', '\0']), ('\u{ab89}', ['\u{13b9}', '\0', '\0']), ('\u{ab8a}', ['\u{13ba}', '\0', - '\0']), ('\u{ab8b}', ['\u{13bb}', '\0', '\0']), ('\u{ab8c}', ['\u{13bc}', '\0', '\0']), - ('\u{ab8d}', ['\u{13bd}', '\0', '\0']), ('\u{ab8e}', ['\u{13be}', '\0', '\0']), ('\u{ab8f}', - ['\u{13bf}', '\0', '\0']), ('\u{ab90}', ['\u{13c0}', '\0', '\0']), ('\u{ab91}', ['\u{13c1}', - '\0', '\0']), ('\u{ab92}', ['\u{13c2}', '\0', '\0']), ('\u{ab93}', ['\u{13c3}', '\0', - '\0']), ('\u{ab94}', ['\u{13c4}', '\0', '\0']), ('\u{ab95}', ['\u{13c5}', '\0', '\0']), - ('\u{ab96}', ['\u{13c6}', '\0', '\0']), ('\u{ab97}', ['\u{13c7}', '\0', '\0']), ('\u{ab98}', - ['\u{13c8}', '\0', '\0']), ('\u{ab99}', ['\u{13c9}', '\0', '\0']), ('\u{ab9a}', ['\u{13ca}', - '\0', '\0']), ('\u{ab9b}', ['\u{13cb}', '\0', '\0']), ('\u{ab9c}', ['\u{13cc}', '\0', - '\0']), ('\u{ab9d}', ['\u{13cd}', '\0', '\0']), ('\u{ab9e}', ['\u{13ce}', '\0', '\0']), - ('\u{ab9f}', ['\u{13cf}', '\0', '\0']), ('\u{aba0}', ['\u{13d0}', '\0', '\0']), ('\u{aba1}', - ['\u{13d1}', '\0', '\0']), ('\u{aba2}', ['\u{13d2}', '\0', '\0']), ('\u{aba3}', ['\u{13d3}', - '\0', '\0']), ('\u{aba4}', ['\u{13d4}', '\0', '\0']), ('\u{aba5}', ['\u{13d5}', '\0', - '\0']), ('\u{aba6}', ['\u{13d6}', '\0', '\0']), ('\u{aba7}', ['\u{13d7}', '\0', '\0']), - ('\u{aba8}', ['\u{13d8}', '\0', '\0']), ('\u{aba9}', ['\u{13d9}', '\0', '\0']), ('\u{abaa}', - ['\u{13da}', '\0', '\0']), ('\u{abab}', ['\u{13db}', '\0', '\0']), ('\u{abac}', ['\u{13dc}', - '\0', '\0']), ('\u{abad}', ['\u{13dd}', '\0', '\0']), ('\u{abae}', ['\u{13de}', '\0', - '\0']), ('\u{abaf}', ['\u{13df}', '\0', '\0']), ('\u{abb0}', ['\u{13e0}', '\0', '\0']), - ('\u{abb1}', ['\u{13e1}', '\0', '\0']), ('\u{abb2}', ['\u{13e2}', '\0', '\0']), ('\u{abb3}', - ['\u{13e3}', '\0', '\0']), ('\u{abb4}', ['\u{13e4}', '\0', '\0']), ('\u{abb5}', ['\u{13e5}', - '\0', '\0']), 
('\u{abb6}', ['\u{13e6}', '\0', '\0']), ('\u{abb7}', ['\u{13e7}', '\0', - '\0']), ('\u{abb8}', ['\u{13e8}', '\0', '\0']), ('\u{abb9}', ['\u{13e9}', '\0', '\0']), - ('\u{abba}', ['\u{13ea}', '\0', '\0']), ('\u{abbb}', ['\u{13eb}', '\0', '\0']), ('\u{abbc}', - ['\u{13ec}', '\0', '\0']), ('\u{abbd}', ['\u{13ed}', '\0', '\0']), ('\u{abbe}', ['\u{13ee}', - '\0', '\0']), ('\u{abbf}', ['\u{13ef}', '\0', '\0']), ('\u{fb00}', ['\u{46}', '\u{46}', - '\0']), ('\u{fb01}', ['\u{46}', '\u{49}', '\0']), ('\u{fb02}', ['\u{46}', '\u{4c}', '\0']), - ('\u{fb03}', ['\u{46}', '\u{46}', '\u{49}']), ('\u{fb04}', ['\u{46}', '\u{46}', '\u{4c}']), - ('\u{fb05}', ['\u{53}', '\u{54}', '\0']), ('\u{fb06}', ['\u{53}', '\u{54}', '\0']), - ('\u{fb13}', ['\u{544}', '\u{546}', '\0']), ('\u{fb14}', ['\u{544}', '\u{535}', '\0']), - ('\u{fb15}', ['\u{544}', '\u{53b}', '\0']), ('\u{fb16}', ['\u{54e}', '\u{546}', '\0']), - ('\u{fb17}', ['\u{544}', '\u{53d}', '\0']), ('\u{ff41}', ['\u{ff21}', '\0', '\0']), - ('\u{ff42}', ['\u{ff22}', '\0', '\0']), ('\u{ff43}', ['\u{ff23}', '\0', '\0']), ('\u{ff44}', - ['\u{ff24}', '\0', '\0']), ('\u{ff45}', ['\u{ff25}', '\0', '\0']), ('\u{ff46}', ['\u{ff26}', - '\0', '\0']), ('\u{ff47}', ['\u{ff27}', '\0', '\0']), ('\u{ff48}', ['\u{ff28}', '\0', - '\0']), ('\u{ff49}', ['\u{ff29}', '\0', '\0']), ('\u{ff4a}', ['\u{ff2a}', '\0', '\0']), - ('\u{ff4b}', ['\u{ff2b}', '\0', '\0']), ('\u{ff4c}', ['\u{ff2c}', '\0', '\0']), ('\u{ff4d}', - ['\u{ff2d}', '\0', '\0']), ('\u{ff4e}', ['\u{ff2e}', '\0', '\0']), ('\u{ff4f}', ['\u{ff2f}', - '\0', '\0']), ('\u{ff50}', ['\u{ff30}', '\0', '\0']), ('\u{ff51}', ['\u{ff31}', '\0', - '\0']), ('\u{ff52}', ['\u{ff32}', '\0', '\0']), ('\u{ff53}', ['\u{ff33}', '\0', '\0']), - ('\u{ff54}', ['\u{ff34}', '\0', '\0']), ('\u{ff55}', ['\u{ff35}', '\0', '\0']), ('\u{ff56}', - ['\u{ff36}', '\0', '\0']), ('\u{ff57}', ['\u{ff37}', '\0', '\0']), ('\u{ff58}', ['\u{ff38}', - '\0', '\0']), ('\u{ff59}', ['\u{ff39}', '\0', '\0']), ('\u{ff5a}', ['\u{ff3a}', '\0', - 
'\0']), ('\u{10428}', ['\u{10400}', '\0', '\0']), ('\u{10429}', ['\u{10401}', '\0', '\0']), - ('\u{1042a}', ['\u{10402}', '\0', '\0']), ('\u{1042b}', ['\u{10403}', '\0', '\0']), - ('\u{1042c}', ['\u{10404}', '\0', '\0']), ('\u{1042d}', ['\u{10405}', '\0', '\0']), - ('\u{1042e}', ['\u{10406}', '\0', '\0']), ('\u{1042f}', ['\u{10407}', '\0', '\0']), - ('\u{10430}', ['\u{10408}', '\0', '\0']), ('\u{10431}', ['\u{10409}', '\0', '\0']), - ('\u{10432}', ['\u{1040a}', '\0', '\0']), ('\u{10433}', ['\u{1040b}', '\0', '\0']), - ('\u{10434}', ['\u{1040c}', '\0', '\0']), ('\u{10435}', ['\u{1040d}', '\0', '\0']), - ('\u{10436}', ['\u{1040e}', '\0', '\0']), ('\u{10437}', ['\u{1040f}', '\0', '\0']), - ('\u{10438}', ['\u{10410}', '\0', '\0']), ('\u{10439}', ['\u{10411}', '\0', '\0']), - ('\u{1043a}', ['\u{10412}', '\0', '\0']), ('\u{1043b}', ['\u{10413}', '\0', '\0']), - ('\u{1043c}', ['\u{10414}', '\0', '\0']), ('\u{1043d}', ['\u{10415}', '\0', '\0']), - ('\u{1043e}', ['\u{10416}', '\0', '\0']), ('\u{1043f}', ['\u{10417}', '\0', '\0']), - ('\u{10440}', ['\u{10418}', '\0', '\0']), ('\u{10441}', ['\u{10419}', '\0', '\0']), - ('\u{10442}', ['\u{1041a}', '\0', '\0']), ('\u{10443}', ['\u{1041b}', '\0', '\0']), - ('\u{10444}', ['\u{1041c}', '\0', '\0']), ('\u{10445}', ['\u{1041d}', '\0', '\0']), - ('\u{10446}', ['\u{1041e}', '\0', '\0']), ('\u{10447}', ['\u{1041f}', '\0', '\0']), - ('\u{10448}', ['\u{10420}', '\0', '\0']), ('\u{10449}', ['\u{10421}', '\0', '\0']), - ('\u{1044a}', ['\u{10422}', '\0', '\0']), ('\u{1044b}', ['\u{10423}', '\0', '\0']), - ('\u{1044c}', ['\u{10424}', '\0', '\0']), ('\u{1044d}', ['\u{10425}', '\0', '\0']), - ('\u{1044e}', ['\u{10426}', '\0', '\0']), ('\u{1044f}', ['\u{10427}', '\0', '\0']), - ('\u{10cc0}', ['\u{10c80}', '\0', '\0']), ('\u{10cc1}', ['\u{10c81}', '\0', '\0']), - ('\u{10cc2}', ['\u{10c82}', '\0', '\0']), ('\u{10cc3}', ['\u{10c83}', '\0', '\0']), - ('\u{10cc4}', ['\u{10c84}', '\0', '\0']), ('\u{10cc5}', ['\u{10c85}', '\0', '\0']), - ('\u{10cc6}', 
['\u{10c86}', '\0', '\0']), ('\u{10cc7}', ['\u{10c87}', '\0', '\0']), - ('\u{10cc8}', ['\u{10c88}', '\0', '\0']), ('\u{10cc9}', ['\u{10c89}', '\0', '\0']), - ('\u{10cca}', ['\u{10c8a}', '\0', '\0']), ('\u{10ccb}', ['\u{10c8b}', '\0', '\0']), - ('\u{10ccc}', ['\u{10c8c}', '\0', '\0']), ('\u{10ccd}', ['\u{10c8d}', '\0', '\0']), - ('\u{10cce}', ['\u{10c8e}', '\0', '\0']), ('\u{10ccf}', ['\u{10c8f}', '\0', '\0']), - ('\u{10cd0}', ['\u{10c90}', '\0', '\0']), ('\u{10cd1}', ['\u{10c91}', '\0', '\0']), - ('\u{10cd2}', ['\u{10c92}', '\0', '\0']), ('\u{10cd3}', ['\u{10c93}', '\0', '\0']), - ('\u{10cd4}', ['\u{10c94}', '\0', '\0']), ('\u{10cd5}', ['\u{10c95}', '\0', '\0']), - ('\u{10cd6}', ['\u{10c96}', '\0', '\0']), ('\u{10cd7}', ['\u{10c97}', '\0', '\0']), - ('\u{10cd8}', ['\u{10c98}', '\0', '\0']), ('\u{10cd9}', ['\u{10c99}', '\0', '\0']), - ('\u{10cda}', ['\u{10c9a}', '\0', '\0']), ('\u{10cdb}', ['\u{10c9b}', '\0', '\0']), - ('\u{10cdc}', ['\u{10c9c}', '\0', '\0']), ('\u{10cdd}', ['\u{10c9d}', '\0', '\0']), - ('\u{10cde}', ['\u{10c9e}', '\0', '\0']), ('\u{10cdf}', ['\u{10c9f}', '\0', '\0']), - ('\u{10ce0}', ['\u{10ca0}', '\0', '\0']), ('\u{10ce1}', ['\u{10ca1}', '\0', '\0']), - ('\u{10ce2}', ['\u{10ca2}', '\0', '\0']), ('\u{10ce3}', ['\u{10ca3}', '\0', '\0']), - ('\u{10ce4}', ['\u{10ca4}', '\0', '\0']), ('\u{10ce5}', ['\u{10ca5}', '\0', '\0']), - ('\u{10ce6}', ['\u{10ca6}', '\0', '\0']), ('\u{10ce7}', ['\u{10ca7}', '\0', '\0']), - ('\u{10ce8}', ['\u{10ca8}', '\0', '\0']), ('\u{10ce9}', ['\u{10ca9}', '\0', '\0']), - ('\u{10cea}', ['\u{10caa}', '\0', '\0']), ('\u{10ceb}', ['\u{10cab}', '\0', '\0']), - ('\u{10cec}', ['\u{10cac}', '\0', '\0']), ('\u{10ced}', ['\u{10cad}', '\0', '\0']), - ('\u{10cee}', ['\u{10cae}', '\0', '\0']), ('\u{10cef}', ['\u{10caf}', '\0', '\0']), - ('\u{10cf0}', ['\u{10cb0}', '\0', '\0']), ('\u{10cf1}', ['\u{10cb1}', '\0', '\0']), - ('\u{10cf2}', ['\u{10cb2}', '\0', '\0']), ('\u{118c0}', ['\u{118a0}', '\0', '\0']), - ('\u{118c1}', ['\u{118a1}', '\0', 
'\0']), ('\u{118c2}', ['\u{118a2}', '\0', '\0']), - ('\u{118c3}', ['\u{118a3}', '\0', '\0']), ('\u{118c4}', ['\u{118a4}', '\0', '\0']), - ('\u{118c5}', ['\u{118a5}', '\0', '\0']), ('\u{118c6}', ['\u{118a6}', '\0', '\0']), - ('\u{118c7}', ['\u{118a7}', '\0', '\0']), ('\u{118c8}', ['\u{118a8}', '\0', '\0']), - ('\u{118c9}', ['\u{118a9}', '\0', '\0']), ('\u{118ca}', ['\u{118aa}', '\0', '\0']), - ('\u{118cb}', ['\u{118ab}', '\0', '\0']), ('\u{118cc}', ['\u{118ac}', '\0', '\0']), - ('\u{118cd}', ['\u{118ad}', '\0', '\0']), ('\u{118ce}', ['\u{118ae}', '\0', '\0']), - ('\u{118cf}', ['\u{118af}', '\0', '\0']), ('\u{118d0}', ['\u{118b0}', '\0', '\0']), - ('\u{118d1}', ['\u{118b1}', '\0', '\0']), ('\u{118d2}', ['\u{118b2}', '\0', '\0']), - ('\u{118d3}', ['\u{118b3}', '\0', '\0']), ('\u{118d4}', ['\u{118b4}', '\0', '\0']), - ('\u{118d5}', ['\u{118b5}', '\0', '\0']), ('\u{118d6}', ['\u{118b6}', '\0', '\0']), - ('\u{118d7}', ['\u{118b7}', '\0', '\0']), ('\u{118d8}', ['\u{118b8}', '\0', '\0']), - ('\u{118d9}', ['\u{118b9}', '\0', '\0']), ('\u{118da}', ['\u{118ba}', '\0', '\0']), - ('\u{118db}', ['\u{118bb}', '\0', '\0']), ('\u{118dc}', ['\u{118bc}', '\0', '\0']), - ('\u{118dd}', ['\u{118bd}', '\0', '\0']), ('\u{118de}', ['\u{118be}', '\0', '\0']), - ('\u{118df}', ['\u{118bf}', '\0', '\0']) + ('\u{26a}', ['\u{a7ae}', '\0', '\0']), ('\u{26b}', ['\u{2c62}', '\0', '\0']), ('\u{26c}', + ['\u{a7ad}', '\0', '\0']), ('\u{26f}', ['\u{19c}', '\0', '\0']), ('\u{271}', ['\u{2c6e}', + '\0', '\0']), ('\u{272}', ['\u{19d}', '\0', '\0']), ('\u{275}', ['\u{19f}', '\0', '\0']), + ('\u{27d}', ['\u{2c64}', '\0', '\0']), ('\u{280}', ['\u{1a6}', '\0', '\0']), ('\u{283}', + ['\u{1a9}', '\0', '\0']), ('\u{287}', ['\u{a7b1}', '\0', '\0']), ('\u{288}', ['\u{1ae}', + '\0', '\0']), ('\u{289}', ['\u{244}', '\0', '\0']), ('\u{28a}', ['\u{1b1}', '\0', '\0']), + ('\u{28b}', ['\u{1b2}', '\0', '\0']), ('\u{28c}', ['\u{245}', '\0', '\0']), ('\u{292}', + ['\u{1b7}', '\0', '\0']), ('\u{29d}', ['\u{a7b2}', '\0', 
'\0']), ('\u{29e}', ['\u{a7b0}', + '\0', '\0']), ('\u{345}', ['\u{399}', '\0', '\0']), ('\u{371}', ['\u{370}', '\0', '\0']), + ('\u{373}', ['\u{372}', '\0', '\0']), ('\u{377}', ['\u{376}', '\0', '\0']), ('\u{37b}', + ['\u{3fd}', '\0', '\0']), ('\u{37c}', ['\u{3fe}', '\0', '\0']), ('\u{37d}', ['\u{3ff}', + '\0', '\0']), ('\u{390}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{3ac}', ['\u{386}', '\0', + '\0']), ('\u{3ad}', ['\u{388}', '\0', '\0']), ('\u{3ae}', ['\u{389}', '\0', '\0']), + ('\u{3af}', ['\u{38a}', '\0', '\0']), ('\u{3b0}', ['\u{3a5}', '\u{308}', '\u{301}']), + ('\u{3b1}', ['\u{391}', '\0', '\0']), ('\u{3b2}', ['\u{392}', '\0', '\0']), ('\u{3b3}', + ['\u{393}', '\0', '\0']), ('\u{3b4}', ['\u{394}', '\0', '\0']), ('\u{3b5}', ['\u{395}', + '\0', '\0']), ('\u{3b6}', ['\u{396}', '\0', '\0']), ('\u{3b7}', ['\u{397}', '\0', '\0']), + ('\u{3b8}', ['\u{398}', '\0', '\0']), ('\u{3b9}', ['\u{399}', '\0', '\0']), ('\u{3ba}', + ['\u{39a}', '\0', '\0']), ('\u{3bb}', ['\u{39b}', '\0', '\0']), ('\u{3bc}', ['\u{39c}', + '\0', '\0']), ('\u{3bd}', ['\u{39d}', '\0', '\0']), ('\u{3be}', ['\u{39e}', '\0', '\0']), + ('\u{3bf}', ['\u{39f}', '\0', '\0']), ('\u{3c0}', ['\u{3a0}', '\0', '\0']), ('\u{3c1}', + ['\u{3a1}', '\0', '\0']), ('\u{3c2}', ['\u{3a3}', '\0', '\0']), ('\u{3c3}', ['\u{3a3}', + '\0', '\0']), ('\u{3c4}', ['\u{3a4}', '\0', '\0']), ('\u{3c5}', ['\u{3a5}', '\0', '\0']), + ('\u{3c6}', ['\u{3a6}', '\0', '\0']), ('\u{3c7}', ['\u{3a7}', '\0', '\0']), ('\u{3c8}', + ['\u{3a8}', '\0', '\0']), ('\u{3c9}', ['\u{3a9}', '\0', '\0']), ('\u{3ca}', ['\u{3aa}', + '\0', '\0']), ('\u{3cb}', ['\u{3ab}', '\0', '\0']), ('\u{3cc}', ['\u{38c}', '\0', '\0']), + ('\u{3cd}', ['\u{38e}', '\0', '\0']), ('\u{3ce}', ['\u{38f}', '\0', '\0']), ('\u{3d0}', + ['\u{392}', '\0', '\0']), ('\u{3d1}', ['\u{398}', '\0', '\0']), ('\u{3d5}', ['\u{3a6}', + '\0', '\0']), ('\u{3d6}', ['\u{3a0}', '\0', '\0']), ('\u{3d7}', ['\u{3cf}', '\0', '\0']), + ('\u{3d9}', ['\u{3d8}', '\0', '\0']), ('\u{3db}', ['\u{3da}', 
'\0', '\0']), ('\u{3dd}', + ['\u{3dc}', '\0', '\0']), ('\u{3df}', ['\u{3de}', '\0', '\0']), ('\u{3e1}', ['\u{3e0}', + '\0', '\0']), ('\u{3e3}', ['\u{3e2}', '\0', '\0']), ('\u{3e5}', ['\u{3e4}', '\0', '\0']), + ('\u{3e7}', ['\u{3e6}', '\0', '\0']), ('\u{3e9}', ['\u{3e8}', '\0', '\0']), ('\u{3eb}', + ['\u{3ea}', '\0', '\0']), ('\u{3ed}', ['\u{3ec}', '\0', '\0']), ('\u{3ef}', ['\u{3ee}', + '\0', '\0']), ('\u{3f0}', ['\u{39a}', '\0', '\0']), ('\u{3f1}', ['\u{3a1}', '\0', '\0']), + ('\u{3f2}', ['\u{3f9}', '\0', '\0']), ('\u{3f3}', ['\u{37f}', '\0', '\0']), ('\u{3f5}', + ['\u{395}', '\0', '\0']), ('\u{3f8}', ['\u{3f7}', '\0', '\0']), ('\u{3fb}', ['\u{3fa}', + '\0', '\0']), ('\u{430}', ['\u{410}', '\0', '\0']), ('\u{431}', ['\u{411}', '\0', '\0']), + ('\u{432}', ['\u{412}', '\0', '\0']), ('\u{433}', ['\u{413}', '\0', '\0']), ('\u{434}', + ['\u{414}', '\0', '\0']), ('\u{435}', ['\u{415}', '\0', '\0']), ('\u{436}', ['\u{416}', + '\0', '\0']), ('\u{437}', ['\u{417}', '\0', '\0']), ('\u{438}', ['\u{418}', '\0', '\0']), + ('\u{439}', ['\u{419}', '\0', '\0']), ('\u{43a}', ['\u{41a}', '\0', '\0']), ('\u{43b}', + ['\u{41b}', '\0', '\0']), ('\u{43c}', ['\u{41c}', '\0', '\0']), ('\u{43d}', ['\u{41d}', + '\0', '\0']), ('\u{43e}', ['\u{41e}', '\0', '\0']), ('\u{43f}', ['\u{41f}', '\0', '\0']), + ('\u{440}', ['\u{420}', '\0', '\0']), ('\u{441}', ['\u{421}', '\0', '\0']), ('\u{442}', + ['\u{422}', '\0', '\0']), ('\u{443}', ['\u{423}', '\0', '\0']), ('\u{444}', ['\u{424}', + '\0', '\0']), ('\u{445}', ['\u{425}', '\0', '\0']), ('\u{446}', ['\u{426}', '\0', '\0']), + ('\u{447}', ['\u{427}', '\0', '\0']), ('\u{448}', ['\u{428}', '\0', '\0']), ('\u{449}', + ['\u{429}', '\0', '\0']), ('\u{44a}', ['\u{42a}', '\0', '\0']), ('\u{44b}', ['\u{42b}', + '\0', '\0']), ('\u{44c}', ['\u{42c}', '\0', '\0']), ('\u{44d}', ['\u{42d}', '\0', '\0']), + ('\u{44e}', ['\u{42e}', '\0', '\0']), ('\u{44f}', ['\u{42f}', '\0', '\0']), ('\u{450}', + ['\u{400}', '\0', '\0']), ('\u{451}', ['\u{401}', '\0', '\0']), 
('\u{452}', ['\u{402}', + '\0', '\0']), ('\u{453}', ['\u{403}', '\0', '\0']), ('\u{454}', ['\u{404}', '\0', '\0']), + ('\u{455}', ['\u{405}', '\0', '\0']), ('\u{456}', ['\u{406}', '\0', '\0']), ('\u{457}', + ['\u{407}', '\0', '\0']), ('\u{458}', ['\u{408}', '\0', '\0']), ('\u{459}', ['\u{409}', + '\0', '\0']), ('\u{45a}', ['\u{40a}', '\0', '\0']), ('\u{45b}', ['\u{40b}', '\0', '\0']), + ('\u{45c}', ['\u{40c}', '\0', '\0']), ('\u{45d}', ['\u{40d}', '\0', '\0']), ('\u{45e}', + ['\u{40e}', '\0', '\0']), ('\u{45f}', ['\u{40f}', '\0', '\0']), ('\u{461}', ['\u{460}', + '\0', '\0']), ('\u{463}', ['\u{462}', '\0', '\0']), ('\u{465}', ['\u{464}', '\0', '\0']), + ('\u{467}', ['\u{466}', '\0', '\0']), ('\u{469}', ['\u{468}', '\0', '\0']), ('\u{46b}', + ['\u{46a}', '\0', '\0']), ('\u{46d}', ['\u{46c}', '\0', '\0']), ('\u{46f}', ['\u{46e}', + '\0', '\0']), ('\u{471}', ['\u{470}', '\0', '\0']), ('\u{473}', ['\u{472}', '\0', '\0']), + ('\u{475}', ['\u{474}', '\0', '\0']), ('\u{477}', ['\u{476}', '\0', '\0']), ('\u{479}', + ['\u{478}', '\0', '\0']), ('\u{47b}', ['\u{47a}', '\0', '\0']), ('\u{47d}', ['\u{47c}', + '\0', '\0']), ('\u{47f}', ['\u{47e}', '\0', '\0']), ('\u{481}', ['\u{480}', '\0', '\0']), + ('\u{48b}', ['\u{48a}', '\0', '\0']), ('\u{48d}', ['\u{48c}', '\0', '\0']), ('\u{48f}', + ['\u{48e}', '\0', '\0']), ('\u{491}', ['\u{490}', '\0', '\0']), ('\u{493}', ['\u{492}', + '\0', '\0']), ('\u{495}', ['\u{494}', '\0', '\0']), ('\u{497}', ['\u{496}', '\0', '\0']), + ('\u{499}', ['\u{498}', '\0', '\0']), ('\u{49b}', ['\u{49a}', '\0', '\0']), ('\u{49d}', + ['\u{49c}', '\0', '\0']), ('\u{49f}', ['\u{49e}', '\0', '\0']), ('\u{4a1}', ['\u{4a0}', + '\0', '\0']), ('\u{4a3}', ['\u{4a2}', '\0', '\0']), ('\u{4a5}', ['\u{4a4}', '\0', '\0']), + ('\u{4a7}', ['\u{4a6}', '\0', '\0']), ('\u{4a9}', ['\u{4a8}', '\0', '\0']), ('\u{4ab}', + ['\u{4aa}', '\0', '\0']), ('\u{4ad}', ['\u{4ac}', '\0', '\0']), ('\u{4af}', ['\u{4ae}', + '\0', '\0']), ('\u{4b1}', ['\u{4b0}', '\0', '\0']), ('\u{4b3}', 
['\u{4b2}', '\0', '\0']), + ('\u{4b5}', ['\u{4b4}', '\0', '\0']), ('\u{4b7}', ['\u{4b6}', '\0', '\0']), ('\u{4b9}', + ['\u{4b8}', '\0', '\0']), ('\u{4bb}', ['\u{4ba}', '\0', '\0']), ('\u{4bd}', ['\u{4bc}', + '\0', '\0']), ('\u{4bf}', ['\u{4be}', '\0', '\0']), ('\u{4c2}', ['\u{4c1}', '\0', '\0']), + ('\u{4c4}', ['\u{4c3}', '\0', '\0']), ('\u{4c6}', ['\u{4c5}', '\0', '\0']), ('\u{4c8}', + ['\u{4c7}', '\0', '\0']), ('\u{4ca}', ['\u{4c9}', '\0', '\0']), ('\u{4cc}', ['\u{4cb}', + '\0', '\0']), ('\u{4ce}', ['\u{4cd}', '\0', '\0']), ('\u{4cf}', ['\u{4c0}', '\0', '\0']), + ('\u{4d1}', ['\u{4d0}', '\0', '\0']), ('\u{4d3}', ['\u{4d2}', '\0', '\0']), ('\u{4d5}', + ['\u{4d4}', '\0', '\0']), ('\u{4d7}', ['\u{4d6}', '\0', '\0']), ('\u{4d9}', ['\u{4d8}', + '\0', '\0']), ('\u{4db}', ['\u{4da}', '\0', '\0']), ('\u{4dd}', ['\u{4dc}', '\0', '\0']), + ('\u{4df}', ['\u{4de}', '\0', '\0']), ('\u{4e1}', ['\u{4e0}', '\0', '\0']), ('\u{4e3}', + ['\u{4e2}', '\0', '\0']), ('\u{4e5}', ['\u{4e4}', '\0', '\0']), ('\u{4e7}', ['\u{4e6}', + '\0', '\0']), ('\u{4e9}', ['\u{4e8}', '\0', '\0']), ('\u{4eb}', ['\u{4ea}', '\0', '\0']), + ('\u{4ed}', ['\u{4ec}', '\0', '\0']), ('\u{4ef}', ['\u{4ee}', '\0', '\0']), ('\u{4f1}', + ['\u{4f0}', '\0', '\0']), ('\u{4f3}', ['\u{4f2}', '\0', '\0']), ('\u{4f5}', ['\u{4f4}', + '\0', '\0']), ('\u{4f7}', ['\u{4f6}', '\0', '\0']), ('\u{4f9}', ['\u{4f8}', '\0', '\0']), + ('\u{4fb}', ['\u{4fa}', '\0', '\0']), ('\u{4fd}', ['\u{4fc}', '\0', '\0']), ('\u{4ff}', + ['\u{4fe}', '\0', '\0']), ('\u{501}', ['\u{500}', '\0', '\0']), ('\u{503}', ['\u{502}', + '\0', '\0']), ('\u{505}', ['\u{504}', '\0', '\0']), ('\u{507}', ['\u{506}', '\0', '\0']), + ('\u{509}', ['\u{508}', '\0', '\0']), ('\u{50b}', ['\u{50a}', '\0', '\0']), ('\u{50d}', + ['\u{50c}', '\0', '\0']), ('\u{50f}', ['\u{50e}', '\0', '\0']), ('\u{511}', ['\u{510}', + '\0', '\0']), ('\u{513}', ['\u{512}', '\0', '\0']), ('\u{515}', ['\u{514}', '\0', '\0']), + ('\u{517}', ['\u{516}', '\0', '\0']), ('\u{519}', ['\u{518}', '\0', 
'\0']), ('\u{51b}', + ['\u{51a}', '\0', '\0']), ('\u{51d}', ['\u{51c}', '\0', '\0']), ('\u{51f}', ['\u{51e}', + '\0', '\0']), ('\u{521}', ['\u{520}', '\0', '\0']), ('\u{523}', ['\u{522}', '\0', '\0']), + ('\u{525}', ['\u{524}', '\0', '\0']), ('\u{527}', ['\u{526}', '\0', '\0']), ('\u{529}', + ['\u{528}', '\0', '\0']), ('\u{52b}', ['\u{52a}', '\0', '\0']), ('\u{52d}', ['\u{52c}', + '\0', '\0']), ('\u{52f}', ['\u{52e}', '\0', '\0']), ('\u{561}', ['\u{531}', '\0', '\0']), + ('\u{562}', ['\u{532}', '\0', '\0']), ('\u{563}', ['\u{533}', '\0', '\0']), ('\u{564}', + ['\u{534}', '\0', '\0']), ('\u{565}', ['\u{535}', '\0', '\0']), ('\u{566}', ['\u{536}', + '\0', '\0']), ('\u{567}', ['\u{537}', '\0', '\0']), ('\u{568}', ['\u{538}', '\0', '\0']), + ('\u{569}', ['\u{539}', '\0', '\0']), ('\u{56a}', ['\u{53a}', '\0', '\0']), ('\u{56b}', + ['\u{53b}', '\0', '\0']), ('\u{56c}', ['\u{53c}', '\0', '\0']), ('\u{56d}', ['\u{53d}', + '\0', '\0']), ('\u{56e}', ['\u{53e}', '\0', '\0']), ('\u{56f}', ['\u{53f}', '\0', '\0']), + ('\u{570}', ['\u{540}', '\0', '\0']), ('\u{571}', ['\u{541}', '\0', '\0']), ('\u{572}', + ['\u{542}', '\0', '\0']), ('\u{573}', ['\u{543}', '\0', '\0']), ('\u{574}', ['\u{544}', + '\0', '\0']), ('\u{575}', ['\u{545}', '\0', '\0']), ('\u{576}', ['\u{546}', '\0', '\0']), + ('\u{577}', ['\u{547}', '\0', '\0']), ('\u{578}', ['\u{548}', '\0', '\0']), ('\u{579}', + ['\u{549}', '\0', '\0']), ('\u{57a}', ['\u{54a}', '\0', '\0']), ('\u{57b}', ['\u{54b}', + '\0', '\0']), ('\u{57c}', ['\u{54c}', '\0', '\0']), ('\u{57d}', ['\u{54d}', '\0', '\0']), + ('\u{57e}', ['\u{54e}', '\0', '\0']), ('\u{57f}', ['\u{54f}', '\0', '\0']), ('\u{580}', + ['\u{550}', '\0', '\0']), ('\u{581}', ['\u{551}', '\0', '\0']), ('\u{582}', ['\u{552}', + '\0', '\0']), ('\u{583}', ['\u{553}', '\0', '\0']), ('\u{584}', ['\u{554}', '\0', '\0']), + ('\u{585}', ['\u{555}', '\0', '\0']), ('\u{586}', ['\u{556}', '\0', '\0']), ('\u{587}', + ['\u{535}', '\u{552}', '\0']), ('\u{13f8}', ['\u{13f0}', '\0', '\0']), 
('\u{13f9}', + ['\u{13f1}', '\0', '\0']), ('\u{13fa}', ['\u{13f2}', '\0', '\0']), ('\u{13fb}', ['\u{13f3}', + '\0', '\0']), ('\u{13fc}', ['\u{13f4}', '\0', '\0']), ('\u{13fd}', ['\u{13f5}', '\0', + '\0']), ('\u{1c80}', ['\u{412}', '\0', '\0']), ('\u{1c81}', ['\u{414}', '\0', '\0']), + ('\u{1c82}', ['\u{41e}', '\0', '\0']), ('\u{1c83}', ['\u{421}', '\0', '\0']), ('\u{1c84}', + ['\u{422}', '\0', '\0']), ('\u{1c85}', ['\u{422}', '\0', '\0']), ('\u{1c86}', ['\u{42a}', + '\0', '\0']), ('\u{1c87}', ['\u{462}', '\0', '\0']), ('\u{1c88}', ['\u{a64a}', '\0', '\0']), + ('\u{1d79}', ['\u{a77d}', '\0', '\0']), ('\u{1d7d}', ['\u{2c63}', '\0', '\0']), ('\u{1e01}', + ['\u{1e00}', '\0', '\0']), ('\u{1e03}', ['\u{1e02}', '\0', '\0']), ('\u{1e05}', ['\u{1e04}', + '\0', '\0']), ('\u{1e07}', ['\u{1e06}', '\0', '\0']), ('\u{1e09}', ['\u{1e08}', '\0', + '\0']), ('\u{1e0b}', ['\u{1e0a}', '\0', '\0']), ('\u{1e0d}', ['\u{1e0c}', '\0', '\0']), + ('\u{1e0f}', ['\u{1e0e}', '\0', '\0']), ('\u{1e11}', ['\u{1e10}', '\0', '\0']), ('\u{1e13}', + ['\u{1e12}', '\0', '\0']), ('\u{1e15}', ['\u{1e14}', '\0', '\0']), ('\u{1e17}', ['\u{1e16}', + '\0', '\0']), ('\u{1e19}', ['\u{1e18}', '\0', '\0']), ('\u{1e1b}', ['\u{1e1a}', '\0', + '\0']), ('\u{1e1d}', ['\u{1e1c}', '\0', '\0']), ('\u{1e1f}', ['\u{1e1e}', '\0', '\0']), + ('\u{1e21}', ['\u{1e20}', '\0', '\0']), ('\u{1e23}', ['\u{1e22}', '\0', '\0']), ('\u{1e25}', + ['\u{1e24}', '\0', '\0']), ('\u{1e27}', ['\u{1e26}', '\0', '\0']), ('\u{1e29}', ['\u{1e28}', + '\0', '\0']), ('\u{1e2b}', ['\u{1e2a}', '\0', '\0']), ('\u{1e2d}', ['\u{1e2c}', '\0', + '\0']), ('\u{1e2f}', ['\u{1e2e}', '\0', '\0']), ('\u{1e31}', ['\u{1e30}', '\0', '\0']), + ('\u{1e33}', ['\u{1e32}', '\0', '\0']), ('\u{1e35}', ['\u{1e34}', '\0', '\0']), ('\u{1e37}', + ['\u{1e36}', '\0', '\0']), ('\u{1e39}', ['\u{1e38}', '\0', '\0']), ('\u{1e3b}', ['\u{1e3a}', + '\0', '\0']), ('\u{1e3d}', ['\u{1e3c}', '\0', '\0']), ('\u{1e3f}', ['\u{1e3e}', '\0', + '\0']), ('\u{1e41}', ['\u{1e40}', '\0', '\0']), 
('\u{1e43}', ['\u{1e42}', '\0', '\0']), + ('\u{1e45}', ['\u{1e44}', '\0', '\0']), ('\u{1e47}', ['\u{1e46}', '\0', '\0']), ('\u{1e49}', + ['\u{1e48}', '\0', '\0']), ('\u{1e4b}', ['\u{1e4a}', '\0', '\0']), ('\u{1e4d}', ['\u{1e4c}', + '\0', '\0']), ('\u{1e4f}', ['\u{1e4e}', '\0', '\0']), ('\u{1e51}', ['\u{1e50}', '\0', + '\0']), ('\u{1e53}', ['\u{1e52}', '\0', '\0']), ('\u{1e55}', ['\u{1e54}', '\0', '\0']), + ('\u{1e57}', ['\u{1e56}', '\0', '\0']), ('\u{1e59}', ['\u{1e58}', '\0', '\0']), ('\u{1e5b}', + ['\u{1e5a}', '\0', '\0']), ('\u{1e5d}', ['\u{1e5c}', '\0', '\0']), ('\u{1e5f}', ['\u{1e5e}', + '\0', '\0']), ('\u{1e61}', ['\u{1e60}', '\0', '\0']), ('\u{1e63}', ['\u{1e62}', '\0', + '\0']), ('\u{1e65}', ['\u{1e64}', '\0', '\0']), ('\u{1e67}', ['\u{1e66}', '\0', '\0']), + ('\u{1e69}', ['\u{1e68}', '\0', '\0']), ('\u{1e6b}', ['\u{1e6a}', '\0', '\0']), ('\u{1e6d}', + ['\u{1e6c}', '\0', '\0']), ('\u{1e6f}', ['\u{1e6e}', '\0', '\0']), ('\u{1e71}', ['\u{1e70}', + '\0', '\0']), ('\u{1e73}', ['\u{1e72}', '\0', '\0']), ('\u{1e75}', ['\u{1e74}', '\0', + '\0']), ('\u{1e77}', ['\u{1e76}', '\0', '\0']), ('\u{1e79}', ['\u{1e78}', '\0', '\0']), + ('\u{1e7b}', ['\u{1e7a}', '\0', '\0']), ('\u{1e7d}', ['\u{1e7c}', '\0', '\0']), ('\u{1e7f}', + ['\u{1e7e}', '\0', '\0']), ('\u{1e81}', ['\u{1e80}', '\0', '\0']), ('\u{1e83}', ['\u{1e82}', + '\0', '\0']), ('\u{1e85}', ['\u{1e84}', '\0', '\0']), ('\u{1e87}', ['\u{1e86}', '\0', + '\0']), ('\u{1e89}', ['\u{1e88}', '\0', '\0']), ('\u{1e8b}', ['\u{1e8a}', '\0', '\0']), + ('\u{1e8d}', ['\u{1e8c}', '\0', '\0']), ('\u{1e8f}', ['\u{1e8e}', '\0', '\0']), ('\u{1e91}', + ['\u{1e90}', '\0', '\0']), ('\u{1e93}', ['\u{1e92}', '\0', '\0']), ('\u{1e95}', ['\u{1e94}', + '\0', '\0']), ('\u{1e96}', ['\u{48}', '\u{331}', '\0']), ('\u{1e97}', ['\u{54}', '\u{308}', + '\0']), ('\u{1e98}', ['\u{57}', '\u{30a}', '\0']), ('\u{1e99}', ['\u{59}', '\u{30a}', + '\0']), ('\u{1e9a}', ['\u{41}', '\u{2be}', '\0']), ('\u{1e9b}', ['\u{1e60}', '\0', '\0']), + ('\u{1ea1}', 
['\u{1ea0}', '\0', '\0']), ('\u{1ea3}', ['\u{1ea2}', '\0', '\0']), ('\u{1ea5}', + ['\u{1ea4}', '\0', '\0']), ('\u{1ea7}', ['\u{1ea6}', '\0', '\0']), ('\u{1ea9}', ['\u{1ea8}', + '\0', '\0']), ('\u{1eab}', ['\u{1eaa}', '\0', '\0']), ('\u{1ead}', ['\u{1eac}', '\0', + '\0']), ('\u{1eaf}', ['\u{1eae}', '\0', '\0']), ('\u{1eb1}', ['\u{1eb0}', '\0', '\0']), + ('\u{1eb3}', ['\u{1eb2}', '\0', '\0']), ('\u{1eb5}', ['\u{1eb4}', '\0', '\0']), ('\u{1eb7}', + ['\u{1eb6}', '\0', '\0']), ('\u{1eb9}', ['\u{1eb8}', '\0', '\0']), ('\u{1ebb}', ['\u{1eba}', + '\0', '\0']), ('\u{1ebd}', ['\u{1ebc}', '\0', '\0']), ('\u{1ebf}', ['\u{1ebe}', '\0', + '\0']), ('\u{1ec1}', ['\u{1ec0}', '\0', '\0']), ('\u{1ec3}', ['\u{1ec2}', '\0', '\0']), + ('\u{1ec5}', ['\u{1ec4}', '\0', '\0']), ('\u{1ec7}', ['\u{1ec6}', '\0', '\0']), ('\u{1ec9}', + ['\u{1ec8}', '\0', '\0']), ('\u{1ecb}', ['\u{1eca}', '\0', '\0']), ('\u{1ecd}', ['\u{1ecc}', + '\0', '\0']), ('\u{1ecf}', ['\u{1ece}', '\0', '\0']), ('\u{1ed1}', ['\u{1ed0}', '\0', + '\0']), ('\u{1ed3}', ['\u{1ed2}', '\0', '\0']), ('\u{1ed5}', ['\u{1ed4}', '\0', '\0']), + ('\u{1ed7}', ['\u{1ed6}', '\0', '\0']), ('\u{1ed9}', ['\u{1ed8}', '\0', '\0']), ('\u{1edb}', + ['\u{1eda}', '\0', '\0']), ('\u{1edd}', ['\u{1edc}', '\0', '\0']), ('\u{1edf}', ['\u{1ede}', + '\0', '\0']), ('\u{1ee1}', ['\u{1ee0}', '\0', '\0']), ('\u{1ee3}', ['\u{1ee2}', '\0', + '\0']), ('\u{1ee5}', ['\u{1ee4}', '\0', '\0']), ('\u{1ee7}', ['\u{1ee6}', '\0', '\0']), + ('\u{1ee9}', ['\u{1ee8}', '\0', '\0']), ('\u{1eeb}', ['\u{1eea}', '\0', '\0']), ('\u{1eed}', + ['\u{1eec}', '\0', '\0']), ('\u{1eef}', ['\u{1eee}', '\0', '\0']), ('\u{1ef1}', ['\u{1ef0}', + '\0', '\0']), ('\u{1ef3}', ['\u{1ef2}', '\0', '\0']), ('\u{1ef5}', ['\u{1ef4}', '\0', + '\0']), ('\u{1ef7}', ['\u{1ef6}', '\0', '\0']), ('\u{1ef9}', ['\u{1ef8}', '\0', '\0']), + ('\u{1efb}', ['\u{1efa}', '\0', '\0']), ('\u{1efd}', ['\u{1efc}', '\0', '\0']), ('\u{1eff}', + ['\u{1efe}', '\0', '\0']), ('\u{1f00}', ['\u{1f08}', '\0', '\0']), 
('\u{1f01}', ['\u{1f09}', + '\0', '\0']), ('\u{1f02}', ['\u{1f0a}', '\0', '\0']), ('\u{1f03}', ['\u{1f0b}', '\0', + '\0']), ('\u{1f04}', ['\u{1f0c}', '\0', '\0']), ('\u{1f05}', ['\u{1f0d}', '\0', '\0']), + ('\u{1f06}', ['\u{1f0e}', '\0', '\0']), ('\u{1f07}', ['\u{1f0f}', '\0', '\0']), ('\u{1f10}', + ['\u{1f18}', '\0', '\0']), ('\u{1f11}', ['\u{1f19}', '\0', '\0']), ('\u{1f12}', ['\u{1f1a}', + '\0', '\0']), ('\u{1f13}', ['\u{1f1b}', '\0', '\0']), ('\u{1f14}', ['\u{1f1c}', '\0', + '\0']), ('\u{1f15}', ['\u{1f1d}', '\0', '\0']), ('\u{1f20}', ['\u{1f28}', '\0', '\0']), + ('\u{1f21}', ['\u{1f29}', '\0', '\0']), ('\u{1f22}', ['\u{1f2a}', '\0', '\0']), ('\u{1f23}', + ['\u{1f2b}', '\0', '\0']), ('\u{1f24}', ['\u{1f2c}', '\0', '\0']), ('\u{1f25}', ['\u{1f2d}', + '\0', '\0']), ('\u{1f26}', ['\u{1f2e}', '\0', '\0']), ('\u{1f27}', ['\u{1f2f}', '\0', + '\0']), ('\u{1f30}', ['\u{1f38}', '\0', '\0']), ('\u{1f31}', ['\u{1f39}', '\0', '\0']), + ('\u{1f32}', ['\u{1f3a}', '\0', '\0']), ('\u{1f33}', ['\u{1f3b}', '\0', '\0']), ('\u{1f34}', + ['\u{1f3c}', '\0', '\0']), ('\u{1f35}', ['\u{1f3d}', '\0', '\0']), ('\u{1f36}', ['\u{1f3e}', + '\0', '\0']), ('\u{1f37}', ['\u{1f3f}', '\0', '\0']), ('\u{1f40}', ['\u{1f48}', '\0', + '\0']), ('\u{1f41}', ['\u{1f49}', '\0', '\0']), ('\u{1f42}', ['\u{1f4a}', '\0', '\0']), + ('\u{1f43}', ['\u{1f4b}', '\0', '\0']), ('\u{1f44}', ['\u{1f4c}', '\0', '\0']), ('\u{1f45}', + ['\u{1f4d}', '\0', '\0']), ('\u{1f50}', ['\u{3a5}', '\u{313}', '\0']), ('\u{1f51}', + ['\u{1f59}', '\0', '\0']), ('\u{1f52}', ['\u{3a5}', '\u{313}', '\u{300}']), ('\u{1f53}', + ['\u{1f5b}', '\0', '\0']), ('\u{1f54}', ['\u{3a5}', '\u{313}', '\u{301}']), ('\u{1f55}', + ['\u{1f5d}', '\0', '\0']), ('\u{1f56}', ['\u{3a5}', '\u{313}', '\u{342}']), ('\u{1f57}', + ['\u{1f5f}', '\0', '\0']), ('\u{1f60}', ['\u{1f68}', '\0', '\0']), ('\u{1f61}', ['\u{1f69}', + '\0', '\0']), ('\u{1f62}', ['\u{1f6a}', '\0', '\0']), ('\u{1f63}', ['\u{1f6b}', '\0', + '\0']), ('\u{1f64}', ['\u{1f6c}', '\0', '\0']), 
('\u{1f65}', ['\u{1f6d}', '\0', '\0']), + ('\u{1f66}', ['\u{1f6e}', '\0', '\0']), ('\u{1f67}', ['\u{1f6f}', '\0', '\0']), ('\u{1f70}', + ['\u{1fba}', '\0', '\0']), ('\u{1f71}', ['\u{1fbb}', '\0', '\0']), ('\u{1f72}', ['\u{1fc8}', + '\0', '\0']), ('\u{1f73}', ['\u{1fc9}', '\0', '\0']), ('\u{1f74}', ['\u{1fca}', '\0', + '\0']), ('\u{1f75}', ['\u{1fcb}', '\0', '\0']), ('\u{1f76}', ['\u{1fda}', '\0', '\0']), + ('\u{1f77}', ['\u{1fdb}', '\0', '\0']), ('\u{1f78}', ['\u{1ff8}', '\0', '\0']), ('\u{1f79}', + ['\u{1ff9}', '\0', '\0']), ('\u{1f7a}', ['\u{1fea}', '\0', '\0']), ('\u{1f7b}', ['\u{1feb}', + '\0', '\0']), ('\u{1f7c}', ['\u{1ffa}', '\0', '\0']), ('\u{1f7d}', ['\u{1ffb}', '\0', + '\0']), ('\u{1f80}', ['\u{1f08}', '\u{399}', '\0']), ('\u{1f81}', ['\u{1f09}', '\u{399}', + '\0']), ('\u{1f82}', ['\u{1f0a}', '\u{399}', '\0']), ('\u{1f83}', ['\u{1f0b}', '\u{399}', + '\0']), ('\u{1f84}', ['\u{1f0c}', '\u{399}', '\0']), ('\u{1f85}', ['\u{1f0d}', '\u{399}', + '\0']), ('\u{1f86}', ['\u{1f0e}', '\u{399}', '\0']), ('\u{1f87}', ['\u{1f0f}', '\u{399}', + '\0']), ('\u{1f88}', ['\u{1f08}', '\u{399}', '\0']), ('\u{1f89}', ['\u{1f09}', '\u{399}', + '\0']), ('\u{1f8a}', ['\u{1f0a}', '\u{399}', '\0']), ('\u{1f8b}', ['\u{1f0b}', '\u{399}', + '\0']), ('\u{1f8c}', ['\u{1f0c}', '\u{399}', '\0']), ('\u{1f8d}', ['\u{1f0d}', '\u{399}', + '\0']), ('\u{1f8e}', ['\u{1f0e}', '\u{399}', '\0']), ('\u{1f8f}', ['\u{1f0f}', '\u{399}', + '\0']), ('\u{1f90}', ['\u{1f28}', '\u{399}', '\0']), ('\u{1f91}', ['\u{1f29}', '\u{399}', + '\0']), ('\u{1f92}', ['\u{1f2a}', '\u{399}', '\0']), ('\u{1f93}', ['\u{1f2b}', '\u{399}', + '\0']), ('\u{1f94}', ['\u{1f2c}', '\u{399}', '\0']), ('\u{1f95}', ['\u{1f2d}', '\u{399}', + '\0']), ('\u{1f96}', ['\u{1f2e}', '\u{399}', '\0']), ('\u{1f97}', ['\u{1f2f}', '\u{399}', + '\0']), ('\u{1f98}', ['\u{1f28}', '\u{399}', '\0']), ('\u{1f99}', ['\u{1f29}', '\u{399}', + '\0']), ('\u{1f9a}', ['\u{1f2a}', '\u{399}', '\0']), ('\u{1f9b}', ['\u{1f2b}', '\u{399}', + '\0']), ('\u{1f9c}', 
['\u{1f2c}', '\u{399}', '\0']), ('\u{1f9d}', ['\u{1f2d}', '\u{399}', + '\0']), ('\u{1f9e}', ['\u{1f2e}', '\u{399}', '\0']), ('\u{1f9f}', ['\u{1f2f}', '\u{399}', + '\0']), ('\u{1fa0}', ['\u{1f68}', '\u{399}', '\0']), ('\u{1fa1}', ['\u{1f69}', '\u{399}', + '\0']), ('\u{1fa2}', ['\u{1f6a}', '\u{399}', '\0']), ('\u{1fa3}', ['\u{1f6b}', '\u{399}', + '\0']), ('\u{1fa4}', ['\u{1f6c}', '\u{399}', '\0']), ('\u{1fa5}', ['\u{1f6d}', '\u{399}', + '\0']), ('\u{1fa6}', ['\u{1f6e}', '\u{399}', '\0']), ('\u{1fa7}', ['\u{1f6f}', '\u{399}', + '\0']), ('\u{1fa8}', ['\u{1f68}', '\u{399}', '\0']), ('\u{1fa9}', ['\u{1f69}', '\u{399}', + '\0']), ('\u{1faa}', ['\u{1f6a}', '\u{399}', '\0']), ('\u{1fab}', ['\u{1f6b}', '\u{399}', + '\0']), ('\u{1fac}', ['\u{1f6c}', '\u{399}', '\0']), ('\u{1fad}', ['\u{1f6d}', '\u{399}', + '\0']), ('\u{1fae}', ['\u{1f6e}', '\u{399}', '\0']), ('\u{1faf}', ['\u{1f6f}', '\u{399}', + '\0']), ('\u{1fb0}', ['\u{1fb8}', '\0', '\0']), ('\u{1fb1}', ['\u{1fb9}', '\0', '\0']), + ('\u{1fb2}', ['\u{1fba}', '\u{399}', '\0']), ('\u{1fb3}', ['\u{391}', '\u{399}', '\0']), + ('\u{1fb4}', ['\u{386}', '\u{399}', '\0']), ('\u{1fb6}', ['\u{391}', '\u{342}', '\0']), + ('\u{1fb7}', ['\u{391}', '\u{342}', '\u{399}']), ('\u{1fbc}', ['\u{391}', '\u{399}', '\0']), + ('\u{1fbe}', ['\u{399}', '\0', '\0']), ('\u{1fc2}', ['\u{1fca}', '\u{399}', '\0']), + ('\u{1fc3}', ['\u{397}', '\u{399}', '\0']), ('\u{1fc4}', ['\u{389}', '\u{399}', '\0']), + ('\u{1fc6}', ['\u{397}', '\u{342}', '\0']), ('\u{1fc7}', ['\u{397}', '\u{342}', '\u{399}']), + ('\u{1fcc}', ['\u{397}', '\u{399}', '\0']), ('\u{1fd0}', ['\u{1fd8}', '\0', '\0']), + ('\u{1fd1}', ['\u{1fd9}', '\0', '\0']), ('\u{1fd2}', ['\u{399}', '\u{308}', '\u{300}']), + ('\u{1fd3}', ['\u{399}', '\u{308}', '\u{301}']), ('\u{1fd6}', ['\u{399}', '\u{342}', '\0']), + ('\u{1fd7}', ['\u{399}', '\u{308}', '\u{342}']), ('\u{1fe0}', ['\u{1fe8}', '\0', '\0']), + ('\u{1fe1}', ['\u{1fe9}', '\0', '\0']), ('\u{1fe2}', ['\u{3a5}', '\u{308}', '\u{300}']), + 
('\u{1fe3}', ['\u{3a5}', '\u{308}', '\u{301}']), ('\u{1fe4}', ['\u{3a1}', '\u{313}', '\0']), + ('\u{1fe5}', ['\u{1fec}', '\0', '\0']), ('\u{1fe6}', ['\u{3a5}', '\u{342}', '\0']), + ('\u{1fe7}', ['\u{3a5}', '\u{308}', '\u{342}']), ('\u{1ff2}', ['\u{1ffa}', '\u{399}', + '\0']), ('\u{1ff3}', ['\u{3a9}', '\u{399}', '\0']), ('\u{1ff4}', ['\u{38f}', '\u{399}', + '\0']), ('\u{1ff6}', ['\u{3a9}', '\u{342}', '\0']), ('\u{1ff7}', ['\u{3a9}', '\u{342}', + '\u{399}']), ('\u{1ffc}', ['\u{3a9}', '\u{399}', '\0']), ('\u{214e}', ['\u{2132}', '\0', + '\0']), ('\u{2170}', ['\u{2160}', '\0', '\0']), ('\u{2171}', ['\u{2161}', '\0', '\0']), + ('\u{2172}', ['\u{2162}', '\0', '\0']), ('\u{2173}', ['\u{2163}', '\0', '\0']), ('\u{2174}', + ['\u{2164}', '\0', '\0']), ('\u{2175}', ['\u{2165}', '\0', '\0']), ('\u{2176}', ['\u{2166}', + '\0', '\0']), ('\u{2177}', ['\u{2167}', '\0', '\0']), ('\u{2178}', ['\u{2168}', '\0', + '\0']), ('\u{2179}', ['\u{2169}', '\0', '\0']), ('\u{217a}', ['\u{216a}', '\0', '\0']), + ('\u{217b}', ['\u{216b}', '\0', '\0']), ('\u{217c}', ['\u{216c}', '\0', '\0']), ('\u{217d}', + ['\u{216d}', '\0', '\0']), ('\u{217e}', ['\u{216e}', '\0', '\0']), ('\u{217f}', ['\u{216f}', + '\0', '\0']), ('\u{2184}', ['\u{2183}', '\0', '\0']), ('\u{24d0}', ['\u{24b6}', '\0', + '\0']), ('\u{24d1}', ['\u{24b7}', '\0', '\0']), ('\u{24d2}', ['\u{24b8}', '\0', '\0']), + ('\u{24d3}', ['\u{24b9}', '\0', '\0']), ('\u{24d4}', ['\u{24ba}', '\0', '\0']), ('\u{24d5}', + ['\u{24bb}', '\0', '\0']), ('\u{24d6}', ['\u{24bc}', '\0', '\0']), ('\u{24d7}', ['\u{24bd}', + '\0', '\0']), ('\u{24d8}', ['\u{24be}', '\0', '\0']), ('\u{24d9}', ['\u{24bf}', '\0', + '\0']), ('\u{24da}', ['\u{24c0}', '\0', '\0']), ('\u{24db}', ['\u{24c1}', '\0', '\0']), + ('\u{24dc}', ['\u{24c2}', '\0', '\0']), ('\u{24dd}', ['\u{24c3}', '\0', '\0']), ('\u{24de}', + ['\u{24c4}', '\0', '\0']), ('\u{24df}', ['\u{24c5}', '\0', '\0']), ('\u{24e0}', ['\u{24c6}', + '\0', '\0']), ('\u{24e1}', ['\u{24c7}', '\0', '\0']), ('\u{24e2}', 
['\u{24c8}', '\0', + '\0']), ('\u{24e3}', ['\u{24c9}', '\0', '\0']), ('\u{24e4}', ['\u{24ca}', '\0', '\0']), + ('\u{24e5}', ['\u{24cb}', '\0', '\0']), ('\u{24e6}', ['\u{24cc}', '\0', '\0']), ('\u{24e7}', + ['\u{24cd}', '\0', '\0']), ('\u{24e8}', ['\u{24ce}', '\0', '\0']), ('\u{24e9}', ['\u{24cf}', + '\0', '\0']), ('\u{2c30}', ['\u{2c00}', '\0', '\0']), ('\u{2c31}', ['\u{2c01}', '\0', + '\0']), ('\u{2c32}', ['\u{2c02}', '\0', '\0']), ('\u{2c33}', ['\u{2c03}', '\0', '\0']), + ('\u{2c34}', ['\u{2c04}', '\0', '\0']), ('\u{2c35}', ['\u{2c05}', '\0', '\0']), ('\u{2c36}', + ['\u{2c06}', '\0', '\0']), ('\u{2c37}', ['\u{2c07}', '\0', '\0']), ('\u{2c38}', ['\u{2c08}', + '\0', '\0']), ('\u{2c39}', ['\u{2c09}', '\0', '\0']), ('\u{2c3a}', ['\u{2c0a}', '\0', + '\0']), ('\u{2c3b}', ['\u{2c0b}', '\0', '\0']), ('\u{2c3c}', ['\u{2c0c}', '\0', '\0']), + ('\u{2c3d}', ['\u{2c0d}', '\0', '\0']), ('\u{2c3e}', ['\u{2c0e}', '\0', '\0']), ('\u{2c3f}', + ['\u{2c0f}', '\0', '\0']), ('\u{2c40}', ['\u{2c10}', '\0', '\0']), ('\u{2c41}', ['\u{2c11}', + '\0', '\0']), ('\u{2c42}', ['\u{2c12}', '\0', '\0']), ('\u{2c43}', ['\u{2c13}', '\0', + '\0']), ('\u{2c44}', ['\u{2c14}', '\0', '\0']), ('\u{2c45}', ['\u{2c15}', '\0', '\0']), + ('\u{2c46}', ['\u{2c16}', '\0', '\0']), ('\u{2c47}', ['\u{2c17}', '\0', '\0']), ('\u{2c48}', + ['\u{2c18}', '\0', '\0']), ('\u{2c49}', ['\u{2c19}', '\0', '\0']), ('\u{2c4a}', ['\u{2c1a}', + '\0', '\0']), ('\u{2c4b}', ['\u{2c1b}', '\0', '\0']), ('\u{2c4c}', ['\u{2c1c}', '\0', + '\0']), ('\u{2c4d}', ['\u{2c1d}', '\0', '\0']), ('\u{2c4e}', ['\u{2c1e}', '\0', '\0']), + ('\u{2c4f}', ['\u{2c1f}', '\0', '\0']), ('\u{2c50}', ['\u{2c20}', '\0', '\0']), ('\u{2c51}', + ['\u{2c21}', '\0', '\0']), ('\u{2c52}', ['\u{2c22}', '\0', '\0']), ('\u{2c53}', ['\u{2c23}', + '\0', '\0']), ('\u{2c54}', ['\u{2c24}', '\0', '\0']), ('\u{2c55}', ['\u{2c25}', '\0', + '\0']), ('\u{2c56}', ['\u{2c26}', '\0', '\0']), ('\u{2c57}', ['\u{2c27}', '\0', '\0']), + ('\u{2c58}', ['\u{2c28}', '\0', '\0']), 
('\u{2c59}', ['\u{2c29}', '\0', '\0']), ('\u{2c5a}', + ['\u{2c2a}', '\0', '\0']), ('\u{2c5b}', ['\u{2c2b}', '\0', '\0']), ('\u{2c5c}', ['\u{2c2c}', + '\0', '\0']), ('\u{2c5d}', ['\u{2c2d}', '\0', '\0']), ('\u{2c5e}', ['\u{2c2e}', '\0', + '\0']), ('\u{2c61}', ['\u{2c60}', '\0', '\0']), ('\u{2c65}', ['\u{23a}', '\0', '\0']), + ('\u{2c66}', ['\u{23e}', '\0', '\0']), ('\u{2c68}', ['\u{2c67}', '\0', '\0']), ('\u{2c6a}', + ['\u{2c69}', '\0', '\0']), ('\u{2c6c}', ['\u{2c6b}', '\0', '\0']), ('\u{2c73}', ['\u{2c72}', + '\0', '\0']), ('\u{2c76}', ['\u{2c75}', '\0', '\0']), ('\u{2c81}', ['\u{2c80}', '\0', + '\0']), ('\u{2c83}', ['\u{2c82}', '\0', '\0']), ('\u{2c85}', ['\u{2c84}', '\0', '\0']), + ('\u{2c87}', ['\u{2c86}', '\0', '\0']), ('\u{2c89}', ['\u{2c88}', '\0', '\0']), ('\u{2c8b}', + ['\u{2c8a}', '\0', '\0']), ('\u{2c8d}', ['\u{2c8c}', '\0', '\0']), ('\u{2c8f}', ['\u{2c8e}', + '\0', '\0']), ('\u{2c91}', ['\u{2c90}', '\0', '\0']), ('\u{2c93}', ['\u{2c92}', '\0', + '\0']), ('\u{2c95}', ['\u{2c94}', '\0', '\0']), ('\u{2c97}', ['\u{2c96}', '\0', '\0']), + ('\u{2c99}', ['\u{2c98}', '\0', '\0']), ('\u{2c9b}', ['\u{2c9a}', '\0', '\0']), ('\u{2c9d}', + ['\u{2c9c}', '\0', '\0']), ('\u{2c9f}', ['\u{2c9e}', '\0', '\0']), ('\u{2ca1}', ['\u{2ca0}', + '\0', '\0']), ('\u{2ca3}', ['\u{2ca2}', '\0', '\0']), ('\u{2ca5}', ['\u{2ca4}', '\0', + '\0']), ('\u{2ca7}', ['\u{2ca6}', '\0', '\0']), ('\u{2ca9}', ['\u{2ca8}', '\0', '\0']), + ('\u{2cab}', ['\u{2caa}', '\0', '\0']), ('\u{2cad}', ['\u{2cac}', '\0', '\0']), ('\u{2caf}', + ['\u{2cae}', '\0', '\0']), ('\u{2cb1}', ['\u{2cb0}', '\0', '\0']), ('\u{2cb3}', ['\u{2cb2}', + '\0', '\0']), ('\u{2cb5}', ['\u{2cb4}', '\0', '\0']), ('\u{2cb7}', ['\u{2cb6}', '\0', + '\0']), ('\u{2cb9}', ['\u{2cb8}', '\0', '\0']), ('\u{2cbb}', ['\u{2cba}', '\0', '\0']), + ('\u{2cbd}', ['\u{2cbc}', '\0', '\0']), ('\u{2cbf}', ['\u{2cbe}', '\0', '\0']), ('\u{2cc1}', + ['\u{2cc0}', '\0', '\0']), ('\u{2cc3}', ['\u{2cc2}', '\0', '\0']), ('\u{2cc5}', ['\u{2cc4}', + '\0', 
'\0']), ('\u{2cc7}', ['\u{2cc6}', '\0', '\0']), ('\u{2cc9}', ['\u{2cc8}', '\0', + '\0']), ('\u{2ccb}', ['\u{2cca}', '\0', '\0']), ('\u{2ccd}', ['\u{2ccc}', '\0', '\0']), + ('\u{2ccf}', ['\u{2cce}', '\0', '\0']), ('\u{2cd1}', ['\u{2cd0}', '\0', '\0']), ('\u{2cd3}', + ['\u{2cd2}', '\0', '\0']), ('\u{2cd5}', ['\u{2cd4}', '\0', '\0']), ('\u{2cd7}', ['\u{2cd6}', + '\0', '\0']), ('\u{2cd9}', ['\u{2cd8}', '\0', '\0']), ('\u{2cdb}', ['\u{2cda}', '\0', + '\0']), ('\u{2cdd}', ['\u{2cdc}', '\0', '\0']), ('\u{2cdf}', ['\u{2cde}', '\0', '\0']), + ('\u{2ce1}', ['\u{2ce0}', '\0', '\0']), ('\u{2ce3}', ['\u{2ce2}', '\0', '\0']), ('\u{2cec}', + ['\u{2ceb}', '\0', '\0']), ('\u{2cee}', ['\u{2ced}', '\0', '\0']), ('\u{2cf3}', ['\u{2cf2}', + '\0', '\0']), ('\u{2d00}', ['\u{10a0}', '\0', '\0']), ('\u{2d01}', ['\u{10a1}', '\0', + '\0']), ('\u{2d02}', ['\u{10a2}', '\0', '\0']), ('\u{2d03}', ['\u{10a3}', '\0', '\0']), + ('\u{2d04}', ['\u{10a4}', '\0', '\0']), ('\u{2d05}', ['\u{10a5}', '\0', '\0']), ('\u{2d06}', + ['\u{10a6}', '\0', '\0']), ('\u{2d07}', ['\u{10a7}', '\0', '\0']), ('\u{2d08}', ['\u{10a8}', + '\0', '\0']), ('\u{2d09}', ['\u{10a9}', '\0', '\0']), ('\u{2d0a}', ['\u{10aa}', '\0', + '\0']), ('\u{2d0b}', ['\u{10ab}', '\0', '\0']), ('\u{2d0c}', ['\u{10ac}', '\0', '\0']), + ('\u{2d0d}', ['\u{10ad}', '\0', '\0']), ('\u{2d0e}', ['\u{10ae}', '\0', '\0']), ('\u{2d0f}', + ['\u{10af}', '\0', '\0']), ('\u{2d10}', ['\u{10b0}', '\0', '\0']), ('\u{2d11}', ['\u{10b1}', + '\0', '\0']), ('\u{2d12}', ['\u{10b2}', '\0', '\0']), ('\u{2d13}', ['\u{10b3}', '\0', + '\0']), ('\u{2d14}', ['\u{10b4}', '\0', '\0']), ('\u{2d15}', ['\u{10b5}', '\0', '\0']), + ('\u{2d16}', ['\u{10b6}', '\0', '\0']), ('\u{2d17}', ['\u{10b7}', '\0', '\0']), ('\u{2d18}', + ['\u{10b8}', '\0', '\0']), ('\u{2d19}', ['\u{10b9}', '\0', '\0']), ('\u{2d1a}', ['\u{10ba}', + '\0', '\0']), ('\u{2d1b}', ['\u{10bb}', '\0', '\0']), ('\u{2d1c}', ['\u{10bc}', '\0', + '\0']), ('\u{2d1d}', ['\u{10bd}', '\0', '\0']), ('\u{2d1e}', ['\u{10be}', 
'\0', '\0']), + ('\u{2d1f}', ['\u{10bf}', '\0', '\0']), ('\u{2d20}', ['\u{10c0}', '\0', '\0']), ('\u{2d21}', + ['\u{10c1}', '\0', '\0']), ('\u{2d22}', ['\u{10c2}', '\0', '\0']), ('\u{2d23}', ['\u{10c3}', + '\0', '\0']), ('\u{2d24}', ['\u{10c4}', '\0', '\0']), ('\u{2d25}', ['\u{10c5}', '\0', + '\0']), ('\u{2d27}', ['\u{10c7}', '\0', '\0']), ('\u{2d2d}', ['\u{10cd}', '\0', '\0']), + ('\u{a641}', ['\u{a640}', '\0', '\0']), ('\u{a643}', ['\u{a642}', '\0', '\0']), ('\u{a645}', + ['\u{a644}', '\0', '\0']), ('\u{a647}', ['\u{a646}', '\0', '\0']), ('\u{a649}', ['\u{a648}', + '\0', '\0']), ('\u{a64b}', ['\u{a64a}', '\0', '\0']), ('\u{a64d}', ['\u{a64c}', '\0', + '\0']), ('\u{a64f}', ['\u{a64e}', '\0', '\0']), ('\u{a651}', ['\u{a650}', '\0', '\0']), + ('\u{a653}', ['\u{a652}', '\0', '\0']), ('\u{a655}', ['\u{a654}', '\0', '\0']), ('\u{a657}', + ['\u{a656}', '\0', '\0']), ('\u{a659}', ['\u{a658}', '\0', '\0']), ('\u{a65b}', ['\u{a65a}', + '\0', '\0']), ('\u{a65d}', ['\u{a65c}', '\0', '\0']), ('\u{a65f}', ['\u{a65e}', '\0', + '\0']), ('\u{a661}', ['\u{a660}', '\0', '\0']), ('\u{a663}', ['\u{a662}', '\0', '\0']), + ('\u{a665}', ['\u{a664}', '\0', '\0']), ('\u{a667}', ['\u{a666}', '\0', '\0']), ('\u{a669}', + ['\u{a668}', '\0', '\0']), ('\u{a66b}', ['\u{a66a}', '\0', '\0']), ('\u{a66d}', ['\u{a66c}', + '\0', '\0']), ('\u{a681}', ['\u{a680}', '\0', '\0']), ('\u{a683}', ['\u{a682}', '\0', + '\0']), ('\u{a685}', ['\u{a684}', '\0', '\0']), ('\u{a687}', ['\u{a686}', '\0', '\0']), + ('\u{a689}', ['\u{a688}', '\0', '\0']), ('\u{a68b}', ['\u{a68a}', '\0', '\0']), ('\u{a68d}', + ['\u{a68c}', '\0', '\0']), ('\u{a68f}', ['\u{a68e}', '\0', '\0']), ('\u{a691}', ['\u{a690}', + '\0', '\0']), ('\u{a693}', ['\u{a692}', '\0', '\0']), ('\u{a695}', ['\u{a694}', '\0', + '\0']), ('\u{a697}', ['\u{a696}', '\0', '\0']), ('\u{a699}', ['\u{a698}', '\0', '\0']), + ('\u{a69b}', ['\u{a69a}', '\0', '\0']), ('\u{a723}', ['\u{a722}', '\0', '\0']), ('\u{a725}', + ['\u{a724}', '\0', '\0']), ('\u{a727}', 
['\u{a726}', '\0', '\0']), ('\u{a729}', ['\u{a728}', + '\0', '\0']), ('\u{a72b}', ['\u{a72a}', '\0', '\0']), ('\u{a72d}', ['\u{a72c}', '\0', + '\0']), ('\u{a72f}', ['\u{a72e}', '\0', '\0']), ('\u{a733}', ['\u{a732}', '\0', '\0']), + ('\u{a735}', ['\u{a734}', '\0', '\0']), ('\u{a737}', ['\u{a736}', '\0', '\0']), ('\u{a739}', + ['\u{a738}', '\0', '\0']), ('\u{a73b}', ['\u{a73a}', '\0', '\0']), ('\u{a73d}', ['\u{a73c}', + '\0', '\0']), ('\u{a73f}', ['\u{a73e}', '\0', '\0']), ('\u{a741}', ['\u{a740}', '\0', + '\0']), ('\u{a743}', ['\u{a742}', '\0', '\0']), ('\u{a745}', ['\u{a744}', '\0', '\0']), + ('\u{a747}', ['\u{a746}', '\0', '\0']), ('\u{a749}', ['\u{a748}', '\0', '\0']), ('\u{a74b}', + ['\u{a74a}', '\0', '\0']), ('\u{a74d}', ['\u{a74c}', '\0', '\0']), ('\u{a74f}', ['\u{a74e}', + '\0', '\0']), ('\u{a751}', ['\u{a750}', '\0', '\0']), ('\u{a753}', ['\u{a752}', '\0', + '\0']), ('\u{a755}', ['\u{a754}', '\0', '\0']), ('\u{a757}', ['\u{a756}', '\0', '\0']), + ('\u{a759}', ['\u{a758}', '\0', '\0']), ('\u{a75b}', ['\u{a75a}', '\0', '\0']), ('\u{a75d}', + ['\u{a75c}', '\0', '\0']), ('\u{a75f}', ['\u{a75e}', '\0', '\0']), ('\u{a761}', ['\u{a760}', + '\0', '\0']), ('\u{a763}', ['\u{a762}', '\0', '\0']), ('\u{a765}', ['\u{a764}', '\0', + '\0']), ('\u{a767}', ['\u{a766}', '\0', '\0']), ('\u{a769}', ['\u{a768}', '\0', '\0']), + ('\u{a76b}', ['\u{a76a}', '\0', '\0']), ('\u{a76d}', ['\u{a76c}', '\0', '\0']), ('\u{a76f}', + ['\u{a76e}', '\0', '\0']), ('\u{a77a}', ['\u{a779}', '\0', '\0']), ('\u{a77c}', ['\u{a77b}', + '\0', '\0']), ('\u{a77f}', ['\u{a77e}', '\0', '\0']), ('\u{a781}', ['\u{a780}', '\0', + '\0']), ('\u{a783}', ['\u{a782}', '\0', '\0']), ('\u{a785}', ['\u{a784}', '\0', '\0']), + ('\u{a787}', ['\u{a786}', '\0', '\0']), ('\u{a78c}', ['\u{a78b}', '\0', '\0']), ('\u{a791}', + ['\u{a790}', '\0', '\0']), ('\u{a793}', ['\u{a792}', '\0', '\0']), ('\u{a797}', ['\u{a796}', + '\0', '\0']), ('\u{a799}', ['\u{a798}', '\0', '\0']), ('\u{a79b}', ['\u{a79a}', '\0', + '\0']), 
('\u{a79d}', ['\u{a79c}', '\0', '\0']), ('\u{a79f}', ['\u{a79e}', '\0', '\0']), + ('\u{a7a1}', ['\u{a7a0}', '\0', '\0']), ('\u{a7a3}', ['\u{a7a2}', '\0', '\0']), ('\u{a7a5}', + ['\u{a7a4}', '\0', '\0']), ('\u{a7a7}', ['\u{a7a6}', '\0', '\0']), ('\u{a7a9}', ['\u{a7a8}', + '\0', '\0']), ('\u{a7b5}', ['\u{a7b4}', '\0', '\0']), ('\u{a7b7}', ['\u{a7b6}', '\0', + '\0']), ('\u{ab53}', ['\u{a7b3}', '\0', '\0']), ('\u{ab70}', ['\u{13a0}', '\0', '\0']), + ('\u{ab71}', ['\u{13a1}', '\0', '\0']), ('\u{ab72}', ['\u{13a2}', '\0', '\0']), ('\u{ab73}', + ['\u{13a3}', '\0', '\0']), ('\u{ab74}', ['\u{13a4}', '\0', '\0']), ('\u{ab75}', ['\u{13a5}', + '\0', '\0']), ('\u{ab76}', ['\u{13a6}', '\0', '\0']), ('\u{ab77}', ['\u{13a7}', '\0', + '\0']), ('\u{ab78}', ['\u{13a8}', '\0', '\0']), ('\u{ab79}', ['\u{13a9}', '\0', '\0']), + ('\u{ab7a}', ['\u{13aa}', '\0', '\0']), ('\u{ab7b}', ['\u{13ab}', '\0', '\0']), ('\u{ab7c}', + ['\u{13ac}', '\0', '\0']), ('\u{ab7d}', ['\u{13ad}', '\0', '\0']), ('\u{ab7e}', ['\u{13ae}', + '\0', '\0']), ('\u{ab7f}', ['\u{13af}', '\0', '\0']), ('\u{ab80}', ['\u{13b0}', '\0', + '\0']), ('\u{ab81}', ['\u{13b1}', '\0', '\0']), ('\u{ab82}', ['\u{13b2}', '\0', '\0']), + ('\u{ab83}', ['\u{13b3}', '\0', '\0']), ('\u{ab84}', ['\u{13b4}', '\0', '\0']), ('\u{ab85}', + ['\u{13b5}', '\0', '\0']), ('\u{ab86}', ['\u{13b6}', '\0', '\0']), ('\u{ab87}', ['\u{13b7}', + '\0', '\0']), ('\u{ab88}', ['\u{13b8}', '\0', '\0']), ('\u{ab89}', ['\u{13b9}', '\0', + '\0']), ('\u{ab8a}', ['\u{13ba}', '\0', '\0']), ('\u{ab8b}', ['\u{13bb}', '\0', '\0']), + ('\u{ab8c}', ['\u{13bc}', '\0', '\0']), ('\u{ab8d}', ['\u{13bd}', '\0', '\0']), ('\u{ab8e}', + ['\u{13be}', '\0', '\0']), ('\u{ab8f}', ['\u{13bf}', '\0', '\0']), ('\u{ab90}', ['\u{13c0}', + '\0', '\0']), ('\u{ab91}', ['\u{13c1}', '\0', '\0']), ('\u{ab92}', ['\u{13c2}', '\0', + '\0']), ('\u{ab93}', ['\u{13c3}', '\0', '\0']), ('\u{ab94}', ['\u{13c4}', '\0', '\0']), + ('\u{ab95}', ['\u{13c5}', '\0', '\0']), ('\u{ab96}', ['\u{13c6}', '\0', 
'\0']), ('\u{ab97}', + ['\u{13c7}', '\0', '\0']), ('\u{ab98}', ['\u{13c8}', '\0', '\0']), ('\u{ab99}', ['\u{13c9}', + '\0', '\0']), ('\u{ab9a}', ['\u{13ca}', '\0', '\0']), ('\u{ab9b}', ['\u{13cb}', '\0', + '\0']), ('\u{ab9c}', ['\u{13cc}', '\0', '\0']), ('\u{ab9d}', ['\u{13cd}', '\0', '\0']), + ('\u{ab9e}', ['\u{13ce}', '\0', '\0']), ('\u{ab9f}', ['\u{13cf}', '\0', '\0']), ('\u{aba0}', + ['\u{13d0}', '\0', '\0']), ('\u{aba1}', ['\u{13d1}', '\0', '\0']), ('\u{aba2}', ['\u{13d2}', + '\0', '\0']), ('\u{aba3}', ['\u{13d3}', '\0', '\0']), ('\u{aba4}', ['\u{13d4}', '\0', + '\0']), ('\u{aba5}', ['\u{13d5}', '\0', '\0']), ('\u{aba6}', ['\u{13d6}', '\0', '\0']), + ('\u{aba7}', ['\u{13d7}', '\0', '\0']), ('\u{aba8}', ['\u{13d8}', '\0', '\0']), ('\u{aba9}', + ['\u{13d9}', '\0', '\0']), ('\u{abaa}', ['\u{13da}', '\0', '\0']), ('\u{abab}', ['\u{13db}', + '\0', '\0']), ('\u{abac}', ['\u{13dc}', '\0', '\0']), ('\u{abad}', ['\u{13dd}', '\0', + '\0']), ('\u{abae}', ['\u{13de}', '\0', '\0']), ('\u{abaf}', ['\u{13df}', '\0', '\0']), + ('\u{abb0}', ['\u{13e0}', '\0', '\0']), ('\u{abb1}', ['\u{13e1}', '\0', '\0']), ('\u{abb2}', + ['\u{13e2}', '\0', '\0']), ('\u{abb3}', ['\u{13e3}', '\0', '\0']), ('\u{abb4}', ['\u{13e4}', + '\0', '\0']), ('\u{abb5}', ['\u{13e5}', '\0', '\0']), ('\u{abb6}', ['\u{13e6}', '\0', + '\0']), ('\u{abb7}', ['\u{13e7}', '\0', '\0']), ('\u{abb8}', ['\u{13e8}', '\0', '\0']), + ('\u{abb9}', ['\u{13e9}', '\0', '\0']), ('\u{abba}', ['\u{13ea}', '\0', '\0']), ('\u{abbb}', + ['\u{13eb}', '\0', '\0']), ('\u{abbc}', ['\u{13ec}', '\0', '\0']), ('\u{abbd}', ['\u{13ed}', + '\0', '\0']), ('\u{abbe}', ['\u{13ee}', '\0', '\0']), ('\u{abbf}', ['\u{13ef}', '\0', + '\0']), ('\u{fb00}', ['\u{46}', '\u{46}', '\0']), ('\u{fb01}', ['\u{46}', '\u{49}', '\0']), + ('\u{fb02}', ['\u{46}', '\u{4c}', '\0']), ('\u{fb03}', ['\u{46}', '\u{46}', '\u{49}']), + ('\u{fb04}', ['\u{46}', '\u{46}', '\u{4c}']), ('\u{fb05}', ['\u{53}', '\u{54}', '\0']), + ('\u{fb06}', ['\u{53}', '\u{54}', '\0']), 
('\u{fb13}', ['\u{544}', '\u{546}', '\0']), + ('\u{fb14}', ['\u{544}', '\u{535}', '\0']), ('\u{fb15}', ['\u{544}', '\u{53b}', '\0']), + ('\u{fb16}', ['\u{54e}', '\u{546}', '\0']), ('\u{fb17}', ['\u{544}', '\u{53d}', '\0']), + ('\u{ff41}', ['\u{ff21}', '\0', '\0']), ('\u{ff42}', ['\u{ff22}', '\0', '\0']), ('\u{ff43}', + ['\u{ff23}', '\0', '\0']), ('\u{ff44}', ['\u{ff24}', '\0', '\0']), ('\u{ff45}', ['\u{ff25}', + '\0', '\0']), ('\u{ff46}', ['\u{ff26}', '\0', '\0']), ('\u{ff47}', ['\u{ff27}', '\0', + '\0']), ('\u{ff48}', ['\u{ff28}', '\0', '\0']), ('\u{ff49}', ['\u{ff29}', '\0', '\0']), + ('\u{ff4a}', ['\u{ff2a}', '\0', '\0']), ('\u{ff4b}', ['\u{ff2b}', '\0', '\0']), ('\u{ff4c}', + ['\u{ff2c}', '\0', '\0']), ('\u{ff4d}', ['\u{ff2d}', '\0', '\0']), ('\u{ff4e}', ['\u{ff2e}', + '\0', '\0']), ('\u{ff4f}', ['\u{ff2f}', '\0', '\0']), ('\u{ff50}', ['\u{ff30}', '\0', + '\0']), ('\u{ff51}', ['\u{ff31}', '\0', '\0']), ('\u{ff52}', ['\u{ff32}', '\0', '\0']), + ('\u{ff53}', ['\u{ff33}', '\0', '\0']), ('\u{ff54}', ['\u{ff34}', '\0', '\0']), ('\u{ff55}', + ['\u{ff35}', '\0', '\0']), ('\u{ff56}', ['\u{ff36}', '\0', '\0']), ('\u{ff57}', ['\u{ff37}', + '\0', '\0']), ('\u{ff58}', ['\u{ff38}', '\0', '\0']), ('\u{ff59}', ['\u{ff39}', '\0', + '\0']), ('\u{ff5a}', ['\u{ff3a}', '\0', '\0']), ('\u{10428}', ['\u{10400}', '\0', '\0']), + ('\u{10429}', ['\u{10401}', '\0', '\0']), ('\u{1042a}', ['\u{10402}', '\0', '\0']), + ('\u{1042b}', ['\u{10403}', '\0', '\0']), ('\u{1042c}', ['\u{10404}', '\0', '\0']), + ('\u{1042d}', ['\u{10405}', '\0', '\0']), ('\u{1042e}', ['\u{10406}', '\0', '\0']), + ('\u{1042f}', ['\u{10407}', '\0', '\0']), ('\u{10430}', ['\u{10408}', '\0', '\0']), + ('\u{10431}', ['\u{10409}', '\0', '\0']), ('\u{10432}', ['\u{1040a}', '\0', '\0']), + ('\u{10433}', ['\u{1040b}', '\0', '\0']), ('\u{10434}', ['\u{1040c}', '\0', '\0']), + ('\u{10435}', ['\u{1040d}', '\0', '\0']), ('\u{10436}', ['\u{1040e}', '\0', '\0']), + ('\u{10437}', ['\u{1040f}', '\0', '\0']), ('\u{10438}', 
['\u{10410}', '\0', '\0']), + ('\u{10439}', ['\u{10411}', '\0', '\0']), ('\u{1043a}', ['\u{10412}', '\0', '\0']), + ('\u{1043b}', ['\u{10413}', '\0', '\0']), ('\u{1043c}', ['\u{10414}', '\0', '\0']), + ('\u{1043d}', ['\u{10415}', '\0', '\0']), ('\u{1043e}', ['\u{10416}', '\0', '\0']), + ('\u{1043f}', ['\u{10417}', '\0', '\0']), ('\u{10440}', ['\u{10418}', '\0', '\0']), + ('\u{10441}', ['\u{10419}', '\0', '\0']), ('\u{10442}', ['\u{1041a}', '\0', '\0']), + ('\u{10443}', ['\u{1041b}', '\0', '\0']), ('\u{10444}', ['\u{1041c}', '\0', '\0']), + ('\u{10445}', ['\u{1041d}', '\0', '\0']), ('\u{10446}', ['\u{1041e}', '\0', '\0']), + ('\u{10447}', ['\u{1041f}', '\0', '\0']), ('\u{10448}', ['\u{10420}', '\0', '\0']), + ('\u{10449}', ['\u{10421}', '\0', '\0']), ('\u{1044a}', ['\u{10422}', '\0', '\0']), + ('\u{1044b}', ['\u{10423}', '\0', '\0']), ('\u{1044c}', ['\u{10424}', '\0', '\0']), + ('\u{1044d}', ['\u{10425}', '\0', '\0']), ('\u{1044e}', ['\u{10426}', '\0', '\0']), + ('\u{1044f}', ['\u{10427}', '\0', '\0']), ('\u{104d8}', ['\u{104b0}', '\0', '\0']), + ('\u{104d9}', ['\u{104b1}', '\0', '\0']), ('\u{104da}', ['\u{104b2}', '\0', '\0']), + ('\u{104db}', ['\u{104b3}', '\0', '\0']), ('\u{104dc}', ['\u{104b4}', '\0', '\0']), + ('\u{104dd}', ['\u{104b5}', '\0', '\0']), ('\u{104de}', ['\u{104b6}', '\0', '\0']), + ('\u{104df}', ['\u{104b7}', '\0', '\0']), ('\u{104e0}', ['\u{104b8}', '\0', '\0']), + ('\u{104e1}', ['\u{104b9}', '\0', '\0']), ('\u{104e2}', ['\u{104ba}', '\0', '\0']), + ('\u{104e3}', ['\u{104bb}', '\0', '\0']), ('\u{104e4}', ['\u{104bc}', '\0', '\0']), + ('\u{104e5}', ['\u{104bd}', '\0', '\0']), ('\u{104e6}', ['\u{104be}', '\0', '\0']), + ('\u{104e7}', ['\u{104bf}', '\0', '\0']), ('\u{104e8}', ['\u{104c0}', '\0', '\0']), + ('\u{104e9}', ['\u{104c1}', '\0', '\0']), ('\u{104ea}', ['\u{104c2}', '\0', '\0']), + ('\u{104eb}', ['\u{104c3}', '\0', '\0']), ('\u{104ec}', ['\u{104c4}', '\0', '\0']), + ('\u{104ed}', ['\u{104c5}', '\0', '\0']), ('\u{104ee}', ['\u{104c6}', '\0', 
'\0']), + ('\u{104ef}', ['\u{104c7}', '\0', '\0']), ('\u{104f0}', ['\u{104c8}', '\0', '\0']), + ('\u{104f1}', ['\u{104c9}', '\0', '\0']), ('\u{104f2}', ['\u{104ca}', '\0', '\0']), + ('\u{104f3}', ['\u{104cb}', '\0', '\0']), ('\u{104f4}', ['\u{104cc}', '\0', '\0']), + ('\u{104f5}', ['\u{104cd}', '\0', '\0']), ('\u{104f6}', ['\u{104ce}', '\0', '\0']), + ('\u{104f7}', ['\u{104cf}', '\0', '\0']), ('\u{104f8}', ['\u{104d0}', '\0', '\0']), + ('\u{104f9}', ['\u{104d1}', '\0', '\0']), ('\u{104fa}', ['\u{104d2}', '\0', '\0']), + ('\u{104fb}', ['\u{104d3}', '\0', '\0']), ('\u{10cc0}', ['\u{10c80}', '\0', '\0']), + ('\u{10cc1}', ['\u{10c81}', '\0', '\0']), ('\u{10cc2}', ['\u{10c82}', '\0', '\0']), + ('\u{10cc3}', ['\u{10c83}', '\0', '\0']), ('\u{10cc4}', ['\u{10c84}', '\0', '\0']), + ('\u{10cc5}', ['\u{10c85}', '\0', '\0']), ('\u{10cc6}', ['\u{10c86}', '\0', '\0']), + ('\u{10cc7}', ['\u{10c87}', '\0', '\0']), ('\u{10cc8}', ['\u{10c88}', '\0', '\0']), + ('\u{10cc9}', ['\u{10c89}', '\0', '\0']), ('\u{10cca}', ['\u{10c8a}', '\0', '\0']), + ('\u{10ccb}', ['\u{10c8b}', '\0', '\0']), ('\u{10ccc}', ['\u{10c8c}', '\0', '\0']), + ('\u{10ccd}', ['\u{10c8d}', '\0', '\0']), ('\u{10cce}', ['\u{10c8e}', '\0', '\0']), + ('\u{10ccf}', ['\u{10c8f}', '\0', '\0']), ('\u{10cd0}', ['\u{10c90}', '\0', '\0']), + ('\u{10cd1}', ['\u{10c91}', '\0', '\0']), ('\u{10cd2}', ['\u{10c92}', '\0', '\0']), + ('\u{10cd3}', ['\u{10c93}', '\0', '\0']), ('\u{10cd4}', ['\u{10c94}', '\0', '\0']), + ('\u{10cd5}', ['\u{10c95}', '\0', '\0']), ('\u{10cd6}', ['\u{10c96}', '\0', '\0']), + ('\u{10cd7}', ['\u{10c97}', '\0', '\0']), ('\u{10cd8}', ['\u{10c98}', '\0', '\0']), + ('\u{10cd9}', ['\u{10c99}', '\0', '\0']), ('\u{10cda}', ['\u{10c9a}', '\0', '\0']), + ('\u{10cdb}', ['\u{10c9b}', '\0', '\0']), ('\u{10cdc}', ['\u{10c9c}', '\0', '\0']), + ('\u{10cdd}', ['\u{10c9d}', '\0', '\0']), ('\u{10cde}', ['\u{10c9e}', '\0', '\0']), + ('\u{10cdf}', ['\u{10c9f}', '\0', '\0']), ('\u{10ce0}', ['\u{10ca0}', '\0', '\0']), + 
('\u{10ce1}', ['\u{10ca1}', '\0', '\0']), ('\u{10ce2}', ['\u{10ca2}', '\0', '\0']), + ('\u{10ce3}', ['\u{10ca3}', '\0', '\0']), ('\u{10ce4}', ['\u{10ca4}', '\0', '\0']), + ('\u{10ce5}', ['\u{10ca5}', '\0', '\0']), ('\u{10ce6}', ['\u{10ca6}', '\0', '\0']), + ('\u{10ce7}', ['\u{10ca7}', '\0', '\0']), ('\u{10ce8}', ['\u{10ca8}', '\0', '\0']), + ('\u{10ce9}', ['\u{10ca9}', '\0', '\0']), ('\u{10cea}', ['\u{10caa}', '\0', '\0']), + ('\u{10ceb}', ['\u{10cab}', '\0', '\0']), ('\u{10cec}', ['\u{10cac}', '\0', '\0']), + ('\u{10ced}', ['\u{10cad}', '\0', '\0']), ('\u{10cee}', ['\u{10cae}', '\0', '\0']), + ('\u{10cef}', ['\u{10caf}', '\0', '\0']), ('\u{10cf0}', ['\u{10cb0}', '\0', '\0']), + ('\u{10cf1}', ['\u{10cb1}', '\0', '\0']), ('\u{10cf2}', ['\u{10cb2}', '\0', '\0']), + ('\u{118c0}', ['\u{118a0}', '\0', '\0']), ('\u{118c1}', ['\u{118a1}', '\0', '\0']), + ('\u{118c2}', ['\u{118a2}', '\0', '\0']), ('\u{118c3}', ['\u{118a3}', '\0', '\0']), + ('\u{118c4}', ['\u{118a4}', '\0', '\0']), ('\u{118c5}', ['\u{118a5}', '\0', '\0']), + ('\u{118c6}', ['\u{118a6}', '\0', '\0']), ('\u{118c7}', ['\u{118a7}', '\0', '\0']), + ('\u{118c8}', ['\u{118a8}', '\0', '\0']), ('\u{118c9}', ['\u{118a9}', '\0', '\0']), + ('\u{118ca}', ['\u{118aa}', '\0', '\0']), ('\u{118cb}', ['\u{118ab}', '\0', '\0']), + ('\u{118cc}', ['\u{118ac}', '\0', '\0']), ('\u{118cd}', ['\u{118ad}', '\0', '\0']), + ('\u{118ce}', ['\u{118ae}', '\0', '\0']), ('\u{118cf}', ['\u{118af}', '\0', '\0']), + ('\u{118d0}', ['\u{118b0}', '\0', '\0']), ('\u{118d1}', ['\u{118b1}', '\0', '\0']), + ('\u{118d2}', ['\u{118b2}', '\0', '\0']), ('\u{118d3}', ['\u{118b3}', '\0', '\0']), + ('\u{118d4}', ['\u{118b4}', '\0', '\0']), ('\u{118d5}', ['\u{118b5}', '\0', '\0']), + ('\u{118d6}', ['\u{118b6}', '\0', '\0']), ('\u{118d7}', ['\u{118b7}', '\0', '\0']), + ('\u{118d8}', ['\u{118b8}', '\0', '\0']), ('\u{118d9}', ['\u{118b9}', '\0', '\0']), + ('\u{118da}', ['\u{118ba}', '\0', '\0']), ('\u{118db}', ['\u{118bb}', '\0', '\0']), + ('\u{118dc}', 
['\u{118bc}', '\0', '\0']), ('\u{118dd}', ['\u{118bd}', '\0', '\0']), + ('\u{118de}', ['\u{118be}', '\0', '\0']), ('\u{118df}', ['\u{118bf}', '\0', '\0']), + ('\u{1e922}', ['\u{1e900}', '\0', '\0']), ('\u{1e923}', ['\u{1e901}', '\0', '\0']), + ('\u{1e924}', ['\u{1e902}', '\0', '\0']), ('\u{1e925}', ['\u{1e903}', '\0', '\0']), + ('\u{1e926}', ['\u{1e904}', '\0', '\0']), ('\u{1e927}', ['\u{1e905}', '\0', '\0']), + ('\u{1e928}', ['\u{1e906}', '\0', '\0']), ('\u{1e929}', ['\u{1e907}', '\0', '\0']), + ('\u{1e92a}', ['\u{1e908}', '\0', '\0']), ('\u{1e92b}', ['\u{1e909}', '\0', '\0']), + ('\u{1e92c}', ['\u{1e90a}', '\0', '\0']), ('\u{1e92d}', ['\u{1e90b}', '\0', '\0']), + ('\u{1e92e}', ['\u{1e90c}', '\0', '\0']), ('\u{1e92f}', ['\u{1e90d}', '\0', '\0']), + ('\u{1e930}', ['\u{1e90e}', '\0', '\0']), ('\u{1e931}', ['\u{1e90f}', '\0', '\0']), + ('\u{1e932}', ['\u{1e910}', '\0', '\0']), ('\u{1e933}', ['\u{1e911}', '\0', '\0']), + ('\u{1e934}', ['\u{1e912}', '\0', '\0']), ('\u{1e935}', ['\u{1e913}', '\0', '\0']), + ('\u{1e936}', ['\u{1e914}', '\0', '\0']), ('\u{1e937}', ['\u{1e915}', '\0', '\0']), + ('\u{1e938}', ['\u{1e916}', '\0', '\0']), ('\u{1e939}', ['\u{1e917}', '\0', '\0']), + ('\u{1e93a}', ['\u{1e918}', '\0', '\0']), ('\u{1e93b}', ['\u{1e919}', '\0', '\0']), + ('\u{1e93c}', ['\u{1e91a}', '\0', '\0']), ('\u{1e93d}', ['\u{1e91b}', '\0', '\0']), + ('\u{1e93e}', ['\u{1e91c}', '\0', '\0']), ('\u{1e93f}', ['\u{1e91d}', '\0', '\0']), + ('\u{1e940}', ['\u{1e91e}', '\0', '\0']), ('\u{1e941}', ['\u{1e91f}', '\0', '\0']), + ('\u{1e942}', ['\u{1e920}', '\0', '\0']), ('\u{1e943}', ['\u{1e921}', '\0', '\0']) ]; } diff --git a/src/librustc_unicode/u_str.rs b/src/librustc_unicode/u_str.rs index f65c05672f68b..1c7894794c9c8 100644 --- a/src/librustc_unicode/u_str.rs +++ b/src/librustc_unicode/u_str.rs @@ -14,7 +14,7 @@ //! methods provided by the unicode parts of the CharExt trait. 
use core::char; -use core::iter::Filter; +use core::iter::{Filter, FusedIterator}; use core::str::Split; /// An iterator over the non-whitespace substrings of a string, @@ -30,9 +30,9 @@ pub trait UnicodeStr { fn split_whitespace<'a>(&'a self) -> SplitWhitespace<'a>; fn is_whitespace(&self) -> bool; fn is_alphanumeric(&self) -> bool; - fn trim<'a>(&'a self) -> &'a str; - fn trim_left<'a>(&'a self) -> &'a str; - fn trim_right<'a>(&'a self) -> &'a str; + fn trim(&self) -> &str; + fn trim_left(&self) -> &str; + fn trim_right(&self) -> &str; } impl UnicodeStr for str { @@ -144,7 +144,9 @@ impl Utf16Encoder { } } -impl Iterator for Utf16Encoder where I: Iterator { +impl Iterator for Utf16Encoder + where I: Iterator +{ type Item = u16; #[inline] @@ -157,7 +159,7 @@ impl Iterator for Utf16Encoder where I: Iterator { let mut buf = [0; 2]; self.chars.next().map(|ch| { - let n = CharExt::encode_utf16(ch, &mut buf).unwrap_or(0); + let n = CharExt::encode_utf16(ch, &mut buf).len(); if n == 2 { self.extra = buf[1]; } @@ -175,6 +177,11 @@ impl Iterator for Utf16Encoder where I: Iterator { } } +#[unstable(feature = "fused", issue = "35602")] +impl FusedIterator for Utf16Encoder + where I: FusedIterator {} + +#[stable(feature = "split_whitespace", since = "1.1.0")] impl<'a> Iterator for SplitWhitespace<'a> { type Item = &'a str; @@ -182,8 +189,13 @@ impl<'a> Iterator for SplitWhitespace<'a> { self.inner.next() } } + +#[stable(feature = "split_whitespace", since = "1.1.0")] impl<'a> DoubleEndedIterator for SplitWhitespace<'a> { fn next_back(&mut self) -> Option<&'a str> { self.inner.next_back() } } + +#[unstable(feature = "fused", issue = "35602")] +impl<'a> FusedIterator for SplitWhitespace<'a> {} diff --git a/src/librustdoc/Cargo.toml b/src/librustdoc/Cargo.toml new file mode 100644 index 0000000000000..d66d2001f2304 --- /dev/null +++ b/src/librustdoc/Cargo.toml @@ -0,0 +1,32 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustdoc" +version = "0.0.0" +build = 
"build.rs" + +[lib] +name = "rustdoc" +path = "lib.rs" +crate-type = ["dylib"] + +[dependencies] +arena = { path = "../libarena" } +rustc = { path = "../librustc" } +rustc_back = { path = "../librustc_back" } +rustc_const_eval = { path = "../librustc_const_eval" } +rustc_const_math = { path = "../librustc_const_math" } +rustc_driver = { path = "../librustc_driver" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_errors = { path = "../librustc_errors" } +rustc_lint = { path = "../librustc_lint" } +rustc_metadata = { path = "../librustc_metadata" } +rustc_resolve = { path = "../librustc_resolve" } +rustc_trans = { path = "../librustc_trans" } +serialize = { path = "../libserialize" } +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +log = { path = "../liblog" } + +[build-dependencies] +build_helper = { path = "../build_helper" } +gcc = "0.3.27" diff --git a/src/librustdoc/build.rs b/src/librustdoc/build.rs new file mode 100644 index 0000000000000..171954f325a5e --- /dev/null +++ b/src/librustdoc/build.rs @@ -0,0 +1,27 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +extern crate gcc; + +fn main() { + println!("cargo:rustc-cfg=cargobuild"); + let mut cfg = gcc::Config::new(); + cfg.file("../rt/hoedown/src/autolink.c") + .file("../rt/hoedown/src/buffer.c") + .file("../rt/hoedown/src/document.c") + .file("../rt/hoedown/src/escape.c") + .file("../rt/hoedown/src/html.c") + .file("../rt/hoedown/src/html_blocks.c") + .file("../rt/hoedown/src/html_smartypants.c") + .file("../rt/hoedown/src/stack.c") + .file("../rt/hoedown/src/version.c") + .include("../rt/hoedown/src") + .compile("libhoedown.a"); +} diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index 30b478f486e0a..94e9fdbfc3e2c 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -10,51 +10,43 @@ //! Support for inlining external documentation into the current AST. -use std::collections::HashSet; +use std::iter::once; use syntax::ast; -use syntax::attr::AttrMetaMethods; -use rustc_front::hir; - -use rustc::middle::cstore::{self, CrateStore}; -use rustc::middle::def; -use rustc::middle::def_id::DefId; -use rustc::middle::ty; -use rustc::middle::subst; -use rustc::middle::stability; -use rustc::middle::const_eval; - -use core::DocContext; +use rustc::hir; + +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def_id::DefId; +use rustc::hir::print as pprust; +use rustc::ty; +use rustc::util::nodemap::FxHashSet; + +use rustc_const_eval::lookup_const_by_id; + +use core::{DocContext, DocAccessLevels}; use doctree; -use clean; +use clean::{self, GetDefId}; -use super::{Clean, ToSource}; +use super::Clean; -/// Attempt to inline the definition of a local node id into this AST. +/// Attempt to inline a definition into this AST. /// -/// This function will fetch the definition of the id specified, and if it is -/// from another crate it will attempt to inline the documentation from the -/// other crate into this crate. 
+/// This function will fetch the definition specified, and if it is +/// from another crate it will attempt to inline the documentation +/// from the other crate into this crate. /// /// This is primarily used for `pub use` statements which are, in general, /// implementation details. Inlining the documentation should help provide a /// better experience when reading the documentation in this use case. /// -/// The returned value is `None` if the `id` could not be inlined, and `Some` -/// of a vector of items if it was successfully expanded. -pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option) +/// The returned value is `None` if the definition could not be inlined, +/// and `Some` of a vector of items if it was successfully expanded. +pub fn try_inline(cx: &DocContext, def: Def, into: Option) -> Option> { - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - None => return None, - }; - let def = match tcx.def_map.borrow().get(&id) { - Some(d) => d.full_def(), - None => return None, - }; + if def == Def::Err { return None } let did = def.def_id(); if did.is_local() { return None } - try_inline_def(cx, tcx, def).map(|vec| { + try_inline_def(cx, def).map(|vec| { vec.into_iter().map(|mut item| { match into { Some(into) if item.name.is_some() => { @@ -67,69 +59,76 @@ pub fn try_inline(cx: &DocContext, id: ast::NodeId, into: Option) }) } -fn try_inline_def(cx: &DocContext, tcx: &ty::ctxt, - def: def::Def) -> Option> { +fn try_inline_def(cx: &DocContext, def: Def) -> Option> { + let tcx = cx.tcx; let mut ret = Vec::new(); - let did = def.def_id(); let inner = match def { - def::DefTrait(did) => { - record_extern_fqn(cx, did, clean::TypeTrait); - clean::TraitItem(build_external_trait(cx, tcx, did)) + Def::Trait(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Trait); + ret.extend(build_impls(cx, did)); + clean::TraitItem(build_external_trait(cx, did)) } - def::DefFn(did, false) => { - // If this function is a tuple struct constructor, we just skip 
it - record_extern_fqn(cx, did, clean::TypeFunction); - clean::FunctionItem(build_external_function(cx, tcx, did)) + Def::Fn(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Function); + clean::FunctionItem(build_external_function(cx, did)) } - def::DefStruct(did) => { - record_extern_fqn(cx, did, clean::TypeStruct); - ret.extend(build_impls(cx, tcx, did)); - clean::StructItem(build_struct(cx, tcx, did)) + Def::Struct(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Struct); + ret.extend(build_impls(cx, did)); + clean::StructItem(build_struct(cx, did)) } - def::DefTy(did, false) => { - record_extern_fqn(cx, did, clean::TypeTypedef); - ret.extend(build_impls(cx, tcx, did)); - build_type(cx, tcx, did) + Def::Union(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Union); + ret.extend(build_impls(cx, did)); + clean::UnionItem(build_union(cx, did)) } - def::DefTy(did, true) => { - record_extern_fqn(cx, did, clean::TypeEnum); - ret.extend(build_impls(cx, tcx, did)); - build_type(cx, tcx, did) + Def::TyAlias(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Typedef); + ret.extend(build_impls(cx, did)); + clean::TypedefItem(build_type_alias(cx, did), false) + } + Def::Enum(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Enum); + ret.extend(build_impls(cx, did)); + clean::EnumItem(build_enum(cx, did)) } // Assume that the enum type is reexported next to the variant, and // variants don't show up in documentation specially. - def::DefVariant(..) => return Some(Vec::new()), - def::DefMod(did) => { - record_extern_fqn(cx, did, clean::TypeModule); - clean::ModuleItem(build_module(cx, tcx, did)) + // Similarly, consider that struct type is reexported next to its constructor. + Def::Variant(..) | + Def::VariantCtor(..) | + Def::StructCtor(..) 
=> return Some(Vec::new()), + Def::Mod(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Module); + clean::ModuleItem(build_module(cx, did)) } - def::DefStatic(did, mtbl) => { - record_extern_fqn(cx, did, clean::TypeStatic); - clean::StaticItem(build_static(cx, tcx, did, mtbl)) + Def::Static(did, mtbl) => { + record_extern_fqn(cx, did, clean::TypeKind::Static); + clean::StaticItem(build_static(cx, did, mtbl)) } - def::DefConst(did) | def::DefAssociatedConst(did) => { - record_extern_fqn(cx, did, clean::TypeConst); - clean::ConstantItem(build_const(cx, tcx, did)) + Def::Const(did) => { + record_extern_fqn(cx, did, clean::TypeKind::Const); + clean::ConstantItem(build_const(cx, did)) } _ => return None, }; - cx.inlined.borrow_mut().as_mut().unwrap().insert(did); + let did = def.def_id(); + cx.renderinfo.borrow_mut().inlined.insert(did); ret.push(clean::Item { - source: clean::Span::empty(), + source: tcx.def_span(did).clean(cx), name: Some(tcx.item_name(did).to_string()), - attrs: load_attrs(cx, tcx, did), + attrs: load_attrs(cx, did), inner: inner, - visibility: Some(hir::Public), - stability: stability::lookup_stability(tcx, did).clean(cx), - deprecation: stability::lookup_deprecation(tcx, did).clean(cx), + visibility: Some(clean::Public), + stability: tcx.lookup_stability(did).clean(cx), + deprecation: tcx.lookup_deprecation(did).clean(cx), def_id: did, }); Some(ret) } -pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt, - did: DefId) -> Vec { - tcx.get_attrs(did).iter().map(|a| a.clean(cx)).collect() +pub fn load_attrs(cx: &DocContext, did: DefId) -> clean::Attributes { + cx.tcx.get_attrs(did).clean(cx) } /// Record an external fully qualified name in the external_paths cache. @@ -137,154 +136,175 @@ pub fn load_attrs(cx: &DocContext, tcx: &ty::ctxt, /// These names are used later on by HTML rendering to generate things like /// source links back to the original item. 
pub fn record_extern_fqn(cx: &DocContext, did: DefId, kind: clean::TypeKind) { - match cx.tcx_opt() { - Some(tcx) => { - let fqn = tcx.sess.cstore.extern_item_path(did); - let fqn = fqn.into_iter().map(|i| i.to_string()).collect(); - cx.external_paths.borrow_mut().as_mut().unwrap().insert(did, (fqn, kind)); + let crate_name = cx.tcx.sess.cstore.crate_name(did.krate).to_string(); + let relative = cx.tcx.def_path(did).data.into_iter().filter_map(|elem| { + // extern blocks have an empty name + let s = elem.data.to_string(); + if !s.is_empty() { + Some(s) + } else { + None } - None => {} - } + }); + let fqn = once(crate_name).chain(relative).collect(); + cx.renderinfo.borrow_mut().external_paths.insert(did, (fqn, kind)); } -pub fn build_external_trait(cx: &DocContext, tcx: &ty::ctxt, - did: DefId) -> clean::Trait { - let def = tcx.lookup_trait_def(did); - let trait_items = tcx.trait_items(did).clean(cx); - let predicates = tcx.lookup_predicates(did); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); +pub fn build_external_trait(cx: &DocContext, did: DefId) -> clean::Trait { + let trait_items = cx.tcx.associated_items(did).map(|item| item.clean(cx)).collect(); + let predicates = cx.tcx.item_predicates(did); + let generics = (cx.tcx.item_generics(did), &predicates).clean(cx); let generics = filter_non_trait_generics(did, generics); let (generics, supertrait_bounds) = separate_supertrait_bounds(generics); clean::Trait { - unsafety: def.unsafety, + unsafety: cx.tcx.lookup_trait_def(did).unsafety, generics: generics, items: trait_items, bounds: supertrait_bounds, } } -fn build_external_function(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Function { - let t = tcx.lookup_item_type(did); - let (decl, style, abi) = match t.ty.sty { - ty::TyBareFn(_, ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi), +fn build_external_function(cx: &DocContext, did: DefId) -> clean::Function { + let ty = cx.tcx.item_type(did); + let (decl, style, abi) = 
match ty.sty { + ty::TyFnDef(.., ref f) => ((did, &f.sig).clean(cx), f.unsafety, f.abi), _ => panic!("bad function"), }; - let constness = if tcx.sess.cstore.is_const_fn(did) { + let constness = if cx.tcx.sess.cstore.is_const_fn(did) { hir::Constness::Const } else { hir::Constness::NotConst }; - let predicates = tcx.lookup_predicates(did); + let predicates = cx.tcx.item_predicates(did); clean::Function { decl: decl, - generics: (&t.generics, &predicates, subst::FnSpace).clean(cx), + generics: (cx.tcx.item_generics(did), &predicates).clean(cx), unsafety: style, constness: constness, abi: abi, } } -fn build_struct(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::Struct { - use syntax::parse::token::special_idents::unnamed_field; +fn build_enum(cx: &DocContext, did: DefId) -> clean::Enum { + let predicates = cx.tcx.item_predicates(did); - let t = tcx.lookup_item_type(did); - let predicates = tcx.lookup_predicates(did); - let variant = tcx.lookup_adt_def(did).struct_variant(); + clean::Enum { + generics: (cx.tcx.item_generics(did), &predicates).clean(cx), + variants_stripped: false, + variants: cx.tcx.lookup_adt_def(did).variants.clean(cx), + } +} + +fn build_struct(cx: &DocContext, did: DefId) -> clean::Struct { + let predicates = cx.tcx.item_predicates(did); + let variant = cx.tcx.lookup_adt_def(did).struct_variant(); clean::Struct { - struct_type: match &*variant.fields { - [] => doctree::Unit, - [ref f] if f.name == unnamed_field.name => doctree::Newtype, - [ref f, ..] 
if f.name == unnamed_field.name => doctree::Tuple, - _ => doctree::Plain, + struct_type: match variant.ctor_kind { + CtorKind::Fictive => doctree::Plain, + CtorKind::Fn => doctree::Tuple, + CtorKind::Const => doctree::Unit, }, - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), + generics: (cx.tcx.item_generics(did), &predicates).clean(cx), fields: variant.fields.clean(cx), fields_stripped: false, } } -fn build_type(cx: &DocContext, tcx: &ty::ctxt, did: DefId) -> clean::ItemEnum { - let t = tcx.lookup_item_type(did); - let predicates = tcx.lookup_predicates(did); - match t.ty.sty { - ty::TyEnum(edef, _) if !tcx.sess.cstore.is_typedef(did) => { - return clean::EnumItem(clean::Enum { - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), - variants_stripped: false, - variants: edef.variants.clean(cx), - }) - } - _ => {} +fn build_union(cx: &DocContext, did: DefId) -> clean::Union { + let predicates = cx.tcx.item_predicates(did); + let variant = cx.tcx.lookup_adt_def(did).struct_variant(); + + clean::Union { + struct_type: doctree::Plain, + generics: (cx.tcx.item_generics(did), &predicates).clean(cx), + fields: variant.fields.clean(cx), + fields_stripped: false, } +} + +fn build_type_alias(cx: &DocContext, did: DefId) -> clean::Typedef { + let predicates = cx.tcx.item_predicates(did); - clean::TypedefItem(clean::Typedef { - type_: t.ty.clean(cx), - generics: (&t.generics, &predicates, subst::TypeSpace).clean(cx), - }, false) + clean::Typedef { + type_: cx.tcx.item_type(did).clean(cx), + generics: (cx.tcx.item_generics(did), &predicates).clean(cx), + } } -pub fn build_impls(cx: &DocContext, tcx: &ty::ctxt, - did: DefId) -> Vec { +pub fn build_impls(cx: &DocContext, did: DefId) -> Vec { + let tcx = cx.tcx; tcx.populate_inherent_implementations_for_type_if_necessary(did); let mut impls = Vec::new(); - match tcx.inherent_impls.borrow().get(&did) { - None => {} - Some(i) => { - for &did in i.iter() { - build_impl(cx, tcx, did, &mut impls); - } 
+ if let Some(i) = tcx.inherent_impls.borrow().get(&did) { + for &did in i.iter() { + build_impl(cx, did, &mut impls); } } - - // If this is the first time we've inlined something from this crate, then - // we inline *all* impls from the crate into this crate. Note that there's + // If this is the first time we've inlined something from another crate, then + // we inline *all* impls from all the crates into this crate. Note that there's // currently no way for us to filter this based on type, and we likely need // many impls for a variety of reasons. // // Primarily, the impls will be used to populate the documentation for this // type being inlined, but impls can also be used when generating // documentation for primitives (no way to find those specifically). - if cx.populated_crate_impls.borrow_mut().insert(did.krate) { - for item in tcx.sess.cstore.crate_top_level_items(did.krate) { - populate_impls(cx, tcx, item.def, &mut impls); - } + if cx.populated_all_crate_impls.get() { + return impls; + } - fn populate_impls(cx: &DocContext, tcx: &ty::ctxt, - def: cstore::DefLike, - impls: &mut Vec) { - match def { - cstore::DlImpl(did) => build_impl(cx, tcx, did, impls), - cstore::DlDef(def::DefMod(did)) => { - for item in tcx.sess.cstore.item_children(did) { - populate_impls(cx, tcx, item.def, impls) - } - } - _ => {} - } + cx.populated_all_crate_impls.set(true); + + for did in tcx.sess.cstore.implementations_of_trait(None) { + build_impl(cx, did, &mut impls); + } + + // Also try to inline primitive impls from other crates. 
+ let primitive_impls = [ + tcx.lang_items.isize_impl(), + tcx.lang_items.i8_impl(), + tcx.lang_items.i16_impl(), + tcx.lang_items.i32_impl(), + tcx.lang_items.i64_impl(), + tcx.lang_items.usize_impl(), + tcx.lang_items.u8_impl(), + tcx.lang_items.u16_impl(), + tcx.lang_items.u32_impl(), + tcx.lang_items.u64_impl(), + tcx.lang_items.f32_impl(), + tcx.lang_items.f64_impl(), + tcx.lang_items.char_impl(), + tcx.lang_items.str_impl(), + tcx.lang_items.slice_impl(), + tcx.lang_items.const_ptr_impl(), + tcx.lang_items.mut_ptr_impl(), + ]; + + for def_id in primitive_impls.iter().filter_map(|&def_id| def_id) { + if !def_id.is_local() { + build_impl(cx, def_id, &mut impls); } } - return impls; + impls } -pub fn build_impl(cx: &DocContext, - tcx: &ty::ctxt, - did: DefId, - ret: &mut Vec) { - if !cx.inlined.borrow_mut().as_mut().unwrap().insert(did) { +pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { + if !cx.renderinfo.borrow_mut().inlined.insert(did) { return } - let attrs = load_attrs(cx, tcx, did); + let attrs = load_attrs(cx, did); + let tcx = cx.tcx; let associated_trait = tcx.impl_trait_ref(did); - if let Some(ref t) = associated_trait { - // If this is an impl for a #[doc(hidden)] trait, be sure to not inline - let trait_attrs = load_attrs(cx, tcx, t.def_id); - if trait_attrs.iter().any(|a| is_doc_hidden(a)) { + + // Only inline impl if the implemented trait is + // reachable in rustdoc generated documentation + if let Some(traitref) = associated_trait { + if !cx.access_levels.borrow().is_doc_reachable(traitref.def_id) { return } } @@ -300,56 +320,60 @@ pub fn build_impl(cx: &DocContext, clean::RegionBound(..) 
=> unreachable!(), }, }), - source: clean::Span::empty(), + source: tcx.def_span(did).clean(cx), name: None, attrs: attrs, - visibility: Some(hir::Inherited), - stability: stability::lookup_stability(tcx, did).clean(cx), - deprecation: stability::lookup_deprecation(tcx, did).clean(cx), + visibility: Some(clean::Inherited), + stability: tcx.lookup_stability(did).clean(cx), + deprecation: tcx.lookup_deprecation(did).clean(cx), def_id: did, }); } - let predicates = tcx.lookup_predicates(did); - let trait_items = tcx.sess.cstore.impl_items(did) - .iter() - .filter_map(|did| { - let did = did.def_id(); - let impl_item = tcx.impl_or_trait_item(did); - match impl_item { - ty::ConstTraitItem(ref assoc_const) => { - let did = assoc_const.def_id; - let type_scheme = tcx.lookup_item_type(did); - let default = if assoc_const.has_value { - Some(const_eval::lookup_const_by_id(tcx, did, None, None) - .unwrap().span.to_src(cx)) + let for_ = tcx.item_type(did).clean(cx); + + // Only inline impl if the implementing type is + // reachable in rustdoc generated documentation + if let Some(did) = for_.def_id() { + if !cx.access_levels.borrow().is_doc_reachable(did) { + return + } + } + + let predicates = tcx.item_predicates(did); + let trait_items = tcx.associated_items(did).filter_map(|item| { + match item.kind { + ty::AssociatedKind::Const => { + let default = if item.defaultness.has_value() { + Some(pprust::expr_to_string( + lookup_const_by_id(tcx, item.def_id, None).unwrap().0)) } else { None }; Some(clean::Item { - name: Some(assoc_const.name.clean(cx)), + name: Some(item.name.clean(cx)), inner: clean::AssociatedConstItem( - type_scheme.ty.clean(cx), + tcx.item_type(item.def_id).clean(cx), default, ), - source: clean::Span::empty(), - attrs: vec![], + source: tcx.def_span(item.def_id).clean(cx), + attrs: clean::Attributes::default(), visibility: None, - stability: stability::lookup_stability(tcx, did).clean(cx), - deprecation: stability::lookup_deprecation(tcx, did).clean(cx), - 
def_id: did + stability: tcx.lookup_stability(item.def_id).clean(cx), + deprecation: tcx.lookup_deprecation(item.def_id).clean(cx), + def_id: item.def_id }) } - ty::MethodTraitItem(method) => { - if method.vis != hir::Public && associated_trait.is_none() { + ty::AssociatedKind::Method => { + if item.vis != ty::Visibility::Public && associated_trait.is_none() { return None } - let mut item = method.clean(cx); - item.inner = match item.inner.clone() { + let mut cleaned = item.clean(cx); + cleaned.inner = match cleaned.inner.clone() { clean::TyMethodItem(clean::TyMethod { - unsafety, decl, self_, generics, abi + unsafety, decl, generics, abi }) => { - let constness = if tcx.sess.cstore.is_const_fn(did) { + let constness = if tcx.sess.cstore.is_const_fn(item.def_id) { hir::Constness::Const } else { hir::Constness::NotConst @@ -359,127 +383,101 @@ pub fn build_impl(cx: &DocContext, unsafety: unsafety, constness: constness, decl: decl, - self_: self_, generics: generics, abi: abi }) } - _ => panic!("not a tymethod"), + ref r => panic!("not a tymethod: {:?}", r), }; - Some(item) + Some(cleaned) } - ty::TypeTraitItem(ref assoc_ty) => { - let did = assoc_ty.def_id; - let type_scheme = ty::TypeScheme { - ty: assoc_ty.ty.unwrap(), - generics: ty::Generics::empty() + ty::AssociatedKind::Type => { + let typedef = clean::Typedef { + type_: tcx.item_type(item.def_id).clean(cx), + generics: clean::Generics { + lifetimes: vec![], + type_params: vec![], + where_predicates: vec![] + } }; - // Not sure the choice of ParamSpace actually matters here, - // because an associated type won't have generics on the LHS - let typedef = (type_scheme, ty::GenericPredicates::empty(), - subst::ParamSpace::TypeSpace).clean(cx); Some(clean::Item { - name: Some(assoc_ty.name.clean(cx)), + name: Some(item.name.clean(cx)), inner: clean::TypedefItem(typedef, true), - source: clean::Span::empty(), - attrs: vec![], + source: tcx.def_span(item.def_id).clean(cx), + attrs: clean::Attributes::default(), 
visibility: None, - stability: stability::lookup_stability(tcx, did).clean(cx), - deprecation: stability::lookup_deprecation(tcx, did).clean(cx), - def_id: did + stability: tcx.lookup_stability(item.def_id).clean(cx), + deprecation: tcx.lookup_deprecation(item.def_id).clean(cx), + def_id: item.def_id }) } } }).collect::>(); let polarity = tcx.trait_impl_polarity(did); - let ty = tcx.lookup_item_type(did); let trait_ = associated_trait.clean(cx).map(|bound| { match bound { clean::TraitBound(polyt, _) => polyt.trait_, clean::RegionBound(..) => unreachable!(), } }); - if let Some(clean::ResolvedPath { did, .. }) = trait_ { - if Some(did) == cx.deref_trait_did.get() { - super::build_deref_target_impls(cx, &trait_items, ret); - } + if trait_.def_id() == tcx.lang_items.deref_trait() { + super::build_deref_target_impls(cx, &trait_items, ret); } + + let provided = trait_.def_id().map(|did| { + tcx.provided_trait_methods(did) + .into_iter() + .map(|meth| meth.name.to_string()) + .collect() + }).unwrap_or(FxHashSet()); + ret.push(clean::Item { inner: clean::ImplItem(clean::Impl { unsafety: hir::Unsafety::Normal, // FIXME: this should be decoded - derived: clean::detect_derived(&attrs), + provided_trait_methods: provided, trait_: trait_, - for_: ty.ty.clean(cx), - generics: (&ty.generics, &predicates, subst::TypeSpace).clean(cx), + for_: for_, + generics: (tcx.item_generics(did), &predicates).clean(cx), items: trait_items, - polarity: polarity.map(|p| { p.clean(cx) }), + polarity: Some(polarity.clean(cx)), }), - source: clean::Span::empty(), + source: tcx.def_span(did).clean(cx), name: None, attrs: attrs, - visibility: Some(hir::Inherited), - stability: stability::lookup_stability(tcx, did).clean(cx), - deprecation: stability::lookup_deprecation(tcx, did).clean(cx), + visibility: Some(clean::Inherited), + stability: tcx.lookup_stability(did).clean(cx), + deprecation: tcx.lookup_deprecation(did).clean(cx), def_id: did, }); - - fn is_doc_hidden(a: &clean::Attribute) -> bool { - 
match *a { - clean::List(ref name, ref inner) if *name == "doc" => { - inner.iter().any(|a| { - match *a { - clean::Word(ref s) => *s == "hidden", - _ => false, - } - }) - } - _ => false - } - } } -fn build_module(cx: &DocContext, tcx: &ty::ctxt, - did: DefId) -> clean::Module { +fn build_module(cx: &DocContext, did: DefId) -> clean::Module { let mut items = Vec::new(); - fill_in(cx, tcx, did, &mut items); + fill_in(cx, did, &mut items); return clean::Module { items: items, is_crate: false, }; - fn fill_in(cx: &DocContext, tcx: &ty::ctxt, did: DefId, - items: &mut Vec) { + fn fill_in(cx: &DocContext, did: DefId, items: &mut Vec) { // If we're reexporting a reexport it may actually reexport something in // two namespaces, so the target may be listed twice. Make sure we only // visit each node at most once. - let mut visited = HashSet::new(); - for item in tcx.sess.cstore.item_children(did) { - match item.def { - cstore::DlDef(def::DefForeignMod(did)) => { - fill_in(cx, tcx, did, items); - } - cstore::DlDef(def) if item.vis == hir::Public => { - if !visited.insert(def) { continue } - match try_inline_def(cx, tcx, def) { - Some(i) => items.extend(i), - None => {} - } + let mut visited = FxHashSet(); + for item in cx.tcx.sess.cstore.item_children(did) { + let def_id = item.def.def_id(); + if cx.tcx.sess.cstore.visibility(def_id) == ty::Visibility::Public { + if !visited.insert(def_id) { continue } + if let Some(i) = try_inline_def(cx, item.def) { + items.extend(i) } - cstore::DlDef(..) => {} - // All impls were inlined above - cstore::DlImpl(..) 
=> {} - cstore::DlField => panic!("unimplemented field"), } } } } -fn build_const(cx: &DocContext, tcx: &ty::ctxt, - did: DefId) -> clean::Constant { - use rustc::middle::const_eval; - use rustc_front::print::pprust; - - let expr = const_eval::lookup_const_by_id(tcx, did, None, None).unwrap_or_else(|| { +fn build_const(cx: &DocContext, did: DefId) -> clean::Constant { + let (expr, ty) = lookup_const_by_id(cx.tcx, did, None).unwrap_or_else(|| { panic!("expected lookup_const_by_id to succeed for {:?}", did); }); debug!("converting constant expr {:?} to snippet", expr); @@ -487,16 +485,14 @@ fn build_const(cx: &DocContext, tcx: &ty::ctxt, debug!("got snippet {}", sn); clean::Constant { - type_: tcx.lookup_item_type(did).ty.clean(cx), + type_: ty.map(|t| t.clean(cx)).unwrap_or_else(|| cx.tcx.item_type(did).clean(cx)), expr: sn } } -fn build_static(cx: &DocContext, tcx: &ty::ctxt, - did: DefId, - mutable: bool) -> clean::Static { +fn build_static(cx: &DocContext, did: DefId, mutable: bool) -> clean::Static { clean::Static { - type_: tcx.lookup_item_type(did).ty.clean(cx), + type_: cx.tcx.item_type(did).clean(cx), mutability: if mutable {clean::Mutable} else {clean::Immutable}, expr: "\n\n\n".to_string(), // trigger the "[definition]" links } @@ -506,11 +502,32 @@ fn build_static(cx: &DocContext, tcx: &ty::ctxt, /// its associated types as well. We specifically move these clauses to the /// associated types instead when displaying, so when we're genering the /// generics for the trait itself we need to be sure to remove them. +/// We also need to remove the implied "recursive" Self: Trait bound. 
/// /// The inverse of this filtering logic can be found in the `Clean` /// implementation for `AssociatedType` fn filter_non_trait_generics(trait_did: DefId, mut g: clean::Generics) -> clean::Generics { + for pred in &mut g.where_predicates { + match *pred { + clean::WherePredicate::BoundPredicate { + ty: clean::Generic(ref s), + ref mut bounds + } if *s == "Self" => { + bounds.retain(|bound| { + match *bound { + clean::TyParamBound::TraitBound(clean::PolyTrait { + trait_: clean::ResolvedPath { did, .. }, + .. + }, _) => did != trait_did, + _ => true + } + }); + } + _ => {} + } + } + g.where_predicates.retain(|pred| { match *pred { clean::WherePredicate::BoundPredicate { @@ -518,12 +535,12 @@ fn filter_non_trait_generics(trait_did: DefId, mut g: clean::Generics) self_type: box clean::Generic(ref s), trait_: box clean::ResolvedPath { did, .. }, name: ref _name, - }, .. - } => *s != "Self" || did != trait_did, + }, ref bounds + } => !(*s == "Self" && did == trait_did) && !bounds.is_empty(), _ => true, } }); - return g; + g } /// Supertrait bounds for a trait are also listed in the generics coming from diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index d2a5fd457d2fb..bc8472bb6b760 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -12,61 +12,56 @@ //! that clean them. 
pub use self::Type::*; -pub use self::PrimitiveType::*; -pub use self::TypeKind::*; -pub use self::StructField::*; -pub use self::VariantKind::*; pub use self::Mutability::*; -pub use self::Import::*; pub use self::ItemEnum::*; -pub use self::Attribute::*; pub use self::TyParamBound::*; pub use self::SelfTy::*; pub use self::FunctionRetTy::*; +pub use self::Visibility::*; -use syntax; -use syntax::abi; +use syntax::abi::Abi; use syntax::ast; use syntax::attr; -use syntax::attr::{AttributeMethods, AttrMetaMethods}; -use syntax::codemap; -use syntax::codemap::{DUMMY_SP, Pos, Spanned}; -use syntax::parse::token::{self, InternedString, special_idents}; +use syntax::codemap::Spanned; use syntax::ptr::P; - -use rustc_trans::back::link; -use rustc::middle::cstore::{self, CrateStore}; -use rustc::middle::def; -use rustc::middle::def_id::{DefId, DefIndex}; -use rustc::middle::subst::{self, ParamSpace, VecPerParamSpace}; -use rustc::middle::ty; +use syntax::symbol::keywords; +use syntax_pos::{self, DUMMY_SP, Pos}; + +use rustc::middle::privacy::AccessLevels; +use rustc::middle::resolve_lifetime::DefRegion::*; +use rustc::middle::lang_items; +use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use rustc::hir::print as pprust; +use rustc::ty::subst::Substs; +use rustc::ty::{self, AdtKind}; use rustc::middle::stability; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; -use rustc_front::hir; +use rustc::hir; -use std::collections::HashMap; use std::path::PathBuf; use std::rc::Rc; +use std::slice; +use std::sync::Arc; use std::u32; +use std::mem; use core::DocContext; use doctree; use visit_ast; +use html::item_type::ItemType; -/// A stable identifier to the particular version of JSON output. -/// Increment this when the `Crate` and related structures change. 
-pub const SCHEMA_VERSION: &'static str = "0.8.3"; - -mod inline; +pub mod inline; mod simplify; // extract the stability index for a node from tcx, if possible fn get_stability(cx: &DocContext, def_id: DefId) -> Option { - cx.tcx_opt().and_then(|tcx| stability::lookup_stability(tcx, def_id)).clean(cx) + cx.tcx.lookup_stability(def_id).clean(cx) } fn get_deprecation(cx: &DocContext, def_id: DefId) -> Option { - cx.tcx_opt().and_then(|tcx| stability::lookup_deprecation(tcx, def_id)).clean(cx) + cx.tcx.lookup_deprecation(def_id).clean(cx) } pub trait Clean { @@ -79,12 +74,6 @@ impl, U> Clean> for [T] { } } -impl, U> Clean> for VecPerParamSpace { - fn clean(&self, cx: &DocContext) -> VecPerParamSpace { - self.map(|x| x.clean(cx)) - } -} - impl, U> Clean for P { fn clean(&self, cx: &DocContext) -> U { (**self).clean(cx) @@ -99,10 +88,7 @@ impl, U> Clean for Rc { impl, U> Clean> for Option { fn clean(&self, cx: &DocContext) -> Option { - match self { - &None => None, - &Some(ref v) => Some(v.clean(cx)) - } + self.as_ref().map(|v| v.clean(cx)) } } @@ -118,101 +104,72 @@ impl, U> Clean> for P<[T]> { } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, Debug)] pub struct Crate { pub name: String, pub src: PathBuf, pub module: Option, - pub externs: Vec<(ast::CrateNum, ExternalCrate)>, - pub primitives: Vec, - pub external_traits: HashMap, + pub externs: Vec<(CrateNum, ExternalCrate)>, + pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, + pub access_levels: Arc>, + // These are later on moved into `CACHEKEY`, leaving the map empty. + // Only here so that they can be filtered through the rustdoc passes. 
+ pub external_traits: FxHashMap, } -struct CrateNum(ast::CrateNum); - impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { fn clean(&self, cx: &DocContext) -> Crate { - use rustc::session::config::Input; + use ::visit_lib::LibEmbargoVisitor; - if let Some(t) = cx.tcx_opt() { - cx.deref_trait_did.set(t.lang_items.deref_trait()); + { + let mut r = cx.renderinfo.borrow_mut(); + r.deref_trait_did = cx.tcx.lang_items.deref_trait(); + r.deref_mut_trait_did = cx.tcx.lang_items.deref_mut_trait(); } let mut externs = Vec::new(); for cnum in cx.sess().cstore.crates() { - externs.push((cnum, CrateNum(cnum).clean(cx))); + externs.push((cnum, cnum.clean(cx))); + // Analyze doc-reachability for extern items + LibEmbargoVisitor::new(cx).visit_lib(cnum); } externs.sort_by(|&(a, _), &(b, _)| a.cmp(&b)); - // Figure out the name of this crate - let input = &cx.input; - let name = link::find_crate_name(None, &self.attrs, input); - // Clean the crate, translating the entire libsyntax AST to one that is // understood by rustdoc. let mut module = self.module.clean(cx); - // Collect all inner modules which are tagged as implementations of - // primitives. - // - // Note that this loop only searches the top-level items of the crate, - // and this is intentional. If we were to search the entire crate for an - // item tagged with `#[doc(primitive)]` then we would also have to - // search the entirety of external modules for items tagged - // `#[doc(primitive)]`, which is a pretty inefficient process (decoding - // all that metadata unconditionally). - // - // In order to keep the metadata load under control, the - // `#[doc(primitive)]` feature is explicitly designed to only allow the - // primitive tags to show up as the top level items in a crate. - // - // Also note that this does not attempt to deal with modules tagged - // duplicately for the same primitive. This is handled later on when - // rendering by delegating everything to a hash map. 
- let mut primitives = Vec::new(); + let ExternalCrate { name, src, primitives, .. } = LOCAL_CRATE.clean(cx); { let m = match module.inner { ModuleItem(ref mut m) => m, _ => unreachable!(), }; - let mut tmp = Vec::new(); - for child in &mut m.items { - match child.inner { - ModuleItem(..) => {} - _ => continue, - } - let prim = match PrimitiveType::find(&child.attrs) { - Some(prim) => prim, - None => continue, - }; - primitives.push(prim); - tmp.push(Item { + m.items.extend(primitives.iter().map(|&(def_id, prim, ref attrs)| { + Item { source: Span::empty(), name: Some(prim.to_url_str().to_string()), - attrs: child.attrs.clone(), - visibility: Some(hir::Public), + attrs: attrs.clone(), + visibility: Some(Public), stability: None, deprecation: None, - def_id: DefId::local(prim.to_def_index()), + def_id: def_id, inner: PrimitiveItem(prim), - }); - } - m.items.extend(tmp); + } + })); } - let src = match cx.input { - Input::File(ref path) => path.clone(), - Input::Str(_) => PathBuf::new() // FIXME: this is wrong - }; + let mut access_levels = cx.access_levels.borrow_mut(); + let mut external_traits = cx.external_traits.borrow_mut(); Crate { - name: name.to_string(), + name: name, src: src, module: Some(module), externs: externs, primitives: primitives, - external_traits: cx.external_traits.borrow_mut().take() - .unwrap_or(HashMap::new()), + access_levels: Arc::new(mem::replace(&mut access_levels, Default::default())), + external_traits: mem::replace(&mut external_traits, Default::default()), } } } @@ -220,26 +177,78 @@ impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct ExternalCrate { pub name: String, - pub attrs: Vec, - pub primitives: Vec, + pub src: PathBuf, + pub attrs: Attributes, + pub primitives: Vec<(DefId, PrimitiveType, Attributes)>, } impl Clean for CrateNum { fn clean(&self, cx: &DocContext) -> ExternalCrate { - let mut primitives = Vec::new(); - cx.tcx_opt().map(|tcx| { - for 
item in tcx.sess.cstore.crate_top_level_items(self.0) { - let did = match item.def { - cstore::DlDef(def::DefMod(did)) => did, - _ => continue - }; - let attrs = inline::load_attrs(cx, tcx, did); - PrimitiveType::find(&attrs).map(|prim| primitives.push(prim)); + let root = DefId { krate: *self, index: CRATE_DEF_INDEX }; + let krate_span = cx.tcx.def_span(root); + let krate_src = cx.sess().codemap().span_to_filename(krate_span); + + // Collect all inner modules which are tagged as implementations of + // primitives. + // + // Note that this loop only searches the top-level items of the crate, + // and this is intentional. If we were to search the entire crate for an + // item tagged with `#[doc(primitive)]` then we would also have to + // search the entirety of external modules for items tagged + // `#[doc(primitive)]`, which is a pretty inefficient process (decoding + // all that metadata unconditionally). + // + // In order to keep the metadata load under control, the + // `#[doc(primitive)]` feature is explicitly designed to only allow the + // primitive tags to show up as the top level items in a crate. + // + // Also note that this does not attempt to deal with modules tagged + // duplicately for the same primitive. This is handled later on when + // rendering by delegating everything to a hash map. 
+ let as_primitive = |def: Def| { + if let Def::Mod(def_id) = def { + let attrs = cx.tcx.get_attrs(def_id).clean(cx); + let mut prim = None; + for attr in attrs.lists("doc") { + if let Some(v) = attr.value_str() { + if attr.check_name("primitive") { + prim = PrimitiveType::from_str(&v.as_str()); + if prim.is_some() { + break; + } + } + } + } + return prim.map(|p| (def_id, p, attrs)); } - }); + None + }; + let primitives = if root.is_local() { + cx.tcx.map.krate().module.item_ids.iter().filter_map(|&id| { + let item = cx.tcx.map.expect_item(id.id); + match item.node { + hir::ItemMod(_) => { + as_primitive(Def::Mod(cx.tcx.map.local_def_id(id.id))) + } + hir::ItemUse(ref path, hir::UseKind::Single) + if item.vis == hir::Visibility::Public => { + as_primitive(path.def).map(|(_, prim, attrs)| { + // Pretend the primitive is local. + (cx.tcx.map.local_def_id(id.id), prim, attrs) + }) + } + _ => None + } + }).collect() + } else { + cx.tcx.sess.cstore.item_children(root).iter().map(|item| item.def) + .filter_map(as_primitive).collect() + }; + ExternalCrate { - name: cx.sess().cstore.crate_name(self.0), - attrs: cx.sess().cstore.crate_attrs(self.0).clean(cx), + name: cx.tcx.crate_name(*self).to_string(), + src: PathBuf::from(krate_src), + attrs: cx.tcx.get_attrs(root).clean(cx), primitives: primitives, } } @@ -254,7 +263,7 @@ pub struct Item { pub source: Span, /// Not everything has a name. E.g., impls pub name: Option, - pub attrs: Vec , + pub attrs: Attributes, pub inner: ItemEnum, pub visibility: Option, pub def_id: DefId, @@ -263,81 +272,82 @@ pub struct Item { } impl Item { - /// Finds the `doc` attribute as a List and returns the list of attributes - /// nested inside. 
- pub fn doc_list<'a>(&'a self) -> Option<&'a [Attribute]> { - for attr in &self.attrs { - match *attr { - List(ref x, ref list) if "doc" == *x => { - return Some(list); - } - _ => {} - } - } - return None; - } - /// Finds the `doc` attribute as a NameValue and returns the corresponding /// value found. pub fn doc_value<'a>(&'a self) -> Option<&'a str> { - for attr in &self.attrs { - match *attr { - NameValue(ref x, ref v) if "doc" == *x => { - return Some(v); - } - _ => {} - } - } - return None; + self.attrs.doc_value() } - - pub fn is_hidden_from_doc(&self) -> bool { - match self.doc_list() { - Some(l) => { - for innerattr in l { - match *innerattr { - Word(ref s) if "hidden" == *s => { - return true - } - _ => (), - } - } - }, - None => () + pub fn is_crate(&self) -> bool { + match self.inner { + StrippedItem(box ModuleItem(Module { is_crate: true, ..})) | + ModuleItem(Module { is_crate: true, ..}) => true, + _ => false, } - return false; } - pub fn is_mod(&self) -> bool { - match self.inner { ModuleItem(..) => true, _ => false } + self.type_() == ItemType::Module } pub fn is_trait(&self) -> bool { - match self.inner { TraitItem(..) => true, _ => false } + self.type_() == ItemType::Trait } pub fn is_struct(&self) -> bool { - match self.inner { StructItem(..) => true, _ => false } + self.type_() == ItemType::Struct } pub fn is_enum(&self) -> bool { - match self.inner { EnumItem(..) => true, _ => false } + self.type_() == ItemType::Module } pub fn is_fn(&self) -> bool { - match self.inner { FunctionItem(..) 
=> true, _ => false } + self.type_() == ItemType::Function + } + pub fn is_associated_type(&self) -> bool { + self.type_() == ItemType::AssociatedType + } + pub fn is_associated_const(&self) -> bool { + self.type_() == ItemType::AssociatedConst + } + pub fn is_method(&self) -> bool { + self.type_() == ItemType::Method + } + pub fn is_ty_method(&self) -> bool { + self.type_() == ItemType::TyMethod + } + pub fn is_primitive(&self) -> bool { + self.type_() == ItemType::Primitive + } + pub fn is_stripped(&self) -> bool { + match self.inner { StrippedItem(..) => true, _ => false } + } + pub fn has_stripped_fields(&self) -> Option { + match self.inner { + StructItem(ref _struct) => Some(_struct.fields_stripped), + UnionItem(ref union) => Some(union.fields_stripped), + VariantItem(Variant { kind: VariantKind::Struct(ref vstruct)} ) => { + Some(vstruct.fields_stripped) + }, + _ => None, + } } pub fn stability_class(&self) -> String { - match self.stability { - Some(ref s) => { - let mut base = match s.level { - stability::Unstable => "unstable".to_string(), - stability::Stable => String::new(), - }; - if !s.deprecated_since.is_empty() { - base.push_str(" deprecated"); - } - base + self.stability.as_ref().map(|ref s| { + let mut base = match s.level { + stability::Unstable => "unstable".to_string(), + stability::Stable => String::new(), + }; + if !s.deprecated_since.is_empty() { + base.push_str(" deprecated"); } - _ => String::new(), - } + base + }).unwrap_or(String::new()) + } + + pub fn stable_since(&self) -> Option<&str> { + self.stability.as_ref().map(|s| &s.since[..]) + } + + /// Returns a documentation-level item type from the item. 
+ pub fn type_(&self) -> ItemType { + ItemType::from(self) } } @@ -346,6 +356,7 @@ pub enum ItemEnum { ExternCrateItem(String, Option), ImportItem(Import), StructItem(Struct), + UnionItem(Union), EnumItem(Enum), FunctionItem(Function), ModuleItem(Module), @@ -359,7 +370,7 @@ pub enum ItemEnum { TyMethodItem(TyMethod), /// A method with a body. MethodItem(Method), - StructFieldItem(StructField), + StructFieldItem(Type), VariantItem(Variant), /// `fn`s from an extern block ForeignFunctionItem(Function), @@ -370,6 +381,25 @@ pub enum ItemEnum { AssociatedConstItem(Type, Option), AssociatedTypeItem(Vec, Option), DefaultImplItem(DefaultImpl), + /// An item that has been stripped by a rustdoc pass + StrippedItem(Box), +} + +impl ItemEnum { + pub fn generics(&self) -> Option<&Generics> { + Some(match *self { + ItemEnum::StructItem(ref s) => &s.generics, + ItemEnum::EnumItem(ref e) => &e.generics, + ItemEnum::FunctionItem(ref f) => &f.generics, + ItemEnum::TypedefItem(ref t, _) => &t.generics, + ItemEnum::TraitItem(ref t) => &t.generics, + ItemEnum::ImplItem(ref i) => &i.generics, + ItemEnum::TyMethodItem(ref i) => &i.generics, + ItemEnum::MethodItem(ref i) => &i.generics, + ItemEnum::ForeignFunctionItem(ref f) => &f.generics, + _ => return None, + }) + } } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] @@ -390,6 +420,7 @@ impl Clean for doctree::Module { items.extend(self.extern_crates.iter().map(|x| x.clean(cx))); items.extend(self.imports.iter().flat_map(|x| x.clean(cx))); items.extend(self.structs.iter().map(|x| x.clean(cx))); + items.extend(self.unions.iter().map(|x| x.clean(cx))); items.extend(self.enums.iter().map(|x| x.clean(cx))); items.extend(self.fns.iter().map(|x| x.clean(cx))); items.extend(self.foreigns.iter().flat_map(|x| x.clean(cx))); @@ -424,7 +455,7 @@ impl Clean for doctree::Module { visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: 
cx.tcx.map.local_def_id(self.id), inner: ModuleItem(Module { is_crate: self.is_crate, items: items @@ -433,59 +464,105 @@ impl Clean for doctree::Module { } } -#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] -pub enum Attribute { - Word(String), - List(String, Vec ), - NameValue(String, String) +pub struct ListAttributesIter<'a> { + attrs: slice::Iter<'a, ast::Attribute>, + current_list: slice::Iter<'a, ast::NestedMetaItem>, + name: &'a str } -impl Clean for ast::MetaItem { - fn clean(&self, cx: &DocContext) -> Attribute { - match self.node { - ast::MetaWord(ref s) => Word(s.to_string()), - ast::MetaList(ref s, ref l) => { - List(s.to_string(), l.clean(cx)) - } - ast::MetaNameValue(ref s, ref v) => { - NameValue(s.to_string(), lit_to_string(v)) +impl<'a> Iterator for ListAttributesIter<'a> { + type Item = &'a ast::NestedMetaItem; + + fn next(&mut self) -> Option { + if let Some(nested) = self.current_list.next() { + return Some(nested); + } + + for attr in &mut self.attrs { + if let Some(ref list) = attr.meta_item_list() { + if attr.check_name(self.name) { + self.current_list = list.iter(); + if let Some(nested) = self.current_list.next() { + return Some(nested); + } + } } } + + None } } -impl Clean for ast::Attribute { - fn clean(&self, cx: &DocContext) -> Attribute { - self.with_desugared_doc(|a| a.node.value.clean(cx)) - } +pub trait AttributesExt { + /// Finds an attribute as List and returns the list of attributes nested inside. + fn lists<'a>(&'a self, &'a str) -> ListAttributesIter<'a>; } -// This is a rough approximation that gets us what we want. 
-impl attr::AttrMetaMethods for Attribute { - fn name(&self) -> InternedString { - match *self { - Word(ref n) | List(ref n, _) | NameValue(ref n, _) => { - token::intern_and_get_ident(n) - } +impl AttributesExt for [ast::Attribute] { + fn lists<'a>(&'a self, name: &'a str) -> ListAttributesIter<'a> { + ListAttributesIter { + attrs: self.iter(), + current_list: [].iter(), + name: name } } +} - fn value_str(&self) -> Option { - match *self { - NameValue(_, ref v) => { - Some(token::intern_and_get_ident(v)) - } - _ => None, +pub trait NestedAttributesExt { + /// Returns whether the attribute list contains a specific `Word` + fn has_word(self, &str) -> bool; +} + +impl<'a, I: IntoIterator> NestedAttributesExt for I { + fn has_word(self, word: &str) -> bool { + self.into_iter().any(|attr| attr.is_word() && attr.check_name(word)) + } +} + +#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Default)] +pub struct Attributes { + pub doc_strings: Vec, + pub other_attrs: Vec +} + +impl Attributes { + pub fn from_ast(attrs: &[ast::Attribute]) -> Attributes { + let mut doc_strings = vec![]; + let other_attrs = attrs.iter().filter_map(|attr| { + attr.with_desugared_doc(|attr| { + if let Some(value) = attr.value_str() { + if attr.check_name("doc") { + doc_strings.push(value.to_string()); + return None; + } + } + + Some(attr.clone()) + }) + }).collect(); + Attributes { + doc_strings: doc_strings, + other_attrs: other_attrs } } - fn meta_item_list<'a>(&'a self) -> Option<&'a [P]> { None } - fn span(&self) -> codemap::Span { unimplemented!() } + + /// Finds the `doc` attribute as a NameValue and returns the corresponding + /// value found. 
+ pub fn doc_value<'a>(&'a self) -> Option<&'a str> { + self.doc_strings.first().map(|s| &s[..]) + } } -impl<'a> attr::AttrMetaMethods for &'a Attribute { - fn name(&self) -> InternedString { (**self).name() } - fn value_str(&self) -> Option { (**self).value_str() } - fn meta_item_list(&self) -> Option<&[P]> { None } - fn span(&self) -> codemap::Span { unimplemented!() } + +impl AttributesExt for Attributes { + fn lists<'a>(&'a self, name: &'a str) -> ListAttributesIter<'a> { + self.other_attrs.lists(name) + } +} + +impl Clean for [ast::Attribute] { + fn clean(&self, _cx: &DocContext) -> Attributes { + Attributes::from_ast(self) + } } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] @@ -500,7 +577,7 @@ impl Clean for hir::TyParam { fn clean(&self, cx: &DocContext) -> TyParam { TyParam { name: self.name.clean(cx), - did: cx.map.local_def_id(self.id), + did: cx.tcx.map.local_def_id(self.id), bounds: self.bounds.clean(cx), default: self.default.clean(cx), } @@ -509,8 +586,7 @@ impl Clean for hir::TyParam { impl<'tcx> Clean for ty::TypeParameterDef<'tcx> { fn clean(&self, cx: &DocContext) -> TyParam { - cx.external_typarams.borrow_mut().as_mut().unwrap() - .insert(self.def_id, self.name.clean(cx)); + cx.renderinfo.borrow_mut().external_typarams.insert(self.def_id, self.name.clean(cx)); TyParam { name: self.name.clean(cx), did: self.def_id, @@ -528,27 +604,27 @@ pub enum TyParamBound { impl TyParamBound { fn maybe_sized(cx: &DocContext) -> TyParamBound { - use rustc_front::hir::TraitBoundModifier as TBM; - let mut sized_bound = ty::BoundSized.clean(cx); - if let TyParamBound::TraitBound(_, ref mut tbm) = sized_bound { - *tbm = TBM::Maybe - }; - sized_bound + let did = cx.tcx.require_lang_item(lang_items::SizedTraitLangItem); + let empty = cx.tcx.intern_substs(&[]); + let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + Some(did), false, vec![], empty); + inline::record_extern_fqn(cx, did, TypeKind::Trait); + TraitBound(PolyTrait { + trait_: 
ResolvedPath { + path: path, + typarams: None, + did: did, + is_generic: false, + }, + lifetimes: vec![] + }, hir::TraitBoundModifier::Maybe) } fn is_sized_bound(&self, cx: &DocContext) -> bool { - use rustc_front::hir::TraitBoundModifier as TBM; - if let Some(tcx) = cx.tcx_opt() { - let sized_did = match tcx.lang_items.sized_trait() { - Some(did) => did, - None => return false - }; - if let TyParamBound::TraitBound(PolyTrait { - trait_: Type::ResolvedPath { did, .. }, .. - }, TBM::None) = *self { - if did == sized_did { - return true - } + use rustc::hir::TraitBoundModifier as TBM; + if let TyParamBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self { + if trait_.def_id() == cx.tcx.lang_items.sized_trait() { + return true; } } false @@ -564,37 +640,14 @@ impl Clean for hir::TyParamBound { } } -impl<'tcx> Clean<(Vec, Vec)> for ty::ExistentialBounds<'tcx> { - fn clean(&self, cx: &DocContext) -> (Vec, Vec) { - let mut tp_bounds = vec![]; - self.region_bound.clean(cx).map(|b| tp_bounds.push(RegionBound(b))); - for bb in &self.builtin_bounds { - tp_bounds.push(bb.clean(cx)); - } - - let mut bindings = vec![]; - for &ty::Binder(ref pb) in &self.projection_bounds { - bindings.push(TypeBinding { - name: pb.projection_ty.item_name.clean(cx), - ty: pb.ty.clean(cx) - }); - } - - (tp_bounds, bindings) - } -} - -fn external_path_params(cx: &DocContext, trait_did: Option, - bindings: Vec, substs: &subst::Substs) -> PathParameters { - let lifetimes = substs.regions().get_slice(subst::TypeSpace) - .iter() - .filter_map(|v| v.clean(cx)) - .collect(); - let types = substs.types.get_slice(subst::TypeSpace).to_vec(); +fn external_path_params(cx: &DocContext, trait_did: Option, has_self: bool, + bindings: Vec, substs: &Substs) -> PathParameters { + let lifetimes = substs.regions().filter_map(|v| v.clean(cx)).collect(); + let types = substs.types().skip(has_self as usize).collect::>(); - match (trait_did, cx.tcx_opt()) { + match trait_did { // Attempt to sugar an external 
path like Fn<(A, B,), C> to Fn(A, B) -> C - (Some(did), Some(ref tcx)) if tcx.lang_items.fn_trait_kind(did).is_some() => { + Some(did) if cx.tcx.lang_items.fn_trait_kind(did).is_some() => { assert_eq!(types.len(), 1); let inputs = match types[0].sty { ty::TyTuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(), @@ -617,7 +670,7 @@ fn external_path_params(cx: &DocContext, trait_did: Option, output: output } }, - (_, _) => { + _ => { PathParameters::AngleBracketed { lifetimes: lifetimes, types: types.clean(cx), @@ -629,74 +682,36 @@ fn external_path_params(cx: &DocContext, trait_did: Option, // trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar // from Fn<(A, B,), C> to Fn(A, B) -> C -fn external_path(cx: &DocContext, name: &str, trait_did: Option, - bindings: Vec, substs: &subst::Substs) -> Path { +fn external_path(cx: &DocContext, name: &str, trait_did: Option, has_self: bool, + bindings: Vec, substs: &Substs) -> Path { Path { global: false, + def: Def::Err, segments: vec![PathSegment { name: name.to_string(), - params: external_path_params(cx, trait_did, bindings, substs) + params: external_path_params(cx, trait_did, has_self, bindings, substs) }], } } -impl Clean for ty::BuiltinBound { - fn clean(&self, cx: &DocContext) -> TyParamBound { - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - None => return RegionBound(Lifetime::statik()) - }; - let empty = subst::Substs::empty(); - let (did, path) = match *self { - ty::BoundSend => - (tcx.lang_items.send_trait().unwrap(), - external_path(cx, "Send", None, vec![], &empty)), - ty::BoundSized => - (tcx.lang_items.sized_trait().unwrap(), - external_path(cx, "Sized", None, vec![], &empty)), - ty::BoundCopy => - (tcx.lang_items.copy_trait().unwrap(), - external_path(cx, "Copy", None, vec![], &empty)), - ty::BoundSync => - (tcx.lang_items.sync_trait().unwrap(), - external_path(cx, "Sync", None, vec![], &empty)), - }; - inline::record_extern_fqn(cx, did, TypeTrait); - 
TraitBound(PolyTrait { - trait_: ResolvedPath { - path: path, - typarams: None, - did: did, - is_generic: false, - }, - lifetimes: vec![] - }, hir::TraitBoundModifier::None) - } -} - impl<'tcx> Clean for ty::TraitRef<'tcx> { fn clean(&self, cx: &DocContext) -> TyParamBound { - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - None => return RegionBound(Lifetime::statik()) - }; - inline::record_extern_fqn(cx, self.def_id, TypeTrait); - let path = external_path(cx, &tcx.item_name(self.def_id).as_str(), - Some(self.def_id), vec![], self.substs); + inline::record_extern_fqn(cx, self.def_id, TypeKind::Trait); + let path = external_path(cx, &cx.tcx.item_name(self.def_id).as_str(), + Some(self.def_id), true, vec![], self.substs); - debug!("ty::TraitRef\n substs.types(TypeSpace): {:?}\n", - self.substs.types.get_slice(ParamSpace::TypeSpace)); + debug!("ty::TraitRef\n subst: {:?}\n", self.substs); // collect any late bound regions let mut late_bounds = vec![]; - for &ty_s in self.substs.types.get_slice(ParamSpace::TypeSpace) { - if let ty::TyTuple(ref ts) = ty_s.sty { + for ty_s in self.input_types().skip(1) { + if let ty::TyTuple(ts) = ty_s.sty { for &ty_s in ts { if let ty::TyRef(ref reg, _) = ty_s.sty { - if let &ty::Region::ReLateBound(_, _) = *reg { + if let &ty::Region::ReLateBound(..) 
= *reg { debug!(" hit an ReLateBound {:?}", reg); if let Some(lt) = reg.clean(cx) { - late_bounds.push(lt) + late_bounds.push(lt); } } } @@ -704,23 +719,27 @@ impl<'tcx> Clean for ty::TraitRef<'tcx> { } } - TraitBound(PolyTrait { - trait_: ResolvedPath { - path: path, - typarams: None, - did: self.def_id, - is_generic: false, + TraitBound( + PolyTrait { + trait_: ResolvedPath { + path: path, + typarams: None, + did: self.def_id, + is_generic: false, + }, + lifetimes: late_bounds, }, - lifetimes: late_bounds - }, hir::TraitBoundModifier::None) + hir::TraitBoundModifier::None + ) } } -impl<'tcx> Clean>> for subst::Substs<'tcx> { +impl<'tcx> Clean>> for Substs<'tcx> { fn clean(&self, cx: &DocContext) -> Option> { let mut v = Vec::new(); - v.extend(self.regions().iter().filter_map(|r| r.clean(cx)).map(RegionBound)); - v.extend(self.types.iter().map(|t| TraitBound(PolyTrait { + v.extend(self.regions().filter_map(|r| r.clean(cx)) + .map(RegionBound)); + v.extend(self.types().map(|t| TraitBound(PolyTrait { trait_: t.clean(cx), lifetimes: vec![] }, hir::TraitBoundModifier::None))); @@ -735,7 +754,7 @@ impl Lifetime { pub fn get_ref<'a>(&'a self) -> &'a str { let Lifetime(ref s) = *self; let s: &'a str = s; - return s; + s } pub fn statik() -> Lifetime { @@ -744,18 +763,39 @@ impl Lifetime { } impl Clean for hir::Lifetime { - fn clean(&self, _: &DocContext) -> Lifetime { + fn clean(&self, cx: &DocContext) -> Lifetime { + let def = cx.tcx.named_region_map.defs.get(&self.id).cloned(); + match def { + Some(DefEarlyBoundRegion(_, node_id)) | + Some(DefLateBoundRegion(_, node_id)) | + Some(DefFreeRegion(_, node_id)) => { + if let Some(lt) = cx.lt_substs.borrow().get(&node_id).cloned() { + return lt; + } + } + _ => {} + } Lifetime(self.name.to_string()) } } impl Clean for hir::LifetimeDef { fn clean(&self, _: &DocContext) -> Lifetime { - Lifetime(self.lifetime.name.to_string()) + if self.bounds.len() > 0 { + let mut s = format!("{}: {}", + self.lifetime.name.to_string(), + 
self.bounds[0].name.to_string()); + for bound in self.bounds.iter().skip(1) { + s.push_str(&format!(" + {}", bound.name.to_string())); + } + Lifetime(s) + } else { + Lifetime(self.lifetime.name.to_string()) + } } } -impl Clean for ty::RegionParameterDef { +impl<'tcx> Clean for ty::RegionParameterDef<'tcx> { fn clean(&self, _: &DocContext) -> Lifetime { Lifetime(self.name.to_string()) } @@ -765,8 +805,7 @@ impl Clean> for ty::Region { fn clean(&self, cx: &DocContext) -> Option { match *self { ty::ReStatic => Some(Lifetime::statik()), - ty::ReLateBound(_, ty::BrNamed(_, name)) => - Some(Lifetime(name.to_string())), + ty::ReLateBound(_, ty::BrNamed(_, name, _)) => Some(Lifetime(name.to_string())), ty::ReEarlyBound(ref data) => Some(Lifetime(data.name.clean(cx))), ty::ReLateBound(..) | @@ -774,7 +813,8 @@ impl Clean> for ty::Region { ty::ReScope(..) | ty::ReVar(..) | ty::ReSkolemized(..) | - ty::ReEmpty => None + ty::ReEmpty | + ty::ReErased => None } } } @@ -812,7 +852,7 @@ impl Clean for hir::WherePredicate { impl<'a> Clean for ty::Predicate<'a> { fn clean(&self, cx: &DocContext) -> WherePredicate { - use rustc::middle::ty::Predicate; + use rustc::ty::Predicate; match *self { Predicate::Trait(ref pred) => pred.clean(cx), @@ -822,6 +862,7 @@ impl<'a> Clean for ty::Predicate<'a> { Predicate::Projection(ref pred) => pred.clean(cx), Predicate::WellFormed(_) => panic!("not user writable"), Predicate::ObjectSafe(_) => panic!("not user writable"), + Predicate::ClosureKind(..) 
=> panic!("not user writable"), } } } @@ -829,7 +870,7 @@ impl<'a> Clean for ty::Predicate<'a> { impl<'a> Clean for ty::TraitPredicate<'a> { fn clean(&self, cx: &DocContext) -> WherePredicate { WherePredicate::BoundPredicate { - ty: self.trait_ref.substs.self_ty().clean(cx).unwrap(), + ty: self.trait_ref.self_ty().clean(cx), bounds: vec![self.trait_ref.clean(cx)] } } @@ -845,7 +886,7 @@ impl<'tcx> Clean for ty::EquatePredicate<'tcx> { } } -impl Clean for ty::OutlivesPredicate { +impl<'tcx> Clean for ty::OutlivesPredicate<&'tcx ty::Region, &'tcx ty::Region> { fn clean(&self, cx: &DocContext) -> WherePredicate { let ty::OutlivesPredicate(ref a, ref b) = *self; WherePredicate::RegionPredicate { @@ -855,7 +896,7 @@ impl Clean for ty::OutlivesPredicate { } } -impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region> { +impl<'tcx> Clean for ty::OutlivesPredicate, &'tcx ty::Region> { fn clean(&self, cx: &DocContext) -> WherePredicate { let ty::OutlivesPredicate(ref ty, ref lt) = *self; @@ -910,28 +951,30 @@ impl Clean for hir::Generics { } impl<'a, 'tcx> Clean for (&'a ty::Generics<'tcx>, - &'a ty::GenericPredicates<'tcx>, - subst::ParamSpace) { + &'a ty::GenericPredicates<'tcx>) { fn clean(&self, cx: &DocContext) -> Generics { - use std::collections::HashSet; use self::WherePredicate as WP; - let (gens, preds, space) = *self; + let (gens, preds) = *self; // Bounds in the type_params and lifetimes fields are repeated in the // predicates field (see rustc_typeck::collect::ty_generics), so remove // them. 
- let stripped_typarams = gens.types.get_slice(space).iter().map(|tp| { - tp.clean(cx) + let stripped_typarams = gens.types.iter().filter_map(|tp| { + if tp.name == keywords::SelfType.name() { + assert_eq!(tp.index, 0); + None + } else { + Some(tp.clean(cx)) + } }).collect::>(); - let stripped_lifetimes = gens.regions.get_slice(space).iter().map(|rp| { + let stripped_lifetimes = gens.regions.iter().map(|rp| { let mut srp = rp.clone(); srp.bounds = Vec::new(); srp.clean(cx) }).collect::>(); - let mut where_predicates = preds.predicates.get_slice(space) - .to_vec().clean(cx); + let mut where_predicates = preds.predicates.to_vec().clean(cx); // Type parameters and have a Sized bound by default unless removed with // ?Sized. Scan through the predicates and mark any type parameter with @@ -940,7 +983,7 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics<'tcx>, // Note that associated types also have a sized bound by default, but we // don't actually know the set of associated types right here so that's // handled in cleaning associated types - let mut sized_params = HashSet::new(); + let mut sized_params = FxHashSet(); where_predicates.retain(|pred| { match *pred { WP::BoundPredicate { ty: Generic(ref g), ref bounds } => { @@ -981,31 +1024,24 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics<'tcx>, #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Method { pub generics: Generics, - pub self_: SelfTy, pub unsafety: hir::Unsafety, pub constness: hir::Constness, pub decl: FnDecl, - pub abi: abi::Abi + pub abi: Abi, } impl Clean for hir::MethodSig { fn clean(&self, cx: &DocContext) -> Method { - let all_inputs = &self.decl.inputs; - let inputs = match self.explicit_self.node { - hir::SelfStatic => &**all_inputs, - _ => &all_inputs[1..] 
- }; let decl = FnDecl { inputs: Arguments { - values: inputs.clean(cx), + values: self.decl.inputs.clean(cx), }, output: self.decl.output.clean(cx), variadic: false, - attrs: Vec::new() + attrs: Attributes::default() }; Method { generics: self.generics.clean(cx), - self_: self.explicit_self.node.clean(cx), unsafety: self.unsafety, constness: self.constness, decl: decl, @@ -1019,62 +1055,35 @@ pub struct TyMethod { pub unsafety: hir::Unsafety, pub decl: FnDecl, pub generics: Generics, - pub self_: SelfTy, - pub abi: abi::Abi + pub abi: Abi, } impl Clean for hir::MethodSig { fn clean(&self, cx: &DocContext) -> TyMethod { - let inputs = match self.explicit_self.node { - hir::SelfStatic => &*self.decl.inputs, - _ => &self.decl.inputs[1..] - }; let decl = FnDecl { inputs: Arguments { - values: inputs.clean(cx), + values: self.decl.inputs.clean(cx), }, output: self.decl.output.clean(cx), variadic: false, - attrs: Vec::new() + attrs: Attributes::default() }; TyMethod { unsafety: self.unsafety.clone(), decl: decl, - self_: self.explicit_self.node.clean(cx), generics: self.generics.clean(cx), abi: self.abi } } } -#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] -pub enum SelfTy { - SelfStatic, - SelfValue, - SelfBorrowed(Option, Mutability), - SelfExplicit(Type), -} - -impl Clean for hir::ExplicitSelf_ { - fn clean(&self, cx: &DocContext) -> SelfTy { - match *self { - hir::SelfStatic => SelfStatic, - hir::SelfValue(_) => SelfValue, - hir::SelfRegion(ref lt, ref mt, _) => { - SelfBorrowed(lt.clean(cx), mt.clean(cx)) - } - hir::SelfExplicit(ref typ, _) => SelfExplicit(typ.clean(cx)), - } - } -} - #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Function { pub decl: FnDecl, pub generics: Generics, pub unsafety: hir::Unsafety, pub constness: hir::Constness, - pub abi: abi::Abi, + pub abi: Abi, } impl Clean for doctree::Function { @@ -1086,7 +1095,7 @@ impl Clean for doctree::Function { visibility: self.vis.clean(cx), stability: 
self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), inner: FunctionItem(Function { decl: self.decl.clean(cx), generics: self.generics.clean(cx), @@ -1103,7 +1112,17 @@ pub struct FnDecl { pub inputs: Arguments, pub output: FunctionRetTy, pub variadic: bool, - pub attrs: Vec, + pub attrs: Attributes, +} + +impl FnDecl { + pub fn has_self(&self) -> bool { + self.inputs.values.len() > 0 && self.inputs.values[0].name == "self" + } + + pub fn self_type(&self) -> Option { + self.inputs.values.get(0).and_then(|v| v.to_self()) + } } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] @@ -1119,16 +1138,7 @@ impl Clean for hir::FnDecl { }, output: self.output.clean(cx), variadic: self.variadic, - attrs: Vec::new() - } - } -} - -impl<'tcx> Clean for ty::FnOutput<'tcx> { - fn clean(&self, cx: &DocContext) -> Type { - match *self { - ty::FnConverging(ty) => ty.clean(cx), - ty::FnDiverging => Bottom + attrs: Attributes::default() } } } @@ -1136,24 +1146,21 @@ impl<'tcx> Clean for ty::FnOutput<'tcx> { impl<'a, 'tcx> Clean for (DefId, &'a ty::PolyFnSig<'tcx>) { fn clean(&self, cx: &DocContext) -> FnDecl { let (did, sig) = *self; - let mut names = if let Some(_) = cx.map.as_local_node_id(did) { + let mut names = if cx.tcx.map.as_local_node_id(did).is_some() { vec![].into_iter() } else { - cx.tcx().sess.cstore.method_arg_names(did).into_iter() + cx.tcx.sess.cstore.fn_arg_names(did).into_iter() }.peekable(); - if names.peek().map(|s| &**s) == Some("self") { - let _ = names.next(); - } FnDecl { output: Return(sig.0.output.clean(cx)), - attrs: Vec::new(), + attrs: Attributes::default(), variadic: sig.0.variadic, inputs: Arguments { values: sig.0.inputs.iter().map(|t| { Argument { type_: t.clean(cx), - id: 0, - name: names.next().unwrap_or("".to_string()), + id: ast::CRATE_NODE_ID, + name: names.next().map_or("".to_string(), |name| name.to_string()), } }).collect(), }, @@ -1168,6 
+1175,29 @@ pub struct Argument { pub id: ast::NodeId, } +#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] +pub enum SelfTy { + SelfValue, + SelfBorrowed(Option, Mutability), + SelfExplicit(Type), +} + +impl Argument { + pub fn to_self(&self) -> Option { + if self.name == "self" { + match self.type_ { + Infer => Some(SelfValue), + BorrowedRef{ref lifetime, mutability, ref type_} if **type_ == Infer => { + Some(SelfBorrowed(lifetime.clone(), mutability)) + } + _ => Some(SelfExplicit(self.type_.clone())) + } + } else { + None + } + } +} + impl Clean for hir::Arg { fn clean(&self, cx: &DocContext) -> Argument { Argument { @@ -1182,7 +1212,6 @@ impl Clean for hir::Arg { pub enum FunctionRetTy { Return(Type), DefaultReturn, - NoReturn } impl Clean for hir::FunctionRetTy { @@ -1190,7 +1219,6 @@ impl Clean for hir::FunctionRetTy { match *self { hir::Return(ref typ) => Return(typ.clean(cx)), hir::DefaultReturn(..) => DefaultReturn, - hir::NoReturn(..) => NoReturn } } } @@ -1209,7 +1237,7 @@ impl Clean for doctree::Trait { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -1243,8 +1271,7 @@ impl Clean for hir::TraitItem { let inner = match self.node { hir::ConstTraitItem(ref ty, ref default) => { AssociatedConstItem(ty.clean(cx), - default.as_ref().map(|expr| - expr.span.to_src(cx))) + default.as_ref().map(|e| pprust::expr_to_string(&e))) } hir::MethodTraitItem(ref sig, Some(_)) => { MethodItem(sig.clean(cx)) @@ -1260,10 +1287,10 @@ impl Clean for hir::TraitItem { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.span.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: None, - stability: get_stability(cx, cx.map.local_def_id(self.id)), - deprecation: 
get_deprecation(cx, cx.map.local_def_id(self.id)), + stability: get_stability(cx, cx.tcx.map.local_def_id(self.id)), + deprecation: get_deprecation(cx, cx.tcx.map.local_def_id(self.id)), inner: inner } } @@ -1273,10 +1300,8 @@ impl Clean for hir::ImplItem { fn clean(&self, cx: &DocContext) -> Item { let inner = match self.node { hir::ImplItemKind::Const(ref ty, ref expr) => { - ConstantItem(Constant{ - type_: ty.clean(cx), - expr: expr.span.to_src(cx), - }) + AssociatedConstItem(ty.clean(cx), + Some(pprust::expr_to_string(expr))) } hir::ImplItemKind::Method(ref sig, _) => { MethodItem(sig.clean(cx)) @@ -1294,99 +1319,140 @@ impl Clean for hir::ImplItem { name: Some(self.name.clean(cx)), source: self.span.clean(cx), attrs: self.attrs.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), - stability: get_stability(cx, cx.map.local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.map.local_def_id(self.id)), + stability: get_stability(cx, cx.tcx.map.local_def_id(self.id)), + deprecation: get_deprecation(cx, cx.tcx.map.local_def_id(self.id)), inner: inner } } } -impl<'tcx> Clean for ty::Method<'tcx> { +impl<'tcx> Clean for ty::AssociatedItem { fn clean(&self, cx: &DocContext) -> Item { - let (self_, sig) = match self.explicit_self { - ty::ExplicitSelfCategory::Static => (hir::SelfStatic.clean(cx), - self.fty.sig.clone()), - s => { - let sig = ty::Binder(ty::FnSig { - inputs: self.fty.sig.0.inputs[1..].to_vec(), - ..self.fty.sig.0.clone() - }); - let s = match s { - ty::ExplicitSelfCategory::ByValue => SelfValue, - ty::ExplicitSelfCategory::ByReference(..) 
=> { - match self.fty.sig.0.inputs[0].sty { - ty::TyRef(r, mt) => { - SelfBorrowed(r.clean(cx), mt.mutbl.clean(cx)) + let inner = match self.kind { + ty::AssociatedKind::Const => { + let ty = cx.tcx.item_type(self.def_id); + AssociatedConstItem(ty.clean(cx), None) + } + ty::AssociatedKind::Method => { + let generics = (cx.tcx.item_generics(self.def_id), + &cx.tcx.item_predicates(self.def_id)).clean(cx); + let fty = match cx.tcx.item_type(self.def_id).sty { + ty::TyFnDef(_, _, f) => f, + _ => unreachable!() + }; + let mut decl = (self.def_id, &fty.sig).clean(cx); + + if self.method_has_self_argument { + let self_ty = match self.container { + ty::ImplContainer(def_id) => { + cx.tcx.item_type(def_id) + } + ty::TraitContainer(_) => cx.tcx.mk_self_type() + }; + let self_arg_ty = *fty.sig.input(0).skip_binder(); + if self_arg_ty == self_ty { + decl.inputs.values[0].type_ = Infer; + } else if let ty::TyRef(_, mt) = self_arg_ty.sty { + if mt.ty == self_ty { + match decl.inputs.values[0].type_ { + BorrowedRef{ref mut type_, ..} => **type_ = Infer, + _ => unreachable!(), } - _ => unreachable!(), } } - ty::ExplicitSelfCategory::ByBox => { - SelfExplicit(self.fty.sig.0.inputs[0].clean(cx)) - } - ty::ExplicitSelfCategory::Static => unreachable!(), + } + + let provided = match self.container { + ty::ImplContainer(_) => false, + ty::TraitContainer(_) => self.defaultness.has_value() }; - (s, sig) + if provided { + MethodItem(Method { + unsafety: fty.unsafety, + generics: generics, + decl: decl, + abi: fty.abi, + + // trait methods canot (currently, at least) be const + constness: hir::Constness::NotConst, + }) + } else { + TyMethodItem(TyMethod { + unsafety: fty.unsafety, + generics: generics, + decl: decl, + abi: fty.abi, + }) + } } - }; + ty::AssociatedKind::Type => { + let my_name = self.name.clean(cx); + + let mut bounds = if let ty::TraitContainer(did) = self.container { + // When loading a cross-crate associated type, the bounds for this type + // are actually located on the 
trait/impl itself, so we need to load + // all of the generics from there and then look for bounds that are + // applied to this associated type in question. + let predicates = cx.tcx.item_predicates(did); + let generics = (cx.tcx.item_generics(did), &predicates).clean(cx); + generics.where_predicates.iter().filter_map(|pred| { + let (name, self_type, trait_, bounds) = match *pred { + WherePredicate::BoundPredicate { + ty: QPath { ref name, ref self_type, ref trait_ }, + ref bounds + } => (name, self_type, trait_, bounds), + _ => return None, + }; + if *name != my_name { return None } + match **trait_ { + ResolvedPath { did, .. } if did == self.container.id() => {} + _ => return None, + } + match **self_type { + Generic(ref s) if *s == "Self" => {} + _ => return None, + } + Some(bounds) + }).flat_map(|i| i.iter().cloned()).collect::>() + } else { + vec![] + }; - let generics = (&self.generics, &self.predicates, - subst::FnSpace).clean(cx); - let decl = (self.def_id, &sig).clean(cx); - let provided = match self.container { - ty::ImplContainer(..) => false, - ty::TraitContainer(did) => { - cx.tcx().provided_trait_methods(did).iter().any(|m| { - m.def_id == self.def_id - }) + // Our Sized/?Sized bound didn't get handled when creating the generics + // because we didn't actually get our whole set of bounds until just now + // (some of them may have come from the trait). If we do have a sized + // bound, we remove it, and if we don't then we add the `?Sized` bound + // at the end. 
+ match bounds.iter().position(|b| b.is_sized_bound(cx)) { + Some(i) => { bounds.remove(i); } + None => bounds.push(TyParamBound::maybe_sized(cx)), + } + + let ty = if self.defaultness.has_value() { + Some(cx.tcx.item_type(self.def_id)) + } else { + None + }; + + AssociatedTypeItem(bounds, ty.clean(cx)) } }; - let inner = if provided { - MethodItem(Method { - unsafety: self.fty.unsafety, - generics: generics, - self_: self_, - decl: decl, - abi: self.fty.abi, - - // trait methods canot (currently, at least) be const - constness: hir::Constness::NotConst, - }) - } else { - TyMethodItem(TyMethod { - unsafety: self.fty.unsafety, - generics: generics, - self_: self_, - decl: decl, - abi: self.fty.abi, - }) - }; Item { name: Some(self.name.clean(cx)), - visibility: Some(hir::Inherited), + visibility: Some(Inherited), stability: get_stability(cx, self.def_id), deprecation: get_deprecation(cx, self.def_id), def_id: self.def_id, - attrs: inline::load_attrs(cx, cx.tcx(), self.def_id), - source: Span::empty(), + attrs: inline::load_attrs(cx, self.def_id), + source: cx.tcx.def_span(self.def_id).clean(cx), inner: inner, } } } -impl<'tcx> Clean for ty::ImplOrTraitItem<'tcx> { - fn clean(&self, cx: &DocContext) -> Item { - match *self { - ty::ConstTraitItem(ref cti) => cti.clean(cx), - ty::MethodTraitItem(ref mti) => mti.clean(cx), - ty::TypeTraitItem(ref tti) => tti.clean(cx), - } - } -} - /// A trait reference, which may have higher ranked lifetimes. #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] pub struct PolyTrait { @@ -1395,7 +1461,7 @@ pub struct PolyTrait { } /// A representation of a Type suitable for hyperlinking purposes. Ideally one can get the original -/// type out of the AST/ty::ctxt given one of these, if more information is needed. Most importantly +/// type out of the AST/TyCtxt given one of these, if more information is needed. Most importantly /// it does not preserve mutability or boxes. 
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] pub enum Type { @@ -1418,8 +1484,7 @@ pub enum Type { Tuple(Vec), Vector(Box), FixedVector(Box, String), - /// aka TyBot - Bottom, + Never, Unique(Box), RawPointer(Mutability, Box), BorrowedRef { @@ -1440,6 +1505,9 @@ pub enum Type { // for<'a> Foo(&'a) PolyTraitRef(Vec), + + // impl TraitA+TraitB + ImplTrait(Vec), } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Copy, Debug)] @@ -1452,33 +1520,60 @@ pub enum PrimitiveType { Str, Slice, Array, - PrimitiveTuple, - PrimitiveRawPointer, + Tuple, + RawPointer, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] pub enum TypeKind { - TypeEnum, - TypeFunction, - TypeModule, - TypeConst, - TypeStatic, - TypeStruct, - TypeTrait, - TypeVariant, - TypeTypedef, + Enum, + Function, + Module, + Const, + Static, + Struct, + Union, + Trait, + Variant, + Typedef, +} + +pub trait GetDefId { + fn def_id(&self) -> Option; +} + +impl GetDefId for Option { + fn def_id(&self) -> Option { + self.as_ref().and_then(|d| d.def_id()) + } } impl Type { pub fn primitive_type(&self) -> Option { match *self { Primitive(p) | BorrowedRef { type_: box Primitive(p), ..} => Some(p), - Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(Slice), + Vector(..) | BorrowedRef{ type_: box Vector(..), .. } => Some(PrimitiveType::Slice), FixedVector(..) | BorrowedRef { type_: box FixedVector(..), .. } => { - Some(Array) + Some(PrimitiveType::Array) } - Tuple(..) => Some(PrimitiveTuple), - RawPointer(..) => Some(PrimitiveRawPointer), + Tuple(..) => Some(PrimitiveType::Tuple), + RawPointer(..) => Some(PrimitiveType::RawPointer), + _ => None, + } + } + + pub fn is_generic(&self) -> bool { + match *self { + ResolvedPath { is_generic, .. } => is_generic, + _ => false, + } + } +} + +impl GetDefId for Type { + fn def_id(&self) -> Option { + match *self { + ResolvedPath { did, .. 
} => Some(did), _ => None, } } @@ -1487,113 +1582,187 @@ impl Type { impl PrimitiveType { fn from_str(s: &str) -> Option { match s { - "isize" => Some(Isize), - "i8" => Some(I8), - "i16" => Some(I16), - "i32" => Some(I32), - "i64" => Some(I64), - "usize" => Some(Usize), - "u8" => Some(U8), - "u16" => Some(U16), - "u32" => Some(U32), - "u64" => Some(U64), - "bool" => Some(Bool), - "char" => Some(Char), - "str" => Some(Str), - "f32" => Some(F32), - "f64" => Some(F64), - "array" => Some(Array), - "slice" => Some(Slice), - "tuple" => Some(PrimitiveTuple), - "pointer" => Some(PrimitiveRawPointer), + "isize" => Some(PrimitiveType::Isize), + "i8" => Some(PrimitiveType::I8), + "i16" => Some(PrimitiveType::I16), + "i32" => Some(PrimitiveType::I32), + "i64" => Some(PrimitiveType::I64), + "usize" => Some(PrimitiveType::Usize), + "u8" => Some(PrimitiveType::U8), + "u16" => Some(PrimitiveType::U16), + "u32" => Some(PrimitiveType::U32), + "u64" => Some(PrimitiveType::U64), + "bool" => Some(PrimitiveType::Bool), + "char" => Some(PrimitiveType::Char), + "str" => Some(PrimitiveType::Str), + "f32" => Some(PrimitiveType::F32), + "f64" => Some(PrimitiveType::F64), + "array" => Some(PrimitiveType::Array), + "slice" => Some(PrimitiveType::Slice), + "tuple" => Some(PrimitiveType::Tuple), + "pointer" => Some(PrimitiveType::RawPointer), _ => None, } } - fn find(attrs: &[Attribute]) -> Option { - for attr in attrs { - let list = match *attr { - List(ref k, ref l) if *k == "doc" => l, - _ => continue, - }; - for sub_attr in list { - let value = match *sub_attr { - NameValue(ref k, ref v) - if *k == "primitive" => v, - _ => continue, - }; - match PrimitiveType::from_str(value) { - Some(p) => return Some(p), - None => {} - } - } + pub fn as_str(&self) -> &'static str { + match *self { + PrimitiveType::Isize => "isize", + PrimitiveType::I8 => "i8", + PrimitiveType::I16 => "i16", + PrimitiveType::I32 => "i32", + PrimitiveType::I64 => "i64", + PrimitiveType::Usize => "usize", + PrimitiveType::U8 
=> "u8", + PrimitiveType::U16 => "u16", + PrimitiveType::U32 => "u32", + PrimitiveType::U64 => "u64", + PrimitiveType::F32 => "f32", + PrimitiveType::F64 => "f64", + PrimitiveType::Str => "str", + PrimitiveType::Bool => "bool", + PrimitiveType::Char => "char", + PrimitiveType::Array => "array", + PrimitiveType::Slice => "slice", + PrimitiveType::Tuple => "tuple", + PrimitiveType::RawPointer => "pointer", } - return None } - pub fn to_string(&self) -> &'static str { - match *self { - Isize => "isize", - I8 => "i8", - I16 => "i16", - I32 => "i32", - I64 => "i64", - Usize => "usize", - U8 => "u8", - U16 => "u16", - U32 => "u32", - U64 => "u64", - F32 => "f32", - F64 => "f64", - Str => "str", - Bool => "bool", - Char => "char", - Array => "array", - Slice => "slice", - PrimitiveTuple => "tuple", - PrimitiveRawPointer => "pointer", + pub fn to_url_str(&self) -> &'static str { + self.as_str() + } +} + +impl From for PrimitiveType { + fn from(int_ty: ast::IntTy) -> PrimitiveType { + match int_ty { + ast::IntTy::Is => PrimitiveType::Isize, + ast::IntTy::I8 => PrimitiveType::I8, + ast::IntTy::I16 => PrimitiveType::I16, + ast::IntTy::I32 => PrimitiveType::I32, + ast::IntTy::I64 => PrimitiveType::I64, } } +} - pub fn to_url_str(&self) -> &'static str { - self.to_string() +impl From for PrimitiveType { + fn from(uint_ty: ast::UintTy) -> PrimitiveType { + match uint_ty { + ast::UintTy::Us => PrimitiveType::Usize, + ast::UintTy::U8 => PrimitiveType::U8, + ast::UintTy::U16 => PrimitiveType::U16, + ast::UintTy::U32 => PrimitiveType::U32, + ast::UintTy::U64 => PrimitiveType::U64, + } } +} - /// Creates a rustdoc-specific node id for primitive types. - /// - /// These node ids are generally never used by the AST itself. 
- pub fn to_def_index(&self) -> DefIndex { - let x = u32::MAX - 1 - (*self as u32); - DefIndex::new(x as usize) +impl From for PrimitiveType { + fn from(float_ty: ast::FloatTy) -> PrimitiveType { + match float_ty { + ast::FloatTy::F32 => PrimitiveType::F32, + ast::FloatTy::F64 => PrimitiveType::F64, + } } } impl Clean for hir::Ty { fn clean(&self, cx: &DocContext) -> Type { - use rustc_front::hir::*; + use rustc::hir::*; match self.node { + TyNever => Never, TyPtr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)), TyRptr(ref l, ref m) => BorrowedRef {lifetime: l.clean(cx), mutability: m.mutbl.clean(cx), type_: box m.ty.clean(cx)}, - TyVec(ref ty) => Vector(box ty.clean(cx)), - TyFixedLengthVec(ref ty, ref e) => FixedVector(box ty.clean(cx), - e.span.to_src(cx)), + TySlice(ref ty) => Vector(box ty.clean(cx)), + TyArray(ref ty, ref e) => { + use rustc_const_math::{ConstInt, ConstUsize}; + use rustc_const_eval::eval_const_expr; + use rustc::middle::const_val::ConstVal; + + let n = match eval_const_expr(cx.tcx, e) { + ConstVal::Integral(ConstInt::Usize(u)) => match u { + ConstUsize::Us16(u) => u.to_string(), + ConstUsize::Us32(u) => u.to_string(), + ConstUsize::Us64(u) => u.to_string(), + }, + // after type checking this can't fail + _ => unreachable!(), + }; + FixedVector(box ty.clean(cx), n) + }, TyTup(ref tys) => Tuple(tys.clean(cx)), - TyPath(None, ref p) => { - resolve_type(cx, p.clean(cx), self.id) + TyPath(hir::QPath::Resolved(None, ref path)) => { + if let Some(new_ty) = cx.ty_substs.borrow().get(&path.def).cloned() { + return new_ty; + } + + let mut alias = None; + if let Def::TyAlias(def_id) = path.def { + // Substitute private type aliases + if let Some(node_id) = cx.tcx.map.as_local_node_id(def_id) { + if !cx.access_levels.borrow().is_exported(def_id) { + alias = Some(&cx.tcx.map.expect_item(node_id).node); + } + } + }; + + if let Some(&hir::ItemTy(ref ty, ref generics)) = alias { + let provided_params = &path.segments.last().unwrap().parameters; 
+ let mut ty_substs = FxHashMap(); + let mut lt_substs = FxHashMap(); + for (i, ty_param) in generics.ty_params.iter().enumerate() { + let ty_param_def = Def::TyParam(cx.tcx.map.local_def_id(ty_param.id)); + if let Some(ty) = provided_params.types().get(i).cloned() + .cloned() { + ty_substs.insert(ty_param_def, ty.unwrap().clean(cx)); + } else if let Some(default) = ty_param.default.clone() { + ty_substs.insert(ty_param_def, default.unwrap().clean(cx)); + } + } + for (i, lt_param) in generics.lifetimes.iter().enumerate() { + if let Some(lt) = provided_params.lifetimes().get(i).cloned() + .cloned() { + lt_substs.insert(lt_param.lifetime.id, lt.clean(cx)); + } + } + return cx.enter_alias(ty_substs, lt_substs, || ty.clean(cx)); + } + resolve_type(cx, path.clean(cx), self.id) } - TyPath(Some(ref qself), ref p) => { + TyPath(hir::QPath::Resolved(Some(ref qself), ref p)) => { let mut segments: Vec<_> = p.segments.clone().into(); segments.pop(); let trait_path = hir::Path { span: p.span, global: p.global, + def: Def::Trait(cx.tcx.associated_item(p.def.def_id()).container.id()), segments: segments.into(), }; Type::QPath { - name: p.segments.last().unwrap().identifier.name.clean(cx), - self_type: box qself.ty.clean(cx), + name: p.segments.last().unwrap().name.clean(cx), + self_type: box qself.clean(cx), + trait_: box resolve_type(cx, trait_path.clean(cx), self.id) + } + } + TyPath(hir::QPath::TypeRelative(ref qself, ref segment)) => { + let mut def = Def::Err; + if let Some(ty) = cx.hir_ty_to_ty.get(&self.id) { + if let ty::TyProjection(proj) = ty.sty { + def = Def::Trait(proj.trait_ref.def_id); + } + } + let trait_path = hir::Path { + span: self.span, + global: false, + def: def, + segments: vec![].into(), + }; + Type::QPath { + name: segment.name.clean(cx), + self_type: box qself.clean(cx), trait_: box resolve_type(cx, trait_path.clean(cx), self.id) } } @@ -1614,15 +1783,10 @@ impl Clean for hir::Ty { } } TyBareFn(ref barefn) => BareFunction(box barefn.clean(cx)), - 
TyPolyTraitRef(ref bounds) => { - PolyTraitRef(bounds.clean(cx)) - }, - TyInfer => { - Infer - }, - TyTypeof(..) => { - panic!("Unimplemented type {:?}", self.node) - }, + TyPolyTraitRef(ref bounds) => PolyTraitRef(bounds.clean(cx)), + TyImplTrait(ref bounds) => ImplTrait(bounds.clean(cx)), + TyInfer => Infer, + TyTypeof(..) => panic!("Unimplemented type {:?}", self.node), } } } @@ -1630,25 +1794,15 @@ impl Clean for hir::Ty { impl<'tcx> Clean for ty::Ty<'tcx> { fn clean(&self, cx: &DocContext) -> Type { match self.sty { - ty::TyBool => Primitive(Bool), - ty::TyChar => Primitive(Char), - ty::TyInt(ast::TyIs) => Primitive(Isize), - ty::TyInt(ast::TyI8) => Primitive(I8), - ty::TyInt(ast::TyI16) => Primitive(I16), - ty::TyInt(ast::TyI32) => Primitive(I32), - ty::TyInt(ast::TyI64) => Primitive(I64), - ty::TyUint(ast::TyUs) => Primitive(Usize), - ty::TyUint(ast::TyU8) => Primitive(U8), - ty::TyUint(ast::TyU16) => Primitive(U16), - ty::TyUint(ast::TyU32) => Primitive(U32), - ty::TyUint(ast::TyU64) => Primitive(U64), - ty::TyFloat(ast::TyF32) => Primitive(F32), - ty::TyFloat(ast::TyF64) => Primitive(F64), - ty::TyStr => Primitive(Str), + ty::TyNever => Never, + ty::TyBool => Primitive(PrimitiveType::Bool), + ty::TyChar => Primitive(PrimitiveType::Char), + ty::TyInt(int_ty) => Primitive(int_ty.into()), + ty::TyUint(uint_ty) => Primitive(uint_ty.into()), + ty::TyFloat(float_ty) => Primitive(float_ty.into()), + ty::TyStr => Primitive(PrimitiveType::Str), ty::TyBox(t) => { - let box_did = cx.tcx_opt().and_then(|tcx| { - tcx.lang_items.owned_box() - }); + let box_did = cx.tcx.lang_items.owned_box(); lang_struct(cx, box_did, t, "Box", Unique) } ty::TySlice(ty) => Vector(box ty.clean(cx)), @@ -1660,26 +1814,27 @@ impl<'tcx> Clean for ty::Ty<'tcx> { mutability: mt.mutbl.clean(cx), type_: box mt.ty.clean(cx), }, - ty::TyBareFn(_, ref fty) => BareFunction(box BareFunctionDecl { + ty::TyFnDef(.., ref fty) | + ty::TyFnPtr(ref fty) => BareFunction(box BareFunctionDecl { unsafety: 
fty.unsafety, generics: Generics { lifetimes: Vec::new(), type_params: Vec::new(), where_predicates: Vec::new() }, - decl: (cx.map.local_def_id(0), &fty.sig).clean(cx), - abi: fty.abi.to_string(), + decl: (cx.tcx.map.local_def_id(ast::CRATE_NODE_ID), &fty.sig).clean(cx), + abi: fty.abi, }), - ty::TyStruct(def, substs) | - ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let did = def.did; - let kind = match self.sty { - ty::TyStruct(..) => TypeStruct, - _ => TypeEnum, + let kind = match def.adt_kind() { + AdtKind::Struct => TypeKind::Struct, + AdtKind::Union => TypeKind::Union, + AdtKind::Enum => TypeKind::Enum, }; inline::record_extern_fqn(cx, did, kind); - let path = external_path(cx, &cx.tcx().item_name(did).as_str(), - None, vec![], substs); + let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + None, false, vec![], substs); ResolvedPath { path: path, typarams: None, @@ -1687,17 +1842,48 @@ impl<'tcx> Clean for ty::Ty<'tcx> { is_generic: false, } } - ty::TyTrait(box ty::TraitTy { ref principal, ref bounds }) => { - let did = principal.def_id(); - inline::record_extern_fqn(cx, did, TypeTrait); - let (typarams, bindings) = bounds.clean(cx); - let path = external_path(cx, &cx.tcx().item_name(did).as_str(), - Some(did), bindings, principal.substs()); - ResolvedPath { - path: path, - typarams: Some(typarams), - did: did, - is_generic: false, + ty::TyDynamic(ref obj, ref reg) => { + if let Some(principal) = obj.principal() { + let did = principal.def_id(); + inline::record_extern_fqn(cx, did, TypeKind::Trait); + + let mut typarams = vec![]; + reg.clean(cx).map(|b| typarams.push(RegionBound(b))); + for did in obj.auto_traits() { + let empty = cx.tcx.intern_substs(&[]); + let path = external_path(cx, &cx.tcx.item_name(did).as_str(), + Some(did), false, vec![], empty); + inline::record_extern_fqn(cx, did, TypeKind::Trait); + let bound = TraitBound(PolyTrait { + trait_: ResolvedPath { + path: path, + typarams: None, + did: did, + is_generic: false, + 
}, + lifetimes: vec![] + }, hir::TraitBoundModifier::None); + typarams.push(bound); + } + + let mut bindings = vec![]; + for ty::Binder(ref pb) in obj.projection_bounds() { + bindings.push(TypeBinding { + name: pb.item_name.clean(cx), + ty: pb.ty.clean(cx) + }); + } + + let path = external_path(cx, &cx.tcx.item_name(did).as_str(), Some(did), + false, bindings, principal.0.substs); + ResolvedPath { + path: path, + typarams: Some(typarams), + did: did, + is_generic: false, + } + } else { + Never } } ty::TyTuple(ref t) => Tuple(t.clean(cx)), @@ -1706,6 +1892,17 @@ impl<'tcx> Clean for ty::Ty<'tcx> { ty::TyParam(ref p) => Generic(p.name.to_string()), + ty::TyAnon(def_id, substs) => { + // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, + // by looking up the projections associated with the def_id. + let item_predicates = cx.tcx.item_predicates(def_id); + let substs = cx.tcx.lift(&substs).unwrap(); + let bounds = item_predicates.instantiate(cx.tcx, substs); + ImplTrait(bounds.predicates.into_iter().filter_map(|predicate| { + predicate.to_opt_poly_trait_ref().clean(cx) + }).collect()) + } + ty::TyClosure(..) => Tuple(vec![]), // FIXME(pcwalton) ty::TyInfer(..) 
=> panic!("TyInfer"), @@ -1714,62 +1911,51 @@ impl<'tcx> Clean for ty::Ty<'tcx> { } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] -pub enum StructField { - HiddenStructField, // inserted later by strip passes - TypedStructField(Type), -} - impl Clean for hir::StructField { fn clean(&self, cx: &DocContext) -> Item { - let (name, vis) = match self.node.kind { - hir::NamedField(id, vis) => (Some(id), vis), - hir::UnnamedField(vis) => (None, vis) - }; Item { - name: name.clean(cx), - attrs: self.node.attrs.clean(cx), + name: Some(self.name).clean(cx), + attrs: self.attrs.clean(cx), source: self.span.clean(cx), - visibility: Some(vis), - stability: get_stability(cx, cx.map.local_def_id(self.node.id)), - deprecation: get_deprecation(cx, cx.map.local_def_id(self.node.id)), - def_id: cx.map.local_def_id(self.node.id), - inner: StructFieldItem(TypedStructField(self.node.ty.clean(cx))), + visibility: self.vis.clean(cx), + stability: get_stability(cx, cx.tcx.map.local_def_id(self.id)), + deprecation: get_deprecation(cx, cx.tcx.map.local_def_id(self.id)), + def_id: cx.tcx.map.local_def_id(self.id), + inner: StructFieldItem(self.ty.clean(cx)), } } } -impl<'tcx> Clean for ty::FieldDefData<'tcx, 'static> { +impl<'tcx> Clean for ty::FieldDef { fn clean(&self, cx: &DocContext) -> Item { - use syntax::parse::token::special_idents::unnamed_field; - // FIXME: possible O(n^2)-ness! Not my fault. 
- let attr_map = - cx.tcx().sess.cstore.crate_struct_field_attrs(self.did.krate); - - let (name, attrs) = if self.name == unnamed_field.name { - (None, None) - } else { - (Some(self.name), Some(attr_map.get(&self.did).unwrap())) - }; - Item { - name: name.clean(cx), - attrs: attrs.unwrap_or(&Vec::new()).clean(cx), - source: Span::empty(), - visibility: Some(self.vis), + name: Some(self.name).clean(cx), + attrs: cx.tcx.get_attrs(self.did).clean(cx), + source: cx.tcx.def_span(self.did).clean(cx), + visibility: self.vis.clean(cx), stability: get_stability(cx, self.did), deprecation: get_deprecation(cx, self.did), def_id: self.did, - inner: StructFieldItem(TypedStructField(self.unsubst_ty().clean(cx))), + inner: StructFieldItem(cx.tcx.item_type(self.did).clean(cx)), } } } -pub type Visibility = hir::Visibility; +#[derive(Clone, PartialEq, Eq, RustcDecodable, RustcEncodable, Debug)] +pub enum Visibility { + Public, + Inherited, +} impl Clean> for hir::Visibility { fn clean(&self, _: &DocContext) -> Option { - Some(*self) + Some(if *self == hir::Visibility::Public { Public } else { Inherited }) + } +} + +impl Clean> for ty::Visibility { + fn clean(&self, _: &DocContext) -> Option { + Some(if *self == ty::Visibility::Public { Public } else { Inherited }) } } @@ -1781,13 +1967,21 @@ pub struct Struct { pub fields_stripped: bool, } +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +pub struct Union { + pub struct_type: doctree::StructType, + pub generics: Generics, + pub fields: Vec, + pub fields_stripped: bool, +} + impl Clean for doctree::Struct { fn clean(&self, cx: &DocContext) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -1801,6 +1995,26 @@ impl Clean for doctree::Struct { } } +impl Clean for doctree::Union { + fn 
clean(&self, cx: &DocContext) -> Item { + Item { + name: Some(self.name.clean(cx)), + attrs: self.attrs.clean(cx), + source: self.whence.clean(cx), + def_id: cx.tcx.map.local_def_id(self.id), + visibility: self.vis.clean(cx), + stability: self.stab.clean(cx), + deprecation: self.depr.clean(cx), + inner: UnionItem(Union { + struct_type: self.struct_type, + generics: self.generics.clean(cx), + fields: self.fields.clean(cx), + fields_stripped: false, + }), + } + } +} + /// This is a more limited form of the standard Struct, different in that /// it lacks the things most items have (name, id, parameterization). Found /// only as a variant in an enum. @@ -1811,7 +2025,7 @@ pub struct VariantStruct { pub fields_stripped: bool, } -impl Clean for ::rustc_front::hir::VariantData { +impl Clean for ::rustc::hir::VariantData { fn clean(&self, cx: &DocContext) -> VariantStruct { VariantStruct { struct_type: doctree::struct_type_from_def(self), @@ -1834,7 +2048,7 @@ impl Clean for doctree::Enum { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -1861,47 +2075,37 @@ impl Clean for doctree::Variant { visibility: None, stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.map.local_def_id(self.def.id()), + def_id: cx.tcx.map.local_def_id(self.def.id()), inner: VariantItem(Variant { - kind: struct_def_to_variant_kind(&self.def, cx), + kind: self.def.clean(cx), }), } } } -impl<'tcx> Clean for ty::VariantDefData<'tcx, 'static> { +impl<'tcx> Clean for ty::VariantDef { fn clean(&self, cx: &DocContext) -> Item { - // use syntax::parse::token::special_idents::unnamed_field; - let kind = match self.kind() { - ty::VariantKind::Unit => CLikeVariant, - ty::VariantKind::Tuple => { - TupleVariant( - self.fields.iter().map(|f| 
f.unsubst_ty().clean(cx)).collect() + let kind = match self.ctor_kind { + CtorKind::Const => VariantKind::CLike, + CtorKind::Fn => { + VariantKind::Tuple( + self.fields.iter().map(|f| cx.tcx.item_type(f.did).clean(cx)).collect() ) } - ty::VariantKind::Struct => { - StructVariant(VariantStruct { + CtorKind::Fictive => { + VariantKind::Struct(VariantStruct { struct_type: doctree::Plain, fields_stripped: false, fields: self.fields.iter().map(|field| { Item { - source: Span::empty(), + source: cx.tcx.def_span(field.did).clean(cx), name: Some(field.name.clean(cx)), - attrs: Vec::new(), - visibility: Some(hir::Public), - // FIXME: this is not accurate, we need an id for - // the specific field but we're using the id - // for the whole variant. Thus we read the - // stability from the whole variant as well. - // Struct variants are experimental and need - // more infrastructure work before we can get - // at the needed information here. - def_id: self.did, - stability: get_stability(cx, self.did), - deprecation: get_deprecation(cx, self.did), - inner: StructFieldItem( - TypedStructField(field.unsubst_ty().clean(cx)) - ) + attrs: cx.tcx.get_attrs(field.did).clean(cx), + visibility: field.vis.clean(cx), + def_id: field.did, + stability: get_stability(cx, field.did), + deprecation: get_deprecation(cx, field.did), + inner: StructFieldItem(cx.tcx.item_type(field.did).clean(cx)) } }).collect() }) @@ -1909,9 +2113,9 @@ impl<'tcx> Clean for ty::VariantDefData<'tcx, 'static> { }; Item { name: Some(self.name.clean(cx)), - attrs: inline::load_attrs(cx, cx.tcx(), self.did), - source: Span::empty(), - visibility: Some(hir::Public), + attrs: inline::load_attrs(cx, self.did), + source: cx.tcx.def_span(self.did).clean(cx), + visibility: Some(Inherited), def_id: self.did, inner: VariantItem(Variant { kind: kind }), stability: get_stability(cx, self.did), @@ -1922,18 +2126,20 @@ impl<'tcx> Clean for ty::VariantDefData<'tcx, 'static> { #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] 
pub enum VariantKind { - CLikeVariant, - TupleVariant(Vec), - StructVariant(VariantStruct), + CLike, + Tuple(Vec), + Struct(VariantStruct), } -fn struct_def_to_variant_kind(struct_def: &hir::VariantData, cx: &DocContext) -> VariantKind { - if struct_def.is_struct() { - StructVariant(struct_def.clean(cx)) - } else if struct_def.is_unit() { - CLikeVariant - } else { - TupleVariant(struct_def.fields().iter().map(|x| x.node.ty.clean(cx)).collect()) +impl Clean for hir::VariantData { + fn clean(&self, cx: &DocContext) -> VariantKind { + if self.is_struct() { + VariantKind::Struct(self.clean(cx)) + } else if self.is_unit() { + VariantKind::CLike + } else { + VariantKind::Tuple(self.fields().iter().map(|x| x.ty.clean(cx)).collect()) + } } } @@ -1956,7 +2162,7 @@ impl Span { } } -impl Clean for syntax::codemap::Span { +impl Clean for syntax_pos::Span { fn clean(&self, cx: &DocContext) -> Span { if *self == DUMMY_SP { return Span::empty(); @@ -1979,6 +2185,7 @@ impl Clean for syntax::codemap::Span { #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug)] pub struct Path { pub global: bool, + pub def: Def, pub segments: Vec, } @@ -1986,6 +2193,7 @@ impl Path { pub fn singleton(name: String) -> Path { Path { global: false, + def: Def::Err, segments: vec![PathSegment { name: name, params: PathParameters::AngleBracketed { @@ -1996,12 +2204,17 @@ impl Path { }] } } + + pub fn last_name(&self) -> String { + self.segments.last().unwrap().name.clone() + } } impl Clean for hir::Path { fn clean(&self, cx: &DocContext) -> Path { Path { global: self.global, + def: self.def, segments: self.segments.clean(cx), } } @@ -2050,17 +2263,26 @@ pub struct PathSegment { impl Clean for hir::PathSegment { fn clean(&self, cx: &DocContext) -> PathSegment { PathSegment { - name: self.identifier.name.clean(cx), + name: self.name.clean(cx), params: self.parameters.clean(cx) } } } -fn path_to_string(p: &hir::Path) -> String { +fn qpath_to_string(p: &hir::QPath) -> String { + let (segments, 
global) = match *p { + hir::QPath::Resolved(_, ref path) => { + (&path.segments, path.global) + } + hir::QPath::TypeRelative(_, ref segment) => { + return segment.name.to_string() + } + }; + let mut s = String::new(); let mut first = true; - for i in p.segments.iter().map(|x| x.identifier.name.as_str()) { - if !first || p.global { + for i in segments.iter().map(|x| x.name.as_str()) { + if !first || global { s.push_str("::"); } else { first = false; @@ -2088,7 +2310,7 @@ impl Clean for doctree::Typedef { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id.clone()), + def_id: cx.tcx.map.local_def_id(self.id.clone()), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -2105,7 +2327,7 @@ pub struct BareFunctionDecl { pub unsafety: hir::Unsafety, pub generics: Generics, pub decl: FnDecl, - pub abi: String, + pub abi: Abi, } impl Clean for hir::BareFnTy { @@ -2118,7 +2340,7 @@ impl Clean for hir::BareFnTy { where_predicates: Vec::new() }, decl: self.decl.clean(cx), - abi: self.abi.to_string(), + abi: self.abi, } } } @@ -2140,14 +2362,14 @@ impl Clean for doctree::Static { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), inner: StaticItem(Static { type_: self.type_.clean(cx), mutability: self.mutability.clean(cx), - expr: self.expr.span.to_src(cx), + expr: pprust::expr_to_string(&self.expr), }), } } @@ -2165,13 +2387,13 @@ impl Clean for doctree::Constant { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: 
self.depr.clean(cx), inner: ConstantItem(Constant { type_: self.type_.clean(cx), - expr: self.expr.span.to_src(cx), + expr: pprust::expr_to_string(&self.expr), }), } } @@ -2211,17 +2433,13 @@ impl Clean for hir::ImplPolarity { pub struct Impl { pub unsafety: hir::Unsafety, pub generics: Generics, + pub provided_trait_methods: FxHashSet, pub trait_: Option, pub for_: Type, pub items: Vec, - pub derived: bool, pub polarity: Option, } -fn detect_derived(attrs: &[M]) -> bool { - attr::contains_name(attrs, "automatically_derived") -} - impl Clean> for doctree::Impl { fn clean(&self, cx: &DocContext) -> Vec { let mut ret = Vec::new(); @@ -2230,41 +2448,43 @@ impl Clean> for doctree::Impl { // If this impl block is an implementation of the Deref trait, then we // need to try inlining the target's inherent impl blocks as well. - if let Some(ResolvedPath { did, .. }) = trait_ { - if Some(did) == cx.deref_trait_did.get() { - build_deref_target_impls(cx, &items, &mut ret); - } + if trait_.def_id() == cx.tcx.lang_items.deref_trait() { + build_deref_target_impls(cx, &items, &mut ret); } + let provided = trait_.def_id().map(|did| { + cx.tcx.provided_trait_methods(did) + .into_iter() + .map(|meth| meth.name.to_string()) + .collect() + }).unwrap_or(FxHashSet()); + ret.push(Item { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), inner: ImplItem(Impl { unsafety: self.unsafety, generics: self.generics.clean(cx), + provided_trait_methods: provided, trait_: trait_, for_: self.for_.clean(cx), items: items, - derived: detect_derived(&self.attrs), polarity: Some(self.polarity.clean(cx)), }), }); - return ret; + ret } } fn build_deref_target_impls(cx: &DocContext, items: &[Item], ret: &mut Vec) { - let tcx = match cx.tcx_opt() { - Some(t) => t, - None => return, - }; + let tcx = cx.tcx; 
for item in items { let target = match item.inner { @@ -2274,7 +2494,7 @@ fn build_deref_target_impls(cx: &DocContext, let primitive = match *target { ResolvedPath { did, .. } if did.is_local() => continue, ResolvedPath { did, .. } => { - ret.extend(inline::build_impls(cx, tcx, did)); + ret.extend(inline::build_impls(cx, did)); continue } _ => match target.primitive_type() { @@ -2283,29 +2503,29 @@ fn build_deref_target_impls(cx: &DocContext, } }; let did = match primitive { - Isize => tcx.lang_items.isize_impl(), - I8 => tcx.lang_items.i8_impl(), - I16 => tcx.lang_items.i16_impl(), - I32 => tcx.lang_items.i32_impl(), - I64 => tcx.lang_items.i64_impl(), - Usize => tcx.lang_items.usize_impl(), - U8 => tcx.lang_items.u8_impl(), - U16 => tcx.lang_items.u16_impl(), - U32 => tcx.lang_items.u32_impl(), - U64 => tcx.lang_items.u64_impl(), - F32 => tcx.lang_items.f32_impl(), - F64 => tcx.lang_items.f64_impl(), - Char => tcx.lang_items.char_impl(), - Bool => None, - Str => tcx.lang_items.str_impl(), - Slice => tcx.lang_items.slice_impl(), - Array => tcx.lang_items.slice_impl(), - PrimitiveTuple => None, - PrimitiveRawPointer => tcx.lang_items.const_ptr_impl(), + PrimitiveType::Isize => tcx.lang_items.isize_impl(), + PrimitiveType::I8 => tcx.lang_items.i8_impl(), + PrimitiveType::I16 => tcx.lang_items.i16_impl(), + PrimitiveType::I32 => tcx.lang_items.i32_impl(), + PrimitiveType::I64 => tcx.lang_items.i64_impl(), + PrimitiveType::Usize => tcx.lang_items.usize_impl(), + PrimitiveType::U8 => tcx.lang_items.u8_impl(), + PrimitiveType::U16 => tcx.lang_items.u16_impl(), + PrimitiveType::U32 => tcx.lang_items.u32_impl(), + PrimitiveType::U64 => tcx.lang_items.u64_impl(), + PrimitiveType::F32 => tcx.lang_items.f32_impl(), + PrimitiveType::F64 => tcx.lang_items.f64_impl(), + PrimitiveType::Char => tcx.lang_items.char_impl(), + PrimitiveType::Bool => None, + PrimitiveType::Str => tcx.lang_items.str_impl(), + PrimitiveType::Slice => tcx.lang_items.slice_impl(), + PrimitiveType::Array 
=> tcx.lang_items.slice_impl(), + PrimitiveType::Tuple => None, + PrimitiveType::RawPointer => tcx.lang_items.const_ptr_impl(), }; if let Some(did) = did { if !did.is_local() { - inline::build_impl(cx, tcx, did, ret); + inline::build_impl(cx, did, ret); } } } @@ -2323,8 +2543,8 @@ impl Clean for doctree::DefaultImpl { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(self.id), - visibility: Some(hir::Public), + def_id: cx.tcx.map.local_def_id(self.id), + visibility: Some(Public), stability: None, deprecation: None, inner: DefaultImplItem(DefaultImpl { @@ -2341,7 +2561,7 @@ impl Clean for doctree::ExternCrate { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(0), + def_id: DefId { krate: self.cnum, index: CRATE_DEF_INDEX }, visibility: self.vis.clean(cx), stability: None, deprecation: None, @@ -2355,76 +2575,45 @@ impl Clean> for doctree::Import { // We consider inlining the documentation of `pub use` statements, but we // forcefully don't inline if this is not public or if the // #[doc(no_inline)] attribute is present. + // Don't inline doc(hidden) imports so they can be stripped at a later stage. let denied = self.vis != hir::Public || self.attrs.iter().any(|a| { - &a.name()[..] == "doc" && match a.meta_item_list() { - Some(l) => attr::contains_name(l, "no_inline"), + a.name() == "doc" && match a.meta_item_list() { + Some(l) => attr::list_contains_name(l, "no_inline") || + attr::list_contains_name(l, "hidden"), None => false, } }); - let (mut ret, inner) = match self.node { - hir::ViewPathGlob(ref p) => { - (vec![], GlobImport(resolve_use_source(cx, p.clean(cx), self.id))) - } - hir::ViewPathList(ref p, ref list) => { - // Attempt to inline all reexported items, but be sure - // to keep any non-inlineable reexports so they can be - // listed in the documentation. 
- let mut ret = vec![]; - let remaining = if !denied { - let mut remaining = vec![]; - for path in list { - match inline::try_inline(cx, path.node.id(), path.node.rename()) { - Some(items) => { - ret.extend(items); - } - None => { - remaining.push(path.clean(cx)); - } - } - } - remaining - } else { - list.clean(cx) - }; - if remaining.is_empty() { - return ret; - } - (ret, ImportList(resolve_use_source(cx, p.clean(cx), self.id), - remaining)) - } - hir::ViewPathSimple(name, ref p) => { - if !denied { - match inline::try_inline(cx, self.id, Some(name)) { - Some(items) => return items, - None => {} - } + let path = self.path.clean(cx); + let inner = if self.glob { + Import::Glob(resolve_use_source(cx, path)) + } else { + let name = self.name; + if !denied { + if let Some(items) = inline::try_inline(cx, path.def, Some(name)) { + return items; } - (vec![], SimpleImport(name.clean(cx), - resolve_use_source(cx, p.clean(cx), self.id))) } + Import::Simple(name.clean(cx), resolve_use_source(cx, path)) }; - ret.push(Item { + vec![Item { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.map.local_def_id(0), + def_id: cx.tcx.map.local_def_id(ast::CRATE_NODE_ID), visibility: self.vis.clean(cx), stability: None, deprecation: None, inner: ImportItem(inner) - }); - ret + }] } } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub enum Import { // use source as str; - SimpleImport(String, ImportSource), + Simple(String, ImportSource), // use source::*; - GlobImport(ImportSource), - // use source::{a, b, c}; - ImportList(ImportSource, Vec), + Glob(ImportSource) } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] @@ -2433,37 +2622,12 @@ pub struct ImportSource { pub did: Option, } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] -pub struct ViewListIdent { - pub name: String, - pub rename: Option, - pub source: Option, -} - -impl Clean for hir::PathListItem { - fn clean(&self, cx: &DocContext) -> ViewListIdent { - match 
self.node { - hir::PathListIdent { id, name, rename } => ViewListIdent { - name: name.clean(cx), - rename: rename.map(|r| r.clean(cx)), - source: resolve_def(cx, id) - }, - hir::PathListMod { id, rename } => ViewListIdent { - name: "self".to_string(), - rename: rename.map(|r| r.clean(cx)), - source: resolve_def(cx, id) - } - } - } -} - impl Clean> for hir::ForeignMod { fn clean(&self, cx: &DocContext) -> Vec { let mut items = self.items.clean(cx); for item in &mut items { - match item.inner { - ForeignFunctionItem(ref mut f) => f.abi = self.abi, - _ => {} + if let ForeignFunctionItem(ref mut f) = item.inner { + f.abi = self.abi; } } items @@ -2478,7 +2642,7 @@ impl Clean for hir::ForeignItem { decl: decl.clean(cx), generics: generics.clean(cx), unsafety: hir::Unsafety::Unsafe, - abi: abi::Rust, + abi: Abi::Rust, constness: hir::Constness::NotConst, }) } @@ -2494,10 +2658,10 @@ impl Clean for hir::ForeignItem { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.span.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), visibility: self.vis.clean(cx), - stability: get_stability(cx, cx.map.local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.map.local_def_id(self.id)), + stability: get_stability(cx, cx.tcx.map.local_def_id(self.id)), + deprecation: get_deprecation(cx, cx.tcx.map.local_def_id(self.id)), inner: inner, } } @@ -2509,7 +2673,7 @@ trait ToSource { fn to_src(&self, cx: &DocContext) -> String; } -impl ToSource for syntax::codemap::Span { +impl ToSource for syntax_pos::Span { fn to_src(&self, cx: &DocContext) -> String { debug!("converting span {:?} to snippet", self.clean(cx)); let sn = match cx.sess().codemap().span_to_snippet(*self) { @@ -2521,56 +2685,34 @@ impl ToSource for syntax::codemap::Span { } } -fn lit_to_string(lit: &ast::Lit) -> String { - match lit.node { - ast::LitStr(ref st, _) => st.to_string(), - ast::LitByteStr(ref data) => format!("{:?}", data), - ast::LitByte(b) => 
{ - let mut res = String::from("b'"); - for c in (b as char).escape_default() { - res.push(c); - } - res.push('\''); - res - }, - ast::LitChar(c) => format!("'{}'", c), - ast::LitInt(i, _t) => i.to_string(), - ast::LitFloat(ref f, _t) => f.to_string(), - ast::LitFloatUnsuffixed(ref f) => f.to_string(), - ast::LitBool(b) => b.to_string(), - } -} - fn name_from_pat(p: &hir::Pat) -> String { - use rustc_front::hir::*; + use rustc::hir::*; debug!("Trying to get a name from pattern: {:?}", p); match p.node { - PatWild => "_".to_string(), - PatIdent(_, ref p, _) => p.node.to_string(), - PatEnum(ref p, _) => path_to_string(p), - PatQPath(..) => panic!("tried to get argument name from PatQPath, \ - which is not allowed in function arguments"), - PatStruct(ref name, ref fields, etc) => { - format!("{} {{ {}{} }}", path_to_string(name), + PatKind::Wild => "_".to_string(), + PatKind::Binding(_, _, ref p, _) => p.node.to_string(), + PatKind::TupleStruct(ref p, ..) | PatKind::Path(ref p) => qpath_to_string(p), + PatKind::Struct(ref name, ref fields, etc) => { + format!("{} {{ {}{} }}", qpath_to_string(name), fields.iter().map(|&Spanned { node: ref fp, .. }| format!("{}: {}", fp.name, name_from_pat(&*fp.pat))) .collect::>().join(", "), if etc { ", ..." } else { "" } ) - }, - PatTup(ref elts) => format!("({})", elts.iter().map(|p| name_from_pat(&**p)) + } + PatKind::Tuple(ref elts, _) => format!("({})", elts.iter().map(|p| name_from_pat(&**p)) .collect::>().join(", ")), - PatBox(ref p) => name_from_pat(&**p), - PatRegion(ref p, _) => name_from_pat(&**p), - PatLit(..) => { - warn!("tried to get argument name from PatLit, \ + PatKind::Box(ref p) => name_from_pat(&**p), + PatKind::Ref(ref p, _) => name_from_pat(&**p), + PatKind::Lit(..) => { + warn!("tried to get argument name from PatKind::Lit, \ which is silly in function arguments"); "()".to_string() }, - PatRange(..) => panic!("tried to get argument name from PatRange, \ + PatKind::Range(..) 
=> panic!("tried to get argument name from PatKind::Range, \ which is not allowed in function arguments"), - PatVec(ref begin, ref mid, ref end) => { + PatKind::Slice(ref begin, ref mid, ref end) => { let begin = begin.iter().map(|p| name_from_pat(&**p)); let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter(); let end = end.iter().map(|p| name_from_pat(&**p)); @@ -2579,103 +2721,70 @@ fn name_from_pat(p: &hir::Pat) -> String { } } -/// Given a Type, resolve it using the def_map +/// Given a type Path, resolve it to a Type using the TyCtxt fn resolve_type(cx: &DocContext, path: Path, id: ast::NodeId) -> Type { debug!("resolve_type({:?},{:?})", path, id); - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - // If we're extracting tests, this return value's accuracy is not - // important, all we want is a string representation to help people - // figure out what doctests are failing. - None => { - let did = DefId::local(DefIndex::from_u32(0)); - return ResolvedPath { - path: path, - typarams: None, - did: did, - is_generic: false - }; - } - }; - let def = match tcx.def_map.borrow().get(&id) { - Some(k) => k.full_def(), - None => panic!("unresolved id not in defmap") - }; - debug!("resolve_type: def={:?}", def); - - let is_generic = match def { - def::DefPrimTy(p) => match p { - hir::TyStr => return Primitive(Str), - hir::TyBool => return Primitive(Bool), - hir::TyChar => return Primitive(Char), - hir::TyInt(ast::TyIs) => return Primitive(Isize), - hir::TyInt(ast::TyI8) => return Primitive(I8), - hir::TyInt(ast::TyI16) => return Primitive(I16), - hir::TyInt(ast::TyI32) => return Primitive(I32), - hir::TyInt(ast::TyI64) => return Primitive(I64), - hir::TyUint(ast::TyUs) => return Primitive(Usize), - hir::TyUint(ast::TyU8) => return Primitive(U8), - hir::TyUint(ast::TyU16) => return Primitive(U16), - hir::TyUint(ast::TyU32) => return Primitive(U32), - hir::TyUint(ast::TyU64) => return Primitive(U64), - hir::TyFloat(ast::TyF32) => return 
Primitive(F32), - hir::TyFloat(ast::TyF64) => return Primitive(F64), + let is_generic = match path.def { + Def::PrimTy(p) => match p { + hir::TyStr => return Primitive(PrimitiveType::Str), + hir::TyBool => return Primitive(PrimitiveType::Bool), + hir::TyChar => return Primitive(PrimitiveType::Char), + hir::TyInt(int_ty) => return Primitive(int_ty.into()), + hir::TyUint(uint_ty) => return Primitive(uint_ty.into()), + hir::TyFloat(float_ty) => return Primitive(float_ty.into()), }, - def::DefSelfTy(..) if path.segments.len() == 1 => { - return Generic(special_idents::type_self.name.to_string()); + Def::SelfTy(..) if path.segments.len() == 1 => { + return Generic(keywords::SelfType.name().to_string()); } - def::DefSelfTy(..) | def::DefTyParam(..) => true, + Def::SelfTy(..) | Def::TyParam(..) | Def::AssociatedTy(..) => true, _ => false, }; - let did = register_def(&*cx, def); + let did = register_def(&*cx, path.def); ResolvedPath { path: path, typarams: None, did: did, is_generic: is_generic } } -fn register_def(cx: &DocContext, def: def::Def) -> DefId { +fn register_def(cx: &DocContext, def: Def) -> DefId { debug!("register_def({:?})", def); let (did, kind) = match def { - def::DefFn(i, _) => (i, TypeFunction), - def::DefTy(i, false) => (i, TypeTypedef), - def::DefTy(i, true) => (i, TypeEnum), - def::DefTrait(i) => (i, TypeTrait), - def::DefStruct(i) => (i, TypeStruct), - def::DefMod(i) => (i, TypeModule), - def::DefStatic(i, _) => (i, TypeStatic), - def::DefVariant(i, _, _) => (i, TypeEnum), - def::DefSelfTy(Some(def_id), _) => (def_id, TypeTrait), - def::DefSelfTy(_, Some((impl_id, _))) => return cx.map.local_def_id(impl_id), + Def::Fn(i) => (i, TypeKind::Function), + Def::TyAlias(i) => (i, TypeKind::Typedef), + Def::Enum(i) => (i, TypeKind::Enum), + Def::Trait(i) => (i, TypeKind::Trait), + Def::Struct(i) => (i, TypeKind::Struct), + Def::Union(i) => (i, TypeKind::Union), + Def::Mod(i) => (i, TypeKind::Module), + Def::Static(i, _) => (i, TypeKind::Static), + 
Def::Variant(i) => (cx.tcx.parent_def_id(i).unwrap(), TypeKind::Enum), + Def::SelfTy(Some(def_id), _) => (def_id, TypeKind::Trait), + Def::SelfTy(_, Some(impl_def_id)) => { + return impl_def_id + } _ => return def.def_id() }; if did.is_local() { return did } - let tcx = match cx.tcx_opt() { - Some(tcx) => tcx, - None => return did - }; inline::record_extern_fqn(cx, did, kind); - if let TypeTrait = kind { - let t = inline::build_external_trait(cx, tcx, did); - cx.external_traits.borrow_mut().as_mut().unwrap().insert(did, t); + if let TypeKind::Trait = kind { + let t = inline::build_external_trait(cx, did); + cx.external_traits.borrow_mut().insert(did, t); } - return did; + did } -fn resolve_use_source(cx: &DocContext, path: Path, id: ast::NodeId) -> ImportSource { +fn resolve_use_source(cx: &DocContext, path: Path) -> ImportSource { ImportSource { + did: if path.def == Def::Err { + None + } else { + Some(register_def(cx, path.def)) + }, path: path, - did: resolve_def(cx, id), } } -fn resolve_def(cx: &DocContext, id: ast::NodeId) -> Option { - cx.tcx_opt().and_then(|tcx| { - tcx.def_map.borrow().get(&id).map(|d| register_def(cx, d.full_def())) - }) -} - #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Macro { pub source: String, @@ -2684,19 +2793,21 @@ pub struct Macro { impl Clean for doctree::Macro { fn clean(&self, cx: &DocContext) -> Item { - let name = format!("{}!", self.name.clean(cx)); + let name = self.name.clean(cx); Item { name: Some(name.clone()), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - visibility: hir::Public.clean(cx), + visibility: Some(Public), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.map.local_def_id(self.id), + def_id: cx.tcx.map.local_def_id(self.id), inner: MacroItem(Macro { source: format!("macro_rules! {} {{\n{}}}", - name.trim_right_matches('!'), self.matchers.iter().map(|span| - format!(" {} => {{ ... 
}};\n", span.to_src(cx))).collect::()), + name, + self.matchers.iter().map(|span| { + format!(" {} => {{ ... }};\n", span.to_src(cx)) + }).collect::()), imported_from: self.imported_from.clean(cx), }), } @@ -2709,7 +2820,8 @@ pub struct Stability { pub feature: String, pub since: String, pub deprecated_since: String, - pub reason: String, + pub deprecated_reason: String, + pub unstable_reason: String, pub issue: Option } @@ -2732,14 +2844,13 @@ impl Clean for attr::Stability { Some(attr::RustcDeprecation {ref since, ..}) => since.to_string(), _=> "".to_string(), }, - reason: { - if let Some(ref depr) = self.rustc_depr { - depr.reason.to_string() - } else if let attr::Unstable {reason: Some(ref reason), ..} = self.level { - reason.to_string() - } else { - "".to_string() - } + deprecated_reason: match self.rustc_depr { + Some(ref depr) => depr.reason.to_string(), + _ => "".to_string(), + }, + unstable_reason: match self.level { + attr::Unstable { reason: Some(ref reason), .. } => reason.to_string(), + _ => "".to_string(), }, issue: match self.level { attr::Unstable {issue, ..} => Some(issue), @@ -2764,90 +2875,6 @@ impl Clean for attr::Deprecation { } } -impl<'tcx> Clean for ty::AssociatedConst<'tcx> { - fn clean(&self, cx: &DocContext) -> Item { - Item { - source: DUMMY_SP.clean(cx), - name: Some(self.name.clean(cx)), - attrs: Vec::new(), - inner: AssociatedConstItem(self.ty.clean(cx), None), - visibility: None, - def_id: self.def_id, - stability: None, - deprecation: None, - } - } -} - -impl<'tcx> Clean for ty::AssociatedType<'tcx> { - fn clean(&self, cx: &DocContext) -> Item { - let my_name = self.name.clean(cx); - - let mut bounds = if let ty::TraitContainer(did) = self.container { - // When loading a cross-crate associated type, the bounds for this type - // are actually located on the trait/impl itself, so we need to load - // all of the generics from there and then look for bounds that are - // applied to this associated type in question. 
- let def = cx.tcx().lookup_trait_def(did); - let predicates = cx.tcx().lookup_predicates(did); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); - generics.where_predicates.iter().filter_map(|pred| { - let (name, self_type, trait_, bounds) = match *pred { - WherePredicate::BoundPredicate { - ty: QPath { ref name, ref self_type, ref trait_ }, - ref bounds - } => (name, self_type, trait_, bounds), - _ => return None, - }; - if *name != my_name { return None } - match **trait_ { - ResolvedPath { did, .. } if did == self.container.id() => {} - _ => return None, - } - match **self_type { - Generic(ref s) if *s == "Self" => {} - _ => return None, - } - Some(bounds) - }).flat_map(|i| i.iter().cloned()).collect::>() - } else { - vec![] - }; - - // Our Sized/?Sized bound didn't get handled when creating the generics - // because we didn't actually get our whole set of bounds until just now - // (some of them may have come from the trait). If we do have a sized - // bound, we remove it, and if we don't then we add the `?Sized` bound - // at the end. 
- match bounds.iter().position(|b| b.is_sized_bound(cx)) { - Some(i) => { bounds.remove(i); } - None => bounds.push(TyParamBound::maybe_sized(cx)), - } - - Item { - source: DUMMY_SP.clean(cx), - name: Some(self.name.clean(cx)), - attrs: inline::load_attrs(cx, cx.tcx(), self.def_id), - inner: AssociatedTypeItem(bounds, self.ty.clean(cx)), - visibility: self.vis.clean(cx), - def_id: self.def_id, - stability: stability::lookup_stability(cx.tcx(), self.def_id).clean(cx), - deprecation: stability::lookup_deprecation(cx.tcx(), self.def_id).clean(cx), - } - } -} - -impl<'a> Clean for (ty::TypeScheme<'a>, ty::GenericPredicates<'a>, - ParamSpace) { - fn clean(&self, cx: &DocContext) -> Typedef { - let (ref ty_scheme, ref predicates, ps) = *self; - Typedef { - type_: ty_scheme.ty.clean(cx), - generics: (&ty_scheme.generics, predicates, ps).clean(cx) - } - } -} - fn lang_struct(cx: &DocContext, did: Option, t: ty::Ty, name: &str, fallback: fn(Box) -> Type) -> Type { @@ -2855,12 +2882,13 @@ fn lang_struct(cx: &DocContext, did: Option, Some(did) => did, None => return fallback(box t.clean(cx)), }; - inline::record_extern_fqn(cx, did, TypeStruct); + inline::record_extern_fqn(cx, did, TypeKind::Struct); ResolvedPath { typarams: None, did: did, path: Path { global: false, + def: Def::Err, segments: vec![PathSegment { name: name.to_string(), params: PathParameters::AngleBracketed { diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index 716b88d5534ae..7240f0aedbd27 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -11,7 +11,7 @@ //! Simplification of where clauses and parameter bounds into a prettier and //! more canonical form. //! -//! Currently all cross-crate-inlined function use `middle::ty` to reconstruct +//! Currently all cross-crate-inlined function use `rustc::ty` to reconstruct //! the AST (e.g. see all of `clean::inline`), but this is not always a //! non-lossy transformation. 
The current format of storage for where clauses //! for functions and such is simply a list of predicates. One example of this @@ -27,19 +27,19 @@ //! bounds by special casing scenarios such as these. Fun! use std::mem; -use std::collections::HashMap; +use std::collections::BTreeMap; -use rustc::middle::def_id::DefId; -use rustc::middle::subst; +use rustc::hir::def_id::DefId; +use rustc::ty; use clean::PathParameters as PP; use clean::WherePredicate as WP; -use clean::{self, Clean}; +use clean; use core::DocContext; pub fn where_clauses(cx: &DocContext, clauses: Vec) -> Vec { // First, partition the where clause into its separate components - let mut params = HashMap::new(); + let mut params = BTreeMap::new(); let mut lifetimes = Vec::new(); let mut equalities = Vec::new(); let mut tybounds = Vec::new(); @@ -62,7 +62,7 @@ pub fn where_clauses(cx: &DocContext, clauses: Vec) -> Vec { // Simplify the type parameter bounds on all the generics let mut params = params.into_iter().map(|(k, v)| { (k, ty_bounds(v)) - }).collect::>(); + }).collect::>(); // Look for equality predicates on associated types that can be merged into // general bound predicates @@ -141,7 +141,7 @@ pub fn ty_params(mut params: Vec) -> Vec { for param in &mut params { param.bounds = ty_bounds(mem::replace(&mut param.bounds, Vec::new())); } - return params; + params } fn ty_bounds(bounds: Vec) -> Vec { @@ -153,27 +153,16 @@ fn trait_is_same_or_supertrait(cx: &DocContext, child: DefId, if child == trait_ { return true } - let def = cx.tcx().lookup_trait_def(child); - let predicates = cx.tcx().lookup_predicates(child); - let generics = (&def.generics, &predicates, subst::TypeSpace).clean(cx); - generics.where_predicates.iter().filter_map(|pred| { - match *pred { - clean::WherePredicate::BoundPredicate { - ty: clean::Generic(ref s), - ref bounds - } if *s == "Self" => Some(bounds), - _ => None, - } - }).flat_map(|bounds| bounds).any(|bound| { - let poly_trait = match *bound { - clean::TraitBound(ref t, 
_) => t, - _ => return false, - }; - match poly_trait.trait_ { - clean::ResolvedPath { did, .. } => { - trait_is_same_or_supertrait(cx, did, trait_) + let predicates = cx.tcx.item_super_predicates(child).predicates; + predicates.iter().filter_map(|pred| { + if let ty::Predicate::Trait(ref pred) = *pred { + if pred.0.trait_ref.self_ty().is_self() { + Some(pred.def_id()) + } else { + None } - _ => false, + } else { + None } - }) + }).any(|did| trait_is_same_or_supertrait(cx, did, trait_)) } diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index a7fd170b91c37..df25473ddd916 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -7,93 +7,108 @@ // , at your // option. This file may not be copied, modified, or distributed // except according to those terms. -pub use self::MaybeTyped::*; use rustc_lint; -use rustc_driver::{driver, target_features}; +use rustc_driver::{driver, target_features, abort_on_err}; +use rustc::dep_graph::DepGraph; use rustc::session::{self, config}; -use rustc::middle::def_id::DefId; +use rustc::hir::def_id::DefId; +use rustc::hir::def::{Def, ExportMap}; use rustc::middle::privacy::AccessLevels; -use rustc::middle::ty; -use rustc::front::map as hir_map; +use rustc::ty::{self, TyCtxt, Ty}; +use rustc::hir::map as hir_map; use rustc::lint; +use rustc::util::nodemap::{FxHashMap, NodeMap}; use rustc_trans::back::link; use rustc_resolve as resolve; -use rustc_front::lowering::{lower_crate, LoweringContext}; use rustc_metadata::cstore::CStore; -use syntax::{ast, codemap, errors}; -use syntax::errors::emitter::ColorConfig; +use syntax::{ast, codemap}; use syntax::feature_gate::UnstableFeatures; -use syntax::parse::token; +use errors; +use errors::emitter::ColorConfig; use std::cell::{RefCell, Cell}; -use std::collections::{HashMap, HashSet}; +use std::mem; use std::rc::Rc; +use std::path::PathBuf; use visit_ast::RustdocVisitor; use clean; use clean::Clean; +use html::render::RenderInfo; pub use rustc::session::config::Input; 
pub use rustc::session::search_paths::SearchPaths; -/// Are we generating documentation (`Typed`) or tests (`NotTyped`)? -pub enum MaybeTyped<'a, 'tcx: 'a> { - Typed(&'a ty::ctxt<'tcx>), - NotTyped(&'a session::Session) -} - -pub type ExternalPaths = RefCell, clean::TypeKind)>>>; +pub type ExternalPaths = FxHashMap, clean::TypeKind)>; pub struct DocContext<'a, 'tcx: 'a> { - pub map: &'a hir_map::Map<'tcx>, - pub maybe_typed: MaybeTyped<'a, 'tcx>, - pub input: Input, - pub external_paths: ExternalPaths, - pub external_traits: RefCell>>, - pub external_typarams: RefCell>>, - pub inlined: RefCell>>, - pub populated_crate_impls: RefCell>, - pub deref_trait_did: Cell>, + pub tcx: TyCtxt<'a, 'tcx, 'tcx>, + pub populated_all_crate_impls: Cell, + // Note that external items for which `doc(hidden)` applies to are shown as + // non-reachable while local items aren't. This is because we're reusing + // the access levels from crateanalysis. + /// Later on moved into `clean::Crate` + pub access_levels: RefCell>, + /// Later on moved into `html::render::CACHE_KEY` + pub renderinfo: RefCell, + /// Later on moved through `clean::Crate` into `html::render::CACHE_KEY` + pub external_traits: RefCell>, + + // The current set of type and lifetime substitutions, + // for expanding type aliases at the HIR level: + + /// Table type parameter definition -> substituted type + pub ty_substs: RefCell>, + /// Table node id of lifetime parameter definition -> substituted lifetime + pub lt_substs: RefCell>, + pub export_map: ExportMap, + + /// Table from HIR Ty nodes to their resolved Ty. 
+ pub hir_ty_to_ty: NodeMap>, } -impl<'b, 'tcx> DocContext<'b, 'tcx> { - pub fn sess<'a>(&'a self) -> &'a session::Session { - match self.maybe_typed { - Typed(tcx) => &tcx.sess, - NotTyped(ref sess) => sess - } +impl<'a, 'tcx> DocContext<'a, 'tcx> { + pub fn sess(&self) -> &session::Session { + &self.tcx.sess } - pub fn tcx_opt<'a>(&'a self) -> Option<&'a ty::ctxt<'tcx>> { - match self.maybe_typed { - Typed(tcx) => Some(tcx), - NotTyped(_) => None - } - } - - pub fn tcx<'a>(&'a self) -> &'a ty::ctxt<'tcx> { - let tcx_opt = self.tcx_opt(); - tcx_opt.expect("tcx not present") + /// Call the closure with the given parameters set as + /// the substitutions for a type alias' RHS. + pub fn enter_alias(&self, + ty_substs: FxHashMap, + lt_substs: FxHashMap, + f: F) -> R + where F: FnOnce() -> R { + let (old_tys, old_lts) = + (mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs), + mem::replace(&mut *self.lt_substs.borrow_mut(), lt_substs)); + let r = f(); + *self.ty_substs.borrow_mut() = old_tys; + *self.lt_substs.borrow_mut() = old_lts; + r } } -pub struct CrateAnalysis { - pub access_levels: AccessLevels, - pub external_paths: ExternalPaths, - pub external_typarams: RefCell>>, - pub inlined: RefCell>>, - pub deref_trait_did: Option, +pub trait DocAccessLevels { + fn is_doc_reachable(&self, DefId) -> bool; } -pub type Externs = HashMap>; +impl DocAccessLevels for AccessLevels { + fn is_doc_reachable(&self, did: DefId) -> bool { + self.is_public(did) + } +} -pub fn run_core(search_paths: SearchPaths, cfgs: Vec, externs: Externs, - input: Input, triple: Option) - -> (clean::Crate, CrateAnalysis) { +pub fn run_core(search_paths: SearchPaths, + cfgs: Vec, + externs: config::Externs, + input: Input, + triple: Option, + maybe_sysroot: Option) -> (clean::Crate, RenderInfo) +{ // Parse, resolve, and typecheck the given crate. 
let cpath = match input { @@ -104,58 +119,62 @@ pub fn run_core(search_paths: SearchPaths, cfgs: Vec, externs: Externs, let warning_lint = lint::builtin::WARNINGS.name_lower(); let sessopts = config::Options { - maybe_sysroot: None, + maybe_sysroot: maybe_sysroot, search_paths: search_paths, - crate_types: vec!(config::CrateTypeRlib), - lint_opts: vec!((warning_lint, lint::Allow)), + crate_types: vec![config::CrateTypeRlib], + lint_opts: vec![(warning_lint, lint::Allow)], lint_cap: Some(lint::Allow), externs: externs, target_triple: triple.unwrap_or(config::host_triple().to_string()), - cfg: config::parse_cfgspecs(cfgs), // Ensure that rustdoc works even if rustc is feature-staged unstable_features: UnstableFeatures::Allow, + actually_rustdoc: true, ..config::basic_options().clone() }; let codemap = Rc::new(codemap::CodeMap::new()); let diagnostic_handler = errors::Handler::with_tty_emitter(ColorConfig::Auto, - None, true, false, - codemap.clone()); - - let cstore = Rc::new(CStore::new(token::get_ident_interner())); - let sess = session::build_session_(sessopts, cpath, diagnostic_handler, - codemap, cstore.clone()); + Some(codemap.clone())); + + let dep_graph = DepGraph::new(false); + let _ignore = dep_graph.in_ignore(); + let cstore = Rc::new(CStore::new(&dep_graph)); + let mut sess = session::build_session_( + sessopts, &dep_graph, cpath, diagnostic_handler, codemap, cstore.clone() + ); rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let mut cfg = config::build_configuration(&sess); + let mut cfg = config::build_configuration(&sess, config::parse_cfgspecs(cfgs)); target_features::add_configuration(&mut cfg, &sess); + sess.parse_sess.config = cfg; - let krate = driver::phase_1_parse_input(&sess, cfg, &input); + let krate = panictry!(driver::phase_1_parse_input(&sess, &input)); - let name = link::find_crate_name(Some(&sess), &krate.attrs, - &input); + let name = link::find_crate_name(Some(&sess), &krate.attrs, &input); - let krate = 
driver::phase_2_configure_and_expand(&sess, &cstore, krate, &name, None) - .expect("phase_2_configure_and_expand aborted in rustdoc!"); + let driver::ExpansionResult { defs, analysis, resolutions, mut hir_forest, .. } = { + driver::phase_2_configure_and_expand( + &sess, &cstore, krate, None, &name, None, resolve::MakeGlobMap::No, |_| Ok(()), + ).expect("phase_2_configure_and_expand aborted in rustdoc!") + }; - let krate = driver::assign_node_ids(&sess, krate); - // Lower ast -> hir. - let lcx = LoweringContext::new(&sess, Some(&krate)); - let mut hir_forest = hir_map::Forest::new(lower_crate(&lcx, &krate)); let arenas = ty::CtxtArenas::new(); - let hir_map = driver::make_map(&sess, &mut hir_forest); - - driver::phase_3_run_analysis_passes(&sess, - &cstore, - hir_map, - &arenas, - &name, - resolve::MakeGlobMap::No, - |tcx, _, analysis| { - let _ignore = tcx.dep_graph.in_ignore(); - let ty::CrateAnalysis { access_levels, .. } = analysis; + let hir_map = hir_map::map_crate(&mut hir_forest, defs); + + abort_on_err(driver::phase_3_run_analysis_passes(&sess, + hir_map, + analysis, + resolutions, + &arenas, + &name, + |tcx, analysis, _, result| { + if let Err(_) = result { + sess.fatal("Compilation failed, aborting rustdoc"); + } + + let ty::CrateAnalysis { access_levels, export_map, hir_ty_to_ty, .. 
} = analysis; // Convert from a NodeId set to a DefId set since we don't always have easy access // to the map from defid -> nodeid @@ -166,39 +185,24 @@ pub fn run_core(search_paths: SearchPaths, cfgs: Vec, externs: Externs, }; let ctxt = DocContext { - map: &tcx.map, - maybe_typed: Typed(tcx), - input: input, - external_traits: RefCell::new(Some(HashMap::new())), - external_typarams: RefCell::new(Some(HashMap::new())), - external_paths: RefCell::new(Some(HashMap::new())), - inlined: RefCell::new(Some(HashSet::new())), - populated_crate_impls: RefCell::new(HashSet::new()), - deref_trait_did: Cell::new(None), - }; - debug!("crate: {:?}", ctxt.map.krate()); - - let mut analysis = CrateAnalysis { - access_levels: access_levels, - external_paths: RefCell::new(None), - external_typarams: RefCell::new(None), - inlined: RefCell::new(None), - deref_trait_did: None, + tcx: tcx, + populated_all_crate_impls: Cell::new(false), + access_levels: RefCell::new(access_levels), + external_traits: Default::default(), + renderinfo: Default::default(), + ty_substs: Default::default(), + lt_substs: Default::default(), + export_map: export_map, + hir_ty_to_ty: hir_ty_to_ty, }; + debug!("crate: {:?}", tcx.map.krate()); let krate = { - let mut v = RustdocVisitor::new(&ctxt, Some(&analysis)); - v.visit(ctxt.map.krate()); + let mut v = RustdocVisitor::new(&ctxt); + v.visit(tcx.map.krate()); v.clean(&ctxt) }; - let external_paths = ctxt.external_paths.borrow_mut().take(); - *analysis.external_paths.borrow_mut() = external_paths; - let map = ctxt.external_typarams.borrow_mut().take(); - *analysis.external_typarams.borrow_mut() = map; - let map = ctxt.inlined.borrow_mut().take(); - *analysis.inlined.borrow_mut() = map; - analysis.deref_trait_did = ctxt.deref_trait_did.get(); - (krate, analysis) - }) + (krate, ctxt.renderinfo.into_inner()) + }), &sess) } diff --git a/src/librustdoc/doctree.rs b/src/librustdoc/doctree.rs index fc0422b3a3f03..21fc135eaadae 100644 --- a/src/librustdoc/doctree.rs 
+++ b/src/librustdoc/doctree.rs @@ -13,14 +13,15 @@ pub use self::StructType::*; pub use self::TypeBound::*; -use syntax; -use syntax::codemap::Span; use syntax::abi; use syntax::ast; use syntax::ast::{Name, NodeId}; use syntax::attr; use syntax::ptr::P; -use rustc_front::hir; +use syntax_pos::{self, Span}; + +use rustc::hir; +use rustc::hir::def_id::CrateNum; pub struct Module { pub name: Option, @@ -30,6 +31,7 @@ pub struct Module { pub extern_crates: Vec, pub imports: Vec, pub structs: Vec, + pub unions: Vec, pub enums: Vec, pub fns: Vec, pub mods: Vec, @@ -52,16 +54,17 @@ impl Module { pub fn new(name: Option) -> Module { Module { name : name, - id: 0, + id: ast::CRATE_NODE_ID, vis: hir::Inherited, stab: None, depr: None, - where_outer: syntax::codemap::DUMMY_SP, - where_inner: syntax::codemap::DUMMY_SP, + where_outer: syntax_pos::DUMMY_SP, + where_inner: syntax_pos::DUMMY_SP, attrs : hir::HirVec::new(), extern_crates: Vec::new(), imports : Vec::new(), structs : Vec::new(), + unions : Vec::new(), enums : Vec::new(), fns : Vec::new(), mods : Vec::new(), @@ -80,14 +83,12 @@ impl Module { #[derive(Debug, Clone, RustcEncodable, RustcDecodable, Copy)] pub enum StructType { - /// A normal struct + /// A braced struct Plain, /// A tuple struct Tuple, - /// A newtype struct (tuple struct with one element) - Newtype, /// A unit struct - Unit + Unit, } pub enum TypeBound { @@ -108,6 +109,19 @@ pub struct Struct { pub whence: Span, } +pub struct Union { + pub vis: hir::Visibility, + pub stab: Option, + pub depr: Option, + pub id: NodeId, + pub struct_type: StructType, + pub name: Name, + pub generics: hir::Generics, + pub attrs: hir::HirVec, + pub fields: hir::HirVec, + pub whence: Span, +} + pub struct Enum { pub vis: hir::Visibility, pub stab: Option, @@ -232,6 +246,7 @@ pub struct Macro { pub struct ExternCrate { pub name: Name, + pub cnum: CrateNum, pub path: Option, pub vis: hir::Visibility, pub attrs: hir::HirVec, @@ -239,22 +254,19 @@ pub struct ExternCrate { } pub 
struct Import { + pub name: Name, pub id: NodeId, pub vis: hir::Visibility, pub attrs: hir::HirVec, - pub node: hir::ViewPath_, + pub path: hir::Path, + pub glob: bool, pub whence: Span, } -pub fn struct_type_from_def(sd: &hir::VariantData) -> StructType { - if !sd.is_struct() { - // We are in a tuple-struct - match sd.fields().len() { - 0 => Unit, - 1 => Newtype, - _ => Tuple - } - } else { - Plain +pub fn struct_type_from_def(vdata: &hir::VariantData) -> StructType { + match *vdata { + hir::VariantData::Struct(..) => Plain, + hir::VariantData::Tuple(..) => Tuple, + hir::VariantData::Unit(..) => Unit, } } diff --git a/src/librustdoc/externalfiles.rs b/src/librustdoc/externalfiles.rs index 57cb87e1b2d01..d78f00497ca55 100644 --- a/src/librustdoc/externalfiles.rs +++ b/src/librustdoc/externalfiles.rs @@ -11,64 +11,79 @@ use std::fs::File; use std::io::prelude::*; use std::io; -use std::path::{PathBuf, Path}; +use std::path::Path; use std::str; #[derive(Clone)] pub struct ExternalHtml{ + /// Content that will be included inline in the section of a + /// rendered Markdown file or generated documentation pub in_header: String, + /// Content that will be included inline between and the content of + /// a rendered Markdown file or generated documentation pub before_content: String, + /// Content that will be included inline between the content and of + /// a rendered Markdown file or generated documentation pub after_content: String } impl ExternalHtml { pub fn load(in_header: &[String], before_content: &[String], after_content: &[String]) -> Option { - match (load_external_files(in_header), - load_external_files(before_content), - load_external_files(after_content)) { - (Some(ih), Some(bc), Some(ac)) => Some(ExternalHtml { - in_header: ih, - before_content: bc, - after_content: ac - }), - _ => None - } + load_external_files(in_header) + .and_then(|ih| + load_external_files(before_content) + .map(|bc| (ih, bc)) + ) + .and_then(|(ih, bc)| + 
load_external_files(after_content) + .map(|ac| (ih, bc, ac)) + ) + .map(|(ih, bc, ac)| + ExternalHtml { + in_header: ih, + before_content: bc, + after_content: ac, + } + ) } } -pub fn load_string(input: &Path) -> io::Result> { - let mut f = try!(File::open(input)); - let mut d = Vec::new(); - try!(f.read_to_end(&mut d)); - Ok(str::from_utf8(&d).map(|s| s.to_string()).ok()) +pub enum LoadStringError { + ReadFail, + BadUtf8, } -macro_rules! load_or_return { - ($input: expr, $cant_read: expr, $not_utf8: expr) => { - { - let input = PathBuf::from(&$input[..]); - match ::externalfiles::load_string(&input) { - Err(e) => { - let _ = writeln!(&mut io::stderr(), - "error reading `{}`: {}", input.display(), e); - return $cant_read; - } - Ok(None) => { - let _ = writeln!(&mut io::stderr(), - "error reading `{}`: not UTF-8", input.display()); - return $not_utf8; - } - Ok(Some(s)) => s - } +pub fn load_string>(file_path: P) -> Result { + let file_path = file_path.as_ref(); + let mut contents = vec![]; + let result = File::open(file_path) + .and_then(|mut f| f.read_to_end(&mut contents)); + if let Err(e) = result { + let _ = writeln!(&mut io::stderr(), + "error reading `{}`: {}", + file_path.display(), e); + return Err(LoadStringError::ReadFail); + } + match str::from_utf8(&contents) { + Ok(s) => Ok(s.to_string()), + Err(_) => { + let _ = writeln!(&mut io::stderr(), + "error reading `{}`: not UTF-8", + file_path.display()); + Err(LoadStringError::BadUtf8) } } } -pub fn load_external_files(names: &[String]) -> Option { +fn load_external_files(names: &[String]) -> Option { let mut out = String::new(); for name in names { - out.push_str(&*load_or_return!(&name, None, None)); + let s = match load_string(name) { + Ok(s) => s, + Err(_) => return None, + }; + out.push_str(&s); out.push('\n'); } Some(out) diff --git a/src/librustdoc/flock.rs b/src/librustdoc/flock.rs deleted file mode 100644 index 72a5043178baf..0000000000000 --- a/src/librustdoc/flock.rs +++ /dev/null @@ -1,218 +0,0 @@ 
-// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Simple file-locking apis for each OS. -//! -//! This is not meant to be in the standard library, it does nothing with -//! green/native threading. This is just a bare-bones enough solution for -//! librustdoc, it is not production quality at all. - -#![allow(non_camel_case_types)] - -pub use self::imp::Lock; - -#[cfg(unix)] -mod imp { - use std::ffi::{CString, OsStr}; - use std::os::unix::prelude::*; - use std::path::Path; - use std::io; - use libc; - - #[cfg(target_os = "linux")] - mod os { - use libc; - - pub struct flock { - pub l_type: libc::c_short, - pub l_whence: libc::c_short, - pub l_start: libc::off_t, - pub l_len: libc::off_t, - pub l_pid: libc::pid_t, - - // not actually here, but brings in line with freebsd - pub l_sysid: libc::c_int, - } - - pub const F_WRLCK: libc::c_short = 1; - pub const F_UNLCK: libc::c_short = 2; - pub const F_SETLK: libc::c_int = 6; - pub const F_SETLKW: libc::c_int = 7; - } - - #[cfg(target_os = "freebsd")] - mod os { - use libc; - - pub struct flock { - pub l_start: libc::off_t, - pub l_len: libc::off_t, - pub l_pid: libc::pid_t, - pub l_type: libc::c_short, - pub l_whence: libc::c_short, - pub l_sysid: libc::c_int, - } - - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 12; - pub const F_SETLKW: libc::c_int = 13; - } - - #[cfg(any(target_os = "dragonfly", - target_os = "bitrig", - target_os = "netbsd", - target_os = "openbsd"))] - mod os { - use libc; - - pub struct flock { - pub l_start: libc::off_t, - pub l_len: libc::off_t, - pub l_pid: libc::pid_t, - pub l_type: libc::c_short, - pub l_whence: 
libc::c_short, - - // not actually here, but brings in line with freebsd - pub l_sysid: libc::c_int, - } - - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 8; - pub const F_SETLKW: libc::c_int = 9; - } - - #[cfg(any(target_os = "macos", target_os = "ios"))] - mod os { - use libc; - - pub struct flock { - pub l_start: libc::off_t, - pub l_len: libc::off_t, - pub l_pid: libc::pid_t, - pub l_type: libc::c_short, - pub l_whence: libc::c_short, - - // not actually here, but brings in line with freebsd - pub l_sysid: libc::c_int, - } - - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 8; - pub const F_SETLKW: libc::c_int = 9; - } - - pub struct Lock { - fd: libc::c_int, - } - - impl Lock { - pub fn new(p: &Path) -> Lock { - let os: &OsStr = p.as_ref(); - let buf = CString::new(os.as_bytes()).unwrap(); - let fd = unsafe { - libc::open(buf.as_ptr(), libc::O_RDWR | libc::O_CREAT, - libc::S_IRWXU as libc::c_int) - }; - assert!(fd > 0, "failed to open lockfile: {}", - io::Error::last_os_error()); - let flock = os::flock { - l_start: 0, - l_len: 0, - l_pid: 0, - l_whence: libc::SEEK_SET as libc::c_short, - l_type: os::F_WRLCK, - l_sysid: 0, - }; - let ret = unsafe { - libc::fcntl(fd, os::F_SETLKW, &flock) - }; - if ret == -1 { - let err = io::Error::last_os_error(); - unsafe { libc::close(fd); } - panic!("could not lock `{}`: {}", p.display(), err); - } - Lock { fd: fd } - } - } - - impl Drop for Lock { - fn drop(&mut self) { - let flock = os::flock { - l_start: 0, - l_len: 0, - l_pid: 0, - l_whence: libc::SEEK_SET as libc::c_short, - l_type: os::F_UNLCK, - l_sysid: 0, - }; - unsafe { - libc::fcntl(self.fd, os::F_SETLK, &flock); - libc::close(self.fd); - } - } - } -} - -#[cfg(windows)] -#[allow(bad_style)] -mod imp { - use std::io; - use std::mem; - use std::os::windows::prelude::*; - use std::os::windows::raw::HANDLE; - use std::path::Path; - use 
std::fs::{File, OpenOptions}; - - type DWORD = u32; - type LPOVERLAPPED = *mut OVERLAPPED; - type BOOL = i32; - const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002; - - #[repr(C)] - struct OVERLAPPED { - Internal: usize, - InternalHigh: usize, - Pointer: *mut u8, - hEvent: *mut u8, - } - - extern "system" { - fn LockFileEx(hFile: HANDLE, - dwFlags: DWORD, - dwReserved: DWORD, - nNumberOfBytesToLockLow: DWORD, - nNumberOfBytesToLockHigh: DWORD, - lpOverlapped: LPOVERLAPPED) -> BOOL; - } - - pub struct Lock { - _file: File, - } - - impl Lock { - pub fn new(p: &Path) -> Lock { - let f = OpenOptions::new().read(true).write(true).create(true) - .open(p).unwrap(); - let ret = unsafe { - let mut overlapped: OVERLAPPED = mem::zeroed(); - LockFileEx(f.as_raw_handle(), LOCKFILE_EXCLUSIVE_LOCK, 0, 100, 0, - &mut overlapped) - }; - if ret == 0 { - let err = io::Error::last_os_error(); - panic!("could not lock `{}`: {}", p.display(), err); - } - Lock { _file: f } - } - } -} diff --git a/src/librustdoc/fold.rs b/src/librustdoc/fold.rs index 5a4f95d1a1a5a..e269d940bfabf 100644 --- a/src/librustdoc/fold.rs +++ b/src/librustdoc/fold.rs @@ -9,8 +9,26 @@ // except according to those terms. use clean::*; -use std::collections::HashMap; -use std::mem::{replace, swap}; + +pub enum FoldItem { + Retain(Item), + Strip(Item), + Erase, +} + +impl FoldItem { + pub fn fold(self) -> Option { + match self { + FoldItem::Erase => None, + FoldItem::Retain(i) => Some(i), + FoldItem::Strip(item@ Item { inner: StrippedItem(..), .. } ) => Some(item), + FoldItem::Strip(mut i) => { + i.inner = StrippedItem(box i.inner); + Some(i) + } + } + } +} pub trait DocFolder : Sized { fn fold_item(&mut self, item: Item) -> Option { @@ -18,51 +36,65 @@ pub trait DocFolder : Sized { } /// don't override! 
- fn fold_item_recur(&mut self, item: Item) -> Option { - let Item { attrs, name, source, visibility, def_id, inner, stability, deprecation } = item; - let inner = inner; - let inner = match inner { + fn fold_inner_recur(&mut self, inner: ItemEnum) -> ItemEnum { + match inner { + StrippedItem(..) => unreachable!(), + ModuleItem(i) => { + ModuleItem(self.fold_mod(i)) + }, StructItem(mut i) => { - let mut foo = Vec::new(); swap(&mut foo, &mut i.fields); - let num_fields = foo.len(); - i.fields.extend(foo.into_iter().filter_map(|x| self.fold_item(x))); - i.fields_stripped |= num_fields != i.fields.len(); + let num_fields = i.fields.len(); + i.fields = i.fields.into_iter().filter_map(|x| self.fold_item(x)).collect(); + i.fields_stripped |= num_fields != i.fields.len() || + i.fields.iter().any(|f| f.is_stripped()); StructItem(i) }, - ModuleItem(i) => { - ModuleItem(self.fold_mod(i)) + UnionItem(mut i) => { + let num_fields = i.fields.len(); + i.fields = i.fields.into_iter().filter_map(|x| self.fold_item(x)).collect(); + i.fields_stripped |= num_fields != i.fields.len() || + i.fields.iter().any(|f| f.is_stripped()); + UnionItem(i) }, EnumItem(mut i) => { - let mut foo = Vec::new(); swap(&mut foo, &mut i.variants); - let num_variants = foo.len(); - i.variants.extend(foo.into_iter().filter_map(|x| self.fold_item(x))); - i.variants_stripped |= num_variants != i.variants.len(); + let num_variants = i.variants.len(); + i.variants = i.variants.into_iter().filter_map(|x| self.fold_item(x)).collect(); + i.variants_stripped |= num_variants != i.variants.len() || + i.variants.iter().any(|f| f.is_stripped()); EnumItem(i) }, TraitItem(mut i) => { - let mut foo = Vec::new(); swap(&mut foo, &mut i.items); - i.items.extend(foo.into_iter().filter_map(|x| self.fold_item(x))); + i.items = i.items.into_iter().filter_map(|x| self.fold_item(x)).collect(); TraitItem(i) }, ImplItem(mut i) => { - let mut foo = Vec::new(); swap(&mut foo, &mut i.items); - 
i.items.extend(foo.into_iter().filter_map(|x| self.fold_item(x))); + i.items = i.items.into_iter().filter_map(|x| self.fold_item(x)).collect(); ImplItem(i) }, VariantItem(i) => { let i2 = i.clone(); // this clone is small match i.kind { - StructVariant(mut j) => { - let mut foo = Vec::new(); swap(&mut foo, &mut j.fields); - let num_fields = foo.len(); - j.fields.extend(foo.into_iter().filter_map(|x| self.fold_item(x))); - j.fields_stripped |= num_fields != j.fields.len(); - VariantItem(Variant {kind: StructVariant(j), ..i2}) + VariantKind::Struct(mut j) => { + let num_fields = j.fields.len(); + j.fields = j.fields.into_iter().filter_map(|x| self.fold_item(x)).collect(); + j.fields_stripped |= num_fields != j.fields.len() || + j.fields.iter().any(|f| f.is_stripped()); + VariantItem(Variant {kind: VariantKind::Struct(j), ..i2}) }, _ => VariantItem(i2) } }, x => x + } + } + + /// don't override! + fn fold_item_recur(&mut self, item: Item) -> Option { + let Item { attrs, name, source, visibility, def_id, inner, stability, deprecation } = item; + + let inner = match inner { + StrippedItem(box i) => StrippedItem(box self.fold_inner_recur(i)), + _ => self.fold_inner_recur(inner), }; Some(Item { attrs: attrs, name: name, source: source, inner: inner, @@ -78,16 +110,12 @@ pub trait DocFolder : Sized { } fn fold_crate(&mut self, mut c: Crate) -> Crate { - c.module = match replace(&mut c.module, None) { - Some(module) => self.fold_item(module), None => None - }; - let external_traits = replace(&mut c.external_traits, HashMap::new()); - c.external_traits = external_traits.into_iter().map(|(k, mut v)| { - let items = replace(&mut v.items, Vec::new()); - v.items = items.into_iter().filter_map(|i| self.fold_item(i)) - .collect(); + c.module = c.module.and_then(|module| self.fold_item(module)); + + c.external_traits = c.external_traits.into_iter().map(|(k, mut v)| { + v.items = v.items.into_iter().filter_map(|i| self.fold_item(i)).collect(); (k, v) }).collect(); - return c; + c } 
} diff --git a/src/librustdoc/html/escape.rs b/src/librustdoc/html/escape.rs index f04e1cc75202f..1173e6447f50c 100644 --- a/src/librustdoc/html/escape.rs +++ b/src/librustdoc/html/escape.rs @@ -29,7 +29,7 @@ impl<'a> fmt::Display for Escape<'a> { for (i, ch) in s.bytes().enumerate() { match ch as char { '<' | '>' | '&' | '\'' | '"' => { - try!(fmt.write_str(&pile_o_bits[last.. i])); + fmt.write_str(&pile_o_bits[last.. i])?; let s = match ch as char { '>' => ">", '<' => "<", @@ -38,7 +38,7 @@ impl<'a> fmt::Display for Escape<'a> { '"' => """, _ => unreachable!() }; - try!(fmt.write_str(s)); + fmt.write_str(s)?; last = i + 1; } _ => {} @@ -46,7 +46,7 @@ impl<'a> fmt::Display for Escape<'a> { } if last < s.len() { - try!(fmt.write_str(&pile_o_bits[last..])); + fmt.write_str(&pile_o_bits[last..])?; } Ok(()) } diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 1a4085e30e8f3..6dc6e80dae0b8 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -18,20 +18,21 @@ use std::fmt; use std::iter::repeat; -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId}; +use rustc::hir::def_id::DefId; use syntax::abi::Abi; -use rustc_front::hir; +use rustc::hir; -use clean; +use clean::{self, PrimitiveType}; +use core::DocAccessLevels; use html::item_type::ItemType; +use html::escape::Escape; use html::render; use html::render::{cache, CURRENT_LOCATION_KEY}; /// Helper to render an optional visibility with a space after it (if the /// visibility is preset) #[derive(Copy, Clone)] -pub struct VisSpace(pub Option); +pub struct VisSpace<'a>(pub &'a Option); /// Similarly to VisSpace, this structure is used to render a function style with a /// space after it. #[derive(Copy, Clone)] @@ -41,7 +42,7 @@ pub struct UnsafetySpace(pub hir::Unsafety); #[derive(Copy, Clone)] pub struct ConstnessSpace(pub hir::Constness); /// Wrapper struct for properly emitting a method declaration. 
-pub struct Method<'a>(pub &'a clean::SelfTy, pub &'a clean::FnDecl); +pub struct Method<'a>(pub &'a clean::FnDecl, pub usize); /// Similar to VisSpace, but used for mutability #[derive(Copy, Clone)] pub struct MutableSpace(pub clean::Mutability); @@ -49,16 +50,21 @@ pub struct MutableSpace(pub clean::Mutability); #[derive(Copy, Clone)] pub struct RawMutableSpace(pub clean::Mutability); /// Wrapper struct for emitting a where clause from Generics. -pub struct WhereClause<'a>(pub &'a clean::Generics); +pub struct WhereClause<'a>(pub &'a clean::Generics, pub usize); /// Wrapper struct for emitting type parameter bounds. pub struct TyParamBounds<'a>(pub &'a [clean::TyParamBound]); /// Wrapper struct for emitting a comma-separated list of items pub struct CommaSep<'a, T: 'a>(pub &'a [T]); pub struct AbiSpace(pub Abi); -impl VisSpace { - pub fn get(&self) -> Option { - let VisSpace(v) = *self; v +pub struct HRef<'a> { + pub did: DefId, + pub text: &'a str, +} + +impl<'a> VisSpace<'a> { + pub fn get(self) -> &'a Option { + let VisSpace(v) = self; v } } @@ -77,8 +83,8 @@ impl ConstnessSpace { impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for (i, item) in self.0.iter().enumerate() { - if i != 0 { try!(write!(f, ", ")); } - try!(write!(f, "{}", item)); + if i != 0 { write!(f, ", ")?; } + fmt::Display::fmt(item, f)?; } Ok(()) } @@ -89,9 +95,9 @@ impl<'a> fmt::Display for TyParamBounds<'a> { let &TyParamBounds(bounds) = self; for (i, bound) in bounds.iter().enumerate() { if i > 0 { - try!(f.write_str(" + ")); + f.write_str(" + ")?; } - try!(write!(f, "{}", *bound)); + fmt::Display::fmt(bound, f)?; } Ok(()) } @@ -100,80 +106,128 @@ impl<'a> fmt::Display for TyParamBounds<'a> { impl fmt::Display for clean::Generics { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.lifetimes.is_empty() && self.type_params.is_empty() { return Ok(()) } - try!(f.write_str("<")); + if f.alternate() { + 
f.write_str("<")?; + } else { + f.write_str("<")?; + } for (i, life) in self.lifetimes.iter().enumerate() { if i > 0 { - try!(f.write_str(", ")); + f.write_str(", ")?; } - try!(write!(f, "{}", *life)); + write!(f, "{}", *life)?; } if !self.type_params.is_empty() { if !self.lifetimes.is_empty() { - try!(f.write_str(", ")); + f.write_str(", ")?; } for (i, tp) in self.type_params.iter().enumerate() { if i > 0 { - try!(f.write_str(", ")) + f.write_str(", ")? } - try!(f.write_str(&tp.name)); + f.write_str(&tp.name)?; if !tp.bounds.is_empty() { - try!(write!(f, ": {}", TyParamBounds(&tp.bounds))); + if f.alternate() { + write!(f, ": {:#}", TyParamBounds(&tp.bounds))?; + } else { + write!(f, ": {}", TyParamBounds(&tp.bounds))?; + } } - match tp.default { - Some(ref ty) => { try!(write!(f, " = {}", ty)); }, - None => {} + if let Some(ref ty) = tp.default { + if f.alternate() { + write!(f, " = {:#}", ty)?; + } else { + write!(f, " = {}", ty)?; + } }; } } - try!(f.write_str(">")); + if f.alternate() { + f.write_str(">")?; + } else { + f.write_str(">")?; + } Ok(()) } } impl<'a> fmt::Display for WhereClause<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let &WhereClause(gens) = self; + let &WhereClause(gens, pad) = self; if gens.where_predicates.is_empty() { return Ok(()); } - try!(f.write_str(" where ")); + let mut clause = String::new(); + if f.alternate() { + clause.push_str(" where "); + } else { + clause.push_str(" where "); + } for (i, pred) in gens.where_predicates.iter().enumerate() { if i > 0 { - try!(f.write_str(", ")); + if f.alternate() { + clause.push_str(", "); + } else { + clause.push_str(",
"); + } } match pred { &clean::WherePredicate::BoundPredicate { ref ty, ref bounds } => { let bounds = bounds; - try!(write!(f, "{}: {}", ty, TyParamBounds(bounds))); + if f.alternate() { + clause.push_str(&format!("{:#}: {:#}", ty, TyParamBounds(bounds))); + } else { + clause.push_str(&format!("{}: {}", ty, TyParamBounds(bounds))); + } } &clean::WherePredicate::RegionPredicate { ref lifetime, ref bounds } => { - try!(write!(f, "{}: ", lifetime)); + clause.push_str(&format!("{}: ", lifetime)); for (i, lifetime) in bounds.iter().enumerate() { if i > 0 { - try!(f.write_str(" + ")); + clause.push_str(" + "); } - try!(write!(f, "{}", lifetime)); + clause.push_str(&format!("{}", lifetime)); } } &clean::WherePredicate::EqPredicate { ref lhs, ref rhs } => { - try!(write!(f, "{} == {}", lhs, rhs)); + if f.alternate() { + clause.push_str(&format!("{:#} == {:#}", lhs, rhs)); + } else { + clause.push_str(&format!("{} == {}", lhs, rhs)); + } } } } - try!(f.write_str("
")); - Ok(()) + if !f.alternate() { + clause.push_str("
"); + let plain = format!("{:#}", self); + if plain.len() > 80 { + //break it onto its own line regardless, but make sure method impls and trait + //blocks keep their fixed padding (2 and 9, respectively) + let padding = if pad > 10 { + clause = clause.replace("class='where'", "class='where fmt-newline'"); + repeat(" ").take(8).collect::() + } else { + repeat(" ").take(pad + 6).collect::() + }; + clause = clause.replace("
", &format!("
{}", padding)); + } else { + clause = clause.replace("
", " "); + } + } + write!(f, "{}", clause) } } impl fmt::Display for clean::Lifetime { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(f.write_str(self.get_ref())); + f.write_str(self.get_ref())?; Ok(()) } } @@ -181,16 +235,28 @@ impl fmt::Display for clean::Lifetime { impl fmt::Display for clean::PolyTrait { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if !self.lifetimes.is_empty() { - try!(f.write_str("for<")); + if f.alternate() { + f.write_str("for<")?; + } else { + f.write_str("for<")?; + } for (i, lt) in self.lifetimes.iter().enumerate() { if i > 0 { - try!(f.write_str(", ")); + f.write_str(", ")?; } - try!(write!(f, "{}", lt)); + write!(f, "{}", lt)?; + } + if f.alternate() { + f.write_str("> ")?; + } else { + f.write_str("> ")?; } - try!(f.write_str("> ")); } - write!(f, "{}", self.trait_) + if f.alternate() { + write!(f, "{:#}", self.trait_) + } else { + write!(f, "{}", self.trait_) + } } } @@ -205,7 +271,11 @@ impl fmt::Display for clean::TyParamBound { hir::TraitBoundModifier::None => "", hir::TraitBoundModifier::Maybe => "?", }; - write!(f, "{}{}", modifier_str, *ty) + if f.alternate() { + write!(f, "{}{:#}", modifier_str, *ty) + } else { + write!(f, "{}{}", modifier_str, *ty) + } } } } @@ -218,46 +288,69 @@ impl fmt::Display for clean::PathParameters { ref lifetimes, ref types, ref bindings } => { if !lifetimes.is_empty() || !types.is_empty() || !bindings.is_empty() { - try!(f.write_str("<")); + if f.alternate() { + f.write_str("<")?; + } else { + f.write_str("<")?; + } let mut comma = false; for lifetime in lifetimes { if comma { - try!(f.write_str(", ")); + f.write_str(", ")?; } comma = true; - try!(write!(f, "{}", *lifetime)); + write!(f, "{}", *lifetime)?; } for ty in types { if comma { - try!(f.write_str(", ")); + f.write_str(", ")?; } comma = true; - try!(write!(f, "{}", *ty)); + if f.alternate() { + write!(f, "{:#}", *ty)?; + } else { + write!(f, "{}", *ty)?; + } } for binding in bindings { if comma { - 
try!(f.write_str(", ")); + f.write_str(", ")?; } comma = true; - try!(write!(f, "{}", *binding)); + if f.alternate() { + write!(f, "{:#}", *binding)?; + } else { + write!(f, "{}", *binding)?; + } + } + if f.alternate() { + f.write_str(">")?; + } else { + f.write_str(">")?; } - try!(f.write_str(">")); } } clean::PathParameters::Parenthesized { ref inputs, ref output } => { - try!(f.write_str("(")); + f.write_str("(")?; let mut comma = false; for ty in inputs { if comma { - try!(f.write_str(", ")); + f.write_str(", ")?; } comma = true; - try!(write!(f, "{}", *ty)); + if f.alternate() { + write!(f, "{:#}", *ty)?; + } else { + write!(f, "{}", *ty)?; + } } - try!(f.write_str(")")); + f.write_str(")")?; if let Some(ref ty) = *output { - try!(f.write_str(" -> ")); - try!(write!(f, "{}", ty)); + if f.alternate() { + write!(f, " -> {:#}", ty)?; + } else { + write!(f, " -> {}", ty)?; + } } } } @@ -267,22 +360,30 @@ impl fmt::Display for clean::PathParameters { impl fmt::Display for clean::PathSegment { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(f.write_str(&self.name)); - write!(f, "{}", self.params) + f.write_str(&self.name)?; + if f.alternate() { + write!(f, "{:#}", self.params) + } else { + write!(f, "{}", self.params) + } } } impl fmt::Display for clean::Path { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.global { - try!(f.write_str("::")) + f.write_str("::")? } for (i, seg) in self.segments.iter().enumerate() { if i > 0 { - try!(f.write_str("::")) + f.write_str("::")? 
+ } + if f.alternate() { + write!(f, "{:#}", seg)?; + } else { + write!(f, "{}", seg)?; } - try!(write!(f, "{}", seg)); } Ok(()) } @@ -290,18 +391,24 @@ impl fmt::Display for clean::Path { pub fn href(did: DefId) -> Option<(String, ItemType, Vec)> { let cache = cache(); + if !did.is_local() && !cache.access_levels.is_doc_reachable(did) { + return None + } + let loc = CURRENT_LOCATION_KEY.with(|l| l.borrow().clone()); - let &(ref fqp, shortty) = match cache.paths.get(&did) { - Some(p) => p, - None => return None, - }; - let mut url = if did.is_local() || cache.inlined.contains(&did) { - repeat("../").take(loc.len()).collect::() - } else { - match cache.extern_locations[&did.krate] { - (_, render::Remote(ref s)) => s.to_string(), - (_, render::Local) => repeat("../").take(loc.len()).collect(), - (_, render::Unknown) => return None, + let (fqp, shortty, mut url) = match cache.paths.get(&did) { + Some(&(ref fqp, shortty)) => { + (fqp, shortty, repeat("../").take(loc.len()).collect()) + } + None => match cache.external_paths.get(&did) { + Some(&(ref fqp, shortty)) => { + (fqp, shortty, match cache.extern_locations[&did.krate] { + (.., render::Remote(ref s)) => s.to_string(), + (.., render::Local) => repeat("../").take(loc.len()).collect(), + (.., render::Unknown) => return None, + }) + } + None => return None, } }; for component in &fqp[..fqp.len() - 1] { @@ -314,7 +421,7 @@ pub fn href(did: DefId) -> Option<(String, ItemType, Vec)> { url.push_str("/index.html"); } _ => { - url.push_str(shortty.to_static_str()); + url.push_str(shortty.css_class()); url.push_str("."); url.push_str(fqp.last().unwrap()); url.push_str(".html"); @@ -338,34 +445,30 @@ fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path, match rel_root { Some(mut root) => { for seg in &path.segments[..amt] { - if "super" == seg.name || "self" == seg.name { - try!(write!(w, "{}::", seg.name)); + if "super" == seg.name || "self" == seg.name || w.alternate() { + write!(w, "{}::", seg.name)?; } 
else { root.push_str(&seg.name); root.push_str("/"); - try!(write!(w, "{}::", - root, - seg.name)); + write!(w, "{}::", + root, + seg.name)?; } } } None => { for seg in &path.segments[..amt] { - try!(write!(w, "{}::", seg.name)); + write!(w, "{}::", seg.name)?; } } } } - - match href(did) { - Some((url, shortty, fqp)) => { - try!(write!(w, "{}", - shortty, url, fqp.join("::"), last.name)); - } - _ => try!(write!(w, "{}", last.name)), + if w.alternate() { + write!(w, "{:#}{:#}", HRef::new(did, &last.name), last.params)?; + } else { + write!(w, "{}{}", HRef::new(did, &last.name), last.params)?; } - try!(write!(w, "{}", last.params)); Ok(()) } @@ -374,44 +477,41 @@ fn primitive_link(f: &mut fmt::Formatter, name: &str) -> fmt::Result { let m = cache(); let mut needs_termination = false; - match m.primitive_locations.get(&prim) { - Some(&LOCAL_CRATE) => { - let len = CURRENT_LOCATION_KEY.with(|s| s.borrow().len()); - let len = if len == 0 {0} else {len - 1}; - try!(write!(f, "", - repeat("../").take(len).collect::(), - prim.to_url_str())); - needs_termination = true; - } - Some(&cnum) => { - let path = &m.paths[&DefId { - krate: cnum, - index: CRATE_DEF_INDEX, - }]; - let loc = match m.extern_locations[&cnum] { - (_, render::Remote(ref s)) => Some(s.to_string()), - (_, render::Local) => { - let len = CURRENT_LOCATION_KEY.with(|s| s.borrow().len()); - Some(repeat("../").take(len).collect::()) - } - (_, render::Unknown) => None, - }; - match loc { - Some(root) => { - try!(write!(f, "", - root, - path.0.first().unwrap(), - prim.to_url_str())); + if !f.alternate() { + match m.primitive_locations.get(&prim) { + Some(&def_id) if def_id.is_local() => { + let len = CURRENT_LOCATION_KEY.with(|s| s.borrow().len()); + let len = if len == 0 {0} else {len - 1}; + write!(f, "", + repeat("../").take(len).collect::(), + prim.to_url_str())?; + needs_termination = true; + } + Some(&def_id) => { + let loc = match m.extern_locations[&def_id.krate] { + (ref cname, _, render::Remote(ref s)) 
=> { + Some((cname, s.to_string())) + } + (ref cname, _, render::Local) => { + let len = CURRENT_LOCATION_KEY.with(|s| s.borrow().len()); + Some((cname, repeat("../").take(len).collect::())) + } + (.., render::Unknown) => None, + }; + if let Some((cname, root)) = loc { + write!(f, "", + root, + cname, + prim.to_url_str())?; needs_termination = true; } - None => {} } + None => {} } - None => {} } - try!(write!(f, "{}", name)); + write!(f, "{}", name)?; if needs_termination { - try!(write!(f, "")); + write!(f, "")?; } Ok(()) } @@ -422,8 +522,8 @@ fn tybounds(w: &mut fmt::Formatter, match *typarams { Some(ref params) => { for param in params { - try!(write!(w, " + ")); - try!(write!(w, "{}", *param)); + write!(w, " + ")?; + fmt::Display::fmt(param, w)?; } Ok(()) } @@ -431,6 +531,26 @@ fn tybounds(w: &mut fmt::Formatter, } } +impl<'a> HRef<'a> { + pub fn new(did: DefId, text: &'a str) -> HRef<'a> { + HRef { did: did, text: text } + } +} + +impl<'a> fmt::Display for HRef<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match href(self.did) { + Some((url, shortty, fqp)) => if !f.alternate() { + write!(f, "{}", + shortty, url, fqp.join("::"), self.text) + } else { + write!(f, "{}", self.text) + }, + _ => write!(f, "{}", self.text), + } + } +} + impl fmt::Display for clean::Type { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { @@ -439,40 +559,76 @@ impl fmt::Display for clean::Type { } clean::ResolvedPath{ did, ref typarams, ref path, is_generic } => { // Paths like T::Output and Self::Output should be rendered with all segments - try!(resolved_path(f, did, path, is_generic)); + resolved_path(f, did, path, is_generic)?; tybounds(f, typarams) } clean::Infer => write!(f, "_"), - clean::Primitive(prim) => primitive_link(f, prim, prim.to_string()), + clean::Primitive(prim) => primitive_link(f, prim, prim.as_str()), clean::BareFunction(ref decl) => { - write!(f, "{}{}fn{}{}", - UnsafetySpace(decl.unsafety), - match &*decl.abi { - "" => " 
extern ".to_string(), - "\"Rust\"" => "".to_string(), - s => format!(" extern {} ", s) - }, - decl.generics, - decl.decl) + if f.alternate() { + write!(f, "{}{}fn{:#}{:#}", + UnsafetySpace(decl.unsafety), + AbiSpace(decl.abi), + decl.generics, + decl.decl) + } else { + write!(f, "{}{}fn{}{}", + UnsafetySpace(decl.unsafety), + AbiSpace(decl.abi), + decl.generics, + decl.decl) + } } clean::Tuple(ref typs) => { - primitive_link(f, clean::PrimitiveTuple, - &*match &**typs { - [ref one] => format!("({},)", one), - many => format!("({})", CommaSep(&many)), - }) + match &typs[..] { + &[] => primitive_link(f, PrimitiveType::Tuple, "()"), + &[ref one] => { + primitive_link(f, PrimitiveType::Tuple, "(")?; + //carry f.alternate() into this display w/o branching manually + fmt::Display::fmt(one, f)?; + primitive_link(f, PrimitiveType::Tuple, ",)") + } + many => { + primitive_link(f, PrimitiveType::Tuple, "(")?; + fmt::Display::fmt(&CommaSep(&many), f)?; + primitive_link(f, PrimitiveType::Tuple, ")") + } + } } clean::Vector(ref t) => { - primitive_link(f, clean::Slice, &format!("[{}]", **t)) + primitive_link(f, PrimitiveType::Slice, &format!("["))?; + fmt::Display::fmt(t, f)?; + primitive_link(f, PrimitiveType::Slice, &format!("]")) } clean::FixedVector(ref t, ref s) => { - primitive_link(f, clean::PrimitiveType::Array, - &format!("[{}; {}]", **t, *s)) + primitive_link(f, PrimitiveType::Array, "[")?; + fmt::Display::fmt(t, f)?; + if f.alternate() { + primitive_link(f, PrimitiveType::Array, + &format!("; {}]", s)) + } else { + primitive_link(f, PrimitiveType::Array, + &format!("; {}]", Escape(s))) + } } - clean::Bottom => f.write_str("!"), + clean::Never => f.write_str("!"), clean::RawPointer(m, ref t) => { - primitive_link(f, clean::PrimitiveType::PrimitiveRawPointer, - &format!("*{}{}", RawMutableSpace(m), **t)) + match **t { + clean::Generic(_) | clean::ResolvedPath {is_generic: true, ..} => { + if f.alternate() { + primitive_link(f, clean::PrimitiveType::RawPointer, + 
&format!("*{}{:#}", RawMutableSpace(m), t)) + } else { + primitive_link(f, clean::PrimitiveType::RawPointer, + &format!("*{}{}", RawMutableSpace(m), t)) + } + } + _ => { + primitive_link(f, clean::PrimitiveType::RawPointer, + &format!("*{}", RawMutableSpace(m)))?; + fmt::Display::fmt(t, f) + } + } } clean::BorrowedRef{ lifetime: ref l, mutability, type_: ref ty} => { let lt = match *l { @@ -484,27 +640,60 @@ impl fmt::Display for clean::Type { clean::Vector(ref bt) => { // BorrowedRef{ ... Vector(T) } is &[T] match **bt { clean::Generic(_) => - primitive_link(f, clean::Slice, - &format!("&{}{}[{}]", lt, m, **bt)), + if f.alternate() { + primitive_link(f, PrimitiveType::Slice, + &format!("&{}{}[{:#}]", lt, m, **bt)) + } else { + primitive_link(f, PrimitiveType::Slice, + &format!("&{}{}[{}]", lt, m, **bt)) + }, _ => { - try!(primitive_link(f, clean::Slice, - &format!("&{}{}[", lt, m))); - try!(write!(f, "{}", **bt)); - primitive_link(f, clean::Slice, "]") + if f.alternate() { + primitive_link(f, PrimitiveType::Slice, + &format!("&{}{}[", lt, m))?; + write!(f, "{:#}", **bt)?; + } else { + primitive_link(f, PrimitiveType::Slice, + &format!("&{}{}[", lt, m))?; + write!(f, "{}", **bt)?; + } + primitive_link(f, PrimitiveType::Slice, "]") } } } _ => { - write!(f, "&{}{}{}", lt, m, **ty) + if f.alternate() { + write!(f, "&{}{}{:#}", lt, m, **ty) + } else { + write!(f, "&{}{}{}", lt, m, **ty) + } } } } clean::PolyTraitRef(ref bounds) => { for (i, bound) in bounds.iter().enumerate() { if i != 0 { - try!(write!(f, " + ")); + write!(f, " + ")?; + } + if f.alternate() { + write!(f, "{:#}", *bound)?; + } else { + write!(f, "{}", *bound)?; + } + } + Ok(()) + } + clean::ImplTrait(ref bounds) => { + write!(f, "impl ")?; + for (i, bound) in bounds.iter().enumerate() { + if i != 0 { + write!(f, " + ")?; + } + if f.alternate() { + write!(f, "{:#}", *bound)?; + } else { + write!(f, "{}", *bound)?; } - try!(write!(f, "{}", *bound)); } Ok(()) } @@ -523,16 +712,24 @@ impl fmt::Display for 
clean::Type { ref self_type, trait_: box clean::ResolvedPath { did, ref typarams, .. }, } => { - try!(write!(f, "{}::", self_type)); + if f.alternate() { + write!(f, "{:#}::", self_type)?; + } else { + write!(f, "{}::", self_type)?; + } let path = clean::Path::singleton(name.clone()); - try!(resolved_path(f, did, &path, false)); + resolved_path(f, did, &path, false)?; // FIXME: `typarams` are not rendered, and this seems bad? drop(typarams); Ok(()) } clean::QPath { ref name, ref self_type, ref trait_ } => { - write!(f, "<{} as {}>::{}", self_type, trait_, name) + if f.alternate() { + write!(f, "<{:#} as {:#}>::{}", self_type, trait_, name) + } else { + write!(f, "<{} as {}>::{}", self_type, trait_, name) + } } clean::Unique(..) => { panic!("should have been cleaned") @@ -541,27 +738,70 @@ impl fmt::Display for clean::Type { } } +fn fmt_impl(i: &clean::Impl, f: &mut fmt::Formatter, link_trait: bool) -> fmt::Result { + let mut plain = String::new(); + + if f.alternate() { + write!(f, "impl{:#} ", i.generics)?; + } else { + write!(f, "impl{} ", i.generics)?; + } + plain.push_str(&format!("impl{:#} ", i.generics)); + + if let Some(ref ty) = i.trait_ { + if i.polarity == Some(clean::ImplPolarity::Negative) { + write!(f, "!")?; + plain.push_str("!"); + } + + if link_trait { + fmt::Display::fmt(ty, f)?; + plain.push_str(&format!("{:#}", ty)); + } else { + match *ty { + clean::ResolvedPath{ typarams: None, ref path, is_generic: false, .. 
} => { + let last = path.segments.last().unwrap(); + fmt::Display::fmt(&last.name, f)?; + fmt::Display::fmt(&last.params, f)?; + plain.push_str(&format!("{:#}{:#}", last.name, last.params)); + } + _ => unreachable!(), + } + } + write!(f, " for ")?; + plain.push_str(" for "); + } + + fmt::Display::fmt(&i.for_, f)?; + plain.push_str(&format!("{:#}", i.for_)); + + fmt::Display::fmt(&WhereClause(&i.generics, plain.len() + 1), f)?; + Ok(()) +} + impl fmt::Display for clean::Impl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - try!(write!(f, "impl{} ", self.generics)); - if let Some(ref ty) = self.trait_ { - try!(write!(f, "{}{} for ", - if self.polarity == Some(clean::ImplPolarity::Negative) { "!" } else { "" }, - *ty)); - } - try!(write!(f, "{}{}", self.for_, WhereClause(&self.generics))); - Ok(()) + fmt_impl(self, f, true) } } +// The difference from above is that trait is not hyperlinked. +pub fn fmt_impl_for_trait_page(i: &clean::Impl, f: &mut fmt::Formatter) -> fmt::Result { + fmt_impl(i, f, false) +} + impl fmt::Display for clean::Arguments { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for (i, input) in self.values.iter().enumerate() { - if i > 0 { try!(write!(f, ", ")); } if !input.name.is_empty() { - try!(write!(f, "{}: ", input.name)); + write!(f, "{}: ", input.name)?; } - try!(write!(f, "{}", input.type_)); + if f.alternate() { + write!(f, "{:#}", input.type_)?; + } else { + write!(f, "{}", input.type_)?; + } + if i + 1 < self.values.len() { write!(f, ", ")?; } } Ok(()) } @@ -571,9 +811,9 @@ impl fmt::Display for clean::FunctionRetTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { clean::Return(clean::Tuple(ref tys)) if tys.is_empty() => Ok(()), + clean::Return(ref ty) if f.alternate() => write!(f, " -> {:#}", ty), clean::Return(ref ty) => write!(f, " -> {}", ty), clean::DefaultReturn => Ok(()), - clean::NoReturn => write!(f, " -> !") } } } @@ -581,46 +821,121 @@ impl fmt::Display for clean::FunctionRetTy { impl 
fmt::Display for clean::FnDecl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.variadic { - write!(f, "({args}, ...){arrow}", args = self.inputs, arrow = self.output) + if f.alternate() { + write!(f, "({args:#}, ...){arrow:#}", args = self.inputs, arrow = self.output) + } else { + write!(f, "({args}, ...){arrow}", args = self.inputs, arrow = self.output) + } } else { - write!(f, "({args}){arrow}", args = self.inputs, arrow = self.output) + if f.alternate() { + write!(f, "({args:#}){arrow:#}", args = self.inputs, arrow = self.output) + } else { + write!(f, "({args}){arrow}", args = self.inputs, arrow = self.output) + } } } } impl<'a> fmt::Display for Method<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let Method(selfty, d) = *self; + let decl = self.0; + let indent = self.1; + let amp = if f.alternate() { "&" } else { "&" }; let mut args = String::new(); - match *selfty { - clean::SelfStatic => {}, - clean::SelfValue => args.push_str("self"), - clean::SelfBorrowed(Some(ref lt), mtbl) => { - args.push_str(&format!("&{} {}self", *lt, MutableSpace(mtbl))); - } - clean::SelfBorrowed(None, mtbl) => { - args.push_str(&format!("&{}self", MutableSpace(mtbl))); + let mut args_plain = String::new(); + for (i, input) in decl.inputs.values.iter().enumerate() { + if let Some(selfty) = input.to_self() { + match selfty { + clean::SelfValue => { + args.push_str("self"); + args_plain.push_str("self"); + } + clean::SelfBorrowed(Some(ref lt), mtbl) => { + args.push_str(&format!("{}{} {}self", amp, *lt, MutableSpace(mtbl))); + args_plain.push_str(&format!("&{} {}self", *lt, MutableSpace(mtbl))); + } + clean::SelfBorrowed(None, mtbl) => { + args.push_str(&format!("{}{}self", amp, MutableSpace(mtbl))); + args_plain.push_str(&format!("&{}self", MutableSpace(mtbl))); + } + clean::SelfExplicit(ref typ) => { + if f.alternate() { + args.push_str(&format!("self: {:#}", *typ)); + } else { + args.push_str(&format!("self: {}", *typ)); + } + 
args_plain.push_str(&format!("self: {:#}", *typ)); + } + } + } else { + if i > 0 { + args.push_str("
"); + args_plain.push_str(" "); + } + if !input.name.is_empty() { + args.push_str(&format!("{}: ", input.name)); + args_plain.push_str(&format!("{}: ", input.name)); + } + + if f.alternate() { + args.push_str(&format!("{:#}", input.type_)); + } else { + args.push_str(&format!("{}", input.type_)); + } + args_plain.push_str(&format!("{:#}", input.type_)); } - clean::SelfExplicit(ref typ) => { - args.push_str(&format!("self: {}", *typ)); + if i + 1 < decl.inputs.values.len() { + args.push_str(","); + args_plain.push_str(","); } } - for (i, input) in d.inputs.values.iter().enumerate() { - if i > 0 || !args.is_empty() { args.push_str(", "); } - if !input.name.is_empty() { - args.push_str(&format!("{}: ", input.name)); - } - args.push_str(&format!("{}", input.type_)); + + if decl.variadic { + args.push_str(",
..."); + args_plain.push_str(", ..."); + } + + let arrow_plain = format!("{:#}", decl.output); + let arrow = if f.alternate() { + format!("{:#}", decl.output) + } else { + format!("{}", decl.output) + }; + + let mut output: String; + let plain: String; + let pad = repeat(" ").take(indent).collect::(); + if arrow.is_empty() { + output = format!("({})", args); + plain = format!("{}({})", pad, args_plain); + } else { + output = format!("({args})
{arrow}", args = args, arrow = arrow); + plain = format!("{pad}({args}){arrow}", + pad = pad, + args = args_plain, + arrow = arrow_plain); + } + + if plain.len() > 80 { + let pad = repeat(" ").take(indent).collect::(); + let pad = format!("
{}", pad); + output = output.replace("
", &pad); + } else { + output = output.replace("
", ""); + } + if f.alternate() { + write!(f, "{}", output.replace("
", "\n")) + } else { + write!(f, "{}", output) } - write!(f, "({args}){arrow}", args = args, arrow = d.output) } } -impl fmt::Display for VisSpace { +impl<'a> fmt::Display for VisSpace<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.get() { - Some(hir::Public) => write!(f, "pub "), - Some(hir::Inherited) | None => Ok(()) + match *self.get() { + Some(clean::Public) => write!(f, "pub "), + Some(clean::Inherited) | None => Ok(()) } } } @@ -646,26 +961,16 @@ impl fmt::Display for ConstnessSpace { impl fmt::Display for clean::Import { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - clean::SimpleImport(ref name, ref src) => { - if *name == src.path.segments.last().unwrap().name { + clean::Import::Simple(ref name, ref src) => { + if *name == src.path.last_name() { write!(f, "use {};", *src) } else { write!(f, "use {} as {};", *src, *name) } } - clean::GlobImport(ref src) => { + clean::Import::Glob(ref src) => { write!(f, "use {}::*;", *src) } - clean::ImportList(ref src, ref names) => { - try!(write!(f, "use {}::{{", *src)); - for (i, n) in names.iter().enumerate() { - if i > 0 { - try!(write!(f, ", ")); - } - try!(write!(f, "{}", *n)); - } - write!(f, "}};") - } } } } @@ -677,9 +982,9 @@ impl fmt::Display for clean::ImportSource { _ => { for (i, seg) in self.path.segments.iter().enumerate() { if i > 0 { - try!(write!(f, "::")) + write!(f, "::")? 
} - try!(write!(f, "{}", seg.name)); + write!(f, "{}", seg.name)?; } Ok(()) } @@ -687,26 +992,13 @@ impl fmt::Display for clean::ImportSource { } } -impl fmt::Display for clean::ViewListIdent { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.source { - Some(did) => { - let path = clean::Path::singleton(self.name.clone()); - try!(resolved_path(f, did, &path, false)); - } - _ => try!(write!(f, "{}", self.name)), - } - - if let Some(ref name) = self.rename { - try!(write!(f, " as {}", name)); - } - Ok(()) - } -} - impl fmt::Display for clean::TypeBinding { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}={}", self.name, self.ty) + if f.alternate() { + write!(f, "{}={:#}", self.name, self.ty) + } else { + write!(f, "{}={}", self.name, self.ty) + } } } @@ -730,10 +1022,11 @@ impl fmt::Display for RawMutableSpace { impl fmt::Display for AbiSpace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let quot = if f.alternate() { "\"" } else { """ }; match self.0 { Abi::Rust => Ok(()), Abi::C => write!(f, "extern "), - abi => write!(f, "extern {} ", abi), + abi => write!(f, "extern {0}{1}{0} ", quot, abi.name()), } } } diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index cca365d16c85c..bd47b1e7c121c 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -1,4 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,103 +8,246 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Basic html highlighting functionality +//! Basic syntax highlighting functionality. //! //! This module uses libsyntax's lexer to provide token-based highlighting for //! the HTML documentation generated by rustdoc. +//! +//! 
If you just want to syntax highlighting for a Rust program, then you can use +//! the `render_inner_with_highlighting` or `render_with_highlighting` +//! functions. For more advanced use cases (if you want to supply your own css +//! classes or control how the HTML is generated, or even generate something +//! other then HTML), then you should implement the `Writer` trait and use a +//! `Classifier`. use html::escape::Escape; +use std::fmt::Display; use std::io; use std::io::prelude::*; -use syntax::parse::lexer; + +use syntax::codemap::CodeMap; +use syntax::parse::lexer::{self, Reader, TokenAndSpan}; use syntax::parse::token; use syntax::parse; +use syntax_pos::Span; -/// Highlights some source code, returning the HTML output. -pub fn highlight(src: &str, class: Option<&str>, id: Option<&str>) -> String { +/// Highlights `src`, returning the HTML output. +pub fn render_with_highlighting(src: &str, class: Option<&str>, id: Option<&str>, + extension: Option<&str>) -> String { debug!("highlighting: ================\n{}\n==============", src); let sess = parse::ParseSess::new(); - let fm = sess.codemap().new_filemap("".to_string(), src.to_string()); + let fm = sess.codemap().new_filemap("".to_string(), None, src.to_string()); let mut out = Vec::new(); - doit(&sess, - lexer::StringReader::new(&sess.span_diagnostic, fm), - class, - id, - &mut out).unwrap(); + write_header(class, id, &mut out).unwrap(); + + let mut classifier = Classifier::new(lexer::StringReader::new(&sess.span_diagnostic, fm), + sess.codemap()); + if let Err(_) = classifier.write_source(&mut out) { + return format!("
{}
", src); + } + + if let Some(extension) = extension { + write!(out, "{}", extension).unwrap(); + } + write_footer(&mut out).unwrap(); String::from_utf8_lossy(&out[..]).into_owned() } -/// Exhausts the `lexer` writing the output into `out`. +/// Highlights `src`, returning the HTML output. Returns only the inner html to +/// be inserted into an element. C.f., `render_with_highlighting` which includes +/// an enclosing `
` block.
+pub fn render_inner_with_highlighting(src: &str) -> io::Result {
+    let sess = parse::ParseSess::new();
+    let fm = sess.codemap().new_filemap("".to_string(), None, src.to_string());
+
+    let mut out = Vec::new();
+    let mut classifier = Classifier::new(lexer::StringReader::new(&sess.span_diagnostic, fm),
+                                         sess.codemap());
+    classifier.write_source(&mut out)?;
+
+    Ok(String::from_utf8_lossy(&out).into_owned())
+}
+
+/// Processes a program (nested in the internal `lexer`), classifying strings of
+/// text by highlighting category (`Class`). Calls out to a `Writer` to write
+/// each span of text in sequence.
+pub struct Classifier<'a> {
+    lexer: lexer::StringReader<'a>,
+    codemap: &'a CodeMap,
+
+    // State of the classifier.
+    in_attribute: bool,
+    in_macro: bool,
+    in_macro_nonterminal: bool,
+}
+
+/// How a span of text is classified. Mostly corresponds to token kinds.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum Class {
+    None,
+    Comment,
+    DocComment,
+    Attribute,
+    KeyWord,
+    // Keywords that do pointer/reference stuff.
+    RefKeyWord,
+    Self_,
+    Op,
+    Macro,
+    MacroNonTerminal,
+    String,
+    Number,
+    Bool,
+    Ident,
+    Lifetime,
+    PreludeTy,
+    PreludeVal,
+    QuestionMark,
+}
+
+/// Trait that controls writing the output of syntax highlighting. Users should
+/// implement this trait to customise writing output.
 ///
-/// The general structure for this method is to iterate over each token,
-/// possibly giving it an HTML span with a class specifying what flavor of token
-/// it's used. All source code emission is done as slices from the source map,
-/// not from the tokens themselves, in order to stay true to the original
-/// source.
-fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
-        class: Option<&str>, id: Option<&str>,
-        out: &mut Write) -> io::Result<()> {
-    use syntax::parse::lexer::Reader;
-
-    try!(write!(out, "
 try!(write!(out, "id='{}' ", id)),
-        None => {}
+/// The classifier will call into the `Writer` implementation as it finds spans
+/// of text to highlight. Exactly how that text should be highlighted is up to
+/// the implementation.
+pub trait Writer {
+    /// Called when we start processing a span of text that should be highlighted.
+    /// The `Class` argument specifies how it should be highlighted.
+    fn enter_span(&mut self, Class) -> io::Result<()>;
+
+    /// Called at the end of a span of highlighted text.
+    fn exit_span(&mut self) -> io::Result<()>;
+
+    /// Called for a span of text, usually, but not always, a single token. If
+    /// the string of text (`T`) does correspond to a token, then the token will
+    /// also be passed. If the text should be highlighted differently from the
+    /// surrounding text, then the `Class` argument will be a value other than
+    /// `None`.
+    /// The following sequences of callbacks are equivalent:
+    /// ```plain
+    ///     enter_span(Foo), string("text", None), exit_span()
+    ///     string("text", Foo)
+    /// ```
+    /// The latter can be thought of as a shorthand for the former, which is
+    /// more flexible.
+    fn string(&mut self, T, Class, Option<&TokenAndSpan>) -> io::Result<()>;
+}
+
+// Implement `Writer` for anthing that can be written to, this just implements
+// the default rustdoc behaviour.
+impl Writer for U {
+    fn string(&mut self,
+                          text: T,
+                          klass: Class,
+                          _tas: Option<&TokenAndSpan>)
+                          -> io::Result<()> {
+        match klass {
+            Class::None => write!(self, "{}", text),
+            klass => write!(self, "{}", klass.rustdoc_class(), text),
+        }
     }
-    try!(write!(out, "class='rust {}'>\n", class.unwrap_or("")));
-    let mut is_attribute = false;
-    let mut is_macro = false;
-    let mut is_macro_nonterminal = false;
-    loop {
-        let next = lexer.next_token();
 
-        let snip = |sp| sess.codemap().span_to_snippet(sp).unwrap();
+    fn enter_span(&mut self, klass: Class) -> io::Result<()> {
+        write!(self, "", klass.rustdoc_class())
+    }
 
-        if next.tok == token::Eof { break }
+    fn exit_span(&mut self) -> io::Result<()> {
+        write!(self, "")
+    }
+}
 
-        let klass = match next.tok {
-            token::Whitespace => {
-                try!(write!(out, "{}", Escape(&snip(next.sp))));
-                continue
-            },
-            token::Comment => {
-                try!(write!(out, "{}",
-                            Escape(&snip(next.sp))));
-                continue
-            },
+impl<'a> Classifier<'a> {
+    pub fn new(lexer: lexer::StringReader<'a>, codemap: &'a CodeMap) -> Classifier<'a> {
+        Classifier {
+            lexer: lexer,
+            codemap: codemap,
+            in_attribute: false,
+            in_macro: false,
+            in_macro_nonterminal: false,
+        }
+    }
+
+    /// Exhausts the `lexer` writing the output into `out`.
+    ///
+    /// The general structure for this method is to iterate over each token,
+    /// possibly giving it an HTML span with a class specifying what flavor of token
+    /// is used. All source code emission is done as slices from the source map,
+    /// not from the tokens themselves, in order to stay true to the original
+    /// source.
+    pub fn write_source(&mut self,
+                                   out: &mut W)
+                                   -> io::Result<()> {
+        loop {
+            let next = match self.lexer.try_next_token() {
+                Ok(tas) => tas,
+                Err(_) => {
+                    self.lexer.emit_fatal_errors();
+                    self.lexer.span_diagnostic.struct_warn("Backing out of syntax highlighting")
+                                              .note("You probably did not intend to render this \
+                                                     as a rust code-block")
+                                              .emit();
+                    return Err(io::Error::new(io::ErrorKind::Other, ""));
+                }
+            };
+
+            if next.tok == token::Eof {
+                break;
+            }
+
+            self.write_token(out, next)?;
+        }
+
+        Ok(())
+    }
+
+    // Handles an individual token from the lexer.
+    fn write_token(&mut self,
+                              out: &mut W,
+                              tas: TokenAndSpan)
+                              -> io::Result<()> {
+        let klass = match tas.tok {
             token::Shebang(s) => {
-                try!(write!(out, "{}", Escape(&s.as_str())));
-                continue
+                out.string(Escape(&s.as_str()), Class::None, Some(&tas))?;
+                return Ok(());
             },
+
+            token::Whitespace => Class::None,
+            token::Comment => Class::Comment,
+            token::DocComment(..) => Class::DocComment,
+
             // If this '&' token is directly adjacent to another token, assume
             // that it's the address-of operator instead of the and-operator.
-            // This allows us to give all pointers their own class (`Box` and
-            // `@` are below).
-            token::BinOp(token::And) if lexer.peek().sp.lo == next.sp.hi => "kw-2",
-            token::At | token::Tilde => "kw-2",
+            token::BinOp(token::And) if self.lexer.peek().sp.lo == tas.sp.hi => Class::RefKeyWord,
 
-            // consider this as part of a macro invocation if there was a
-            // leading identifier
-            token::Not if is_macro => { is_macro = false; "macro" }
+            // Consider this as part of a macro invocation if there was a
+            // leading identifier.
+            token::Not if self.in_macro => {
+                self.in_macro = false;
+                Class::Macro
+            }
 
-            // operators
+            // Operators.
             token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt |
                 token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow |
-                token::BinOpEq(..) | token::FatArrow => "op",
+                token::BinOpEq(..) | token::FatArrow => Class::Op,
 
-            // miscellaneous, no highlighting
+            // Miscellaneous, no highlighting.
             token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi |
                 token::Colon | token::ModSep | token::LArrow | token::OpenDelim(_) |
                 token::CloseDelim(token::Brace) | token::CloseDelim(token::Paren) |
-                token::Question => "",
+                token::CloseDelim(token::NoDelim) => Class::None,
+
+            token::Question => Class::QuestionMark,
+
             token::Dollar => {
-                if lexer.peek().tok.is_ident() {
-                    is_macro_nonterminal = true;
-                    "macro-nonterminal"
+                if self.lexer.peek().tok.is_ident() {
+                    self.in_macro_nonterminal = true;
+                    Class::MacroNonTerminal
                 } else {
-                    ""
+                    Class::None
                 }
             }
 
@@ -113,77 +256,116 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader,
             // seen, so skip out early. Down below we terminate the attribute
             // span when we see the ']'.
             token::Pound => {
-                is_attribute = true;
-                try!(write!(out, r"#"));
-                continue
+                self.in_attribute = true;
+                out.enter_span(Class::Attribute)?;
+                out.string("#", Class::None, None)?;
+                return Ok(());
             }
             token::CloseDelim(token::Bracket) => {
-                if is_attribute {
-                    is_attribute = false;
-                    try!(write!(out, "]"));
-                    continue
+                if self.in_attribute {
+                    self.in_attribute = false;
+                    out.string("]", Class::None, None)?;
+                    out.exit_span()?;
+                    return Ok(());
                 } else {
-                    ""
+                    Class::None
                 }
             }
 
             token::Literal(lit, _suf) => {
                 match lit {
-                    // text literals
+                    // Text literals.
                     token::Byte(..) | token::Char(..) |
                         token::ByteStr(..) | token::ByteStrRaw(..) |
-                        token::Str_(..) | token::StrRaw(..) => "string",
+                        token::Str_(..) | token::StrRaw(..) => Class::String,
 
-                    // number literals
-                    token::Integer(..) | token::Float(..) => "number",
+                    // Number literals.
+                    token::Integer(..) | token::Float(..) => Class::Number,
                 }
             }
 
-            // keywords are also included in the identifier set
-            token::Ident(ident, _is_mod_sep) => {
+            // Keywords are also included in the identifier set.
+            token::Ident(ident) => {
                 match &*ident.name.as_str() {
-                    "ref" | "mut" => "kw-2",
+                    "ref" | "mut" => Class::RefKeyWord,
+
+                    "self" |"Self" => Class::Self_,
+                    "false" | "true" => Class::Bool,
 
-                    "self" => "self",
-                    "false" | "true" => "boolval",
+                    "Option" | "Result" => Class::PreludeTy,
+                    "Some" | "None" | "Ok" | "Err" => Class::PreludeVal,
 
-                    "Option" | "Result" => "prelude-ty",
-                    "Some" | "None" | "Ok" | "Err" => "prelude-val",
+                    "$crate" => Class::KeyWord,
+                    _ if tas.tok.is_any_keyword() => Class::KeyWord,
 
-                    _ if next.tok.is_any_keyword() => "kw",
                     _ => {
-                        if is_macro_nonterminal {
-                            is_macro_nonterminal = false;
-                            "macro-nonterminal"
-                        } else if lexer.peek().tok == token::Not {
-                            is_macro = true;
-                            "macro"
+                        if self.in_macro_nonterminal {
+                            self.in_macro_nonterminal = false;
+                            Class::MacroNonTerminal
+                        } else if self.lexer.peek().tok == token::Not {
+                            self.in_macro = true;
+                            Class::Macro
                         } else {
-                            "ident"
+                            Class::Ident
                         }
                     }
                 }
             }
 
-            // Special macro vars are like keywords
-            token::SpecialVarNt(_) => "kw-2",
+            token::Lifetime(..) => Class::Lifetime,
 
-            token::Lifetime(..) => "lifetime",
-            token::DocComment(..) => "doccomment",
             token::Underscore | token::Eof | token::Interpolated(..) |
-                token::MatchNt(..) | token::SubstNt(..) => "",
+            token::MatchNt(..) | token::SubstNt(..) | token::Tilde | token::At => Class::None,
         };
 
-        // as mentioned above, use the original source code instead of
-        // stringifying this token
-        let snip = sess.codemap().span_to_snippet(next.sp).unwrap();
-        if klass == "" {
-            try!(write!(out, "{}", Escape(&snip)));
-        } else {
-            try!(write!(out, "{}", klass,
-                          Escape(&snip)));
+        // Anything that didn't return above is the simple case where the
+        // class just spans a single token, so we can use the `string` method.
+        out.string(Escape(&self.snip(tas.sp)), klass, Some(&tas))
+    }
+
+    // Helper function to get a snippet from the codemap.
+    fn snip(&self, sp: Span) -> String {
+        self.codemap.span_to_snippet(sp).unwrap()
+    }
+}
+
+impl Class {
+    /// Returns the css class expected by rustdoc for each `Class`.
+    pub fn rustdoc_class(self) -> &'static str {
+        match self {
+            Class::None => "",
+            Class::Comment => "comment",
+            Class::DocComment => "doccomment",
+            Class::Attribute => "attribute",
+            Class::KeyWord => "kw",
+            Class::RefKeyWord => "kw-2",
+            Class::Self_ => "self",
+            Class::Op => "op",
+            Class::Macro => "macro",
+            Class::MacroNonTerminal => "macro-nonterminal",
+            Class::String => "string",
+            Class::Number => "number",
+            Class::Bool => "bool-val",
+            Class::Ident => "ident",
+            Class::Lifetime => "lifetime",
+            Class::PreludeTy => "prelude-ty",
+            Class::PreludeVal => "prelude-val",
+            Class::QuestionMark => "question-mark"
         }
     }
+}
+
+fn write_header(class: Option<&str>,
+                id: Option<&str>,
+                out: &mut Write)
+                -> io::Result<()> {
+    write!(out, "
\n", class.unwrap_or(""))
+}
 
+fn write_footer(out: &mut Write) -> io::Result<()> {
     write!(out, "
\n") } diff --git a/src/librustdoc/html/item_type.rs b/src/librustdoc/html/item_type.rs index afc93f41172e8..f584c4e2f4d9c 100644 --- a/src/librustdoc/html/item_type.rs +++ b/src/librustdoc/html/item_type.rs @@ -40,15 +40,30 @@ pub enum ItemType { AssociatedType = 16, Constant = 17, AssociatedConst = 18, + Union = 19, } -impl ItemType { - pub fn from_item(item: &clean::Item) -> ItemType { - match item.inner { + +#[derive(Copy, Eq, PartialEq, Clone)] +pub enum NameSpace { + Type, + Value, + Macro, +} + +impl<'a> From<&'a clean::Item> for ItemType { + fn from(item: &'a clean::Item) -> ItemType { + let inner = match item.inner { + clean::StrippedItem(box ref item) => item, + ref inner@_ => inner, + }; + + match *inner { clean::ModuleItem(..) => ItemType::Module, clean::ExternCrateItem(..) => ItemType::ExternCrate, clean::ImportItem(..) => ItemType::Import, clean::StructItem(..) => ItemType::Struct, + clean::UnionItem(..) => ItemType::Union, clean::EnumItem(..) => ItemType::Enum, clean::FunctionItem(..) => ItemType::Function, clean::TypedefItem(..) => ItemType::Typedef, @@ -67,29 +82,36 @@ impl ItemType { clean::AssociatedConstItem(..) => ItemType::AssociatedConst, clean::AssociatedTypeItem(..) => ItemType::AssociatedType, clean::DefaultImplItem(..) => ItemType::Impl, + clean::StrippedItem(..) 
=> unreachable!(), } } +} - pub fn from_type_kind(kind: clean::TypeKind) -> ItemType { +impl From for ItemType { + fn from(kind: clean::TypeKind) -> ItemType { match kind { - clean::TypeStruct => ItemType::Struct, - clean::TypeEnum => ItemType::Enum, - clean::TypeFunction => ItemType::Function, - clean::TypeTrait => ItemType::Trait, - clean::TypeModule => ItemType::Module, - clean::TypeStatic => ItemType::Static, - clean::TypeConst => ItemType::Constant, - clean::TypeVariant => ItemType::Variant, - clean::TypeTypedef => ItemType::Typedef, + clean::TypeKind::Struct => ItemType::Struct, + clean::TypeKind::Union => ItemType::Union, + clean::TypeKind::Enum => ItemType::Enum, + clean::TypeKind::Function => ItemType::Function, + clean::TypeKind::Trait => ItemType::Trait, + clean::TypeKind::Module => ItemType::Module, + clean::TypeKind::Static => ItemType::Static, + clean::TypeKind::Const => ItemType::Constant, + clean::TypeKind::Variant => ItemType::Variant, + clean::TypeKind::Typedef => ItemType::Typedef, } } +} - pub fn to_static_str(&self) -> &'static str { +impl ItemType { + pub fn css_class(&self) -> &'static str { match *self { ItemType::Module => "mod", ItemType::ExternCrate => "externcrate", ItemType::Import => "import", ItemType::Struct => "struct", + ItemType::Union => "union", ItemType::Enum => "enum", ItemType::Function => "fn", ItemType::Typedef => "type", @@ -107,9 +129,56 @@ impl ItemType { ItemType::AssociatedConst => "associatedconstant", } } + + pub fn name_space(&self) -> NameSpace { + match *self { + ItemType::Struct | + ItemType::Union | + ItemType::Enum | + ItemType::Module | + ItemType::Typedef | + ItemType::Trait | + ItemType::Primitive | + ItemType::AssociatedType => NameSpace::Type, + + ItemType::ExternCrate | + ItemType::Import | + ItemType::Function | + ItemType::Static | + ItemType::Impl | + ItemType::TyMethod | + ItemType::Method | + ItemType::StructField | + ItemType::Variant | + ItemType::Constant | + ItemType::AssociatedConst => 
NameSpace::Value, + + ItemType::Macro => NameSpace::Macro, + } + } } impl fmt::Display for ItemType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.css_class().fmt(f) + } +} + +pub const NAMESPACE_TYPE: &'static str = "t"; +pub const NAMESPACE_VALUE: &'static str = "v"; +pub const NAMESPACE_MACRO: &'static str = "m"; + +impl NameSpace { + pub fn to_static_str(&self) -> &'static str { + match *self { + NameSpace::Type => NAMESPACE_TYPE, + NameSpace::Value => NAMESPACE_VALUE, + NameSpace::Macro => NAMESPACE_MACRO, + } + } +} + +impl fmt::Display for NameSpace { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.to_static_str().fmt(f) } diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index 227981d68fbb4..5353642e29425 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -9,7 +9,6 @@ // except according to those terms. use std::fmt; -use std::io::prelude::*; use std::io; use externalfiles::ExternalHtml; @@ -20,19 +19,19 @@ pub struct Layout { pub favicon: String, pub external_html: ExternalHtml, pub krate: String, - pub playground_url: String, } pub struct Page<'a> { pub title: &'a str, - pub ty: &'a str, + pub css_class: &'a str, pub root_path: &'a str, pub description: &'a str, - pub keywords: &'a str + pub keywords: &'a str, } pub fn render( - dst: &mut io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T) + dst: &mut io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T, + css_file_extension: bool) -> io::Result<()> { write!(dst, @@ -49,6 +48,7 @@ r##" + {css_extension} {favicon} {in_header} @@ -79,7 +79,7 @@ r##" -
{content}
+
{content}
@@ -102,6 +102,8 @@ r##"
Move down in search results
Go to active search result
+
+
+
Collapse/expand all sections
@@ -122,7 +124,7 @@ r##"

Search functions by type signature (e.g. - vec -> usize) + vec -> usize or * -> vec)

@@ -133,22 +135,26 @@ r##" - {play_js} "##, + css_extension = if css_file_extension { + format!("", + root_path = page.root_path) + } else { + "".to_owned() + }, content = *t, root_path = page.root_path, - ty = page.ty, + css_class = page.css_class, logo = if layout.logo.is_empty() { "".to_string() } else { format!("\ - ", + logo", page.root_path, layout.krate, layout.logo) }, @@ -165,12 +171,6 @@ r##" after_content = layout.external_html.after_content, sidebar = *sidebar, krate = layout.krate, - play_url = layout.playground_url, - play_js = if layout.playground_url.is_empty() { - "".to_string() - } else { - format!(r#""#, page.root_path) - }, ) } diff --git a/src/librustdoc/html/markdown.rs b/src/librustdoc/html/markdown.rs index a5436886a7e8c..67cf12f4f4a6e 100644 --- a/src/librustdoc/html/markdown.rs +++ b/src/librustdoc/html/markdown.rs @@ -31,9 +31,10 @@ use std::ascii::AsciiExt; use std::cell::RefCell; use std::default::Default; use std::ffi::CString; -use std::fmt; +use std::fmt::{self, Write}; use std::slice; use std::str; +use syntax::feature_gate::UnstableFeatures; use html::render::derive_id; use html::toc::TocBuilder; @@ -157,6 +158,9 @@ struct hoedown_buffer { // hoedown FFI #[link(name = "hoedown", kind = "static")] +#[cfg(not(cargobuild))] +extern {} + extern { fn hoedown_html_renderer_new(render_flags: libc::c_uint, nesting_level: libc::c_int) @@ -210,7 +214,9 @@ fn collapse_whitespace(s: &str) -> String { s.split_whitespace().collect::>().join(" ") } -thread_local!(pub static PLAYGROUND_KRATE: RefCell>> = { +// Information about the playground if a URL has been specified, containing an +// optional crate name and the URL. 
+thread_local!(pub static PLAYGROUND: RefCell, String)>> = { RefCell::new(None) }); @@ -244,20 +250,53 @@ pub fn render(w: &mut fmt::Formatter, s: &str, print_toc: bool) -> fmt::Result { }); let text = lines.collect::>().join("\n"); if rendered { return } - PLAYGROUND_KRATE.with(|krate| { - let mut s = String::new(); - krate.borrow().as_ref().map(|krate| { + PLAYGROUND.with(|play| { + // insert newline to clearly separate it from the + // previous block so we can shorten the html output + let mut s = String::from("\n"); + let playground_button = play.borrow().as_ref().and_then(|&(ref krate, ref url)| { + if url.is_empty() { + return None; + } let test = origtext.lines().map(|l| { stripped_filtered_line(l).unwrap_or(l) }).collect::>().join("\n"); let krate = krate.as_ref().map(|s| &**s); let test = test::maketest(&test, krate, false, &Default::default()); - s.push_str(&format!("{}", Escape(&test))); + let channel = if test.contains("#![feature(") { + "&version=nightly" + } else { + "" + }; + // These characters don't need to be escaped in a URI. + // FIXME: use a library function for percent encoding. + fn dont_escape(c: u8) -> bool { + (b'a' <= c && c <= b'z') || + (b'A' <= c && c <= b'Z') || + (b'0' <= c && c <= b'9') || + c == b'-' || c == b'_' || c == b'.' || + c == b'~' || c == b'!' 
|| c == b'\'' || + c == b'(' || c == b')' || c == b'*' + } + let mut test_escaped = String::new(); + for b in test.bytes() { + if dont_escape(b) { + test_escaped.push(char::from(b)); + } else { + write!(test_escaped, "%{:02X}", b).unwrap(); + } + } + Some(format!( + r#"Run"#, + url, test_escaped, channel + )) }); - s.push_str(&highlight::highlight(&text, - Some("rust-example-rendered"), - None)); + s.push_str(&highlight::render_with_highlighting( + &text, + Some("rust-example-rendered"), + None, + playground_button.as_ref().map(String::as_str))); let output = CString::new(s).unwrap(); hoedown_buffer_puts(ob, output.as_ptr()); }) @@ -400,7 +439,8 @@ pub fn find_testable_code(doc: &str, tests: &mut ::test::Collector) { let text = lines.collect::>().join("\n"); tests.add_test(text.to_owned(), block_info.should_panic, block_info.no_run, - block_info.ignore, block_info.test_harness); + block_info.ignore, block_info.test_harness, + block_info.compile_fail, block_info.error_codes); } } @@ -445,6 +485,8 @@ struct LangString { ignore: bool, rust: bool, test_harness: bool, + compile_fail: bool, + error_codes: Vec, } impl LangString { @@ -455,6 +497,8 @@ impl LangString { ignore: false, rust: true, // NB This used to be `notrust = false` test_harness: false, + compile_fail: false, + error_codes: Vec::new(), } } @@ -462,6 +506,12 @@ impl LangString { let mut seen_rust_tags = false; let mut seen_other_tags = false; let mut data = LangString::all_false(); + let mut allow_compile_fail = false; + let mut allow_error_code_check = false; + if UnstableFeatures::from_environment().is_nightly_build() { + allow_compile_fail = true; + allow_error_code_check = true; + } let tokens = string.split(|c: char| !(c == '_' || c == '-' || c.is_alphanumeric()) @@ -474,7 +524,20 @@ impl LangString { "no_run" => { data.no_run = true; seen_rust_tags = true; }, "ignore" => { data.ignore = true; seen_rust_tags = true; }, "rust" => { data.rust = true; seen_rust_tags = true; }, - "test_harness" => { 
data.test_harness = true; seen_rust_tags = true; } + "test_harness" => { data.test_harness = true; seen_rust_tags = true; }, + "compile_fail" if allow_compile_fail => { + data.compile_fail = true; + seen_rust_tags = true; + data.no_run = true; + } + x if allow_error_code_check && x.starts_with("E") && x.len() == 5 => { + if let Ok(_) = x[1..].parse::() { + data.error_codes.push(x.to_owned()); + seen_rust_tags = true; + } else { + seen_other_tags = true; + } + } _ => { seen_other_tags = true } } } @@ -557,35 +620,42 @@ mod tests { #[test] fn test_lang_string_parse() { fn t(s: &str, - should_panic: bool, no_run: bool, ignore: bool, rust: bool, test_harness: bool) { + should_panic: bool, no_run: bool, ignore: bool, rust: bool, test_harness: bool, + compile_fail: bool, error_codes: Vec) { assert_eq!(LangString::parse(s), LangString { should_panic: should_panic, no_run: no_run, ignore: ignore, rust: rust, test_harness: test_harness, + compile_fail: compile_fail, + error_codes: error_codes, }) } - // marker | should_panic| no_run | ignore | rust | test_harness - t("", false, false, false, true, false); - t("rust", false, false, false, true, false); - t("sh", false, false, false, false, false); - t("ignore", false, false, true, true, false); - t("should_panic", true, false, false, true, false); - t("no_run", false, true, false, true, false); - t("test_harness", false, false, false, true, true); - t("{.no_run .example}", false, true, false, true, false); - t("{.sh .should_panic}", true, false, false, true, false); - t("{.example .rust}", false, false, false, true, false); - t("{.test_harness .rust}", false, false, false, true, true); + // marker | should_panic| no_run| ignore| rust | test_harness| compile_fail + // | error_codes + t("", false, false, false, true, false, false, Vec::new()); + t("rust", false, false, false, true, false, false, Vec::new()); + t("sh", false, false, false, false, false, false, Vec::new()); + t("ignore", false, false, true, true, false, false, 
Vec::new()); + t("should_panic", true, false, false, true, false, false, Vec::new()); + t("no_run", false, true, false, true, false, false, Vec::new()); + t("test_harness", false, false, false, true, true, false, Vec::new()); + t("compile_fail", false, true, false, true, false, true, Vec::new()); + t("E0450", false, false, false, true, false, false, + vec!["E0450".to_owned()]); + t("{.no_run .example}", false, true, false, true, false, false, Vec::new()); + t("{.sh .should_panic}", true, false, false, true, false, false, Vec::new()); + t("{.example .rust}", false, false, false, true, false, false, Vec::new()); + t("{.test_harness .rust}", false, false, false, true, true, false, Vec::new()); } #[test] fn issue_17736() { let markdown = "# title"; format!("{}", Markdown(markdown)); - reset_ids(); + reset_ids(true); } #[test] @@ -593,7 +663,7 @@ mod tests { fn t(input: &str, expect: &str) { let output = format!("{}", Markdown(input)); assert_eq!(output, expect); - reset_ids(); + reset_ids(true); } t("# Foo bar", "\n

\ @@ -632,7 +702,7 @@ mod tests { Panics

"); }; test(); - reset_ids(); + reset_ids(true); test(); } diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index 850045382e1f2..40f4d13b2942f 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -37,36 +37,39 @@ pub use self::ExternalLocation::*; use std::ascii::AsciiExt; use std::cell::RefCell; use std::cmp::Ordering; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::BTreeMap; use std::default::Default; use std::error; use std::fmt::{self, Display, Formatter}; -use std::fs::{self, File}; +use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::{self, BufWriter, BufReader}; use std::iter::repeat; use std::mem; -use std::path::{PathBuf, Path}; +use std::path::{PathBuf, Path, Component}; use std::str; use std::sync::Arc; use externalfiles::ExternalHtml; -use serialize::json::{self, ToJson}; +use serialize::json::{ToJson, Json, as_json}; use syntax::{abi, ast}; -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::middle::def_id::{CRATE_DEF_INDEX, DefId}; +use syntax::feature_gate::UnstableFeatures; +use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId}; use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; -use rustc_front::hir; +use rustc::hir; +use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use rustc_data_structures::flock; -use clean::{self, SelfTy}; +use clean::{self, AttributesExt, GetDefId, SelfTy, Mutability}; use doctree; use fold::DocFolder; use html::escape::Escape; use html::format::{ConstnessSpace}; use html::format::{TyParamBounds, WhereClause, href, AbiSpace}; use html::format::{VisSpace, Method, UnsafetySpace, MutableSpace}; +use html::format::fmt_impl_for_trait_page; use html::item_type::ItemType; use html::markdown::{self, Markdown}; use html::{highlight, layout}; @@ -86,15 +89,20 @@ pub struct Context { /// Current hierarchy of components leading down to what's currently being /// rendered pub current: Vec, - /// String 
representation of how to get back to the root path of the 'doc/' - /// folder in terms of a relative URL. - pub root_path: String, - /// The path to the crate root source minus the file name. - /// Used for simplifying paths to the highlighted source code files. - pub src_root: PathBuf, /// The current destination folder of where HTML artifacts should be placed. /// This changes as the context descends into the module hierarchy. pub dst: PathBuf, + /// A flag, which when `true`, will render pages which redirect to the + /// real location of an item. This is used to allow external links to + /// publicly reused items to redirect to the right location. + pub render_redirect_pages: bool, + pub shared: Arc, +} + +pub struct SharedContext { + /// The path to the crate root source minus the file name. + /// Used for simplifying paths to the highlighted source code files. + pub src_root: PathBuf, /// This describes the layout of each page, and is not modified after /// creation of the context (contains info like the favicon and added html). pub layout: layout::Layout, @@ -102,15 +110,16 @@ pub struct Context { /// the source files are present in the html rendering, then this will be /// `true`. pub include_sources: bool, - /// A flag, which when turned off, will render pages which redirect to the - /// real location of an item. This is used to allow external links to - /// publicly reused items to redirect to the right location. - pub render_redirect_pages: bool, + /// The local file sources we've emitted and their respective url-paths. + pub local_sources: FxHashMap, /// All the passes that were run on this crate. - pub passes: HashSet, + pub passes: FxHashSet, /// The base-URL of the issue tracker for when an item has been tagged with /// an issue number. pub issue_tracker_base_url: Option, + /// The given user css file which allow to customize the generated + /// documentation theme. 
+ pub css_file_extension: Option, } /// Indicates where an external crate can be found. @@ -133,16 +142,19 @@ pub struct Implementor { /// Metadata about implementations for a type. #[derive(Clone)] pub struct Impl { - pub impl_: clean::Impl, - pub dox: Option, - pub stability: Option, + pub impl_item: clean::Item, } impl Impl { + fn inner_impl(&self) -> &clean::Impl { + match self.impl_item.inner { + clean::ImplItem(ref impl_) => impl_, + _ => panic!("non-impl item found in impl") + } + } + fn trait_did(&self) -> Option { - self.impl_.trait_.as_ref().and_then(|tr| { - if let clean::ResolvedPath { did, .. } = *tr {Some(did)} else {None} - }) + self.inner_impl().trait_.def_id() } } @@ -196,7 +208,7 @@ pub struct Cache { /// Mapping of typaram ids to the name of the type parameter. This is used /// when pretty-printing a type (so pretty printing doesn't have to /// painfully maintain a context like this) - pub typarams: HashMap, + pub typarams: FxHashMap, /// Maps a type id to all known implementations for that type. This is only /// recognized for intra-crate `ResolvedPath` types, and is used to print @@ -204,63 +216,74 @@ pub struct Cache { /// /// The values of the map are a list of implementations and documentation /// found on that implementation. - pub impls: HashMap>, + pub impls: FxHashMap>, /// Maintains a mapping of local crate node ids to the fully qualified name /// and "short type description" of that node. This is used when generating /// URLs when a type is being linked to. External paths are not located in /// this map because the `External` type itself has all the information /// necessary. - pub paths: HashMap, ItemType)>, + pub paths: FxHashMap, ItemType)>, /// Similar to `paths`, but only holds external paths. This is only used for /// generating explicit hyperlinks to other crates. - pub external_paths: HashMap>, + pub external_paths: FxHashMap, ItemType)>, /// This map contains information about all known traits of this crate. 
/// Implementations of a crate should inherit the documentation of the /// parent trait if no extra documentation is specified, and default methods /// should show up in documentation about trait implementations. - pub traits: HashMap, + pub traits: FxHashMap, /// When rendering traits, it's often useful to be able to list all /// implementors of the trait, and this mapping is exactly, that: a mapping /// of trait ids to the list of known implementors of the trait - pub implementors: HashMap>, + pub implementors: FxHashMap>, /// Cache of where external crate documentation can be found. - pub extern_locations: HashMap, + pub extern_locations: FxHashMap, /// Cache of where documentation for primitives can be found. - pub primitive_locations: HashMap, + pub primitive_locations: FxHashMap, - /// Set of definitions which have been inlined from external crates. - pub inlined: HashSet, + // Note that external items for which `doc(hidden)` applies to are shown as + // non-reachable while local items aren't. This is because we're reusing + // the access levels from crateanalysis. + pub access_levels: Arc>, // Private fields only used when initially crawling a crate to build a cache stack: Vec, parent_stack: Vec, + parent_is_trait_impl: bool, search_index: Vec, - privmod: bool, - remove_priv: bool, - access_levels: AccessLevels, + stripped_mod: bool, deref_trait_did: Option, + deref_mut_trait_did: Option, // In rare case where a structure is defined in one module but implemented // in another, if the implementing module is parsed before defining module, // then the fully qualified name of the structure isn't presented in `paths` // yet when its implementation methods are being indexed. Caches such methods // and their parent id here and indexes them at the end of crate parsing. - orphan_methods: Vec<(DefId, clean::Item)>, + orphan_impl_items: Vec<(DefId, clean::Item)>, +} + +/// Temporary storage for data obtained during `RustdocVisitor::clean()`. 
+/// Later on moved into `CACHE_KEY`. +#[derive(Default)] +pub struct RenderInfo { + pub inlined: FxHashSet, + pub external_paths: ::core::ExternalPaths, + pub external_typarams: FxHashMap, + pub deref_trait_did: Option, + pub deref_mut_trait_did: Option, } /// Helper struct to render all source code to HTML pages struct SourceCollector<'a> { - cx: &'a mut Context, + scx: &'a mut SharedContext, - /// Processed source-file paths - seen: HashSet, /// Root destination to place all HTML output into dst: PathBuf, } @@ -288,22 +311,40 @@ struct IndexItem { path: String, desc: String, parent: Option, + parent_idx: Option, search_type: Option, } +impl ToJson for IndexItem { + fn to_json(&self) -> Json { + assert_eq!(self.parent.is_some(), self.parent_idx.is_some()); + + let mut data = Vec::with_capacity(6); + data.push((self.ty as usize).to_json()); + data.push(self.name.to_json()); + data.push(self.path.to_json()); + data.push(self.desc.to_json()); + data.push(self.parent_idx.to_json()); + data.push(self.search_type.to_json()); + + Json::Array(data) + } +} + /// A type used for the search index. struct Type { name: Option, } -impl fmt::Display for Type { - /// Formats type as {name: $name}. - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Wrapping struct fmt should never call us when self.name is None, - // but just to be safe we write `null` in that case. +impl ToJson for Type { + fn to_json(&self) -> Json { match self.name { - Some(ref n) => write!(f, "{{\"name\":\"{}\"}}", n), - None => write!(f, "null") + Some(ref name) => { + let mut data = BTreeMap::new(); + data.insert("name".to_owned(), name.to_json()); + Json::Object(data) + }, + None => Json::Null } } } @@ -314,26 +355,17 @@ struct IndexItemFunctionType { output: Option } -impl fmt::Display for IndexItemFunctionType { - /// Formats a full fn type as a JSON {inputs: [Type], outputs: Type/null}. 
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl ToJson for IndexItemFunctionType { + fn to_json(&self) -> Json { // If we couldn't figure out a type, just write `null`. - if self.inputs.iter().any(|ref i| i.name.is_none()) || - (self.output.is_some() && self.output.as_ref().unwrap().name.is_none()) { - return write!(f, "null") + if self.inputs.iter().chain(self.output.iter()).any(|ref i| i.name.is_none()) { + Json::Null + } else { + let mut data = BTreeMap::new(); + data.insert("inputs".to_owned(), self.inputs.to_json()); + data.insert("output".to_owned(), self.output.to_json()); + Json::Object(data) } - - let inputs: Vec = self.inputs.iter().map(|ref t| { - format!("{}", t) - }).collect(); - try!(write!(f, "{{\"inputs\":[{}],\"output\":", inputs.join(","))); - - match self.output { - Some(ref t) => try!(write!(f, "{}", t)), - None => try!(write!(f, "null")) - }; - - Ok(try!(write!(f, "}}"))) } } @@ -342,10 +374,10 @@ impl fmt::Display for IndexItemFunctionType { thread_local!(static CACHE_KEY: RefCell> = Default::default()); thread_local!(pub static CURRENT_LOCATION_KEY: RefCell> = RefCell::new(Vec::new())); -thread_local!(static USED_ID_MAP: RefCell> = +thread_local!(static USED_ID_MAP: RefCell> = RefCell::new(init_ids())); -fn init_ids() -> HashMap { +fn init_ids() -> FxHashMap { [ "main", "search", @@ -361,15 +393,20 @@ fn init_ids() -> HashMap { "methods", "deref-methods", "implementations", - "derived_implementations" - ].into_iter().map(|id| (String::from(*id), 1)).collect::>() + ].into_iter().map(|id| (String::from(*id), 1)).collect() } /// This method resets the local table of used ID attributes. This is typically /// used at the beginning of rendering an entire HTML page to reset from the /// previous state (if any). 
-pub fn reset_ids() { - USED_ID_MAP.with(|s| *s.borrow_mut() = init_ids()); +pub fn reset_ids(embedded: bool) { + USED_ID_MAP.with(|s| { + *s.borrow_mut() = if embedded { + init_ids() + } else { + FxHashMap() + }; + }); } pub fn derive_id(candidate: String) -> String { @@ -391,126 +428,131 @@ pub fn derive_id(candidate: String) -> String { /// Generates the documentation for `crate` into the directory `dst` pub fn run(mut krate: clean::Crate, external_html: &ExternalHtml, + playground_url: Option, dst: PathBuf, - passes: HashSet) -> Result<(), Error> { + passes: FxHashSet, + css_file_extension: Option, + renderinfo: RenderInfo) -> Result<(), Error> { let src_root = match krate.src.parent() { Some(p) => p.to_path_buf(), None => PathBuf::new(), }; - let mut cx = Context { - dst: dst, + let mut scx = SharedContext { src_root: src_root, passes: passes, - current: Vec::new(), - root_path: String::new(), + include_sources: true, + local_sources: FxHashMap(), + issue_tracker_base_url: None, layout: layout::Layout { logo: "".to_string(), favicon: "".to_string(), external_html: external_html.clone(), krate: krate.name.clone(), - playground_url: "".to_string(), }, - include_sources: true, - render_redirect_pages: false, - issue_tracker_base_url: None, + css_file_extension: css_file_extension.clone(), }; - try_err!(mkdir(&cx.dst), &cx.dst); + // If user passed in `--playground-url` arg, we fill in crate name here + if let Some(url) = playground_url { + markdown::PLAYGROUND.with(|slot| { + *slot.borrow_mut() = Some((Some(krate.name.clone()), url)); + }); + } // Crawl the crate attributes looking for attributes which control how we're // going to emit HTML - let default: &[_] = &[]; - match krate.module.as_ref().map(|m| m.doc_list().unwrap_or(default)) { - Some(attrs) => { - for attr in attrs { - match *attr { - clean::NameValue(ref x, ref s) - if "html_favicon_url" == *x => { - cx.layout.favicon = s.to_string(); - } - clean::NameValue(ref x, ref s) - if "html_logo_url" == *x 
=> { - cx.layout.logo = s.to_string(); - } - clean::NameValue(ref x, ref s) - if "html_playground_url" == *x => { - cx.layout.playground_url = s.to_string(); - markdown::PLAYGROUND_KRATE.with(|slot| { - if slot.borrow().is_none() { - let name = krate.name.clone(); - *slot.borrow_mut() = Some(Some(name)); - } - }); - } - clean::NameValue(ref x, ref s) - if "issue_tracker_base_url" == *x => { - cx.issue_tracker_base_url = Some(s.to_string()); - } - clean::Word(ref x) - if "html_no_source" == *x => { - cx.include_sources = false; - } - _ => {} + if let Some(attrs) = krate.module.as_ref().map(|m| &m.attrs) { + for attr in attrs.lists("doc") { + let name = attr.name().map(|s| s.as_str()); + match (name.as_ref().map(|s| &s[..]), attr.value_str()) { + (Some("html_favicon_url"), Some(s)) => { + scx.layout.favicon = s.to_string(); + } + (Some("html_logo_url"), Some(s)) => { + scx.layout.logo = s.to_string(); + } + (Some("html_playground_url"), Some(s)) => { + markdown::PLAYGROUND.with(|slot| { + let name = krate.name.clone(); + *slot.borrow_mut() = Some((Some(name), s.to_string())); + }); + } + (Some("issue_tracker_base_url"), Some(s)) => { + scx.issue_tracker_base_url = Some(s.to_string()); + } + (Some("html_no_source"), None) if attr.is_word() => { + scx.include_sources = false; } + _ => {} } } - None => {} } + try_err!(mkdir(&dst), &dst); + krate = render_sources(&dst, &mut scx, krate)?; + let cx = Context { + current: Vec::new(), + dst: dst, + render_redirect_pages: false, + shared: Arc::new(scx), + }; // Crawl the crate to build various caches used for the output - let analysis = ::ANALYSISKEY.with(|a| a.clone()); - let analysis = analysis.borrow(); - let access_levels = analysis.as_ref().map(|a| a.access_levels.clone()); - let access_levels = access_levels.unwrap_or(Default::default()); - let paths: HashMap, ItemType)> = - analysis.as_ref().map(|a| { - let paths = a.external_paths.borrow_mut().take().unwrap(); - paths.into_iter().map(|(k, (v, t))| (k, (v, 
ItemType::from_type_kind(t)))).collect() - }).unwrap_or(HashMap::new()); + let RenderInfo { + inlined: _, + external_paths, + external_typarams, + deref_trait_did, + deref_mut_trait_did, + } = renderinfo; + + let external_paths = external_paths.into_iter() + .map(|(k, (v, t))| (k, (v, ItemType::from(t)))) + .collect(); + let mut cache = Cache { - impls: HashMap::new(), - external_paths: paths.iter().map(|(&k, v)| (k, v.0.clone())) - .collect(), - paths: paths, - implementors: HashMap::new(), + impls: FxHashMap(), + external_paths: external_paths, + paths: FxHashMap(), + implementors: FxHashMap(), stack: Vec::new(), parent_stack: Vec::new(), search_index: Vec::new(), - extern_locations: HashMap::new(), - primitive_locations: HashMap::new(), - remove_priv: cx.passes.contains("strip-private"), - privmod: false, - access_levels: access_levels, - orphan_methods: Vec::new(), - traits: mem::replace(&mut krate.external_traits, HashMap::new()), - deref_trait_did: analysis.as_ref().and_then(|a| a.deref_trait_did), - typarams: analysis.as_ref().map(|a| { - a.external_typarams.borrow_mut().take().unwrap() - }).unwrap_or(HashMap::new()), - inlined: analysis.as_ref().map(|a| { - a.inlined.borrow_mut().take().unwrap() - }).unwrap_or(HashSet::new()), + parent_is_trait_impl: false, + extern_locations: FxHashMap(), + primitive_locations: FxHashMap(), + stripped_mod: false, + access_levels: krate.access_levels.clone(), + orphan_impl_items: Vec::new(), + traits: mem::replace(&mut krate.external_traits, FxHashMap()), + deref_trait_did: deref_trait_did, + deref_mut_trait_did: deref_mut_trait_did, + typarams: external_typarams, }; // Cache where all our extern crates are located for &(n, ref e) in &krate.externs { - cache.extern_locations.insert(n, (e.name.clone(), + let src_root = match Path::new(&e.src).parent() { + Some(p) => p.to_path_buf(), + None => PathBuf::new(), + }; + cache.extern_locations.insert(n, (e.name.clone(), src_root, extern_location(e, &cx.dst))); + let did = DefId { 
krate: n, index: CRATE_DEF_INDEX }; - cache.paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); + cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module)); } // Cache where all known primitives have their documentation located. // // Favor linking to as local extern as possible, so iterate all crates in // reverse topological order. - for &(n, ref e) in krate.externs.iter().rev() { - for &prim in &e.primitives { - cache.primitive_locations.insert(prim, n); + for &(_, ref e) in krate.externs.iter().rev() { + for &(def_id, prim, _) in &e.primitives { + cache.primitive_locations.insert(prim, def_id); } } - for &prim in &krate.primitives { - cache.primitive_locations.insert(prim, LOCAL_CRATE); + for &(def_id, prim, _) in &krate.primitives { + cache.primitive_locations.insert(prim, def_id); } cache.stack.push(krate.name.clone()); @@ -525,108 +567,80 @@ pub fn run(mut krate: clean::Crate, CACHE_KEY.with(|v| *v.borrow_mut() = cache.clone()); CURRENT_LOCATION_KEY.with(|s| s.borrow_mut().clear()); - try!(write_shared(&cx, &krate, &*cache, index)); - let krate = try!(render_sources(&mut cx, krate)); + write_shared(&cx, &krate, &*cache, index)?; // And finally render the whole crate's documentation cx.krate(krate) } +/// Build the search index from the collected metadata fn build_index(krate: &clean::Crate, cache: &mut Cache) -> String { - // Build the search index from the collected metadata - let mut nodeid_to_pathid = HashMap::new(); - let mut pathid_to_nodeid = Vec::new(); - { - let Cache { ref mut search_index, - ref orphan_methods, - ref mut paths, .. } = *cache; - - // Attach all orphan methods to the type's definition if the type - // has since been learned. - for &(did, ref item) in orphan_methods { - match paths.get(&did) { - Some(&(ref fqp, _)) => { - // Needed to determine `self` type. 
- let parent_basename = Some(fqp[fqp.len() - 1].clone()); - search_index.push(IndexItem { - ty: shortty(item), - name: item.name.clone().unwrap(), - path: fqp[..fqp.len() - 1].join("::"), - desc: shorter(item.doc_value()), - parent: Some(did), - search_type: get_index_search_type(&item, parent_basename), - }); - }, - None => {} - } - } - - // Reduce `NodeId` in paths into smaller sequential numbers, - // and prune the paths that do not appear in the index. - for item in search_index.iter() { - match item.parent { - Some(nodeid) => { - if !nodeid_to_pathid.contains_key(&nodeid) { - let pathid = pathid_to_nodeid.len(); - nodeid_to_pathid.insert(nodeid, pathid); - pathid_to_nodeid.push(nodeid); - } - } - None => {} - } + let mut nodeid_to_pathid = FxHashMap(); + let mut crate_items = Vec::with_capacity(cache.search_index.len()); + let mut crate_paths = Vec::::new(); + + let Cache { ref mut search_index, + ref orphan_impl_items, + ref mut paths, .. } = *cache; + + // Attach all orphan items to the type's definition if the type + // has since been learned. + for &(did, ref item) in orphan_impl_items { + if let Some(&(ref fqp, _)) = paths.get(&did) { + search_index.push(IndexItem { + ty: item.type_(), + name: item.name.clone().unwrap(), + path: fqp[..fqp.len() - 1].join("::"), + desc: Escape(&shorter(item.doc_value())).to_string(), + parent: Some(did), + parent_idx: None, + search_type: get_index_search_type(&item), + }); } - assert_eq!(nodeid_to_pathid.len(), pathid_to_nodeid.len()); } - // Collect the index into a string - let mut w = io::Cursor::new(Vec::new()); - write!(&mut w, r#"searchIndex['{}'] = {{"items":["#, krate.name).unwrap(); + // Reduce `NodeId` in paths into smaller sequential numbers, + // and prune the paths that do not appear in the index. 
+ let mut lastpath = String::new(); + let mut lastpathid = 0usize; - let mut lastpath = "".to_string(); - for (i, item) in cache.search_index.iter().enumerate() { - // Omit the path if it is same to that of the prior item. - let path; - if lastpath == item.path { - path = ""; - } else { - lastpath = item.path.to_string(); - path = &item.path; - }; + for item in search_index { + item.parent_idx = item.parent.map(|nodeid| { + if nodeid_to_pathid.contains_key(&nodeid) { + *nodeid_to_pathid.get(&nodeid).unwrap() + } else { + let pathid = lastpathid; + nodeid_to_pathid.insert(nodeid, pathid); + lastpathid += 1; - if i > 0 { - write!(&mut w, ",").unwrap(); - } - write!(&mut w, r#"[{},"{}","{}",{}"#, - item.ty as usize, item.name, path, - item.desc.to_json().to_string()).unwrap(); - match item.parent { - Some(nodeid) => { - let pathid = *nodeid_to_pathid.get(&nodeid).unwrap(); - write!(&mut w, ",{}", pathid).unwrap(); + let &(ref fqp, short) = paths.get(&nodeid).unwrap(); + crate_paths.push(((short as usize), fqp.last().unwrap().clone()).to_json()); + pathid } - None => write!(&mut w, ",null").unwrap() - } - match item.search_type { - Some(ref t) => write!(&mut w, ",{}", t).unwrap(), - None => write!(&mut w, ",null").unwrap() - } - write!(&mut w, "]").unwrap(); - } - - write!(&mut w, r#"],"paths":["#).unwrap(); + }); - for (i, &did) in pathid_to_nodeid.iter().enumerate() { - let &(ref fqp, short) = cache.paths.get(&did).unwrap(); - if i > 0 { - write!(&mut w, ",").unwrap(); + // Omit the parent path if it is same to that of the prior item. 
+ if lastpath == item.path { + item.path.clear(); + } else { + lastpath = item.path.clone(); } - write!(&mut w, r#"[{},"{}"]"#, - short as usize, *fqp.last().unwrap()).unwrap(); + crate_items.push(item.to_json()); } - write!(&mut w, "]}};").unwrap(); + let crate_doc = krate.module.as_ref().map(|module| { + Escape(&shorter(module.doc_value())).to_string() + }).unwrap_or(String::new()); + + let mut crate_data = BTreeMap::new(); + crate_data.insert("doc".to_owned(), Json::String(crate_doc)); + crate_data.insert("items".to_owned(), Json::Array(crate_items)); + crate_data.insert("paths".to_owned(), Json::Array(crate_paths)); - String::from_utf8(w.into_inner()).unwrap() + // Collect the index into a string + format!("searchIndex[{}] = {};", + as_json(&krate.name), + Json::Object(crate_data)) } fn write_shared(cx: &Context, @@ -637,67 +651,77 @@ fn write_shared(cx: &Context, // docs placed in the output directory, so this needs to be a synchronized // operation with respect to all other rustdocs running around. try_err!(mkdir(&cx.dst), &cx.dst); - let _lock = ::flock::Lock::new(&cx.dst.join(".lock")); + let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true); // Add all the static files. These may already exist, but we just // overwrite them anyway to make sure that they're fresh and up-to-date. 
- try!(write(cx.dst.join("jquery.js"), - include_bytes!("static/jquery-2.1.4.min.js"))); - try!(write(cx.dst.join("main.js"), - include_bytes!("static/main.js"))); - try!(write(cx.dst.join("playpen.js"), - include_bytes!("static/playpen.js"))); - try!(write(cx.dst.join("rustdoc.css"), - include_bytes!("static/rustdoc.css"))); - try!(write(cx.dst.join("main.css"), - include_bytes!("static/styles/main.css"))); - try!(write(cx.dst.join("normalize.css"), - include_bytes!("static/normalize.css"))); - try!(write(cx.dst.join("FiraSans-Regular.woff"), - include_bytes!("static/FiraSans-Regular.woff"))); - try!(write(cx.dst.join("FiraSans-Medium.woff"), - include_bytes!("static/FiraSans-Medium.woff"))); - try!(write(cx.dst.join("FiraSans-LICENSE.txt"), - include_bytes!("static/FiraSans-LICENSE.txt"))); - try!(write(cx.dst.join("Heuristica-Italic.woff"), - include_bytes!("static/Heuristica-Italic.woff"))); - try!(write(cx.dst.join("Heuristica-LICENSE.txt"), - include_bytes!("static/Heuristica-LICENSE.txt"))); - try!(write(cx.dst.join("SourceSerifPro-Regular.woff"), - include_bytes!("static/SourceSerifPro-Regular.woff"))); - try!(write(cx.dst.join("SourceSerifPro-Bold.woff"), - include_bytes!("static/SourceSerifPro-Bold.woff"))); - try!(write(cx.dst.join("SourceSerifPro-LICENSE.txt"), - include_bytes!("static/SourceSerifPro-LICENSE.txt"))); - try!(write(cx.dst.join("SourceCodePro-Regular.woff"), - include_bytes!("static/SourceCodePro-Regular.woff"))); - try!(write(cx.dst.join("SourceCodePro-Semibold.woff"), - include_bytes!("static/SourceCodePro-Semibold.woff"))); - try!(write(cx.dst.join("SourceCodePro-LICENSE.txt"), - include_bytes!("static/SourceCodePro-LICENSE.txt"))); - try!(write(cx.dst.join("LICENSE-MIT.txt"), - include_bytes!("static/LICENSE-MIT.txt"))); - try!(write(cx.dst.join("LICENSE-APACHE.txt"), - include_bytes!("static/LICENSE-APACHE.txt"))); - try!(write(cx.dst.join("COPYRIGHT.txt"), - include_bytes!("static/COPYRIGHT.txt"))); + + 
write(cx.dst.join("jquery.js"), + include_bytes!("static/jquery-2.1.4.min.js"))?; + write(cx.dst.join("main.js"), + include_bytes!("static/main.js"))?; + write(cx.dst.join("rustdoc.css"), + include_bytes!("static/rustdoc.css"))?; + write(cx.dst.join("main.css"), + include_bytes!("static/styles/main.css"))?; + if let Some(ref css) = cx.shared.css_file_extension { + let mut content = String::new(); + let css = css.as_path(); + let mut f = try_err!(File::open(css), css); + + try_err!(f.read_to_string(&mut content), css); + let css = cx.dst.join("theme.css"); + let css = css.as_path(); + let mut f = try_err!(File::create(css), css); + try_err!(write!(f, "{}", &content), css); + } + write(cx.dst.join("normalize.css"), + include_bytes!("static/normalize.css"))?; + write(cx.dst.join("FiraSans-Regular.woff"), + include_bytes!("static/FiraSans-Regular.woff"))?; + write(cx.dst.join("FiraSans-Medium.woff"), + include_bytes!("static/FiraSans-Medium.woff"))?; + write(cx.dst.join("FiraSans-LICENSE.txt"), + include_bytes!("static/FiraSans-LICENSE.txt"))?; + write(cx.dst.join("Heuristica-Italic.woff"), + include_bytes!("static/Heuristica-Italic.woff"))?; + write(cx.dst.join("Heuristica-LICENSE.txt"), + include_bytes!("static/Heuristica-LICENSE.txt"))?; + write(cx.dst.join("SourceSerifPro-Regular.woff"), + include_bytes!("static/SourceSerifPro-Regular.woff"))?; + write(cx.dst.join("SourceSerifPro-Bold.woff"), + include_bytes!("static/SourceSerifPro-Bold.woff"))?; + write(cx.dst.join("SourceSerifPro-LICENSE.txt"), + include_bytes!("static/SourceSerifPro-LICENSE.txt"))?; + write(cx.dst.join("SourceCodePro-Regular.woff"), + include_bytes!("static/SourceCodePro-Regular.woff"))?; + write(cx.dst.join("SourceCodePro-Semibold.woff"), + include_bytes!("static/SourceCodePro-Semibold.woff"))?; + write(cx.dst.join("SourceCodePro-LICENSE.txt"), + include_bytes!("static/SourceCodePro-LICENSE.txt"))?; + write(cx.dst.join("LICENSE-MIT.txt"), + include_bytes!("static/LICENSE-MIT.txt"))?; + 
write(cx.dst.join("LICENSE-APACHE.txt"), + include_bytes!("static/LICENSE-APACHE.txt"))?; + write(cx.dst.join("COPYRIGHT.txt"), + include_bytes!("static/COPYRIGHT.txt"))?; fn collect(path: &Path, krate: &str, key: &str) -> io::Result> { let mut ret = Vec::new(); if path.exists() { - for line in BufReader::new(try!(File::open(path))).lines() { - let line = try!(line); + for line in BufReader::new(File::open(path)?).lines() { + let line = line?; if !line.starts_with(key) { - continue + continue; } - if line.starts_with(&format!("{}['{}']", key, krate)) { - continue + if line.starts_with(&format!(r#"{}["{}"]"#, key, krate)) { + continue; } ret.push(line.to_string()); } } - return Ok(ret); + Ok(ret) } // Update the search index @@ -724,7 +748,10 @@ fn write_shared(cx: &Context, // theory it should be... let &(ref remote_path, remote_item_type) = match cache.paths.get(&did) { Some(p) => p, - None => continue, + None => match cache.external_paths.get(&did) { + Some(p) => p, + None => continue, + } }; let mut mydst = dst.clone(); @@ -733,7 +760,7 @@ fn write_shared(cx: &Context, try_err!(mkdir(&mydst), &mydst); } mydst.push(&format!("{}.{}.js", - remote_item_type.to_static_str(), + remote_item_type.css_class(), remote_path[remote_path.len() - 1])); let all_implementors = try_err!(collect(&mydst, &krate.name, "implementors"), @@ -748,7 +775,7 @@ fn write_shared(cx: &Context, try_err!(write!(&mut f, "{}", *implementor), &mydst); } - try_err!(write!(&mut f, r"implementors['{}'] = [", krate.name), &mydst); + try_err!(write!(&mut f, r#"implementors["{}"] = ["#, krate.name), &mydst); for imp in imps { // If the trait and implementation are in the same crate, then // there's no need to emit information about it (there's inlining @@ -770,20 +797,17 @@ fn write_shared(cx: &Context, Ok(()) } -fn render_sources(cx: &mut Context, +fn render_sources(dst: &Path, scx: &mut SharedContext, krate: clean::Crate) -> Result { info!("emitting source files"); - let dst = cx.dst.join("src"); + 
let dst = dst.join("src"); try_err!(mkdir(&dst), &dst); let dst = dst.join(&krate.name); try_err!(mkdir(&dst), &dst); let mut folder = SourceCollector { dst: dst, - seen: HashSet::new(), - cx: cx, + scx: scx, }; - // skip all invalid spans - folder.seen.insert("".to_string()); Ok(folder.fold_crate(krate)) } @@ -793,21 +817,19 @@ fn write(dst: PathBuf, contents: &[u8]) -> Result<(), Error> { Ok(try_err!(try_err!(File::create(&dst), &dst).write_all(contents), &dst)) } -/// Makes a directory on the filesystem, failing the thread if an error occurs and -/// skipping if the directory already exists. +/// Makes a directory on the filesystem, failing the thread if an error occurs +/// and skipping if the directory already exists. +/// +/// Note that this also handles races as rustdoc is likely to be run +/// concurrently against another invocation. fn mkdir(path: &Path) -> io::Result<()> { - if !path.exists() { - fs::create_dir(path) - } else { - Ok(()) + match fs::create_dir(path) { + Ok(()) => Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), + Err(e) => Err(e) } } -/// Returns a documentation-level item type from the item. -fn shortty(item: &clean::Item) -> ItemType { - ItemType::from_item(item) -} - /// Takes a path to a source file and cleans the path to it. This canonicalizes /// things like ".." to components which preserve the "top down" hierarchy of a /// static HTML tree. Each component in the cleaned path will be passed as an @@ -821,16 +843,17 @@ fn clean_srcpath(src_root: &Path, p: &Path, keep_filename: bool, mut f: F) wh // make it relative, if possible let p = p.strip_prefix(src_root).unwrap_or(p); - let mut iter = p.iter().map(|x| x.to_str().unwrap()).peekable(); + let mut iter = p.components().peekable(); + while let Some(c) = iter.next() { if !keep_filename && iter.peek().is_none() { break; } - if ".." 
== c { - f("up"); - } else { - f(c) + match c { + Component::ParentDir => f("up"), + Component::Normal(c) => f(c.to_str().unwrap()), + _ => continue, } } } @@ -846,43 +869,39 @@ fn extern_location(e: &clean::ExternalCrate, dst: &Path) -> ExternalLocation { // Failing that, see if there's an attribute specifying where to find this // external crate - for attr in &e.attrs { - match *attr { - clean::List(ref x, ref list) if "doc" == *x => { - for attr in list { - match *attr { - clean::NameValue(ref x, ref s) - if "html_root_url" == *x => { - if s.ends_with("/") { - return Remote(s.to_string()); - } - return Remote(format!("{}/", s)); - } - _ => {} - } - } - } - _ => {} + e.attrs.lists("doc") + .filter(|a| a.check_name("html_root_url")) + .filter_map(|a| a.value_str()) + .map(|url| { + let mut url = url.to_string(); + if !url.ends_with("/") { + url.push('/') } - } - - // Well, at least we tried. - return Unknown; + Remote(url) + }).next().unwrap_or(Unknown) // Well, at least we tried. } impl<'a> DocFolder for SourceCollector<'a> { fn fold_item(&mut self, item: clean::Item) -> Option { // If we're including source files, and we haven't seen this file yet, - // then we need to render it out to the filesystem - if self.cx.include_sources && !self.seen.contains(&item.source.filename) { + // then we need to render it out to the filesystem. + if self.scx.include_sources + // skip all invalid spans + && item.source.filename != "" + // skip non-local items + && item.def_id.is_local() + // Macros from other libraries get special filenames which we can + // safely ignore. + && !(item.source.filename.starts_with("<") + && item.source.filename.ends_with("macros>")) { // If it turns out that we couldn't read this file, then we probably // can't read any of the files (generating html output from json or // something like that), so just don't include sources for the // entire crate. 
The other option is maintaining this mapping on a // per-file basis, but that's probably not worth it... - self.cx - .include_sources = match self.emit_source(&item.source .filename) { + self.scx + .include_sources = match self.emit_source(&item.source.filename) { Ok(()) => true, Err(e) => { println!("warning: source code was requested to be rendered, \ @@ -892,9 +911,7 @@ impl<'a> DocFolder for SourceCollector<'a> { false } }; - self.seen.insert(item.source.filename.clone()); } - self.fold_item_recur(item) } } @@ -903,19 +920,14 @@ impl<'a> SourceCollector<'a> { /// Renders the given filename into its corresponding HTML source file. fn emit_source(&mut self, filename: &str) -> io::Result<()> { let p = PathBuf::from(filename); + if self.scx.local_sources.contains_key(&p) { + // We've already emitted this source + return Ok(()); + } - // If we couldn't open this file, then just returns because it - // probably means that it's some standard library macro thing and we - // can't have the source to it anyway. let mut contents = Vec::new(); - match File::open(&p).and_then(|mut f| f.read_to_end(&mut contents)) { - Ok(r) => r, - // macros from other libraries get special filenames which we can - // safely ignore - Err(..) 
if filename.starts_with("<") && - filename.ends_with("macros>") => return Ok(()), - Err(e) => return Err(e) - }; + File::open(&p).and_then(|mut f| f.read_to_end(&mut contents))?; + let contents = str::from_utf8(&contents).unwrap(); // Remove the utf-8 BOM if any @@ -928,86 +940,84 @@ impl<'a> SourceCollector<'a> { // Create the intermediate directories let mut cur = self.dst.clone(); let mut root_path = String::from("../../"); - clean_srcpath(&self.cx.src_root, &p, false, |component| { + let mut href = String::new(); + clean_srcpath(&self.scx.src_root, &p, false, |component| { cur.push(component); mkdir(&cur).unwrap(); root_path.push_str("../"); + href.push_str(component); + href.push('/'); }); - let mut fname = p.file_name().expect("source has no filename") .to_os_string(); fname.push(".html"); - cur.push(&fname[..]); - let mut w = BufWriter::new(try!(File::create(&cur))); + cur.push(&fname); + href.push_str(&fname.to_string_lossy()); + + let mut w = BufWriter::new(File::create(&cur)?); let title = format!("{} -- source", cur.file_name().unwrap() .to_string_lossy()); let desc = format!("Source to the Rust file `{}`.", filename); let page = layout::Page { title: &title, - ty: "source", + css_class: "source", root_path: &root_path, description: &desc, - keywords: get_basic_keywords(), + keywords: BASIC_KEYWORDS, }; - try!(layout::render(&mut w, &self.cx.layout, - &page, &(""), &Source(contents))); - try!(w.flush()); - return Ok(()); + layout::render(&mut w, &self.scx.layout, + &page, &(""), &Source(contents), + self.scx.css_file_extension.is_some())?; + w.flush()?; + self.scx.local_sources.insert(p, href); + Ok(()) } } impl DocFolder for Cache { fn fold_item(&mut self, item: clean::Item) -> Option { - // If this is a private module, we don't want it in the search index. - let orig_privmod = match item.inner { - clean::ModuleItem(..) 
=> { - let prev = self.privmod; - self.privmod = prev || (self.remove_priv && item.visibility != Some(hir::Public)); - prev + // If this is a stripped module, + // we don't want it or its children in the search index. + let orig_stripped_mod = match item.inner { + clean::StrippedItem(box clean::ModuleItem(..)) => { + mem::replace(&mut self.stripped_mod, true) } - _ => self.privmod, + _ => self.stripped_mod, }; // Register any generics to their corresponding string. This is used - // when pretty-printing types - match item.inner { - clean::StructItem(ref s) => self.generics(&s.generics), - clean::EnumItem(ref e) => self.generics(&e.generics), - clean::FunctionItem(ref f) => self.generics(&f.generics), - clean::TypedefItem(ref t, _) => self.generics(&t.generics), - clean::TraitItem(ref t) => self.generics(&t.generics), - clean::ImplItem(ref i) => self.generics(&i.generics), - clean::TyMethodItem(ref i) => self.generics(&i.generics), - clean::MethodItem(ref i) => self.generics(&i.generics), - clean::ForeignFunctionItem(ref f) => self.generics(&f.generics), - _ => {} + // when pretty-printing types. + if let Some(generics) = item.inner.generics() { + self.generics(generics); } - // Propagate a trait methods' documentation to all implementors of the - // trait + // Propagate a trait method's documentation to all implementors of the + // trait. if let clean::TraitItem(ref t) = item.inner { - self.traits.insert(item.def_id, t.clone()); + self.traits.entry(item.def_id).or_insert_with(|| t.clone()); } // Collect all the implementors of traits. if let clean::ImplItem(ref i) = item.inner { - match i.trait_ { - Some(clean::ResolvedPath{ did, .. }) => { - self.implementors.entry(did).or_insert(vec![]).push(Implementor { - def_id: item.def_id, - stability: item.stability.clone(), - impl_: i.clone(), - }); - } - Some(..) 
| None => {} + if let Some(did) = i.trait_.def_id() { + self.implementors.entry(did).or_insert(vec![]).push(Implementor { + def_id: item.def_id, + stability: item.stability.clone(), + impl_: i.clone(), + }); } } - // Index this method for searching later on + // Index this method for searching later on. if let Some(ref s) = item.name { - let (parent, is_method) = match item.inner { - clean::AssociatedTypeItem(..) | + let (parent, is_inherent_impl_item) = match item.inner { + clean::StrippedItem(..) => ((None, None), false), clean::AssociatedConstItem(..) | + clean::TypedefItem(_, true) if self.parent_is_trait_impl => { + // skip associated items in trait impls + ((None, None), false) + } + clean::AssociatedTypeItem(..) | clean::TyMethodItem(..) | clean::StructFieldItem(..) | clean::VariantItem(..) => { @@ -1015,20 +1025,20 @@ impl DocFolder for Cache { Some(&self.stack[..self.stack.len() - 1])), false) } - clean::MethodItem(..) => { + clean::MethodItem(..) | clean::AssociatedConstItem(..) => { if self.parent_stack.is_empty() { ((None, None), false) } else { let last = self.parent_stack.last().unwrap(); let did = *last; let path = match self.paths.get(&did) { - Some(&(_, ItemType::Trait)) => - Some(&self.stack[..self.stack.len() - 1]), // The current stack not necessarily has correlation // for where the type was defined. On the other // hand, `paths` always has the right // information if present. + Some(&(ref fqp, ItemType::Trait)) | Some(&(ref fqp, ItemType::Struct)) | + Some(&(ref fqp, ItemType::Union)) | Some(&(ref fqp, ItemType::Enum)) => Some(&fqp[..fqp.len() - 1]), Some(..) 
=> Some(&*self.stack), @@ -1037,62 +1047,54 @@ impl DocFolder for Cache { ((Some(*last), path), true) } } - clean::TypedefItem(_, true) => { - // skip associated types in impls - ((None, None), false) - } _ => ((None, Some(&*self.stack)), false) }; - let hidden_field = match item.inner { - clean::StructFieldItem(clean::HiddenStructField) => true, - _ => false - }; match parent { - (parent, Some(path)) if is_method || (!self.privmod && !hidden_field) => { - // Needed to determine `self` type. - let parent_basename = self.parent_stack.first().and_then(|parent| { - match self.paths.get(parent) { - Some(&(ref fqp, _)) => Some(fqp[fqp.len() - 1].clone()), - _ => None - } - }); + (parent, Some(path)) if is_inherent_impl_item || (!self.stripped_mod) => { + debug_assert!(!item.is_stripped()); + // A crate has a module at its root, containing all items, + // which should not be indexed. The crate-item itself is + // inserted later on when serializing the search-index. if item.def_id.index != CRATE_DEF_INDEX { self.search_index.push(IndexItem { - ty: shortty(&item), + ty: item.type_(), name: s.to_string(), path: path.join("::").to_string(), - desc: shorter(item.doc_value()), + desc: Escape(&shorter(item.doc_value())).to_string(), parent: parent, - search_type: get_index_search_type(&item, parent_basename), + parent_idx: None, + search_type: get_index_search_type(&item), }); } } - (Some(parent), None) if is_method || (!self.privmod && !hidden_field)=> { - if parent.is_local() { - // We have a parent, but we don't know where they're - // defined yet. Wait for later to index this item. - self.orphan_methods.push((parent, item.clone())) - } + (Some(parent), None) if is_inherent_impl_item => { + // We have a parent, but we don't know where they're + // defined yet. Wait for later to index this item. + self.orphan_impl_items.push((parent, item.clone())); } _ => {} } } // Keep track of the fully qualified path for this item. 
- let pushed = if item.name.is_some() { - let n = item.name.as_ref().unwrap(); - if !n.is_empty() { + let pushed = match item.name { + Some(ref n) if !n.is_empty() => { self.stack.push(n.to_string()); true - } else { false } - } else { false }; + } + _ => false, + }; + match item.inner { clean::StructItem(..) | clean::EnumItem(..) | clean::TypedefItem(..) | clean::TraitItem(..) | clean::FunctionItem(..) | clean::ModuleItem(..) | - clean::ForeignFunctionItem(..) if !self.privmod => { + clean::ForeignFunctionItem(..) | clean::ForeignStaticItem(..) | + clean::ConstantItem(..) | clean::StaticItem(..) | + clean::UnionItem(..) + if !self.stripped_mod => { // Reexported items mean that the same id can show up twice // in the rustdoc ast that we're looking at. We know, // however, that a reexported item doesn't show up in the @@ -1101,16 +1103,15 @@ impl DocFolder for Cache { // not a public item. if !self.paths.contains_key(&item.def_id) || - !item.def_id.is_local() || self.access_levels.is_public(item.def_id) { self.paths.insert(item.def_id, - (self.stack.clone(), shortty(&item))); + (self.stack.clone(), item.type_())); } } - // link variants to their parent enum because pages aren't emitted - // for each variant - clean::VariantItem(..) if !self.privmod => { + // Link variants to their parent enum because pages aren't emitted + // for each variant. + clean::VariantItem(..) if !self.stripped_mod => { let mut stack = self.stack.clone(); stack.pop(); self.paths.insert(item.def_id, (stack, ItemType::Enum)); @@ -1118,32 +1119,38 @@ impl DocFolder for Cache { clean::PrimitiveItem(..) if item.visibility.is_some() => { self.paths.insert(item.def_id, (self.stack.clone(), - shortty(&item))); + item.type_())); } _ => {} } // Maintain the parent stack + let orig_parent_is_trait_impl = self.parent_is_trait_impl; let parent_pushed = match item.inner { - clean::TraitItem(..) | clean::EnumItem(..) | clean::StructItem(..) => { + clean::TraitItem(..) | clean::EnumItem(..) 
| + clean::StructItem(..) | clean::UnionItem(..) => { self.parent_stack.push(item.def_id); + self.parent_is_trait_impl = false; true } clean::ImplItem(ref i) => { + self.parent_is_trait_impl = i.trait_.is_some(); match i.for_ { clean::ResolvedPath{ did, .. } => { self.parent_stack.push(did); true } ref t => { - match t.primitive_type() { - Some(prim) => { - let did = DefId::local(prim.to_def_index()); + let prim_did = t.primitive_type().and_then(|t| { + self.primitive_locations.get(&t).cloned() + }); + match prim_did { + Some(did) => { self.parent_stack.push(did); true } - _ => false, + None => false, } } } @@ -1151,67 +1158,46 @@ impl DocFolder for Cache { _ => false }; - // Once we've recursively found all the generics, then hoard off all the - // implementations elsewhere - let ret = match self.fold_item_recur(item) { - Some(item) => { - match item { - clean::Item{ attrs, inner: clean::ImplItem(i), .. } => { - // extract relevant documentation for this impl - let dox = match attrs.into_iter().find(|a| { - match *a { - clean::NameValue(ref x, _) - if "doc" == *x => { - true - } - _ => false - } - }) { - Some(clean::NameValue(_, dox)) => Some(dox), - Some(..) | None => None, - }; - - // Figure out the id of this impl. This may map to a - // primitive rather than always to a struct/enum. - let did = match i.for_ { - clean::ResolvedPath { did, .. } | - clean::BorrowedRef { - type_: box clean::ResolvedPath { did, .. }, .. - } => { - Some(did) - } - - ref t => { - t.primitive_type().and_then(|t| { - self.primitive_locations.get(&t).map(|n| { - let id = t.to_def_index(); - DefId { krate: *n, index: id } - }) - }) - } - }; - - if let Some(did) = did { - self.impls.entry(did).or_insert(vec![]).push(Impl { - impl_: i, - dox: dox, - stability: item.stability.clone(), - }); + // Once we've recursively found all the generics, hoard off all the + // implementations elsewhere. 
+ let ret = self.fold_item_recur(item).and_then(|item| { + if let clean::Item { inner: clean::ImplItem(_), .. } = item { + // Figure out the id of this impl. This may map to a + // primitive rather than always to a struct/enum. + // Note: matching twice to restrict the lifetime of the `i` borrow. + let did = if let clean::Item { inner: clean::ImplItem(ref i), .. } = item { + match i.for_ { + clean::ResolvedPath { did, .. } | + clean::BorrowedRef { + type_: box clean::ResolvedPath { did, .. }, .. + } => { + Some(did) + } + ref t => { + t.primitive_type().and_then(|t| { + self.primitive_locations.get(&t).cloned() + }) } - - None } - - i => Some(i), + } else { + unreachable!() + }; + if let Some(did) = did { + self.impls.entry(did).or_insert(vec![]).push(Impl { + impl_item: item, + }); } + None + } else { + Some(item) } - i => i, - }; + }); if pushed { self.stack.pop().unwrap(); } if parent_pushed { self.parent_stack.pop().unwrap(); } - self.privmod = orig_privmod; - return ret; + self.stripped_mod = orig_stripped_mod; + self.parent_is_trait_impl = orig_parent_is_trait_impl; + ret } } @@ -1224,8 +1210,14 @@ impl<'a> Cache { } impl Context { + /// String representation of how to get back to the root path of the 'doc/' + /// folder in terms of a relative URL. + fn root_path(&self) -> String { + repeat("../").take(self.current.len()).collect::() + } + /// Recurse in the directory structure and change the "root path" to make - /// sure it always points to the top (relatively) + /// sure it always points to the top (relatively). 
fn recurse(&mut self, s: String, f: F) -> T where F: FnOnce(&mut Context) -> T, { @@ -1234,23 +1226,19 @@ impl Context { } let prev = self.dst.clone(); self.dst.push(&s); - self.root_path.push_str("../"); self.current.push(s); info!("Recursing into {}", self.dst.display()); - mkdir(&self.dst).unwrap(); let ret = f(self); info!("Recursed; leaving {}", self.dst.display()); // Go back to where we were at self.dst = prev; - let len = self.root_path.len(); - self.root_path.truncate(len - 3); self.current.pop().unwrap(); - return ret; + ret } /// Main method for rendering a crate. @@ -1260,21 +1248,80 @@ impl Context { fn krate(self, mut krate: clean::Crate) -> Result<(), Error> { let mut item = match krate.module.take() { Some(i) => i, - None => return Ok(()) + None => return Ok(()), }; item.name = Some(krate.name); - // render the crate documentation - let mut work = vec!((self, item)); - loop { - match work.pop() { - Some((mut cx, item)) => try!(cx.item(item, |cx, item| { - work.push((cx.clone(), item)); - })), - None => break, + // Render the crate documentation + let mut work = vec![(self, item)]; + + while let Some((mut cx, item)) = work.pop() { + cx.item(item, |cx, item| { + work.push((cx.clone(), item)) + })? + } + Ok(()) + } + + fn render_item(&self, + writer: &mut io::Write, + it: &clean::Item, + pushname: bool) + -> io::Result<()> { + // A little unfortunate that this is done like this, but it sure + // does make formatting *a lot* nicer. 
+ CURRENT_LOCATION_KEY.with(|slot| { + *slot.borrow_mut() = self.current.clone(); + }); + + let mut title = if it.is_primitive() { + // No need to include the namespace for primitive types + String::new() + } else { + self.current.join("::") + }; + if pushname { + if !title.is_empty() { + title.push_str("::"); } + title.push_str(it.name.as_ref().unwrap()); } + title.push_str(" - Rust"); + let tyname = it.type_().css_class(); + let desc = if it.is_crate() { + format!("API documentation for the Rust `{}` crate.", + self.shared.layout.krate) + } else { + format!("API documentation for the Rust `{}` {} in crate `{}`.", + it.name.as_ref().unwrap(), tyname, self.shared.layout.krate) + }; + let keywords = make_item_keywords(it); + let page = layout::Page { + css_class: tyname, + root_path: &self.root_path(), + title: &title, + description: &desc, + keywords: &keywords, + }; + reset_ids(true); + + if !self.render_redirect_pages { + layout::render(writer, &self.shared.layout, &page, + &Sidebar{ cx: self, item: it }, + &Item{ cx: self, item: it }, + self.shared.css_file_extension.is_some())?; + } else { + let mut url = self.root_path(); + if let Some(&(ref names, ty)) = cache().paths.get(&it.def_id) { + for name in &names[..names.len() - 1] { + url.push_str(name); + url.push_str("/"); + } + url.push_str(&item_path(ty, names.last().unwrap())); + layout::redirect(writer, &url)?; + } + } Ok(()) } @@ -1286,136 +1333,100 @@ impl Context { fn item(&mut self, item: clean::Item, mut f: F) -> Result<(), Error> where F: FnMut(&mut Context, clean::Item), { - fn render(w: File, cx: &Context, it: &clean::Item, - pushname: bool) -> io::Result<()> { - // A little unfortunate that this is done like this, but it sure - // does make formatting *a lot* nicer. 
- CURRENT_LOCATION_KEY.with(|slot| { - *slot.borrow_mut() = cx.current.clone(); - }); - - let mut title = cx.current.join("::"); - if pushname { - if !title.is_empty() { - title.push_str("::"); - } - title.push_str(it.name.as_ref().unwrap()); - } - title.push_str(" - Rust"); - let tyname = shortty(it).to_static_str(); - let is_crate = match it.inner { - clean::ModuleItem(clean::Module { items: _, is_crate: true }) => true, - _ => false - }; - let desc = if is_crate { - format!("API documentation for the Rust `{}` crate.", - cx.layout.krate) - } else { - format!("API documentation for the Rust `{}` {} in crate `{}`.", - it.name.as_ref().unwrap(), tyname, cx.layout.krate) - }; - let keywords = make_item_keywords(it); - let page = layout::Page { - ty: tyname, - root_path: &cx.root_path, - title: &title, - description: &desc, - keywords: &keywords, - }; - - reset_ids(); - - // We have a huge number of calls to write, so try to alleviate some - // of the pain by using a buffered writer instead of invoking the - // write syscall all the time. - let mut writer = BufWriter::new(w); - if !cx.render_redirect_pages { - try!(layout::render(&mut writer, &cx.layout, &page, - &Sidebar{ cx: cx, item: it }, - &Item{ cx: cx, item: it })); - } else { - let mut url = repeat("../").take(cx.current.len()) - .collect::(); - match cache().paths.get(&it.def_id) { - Some(&(ref names, _)) => { - for name in &names[..names.len() - 1] { - url.push_str(name); - url.push_str("/"); - } - url.push_str(&item_path(it)); - try!(layout::redirect(&mut writer, &url)); - } - None => {} - } - } - writer.flush() - } - - // Private modules may survive the strip-private pass if they - // contain impls for public types. These modules can also + // Stripped modules survive the rustdoc passes (i.e. `strip-private`) + // if they contain impls for public types. These modules can also // contain items such as publicly reexported structures. 
// // External crates will provide links to these structures, so - // these modules are recursed into, but not rendered normally (a - // flag on the context). + // these modules are recursed into, but not rendered normally + // (a flag on the context). if !self.render_redirect_pages { - self.render_redirect_pages = self.ignore_private_item(&item); + self.render_redirect_pages = maybe_ignore_item(&item); } - match item.inner { + if item.is_mod() { // modules are special because they add a namespace. We also need to // recurse into the items of the module as well. - clean::ModuleItem(..) => { - let name = item.name.as_ref().unwrap().to_string(); - let mut item = Some(item); - self.recurse(name, |this| { - let item = item.take().unwrap(); + let name = item.name.as_ref().unwrap().to_string(); + let mut item = Some(item); + self.recurse(name, |this| { + let item = item.take().unwrap(); + + let mut buf = Vec::new(); + this.render_item(&mut buf, &item, false).unwrap(); + // buf will be empty if the module is stripped and there is no redirect for it + if !buf.is_empty() { let joint_dst = this.dst.join("index.html"); - let dst = try_err!(File::create(&joint_dst), &joint_dst); - try_err!(render(dst, this, &item, false), &joint_dst); - - let m = match item.inner { - clean::ModuleItem(m) => m, - _ => unreachable!() - }; + try_err!(fs::create_dir_all(&this.dst), &this.dst); + let mut dst = try_err!(File::create(&joint_dst), &joint_dst); + try_err!(dst.write_all(&buf), &joint_dst); + } - // render sidebar-items.js used throughout this module - { - let items = this.build_sidebar_items(&m); - let js_dst = this.dst.join("sidebar-items.js"); - let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst)); - try_err!(write!(&mut js_out, "initSidebarItems({});", - json::as_json(&items)), &js_dst); - } + let m = match item.inner { + clean::StrippedItem(box clean::ModuleItem(m)) | + clean::ModuleItem(m) => m, + _ => unreachable!() + }; - for item in m.items { - f(this,item); 
- } - Ok(()) - }) - } + // Render sidebar-items.js used throughout this module. + if !this.render_redirect_pages { + let items = this.build_sidebar_items(&m); + let js_dst = this.dst.join("sidebar-items.js"); + let mut js_out = BufWriter::new(try_err!(File::create(&js_dst), &js_dst)); + try_err!(write!(&mut js_out, "initSidebarItems({});", + as_json(&items)), &js_dst); + } - // Things which don't have names (like impls) don't get special - // pages dedicated to them. - _ if item.name.is_some() => { - let joint_dst = self.dst.join(&item_path(&item)); + for item in m.items { + f(this,item); + } - let dst = try_err!(File::create(&joint_dst), &joint_dst); - try_err!(render(dst, self, &item, true), &joint_dst); Ok(()) - } + })?; + } else if item.name.is_some() { + let mut buf = Vec::new(); + self.render_item(&mut buf, &item, true).unwrap(); + // buf will be empty if the item is stripped and there is no redirect for it + if !buf.is_empty() { + let name = item.name.as_ref().unwrap(); + let item_type = item.type_(); + let file_name = &item_path(item_type, name); + let joint_dst = self.dst.join(file_name); + try_err!(fs::create_dir_all(&self.dst), &self.dst); + let mut dst = try_err!(File::create(&joint_dst), &joint_dst); + try_err!(dst.write_all(&buf), &joint_dst); + + // Redirect from a sane URL using the namespace to Rustdoc's + // URL for the page. + let redir_name = format!("{}.{}.html", name, item_type.name_space()); + let redir_dst = self.dst.join(redir_name); + if let Ok(mut redirect_out) = OpenOptions::new().create_new(true) + .write(true) + .open(&redir_dst) { + try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); + } - _ => Ok(()) + // If the item is a macro, redirect from the old macro URL (with !) + // to the new one (without). + // FIXME(#35705) remove this redirect. 
+ if item_type == ItemType::Macro { + let redir_name = format!("{}.{}!.html", item_type, name); + let redir_dst = self.dst.join(redir_name); + let mut redirect_out = try_err!(File::create(&redir_dst), &redir_dst); + try_err!(layout::redirect(&mut redirect_out, file_name), &redir_dst); + } + } } + Ok(()) } fn build_sidebar_items(&self, m: &clean::Module) -> BTreeMap> { // BTreeMap instead of HashMap to get a sorted output let mut map = BTreeMap::new(); for item in &m.items { - if self.ignore_private_item(item) { continue } + if maybe_ignore_item(item) { continue } - let short = shortty(item).to_static_str(); + let short = item.type_().css_class(); let myname = match item.name { None => continue, Some(ref s) => s.to_string(), @@ -1428,30 +1439,11 @@ impl Context { for (_, items) in &mut map { items.sort(); } - return map; - } - - fn ignore_private_item(&self, it: &clean::Item) -> bool { - match it.inner { - clean::ModuleItem(ref m) => { - (m.items.is_empty() && - it.doc_value().is_none() && - it.visibility != Some(hir::Public)) || - (self.passes.contains("strip-private") && it.visibility != Some(hir::Public)) - } - clean::PrimitiveItem(..) => it.visibility != Some(hir::Public), - _ => false, - } + map } } impl<'a> Item<'a> { - fn ismodule(&self) -> bool { - match self.item.inner { - clean::ModuleItem(..) => true, _ => false - } - } - /// Generate a url appropriate for an `href` attribute back to the source of /// this item. /// @@ -1461,121 +1453,106 @@ impl<'a> Item<'a> { /// If `None` is returned, then a source link couldn't be generated. This /// may happen, for example, with externally inlined items where the source /// of their crate documentation isn't known. 
- fn href(&self, cx: &Context) -> Option { - let href = if self.item.source.loline == self.item.source.hiline { - format!("{}", self.item.source.loline) + fn src_href(&self) -> Option { + let mut root = self.cx.root_path(); + + let cache = cache(); + let mut path = String::new(); + let (krate, path) = if self.item.def_id.is_local() { + let path = PathBuf::from(&self.item.source.filename); + if let Some(path) = self.cx.shared.local_sources.get(&path) { + (&self.cx.shared.layout.krate, path) + } else { + return None; + } } else { - format!("{}-{}", self.item.source.loline, self.item.source.hiline) - }; - - // First check to see if this is an imported macro source. In this case - // we need to handle it specially as cross-crate inlined macros have... - // odd locations! - let imported_macro_from = match self.item.inner { - clean::MacroItem(ref m) => m.imported_from.as_ref(), - _ => None, - }; - if let Some(krate) = imported_macro_from { - let cache = cache(); - let root = cache.extern_locations.values().find(|&&(ref n, _)| { - *krate == *n - }).map(|l| &l.1); - let root = match root { - Some(&Remote(ref s)) => s.to_string(), - Some(&Local) => self.cx.root_path.clone(), - None | Some(&Unknown) => return None, + let (krate, src_root) = match cache.extern_locations.get(&self.item.def_id.krate) { + Some(&(ref name, ref src, Local)) => (name, src), + Some(&(ref name, ref src, Remote(ref s))) => { + root = s.to_string(); + (name, src) + } + Some(&(_, _, Unknown)) | None => return None, }; - Some(format!("{root}/{krate}/macro.{name}.html?gotomacrosrc=1", - root = root, - krate = krate, - name = self.item.name.as_ref().unwrap())) - - // If this item is part of the local crate, then we're guaranteed to - // know the span, so we plow forward and generate a proper url. The url - // has anchors for the line numbers that we're linking to. 
- } else if self.item.def_id.is_local() { - let mut path = Vec::new(); - clean_srcpath(&cx.src_root, Path::new(&self.item.source.filename), - true, |component| { - path.push(component.to_string()); + + let file = Path::new(&self.item.source.filename); + clean_srcpath(&src_root, file, false, |component| { + path.push_str(component); + path.push('/'); }); - Some(format!("{root}src/{krate}/{path}.html#{href}", - root = self.cx.root_path, - krate = self.cx.layout.krate, - path = path.join("/"), - href = href)) - - // If this item is not part of the local crate, then things get a little - // trickier. We don't actually know the span of the external item, but - // we know that the documentation on the other end knows the span! - // - // In this case, we generate a link to the *documentation* for this type - // in the original crate. There's an extra URL parameter which says that - // we want to go somewhere else, and the JS on the destination page will - // pick it up and instantly redirect the browser to the source code. - // - // If we don't know where the external documentation for this crate is - // located, then we return `None`. 
+ let mut fname = file.file_name().expect("source has no filename") + .to_os_string(); + fname.push(".html"); + path.push_str(&fname.to_string_lossy()); + (krate, &path) + }; + + let lines = if self.item.source.loline == self.item.source.hiline { + format!("{}", self.item.source.loline) } else { - let cache = cache(); - let path = &cache.external_paths[&self.item.def_id]; - let root = match cache.extern_locations[&self.item.def_id.krate] { - (_, Remote(ref s)) => s.to_string(), - (_, Local) => self.cx.root_path.clone(), - (_, Unknown) => return None, - }; - Some(format!("{root}{path}/{file}?gotosrc={goto}", - root = root, - path = path[..path.len() - 1].join("/"), - file = item_path(self.item), - goto = self.item.def_id.index.as_usize())) - } + format!("{}-{}", self.item.source.loline, self.item.source.hiline) + }; + Some(format!("{root}src/{krate}/{path}#{lines}", + root = root, + krate = krate, + path = path, + lines = lines)) } } - impl<'a> fmt::Display for Item<'a> { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + debug_assert!(!self.item.is_stripped()); // Write the breadcrumb trail header for the top - try!(write!(fmt, "\n

")); + write!(fmt, "\n

")?; match self.item.inner { clean::ModuleItem(ref m) => if m.is_crate { - try!(write!(fmt, "Crate ")); + write!(fmt, "Crate ")?; } else { - try!(write!(fmt, "Module ")); + write!(fmt, "Module ")?; }, - clean::FunctionItem(..) => try!(write!(fmt, "Function ")), - clean::TraitItem(..) => try!(write!(fmt, "Trait ")), - clean::StructItem(..) => try!(write!(fmt, "Struct ")), - clean::EnumItem(..) => try!(write!(fmt, "Enum ")), - clean::PrimitiveItem(..) => try!(write!(fmt, "Primitive Type ")), - _ => {} + clean::FunctionItem(..) | clean::ForeignFunctionItem(..) => + write!(fmt, "Function ")?, + clean::TraitItem(..) => write!(fmt, "Trait ")?, + clean::StructItem(..) => write!(fmt, "Struct ")?, + clean::UnionItem(..) => write!(fmt, "Union ")?, + clean::EnumItem(..) => write!(fmt, "Enum ")?, + clean::TypedefItem(..) => write!(fmt, "Type Definition ")?, + clean::MacroItem(..) => write!(fmt, "Macro ")?, + clean::PrimitiveItem(..) => write!(fmt, "Primitive Type ")?, + clean::StaticItem(..) | clean::ForeignStaticItem(..) => + write!(fmt, "Static ")?, + clean::ConstantItem(..) => write!(fmt, "Constant ")?, + _ => { + // We don't generate pages for any other type. + unreachable!(); + } } - let is_primitive = match self.item.inner { - clean::PrimitiveItem(..) 
=> true, - _ => false, - }; - if !is_primitive { + if !self.item.is_primitive() { let cur = &self.cx.current; - let amt = if self.ismodule() { cur.len() - 1 } else { cur.len() }; + let amt = if self.item.is_mod() { cur.len() - 1 } else { cur.len() }; for (i, component) in cur.iter().enumerate().take(amt) { - try!(write!(fmt, "{}::", - repeat("../").take(cur.len() - i - 1) - .collect::(), - component)); + write!(fmt, "{}::", + repeat("../").take(cur.len() - i - 1) + .collect::(), + component)?; } } - try!(write!(fmt, "{}", - shortty(self.item), self.item.name.as_ref().unwrap())); + write!(fmt, "{}", + self.item.type_(), self.item.name.as_ref().unwrap())?; - try!(write!(fmt, "")); // in-band - try!(write!(fmt, "")); - try!(write!(fmt, - r##" - - [] - - "##)); + write!(fmt, "")?; // in-band + write!(fmt, "")?; + if let Some(version) = self.item.stable_since() { + write!(fmt, "{0}", + version)?; + } + write!(fmt, + r##" + + [] + + "##)?; // Write `src` tag // @@ -1583,20 +1560,16 @@ impl<'a> fmt::Display for Item<'a> { // [src] link in the downstream documentation will actually come back to // this page, and this link will be auto-clicked. The `id` attribute is // used to find the link to auto-click. - if self.cx.include_sources && !is_primitive { - match self.href(self.cx) { - Some(l) => { - try!(write!(fmt, "[src]", - self.item.def_id.index.as_usize(), l, "goto source code")); - } - None => {} + if self.cx.shared.include_sources && !self.item.is_primitive() { + if let Some(l) = self.src_href() { + write!(fmt, "[src]", + l, "goto source code")?; } } - try!(write!(fmt, "")); // out-of-band + write!(fmt, "")?; // out-of-band - try!(write!(fmt, "

\n")); + write!(fmt, "

\n")?; match self.item.inner { clean::ModuleItem(ref m) => { @@ -1606,6 +1579,7 @@ impl<'a> fmt::Display for Item<'a> { item_function(fmt, self.cx, self.item, f), clean::TraitItem(ref t) => item_trait(fmt, self.cx, self.item, t), clean::StructItem(ref s) => item_struct(fmt, self.cx, self.item, s), + clean::UnionItem(ref s) => item_union(fmt, self.cx, self.item, s), clean::EnumItem(ref e) => item_enum(fmt, self.cx, self.item, e), clean::TypedefItem(ref t, _) => item_typedef(fmt, self.cx, self.item, t), clean::MacroItem(ref m) => item_macro(fmt, self.cx, self.item, m), @@ -1613,21 +1587,18 @@ impl<'a> fmt::Display for Item<'a> { clean::StaticItem(ref i) | clean::ForeignStaticItem(ref i) => item_static(fmt, self.cx, self.item, i), clean::ConstantItem(ref c) => item_constant(fmt, self.cx, self.item, c), - _ => Ok(()) + _ => { + // We don't generate pages for any other type. + unreachable!(); + } } } } -fn item_path(item: &clean::Item) -> String { - match item.inner { - clean::ModuleItem(..) => { - format!("{}/index.html", item.name.as_ref().unwrap()) - } - _ => { - format!("{}.{}.html", - shortty(item).to_static_str(), - *item.name.as_ref().unwrap()) - } +fn item_path(ty: ItemType, name: &str) -> String { + match ty { + ItemType::Module => format!("{}/index.html", name), + _ => format!("{}.{}.html", ty.css_class(), name), } } @@ -1635,7 +1606,7 @@ fn full_path(cx: &Context, item: &clean::Item) -> String { let mut s = cx.current.join("::"); s.push_str("::"); s.push_str(item.name.as_ref().unwrap()); - return s + s } fn shorter<'a>(s: Option<&'a str>) -> String { @@ -1656,21 +1627,47 @@ fn plain_summary_line(s: Option<&str>) -> String { } fn document(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result { - if let Some(s) = short_stability(item, cx, true) { - try!(write!(w, "
{}
", s)); + document_stability(w, cx, item)?; + document_full(w, item)?; + Ok(()) +} + +fn document_short(w: &mut fmt::Formatter, item: &clean::Item, link: AssocItemLink) -> fmt::Result { + if let Some(s) = item.doc_value() { + let markdown = if s.contains('\n') { + format!("{} [Read more]({})", + &plain_summary_line(Some(s)), naive_assoc_href(item, link)) + } else { + format!("{}", &plain_summary_line(Some(s))) + }; + write!(w, "
{}
", Markdown(&markdown))?; } + Ok(()) +} + +fn document_full(w: &mut fmt::Formatter, item: &clean::Item) -> fmt::Result { if let Some(s) = item.doc_value() { - try!(write!(w, "
{}
", Markdown(s))); + write!(w, "
{}
", Markdown(s))?; + } + Ok(()) +} + +fn document_stability(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item) -> fmt::Result { + for stability in short_stability(item, cx, true) { + write!(w, "
{}
", stability)?; } Ok(()) } fn item_module(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, items: &[clean::Item]) -> fmt::Result { - try!(document(w, cx, item)); + document(w, cx, item)?; let mut indices = (0..items.len()).filter(|i| { - !cx.ignore_private_item(&items[*i]) + if let clean::DefaultImplItem(..) = items[*i].inner { + return false; + } + !maybe_ignore_item(&items[*i]) }).collect::>(); // the order of item types in the listing @@ -1688,13 +1685,14 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Trait => 9, ItemType::Function => 10, ItemType::Typedef => 12, - _ => 13 + ty as u8, + ItemType::Union => 13, + _ => 14 + ty as u8, } } fn cmp(i1: &clean::Item, i2: &clean::Item, idx1: usize, idx2: usize) -> Ordering { - let ty1 = shortty(i1); - let ty2 = shortty(i2); + let ty1 = i1.type_(); + let ty2 = i2.type_(); if ty1 != ty2 { return (reorder(ty1), idx1).cmp(&(reorder(ty2), idx2)) } @@ -1714,14 +1712,17 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, let mut curty = None; for &idx in &indices { let myitem = &items[idx]; + if myitem.is_stripped() { + continue; + } - let myty = Some(shortty(myitem)); + let myty = Some(myitem.type_()); if curty == Some(ItemType::ExternCrate) && myty == Some(ItemType::Import) { // Put `extern crate` and `use` re-exports in the same section. 
curty = myty; } else if myty != curty { if curty.is_some() { - try!(write!(w, "")); + write!(w, "")?; } curty = myty; let (short, name) = match myty.unwrap() { @@ -1729,6 +1730,7 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::Import => ("reexports", "Reexports"), ItemType::Module => ("modules", "Modules"), ItemType::Struct => ("structs", "Structs"), + ItemType::Union => ("unions", "Unions"), ItemType::Enum => ("enums", "Enums"), ItemType::Function => ("functions", "Functions"), ItemType::Typedef => ("types", "Type Definitions"), @@ -1745,120 +1747,156 @@ fn item_module(w: &mut fmt::Formatter, cx: &Context, ItemType::AssociatedType => ("associated-types", "Associated Types"), ItemType::AssociatedConst => ("associated-consts", "Associated Constants"), }; - try!(write!(w, "

\ - {name}

\n", - id = derive_id(short.to_owned()), name = name)); + write!(w, "

\ + {name}

\n
", + id = derive_id(short.to_owned()), name = name)?; } match myitem.inner { clean::ExternCrateItem(ref name, ref src) => { + use html::format::HRef; + match *src { Some(ref src) => { - try!(write!(w, "")); + write!(w, "")?; } clean::ImportItem(ref import) => { - try!(write!(w, "", - VisSpace(myitem.visibility), *import)); + write!(w, "", + VisSpace(&myitem.visibility), *import)?; } _ => { if myitem.name.is_none() { continue } - let stab_docs = if let Some(s) = short_stability(myitem, cx, false) { - format!("[{}]", s) + + let stabilities = short_stability(myitem, cx, false); + + let stab_docs = if !stabilities.is_empty() { + stabilities.iter() + .map(|s| format!("[{}]", s)) + .collect::>() + .as_slice() + .join(" ") } else { String::new() }; - try!(write!(w, " - - - - - ", - name = *myitem.name.as_ref().unwrap(), - stab_docs = stab_docs, - docs = Markdown(&shorter(myitem.doc_value())), - class = shortty(myitem), - stab = myitem.stability_class(), - href = item_path(myitem), - title = full_path(cx, myitem))); + + let mut unsafety_flag = ""; + if let clean::FunctionItem(ref func) = myitem.inner { + if func.unsafety == hir::Unsafety::Unsafe { + unsafety_flag = ""; + } + } + + let doc_value = myitem.doc_value().unwrap_or(""); + write!(w, " + + + + ", + name = *myitem.name.as_ref().unwrap(), + stab_docs = stab_docs, + docs = shorter(Some(&Markdown(doc_value).to_string())), + class = myitem.type_(), + stab = myitem.stability_class(), + unsafety_flag = unsafety_flag, + href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()), + title = full_path(cx, myitem))?; } } } - write!(w, "
{}extern crate {} as {};", - VisSpace(myitem.visibility), - src, - name)) + write!(w, "
{}extern crate {} as {};", + VisSpace(&myitem.visibility), + HRef::new(myitem.def_id, src), + name)? } None => { - try!(write!(w, "
{}extern crate {};", - VisSpace(myitem.visibility), name)) + write!(w, "
{}extern crate {};", + VisSpace(&myitem.visibility), + HRef::new(myitem.def_id, name))? } } - try!(write!(w, "
{}{}
{}{}
{name} - {stab_docs} {docs} -
{name}{unsafety_flag} + {stab_docs} {docs} +
") + if curty.is_some() { + write!(w, "")?; + } + Ok(()) } -fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Option { - let mut result = item.stability.as_ref().and_then(|stab| { - let reason = if show_reason && !stab.reason.is_empty() { - format!(": {}", stab.reason) +fn maybe_ignore_item(it: &clean::Item) -> bool { + match it.inner { + clean::StrippedItem(..) => true, + clean::ModuleItem(ref m) => { + it.doc_value().is_none() && m.items.is_empty() + && it.visibility != Some(clean::Public) + }, + _ => false, + } +} + +fn short_stability(item: &clean::Item, cx: &Context, show_reason: bool) -> Vec { + let mut stability = vec![]; + + if let Some(stab) = item.stability.as_ref() { + let deprecated_reason = if show_reason && !stab.deprecated_reason.is_empty() { + format!(": {}", stab.deprecated_reason) } else { String::new() }; - let text = if !stab.deprecated_since.is_empty() { + if !stab.deprecated_since.is_empty() { let since = if show_reason { format!(" since {}", Escape(&stab.deprecated_since)) } else { String::new() }; - format!("Deprecated{}{}", since, Markdown(&reason)) - } else if stab.level == stability::Unstable { + let text = format!("Deprecated{}{}", since, Markdown(&deprecated_reason)); + stability.push(format!("{}", text)) + }; + + if stab.level == stability::Unstable { let unstable_extra = if show_reason { - match (!stab.feature.is_empty(), &cx.issue_tracker_base_url, stab.issue) { + match (!stab.feature.is_empty(), &cx.shared.issue_tracker_base_url, stab.issue) { (true, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 => format!(" ({} #{})", Escape(&stab.feature), tracker_url, issue_no, issue_no), (false, &Some(ref tracker_url), Some(issue_no)) if issue_no > 0 => format!(" (#{})", Escape(&tracker_url), issue_no, issue_no), - (true, _, _) => + (true, ..) 
=> format!(" ({})", Escape(&stab.feature)), _ => String::new(), } } else { String::new() }; - format!("Unstable{}{}", unstable_extra, Markdown(&reason)) - } else { - return None - }; - Some(format!("{}", - item.stability_class(), text)) - }); - - if result.is_none() { - result = item.deprecation.as_ref().and_then(|depr| { - let note = if show_reason && !depr.note.is_empty() { - format!(": {}", depr.note) - } else { - String::new() - }; - let since = if show_reason && !depr.since.is_empty() { - format!(" since {}", Escape(&depr.since)) + let unstable_reason = if show_reason && !stab.unstable_reason.is_empty() { + format!(": {}", stab.unstable_reason) } else { String::new() }; + let text = format!("Unstable{}{}", unstable_extra, Markdown(&unstable_reason)); + stability.push(format!("{}", text)) + }; + } else if let Some(depr) = item.deprecation.as_ref() { + let note = if show_reason && !depr.note.is_empty() { + format!(": {}", depr.note) + } else { + String::new() + }; + let since = if show_reason && !depr.since.is_empty() { + format!(" since {}", Escape(&depr.since)) + } else { + String::new() + }; - let text = format!("Deprecated{}{}", since, Markdown(¬e)); - Some(format!("{}", text)) - }); + let text = format!("Deprecated{}{}", since, Markdown(¬e)); + stability.push(format!("{}", text)) } - result + stability } struct Initializer<'a>(&'a str); @@ -1867,290 +1905,379 @@ impl<'a> fmt::Display for Initializer<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let Initializer(s) = *self; if s.is_empty() { return Ok(()); } - try!(write!(f, " = ")); - write!(f, "{}", s) + write!(f, " = ")?; + write!(f, "{}", Escape(s)) } } fn item_constant(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, c: &clean::Constant) -> fmt::Result { - try!(write!(w, "
{vis}const \
-                    {name}: {typ}{init}
", - vis = VisSpace(it.visibility), + write!(w, "
{vis}const \
+               {name}: {typ}{init}
", + vis = VisSpace(&it.visibility), name = it.name.as_ref().unwrap(), typ = c.type_, - init = Initializer(&c.expr))); + init = Initializer(&c.expr))?; document(w, cx, it) } fn item_static(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, s: &clean::Static) -> fmt::Result { - try!(write!(w, "
{vis}static {mutability}\
-                    {name}: {typ}{init}
", - vis = VisSpace(it.visibility), + write!(w, "
{vis}static {mutability}\
+               {name}: {typ}{init}
", + vis = VisSpace(&it.visibility), mutability = MutableSpace(s.mutability), name = it.name.as_ref().unwrap(), typ = s.type_, - init = Initializer(&s.expr))); + init = Initializer(&s.expr))?; document(w, cx, it) } fn item_function(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, f: &clean::Function) -> fmt::Result { - try!(write!(w, "
{vis}{constness}{unsafety}{abi}fn \
-                    {name}{generics}{decl}{where_clause}
", - vis = VisSpace(it.visibility), - constness = ConstnessSpace(f.constness), + // FIXME(#24111): remove when `const_fn` is stabilized + let vis_constness = match UnstableFeatures::from_environment() { + UnstableFeatures::Allow => f.constness, + _ => hir::Constness::NotConst + }; + let indent = format!("{}{}{}{:#}fn {}{:#}", + VisSpace(&it.visibility), + ConstnessSpace(vis_constness), + UnsafetySpace(f.unsafety), + AbiSpace(f.abi), + it.name.as_ref().unwrap(), + f.generics).len(); + write!(w, "
{vis}{constness}{unsafety}{abi}fn \
+               {name}{generics}{decl}{where_clause}
", + vis = VisSpace(&it.visibility), + constness = ConstnessSpace(vis_constness), unsafety = UnsafetySpace(f.unsafety), abi = AbiSpace(f.abi), name = it.name.as_ref().unwrap(), generics = f.generics, - where_clause = WhereClause(&f.generics), - decl = f.decl)); + where_clause = WhereClause(&f.generics, 2), + decl = Method(&f.decl, indent))?; document(w, cx, it) } fn item_trait(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Trait) -> fmt::Result { let mut bounds = String::new(); + let mut bounds_plain = String::new(); if !t.bounds.is_empty() { if !bounds.is_empty() { bounds.push(' '); + bounds_plain.push(' '); } bounds.push_str(": "); + bounds_plain.push_str(": "); for (i, p) in t.bounds.iter().enumerate() { - if i > 0 { bounds.push_str(" + "); } + if i > 0 { + bounds.push_str(" + "); + bounds_plain.push_str(" + "); + } bounds.push_str(&format!("{}", *p)); + bounds_plain.push_str(&format!("{:#}", *p)); } } // Output the trait definition - try!(write!(w, "
{}{}trait {}{}{}{} ",
-                  VisSpace(it.visibility),
-                  UnsafetySpace(t.unsafety),
-                  it.name.as_ref().unwrap(),
-                  t.generics,
-                  bounds,
-                  WhereClause(&t.generics)));
-
-    let types = t.items.iter().filter(|m| {
-        match m.inner { clean::AssociatedTypeItem(..) => true, _ => false }
-    }).collect::>();
-    let consts = t.items.iter().filter(|m| {
-        match m.inner { clean::AssociatedConstItem(..) => true, _ => false }
-    }).collect::>();
-    let required = t.items.iter().filter(|m| {
-        match m.inner { clean::TyMethodItem(_) => true, _ => false }
-    }).collect::>();
-    let provided = t.items.iter().filter(|m| {
-        match m.inner { clean::MethodItem(_) => true, _ => false }
-    }).collect::>();
+    write!(w, "
{}{}trait {}{}{}{} ",
+           VisSpace(&it.visibility),
+           UnsafetySpace(t.unsafety),
+           it.name.as_ref().unwrap(),
+           t.generics,
+           bounds,
+           // Where clauses in traits are indented nine spaces, per rustdoc.css
+           WhereClause(&t.generics, 9))?;
+
+    let types = t.items.iter().filter(|m| m.is_associated_type()).collect::>();
+    let consts = t.items.iter().filter(|m| m.is_associated_const()).collect::>();
+    let required = t.items.iter().filter(|m| m.is_ty_method()).collect::>();
+    let provided = t.items.iter().filter(|m| m.is_method()).collect::>();
 
     if t.items.is_empty() {
-        try!(write!(w, "{{ }}"));
+        write!(w, "{{ }}")?;
     } else {
-        try!(write!(w, "{{\n"));
+        // FIXME: we should be using a derived_id for the Anchors here
+        write!(w, "{{\n")?;
         for t in &types {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, t, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
+            write!(w, ";\n")?;
         }
         if !types.is_empty() && !consts.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for t in &consts {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, t, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, t, AssocItemLink::Anchor(None), ItemType::Trait)?;
+            write!(w, ";\n")?;
         }
         if !consts.is_empty() && !required.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for m in &required {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, m, AssocItemLink::Anchor));
-            try!(write!(w, ";\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
+            write!(w, ";\n")?;
         }
         if !required.is_empty() && !provided.is_empty() {
-            try!(w.write_str("\n"));
+            w.write_str("\n")?;
         }
         for m in &provided {
-            try!(write!(w, "    "));
-            try!(render_assoc_item(w, m, AssocItemLink::Anchor));
-            try!(write!(w, " {{ ... }}\n"));
+            write!(w, "    ")?;
+            render_assoc_item(w, m, AssocItemLink::Anchor(None), ItemType::Trait)?;
+            write!(w, " {{ ... }}\n")?;
         }
-        try!(write!(w, "}}"));
+        write!(w, "}}")?;
     }
-    try!(write!(w, "
")); + write!(w, "
")?; // Trait documentation - try!(document(w, cx, it)); + document(w, cx, it)?; - fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item) + fn trait_item(w: &mut fmt::Formatter, cx: &Context, m: &clean::Item, t: &clean::Item) -> fmt::Result { let name = m.name.as_ref().unwrap(); - let id = derive_id(format!("{}.{}", shortty(m), name)); - try!(write!(w, "

", - id = id, - stab = m.stability_class())); - try!(render_assoc_item(w, m, AssocItemLink::Anchor)); - try!(write!(w, "

")); - try!(document(w, cx, m)); + let item_type = m.type_(); + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

\ +

")?; + document(w, cx, m)?; Ok(()) } if !types.is_empty() { - try!(write!(w, " + write!(w, "

Associated Types

- ")); + ")?; for t in &types { - try!(trait_item(w, cx, *t)); + trait_item(w, cx, *t, it)?; } - try!(write!(w, "
")); + write!(w, "")?; } if !consts.is_empty() { - try!(write!(w, " + write!(w, "

Associated Constants

- ")); + ")?; for t in &consts { - try!(trait_item(w, cx, *t)); + trait_item(w, cx, *t, it)?; } - try!(write!(w, "
")); + write!(w, "")?; } // Output the documentation for each function individually if !required.is_empty() { - try!(write!(w, " + write!(w, "

Required Methods

- ")); + ")?; for m in &required { - try!(trait_item(w, cx, *m)); + trait_item(w, cx, *m, it)?; } - try!(write!(w, "
")); + write!(w, "")?; } if !provided.is_empty() { - try!(write!(w, " + write!(w, "

Provided Methods

- ")); + ")?; for m in &provided { - try!(trait_item(w, cx, *m)); + trait_item(w, cx, *m, it)?; } - try!(write!(w, "
")); + write!(w, "")?; } // If there are methods directly on this trait object, render them here. - try!(render_assoc_items(w, cx, it.def_id, AssocItemRender::All)); + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?; let cache = cache(); - try!(write!(w, " + write!(w, "

Implementors

    - ")); - match cache.implementors.get(&it.def_id) { - Some(implementors) => { - for i in implementors { - try!(writeln!(w, "
  • {}
  • ", i.impl_)); - } + ")?; + if let Some(implementors) = cache.implementors.get(&it.def_id) { + for i in implementors { + write!(w, "
  • ")?; + fmt_impl_for_trait_page(&i.impl_, w)?; + writeln!(w, "
  • ")?; + } + } + write!(w, "
")?; + write!(w, r#""#, + root_path = vec![".."; cx.current.len()].join("/"), + path = if it.def_id.is_local() { + cx.current.join("/") + } else { + let (ref path, _) = cache.external_paths[&it.def_id]; + path[..path.len() - 1].join("/") + }, + ty = it.type_().css_class(), + name = *it.name.as_ref().unwrap())?; + Ok(()) +} + +fn naive_assoc_href(it: &clean::Item, link: AssocItemLink) -> String { + use html::item_type::ItemType::*; + + let name = it.name.as_ref().unwrap(); + let ty = match it.type_() { + Typedef | AssociatedType => AssociatedType, + s@_ => s, + }; + + let anchor = format!("#{}.{}", ty, name); + match link { + AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id), + AssocItemLink::Anchor(None) => anchor, + AssocItemLink::GotoSource(did, _) => { + href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor) } - None => {} } - try!(write!(w, "")); - try!(write!(w, r#""#, - root_path = vec![".."; cx.current.len()].join("/"), - path = if it.def_id.is_local() { - cx.current.join("/") - } else { - let path = &cache.external_paths[&it.def_id]; - path[..path.len() - 1].join("/") - }, - ty = shortty(it).to_static_str(), - name = *it.name.as_ref().unwrap())); - Ok(()) } -fn assoc_const(w: &mut fmt::Formatter, it: &clean::Item, - ty: &clean::Type, default: Option<&String>) - -> fmt::Result { - try!(write!(w, "const {}", it.name.as_ref().unwrap())); - try!(write!(w, ": {}", ty)); +fn assoc_const(w: &mut fmt::Formatter, + it: &clean::Item, + ty: &clean::Type, + default: Option<&String>, + link: AssocItemLink) -> fmt::Result { + write!(w, "const {}", + naive_assoc_href(it, link), + it.name.as_ref().unwrap())?; + + write!(w, ": {}", ty)?; if let Some(default) = default { - try!(write!(w, " = {}", default)); + write!(w, " = {}", Escape(default))?; } Ok(()) } fn assoc_type(w: &mut fmt::Formatter, it: &clean::Item, bounds: &Vec, - default: &Option) - -> fmt::Result { - try!(write!(w, "type {}", it.name.as_ref().unwrap())); + default: Option<&clean::Type>, + 
link: AssocItemLink) -> fmt::Result { + write!(w, "type {}", + naive_assoc_href(it, link), + it.name.as_ref().unwrap())?; if !bounds.is_empty() { - try!(write!(w, ": {}", TyParamBounds(bounds))) + write!(w, ": {}", TyParamBounds(bounds))? + } + if let Some(default) = default { + write!(w, " = {}", default)?; } - if let Some(ref default) = *default { - try!(write!(w, " = {}", default)); + Ok(()) +} + +fn render_stability_since_raw<'a>(w: &mut fmt::Formatter, + ver: Option<&'a str>, + containing_ver: Option<&'a str>) -> fmt::Result { + if let Some(v) = ver { + if containing_ver != ver && v.len() > 0 { + write!(w, "
{0}
", + v)? + } } Ok(()) } -fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item, - link: AssocItemLink) -> fmt::Result { +fn render_stability_since(w: &mut fmt::Formatter, + item: &clean::Item, + containing_item: &clean::Item) -> fmt::Result { + render_stability_since_raw(w, item.stable_since(), containing_item.stable_since()) +} + +fn render_assoc_item(w: &mut fmt::Formatter, + item: &clean::Item, + link: AssocItemLink, + parent: ItemType) -> fmt::Result { fn method(w: &mut fmt::Formatter, - it: &clean::Item, + meth: &clean::Item, unsafety: hir::Unsafety, constness: hir::Constness, abi: abi::Abi, g: &clean::Generics, - selfty: &clean::SelfTy, d: &clean::FnDecl, - link: AssocItemLink) + link: AssocItemLink, + parent: ItemType) -> fmt::Result { - use syntax::abi::Abi; - - let name = it.name.as_ref().unwrap(); - let anchor = format!("#{}.{}", shortty(it), name); + let name = meth.name.as_ref().unwrap(); + let anchor = format!("#{}.{}", meth.type_(), name); let href = match link { - AssocItemLink::Anchor => anchor, - AssocItemLink::GotoSource(did) => { - href(did).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor) + AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id), + AssocItemLink::Anchor(None) => anchor, + AssocItemLink::GotoSource(did, provided_methods) => { + // We're creating a link from an impl-item to the corresponding + // trait-item and need to map the anchored type accordingly. 
+ let ty = if provided_methods.contains(name) { + ItemType::Method + } else { + ItemType::TyMethod + }; + + href(did).map(|p| format!("{}#{}.{}", p.0, ty, name)).unwrap_or(anchor) } }; + // FIXME(#24111): remove when `const_fn` is stabilized + let vis_constness = match UnstableFeatures::from_environment() { + UnstableFeatures::Allow => constness, + _ => hir::Constness::NotConst + }; + let prefix = format!("{}{}{:#}fn {}{:#}", + ConstnessSpace(vis_constness), + UnsafetySpace(unsafety), + AbiSpace(abi), + name, + *g); + let mut indent = prefix.len(); + let where_indent = if parent == ItemType::Trait { + indent += 4; + 8 + } else if parent == ItemType::Impl { + 2 + } else { + let prefix = prefix + &format!("{:#}", Method(d, indent)); + prefix.lines().last().unwrap().len() + 1 + }; write!(w, "{}{}{}fn {name}\ {generics}{decl}{where_clause}", - ConstnessSpace(constness), + ConstnessSpace(vis_constness), UnsafetySpace(unsafety), - match abi { - Abi::Rust => String::new(), - a => format!("extern {} ", a.to_string()) - }, + AbiSpace(abi), href = href, name = name, generics = *g, - decl = Method(selfty, d), - where_clause = WhereClause(g)) + decl = Method(d, indent), + where_clause = WhereClause(g, where_indent)) } - match meth.inner { + match item.inner { + clean::StrippedItem(..) 
=> Ok(()), clean::TyMethodItem(ref m) => { - method(w, meth, m.unsafety, hir::Constness::NotConst, - m.abi, &m.generics, &m.self_, &m.decl, link) + method(w, item, m.unsafety, hir::Constness::NotConst, + m.abi, &m.generics, &m.decl, link, parent) } clean::MethodItem(ref m) => { - method(w, meth, m.unsafety, m.constness, - m.abi, &m.generics, &m.self_, &m.decl, - link) + method(w, item, m.unsafety, m.constness, + m.abi, &m.generics, &m.decl, link, parent) } clean::AssociatedConstItem(ref ty, ref default) => { - assoc_const(w, meth, ty, default.as_ref()) + assoc_const(w, item, ty, default.as_ref(), link) } clean::AssociatedTypeItem(ref bounds, ref default) => { - assoc_type(w, meth, bounds, default) + assoc_type(w, item, bounds, default.as_ref(), link) } _ => panic!("render_assoc_item called on non-associated-item") } @@ -2158,154 +2285,261 @@ fn render_assoc_item(w: &mut fmt::Formatter, meth: &clean::Item, fn item_struct(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, s: &clean::Struct) -> fmt::Result { - try!(write!(w, "
"));
-    try!(render_attributes(w, it));
-    try!(render_struct(w,
-                       it,
-                       Some(&s.generics),
-                       s.struct_type,
-                       &s.fields,
-                       "",
-                       true));
-    try!(write!(w, "
")); - - try!(document(w, cx, it)); - let mut fields = s.fields.iter().filter(|f| { + write!(w, "
")?;
+    render_attributes(w, it)?;
+    render_struct(w,
+                  it,
+                  Some(&s.generics),
+                  s.struct_type,
+                  &s.fields,
+                  "",
+                  true)?;
+    write!(w, "
")?; + + document(w, cx, it)?; + let mut fields = s.fields.iter().filter_map(|f| { match f.inner { - clean::StructFieldItem(clean::HiddenStructField) => false, - clean::StructFieldItem(clean::TypedStructField(..)) => true, - _ => false, + clean::StructFieldItem(ref ty) => Some((f, ty)), + _ => None, } }).peekable(); if let doctree::Plain = s.struct_type { if fields.peek().is_some() { - try!(write!(w, "

Fields

\n")); - for field in fields { - try!(write!(w, " - ")); + write!(w, "

Fields

")?; + for (field, ty) in fields { + let id = derive_id(format!("{}.{}", + ItemType::StructField, + field.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}", + field.name.as_ref().unwrap(), + ItemType::StructField.name_space())); + write!(w, " + ", + item_type = ItemType::StructField, + id = id, + ns_id = ns_id, + stab = field.stability_class(), + name = field.name.as_ref().unwrap(), + ty = ty)?; + document(w, cx, field)?; } - try!(write!(w, "
\ - {name}", - stab = field.stability_class(), - name = field.name.as_ref().unwrap())); - try!(document(w, cx, field)); - try!(write!(w, "
")); } } - render_assoc_items(w, cx, it.def_id, AssocItemRender::All) + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) +} + +fn item_union(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, + s: &clean::Union) -> fmt::Result { + write!(w, "
")?;
+    render_attributes(w, it)?;
+    render_union(w,
+                 it,
+                 Some(&s.generics),
+                 &s.fields,
+                 "",
+                 true)?;
+    write!(w, "
")?; + + document(w, cx, it)?; + let mut fields = s.fields.iter().filter_map(|f| { + match f.inner { + clean::StructFieldItem(ref ty) => Some((f, ty)), + _ => None, + } + }).peekable(); + if fields.peek().is_some() { + write!(w, "

Fields

")?; + for (field, ty) in fields { + write!(w, "{name}: {ty} + ", + shortty = ItemType::StructField, + stab = field.stability_class(), + name = field.name.as_ref().unwrap(), + ty = ty)?; + document(w, cx, field)?; + } + } + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } fn item_enum(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, e: &clean::Enum) -> fmt::Result { - try!(write!(w, "
"));
-    try!(render_attributes(w, it));
-    try!(write!(w, "{}enum {}{}{}",
-                  VisSpace(it.visibility),
-                  it.name.as_ref().unwrap(),
-                  e.generics,
-                  WhereClause(&e.generics)));
+    write!(w, "
")?;
+    render_attributes(w, it)?;
+    let padding = format!("{}enum {}{:#} ",
+                          VisSpace(&it.visibility),
+                          it.name.as_ref().unwrap(),
+                          e.generics).len();
+    write!(w, "{}enum {}{}{}",
+           VisSpace(&it.visibility),
+           it.name.as_ref().unwrap(),
+           e.generics,
+           WhereClause(&e.generics, padding))?;
     if e.variants.is_empty() && !e.variants_stripped {
-        try!(write!(w, " {{}}"));
+        write!(w, " {{}}")?;
     } else {
-        try!(write!(w, " {{\n"));
+        write!(w, " {{\n")?;
         for v in &e.variants {
-            try!(write!(w, "    "));
+            write!(w, "    ")?;
             let name = v.name.as_ref().unwrap();
             match v.inner {
                 clean::VariantItem(ref var) => {
                     match var.kind {
-                        clean::CLikeVariant => try!(write!(w, "{}", name)),
-                        clean::TupleVariant(ref tys) => {
-                            try!(write!(w, "{}(", name));
+                        clean::VariantKind::CLike => write!(w, "{}", name)?,
+                        clean::VariantKind::Tuple(ref tys) => {
+                            write!(w, "{}(", name)?;
                             for (i, ty) in tys.iter().enumerate() {
                                 if i > 0 {
-                                    try!(write!(w, ", "))
+                                    write!(w, ", ")?
                                 }
-                                try!(write!(w, "{}", *ty));
+                                write!(w, "{}", *ty)?;
                             }
-                            try!(write!(w, ")"));
+                            write!(w, ")")?;
                         }
-                        clean::StructVariant(ref s) => {
-                            try!(render_struct(w,
-                                               v,
-                                               None,
-                                               s.struct_type,
-                                               &s.fields,
-                                               "    ",
-                                               false));
+                        clean::VariantKind::Struct(ref s) => {
+                            render_struct(w,
+                                          v,
+                                          None,
+                                          s.struct_type,
+                                          &s.fields,
+                                          "    ",
+                                          false)?;
                         }
                     }
                 }
                 _ => unreachable!()
             }
-            try!(write!(w, ",\n"));
+            write!(w, ",\n")?;
         }
 
         if e.variants_stripped {
-            try!(write!(w, "    // some variants omitted\n"));
+            write!(w, "    // some variants omitted\n")?;
         }
-        try!(write!(w, "}}"));
+        write!(w, "}}")?;
     }
-    try!(write!(w, "
")); + write!(w, "
")?; - try!(document(w, cx, it)); + document(w, cx, it)?; if !e.variants.is_empty() { - try!(write!(w, "

Variants

\n")); + write!(w, "

Variants

\n")?; for variant in &e.variants { - try!(write!(w, "")); + render_stability_since(w, variant, it)?; } - try!(write!(w, "
{name}", - name = variant.name.as_ref().unwrap())); - try!(document(w, cx, variant)); - match variant.inner { - clean::VariantItem(ref var) => { - match var.kind { - clean::StructVariant(ref s) => { - let fields = s.fields.iter().filter(|f| { - match f.inner { - clean::StructFieldItem(ref t) => match *t { - clean::HiddenStructField => false, - clean::TypedStructField(..) => true, - }, - _ => false, - } - }); - try!(write!(w, "

Fields

\n - ")); - for field in fields { - try!(write!(w, "")); - } - try!(write!(w, "
\ - {f}", - v = variant.name.as_ref().unwrap(), - f = field.name.as_ref().unwrap())); - try!(document(w, cx, field)); - try!(write!(w, "
")); + let id = derive_id(format!("{}.{}", + ItemType::Variant, + variant.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}", + variant.name.as_ref().unwrap(), + ItemType::Variant.name_space())); + write!(w, "\ + ")?; + document(w, cx, variant)?; + + use clean::{Variant, VariantKind}; + if let clean::VariantItem(Variant { + kind: VariantKind::Struct(ref s) + }) = variant.inner { + let variant_id = derive_id(format!("{}.{}.fields", + ItemType::Variant, + variant.name.as_ref().unwrap())); + write!(w, "", + id = variant_id)?; + write!(w, "

Fields of {name}

\n + ", name = variant.name.as_ref().unwrap())?; + for field in &s.fields { + use clean::StructFieldItem; + if let StructFieldItem(ref ty) = field.inner { + let id = derive_id(format!("variant.{}.field.{}", + variant.name.as_ref().unwrap(), + field.name.as_ref().unwrap())); + let ns_id = derive_id(format!("{}.{}.{}.{}", + variant.name.as_ref().unwrap(), + ItemType::Variant.name_space(), + field.name.as_ref().unwrap(), + ItemType::StructField.name_space())); + write!(w, "")?; } } - _ => () + write!(w, "
\ + ", + id = id, + ns_id = ns_id, + f = field.name.as_ref().unwrap(), + t = *ty)?; + document(w, cx, field)?; + write!(w, "
")?; } - try!(write!(w, "
")); - } - try!(render_assoc_items(w, cx, it.def_id, AssocItemRender::All)); + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All)?; Ok(()) } +fn render_attribute(attr: &ast::MetaItem) -> Option { + let name = attr.name(); + + if attr.is_word() { + Some(format!("{}", name)) + } else if let Some(v) = attr.value_str() { + Some(format!("{} = {:?}", name, &v.as_str()[..])) + } else if let Some(values) = attr.meta_item_list() { + let display: Vec<_> = values.iter().filter_map(|attr| { + attr.meta_item().and_then(|mi| render_attribute(mi)) + }).collect(); + + if display.len() > 0 { + Some(format!("{}({})", name, display.join(", "))) + } else { + None + } + } else { + None + } +} + +const ATTRIBUTE_WHITELIST: &'static [&'static str] = &[ + "export_name", + "lang", + "link_section", + "must_use", + "no_mangle", + "repr", + "unsafe_destructor_blind_to_params" +]; + fn render_attributes(w: &mut fmt::Formatter, it: &clean::Item) -> fmt::Result { - for attr in &it.attrs { - match *attr { - clean::Word(ref s) if *s == "must_use" => { - try!(write!(w, "#[{}]\n", s)); - } - clean::NameValue(ref k, ref v) if *k == "must_use" => { - try!(write!(w, "#[{} = \"{}\"]\n", k, v)); - } - _ => () + let mut attrs = String::new(); + + for attr in &it.attrs.other_attrs { + let name = attr.name(); + if !ATTRIBUTE_WHITELIST.contains(&&name.as_str()[..]) { + continue; } + if let Some(s) = render_attribute(attr.meta()) { + attrs.push_str(&format!("#[{}]\n", s)); + } + } + if attrs.len() > 0 { + write!(w, "
{}
", &attrs)?; } Ok(()) } @@ -2316,77 +2550,154 @@ fn render_struct(w: &mut fmt::Formatter, it: &clean::Item, fields: &[clean::Item], tab: &str, structhead: bool) -> fmt::Result { - try!(write!(w, "{}{}{}", - VisSpace(it.visibility), - if structhead {"struct "} else {""}, - it.name.as_ref().unwrap())); - match g { - Some(g) => try!(write!(w, "{}{}", *g, WhereClause(g))), - None => {} + let mut plain = String::new(); + write!(w, "{}{}{}", + VisSpace(&it.visibility), + if structhead {"struct "} else {""}, + it.name.as_ref().unwrap())?; + plain.push_str(&format!("{}{}{}", + VisSpace(&it.visibility), + if structhead {"struct "} else {""}, + it.name.as_ref().unwrap())); + if let Some(g) = g { + plain.push_str(&format!("{:#}", g)); + write!(w, "{}", g)? } match ty { doctree::Plain => { - try!(write!(w, " {{\n{}", tab)); - let mut fields_stripped = false; + if let Some(g) = g { + write!(w, "{}", WhereClause(g, plain.len() + 1))? + } + let mut has_visible_fields = false; + write!(w, " {{")?; for field in fields { - match field.inner { - clean::StructFieldItem(clean::HiddenStructField) => { - fields_stripped = true; - } - clean::StructFieldItem(clean::TypedStructField(ref ty)) => { - try!(write!(w, " {}{}: {},\n{}", - VisSpace(field.visibility), - field.name.as_ref().unwrap(), - *ty, - tab)); - } - _ => unreachable!(), - }; + if let clean::StructFieldItem(ref ty) = field.inner { + write!(w, "\n{} {}{}: {},", + tab, + VisSpace(&field.visibility), + field.name.as_ref().unwrap(), + *ty)?; + has_visible_fields = true; + } } - if fields_stripped { - try!(write!(w, " // some fields omitted\n{}", tab)); + if has_visible_fields { + if it.has_stripped_fields().unwrap() { + write!(w, "\n{} // some fields omitted", tab)?; + } + write!(w, "\n{}", tab)?; + } else if it.has_stripped_fields().unwrap() { + // If there are no visible fields we can just display + // `{ /* fields omitted */ }` to save space. 
+ write!(w, " /* fields omitted */ ")?; } - try!(write!(w, "}}")); + write!(w, "}}")?; } - doctree::Tuple | doctree::Newtype => { - try!(write!(w, "(")); + doctree::Tuple => { + write!(w, "(")?; + plain.push_str("("); for (i, field) in fields.iter().enumerate() { if i > 0 { - try!(write!(w, ", ")); + write!(w, ", ")?; + plain.push_str(", "); } match field.inner { - clean::StructFieldItem(clean::HiddenStructField) => { - try!(write!(w, "_")) + clean::StrippedItem(box clean::StructFieldItem(..)) => { + plain.push_str("_"); + write!(w, "_")? } - clean::StructFieldItem(clean::TypedStructField(ref ty)) => { - try!(write!(w, "{}{}", VisSpace(field.visibility), *ty)) + clean::StructFieldItem(ref ty) => { + plain.push_str(&format!("{}{:#}", VisSpace(&field.visibility), *ty)); + write!(w, "{}{}", VisSpace(&field.visibility), *ty)? } _ => unreachable!() } } - try!(write!(w, ");")); + write!(w, ")")?; + plain.push_str(")"); + if let Some(g) = g { + write!(w, "{}", WhereClause(g, plain.len() + 1))? + } + write!(w, ";")?; } doctree::Unit => { - try!(write!(w, ";")); + // Needed for PhantomData. + if let Some(g) = g { + write!(w, "{}", WhereClause(g, plain.len() + 1))? 
+ } + write!(w, ";")?; } } Ok(()) } +fn render_union(w: &mut fmt::Formatter, it: &clean::Item, + g: Option<&clean::Generics>, + fields: &[clean::Item], + tab: &str, + structhead: bool) -> fmt::Result { + let mut plain = String::new(); + write!(w, "{}{}{}", + VisSpace(&it.visibility), + if structhead {"union "} else {""}, + it.name.as_ref().unwrap())?; + plain.push_str(&format!("{}{}{}", + VisSpace(&it.visibility), + if structhead {"union "} else {""}, + it.name.as_ref().unwrap())); + if let Some(g) = g { + write!(w, "{}", g)?; + plain.push_str(&format!("{:#}", g)); + write!(w, "{}", WhereClause(g, plain.len() + 1))?; + } + + write!(w, " {{\n{}", tab)?; + for field in fields { + if let clean::StructFieldItem(ref ty) = field.inner { + write!(w, " {}{}: {},\n{}", + VisSpace(&field.visibility), + field.name.as_ref().unwrap(), + *ty, + tab)?; + } + } + + if it.has_stripped_fields().unwrap() { + write!(w, " // some fields omitted\n{}", tab)?; + } + write!(w, "}}")?; + Ok(()) +} + #[derive(Copy, Clone)] -enum AssocItemLink { - Anchor, - GotoSource(DefId), +enum AssocItemLink<'a> { + Anchor(Option<&'a str>), + GotoSource(DefId, &'a FxHashSet), +} + +impl<'a> AssocItemLink<'a> { + fn anchor(&self, id: &'a String) -> Self { + match *self { + AssocItemLink::Anchor(_) => { AssocItemLink::Anchor(Some(&id)) }, + ref other => *other, + } + } } enum AssocItemRender<'a> { All, - DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type }, + DerefFor { trait_: &'a clean::Type, type_: &'a clean::Type, deref_mut_: bool } +} + +#[derive(Copy, Clone, PartialEq)] +enum RenderMode { + Normal, + ForDeref { mut_: bool }, } fn render_assoc_items(w: &mut fmt::Formatter, cx: &Context, + containing_item: &clean::Item, it: DefId, what: AssocItemRender) -> fmt::Result { let c = cache(); @@ -2395,199 +2706,253 @@ fn render_assoc_items(w: &mut fmt::Formatter, None => return Ok(()), }; let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| { - i.impl_.trait_.is_none() + 
i.inner_impl().trait_.is_none() }); if !non_trait.is_empty() { - let render_header = match what { + let render_mode = match what { AssocItemRender::All => { - try!(write!(w, "

Methods

")); - true + write!(w, "

Methods

")?; + RenderMode::Normal } - AssocItemRender::DerefFor { trait_, type_ } => { - try!(write!(w, "

Methods from \ - {}<Target={}>

", trait_, type_)); - false + AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => { + write!(w, "

Methods from \ + {}<Target={}>

", trait_, type_)?; + RenderMode::ForDeref { mut_: deref_mut_ } } }; for i in &non_trait { - try!(render_impl(w, cx, i, AssocItemLink::Anchor, render_header)); + render_impl(w, cx, i, AssocItemLink::Anchor(None), render_mode, + containing_item.stable_since())?; } } if let AssocItemRender::DerefFor { .. } = what { - return Ok(()) + return Ok(()); } if !traits.is_empty() { let deref_impl = traits.iter().find(|t| { - match *t.impl_.trait_.as_ref().unwrap() { - clean::ResolvedPath { did, .. } => { - Some(did) == c.deref_trait_did - } - _ => false - } + t.inner_impl().trait_.def_id() == c.deref_trait_did }); if let Some(impl_) = deref_impl { - try!(render_deref_methods(w, cx, impl_)); - } - try!(write!(w, "

Trait \ - Implementations

")); - let (derived, manual): (Vec<_>, Vec<&Impl>) = traits.iter().partition(|i| { - i.impl_.derived - }); - for i in &manual { + let has_deref_mut = traits.iter().find(|t| { + t.inner_impl().trait_.def_id() == c.deref_mut_trait_did + }).is_some(); + render_deref_methods(w, cx, impl_, containing_item, has_deref_mut)?; + } + write!(w, "

Trait \ + Implementations

")?; + for i in &traits { let did = i.trait_did().unwrap(); - try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true)); - } - if !derived.is_empty() { - try!(write!(w, "

\ - Derived Implementations \ -

")); - for i in &derived { - let did = i.trait_did().unwrap(); - try!(render_impl(w, cx, i, AssocItemLink::GotoSource(did), true)); - } + let assoc_link = AssocItemLink::GotoSource(did, &i.inner_impl().provided_trait_methods); + render_impl(w, cx, i, assoc_link, + RenderMode::Normal, containing_item.stable_since())?; } } Ok(()) } -fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl) -> fmt::Result { - let deref_type = impl_.impl_.trait_.as_ref().unwrap(); - let target = impl_.impl_.items.iter().filter_map(|item| { +fn render_deref_methods(w: &mut fmt::Formatter, cx: &Context, impl_: &Impl, + container_item: &clean::Item, deref_mut: bool) -> fmt::Result { + let deref_type = impl_.inner_impl().trait_.as_ref().unwrap(); + let target = impl_.inner_impl().items.iter().filter_map(|item| { match item.inner { clean::TypedefItem(ref t, true) => Some(&t.type_), _ => None, } }).next().expect("Expected associated type binding"); - let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target }; - match *target { - clean::ResolvedPath { did, .. } => render_assoc_items(w, cx, did, what), - _ => { - if let Some(prim) = target.primitive_type() { - if let Some(c) = cache().primitive_locations.get(&prim) { - let did = DefId { krate: *c, index: prim.to_def_index() }; - try!(render_assoc_items(w, cx, did, what)); - } + let what = AssocItemRender::DerefFor { trait_: deref_type, type_: target, + deref_mut_: deref_mut }; + if let Some(did) = target.def_id() { + render_assoc_items(w, cx, container_item, did, what) + } else { + if let Some(prim) = target.primitive_type() { + if let Some(&did) = cache().primitive_locations.get(&prim) { + render_assoc_items(w, cx, container_item, did, what)?; } - Ok(()) } + Ok(()) } } -// Render_header is false when we are rendering a `Deref` impl and true -// otherwise. 
If render_header is false, we will avoid rendering static -// methods, since they are not accessible for the type implementing `Deref` fn render_impl(w: &mut fmt::Formatter, cx: &Context, i: &Impl, link: AssocItemLink, - render_header: bool) -> fmt::Result { - if render_header { - try!(write!(w, "

{}

", i.impl_)); - if let Some(ref dox) = i.dox { - try!(write!(w, "
{}
", Markdown(dox))); + render_mode: RenderMode, outer_version: Option<&str>) -> fmt::Result { + if render_mode == RenderMode::Normal { + write!(w, "

{}", i.inner_impl())?; + write!(w, "")?; + let since = i.impl_item.stability.as_ref().map(|s| &s.since[..]); + if let Some(l) = (Item { item: &i.impl_item, cx: cx }).src_href() { + write!(w, "
")?; + render_stability_since_raw(w, since, outer_version)?; + write!(w, "[src]", + l, "goto source code")?; + } else { + render_stability_since_raw(w, since, outer_version)?; + } + write!(w, "
")?; + write!(w, "

\n")?; + if let Some(ref dox) = i.impl_item.doc_value() { + write!(w, "
{}
", Markdown(dox))?; } } - fn doctraititem(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, - link: AssocItemLink, render_static: bool) -> fmt::Result { + fn doc_impl_item(w: &mut fmt::Formatter, cx: &Context, item: &clean::Item, + link: AssocItemLink, render_mode: RenderMode, + is_default_item: bool, outer_version: Option<&str>, + trait_: Option<&clean::Trait>) -> fmt::Result { + let item_type = item.type_(); let name = item.name.as_ref().unwrap(); + + let render_method_item: bool = match render_mode { + RenderMode::Normal => true, + RenderMode::ForDeref { mut_: deref_mut_ } => { + let self_type_opt = match item.inner { + clean::MethodItem(ref method) => method.decl.self_type(), + clean::TyMethodItem(ref method) => method.decl.self_type(), + _ => None + }; + + if let Some(self_ty) = self_type_opt { + let by_mut_ref = match self_ty { + SelfTy::SelfBorrowed(_lifetime, mutability) => { + mutability == Mutability::Mutable + }, + SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => { + mutability == Mutability::Mutable + }, + _ => false, + }; + + deref_mut_ || !by_mut_ref + } else { + false + } + }, + }; + match item.inner { clean::MethodItem(..) | clean::TyMethodItem(..) => { // Only render when the method is not static or we allow static methods - if !is_static_method(item) || render_static { - let id = derive_id(format!("method.{}", name)); - try!(write!(w, "

", id, shortty(item))); - try!(render_assoc_item(w, item, link)); - try!(write!(w, "

\n")); + if render_method_item { + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; } } clean::TypedefItem(ref tydef, _) => { - let id = derive_id(format!("assoc_type.{}", name)); - try!(write!(w, "

", id, shortty(item))); - try!(write!(w, "type {} = {}", name, tydef.type_)); - try!(write!(w, "

\n")); + let id = derive_id(format!("{}.{}", ItemType::AssociatedType, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; } clean::AssociatedConstItem(ref ty, ref default) => { - let id = derive_id(format!("assoc_const.{}", name)); - try!(write!(w, "

", id, shortty(item))); - try!(assoc_const(w, item, ty, default.as_ref())); - try!(write!(w, "

\n")); + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; } clean::ConstantItem(ref c) => { - let id = derive_id(format!("assoc_const.{}", name)); - try!(write!(w, "

", id, shortty(item))); - try!(assoc_const(w, item, &c.type_, Some(&c.expr))); - try!(write!(w, "

\n")); + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; } clean::AssociatedTypeItem(ref bounds, ref default) => { - let id = derive_id(format!("assoc_type.{}", name)); - try!(write!(w, "

", id, shortty(item))); - try!(assoc_type(w, item, bounds, default)); - try!(write!(w, "

\n")); + let id = derive_id(format!("{}.{}", item_type, name)); + let ns_id = derive_id(format!("{}.{}", name, item_type.name_space())); + write!(w, "

", id, item_type)?; + write!(w, "

\n")?; } + clean::StrippedItem(..) => return Ok(()), _ => panic!("can't make docs for trait item with name {:?}", item.name) } - return if let AssocItemLink::Anchor = link { - if is_static_method(item) && !render_static { - Ok(()) + if render_method_item || render_mode == RenderMode::Normal { + if !is_default_item { + if let Some(t) = trait_ { + // The trait item may have been stripped so we might not + // find any documentation or stability for it. + if let Some(it) = t.items.iter().find(|i| i.name == item.name) { + // We need the stability of the item from the trait + // because impls can't have a stability. + document_stability(w, cx, it)?; + if item.doc_value().is_some() { + document_full(w, item)?; + } else { + // In case the item isn't documented, + // provide short documentation from the trait. + document_short(w, it, link)?; + } + } + } else { + document(w, cx, item)?; + } } else { - document(w, cx, item) - } - } else { - Ok(()) - }; - - fn is_static_method(item: &clean::Item) -> bool { - match item.inner { - clean::MethodItem(ref method) => method.self_ == SelfTy::SelfStatic, - clean::TyMethodItem(ref method) => method.self_ == SelfTy::SelfStatic, - _ => false + document_stability(w, cx, item)?; + document_short(w, item, link)?; } } + Ok(()) } - try!(write!(w, "
")); - for trait_item in &i.impl_.items { - try!(doctraititem(w, cx, trait_item, link, render_header)); + let traits = &cache().traits; + let trait_ = i.trait_did().and_then(|did| traits.get(&did)); + + write!(w, "
")?; + for trait_item in &i.inner_impl().items { + doc_impl_item(w, cx, trait_item, link, render_mode, + false, outer_version, trait_)?; } fn render_default_items(w: &mut fmt::Formatter, cx: &Context, - did: DefId, t: &clean::Trait, - i: &clean::Impl, - render_static: bool) -> fmt::Result { + i: &clean::Impl, + render_mode: RenderMode, + outer_version: Option<&str>) -> fmt::Result { for trait_item in &t.items { let n = trait_item.name.clone(); - match i.items.iter().find(|m| { m.name == n }) { - Some(..) => continue, - None => {} + if i.items.iter().find(|m| m.name == n).is_some() { + continue; } + let did = i.trait_.as_ref().unwrap().def_id().unwrap(); + let assoc_link = AssocItemLink::GotoSource(did, &i.provided_trait_methods); - try!(doctraititem(w, cx, trait_item, AssocItemLink::GotoSource(did), render_static)); + doc_impl_item(w, cx, trait_item, assoc_link, render_mode, true, + outer_version, None)?; } Ok(()) } // If we've implemented a trait, then also emit documentation for all - // default methods which weren't overridden in the implementation block. - // FIXME: this also needs to be done for associated types, whenever defaults - // for them work. - if let Some(clean::ResolvedPath { did, .. }) = i.impl_.trait_ { - if let Some(t) = cache().traits.get(&did) { - try!(render_default_items(w, cx, did, t, &i.impl_, render_header)); - - } + // default items which weren't overridden in the implementation block. + if let Some(t) = trait_ { + render_default_items(w, cx, t, &i.inner_impl(), render_mode, outer_version)?; } - try!(write!(w, "
")); + write!(w, "
")?; Ok(()) } fn item_typedef(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Typedef) -> fmt::Result { - try!(write!(w, "
type {}{}{where_clause} = {type_};
", - it.name.as_ref().unwrap(), - t.generics, - where_clause = WhereClause(&t.generics), - type_ = t.type_)); + let indent = format!("type {}{:#} ", it.name.as_ref().unwrap(), t.generics).len(); + write!(w, "
type {}{}{where_clause} = {type_};
", + it.name.as_ref().unwrap(), + t.generics, + where_clause = WhereClause(&t.generics, indent), + type_ = t.type_)?; document(w, cx, it) } @@ -2598,42 +2963,42 @@ impl<'a> fmt::Display for Sidebar<'a> { let it = self.item; let parentlen = cx.current.len() - if it.is_mod() {1} else {0}; - // the sidebar is designed to display sibling functions, modules and - // other miscellaneous informations. since there are lots of sibling + // The sidebar is designed to display sibling functions, modules and + // other miscellaneous information. since there are lots of sibling // items (and that causes quadratic growth in large modules), // we refactor common parts into a shared JavaScript file per module. // still, we don't move everything into JS because we want to preserve // as much HTML as possible in order to allow non-JS-enabled browsers // to navigate the documentation (though slightly inefficiently). - try!(write!(fmt, "

")); + write!(fmt, "

")?; for (i, name) in cx.current.iter().take(parentlen).enumerate() { if i > 0 { - try!(write!(fmt, "::")); + write!(fmt, "::")?; } - try!(write!(fmt, "{}", - &cx.root_path[..(cx.current.len() - i - 1) * 3], - *name)); - } - try!(write!(fmt, "

")); - - // sidebar refers to the enclosing module, not this module - let relpath = if shortty(it) == ItemType::Module { "../" } else { "" }; - try!(write!(fmt, - "", - name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), - ty = shortty(it).to_static_str(), - path = relpath)); + write!(fmt, "{}", + &cx.root_path()[..(cx.current.len() - i - 1) * 3], + *name)?; + } + write!(fmt, "

")?; + + // Sidebar refers to the enclosing module, not this module. + let relpath = if it.is_mod() { "../" } else { "" }; + write!(fmt, + "", + name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""), + ty = it.type_().css_class(), + path = relpath)?; if parentlen == 0 { - // there is no sidebar-items.js beyond the crate root path + // There is no sidebar-items.js beyond the crate root path // FIXME maybe dynamic crate loading can be merged here } else { - try!(write!(fmt, "", - path = relpath)); + write!(fmt, "", + path = relpath)?; } Ok(()) @@ -2650,41 +3015,39 @@ impl<'a> fmt::Display for Source<'a> { cols += 1; tmp /= 10; } - try!(write!(fmt, "
"));
+        write!(fmt, "
")?;
         for i in 1..lines + 1 {
-            try!(write!(fmt, "{0:1$}\n", i, cols));
+            write!(fmt, "{0:1$}\n", i, cols)?;
         }
-        try!(write!(fmt, "
")); - try!(write!(fmt, "{}", highlight::highlight(s, None, None))); + write!(fmt, "
")?; + write!(fmt, "{}", highlight::render_with_highlighting(s, None, None, None))?; Ok(()) } } fn item_macro(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, t: &clean::Macro) -> fmt::Result { - try!(w.write_str(&highlight::highlight(&t.source, - Some("macro"), - None))); + w.write_str(&highlight::render_with_highlighting(&t.source, + Some("macro"), + None, + None))?; document(w, cx, it) } fn item_primitive(w: &mut fmt::Formatter, cx: &Context, it: &clean::Item, _p: &clean::PrimitiveType) -> fmt::Result { - try!(document(w, cx, it)); - render_assoc_items(w, cx, it.def_id, AssocItemRender::All) + document(w, cx, it)?; + render_assoc_items(w, cx, it, it.def_id, AssocItemRender::All) } -fn get_basic_keywords() -> &'static str { - "rust, rustlang, rust-lang" -} +const BASIC_KEYWORDS: &'static str = "rust, rustlang, rust-lang"; fn make_item_keywords(it: &clean::Item) -> String { - format!("{}, {}", get_basic_keywords(), it.name.as_ref().unwrap()) + format!("{}, {}", BASIC_KEYWORDS, it.name.as_ref().unwrap()) } -fn get_index_search_type(item: &clean::Item, - parent: Option) -> Option { +fn get_index_search_type(item: &clean::Item) -> Option { let decl = match item.inner { clean::FunctionItem(ref f) => &f.decl, clean::MethodItem(ref m) => &m.decl, @@ -2692,17 +3055,7 @@ fn get_index_search_type(item: &clean::Item, _ => return None }; - let mut inputs = Vec::new(); - - // Consider `self` an argument as well. 
- if let Some(name) = parent { - inputs.push(Type { name: Some(name.to_ascii_lowercase()) }); - } - - inputs.extend(&mut decl.inputs.values.iter().map(|arg| { - get_index_type(&arg.type_) - })); - + let inputs = decl.inputs.values.iter().map(|arg| get_index_type(&arg.type_)).collect(); let output = match decl.output { clean::FunctionRetTy::Return(ref return_type) => Some(get_index_type(return_type)), _ => None @@ -2748,6 +3101,6 @@ fn test_unique_id() { assert_eq!(&actual[..], expected); }; test(); - reset_ids(); + reset_ids(true); test(); } diff --git a/src/librustdoc/html/static/main.js b/src/librustdoc/html/static/main.js index 8844ed82bb5e2..6ea25fa1241f8 100644 --- a/src/librustdoc/html/static/main.js +++ b/src/librustdoc/html/static/main.js @@ -34,7 +34,8 @@ "primitive", "associatedtype", "constant", - "associatedconstant"]; + "associatedconstant", + "union"]; // used for special search precedence var TY_PRIMITIVE = itemTypes.indexOf("primitive"); @@ -124,6 +125,11 @@ focusSearchBar(); break; + case "+": + ev.preventDefault(); + toggleAllDocs(); + break; + case "?": if (ev.shiftKey && $("#help").hasClass("hidden")) { ev.preventDefault(); @@ -280,7 +286,7 @@ var parts = val.split("->").map(trimmer); var input = parts[0]; // sort inputs so that order does not matter - var inputs = input.split(",").map(trimmer).sort(); + var inputs = input.split(",").map(trimmer).sort().toString(); var output = parts[1]; for (var i = 0; i < nSearchWords; ++i) { @@ -296,8 +302,8 @@ // allow searching for void (no output) functions as well var typeOutput = type.output ? 
type.output.name : ""; - if (inputs.toString() === typeInputs.toString() && - output == typeOutput) { + if ((inputs === "*" || inputs === typeInputs.toString()) && + (output === "*" || output == typeOutput)) { results.push({id: i, index: -1, dontValidate: true}); } } @@ -572,20 +578,24 @@ displayPath = item.path + '::'; href = rootPath + item.path.replace(/::/g, '/') + '/' + name + '/index.html'; - } else if (type === 'static' || type === 'reexport') { - displayPath = item.path + '::'; - href = rootPath + item.path.replace(/::/g, '/') + - '/index.html'; } else if (type === "primitive") { displayPath = ""; href = rootPath + item.path.replace(/::/g, '/') + '/' + type + '.' + name + '.html'; + } else if (type === "externcrate") { + displayPath = ""; + href = rootPath + name + '/index.html'; } else if (item.parent !== undefined) { var myparent = item.parent; var anchor = '#' + type + '.' + name; - displayPath = item.path + '::' + myparent.name + '::'; + var parentType = itemTypes[myparent.ty]; + if (parentType === "primitive") { + displayPath = myparent.name + '::'; + } else { + displayPath = item.path + '::' + myparent.name + '::'; + } href = rootPath + item.path.replace(/::/g, '/') + - '/' + itemTypes[myparent.ty] + + '/' + parentType + '.' 
+ myparent.name + '.html' + anchor; } else { @@ -678,6 +688,16 @@ for (var crate in rawSearchIndex) { if (!rawSearchIndex.hasOwnProperty(crate)) { continue; } + searchWords.push(crate); + searchIndex.push({ + crate: crate, + ty: 1, // == ExternCrate + name: crate, + path: "", + desc: rawSearchIndex[crate].doc, + type: null, + }); + // an array of [(Number) item type, // (String) name, // (String) full path or empty string for previous path, @@ -727,7 +747,9 @@ $(".search-input").on("keyup input",function() { clearTimeout(searchTimeout); if ($(this).val().length === 0) { - window.history.replaceState("", "std - Rust", "?search="); + if (browserSupportsHistoryApi()) { + history.replaceState("", "std - Rust", "?search="); + } $('#main.content').removeClass('hidden'); $('#search.content').addClass('hidden'); } else { @@ -864,12 +886,16 @@ sidebar.append(div); } + block("primitive", "Primitive Types"); block("mod", "Modules"); + block("macro", "Macros"); block("struct", "Structs"); block("enum", "Enums"); + block("constant", "Constants"); + block("static", "Statics"); block("trait", "Traits"); block("fn", "Functions"); - block("macro", "Macros"); + block("type", "Type Definitions"); } window.initSidebarItems = initSidebarItems; @@ -897,15 +923,6 @@ window.register_implementors(window.pending_implementors); } - // See documentation in html/render.rs for what this is doing. 
- var query = getQueryStringParams(); - if (query['gotosrc']) { - window.location = $('#src-' + query['gotosrc']).attr('href'); - } - if (query['gotomacrosrc']) { - window.location = $('.srclink').attr('href'); - } - function labelForToggleButton(sectionIsCollapsed) { if (sectionIsCollapsed) { // button will expand the section @@ -916,7 +933,7 @@ return "\u2212"; // "\u2212" is '−' minus sign } - $("#toggle-all-docs").on("click", function() { + function toggleAllDocs() { var toggle = $("#toggle-all-docs"); if (toggle.hasClass("will-expand")) { toggle.removeClass("will-expand"); @@ -935,20 +952,24 @@ $(".toggle-wrapper").addClass("collapsed"); $(".collapse-toggle").children(".inner").text(labelForToggleButton(true)); } - }); + } - $(document).on("click", ".collapse-toggle", function() { - var toggle = $(this); + function collapseDocs(toggle, animate) { var relatedDoc = toggle.parent().next(); if (relatedDoc.is(".stability")) { relatedDoc = relatedDoc.next(); } if (relatedDoc.is(".docblock")) { if (relatedDoc.is(":visible")) { - relatedDoc.slideUp({duration: 'fast', easing: 'linear'}); + if (animate === true) { + relatedDoc.slideUp({duration: 'fast', easing: 'linear'}); + toggle.children(".toggle-label").fadeIn(); + } else { + relatedDoc.hide(); + toggle.children(".toggle-label").show(); + } toggle.parent(".toggle-wrapper").addClass("collapsed"); toggle.children(".inner").text(labelForToggleButton(true)); - toggle.children(".toggle-label").fadeIn(); } else { relatedDoc.slideDown({duration: 'fast', easing: 'linear'}); toggle.parent(".toggle-wrapper").removeClass("collapsed"); @@ -956,6 +977,12 @@ toggle.children(".toggle-label").hide(); } } + } + + $("#toggle-all-docs").on("click", toggleAllDocs); + + $(document).on("click", ".collapse-toggle", function() { + collapseDocs($(this), true) }); $(function() { @@ -966,24 +993,50 @@ $(".method").each(function() { if ($(this).next().is(".docblock") || ($(this).next().is(".stability") && 
$(this).next().next().is(".docblock"))) { - $(this).children().first().after(toggle.clone()); + $(this).children().last().after(toggle.clone()); } }); var mainToggle = - $(toggle).append( + $(toggle.clone()).append( $('', {'class': 'toggle-label'}) .css('display', 'none') .html(' Expand description')); var wrapper = $("
").append(mainToggle); $("#main > .docblock").before(wrapper); + + $(".docblock.autohide").each(function() { + var wrap = $(this).prev(); + if (wrap.is(".toggle-wrapper")) { + var toggle = wrap.children().first(); + if ($(this).children().first().is("h3")) { + toggle.children(".toggle-label") + .text(" Show " + $(this).children().first().text()); + } + $(this).hide(); + wrap.addClass("collapsed"); + toggle.children(".inner").text(labelForToggleButton(true)); + toggle.children(".toggle-label").show(); + } + }); + + var mainToggle = + $(toggle).append( + $('', {'class': 'toggle-label'}) + .css('display', 'none') + .html(' Expand attributes')); + var wrapper = $("
").append(mainToggle); + $("#main > pre > .attributes").each(function() { + $(this).before(wrapper); + collapseDocs($($(this).prev().children()[0]), false); + }); }); $('pre.line-numbers').on('click', 'span', function() { var prev_id = 0; function set_fragment(name) { - if (history.replaceState) { + if (browserSupportsHistoryApi()) { history.replaceState(null, null, '#' + name); $(window).trigger('hashchange'); } else { diff --git a/src/librustdoc/html/static/playpen.js b/src/librustdoc/html/static/playpen.js deleted file mode 100644 index 8f8a753b06c96..0000000000000 --- a/src/librustdoc/html/static/playpen.js +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -/*jslint browser: true, es5: true */ -/*globals $: true, rootPath: true */ - -document.addEventListener('DOMContentLoaded', function() { - 'use strict'; - - if (!window.playgroundUrl) { - return; - } - - var featureRegexp = new RegExp('^\s*#!\\[feature\\(\.*?\\)\\]'); - var elements = document.querySelectorAll('pre.rust-example-rendered'); - - Array.prototype.forEach.call(elements, function(el) { - el.onmouseover = function(e) { - if (el.contains(e.relatedTarget)) { - return; - } - - var a = document.createElement('a'); - a.setAttribute('class', 'test-arrow'); - a.textContent = 'Run'; - - var code = el.previousElementSibling.textContent; - - var channel = ''; - if (featureRegexp.test(code)) { - channel = '&version=nightly'; - } - - a.setAttribute('href', window.playgroundUrl + '?code=' + - encodeURIComponent(code) + channel); - a.setAttribute('target', '_blank'); - - el.appendChild(a); - }; - - el.onmouseout = function(e) { - if (el.contains(e.relatedTarget)) { - return; - } - - el.removeChild(el.querySelectorAll('a.test-arrow')[0]); - }; - }); -}); diff --git a/src/librustdoc/html/static/rustdoc.css b/src/librustdoc/html/static/rustdoc.css index 0bde582c19f28..7ee184c089ca4 100644 --- a/src/librustdoc/html/static/rustdoc.css +++ b/src/librustdoc/html/static/rustdoc.css @@ -1,3 +1,5 @@ +@import "normalize.css"; + /** * Copyright 2013 The Rust Project Developers. See the COPYRIGHT * file at the top-level directory of this distribution and at @@ -12,160 +14,160 @@ /* See FiraSans-LICENSE.txt for the Fira Sans license. 
*/ @font-face { - font-family: 'Fira Sans'; - font-style: normal; - font-weight: 400; - src: local('Fira Sans'), url("FiraSans-Regular.woff") format('woff'); + font-family: 'Fira Sans'; + font-style: normal; + font-weight: 400; + src: local('Fira Sans'), url("FiraSans-Regular.woff") format('woff'); } @font-face { - font-family: 'Fira Sans'; - font-style: normal; - font-weight: 500; - src: local('Fira Sans Medium'), url("FiraSans-Medium.woff") format('woff'); + font-family: 'Fira Sans'; + font-style: normal; + font-weight: 500; + src: local('Fira Sans Medium'), url("FiraSans-Medium.woff") format('woff'); } /* See SourceSerifPro-LICENSE.txt for the Source Serif Pro license and * Heuristica-LICENSE.txt for the Heuristica license. */ @font-face { - font-family: 'Source Serif Pro'; - font-style: normal; - font-weight: 400; - src: local('Source Serif Pro'), url("SourceSerifPro-Regular.woff") format('woff'); + font-family: 'Source Serif Pro'; + font-style: normal; + font-weight: 400; + src: local('Source Serif Pro'), url("SourceSerifPro-Regular.woff") format('woff'); } @font-face { - font-family: 'Source Serif Pro'; - font-style: italic; - font-weight: 400; - src: url("Heuristica-Italic.woff") format('woff'); + font-family: 'Source Serif Pro'; + font-style: italic; + font-weight: 400; + src: url("Heuristica-Italic.woff") format('woff'); } @font-face { - font-family: 'Source Serif Pro'; - font-style: normal; - font-weight: 700; - src: local('Source Serif Pro Bold'), url("SourceSerifPro-Bold.woff") format('woff'); + font-family: 'Source Serif Pro'; + font-style: normal; + font-weight: 700; + src: local('Source Serif Pro Bold'), url("SourceSerifPro-Bold.woff") format('woff'); } /* See SourceCodePro-LICENSE.txt for the Source Code Pro license. 
*/ @font-face { - font-family: 'Source Code Pro'; - font-style: normal; - font-weight: 400; - src: local('Source Code Pro'), url("SourceCodePro-Regular.woff") format('woff'); + font-family: 'Source Code Pro'; + font-style: normal; + font-weight: 400; + src: local('Source Code Pro'), url("SourceCodePro-Regular.woff") format('woff'); } @font-face { - font-family: 'Source Code Pro'; - font-style: normal; - font-weight: 600; - src: local('Source Code Pro Semibold'), url("SourceCodePro-Semibold.woff") format('woff'); + font-family: 'Source Code Pro'; + font-style: normal; + font-weight: 600; + src: local('Source Code Pro Semibold'), url("SourceCodePro-Semibold.woff") format('woff'); } -@import "normalize.css"; - * { -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; } /* General structure and fonts */ body { - font: 16px/1.4 "Source Serif Pro", Georgia, Times, "Times New Roman", serif; - margin: 0; - position: relative; - padding: 10px 15px 20px 15px; + font: 16px/1.4 "Source Serif Pro", Georgia, Times, "Times New Roman", serif; + margin: 0; + position: relative; + padding: 10px 15px 20px 15px; - -webkit-font-feature-settings: "kern", "liga"; - -moz-font-feature-settings: "kern", "liga"; - font-feature-settings: "kern", "liga"; + -webkit-font-feature-settings: "kern", "liga"; + -moz-font-feature-settings: "kern", "liga"; + font-feature-settings: "kern", "liga"; } h1 { - font-size: 1.5em; + font-size: 1.5em; } h2 { - font-size: 1.4em; + font-size: 1.4em; } h3 { - font-size: 1.3em; + font-size: 1.3em; } h1, h2, h3:not(.impl):not(.method):not(.type):not(.tymethod), h4:not(.method):not(.type):not(.tymethod) { - font-weight: 500; - margin: 20px 0 15px 0; - padding-bottom: 6px; + font-weight: 500; + margin: 20px 0 15px 0; + padding-bottom: 6px; } h1.fqn { - border-bottom: 1px dashed; - margin-top: 0; + border-bottom: 1px dashed; + margin-top: 0; + position: relative; } h2, 
h3:not(.impl):not(.method):not(.type):not(.tymethod), h4:not(.method):not(.type):not(.tymethod) { - border-bottom: 1px solid; + border-bottom: 1px solid; } h3.impl, h3.method, h4.method, h3.type, h4.type { - font-weight: 600; - margin-top: 10px; - margin-bottom: 10px; + font-weight: 600; + margin-top: 10px; + margin-bottom: 10px; + position: relative; } h3.impl, h3.method, h3.type { - margin-top: 15px; + margin-top: 15px; } h1, h2, h3, h4, .sidebar, a.source, .search-input, .content table :not(code)>a, .collapse-toggle { - font-family: "Fira Sans", "Helvetica Neue", Helvetica, Arial, sans-serif; + font-family: "Fira Sans", "Helvetica Neue", Helvetica, Arial, sans-serif; } ol, ul { - padding-left: 25px; + padding-left: 25px; } ul ul, ol ul, ul ol, ol ol { - margin-bottom: 0; + margin-bottom: 0; } p { - margin: 0 0 .6em 0; + margin: 0 0 .6em 0; } code, pre { - font-family: "Source Code Pro", Menlo, Monaco, Consolas, "DejaVu Sans Mono", Inconsolata, monospace; - white-space: pre-wrap; + font-family: "Source Code Pro", Menlo, Monaco, Consolas, "DejaVu Sans Mono", Inconsolata, monospace; + white-space: pre-wrap; } -.docblock code { - border-radius: 3px; - padding: 0 0.2em; +.docblock code, .docblock-short code { + border-radius: 3px; + padding: 0 0.2em; } -.docblock pre code { - padding: 0; +.docblock pre code, .docblock-short pre code { + padding: 0; } pre { - padding: 14px; + padding: 14px; } .source pre { - padding: 20px; + padding: 20px; } img { - max-width: 100%; + max-width: 100%; } .content.source { - margin-top: 50px; - max-width: none; - overflow: visible; - margin-left: 0px; - min-width: 70em; + margin-top: 50px; + max-width: none; + overflow: visible; + margin-left: 0px; + min-width: 70em; } nav.sub { - font-size: 16px; - text-transform: uppercase; + font-size: 16px; + text-transform: uppercase; } .sidebar { - width: 200px; - position: absolute; - left: 0; - top: 0; - min-height: 100%; + width: 200px; + position: absolute; + left: 0; + top: 0; + min-height: 
100%; } .content, nav { max-width: 960px; } @@ -175,113 +177,142 @@ nav.sub { .js-only, .hidden { display: none !important; } .sidebar { - padding: 10px; + padding: 10px; } .sidebar img { - margin: 20px auto; - display: block; + margin: 20px auto; + display: block; } .sidebar .location { - font-size: 17px; - margin: 30px 0 20px 0; - text-align: center; + font-size: 17px; + margin: 30px 0 20px 0; + text-align: center; } .location a:first-child { font-weight: 500; } .block { - padding: 0 10px; - margin-bottom: 14px; + padding: 0 10px; + margin-bottom: 14px; } .block h2, .block h3 { - margin-top: 0; - margin-bottom: 8px; - text-align: center; + margin-top: 0; + margin-bottom: 8px; + text-align: center; } .block ul, .block li { - margin: 0; - padding: 0; - list-style: none; + margin: 0; + padding: 0; + list-style: none; } .block a { - display: block; - text-overflow: ellipsis; - overflow: hidden; - line-height: 15px; - padding: 7px 5px; - font-size: 14px; - font-weight: 300; - transition: border 500ms ease-out; + display: block; + text-overflow: ellipsis; + overflow: hidden; + line-height: 15px; + padding: 7px 5px; + font-size: 14px; + font-weight: 300; + transition: border 500ms ease-out; } .content { - padding: 15px 0; + padding: 15px 0; } .content.source pre.rust { - white-space: pre; - overflow: auto; - padding-left: 0; + white-space: pre; + overflow: auto; + padding-left: 0; } .content pre.line-numbers { - float: left; - border: none; - position: relative; + float: left; + border: none; + position: relative; - -webkit-user-select: none; - -moz-user-select: none; - -ms-user-select: none; - user-select: none; + -webkit-user-select: none; + -moz-user-select: none; + -ms-user-select: none; + user-select: none; } .line-numbers span { cursor: pointer; } -.docblock.short p { - display: inline; +.docblock-short p { + display: inline; } -.docblock.short.nowrap { - display: block; - overflow: hidden; - white-space: nowrap; - text-overflow: ellipsis; +.docblock-short.nowrap 
{ + display: block; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; } -.docblock.short p { - overflow: hidden; - text-overflow: ellipsis; - margin: 0; +.docblock-short p { + overflow: hidden; + text-overflow: ellipsis; + margin: 0; } -.docblock.short code { white-space: nowrap; } +.docblock-short code { white-space: nowrap; } .docblock h1, .docblock h2, .docblock h3, .docblock h4, .docblock h5 { - border-bottom: 1px solid; + border-bottom: 1px solid; } .docblock h1 { font-size: 1.3em; } .docblock h2 { font-size: 1.15em; } .docblock h3, .docblock h4, .docblock h5 { font-size: 1em; } +.docblock { + margin-left: 24px; +} + .content .out-of-band { - font-size: 23px; - width: 40%; - margin: 0px; - padding: 0px; - text-align: right; - display: inline-block; + font-size: 23px; + margin: 0px; + padding: 0px; + text-align: right; + display: inline-block; + font-weight: normal; + position: absolute; + right: 0; +} + +h3.impl > .out-of-band { + font-size: 21px; +} + +h4 > code, h3 > code, .invisible > code { + position: inherit; +} + +.in-band, code { + z-index: 5; +} + +.invisible { + background: rgba(0, 0, 0, 0); + width: 100%; + display: inline-block; } .content .in-band { - width: 60%; - margin: 0px; - padding: 0px; - display: inline-block; + margin: 0px; + padding: 0px; + display: inline-block; +} + +#main { position: relative; } +#main > .since { + top: inherit; + font-family: "Fira Sans", "Helvetica Neue", Helvetica, Arial, sans-serif; } .content table { - border-spacing: 0 5px; - border-collapse: separate; + border-spacing: 0 5px; + border-collapse: separate; } .content td { vertical-align: top; } .content td:first-child { padding-right: 20px; } @@ -289,221 +320,256 @@ nav.sub { .content td h1, .content td h2 { margin-left: 0; font-size: 1.1em; } .docblock table { - border: 1px solid; - margin: .5em 0; - border-collapse: collapse; - width: 100%; + border: 1px solid; + margin: .5em 0; + border-collapse: collapse; + width: 100%; } .docblock table 
td { - padding: .5em; - border-top: 1px dashed; - border-bottom: 1px dashed; + padding: .5em; + border-top: 1px dashed; + border-bottom: 1px dashed; } .docblock table th { - padding: .5em; - text-align: left; - border-top: 1px solid; - border-bottom: 1px solid; + padding: .5em; + text-align: left; + border-top: 1px solid; + border-bottom: 1px solid; +} + +.fields + table { + margin-bottom: 1em; } .content .item-list { - list-style-type: none; - padding: 0; + list-style-type: none; + padding: 0; } .content .item-list li { margin-bottom: 3px; } .content .multi-column { - -moz-column-count: 5; - -moz-column-gap: 2.5em; - -webkit-column-count: 5; - -webkit-column-gap: 2.5em; - column-count: 5; - column-gap: 2.5em; + -moz-column-count: 5; + -moz-column-gap: 2.5em; + -webkit-column-count: 5; + -webkit-column-gap: 2.5em; + column-count: 5; + column-gap: 2.5em; } .content .multi-column li { width: 100%; display: inline-block; } .content .method { - font-size: 1em; - position: relative; + font-size: 1em; + position: relative; } /* Shift "where ..." 
part of method or fn definition down a line */ -.content .method .where, .content .fn .where { display: block; } +.content .method .where, +.content .fn .where, +.content .where.fmt-newline { + display: block; +} /* Bit of whitespace to indent it */ -.content .method .where::before, .content .fn .where::before { content: ' '; } +.content .method .where::before, +.content .fn .where::before, +.content .where.fmt-newline::before { + content: ' '; +} .content .methods > div { margin-left: 40px; } .content .impl-items .docblock, .content .impl-items .stability { - margin-left: 40px; + margin-left: 40px; } .content .impl-items .method, .content .impl-items > .type { - margin-left: 20px; + margin-left: 20px; } .content .stability code { - font-size: 90%; + font-size: 90%; +} + +/* Shift where in trait listing down a line */ +pre.trait .where::before { + content: '\a '; } nav { - border-bottom: 1px solid; - padding-bottom: 10px; - margin-bottom: 10px; + border-bottom: 1px solid; + padding-bottom: 10px; + margin-bottom: 10px; } nav.main { - padding: 20px 0; - text-align: center; + padding: 20px 0; + text-align: center; } nav.main .current { - border-top: 1px solid; - border-bottom: 1px solid; + border-top: 1px solid; + border-bottom: 1px solid; } nav.main .separator { - border: 1px solid; - display: inline-block; - height: 23px; - margin: 0 20px; + border: 1px solid; + display: inline-block; + height: 23px; + margin: 0 20px; } nav.sum { text-align: right; } nav.sub form { display: inline; } nav.sub, .content { - margin-left: 230px; + margin-left: 230px; } a { - text-decoration: none; - background: transparent; + text-decoration: none; + background: transparent; } -.docblock a:hover, .stability a { - text-decoration: underline; +.docblock a:hover, .docblock-short a:hover, .stability a { + text-decoration: underline; } .content span.enum, .content a.enum, .block a.current.enum { color: #5e9766; } -.content span.struct, .content a.struct, .block a.current.struct { color: 
#e53700; } -.content a.type { color: #e57300; } -.content a.macro { color: #068000; } +.content span.struct, .content a.struct, .block a.current.struct { color: #df3600; } +.content span.type, .content a.type, .block a.current.type { color: #e57300; } +.content span.macro, .content a.macro, .block a.current.macro { color: #068000; } .block a.current.crate { font-weight: 500; } .search-input { - width: 100%; - /* Override Normalize.css: we have margins and do - not want to overflow - the `moz` attribute is necessary - until Firefox 29, too early to drop at this point */ - -moz-box-sizing: border-box !important; - box-sizing: border-box !important; - outline: none; - border: none; - border-radius: 1px; - margin-top: 5px; - padding: 10px 16px; - font-size: 17px; - transition: border-color 300ms ease; - transition: border-radius 300ms ease-in-out; - transition: box-shadow 300ms ease-in-out; + width: 100%; + /* Override Normalize.css: we have margins and do + not want to overflow - the `moz` attribute is necessary + until Firefox 29, too early to drop at this point */ + -moz-box-sizing: border-box !important; + box-sizing: border-box !important; + outline: none; + border: none; + border-radius: 1px; + margin-top: 5px; + padding: 10px 16px; + font-size: 17px; + transition: border-color 300ms ease; + transition: border-radius 300ms ease-in-out; + transition: box-shadow 300ms ease-in-out; } .search-input:focus { - border-color: #66afe9; - border-radius: 2px; - border: 0; - outline: 0; - box-shadow: 0 0 8px #078dd8; + border-color: #66afe9; + border-radius: 2px; + border: 0; + outline: 0; + box-shadow: 0 0 8px #078dd8; } .search-results .desc { - white-space: nowrap; - text-overflow: ellipsis; - overflow: hidden; - display: block; + white-space: nowrap; + text-overflow: ellipsis; + overflow: hidden; + display: block; } .search-results a { - display: block; + display: block; } .content .search-results td:first-child { padding-right: 0; } .content .search-results 
td:first-child a { padding-right: 10px; } -tr.result span.primitive::after { content: ' (primitive type)'; font-style: italic; } +tr.result span.primitive::after { content: ' (primitive type)'; font-style: italic; color: black; +} body.blur > :not(#help) { - filter: blur(8px); - -webkit-filter: blur(8px); - opacity: .7; + filter: blur(8px); + -webkit-filter: blur(8px); + opacity: .7; } #help { - width: 100%; - height: 100vh; - position: fixed; - top: 0; - left: 0; - display: flex; - justify-content: center; - align-items: center; + width: 100%; + height: 100vh; + position: fixed; + top: 0; + left: 0; + display: flex; + justify-content: center; + align-items: center; } #help > div { - flex: 0 0 auto; - background: #e9e9e9; - box-shadow: 0 0 6px rgba(0,0,0,.2); - width: 550px; - height: 300px; - border: 1px solid #bfbfbf; + flex: 0 0 auto; + background: #e9e9e9; + box-shadow: 0 0 6px rgba(0,0,0,.2); + width: 550px; + height: 330px; + border: 1px solid #bfbfbf; } #help dt { - float: left; - border-radius: 4px; - border: 1px solid #bfbfbf; - background: #fff; - width: 23px; - text-align: center; - clear: left; - display: block; - margin-top: -1px; + float: left; + border-radius: 4px; + border: 1px solid #bfbfbf; + background: #fff; + width: 23px; + text-align: center; + clear: left; + display: block; + margin-top: -1px; } #help dd { margin: 5px 33px; } #help .infos { padding-left: 0; } #help h1, #help h2 { margin-top: 0; } #help > div div { - width: 50%; - float: left; - padding: 20px; + width: 50%; + float: left; + padding: 20px; } em.stab { - display: inline-block; - border-width: 1px; - border-style: solid; - padding: 3px; - margin-bottom: 5px; - font-size: 90%; - font-style: normal; + display: inline-block; + border-width: 1px; + border-style: solid; + padding: 3px; + margin-bottom: 5px; + font-size: 90%; + font-style: normal; } em.stab p { - display: inline; + display: inline; } .module-item .stab { - border-width: 0; - padding: 0; - margin: 0; - background: 
inherit !important; + border-width: 0; + padding: 0; + margin: 0; + background: inherit !important; } .module-item.unstable { - opacity: 0.65; + opacity: 0.65; +} + +.since { + font-weight: normal; + font-size: initial; + color: grey; + position: absolute; + right: 0; + top: 0; +} + +.variants_table { + width: 100%; +} + +.variants_table tbody tr td:first-child { + width: 1%; /* make the variant name as small as possible */ } td.summary-column { - width: 100%; + width: 100%; } .summary { - padding-right: 0px; + padding-right: 0px; } .line-numbers :target { background-color: transparent; } @@ -512,142 +578,202 @@ td.summary-column { pre.rust .kw { color: #8959A8; } pre.rust .kw-2, pre.rust .prelude-ty { color: #4271AE; } pre.rust .number, pre.rust .string { color: #718C00; } -pre.rust .self, pre.rust .boolval, pre.rust .prelude-val, +pre.rust .self, pre.rust .bool-val, pre.rust .prelude-val, pre.rust .attribute, pre.rust .attribute .ident { color: #C82829; } pre.rust .macro, pre.rust .macro-nonterminal { color: #3E999F; } pre.rust .lifetime { color: #B76514; } +pre.rust .question-mark { + color: #ff9011; + font-weight: bold; +} -.rusttest { display: none; } pre.rust { position: relative; } a.test-arrow { - display: inline-block; - position: absolute; - background-color: #4e8bca; - padding: 5px 10px 5px 10px; - border-radius: 5px; - font-size: 130%; - top: 5px; - right: 5px; + background-color: rgba(78, 139, 202, 0.2); + display: inline-block; + position: absolute; + padding: 5px 10px 5px 10px; + border-radius: 5px; + font-size: 130%; + top: 5px; + right: 5px; } - -.methods .section-header { - /* Override parent class attributes. 
*/ - border-bottom: none !important; - font-size: 1.1em !important; - margin: 0 0 -5px; - padding: 0; +a.test-arrow:hover{ + background-color: #4e8bca; + text-decoration: none; } .section-header:hover a:after { - content: '\2002\00a7\2002'; + content: '\2002\00a7\2002'; } .section-header:hover a { - text-decoration: none; + text-decoration: none; } .section-header a { - color: inherit; + color: inherit; } .collapse-toggle { - font-weight: 300; - position: absolute; - left: -23px; - color: #999; - top: 0; + font-weight: 300; + position: absolute; + left: -23px; + color: #999; + top: 0; } .toggle-wrapper > .collapse-toggle { - left: -24px; - margin-top: 0px; + left: -24px; + margin-top: 0px; } .toggle-wrapper { - position: relative; + position: relative; } .toggle-wrapper.collapsed { - height: 1em; - transition: height .2s; + height: 1em; + transition: height .2s; } .collapse-toggle > .inner { - display: inline-block; - width: 1.2ch; - text-align: center; + display: inline-block; + width: 1.2ch; + text-align: center; } .toggle-label { - color: #999; + color: #999; +} + +.ghost { + display: none; +} + +.ghost + .since { + position: initial; + display: table-cell; +} + +.since + .srclink { + display: table-cell; + padding-left: 10px; } +span.since { + position: initial; + font-size: 20px; + margin-right: 5px; +} +.toggle-wrapper > .collapse-toggle { + left: 0; +} + +.variant + .toggle-wrapper > a { + margin-top: 5px; +} + +.sub-variant, .sub-variant > h3 { + margin-top: 0 !important; +} + +.enum > .toggle-wrapper + .docblock, .struct > .toggle-wrapper + .docblock { + margin-left: 30px; + margin-bottom: 20px; + margin-top: 5px; +} + +.enum > .collapsed, .struct > .collapsed { + margin-bottom: 25px; +} + +.enum .variant, .struct .structfield { + display: block; +} + +.attributes { + display: block; + margin: 0px 0px 0px 30px !important; +} +.toggle-attributes.collapsed { + margin-bottom: 5px; +} + +:target > code { + background: #FDFFD3; + opacity: 1; +} /* Media Queries 
*/ @media (max-width: 700px) { - body { - padding-top: 0px; - } - - .sidebar { - height: 40px; - min-height: 40px; - width: 100%; - margin: 0px; - padding: 0px; - position: static; - } - - .sidebar .location { - float: left; - margin: 0px; - padding: 5px; - width: 60%; - background: inherit; - text-align: left; - font-size: 24px; - } - - .sidebar img { - width: 35px; - margin-top: 5px; - margin-bottom: 0px; - float: left; - } - - nav.sub { - margin: 0 auto; - } - - .sidebar .block { - display: none; - } - - .content { - margin-left: 0px; - } - - .content .in-band { - width: 100%; - } - - .content .out-of-band { - display: none; - } - - .toggle-wrapper > .collapse-toggle { - left: 0px; - } - - .toggle-wrapper { - height: 1.5em; - } + body { + padding-top: 0px; + } + + .sidebar { + height: 40px; + min-height: 40px; + width: 100%; + margin: 0px; + padding: 0px; + position: static; + } + + .sidebar .location { + float: right; + margin: 0px; + padding: 3px 10px 1px 10px; + min-height: 39px; + background: inherit; + text-align: left; + font-size: 24px; + } + + .sidebar .location:empty { + padding: 0; + } + + .sidebar img { + width: 35px; + margin-top: 5px; + margin-bottom: 0px; + float: left; + } + + nav.sub { + margin: 0 auto; + } + + .sidebar .block { + display: none; + } + + .content { + margin-left: 0px; + } + + .content .in-band { + width: 100%; + } + + .content .out-of-band { + display: none; + } + + .toggle-wrapper > .collapse-toggle { + left: 0px; + } + + .toggle-wrapper { + height: 1.5em; + } } @media print { - nav.sub, .content .out-of-band, .collapse-toggle { - display: none; - } + nav.sub, .content .out-of-band, .collapse-toggle { + display: none; + } } diff --git a/src/librustdoc/html/static/styles/main.css b/src/librustdoc/html/static/styles/main.css index e138d62f986c1..6a9a24f69960a 100644 --- a/src/librustdoc/html/static/styles/main.css +++ b/src/librustdoc/html/static/styles/main.css @@ -26,8 +26,15 @@ h1.fqn { h2, 
h3:not(.impl):not(.method):not(.type):not(.tymethod), h4:not(.method):not(.type):not(.tymethod) { border-bottom-color: #DDDDDD; } +.in-band { + background-color: white; +} + +div.stability > em > code { + background-color: initial; +} -.docblock code { +.docblock code, .docblock-short code { background-color: #F5F5F5; } pre { @@ -81,8 +88,9 @@ pre { border-bottom-color: #ddd; } -.content a.primitive { color: #39a7bf; } -.content span.mod, .content a.mod, block a.current.mod { color: #4d76ae; } +.content span.primitive, .content a.primitive, .block a.current.primitive { color: #39a7bf; } +.content span.externcrate, +.content span.mod, .content a.mod, .block a.current.mod { color: #4d76ae; } .content span.fn, .content a.fn, .block a.current.fn, .content span.method, .content a.method, .block a.current.method, .content span.tymethod, .content a.tymethod, .block a.current.tymethod, @@ -99,21 +107,21 @@ nav.main .current { border-bottom-color: #000; } nav.main .separator { - border-color: 1px solid #000; + border: 1px solid #000; } a { color: #000; } -.docblock a, .stability a { - color: #4e8bca; +.docblock a, .docblock-short a, .stability a { + color: #3873AD; } a.test-arrow { color: #f5f5f5; } -.content span.trait, .content a.trait, .block a.current.trait { color: #8866ff; } +.content span.trait, .content a.trait, .block a.current.trait { color: #7c5af3; } .search-input { color: #555; diff --git a/src/librustdoc/html/toc.rs b/src/librustdoc/html/toc.rs index 53be8b5bc06c8..a7da1c5cca48c 100644 --- a/src/librustdoc/html/toc.rs +++ b/src/librustdoc/html/toc.rs @@ -183,15 +183,15 @@ impl fmt::Debug for Toc { impl fmt::Display for Toc { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - try!(write!(fmt, "
")?; + } + + fs::create_dir_all(&out_path)?; + + let rustdoc_args: &[String] = &[ + "".to_string(), + preprocessed_path.display().to_string(), + format!("-o{}", out_path.display()), + format!("--html-before-content={}", prelude.display()), + format!("--html-after-content={}", postlude.display()), + format!("--markdown-playground-url=https://play.rust-lang.org/"), + format!("--markdown-css={}", item.path_to_root.join("rustbook.css").display()), + "--markdown-no-toc".to_string(), + ]; + let output_result = rustdoc::main_args(rustdoc_args); + if output_result != 0 { + let message = format!("Could not execute `rustdoc` with {:?}: {}", + rustdoc_args, output_result); + return Err(err(&message)); + } + } + + // create index.html from the root README + fs::copy(&tgt.join("README.html"), &tgt.join("index.html"))?; + + Ok(()) +} + +impl Subcommand for Build { + fn parse_args(&mut self, _: &[String]) -> CliResult<()> { + Ok(()) + } + fn usage(&self) {} + fn execute(&mut self, term: &mut Term) -> CommandResult<()> { + let cwd = env::current_dir().unwrap(); + let src; + let tgt; + + if env::args().len() < 3 { + src = cwd.clone(); + } else { + src = PathBuf::from(&env::args().nth(2).unwrap()); + } + + if env::args().len() < 4 { + tgt = cwd.join("_book"); + } else { + tgt = PathBuf::from(&env::args().nth(3).unwrap()); + } + + // `_book` directory may already exist from previous runs. Check and + // delete it if it exists. + for entry in fs::read_dir(&cwd)? { + let path = entry?.path(); + if path == tgt { fs::remove_dir_all(&tgt)? 
} + } + fs::create_dir(&tgt)?; + + // Copy static files + let css = include_bytes!("static/rustbook.css"); + let js = include_bytes!("static/rustbook.js"); + + let mut css_file = File::create(tgt.join("rustbook.css"))?; + css_file.write_all(css)?; + + let mut js_file = File::create(tgt.join("rustbook.js"))?; + js_file.write_all(js)?; + + + let mut summary = File::open(&src.join("SUMMARY.md"))?; + match book::parse_summary(&mut summary, &src) { + Ok(book) => { + // execute rustdoc on the whole book + render(&book, &tgt) + } + Err(errors) => { + let n = errors.len(); + for err in errors { + term.err(&format!("error: {}", err)[..]); + } + + Err(err(&format!("{} errors occurred", n))) + } + } + } +} diff --git a/src/rustbook/error.rs b/src/tools/rustbook/error.rs similarity index 100% rename from src/rustbook/error.rs rename to src/tools/rustbook/error.rs diff --git a/src/rustbook/help.rs b/src/tools/rustbook/help.rs similarity index 100% rename from src/rustbook/help.rs rename to src/tools/rustbook/help.rs diff --git a/src/tools/rustbook/main.rs b/src/tools/rustbook/main.rs new file mode 100644 index 0000000000000..906251db1c2f2 --- /dev/null +++ b/src/tools/rustbook/main.rs @@ -0,0 +1,69 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#![deny(warnings)] + +#![feature(rustc_private)] +#![feature(rustdoc)] + +extern crate rustdoc; +extern crate rustc_back; + +use std::env; +use std::process; +use std::sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering}; +use term::Term; + +mod term; +mod error; +mod book; + +mod subcommand; +mod help; +mod build; +mod serve; +mod test; + +static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT; + +pub fn main() { + let mut term = Term::new(); + let cmd: Vec<_> = env::args().collect(); + + if cmd.len() <= 1 { + help::usage() + } else { + match subcommand::parse_name(&cmd[1][..]) { + Some(mut subcmd) => { + match subcmd.parse_args(&cmd[..cmd.len()-1]) { + Ok(_) => { + match subcmd.execute(&mut term) { + Ok(_) => (), + Err(err) => { + term.err(&format!("error: {}", err)); + } + } + } + Err(err) => { + println!("{}", err.description()); + println!(""); + subcmd.usage(); + } + } + } + None => { + println!("Unrecognized command '{}'.", cmd[1]); + println!(""); + help::usage(); + } + } + } + process::exit(EXIT_STATUS.load(Ordering::SeqCst) as i32); +} diff --git a/src/rustbook/serve.rs b/src/tools/rustbook/serve.rs similarity index 100% rename from src/rustbook/serve.rs rename to src/tools/rustbook/serve.rs diff --git a/src/rustbook/static/rustbook.css b/src/tools/rustbook/static/rustbook.css similarity index 100% rename from src/rustbook/static/rustbook.css rename to src/tools/rustbook/static/rustbook.css diff --git a/src/rustbook/static/rustbook.js b/src/tools/rustbook/static/rustbook.js similarity index 100% rename from src/rustbook/static/rustbook.js rename to src/tools/rustbook/static/rustbook.js diff --git a/src/rustbook/subcommand.rs b/src/tools/rustbook/subcommand.rs similarity index 100% rename from src/rustbook/subcommand.rs rename to src/tools/rustbook/subcommand.rs diff --git a/src/rustbook/term.rs b/src/tools/rustbook/term.rs similarity index 100% rename from src/rustbook/term.rs rename to src/tools/rustbook/term.rs diff --git 
a/src/tools/rustbook/test.rs b/src/tools/rustbook/test.rs new file mode 100644 index 0000000000000..002c46a7af48d --- /dev/null +++ b/src/tools/rustbook/test.rs @@ -0,0 +1,75 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation of the `test` subcommand. Just a stub for now. + +use subcommand::Subcommand; +use error::{err, CliResult, CommandResult}; +use term::Term; +use book; + +use std::fs::File; +use std::env; +use std::process::Command; + +struct Test; + +pub fn parse_cmd(name: &str) -> Option> { + if name == "test" { + Some(Box::new(Test)) + } else { + None + } +} + +impl Subcommand for Test { + fn parse_args(&mut self, _: &[String]) -> CliResult<()> { + Ok(()) + } + fn usage(&self) {} + fn execute(&mut self, term: &mut Term) -> CommandResult<()> { + let cwd = env::current_dir().unwrap(); + let src = cwd.clone(); + + let mut summary = File::open(&src.join("SUMMARY.md"))?; + match book::parse_summary(&mut summary, &src) { + Ok(book) => { + for (_, item) in book.iter() { + let output_result = Command::new("rustdoc") + .arg(&item.path) + .arg("--test") + .output(); + match output_result { + Ok(output) => { + if !output.status.success() { + term.err(&format!("{}\n{}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr))); + return Err(err("some tests failed")); + } + + } + Err(e) => { + let message = format!("could not execute `rustdoc`: {}", e); + return Err(err(&message)) + } + } + } + } + Err(errors) => { + for err in errors { + term.err(&err[..]); + } + return Err(err("there was an error")) + } + } + Ok(()) // lol + } +} diff --git a/src/tools/tidy/Cargo.toml b/src/tools/tidy/Cargo.toml new file mode 
100644 index 0000000000000..e900bd47fb7bd --- /dev/null +++ b/src/tools/tidy/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "tidy" +version = "0.1.0" +authors = ["Alex Crichton "] + +[dependencies] diff --git a/src/tools/tidy/src/bins.rs b/src/tools/tidy/src/bins.rs new file mode 100644 index 0000000000000..ef93b0858b02f --- /dev/null +++ b/src/tools/tidy/src/bins.rs @@ -0,0 +1,70 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy check to ensure that there are no binaries checked into the source tree +//! by accident. +//! +//! In the past we've accidentally checked in test binaries and such which add a +//! huge amount of bloat to the git history, so it's good to just ensure we +//! don't do that again :) + +use std::path::Path; + +// All files are executable on Windows, so just check on Unix +#[cfg(windows)] +pub fn check(_path: &Path, _bad: &mut bool) {} + +#[cfg(unix)] +pub fn check(path: &Path, bad: &mut bool) { + use std::fs; + use std::io::Read; + use std::process::{Command, Stdio}; + use std::os::unix::prelude::*; + + if let Ok(mut file) = fs::File::open("/proc/version") { + let mut contents = String::new(); + file.read_to_string(&mut contents).unwrap(); + // Probably on Windows Linux Subsystem, all files will be marked as + // executable, so skip checking. 
+ if contents.contains("Microsoft") { + return; + } + } + + super::walk(path, + &mut |path| super::filter_dirs(path) || path.ends_with("src/etc"), + &mut |file| { + let filename = file.file_name().unwrap().to_string_lossy(); + let extensions = [".py", ".sh"]; + if extensions.iter().any(|e| filename.ends_with(e)) { + return; + } + + let metadata = t!(fs::symlink_metadata(&file), &file); + if metadata.mode() & 0o111 != 0 { + let rel_path = file.strip_prefix(path).unwrap(); + let git_friendly_path = rel_path.to_str().unwrap().replace("\\", "/"); + let output = Command::new("git") + .arg("ls-files") + .arg(&git_friendly_path) + .current_dir(path) + .stderr(Stdio::null()) + .output() + .unwrap_or_else(|e| { + panic!("could not run git ls-files: {}", e); + }); + let path_bytes = rel_path.as_os_str().as_bytes(); + if output.status.success() && output.stdout.starts_with(path_bytes) { + println!("binary checked into source: {}", file.display()); + *bad = true; + } + } + }) +} diff --git a/src/tools/tidy/src/cargo.rs b/src/tools/tidy/src/cargo.rs new file mode 100644 index 0000000000000..11acb64743a7a --- /dev/null +++ b/src/tools/tidy/src/cargo.rs @@ -0,0 +1,108 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy check to ensure that `[dependencies]` and `extern crate` are in sync. +//! +//! This tidy check ensures that all crates listed in the `[dependencies]` +//! section of a `Cargo.toml` are present in the corresponding `lib.rs` as +//! `extern crate` declarations. This should help us keep the DAG correctly +//! structured through various refactorings to prune out unnecessary edges. 
+ +use std::io::prelude::*; +use std::fs::File; +use std::path::Path; + +pub fn check(path: &Path, bad: &mut bool) { + if path.ends_with("vendor") { + return + } + for entry in t!(path.read_dir(), path).map(|e| t!(e)) { + // Look for `Cargo.toml` with a sibling `src/lib.rs` or `lib.rs` + if entry.file_name().to_str() == Some("Cargo.toml") { + if path.join("src/lib.rs").is_file() { + verify(&entry.path(), &path.join("src/lib.rs"), bad) + } + if path.join("lib.rs").is_file() { + verify(&entry.path(), &path.join("lib.rs"), bad) + } + } else if t!(entry.file_type()).is_dir() { + check(&entry.path(), bad); + } + } +} + +// Verify that the dependencies in Cargo.toml at `tomlfile` are sync'd with the +// `extern crate` annotations in the lib.rs at `libfile`. +fn verify(tomlfile: &Path, libfile: &Path, bad: &mut bool) { + let mut toml = String::new(); + let mut librs = String::new(); + t!(t!(File::open(tomlfile)).read_to_string(&mut toml)); + t!(t!(File::open(libfile)).read_to_string(&mut librs)); + + if toml.contains("name = \"bootstrap\"") { + return + } + + // "Poor man's TOML parser", just assume we use one syntax for now + // + // We just look for: + // + // [dependencies] + // name = ... + // name2 = ... + // name3 = ... + // + // If we encounter a line starting with `[` then we assume it's the end of + // the dependency section and bail out. + let deps = match toml.find("[dependencies]") { + Some(i) => &toml[i+1..], + None => return, + }; + let mut lines = deps.lines().peekable(); + while let Some(line) = lines.next() { + if line.starts_with("[") { + break + } + + let mut parts = line.splitn(2, '='); + let krate = parts.next().unwrap().trim(); + if parts.next().is_none() { + continue + } + + // Don't worry about depending on core/std but not saying `extern crate + // core/std`, that's intentional. + if krate == "core" || krate == "std" { + continue + } + + // This is intentional, this dependency just makes the crate available + // for others later on. 
Cover cases + let whitelisted = krate == "alloc_jemalloc"; + let whitelisted = whitelisted || krate.starts_with("panic"); + if toml.contains("name = \"std\"") && whitelisted { + continue + } + + // We want the compiler to depend on the proc_macro_plugin crate so + // that it is built and included in the end, but we don't want to + // actually use it in the compiler. + if toml.contains("name = \"rustc_driver\"") && + krate == "proc_macro_plugin" { + continue + } + + if !librs.contains(&format!("extern crate {}", krate)) { + println!("{} doesn't have `extern crate {}`, but Cargo.toml \ + depends on it", libfile.display(), krate); + *bad = true; + } + } +} diff --git a/src/tools/tidy/src/errors.rs b/src/tools/tidy/src/errors.rs new file mode 100644 index 0000000000000..3a70e54ff9745 --- /dev/null +++ b/src/tools/tidy/src/errors.rs @@ -0,0 +1,93 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy check to verify the validity of long error diagnostic codes. +//! +//! This ensures that error codes are used at most once and also prints out some +//! statistics about the error codes. 
+ +use std::collections::HashMap; +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; + +pub fn check(path: &Path, bad: &mut bool) { + let mut contents = String::new(); + let mut map = HashMap::new(); + super::walk(path, + &mut |path| super::filter_dirs(path) || path.ends_with("src/test"), + &mut |file| { + let filename = file.file_name().unwrap().to_string_lossy(); + if filename != "diagnostics.rs" && filename != "diagnostic_list.rs" { + return + } + + contents.truncate(0); + t!(t!(File::open(file)).read_to_string(&mut contents)); + + // In the register_long_diagnostics! macro, entries look like this: + // + // EXXXX: r##" + // + // "##, + // + // and these long messages often have error codes themselves inside + // them, but we don't want to report duplicates in these cases. This + // variable keeps track of whether we're currently inside one of these + // long diagnostic messages. + let mut inside_long_diag = false; + for (num, line) in contents.lines().enumerate() { + if inside_long_diag { + inside_long_diag = !line.contains("\"##"); + continue + } + + let mut search = line; + while let Some(i) = search.find("E") { + search = &search[i + 1..]; + let code = if search.len() > 4 { + search[..4].parse::() + } else { + continue + }; + let code = match code { + Ok(n) => n, + Err(..) 
=> continue, + }; + map.entry(code).or_insert(Vec::new()) + .push((file.to_owned(), num + 1, line.to_owned())); + break + } + + inside_long_diag = line.contains("r##\""); + } + }); + + let mut max = 0; + for (&code, entries) in map.iter() { + if code > max { + max = code; + } + if entries.len() == 1 { + continue + } + + println!("duplicate error code: {}", code); + for &(ref file, line_num, ref line) in entries.iter() { + println!("{}:{}: {}", file.display(), line_num, line); + } + *bad = true; + } + + if !*bad { + println!("* {} error codes", map.len()); + println!("* highest error code: E{:04}", max); + } +} diff --git a/src/tools/tidy/src/features.rs b/src/tools/tidy/src/features.rs new file mode 100644 index 0000000000000..4ef07f7e4b896 --- /dev/null +++ b/src/tools/tidy/src/features.rs @@ -0,0 +1,175 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy check to ensure that unstable features are all in order +//! +//! This check will ensure properties like: +//! +//! * All stability attributes look reasonably well formed +//! * The set of library features is disjoint from the set of language features +//! * Library features have at most one stability level +//! 
* Library features have at most one `since` value + +use std::collections::HashMap; +use std::fmt; +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; + +#[derive(PartialEq)] +enum Status { + Stable, + Unstable, +} + +impl fmt::Display for Status { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let as_str = match *self { + Status::Stable => "stable", + Status::Unstable => "unstable", + }; + fmt::Display::fmt(as_str, f) + } +} + + +struct Feature { + name: String, + level: Status, + since: String, +} + +struct LibFeature { + level: Status, + since: String, +} + +pub fn check(path: &Path, bad: &mut bool) { + let features = collect_lang_features(&path.join("libsyntax/feature_gate.rs")); + assert!(!features.is_empty()); + let mut lib_features = HashMap::::new(); + + let mut contents = String::new(); + super::walk(path, + &mut |path| super::filter_dirs(path) || path.ends_with("src/test"), + &mut |file| { + let filename = file.file_name().unwrap().to_string_lossy(); + if !filename.ends_with(".rs") || filename == "features.rs" || + filename == "diagnostic_list.rs" { + return; + } + + contents.truncate(0); + t!(t!(File::open(file)).read_to_string(&mut contents)); + + for (i, line) in contents.lines().enumerate() { + let mut err = |msg: &str| { + println!("{}:{}: {}", file.display(), i + 1, msg); + *bad = true; + }; + let level = if line.contains("[unstable(") { + Status::Unstable + } else if line.contains("[stable(") { + Status::Stable + } else { + continue; + }; + let feature_name = match find_attr_val(line, "feature") { + Some(name) => name, + None => { + err("malformed stability attribute"); + continue; + } + }; + let since = match find_attr_val(line, "since") { + Some(name) => name, + None if level == Status::Stable => { + err("malformed stability attribute"); + continue; + } + None => "None", + }; + + if features.iter().any(|f| f.name == feature_name) { + err("duplicating a lang feature"); + } + if let Some(ref s) = 
lib_features.get(feature_name) { + if s.level != level { + err("different stability level than before"); + } + if s.since != since { + err("different `since` than before"); + } + continue; + } + lib_features.insert(feature_name.to_owned(), + LibFeature { + level: level, + since: since.to_owned(), + }); + } + }); + + if *bad { + return; + } + + let mut lines = Vec::new(); + for feature in features { + lines.push(format!("{:<32} {:<8} {:<12} {:<8}", + feature.name, + "lang", + feature.level, + feature.since)); + } + for (name, feature) in lib_features { + lines.push(format!("{:<32} {:<8} {:<12} {:<8}", + name, + "lib", + feature.level, + feature.since)); + } + + lines.sort(); + for line in lines { + println!("* {}", line); + } +} + +fn find_attr_val<'a>(line: &'a str, attr: &str) -> Option<&'a str> { + line.find(attr) + .and_then(|i| line[i..].find('"').map(|j| i + j + 1)) + .and_then(|i| line[i..].find('"').map(|j| (i, i + j))) + .map(|(i, j)| &line[i..j]) +} + +fn collect_lang_features(path: &Path) -> Vec { + let mut contents = String::new(); + t!(t!(File::open(path)).read_to_string(&mut contents)); + + contents.lines() + .filter_map(|line| { + let mut parts = line.trim().split(","); + let level = match parts.next().map(|l| l.trim().trim_left_matches('(')) { + Some("active") => Status::Unstable, + Some("removed") => Status::Unstable, + Some("accepted") => Status::Stable, + _ => return None, + }; + let name = parts.next().unwrap().trim(); + let since = parts.next().unwrap().trim().trim_matches('"'); + Some(Feature { + name: name.to_owned(), + level: level, + since: since.to_owned(), + }) + }) + .collect() +} diff --git a/src/tools/tidy/src/main.rs b/src/tools/tidy/src/main.rs new file mode 100644 index 0000000000000..cb11fe261c459 --- /dev/null +++ b/src/tools/tidy/src/main.rs @@ -0,0 +1,86 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy checks for source code in this repository +//! +//! This program runs all of the various tidy checks for style, cleanliness, +//! etc. This is run by default on `make check` and as part of the auto +//! builders. + +use std::fs; +use std::path::{PathBuf, Path}; +use std::env; + +macro_rules! t { + ($e:expr, $p:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed on {} with {}", stringify!($e), ($p).display(), e), + }); + + ($e:expr) => (match $e { + Ok(e) => e, + Err(e) => panic!("{} failed with {}", stringify!($e), e), + }) +} + +mod bins; +mod style; +mod errors; +mod features; +mod cargo; +mod pal; + +fn main() { + let path = env::args_os().skip(1).next().expect("need an argument"); + let path = PathBuf::from(path); + + let mut bad = false; + bins::check(&path, &mut bad); + style::check(&path, &mut bad); + errors::check(&path, &mut bad); + cargo::check(&path, &mut bad); + features::check(&path, &mut bad); + pal::check(&path, &mut bad); + + if bad { + panic!("some tidy checks failed"); + } +} + +fn filter_dirs(path: &Path) -> bool { + let skip = [ + "src/jemalloc", + "src/llvm", + "src/libbacktrace", + "src/compiler-rt", + "src/rt/hoedown", + "src/rustllvm", + "src/rust-installer", + "src/liblibc", + "src/vendor", + ]; + skip.iter().any(|p| path.ends_with(p)) +} + + +fn walk(path: &Path, skip: &mut FnMut(&Path) -> bool, f: &mut FnMut(&Path)) { + for entry in t!(fs::read_dir(path), path) { + let entry = t!(entry); + let kind = t!(entry.file_type()); + let path = entry.path(); + if kind.is_dir() { + if !skip(&path) { + walk(&path, skip, f); + } + } else { + f(&path); + } + } +} diff --git a/src/tools/tidy/src/pal.rs b/src/tools/tidy/src/pal.rs new file mode 100644 index 0000000000000..a5e4e5a4c2672 --- /dev/null +++ b/src/tools/tidy/src/pal.rs @@ -0,0 
+1,226 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Tidy check to enforce rules about platform-specific code in std +//! +//! This is intended to maintain existing standards of code +//! organization in hopes that the standard library will continue to +//! be refactored to isolate platform-specific bits, making porting +//! easier; where "standard library" roughly means "all the +//! dependencies of the std and test crates". +//! +//! This generally means placing restrictions on where `cfg(unix)`, +//! `cfg(windows)`, `cfg(target_os)` and `cfg(target_env)` may appear, +//! the basic objective being to isolate platform-specific code to the +//! platform-specific `std::sys` modules, and to the allocation, +//! unwinding, and libc crates. +//! +//! Following are the basic rules, though there are currently +//! exceptions: +//! +//! - core may not have platform-specific code +//! - liballoc_system may have platform-specific code +//! - liballoc_jemalloc may have platform-specific code +//! - libpanic_abort may have platform-specific code +//! - libpanic_unwind may have platform-specific code +//! - libunwind may have platform-specific code +//! - other crates in the std facade may not +//! - std may have platform-specific code in the following places +//! - sys/unix/ +//! - sys/windows/ +//! - os/ +//! +//! `std/sys_common` should _not_ contain platform-specific code. +//! Finally, because std contains tests with platform-specific +//! `ignore` attributes, once the parser encounters `mod tests`, +//! platform-specific cfgs are allowed. Not sure yet how to deal with +//! this in the long term. 
+ +use std::fs::File; +use std::io::Read; +use std::path::Path; +use std::iter::Iterator; + +// Paths that may contain platform-specific code +const EXCEPTION_PATHS: &'static [&'static str] = &[ + // std crates + "src/liballoc_jemalloc", + "src/liballoc_system", + "src/liblibc", + "src/libpanic_abort", + "src/libpanic_unwind", + "src/libunwind", + "src/libstd/sys/", // Platform-specific code for std lives here. + // This has the trailing slash so that sys_common is not excepted. + "src/libstd/os", // Platform-specific public interfaces + "src/rtstartup", // Not sure what to do about this. magic stuff for mingw + + // temporary exceptions + "src/libstd/rtdeps.rs", // Until rustbuild replaces make + "src/libstd/path.rs", + "src/libstd/f32.rs", + "src/libstd/f64.rs", + "src/libstd/sys_common/mod.rs", + "src/libstd/sys_common/net.rs", + "src/libterm", // Not sure how to make this crate portable, but test needs it + "src/libtest", // Probably should defer to unstable std::sys APIs + + // std testing crates, ok for now at least + "src/libcoretest", + + // non-std crates + "src/test", + "src/tools", + "src/librustc", + "src/librustdoc", + "src/libsyntax", + "src/bootstrap", +]; + +pub fn check(path: &Path, bad: &mut bool) { + let ref mut contents = String::new(); + // Sanity check that the complex parsing here works + let ref mut saw_target_arch = false; + let ref mut saw_cfg_bang = false; + super::walk(path, &mut super::filter_dirs, &mut |file| { + let filestr = file.to_string_lossy().replace("\\", "/"); + if !filestr.ends_with(".rs") { return } + + let is_exception_path = EXCEPTION_PATHS.iter().any(|s| filestr.contains(&**s)); + if is_exception_path { return } + + check_cfgs(contents, &file, bad, saw_target_arch, saw_cfg_bang); + }); + + assert!(*saw_target_arch); + assert!(*saw_cfg_bang); +} + +fn check_cfgs(contents: &mut String, file: &Path, + bad: &mut bool, saw_target_arch: &mut bool, saw_cfg_bang: &mut bool) { + contents.truncate(0); + t!(t!(File::open(file), 
file).read_to_string(contents)); + + // For now it's ok to have platform-specific code after 'mod tests'. + let mod_tests_idx = find_test_mod(contents); + let contents = &contents[..mod_tests_idx]; + // Pull out all "cfg(...)" and "cfg!(...)" strings + let cfgs = parse_cfgs(contents); + + let mut line_numbers: Option> = None; + let mut err = |idx: usize, cfg: &str| { + if line_numbers.is_none() { + line_numbers = Some(contents.match_indices('\n').map(|(i, _)| i).collect()); + } + let line_numbers = line_numbers.as_ref().expect(""); + let line = match line_numbers.binary_search(&idx) { + Ok(_) => unreachable!(), + Err(i) => i + 1 + }; + println!("{}:{}: platform-specific cfg: {}", file.display(), line, cfg); + *bad = true; + }; + + for (idx, cfg) in cfgs.into_iter() { + // Sanity check that the parsing here works + if !*saw_target_arch && cfg.contains("target_arch") { *saw_target_arch = true } + if !*saw_cfg_bang && cfg.contains("cfg!") { *saw_cfg_bang = true } + + let contains_platform_specific_cfg = + cfg.contains("target_os") + || cfg.contains("target_env") + || cfg.contains("target_vendor") + || cfg.contains("unix") + || cfg.contains("windows"); + + if !contains_platform_specific_cfg { continue } + + let preceeded_by_doc_comment = { + let pre_contents = &contents[..idx]; + let pre_newline = pre_contents.rfind('\n'); + let pre_doc_comment = pre_contents.rfind("///"); + match (pre_newline, pre_doc_comment) { + (Some(n), Some(c)) => n < c, + (None, Some(_)) => true, + (_, None) => false, + } + }; + + if preceeded_by_doc_comment { continue } + + err(idx, cfg); + } +} + +fn find_test_mod(contents: &str) -> usize { + if let Some(mod_tests_idx) = contents.find("mod tests") { + // Also capture a previos line indicating "mod tests" in cfg-ed out + let prev_newline_idx = contents[..mod_tests_idx].rfind('\n').unwrap_or(mod_tests_idx); + let prev_newline_idx = contents[..prev_newline_idx].rfind('\n'); + if let Some(nl) = prev_newline_idx { + let prev_line = &contents[nl + 1 
.. mod_tests_idx]; + let emcc_cfg = "cfg(all(test, not(target_os"; + if prev_line.contains(emcc_cfg) { + nl + } else { + mod_tests_idx + } + } else { + mod_tests_idx + } + } else { + contents.len() + } +} + +fn parse_cfgs<'a>(contents: &'a str) -> Vec<(usize, &'a str)> { + let candidate_cfgs = contents.match_indices("cfg"); + let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i); + // This is puling out the indexes of all "cfg" strings + // that appear to be tokens succeeded by a paren. + let cfgs = candidate_cfg_idxs.filter(|i| { + let pre_idx = i.saturating_sub(*i); + let succeeds_non_ident = !contents.as_bytes().get(pre_idx) + .cloned() + .map(char::from) + .map(char::is_alphanumeric) + .unwrap_or(false); + let contents_after = &contents[*i..]; + let first_paren = contents_after.find('('); + let paren_idx = first_paren.map(|ip| i + ip); + let preceeds_whitespace_and_paren = paren_idx.map(|ip| { + let maybe_space = &contents[*i + "cfg".len() .. ip]; + maybe_space.chars().all(|c| char::is_whitespace(c) || c == '!') + }).unwrap_or(false); + + succeeds_non_ident && preceeds_whitespace_and_paren + }); + + cfgs.map(|i| { + let mut depth = 0; + let contents_from = &contents[i..]; + for (j, byte) in contents_from.bytes().enumerate() { + match byte { + b'(' => { + depth += 1; + } + b')' => { + depth -= 1; + if depth == 0 { + return (i, &contents_from[.. j + 1]); + } + } + _ => { } + } + } + + unreachable!() + }).collect() +} diff --git a/src/tools/tidy/src/style.rs b/src/tools/tidy/src/style.rs new file mode 100644 index 0000000000000..c722eb690b8c3 --- /dev/null +++ b/src/tools/tidy/src/style.rs @@ -0,0 +1,127 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +//! Tidy check to enforce various stylistic guidelines on the Rust codebase. +//! +//! Example checks are: +//! +//! * No lines over 100 characters +//! * No tabs +//! * No trailing whitespace +//! * No CR characters +//! * No `TODO` or `XXX` directives +//! * A valid license header is at the top +//! +//! A number of these checks can be opted-out of with various directives like +//! `// ignore-tidy-linelength`. + +use std::fs::File; +use std::io::prelude::*; +use std::path::Path; + +const COLS: usize = 100; +const LICENSE: &'static str = "\ +Copyright The Rust Project Developers. See the COPYRIGHT +file at the top-level directory of this distribution and at +http://rust-lang.org/COPYRIGHT. + +Licensed under the Apache License, Version 2.0 or the MIT license +, at your +option. This file may not be copied, modified, or distributed +except according to those terms."; + +pub fn check(path: &Path, bad: &mut bool) { + let mut contents = String::new(); + super::walk(path, &mut super::filter_dirs, &mut |file| { + let filename = file.file_name().unwrap().to_string_lossy(); + let extensions = [".rs", ".py", ".js", ".sh", ".c", ".h"]; + if extensions.iter().all(|e| !filename.ends_with(e)) || + filename.starts_with(".#") { + return + } + if filename == "miniz.c" || filename.contains("jquery") { + return + } + + contents.truncate(0); + t!(t!(File::open(file), file).read_to_string(&mut contents)); + let skip_cr = contents.contains("ignore-tidy-cr"); + let skip_tab = contents.contains("ignore-tidy-tab"); + let skip_length = contents.contains("ignore-tidy-linelength"); + for (i, line) in contents.split("\n").enumerate() { + let mut err = |msg: &str| { + println!("{}:{}: {}", file.display(), i + 1, msg); + *bad = true; + }; + if line.chars().count() > COLS && !skip_length { + err(&format!("line longer than {} chars", COLS)); + } + if line.contains("\t") && !skip_tab { + err("tab character"); + } + if line.ends_with(" ") || line.ends_with("\t") { + err("trailing whitespace"); + 
} + if line.contains("\r") && !skip_cr { + err("CR character"); + } + if filename != "style.rs" { + if line.contains("TODO") { + err("TODO is deprecated; use FIXME") + } + if line.contains("//") && line.contains(" XXX") { + err("XXX is deprecated; use FIXME") + } + } + } + if !licenseck(file, &contents) { + println!("{}: incorrect license", file.display()); + *bad = true; + } + }) +} + +fn licenseck(file: &Path, contents: &str) -> bool { + if contents.contains("ignore-license") { + return true + } + let exceptions = [ + "libstd/sync/mpsc/mpsc_queue.rs", + "libstd/sync/mpsc/spsc_queue.rs", + ]; + if exceptions.iter().any(|f| file.ends_with(f)) { + return true + } + + // Skip the BOM if it's there + let bom = "\u{feff}"; + let contents = if contents.starts_with(bom) {&contents[3..]} else {contents}; + + // See if the license shows up in the first 100 lines + let lines = contents.lines().take(100).collect::>(); + lines.windows(LICENSE.lines().count()).any(|window| { + let offset = if window.iter().all(|w| w.starts_with("//")) { + 2 + } else if window.iter().all(|w| w.starts_with("#")) { + 1 + } else { + return false + }; + window.iter().map(|a| a[offset..].trim()) + .zip(LICENSE.lines()).all(|(a, b)| { + a == b || match b.find("") { + Some(i) => a.starts_with(&b[..i]) && a.ends_with(&b[i+6..]), + None => false, + } + }) + }) + +} diff --git a/src/vendor/cmake/.cargo-checksum.json b/src/vendor/cmake/.cargo-checksum.json new file mode 100644 index 0000000000000..b81d7d2fa04ea --- /dev/null +++ b/src/vendor/cmake/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"5d83ed1ae0b80cd6cebfc6a25b1fdb58c893ead400f0f84cd0ebf08d9ad48b28","Cargo.toml":"2266412ecb4504137a90d378ebdbf3a41f0e8b7188858cfb149da54792f7f8d9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"8ca528d20639506546044c676ff9069e3e850937b02bff4194dcf9e5c3c50d64","src/lib.rs":"dae5d93c005bf8d16427e29eb3bfb50c5527a1ec7c39a383d0694a8e8e38af90","src/registry.rs":"ca16433f51b5e3aedb0560bba41370b0c42de9238926a5118d1c0a3a072b64b2"},"package":"0e5bcf27e097a184c1df4437654ed98df3d7a516e8508a6ba45d8b092bbdf283"} \ No newline at end of file diff --git a/src/vendor/cmake/.cargo-ok b/src/vendor/cmake/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/cmake/.gitignore b/src/vendor/cmake/.gitignore new file mode 100644 index 0000000000000..4fffb2f89cbd8 --- /dev/null +++ b/src/vendor/cmake/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/src/vendor/cmake/.travis.yml b/src/vendor/cmake/.travis.yml new file mode 100644 index 0000000000000..3ac040c5c0949 --- /dev/null +++ b/src/vendor/cmake/.travis.yml @@ -0,0 +1,19 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo test --verbose + - cargo doc --no-deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: WSQJRyheeMf7eRdivHextSEQzyFnTIw2yeemO2+ZkHVftp0XYsTXQVca3RGlQNsVmjI0RP8lbDVe7HG23uwbTMeRgm+9hzSwNMa0ndJZ06TNMpPM6nqcXFUaNGeuf7EqU370xcgVBO+ZA0cSh55pJkOBg5ALd9bfRWbjEAjHkx8= +notifications: + email: + on_success: never diff --git a/src/vendor/cmake/Cargo.toml b/src/vendor/cmake/Cargo.toml new file mode 100644 
index 0000000000000..c17bbff922582 --- /dev/null +++ b/src/vendor/cmake/Cargo.toml @@ -0,0 +1,17 @@ +[package] + +name = "cmake" +version = "0.1.18" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["build-dependencies"] +repository = "https://github.com/alexcrichton/cmake-rs" +homepage = "https://github.com/alexcrichton/cmake-rs" +documentation = "http://alexcrichton.com/cmake-rs" +description = """ +A build dependency for running `cmake` to build a native library +""" + +[dependencies] +gcc = "0.3.17" diff --git a/src/vendor/cmake/LICENSE-APACHE b/src/vendor/cmake/LICENSE-APACHE new file mode 100644 index 0000000000000..16fe87b06e802 --- /dev/null +++ b/src/vendor/cmake/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/cmake/LICENSE-MIT b/src/vendor/cmake/LICENSE-MIT new file mode 100644 index 0000000000000..39e0ed6602151 --- /dev/null +++ b/src/vendor/cmake/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/cmake/README.md b/src/vendor/cmake/README.md new file mode 100644 index 0000000000000..8b2586eb01e25 --- /dev/null +++ b/src/vendor/cmake/README.md @@ -0,0 +1,22 @@ +# cmake + +[![Build Status](https://travis-ci.org/alexcrichton/cmake-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/cmake-rs) + +[Documentation](http://alexcrichton.com/cmake-rs) + +A build dependency for running the `cmake` build tool to compile a native +library. + +```toml +# Cargo.toml +[build-dependencies] +cmake = "0.2" +``` + +# License + +`cmake-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. diff --git a/src/vendor/cmake/src/lib.rs b/src/vendor/cmake/src/lib.rs new file mode 100644 index 0000000000000..3607d29026a03 --- /dev/null +++ b/src/vendor/cmake/src/lib.rs @@ -0,0 +1,522 @@ +//! A build dependency for running `cmake` to build a native library +//! +//! This crate provides some necessary boilerplate and shim support for running +//! the system `cmake` command to build a native library. It will add +//! appropriate cflags for building code to link into Rust, handle cross +//! compilation, and use the necessary generator for the platform being +//! targeted. +//! +//! The builder-style configuration allows for various variables and such to be +//! passed down into the build as well. +//! +//! ## Installation +//! +//! Add this to your `Cargo.toml`: +//! +//! ```toml +//! [build-dependencies] +//! cmake = "0.1" +//! ``` +//! +//! ## Examples +//! +//! ```no_run +//! use cmake; +//! +//! // Builds the project in the directory located in `libfoo`, installing it +//! // into $OUT_DIR +//! let dst = cmake::build("libfoo"); +//! +//! println!("cargo:rustc-link-search=native={}", dst.display()); +//! println!("cargo:rustc-link-lib=static=foo"); +//! ``` +//! +//! ```no_run +//! 
use cmake::Config; +//! +//! let dst = Config::new("libfoo") +//! .define("FOO", "BAR") +//! .cflag("-foo") +//! .build(); +//! println!("cargo:rustc-link-search=native={}", dst.display()); +//! println!("cargo:rustc-link-lib=static=foo"); +//! ``` + +#![deny(missing_docs)] + +extern crate gcc; + +use std::env; +use std::ffi::{OsString, OsStr}; +use std::fs::{self, File}; +use std::io::ErrorKind; +use std::io::prelude::*; +use std::path::{Path, PathBuf}; +use std::process::Command; + +#[cfg(windows)] +mod registry; + +/// Builder style configuration for a pending CMake build. +pub struct Config { + path: PathBuf, + generator: Option, + cflags: OsString, + cxxflags: OsString, + defines: Vec<(OsString, OsString)>, + deps: Vec, + target: Option, + host: Option, + out_dir: Option, + profile: Option, + build_args: Vec, + cmake_target: Option, +} + +/// Builds the native library rooted at `path` with the default cmake options. +/// This will return the directory in which the library was installed. +/// +/// # Examples +/// +/// ```no_run +/// use cmake; +/// +/// // Builds the project in the directory located in `libfoo`, installing it +/// // into $OUT_DIR +/// let dst = cmake::build("libfoo"); +/// +/// println!("cargo:rustc-link-search=native={}", dst.display()); +/// println!("cargo:rustc-link-lib=static=foo"); +/// ``` +/// +pub fn build>(path: P) -> PathBuf { + Config::new(path.as_ref()).build() +} + +impl Config { + /// Creates a new blank set of configuration to build the project specified + /// at the path `path`. + pub fn new>(path: P) -> Config { + Config { + path: env::current_dir().unwrap().join(path), + generator: None, + cflags: OsString::new(), + cxxflags: OsString::new(), + defines: Vec::new(), + deps: Vec::new(), + profile: None, + out_dir: None, + target: None, + host: None, + build_args: Vec::new(), + cmake_target: None, + } + } + + /// Sets the build-tool generator (`-G`) for this compilation. 
+ pub fn generator>(&mut self, generator: T) -> &mut Config { + self.generator = Some(generator.as_ref().to_owned()); + self + } + + /// Adds a custom flag to pass down to the C compiler, supplementing those + /// that this library already passes. + pub fn cflag>(&mut self, flag: P) -> &mut Config { + self.cflags.push(" "); + self.cflags.push(flag.as_ref()); + self + } + + /// Adds a custom flag to pass down to the C++ compiler, supplementing those + /// that this library already passes. + pub fn cxxflag>(&mut self, flag: P) -> &mut Config { + self.cxxflags.push(" "); + self.cxxflags.push(flag.as_ref()); + self + } + + /// Adds a new `-D` flag to pass to cmake during the generation step. + pub fn define(&mut self, k: K, v: V) -> &mut Config + where K: AsRef, V: AsRef + { + self.defines.push((k.as_ref().to_owned(), v.as_ref().to_owned())); + self + } + + /// Registers a dependency for this compilation on the native library built + /// by Cargo previously. + /// + /// This registration will modify the `CMAKE_PREFIX_PATH` environment + /// variable for the build system generation step. + pub fn register_dep(&mut self, dep: &str) -> &mut Config { + self.deps.push(dep.to_string()); + self + } + + /// Sets the target triple for this compilation. + /// + /// This is automatically scraped from `$TARGET` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn target(&mut self, target: &str) -> &mut Config { + self.target = Some(target.to_string()); + self + } + + /// Sets the host triple for this compilation. + /// + /// This is automatically scraped from `$HOST` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn host(&mut self, host: &str) -> &mut Config { + self.host = Some(host.to_string()); + self + } + + /// Sets the output directory for this compilation. 
+ /// + /// This is automatically scraped from `$OUT_DIR` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn out_dir>(&mut self, out: P) -> &mut Config { + self.out_dir = Some(out.as_ref().to_path_buf()); + self + } + + /// Sets the profile for this compilation. + /// + /// This is automatically scraped from `$PROFILE` which is set for Cargo + /// build scripts so it's not necessary to call this from a build script. + pub fn profile(&mut self, profile: &str) -> &mut Config { + self.profile = Some(profile.to_string()); + self + } + + /// Add an argument to the final `cmake` build step + pub fn build_arg>(&mut self, arg: A) -> &mut Config { + self.build_args.push(arg.as_ref().to_owned()); + self + } + + /// Sets the build target for the final `cmake` build step, this will + /// default to "install" if not specified. + pub fn build_target(&mut self, target: &str) -> &mut Config { + self.cmake_target = Some(target.to_string()); + self + } + + /// Run this configuration, compiling the library with all the configured + /// options. + /// + /// This will run both the build system generator command as well as the + /// command to build the library. 
+ pub fn build(&mut self) -> PathBuf { + let target = self.target.clone().unwrap_or_else(|| { + getenv_unwrap("TARGET") + }); + let host = self.host.clone().unwrap_or_else(|| { + getenv_unwrap("HOST") + }); + let msvc = target.contains("msvc"); + let c_compiler = gcc::Config::new().cargo_metadata(false) + .opt_level(0) + .debug(false) + .target(&target) + .host(&host) + .get_compiler(); + let cxx_compiler = gcc::Config::new().cargo_metadata(false) + .cpp(true) + .opt_level(0) + .debug(false) + .target(&target) + .host(&host) + .get_compiler(); + + let dst = self.out_dir.clone().unwrap_or_else(|| { + PathBuf::from(getenv_unwrap("OUT_DIR")) + }); + let build = dst.join("build"); + self.maybe_clear(&build); + let _ = fs::create_dir(&build); + + // Add all our dependencies to our cmake paths + let mut cmake_prefix_path = Vec::new(); + for dep in &self.deps { + if let Some(root) = env::var_os(&format!("DEP_{}_ROOT", dep)) { + cmake_prefix_path.push(PathBuf::from(root)); + } + } + let system_prefix = env::var_os("CMAKE_PREFIX_PATH") + .unwrap_or(OsString::new()); + cmake_prefix_path.extend(env::split_paths(&system_prefix) + .map(|s| s.to_owned())); + let cmake_prefix_path = env::join_paths(&cmake_prefix_path).unwrap(); + + // Build up the first cmake command to build the build system. + let mut cmd = Command::new("cmake"); + cmd.arg(&self.path) + .current_dir(&build); + if target.contains("windows-gnu") { + if host.contains("windows") { + // On MinGW we need to coerce cmake to not generate a visual + // studio build system but instead use makefiles that MinGW can + // use to build. + if self.generator.is_none() { + cmd.arg("-G").arg("MSYS Makefiles"); + } + } else { + // If we're cross compiling onto windows, then set some + // variables which will hopefully get things to succeed. Some + // systems may need the `windres` or `dlltool` variables set, so + // set them if possible. 
+ if !self.defined("CMAKE_SYSTEM_NAME") { + cmd.arg("-DCMAKE_SYSTEM_NAME=Windows"); + } + if !self.defined("CMAKE_RC_COMPILER") { + let exe = find_exe(c_compiler.path()); + if let Some(name) = exe.file_name().unwrap().to_str() { + let name = name.replace("gcc", "windres"); + let windres = exe.with_file_name(name); + if windres.is_file() { + let mut arg = OsString::from("-DCMAKE_RC_COMPILER="); + arg.push(&windres); + cmd.arg(arg); + } + } + } + } + } else if msvc { + // If we're on MSVC we need to be sure to use the right generator or + // otherwise we won't get 32/64 bit correct automatically. + if self.generator.is_none() { + cmd.arg("-G").arg(self.visual_studio_generator(&target)); + } + } + if let Some(ref generator) = self.generator { + cmd.arg("-G").arg(generator); + } + let profile = self.profile.clone().unwrap_or_else(|| { + match &getenv_unwrap("PROFILE")[..] { + "bench" | "release" => "Release", + // currently we need to always use the same CRT for MSVC + _ if msvc => "Release", + _ => "Debug", + }.to_string() + }); + for &(ref k, ref v) in &self.defines { + let mut os = OsString::from("-D"); + os.push(k); + os.push("="); + os.push(v); + cmd.arg(os); + } + + if !self.defined("CMAKE_INSTALL_PREFIX") { + let mut dstflag = OsString::from("-DCMAKE_INSTALL_PREFIX="); + dstflag.push(&dst); + cmd.arg(dstflag); + } + + { + let mut set_compiler = |kind: &str, + compiler: &gcc::Tool, + extra: &OsString| { + let flag_var = format!("CMAKE_{}_FLAGS", kind); + let tool_var = format!("CMAKE_{}_COMPILER", kind); + if !self.defined(&flag_var) { + let mut flagsflag = OsString::from("-D"); + flagsflag.push(&flag_var); + flagsflag.push("="); + flagsflag.push(extra); + for arg in compiler.args() { + flagsflag.push(" "); + flagsflag.push(arg); + } + cmd.arg(flagsflag); + } + + // Apparently cmake likes to have an absolute path to the + // compiler as otherwise it sometimes thinks that this variable + // changed as it thinks the found compiler, /usr/bin/cc, + // differs from 
the specified compiler, cc. Not entirely sure + // what's up, but at least this means cmake doesn't get + // confused? + // + // Also don't specify this on Windows as it's not needed for + // MSVC and for MinGW it doesn't really vary. + if !self.defined("CMAKE_TOOLCHAIN_FILE") + && !self.defined(&tool_var) + && env::consts::FAMILY != "windows" { + let mut ccompiler = OsString::from("-D"); + ccompiler.push(&tool_var); + ccompiler.push("="); + ccompiler.push(find_exe(compiler.path())); + cmd.arg(ccompiler); + } + }; + + set_compiler("C", &c_compiler, &self.cflags); + set_compiler("CXX", &cxx_compiler, &self.cxxflags); + } + + if !self.defined("CMAKE_BUILD_TYPE") { + cmd.arg(&format!("-DCMAKE_BUILD_TYPE={}", profile)); + } + + if !self.defined("CMAKE_TOOLCHAIN_FILE") { + if let Ok(s) = env::var("CMAKE_TOOLCHAIN_FILE") { + cmd.arg(&format!("-DCMAKE_TOOLCHAIN_FILE={}", s)); + } + } + + run(cmd.env("CMAKE_PREFIX_PATH", cmake_prefix_path), "cmake"); + + let mut parallel_args = Vec::new(); + if fs::metadata(&dst.join("build/Makefile")).is_ok() { + if let Ok(s) = env::var("NUM_JOBS") { + parallel_args.push(format!("-j{}", s)); + } + } + + // And build! + let target = self.cmake_target.clone().unwrap_or("install".to_string()); + run(Command::new("cmake") + .arg("--build").arg(".") + .arg("--target").arg(target) + .arg("--config").arg(profile) + .arg("--").args(&self.build_args) + .args(¶llel_args) + .current_dir(&build), "cmake"); + + println!("cargo:root={}", dst.display()); + return dst + } + + fn visual_studio_generator(&self, target: &str) -> String { + let base = match std::env::var("VisualStudioVersion") { + Ok(version) => { + match &version[..] 
{ + "15.0" => "Visual Studio 15", + "14.0" => "Visual Studio 14 2015", + "12.0" => "Visual Studio 12 2013", + vers => panic!("\n\n\ + unsupported or unknown VisualStudio version: {}\n\ + if another version is installed consider running \ + the appropriate vcvars script before building this \ + crate\n\ + ", vers), + } + } + _ => { + // Check for the presense of a specific registry key + // that indicates visual studio is installed. + if self.has_msbuild_version("15.0") { + "Visual Studio 15" + } else if self.has_msbuild_version("14.0") { + "Visual Studio 14 2015" + } else if self.has_msbuild_version("12.0") { + "Visual Studio 12 2013" + } else { + panic!("\n\n\ + couldn't determine visual studio generator\n\ + if VisualStudio is installed, however, consider \ + running the appropriate vcvars script before building \ + this crate\n\ + "); + } + } + }; + + if target.contains("i686") { + base.to_string() + } else if target.contains("x86_64") { + format!("{} Win64", base) + } else { + panic!("unsupported msvc target: {}", target); + } + } + + #[cfg(not(windows))] + fn has_msbuild_version(&self, _version: &str) -> bool { + false + } + + #[cfg(windows)] + fn has_msbuild_version(&self, version: &str) -> bool { + let key = format!("SOFTWARE\\Microsoft\\MSBuild\\ToolsVersions\\{}", + version); + registry::LOCAL_MACHINE.open(key.as_ref()).is_ok() + } + + fn defined(&self, var: &str) -> bool { + self.defines.iter().any(|&(ref a, _)| a == var) + } + + // If a cmake project has previously been built (e.g. CMakeCache.txt already + // exists), then cmake will choke if the source directory for the original + // project being built has changed. Detect this situation through the + // `CMAKE_HOME_DIRECTORY` variable that cmake emits and if it doesn't match + // we blow away the build directory and start from scratch (the recommended + // solution apparently [1]). 
+ // + // [1]: https://cmake.org/pipermail/cmake/2012-August/051545.html + fn maybe_clear(&self, dir: &Path) { + // CMake will apparently store canonicalized paths which normally + // isn't relevant to us but we canonicalize it here to ensure + // we're both checking the same thing. + let path = fs::canonicalize(&self.path).unwrap_or(self.path.clone()); + let src = match path.to_str() { + Some(src) => src, + None => return, + }; + let mut f = match File::open(dir.join("CMakeCache.txt")) { + Ok(f) => f, + Err(..) => return, + }; + let mut u8contents = Vec::new(); + match f.read_to_end(&mut u8contents) { + Ok(f) => f, + Err(..) => return, + }; + let contents = String::from_utf8_lossy(&u8contents); + drop(f); + for line in contents.lines() { + if line.contains("CMAKE_HOME_DIRECTORY") && !line.contains(src) { + println!("detected home dir change, cleaning out entire build \ + directory"); + fs::remove_dir_all(dir).unwrap(); + break + } + } + } +} + +fn run(cmd: &mut Command, program: &str) { + println!("running: {:?}", cmd); + let status = match cmd.status() { + Ok(status) => status, + Err(ref e) if e.kind() == ErrorKind::NotFound => { + fail(&format!("failed to execute command: {}\nis `{}` not installed?", + e, program)); + } + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } +} + +fn find_exe(path: &Path) -> PathBuf { + env::split_paths(&env::var_os("PATH").unwrap_or(OsString::new())) + .map(|p| p.join(path)) + .find(|p| fs::metadata(p).is_ok()) + .unwrap_or(path.to_owned()) +} + +fn getenv_unwrap(v: &str) -> String { + match env::var(v) { + Ok(s) => s, + Err(..) => fail(&format!("environment variable `{}` not defined", v)), + } +} + +fn fail(s: &str) -> ! 
{ + panic!("\n{}\n\nbuild script failed, must exit now", s) +} diff --git a/src/vendor/cmake/src/registry.rs b/src/vendor/cmake/src/registry.rs new file mode 100644 index 0000000000000..8819b094151e7 --- /dev/null +++ b/src/vendor/cmake/src/registry.rs @@ -0,0 +1,84 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::ffi::OsStr; +use std::io; +use std::os::raw; +use std::os::windows::prelude::*; + +pub struct RegistryKey(Repr); + +type HKEY = *mut u8; +type DWORD = u32; +type LPDWORD = *mut DWORD; +type LPCWSTR = *const u16; +type LPWSTR = *mut u16; +type LONG = raw::c_long; +type PHKEY = *mut HKEY; +type PFILETIME = *mut u8; +type LPBYTE = *mut u8; +type REGSAM = u32; + +const ERROR_SUCCESS: DWORD = 0; +const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; +const KEY_READ: DWORD = 0x20019; +const KEY_WOW64_32KEY: DWORD = 0x200; + +#[link(name = "advapi32")] +extern "system" { + fn RegOpenKeyExW(key: HKEY, + lpSubKey: LPCWSTR, + ulOptions: DWORD, + samDesired: REGSAM, + phkResult: PHKEY) -> LONG; + fn RegCloseKey(hKey: HKEY) -> LONG; +} + +struct OwnedKey(HKEY); + +enum Repr { + Const(HKEY), + Owned(OwnedKey), +} + +unsafe impl Sync for Repr {} +unsafe impl Send for Repr {} + +pub static LOCAL_MACHINE: RegistryKey = + RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); + +impl RegistryKey { + fn raw(&self) -> HKEY { + match self.0 { + Repr::Const(val) => val, + Repr::Owned(ref val) => val.0, + } + } + + pub fn open(&self, key: &OsStr) -> io::Result { + let key = key.encode_wide().chain(Some(0)).collect::>(); + let mut ret = 0 as *mut _; + let err = unsafe { + RegOpenKeyExW(self.raw(), key.as_ptr(), 0, + KEY_READ | KEY_WOW64_32KEY, &mut ret) + }; + if err == 
ERROR_SUCCESS as LONG { + Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) + } else { + Err(io::Error::from_raw_os_error(err as i32)) + } + } +} + +impl Drop for OwnedKey { + fn drop(&mut self) { + unsafe { RegCloseKey(self.0); } + } +} diff --git a/src/vendor/env_logger/.cargo-checksum.json b/src/vendor/env_logger/.cargo-checksum.json new file mode 100644 index 0000000000000..e3d83501ad079 --- /dev/null +++ b/src/vendor/env_logger/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"4af0565a97a599bba727315d9aff1f57a350dcfee7d9f00986c851e54a24b4ca","src/lib.rs":"484cec14a5f18a25b71d7b1842f7b184f0530165021b71b36dde9fc57b7fc15a","src/regex.rs":"d8e2a6958d4ed8084867063aae4b5c77ffc5d271dc2e17909d56c5a5e1552034","src/string.rs":"26ede9ab41a2673c3ad6001bc1802c005ce9a4f190f55860a24aa66b6b71bbc7","tests/regexp_filter.rs":"a3f9c01623e90e54b247a62c53b25caf5f502d054f28c0bdf92abbea486a95b5"},"package":"15abd780e45b3ea4f76b4e9a26ff4843258dd8a3eed2775a0e7368c2e7936c2f"} \ No newline at end of file diff --git a/src/vendor/env_logger/.cargo-ok b/src/vendor/env_logger/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/env_logger/Cargo.toml b/src/vendor/env_logger/Cargo.toml new file mode 100644 index 0000000000000..5efadbf0d6293 --- /dev/null +++ b/src/vendor/env_logger/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "env_logger" +version = "0.3.5" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +repository = "https://github.com/rust-lang/log" +documentation = "http://doc.rust-lang.org/log/env_logger" +homepage = "https://github.com/rust-lang/log" +description = """ +An logging implementation for `log` which is configured via an environment +variable. +""" + +[dependencies] +log = { version = "0.3", path = ".." 
} +regex = { version = "0.1", optional = true } + +[[test]] +name = "regexp_filter" +harness = false + +[features] +default = ["regex"] diff --git a/src/vendor/env_logger/src/lib.rs b/src/vendor/env_logger/src/lib.rs new file mode 100644 index 0000000000000..9105c19c65cd4 --- /dev/null +++ b/src/vendor/env_logger/src/lib.rs @@ -0,0 +1,623 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A logger configured via an environment variable which writes to standard +//! error. +//! +//! ## Example +//! +//! ``` +//! #[macro_use] extern crate log; +//! extern crate env_logger; +//! +//! use log::LogLevel; +//! +//! fn main() { +//! env_logger::init().unwrap(); +//! +//! debug!("this is a debug {}", "message"); +//! error!("this is printed by default"); +//! +//! if log_enabled!(LogLevel::Info) { +//! let x = 3 * 4; // expensive computation +//! info!("the answer was: {}", x); +//! } +//! } +//! ``` +//! +//! Assumes the binary is `main`: +//! +//! ```{.bash} +//! $ RUST_LOG=error ./main +//! ERROR:main: this is printed by default +//! ``` +//! +//! ```{.bash} +//! $ RUST_LOG=info ./main +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! ```{.bash} +//! $ RUST_LOG=debug ./main +//! DEBUG:main: this is a debug message +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! You can also set the log level on a per module basis: +//! +//! ```{.bash} +//! $ RUST_LOG=main=info ./main +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! And enable all logging: +//! +//! ```{.bash} +//! $ RUST_LOG=main ./main +//! 
DEBUG:main: this is a debug message +//! ERROR:main: this is printed by default +//! INFO:main: the answer was: 12 +//! ``` +//! +//! See the documentation for the log crate for more information about its API. +//! +//! ## Enabling logging +//! +//! Log levels are controlled on a per-module basis, and by default all logging +//! is disabled except for `error!`. Logging is controlled via the `RUST_LOG` +//! environment variable. The value of this environment variable is a +//! comma-separated list of logging directives. A logging directive is of the +//! form: +//! +//! ```text +//! path::to::module=log_level +//! ``` +//! +//! The path to the module is rooted in the name of the crate it was compiled +//! for, so if your program is contained in a file `hello.rs`, for example, to +//! turn on logging for this file you would use a value of `RUST_LOG=hello`. +//! Furthermore, this path is a prefix-search, so all modules nested in the +//! specified module will also have logging enabled. +//! +//! The actual `log_level` is optional to specify. If omitted, all logging will +//! be enabled. If specified, it must be one of the strings `debug`, `error`, +//! `info`, `warn`, or `trace`. +//! +//! As the log level for a module is optional, the module to enable logging for +//! is also optional. If only a `log_level` is provided, then the global log +//! level for all modules is set to this value. +//! +//! Some examples of valid values of `RUST_LOG` are: +//! +//! * `hello` turns on all logging for the 'hello' module +//! * `info` turns on all info logging +//! * `hello=debug` turns on debug logging for 'hello' +//! * `hello,std::option` turns on hello, and std's option logging +//! * `error,hello=warn` turn on global error logging and also warn for hello +//! +//! ## Filtering results +//! +//! A RUST_LOG directive may include a regex filter. The syntax is to append `/` +//! followed by a regex. Each message is checked against the regex, and is only +//! 
logged if it matches. Note that the matching is done after formatting the +//! log string but before adding any logging meta-data. There is a single filter +//! for all modules. +//! +//! Some examples: +//! +//! * `hello/foo` turns on all logging for the 'hello' module where the log +//! message includes 'foo'. +//! * `info/f.o` turns on all info logging where the log message includes 'foo', +//! 'f1o', 'fao', etc. +//! * `hello=debug/foo*foo` turns on debug logging for 'hello' where the log +//! message includes 'foofoo' or 'fofoo' or 'fooooooofoo', etc. +//! * `error,hello=warn/[0-9] scopes` turn on global error logging and also +//! warn for hello. In both cases the log message must include a single digit +//! number followed by 'scopes'. + +#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "http://www.rust-lang.org/favicon.ico", + html_root_url = "http://doc.rust-lang.org/env_logger/")] +#![cfg_attr(test, deny(warnings))] + +extern crate log; + +use std::env; +use std::io::prelude::*; +use std::io; +use std::mem; + +use log::{Log, LogLevel, LogLevelFilter, LogRecord, SetLoggerError, LogMetadata}; + +#[cfg(feature = "regex")] +#[path = "regex.rs"] +mod filter; + +#[cfg(not(feature = "regex"))] +#[path = "string.rs"] +mod filter; + +/// The logger. +pub struct Logger { + directives: Vec, + filter: Option, + format: Box String + Sync + Send>, +} + +/// LogBuilder acts as builder for initializing the Logger. +/// It can be used to customize the log format, change the enviromental variable used +/// to provide the logging directives and also set the default log level filter. 
+/// +/// ## Example +/// +/// ``` +/// #[macro_use] +/// extern crate log; +/// extern crate env_logger; +/// +/// use std::env; +/// use log::{LogRecord, LogLevelFilter}; +/// use env_logger::LogBuilder; +/// +/// fn main() { +/// let format = |record: &LogRecord| { +/// format!("{} - {}", record.level(), record.args()) +/// }; +/// +/// let mut builder = LogBuilder::new(); +/// builder.format(format).filter(None, LogLevelFilter::Info); +/// +/// if env::var("RUST_LOG").is_ok() { +/// builder.parse(&env::var("RUST_LOG").unwrap()); +/// } +/// +/// builder.init().unwrap(); +/// +/// error!("error message"); +/// info!("info message"); +/// } +/// ``` +pub struct LogBuilder { + directives: Vec, + filter: Option, + format: Box String + Sync + Send>, +} + +impl LogBuilder { + /// Initializes the log builder with defaults + pub fn new() -> LogBuilder { + LogBuilder { + directives: Vec::new(), + filter: None, + format: Box::new(|record: &LogRecord| { + format!("{}:{}: {}", record.level(), + record.location().module_path(), record.args()) + }), + } + } + + /// Adds filters to the logger + /// + /// The given module (if any) will log at most the specified level provided. + /// If no module is provided then the filter will apply to all log messages. + pub fn filter(&mut self, + module: Option<&str>, + level: LogLevelFilter) -> &mut Self { + self.directives.push(LogDirective { + name: module.map(|s| s.to_string()), + level: level, + }); + self + } + + /// Sets the format function for formatting the log output. + /// + /// This function is called on each record logged to produce a string which + /// is actually printed out. + pub fn format(&mut self, format: F) -> &mut Self + where F: Fn(&LogRecord) -> String + Sync + Send + { + self.format = Box::new(format); + self + } + + /// Parses the directives string in the same form as the RUST_LOG + /// environment variable. + /// + /// See the module documentation for more details. 
+ pub fn parse(&mut self, filters: &str) -> &mut Self { + let (directives, filter) = parse_logging_spec(filters); + + self.filter = filter; + + for directive in directives { + self.directives.push(directive); + } + self + } + + /// Initializes the global logger with an env logger. + /// + /// This should be called early in the execution of a Rust program, and the + /// global logger may only be initialized once. Future initialization + /// attempts will return an error. + pub fn init(&mut self) -> Result<(), SetLoggerError> { + log::set_logger(|max_level| { + let logger = self.build(); + max_level.set(logger.filter()); + Box::new(logger) + }) + } + + /// Build an env logger. + pub fn build(&mut self) -> Logger { + if self.directives.is_empty() { + // Adds the default filter if none exist + self.directives.push(LogDirective { + name: None, + level: LogLevelFilter::Error, + }); + } else { + // Sort the directives by length of their name, this allows a + // little more efficient lookup at runtime. + self.directives.sort_by(|a, b| { + let alen = a.name.as_ref().map(|a| a.len()).unwrap_or(0); + let blen = b.name.as_ref().map(|b| b.len()).unwrap_or(0); + alen.cmp(&blen) + }); + } + + Logger { + directives: mem::replace(&mut self.directives, Vec::new()), + filter: mem::replace(&mut self.filter, None), + format: mem::replace(&mut self.format, Box::new(|_| String::new())), + } + } +} + +impl Logger { + pub fn new() -> Logger { + let mut builder = LogBuilder::new(); + + if let Ok(s) = env::var("RUST_LOG") { + builder.parse(&s); + } + + builder.build() + } + + pub fn filter(&self) -> LogLevelFilter { + self.directives.iter() + .map(|d| d.level).max() + .unwrap_or(LogLevelFilter::Off) + } + + fn enabled(&self, level: LogLevel, target: &str) -> bool { + // Search for the longest match, the vector is assumed to be pre-sorted. + for directive in self.directives.iter().rev() { + match directive.name { + Some(ref name) if !target.starts_with(&**name) => {}, + Some(..) 
| None => { + return level <= directive.level + } + } + } + false + } +} + +impl Log for Logger { + fn enabled(&self, metadata: &LogMetadata) -> bool { + self.enabled(metadata.level(), metadata.target()) + } + + fn log(&self, record: &LogRecord) { + if !Log::enabled(self, record.metadata()) { + return; + } + + if let Some(filter) = self.filter.as_ref() { + if !filter.is_match(&*record.args().to_string()) { + return; + } + } + + let _ = writeln!(&mut io::stderr(), "{}", (self.format)(record)); + } +} + +struct LogDirective { + name: Option, + level: LogLevelFilter, +} + +/// Initializes the global logger with an env logger. +/// +/// This should be called early in the execution of a Rust program, and the +/// global logger may only be initialized once. Future initialization attempts +/// will return an error. +pub fn init() -> Result<(), SetLoggerError> { + let mut builder = LogBuilder::new(); + + if let Ok(s) = env::var("RUST_LOG") { + builder.parse(&s); + } + + builder.init() +} + +/// Parse a logging specification string (e.g: "crate1,crate2::mod3,crate3::x=error/foo") +/// and return a vector with log directives. 
+fn parse_logging_spec(spec: &str) -> (Vec, Option) { + let mut dirs = Vec::new(); + + let mut parts = spec.split('/'); + let mods = parts.next(); + let filter = parts.next(); + if parts.next().is_some() { + println!("warning: invalid logging spec '{}', \ + ignoring it (too many '/'s)", spec); + return (dirs, None); + } + mods.map(|m| { for s in m.split(',') { + if s.len() == 0 { continue } + let mut parts = s.split('='); + let (log_level, name) = match (parts.next(), parts.next().map(|s| s.trim()), parts.next()) { + (Some(part0), None, None) => { + // if the single argument is a log-level string or number, + // treat that as a global fallback + match part0.parse() { + Ok(num) => (num, None), + Err(_) => (LogLevelFilter::max(), Some(part0)), + } + } + (Some(part0), Some(""), None) => (LogLevelFilter::max(), Some(part0)), + (Some(part0), Some(part1), None) => { + match part1.parse() { + Ok(num) => (num, Some(part0)), + _ => { + println!("warning: invalid logging spec '{}', \ + ignoring it", part1); + continue + } + } + }, + _ => { + println!("warning: invalid logging spec '{}', \ + ignoring it", s); + continue + } + }; + dirs.push(LogDirective { + name: name.map(|s| s.to_string()), + level: log_level, + }); + }}); + + let filter = filter.map_or(None, |filter| { + match filter::Filter::new(filter) { + Ok(re) => Some(re), + Err(e) => { + println!("warning: invalid regex filter - {}", e); + None + } + } + }); + + return (dirs, filter); +} + +#[cfg(test)] +mod tests { + use log::{LogLevel, LogLevelFilter}; + + use super::{LogBuilder, Logger, LogDirective, parse_logging_spec}; + + fn make_logger(dirs: Vec) -> Logger { + let mut logger = LogBuilder::new().build(); + logger.directives = dirs; + logger + } + + #[test] + fn filter_info() { + let logger = LogBuilder::new().filter(None, LogLevelFilter::Info).build(); + assert!(logger.enabled(LogLevel::Info, "crate1")); + assert!(!logger.enabled(LogLevel::Debug, "crate1")); + } + + #[test] + fn filter_beginning_longest_match() 
{ + let logger = LogBuilder::new() + .filter(Some("crate2"), LogLevelFilter::Info) + .filter(Some("crate2::mod"), LogLevelFilter::Debug) + .filter(Some("crate1::mod1"), LogLevelFilter::Warn) + .build(); + assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn parse_default() { + let logger = LogBuilder::new().parse("info,crate1::mod1=warn").build(); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn match_full_path() { + let logger = make_logger(vec![ + LogDirective { + name: Some("crate2".to_string()), + level: LogLevelFilter::Info + }, + LogDirective { + name: Some("crate1::mod1".to_string()), + level: LogLevelFilter::Warn + } + ]); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(!logger.enabled(LogLevel::Info, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn no_match() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(!logger.enabled(LogLevel::Warn, "crate3")); + } + + #[test] + fn match_beginning() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(logger.enabled(LogLevel::Info, "crate2::mod1")); + } + + #[test] + fn match_beginning_longest_match() { + let logger = make_logger(vec![ + LogDirective { name: Some("crate2".to_string()), level: LogLevelFilter::Info }, + LogDirective { name: Some("crate2::mod".to_string()), level: LogLevelFilter::Debug }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + 
assert!(logger.enabled(LogLevel::Debug, "crate2::mod1")); + assert!(!logger.enabled(LogLevel::Debug, "crate2")); + } + + #[test] + fn match_default() { + let logger = make_logger(vec![ + LogDirective { name: None, level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Warn } + ]); + assert!(logger.enabled(LogLevel::Warn, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn zero_level() { + let logger = make_logger(vec![ + LogDirective { name: None, level: LogLevelFilter::Info }, + LogDirective { name: Some("crate1::mod1".to_string()), level: LogLevelFilter::Off } + ]); + assert!(!logger.enabled(LogLevel::Error, "crate1::mod1")); + assert!(logger.enabled(LogLevel::Info, "crate2::mod2")); + } + + #[test] + fn parse_logging_spec_valid() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug"); + assert_eq!(dirs.len(), 3); + assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Error); + + assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::max()); + + assert_eq!(dirs[2].name, Some("crate2".to_string())); + assert_eq!(dirs[2].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_invalid_crate() { + // test parse_logging_spec with multiple = in specification + let (dirs, filter) = parse_logging_spec("crate1::mod1=warn=info,crate2=debug"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_invalid_log_level() { + // test parse_logging_spec with 'noNumber' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=noNumber,crate2=debug"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + 
assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_string_log_level() { + // test parse_logging_spec with 'warn' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2=warn"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Warn); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_empty_log_level() { + // test parse_logging_spec with '' as log level + let (dirs, filter) = parse_logging_spec("crate1::mod1=wrong,crate2="); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::max()); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_global() { + // test parse_logging_spec with no crate + let (dirs, filter) = parse_logging_spec("warn,crate2=debug"); + assert_eq!(dirs.len(), 2); + assert_eq!(dirs[0].name, None); + assert_eq!(dirs[0].level, LogLevelFilter::Warn); + assert_eq!(dirs[1].name, Some("crate2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::Debug); + assert!(filter.is_none()); + } + + #[test] + fn parse_logging_spec_valid_filter() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error,crate1::mod2,crate2=debug/abc"); + assert_eq!(dirs.len(), 3); + assert_eq!(dirs[0].name, Some("crate1::mod1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Error); + + assert_eq!(dirs[1].name, Some("crate1::mod2".to_string())); + assert_eq!(dirs[1].level, LogLevelFilter::max()); + + assert_eq!(dirs[2].name, Some("crate2".to_string())); + assert_eq!(dirs[2].level, LogLevelFilter::Debug); + assert!(filter.is_some() && filter.unwrap().to_string() == "abc"); + } + + #[test] + fn parse_logging_spec_invalid_crate_filter() { + let (dirs, filter) = parse_logging_spec("crate1::mod1=error=warn,crate2=debug/a.c"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, 
Some("crate2".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::Debug); + assert!(filter.is_some() && filter.unwrap().to_string() == "a.c"); + } + + #[test] + fn parse_logging_spec_empty_with_filter() { + let (dirs, filter) = parse_logging_spec("crate1/a*c"); + assert_eq!(dirs.len(), 1); + assert_eq!(dirs[0].name, Some("crate1".to_string())); + assert_eq!(dirs[0].level, LogLevelFilter::max()); + assert!(filter.is_some() && filter.unwrap().to_string() == "a*c"); + } +} diff --git a/src/vendor/env_logger/src/regex.rs b/src/vendor/env_logger/src/regex.rs new file mode 100644 index 0000000000000..0df03e673304d --- /dev/null +++ b/src/vendor/env_logger/src/regex.rs @@ -0,0 +1,28 @@ +extern crate regex; + +use std::fmt; + +use self::regex::Regex; + +pub struct Filter { + inner: Regex, +} + +impl Filter { + pub fn new(spec: &str) -> Result { + match Regex::new(spec){ + Ok(r) => Ok(Filter { inner: r }), + Err(e) => Err(e.to_string()), + } + } + + pub fn is_match(&self, s: &str) -> bool { + self.inner.is_match(s) + } +} + +impl fmt::Display for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} diff --git a/src/vendor/env_logger/src/string.rs b/src/vendor/env_logger/src/string.rs new file mode 100644 index 0000000000000..74d0e04dbd6ed --- /dev/null +++ b/src/vendor/env_logger/src/string.rs @@ -0,0 +1,21 @@ +use std::fmt; + +pub struct Filter { + inner: String, +} + +impl Filter { + pub fn new(spec: &str) -> Result { + Ok(Filter { inner: spec.to_string() }) + } + + pub fn is_match(&self, s: &str) -> bool { + s.contains(&self.inner) + } +} + +impl fmt::Display for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.inner.fmt(f) + } +} diff --git a/src/vendor/env_logger/tests/regexp_filter.rs b/src/vendor/env_logger/tests/regexp_filter.rs new file mode 100644 index 0000000000000..5036fb8e3c9c3 --- /dev/null +++ b/src/vendor/env_logger/tests/regexp_filter.rs @@ -0,0 +1,51 @@ +#[macro_use] extern crate log; 
+extern crate env_logger; + +use std::process; +use std::env; +use std::str; + +fn main() { + if env::var("LOG_REGEXP_TEST").ok() == Some(String::from("1")) { + child_main(); + } else { + parent_main() + } +} + +fn child_main() { + env_logger::init().unwrap(); + info!("XYZ Message"); +} + +fn run_child(rust_log: String) -> bool { + let exe = env::current_exe().unwrap(); + let out = process::Command::new(exe) + .env("LOG_REGEXP_TEST", "1") + .env("RUST_LOG", rust_log) + .output() + .unwrap_or_else(|e| panic!("Unable to start child process: {}", e)); + str::from_utf8(out.stderr.as_ref()).unwrap().contains("XYZ Message") +} + +fn assert_message_printed(rust_log: &str) { + if !run_child(rust_log.to_string()) { + panic!("RUST_LOG={} should allow the test log message", rust_log) + } +} + +fn assert_message_not_printed(rust_log: &str) { + if run_child(rust_log.to_string()) { + panic!("RUST_LOG={} should not allow the test log message", rust_log) + } +} + +fn parent_main() { + // test normal log severity levels + assert_message_printed("info"); + assert_message_not_printed("warn"); + + // test of regular expression filters + assert_message_printed("info/XYZ"); + assert_message_not_printed("info/XXX"); +} diff --git a/src/vendor/filetime/.cargo-checksum.json b/src/vendor/filetime/.cargo-checksum.json new file mode 100644 index 0000000000000..674ae31b296ba --- /dev/null +++ b/src/vendor/filetime/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"c8cfe2c700e7b1d6500d0ad8084694be7009095e9572aaf54bf695c1fe7822d6","Cargo.toml":"4e414fe72ef2afcae81fb5a89f39e59ec40844272b589381746623f612333305","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"fef1998633eb2f460e6b12bc1133a21f5674e0b53ae5914ba1e53f1b63a185c3","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"8fa03e69ab113e5a30c742f60b6beddc0b77ef41a1eb45e82f9df867c9265815"},"package":"5363ab8e4139b8568a6237db5248646e5a8a2f89bd5ccb02092182b11fd3e922"} \ No newline at end of file diff --git a/src/vendor/filetime/.cargo-ok b/src/vendor/filetime/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/filetime/.gitignore b/src/vendor/filetime/.gitignore new file mode 100644 index 0000000000000..a9d37c560c6ab --- /dev/null +++ b/src/vendor/filetime/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/src/vendor/filetime/.travis.yml b/src/vendor/filetime/.travis.yml new file mode 100644 index 0000000000000..001cdd259ecf3 --- /dev/null +++ b/src/vendor/filetime/.travis.yml @@ -0,0 +1,26 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc --no-deps +after_success: | + [ $TRAVIS_BRANCH = master ] && + [ $TRAVIS_PULL_REQUEST = false ] && + echo '' > target/doc/index.html && + pip install ghp-import --user $USER && + $HOME/.local/bin/ghp-import -n target/doc && + git push -qf https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages +notifications: + email: + on_success: never +env: + global: + secure: 
dsIj09BQvGF872zKmqzG+WwCl7gfqwsnxcm3GZlAMgyLYm4juvHOwCRhIERCN3BCxPvdlSRKhe9Rwmp1RkiKuqTK3ITUTAy29Maf2vuL1T+zcdpZE0t6JSCU1gbEwzCA2foB1jzgy7Q47EzeJusmGNwibscjYmXKlH6JCFwTobM= +os: + - linux + - osx diff --git a/src/vendor/filetime/Cargo.toml b/src/vendor/filetime/Cargo.toml new file mode 100644 index 0000000000000..971eaf601469d --- /dev/null +++ b/src/vendor/filetime/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "filetime" +authors = ["Alex Crichton "] +version = "0.1.10" +license = "MIT/Apache-2.0" +readme = "README.md" +keywords = ["timestamp", "mtime"] +repository = "https://github.com/alexcrichton/filetime" +homepage = "https://github.com/alexcrichton/filetime" +documentation = "http://alexcrichton.com/filetime" +description = """ +Platform-agnostic accessors of timestamps in File metadata +""" + +[dependencies] +libc = "0.2" + +[dev-dependencies] +tempdir = "0.3" diff --git a/src/vendor/filetime/LICENSE-APACHE b/src/vendor/filetime/LICENSE-APACHE new file mode 100644 index 0000000000000..16fe87b06e802 --- /dev/null +++ b/src/vendor/filetime/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/filetime/LICENSE-MIT b/src/vendor/filetime/LICENSE-MIT new file mode 100644 index 0000000000000..39e0ed6602151 --- /dev/null +++ b/src/vendor/filetime/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/filetime/README.md b/src/vendor/filetime/README.md new file mode 100644 index 0000000000000..0422084e7e206 --- /dev/null +++ b/src/vendor/filetime/README.md @@ -0,0 +1,25 @@ +# filetime + +[![Build Status](https://travis-ci.org/alexcrichton/filetime.svg?branch=master)](https://travis-ci.org/alexcrichton/filetime) +[![Build status](https://ci.appveyor.com/api/projects/status/9tatexq47i3ee13k?svg=true)](https://ci.appveyor.com/project/alexcrichton/filetime) + +[Documentation](http://alexcrichton.com/filetime/filetime/index.html) + +A helper library for inspecting the various timestamps of files in Rust. This +library takes into account cross-platform differences in terms of where the +timestamps are located, what they are called, and how to convert them into a +platform-independent representation. + +```toml +# Cargo.toml +[dependencies] +filetime = "0.1" +``` + +# License + +`filetime` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. 
diff --git a/src/vendor/filetime/appveyor.yml b/src/vendor/filetime/appveyor.yml new file mode 100644 index 0000000000000..6a1b8dc19c039 --- /dev/null +++ b/src/vendor/filetime/appveyor.yml @@ -0,0 +1,17 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc + - TARGET: i686-pc-windows-gnu +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - SET PATH=%PATH%;C:\MinGW\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --verbose diff --git a/src/vendor/filetime/src/lib.rs b/src/vendor/filetime/src/lib.rs new file mode 100644 index 0000000000000..aa6bec1dfefe3 --- /dev/null +++ b/src/vendor/filetime/src/lib.rs @@ -0,0 +1,305 @@ +//! Timestamps for files in Rust +//! +//! This library provides platform-agnostic inspection of the various timestamps +//! present in the standard `fs::Metadata` structure. +//! +//! # Installation +//! +//! Add this to you `Cargo.toml`: +//! +//! ```toml +//! [dependencies] +//! filetime = "0.1" +//! ``` +//! +//! # Usage +//! +//! ```no_run +//! use std::fs; +//! use filetime::FileTime; +//! +//! let metadata = fs::metadata("foo.txt").unwrap(); +//! +//! let mtime = FileTime::from_last_modification_time(&metadata); +//! println!("{}", mtime); +//! +//! let atime = FileTime::from_last_access_time(&metadata); +//! assert!(mtime < atime); +//! +//! // Inspect values that can be interpreted across platforms +//! println!("{}", mtime.seconds_relative_to_1970()); +//! println!("{}", mtime.nanoseconds()); +//! +//! // Print the platform-specific value of seconds +//! println!("{}", mtime.seconds()); +//! 
``` + +extern crate libc; + +#[cfg(unix)] use std::os::unix::prelude::*; +#[cfg(windows)] use std::os::windows::prelude::*; + +use std::fmt; +use std::fs; +use std::io; +use std::path::Path; + +/// A helper structure to represent a timestamp for a file. +/// +/// The actual value contined within is platform-specific and does not have the +/// same meaning across platforms, but comparisons and stringification can be +/// significant among the same platform. +#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Copy, Clone, Hash)] +pub struct FileTime { + seconds: u64, + nanos: u32, +} + +impl FileTime { + /// Creates a new timestamp representing a 0 time. + /// + /// Useful for creating the base of a cmp::max chain of times. + pub fn zero() -> FileTime { + FileTime { seconds: 0, nanos: 0 } + } + + /// Creates a new instance of `FileTime` with a number of seconds and + /// nanoseconds relative to January 1, 1970. + /// + /// Note that this is typically the relative point that Unix time stamps are + /// from, but on Windows the native time stamp is relative to January 1, + /// 1601 so the return value of `seconds` from the returned `FileTime` + /// instance may not be the same as that passed in. + pub fn from_seconds_since_1970(seconds: u64, nanos: u32) -> FileTime { + FileTime { + seconds: seconds + if cfg!(windows) {11644473600} else {0}, + nanos: nanos, + } + } + + /// Creates a new timestamp from the last modification time listed in the + /// specified metadata. + /// + /// The returned value corresponds to the `mtime` field of `stat` on Unix + /// platforms and the `ftLastWriteTime` field on Windows platforms. 
+ pub fn from_last_modification_time(meta: &fs::Metadata) -> FileTime { + #[cfg(unix)] + fn imp(meta: &fs::Metadata) -> FileTime { + FileTime::from_os_repr(meta.mtime() as u64, meta.mtime_nsec() as u32) + } + #[cfg(windows)] + fn imp(meta: &fs::Metadata) -> FileTime { + FileTime::from_os_repr(meta.last_write_time()) + } + imp(meta) + } + + /// Creates a new timestamp from the last access time listed in the + /// specified metadata. + /// + /// The returned value corresponds to the `atime` field of `stat` on Unix + /// platforms and the `ftLastAccessTime` field on Windows platforms. + pub fn from_last_access_time(meta: &fs::Metadata) -> FileTime { + #[cfg(unix)] + fn imp(meta: &fs::Metadata) -> FileTime { + FileTime::from_os_repr(meta.atime() as u64, meta.atime_nsec() as u32) + } + #[cfg(windows)] + fn imp(meta: &fs::Metadata) -> FileTime { + FileTime::from_os_repr(meta.last_access_time()) + } + imp(meta) + } + + /// Creates a new timestamp from the creation time listed in the specified + /// metadata. + /// + /// The returned value corresponds to the `birthtime` field of `stat` on + /// Unix platforms and the `ftCreationTime` field on Windows platforms. Note + /// that not all Unix platforms have this field available and may return + /// `None` in some circumstances. + pub fn from_creation_time(meta: &fs::Metadata) -> Option { + macro_rules! birthtim { + ($(($e:expr, $i:ident)),*) => { + #[cfg(any($(target_os = $e),*))] + fn imp(meta: &fs::Metadata) -> Option { + $( + #[cfg(target_os = $e)] + use std::os::$i::fs::MetadataExt; + )* + let raw = meta.as_raw_stat(); + Some(FileTime::from_os_repr(raw.st_birthtime as u64, + raw.st_birthtime_nsec as u32)) + } + + #[cfg(all(not(windows), + $(not(target_os = $e)),*))] + fn imp(_meta: &fs::Metadata) -> Option { + None + } + } + } + + birthtim! 
{ + ("bitrig", bitrig), + ("freebsd", freebsd), + ("ios", ios), + ("macos", macos), + ("openbsd", openbsd) + } + + #[cfg(windows)] + fn imp(meta: &fs::Metadata) -> Option { + Some(FileTime::from_os_repr(meta.last_access_time())) + } + imp(meta) + } + + #[cfg(windows)] + fn from_os_repr(time: u64) -> FileTime { + // Windows write times are in 100ns intervals, so do a little math to + // get it into the right representation. + FileTime { + seconds: time / (1_000_000_000 / 100), + nanos: ((time % (1_000_000_000 / 100)) * 100) as u32, + } + } + + #[cfg(unix)] + fn from_os_repr(seconds: u64, nanos: u32) -> FileTime { + FileTime { seconds: seconds, nanos: nanos } + } + + /// Returns the whole number of seconds represented by this timestamp. + /// + /// Note that this value's meaning is **platform specific**. On Unix + /// platform time stamps are typically relative to January 1, 1970, but on + /// Windows platforms time stamps are relative to January 1, 1601. + pub fn seconds(&self) -> u64 { self.seconds } + + /// Returns the whole number of seconds represented by this timestamp, + /// relative to the Unix epoch start of January 1, 1970. + /// + /// Note that this does not return the same value as `seconds` for Windows + /// platforms as seconds are relative to a different date there. + pub fn seconds_relative_to_1970(&self) -> u64 { + self.seconds - if cfg!(windows) {11644473600} else {0} + } + + /// Returns the nanosecond precision of this timestamp. + /// + /// The returned value is always less than one billion and represents a + /// portion of a second forward from the seconds returned by the `seconds` + /// method. + pub fn nanoseconds(&self) -> u32 { self.nanos } +} + +impl fmt::Display for FileTime { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}.{:09}s", self.seconds, self.nanos) + } +} + +/// Set the last access and modification times for a file on the filesystem. 
+/// +/// This function will set the `atime` and `mtime` metadata fields for a file +/// on the local filesystem, returning any error encountered. +pub fn set_file_times

(p: P, atime: FileTime, mtime: FileTime) + -> io::Result<()> where P: AsRef { + set_file_times_(p.as_ref(), atime, mtime) +} + +#[cfg(unix)] +fn set_file_times_(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use std::ffi::CString; + use libc::{timeval, time_t, suseconds_t, utimes}; + + let times = [to_timeval(&atime), to_timeval(&mtime)]; + let p = try!(CString::new(p.as_os_str().as_bytes())); + return unsafe { + if utimes(p.as_ptr() as *const _, times.as_ptr()) == 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } + }; + + fn to_timeval(ft: &FileTime) -> timeval { + timeval { + tv_sec: ft.seconds() as time_t, + tv_usec: (ft.nanoseconds() / 1000) as suseconds_t, + } + } +} + +#[cfg(windows)] +#[allow(bad_style)] +fn set_file_times_(p: &Path, atime: FileTime, mtime: FileTime) -> io::Result<()> { + use std::fs::OpenOptions; + + type BOOL = i32; + type HANDLE = *mut u8; + type DWORD = u32; + #[repr(C)] + struct FILETIME { + dwLowDateTime: u32, + dwHighDateTime: u32, + } + extern "system" { + fn SetFileTime(hFile: HANDLE, + lpCreationTime: *const FILETIME, + lpLastAccessTime: *const FILETIME, + lpLastWriteTime: *const FILETIME) -> BOOL; + } + + let f = try!(OpenOptions::new().write(true).open(p)); + let atime = to_filetime(&atime); + let mtime = to_filetime(&mtime); + return unsafe { + let ret = SetFileTime(f.as_raw_handle() as *mut _, + 0 as *const _, + &atime, &mtime); + if ret != 0 { + Ok(()) + } else { + Err(io::Error::last_os_error()) + } + }; + + fn to_filetime(ft: &FileTime) -> FILETIME { + let intervals = ft.seconds() * (1_000_000_000 / 100) + + ((ft.nanoseconds() as u64) / 100); + FILETIME { + dwLowDateTime: intervals as DWORD, + dwHighDateTime: (intervals >> 32) as DWORD, + } + } +} + +#[cfg(test)] +mod tests { + extern crate tempdir; + + use std::fs::{self, File}; + use self::tempdir::TempDir; + use super::{FileTime, set_file_times}; + + #[test] + fn set_file_times_test() { + let td = TempDir::new("filetime").unwrap(); + let 
path = td.path().join("foo.txt"); + File::create(&path).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + let atime = FileTime::from_last_access_time(&metadata); + set_file_times(&path, atime, mtime).unwrap(); + + let new_mtime = FileTime::from_seconds_since_1970(10_000, 0); + set_file_times(&path, atime, new_mtime).unwrap(); + + let metadata = fs::metadata(&path).unwrap(); + let mtime = FileTime::from_last_modification_time(&metadata); + assert_eq!(mtime, new_mtime); + } +} diff --git a/src/vendor/gcc/.cargo-checksum.json b/src/vendor/gcc/.cargo-checksum.json new file mode 100644 index 0000000000000..efe1ebb7d44c3 --- /dev/null +++ b/src/vendor/gcc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"f9b1ca6ae27d1c18215265024629a8960c31379f206d9ed20f64e0b2dcf79805",".travis.yml":"5cee7774cf6d876246a0ae0f8362cceeecec5924b751049c945faac9342565ff","Cargo.toml":"2634dedd87889b33a794e31b41a8d8d4713ef40382be3d464229707679bd83da","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"ecb2d93f4c81edbd48d8742ff7887dc0a4530a5890967839090bbc972d49bebe","appveyor.yml":"46c77d913eaa45871296942c2cd96ef092c9dcaf19201cb5c500a5107faeb06f","src/bin/gcc-shim.rs":"11edfe1fc6f932bd42ffffda5145833302bc163e0b87dc0d54f4bd0997ad4708","src/lib.rs":"5eb0e311367226ed0420f5e2dac10cc35fc0a3be639a612b6e8ea6d24f646634","src/registry.rs":"3e2a42581ebb82e325dd5600c6571cef937b35003b2927dc618967f5238a2058","src/windows_registry.rs":"906653c020ffe9d572e435f3fc3a8892d9e0a13240ba297db01ce0a288e08cdb","tests/cc_env.rs":"d92c5e3d3d43ac244e63b2cd2c93a521fcf124bf1ccf8d4c6bfa7f8333d88976","tests/support/mod.rs":"d11ed0db4dda5ecf5fb970c9b0c56428cd47421a2742f07032e2cc6b0a0f07e2","tests/test.rs":"164220f11be2eebc20315826513999970660
a82feff8cc4b15b4e9d73d98324e"},"package":"553f11439bdefe755bf366b264820f1da70f3aaf3924e594b886beb9c831bcf5"} \ No newline at end of file diff --git a/src/vendor/gcc/.cargo-ok b/src/vendor/gcc/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/gcc/.gitignore b/src/vendor/gcc/.gitignore new file mode 100644 index 0000000000000..a9d37c560c6ab --- /dev/null +++ b/src/vendor/gcc/.gitignore @@ -0,0 +1,2 @@ +target +Cargo.lock diff --git a/src/vendor/gcc/.travis.yml b/src/vendor/gcc/.travis.yml new file mode 100644 index 0000000000000..6b508b9d8bc97 --- /dev/null +++ b/src/vendor/gcc/.travis.yml @@ -0,0 +1,40 @@ +language: rust +rust: + - stable + - beta + - nightly +sudo: false +install: + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then OS=unknown-linux-gnu; else OS=apple-darwin; fi + - export TARGET=$ARCH-$OS + - curl https://static.rust-lang.org/rustup.sh | + sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo build --verbose + - cargo test --verbose + - cargo test --verbose --features parallel + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --features parallel + - cargo test --manifest-path gcc-test/Cargo.toml --target $TARGET --release + - cargo doc + - rustdoc --test README.md -L target/debug -L target/debug/deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: ilbcq9zX+UaiBcwqkBGldeanbEQus9npLsi0/nF1PUxKbQsoWSVtVOehAD8Hy92D3hX2npIRyNL8GxBn85XEcBYc1h7DiWUhLcXfZie79v8Ly/qboHCfZLXlB1ofbypbyQfouEdOE9zHf0ZILYVpAgUkliv6KuVShsrKNlbn4QE= + matrix: + - ARCH=x86_64 + - ARCH=i686 +notifications: + email: + on_success: never +os: + - linux + - osx +addons: + apt: + packages: + - g++-multilib diff --git a/src/vendor/gcc/Cargo.toml b/src/vendor/gcc/Cargo.toml new file mode 100644 
index 0000000000000..fd51ce0e9f457 --- /dev/null +++ b/src/vendor/gcc/Cargo.toml @@ -0,0 +1,23 @@ +[package] + +name = "gcc" +version = "0.3.38" +authors = ["Alex Crichton "] +license = "MIT/Apache-2.0" +repository = "https://github.com/alexcrichton/gcc-rs" +documentation = "http://alexcrichton.com/gcc-rs" +description = """ +A build-time dependency for Cargo build scripts to assist in invoking the native +C compiler to compile native C code into a static archive to be linked into Rust +code. +""" +keywords = ["build-dependencies"] + +[dependencies] +rayon = { version = "0.4", optional = true } + +[features] +parallel = ["rayon"] + +[dev-dependencies] +tempdir = "0.3" diff --git a/src/vendor/gcc/LICENSE-APACHE b/src/vendor/gcc/LICENSE-APACHE new file mode 100644 index 0000000000000..16fe87b06e802 --- /dev/null +++ b/src/vendor/gcc/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/gcc/LICENSE-MIT b/src/vendor/gcc/LICENSE-MIT new file mode 100644 index 0000000000000..39e0ed6602151 --- /dev/null +++ b/src/vendor/gcc/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/gcc/README.md b/src/vendor/gcc/README.md new file mode 100644 index 0000000000000..ecc79c6735266 --- /dev/null +++ b/src/vendor/gcc/README.md @@ -0,0 +1,161 @@ +# gcc-rs + +A library to compile C/C++ code into a Rust library/application. + +[![Build Status](https://travis-ci.org/alexcrichton/gcc-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/gcc-rs) +[![Build status](https://ci.appveyor.com/api/projects/status/onu270iw98h81nwv?svg=true)](https://ci.appveyor.com/project/alexcrichton/gcc-rs) + +[Documentation](http://alexcrichton.com/gcc-rs) + +A simple library meant to be used as a build dependency with Cargo packages in +order to build a set of C/C++ files into a static archive. Note that while this +crate is called "gcc", it actually calls out to the most relevant compile for +a platform, for example using `cl` on MSVC. That is, this crate does indeed work +on MSVC! + +## Using gcc-rs + +First, you'll want to both add a build script for your crate (`build.rs`) and +also add this crate to your `Cargo.toml` via: + +```toml +[package] +# ... +build = "build.rs" + +[build-dependencies] +gcc = "0.3" +``` + +Next up, you'll want to write a build script like so: + +```rust,no_run +// build.rs + +extern crate gcc; + +fn main() { + gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); +} +``` + +And that's it! Running `cargo build` should take care of the rest and your Rust +application will now have the C files `foo.c` and `bar.c` compiled into it. You +can call the functions in Rust by declaring functions in your Rust code like so: + +``` +extern { + fn foo_function(); + fn bar_function(); +} + +pub fn call() { + unsafe { + foo_function(); + bar_function(); + } +} + +fn main() { + // ... +} +``` + +## External configuration via environment variables + +To control the programs and flags used for building, the builder can set a +number of different environment variables. + +* `CFLAGS` - a series of space separated flags passed to "gcc". 
Note that + individual flags cannot currently contain spaces, so doing + something like: "-L=foo\ bar" is not possible. +* `CC` - the actual C compiler used. Note that this is used as an exact + executable name, so (for example) no extra flags can be passed inside + this variable, and the builder must ensure that there aren't any + trailing spaces. This compiler must understand the `-c` flag. For + certain `TARGET`s, it also is assumed to know about other flags (most + common is `-fPIC`). +* `AR` - the `ar` (archiver) executable to use to build the static library. + +Each of these variables can also be supplied with certain prefixes and suffixes, +in the following prioritized order: + +1. `_` - for example, `CC_x86_64-unknown-linux-gnu` +2. `_` - for example, `CC_x86_64_unknown_linux_gnu` +3. `_` - for example, `HOST_CC` or `TARGET_CFLAGS` +4. `` - a plain `CC`, `AR` as above. + +If none of these variables exist, gcc-rs uses built-in defaults + +In addition to the the above optional environment variables, `gcc-rs` has some +functions with hard requirements on some variables supplied by [cargo's +build-script driver][cargo] that it has the `TARGET`, `OUT_DIR`, `OPT_LEVEL`, +and `HOST` variables. + +[cargo]: http://doc.crates.io/build-script.html#inputs-to-the-build-script + +## Optional features + +Currently gcc-rs supports parallel compilation (think `make -jN`) but this +feature is turned off by default. To enable gcc-rs to compile C/C++ in parallel, +you can change your dependency to: + +```toml +[build-dependencies] +gcc = { version = "0.3", features = ["parallel"] } +``` + +By default gcc-rs will limit parallelism to `$NUM_JOBS`, or if not present it +will limit it to the number of cpus on the machine. + +## Compile-time Requirements + +To work properly this crate needs access to a C compiler when the build script +is being run. This crate does not ship a C compiler with it. 
The compiler +required varies per platform, but there are three broad categories: + +* Unix platforms require `cc` to be the C compiler. This can be found by + installing gcc/clang on Linux distributions and Xcode on OSX, for example. +* Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`) + require `cl.exe` to be available and in `PATH`. This is typically found in + standard Visual Studio installations and the `PATH` can be set up by running + the appropriate developer tools shell. +* Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`) + require `gcc` to be available in `PATH`. We recommend the + [MinGW-w64](http://mingw-w64.org) distribution, which is using the + [Win-builds](http://win-builds.org) installation system. + You may also acquire it via + [MSYS2](http://msys2.github.io), as explained [here][msys2-help]. Make sure + to install the appropriate architecture corresponding to your installation of + rustc. GCC from older [MinGW](http://www.mingw.org) project is compatible + only with 32-bit rust compiler. + +[msys2-help]: http://github.com/rust-lang/rust#building-on-windows + +## C++ support + +`gcc-rs` supports C++ libraries compilation by using the `cpp` method on +`Config`: + +```rust,no_run +extern crate gcc; + +fn main() { + gcc::Config::new() + .cpp(true) // Switch to C++ library compilation. + .file("foo.cpp") + .compile("libfoo.a"); +} +``` + +When using C++ library compilation switch, the `CXX` and `CXXFLAGS` env +variables are used instead of `CC` and `CFLAGS` and the C++ standard library is +linked to the crate target. + +## License + +`gcc-rs` is primarily distributed under the terms of both the MIT license and +the Apache License (Version 2.0), with portions covered by various BSD-like +licenses. + +See LICENSE-APACHE, and LICENSE-MIT for details. 
diff --git a/src/vendor/gcc/appveyor.yml b/src/vendor/gcc/appveyor.yml new file mode 100644 index 0000000000000..f6108c66514e9 --- /dev/null +++ b/src/vendor/gcc/appveyor.yml @@ -0,0 +1,35 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat + - TARGET: x86_64-pc-windows-msvc + ARCH: amd64 + VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat + - TARGET: i686-pc-windows-msvc + ARCH: x86 + VS: C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat + - TARGET: i686-pc-windows-msvc + ARCH: x86 + VS: C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat + - TARGET: x86_64-pc-windows-gnu + MSYS_BITS: 64 + - TARGET: i686-pc-windows-gnu + MSYS_BITS: 32 +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - if defined VS call "%VS%" %ARCH% + - set PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - if defined MSYS_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS_BITS%\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo test --features parallel --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --features parallel --target %TARGET% + - cargo test --manifest-path gcc-test/Cargo.toml --release --target %TARGET% diff --git a/src/vendor/gcc/src/bin/gcc-shim.rs b/src/vendor/gcc/src/bin/gcc-shim.rs new file mode 100644 index 0000000000000..43fd811d3615b --- /dev/null +++ b/src/vendor/gcc/src/bin/gcc-shim.rs @@ -0,0 +1,23 @@ +#![cfg_attr(test, allow(dead_code))] + +use std::env; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; + +fn main() { + let out_dir = PathBuf::from(env::var_os("GCCTEST_OUT_DIR").unwrap()); + for i in 0.. 
{ + let candidate = out_dir.join(format!("out{}", i)); + if candidate.exists() { + continue + } + let mut f = File::create(candidate).unwrap(); + for arg in env::args().skip(1) { + writeln!(f, "{}", arg).unwrap(); + } + + File::create(out_dir.join("libfoo.a")).unwrap(); + break + } +} diff --git a/src/vendor/gcc/src/lib.rs b/src/vendor/gcc/src/lib.rs new file mode 100644 index 0000000000000..f319e9313ad7b --- /dev/null +++ b/src/vendor/gcc/src/lib.rs @@ -0,0 +1,959 @@ +//! A library for build scripts to compile custom C code +//! +//! This library is intended to be used as a `build-dependencies` entry in +//! `Cargo.toml`: +//! +//! ```toml +//! [build-dependencies] +//! gcc = "0.3" +//! ``` +//! +//! The purpose of this crate is to provide the utility functions necessary to +//! compile C code into a static archive which is then linked into a Rust crate. +//! The top-level `compile_library` function serves as a convenience and more +//! advanced configuration is available through the `Config` builder. +//! +//! This crate will automatically detect situations such as cross compilation or +//! other environment variables set by Cargo and will build code appropriately. +//! +//! # Examples +//! +//! Use the default configuration: +//! +//! ```no_run +//! extern crate gcc; +//! +//! fn main() { +//! gcc::compile_library("libfoo.a", &["src/foo.c"]); +//! } +//! ``` +//! +//! Use more advanced configuration: +//! +//! ```no_run +//! extern crate gcc; +//! +//! fn main() { +//! gcc::Config::new() +//! .file("src/foo.c") +//! .define("FOO", Some("bar")) +//! .include("src") +//! .compile("libfoo.a"); +//! } +//! 
``` + +#![doc(html_root_url = "http://alexcrichton.com/gcc-rs")] +#![cfg_attr(test, deny(warnings))] +#![deny(missing_docs)] + +#[cfg(feature = "parallel")] +extern crate rayon; + +use std::env; +use std::ffi::{OsString, OsStr}; +use std::fs; +use std::io; +use std::path::{PathBuf, Path}; +use std::process::{Command, Stdio}; +use std::io::{BufReader, BufRead, Write}; + +#[cfg(windows)] +mod registry; +pub mod windows_registry; + +/// Extra configuration to pass to gcc. +pub struct Config { + include_directories: Vec, + definitions: Vec<(String, Option)>, + objects: Vec, + flags: Vec, + files: Vec, + cpp: bool, + cpp_link_stdlib: Option>, + cpp_set_stdlib: Option, + target: Option, + host: Option, + out_dir: Option, + opt_level: Option, + debug: Option, + env: Vec<(OsString, OsString)>, + compiler: Option, + archiver: Option, + cargo_metadata: bool, + pic: Option, +} + +/// Configuration used to represent an invocation of a C compiler. +/// +/// This can be used to figure out what compiler is in use, what the arguments +/// to it are, and what the environment variables look like for the compiler. +/// This can be used to further configure other build systems (e.g. forward +/// along CC and/or CFLAGS) or the `to_command` method can be used to run the +/// compiler itself. +pub struct Tool { + path: PathBuf, + args: Vec, + env: Vec<(OsString, OsString)>, +} + +/// Compile a library from the given set of input C files. +/// +/// This will simply compile all files into object files and then assemble them +/// into the output. This will read the standard environment variables to detect +/// cross compilations and such. +/// +/// This function will also print all metadata on standard output for Cargo. 
+/// +/// # Example +/// +/// ```no_run +/// gcc::compile_library("libfoo.a", &["foo.c", "bar.c"]); +/// ``` +pub fn compile_library(output: &str, files: &[&str]) { + let mut c = Config::new(); + for f in files.iter() { + c.file(*f); + } + c.compile(output) +} + +impl Config { + /// Construct a new instance of a blank set of configuration. + /// + /// This builder is finished with the `compile` function. + pub fn new() -> Config { + Config { + include_directories: Vec::new(), + definitions: Vec::new(), + objects: Vec::new(), + flags: Vec::new(), + files: Vec::new(), + cpp: false, + cpp_link_stdlib: None, + cpp_set_stdlib: None, + target: None, + host: None, + out_dir: None, + opt_level: None, + debug: None, + env: Vec::new(), + compiler: None, + archiver: None, + cargo_metadata: true, + pic: None, + } + } + + /// Add a directory to the `-I` or include path for headers + pub fn include>(&mut self, dir: P) -> &mut Config { + self.include_directories.push(dir.as_ref().to_path_buf()); + self + } + + /// Specify a `-D` variable with an optional value. + pub fn define(&mut self, var: &str, val: Option<&str>) -> &mut Config { + self.definitions.push((var.to_string(), val.map(|s| s.to_string()))); + self + } + + /// Add an arbitrary object file to link in + pub fn object>(&mut self, obj: P) -> &mut Config { + self.objects.push(obj.as_ref().to_path_buf()); + self + } + + /// Add an arbitrary flag to the invocation of the compiler + pub fn flag(&mut self, flag: &str) -> &mut Config { + self.flags.push(flag.to_string()); + self + } + + /// Add a file which will be compiled + pub fn file>(&mut self, p: P) -> &mut Config { + self.files.push(p.as_ref().to_path_buf()); + self + } + + /// Set C++ support. + /// + /// The other `cpp_*` options will only become active if this is set to + /// `true`. + pub fn cpp(&mut self, cpp: bool) -> &mut Config { + self.cpp = cpp; + self + } + + /// Set the standard library to link against when compiling with C++ + /// support. 
+ /// + /// The default value of this property depends on the current target: On + /// OS X `Some("c++")` is used, when compiling for a Visual Studio based + /// target `None` is used and for other targets `Some("stdc++")` is used. + /// + /// A value of `None` indicates that no automatic linking should happen, + /// otherwise cargo will link against the specified library. + /// + /// The given library name must not contain the `lib` prefix. + pub fn cpp_link_stdlib(&mut self, cpp_link_stdlib: Option<&str>) + -> &mut Config { + self.cpp_link_stdlib = Some(cpp_link_stdlib.map(|s| s.into())); + self + } + + /// Force the C++ compiler to use the specified standard library. + /// + /// Setting this option will automatically set `cpp_link_stdlib` to the same + /// value. + /// + /// The default value of this option is always `None`. + /// + /// This option has no effect when compiling for a Visual Studio based + /// target. + /// + /// This option sets the `-stdlib` flag, which is only supported by some + /// compilers (clang, icc) but not by others (gcc). The library will not + /// detect which compiler is used, as such it is the responsibility of the + /// caller to ensure that this option is only used in conjuction with a + /// compiler which supports the `-stdlib` flag. + /// + /// A value of `None` indicates that no specific C++ standard library should + /// be used, otherwise `-stdlib` is added to the compile invocation. + /// + /// The given library name must not contain the `lib` prefix. + pub fn cpp_set_stdlib(&mut self, cpp_set_stdlib: Option<&str>) + -> &mut Config { + self.cpp_set_stdlib = cpp_set_stdlib.map(|s| s.into()); + self.cpp_link_stdlib(cpp_set_stdlib); + self + } + + /// Configures the target this configuration will be compiling for. + /// + /// This option is automatically scraped from the `TARGET` environment + /// variable by build scripts, so it's not required to call this function. 
+ pub fn target(&mut self, target: &str) -> &mut Config { + self.target = Some(target.to_string()); + self + } + + /// Configures the host assumed by this configuration. + /// + /// This option is automatically scraped from the `HOST` environment + /// variable by build scripts, so it's not required to call this function. + pub fn host(&mut self, host: &str) -> &mut Config { + self.host = Some(host.to_string()); + self + } + + /// Configures the optimization level of the generated object files. + /// + /// This option is automatically scraped from the `OPT_LEVEL` environment + /// variable by build scripts, so it's not required to call this function. + pub fn opt_level(&mut self, opt_level: u32) -> &mut Config { + self.opt_level = Some(opt_level.to_string()); + self + } + + /// Configures the optimization level of the generated object files. + /// + /// This option is automatically scraped from the `OPT_LEVEL` environment + /// variable by build scripts, so it's not required to call this function. + pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Config { + self.opt_level = Some(opt_level.to_string()); + self + } + + /// Configures whether the compiler will emit debug information when + /// generating object files. + /// + /// This option is automatically scraped from the `PROFILE` environment + /// variable by build scripts (only enabled when the profile is "debug"), so + /// it's not required to call this function. + pub fn debug(&mut self, debug: bool) -> &mut Config { + self.debug = Some(debug); + self + } + + /// Configures the output directory where all object files and static + /// libraries will be located. + /// + /// This option is automatically scraped from the `OUT_DIR` environment + /// variable by build scripts, so it's not required to call this function. + pub fn out_dir>(&mut self, out_dir: P) -> &mut Config { + self.out_dir = Some(out_dir.as_ref().to_owned()); + self + } + + /// Configures the compiler to be used to produce output. 
+ /// + /// This option is automatically determined from the target platform or a + /// number of environment variables, so it's not required to call this + /// function. + pub fn compiler>(&mut self, compiler: P) -> &mut Config { + self.compiler = Some(compiler.as_ref().to_owned()); + self + } + + /// Configures the tool used to assemble archives. + /// + /// This option is automatically determined from the target platform or a + /// number of environment variables, so it's not required to call this + /// function. + pub fn archiver>(&mut self, archiver: P) -> &mut Config { + self.archiver = Some(archiver.as_ref().to_owned()); + self + } + /// Define whether metadata should be emitted for cargo allowing it to + /// automatically link the binary. Defaults to `true`. + pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Config { + self.cargo_metadata = cargo_metadata; + self + } + + /// Configures whether the compiler will emit position independent code. + /// + /// This option defaults to `false` for `i686` and `windows-gnu` targets and to `true` for all + /// other targets. 
+ pub fn pic(&mut self, pic: bool) -> &mut Config { + self.pic = Some(pic); + self + } + + + #[doc(hidden)] + pub fn __set_env(&mut self, a: A, b: B) -> &mut Config + where A: AsRef, B: AsRef + { + self.env.push((a.as_ref().to_owned(), b.as_ref().to_owned())); + self + } + + /// Run the compiler, generating the file `output` + /// + /// The name `output` must begin with `lib` and end with `.a` + pub fn compile(&self, output: &str) { + assert!(output.starts_with("lib")); + assert!(output.ends_with(".a")); + let lib_name = &output[3..output.len() - 2]; + let dst = self.get_out_dir(); + + let mut objects = Vec::new(); + let mut src_dst = Vec::new(); + for file in self.files.iter() { + let obj = dst.join(file).with_extension("o"); + let obj = if !obj.starts_with(&dst) { + dst.join(obj.file_name().unwrap()) + } else { + obj + }; + fs::create_dir_all(&obj.parent().unwrap()).unwrap(); + src_dst.push((file.to_path_buf(), obj.clone())); + objects.push(obj); + } + self.compile_objects(&src_dst); + self.assemble(lib_name, &dst.join(output), &objects); + + self.print(&format!("cargo:rustc-link-lib=static={}", + &output[3..output.len() - 2])); + self.print(&format!("cargo:rustc-link-search=native={}", dst.display())); + + // Add specific C++ libraries, if enabled. 
+ if self.cpp { + if let Some(stdlib) = self.get_cpp_link_stdlib() { + self.print(&format!("cargo:rustc-link-lib={}", stdlib)); + } + } + } + + #[cfg(feature = "parallel")] + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + use self::rayon::prelude::*; + + let mut cfg = rayon::Configuration::new(); + if let Ok(amt) = env::var("NUM_JOBS") { + if let Ok(amt) = amt.parse() { + cfg = cfg.set_num_threads(amt); + } + } + drop(rayon::initialize(cfg)); + + objs.par_iter().weight_max().for_each(|&(ref src, ref dst)| { + self.compile_object(src, dst) + }) + } + + #[cfg(not(feature = "parallel"))] + fn compile_objects(&self, objs: &[(PathBuf, PathBuf)]) { + for &(ref src, ref dst) in objs { + self.compile_object(src, dst); + } + } + + fn compile_object(&self, file: &Path, dst: &Path) { + let is_asm = file.extension().and_then(|s| s.to_str()) == Some("asm"); + let msvc = self.get_target().contains("msvc"); + let (mut cmd, name) = if msvc && is_asm { + self.msvc_macro_assembler() + } else { + let compiler = self.get_compiler(); + let mut cmd = compiler.to_command(); + for &(ref a, ref b) in self.env.iter() { + cmd.env(a, b); + } + (cmd, compiler.path.file_name().unwrap() + .to_string_lossy().into_owned()) + }; + if msvc && is_asm { + cmd.arg("/Fo").arg(dst); + } else if msvc { + let mut s = OsString::from("/Fo"); + s.push(&dst); + cmd.arg(s); + } else { + cmd.arg("-o").arg(&dst); + } + cmd.arg(if msvc {"/c"} else {"-c"}); + cmd.arg(file); + + run(&mut cmd, &name); + } + + /// Get the compiler that's in use for this configuration. + /// + /// This function will return a `Tool` which represents the culmination + /// of this configuration at a snapshot in time. The returned compiler can + /// be inspected (e.g. the path, arguments, environment) to forward along to + /// other tools, or the `to_command` method can be used to invoke the + /// compiler itself. 
+ /// + /// This method will take into account all configuration such as debug + /// information, optimization level, include directories, defines, etc. + /// Additionally, the compiler binary in use follows the standard + /// conventions for this path, e.g. looking at the explicitly set compiler, + /// environment variables (a number of which are inspected here), and then + /// falling back to the default configuration. + pub fn get_compiler(&self) -> Tool { + let opt_level = self.get_opt_level(); + let debug = self.get_debug(); + let target = self.get_target(); + let msvc = target.contains("msvc"); + self.print(&format!("debug={} opt-level={}", debug, opt_level)); + + let mut cmd = self.get_base_compiler(); + let nvcc = cmd.path.to_str() + .map(|path| path.contains("nvcc")) + .unwrap_or(false); + + if msvc { + cmd.args.push("/nologo".into()); + cmd.args.push("/MD".into()); // link against msvcrt.dll for now + match &opt_level[..] { + "z" | "s" => cmd.args.push("/Os".into()), + "2" => cmd.args.push("/O2".into()), + "1" => cmd.args.push("/O1".into()), + _ => {} + } + if target.contains("i686") { + cmd.args.push("/SAFESEH".into()); + } else if target.contains("i586") { + cmd.args.push("/SAFESEH".into()); + cmd.args.push("/ARCH:IA32".into()); + } + } else if nvcc { + cmd.args.push(format!("-O{}", opt_level).into()); + } else { + cmd.args.push(format!("-O{}", opt_level).into()); + cmd.args.push("-ffunction-sections".into()); + cmd.args.push("-fdata-sections".into()); + } + for arg in self.envflags(if self.cpp {"CXXFLAGS"} else {"CFLAGS"}) { + cmd.args.push(arg.into()); + } + + if debug { + cmd.args.push(if msvc {"/Z7"} else {"-g"}.into()); + } + + if target.contains("-ios") { + self.ios_flags(&mut cmd); + } else if !msvc { + if target.contains("i686") || target.contains("i586") { + cmd.args.push("-m32".into()); + } else if target.contains("x86_64") || target.contains("powerpc64") { + cmd.args.push("-m64".into()); + } + + if !nvcc && 
self.pic.unwrap_or(!target.contains("i686") && !target.contains("windows-gnu")) { + cmd.args.push("-fPIC".into()); + } else if nvcc && self.pic.unwrap_or(false) { + cmd.args.push("-Xcompiler".into()); + cmd.args.push("\'-fPIC\'".into()); + } + if target.contains("musl") { + cmd.args.push("-static".into()); + } + + if target.starts_with("armv7-unknown-linux-") { + cmd.args.push("-march=armv7-a".into()); + } + if target.starts_with("armv7-linux-androideabi") { + cmd.args.push("-march=armv7-a".into()); + cmd.args.push("-mfpu=vfpv3-d16".into()); + } + if target.starts_with("arm-unknown-linux-") { + cmd.args.push("-march=armv6".into()); + cmd.args.push("-marm".into()); + } + if target.starts_with("i586-unknown-linux-") { + cmd.args.push("-march=pentium".into()); + } + if target.starts_with("i686-unknown-linux-") { + cmd.args.push("-march=i686".into()); + } + if target.starts_with("thumb") { + cmd.args.push("-mthumb".into()); + + if target.ends_with("eabihf") { + cmd.args.push("-mfloat-abi=hard".into()) + } + } + if target.starts_with("thumbv6m") { + cmd.args.push("-march=armv6-m".into()); + } + if target.starts_with("thumbv7em") { + cmd.args.push("-march=armv7e-m".into()); + } + if target.starts_with("thumbv7m") { + cmd.args.push("-march=armv7-m".into()); + } + } + + if self.cpp && !msvc { + if let Some(ref stdlib) = self.cpp_set_stdlib { + cmd.args.push(format!("-stdlib=lib{}", stdlib).into()); + } + } + + for directory in self.include_directories.iter() { + cmd.args.push(if msvc {"/I"} else {"-I"}.into()); + cmd.args.push(directory.into()); + } + + for flag in self.flags.iter() { + cmd.args.push(flag.into()); + } + + for &(ref key, ref value) in self.definitions.iter() { + let lead = if msvc {"/"} else {"-"}; + if let &Some(ref value) = value { + cmd.args.push(format!("{}D{}={}", lead, key, value).into()); + } else { + cmd.args.push(format!("{}D{}", lead, key).into()); + } + } + cmd + } + + fn msvc_macro_assembler(&self) -> (Command, String) { + let target = 
self.get_target(); + let tool = if target.contains("x86_64") {"ml64.exe"} else {"ml.exe"}; + let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| { + self.cmd(tool) + }); + for directory in self.include_directories.iter() { + cmd.arg("/I").arg(directory); + } + for &(ref key, ref value) in self.definitions.iter() { + if let &Some(ref value) = value { + cmd.arg(&format!("/D{}={}", key, value)); + } else { + cmd.arg(&format!("/D{}", key)); + } + } + + if target.contains("i686") || target.contains("i586") { + cmd.arg("/safeseh"); + } + for flag in self.flags.iter() { + cmd.arg(flag); + } + + (cmd, tool.to_string()) + } + + fn assemble(&self, lib_name: &str, dst: &Path, objects: &[PathBuf]) { + // Delete the destination if it exists as the `ar` tool at least on Unix + // appends to it, which we don't want. + let _ = fs::remove_file(&dst); + + let target = self.get_target(); + if target.contains("msvc") { + let mut cmd = match self.archiver { + Some(ref s) => self.cmd(s), + None => windows_registry::find(&target, "lib.exe") + .unwrap_or(self.cmd("lib.exe")), + }; + let mut out = OsString::from("/OUT:"); + out.push(dst); + run(cmd.arg(out).arg("/nologo") + .args(objects) + .args(&self.objects), "lib.exe"); + + // The Rust compiler will look for libfoo.a and foo.lib, but the + // MSVC linker will also be passed foo.lib, so be sure that both + // exist for now. 
+ let lib_dst = dst.with_file_name(format!("{}.lib", lib_name)); + let _ = fs::remove_file(&lib_dst); + fs::hard_link(&dst, &lib_dst).or_else(|_| { + //if hard-link fails, just copy (ignoring the number of bytes written) + fs::copy(&dst, &lib_dst).map(|_| ()) + }).ok().expect("Copying from {:?} to {:?} failed.");; + } else { + let ar = self.get_ar(); + let cmd = ar.file_name().unwrap().to_string_lossy(); + run(self.cmd(&ar).arg("crs") + .arg(dst) + .args(objects) + .args(&self.objects), &cmd); + } + } + + fn ios_flags(&self, cmd: &mut Tool) { + enum ArchSpec { + Device(&'static str), + Simulator(&'static str), + } + + let target = self.get_target(); + let arch = target.split('-').nth(0).unwrap(); + let arch = match arch { + "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"), + "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"), + "arm64" | "aarch64" => ArchSpec::Device("arm64"), + "i386" | "i686" => ArchSpec::Simulator("-m32"), + "x86_64" => ArchSpec::Simulator("-m64"), + _ => fail("Unknown arch for iOS target") + }; + + let sdk = match arch { + ArchSpec::Device(arch) => { + cmd.args.push("-arch".into()); + cmd.args.push(arch.into()); + cmd.args.push("-miphoneos-version-min=7.0".into()); + "iphoneos" + }, + ArchSpec::Simulator(arch) => { + cmd.args.push(arch.into()); + cmd.args.push("-mios-simulator-version-min=7.0".into()); + "iphonesimulator" + } + }; + + self.print(&format!("Detecting iOS SDK path for {}", sdk)); + let sdk_path = self.cmd("xcrun") + .arg("--show-sdk-path") + .arg("--sdk") + .arg(sdk) + .stderr(Stdio::inherit()) + .output() + .unwrap() + .stdout; + + let sdk_path = String::from_utf8(sdk_path).unwrap(); + + cmd.args.push("-isysroot".into()); + cmd.args.push(sdk_path.trim().into()); + } + + fn cmd>(&self, prog: P) -> Command { + let mut cmd = Command::new(prog); + for &(ref a, ref b) in self.env.iter() { + cmd.env(a, b); + } + return cmd + } + + fn get_base_compiler(&self) -> Tool { + if let Some(ref c) = self.compiler { + return 
Tool::new(c.clone()) + } + let host = self.get_host(); + let target = self.get_target(); + let (env, msvc, gnu, default) = if self.cpp { + ("CXX", "cl.exe", "g++", "c++") + } else { + ("CC", "cl.exe", "gcc", "cc") + }; + self.env_tool(env).map(|(tool, args)| { + let mut t = Tool::new(PathBuf::from(tool)); + for arg in args { + t.args.push(arg.into()); + } + return t + }).or_else(|| { + if target.contains("emscripten") { + if self.cpp { + Some(Tool::new(PathBuf::from("em++"))) + } else { + Some(Tool::new(PathBuf::from("emcc"))) + } + } else { + None + } + }).or_else(|| { + windows_registry::find_tool(&target, "cl.exe") + }).unwrap_or_else(|| { + let compiler = if host.contains("windows") && + target.contains("windows") { + if target.contains("msvc") { + msvc.to_string() + } else { + format!("{}.exe", gnu) + } + } else if target.contains("android") { + format!("{}-{}", target, gnu) + } else if self.get_host() != target { + // CROSS_COMPILE is of the form: "arm-linux-gnueabi-" + let cc_env = self.getenv("CROSS_COMPILE"); + let cross_compile = cc_env.as_ref().map(|s| s.trim_right_matches('-')); + let prefix = cross_compile.or(match &target[..] 
{ + "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu"), + "arm-unknown-linux-gnueabi" => Some("arm-linux-gnueabi"), + "arm-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), + "arm-unknown-linux-musleabi" => Some("arm-linux-musleabi"), + "arm-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), + "arm-unknown-netbsdelf-eabi" => Some("arm--netbsdelf-eabi"), + "armv6-unknown-netbsdelf-eabihf" => Some("armv6--netbsdelf-eabihf"), + "armv7-unknown-linux-gnueabihf" => Some("arm-linux-gnueabihf"), + "armv7-unknown-linux-musleabihf" => Some("arm-linux-musleabihf"), + "armv7-unknown-netbsdelf-eabihf" => Some("armv7--netbsdelf-eabihf"), + "i686-pc-windows-gnu" => Some("i686-w64-mingw32"), + "i686-unknown-linux-musl" => Some("musl"), + "i686-unknown-netbsdelf" => Some("i486--netbsdelf"), + "mips-unknown-linux-gnu" => Some("mips-linux-gnu"), + "mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"), + "mips64-unknown-linux-gnuabi64" => Some("mips64-linux-gnuabi64"), + "mips64el-unknown-linux-gnuabi64" => Some("mips64el-linux-gnuabi64"), + "powerpc-unknown-linux-gnu" => Some("powerpc-linux-gnu"), + "powerpc-unknown-netbsd" => Some("powerpc--netbsd"), + "powerpc64-unknown-linux-gnu" => Some("powerpc-linux-gnu"), + "powerpc64le-unknown-linux-gnu" => Some("powerpc64le-linux-gnu"), + "s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"), + "thumbv6m-none-eabi" => Some("arm-none-eabi"), + "thumbv7em-none-eabi" => Some("arm-none-eabi"), + "thumbv7em-none-eabihf" => Some("arm-none-eabi"), + "thumbv7m-none-eabi" => Some("arm-none-eabi"), + "x86_64-pc-windows-gnu" => Some("x86_64-w64-mingw32"), + "x86_64-rumprun-netbsd" => Some("x86_64-rumprun-netbsd"), + "x86_64-unknown-linux-musl" => Some("musl"), + "x86_64-unknown-netbsd" => Some("x86_64--netbsd"), + _ => None, + }); + match prefix { + Some(prefix) => format!("{}-{}", prefix, gnu), + None => default.to_string(), + } + } else { + default.to_string() + }; + Tool::new(PathBuf::from(compiler)) + }) + } + + fn 
get_var(&self, var_base: &str) -> Result { + let target = self.get_target(); + let host = self.get_host(); + let kind = if host == target {"HOST"} else {"TARGET"}; + let target_u = target.replace("-", "_"); + let res = self.getenv(&format!("{}_{}", var_base, target)) + .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u))) + .or_else(|| self.getenv(&format!("{}_{}", kind, var_base))) + .or_else(|| self.getenv(var_base)); + + match res { + Some(res) => Ok(res), + None => Err("could not get environment variable".to_string()), + } + } + + fn envflags(&self, name: &str) -> Vec { + self.get_var(name).unwrap_or(String::new()) + .split(|c: char| c.is_whitespace()).filter(|s| !s.is_empty()) + .map(|s| s.to_string()) + .collect() + } + + fn env_tool(&self, name: &str) -> Option<(String, Vec)> { + self.get_var(name).ok().map(|tool| { + let whitelist = ["ccache", "distcc"]; + for t in whitelist.iter() { + if tool.starts_with(t) && tool[t.len()..].starts_with(" ") { + return (t.to_string(), + vec![tool[t.len()..].trim_left().to_string()]) + } + } + (tool, Vec::new()) + }) + } + + /// Returns the default C++ standard library for the current target: `libc++` + /// for OS X and `libstdc++` for anything else. 
+ fn get_cpp_link_stdlib(&self) -> Option { + self.cpp_link_stdlib.clone().unwrap_or_else(|| { + let target = self.get_target(); + if target.contains("msvc") { + None + } else if target.contains("darwin") { + Some("c++".to_string()) + } else { + Some("stdc++".to_string()) + } + }) + } + + fn get_ar(&self) -> PathBuf { + self.archiver.clone().or_else(|| { + self.get_var("AR").map(PathBuf::from).ok() + }).unwrap_or_else(|| { + if self.get_target().contains("android") { + PathBuf::from(format!("{}-ar", self.get_target())) + } else if self.get_target().contains("emscripten") { + PathBuf::from("emar") + } else { + PathBuf::from("ar") + } + }) + } + + fn get_target(&self) -> String { + self.target.clone().unwrap_or_else(|| self.getenv_unwrap("TARGET")) + } + + fn get_host(&self) -> String { + self.host.clone().unwrap_or_else(|| self.getenv_unwrap("HOST")) + } + + fn get_opt_level(&self) -> String { + self.opt_level.as_ref().cloned().unwrap_or_else(|| { + self.getenv_unwrap("OPT_LEVEL") + }) + } + + fn get_debug(&self) -> bool { + self.debug.unwrap_or_else(|| self.getenv_unwrap("PROFILE") == "debug") + } + + fn get_out_dir(&self) -> PathBuf { + self.out_dir.clone().unwrap_or_else(|| { + env::var_os("OUT_DIR").map(PathBuf::from).unwrap() + }) + } + + fn getenv(&self, v: &str) -> Option { + let r = env::var(v).ok(); + self.print(&format!("{} = {:?}", v, r)); + r + } + + fn getenv_unwrap(&self, v: &str) -> String { + match self.getenv(v) { + Some(s) => s, + None => fail(&format!("environment variable `{}` not defined", v)), + } + } + + fn print(&self, s: &str) { + if self.cargo_metadata { + println!("{}", s); + } + } +} + +impl Tool { + fn new(path: PathBuf) -> Tool { + Tool { + path: path, + args: Vec::new(), + env: Vec::new(), + } + } + + /// Converts this compiler into a `Command` that's ready to be run. 
+ /// + /// This is useful for when the compiler needs to be executed and the + /// command returned will already have the initial arguments and environment + /// variables configured. + pub fn to_command(&self) -> Command { + let mut cmd = Command::new(&self.path); + cmd.args(&self.args); + for &(ref k, ref v) in self.env.iter() { + cmd.env(k, v); + } + return cmd + } + + /// Returns the path for this compiler. + /// + /// Note that this may not be a path to a file on the filesystem, e.g. "cc", + /// but rather something which will be resolved when a process is spawned. + pub fn path(&self) -> &Path { + &self.path + } + + /// Returns the default set of arguments to the compiler needed to produce + /// executables for the target this compiler generates. + pub fn args(&self) -> &[OsString] { + &self.args + } + + /// Returns the set of environment variables needed for this compiler to + /// operate. + /// + /// This is typically only used for MSVC compilers currently. + pub fn env(&self) -> &[(OsString, OsString)] { + &self.env + } +} + +fn run(cmd: &mut Command, program: &str) { + println!("running: {:?}", cmd); + // Capture the standard error coming from these programs, and write it out + // with cargo:warning= prefixes. Note that this is a bit wonky to avoid + // requiring the output to be UTF-8, we instead just ship bytes from one + // location to another. 
+ let spawn_result = match cmd.stderr(Stdio::piped()).spawn() { + Ok(mut child) => { + let stderr = BufReader::new(child.stderr.take().unwrap()); + for line in stderr.split(b'\n').filter_map(|l| l.ok()) { + print!("cargo:warning="); + std::io::stdout().write_all(&line).unwrap(); + println!(""); + } + child.wait() + } + Err(e) => Err(e), + }; + let status = match spawn_result { + Ok(status) => status, + Err(ref e) if e.kind() == io::ErrorKind::NotFound => { + let extra = if cfg!(windows) { + " (see https://github.com/alexcrichton/gcc-rs#compile-time-requirements \ + for help)" + } else { + "" + }; + fail(&format!("failed to execute command: {}\nIs `{}` \ + not installed?{}", e, program, extra)); + } + Err(e) => fail(&format!("failed to execute command: {}", e)), + }; + println!("{:?}", status); + if !status.success() { + fail(&format!("command did not execute successfully, got: {}", status)); + } +} + +fn fail(s: &str) -> ! { + println!("\n\n{}\n\n", s); + panic!() +} diff --git a/src/vendor/gcc/src/registry.rs b/src/vendor/gcc/src/registry.rs new file mode 100644 index 0000000000000..d871cd21f3c06 --- /dev/null +++ b/src/vendor/gcc/src/registry.rs @@ -0,0 +1,169 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::ffi::{OsString, OsStr}; +use std::io; +use std::ops::RangeFrom; +use std::os::raw; +use std::os::windows::prelude::*; + +pub struct RegistryKey(Repr); + +type HKEY = *mut u8; +type DWORD = u32; +type LPDWORD = *mut DWORD; +type LPCWSTR = *const u16; +type LPWSTR = *mut u16; +type LONG = raw::c_long; +type PHKEY = *mut HKEY; +type PFILETIME = *mut u8; +type LPBYTE = *mut u8; +type REGSAM = u32; + +const ERROR_SUCCESS: DWORD = 0; +const ERROR_NO_MORE_ITEMS: DWORD = 259; +const HKEY_LOCAL_MACHINE: HKEY = 0x80000002 as HKEY; +const REG_SZ: DWORD = 1; +const KEY_READ: DWORD = 0x20019; +const KEY_WOW64_32KEY: DWORD = 0x200; + +#[link(name = "advapi32")] +extern "system" { + fn RegOpenKeyExW(key: HKEY, + lpSubKey: LPCWSTR, + ulOptions: DWORD, + samDesired: REGSAM, + phkResult: PHKEY) -> LONG; + fn RegEnumKeyExW(key: HKEY, + dwIndex: DWORD, + lpName: LPWSTR, + lpcName: LPDWORD, + lpReserved: LPDWORD, + lpClass: LPWSTR, + lpcClass: LPDWORD, + lpftLastWriteTime: PFILETIME) -> LONG; + fn RegQueryValueExW(hKey: HKEY, + lpValueName: LPCWSTR, + lpReserved: LPDWORD, + lpType: LPDWORD, + lpData: LPBYTE, + lpcbData: LPDWORD) -> LONG; + fn RegCloseKey(hKey: HKEY) -> LONG; +} + +struct OwnedKey(HKEY); + +enum Repr { + Const(HKEY), + Owned(OwnedKey), +} + +pub struct Iter<'a> { + idx: RangeFrom, + key: &'a RegistryKey, +} + +unsafe impl Sync for Repr {} +unsafe impl Send for Repr {} + +pub static LOCAL_MACHINE: RegistryKey = + RegistryKey(Repr::Const(HKEY_LOCAL_MACHINE)); + +impl RegistryKey { + fn raw(&self) -> HKEY { + match self.0 { + Repr::Const(val) => val, + Repr::Owned(ref val) => val.0, + } + } + + pub fn open(&self, key: &OsStr) -> io::Result { + let key = key.encode_wide().chain(Some(0)).collect::>(); + let mut ret = 0 as *mut _; + let err = unsafe { + RegOpenKeyExW(self.raw(), key.as_ptr(), 0, + KEY_READ | KEY_WOW64_32KEY, &mut ret) + }; + if err == ERROR_SUCCESS as LONG { + Ok(RegistryKey(Repr::Owned(OwnedKey(ret)))) + } else { + 
Err(io::Error::from_raw_os_error(err as i32)) + } + } + + pub fn iter(&self) -> Iter { + Iter { idx: 0.., key: self } + } + + pub fn query_str(&self, name: &str) -> io::Result { + let name: &OsStr = name.as_ref(); + let name = name.encode_wide().chain(Some(0)).collect::>(); + let mut len = 0; + let mut kind = 0; + unsafe { + let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _, + &mut kind, 0 as *mut _, &mut len); + if err != ERROR_SUCCESS as LONG { + return Err(io::Error::from_raw_os_error(err as i32)) + } + if kind != REG_SZ { + return Err(io::Error::new(io::ErrorKind::Other, + "registry key wasn't a string")) + } + + // The length here is the length in bytes, but we're using wide + // characters so we need to be sure to halve it for the capacity + // passed in. + let mut v = Vec::with_capacity(len as usize / 2); + let err = RegQueryValueExW(self.raw(), name.as_ptr(), 0 as *mut _, + 0 as *mut _, v.as_mut_ptr() as *mut _, + &mut len); + if err != ERROR_SUCCESS as LONG { + return Err(io::Error::from_raw_os_error(err as i32)) + } + v.set_len(len as usize / 2); + + // Some registry keys may have a terminating nul character, but + // we're not interested in that, so chop it off if it's there. 
+ if v[v.len() - 1] == 0 { + v.pop(); + } + Ok(OsString::from_wide(&v)) + } + } +} + +impl Drop for OwnedKey { + fn drop(&mut self) { + unsafe { RegCloseKey(self.0); } + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = io::Result; + + fn next(&mut self) -> Option> { + self.idx.next().and_then(|i| unsafe { + let mut v = Vec::with_capacity(256); + let mut len = v.capacity() as DWORD; + let ret = RegEnumKeyExW(self.key.raw(), i, v.as_mut_ptr(), &mut len, + 0 as *mut _, 0 as *mut _, 0 as *mut _, + 0 as *mut _); + if ret == ERROR_NO_MORE_ITEMS as LONG { + None + } else if ret != ERROR_SUCCESS as LONG { + Some(Err(io::Error::from_raw_os_error(ret as i32))) + } else { + v.set_len(len as usize); + Some(Ok(OsString::from_wide(&v))) + } + }) + } +} diff --git a/src/vendor/gcc/src/windows_registry.rs b/src/vendor/gcc/src/windows_registry.rs new file mode 100644 index 0000000000000..b2c719d27ffdc --- /dev/null +++ b/src/vendor/gcc/src/windows_registry.rs @@ -0,0 +1,425 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A helper module to probe the Windows Registry when looking for +//! windows-specific tools. + +use std::process::Command; + +use Tool; + +macro_rules! otry { + ($expr:expr) => (match $expr { + Some(val) => val, + None => return None, + }) +} + +/// Attempts to find a tool within an MSVC installation using the Windows +/// registry as a point to search from. +/// +/// The `target` argument is the target that the tool should work for (e.g. +/// compile or link for) and the `tool` argument is the tool to find (e.g. +/// `cl.exe` or `link.exe`). 
+/// +/// This function will return `None` if the tool could not be found, or it will +/// return `Some(cmd)` which represents a command that's ready to execute the +/// tool with the appropriate environment variables set. +/// +/// Note that this function always returns `None` for non-MSVC targets. +pub fn find(target: &str, tool: &str) -> Option { + find_tool(target, tool).map(|c| c.to_command()) +} + +/// Similar to the `find` function above, this function will attempt the same +/// operation (finding a MSVC tool in a local install) but instead returns a +/// `Tool` which may be introspected. +#[cfg(not(windows))] +pub fn find_tool(_target: &str, _tool: &str) -> Option { + None +} + +/// Documented above. +#[cfg(windows)] +pub fn find_tool(target: &str, tool: &str) -> Option { + use std::env; + use std::ffi::OsString; + use std::mem; + use std::path::{Path, PathBuf}; + use registry::{RegistryKey, LOCAL_MACHINE}; + + struct MsvcTool { + tool: PathBuf, + libs: Vec, + path: Vec, + include: Vec, + } + + impl MsvcTool { + fn new(tool: PathBuf) -> MsvcTool { + MsvcTool { + tool: tool, + libs: Vec::new(), + path: Vec::new(), + include: Vec::new(), + } + } + + fn into_tool(self) -> Tool { + let MsvcTool { tool, libs, path, include } = self; + let mut tool = Tool::new(tool.into()); + add_env(&mut tool, "LIB", libs); + add_env(&mut tool, "PATH", path); + add_env(&mut tool, "INCLUDE", include); + return tool + } + } + + // This logic is all tailored for MSVC, if we're not that then bail out + // early. + if !target.contains("msvc") { + return None + } + + // Looks like msbuild isn't located in the same location as other tools like + // cl.exe and lib.exe. To handle this we probe for it manually with + // dedicated registry keys. + if tool.contains("msbuild") { + return find_msbuild(target) + } + + // If VCINSTALLDIR is set, then someone's probably already run vcvars and we + // should just find whatever that indicates. 
+ if env::var_os("VCINSTALLDIR").is_some() { + return env::var_os("PATH").and_then(|path| { + env::split_paths(&path).map(|p| p.join(tool)).find(|p| p.exists()) + }).map(|path| { + Tool::new(path.into()) + }) + } + + // Ok, if we're here, now comes the fun part of the probing. Default shells + // or shells like MSYS aren't really configured to execute `cl.exe` and the + // various compiler tools shipped as part of Visual Studio. Here we try to + // first find the relevant tool, then we also have to be sure to fill in + // environment variables like `LIB`, `INCLUDE`, and `PATH` to ensure that + // the tool is actually usable. + + return find_msvc_latest(tool, target, "15.0").or_else(|| { + find_msvc_latest(tool, target, "14.0") + }).or_else(|| { + find_msvc_12(tool, target) + }).or_else(|| { + find_msvc_11(tool, target) + }); + + // For MSVC 14 or newer we need to find the Universal CRT as well as either + // the Windows 10 SDK or Windows 8.1 SDK. + fn find_msvc_latest(tool: &str, target: &str, ver: &str) -> Option { + let vcdir = otry!(get_vc_dir(ver)); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let (ucrt, ucrt_version) = otry!(get_ucrt_dir()); + + let ucrt_include = ucrt.join("include").join(&ucrt_version); + tool.include.push(ucrt_include.join("ucrt")); + + let ucrt_lib = ucrt.join("lib").join(&ucrt_version); + tool.libs.push(ucrt_lib.join("ucrt").join(sub)); + + if let Some((sdk, version)) = get_sdk10_dir() { + tool.path.push(sdk.join("bin").join(sub)); + let sdk_lib = sdk.join("lib").join(&version); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk.join("include").join(&version); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + tool.include.push(sdk_include.join("shared")); + } else if let Some(sdk) = get_sdk81_dir() { + tool.path.push(sdk.join("bin").join(sub)); + let sdk_lib = sdk.join("lib").join("winv6.3"); + 
tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk.join("include"); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + tool.include.push(sdk_include.join("shared")); + } else { + return None + } + Some(tool.into_tool()) + } + + // For MSVC 12 we need to find the Windows 8.1 SDK. + fn find_msvc_12(tool: &str, target: &str) -> Option { + let vcdir = otry!(get_vc_dir("12.0")); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let sdk81 = otry!(get_sdk81_dir()); + tool.path.push(sdk81.join("bin").join(sub)); + let sdk_lib = sdk81.join("lib").join("winv6.3"); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk81.join("include"); + tool.include.push(sdk_include.join("shared")); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + Some(tool.into_tool()) + } + + // For MSVC 11 we need to find the Windows 8 SDK. + fn find_msvc_11(tool: &str, target: &str) -> Option { + let vcdir = otry!(get_vc_dir("11.0")); + let mut tool = otry!(get_tool(tool, &vcdir, target)); + let sub = otry!(lib_subdir(target)); + let sdk8 = otry!(get_sdk8_dir()); + tool.path.push(sdk8.join("bin").join(sub)); + let sdk_lib = sdk8.join("lib").join("win8"); + tool.libs.push(sdk_lib.join("um").join(sub)); + let sdk_include = sdk8.join("include"); + tool.include.push(sdk_include.join("shared")); + tool.include.push(sdk_include.join("um")); + tool.include.push(sdk_include.join("winrt")); + Some(tool.into_tool()) + } + + fn add_env(tool: &mut Tool, env: &str, paths: Vec) { + let prev = env::var_os(env).unwrap_or(OsString::new()); + let prev = env::split_paths(&prev); + let new = paths.into_iter().chain(prev); + tool.env.push((env.to_string().into(), env::join_paths(new).unwrap())); + } + + // Given a possible MSVC installation directory, we look for the linker and + // then add the MSVC library path. 
+ fn get_tool(tool: &str, path: &Path, target: &str) -> Option { + bin_subdir(target).into_iter().map(|(sub, host)| { + (path.join("bin").join(sub).join(tool), + path.join("bin").join(host)) + }).filter(|&(ref path, _)| { + path.is_file() + }).map(|(path, host)| { + let mut tool = MsvcTool::new(path); + tool.path.push(host); + tool + }).filter_map(|mut tool| { + let sub = otry!(vc_lib_subdir(target)); + tool.libs.push(path.join("lib").join(sub)); + tool.include.push(path.join("include")); + Some(tool) + }).next() + } + + // To find MSVC we look in a specific registry key for the version we are + // trying to find. + fn get_vc_dir(ver: &str) -> Option { + let key = r"SOFTWARE\Microsoft\VisualStudio\SxS\VC7"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let path = otry!(key.query_str(ver).ok()); + Some(path.into()) + } + + // To find the Universal CRT we look in a specific registry key for where + // all the Universal CRTs are located and then sort them asciibetically to + // find the newest version. While this sort of sorting isn't ideal, it is + // what vcvars does so that's good enough for us. 
+ // + // Returns a pair of (root, version) for the ucrt dir if found + fn get_ucrt_dir() -> Option<(PathBuf, String)> { + let key = r"SOFTWARE\Microsoft\Windows Kits\Installed Roots"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("KitsRoot10").ok()); + let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); + let max_libdir = otry!(readdir.filter_map(|dir| { + dir.ok() + }).map(|dir| { + dir.path() + }).filter(|dir| { + dir.components().last().and_then(|c| { + c.as_os_str().to_str() + }).map(|c| { + c.starts_with("10.") && dir.join("ucrt").is_dir() + }).unwrap_or(false) + }).max()); + let version = max_libdir.components().last().unwrap(); + let version = version.as_os_str().to_str().unwrap().to_string(); + Some((root.into(), version)) + } + + // Vcvars finds the correct version of the Windows 10 SDK by looking + // for the include `um\Windows.h` because sometimes a given version will + // only have UCRT bits without the rest of the SDK. Since we only care about + // libraries and not includes, we instead look for `um\x64\kernel32.lib`. + // Since the 32-bit and 64-bit libraries are always installed together we + // only need to bother checking x64, making this code a tiny bit simpler. + // Like we do for the Universal CRT, we sort the possibilities + // asciibetically to find the newest one as that is what vcvars does. 
+ fn get_sdk10_dir() -> Option<(PathBuf, String)> { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v10.0"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + let readdir = otry!(Path::new(&root).join("lib").read_dir().ok()); + let mut dirs = readdir.filter_map(|dir| dir.ok()) + .map(|dir| dir.path()) + .collect::>(); + dirs.sort(); + let dir = otry!(dirs.into_iter().rev().filter(|dir| { + dir.join("um").join("x64").join("kernel32.lib").is_file() + }).next()); + let version = dir.components().last().unwrap(); + let version = version.as_os_str().to_str().unwrap().to_string(); + Some((root.into(), version)) + } + + // Interestingly there are several subdirectories, `win7` `win8` and + // `winv6.3`. Vcvars seems to only care about `winv6.3` though, so the same + // applies to us. Note that if we were targetting kernel mode drivers + // instead of user mode applications, we would care. + fn get_sdk81_dir() -> Option { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.1"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(root.into()) + } + + fn get_sdk8_dir() -> Option { + let key = r"SOFTWARE\Microsoft\Microsoft SDKs\Windows\v8.0"; + let key = otry!(LOCAL_MACHINE.open(key.as_ref()).ok()); + let root = otry!(key.query_str("InstallationFolder").ok()); + Some(root.into()) + } + + const PROCESSOR_ARCHITECTURE_INTEL: u16 = 0; + const PROCESSOR_ARCHITECTURE_AMD64: u16 = 9; + const X86: u16 = PROCESSOR_ARCHITECTURE_INTEL; + const X86_64: u16 = PROCESSOR_ARCHITECTURE_AMD64; + + // When choosing the tool to use, we have to choose the one which matches + // the target architecture. Otherwise we end up in situations where someone + // on 32-bit Windows is trying to cross compile to 64-bit and it tries to + // invoke the native 64-bit compiler which won't work. 
+ // + // For the return value of this function, the first member of the tuple is + // the folder of the tool we will be invoking, while the second member is + // the folder of the host toolchain for that tool which is essential when + // using a cross linker. We return a Vec since on x64 there are often two + // linkers that can target the architecture we desire. The 64-bit host + // linker is preferred, and hence first, due to 64-bit allowing it more + // address space to work with and potentially being faster. + fn bin_subdir(target: &str) -> Vec<(&'static str, &'static str)> { + let arch = target.split('-').next().unwrap(); + match (arch, host_arch()) { + ("i586", X86) | + ("i686", X86) => vec![("", "")], + ("i586", X86_64) | + ("i686", X86_64) => vec![("amd64_x86", "amd64"), ("", "")], + ("x86_64", X86) => vec![("x86_amd64", "")], + ("x86_64", X86_64) => vec![("amd64", "amd64"), ("x86_amd64", "")], + ("arm", X86) => vec![("x86_arm", "")], + ("arm", X86_64) => vec![("amd64_arm", "amd64"), ("x86_arm", "")], + _ => vec![], + } + } + + fn lib_subdir(target: &str) -> Option<&'static str> { + let arch = target.split('-').next().unwrap(); + match arch { + "i586" | "i686" => Some("x86"), + "x86_64" => Some("x64"), + "arm" => Some("arm"), + _ => None, + } + } + + // MSVC's x86 libraries are not in a subfolder + fn vc_lib_subdir(target: &str) -> Option<&'static str> { + let arch = target.split('-').next().unwrap(); + match arch { + "i586" | "i686" => Some(""), + "x86_64" => Some("amd64"), + "arm" => Some("arm"), + _ => None, + } + } + + #[allow(bad_style)] + fn host_arch() -> u16 { + type DWORD = u32; + type WORD = u16; + type LPVOID = *mut u8; + type DWORD_PTR = usize; + + #[repr(C)] + struct SYSTEM_INFO { + wProcessorArchitecture: WORD, + _wReserved: WORD, + _dwPageSize: DWORD, + _lpMinimumApplicationAddress: LPVOID, + _lpMaximumApplicationAddress: LPVOID, + _dwActiveProcessorMask: DWORD_PTR, + _dwNumberOfProcessors: DWORD, + _dwProcessorType: DWORD, + 
_dwAllocationGranularity: DWORD, + _wProcessorLevel: WORD, + _wProcessorRevision: WORD, + } + + extern "system" { + fn GetNativeSystemInfo(lpSystemInfo: *mut SYSTEM_INFO); + } + + unsafe { + let mut info = mem::zeroed(); + GetNativeSystemInfo(&mut info); + info.wProcessorArchitecture + } + } + + // Given a registry key, look at all the sub keys and find the one which has + // the maximal numeric value. + // + // Returns the name of the maximal key as well as the opened maximal key. + fn max_version(key: &RegistryKey) -> Option<(OsString, RegistryKey)> { + let mut max_vers = 0; + let mut max_key = None; + for subkey in key.iter().filter_map(|k| k.ok()) { + let val = subkey.to_str().and_then(|s| { + s.trim_left_matches("v").replace(".", "").parse().ok() + }); + let val = match val { + Some(s) => s, + None => continue, + }; + if val > max_vers { + if let Ok(k) = key.open(&subkey) { + max_vers = val; + max_key = Some((subkey, k)); + } + } + } + return max_key + } + + // see http://stackoverflow.com/questions/328017/path-to-msbuild + fn find_msbuild(target: &str) -> Option { + let key = r"SOFTWARE\Microsoft\MSBuild\ToolsVersions"; + LOCAL_MACHINE.open(key.as_ref()).ok().and_then(|key| { + max_version(&key).and_then(|(_vers, key)| { + key.query_str("MSBuildToolsPath").ok() + }) + }).map(|path| { + let mut path = PathBuf::from(path); + path.push("MSBuild.exe"); + let mut tool = Tool::new(path); + if target.contains("x86_64") { + tool.env.push(("Platform".into(), "X64".into())); + } + tool + }) + } +} diff --git a/src/vendor/gcc/tests/cc_env.rs b/src/vendor/gcc/tests/cc_env.rs new file mode 100644 index 0000000000000..559dbe8ad4e50 --- /dev/null +++ b/src/vendor/gcc/tests/cc_env.rs @@ -0,0 +1,49 @@ +extern crate tempdir; +extern crate gcc; + +use std::env; + +mod support; +use support::Test; + +#[test] +fn main() { + ccache(); + distcc(); + ccache_spaces(); +} + +fn ccache() { + let test = Test::gnu(); + test.shim("ccache"); + + env::set_var("CC", "ccache 
lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + + test.cmd(0) + .must_have("lol-this-is-not-a-compiler foo") + .must_have("foo.c") + .must_not_have("ccache"); +} + +fn ccache_spaces() { + let test = Test::gnu(); + test.shim("ccache"); + + env::set_var("CC", "ccache lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + test.cmd(0).must_have("lol-this-is-not-a-compiler foo"); +} + +fn distcc() { + let test = Test::gnu(); + test.shim("distcc"); + + env::set_var("CC", "distcc lol-this-is-not-a-compiler foo"); + test.gcc().file("foo.c").compile("libfoo.a"); + + test.cmd(0) + .must_have("lol-this-is-not-a-compiler foo") + .must_have("foo.c") + .must_not_have("distcc"); +} diff --git a/src/vendor/gcc/tests/support/mod.rs b/src/vendor/gcc/tests/support/mod.rs new file mode 100644 index 0000000000000..b5703d2fd8b1a --- /dev/null +++ b/src/vendor/gcc/tests/support/mod.rs @@ -0,0 +1,111 @@ +#![allow(dead_code)] + +use std::env; +use std::ffi::OsStr; +use std::fs::{self, File}; +use std::io::prelude::*; +use std::path::PathBuf; + +use gcc; +use tempdir::TempDir; + +pub struct Test { + pub td: TempDir, + pub gcc: PathBuf, + pub msvc: bool, +} + +pub struct Execution { + args: Vec, +} + +impl Test { + pub fn new() -> Test { + let mut gcc = PathBuf::from(env::current_exe().unwrap()); + gcc.pop(); + gcc.push(format!("gcc-shim{}", env::consts::EXE_SUFFIX)); + Test { + td: TempDir::new("gcc-test").unwrap(), + gcc: gcc, + msvc: false, + } + } + + pub fn gnu() -> Test { + let t = Test::new(); + t.shim("cc").shim("ar"); + return t + } + + pub fn msvc() -> Test { + let mut t = Test::new(); + t.shim("cl").shim("lib.exe"); + t.msvc = true; + return t + } + + pub fn shim(&self, name: &str) -> &Test { + let fname = format!("{}{}", name, env::consts::EXE_SUFFIX); + fs::hard_link(&self.gcc, self.td.path().join(&fname)).or_else(|_| { + fs::copy(&self.gcc, self.td.path().join(&fname)).map(|_| ()) + }).unwrap(); + self + } + + pub 
fn gcc(&self) -> gcc::Config { + let mut cfg = gcc::Config::new(); + let mut path = env::split_paths(&env::var_os("PATH").unwrap()) + .collect::>(); + path.insert(0, self.td.path().to_owned()); + let target = if self.msvc { + "x86_64-pc-windows-msvc" + } else { + "x86_64-unknown-linux-gnu" + }; + + cfg.target(target).host(target) + .opt_level(2) + .debug(false) + .out_dir(self.td.path()) + .__set_env("PATH", env::join_paths(path).unwrap()) + .__set_env("GCCTEST_OUT_DIR", self.td.path()); + if self.msvc { + cfg.compiler(self.td.path().join("cl")); + cfg.archiver(self.td.path().join("lib.exe")); + } + return cfg + } + + pub fn cmd(&self, i: u32) -> Execution { + let mut s = String::new(); + File::open(self.td.path().join(format!("out{}", i))).unwrap() + .read_to_string(&mut s).unwrap(); + Execution { + args: s.lines().map(|s| s.to_string()).collect(), + } + } +} + +impl Execution { + pub fn must_have>(&self, p: P) -> &Execution { + if !self.has(p.as_ref()) { + panic!("didn't find {:?} in {:?}", p.as_ref(), self.args); + } else { + self + } + } + + pub fn must_not_have>(&self, p: P) -> &Execution { + if self.has(p.as_ref()) { + panic!("found {:?}", p.as_ref()); + } else { + self + } + } + + pub fn has(&self, p: &OsStr) -> bool { + self.args.iter().any(|arg| { + OsStr::new(arg) == p + }) + } +} diff --git a/src/vendor/gcc/tests/test.rs b/src/vendor/gcc/tests/test.rs new file mode 100644 index 0000000000000..1b6a0bd0d10a6 --- /dev/null +++ b/src/vendor/gcc/tests/test.rs @@ -0,0 +1,207 @@ +extern crate gcc; +extern crate tempdir; + +use support::Test; + +mod support; + +#[test] +fn gnu_smoke() { + let test = Test::gnu(); + test.gcc() + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-O2") + .must_have("foo.c") + .must_not_have("-g") + .must_have("-c") + .must_have("-ffunction-sections") + .must_have("-fdata-sections"); + test.cmd(1).must_have(test.td.path().join("foo.o")); +} + +#[test] +fn gnu_opt_level_1() { + let test = Test::gnu(); + test.gcc() + 
.opt_level(1) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-O1") + .must_not_have("-O2"); +} + +#[test] +fn gnu_opt_level_s() { + let test = Test::gnu(); + test.gcc() + .opt_level_str("s") + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-Os") + .must_not_have("-O1") + .must_not_have("-O2") + .must_not_have("-O3") + .must_not_have("-Oz"); +} + +#[test] +fn gnu_debug() { + let test = Test::gnu(); + test.gcc() + .debug(true) + .file("foo.c").compile("libfoo.a"); + test.cmd(0).must_have("-g"); +} + +#[test] +fn gnu_x86_64() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("x86_64-{}", vendor); + let test = Test::gnu(); + test.gcc() + .target(&target) + .host(&target) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-fPIC") + .must_have("-m64"); + } +} + +#[test] +fn gnu_x86_64_no_pic() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("x86_64-{}", vendor); + let test = Test::gnu(); + test.gcc() + .pic(false) + .target(&target) + .host(&target) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_not_have("-fPIC"); + } +} + +#[test] +fn gnu_i686() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("i686-{}", vendor); + let test = Test::gnu(); + test.gcc() + .target(&target) + .host(&target) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_not_have("-fPIC") + .must_have("-m32"); + } +} + +#[test] +fn gnu_i686_pic() { + for vendor in &["unknown-linux-gnu", "apple-darwin"] { + let target = format!("i686-{}", vendor); + let test = Test::gnu(); + test.gcc() + .pic(true) + .target(&target) + .host(&target) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-fPIC"); + } +} + +#[test] +fn gnu_set_stdlib() { + let test = Test::gnu(); + test.gcc() + .cpp_set_stdlib(Some("foo")) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_not_have("-stdlib=foo"); +} + +#[test] +fn gnu_include() { + let test 
= Test::gnu(); + test.gcc() + .include("foo/bar") + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-I").must_have("foo/bar"); +} + +#[test] +fn gnu_define() { + let test = Test::gnu(); + test.gcc() + .define("FOO", Some("bar")) + .define("BAR", None) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR"); +} + +#[test] +fn gnu_compile_assembly() { + let test = Test::gnu(); + test.gcc() + .file("foo.S").compile("libfoo.a"); + test.cmd(0).must_have("foo.S"); +} + +#[test] +fn msvc_smoke() { + let test = Test::msvc(); + test.gcc() + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("/O2") + .must_have("foo.c") + .must_not_have("/Z7") + .must_have("/c"); + test.cmd(1).must_have(test.td.path().join("foo.o")); +} + +#[test] +fn msvc_opt_level_0() { + let test = Test::msvc(); + test.gcc() + .opt_level(0) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_not_have("/O2"); +} + +#[test] +fn msvc_debug() { + let test = Test::msvc(); + test.gcc() + .debug(true) + .file("foo.c").compile("libfoo.a"); + test.cmd(0).must_have("/Z7"); +} + +#[test] +fn msvc_include() { + let test = Test::msvc(); + test.gcc() + .include("foo/bar") + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("/I").must_have("foo/bar"); +} + +#[test] +fn msvc_define() { + let test = Test::msvc(); + test.gcc() + .define("FOO", Some("bar")) + .define("BAR", None) + .file("foo.c").compile("libfoo.a"); + + test.cmd(0).must_have("/DFOO=bar").must_have("/DBAR"); +} diff --git a/src/vendor/getopts/.cargo-checksum.json b/src/vendor/getopts/.cargo-checksum.json new file mode 100644 index 0000000000000..0c13fda1c1168 --- /dev/null +++ b/src/vendor/getopts/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"c1e953ee360e77de57f7b02f1b7880bd6a3dc22d1a69e953c2ac2c52cc52d247",".travis.yml":"f01015154ac55bebd8ff25742496135c40395959f772005bdf7c63bc9b373c12","Cargo.toml":"a027aa6d21622b42c545707ba04f78341cc28079b46da775827ab1ec37fe3ca7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"4002d78e71c4e1fb82c77590eddb999371f40dce037d895f96e6d6df42c728d3","appveyor.yml":"da991211b72fa6f231af7adb84c9fb72f5a9131d1c0a3d47b8ceffe5a82c8542","src/lib.rs":"9512dd4ec1053c9fc61f630d869053ca50c55e0839e3ab7091246a8654423bf0","tests/smoke.rs":"26a95ac42e42b766ae752fe8531fb740fd147d5cdff352dec0763d175ce91806"},"package":"d9047cfbd08a437050b363d35ef160452c5fe8ea5187ae0a624708c91581d685"} \ No newline at end of file diff --git a/src/vendor/getopts/.cargo-ok b/src/vendor/getopts/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/getopts/.gitignore b/src/vendor/getopts/.gitignore new file mode 100644 index 0000000000000..4fffb2f89cbd8 --- /dev/null +++ b/src/vendor/getopts/.gitignore @@ -0,0 +1,2 @@ +/target +/Cargo.lock diff --git a/src/vendor/getopts/.travis.yml b/src/vendor/getopts/.travis.yml new file mode 100644 index 0000000000000..d7e3f4787aea5 --- /dev/null +++ b/src/vendor/getopts/.travis.yml @@ -0,0 +1,20 @@ +language: rust +rust: + - 1.0.0 + - beta + - nightly +sudo: false +before_script: + - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH +script: + - cargo build --verbose + - cargo test --verbose + - cargo doc --no-deps +after_success: + - travis-cargo --only nightly doc-upload +env: + global: + secure: by+Jo/boBPbcF5c1N6RNCA008oJm2aRFE5T0SUc3OIfTXxY08dZc0WCBJCHrplp44VjpeKRp/89Y+k1CKncIeU8LiS6ZgsKqaQcCglE2O1KS90B6FYB7+rBqT3ib25taq1nW38clnBHYHV9nz4gOElSdKGRxCcBy+efQ5ZXr2tY= +notifications: + 
email: + on_success: never diff --git a/src/vendor/getopts/Cargo.toml b/src/vendor/getopts/Cargo.toml new file mode 100644 index 0000000000000..f84899fe8120e --- /dev/null +++ b/src/vendor/getopts/Cargo.toml @@ -0,0 +1,16 @@ +[package] + +name = "getopts" +version = "0.2.14" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/rust-lang/getopts" +documentation = "http://doc.rust-lang.org/getopts" +homepage = "https://github.com/rust-lang/getopts" +description = """ +getopts-like option parsing. +""" + +[dev-dependencies] +log = "0.3" diff --git a/src/vendor/getopts/LICENSE-APACHE b/src/vendor/getopts/LICENSE-APACHE new file mode 100644 index 0000000000000..16fe87b06e802 --- /dev/null +++ b/src/vendor/getopts/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/getopts/LICENSE-MIT b/src/vendor/getopts/LICENSE-MIT new file mode 100644 index 0000000000000..39d4bdb5acd31 --- /dev/null +++ b/src/vendor/getopts/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/src/vendor/getopts/README.md b/src/vendor/getopts/README.md new file mode 100644 index 0000000000000..c19f48fb06b5c --- /dev/null +++ b/src/vendor/getopts/README.md @@ -0,0 +1,23 @@ +getopts +=== + +A Rust library for option parsing for CLI utilities. + +[![Build Status](https://travis-ci.org/rust-lang/getopts.svg?branch=master)](https://travis-ci.org/rust-lang/getopts) + +[Documentation](http://doc.rust-lang.org/getopts) + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +getopts = "0.2.4" +``` + +and this to your crate root: + +```rust +extern crate getopts; +``` diff --git a/src/vendor/getopts/appveyor.yml b/src/vendor/getopts/appveyor.yml new file mode 100644 index 0000000000000..6a1b8dc19c039 --- /dev/null +++ b/src/vendor/getopts/appveyor.yml @@ -0,0 +1,17 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc + - TARGET: i686-pc-windows-gnu +install: + - ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-${env:TARGET}.exe" + - rust-nightly-%TARGET%.exe /VERYSILENT /NORESTART /DIR="C:\Program Files (x86)\Rust" + - SET PATH=%PATH%;C:\Program Files (x86)\Rust\bin + - SET PATH=%PATH%;C:\MinGW\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --verbose diff --git a/src/vendor/getopts/src/lib.rs b/src/vendor/getopts/src/lib.rs new file mode 100644 index 0000000000000..8f0c866fae906 --- /dev/null +++ b/src/vendor/getopts/src/lib.rs @@ -0,0 +1,1831 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +// +// ignore-lexer-test FIXME #15677 + +//! Simple getopt alternative. +//! +//! 
Construct a vector of options, either by using `reqopt`, `optopt`, and +//! `optflag` or by building them from components yourself, and pass them to +//! `getopts`, along with a vector of actual arguments (not including +//! `argv[0]`). You'll either get a failure code back, or a match. You'll have +//! to verify whether the amount of 'free' arguments in the match is what you +//! expect. Use `opt_*` accessors to get argument values out of the matches +//! object. +//! +//! Single-character options are expected to appear on the command line with a +//! single preceding dash; multiple-character options are expected to be +//! proceeded by two dashes. Options that expect an argument accept their +//! argument following either a space or an equals sign. Single-character +//! options don't require the space. +//! +//! # Usage +//! +//! This crate is [on crates.io](https://crates.io/crates/getopts) and can be +//! used by adding `getopts` to the dependencies in your project's `Cargo.toml`. +//! +//! ```toml +//! [dependencies] +//! getopts = "0.2" +//! ``` +//! +//! and this to your crate root: +//! +//! ```rust +//! extern crate getopts; +//! ``` +//! +//! # Example +//! +//! The following example shows simple command line parsing for an application +//! that requires an input file to be specified, accepts an optional output file +//! name following `-o`, and accepts both `-h` and `--help` as optional flags. +//! +//! ```{.rust} +//! extern crate getopts; +//! use getopts::Options; +//! use std::env; +//! +//! fn do_work(inp: &str, out: Option) { +//! println!("{}", inp); +//! match out { +//! Some(x) => println!("{}", x), +//! None => println!("No Output"), +//! } +//! } +//! +//! fn print_usage(program: &str, opts: Options) { +//! let brief = format!("Usage: {} FILE [options]", program); +//! print!("{}", opts.usage(&brief)); +//! } +//! +//! fn main() { +//! let args: Vec = env::args().collect(); +//! let program = args[0].clone(); +//! +//! 
let mut opts = Options::new(); +//! opts.optopt("o", "", "set output file name", "NAME"); +//! opts.optflag("h", "help", "print this help menu"); +//! let matches = match opts.parse(&args[1..]) { +//! Ok(m) => { m } +//! Err(f) => { panic!(f.to_string()) } +//! }; +//! if matches.opt_present("h") { +//! print_usage(&program, opts); +//! return; +//! } +//! let output = matches.opt_str("o"); +//! let input = if !matches.free.is_empty() { +//! matches.free[0].clone() +//! } else { +//! print_usage(&program, opts); +//! return; +//! }; +//! do_work(&input, output); +//! } +//! ``` + +#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "http://www.rust-lang.org/favicon.ico", + html_root_url = "http://doc.rust-lang.org/getopts/")] +#![deny(missing_docs)] +#![cfg_attr(test, deny(warnings))] +#![cfg_attr(rust_build, feature(staged_api))] +#![cfg_attr(rust_build, staged_api)] +#![cfg_attr(rust_build, + unstable(feature = "rustc_private", + reason = "use the crates.io `getopts` library instead"))] + +#[cfg(test)] #[macro_use] extern crate log; + +use self::Name::*; +use self::HasArg::*; +use self::Occur::*; +use self::Fail::*; +use self::Optval::*; +use self::SplitWithinState::*; +use self::Whitespace::*; +use self::LengthLimit::*; + +use std::error::Error; +use std::ffi::OsStr; +use std::fmt; +use std::iter::{repeat, IntoIterator}; +use std::result; + +/// A description of the options that a program can handle. +pub struct Options { + grps: Vec, + parsing_style : ParsingStyle +} + +impl Options { + /// Create a blank set of options. + pub fn new() -> Options { + Options { + grps: Vec::new(), + parsing_style: ParsingStyle::FloatingFrees + } + } + + /// Set the parsing style. + pub fn parsing_style(&mut self, style: ParsingStyle) -> &mut Options { + self.parsing_style = style; + self + } + + /// Create a generic option group, stating all parameters explicitly. 
+ pub fn opt(&mut self, short_name: &str, long_name: &str, desc: &str, + hint: &str, hasarg: HasArg, occur: Occur) -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: hint.to_string(), + desc: desc.to_string(), + hasarg: hasarg, + occur: occur + }); + self + } + + /// Create a long option that is optional and does not take an argument. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + pub fn optflag(&mut self, short_name: &str, long_name: &str, desc: &str) + -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: "".to_string(), + desc: desc.to_string(), + hasarg: No, + occur: Optional + }); + self + } + + /// Create a long option that can occur more than once and does not + /// take an argument. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + pub fn optflagmulti(&mut self, short_name: &str, long_name: &str, desc: &str) + -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: "".to_string(), + desc: desc.to_string(), + hasarg: No, + occur: Multi + }); + self + } + + /// Create a long option that is optional and takes an optional argument. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. 
`"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + /// * `hint` - Hint that is used in place of the argument in the usage help, + /// e.g. `"FILE"` for a `-o FILE` option + pub fn optflagopt(&mut self, short_name: &str, long_name: &str, desc: &str, + hint: &str) -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: hint.to_string(), + desc: desc.to_string(), + hasarg: Maybe, + occur: Optional + }); + self + } + + /// Create a long option that is optional, takes an argument, and may occur + /// multiple times. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + /// * `hint` - Hint that is used in place of the argument in the usage help, + /// e.g. `"FILE"` for a `-o FILE` option + pub fn optmulti(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) + -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: hint.to_string(), + desc: desc.to_string(), + hasarg: Yes, + occur: Multi + }); + self + } + + /// Create a long option that is optional and takes an argument. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + /// * `hint` - Hint that is used in place of the argument in the usage help, + /// e.g. 
`"FILE"` for a `-o FILE` option + pub fn optopt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) + -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: hint.to_string(), + desc: desc.to_string(), + hasarg: Yes, + occur: Optional + }); + self + } + + /// Create a long option that is required and takes an argument. + /// + /// * `short_name` - e.g. `"h"` for a `-h` option, or `""` for none + /// * `long_name` - e.g. `"help"` for a `--help` option, or `""` for none + /// * `desc` - Description for usage help + /// * `hint` - Hint that is used in place of the argument in the usage help, + /// e.g. `"FILE"` for a `-o FILE` option + pub fn reqopt(&mut self, short_name: &str, long_name: &str, desc: &str, hint: &str) + -> &mut Options { + let len = short_name.len(); + assert!(len == 1 || len == 0); + self.grps.push(OptGroup { + short_name: short_name.to_string(), + long_name: long_name.to_string(), + hint: hint.to_string(), + desc: desc.to_string(), + hasarg: Yes, + occur: Req + }); + self + } + + /// Parse command line arguments according to the provided options. + /// + /// On success returns `Ok(Matches)`. Use methods such as `opt_present` + /// `opt_str`, etc. to interrogate results. + /// # Panics + /// + /// Returns `Err(Fail)` on failure: use the `Debug` implementation of `Fail` + /// to display information about it. + pub fn parse(&self, args: C) -> Result + where C::Item: AsRef + { + let opts: Vec = self.grps.iter().map(|x| x.long_to_short()).collect(); + let n_opts = opts.len(); + + fn f(_x: usize) -> Vec { return Vec::new(); } + + let mut vals = (0 .. 
n_opts).map(f).collect::>(); + let mut free: Vec = Vec::new(); + let args = try!(args.into_iter().map(|i| { + i.as_ref().to_str().ok_or_else(|| { + Fail::UnrecognizedOption(format!("{:?}", i.as_ref())) + }).map(|s| s.to_owned()) + }).collect::<::std::result::Result, _>>()); + let l = args.len(); + let mut i = 0; + while i < l { + let cur = args[i].clone(); + let curlen = cur.len(); + if !is_arg(&cur) { + match self.parsing_style { + ParsingStyle::FloatingFrees => free.push(cur), + ParsingStyle::StopAtFirstFree => { + while i < l { + free.push(args[i].clone()); + i += 1; + } + break; + } + } + } else if cur == "--" { + let mut j = i + 1; + while j < l { free.push(args[j].clone()); j += 1; } + break; + } else { + let mut names; + let mut i_arg = None; + if cur.as_bytes()[1] == b'-' { + let tail = &cur[2..curlen]; + let tail_eq: Vec<&str> = tail.splitn(2, '=').collect(); + if tail_eq.len() <= 1 { + names = vec!(Long(tail.to_string())); + } else { + names = + vec!(Long(tail_eq[0].to_string())); + i_arg = Some(tail_eq[1].to_string()); + } + } else { + names = Vec::new(); + for (j, ch) in cur.char_indices().skip(1) { + let opt = Short(ch); + + /* In a series of potential options (eg. -aheJ), if we + see one which takes an argument, we assume all + subsequent characters make up the argument. 
This + allows options such as -L/usr/local/lib/foo to be + interpreted correctly + */ + + let opt_id = match find_opt(&opts, opt.clone()) { + Some(id) => id, + None => return Err(UnrecognizedOption(opt.to_string())) + }; + + names.push(opt); + + let arg_follows = match opts[opt_id].hasarg { + Yes | Maybe => true, + No => false + }; + + if arg_follows { + let next = j + ch.len_utf8(); + if next < curlen { + i_arg = Some(cur[next..curlen].to_string()); + break; + } + } + } + } + let mut name_pos = 0; + for nm in names.iter() { + name_pos += 1; + let optid = match find_opt(&opts, (*nm).clone()) { + Some(id) => id, + None => return Err(UnrecognizedOption(nm.to_string())) + }; + match opts[optid].hasarg { + No => { + if name_pos == names.len() && !i_arg.is_none() { + return Err(UnexpectedArgument(nm.to_string())); + } + vals[optid].push(Given); + } + Maybe => { + if !i_arg.is_none() { + vals[optid] + .push(Val((i_arg.clone()) + .unwrap())); + } else if name_pos < names.len() || i + 1 == l || + is_arg(&args[i + 1]) { + vals[optid].push(Given); + } else { + i += 1; + vals[optid].push(Val(args[i].clone())); + } + } + Yes => { + if !i_arg.is_none() { + vals[optid].push(Val(i_arg.clone().unwrap())); + } else if i + 1 == l { + return Err(ArgumentMissing(nm.to_string())); + } else { + i += 1; + vals[optid].push(Val(args[i].clone())); + } + } + } + } + } + i += 1; + } + for i in 0 .. n_opts { + let n = vals[i].len(); + let occ = opts[i].occur; + if occ == Req && n == 0 { + return Err(OptionMissing(opts[i].name.to_string())); + } + if occ != Multi && n > 1 { + return Err(OptionDuplicated(opts[i].name.to_string())); + } + } + Ok(Matches { + opts: opts, + vals: vals, + free: free + }) + } + + /// Derive a short one-line usage summary from a set of long options. 
+ #[allow(deprecated)] // connect => join in 1.3 + pub fn short_usage(&self, program_name: &str) -> String { + let mut line = format!("Usage: {} ", program_name); + line.push_str(&self.grps.iter() + .map(format_option) + .collect::>() + .connect(" ")); + line + } + + /// Derive a usage message from a set of options. + #[allow(deprecated)] // connect => join in 1.3 + pub fn usage(&self, brief: &str) -> String { + let desc_sep = format!("\n{}", repeat(" ").take(24).collect::()); + + let any_short = self.grps.iter().any(|optref| { + optref.short_name.len() > 0 + }); + + let rows = self.grps.iter().map(|optref| { + let OptGroup{short_name, + long_name, + hint, + desc, + hasarg, + ..} = (*optref).clone(); + + let mut row = " ".to_string(); + + // short option + match short_name.len() { + 0 => { + if any_short { + row.push_str(" "); + } + } + 1 => { + row.push('-'); + row.push_str(&short_name); + if long_name.len() > 0 { + row.push_str(", "); + } else { + // Only a single space here, so that any + // argument is printed in the correct spot. + row.push(' '); + } + } + _ => panic!("the short name should only be 1 ascii char long"), + } + + // long option + match long_name.len() { + 0 => {} + _ => { + row.push_str("--"); + row.push_str(&long_name); + row.push(' '); + } + } + + // arg + match hasarg { + No => {} + Yes => row.push_str(&hint), + Maybe => { + row.push('['); + row.push_str(&hint); + row.push(']'); + } + } + + // FIXME: #5516 should be graphemes not codepoints + // here we just need to indent the start of the description + let rowlen = row.chars().count(); + if rowlen < 24 { + for _ in 0 .. 
24 - rowlen { + row.push(' '); + } + } else { + row.push_str(&desc_sep) + } + + // Normalize desc to contain words separated by one space character + let mut desc_normalized_whitespace = String::new(); + for word in desc.split(|c: char| c.is_whitespace()) + .filter(|s| !s.is_empty()) { + desc_normalized_whitespace.push_str(word); + desc_normalized_whitespace.push(' '); + } + + // FIXME: #5516 should be graphemes not codepoints + let mut desc_rows = Vec::new(); + each_split_within(&desc_normalized_whitespace, + 54, + |substr| { + desc_rows.push(substr.to_string()); + true + }); + + // FIXME: #5516 should be graphemes not codepoints + // wrapped description + row.push_str(&desc_rows.connect(&desc_sep)); + + row + }); + + format!("{}\n\nOptions:\n{}\n", brief, + rows.collect::>().connect("\n")) + } +} + +/// What parsing style to use when parsing arguments. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum ParsingStyle { + /// Flags and "free" arguments can be freely inter-mixed. + FloatingFrees, + /// As soon as a "free" argument (i.e. non-flag) is encountered, stop + /// considering any remaining arguments as flags. + StopAtFirstFree +} + +/// Name of an option. Either a string or a single char. +#[derive(Clone, PartialEq, Eq)] +enum Name { + /// A string representing the long name of an option. + /// For example: "help" + Long(String), + /// A char representing the short name of an option. + /// For example: 'h' + Short(char), +} + +/// Describes whether an option has an argument. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum HasArg { + /// The option requires an argument. + Yes, + /// The option takes no argument. + No, + /// The option argument is optional. + Maybe, +} + +/// Describes how often an option may occur. +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum Occur { + /// The option occurs once. + Req, + /// The option occurs at most once. + Optional, + /// The option occurs zero or more times. + Multi, +} + +/// A description of a possible option. 
+#[derive(Clone, PartialEq, Eq)] +struct Opt { + /// Name of the option + name: Name, + /// Whether it has an argument + hasarg: HasArg, + /// How often it can occur + occur: Occur, + /// Which options it aliases + aliases: Vec, +} + +/// One group of options, e.g., both `-h` and `--help`, along with +/// their shared description and properties. +#[derive(Clone, PartialEq, Eq)] +struct OptGroup { + /// Short name of the option, e.g. `h` for a `-h` option + short_name: String, + /// Long name of the option, e.g. `help` for a `--help` option + long_name: String, + /// Hint for argument, e.g. `FILE` for a `-o FILE` option + hint: String, + /// Description for usage help text + desc: String, + /// Whether option has an argument + hasarg: HasArg, + /// How often it can occur + occur: Occur +} + +/// Describes whether an option is given at all or has a value. +#[derive(Clone, PartialEq, Eq)] +enum Optval { + Val(String), + Given, +} + +/// The result of checking command line arguments. Contains a vector +/// of matches and a vector of free strings. +#[derive(Clone, PartialEq, Eq)] +pub struct Matches { + /// Options that matched + opts: Vec, + /// Values of the Options that matched + vals: Vec>, + /// Free string fragments + pub free: Vec, +} + +/// The type returned when the command line does not conform to the +/// expected format. Use the `Debug` implementation to output detailed +/// information. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Fail { + /// The option requires an argument but none was passed. + ArgumentMissing(String), + /// The passed option is not declared among the possible options. + UnrecognizedOption(String), + /// A required option is not present. + OptionMissing(String), + /// A single occurrence option is being used multiple times. + OptionDuplicated(String), + /// There's an argument being passed to a non-argument option. 
+ UnexpectedArgument(String), +} + +impl Error for Fail { + fn description(&self) -> &str { + match *self { + ArgumentMissing(_) => "missing argument", + UnrecognizedOption(_) => "unrecognized option", + OptionMissing(_) => "missing option", + OptionDuplicated(_) => "duplicated option", + UnexpectedArgument(_) => "unexpected argument", + } + } +} + +/// The type of failure that occurred. +#[derive(Clone, Copy, PartialEq, Eq)] +#[allow(missing_docs)] +pub enum FailType { + ArgumentMissing_, + UnrecognizedOption_, + OptionMissing_, + OptionDuplicated_, + UnexpectedArgument_, +} + +/// The result of parsing a command line with a set of options. +pub type Result = result::Result; + +impl Name { + fn from_str(nm: &str) -> Name { + if nm.len() == 1 { + Short(nm.as_bytes()[0] as char) + } else { + Long(nm.to_string()) + } + } + + fn to_string(&self) -> String { + match *self { + Short(ch) => ch.to_string(), + Long(ref s) => s.to_string() + } + } +} + +impl OptGroup { + /// Translate OptGroup into Opt. + /// (Both short and long names correspond to different Opts). + fn long_to_short(&self) -> Opt { + let OptGroup { + short_name, + long_name, + hasarg, + occur, + .. 
+ } = (*self).clone(); + + match (short_name.len(), long_name.len()) { + (0,0) => panic!("this long-format option was given no name"), + (0,_) => Opt { + name: Long((long_name)), + hasarg: hasarg, + occur: occur, + aliases: Vec::new() + }, + (1,0) => Opt { + name: Short(short_name.as_bytes()[0] as char), + hasarg: hasarg, + occur: occur, + aliases: Vec::new() + }, + (1,_) => Opt { + name: Long((long_name)), + hasarg: hasarg, + occur: occur, + aliases: vec!( + Opt { + name: Short(short_name.as_bytes()[0] as char), + hasarg: hasarg, + occur: occur, + aliases: Vec::new() + } + ) + }, + (_,_) => panic!("something is wrong with the long-form opt") + } + } +} + +impl Matches { + fn opt_vals(&self, nm: &str) -> Vec { + match find_opt(&self.opts, Name::from_str(nm)) { + Some(id) => self.vals[id].clone(), + None => panic!("No option '{}' defined", nm) + } + } + + fn opt_val(&self, nm: &str) -> Option { + self.opt_vals(nm).into_iter().next() + } + + /// Returns true if an option was matched. + pub fn opt_present(&self, nm: &str) -> bool { + !self.opt_vals(nm).is_empty() + } + + /// Returns the number of times an option was matched. + pub fn opt_count(&self, nm: &str) -> usize { + self.opt_vals(nm).len() + } + + /// Returns true if any of several options were matched. + pub fn opts_present(&self, names: &[String]) -> bool { + names.iter().any(|nm| { + match find_opt(&self.opts, Name::from_str(&nm)) { + Some(id) if !self.vals[id].is_empty() => true, + _ => false, + } + }) + } + + /// Returns the string argument supplied to one of several matching options or `None`. + pub fn opts_str(&self, names: &[String]) -> Option { + names.iter().filter_map(|nm| { + match self.opt_val(&nm) { + Some(Val(s)) => Some(s), + _ => None, + } + }).next() + } + + /// Returns a vector of the arguments provided to all matches of the given + /// option. + /// + /// Used when an option accepts multiple values. 
+ pub fn opt_strs(&self, nm: &str) -> Vec { + self.opt_vals(nm).into_iter().filter_map(|v| { + match v { + Val(s) => Some(s), + _ => None, + } + }).collect() + } + + /// Returns the string argument supplied to a matching option or `None`. + pub fn opt_str(&self, nm: &str) -> Option { + match self.opt_val(nm) { + Some(Val(s)) => Some(s), + _ => None, + } + } + + + /// Returns the matching string, a default, or `None`. + /// + /// Returns `None` if the option was not present, `def` if the option was + /// present but no argument was provided, and the argument if the option was + /// present and an argument was provided. + pub fn opt_default(&self, nm: &str, def: &str) -> Option { + match self.opt_val(nm) { + Some(Val(s)) => Some(s), + Some(_) => Some(def.to_string()), + None => None, + } + } + +} + +fn is_arg(arg: &str) -> bool { + arg.as_bytes().get(0) == Some(&b'-') && arg.len() > 1 +} + +fn find_opt(opts: &[Opt], nm: Name) -> Option { + // Search main options. + let pos = opts.iter().position(|opt| opt.name == nm); + if pos.is_some() { + return pos + } + + // Search in aliases. 
+ for candidate in opts.iter() { + if candidate.aliases.iter().position(|opt| opt.name == nm).is_some() { + return opts.iter().position(|opt| opt.name == candidate.name); + } + } + + None +} + +impl fmt::Display for Fail { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ArgumentMissing(ref nm) => { + write!(f, "Argument to option '{}' missing.", *nm) + } + UnrecognizedOption(ref nm) => { + write!(f, "Unrecognized option: '{}'.", *nm) + } + OptionMissing(ref nm) => { + write!(f, "Required option '{}' missing.", *nm) + } + OptionDuplicated(ref nm) => { + write!(f, "Option '{}' given more than once.", *nm) + } + UnexpectedArgument(ref nm) => { + write!(f, "Option '{}' does not take an argument.", *nm) + } + } + } +} + +fn format_option(opt: &OptGroup) -> String { + let mut line = String::new(); + + if opt.occur != Req { + line.push('['); + } + + // Use short_name if possible, but fall back to long_name. + if opt.short_name.len() > 0 { + line.push('-'); + line.push_str(&opt.short_name); + } else { + line.push_str("--"); + line.push_str(&opt.long_name); + } + + if opt.hasarg != No { + line.push(' '); + if opt.hasarg == Maybe { + line.push('['); + } + line.push_str(&opt.hint); + if opt.hasarg == Maybe { + line.push(']'); + } + } + + if opt.occur != Req { + line.push(']'); + } + if opt.occur == Multi { + line.push_str(".."); + } + + line +} + +#[derive(Clone, Copy)] +enum SplitWithinState { + A, // leading whitespace, initial state + B, // words + C, // internal and trailing whitespace +} + +#[derive(Clone, Copy)] +enum Whitespace { + Ws, // current char is whitespace + Cr // current char is not whitespace +} + +#[derive(Clone, Copy)] +enum LengthLimit { + UnderLim, // current char makes current substring still fit in limit + OverLim // current char makes current substring no longer fit in limit +} + + +/// Splits a string into substrings with possibly internal whitespace, +/// each of them at most `lim` bytes long. 
The substrings have leading and trailing +/// whitespace removed, and are only cut at whitespace boundaries. +/// +/// Note: Function was moved here from `std::str` because this module is the only place that +/// uses it, and because it was too specific for a general string function. +/// +/// # Panics +/// +/// Panics during iteration if the string contains a non-whitespace +/// sequence longer than the limit. +fn each_split_within<'a, F>(ss: &'a str, lim: usize, mut it: F) + -> bool where F: FnMut(&'a str) -> bool { + // Just for fun, let's write this as a state machine: + + let mut slice_start = 0; + let mut last_start = 0; + let mut last_end = 0; + let mut state = A; + let mut fake_i = ss.len(); + let mut lim = lim; + + let mut cont = true; + + // if the limit is larger than the string, lower it to save cycles + if lim >= fake_i { + lim = fake_i; + } + + let mut machine = |cont: &mut bool, (i, c): (usize, char)| { + let whitespace = if c.is_whitespace() { Ws } else { Cr }; + let limit = if (i - slice_start + 1) <= lim { UnderLim } else { OverLim }; + + state = match (state, whitespace, limit) { + (A, Ws, _) => { A } + (A, Cr, _) => { slice_start = i; last_start = i; B } + + (B, Cr, UnderLim) => { B } + (B, Cr, OverLim) if (i - last_start + 1) > lim + => panic!("word starting with {} longer than limit!", + &ss[last_start..i + 1]), + (B, Cr, OverLim) => { + *cont = it(&ss[slice_start..last_end]); + slice_start = last_start; + B + } + (B, Ws, UnderLim) => { + last_end = i; + C + } + (B, Ws, OverLim) => { + last_end = i; + *cont = it(&ss[slice_start..last_end]); + A + } + + (C, Cr, UnderLim) => { + last_start = i; + B + } + (C, Cr, OverLim) => { + *cont = it(&ss[slice_start..last_end]); + slice_start = i; + last_start = i; + last_end = i; + B + } + (C, Ws, OverLim) => { + *cont = it(&ss[slice_start..last_end]); + A + } + (C, Ws, UnderLim) => { + C + } + }; + + *cont + }; + + ss.char_indices().all(|x| machine(&mut cont, x)); + + // Let the automaton 'run out' by 
supplying trailing whitespace + while cont && match state { B | C => true, A => false } { + machine(&mut cont, (fake_i, ' ')); + fake_i += 1; + } + return cont; +} + +#[test] +fn test_split_within() { + fn t(s: &str, i: usize, u: &[String]) { + let mut v = Vec::new(); + each_split_within(s, i, |s| { v.push(s.to_string()); true }); + assert!(v.iter().zip(u.iter()).all(|(a,b)| a == b)); + } + t("", 0, &[]); + t("", 15, &[]); + t("hello", 15, &["hello".to_string()]); + t("\nMary had a little lamb\nLittle lamb\n", 15, &[ + "Mary had a".to_string(), + "little lamb".to_string(), + "Little lamb".to_string() + ]); + t("\nMary had a little lamb\nLittle lamb\n", ::std::usize::MAX, + &["Mary had a little lamb\nLittle lamb".to_string()]); +} + +#[cfg(test)] +mod tests { + use super::{HasArg, Name, Occur, Opt, Options, ParsingStyle}; + use super::Fail::*; + + // Tests for reqopt + #[test] + fn test_reqopt() { + let long_args = vec!("--test=20".to_string()); + let mut opts = Options::new(); + opts.reqopt("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!(m.opt_present("t")); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => { panic!("test_reqopt failed (long arg)"); } + } + let short_args = vec!("-t".to_string(), "20".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert!((m.opt_present("test"))); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!((m.opt_present("t"))); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => { panic!("test_reqopt failed (short arg)"); } + } + } + + #[test] + fn test_reqopt_missing() { + let args = vec!("blah".to_string()); + match Options::new() + .reqopt("t", "test", "testing", "TEST") + .parse(&args) { + Err(OptionMissing(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_reqopt_no_arg() { + let long_args = vec!("--test".to_string()); + let mut opts = Options::new(); + 
opts.reqopt("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + let short_args = vec!("-t".to_string()); + match opts.parse(&short_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_reqopt_multi() { + let args = vec!("--test=20".to_string(), "-t".to_string(), "30".to_string()); + match Options::new() + .reqopt("t", "test", "testing", "TEST") + .parse(&args) { + Err(OptionDuplicated(_)) => {}, + _ => panic!() + } + } + + // Tests for optopt + #[test] + fn test_optopt() { + let long_args = vec!("--test=20".to_string()); + let mut opts = Options::new(); + opts.optopt("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!((m.opt_present("t"))); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => panic!() + } + let short_args = vec!("-t".to_string(), "20".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert!((m.opt_present("test"))); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!((m.opt_present("t"))); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => panic!() + } + } + + #[test] + fn test_optopt_missing() { + let args = vec!("blah".to_string()); + match Options::new() + .optopt("t", "test", "testing", "TEST") + .parse(&args) { + Ok(ref m) => { + assert!(!m.opt_present("test")); + assert!(!m.opt_present("t")); + } + _ => panic!() + } + } + + #[test] + fn test_optopt_no_arg() { + let long_args = vec!("--test".to_string()); + let mut opts = Options::new(); + opts.optopt("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + let short_args = vec!("-t".to_string()); + match opts.parse(&short_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_optopt_multi() { + let args = vec!("--test=20".to_string(), 
"-t".to_string(), "30".to_string()); + match Options::new() + .optopt("t", "test", "testing", "TEST") + .parse(&args) { + Err(OptionDuplicated(_)) => {}, + _ => panic!() + } + } + + // Tests for optflag + #[test] + fn test_optflag() { + let long_args = vec!("--test".to_string()); + let mut opts = Options::new(); + opts.optflag("t", "test", "testing"); + match opts.parse(&long_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert!(m.opt_present("t")); + } + _ => panic!() + } + let short_args = vec!("-t".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert!(m.opt_present("t")); + } + _ => panic!() + } + } + + #[test] + fn test_optflag_missing() { + let args = vec!("blah".to_string()); + match Options::new() + .optflag("t", "test", "testing") + .parse(&args) { + Ok(ref m) => { + assert!(!m.opt_present("test")); + assert!(!m.opt_present("t")); + } + _ => panic!() + } + } + + #[test] + fn test_optflag_long_arg() { + let args = vec!("--test=20".to_string()); + match Options::new() + .optflag("t", "test", "testing") + .parse(&args) { + Err(UnexpectedArgument(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_optflag_multi() { + let args = vec!("--test".to_string(), "-t".to_string()); + match Options::new() + .optflag("t", "test", "testing") + .parse(&args) { + Err(OptionDuplicated(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_optflag_short_arg() { + let args = vec!("-t".to_string(), "20".to_string()); + match Options::new() + .optflag("t", "test", "testing") + .parse(&args) { + Ok(ref m) => { + // The next variable after the flag is just a free argument + + assert!(m.free[0] == "20"); + } + _ => panic!() + } + } + + // Tests for optflagmulti + #[test] + fn test_optflagmulti_short1() { + let args = vec!("-v".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("v"), 1); + } + _ => panic!() + } + } + + 
#[test] + fn test_optflagmulti_short2a() { + let args = vec!("-v".to_string(), "-v".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("v"), 2); + } + _ => panic!() + } + } + + #[test] + fn test_optflagmulti_short2b() { + let args = vec!("-vv".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("v"), 2); + } + _ => panic!() + } + } + + #[test] + fn test_optflagmulti_long1() { + let args = vec!("--verbose".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("verbose"), 1); + } + _ => panic!() + } + } + + #[test] + fn test_optflagmulti_long2() { + let args = vec!("--verbose".to_string(), "--verbose".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("verbose"), 2); + } + _ => panic!() + } + } + + #[test] + fn test_optflagmulti_mix() { + let args = vec!("--verbose".to_string(), "-v".to_string(), + "-vv".to_string(), "verbose".to_string()); + match Options::new() + .optflagmulti("v", "verbose", "verbosity") + .parse(&args) { + Ok(ref m) => { + assert_eq!(m.opt_count("verbose"), 4); + assert_eq!(m.opt_count("v"), 4); + } + _ => panic!() + } + } + + // Tests for optflagopt + #[test] + fn test_optflagopt() { + let long_args = vec!("--test".to_string()); + let mut opts = Options::new(); + opts.optflag("t", "test", "testing"); + match opts.parse(&long_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert!(m.opt_present("t")); + } + _ => panic!() + } + let short_args = vec!("-t".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert!(m.opt_present("t")); + } + _ => panic!() + } + let no_args: Vec = vec!(); + match opts.parse(&no_args) { + Ok(ref m) => { 
+ assert!(!m.opt_present("test")); + assert!(!m.opt_present("t")); + } + _ => panic!() + } + } + + // Tests for optmulti + #[test] + fn test_optmulti() { + let long_args = vec!("--test=20".to_string()); + let mut opts = Options::new(); + opts.optmulti("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Ok(ref m) => { + assert!((m.opt_present("test"))); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!((m.opt_present("t"))); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => panic!() + } + let short_args = vec!("-t".to_string(), "20".to_string()); + match opts.parse(&short_args) { + Ok(ref m) => { + assert!((m.opt_present("test"))); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!((m.opt_present("t"))); + assert_eq!(m.opt_str("t").unwrap(), "20"); + } + _ => panic!() + } + } + + #[test] + fn test_optmulti_missing() { + let args = vec!("blah".to_string()); + match Options::new() + .optmulti("t", "test", "testing", "TEST") + .parse(&args) { + Ok(ref m) => { + assert!(!m.opt_present("test")); + assert!(!m.opt_present("t")); + } + _ => panic!() + } + } + + #[test] + fn test_optmulti_no_arg() { + let long_args = vec!("--test".to_string()); + let mut opts = Options::new(); + opts.optmulti("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + let short_args = vec!("-t".to_string()); + match opts.parse(&short_args) { + Err(ArgumentMissing(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_optmulti_multi() { + let args = vec!("--test=20".to_string(), "-t".to_string(), "30".to_string()); + match Options::new() + .optmulti("t", "test", "testing", "TEST") + .parse(&args) { + Ok(ref m) => { + assert!(m.opt_present("test")); + assert_eq!(m.opt_str("test").unwrap(), "20"); + assert!(m.opt_present("t")); + assert_eq!(m.opt_str("t").unwrap(), "20"); + let pair = m.opt_strs("test"); + assert!(pair[0] == "20"); + assert!(pair[1] == "30"); + } + _ => panic!() + } + } + + 
#[test] + fn test_free_argument_is_hyphen() { + let args = vec!("-".to_string()); + match Options::new().parse(&args) { + Ok(ref m) => { + assert_eq!(m.free.len(), 1); + assert_eq!(m.free[0], "-"); + } + _ => panic!() + } + } + + #[test] + fn test_unrecognized_option() { + let long_args = vec!("--untest".to_string()); + let mut opts = Options::new(); + opts.optmulti("t", "test", "testing", "TEST"); + match opts.parse(&long_args) { + Err(UnrecognizedOption(_)) => {}, + _ => panic!() + } + let short_args = vec!("-u".to_string()); + match opts.parse(&short_args) { + Err(UnrecognizedOption(_)) => {}, + _ => panic!() + } + } + + #[test] + fn test_combined() { + let args = + vec!("prog".to_string(), + "free1".to_string(), + "-s".to_string(), + "20".to_string(), + "free2".to_string(), + "--flag".to_string(), + "--long=30".to_string(), + "-f".to_string(), + "-m".to_string(), + "40".to_string(), + "-m".to_string(), + "50".to_string(), + "-n".to_string(), + "-A B".to_string(), + "-n".to_string(), + "-60 70".to_string()); + match Options::new() + .optopt("s", "something", "something", "SOMETHING") + .optflag("", "flag", "a flag") + .reqopt("", "long", "hi", "LONG") + .optflag("f", "", "another flag") + .optmulti("m", "", "mmmmmm", "YUM") + .optmulti("n", "", "nothing", "NOTHING") + .optopt("", "notpresent", "nothing to see here", "NOPE") + .parse(&args) { + Ok(ref m) => { + assert!(m.free[0] == "prog"); + assert!(m.free[1] == "free1"); + assert_eq!(m.opt_str("s").unwrap(), "20"); + assert!(m.free[2] == "free2"); + assert!((m.opt_present("flag"))); + assert_eq!(m.opt_str("long").unwrap(), "30"); + assert!((m.opt_present("f"))); + let pair = m.opt_strs("m"); + assert!(pair[0] == "40"); + assert!(pair[1] == "50"); + let pair = m.opt_strs("n"); + assert!(pair[0] == "-A B"); + assert!(pair[1] == "-60 70"); + assert!((!m.opt_present("notpresent"))); + } + _ => panic!() + } + } + + #[test] + fn test_mixed_stop() { + let args = + vec!("-a".to_string(), + "b".to_string(), + 
"-c".to_string(), + "d".to_string()); + match Options::new() + .parsing_style(ParsingStyle::StopAtFirstFree) + .optflag("a", "", "") + .optopt("c", "", "", "") + .parse(&args) { + Ok(ref m) => { + println!("{}", m.opt_present("c")); + assert!(m.opt_present("a")); + assert!(!m.opt_present("c")); + assert_eq!(m.free.len(), 3); + assert_eq!(m.free[0], "b"); + assert_eq!(m.free[1], "-c"); + assert_eq!(m.free[2], "d"); + } + _ => panic!() + } + } + + #[test] + fn test_mixed_stop_hyphen() { + let args = + vec!("-a".to_string(), + "-".to_string(), + "-c".to_string(), + "d".to_string()); + match Options::new() + .parsing_style(ParsingStyle::StopAtFirstFree) + .optflag("a", "", "") + .optopt("c", "", "", "") + .parse(&args) { + Ok(ref m) => { + println!("{}", m.opt_present("c")); + assert!(m.opt_present("a")); + assert!(!m.opt_present("c")); + assert_eq!(m.free.len(), 3); + assert_eq!(m.free[0], "-"); + assert_eq!(m.free[1], "-c"); + assert_eq!(m.free[2], "d"); + } + _ => panic!() + } + } + + #[test] + fn test_multi() { + let mut opts = Options::new(); + opts.optopt("e", "", "encrypt", "ENCRYPT"); + opts.optopt("", "encrypt", "encrypt", "ENCRYPT"); + opts.optopt("f", "", "flag", "FLAG"); + + let args_single = vec!("-e".to_string(), "foo".to_string()); + let matches_single = &match opts.parse(&args_single) { + Ok(m) => m, + Err(_) => panic!() + }; + assert!(matches_single.opts_present(&["e".to_string()])); + assert!(matches_single.opts_present(&["encrypt".to_string(), "e".to_string()])); + assert!(matches_single.opts_present(&["e".to_string(), "encrypt".to_string()])); + assert!(!matches_single.opts_present(&["encrypt".to_string()])); + assert!(!matches_single.opts_present(&["thing".to_string()])); + assert!(!matches_single.opts_present(&[])); + + assert_eq!(matches_single.opts_str(&["e".to_string()]).unwrap(), "foo"); + assert_eq!(matches_single.opts_str(&["e".to_string(), "encrypt".to_string()]).unwrap(), + "foo"); + 
assert_eq!(matches_single.opts_str(&["encrypt".to_string(), "e".to_string()]).unwrap(), + "foo"); + + let args_both = vec!("-e".to_string(), "foo".to_string(), "--encrypt".to_string(), + "foo".to_string()); + let matches_both = &match opts.parse(&args_both) { + Ok(m) => m, + Err(_) => panic!() + }; + assert!(matches_both.opts_present(&["e".to_string()])); + assert!(matches_both.opts_present(&["encrypt".to_string()])); + assert!(matches_both.opts_present(&["encrypt".to_string(), "e".to_string()])); + assert!(matches_both.opts_present(&["e".to_string(), "encrypt".to_string()])); + assert!(!matches_both.opts_present(&["f".to_string()])); + assert!(!matches_both.opts_present(&["thing".to_string()])); + assert!(!matches_both.opts_present(&[])); + + assert_eq!(matches_both.opts_str(&["e".to_string()]).unwrap(), "foo"); + assert_eq!(matches_both.opts_str(&["encrypt".to_string()]).unwrap(), "foo"); + assert_eq!(matches_both.opts_str(&["e".to_string(), "encrypt".to_string()]).unwrap(), + "foo"); + assert_eq!(matches_both.opts_str(&["encrypt".to_string(), "e".to_string()]).unwrap(), + "foo"); + } + + #[test] + fn test_nospace() { + let args = vec!("-Lfoo".to_string(), "-M.".to_string()); + let matches = &match Options::new() + .optmulti("L", "", "library directory", "LIB") + .optmulti("M", "", "something", "MMMM") + .parse(&args) { + Ok(m) => m, + Err(_) => panic!() + }; + assert!(matches.opts_present(&["L".to_string()])); + assert_eq!(matches.opts_str(&["L".to_string()]).unwrap(), "foo"); + assert!(matches.opts_present(&["M".to_string()])); + assert_eq!(matches.opts_str(&["M".to_string()]).unwrap(), "."); + + } + + #[test] + fn test_nospace_conflict() { + let args = vec!("-vvLverbose".to_string(), "-v".to_string() ); + let matches = &match Options::new() + .optmulti("L", "", "library directory", "LIB") + .optflagmulti("v", "verbose", "Verbose") + .parse(&args) { + Ok(m) => m, + Err(e) => panic!( "{}", e ) + }; + assert!(matches.opts_present(&["L".to_string()])); + 
assert_eq!(matches.opts_str(&["L".to_string()]).unwrap(), "verbose"); + assert!(matches.opts_present(&["v".to_string()])); + assert_eq!(3, matches.opt_count("v")); + } + + #[test] + fn test_long_to_short() { + let mut short = Opt { + name: Name::Long("banana".to_string()), + hasarg: HasArg::Yes, + occur: Occur::Req, + aliases: Vec::new(), + }; + short.aliases = vec!(Opt { name: Name::Short('b'), + hasarg: HasArg::Yes, + occur: Occur::Req, + aliases: Vec::new() }); + let mut opts = Options::new(); + opts.reqopt("b", "banana", "some bananas", "VAL"); + let ref verbose = opts.grps[0]; + assert!(verbose.long_to_short() == short); + } + + #[test] + fn test_aliases_long_and_short() { + let args = vec!("-a".to_string(), "--apple".to_string(), "-a".to_string()); + + let matches = Options::new() + .optflagmulti("a", "apple", "Desc") + .parse(&args) + .unwrap(); + assert_eq!(3, matches.opt_count("a")); + assert_eq!(3, matches.opt_count("apple")); + } + + #[test] + fn test_usage() { + let mut opts = Options::new(); + opts.reqopt("b", "banana", "Desc", "VAL"); + opts.optopt("a", "012345678901234567890123456789", + "Desc", "VAL"); + opts.optflag("k", "kiwi", "Desc"); + opts.optflagopt("p", "", "Desc", "VAL"); + opts.optmulti("l", "", "Desc", "VAL"); + opts.optflag("", "starfruit", "Starfruit"); + + let expected = +"Usage: fruits + +Options: + -b, --banana VAL Desc + -a, --012345678901234567890123456789 VAL + Desc + -k, --kiwi Desc + -p [VAL] Desc + -l VAL Desc + --starfruit Starfruit +"; + + let generated_usage = opts.usage("Usage: fruits"); + + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", generated_usage); + assert_eq!(generated_usage, expected); + } + + #[test] + fn test_usage_description_wrapping() { + // indentation should be 24 spaces + // lines wrap after 78: or rather descriptions wrap after 54 + + let mut opts = Options::new(); + opts.optflag("k", "kiwi", + "This is a long description which won't be wrapped..+.."); // 54 + opts.optflag("a", 
"apple", + "This is a long description which _will_ be wrapped..+.."); + + let expected = +"Usage: fruits + +Options: + -k, --kiwi This is a long description which won't be wrapped..+.. + -a, --apple This is a long description which _will_ be + wrapped..+.. +"; + + let usage = opts.usage("Usage: fruits"); + + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", usage); + assert!(usage == expected) + } + + #[test] + fn test_usage_description_multibyte_handling() { + let mut opts = Options::new(); + opts.optflag("k", "k\u{2013}w\u{2013}", + "The word kiwi is normally spelled with two i's"); + opts.optflag("a", "apple", + "This \u{201C}description\u{201D} has some characters that could \ +confuse the line wrapping; an apple costs 0.51€ in some parts of Europe."); + + let expected = +"Usage: fruits + +Options: + -k, --k–w– The word kiwi is normally spelled with two i's + -a, --apple This “description” has some characters that could + confuse the line wrapping; an apple costs 0.51€ in + some parts of Europe. 
+"; + + let usage = opts.usage("Usage: fruits"); + + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", usage); + assert!(usage == expected) + } + + #[test] + fn test_usage_short_only() { + let mut opts = Options::new(); + opts.optopt("k", "", "Kiwi", "VAL"); + opts.optflag("s", "", "Starfruit"); + opts.optflagopt("a", "", "Apple", "TYPE"); + + let expected = +"Usage: fruits + +Options: + -k VAL Kiwi + -s Starfruit + -a [TYPE] Apple +"; + + let usage = opts.usage("Usage: fruits"); + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", usage); + assert!(usage == expected) + } + + #[test] + fn test_usage_long_only() { + let mut opts = Options::new(); + opts.optopt("", "kiwi", "Kiwi", "VAL"); + opts.optflag("", "starfruit", "Starfruit"); + opts.optflagopt("", "apple", "Apple", "TYPE"); + + let expected = +"Usage: fruits + +Options: + --kiwi VAL Kiwi + --starfruit Starfruit + --apple [TYPE] Apple +"; + + let usage = opts.usage("Usage: fruits"); + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", usage); + assert!(usage == expected) + } + + #[test] + fn test_short_usage() { + let mut opts = Options::new(); + opts.reqopt("b", "banana", "Desc", "VAL"); + opts.optopt("a", "012345678901234567890123456789", + "Desc", "VAL"); + opts.optflag("k", "kiwi", "Desc"); + opts.optflagopt("p", "", "Desc", "VAL"); + opts.optmulti("l", "", "Desc", "VAL"); + + let expected = "Usage: fruits -b VAL [-a VAL] [-k] [-p [VAL]] [-l VAL]..".to_string(); + let generated_usage = opts.short_usage("fruits"); + + debug!("expected: <<{}>>", expected); + debug!("generated: <<{}>>", generated_usage); + assert_eq!(generated_usage, expected); + } + + #[test] + fn test_args_with_equals() { + let mut opts = Options::new(); + opts.optopt("o", "one", "One", "INFO"); + opts.optopt("t", "two", "Two", "INFO"); + + let args = vec!("--one".to_string(), "A=B".to_string(), + "--two=C=D".to_string()); + let matches = &match opts.parse(&args) { + Ok(m) => m, + Err(e) 
=> panic!("{}", e) + }; + assert_eq!(matches.opts_str(&["o".to_string()]).unwrap(), "A=B"); + assert_eq!(matches.opts_str(&["t".to_string()]).unwrap(), "C=D"); + } +} diff --git a/src/vendor/getopts/tests/smoke.rs b/src/vendor/getopts/tests/smoke.rs new file mode 100644 index 0000000000000..a46f9c0167ab3 --- /dev/null +++ b/src/vendor/getopts/tests/smoke.rs @@ -0,0 +1,8 @@ +extern crate getopts; + +use std::env; + +#[test] +fn main() { + getopts::Options::new().parse(env::args()).unwrap(); +} diff --git a/src/vendor/libc/.cargo-checksum.json b/src/vendor/libc/.cargo-checksum.json new file mode 100644 index 0000000000000..56c0bb8d2559c --- /dev/null +++ b/src/vendor/libc/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"7150ee9391a955b2ef7e0762fc61c0c1aab167620ca36d88d78062d93b8334ba",".travis.yml":"ca5e05b688a8c9a3215de3b38f22f4b468f73d26738a80bd939af503ddb222e1","Cargo.toml":"4b1f0d59b5fb939877a639d1d4cac5a12440c6e2d366edf2abcb45c46e3dcd3e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"c1f46480074340f17f1c3ea989b28e6b632b9d324e57792293a60399b90bfda0","appveyor.yml":"c0d70c650b6231e6ff78a352224f1a522a9be69d9da4251adbaddb3f0393294d","ci/README.md":"be804f15e2128e5fd4b160cb0b13cff5f19e7d77b55ec5254aa6fd8731c84f0d","ci/docker/aarch64-unknown-linux-gnu/Dockerfile":"62ca7317439f9c303990e897450a91cd467be05eb75dfc01456d417932ac8672","ci/docker/arm-linux-androideabi/Dockerfile":"c3d60f2ba389e60e59cb6973542751c66a0e7bd484e11589c8ee7346e9ff2bab","ci/docker/arm-unknown-linux-gnueabihf/Dockerfile":"e349f7caa463adbde8d6ec4d2b9f7720ed81c77f48d75bbfb78c89751f55c2dc","ci/docker/i686-unknown-linux-gnu/Dockerfile":"07e9df6ba91025cbec7ae81ade63f8cfb8a54c5e1e5a8f8def0617e17bd59db0","ci/docker/i686-unknown-linux-musl/Dockerfile":"1a4d064adff4a8f58773305567cfe5d915bcd076
2bcb0e101cf6f4ca628a96da","ci/docker/mips-unknown-linux-gnu/Dockerfile":"860299d96ee50ebdbd788e65eb6ba1f561ef66107647bddffcb2567ac350896b","ci/docker/mips-unknown-linux-musl/Dockerfile":"b5917a15c0998adb79ebfdb8aff9ab0e5c4098c4bd5ca78e90ee05859dcfbda3","ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile":"163776e0fd38f66df7415421202ac29efc7d345a628947434e573c3885594ab5","ci/docker/mipsel-unknown-linux-musl/Dockerfile":"b2dd4c26890c1070228df9694adde8fdb1fe78d7d5a71a8cb5c1b54835f93c46","ci/docker/powerpc-unknown-linux-gnu/Dockerfile":"08b846a338c2ee70100f4e80db812668dc58bfb536c44a95cd1cf004d965186b","ci/docker/powerpc64-unknown-linux-gnu/Dockerfile":"4da285ffd035d16f5da9e3701841eb86049c8cfa417fa81e53da4ef74152eac0","ci/docker/x86_64-rumprun-netbsd/Dockerfile":"44c3107fb30380785aaed6ff73fa334017a5bb4e3b5c7d4876154f09023a2b99","ci/docker/x86_64-unknown-freebsd/Dockerfile":"56fce89ceb70792be9005425f3e896361f5ba8a0553db659da87daced93f9785","ci/docker/x86_64-unknown-linux-gnu/Dockerfile":"67fabbc8c6ac02376cf9344251ad49ecdac396b71accb572fd1ae65225325bc0","ci/docker/x86_64-unknown-linux-musl/Dockerfile":"f71019fed5204b950843ef5e56144161fda7e27fad68ed0e8bc4353c388c7bcf","ci/docker/x86_64-unknown-openbsd/Dockerfile":"4a5583797a613056d87f6ae0b1d7a3d3a55552efa7c30e1e0aa67e34d69b4d9c","ci/dox.sh":"2161cb17ee0d6a2279a64149c6b7c73a5b2eab344f248ea1fa0e6c8f6335ec5f","ci/landing-page-footer.html":"b70b3112c2147f5c967e7481061ef38bc2d79a28dd55a16fb916d9c9426da2c4","ci/landing-page-head.html":"ad69663fac7924f27d0209bc519d55838e86edfc4133713a6fd08caadac1b142","ci/run-docker.sh":"325648a92ff4d74f18fdf3d190a5cd483306ed2a98479c0742ca7284acd6b948","ci/run-qemu.sh":"bb859421170871ef23a8940c5e150efec0c01b95e32d2ce2d37b79a45d9d346c","ci/run.sh":"3bb839c2d28986c6915b8f11ed820ff6c62e755fb96bd921a18899ee5f7efd32","ci/style.rs":"60564abc1d5197ed1598426dd0d6ee9939a16d2875b03373538f58843bb616c4","src/dox.rs":"eb6fbcc0b8b59430271bb71ee023961fd165337fc5fd6ca433882457a3c735bd","src/lib.rs":"4cece0e880ec8
731913e5110b58d1b134148b0a43e72d6b990c1d999916fc706","src/macros.rs":"bd9802772b0e5c8b3c550d1c24307f06c0d1e4ce656b4ae1cf092142bbe5412c","src/unix/bsd/apple/b32.rs":"110ecff78da0e8d405d861447904da403d8b3f6da1f0f9dc9987633f3f04fe46","src/unix/bsd/apple/b64.rs":"e6808081c0b276cca3189628716f507c7c0d00b62417cd44addbdaefe848cec7","src/unix/bsd/apple/mod.rs":"6691f81221d455b882d68d1102de049d5b9729bb4b59050c1d62c835dcaddafb","src/unix/bsd/freebsdlike/dragonfly/mod.rs":"d87f02c64649ce63367d9f0e39de7213bd30366bbd5e497f7d88f0dc3c319294","src/unix/bsd/freebsdlike/freebsd/mod.rs":"0a675c4b7f54b410547e10e433503487eb1e738394ab81cac82112a96d275bdc","src/unix/bsd/freebsdlike/freebsd/x86.rs":"54311d3ebf2bb091ab22361e377e6ef9224aec2ecfe459fbfcedde4932db9c58","src/unix/bsd/freebsdlike/freebsd/x86_64.rs":"c7f46b9ae23fde5a9e245a28ed1380066e67f081323b4d253a18e9da3b97b860","src/unix/bsd/freebsdlike/mod.rs":"574f7a1368058fad551cdebea4f576fe672f9bbe95a85468c91f9ff5661908c3","src/unix/bsd/mod.rs":"bd422d4bca87a3e8ea4bd78b9ae019643399807d036913f42fdd7476f260297d","src/unix/bsd/netbsdlike/mod.rs":"7b62b89c6ba0d5a8e0cf0937587a81e0314f9c5dabb0c9a9164106b677cf4dd8","src/unix/bsd/netbsdlike/netbsd/mod.rs":"d62a02a78275ed705b2080cae452eb8954ef0f66ac9acb0f44c819d453904c5c","src/unix/bsd/netbsdlike/netbsd/other/b32/mod.rs":"bd251a102bed65d5cb3459275f6ec3310fe5803ff4c9651212115548f86256d0","src/unix/bsd/netbsdlike/netbsd/other/b64/mod.rs":"927eeccaf3269d299db4c2a55f8010807bf43dfa894aea6a783215f5d3560baa","src/unix/bsd/netbsdlike/netbsd/other/mod.rs":"8ce39030f3e4fb45a3d676ade97da8f6d1b3d5f6d8d141224d341c993c57e090","src/unix/bsd/netbsdlike/openbsdlike/bitrig.rs":"f8cd05dacd3a3136c58da5a2fbe26f703767823b28e74fe8a2b57a7bd98d6d5c","src/unix/bsd/netbsdlike/openbsdlike/mod.rs":"769647209be7b8fc5b7e5c1970f16d5cf9cc3fba04bb456c9584f19a5c406e08","src/unix/bsd/netbsdlike/openbsdlike/openbsd.rs":"b1b9cf7be9f0e4d294a57092594074ad03a65fe0eeac9d1104fa874c313e7900","src/unix/haiku/b32.rs":"bd251a102bed65d5cb3459275f
6ec3310fe5803ff4c9651212115548f86256d0","src/unix/haiku/b64.rs":"b422430c550c0ba833c9206d1350861e344e3a2eb33d7d58693efb35044be1cc","src/unix/haiku/mod.rs":"d14c45d536f24cd9cd8d5170b9829026da4c782ff2d5855644cc217553e309cf","src/unix/mod.rs":"82952d405742b8b21bfbc29648115b3909d9c64422ad04fb6aca443c16ddaa99","src/unix/notbsd/android/b32.rs":"148e1b4ed8b4f700d5aa24178af925164176e1c18b54db877ced4b55ba9f03d4","src/unix/notbsd/android/b64.rs":"302caf0aa95fa022030717c58de17d85d814b04350eca081a722ec435bc4f217","src/unix/notbsd/android/mod.rs":"f7c0145110a406c5cb14243dc71b98af8971674aa7620e5f55dabfa5c8b344c8","src/unix/notbsd/linux/mips.rs":"7736e565499b04560bc7e6f8636fd39c74f4a588c671ece931d27de8ca263963","src/unix/notbsd/linux/mips64.rs":"f269d516e0f5203fbfd18ff6b22ff33f206be1584d9df03c35743f5e80127d8b","src/unix/notbsd/linux/mod.rs":"81dbebd7dd798dc57e5b5b84cec69af2b6027a415262f4ad07b8c609ad2c95ee","src/unix/notbsd/linux/musl/b32/arm.rs":"a8416bc6e36460f3c60e2f7730dad7c43466790d11214441ef227ffb05ea450f","src/unix/notbsd/linux/musl/b32/asmjs.rs":"c660c5eef21a5f7580e9258eb44881014d2aeba5928af431dfc782b6c4393f33","src/unix/notbsd/linux/musl/b32/mips.rs":"76d835acd06c7bcd07a293a6f141b715ac88b959b633df9af3610e8d6eeb1ab4","src/unix/notbsd/linux/musl/b32/mod.rs":"bd29a02c67b69791e7cabd7666503c35ed5322d244a005b9cc7fd0cb28b552a8","src/unix/notbsd/linux/musl/b32/x86.rs":"da2e557a6afa9d15649d8862a5d17032597c924cd8bb290105500905fe975133","src/unix/notbsd/linux/musl/b64/aarch64.rs":"4009c7eaf703472daef2a70bdac910d9fc395a33689ef2e8cf1c4e692445d3f0","src/unix/notbsd/linux/musl/b64/mod.rs":"20f34e48124d8ca2a08cc0d28353b310238d37a345dfa0d58993e2e930a1ae23","src/unix/notbsd/linux/musl/b64/powerpc64.rs":"dc28f5b7284235d6cf5519053cac59a1c16dc39223b71cca0871e4880755f852","src/unix/notbsd/linux/musl/b64/x86_64.rs":"43291acc0dfc92c2fec8ba6ce77ee9ca3c20bcdccec18e149f95ba911cee704b","src/unix/notbsd/linux/musl/mod.rs":"c195e04167d26f82885f9157e32a28caccfd4eabe807af683708f33e28562021","src/unix/not
bsd/linux/other/b32/arm.rs":"f5cb989075fa3b5f997e7101495532c8d5c9f3577412d4c07e4c8c1a16f7b43c","src/unix/notbsd/linux/other/b32/mod.rs":"8b774feb5510b963ed031db7ab3d7e24f1ba5524a6396db0b851d237ccc16fd3","src/unix/notbsd/linux/other/b32/powerpc.rs":"3b62052bb9741afa5349098e6e9c675b60e822e41fed6b5e1b694be1872097b1","src/unix/notbsd/linux/other/b32/x86.rs":"1eda37736f5966c7968b594f74f5018f56b6b8c67bbdeb31fc3db1b6e4ac31b4","src/unix/notbsd/linux/other/b64/aarch64.rs":"a978e82d037a9c8127b2f704323864aff42ac910e721ecc69c255671ca96b950","src/unix/notbsd/linux/other/b64/mod.rs":"efb7740c2fb925ea98977a6a3ff52bc0b72205c1f88a9ba281a939b66b7f0efe","src/unix/notbsd/linux/other/b64/powerpc64.rs":"06a795bca8e91a0143ef1787b034201ed7a21d01960ce9fe869d18c274d5bdb4","src/unix/notbsd/linux/other/b64/x86_64.rs":"0ed128e93f212c0d65660bd95e29190a2dae7c9d15d6fa0d3c4c6656f89e9bdc","src/unix/notbsd/linux/other/mod.rs":"0f7b29425273101ce90a9565637e5f7f61905db2a1e8f5360b285c73b1287da1","src/unix/notbsd/linux/s390x.rs":"6eddef139e18191bc3894f759ca8bd83c59b547bc572ad8938dc61fb5a97d2e9","src/unix/notbsd/mod.rs":"6ba17e2e9a6d05d4470ba595fd38dc55f70fea874a46425a4733ae52d93ee8ff","src/unix/solaris/mod.rs":"6d1f023b637467fe26385d23b32219dbb4573ea177d159e32dad75e4a6ff95de","src/windows.rs":"08f351462388566dcdc6566fb183a467942db63a1caa1bc97f85284fb7a74063"},"package":"044d1360593a78f5c8e5e710beccdc24ab71d1f01bc19a29bcacdba22e8475d8"} \ No newline at end of file diff --git a/src/vendor/libc/.cargo-ok b/src/vendor/libc/.cargo-ok new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/src/vendor/libc/.gitignore b/src/vendor/libc/.gitignore new file mode 100644 index 0000000000000..f0ff2599d09b5 --- /dev/null +++ b/src/vendor/libc/.gitignore @@ -0,0 +1,3 @@ +target +Cargo.lock +*~ diff --git a/src/vendor/libc/.travis.yml b/src/vendor/libc/.travis.yml new file mode 100644 index 0000000000000..703329b705727 --- /dev/null +++ b/src/vendor/libc/.travis.yml @@ -0,0 +1,125 @@ +language: rust +sudo: 
required +dist: trusty +services: + - docker +install: + - curl https://static.rust-lang.org/rustup.sh | + sh -s -- --add-target=$TARGET --disable-sudo -y --prefix=`rustc --print sysroot` +script: + - cargo build + - cargo build --no-default-features + - cargo generate-lockfile --manifest-path libc-test/Cargo.toml + - if [[ $TRAVIS_OS_NAME = "linux" ]]; then + sh ci/run-docker.sh $TARGET; + else + export CARGO_TARGET_DIR=`pwd`/target; + sh ci/run.sh $TARGET; + fi + - rustc ci/style.rs && ./style src +osx_image: xcode7.3 +env: + global: + secure: eIDEoQdTyglcsTD13zSGotAX2HDhRSXIaaTnVZTThqLSrySOc3/6KY3qmOc2Msf7XaBqfFy9QA+alk7OwfePp253eiy1Kced67ffjjFOytEcRT7FlQiYpcYQD6WNHZEj62/bJBO4LTM9sGtWNCTJVEDKW0WM8mUK7qNuC+honPM= +matrix: + include: + # 1.0.0 compat + - os: linux + env: TARGET=x86_64-unknown-linux-gnu + rust: 1.0.0 + script: cargo build + install: + + # build documentation + - os: linux + env: TARGET=x86_64-unknown-linux-gnu + rust: stable + script: sh ci/dox.sh + + # stable compat + - os: linux + env: TARGET=x86_64-unknown-linux-gnu + rust: stable + - os: linux + env: TARGET=i686-unknown-linux-gnu + rust: stable + - os: osx + env: TARGET=x86_64-apple-darwin + rust: stable + - os: osx + env: TARGET=i686-apple-darwin + rust: stable + - os: linux + env: TARGET=arm-linux-androideabi + rust: stable + - os: linux + env: TARGET=x86_64-unknown-linux-musl + rust: stable + - os: linux + env: TARGET=i686-unknown-linux-musl + rust: stable + - os: linux + env: TARGET=arm-unknown-linux-gnueabihf + rust: stable + - os: linux + env: TARGET=aarch64-unknown-linux-gnu + rust: stable + - os: osx + env: TARGET=i386-apple-ios + rust: stable + - os: osx + env: TARGET=x86_64-apple-ios + rust: stable + - os: linux + env: TARGET=x86_64-rumprun-netbsd + rust: stable + - os: linux + env: TARGET=powerpc-unknown-linux-gnu + rust: stable + - os: linux + env: TARGET=powerpc64-unknown-linux-gnu + rust: stable + - os: linux + env: TARGET=mips-unknown-linux-musl + rust: stable + - os: linux + 
env: TARGET=mipsel-unknown-linux-musl + rust: stable + - os: linux + env: TARGET=mips64-unknown-linux-gnuabi64 + rust: nightly + + # beta + - os: linux + env: TARGET=x86_64-unknown-linux-gnu + rust: beta + - os: osx + env: TARGET=x86_64-apple-darwin + rust: beta + + # nightly + - os: linux + env: TARGET=x86_64-unknown-linux-gnu + rust: nightly + - os: osx + env: TARGET=x86_64-apple-darwin + rust: nightly + - os: linux + env: TARGET=mips-unknown-linux-gnu + # not sure why this has to be nightly... + rust: nightly + + # QEMU based targets that compile in an emulator + - os: linux + env: TARGET=x86_64-unknown-freebsd + rust: stable + - os: linux + env: TARGET=x86_64-unknown-openbsd QEMU=openbsd.qcow2 + rust: stable + script: sh ci/run-docker.sh $TARGET + install: + +notifications: + email: + on_success: never + webhooks: https://buildbot.rust-lang.org/homu/travis diff --git a/src/vendor/libc/Cargo.toml b/src/vendor/libc/Cargo.toml new file mode 100644 index 0000000000000..c08ab3aab9da4 --- /dev/null +++ b/src/vendor/libc/Cargo.toml @@ -0,0 +1,21 @@ +[package] + +name = "libc" +version = "0.2.17" +authors = ["The Rust Project Developers"] +license = "MIT/Apache-2.0" +readme = "README.md" +repository = "https://github.com/rust-lang/libc" +homepage = "https://github.com/rust-lang/libc" +documentation = "http://doc.rust-lang.org/libc" +description = """ +A library for types and bindings to native C functions often found in libc or +other common platform libraries. +""" + +[features] +default = ["use_std"] +use_std = [] + +[workspace] +members = ["libc-test", "libc-test/generate-files"] diff --git a/src/vendor/libc/LICENSE-APACHE b/src/vendor/libc/LICENSE-APACHE new file mode 100644 index 0000000000000..16fe87b06e802 --- /dev/null +++ b/src/vendor/libc/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the 
following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/vendor/libc/LICENSE-MIT b/src/vendor/libc/LICENSE-MIT new file mode 100644 index 0000000000000..39d4bdb5acd31 --- /dev/null +++ b/src/vendor/libc/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 The Rust Project Developers + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/src/vendor/libc/README.md b/src/vendor/libc/README.md new file mode 100644 index 0000000000000..5ea812320f05f --- /dev/null +++ b/src/vendor/libc/README.md @@ -0,0 +1,137 @@ +libc +==== + +A Rust library with native bindings to the types and functions commonly found on +various systems, including libc. + +[![Build Status](https://travis-ci.org/rust-lang/libc.svg?branch=master)](https://travis-ci.org/rust-lang/libc) +[![Build status](https://ci.appveyor.com/api/projects/status/34csq3uurnw7c0rl?svg=true)](https://ci.appveyor.com/project/alexcrichton/libc) + +[Documentation](#platforms-and-documentation) + +## Usage + +First, add the following to your `Cargo.toml`: + +```toml +[dependencies] +libc = "0.2" +``` + +Next, add this to your crate root: + +```rust +extern crate libc; +``` + +Currently libc by default links to the standard library, but if you would +instead like to use libc in a `#![no_std]` situation or crate you can request +this via: + +```toml +[dependencies] +libc = { version = "0.2", default-features = false } +``` + +## What is libc? + +The primary purpose of this crate is to provide all of the definitions necessary +to easily interoperate with C code (or "C-like" code) on each of the platforms +that Rust supports. This includes type definitions (e.g. `c_int`), constants +(e.g. `EINVAL`) as well as function headers (e.g. `malloc`). 
+ +This crate does not strive to have any form of compatibility across platforms, +but rather it is simply a straight binding to the system libraries on the +platform in question. + +## Public API + +This crate exports all underlying platform types, functions, and constants under +the crate root, so all items are accessible as `libc::foo`. The types and values +of all the exported APIs match the platform that libc is compiled for. + +More detailed information about the design of this library can be found in its +[associated RFC][rfc]. + +[rfc]: https://github.com/rust-lang/rfcs/blob/master/text/1291-promote-libc.md + +## Adding an API + +Want to use an API which currently isn't bound in `libc`? It's quite easy to add +one! + +The internal structure of this crate is designed to minimize the number of +`#[cfg]` attributes in order to easily be able to add new items which apply +to all platforms in the future. As a result, the crate is organized +hierarchically based on platform. Each module has a number of `#[cfg]`'d +children, but only one is ever actually compiled. Each module then reexports all +the contents of its children. + +This means that for each platform that libc supports, the path from a +leaf module to the root will contain all bindings for the platform in question. +Consequently, this indicates where an API should be added! Adding an API at a +particular level in the hierarchy means that it is supported on all the child +platforms of that level. For example, when adding a Unix API it should be added +to `src/unix/mod.rs`, but when adding a Linux-only API it should be added to +`src/unix/notbsd/linux/mod.rs`. + +If you're not 100% sure at what level of the hierarchy an API should be added +at, fear not! This crate has CI support which tests any binding against all +platforms supported, so you'll see failures if an API is added at the wrong +level or has different signatures across platforms. 
+ +With that in mind, the steps for adding a new API are: + +1. Determine where in the module hierarchy your API should be added. +2. Add the API. +3. Send a PR to this repo. +4. Wait for CI to pass, fixing errors. +5. Wait for a merge! + +### Test before you commit + +We have two automated tests running on [Travis](https://travis-ci.org/rust-lang/libc): + +1. [`libc-test`](https://github.com/alexcrichton/ctest) + - `cd libc-test && cargo run` + - Use the `skip_*()` functions in `build.rs` if you really need a workaround. +2. Style checker + - `rustc ci/style.rs && ./style src` + +## Platforms and Documentation + +The following platforms are currently tested and have documentation available: + +Tested: + * [`i686-pc-windows-msvc`](https://doc.rust-lang.org/libc/i686-pc-windows-msvc/libc/) + * [`x86_64-pc-windows-msvc`](https://doc.rust-lang.org/libc/x86_64-pc-windows-msvc/libc/) + (Windows) + * [`i686-pc-windows-gnu`](https://doc.rust-lang.org/libc/i686-pc-windows-gnu/libc/) + * [`x86_64-pc-windows-gnu`](https://doc.rust-lang.org/libc/x86_64-pc-windows-gnu/libc/) + * [`i686-apple-darwin`](https://doc.rust-lang.org/libc/i686-apple-darwin/libc/) + * [`x86_64-apple-darwin`](https://doc.rust-lang.org/libc/x86_64-apple-darwin/libc/) + (OSX) + * `i686-apple-ios` + * `x86_64-apple-ios` + * [`i686-unknown-linux-gnu`](https://doc.rust-lang.org/libc/i686-unknown-linux-gnu/libc/) + * [`x86_64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-gnu/libc/) + (Linux) + * [`x86_64-unknown-linux-musl`](https://doc.rust-lang.org/libc/x86_64-unknown-linux-musl/libc/) + (Linux MUSL) + * [`aarch64-unknown-linux-gnu`](https://doc.rust-lang.org/libc/aarch64-unknown-linux-gnu/libc/) + * [`mips-unknown-linux-gnu`](https://doc.rust-lang.org/libc/mips-unknown-linux-gnu/libc/) + * [`arm-unknown-linux-gnueabihf`](https://doc.rust-lang.org/libc/arm-unknown-linux-gnueabihf/libc/) + * 
[`arm-linux-androideabi`](https://doc.rust-lang.org/libc/arm-linux-androideabi/libc/) + (Android) + * [`x86_64-unknown-freebsd`](https://doc.rust-lang.org/libc/x86_64-unknown-freebsd/libc/) + * [`x86_64-unknown-openbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-openbsd/libc/) + * [`x86_64-rumprun-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) + +The following may be supported, but are not guaranteed to always work: + + * `i686-unknown-freebsd` + * [`x86_64-unknown-bitrig`](https://doc.rust-lang.org/libc/x86_64-unknown-bitrig/libc/) + * [`x86_64-unknown-dragonfly`](https://doc.rust-lang.org/libc/x86_64-unknown-dragonfly/libc/) + * `i686-unknown-haiku` + * `x86_64-unknown-haiku` + * [`x86_64-unknown-netbsd`](https://doc.rust-lang.org/libc/x86_64-unknown-netbsd/libc/) diff --git a/src/vendor/libc/appveyor.yml b/src/vendor/libc/appveyor.yml new file mode 100644 index 0000000000000..a851bb87b6c3b --- /dev/null +++ b/src/vendor/libc/appveyor.yml @@ -0,0 +1,25 @@ +environment: + matrix: + - TARGET: x86_64-pc-windows-gnu + MSYS2_BITS: 64 + - TARGET: i686-pc-windows-gnu + MSYS2_BITS: 32 + - TARGET: x86_64-pc-windows-msvc + - TARGET: i686-pc-windows-msvc +install: + - curl -sSf -o rustup-init.exe https://win.rustup.rs/ + - rustup-init.exe -y --default-host %TARGET% + - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin + - if defined MSYS2_BITS set PATH=%PATH%;C:\msys64\mingw%MSYS2_BITS%\bin + - rustc -V + - cargo -V + +build: false + +test_script: + - cargo test --target %TARGET% + - cargo run --manifest-path libc-test/Cargo.toml --target %TARGET% + +cache: + - target + - C:\Users\appveyor\.cargo\registry diff --git a/src/vendor/libc/ci/README.md b/src/vendor/libc/ci/README.md new file mode 100644 index 0000000000000..13c7c8da52fc5 --- /dev/null +++ b/src/vendor/libc/ci/README.md @@ -0,0 +1,203 @@ +The goal of the libc crate is to have CI running everywhere to have the +strongest guarantees about the definitions that this library contains, and as a 
+result the CI is pretty complicated and also pretty large! Hopefully this can +serve as a guide through the sea of scripts in this directory and elsewhere in +this project. + +# Files + +First up, let's talk about the files in this directory: + +* `run-travis.sh` - a shell script run by all Travis builders, this is + responsible for setting up the rest of the environment such as installing new + packages, downloading Rust target libraries, etc. + +* `run.sh` - the actual script which runs tests for a particular architecture. + Called from the `run-travis.sh` script this will run all tests for the target + specified. + +* `cargo-config` - Cargo configuration of linkers to use copied into place by + the `run-travis.sh` script before builds are run. + +* `dox.sh` - script called from `run-travis.sh` on only the linux 64-bit nightly + Travis bots to build documentation for this crate. + +* `landing-page-*.html` - used by `dox.sh` to generate a landing page for all + architectures' documentation. + +* `run-qemu.sh` - see discussion about QEMU below + +* `mips`, `rumprun` - instructions to build the docker image for each respective + CI target + +# CI Systems + +Currently this repository leverages a combination of Travis CI and AppVeyor for +running tests. The triples tested are: + +* AppVeyor + * `{i686,x86_64}-pc-windows-{msvc,gnu}` +* Travis + * `{i686,x86_64,mips,aarch64}-unknown-linux-gnu` + * `x86_64-unknown-linux-musl` + * `arm-unknown-linux-gnueabihf` + * `arm-linux-androideabi` + * `{i686,x86_64}-apple-{darwin,ios}` + * `x86_64-rumprun-netbsd` + * `x86_64-unknown-freebsd` + * `x86_64-unknown-openbsd` + +The Windows triples are all pretty standard, they just set up their environment +then run tests, no need for downloading any extra target libs (we just download +the right installer). The Intel Linux/OSX builds are similar in that we just +download the right target libs and run tests. 
Note that the Intel Linux/OSX +builds are run on stable/beta/nightly, but are the only ones that do so. + +The remaining architectures look like: + +* Android runs in a [docker image][android-docker] with an emulator, the NDK, + and the SDK already set up. The entire build happens within the docker image. +* The MIPS, ARM, and AArch64 builds all use the QEMU userspace emulator to run + the generated binary to actually verify the tests pass. +* The MUSL build just has to download a MUSL compiler and target libraries and + then otherwise runs tests normally. +* iOS builds need an extra linker flag currently, but beyond that they're built + as standard as everything else. +* The rumprun target builds an entire kernel from the test suite and then runs + it inside QEMU using the serial console to test whether it succeeded or + failed. +* The BSD builds, currently OpenBSD and FreeBSD, use QEMU to boot up a system + and compile/run tests. More information on that below. + +[android-docker]: https://github.com/rust-lang/rust-buildbot/blob/master/slaves/android/Dockerfile + +## QEMU + +Lots of the architectures tested here use QEMU in the tests, so it's worth going +over all the crazy capabilities QEMU has and the various flavors in which we use +it! + +First up, QEMU has userspace emulation where it doesn't boot a full kernel, it +just runs a binary from another architecture (using the `qemu-` wrappers). +We provide it the runtime path for the dynamically loaded system libraries, +however. This strategy is used for all Linux architectures that aren't intel. +Note that one downside of this QEMU system is that threads are barely +implemented, so we're careful to not spawn many threads. + +For the rumprun target the only output is a kernel image, so we just use that +plus the `rumpbake` command to create a full kernel image which is then run from +within QEMU. + +Finally, the fun part, the BSDs. 
Quite a few hoops are jumped through to get CI +working for these platforms, but the gist of it looks like: + +* Cross compiling from Linux to any of the BSDs seems to be quite non-standard. + We may be able to get it working but it might be difficult at that point to + ensure that the libc definitions align with what you'd get on the BSD itself. + As a result, we try to do compiles within the BSD distro. +* On Travis we can't run a VM-in-a-VM, so we resort to userspace emulation + (QEMU). +* Unfortunately on Travis we also can't use KVM, so the emulation is super slow. + +With all that in mind, the way BSD is tested looks like: + +1. Download a pre-prepared image for the OS being tested. +2. Generate the tests for the OS being tested. This involves running the `ctest` + library over libc to generate a Rust file and a C file which will then be + compiled into the final test. +3. Generate a disk image which will later be mounted by the OS being tested. + This image is mostly just the libc directory, but some modifications are made + to compile the generated files from step 2. +4. The kernel is booted in QEMU, and it is configured to detect the libc-test + image being available, run the test script, and then shut down afterwards. +5. Look for whether the tests passed in the serial console output of the kernel. + +There's some pretty specific instructions for setting up each image (detailed +below), but the main gist of this is that we must avoid a vanilla `cargo run` +inside of the `libc-test` directory (which is what it's intended for) because +that would compile `syntex_syntax`, a large library, with userspace emulation. +This invariably times out on Travis, so we can't do that. + +Once all those hoops are jumped through, however, we can be happy that we're +testing almost everything! + +Below are some details of how to set up the initial OS images which are +downloaded. 
Each image must be enabled to have input/output over the serial + console, log in automatically at the serial console, detect if a second drive in + QEMU is available, and if so mount it, run a script (it'll specifically be + `run-qemu.sh` in this folder which is copied into the generated image talked + about above), and then shut down. + +### QEMU setup - FreeBSD + +1. Download CD installer (most minimal is fine) +2. `qemu-img create -f qcow2 foo.qcow2 2G` +3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user` +4. run installer +5. `echo 'console="comconsole"' >> /boot/loader.conf` +6. `echo 'autoboot_delay="0"' >> /boot/loader.conf` +7. look at /etc/ttys, see what getty argument is for ttyu0 +8. edit /etc/gettytab, look for ttyu0 argument, prepend `:al=root` to line + beneath + +(note that the current image has a `freebsd` user, but this isn't really +necessary) + +Once that's done, arrange for this script to run at login: + +``` +#!/bin/sh + +sudo kldload ext2fs +[ -e /dev/vtbd1 ] || exit 0 +sudo mount -t ext2fs /dev/vtbd1 /mnt +sh /mnt/run.sh /mnt +sudo poweroff +``` + +Helpful links + +* https://en.wikibooks.org/wiki/QEMU/Images +* https://blog.nekoconeko.nl/blog/2015/06/04/creating-an-openstack-freebsd-image.html +* https://www.freebsd.org/doc/handbook/serialconsole-setup.html + + +### QEMU setup - OpenBSD + +1. Download CD installer +2. `qemu-img create -f qcow2 foo.qcow2 2G` +3. `qemu -cdrom foo.iso -drive if=virtio,file=foo.qcow2 -net nic,model=virtio -net user` +4. run installer +5. `echo 'set tty com0' >> /etc/boot.conf` +6. `echo 'boot' >> /etc/boot.conf` +7. Modify /etc/ttys, change the `tty00` at the end from 'unknown off' to + 'vt220 on secure' +8. Modify same line in /etc/ttys to have `"/root/foo.sh"` as the shell +9. Add this script to `/root/foo.sh` + +``` +#!/bin/sh +exec 1>/dev/tty00 +exec 2>&1 + +if mount -t ext2fs /dev/sd1c /mnt; then + sh /mnt/run.sh /mnt + shutdown -ph now +fi + +# limited shell... 
+exec /bin/sh < /dev/tty00 +``` + +10. `chmod +x /root/foo.sh` + +Helpful links: + +* https://en.wikibooks.org/wiki/QEMU/Images +* http://www.openbsd.org/faq/faq7.html#SerCon + +# Questions? + +Hopefully that's at least somewhat of an introduction to everything going on +here, and feel free to ping @alexcrichton with questions! + diff --git a/src/vendor/libc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..2ba69e15442f3 --- /dev/null +++ b/src/vendor/libc/ci/docker/aarch64-unknown-linux-gnu/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates \ + gcc-aarch64-linux-gnu libc6-dev-arm64-cross qemu-user +ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/arm-linux-androideabi/Dockerfile b/src/vendor/libc/ci/docker/arm-linux-androideabi/Dockerfile new file mode 100644 index 0000000000000..0e41ba6dbee66 --- /dev/null +++ b/src/vendor/libc/ci/docker/arm-linux-androideabi/Dockerfile @@ -0,0 +1,4 @@ +FROM alexcrichton/rust-slave-android:2015-11-22 +ENV CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ + PATH=$PATH:/rust/bin +ENTRYPOINT ["sh"] diff --git a/src/vendor/libc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile b/src/vendor/libc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile new file mode 100644 index 0000000000000..3824c0466401f --- /dev/null +++ b/src/vendor/libc/ci/docker/arm-unknown-linux-gnueabihf/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross qemu-user +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ + PATH=$PATH:/rust/bin diff --git 
a/src/vendor/libc/ci/docker/i686-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/i686-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..c149d84072912 --- /dev/null +++ b/src/vendor/libc/ci/docker/i686-unknown-linux-gnu/Dockerfile @@ -0,0 +1,5 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc-multilib libc6-dev ca-certificates +ENV PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/i686-unknown-linux-musl/Dockerfile b/src/vendor/libc/ci/docker/i686-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000000..87459a1672bdc --- /dev/null +++ b/src/vendor/libc/ci/docker/i686-unknown-linux-musl/Dockerfile @@ -0,0 +1,22 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc make libc6-dev git curl ca-certificates +# Below we're cross-compiling musl for i686 using the system compiler on an +# x86_64 system. This is an awkward thing to be doing and so we have to jump +# through a couple hoops to get musl to be happy. In particular: +# +# * We specifically pass -m32 in CFLAGS and override CC when running ./configure, +# since otherwise the script will fail to find a compiler. +# * We manually unset CROSS_COMPILE when running make; otherwise the makefile +# will call the non-existent binary 'i686-ar'. +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ + tar xzf - && \ + cd musl-1.1.15 && \ + CC=gcc CFLAGS=-m32 ./configure --prefix=/musl-i686 --disable-shared --target=i686 && \ + make CROSS_COMPILE= install -j4 && \ + cd .. 
&& \ + rm -rf musl-1.1.15 +ENV PATH=$PATH:/musl-i686/bin:/rust/bin \ + CC_i686_unknown_linux_musl=musl-gcc diff --git a/src/vendor/libc/ci/docker/mips-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/mips-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..eea1f652c3cbd --- /dev/null +++ b/src/vendor/libc/ci/docker/mips-unknown-linux-gnu/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-mips-linux-gnu libc6-dev-mips-cross \ + qemu-system-mips + +ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/mips-unknown-linux-musl/Dockerfile b/src/vendor/libc/ci/docker/mips-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000000..77c6adb435f1d --- /dev/null +++ b/src/vendor/libc/ci/docker/mips-unknown-linux-musl/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain +RUN curl -L https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/OpenWrt-SDK-ar71xx-generic_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=1 + +ENV PATH=$PATH:/rust/bin:/toolchain/staging_dir/toolchain-mips_34kc_gcc-5.3.0_musl-1.1.15/bin \ + CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ + CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-openwrt-linux-gcc diff --git a/src/vendor/libc/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile b/src/vendor/libc/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile new file mode 100644 index 0000000000000..2eb5de2453800 --- /dev/null +++ b/src/vendor/libc/ci/docker/mips64-unknown-linux-gnuabi64/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev 
qemu-user ca-certificates \ + gcc-mips64-linux-gnuabi64 libc6-dev-mips64-cross \ + qemu-system-mips64 + +ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER=mips64-linux-gnuabi64-gcc \ + CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/mipsel-unknown-linux-musl/Dockerfile b/src/vendor/libc/ci/docker/mipsel-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000000..36c4d90ef68f6 --- /dev/null +++ b/src/vendor/libc/ci/docker/mipsel-unknown-linux-musl/Dockerfile @@ -0,0 +1,14 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates qemu-system-mips curl \ + bzip2 + +RUN mkdir /toolchain +RUN curl -L https://downloads.openwrt.org/snapshots/trunk/malta/generic/OpenWrt-Toolchain-malta-le_gcc-5.3.0_musl-1.1.15.Linux-x86_64.tar.bz2 | \ + tar xjf - -C /toolchain --strip-components=2 + +ENV PATH=$PATH:/rust/bin:/toolchain/bin \ + CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ + CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-openwrt-linux-gcc diff --git a/src/vendor/libc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..d9d7db0f41dd2 --- /dev/null +++ b/src/vendor/libc/ci/docker/powerpc-unknown-linux-gnu/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc-linux-gnu libc6-dev-powerpc-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..df0e6057b4f7f --- /dev/null +++ 
b/src/vendor/libc/ci/docker/powerpc64-unknown-linux-gnu/Dockerfile @@ -0,0 +1,11 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu-user ca-certificates \ + gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross \ + qemu-system-ppc + +ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ + CC=powerpc64-linux-gnu-gcc \ + PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/x86_64-rumprun-netbsd/Dockerfile b/src/vendor/libc/ci/docker/x86_64-rumprun-netbsd/Dockerfile new file mode 100644 index 0000000000000..129771e76b74d --- /dev/null +++ b/src/vendor/libc/ci/docker/x86_64-rumprun-netbsd/Dockerfile @@ -0,0 +1,6 @@ +FROM mato/rumprun-toolchain-hw-x86_64 +USER root +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + qemu +ENV PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile b/src/vendor/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile new file mode 100644 index 0000000000000..b127338222363 --- /dev/null +++ b/src/vendor/libc/ci/docker/x86_64-unknown-freebsd/Dockerfile @@ -0,0 +1,13 @@ +FROM alexcrichton/rust-slave-linux-cross:2016-04-15 +USER root + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + qemu genext2fs + +ENTRYPOINT ["sh"] + +ENV PATH=$PATH:/rust/bin \ + QEMU=freebsd.qcow2.gz \ + CAN_CROSS=1 \ + CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd10-gcc diff --git a/src/vendor/libc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile b/src/vendor/libc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile new file mode 100644 index 0000000000000..4af3f834cbe6e --- /dev/null +++ b/src/vendor/libc/ci/docker/x86_64-unknown-linux-gnu/Dockerfile @@ -0,0 +1,5 @@ +FROM ubuntu:16.10 +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev ca-certificates +ENV PATH=$PATH:/rust/bin diff --git a/src/vendor/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile 
b/src/vendor/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000000..9c2499948a287 --- /dev/null +++ b/src/vendor/libc/ci/docker/x86_64-unknown-linux-musl/Dockerfile @@ -0,0 +1,13 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc make libc6-dev git curl ca-certificates +RUN curl https://www.musl-libc.org/releases/musl-1.1.15.tar.gz | \ + tar xzf - && \ + cd musl-1.1.15 && \ + ./configure --prefix=/musl-x86_64 && \ + make install -j4 && \ + cd .. && \ + rm -rf musl-1.1.15 +ENV PATH=$PATH:/musl-x86_64/bin:/rust/bin diff --git a/src/vendor/libc/ci/docker/x86_64-unknown-openbsd/Dockerfile b/src/vendor/libc/ci/docker/x86_64-unknown-openbsd/Dockerfile new file mode 100644 index 0000000000000..26340a5ed1eca --- /dev/null +++ b/src/vendor/libc/ci/docker/x86_64-unknown-openbsd/Dockerfile @@ -0,0 +1,8 @@ +FROM ubuntu:16.10 + +RUN apt-get update +RUN apt-get install -y --no-install-recommends \ + gcc libc6-dev qemu curl ca-certificates \ + genext2fs +ENV PATH=$PATH:/rust/bin \ + QEMU=2016-09-07/openbsd-6.0-without-pkgs.qcow2 diff --git a/src/vendor/libc/ci/dox.sh b/src/vendor/libc/ci/dox.sh new file mode 100644 index 0000000000000..88d882dcacdd3 --- /dev/null +++ b/src/vendor/libc/ci/dox.sh @@ -0,0 +1,33 @@ +#!/bin/sh + +# Builds documentation for all target triples that we have a registered URL for +# in liblibc. This scrapes the list of triples to document from `src/lib.rs` +# which has a bunch of `html_root_url` directives we pick up. + +set -e + +TARGETS=`grep html_root_url src/lib.rs | sed 's/.*".*\/\(.*\)"/\1/'` + +rm -rf target/doc +mkdir -p target/doc + +cp ci/landing-page-head.html target/doc/index.html + +for target in $TARGETS; do + echo documenting $target + + rustdoc -o target/doc/$target --target $target src/lib.rs --cfg dox \ + --crate-name libc + + echo "

" \ + >> target/doc/index.html +done + +cat ci/landing-page-footer.html >> target/doc/index.html + +# If we're on travis, not a PR, and on the right branch, publish! +if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ "$TRAVIS_BRANCH" = "master" ]; then + pip install ghp-import --user $USER + $HOME/.local/bin/ghp-import -n target/doc + git push -qf https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages +fi diff --git a/src/vendor/libc/ci/landing-page-footer.html b/src/vendor/libc/ci/landing-page-footer.html new file mode 100644 index 0000000000000..941cc8d2b4030 --- /dev/null +++ b/src/vendor/libc/ci/landing-page-footer.html @@ -0,0 +1,3 @@ + + + diff --git a/src/vendor/libc/ci/landing-page-head.html b/src/vendor/libc/ci/landing-page-head.html new file mode 100644 index 0000000000000..fc69fa88eb5ce --- /dev/null +++ b/src/vendor/libc/ci/landing-page-head.html @@ -0,0 +1,7 @@ + + + + + + +
  • $target
  • (&self, value: P) -> Obligation<'tcx,P> { + Obligation { cause: self.cause.clone(), + recursion_depth: self.recursion_depth, + predicate: value } + } +} + +impl<'tcx> ObligationCause<'tcx> { + pub fn new(span: Span, + body_id: ast::NodeId, + code: ObligationCauseCode<'tcx>) + -> ObligationCause<'tcx> { + ObligationCause { span: span, body_id: body_id, code: code } + } + + pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> { + ObligationCause { span: span, body_id: body_id, code: MiscObligation } + } + + pub fn dummy() -> ObligationCause<'tcx> { + ObligationCause { span: DUMMY_SP, body_id: ast::CRATE_NODE_ID, code: MiscObligation } + } +} + +impl<'tcx, N> Vtable<'tcx, N> { + pub fn nested_obligations(self) -> Vec { + match self { + VtableImpl(i) => i.nested, + VtableParam(n) => n, + VtableBuiltin(i) => i.nested, + VtableDefaultImpl(d) => d.nested, + VtableClosure(c) => c.nested, + VtableObject(d) => d.nested, + VtableFnPointer(d) => d.nested, + } + } + + fn nested_obligations_mut(&mut self) -> &mut Vec { + match self { + &mut VtableImpl(ref mut i) => &mut i.nested, + &mut VtableParam(ref mut n) => n, + &mut VtableBuiltin(ref mut i) => &mut i.nested, + &mut VtableDefaultImpl(ref mut d) => &mut d.nested, + &mut VtableClosure(ref mut c) => &mut c.nested, + &mut VtableObject(ref mut d) => &mut d.nested, + &mut VtableFnPointer(ref mut d) => &mut d.nested, + } + } + + pub fn map(self, f: F) -> Vtable<'tcx, M> where F: FnMut(N) -> M { + match self { + VtableImpl(i) => VtableImpl(VtableImplData { + impl_def_id: i.impl_def_id, + substs: i.substs, + nested: i.nested.into_iter().map(f).collect(), + }), + VtableParam(n) => VtableParam(n.into_iter().map(f).collect()), + VtableBuiltin(i) => VtableBuiltin(VtableBuiltinData { + nested: i.nested.into_iter().map(f).collect(), + }), + VtableObject(o) => VtableObject(VtableObjectData { + upcast_trait_ref: o.upcast_trait_ref, + vtable_base: o.vtable_base, + nested: o.nested.into_iter().map(f).collect(), + }), + 
VtableDefaultImpl(d) => VtableDefaultImpl(VtableDefaultImplData { + trait_def_id: d.trait_def_id, + nested: d.nested.into_iter().map(f).collect(), + }), + VtableFnPointer(p) => VtableFnPointer(VtableFnPointerData { + fn_ty: p.fn_ty, + nested: p.nested.into_iter().map(f).collect(), + }), + VtableClosure(c) => VtableClosure(VtableClosureData { + closure_def_id: c.closure_def_id, + substs: c.substs, + nested: c.nested.into_iter().map(f).collect(), + }) + } + } +} + +impl<'tcx> FulfillmentError<'tcx> { + fn new(obligation: PredicateObligation<'tcx>, + code: FulfillmentErrorCode<'tcx>) + -> FulfillmentError<'tcx> + { + FulfillmentError { obligation: obligation, code: code } + } +} + +impl<'tcx> TraitObligation<'tcx> { + fn self_ty(&self) -> ty::Binder> { + ty::Binder(self.predicate.skip_binder().self_ty()) + } +} diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs new file mode 100644 index 0000000000000..ceee6c236e4e3 --- /dev/null +++ b/src/librustc/traits/object_safety.rs @@ -0,0 +1,354 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! "Object safety" refers to the ability for a trait to be converted +//! to an object. In general, traits may only be converted to an +//! object if all of their methods meet certain criteria. In particular, +//! they must: +//! +//! - have a suitable receiver from which we can extract a vtable; +//! - not reference the erased type `Self` except for in this receiver; +//! 
- not have generic type parameters + +use super::elaborate_predicates; + +use hir::def_id::DefId; +use traits; +use ty::{self, Ty, TyCtxt, TypeFoldable}; +use ty::subst::Substs; +use syntax::ast; + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum ObjectSafetyViolation { + /// Self : Sized declared on the trait + SizedSelf, + + /// Supertrait reference references `Self` an in illegal location + /// (e.g. `trait Foo : Bar`) + SupertraitSelf, + + /// Method has something illegal + Method(ast::Name, MethodViolationCode), +} + +/// Reasons a method might not be object-safe. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +pub enum MethodViolationCode { + /// e.g., `fn foo()` + StaticMethod, + + /// e.g., `fn foo(&self, x: Self)` or `fn foo(&self) -> Self` + ReferencesSelf, + + /// e.g., `fn foo()` + Generic, +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn is_object_safe(self, trait_def_id: DefId) -> bool { + // Because we query yes/no results frequently, we keep a cache: + let def = self.lookup_trait_def(trait_def_id); + + let result = def.object_safety().unwrap_or_else(|| { + let result = self.object_safety_violations(trait_def_id).is_empty(); + + // Record just a yes/no result in the cache; this is what is + // queried most frequently. Note that this may overwrite a + // previous result, but always with the same thing. + def.set_object_safety(result); + + result + }); + + debug!("is_object_safe({:?}) = {}", trait_def_id, result); + + result + } + + /// Returns the object safety violations that affect + /// astconv - currently, Self in supertraits. This is needed + /// because `object_safety_violations` can't be used during + /// type collection. 
+ pub fn astconv_object_safety_violations(self, trait_def_id: DefId) + -> Vec + { + let mut violations = vec![]; + + if self.supertraits_reference_self(trait_def_id) { + violations.push(ObjectSafetyViolation::SupertraitSelf); + } + + debug!("astconv_object_safety_violations(trait_def_id={:?}) = {:?}", + trait_def_id, + violations); + + violations + } + + pub fn object_safety_violations(self, trait_def_id: DefId) + -> Vec + { + traits::supertrait_def_ids(self, trait_def_id) + .flat_map(|def_id| self.object_safety_violations_for_trait(def_id)) + .collect() + } + + fn object_safety_violations_for_trait(self, trait_def_id: DefId) + -> Vec + { + // Check methods for violations. + let mut violations: Vec<_> = self.associated_items(trait_def_id) + .filter(|item| item.kind == ty::AssociatedKind::Method) + .filter_map(|item| { + self.object_safety_violation_for_method(trait_def_id, &item) + .map(|code| ObjectSafetyViolation::Method(item.name, code)) + }).collect(); + + // Check the trait itself. + if self.trait_has_sized_self(trait_def_id) { + violations.push(ObjectSafetyViolation::SizedSelf); + } + if self.supertraits_reference_self(trait_def_id) { + violations.push(ObjectSafetyViolation::SupertraitSelf); + } + + debug!("object_safety_violations_for_trait(trait_def_id={:?}) = {:?}", + trait_def_id, + violations); + + violations + } + + fn supertraits_reference_self(self, trait_def_id: DefId) -> bool { + let trait_ref = ty::Binder(ty::TraitRef { + def_id: trait_def_id, + substs: Substs::identity_for_item(self, trait_def_id) + }); + let predicates = self.item_super_predicates(trait_def_id); + predicates + .predicates + .into_iter() + .map(|predicate| predicate.subst_supertrait(self, &trait_ref)) + .any(|predicate| { + match predicate { + ty::Predicate::Trait(ref data) => { + // In the case of a trait predicate, we can skip the "self" type. + data.skip_binder().input_types().skip(1).any(|t| t.has_self_ty()) + } + ty::Predicate::Projection(..) | + ty::Predicate::WellFormed(..) 
| + ty::Predicate::ObjectSafe(..) | + ty::Predicate::TypeOutlives(..) | + ty::Predicate::RegionOutlives(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::Equate(..) => { + false + } + } + }) + } + + fn trait_has_sized_self(self, trait_def_id: DefId) -> bool { + self.generics_require_sized_self(trait_def_id) + } + + fn generics_require_sized_self(self, def_id: DefId) -> bool { + let sized_def_id = match self.lang_items.sized_trait() { + Some(def_id) => def_id, + None => { return false; /* No Sized trait, can't require it! */ } + }; + + // Search for a predicate like `Self : Sized` amongst the trait bounds. + let free_substs = self.construct_free_substs(def_id, + self.region_maps.node_extent(ast::DUMMY_NODE_ID)); + let predicates = self.item_predicates(def_id); + let predicates = predicates.instantiate(self, free_substs).predicates; + elaborate_predicates(self, predicates) + .any(|predicate| { + match predicate { + ty::Predicate::Trait(ref trait_pred) if trait_pred.def_id() == sized_def_id => { + trait_pred.0.self_ty().is_self() + } + ty::Predicate::Projection(..) | + ty::Predicate::Trait(..) | + ty::Predicate::Equate(..) | + ty::Predicate::RegionOutlives(..) | + ty::Predicate::WellFormed(..) | + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::TypeOutlives(..) => { + false + } + } + }) + } + + /// Returns `Some(_)` if this method makes the containing trait not object safe. + fn object_safety_violation_for_method(self, + trait_def_id: DefId, + method: &ty::AssociatedItem) + -> Option + { + // Any method that has a `Self : Sized` requisite is otherwise + // exempt from the regulations. + if self.generics_require_sized_self(method.def_id) { + return None; + } + + self.virtual_call_violation_for_method(trait_def_id, method) + } + + /// We say a method is *vtable safe* if it can be invoked on a trait + /// object. 
Note that object-safe traits can have some + /// non-vtable-safe methods, so long as they require `Self:Sized` or + /// otherwise ensure that they cannot be used when `Self=Trait`. + pub fn is_vtable_safe_method(self, + trait_def_id: DefId, + method: &ty::AssociatedItem) + -> bool + { + // Any method that has a `Self : Sized` requisite can't be called. + if self.generics_require_sized_self(method.def_id) { + return false; + } + + self.virtual_call_violation_for_method(trait_def_id, method).is_none() + } + + /// Returns `Some(_)` if this method cannot be called on a trait + /// object; this does not necessarily imply that the enclosing trait + /// is not object safe, because the method might have a where clause + /// `Self:Sized`. + fn virtual_call_violation_for_method(self, + trait_def_id: DefId, + method: &ty::AssociatedItem) + -> Option + { + // The method's first parameter must be something that derefs (or + // autorefs) to `&self`. For now, we only accept `self`, `&self` + // and `Box`. + if !method.method_has_self_argument { + return Some(MethodViolationCode::StaticMethod); + } + + // The `Self` type is erased, so it should not appear in list of + // arguments or return type apart from the receiver. + let ref sig = self.item_type(method.def_id).fn_sig(); + for &input_ty in &sig.0.inputs[1..] { + if self.contains_illegal_self_type_reference(trait_def_id, input_ty) { + return Some(MethodViolationCode::ReferencesSelf); + } + } + if self.contains_illegal_self_type_reference(trait_def_id, sig.0.output) { + return Some(MethodViolationCode::ReferencesSelf); + } + + // We can't monomorphize things like `fn foo(...)`. + if !self.item_generics(method.def_id).types.is_empty() { + return Some(MethodViolationCode::Generic); + } + + None + } + + fn contains_illegal_self_type_reference(self, + trait_def_id: DefId, + ty: Ty<'tcx>) + -> bool + { + // This is somewhat subtle. 
In general, we want to forbid + // references to `Self` in the argument and return types, + // since the value of `Self` is erased. However, there is one + // exception: it is ok to reference `Self` in order to access + // an associated type of the current trait, since we retain + // the value of those associated types in the object type + // itself. + // + // ```rust + // trait SuperTrait { + // type X; + // } + // + // trait Trait : SuperTrait { + // type Y; + // fn foo(&self, x: Self) // bad + // fn foo(&self) -> Self // bad + // fn foo(&self) -> Option // bad + // fn foo(&self) -> Self::Y // OK, desugars to next example + // fn foo(&self) -> ::Y // OK + // fn foo(&self) -> Self::X // OK, desugars to next example + // fn foo(&self) -> ::X // OK + // } + // ``` + // + // However, it is not as simple as allowing `Self` in a projected + // type, because there are illegal ways to use `Self` as well: + // + // ```rust + // trait Trait : SuperTrait { + // ... + // fn foo(&self) -> ::X; + // } + // ``` + // + // Here we will not have the type of `X` recorded in the + // object type, and we cannot resolve `Self as SomeOtherTrait` + // without knowing what `Self` is. + + let mut supertraits: Option>> = None; + let mut error = false; + ty.maybe_walk(|ty| { + match ty.sty { + ty::TyParam(ref param_ty) => { + if param_ty.is_self() { + error = true; + } + + false // no contained types to walk + } + + ty::TyProjection(ref data) => { + // This is a projected type `::X`. + + // Compute supertraits of current trait lazily. + if supertraits.is_none() { + let trait_ref = ty::Binder(ty::TraitRef { + def_id: trait_def_id, + substs: Substs::identity_for_item(self, trait_def_id) + }); + supertraits = Some(traits::supertraits(self, trait_ref).collect()); + } + + // Determine whether the trait reference `Foo as + // SomeTrait` is in fact a supertrait of the + // current trait. In that case, this type is + // legal, because the type `X` will be specified + // in the object type. 
Note that we can just use + // direct equality here because all of these types + // are part of the formal parameter listing, and + // hence there should be no inference variables. + let projection_trait_ref = ty::Binder(data.trait_ref.clone()); + let is_supertrait_of_current_trait = + supertraits.as_ref().unwrap().contains(&projection_trait_ref); + + if is_supertrait_of_current_trait { + false // do not walk contained types, do not report error, do collect $200 + } else { + true // DO walk contained types, POSSIBLY reporting an error + } + } + + _ => true, // walk contained types, if any + } + }); + + error + } +} diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs new file mode 100644 index 0000000000000..173f2a0299d6c --- /dev/null +++ b/src/librustc/traits/project.rs @@ -0,0 +1,1447 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Code for projecting associated types out of trait references. 
+ +use super::elaborate_predicates; +use super::specialization_graph; +use super::translate_substs; +use super::Obligation; +use super::ObligationCause; +use super::PredicateObligation; +use super::SelectionContext; +use super::SelectionError; +use super::VtableClosureData; +use super::VtableFnPointerData; +use super::VtableImplData; +use super::util; + +use hir::def_id::DefId; +use infer::InferOk; +use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap}; +use syntax::ast; +use syntax::symbol::Symbol; +use ty::subst::Subst; +use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder}; +use util::common::FN_OUTPUT_NAME; + +/// Depending on the stage of compilation, we want projection to be +/// more or less conservative. +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum Reveal { + /// FIXME (#32205) + /// At coherence-checking time, we're still constructing the + /// specialization graph, and thus we only project + /// non-`default` associated types that are defined directly in + /// the applicable impl. (This behavior should be improved over + /// time, to allow for successful projections modulo cycles + /// between different impls). + /// + /// Here's an example that will fail due to the restriction: + /// + /// ``` + /// trait Assoc { + /// type Output; + /// } + /// + /// impl Assoc for T { + /// type Output = bool; + /// } + /// + /// impl Assoc for u8 {} // <- inherits the non-default type from above + /// + /// trait Foo {} + /// impl Foo for u32 {} + /// impl Foo for ::Output {} // <- this projection will fail + /// ``` + /// + /// The projection would succeed if `Output` had been defined + /// directly in the impl for `u8`. + ExactMatch, + + /// At type-checking time, we refuse to project any associated + /// type that is marked `default`. Non-`default` ("final") types + /// are always projected. This is necessary in general for + /// soundness of specialization. 
However, we *could* allow + /// projections in fully-monomorphic cases. We choose not to, + /// because we prefer for `default type` to force the type + /// definition to be treated abstractly by any consumers of the + /// impl. Concretely, that means that the following example will + /// fail to compile: + /// + /// ``` + /// trait Assoc { + /// type Output; + /// } + /// + /// impl Assoc for T { + /// default type Output = bool; + /// } + /// + /// fn main() { + /// let <() as Assoc>::Output = true; + /// } + NotSpecializable, + + /// At trans time, all monomorphic projections will succeed. + /// Also, `impl Trait` is normalized to the concrete type, + /// which has to be already collected by type-checking. + /// + /// NOTE: As `impl Trait`'s concrete type should *never* + /// be observable directly by the user, `Reveal::All` + /// should not be used by checks which may expose + /// type equality or type contents to the user. + /// There are some exceptions, e.g. around OIBITS and + /// transmute-checking, which expose some details, but + /// not the whole concrete type of the `impl Trait`. + All, +} + +pub type PolyProjectionObligation<'tcx> = + Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>; + +pub type ProjectionObligation<'tcx> = + Obligation<'tcx, ty::ProjectionPredicate<'tcx>>; + +pub type ProjectionTyObligation<'tcx> = + Obligation<'tcx, ty::ProjectionTy<'tcx>>; + +/// When attempting to resolve `::Name` ... +#[derive(Debug)] +pub enum ProjectionTyError<'tcx> { + /// ...we found multiple sources of information and couldn't resolve the ambiguity. 
+ TooManyCandidates, + + /// ...an error occurred matching `T : TraitRef` + TraitSelectionError(SelectionError<'tcx>), +} + +#[derive(Clone)] +pub struct MismatchedProjectionTypes<'tcx> { + pub err: ty::error::TypeError<'tcx> +} + +#[derive(PartialEq, Eq, Debug)] +enum ProjectionTyCandidate<'tcx> { + // from a where-clause in the env or object type + ParamEnv(ty::PolyProjectionPredicate<'tcx>), + + // from the definition of `Trait` when you have something like <::B as Trait2>::C + TraitDef(ty::PolyProjectionPredicate<'tcx>), + + // from a "impl" (or a "pseudo-impl" returned by select) + Select, +} + +struct ProjectionTyCandidateSet<'tcx> { + vec: Vec>, + ambiguous: bool +} + +/// Evaluates constraints of the form: +/// +/// for<...> ::U == V +/// +/// If successful, this may result in additional obligations. +pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &PolyProjectionObligation<'tcx>) + -> Result>>, MismatchedProjectionTypes<'tcx>> +{ + debug!("poly_project_and_unify_type(obligation={:?})", + obligation); + + let infcx = selcx.infcx(); + infcx.commit_if_ok(|snapshot| { + let (skol_predicate, skol_map) = + infcx.skolemize_late_bound_regions(&obligation.predicate, snapshot); + + let skol_obligation = obligation.with(skol_predicate); + let r = match project_and_unify_type(selcx, &skol_obligation) { + Ok(result) => { + let span = obligation.cause.span; + match infcx.leak_check(false, span, &skol_map, snapshot) { + Ok(()) => Ok(infcx.plug_leaks(skol_map, snapshot, result)), + Err(e) => Err(MismatchedProjectionTypes { err: e }), + } + } + Err(e) => { + Err(e) + } + }; + + r + }) +} + +/// Evaluates constraints of the form: +/// +/// ::U == V +/// +/// If successful, this may result in additional obligations. 
+fn project_and_unify_type<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionObligation<'tcx>) + -> Result>>, MismatchedProjectionTypes<'tcx>> +{ + debug!("project_and_unify_type(obligation={:?})", + obligation); + + let Normalized { value: normalized_ty, mut obligations } = + match opt_normalize_projection_type(selcx, + obligation.predicate.projection_ty.clone(), + obligation.cause.clone(), + obligation.recursion_depth) { + Some(n) => n, + None => return Ok(None), + }; + + debug!("project_and_unify_type: normalized_ty={:?} obligations={:?}", + normalized_ty, + obligations); + + let infcx = selcx.infcx(); + match infcx.eq_types(true, &obligation.cause, normalized_ty, obligation.predicate.ty) { + Ok(InferOk { obligations: inferred_obligations, value: () }) => { + obligations.extend(inferred_obligations); + Ok(Some(obligations)) + }, + Err(err) => Err(MismatchedProjectionTypes { err: err }), + } +} + +/// Normalizes any associated type projections in `value`, replacing +/// them with a fully resolved type where possible. The return value +/// combines the normalized result and any additional obligations that +/// were incurred as result. +pub fn normalize<'a, 'b, 'gcx, 'tcx, T>(selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + value: &T) + -> Normalized<'tcx, T> + where T : TypeFoldable<'tcx> +{ + normalize_with_depth(selcx, cause, 0, value) +} + +/// As `normalize`, but with a custom depth. 
+pub fn normalize_with_depth<'a, 'b, 'gcx, 'tcx, T>( + selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + depth: usize, + value: &T) + -> Normalized<'tcx, T> + + where T : TypeFoldable<'tcx> +{ + debug!("normalize_with_depth(depth={}, value={:?})", depth, value); + let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth); + let result = normalizer.fold(value); + debug!("normalize_with_depth: depth={} result={:?} with {} obligations", + depth, result, normalizer.obligations.len()); + debug!("normalize_with_depth: depth={} obligations={:?}", + depth, normalizer.obligations); + Normalized { + value: result, + obligations: normalizer.obligations, + } +} + +struct AssociatedTypeNormalizer<'a, 'b: 'a, 'gcx: 'b+'tcx, 'tcx: 'b> { + selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + obligations: Vec>, + depth: usize, +} + +impl<'a, 'b, 'gcx, 'tcx> AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> { + fn new(selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + cause: ObligationCause<'tcx>, + depth: usize) + -> AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> + { + AssociatedTypeNormalizer { + selcx: selcx, + cause: cause, + obligations: vec![], + depth: depth, + } + } + + fn fold>(&mut self, value: &T) -> T { + let value = self.selcx.infcx().resolve_type_vars_if_possible(value); + + if !value.has_projection_types() { + value.clone() + } else { + value.fold_with(self) + } + } +} + +impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, 'b, 'gcx, 'tcx> { + fn tcx<'c>(&'c self) -> TyCtxt<'c, 'gcx, 'tcx> { + self.selcx.tcx() + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + // We don't want to normalize associated types that occur inside of region + // binders, because they may contain bound regions, and we can't cope with that. 
+ // + // Example: + // + // for<'a> fn(>::A) + // + // Instead of normalizing `>::A` here, we'll + // normalize it when we instantiate those bound regions (which + // should occur eventually). + + let ty = ty.super_fold_with(self); + match ty.sty { + ty::TyAnon(def_id, substs) if !substs.has_escaping_regions() => { // (*) + // Only normalize `impl Trait` after type-checking, usually in trans. + if self.selcx.projection_mode() == Reveal::All { + let generic_ty = self.tcx().item_type(def_id); + let concrete_ty = generic_ty.subst(self.tcx(), substs); + self.fold_ty(concrete_ty) + } else { + ty + } + } + + ty::TyProjection(ref data) if !data.has_escaping_regions() => { // (*) + + // (*) This is kind of hacky -- we need to be able to + // handle normalization within binders because + // otherwise we wind up a need to normalize when doing + // trait matching (since you can have a trait + // obligation like `for<'a> T::B : Fn(&'a int)`), but + // we can't normalize with bound regions in scope. So + // far now we just ignore binders but only normalize + // if all bound regions are gone (and then we still + // have to renormalize whenever we instantiate a + // binder). It would be better to normalize in a + // binding-aware fashion. 
+ + let Normalized { value: normalized_ty, obligations } = + normalize_projection_type(self.selcx, + data.clone(), + self.cause.clone(), + self.depth); + debug!("AssociatedTypeNormalizer: depth={} normalized {:?} to {:?} \ + with {} add'l obligations", + self.depth, ty, normalized_ty, obligations.len()); + self.obligations.extend(obligations); + normalized_ty + } + + _ => { + ty + } + } + } +} + +#[derive(Clone)] +pub struct Normalized<'tcx,T> { + pub value: T, + pub obligations: Vec>, +} + +pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>; + +impl<'tcx,T> Normalized<'tcx,T> { + pub fn with(self, value: U) -> Normalized<'tcx,U> { + Normalized { value: value, obligations: self.obligations } + } +} + +/// The guts of `normalize`: normalize a specific projection like `::Item`. The result is always a type (and possibly +/// additional obligations). If ambiguity arises, which implies that +/// there are unresolved type variables in the projection, we will +/// substitute a fresh type variable `$X` and generate a new +/// obligation `::Item == $X` for later. +pub fn normalize_projection_type<'a, 'b, 'gcx, 'tcx>( + selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>, + depth: usize) + -> NormalizedTy<'tcx> +{ + opt_normalize_projection_type(selcx, projection_ty.clone(), cause.clone(), depth) + .unwrap_or_else(move || { + // if we bottom out in ambiguity, create a type variable + // and a deferred predicate to resolve this when more type + // information is available. + + let ty_var = selcx.infcx().next_ty_var(); + let projection = ty::Binder(ty::ProjectionPredicate { + projection_ty: projection_ty, + ty: ty_var + }); + let obligation = Obligation::with_depth( + cause, depth + 1, projection.to_predicate()); + Normalized { + value: ty_var, + obligations: vec![obligation] + } + }) +} + +/// The guts of `normalize`: normalize a specific projection like `::Item`. 
The result is always a type (and possibly +/// additional obligations). Returns `None` in the case of ambiguity, +/// which indicates that there are unbound type variables. +fn opt_normalize_projection_type<'a, 'b, 'gcx, 'tcx>( + selcx: &'a mut SelectionContext<'b, 'gcx, 'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>, + depth: usize) + -> Option> +{ + let infcx = selcx.infcx(); + + let projection_ty = infcx.resolve_type_vars_if_possible(&projection_ty); + + debug!("opt_normalize_projection_type(\ + projection_ty={:?}, \ + depth={})", + projection_ty, + depth); + + // FIXME(#20304) For now, I am caching here, which is good, but it + // means we don't capture the type variables that are created in + // the case of ambiguity. Which means we may create a large stream + // of such variables. OTOH, if we move the caching up a level, we + // would not benefit from caching when proving `T: Trait` + // bounds. It might be the case that we want two distinct caches, + // or else another kind of cache entry. + + match infcx.projection_cache.borrow_mut().try_start(projection_ty) { + Ok(()) => { } + Err(ProjectionCacheEntry::Ambiguous) => { + // If we found ambiguity the last time, that generally + // means we will continue to do so until some type in the + // key changes (and we know it hasn't, because we just + // fully resolved it). One exception though is closure + // types, which can transition from having a fixed kind to + // no kind with no visible change in the key. + // + // FIXME(#32286) refactor this so that closure type + // changes + debug!("opt_normalize_projection_type: \ + found cache entry: ambiguous"); + if !projection_ty.has_closure_types() { + return None; + } + } + Err(ProjectionCacheEntry::InProgress) => { + // If while normalized A::B, we are asked to normalize + // A::B, just return A::B itself. 
This is a conservative + // answer, in the sense that A::B *is* clearly equivalent + // to A::B, though there may be a better value we can + // find. + + // Under lazy normalization, this can arise when + // bootstrapping. That is, imagine an environment with a + // where-clause like `A::B == u32`. Now, if we are asked + // to normalize `A::B`, we will want to check the + // where-clauses in scope. So we will try to unify `A::B` + // with `A::B`, which can trigger a recursive + // normalization. In that case, I think we will want this code: + // + // ``` + // let ty = selcx.tcx().mk_projection(projection_ty.trait_ref, + // projection_ty.item_name); + // return Some(NormalizedTy { value: v, obligations: vec![] }); + // ``` + + debug!("opt_normalize_projection_type: \ + found cache entry: in-progress"); + + // But for now, let's classify this as an overflow: + let recursion_limit = selcx.tcx().sess.recursion_limit.get(); + let obligation = Obligation::with_depth(cause.clone(), + recursion_limit, + projection_ty); + selcx.infcx().report_overflow_error(&obligation, false); + } + Err(ProjectionCacheEntry::NormalizedTy(ty)) => { + // If we find the value in the cache, then the obligations + // have already been returned from the previous entry (and + // should therefore have been honored). 
+ debug!("opt_normalize_projection_type: \ + found normalized ty `{:?}`", + ty); + return Some(NormalizedTy { value: ty, obligations: vec![] }); + } + Err(ProjectionCacheEntry::Error) => { + debug!("opt_normalize_projection_type: \ + found error"); + return Some(normalize_to_error(selcx, projection_ty, cause, depth)); + } + } + + let obligation = Obligation::with_depth(cause.clone(), depth, projection_ty.clone()); + match project_type(selcx, &obligation) { + Ok(ProjectedTy::Progress(Progress { ty: projected_ty, + mut obligations, + cacheable })) => { + // if projection succeeded, then what we get out of this + // is also non-normalized (consider: it was derived from + // an impl, where-clause etc) and hence we must + // re-normalize it + + debug!("opt_normalize_projection_type: \ + projected_ty={:?} \ + depth={} \ + obligations={:?} \ + cacheable={:?}", + projected_ty, + depth, + obligations, + cacheable); + + let result = if projected_ty.has_projection_types() { + let mut normalizer = AssociatedTypeNormalizer::new(selcx, cause, depth+1); + let normalized_ty = normalizer.fold(&projected_ty); + + debug!("opt_normalize_projection_type: \ + normalized_ty={:?} depth={}", + normalized_ty, + depth); + + obligations.extend(normalizer.obligations); + Normalized { + value: normalized_ty, + obligations: obligations, + } + } else { + Normalized { + value: projected_ty, + obligations: obligations, + } + }; + infcx.projection_cache.borrow_mut() + .complete(projection_ty, &result, cacheable); + Some(result) + } + Ok(ProjectedTy::NoProgress(projected_ty)) => { + debug!("opt_normalize_projection_type: \ + projected_ty={:?} no progress", + projected_ty); + let result = Normalized { + value: projected_ty, + obligations: vec![] + }; + infcx.projection_cache.borrow_mut() + .complete(projection_ty, &result, true); + Some(result) + } + Err(ProjectionTyError::TooManyCandidates) => { + debug!("opt_normalize_projection_type: \ + too many candidates"); + infcx.projection_cache.borrow_mut() 
+ .ambiguous(projection_ty); + None + } + Err(ProjectionTyError::TraitSelectionError(_)) => { + debug!("opt_normalize_projection_type: ERROR"); + // if we got an error processing the `T as Trait` part, + // just return `ty::err` but add the obligation `T : + // Trait`, which when processed will cause the error to be + // reported later + + infcx.projection_cache.borrow_mut() + .error(projection_ty); + Some(normalize_to_error(selcx, projection_ty, cause, depth)) + } + } +} + +/// If we are projecting `::Item`, but `T: Trait` does not +/// hold. In various error cases, we cannot generate a valid +/// normalized projection. Therefore, we create an inference variable +/// return an associated obligation that, when fulfilled, will lead to +/// an error. +/// +/// Note that we used to return `TyError` here, but that was quite +/// dubious -- the premise was that an error would *eventually* be +/// reported, when the obligation was processed. But in general once +/// you see a `TyError` you are supposed to be able to assume that an +/// error *has been* reported, so that you can take whatever heuristic +/// paths you want to take. To make things worse, it was possible for +/// cycles to arise, where you basically had a setup like ` +/// as Trait>::Foo == $0`. Here, normalizing ` as +/// Trait>::Foo> to `[type error]` would lead to an obligation of +/// ` as Trait>::Foo`. We are supposed to report +/// an error for this obligation, but we legitimately should not, +/// because it contains `[type error]`. Yuck! (See issue #29857 for +/// one case where this arose.) 
+fn normalize_to_error<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + cause: ObligationCause<'tcx>, + depth: usize) + -> NormalizedTy<'tcx> +{ + let trait_ref = projection_ty.trait_ref.to_poly_trait_ref(); + let trait_obligation = Obligation { cause: cause, + recursion_depth: depth, + predicate: trait_ref.to_predicate() }; + let new_value = selcx.infcx().next_ty_var(); + Normalized { + value: new_value, + obligations: vec![trait_obligation] + } +} + +enum ProjectedTy<'tcx> { + Progress(Progress<'tcx>), + NoProgress(Ty<'tcx>), +} + +struct Progress<'tcx> { + ty: Ty<'tcx>, + obligations: Vec>, + cacheable: bool, +} + +impl<'tcx> Progress<'tcx> { + fn error<'a,'gcx>(tcx: TyCtxt<'a,'gcx,'tcx>) -> Self { + Progress { + ty: tcx.types.err, + obligations: vec![], + cacheable: true + } + } + + fn with_addl_obligations(mut self, + mut obligations: Vec>) + -> Self { + debug!("with_addl_obligations: self.obligations.len={} obligations.len={}", + self.obligations.len(), obligations.len()); + + debug!("with_addl_obligations: self.obligations={:?} obligations={:?}", + self.obligations, obligations); + + self.obligations.append(&mut obligations); + self + } +} + +/// Compute the result of a projection type (if we can). 
+/// +/// IMPORTANT: +/// - `obligation` must be fully normalized +fn project_type<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>) + -> Result, ProjectionTyError<'tcx>> +{ + debug!("project(obligation={:?})", + obligation); + + let recursion_limit = selcx.tcx().sess.recursion_limit.get(); + if obligation.recursion_depth >= recursion_limit { + debug!("project: overflow!"); + selcx.infcx().report_overflow_error(&obligation, true); + } + + let obligation_trait_ref = &obligation.predicate.trait_ref; + + debug!("project: obligation_trait_ref={:?}", obligation_trait_ref); + + if obligation_trait_ref.references_error() { + return Ok(ProjectedTy::Progress(Progress::error(selcx.tcx()))); + } + + let mut candidates = ProjectionTyCandidateSet { + vec: Vec::new(), + ambiguous: false, + }; + + assemble_candidates_from_param_env(selcx, + obligation, + &obligation_trait_ref, + &mut candidates); + + assemble_candidates_from_trait_def(selcx, + obligation, + &obligation_trait_ref, + &mut candidates); + + if let Err(e) = assemble_candidates_from_impls(selcx, + obligation, + &obligation_trait_ref, + &mut candidates) { + return Err(ProjectionTyError::TraitSelectionError(e)); + } + + debug!("{} candidates, ambiguous={}", + candidates.vec.len(), + candidates.ambiguous); + + // Inherent ambiguity that prevents us from even enumerating the + // candidates. + if candidates.ambiguous { + return Err(ProjectionTyError::TooManyCandidates); + } + + // Drop duplicates. + // + // Note: `candidates.vec` seems to be on the critical path of the + // compiler. Replacing it with an hash set was also tried, which would + // render the following dedup unnecessary. It led to cleaner code but + // prolonged compiling time of `librustc` from 5m30s to 6m in one test, or + // ~9% performance lost. 
+ if candidates.vec.len() > 1 { + let mut i = 0; + while i < candidates.vec.len() { + let has_dup = (0..i).any(|j| candidates.vec[i] == candidates.vec[j]); + if has_dup { + candidates.vec.swap_remove(i); + } else { + i += 1; + } + } + } + + // Prefer where-clauses. As in select, if there are multiple + // candidates, we prefer where-clause candidates over impls. This + // may seem a bit surprising, since impls are the source of + // "truth" in some sense, but in fact some of the impls that SEEM + // applicable are not, because of nested obligations. Where + // clauses are the safer choice. See the comment on + // `select::SelectionCandidate` and #21974 for more details. + if candidates.vec.len() > 1 { + debug!("retaining param-env candidates only from {:?}", candidates.vec); + candidates.vec.retain(|c| match *c { + ProjectionTyCandidate::ParamEnv(..) => true, + ProjectionTyCandidate::TraitDef(..) | + ProjectionTyCandidate::Select => false, + }); + debug!("resulting candidate set: {:?}", candidates.vec); + if candidates.vec.len() != 1 { + return Err(ProjectionTyError::TooManyCandidates); + } + } + + assert!(candidates.vec.len() <= 1); + + match candidates.vec.pop() { + Some(candidate) => { + Ok(ProjectedTy::Progress( + confirm_candidate(selcx, + obligation, + &obligation_trait_ref, + candidate))) + } + None => { + Ok(ProjectedTy::NoProgress( + selcx.tcx().mk_projection( + obligation.predicate.trait_ref.clone(), + obligation.predicate.item_name))) + } + } +} + +/// The first thing we have to do is scan through the parameter +/// environment to see whether there are any projection predicates +/// there that can answer this question. 
+fn assemble_candidates_from_param_env<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>, + candidate_set: &mut ProjectionTyCandidateSet<'tcx>) +{ + debug!("assemble_candidates_from_param_env(..)"); + let env_predicates = selcx.param_env().caller_bounds.iter().cloned(); + assemble_candidates_from_predicates(selcx, + obligation, + obligation_trait_ref, + candidate_set, + ProjectionTyCandidate::ParamEnv, + env_predicates); +} + +/// In the case of a nested projection like <::FooT as Bar>::BarT, we may find +/// that the definition of `Foo` has some clues: +/// +/// ``` +/// trait Foo { +/// type FooT : Bar +/// } +/// ``` +/// +/// Here, for example, we could conclude that the result is `i32`. +fn assemble_candidates_from_trait_def<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>, + candidate_set: &mut ProjectionTyCandidateSet<'tcx>) +{ + debug!("assemble_candidates_from_trait_def(..)"); + + // Check whether the self-type is itself a projection. + let (def_id, substs) = match obligation_trait_ref.self_ty().sty { + ty::TyProjection(ref data) => { + (data.trait_ref.def_id, data.trait_ref.substs) + } + ty::TyAnon(def_id, substs) => (def_id, substs), + ty::TyInfer(ty::TyVar(_)) => { + // If the self-type is an inference variable, then it MAY wind up + // being a projected type, so induce an ambiguity. + candidate_set.ambiguous = true; + return; + } + _ => { return; } + }; + + // If so, extract what we know from the trait and try to come up with a good answer. 
+ let trait_predicates = selcx.tcx().item_predicates(def_id); + let bounds = trait_predicates.instantiate(selcx.tcx(), substs); + let bounds = elaborate_predicates(selcx.tcx(), bounds.predicates); + assemble_candidates_from_predicates(selcx, + obligation, + obligation_trait_ref, + candidate_set, + ProjectionTyCandidate::TraitDef, + bounds) +} + +fn assemble_candidates_from_predicates<'cx, 'gcx, 'tcx, I>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>, + candidate_set: &mut ProjectionTyCandidateSet<'tcx>, + ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionTyCandidate<'tcx>, + env_predicates: I) + where I: Iterator> +{ + debug!("assemble_candidates_from_predicates(obligation={:?})", + obligation); + let infcx = selcx.infcx(); + for predicate in env_predicates { + debug!("assemble_candidates_from_predicates: predicate={:?}", + predicate); + match predicate { + ty::Predicate::Projection(ref data) => { + let same_name = data.item_name() == obligation.predicate.item_name; + + let is_match = same_name && infcx.probe(|_| { + let data_poly_trait_ref = + data.to_poly_trait_ref(); + let obligation_poly_trait_ref = + obligation_trait_ref.to_poly_trait_ref(); + infcx.sub_poly_trait_refs(false, + obligation.cause.clone(), + data_poly_trait_ref, + obligation_poly_trait_ref) + .map(|InferOk { obligations: _, value: () }| { + // FIXME(#32730) -- do we need to take obligations + // into account in any way? At the moment, no. 
+ }) + .is_ok() + }); + + debug!("assemble_candidates_from_predicates: candidate={:?} \ + is_match={} same_name={}", + data, is_match, same_name); + + if is_match { + candidate_set.vec.push(ctor(data.clone())); + } + } + _ => { } + } + } +} + +fn assemble_candidates_from_impls<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>, + candidate_set: &mut ProjectionTyCandidateSet<'tcx>) + -> Result<(), SelectionError<'tcx>> +{ + // If we are resolving `>::Item == Type`, + // start out by selecting the predicate `T as TraitRef<...>`: + let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); + let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); + selcx.infcx().probe(|_| { + let vtable = match selcx.select(&trait_obligation) { + Ok(Some(vtable)) => vtable, + Ok(None) => { + candidate_set.ambiguous = true; + return Ok(()); + } + Err(e) => { + debug!("assemble_candidates_from_impls: selection error {:?}", + e); + return Err(e); + } + }; + + match vtable { + super::VtableClosure(_) | + super::VtableFnPointer(_) | + super::VtableObject(_) => { + debug!("assemble_candidates_from_impls: vtable={:?}", + vtable); + + candidate_set.vec.push(ProjectionTyCandidate::Select); + } + super::VtableImpl(ref impl_data) => { + // We have to be careful when projecting out of an + // impl because of specialization. If we are not in + // trans (i.e., projection mode is not "any"), and the + // impl's type is declared as default, then we disable + // projection (even if the trait ref is fully + // monomorphic). In the case where trait ref is not + // fully monomorphic (i.e., includes type parameters), + // this is because those type parameters may + // ultimately be bound to types from other crates that + // may have specialized impls we can't see. 
In the + // case where the trait ref IS fully monomorphic, this + // is a policy decision that we made in the RFC in + // order to preserve flexibility for the crate that + // defined the specializable impl to specialize later + // for existing types. + // + // In either case, we handle this by not adding a + // candidate for an impl if it contains a `default` + // type. + let opt_node_item = assoc_ty_def(selcx, + impl_data.impl_def_id, + obligation.predicate.item_name); + let new_candidate = if let Some(node_item) = opt_node_item { + let is_default = if node_item.node.is_from_trait() { + // If true, the impl inherited a `type Foo = Bar` + // given in the trait, which is implicitly default. + // Otherwise, the impl did not specify `type` and + // neither did the trait: + // + // ```rust + // trait Foo { type T; } + // impl Foo for Bar { } + // ``` + // + // This is an error, but it will be + // reported in `check_impl_items_against_trait`. + // We accept it here but will flag it as + // an error when we confirm the candidate + // (which will ultimately lead to `normalize_to_error` + // being invoked). + node_item.item.defaultness.has_value() + } else { + node_item.item.defaultness.is_default() + }; + + // Only reveal a specializable default if we're past type-checking + // and the obligations is monomorphic, otherwise passes such as + // transmute checking and polymorphic MIR optimizations could + // get a result which isn't correct for all monomorphizations. + if !is_default { + Some(ProjectionTyCandidate::Select) + } else if selcx.projection_mode() == Reveal::All { + assert!(!poly_trait_ref.needs_infer()); + if !poly_trait_ref.needs_subst() { + Some(ProjectionTyCandidate::Select) + } else { + None + } + } else { + None + } + } else { + // This is saying that neither the trait nor + // the impl contain a definition for this + // associated type. 
Normally this situation + // could only arise through a compiler bug -- + // if the user wrote a bad item name, it + // should have failed in astconv. **However**, + // at coherence-checking time, we only look at + // the topmost impl (we don't even consider + // the trait itself) for the definition -- and + // so in that case it may be that the trait + // *DOES* have a declaration, but we don't see + // it, and we end up in this branch. + // + // This is kind of tricky to handle actually. + // For now, we just unconditionally ICE, + // because otherwise, examples like the + // following will succeed: + // + // ``` + // trait Assoc { + // type Output; + // } + // + // impl Assoc for T { + // default type Output = bool; + // } + // + // impl Assoc for u8 {} + // impl Assoc for u16 {} + // + // trait Foo {} + // impl Foo for ::Output {} + // impl Foo for ::Output {} + // return None; + // } + // ``` + // + // The essential problem here is that the + // projection fails, leaving two unnormalized + // types, which appear not to unify -- so the + // overlap check succeeds, when it should + // fail. + span_bug!(obligation.cause.span, + "Tried to project an inherited associated type during \ + coherence checking, which is currently not supported."); + }; + candidate_set.vec.extend(new_candidate); + } + super::VtableParam(..) => { + // This case tell us nothing about the value of an + // associated type. Consider: + // + // ``` + // trait SomeTrait { type Foo; } + // fn foo(...) { } + // ``` + // + // If the user writes `::Foo`, then the `T + // : SomeTrait` binding does not help us decide what the + // type `Foo` is (at least, not more specifically than + // what we already knew). + // + // But wait, you say! What about an example like this: + // + // ``` + // fn bar>(...) { ... } + // ``` + // + // Doesn't the `T : Sometrait` predicate help + // resolve `T::Foo`? 
And of course it does, but in fact + // that single predicate is desugared into two predicates + // in the compiler: a trait predicate (`T : SomeTrait`) and a + // projection. And the projection where clause is handled + // in `assemble_candidates_from_param_env`. + } + super::VtableDefaultImpl(..) | + super::VtableBuiltin(..) => { + // These traits have no associated types. + span_bug!( + obligation.cause.span, + "Cannot project an associated type from `{:?}`", + vtable); + } + } + + Ok(()) + }) +} + +fn confirm_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>, + candidate: ProjectionTyCandidate<'tcx>) + -> Progress<'tcx> +{ + debug!("confirm_candidate(candidate={:?}, obligation={:?})", + candidate, + obligation); + + match candidate { + ProjectionTyCandidate::ParamEnv(poly_projection) | + ProjectionTyCandidate::TraitDef(poly_projection) => { + confirm_param_env_candidate(selcx, obligation, poly_projection) + } + + ProjectionTyCandidate::Select => { + confirm_select_candidate(selcx, obligation, obligation_trait_ref) + } + } +} + +fn confirm_select_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>) + -> Progress<'tcx> +{ + let poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); + let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate()); + let vtable = match selcx.select(&trait_obligation) { + Ok(Some(vtable)) => vtable, + _ => { + span_bug!( + obligation.cause.span, + "Failed to select `{:?}`", + trait_obligation); + } + }; + + match vtable { + super::VtableImpl(data) => + confirm_impl_candidate(selcx, obligation, data), + super::VtableClosure(data) => + confirm_closure_candidate(selcx, obligation, data), + super::VtableFnPointer(data) => + confirm_fn_pointer_candidate(selcx, obligation, data), + 
super::VtableObject(_) => + confirm_object_candidate(selcx, obligation, obligation_trait_ref), + super::VtableDefaultImpl(..) | + super::VtableParam(..) | + super::VtableBuiltin(..) => + // we don't create Select candidates with this kind of resolution + span_bug!( + obligation.cause.span, + "Cannot project an associated type from `{:?}`", + vtable), + } +} + +fn confirm_object_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + obligation_trait_ref: &ty::TraitRef<'tcx>) + -> Progress<'tcx> +{ + let self_ty = obligation_trait_ref.self_ty(); + let object_ty = selcx.infcx().shallow_resolve(self_ty); + debug!("confirm_object_candidate(object_ty={:?})", + object_ty); + let data = match object_ty.sty { + ty::TyDynamic(ref data, ..) => data, + _ => { + span_bug!( + obligation.cause.span, + "confirm_object_candidate called with non-object: {:?}", + object_ty) + } + }; + let env_predicates = data.projection_bounds().map(|p| { + p.with_self_ty(selcx.tcx(), object_ty).to_predicate() + }).collect(); + let env_predicate = { + let env_predicates = elaborate_predicates(selcx.tcx(), env_predicates); + + // select only those projections that are actually projecting an + // item with the correct name + let env_predicates = env_predicates.filter_map(|p| match p { + ty::Predicate::Projection(data) => + if data.item_name() == obligation.predicate.item_name { + Some(data) + } else { + None + }, + _ => None + }); + + // select those with a relevant trait-ref + let mut env_predicates = env_predicates.filter(|data| { + let data_poly_trait_ref = data.to_poly_trait_ref(); + let obligation_poly_trait_ref = obligation_trait_ref.to_poly_trait_ref(); + selcx.infcx().probe(|_| { + selcx.infcx().sub_poly_trait_refs(false, + obligation.cause.clone(), + data_poly_trait_ref, + obligation_poly_trait_ref).is_ok() + }) + }); + + // select the first matching one; there really ought to be one or + // else the object type is not WF, 
since an object type should + // include all of its projections explicitly + match env_predicates.next() { + Some(env_predicate) => env_predicate, + None => { + debug!("confirm_object_candidate: no env-predicate \ + found in object type `{:?}`; ill-formed", + object_ty); + return Progress::error(selcx.tcx()); + } + } + }; + + confirm_param_env_candidate(selcx, obligation, env_predicate) +} + +fn confirm_fn_pointer_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + fn_pointer_vtable: VtableFnPointerData<'tcx, PredicateObligation<'tcx>>) + -> Progress<'tcx> +{ + let fn_type = selcx.infcx().shallow_resolve(fn_pointer_vtable.fn_ty); + let sig = fn_type.fn_sig(); + confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes) + .with_addl_obligations(fn_pointer_vtable.nested) +} + +fn confirm_closure_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + vtable: VtableClosureData<'tcx, PredicateObligation<'tcx>>) + -> Progress<'tcx> +{ + let closure_typer = selcx.closure_typer(); + let closure_type = closure_typer.closure_type(vtable.closure_def_id, vtable.substs); + let Normalized { + value: closure_type, + obligations + } = normalize_with_depth(selcx, + obligation.cause.clone(), + obligation.recursion_depth+1, + &closure_type); + + debug!("confirm_closure_candidate: obligation={:?},closure_type={:?},obligations={:?}", + obligation, + closure_type, + obligations); + + confirm_callable_candidate(selcx, + obligation, + &closure_type.sig, + util::TupleArgumentsFlag::No) + .with_addl_obligations(vtable.nested) + .with_addl_obligations(obligations) +} + +fn confirm_callable_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + fn_sig: &ty::PolyFnSig<'tcx>, + flag: util::TupleArgumentsFlag) + -> Progress<'tcx> +{ + let tcx = selcx.tcx(); + + 
debug!("confirm_callable_candidate({:?},{:?})", + obligation, + fn_sig); + + // the `Output` associated type is declared on `FnOnce` + let fn_once_def_id = tcx.lang_items.fn_once_trait().unwrap(); + + // Note: we unwrap the binder here but re-create it below (1) + let ty::Binder((trait_ref, ret_type)) = + tcx.closure_trait_ref_and_return_type(fn_once_def_id, + obligation.predicate.trait_ref.self_ty(), + fn_sig, + flag); + + let predicate = ty::Binder(ty::ProjectionPredicate { // (1) recreate binder here + projection_ty: ty::ProjectionTy { + trait_ref: trait_ref, + item_name: Symbol::intern(FN_OUTPUT_NAME), + }, + ty: ret_type + }); + + confirm_param_env_candidate(selcx, obligation, predicate) +} + +fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + poly_projection: ty::PolyProjectionPredicate<'tcx>) + -> Progress<'tcx> +{ + let infcx = selcx.infcx(); + let cause = obligation.cause.clone(); + let trait_ref = obligation.predicate.trait_ref; + match infcx.match_poly_projection_predicate(cause, poly_projection, trait_ref) { + Ok(InferOk { value: ty_match, obligations }) => { + Progress { + ty: ty_match.value, + obligations: obligations, + cacheable: ty_match.unconstrained_regions.is_empty(), + } + } + Err(e) => { + span_bug!( + obligation.cause.span, + "Failed to unify obligation `{:?}` \ + with poly_projection `{:?}`: {:?}", + obligation, + poly_projection, + e); + } + } +} + +fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + obligation: &ProjectionTyObligation<'tcx>, + impl_vtable: VtableImplData<'tcx, PredicateObligation<'tcx>>) + -> Progress<'tcx> +{ + let VtableImplData { substs, nested, impl_def_id } = impl_vtable; + + let tcx = selcx.tcx(); + let trait_ref = obligation.predicate.trait_ref; + let assoc_ty = assoc_ty_def(selcx, impl_def_id, obligation.predicate.item_name); + + match assoc_ty { + Some(node_item) => { + let ty = 
if !node_item.item.defaultness.has_value() { + // This means that the impl is missing a definition for the + // associated type. This error will be reported by the type + // checker method `check_impl_items_against_trait`, so here we + // just return TyError. + debug!("confirm_impl_candidate: no associated type {:?} for {:?}", + node_item.item.name, + obligation.predicate.trait_ref); + tcx.types.err + } else { + tcx.item_type(node_item.item.def_id) + }; + let substs = translate_substs(selcx.infcx(), impl_def_id, substs, node_item.node); + Progress { + ty: ty.subst(tcx, substs), + obligations: nested, + cacheable: true + } + } + None => { + span_bug!(obligation.cause.span, + "No associated type for {:?}", + trait_ref); + } + } +} + +/// Locate the definition of an associated type in the specialization hierarchy, +/// starting from the given impl. +/// +/// Based on the "projection mode", this lookup may in fact only examine the +/// topmost impl. See the comments for `Reveal` for more details. 
+fn assoc_ty_def<'cx, 'gcx, 'tcx>( + selcx: &SelectionContext<'cx, 'gcx, 'tcx>, + impl_def_id: DefId, + assoc_ty_name: ast::Name) + -> Option> +{ + let trait_def_id = selcx.tcx().impl_trait_ref(impl_def_id).unwrap().def_id; + + if selcx.projection_mode() == Reveal::ExactMatch { + let impl_node = specialization_graph::Node::Impl(impl_def_id); + for item in impl_node.items(selcx.tcx()) { + if item.kind == ty::AssociatedKind::Type && item.name == assoc_ty_name { + return Some(specialization_graph::NodeItem { + node: specialization_graph::Node::Impl(impl_def_id), + item: item, + }); + } + } + None + } else { + selcx.tcx().lookup_trait_def(trait_def_id) + .ancestors(impl_def_id) + .defs(selcx.tcx(), assoc_ty_name, ty::AssociatedKind::Type) + .next() + } +} + +// # Cache + +pub struct ProjectionCache<'tcx> { + map: SnapshotMap, ProjectionCacheEntry<'tcx>>, +} + +#[derive(Clone, Debug)] +enum ProjectionCacheEntry<'tcx> { + InProgress, + Ambiguous, + Error, + NormalizedTy(Ty<'tcx>), +} + +// NB: intentionally not Clone +pub struct ProjectionCacheSnapshot { + snapshot: Snapshot +} + +impl<'tcx> ProjectionCache<'tcx> { + pub fn new() -> Self { + ProjectionCache { + map: SnapshotMap::new() + } + } + + pub fn snapshot(&mut self) -> ProjectionCacheSnapshot { + ProjectionCacheSnapshot { snapshot: self.map.snapshot() } + } + + pub fn rollback_to(&mut self, snapshot: ProjectionCacheSnapshot) { + self.map.rollback_to(snapshot.snapshot); + } + + pub fn rollback_skolemized(&mut self, snapshot: &ProjectionCacheSnapshot) { + self.map.partial_rollback(&snapshot.snapshot, &|k| k.has_re_skol()); + } + + pub fn commit(&mut self, snapshot: ProjectionCacheSnapshot) { + self.map.commit(snapshot.snapshot); + } + + /// Try to start normalize `key`; returns an error if + /// normalization already occured (this error corresponds to a + /// cache hit, so it's actually a good thing). 
+ fn try_start(&mut self, key: ty::ProjectionTy<'tcx>) + -> Result<(), ProjectionCacheEntry<'tcx>> { + if let Some(entry) = self.map.get(&key) { + return Err(entry.clone()); + } + + self.map.insert(key, ProjectionCacheEntry::InProgress); + Ok(()) + } + + /// Indicates that `key` was normalized to `value`. If `cacheable` is false, + /// then this result is sadly not cacheable. + fn complete(&mut self, + key: ty::ProjectionTy<'tcx>, + value: &NormalizedTy<'tcx>, + cacheable: bool) { + let fresh_key = if cacheable { + debug!("ProjectionCacheEntry::complete: adding cache entry: key={:?}, value={:?}", + key, value); + self.map.insert(key, ProjectionCacheEntry::NormalizedTy(value.value)) + } else { + debug!("ProjectionCacheEntry::complete: cannot cache: key={:?}, value={:?}", + key, value); + !self.map.remove(key) + }; + + assert!(!fresh_key, "never started projecting `{:?}`", key); + } + + /// Indicates that trying to normalize `key` resulted in + /// ambiguity. No point in trying it again then until we gain more + /// type information (in which case, the "fully resolved" key will + /// be different). + fn ambiguous(&mut self, key: ty::ProjectionTy<'tcx>) { + let fresh = self.map.insert(key, ProjectionCacheEntry::Ambiguous); + assert!(!fresh, "never started projecting `{:?}`", key); + } + + /// Indicates that trying to normalize `key` resulted in + /// error. + fn error(&mut self, key: ty::ProjectionTy<'tcx>) { + let fresh = self.map.insert(key, ProjectionCacheEntry::Error); + assert!(!fresh, "never started projecting `{:?}`", key); + } +} diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs new file mode 100644 index 0000000000000..c54c0bf74ef7a --- /dev/null +++ b/src/librustc/traits/select.rs @@ -0,0 +1,2995 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! See `README.md` for high-level documentation + +pub use self::MethodMatchResult::*; +pub use self::MethodMatchedData::*; +use self::SelectionCandidate::*; +use self::EvaluationResult::*; + +use super::coherence; +use super::DerivedObligationCause; +use super::project; +use super::project::{normalize_with_depth, Normalized}; +use super::{PredicateObligation, TraitObligation, ObligationCause}; +use super::{ObligationCauseCode, BuiltinDerivedObligation, ImplDerivedObligation}; +use super::{SelectionError, Unimplemented, OutputTypeParameterMismatch}; +use super::{ObjectCastObligation, Obligation}; +use super::Reveal; +use super::TraitNotObjectSafe; +use super::Selection; +use super::SelectionResult; +use super::{VtableBuiltin, VtableImpl, VtableParam, VtableClosure, + VtableFnPointer, VtableObject, VtableDefaultImpl}; +use super::{VtableImplData, VtableObjectData, VtableBuiltinData, + VtableClosureData, VtableDefaultImplData, VtableFnPointerData}; +use super::util; + +use hir::def_id::DefId; +use infer; +use infer::{InferCtxt, InferOk, TypeFreshener}; +use ty::subst::{Kind, Subst, Substs}; +use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; +use traits; +use ty::fast_reject; +use ty::relate::TypeRelation; +use middle::lang_items; + +use rustc_data_structures::bitvec::BitVector; +use rustc_data_structures::snapshot_vec::{SnapshotVecDelegate, SnapshotVec}; +use std::cell::RefCell; +use std::fmt; +use std::marker::PhantomData; +use std::mem; +use std::rc::Rc; +use syntax::abi::Abi; +use hir; +use util::nodemap::FxHashMap; + +struct InferredObligationsSnapshotVecDelegate<'tcx> { + phantom: PhantomData<&'tcx i32>, +} +impl<'tcx> SnapshotVecDelegate for InferredObligationsSnapshotVecDelegate<'tcx> { + type Value = PredicateObligation<'tcx>; + type Undo = (); + 
fn reverse(_: &mut Vec, _: Self::Undo) {} +} + +pub struct SelectionContext<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> { + infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, + + /// Freshener used specifically for skolemizing entries on the + /// obligation stack. This ensures that all entries on the stack + /// at one time will have the same set of skolemized entries, + /// which is important for checking for trait bounds that + /// recursively require themselves. + freshener: TypeFreshener<'cx, 'gcx, 'tcx>, + + /// If true, indicates that the evaluation should be conservative + /// and consider the possibility of types outside this crate. + /// This comes up primarily when resolving ambiguity. Imagine + /// there is some trait reference `$0 : Bar` where `$0` is an + /// inference variable. If `intercrate` is true, then we can never + /// say for sure that this reference is not implemented, even if + /// there are *no impls at all for `Bar`*, because `$0` could be + /// bound to some type that in a downstream crate that implements + /// `Bar`. This is the suitable mode for coherence. Elsewhere, + /// though, we set this to false, because we are only interested + /// in types that the user could actually have written --- in + /// other words, we consider `$0 : Bar` to be unimplemented if + /// there is no type that the user could *actually name* that + /// would satisfy it. This avoids crippling inference, basically. + intercrate: bool, + + inferred_obligations: SnapshotVec>, +} + +// A stack that walks back up the stack frame. +struct TraitObligationStack<'prev, 'tcx: 'prev> { + obligation: &'prev TraitObligation<'tcx>, + + /// Trait ref from `obligation` but skolemized with the + /// selection-context's freshener. Used to check for recursion. 
+ fresh_trait_ref: ty::PolyTraitRef<'tcx>, + + previous: TraitObligationStackList<'prev, 'tcx>, +} + +#[derive(Clone)] +pub struct SelectionCache<'tcx> { + hashmap: RefCell, + SelectionResult<'tcx, SelectionCandidate<'tcx>>>>, +} + +pub enum MethodMatchResult { + MethodMatched(MethodMatchedData), + MethodAmbiguous(/* list of impls that could apply */ Vec), + MethodDidNotMatch, +} + +#[derive(Copy, Clone, Debug)] +pub enum MethodMatchedData { + // In the case of a precise match, we don't really need to store + // how the match was found. So don't. + PreciseMethodMatch, + + // In the case of a coercion, we need to know the precise impl so + // that we can determine the type to which things were coerced. + CoerciveMethodMatch(/* impl we matched */ DefId) +} + +/// The selection process begins by considering all impls, where +/// clauses, and so forth that might resolve an obligation. Sometimes +/// we'll be able to say definitively that (e.g.) an impl does not +/// apply to the obligation: perhaps it is defined for `usize` but the +/// obligation is for `int`. In that case, we drop the impl out of the +/// list. But the other cases are considered *candidates*. +/// +/// For selection to succeed, there must be exactly one matching +/// candidate. If the obligation is fully known, this is guaranteed +/// by coherence. However, if the obligation contains type parameters +/// or variables, there may be multiple such impls. +/// +/// It is not a real problem if multiple matching impls exist because +/// of type variables - it just means the obligation isn't sufficiently +/// elaborated. In that case we report an ambiguity, and the caller can +/// try again after more type information has been gathered or report a +/// "type annotations required" error. 
+/// +/// However, with type parameters, this can be a real problem - type +/// parameters don't unify with regular types, but they *can* unify +/// with variables from blanket impls, and (unless we know its bounds +/// will always be satisfied) picking the blanket impl will be wrong +/// for at least *some* substitutions. To make this concrete, if we have +/// +/// trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; } +/// impl AsDebug for T { +/// type Out = T; +/// fn debug(self) -> fmt::Debug { self } +/// } +/// fn foo(t: T) { println!("{:?}", ::debug(t)); } +/// +/// we can't just use the impl to resolve the obligation +/// - a type from another crate (that doesn't implement fmt::Debug) could +/// implement AsDebug. +/// +/// Because where-clauses match the type exactly, multiple clauses can +/// only match if there are unresolved variables, and we can mostly just +/// report this ambiguity in that case. This is still a problem - we can't +/// *do anything* with ambiguities that involve only regions. This is issue +/// #21974. +/// +/// If a single where-clause matches and there are no inference +/// variables left, then it definitely matches and we can just select +/// it. +/// +/// In fact, we even select the where-clause when the obligation contains +/// inference variables. The can lead to inference making "leaps of logic", +/// for example in this situation: +/// +/// pub trait Foo { fn foo(&self) -> T; } +/// impl Foo<()> for T { fn foo(&self) { } } +/// impl Foo for bool { fn foo(&self) -> bool { *self } } +/// +/// pub fn foo(t: T) where T: Foo { +/// println!("{:?}", >::foo(&t)); +/// } +/// fn main() { foo(false); } +/// +/// Here the obligation > can be matched by both the blanket +/// impl and the where-clause. We select the where-clause and unify $0=bool, +/// so the program prints "false". However, if the where-clause is omitted, +/// the blanket impl is selected, we unify $0=(), and the program prints +/// "()". 
+/// +/// Exactly the same issues apply to projection and object candidates, except +/// that we can have both a projection candidate and a where-clause candidate +/// for the same obligation. In that case either would do (except that +/// different "leaps of logic" would occur if inference variables are +/// present), and we just pick the where-clause. This is, for example, +/// required for associated types to work in default impls, as the bounds +/// are visible both as projection bounds and as where-clauses from the +/// parameter environment. +#[derive(PartialEq,Eq,Debug,Clone)] +enum SelectionCandidate<'tcx> { + BuiltinCandidate { has_nested: bool }, + ParamCandidate(ty::PolyTraitRef<'tcx>), + ImplCandidate(DefId), + DefaultImplCandidate(DefId), + DefaultImplObjectCandidate(DefId), + + /// This is a trait matching with a projected type as `Self`, and + /// we found an applicable bound in the trait definition. + ProjectionCandidate, + + /// Implementation of a `Fn`-family trait by one of the anonymous types + /// generated for a `||` expression. The ty::ClosureKind informs the + /// confirmation step what ClosureKind obligation to emit. 
+ ClosureCandidate(/* closure */ DefId, ty::ClosureSubsts<'tcx>, ty::ClosureKind), + + /// Implementation of a `Fn`-family trait by one of the anonymous + /// types generated for a fn pointer type (e.g., `fn(int)->int`) + FnPointerCandidate, + + ObjectCandidate, + + BuiltinObjectCandidate, + + BuiltinUnsizeCandidate, +} + +impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> { + type Lifted = SelectionCandidate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + Some(match *self { + BuiltinCandidate { has_nested } => { + BuiltinCandidate { + has_nested: has_nested + } + } + ImplCandidate(def_id) => ImplCandidate(def_id), + DefaultImplCandidate(def_id) => DefaultImplCandidate(def_id), + DefaultImplObjectCandidate(def_id) => { + DefaultImplObjectCandidate(def_id) + } + ProjectionCandidate => ProjectionCandidate, + FnPointerCandidate => FnPointerCandidate, + ObjectCandidate => ObjectCandidate, + BuiltinObjectCandidate => BuiltinObjectCandidate, + BuiltinUnsizeCandidate => BuiltinUnsizeCandidate, + + ParamCandidate(ref trait_ref) => { + return tcx.lift(trait_ref).map(ParamCandidate); + } + ClosureCandidate(def_id, ref substs, kind) => { + return tcx.lift(substs).map(|substs| { + ClosureCandidate(def_id, substs, kind) + }); + } + }) + } +} + +struct SelectionCandidateSet<'tcx> { + // a list of candidates that definitely apply to the current + // obligation (meaning: types unify). + vec: Vec>, + + // if this is true, then there were candidates that might or might + // not have applied, but we couldn't tell. This occurs when some + // of the input types are type variables, in which case there are + // various "builtin" rules that might or might not trigger. + ambiguous: bool, +} + +#[derive(PartialEq,Eq,Debug,Clone)] +struct EvaluatedCandidate<'tcx> { + candidate: SelectionCandidate<'tcx>, + evaluation: EvaluationResult, +} + +/// When does the builtin impl for `T: Trait` apply? 
+enum BuiltinImplConditions<'tcx> { + /// The impl is conditional on T1,T2,.. : Trait + Where(ty::Binder>>), + /// There is no built-in impl. There may be some other + /// candidate (a where-clause or user-defined impl). + None, + /// There is *no* impl for this, builtin or not. Ignore + /// all where-clauses. + Never, + /// It is unknown whether there is an impl. + Ambiguous +} + +#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] +/// The result of trait evaluation. The order is important +/// here as the evaluation of a list is the maximum of the +/// evaluations. +enum EvaluationResult { + /// Evaluation successful + EvaluatedToOk, + /// Evaluation failed because of recursion - treated as ambiguous + EvaluatedToUnknown, + /// Evaluation is known to be ambiguous + EvaluatedToAmbig, + /// Evaluation failed + EvaluatedToErr, +} + +#[derive(Clone)] +pub struct EvaluationCache<'tcx> { + hashmap: RefCell, EvaluationResult>> +} + +impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { + pub fn new(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> { + SelectionContext { + infcx: infcx, + freshener: infcx.freshener(), + intercrate: false, + inferred_obligations: SnapshotVec::new(), + } + } + + pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>) -> SelectionContext<'cx, 'gcx, 'tcx> { + SelectionContext { + infcx: infcx, + freshener: infcx.freshener(), + intercrate: true, + inferred_obligations: SnapshotVec::new(), + } + } + + pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { + self.infcx + } + + pub fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> { + self.infcx.tcx + } + + pub fn param_env(&self) -> &'cx ty::ParameterEnvironment<'gcx> { + self.infcx.param_env() + } + + pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'gcx, 'tcx> { + self.infcx + } + + pub fn projection_mode(&self) -> Reveal { + self.infcx.projection_mode() + } + + /// Wraps the inference context's in_snapshot s.t. 
snapshot handling is only from the selection + /// context's self. + fn in_snapshot(&mut self, f: F) -> R + where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> R + { + // The irrefutable nature of the operation means we don't need to snapshot the + // inferred_obligations vector. + self.infcx.in_snapshot(|snapshot| f(self, snapshot)) + } + + /// Wraps a probe s.t. obligations collected during it are ignored and old obligations are + /// retained. + fn probe(&mut self, f: F) -> R + where F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> R + { + let inferred_obligations_snapshot = self.inferred_obligations.start_snapshot(); + let result = self.infcx.probe(|snapshot| f(self, snapshot)); + self.inferred_obligations.rollback_to(inferred_obligations_snapshot); + result + } + + /// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if + /// the transaction fails and s.t. old obligations are retained. + fn commit_if_ok(&mut self, f: F) -> Result where + F: FnOnce(&mut Self, &infer::CombinedSnapshot) -> Result + { + let inferred_obligations_snapshot = self.inferred_obligations.start_snapshot(); + match self.infcx.commit_if_ok(|snapshot| f(self, snapshot)) { + Ok(ok) => { + self.inferred_obligations.commit(inferred_obligations_snapshot); + Ok(ok) + }, + Err(err) => { + self.inferred_obligations.rollback_to(inferred_obligations_snapshot); + Err(err) + } + } + } + + + /////////////////////////////////////////////////////////////////////////// + // Selection + // + // The selection phase tries to identify *how* an obligation will + // be resolved. For example, it will identify which impl or + // parameter bound is to be used. The process can be inconclusive + // if the self type in the obligation is not fully inferred. Selection + // can result in an error in one of two ways: + // + // 1. If no applicable impl or parameter bound can be found. + // 2. 
If the output type parameters in the obligation do not match + // those specified by the impl/bound. For example, if the obligation + // is `Vec:Iterable`, but the impl specifies + // `impl Iterable for Vec`, than an error would result. + + /// Attempts to satisfy the obligation. If successful, this will affect the surrounding + /// type environment by performing unification. + pub fn select(&mut self, obligation: &TraitObligation<'tcx>) + -> SelectionResult<'tcx, Selection<'tcx>> { + debug!("select({:?})", obligation); + assert!(!obligation.predicate.has_escaping_regions()); + + let dep_node = obligation.predicate.dep_node(); + let _task = self.tcx().dep_graph.in_task(dep_node); + + let stack = self.push_stack(TraitObligationStackList::empty(), obligation); + match self.candidate_from_obligation(&stack)? { + None => Ok(None), + Some(candidate) => { + let mut candidate = self.confirm_candidate(obligation, candidate)?; + let inferred_obligations = (*self.inferred_obligations).into_iter().cloned(); + candidate.nested_obligations_mut().extend(inferred_obligations); + Ok(Some(candidate)) + }, + } + } + + /////////////////////////////////////////////////////////////////////////// + // EVALUATION + // + // Tests whether an obligation can be selected or whether an impl + // can be applied to particular types. It skips the "confirmation" + // step and hence completely ignores output type parameters. + // + // The result is "true" if the obligation *may* hold and "false" if + // we can be sure it does not. + + /// Evaluates whether the obligation `obligation` can be satisfied (by any means). 
+ pub fn evaluate_obligation(&mut self, + obligation: &PredicateObligation<'tcx>) + -> bool + { + debug!("evaluate_obligation({:?})", + obligation); + + self.probe(|this, _| { + this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) + .may_apply() + }) + } + + /// Evaluates whether the obligation `obligation` can be satisfied, + /// and returns `false` if not certain. However, this is not entirely + /// accurate if inference variables are involved. + pub fn evaluate_obligation_conservatively(&mut self, + obligation: &PredicateObligation<'tcx>) + -> bool + { + debug!("evaluate_obligation_conservatively({:?})", + obligation); + + self.probe(|this, _| { + this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) + == EvaluatedToOk + }) + } + + /// Evaluates the predicates in `predicates` recursively. Note that + /// this applies projections in the predicates, and therefore + /// is run within an inference probe. + fn evaluate_predicates_recursively<'a,'o,I>(&mut self, + stack: TraitObligationStackList<'o, 'tcx>, + predicates: I) + -> EvaluationResult + where I : Iterator>, 'tcx:'a + { + let mut result = EvaluatedToOk; + for obligation in predicates { + let eval = self.evaluate_predicate_recursively(stack, obligation); + debug!("evaluate_predicate_recursively({:?}) = {:?}", + obligation, eval); + match eval { + EvaluatedToErr => { return EvaluatedToErr; } + EvaluatedToAmbig => { result = EvaluatedToAmbig; } + EvaluatedToUnknown => { + if result < EvaluatedToUnknown { + result = EvaluatedToUnknown; + } + } + EvaluatedToOk => { } + } + } + result + } + + fn evaluate_predicate_recursively<'o>(&mut self, + previous_stack: TraitObligationStackList<'o, 'tcx>, + obligation: &PredicateObligation<'tcx>) + -> EvaluationResult + { + debug!("evaluate_predicate_recursively({:?})", + obligation); + + // Check the cache from the tcx of predicates that we know + // have been proven elsewhere. 
This cache only contains + // predicates that are global in scope and hence unaffected by + // the current environment. + if self.tcx().fulfilled_predicates.borrow().check_duplicate(&obligation.predicate) { + return EvaluatedToOk; + } + + match obligation.predicate { + ty::Predicate::Trait(ref t) => { + assert!(!t.has_escaping_regions()); + let obligation = obligation.with(t.clone()); + self.evaluate_obligation_recursively(previous_stack, &obligation) + } + + ty::Predicate::Equate(ref p) => { + // does this code ever run? + match self.infcx.equality_predicate(&obligation.cause, p) { + Ok(InferOk { obligations, .. }) => { + self.inferred_obligations.extend(obligations); + EvaluatedToOk + }, + Err(_) => EvaluatedToErr + } + } + + ty::Predicate::WellFormed(ty) => { + match ty::wf::obligations(self.infcx, obligation.cause.body_id, + ty, obligation.cause.span) { + Some(obligations) => + self.evaluate_predicates_recursively(previous_stack, obligations.iter()), + None => + EvaluatedToAmbig, + } + } + + ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) 
=> { + // we do not consider region relationships when + // evaluating trait matches + EvaluatedToOk + } + + ty::Predicate::ObjectSafe(trait_def_id) => { + if self.tcx().is_object_safe(trait_def_id) { + EvaluatedToOk + } else { + EvaluatedToErr + } + } + + ty::Predicate::Projection(ref data) => { + let project_obligation = obligation.with(data.clone()); + match project::poly_project_and_unify_type(self, &project_obligation) { + Ok(Some(subobligations)) => { + self.evaluate_predicates_recursively(previous_stack, + subobligations.iter()) + } + Ok(None) => { + EvaluatedToAmbig + } + Err(_) => { + EvaluatedToErr + } + } + } + + ty::Predicate::ClosureKind(closure_def_id, kind) => { + match self.infcx.closure_kind(closure_def_id) { + Some(closure_kind) => { + if closure_kind.extends(kind) { + EvaluatedToOk + } else { + EvaluatedToErr + } + } + None => { + EvaluatedToAmbig + } + } + } + } + } + + fn evaluate_obligation_recursively<'o>(&mut self, + previous_stack: TraitObligationStackList<'o, 'tcx>, + obligation: &TraitObligation<'tcx>) + -> EvaluationResult + { + debug!("evaluate_obligation_recursively({:?})", + obligation); + + let stack = self.push_stack(previous_stack, obligation); + let fresh_trait_ref = stack.fresh_trait_ref; + if let Some(result) = self.check_evaluation_cache(fresh_trait_ref) { + debug!("CACHE HIT: EVAL({:?})={:?}", + fresh_trait_ref, + result); + return result; + } + + let result = self.evaluate_stack(&stack); + + debug!("CACHE MISS: EVAL({:?})={:?}", + fresh_trait_ref, + result); + self.insert_evaluation_cache(fresh_trait_ref, result); + + result + } + + fn evaluate_stack<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>) + -> EvaluationResult + { + // In intercrate mode, whenever any of the types are unbound, + // there can always be an impl. Even if there are no impls in + // this crate, perhaps the type would be unified with + // something from another crate that does provide an impl. 
+ // + // In intra mode, we must still be conservative. The reason is + // that we want to avoid cycles. Imagine an impl like: + // + // impl Eq for Vec + // + // and a trait reference like `$0 : Eq` where `$0` is an + // unbound variable. When we evaluate this trait-reference, we + // will unify `$0` with `Vec<$1>` (for some fresh variable + // `$1`), on the condition that `$1 : Eq`. We will then wind + // up with many candidates (since that are other `Eq` impls + // that apply) and try to winnow things down. This results in + // a recursive evaluation that `$1 : Eq` -- as you can + // imagine, this is just where we started. To avoid that, we + // check for unbound variables and return an ambiguous (hence possible) + // match if we've seen this trait before. + // + // This suffices to allow chains like `FnMut` implemented in + // terms of `Fn` etc, but we could probably make this more + // precise still. + let unbound_input_types = stack.fresh_trait_ref.input_types().any(|ty| ty.is_fresh()); + if unbound_input_types && self.intercrate { + debug!("evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous", + stack.fresh_trait_ref); + return EvaluatedToAmbig; + } + if unbound_input_types && + stack.iter().skip(1).any( + |prev| self.match_fresh_trait_refs(&stack.fresh_trait_ref, + &prev.fresh_trait_ref)) + { + debug!("evaluate_stack({:?}) --> unbound argument, recursive --> giving up", + stack.fresh_trait_ref); + return EvaluatedToUnknown; + } + + // If there is any previous entry on the stack that precisely + // matches this obligation, then we can assume that the + // obligation is satisfied for now (still all other conditions + // must be met of course). One obvious case this comes up is + // marker traits like `Send`. Think of a linked list: + // + // struct List { data: T, next: Option>> { + // + // `Box>` will be `Send` if `T` is `Send` and + // `Option>>` is `Send`, and in turn + // `Option>>` is `Send` if `Box>` is + // `Send`. 
+ // + // Note that we do this comparison using the `fresh_trait_ref` + // fields. Because these have all been skolemized using + // `self.freshener`, we can be sure that (a) this will not + // affect the inferencer state and (b) that if we see two + // skolemized types with the same index, they refer to the + // same unbound type variable. + if + stack.iter() + .skip(1) // skip top-most frame + .any(|prev| stack.fresh_trait_ref == prev.fresh_trait_ref) + { + debug!("evaluate_stack({:?}) --> recursive", + stack.fresh_trait_ref); + return EvaluatedToOk; + } + + match self.candidate_from_obligation(stack) { + Ok(Some(c)) => self.evaluate_candidate(stack, &c), + Ok(None) => EvaluatedToAmbig, + Err(..) => EvaluatedToErr + } + } + + /// Further evaluate `candidate` to decide whether all type parameters match and whether nested + /// obligations are met. Returns true if `candidate` remains viable after this further + /// scrutiny. + fn evaluate_candidate<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>, + candidate: &SelectionCandidate<'tcx>) + -> EvaluationResult + { + debug!("evaluate_candidate: depth={} candidate={:?}", + stack.obligation.recursion_depth, candidate); + let result = self.probe(|this, _| { + let candidate = (*candidate).clone(); + match this.confirm_candidate(stack.obligation, candidate) { + Ok(selection) => { + this.evaluate_predicates_recursively( + stack.list(), + selection.nested_obligations().iter()) + } + Err(..) 
=> EvaluatedToErr + } + }); + debug!("evaluate_candidate: depth={} result={:?}", + stack.obligation.recursion_depth, result); + result + } + + fn check_evaluation_cache(&self, trait_ref: ty::PolyTraitRef<'tcx>) + -> Option + { + if self.can_use_global_caches() { + let cache = self.tcx().evaluation_cache.hashmap.borrow(); + if let Some(cached) = cache.get(&trait_ref) { + return Some(cached.clone()); + } + } + self.infcx.evaluation_cache.hashmap.borrow().get(&trait_ref).cloned() + } + + fn insert_evaluation_cache(&mut self, + trait_ref: ty::PolyTraitRef<'tcx>, + result: EvaluationResult) + { + // Avoid caching results that depend on more than just the trait-ref: + // The stack can create EvaluatedToUnknown, and closure signatures + // being yet uninferred can create "spurious" EvaluatedToAmbig + // and EvaluatedToOk. + if result == EvaluatedToUnknown || + ((result == EvaluatedToAmbig || result == EvaluatedToOk) + && trait_ref.has_closure_types()) + { + return; + } + + if self.can_use_global_caches() { + let mut cache = self.tcx().evaluation_cache.hashmap.borrow_mut(); + if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) { + cache.insert(trait_ref, result); + return; + } + } + + self.infcx.evaluation_cache.hashmap.borrow_mut().insert(trait_ref, result); + } + + /////////////////////////////////////////////////////////////////////////// + // CANDIDATE ASSEMBLY + // + // The selection process begins by examining all in-scope impls, + // caller obligations, and so forth and assembling a list of + // candidates. See `README.md` and the `Candidate` type for more + // details. + + fn candidate_from_obligation<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>) + -> SelectionResult<'tcx, SelectionCandidate<'tcx>> + { + // Watch out for overflow. This intentionally bypasses (and does + // not update) the cache. 
+ let recursion_limit = self.infcx.tcx.sess.recursion_limit.get(); + if stack.obligation.recursion_depth >= recursion_limit { + self.infcx().report_overflow_error(&stack.obligation, true); + } + + // Check the cache. Note that we skolemize the trait-ref + // separately rather than using `stack.fresh_trait_ref` -- this + // is because we want the unbound variables to be replaced + // with fresh skolemized types starting from index 0. + let cache_fresh_trait_pred = + self.infcx.freshen(stack.obligation.predicate.clone()); + debug!("candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})", + cache_fresh_trait_pred, + stack); + assert!(!stack.obligation.predicate.has_escaping_regions()); + + if let Some(c) = self.check_candidate_cache(&cache_fresh_trait_pred) { + debug!("CACHE HIT: SELECT({:?})={:?}", + cache_fresh_trait_pred, + c); + return c; + } + + // If no match, compute result and insert into cache. + let candidate = self.candidate_from_obligation_no_cache(stack); + + if self.should_update_candidate_cache(&cache_fresh_trait_pred, &candidate) { + debug!("CACHE MISS: SELECT({:?})={:?}", + cache_fresh_trait_pred, candidate); + self.insert_candidate_cache(cache_fresh_trait_pred, candidate.clone()); + } + + candidate + } + + // Treat negative impls as unimplemented + fn filter_negative_impls(&self, candidate: SelectionCandidate<'tcx>) + -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { + if let ImplCandidate(def_id) = candidate { + if self.tcx().trait_impl_polarity(def_id) == hir::ImplPolarity::Negative { + return Err(Unimplemented) + } + } + Ok(Some(candidate)) + } + + fn candidate_from_obligation_no_cache<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>) + -> SelectionResult<'tcx, SelectionCandidate<'tcx>> + { + if stack.obligation.predicate.references_error() { + // If we encounter a `TyError`, we generally prefer the + // most "optimistic" result in response -- that is, the + // one least likely to report downstream errors. 
But + // because this routine is shared by coherence and by + // trait selection, there isn't an obvious "right" choice + // here in that respect, so we opt to just return + // ambiguity and let the upstream clients sort it out. + return Ok(None); + } + + if !self.is_knowable(stack) { + debug!("coherence stage: not knowable"); + return Ok(None); + } + + let candidate_set = self.assemble_candidates(stack)?; + + if candidate_set.ambiguous { + debug!("candidate set contains ambig"); + return Ok(None); + } + + let mut candidates = candidate_set.vec; + + debug!("assembled {} candidates for {:?}: {:?}", + candidates.len(), + stack, + candidates); + + // At this point, we know that each of the entries in the + // candidate set is *individually* applicable. Now we have to + // figure out if they contain mutual incompatibilities. This + // frequently arises if we have an unconstrained input type -- + // for example, we are looking for $0:Eq where $0 is some + // unconstrained type variable. In that case, we'll get a + // candidate which assumes $0 == int, one that assumes $0 == + // usize, etc. This spells an ambiguity. + + // If there is more than one candidate, first winnow them down + // by considering extra conditions (nested obligations and so + // forth). We don't winnow if there is exactly one + // candidate. This is a relatively minor distinction but it + // can lead to better inference and error-reporting. An + // example would be if there was an impl: + // + // impl Vec { fn push_clone(...) { ... } } + // + // and we were to see some code `foo.push_clone()` where `boo` + // is a `Vec` and `Bar` does not implement `Clone`. If + // we were to winnow, we'd wind up with zero candidates. + // Instead, we select the right impl now but report `Bar does + // not implement Clone`. 
+ if candidates.len() == 1 { + return self.filter_negative_impls(candidates.pop().unwrap()); + } + + // Winnow, but record the exact outcome of evaluation, which + // is needed for specialization. + let mut candidates: Vec<_> = candidates.into_iter().filter_map(|c| { + let eval = self.evaluate_candidate(stack, &c); + if eval.may_apply() { + Some(EvaluatedCandidate { + candidate: c, + evaluation: eval, + }) + } else { + None + } + }).collect(); + + // If there are STILL multiple candidate, we can further + // reduce the list by dropping duplicates -- including + // resolving specializations. + if candidates.len() > 1 { + let mut i = 0; + while i < candidates.len() { + let is_dup = + (0..candidates.len()) + .filter(|&j| i != j) + .any(|j| self.candidate_should_be_dropped_in_favor_of(&candidates[i], + &candidates[j])); + if is_dup { + debug!("Dropping candidate #{}/{}: {:?}", + i, candidates.len(), candidates[i]); + candidates.swap_remove(i); + } else { + debug!("Retaining candidate #{}/{}: {:?}", + i, candidates.len(), candidates[i]); + i += 1; + } + } + } + + // If there are *STILL* multiple candidates, give up and + // report ambiguity. + if candidates.len() > 1 { + debug!("multiple matches, ambig"); + return Ok(None); + } + + // If there are *NO* candidates, then there are no impls -- + // that we know of, anyway. Note that in the case where there + // are unbound type variables within the obligation, it might + // be the case that you could still satisfy the obligation + // from another crate by instantiating the type variables with + // a type from another crate that does have an impl. This case + // is checked for in `evaluate_stack` (and hence users + // who might care about this case, like coherence, should use + // that function). + if candidates.is_empty() { + return Err(Unimplemented); + } + + // Just one candidate left. 
+ self.filter_negative_impls(candidates.pop().unwrap().candidate) + } + + fn is_knowable<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>) + -> bool + { + debug!("is_knowable(intercrate={})", self.intercrate); + + if !self.intercrate { + return true; + } + + let obligation = &stack.obligation; + let predicate = self.infcx().resolve_type_vars_if_possible(&obligation.predicate); + + // ok to skip binder because of the nature of the + // trait-ref-is-knowable check, which does not care about + // bound regions + let trait_ref = &predicate.skip_binder().trait_ref; + + coherence::trait_ref_is_knowable(self.tcx(), trait_ref) + } + + /// Returns true if the global caches can be used. + /// Do note that if the type itself is not in the + /// global tcx, the local caches will be used. + fn can_use_global_caches(&self) -> bool { + // If there are any where-clauses in scope, then we always use + // a cache local to this particular scope. Otherwise, we + // switch to a global cache. We used to try and draw + // finer-grained distinctions, but that led to a serious of + // annoying and weird bugs like #22019 and #18290. This simple + // rule seems to be pretty clearly safe and also still retains + // a very high hit rate (~95% when compiling rustc). + if !self.param_env().caller_bounds.is_empty() { + return false; + } + + // Avoid using the master cache during coherence and just rely + // on the local cache. This effectively disables caching + // during coherence. It is really just a simplification to + // avoid us having to fear that coherence results "pollute" + // the master cache. Since coherence executes pretty quickly, + // it's not worth going to more trouble to increase the + // hit-rate I don't think. + if self.intercrate { + return false; + } + + // Otherwise, we can use the global cache. 
+ true + } + + fn check_candidate_cache(&mut self, + cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>) + -> Option>> + { + let trait_ref = &cache_fresh_trait_pred.0.trait_ref; + if self.can_use_global_caches() { + let cache = self.tcx().selection_cache.hashmap.borrow(); + if let Some(cached) = cache.get(&trait_ref) { + return Some(cached.clone()); + } + } + self.infcx.selection_cache.hashmap.borrow().get(trait_ref).cloned() + } + + fn insert_candidate_cache(&mut self, + cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, + candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>) + { + let trait_ref = cache_fresh_trait_pred.0.trait_ref; + if self.can_use_global_caches() { + let mut cache = self.tcx().selection_cache.hashmap.borrow_mut(); + if let Some(trait_ref) = self.tcx().lift_to_global(&trait_ref) { + if let Some(candidate) = self.tcx().lift_to_global(&candidate) { + cache.insert(trait_ref, candidate); + return; + } + } + } + + self.infcx.selection_cache.hashmap.borrow_mut().insert(trait_ref, candidate); + } + + fn should_update_candidate_cache(&mut self, + cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>, + candidate: &SelectionResult<'tcx, SelectionCandidate<'tcx>>) + -> bool + { + // In general, it's a good idea to cache results, even + // ambiguous ones, to save us some trouble later. But we have + // to be careful not to cache results that could be + // invalidated later by advances in inference. Normally, this + // is not an issue, because any inference variables whose + // types are not yet bound are "freshened" in the cache key, + // which means that if we later get the same request once that + // type variable IS bound, we'll have a different cache key. + // For example, if we have `Vec<_#0t> : Foo`, and `_#0t` is + // not yet known, we may cache the result as `None`. But if + // later `_#0t` is bound to `Bar`, then when we freshen we'll + // have `Vec : Foo` as the cache key. 
+ // + // HOWEVER, it CAN happen that we get an ambiguity result in + // one particular case around closures where the cache key + // would not change. That is when the precise types of the + // upvars that a closure references have not yet been figured + // out (i.e., because it is not yet known if they are captured + // by ref, and if by ref, what kind of ref). In these cases, + // when matching a builtin bound, we will yield back an + // ambiguous result. But the *cache key* is just the closure type, + // it doesn't capture the state of the upvar computation. + // + // To avoid this trap, just don't cache ambiguous results if + // the self-type contains no inference byproducts (that really + // shouldn't happen in other circumstances anyway, given + // coherence). + + match *candidate { + Ok(Some(_)) | Err(_) => true, + Ok(None) => cache_fresh_trait_pred.has_infer_types() + } + } + + fn assemble_candidates<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>) + -> Result, SelectionError<'tcx>> + { + let TraitObligationStack { obligation, .. } = *stack; + let ref obligation = Obligation { + cause: obligation.cause.clone(), + recursion_depth: obligation.recursion_depth, + predicate: self.infcx().resolve_type_vars_if_possible(&obligation.predicate) + }; + + if obligation.predicate.skip_binder().self_ty().is_ty_var() { + // FIXME(#20297): Self is a type variable (e.g. `_: AsRef`). + // + // This is somewhat problematic, as the current scheme can't really + // handle it turning to be a projection. This does end up as truly + // ambiguous in most cases anyway. + // + // Until this is fixed, take the fast path out - this also improves + // performance by preventing assemble_candidates_from_impls from + // matching every impl for this trait. + return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true }); + } + + let mut candidates = SelectionCandidateSet { + vec: Vec::new(), + ambiguous: false + }; + + // Other bounds. 
Consider both in-scope bounds from fn decl + // and applicable impls. There is a certain set of precedence rules here. + + let def_id = obligation.predicate.def_id(); + if self.tcx().lang_items.copy_trait() == Some(def_id) { + debug!("obligation self ty is {:?}", + obligation.predicate.0.self_ty()); + + // User-defined copy impls are permitted, but only for + // structs and enums. + self.assemble_candidates_from_impls(obligation, &mut candidates)?; + + // For other types, we'll use the builtin rules. + let copy_conditions = self.copy_conditions(obligation); + self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?; + } else if self.tcx().lang_items.sized_trait() == Some(def_id) { + // Sized is never implementable by end-users, it is + // always automatically computed. + let sized_conditions = self.sized_conditions(obligation); + self.assemble_builtin_bound_candidates(sized_conditions, + &mut candidates)?; + } else if self.tcx().lang_items.unsize_trait() == Some(def_id) { + self.assemble_candidates_for_unsizing(obligation, &mut candidates); + } else { + self.assemble_closure_candidates(obligation, &mut candidates)?; + self.assemble_fn_pointer_candidates(obligation, &mut candidates)?; + self.assemble_candidates_from_impls(obligation, &mut candidates)?; + self.assemble_candidates_from_object_ty(obligation, &mut candidates); + } + + self.assemble_candidates_from_projected_tys(obligation, &mut candidates); + self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?; + // Default implementations have lower priority, so we only + // consider triggering a default if there is no other impl that can apply. 
+ if candidates.vec.is_empty() { + self.assemble_candidates_from_default_impls(obligation, &mut candidates)?; + } + debug!("candidate list size: {}", candidates.vec.len()); + Ok(candidates) + } + + fn assemble_candidates_from_projected_tys(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + { + debug!("assemble_candidates_for_projected_tys({:?})", obligation); + + // FIXME(#20297) -- just examining the self-type is very simplistic + + // before we go into the whole skolemization thing, just + // quickly check if the self-type is a projection at all. + match obligation.predicate.0.trait_ref.self_ty().sty { + ty::TyProjection(_) | ty::TyAnon(..) => {} + ty::TyInfer(ty::TyVar(_)) => { + span_bug!(obligation.cause.span, + "Self=_ should have been handled by assemble_candidates"); + } + _ => return + } + + let result = self.probe(|this, snapshot| { + this.match_projection_obligation_against_definition_bounds(obligation, + snapshot) + }); + + if result { + candidates.vec.push(ProjectionCandidate); + } + } + + fn match_projection_obligation_against_definition_bounds( + &mut self, + obligation: &TraitObligation<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> bool + { + let poly_trait_predicate = + self.infcx().resolve_type_vars_if_possible(&obligation.predicate); + let (skol_trait_predicate, skol_map) = + self.infcx().skolemize_late_bound_regions(&poly_trait_predicate, snapshot); + debug!("match_projection_obligation_against_definition_bounds: \ + skol_trait_predicate={:?} skol_map={:?}", + skol_trait_predicate, + skol_map); + + let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty { + ty::TyProjection(ref data) => (data.trait_ref.def_id, data.trait_ref.substs), + ty::TyAnon(def_id, substs) => (def_id, substs), + _ => { + span_bug!( + obligation.cause.span, + "match_projection_obligation_against_definition_bounds() called \ + but self-ty not a projection: {:?}", + 
skol_trait_predicate.trait_ref.self_ty()); + } + }; + debug!("match_projection_obligation_against_definition_bounds: \ + def_id={:?}, substs={:?}", + def_id, substs); + + let item_predicates = self.tcx().item_predicates(def_id); + let bounds = item_predicates.instantiate(self.tcx(), substs); + debug!("match_projection_obligation_against_definition_bounds: \ + bounds={:?}", + bounds); + + let matching_bound = + util::elaborate_predicates(self.tcx(), bounds.predicates) + .filter_to_traits() + .find( + |bound| self.probe( + |this, _| this.match_projection(obligation, + bound.clone(), + skol_trait_predicate.trait_ref.clone(), + &skol_map, + snapshot))); + + debug!("match_projection_obligation_against_definition_bounds: \ + matching_bound={:?}", + matching_bound); + match matching_bound { + None => false, + Some(bound) => { + // Repeat the successful match, if any, this time outside of a probe. + let result = self.match_projection(obligation, + bound, + skol_trait_predicate.trait_ref.clone(), + &skol_map, + snapshot); + + self.infcx.pop_skolemized(skol_map, snapshot); + + assert!(result); + true + } + } + } + + fn match_projection(&mut self, + obligation: &TraitObligation<'tcx>, + trait_bound: ty::PolyTraitRef<'tcx>, + skol_trait_ref: ty::TraitRef<'tcx>, + skol_map: &infer::SkolemizationMap<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> bool + { + assert!(!skol_trait_ref.has_escaping_regions()); + let cause = obligation.cause.clone(); + match self.infcx.sub_poly_trait_refs(false, + cause, + trait_bound.clone(), + ty::Binder(skol_trait_ref.clone())) { + Ok(InferOk { obligations, .. }) => { + self.inferred_obligations.extend(obligations); + } + Err(_) => { return false; } + } + + self.infcx.leak_check(false, obligation.cause.span, skol_map, snapshot).is_ok() + } + + /// Given an obligation like ``, search the obligations that the caller + /// supplied to find out whether it is listed among them. + /// + /// Never affects inference environment. 
+ fn assemble_candidates_from_caller_bounds<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(),SelectionError<'tcx>> + { + debug!("assemble_candidates_from_caller_bounds({:?})", + stack.obligation); + + let all_bounds = + self.param_env().caller_bounds + .iter() + .filter_map(|o| o.to_opt_poly_trait_ref()); + + let matching_bounds = + all_bounds.filter( + |bound| self.evaluate_where_clause(stack, bound.clone()).may_apply()); + + let param_candidates = + matching_bounds.map(|bound| ParamCandidate(bound)); + + candidates.vec.extend(param_candidates); + + Ok(()) + } + + fn evaluate_where_clause<'o>(&mut self, + stack: &TraitObligationStack<'o, 'tcx>, + where_clause_trait_ref: ty::PolyTraitRef<'tcx>) + -> EvaluationResult + { + self.probe(move |this, _| { + match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { + Ok(obligations) => { + this.evaluate_predicates_recursively(stack.list(), obligations.iter()) + } + Err(()) => EvaluatedToErr + } + }) + } + + /// Check for the artificial impl that the compiler will create for an obligation like `X : + /// FnMut<..>` where `X` is a closure type. + /// + /// Note: the type parameters on a closure candidate are modeled as *output* type + /// parameters and hence do not affect whether this trait is a match or not. They will be + /// unified during the confirmation step. 
+ fn assemble_closure_candidates(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(),SelectionError<'tcx>> + { + let kind = match self.tcx().lang_items.fn_trait_kind(obligation.predicate.0.def_id()) { + Some(k) => k, + None => { return Ok(()); } + }; + + // ok to skip binder because the substs on closure types never + // touch bound regions, they just capture the in-scope + // type/region parameters + let self_ty = *obligation.self_ty().skip_binder(); + let (closure_def_id, substs) = match self_ty.sty { + ty::TyClosure(id, substs) => (id, substs), + ty::TyInfer(ty::TyVar(_)) => { + debug!("assemble_unboxed_closure_candidates: ambiguous self-type"); + candidates.ambiguous = true; + return Ok(()); + } + _ => { return Ok(()); } + }; + + debug!("assemble_unboxed_candidates: self_ty={:?} kind={:?} obligation={:?}", + self_ty, + kind, + obligation); + + match self.infcx.closure_kind(closure_def_id) { + Some(closure_kind) => { + debug!("assemble_unboxed_candidates: closure_kind = {:?}", closure_kind); + if closure_kind.extends(kind) { + candidates.vec.push(ClosureCandidate(closure_def_id, substs, kind)); + } + } + None => { + debug!("assemble_unboxed_candidates: closure_kind not yet known"); + candidates.vec.push(ClosureCandidate(closure_def_id, substs, kind)); + } + } + + Ok(()) + } + + /// Implement one of the `Fn()` family for a fn pointer. + fn assemble_fn_pointer_candidates(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(),SelectionError<'tcx>> + { + // We provide impl of all fn traits for fn pointers. 
+ if self.tcx().lang_items.fn_trait_kind(obligation.predicate.def_id()).is_none() { + return Ok(()); + } + + // ok to skip binder because what we are inspecting doesn't involve bound regions + let self_ty = *obligation.self_ty().skip_binder(); + match self_ty.sty { + ty::TyInfer(ty::TyVar(_)) => { + debug!("assemble_fn_pointer_candidates: ambiguous self-type"); + candidates.ambiguous = true; // could wind up being a fn() type + } + + // provide an impl, but only for suitable `fn` pointers + ty::TyFnDef(.., &ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + sig: ty::Binder(ty::FnSig { + inputs: _, + output: _, + variadic: false + }) + }) | + ty::TyFnPtr(&ty::BareFnTy { + unsafety: hir::Unsafety::Normal, + abi: Abi::Rust, + sig: ty::Binder(ty::FnSig { + inputs: _, + output: _, + variadic: false + }) + }) => { + candidates.vec.push(FnPointerCandidate); + } + + _ => { } + } + + Ok(()) + } + + /// Search for impls that might apply to `obligation`. + fn assemble_candidates_from_impls(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(), SelectionError<'tcx>> + { + debug!("assemble_candidates_from_impls(obligation={:?})", obligation); + + let def = self.tcx().lookup_trait_def(obligation.predicate.def_id()); + + def.for_each_relevant_impl( + self.tcx(), + obligation.predicate.0.trait_ref.self_ty(), + |impl_def_id| { + self.probe(|this, snapshot| { /* [1] */ + match this.match_impl(impl_def_id, obligation, snapshot) { + Ok(skol_map) => { + candidates.vec.push(ImplCandidate(impl_def_id)); + + // NB: we can safely drop the skol map + // since we are in a probe [1] + mem::drop(skol_map); + } + Err(_) => { } + } + }); + } + ); + + Ok(()) + } + + fn assemble_candidates_from_default_impls(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(), SelectionError<'tcx>> + { + // OK to skip binder here because the tests we do below do not involve bound 
regions + let self_ty = *obligation.self_ty().skip_binder(); + debug!("assemble_candidates_from_default_impls(self_ty={:?})", self_ty); + + let def_id = obligation.predicate.def_id(); + + if self.tcx().trait_has_default_impl(def_id) { + match self_ty.sty { + ty::TyDynamic(..) => { + // For object types, we don't know what the closed + // over types are. For most traits, this means we + // conservatively say nothing; a candidate may be + // added by `assemble_candidates_from_object_ty`. + // However, for the kind of magic reflect trait, + // we consider it to be implemented even for + // object types, because it just lets you reflect + // onto the object type, not into the object's + // interior. + if self.tcx().has_attr(def_id, "rustc_reflect_like") { + candidates.vec.push(DefaultImplObjectCandidate(def_id)); + } + } + ty::TyParam(..) | + ty::TyProjection(..) | + ty::TyAnon(..) => { + // In these cases, we don't know what the actual + // type is. Therefore, we cannot break it down + // into its constituent types. So we don't + // consider the `..` impl but instead just add no + // candidates: this means that typeck will only + // succeed if there is another reason to believe + // that this obligation holds. That could be a + // where-clause or, in the case of an object type, + // it could be that the object type lists the + // trait (e.g. `Foo+Send : Send`). See + // `compile-fail/typeck-default-trait-impl-send-param.rs` + // for an example of a test case that exercises + // this path. + } + ty::TyInfer(ty::TyVar(_)) => { + // the defaulted impl might apply, we don't know + candidates.ambiguous = true; + } + _ => { + candidates.vec.push(DefaultImplCandidate(def_id.clone())) + } + } + } + + Ok(()) + } + + /// Search for impls that might apply to `obligation`. 
+ fn assemble_candidates_from_object_ty(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + { + debug!("assemble_candidates_from_object_ty(self_ty={:?})", + obligation.self_ty().skip_binder()); + + // Object-safety candidates are only applicable to object-safe + // traits. Including this check is useful because it helps + // inference in cases of traits like `BorrowFrom`, which are + // not object-safe, and which rely on being able to infer the + // self-type from one of the other inputs. Without this check, + // these cases wind up being considered ambiguous due to a + // (spurious) ambiguity introduced here. + let predicate_trait_ref = obligation.predicate.to_poly_trait_ref(); + if !self.tcx().is_object_safe(predicate_trait_ref.def_id()) { + return; + } + + self.probe(|this, _snapshot| { + // the code below doesn't care about regions, and the + // self-ty here doesn't escape this probe, so just erase + // any LBR. + let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty()); + let poly_trait_ref = match self_ty.sty { + ty::TyDynamic(ref data, ..) => { + if data.auto_traits().any(|did| did == obligation.predicate.def_id()) { + debug!("assemble_candidates_from_object_ty: matched builtin bound, \ + pushing candidate"); + candidates.vec.push(BuiltinObjectCandidate); + return; + } + + match data.principal() { + Some(p) => p.with_self_ty(this.tcx(), self_ty), + None => return, + } + } + ty::TyInfer(ty::TyVar(_)) => { + debug!("assemble_candidates_from_object_ty: ambiguous"); + candidates.ambiguous = true; // could wind up being an object type + return; + } + _ => { + return; + } + }; + + debug!("assemble_candidates_from_object_ty: poly_trait_ref={:?}", + poly_trait_ref); + + // Count only those upcast versions that match the trait-ref + // we are looking for. Specifically, do not only check for the + // correct trait, but also the correct type parameters. 
+ // For example, we may be trying to upcast `Foo` to `Bar`, + // but `Foo` is declared as `trait Foo : Bar`. + let upcast_trait_refs = + util::supertraits(this.tcx(), poly_trait_ref) + .filter(|upcast_trait_ref| { + this.probe(|this, _| { + let upcast_trait_ref = upcast_trait_ref.clone(); + this.match_poly_trait_ref(obligation, upcast_trait_ref).is_ok() + }) + }) + .count(); + + if upcast_trait_refs > 1 { + // can be upcast in many ways; need more type information + candidates.ambiguous = true; + } else if upcast_trait_refs == 1 { + candidates.vec.push(ObjectCandidate); + } + }) + } + + /// Search for unsizing that might apply to `obligation`. + fn assemble_candidates_for_unsizing(&mut self, + obligation: &TraitObligation<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) { + // We currently never consider higher-ranked obligations e.g. + // `for<'a> &'a T: Unsize` to be implemented. This is not + // because they are a priori invalid, and we could potentially add support + // for them later, it's just that there isn't really a strong need for it. + // A `T: Unsize` obligation is always used as part of a `T: CoerceUnsize` + // impl, and those are generally applied to concrete types. + // + // That said, one might try to write a fn with a where clause like + // for<'a> Foo<'a, T>: Unsize> + // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`. + // Still, you'd be more likely to write that where clause as + // T: Trait + // so it seems ok if we (conservatively) fail to accept that `Unsize` + // obligation above. Should be possible to extend this in the future. + let source = match self.tcx().no_late_bound_regions(&obligation.self_ty()) { + Some(t) => t, + None => { + // Don't add any candidates if there are bound regions. 
+ return; + } + }; + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + + debug!("assemble_candidates_for_unsizing(source={:?}, target={:?})", + source, target); + + let may_apply = match (&source.sty, &target.sty) { + // Trait+Kx+'a -> Trait+Ky+'b (upcasts). + (&ty::TyDynamic(ref data_a, ..), &ty::TyDynamic(ref data_b, ..)) => { + // Upcasts permit two things: + // + // 1. Dropping builtin bounds, e.g. `Foo+Send` to `Foo` + // 2. Tightening the region bound, e.g. `Foo+'a` to `Foo+'b` if `'a : 'b` + // + // Note that neither of these changes requires any + // change at runtime. Eventually this will be + // generalized. + // + // We always upcast when we can because of reason + // #2 (region bounds). + match (data_a.principal(), data_b.principal()) { + (Some(a), Some(b)) => a.def_id() == b.def_id() && + data_b.auto_traits() + // All of a's auto traits need to be in b's auto traits. + .all(|b| data_a.auto_traits().any(|a| a == b)), + _ => false + } + } + + // T -> Trait. + (_, &ty::TyDynamic(..)) => true, + + // Ambiguous handling is below T -> Trait, because inference + // variables can still implement Unsize and nested + // obligations will have the final say (likely deferred). + (&ty::TyInfer(ty::TyVar(_)), _) | + (_, &ty::TyInfer(ty::TyVar(_))) => { + debug!("assemble_candidates_for_unsizing: ambiguous"); + candidates.ambiguous = true; + false + } + + // [T; n] -> [T]. + (&ty::TyArray(..), &ty::TySlice(_)) => true, + + // Struct -> Struct. + (&ty::TyAdt(def_id_a, _), &ty::TyAdt(def_id_b, _)) if def_id_a.is_struct() => { + def_id_a == def_id_b + } + + _ => false + }; + + if may_apply { + candidates.vec.push(BuiltinUnsizeCandidate); + } + } + + /////////////////////////////////////////////////////////////////////////// + // WINNOW + // + // Winnowing is the process of attempting to resolve ambiguity by + // probing further. 
During the winnowing process, we unify all + // type variables (ignoring skolemization) and then we also + // attempt to evaluate recursive bounds to see if they are + // satisfied. + + /// Returns true if `candidate_i` should be dropped in favor of + /// `candidate_j`. Generally speaking we will drop duplicate + /// candidates and prefer where-clause candidates. + /// Returns true if `victim` should be dropped in favor of + /// `other`. Generally speaking we will drop duplicate + /// candidates and prefer where-clause candidates. + /// + /// See the comment for "SelectionCandidate" for more details. + fn candidate_should_be_dropped_in_favor_of<'o>( + &mut self, + victim: &EvaluatedCandidate<'tcx>, + other: &EvaluatedCandidate<'tcx>) + -> bool + { + if victim.candidate == other.candidate { + return true; + } + + match other.candidate { + ObjectCandidate | + ParamCandidate(_) | ProjectionCandidate => match victim.candidate { + DefaultImplCandidate(..) => { + bug!( + "default implementations shouldn't be recorded \ + when there are other valid candidates"); + } + ImplCandidate(..) | + ClosureCandidate(..) | + FnPointerCandidate | + BuiltinObjectCandidate | + BuiltinUnsizeCandidate | + DefaultImplObjectCandidate(..) | + BuiltinCandidate { .. } => { + // We have a where-clause so don't go around looking + // for impls. + true + } + ObjectCandidate | + ProjectionCandidate => { + // Arbitrarily give param candidates priority + // over projection and object candidates. + true + }, + ParamCandidate(..) => false, + }, + ImplCandidate(other_def) => { + // See if we can toss out `victim` based on specialization. + // This requires us to know *for sure* that the `other` impl applies + // i.e. 
EvaluatedToOk: + if other.evaluation == EvaluatedToOk { + if let ImplCandidate(victim_def) = victim.candidate { + let tcx = self.tcx().global_tcx(); + return traits::specializes(tcx, other_def, victim_def); + } + } + + false + }, + _ => false + } + } + + /////////////////////////////////////////////////////////////////////////// + // BUILTIN BOUNDS + // + // These cover the traits that are built-in to the language + // itself. This includes `Copy` and `Sized` for sure. For the + // moment, it also includes `Send` / `Sync` and a few others, but + // those will hopefully change to library-defined traits in the + // future. + + // HACK: if this returns an error, selection exits without considering + // other impls. + fn assemble_builtin_bound_candidates<'o>(&mut self, + conditions: BuiltinImplConditions<'tcx>, + candidates: &mut SelectionCandidateSet<'tcx>) + -> Result<(),SelectionError<'tcx>> + { + match conditions { + BuiltinImplConditions::Where(nested) => { + debug!("builtin_bound: nested={:?}", nested); + candidates.vec.push(BuiltinCandidate { + has_nested: nested.skip_binder().len() > 0 + }); + Ok(()) + } + BuiltinImplConditions::None => { Ok(()) } + BuiltinImplConditions::Ambiguous => { + debug!("assemble_builtin_bound_candidates: ambiguous builtin"); + Ok(candidates.ambiguous = true) + } + BuiltinImplConditions::Never => { Err(Unimplemented) } + } + } + + fn sized_conditions(&mut self, obligation: &TraitObligation<'tcx>) + -> BuiltinImplConditions<'tcx> + { + use self::BuiltinImplConditions::{Ambiguous, None, Never, Where}; + + // NOTE: binder moved to (*) + let self_ty = self.infcx.shallow_resolve( + obligation.predicate.skip_binder().self_ty()); + + match self_ty.sty { + ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) | + ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | + ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyRawPtr(..) | + ty::TyChar | ty::TyBox(_) | ty::TyRef(..) | + ty::TyArray(..) | ty::TyClosure(..) 
| ty::TyNever | + ty::TyError => { + // safe for everything + Where(ty::Binder(Vec::new())) + } + + ty::TyStr | ty::TySlice(_) | ty::TyDynamic(..) => Never, + + ty::TyTuple(tys) => { + Where(ty::Binder(tys.last().into_iter().cloned().collect())) + } + + ty::TyAdt(def, substs) => { + let sized_crit = def.sized_constraint(self.tcx()); + // (*) binder moved here + Where(ty::Binder(match sized_crit.sty { + ty::TyTuple(tys) => tys.to_vec().subst(self.tcx(), substs), + ty::TyBool => vec![], + _ => vec![sized_crit.subst(self.tcx(), substs)] + })) + } + + ty::TyProjection(_) | ty::TyParam(_) | ty::TyAnon(..) => None, + ty::TyInfer(ty::TyVar(_)) => Ambiguous, + + ty::TyInfer(ty::FreshTy(_)) + | ty::TyInfer(ty::FreshIntTy(_)) + | ty::TyInfer(ty::FreshFloatTy(_)) => { + bug!("asked to assemble builtin bounds of unexpected type: {:?}", + self_ty); + } + } + } + + fn copy_conditions(&mut self, obligation: &TraitObligation<'tcx>) + -> BuiltinImplConditions<'tcx> + { + // NOTE: binder moved to (*) + let self_ty = self.infcx.shallow_resolve( + obligation.predicate.skip_binder().self_ty()); + + use self::BuiltinImplConditions::{Ambiguous, None, Never, Where}; + + match self_ty.sty { + ty::TyInfer(ty::IntVar(_)) | ty::TyInfer(ty::FloatVar(_)) | + ty::TyUint(_) | ty::TyInt(_) | ty::TyBool | ty::TyFloat(_) | + ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar | + ty::TyRawPtr(..) | ty::TyError | ty::TyNever | + ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutImmutable }) => { + Where(ty::Binder(Vec::new())) + } + + ty::TyBox(_) | ty::TyDynamic(..) | ty::TyStr | ty::TySlice(..) | + ty::TyClosure(..) | + ty::TyRef(_, ty::TypeAndMut { ty: _, mutbl: hir::MutMutable }) => { + Never + } + + ty::TyArray(element_ty, _) => { + // (*) binder moved here + Where(ty::Binder(vec![element_ty])) + } + + ty::TyTuple(tys) => { + // (*) binder moved here + Where(ty::Binder(tys.to_vec())) + } + + ty::TyAdt(..) | ty::TyProjection(..) | ty::TyParam(..) | ty::TyAnon(..) 
=> { + // Fallback to whatever user-defined impls exist in this case. + None + } + + ty::TyInfer(ty::TyVar(_)) => { + // Unbound type variable. Might or might not have + // applicable impls and so forth, depending on what + // those type variables wind up being bound to. + Ambiguous + } + + ty::TyInfer(ty::FreshTy(_)) + | ty::TyInfer(ty::FreshIntTy(_)) + | ty::TyInfer(ty::FreshFloatTy(_)) => { + bug!("asked to assemble builtin bounds of unexpected type: {:?}", + self_ty); + } + } + } + + /// For default impls, we need to break apart a type into its + /// "constituent types" -- meaning, the types that it contains. + /// + /// Here are some (simple) examples: + /// + /// ``` + /// (i32, u32) -> [i32, u32] + /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32] + /// Bar where struct Bar { x: T, y: u32 } -> [i32, u32] + /// Zed where enum Zed { A(T), B(u32) } -> [i32, u32] + /// ``` + fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec> { + match t.sty { + ty::TyUint(_) | + ty::TyInt(_) | + ty::TyBool | + ty::TyFloat(_) | + ty::TyFnDef(..) | + ty::TyFnPtr(_) | + ty::TyStr | + ty::TyError | + ty::TyInfer(ty::IntVar(_)) | + ty::TyInfer(ty::FloatVar(_)) | + ty::TyNever | + ty::TyChar => { + Vec::new() + } + + ty::TyDynamic(..) | + ty::TyParam(..) | + ty::TyProjection(..) | + ty::TyAnon(..) | + ty::TyInfer(ty::TyVar(_)) | + ty::TyInfer(ty::FreshTy(_)) | + ty::TyInfer(ty::FreshIntTy(_)) | + ty::TyInfer(ty::FreshFloatTy(_)) => { + bug!("asked to assemble constituent types of unexpected type: {:?}", + t); + } + + ty::TyBox(referent_ty) => { // Box + vec![referent_ty] + } + + ty::TyRawPtr(ty::TypeAndMut { ty: element_ty, ..}) | + ty::TyRef(_, ty::TypeAndMut { ty: element_ty, ..}) => { + vec![element_ty] + }, + + ty::TyArray(element_ty, _) | ty::TySlice(element_ty) => { + vec![element_ty] + } + + ty::TyTuple(ref tys) => { + // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet + tys.to_vec() + } + + ty::TyClosure(def_id, ref substs) => { + // FIXME(#27086). 
We are invariant w/r/t our + // func_substs, but we don't see them as + // constituent types; this seems RIGHT but also like + // something that a normal type couldn't simulate. Is + // this just a gap with the way that PhantomData and + // OIBIT interact? That is, there is no way to say + // "make me invariant with respect to this TYPE, but + // do not act as though I can reach it" + substs.upvar_tys(def_id, self.tcx()).collect() + } + + // for `PhantomData`, we pass `T` + ty::TyAdt(def, substs) if def.is_phantom_data() => { + substs.types().collect() + } + + ty::TyAdt(def, substs) => { + def.all_fields() + .map(|f| f.ty(self.tcx(), substs)) + .collect() + } + } + } + + fn collect_predicates_for_types(&mut self, + cause: ObligationCause<'tcx>, + recursion_depth: usize, + trait_def_id: DefId, + types: ty::Binder>>) + -> Vec> + { + // Because the types were potentially derived from + // higher-ranked obligations they may reference late-bound + // regions. For example, `for<'a> Foo<&'a int> : Copy` would + // yield a type like `for<'a> &'a int`. In general, we + // maintain the invariant that we never manipulate bound + // regions, so we have to process these bound regions somehow. + // + // The strategy is to: + // + // 1. Instantiate those regions to skolemized regions (e.g., + // `for<'a> &'a int` becomes `&0 int`. + // 2. Produce something like `&'0 int : Copy` + // 3. 
Re-bind the regions back to `for<'a> &'a int : Copy` + + types.skip_binder().into_iter().flat_map(|ty| { // binder moved -\ + let ty: ty::Binder> = ty::Binder(ty); // <----------/ + + self.in_snapshot(|this, snapshot| { + let (skol_ty, skol_map) = + this.infcx().skolemize_late_bound_regions(&ty, snapshot); + let Normalized { value: normalized_ty, mut obligations } = + project::normalize_with_depth(this, + cause.clone(), + recursion_depth, + &skol_ty); + let skol_obligation = + this.tcx().predicate_for_trait_def( + cause.clone(), + trait_def_id, + recursion_depth, + normalized_ty, + &[]); + obligations.push(skol_obligation); + this.infcx().plug_leaks(skol_map, snapshot, obligations) + }) + }).collect() + } + + /////////////////////////////////////////////////////////////////////////// + // CONFIRMATION + // + // Confirmation unifies the output type parameters of the trait + // with the values found in the obligation, possibly yielding a + // type error. See `README.md` for more details. + + fn confirm_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + candidate: SelectionCandidate<'tcx>) + -> Result,SelectionError<'tcx>> + { + debug!("confirm_candidate({:?}, {:?})", + obligation, + candidate); + + match candidate { + BuiltinCandidate { has_nested } => { + Ok(VtableBuiltin( + self.confirm_builtin_candidate(obligation, has_nested))) + } + + ParamCandidate(param) => { + let obligations = self.confirm_param_candidate(obligation, param); + Ok(VtableParam(obligations)) + } + + DefaultImplCandidate(trait_def_id) => { + let data = self.confirm_default_impl_candidate(obligation, trait_def_id); + Ok(VtableDefaultImpl(data)) + } + + DefaultImplObjectCandidate(trait_def_id) => { + let data = self.confirm_default_impl_object_candidate(obligation, trait_def_id); + Ok(VtableDefaultImpl(data)) + } + + ImplCandidate(impl_def_id) => { + Ok(VtableImpl(self.confirm_impl_candidate(obligation, impl_def_id))) + } + + ClosureCandidate(closure_def_id, substs, kind) => { + let 
vtable_closure = + self.confirm_closure_candidate(obligation, closure_def_id, substs, kind)?; + Ok(VtableClosure(vtable_closure)) + } + + BuiltinObjectCandidate => { + // This indicates something like `(Trait+Send) : + // Send`. In this case, we know that this holds + // because that's what the object type is telling us, + // and there's really no additional obligations to + // prove and no types in particular to unify etc. + Ok(VtableParam(Vec::new())) + } + + ObjectCandidate => { + let data = self.confirm_object_candidate(obligation); + Ok(VtableObject(data)) + } + + FnPointerCandidate => { + let data = + self.confirm_fn_pointer_candidate(obligation)?; + Ok(VtableFnPointer(data)) + } + + ProjectionCandidate => { + self.confirm_projection_candidate(obligation); + Ok(VtableParam(Vec::new())) + } + + BuiltinUnsizeCandidate => { + let data = self.confirm_builtin_unsize_candidate(obligation)?; + Ok(VtableBuiltin(data)) + } + } + } + + fn confirm_projection_candidate(&mut self, + obligation: &TraitObligation<'tcx>) + { + self.in_snapshot(|this, snapshot| { + let result = + this.match_projection_obligation_against_definition_bounds(obligation, + snapshot); + assert!(result); + }) + } + + fn confirm_param_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + param: ty::PolyTraitRef<'tcx>) + -> Vec> + { + debug!("confirm_param_candidate({:?},{:?})", + obligation, + param); + + // During evaluation, we already checked that this + // where-clause trait-ref could be unified with the obligation + // trait-ref. Repeat that unification now without any + // transactional boundary; it should not fail. 
+ match self.match_where_clause_trait_ref(obligation, param.clone()) { + Ok(obligations) => obligations, + Err(()) => { + bug!("Where clause `{:?}` was applicable to `{:?}` but now is not", + param, + obligation); + } + } + } + + fn confirm_builtin_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + has_nested: bool) + -> VtableBuiltinData> + { + debug!("confirm_builtin_candidate({:?}, {:?})", + obligation, has_nested); + + let obligations = if has_nested { + let trait_def = obligation.predicate.def_id(); + let conditions = match trait_def { + _ if Some(trait_def) == self.tcx().lang_items.sized_trait() => { + self.sized_conditions(obligation) + } + _ if Some(trait_def) == self.tcx().lang_items.copy_trait() => { + self.copy_conditions(obligation) + } + _ => bug!("unexpected builtin trait {:?}", trait_def) + }; + let nested = match conditions { + BuiltinImplConditions::Where(nested) => nested, + _ => bug!("obligation {:?} had matched a builtin impl but now doesn't", + obligation) + }; + + let cause = obligation.derived_cause(BuiltinDerivedObligation); + self.collect_predicates_for_types(cause, + obligation.recursion_depth+1, + trait_def, + nested) + } else { + vec![] + }; + + debug!("confirm_builtin_candidate: obligations={:?}", + obligations); + VtableBuiltinData { nested: obligations } + } + + /// This handles the case where a `impl Foo for ..` impl is being used. + /// The idea is that the impl applies to `X : Foo` if the following conditions are met: + /// + /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds + /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds. 
+ fn confirm_default_impl_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId) + -> VtableDefaultImplData> + { + debug!("confirm_default_impl_candidate({:?}, {:?})", + obligation, + trait_def_id); + + // binder is moved below + let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); + let types = self.constituent_types_for_ty(self_ty); + self.vtable_default_impl(obligation, trait_def_id, ty::Binder(types)) + } + + fn confirm_default_impl_object_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId) + -> VtableDefaultImplData> + { + debug!("confirm_default_impl_object_candidate({:?}, {:?})", + obligation, + trait_def_id); + + assert!(self.tcx().has_attr(trait_def_id, "rustc_reflect_like")); + + // OK to skip binder, it is reintroduced below + let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty()); + match self_ty.sty { + ty::TyDynamic(ref data, ..) => { + // OK to skip the binder, it is reintroduced below + let principal = data.principal().unwrap(); + let input_types = principal.input_types(); + let assoc_types = data.projection_bounds() + .map(|pb| pb.skip_binder().ty); + let all_types: Vec<_> = input_types.chain(assoc_types) + .collect(); + + // reintroduce the two binding levels we skipped, then flatten into one + let all_types = ty::Binder(ty::Binder(all_types)); + let all_types = self.tcx().flatten_late_bound_regions(&all_types); + + self.vtable_default_impl(obligation, trait_def_id, all_types) + } + _ => { + bug!("asked to confirm default object implementation for non-object type: {:?}", + self_ty); + } + } + } + + /// See `confirm_default_impl_candidate` + fn vtable_default_impl(&mut self, + obligation: &TraitObligation<'tcx>, + trait_def_id: DefId, + nested: ty::Binder>>) + -> VtableDefaultImplData> + { + debug!("vtable_default_impl: nested={:?}", nested); + + let cause = obligation.derived_cause(BuiltinDerivedObligation); + let mut 
obligations = self.collect_predicates_for_types( + cause, + obligation.recursion_depth+1, + trait_def_id, + nested); + + let trait_obligations = self.in_snapshot(|this, snapshot| { + let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); + let (trait_ref, skol_map) = + this.infcx().skolemize_late_bound_regions(&poly_trait_ref, snapshot); + let cause = obligation.derived_cause(ImplDerivedObligation); + this.impl_or_trait_obligations(cause, + obligation.recursion_depth + 1, + trait_def_id, + &trait_ref.substs, + skol_map, + snapshot) + }); + + obligations.extend(trait_obligations); + + debug!("vtable_default_impl: obligations={:?}", obligations); + + VtableDefaultImplData { + trait_def_id: trait_def_id, + nested: obligations + } + } + + fn confirm_impl_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + impl_def_id: DefId) + -> VtableImplData<'tcx, PredicateObligation<'tcx>> + { + debug!("confirm_impl_candidate({:?},{:?})", + obligation, + impl_def_id); + + // First, create the substitutions by matching the impl again, + // this time not in a probe. 
+ self.in_snapshot(|this, snapshot| { + let (substs, skol_map) = + this.rematch_impl(impl_def_id, obligation, + snapshot); + debug!("confirm_impl_candidate substs={:?}", substs); + let cause = obligation.derived_cause(ImplDerivedObligation); + this.vtable_impl(impl_def_id, substs, cause, + obligation.recursion_depth + 1, + skol_map, snapshot) + }) + } + + fn vtable_impl(&mut self, + impl_def_id: DefId, + mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>, + cause: ObligationCause<'tcx>, + recursion_depth: usize, + skol_map: infer::SkolemizationMap<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> VtableImplData<'tcx, PredicateObligation<'tcx>> + { + debug!("vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, skol_map={:?})", + impl_def_id, + substs, + recursion_depth, + skol_map); + + let mut impl_obligations = + self.impl_or_trait_obligations(cause, + recursion_depth, + impl_def_id, + &substs.value, + skol_map, + snapshot); + + debug!("vtable_impl: impl_def_id={:?} impl_obligations={:?}", + impl_def_id, + impl_obligations); + + // Because of RFC447, the impl-trait-ref and obligations + // are sufficient to determine the impl substs, without + // relying on projections in the impl-trait-ref. + // + // e.g. `impl> Foo<::T> for V` + impl_obligations.append(&mut substs.obligations); + + VtableImplData { impl_def_id: impl_def_id, + substs: substs.value, + nested: impl_obligations } + } + + fn confirm_object_candidate(&mut self, + obligation: &TraitObligation<'tcx>) + -> VtableObjectData<'tcx, PredicateObligation<'tcx>> + { + debug!("confirm_object_candidate({:?})", + obligation); + + // FIXME skipping binder here seems wrong -- we should + // probably flatten the binder from the obligation and the + // binder from the object. Have to try to make a broken test + // case that results. -nmatsakis + let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); + let poly_trait_ref = match self_ty.sty { + ty::TyDynamic(ref data, ..) 
=> { + data.principal().unwrap().with_self_ty(self.tcx(), self_ty) + } + _ => { + span_bug!(obligation.cause.span, + "object candidate with non-object"); + } + }; + + let mut upcast_trait_ref = None; + let vtable_base; + + { + let tcx = self.tcx(); + + // We want to find the first supertrait in the list of + // supertraits that we can unify with, and do that + // unification. We know that there is exactly one in the list + // where we can unify because otherwise select would have + // reported an ambiguity. (When we do find a match, also + // record it for later.) + let nonmatching = + util::supertraits(tcx, poly_trait_ref) + .take_while(|&t| { + match + self.commit_if_ok( + |this, _| this.match_poly_trait_ref(obligation, t)) + { + Ok(_) => { upcast_trait_ref = Some(t); false } + Err(_) => { true } + } + }); + + // Additionally, for each of the nonmatching predicates that + // we pass over, we sum up the set of number of vtable + // entries, so that we can compute the offset for the selected + // trait. 
+ vtable_base = + nonmatching.map(|t| tcx.count_own_vtable_entries(t)) + .sum(); + + } + + VtableObjectData { + upcast_trait_ref: upcast_trait_ref.unwrap(), + vtable_base: vtable_base, + nested: vec![] + } + } + + fn confirm_fn_pointer_candidate(&mut self, obligation: &TraitObligation<'tcx>) + -> Result>, SelectionError<'tcx>> + { + debug!("confirm_fn_pointer_candidate({:?})", + obligation); + + // ok to skip binder; it is reintroduced below + let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder()); + let sig = self_ty.fn_sig(); + let trait_ref = + self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(), + self_ty, + sig, + util::TupleArgumentsFlag::Yes) + .map_bound(|(trait_ref, _)| trait_ref); + + self.confirm_poly_trait_refs(obligation.cause.clone(), + obligation.predicate.to_poly_trait_ref(), + trait_ref)?; + Ok(VtableFnPointerData { fn_ty: self_ty, nested: vec![] }) + } + + fn confirm_closure_candidate(&mut self, + obligation: &TraitObligation<'tcx>, + closure_def_id: DefId, + substs: ty::ClosureSubsts<'tcx>, + kind: ty::ClosureKind) + -> Result>, + SelectionError<'tcx>> + { + debug!("confirm_closure_candidate({:?},{:?},{:?})", + obligation, + closure_def_id, + substs); + + let Normalized { + value: trait_ref, + mut obligations + } = self.closure_trait_ref(obligation, closure_def_id, substs); + + debug!("confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})", + closure_def_id, + trait_ref, + obligations); + + self.confirm_poly_trait_refs(obligation.cause.clone(), + obligation.predicate.to_poly_trait_ref(), + trait_ref)?; + + obligations.push(Obligation::new( + obligation.cause.clone(), + ty::Predicate::ClosureKind(closure_def_id, kind))); + + Ok(VtableClosureData { + closure_def_id: closure_def_id, + substs: substs.clone(), + nested: obligations + }) + } + + /// In the case of closure types and fn pointers, + /// we currently treat the input type parameters on the trait as + /// outputs. 
This means that when we have a match we have only + /// considered the self type, so we have to go back and make sure + /// to relate the argument types too. This is kind of wrong, but + /// since we control the full set of impls, also not that wrong, + /// and it DOES yield better error messages (since we don't report + /// errors as if there is no applicable impl, but rather report + /// errors are about mismatched argument types. + /// + /// Here is an example. Imagine we have a closure expression + /// and we desugared it so that the type of the expression is + /// `Closure`, and `Closure` expects an int as argument. Then it + /// is "as if" the compiler generated this impl: + /// + /// impl Fn(int) for Closure { ... } + /// + /// Now imagine our obligation is `Fn(usize) for Closure`. So far + /// we have matched the self-type `Closure`. At this point we'll + /// compare the `int` to `usize` and generate an error. + /// + /// Note that this checking occurs *after* the impl has selected, + /// because these output type parameters should not affect the + /// selection of the impl. Therefore, if there is a mismatch, we + /// report an error to the user. + fn confirm_poly_trait_refs(&mut self, + obligation_cause: ObligationCause<'tcx>, + obligation_trait_ref: ty::PolyTraitRef<'tcx>, + expected_trait_ref: ty::PolyTraitRef<'tcx>) + -> Result<(), SelectionError<'tcx>> + { + let obligation_trait_ref = obligation_trait_ref.clone(); + self.infcx.sub_poly_trait_refs(false, + obligation_cause.clone(), + expected_trait_ref.clone(), + obligation_trait_ref.clone()) + .map(|InferOk { obligations, .. 
}| self.inferred_obligations.extend(obligations)) + .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e)) + } + + fn confirm_builtin_unsize_candidate(&mut self, + obligation: &TraitObligation<'tcx>,) + -> Result>, + SelectionError<'tcx>> { + let tcx = self.tcx(); + + // assemble_candidates_for_unsizing should ensure there are no late bound + // regions here. See the comment there for more details. + let source = self.infcx.shallow_resolve( + tcx.no_late_bound_regions(&obligation.self_ty()).unwrap()); + let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1); + let target = self.infcx.shallow_resolve(target); + + debug!("confirm_builtin_unsize_candidate(source={:?}, target={:?})", + source, target); + + let mut nested = vec![]; + match (&source.sty, &target.sty) { + // Trait+Kx+'a -> Trait+Ky+'b (upcasts). + (&ty::TyDynamic(ref data_a, r_a), &ty::TyDynamic(ref data_b, r_b)) => { + // See assemble_candidates_for_unsizing for more info. + // Binders reintroduced below in call to mk_existential_predicates. + let principal = data_a.skip_binder().principal(); + let iter = principal.into_iter().map(ty::ExistentialPredicate::Trait) + .chain(data_a.skip_binder().projection_bounds() + .map(|x| ty::ExistentialPredicate::Projection(x))) + .chain(data_b.auto_traits().map(ty::ExistentialPredicate::AutoTrait)); + let new_trait = tcx.mk_dynamic( + ty::Binder(tcx.mk_existential_predicates(iter)), r_b); + let InferOk { obligations, .. } = + self.infcx.sub_types(false, &obligation.cause, new_trait, target) + .map_err(|_| Unimplemented)?; + self.inferred_obligations.extend(obligations); + + // Register one obligation for 'a: 'b. 
+ let cause = ObligationCause::new(obligation.cause.span, + obligation.cause.body_id, + ObjectCastObligation(target)); + let outlives = ty::OutlivesPredicate(r_a, r_b); + nested.push(Obligation::with_depth(cause, + obligation.recursion_depth + 1, + ty::Binder(outlives).to_predicate())); + } + + // T -> Trait. + (_, &ty::TyDynamic(ref data, r)) => { + let mut object_dids = + data.auto_traits().chain(data.principal().map(|p| p.def_id())); + if let Some(did) = object_dids.find(|did| { + !tcx.is_object_safe(*did) + }) { + return Err(TraitNotObjectSafe(did)) + } + + let cause = ObligationCause::new(obligation.cause.span, + obligation.cause.body_id, + ObjectCastObligation(target)); + let mut push = |predicate| { + nested.push(Obligation::with_depth(cause.clone(), + obligation.recursion_depth + 1, + predicate)); + }; + + // Create obligations: + // - Casting T to Trait + // - For all the various builtin bounds attached to the object cast. (In other + // words, if the object type is Foo+Send, this would create an obligation for the + // Send check.) + // - Projection predicates + for predicate in data.iter() { + push(predicate.with_self_ty(tcx, source)); + } + + // We can only make objects from sized types. + let tr = ty::TraitRef { + def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem), + substs: tcx.mk_substs_trait(source, &[]), + }; + push(tr.to_predicate()); + + // If the type is `Foo+'a`, ensures that the type + // being cast to `Foo+'a` outlives `'a`: + let outlives = ty::OutlivesPredicate(source, r); + push(ty::Binder(outlives).to_predicate()); + } + + // [T; n] -> [T]. + (&ty::TyArray(a, _), &ty::TySlice(b)) => { + let InferOk { obligations, .. } = + self.infcx.sub_types(false, &obligation.cause, a, b) + .map_err(|_| Unimplemented)?; + self.inferred_obligations.extend(obligations); + } + + // Struct -> Struct. 
+ (&ty::TyAdt(def, substs_a), &ty::TyAdt(_, substs_b)) => { + let fields = def + .all_fields() + .map(|f| tcx.item_type(f.did)) + .collect::>(); + + // The last field of the structure has to exist and contain type parameters. + let field = if let Some(&field) = fields.last() { + field + } else { + return Err(Unimplemented); + }; + let mut ty_params = BitVector::new(substs_a.types().count()); + let mut found = false; + for ty in field.walk() { + if let ty::TyParam(p) = ty.sty { + ty_params.insert(p.idx as usize); + found = true; + } + } + if !found { + return Err(Unimplemented); + } + + // Replace type parameters used in unsizing with + // TyError and ensure they do not affect any other fields. + // This could be checked after type collection for any struct + // with a potentially unsized trailing field. + let params = substs_a.params().iter().enumerate().map(|(i, &k)| { + if ty_params.contains(i) { + Kind::from(tcx.types.err) + } else { + k + } + }); + let substs = tcx.mk_substs(params); + for &ty in fields.split_last().unwrap().1 { + if ty.subst(tcx, substs).references_error() { + return Err(Unimplemented); + } + } + + // Extract Field and Field from Struct and Struct. + let inner_source = field.subst(tcx, substs_a); + let inner_target = field.subst(tcx, substs_b); + + // Check that the source structure with the target's + // type parameters is a subtype of the target. + let params = substs_a.params().iter().enumerate().map(|(i, &k)| { + if ty_params.contains(i) { + Kind::from(substs_b.type_at(i)) + } else { + k + } + }); + let new_struct = tcx.mk_adt(def, tcx.mk_substs(params)); + let InferOk { obligations, .. } = + self.infcx.sub_types(false, &obligation.cause, new_struct, target) + .map_err(|_| Unimplemented)?; + self.inferred_obligations.extend(obligations); + + // Construct the nested Field: Unsize> predicate. 
+ nested.push(tcx.predicate_for_trait_def( + obligation.cause.clone(), + obligation.predicate.def_id(), + obligation.recursion_depth + 1, + inner_source, + &[inner_target])); + } + + _ => bug!() + }; + + Ok(VtableBuiltinData { nested: nested }) + } + + /////////////////////////////////////////////////////////////////////////// + // Matching + // + // Matching is a common path used for both evaluation and + // confirmation. It basically unifies types that appear in impls + // and traits. This does affect the surrounding environment; + // therefore, when used during evaluation, match routines must be + // run inside of a `probe()` so that their side-effects are + // contained. + + fn rematch_impl(&mut self, + impl_def_id: DefId, + obligation: &TraitObligation<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> (Normalized<'tcx, &'tcx Substs<'tcx>>, + infer::SkolemizationMap<'tcx>) + { + match self.match_impl(impl_def_id, obligation, snapshot) { + Ok((substs, skol_map)) => (substs, skol_map), + Err(()) => { + bug!("Impl {:?} was matchable against {:?} but now is not", + impl_def_id, + obligation); + } + } + } + + fn match_impl(&mut self, + impl_def_id: DefId, + obligation: &TraitObligation<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> Result<(Normalized<'tcx, &'tcx Substs<'tcx>>, + infer::SkolemizationMap<'tcx>), ()> + { + let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); + + // Before we create the substitutions and everything, first + // consider a "quick reject". This avoids creating more types + // and so forth that we need to. 
+ if self.fast_reject_trait_refs(obligation, &impl_trait_ref) { + return Err(()); + } + + let (skol_obligation, skol_map) = self.infcx().skolemize_late_bound_regions( + &obligation.predicate, + snapshot); + let skol_obligation_trait_ref = skol_obligation.trait_ref; + + let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, + impl_def_id); + + let impl_trait_ref = impl_trait_ref.subst(self.tcx(), + impl_substs); + + let impl_trait_ref = + project::normalize_with_depth(self, + obligation.cause.clone(), + obligation.recursion_depth + 1, + &impl_trait_ref); + + debug!("match_impl(impl_def_id={:?}, obligation={:?}, \ + impl_trait_ref={:?}, skol_obligation_trait_ref={:?})", + impl_def_id, + obligation, + impl_trait_ref, + skol_obligation_trait_ref); + + let InferOk { obligations, .. } = + self.infcx.eq_trait_refs(false, + &obligation.cause, + impl_trait_ref.value.clone(), + skol_obligation_trait_ref) + .map_err(|e| { + debug!("match_impl: failed eq_trait_refs due to `{}`", e); + () + })?; + self.inferred_obligations.extend(obligations); + + if let Err(e) = self.infcx.leak_check(false, + obligation.cause.span, + &skol_map, + snapshot) { + debug!("match_impl: failed leak check due to `{}`", e); + return Err(()); + } + + debug!("match_impl: success impl_substs={:?}", impl_substs); + Ok((Normalized { + value: impl_substs, + obligations: impl_trait_ref.obligations + }, skol_map)) + } + + fn fast_reject_trait_refs(&mut self, + obligation: &TraitObligation, + impl_trait_ref: &ty::TraitRef) + -> bool + { + // We can avoid creating type variables and doing the full + // substitution if we find that any of the input types, when + // simplified, do not match. 
+ + obligation.predicate.skip_binder().input_types() + .zip(impl_trait_ref.input_types()) + .any(|(obligation_ty, impl_ty)| { + let simplified_obligation_ty = + fast_reject::simplify_type(self.tcx(), obligation_ty, true); + let simplified_impl_ty = + fast_reject::simplify_type(self.tcx(), impl_ty, false); + + simplified_obligation_ty.is_some() && + simplified_impl_ty.is_some() && + simplified_obligation_ty != simplified_impl_ty + }) + } + + /// Normalize `where_clause_trait_ref` and try to match it against + /// `obligation`. If successful, return any predicates that + /// result from the normalization. Normalization is necessary + /// because where-clauses are stored in the parameter environment + /// unnormalized. + fn match_where_clause_trait_ref(&mut self, + obligation: &TraitObligation<'tcx>, + where_clause_trait_ref: ty::PolyTraitRef<'tcx>) + -> Result>,()> + { + self.match_poly_trait_ref(obligation, where_clause_trait_ref)?; + Ok(Vec::new()) + } + + /// Returns `Ok` if `poly_trait_ref` being true implies that the + /// obligation is satisfied. + fn match_poly_trait_ref(&mut self, + obligation: &TraitObligation<'tcx>, + poly_trait_ref: ty::PolyTraitRef<'tcx>) + -> Result<(),()> + { + debug!("match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}", + obligation, + poly_trait_ref); + + self.infcx.sub_poly_trait_refs(false, + obligation.cause.clone(), + poly_trait_ref, + obligation.predicate.to_poly_trait_ref()) + .map(|InferOk { obligations, .. 
}| self.inferred_obligations.extend(obligations)) + .map_err(|_| ()) + } + + /////////////////////////////////////////////////////////////////////////// + // Miscellany + + fn match_fresh_trait_refs(&self, + previous: &ty::PolyTraitRef<'tcx>, + current: &ty::PolyTraitRef<'tcx>) + -> bool + { + let mut matcher = ty::_match::Match::new(self.tcx()); + matcher.relate(previous, current).is_ok() + } + + fn push_stack<'o,'s:'o>(&mut self, + previous_stack: TraitObligationStackList<'s, 'tcx>, + obligation: &'o TraitObligation<'tcx>) + -> TraitObligationStack<'o, 'tcx> + { + let fresh_trait_ref = + obligation.predicate.to_poly_trait_ref().fold_with(&mut self.freshener); + + TraitObligationStack { + obligation: obligation, + fresh_trait_ref: fresh_trait_ref, + previous: previous_stack, + } + } + + fn closure_trait_ref_unnormalized(&mut self, + obligation: &TraitObligation<'tcx>, + closure_def_id: DefId, + substs: ty::ClosureSubsts<'tcx>) + -> ty::PolyTraitRef<'tcx> + { + let closure_type = self.infcx.closure_type(closure_def_id, substs); + let ty::Binder((trait_ref, _)) = + self.tcx().closure_trait_ref_and_return_type(obligation.predicate.def_id(), + obligation.predicate.0.self_ty(), // (1) + &closure_type.sig, + util::TupleArgumentsFlag::No); + // (1) Feels icky to skip the binder here, but OTOH we know + // that the self-type is an unboxed closure type and hence is + // in fact unparameterized (or at least does not reference any + // regions bound in the obligation). Still probably some + // refactoring could make this nicer. + + ty::Binder(trait_ref) + } + + fn closure_trait_ref(&mut self, + obligation: &TraitObligation<'tcx>, + closure_def_id: DefId, + substs: ty::ClosureSubsts<'tcx>) + -> Normalized<'tcx, ty::PolyTraitRef<'tcx>> + { + let trait_ref = self.closure_trait_ref_unnormalized( + obligation, closure_def_id, substs); + + // A closure signature can contain associated types which + // must be normalized. 
+ normalize_with_depth(self, + obligation.cause.clone(), + obligation.recursion_depth+1, + &trait_ref) + } + + /// Returns the obligations that are implied by instantiating an + /// impl or trait. The obligations are substituted and fully + /// normalized. This is used when confirming an impl or default + /// impl. + fn impl_or_trait_obligations(&mut self, + cause: ObligationCause<'tcx>, + recursion_depth: usize, + def_id: DefId, // of impl or trait + substs: &Substs<'tcx>, // for impl or trait + skol_map: infer::SkolemizationMap<'tcx>, + snapshot: &infer::CombinedSnapshot) + -> Vec> + { + debug!("impl_or_trait_obligations(def_id={:?})", def_id); + let tcx = self.tcx(); + + // To allow for one-pass evaluation of the nested obligation, + // each predicate must be preceded by the obligations required + // to normalize it. + // for example, if we have: + // impl> Foo for V where U::Item: Copy + // the impl will have the following predicates: + // ::Item = U, + // U: Iterator, U: Sized, + // V: Iterator, V: Sized, + // ::Item: Copy + // When we substitute, say, `V => IntoIter, U => $0`, the last + // obligation will normalize to `<$0 as Iterator>::Item = $1` and + // `$1: Copy`, so we must ensure the obligations are emitted in + // that order. + let predicates = tcx.item_predicates(def_id); + assert_eq!(predicates.parent, None); + let predicates = predicates.predicates.iter().flat_map(|predicate| { + let predicate = normalize_with_depth(self, cause.clone(), recursion_depth, + &predicate.subst(tcx, substs)); + predicate.obligations.into_iter().chain( + Some(Obligation { + cause: cause.clone(), + recursion_depth: recursion_depth, + predicate: predicate.value + })) + }).collect(); + self.infcx().plug_leaks(skol_map, snapshot, predicates) + } +} + +impl<'tcx> TraitObligation<'tcx> { + #[allow(unused_comparisons)] + pub fn derived_cause(&self, + variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>) + -> ObligationCause<'tcx> + { + /*! 
+ * Creates a cause for obligations that are derived from + * `obligation` by a recursive search (e.g., for a builtin + * bound, or eventually a `impl Foo for ..`). If `obligation` + * is itself a derived obligation, this is just a clone, but + * otherwise we create a "derived obligation" cause so as to + * keep track of the original root obligation for error + * reporting. + */ + + let obligation = self; + + // NOTE(flaper87): As of now, it keeps track of the whole error + // chain. Ideally, we should have a way to configure this either + // by using -Z verbose or just a CLI argument. + if obligation.recursion_depth >= 0 { + let derived_cause = DerivedObligationCause { + parent_trait_ref: obligation.predicate.to_poly_trait_ref(), + parent_code: Rc::new(obligation.cause.code.clone()) + }; + let derived_code = variant(derived_cause); + ObligationCause::new(obligation.cause.span, obligation.cause.body_id, derived_code) + } else { + obligation.cause.clone() + } + } +} + +impl<'tcx> SelectionCache<'tcx> { + pub fn new() -> SelectionCache<'tcx> { + SelectionCache { + hashmap: RefCell::new(FxHashMap()) + } + } +} + +impl<'tcx> EvaluationCache<'tcx> { + pub fn new() -> EvaluationCache<'tcx> { + EvaluationCache { + hashmap: RefCell::new(FxHashMap()) + } + } +} + +impl<'o,'tcx> TraitObligationStack<'o,'tcx> { + fn list(&'o self) -> TraitObligationStackList<'o,'tcx> { + TraitObligationStackList::with(self) + } + + fn iter(&'o self) -> TraitObligationStackList<'o,'tcx> { + self.list() + } +} + +#[derive(Copy, Clone)] +struct TraitObligationStackList<'o,'tcx:'o> { + head: Option<&'o TraitObligationStack<'o,'tcx>> +} + +impl<'o,'tcx> TraitObligationStackList<'o,'tcx> { + fn empty() -> TraitObligationStackList<'o,'tcx> { + TraitObligationStackList { head: None } + } + + fn with(r: &'o TraitObligationStack<'o,'tcx>) -> TraitObligationStackList<'o,'tcx> { + TraitObligationStackList { head: Some(r) } + } +} + +impl<'o,'tcx> Iterator for TraitObligationStackList<'o,'tcx>{ + type 
Item = &'o TraitObligationStack<'o,'tcx>; + + fn next(&mut self) -> Option<&'o TraitObligationStack<'o,'tcx>> { + match self.head { + Some(o) => { + *self = o.previous; + Some(o) + } + None => None + } + } +} + +impl<'o,'tcx> fmt::Debug for TraitObligationStack<'o,'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "TraitObligationStack({:?})", self.obligation) + } +} + +impl EvaluationResult { + fn may_apply(&self) -> bool { + match *self { + EvaluatedToOk | + EvaluatedToAmbig | + EvaluatedToUnknown => true, + + EvaluatedToErr => false + } + } +} + +impl MethodMatchResult { + pub fn may_apply(&self) -> bool { + match *self { + MethodMatched(_) => true, + MethodAmbiguous(_) => true, + MethodDidNotMatch => false, + } + } +} diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs new file mode 100644 index 0000000000000..870494363c85a --- /dev/null +++ b/src/librustc/traits/specialize/mod.rs @@ -0,0 +1,293 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Logic and data structures related to impl specialization, explained in +// greater detail below. +// +// At the moment, this implementation support only the simple "chain" rule: +// If any two impls overlap, one must be a strict subset of the other. +// +// See traits/README.md for a bit more detail on how specialization +// fits together with the rest of the trait machinery. 
+ +use super::{SelectionContext, FulfillmentContext}; +use super::util::impl_trait_ref_and_oblig; + +use rustc_data_structures::fx::FxHashMap; +use hir::def_id::DefId; +use infer::{InferCtxt, InferOk}; +use middle::region; +use ty::subst::{Subst, Substs}; +use traits::{self, Reveal, ObligationCause}; +use ty::{self, TyCtxt, TypeFoldable}; +use syntax_pos::DUMMY_SP; + +use syntax::ast; + +pub mod specialization_graph; + +/// Information pertinent to an overlapping impl error. +pub struct OverlapError { + pub with_impl: DefId, + pub trait_desc: String, + pub self_desc: Option +} + +/// Given a subst for the requested impl, translate it to a subst +/// appropriate for the actual item definition (whether it be in that impl, +/// a parent impl, or the trait). +/// When we have selected one impl, but are actually using item definitions from +/// a parent impl providing a default, we need a way to translate between the +/// type parameters of the two impls. Here the `source_impl` is the one we've +/// selected, and `source_substs` is a substitution of its generics. +/// And `target_node` is the impl/trait we're actually going to get the +/// definition from. The resulting substitution will map from `target_node`'s +/// generics to `source_impl`'s generics as instantiated by `source_subst`. +/// +/// For example, consider the following scenario: +/// +/// ```rust +/// trait Foo { ... } +/// impl Foo for (T, U) { ... } // target impl +/// impl Foo for (V, V) { ... } // source impl +/// ``` +/// +/// Suppose we have selected "source impl" with `V` instantiated with `u32`. +/// This function will produce a substitution with `T` and `U` both mapping to `u32`. 
+/// +/// Where clauses add some trickiness here, because they can be used to "define" +/// an argument indirectly: +/// +/// ```rust +/// impl<'a, I, T: 'a> Iterator for Cloned +/// where I: Iterator, T: Clone +/// ``` +/// +/// In a case like this, the substitution for `T` is determined indirectly, +/// through associated type projection. We deal with such cases by using +/// *fulfillment* to relate the two impls, requiring that all projections are +/// resolved. +pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + source_impl: DefId, + source_substs: &'tcx Substs<'tcx>, + target_node: specialization_graph::Node) + -> &'tcx Substs<'tcx> { + let source_trait_ref = infcx.tcx + .impl_trait_ref(source_impl) + .unwrap() + .subst(infcx.tcx, &source_substs); + + // translate the Self and TyParam parts of the substitution, since those + // vary across impls + let target_substs = match target_node { + specialization_graph::Node::Impl(target_impl) => { + // no need to translate if we're targetting the impl we started with + if source_impl == target_impl { + return source_substs; + } + + fulfill_implication(infcx, source_trait_ref, target_impl).unwrap_or_else(|_| { + bug!("When translating substitutions for specialization, the expected \ + specializaiton failed to hold") + }) + } + specialization_graph::Node::Trait(..) => source_trait_ref.substs, + }; + + // directly inherent the method generics, since those do not vary across impls + source_substs.rebase_onto(infcx.tcx, source_impl, target_substs) +} + +/// Given a selected impl described by `impl_data`, returns the +/// definition and substitions for the method with the name `name`, +/// and trait method substitutions `substs`, in that impl, a less +/// specialized impl, or the trait default, whichever applies. 
+pub fn find_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + name: ast::Name, + substs: &'tcx Substs<'tcx>, + impl_data: &super::VtableImplData<'tcx, ()>) + -> (DefId, &'tcx Substs<'tcx>) +{ + assert!(!substs.needs_infer()); + + let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap(); + let trait_def = tcx.lookup_trait_def(trait_def_id); + + let ancestors = trait_def.ancestors(impl_data.impl_def_id); + match ancestors.defs(tcx, name, ty::AssociatedKind::Method).next() { + Some(node_item) => { + let substs = tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| { + let substs = substs.rebase_onto(tcx, trait_def_id, impl_data.substs); + let substs = translate_substs(&infcx, impl_data.impl_def_id, + substs, node_item.node); + tcx.lift(&substs).unwrap_or_else(|| { + bug!("find_method: translate_substs \ + returned {:?} which contains inference types/regions", + substs); + }) + }); + (node_item.item.def_id, substs) + } + None => { + bug!("method {:?} not found in {:?}", name, impl_data.impl_def_id) + } + } +} + +/// Is impl1 a specialization of impl2? +/// +/// Specialization is determined by the sets of types to which the impls apply; +/// impl1 specializes impl2 if it applies to a subset of the types impl2 applies +/// to. +pub fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + impl1_def_id: DefId, + impl2_def_id: DefId) -> bool { + debug!("specializes({:?}, {:?})", impl1_def_id, impl2_def_id); + + if let Some(r) = tcx.specializes_cache.borrow().check(impl1_def_id, impl2_def_id) { + return r; + } + + // The feature gate should prevent introducing new specializations, but not + // taking advantage of upstream ones. 
+ if !tcx.sess.features.borrow().specialization && + (impl1_def_id.is_local() || impl2_def_id.is_local()) { + return false; + } + + // We determine whether there's a subset relationship by: + // + // - skolemizing impl1, + // - assuming the where clauses for impl1, + // - instantiating impl2 with fresh inference variables, + // - unifying, + // - attempting to prove the where clauses for impl2 + // + // The last three steps are encapsulated in `fulfill_implication`. + // + // See RFC 1210 for more details and justification. + + // Currently we do not allow e.g. a negative impl to specialize a positive one + if tcx.trait_impl_polarity(impl1_def_id) != tcx.trait_impl_polarity(impl2_def_id) { + return false; + } + + // create a parameter environment corresponding to a (skolemized) instantiation of impl1 + let penv = tcx.construct_parameter_environment(DUMMY_SP, + impl1_def_id, + region::DUMMY_CODE_EXTENT); + let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id) + .unwrap() + .subst(tcx, &penv.free_substs); + + // Create a infcx, taking the predicates of impl1 as assumptions: + let result = tcx.infer_ctxt(None, Some(penv), Reveal::ExactMatch).enter(|infcx| { + // Normalize the trait reference. The WF rules ought to ensure + // that this always succeeds. + let impl1_trait_ref = + match traits::fully_normalize(&infcx, ObligationCause::dummy(), &impl1_trait_ref) { + Ok(impl1_trait_ref) => impl1_trait_ref, + Err(err) => { + bug!("failed to fully normalize {:?}: {:?}", impl1_trait_ref, err); + } + }; + + // Attempt to prove that impl2 applies, given all of the above. + fulfill_implication(&infcx, impl1_trait_ref, impl2_def_id).is_ok() + }); + + tcx.specializes_cache.borrow_mut().insert(impl1_def_id, impl2_def_id, result); + result +} + +/// Attempt to fulfill all obligations of `target_impl` after unification with +/// `source_trait_ref`. 
If successful, returns a substitution for *all* the +/// generics of `target_impl`, including both those needed to unify with +/// `source_trait_ref` and those whose identity is determined via a where +/// clause in the impl. +fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + source_trait_ref: ty::TraitRef<'tcx>, + target_impl: DefId) + -> Result<&'tcx Substs<'tcx>, ()> { + let selcx = &mut SelectionContext::new(&infcx); + let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl); + let (target_trait_ref, obligations) = impl_trait_ref_and_oblig(selcx, + target_impl, + target_substs); + + // do the impls unify? If not, no specialization. + match infcx.eq_trait_refs(true, + &ObligationCause::dummy(), + source_trait_ref, + target_trait_ref) { + Ok(InferOk { obligations, .. }) => { + // FIXME(#32730) propagate obligations + assert!(obligations.is_empty()) + } + Err(_) => { + debug!("fulfill_implication: {:?} does not unify with {:?}", + source_trait_ref, + target_trait_ref); + return Err(()); + } + } + + // attempt to prove all of the predicates for impl2 given those for impl1 + // (which are packed up in penv) + + infcx.save_and_restore_obligations_in_snapshot_flag(|infcx| { + let mut fulfill_cx = FulfillmentContext::new(); + for oblig in obligations.into_iter() { + fulfill_cx.register_predicate_obligation(&infcx, oblig); + } + match fulfill_cx.select_all_or_error(infcx) { + Err(errors) => { + // no dice! + debug!("fulfill_implication: for impls on {:?} and {:?}, \ + could not fulfill: {:?} given {:?}", + source_trait_ref, + target_trait_ref, + errors, + infcx.parameter_environment.caller_bounds); + Err(()) + } + + Ok(()) => { + debug!("fulfill_implication: an impl for {:?} specializes {:?}", + source_trait_ref, + target_trait_ref); + + // Now resolve the *substitution* we built for the target earlier, replacing + // the inference variables inside with whatever we got from fulfillment. 
+ Ok(infcx.resolve_type_vars_if_possible(&target_substs)) + } + } + }) +} + +pub struct SpecializesCache { + map: FxHashMap<(DefId, DefId), bool> +} + +impl SpecializesCache { + pub fn new() -> Self { + SpecializesCache { + map: FxHashMap() + } + } + + pub fn check(&self, a: DefId, b: DefId) -> Option { + self.map.get(&(a, b)).cloned() + } + + pub fn insert(&mut self, a: DefId, b: DefId, result: bool) { + self.map.insert((a, b), result); + } +} diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs new file mode 100644 index 0000000000000..a41523f2def76 --- /dev/null +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -0,0 +1,355 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::{OverlapError, specializes}; + +use hir::def_id::DefId; +use traits::{self, Reveal}; +use ty::{self, TyCtxt, TraitDef, TypeFoldable}; +use ty::fast_reject::{self, SimplifiedType}; +use syntax::ast::Name; +use util::nodemap::{DefIdMap, FxHashMap}; + +/// A per-trait graph of impls in specialization order. At the moment, this +/// graph forms a tree rooted with the trait itself, with all other nodes +/// representing impls, and parent-child relationships representing +/// specializations. +/// +/// The graph provides two key services: +/// +/// - Construction, which implicitly checks for overlapping impls (i.e., impls +/// that overlap but where neither specializes the other -- an artifact of the +/// simple "chain" rule. +/// +/// - Parent extraction. 
In particular, the graph can give you the *immediate*
+/// parents of a given specializing impl, which is needed for extracting
+/// default items amongst other things. In the simple "chain" rule, every impl
+/// has at most one parent.
+pub struct Graph {
+ // all impls have a parent; the "root" impls have as their parent the def_id
+ // of the trait
+ parent: DefIdMap,
+
+ // the "root" impls are found by looking up the trait's def_id.
+ children: DefIdMap,
+}
+
+/// Children of a given impl, grouped into blanket/non-blanket varieties as is
+/// done in `TraitDef`.
+struct Children {
+ // Impls of a trait (or specializations of a given impl). To allow for
+ // quicker lookup, the impls are indexed by a simplified version of their
+ // `Self` type: impls with a simplifiable `Self` are stored in
+ // `nonblanket_impls` keyed by it, while all other impls are stored in
+ // `blanket_impls`.
+ //
+ // A similar division is used within `TraitDef`, but the lists there collect
+ // together *all* the impls for a trait, and are populated prior to building
+ // the specialization graph.
+
+ /// Impls of the trait.
+ nonblanket_impls: FxHashMap>,
+
+ /// Blanket impls associated with the trait.
+ blanket_impls: Vec,
+}
+
+/// The result of attempting to insert an impl into a group of children.
+enum Inserted {
+ /// The impl was inserted as a new child in this group of children.
+ BecameNewSibling,
+
+ /// The impl replaced an existing impl that specializes it.
+ Replaced(DefId),
+
+ /// The impl is a specialization of an existing child.
+ ShouldRecurseOn(DefId),
+}
+
+impl<'a, 'gcx, 'tcx> Children {
+ fn new() -> Children {
+ Children {
+ nonblanket_impls: FxHashMap(),
+ blanket_impls: vec![],
+ }
+ }
+
+ /// Insert an impl into this set of children without comparing to any existing impls
+ fn insert_blindly(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId) {
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ if let Some(sty) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false) {
+ self.nonblanket_impls.entry(sty).or_insert(vec![]).push(impl_def_id)
+ } else {
+ self.blanket_impls.push(impl_def_id)
+ }
+ }
+
+ /// Attempt to insert an impl into this set of children, while comparing for
+ /// specialization relationships.
+ fn insert(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId,
+ simplified_self: Option)
+ -> Result
+ {
+ for slot in match simplified_self {
+ Some(sty) => self.filtered_mut(sty),
+ None => self.iter_mut(),
+ } {
+ let possible_sibling = *slot;
+
+ let tcx = tcx.global_tcx();
+ let (le, ge) = tcx.infer_ctxt(None, None, Reveal::ExactMatch).enter(|infcx| {
+ let overlap = traits::overlapping_impls(&infcx,
+ possible_sibling,
+ impl_def_id);
+ if let Some(impl_header) = overlap {
+ let le = specializes(tcx, impl_def_id, possible_sibling);
+ let ge = specializes(tcx, possible_sibling, impl_def_id);
+
+ if le == ge {
+ // overlap, but no specialization; error out
+ let trait_ref = impl_header.trait_ref.unwrap();
+ let self_ty = trait_ref.self_ty();
+ Err(OverlapError {
+ with_impl: possible_sibling,
+ trait_desc: trait_ref.to_string(),
+ // only report the Self type if it has at least
+ // some outer concrete shell; otherwise, it's
+ // not adding much information.
+ self_desc: if self_ty.has_concrete_skeleton() { + Some(self_ty.to_string()) + } else { + None + } + }) + } else { + Ok((le, ge)) + } + } else { + Ok((false, false)) + } + })?; + + if le && !ge { + debug!("descending as child of TraitRef {:?}", + tcx.impl_trait_ref(possible_sibling).unwrap()); + + // the impl specializes possible_sibling + return Ok(Inserted::ShouldRecurseOn(possible_sibling)); + } else if ge && !le { + debug!("placing as parent of TraitRef {:?}", + tcx.impl_trait_ref(possible_sibling).unwrap()); + + // possible_sibling specializes the impl + *slot = impl_def_id; + return Ok(Inserted::Replaced(possible_sibling)); + } else { + // no overlap (error bailed already via ?) + } + } + + // no overlap with any potential siblings, so add as a new sibling + debug!("placing as new sibling"); + self.insert_blindly(tcx, impl_def_id); + Ok(Inserted::BecameNewSibling) + } + + fn iter_mut(&'a mut self) -> Box + 'a> { + let nonblanket = self.nonblanket_impls.iter_mut().flat_map(|(_, v)| v.iter_mut()); + Box::new(self.blanket_impls.iter_mut().chain(nonblanket)) + } + + fn filtered_mut(&'a mut self, sty: SimplifiedType) + -> Box + 'a> { + let nonblanket = self.nonblanket_impls.entry(sty).or_insert(vec![]).iter_mut(); + Box::new(self.blanket_impls.iter_mut().chain(nonblanket)) + } +} + +impl<'a, 'gcx, 'tcx> Graph { + pub fn new() -> Graph { + Graph { + parent: Default::default(), + children: Default::default(), + } + } + + /// Insert a local impl into the specialization graph. If an existing impl + /// conflicts with it (has overlap, but neither specializes the other), + /// information about the area of overlap is returned in the `Err`. 
+ pub fn insert(&mut self,
+ tcx: TyCtxt<'a, 'gcx, 'tcx>,
+ impl_def_id: DefId)
+ -> Result<(), OverlapError> {
+ assert!(impl_def_id.is_local());
+
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ let trait_def_id = trait_ref.def_id;
+
+ debug!("insert({:?}): inserting TraitRef {:?} into specialization graph",
+ impl_def_id, trait_ref);
+
+ // if the reference itself contains an earlier error (e.g., due to a
+ // resolution failure), then we just insert the impl at the top level of
+ // the graph and claim that there's no overlap (in order to suppress
+ // bogus errors).
+ if trait_ref.references_error() {
+ debug!("insert: inserting dummy node for erroneous TraitRef {:?}, \
+ impl_def_id={:?}, trait_def_id={:?}",
+ trait_ref, impl_def_id, trait_def_id);
+
+ self.parent.insert(impl_def_id, trait_def_id);
+ self.children.entry(trait_def_id).or_insert(Children::new())
+ .insert_blindly(tcx, impl_def_id);
+ return Ok(());
+ }
+
+ let mut parent = trait_def_id;
+ let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), false);
+
+ // Descend the specialization tree, where `parent` is the current parent node
+ loop {
+ use self::Inserted::*;
+
+ let insert_result = self.children.entry(parent).or_insert(Children::new())
+ .insert(tcx, impl_def_id, simplified)?;
+
+ match insert_result {
+ BecameNewSibling => {
+ break;
+ }
+ Replaced(new_child) => {
+ self.parent.insert(new_child, impl_def_id);
+ let mut new_children = Children::new();
+ new_children.insert_blindly(tcx, new_child);
+ self.children.insert(impl_def_id, new_children);
+ break;
+ }
+ ShouldRecurseOn(new_parent) => {
+ parent = new_parent;
+ }
+ }
+ }
+
+ self.parent.insert(impl_def_id, parent);
+ Ok(())
+ }
+
+ /// Insert cached metadata mapping from a child impl back to its parent.
+ pub fn record_impl_from_cstore(&mut self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + parent: DefId, + child: DefId) { + if self.parent.insert(child, parent).is_some() { + bug!("When recording an impl from the crate store, information about its parent \ + was already present."); + } + + self.children.entry(parent).or_insert(Children::new()).insert_blindly(tcx, child); + } + + /// The parent of a given impl, which is the def id of the trait when the + /// impl is a "specialization root". + pub fn parent(&self, child: DefId) -> DefId { + *self.parent.get(&child).unwrap() + } +} + +/// A node in the specialization graph is either an impl or a trait +/// definition; either can serve as a source of item definitions. +/// There is always exactly one trait definition node: the root. +#[derive(Debug, Copy, Clone)] +pub enum Node { + Impl(DefId), + Trait(DefId), +} + +impl<'a, 'gcx, 'tcx> Node { + pub fn is_from_trait(&self) -> bool { + match *self { + Node::Trait(..) => true, + _ => false, + } + } + + /// Iterate over the items defined directly by the given (impl or trait) node. + #[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. 
+ pub fn items(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) + -> impl Iterator + 'a { + tcx.associated_items(self.def_id()) + } + + pub fn def_id(&self) -> DefId { + match *self { + Node::Impl(did) => did, + Node::Trait(did) => did, + } + } +} + +pub struct Ancestors<'a> { + trait_def: &'a TraitDef, + current_source: Option, +} + +impl<'a> Iterator for Ancestors<'a> { + type Item = Node; + fn next(&mut self) -> Option { + let cur = self.current_source.take(); + if let Some(Node::Impl(cur_impl)) = cur { + let parent = self.trait_def.specialization_graph.borrow().parent(cur_impl); + if parent == self.trait_def.def_id { + self.current_source = Some(Node::Trait(parent)); + } else { + self.current_source = Some(Node::Impl(parent)); + } + } + cur + } +} + +pub struct NodeItem { + pub node: Node, + pub item: T, +} + +impl NodeItem { + pub fn map U>(self, f: F) -> NodeItem { + NodeItem { + node: self.node, + item: f(self.item), + } + } +} + +impl<'a, 'gcx, 'tcx> Ancestors<'a> { + /// Search the items from the given ancestors, returning each definition + /// with the given name and the given kind. + #[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. + pub fn defs(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, name: Name, kind: ty::AssociatedKind) + -> impl Iterator> + 'a { + self.flat_map(move |node| { + node.items(tcx).filter(move |item| item.kind == kind && item.name == name) + .map(move |item| NodeItem { node: node, item: item }) + }) + } +} + +/// Walk up the specialization ancestors of a given impl, starting with that +/// impl itself. 
+pub fn ancestors<'a>(trait_def: &'a TraitDef, start_from_impl: DefId) -> Ancestors<'a> { + Ancestors { + trait_def: trait_def, + current_source: Some(Node::Impl(start_from_impl)), + } +} diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs new file mode 100644 index 0000000000000..dedb126d7ff6d --- /dev/null +++ b/src/librustc/traits/structural_impls.rs @@ -0,0 +1,604 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use traits; +use traits::project::Normalized; +use ty::{Lift, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; + +use std::fmt; +use std::rc::Rc; + +// structural impls for the structs in traits + +impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Normalized({:?},{:?})", + self.value, + self.obligations) + } +} + +impl<'tcx> fmt::Debug for traits::RegionObligation<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "RegionObligation(sub_region={:?}, sup_type={:?})", + self.sub_region, + self.sup_type) + } +} +impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Obligation(predicate={:?},depth={})", + self.predicate, + self.recursion_depth) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::Vtable<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + super::VtableImpl(ref v) => + write!(f, "{:?}", v), + + super::VtableDefaultImpl(ref t) => + write!(f, "{:?}", t), + + super::VtableClosure(ref d) => + write!(f, "{:?}", d), + + super::VtableFnPointer(ref 
d) => + write!(f, "VtableFnPointer({:?})", d), + + super::VtableObject(ref d) => + write!(f, "{:?}", d), + + super::VtableParam(ref n) => + write!(f, "VtableParam({:?})", n), + + super::VtableBuiltin(ref d) => + write!(f, "{:?}", d) + } + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableImplData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableImpl(impl_def_id={:?}, substs={:?}, nested={:?})", + self.impl_def_id, + self.substs, + self.nested) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableClosureData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableClosure(closure_def_id={:?}, substs={:?}, nested={:?})", + self.closure_def_id, + self.substs, + self.nested) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableBuiltinData { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableBuiltin(nested={:?})", self.nested) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableDefaultImplData { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableDefaultImplData(trait_def_id={:?}, nested={:?})", + self.trait_def_id, + self.nested) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableObjectData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableObject(upcast={:?}, vtable_base={}, nested={:?})", + self.upcast_trait_ref, + self.vtable_base, + self.nested) + } +} + +impl<'tcx, N: fmt::Debug> fmt::Debug for traits::VtableFnPointerData<'tcx, N> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "VtableFnPointer(fn_ty={:?}, nested={:?})", + self.fn_ty, + self.nested) + } +} + +impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "FulfillmentError({:?},{:?})", + self.obligation, + self.code) + } +} + +impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> { + fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + super::CodeSelectionError(ref e) => write!(f, "{:?}", e), + super::CodeProjectionError(ref e) => write!(f, "{:?}", e), + super::CodeAmbiguity => write!(f, "Ambiguity") + } + } +} + +impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "MismatchedProjectionTypes({:?})", self.err) + } +} + +/////////////////////////////////////////////////////////////////////////// +// Lift implementations + +impl<'a, 'tcx> Lift<'tcx> for traits::SelectionError<'a> { + type Lifted = traits::SelectionError<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match *self { + super::Unimplemented => Some(super::Unimplemented), + super::OutputTypeParameterMismatch(a, b, ref err) => { + tcx.lift(&(a, b)).and_then(|(a, b)| { + tcx.lift(err).map(|err| { + super::OutputTypeParameterMismatch(a, b, err) + }) + }) + } + super::TraitNotObjectSafe(def_id) => { + Some(super::TraitNotObjectSafe(def_id)) + } + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCauseCode<'a> { + type Lifted = traits::ObligationCauseCode<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match *self { + super::MiscObligation => Some(super::MiscObligation), + super::SliceOrArrayElem => Some(super::SliceOrArrayElem), + super::TupleElem => Some(super::TupleElem), + super::ProjectionWf(proj) => tcx.lift(&proj).map(super::ProjectionWf), + super::ItemObligation(def_id) => Some(super::ItemObligation(def_id)), + super::ReferenceOutlivesReferent(ty) => { + tcx.lift(&ty).map(super::ReferenceOutlivesReferent) + } + super::ObjectTypeBound(ty, r) => { + tcx.lift(&ty).and_then(|ty| { + tcx.lift(&r).and_then(|r| { + Some(super::ObjectTypeBound(ty, r)) + }) + }) + } + super::ObjectCastObligation(ty) => { + tcx.lift(&ty).map(super::ObjectCastObligation) + } + super::AssignmentLhsSized => 
Some(super::AssignmentLhsSized), + super::StructInitializerSized => Some(super::StructInitializerSized), + super::VariableType(id) => Some(super::VariableType(id)), + super::ReturnType => Some(super::ReturnType), + super::RepeatVec => Some(super::RepeatVec), + super::FieldSized => Some(super::FieldSized), + super::ConstSized => Some(super::ConstSized), + super::SharedStatic => Some(super::SharedStatic), + super::BuiltinDerivedObligation(ref cause) => { + tcx.lift(cause).map(super::BuiltinDerivedObligation) + } + super::ImplDerivedObligation(ref cause) => { + tcx.lift(cause).map(super::ImplDerivedObligation) + } + super::CompareImplMethodObligation { item_name, + impl_item_def_id, + trait_item_def_id, + lint_id } => { + Some(super::CompareImplMethodObligation { + item_name: item_name, + impl_item_def_id: impl_item_def_id, + trait_item_def_id: trait_item_def_id, + lint_id: lint_id, + }) + } + super::ExprAssignable => { + Some(super::ExprAssignable) + } + super::MatchExpressionArm { arm_span, source } => { + Some(super::MatchExpressionArm { arm_span: arm_span, + source: source }) + } + super::IfExpression => { + Some(super::IfExpression) + } + super::IfExpressionWithNoElse => { + Some(super::IfExpressionWithNoElse) + } + super::EquatePredicate => { + Some(super::EquatePredicate) + } + super::MainFunctionType => { + Some(super::MainFunctionType) + } + super::StartFunctionType => { + Some(super::StartFunctionType) + } + super::IntrinsicType => { + Some(super::IntrinsicType) + } + super::MethodReceiver => { + Some(super::MethodReceiver) + } + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for traits::DerivedObligationCause<'a> { + type Lifted = traits::DerivedObligationCause<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.parent_trait_ref).and_then(|trait_ref| { + tcx.lift(&*self.parent_code).map(|code| { + traits::DerivedObligationCause { + parent_trait_ref: trait_ref, + parent_code: Rc::new(code) + } + }) + }) + } +} + 
+impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCause<'a> { + type Lifted = traits::ObligationCause<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.code).map(|code| { + traits::ObligationCause { + span: self.span, + body_id: self.body_id, + code: code, + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for traits::DeferredObligation<'a> { + type Lifted = traits::DeferredObligation<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.predicate).and_then(|predicate| { + tcx.lift(&self.cause).map(|cause| { + traits::DeferredObligation { + predicate: predicate, + cause: cause + } + }) + }) + } +} + +// For trans only. +impl<'a, 'tcx> Lift<'tcx> for traits::Vtable<'a, ()> { + type Lifted = traits::Vtable<'tcx, ()>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match self.clone() { + traits::VtableImpl(traits::VtableImplData { + impl_def_id, + substs, + nested + }) => { + tcx.lift(&substs).map(|substs| { + traits::VtableImpl(traits::VtableImplData { + impl_def_id: impl_def_id, + substs: substs, + nested: nested + }) + }) + } + traits::VtableDefaultImpl(t) => Some(traits::VtableDefaultImpl(t)), + traits::VtableClosure(traits::VtableClosureData { + closure_def_id, + substs, + nested + }) => { + tcx.lift(&substs).map(|substs| { + traits::VtableClosure(traits::VtableClosureData { + closure_def_id: closure_def_id, + substs: substs, + nested: nested + }) + }) + } + traits::VtableFnPointer(traits::VtableFnPointerData { fn_ty, nested }) => { + tcx.lift(&fn_ty).map(|fn_ty| { + traits::VtableFnPointer(traits::VtableFnPointerData { + fn_ty: fn_ty, + nested: nested, + }) + }) + } + traits::VtableParam(n) => Some(traits::VtableParam(n)), + traits::VtableBuiltin(d) => Some(traits::VtableBuiltin(d)), + traits::VtableObject(traits::VtableObjectData { + upcast_trait_ref, + vtable_base, + nested + }) => { + tcx.lift(&upcast_trait_ref).map(|trait_ref| { + 
traits::VtableObject(traits::VtableObjectData { + upcast_trait_ref: trait_ref, + vtable_base: vtable_base, + nested: nested + }) + }) + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// TypeFoldable implementations. + +impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> +{ + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::Obligation { + cause: self.cause.clone(), + recursion_depth: self.recursion_depth, + predicate: self.predicate.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.predicate.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableImplData<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableImplData { + impl_def_id: self.impl_def_id, + substs: self.substs.fold_with(folder), + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) || self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableClosureData<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableClosureData { + closure_def_id: self.closure_def_id, + substs: self.substs.fold_with(folder), + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) || self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableDefaultImplData { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableDefaultImplData { + trait_def_id: self.trait_def_id, + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: 
&mut V) -> bool { + self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableBuiltinData { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableBuiltinData { + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableObjectData<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableObjectData { + upcast_trait_ref: self.upcast_trait_ref.fold_with(folder), + vtable_base: self.vtable_base, + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.upcast_trait_ref.visit_with(visitor) || self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::VtableFnPointerData<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::VtableFnPointerData { + fn_ty: self.fn_ty.fold_with(folder), + nested: self.nested.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.fn_ty.visit_with(visitor) || self.nested.visit_with(visitor) + } +} + +impl<'tcx, N: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Vtable<'tcx, N> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + traits::VtableImpl(ref v) => traits::VtableImpl(v.fold_with(folder)), + traits::VtableDefaultImpl(ref t) => traits::VtableDefaultImpl(t.fold_with(folder)), + traits::VtableClosure(ref d) => { + traits::VtableClosure(d.fold_with(folder)) + } + traits::VtableFnPointer(ref d) => { + traits::VtableFnPointer(d.fold_with(folder)) + } + traits::VtableParam(ref n) => traits::VtableParam(n.fold_with(folder)), + traits::VtableBuiltin(ref 
d) => traits::VtableBuiltin(d.fold_with(folder)), + traits::VtableObject(ref d) => traits::VtableObject(d.fold_with(folder)), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + traits::VtableImpl(ref v) => v.visit_with(visitor), + traits::VtableDefaultImpl(ref t) => t.visit_with(visitor), + traits::VtableClosure(ref d) => d.visit_with(visitor), + traits::VtableFnPointer(ref d) => d.visit_with(visitor), + traits::VtableParam(ref n) => n.visit_with(visitor), + traits::VtableBuiltin(ref d) => d.visit_with(visitor), + traits::VtableObject(ref d) => d.visit_with(visitor), + } + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Normalized<'tcx, T> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + Normalized { + value: self.value.fold_with(folder), + obligations: self.obligations.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.value.visit_with(visitor) || self.obligations.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for traits::ObligationCauseCode<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + super::ExprAssignable | + super::MatchExpressionArm { arm_span: _, source: _ } | + super::IfExpression | + super::IfExpressionWithNoElse | + super::EquatePredicate | + super::MainFunctionType | + super::StartFunctionType | + super::IntrinsicType | + super::MethodReceiver | + super::MiscObligation | + super::SliceOrArrayElem | + super::TupleElem | + super::ItemObligation(_) | + super::AssignmentLhsSized | + super::StructInitializerSized | + super::VariableType(_) | + super::ReturnType | + super::RepeatVec | + super::FieldSized | + super::ConstSized | + super::SharedStatic | + super::CompareImplMethodObligation { .. 
} => self.clone(), + + super::ProjectionWf(proj) => super::ProjectionWf(proj.fold_with(folder)), + super::ReferenceOutlivesReferent(ty) => { + super::ReferenceOutlivesReferent(ty.fold_with(folder)) + } + super::ObjectTypeBound(ty, r) => { + super::ObjectTypeBound(ty.fold_with(folder), r.fold_with(folder)) + } + super::ObjectCastObligation(ty) => { + super::ObjectCastObligation(ty.fold_with(folder)) + } + super::BuiltinDerivedObligation(ref cause) => { + super::BuiltinDerivedObligation(cause.fold_with(folder)) + } + super::ImplDerivedObligation(ref cause) => { + super::ImplDerivedObligation(cause.fold_with(folder)) + } + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + super::ExprAssignable | + super::MatchExpressionArm { arm_span: _, source: _ } | + super::IfExpression | + super::IfExpressionWithNoElse | + super::EquatePredicate | + super::MainFunctionType | + super::StartFunctionType | + super::IntrinsicType | + super::MethodReceiver | + super::MiscObligation | + super::SliceOrArrayElem | + super::TupleElem | + super::ItemObligation(_) | + super::AssignmentLhsSized | + super::StructInitializerSized | + super::VariableType(_) | + super::ReturnType | + super::RepeatVec | + super::FieldSized | + super::ConstSized | + super::SharedStatic | + super::CompareImplMethodObligation { .. 
} => false, + + super::ProjectionWf(proj) => proj.visit_with(visitor), + super::ReferenceOutlivesReferent(ty) => ty.visit_with(visitor), + super::ObjectTypeBound(ty, r) => ty.visit_with(visitor) || r.visit_with(visitor), + super::ObjectCastObligation(ty) => ty.visit_with(visitor), + super::BuiltinDerivedObligation(ref cause) => cause.visit_with(visitor), + super::ImplDerivedObligation(ref cause) => cause.visit_with(visitor) + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for traits::DerivedObligationCause<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::DerivedObligationCause { + parent_trait_ref: self.parent_trait_ref.fold_with(folder), + parent_code: self.parent_code.fold_with(folder) + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.parent_trait_ref.visit_with(visitor) || self.parent_code.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for traits::ObligationCause<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::ObligationCause { + span: self.span, + body_id: self.body_id, + code: self.code.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.code.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for traits::DeferredObligation<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + traits::DeferredObligation { + predicate: self.predicate.fold_with(folder), + cause: self.cause.fold_with(folder) + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.predicate.visit_with(visitor) || self.cause.visit_with(visitor) + } +} diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs new file mode 100644 index 0000000000000..321936fe54be1 --- /dev/null +++ b/src/librustc/traits/util.rs @@ -0,0 +1,501 @@ +// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::def_id::DefId; +use ty::subst::{Subst, Substs}; +use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef}; +use ty::outlives::Component; +use util::nodemap::FxHashSet; + +use super::{Obligation, ObligationCause, PredicateObligation, SelectionContext, Normalized}; + +fn anonymize_predicate<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + pred: &ty::Predicate<'tcx>) + -> ty::Predicate<'tcx> { + match *pred { + ty::Predicate::Trait(ref data) => + ty::Predicate::Trait(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::Equate(ref data) => + ty::Predicate::Equate(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::RegionOutlives(ref data) => + ty::Predicate::RegionOutlives(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::TypeOutlives(ref data) => + ty::Predicate::TypeOutlives(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::Projection(ref data) => + ty::Predicate::Projection(tcx.anonymize_late_bound_regions(data)), + + ty::Predicate::WellFormed(data) => + ty::Predicate::WellFormed(data), + + ty::Predicate::ObjectSafe(data) => + ty::Predicate::ObjectSafe(data), + + ty::Predicate::ClosureKind(closure_def_id, kind) => + ty::Predicate::ClosureKind(closure_def_id, kind) + } +} + + +struct PredicateSet<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + set: FxHashSet>, +} + +impl<'a, 'gcx, 'tcx> PredicateSet<'a, 'gcx, 'tcx> { + fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PredicateSet<'a, 'gcx, 'tcx> { + PredicateSet { tcx: tcx, set: FxHashSet() } + } + + fn insert(&mut self, pred: &ty::Predicate<'tcx>) -> bool { + // We have to be careful here because we want + // + // for<'a> Foo<&'a int> + // + // and + // + // 
for<'b> Foo<&'b int> + // + // to be considered equivalent. So normalize all late-bound + // regions before we throw things into the underlying set. + self.set.insert(anonymize_predicate(self.tcx, pred)) + } +} + +/////////////////////////////////////////////////////////////////////////// +// `Elaboration` iterator +/////////////////////////////////////////////////////////////////////////// + +/// "Elaboration" is the process of identifying all the predicates that +/// are implied by a source predicate. Currently this basically means +/// walking the "supertraits" and other similar assumptions. For +/// example, if we know that `T : Ord`, the elaborator would deduce +/// that `T : PartialOrd` holds as well. Similarly, if we have `trait +/// Foo : 'static`, and we know that `T : Foo`, then we know that `T : +/// 'static`. +pub struct Elaborator<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + stack: Vec>, + visited: PredicateSet<'a, 'gcx, 'tcx>, +} + +pub fn elaborate_trait_ref<'cx, 'gcx, 'tcx>( + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>) + -> Elaborator<'cx, 'gcx, 'tcx> +{ + elaborate_predicates(tcx, vec![trait_ref.to_predicate()]) +} + +pub fn elaborate_trait_refs<'cx, 'gcx, 'tcx>( + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + trait_refs: &[ty::PolyTraitRef<'tcx>]) + -> Elaborator<'cx, 'gcx, 'tcx> +{ + let predicates = trait_refs.iter() + .map(|trait_ref| trait_ref.to_predicate()) + .collect(); + elaborate_predicates(tcx, predicates) +} + +pub fn elaborate_predicates<'cx, 'gcx, 'tcx>( + tcx: TyCtxt<'cx, 'gcx, 'tcx>, + mut predicates: Vec>) + -> Elaborator<'cx, 'gcx, 'tcx> +{ + let mut visited = PredicateSet::new(tcx); + predicates.retain(|pred| visited.insert(pred)); + Elaborator { stack: predicates, visited: visited } +} + +impl<'cx, 'gcx, 'tcx> Elaborator<'cx, 'gcx, 'tcx> { + pub fn filter_to_traits(self) -> FilterToTraits { + FilterToTraits::new(self) + } + + fn push(&mut self, predicate: &ty::Predicate<'tcx>) { + let tcx = self.visited.tcx; + match *predicate { + 
ty::Predicate::Trait(ref data) => { + // Predicates declared on the trait. + let predicates = tcx.item_super_predicates(data.def_id()); + + let mut predicates: Vec<_> = + predicates.predicates + .iter() + .map(|p| p.subst_supertrait(tcx, &data.to_poly_trait_ref())) + .collect(); + + debug!("super_predicates: data={:?} predicates={:?}", + data, predicates); + + // Only keep those bounds that we haven't already + // seen. This is necessary to prevent infinite + // recursion in some cases. One common case is when + // people define `trait Sized: Sized { }` rather than `trait + // Sized { }`. + predicates.retain(|r| self.visited.insert(r)); + + self.stack.extend(predicates); + } + ty::Predicate::WellFormed(..) => { + // Currently, we do not elaborate WF predicates, + // although we easily could. + } + ty::Predicate::ObjectSafe(..) => { + // Currently, we do not elaborate object-safe + // predicates. + } + ty::Predicate::Equate(..) => { + // Currently, we do not "elaborate" predicates like + // `X == Y`, though conceivably we might. For example, + // `&X == &Y` implies that `X == Y`. + } + ty::Predicate::Projection(..) => { + // Nothing to elaborate in a projection predicate. + } + ty::Predicate::ClosureKind(..) => { + // Nothing to elaborate when waiting for a closure's kind to be inferred. + } + + ty::Predicate::RegionOutlives(..) => { + // Nothing to elaborate from `'a: 'b`. + } + + ty::Predicate::TypeOutlives(ref data) => { + // We know that `T: 'a` for some type `T`. We can + // often elaborate this. For example, if we know that + // `[U]: 'a`, that implies that `U: 'a`. Similarly, if + // we know `&'a U: 'b`, then we know that `'a: 'b` and + // `U: 'b`. + // + // We can basically ignore bound regions here. So for + // example `for<'c> Foo<'a,'c>: 'b` can be elaborated to + // `'a: 'b`. 
+ + // Ignore `for<'a> T: 'a` -- we might in the future + // consider this as evidence that `T: 'static`, but + // I'm a bit wary of such constructions and so for now + // I want to be conservative. --nmatsakis + let ty_max = data.skip_binder().0; + let r_min = data.skip_binder().1; + if r_min.is_bound() { + return; + } + + let visited = &mut self.visited; + self.stack.extend( + tcx.outlives_components(ty_max) + .into_iter() + .filter_map(|component| match component { + Component::Region(r) => if r.is_bound() { + None + } else { + Some(ty::Predicate::RegionOutlives( + ty::Binder(ty::OutlivesPredicate(r, r_min)))) + }, + + Component::Param(p) => { + let ty = tcx.mk_param(p.idx, p.name); + Some(ty::Predicate::TypeOutlives( + ty::Binder(ty::OutlivesPredicate(ty, r_min)))) + }, + + Component::UnresolvedInferenceVariable(_) => { + None + }, + + Component::Projection(_) | + Component::EscapingProjection(_) => { + // We can probably do more here. This + // corresponds to a case like `>::U: 'b`. + None + }, + }) + .filter(|p| visited.insert(p))); + } + } + } +} + +impl<'cx, 'gcx, 'tcx> Iterator for Elaborator<'cx, 'gcx, 'tcx> { + type Item = ty::Predicate<'tcx>; + + fn next(&mut self) -> Option> { + // Extract next item from top-most stack frame, if any. + let next_predicate = match self.stack.pop() { + Some(predicate) => predicate, + None => { + // No more stack frames. Done. 
+ return None; + } + }; + self.push(&next_predicate); + return Some(next_predicate); + } +} + +/////////////////////////////////////////////////////////////////////////// +// Supertrait iterator +/////////////////////////////////////////////////////////////////////////// + +pub type Supertraits<'cx, 'gcx, 'tcx> = FilterToTraits>; + +pub fn supertraits<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>, + trait_ref: ty::PolyTraitRef<'tcx>) + -> Supertraits<'cx, 'gcx, 'tcx> +{ + elaborate_trait_ref(tcx, trait_ref).filter_to_traits() +} + +pub fn transitive_bounds<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>, + bounds: &[ty::PolyTraitRef<'tcx>]) + -> Supertraits<'cx, 'gcx, 'tcx> +{ + elaborate_trait_refs(tcx, bounds).filter_to_traits() +} + +/////////////////////////////////////////////////////////////////////////// +// Iterator over def-ids of supertraits + +pub struct SupertraitDefIds<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + stack: Vec, + visited: FxHashSet, +} + +pub fn supertrait_def_ids<'cx, 'gcx, 'tcx>(tcx: TyCtxt<'cx, 'gcx, 'tcx>, + trait_def_id: DefId) + -> SupertraitDefIds<'cx, 'gcx, 'tcx> +{ + SupertraitDefIds { + tcx: tcx, + stack: vec![trait_def_id], + visited: Some(trait_def_id).into_iter().collect(), + } +} + +impl<'cx, 'gcx, 'tcx> Iterator for SupertraitDefIds<'cx, 'gcx, 'tcx> { + type Item = DefId; + + fn next(&mut self) -> Option { + let def_id = match self.stack.pop() { + Some(def_id) => def_id, + None => { return None; } + }; + + let predicates = self.tcx.item_super_predicates(def_id); + let visited = &mut self.visited; + self.stack.extend( + predicates.predicates + .iter() + .filter_map(|p| p.to_opt_poly_trait_ref()) + .map(|t| t.def_id()) + .filter(|&super_def_id| visited.insert(super_def_id))); + Some(def_id) + } +} + +/////////////////////////////////////////////////////////////////////////// +// Other +/////////////////////////////////////////////////////////////////////////// + +/// A filter around an iterator of predicates 
that makes it yield up +/// just trait references. +pub struct FilterToTraits { + base_iterator: I +} + +impl FilterToTraits { + fn new(base: I) -> FilterToTraits { + FilterToTraits { base_iterator: base } + } +} + +impl<'tcx,I:Iterator>> Iterator for FilterToTraits { + type Item = ty::PolyTraitRef<'tcx>; + + fn next(&mut self) -> Option> { + loop { + match self.base_iterator.next() { + None => { + return None; + } + Some(ty::Predicate::Trait(data)) => { + return Some(data.to_poly_trait_ref()); + } + Some(_) => { + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Other +/////////////////////////////////////////////////////////////////////////// + +/// Instantiate all bound parameters of the impl with the given substs, +/// returning the resulting trait ref and all obligations that arise. +/// The obligations are closed under normalization. +pub fn impl_trait_ref_and_oblig<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, + impl_def_id: DefId, + impl_substs: &Substs<'tcx>) + -> (ty::TraitRef<'tcx>, + Vec>) +{ + let impl_trait_ref = + selcx.tcx().impl_trait_ref(impl_def_id).unwrap(); + let impl_trait_ref = + impl_trait_ref.subst(selcx.tcx(), impl_substs); + let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } = + super::normalize(selcx, ObligationCause::dummy(), &impl_trait_ref); + + let predicates = selcx.tcx().item_predicates(impl_def_id); + let predicates = predicates.instantiate(selcx.tcx(), impl_substs); + let Normalized { value: predicates, obligations: normalization_obligations2 } = + super::normalize(selcx, ObligationCause::dummy(), &predicates); + let impl_obligations = + predicates_for_generics(ObligationCause::dummy(), 0, &predicates); + + let impl_obligations: Vec<_> = + impl_obligations.into_iter() + .chain(normalization_obligations1) + .chain(normalization_obligations2) + .collect(); + + (impl_trait_ref, impl_obligations) +} + +/// See 
`super::obligations_for_generics` +pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, + recursion_depth: usize, + generic_bounds: &ty::InstantiatedPredicates<'tcx>) + -> Vec> +{ + debug!("predicates_for_generics(generic_bounds={:?})", + generic_bounds); + + generic_bounds.predicates.iter().map(|predicate| { + Obligation { cause: cause.clone(), + recursion_depth: recursion_depth, + predicate: predicate.clone() } + }).collect() +} + +pub fn predicate_for_trait_ref<'tcx>( + cause: ObligationCause<'tcx>, + trait_ref: ty::TraitRef<'tcx>, + recursion_depth: usize) + -> PredicateObligation<'tcx> +{ + Obligation { + cause: cause, + recursion_depth: recursion_depth, + predicate: trait_ref.to_predicate(), + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn predicate_for_trait_def(self, + cause: ObligationCause<'tcx>, + trait_def_id: DefId, + recursion_depth: usize, + param_ty: Ty<'tcx>, + ty_params: &[Ty<'tcx>]) + -> PredicateObligation<'tcx> + { + let trait_ref = ty::TraitRef { + def_id: trait_def_id, + substs: self.mk_substs_trait(param_ty, ty_params) + }; + predicate_for_trait_ref(cause, trait_ref, recursion_depth) + } + + /// Cast a trait reference into a reference to one of its super + /// traits; returns `None` if `target_trait_def_id` is not a + /// supertrait. + pub fn upcast_choices(self, + source_trait_ref: ty::PolyTraitRef<'tcx>, + target_trait_def_id: DefId) + -> Vec> + { + if source_trait_ref.def_id() == target_trait_def_id { + return vec![source_trait_ref]; // shorcut the most common case + } + + supertraits(self, source_trait_ref) + .filter(|r| r.def_id() == target_trait_def_id) + .collect() + } + + /// Given a trait `trait_ref`, returns the number of vtable entries + /// that come from `trait_ref`, excluding its supertraits. Used in + /// computing the vtable base for an upcast trait of a trait object. 
+ pub fn count_own_vtable_entries(self, trait_ref: ty::PolyTraitRef<'tcx>) -> usize { + let mut entries = 0; + // Count number of methods and add them to the total offset. + // Skip over associated types and constants. + for trait_item in self.associated_items(trait_ref.def_id()) { + if trait_item.kind == ty::AssociatedKind::Method { + entries += 1; + } + } + entries + } + + /// Given an upcast trait object described by `object`, returns the + /// index of the method `method_def_id` (which should be part of + /// `object.upcast_trait_ref`) within the vtable for `object`. + pub fn get_vtable_index_of_object_method(self, + object: &super::VtableObjectData<'tcx, N>, + method_def_id: DefId) -> usize { + // Count number of methods preceding the one we are selecting and + // add them to the total offset. + // Skip over associated types and constants. + let mut entries = object.vtable_base; + for trait_item in self.associated_items(object.upcast_trait_ref.def_id()) { + if trait_item.def_id == method_def_id { + // The item with the ID we were given really ought to be a method. 
+ assert_eq!(trait_item.kind, ty::AssociatedKind::Method); + return entries; + } + if trait_item.kind == ty::AssociatedKind::Method { + entries += 1; + } + } + + bug!("get_vtable_index_of_object_method: {:?} was not found", + method_def_id); + } + + pub fn closure_trait_ref_and_return_type(self, + fn_trait_def_id: DefId, + self_ty: Ty<'tcx>, + sig: &ty::PolyFnSig<'tcx>, + tuple_arguments: TupleArgumentsFlag) + -> ty::Binder<(ty::TraitRef<'tcx>, Ty<'tcx>)> + { + let arguments_tuple = match tuple_arguments { + TupleArgumentsFlag::No => sig.0.inputs[0], + TupleArgumentsFlag::Yes => self.intern_tup(&sig.0.inputs[..]), + }; + let trait_ref = ty::TraitRef { + def_id: fn_trait_def_id, + substs: self.mk_substs_trait(self_ty, &[arguments_tuple]), + }; + ty::Binder((trait_ref, sig.0.output)) + } +} + +pub enum TupleArgumentsFlag { Yes, No } diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs new file mode 100644 index 0000000000000..b1846e0394148 --- /dev/null +++ b/src/librustc/ty/_match.rs @@ -0,0 +1,97 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ty::{self, Ty, TyCtxt}; +use ty::error::TypeError; +use ty::relate::{self, Relate, TypeRelation, RelateResult}; + +/// A type "A" *matches* "B" if the fresh types in B could be +/// substituted with values so as to make it equal to A. Matching is +/// intended to be used only on freshened types, and it basically +/// indicates if the non-freshened versions of A and B could have been +/// unified. +/// +/// It is only an approximation. If it yields false, unification would +/// definitely fail, but a true result doesn't mean unification would +/// succeed. 
This is because we don't track the "side-constraints" on +/// type variables, nor do we track if the same freshened type appears +/// more than once. To some extent these approximations could be +/// fixed, given effort. +/// +/// Like subtyping, matching is really a binary relation, so the only +/// important thing about the result is Ok/Err. Also, matching never +/// affects any type variables or unification state. +pub struct Match<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx> +} + +impl<'a, 'gcx, 'tcx> Match<'a, 'gcx, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Match<'a, 'gcx, 'tcx> { + Match { tcx: tcx } + } +} + +impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Match<'a, 'gcx, 'tcx> { + fn tag(&self) -> &'static str { "Match" } + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx } + fn a_is_expected(&self) -> bool { true } // irrelevant + + fn relate_with_variance>(&mut self, + _: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T> + { + self.relate(a, b) + } + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> { + debug!("{}.regions({:?}, {:?})", + self.tag(), + a, + b); + Ok(a) + } + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> { + debug!("{}.tys({:?}, {:?})", self.tag(), + a, b); + if a == b { return Ok(a); } + + match (&a.sty, &b.sty) { + (_, &ty::TyInfer(ty::FreshTy(_))) | + (_, &ty::TyInfer(ty::FreshIntTy(_))) | + (_, &ty::TyInfer(ty::FreshFloatTy(_))) => { + Ok(a) + } + + (&ty::TyInfer(_), _) | + (_, &ty::TyInfer(_)) => { + Err(TypeError::Sorts(relate::expected_found(self, &a, &b))) + } + + (&ty::TyError, _) | (_, &ty::TyError) => { + Ok(self.tcx().types.err) + } + + _ => { + relate::super_relate_tys(self, a, b) + } + } + } + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx> + { + Ok(ty::Binder(self.relate(a.skip_binder(), b.skip_binder())?)) + } +} diff --git 
a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs new file mode 100644 index 0000000000000..333a5c74cb45c --- /dev/null +++ b/src/librustc/ty/adjustment.rs @@ -0,0 +1,187 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ty::{self, Ty, TyCtxt, TypeAndMut}; +use ty::LvaluePreference::{NoPreference}; + +use syntax::ast; +use syntax_pos::Span; + +use hir; + +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub struct Adjustment<'tcx> { + pub kind: Adjust<'tcx>, + pub target: Ty<'tcx> +} + +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +pub enum Adjust<'tcx> { + /// Go from ! to any type. + NeverToAny, + + /// Go from a fn-item type to a fn-pointer type. + ReifyFnPointer, + + /// Go from a safe fn pointer to an unsafe fn pointer. + UnsafeFnPointer, + + /// Go from a mut raw pointer to a const raw pointer. + MutToConstPointer, + + /// Represents coercing a pointer to a different kind of pointer - where 'kind' + /// here means either or both of raw vs borrowed vs unique and fat vs thin. + /// + /// We transform pointers by following the following steps in order: + /// 1. Deref the pointer `self.autoderefs` times (may be 0). + /// 2. If `autoref` is `Some(_)`, then take the address and produce either a + /// `&` or `*` pointer. + /// 3. If `unsize` is `Some(_)`, then apply the unsize transformation, + /// which will do things like convert thin pointers to fat + /// pointers, or convert structs containing thin pointers to + /// structs containing fat pointers, or convert between fat + /// pointers. 
We don't store the details of how the transform is + /// done (in fact, we don't know that, because it might depend on + /// the precise type parameters). We just store the target + /// type. Trans figures out what has to be done at monomorphization + /// time based on the precise source/target type at hand. + /// + /// To make that more concrete, here are some common scenarios: + /// + /// 1. The simplest cases are where the pointer is not adjusted fat vs thin. + /// Here the pointer will be dereferenced N times (where a dereference can + /// happen to raw or borrowed pointers or any smart pointer which implements + /// Deref, including Box<_>). The number of dereferences is given by + /// `autoderefs`. It can then be auto-referenced zero or one times, indicated + /// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is + /// None. + /// + /// 2. A thin-to-fat coercon involves unsizing the underlying data. We start + /// with a thin pointer, deref a number of times, unsize the underlying data, + /// then autoref. The 'unsize' phase may change a fixed length array to a + /// dynamically sized one, a concrete object to a trait object, or statically + /// sized struct to a dyncamically sized one. E.g., &[i32; 4] -> &[i32] is + /// represented by: + /// + /// ``` + /// Adjust::DerefRef { + /// autoderefs: 1, // &[i32; 4] -> [i32; 4] + /// autoref: Some(AutoBorrow::Ref), // [i32] -> &[i32] + /// unsize: Some([i32]), // [i32; 4] -> [i32] + /// } + /// ``` + /// + /// Note that for a struct, the 'deep' unsizing of the struct is not recorded. + /// E.g., `struct Foo { x: T }` we can coerce &Foo<[i32; 4]> to &Foo<[i32]> + /// The autoderef and -ref are the same as in the above example, but the type + /// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about + /// the underlying conversions from `[i32; 4]` to `[i32]`. + /// + /// 3. Coercing a `Box` to `Box` is an interesting special case. 
In + /// that case, we have the pointer we need coming in, so there are no + /// autoderefs, and no autoref. Instead we just do the `Unsize` transformation. + /// At some point, of course, `Box` should move out of the compiler, in which + /// case this is analogous to transformating a struct. E.g., Box<[i32; 4]> -> + /// Box<[i32]> is represented by: + /// + /// ``` + /// Adjust::DerefRef { + /// autoderefs: 0, + /// autoref: None, + /// unsize: Some(Box<[i32]>), + /// } + /// ``` + DerefRef { + /// Step 1. Apply a number of dereferences, producing an lvalue. + autoderefs: usize, + + /// Step 2. Optionally produce a pointer/reference from the value. + autoref: Option>, + + /// Step 3. Unsize a pointer/reference value, e.g. `&[T; n]` to + /// `&[T]`. Note that the source could be a thin or fat pointer. + unsize: bool, + } +} + +impl<'tcx> Adjustment<'tcx> { + pub fn is_identity(&self) -> bool { + match self.kind { + Adjust::NeverToAny => self.target.is_never(), + + Adjust::DerefRef { autoderefs: 0, autoref: None, unsize: false } => true, + + Adjust::ReifyFnPointer | + Adjust::UnsafeFnPointer | + Adjust::MutToConstPointer | + Adjust::DerefRef {..} => false, + } + } +} + +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +pub enum AutoBorrow<'tcx> { + /// Convert from T to &T. + Ref(&'tcx ty::Region, hir::Mutability), + + /// Convert from T to *T. + RawPtr(hir::Mutability), +} + +#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)] +pub enum CustomCoerceUnsized { + /// Records the index of the field being coerced. + Struct(usize) +} + +impl<'a, 'gcx, 'tcx> ty::TyS<'tcx> { + pub fn adjust_for_autoderef(&'tcx self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + expr_id: ast::NodeId, + expr_span: Span, + autoderef: u32, // how many autoderefs so far? 
+ mut method_type: F) + -> Ty<'tcx> where + F: FnMut(ty::MethodCall) -> Option>, + { + let method_call = ty::MethodCall::autoderef(expr_id, autoderef); + let mut adjusted_ty = self; + if let Some(method_ty) = method_type(method_call) { + // Method calls always have all late-bound regions + // fully instantiated. + adjusted_ty = tcx.no_late_bound_regions(&method_ty.fn_ret()).unwrap(); + } + match adjusted_ty.builtin_deref(true, NoPreference) { + Some(mt) => mt.ty, + None => { + span_bug!( + expr_span, + "the {}th autoderef for {} failed: {}", + autoderef, + expr_id, + adjusted_ty); + } + } + } + + pub fn adjust_for_autoref(&'tcx self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + autoref: Option>) + -> Ty<'tcx> { + match autoref { + None => self, + Some(AutoBorrow::Ref(r, m)) => { + tcx.mk_ref(r, TypeAndMut { ty: self, mutbl: m }) + } + Some(AutoBorrow::RawPtr(m)) => { + tcx.mk_ptr(TypeAndMut { ty: self, mutbl: m }) + } + } + } +} diff --git a/src/librustc/middle/ty/cast.rs b/src/librustc/ty/cast.rs similarity index 93% rename from src/librustc/middle/ty/cast.rs rename to src/librustc/ty/cast.rs index 8233b6b2b2b6e..0badb85e9e095 100644 --- a/src/librustc/middle/ty/cast.rs +++ b/src/librustc/ty/cast.rs @@ -11,7 +11,7 @@ // Helpers for handling cast expressions, used in both // typeck and trans. -use middle::ty::{self, Ty}; +use ty::{self, Ty}; use syntax::ast; @@ -65,11 +65,11 @@ impl<'tcx> CastTy<'tcx> { ty::TyInt(_) => Some(CastTy::Int(IntTy::I)), ty::TyUint(u) => Some(CastTy::Int(IntTy::U(u))), ty::TyFloat(_) => Some(CastTy::Float), - ty::TyEnum(d,_) if d.is_payloadfree() => + ty::TyAdt(d,_) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), ty::TyRawPtr(ref mt) => Some(CastTy::Ptr(mt)), ty::TyRef(_, ref mt) => Some(CastTy::RPtr(mt)), - ty::TyBareFn(..) => Some(CastTy::FnPtr), + ty::TyFnPtr(..) 
=> Some(CastTy::FnPtr), _ => None, } } diff --git a/src/librustc/middle/ty/contents.rs b/src/librustc/ty/contents.rs similarity index 75% rename from src/librustc/middle/ty/contents.rs rename to src/librustc/ty/contents.rs index 619201a4a9feb..8c3cb79294880 100644 --- a/src/librustc/middle/ty/contents.rs +++ b/src/librustc/ty/contents.rs @@ -8,10 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def_id::{DefId}; -use middle::ty::{self, Ty}; +use hir::def_id::{DefId}; +use ty::{self, Ty, TyCtxt}; use util::common::MemoizationMap; -use util::nodemap::FnvHashMap; +use util::nodemap::FxHashMap; use std::fmt; use std::ops; @@ -89,7 +89,7 @@ impl TypeContents { self.intersects(TC::InteriorUnsafe) } - pub fn needs_drop(&self, _: &ty::ctxt) -> bool { + pub fn needs_drop(&self, _: TyCtxt) -> bool { self.intersects(TC::NeedsDrop) } @@ -98,10 +98,11 @@ impl TypeContents { TC::OwnsOwned | (*self & TC::OwnsAll) } - pub fn union(v: &[T], mut f: F) -> TypeContents where - F: FnMut(&T) -> TypeContents, + pub fn union(v: I, mut f: F) -> TypeContents where + I: IntoIterator, + F: FnMut(T) -> TypeContents, { - v.iter().fold(TC::None, |tc, ty| tc | f(ty)) + v.into_iter().fold(TC::None, |tc, ty| tc | f(ty)) } pub fn has_dtor(&self) -> bool { @@ -139,15 +140,15 @@ impl fmt::Debug for TypeContents { } } -impl<'tcx> ty::TyS<'tcx> { - pub fn type_contents(&'tcx self, cx: &ty::ctxt<'tcx>) -> TypeContents { - return cx.tc_cache.memoize(self, || tc_ty(cx, self, &mut FnvHashMap())); +impl<'a, 'tcx> ty::TyS<'tcx> { + pub fn type_contents(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> TypeContents { + return tcx.tc_cache.memoize(self, || tc_ty(tcx, self, &mut FxHashMap())); - fn tc_ty<'tcx>(cx: &ty::ctxt<'tcx>, - ty: Ty<'tcx>, - cache: &mut FnvHashMap, TypeContents>) -> TypeContents + fn tc_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + cache: &mut FxHashMap, TypeContents>) -> TypeContents { - // Subtle: Note that 
we are *not* using cx.tc_cache here but rather a + // Subtle: Note that we are *not* using tcx.tc_cache here but rather a // private cache for this walk. This is needed in the case of cyclic // types like: // @@ -163,39 +164,38 @@ impl<'tcx> ty::TyS<'tcx> { // The problem is, as we are doing the computation, we will also // compute an *intermediate* contents for, e.g., Option of // TC::None. This is ok during the computation of List itself, but if - // we stored this intermediate value into cx.tc_cache, then later + // we stored this intermediate value into tcx.tc_cache, then later // requests for the contents of Option would also yield TC::None // which is incorrect. This value was computed based on the crutch // value for the type contents of list. The correct value is // TC::OwnsOwned. This manifested as issue #4821. - match cache.get(&ty) { - Some(tc) => { return *tc; } - None => {} + if let Some(tc) = cache.get(&ty) { + return *tc; } - match cx.tc_cache.borrow().get(&ty) { // Must check both caches! - Some(tc) => { return *tc; } - None => {} + // Must check both caches! + if let Some(tc) = tcx.tc_cache.borrow().get(&ty) { + return *tc; } cache.insert(ty, TC::None); let result = match ty.sty { // usize and isize are ffi-unsafe - ty::TyUint(ast::TyUs) | ty::TyInt(ast::TyIs) => { + ty::TyUint(ast::UintTy::Us) | ty::TyInt(ast::IntTy::Is) => { TC::None } // Scalar and unique types are sendable, and durable ty::TyInfer(ty::FreshIntTy(_)) | ty::TyInfer(ty::FreshFloatTy(_)) | - ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | - ty::TyBareFn(..) | ty::TyChar => { + ty::TyBool | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | ty::TyNever | + ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyChar => { TC::None } ty::TyBox(typ) => { - tc_ty(cx, typ, cache).owned_pointer() + tc_ty(tcx, typ, cache).owned_pointer() } - ty::TyTrait(_) => { + ty::TyDynamic(..) 
=> { TC::All - TC::InteriorParam } @@ -203,33 +203,35 @@ impl<'tcx> ty::TyS<'tcx> { TC::None } - ty::TyRef(_, _) => { + ty::TyRef(..) => { TC::None } ty::TyArray(ty, _) => { - tc_ty(cx, ty, cache) + tc_ty(tcx, ty, cache) } ty::TySlice(ty) => { - tc_ty(cx, ty, cache) + tc_ty(tcx, ty, cache) } ty::TyStr => TC::None, - ty::TyClosure(_, ref substs) => { - TypeContents::union(&substs.upvar_tys, |ty| tc_ty(cx, &ty, cache)) + ty::TyClosure(def_id, ref substs) => { + TypeContents::union( + substs.upvar_tys(def_id, tcx), + |ty| tc_ty(tcx, &ty, cache)) } ty::TyTuple(ref tys) => { TypeContents::union(&tys[..], - |ty| tc_ty(cx, *ty, cache)) + |ty| tc_ty(tcx, *ty, cache)) } - ty::TyStruct(def, substs) | ty::TyEnum(def, substs) => { + ty::TyAdt(def, substs) => { let mut res = TypeContents::union(&def.variants, |v| { TypeContents::union(&v.fields, |f| { - tc_ty(cx, f.ty(cx, substs), cache) + tc_ty(tcx, f.ty(tcx, substs), cache) }) }); @@ -237,17 +239,18 @@ impl<'tcx> ty::TyS<'tcx> { res = res | TC::OwnsDtor; } - apply_lang_items(cx, def.did, res) + apply_lang_items(tcx, def.did, res) } ty::TyProjection(..) | - ty::TyParam(_) => { + ty::TyParam(_) | + ty::TyAnon(..) => { TC::All } ty::TyInfer(_) | ty::TyError => { - cx.sess.bug("asked to compute contents of error type"); + bug!("asked to compute contents of error type"); } }; @@ -255,9 +258,10 @@ impl<'tcx> ty::TyS<'tcx> { result } - fn apply_lang_items(cx: &ty::ctxt, did: DefId, tc: TypeContents) - -> TypeContents { - if Some(did) == cx.lang_items.unsafe_cell_type() { + fn apply_lang_items<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + did: DefId, tc: TypeContents) + -> TypeContents { + if Some(did) == tcx.lang_items.unsafe_cell_type() { tc | TC::InteriorUnsafe } else { tc diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs new file mode 100644 index 0000000000000..17c335fc9c72f --- /dev/null +++ b/src/librustc/ty/context.rs @@ -0,0 +1,1612 @@ +// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! type context book-keeping + +use dep_graph::{DepGraph, DepTrackingMap}; +use session::Session; +use middle; +use hir::TraitMap; +use hir::def::Def; +use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; +use hir::map as ast_map; +use hir::map::{DefKey, DefPathData, DisambiguatedDefPathData}; +use middle::free_region::FreeRegionMap; +use middle::region::RegionMaps; +use middle::resolve_lifetime; +use middle::stability; +use mir::Mir; +use ty::subst::{Kind, Substs}; +use traits; +use ty::{self, TraitRef, Ty, TypeAndMut}; +use ty::{TyS, TypeVariants, Slice}; +use ty::{AdtKind, AdtDef, ClosureSubsts, Region}; +use hir::FreevarMap; +use ty::{BareFnTy, InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; +use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; +use ty::TypeVariants::*; +use ty::layout::{Layout, TargetDataLayout}; +use ty::maps; +use util::common::MemoizationMap; +use util::nodemap::{NodeMap, NodeSet, DefIdMap, DefIdSet}; +use util::nodemap::{FxHashMap, FxHashSet}; +use rustc_data_structures::accumulate_vec::AccumulateVec; + +use arena::TypedArena; +use std::borrow::Borrow; +use std::cell::{Cell, RefCell}; +use std::hash::{Hash, Hasher}; +use std::mem; +use std::ops::Deref; +use std::rc::Rc; +use std::iter; +use std::cmp::Ordering; +use syntax::ast::{self, Name, NodeId}; +use syntax::attr; +use syntax::symbol::{Symbol, keywords}; + +use hir; + +/// Internal storage +pub struct CtxtArenas<'tcx> { + // internings + type_: TypedArena>, + type_list: TypedArena>, + substs: TypedArena>, + bare_fn: TypedArena>, + region: TypedArena, + stability: TypedArena, + layout: TypedArena, + existential_predicates: TypedArena>, + + // references + 
generics: TypedArena>, + trait_def: TypedArena, + adt_def: TypedArena, + mir: TypedArena>>, +} + +impl<'tcx> CtxtArenas<'tcx> { + pub fn new() -> CtxtArenas<'tcx> { + CtxtArenas { + type_: TypedArena::new(), + type_list: TypedArena::new(), + substs: TypedArena::new(), + bare_fn: TypedArena::new(), + region: TypedArena::new(), + stability: TypedArena::new(), + layout: TypedArena::new(), + existential_predicates: TypedArena::new(), + + generics: TypedArena::new(), + trait_def: TypedArena::new(), + adt_def: TypedArena::new(), + mir: TypedArena::new() + } + } +} + +pub struct CtxtInterners<'tcx> { + /// The arenas that types etc are allocated from. + arenas: &'tcx CtxtArenas<'tcx>, + + /// Specifically use a speedy hash algorithm for these hash sets, + /// they're accessed quite often. + type_: RefCell>>>, + type_list: RefCell>>>>, + substs: RefCell>>>, + bare_fn: RefCell>>>, + region: RefCell>>, + stability: RefCell>, + layout: RefCell>, + existential_predicates: RefCell>>>>, +} + +impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { + fn new(arenas: &'tcx CtxtArenas<'tcx>) -> CtxtInterners<'tcx> { + CtxtInterners { + arenas: arenas, + type_: RefCell::new(FxHashSet()), + type_list: RefCell::new(FxHashSet()), + substs: RefCell::new(FxHashSet()), + bare_fn: RefCell::new(FxHashSet()), + region: RefCell::new(FxHashSet()), + stability: RefCell::new(FxHashSet()), + layout: RefCell::new(FxHashSet()), + existential_predicates: RefCell::new(FxHashSet()), + } + } + + /// Intern a type. global_interners is Some only if this is + /// a local interner and global_interners is its counterpart. 
+ fn intern_ty(&self, st: TypeVariants<'tcx>, + global_interners: Option<&CtxtInterners<'gcx>>) + -> Ty<'tcx> { + let ty = { + let mut interner = self.type_.borrow_mut(); + let global_interner = global_interners.map(|interners| { + interners.type_.borrow_mut() + }); + if let Some(&Interned(ty)) = interner.get(&st) { + return ty; + } + if let Some(ref interner) = global_interner { + if let Some(&Interned(ty)) = interner.get(&st) { + return ty; + } + } + + let flags = super::flags::FlagComputation::for_sty(&st); + let ty_struct = TyS { + sty: st, + flags: Cell::new(flags.flags), + region_depth: flags.depth, + }; + + // HACK(eddyb) Depend on flags being accurate to + // determine that all contents are in the global tcx. + // See comments on Lift for why we can't use that. + if !flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) { + if let Some(interner) = global_interners { + let ty_struct: TyS<'gcx> = unsafe { + mem::transmute(ty_struct) + }; + let ty: Ty<'gcx> = interner.arenas.type_.alloc(ty_struct); + global_interner.unwrap().insert(Interned(ty)); + return ty; + } + } else { + // Make sure we don't end up with inference + // types/regions in the global tcx. + if global_interners.is_none() { + drop(interner); + bug!("Attempted to intern `{:?}` which contains \ + inference types/regions in the global type context", + &ty_struct); + } + } + + // Don't be &mut TyS. 
+ let ty: Ty<'tcx> = self.arenas.type_.alloc(ty_struct); + interner.insert(Interned(ty)); + ty + }; + + debug!("Interned type: {:?} Pointer: {:?}", + ty, ty as *const TyS); + ty + } + +} + +pub struct CommonTypes<'tcx> { + pub bool: Ty<'tcx>, + pub char: Ty<'tcx>, + pub isize: Ty<'tcx>, + pub i8: Ty<'tcx>, + pub i16: Ty<'tcx>, + pub i32: Ty<'tcx>, + pub i64: Ty<'tcx>, + pub usize: Ty<'tcx>, + pub u8: Ty<'tcx>, + pub u16: Ty<'tcx>, + pub u32: Ty<'tcx>, + pub u64: Ty<'tcx>, + pub f32: Ty<'tcx>, + pub f64: Ty<'tcx>, + pub never: Ty<'tcx>, + pub err: Ty<'tcx>, +} + +pub struct Tables<'tcx> { + /// Resolved definitions for `::X` associated paths. + pub type_relative_path_defs: NodeMap, + + /// Stores the types for various nodes in the AST. Note that this table + /// is not guaranteed to be populated until after typeck. See + /// typeck::check::fn_ctxt for details. + pub node_types: NodeMap>, + + /// Stores the type parameters which were substituted to obtain the type + /// of this node. This only applies to nodes that refer to entities + /// parameterized by type parameters, such as generic fns, types, or + /// other items. + pub item_substs: NodeMap>, + + pub adjustments: NodeMap>, + + pub method_map: ty::MethodMap<'tcx>, + + /// Borrows + pub upvar_capture_map: ty::UpvarCaptureMap<'tcx>, + + /// Records the type of each closure. The def ID is the ID of the + /// expression defining the closure. + pub closure_tys: DefIdMap>, + + /// Records the type of each closure. The def ID is the ID of the + /// expression defining the closure. + pub closure_kinds: DefIdMap, + + /// For each fn, records the "liberated" types of its arguments + /// and return type. Liberated means that all bound regions + /// (including late-bound regions) are replaced with free + /// equivalents. This table is not used in trans (since regions + /// are erased there) and hence is not serialized to metadata. 
+ pub liberated_fn_sigs: NodeMap>, + + /// For each FRU expression, record the normalized types of the fields + /// of the struct - this is needed because it is non-trivial to + /// normalize while preserving regions. This table is used only in + /// MIR construction and hence is not serialized to metadata. + pub fru_field_types: NodeMap>> +} + +impl<'a, 'gcx, 'tcx> Tables<'tcx> { + pub fn empty() -> Tables<'tcx> { + Tables { + type_relative_path_defs: NodeMap(), + node_types: FxHashMap(), + item_substs: NodeMap(), + adjustments: NodeMap(), + method_map: FxHashMap(), + upvar_capture_map: FxHashMap(), + closure_tys: DefIdMap(), + closure_kinds: DefIdMap(), + liberated_fn_sigs: NodeMap(), + fru_field_types: NodeMap() + } + } + + /// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node. + pub fn qpath_def(&self, qpath: &hir::QPath, id: NodeId) -> Def { + match *qpath { + hir::QPath::Resolved(_, ref path) => path.def, + hir::QPath::TypeRelative(..) => { + self.type_relative_path_defs.get(&id).cloned().unwrap_or(Def::Err) + } + } + } + + pub fn node_id_to_type(&self, id: NodeId) -> Ty<'tcx> { + match self.node_id_to_type_opt(id) { + Some(ty) => ty, + None => { + bug!("node_id_to_type: no type for node `{}`", + tls::with(|tcx| tcx.map.node_to_string(id))) + } + } + } + + pub fn node_id_to_type_opt(&self, id: NodeId) -> Option> { + self.node_types.get(&id).cloned() + } + + pub fn node_id_item_substs(&self, id: NodeId) -> Option<&'tcx Substs<'tcx>> { + self.item_substs.get(&id).map(|ts| ts.substs) + } + + // Returns the type of a pattern as a monotype. Like @expr_ty, this function + // doesn't provide type parameter substitutions. + pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> { + self.node_id_to_type(pat.id) + } + + pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option> { + self.node_id_to_type_opt(pat.id) + } + + // Returns the type of an expression as a monotype. + // + // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. 
That is, in + // some cases, we insert `Adjustment` annotations such as auto-deref or + // auto-ref. The type returned by this function does not consider such + // adjustments. See `expr_ty_adjusted()` instead. + // + // NB (2): This type doesn't provide type parameter substitutions; e.g. if you + // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize" + // instead of "fn(ty) -> T with T = isize". + pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> { + self.node_id_to_type(expr.id) + } + + pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option> { + self.node_id_to_type_opt(expr.id) + } + + /// Returns the type of `expr`, considering any `Adjustment` + /// entry recorded for that expression. + pub fn expr_ty_adjusted(&self, expr: &hir::Expr) -> Ty<'tcx> { + self.adjustments.get(&expr.id) + .map_or_else(|| self.expr_ty(expr), |adj| adj.target) + } + + pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr) -> Option> { + self.adjustments.get(&expr.id) + .map(|adj| adj.target).or_else(|| self.expr_ty_opt(expr)) + } + + pub fn is_method_call(&self, expr_id: NodeId) -> bool { + self.method_map.contains_key(&ty::MethodCall::expr(expr_id)) + } + + pub fn is_overloaded_autoderef(&self, expr_id: NodeId, autoderefs: u32) -> bool { + self.method_map.contains_key(&ty::MethodCall::autoderef(expr_id, autoderefs)) + } + + pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> Option> { + Some(self.upvar_capture_map.get(&upvar_id).unwrap().clone()) + } +} + +impl<'tcx> CommonTypes<'tcx> { + fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> { + let mk = |sty| interners.intern_ty(sty, None); + CommonTypes { + bool: mk(TyBool), + char: mk(TyChar), + never: mk(TyNever), + err: mk(TyError), + isize: mk(TyInt(ast::IntTy::Is)), + i8: mk(TyInt(ast::IntTy::I8)), + i16: mk(TyInt(ast::IntTy::I16)), + i32: mk(TyInt(ast::IntTy::I32)), + i64: mk(TyInt(ast::IntTy::I64)), + usize: mk(TyUint(ast::UintTy::Us)), + u8: mk(TyUint(ast::UintTy::U8)), + u16: 
mk(TyUint(ast::UintTy::U16)), + u32: mk(TyUint(ast::UintTy::U32)), + u64: mk(TyUint(ast::UintTy::U64)), + f32: mk(TyFloat(ast::FloatTy::F32)), + f64: mk(TyFloat(ast::FloatTy::F64)), + } + } +} + +/// The data structure to keep track of all the information that typechecker +/// generates so that so that it can be reused and doesn't have to be redone +/// later on. +#[derive(Copy, Clone)] +pub struct TyCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + gcx: &'a GlobalCtxt<'gcx>, + interners: &'a CtxtInterners<'tcx> +} + +impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> { + type Target = &'a GlobalCtxt<'gcx>; + fn deref(&self) -> &Self::Target { + &self.gcx + } +} + +pub struct GlobalCtxt<'tcx> { + global_interners: CtxtInterners<'tcx>, + + pub specializes_cache: RefCell, + + pub dep_graph: DepGraph, + + /// Common types, pre-interned for your convenience. + pub types: CommonTypes<'tcx>, + + pub sess: &'tcx Session, + + /// Map indicating what traits are in scope for places where this + /// is relevant; generated by resolve. + pub trait_map: TraitMap, + + pub named_region_map: resolve_lifetime::NamedRegionMap, + + pub region_maps: RegionMaps, + + // For each fn declared in the local crate, type check stores the + // free-region relationships that were deduced from its where + // clauses and parameter types. These are then read-again by + // borrowck. (They are not used during trans, and hence are not + // serialized or needed for cross-crate fns.) 
+ free_region_maps: RefCell>, + // FIXME: jroesch make this a refcell + + pub tables: RefCell>, + + /// Maps from a trait item to the trait item "descriptor" + pub associated_items: RefCell>>, + + /// Maps from an impl/trait def-id to a list of the def-ids of its items + pub associated_item_def_ids: RefCell>>, + + pub impl_trait_refs: RefCell>>, + pub trait_defs: RefCell>>, + pub adt_defs: RefCell>>, + pub adt_sized_constraint: RefCell>>, + + /// Maps from the def-id of an item (trait/struct/enum/fn) to its + /// associated generics and predicates. + pub generics: RefCell>>, + pub predicates: RefCell>>, + + /// Maps from the def-id of a trait to the list of + /// super-predicates. This is a subset of the full list of + /// predicates. We store these in a separate map because we must + /// evaluate them even during type conversion, often before the + /// full predicates are available (note that supertraits have + /// additional acyclicity requirements). + pub super_predicates: RefCell>>, + + pub map: ast_map::Map<'tcx>, + + /// Maps from the def-id of a function/method or const/static + /// to its MIR. Mutation is done at an item granularity to + /// allow MIR optimization passes to function and still + /// access cross-crate MIR (e.g. inlining or const eval). + /// + /// Note that cross-crate MIR appears to be always borrowed + /// (in the `RefCell` sense) to prevent accidental mutation. + pub mir_map: RefCell>>, + + // Records the free variables refrenced by every closure + // expression. Do not track deps for this, just recompute it from + // scratch every time. + pub freevars: RefCell, + + pub maybe_unused_trait_imports: NodeSet, + + // Records the type of every item. + pub item_types: RefCell>>, + + // Internal cache for metadata decoding. No need to track deps on this. + pub rcache: RefCell>>, + + // Cache for the type-contents routine. FIXME -- track deps? 
+ pub tc_cache: RefCell, ty::contents::TypeContents>>, + + // FIXME no dep tracking, but we should be able to remove this + pub ty_param_defs: RefCell>>, + + // FIXME dep tracking -- should be harmless enough + pub normalized_cache: RefCell, Ty<'tcx>>>, + + pub lang_items: middle::lang_items::LanguageItems, + + /// Maps from def-id of a type or region parameter to its + /// (inferred) variance. + pub item_variance_map: RefCell>>, + + /// True if the variance has been computed yet; false otherwise. + pub variance_computed: Cell, + + /// Maps a DefId of a type to a list of its inherent impls. + /// Contains implementations of methods that are inherent to a type. + /// Methods in these implementations don't need to be exported. + pub inherent_impls: RefCell>>, + + /// Set of used unsafe nodes (functions or blocks). Unsafe nodes not + /// present in this set can be warned about. + pub used_unsafe: RefCell, + + /// Set of nodes which mark locals as mutable which end up getting used at + /// some point. Local variable definitions not in this set can be warned + /// about. + pub used_mut_nodes: RefCell, + + /// Set of trait imports actually used in the method resolution. + /// This is used for warning unused imports. + pub used_trait_imports: RefCell, + + /// The set of external nominal types whose implementations have been read. + /// This is used for lazy resolution of methods. + pub populated_external_types: RefCell, + + /// The set of external primitive types whose implementations have been read. + /// FIXME(arielb1): why is this separate from populated_external_types? + pub populated_external_primitive_impls: RefCell, + + /// Cache used by const_eval when decoding external constants. + /// Contains `None` when the constant has been fetched but doesn't exist. + /// Constains `Some(expr_id, type)` otherwise. 
+ /// `type` is `None` in case it's not a primitive type + pub extern_const_statics: RefCell>)>>>, + /// Cache used by const_eval when decoding extern const fns + pub extern_const_fns: RefCell>, + + /// Maps any item's def-id to its stability index. + pub stability: RefCell>, + + /// Caches the results of trait selection. This cache is used + /// for things that do not have to do with the parameters in scope. + pub selection_cache: traits::SelectionCache<'tcx>, + + /// Caches the results of trait evaluation. This cache is used + /// for things that do not have to do with the parameters in scope. + /// Merge this with `selection_cache`? + pub evaluation_cache: traits::EvaluationCache<'tcx>, + + /// A set of predicates that have been fulfilled *somewhere*. + /// This is used to avoid duplicate work. Predicates are only + /// added to this set when they mention only "global" names + /// (i.e., no type or lifetime parameters). + pub fulfilled_predicates: RefCell>, + + /// Caches the representation hints for struct definitions. + repr_hint_cache: RefCell>>, + + /// Maps Expr NodeId's to their constant qualification. + pub const_qualif_map: RefCell>, + + /// Caches CoerceUnsized kinds for impls on custom types. + pub custom_coerce_unsized_kinds: RefCell>, + + /// Maps a cast expression to its kind. This is keyed on the + /// *from* expression of the cast, not the cast itself. + pub cast_kinds: RefCell>, + + /// Maps Fn items to a collection of fragment infos. + /// + /// The main goal is to identify data (each of which may be moved + /// or assigned) whose subparts are not moved nor assigned + /// (i.e. their state is *unfragmented*) and corresponding ast + /// nodes where the path to that data is moved or assigned. 
+ /// + /// In the long term, unfragmented values will have their + /// destructor entirely driven by a single stack-local drop-flag, + /// and their parents, the collections of the unfragmented values + /// (or more simply, "fragmented values"), are mapped to the + /// corresponding collections of stack-local drop-flags. + /// + /// (However, in the short term that is not the case; e.g. some + /// unfragmented paths still need to be zeroed, namely when they + /// reference parent data from an outer scope that was not + /// entirely moved, and therefore that needs to be zeroed so that + /// we do not get double-drop when we hit the end of the parent + /// scope.) + /// + /// Also: currently the table solely holds keys for node-ids of + /// unfragmented values (see `FragmentInfo` enum definition), but + /// longer-term we will need to also store mappings from + /// fragmented data to the set of unfragmented pieces that + /// constitute it. + pub fragment_infos: RefCell>>, + + /// The definite name of the current crate after taking into account + /// attributes, commandline parameters, etc. + pub crate_name: Symbol, + + /// Data layout specification for the current target. + pub data_layout: TargetDataLayout, + + /// Cache for layouts computed from types. + pub layout_cache: RefCell, &'tcx Layout>>, + + /// Used to prevent layout from recursing too deeply. + pub layout_depth: Cell, + + /// Map from function to the `#[derive]` mode that it's defining. Only used + /// by `proc-macro` crates. + pub derive_macros: RefCell>, +} + +impl<'tcx> GlobalCtxt<'tcx> { + /// Get the global TyCtxt. 
+ pub fn global_tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + TyCtxt { + gcx: self, + interners: &self.global_interners + } + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn crate_name(self, cnum: CrateNum) -> Symbol { + if cnum == LOCAL_CRATE { + self.crate_name + } else { + self.sess.cstore.crate_name(cnum) + } + } + + pub fn original_crate_name(self, cnum: CrateNum) -> Symbol { + if cnum == LOCAL_CRATE { + self.crate_name.clone() + } else { + self.sess.cstore.original_crate_name(cnum) + } + } + + pub fn crate_disambiguator(self, cnum: CrateNum) -> Symbol { + if cnum == LOCAL_CRATE { + self.sess.local_crate_disambiguator() + } else { + self.sess.cstore.crate_disambiguator(cnum) + } + } + + /// Given a def-key `key` and a crate `krate`, finds the def-index + /// that `krate` assigned to `key`. This `DefIndex` will always be + /// relative to `krate`. + /// + /// Returns `None` if there is no `DefIndex` with that key. + pub fn def_index_for_def_key(self, krate: CrateNum, key: DefKey) + -> Option { + if krate == LOCAL_CRATE { + self.map.def_index_for_def_key(key) + } else { + self.sess.cstore.def_index_for_def_key(krate, key) + } + } + + pub fn retrace_path(self, + krate: CrateNum, + path_data: &[DisambiguatedDefPathData]) + -> Option { + debug!("retrace_path(path={:?}, krate={:?})", path_data, self.crate_name(krate)); + + let root_key = DefKey { + parent: None, + disambiguated_data: DisambiguatedDefPathData { + data: DefPathData::CrateRoot, + disambiguator: 0, + }, + }; + + let root_index = self.def_index_for_def_key(krate, root_key) + .expect("no root key?"); + + debug!("retrace_path: root_index={:?}", root_index); + + let mut index = root_index; + for data in path_data { + let key = DefKey { parent: Some(index), disambiguated_data: data.clone() }; + debug!("retrace_path: key={:?}", key); + match self.def_index_for_def_key(krate, key) { + Some(i) => index = i, + None => return None, + } + } + + Some(DefId { krate: krate, index: index }) + } + + 
pub fn type_parameter_def(self, + node_id: NodeId) + -> ty::TypeParameterDef<'tcx> + { + self.ty_param_defs.borrow().get(&node_id).unwrap().clone() + } + + pub fn alloc_generics(self, generics: ty::Generics<'gcx>) + -> &'gcx ty::Generics<'gcx> { + self.global_interners.arenas.generics.alloc(generics) + } + + pub fn alloc_mir(self, mir: Mir<'gcx>) -> &'gcx RefCell> { + self.global_interners.arenas.mir.alloc(RefCell::new(mir)) + } + + pub fn alloc_trait_def(self, def: ty::TraitDef) -> &'gcx ty::TraitDef { + self.global_interners.arenas.trait_def.alloc(def) + } + + pub fn alloc_adt_def(self, + did: DefId, + kind: AdtKind, + variants: Vec) + -> &'gcx ty::AdtDef { + let def = ty::AdtDef::new(self, did, kind, variants); + self.global_interners.arenas.adt_def.alloc(def) + } + + pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { + if let Some(st) = self.global_interners.stability.borrow().get(&stab) { + return st; + } + + let interned = self.global_interners.arenas.stability.alloc(stab); + if let Some(prev) = self.global_interners.stability + .borrow_mut() + .replace(interned) { + bug!("Tried to overwrite interned Stability: {:?}", prev) + } + interned + } + + pub fn intern_layout(self, layout: Layout) -> &'gcx Layout { + if let Some(layout) = self.global_interners.layout.borrow().get(&layout) { + return layout; + } + + let interned = self.global_interners.arenas.layout.alloc(layout); + if let Some(prev) = self.global_interners.layout + .borrow_mut() + .replace(interned) { + bug!("Tried to overwrite interned Layout: {:?}", prev) + } + interned + } + + pub fn store_free_region_map(self, id: NodeId, map: FreeRegionMap) { + if self.free_region_maps.borrow_mut().insert(id, map).is_some() { + bug!("Tried to overwrite interned FreeRegionMap for NodeId {:?}", id) + } + } + + pub fn free_region_map(self, id: NodeId) -> FreeRegionMap { + self.free_region_maps.borrow()[&id].clone() + } + + pub fn lift>(self, value: &T) -> Option { + 
value.lift_to_tcx(self) + } + + /// Like lift, but only tries in the global tcx. + pub fn lift_to_global>(self, value: &T) -> Option { + value.lift_to_tcx(self.global_tcx()) + } + + /// Returns true if self is the same as self.global_tcx(). + fn is_global(self) -> bool { + let local = self.interners as *const _; + let global = &self.global_interners as *const _; + local as usize == global as usize + } + + /// Create a type context and call the closure with a `TyCtxt` reference + /// to the context. The closure enforces that the type context and any interned + /// value (types, substs, etc.) can only be used while `ty::tls` has a valid + /// reference to the context, to allow formatting values that need it. + pub fn create_and_enter(s: &'tcx Session, + arenas: &'tcx CtxtArenas<'tcx>, + trait_map: TraitMap, + named_region_map: resolve_lifetime::NamedRegionMap, + map: ast_map::Map<'tcx>, + freevars: FreevarMap, + maybe_unused_trait_imports: NodeSet, + region_maps: RegionMaps, + lang_items: middle::lang_items::LanguageItems, + stability: stability::Index<'tcx>, + crate_name: &str, + f: F) -> R + where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R + { + let data_layout = TargetDataLayout::parse(s); + let interners = CtxtInterners::new(arenas); + let common_types = CommonTypes::new(&interners); + let dep_graph = map.dep_graph.clone(); + let fulfilled_predicates = traits::GlobalFulfilledPredicates::new(dep_graph.clone()); + tls::enter_global(GlobalCtxt { + specializes_cache: RefCell::new(traits::SpecializesCache::new()), + global_interners: interners, + dep_graph: dep_graph.clone(), + types: common_types, + named_region_map: named_region_map, + region_maps: region_maps, + free_region_maps: RefCell::new(FxHashMap()), + item_variance_map: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + variance_computed: Cell::new(false), + sess: s, + trait_map: trait_map, + tables: RefCell::new(Tables::empty()), + impl_trait_refs: 
RefCell::new(DepTrackingMap::new(dep_graph.clone())), + trait_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + adt_defs: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + adt_sized_constraint: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + generics: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + super_predicates: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + fulfilled_predicates: RefCell::new(fulfilled_predicates), + map: map, + mir_map: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + freevars: RefCell::new(freevars), + maybe_unused_trait_imports: maybe_unused_trait_imports, + item_types: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + rcache: RefCell::new(FxHashMap()), + tc_cache: RefCell::new(FxHashMap()), + associated_items: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + associated_item_def_ids: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + ty_param_defs: RefCell::new(NodeMap()), + normalized_cache: RefCell::new(FxHashMap()), + lang_items: lang_items, + inherent_impls: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + used_unsafe: RefCell::new(NodeSet()), + used_mut_nodes: RefCell::new(NodeSet()), + used_trait_imports: RefCell::new(NodeSet()), + populated_external_types: RefCell::new(DefIdSet()), + populated_external_primitive_impls: RefCell::new(DefIdSet()), + extern_const_statics: RefCell::new(DefIdMap()), + extern_const_fns: RefCell::new(DefIdMap()), + stability: RefCell::new(stability), + selection_cache: traits::SelectionCache::new(), + evaluation_cache: traits::EvaluationCache::new(), + repr_hint_cache: RefCell::new(DepTrackingMap::new(dep_graph.clone())), + const_qualif_map: RefCell::new(NodeMap()), + custom_coerce_unsized_kinds: RefCell::new(DefIdMap()), + cast_kinds: RefCell::new(NodeMap()), + fragment_infos: RefCell::new(DefIdMap()), + crate_name: Symbol::intern(crate_name), + data_layout: 
data_layout, + layout_cache: RefCell::new(FxHashMap()), + layout_depth: Cell::new(0), + derive_macros: RefCell::new(NodeMap()), + }, f) + } +} + +impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { + /// Call the closure with a local `TyCtxt` using the given arenas. + pub fn enter_local(&self, arenas: &'tcx CtxtArenas<'tcx>, f: F) -> R + where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + let interners = CtxtInterners::new(arenas); + tls::enter(self, &interners, f) + } +} + +/// A trait implemented for all X<'a> types which can be safely and +/// efficiently converted to X<'tcx> as long as they are part of the +/// provided TyCtxt<'tcx>. +/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx> +/// by looking them up in their respective interners. +/// +/// However, this is still not the best implementation as it does +/// need to compare the components, even for interned values. +/// It would be more efficient if TypedArena provided a way to +/// determine whether the address is in the allocated range. +/// +/// None is returned if the value or one of the components is not part +/// of the provided context. +/// For Ty, None can be returned if either the type interner doesn't +/// contain the TypeVariants key or if the address of the interned +/// pointer differs. The latter case is possible if a primitive type, +/// e.g. `()` or `u8`, was interned in a different context. +pub trait Lift<'tcx> { + type Lifted; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option; +} + +impl<'a, 'tcx> Lift<'tcx> for Ty<'a> { + type Lifted = Ty<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { + if let Some(&Interned(ty)) = tcx.interners.type_.borrow().get(&self.sty) { + if *self as *const _ == ty as *const _ { + return Some(ty); + } + } + // Also try in the global tcx if we're not that. 
+ if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { + type Lifted = &'tcx Substs<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> { + if self.len() == 0 { + return Some(Slice::empty()); + } + if let Some(&Interned(substs)) = tcx.interners.substs.borrow().get(&self[..]) { + if *self as *const _ == substs as *const _ { + return Some(substs); + } + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Region { + type Lifted = &'tcx Region; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Region> { + if let Some(&Interned(region)) = tcx.interners.region.borrow().get(*self) { + if *self as *const _ == region as *const _ { + return Some(region); + } + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { + type Lifted = &'tcx Slice>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option<&'tcx Slice>> { + if self.len() == 0 { + return Some(Slice::empty()); + } + if let Some(&Interned(list)) = tcx.interners.type_list.borrow().get(&self[..]) { + if *self as *const _ == list as *const _ { + return Some(list); + } + } + // Also try in the global tcx if we're not that. 
+ if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a Slice> { + type Lifted = &'tcx Slice>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option<&'tcx Slice>> { + if self.is_empty() { + return Some(Slice::empty()); + } + if let Some(&Interned(eps)) = tcx.interners.existential_predicates.borrow().get(&self[..]) { + if *self as *const _ == eps as *const _ { + return Some(eps); + } + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for &'a BareFnTy<'a> { + type Lifted = &'tcx BareFnTy<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option<&'tcx BareFnTy<'tcx>> { + if let Some(&Interned(fty)) = tcx.interners.bare_fn.borrow().get(*self) { + if *self as *const _ == fty as *const _ { + return Some(fty); + } + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } +} + + +pub mod tls { + use super::{CtxtInterners, GlobalCtxt, TyCtxt}; + + use std::cell::Cell; + use std::fmt; + use syntax_pos; + + /// Marker types used for the scoped TLS slot. + /// The type context cannot be used directly because the scoped TLS + /// in libstd doesn't allow types generic over lifetimes. + enum ThreadLocalGlobalCtxt {} + enum ThreadLocalInterners {} + + thread_local! 
{ + static TLS_TCX: Cell> = Cell::new(None) + } + + fn span_debug(span: syntax_pos::Span, f: &mut fmt::Formatter) -> fmt::Result { + with(|tcx| { + write!(f, "{}", tcx.sess.codemap().span_to_string(span)) + }) + } + + pub fn enter_global<'gcx, F, R>(gcx: GlobalCtxt<'gcx>, f: F) -> R + where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R + { + syntax_pos::SPAN_DEBUG.with(|span_dbg| { + let original_span_debug = span_dbg.get(); + span_dbg.set(span_debug); + let result = enter(&gcx, &gcx.global_interners, f); + span_dbg.set(original_span_debug); + result + }) + } + + pub fn enter<'a, 'gcx: 'tcx, 'tcx, F, R>(gcx: &'a GlobalCtxt<'gcx>, + interners: &'a CtxtInterners<'tcx>, + f: F) -> R + where F: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + let gcx_ptr = gcx as *const _ as *const ThreadLocalGlobalCtxt; + let interners_ptr = interners as *const _ as *const ThreadLocalInterners; + TLS_TCX.with(|tls| { + let prev = tls.get(); + tls.set(Some((gcx_ptr, interners_ptr))); + let ret = f(TyCtxt { + gcx: gcx, + interners: interners + }); + tls.set(prev); + ret + }) + } + + pub fn with(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + { + TLS_TCX.with(|tcx| { + let (gcx, interners) = tcx.get().unwrap(); + let gcx = unsafe { &*(gcx as *const GlobalCtxt) }; + let interners = unsafe { &*(interners as *const CtxtInterners) }; + f(TyCtxt { + gcx: gcx, + interners: interners + }) + }) + } + + pub fn with_opt(f: F) -> R + where F: for<'a, 'gcx, 'tcx> FnOnce(Option>) -> R + { + if TLS_TCX.with(|tcx| tcx.get().is_some()) { + with(|v| f(Some(v))) + } else { + f(None) + } + } +} + +macro_rules! sty_debug_print { + ($ctxt: expr, $($variant: ident),*) => {{ + // curious inner module to allow variant names to be used as + // variable names. 
+ #[allow(non_snake_case)] + mod inner { + use ty::{self, TyCtxt}; + use ty::context::Interned; + + #[derive(Copy, Clone)] + struct DebugStat { + total: usize, + region_infer: usize, + ty_infer: usize, + both_infer: usize, + } + + pub fn go(tcx: TyCtxt) { + let mut total = DebugStat { + total: 0, + region_infer: 0, ty_infer: 0, both_infer: 0, + }; + $(let mut $variant = total;)* + + + for &Interned(t) in tcx.interners.type_.borrow().iter() { + let variant = match t.sty { + ty::TyBool | ty::TyChar | ty::TyInt(..) | ty::TyUint(..) | + ty::TyFloat(..) | ty::TyStr | ty::TyNever => continue, + ty::TyError => /* unimportant */ continue, + $(ty::$variant(..) => &mut $variant,)* + }; + let region = t.flags.get().intersects(ty::TypeFlags::HAS_RE_INFER); + let ty = t.flags.get().intersects(ty::TypeFlags::HAS_TY_INFER); + + variant.total += 1; + total.total += 1; + if region { total.region_infer += 1; variant.region_infer += 1 } + if ty { total.ty_infer += 1; variant.ty_infer += 1 } + if region && ty { total.both_infer += 1; variant.both_infer += 1 } + } + println!("Ty interner total ty region both"); + $(println!(" {:18}: {uses:6} {usespc:4.1}%, \ +{ty:4.1}% {region:5.1}% {both:4.1}%", + stringify!($variant), + uses = $variant.total, + usespc = $variant.total as f64 * 100.0 / total.total as f64, + ty = $variant.ty_infer as f64 * 100.0 / total.total as f64, + region = $variant.region_infer as f64 * 100.0 / total.total as f64, + both = $variant.both_infer as f64 * 100.0 / total.total as f64); + )* + println!(" total {uses:6} \ +{ty:4.1}% {region:5.1}% {both:4.1}%", + uses = total.total, + ty = total.ty_infer as f64 * 100.0 / total.total as f64, + region = total.region_infer as f64 * 100.0 / total.total as f64, + both = total.both_infer as f64 * 100.0 / total.total as f64) + } + } + + inner::go($ctxt) + }} +} + +impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { + pub fn print_debug_stats(self) { + sty_debug_print!( + self, + TyAdt, TyBox, TyArray, TySlice, TyRawPtr, TyRef, TyFnDef, 
TyFnPtr, + TyDynamic, TyClosure, TyTuple, TyParam, TyInfer, TyProjection, TyAnon); + + println!("Substs interner: #{}", self.interners.substs.borrow().len()); + println!("BareFnTy interner: #{}", self.interners.bare_fn.borrow().len()); + println!("Region interner: #{}", self.interners.region.borrow().len()); + println!("Stability interner: #{}", self.interners.stability.borrow().len()); + println!("Layout interner: #{}", self.interners.layout.borrow().len()); + } +} + + +/// An entry in an interner. +struct Interned<'tcx, T: 'tcx+?Sized>(&'tcx T); + +// NB: An Interned compares and hashes as a sty. +impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> { + fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool { + self.0.sty == other.0.sty + } +} + +impl<'tcx> Eq for Interned<'tcx, TyS<'tcx>> {} + +impl<'tcx> Hash for Interned<'tcx, TyS<'tcx>> { + fn hash(&self, s: &mut H) { + self.0.sty.hash(s) + } +} + +impl<'tcx: 'lcx, 'lcx> Borrow> for Interned<'tcx, TyS<'tcx>> { + fn borrow<'a>(&'a self) -> &'a TypeVariants<'lcx> { + &self.0.sty + } +} + +// NB: An Interned> compares and hashes as its elements. +impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, Slice> { + fn eq(&self, other: &Interned<'tcx, Slice>) -> bool { + self.0[..] == other.0[..] + } +} + +impl<'tcx, T: Eq> Eq for Interned<'tcx, Slice> {} + +impl<'tcx, T: Hash> Hash for Interned<'tcx, Slice> { + fn hash(&self, s: &mut H) { + self.0[..].hash(s) + } +} + +impl<'tcx: 'lcx, 'lcx> Borrow<[Ty<'lcx>]> for Interned<'tcx, Slice>> { + fn borrow<'a>(&'a self) -> &'a [Ty<'lcx>] { + &self.0[..] + } +} + +impl<'tcx: 'lcx, 'lcx> Borrow<[Kind<'lcx>]> for Interned<'tcx, Substs<'tcx>> { + fn borrow<'a>(&'a self) -> &'a [Kind<'lcx>] { + &self.0[..] 
+ } +} + +impl<'tcx: 'lcx, 'lcx> Borrow> for Interned<'tcx, BareFnTy<'tcx>> { + fn borrow<'a>(&'a self) -> &'a BareFnTy<'lcx> { + self.0 + } +} + +impl<'tcx> Borrow for Interned<'tcx, Region> { + fn borrow<'a>(&'a self) -> &'a Region { + self.0 + } +} + +impl<'tcx: 'lcx, 'lcx> Borrow<[ExistentialPredicate<'lcx>]> + for Interned<'tcx, Slice>> { + fn borrow<'a>(&'a self) -> &'a [ExistentialPredicate<'lcx>] { + &self.0[..] + } +} + +macro_rules! intern_method { + ($lt_tcx:tt, $name:ident: $method:ident($alloc:ty, + $alloc_method:ident, + $alloc_to_key:expr, + $alloc_to_ret:expr, + $needs_infer:expr) -> $ty:ty) => { + impl<'a, 'gcx, $lt_tcx> TyCtxt<'a, 'gcx, $lt_tcx> { + pub fn $method(self, v: $alloc) -> &$lt_tcx $ty { + { + let key = ($alloc_to_key)(&v); + if let Some(i) = self.interners.$name.borrow().get(key) { + return i.0; + } + if !self.is_global() { + if let Some(i) = self.global_interners.$name.borrow().get(key) { + return i.0; + } + } + } + + // HACK(eddyb) Depend on flags being accurate to + // determine that all contents are in the global tcx. + // See comments on Lift for why we can't use that. + if !($needs_infer)(&v) { + if !self.is_global() { + let v = unsafe { + mem::transmute(v) + }; + let i = ($alloc_to_ret)(self.global_interners.arenas.$name + .$alloc_method(v)); + self.global_interners.$name.borrow_mut().insert(Interned(i)); + return i; + } + } else { + // Make sure we don't end up with inference + // types/regions in the global tcx. + if self.is_global() { + bug!("Attempted to intern `{:?}` which contains \ + inference types/regions in the global type context", + v); + } + } + + let i = ($alloc_to_ret)(self.interners.arenas.$name.$alloc_method(v)); + self.interners.$name.borrow_mut().insert(Interned(i)); + i + } + } + } +} + +macro_rules! 
direct_interners { + ($lt_tcx:tt, $($name:ident: $method:ident($needs_infer:expr) -> $ty:ty),+) => { + $(impl<$lt_tcx> PartialEq for Interned<$lt_tcx, $ty> { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } + } + + impl<$lt_tcx> Eq for Interned<$lt_tcx, $ty> {} + + impl<$lt_tcx> Hash for Interned<$lt_tcx, $ty> { + fn hash(&self, s: &mut H) { + self.0.hash(s) + } + } + + intern_method!($lt_tcx, $name: $method($ty, alloc, |x| x, |x| x, $needs_infer) -> $ty);)+ + } +} + +fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool { + x.has_type_flags(ty::TypeFlags::KEEP_IN_LOCAL_TCX) +} + +direct_interners!('tcx, + bare_fn: mk_bare_fn(|fty: &BareFnTy| { + keep_local(&fty.sig) + }) -> BareFnTy<'tcx>, + region: mk_region(|r| { + match r { + &ty::ReVar(_) | &ty::ReSkolemized(..) => true, + _ => false + } + }) -> Region +); + +macro_rules! slice_interners { + ($($field:ident: $method:ident($ty:ident)),+) => ( + $(intern_method!('tcx, $field: $method(&[$ty<'tcx>], alloc_slice, Deref::deref, + |xs: &[$ty]| -> &Slice<$ty> { + unsafe { mem::transmute(xs) } + }, |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+ + ) +} + +slice_interners!( + existential_predicates: _intern_existential_predicates(ExistentialPredicate), + type_list: _intern_type_list(Ty), + substs: _intern_substs(Kind) +); + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Create an unsafe fn ty based on a safe fn ty. + pub fn safe_to_unsafe_fn_ty(self, bare_fn: &BareFnTy<'tcx>) -> Ty<'tcx> { + assert_eq!(bare_fn.unsafety, hir::Unsafety::Normal); + self.mk_fn_ptr(self.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: bare_fn.abi, + sig: bare_fn.sig.clone() + })) + } + + // Interns a type/name combination, stores the resulting box in cx.interners, + // and returns the box as cast to an unsafe ptr (see comments for Ty above). 
+ pub fn mk_ty(self, st: TypeVariants<'tcx>) -> Ty<'tcx> { + let global_interners = if !self.is_global() { + Some(&self.global_interners) + } else { + None + }; + self.interners.intern_ty(st, global_interners) + } + + pub fn mk_mach_int(self, tm: ast::IntTy) -> Ty<'tcx> { + match tm { + ast::IntTy::Is => self.types.isize, + ast::IntTy::I8 => self.types.i8, + ast::IntTy::I16 => self.types.i16, + ast::IntTy::I32 => self.types.i32, + ast::IntTy::I64 => self.types.i64, + } + } + + pub fn mk_mach_uint(self, tm: ast::UintTy) -> Ty<'tcx> { + match tm { + ast::UintTy::Us => self.types.usize, + ast::UintTy::U8 => self.types.u8, + ast::UintTy::U16 => self.types.u16, + ast::UintTy::U32 => self.types.u32, + ast::UintTy::U64 => self.types.u64, + } + } + + pub fn mk_mach_float(self, tm: ast::FloatTy) -> Ty<'tcx> { + match tm { + ast::FloatTy::F32 => self.types.f32, + ast::FloatTy::F64 => self.types.f64, + } + } + + pub fn mk_str(self) -> Ty<'tcx> { + self.mk_ty(TyStr) + } + + pub fn mk_static_str(self) -> Ty<'tcx> { + self.mk_imm_ref(self.mk_region(ty::ReStatic), self.mk_str()) + } + + pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + // take a copy of substs so that we own the vectors inside + self.mk_ty(TyAdt(def, substs)) + } + + pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyBox(ty)) + } + + pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyRawPtr(tm)) + } + + pub fn mk_ref(self, r: &'tcx Region, tm: TypeAndMut<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyRef(r, tm)) + } + + pub fn mk_mut_ref(self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> { + self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutMutable}) + } + + pub fn mk_imm_ref(self, r: &'tcx Region, ty: Ty<'tcx>) -> Ty<'tcx> { + self.mk_ref(r, TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) + } + + pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> { + self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutMutable}) + } + + pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> 
{ + self.mk_ptr(TypeAndMut {ty: ty, mutbl: hir::MutImmutable}) + } + + pub fn mk_nil_ptr(self) -> Ty<'tcx> { + self.mk_imm_ptr(self.mk_nil()) + } + + pub fn mk_array(self, ty: Ty<'tcx>, n: usize) -> Ty<'tcx> { + self.mk_ty(TyArray(ty, n)) + } + + pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> { + self.mk_ty(TySlice(ty)) + } + + pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> { + self.mk_ty(TyTuple(self.intern_type_list(ts))) + } + + pub fn mk_tup], Ty<'tcx>>>(self, iter: I) -> I::Output { + iter.intern_with(|ts| self.mk_ty(TyTuple(self.intern_type_list(ts)))) + } + + pub fn mk_nil(self) -> Ty<'tcx> { + self.intern_tup(&[]) + } + + pub fn mk_diverging_default(self) -> Ty<'tcx> { + if self.sess.features.borrow().never_type { + self.types.never + } else { + self.mk_nil() + } + } + + pub fn mk_bool(self) -> Ty<'tcx> { + self.mk_ty(TyBool) + } + + pub fn mk_fn_def(self, def_id: DefId, + substs: &'tcx Substs<'tcx>, + fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyFnDef(def_id, substs, fty)) + } + + pub fn mk_fn_ptr(self, fty: &'tcx BareFnTy<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyFnPtr(fty)) + } + + pub fn mk_dynamic( + self, + obj: ty::Binder<&'tcx Slice>>, + reg: &'tcx ty::Region + ) -> Ty<'tcx> { + self.mk_ty(TyDynamic(obj, reg)) + } + + pub fn mk_projection(self, + trait_ref: TraitRef<'tcx>, + item_name: Name) + -> Ty<'tcx> { + // take a copy of substs so that we own the vectors inside + let inner = ProjectionTy { trait_ref: trait_ref, item_name: item_name }; + self.mk_ty(TyProjection(inner)) + } + + pub fn mk_closure(self, + closure_id: DefId, + substs: &'tcx Substs<'tcx>) + -> Ty<'tcx> { + self.mk_closure_from_closure_substs(closure_id, ClosureSubsts { + substs: substs + }) + } + + pub fn mk_closure_from_closure_substs(self, + closure_id: DefId, + closure_substs: ClosureSubsts<'tcx>) + -> Ty<'tcx> { + self.mk_ty(TyClosure(closure_id, closure_substs)) + } + + pub fn mk_var(self, v: TyVid) -> Ty<'tcx> { + self.mk_infer(TyVar(v)) + } + + pub fn 
mk_int_var(self, v: IntVid) -> Ty<'tcx> { + self.mk_infer(IntVar(v)) + } + + pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> { + self.mk_infer(FloatVar(v)) + } + + pub fn mk_infer(self, it: InferTy) -> Ty<'tcx> { + self.mk_ty(TyInfer(it)) + } + + pub fn mk_param(self, + index: u32, + name: Name) -> Ty<'tcx> { + self.mk_ty(TyParam(ParamTy { idx: index, name: name })) + } + + pub fn mk_self_type(self) -> Ty<'tcx> { + self.mk_param(0, keywords::SelfType.name()) + } + + pub fn mk_param_from_def(self, def: &ty::TypeParameterDef) -> Ty<'tcx> { + self.mk_param(def.index, def.name) + } + + pub fn mk_anon(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + self.mk_ty(TyAnon(def_id, substs)) + } + + pub fn intern_existential_predicates(self, eps: &[ExistentialPredicate<'tcx>]) + -> &'tcx Slice> { + assert!(!eps.is_empty()); + assert!(eps.windows(2).all(|w| w[0].cmp(self, &w[1]) != Ordering::Greater)); + self._intern_existential_predicates(eps) + } + + pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx Slice> { + if ts.len() == 0 { + Slice::empty() + } else { + self._intern_type_list(ts) + } + } + + pub fn intern_substs(self, ts: &[Kind<'tcx>]) -> &'tcx Slice> { + if ts.len() == 0 { + Slice::empty() + } else { + self._intern_substs(ts) + } + } + + pub fn mk_existential_predicates], + &'tcx Slice>>>(self, iter: I) + -> I::Output { + iter.intern_with(|xs| self.intern_existential_predicates(xs)) + } + + pub fn mk_type_list], + &'tcx Slice>>>(self, iter: I) -> I::Output { + iter.intern_with(|xs| self.intern_type_list(xs)) + } + + pub fn mk_substs], + &'tcx Slice>>>(self, iter: I) -> I::Output { + iter.intern_with(|xs| self.intern_substs(xs)) + } + + pub fn mk_substs_trait(self, + s: Ty<'tcx>, + t: &[Ty<'tcx>]) + -> &'tcx Substs<'tcx> + { + self.mk_substs(iter::once(s).chain(t.into_iter().cloned()).map(Kind::from)) + } + + /// Obtain the representation annotation for a struct definition. 
+ pub fn lookup_repr_hints(self, did: DefId) -> Rc> { + self.repr_hint_cache.memoize(did, || { + Rc::new(self.get_attrs(did).iter().flat_map(|meta| { + attr::find_repr_attrs(self.sess.diagnostic(), meta).into_iter() + }).collect()) + }) + } +} + +pub trait InternAs { + type Output; + fn intern_with(self, F) -> Self::Output + where F: FnOnce(&T) -> R; +} + +impl InternAs<[T], R> for I + where E: InternIteratorElement, + I: Iterator { + type Output = E::Output; + fn intern_with(self, f: F) -> Self::Output + where F: FnOnce(&[T]) -> R { + E::intern_with(self, f) + } +} + +pub trait InternIteratorElement: Sized { + type Output; + fn intern_with, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output; +} + +impl InternIteratorElement for T { + type Output = R; + fn intern_with, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output { + f(&iter.collect::>()) + } +} + +impl InternIteratorElement for Result { + type Output = Result; + fn intern_with, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output { + Ok(f(&iter.collect::, _>>()?)) + } +} diff --git a/src/librustc/middle/ty/error.rs b/src/librustc/ty/error.rs similarity index 76% rename from src/librustc/middle/ty/error.rs rename to src/librustc/ty/error.rs index ab48fd7fb8665..e95ce97e13577 100644 --- a/src/librustc/middle/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -8,18 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::def_id::DefId; -use middle::subst; -use middle::infer::type_variable; -use middle::ty::{self, BoundRegion, Region, Ty}; +use hir::def_id::DefId; +use infer::type_variable; +use ty::{self, BoundRegion, Region, Ty, TyCtxt}; use std::fmt; use syntax::abi; use syntax::ast::{self, Name}; -use syntax::codemap::Span; -use syntax::errors::DiagnosticBuilder; +use errors::DiagnosticBuilder; +use syntax_pos::Span; -use rustc_front::hir; +use hir; #[derive(Clone, Copy, Debug)] pub struct ExpectedFound { @@ -34,31 +33,24 @@ pub enum TypeError<'tcx> { UnsafetyMismatch(ExpectedFound), AbiMismatch(ExpectedFound), Mutability, - BoxMutability, - PtrMutability, - RefMutability, - VecMutability, TupleSize(ExpectedFound), FixedArraySize(ExpectedFound), - TyParamSize(ExpectedFound), ArgCount, - RegionsDoesNotOutlive(Region, Region), - RegionsNotSame(Region, Region), - RegionsNoOverlap(Region, Region), - RegionsInsufficientlyPolymorphic(BoundRegion, Region), - RegionsOverlyPolymorphic(BoundRegion, Region), + RegionsDoesNotOutlive(&'tcx Region, &'tcx Region), + RegionsNotSame(&'tcx Region, &'tcx Region), + RegionsNoOverlap(&'tcx Region, &'tcx Region), + RegionsInsufficientlyPolymorphic(BoundRegion, &'tcx Region), + RegionsOverlyPolymorphic(BoundRegion, &'tcx Region), Sorts(ExpectedFound>), - IntegerAsChar, IntMismatch(ExpectedFound), FloatMismatch(ExpectedFound), Traits(ExpectedFound), - BuiltinBoundsMismatch(ExpectedFound), VariadicMismatch(ExpectedFound), CyclicTy, - ConvergenceMismatch(ExpectedFound), ProjectionNameMismatched(ExpectedFound), ProjectionBoundsLength(ExpectedFound), - TyParamDefaultMismatch(ExpectedFound>) + TyParamDefaultMismatch(ExpectedFound>), + ExistentialMismatch(ExpectedFound<&'tcx ty::Slice>>), } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Hash, Debug, Copy)] @@ -99,19 +91,7 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { values.expected, values.found) } - Mutability => write!(f, "values differ in mutability"), - BoxMutability => { 
- write!(f, "boxed values differ in mutability") - } - VecMutability => write!(f, "vectors differ in mutability"), - PtrMutability => write!(f, "pointers differ in mutability"), - RefMutability => write!(f, "references differ in mutability"), - TyParamSize(values) => { - write!(f, "expected a type with {} type params, \ - found one with {} type params", - values.expected, - values.found) - } + Mutability => write!(f, "types differ in mutability"), FixedArraySize(values) => { write!(f, "expected an array with a fixed size of {} elements, \ found one with {} elements", @@ -155,22 +135,6 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { format!("trait `{}`", tcx.item_path_str(values.found))) }), - BuiltinBoundsMismatch(values) => { - if values.expected.is_empty() { - write!(f, "expected no bounds, found `{}`", - values.found) - } else if values.found.is_empty() { - write!(f, "expected bounds `{}`, found no bounds", - values.expected) - } else { - write!(f, "expected bounds `{}`, found bounds `{}`", - values.expected, - values.found) - } - } - IntegerAsChar => { - write!(f, "expected an integral type, found `char`") - } IntMismatch(ref values) => { write!(f, "expected `{:?}`, found `{:?}`", values.expected, @@ -186,11 +150,6 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { if values.expected { "variadic" } else { "non-variadic" }, if values.found { "variadic" } else { "non-variadic" }) } - ConvergenceMismatch(ref values) => { - write!(f, "expected {} fn, found {} function", - if values.expected { "converging" } else { "diverging" }, - if values.found { "converging" } else { "diverging" }) - } ProjectionNameMismatched(ref values) => { write!(f, "expected {}, found {}", values.expected, @@ -206,30 +165,49 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { values.expected.ty, values.found.ty) } + ExistentialMismatch(ref values) => { + report_maybe_different(f, format!("trait `{}`", values.expected), + format!("trait `{}`", values.found)) + } } } } -impl<'tcx> ty::TyS<'tcx> { 
- fn sort_string(&self, cx: &ty::ctxt) -> String { +impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { + pub fn sort_string(&self, tcx: TyCtxt<'a, 'gcx, 'lcx>) -> String { match self.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | - ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr => self.to_string(), + ty::TyUint(_) | ty::TyFloat(_) | ty::TyStr | ty::TyNever => self.to_string(), ty::TyTuple(ref tys) if tys.is_empty() => self.to_string(), - ty::TyEnum(def, _) => format!("enum `{}`", cx.item_path_str(def.did)), + ty::TyAdt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)), ty::TyBox(_) => "box".to_string(), ty::TyArray(_, n) => format!("array of {} elements", n), ty::TySlice(_) => "slice".to_string(), ty::TyRawPtr(_) => "*-ptr".to_string(), - ty::TyRef(_, _) => "&-ptr".to_string(), - ty::TyBareFn(Some(_), _) => format!("fn item"), - ty::TyBareFn(None, _) => "fn pointer".to_string(), - ty::TyTrait(ref inner) => { - format!("trait {}", cx.item_path_str(inner.principal_def_id())) + ty::TyRef(region, tymut) => { + let tymut_string = tymut.to_string(); + if tymut_string == "_" || //unknown type name, + tymut_string.len() > 10 || //name longer than saying "reference", + region.to_string() != "" //... or a complex type + { + match tymut { + ty::TypeAndMut{mutbl, ..} => { + format!("{}reference", match mutbl { + hir::Mutability::MutMutable => "mutable ", + _ => "" + }) + } + } + } else { + format!("&{}", tymut_string) + } } - ty::TyStruct(def, _) => { - format!("struct `{}`", cx.item_path_str(def.did)) + ty::TyFnDef(..) => format!("fn item"), + ty::TyFnPtr(_) => "fn pointer".to_string(), + ty::TyDynamic(ref inner, ..) => { + inner.principal().map_or_else(|| "trait".to_string(), + |p| format!("trait {}", tcx.item_path_str(p.def_id()))) } ty::TyClosure(..) 
=> "closure".to_string(), ty::TyTuple(_) => "tuple".to_string(), @@ -241,19 +219,20 @@ impl<'tcx> ty::TyS<'tcx> { ty::TyInfer(ty::FreshFloatTy(_)) => "skolemized floating-point type".to_string(), ty::TyProjection(_) => "associated type".to_string(), ty::TyParam(ref p) => { - if p.space == subst::SelfSpace { + if p.is_self() { "Self".to_string() } else { "type parameter".to_string() } } + ty::TyAnon(..) => "anonymized type".to_string(), ty::TyError => "type error".to_string(), } } } -impl<'tcx> ty::ctxt<'tcx> { - pub fn note_and_explain_type_err(&self, +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn note_and_explain_type_err(self, db: &mut DiagnosticBuilder, err: &TypeError<'tcx>, sp: Span) { @@ -279,7 +258,7 @@ impl<'tcx> ty::ctxt<'tcx> { self.note_and_explain_region(db, "concrete lifetime that was found is ", conc_region, ""); } - RegionsOverlyPolymorphic(_, ty::ReVar(_)) => { + RegionsOverlyPolymorphic(_, &ty::ReVar(_)) => { // don't bother to print out the message below for // inference variables, it's not very illuminating. 
} @@ -304,10 +283,7 @@ impl<'tcx> ty::ctxt<'tcx> { expected.ty, found.ty)); - match - self.map.as_local_node_id(expected.def_id) - .and_then(|node_id| self.map.opt_span(node_id)) - { + match self.map.span_if_local(expected.def_id) { Some(span) => { db.span_note(span, "a default was defined here..."); } @@ -321,10 +297,7 @@ impl<'tcx> ty::ctxt<'tcx> { expected.origin_span, "...that was applied to an unconstrained type variable here"); - match - self.map.as_local_node_id(found.def_id) - .and_then(|node_id| self.map.opt_span(node_id)) - { + match self.map.span_if_local(found.def_id) { Some(span) => { db.span_note(span, "a second default was defined here..."); } diff --git a/src/librustc/middle/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs similarity index 77% rename from src/librustc/middle/ty/fast_reject.rs rename to src/librustc/ty/fast_reject.rs index a06e8a72c44ee..ade6cad6866df 100644 --- a/src/librustc/middle/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -8,9 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use middle::def_id::DefId; -use middle::ty::{self, Ty}; +use hir::def_id::DefId; +use ty::{self, Ty, TyCtxt}; use syntax::ast; +use middle::lang_items::OwnedBoxLangItem; use self::SimplifiedType::*; @@ -22,14 +23,15 @@ pub enum SimplifiedType { IntSimplifiedType(ast::IntTy), UintSimplifiedType(ast::UintTy), FloatSimplifiedType(ast::FloatTy), - EnumSimplifiedType(DefId), + AdtSimplifiedType(DefId), StrSimplifiedType, - VecSimplifiedType, + ArraySimplifiedType, PtrSimplifiedType, + NeverSimplifiedType, TupleSimplifiedType(usize), TraitSimplifiedType(DefId), - StructSimplifiedType(DefId), ClosureSimplifiedType(DefId), + AnonSimplifiedType(DefId), FunctionSimplifiedType(usize), ParameterSimplifiedType, } @@ -43,10 +45,10 @@ pub enum SimplifiedType { /// then we can't say much about whether two types would unify. 
Put another way, /// `can_simplify_params` should be true if type parameters appear free in `ty` and `false` if they /// are to be considered bound. -pub fn simplify_type(tcx: &ty::ctxt, - ty: Ty, - can_simplify_params: bool) - -> Option +pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + ty: Ty, + can_simplify_params: bool) + -> Option { match ty.sty { ty::TyBool => Some(BoolSimplifiedType), @@ -54,15 +56,12 @@ pub fn simplify_type(tcx: &ty::ctxt, ty::TyInt(int_type) => Some(IntSimplifiedType(int_type)), ty::TyUint(uint_type) => Some(UintSimplifiedType(uint_type)), ty::TyFloat(float_type) => Some(FloatSimplifiedType(float_type)), - ty::TyEnum(def, _) => Some(EnumSimplifiedType(def.did)), + ty::TyAdt(def, _) => Some(AdtSimplifiedType(def.did)), ty::TyStr => Some(StrSimplifiedType), - ty::TyArray(..) | ty::TySlice(_) => Some(VecSimplifiedType), + ty::TyArray(..) | ty::TySlice(_) => Some(ArraySimplifiedType), ty::TyRawPtr(_) => Some(PtrSimplifiedType), - ty::TyTrait(ref trait_info) => { - Some(TraitSimplifiedType(trait_info.principal_def_id())) - } - ty::TyStruct(def, _) => { - Some(StructSimplifiedType(def.did)) + ty::TyDynamic(ref trait_info, ..) 
=> { + trait_info.principal().map(|p| TraitSimplifiedType(p.def_id())) } ty::TyRef(_, mt) => { // since we introduce auto-refs during method lookup, we @@ -72,18 +71,16 @@ pub fn simplify_type(tcx: &ty::ctxt, } ty::TyBox(_) => { // treat like we would treat `Box` - match tcx.lang_items.require_owned_box() { - Ok(def_id) => Some(StructSimplifiedType(def_id)), - Err(msg) => tcx.sess.fatal(&msg), - } + Some(AdtSimplifiedType(tcx.require_lang_item(OwnedBoxLangItem))) } ty::TyClosure(def_id, _) => { Some(ClosureSimplifiedType(def_id)) } + ty::TyNever => Some(NeverSimplifiedType), ty::TyTuple(ref tys) => { Some(TupleSimplifiedType(tys.len())) } - ty::TyBareFn(_, ref f) => { + ty::TyFnDef(.., ref f) | ty::TyFnPtr(ref f) => { Some(FunctionSimplifiedType(f.sig.0.inputs.len())) } ty::TyProjection(_) | ty::TyParam(_) => { @@ -98,6 +95,9 @@ pub fn simplify_type(tcx: &ty::ctxt, None } } + ty::TyAnon(def_id, _) => { + Some(AnonSimplifiedType(def_id)) + } ty::TyInfer(_) | ty::TyError => None, } } diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs new file mode 100644 index 0000000000000..2bcbccb7d0505 --- /dev/null +++ b/src/librustc/ty/flags.rs @@ -0,0 +1,214 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use ty::subst::Substs; +use ty::{self, Ty, TypeFlags, TypeFoldable}; + +#[derive(Debug)] +pub struct FlagComputation { + pub flags: TypeFlags, + + // maximum depth of any bound region that we have seen thus far + pub depth: u32, +} + +impl FlagComputation { + fn new() -> FlagComputation { + FlagComputation { flags: TypeFlags::empty(), depth: 0 } + } + + pub fn for_sty(st: &ty::TypeVariants) -> FlagComputation { + let mut result = FlagComputation::new(); + result.add_sty(st); + result + } + + fn add_flags(&mut self, flags: TypeFlags) { + self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS); + } + + fn add_depth(&mut self, depth: u32) { + if depth > self.depth { + self.depth = depth; + } + } + + /// Adds the flags/depth from a set of types that appear within the current type, but within a + /// region binder. + fn add_bound_computation(&mut self, computation: &FlagComputation) { + self.add_flags(computation.flags); + + // The types that contributed to `computation` occurred within + // a region binder, so subtract one from the region depth + // within when adding the depth to `self`. + let depth = computation.depth; + if depth > 0 { + self.add_depth(depth - 1); + } + } + + fn add_sty(&mut self, st: &ty::TypeVariants) { + match st { + &ty::TyBool | + &ty::TyChar | + &ty::TyInt(_) | + &ty::TyFloat(_) | + &ty::TyUint(_) | + &ty::TyNever | + &ty::TyStr => { + } + + // You might think that we could just return TyError for + // any type containing TyError as a component, and get + // rid of the TypeFlags::HAS_TY_ERR flag -- likewise for ty_bot (with + // the exception of function types that return bot). + // But doing so caused sporadic memory corruption, and + // neither I (tjc) nor nmatsakis could figure out why, + // so we're doing it this way. 
+ &ty::TyError => { + self.add_flags(TypeFlags::HAS_TY_ERR) + } + + &ty::TyParam(ref p) => { + self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + if p.is_self() { + self.add_flags(TypeFlags::HAS_SELF); + } else { + self.add_flags(TypeFlags::HAS_PARAMS); + } + } + + &ty::TyClosure(_, ref substs) => { + self.add_flags(TypeFlags::HAS_TY_CLOSURE); + self.add_flags(TypeFlags::HAS_LOCAL_NAMES); + self.add_substs(&substs.substs); + } + + &ty::TyInfer(infer) => { + self.add_flags(TypeFlags::HAS_LOCAL_NAMES); // it might, right? + self.add_flags(TypeFlags::HAS_TY_INFER); + match infer { + ty::FreshTy(_) | + ty::FreshIntTy(_) | + ty::FreshFloatTy(_) => {} + _ => self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX) + } + } + + &ty::TyAdt(_, substs) => { + self.add_substs(substs); + } + + &ty::TyProjection(ref data) => { + // currently we can't normalize projections that + // include bound regions, so track those separately. + if !data.has_escaping_regions() { + self.add_flags(TypeFlags::HAS_NORMALIZABLE_PROJECTION); + } + self.add_flags(TypeFlags::HAS_PROJECTION); + self.add_projection_ty(data); + } + + &ty::TyAnon(_, substs) => { + self.add_flags(TypeFlags::HAS_PROJECTION); + self.add_substs(substs); + } + + &ty::TyDynamic(ref obj, r) => { + let mut computation = FlagComputation::new(); + for predicate in obj.skip_binder().iter() { + match *predicate { + ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs), + ty::ExistentialPredicate::Projection(p) => { + let mut proj_computation = FlagComputation::new(); + proj_computation.add_existential_projection(&p); + self.add_bound_computation(&proj_computation); + } + ty::ExistentialPredicate::AutoTrait(_) => {} + } + } + self.add_bound_computation(&computation); + self.add_region(r); + } + + &ty::TyBox(tt) | &ty::TyArray(tt, _) | &ty::TySlice(tt) => { + self.add_ty(tt) + } + + &ty::TyRawPtr(ref m) => { + self.add_ty(m.ty); + } + + &ty::TyRef(r, ref m) => { + self.add_region(r); + self.add_ty(m.ty); + } + + &ty::TyTuple(ref 
ts) => { + self.add_tys(&ts[..]); + } + + &ty::TyFnDef(_, substs, ref f) => { + self.add_substs(substs); + self.add_fn_sig(&f.sig); + } + + &ty::TyFnPtr(ref f) => { + self.add_fn_sig(&f.sig); + } + } + } + + fn add_ty(&mut self, ty: Ty) { + self.add_flags(ty.flags.get()); + self.add_depth(ty.region_depth); + } + + fn add_tys(&mut self, tys: &[Ty]) { + for &ty in tys { + self.add_ty(ty); + } + } + + fn add_fn_sig(&mut self, fn_sig: &ty::PolyFnSig) { + let mut computation = FlagComputation::new(); + + computation.add_tys(&fn_sig.0.inputs); + computation.add_ty(fn_sig.0.output); + + self.add_bound_computation(&computation); + } + + fn add_region(&mut self, r: &ty::Region) { + self.add_flags(r.type_flags()); + if let ty::ReLateBound(debruijn, _) = *r { + self.add_depth(debruijn.depth); + } + } + + fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection) { + self.add_substs(projection.trait_ref.substs); + self.add_ty(projection.ty); + } + + fn add_projection_ty(&mut self, projection_ty: &ty::ProjectionTy) { + self.add_substs(projection_ty.trait_ref.substs); + } + + fn add_substs(&mut self, substs: &Substs) { + for ty in substs.types() { + self.add_ty(ty); + } + + for r in substs.regions() { + self.add_region(r); + } + } +} diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs new file mode 100644 index 0000000000000..10754825a8c18 --- /dev/null +++ b/src/librustc/ty/fold.rs @@ -0,0 +1,702 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Generalized type folding mechanism. The setup is a bit convoluted +//! but allows for convenient usage. Let T be an instance of some +//! 
"foldable type" (one which implements `TypeFoldable`) and F be an +//! instance of a "folder" (a type which implements `TypeFolder`). Then +//! the setup is intended to be: +//! +//! T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F) +//! +//! This way, when you define a new folder F, you can override +//! `fold_T()` to customize the behavior, and invoke `T.super_fold_with()` +//! to get the original behavior. Meanwhile, to actually fold +//! something, you can just write `T.fold_with(F)`, which is +//! convenient. (Note that `fold_with` will also transparently handle +//! things like a `Vec` where T is foldable and so on.) +//! +//! In this ideal setup, the only function that actually *does* +//! anything is `T.super_fold_with()`, which traverses the type `T`. +//! Moreover, `T.super_fold_with()` should only ever call `T.fold_with()`. +//! +//! In some cases, we follow a degenerate pattern where we do not have +//! a `fold_T` method. Instead, `T.fold_with` traverses the structure directly. +//! This is suboptimal because the behavior cannot be overridden, but it's +//! much less work to implement. If you ever *do* need an override that +//! doesn't exist, it's not hard to convert the degenerate pattern into the +//! proper thing. +//! +//! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using similar setup: +//! T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V). +//! These methods return true to indicate that the visitor has found what it is looking for +//! and does not need to visit anything else. + +use middle::region; +use ty::subst::Substs; +use ty::adjustment; +use ty::{self, Binder, Ty, TyCtxt, TypeFlags}; + +use std::fmt; +use util::nodemap::{FxHashMap, FxHashSet}; + +/// The TypeFoldable trait is implemented for every type that can be folded. +/// Basically, every type that has a corresponding method in TypeFolder. 
+pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self; + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + self.super_fold_with(folder) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool; + fn visit_with>(&self, visitor: &mut V) -> bool { + self.super_visit_with(visitor) + } + + fn has_regions_escaping_depth(&self, depth: u32) -> bool { + self.visit_with(&mut HasEscapingRegionsVisitor { depth: depth }) + } + fn has_escaping_regions(&self) -> bool { + self.has_regions_escaping_depth(0) + } + + fn has_type_flags(&self, flags: TypeFlags) -> bool { + self.visit_with(&mut HasTypeFlagsVisitor { flags: flags }) + } + fn has_projection_types(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_PROJECTION) + } + fn references_error(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_TY_ERR) + } + fn has_param_types(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_PARAMS) + } + fn has_self_ty(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_SELF) + } + fn has_infer_types(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_TY_INFER) + } + fn needs_infer(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER) + } + fn needs_subst(&self) -> bool { + self.has_type_flags(TypeFlags::NEEDS_SUBST) + } + fn has_re_skol(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_SKOL) + } + fn has_closure_types(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_TY_CLOSURE) + } + fn has_erasable_regions(&self) -> bool { + self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND | + TypeFlags::HAS_RE_INFER | + TypeFlags::HAS_FREE_REGIONS) + } + fn is_normalized_for_trans(&self) -> bool { + !self.has_type_flags(TypeFlags::HAS_RE_EARLY_BOUND | + TypeFlags::HAS_RE_INFER | + TypeFlags::HAS_FREE_REGIONS | + TypeFlags::HAS_TY_INFER | + TypeFlags::HAS_PARAMS | + TypeFlags::HAS_NORMALIZABLE_PROJECTION | + 
TypeFlags::HAS_TY_ERR | + TypeFlags::HAS_SELF) + } + /// Indicates whether this value references only 'global' + /// types/lifetimes that are the same regardless of what fn we are + /// in. This is used for caching. Errs on the side of returning + /// false. + fn is_global(&self) -> bool { + !self.has_type_flags(TypeFlags::HAS_LOCAL_NAMES) + } +} + +/// The TypeFolder trait defines the actual *folding*. There is a +/// method defined for every foldable type. Each of these has a +/// default implementation that does an "identity" fold. Within each +/// identity fold, it should invoke `foo.fold_with(self)` to fold each +/// sub-item. +pub trait TypeFolder<'gcx: 'tcx, 'tcx> : Sized { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>; + + fn fold_binder(&mut self, t: &Binder) -> Binder + where T : TypeFoldable<'tcx> + { + t.super_fold_with(self) + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + t.super_fold_with(self) + } + + fn fold_mt(&mut self, t: &ty::TypeAndMut<'tcx>) -> ty::TypeAndMut<'tcx> { + t.super_fold_with(self) + } + + fn fold_impl_header(&mut self, imp: &ty::ImplHeader<'tcx>) -> ty::ImplHeader<'tcx> { + imp.super_fold_with(self) + } + + fn fold_substs(&mut self, + substs: &'tcx Substs<'tcx>) + -> &'tcx Substs<'tcx> { + substs.super_fold_with(self) + } + + fn fold_fn_sig(&mut self, + sig: &ty::FnSig<'tcx>) + -> ty::FnSig<'tcx> { + sig.super_fold_with(self) + } + + fn fold_bare_fn_ty(&mut self, + fty: &'tcx ty::BareFnTy<'tcx>) + -> &'tcx ty::BareFnTy<'tcx> + { + fty.super_fold_with(self) + } + + fn fold_closure_ty(&mut self, + fty: &ty::ClosureTy<'tcx>) + -> ty::ClosureTy<'tcx> { + fty.super_fold_with(self) + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + r.super_fold_with(self) + } + + fn fold_autoref(&mut self, ar: &adjustment::AutoBorrow<'tcx>) + -> adjustment::AutoBorrow<'tcx> { + ar.super_fold_with(self) + } +} + +pub trait TypeVisitor<'tcx> : Sized { + fn visit_binder>(&mut self, t: &Binder) -> bool { + 
t.super_visit_with(self) + } + + fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { + t.super_visit_with(self) + } + + fn visit_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) -> bool { + trait_ref.super_visit_with(self) + } + + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + r.super_visit_with(self) + } +} + +/////////////////////////////////////////////////////////////////////////// +// Some sample folders + +pub struct BottomUpFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a, F> + where F: FnMut(Ty<'tcx>) -> Ty<'tcx> +{ + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub fldop: F, +} + +impl<'a, 'gcx, 'tcx, F> TypeFolder<'gcx, 'tcx> for BottomUpFolder<'a, 'gcx, 'tcx, F> + where F: FnMut(Ty<'tcx>) -> Ty<'tcx>, +{ + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + let t1 = ty.super_fold_with(self); + (self.fldop)(t1) + } +} + +/////////////////////////////////////////////////////////////////////////// +// Region folder + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Collects the free and escaping regions in `value` into `region_set`. Returns + /// whether any late-bound regions were skipped + pub fn collect_regions(self, + value: &T, + region_set: &mut FxHashSet<&'tcx ty::Region>) + -> bool + where T : TypeFoldable<'tcx> + { + let mut have_bound_regions = false; + self.fold_regions(value, &mut have_bound_regions, |r, d| { + region_set.insert(self.mk_region(r.from_depth(d))); + r + }); + have_bound_regions + } + + /// Folds the escaping and free regions in `value` using `f`, and + /// sets `skipped_regions` to true if any late-bound region was found + /// and skipped. 
+ pub fn fold_regions(self, + value: &T, + skipped_regions: &mut bool, + mut f: F) + -> T + where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region, + T : TypeFoldable<'tcx>, + { + value.fold_with(&mut RegionFolder::new(self, skipped_regions, &mut f)) + } +} + +/// Folds over the substructure of a type, visiting its component +/// types and all regions that occur *free* within it. +/// +/// That is, `Ty` can contain function or method types that bind +/// regions at the call site (`ReLateBound`), and occurrences of +/// regions (aka "lifetimes") that are bound within a type are not +/// visited by this folder; only regions that occur free will be +/// visited by `fld_r`. + +pub struct RegionFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + skipped_regions: &'a mut bool, + current_depth: u32, + fld_r: &'a mut (FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region + 'a), +} + +impl<'a, 'gcx, 'tcx> RegionFolder<'a, 'gcx, 'tcx> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, + skipped_regions: &'a mut bool, + fld_r: &'a mut F) -> RegionFolder<'a, 'gcx, 'tcx> + where F : FnMut(&'tcx ty::Region, u32) -> &'tcx ty::Region + { + RegionFolder { + tcx: tcx, + skipped_regions: skipped_regions, + current_depth: 1, + fld_r: fld_r, + } + } +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionFolder<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + self.current_depth += 1; + let t = t.super_fold_with(self); + self.current_depth -= 1; + t + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReLateBound(debruijn, _) if debruijn.depth < self.current_depth => { + debug!("RegionFolder.fold_region({:?}) skipped bound region (current depth={})", + r, self.current_depth); + *self.skipped_regions = true; + r + } + _ => { + debug!("RegionFolder.fold_region({:?}) folding free region (current_depth={})", + r, self.current_depth); + 
(self.fld_r)(r, self.current_depth) + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Late-bound region replacer + +// Replaces the escaping regions in a type. + +struct RegionReplacer<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + current_depth: u32, + fld_r: &'a mut (FnMut(ty::BoundRegion) -> &'tcx ty::Region + 'a), + map: FxHashMap +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn replace_late_bound_regions(self, + value: &Binder, + mut f: F) + -> (T, FxHashMap) + where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region, + T : TypeFoldable<'tcx>, + { + let mut replacer = RegionReplacer::new(self, &mut f); + let result = value.skip_binder().fold_with(&mut replacer); + (result, replacer.map) + } + + + /// Replace any late-bound regions bound in `value` with free variants attached to scope-id + /// `scope_id`. + pub fn liberate_late_bound_regions(self, + all_outlive_scope: region::CodeExtent, + value: &Binder) + -> T + where T : TypeFoldable<'tcx> + { + self.replace_late_bound_regions(value, |br| { + self.mk_region(ty::ReFree(ty::FreeRegion { + scope: all_outlive_scope, + bound_region: br + })) + }).0 + } + + /// Flattens two binding levels into one. So `for<'a> for<'b> Foo` + /// becomes `for<'a,'b> Foo`. 
+ pub fn flatten_late_bound_regions(self, bound2_value: &Binder>) + -> Binder + where T: TypeFoldable<'tcx> + { + let bound0_value = bound2_value.skip_binder().skip_binder(); + let value = self.fold_regions(bound0_value, &mut false, + |region, current_depth| { + match *region { + ty::ReLateBound(debruijn, br) if debruijn.depth >= current_depth => { + // should be true if no escaping regions from bound2_value + assert!(debruijn.depth - current_depth <= 1); + self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(current_depth), br)) + } + _ => { + region + } + } + }); + Binder(value) + } + + pub fn no_late_bound_regions(self, value: &Binder) -> Option + where T : TypeFoldable<'tcx> + { + if value.0.has_escaping_regions() { + None + } else { + Some(value.0.clone()) + } + } + + /// Returns a set of all late-bound regions that are constrained + /// by `value`, meaning that if we instantiate those LBR with + /// variables and equate `value` with something else, those + /// variables will also be equated. + pub fn collect_constrained_late_bound_regions(&self, value: &Binder) + -> FxHashSet + where T : TypeFoldable<'tcx> + { + self.collect_late_bound_regions(value, true) + } + + /// Returns a set of all late-bound regions that appear in `value` anywhere. + pub fn collect_referenced_late_bound_regions(&self, value: &Binder) + -> FxHashSet + where T : TypeFoldable<'tcx> + { + self.collect_late_bound_regions(value, false) + } + + fn collect_late_bound_regions(&self, value: &Binder, just_constraint: bool) + -> FxHashSet + where T : TypeFoldable<'tcx> + { + let mut collector = LateBoundRegionsCollector::new(just_constraint); + let result = value.skip_binder().visit_with(&mut collector); + assert!(!result); // should never have stopped early + collector.regions + } + + /// Replace any late-bound regions bound in `value` with `'erased`. Useful in trans but also + /// method lookup and a few other places where precise region relationships are not required. 
+ pub fn erase_late_bound_regions(self, value: &Binder) -> T + where T : TypeFoldable<'tcx> + { + self.replace_late_bound_regions(value, |_| self.mk_region(ty::ReErased)).0 + } + + /// Rewrite any late-bound regions so that they are anonymous. Region numbers are + /// assigned starting at 1 and increasing monotonically in the order traversed + /// by the fold operation. + /// + /// The chief purpose of this function is to canonicalize regions so that two + /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become + /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and + /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization. + pub fn anonymize_late_bound_regions(self, sig: &Binder) -> Binder + where T : TypeFoldable<'tcx>, + { + let mut counter = 0; + Binder(self.replace_late_bound_regions(sig, |_| { + counter += 1; + self.mk_region(ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrAnon(counter))) + }).0) + } +} + +impl<'a, 'gcx, 'tcx> RegionReplacer<'a, 'gcx, 'tcx> { + fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, fld_r: &'a mut F) + -> RegionReplacer<'a, 'gcx, 'tcx> + where F : FnMut(ty::BoundRegion) -> &'tcx ty::Region + { + RegionReplacer { + tcx: tcx, + current_depth: 1, + fld_r: fld_r, + map: FxHashMap() + } + } +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionReplacer<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + self.current_depth += 1; + let t = t.super_fold_with(self); + self.current_depth -= 1; + t + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if !t.has_regions_escaping_depth(self.current_depth-1) { + return t; + } + + t.super_fold_with(self) + } + + fn fold_region(&mut self, r:&'tcx ty::Region) -> &'tcx ty::Region { + match *r { + ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { + let fld_r = &mut self.fld_r; + let region = 
*self.map.entry(br).or_insert_with(|| fld_r(br)); + if let ty::ReLateBound(debruijn1, br) = *region { + // If the callback returns a late-bound region, + // that region should always use depth 1. Then we + // adjust it to the correct depth. + assert_eq!(debruijn1.depth, 1); + self.tcx.mk_region(ty::ReLateBound(debruijn, br)) + } else { + region + } + } + _ => r + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Region eraser + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Returns an equivalent value with all free regions removed (note + /// that late-bound regions remain, because they are important for + /// subtyping, but they are anonymized and normalized as well).. + pub fn erase_regions(self, value: &T) -> T + where T : TypeFoldable<'tcx> + { + let value1 = value.fold_with(&mut RegionEraser(self)); + debug!("erase_regions({:?}) = {:?}", + value, value1); + return value1; + + struct RegionEraser<'a, 'gcx: 'a+'tcx, 'tcx: 'a>(TyCtxt<'a, 'gcx, 'tcx>); + + impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for RegionEraser<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.0 } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + if let Some(u) = self.tcx().normalized_cache.borrow().get(&ty).cloned() { + return u; + } + + // FIXME(eddyb) should local contexts have a cache too? 
+ if let Some(ty_lifted) = self.tcx().lift_to_global(&ty) { + let tcx = self.tcx().global_tcx(); + let t_norm = ty_lifted.super_fold_with(&mut RegionEraser(tcx)); + tcx.normalized_cache.borrow_mut().insert(ty_lifted, t_norm); + t_norm + } else { + ty.super_fold_with(self) + } + } + + fn fold_binder(&mut self, t: &ty::Binder) -> ty::Binder + where T : TypeFoldable<'tcx> + { + let u = self.tcx().anonymize_late_bound_regions(t); + u.super_fold_with(self) + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + // because late-bound regions affect subtyping, we can't + // erase the bound/free distinction, but we can replace + // all free regions with 'erased. + // + // Note that we *CAN* replace early-bound regions -- the + // type system never "sees" those, they get substituted + // away. In trans, they will always be erased to 'erased + // whenever a substitution occurs. + match *r { + ty::ReLateBound(..) => r, + _ => self.tcx().mk_region(ty::ReErased) + } + } + } + } +} + +/////////////////////////////////////////////////////////////////////////// +// Region shifter +// +// Shifts the De Bruijn indices on all escaping bound regions by a +// fixed amount. Useful in substitution or when otherwise introducing +// a binding level that is not intended to capture the existing bound +// regions. See comment on `shift_regions_through_binders` method in +// `subst.rs` for more details. 
+ +pub fn shift_region(region: ty::Region, amount: u32) -> ty::Region { + match region { + ty::ReLateBound(debruijn, br) => { + ty::ReLateBound(debruijn.shifted(amount), br) + } + _ => { + region + } + } +} + +pub fn shift_regions<'a, 'gcx, 'tcx, T>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + amount: u32, value: &T) -> T + where T: TypeFoldable<'tcx> +{ + debug!("shift_regions(value={:?}, amount={})", + value, amount); + + value.fold_with(&mut RegionFolder::new(tcx, &mut false, &mut |region, _current_depth| { + tcx.mk_region(shift_region(*region, amount)) + })) +} + +/// An "escaping region" is a bound region whose binder is not part of `t`. +/// +/// So, for example, consider a type like the following, which has two binders: +/// +/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize)) +/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope +/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope +/// +/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the +/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner +/// fn type*, that type has an escaping region: `'a`. +/// +/// Note that what I'm calling an "escaping region" is often just called a "free region". However, +/// we already use the term "free region". It refers to the regions that we use to represent bound +/// regions on a fn definition while we are typechecking its body. +/// +/// To clarify, conceptually there is no particular difference between an "escaping" region and a +/// "free" region. However, there is a big difference in practice. Basically, when "entering" a +/// binding level, one is generally required to do some sort of processing to a bound region, such +/// as replacing it with a fresh/skolemized region, or making an entry in the environment to +/// represent the scope to which it is attached, etc. An escaping region represents a bound region +/// for which this processing has not yet been done. 
+struct HasEscapingRegionsVisitor { + depth: u32, +} + +impl<'tcx> TypeVisitor<'tcx> for HasEscapingRegionsVisitor { + fn visit_binder>(&mut self, t: &Binder) -> bool { + self.depth += 1; + let result = t.super_visit_with(self); + self.depth -= 1; + result + } + + fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { + t.region_depth > self.depth + } + + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + r.escapes_depth(self.depth) + } +} + +struct HasTypeFlagsVisitor { + flags: ty::TypeFlags, +} + +impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { + fn visit_ty(&mut self, t: Ty) -> bool { + let flags = t.flags.get(); + debug!("HasTypeFlagsVisitor: t={:?} t.flags={:?} self.flags={:?}", t, flags, self.flags); + flags.intersects(self.flags) + } + + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + let flags = r.type_flags(); + debug!("HasTypeFlagsVisitor: r={:?} r.flags={:?} self.flags={:?}", r, flags, self.flags); + flags.intersects(self.flags) + } +} + +/// Collects all the late-bound regions it finds into a hash set. +struct LateBoundRegionsCollector { + current_depth: u32, + regions: FxHashSet, + just_constrained: bool, +} + +impl LateBoundRegionsCollector { + fn new(just_constrained: bool) -> Self { + LateBoundRegionsCollector { + current_depth: 1, + regions: FxHashSet(), + just_constrained: just_constrained, + } + } +} + +impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector { + fn visit_binder>(&mut self, t: &Binder) -> bool { + self.current_depth += 1; + let result = t.super_visit_with(self); + self.current_depth -= 1; + result + } + + fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { + // if we are only looking for "constrained" region, we have to + // ignore the inputs to a projection, as they may not appear + // in the normalized form + if self.just_constrained { + match t.sty { + ty::TyProjection(..) | ty::TyAnon(..) 
=> { return false; } + _ => { } + } + } + + t.super_visit_with(self) + } + + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { + ty::ReLateBound(debruijn, br) if debruijn.depth == self.current_depth => { + self.regions.insert(br); + } + _ => { } + } + false + } +} diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs new file mode 100644 index 0000000000000..440a3916786fa --- /dev/null +++ b/src/librustc/ty/item_path.rs @@ -0,0 +1,403 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use hir::map::DefPathData; +use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use ty::{self, Ty, TyCtxt}; +use syntax::ast; +use syntax::symbol::Symbol; + +use std::cell::Cell; + +thread_local! { + static FORCE_ABSOLUTE: Cell = Cell::new(false) +} + +/// Enforces that item_path_str always returns an absolute path. +/// This is useful when building symbols that contain types, +/// where we want the crate name to be part of the symbol. +pub fn with_forced_absolute_paths R, R>(f: F) -> R { + FORCE_ABSOLUTE.with(|force| { + let old = force.get(); + force.set(true); + let result = f(); + force.set(old); + result + }) +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Returns a string identifying this def-id. This string is + /// suitable for user output. It is relative to the current crate + /// root, unless with_forced_absolute_paths was used. 
+ pub fn item_path_str(self, def_id: DefId) -> String { + let mode = FORCE_ABSOLUTE.with(|force| { + if force.get() { + RootMode::Absolute + } else { + RootMode::Local + } + }); + let mut buffer = LocalPathBuffer::new(mode); + self.push_item_path(&mut buffer, def_id); + buffer.into_string() + } + + /// Returns a string identifying this local node-id. + pub fn node_path_str(self, id: ast::NodeId) -> String { + self.item_path_str(self.map.local_def_id(id)) + } + + /// Returns a string identifying this def-id. This string is + /// suitable for user output. It always begins with a crate identifier. + pub fn absolute_item_path_str(self, def_id: DefId) -> String { + let mut buffer = LocalPathBuffer::new(RootMode::Absolute); + self.push_item_path(&mut buffer, def_id); + buffer.into_string() + } + + /// Returns the "path" to a particular crate. This can proceed in + /// various ways, depending on the `root_mode` of the `buffer`. + /// (See `RootMode` enum for more details.) + pub fn push_krate_path(self, buffer: &mut T, cnum: CrateNum) + where T: ItemPathBuffer + { + match *buffer.root_mode() { + RootMode::Local => { + // In local mode, when we encounter a crate other than + // LOCAL_CRATE, execution proceeds in one of two ways: + // + // 1. for a direct dependency, where user added an + // `extern crate` manually, we put the `extern + // crate` as the parent. So you wind up with + // something relative to the current crate. + // 2. for an indirect crate, where there is no extern + // crate, we just prepend the crate name. + // + // Returns `None` for the local crate. 
+ if cnum != LOCAL_CRATE { + let opt_extern_crate = self.sess.cstore.extern_crate(cnum); + let opt_extern_crate = opt_extern_crate.and_then(|extern_crate| { + if extern_crate.direct { + Some(extern_crate.def_id) + } else { + None + } + }); + if let Some(extern_crate_def_id) = opt_extern_crate { + self.push_item_path(buffer, extern_crate_def_id); + } else { + buffer.push(&self.crate_name(cnum).as_str()); + } + } + } + RootMode::Absolute => { + // In absolute mode, just write the crate name + // unconditionally. + buffer.push(&self.original_crate_name(cnum).as_str()); + } + } + } + + /// If possible, this pushes a global path resolving to `external_def_id` that is visible + /// from at least one local module and returns true. If the crate defining `external_def_id` is + /// declared with an `extern crate`, the path is guarenteed to use the `extern crate`. + pub fn try_push_visible_item_path(self, buffer: &mut T, external_def_id: DefId) -> bool + where T: ItemPathBuffer + { + let visible_parent_map = self.sess.cstore.visible_parent_map(); + + let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); + loop { + // If `cur_def` is a direct or injected extern crate, push the path to the crate + // followed by the path to the item within the crate and return. 
+ if cur_def.index == CRATE_DEF_INDEX { + match self.sess.cstore.extern_crate(cur_def.krate) { + Some(extern_crate) if extern_crate.direct => { + self.push_item_path(buffer, extern_crate.def_id); + cur_path.iter().rev().map(|segment| buffer.push(&segment.as_str())).count(); + return true; + } + None => { + buffer.push(&self.crate_name(cur_def.krate).as_str()); + cur_path.iter().rev().map(|segment| buffer.push(&segment.as_str())).count(); + return true; + } + _ => {}, + } + } + + cur_path.push(self.sess.cstore.def_key(cur_def) + .disambiguated_data.data.get_opt_name().unwrap_or_else(|| + Symbol::intern(""))); + match visible_parent_map.get(&cur_def) { + Some(&def) => cur_def = def, + None => return false, + }; + } + } + + pub fn push_item_path(self, buffer: &mut T, def_id: DefId) + where T: ItemPathBuffer + { + match *buffer.root_mode() { + RootMode::Local if !def_id.is_local() => + if self.try_push_visible_item_path(buffer, def_id) { return }, + _ => {} + } + + let key = self.def_key(def_id); + match key.disambiguated_data.data { + DefPathData::CrateRoot => { + assert!(key.parent.is_none()); + self.push_krate_path(buffer, def_id.krate); + } + + DefPathData::InlinedRoot(ref root_path) => { + assert!(key.parent.is_none()); + self.push_item_path(buffer, root_path.def_id); + } + + DefPathData::Impl => { + self.push_impl_path(buffer, def_id); + } + + // Unclear if there is any value in distinguishing these. + // Probably eventually (and maybe we would even want + // finer-grained distinctions, e.g. between enum/struct). + data @ DefPathData::Misc | + data @ DefPathData::TypeNs(..) | + data @ DefPathData::ValueNs(..) | + data @ DefPathData::Module(..) | + data @ DefPathData::TypeParam(..) | + data @ DefPathData::LifetimeDef(..) | + data @ DefPathData::EnumVariant(..) | + data @ DefPathData::Field(..) | + data @ DefPathData::StructCtor | + data @ DefPathData::Initializer | + data @ DefPathData::MacroDef(..) 
| + data @ DefPathData::ClosureExpr | + data @ DefPathData::Binding(..) | + data @ DefPathData::ImplTrait => { + let parent_def_id = self.parent_def_id(def_id).unwrap(); + self.push_item_path(buffer, parent_def_id); + buffer.push(&data.as_interned_str()); + } + } + } + + fn push_impl_path(self, + buffer: &mut T, + impl_def_id: DefId) + where T: ItemPathBuffer + { + let parent_def_id = self.parent_def_id(impl_def_id).unwrap(); + + let use_types = if !impl_def_id.is_local() { + // always have full types available for extern crates + true + } else { + // for local crates, check whether type info is + // available; typeck might not have completed yet + self.impl_trait_refs.borrow().contains_key(&impl_def_id) + }; + + if !use_types { + return self.push_impl_path_fallback(buffer, impl_def_id); + } + + // Decide whether to print the parent path for the impl. + // Logically, since impls are global, it's never needed, but + // users may find it useful. Currently, we omit the parent if + // the impl is either in the same module as the self-type or + // as the trait. + let self_ty = self.item_type(impl_def_id); + let in_self_mod = match characteristic_def_id_of_type(self_ty) { + None => false, + Some(ty_def_id) => self.parent_def_id(ty_def_id) == Some(parent_def_id), + }; + + let impl_trait_ref = self.impl_trait_ref(impl_def_id); + let in_trait_mod = match impl_trait_ref { + None => false, + Some(trait_ref) => self.parent_def_id(trait_ref.def_id) == Some(parent_def_id), + }; + + if !in_self_mod && !in_trait_mod { + // If the impl is not co-located with either self-type or + // trait-type, then fallback to a format that identifies + // the module more clearly. + self.push_item_path(buffer, parent_def_id); + if let Some(trait_ref) = impl_trait_ref { + buffer.push(&format!("", trait_ref, self_ty)); + } else { + buffer.push(&format!("", self_ty)); + } + return; + } + + // Otherwise, try to give a good form that would be valid language + // syntax. 
Preferably using associated item notation. + + if let Some(trait_ref) = impl_trait_ref { + // Trait impls. + buffer.push(&format!("<{} as {}>", + self_ty, + trait_ref)); + return; + } + + // Inherent impls. Try to print `Foo::bar` for an inherent + // impl on `Foo`, but fallback to `::bar` if self-type is + // anything other than a simple path. + match self_ty.sty { + ty::TyAdt(adt_def, substs) => { + if substs.types().next().is_none() { // ignore regions + self.push_item_path(buffer, adt_def.did); + } else { + buffer.push(&format!("<{}>", self_ty)); + } + } + + ty::TyBool | + ty::TyChar | + ty::TyInt(_) | + ty::TyUint(_) | + ty::TyFloat(_) | + ty::TyStr => { + buffer.push(&format!("{}", self_ty)); + } + + _ => { + buffer.push(&format!("<{}>", self_ty)); + } + } + } + + fn push_impl_path_fallback(self, + buffer: &mut T, + impl_def_id: DefId) + where T: ItemPathBuffer + { + // If no type info is available, fall back to + // pretty printing some span information. This should + // only occur very early in the compiler pipeline. + let parent_def_id = self.parent_def_id(impl_def_id).unwrap(); + self.push_item_path(buffer, parent_def_id); + let node_id = self.map.as_local_node_id(impl_def_id).unwrap(); + let item = self.map.expect_item(node_id); + let span_str = self.sess.codemap().span_to_string(item.span); + buffer.push(&format!("", span_str)); + } + + /// Returns the def-id of `def_id`'s parent in the def tree. If + /// this returns `None`, then `def_id` represents a crate root or + /// inlined root. + pub fn parent_def_id(self, def_id: DefId) -> Option { + let key = self.def_key(def_id); + key.parent.map(|index| DefId { krate: def_id.krate, index: index }) + } +} + +/// As a heuristic, when we see an impl, if we see that the +/// 'self-type' is a type defined in the same module as the impl, +/// we can omit including the path to the impl itself. This +/// function tries to find a "characteristic def-id" for a +/// type. 
It's just a heuristic so it makes some questionable +/// decisions and we may want to adjust it later. +pub fn characteristic_def_id_of_type(ty: Ty) -> Option { + match ty.sty { + ty::TyAdt(adt_def, _) => Some(adt_def.did), + + ty::TyDynamic(data, ..) => data.principal().map(|p| p.def_id()), + + ty::TyArray(subty, _) | + ty::TySlice(subty) | + ty::TyBox(subty) => characteristic_def_id_of_type(subty), + + ty::TyRawPtr(mt) | + ty::TyRef(_, mt) => characteristic_def_id_of_type(mt.ty), + + ty::TyTuple(ref tys) => tys.iter() + .filter_map(|ty| characteristic_def_id_of_type(ty)) + .next(), + + ty::TyFnDef(def_id, ..) | + ty::TyClosure(def_id, _) => Some(def_id), + + ty::TyBool | + ty::TyChar | + ty::TyInt(_) | + ty::TyUint(_) | + ty::TyStr | + ty::TyFnPtr(_) | + ty::TyProjection(_) | + ty::TyParam(_) | + ty::TyAnon(..) | + ty::TyInfer(_) | + ty::TyError | + ty::TyNever | + ty::TyFloat(_) => None, + } +} + +/// Unifying Trait for different kinds of item paths we might +/// construct. The basic interface is that components get pushed: the +/// instance can also customize how we handle the root of a crate. +pub trait ItemPathBuffer { + fn root_mode(&self) -> &RootMode; + fn push(&mut self, text: &str); +} + +#[derive(Debug)] +pub enum RootMode { + /// Try to make a path relative to the local crate. In + /// particular, local paths have no prefix, and if the path comes + /// from an extern crate, start with the path to the `extern + /// crate` declaration. + Local, + + /// Always prepend the crate name to the path, forming an absolute + /// path from within a given set of crates. 
+ Absolute, +} + +#[derive(Debug)] +struct LocalPathBuffer { + root_mode: RootMode, + str: String, +} + +impl LocalPathBuffer { + fn new(root_mode: RootMode) -> LocalPathBuffer { + LocalPathBuffer { + root_mode: root_mode, + str: String::new() + } + } + + fn into_string(self) -> String { + self.str + } + +} + +impl ItemPathBuffer for LocalPathBuffer { + fn root_mode(&self) -> &RootMode { + &self.root_mode + } + + fn push(&mut self, text: &str) { + if !self.str.is_empty() { + self.str.push_str("::"); + } + self.str.push_str(text); + } +} diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs new file mode 100644 index 0000000000000..8646bccf1e9ed --- /dev/null +++ b/src/librustc/ty/layout.rs @@ -0,0 +1,1444 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub use self::Integer::*; +pub use self::Layout::*; +pub use self::Primitive::*; + +use infer::InferCtxt; +use session::Session; +use traits; +use ty::{self, Ty, TyCtxt, TypeFoldable}; + +use syntax::ast::{FloatTy, IntTy, UintTy}; +use syntax::attr; +use syntax_pos::DUMMY_SP; + +use std::cmp; +use std::fmt; +use std::i64; + +/// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout) +/// for a target, which contains everything needed to compute layouts. +pub struct TargetDataLayout { + pub endian: Endian, + pub i1_align: Align, + pub i8_align: Align, + pub i16_align: Align, + pub i32_align: Align, + pub i64_align: Align, + pub f32_align: Align, + pub f64_align: Align, + pub pointer_size: Size, + pub pointer_align: Align, + pub aggregate_align: Align, + + /// Alignments for vector types. 
+ pub vector_align: Vec<(Size, Align)> +} + +impl Default for TargetDataLayout { + /// Creates an instance of `TargetDataLayout`. + fn default() -> TargetDataLayout { + TargetDataLayout { + endian: Endian::Big, + i1_align: Align::from_bits(8, 8).unwrap(), + i8_align: Align::from_bits(8, 8).unwrap(), + i16_align: Align::from_bits(16, 16).unwrap(), + i32_align: Align::from_bits(32, 32).unwrap(), + i64_align: Align::from_bits(32, 64).unwrap(), + f32_align: Align::from_bits(32, 32).unwrap(), + f64_align: Align::from_bits(64, 64).unwrap(), + pointer_size: Size::from_bits(64), + pointer_align: Align::from_bits(64, 64).unwrap(), + aggregate_align: Align::from_bits(0, 64).unwrap(), + vector_align: vec![ + (Size::from_bits(64), Align::from_bits(64, 64).unwrap()), + (Size::from_bits(128), Align::from_bits(128, 128).unwrap()) + ] + } + } +} + +impl TargetDataLayout { + pub fn parse(sess: &Session) -> TargetDataLayout { + // Parse a bit count from a string. + let parse_bits = |s: &str, kind: &str, cause: &str| { + s.parse::().unwrap_or_else(|err| { + sess.err(&format!("invalid {} `{}` for `{}` in \"data-layout\": {}", + kind, s, cause, err)); + 0 + }) + }; + + // Parse a size string. + let size = |s: &str, cause: &str| { + Size::from_bits(parse_bits(s, "size", cause)) + }; + + // Parse an alignment string. + let align = |s: &[&str], cause: &str| { + if s.is_empty() { + sess.err(&format!("missing alignment for `{}` in \"data-layout\"", cause)); + } + let abi = parse_bits(s[0], "alignment", cause); + let pref = s.get(1).map_or(abi, |pref| parse_bits(pref, "alignment", cause)); + Align::from_bits(abi, pref).unwrap_or_else(|err| { + sess.err(&format!("invalid alignment for `{}` in \"data-layout\": {}", + cause, err)); + Align::from_bits(8, 8).unwrap() + }) + }; + + let mut dl = TargetDataLayout::default(); + for spec in sess.target.target.data_layout.split("-") { + match &spec.split(":").collect::>()[..] 
{ + &["e"] => dl.endian = Endian::Little, + &["E"] => dl.endian = Endian::Big, + &["a", ref a..] => dl.aggregate_align = align(a, "a"), + &["f32", ref a..] => dl.f32_align = align(a, "f32"), + &["f64", ref a..] => dl.f64_align = align(a, "f64"), + &[p @ "p", s, ref a..] | &[p @ "p0", s, ref a..] => { + dl.pointer_size = size(s, p); + dl.pointer_align = align(a, p); + } + &[s, ref a..] if s.starts_with("i") => { + let ty_align = match s[1..].parse::() { + Ok(1) => &mut dl.i8_align, + Ok(8) => &mut dl.i8_align, + Ok(16) => &mut dl.i16_align, + Ok(32) => &mut dl.i32_align, + Ok(64) => &mut dl.i64_align, + Ok(_) => continue, + Err(_) => { + size(&s[1..], "i"); // For the user error. + continue; + } + }; + *ty_align = align(a, s); + } + &[s, ref a..] if s.starts_with("v") => { + let v_size = size(&s[1..], "v"); + let a = align(a, s); + if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) { + v.1 = a; + continue; + } + // No existing entry, add a new one. + dl.vector_align.push((v_size, a)); + } + _ => {} // Ignore everything else. + } + } + + // Perform consistency checks against the Target information. + let endian_str = match dl.endian { + Endian::Little => "little", + Endian::Big => "big" + }; + if endian_str != sess.target.target.target_endian { + sess.err(&format!("inconsistent target specification: \"data-layout\" claims \ + architecture is {}-endian, while \"target-endian\" is `{}`", + endian_str, sess.target.target.target_endian)); + } + + if dl.pointer_size.bits().to_string() != sess.target.target.target_pointer_width { + sess.err(&format!("inconsistent target specification: \"data-layout\" claims \ + pointers are {}-bit, while \"target-pointer-width\" is `{}`", + dl.pointer_size.bits(), sess.target.target.target_pointer_width)); + } + + dl + } + + /// Return exclusive upper bound on object size. + /// + /// The theoretical maximum object size is defined as the maximum positive `isize` value. 
+ /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly + /// index every address within an object along with one byte past the end, along with allowing + /// `isize` to store the difference between any two pointers into an object. + /// + /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer + /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is + /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable + /// address space on 64-bit ARMv8 and x86_64. + pub fn obj_size_bound(&self) -> u64 { + match self.pointer_size.bits() { + 16 => 1 << 15, + 32 => 1 << 31, + 64 => 1 << 47, + bits => bug!("obj_size_bound: unknown pointer bit size {}", bits) + } + } + + pub fn ptr_sized_integer(&self) -> Integer { + match self.pointer_size.bits() { + 16 => I16, + 32 => I32, + 64 => I64, + bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits) + } + } +} + +/// Endianness of the target, which must match cfg(target-endian). +#[derive(Copy, Clone)] +pub enum Endian { + Little, + Big +} + +/// Size of a type in bytes. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub struct Size { + raw: u64 +} + +impl Size { + pub fn from_bits(bits: u64) -> Size { + Size::from_bytes((bits + 7) / 8) + } + + pub fn from_bytes(bytes: u64) -> Size { + if bytes >= (1 << 61) { + bug!("Size::from_bytes: {} bytes in bits doesn't fit in u64", bytes) + } + Size { + raw: bytes + } + } + + pub fn bytes(self) -> u64 { + self.raw + } + + pub fn bits(self) -> u64 { + self.bytes() * 8 + } + + pub fn abi_align(self, align: Align) -> Size { + let mask = align.abi() - 1; + Size::from_bytes((self.bytes() + mask) & !mask) + } + + pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option { + // Each Size is less than dl.obj_size_bound(), so the sum is + // also less than 1 << 62 (and therefore can't overflow). 
+ let bytes = self.bytes() + offset.bytes(); + + if bytes < dl.obj_size_bound() { + Some(Size::from_bytes(bytes)) + } else { + None + } + } + + pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option { + // Each Size is less than dl.obj_size_bound(), so the sum is + // also less than 1 << 62 (and therefore can't overflow). + match self.bytes().checked_mul(count) { + Some(bytes) if bytes < dl.obj_size_bound() => { + Some(Size::from_bytes(bytes)) + } + _ => None + } + } +} + +/// Alignment of a type in bytes, both ABI-mandated and preferred. +/// Since alignments are always powers of 2, we can pack both in one byte, +/// giving each a nibble (4 bits) for a maximum alignment of 2^15 = 32768. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub struct Align { + raw: u8 +} + +impl Align { + pub fn from_bits(abi: u64, pref: u64) -> Result { + Align::from_bytes((abi + 7) / 8, (pref + 7) / 8) + } + + pub fn from_bytes(abi: u64, pref: u64) -> Result { + let pack = |align: u64| { + // Treat an alignment of 0 bytes like 1-byte alignment. + if align == 0 { + return Ok(0); + } + + let mut bytes = align; + let mut pow: u8 = 0; + while (bytes & 1) == 0 { + pow += 1; + bytes >>= 1; + } + if bytes != 1 { + Err(format!("`{}` is not a power of 2", align)) + } else if pow > 0x0f { + Err(format!("`{}` is too large", align)) + } else { + Ok(pow) + } + }; + + Ok(Align { + raw: pack(abi)? | (pack(pref)? << 4) + }) + } + + pub fn abi(self) -> u64 { + 1 << (self.raw & 0xf) + } + + pub fn pref(self) -> u64 { + 1 << (self.raw >> 4) + } + + pub fn min(self, other: Align) -> Align { + let abi = cmp::min(self.raw & 0x0f, other.raw & 0x0f); + let pref = cmp::min(self.raw & 0xf0, other.raw & 0xf0); + Align { + raw: abi | pref + } + } + + pub fn max(self, other: Align) -> Align { + let abi = cmp::max(self.raw & 0x0f, other.raw & 0x0f); + let pref = cmp::max(self.raw & 0xf0, other.raw & 0xf0); + Align { + raw: abi | pref + } + } +} + +/// Integers, also used for enum discriminants. 
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum Integer { + I1, + I8, + I16, + I32, + I64 +} + +impl Integer { + pub fn size(&self) -> Size { + match *self { + I1 => Size::from_bits(1), + I8 => Size::from_bytes(1), + I16 => Size::from_bytes(2), + I32 => Size::from_bytes(4), + I64 => Size::from_bytes(8), + } + } + + pub fn align(&self, dl: &TargetDataLayout)-> Align { + match *self { + I1 => dl.i1_align, + I8 => dl.i8_align, + I16 => dl.i16_align, + I32 => dl.i32_align, + I64 => dl.i64_align, + } + } + + pub fn to_ty<'a, 'tcx>(&self, tcx: &ty::TyCtxt<'a, 'tcx, 'tcx>, + signed: bool) -> Ty<'tcx> { + match (*self, signed) { + (I1, false) => tcx.types.u8, + (I8, false) => tcx.types.u8, + (I16, false) => tcx.types.u16, + (I32, false) => tcx.types.u32, + (I64, false) => tcx.types.u64, + (I1, true) => tcx.types.i8, + (I8, true) => tcx.types.i8, + (I16, true) => tcx.types.i16, + (I32, true) => tcx.types.i32, + (I64, true) => tcx.types.i64, + } + } + + /// Find the smallest Integer type which can represent the signed value. + pub fn fit_signed(x: i64) -> Integer { + match x { + -0x0000_0001...0x0000_0000 => I1, + -0x0000_0080...0x0000_007f => I8, + -0x0000_8000...0x0000_7fff => I16, + -0x8000_0000...0x7fff_ffff => I32, + _ => I64 + } + } + + /// Find the smallest Integer type which can represent the unsigned value. + pub fn fit_unsigned(x: u64) -> Integer { + match x { + 0...0x0000_0001 => I1, + 0...0x0000_00ff => I8, + 0...0x0000_ffff => I16, + 0...0xffff_ffff => I32, + _ => I64 + } + } + + /// Find the smallest integer with the given alignment. + pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option { + let wanted = align.abi(); + for &candidate in &[I8, I16, I32, I64] { + let ty = Int(candidate); + if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { + return Some(candidate); + } + } + None + } + + /// Get the Integer type from an attr::IntType. 
+ pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer { + match ity { + attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8, + attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16, + attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32, + attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64, + attr::SignedInt(IntTy::Is) | attr::UnsignedInt(UintTy::Us) => { + dl.ptr_sized_integer() + } + } + } + + /// Find the appropriate Integer type and signedness for the given + /// signed discriminant range and #[repr] attribute. + /// N.B.: u64 values above i64::MAX will be treated as signed, but + /// that shouldn't affect anything, other than maybe debuginfo. + pub fn repr_discr(tcx: TyCtxt, ty: Ty, hint: attr::ReprAttr, min: i64, max: i64) + -> (Integer, bool) { + // Theoretically, negative values could be larger in unsigned representation + // than the unsigned representation of the signed minimum. However, if there + // are any negative values, the only valid unsigned representation is u64 + // which can fit all i64 values, so the result remains unaffected. + let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64)); + let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max)); + + let at_least = match hint { + attr::ReprInt(ity) => { + let discr = Integer::from_attr(&tcx.data_layout, ity); + let fit = if ity.is_signed() { signed_fit } else { unsigned_fit }; + if discr < fit { + bug!("Integer::repr_discr: `#[repr]` hint too small for \ + discriminant range of enum `{}", ty) + } + return (discr, ity.is_signed()); + } + attr::ReprExtern => { + match &tcx.sess.target.target.arch[..] { + // WARNING: the ARM EABI has two variants; the one corresponding + // to `at_least == I32` appears to be used on Linux and NetBSD, + // but some systems may use the variant corresponding to no + // lower bound. However, we don't run on those yet...? 
+ "arm" => I32, + _ => I32, + } + } + attr::ReprAny => I8, + attr::ReprPacked => { + bug!("Integer::repr_discr: found #[repr(packed)] on enum `{}", ty); + } + attr::ReprSimd => { + bug!("Integer::repr_discr: found #[repr(simd)] on enum `{}", ty); + } + }; + + // If there are no negative values, we can use the unsigned fit. + if min >= 0 { + (cmp::max(unsigned_fit, at_least), false) + } else { + (cmp::max(signed_fit, at_least), true) + } + } +} + +/// Fundamental unit of memory access and layout. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +pub enum Primitive { + Int(Integer), + F32, + F64, + Pointer +} + +impl Primitive { + pub fn size(self, dl: &TargetDataLayout) -> Size { + match self { + Int(I1) | Int(I8) => Size::from_bits(8), + Int(I16) => Size::from_bits(16), + Int(I32) | F32 => Size::from_bits(32), + Int(I64) | F64 => Size::from_bits(64), + Pointer => dl.pointer_size + } + } + + pub fn align(self, dl: &TargetDataLayout) -> Align { + match self { + Int(I1) => dl.i1_align, + Int(I8) => dl.i8_align, + Int(I16) => dl.i16_align, + Int(I32) => dl.i32_align, + Int(I64) => dl.i64_align, + F32 => dl.f32_align, + F64 => dl.f64_align, + Pointer => dl.pointer_align + } + } +} + +/// Path through fields of nested structures. +// FIXME(eddyb) use small vector optimization for the common case. +pub type FieldPath = Vec; + +/// A structure, a product type in ADT terms. +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct Struct { + pub align: Align, + + /// If true, no alignment padding is used. + pub packed: bool, + + /// If true, the size is exact, otherwise it's only a lower bound. + pub sized: bool, + + /// Offsets for the first byte of each field. + /// FIXME(eddyb) use small vector optimization for the common case. 
+ pub offsets: Vec, + + pub min_size: Size, +} + +impl<'a, 'gcx, 'tcx> Struct { + pub fn new(dl: &TargetDataLayout, packed: bool) -> Struct { + Struct { + align: if packed { dl.i8_align } else { dl.aggregate_align }, + packed: packed, + sized: true, + offsets: vec![], + min_size: Size::from_bytes(0), + } + } + + /// Extend the Struct with more fields. + pub fn extend(&mut self, dl: &TargetDataLayout, + fields: I, + scapegoat: Ty<'gcx>) + -> Result<(), LayoutError<'gcx>> + where I: Iterator>> { + self.offsets.reserve(fields.size_hint().0); + + let mut offset = self.min_size; + + for field in fields { + if !self.sized { + bug!("Struct::extend: field #{} of `{}` comes after unsized field", + self.offsets.len(), scapegoat); + } + + let field = field?; + if field.is_unsized() { + self.sized = false; + } + + // Invariant: offset < dl.obj_size_bound() <= 1<<61 + if !self.packed { + let align = field.align(dl); + self.align = self.align.max(align); + offset = offset.abi_align(align); + } + + self.offsets.push(offset); + + debug!("Struct::extend offset: {:?} field: {:?} {:?}", offset, field, field.size(dl)); + + offset = offset.checked_add(field.size(dl), dl) + .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?; + } + + debug!("Struct::extend min_size: {:?}", offset); + + self.min_size = offset; + + Ok(()) + } + + /// Get the size without trailing alignment padding. + + /// Get the size with trailing aligment padding. + pub fn stride(&self) -> Size { + self.min_size.abi_align(self.align) + } + + /// Determine whether a structure would be zero-sized, given its fields. + pub fn would_be_zero_sized(dl: &TargetDataLayout, fields: I) + -> Result> + where I: Iterator>> { + for field in fields { + let field = field?; + if field.is_unsized() || field.size(dl).bytes() > 0 { + return Ok(false); + } + } + Ok(true) + } + + /// Find the path leading to a non-zero leaf field, starting from + /// the given type and recursing through aggregates. 
+ // FIXME(eddyb) track value ranges and traverse already optimized enums. + pub fn non_zero_field_in_type(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'gcx>) + -> Result, LayoutError<'gcx>> { + let tcx = infcx.tcx.global_tcx(); + match (ty.layout(infcx)?, &ty.sty) { + (&Scalar { non_zero: true, .. }, _) | + (&CEnum { non_zero: true, .. }, _) => Ok(Some(vec![])), + (&FatPointer { non_zero: true, .. }, _) => { + Ok(Some(vec![FAT_PTR_ADDR as u32])) + } + + // Is this the NonZero lang item wrapping a pointer or integer type? + (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => { + let fields = &def.struct_variant().fields; + assert_eq!(fields.len(), 1); + match *fields[0].ty(tcx, substs).layout(infcx)? { + // FIXME(eddyb) also allow floating-point types here. + Scalar { value: Int(_), non_zero: false } | + Scalar { value: Pointer, non_zero: false } => { + Ok(Some(vec![0])) + } + FatPointer { non_zero: false, .. } => { + Ok(Some(vec![FAT_PTR_ADDR as u32, 0])) + } + _ => Ok(None) + } + } + + // Perhaps one of the fields of this struct is non-zero + // let's recurse and find out + (_, &ty::TyAdt(def, substs)) if def.is_struct() => { + Struct::non_zero_field_path(infcx, def.struct_variant().fields + .iter().map(|field| { + field.ty(tcx, substs) + })) + } + + // Perhaps one of the upvars of this closure is non-zero + // Let's recurse and find out! + (_, &ty::TyClosure(def_id, ref substs)) => { + Struct::non_zero_field_path(infcx, substs.upvar_tys(def_id, tcx)) + } + // Can we use one of the fields in this tuple? + (_, &ty::TyTuple(tys)) => { + Struct::non_zero_field_path(infcx, tys.iter().cloned()) + } + + // Is this a fixed-size array of something non-zero + // with at least one element? 
+ (_, &ty::TyArray(ety, d)) if d > 0 => { + Struct::non_zero_field_path(infcx, Some(ety).into_iter()) + } + + (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => { + let normalized = normalize_associated_type(infcx, ty); + if ty == normalized { + return Ok(None); + } + return Struct::non_zero_field_in_type(infcx, normalized); + } + + // Anything else is not a non-zero type. + _ => Ok(None) + } + } + + /// Find the path leading to a non-zero leaf field, starting from + /// the given set of fields and recursing through aggregates. + pub fn non_zero_field_path(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + fields: I) + -> Result, LayoutError<'gcx>> + where I: Iterator> { + for (i, ty) in fields.enumerate() { + if let Some(mut path) = Struct::non_zero_field_in_type(infcx, ty)? { + path.push(i as u32); + return Ok(Some(path)); + } + } + Ok(None) + } +} + +/// An untagged union. +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct Union { + pub align: Align, + + pub min_size: Size, + + /// If true, no alignment padding is used. + pub packed: bool, +} + +impl<'a, 'gcx, 'tcx> Union { + pub fn new(dl: &TargetDataLayout, packed: bool) -> Union { + Union { + align: if packed { dl.i8_align } else { dl.aggregate_align }, + min_size: Size::from_bytes(0), + packed: packed, + } + } + + /// Extend the Struct with more fields. + pub fn extend(&mut self, dl: &TargetDataLayout, + fields: I, + scapegoat: Ty<'gcx>) + -> Result<(), LayoutError<'gcx>> + where I: Iterator>> { + for (index, field) in fields.enumerate() { + let field = field?; + if field.is_unsized() { + bug!("Union::extend: field #{} of `{}` is unsized", + index, scapegoat); + } + + debug!("Union::extend field: {:?} {:?}", field, field.size(dl)); + + if !self.packed { + self.align = self.align.max(field.align(dl)); + } + self.min_size = cmp::max(self.min_size, field.size(dl)); + } + + debug!("Union::extend min-size: {:?}", self.min_size); + + Ok(()) + } + + /// Get the size with trailing aligment padding. 
+ pub fn stride(&self) -> Size { + self.min_size.abi_align(self.align) + } +} + +/// The first half of a fat pointer. +/// - For a trait object, this is the address of the box. +/// - For a slice, this is the base address. +pub const FAT_PTR_ADDR: usize = 0; + +/// The second half of a fat pointer. +/// - For a trait object, this is the address of the vtable. +/// - For a slice, this is the length. +pub const FAT_PTR_EXTRA: usize = 1; + +/// Type layout, from which size and alignment can be cheaply computed. +/// For ADTs, it also includes field placement and enum optimizations. +/// NOTE: Because Layout is interned, redundant information should be +/// kept to a minimum, e.g. it includes no sub-component Ty or Layout. +#[derive(Debug, PartialEq, Eq, Hash)] +pub enum Layout { + /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. + Scalar { + value: Primitive, + // If true, the value cannot represent a bit pattern of all zeroes. + non_zero: bool + }, + + /// SIMD vectors, from structs marked with #[repr(simd)]. + Vector { + element: Primitive, + count: u64 + }, + + /// TyArray, TySlice or TyStr. + Array { + /// If true, the size is exact, otherwise it's only a lower bound. + sized: bool, + align: Align, + size: Size + }, + + /// TyRawPtr or TyRef with a !Sized pointee. + FatPointer { + metadata: Primitive, + // If true, the pointer cannot be null. + non_zero: bool + }, + + // Remaining variants are all ADTs such as structs, enums or tuples. + + /// C-like enums; basically an integer. + CEnum { + discr: Integer, + signed: bool, + non_zero: bool, + // Inclusive discriminant range. + // If min > max, it represents min...u64::MAX followed by 0...max. + // FIXME(eddyb) always use the shortest range, e.g. by finding + // the largest space between two consecutive discriminants and + // taking everything else as the (shortest) discriminant range. + min: u64, + max: u64 + }, + + /// Single-case enums, and structs/tuples. 
+ Univariant { + variant: Struct, + // If true, the structure is NonZero. + // FIXME(eddyb) use a newtype Layout kind for this. + non_zero: bool + }, + + /// Untagged unions. + UntaggedUnion { + variants: Union, + }, + + /// General-case enums: for each case there is a struct, and they + /// all start with a field for the discriminant. + General { + discr: Integer, + variants: Vec, + size: Size, + align: Align + }, + + /// Two cases distinguished by a nullable pointer: the case with discriminant + /// `nndiscr` must have single field which is known to be nonnull due to its type. + /// The other case is known to be zero sized. Hence we represent the enum + /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, + /// otherwise it indicates the other case. + /// + /// For example, `std::option::Option` instantiated at a safe pointer type + /// is represented such that `None` is a null pointer and `Some` is the + /// identity function. + RawNullablePointer { + nndiscr: u64, + value: Primitive + }, + + /// Two cases distinguished by a nullable pointer: the case with discriminant + /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th + /// field is known to be nonnull due to its type; if that field is null, then + /// it represents the other case, which is known to be zero sized. + StructWrappedNullablePointer { + nndiscr: u64, + nonnull: Struct, + // N.B. There is a 0 at the start, for LLVM GEP through a pointer. 
+ discrfield: FieldPath + } +} + +#[derive(Copy, Clone, Debug)] +pub enum LayoutError<'tcx> { + Unknown(Ty<'tcx>), + SizeOverflow(Ty<'tcx>) +} + +impl<'tcx> fmt::Display for LayoutError<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + LayoutError::Unknown(ty) => { + write!(f, "the type `{:?}` has an unknown layout", ty) + } + LayoutError::SizeOverflow(ty) => { + write!(f, "the type `{:?}` is too big for the current architecture", ty) + } + } + } +} + +/// Helper function for normalizing associated types in an inference context. +fn normalize_associated_type<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + ty: Ty<'gcx>) + -> Ty<'gcx> { + if !ty.has_projection_types() { + return ty; + } + + let mut selcx = traits::SelectionContext::new(infcx); + let cause = traits::ObligationCause::dummy(); + let traits::Normalized { value: result, obligations } = + traits::normalize(&mut selcx, cause, &ty); + + let mut fulfill_cx = traits::FulfillmentContext::new(); + + for obligation in obligations { + fulfill_cx.register_predicate_obligation(infcx, obligation); + } + + infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &result) +} + +impl<'a, 'gcx, 'tcx> Layout { + pub fn compute_uncached(ty: Ty<'gcx>, + infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result<&'gcx Layout, LayoutError<'gcx>> { + let tcx = infcx.tcx.global_tcx(); + let success = |layout| Ok(tcx.intern_layout(layout)); + let dl = &tcx.data_layout; + assert!(!ty.has_infer_types()); + + let layout = match ty.sty { + // Basic scalars. 
+ ty::TyBool => Scalar { value: Int(I1), non_zero: false }, + ty::TyChar => Scalar { value: Int(I32), non_zero: false }, + ty::TyInt(ity) => { + Scalar { + value: Int(Integer::from_attr(dl, attr::SignedInt(ity))), + non_zero: false + } + } + ty::TyUint(ity) => { + Scalar { + value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))), + non_zero: false + } + } + ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false }, + ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false }, + ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true }, + + // The never type. + ty::TyNever => Univariant { variant: Struct::new(dl, false), non_zero: false }, + + // Potentially-fat pointers. + ty::TyBox(pointee) | + ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { + let non_zero = !ty.is_unsafe_ptr(); + let pointee = normalize_associated_type(infcx, pointee); + if pointee.is_sized(tcx, &infcx.parameter_environment, DUMMY_SP) { + Scalar { value: Pointer, non_zero: non_zero } + } else { + let unsized_part = tcx.struct_tail(pointee); + let meta = match unsized_part.sty { + ty::TySlice(_) | ty::TyStr => { + Int(dl.ptr_sized_integer()) + } + ty::TyDynamic(..) => Pointer, + _ => return Err(LayoutError::Unknown(unsized_part)) + }; + FatPointer { metadata: meta, non_zero: non_zero } + } + } + + // Arrays and slices. + ty::TyArray(element, count) => { + let element = element.layout(infcx)?; + Array { + sized: true, + align: element.align(dl), + size: element.size(dl).checked_mul(count as u64, dl) + .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)? + } + } + ty::TySlice(element) => { + Array { + sized: false, + align: element.layout(infcx)?.align(dl), + size: Size::from_bytes(0) + } + } + ty::TyStr => { + Array { + sized: false, + align: dl.i8_align, + size: Size::from_bytes(0) + } + } + + // Odd unit types. + ty::TyFnDef(..) 
=> { + Univariant { + variant: Struct::new(dl, false), + non_zero: false + } + } + ty::TyDynamic(..) => { + let mut unit = Struct::new(dl, false); + unit.sized = false; + Univariant { variant: unit, non_zero: false } + } + + // Tuples and closures. + ty::TyClosure(def_id, ref substs) => { + let mut st = Struct::new(dl, false); + let tys = substs.upvar_tys(def_id, tcx); + st.extend(dl, tys.map(|ty| ty.layout(infcx)), ty)?; + Univariant { variant: st, non_zero: false } + } + + ty::TyTuple(tys) => { + let mut st = Struct::new(dl, false); + st.extend(dl, tys.iter().map(|ty| ty.layout(infcx)), ty)?; + Univariant { variant: st, non_zero: false } + } + + // SIMD vector types. + ty::TyAdt(def, ..) if def.is_simd() => { + let element = ty.simd_type(tcx); + match *element.layout(infcx)? { + Scalar { value, .. } => { + return success(Vector { + element: value, + count: ty.simd_size(tcx) as u64 + }); + } + _ => { + tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ + a non-machine element type `{}`", + ty, element)); + } + } + } + + // ADTs. + ty::TyAdt(def, substs) => { + let hint = *tcx.lookup_repr_hints(def.did).get(0) + .unwrap_or(&attr::ReprAny); + + if def.variants.is_empty() { + // Uninhabitable; represent as unit + // (Typechecking will reject discriminant-sizing attrs.) 
+ assert_eq!(hint, attr::ReprAny); + + return success(Univariant { + variant: Struct::new(dl, false), + non_zero: false + }); + } + + if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { + // All bodies empty -> intlike + let (mut min, mut max, mut non_zero) = (i64::MAX, i64::MIN, true); + for v in &def.variants { + let x = v.disr_val.to_u64_unchecked() as i64; + if x == 0 { non_zero = false; } + if x < min { min = x; } + if x > max { max = x; } + } + + let (discr, signed) = Integer::repr_discr(tcx, ty, hint, min, max); + return success(CEnum { + discr: discr, + signed: signed, + non_zero: non_zero, + min: min as u64, + max: max as u64 + }); + } + + if !def.is_enum() || def.variants.len() == 1 && hint == attr::ReprAny { + // Struct, or union, or univariant enum equivalent to a struct. + // (Typechecking will reject discriminant-sizing attrs.) + + let fields = def.variants[0].fields.iter().map(|field| { + field.ty(tcx, substs).layout(infcx) + }); + let packed = tcx.lookup_packed(def.did); + let layout = if def.is_union() { + let mut un = Union::new(dl, packed); + un.extend(dl, fields, ty)?; + UntaggedUnion { variants: un } + } else { + let mut st = Struct::new(dl, packed); + st.extend(dl, fields, ty)?; + let non_zero = Some(def.did) == tcx.lang_items.non_zero(); + Univariant { variant: st, non_zero: non_zero } + }; + return success(layout); + } + + // Since there's at least one + // non-empty body, explicit discriminants should have + // been rejected by a checker before this point. + for (i, v) in def.variants.iter().enumerate() { + if i as u64 != v.disr_val.to_u64_unchecked() { + bug!("non-C-like enum {} with specified discriminants", + tcx.item_path_str(def.did)); + } + } + + // Cache the substituted and normalized variant field types. 
+ let variants = def.variants.iter().map(|v| { + v.fields.iter().map(|field| field.ty(tcx, substs)).collect::>() + }).collect::>(); + + if variants.len() == 2 && hint == attr::ReprAny { + // Nullable pointer optimization + for discr in 0..2 { + let other_fields = variants[1 - discr].iter().map(|ty| { + ty.layout(infcx) + }); + if !Struct::would_be_zero_sized(dl, other_fields)? { + continue; + } + let path = Struct::non_zero_field_path(infcx, + variants[discr].iter().cloned())?; + let mut path = if let Some(p) = path { p } else { continue }; + + // FIXME(eddyb) should take advantage of a newtype. + if path == &[0] && variants[discr].len() == 1 { + let value = match *variants[discr][0].layout(infcx)? { + Scalar { value, .. } => value, + CEnum { discr, .. } => Int(discr), + _ => bug!("Layout::compute: `{}`'s non-zero \ + `{}` field not scalar?!", + ty, variants[discr][0]) + }; + return success(RawNullablePointer { + nndiscr: discr as u64, + value: value, + }); + } + + path.push(0); // For GEP through a pointer. + path.reverse(); + let mut st = Struct::new(dl, false); + st.extend(dl, variants[discr].iter().map(|ty| ty.layout(infcx)), ty)?; + return success(StructWrappedNullablePointer { + nndiscr: discr as u64, + nonnull: st, + discrfield: path + }); + } + } + + // The general case. + let discr_max = (variants.len() - 1) as i64; + assert!(discr_max >= 0); + let (min_ity, _) = Integer::repr_discr(tcx, ty, hint, 0, discr_max); + + let mut align = dl.aggregate_align; + let mut size = Size::from_bytes(0); + + // We're interested in the smallest alignment, so start large. 
+ let mut start_align = Align::from_bytes(256, 256).unwrap(); + + // Create the set of structs that represent each variant + // Use the minimum integer type we figured out above + let discr = Some(Scalar { value: Int(min_ity), non_zero: false }); + let mut variants = variants.into_iter().map(|fields| { + let mut found_start = false; + let fields = fields.into_iter().map(|field| { + let field = field.layout(infcx)?; + if !found_start { + // Find the first field we can't move later + // to make room for a larger discriminant. + let field_align = field.align(dl); + if field.size(dl).bytes() != 0 || field_align.abi() != 1 { + start_align = start_align.min(field_align); + found_start = true; + } + } + Ok(field) + }); + let mut st = Struct::new(dl, false); + st.extend(dl, discr.iter().map(Ok).chain(fields), ty)?; + size = cmp::max(size, st.min_size); + align = align.max(st.align); + Ok(st) + }).collect::, _>>()?; + + // Align the maximum variant size to the largest alignment. + size = size.abi_align(align); + + if size.bytes() >= dl.obj_size_bound() { + return Err(LayoutError::SizeOverflow(ty)); + } + + // Check to see if we should use a different type for the + // discriminant. We can safely use a type with the same size + // as the alignment of the first field of each variant. + // We increase the size of the discriminant to avoid LLVM copying + // padding when it doesn't need to. This normally causes unaligned + // load/stores and excessive memcpy/memset operations. By using a + // bigger integer size, LLVM can be sure about it's contents and + // won't be so conservative. + + // Use the initial field alignment + let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity); + + // If the alignment is not larger than the chosen discriminant size, + // don't use the alignment as the final size. + if ity <= min_ity { + ity = min_ity; + } else { + // Patch up the variants' first few fields. 
+ let old_ity_size = Int(min_ity).size(dl); + let new_ity_size = Int(ity).size(dl); + for variant in &mut variants { + for offset in &mut variant.offsets[1..] { + if *offset > old_ity_size { + break; + } + *offset = new_ity_size; + } + // We might be making the struct larger. + if variant.min_size <= old_ity_size { + variant.min_size = new_ity_size; + } + } + } + + General { + discr: ity, + variants: variants, + size: size, + align: align + } + } + + // Types with no meaningful known layout. + ty::TyProjection(_) | ty::TyAnon(..) => { + let normalized = normalize_associated_type(infcx, ty); + if ty == normalized { + return Err(LayoutError::Unknown(ty)); + } + return normalized.layout(infcx); + } + ty::TyParam(_) => { + return Err(LayoutError::Unknown(ty)); + } + ty::TyInfer(_) | ty::TyError => { + bug!("Layout::compute: unexpected type `{}`", ty) + } + }; + + success(layout) + } + + /// Returns true if the layout corresponds to an unsized type. + pub fn is_unsized(&self) -> bool { + match *self { + Scalar {..} | Vector {..} | FatPointer {..} | + CEnum {..} | UntaggedUnion {..} | General {..} | + RawNullablePointer {..} | + StructWrappedNullablePointer {..} => false, + + Array { sized, .. } | + Univariant { variant: Struct { sized, .. }, .. } => !sized + } + } + + pub fn size(&self, dl: &TargetDataLayout) -> Size { + match *self { + Scalar { value, .. } | RawNullablePointer { value, .. } => { + value.size(dl) + } + + Vector { element, count } => { + let elem_size = element.size(dl); + let vec_size = match elem_size.checked_mul(count, dl) { + Some(size) => size, + None => bug!("Layout::size({:?}): {} * {} overflowed", + self, elem_size.bytes(), count) + }; + vec_size.abi_align(self.align(dl)) + } + + FatPointer { metadata, .. } => { + // Effectively a (ptr, meta) tuple. + Pointer.size(dl).abi_align(metadata.align(dl)) + .checked_add(metadata.size(dl), dl).unwrap() + .abi_align(self.align(dl)) + } + + CEnum { discr, .. } => Int(discr).size(dl), + Array { size, .. 
} | General { size, .. } => size, + UntaggedUnion { ref variants } => variants.stride(), + + Univariant { ref variant, .. } | + StructWrappedNullablePointer { nonnull: ref variant, .. } => { + variant.stride() + } + } + } + + pub fn align(&self, dl: &TargetDataLayout) -> Align { + match *self { + Scalar { value, .. } | RawNullablePointer { value, .. } => { + value.align(dl) + } + + Vector { element, count } => { + let elem_size = element.size(dl); + let vec_size = match elem_size.checked_mul(count, dl) { + Some(size) => size, + None => bug!("Layout::align({:?}): {} * {} overflowed", + self, elem_size.bytes(), count) + }; + for &(size, align) in &dl.vector_align { + if size == vec_size { + return align; + } + } + // Default to natural alignment, which is what LLVM does. + // That is, use the size, rounded up to a power of 2. + let align = vec_size.bytes().next_power_of_two(); + Align::from_bytes(align, align).unwrap() + } + + FatPointer { metadata, .. } => { + // Effectively a (ptr, meta) tuple. + Pointer.align(dl).max(metadata.align(dl)) + } + + CEnum { discr, .. } => Int(discr).align(dl), + Array { align, .. } | General { align, .. } => align, + UntaggedUnion { ref variants } => variants.align, + + Univariant { ref variant, .. } | + StructWrappedNullablePointer { nonnull: ref variant, .. } => { + variant.align + } + } + } +} + +/// Type size "skeleton", i.e. the only information determining a type's size. +/// While this is conservative, (aside from constant sizes, only pointers, +/// newtypes thereof and null pointer optimized enums are allowed), it is +/// enough to statically check common usecases of transmute. +#[derive(Copy, Clone, Debug)] +pub enum SizeSkeleton<'tcx> { + /// Any statically computable Layout. + Known(Size), + + /// A potentially-fat pointer. + Pointer { + // If true, this pointer is never null. + non_zero: bool, + // The type which determines the unsized metadata, if any, + // of this pointer. 
Either a type parameter or a projection + // depending on one, with regions erased. + tail: Ty<'tcx> + } +} + +impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> { + pub fn compute(ty: Ty<'gcx>, infcx: &InferCtxt<'a, 'gcx, 'tcx>) + -> Result, LayoutError<'gcx>> { + let tcx = infcx.tcx.global_tcx(); + assert!(!ty.has_infer_types()); + + // First try computing a static layout. + let err = match ty.layout(infcx) { + Ok(layout) => { + return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout))); + } + Err(err) => err + }; + + match ty.sty { + ty::TyBox(pointee) | + ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { + let non_zero = !ty.is_unsafe_ptr(); + let tail = tcx.struct_tail(pointee); + match tail.sty { + ty::TyParam(_) | ty::TyProjection(_) => { + assert!(tail.has_param_types() || tail.has_self_ty()); + Ok(SizeSkeleton::Pointer { + non_zero: non_zero, + tail: tcx.erase_regions(&tail) + }) + } + _ => { + bug!("SizeSkeleton::compute({}): layout errored ({}), yet \ + tail `{}` is not a type parameter or a projection", + ty, err, tail) + } + } + } + + ty::TyAdt(def, substs) => { + // Only newtypes and enums w/ nullable pointer optimization. + if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 { + return Err(err); + } + + // Get a zero-sized variant or a pointer newtype. + let zero_or_ptr_variant = |i: usize| { + let fields = def.variants[i].fields.iter().map(|field| { + SizeSkeleton::compute(field.ty(tcx, substs), infcx) + }); + let mut ptr = None; + for field in fields { + let field = field?; + match field { + SizeSkeleton::Known(size) => { + if size.bytes() > 0 { + return Err(err); + } + } + SizeSkeleton::Pointer {..} => { + if ptr.is_some() { + return Err(err); + } + ptr = Some(field); + } + } + } + Ok(ptr) + }; + + let v0 = zero_or_ptr_variant(0)?; + // Newtype. 
+ if def.variants.len() == 1 { + if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 { + return Ok(SizeSkeleton::Pointer { + non_zero: non_zero || + Some(def.did) == tcx.lang_items.non_zero(), + tail: tail + }); + } else { + return Err(err); + } + } + + let v1 = zero_or_ptr_variant(1)?; + // Nullable pointer enum optimization. + match (v0, v1) { + (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) | + (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => { + Ok(SizeSkeleton::Pointer { + non_zero: false, + tail: tail + }) + } + _ => Err(err) + } + } + + ty::TyProjection(_) | ty::TyAnon(..) => { + let normalized = normalize_associated_type(infcx, ty); + if ty == normalized { + Err(err) + } else { + SizeSkeleton::compute(normalized, infcx) + } + } + + _ => Err(err) + } + } + + pub fn same_size(self, other: SizeSkeleton) -> bool { + match (self, other) { + (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b, + (SizeSkeleton::Pointer { tail: a, .. }, + SizeSkeleton::Pointer { tail: b, .. }) => a == b, + _ => false + } + } +} diff --git a/src/librustc/ty/maps.rs b/src/librustc/ty/maps.rs new file mode 100644 index 0000000000000..42b3544421f8b --- /dev/null +++ b/src/librustc/ty/maps.rs @@ -0,0 +1,48 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use dep_graph::{DepNode, DepTrackingMapConfig}; +use hir::def_id::DefId; +use mir; +use ty::{self, Ty}; + +use std::cell::RefCell; +use std::marker::PhantomData; +use std::rc::Rc; +use syntax::attr; + +macro_rules! 
dep_map_ty { + ($ty_name:ident : $node_name:ident ($key:ty) -> $value:ty) => { + pub struct $ty_name<'tcx> { + data: PhantomData<&'tcx ()> + } + + impl<'tcx> DepTrackingMapConfig for $ty_name<'tcx> { + type Key = $key; + type Value = $value; + fn to_dep_node(key: &$key) -> DepNode { DepNode::$node_name(*key) } + } + } +} + +dep_map_ty! { AssociatedItems: AssociatedItems(DefId) -> ty::AssociatedItem } +dep_map_ty! { Types: ItemSignature(DefId) -> Ty<'tcx> } +dep_map_ty! { Generics: ItemSignature(DefId) -> &'tcx ty::Generics<'tcx> } +dep_map_ty! { Predicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } +dep_map_ty! { SuperPredicates: ItemSignature(DefId) -> ty::GenericPredicates<'tcx> } +dep_map_ty! { AssociatedItemDefIds: AssociatedItemDefIds(DefId) -> Rc> } +dep_map_ty! { ImplTraitRefs: ItemSignature(DefId) -> Option> } +dep_map_ty! { TraitDefs: ItemSignature(DefId) -> &'tcx ty::TraitDef } +dep_map_ty! { AdtDefs: ItemSignature(DefId) -> &'tcx ty::AdtDef } +dep_map_ty! { AdtSizedConstraint: SizedConstraint(DefId) -> Ty<'tcx> } +dep_map_ty! { ItemVariances: ItemSignature(DefId) -> Rc> } +dep_map_ty! { InherentImpls: InherentImpls(DefId) -> Vec } +dep_map_ty! { ReprHints: ReprHints(DefId) -> Rc> } +dep_map_ty! { Mir: Mir(DefId) -> &'tcx RefCell> } diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs new file mode 100644 index 0000000000000..df12c252907a5 --- /dev/null +++ b/src/librustc/ty/mod.rs @@ -0,0 +1,2705 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +pub use self::Variance::*; +pub use self::DtorKind::*; +pub use self::AssociatedItemContainer::*; +pub use self::BorrowKind::*; +pub use self::IntVarValue::*; +pub use self::LvaluePreference::*; +pub use self::fold::TypeFoldable; + +use dep_graph::{self, DepNode}; +use hir::map as ast_map; +use middle; +use hir::def::{Def, CtorKind, ExportMap}; +use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; +use middle::region::{CodeExtent, ROOT_CODE_EXTENT}; +use mir::Mir; +use traits; +use ty; +use ty::subst::{Subst, Substs}; +use ty::walk::TypeWalker; +use util::common::MemoizationMap; +use util::nodemap::{NodeSet, NodeMap, FxHashMap, FxHashSet}; + +use serialize::{self, Encodable, Encoder}; +use std::borrow::Cow; +use std::cell::{Cell, RefCell, Ref}; +use std::hash::{Hash, Hasher}; +use std::iter; +use std::ops::Deref; +use std::rc::Rc; +use std::slice; +use std::vec::IntoIter; +use std::mem; +use syntax::ast::{self, Name, NodeId}; +use syntax::attr; +use syntax::symbol::{Symbol, InternedString}; +use syntax_pos::{DUMMY_SP, Span}; + +use rustc_const_math::ConstInt; +use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter; + +use hir; +use hir::itemlikevisit::ItemLikeVisitor; + +pub use self::sty::{Binder, DebruijnIndex}; +pub use self::sty::{BareFnTy, FnSig, PolyFnSig}; +pub use self::sty::{ClosureTy, InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; +pub use self::sty::{ClosureSubsts, TypeAndMut}; +pub use self::sty::{TraitRef, TypeVariants, PolyTraitRef}; +pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; +pub use self::sty::{ExistentialProjection, PolyExistentialProjection}; +pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; +pub use self::sty::Issue32330; +pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid, SkolemizedRegionVid}; +pub use self::sty::BoundRegion::*; +pub use self::sty::InferTy::*; +pub use 
self::sty::Region::*; +pub use self::sty::TypeVariants::*; + +pub use self::contents::TypeContents; +pub use self::context::{TyCtxt, tls}; +pub use self::context::{CtxtArenas, Lift, Tables}; + +pub use self::trait_def::{TraitDef, TraitFlags}; + +pub mod adjustment; +pub mod cast; +pub mod error; +pub mod fast_reject; +pub mod fold; +pub mod item_path; +pub mod layout; +pub mod _match; +pub mod maps; +pub mod outlives; +pub mod relate; +pub mod subst; +pub mod trait_def; +pub mod walk; +pub mod wf; +pub mod util; + +mod contents; +mod context; +mod flags; +mod structural_impls; +mod sty; + +pub type Disr = ConstInt; + +// Data types + +/// The complete set of all analyses described in this module. This is +/// produced by the driver and fed to trans and later passes. +#[derive(Clone)] +pub struct CrateAnalysis<'tcx> { + pub export_map: ExportMap, + pub access_levels: middle::privacy::AccessLevels, + pub reachable: NodeSet, + pub name: String, + pub glob_map: Option, + pub hir_ty_to_ty: NodeMap>, +} + +#[derive(Copy, Clone)] +pub enum DtorKind { + NoDtor, + TraitDtor +} + +impl DtorKind { + pub fn is_present(&self) -> bool { + match *self { + TraitDtor => true, + _ => false + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum AssociatedItemContainer { + TraitContainer(DefId), + ImplContainer(DefId), +} + +impl AssociatedItemContainer { + pub fn id(&self) -> DefId { + match *self { + TraitContainer(id) => id, + ImplContainer(id) => id, + } + } +} + +/// The "header" of an impl is everything outside the body: a Self type, a trait +/// ref (in the case of a trait impl), and a set of predicates (from the +/// bounds/where clauses). 
+#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct ImplHeader<'tcx> { + pub impl_def_id: DefId, + pub self_ty: Ty<'tcx>, + pub trait_ref: Option>, + pub predicates: Vec>, +} + +impl<'a, 'gcx, 'tcx> ImplHeader<'tcx> { + pub fn with_fresh_ty_vars(selcx: &mut traits::SelectionContext<'a, 'gcx, 'tcx>, + impl_def_id: DefId) + -> ImplHeader<'tcx> + { + let tcx = selcx.tcx(); + let impl_substs = selcx.infcx().fresh_substs_for_item(DUMMY_SP, impl_def_id); + + let header = ImplHeader { + impl_def_id: impl_def_id, + self_ty: tcx.item_type(impl_def_id), + trait_ref: tcx.impl_trait_ref(impl_def_id), + predicates: tcx.item_predicates(impl_def_id).predicates + }.subst(tcx, impl_substs); + + let traits::Normalized { value: mut header, obligations } = + traits::normalize(selcx, traits::ObligationCause::dummy(), &header); + + header.predicates.extend(obligations.into_iter().map(|o| o.predicate)); + header + } +} + +#[derive(Copy, Clone, Debug)] +pub struct AssociatedItem { + pub def_id: DefId, + pub name: Name, + pub kind: AssociatedKind, + pub vis: Visibility, + pub defaultness: hir::Defaultness, + pub container: AssociatedItemContainer, + + /// Whether this is a method with an explicit self + /// as its first argument, allowing method calls. + pub method_has_self_argument: bool, +} + +#[derive(Copy, Clone, PartialEq, Eq, Debug, RustcEncodable, RustcDecodable)] +pub enum AssociatedKind { + Const, + Method, + Type +} + +impl AssociatedItem { + pub fn def(&self) -> Def { + match self.kind { + AssociatedKind::Const => Def::AssociatedConst(self.def_id), + AssociatedKind::Method => Def::Method(self.def_id), + AssociatedKind::Type => Def::AssociatedTy(self.def_id), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, Copy, RustcEncodable, RustcDecodable)] +pub enum Visibility { + /// Visible everywhere (including in other crates). + Public, + /// Visible only in the given crate-local module. + Restricted(NodeId), + /// Not visible anywhere in the local crate. 
This is the visibility of private external items. + PrivateExternal, +} + +pub trait NodeIdTree { + fn is_descendant_of(&self, node: NodeId, ancestor: NodeId) -> bool; +} + +impl<'a> NodeIdTree for ast_map::Map<'a> { + fn is_descendant_of(&self, node: NodeId, ancestor: NodeId) -> bool { + let mut node_ancestor = node; + while node_ancestor != ancestor { + let node_ancestor_parent = self.get_module_parent(node_ancestor); + if node_ancestor_parent == node_ancestor { + return false; + } + node_ancestor = node_ancestor_parent; + } + true + } +} + +impl Visibility { + pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: TyCtxt) -> Self { + match *visibility { + hir::Public => Visibility::Public, + hir::Visibility::Crate => Visibility::Restricted(ast::CRATE_NODE_ID), + hir::Visibility::Restricted { ref path, .. } => match path.def { + // If there is no resolution, `resolve` will have already reported an error, so + // assume that the visibility is public to avoid reporting more privacy errors. + Def::Err => Visibility::Public, + def => Visibility::Restricted(tcx.map.as_local_node_id(def.def_id()).unwrap()), + }, + hir::Inherited => Visibility::Restricted(tcx.map.get_module_parent(id)), + } + } + + /// Returns true if an item with this visibility is accessible from the given block. + pub fn is_accessible_from(self, block: NodeId, tree: &T) -> bool { + let restriction = match self { + // Public items are visible everywhere. + Visibility::Public => return true, + // Private items from other crates are visible nowhere. + Visibility::PrivateExternal => return false, + // Restricted items are visible in an arbitrary local module. 
+ Visibility::Restricted(module) => module, + }; + + tree.is_descendant_of(block, restriction) + } + + /// Returns true if this visibility is at least as accessible as the given visibility + pub fn is_at_least(self, vis: Visibility, tree: &T) -> bool { + let vis_restriction = match vis { + Visibility::Public => return self == Visibility::Public, + Visibility::PrivateExternal => return true, + Visibility::Restricted(module) => module, + }; + + self.is_accessible_from(vis_restriction, tree) + } +} + +#[derive(Clone, PartialEq, RustcDecodable, RustcEncodable, Copy)] +pub enum Variance { + Covariant, // T <: T iff A <: B -- e.g., function return type + Invariant, // T <: T iff B == A -- e.g., type of mutable cell + Contravariant, // T <: T iff B <: A -- e.g., function param type + Bivariant, // T <: T -- e.g., unused type parameter +} + +#[derive(Clone, Copy, Debug, RustcDecodable, RustcEncodable)] +pub struct MethodCallee<'tcx> { + /// Impl method ID, for inherent methods, or trait method ID, otherwise. + pub def_id: DefId, + pub ty: Ty<'tcx>, + pub substs: &'tcx Substs<'tcx> +} + +/// With method calls, we store some extra information in +/// side tables (i.e method_map). We use +/// MethodCall as a key to index into these tables instead of +/// just directly using the expression's NodeId. The reason +/// for this being that we may apply adjustments (coercions) +/// with the resulting expression also needing to use the +/// side tables. The problem with this is that we don't +/// assign a separate NodeId to this new expression +/// and so it would clash with the base expression if both +/// needed to add to the side tables. Thus to disambiguate +/// we also keep track of whether there's an adjustment in +/// our key. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub struct MethodCall { + pub expr_id: NodeId, + pub autoderef: u32 +} + +impl MethodCall { + pub fn expr(id: NodeId) -> MethodCall { + MethodCall { + expr_id: id, + autoderef: 0 + } + } + + pub fn autoderef(expr_id: NodeId, autoderef: u32) -> MethodCall { + MethodCall { + expr_id: expr_id, + autoderef: 1 + autoderef + } + } +} + +// maps from an expression id that corresponds to a method call to the details +// of the method to be invoked +pub type MethodMap<'tcx> = FxHashMap>; + +// Contains information needed to resolve types and (in the future) look up +// the types of AST nodes. +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct CReaderCacheKey { + pub cnum: CrateNum, + pub pos: usize, +} + +/// Describes the fragment-state associated with a NodeId. +/// +/// Currently only unfragmented paths have entries in the table, +/// but longer-term this enum is expected to expand to also +/// include data for fragmented paths. +#[derive(Copy, Clone, Debug)] +pub enum FragmentInfo { + Moved { var: NodeId, move_expr: NodeId }, + Assigned { var: NodeId, assign_expr: NodeId, assignee_id: NodeId }, +} + +// Flags that we track on types. These flags are propagated upwards +// through the type during type construction, so that we can quickly +// check whether the type has various kinds of types in it without +// recursing over the type itself. +bitflags! { + flags TypeFlags: u32 { + const HAS_PARAMS = 1 << 0, + const HAS_SELF = 1 << 1, + const HAS_TY_INFER = 1 << 2, + const HAS_RE_INFER = 1 << 3, + const HAS_RE_SKOL = 1 << 4, + const HAS_RE_EARLY_BOUND = 1 << 5, + const HAS_FREE_REGIONS = 1 << 6, + const HAS_TY_ERR = 1 << 7, + const HAS_PROJECTION = 1 << 8, + const HAS_TY_CLOSURE = 1 << 9, + + // true if there are "names" of types and regions and so forth + // that are local to a particular fn + const HAS_LOCAL_NAMES = 1 << 10, + + // Present if the type belongs in a local type context. 
+ // Only set for TyInfer other than Fresh. + const KEEP_IN_LOCAL_TCX = 1 << 11, + + // Is there a projection that does not involve a bound region? + // Currently we can't normalize projections w/ bound regions. + const HAS_NORMALIZABLE_PROJECTION = 1 << 12, + + const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | + TypeFlags::HAS_SELF.bits | + TypeFlags::HAS_RE_EARLY_BOUND.bits, + + // Flags representing the nominal content of a type, + // computed by FlagsComputation. If you add a new nominal + // flag, it should be added here too. + const NOMINAL_FLAGS = TypeFlags::HAS_PARAMS.bits | + TypeFlags::HAS_SELF.bits | + TypeFlags::HAS_TY_INFER.bits | + TypeFlags::HAS_RE_INFER.bits | + TypeFlags::HAS_RE_SKOL.bits | + TypeFlags::HAS_RE_EARLY_BOUND.bits | + TypeFlags::HAS_FREE_REGIONS.bits | + TypeFlags::HAS_TY_ERR.bits | + TypeFlags::HAS_PROJECTION.bits | + TypeFlags::HAS_TY_CLOSURE.bits | + TypeFlags::HAS_LOCAL_NAMES.bits | + TypeFlags::KEEP_IN_LOCAL_TCX.bits, + + // Caches for type_is_sized, type_moves_by_default + const SIZEDNESS_CACHED = 1 << 16, + const IS_SIZED = 1 << 17, + const MOVENESS_CACHED = 1 << 18, + const MOVES_BY_DEFAULT = 1 << 19, + } +} + +pub struct TyS<'tcx> { + pub sty: TypeVariants<'tcx>, + pub flags: Cell, + + // the maximal depth of any bound regions appearing in this type. 
+ region_depth: u32, +} + +impl<'tcx> PartialEq for TyS<'tcx> { + #[inline] + fn eq(&self, other: &TyS<'tcx>) -> bool { + // (self as *const _) == (other as *const _) + (self as *const TyS<'tcx>) == (other as *const TyS<'tcx>) + } +} +impl<'tcx> Eq for TyS<'tcx> {} + +impl<'tcx> Hash for TyS<'tcx> { + fn hash(&self, s: &mut H) { + (self as *const TyS).hash(s) + } +} + +pub type Ty<'tcx> = &'tcx TyS<'tcx>; + +impl<'tcx> serialize::UseSpecializedEncodable for Ty<'tcx> {} +impl<'tcx> serialize::UseSpecializedDecodable for Ty<'tcx> {} + +/// A wrapper for slices with the additional invariant +/// that the slice is interned and no other slice with +/// the same contents can exist in the same context. +/// This means we can use pointer + length for both +/// equality comparisons and hashing. +#[derive(Debug, RustcEncodable)] +pub struct Slice([T]); + +impl PartialEq for Slice { + #[inline] + fn eq(&self, other: &Slice) -> bool { + (&self.0 as *const [T]) == (&other.0 as *const [T]) + } +} +impl Eq for Slice {} + +impl Hash for Slice { + fn hash(&self, s: &mut H) { + (self.as_ptr(), self.len()).hash(s) + } +} + +impl Deref for Slice { + type Target = [T]; + fn deref(&self) -> &[T] { + &self.0 + } +} + +impl<'a, T> IntoIterator for &'a Slice { + type Item = &'a T; + type IntoIter = <&'a [T] as IntoIterator>::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self[..].iter() + } +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice> {} + +impl Slice { + pub fn empty<'a>() -> &'a Slice { + unsafe { + mem::transmute(slice::from_raw_parts(0x1 as *const T, 0)) + } + } +} + +/// Upvars do not get their own node-id. Instead, we use the pair of +/// the original var id (that is, the root variable that is referenced +/// by the upvar) and the id of the closure expression. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash)] +pub struct UpvarId { + pub var_id: NodeId, + pub closure_expr_id: NodeId, +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)] +pub enum BorrowKind { + /// Data must be immutable and is aliasable. + ImmBorrow, + + /// Data must be immutable but not aliasable. This kind of borrow + /// cannot currently be expressed by the user and is used only in + /// implicit closure bindings. It is needed when you the closure + /// is borrowing or mutating a mutable referent, e.g.: + /// + /// let x: &mut isize = ...; + /// let y = || *x += 5; + /// + /// If we were to try to translate this closure into a more explicit + /// form, we'd encounter an error with the code as written: + /// + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &x }, fn_ptr); // Closure is pair of env and fn + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// + /// This is then illegal because you cannot mutate a `&mut` found + /// in an aliasable location. To solve, you'd have to translate with + /// an `&mut` borrow: + /// + /// struct Env { x: & &mut isize } + /// let x: &mut isize = ...; + /// let y = (&mut Env { &mut x }, fn_ptr); // changed from &x to &mut x + /// fn fn_ptr(env: &mut Env) { **env.x += 5; } + /// + /// Now the assignment to `**env.x` is legal, but creating a + /// mutable pointer to `x` is not because `x` is not mutable. We + /// could fix this by declaring `x` as `let mut x`. This is ok in + /// user code, if awkward, but extra weird for closures, since the + /// borrow is hidden. + /// + /// So we introduce a "unique imm" borrow -- the referent is + /// immutable, but not aliasable. This solves the problem. For + /// simplicity, we don't give users the way to express this + /// borrow, it's just used when translating closures. + UniqueImmBorrow, + + /// Data is mutable and not aliasable. + MutBorrow +} + +/// Information describing the capture of an upvar. 
This is computed +/// during `typeck`, specifically by `regionck`. +#[derive(PartialEq, Clone, Debug, Copy, RustcEncodable, RustcDecodable)] +pub enum UpvarCapture<'tcx> { + /// Upvar is captured by value. This is always true when the + /// closure is labeled `move`, but can also be true in other cases + /// depending on inference. + ByValue, + + /// Upvar is captured by reference. + ByRef(UpvarBorrow<'tcx>), +} + +#[derive(PartialEq, Clone, Copy, RustcEncodable, RustcDecodable)] +pub struct UpvarBorrow<'tcx> { + /// The kind of borrow: by-ref upvars have access to shared + /// immutable borrows, which are not part of the normal language + /// syntax. + pub kind: BorrowKind, + + /// Region of the resulting reference. + pub region: &'tcx ty::Region, +} + +pub type UpvarCaptureMap<'tcx> = FxHashMap>; + +#[derive(Copy, Clone)] +pub struct ClosureUpvar<'tcx> { + pub def: Def, + pub span: Span, + pub ty: Ty<'tcx>, +} + +#[derive(Clone, Copy, PartialEq)] +pub enum IntVarValue { + IntType(ast::IntTy), + UintType(ast::UintTy), +} + +/// Default region to use for the bound of objects that are +/// supplied as the value for this type parameter. This is derived +/// from `T:'a` annotations appearing in the type definition. If +/// this is `None`, then the default is inherited from the +/// surrounding context. See RFC #599 for details. +#[derive(Copy, Clone, RustcEncodable, RustcDecodable)] +pub enum ObjectLifetimeDefault<'tcx> { + /// Require an explicit annotation. Occurs when multiple + /// `T:'a` constraints are found. + Ambiguous, + + /// Use the base default, typically 'static, but in a fn body it is a fresh variable + BaseDefault, + + /// Use the given region as the default. 
+ Specific(&'tcx Region), +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct TypeParameterDef<'tcx> { + pub name: Name, + pub def_id: DefId, + pub index: u32, + pub default_def_id: DefId, // for use in error reporing about defaults + pub default: Option>, + pub object_lifetime_default: ObjectLifetimeDefault<'tcx>, + + /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute + /// on generic parameter `T`, asserts data behind the parameter + /// `T` won't be accessed during the parent type's `Drop` impl. + pub pure_wrt_drop: bool, +} + +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct RegionParameterDef<'tcx> { + pub name: Name, + pub def_id: DefId, + pub index: u32, + pub bounds: Vec<&'tcx ty::Region>, + + /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute + /// on generic parameter `'a`, asserts data of lifetime `'a` + /// won't be accessed during the parent type's `Drop` impl. + pub pure_wrt_drop: bool, +} + +impl<'tcx> RegionParameterDef<'tcx> { + pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion { + ty::EarlyBoundRegion { + index: self.index, + name: self.name, + } + } + + pub fn to_bound_region(&self) -> ty::BoundRegion { + // this is an early bound region, so unaffected by #32330 + ty::BoundRegion::BrNamed(self.def_id, self.name, Issue32330::WontChange) + } +} + +/// Information about the formal type/lifetime parameters associated +/// with an item or method. Analogous to hir::Generics. 
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +pub struct Generics<'tcx> { + pub parent: Option, + pub parent_regions: u32, + pub parent_types: u32, + pub regions: Vec>, + pub types: Vec>, + pub has_self: bool, +} + +impl<'tcx> Generics<'tcx> { + pub fn parent_count(&self) -> usize { + self.parent_regions as usize + self.parent_types as usize + } + + pub fn own_count(&self) -> usize { + self.regions.len() + self.types.len() + } + + pub fn count(&self) -> usize { + self.parent_count() + self.own_count() + } + + pub fn region_param(&self, param: &EarlyBoundRegion) -> &RegionParameterDef<'tcx> { + &self.regions[param.index as usize - self.has_self as usize] + } + + pub fn type_param(&self, param: &ParamTy) -> &TypeParameterDef<'tcx> { + &self.types[param.idx as usize - self.has_self as usize - self.regions.len()] + } +} + +/// Bounds on generics. +#[derive(Clone)] +pub struct GenericPredicates<'tcx> { + pub parent: Option, + pub predicates: Vec>, +} + +impl<'tcx> serialize::UseSpecializedEncodable for GenericPredicates<'tcx> {} +impl<'tcx> serialize::UseSpecializedDecodable for GenericPredicates<'tcx> {} + +impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { + pub fn instantiate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) + -> InstantiatedPredicates<'tcx> { + let mut instantiated = InstantiatedPredicates::empty(); + self.instantiate_into(tcx, &mut instantiated, substs); + instantiated + } + pub fn instantiate_own(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) + -> InstantiatedPredicates<'tcx> { + InstantiatedPredicates { + predicates: self.predicates.subst(tcx, substs) + } + } + + fn instantiate_into(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + instantiated: &mut InstantiatedPredicates<'tcx>, + substs: &Substs<'tcx>) { + if let Some(def_id) = self.parent { + tcx.item_predicates(def_id).instantiate_into(tcx, instantiated, substs); + } + instantiated.predicates.extend(self.predicates.iter().map(|p| p.subst(tcx, substs))) + } + + pub fn 
instantiate_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + poly_trait_ref: &ty::PolyTraitRef<'tcx>) + -> InstantiatedPredicates<'tcx> + { + assert_eq!(self.parent, None); + InstantiatedPredicates { + predicates: self.predicates.iter().map(|pred| { + pred.subst_supertrait(tcx, poly_trait_ref) + }).collect() + } + } +} + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum Predicate<'tcx> { + /// Corresponds to `where Foo : Bar`. `Foo` here would be + /// the `Self` type of the trait reference and `A`, `B`, and `C` + /// would be the type parameters. + Trait(PolyTraitPredicate<'tcx>), + + /// where `T1 == T2`. + Equate(PolyEquatePredicate<'tcx>), + + /// where 'a : 'b + RegionOutlives(PolyRegionOutlivesPredicate<'tcx>), + + /// where T : 'a + TypeOutlives(PolyTypeOutlivesPredicate<'tcx>), + + /// where ::Name == X, approximately. + /// See `ProjectionPredicate` struct for details. + Projection(PolyProjectionPredicate<'tcx>), + + /// no syntax: T WF + WellFormed(Ty<'tcx>), + + /// trait must be object-safe + ObjectSafe(DefId), + + /// No direct syntax. May be thought of as `where T : FnFoo<...>` + /// for some substitutions `...` and T being a closure type. + /// Satisfied (or refuted) once we know the closure's kind. + ClosureKind(DefId, ClosureKind), +} + +impl<'a, 'gcx, 'tcx> Predicate<'tcx> { + /// Performs a substitution suitable for going from a + /// poly-trait-ref to supertraits that must hold if that + /// poly-trait-ref holds. This is slightly different from a normal + /// substitution in terms of what happens with bound regions. See + /// lengthy comment below for details. + pub fn subst_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: &ty::PolyTraitRef<'tcx>) + -> ty::Predicate<'tcx> + { + // The interaction between HRTB and supertraits is not entirely + // obvious. Let me walk you (and myself) through an example. + // + // Let's start with an easy case. 
Consider two traits: + // + // trait Foo<'a> : Bar<'a,'a> { } + // trait Bar<'b,'c> { } + // + // Now, if we have a trait reference `for<'x> T : Foo<'x>`, then + // we can deduce that `for<'x> T : Bar<'x,'x>`. Basically, if we + // knew that `Foo<'x>` (for any 'x) then we also know that + // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from + // normal substitution. + // + // In terms of why this is sound, the idea is that whenever there + // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>` + // holds. So if there is an impl of `T:Foo<'a>` that applies to + // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all + // `'a`. + // + // Another example to be careful of is this: + // + // trait Foo1<'a> : for<'b> Bar1<'a,'b> { } + // trait Bar1<'b,'c> { } + // + // Here, if we have `for<'x> T : Foo1<'x>`, then what do we know? + // The answer is that we know `for<'x,'b> T : Bar1<'x,'b>`. The + // reason is similar to the previous example: any impl of + // `T:Foo1<'x>` must show that `for<'b> T : Bar1<'x, 'b>`. So + // basically we would want to collapse the bound lifetimes from + // the input (`trait_ref`) and the supertraits. + // + // To achieve this in practice is fairly straightforward. Let's + // consider the more complicated scenario: + // + // - We start out with `for<'x> T : Foo1<'x>`. In this case, `'x` + // has a De Bruijn index of 1. We want to produce `for<'x,'b> T : Bar1<'x,'b>`, + // where both `'x` and `'b` would have a DB index of 1. + // The substitution from the input trait-ref is therefore going to be + // `'a => 'x` (where `'x` has a DB index of 1). + // - The super-trait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an + // early-bound parameter and `'b' is a late-bound parameter with a + // DB index of 1. + // - If we replace `'a` with `'x` from the input, it too will have + // a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>` + // just as we wanted. + // + // There is only one catch. 
If we just apply the substitution `'a + // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will + // adjust the DB index because we substituting into a binder (it + // tries to be so smart...) resulting in `for<'x> for<'b> + // Bar1<'x,'b>` (we have no syntax for this, so use your + // imagination). Basically the 'x will have DB index of 2 and 'b + // will have DB index of 1. Not quite what we want. So we apply + // the substitution to the *contents* of the trait reference, + // rather than the trait reference itself (put another way, the + // substitution code expects equal binding levels in the values + // from the substitution and the value being substituted into, and + // this trick achieves that). + + let substs = &trait_ref.0.substs; + match *self { + Predicate::Trait(ty::Binder(ref data)) => + Predicate::Trait(ty::Binder(data.subst(tcx, substs))), + Predicate::Equate(ty::Binder(ref data)) => + Predicate::Equate(ty::Binder(data.subst(tcx, substs))), + Predicate::RegionOutlives(ty::Binder(ref data)) => + Predicate::RegionOutlives(ty::Binder(data.subst(tcx, substs))), + Predicate::TypeOutlives(ty::Binder(ref data)) => + Predicate::TypeOutlives(ty::Binder(data.subst(tcx, substs))), + Predicate::Projection(ty::Binder(ref data)) => + Predicate::Projection(ty::Binder(data.subst(tcx, substs))), + Predicate::WellFormed(data) => + Predicate::WellFormed(data.subst(tcx, substs)), + Predicate::ObjectSafe(trait_def_id) => + Predicate::ObjectSafe(trait_def_id), + Predicate::ClosureKind(closure_def_id, kind) => + Predicate::ClosureKind(closure_def_id, kind), + } + } +} + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct TraitPredicate<'tcx> { + pub trait_ref: TraitRef<'tcx> +} +pub type PolyTraitPredicate<'tcx> = ty::Binder>; + +impl<'tcx> TraitPredicate<'tcx> { + pub fn def_id(&self) -> DefId { + self.trait_ref.def_id + } + + /// Creates the dep-node for selecting/evaluating this trait reference. 
+ fn dep_node(&self) -> DepNode { + // Ideally, the dep-node would just have all the input types + // in it. But they are limited to including def-ids. So as an + // approximation we include the def-ids for all nominal types + // found somewhere. This means that we will e.g. conflate the + // dep-nodes for `u32: SomeTrait` and `u64: SomeTrait`, but we + // would have distinct dep-nodes for `Vec: SomeTrait`, + // `Rc: SomeTrait`, and `(Vec, Rc): SomeTrait`. + // Note that it's always sound to conflate dep-nodes, it just + // leads to more recompilation. + let def_ids: Vec<_> = + self.input_types() + .flat_map(|t| t.walk()) + .filter_map(|t| match t.sty { + ty::TyAdt(adt_def, _) => + Some(adt_def.did), + _ => + None + }) + .chain(iter::once(self.def_id())) + .collect(); + DepNode::TraitSelect(def_ids) + } + + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + self.trait_ref.input_types() + } + + pub fn self_ty(&self) -> Ty<'tcx> { + self.trait_ref.self_ty() + } +} + +impl<'tcx> PolyTraitPredicate<'tcx> { + pub fn def_id(&self) -> DefId { + // ok to skip binder since trait def-id does not care about regions + self.0.def_id() + } + + pub fn dep_node(&self) -> DepNode { + // ok to skip binder since depnode does not care about regions + self.0.dep_node() + } +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct EquatePredicate<'tcx>(pub Ty<'tcx>, pub Ty<'tcx>); // `0 == 1` +pub type PolyEquatePredicate<'tcx> = ty::Binder>; + +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct OutlivesPredicate(pub A, pub B); // `A : B` +pub type PolyOutlivesPredicate = ty::Binder>; +pub type PolyRegionOutlivesPredicate<'tcx> = PolyOutlivesPredicate<&'tcx ty::Region, + &'tcx ty::Region>; +pub type PolyTypeOutlivesPredicate<'tcx> = PolyOutlivesPredicate, &'tcx ty::Region>; + +/// This kind of predicate has no *direct* correspondent in the +/// syntax, but it roughly corresponds to the 
syntactic forms: +/// +/// 1. `T : TraitRef<..., Item=Type>` +/// 2. `>::Item == Type` (NYI) +/// +/// In particular, form #1 is "desugared" to the combination of a +/// normal trait predicate (`T : TraitRef<...>`) and one of these +/// predicates. Form #2 is a broader form in that it also permits +/// equality between arbitrary types. Processing an instance of Form +/// #2 eventually yields one of these `ProjectionPredicate` +/// instances to normalize the LHS. +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ProjectionPredicate<'tcx> { + pub projection_ty: ProjectionTy<'tcx>, + pub ty: Ty<'tcx>, +} + +pub type PolyProjectionPredicate<'tcx> = Binder>; + +impl<'tcx> PolyProjectionPredicate<'tcx> { + pub fn item_name(&self) -> Name { + self.0.projection_ty.item_name // safe to skip the binder to access a name + } +} + +pub trait ToPolyTraitRef<'tcx> { + fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>; +} + +impl<'tcx> ToPolyTraitRef<'tcx> for TraitRef<'tcx> { + fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { + assert!(!self.has_escaping_regions()); + ty::Binder(self.clone()) + } +} + +impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> { + fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { + self.map_bound_ref(|trait_pred| trait_pred.trait_ref) + } +} + +impl<'tcx> ToPolyTraitRef<'tcx> for PolyProjectionPredicate<'tcx> { + fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { + // Note: unlike with TraitRef::to_poly_trait_ref(), + // self.0.trait_ref is permitted to have escaping regions. + // This is because here `self` has a `Binder` and so does our + // return value, so we are preserving the number of binding + // levels. 
+ ty::Binder(self.0.projection_ty.trait_ref) + } +} + +pub trait ToPredicate<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx>; +} + +impl<'tcx> ToPredicate<'tcx> for TraitRef<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + // we're about to add a binder, so let's check that we don't + // accidentally capture anything, or else that might be some + // weird debruijn accounting. + assert!(!self.has_escaping_regions()); + + ty::Predicate::Trait(ty::Binder(ty::TraitPredicate { + trait_ref: self.clone() + })) + } +} + +impl<'tcx> ToPredicate<'tcx> for PolyTraitRef<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + ty::Predicate::Trait(self.to_poly_trait_predicate()) + } +} + +impl<'tcx> ToPredicate<'tcx> for PolyEquatePredicate<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + Predicate::Equate(self.clone()) + } +} + +impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + Predicate::RegionOutlives(self.clone()) + } +} + +impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + Predicate::TypeOutlives(self.clone()) + } +} + +impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> { + fn to_predicate(&self) -> Predicate<'tcx> { + Predicate::Projection(self.clone()) + } +} + +impl<'tcx> Predicate<'tcx> { + /// Iterates over the types in this predicate. Note that in all + /// cases this is skipping over a binder, so late-bound regions + /// with depth 0 are bound by the predicate. + pub fn walk_tys(&self) -> IntoIter> { + let vec: Vec<_> = match *self { + ty::Predicate::Trait(ref data) => { + data.skip_binder().input_types().collect() + } + ty::Predicate::Equate(ty::Binder(ref data)) => { + vec![data.0, data.1] + } + ty::Predicate::TypeOutlives(ty::Binder(ref data)) => { + vec![data.0] + } + ty::Predicate::RegionOutlives(..) 
=> { + vec![] + } + ty::Predicate::Projection(ref data) => { + let trait_inputs = data.0.projection_ty.trait_ref.input_types(); + trait_inputs.chain(Some(data.0.ty)).collect() + } + ty::Predicate::WellFormed(data) => { + vec![data] + } + ty::Predicate::ObjectSafe(_trait_def_id) => { + vec![] + } + ty::Predicate::ClosureKind(_closure_def_id, _kind) => { + vec![] + } + }; + + // The only reason to collect into a vector here is that I was + // too lazy to make the full (somewhat complicated) iterator + // type that would be needed here. But I wanted this fn to + // return an iterator conceptually, rather than a `Vec`, so as + // to be closer to `Ty::walk`. + vec.into_iter() + } + + pub fn to_opt_poly_trait_ref(&self) -> Option> { + match *self { + Predicate::Trait(ref t) => { + Some(t.to_poly_trait_ref()) + } + Predicate::Projection(..) | + Predicate::Equate(..) | + Predicate::RegionOutlives(..) | + Predicate::WellFormed(..) | + Predicate::ObjectSafe(..) | + Predicate::ClosureKind(..) | + Predicate::TypeOutlives(..) => { + None + } + } + } +} + +/// Represents the bounds declared on a particular set of type +/// parameters. Should eventually be generalized into a flag list of +/// where clauses. You can obtain a `InstantiatedPredicates` list from a +/// `GenericPredicates` by using the `instantiate` method. Note that this method +/// reflects an important semantic invariant of `InstantiatedPredicates`: while +/// the `GenericPredicates` are expressed in terms of the bound type +/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance +/// represented a set of bounds for some particular instantiation, +/// meaning that the generic parameters have been substituted with +/// their values. +/// +/// Example: +/// +/// struct Foo> { ... } +/// +/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like +/// `[[], [U:Bar]]`. 
Now if there were some particular reference +/// like `Foo`, then the `InstantiatedPredicates` would be `[[], +/// [usize:Bar]]`. +#[derive(Clone)] +pub struct InstantiatedPredicates<'tcx> { + pub predicates: Vec>, +} + +impl<'tcx> InstantiatedPredicates<'tcx> { + pub fn empty() -> InstantiatedPredicates<'tcx> { + InstantiatedPredicates { predicates: vec![] } + } + + pub fn is_empty(&self) -> bool { + self.predicates.is_empty() + } +} + +impl<'tcx> TraitRef<'tcx> { + pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> { + TraitRef { def_id: def_id, substs: substs } + } + + pub fn self_ty(&self) -> Ty<'tcx> { + self.substs.type_at(0) + } + + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + // Select only the "input types" from a trait-reference. For + // now this is all the types that appear in the + // trait-reference, but it should eventually exclude + // associated types. + self.substs.types() + } +} + +/// When type checking, we use the `ParameterEnvironment` to track +/// details about the type/lifetime parameters that are in scope. +/// It primarily stores the bounds information. +/// +/// Note: This information might seem to be redundant with the data in +/// `tcx.ty_param_defs`, but it is not. That table contains the +/// parameter definitions from an "outside" perspective, but this +/// struct will contain the bounds for a parameter as seen from inside +/// the function body. Currently the only real distinction is that +/// bound lifetime parameters are replaced with free ones, but in the +/// future I hope to refine the representation of types so as to make +/// more distinctions clearer. +#[derive(Clone)] +pub struct ParameterEnvironment<'tcx> { + /// See `construct_free_substs` for details. + pub free_substs: &'tcx Substs<'tcx>, + + /// Each type parameter has an implicit region bound that + /// indicates it must outlive at least the function body (the user + /// may specify stronger requirements). 
This field indicates the + /// region of the callee. + pub implicit_region_bound: &'tcx ty::Region, + + /// Obligations that the caller must satisfy. This is basically + /// the set of bounds on the in-scope type parameters, translated + /// into Obligations, and elaborated and normalized. + pub caller_bounds: Vec>, + + /// Scope that is attached to free regions for this scope. This + /// is usually the id of the fn body, but for more abstract scopes + /// like structs we often use the node-id of the struct. + /// + /// FIXME(#3696). It would be nice to refactor so that free + /// regions don't have this implicit scope and instead introduce + /// relationships in the environment. + pub free_id_outlive: CodeExtent, + + /// A cache for `moves_by_default`. + pub is_copy_cache: RefCell, bool>>, + + /// A cache for `type_is_sized` + pub is_sized_cache: RefCell, bool>>, +} + +impl<'a, 'tcx> ParameterEnvironment<'tcx> { + pub fn with_caller_bounds(&self, + caller_bounds: Vec>) + -> ParameterEnvironment<'tcx> + { + ParameterEnvironment { + free_substs: self.free_substs, + implicit_region_bound: self.implicit_region_bound, + caller_bounds: caller_bounds, + free_id_outlive: self.free_id_outlive, + is_copy_cache: RefCell::new(FxHashMap()), + is_sized_cache: RefCell::new(FxHashMap()), + } + } + + /// Construct a parameter environment given an item, impl item, or trait item + pub fn for_item(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: NodeId) + -> ParameterEnvironment<'tcx> { + match tcx.map.find(id) { + Some(ast_map::NodeImplItem(ref impl_item)) => { + match impl_item.node { + hir::ImplItemKind::Type(_) | hir::ImplItemKind::Const(..) 
=> { + // associated types don't have their own entry (for some reason), + // so for now just grab environment for the impl + let impl_id = tcx.map.get_parent(id); + let impl_def_id = tcx.map.local_def_id(impl_id); + tcx.construct_parameter_environment(impl_item.span, + impl_def_id, + tcx.region_maps.item_extent(id)) + } + hir::ImplItemKind::Method(_, ref body) => { + tcx.construct_parameter_environment( + impl_item.span, + tcx.map.local_def_id(id), + tcx.region_maps.call_site_extent(id, body.node_id())) + } + } + } + Some(ast_map::NodeTraitItem(trait_item)) => { + match trait_item.node { + hir::TypeTraitItem(..) | hir::ConstTraitItem(..) => { + // associated types don't have their own entry (for some reason), + // so for now just grab environment for the trait + let trait_id = tcx.map.get_parent(id); + let trait_def_id = tcx.map.local_def_id(trait_id); + tcx.construct_parameter_environment(trait_item.span, + trait_def_id, + tcx.region_maps.item_extent(id)) + } + hir::MethodTraitItem(_, ref body) => { + // Use call-site for extent (unless this is a + // trait method with no default; then fallback + // to the method id). + let extent = if let Some(body_id) = *body { + // default impl: use call_site extent as free_id_outlive bound. + tcx.region_maps.call_site_extent(id, body_id.node_id()) + } else { + // no default impl: use item extent as free_id_outlive bound. + tcx.region_maps.item_extent(id) + }; + tcx.construct_parameter_environment( + trait_item.span, + tcx.map.local_def_id(id), + extent) + } + } + } + Some(ast_map::NodeItem(item)) => { + match item.node { + hir::ItemFn(.., body_id) => { + // We assume this is a function. + let fn_def_id = tcx.map.local_def_id(id); + + tcx.construct_parameter_environment( + item.span, + fn_def_id, + tcx.region_maps.call_site_extent(id, body_id.node_id())) + } + hir::ItemEnum(..) | + hir::ItemStruct(..) | + hir::ItemUnion(..) | + hir::ItemTy(..) | + hir::ItemImpl(..) | + hir::ItemConst(..) | + hir::ItemStatic(..) 
=> { + let def_id = tcx.map.local_def_id(id); + tcx.construct_parameter_environment(item.span, + def_id, + tcx.region_maps.item_extent(id)) + } + hir::ItemTrait(..) => { + let def_id = tcx.map.local_def_id(id); + tcx.construct_parameter_environment(item.span, + def_id, + tcx.region_maps.item_extent(id)) + } + _ => { + span_bug!(item.span, + "ParameterEnvironment::for_item(): + can't create a parameter \ + environment for this kind of item") + } + } + } + Some(ast_map::NodeExpr(expr)) => { + // This is a convenience to allow closures to work. + if let hir::ExprClosure(.., body, _) = expr.node { + let def_id = tcx.map.local_def_id(id); + let base_def_id = tcx.closure_base_def_id(def_id); + tcx.construct_parameter_environment( + expr.span, + base_def_id, + tcx.region_maps.call_site_extent(id, body.node_id())) + } else { + tcx.empty_parameter_environment() + } + } + Some(ast_map::NodeForeignItem(item)) => { + let def_id = tcx.map.local_def_id(id); + tcx.construct_parameter_environment(item.span, + def_id, + ROOT_CODE_EXTENT) + } + _ => { + bug!("ParameterEnvironment::from_item(): \ + `{}` is not an item", + tcx.map.node_to_string(id)) + } + } + } +} + +bitflags! { + flags AdtFlags: u32 { + const NO_ADT_FLAGS = 0, + const IS_ENUM = 1 << 0, + const IS_DTORCK = 1 << 1, // is this a dtorck type? + const IS_DTORCK_VALID = 1 << 2, + const IS_PHANTOM_DATA = 1 << 3, + const IS_SIMD = 1 << 4, + const IS_FUNDAMENTAL = 1 << 5, + const IS_UNION = 1 << 6, + } +} + +pub struct VariantDef { + /// The variant's DefId. If this is a tuple-like struct, + /// this is the DefId of the struct's ctor. + pub did: DefId, + pub name: Name, // struct's name if this is a struct + pub disr_val: Disr, + pub fields: Vec, + pub ctor_kind: CtorKind, +} + +pub struct FieldDef { + pub did: DefId, + pub name: Name, + pub vis: Visibility, +} + +/// The definition of an abstract data type - a struct or enum. +/// +/// These are all interned (by intern_adt_def) into the adt_defs +/// table. 
+pub struct AdtDef { + pub did: DefId, + pub variants: Vec, + destructor: Cell>, + flags: Cell +} + +impl PartialEq for AdtDef { + // AdtDef are always interned and this is part of TyS equality + #[inline] + fn eq(&self, other: &Self) -> bool { self as *const _ == other as *const _ } +} + +impl Eq for AdtDef {} + +impl Hash for AdtDef { + #[inline] + fn hash(&self, s: &mut H) { + (self as *const AdtDef).hash(s) + } +} + +impl<'tcx> serialize::UseSpecializedEncodable for &'tcx AdtDef { + fn default_encode(&self, s: &mut S) -> Result<(), S::Error> { + self.did.encode(s) + } +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx AdtDef {} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum AdtKind { Struct, Union, Enum } + +impl<'a, 'gcx, 'tcx> AdtDef { + fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, + did: DefId, + kind: AdtKind, + variants: Vec) -> Self { + let mut flags = AdtFlags::NO_ADT_FLAGS; + let attrs = tcx.get_attrs(did); + if attr::contains_name(&attrs, "fundamental") { + flags = flags | AdtFlags::IS_FUNDAMENTAL; + } + if tcx.lookup_simd(did) { + flags = flags | AdtFlags::IS_SIMD; + } + if Some(did) == tcx.lang_items.phantom_data() { + flags = flags | AdtFlags::IS_PHANTOM_DATA; + } + match kind { + AdtKind::Enum => flags = flags | AdtFlags::IS_ENUM, + AdtKind::Union => flags = flags | AdtFlags::IS_UNION, + AdtKind::Struct => {} + } + AdtDef { + did: did, + variants: variants, + flags: Cell::new(flags), + destructor: Cell::new(None), + } + } + + fn calculate_dtorck(&'gcx self, tcx: TyCtxt) { + if tcx.is_adt_dtorck(self) { + self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK); + } + self.flags.set(self.flags.get() | AdtFlags::IS_DTORCK_VALID) + } + + #[inline] + pub fn is_uninhabited_recurse(&self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + block: Option, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>) -> bool { + if !visited.insert((self.did, substs)) { + return false; + }; + self.variants.iter().all(|v| { + 
v.is_uninhabited_recurse(visited, block, tcx, substs, self.is_union()) + }) + } + + #[inline] + pub fn is_struct(&self) -> bool { + !self.is_union() && !self.is_enum() + } + + #[inline] + pub fn is_union(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_UNION) + } + + #[inline] + pub fn is_enum(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_ENUM) + } + + /// Returns the kind of the ADT - Struct or Enum. + #[inline] + pub fn adt_kind(&self) -> AdtKind { + if self.is_enum() { + AdtKind::Enum + } else if self.is_union() { + AdtKind::Union + } else { + AdtKind::Struct + } + } + + pub fn descr(&self) -> &'static str { + match self.adt_kind() { + AdtKind::Struct => "struct", + AdtKind::Union => "union", + AdtKind::Enum => "enum", + } + } + + pub fn variant_descr(&self) -> &'static str { + match self.adt_kind() { + AdtKind::Struct => "struct", + AdtKind::Union => "union", + AdtKind::Enum => "variant", + } + } + + /// Returns whether this is a dtorck type. If this returns + /// true, this type being safe for destruction requires it to be + /// alive; Otherwise, only the contents are required to be. + #[inline] + pub fn is_dtorck(&'gcx self, tcx: TyCtxt) -> bool { + if !self.flags.get().intersects(AdtFlags::IS_DTORCK_VALID) { + self.calculate_dtorck(tcx) + } + self.flags.get().intersects(AdtFlags::IS_DTORCK) + } + + /// Returns whether this type is #[fundamental] for the purposes + /// of coherence checking. + #[inline] + pub fn is_fundamental(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_FUNDAMENTAL) + } + + #[inline] + pub fn is_simd(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_SIMD) + } + + /// Returns true if this is PhantomData. + #[inline] + pub fn is_phantom_data(&self) -> bool { + self.flags.get().intersects(AdtFlags::IS_PHANTOM_DATA) + } + + /// Returns whether this type has a destructor. 
+ pub fn has_dtor(&self) -> bool { + self.dtor_kind().is_present() + } + + /// Asserts this is a struct and returns the struct's unique + /// variant. + pub fn struct_variant(&self) -> &VariantDef { + assert!(!self.is_enum()); + &self.variants[0] + } + + #[inline] + pub fn predicates(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> GenericPredicates<'gcx> { + tcx.item_predicates(self.did) + } + + /// Returns an iterator over all fields contained + /// by this ADT. + #[inline] + pub fn all_fields<'s>(&'s self) -> impl Iterator { + self.variants.iter().flat_map(|v| v.fields.iter()) + } + + #[inline] + pub fn is_univariant(&self) -> bool { + self.variants.len() == 1 + } + + pub fn is_payloadfree(&self) -> bool { + !self.variants.is_empty() && + self.variants.iter().all(|v| v.fields.is_empty()) + } + + pub fn variant_with_id(&self, vid: DefId) -> &VariantDef { + self.variants + .iter() + .find(|v| v.did == vid) + .expect("variant_with_id: unknown variant") + } + + pub fn variant_index_with_id(&self, vid: DefId) -> usize { + self.variants + .iter() + .position(|v| v.did == vid) + .expect("variant_index_with_id: unknown variant") + } + + pub fn variant_of_def(&self, def: Def) -> &VariantDef { + match def { + Def::Variant(vid) | Def::VariantCtor(vid, ..) => self.variant_with_id(vid), + Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | + Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) => self.struct_variant(), + _ => bug!("unexpected def {:?} in variant_of_def", def) + } + } + + pub fn destructor(&self) -> Option { + self.destructor.get() + } + + pub fn set_destructor(&self, dtor: DefId) { + self.destructor.set(Some(dtor)); + } + + pub fn dtor_kind(&self) -> DtorKind { + match self.destructor.get() { + Some(_) => TraitDtor, + None => NoDtor, + } + } + + /// Returns a simpler type such that `Self: Sized` if and only + /// if that type is Sized, or `TyErr` if this type is recursive. 
+ /// + /// HACK: instead of returning a list of types, this function can + /// return a tuple. In that case, the result is Sized only if + /// all elements of the tuple are Sized. + /// + /// This is generally the `struct_tail` if this is a struct, or a + /// tuple of them if this is an enum. + /// + /// Oddly enough, checking that the sized-constraint is Sized is + /// actually more expressive than checking all members: + /// the Sized trait is inductive, so an associated type that references + /// Self would prevent its containing ADT from being Sized. + /// + /// Due to normalization being eager, this applies even if + /// the associated type is behind a pointer, e.g. issue #31299. + pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + self.calculate_sized_constraint_inner(tcx.global_tcx(), &mut Vec::new()) + } + + /// Calculates the Sized-constraint. + /// + /// As the Sized-constraint of enums can be a *set* of types, + /// the Sized-constraint may need to be a set also. Because introducing + /// a new type of IVar is currently a complex affair, the Sized-constraint + /// may be a tuple. + /// + /// In fact, there are only a few options for the constraint: + /// - `bool`, if the type is always Sized + /// - an obviously-unsized type + /// - a type parameter or projection whose Sizedness can't be known + /// - a tuple of type parameters or projections, if there are multiple + /// such. + /// - a TyError, if a type contained itself. The representability + /// check should catch this case. + fn calculate_sized_constraint_inner(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + stack: &mut Vec) + -> Ty<'tcx> + { + if let Some(ty) = tcx.adt_sized_constraint.borrow().get(&self.did) { + return ty; + } + + // Follow the memoization pattern: push the computation of + // DepNode::SizedConstraint as our current task. 
+ let _task = tcx.dep_graph.in_task(DepNode::SizedConstraint(self.did)); + + if stack.contains(&self.did) { + debug!("calculate_sized_constraint: {:?} is recursive", self); + // This should be reported as an error by `check_representable`. + // + // Consider the type as Sized in the meanwhile to avoid + // further errors. + tcx.adt_sized_constraint.borrow_mut().insert(self.did, tcx.types.err); + return tcx.types.err; + } + + stack.push(self.did); + + let tys : Vec<_> = + self.variants.iter().flat_map(|v| { + v.fields.last() + }).flat_map(|f| { + let ty = tcx.item_type(f.did); + self.sized_constraint_for_ty(tcx, stack, ty) + }).collect(); + + let self_ = stack.pop().unwrap(); + assert_eq!(self_, self.did); + + let ty = match tys.len() { + _ if tys.references_error() => tcx.types.err, + 0 => tcx.types.bool, + 1 => tys[0], + _ => tcx.intern_tup(&tys[..]) + }; + + let old = tcx.adt_sized_constraint.borrow().get(&self.did).cloned(); + match old { + Some(old_ty) => { + debug!("calculate_sized_constraint: {:?} recurred", self); + assert_eq!(old_ty, tcx.types.err); + old_ty + } + None => { + debug!("calculate_sized_constraint: {:?} => {:?}", self, ty); + tcx.adt_sized_constraint.borrow_mut().insert(self.did, ty); + ty + } + } + } + + fn sized_constraint_for_ty(&self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + stack: &mut Vec, + ty: Ty<'tcx>) + -> Vec> { + let result = match ty.sty { + TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | + TyBox(..) | TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) | + TyArray(..) | TyClosure(..) | TyNever => { + vec![] + } + + TyStr | TyDynamic(..) 
| TySlice(_) | TyError => { + // these are never sized - return the target type + vec![ty] + } + + TyTuple(ref tys) => { + match tys.last() { + None => vec![], + Some(ty) => self.sized_constraint_for_ty(tcx, stack, ty) + } + } + + TyAdt(adt, substs) => { + // recursive case + let adt_ty = + adt.calculate_sized_constraint_inner(tcx, stack) + .subst(tcx, substs); + debug!("sized_constraint_for_ty({:?}) intermediate = {:?}", + ty, adt_ty); + if let ty::TyTuple(ref tys) = adt_ty.sty { + tys.iter().flat_map(|ty| { + self.sized_constraint_for_ty(tcx, stack, ty) + }).collect() + } else { + self.sized_constraint_for_ty(tcx, stack, adt_ty) + } + } + + TyProjection(..) | TyAnon(..) => { + // must calculate explicitly. + // FIXME: consider special-casing always-Sized projections + vec![ty] + } + + TyParam(..) => { + // perf hack: if there is a `T: Sized` bound, then + // we know that `T` is Sized and do not need to check + // it on the impl. + + let sized_trait = match tcx.lang_items.sized_trait() { + Some(x) => x, + _ => return vec![ty] + }; + let sized_predicate = Binder(TraitRef { + def_id: sized_trait, + substs: tcx.mk_substs_trait(ty, &[]) + }).to_predicate(); + let predicates = tcx.item_predicates(self.did).predicates; + if predicates.into_iter().any(|p| p == sized_predicate) { + vec![] + } else { + vec![ty] + } + } + + TyInfer(..) 
=> { + bug!("unexpected type `{:?}` in sized_constraint_for_ty", + ty) + } + }; + debug!("sized_constraint_for_ty({:?}) = {:?}", ty, result); + result + } +} + +impl<'a, 'gcx, 'tcx> VariantDef { + #[inline] + pub fn find_field_named(&self, + name: ast::Name) + -> Option<&FieldDef> { + self.fields.iter().find(|f| f.name == name) + } + + #[inline] + pub fn index_of_field_named(&self, + name: ast::Name) + -> Option { + self.fields.iter().position(|f| f.name == name) + } + + #[inline] + pub fn field_named(&self, name: ast::Name) -> &FieldDef { + self.find_field_named(name).unwrap() + } + + #[inline] + pub fn is_uninhabited_recurse(&self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + block: Option, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>, + is_union: bool) -> bool { + if is_union { + self.fields.iter().all(|f| f.is_uninhabited_recurse(visited, block, tcx, substs)) + } else { + self.fields.iter().any(|f| f.is_uninhabited_recurse(visited, block, tcx, substs)) + } + } +} + +impl<'a, 'gcx, 'tcx> FieldDef { + pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> { + tcx.item_type(self.did).subst(tcx, subst) + } + + #[inline] + pub fn is_uninhabited_recurse(&self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + block: Option, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'tcx Substs<'tcx>) -> bool { + block.map_or(true, |b| self.vis.is_accessible_from(b, &tcx.map)) && + self.ty(tcx, substs).is_uninhabited_recurse(visited, block, tcx) + } +} + +/// Records the substitutions used to translate the polytype for an +/// item into the monotype of an item reference. +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct ItemSubsts<'tcx> { + pub substs: &'tcx Substs<'tcx>, +} + +#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub enum ClosureKind { + // Warning: Ordering is significant here! 
The ordering is chosen + // because the trait Fn is a subtrait of FnMut and so in turn, and + // hence we order it so that Fn < FnMut < FnOnce. + Fn, + FnMut, + FnOnce, +} + +impl<'a, 'tcx> ClosureKind { + pub fn trait_did(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> DefId { + match *self { + ClosureKind::Fn => tcx.require_lang_item(FnTraitLangItem), + ClosureKind::FnMut => { + tcx.require_lang_item(FnMutTraitLangItem) + } + ClosureKind::FnOnce => { + tcx.require_lang_item(FnOnceTraitLangItem) + } + } + } + + /// True if this a type that impls this closure kind + /// must also implement `other`. + pub fn extends(self, other: ty::ClosureKind) -> bool { + match (self, other) { + (ClosureKind::Fn, ClosureKind::Fn) => true, + (ClosureKind::Fn, ClosureKind::FnMut) => true, + (ClosureKind::Fn, ClosureKind::FnOnce) => true, + (ClosureKind::FnMut, ClosureKind::FnMut) => true, + (ClosureKind::FnMut, ClosureKind::FnOnce) => true, + (ClosureKind::FnOnce, ClosureKind::FnOnce) => true, + _ => false, + } + } +} + +impl<'tcx> TyS<'tcx> { + /// Iterator that walks `self` and any types reachable from + /// `self`, in depth-first order. Note that just walks the types + /// that appear in `self`, it does not descend into the fields of + /// structs or variants. For example: + /// + /// ```notrust + /// isize => { isize } + /// Foo> => { Foo>, Bar, isize } + /// [isize] => { [isize], isize } + /// ``` + pub fn walk(&'tcx self) -> TypeWalker<'tcx> { + TypeWalker::new(self) + } + + /// Iterator that walks the immediate children of `self`. Hence + /// `Foo, u32>` yields the sequence `[Bar, u32]` + /// (but not `i32`, like `walk`). + pub fn walk_shallow(&'tcx self) -> AccIntoIter> { + walk::walk_shallow(self) + } + + /// Walks `ty` and any types appearing within `ty`, invoking the + /// callback `f` on each type. If the callback returns false, then the + /// children of the current type are ignored. + /// + /// Note: prefer `ty.walk()` where possible. 
+ pub fn maybe_walk(&'tcx self, mut f: F) + where F : FnMut(Ty<'tcx>) -> bool + { + let mut walker = self.walk(); + while let Some(ty) = walker.next() { + if !f(ty) { + walker.skip_current_subtree(); + } + } + } +} + +impl<'tcx> ItemSubsts<'tcx> { + pub fn is_noop(&self) -> bool { + self.substs.is_noop() + } +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum LvaluePreference { + PreferMutLvalue, + NoPreference +} + +impl LvaluePreference { + pub fn from_mutbl(m: hir::Mutability) -> Self { + match m { + hir::MutMutable => PreferMutLvalue, + hir::MutImmutable => NoPreference, + } + } +} + +/// Helper for looking things up in the various maps that are populated during +/// typeck::collect (e.g., `tcx.associated_items`, `tcx.types`, etc). All of +/// these share the pattern that if the id is local, it should have been loaded +/// into the map by the `typeck::collect` phase. If the def-id is external, +/// then we have to go consult the crate loading code (and cache the result for +/// the future). +fn lookup_locally_or_in_crate_store(descr: &str, + def_id: DefId, + map: &M, + load_external: F) + -> M::Value where + M: MemoizationMap, + F: FnOnce() -> M::Value, +{ + map.memoize(def_id, || { + if def_id.is_local() { + bug!("No def'n found for {:?} in tcx.{}", def_id, descr); + } + load_external() + }) +} + +impl BorrowKind { + pub fn from_mutbl(m: hir::Mutability) -> BorrowKind { + match m { + hir::MutMutable => MutBorrow, + hir::MutImmutable => ImmBorrow, + } + } + + /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow + /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a + /// mutability that is stronger than necessary so that it at least *would permit* the borrow in + /// question. 
+ pub fn to_mutbl_lossy(self) -> hir::Mutability { + match self { + MutBorrow => hir::MutMutable, + ImmBorrow => hir::MutImmutable, + + // We have no type corresponding to a unique imm borrow, so + // use `&mut`. It gives all the capabilities of an `&uniq` + // and hence is a safe "over approximation". + UniqueImmBorrow => hir::MutMutable, + } + } + + pub fn to_user_str(&self) -> &'static str { + match *self { + MutBorrow => "mutable", + ImmBorrow => "immutable", + UniqueImmBorrow => "uniquely immutable", + } + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn tables(self) -> Ref<'a, Tables<'gcx>> { + self.tables.borrow() + } + + pub fn expr_span(self, id: NodeId) -> Span { + match self.map.find(id) { + Some(ast_map::NodeExpr(e)) => { + e.span + } + Some(f) => { + bug!("Node id {} is not an expr: {:?}", id, f); + } + None => { + bug!("Node id {} is not present in the node map", id); + } + } + } + + pub fn local_var_name_str(self, id: NodeId) -> InternedString { + match self.map.find(id) { + Some(ast_map::NodeLocal(pat)) => { + match pat.node { + hir::PatKind::Binding(_, _, ref path1, _) => path1.node.as_str(), + _ => { + bug!("Variable id {} maps to {:?}, not local", id, pat); + }, + } + }, + r => bug!("Variable id {} maps to {:?}, not local", id, r), + } + } + + pub fn expr_is_lval(self, expr: &hir::Expr) -> bool { + match expr.node { + hir::ExprPath(hir::QPath::Resolved(_, ref path)) => { + match path.def { + Def::Local(..) | Def::Upvar(..) | Def::Static(..) | Def::Err => true, + _ => false, + } + } + + hir::ExprType(ref e, _) => { + self.expr_is_lval(e) + } + + hir::ExprUnary(hir::UnDeref, _) | + hir::ExprField(..) | + hir::ExprTupField(..) | + hir::ExprIndex(..) => { + true + } + + // Partially qualified paths in expressions can only legally + // refer to associated items which are always rvalues. + hir::ExprPath(hir::QPath::TypeRelative(..)) | + + hir::ExprCall(..) | + hir::ExprMethodCall(..) | + hir::ExprStruct(..) | + hir::ExprTup(..) 
| + hir::ExprIf(..) | + hir::ExprMatch(..) | + hir::ExprClosure(..) | + hir::ExprBlock(..) | + hir::ExprRepeat(..) | + hir::ExprArray(..) | + hir::ExprBreak(..) | + hir::ExprAgain(..) | + hir::ExprRet(..) | + hir::ExprWhile(..) | + hir::ExprLoop(..) | + hir::ExprAssign(..) | + hir::ExprInlineAsm(..) | + hir::ExprAssignOp(..) | + hir::ExprLit(_) | + hir::ExprUnary(..) | + hir::ExprBox(..) | + hir::ExprAddrOf(..) | + hir::ExprBinary(..) | + hir::ExprCast(..) => { + false + } + } + } + + pub fn provided_trait_methods(self, id: DefId) -> Vec { + self.associated_items(id) + .filter(|item| item.kind == AssociatedKind::Method && item.defaultness.has_value()) + .collect() + } + + pub fn trait_impl_polarity(self, id: DefId) -> hir::ImplPolarity { + if let Some(id) = self.map.as_local_node_id(id) { + match self.map.expect_item(id).node { + hir::ItemImpl(_, polarity, ..) => polarity, + ref item => bug!("trait_impl_polarity: {:?} not an impl", item) + } + } else { + self.sess.cstore.impl_polarity(id) + } + } + + pub fn custom_coerce_unsized_kind(self, did: DefId) -> adjustment::CustomCoerceUnsized { + self.custom_coerce_unsized_kinds.memoize(did, || { + let (kind, src) = if did.krate != LOCAL_CRATE { + (self.sess.cstore.custom_coerce_unsized_kind(did), "external") + } else { + (None, "local") + }; + + match kind { + Some(kind) => kind, + None => { + bug!("custom_coerce_unsized_kind: \ + {} impl `{}` is missing its kind", + src, self.item_path_str(did)); + } + } + }) + } + + pub fn associated_item(self, def_id: DefId) -> AssociatedItem { + self.associated_items.memoize(def_id, || { + if !def_id.is_local() { + return self.sess.cstore.associated_item(self.global_tcx(), def_id) + .expect("missing AssociatedItem in metadata"); + } + + // When the user asks for a given associated item, we + // always go ahead and convert all the associated items in + // the container. 
Note that we are also careful only to + // ever register a read on the *container* of the assoc + // item, not the assoc item itself. This prevents changes + // in the details of an item (for example, the type to + // which an associated type is bound) from contaminating + // those tasks that just need to scan the names of items + // and so forth. + + let id = self.map.as_local_node_id(def_id).unwrap(); + let parent_id = self.map.get_parent(id); + let parent_def_id = self.map.local_def_id(parent_id); + let parent_item = self.map.expect_item(parent_id); + match parent_item.node { + hir::ItemImpl(.., ref impl_trait_ref, _, ref impl_item_refs) => { + for impl_item_ref in impl_item_refs { + let assoc_item = + self.associated_item_from_impl_item_ref(parent_def_id, + impl_trait_ref.is_some(), + impl_item_ref); + self.associated_items.borrow_mut().insert(assoc_item.def_id, assoc_item); + } + } + + hir::ItemTrait(.., ref trait_items) => { + for trait_item in trait_items { + let assoc_item = + self.associated_item_from_trait_item_ref(parent_def_id, trait_item); + self.associated_items.borrow_mut().insert(assoc_item.def_id, assoc_item); + } + } + + ref r => { + panic!("unexpected container of associated items: {:?}", r) + } + } + + // memoize wants us to return something, so return + // the one we generated for this def-id + *self.associated_items.borrow().get(&def_id).unwrap() + }) + } + + fn associated_item_from_trait_item_ref(self, + parent_def_id: DefId, + trait_item: &hir::TraitItem) + -> AssociatedItem { + let def_id = self.map.local_def_id(trait_item.id); + + let (kind, has_self, has_value) = match trait_item.node { + hir::MethodTraitItem(ref sig, ref body) => { + (AssociatedKind::Method, sig.decl.get_self().is_some(), + body.is_some()) + } + hir::ConstTraitItem(_, ref value) => { + (AssociatedKind::Const, false, value.is_some()) + } + hir::TypeTraitItem(_, ref ty) => { + (AssociatedKind::Type, false, ty.is_some()) + } + }; + + AssociatedItem { + name: 
trait_item.name, + kind: kind, + vis: Visibility::from_hir(&hir::Inherited, trait_item.id, self), + defaultness: hir::Defaultness::Default { has_value: has_value }, + def_id: def_id, + container: TraitContainer(parent_def_id), + method_has_self_argument: has_self + } + } + + fn associated_item_from_impl_item_ref(self, + parent_def_id: DefId, + from_trait_impl: bool, + impl_item_ref: &hir::ImplItemRef) + -> AssociatedItem { + let def_id = self.map.local_def_id(impl_item_ref.id.node_id); + let (kind, has_self) = match impl_item_ref.kind { + hir::AssociatedItemKind::Const => (ty::AssociatedKind::Const, false), + hir::AssociatedItemKind::Method { has_self } => { + (ty::AssociatedKind::Method, has_self) + } + hir::AssociatedItemKind::Type => (ty::AssociatedKind::Type, false), + }; + + // Trait impl items are always public. + let public = hir::Public; + let vis = if from_trait_impl { &public } else { &impl_item_ref.vis }; + + ty::AssociatedItem { + name: impl_item_ref.name, + kind: kind, + vis: ty::Visibility::from_hir(vis, impl_item_ref.id.node_id, self), + defaultness: impl_item_ref.defaultness, + def_id: def_id, + container: ImplContainer(parent_def_id), + method_has_self_argument: has_self + } + } + + pub fn associated_item_def_ids(self, def_id: DefId) -> Rc> { + self.associated_item_def_ids.memoize(def_id, || { + if !def_id.is_local() { + return Rc::new(self.sess.cstore.associated_item_def_ids(def_id)); + } + + let id = self.map.as_local_node_id(def_id).unwrap(); + let item = self.map.expect_item(id); + let vec: Vec<_> = match item.node { + hir::ItemTrait(.., ref trait_items) => { + trait_items.iter() + .map(|trait_item| trait_item.id) + .map(|id| self.map.local_def_id(id)) + .collect() + } + hir::ItemImpl(.., ref impl_item_refs) => { + impl_item_refs.iter() + .map(|impl_item_ref| impl_item_ref.id) + .map(|id| self.map.local_def_id(id.node_id)) + .collect() + } + _ => span_bug!(item.span, "associated_item_def_ids: not impl or trait") + }; + Rc::new(vec) + }) + } + + 
#[inline] // FIXME(#35870) Avoid closures being unexported due to impl Trait. + pub fn associated_items(self, def_id: DefId) + -> impl Iterator + 'a { + let def_ids = self.associated_item_def_ids(def_id); + (0..def_ids.len()).map(move |i| self.associated_item(def_ids[i])) + } + + /// Returns the trait-ref corresponding to a given impl, or None if it is + /// an inherent impl. + pub fn impl_trait_ref(self, id: DefId) -> Option> { + lookup_locally_or_in_crate_store( + "impl_trait_refs", id, &self.impl_trait_refs, + || self.sess.cstore.impl_trait_ref(self.global_tcx(), id)) + } + + // Returns `ty::VariantDef` if `def` refers to a struct, + // or variant or their constructors, panics otherwise. + pub fn expect_variant_def(self, def: Def) -> &'tcx VariantDef { + match def { + Def::Variant(did) | Def::VariantCtor(did, ..) => { + let enum_did = self.parent_def_id(did).unwrap(); + self.lookup_adt_def(enum_did).variant_with_id(did) + } + Def::Struct(did) | Def::Union(did) => { + self.lookup_adt_def(did).struct_variant() + } + Def::StructCtor(ctor_did, ..) => { + let did = self.parent_def_id(ctor_did).expect("struct ctor has no parent"); + self.lookup_adt_def(did).struct_variant() + } + _ => bug!("expect_variant_def used with unexpected def {:?}", def) + } + } + + pub fn def_key(self, id: DefId) -> ast_map::DefKey { + if id.is_local() { + self.map.def_key(id) + } else { + self.sess.cstore.def_key(id) + } + } + + /// Convert a `DefId` into its fully expanded `DefPath` (every + /// `DefId` is really just an interned def-path). + /// + /// Note that if `id` is not local to this crate -- or is + /// inlined into this crate -- the result will be a non-local + /// `DefPath`. + /// + /// This function is only safe to use when you are sure that the + /// full def-path is accessible. Examples that are known to be + /// safe are local def-ids or items; see `opt_def_path` for more + /// details. 
+ pub fn def_path(self, id: DefId) -> ast_map::DefPath { + self.opt_def_path(id).unwrap_or_else(|| { + bug!("could not load def-path for {:?}", id) + }) + } + + /// Convert a `DefId` into its fully expanded `DefPath` (every + /// `DefId` is really just an interned def-path). + /// + /// When going across crates, we do not save the full info for + /// every cross-crate def-id, and hence we may not always be able + /// to create a def-path. Therefore, this returns + /// `Option` to cover that possibility. It will always + /// return `Some` for local def-ids, however, as well as for + /// items. The problems arise with "minor" def-ids like those + /// associated with a pattern, `impl Trait`, or other internal + /// detail to a fn. + /// + /// Note that if `id` is not local to this crate -- or is + /// inlined into this crate -- the result will be a non-local + /// `DefPath`. + pub fn opt_def_path(self, id: DefId) -> Option { + if id.is_local() { + Some(self.map.def_path(id)) + } else { + self.sess.cstore.relative_def_path(id) + } + } + + pub fn def_span(self, def_id: DefId) -> Span { + if let Some(id) = self.map.as_local_node_id(def_id) { + self.map.span(id) + } else { + self.sess.cstore.def_span(&self.sess, def_id) + } + } + + pub fn item_name(self, id: DefId) -> ast::Name { + if let Some(id) = self.map.as_local_node_id(id) { + self.map.name(id) + } else if id.index == CRATE_DEF_INDEX { + self.sess.cstore.original_crate_name(id.krate) + } else { + let def_key = self.sess.cstore.def_key(id); + // The name of a StructCtor is that of its struct parent. + if let ast_map::DefPathData::StructCtor = def_key.disambiguated_data.data { + self.item_name(DefId { + krate: id.krate, + index: def_key.parent.unwrap() + }) + } else { + def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| { + bug!("item_name: no name for {:?}", self.def_path(id)); + }) + } + } + } + + // If the given item is in an external crate, looks up its type and adds it to + // the type cache. 
Returns the type parameters and type. + pub fn item_type(self, did: DefId) -> Ty<'gcx> { + lookup_locally_or_in_crate_store( + "item_types", did, &self.item_types, + || self.sess.cstore.item_type(self.global_tcx(), did)) + } + + /// Given the did of a trait, returns its canonical trait ref. + pub fn lookup_trait_def(self, did: DefId) -> &'gcx TraitDef { + lookup_locally_or_in_crate_store( + "trait_defs", did, &self.trait_defs, + || self.alloc_trait_def(self.sess.cstore.trait_def(self.global_tcx(), did)) + ) + } + + /// Given the did of an ADT, return a reference to its definition. + pub fn lookup_adt_def(self, did: DefId) -> &'gcx AdtDef { + lookup_locally_or_in_crate_store( + "adt_defs", did, &self.adt_defs, + || self.sess.cstore.adt_def(self.global_tcx(), did)) + } + + /// Given the did of an item, returns its generics. + pub fn item_generics(self, did: DefId) -> &'gcx Generics<'gcx> { + lookup_locally_or_in_crate_store( + "generics", did, &self.generics, + || self.alloc_generics(self.sess.cstore.item_generics(self.global_tcx(), did))) + } + + /// Given the did of an item, returns its full set of predicates. + pub fn item_predicates(self, did: DefId) -> GenericPredicates<'gcx> { + lookup_locally_or_in_crate_store( + "predicates", did, &self.predicates, + || self.sess.cstore.item_predicates(self.global_tcx(), did)) + } + + /// Given the did of a trait, returns its superpredicates. + pub fn item_super_predicates(self, did: DefId) -> GenericPredicates<'gcx> { + lookup_locally_or_in_crate_store( + "super_predicates", did, &self.super_predicates, + || self.sess.cstore.item_super_predicates(self.global_tcx(), did)) + } + + /// Given the did of an item, returns its MIR, borrowed immutably. 
+ pub fn item_mir(self, did: DefId) -> Ref<'gcx, Mir<'gcx>> { + lookup_locally_or_in_crate_store("mir_map", did, &self.mir_map, || { + let mir = self.sess.cstore.get_item_mir(self.global_tcx(), did); + let mir = self.alloc_mir(mir); + + // Perma-borrow MIR from extern crates to prevent mutation. + mem::forget(mir.borrow()); + + mir + }).borrow() + } + + /// If `type_needs_drop` returns true, then `ty` is definitely + /// non-copy and *might* have a destructor attached; if it returns + /// false, then `ty` definitely has no destructor (i.e. no drop glue). + /// + /// (Note that this implies that if `ty` has a destructor attached, + /// then `type_needs_drop` will definitely return `true` for `ty`.) + pub fn type_needs_drop_given_env(self, + ty: Ty<'gcx>, + param_env: &ty::ParameterEnvironment<'gcx>) -> bool { + // Issue #22536: We first query type_moves_by_default. It sees a + // normalized version of the type, and therefore will definitely + // know whether the type implements Copy (and thus needs no + // cleanup/drop/zeroing) ... + let tcx = self.global_tcx(); + let implements_copy = !ty.moves_by_default(tcx, param_env, DUMMY_SP); + + if implements_copy { return false; } + + // ... (issue #22536 continued) but as an optimization, still use + // prior logic of asking if the `needs_drop` bit is set; we need + // not zero non-Copy types if they have no destructor. + + // FIXME(#22815): Note that calling `ty::type_contents` is a + // conservative heuristic; it may report that `needs_drop` is set + // when actual type does not actually have a destructor associated + // with it. But since `ty` absolutely did not have the `Copy` + // bound attached (see above), it is sound to treat it as having a + // destructor (e.g. zero its memory on move). + + let contents = ty.type_contents(tcx); + debug!("type_needs_drop ty={:?} contents={:?}", ty, contents); + contents.needs_drop(tcx) + } + + /// Get the attributes of a definition. 
+ pub fn get_attrs(self, did: DefId) -> Cow<'gcx, [ast::Attribute]> { + if let Some(id) = self.map.as_local_node_id(did) { + Cow::Borrowed(self.map.attrs(id)) + } else { + Cow::Owned(self.sess.cstore.item_attrs(did)) + } + } + + /// Determine whether an item is annotated with an attribute + pub fn has_attr(self, did: DefId, attr: &str) -> bool { + self.get_attrs(did).iter().any(|item| item.check_name(attr)) + } + + /// Determine whether an item is annotated with `#[repr(packed)]` + pub fn lookup_packed(self, did: DefId) -> bool { + self.lookup_repr_hints(did).contains(&attr::ReprPacked) + } + + /// Determine whether an item is annotated with `#[simd]` + pub fn lookup_simd(self, did: DefId) -> bool { + self.has_attr(did, "simd") + || self.lookup_repr_hints(did).contains(&attr::ReprSimd) + } + + pub fn item_variances(self, item_id: DefId) -> Rc> { + lookup_locally_or_in_crate_store( + "item_variance_map", item_id, &self.item_variance_map, + || Rc::new(self.sess.cstore.item_variances(item_id))) + } + + pub fn trait_has_default_impl(self, trait_def_id: DefId) -> bool { + self.populate_implementations_for_trait_if_necessary(trait_def_id); + + let def = self.lookup_trait_def(trait_def_id); + def.flags.get().intersects(TraitFlags::HAS_DEFAULT_IMPL) + } + + /// Records a trait-to-implementation mapping. + pub fn record_trait_has_default_impl(self, trait_def_id: DefId) { + let def = self.lookup_trait_def(trait_def_id); + def.flags.set(def.flags.get() | TraitFlags::HAS_DEFAULT_IMPL) + } + + /// Populates the type context with all the inherent implementations for + /// the given type if necessary. + pub fn populate_inherent_implementations_for_type_if_necessary(self, + type_id: DefId) { + if type_id.is_local() { + return + } + + // The type is not local, hence we are reading this out of + // metadata and don't need to track edges. 
+ let _ignore = self.dep_graph.in_ignore(); + + if self.populated_external_types.borrow().contains(&type_id) { + return + } + + debug!("populate_inherent_implementations_for_type_if_necessary: searching for {:?}", + type_id); + + let inherent_impls = self.sess.cstore.inherent_implementations_for_type(type_id); + + self.inherent_impls.borrow_mut().insert(type_id, inherent_impls); + self.populated_external_types.borrow_mut().insert(type_id); + } + + /// Populates the type context with all the implementations for the given + /// trait if necessary. + pub fn populate_implementations_for_trait_if_necessary(self, trait_id: DefId) { + if trait_id.is_local() { + return + } + + // The type is not local, hence we are reading this out of + // metadata and don't need to track edges. + let _ignore = self.dep_graph.in_ignore(); + + let def = self.lookup_trait_def(trait_id); + if def.flags.get().intersects(TraitFlags::IMPLS_VALID) { + return; + } + + debug!("populate_implementations_for_trait_if_necessary: searching for {:?}", def); + + if self.sess.cstore.is_defaulted_trait(trait_id) { + self.record_trait_has_default_impl(trait_id); + } + + for impl_def_id in self.sess.cstore.implementations_of_trait(Some(trait_id)) { + let trait_ref = self.impl_trait_ref(impl_def_id).unwrap(); + + // Record the trait->implementation mapping. + let parent = self.sess.cstore.impl_parent(impl_def_id).unwrap_or(trait_id); + def.record_remote_impl(self, impl_def_id, trait_ref, parent); + } + + def.flags.set(def.flags.get() | TraitFlags::IMPLS_VALID); + } + + pub fn closure_kind(self, def_id: DefId) -> ty::ClosureKind { + // If this is a local def-id, it should be inserted into the + // tables by typeck; else, it will be retreived from + // the external crate metadata. 
+ if let Some(&kind) = self.tables.borrow().closure_kinds.get(&def_id) { + return kind; + } + + let kind = self.sess.cstore.closure_kind(def_id); + self.tables.borrow_mut().closure_kinds.insert(def_id, kind); + kind + } + + pub fn closure_type(self, + def_id: DefId, + substs: ClosureSubsts<'tcx>) + -> ty::ClosureTy<'tcx> + { + // If this is a local def-id, it should be inserted into the + // tables by typeck; else, it will be retreived from + // the external crate metadata. + if let Some(ty) = self.tables.borrow().closure_tys.get(&def_id) { + return ty.subst(self, substs.substs); + } + + let ty = self.sess.cstore.closure_ty(self.global_tcx(), def_id); + self.tables.borrow_mut().closure_tys.insert(def_id, ty.clone()); + ty.subst(self, substs.substs) + } + + /// Given the def_id of an impl, return the def_id of the trait it implements. + /// If it implements no trait, return `None`. + pub fn trait_id_of_impl(self, def_id: DefId) -> Option { + self.impl_trait_ref(def_id).map(|tr| tr.def_id) + } + + /// If the given def ID describes a method belonging to an impl, return the + /// ID of the impl that the method belongs to. Otherwise, return `None`. + pub fn impl_of_method(self, def_id: DefId) -> Option { + if def_id.krate != LOCAL_CRATE { + return self.sess.cstore.associated_item(self.global_tcx(), def_id) + .and_then(|item| { + match item.container { + TraitContainer(_) => None, + ImplContainer(def_id) => Some(def_id), + } + }); + } + match self.associated_items.borrow().get(&def_id).cloned() { + Some(trait_item) => { + match trait_item.container { + TraitContainer(_) => None, + ImplContainer(def_id) => Some(def_id), + } + } + None => None + } + } + + /// If the given def ID describes an item belonging to a trait, + /// return the ID of the trait that the trait item belongs to. + /// Otherwise, return `None`. 
+ pub fn trait_of_item(self, def_id: DefId) -> Option { + if def_id.krate != LOCAL_CRATE { + return self.sess.cstore.trait_of_item(def_id); + } + match self.associated_items.borrow().get(&def_id) { + Some(associated_item) => { + match associated_item.container { + TraitContainer(def_id) => Some(def_id), + ImplContainer(_) => None + } + } + None => None + } + } + + /// Construct a parameter environment suitable for static contexts or other contexts where there + /// are no free type/lifetime parameters in scope. + pub fn empty_parameter_environment(self) -> ParameterEnvironment<'tcx> { + + // for an empty parameter environment, there ARE no free + // regions, so it shouldn't matter what we use for the free id + let free_id_outlive = self.region_maps.node_extent(ast::DUMMY_NODE_ID); + ty::ParameterEnvironment { + free_substs: self.intern_substs(&[]), + caller_bounds: Vec::new(), + implicit_region_bound: self.mk_region(ty::ReEmpty), + free_id_outlive: free_id_outlive, + is_copy_cache: RefCell::new(FxHashMap()), + is_sized_cache: RefCell::new(FxHashMap()), + } + } + + /// Constructs and returns a substitution that can be applied to move from + /// the "outer" view of a type or method to the "inner" view. + /// In general, this means converting from bound parameters to + /// free parameters. Since we currently represent bound/free type + /// parameters in the same way, this only has an effect on regions. + pub fn construct_free_substs(self, def_id: DefId, + free_id_outlive: CodeExtent) + -> &'gcx Substs<'gcx> { + + let substs = Substs::for_item(self.global_tcx(), def_id, |def, _| { + // map bound 'a => free 'a + self.global_tcx().mk_region(ReFree(FreeRegion { + scope: free_id_outlive, + bound_region: def.to_bound_region() + })) + }, |def, _| { + // map T => T + self.global_tcx().mk_param_from_def(def) + }); + + debug!("construct_parameter_environment: {:?}", substs); + substs + } + + /// See `ParameterEnvironment` struct def'n for details. 
+ /// If you were using `free_id: NodeId`, you might try `self.region_maps.item_extent(free_id)` + /// for the `free_id_outlive` parameter. (But note that this is not always quite right.) + pub fn construct_parameter_environment(self, + span: Span, + def_id: DefId, + free_id_outlive: CodeExtent) + -> ParameterEnvironment<'gcx> + { + // + // Construct the free substs. + // + + let free_substs = self.construct_free_substs(def_id, free_id_outlive); + + // + // Compute the bounds on Self and the type parameters. + // + + let tcx = self.global_tcx(); + let generic_predicates = tcx.item_predicates(def_id); + let bounds = generic_predicates.instantiate(tcx, free_substs); + let bounds = tcx.liberate_late_bound_regions(free_id_outlive, &ty::Binder(bounds)); + let predicates = bounds.predicates; + + // Finally, we have to normalize the bounds in the environment, in + // case they contain any associated type projections. This process + // can yield errors if the put in illegal associated types, like + // `::Bar` where `i32` does not implement `Foo`. We + // report these errors right here; this doesn't actually feel + // right to me, because constructing the environment feels like a + // kind of a "idempotent" action, but I'm not sure where would be + // a better place. In practice, we construct environments for + // every fn once during type checking, and we'll abort if there + // are any errors at that point, so after type checking you can be + // sure that this will succeed without errors anyway. 
+ // + + let unnormalized_env = ty::ParameterEnvironment { + free_substs: free_substs, + implicit_region_bound: tcx.mk_region(ty::ReScope(free_id_outlive)), + caller_bounds: predicates, + free_id_outlive: free_id_outlive, + is_copy_cache: RefCell::new(FxHashMap()), + is_sized_cache: RefCell::new(FxHashMap()), + }; + + let cause = traits::ObligationCause::misc(span, free_id_outlive.node_id(&self.region_maps)); + traits::normalize_param_env_or_error(tcx, unnormalized_env, cause) + } + + pub fn node_scope_region(self, id: NodeId) -> &'tcx Region { + self.mk_region(ty::ReScope(self.region_maps.node_extent(id))) + } + + pub fn visit_all_item_likes_in_krate(self, + dep_node_fn: F, + visitor: &mut V) + where F: FnMut(DefId) -> DepNode, V: ItemLikeVisitor<'gcx> + { + dep_graph::visit_all_item_likes_in_krate(self.global_tcx(), dep_node_fn, visitor); + } + + /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err` + /// with the name of the crate containing the impl. + pub fn span_of_impl(self, impl_did: DefId) -> Result { + if impl_did.is_local() { + let node_id = self.map.as_local_node_id(impl_did).unwrap(); + Ok(self.map.span(node_id)) + } else { + Err(self.sess.cstore.crate_name(impl_did.krate)) + } + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn with_freevars(self, fid: NodeId, f: F) -> T where + F: FnOnce(&[hir::Freevar]) -> T, + { + match self.freevars.borrow().get(&fid) { + None => f(&[]), + Some(d) => f(&d[..]) + } + } +} diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs new file mode 100644 index 0000000000000..eb384eec6a6f1 --- /dev/null +++ b/src/librustc/ty/outlives.rs @@ -0,0 +1,212 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. 
This file may not be copied, modified, or distributed +// except according to those terms. + +// The outlines relation `T: 'a` or `'a: 'b`. This code frequently +// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that +// RFC for reference. + +use ty::{self, Ty, TyCtxt, TypeFoldable}; + +#[derive(Debug)] +pub enum Component<'tcx> { + Region(&'tcx ty::Region), + Param(ty::ParamTy), + UnresolvedInferenceVariable(ty::InferTy), + + // Projections like `T::Foo` are tricky because a constraint like + // `T::Foo: 'a` can be satisfied in so many ways. There may be a + // where-clause that says `T::Foo: 'a`, or the defining trait may + // include a bound like `type Foo: 'static`, or -- in the most + // conservative way -- we can prove that `T: 'a` (more generally, + // that all components in the projection outlive `'a`). This code + // is not in a position to judge which is the best technique, so + // we just product the projection as a component and leave it to + // the consumer to decide (but see `EscapingProjection` below). + Projection(ty::ProjectionTy<'tcx>), + + // In the case where a projection has escaping regions -- meaning + // regions bound within the type itself -- we always use + // the most conservative rule, which requires that all components + // outlive the bound. So for example if we had a type like this: + // + // for<'a> Trait1< >::Foo > + // ~~~~~~~~~~~~~~~~~~~~~~~~~ + // + // then the inner projection (underlined) has an escaping region + // `'a`. We consider that outer trait `'c` to meet a bound if `'b` + // outlives `'b: 'c`, and we don't consider whether the trait + // declares that `Foo: 'static` etc. Therefore, we just return the + // free components of such a projection (in this case, `'b`). + // + // However, in the future, we may want to get smarter, and + // actually return a "higher-ranked projection" here. 
Therefore, + // we mark that these components are part of an escaping + // projection, so that implied bounds code can avoid relying on + // them. This gives us room to improve the regionck reasoning in + // the future without breaking backwards compat. + EscapingProjection(Vec>), +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + /// Returns all the things that must outlive `'a` for the condition + /// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**. + pub fn outlives_components(&self, ty0: Ty<'tcx>) + -> Vec> { + let mut components = vec![]; + self.compute_components(ty0, &mut components); + debug!("components({:?}) = {:?}", ty0, components); + components + } + + fn compute_components(&self, ty: Ty<'tcx>, out: &mut Vec>) { + // Descend through the types, looking for the various "base" + // components and collecting them into `out`. This is not written + // with `collect()` because of the need to sometimes skip subtrees + // in the `subtys` iterator (e.g., when encountering a + // projection). + match ty.sty { + ty::TyClosure(def_id, ref substs) => { + // FIXME(#27086). We do not accumulate from substs, since they + // don't represent reachable data. This means that, in + // practice, some of the lifetime parameters might not + // be in scope when the body runs, so long as there is + // no reachable data with that lifetime. For better or + // worse, this is consistent with fn types, however, + // which can also encapsulate data in this fashion + // (though it's somewhat harder, and typically + // requires virtual dispatch). 
+ // + // Note that changing this (in a naive way, at least) + // causes regressions for what appears to be perfectly + // reasonable code like this: + // + // ``` + // fn foo<'a>(p: &Data<'a>) { + // bar(|q: &mut Parser| q.read_addr()) + // } + // fn bar(p: Box) { + // } + // ``` + // + // Note that `p` (and `'a`) are not used in the + // closure at all, but to meet the requirement that + // the closure type `C: 'static` (so it can be coerced + // to the object type), we get the requirement that + // `'a: 'static` since `'a` appears in the closure + // type `C`. + // + // A smarter fix might "prune" unused `func_substs` -- + // this would avoid breaking simple examples like + // this, but would still break others (which might + // indeed be invalid, depending on your POV). Pruning + // would be a subtle process, since we have to see + // what func/type parameters are used and unused, + // taking into consideration UFCS and so forth. + + for upvar_ty in substs.upvar_tys(def_id, *self) { + self.compute_components(upvar_ty, out); + } + } + + // OutlivesTypeParameterEnv -- the actual checking that `X:'a` + // is implied by the environment is done in regionck. + ty::TyParam(p) => { + out.push(Component::Param(p)); + } + + // For projections, we prefer to generate an obligation like + // `>::Foo: 'a`, because this gives the + // regionck more ways to prove that it holds. However, + // regionck is not (at least currently) prepared to deal with + // higher-ranked regions that may appear in the + // trait-ref. Therefore, if we see any higher-ranke regions, + // we simply fallback to the most restrictive rule, which + // requires that `Pi: 'a` for all `i`. + ty::TyProjection(ref data) => { + if !data.has_escaping_regions() { + // best case: no escaping regions, so push the + // projection and skip the subtree (thus generating no + // constraints for Pi). 
This defers the choice between + // the rules OutlivesProjectionEnv, + // OutlivesProjectionTraitDef, and + // OutlivesProjectionComponents to regionck. + out.push(Component::Projection(*data)); + } else { + // fallback case: hard code + // OutlivesProjectionComponents. Continue walking + // through and constrain Pi. + let subcomponents = self.capture_components(ty); + out.push(Component::EscapingProjection(subcomponents)); + } + } + + // We assume that inference variables are fully resolved. + // So, if we encounter an inference variable, just record + // the unresolved variable as a component. + ty::TyInfer(infer_ty) => { + out.push(Component::UnresolvedInferenceVariable(infer_ty)); + } + + // Most types do not introduce any region binders, nor + // involve any other subtle cases, and so the WF relation + // simply constraints any regions referenced directly by + // the type and then visits the types that are lexically + // contained within. (The comments refer to relevant rules + // from RFC1214.) + ty::TyBool | // OutlivesScalar + ty::TyChar | // OutlivesScalar + ty::TyInt(..) | // OutlivesScalar + ty::TyUint(..) | // OutlivesScalar + ty::TyFloat(..) | // OutlivesScalar + ty::TyNever | // ... + ty::TyAdt(..) | // OutlivesNominalType + ty::TyBox(..) | // OutlivesNominalType (ish) + ty::TyAnon(..) | // OutlivesNominalType (ish) + ty::TyStr | // OutlivesScalar (ish) + ty::TyArray(..) | // ... + ty::TySlice(..) | // ... + ty::TyRawPtr(..) | // ... + ty::TyRef(..) | // OutlivesReference + ty::TyTuple(..) | // ... + ty::TyFnDef(..) | // OutlivesFunction (*) + ty::TyFnPtr(_) | // OutlivesFunction (*) + ty::TyDynamic(..) | // OutlivesObject, OutlivesFragment (*) + ty::TyError => { + // (*) Bare functions and traits are both binders. In the + // RFC, this means we would add the bound regions to the + // "bound regions list". In our representation, no such + // list is maintained explicitly, because bound regions + // themselves can be readily identified. 
+ + push_region_constraints(out, ty.regions()); + for subty in ty.walk_shallow() { + self.compute_components(subty, out); + } + } + } + } + + fn capture_components(&self, ty: Ty<'tcx>) -> Vec> { + let mut temp = vec![]; + push_region_constraints(&mut temp, ty.regions()); + for subty in ty.walk_shallow() { + self.compute_components(subty, &mut temp); + } + temp + } +} + +fn push_region_constraints<'tcx>(out: &mut Vec>, regions: Vec<&'tcx ty::Region>) { + for r in regions { + if !r.is_bound() { + out.push(Component::Region(r)); + } + } +} diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs new file mode 100644 index 0000000000000..8cb1483107ff1 --- /dev/null +++ b/src/librustc/ty/relate.rs @@ -0,0 +1,626 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Generalized type relating mechanism. A type relation R relates a +//! pair of values (A, B). A and B are usually types or regions but +//! can be other things. Examples of type relations are subtyping, +//! type equality, etc. + +use hir::def_id::DefId; +use ty::subst::{Kind, Substs}; +use ty::{self, Ty, TyCtxt, TypeFoldable}; +use ty::error::{ExpectedFound, TypeError}; +use std::rc::Rc; +use syntax::abi; +use hir as ast; + +pub type RelateResult<'tcx, T> = Result>; + +#[derive(Clone, Debug)] +pub enum Cause { + ExistentialRegionBound, // relating an existential region bound +} + +pub trait TypeRelation<'a, 'gcx: 'a+'tcx, 'tcx: 'a> : Sized { + fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx>; + + /// Returns a static string we can use for printouts. + fn tag(&self) -> &'static str; + + /// Returns true if the value `a` is the "expected" type in the + /// relation. Just affects error messages. 
+ fn a_is_expected(&self) -> bool; + + fn with_cause(&mut self, _cause: Cause, f: F) -> R + where F: FnOnce(&mut Self) -> R + { + f(self) + } + + /// Generic relation routine suitable for most anything. + fn relate>(&mut self, a: &T, b: &T) -> RelateResult<'tcx, T> { + Relate::relate(self, a, b) + } + + /// Switch variance for the purpose of relating `a` and `b`. + fn relate_with_variance>(&mut self, + variance: ty::Variance, + a: &T, + b: &T) + -> RelateResult<'tcx, T>; + + // Overrideable relations. You shouldn't typically call these + // directly, instead call `relate()`, which in turn calls + // these. This is both more uniform but also allows us to add + // additional hooks for other types in the future if needed + // without making older code, which called `relate`, obsolete. + + fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>>; + + fn regions(&mut self, a: &'tcx ty::Region, b: &'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region>; + + fn binders(&mut self, a: &ty::Binder, b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where T: Relate<'tcx>; +} + +pub trait Relate<'tcx>: TypeFoldable<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, a: &Self, b: &Self) + -> RelateResult<'tcx, Self> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a; +} + +/////////////////////////////////////////////////////////////////////////// +// Relate impls + +impl<'tcx> Relate<'tcx> for ty::TypeAndMut<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::TypeAndMut<'tcx>, + b: &ty::TypeAndMut<'tcx>) + -> RelateResult<'tcx, ty::TypeAndMut<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + debug!("{}.mts({:?}, {:?})", + relation.tag(), + a, + b); + if a.mutbl != b.mutbl { + Err(TypeError::Mutability) + } else { + let mutbl = a.mutbl; + let variance = match mutbl { + ast::Mutability::MutImmutable => ty::Covariant, + ast::Mutability::MutMutable => ty::Invariant, + }; + let ty = 
relation.relate_with_variance(variance, &a.ty, &b.ty)?; + Ok(ty::TypeAndMut {ty: ty, mutbl: mutbl}) + } + } +} + +// substitutions are not themselves relatable without more context, +// but they is an important subroutine for things that ARE relatable, +// like traits etc. +fn relate_item_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, + item_def_id: DefId, + a_subst: &'tcx Substs<'tcx>, + b_subst: &'tcx Substs<'tcx>) + -> RelateResult<'tcx, &'tcx Substs<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a +{ + debug!("substs: item_def_id={:?} a_subst={:?} b_subst={:?}", + item_def_id, + a_subst, + b_subst); + + let variances; + let opt_variances = if relation.tcx().variance_computed.get() { + variances = relation.tcx().item_variances(item_def_id); + Some(&*variances) + } else { + None + }; + relate_substs(relation, opt_variances, a_subst, b_subst) +} + +pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, + variances: Option<&Vec>, + a_subst: &'tcx Substs<'tcx>, + b_subst: &'tcx Substs<'tcx>) + -> RelateResult<'tcx, &'tcx Substs<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a +{ + let tcx = relation.tcx(); + + let params = a_subst.params().iter().zip(b_subst.params()).enumerate().map(|(i, (a, b))| { + let variance = variances.map_or(ty::Invariant, |v| v[i]); + if let (Some(a_ty), Some(b_ty)) = (a.as_type(), b.as_type()) { + Ok(Kind::from(relation.relate_with_variance(variance, &a_ty, &b_ty)?)) + } else if let (Some(a_r), Some(b_r)) = (a.as_region(), b.as_region()) { + Ok(Kind::from(relation.relate_with_variance(variance, &a_r, &b_r)?)) + } else { + bug!() + } + }); + + Ok(tcx.mk_substs(params)?) 
+} + +impl<'tcx> Relate<'tcx> for &'tcx ty::BareFnTy<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &&'tcx ty::BareFnTy<'tcx>, + b: &&'tcx ty::BareFnTy<'tcx>) + -> RelateResult<'tcx, &'tcx ty::BareFnTy<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + let unsafety = relation.relate(&a.unsafety, &b.unsafety)?; + let abi = relation.relate(&a.abi, &b.abi)?; + let sig = relation.relate(&a.sig, &b.sig)?; + Ok(relation.tcx().mk_bare_fn(ty::BareFnTy { + unsafety: unsafety, + abi: abi, + sig: sig + })) + } +} + +impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::FnSig<'tcx>, + b: &ty::FnSig<'tcx>) + -> RelateResult<'tcx, ty::FnSig<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + if a.variadic != b.variadic { + return Err(TypeError::VariadicMismatch( + expected_found(relation, &a.variadic, &b.variadic))); + } + + let inputs = relate_arg_vecs(relation, + &a.inputs, + &b.inputs)?; + let output = relation.relate(&a.output, &b.output)?; + + Ok(ty::FnSig {inputs: inputs, + output: output, + variadic: a.variadic}) + } +} + +fn relate_arg_vecs<'a, 'gcx, 'tcx, R>(relation: &mut R, + a_args: &[Ty<'tcx>], + b_args: &[Ty<'tcx>]) + -> RelateResult<'tcx, Vec>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a +{ + if a_args.len() != b_args.len() { + return Err(TypeError::ArgCount); + } + + a_args.iter().zip(b_args) + .map(|(a, b)| relation.relate_with_variance(ty::Contravariant, a, b)) + .collect() +} + +impl<'tcx> Relate<'tcx> for ast::Unsafety { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ast::Unsafety, + b: &ast::Unsafety) + -> RelateResult<'tcx, ast::Unsafety> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + if a != b { + Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b))) + } else { + Ok(*a) + } + } +} + +impl<'tcx> Relate<'tcx> for abi::Abi { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &abi::Abi, + b: 
&abi::Abi) + -> RelateResult<'tcx, abi::Abi> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + if a == b { + Ok(*a) + } else { + Err(TypeError::AbiMismatch(expected_found(relation, a, b))) + } + } +} + +impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::ProjectionTy<'tcx>, + b: &ty::ProjectionTy<'tcx>) + -> RelateResult<'tcx, ty::ProjectionTy<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + if a.item_name != b.item_name { + Err(TypeError::ProjectionNameMismatched( + expected_found(relation, &a.item_name, &b.item_name))) + } else { + let trait_ref = relation.relate(&a.trait_ref, &b.trait_ref)?; + Ok(ty::ProjectionTy { trait_ref: trait_ref, item_name: a.item_name }) + } + } +} + +impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::ExistentialProjection<'tcx>, + b: &ty::ExistentialProjection<'tcx>) + -> RelateResult<'tcx, ty::ExistentialProjection<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + if a.item_name != b.item_name { + Err(TypeError::ProjectionNameMismatched( + expected_found(relation, &a.item_name, &b.item_name))) + } else { + let trait_ref = relation.relate(&a.trait_ref, &b.trait_ref)?; + let ty = relation.relate(&a.ty, &b.ty)?; + Ok(ty::ExistentialProjection { + trait_ref: trait_ref, + item_name: a.item_name, + ty: ty + }) + } + } +} + +impl<'tcx> Relate<'tcx> for Vec> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &Vec>, + b: &Vec>) + -> RelateResult<'tcx, Vec>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + // To be compatible, `a` and `b` must be for precisely the + // same set of traits and item names. We always require that + // projection bounds lists are sorted by trait-def-id and item-name, + // so we can just iterate through the lists pairwise, so long as they are the + // same length. 
+ if a.len() != b.len() { + Err(TypeError::ProjectionBoundsLength(expected_found(relation, &a.len(), &b.len()))) + } else { + a.iter().zip(b) + .map(|(a, b)| relation.relate(a, b)) + .collect() + } + } +} + +impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::TraitRef<'tcx>, + b: &ty::TraitRef<'tcx>) + -> RelateResult<'tcx, ty::TraitRef<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + // Different traits cannot be related + if a.def_id != b.def_id { + Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id))) + } else { + let substs = relate_item_substs(relation, a.def_id, a.substs, b.substs)?; + Ok(ty::TraitRef { def_id: a.def_id, substs: substs }) + } + } +} + +impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::ExistentialTraitRef<'tcx>, + b: &ty::ExistentialTraitRef<'tcx>) + -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + // Different traits cannot be related + if a.def_id != b.def_id { + Err(TypeError::Traits(expected_found(relation, &a.def_id, &b.def_id))) + } else { + let substs = relate_item_substs(relation, a.def_id, a.substs, b.substs)?; + Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs: substs }) + } + } +} + +impl<'tcx> Relate<'tcx> for Ty<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &Ty<'tcx>, + b: &Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + relation.tys(a, b) + } +} + +/// The main "type relation" routine. Note that this does not handle +/// inference artifacts, so you should filter those out before calling +/// it. 
+pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, + a: Ty<'tcx>, + b: Ty<'tcx>) + -> RelateResult<'tcx, Ty<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a +{ + let tcx = relation.tcx(); + let a_sty = &a.sty; + let b_sty = &b.sty; + debug!("super_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty); + match (a_sty, b_sty) { + (&ty::TyInfer(_), _) | + (_, &ty::TyInfer(_)) => + { + // The caller should handle these cases! + bug!("var types encountered in super_relate_tys") + } + + (&ty::TyError, _) | (_, &ty::TyError) => + { + Ok(tcx.types.err) + } + + (&ty::TyNever, _) | + (&ty::TyChar, _) | + (&ty::TyBool, _) | + (&ty::TyInt(_), _) | + (&ty::TyUint(_), _) | + (&ty::TyFloat(_), _) | + (&ty::TyStr, _) + if a == b => + { + Ok(a) + } + + (&ty::TyParam(ref a_p), &ty::TyParam(ref b_p)) + if a_p.idx == b_p.idx => + { + Ok(a) + } + + (&ty::TyAdt(a_def, a_substs), &ty::TyAdt(b_def, b_substs)) + if a_def == b_def => + { + let substs = relate_item_substs(relation, a_def.did, a_substs, b_substs)?; + Ok(tcx.mk_adt(a_def, substs)) + } + + (&ty::TyDynamic(ref a_obj, ref a_region), &ty::TyDynamic(ref b_obj, ref b_region)) => { + let region_bound = relation.with_cause(Cause::ExistentialRegionBound, + |relation| { + relation.relate_with_variance( + ty::Contravariant, + a_region, + b_region) + })?; + Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound)) + } + + (&ty::TyClosure(a_id, a_substs), + &ty::TyClosure(b_id, b_substs)) + if a_id == b_id => + { + // All TyClosure types with the same id represent + // the (anonymous) type of the same closure expression. So + // all of their regions should be equated. 
+ let substs = relation.relate(&a_substs, &b_substs)?; + Ok(tcx.mk_closure_from_closure_substs(a_id, substs)) + } + + (&ty::TyBox(a_inner), &ty::TyBox(b_inner)) => + { + let typ = relation.relate(&a_inner, &b_inner)?; + Ok(tcx.mk_box(typ)) + } + + (&ty::TyRawPtr(ref a_mt), &ty::TyRawPtr(ref b_mt)) => + { + let mt = relation.relate(a_mt, b_mt)?; + Ok(tcx.mk_ptr(mt)) + } + + (&ty::TyRef(a_r, ref a_mt), &ty::TyRef(b_r, ref b_mt)) => + { + let r = relation.relate_with_variance(ty::Contravariant, &a_r, &b_r)?; + let mt = relation.relate(a_mt, b_mt)?; + Ok(tcx.mk_ref(r, mt)) + } + + (&ty::TyArray(a_t, sz_a), &ty::TyArray(b_t, sz_b)) => + { + let t = relation.relate(&a_t, &b_t)?; + if sz_a == sz_b { + Ok(tcx.mk_array(t, sz_a)) + } else { + Err(TypeError::FixedArraySize(expected_found(relation, &sz_a, &sz_b))) + } + } + + (&ty::TySlice(a_t), &ty::TySlice(b_t)) => + { + let t = relation.relate(&a_t, &b_t)?; + Ok(tcx.mk_slice(t)) + } + + (&ty::TyTuple(as_), &ty::TyTuple(bs)) => + { + if as_.len() == bs.len() { + Ok(tcx.mk_tup(as_.iter().zip(bs).map(|(a, b)| relation.relate(a, b)))?) 
+ } else if !(as_.is_empty() || bs.is_empty()) { + Err(TypeError::TupleSize( + expected_found(relation, &as_.len(), &bs.len()))) + } else { + Err(TypeError::Sorts(expected_found(relation, &a, &b))) + } + } + + (&ty::TyFnDef(a_def_id, a_substs, a_fty), + &ty::TyFnDef(b_def_id, b_substs, b_fty)) + if a_def_id == b_def_id => + { + let substs = relate_substs(relation, None, a_substs, b_substs)?; + let fty = relation.relate(&a_fty, &b_fty)?; + Ok(tcx.mk_fn_def(a_def_id, substs, fty)) + } + + (&ty::TyFnPtr(a_fty), &ty::TyFnPtr(b_fty)) => + { + let fty = relation.relate(&a_fty, &b_fty)?; + Ok(tcx.mk_fn_ptr(fty)) + } + + (&ty::TyProjection(ref a_data), &ty::TyProjection(ref b_data)) => + { + let projection_ty = relation.relate(a_data, b_data)?; + Ok(tcx.mk_projection(projection_ty.trait_ref, projection_ty.item_name)) + } + + (&ty::TyAnon(a_def_id, a_substs), &ty::TyAnon(b_def_id, b_substs)) + if a_def_id == b_def_id => + { + let substs = relate_substs(relation, None, a_substs, b_substs)?; + Ok(tcx.mk_anon(a_def_id, substs)) + } + + _ => + { + Err(TypeError::Sorts(expected_found(relation, &a, &b))) + } + } +} + +impl<'tcx> Relate<'tcx> for &'tcx ty::Slice> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &Self, + b: &Self) + -> RelateResult<'tcx, Self> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { + + if a.len() != b.len() { + return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))); + } + + let tcx = relation.tcx(); + let v = a.iter().zip(b.iter()).map(|(ep_a, ep_b)| { + use ty::ExistentialPredicate::*; + match (*ep_a, *ep_b) { + (Trait(ref a), Trait(ref b)) => Ok(Trait(relation.relate(a, b)?)), + (Projection(ref a), Projection(ref b)) => Ok(Projection(relation.relate(a, b)?)), + (AutoTrait(ref a), AutoTrait(ref b)) if a == b => Ok(AutoTrait(*a)), + _ => Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))) + } + }); + Ok(tcx.mk_existential_predicates(v)?) 
+ } +} + +impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::ClosureSubsts<'tcx>, + b: &ty::ClosureSubsts<'tcx>) + -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + let substs = relate_substs(relation, None, a.substs, b.substs)?; + Ok(ty::ClosureSubsts { substs: substs }) + } +} + +impl<'tcx> Relate<'tcx> for &'tcx Substs<'tcx> { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &&'tcx Substs<'tcx>, + b: &&'tcx Substs<'tcx>) + -> RelateResult<'tcx, &'tcx Substs<'tcx>> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + relate_substs(relation, None, a, b) + } +} + +impl<'tcx> Relate<'tcx> for &'tcx ty::Region { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &&'tcx ty::Region, + b: &&'tcx ty::Region) + -> RelateResult<'tcx, &'tcx ty::Region> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + relation.regions(*a, *b) + } +} + +impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for ty::Binder { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &ty::Binder, + b: &ty::Binder) + -> RelateResult<'tcx, ty::Binder> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + relation.binders(a, b) + } +} + +impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for Rc { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &Rc, + b: &Rc) + -> RelateResult<'tcx, Rc> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + let a: &T = a; + let b: &T = b; + Ok(Rc::new(relation.relate(a, b)?)) + } +} + +impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for Box { + fn relate<'a, 'gcx, R>(relation: &mut R, + a: &Box, + b: &Box) + -> RelateResult<'tcx, Box> + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a + { + let a: &T = a; + let b: &T = b; + Ok(Box::new(relation.relate(a, b)?)) + } +} + +/////////////////////////////////////////////////////////////////////////// +// Error handling + +pub fn expected_found<'a, 'gcx, 'tcx, 
R, T>(relation: &mut R, + a: &T, + b: &T) + -> ExpectedFound + where R: TypeRelation<'a, 'gcx, 'tcx>, T: Clone, 'gcx: 'a+'tcx, 'tcx: 'a +{ + expected_found_bool(relation.a_is_expected(), a, b) +} + +pub fn expected_found_bool(a_is_expected: bool, + a: &T, + b: &T) + -> ExpectedFound + where T: Clone +{ + let a = a.clone(); + let b = b.clone(); + if a_is_expected { + ExpectedFound {expected: a, found: b} + } else { + ExpectedFound {expected: b, found: a} + } +} diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs new file mode 100644 index 0000000000000..88de3575274cc --- /dev/null +++ b/src/librustc/ty/structural_impls.rs @@ -0,0 +1,958 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use infer::type_variable; +use ty::{self, Lift, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use rustc_data_structures::accumulate_vec::AccumulateVec; + +use std::rc::Rc; +use syntax::abi; + +use hir; + +/////////////////////////////////////////////////////////////////////////// +// Lift implementations + +impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { + type Lifted = (A::Lifted, B::Lifted); + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.0).and_then(|a| tcx.lift(&self.1).map(|b| (a, b))) + } +} + +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option { + type Lifted = Option; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + match *self { + Some(ref x) => tcx.lift(x).map(Some), + None => Some(None) + } + } +} + +impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result { + type Lifted = Result; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + match *self { + Ok(ref x) => tcx.lift(x).map(Ok), + Err(ref e) => tcx.lift(e).map(Err) + } + } +} + +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for [T] { + type Lifted = Vec; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + // type annotation needed to inform `projection_must_outlive` + let mut result : Vec<>::Lifted> + = Vec::with_capacity(self.len()); + for x in self { + if let Some(value) = tcx.lift(x) { + result.push(value); + } else { + return None; + } + } + Some(result) + } +} + +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec { + type Lifted = Vec; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&self[..]) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> { + type Lifted = ty::TraitRef<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| ty::TraitRef { + def_id: self.def_id, + substs: substs + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for 
ty::ExistentialTraitRef<'a> { + type Lifted = ty::ExistentialTraitRef<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| ty::ExistentialTraitRef { + def_id: self.def_id, + substs: substs + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { + type Lifted = ty::TraitPredicate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option> { + tcx.lift(&self.trait_ref).map(|trait_ref| ty::TraitPredicate { + trait_ref: trait_ref + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::EquatePredicate<'a> { + type Lifted = ty::EquatePredicate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option> { + tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::EquatePredicate(a, b)) + } +} + +impl<'tcx, A: Copy+Lift<'tcx>, B: Copy+Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate { + type Lifted = ty::OutlivesPredicate; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&(self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b)) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> { + type Lifted = ty::ProjectionTy<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option> { + tcx.lift(&self.trait_ref).map(|trait_ref| { + ty::ProjectionTy { + trait_ref: trait_ref, + item_name: self.item_name + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> { + type Lifted = ty::ProjectionPredicate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) + -> Option> { + tcx.lift(&(self.projection_ty, self.ty)).map(|(projection_ty, ty)| { + ty::ProjectionPredicate { + projection_ty: projection_ty, + ty: ty + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> { + type Lifted = ty::ExistentialProjection<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&(self.trait_ref, self.ty)).map(|(trait_ref, ty)| { + 
ty::ExistentialProjection { + trait_ref: trait_ref, + item_name: self.item_name, + ty: ty + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::Predicate<'a> { + type Lifted = ty::Predicate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match *self { + ty::Predicate::Trait(ref binder) => { + tcx.lift(binder).map(ty::Predicate::Trait) + } + ty::Predicate::Equate(ref binder) => { + tcx.lift(binder).map(ty::Predicate::Equate) + } + ty::Predicate::RegionOutlives(ref binder) => { + tcx.lift(binder).map(ty::Predicate::RegionOutlives) + } + ty::Predicate::TypeOutlives(ref binder) => { + tcx.lift(binder).map(ty::Predicate::TypeOutlives) + } + ty::Predicate::Projection(ref binder) => { + tcx.lift(binder).map(ty::Predicate::Projection) + } + ty::Predicate::WellFormed(ty) => { + tcx.lift(&ty).map(ty::Predicate::WellFormed) + } + ty::Predicate::ClosureKind(closure_def_id, kind) => { + Some(ty::Predicate::ClosureKind(closure_def_id, kind)) + } + ty::Predicate::ObjectSafe(trait_def_id) => { + Some(ty::Predicate::ObjectSafe(trait_def_id)) + } + } + } +} + +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder { + type Lifted = ty::Binder; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.0).map(|x| ty::Binder(x)) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> { + type Lifted = ty::ClosureSubsts<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| { + ty::ClosureSubsts { substs: substs } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ItemSubsts<'a> { + type Lifted = ty::ItemSubsts<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.substs).map(|substs| { + ty::ItemSubsts { + substs: substs + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> { + type Lifted = ty::adjustment::AutoBorrow<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 
'tcx>) -> Option { + match *self { + ty::adjustment::AutoBorrow::Ref(r, m) => { + tcx.lift(&r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m)) + } + ty::adjustment::AutoBorrow::RawPtr(m) => { + Some(ty::adjustment::AutoBorrow::RawPtr(m)) + } + } + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> { + type Lifted = ty::FnSig<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.inputs[..]).and_then(|inputs| { + tcx.lift(&self.output).map(|output| { + ty::FnSig { + inputs: inputs, + output: output, + variadic: self.variadic + } + }) + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::ClosureTy<'a> { + type Lifted = ty::ClosureTy<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.sig).map(|sig| { + ty::ClosureTy { + sig: sig, + unsafety: self.unsafety, + abi: self.abi + } + }) + } +} + +impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound { + type Lifted = ty::error::ExpectedFound; + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.expected).and_then(|expected| { + tcx.lift(&self.found).map(|found| { + ty::error::ExpectedFound { + expected: expected, + found: found + } + }) + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for type_variable::Default<'a> { + type Lifted = type_variable::Default<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + tcx.lift(&self.ty).map(|ty| { + type_variable::Default { + ty: ty, + origin_span: self.origin_span, + def_id: self.def_id + } + }) + } +} + +impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { + type Lifted = ty::error::TypeError<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + use ty::error::TypeError::*; + + Some(match *self { + Mismatch => Mismatch, + UnsafetyMismatch(x) => UnsafetyMismatch(x), + AbiMismatch(x) => AbiMismatch(x), + Mutability => Mutability, + TupleSize(x) => TupleSize(x), + FixedArraySize(x) => 
FixedArraySize(x), + ArgCount => ArgCount, + RegionsDoesNotOutlive(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b)) + } + RegionsNotSame(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsNotSame(a, b)) + } + RegionsNoOverlap(a, b) => { + return tcx.lift(&(a, b)).map(|(a, b)| RegionsNoOverlap(a, b)) + } + RegionsInsufficientlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsInsufficientlyPolymorphic(a, b)) + } + RegionsOverlyPolymorphic(a, b) => { + return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) + } + IntMismatch(x) => IntMismatch(x), + FloatMismatch(x) => FloatMismatch(x), + Traits(x) => Traits(x), + VariadicMismatch(x) => VariadicMismatch(x), + CyclicTy => CyclicTy, + ProjectionNameMismatched(x) => ProjectionNameMismatched(x), + ProjectionBoundsLength(x) => ProjectionBoundsLength(x), + + Sorts(ref x) => return tcx.lift(x).map(Sorts), + TyParamDefaultMismatch(ref x) => { + return tcx.lift(x).map(TyParamDefaultMismatch) + } + ExistentialMismatch(ref x) => return tcx.lift(x).map(ExistentialMismatch) + }) + } +} + +/////////////////////////////////////////////////////////////////////////// +// TypeFoldable implementations. +// +// Ideally, each type should invoke `folder.fold_foo(self)` and +// nothing else. In some cases, though, we haven't gotten around to +// adding methods on the `folder` yet, and thus the folding is +// hard-coded here. This is less-flexible, because folders cannot +// override the behavior, but there are a lot of random types and one +// can easily refactor the folding into the TypeFolder trait as +// needed. + +macro_rules! CopyImpls { + ($($ty:ty),+) => { + $( + impl<'tcx> TypeFoldable<'tcx> for $ty { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _: &mut F) -> $ty { + *self + } + + fn super_visit_with>(&self, _: &mut F) -> bool { + false + } + } + )+ + } +} + +CopyImpls! 
{ (), hir::Unsafety, abi::Abi } + +impl<'tcx, T:TypeFoldable<'tcx>, U:TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> (T, U) { + (self.0.fold_with(folder), self.1.fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.0.visit_with(visitor) || self.1.visit_with(visitor) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Option { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + self.as_ref().map(|t| t.fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + Rc::new((**self).fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + (**self).visit_with(visitor) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let content: T = (**self).fold_with(folder); + box content + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + (**self).visit_with(visitor) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + self.iter().map(|t| t.fold_with(folder)).collect() + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + +impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::Binder(self.0.fold_with(folder)) + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_binder(self) + } + + 
fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.0.visit_with(visitor) + } + + fn visit_with>(&self, visitor: &mut V) -> bool { + visitor.visit_binder(self) + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let v = self.iter().map(|p| p.fold_with(folder)).collect::>(); + folder.tcx().intern_existential_predicates(&v) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|p| p.visit_with(visitor)) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialPredicate<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + use ty::ExistentialPredicate::*; + match *self { + Trait(ref tr) => Trait(tr.fold_with(folder)), + Projection(ref p) => Projection(p.fold_with(folder)), + AutoTrait(did) => AutoTrait(did), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ty::ExistentialPredicate::Trait(ref tr) => tr.visit_with(visitor), + ty::ExistentialPredicate::Projection(ref p) => p.visit_with(visitor), + ty::ExistentialPredicate::AutoTrait(_) => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let v = self.iter().map(|t| t.fold_with(folder)).collect::>(); + folder.tcx().intern_type_list(&v) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + +impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let sty = match self.sty { + ty::TyBox(typ) => ty::TyBox(typ.fold_with(folder)), + ty::TyRawPtr(tm) => ty::TyRawPtr(tm.fold_with(folder)), + ty::TyArray(typ, sz) => ty::TyArray(typ.fold_with(folder), sz), + ty::TySlice(typ) => ty::TySlice(typ.fold_with(folder)), + ty::TyAdt(tid, substs) => 
ty::TyAdt(tid, substs.fold_with(folder)), + ty::TyDynamic(ref trait_ty, ref region) => + ty::TyDynamic(trait_ty.fold_with(folder), region.fold_with(folder)), + ty::TyTuple(ts) => ty::TyTuple(ts.fold_with(folder)), + ty::TyFnDef(def_id, substs, f) => { + ty::TyFnDef(def_id, + substs.fold_with(folder), + f.fold_with(folder)) + } + ty::TyFnPtr(f) => ty::TyFnPtr(f.fold_with(folder)), + ty::TyRef(ref r, tm) => { + ty::TyRef(r.fold_with(folder), tm.fold_with(folder)) + } + ty::TyClosure(did, substs) => ty::TyClosure(did, substs.fold_with(folder)), + ty::TyProjection(ref data) => ty::TyProjection(data.fold_with(folder)), + ty::TyAnon(did, substs) => ty::TyAnon(did, substs.fold_with(folder)), + ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) | + ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) | + ty::TyParam(..) | ty::TyNever => return self + }; + + if self.sty == sty { + self + } else { + folder.tcx().mk_ty(sty) + } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_ty(*self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match self.sty { + ty::TyBox(typ) => typ.visit_with(visitor), + ty::TyRawPtr(ref tm) => tm.visit_with(visitor), + ty::TyArray(typ, _sz) => typ.visit_with(visitor), + ty::TySlice(typ) => typ.visit_with(visitor), + ty::TyAdt(_, substs) => substs.visit_with(visitor), + ty::TyDynamic(ref trait_ty, ref reg) => + trait_ty.visit_with(visitor) || reg.visit_with(visitor), + ty::TyTuple(ts) => ts.visit_with(visitor), + ty::TyFnDef(_, substs, ref f) => { + substs.visit_with(visitor) || f.visit_with(visitor) + } + ty::TyFnPtr(ref f) => f.visit_with(visitor), + ty::TyRef(r, ref tm) => r.visit_with(visitor) || tm.visit_with(visitor), + ty::TyClosure(_did, ref substs) => substs.visit_with(visitor), + ty::TyProjection(ref data) => data.visit_with(visitor), + ty::TyAnon(_, ref substs) => substs.visit_with(visitor), + ty::TyBool | ty::TyChar | ty::TyStr | ty::TyInt(_) | + 
ty::TyUint(_) | ty::TyFloat(_) | ty::TyError | ty::TyInfer(_) | + ty::TyParam(..) | ty::TyNever => false, + } + } + + fn visit_with>(&self, visitor: &mut V) -> bool { + visitor.visit_ty(self) + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::BareFnTy<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let fty = ty::BareFnTy { + sig: self.sig.fold_with(folder), + abi: self.abi, + unsafety: self.unsafety + }; + folder.tcx().mk_bare_fn(fty) + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_bare_fn_ty(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.sig.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ClosureTy<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ClosureTy { + sig: self.sig.fold_with(folder), + unsafety: self.unsafety, + abi: self.abi, + } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_closure_ty(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.sig.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::TypeAndMut<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::TypeAndMut { ty: self.ty.fold_with(folder), mutbl: self.mutbl } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_mt(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::FnSig { inputs: self.inputs.fold_with(folder), + output: self.output.fold_with(folder), + variadic: self.variadic } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self 
{ + folder.fold_fn_sig(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.inputs.visit_with(visitor) || self.output.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::TraitRef<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::TraitRef { + def_id: self.def_id, + substs: self.substs.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) + } + + fn visit_with>(&self, visitor: &mut V) -> bool { + visitor.visit_trait_ref(*self) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialTraitRef<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ExistentialTraitRef { + def_id: self.def_id, + substs: self.substs.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ImplHeader<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ImplHeader { + impl_def_id: self.impl_def_id, + self_ty: self.self_ty.fold_with(folder), + trait_ref: self.trait_ref.map(|t| t.fold_with(folder)), + predicates: self.predicates.iter().map(|p| p.fold_with(folder)).collect(), + } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_impl_header(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.self_ty.visit_with(visitor) || + self.trait_ref.map(|r| r.visit_with(visitor)).unwrap_or(false) || + self.predicates.iter().any(|p| p.visit_with(visitor)) + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Region { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, _folder: &mut F) -> Self { + *self + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_region(*self) + } + + fn 
super_visit_with>(&self, _visitor: &mut V) -> bool { + false + } + + fn visit_with>(&self, visitor: &mut V) -> bool { + visitor.visit_region(*self) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ClosureSubsts<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ClosureSubsts { + substs: self.substs.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ItemSubsts<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ItemSubsts { + substs: self.substs.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.substs.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::adjustment::AutoBorrow<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ty::adjustment::AutoBorrow::Ref(ref r, m) => { + ty::adjustment::AutoBorrow::Ref(r.fold_with(folder), m) + } + ty::adjustment::AutoBorrow::RawPtr(m) => ty::adjustment::AutoBorrow::RawPtr(m) + } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_autoref(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ty::adjustment::AutoBorrow::Ref(r, _m) => r.visit_with(visitor), + ty::adjustment::AutoBorrow::RawPtr(_m) => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::TypeParameterDef<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::TypeParameterDef { + name: self.name, + def_id: self.def_id, + index: self.index, + default: self.default.fold_with(folder), + default_def_id: self.default_def_id, + object_lifetime_default: self.object_lifetime_default.fold_with(folder), + pure_wrt_drop: self.pure_wrt_drop, + } + } + + fn super_visit_with>(&self, visitor: 
&mut V) -> bool { + self.default.visit_with(visitor) || + self.object_lifetime_default.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ObjectLifetimeDefault<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ty::ObjectLifetimeDefault::Ambiguous => + ty::ObjectLifetimeDefault::Ambiguous, + + ty::ObjectLifetimeDefault::BaseDefault => + ty::ObjectLifetimeDefault::BaseDefault, + + ty::ObjectLifetimeDefault::Specific(r) => + ty::ObjectLifetimeDefault::Specific(r.fold_with(folder)), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ty::ObjectLifetimeDefault::Specific(r) => r.visit_with(visitor), + _ => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::RegionParameterDef<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::RegionParameterDef { + name: self.name, + def_id: self.def_id, + index: self.index, + bounds: self.bounds.fold_with(folder), + pure_wrt_drop: self.pure_wrt_drop, + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.bounds.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::Generics<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::Generics { + parent: self.parent, + parent_regions: self.parent_regions, + parent_types: self.parent_types, + regions: self.regions.fold_with(folder), + types: self.types.fold_with(folder), + has_self: self.has_self, + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.regions.visit_with(visitor) || self.types.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::GenericPredicates<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::GenericPredicates { + parent: self.parent, + predicates: self.predicates.fold_with(folder), + } + } + + fn super_visit_with>(&self, 
visitor: &mut V) -> bool { + self.predicates.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ty::Predicate::Trait(ref a) => + ty::Predicate::Trait(a.fold_with(folder)), + ty::Predicate::Equate(ref binder) => + ty::Predicate::Equate(binder.fold_with(folder)), + ty::Predicate::RegionOutlives(ref binder) => + ty::Predicate::RegionOutlives(binder.fold_with(folder)), + ty::Predicate::TypeOutlives(ref binder) => + ty::Predicate::TypeOutlives(binder.fold_with(folder)), + ty::Predicate::Projection(ref binder) => + ty::Predicate::Projection(binder.fold_with(folder)), + ty::Predicate::WellFormed(data) => + ty::Predicate::WellFormed(data.fold_with(folder)), + ty::Predicate::ClosureKind(closure_def_id, kind) => + ty::Predicate::ClosureKind(closure_def_id, kind), + ty::Predicate::ObjectSafe(trait_def_id) => + ty::Predicate::ObjectSafe(trait_def_id), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ty::Predicate::Trait(ref a) => a.visit_with(visitor), + ty::Predicate::Equate(ref binder) => binder.visit_with(visitor), + ty::Predicate::RegionOutlives(ref binder) => binder.visit_with(visitor), + ty::Predicate::TypeOutlives(ref binder) => binder.visit_with(visitor), + ty::Predicate::Projection(ref binder) => binder.visit_with(visitor), + ty::Predicate::WellFormed(data) => data.visit_with(visitor), + ty::Predicate::ClosureKind(_closure_def_id, _kind) => false, + ty::Predicate::ObjectSafe(_trait_def_id) => false, + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionPredicate<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ProjectionPredicate { + projection_ty: self.projection_ty.fold_with(folder), + ty: self.ty.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.projection_ty.visit_with(visitor) 
|| self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ExistentialProjection<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ExistentialProjection { + trait_ref: self.trait_ref.fold_with(folder), + item_name: self.item_name, + ty: self.ty.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.trait_ref.visit_with(visitor) || self.ty.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ProjectionTy<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ProjectionTy { + trait_ref: self.trait_ref.fold_with(folder), + item_name: self.item_name, + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.trait_ref.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::InstantiatedPredicates<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::InstantiatedPredicates { + predicates: self.predicates.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.predicates.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::EquatePredicate<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::EquatePredicate(self.0.fold_with(folder), + self.1.fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.0.visit_with(visitor) || self.1.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::TraitPredicate<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::TraitPredicate { + trait_ref: self.trait_ref.fold_with(folder) + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.trait_ref.visit_with(visitor) + } +} + +impl<'tcx,T,U> TypeFoldable<'tcx> for ty::OutlivesPredicate + where T : TypeFoldable<'tcx>, + 
U : TypeFoldable<'tcx>, +{ + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::OutlivesPredicate(self.0.fold_with(folder), + self.1.fold_with(folder)) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.0.visit_with(visitor) || self.1.visit_with(visitor) + } +} + +impl<'tcx> TypeFoldable<'tcx> for ty::ClosureUpvar<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::ClosureUpvar { + def: self.def, + span: self.span, + ty: self.ty.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.ty.visit_with(visitor) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::error::ExpectedFound { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + ty::error::ExpectedFound { + expected: self.expected.fold_with(folder), + found: self.found.fold_with(folder), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.expected.visit_with(visitor) || self.found.visit_with(visitor) + } +} diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs new file mode 100644 index 0000000000000..59f774b954cf9 --- /dev/null +++ b/src/librustc/ty/sty.rs @@ -0,0 +1,1322 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
This module contains TypeVariants and its major components + +use hir::def_id::DefId; + +use middle::region; +use ty::subst::Substs; +use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; +use ty::{Slice, TyS}; +use ty::subst::Kind; + +use std::fmt; +use std::iter; +use std::cmp::Ordering; +use syntax::abi; +use syntax::ast::{self, Name, NodeId}; +use syntax::symbol::{keywords, InternedString}; +use util::nodemap::FxHashSet; + +use serialize; + +use hir; + +use self::InferTy::*; +use self::TypeVariants::*; + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct TypeAndMut<'tcx> { + pub ty: Ty<'tcx>, + pub mutbl: hir::Mutability, +} + +#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, + RustcEncodable, RustcDecodable, Copy)] +/// A "free" region `fr` can be interpreted as "some region +/// at least as big as the scope `fr.scope`". +pub struct FreeRegion { + pub scope: region::CodeExtent, + pub bound_region: BoundRegion +} + +#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, + RustcEncodable, RustcDecodable, Copy)] +pub enum BoundRegion { + /// An anonymous region parameter for a given fn (&T) + BrAnon(u32), + + /// Named region parameters for functions (a in &'a T) + /// + /// The def-id is needed to distinguish free regions in + /// the event of shadowing. + BrNamed(DefId, Name, Issue32330), + + /// Fresh bound identifiers created during GLB computations. + BrFresh(u32), + + // Anonymous region for the implicit env pointer parameter + // to a closure + BrEnv +} + +/// True if this late-bound region is unconstrained, and hence will +/// become early-bound once #32330 is fixed. +#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash, + RustcEncodable, RustcDecodable)] +pub enum Issue32330 { + WontChange, + + /// this region will change from late-bound to early-bound once + /// #32330 is fixed. 
+ WillChange { + /// fn where is region declared + fn_def_id: DefId, + + /// name of region; duplicates the info in BrNamed but convenient + /// to have it here, and this code is only temporary + region_name: ast::Name, + } +} + +// NB: If you change this, you'll probably want to change the corresponding +// AST structure in libsyntax/ast.rs as well. +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub enum TypeVariants<'tcx> { + /// The primitive boolean type. Written as `bool`. + TyBool, + + /// The primitive character type; holds a Unicode scalar value + /// (a non-surrogate code point). Written as `char`. + TyChar, + + /// A primitive signed integer type. For example, `i32`. + TyInt(ast::IntTy), + + /// A primitive unsigned integer type. For example, `u32`. + TyUint(ast::UintTy), + + /// A primitive floating-point type. For example, `f64`. + TyFloat(ast::FloatTy), + + /// Structures, enumerations and unions. + /// + /// Substs here, possibly against intuition, *may* contain `TyParam`s. + /// That is, even after substitution it is possible that there are type + /// variables. This happens when the `TyAdt` corresponds to an ADT + /// definition and not a concrete use of it. + TyAdt(&'tcx AdtDef, &'tcx Substs<'tcx>), + + /// `Box`; this is nominally a struct in the documentation, but is + /// special-cased internally. For example, it is possible to implicitly + /// move the contents of a box out of that box, and methods of any type + /// can have type `Box`. + TyBox(Ty<'tcx>), + + /// The pointee of a string slice. Written as `str`. + TyStr, + + /// An array with the given length. Written as `[T; n]`. + TyArray(Ty<'tcx>, usize), + + /// The pointee of an array slice. Written as `[T]`. + TySlice(Ty<'tcx>), + + /// A raw pointer. Written as `*mut T` or `*const T` + TyRawPtr(TypeAndMut<'tcx>), + + /// A reference; a pointer with an associated lifetime. Written as + /// `&a mut T` or `&'a T`. 
+ TyRef(&'tcx Region, TypeAndMut<'tcx>), + + /// The anonymous type of a function declaration/definition. Each + /// function has a unique type. + TyFnDef(DefId, &'tcx Substs<'tcx>, &'tcx BareFnTy<'tcx>), + + /// A pointer to a function. Written as `fn() -> i32`. + /// FIXME: This is currently also used to represent the callee of a method; + /// see ty::MethodCallee etc. + TyFnPtr(&'tcx BareFnTy<'tcx>), + + /// A trait, defined with `trait`. + TyDynamic(Binder<&'tcx Slice>>, &'tcx ty::Region), + + /// The anonymous type of a closure. Used to represent the type of + /// `|a| a`. + TyClosure(DefId, ClosureSubsts<'tcx>), + + /// The never type `!` + TyNever, + + /// A tuple type. For example, `(i32, bool)`. + TyTuple(&'tcx Slice>), + + /// The projection of an associated type. For example, + /// `>::N`. + TyProjection(ProjectionTy<'tcx>), + + /// Anonymized (`impl Trait`) type found in a return type. + /// The DefId comes from the `impl Trait` ast::Ty node, and the + /// substitutions are for the generics of the function in question. + /// After typeck, the concrete type can be found in the `types` map. + TyAnon(DefId, &'tcx Substs<'tcx>), + + /// A type parameter; for example, `T` in `fn f(x: T) {} + TyParam(ParamTy), + + /// A type variable used during type-checking. + TyInfer(InferTy), + + /// A placeholder for a type which could not be computed; this is + /// propagated to avoid useless error messages. + TyError, +} + +/// A closure can be modeled as a struct that looks like: +/// +/// struct Closure<'l0...'li, T0...Tj, U0...Uk> { +/// upvar0: U0, +/// ... +/// upvark: Uk +/// } +/// +/// where 'l0...'li and T0...Tj are the lifetime and type parameters +/// in scope on the function that defined the closure, and U0...Uk are +/// type parameters representing the types of its upvars (borrowed, if +/// appropriate). 
+/// +/// So, for example, given this function: +/// +/// fn foo<'a, T>(data: &'a mut T) { +/// do(|| data.count += 1) +/// } +/// +/// the type of the closure would be something like: +/// +/// struct Closure<'a, T, U0> { +/// data: U0 +/// } +/// +/// Note that the type of the upvar is not specified in the struct. +/// You may wonder how the impl would then be able to use the upvar, +/// if it doesn't know it's type? The answer is that the impl is +/// (conceptually) not fully generic over Closure but rather tied to +/// instances with the expected upvar types: +/// +/// impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> { +/// ... +/// } +/// +/// You can see that the *impl* fully specified the type of the upvar +/// and thus knows full well that `data` has type `&'b mut &'a mut T`. +/// (Here, I am assuming that `data` is mut-borrowed.) +/// +/// Now, the last question you may ask is: Why include the upvar types +/// as extra type parameters? The reason for this design is that the +/// upvar types can reference lifetimes that are internal to the +/// creating function. In my example above, for example, the lifetime +/// `'b` represents the extent of the closure itself; this is some +/// subset of `foo`, probably just the extent of the call to the to +/// `do()`. If we just had the lifetime/type parameters from the +/// enclosing function, we couldn't name this lifetime `'b`. Note that +/// there can also be lifetimes in the types of the upvars themselves, +/// if one of them happens to be a reference to something that the +/// creating fn owns. +/// +/// OK, you say, so why not create a more minimal set of parameters +/// that just includes the extra lifetime parameters? The answer is +/// primarily that it would be hard --- we don't know at the time when +/// we create the closure type what the full types of the upvars are, +/// nor do we know which are borrowed and which are not. 
In this +/// design, we can just supply a fresh type parameter and figure that +/// out later. +/// +/// All right, you say, but why include the type parameters from the +/// original function then? The answer is that trans may need them +/// when monomorphizing, and they may not appear in the upvars. A +/// closure could capture no variables but still make use of some +/// in-scope type parameter with a bound (e.g., if our example above +/// had an extra `U: Default`, and the closure called `U::default()`). +/// +/// There is another reason. This design (implicitly) prohibits +/// closures from capturing themselves (except via a trait +/// object). This simplifies closure inference considerably, since it +/// means that when we infer the kind of a closure or its upvars, we +/// don't have to handle cycles where the decisions we make for +/// closure C wind up influencing the decisions we ought to make for +/// closure C (which would then require fixed point iteration to +/// handle). Plus it fixes an ICE. :P +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct ClosureSubsts<'tcx> { + /// Lifetime and type parameters from the enclosing function, + /// concatenated with the types of the upvars. + /// + /// These are separated out because trans wants to pass them around + /// when monomorphizing. + pub substs: &'tcx Substs<'tcx>, +} + +impl<'a, 'gcx, 'acx, 'tcx> ClosureSubsts<'tcx> { + #[inline] + pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'acx>) -> + impl Iterator> + 'tcx + { + let generics = tcx.item_generics(def_id); + self.substs[self.substs.len()-generics.own_count()..].iter().map( + |t| t.as_type().expect("unexpected region in upvars")) + } +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum ExistentialPredicate<'tcx> { + // e.g. Iterator + Trait(ExistentialTraitRef<'tcx>), + // e.g. Iterator::Item = T + Projection(ExistentialProjection<'tcx>), + // e.g. 
Send + AutoTrait(DefId), +} + +impl<'a, 'gcx, 'tcx> ExistentialPredicate<'tcx> { + pub fn cmp(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, other: &Self) -> Ordering { + use self::ExistentialPredicate::*; + match (*self, *other) { + (Trait(_), Trait(_)) => Ordering::Equal, + (Projection(ref a), Projection(ref b)) => a.sort_key(tcx).cmp(&b.sort_key(tcx)), + (AutoTrait(ref a), AutoTrait(ref b)) => + tcx.lookup_trait_def(*a).def_path_hash.cmp(&tcx.lookup_trait_def(*b).def_path_hash), + (Trait(_), _) => Ordering::Less, + (Projection(_), Trait(_)) => Ordering::Greater, + (Projection(_), _) => Ordering::Less, + (AutoTrait(_), _) => Ordering::Greater, + } + } + +} + +impl<'a, 'gcx, 'tcx> Binder> { + pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) + -> ty::Predicate<'tcx> { + use ty::ToPredicate; + match *self.skip_binder() { + ExistentialPredicate::Trait(tr) => Binder(tr).with_self_ty(tcx, self_ty).to_predicate(), + ExistentialPredicate::Projection(p) => + ty::Predicate::Projection(Binder(p.with_self_ty(tcx, self_ty))), + ExistentialPredicate::AutoTrait(did) => { + let trait_ref = Binder(ty::TraitRef { + def_id: did, + substs: tcx.mk_substs_trait(self_ty, &[]), + }); + trait_ref.to_predicate() + } + } + } +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Slice> {} + +impl<'tcx> Slice> { + pub fn principal(&self) -> Option> { + match self.get(0) { + Some(&ExistentialPredicate::Trait(tr)) => Some(tr), + _ => None + } + } + + #[inline] + pub fn projection_bounds<'a>(&'a self) -> + impl Iterator> + 'a { + self.iter().filter_map(|predicate| { + match *predicate { + ExistentialPredicate::Projection(p) => Some(p), + _ => None, + } + }) + } + + #[inline] + pub fn auto_traits<'a>(&'a self) -> impl Iterator + 'a { + self.iter().filter_map(|predicate| { + match *predicate { + ExistentialPredicate::AutoTrait(d) => Some(d), + _ => None + } + }) + } +} + +impl<'tcx> Binder<&'tcx Slice>> { + pub fn principal(&self) -> Option> { + 
self.skip_binder().principal().map(Binder) + } + + #[inline] + pub fn projection_bounds<'a>(&'a self) -> + impl Iterator> + 'a { + self.skip_binder().projection_bounds().map(Binder) + } + + #[inline] + pub fn auto_traits<'a>(&'a self) -> impl Iterator + 'a { + self.skip_binder().auto_traits() + } + + pub fn iter<'a>(&'a self) + -> impl DoubleEndedIterator>> + 'tcx { + self.skip_binder().iter().cloned().map(Binder) + } +} + +/// A complete reference to a trait. These take numerous guises in syntax, +/// but perhaps the most recognizable form is in a where clause: +/// +/// T : Foo +/// +/// This would be represented by a trait-reference where the def-id is the +/// def-id for the trait `Foo` and the substs define `T` as parameter 0, +/// and `U` as parameter 1. +/// +/// Trait references also appear in object types like `Foo`, but in +/// that case the `Self` parameter is absent from the substitutions. +/// +/// Note that a `TraitRef` introduces a level of region binding, to +/// account for higher-ranked trait bounds like `T : for<'a> Foo<&'a +/// U>` or higher-ranked object types. 
+#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct TraitRef<'tcx> { + pub def_id: DefId, + pub substs: &'tcx Substs<'tcx>, +} + +pub type PolyTraitRef<'tcx> = Binder>; + +impl<'tcx> PolyTraitRef<'tcx> { + pub fn self_ty(&self) -> Ty<'tcx> { + self.0.self_ty() + } + + pub fn def_id(&self) -> DefId { + self.0.def_id + } + + pub fn substs(&self) -> &'tcx Substs<'tcx> { + // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> + self.0.substs + } + + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> + self.0.input_types() + } + + pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> { + // Note that we preserve binding levels + Binder(ty::TraitPredicate { trait_ref: self.0.clone() }) + } +} + +/// An existential reference to a trait, where `Self` is erased. +/// For example, the trait object `Trait<'a, 'b, X, Y>` is: +/// +/// exists T. T: Trait<'a, 'b, X, Y> +/// +/// The substitutions don't include the erased `Self`, only trait +/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ExistentialTraitRef<'tcx> { + pub def_id: DefId, + pub substs: &'tcx Substs<'tcx>, +} + +impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { + pub fn input_types<'b>(&'b self) -> impl DoubleEndedIterator> + 'b { + // Select only the "input types" from a trait-reference. For + // now this is all the types that appear in the + // trait-reference, but it should eventually exclude + // associated types. + self.substs.types() + } + + /// Object types don't have a self-type specified. Therefore, when + /// we convert the principal trait-ref into a normal trait-ref, + /// you must give *some* self-type. A common choice is `mk_err()` + /// or some skolemized type. 
+ pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) + -> ty::TraitRef<'tcx> { + // otherwise the escaping regions would be captured by the binder + assert!(!self_ty.has_escaping_regions()); + + ty::TraitRef { + def_id: self.def_id, + substs: tcx.mk_substs( + iter::once(Kind::from(self_ty)).chain(self.substs.iter().cloned())) + } + } +} + +pub type PolyExistentialTraitRef<'tcx> = Binder>; + +impl<'tcx> PolyExistentialTraitRef<'tcx> { + pub fn def_id(&self) -> DefId { + self.0.def_id + } + + pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator> + 'a { + // FIXME(#20664) every use of this fn is probably a bug, it should yield Binder<> + self.0.input_types() + } +} + +/// Binder is a binder for higher-ranked lifetimes. It is part of the +/// compiler's representation for things like `for<'a> Fn(&'a isize)` +/// (which would be represented by the type `PolyTraitRef == +/// Binder`). Note that when we skolemize, instantiate, +/// erase, or otherwise "discharge" these bound regions, we change the +/// type from `Binder` to just `T` (see +/// e.g. `liberate_late_bound_regions`). +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct Binder(pub T); + +impl Binder { + /// Skips the binder and returns the "bound" value. This is a + /// risky thing to do because it's easy to get confused about + /// debruijn indices and the like. It is usually better to + /// discharge the binder using `no_late_bound_regions` or + /// `replace_late_bound_regions` or something like + /// that. `skip_binder` is only valid when you are either + /// extracting data that has nothing to do with bound regions, you + /// are doing some sort of test that does not involve bound + /// regions, or you are being very careful about your depth + /// accounting. 
+ /// + /// Some examples where `skip_binder` is reasonable: + /// - extracting the def-id from a PolyTraitRef; + /// - comparing the self type of a PolyTraitRef to see if it is equal to + /// a type parameter `X`, since the type `X` does not reference any regions + pub fn skip_binder(&self) -> &T { + &self.0 + } + + pub fn as_ref(&self) -> Binder<&T> { + ty::Binder(&self.0) + } + + pub fn map_bound_ref(&self, f: F) -> Binder + where F: FnOnce(&T) -> U + { + self.as_ref().map_bound(f) + } + + pub fn map_bound(self, f: F) -> Binder + where F: FnOnce(T) -> U + { + ty::Binder(f(self.0)) + } +} + +impl fmt::Debug for TypeFlags { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:x}", self.bits) + } +} + +/// Represents the projection of an associated type. In explicit UFCS +/// form this would be written `>::N`. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct ProjectionTy<'tcx> { + /// The trait reference `T as Trait<..>`. + pub trait_ref: ty::TraitRef<'tcx>, + + /// The name `N` of the associated type. + pub item_name: Name, +} + +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct BareFnTy<'tcx> { + pub unsafety: hir::Unsafety, + pub abi: abi::Abi, + pub sig: PolyFnSig<'tcx>, +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx BareFnTy<'tcx> {} + +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ClosureTy<'tcx> { + pub unsafety: hir::Unsafety, + pub abi: abi::Abi, + pub sig: PolyFnSig<'tcx>, +} + +/// Signature of a function type, which I have arbitrarily +/// decided to use to refer to the input/output types. +/// +/// - `inputs` is the list of arguments and their modes. +/// - `output` is the return type. +/// - `variadic` indicates whether this is a variadic function. 
(only true for foreign fns) +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct FnSig<'tcx> { + pub inputs: Vec>, + pub output: Ty<'tcx>, + pub variadic: bool +} + +pub type PolyFnSig<'tcx> = Binder>; + +impl<'tcx> PolyFnSig<'tcx> { + pub fn inputs(&self) -> ty::Binder>> { + self.map_bound_ref(|fn_sig| fn_sig.inputs.clone()) + } + pub fn input(&self, index: usize) -> ty::Binder> { + self.map_bound_ref(|fn_sig| fn_sig.inputs[index]) + } + pub fn output(&self) -> ty::Binder> { + self.map_bound_ref(|fn_sig| fn_sig.output.clone()) + } + pub fn variadic(&self) -> bool { + self.skip_binder().variadic + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct ParamTy { + pub idx: u32, + pub name: Name, +} + +impl<'a, 'gcx, 'tcx> ParamTy { + pub fn new(index: u32, name: Name) -> ParamTy { + ParamTy { idx: index, name: name } + } + + pub fn for_self() -> ParamTy { + ParamTy::new(0, keywords::SelfType.name()) + } + + pub fn for_def(def: &ty::TypeParameterDef) -> ParamTy { + ParamTy::new(def.index, def.name) + } + + pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + tcx.mk_param(self.idx, self.name) + } + + pub fn is_self(&self) -> bool { + if self.name == keywords::SelfType.name() { + assert_eq!(self.idx, 0); + true + } else { + false + } + } +} + +/// A [De Bruijn index][dbi] is a standard means of representing +/// regions (and perhaps later types) in a higher-ranked setting. In +/// particular, imagine a type like this: +/// +/// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) +/// ^ ^ | | | +/// | | | | | +/// | +------------+ 1 | | +/// | | | +/// +--------------------------------+ 2 | +/// | | +/// +------------------------------------------+ 1 +/// +/// In this type, there are two binders (the outer fn and the inner +/// fn). We need to be able to determine, for any given region, which +/// fn type it is bound by, the inner or the outer one. 
There are +/// various ways you can do this, but a De Bruijn index is one of the +/// more convenient and has some nice properties. The basic idea is to +/// count the number of binders, inside out. Some examples should help +/// clarify what I mean. +/// +/// Let's start with the reference type `&'b isize` that is the first +/// argument to the inner function. This region `'b` is assigned a De +/// Bruijn index of 1, meaning "the innermost binder" (in this case, a +/// fn). The region `'a` that appears in the second argument type (`&'a +/// isize`) would then be assigned a De Bruijn index of 2, meaning "the +/// second-innermost binder". (These indices are written on the arrays +/// in the diagram). +/// +/// What is interesting is that De Bruijn index attached to a particular +/// variable will vary depending on where it appears. For example, +/// the final type `&'a char` also refers to the region `'a` declared on +/// the outermost fn. But this time, this reference is not nested within +/// any other binders (i.e., it is not an argument to the inner fn, but +/// rather the outer one). Therefore, in this case, it is assigned a +/// De Bruijn index of 1, because the innermost binder in that location +/// is the outer fn. +/// +/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, Copy)] +pub struct DebruijnIndex { + // We maintain the invariant that this is never 0. So 1 indicates + // the innermost binder. To ensure this, create with `DebruijnIndex::new`. + pub depth: u32, +} + +/// Representation of regions. +/// +/// Unlike types, most region variants are "fictitious", not concrete, +/// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only +/// ones representing concrete regions. +/// +/// ## Bound Regions +/// +/// These are regions that are stored behind a binder and must be substituted +/// with some concrete region before being used. 
There are 2 kind of +/// bound regions: early-bound, which are bound in an item's Generics, +/// and are substituted by a Substs, and late-bound, which are part of +/// higher-ranked types (e.g. `for<'a> fn(&'a ())`) and are substituted by +/// the likes of `liberate_late_bound_regions`. The distinction exists +/// because higher-ranked lifetimes aren't supported in all places. See [1][2]. +/// +/// Unlike TyParam-s, bound regions are not supposed to exist "in the wild" +/// outside their binder, e.g. in types passed to type inference, and +/// should first be substituted (by skolemized regions, free regions, +/// or region variables). +/// +/// ## Skolemized and Free Regions +/// +/// One often wants to work with bound regions without knowing their precise +/// identity. For example, when checking a function, the lifetime of a borrow +/// can end up being assigned to some region parameter. In these cases, +/// it must be ensured that bounds on the region can't be accidentally +/// assumed without being checked. +/// +/// The process of doing that is called "skolemization". The bound regions +/// are replaced by skolemized markers, which don't satisfy any relation +/// not explicity provided. +/// +/// There are 2 kinds of skolemized regions in rustc: `ReFree` and +/// `ReSkolemized`. When checking an item's body, `ReFree` is supposed +/// to be used. These also support explicit bounds: both the internally-stored +/// *scope*, which the region is assumed to outlive, as well as other +/// relations stored in the `FreeRegionMap`. Note that these relations +/// aren't checked when you `make_subregion` (or `eq_types`), only by +/// `resolve_regions_and_report_errors`. +/// +/// When working with higher-ranked types, some region relations aren't +/// yet known, so you can't just call `resolve_regions_and_report_errors`. +/// `ReSkolemized` is designed for this purpose. 
In these contexts, +/// there's also the risk that some inference variable laying around will +/// get unified with your skolemized region: if you want to check whether +/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a` +/// with a skolemized region `'%a`, the variable `'_` would just be +/// instantiated to the skolemized region `'%a`, which is wrong because +/// the inference variable is supposed to satisfy the relation +/// *for every value of the skolemized region*. To ensure that doesn't +/// happen, you can use `leak_check`. This is more clearly explained +/// by infer/higher_ranked/README.md. +/// +/// [1] http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ +/// [2] http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ +#[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable)] +pub enum Region { + // Region bound in a type or fn declaration which will be + // substituted 'early' -- that is, at the same time when type + // parameters are substituted. + ReEarlyBound(EarlyBoundRegion), + + // Region bound in a function scope, which will be substituted when the + // function is called. + ReLateBound(DebruijnIndex, BoundRegion), + + /// When checking a function body, the types of all arguments and so forth + /// that refer to bound region parameters are modified to refer to free + /// region parameters. + ReFree(FreeRegion), + + /// A concrete region naming some statically determined extent + /// (e.g. an expression or sequence of statements) within the + /// current function. + ReScope(region::CodeExtent), + + /// Static data that has an "infinite" lifetime. Top in the region lattice. + ReStatic, + + /// A region variable. Should not exist after typeck. + ReVar(RegionVid), + + /// A skolemized region - basically the higher-ranked version of ReFree. + /// Should not exist after typeck. 
+ ReSkolemized(SkolemizedRegionVid, BoundRegion), + + /// Empty lifetime is for data that is never accessed. + /// Bottom in the region lattice. We treat ReEmpty somewhat + /// specially; at least right now, we do not generate instances of + /// it during the GLB computations, but rather + /// generate an error instead. This is to improve error messages. + /// The only way to get an instance of ReEmpty is to have a region + /// variable with no constraints. + ReEmpty, + + /// Erased region, used by trait selection, in MIR and during trans. + ReErased, +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Region {} + +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] +pub struct EarlyBoundRegion { + pub index: u32, + pub name: Name, +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct TyVid { + pub index: u32, +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct IntVid { + pub index: u32 +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct FloatVid { + pub index: u32 +} + +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] +pub struct RegionVid { + pub index: u32 +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub struct SkolemizedRegionVid { + pub index: u32 +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +pub enum InferTy { + TyVar(TyVid), + IntVar(IntVid), + FloatVar(FloatVid), + + /// A `FreshTy` is one that is generated as a replacement for an + /// unbound type variable. This is convenient for caching etc. See + /// `infer::freshen` for more details. + FreshTy(u32), + FreshIntTy(u32), + FreshFloatTy(u32) +} + +/// A `ProjectionPredicate` for an `ExistentialTraitRef`. 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +pub struct ExistentialProjection<'tcx> { + pub trait_ref: ExistentialTraitRef<'tcx>, + pub item_name: Name, + pub ty: Ty<'tcx> +} + +pub type PolyExistentialProjection<'tcx> = Binder>; + +impl<'a, 'tcx, 'gcx> ExistentialProjection<'tcx> { + pub fn item_name(&self) -> Name { + self.item_name // safe to skip the binder to access a name + } + + pub fn sort_key(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> (u64, InternedString) { + // We want something here that is stable across crate boundaries. + // The DefId isn't but the `deterministic_hash` of the corresponding + // DefPath is. + let trait_def = tcx.lookup_trait_def(self.trait_ref.def_id); + let def_path_hash = trait_def.def_path_hash; + + // An `ast::Name` is also not stable (it's just an index into an + // interning table), so map to the corresponding `InternedString`. + let item_name = self.item_name.as_str(); + (def_path_hash, item_name) + } + + pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + self_ty: Ty<'tcx>) + -> ty::ProjectionPredicate<'tcx> + { + // otherwise the escaping regions would be captured by the binders + assert!(!self_ty.has_escaping_regions()); + + ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { + trait_ref: self.trait_ref.with_self_ty(tcx, self_ty), + item_name: self.item_name + }, + ty: self.ty + } + } +} + +impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { + pub fn item_name(&self) -> Name { + self.skip_binder().item_name() + } + + pub fn sort_key(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> (u64, InternedString) { + self.skip_binder().sort_key(tcx) + } + + pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) + -> ty::PolyProjectionPredicate<'tcx> { + self.map_bound(|p| p.with_self_ty(tcx, self_ty)) + } +} + +impl DebruijnIndex { + pub fn new(depth: u32) -> DebruijnIndex { + assert!(depth > 0); + DebruijnIndex { depth: depth } + } + + pub fn shifted(&self, amount: u32) 
-> DebruijnIndex { + DebruijnIndex { depth: self.depth + amount } + } +} + +// Region utilities +impl Region { + pub fn is_bound(&self) -> bool { + match *self { + ty::ReEarlyBound(..) => true, + ty::ReLateBound(..) => true, + _ => false + } + } + + pub fn needs_infer(&self) -> bool { + match *self { + ty::ReVar(..) | ty::ReSkolemized(..) => true, + _ => false + } + } + + pub fn escapes_depth(&self, depth: u32) -> bool { + match *self { + ty::ReLateBound(debruijn, _) => debruijn.depth > depth, + _ => false, + } + } + + /// Returns the depth of `self` from the (1-based) binding level `depth` + pub fn from_depth(&self, depth: u32) -> Region { + match *self { + ty::ReLateBound(debruijn, r) => ty::ReLateBound(DebruijnIndex { + depth: debruijn.depth - (depth - 1) + }, r), + r => r + } + } + + pub fn type_flags(&self) -> TypeFlags { + let mut flags = TypeFlags::empty(); + + match *self { + ty::ReVar(..) => { + flags = flags | TypeFlags::HAS_RE_INFER; + flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; + } + ty::ReSkolemized(..) => { + flags = flags | TypeFlags::HAS_RE_INFER; + flags = flags | TypeFlags::HAS_RE_SKOL; + flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; + } + ty::ReLateBound(..) => { } + ty::ReEarlyBound(..) 
=> { flags = flags | TypeFlags::HAS_RE_EARLY_BOUND; } + ty::ReStatic | ty::ReErased => { } + _ => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } + } + + match *self { + ty::ReStatic | ty::ReEmpty | ty::ReErased => (), + _ => flags = flags | TypeFlags::HAS_LOCAL_NAMES, + } + + debug!("type_flags({:?}) = {:?}", self, flags); + + flags + } +} + +// Type utilities +impl<'a, 'gcx, 'tcx> TyS<'tcx> { + pub fn as_opt_param_ty(&self) -> Option { + match self.sty { + ty::TyParam(ref d) => Some(d.clone()), + _ => None, + } + } + + pub fn is_nil(&self) -> bool { + match self.sty { + TyTuple(ref tys) => tys.is_empty(), + _ => false + } + } + + pub fn is_never(&self) -> bool { + match self.sty { + TyNever => true, + _ => false, + } + } + + /// Checks whether a type is uninhabited. + /// If `block` is `Some(id)` it also checks that the uninhabited-ness is visible from `id`. + pub fn is_uninhabited(&self, block: Option, cx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + let mut visited = FxHashSet::default(); + self.is_uninhabited_recurse(&mut visited, block, cx) + } + + pub fn is_uninhabited_recurse(&self, + visited: &mut FxHashSet<(DefId, &'tcx Substs<'tcx>)>, + block: Option, + cx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + match self.sty { + TyAdt(def, substs) => { + def.is_uninhabited_recurse(visited, block, cx, substs) + }, + + TyNever => true, + TyTuple(ref tys) => tys.iter().any(|ty| ty.is_uninhabited_recurse(visited, block, cx)), + TyArray(ty, len) => len > 0 && ty.is_uninhabited_recurse(visited, block, cx), + TyRef(_, ref tm) => tm.ty.is_uninhabited_recurse(visited, block, cx), + + _ => false, + } + } + + pub fn is_primitive(&self) -> bool { + match self.sty { + TyBool | TyChar | TyInt(_) | TyUint(_) | TyFloat(_) => true, + _ => false, + } + } + + pub fn is_ty_var(&self) -> bool { + match self.sty { + TyInfer(TyVar(_)) => true, + _ => false + } + } + + pub fn is_phantom_data(&self) -> bool { + if let TyAdt(def, _) = self.sty { + def.is_phantom_data() + } else { + false + } + } + + pub 
fn is_bool(&self) -> bool { self.sty == TyBool } + + pub fn is_param(&self, index: u32) -> bool { + match self.sty { + ty::TyParam(ref data) => data.idx == index, + _ => false, + } + } + + pub fn is_self(&self) -> bool { + match self.sty { + TyParam(ref p) => p.is_self(), + _ => false + } + } + + pub fn is_slice(&self) -> bool { + match self.sty { + TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty { + TySlice(_) | TyStr => true, + _ => false, + }, + _ => false + } + } + + pub fn is_structural(&self) -> bool { + match self.sty { + TyAdt(..) | TyTuple(..) | TyArray(..) | TyClosure(..) => true, + _ => self.is_slice() | self.is_trait() + } + } + + #[inline] + pub fn is_simd(&self) -> bool { + match self.sty { + TyAdt(def, _) => def.is_simd(), + _ => false + } + } + + pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + match self.sty { + TyArray(ty, _) | TySlice(ty) => ty, + TyStr => tcx.mk_mach_uint(ast::UintTy::U8), + _ => bug!("sequence_element_type called on non-sequence value: {}", self), + } + } + + pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + match self.sty { + TyAdt(def, substs) => { + def.struct_variant().fields[0].ty(tcx, substs) + } + _ => bug!("simd_type called on invalid type") + } + } + + pub fn simd_size(&self, _cx: TyCtxt) -> usize { + match self.sty { + TyAdt(def, _) => def.struct_variant().fields.len(), + _ => bug!("simd_size called on invalid type") + } + } + + pub fn is_region_ptr(&self) -> bool { + match self.sty { + TyRef(..) => true, + _ => false + } + } + + pub fn is_unsafe_ptr(&self) -> bool { + match self.sty { + TyRawPtr(_) => return true, + _ => return false + } + } + + pub fn is_unique(&self) -> bool { + match self.sty { + TyBox(_) => true, + _ => false + } + } + + /* + A scalar type is one that denotes an atomic datum, with no sub-components. + (A TyRawPtr is scalar because it represents a non-managed pointer, so its + contents are abstract to rustc.) 
+ */ + pub fn is_scalar(&self) -> bool { + match self.sty { + TyBool | TyChar | TyInt(_) | TyFloat(_) | TyUint(_) | + TyInfer(IntVar(_)) | TyInfer(FloatVar(_)) | + TyFnDef(..) | TyFnPtr(_) | TyRawPtr(_) => true, + _ => false + } + } + + /// Returns true if this type is a floating point type and false otherwise. + pub fn is_floating_point(&self) -> bool { + match self.sty { + TyFloat(_) | + TyInfer(FloatVar(_)) => true, + _ => false, + } + } + + pub fn is_trait(&self) -> bool { + match self.sty { + TyDynamic(..) => true, + _ => false + } + } + + pub fn is_integral(&self) -> bool { + match self.sty { + TyInfer(IntVar(_)) | TyInt(_) | TyUint(_) => true, + _ => false + } + } + + pub fn is_fresh(&self) -> bool { + match self.sty { + TyInfer(FreshTy(_)) => true, + TyInfer(FreshIntTy(_)) => true, + TyInfer(FreshFloatTy(_)) => true, + _ => false + } + } + + pub fn is_uint(&self) -> bool { + match self.sty { + TyInfer(IntVar(_)) | TyUint(ast::UintTy::Us) => true, + _ => false + } + } + + pub fn is_char(&self) -> bool { + match self.sty { + TyChar => true, + _ => false + } + } + + pub fn is_fp(&self) -> bool { + match self.sty { + TyInfer(FloatVar(_)) | TyFloat(_) => true, + _ => false + } + } + + pub fn is_numeric(&self) -> bool { + self.is_integral() || self.is_fp() + } + + pub fn is_signed(&self) -> bool { + match self.sty { + TyInt(_) => true, + _ => false + } + } + + pub fn is_machine(&self) -> bool { + match self.sty { + TyInt(ast::IntTy::Is) | TyUint(ast::UintTy::Us) => false, + TyInt(..) | TyUint(..) | TyFloat(..) => true, + _ => false + } + } + + pub fn has_concrete_skeleton(&self) -> bool { + match self.sty { + TyParam(_) | TyInfer(_) | TyError => false, + _ => true, + } + } + + // Returns the type and mutability of *ty. + // + // The parameter `explicit` indicates if this is an *explicit* dereference. + // Some types---notably unsafe ptrs---can only be dereferenced explicitly. 
+ pub fn builtin_deref(&self, explicit: bool, pref: ty::LvaluePreference) + -> Option> + { + match self.sty { + TyBox(ty) => { + Some(TypeAndMut { + ty: ty, + mutbl: if pref == ty::PreferMutLvalue { + hir::MutMutable + } else { + hir::MutImmutable + }, + }) + }, + TyRef(_, mt) => Some(mt), + TyRawPtr(mt) if explicit => Some(mt), + _ => None + } + } + + // Returns the type of ty[i] + pub fn builtin_index(&self) -> Option> { + match self.sty { + TyArray(ty, _) | TySlice(ty) => Some(ty), + _ => None + } + } + + pub fn fn_sig(&self) -> &'tcx PolyFnSig<'tcx> { + match self.sty { + TyFnDef(.., ref f) | TyFnPtr(ref f) => &f.sig, + _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self) + } + } + + /// Returns the ABI of the given function. + pub fn fn_abi(&self) -> abi::Abi { + match self.sty { + TyFnDef(.., ref f) | TyFnPtr(ref f) => f.abi, + _ => bug!("Ty::fn_abi() called on non-fn type"), + } + } + + // Type accessors for substructures of types + pub fn fn_args(&self) -> ty::Binder>> { + self.fn_sig().inputs() + } + + pub fn fn_ret(&self) -> Binder> { + self.fn_sig().output() + } + + pub fn is_fn(&self) -> bool { + match self.sty { + TyFnDef(..) | TyFnPtr(_) => true, + _ => false + } + } + + pub fn ty_to_def_id(&self) -> Option { + match self.sty { + TyDynamic(ref tt, ..) => tt.principal().map(|p| p.def_id()), + TyAdt(def, _) => Some(def.did), + TyClosure(id, _) => Some(id), + _ => None + } + } + + pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> { + match self.sty { + TyAdt(adt, _) => Some(adt), + _ => None + } + } + + /// Returns the regions directly referenced from this type (but + /// not types reachable from this type via `walk_tys`). This + /// ignores late-bound regions binders. 
+ pub fn regions(&self) -> Vec<&'tcx ty::Region> { + match self.sty { + TyRef(region, _) => { + vec![region] + } + TyDynamic(ref obj, region) => { + let mut v = vec![region]; + if let Some(p) = obj.principal() { + v.extend(p.skip_binder().substs.regions()); + } + v + } + TyAdt(_, substs) | TyAnon(_, substs) => { + substs.regions().collect() + } + TyClosure(_, ref substs) => { + substs.substs.regions().collect() + } + TyProjection(ref data) => { + data.trait_ref.substs.regions().collect() + } + TyFnDef(..) | + TyFnPtr(_) | + TyBool | + TyChar | + TyInt(_) | + TyUint(_) | + TyFloat(_) | + TyBox(_) | + TyStr | + TyArray(..) | + TySlice(_) | + TyRawPtr(_) | + TyNever | + TyTuple(_) | + TyParam(_) | + TyInfer(_) | + TyError => { + vec![] + } + } + } +} diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs new file mode 100644 index 0000000000000..d6f61a12a3c6e --- /dev/null +++ b/src/librustc/ty/subst.rs @@ -0,0 +1,601 @@ +// Copyright 2012 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Type substitutions. + +use hir::def_id::DefId; +use ty::{self, Slice, Ty, TyCtxt}; +use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; + +use serialize::{self, Encodable, Encoder, Decodable, Decoder}; +use syntax_pos::{Span, DUMMY_SP}; +use rustc_data_structures::accumulate_vec::AccumulateVec; + +use core::nonzero::NonZero; +use std::fmt; +use std::iter; +use std::marker::PhantomData; +use std::mem; + +/// An entity in the Rust typesystem, which can be one of +/// several kinds (only types and lifetimes for now). 
+/// To reduce memory usage, a `Kind` is a interned pointer, +/// with the lowest 2 bits being reserved for a tag to +/// indicate the type (`Ty` or `Region`) it points to. +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +pub struct Kind<'tcx> { + ptr: NonZero, + marker: PhantomData<(Ty<'tcx>, &'tcx ty::Region)> +} + +const TAG_MASK: usize = 0b11; +const TYPE_TAG: usize = 0b00; +const REGION_TAG: usize = 0b01; + +impl<'tcx> From> for Kind<'tcx> { + fn from(ty: Ty<'tcx>) -> Kind<'tcx> { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0); + + let ptr = ty as *const _ as usize; + Kind { + ptr: unsafe { + NonZero::new(ptr | TYPE_TAG) + }, + marker: PhantomData + } + } +} + +impl<'tcx> From<&'tcx ty::Region> for Kind<'tcx> { + fn from(r: &'tcx ty::Region) -> Kind<'tcx> { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(r) & TAG_MASK, 0); + + let ptr = r as *const _ as usize; + Kind { + ptr: unsafe { + NonZero::new(ptr | REGION_TAG) + }, + marker: PhantomData + } + } +} + +impl<'tcx> Kind<'tcx> { + #[inline] + unsafe fn downcast(self, tag: usize) -> Option<&'tcx T> { + let ptr = *self.ptr; + if ptr & TAG_MASK == tag { + Some(&*((ptr & !TAG_MASK) as *const _)) + } else { + None + } + } + + #[inline] + pub fn as_type(self) -> Option> { + unsafe { + self.downcast(TYPE_TAG) + } + } + + #[inline] + pub fn as_region(self) -> Option<&'tcx ty::Region> { + unsafe { + self.downcast(REGION_TAG) + } + } +} + +impl<'tcx> fmt::Debug for Kind<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + if let Some(ty) = self.as_type() { + write!(f, "{:?}", ty) + } else if let Some(r) = self.as_region() { + write!(f, "{:?}", r) + } else { + write!(f, "", *self.ptr as *const ()) + } + } +} + +impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + if let Some(ty) = self.as_type() { + Kind::from(ty.fold_with(folder)) + } else if let Some(r) = 
self.as_region() { + Kind::from(r.fold_with(folder)) + } else { + bug!() + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + if let Some(ty) = self.as_type() { + ty.visit_with(visitor) + } else if let Some(r) = self.as_region() { + r.visit_with(visitor) + } else { + bug!() + } + } +} + +impl<'tcx> Encodable for Kind<'tcx> { + fn encode(&self, e: &mut E) -> Result<(), E::Error> { + e.emit_enum("Kind", |e| { + if let Some(ty) = self.as_type() { + e.emit_enum_variant("Ty", TYPE_TAG, 1, |e| { + e.emit_enum_variant_arg(0, |e| ty.encode(e)) + }) + } else if let Some(r) = self.as_region() { + e.emit_enum_variant("Region", REGION_TAG, 1, |e| { + e.emit_enum_variant_arg(0, |e| r.encode(e)) + }) + } else { + bug!() + } + }) + } +} + +impl<'tcx> Decodable for Kind<'tcx> { + fn decode(d: &mut D) -> Result, D::Error> { + d.read_enum("Kind", |d| { + d.read_enum_variant(&["Ty", "Region"], |d, tag| { + match tag { + TYPE_TAG => Ty::decode(d).map(Kind::from), + REGION_TAG => <&ty::Region>::decode(d).map(Kind::from), + _ => Err(d.error("invalid Kind tag")) + } + }) + }) + } +} + +/// A substitution mapping type/region parameters to new values. +pub type Substs<'tcx> = Slice>; + +impl<'a, 'gcx, 'tcx> Substs<'tcx> { + /// Creates a Substs that maps each generic parameter to itself. + pub fn identity_for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) + -> &'tcx Substs<'tcx> { + Substs::for_item(tcx, def_id, |def, _| { + tcx.mk_region(ty::ReEarlyBound(def.to_early_bound_region_data())) + }, |def, _| tcx.mk_param_from_def(def)) + } + + /// Creates a Substs for generic parameter definitions, + /// by calling closures to obtain each region and type. + /// The closures get to observe the Substs as they're + /// being built, which can be used to correctly + /// substitute defaults of type parameters. 
+ pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + mut mk_region: FR, + mut mk_type: FT) + -> &'tcx Substs<'tcx> + where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &[Kind<'tcx>]) -> Ty<'tcx> { + let defs = tcx.item_generics(def_id); + let mut substs = Vec::with_capacity(defs.count()); + Substs::fill_item(&mut substs, tcx, defs, &mut mk_region, &mut mk_type); + tcx.intern_substs(&substs) + } + + pub fn extend_to(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + def_id: DefId, + mut mk_region: FR, + mut mk_type: FT) + -> &'tcx Substs<'tcx> + where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &[Kind<'tcx>]) -> Ty<'tcx> + { + let defs = tcx.item_generics(def_id); + let mut result = Vec::with_capacity(defs.count()); + result.extend(self[..].iter().cloned()); + Substs::fill_single(&mut result, defs, &mut mk_region, &mut mk_type); + tcx.intern_substs(&result) + } + + fn fill_item(substs: &mut Vec>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + defs: &ty::Generics<'tcx>, + mk_region: &mut FR, + mk_type: &mut FT) + where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &[Kind<'tcx>]) -> Ty<'tcx> { + + if let Some(def_id) = defs.parent { + let parent_defs = tcx.item_generics(def_id); + Substs::fill_item(substs, tcx, parent_defs, mk_region, mk_type); + } + Substs::fill_single(substs, defs, mk_region, mk_type) + } + + fn fill_single(substs: &mut Vec>, + defs: &ty::Generics<'tcx>, + mk_region: &mut FR, + mk_type: &mut FT) + where FR: FnMut(&ty::RegionParameterDef, &[Kind<'tcx>]) -> &'tcx ty::Region, + FT: FnMut(&ty::TypeParameterDef<'tcx>, &[Kind<'tcx>]) -> Ty<'tcx> { + // Handle Self first, before all regions. 
+ let mut types = defs.types.iter(); + if defs.parent.is_none() && defs.has_self { + let def = types.next().unwrap(); + let ty = mk_type(def, substs); + assert_eq!(def.index as usize, substs.len()); + substs.push(Kind::from(ty)); + } + + for def in &defs.regions { + let region = mk_region(def, substs); + assert_eq!(def.index as usize, substs.len()); + substs.push(Kind::from(region)); + } + + for def in types { + let ty = mk_type(def, substs); + assert_eq!(def.index as usize, substs.len()); + substs.push(Kind::from(ty)); + } + } + + pub fn is_noop(&self) -> bool { + self.is_empty() + } + + #[inline] + pub fn params(&self) -> &[Kind<'tcx>] { + // FIXME (dikaiosune) this should be removed, and corresponding compilation errors fixed + self + } + + #[inline] + pub fn types(&'a self) -> impl DoubleEndedIterator> + 'a { + self.iter().filter_map(|k| k.as_type()) + } + + #[inline] + pub fn regions(&'a self) -> impl DoubleEndedIterator + 'a { + self.iter().filter_map(|k| k.as_region()) + } + + #[inline] + pub fn type_at(&self, i: usize) -> Ty<'tcx> { + self[i].as_type().unwrap_or_else(|| { + bug!("expected type for param #{} in {:?}", i, self); + }) + } + + #[inline] + pub fn region_at(&self, i: usize) -> &'tcx ty::Region { + self[i].as_region().unwrap_or_else(|| { + bug!("expected region for param #{} in {:?}", i, self); + }) + } + + #[inline] + pub fn type_for_def(&self, ty_param_def: &ty::TypeParameterDef) -> Ty<'tcx> { + self.type_at(ty_param_def.index as usize) + } + + #[inline] + pub fn region_for_def(&self, def: &ty::RegionParameterDef) -> &'tcx ty::Region { + self.region_at(def.index as usize) + } + + /// Transform from substitutions for a child of `source_ancestor` + /// (e.g. a trait or impl) to substitutions for the same child + /// in a different item, with `target_substs` as the base for + /// the target impl/trait, with the source child-specific + /// parameters (e.g. method parameters) on top of that base. 
+ pub fn rebase_onto(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + source_ancestor: DefId, + target_substs: &Substs<'tcx>) + -> &'tcx Substs<'tcx> { + let defs = tcx.item_generics(source_ancestor); + tcx.mk_substs(target_substs.iter().chain(&self[defs.own_count()..]).cloned()) + } + + pub fn truncate_to(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, generics: &ty::Generics<'tcx>) + -> &'tcx Substs<'tcx> { + tcx.mk_substs(self.iter().take(generics.count()).cloned()) + } +} + +impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + let params: AccumulateVec<[_; 8]> = self.iter().map(|k| k.fold_with(folder)).collect(); + + // If folding doesn't change the substs, it's faster to avoid + // calling `mk_substs` and instead reuse the existing substs. + if params[..] == self[..] { + self + } else { + folder.tcx().intern_substs(¶ms) + } + } + + fn fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + folder.fold_substs(self) + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + self.iter().any(|t| t.visit_with(visitor)) + } +} + +impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Substs<'tcx> {} + +/////////////////////////////////////////////////////////////////////////// +// Public trait `Subst` +// +// Just call `foo.subst(tcx, substs)` to perform a substitution across +// `foo`. Or use `foo.subst_spanned(tcx, substs, Some(span))` when +// there is more information available (for better errors). 
+ +pub trait Subst<'tcx> : Sized { + fn subst<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &[Kind<'tcx>]) -> Self { + self.subst_spanned(tcx, substs, None) + } + + fn subst_spanned<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &[Kind<'tcx>], + span: Option) + -> Self; +} + +impl<'tcx, T:TypeFoldable<'tcx>> Subst<'tcx> for T { + fn subst_spanned<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &[Kind<'tcx>], + span: Option) + -> T + { + let mut folder = SubstFolder { tcx: tcx, + substs: substs, + span: span, + root_ty: None, + ty_stack_depth: 0, + region_binders_passed: 0 }; + (*self).fold_with(&mut folder) + } +} + +/////////////////////////////////////////////////////////////////////////// +// The actual substitution engine itself is a type folder. + +struct SubstFolder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + substs: &'a [Kind<'tcx>], + + // The location for which the substitution is performed, if available. + span: Option, + + // The root type that is being substituted, if available. + root_ty: Option>, + + // Depth of type stack + ty_stack_depth: usize, + + // Number of region binders we have passed through while doing the substitution + region_binders_passed: u32, +} + +impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } + + fn fold_binder>(&mut self, t: &ty::Binder) -> ty::Binder { + self.region_binders_passed += 1; + let t = t.super_fold_with(self); + self.region_binders_passed -= 1; + t + } + + fn fold_region(&mut self, r: &'tcx ty::Region) -> &'tcx ty::Region { + // Note: This routine only handles regions that are bound on + // type declarations and other outer declarations, not those + // bound in *fn types*. Region substitution of the bound + // regions that appear in a function signature is done using + // the specialized routine `ty::replace_late_regions()`. 
+ match *r { + ty::ReEarlyBound(data) => { + let r = self.substs.get(data.index as usize) + .and_then(|k| k.as_region()); + match r { + Some(r) => { + self.shift_region_through_binders(r) + } + None => { + let span = self.span.unwrap_or(DUMMY_SP); + span_bug!( + span, + "Region parameter out of range \ + when substituting in region {} (root type={:?}) \ + (index={})", + data.name, + self.root_ty, + data.index); + } + } + } + _ => r + } + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if !t.needs_subst() { + return t; + } + + // track the root type we were asked to substitute + let depth = self.ty_stack_depth; + if depth == 0 { + self.root_ty = Some(t); + } + self.ty_stack_depth += 1; + + let t1 = match t.sty { + ty::TyParam(p) => { + self.ty_for_param(p, t) + } + _ => { + t.super_fold_with(self) + } + }; + + assert_eq!(depth + 1, self.ty_stack_depth); + self.ty_stack_depth -= 1; + if depth == 0 { + self.root_ty = None; + } + + return t1; + } +} + +impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { + fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> { + // Look up the type in the substitutions. It really should be in there. + let opt_ty = self.substs.get(p.idx as usize) + .and_then(|k| k.as_type()); + let ty = match opt_ty { + Some(t) => t, + None => { + let span = self.span.unwrap_or(DUMMY_SP); + span_bug!( + span, + "Type parameter `{:?}` ({:?}/{}) out of range \ + when substituting (root type={:?}) substs={:?}", + p, + source_ty, + p.idx, + self.root_ty, + self.substs); + } + }; + + self.shift_regions_through_binders(ty) + } + + /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs + /// when we are substituting a type with escaping regions into a context where we have passed + /// through region binders. That's quite a mouthful. 
Let's see an example: + /// + /// ``` + /// type Func = fn(A); + /// type MetaFunc = for<'a> fn(Func<&'a int>) + /// ``` + /// + /// The type `MetaFunc`, when fully expanded, will be + /// + /// for<'a> fn(fn(&'a int)) + /// ^~ ^~ ^~~ + /// | | | + /// | | DebruijnIndex of 2 + /// Binders + /// + /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the + /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip + /// over the inner binder (remember that we count Debruijn indices from 1). However, in the + /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a + /// debruijn index of 1. It's only during the substitution that we can see we must increase the + /// depth by 1 to account for the binder that we passed through. + /// + /// As a second example, consider this twist: + /// + /// ``` + /// type FuncTuple = (A,fn(A)); + /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a int>) + /// ``` + /// + /// Here the final type will be: + /// + /// for<'a> fn((&'a int, fn(&'a int))) + /// ^~~ ^~~ + /// | | + /// DebruijnIndex of 1 | + /// DebruijnIndex of 2 + /// + /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the + /// first case we do not increase the Debruijn index and in the second case we do. The reason + /// is that only in the second case have we passed through a fn binder. 
+ fn shift_regions_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { + debug!("shift_regions(ty={:?}, region_binders_passed={:?}, has_escaping_regions={:?})", + ty, self.region_binders_passed, ty.has_escaping_regions()); + + if self.region_binders_passed == 0 || !ty.has_escaping_regions() { + return ty; + } + + let result = ty::fold::shift_regions(self.tcx(), self.region_binders_passed, &ty); + debug!("shift_regions: shifted result = {:?}", result); + + result + } + + fn shift_region_through_binders(&self, region: &'tcx ty::Region) -> &'tcx ty::Region { + self.tcx().mk_region(ty::fold::shift_region(*region, self.region_binders_passed)) + } +} + +// Helper methods that modify substitutions. + +impl<'a, 'gcx, 'tcx> ty::TraitRef<'tcx> { + pub fn from_method(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_id: DefId, + substs: &Substs<'tcx>) + -> ty::TraitRef<'tcx> { + let defs = tcx.item_generics(trait_id); + + ty::TraitRef { + def_id: trait_id, + substs: tcx.intern_substs(&substs[..defs.own_count()]) + } + } +} + +impl<'a, 'gcx, 'tcx> ty::ExistentialTraitRef<'tcx> { + pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>, + trait_ref: ty::TraitRef<'tcx>) + -> ty::ExistentialTraitRef<'tcx> { + // Assert there is a Self. + trait_ref.substs.type_at(0); + + ty::ExistentialTraitRef { + def_id: trait_ref.def_id, + substs: tcx.intern_substs(&trait_ref.substs[1..]) + } + } +} + +impl<'a, 'gcx, 'tcx> ty::PolyExistentialTraitRef<'tcx> { + /// Object types don't have a self-type specified. Therefore, when + /// we convert the principal trait-ref into a normal trait-ref, + /// you must give *some* self-type. A common choice is `mk_err()` + /// or some skolemized type. 
+ pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + self_ty: Ty<'tcx>) + -> ty::PolyTraitRef<'tcx> { + // otherwise the escaping regions would be captured by the binder + assert!(!self_ty.has_escaping_regions()); + + self.map_bound(|trait_ref| { + ty::TraitRef { + def_id: trait_ref.def_id, + substs: tcx.mk_substs( + iter::once(Kind::from(self_ty)).chain(trait_ref.substs.iter().cloned())) + } + }) + } +} diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs new file mode 100644 index 0000000000000..c6d862b23bd5e --- /dev/null +++ b/src/librustc/ty/trait_def.rs @@ -0,0 +1,263 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use dep_graph::DepNode; +use hir::def_id::DefId; +use traits::{self, specialization_graph}; +use ty; +use ty::fast_reject; +use ty::{Ty, TyCtxt, TraitRef}; +use std::cell::{Cell, RefCell}; +use hir; +use util::nodemap::FxHashMap; + +/// A trait's definition with type information. +pub struct TraitDef { + pub def_id: DefId, + + pub unsafety: hir::Unsafety, + + /// If `true`, then this trait had the `#[rustc_paren_sugar]` + /// attribute, indicating that it should be used with `Foo()` + /// sugar. This is a temporary thing -- eventually any trait wil + /// be usable with the sugar (or without it). + pub paren_sugar: bool, + + // Impls of a trait. To allow for quicker lookup, the impls are indexed by a + // simplified version of their `Self` type: impls with a simplifiable `Self` + // are stored in `nonblanket_impls` keyed by it, while all other impls are + // stored in `blanket_impls`. 
+ // + // A similar division is used within `specialization_graph`, but the ones + // here are (1) stored as a flat list for the trait and (2) populated prior + // to -- and used while -- determining specialization order. + // + // FIXME: solve the reentrancy issues and remove these lists in favor of the + // ones in `specialization_graph`. + // + // These lists are tracked by `DepNode::TraitImpls`; we don't use + // a DepTrackingMap but instead have the `TraitDef` insert the + // required reads/writes. + + /// Impls of the trait. + nonblanket_impls: RefCell< + FxHashMap> + >, + + /// Blanket impls associated with the trait. + blanket_impls: RefCell>, + + /// The specialization order for impls of this trait. + pub specialization_graph: RefCell, + + /// Various flags + pub flags: Cell, + + /// The ICH of this trait's DefPath, cached here so it doesn't have to be + /// recomputed all the time. + pub def_path_hash: u64, +} + +impl<'a, 'gcx, 'tcx> TraitDef { + pub fn new(def_id: DefId, + unsafety: hir::Unsafety, + paren_sugar: bool, + def_path_hash: u64) + -> TraitDef { + TraitDef { + def_id: def_id, + paren_sugar: paren_sugar, + unsafety: unsafety, + nonblanket_impls: RefCell::new(FxHashMap()), + blanket_impls: RefCell::new(vec![]), + flags: Cell::new(ty::TraitFlags::NO_TRAIT_FLAGS), + specialization_graph: RefCell::new(traits::specialization_graph::Graph::new()), + def_path_hash: def_path_hash, + } + } + + // returns None if not yet calculated + pub fn object_safety(&self) -> Option { + if self.flags.get().intersects(TraitFlags::OBJECT_SAFETY_VALID) { + Some(self.flags.get().intersects(TraitFlags::IS_OBJECT_SAFE)) + } else { + None + } + } + + pub fn set_object_safety(&self, is_safe: bool) { + assert!(self.object_safety().map(|cs| cs == is_safe).unwrap_or(true)); + self.flags.set( + self.flags.get() | if is_safe { + TraitFlags::OBJECT_SAFETY_VALID | TraitFlags::IS_OBJECT_SAFE + } else { + TraitFlags::OBJECT_SAFETY_VALID + } + ); + } + + fn write_trait_impls(&self, 
tcx: TyCtxt<'a, 'gcx, 'tcx>) { + tcx.dep_graph.write(DepNode::TraitImpls(self.def_id)); + } + + fn read_trait_impls(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) { + tcx.dep_graph.read(DepNode::TraitImpls(self.def_id)); + } + + /// Records a basic trait-to-implementation mapping. + /// + /// Returns `true` iff the impl has not previously been recorded. + fn record_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId, + impl_trait_ref: TraitRef<'tcx>) + -> bool { + debug!("TraitDef::record_impl for {:?}, from {:?}", + self, impl_trait_ref); + + // Record the write into the impl set, but only for local + // impls: external impls are handled differently. + if impl_def_id.is_local() { + self.write_trait_impls(tcx); + } + + // We don't want to borrow_mut after we already populated all impls, + // so check if an impl is present with an immutable borrow first. + if let Some(sty) = fast_reject::simplify_type(tcx, + impl_trait_ref.self_ty(), false) { + if let Some(is) = self.nonblanket_impls.borrow().get(&sty) { + if is.contains(&impl_def_id) { + return false; // duplicate - skip + } + } + + self.nonblanket_impls.borrow_mut().entry(sty).or_insert(vec![]).push(impl_def_id) + } else { + if self.blanket_impls.borrow().contains(&impl_def_id) { + return false; // duplicate - skip + } + self.blanket_impls.borrow_mut().push(impl_def_id) + } + + true + } + + /// Records a trait-to-implementation mapping for a crate-local impl. + pub fn record_local_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId, + impl_trait_ref: TraitRef<'tcx>) { + assert!(impl_def_id.is_local()); + let was_new = self.record_impl(tcx, impl_def_id, impl_trait_ref); + assert!(was_new); + } + + /// Records a trait-to-implementation mapping for a non-local impl. + /// + /// The `parent_impl` is the immediately-less-specialized impl, or the + /// trait's def ID if the impl is not a specialization -- information that + /// should be pulled from the metadata. 
+ pub fn record_remote_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId, + impl_trait_ref: TraitRef<'tcx>, + parent_impl: DefId) { + assert!(!impl_def_id.is_local()); + + // if the impl has not previously been recorded + if self.record_impl(tcx, impl_def_id, impl_trait_ref) { + // if the impl is non-local, it's placed directly into the + // specialization graph using parent information drawn from metadata. + self.specialization_graph.borrow_mut() + .record_impl_from_cstore(tcx, parent_impl, impl_def_id) + } + } + + /// Adds a local impl into the specialization graph, returning an error with + /// overlap information if the impl overlaps but does not specialize an + /// existing impl. + pub fn add_impl_for_specialization(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + impl_def_id: DefId) + -> Result<(), traits::OverlapError> { + assert!(impl_def_id.is_local()); + + self.specialization_graph.borrow_mut() + .insert(tcx, impl_def_id) + } + + pub fn ancestors(&'a self, of_impl: DefId) -> specialization_graph::Ancestors<'a> { + specialization_graph::ancestors(self, of_impl) + } + + pub fn for_each_impl(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, mut f: F) { + self.read_trait_impls(tcx); + tcx.populate_implementations_for_trait_if_necessary(self.def_id); + + for &impl_def_id in self.blanket_impls.borrow().iter() { + f(impl_def_id); + } + + for v in self.nonblanket_impls.borrow().values() { + for &impl_def_id in v { + f(impl_def_id); + } + } + } + + /// Iterate over every impl that could possibly match the + /// self-type `self_ty`. + pub fn for_each_relevant_impl(&self, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + self_ty: Ty<'tcx>, + mut f: F) + { + self.read_trait_impls(tcx); + + tcx.populate_implementations_for_trait_if_necessary(self.def_id); + + for &impl_def_id in self.blanket_impls.borrow().iter() { + f(impl_def_id); + } + + // simplify_type(.., false) basically replaces type parameters and + // projections with infer-variables. 
This is, of course, done on + // the impl trait-ref when it is instantiated, but not on the + // predicate trait-ref which is passed here. + // + // for example, if we match `S: Copy` against an impl like + // `impl Copy for Option`, we replace the type variable + // in `Option` with an infer variable, to `Option<_>` (this + // doesn't actually change fast_reject output), but we don't + // replace `S` with anything - this impl of course can't be + // selected, and as there are hundreds of similar impls, + // considering them would significantly harm performance. + if let Some(simp) = fast_reject::simplify_type(tcx, self_ty, true) { + if let Some(impls) = self.nonblanket_impls.borrow().get(&simp) { + for &impl_def_id in impls { + f(impl_def_id); + } + } + } else { + for v in self.nonblanket_impls.borrow().values() { + for &impl_def_id in v { + f(impl_def_id); + } + } + } + } +} + +bitflags! { + flags TraitFlags: u32 { + const NO_TRAIT_FLAGS = 0, + const HAS_DEFAULT_IMPL = 1 << 0, + const IS_OBJECT_SAFE = 1 << 1, + const OBJECT_SAFETY_VALID = 1 << 2, + const IMPLS_VALID = 1 << 3, + } +} diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs new file mode 100644 index 0000000000000..6bb9d67db6f65 --- /dev/null +++ b/src/librustc/ty/util.rs @@ -0,0 +1,875 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! misc. 
type-system utilities too small to deserve their own file + +use hir::def_id::DefId; +use hir::map::DefPathData; +use infer::InferCtxt; +use hir::map as ast_map; +use traits::{self, Reveal}; +use ty::{self, Ty, AdtKind, TyCtxt, TypeAndMut, TypeFlags, TypeFoldable}; +use ty::{Disr, ParameterEnvironment}; +use ty::fold::TypeVisitor; +use ty::layout::{Layout, LayoutError}; +use ty::TypeVariants::*; +use util::nodemap::FxHashMap; +use middle::lang_items; + +use rustc_const_math::{ConstInt, ConstIsize, ConstUsize}; + +use std::cell::RefCell; +use std::cmp; +use std::hash::{Hash, Hasher}; +use std::collections::hash_map::DefaultHasher; +use std::intrinsics; +use syntax::ast::{self, Name}; +use syntax::attr::{self, SignedInt, UnsignedInt}; +use syntax_pos::Span; + +use hir; + +pub trait IntTypeExt { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>; + fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option) + -> Option; + fn assert_ty_matches(&self, val: Disr); + fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr; +} + +impl IntTypeExt for attr::IntType { + fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + match *self { + SignedInt(ast::IntTy::I8) => tcx.types.i8, + SignedInt(ast::IntTy::I16) => tcx.types.i16, + SignedInt(ast::IntTy::I32) => tcx.types.i32, + SignedInt(ast::IntTy::I64) => tcx.types.i64, + SignedInt(ast::IntTy::Is) => tcx.types.isize, + UnsignedInt(ast::UintTy::U8) => tcx.types.u8, + UnsignedInt(ast::UintTy::U16) => tcx.types.u16, + UnsignedInt(ast::UintTy::U32) => tcx.types.u32, + UnsignedInt(ast::UintTy::U64) => tcx.types.u64, + UnsignedInt(ast::UintTy::Us) => tcx.types.usize, + } + } + + fn initial_discriminant<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Disr { + match *self { + SignedInt(ast::IntTy::I8) => ConstInt::I8(0), + SignedInt(ast::IntTy::I16) => ConstInt::I16(0), + SignedInt(ast::IntTy::I32) => ConstInt::I32(0), + SignedInt(ast::IntTy::I64) => 
ConstInt::I64(0), + SignedInt(ast::IntTy::Is) => match tcx.sess.target.int_type { + ast::IntTy::I16 => ConstInt::Isize(ConstIsize::Is16(0)), + ast::IntTy::I32 => ConstInt::Isize(ConstIsize::Is32(0)), + ast::IntTy::I64 => ConstInt::Isize(ConstIsize::Is64(0)), + _ => bug!(), + }, + UnsignedInt(ast::UintTy::U8) => ConstInt::U8(0), + UnsignedInt(ast::UintTy::U16) => ConstInt::U16(0), + UnsignedInt(ast::UintTy::U32) => ConstInt::U32(0), + UnsignedInt(ast::UintTy::U64) => ConstInt::U64(0), + UnsignedInt(ast::UintTy::Us) => match tcx.sess.target.uint_type { + ast::UintTy::U16 => ConstInt::Usize(ConstUsize::Us16(0)), + ast::UintTy::U32 => ConstInt::Usize(ConstUsize::Us32(0)), + ast::UintTy::U64 => ConstInt::Usize(ConstUsize::Us64(0)), + _ => bug!(), + }, + } + } + + fn assert_ty_matches(&self, val: Disr) { + match (*self, val) { + (SignedInt(ast::IntTy::I8), ConstInt::I8(_)) => {}, + (SignedInt(ast::IntTy::I16), ConstInt::I16(_)) => {}, + (SignedInt(ast::IntTy::I32), ConstInt::I32(_)) => {}, + (SignedInt(ast::IntTy::I64), ConstInt::I64(_)) => {}, + (SignedInt(ast::IntTy::Is), ConstInt::Isize(_)) => {}, + (UnsignedInt(ast::UintTy::U8), ConstInt::U8(_)) => {}, + (UnsignedInt(ast::UintTy::U16), ConstInt::U16(_)) => {}, + (UnsignedInt(ast::UintTy::U32), ConstInt::U32(_)) => {}, + (UnsignedInt(ast::UintTy::U64), ConstInt::U64(_)) => {}, + (UnsignedInt(ast::UintTy::Us), ConstInt::Usize(_)) => {}, + _ => bug!("disr type mismatch: {:?} vs {:?}", self, val), + } + } + + fn disr_incr<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, val: Option) + -> Option { + if let Some(val) = val { + self.assert_ty_matches(val); + (val + ConstInt::Infer(1)).ok() + } else { + Some(self.initial_discriminant(tcx)) + } + } +} + + +#[derive(Copy, Clone)] +pub enum CopyImplementationError { + InfrigingField(Name), + InfrigingVariant(Name), + NotAnAdt, + HasDestructor +} + +/// Describes whether a type is representable. 
For types that are not +/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to +/// distinguish between types that are recursive with themselves and types that +/// contain a different recursive type. These cases can therefore be treated +/// differently when reporting errors. +/// +/// The ordering of the cases is significant. They are sorted so that cmp::max +/// will keep the "more erroneous" of two values. +#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] +pub enum Representability { + Representable, + ContainsRecursive, + SelfRecursive, +} + +impl<'tcx> ParameterEnvironment<'tcx> { + pub fn can_type_implement_copy<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + self_type: Ty<'tcx>, span: Span) + -> Result<(),CopyImplementationError> { + // FIXME: (@jroesch) float this code up + tcx.infer_ctxt(None, Some(self.clone()), Reveal::ExactMatch).enter(|infcx| { + let adt = match self_type.sty { + ty::TyAdt(adt, substs) => match adt.adt_kind() { + AdtKind::Struct | AdtKind::Union => { + for field in adt.all_fields() { + let field_ty = field.ty(tcx, substs); + if infcx.type_moves_by_default(field_ty, span) { + return Err(CopyImplementationError::InfrigingField( + field.name)) + } + } + adt + } + AdtKind::Enum => { + for variant in &adt.variants { + for field in &variant.fields { + let field_ty = field.ty(tcx, substs); + if infcx.type_moves_by_default(field_ty, span) { + return Err(CopyImplementationError::InfrigingVariant( + variant.name)) + } + } + } + adt + } + }, + _ => return Err(CopyImplementationError::NotAnAdt) + }; + + if adt.has_dtor() { + return Err(CopyImplementationError::HasDestructor); + } + + Ok(()) + }) + } +} + +impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { + pub fn has_error_field(self, ty: Ty<'tcx>) -> bool { + match ty.sty { + ty::TyAdt(def, substs) => { + for field in def.all_fields() { + let field_ty = field.ty(self, substs); + if let TyError = field_ty.sty { + return true; + } + } + } + _ => () + } + false + } + + /// 
Returns the type of element at index `i` in tuple or tuple-like type `t`. + /// For an enum `t`, `variant` is None only if `t` is a univariant enum. + pub fn positional_element_ty(self, + ty: Ty<'tcx>, + i: usize, + variant: Option) -> Option> { + match (&ty.sty, variant) { + (&TyAdt(adt, substs), Some(vid)) => { + adt.variant_with_id(vid).fields.get(i).map(|f| f.ty(self, substs)) + } + (&TyAdt(adt, substs), None) => { + // Don't use `struct_variant`, this may be a univariant enum. + adt.variants[0].fields.get(i).map(|f| f.ty(self, substs)) + } + (&TyTuple(ref v), None) => v.get(i).cloned(), + _ => None + } + } + + /// Returns the type of element at field `n` in struct or struct-like type `t`. + /// For an enum `t`, `variant` must be some def id. + pub fn named_element_ty(self, + ty: Ty<'tcx>, + n: Name, + variant: Option) -> Option> { + match (&ty.sty, variant) { + (&TyAdt(adt, substs), Some(vid)) => { + adt.variant_with_id(vid).find_field_named(n).map(|f| f.ty(self, substs)) + } + (&TyAdt(adt, substs), None) => { + adt.struct_variant().find_field_named(n).map(|f| f.ty(self, substs)) + } + _ => return None + } + } + + /// Returns the IntType representation. + /// This used to ensure `int_ty` doesn't contain `usize` and `isize` + /// by converting them to their actual types. That doesn't happen anymore. + pub fn enum_repr_type(self, opt_hint: Option<&attr::ReprAttr>) -> attr::IntType { + match opt_hint { + // Feed in the given type + Some(&attr::ReprInt(int_t)) => int_t, + // ... but provide sensible default if none provided + // + // NB. Historically `fn enum_variants` generate i64 here, while + // rustc_typeck::check would generate isize. + _ => SignedInt(ast::IntTy::Is), + } + } + + /// Returns the deeply last field of nested structures, or the same type, + /// if not a structure at all. Corresponds to the only possible unsized + /// field, and its type can be used to determine unsizing strategy. 
+ pub fn struct_tail(self, mut ty: Ty<'tcx>) -> Ty<'tcx> { + while let TyAdt(def, substs) = ty.sty { + if !def.is_struct() { + break + } + match def.struct_variant().fields.last() { + Some(f) => ty = f.ty(self, substs), + None => break + } + } + ty + } + + /// Same as applying struct_tail on `source` and `target`, but only + /// keeps going as long as the two types are instances of the same + /// structure definitions. + /// For `(Foo>, Foo)`, the result will be `(Foo, Trait)`, + /// whereas struct_tail produces `T`, and `Trait`, respectively. + pub fn struct_lockstep_tails(self, + source: Ty<'tcx>, + target: Ty<'tcx>) + -> (Ty<'tcx>, Ty<'tcx>) { + let (mut a, mut b) = (source, target); + while let (&TyAdt(a_def, a_substs), &TyAdt(b_def, b_substs)) = (&a.sty, &b.sty) { + if a_def != b_def || !a_def.is_struct() { + break + } + match a_def.struct_variant().fields.last() { + Some(f) => { + a = f.ty(self, a_substs); + b = f.ty(self, b_substs); + } + _ => break + } + } + (a, b) + } + + /// Given a set of predicates that apply to an object type, returns + /// the region bounds that the (erased) `Self` type must + /// outlive. Precisely *because* the `Self` type is erased, the + /// parameter `erased_self_ty` must be supplied to indicate what type + /// has been used to represent `Self` in the predicates + /// themselves. This should really be a unique type; `FreshTy(0)` is a + /// popular choice. + /// + /// NB: in some cases, particularly around higher-ranked bounds, + /// this function returns a kind of conservative approximation. + /// That is, all regions returned by this function are definitely + /// required, but there may be other region bounds that are not + /// returned, as well as requirements like `for<'a> T: 'a`. + /// + /// Requires that trait definitions have been processed so that we can + /// elaborate predicates and walk supertraits. 
+ pub fn required_region_bounds(self, + erased_self_ty: Ty<'tcx>, + predicates: Vec>) + -> Vec<&'tcx ty::Region> { + debug!("required_region_bounds(erased_self_ty={:?}, predicates={:?})", + erased_self_ty, + predicates); + + assert!(!erased_self_ty.has_escaping_regions()); + + traits::elaborate_predicates(self, predicates) + .filter_map(|predicate| { + match predicate { + ty::Predicate::Projection(..) | + ty::Predicate::Trait(..) | + ty::Predicate::Equate(..) | + ty::Predicate::WellFormed(..) | + ty::Predicate::ObjectSafe(..) | + ty::Predicate::ClosureKind(..) | + ty::Predicate::RegionOutlives(..) => { + None + } + ty::Predicate::TypeOutlives(ty::Binder(ty::OutlivesPredicate(t, r))) => { + // Search for a bound of the form `erased_self_ty + // : 'a`, but be wary of something like `for<'a> + // erased_self_ty : 'a` (we interpret a + // higher-ranked bound like that as 'static, + // though at present the code in `fulfill.rs` + // considers such bounds to be unsatisfiable, so + // it's kind of a moot point since you could never + // construct such an object, but this seems + // correct even if that code changes). + if t == erased_self_ty && !r.has_escaping_regions() { + Some(r) + } else { + None + } + } + } + }) + .collect() + } + + /// Creates a hash of the type `Ty` which will be the same no matter what crate + /// context it's calculated within. This is used by the `type_id` intrinsic. + pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 { + let mut hasher = TypeIdHasher::new(self, DefaultHasher::default()); + hasher.visit_ty(ty); + hasher.finish() + } + + /// Returns true if this ADT is a dtorck type. + /// + /// Invoking the destructor of a dtorck type during usual cleanup + /// (e.g. the glue emitted for stack unwinding) requires all + /// lifetimes in the type-structure of `adt` to strictly outlive + /// the adt value itself. 
+ /// + /// If `adt` is not dtorck, then the adt's destructor can be + /// invoked even when there are lifetimes in the type-structure of + /// `adt` that do not strictly outlive the adt value itself. + /// (This allows programs to make cyclic structures without + /// resorting to unasfe means; see RFCs 769 and 1238). + pub fn is_adt_dtorck(self, adt: &ty::AdtDef) -> bool { + let dtor_method = match adt.destructor() { + Some(dtor) => dtor, + None => return false + }; + + // RFC 1238: if the destructor method is tagged with the + // attribute `unsafe_destructor_blind_to_params`, then the + // compiler is being instructed to *assume* that the + // destructor will not access borrowed data, + // even if such data is otherwise reachable. + // + // Such access can be in plain sight (e.g. dereferencing + // `*foo.0` of `Foo<'a>(&'a u32)`) or indirectly hidden + // (e.g. calling `foo.0.clone()` of `Foo`). + return !self.has_attr(dtor_method, "unsafe_destructor_blind_to_params"); + } + + pub fn closure_base_def_id(&self, def_id: DefId) -> DefId { + let mut def_id = def_id; + while self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr { + def_id = self.parent_def_id(def_id).unwrap_or_else(|| { + bug!("closure {:?} has no parent", def_id); + }); + } + def_id + } +} + +/// When hashing a type this ends up affecting properties like symbol names. We +/// want these symbol names to be calculated independent of other factors like +/// what architecture you're compiling *from*. +/// +/// The hashing just uses the standard `Hash` trait, but the implementations of +/// `Hash` for the `usize` and `isize` types are *not* architecture independent +/// (e.g. they has 4 or 8 bytes). As a result we want to avoid `usize` and +/// `isize` completely when hashing. To ensure that these don't leak in we use a +/// custom hasher implementation here which inflates the size of these to a `u64` +/// and `i64`. 
+/// +/// The same goes for endianess: We always convert multi-byte integers to little +/// endian before hashing. +#[derive(Debug)] +pub struct ArchIndependentHasher { + inner: H, +} + +impl ArchIndependentHasher { + pub fn new(inner: H) -> ArchIndependentHasher { + ArchIndependentHasher { inner: inner } + } + + pub fn into_inner(self) -> H { + self.inner + } +} + +impl Hasher for ArchIndependentHasher { + fn write(&mut self, bytes: &[u8]) { + self.inner.write(bytes) + } + + fn finish(&self) -> u64 { + self.inner.finish() + } + + fn write_u8(&mut self, i: u8) { + self.inner.write_u8(i) + } + fn write_u16(&mut self, i: u16) { + self.inner.write_u16(i.to_le()) + } + fn write_u32(&mut self, i: u32) { + self.inner.write_u32(i.to_le()) + } + fn write_u64(&mut self, i: u64) { + self.inner.write_u64(i.to_le()) + } + fn write_usize(&mut self, i: usize) { + self.inner.write_u64((i as u64).to_le()) + } + fn write_i8(&mut self, i: i8) { + self.inner.write_i8(i) + } + fn write_i16(&mut self, i: i16) { + self.inner.write_i16(i.to_le()) + } + fn write_i32(&mut self, i: i32) { + self.inner.write_i32(i.to_le()) + } + fn write_i64(&mut self, i: i64) { + self.inner.write_i64(i.to_le()) + } + fn write_isize(&mut self, i: isize) { + self.inner.write_i64((i as i64).to_le()) + } +} + +pub struct TypeIdHasher<'a, 'gcx: 'a+'tcx, 'tcx: 'a, H> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + state: ArchIndependentHasher, +} + +impl<'a, 'gcx, 'tcx, H: Hasher> TypeIdHasher<'a, 'gcx, 'tcx, H> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, state: H) -> Self { + TypeIdHasher { + tcx: tcx, + state: ArchIndependentHasher::new(state), + } + } + + pub fn hash(&mut self, x: T) { + x.hash(&mut self.state); + } + + pub fn finish(self) -> u64 { + self.state.finish() + } + + fn hash_discriminant_u8(&mut self, x: &T) { + let v = unsafe { + intrinsics::discriminant_value(x) + }; + let b = v as u8; + assert_eq!(v, b as u64); + self.hash(b) + } + + fn def_id(&mut self, did: DefId) { + // Hash the DefPath corresponding to 
the DefId, which is independent + // of compiler internal state. + let path = self.tcx.def_path(did); + self.def_path(&path) + } + + pub fn def_path(&mut self, def_path: &ast_map::DefPath) { + def_path.deterministic_hash_to(self.tcx, &mut self.state); + } + + pub fn into_inner(self) -> H { + self.state.inner + } +} + +impl<'a, 'gcx, 'tcx, H: Hasher> TypeVisitor<'tcx> for TypeIdHasher<'a, 'gcx, 'tcx, H> { + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + // Distinguish between the Ty variants uniformly. + self.hash_discriminant_u8(&ty.sty); + + match ty.sty { + TyInt(i) => self.hash(i), + TyUint(u) => self.hash(u), + TyFloat(f) => self.hash(f), + TyArray(_, n) => self.hash(n), + TyRawPtr(m) | + TyRef(_, m) => self.hash(m.mutbl), + TyClosure(def_id, _) | + TyAnon(def_id, _) | + TyFnDef(def_id, ..) => self.def_id(def_id), + TyAdt(d, _) => self.def_id(d.did), + TyFnPtr(f) => { + self.hash(f.unsafety); + self.hash(f.abi); + self.hash(f.sig.variadic()); + self.hash(f.sig.inputs().skip_binder().len()); + } + TyDynamic(ref data, ..) => { + if let Some(p) = data.principal() { + self.def_id(p.def_id()); + } + for d in data.auto_traits() { + self.def_id(d); + } + } + TyTuple(tys) => { + self.hash(tys.len()); + } + TyParam(p) => { + self.hash(p.idx); + self.hash(p.name.as_str()); + } + TyProjection(ref data) => { + self.def_id(data.trait_ref.def_id); + self.hash(data.item_name.as_str()); + } + TyNever | + TyBool | + TyChar | + TyStr | + TyBox(_) | + TySlice(_) => {} + + TyError | + TyInfer(_) => bug!("TypeIdHasher: unexpected type {}", ty) + } + + ty.super_visit_with(self) + } + + fn visit_region(&mut self, r: &'tcx ty::Region) -> bool { + match *r { + ty::ReErased => { + self.hash::(0); + } + ty::ReLateBound(db, ty::BrAnon(i)) => { + assert!(db.depth > 0); + self.hash::(db.depth); + self.hash(i); + } + ty::ReStatic | + ty::ReEmpty | + ty::ReEarlyBound(..) | + ty::ReLateBound(..) | + ty::ReFree(..) | + ty::ReScope(..) | + ty::ReVar(..) | + ty::ReSkolemized(..) 
=> { + bug!("TypeIdHasher: unexpected region {:?}", r) + } + } + false + } + + fn visit_binder>(&mut self, x: &ty::Binder) -> bool { + // Anonymize late-bound regions so that, for example: + // `for<'a, b> fn(&'a &'b T)` and `for<'a, b> fn(&'b &'a T)` + // result in the same TypeId (the two types are equivalent). + self.tcx.anonymize_late_bound_regions(x).super_visit_with(self) + } +} + +impl<'a, 'tcx> ty::TyS<'tcx> { + fn impls_bound(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>, + def_id: DefId, + cache: &RefCell, bool>>, + span: Span) -> bool + { + if self.has_param_types() || self.has_self_ty() { + if let Some(result) = cache.borrow().get(self) { + return *result; + } + } + let result = + tcx.infer_ctxt(None, Some(param_env.clone()), Reveal::ExactMatch) + .enter(|infcx| { + traits::type_known_to_meet_bound(&infcx, self, def_id, span) + }); + if self.has_param_types() || self.has_self_ty() { + cache.borrow_mut().insert(self, result); + } + return result; + } + + // FIXME (@jroesch): I made this public to use it, not sure if should be private + pub fn moves_by_default(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>, + span: Span) -> bool { + if self.flags.get().intersects(TypeFlags::MOVENESS_CACHED) { + return self.flags.get().intersects(TypeFlags::MOVES_BY_DEFAULT); + } + + assert!(!self.needs_infer()); + + // Fast-path for primitive types + let result = match self.sty { + TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | TyNever | + TyRawPtr(..) | TyFnDef(..) | TyFnPtr(_) | TyRef(_, TypeAndMut { + mutbl: hir::MutImmutable, .. + }) => Some(false), + + TyStr | TyBox(..) | TyRef(_, TypeAndMut { + mutbl: hir::MutMutable, .. + }) => Some(true), + + TyArray(..) | TySlice(..) | TyDynamic(..) | TyTuple(..) | + TyClosure(..) | TyAdt(..) | TyAnon(..) | + TyProjection(..) | TyParam(..) | TyInfer(..) 
| TyError => None + }.unwrap_or_else(|| { + !self.impls_bound(tcx, param_env, + tcx.require_lang_item(lang_items::CopyTraitLangItem), + ¶m_env.is_copy_cache, span) }); + + if !self.has_param_types() && !self.has_self_ty() { + self.flags.set(self.flags.get() | if result { + TypeFlags::MOVENESS_CACHED | TypeFlags::MOVES_BY_DEFAULT + } else { + TypeFlags::MOVENESS_CACHED + }); + } + + result + } + + #[inline] + pub fn is_sized(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>, + span: Span) -> bool + { + if self.flags.get().intersects(TypeFlags::SIZEDNESS_CACHED) { + return self.flags.get().intersects(TypeFlags::IS_SIZED); + } + + self.is_sized_uncached(tcx, param_env, span) + } + + fn is_sized_uncached(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: &ParameterEnvironment<'tcx>, + span: Span) -> bool { + assert!(!self.needs_infer()); + + // Fast-path for primitive types + let result = match self.sty { + TyBool | TyChar | TyInt(..) | TyUint(..) | TyFloat(..) | + TyBox(..) | TyRawPtr(..) | TyRef(..) | TyFnDef(..) | TyFnPtr(_) | + TyArray(..) | TyTuple(..) | TyClosure(..) | TyNever => Some(true), + + TyStr | TyDynamic(..) | TySlice(_) => Some(false), + + TyAdt(..) | TyProjection(..) | TyParam(..) | + TyInfer(..) | TyAnon(..) 
| TyError => None + }.unwrap_or_else(|| { + self.impls_bound(tcx, param_env, tcx.require_lang_item(lang_items::SizedTraitLangItem), + ¶m_env.is_sized_cache, span) }); + + if !self.has_param_types() && !self.has_self_ty() { + self.flags.set(self.flags.get() | if result { + TypeFlags::SIZEDNESS_CACHED | TypeFlags::IS_SIZED + } else { + TypeFlags::SIZEDNESS_CACHED + }); + } + + result + } + + #[inline] + pub fn layout<'lcx>(&'tcx self, infcx: &InferCtxt<'a, 'tcx, 'lcx>) + -> Result<&'tcx Layout, LayoutError<'tcx>> { + let tcx = infcx.tcx.global_tcx(); + let can_cache = !self.has_param_types() && !self.has_self_ty(); + if can_cache { + if let Some(&cached) = tcx.layout_cache.borrow().get(&self) { + return Ok(cached); + } + } + + let rec_limit = tcx.sess.recursion_limit.get(); + let depth = tcx.layout_depth.get(); + if depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", self)); + } + + tcx.layout_depth.set(depth+1); + let layout = Layout::compute_uncached(self, infcx)?; + if can_cache { + tcx.layout_cache.borrow_mut().insert(self, layout); + } + tcx.layout_depth.set(depth); + Ok(layout) + } + + + /// Check whether a type is representable. This means it cannot contain unboxed + /// structural recursion. This check is needed for structs and enums. 
+ pub fn is_representable(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span) + -> Representability { + + // Iterate until something non-representable is found + fn find_nonrepresentable<'a, 'tcx, It>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + sp: Span, + seen: &mut Vec>, + iter: It) + -> Representability + where It: Iterator> { + iter.fold(Representability::Representable, + |r, ty| cmp::max(r, is_type_structurally_recursive(tcx, sp, seen, ty))) + } + + fn are_inner_types_recursive<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, + seen: &mut Vec>, ty: Ty<'tcx>) + -> Representability { + match ty.sty { + TyTuple(ref ts) => { + find_nonrepresentable(tcx, sp, seen, ts.iter().cloned()) + } + // Fixed-length vectors. + // FIXME(#11924) Behavior undecided for zero-length vectors. + TyArray(ty, _) => { + is_type_structurally_recursive(tcx, sp, seen, ty) + } + TyAdt(def, substs) => { + find_nonrepresentable(tcx, + sp, + seen, + def.all_fields().map(|f| f.ty(tcx, substs))) + } + TyClosure(..) => { + // this check is run on type definitions, so we don't expect + // to see closure types + bug!("requires check invoked on inapplicable type: {:?}", ty) + } + _ => Representability::Representable, + } + } + + fn same_struct_or_enum<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool { + match ty.sty { + TyAdt(ty_def, _) => { + ty_def == def + } + _ => false + } + } + + fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { + match (&a.sty, &b.sty) { + (&TyAdt(did_a, substs_a), &TyAdt(did_b, substs_b)) => { + if did_a != did_b { + return false; + } + + substs_a.types().zip(substs_b.types()).all(|(a, b)| same_type(a, b)) + } + _ => { + a == b + } + } + } + + // Does the type `ty` directly (without indirection through a pointer) + // contain any types on stack `seen`? 
+ fn is_type_structurally_recursive<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + sp: Span, + seen: &mut Vec>, + ty: Ty<'tcx>) -> Representability { + debug!("is_type_structurally_recursive: {:?}", ty); + + match ty.sty { + TyAdt(def, _) => { + { + // Iterate through stack of previously seen types. + let mut iter = seen.iter(); + + // The first item in `seen` is the type we are actually curious about. + // We want to return SelfRecursive if this type contains itself. + // It is important that we DON'T take generic parameters into account + // for this check, so that Bar in this example counts as SelfRecursive: + // + // struct Foo; + // struct Bar { x: Bar } + + if let Some(&seen_type) = iter.next() { + if same_struct_or_enum(seen_type, def) { + debug!("SelfRecursive: {:?} contains {:?}", + seen_type, + ty); + return Representability::SelfRecursive; + } + } + + // We also need to know whether the first item contains other types + // that are structurally recursive. If we don't catch this case, we + // will recurse infinitely for some inputs. + // + // It is important that we DO take generic parameters into account + // here, so that code like this is considered SelfRecursive, not + // ContainsRecursive: + // + // struct Foo { Option> } + + for &seen_type in iter { + if same_type(ty, seen_type) { + debug!("ContainsRecursive: {:?} contains {:?}", + seen_type, + ty); + return Representability::ContainsRecursive; + } + } + } + + // For structs and enums, track all previously seen types by pushing them + // onto the 'seen' stack. + seen.push(ty); + let out = are_inner_types_recursive(tcx, sp, seen, ty); + seen.pop(); + out + } + _ => { + // No need to push in other cases. 
+ are_inner_types_recursive(tcx, sp, seen, ty) + } + } + } + + debug!("is_type_representable: {:?}", self); + + // To avoid a stack overflow when checking an enum variant or struct that + // contains a different, structurally recursive type, maintain a stack + // of seen types and check recursion for each of them (issues #3008, #3779). + let mut seen: Vec = Vec::new(); + let r = is_type_structurally_recursive(tcx, sp, &mut seen, self); + debug!("is_type_representable: {:?} is {:?}", self, r); + r + } +} diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs new file mode 100644 index 0000000000000..0848dcd2c8d21 --- /dev/null +++ b/src/librustc/ty/walk.rs @@ -0,0 +1,131 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An iterator over the type substructure. +//! WARNING: this does not keep track of the region depth. + +use ty::{self, Ty}; +use rustc_data_structures::small_vec::SmallVec; +use rustc_data_structures::accumulate_vec::IntoIter as AccIntoIter; + +// The TypeWalker's stack is hot enough that it's worth going to some effort to +// avoid heap allocations. +pub type TypeWalkerArray<'tcx> = [Ty<'tcx>; 8]; +pub type TypeWalkerStack<'tcx> = SmallVec>; + +pub struct TypeWalker<'tcx> { + stack: TypeWalkerStack<'tcx>, + last_subtree: usize, +} + +impl<'tcx> TypeWalker<'tcx> { + pub fn new(ty: Ty<'tcx>) -> TypeWalker<'tcx> { + TypeWalker { stack: SmallVec::one(ty), last_subtree: 1, } + } + + /// Skips the subtree of types corresponding to the last type + /// returned by `next()`. + /// + /// Example: Imagine you are walking `Foo, usize>`. 
+ /// + /// ``` + /// let mut iter: TypeWalker = ...; + /// iter.next(); // yields Foo + /// iter.next(); // yields Bar + /// iter.skip_current_subtree(); // skips int + /// iter.next(); // yields usize + /// ``` + pub fn skip_current_subtree(&mut self) { + self.stack.truncate(self.last_subtree); + } +} + +impl<'tcx> Iterator for TypeWalker<'tcx> { + type Item = Ty<'tcx>; + + fn next(&mut self) -> Option> { + debug!("next(): stack={:?}", self.stack); + match self.stack.pop() { + None => { + return None; + } + Some(ty) => { + self.last_subtree = self.stack.len(); + push_subtypes(&mut self.stack, ty); + debug!("next: stack={:?}", self.stack); + Some(ty) + } + } + } +} + +pub fn walk_shallow<'tcx>(ty: Ty<'tcx>) -> AccIntoIter> { + let mut stack = SmallVec::new(); + push_subtypes(&mut stack, ty); + stack.into_iter() +} + +// We push types on the stack in reverse order so as to +// maintain a pre-order traversal. As of the time of this +// writing, the fact that the traversal is pre-order is not +// known to be significant to any code, but it seems like the +// natural order one would expect (basically, the order of the +// types as they are written). +fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { + match parent_ty.sty { + ty::TyBool | ty::TyChar | ty::TyInt(_) | ty::TyUint(_) | ty::TyFloat(_) | + ty::TyStr | ty::TyInfer(_) | ty::TyParam(_) | ty::TyNever | ty::TyError => { + } + ty::TyBox(ty) | ty::TyArray(ty, _) | ty::TySlice(ty) => { + stack.push(ty); + } + ty::TyRawPtr(ref mt) | ty::TyRef(_, ref mt) => { + stack.push(mt.ty); + } + ty::TyProjection(ref data) => { + stack.extend(data.trait_ref.substs.types().rev()); + } + ty::TyDynamic(ref obj, ..) 
=> { + stack.extend(obj.iter().rev().flat_map(|predicate| { + let (substs, opt_ty) = match *predicate.skip_binder() { + ty::ExistentialPredicate::Trait(tr) => (tr.substs, None), + ty::ExistentialPredicate::Projection(p) => + (p.trait_ref.substs, Some(p.ty)), + ty::ExistentialPredicate::AutoTrait(_) => + // Empty iterator + (ty::Substs::empty(), None), + }; + + substs.types().rev().chain(opt_ty) + })); + } + ty::TyAdt(_, substs) | ty::TyAnon(_, substs) => { + stack.extend(substs.types().rev()); + } + ty::TyClosure(_, ref substs) => { + stack.extend(substs.substs.types().rev()); + } + ty::TyTuple(ts) => { + stack.extend(ts.iter().cloned().rev()); + } + ty::TyFnDef(_, substs, ref ft) => { + stack.extend(substs.types().rev()); + push_sig_subtypes(stack, &ft.sig); + } + ty::TyFnPtr(ref ft) => { + push_sig_subtypes(stack, &ft.sig); + } + } +} + +fn push_sig_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, sig: &ty::PolyFnSig<'tcx>) { + stack.push(sig.0.output); + stack.extend(sig.0.inputs.iter().cloned().rev()); +} diff --git a/src/librustc/middle/ty/wf.rs b/src/librustc/ty/wf.rs similarity index 77% rename from src/librustc/middle/ty/wf.rs rename to src/librustc/ty/wf.rs index 5f0fc306c24f8..bab9964651dca 100644 --- a/src/librustc/middle/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -8,16 +8,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use middle::def_id::DefId; -use middle::infer::InferCtxt; -use middle::ty::outlives::{self, Component}; -use middle::subst::Substs; -use middle::traits; -use middle::ty::{self, ToPredicate, Ty, TypeFoldable}; +use hir::def_id::DefId; +use infer::InferCtxt; +use ty::outlives::Component; +use ty::subst::Substs; +use traits; +use ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable}; use std::iter::once; use syntax::ast; -use syntax::codemap::Span; -use util::common::ErrorReported; +use syntax_pos::Span; +use middle::lang_items; /// Returns the set of obligations needed to make `ty` well-formed. /// If `ty` contains unresolved inference variables, this may include @@ -25,11 +25,11 @@ use util::common::ErrorReported; /// inference variable, returns `None`, because we are not able to /// make any progress at all. This is to prevent "livelock" where we /// say "$0 is WF if $0 is WF". -pub fn obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, - body_id: ast::NodeId, - ty: Ty<'tcx>, - span: Span) - -> Option>> +pub fn obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + body_id: ast::NodeId, + ty: Ty<'tcx>, + span: Span) + -> Option>> { let mut wf = WfPredicates { infcx: infcx, body_id: body_id, @@ -49,22 +49,22 @@ pub fn obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, /// well-formed. For example, if there is a trait `Set` defined like /// `trait Set`, then the trait reference `Foo: Set` is WF /// if `Bar: Eq`. 
-pub fn trait_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, - body_id: ast::NodeId, - trait_ref: &ty::TraitRef<'tcx>, - span: Span) - -> Vec> +pub fn trait_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + body_id: ast::NodeId, + trait_ref: &ty::TraitRef<'tcx>, + span: Span) + -> Vec> { let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] }; wf.compute_trait_ref(trait_ref); wf.normalize() } -pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, - body_id: ast::NodeId, - predicate: &ty::Predicate<'tcx>, - span: Span) - -> Vec> +pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, + body_id: ast::NodeId, + predicate: &ty::Predicate<'tcx>, + span: Span) + -> Vec> { let mut wf = WfPredicates { infcx: infcx, body_id: body_id, span: span, out: vec![] }; @@ -92,6 +92,8 @@ pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, } ty::Predicate::ObjectSafe(_) => { } + ty::Predicate::ClosureKind(..) => { + } } wf.normalize() @@ -110,16 +112,16 @@ pub fn predicate_obligations<'a,'tcx>(infcx: &InferCtxt<'a, 'tcx>, /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`. #[derive(Debug)] pub enum ImpliedBound<'tcx> { - RegionSubRegion(ty::Region, ty::Region), - RegionSubParam(ty::Region, ty::ParamTy), - RegionSubProjection(ty::Region, ty::ProjectionTy<'tcx>), + RegionSubRegion(&'tcx ty::Region, &'tcx ty::Region), + RegionSubParam(&'tcx ty::Region, ty::ParamTy), + RegionSubProjection(&'tcx ty::Region, ty::ProjectionTy<'tcx>), } /// Compute the implied bounds that a callee/impl can assume based on /// the fact that caller/projector has ensured that `ty` is WF. See /// the `ImpliedBound` type for more details. 
-pub fn implied_bounds<'a,'tcx>( - infcx: &'a InferCtxt<'a,'tcx>, +pub fn implied_bounds<'a, 'gcx, 'tcx>( + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, body_id: ast::NodeId, ty: Ty<'tcx>, span: Span) @@ -155,6 +157,7 @@ pub fn implied_bounds<'a,'tcx>( ty::Predicate::Trait(..) | ty::Predicate::Equate(..) | ty::Predicate::Projection(..) | + ty::Predicate::ClosureKind(..) | ty::Predicate::ObjectSafe(..) => vec![], @@ -175,7 +178,8 @@ pub fn implied_bounds<'a,'tcx>( match infcx.tcx.no_late_bound_regions(data) { None => vec![], Some(ty::OutlivesPredicate(ty_a, r_b)) => { - let components = outlives::components(infcx, ty_a); + let ty_a = infcx.resolve_type_vars_if_possible(&ty_a); + let components = infcx.tcx.outlives_components(ty_a); implied_bounds_from_components(r_b, components) } }, @@ -189,7 +193,7 @@ pub fn implied_bounds<'a,'tcx>( /// this down to determine what relationships would have to hold for /// `T: 'a` to hold. We get to assume that the caller has validated /// those relationships. -fn implied_bounds_from_components<'tcx>(sub_region: ty::Region, +fn implied_bounds_from_components<'tcx>(sub_region: &'tcx ty::Region, sup_components: Vec>) -> Vec> { @@ -198,11 +202,11 @@ fn implied_bounds_from_components<'tcx>(sub_region: ty::Region, .flat_map(|component| { match component { Component::Region(r) => - vec!(ImpliedBound::RegionSubRegion(sub_region, r)), + vec![ImpliedBound::RegionSubRegion(sub_region, r)], Component::Param(p) => - vec!(ImpliedBound::RegionSubParam(sub_region, p)), + vec![ImpliedBound::RegionSubParam(sub_region, p)], Component::Projection(p) => - vec!(ImpliedBound::RegionSubProjection(sub_region, p)), + vec![ImpliedBound::RegionSubProjection(sub_region, p)], Component::EscapingProjection(_) => // If the projection has escaping regions, don't // try to infer any implied bounds even for its @@ -212,22 +216,22 @@ fn implied_bounds_from_components<'tcx>(sub_region: ty::Region, // idea is that the WAY that the caller proves // that may change in the 
future and we want to // give ourselves room to get smarter here. - vec!(), + vec![], Component::UnresolvedInferenceVariable(..) => - vec!(), + vec![], } }) .collect() } -struct WfPredicates<'a,'tcx:'a> { - infcx: &'a InferCtxt<'a, 'tcx>, +struct WfPredicates<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, body_id: ast::NodeId, span: Span, out: Vec>, } -impl<'a,'tcx> WfPredicates<'a,'tcx> { +impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { fn cause(&mut self, code: traits::ObligationCauseCode<'tcx>) -> traits::ObligationCause<'tcx> { traits::ObligationCause::new(self.span, self.body_id, code) } @@ -253,9 +257,7 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { let cause = self.cause(traits::MiscObligation); self.out.extend( - trait_ref.substs.types - .as_slice() - .iter() + trait_ref.substs.types() .filter(|ty| !ty.has_escaping_regions()) .map(|ty| traits::Obligation::new(cause.clone(), ty::Predicate::WellFormed(ty)))); @@ -265,7 +267,7 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { /// into `self.out`. fn compute_projection(&mut self, data: ty::ProjectionTy<'tcx>) { // A projection is well-formed if (a) the trait ref itself is - // WF WF and (b) the trait-ref holds. (It may also be + // WF and (b) the trait-ref holds. (It may also be // normalizable and be WF that way.) self.compute_trait_ref(&data.trait_ref); @@ -277,6 +279,17 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { } } + fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) { + if !subty.has_escaping_regions() { + let cause = self.cause(cause); + let trait_ref = ty::TraitRef { + def_id: self.infcx.tcx.require_lang_item(lang_items::SizedTraitLangItem), + substs: self.infcx.tcx.mk_substs_trait(subty, &[]), + }; + self.out.push(traits::Obligation::new(cause, trait_ref.to_predicate())); + } + } + /// Push new obligations into `out`. Returns true if it was able /// to generate all the predicates needed to validate that `ty0` /// is WF. 
Returns false if `ty0` is an unresolved type variable, @@ -292,29 +305,25 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { ty::TyFloat(..) | ty::TyError | ty::TyStr | + ty::TyNever | ty::TyParam(_) => { // WfScalar, WfParameter, etc } ty::TySlice(subty) | ty::TyArray(subty, _) => { - if !subty.has_escaping_regions() { - let cause = self.cause(traits::SliceOrArrayElem); - match traits::trait_ref_for_builtin_bound(self.infcx.tcx, - ty::BoundSized, - subty) { - Ok(trait_ref) => { - self.out.push( - traits::Obligation::new(cause, - trait_ref.to_predicate())); - } - Err(ErrorReported) => { } + self.require_sized(subty, traits::SliceOrArrayElem); + } + + ty::TyTuple(ref tys) => { + if let Some((_last, rest)) = tys.split_last() { + for elem in rest { + self.require_sized(elem, traits::TupleElem); } } } ty::TyBox(_) | - ty::TyTuple(_) | ty::TyRawPtr(_) => { // simple cases that are WF if their type args are WF } @@ -324,8 +333,7 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { self.compute_projection(data); } - ty::TyEnum(def, substs) | - ty::TyStruct(def, substs) => { + ty::TyAdt(def, substs) => { // WfNominalType let obligations = self.nominal_obligations(def.did, substs); self.out.extend(obligations); @@ -340,7 +348,7 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { cause, ty::Predicate::TypeOutlives( ty::Binder( - ty::OutlivesPredicate(mt.ty, *r))))); + ty::OutlivesPredicate(mt.ty, r))))); } } @@ -354,27 +362,38 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { // WFedness.) } - ty::TyBareFn(..) => { - // let the loop iterator into the argument/return + ty::TyFnDef(..) | ty::TyFnPtr(_) => { + // let the loop iterate into the argument/return // types appearing in the fn signature } - ty::TyTrait(ref data) => { + ty::TyAnon(..) => { + // all of the requirements on type parameters + // should've been checked by the instantiation + // of whatever returned this exact `impl Trait`. + } + + ty::TyDynamic(data, r) => { // WfObject // // Here, we defer WF checking due to higher-ranked // regions. 
This is perhaps not ideal. - self.from_object_ty(ty, data); + self.from_object_ty(ty, data, r); // FIXME(#27579) RFC also considers adding trait // obligations that don't refer to Self and // checking those let cause = self.cause(traits::MiscObligation); - self.out.push( - traits::Obligation::new( - cause, - ty::Predicate::ObjectSafe(data.principal_def_id()))); + + let component_traits = + data.auto_traits().chain(data.principal().map(|p| p.def_id())); + self.out.extend( + component_traits.map(|did| traits::Obligation::new( + cause.clone(), + ty::Predicate::ObjectSafe(did) + )) + ); } // Inference variables are the complicated case, since we don't @@ -420,7 +439,7 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { -> Vec> { let predicates = - self.infcx.tcx.lookup_predicates(def_id) + self.infcx.tcx.item_predicates(def_id) .instantiate(self.infcx.tcx, substs); let cause = self.cause(traits::ItemObligation(def_id)); predicates.predicates @@ -430,7 +449,9 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { .collect() } - fn from_object_ty(&mut self, ty: Ty<'tcx>, data: &ty::TraitTy<'tcx>) { + fn from_object_ty(&mut self, ty: Ty<'tcx>, + data: ty::Binder<&'tcx ty::Slice>>, + region: &'tcx ty::Region) { // Imagine a type like this: // // trait Foo { } @@ -465,14 +486,12 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { if !data.has_escaping_regions() { let implicit_bounds = - object_region_bounds(self.infcx.tcx, - &data.principal, - data.bounds.builtin_bounds); + object_region_bounds(self.infcx.tcx, data); - let explicit_bound = data.bounds.region_bound; + let explicit_bound = region; for implicit_bound in implicit_bounds { - let cause = self.cause(traits::ReferenceOutlivesReferent(ty)); + let cause = self.cause(traits::ObjectTypeBound(ty, explicit_bound)); let outlives = ty::Binder(ty::OutlivesPredicate(explicit_bound, implicit_bound)); self.out.push(traits::Obligation::new(cause, outlives.to_predicate())); } @@ -486,24 +505,23 @@ impl<'a,'tcx> WfPredicates<'a,'tcx> { /// they declare `trait 
SomeTrait : 'static`, for example, then /// `'static` would appear in the list. The hard work is done by /// `ty::required_region_bounds`, see that for more information. -pub fn object_region_bounds<'tcx>( - tcx: &ty::ctxt<'tcx>, - principal: &ty::PolyTraitRef<'tcx>, - others: ty::BuiltinBounds) - -> Vec +pub fn object_region_bounds<'a, 'gcx, 'tcx>( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + existential_predicates: ty::Binder<&'tcx ty::Slice>>) + -> Vec<&'tcx ty::Region> { // Since we don't actually *know* the self type for an object, // this "open(err)" serves as a kind of dummy standin -- basically // a skolemized type. let open_ty = tcx.mk_infer(ty::FreshTy(0)); - // Note that we preserve the overall binding levels here. - assert!(!open_ty.has_escaping_regions()); - let substs = tcx.mk_substs(principal.0.substs.with_self_ty(open_ty)); - let trait_refs = vec!(ty::Binder(ty::TraitRef::new(principal.0.def_id, substs))); - - let mut predicates = others.to_predicates(tcx, open_ty); - predicates.extend(trait_refs.iter().map(|t| t.to_predicate())); + let predicates = existential_predicates.iter().filter_map(|predicate| { + if let ty::ExistentialPredicate::Projection(_) = *predicate.skip_binder() { + None + } else { + Some(predicate.with_self_ty(tcx, open_ty)) + } + }).collect(); tcx.required_region_bounds(open_ty, predicates) } diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 2481cab78b4d6..e01856b2a4762 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -12,17 +12,12 @@ use std::cell::{RefCell, Cell}; use std::collections::HashMap; -use std::collections::hash_state::HashState; use std::ffi::CString; use std::fmt::Debug; -use std::hash::Hash; +use std::hash::{Hash, BuildHasher}; use std::iter::repeat; use std::path::Path; -use std::time::Instant; - -use rustc_front::hir; -use rustc_front::intravisit; -use rustc_front::intravisit::Visitor; +use std::time::{Duration, Instant}; // The name of the associated type for `Fn` return 
types pub const FN_OUTPUT_NAME: &'static str = "Output"; @@ -48,12 +43,6 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where let rv = f(); let dur = start.elapsed(); - // Hack up our own formatting for the duration to make it easier for scripts - // to parse (always use the same number of decimal places and the same unit). - const NANOS_PER_SEC: f64 = 1_000_000_000.0; - let secs = dur.as_secs() as f64; - let secs = secs + dur.subsec_nanos() as f64 / NANOS_PER_SEC; - let mem_string = match get_resident() { Some(n) => { let mb = n as f64 / 1_000_000.0; @@ -61,14 +50,57 @@ pub fn time(do_it: bool, what: &str, f: F) -> T where } None => "".to_owned(), }; - println!("{}time: {:.3}{}\t{}", repeat(" ").take(old).collect::(), - secs, mem_string, what); + println!("{}time: {}{}\t{}", + repeat(" ").take(old).collect::(), + duration_to_secs_str(dur), + mem_string, + what); DEPTH.with(|slot| slot.set(old)); rv } +// Hack up our own formatting for the duration to make it easier for scripts +// to parse (always use the same number of decimal places and the same unit). +pub fn duration_to_secs_str(dur: Duration) -> String { + const NANOS_PER_SEC: f64 = 1_000_000_000.0; + let secs = dur.as_secs() as f64 + + dur.subsec_nanos() as f64 / NANOS_PER_SEC; + + format!("{:.3}", secs) +} + +pub fn to_readable_str(mut val: usize) -> String { + let mut groups = vec![]; + loop { + let group = val % 1000; + + val /= 1000; + + if val == 0 { + groups.push(format!("{}", group)); + break + } else { + groups.push(format!("{:03}", group)); + } + } + + groups.reverse(); + + groups.join("_") +} + +pub fn record_time(accu: &Cell, f: F) -> T where + F: FnOnce() -> T, +{ + let start = Instant::now(); + let rv = f(); + let duration = start.elapsed(); + accu.set(duration + accu.get()); + rv +} + // Like std::macros::try!, but for Option<>. macro_rules! 
option_try( ($e:expr) => (match $e { Some(e) => e, None => return None }) @@ -150,57 +182,6 @@ pub fn indenter() -> Indenter { Indenter { _cannot_construct_outside_of_this_module: () } } -struct LoopQueryVisitor

    (&mut self, mut predicate: P) -> Option where - P: FnMut(Self::Item) -> bool, - Self: Sized + ExactSizeIterator + DoubleEndedIterator - { - let mut i = self.len(); - - while let Some(v) = self.next_back() { - if predicate(v) { - return Some(i - 1); - } - // No need for an overflow check here, because `ExactSizeIterator` - // implies that the number of elements fits into a `usize`. - i -= 1; - } - None - } - - /// Returns the maximum element of an iterator. - /// - /// If the two elements are equally maximum, the latest element is - /// returned. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().max(), Some(&3)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn max(self) -> Option where Self: Sized, Self::Item: Ord - { - select_fold1(self, - |_| (), - // switch to y even if it is only equal, to preserve - // stability. - |_, x, _, y| *x <= *y) - .map(|(_, x)| x) - } - - /// Returns the minimum element of an iterator. - /// - /// If the two elements are equally minimum, the first element is - /// returned. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().min(), Some(&1)); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn min(self) -> Option where Self: Sized, Self::Item: Ord - { - select_fold1(self, - |_| (), - // only switch to y if it is strictly smaller, to - // preserve stability. - |_, x, _, y| *x > *y) - .map(|(_, x)| x) - } - - #[allow(missing_docs)] - #[inline] - #[unstable(feature = "iter_cmp", - reason = "may want to produce an Ordering directly; see #15311", - issue = "27724")] - #[rustc_deprecated(reason = "renamed to max_by_key", since = "1.6.0")] - fn max_by(self, f: F) -> Option where - Self: Sized, - F: FnMut(&Self::Item) -> B, - { - self.max_by_key(f) - } - - /// Returns the element that gives the maximum value from the - /// specified function. 
- /// - /// Returns the rightmost element if the comparison determines two elements - /// to be equally maximum. - /// - /// # Examples - /// - /// ``` - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(*a.iter().max_by_key(|x| x.abs()).unwrap(), -10); - /// ``` - #[inline] - #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] - fn max_by_key(self, f: F) -> Option - where Self: Sized, F: FnMut(&Self::Item) -> B, - { - select_fold1(self, - f, - // switch to y even if it is only equal, to preserve - // stability. - |x_p, _, y_p, _| x_p <= y_p) - .map(|(_, x)| x) - } - - #[inline] - #[allow(missing_docs)] - #[unstable(feature = "iter_cmp", - reason = "may want to produce an Ordering directly; see #15311", - issue = "27724")] - #[rustc_deprecated(reason = "renamed to min_by_key", since = "1.6.0")] - fn min_by(self, f: F) -> Option where - Self: Sized, - F: FnMut(&Self::Item) -> B, - { - self.min_by_key(f) - } - - /// Returns the element that gives the minimum value from the - /// specified function. - /// - /// Returns the latest element if the comparison determines two elements - /// to be equally minimum. - /// - /// # Examples - /// - /// ``` - /// let a = [-3_i32, 0, 1, 5, -10]; - /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); - /// ``` - #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] - fn min_by_key(self, f: F) -> Option - where Self: Sized, F: FnMut(&Self::Item) -> B, - { - select_fold1(self, - f, - // only switch to y if it is strictly smaller, to - // preserve stability. - |x_p, _, y_p, _| x_p > y_p) - .map(|(_, x)| x) - } - - /// Reverses an iterator's direction. - /// - /// Usually, iterators iterate from left to right. After using `rev()`, - /// an iterator will instead iterate from right to left. - /// - /// This is only possible if the iterator has an end, so `rev()` only - /// works on [`DoubleEndedIterator`]s. 
- /// - /// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html - /// - /// # Examples - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter().rev(); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// assert_eq!(iter.next(), Some(&2)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn rev(self) -> Rev where Self: Sized + DoubleEndedIterator { - Rev{iter: self} - } - - /// Converts an iterator of pairs into a pair of containers. - /// - /// `unzip()` consumes an entire iterator of pairs, producing two - /// collections: one from the left elements of the pairs, and one - /// from the right elements. - /// - /// This function is, in some sense, the opposite of [`zip()`]. - /// - /// [`zip()`]: #method.zip - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [(1, 2), (3, 4)]; - /// - /// let (left, right): (Vec<_>, Vec<_>) = a.iter().cloned().unzip(); - /// - /// assert_eq!(left, [1, 3]); - /// assert_eq!(right, [2, 4]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn unzip(self) -> (FromA, FromB) where - FromA: Default + Extend, - FromB: Default + Extend, - Self: Sized + Iterator, - { - struct SizeHint(usize, Option, marker::PhantomData); - impl Iterator for SizeHint { - type Item = A; - - fn next(&mut self) -> Option { None } - fn size_hint(&self) -> (usize, Option) { - (self.0, self.1) - } - } - - let (lo, hi) = self.size_hint(); - let mut ts: FromA = Default::default(); - let mut us: FromB = Default::default(); - - ts.extend(SizeHint(lo, hi, marker::PhantomData)); - us.extend(SizeHint(lo, hi, marker::PhantomData)); - - for (t, u) in self { - ts.extend(Some(t)); - us.extend(Some(u)); - } - - (ts, us) - } - - /// Creates an iterator which clone()s all of its elements. - /// - /// This is useful when you have an iterator over `&T`, but you need an - /// iterator over `T`. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let v_cloned: Vec<_> = a.iter().cloned().collect(); - /// - /// // cloned is the same as .map(|&x| x), for integers - /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); - /// - /// assert_eq!(v_cloned, vec![1, 2, 3]); - /// assert_eq!(v_map, vec![1, 2, 3]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn cloned<'a, T: 'a>(self) -> Cloned - where Self: Sized + Iterator, T: Clone - { - Cloned { it: self } - } - - /// Repeats an iterator endlessly. - /// - /// Instead of stopping at `None`, the iterator will instead start again, - /// from the beginning. After iterating again, it will start at the - /// beginning again. And again. And again. Forever. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut it = a.iter().cycle(); - /// - /// assert_eq!(it.next(), Some(&1)); - /// assert_eq!(it.next(), Some(&2)); - /// assert_eq!(it.next(), Some(&3)); - /// assert_eq!(it.next(), Some(&1)); - /// assert_eq!(it.next(), Some(&2)); - /// assert_eq!(it.next(), Some(&3)); - /// assert_eq!(it.next(), Some(&1)); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - #[inline] - fn cycle(self) -> Cycle where Self: Sized + Clone { - Cycle{orig: self.clone(), iter: self} - } - - /// Sums the elements of an iterator. - /// - /// Takes each element, adds them together, and returns the result. - /// - /// An empty iterator returns the zero value of the type. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// #![feature(iter_arith)] - /// - /// let a = [1, 2, 3]; - /// let sum: i32 = a.iter().sum(); - /// - /// assert_eq!(sum, 6); - /// ``` - #[unstable(feature = "iter_arith", reason = "bounds recently changed", - issue = "27739")] - fn sum(self) -> S where - S: Add + Zero, - Self: Sized, - { - self.fold(Zero::zero(), |s, e| s + e) - } - - /// Iterates over the entire iterator, multiplying all the elements - /// - /// An empty iterator returns the one value of the type. - /// - /// # Examples - /// - /// ``` - /// #![feature(iter_arith)] - /// - /// fn factorial(n: u32) -> u32 { - /// (1..).take_while(|&i| i <= n).product() - /// } - /// assert_eq!(factorial(0), 1); - /// assert_eq!(factorial(1), 1); - /// assert_eq!(factorial(5), 120); - /// ``` - #[unstable(feature="iter_arith", reason = "bounds recently changed", - issue = "27739")] - fn product